summaryrefslogtreecommitdiffstats
path: root/third_party/libwebrtc/call
diff options
context:
space:
mode:
Diffstat (limited to 'third_party/libwebrtc/call')
-rw-r--r--third_party/libwebrtc/call/BUILD.gn700
-rw-r--r--third_party/libwebrtc/call/DEPS32
-rw-r--r--third_party/libwebrtc/call/OWNERS8
-rw-r--r--third_party/libwebrtc/call/adaptation/BUILD.gn137
-rw-r--r--third_party/libwebrtc/call/adaptation/OWNERS3
-rw-r--r--third_party/libwebrtc/call/adaptation/adaptation_constraint.cc17
-rw-r--r--third_party/libwebrtc/call/adaptation/adaptation_constraint.h41
-rw-r--r--third_party/libwebrtc/call/adaptation/broadcast_resource_listener.cc122
-rw-r--r--third_party/libwebrtc/call/adaptation/broadcast_resource_listener.h75
-rw-r--r--third_party/libwebrtc/call/adaptation/broadcast_resource_listener_unittest.cc121
-rw-r--r--third_party/libwebrtc/call/adaptation/degradation_preference_provider.cc14
-rw-r--r--third_party/libwebrtc/call/adaptation/degradation_preference_provider.h27
-rw-r--r--third_party/libwebrtc/call/adaptation/encoder_settings.cc54
-rw-r--r--third_party/libwebrtc/call/adaptation/encoder_settings.h48
-rw-r--r--third_party/libwebrtc/call/adaptation/resource_adaptation_gn/moz.build246
-rw-r--r--third_party/libwebrtc/call/adaptation/resource_adaptation_processor.cc378
-rw-r--r--third_party/libwebrtc/call/adaptation/resource_adaptation_processor.h167
-rw-r--r--third_party/libwebrtc/call/adaptation/resource_adaptation_processor_interface.cc20
-rw-r--r--third_party/libwebrtc/call/adaptation/resource_adaptation_processor_interface.h67
-rw-r--r--third_party/libwebrtc/call/adaptation/resource_adaptation_processor_unittest.cc740
-rw-r--r--third_party/libwebrtc/call/adaptation/resource_unittest.cc55
-rw-r--r--third_party/libwebrtc/call/adaptation/test/fake_adaptation_constraint.cc40
-rw-r--r--third_party/libwebrtc/call/adaptation/test/fake_adaptation_constraint.h42
-rw-r--r--third_party/libwebrtc/call/adaptation/test/fake_frame_rate_provider.cc27
-rw-r--r--third_party/libwebrtc/call/adaptation/test/fake_frame_rate_provider.h69
-rw-r--r--third_party/libwebrtc/call/adaptation/test/fake_resource.cc46
-rw-r--r--third_party/libwebrtc/call/adaptation/test/fake_resource.h45
-rw-r--r--third_party/libwebrtc/call/adaptation/test/fake_video_stream_input_state_provider.cc35
-rw-r--r--third_party/libwebrtc/call/adaptation/test/fake_video_stream_input_state_provider.h32
-rw-r--r--third_party/libwebrtc/call/adaptation/test/mock_resource_listener.h30
-rw-r--r--third_party/libwebrtc/call/adaptation/video_source_restrictions.cc173
-rw-r--r--third_party/libwebrtc/call/adaptation/video_source_restrictions.h89
-rw-r--r--third_party/libwebrtc/call/adaptation/video_source_restrictions_unittest.cc146
-rw-r--r--third_party/libwebrtc/call/adaptation/video_stream_adapter.cc753
-rw-r--r--third_party/libwebrtc/call/adaptation/video_stream_adapter.h271
-rw-r--r--third_party/libwebrtc/call/adaptation/video_stream_adapter_unittest.cc951
-rw-r--r--third_party/libwebrtc/call/adaptation/video_stream_input_state.cc80
-rw-r--r--third_party/libwebrtc/call/adaptation/video_stream_input_state.h53
-rw-r--r--third_party/libwebrtc/call/adaptation/video_stream_input_state_provider.cc54
-rw-r--r--third_party/libwebrtc/call/adaptation/video_stream_input_state_provider.h41
-rw-r--r--third_party/libwebrtc/call/adaptation/video_stream_input_state_provider_unittest.cc62
-rw-r--r--third_party/libwebrtc/call/audio_receive_stream.cc24
-rw-r--r--third_party/libwebrtc/call/audio_receive_stream.h210
-rw-r--r--third_party/libwebrtc/call/audio_send_stream.cc108
-rw-r--r--third_party/libwebrtc/call/audio_send_stream.h203
-rw-r--r--third_party/libwebrtc/call/audio_sender.h30
-rw-r--r--third_party/libwebrtc/call/audio_sender_interface_gn/moz.build216
-rw-r--r--third_party/libwebrtc/call/audio_state.cc18
-rw-r--r--third_party/libwebrtc/call/audio_state.h69
-rw-r--r--third_party/libwebrtc/call/bitrate_allocator.cc593
-rw-r--r--third_party/libwebrtc/call/bitrate_allocator.h170
-rw-r--r--third_party/libwebrtc/call/bitrate_allocator_gn/moz.build236
-rw-r--r--third_party/libwebrtc/call/bitrate_allocator_unittest.cc1037
-rw-r--r--third_party/libwebrtc/call/bitrate_configurator_gn/moz.build236
-rw-r--r--third_party/libwebrtc/call/bitrate_estimator_tests.cc329
-rw-r--r--third_party/libwebrtc/call/call.cc1428
-rw-r--r--third_party/libwebrtc/call/call.h147
-rw-r--r--third_party/libwebrtc/call/call_basic_stats.cc20
-rw-r--r--third_party/libwebrtc/call/call_basic_stats.h21
-rw-r--r--third_party/libwebrtc/call/call_config.cc41
-rw-r--r--third_party/libwebrtc/call/call_config.h91
-rw-r--r--third_party/libwebrtc/call/call_factory.cc119
-rw-r--r--third_party/libwebrtc/call/call_factory.h37
-rw-r--r--third_party/libwebrtc/call/call_gn/moz.build239
-rw-r--r--third_party/libwebrtc/call/call_interfaces_gn/moz.build243
-rw-r--r--third_party/libwebrtc/call/call_perf_tests.cc1209
-rw-r--r--third_party/libwebrtc/call/call_unittest.cc516
-rw-r--r--third_party/libwebrtc/call/degraded_call.cc382
-rw-r--r--third_party/libwebrtc/call/degraded_call.h202
-rw-r--r--third_party/libwebrtc/call/fake_network_pipe.cc382
-rw-r--r--third_party/libwebrtc/call/fake_network_pipe.h230
-rw-r--r--third_party/libwebrtc/call/fake_network_pipe_unittest.cc509
-rw-r--r--third_party/libwebrtc/call/flexfec_receive_stream.cc26
-rw-r--r--third_party/libwebrtc/call/flexfec_receive_stream.h79
-rw-r--r--third_party/libwebrtc/call/flexfec_receive_stream_impl.cc201
-rw-r--r--third_party/libwebrtc/call/flexfec_receive_stream_impl.h99
-rw-r--r--third_party/libwebrtc/call/flexfec_receive_stream_unittest.cc154
-rw-r--r--third_party/libwebrtc/call/packet_receiver.h45
-rw-r--r--third_party/libwebrtc/call/rampup_tests.cc711
-rw-r--r--third_party/libwebrtc/call/rampup_tests.h170
-rw-r--r--third_party/libwebrtc/call/receive_stream.h72
-rw-r--r--third_party/libwebrtc/call/receive_stream_interface_gn/moz.build220
-rw-r--r--third_party/libwebrtc/call/receive_time_calculator.cc120
-rw-r--r--third_party/libwebrtc/call/receive_time_calculator.h63
-rw-r--r--third_party/libwebrtc/call/receive_time_calculator_unittest.cc249
-rw-r--r--third_party/libwebrtc/call/rtp_bitrate_configurator.cc135
-rw-r--r--third_party/libwebrtc/call/rtp_bitrate_configurator.h77
-rw-r--r--third_party/libwebrtc/call/rtp_bitrate_configurator_unittest.cc300
-rw-r--r--third_party/libwebrtc/call/rtp_config.cc203
-rw-r--r--third_party/libwebrtc/call/rtp_config.h175
-rw-r--r--third_party/libwebrtc/call/rtp_demuxer.cc444
-rw-r--r--third_party/libwebrtc/call/rtp_demuxer.h218
-rw-r--r--third_party/libwebrtc/call/rtp_demuxer_unittest.cc1286
-rw-r--r--third_party/libwebrtc/call/rtp_interfaces_gn/moz.build236
-rw-r--r--third_party/libwebrtc/call/rtp_packet_sink_interface.h26
-rw-r--r--third_party/libwebrtc/call/rtp_payload_params.cc790
-rw-r--r--third_party/libwebrtc/call/rtp_payload_params.h134
-rw-r--r--third_party/libwebrtc/call/rtp_payload_params_unittest.cc1398
-rw-r--r--third_party/libwebrtc/call/rtp_receiver_gn/moz.build239
-rw-r--r--third_party/libwebrtc/call/rtp_sender_gn/moz.build239
-rw-r--r--third_party/libwebrtc/call/rtp_stream_receiver_controller.cc71
-rw-r--r--third_party/libwebrtc/call/rtp_stream_receiver_controller.h80
-rw-r--r--third_party/libwebrtc/call/rtp_stream_receiver_controller_interface.h43
-rw-r--r--third_party/libwebrtc/call/rtp_transport_config.h53
-rw-r--r--third_party/libwebrtc/call/rtp_transport_controller_send.cc708
-rw-r--r--third_party/libwebrtc/call/rtp_transport_controller_send.h216
-rw-r--r--third_party/libwebrtc/call/rtp_transport_controller_send_factory.h34
-rw-r--r--third_party/libwebrtc/call/rtp_transport_controller_send_factory_interface.h30
-rw-r--r--third_party/libwebrtc/call/rtp_transport_controller_send_interface.h160
-rw-r--r--third_party/libwebrtc/call/rtp_video_sender.cc1031
-rw-r--r--third_party/libwebrtc/call/rtp_video_sender.h223
-rw-r--r--third_party/libwebrtc/call/rtp_video_sender_interface.h69
-rw-r--r--third_party/libwebrtc/call/rtp_video_sender_unittest.cc1232
-rw-r--r--third_party/libwebrtc/call/rtx_receive_stream.cc88
-rw-r--r--third_party/libwebrtc/call/rtx_receive_stream.h59
-rw-r--r--third_party/libwebrtc/call/rtx_receive_stream_unittest.cc271
-rw-r--r--third_party/libwebrtc/call/simulated_network.cc276
-rw-r--r--third_party/libwebrtc/call/simulated_network.h134
-rw-r--r--third_party/libwebrtc/call/simulated_network_unittest.cc513
-rw-r--r--third_party/libwebrtc/call/simulated_packet_receiver.h42
-rw-r--r--third_party/libwebrtc/call/syncable.cc17
-rw-r--r--third_party/libwebrtc/call/syncable.h46
-rw-r--r--third_party/libwebrtc/call/test/mock_audio_send_stream.h51
-rw-r--r--third_party/libwebrtc/call/test/mock_bitrate_allocator.h32
-rw-r--r--third_party/libwebrtc/call/test/mock_rtp_packet_sink_interface.h25
-rw-r--r--third_party/libwebrtc/call/test/mock_rtp_transport_controller_send.h105
-rw-r--r--third_party/libwebrtc/call/version.cc25
-rw-r--r--third_party/libwebrtc/call/version.h25
-rw-r--r--third_party/libwebrtc/call/version_gn/moz.build221
-rw-r--r--third_party/libwebrtc/call/video_receive_stream.cc170
-rw-r--r--third_party/libwebrtc/call/video_receive_stream.h344
-rw-r--r--third_party/libwebrtc/call/video_send_stream.cc127
-rw-r--r--third_party/libwebrtc/call/video_send_stream.h274
-rw-r--r--third_party/libwebrtc/call/video_stream_api_gn/moz.build237
134 files changed, 30547 insertions, 0 deletions
diff --git a/third_party/libwebrtc/call/BUILD.gn b/third_party/libwebrtc/call/BUILD.gn
new file mode 100644
index 0000000000..47018a570a
--- /dev/null
+++ b/third_party/libwebrtc/call/BUILD.gn
@@ -0,0 +1,700 @@
+# Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../webrtc.gni")
+
+rtc_library("version") {
+ sources = [
+ "version.cc",
+ "version.h",
+ ]
+ visibility = [ ":*" ]
+}
+
+rtc_library("call_interfaces") {
+ sources = [
+ "audio_receive_stream.cc",
+ "audio_receive_stream.h",
+ "audio_send_stream.cc",
+ "audio_send_stream.h",
+ "audio_state.cc",
+ "audio_state.h",
+ "call.h",
+ "call_config.cc",
+ "call_config.h",
+ "flexfec_receive_stream.cc",
+ "flexfec_receive_stream.h",
+ "packet_receiver.h",
+ "syncable.cc",
+ "syncable.h",
+ ]
+ if (build_with_mozilla) {
+ sources += [
+ "call_basic_stats.cc",
+ "call_basic_stats.h",
+ ]
+ }
+
+ deps = [
+ ":audio_sender_interface",
+ ":receive_stream_interface",
+ ":rtp_interfaces",
+ ":video_stream_api",
+ "../api:fec_controller_api",
+ "../api:field_trials_view",
+ "../api:frame_transformer_interface",
+ "../api:network_state_predictor_api",
+ "../api:rtc_error",
+ "../api:rtp_headers",
+ "../api:rtp_parameters",
+ "../api:rtp_sender_setparameters_callback",
+ "../api:scoped_refptr",
+ "../api:transport_api",
+ "../api/adaptation:resource_adaptation_api",
+ "../api/audio:audio_frame_processor",
+ "../api/audio:audio_mixer_api",
+ "../api/audio_codecs:audio_codecs_api",
+ "../api/crypto:frame_encryptor_interface",
+ "../api/crypto:options",
+ "../api/metronome",
+ "../api/neteq:neteq_api",
+ "../api/task_queue",
+ "../api/transport:bitrate_settings",
+ "../api/transport:network_control",
+ "../modules/async_audio_processing",
+ "../modules/audio_device",
+ "../modules/audio_processing",
+ "../modules/audio_processing:api",
+ "../modules/audio_processing:audio_processing_statistics",
+ "../modules/rtp_rtcp",
+ "../modules/rtp_rtcp:rtp_rtcp_format",
+ "../rtc_base:audio_format_to_string",
+ "../rtc_base:checks",
+ "../rtc_base:copy_on_write_buffer",
+ "../rtc_base:network_route",
+ "../rtc_base:refcount",
+ "../rtc_base:stringutils",
+ "../rtc_base/network:sent_packet",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/functional:any_invocable",
+ "//third_party/abseil-cpp/absl/functional:bind_front",
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+rtc_source_set("audio_sender_interface") {
+ visibility = [ "*" ]
+ sources = [ "audio_sender.h" ]
+ deps = [ "../api/audio:audio_frame_api" ]
+}
+
+# TODO(nisse): These RTP targets should be moved elsewhere
+# when interfaces have stabilized. See also TODO for `mock_rtp_interfaces`.
+rtc_library("rtp_interfaces") {
+ # Client code SHOULD NOT USE THIS TARGET, but for now it needs to be public
+ # because there exists client code that uses it.
+ # TODO(bugs.webrtc.org/9808): Move to private visibility as soon as that
+ # client code gets updated.
+ visibility = [ "*" ]
+ sources = [
+ "rtp_config.cc",
+ "rtp_config.h",
+ "rtp_packet_sink_interface.h",
+ "rtp_stream_receiver_controller_interface.h",
+ "rtp_transport_config.h",
+ "rtp_transport_controller_send_factory_interface.h",
+ "rtp_transport_controller_send_interface.h",
+ ]
+ deps = [
+ "../api:array_view",
+ "../api:fec_controller_api",
+ "../api:field_trials_view",
+ "../api:frame_transformer_interface",
+ "../api:network_state_predictor_api",
+ "../api:rtp_headers",
+ "../api:rtp_parameters",
+ "../api/crypto:options",
+ "../api/rtc_event_log",
+ "../api/transport:bitrate_settings",
+ "../api/transport:network_control",
+ "../api/units:timestamp",
+ "../common_video:frame_counts",
+ "../modules/rtp_rtcp:rtp_rtcp_format",
+ "../rtc_base:checks",
+ "../rtc_base:rtc_task_queue",
+ "../rtc_base:stringutils",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/algorithm:container",
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+rtc_library("rtp_receiver") {
+ visibility = [ "*" ]
+ sources = [
+ "rtp_demuxer.cc",
+ "rtp_demuxer.h",
+ "rtp_stream_receiver_controller.cc",
+ "rtp_stream_receiver_controller.h",
+ "rtx_receive_stream.cc",
+ "rtx_receive_stream.h",
+ ]
+ deps = [
+ ":rtp_interfaces",
+ "../api:array_view",
+ "../api:rtp_headers",
+ "../api:sequence_checker",
+ "../modules/rtp_rtcp",
+ "../modules/rtp_rtcp:rtp_rtcp_format",
+ "../rtc_base:checks",
+ "../rtc_base:logging",
+ "../rtc_base:stringutils",
+ "../rtc_base/containers:flat_map",
+ "../rtc_base/containers:flat_set",
+ "../rtc_base/system:no_unique_address",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/strings:strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+rtc_library("rtp_sender") {
+ sources = [
+ "rtp_payload_params.cc",
+ "rtp_payload_params.h",
+ "rtp_transport_controller_send.cc",
+ "rtp_transport_controller_send.h",
+ "rtp_transport_controller_send_factory.h",
+ "rtp_video_sender.cc",
+ "rtp_video_sender.h",
+ "rtp_video_sender_interface.h",
+ ]
+ deps = [
+ ":bitrate_configurator",
+ ":rtp_interfaces",
+ "../api:array_view",
+ "../api:bitrate_allocation",
+ "../api:fec_controller_api",
+ "../api:field_trials_view",
+ "../api:network_state_predictor_api",
+ "../api:rtp_parameters",
+ "../api:sequence_checker",
+ "../api:transport_api",
+ "../api/rtc_event_log",
+ "../api/task_queue:pending_task_safety_flag",
+ "../api/task_queue:task_queue",
+ "../api/transport:field_trial_based_config",
+ "../api/transport:goog_cc",
+ "../api/transport:network_control",
+ "../api/units:data_rate",
+ "../api/units:time_delta",
+ "../api/units:timestamp",
+ "../api/video:video_frame",
+ "../api/video:video_layers_allocation",
+ "../api/video:video_rtp_headers",
+ "../api/video_codecs:video_codecs_api",
+ "../logging:rtc_event_bwe",
+ "../modules/congestion_controller",
+ "../modules/congestion_controller/rtp:control_handler",
+ "../modules/congestion_controller/rtp:transport_feedback",
+ "../modules/pacing",
+ "../modules/rtp_rtcp",
+ "../modules/rtp_rtcp:rtp_rtcp_format",
+ "../modules/rtp_rtcp:rtp_video_header",
+ "../modules/video_coding:chain_diff_calculator",
+ "../modules/video_coding:codec_globals_headers",
+ "../modules/video_coding:frame_dependencies_calculator",
+ "../modules/video_coding:video_codec_interface",
+ "../rtc_base:checks",
+ "../rtc_base:event_tracer",
+ "../rtc_base:logging",
+ "../rtc_base:macromagic",
+ "../rtc_base:network_route",
+ "../rtc_base:race_checker",
+ "../rtc_base:random",
+ "../rtc_base:rate_limiter",
+ "../rtc_base:rtc_task_queue",
+ "../rtc_base:timeutils",
+ "../rtc_base/synchronization:mutex",
+ "../rtc_base/task_utils:repeating_task",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/algorithm:container",
+ "//third_party/abseil-cpp/absl/container:inlined_vector",
+ "//third_party/abseil-cpp/absl/strings:strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ "//third_party/abseil-cpp/absl/types:variant",
+ ]
+}
+
+rtc_library("bitrate_configurator") {
+ sources = [
+ "rtp_bitrate_configurator.cc",
+ "rtp_bitrate_configurator.h",
+ ]
+ deps = [
+ ":rtp_interfaces",
+
+ # For api/bitrate_constraints.h
+ "../api:libjingle_peerconnection_api",
+ "../api/transport:bitrate_settings",
+ "../api/units:data_rate",
+ "../rtc_base:checks",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+}
+
+rtc_library("bitrate_allocator") {
+ sources = [
+ "bitrate_allocator.cc",
+ "bitrate_allocator.h",
+ ]
+ deps = [
+ "../api:bitrate_allocation",
+ "../api:sequence_checker",
+ "../api/transport:network_control",
+ "../api/units:data_rate",
+ "../api/units:time_delta",
+ "../rtc_base:checks",
+ "../rtc_base:logging",
+ "../rtc_base:safe_minmax",
+ "../rtc_base/system:no_unique_address",
+ "../system_wrappers",
+ "../system_wrappers:field_trial",
+ "../system_wrappers:metrics",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/algorithm:container" ]
+}
+
+rtc_library("call") {
+ sources = [
+ "call.cc",
+ "call_factory.cc",
+ "call_factory.h",
+ "degraded_call.cc",
+ "degraded_call.h",
+ "flexfec_receive_stream_impl.cc",
+ "flexfec_receive_stream_impl.h",
+ "receive_time_calculator.cc",
+ "receive_time_calculator.h",
+ ]
+
+ deps = [
+ ":bitrate_allocator",
+ ":call_interfaces",
+ ":fake_network",
+ ":rtp_interfaces",
+ ":rtp_receiver",
+ ":rtp_sender",
+ ":simulated_network",
+ ":version",
+ ":video_stream_api",
+ "../api:array_view",
+ "../api:callfactory_api",
+ "../api:fec_controller_api",
+ "../api:field_trials_view",
+ "../api:rtp_headers",
+ "../api:rtp_parameters",
+ "../api:sequence_checker",
+ "../api:simulated_network_api",
+ "../api:transport_api",
+ "../api/rtc_event_log",
+ "../api/task_queue:pending_task_safety_flag",
+ "../api/transport:network_control",
+ "../api/units:time_delta",
+ "../api/video_codecs:video_codecs_api",
+ "../audio",
+ "../logging:rtc_event_audio",
+ "../logging:rtc_event_rtp_rtcp",
+ "../logging:rtc_event_video",
+ "../logging:rtc_stream_config",
+ "../modules/congestion_controller",
+ "../modules/pacing",
+ "../modules/rtp_rtcp",
+ "../modules/rtp_rtcp:rtp_rtcp_format",
+ "../modules/video_coding",
+ "../rtc_base:checks",
+ "../rtc_base:copy_on_write_buffer",
+ "../rtc_base:event_tracer",
+ "../rtc_base:logging",
+ "../rtc_base:macromagic",
+ "../rtc_base:rate_limiter",
+ "../rtc_base:rtc_event",
+ "../rtc_base:rtc_task_queue",
+ "../rtc_base:safe_minmax",
+ "../rtc_base:stringutils",
+ "../rtc_base:threading",
+ "../rtc_base:timeutils",
+ "../rtc_base/experiments:field_trial_parser",
+ "../rtc_base/network:sent_packet",
+ "../rtc_base/system:no_unique_address",
+ "../rtc_base/task_utils:repeating_task",
+ "../system_wrappers",
+ "../system_wrappers:field_trial",
+ "../system_wrappers:metrics",
+ "../video",
+ "../video:decode_synchronizer",
+ "../video/config:encoder_config",
+ "adaptation:resource_adaptation",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/functional:bind_front",
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+ if (build_with_mozilla) { # See Bug 1820869.
+ sources -= [
+ "call_factory.cc",
+ "degraded_call.cc",
+ ]
+ deps -= [
+ ":fake_network",
+ ":simulated_network",
+ ]
+ }
+}
+
+rtc_source_set("receive_stream_interface") {
+ sources = [ "receive_stream.h" ]
+ deps = [
+ "../api:frame_transformer_interface",
+ "../api:rtp_parameters",
+ "../api:scoped_refptr",
+ "../api/crypto:frame_decryptor_interface",
+ "../api/transport/rtp:rtp_source",
+ "../modules/rtp_rtcp:rtp_rtcp_format",
+ ]
+}
+
+rtc_library("video_stream_api") {
+ sources = [
+ "video_receive_stream.cc",
+ "video_receive_stream.h",
+ "video_send_stream.cc",
+ "video_send_stream.h",
+ ]
+ deps = [
+ ":receive_stream_interface",
+ ":rtp_interfaces",
+ "../api:frame_transformer_interface",
+ "../api:rtp_headers",
+ "../api:rtp_parameters",
+ "../api:rtp_sender_setparameters_callback",
+ "../api:scoped_refptr",
+ "../api:transport_api",
+ "../api/adaptation:resource_adaptation_api",
+ "../api/crypto:frame_encryptor_interface",
+ "../api/crypto:options",
+ "../api/video:recordable_encoded_frame",
+ "../api/video:video_frame",
+ "../api/video:video_rtp_headers",
+ "../api/video:video_stream_encoder",
+ "../api/video_codecs:scalability_mode",
+ "../api/video_codecs:video_codecs_api",
+ "../common_video",
+ "../common_video:frame_counts",
+ "../modules/rtp_rtcp:rtp_rtcp_format",
+ "../rtc_base:checks",
+ "../rtc_base:stringutils",
+ "../video/config:encoder_config",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/functional:any_invocable",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+rtc_library("simulated_network") {
+ sources = [
+ "simulated_network.cc",
+ "simulated_network.h",
+ ]
+ deps = [
+ "../api:sequence_checker",
+ "../api:simulated_network_api",
+ "../api/units:data_rate",
+ "../api/units:data_size",
+ "../api/units:time_delta",
+ "../api/units:timestamp",
+ "../rtc_base:checks",
+ "../rtc_base:macromagic",
+ "../rtc_base:race_checker",
+ "../rtc_base:random",
+ "../rtc_base/synchronization:mutex",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+}
+
+rtc_source_set("simulated_packet_receiver") {
+ sources = [ "simulated_packet_receiver.h" ]
+ deps = [
+ ":call_interfaces",
+ "../api:simulated_network_api",
+ ]
+}
+
+rtc_library("fake_network") {
+ sources = [
+ "fake_network_pipe.cc",
+ "fake_network_pipe.h",
+ ]
+ deps = [
+ ":call_interfaces",
+ ":simulated_network",
+ ":simulated_packet_receiver",
+ "../api:rtp_parameters",
+ "../api:sequence_checker",
+ "../api:simulated_network_api",
+ "../api:transport_api",
+ "../api/units:timestamp",
+ "../modules/rtp_rtcp:rtp_rtcp_format",
+ "../rtc_base:checks",
+ "../rtc_base:logging",
+ "../rtc_base:macromagic",
+ "../rtc_base/synchronization:mutex",
+ "../system_wrappers",
+ ]
+}
+
+if (rtc_include_tests) {
+ if (!build_with_chromium) {
+ rtc_library("call_tests") {
+ testonly = true
+
+ sources = [
+ "bitrate_allocator_unittest.cc",
+ "bitrate_estimator_tests.cc",
+ "call_unittest.cc",
+ "flexfec_receive_stream_unittest.cc",
+ "receive_time_calculator_unittest.cc",
+ "rtp_bitrate_configurator_unittest.cc",
+ "rtp_demuxer_unittest.cc",
+ "rtp_payload_params_unittest.cc",
+ "rtp_video_sender_unittest.cc",
+ "rtx_receive_stream_unittest.cc",
+ ]
+ deps = [
+ ":bitrate_allocator",
+ ":bitrate_configurator",
+ ":call",
+ ":call_interfaces",
+ ":mock_rtp_interfaces",
+ ":rtp_interfaces",
+ ":rtp_receiver",
+ ":rtp_sender",
+ ":simulated_network",
+ "../api:array_view",
+ "../api:create_frame_generator",
+ "../api:mock_audio_mixer",
+ "../api:rtp_headers",
+ "../api:rtp_parameters",
+ "../api:transport_api",
+ "../api/audio_codecs:builtin_audio_decoder_factory",
+ "../api/rtc_event_log",
+ "../api/task_queue:default_task_queue_factory",
+ "../api/test/video:function_video_factory",
+ "../api/transport:field_trial_based_config",
+ "../api/units:timestamp",
+ "../api/video:builtin_video_bitrate_allocator_factory",
+ "../api/video:video_frame",
+ "../api/video:video_rtp_headers",
+ "../audio",
+ "../modules/audio_device:mock_audio_device",
+ "../modules/audio_mixer",
+ "../modules/audio_mixer:audio_mixer_impl",
+ "../modules/audio_processing:mocks",
+ "../modules/congestion_controller",
+ "../modules/pacing",
+ "../modules/rtp_rtcp",
+ "../modules/rtp_rtcp:mock_rtp_rtcp",
+ "../modules/rtp_rtcp:rtp_rtcp_format",
+ "../modules/video_coding",
+ "../modules/video_coding:codec_globals_headers",
+ "../modules/video_coding:video_codec_interface",
+ "../rtc_base:checks",
+ "../rtc_base:logging",
+ "../rtc_base:macromagic",
+ "../rtc_base:random",
+ "../rtc_base:rate_limiter",
+ "../rtc_base:rtc_event",
+ "../rtc_base:safe_conversions",
+ "../rtc_base:task_queue_for_test",
+ "../rtc_base:threading",
+ "../rtc_base:timeutils",
+ "../rtc_base/synchronization:mutex",
+ "../system_wrappers",
+ "../test:audio_codec_mocks",
+ "../test:direct_transport",
+ "../test:encoder_settings",
+ "../test:explicit_key_value_config",
+ "../test:fake_video_codecs",
+ "../test:field_trial",
+ "../test:frame_generator_capturer",
+ "../test:mock_frame_transformer",
+ "../test:mock_transport",
+ "../test:run_loop",
+ "../test:scoped_key_value_config",
+ "../test:test_common",
+ "../test:test_support",
+ "../test:video_test_constants",
+ "../test/scenario",
+ "../test/time_controller:time_controller",
+ "../video",
+ "adaptation:resource_adaptation_test_utilities",
+ "//testing/gmock",
+ "//testing/gtest",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/container:inlined_vector",
+ "//third_party/abseil-cpp/absl/functional:any_invocable",
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ "//third_party/abseil-cpp/absl/types:variant",
+ ]
+ }
+
+ rtc_library("call_perf_tests") {
+ testonly = true
+
+ sources = [
+ "call_perf_tests.cc",
+ "rampup_tests.cc",
+ "rampup_tests.h",
+ ]
+ deps = [
+ ":call_interfaces",
+ ":simulated_network",
+ ":video_stream_api",
+ "../api:rtc_event_log_output_file",
+ "../api:simulated_network_api",
+ "../api/audio_codecs:builtin_audio_encoder_factory",
+ "../api/numerics",
+ "../api/rtc_event_log",
+ "../api/rtc_event_log:rtc_event_log_factory",
+ "../api/task_queue",
+ "../api/task_queue:default_task_queue_factory",
+ "../api/task_queue:pending_task_safety_flag",
+ "../api/test/metrics:global_metrics_logger_and_exporter",
+ "../api/test/metrics:metric",
+ "../api/video:builtin_video_bitrate_allocator_factory",
+ "../api/video:video_bitrate_allocation",
+ "../api/video_codecs:video_codecs_api",
+ "../media:rtc_internal_video_codecs",
+ "../media:rtc_simulcast_encoder_adapter",
+ "../modules/audio_coding",
+ "../modules/audio_device",
+ "../modules/audio_device:test_audio_device_module",
+ "../modules/audio_mixer:audio_mixer_impl",
+ "../modules/rtp_rtcp",
+ "../modules/rtp_rtcp:rtp_rtcp_format",
+ "../rtc_base:checks",
+ "../rtc_base:logging",
+ "../rtc_base:macromagic",
+ "../rtc_base:platform_thread",
+ "../rtc_base:rtc_event",
+ "../rtc_base:stringutils",
+ "../rtc_base:task_queue_for_test",
+ "../rtc_base:threading",
+ "../rtc_base:timeutils",
+ "../rtc_base/synchronization:mutex",
+ "../rtc_base/task_utils:repeating_task",
+ "../system_wrappers",
+ "../system_wrappers:metrics",
+ "../test:direct_transport",
+ "../test:encoder_settings",
+ "../test:fake_video_codecs",
+ "../test:field_trial",
+ "../test:fileutils",
+ "../test:frame_generator_capturer",
+ "../test:null_transport",
+ "../test:test_common",
+ "../test:test_flags",
+ "../test:test_support",
+ "../test:video_test_common",
+ "../test:video_test_constants",
+ "../video",
+ "../video/config:encoder_config",
+ "//testing/gtest",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/flags:flag",
+ "//third_party/abseil-cpp/absl/strings",
+ ]
+ }
+ }
+
+ # TODO(eladalon): This should be moved, as with the TODO for `rtp_interfaces`.
+ rtc_source_set("mock_rtp_interfaces") {
+ testonly = true
+
+ sources = [
+ "test/mock_rtp_packet_sink_interface.h",
+ "test/mock_rtp_transport_controller_send.h",
+ ]
+ deps = [
+ ":rtp_interfaces",
+ "../api:frame_transformer_interface",
+ "../api:libjingle_peerconnection_api",
+ "../api/crypto:frame_encryptor_interface",
+ "../api/crypto:options",
+ "../api/transport:bitrate_settings",
+ "../modules/pacing",
+ "../rtc_base:network_route",
+ "../rtc_base:rate_limiter",
+ "../rtc_base/network:sent_packet",
+ "../test:test_support",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/strings" ]
+ }
+ rtc_source_set("mock_bitrate_allocator") {
+ testonly = true
+
+ sources = [ "test/mock_bitrate_allocator.h" ]
+ deps = [
+ ":bitrate_allocator",
+ "../test:test_support",
+ ]
+ }
+ rtc_source_set("mock_call_interfaces") {
+ testonly = true
+
+ sources = [ "test/mock_audio_send_stream.h" ]
+ deps = [
+ ":call_interfaces",
+ "../test:test_support",
+ ]
+ }
+
+ rtc_library("fake_network_pipe_unittests") {
+ testonly = true
+
+ sources = [
+ "fake_network_pipe_unittest.cc",
+ "simulated_network_unittest.cc",
+ ]
+ deps = [
+ ":fake_network",
+ ":simulated_network",
+ "../api:simulated_network_api",
+ "../api/units:data_rate",
+ "../api/units:time_delta",
+ "../api/units:timestamp",
+ "../modules/rtp_rtcp:rtp_rtcp_format",
+ "../rtc_base:checks",
+ "../system_wrappers",
+ "../test:test_support",
+ "//testing/gtest",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/algorithm:container" ]
+ }
+}
diff --git a/third_party/libwebrtc/call/DEPS b/third_party/libwebrtc/call/DEPS
new file mode 100644
index 0000000000..b1b66ac3ce
--- /dev/null
+++ b/third_party/libwebrtc/call/DEPS
@@ -0,0 +1,32 @@
+include_rules = [
+ "+audio",
+ "+logging/rtc_event_log",
+ "+modules/async_audio_processing",
+ "+modules/audio_coding",
+ "+modules/audio_device",
+ "+modules/audio_mixer",
+ "+modules/audio_processing",
+ "+modules/bitrate_controller",
+ "+modules/congestion_controller",
+ "+modules/video_coding",
+ "+modules/pacing",
+ "+modules/rtp_rtcp",
+ "+modules/utility",
+ "+system_wrappers",
+ "+video",
+]
+
+specific_include_rules = {
+ "video_receive_stream\.h": [
+ "+common_video/frame_counts.h",
+ ],
+ "video_send_stream\.h": [
+ "+common_video",
+ ],
+ "rtp_transport_controller_send_interface\.h": [
+ "+common_video/frame_counts.h",
+ ],
+ "call_perf_tests\.cc": [
+ "+media/engine",
+ ]
+}
diff --git a/third_party/libwebrtc/call/OWNERS b/third_party/libwebrtc/call/OWNERS
new file mode 100644
index 0000000000..e275834bb4
--- /dev/null
+++ b/third_party/libwebrtc/call/OWNERS
@@ -0,0 +1,8 @@
+sprang@webrtc.org
+danilchap@webrtc.org
+brandtr@webrtc.org
+tommi@webrtc.org
+mflodman@webrtc.org
+stefan@webrtc.org
+
+per-file version.cc=webrtc-version-updater@webrtc-ci.iam.gserviceaccount.com
diff --git a/third_party/libwebrtc/call/adaptation/BUILD.gn b/third_party/libwebrtc/call/adaptation/BUILD.gn
new file mode 100644
index 0000000000..b69196f021
--- /dev/null
+++ b/third_party/libwebrtc/call/adaptation/BUILD.gn
@@ -0,0 +1,137 @@
+# Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../webrtc.gni")
+
+rtc_library("resource_adaptation") {
+ sources = [
+ "adaptation_constraint.cc",
+ "adaptation_constraint.h",
+ "broadcast_resource_listener.cc",
+ "broadcast_resource_listener.h",
+ "degradation_preference_provider.cc",
+ "degradation_preference_provider.h",
+ "encoder_settings.cc",
+ "encoder_settings.h",
+ "resource_adaptation_processor.cc",
+ "resource_adaptation_processor.h",
+ "resource_adaptation_processor_interface.cc",
+ "resource_adaptation_processor_interface.h",
+ "video_source_restrictions.cc",
+ "video_source_restrictions.h",
+ "video_stream_adapter.cc",
+ "video_stream_adapter.h",
+ "video_stream_input_state.cc",
+ "video_stream_input_state.h",
+ "video_stream_input_state_provider.cc",
+ "video_stream_input_state_provider.h",
+ ]
+ deps = [
+ "../../api:field_trials_view",
+ "../../api:make_ref_counted",
+ "../../api:rtp_parameters",
+ "../../api:scoped_refptr",
+ "../../api:sequence_checker",
+ "../../api/adaptation:resource_adaptation_api",
+ "../../api/task_queue:task_queue",
+ "../../api/video:video_adaptation",
+ "../../api/video:video_frame",
+ "../../api/video:video_stream_encoder",
+ "../../api/video_codecs:video_codecs_api",
+ "../../modules/video_coding:video_coding_utility",
+ "../../modules/video_coding/svc:scalability_mode_util",
+ "../../rtc_base:checks",
+ "../../rtc_base:logging",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:refcount",
+ "../../rtc_base:rtc_task_queue",
+ "../../rtc_base:safe_conversions",
+ "../../rtc_base:stringutils",
+ "../../rtc_base/experiments:balanced_degradation_settings",
+ "../../rtc_base/synchronization:mutex",
+ "../../rtc_base/system:no_unique_address",
+ "../../video:video_stream_encoder_interface",
+ "../../video/config:encoder_config",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/algorithm:container",
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ "//third_party/abseil-cpp/absl/types:variant",
+ ]
+}
+
+if (rtc_include_tests) {
+ rtc_library("resource_adaptation_tests") {
+ testonly = true
+
+ sources = [
+ "broadcast_resource_listener_unittest.cc",
+ "resource_adaptation_processor_unittest.cc",
+ "resource_unittest.cc",
+ "video_source_restrictions_unittest.cc",
+ "video_stream_adapter_unittest.cc",
+ "video_stream_input_state_provider_unittest.cc",
+ ]
+ deps = [
+ ":resource_adaptation",
+ ":resource_adaptation_test_utilities",
+ "../../api:scoped_refptr",
+ "../../api/adaptation:resource_adaptation_api",
+ "../../api/task_queue:default_task_queue_factory",
+ "../../api/task_queue:task_queue",
+ "../../api/video:video_adaptation",
+ "../../api/video_codecs:video_codecs_api",
+ "../../rtc_base:checks",
+ "../../rtc_base:gunit_helpers",
+ "../../rtc_base:rtc_event",
+ "../../rtc_base:rtc_task_queue",
+ "../../rtc_base:stringutils",
+ "../../rtc_base:task_queue_for_test",
+ "../../rtc_base/synchronization:mutex",
+ "../../test:field_trial",
+ "../../test:rtc_expect_death",
+ "../../test:scoped_key_value_config",
+ "../../test:test_support",
+ "../../video/config:encoder_config",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+ }
+
+ rtc_source_set("resource_adaptation_test_utilities") {
+ testonly = true
+
+ sources = [
+ "test/fake_adaptation_constraint.cc",
+ "test/fake_adaptation_constraint.h",
+ "test/fake_frame_rate_provider.cc",
+ "test/fake_frame_rate_provider.h",
+ "test/fake_resource.cc",
+ "test/fake_resource.h",
+ "test/fake_video_stream_input_state_provider.cc",
+ "test/fake_video_stream_input_state_provider.h",
+ "test/mock_resource_listener.h",
+ ]
+ deps = [
+ ":resource_adaptation",
+ "../../api:make_ref_counted",
+ "../../api:scoped_refptr",
+ "../../api:sequence_checker",
+ "../../api/adaptation:resource_adaptation_api",
+ "../../api/task_queue:task_queue",
+ "../../api/video:video_stream_encoder",
+ "../../test:test_support",
+ "../../video:video_stream_encoder_interface",
+ "../../video/config:encoder_config",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+ }
+}
diff --git a/third_party/libwebrtc/call/adaptation/OWNERS b/third_party/libwebrtc/call/adaptation/OWNERS
new file mode 100644
index 0000000000..bd56595d2e
--- /dev/null
+++ b/third_party/libwebrtc/call/adaptation/OWNERS
@@ -0,0 +1,3 @@
+eshr@webrtc.org
+hbos@webrtc.org
+ilnik@webrtc.org
diff --git a/third_party/libwebrtc/call/adaptation/adaptation_constraint.cc b/third_party/libwebrtc/call/adaptation/adaptation_constraint.cc
new file mode 100644
index 0000000000..d62bb74f87
--- /dev/null
+++ b/third_party/libwebrtc/call/adaptation/adaptation_constraint.cc
@@ -0,0 +1,17 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/adaptation/adaptation_constraint.h"
+
+namespace webrtc {
+
+AdaptationConstraint::~AdaptationConstraint() {}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/adaptation/adaptation_constraint.h b/third_party/libwebrtc/call/adaptation/adaptation_constraint.h
new file mode 100644
index 0000000000..9ad6414cd1
--- /dev/null
+++ b/third_party/libwebrtc/call/adaptation/adaptation_constraint.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef CALL_ADAPTATION_ADAPTATION_CONSTRAINT_H_
+#define CALL_ADAPTATION_ADAPTATION_CONSTRAINT_H_
+
+#include <string>
+
+#include "api/adaptation/resource.h"
+#include "call/adaptation/video_source_restrictions.h"
+#include "call/adaptation/video_stream_input_state.h"
+
+namespace webrtc {
+
+// Adaptation constraints have the ability to prevent applying a proposed
+// adaptation (expressed as restrictions before/after adaptation).
+class AdaptationConstraint {
+ public:
+ virtual ~AdaptationConstraint();
+
+ virtual std::string Name() const = 0;
+
+ // TODO(https://crbug.com/webrtc/11172): When we have multi-stream adaptation
+ // support, this interface needs to indicate which stream the adaptation
+ // applies to.
+ virtual bool IsAdaptationUpAllowed(
+ const VideoStreamInputState& input_state,
+ const VideoSourceRestrictions& restrictions_before,
+ const VideoSourceRestrictions& restrictions_after) const = 0;
+};
+
+} // namespace webrtc
+
+#endif // CALL_ADAPTATION_ADAPTATION_CONSTRAINT_H_
diff --git a/third_party/libwebrtc/call/adaptation/broadcast_resource_listener.cc b/third_party/libwebrtc/call/adaptation/broadcast_resource_listener.cc
new file mode 100644
index 0000000000..505036db3d
--- /dev/null
+++ b/third_party/libwebrtc/call/adaptation/broadcast_resource_listener.cc
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/adaptation/broadcast_resource_listener.h"
+
+#include <algorithm>
+#include <string>
+#include <utility>
+
+#include "absl/strings/string_view.h"
+#include "api/make_ref_counted.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/synchronization/mutex.h"
+
+namespace webrtc {
+
+// The AdapterResource redirects resource usage measurements from its parent to
+// a single ResourceListener.
+class BroadcastResourceListener::AdapterResource : public Resource {
+ public:
+ explicit AdapterResource(absl::string_view name) : name_(std::move(name)) {}
+ ~AdapterResource() override { RTC_DCHECK(!listener_); }
+
+ // The parent is letting us know we have a usage measurement.
+ void OnResourceUsageStateMeasured(ResourceUsageState usage_state) {
+ MutexLock lock(&lock_);
+ if (!listener_)
+ return;
+ listener_->OnResourceUsageStateMeasured(rtc::scoped_refptr<Resource>(this),
+ usage_state);
+ }
+
+ // Resource implementation.
+ std::string Name() const override { return name_; }
+ void SetResourceListener(ResourceListener* listener) override {
+ MutexLock lock(&lock_);
+ RTC_DCHECK(!listener_ || !listener);
+ listener_ = listener;
+ }
+
+ private:
+ const std::string name_;
+ Mutex lock_;
+ ResourceListener* listener_ RTC_GUARDED_BY(lock_) = nullptr;
+};
+
+BroadcastResourceListener::BroadcastResourceListener(
+ rtc::scoped_refptr<Resource> source_resource)
+ : source_resource_(source_resource), is_listening_(false) {
+ RTC_DCHECK(source_resource_);
+}
+
+BroadcastResourceListener::~BroadcastResourceListener() {
+ RTC_DCHECK(!is_listening_);
+}
+
+rtc::scoped_refptr<Resource> BroadcastResourceListener::SourceResource() const {
+ return source_resource_;
+}
+
+void BroadcastResourceListener::StartListening() {
+ MutexLock lock(&lock_);
+ RTC_DCHECK(!is_listening_);
+ source_resource_->SetResourceListener(this);
+ is_listening_ = true;
+}
+
+void BroadcastResourceListener::StopListening() {
+ MutexLock lock(&lock_);
+ RTC_DCHECK(is_listening_);
+ RTC_DCHECK(adapters_.empty());
+ source_resource_->SetResourceListener(nullptr);
+ is_listening_ = false;
+}
+
+rtc::scoped_refptr<Resource>
+BroadcastResourceListener::CreateAdapterResource() {
+ MutexLock lock(&lock_);
+ RTC_DCHECK(is_listening_);
+ rtc::scoped_refptr<AdapterResource> adapter =
+ rtc::make_ref_counted<AdapterResource>(source_resource_->Name() +
+ "Adapter");
+ adapters_.push_back(adapter);
+ return adapter;
+}
+
+void BroadcastResourceListener::RemoveAdapterResource(
+ rtc::scoped_refptr<Resource> resource) {
+ MutexLock lock(&lock_);
+ auto it = std::find(adapters_.begin(), adapters_.end(), resource);
+ RTC_DCHECK(it != adapters_.end());
+ adapters_.erase(it);
+}
+
+std::vector<rtc::scoped_refptr<Resource>>
+BroadcastResourceListener::GetAdapterResources() {
+ std::vector<rtc::scoped_refptr<Resource>> resources;
+ MutexLock lock(&lock_);
+ for (const auto& adapter : adapters_) {
+ resources.push_back(adapter);
+ }
+ return resources;
+}
+
+void BroadcastResourceListener::OnResourceUsageStateMeasured(
+ rtc::scoped_refptr<Resource> resource,
+ ResourceUsageState usage_state) {
+ RTC_DCHECK_EQ(resource, source_resource_);
+ MutexLock lock(&lock_);
+ for (const auto& adapter : adapters_) {
+ adapter->OnResourceUsageStateMeasured(usage_state);
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/adaptation/broadcast_resource_listener.h b/third_party/libwebrtc/call/adaptation/broadcast_resource_listener.h
new file mode 100644
index 0000000000..2c5a5c703b
--- /dev/null
+++ b/third_party/libwebrtc/call/adaptation/broadcast_resource_listener.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef CALL_ADAPTATION_BROADCAST_RESOURCE_LISTENER_H_
+#define CALL_ADAPTATION_BROADCAST_RESOURCE_LISTENER_H_
+
+#include <vector>
+
+#include "api/adaptation/resource.h"
+#include "api/scoped_refptr.h"
+#include "rtc_base/synchronization/mutex.h"
+
+namespace webrtc {
+
+// Responsible for forwarding 1 resource usage measurement to N listeners by
+// creating N "adapter" resources.
+//
+// Example:
+// If we have ResourceA, ResourceListenerX and ResourceListenerY we can create a
+// BroadcastResourceListener that listens to ResourceA, use
+// CreateAdapterResource() to
+// spawn adapter resources ResourceX and ResourceY and let ResourceListenerX
+// listen to ResourceX and ResourceListenerY listen to ResourceY. When ResourceA
+// makes a measurement it will be echoed by both ResourceX and ResourceY.
+//
+// TODO(https://crbug.com/webrtc/11565): When the ResourceAdaptationProcessor is
+// moved to call there will only be one ResourceAdaptationProcessor that needs
+// to listen to the injected resources. When this is the case, delete this class
+// and DCHECK that a Resource's listener is never overwritten.
+class BroadcastResourceListener : public ResourceListener {
+ public:
+ explicit BroadcastResourceListener(
+ rtc::scoped_refptr<Resource> source_resource);
+ ~BroadcastResourceListener() override;
+
+ rtc::scoped_refptr<Resource> SourceResource() const;
+ void StartListening();
+ void StopListening();
+
+ // Creates a Resource that redirects any resource usage measurements that
+ // BroadcastResourceListener receives to its listener.
+ rtc::scoped_refptr<Resource> CreateAdapterResource();
+
+ // Unregister the adapter from the BroadcastResourceListener; it will no
+ // longer receive resource usage measurements and will no longer be referenced.
+ // Use this to prevent memory leaks of old adapters.
+ void RemoveAdapterResource(rtc::scoped_refptr<Resource> resource);
+ std::vector<rtc::scoped_refptr<Resource>> GetAdapterResources();
+
+ // ResourceListener implementation.
+ void OnResourceUsageStateMeasured(rtc::scoped_refptr<Resource> resource,
+ ResourceUsageState usage_state) override;
+
+ private:
+ class AdapterResource;
+ friend class AdapterResource;
+
+ const rtc::scoped_refptr<Resource> source_resource_;
+ Mutex lock_;
+ bool is_listening_ RTC_GUARDED_BY(lock_);
+ // The AdapterResource unregisters itself prior to destruction, guaranteeing
+ // that these pointers are safe to use.
+ std::vector<rtc::scoped_refptr<AdapterResource>> adapters_
+ RTC_GUARDED_BY(lock_);
+};
+
+} // namespace webrtc
+
+#endif // CALL_ADAPTATION_BROADCAST_RESOURCE_LISTENER_H_
diff --git a/third_party/libwebrtc/call/adaptation/broadcast_resource_listener_unittest.cc b/third_party/libwebrtc/call/adaptation/broadcast_resource_listener_unittest.cc
new file mode 100644
index 0000000000..9cd80500c2
--- /dev/null
+++ b/third_party/libwebrtc/call/adaptation/broadcast_resource_listener_unittest.cc
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/adaptation/broadcast_resource_listener.h"
+
+#include "call/adaptation/test/fake_resource.h"
+#include "call/adaptation/test/mock_resource_listener.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+using ::testing::_;
+using ::testing::StrictMock;
+
+TEST(BroadcastResourceListenerTest, CreateAndRemoveAdapterResource) {
+ rtc::scoped_refptr<FakeResource> source_resource =
+ FakeResource::Create("SourceResource");
+ BroadcastResourceListener broadcast_resource_listener(source_resource);
+ broadcast_resource_listener.StartListening();
+
+ EXPECT_TRUE(broadcast_resource_listener.GetAdapterResources().empty());
+ rtc::scoped_refptr<Resource> adapter =
+ broadcast_resource_listener.CreateAdapterResource();
+ StrictMock<MockResourceListener> listener;
+ adapter->SetResourceListener(&listener);
+ EXPECT_EQ(std::vector<rtc::scoped_refptr<Resource>>{adapter},
+ broadcast_resource_listener.GetAdapterResources());
+
+ // The removed adapter is not referenced by the broadcaster.
+ broadcast_resource_listener.RemoveAdapterResource(adapter);
+ EXPECT_TRUE(broadcast_resource_listener.GetAdapterResources().empty());
+ // The removed adapter is not forwarding measurements.
+ EXPECT_CALL(listener, OnResourceUsageStateMeasured(_, _)).Times(0);
+ source_resource->SetUsageState(ResourceUsageState::kOveruse);
+ // Cleanup.
+ adapter->SetResourceListener(nullptr);
+ broadcast_resource_listener.StopListening();
+}
+
+TEST(BroadcastResourceListenerTest, AdapterNameIsBasedOnSourceResourceName) {
+ rtc::scoped_refptr<FakeResource> source_resource =
+ FakeResource::Create("FooBarResource");
+ BroadcastResourceListener broadcast_resource_listener(source_resource);
+ broadcast_resource_listener.StartListening();
+
+ rtc::scoped_refptr<Resource> adapter =
+ broadcast_resource_listener.CreateAdapterResource();
+ EXPECT_EQ("FooBarResourceAdapter", adapter->Name());
+
+ broadcast_resource_listener.RemoveAdapterResource(adapter);
+ broadcast_resource_listener.StopListening();
+}
+
+TEST(BroadcastResourceListenerTest, AdaptersForwardsUsageMeasurements) {
+ rtc::scoped_refptr<FakeResource> source_resource =
+ FakeResource::Create("SourceResource");
+ BroadcastResourceListener broadcast_resource_listener(source_resource);
+ broadcast_resource_listener.StartListening();
+
+ StrictMock<MockResourceListener> destination_listener1;
+ StrictMock<MockResourceListener> destination_listener2;
+ rtc::scoped_refptr<Resource> adapter1 =
+ broadcast_resource_listener.CreateAdapterResource();
+ adapter1->SetResourceListener(&destination_listener1);
+ rtc::scoped_refptr<Resource> adapter2 =
+ broadcast_resource_listener.CreateAdapterResource();
+ adapter2->SetResourceListener(&destination_listener2);
+
+ // Expect kOveruse to be echoed.
+ EXPECT_CALL(destination_listener1, OnResourceUsageStateMeasured(_, _))
+ .Times(1)
+ .WillOnce([adapter1](rtc::scoped_refptr<Resource> resource,
+ ResourceUsageState usage_state) {
+ EXPECT_EQ(adapter1, resource);
+ EXPECT_EQ(ResourceUsageState::kOveruse, usage_state);
+ });
+ EXPECT_CALL(destination_listener2, OnResourceUsageStateMeasured(_, _))
+ .Times(1)
+ .WillOnce([adapter2](rtc::scoped_refptr<Resource> resource,
+ ResourceUsageState usage_state) {
+ EXPECT_EQ(adapter2, resource);
+ EXPECT_EQ(ResourceUsageState::kOveruse, usage_state);
+ });
+ source_resource->SetUsageState(ResourceUsageState::kOveruse);
+
+ // Expect kUnderuse to be echoed.
+ EXPECT_CALL(destination_listener1, OnResourceUsageStateMeasured(_, _))
+ .Times(1)
+ .WillOnce([adapter1](rtc::scoped_refptr<Resource> resource,
+ ResourceUsageState usage_state) {
+ EXPECT_EQ(adapter1, resource);
+ EXPECT_EQ(ResourceUsageState::kUnderuse, usage_state);
+ });
+ EXPECT_CALL(destination_listener2, OnResourceUsageStateMeasured(_, _))
+ .Times(1)
+ .WillOnce([adapter2](rtc::scoped_refptr<Resource> resource,
+ ResourceUsageState usage_state) {
+ EXPECT_EQ(adapter2, resource);
+ EXPECT_EQ(ResourceUsageState::kUnderuse, usage_state);
+ });
+ source_resource->SetUsageState(ResourceUsageState::kUnderuse);
+
+ // Adapters have to be unregistered before they or the broadcaster is
+ // destroyed, ensuring safe use of raw pointers.
+ adapter1->SetResourceListener(nullptr);
+ adapter2->SetResourceListener(nullptr);
+
+ broadcast_resource_listener.RemoveAdapterResource(adapter1);
+ broadcast_resource_listener.RemoveAdapterResource(adapter2);
+ broadcast_resource_listener.StopListening();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/adaptation/degradation_preference_provider.cc b/third_party/libwebrtc/call/adaptation/degradation_preference_provider.cc
new file mode 100644
index 0000000000..c87e49f366
--- /dev/null
+++ b/third_party/libwebrtc/call/adaptation/degradation_preference_provider.cc
@@ -0,0 +1,14 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/adaptation/degradation_preference_provider.h"
+
+webrtc::DegradationPreferenceProvider::~DegradationPreferenceProvider() =
+ default;
diff --git a/third_party/libwebrtc/call/adaptation/degradation_preference_provider.h b/third_party/libwebrtc/call/adaptation/degradation_preference_provider.h
new file mode 100644
index 0000000000..1f75901cc5
--- /dev/null
+++ b/third_party/libwebrtc/call/adaptation/degradation_preference_provider.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef CALL_ADAPTATION_DEGRADATION_PREFERENCE_PROVIDER_H_
+#define CALL_ADAPTATION_DEGRADATION_PREFERENCE_PROVIDER_H_
+
+#include "api/rtp_parameters.h"
+
+namespace webrtc {
+
+class DegradationPreferenceProvider {
+ public:
+ virtual ~DegradationPreferenceProvider();
+
+ virtual DegradationPreference degradation_preference() const = 0;
+};
+
+} // namespace webrtc
+
+#endif // CALL_ADAPTATION_DEGRADATION_PREFERENCE_PROVIDER_H_
diff --git a/third_party/libwebrtc/call/adaptation/encoder_settings.cc b/third_party/libwebrtc/call/adaptation/encoder_settings.cc
new file mode 100644
index 0000000000..c894e833ed
--- /dev/null
+++ b/third_party/libwebrtc/call/adaptation/encoder_settings.cc
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/adaptation/encoder_settings.h"
+
+#include <utility>
+
+namespace webrtc {
+
+EncoderSettings::EncoderSettings(VideoEncoder::EncoderInfo encoder_info,
+ VideoEncoderConfig encoder_config,
+ VideoCodec video_codec)
+ : encoder_info_(std::move(encoder_info)),
+ encoder_config_(std::move(encoder_config)),
+ video_codec_(std::move(video_codec)) {}
+
+EncoderSettings::EncoderSettings(const EncoderSettings& other)
+ : encoder_info_(other.encoder_info_),
+ encoder_config_(other.encoder_config_.Copy()),
+ video_codec_(other.video_codec_) {}
+
+EncoderSettings& EncoderSettings::operator=(const EncoderSettings& other) {
+ encoder_info_ = other.encoder_info_;
+ encoder_config_ = other.encoder_config_.Copy();
+ video_codec_ = other.video_codec_;
+ return *this;
+}
+
+const VideoEncoder::EncoderInfo& EncoderSettings::encoder_info() const {
+ return encoder_info_;
+}
+
+const VideoEncoderConfig& EncoderSettings::encoder_config() const {
+ return encoder_config_;
+}
+
+const VideoCodec& EncoderSettings::video_codec() const {
+ return video_codec_;
+}
+
+VideoCodecType GetVideoCodecTypeOrGeneric(
+ const absl::optional<EncoderSettings>& settings) {
+ return settings.has_value() ? settings->encoder_config().codec_type
+ : kVideoCodecGeneric;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/adaptation/encoder_settings.h b/third_party/libwebrtc/call/adaptation/encoder_settings.h
new file mode 100644
index 0000000000..30ce0a05bc
--- /dev/null
+++ b/third_party/libwebrtc/call/adaptation/encoder_settings.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef CALL_ADAPTATION_ENCODER_SETTINGS_H_
+#define CALL_ADAPTATION_ENCODER_SETTINGS_H_
+
+#include "absl/types/optional.h"
+#include "api/video_codecs/video_codec.h"
+#include "api/video_codecs/video_encoder.h"
+#include "video/config/video_encoder_config.h"
+
+namespace webrtc {
+
+// Information about an encoder available when reconfiguring the encoder.
+class EncoderSettings {
+ public:
+ EncoderSettings(VideoEncoder::EncoderInfo encoder_info,
+ VideoEncoderConfig encoder_config,
+ VideoCodec video_codec);
+ EncoderSettings(const EncoderSettings& other);
+ EncoderSettings& operator=(const EncoderSettings& other);
+
+ // Encoder capabilities, implementation info, etc.
+ const VideoEncoder::EncoderInfo& encoder_info() const;
+ // Configuration parameters, ultimately coming from the API and negotiation.
+ const VideoEncoderConfig& encoder_config() const;
+ // Lower level config, heavily based on the VideoEncoderConfig.
+ const VideoCodec& video_codec() const;
+
+ private:
+ VideoEncoder::EncoderInfo encoder_info_;
+ VideoEncoderConfig encoder_config_;
+ VideoCodec video_codec_;
+};
+
+VideoCodecType GetVideoCodecTypeOrGeneric(
+ const absl::optional<EncoderSettings>& settings);
+
+} // namespace webrtc
+
+#endif // CALL_ADAPTATION_ENCODER_SETTINGS_H_
diff --git a/third_party/libwebrtc/call/adaptation/resource_adaptation_gn/moz.build b/third_party/libwebrtc/call/adaptation/resource_adaptation_gn/moz.build
new file mode 100644
index 0000000000..d8893a7341
--- /dev/null
+++ b/third_party/libwebrtc/call/adaptation/resource_adaptation_gn/moz.build
@@ -0,0 +1,246 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/call/adaptation/adaptation_constraint.cc",
+ "/third_party/libwebrtc/call/adaptation/broadcast_resource_listener.cc",
+ "/third_party/libwebrtc/call/adaptation/degradation_preference_provider.cc",
+ "/third_party/libwebrtc/call/adaptation/encoder_settings.cc",
+ "/third_party/libwebrtc/call/adaptation/resource_adaptation_processor.cc",
+ "/third_party/libwebrtc/call/adaptation/resource_adaptation_processor_interface.cc",
+ "/third_party/libwebrtc/call/adaptation/video_source_restrictions.cc",
+ "/third_party/libwebrtc/call/adaptation/video_stream_adapter.cc",
+ "/third_party/libwebrtc/call/adaptation/video_stream_input_state.cc",
+ "/third_party/libwebrtc/call/adaptation/video_stream_input_state_provider.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["RTC_ENABLE_WIN_WGC"] = True
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["TARGET_CPU"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["TARGET_CPU"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["TARGET_CPU"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["TARGET_CPU"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["TARGET_CPU"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["TARGET_CPU"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android" and CONFIG["TARGET_CPU"] == "arm":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["OS_TARGET"] == "Android" and CONFIG["TARGET_CPU"] == "x86":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "aarch64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "arm":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "x86":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "x86_64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("resource_adaptation_gn")
diff --git a/third_party/libwebrtc/call/adaptation/resource_adaptation_processor.cc b/third_party/libwebrtc/call/adaptation/resource_adaptation_processor.cc
new file mode 100644
index 0000000000..f4d1bf3538
--- /dev/null
+++ b/third_party/libwebrtc/call/adaptation/resource_adaptation_processor.cc
@@ -0,0 +1,378 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/adaptation/resource_adaptation_processor.h"
+
+#include <algorithm>
+#include <string>
+#include <utility>
+
+#include "absl/algorithm/container.h"
+#include "absl/strings/string_view.h"
+#include "api/sequence_checker.h"
+#include "api/video/video_adaptation_counters.h"
+#include "call/adaptation/video_stream_adapter.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/strings/string_builder.h"
+
+namespace webrtc {
+
+ResourceAdaptationProcessor::ResourceListenerDelegate::ResourceListenerDelegate(
+ ResourceAdaptationProcessor* processor)
+ : task_queue_(TaskQueueBase::Current()), processor_(processor) {
+ RTC_DCHECK(task_queue_);
+}
+
+void ResourceAdaptationProcessor::ResourceListenerDelegate::
+ OnProcessorDestroyed() {
+ RTC_DCHECK_RUN_ON(task_queue_);
+ processor_ = nullptr;
+}
+
+void ResourceAdaptationProcessor::ResourceListenerDelegate::
+ OnResourceUsageStateMeasured(rtc::scoped_refptr<Resource> resource,
+ ResourceUsageState usage_state) {
+ if (!task_queue_->IsCurrent()) {
+ task_queue_->PostTask(
+ [this_ref = rtc::scoped_refptr<ResourceListenerDelegate>(this),
+ resource, usage_state] {
+ this_ref->OnResourceUsageStateMeasured(resource, usage_state);
+ });
+ return;
+ }
+ RTC_DCHECK_RUN_ON(task_queue_);
+ if (processor_) {
+ processor_->OnResourceUsageStateMeasured(resource, usage_state);
+ }
+}
+
+ResourceAdaptationProcessor::MitigationResultAndLogMessage::
+ MitigationResultAndLogMessage()
+ : result(MitigationResult::kAdaptationApplied), message() {}
+
+ResourceAdaptationProcessor::MitigationResultAndLogMessage::
+ MitigationResultAndLogMessage(MitigationResult result,
+ absl::string_view message)
+ : result(result), message(message) {}
+
+ResourceAdaptationProcessor::ResourceAdaptationProcessor(
+ VideoStreamAdapter* stream_adapter)
+ : task_queue_(TaskQueueBase::Current()),
+ resource_listener_delegate_(
+ rtc::make_ref_counted<ResourceListenerDelegate>(this)),
+ resources_(),
+ stream_adapter_(stream_adapter),
+ last_reported_source_restrictions_(),
+ previous_mitigation_results_() {
+ RTC_DCHECK(task_queue_);
+ stream_adapter_->AddRestrictionsListener(this);
+}
+
+ResourceAdaptationProcessor::~ResourceAdaptationProcessor() {
+ RTC_DCHECK_RUN_ON(task_queue_);
+ RTC_DCHECK(resources_.empty())
+ << "There are resource(s) attached to a ResourceAdaptationProcessor "
+ << "being destroyed.";
+ stream_adapter_->RemoveRestrictionsListener(this);
+ resource_listener_delegate_->OnProcessorDestroyed();
+}
+
+void ResourceAdaptationProcessor::AddResourceLimitationsListener(
+ ResourceLimitationsListener* limitations_listener) {
+ RTC_DCHECK_RUN_ON(task_queue_);
+ RTC_DCHECK(std::find(resource_limitations_listeners_.begin(),
+ resource_limitations_listeners_.end(),
+ limitations_listener) ==
+ resource_limitations_listeners_.end());
+ resource_limitations_listeners_.push_back(limitations_listener);
+}
+
+void ResourceAdaptationProcessor::RemoveResourceLimitationsListener(
+ ResourceLimitationsListener* limitations_listener) {
+ RTC_DCHECK_RUN_ON(task_queue_);
+ auto it =
+ std::find(resource_limitations_listeners_.begin(),
+ resource_limitations_listeners_.end(), limitations_listener);
+ RTC_DCHECK(it != resource_limitations_listeners_.end());
+ resource_limitations_listeners_.erase(it);
+}
+
+void ResourceAdaptationProcessor::AddResource(
+ rtc::scoped_refptr<Resource> resource) {
+ RTC_DCHECK(resource);
+ {
+ MutexLock crit(&resources_lock_);
+ RTC_DCHECK(absl::c_find(resources_, resource) == resources_.end())
+ << "Resource \"" << resource->Name() << "\" was already registered.";
+ resources_.push_back(resource);
+ }
+ resource->SetResourceListener(resource_listener_delegate_.get());
+ RTC_LOG(LS_INFO) << "Registered resource \"" << resource->Name() << "\".";
+}
+
+std::vector<rtc::scoped_refptr<Resource>>
+ResourceAdaptationProcessor::GetResources() const {
+ MutexLock crit(&resources_lock_);
+ return resources_;
+}
+
+void ResourceAdaptationProcessor::RemoveResource(
+ rtc::scoped_refptr<Resource> resource) {
+ RTC_DCHECK(resource);
+ RTC_LOG(LS_INFO) << "Removing resource \"" << resource->Name() << "\".";
+ resource->SetResourceListener(nullptr);
+ {
+ MutexLock crit(&resources_lock_);
+ auto it = absl::c_find(resources_, resource);
+ RTC_DCHECK(it != resources_.end()) << "Resource \"" << resource->Name()
+ << "\" was not a registered resource.";
+ resources_.erase(it);
+ }
+ RemoveLimitationsImposedByResource(std::move(resource));
+}
+
+void ResourceAdaptationProcessor::RemoveLimitationsImposedByResource(
+ rtc::scoped_refptr<Resource> resource) {
+ if (!task_queue_->IsCurrent()) {
+ task_queue_->PostTask(
+ [this, resource]() { RemoveLimitationsImposedByResource(resource); });
+ return;
+ }
+ RTC_DCHECK_RUN_ON(task_queue_);
+ auto resource_adaptation_limits =
+ adaptation_limits_by_resources_.find(resource);
+ if (resource_adaptation_limits != adaptation_limits_by_resources_.end()) {
+ VideoStreamAdapter::RestrictionsWithCounters adaptation_limits =
+ resource_adaptation_limits->second;
+ adaptation_limits_by_resources_.erase(resource_adaptation_limits);
+ if (adaptation_limits_by_resources_.empty()) {
+ // Only the resource being removed was adapted so clear restrictions.
+ stream_adapter_->ClearRestrictions();
+ return;
+ }
+
+ VideoStreamAdapter::RestrictionsWithCounters most_limited =
+ FindMostLimitedResources().second;
+
+ if (adaptation_limits.counters.Total() <= most_limited.counters.Total()) {
+ // The removed limitations were less limited than the most limited
+ // resource. Don't change the current restrictions.
+ return;
+ }
+
+ // Apply the new most limited resource as the next restrictions.
+ Adaptation adapt_to = stream_adapter_->GetAdaptationTo(
+ most_limited.counters, most_limited.restrictions);
+ RTC_DCHECK_EQ(adapt_to.status(), Adaptation::Status::kValid);
+ stream_adapter_->ApplyAdaptation(adapt_to, nullptr);
+
+ RTC_LOG(LS_INFO)
+ << "Most limited resource removed. Restoring restrictions to "
+ "next most limited restrictions: "
+ << most_limited.restrictions.ToString() << " with counters "
+ << most_limited.counters.ToString();
+ }
+}
+
+void ResourceAdaptationProcessor::OnResourceUsageStateMeasured(
+ rtc::scoped_refptr<Resource> resource,
+ ResourceUsageState usage_state) {
+ RTC_DCHECK_RUN_ON(task_queue_);
+ RTC_DCHECK(resource);
+ // `resource` could have been removed after signalling.
+ {
+ MutexLock crit(&resources_lock_);
+ if (absl::c_find(resources_, resource) == resources_.end()) {
+ RTC_LOG(LS_INFO) << "Ignoring signal from removed resource \""
+ << resource->Name() << "\".";
+ return;
+ }
+ }
+ MitigationResultAndLogMessage result_and_message;
+ switch (usage_state) {
+ case ResourceUsageState::kOveruse:
+ result_and_message = OnResourceOveruse(resource);
+ break;
+ case ResourceUsageState::kUnderuse:
+ result_and_message = OnResourceUnderuse(resource);
+ break;
+ }
+ // Maybe log the result of the operation.
+ auto it = previous_mitigation_results_.find(resource.get());
+ if (it != previous_mitigation_results_.end() &&
+ it->second == result_and_message.result) {
+ // This resource has previously reported the same result and we haven't
+ // successfully adapted since - don't log to avoid spam.
+ return;
+ }
+ RTC_LOG(LS_INFO) << "Resource \"" << resource->Name() << "\" signalled "
+ << ResourceUsageStateToString(usage_state) << ". "
+ << result_and_message.message;
+ if (result_and_message.result == MitigationResult::kAdaptationApplied) {
+ previous_mitigation_results_.clear();
+ } else {
+ previous_mitigation_results_.insert(
+ std::make_pair(resource.get(), result_and_message.result));
+ }
+}
+
+ResourceAdaptationProcessor::MitigationResultAndLogMessage
+ResourceAdaptationProcessor::OnResourceUnderuse(
+ rtc::scoped_refptr<Resource> reason_resource) {
+ RTC_DCHECK_RUN_ON(task_queue_);
+ // How can this stream be adapted up?
+ Adaptation adaptation = stream_adapter_->GetAdaptationUp();
+ if (adaptation.status() != Adaptation::Status::kValid) {
+ rtc::StringBuilder message;
+ message << "Not adapting up because VideoStreamAdapter returned "
+ << Adaptation::StatusToString(adaptation.status());
+ return MitigationResultAndLogMessage(MitigationResult::kRejectedByAdapter,
+ message.Release());
+ }
+ // Check that resource is most limited.
+ std::vector<rtc::scoped_refptr<Resource>> most_limited_resources;
+ VideoStreamAdapter::RestrictionsWithCounters most_limited_restrictions;
+ std::tie(most_limited_resources, most_limited_restrictions) =
+ FindMostLimitedResources();
+
+ // If the most restricted resource is less limited than current restrictions
+ // then proceed with adapting up.
+ if (!most_limited_resources.empty() &&
+ most_limited_restrictions.counters.Total() >=
+ stream_adapter_->adaptation_counters().Total()) {
+ // If `reason_resource` is not one of the most limiting resources then abort
+ // adaptation.
+ if (absl::c_find(most_limited_resources, reason_resource) ==
+ most_limited_resources.end()) {
+ rtc::StringBuilder message;
+ message << "Resource \"" << reason_resource->Name()
+ << "\" was not the most limited resource.";
+ return MitigationResultAndLogMessage(
+ MitigationResult::kNotMostLimitedResource, message.Release());
+ }
+
+ if (most_limited_resources.size() > 1) {
+ // If there are multiple most limited resources, all must signal underuse
+ // before the adaptation is applied.
+ UpdateResourceLimitations(reason_resource, adaptation.restrictions(),
+ adaptation.counters());
+ rtc::StringBuilder message;
+ message << "Resource \"" << reason_resource->Name()
+ << "\" was not the only most limited resource.";
+ return MitigationResultAndLogMessage(
+ MitigationResult::kSharedMostLimitedResource, message.Release());
+ }
+ }
+ // Apply adaptation.
+ stream_adapter_->ApplyAdaptation(adaptation, reason_resource);
+ rtc::StringBuilder message;
+ message << "Adapted up successfully. Unfiltered adaptations: "
+ << stream_adapter_->adaptation_counters().ToString();
+ return MitigationResultAndLogMessage(MitigationResult::kAdaptationApplied,
+ message.Release());
+}
+
+ResourceAdaptationProcessor::MitigationResultAndLogMessage
+ResourceAdaptationProcessor::OnResourceOveruse(
+ rtc::scoped_refptr<Resource> reason_resource) {
+ RTC_DCHECK_RUN_ON(task_queue_);
+ // How can this stream be adapted up?
+ Adaptation adaptation = stream_adapter_->GetAdaptationDown();
+ if (adaptation.status() == Adaptation::Status::kLimitReached) {
+ // Add resource as most limited.
+ VideoStreamAdapter::RestrictionsWithCounters restrictions;
+ std::tie(std::ignore, restrictions) = FindMostLimitedResources();
+ UpdateResourceLimitations(reason_resource, restrictions.restrictions,
+ restrictions.counters);
+ }
+ if (adaptation.status() != Adaptation::Status::kValid) {
+ rtc::StringBuilder message;
+ message << "Not adapting down because VideoStreamAdapter returned "
+ << Adaptation::StatusToString(adaptation.status());
+ return MitigationResultAndLogMessage(MitigationResult::kRejectedByAdapter,
+ message.Release());
+ }
+ // Apply adaptation.
+ UpdateResourceLimitations(reason_resource, adaptation.restrictions(),
+ adaptation.counters());
+ stream_adapter_->ApplyAdaptation(adaptation, reason_resource);
+ rtc::StringBuilder message;
+ message << "Adapted down successfully. Unfiltered adaptations: "
+ << stream_adapter_->adaptation_counters().ToString();
+ return MitigationResultAndLogMessage(MitigationResult::kAdaptationApplied,
+ message.Release());
+}
+
+std::pair<std::vector<rtc::scoped_refptr<Resource>>,
+ VideoStreamAdapter::RestrictionsWithCounters>
+ResourceAdaptationProcessor::FindMostLimitedResources() const {
+ std::vector<rtc::scoped_refptr<Resource>> most_limited_resources;
+ VideoStreamAdapter::RestrictionsWithCounters most_limited_restrictions{
+ VideoSourceRestrictions(), VideoAdaptationCounters()};
+
+ for (const auto& resource_and_adaptation_limit_ :
+ adaptation_limits_by_resources_) {
+ const auto& restrictions_with_counters =
+ resource_and_adaptation_limit_.second;
+ if (restrictions_with_counters.counters.Total() >
+ most_limited_restrictions.counters.Total()) {
+ most_limited_restrictions = restrictions_with_counters;
+ most_limited_resources.clear();
+ most_limited_resources.push_back(resource_and_adaptation_limit_.first);
+ } else if (most_limited_restrictions.counters ==
+ restrictions_with_counters.counters) {
+ most_limited_resources.push_back(resource_and_adaptation_limit_.first);
+ }
+ }
+ return std::make_pair(std::move(most_limited_resources),
+ most_limited_restrictions);
+}
+
+void ResourceAdaptationProcessor::UpdateResourceLimitations(
+ rtc::scoped_refptr<Resource> reason_resource,
+ const VideoSourceRestrictions& restrictions,
+ const VideoAdaptationCounters& counters) {
+ auto& adaptation_limits = adaptation_limits_by_resources_[reason_resource];
+ if (adaptation_limits.restrictions == restrictions &&
+ adaptation_limits.counters == counters) {
+ return;
+ }
+ adaptation_limits = {restrictions, counters};
+
+ std::map<rtc::scoped_refptr<Resource>, VideoAdaptationCounters> limitations;
+ for (const auto& p : adaptation_limits_by_resources_) {
+ limitations.insert(std::make_pair(p.first, p.second.counters));
+ }
+ for (auto limitations_listener : resource_limitations_listeners_) {
+ limitations_listener->OnResourceLimitationChanged(reason_resource,
+ limitations);
+ }
+}
+
+void ResourceAdaptationProcessor::OnVideoSourceRestrictionsUpdated(
+ VideoSourceRestrictions restrictions,
+ const VideoAdaptationCounters& adaptation_counters,
+ rtc::scoped_refptr<Resource> reason,
+ const VideoSourceRestrictions& unfiltered_restrictions) {
+ RTC_DCHECK_RUN_ON(task_queue_);
+ if (reason) {
+ UpdateResourceLimitations(reason, unfiltered_restrictions,
+ adaptation_counters);
+ } else if (adaptation_counters.Total() == 0) {
+ // Adaptations are cleared.
+ adaptation_limits_by_resources_.clear();
+ previous_mitigation_results_.clear();
+ for (auto limitations_listener : resource_limitations_listeners_) {
+ limitations_listener->OnResourceLimitationChanged(nullptr, {});
+ }
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/adaptation/resource_adaptation_processor.h b/third_party/libwebrtc/call/adaptation/resource_adaptation_processor.h
new file mode 100644
index 0000000000..db3b4c2506
--- /dev/null
+++ b/third_party/libwebrtc/call/adaptation/resource_adaptation_processor.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef CALL_ADAPTATION_RESOURCE_ADAPTATION_PROCESSOR_H_
+#define CALL_ADAPTATION_RESOURCE_ADAPTATION_PROCESSOR_H_
+
+#include <map>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "api/adaptation/resource.h"
+#include "api/rtp_parameters.h"
+#include "api/scoped_refptr.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/video/video_adaptation_counters.h"
+#include "api/video/video_frame.h"
+#include "call/adaptation/resource_adaptation_processor_interface.h"
+#include "call/adaptation/video_source_restrictions.h"
+#include "call/adaptation/video_stream_adapter.h"
+#include "call/adaptation/video_stream_input_state.h"
+#include "call/adaptation/video_stream_input_state_provider.h"
+#include "video/video_stream_encoder_observer.h"
+
+namespace webrtc {
+
+// The Resource Adaptation Processor is responsible for reacting to resource
+// usage measurements (e.g. overusing or underusing CPU). When a resource is
+// overused the Processor is responsible for performing mitigations in order to
+// consume less resources.
+//
+// Today we have one Processor per VideoStreamEncoder and the Processor is only
+// capable of restricting resolution or frame rate of the encoded stream. In the
+// future we should have a single Processor responsible for all encoded streams,
+// and it should be capable of reconfiguring other things than just
+// VideoSourceRestrictions (e.g. reduce render frame rate).
+// See Resource-Adaptation hotlist:
+// https://bugs.chromium.org/u/590058293/hotlists/Resource-Adaptation
+//
+// The ResourceAdaptationProcessor is single-threaded. It may be constructed on
+// any thread but MUST subsequently be used and destroyed on a single sequence,
+// i.e. the "resource adaptation task queue". Resources can be added and removed
+// from any thread.
+class ResourceAdaptationProcessor : public ResourceAdaptationProcessorInterface,
+ public VideoSourceRestrictionsListener,
+ public ResourceListener {
+ public:
+ explicit ResourceAdaptationProcessor(
+ VideoStreamAdapter* video_stream_adapter);
+ ~ResourceAdaptationProcessor() override;
+
+ // ResourceAdaptationProcessorInterface implementation.
+ void AddResourceLimitationsListener(
+ ResourceLimitationsListener* limitations_listener) override;
+ void RemoveResourceLimitationsListener(
+ ResourceLimitationsListener* limitations_listener) override;
+ void AddResource(rtc::scoped_refptr<Resource> resource) override;
+ std::vector<rtc::scoped_refptr<Resource>> GetResources() const override;
+ void RemoveResource(rtc::scoped_refptr<Resource> resource) override;
+
+ // ResourceListener implementation.
+ // Triggers OnResourceUnderuse() or OnResourceOveruse().
+ void OnResourceUsageStateMeasured(rtc::scoped_refptr<Resource> resource,
+ ResourceUsageState usage_state) override;
+
+ // VideoSourceRestrictionsListener implementation.
+ void OnVideoSourceRestrictionsUpdated(
+ VideoSourceRestrictions restrictions,
+ const VideoAdaptationCounters& adaptation_counters,
+ rtc::scoped_refptr<Resource> reason,
+ const VideoSourceRestrictions& unfiltered_restrictions) override;
+
+ private:
+ // If resource usage measurements happens off the adaptation task queue, this
+ // class takes care of posting the measurement for the processor to handle it
+ // on the adaptation task queue.
+ class ResourceListenerDelegate : public rtc::RefCountInterface,
+ public ResourceListener {
+ public:
+ explicit ResourceListenerDelegate(ResourceAdaptationProcessor* processor);
+
+ void OnProcessorDestroyed();
+
+ // ResourceListener implementation.
+ void OnResourceUsageStateMeasured(rtc::scoped_refptr<Resource> resource,
+ ResourceUsageState usage_state) override;
+
+ private:
+ TaskQueueBase* task_queue_;
+ ResourceAdaptationProcessor* processor_ RTC_GUARDED_BY(task_queue_);
+ };
+
+ enum class MitigationResult {
+ kNotMostLimitedResource,
+ kSharedMostLimitedResource,
+ kRejectedByAdapter,
+ kAdaptationApplied,
+ };
+
+ struct MitigationResultAndLogMessage {
+ MitigationResultAndLogMessage();
+ MitigationResultAndLogMessage(MitigationResult result,
+ absl::string_view message);
+ MitigationResult result;
+ std::string message;
+ };
+
+ // Performs the adaptation by getting the next target, applying it and
+ // informing listeners of the new VideoSourceRestriction and adaptation
+ // counters.
+ MitigationResultAndLogMessage OnResourceUnderuse(
+ rtc::scoped_refptr<Resource> reason_resource);
+ MitigationResultAndLogMessage OnResourceOveruse(
+ rtc::scoped_refptr<Resource> reason_resource);
+
+ void UpdateResourceLimitations(rtc::scoped_refptr<Resource> reason_resource,
+ const VideoSourceRestrictions& restrictions,
+ const VideoAdaptationCounters& counters)
+ RTC_RUN_ON(task_queue_);
+
+ // Searches `adaptation_limits_by_resources_` for each resource with the
+ // highest total adaptation counts. Adaptation up may only occur if the
+ // resource performing the adaptation is the only most limited resource. This
+ // function returns the list of all most limited resources as well as the
+ // corresponding adaptation of that resource.
+ std::pair<std::vector<rtc::scoped_refptr<Resource>>,
+ VideoStreamAdapter::RestrictionsWithCounters>
+ FindMostLimitedResources() const RTC_RUN_ON(task_queue_);
+
+ void RemoveLimitationsImposedByResource(
+ rtc::scoped_refptr<Resource> resource);
+
+ TaskQueueBase* task_queue_;
+ rtc::scoped_refptr<ResourceListenerDelegate> resource_listener_delegate_;
+ // Input and output.
+ mutable Mutex resources_lock_;
+ std::vector<rtc::scoped_refptr<Resource>> resources_
+ RTC_GUARDED_BY(resources_lock_);
+ std::vector<ResourceLimitationsListener*> resource_limitations_listeners_
+ RTC_GUARDED_BY(task_queue_);
+ // Purely used for statistics, does not ensure mapped resources stay alive.
+ std::map<rtc::scoped_refptr<Resource>,
+ VideoStreamAdapter::RestrictionsWithCounters>
+ adaptation_limits_by_resources_ RTC_GUARDED_BY(task_queue_);
+ // Responsible for generating and applying possible adaptations.
+ VideoStreamAdapter* const stream_adapter_ RTC_GUARDED_BY(task_queue_);
+ VideoSourceRestrictions last_reported_source_restrictions_
+ RTC_GUARDED_BY(task_queue_);
+ // Keeps track of previous mitigation results per resource since the last
+ // successful adaptation. Used to avoid RTC_LOG spam.
+ std::map<Resource*, MitigationResult> previous_mitigation_results_
+ RTC_GUARDED_BY(task_queue_);
+};
+
+} // namespace webrtc
+
+#endif // CALL_ADAPTATION_RESOURCE_ADAPTATION_PROCESSOR_H_
diff --git a/third_party/libwebrtc/call/adaptation/resource_adaptation_processor_interface.cc b/third_party/libwebrtc/call/adaptation/resource_adaptation_processor_interface.cc
new file mode 100644
index 0000000000..79f099b267
--- /dev/null
+++ b/third_party/libwebrtc/call/adaptation/resource_adaptation_processor_interface.cc
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/adaptation/resource_adaptation_processor_interface.h"
+
+namespace webrtc {
+
+ResourceAdaptationProcessorInterface::~ResourceAdaptationProcessorInterface() =
+ default;
+
+ResourceLimitationsListener::~ResourceLimitationsListener() = default;
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/adaptation/resource_adaptation_processor_interface.h b/third_party/libwebrtc/call/adaptation/resource_adaptation_processor_interface.h
new file mode 100644
index 0000000000..4729488150
--- /dev/null
+++ b/third_party/libwebrtc/call/adaptation/resource_adaptation_processor_interface.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef CALL_ADAPTATION_RESOURCE_ADAPTATION_PROCESSOR_INTERFACE_H_
+#define CALL_ADAPTATION_RESOURCE_ADAPTATION_PROCESSOR_INTERFACE_H_
+
+#include <map>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/adaptation/resource.h"
+#include "api/rtp_parameters.h"
+#include "api/scoped_refptr.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/video/video_adaptation_counters.h"
+#include "api/video/video_frame.h"
+#include "call/adaptation/adaptation_constraint.h"
+#include "call/adaptation/encoder_settings.h"
+#include "call/adaptation/video_source_restrictions.h"
+
+namespace webrtc {
+
+class ResourceLimitationsListener {
+ public:
+ virtual ~ResourceLimitationsListener();
+
+ // The limitations on a resource were changed. This does not mean the current
+ // video restrictions have changed.
+ virtual void OnResourceLimitationChanged(
+ rtc::scoped_refptr<Resource> resource,
+ const std::map<rtc::scoped_refptr<Resource>, VideoAdaptationCounters>&
+ resource_limitations) = 0;
+};
+
+// The Resource Adaptation Processor is responsible for reacting to resource
+// usage measurements (e.g. overusing or underusing CPU). When a resource is
+// overused the Processor is responsible for performing mitigations in order to
+// consume less resources.
+class ResourceAdaptationProcessorInterface {
+ public:
+ virtual ~ResourceAdaptationProcessorInterface();
+
+ virtual void AddResourceLimitationsListener(
+ ResourceLimitationsListener* limitations_listener) = 0;
+ virtual void RemoveResourceLimitationsListener(
+ ResourceLimitationsListener* limitations_listener) = 0;
+ // Starts or stops listening to resources, effectively enabling or disabling
+ // processing. May be called from anywhere.
+ // TODO(https://crbug.com/webrtc/11172): Automatically register and unregister
+ // with AddResource() and RemoveResource() instead. When the processor is
+ // multi-stream aware, stream-specific resouces will get added and removed
+ // over time.
+ virtual void AddResource(rtc::scoped_refptr<Resource> resource) = 0;
+ virtual std::vector<rtc::scoped_refptr<Resource>> GetResources() const = 0;
+ virtual void RemoveResource(rtc::scoped_refptr<Resource> resource) = 0;
+};
+
+} // namespace webrtc
+
+#endif // CALL_ADAPTATION_RESOURCE_ADAPTATION_PROCESSOR_INTERFACE_H_
diff --git a/third_party/libwebrtc/call/adaptation/resource_adaptation_processor_unittest.cc b/third_party/libwebrtc/call/adaptation/resource_adaptation_processor_unittest.cc
new file mode 100644
index 0000000000..ccccd3fe04
--- /dev/null
+++ b/third_party/libwebrtc/call/adaptation/resource_adaptation_processor_unittest.cc
@@ -0,0 +1,740 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/adaptation/resource_adaptation_processor.h"
+
+#include "api/adaptation/resource.h"
+#include "api/scoped_refptr.h"
+#include "api/video/video_adaptation_counters.h"
+#include "call/adaptation/resource_adaptation_processor_interface.h"
+#include "call/adaptation/test/fake_frame_rate_provider.h"
+#include "call/adaptation/test/fake_resource.h"
+#include "call/adaptation/video_source_restrictions.h"
+#include "call/adaptation/video_stream_input_state_provider.h"
+#include "rtc_base/event.h"
+#include "rtc_base/gunit.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/task_queue_for_test.h"
+#include "test/gtest.h"
+#include "test/scoped_key_value_config.h"
+
+namespace webrtc {
+
+namespace {
+
+const int kDefaultFrameRate = 30;
+const int kDefaultFrameSize = 1280 * 720;
+constexpr TimeDelta kDefaultTimeout = TimeDelta::Seconds(5);
+
+class VideoSourceRestrictionsListenerForTesting
+ : public VideoSourceRestrictionsListener {
+ public:
+ VideoSourceRestrictionsListenerForTesting()
+ : restrictions_updated_count_(0),
+ restrictions_(),
+ adaptation_counters_(),
+ reason_(nullptr) {}
+ ~VideoSourceRestrictionsListenerForTesting() override {}
+
+ size_t restrictions_updated_count() const {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ return restrictions_updated_count_;
+ }
+ VideoSourceRestrictions restrictions() const {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ return restrictions_;
+ }
+ VideoAdaptationCounters adaptation_counters() const {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ return adaptation_counters_;
+ }
+ rtc::scoped_refptr<Resource> reason() const {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ return reason_;
+ }
+
+ // VideoSourceRestrictionsListener implementation.
+ void OnVideoSourceRestrictionsUpdated(
+ VideoSourceRestrictions restrictions,
+ const VideoAdaptationCounters& adaptation_counters,
+ rtc::scoped_refptr<Resource> reason,
+ const VideoSourceRestrictions& unfiltered_restrictions) override {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ ++restrictions_updated_count_;
+ restrictions_ = restrictions;
+ adaptation_counters_ = adaptation_counters;
+ reason_ = reason;
+ }
+
+ private:
+ SequenceChecker sequence_checker_;
+ size_t restrictions_updated_count_ RTC_GUARDED_BY(&sequence_checker_);
+ VideoSourceRestrictions restrictions_ RTC_GUARDED_BY(&sequence_checker_);
+ VideoAdaptationCounters adaptation_counters_
+ RTC_GUARDED_BY(&sequence_checker_);
+ rtc::scoped_refptr<Resource> reason_ RTC_GUARDED_BY(&sequence_checker_);
+};
+
+class ResourceAdaptationProcessorTest : public ::testing::Test {
+ public:
+ ResourceAdaptationProcessorTest()
+ : frame_rate_provider_(),
+ input_state_provider_(&frame_rate_provider_),
+ resource_(FakeResource::Create("FakeResource")),
+ other_resource_(FakeResource::Create("OtherFakeResource")),
+ video_stream_adapter_(
+ std::make_unique<VideoStreamAdapter>(&input_state_provider_,
+ &frame_rate_provider_,
+ field_trials_)),
+ processor_(std::make_unique<ResourceAdaptationProcessor>(
+ video_stream_adapter_.get())) {
+ video_stream_adapter_->AddRestrictionsListener(&restrictions_listener_);
+ processor_->AddResource(resource_);
+ processor_->AddResource(other_resource_);
+ }
+ ~ResourceAdaptationProcessorTest() override {
+ if (processor_) {
+ DestroyProcessor();
+ }
+ }
+
+ void SetInputStates(bool has_input, int fps, int frame_size) {
+ input_state_provider_.OnHasInputChanged(has_input);
+ frame_rate_provider_.set_fps(fps);
+ input_state_provider_.OnFrameSizeObserved(frame_size);
+ }
+
+ void RestrictSource(VideoSourceRestrictions restrictions) {
+ SetInputStates(
+ true, restrictions.max_frame_rate().value_or(kDefaultFrameRate),
+ restrictions.target_pixels_per_frame().has_value()
+ ? restrictions.target_pixels_per_frame().value()
+ : restrictions.max_pixels_per_frame().value_or(kDefaultFrameSize));
+ }
+
+ void DestroyProcessor() {
+ if (resource_) {
+ processor_->RemoveResource(resource_);
+ }
+ if (other_resource_) {
+ processor_->RemoveResource(other_resource_);
+ }
+ video_stream_adapter_->RemoveRestrictionsListener(&restrictions_listener_);
+ processor_.reset();
+ }
+
+ static void WaitUntilTaskQueueIdle() {
+ ASSERT_TRUE(rtc::Thread::Current()->ProcessMessages(0));
+ }
+
+ protected:
+ rtc::AutoThread main_thread_;
+ webrtc::test::ScopedKeyValueConfig field_trials_;
+ FakeFrameRateProvider frame_rate_provider_;
+ VideoStreamInputStateProvider input_state_provider_;
+ rtc::scoped_refptr<FakeResource> resource_;
+ rtc::scoped_refptr<FakeResource> other_resource_;
+ std::unique_ptr<VideoStreamAdapter> video_stream_adapter_;
+ std::unique_ptr<ResourceAdaptationProcessor> processor_;
+ VideoSourceRestrictionsListenerForTesting restrictions_listener_;
+};
+
+} // namespace
+
+TEST_F(ResourceAdaptationProcessorTest, DisabledByDefault) {
+ SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize);
+ // Adaptation does not happen when disabled.
+ resource_->SetUsageState(ResourceUsageState::kOveruse);
+ EXPECT_EQ(0u, restrictions_listener_.restrictions_updated_count());
+}
+
+TEST_F(ResourceAdaptationProcessorTest, InsufficientInput) {
+ video_stream_adapter_->SetDegradationPreference(
+ DegradationPreference::MAINTAIN_FRAMERATE);
+ // Adaptation does not happen if input is insufficient.
+ // When frame size is missing (OnFrameSizeObserved not called yet).
+ input_state_provider_.OnHasInputChanged(true);
+ resource_->SetUsageState(ResourceUsageState::kOveruse);
+ EXPECT_EQ(0u, restrictions_listener_.restrictions_updated_count());
+ // When "has input" is missing.
+ SetInputStates(false, kDefaultFrameRate, kDefaultFrameSize);
+ resource_->SetUsageState(ResourceUsageState::kOveruse);
+ EXPECT_EQ(0u, restrictions_listener_.restrictions_updated_count());
+ // Note: frame rate cannot be missing, if unset it is 0.
+}
+
+// These tests verify that restrictions are applied, but not exactly how much
+// the source is restricted. This ensures that the VideoStreamAdapter is wired
+// up correctly but not exactly how the VideoStreamAdapter generates
+// restrictions. For that, see video_stream_adapter_unittest.cc.
+TEST_F(ResourceAdaptationProcessorTest,
+ OveruseTriggersRestrictingResolutionInMaintainFrameRate) {
+ video_stream_adapter_->SetDegradationPreference(
+ DegradationPreference::MAINTAIN_FRAMERATE);
+ SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize);
+ resource_->SetUsageState(ResourceUsageState::kOveruse);
+ EXPECT_EQ(1u, restrictions_listener_.restrictions_updated_count());
+ EXPECT_TRUE(
+ restrictions_listener_.restrictions().max_pixels_per_frame().has_value());
+}
+
+TEST_F(ResourceAdaptationProcessorTest,
+ OveruseTriggersRestrictingFrameRateInMaintainResolution) {
+ video_stream_adapter_->SetDegradationPreference(
+ DegradationPreference::MAINTAIN_RESOLUTION);
+ SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize);
+ resource_->SetUsageState(ResourceUsageState::kOveruse);
+ EXPECT_EQ(1u, restrictions_listener_.restrictions_updated_count());
+ EXPECT_TRUE(
+ restrictions_listener_.restrictions().max_frame_rate().has_value());
+}
+
+TEST_F(ResourceAdaptationProcessorTest,
+ OveruseTriggersRestrictingFrameRateAndResolutionInBalanced) {
+ video_stream_adapter_->SetDegradationPreference(
+ DegradationPreference::BALANCED);
+ SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize);
+ // Adapting multiple times eventually restricts both frame rate and
+ // resolution. Exactly how many times we need to adapt depends on
+ // BalancedDegradationSettings, VideoStreamAdapter and default input
+ // states. This test requires it to be achieved within 4 adaptations.
+ for (size_t i = 0; i < 4; ++i) {
+ resource_->SetUsageState(ResourceUsageState::kOveruse);
+ EXPECT_EQ(i + 1, restrictions_listener_.restrictions_updated_count());
+ RestrictSource(restrictions_listener_.restrictions());
+ }
+ EXPECT_TRUE(
+ restrictions_listener_.restrictions().max_pixels_per_frame().has_value());
+ EXPECT_TRUE(
+ restrictions_listener_.restrictions().max_frame_rate().has_value());
+}
+
+TEST_F(ResourceAdaptationProcessorTest, AwaitingPreviousAdaptation) {
+ video_stream_adapter_->SetDegradationPreference(
+ DegradationPreference::MAINTAIN_FRAMERATE);
+ SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize);
+ resource_->SetUsageState(ResourceUsageState::kOveruse);
+ EXPECT_EQ(1u, restrictions_listener_.restrictions_updated_count());
+ // If we don't restrict the source then adaptation will not happen again
+ // due to "awaiting previous adaptation". This prevents "double-adapt".
+ resource_->SetUsageState(ResourceUsageState::kOveruse);
+ EXPECT_EQ(1u, restrictions_listener_.restrictions_updated_count());
+}
+
+TEST_F(ResourceAdaptationProcessorTest, CannotAdaptUpWhenUnrestricted) {
+ video_stream_adapter_->SetDegradationPreference(
+ DegradationPreference::MAINTAIN_FRAMERATE);
+ SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize);
+ resource_->SetUsageState(ResourceUsageState::kUnderuse);
+ EXPECT_EQ(0u, restrictions_listener_.restrictions_updated_count());
+}
+
+TEST_F(ResourceAdaptationProcessorTest, UnderuseTakesUsBackToUnrestricted) {
+ video_stream_adapter_->SetDegradationPreference(
+ DegradationPreference::MAINTAIN_FRAMERATE);
+ SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize);
+ resource_->SetUsageState(ResourceUsageState::kOveruse);
+ EXPECT_EQ(1u, restrictions_listener_.restrictions_updated_count());
+ RestrictSource(restrictions_listener_.restrictions());
+ resource_->SetUsageState(ResourceUsageState::kUnderuse);
+ EXPECT_EQ(2u, restrictions_listener_.restrictions_updated_count());
+ EXPECT_EQ(VideoSourceRestrictions(), restrictions_listener_.restrictions());
+}
+
+TEST_F(ResourceAdaptationProcessorTest,
+ ResourcesCanNotAdaptUpIfNeverAdaptedDown) {
+ video_stream_adapter_->SetDegradationPreference(
+ DegradationPreference::MAINTAIN_FRAMERATE);
+ SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize);
+ resource_->SetUsageState(ResourceUsageState::kOveruse);
+ EXPECT_EQ(1u, restrictions_listener_.restrictions_updated_count());
+ RestrictSource(restrictions_listener_.restrictions());
+
+ // Other resource signals under-use
+ other_resource_->SetUsageState(ResourceUsageState::kUnderuse);
+ EXPECT_EQ(1u, restrictions_listener_.restrictions_updated_count());
+}
+
+TEST_F(ResourceAdaptationProcessorTest,
+ ResourcesCanNotAdaptUpIfNotAdaptedDownAfterReset) {
+ video_stream_adapter_->SetDegradationPreference(
+ DegradationPreference::MAINTAIN_FRAMERATE);
+ SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize);
+ resource_->SetUsageState(ResourceUsageState::kOveruse);
+ EXPECT_EQ(1u, restrictions_listener_.restrictions_updated_count());
+
+ video_stream_adapter_->ClearRestrictions();
+ EXPECT_EQ(0, restrictions_listener_.adaptation_counters().Total());
+ other_resource_->SetUsageState(ResourceUsageState::kOveruse);
+ EXPECT_EQ(1, restrictions_listener_.adaptation_counters().Total());
+ RestrictSource(restrictions_listener_.restrictions());
+
+ // resource_ did not overuse after we reset the restrictions, so adapt
+ // up should be disallowed.
+ resource_->SetUsageState(ResourceUsageState::kUnderuse);
+ EXPECT_EQ(1, restrictions_listener_.adaptation_counters().Total());
+}
+
+TEST_F(ResourceAdaptationProcessorTest, OnlyMostLimitedResourceMayAdaptUp) {
+ video_stream_adapter_->SetDegradationPreference(
+ DegradationPreference::MAINTAIN_FRAMERATE);
+ SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize);
+ resource_->SetUsageState(ResourceUsageState::kOveruse);
+ EXPECT_EQ(1, restrictions_listener_.adaptation_counters().Total());
+ RestrictSource(restrictions_listener_.restrictions());
+ other_resource_->SetUsageState(ResourceUsageState::kOveruse);
+ EXPECT_EQ(2, restrictions_listener_.adaptation_counters().Total());
+ RestrictSource(restrictions_listener_.restrictions());
+
+ // `other_resource_` is most limited, resource_ can't adapt up.
+ resource_->SetUsageState(ResourceUsageState::kUnderuse);
+ EXPECT_EQ(2, restrictions_listener_.adaptation_counters().Total());
+ RestrictSource(restrictions_listener_.restrictions());
+ other_resource_->SetUsageState(ResourceUsageState::kUnderuse);
+ EXPECT_EQ(1, restrictions_listener_.adaptation_counters().Total());
+ RestrictSource(restrictions_listener_.restrictions());
+
+ // `resource_` and `other_resource_` are now most limited, so both must
+ // signal underuse to adapt up.
+ other_resource_->SetUsageState(ResourceUsageState::kUnderuse);
+ EXPECT_EQ(1, restrictions_listener_.adaptation_counters().Total());
+ RestrictSource(restrictions_listener_.restrictions());
+ resource_->SetUsageState(ResourceUsageState::kUnderuse);
+ EXPECT_EQ(0, restrictions_listener_.adaptation_counters().Total());
+ RestrictSource(restrictions_listener_.restrictions());
+}
+
+TEST_F(ResourceAdaptationProcessorTest,
+ MultipleResourcesCanTriggerMultipleAdaptations) {
+ video_stream_adapter_->SetDegradationPreference(
+ DegradationPreference::MAINTAIN_FRAMERATE);
+ SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize);
+ resource_->SetUsageState(ResourceUsageState::kOveruse);
+ EXPECT_EQ(1, restrictions_listener_.adaptation_counters().Total());
+ RestrictSource(restrictions_listener_.restrictions());
+ other_resource_->SetUsageState(ResourceUsageState::kOveruse);
+ EXPECT_EQ(2, restrictions_listener_.adaptation_counters().Total());
+ RestrictSource(restrictions_listener_.restrictions());
+ other_resource_->SetUsageState(ResourceUsageState::kOveruse);
+ EXPECT_EQ(3, restrictions_listener_.adaptation_counters().Total());
+ RestrictSource(restrictions_listener_.restrictions());
+
+ // resource_ is not most limited so can't adapt from underuse.
+ resource_->SetUsageState(ResourceUsageState::kUnderuse);
+ EXPECT_EQ(3, restrictions_listener_.adaptation_counters().Total());
+ RestrictSource(restrictions_listener_.restrictions());
+ other_resource_->SetUsageState(ResourceUsageState::kUnderuse);
+ EXPECT_EQ(2, restrictions_listener_.adaptation_counters().Total());
+ RestrictSource(restrictions_listener_.restrictions());
+ // resource_ is still not most limited so can't adapt from underuse.
+ resource_->SetUsageState(ResourceUsageState::kUnderuse);
+ EXPECT_EQ(2, restrictions_listener_.adaptation_counters().Total());
+ RestrictSource(restrictions_listener_.restrictions());
+
+ // However it will be after overuse
+ resource_->SetUsageState(ResourceUsageState::kOveruse);
+ EXPECT_EQ(3, restrictions_listener_.adaptation_counters().Total());
+ RestrictSource(restrictions_listener_.restrictions());
+
+ // Now other_resource_ can't adapt up as it is not most restricted.
+ other_resource_->SetUsageState(ResourceUsageState::kUnderuse);
+ EXPECT_EQ(3, restrictions_listener_.adaptation_counters().Total());
+ RestrictSource(restrictions_listener_.restrictions());
+
+ // resource_ is limited at 3 adaptations and other_resource_ 2.
+ // With the most limited resource signalling underuse in the following
+ // order we get back to unrestricted video.
+ resource_->SetUsageState(ResourceUsageState::kUnderuse);
+ EXPECT_EQ(2, restrictions_listener_.adaptation_counters().Total());
+ RestrictSource(restrictions_listener_.restrictions());
+ // Both resource_ and other_resource_ are most limited.
+ other_resource_->SetUsageState(ResourceUsageState::kUnderuse);
+ EXPECT_EQ(2, restrictions_listener_.adaptation_counters().Total());
+ RestrictSource(restrictions_listener_.restrictions());
+ resource_->SetUsageState(ResourceUsageState::kUnderuse);
+ EXPECT_EQ(1, restrictions_listener_.adaptation_counters().Total());
+ RestrictSource(restrictions_listener_.restrictions());
+ // Again both are most limited.
+ resource_->SetUsageState(ResourceUsageState::kUnderuse);
+ EXPECT_EQ(1, restrictions_listener_.adaptation_counters().Total());
+ RestrictSource(restrictions_listener_.restrictions());
+ other_resource_->SetUsageState(ResourceUsageState::kUnderuse);
+ EXPECT_EQ(0, restrictions_listener_.adaptation_counters().Total());
+}
+
+TEST_F(ResourceAdaptationProcessorTest,
+ MostLimitedResourceAdaptationWorksAfterChangingDegradataionPreference) {
+ video_stream_adapter_->SetDegradationPreference(
+ DegradationPreference::MAINTAIN_FRAMERATE);
+ SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize);
+ // Adapt down until we can't anymore.
+ resource_->SetUsageState(ResourceUsageState::kOveruse);
+ RestrictSource(restrictions_listener_.restrictions());
+ resource_->SetUsageState(ResourceUsageState::kOveruse);
+ RestrictSource(restrictions_listener_.restrictions());
+ resource_->SetUsageState(ResourceUsageState::kOveruse);
+ RestrictSource(restrictions_listener_.restrictions());
+ resource_->SetUsageState(ResourceUsageState::kOveruse);
+ RestrictSource(restrictions_listener_.restrictions());
+ resource_->SetUsageState(ResourceUsageState::kOveruse);
+ RestrictSource(restrictions_listener_.restrictions());
+ int last_total = restrictions_listener_.adaptation_counters().Total();
+
+ video_stream_adapter_->SetDegradationPreference(
+ DegradationPreference::MAINTAIN_RESOLUTION);
+ // resource_ can not adapt up since we have never reduced FPS.
+ resource_->SetUsageState(ResourceUsageState::kUnderuse);
+ EXPECT_EQ(last_total, restrictions_listener_.adaptation_counters().Total());
+
+ other_resource_->SetUsageState(ResourceUsageState::kOveruse);
+ EXPECT_EQ(last_total + 1,
+ restrictions_listener_.adaptation_counters().Total());
+ RestrictSource(restrictions_listener_.restrictions());
+ // other_resource_ is most limited so should be able to adapt up.
+ other_resource_->SetUsageState(ResourceUsageState::kUnderuse);
+ EXPECT_EQ(last_total, restrictions_listener_.adaptation_counters().Total());
+}
+
+TEST_F(ResourceAdaptationProcessorTest,
+ AdaptsDownWhenOtherResourceIsAlwaysUnderused) {
+ video_stream_adapter_->SetDegradationPreference(
+ DegradationPreference::MAINTAIN_FRAMERATE);
+ SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize);
+ other_resource_->SetUsageState(ResourceUsageState::kUnderuse);
+ // Does not trigger adaptation because there's no restriction.
+ EXPECT_EQ(0, restrictions_listener_.adaptation_counters().Total());
+
+ RestrictSource(restrictions_listener_.restrictions());
+ resource_->SetUsageState(ResourceUsageState::kOveruse);
+ // Adapts down even if other resource asked for adapting up.
+ EXPECT_EQ(1, restrictions_listener_.adaptation_counters().Total());
+
+ RestrictSource(restrictions_listener_.restrictions());
+ other_resource_->SetUsageState(ResourceUsageState::kUnderuse);
+ // Doesn't adapt up because adaptation is due to another resource.
+ EXPECT_EQ(1, restrictions_listener_.adaptation_counters().Total());
+ RestrictSource(restrictions_listener_.restrictions());
+}
+
+TEST_F(ResourceAdaptationProcessorTest,
+ TriggerOveruseNotOnAdaptationTaskQueue) {
+ video_stream_adapter_->SetDegradationPreference(
+ DegradationPreference::MAINTAIN_FRAMERATE);
+ SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize);
+
+ TaskQueueForTest resource_task_queue("ResourceTaskQueue");
+ resource_task_queue.PostTask(
+ [&]() { resource_->SetUsageState(ResourceUsageState::kOveruse); });
+
+ EXPECT_EQ_WAIT(1u, restrictions_listener_.restrictions_updated_count(),
+ kDefaultTimeout.ms());
+}
+
+TEST_F(ResourceAdaptationProcessorTest,
+ DestroyProcessorWhileResourceListenerDelegateHasTaskInFlight) {
+ video_stream_adapter_->SetDegradationPreference(
+ DegradationPreference::MAINTAIN_FRAMERATE);
+ SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize);
+
+ // Wait for `resource_` to signal overuse first so we know that the delegate
+ // has passed it on to the processor's task queue.
+ rtc::Event resource_event;
+ TaskQueueForTest resource_task_queue("ResourceTaskQueue");
+ resource_task_queue.PostTask([&]() {
+ resource_->SetUsageState(ResourceUsageState::kOveruse);
+ resource_event.Set();
+ });
+
+ EXPECT_TRUE(resource_event.Wait(kDefaultTimeout));
+ // Now destroy the processor while handling the overuse is in flight.
+ DestroyProcessor();
+
+ // Because the processor was destroyed by the time the delegate's task ran,
+ // the overuse signal must not have been handled.
+ EXPECT_EQ(0u, restrictions_listener_.restrictions_updated_count());
+}
+
+TEST_F(ResourceAdaptationProcessorTest,
+ ResourceOveruseIgnoredWhenSignalledDuringRemoval) {
+ video_stream_adapter_->SetDegradationPreference(
+ DegradationPreference::MAINTAIN_FRAMERATE);
+ SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize);
+
+ rtc::Event overuse_event;
+ TaskQueueForTest resource_task_queue("ResourceTaskQueue");
+ // Queues task for `resource_` overuse while `processor_` is still listening.
+ resource_task_queue.PostTask([&]() {
+ resource_->SetUsageState(ResourceUsageState::kOveruse);
+ overuse_event.Set();
+ });
+ EXPECT_TRUE(overuse_event.Wait(kDefaultTimeout));
+ // Once we know the overuse task is queued, remove `resource_` so that
+ // `processor_` is not listening to it.
+ processor_->RemoveResource(resource_);
+
+ // Runs the queued task so `processor_` gets signalled kOveruse from
+ // `resource_` even though `processor_` was not listening.
+ WaitUntilTaskQueueIdle();
+
+ // No restrictions should change even though `resource_` signaled `kOveruse`.
+ EXPECT_EQ(0u, restrictions_listener_.restrictions_updated_count());
+
+ // Delete `resource_` for cleanup.
+ resource_ = nullptr;
+}
+
+TEST_F(ResourceAdaptationProcessorTest,
+ RemovingOnlyAdaptedResourceResetsAdaptation) {
+ video_stream_adapter_->SetDegradationPreference(
+ DegradationPreference::MAINTAIN_FRAMERATE);
+ SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize);
+
+ resource_->SetUsageState(ResourceUsageState::kOveruse);
+ EXPECT_EQ(1, restrictions_listener_.adaptation_counters().Total());
+ RestrictSource(restrictions_listener_.restrictions());
+
+ processor_->RemoveResource(resource_);
+ EXPECT_EQ(0, restrictions_listener_.adaptation_counters().Total());
+
+ // Delete `resource_` for cleanup.
+ resource_ = nullptr;
+}
+
+TEST_F(ResourceAdaptationProcessorTest,
+ RemovingMostLimitedResourceSetsAdaptationToNextLimitedLevel) {
+ video_stream_adapter_->SetDegradationPreference(
+ DegradationPreference::BALANCED);
+ SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize);
+
+ other_resource_->SetUsageState(ResourceUsageState::kOveruse);
+ RestrictSource(restrictions_listener_.restrictions());
+ EXPECT_EQ(1, restrictions_listener_.adaptation_counters().Total());
+ VideoSourceRestrictions next_limited_restrictions =
+ restrictions_listener_.restrictions();
+ VideoAdaptationCounters next_limited_counters =
+ restrictions_listener_.adaptation_counters();
+
+ resource_->SetUsageState(ResourceUsageState::kOveruse);
+ RestrictSource(restrictions_listener_.restrictions());
+ EXPECT_EQ(2, restrictions_listener_.adaptation_counters().Total());
+
+ // Removing the most limited `resource_` should revert us to the next level.
+ processor_->RemoveResource(resource_);
+ EXPECT_EQ(1, restrictions_listener_.adaptation_counters().Total());
+ EXPECT_EQ(next_limited_restrictions, restrictions_listener_.restrictions());
+ EXPECT_EQ(next_limited_counters,
+ restrictions_listener_.adaptation_counters());
+
+ // Delete `resource_` for cleanup.
+ resource_ = nullptr;
+}
+
+TEST_F(ResourceAdaptationProcessorTest,
+ RemovingMostLimitedResourceSetsAdaptationIfInputStateUnchanged) {
+ video_stream_adapter_->SetDegradationPreference(
+ DegradationPreference::MAINTAIN_FRAMERATE);
+ SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize);
+
+ other_resource_->SetUsageState(ResourceUsageState::kOveruse);
+ RestrictSource(restrictions_listener_.restrictions());
+ EXPECT_EQ(1, restrictions_listener_.adaptation_counters().Total());
+ VideoSourceRestrictions next_limited_restrictions =
+ restrictions_listener_.restrictions();
+ VideoAdaptationCounters next_limited_counters =
+ restrictions_listener_.adaptation_counters();
+
+ // Overuse twice and underuse once. After the underuse we don't restrict the
+ // source. Normally this would block future underuses.
+ resource_->SetUsageState(ResourceUsageState::kOveruse);
+ RestrictSource(restrictions_listener_.restrictions());
+ resource_->SetUsageState(ResourceUsageState::kOveruse);
+ RestrictSource(restrictions_listener_.restrictions());
+ resource_->SetUsageState(ResourceUsageState::kUnderuse);
+ EXPECT_EQ(2, restrictions_listener_.adaptation_counters().Total());
+
+ // Removing the most limited `resource_` should revert us to the next limited
+ // state, even though we did not call RestrictSource() after `resource_` was
+ // overused. Normally adaptation for MAINTAIN_FRAMERATE would be blocked here
+ // but for removal we allow this anyway.
+ processor_->RemoveResource(resource_);
+ EXPECT_EQ(1, restrictions_listener_.adaptation_counters().Total());
+ EXPECT_EQ(next_limited_restrictions, restrictions_listener_.restrictions());
+ EXPECT_EQ(next_limited_counters,
+ restrictions_listener_.adaptation_counters());
+
+ // Delete `resource_` for cleanup.
+ resource_ = nullptr;
+}
+
+TEST_F(ResourceAdaptationProcessorTest,
+ RemovingResourceNotMostLimitedHasNoEffectOnLimitations) {
+ video_stream_adapter_->SetDegradationPreference(
+ DegradationPreference::BALANCED);
+ SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize);
+
+ other_resource_->SetUsageState(ResourceUsageState::kOveruse);
+ RestrictSource(restrictions_listener_.restrictions());
+ EXPECT_EQ(1, restrictions_listener_.adaptation_counters().Total());
+
+ resource_->SetUsageState(ResourceUsageState::kOveruse);
+ RestrictSource(restrictions_listener_.restrictions());
+ VideoSourceRestrictions current_restrictions =
+ restrictions_listener_.restrictions();
+ VideoAdaptationCounters current_counters =
+ restrictions_listener_.adaptation_counters();
+ EXPECT_EQ(2, restrictions_listener_.adaptation_counters().Total());
+
+ // Removing `other_resource_`, which is not most limited, has no effect.
+ processor_->RemoveResource(other_resource_);
+ EXPECT_EQ(current_restrictions, restrictions_listener_.restrictions());
+ EXPECT_EQ(current_counters, restrictions_listener_.adaptation_counters());
+
+ // Delete `other_resource_` for cleanup.
+ other_resource_ = nullptr;
+}
+
+TEST_F(ResourceAdaptationProcessorTest,
+ RemovingMostLimitedResourceAfterSwitchingDegradationPreferences) {
+ video_stream_adapter_->SetDegradationPreference(
+ DegradationPreference::MAINTAIN_FRAMERATE);
+ SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize);
+
+ other_resource_->SetUsageState(ResourceUsageState::kOveruse);
+ RestrictSource(restrictions_listener_.restrictions());
+ EXPECT_EQ(1, restrictions_listener_.adaptation_counters().Total());
+ VideoSourceRestrictions next_limited_restrictions =
+ restrictions_listener_.restrictions();
+ VideoAdaptationCounters next_limited_counters =
+ restrictions_listener_.adaptation_counters();
+
+ video_stream_adapter_->SetDegradationPreference(
+ DegradationPreference::MAINTAIN_RESOLUTION);
+ resource_->SetUsageState(ResourceUsageState::kOveruse);
+ RestrictSource(restrictions_listener_.restrictions());
+ EXPECT_EQ(2, restrictions_listener_.adaptation_counters().Total());
+
+ // Revert to `other_resource_` when removing `resource_` even though the
+ // degradation preference was different when it was overused.
+ processor_->RemoveResource(resource_);
+ EXPECT_EQ(next_limited_counters,
+ restrictions_listener_.adaptation_counters());
+
+ // After switching back to MAINTAIN_FRAMERATE, the next most limited settings
+ // are restored.
+ video_stream_adapter_->SetDegradationPreference(
+ DegradationPreference::MAINTAIN_FRAMERATE);
+ EXPECT_EQ(next_limited_restrictions, restrictions_listener_.restrictions());
+
+ // Delete `resource_` for cleanup.
+ resource_ = nullptr;
+}
+
+TEST_F(ResourceAdaptationProcessorTest,
+ RemovingMostLimitedResourceSetsNextLimitationsInDisabled) {
+ video_stream_adapter_->SetDegradationPreference(
+ DegradationPreference::MAINTAIN_FRAMERATE);
+ SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize);
+
+ other_resource_->SetUsageState(ResourceUsageState::kOveruse);
+ RestrictSource(restrictions_listener_.restrictions());
+ VideoSourceRestrictions next_limited_restrictions =
+ restrictions_listener_.restrictions();
+ VideoAdaptationCounters next_limited_counters =
+ restrictions_listener_.adaptation_counters();
+ EXPECT_EQ(1, restrictions_listener_.adaptation_counters().Total());
+ resource_->SetUsageState(ResourceUsageState::kOveruse);
+ RestrictSource(restrictions_listener_.restrictions());
+ EXPECT_EQ(2, restrictions_listener_.adaptation_counters().Total());
+
+ video_stream_adapter_->SetDegradationPreference(
+ DegradationPreference::DISABLED);
+
+ // Revert to `other_resource_` when removing `resource_` even though the
+ // current degradation preference is disabled.
+ processor_->RemoveResource(resource_);
+
+ // After switching back to MAINTAIN_FRAMERATE, the next most limited settings
+ // are restored.
+ video_stream_adapter_->SetDegradationPreference(
+ DegradationPreference::MAINTAIN_FRAMERATE);
+ EXPECT_EQ(next_limited_restrictions, restrictions_listener_.restrictions());
+ EXPECT_EQ(next_limited_counters,
+ restrictions_listener_.adaptation_counters());
+
+ // Delete `resource_` for cleanup.
+ resource_ = nullptr;
+}
+
+TEST_F(ResourceAdaptationProcessorTest,
+ RemovedResourceSignalsIgnoredByProcessor) {
+ video_stream_adapter_->SetDegradationPreference(
+ DegradationPreference::MAINTAIN_FRAMERATE);
+ SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize);
+
+ processor_->RemoveResource(resource_);
+ resource_->SetUsageState(ResourceUsageState::kOveruse);
+ EXPECT_EQ(0u, restrictions_listener_.restrictions_updated_count());
+
+ // Delete `resource_` for cleanup.
+ resource_ = nullptr;
+}
+
+TEST_F(ResourceAdaptationProcessorTest,
+ RemovingResourceWhenMultipleMostLimtedHasNoEffect) {
+ video_stream_adapter_->SetDegradationPreference(
+ DegradationPreference::MAINTAIN_FRAMERATE);
+ SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize);
+
+ other_resource_->SetUsageState(ResourceUsageState::kOveruse);
+ RestrictSource(restrictions_listener_.restrictions());
+ EXPECT_EQ(1, restrictions_listener_.adaptation_counters().Total());
+ // Adapt `resource_` up and then down so that both resource's are most
+ // limited at 1 adaptation.
+ resource_->SetUsageState(ResourceUsageState::kOveruse);
+ RestrictSource(restrictions_listener_.restrictions());
+ resource_->SetUsageState(ResourceUsageState::kUnderuse);
+ RestrictSource(restrictions_listener_.restrictions());
+ EXPECT_EQ(1, restrictions_listener_.adaptation_counters().Total());
+
+ // Removing `resource_` has no effect since both `resource_` and
+ // `other_resource_` are most limited.
+ processor_->RemoveResource(resource_);
+ EXPECT_EQ(1, restrictions_listener_.adaptation_counters().Total());
+
+ // Delete `resource_` for cleanup.
+ resource_ = nullptr;
+}
+
+TEST_F(ResourceAdaptationProcessorTest,
+ ResourceOverusedAtLimitReachedWillShareMostLimited) {
+ video_stream_adapter_->SetDegradationPreference(
+ DegradationPreference::MAINTAIN_FRAMERATE);
+ SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize);
+
+ bool has_reached_min_pixels = false;
+ ON_CALL(frame_rate_provider_, OnMinPixelLimitReached())
+ .WillByDefault(testing::Assign(&has_reached_min_pixels, true));
+
+ // Adapt 10 times, which should make us hit the limit.
+ for (int i = 0; i < 10; ++i) {
+ resource_->SetUsageState(ResourceUsageState::kOveruse);
+ RestrictSource(restrictions_listener_.restrictions());
+ }
+ EXPECT_TRUE(has_reached_min_pixels);
+ auto last_update_count = restrictions_listener_.restrictions_updated_count();
+ other_resource_->SetUsageState(ResourceUsageState::kOveruse);
+ // Now both `resource_` and `other_resource_` are most limited. Underuse of
+ // `resource_` will not adapt up.
+ resource_->SetUsageState(ResourceUsageState::kUnderuse);
+ EXPECT_EQ(last_update_count,
+ restrictions_listener_.restrictions_updated_count());
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/adaptation/resource_unittest.cc b/third_party/libwebrtc/call/adaptation/resource_unittest.cc
new file mode 100644
index 0000000000..a2291dfdce
--- /dev/null
+++ b/third_party/libwebrtc/call/adaptation/resource_unittest.cc
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "api/adaptation/resource.h"
+
+#include <memory>
+
+#include "api/scoped_refptr.h"
+#include "call/adaptation/test/fake_resource.h"
+#include "call/adaptation/test/mock_resource_listener.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+using ::testing::_;
+using ::testing::StrictMock;
+
+class ResourceTest : public ::testing::Test {
+ public:
+ ResourceTest() : fake_resource_(FakeResource::Create("FakeResource")) {}
+
+ protected:
+ rtc::scoped_refptr<FakeResource> fake_resource_;
+};
+
+TEST_F(ResourceTest, RegisteringListenerReceivesCallbacks) {
+ StrictMock<MockResourceListener> resource_listener;
+ fake_resource_->SetResourceListener(&resource_listener);
+ EXPECT_CALL(resource_listener, OnResourceUsageStateMeasured(_, _))
+ .Times(1)
+ .WillOnce([](rtc::scoped_refptr<Resource> resource,
+ ResourceUsageState usage_state) {
+ EXPECT_EQ(ResourceUsageState::kOveruse, usage_state);
+ });
+ fake_resource_->SetUsageState(ResourceUsageState::kOveruse);
+ fake_resource_->SetResourceListener(nullptr);
+}
+
+TEST_F(ResourceTest, UnregisteringListenerStopsCallbacks) {
+ StrictMock<MockResourceListener> resource_listener;
+ fake_resource_->SetResourceListener(&resource_listener);
+ fake_resource_->SetResourceListener(nullptr);
+ EXPECT_CALL(resource_listener, OnResourceUsageStateMeasured(_, _)).Times(0);
+ fake_resource_->SetUsageState(ResourceUsageState::kOveruse);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/adaptation/test/fake_adaptation_constraint.cc b/third_party/libwebrtc/call/adaptation/test/fake_adaptation_constraint.cc
new file mode 100644
index 0000000000..dbb31f0d3b
--- /dev/null
+++ b/third_party/libwebrtc/call/adaptation/test/fake_adaptation_constraint.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/adaptation/test/fake_adaptation_constraint.h"
+
+#include <utility>
+
+#include "absl/strings/string_view.h"
+
+namespace webrtc {
+
+FakeAdaptationConstraint::FakeAdaptationConstraint(absl::string_view name)
+ : name_(name), is_adaptation_up_allowed_(true) {}
+
+FakeAdaptationConstraint::~FakeAdaptationConstraint() = default;
+
+void FakeAdaptationConstraint::set_is_adaptation_up_allowed(
+ bool is_adaptation_up_allowed) {
+ is_adaptation_up_allowed_ = is_adaptation_up_allowed;
+}
+
+std::string FakeAdaptationConstraint::Name() const {
+ return name_;
+}
+
+bool FakeAdaptationConstraint::IsAdaptationUpAllowed(
+ const VideoStreamInputState& input_state,
+ const VideoSourceRestrictions& restrictions_before,
+ const VideoSourceRestrictions& restrictions_after) const {
+ return is_adaptation_up_allowed_;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/adaptation/test/fake_adaptation_constraint.h b/third_party/libwebrtc/call/adaptation/test/fake_adaptation_constraint.h
new file mode 100644
index 0000000000..5c684335f2
--- /dev/null
+++ b/third_party/libwebrtc/call/adaptation/test/fake_adaptation_constraint.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef CALL_ADAPTATION_TEST_FAKE_ADAPTATION_CONSTRAINT_H_
+#define CALL_ADAPTATION_TEST_FAKE_ADAPTATION_CONSTRAINT_H_
+
+#include <string>
+
+#include "absl/strings/string_view.h"
+#include "call/adaptation/adaptation_constraint.h"
+
+namespace webrtc {
+
+class FakeAdaptationConstraint : public AdaptationConstraint {
+ public:
+ explicit FakeAdaptationConstraint(absl::string_view name);
+ ~FakeAdaptationConstraint() override;
+
+ void set_is_adaptation_up_allowed(bool is_adaptation_up_allowed);
+
+ // AdaptationConstraint implementation.
+ std::string Name() const override;
+ bool IsAdaptationUpAllowed(
+ const VideoStreamInputState& input_state,
+ const VideoSourceRestrictions& restrictions_before,
+ const VideoSourceRestrictions& restrictions_after) const override;
+
+ private:
+ const std::string name_;
+ bool is_adaptation_up_allowed_;
+};
+
+} // namespace webrtc
+
+#endif // CALL_ADAPTATION_TEST_FAKE_ADAPTATION_CONSTRAINT_H_
diff --git a/third_party/libwebrtc/call/adaptation/test/fake_frame_rate_provider.cc b/third_party/libwebrtc/call/adaptation/test/fake_frame_rate_provider.cc
new file mode 100644
index 0000000000..65fee6a7ba
--- /dev/null
+++ b/third_party/libwebrtc/call/adaptation/test/fake_frame_rate_provider.cc
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/adaptation/test/fake_frame_rate_provider.h"
+
+#include "test/gmock.h"
+
+using ::testing::Return;
+
+namespace webrtc {
+
+FakeFrameRateProvider::FakeFrameRateProvider() {
+ set_fps(0);
+}
+
+void FakeFrameRateProvider::set_fps(int fps) {
+ EXPECT_CALL(*this, GetInputFrameRate()).WillRepeatedly(Return(fps));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/adaptation/test/fake_frame_rate_provider.h b/third_party/libwebrtc/call/adaptation/test/fake_frame_rate_provider.h
new file mode 100644
index 0000000000..b8815f592a
--- /dev/null
+++ b/third_party/libwebrtc/call/adaptation/test/fake_frame_rate_provider.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef CALL_ADAPTATION_TEST_FAKE_FRAME_RATE_PROVIDER_H_
+#define CALL_ADAPTATION_TEST_FAKE_FRAME_RATE_PROVIDER_H_
+
+#include <string>
+#include <vector>
+
+#include "test/gmock.h"
+#include "video/video_stream_encoder_observer.h"
+
+namespace webrtc {
+
+class MockVideoStreamEncoderObserver : public VideoStreamEncoderObserver {
+ public:
+ MOCK_METHOD(void, OnEncodedFrameTimeMeasured, (int, int), (override));
+ MOCK_METHOD(void, OnIncomingFrame, (int, int), (override));
+ MOCK_METHOD(void,
+ OnSendEncodedImage,
+ (const EncodedImage&, const CodecSpecificInfo*),
+ (override));
+ MOCK_METHOD(void,
+ OnEncoderImplementationChanged,
+ (EncoderImplementation),
+ (override));
+ MOCK_METHOD(void, OnFrameDropped, (DropReason), (override));
+ MOCK_METHOD(void,
+ OnEncoderReconfigured,
+ (const VideoEncoderConfig&, const std::vector<VideoStream>&),
+ (override));
+ MOCK_METHOD(void,
+ OnAdaptationChanged,
+ (VideoAdaptationReason,
+ const VideoAdaptationCounters&,
+ const VideoAdaptationCounters&),
+ (override));
+ MOCK_METHOD(void, ClearAdaptationStats, (), (override));
+ MOCK_METHOD(void,
+ UpdateAdaptationSettings,
+ (AdaptationSettings, AdaptationSettings),
+ (override));
+ MOCK_METHOD(void, OnMinPixelLimitReached, (), (override));
+ MOCK_METHOD(void, OnInitialQualityResolutionAdaptDown, (), (override));
+ MOCK_METHOD(void, OnSuspendChange, (bool), (override));
+ MOCK_METHOD(void,
+ OnBitrateAllocationUpdated,
+ (const VideoCodec&, const VideoBitrateAllocation&),
+ (override));
+ MOCK_METHOD(void, OnEncoderInternalScalerUpdate, (bool), (override));
+ MOCK_METHOD(int, GetInputFrameRate, (), (const, override));
+};
+
+class FakeFrameRateProvider : public MockVideoStreamEncoderObserver {
+ public:
+ FakeFrameRateProvider();
+ void set_fps(int fps);
+};
+
+} // namespace webrtc
+
+#endif // CALL_ADAPTATION_TEST_FAKE_FRAME_RATE_PROVIDER_H_
diff --git a/third_party/libwebrtc/call/adaptation/test/fake_resource.cc b/third_party/libwebrtc/call/adaptation/test/fake_resource.cc
new file mode 100644
index 0000000000..48b4768550
--- /dev/null
+++ b/third_party/libwebrtc/call/adaptation/test/fake_resource.cc
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2019 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/adaptation/test/fake_resource.h"
+
+#include <algorithm>
+#include <utility>
+
+#include "absl/strings/string_view.h"
+#include "api/make_ref_counted.h"
+
+namespace webrtc {
+
+// static
+rtc::scoped_refptr<FakeResource> FakeResource::Create(absl::string_view name) {
+ return rtc::make_ref_counted<FakeResource>(name);
+}
+
+FakeResource::FakeResource(absl::string_view name)
+ : Resource(), name_(name), listener_(nullptr) {}
+
+FakeResource::~FakeResource() {}
+
+void FakeResource::SetUsageState(ResourceUsageState usage_state) {
+ if (listener_) {
+ listener_->OnResourceUsageStateMeasured(rtc::scoped_refptr<Resource>(this),
+ usage_state);
+ }
+}
+
+std::string FakeResource::Name() const {
+ return name_;
+}
+
+void FakeResource::SetResourceListener(ResourceListener* listener) {
+ listener_ = listener;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/adaptation/test/fake_resource.h b/third_party/libwebrtc/call/adaptation/test/fake_resource.h
new file mode 100644
index 0000000000..1119a9614f
--- /dev/null
+++ b/third_party/libwebrtc/call/adaptation/test/fake_resource.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2019 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef CALL_ADAPTATION_TEST_FAKE_RESOURCE_H_
+#define CALL_ADAPTATION_TEST_FAKE_RESOURCE_H_
+
+#include <string>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "api/adaptation/resource.h"
+#include "api/scoped_refptr.h"
+
+namespace webrtc {
+
+// Fake resource used for testing.
+class FakeResource : public Resource {
+ public:
+ static rtc::scoped_refptr<FakeResource> Create(absl::string_view name);
+
+ explicit FakeResource(absl::string_view name);
+ ~FakeResource() override;
+
+ void SetUsageState(ResourceUsageState usage_state);
+
+ // Resource implementation.
+ std::string Name() const override;
+ void SetResourceListener(ResourceListener* listener) override;
+
+ private:
+ const std::string name_;
+ ResourceListener* listener_;
+};
+
+} // namespace webrtc
+
+#endif // CALL_ADAPTATION_TEST_FAKE_RESOURCE_H_
diff --git a/third_party/libwebrtc/call/adaptation/test/fake_video_stream_input_state_provider.cc b/third_party/libwebrtc/call/adaptation/test/fake_video_stream_input_state_provider.cc
new file mode 100644
index 0000000000..ce92dfb204
--- /dev/null
+++ b/third_party/libwebrtc/call/adaptation/test/fake_video_stream_input_state_provider.cc
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/adaptation/test/fake_video_stream_input_state_provider.h"
+
+namespace webrtc {
+
+FakeVideoStreamInputStateProvider::FakeVideoStreamInputStateProvider()
+ : VideoStreamInputStateProvider(nullptr) {}
+
+FakeVideoStreamInputStateProvider::~FakeVideoStreamInputStateProvider() =
+ default;
+
+void FakeVideoStreamInputStateProvider::SetInputState(
+ int input_pixels,
+ int input_fps,
+ int min_pixels_per_frame) {
+ fake_input_state_.set_has_input(true);
+ fake_input_state_.set_frame_size_pixels(input_pixels);
+ fake_input_state_.set_frames_per_second(input_fps);
+ fake_input_state_.set_min_pixels_per_frame(min_pixels_per_frame);
+}
+
+VideoStreamInputState FakeVideoStreamInputStateProvider::InputState() {
+ return fake_input_state_;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/adaptation/test/fake_video_stream_input_state_provider.h b/third_party/libwebrtc/call/adaptation/test/fake_video_stream_input_state_provider.h
new file mode 100644
index 0000000000..93f7dba7e6
--- /dev/null
+++ b/third_party/libwebrtc/call/adaptation/test/fake_video_stream_input_state_provider.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef CALL_ADAPTATION_TEST_FAKE_VIDEO_STREAM_INPUT_STATE_PROVIDER_H_
+#define CALL_ADAPTATION_TEST_FAKE_VIDEO_STREAM_INPUT_STATE_PROVIDER_H_
+
+#include "call/adaptation/video_stream_input_state_provider.h"
+
+namespace webrtc {
+
+class FakeVideoStreamInputStateProvider : public VideoStreamInputStateProvider {
+ public:
+ FakeVideoStreamInputStateProvider();
+ virtual ~FakeVideoStreamInputStateProvider();
+
+ void SetInputState(int input_pixels, int input_fps, int min_pixels_per_frame);
+ VideoStreamInputState InputState() override;
+
+ private:
+ VideoStreamInputState fake_input_state_;
+};
+
+} // namespace webrtc
+
+#endif // CALL_ADAPTATION_TEST_FAKE_VIDEO_STREAM_INPUT_STATE_PROVIDER_H_
diff --git a/third_party/libwebrtc/call/adaptation/test/mock_resource_listener.h b/third_party/libwebrtc/call/adaptation/test/mock_resource_listener.h
new file mode 100644
index 0000000000..1c4df31a13
--- /dev/null
+++ b/third_party/libwebrtc/call/adaptation/test/mock_resource_listener.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef CALL_ADAPTATION_TEST_MOCK_RESOURCE_LISTENER_H_
+#define CALL_ADAPTATION_TEST_MOCK_RESOURCE_LISTENER_H_
+
+#include "api/adaptation/resource.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+
+class MockResourceListener : public ResourceListener {
+ public:
+ MOCK_METHOD(void,
+ OnResourceUsageStateMeasured,
+ (rtc::scoped_refptr<Resource> resource,
+ ResourceUsageState usage_state),
+ (override));
+};
+
+} // namespace webrtc
+
+#endif // CALL_ADAPTATION_TEST_MOCK_RESOURCE_LISTENER_H_
diff --git a/third_party/libwebrtc/call/adaptation/video_source_restrictions.cc b/third_party/libwebrtc/call/adaptation/video_source_restrictions.cc
new file mode 100644
index 0000000000..719bc53278
--- /dev/null
+++ b/third_party/libwebrtc/call/adaptation/video_source_restrictions.cc
@@ -0,0 +1,173 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/adaptation/video_source_restrictions.h"
+
+#include <algorithm>
+#include <limits>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/strings/string_builder.h"
+
+namespace webrtc {
+
+VideoSourceRestrictions::VideoSourceRestrictions()
+ : max_pixels_per_frame_(absl::nullopt),
+ target_pixels_per_frame_(absl::nullopt),
+ max_frame_rate_(absl::nullopt) {}
+
+VideoSourceRestrictions::VideoSourceRestrictions(
+ absl::optional<size_t> max_pixels_per_frame,
+ absl::optional<size_t> target_pixels_per_frame,
+ absl::optional<double> max_frame_rate)
+ : max_pixels_per_frame_(std::move(max_pixels_per_frame)),
+ target_pixels_per_frame_(std::move(target_pixels_per_frame)),
+ max_frame_rate_(std::move(max_frame_rate)) {
+ RTC_DCHECK(!max_pixels_per_frame_.has_value() ||
+ max_pixels_per_frame_.value() <
+ static_cast<size_t>(std::numeric_limits<int>::max()));
+ RTC_DCHECK(!max_frame_rate_.has_value() ||
+ max_frame_rate_.value() < std::numeric_limits<int>::max());
+ RTC_DCHECK(!max_frame_rate_.has_value() || max_frame_rate_.value() > 0.0);
+}
+
+std::string VideoSourceRestrictions::ToString() const {
+ rtc::StringBuilder ss;
+ ss << "{";
+ if (max_frame_rate_)
+ ss << " max_fps=" << max_frame_rate_.value();
+ if (max_pixels_per_frame_)
+ ss << " max_pixels_per_frame=" << max_pixels_per_frame_.value();
+ if (target_pixels_per_frame_)
+ ss << " target_pixels_per_frame=" << target_pixels_per_frame_.value();
+ ss << " }";
+ return ss.Release();
+}
+
+const absl::optional<size_t>& VideoSourceRestrictions::max_pixels_per_frame()
+ const {
+ return max_pixels_per_frame_;
+}
+
+const absl::optional<size_t>& VideoSourceRestrictions::target_pixels_per_frame()
+ const {
+ return target_pixels_per_frame_;
+}
+
+const absl::optional<double>& VideoSourceRestrictions::max_frame_rate() const {
+ return max_frame_rate_;
+}
+
+void VideoSourceRestrictions::set_max_pixels_per_frame(
+ absl::optional<size_t> max_pixels_per_frame) {
+ max_pixels_per_frame_ = std::move(max_pixels_per_frame);
+}
+
+void VideoSourceRestrictions::set_target_pixels_per_frame(
+ absl::optional<size_t> target_pixels_per_frame) {
+ target_pixels_per_frame_ = std::move(target_pixels_per_frame);
+}
+
+void VideoSourceRestrictions::set_max_frame_rate(
+ absl::optional<double> max_frame_rate) {
+ max_frame_rate_ = std::move(max_frame_rate);
+}
+
+void VideoSourceRestrictions::UpdateMin(const VideoSourceRestrictions& other) {
+ if (max_pixels_per_frame_.has_value()) {
+ max_pixels_per_frame_ = std::min(*max_pixels_per_frame_,
+ other.max_pixels_per_frame().value_or(
+ std::numeric_limits<size_t>::max()));
+ } else {
+ max_pixels_per_frame_ = other.max_pixels_per_frame();
+ }
+ if (target_pixels_per_frame_.has_value()) {
+ target_pixels_per_frame_ = std::min(
+ *target_pixels_per_frame_, other.target_pixels_per_frame().value_or(
+ std::numeric_limits<size_t>::max()));
+ } else {
+ target_pixels_per_frame_ = other.target_pixels_per_frame();
+ }
+ if (max_frame_rate_.has_value()) {
+ max_frame_rate_ = std::min(
+ *max_frame_rate_,
+ other.max_frame_rate().value_or(std::numeric_limits<double>::max()));
+ } else {
+ max_frame_rate_ = other.max_frame_rate();
+ }
+}
+
+bool DidRestrictionsIncrease(VideoSourceRestrictions before,
+ VideoSourceRestrictions after) {
+ bool decreased_resolution = DidDecreaseResolution(before, after);
+ bool decreased_framerate = DidDecreaseFrameRate(before, after);
+ bool same_resolution =
+ before.max_pixels_per_frame() == after.max_pixels_per_frame();
+ bool same_framerate = before.max_frame_rate() == after.max_frame_rate();
+
+ return (decreased_resolution && decreased_framerate) ||
+ (decreased_resolution && same_framerate) ||
+ (same_resolution && decreased_framerate);
+}
+
+bool DidRestrictionsDecrease(VideoSourceRestrictions before,
+ VideoSourceRestrictions after) {
+ bool increased_resolution = DidIncreaseResolution(before, after);
+ bool increased_framerate = DidIncreaseFrameRate(before, after);
+ bool same_resolution =
+ before.max_pixels_per_frame() == after.max_pixels_per_frame();
+ bool same_framerate = before.max_frame_rate() == after.max_frame_rate();
+
+ return (increased_resolution && increased_framerate) ||
+ (increased_resolution && same_framerate) ||
+ (same_resolution && increased_framerate);
+}
+
+bool DidIncreaseResolution(VideoSourceRestrictions restrictions_before,
+ VideoSourceRestrictions restrictions_after) {
+ if (!restrictions_before.max_pixels_per_frame().has_value())
+ return false;
+ if (!restrictions_after.max_pixels_per_frame().has_value())
+ return true;
+ return restrictions_after.max_pixels_per_frame().value() >
+ restrictions_before.max_pixels_per_frame().value();
+}
+
+bool DidDecreaseResolution(VideoSourceRestrictions restrictions_before,
+ VideoSourceRestrictions restrictions_after) {
+ if (!restrictions_after.max_pixels_per_frame().has_value())
+ return false;
+ if (!restrictions_before.max_pixels_per_frame().has_value())
+ return true;
+ return restrictions_after.max_pixels_per_frame().value() <
+ restrictions_before.max_pixels_per_frame().value();
+}
+
+bool DidIncreaseFrameRate(VideoSourceRestrictions restrictions_before,
+ VideoSourceRestrictions restrictions_after) {
+ if (!restrictions_before.max_frame_rate().has_value())
+ return false;
+ if (!restrictions_after.max_frame_rate().has_value())
+ return true;
+ return restrictions_after.max_frame_rate().value() >
+ restrictions_before.max_frame_rate().value();
+}
+
+bool DidDecreaseFrameRate(VideoSourceRestrictions restrictions_before,
+ VideoSourceRestrictions restrictions_after) {
+ if (!restrictions_after.max_frame_rate().has_value())
+ return false;
+ if (!restrictions_before.max_frame_rate().has_value())
+ return true;
+ return restrictions_after.max_frame_rate().value() <
+ restrictions_before.max_frame_rate().value();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/adaptation/video_source_restrictions.h b/third_party/libwebrtc/call/adaptation/video_source_restrictions.h
new file mode 100644
index 0000000000..be8520a385
--- /dev/null
+++ b/third_party/libwebrtc/call/adaptation/video_source_restrictions.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef CALL_ADAPTATION_VIDEO_SOURCE_RESTRICTIONS_H_
+#define CALL_ADAPTATION_VIDEO_SOURCE_RESTRICTIONS_H_
+
+#include <string>
+#include <utility>
+
+#include "absl/types/optional.h"
+
+namespace webrtc {
+
+// Describes optional restrictions to the resolution and frame rate of a video
+// source.
+class VideoSourceRestrictions {
+ public:
+ // Constructs without any restrictions.
+ VideoSourceRestrictions();
+ // All values must be positive or nullopt.
+ // TODO(hbos): Support expressing "disable this stream"?
+ VideoSourceRestrictions(absl::optional<size_t> max_pixels_per_frame,
+ absl::optional<size_t> target_pixels_per_frame,
+ absl::optional<double> max_frame_rate);
+
+ bool operator==(const VideoSourceRestrictions& rhs) const {
+ return max_pixels_per_frame_ == rhs.max_pixels_per_frame_ &&
+ target_pixels_per_frame_ == rhs.target_pixels_per_frame_ &&
+ max_frame_rate_ == rhs.max_frame_rate_;
+ }
+ bool operator!=(const VideoSourceRestrictions& rhs) const {
+ return !(*this == rhs);
+ }
+
+ std::string ToString() const;
+
+ // The source must produce a resolution less than or equal to
+ // max_pixels_per_frame().
+ const absl::optional<size_t>& max_pixels_per_frame() const;
+ // The source should produce a resolution as close to the
+ // target_pixels_per_frame() as possible, provided this does not exceed
+ // max_pixels_per_frame().
+ // The actual pixel count selected depends on the capabilities of the source.
+ // TODO(hbos): Clarify how "target" is used. One possible implementation: open
+ // the camera in the smallest resolution that is greater than or equal to the
+ // target and scale it down to the target if it is greater. Is this an
+ // accurate description of what this does today, or do we do something else?
+ const absl::optional<size_t>& target_pixels_per_frame() const;
+ const absl::optional<double>& max_frame_rate() const;
+
+ void set_max_pixels_per_frame(absl::optional<size_t> max_pixels_per_frame);
+ void set_target_pixels_per_frame(
+ absl::optional<size_t> target_pixels_per_frame);
+ void set_max_frame_rate(absl::optional<double> max_frame_rate);
+
+ // Update `this` with min(`this`, `other`).
+ void UpdateMin(const VideoSourceRestrictions& other);
+
+ private:
+ // These map to rtc::VideoSinkWants's `max_pixel_count` and
+ // `target_pixel_count`.
+ absl::optional<size_t> max_pixels_per_frame_;
+ absl::optional<size_t> target_pixels_per_frame_;
+ absl::optional<double> max_frame_rate_;
+};
+
+bool DidRestrictionsIncrease(VideoSourceRestrictions before,
+ VideoSourceRestrictions after);
+bool DidRestrictionsDecrease(VideoSourceRestrictions before,
+ VideoSourceRestrictions after);
+bool DidIncreaseResolution(VideoSourceRestrictions restrictions_before,
+ VideoSourceRestrictions restrictions_after);
+bool DidDecreaseResolution(VideoSourceRestrictions restrictions_before,
+ VideoSourceRestrictions restrictions_after);
+bool DidIncreaseFrameRate(VideoSourceRestrictions restrictions_before,
+ VideoSourceRestrictions restrictions_after);
+bool DidDecreaseFrameRate(VideoSourceRestrictions restrictions_before,
+ VideoSourceRestrictions restrictions_after);
+
+} // namespace webrtc
+
+#endif // CALL_ADAPTATION_VIDEO_SOURCE_RESTRICTIONS_H_
diff --git a/third_party/libwebrtc/call/adaptation/video_source_restrictions_unittest.cc b/third_party/libwebrtc/call/adaptation/video_source_restrictions_unittest.cc
new file mode 100644
index 0000000000..8c1ae4c896
--- /dev/null
+++ b/third_party/libwebrtc/call/adaptation/video_source_restrictions_unittest.cc
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/adaptation/video_source_restrictions.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+const size_t kHdPixels = 1280 * 720;
+
+const VideoSourceRestrictions kUnlimited;
+const VideoSourceRestrictions k15fps(absl::nullopt, absl::nullopt, 15.0);
+const VideoSourceRestrictions kHd(kHdPixels, kHdPixels, absl::nullopt);
+const VideoSourceRestrictions kHd15fps(kHdPixels, kHdPixels, 15.0);
+const VideoSourceRestrictions kVga7fps(kHdPixels / 2, kHdPixels / 2, 7.0);
+
+VideoSourceRestrictions RestrictionsFromMaxPixelsPerFrame(
+ size_t max_pixels_per_frame) {
+ return VideoSourceRestrictions(max_pixels_per_frame, absl::nullopt,
+ absl::nullopt);
+}
+
+VideoSourceRestrictions RestrictionsFromMaxFrameRate(double max_frame_rate) {
+ return VideoSourceRestrictions(absl::nullopt, absl::nullopt, max_frame_rate);
+}
+
+} // namespace
+
+TEST(VideoSourceRestrictionsTest, DidIncreaseResolution) {
+ // smaller restrictions -> larger restrictions
+ EXPECT_TRUE(DidIncreaseResolution(RestrictionsFromMaxPixelsPerFrame(10),
+ RestrictionsFromMaxPixelsPerFrame(11)));
+ // unrestricted -> restricted
+ EXPECT_FALSE(DidIncreaseResolution(VideoSourceRestrictions(),
+ RestrictionsFromMaxPixelsPerFrame(10)));
+ // restricted -> unrestricted
+ EXPECT_TRUE(DidIncreaseResolution(RestrictionsFromMaxPixelsPerFrame(10),
+ VideoSourceRestrictions()));
+ // restricted -> equally restricted
+ EXPECT_FALSE(DidIncreaseResolution(RestrictionsFromMaxPixelsPerFrame(10),
+ RestrictionsFromMaxPixelsPerFrame(10)));
+ // unrestricted -> unrestricted
+ EXPECT_FALSE(DidIncreaseResolution(VideoSourceRestrictions(),
+ VideoSourceRestrictions()));
+ // larger restrictions -> smaller restrictions
+ EXPECT_FALSE(DidIncreaseResolution(RestrictionsFromMaxPixelsPerFrame(10),
+ RestrictionsFromMaxPixelsPerFrame(9)));
+}
+
+TEST(VideoSourceRestrictionsTest, DidDecreaseFrameRate) {
+ // smaller restrictions -> larger restrictions
+ EXPECT_FALSE(DidDecreaseFrameRate(RestrictionsFromMaxFrameRate(10),
+ RestrictionsFromMaxFrameRate(11)));
+ // unrestricted -> restricted
+ EXPECT_TRUE(DidDecreaseFrameRate(VideoSourceRestrictions(),
+ RestrictionsFromMaxFrameRate(10)));
+ // restricted -> unrestricted
+ EXPECT_FALSE(DidDecreaseFrameRate(RestrictionsFromMaxFrameRate(10),
+ VideoSourceRestrictions()));
+ // restricted -> equally restricted
+ EXPECT_FALSE(DidDecreaseFrameRate(RestrictionsFromMaxFrameRate(10),
+ RestrictionsFromMaxFrameRate(10)));
+ // unrestricted -> unrestricted
+ EXPECT_FALSE(DidDecreaseFrameRate(VideoSourceRestrictions(),
+ VideoSourceRestrictions()));
+ // larger restrictions -> smaller restrictions
+ EXPECT_TRUE(DidDecreaseFrameRate(RestrictionsFromMaxFrameRate(10),
+ RestrictionsFromMaxFrameRate(9)));
+}
+
+TEST(VideoSourceRestrictionsTest, DidRestrictionsChangeFalseForSame) {
+ EXPECT_FALSE(DidRestrictionsDecrease(kUnlimited, kUnlimited));
+ EXPECT_FALSE(DidRestrictionsIncrease(kUnlimited, kUnlimited));
+
+ // Both resolution and fps restricted.
+ EXPECT_FALSE(DidRestrictionsDecrease(kHd15fps, kHd15fps));
+ EXPECT_FALSE(DidRestrictionsIncrease(kHd15fps, kHd15fps));
+}
+
+TEST(VideoSourceRestrictions,
+ DidRestrictionsIncreaseTrueWhenPixelsOrFrameRateDecreased) {
+ // Unlimited > Limited resolution.
+ EXPECT_TRUE(DidRestrictionsIncrease(kUnlimited, kHd));
+ // Unlimited > limited fps.
+ EXPECT_TRUE(DidRestrictionsIncrease(kUnlimited, k15fps));
+ // Unlimited > limited resolution + limited fps.
+ EXPECT_TRUE(DidRestrictionsIncrease(kUnlimited, kHd15fps));
+ // Limited resolution > limited resolution + limited fps.
+ EXPECT_TRUE(DidRestrictionsIncrease(kHd, kHd15fps));
+ // Limited fps > limited resolution + limited fps.
+ EXPECT_TRUE(DidRestrictionsIncrease(k15fps, kHd15fps));
+ // Limited resolution + fps > More limited resolution + more limited fps
+ EXPECT_TRUE(DidRestrictionsIncrease(kHd15fps, kVga7fps));
+}
+
+TEST(VideoSourceRestrictions,
+ DidRestrictionsDecreaseTrueWhenPixelsOrFrameRateIncreased) {
+ // Limited resolution < Unlimited.
+ EXPECT_TRUE(DidRestrictionsDecrease(kHd, kUnlimited));
+ // Limited fps < Unlimited.
+ EXPECT_TRUE(DidRestrictionsDecrease(k15fps, kUnlimited));
+ // Limited resolution + limited fps < unlimited.
+ EXPECT_TRUE(DidRestrictionsDecrease(kHd15fps, kUnlimited));
+ // Limited resolution + limited fps < limited resolution.
+ EXPECT_TRUE(DidRestrictionsDecrease(kHd15fps, kHd));
+ // Limited resolution + limited fps < limited fps.
+ EXPECT_TRUE(DidRestrictionsDecrease(kHd15fps, k15fps));
+ // More limited resolution + more limited fps < limited resolution + fps
+ EXPECT_TRUE(DidRestrictionsDecrease(kVga7fps, kHd15fps));
+}
+
+TEST(VideoSourceRestrictions,
+ DidRestrictionsChangeFalseWhenFrameRateAndPixelsChangeDifferently) {
+ // One changed framerate, the other resolution; not an increase or decrease.
+ EXPECT_FALSE(DidRestrictionsIncrease(kHd, k15fps));
+ EXPECT_FALSE(DidRestrictionsDecrease(kHd, k15fps));
+}
+
+TEST(VideoSourceRestrictions, UpdateMin) {
+ VideoSourceRestrictions one(kHdPixels / 2, kHdPixels, 7.0);
+ VideoSourceRestrictions two(kHdPixels, kHdPixels / 3, 15.0);
+
+ one.UpdateMin(two);
+
+ EXPECT_EQ(one.max_pixels_per_frame(), kHdPixels / 2);
+ EXPECT_EQ(one.target_pixels_per_frame(), kHdPixels / 3);
+ EXPECT_EQ(one.max_frame_rate(), 7.0);
+
+ two.UpdateMin(one);
+
+ EXPECT_EQ(two.max_pixels_per_frame(), kHdPixels / 2);
+ EXPECT_EQ(two.target_pixels_per_frame(), kHdPixels / 3);
+ EXPECT_EQ(two.max_frame_rate(), 7.0);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/adaptation/video_stream_adapter.cc b/third_party/libwebrtc/call/adaptation/video_stream_adapter.cc
new file mode 100644
index 0000000000..5a970fb2ef
--- /dev/null
+++ b/third_party/libwebrtc/call/adaptation/video_stream_adapter.cc
@@ -0,0 +1,753 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/adaptation/video_stream_adapter.h"
+
+#include <algorithm>
+#include <limits>
+#include <utility>
+
+#include "absl/types/optional.h"
+#include "absl/types/variant.h"
+#include "api/sequence_checker.h"
+#include "api/video/video_adaptation_counters.h"
+#include "api/video/video_adaptation_reason.h"
+#include "api/video_codecs/video_encoder.h"
+#include "call/adaptation/video_source_restrictions.h"
+#include "call/adaptation/video_stream_input_state.h"
+#include "modules/video_coding/svc/scalability_mode_util.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+
+const int kMinFrameRateFps = 2;
+
+namespace {
+
+// For frame rate, the steps we take are 2/3 (down) and 3/2 (up).
+int GetLowerFrameRateThan(int fps) {
+ RTC_DCHECK(fps != std::numeric_limits<int>::max());
+ return (fps * 2) / 3;
+}
+// TODO(hbos): Use absl::optional<> instead?
+int GetHigherFrameRateThan(int fps) {
+ return fps != std::numeric_limits<int>::max()
+ ? (fps * 3) / 2
+ : std::numeric_limits<int>::max();
+}
+
+int GetIncreasedMaxPixelsWanted(int target_pixels) {
+ if (target_pixels == std::numeric_limits<int>::max())
+ return std::numeric_limits<int>::max();
+ // When we decrease resolution, we go down to at most 3/5 of current pixels.
+ // Thus to increase resolution, we need 3/5 to get back to where we started.
+ // When going up, the desired max_pixels_per_frame() has to be significantly
+ // higher than the target because the source's native resolutions might not
+ // match the target. We pick 12/5 of the target.
+ //
+ // (This value was historically 4 times the old target, which is (3/5)*4 of
+ // the new target - or 12/5 - assuming the target is adjusted according to
+ // the above steps.)
+ RTC_DCHECK(target_pixels != std::numeric_limits<int>::max());
+ return (target_pixels * 12) / 5;
+}
+
+bool CanDecreaseResolutionTo(int target_pixels,
+ int target_pixels_min,
+ const VideoStreamInputState& input_state,
+ const VideoSourceRestrictions& restrictions) {
+ int max_pixels_per_frame =
+ rtc::dchecked_cast<int>(restrictions.max_pixels_per_frame().value_or(
+ std::numeric_limits<int>::max()));
+ return target_pixels < max_pixels_per_frame &&
+ target_pixels_min >= input_state.min_pixels_per_frame();
+}
+
+bool CanIncreaseResolutionTo(int target_pixels,
+ const VideoSourceRestrictions& restrictions) {
+ int max_pixels_wanted = GetIncreasedMaxPixelsWanted(target_pixels);
+ int max_pixels_per_frame =
+ rtc::dchecked_cast<int>(restrictions.max_pixels_per_frame().value_or(
+ std::numeric_limits<int>::max()));
+ return max_pixels_wanted > max_pixels_per_frame;
+}
+
+bool CanDecreaseFrameRateTo(int max_frame_rate,
+ const VideoSourceRestrictions& restrictions) {
+ const int fps_wanted = std::max(kMinFrameRateFps, max_frame_rate);
+ return fps_wanted <
+ rtc::dchecked_cast<int>(restrictions.max_frame_rate().value_or(
+ std::numeric_limits<int>::max()));
+}
+
+bool CanIncreaseFrameRateTo(int max_frame_rate,
+ const VideoSourceRestrictions& restrictions) {
+ return max_frame_rate >
+ rtc::dchecked_cast<int>(restrictions.max_frame_rate().value_or(
+ std::numeric_limits<int>::max()));
+}
+
+bool MinPixelLimitReached(const VideoStreamInputState& input_state) {
+ if (input_state.single_active_stream_pixels().has_value()) {
+ return GetLowerResolutionThan(
+ input_state.single_active_stream_pixels().value()) <
+ input_state.min_pixels_per_frame();
+ }
+ return input_state.frame_size_pixels().has_value() &&
+ GetLowerResolutionThan(input_state.frame_size_pixels().value()) <
+ input_state.min_pixels_per_frame();
+}
+
+} // namespace
+
+VideoSourceRestrictionsListener::~VideoSourceRestrictionsListener() = default;
+
+VideoSourceRestrictions FilterRestrictionsByDegradationPreference(
+ VideoSourceRestrictions source_restrictions,
+ DegradationPreference degradation_preference) {
+ switch (degradation_preference) {
+ case DegradationPreference::BALANCED:
+ break;
+ case DegradationPreference::MAINTAIN_FRAMERATE:
+ source_restrictions.set_max_frame_rate(absl::nullopt);
+ break;
+ case DegradationPreference::MAINTAIN_RESOLUTION:
+ source_restrictions.set_max_pixels_per_frame(absl::nullopt);
+ source_restrictions.set_target_pixels_per_frame(absl::nullopt);
+ break;
+ case DegradationPreference::DISABLED:
+ source_restrictions.set_max_pixels_per_frame(absl::nullopt);
+ source_restrictions.set_target_pixels_per_frame(absl::nullopt);
+ source_restrictions.set_max_frame_rate(absl::nullopt);
+ }
+ return source_restrictions;
+}
+
+// For resolution, the steps we take are 3/5 (down) and 5/3 (up).
+// Notice the asymmetry of which restriction property is set depending on if
+// we are adapting up or down:
+// - VideoSourceRestrictor::DecreaseResolution() sets the max_pixels_per_frame()
+// to the desired target and target_pixels_per_frame() to null.
+// - VideoSourceRestrictor::IncreaseResolutionTo() sets the
+// target_pixels_per_frame() to the desired target, and max_pixels_per_frame()
+// is set according to VideoSourceRestrictor::GetIncreasedMaxPixelsWanted().
+int GetLowerResolutionThan(int pixel_count) {
+ RTC_DCHECK(pixel_count != std::numeric_limits<int>::max());
+ return (pixel_count * 3) / 5;
+}
+
+// TODO(hbos): Use absl::optional<> instead?
+int GetHigherResolutionThan(int pixel_count) {
+ return pixel_count != std::numeric_limits<int>::max()
+ ? (pixel_count * 5) / 3
+ : std::numeric_limits<int>::max();
+}
+
+// static
+const char* Adaptation::StatusToString(Adaptation::Status status) {
+ switch (status) {
+ case Adaptation::Status::kValid:
+ return "kValid";
+ case Adaptation::Status::kLimitReached:
+ return "kLimitReached";
+ case Adaptation::Status::kAwaitingPreviousAdaptation:
+ return "kAwaitingPreviousAdaptation";
+ case Status::kInsufficientInput:
+ return "kInsufficientInput";
+ case Status::kAdaptationDisabled:
+ return "kAdaptationDisabled";
+ case Status::kRejectedByConstraint:
+ return "kRejectedByConstraint";
+ }
+ RTC_CHECK_NOTREACHED();
+}
+
+Adaptation::Adaptation(int validation_id,
+ VideoSourceRestrictions restrictions,
+ VideoAdaptationCounters counters,
+ VideoStreamInputState input_state)
+ : validation_id_(validation_id),
+ status_(Status::kValid),
+ input_state_(std::move(input_state)),
+ restrictions_(std::move(restrictions)),
+ counters_(std::move(counters)) {}
+
+Adaptation::Adaptation(int validation_id, Status invalid_status)
+ : validation_id_(validation_id), status_(invalid_status) {
+ RTC_DCHECK_NE(status_, Status::kValid);
+}
+
+Adaptation::Status Adaptation::status() const {
+ return status_;
+}
+
+const VideoStreamInputState& Adaptation::input_state() const {
+ return input_state_;
+}
+
+const VideoSourceRestrictions& Adaptation::restrictions() const {
+ return restrictions_;
+}
+
+const VideoAdaptationCounters& Adaptation::counters() const {
+ return counters_;
+}
+
+VideoStreamAdapter::VideoStreamAdapter(
+ VideoStreamInputStateProvider* input_state_provider,
+ VideoStreamEncoderObserver* encoder_stats_observer,
+ const FieldTrialsView& field_trials)
+ : input_state_provider_(input_state_provider),
+ encoder_stats_observer_(encoder_stats_observer),
+ balanced_settings_(field_trials),
+ adaptation_validation_id_(0),
+ degradation_preference_(DegradationPreference::DISABLED),
+ awaiting_frame_size_change_(absl::nullopt) {
+ sequence_checker_.Detach();
+ RTC_DCHECK(input_state_provider_);
+ RTC_DCHECK(encoder_stats_observer_);
+}
+
+VideoStreamAdapter::~VideoStreamAdapter() {
+ RTC_DCHECK(adaptation_constraints_.empty())
+      << "There are constraint(s) attached to a VideoStreamAdapter being "
+ "destroyed.";
+}
+
+VideoSourceRestrictions VideoStreamAdapter::source_restrictions() const {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ return current_restrictions_.restrictions;
+}
+
+const VideoAdaptationCounters& VideoStreamAdapter::adaptation_counters() const {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ return current_restrictions_.counters;
+}
+
+void VideoStreamAdapter::ClearRestrictions() {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ // Invalidate any previously returned Adaptation.
+ RTC_LOG(LS_INFO) << "Resetting restrictions";
+ ++adaptation_validation_id_;
+ current_restrictions_ = {VideoSourceRestrictions(),
+ VideoAdaptationCounters()};
+ awaiting_frame_size_change_ = absl::nullopt;
+ BroadcastVideoRestrictionsUpdate(input_state_provider_->InputState(),
+ nullptr);
+}
+
+void VideoStreamAdapter::AddRestrictionsListener(
+ VideoSourceRestrictionsListener* restrictions_listener) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ RTC_DCHECK(std::find(restrictions_listeners_.begin(),
+ restrictions_listeners_.end(),
+ restrictions_listener) == restrictions_listeners_.end());
+ restrictions_listeners_.push_back(restrictions_listener);
+}
+
+void VideoStreamAdapter::RemoveRestrictionsListener(
+ VideoSourceRestrictionsListener* restrictions_listener) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ auto it = std::find(restrictions_listeners_.begin(),
+ restrictions_listeners_.end(), restrictions_listener);
+ RTC_DCHECK(it != restrictions_listeners_.end());
+ restrictions_listeners_.erase(it);
+}
+
+void VideoStreamAdapter::AddAdaptationConstraint(
+ AdaptationConstraint* adaptation_constraint) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ RTC_DCHECK(std::find(adaptation_constraints_.begin(),
+ adaptation_constraints_.end(),
+ adaptation_constraint) == adaptation_constraints_.end());
+ adaptation_constraints_.push_back(adaptation_constraint);
+}
+
+void VideoStreamAdapter::RemoveAdaptationConstraint(
+ AdaptationConstraint* adaptation_constraint) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ auto it = std::find(adaptation_constraints_.begin(),
+ adaptation_constraints_.end(), adaptation_constraint);
+ RTC_DCHECK(it != adaptation_constraints_.end());
+ adaptation_constraints_.erase(it);
+}
+
+void VideoStreamAdapter::SetDegradationPreference(
+ DegradationPreference degradation_preference) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ if (degradation_preference_ == degradation_preference)
+ return;
+ // Invalidate any previously returned Adaptation.
+ ++adaptation_validation_id_;
+ bool balanced_switch =
+ degradation_preference == DegradationPreference::BALANCED ||
+ degradation_preference_ == DegradationPreference::BALANCED;
+ degradation_preference_ = degradation_preference;
+ if (balanced_switch) {
+ // ClearRestrictions() calls BroadcastVideoRestrictionsUpdate(nullptr).
+ ClearRestrictions();
+ } else {
+ BroadcastVideoRestrictionsUpdate(input_state_provider_->InputState(),
+ nullptr);
+ }
+}
+
+struct VideoStreamAdapter::RestrictionsOrStateVisitor {
+ Adaptation operator()(const RestrictionsWithCounters& r) const {
+ return Adaptation(adaptation_validation_id, r.restrictions, r.counters,
+ input_state);
+ }
+ Adaptation operator()(const Adaptation::Status& status) const {
+ RTC_DCHECK_NE(status, Adaptation::Status::kValid);
+ return Adaptation(adaptation_validation_id, status);
+ }
+
+ const int adaptation_validation_id;
+ const VideoStreamInputState& input_state;
+};
+
+Adaptation VideoStreamAdapter::RestrictionsOrStateToAdaptation(
+ VideoStreamAdapter::RestrictionsOrState step_or_state,
+ const VideoStreamInputState& input_state) const {
+ RTC_DCHECK(!step_or_state.valueless_by_exception());
+ return absl::visit(
+ RestrictionsOrStateVisitor{adaptation_validation_id_, input_state},
+ step_or_state);
+}
+
+Adaptation VideoStreamAdapter::GetAdaptationUp(
+ const VideoStreamInputState& input_state) const {
+ RestrictionsOrState step = GetAdaptationUpStep(input_state);
+ // If an adaptation proposed, check with the constraints that it is ok.
+ if (absl::holds_alternative<RestrictionsWithCounters>(step)) {
+ RestrictionsWithCounters restrictions =
+ absl::get<RestrictionsWithCounters>(step);
+ for (const auto* constraint : adaptation_constraints_) {
+ if (!constraint->IsAdaptationUpAllowed(input_state,
+ current_restrictions_.restrictions,
+ restrictions.restrictions)) {
+ RTC_LOG(LS_INFO) << "Not adapting up because constraint \""
+ << constraint->Name() << "\" disallowed it";
+ step = Adaptation::Status::kRejectedByConstraint;
+ }
+ }
+ }
+ return RestrictionsOrStateToAdaptation(step, input_state);
+}
+
+Adaptation VideoStreamAdapter::GetAdaptationUp() {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ VideoStreamInputState input_state = input_state_provider_->InputState();
+ ++adaptation_validation_id_;
+ Adaptation adaptation = GetAdaptationUp(input_state);
+ return adaptation;
+}
+
+VideoStreamAdapter::RestrictionsOrState VideoStreamAdapter::GetAdaptationUpStep(
+ const VideoStreamInputState& input_state) const {
+ if (!HasSufficientInputForAdaptation(input_state)) {
+ return Adaptation::Status::kInsufficientInput;
+ }
+ // Don't adapt if we're awaiting a previous adaptation to have an effect.
+ if (awaiting_frame_size_change_ &&
+ awaiting_frame_size_change_->pixels_increased &&
+ degradation_preference_ == DegradationPreference::MAINTAIN_FRAMERATE &&
+ input_state.frame_size_pixels().value() <=
+ awaiting_frame_size_change_->frame_size_pixels) {
+ return Adaptation::Status::kAwaitingPreviousAdaptation;
+ }
+
+ // Maybe propose targets based on degradation preference.
+ switch (degradation_preference_) {
+ case DegradationPreference::BALANCED: {
+ // Attempt to increase target frame rate.
+ RestrictionsOrState increase_frame_rate =
+ IncreaseFramerate(input_state, current_restrictions_);
+ if (absl::holds_alternative<RestrictionsWithCounters>(
+ increase_frame_rate)) {
+ return increase_frame_rate;
+ }
+ // else, increase resolution.
+ [[fallthrough]];
+ }
+ case DegradationPreference::MAINTAIN_FRAMERATE: {
+ // Attempt to increase pixel count.
+ return IncreaseResolution(input_state, current_restrictions_);
+ }
+ case DegradationPreference::MAINTAIN_RESOLUTION: {
+ // Scale up framerate.
+ return IncreaseFramerate(input_state, current_restrictions_);
+ }
+ case DegradationPreference::DISABLED:
+ return Adaptation::Status::kAdaptationDisabled;
+ }
+ RTC_CHECK_NOTREACHED();
+}
+
+Adaptation VideoStreamAdapter::GetAdaptationDown() {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ VideoStreamInputState input_state = input_state_provider_->InputState();
+ ++adaptation_validation_id_;
+ RestrictionsOrState restrictions_or_state =
+ GetAdaptationDownStep(input_state, current_restrictions_);
+ if (MinPixelLimitReached(input_state)) {
+ encoder_stats_observer_->OnMinPixelLimitReached();
+ }
+ // Check for min_fps
+ if (degradation_preference_ == DegradationPreference::BALANCED &&
+ absl::holds_alternative<RestrictionsWithCounters>(
+ restrictions_or_state)) {
+ restrictions_or_state = AdaptIfFpsDiffInsufficient(
+ input_state,
+ absl::get<RestrictionsWithCounters>(restrictions_or_state));
+ }
+ return RestrictionsOrStateToAdaptation(restrictions_or_state, input_state);
+}
+
+VideoStreamAdapter::RestrictionsOrState
+VideoStreamAdapter::AdaptIfFpsDiffInsufficient(
+ const VideoStreamInputState& input_state,
+ const RestrictionsWithCounters& restrictions) const {
+ RTC_DCHECK_EQ(degradation_preference_, DegradationPreference::BALANCED);
+ int frame_size_pixels = input_state.single_active_stream_pixels().value_or(
+ input_state.frame_size_pixels().value());
+ absl::optional<int> min_fps_diff =
+ balanced_settings_.MinFpsDiff(frame_size_pixels);
+ if (current_restrictions_.counters.fps_adaptations <
+ restrictions.counters.fps_adaptations &&
+ min_fps_diff && input_state.frames_per_second() > 0) {
+ int fps_diff = input_state.frames_per_second() -
+ restrictions.restrictions.max_frame_rate().value();
+ if (fps_diff < min_fps_diff.value()) {
+ return GetAdaptationDownStep(input_state, restrictions);
+ }
+ }
+ return restrictions;
+}
+
+VideoStreamAdapter::RestrictionsOrState
+VideoStreamAdapter::GetAdaptationDownStep(
+ const VideoStreamInputState& input_state,
+ const RestrictionsWithCounters& current_restrictions) const {
+ if (!HasSufficientInputForAdaptation(input_state)) {
+ return Adaptation::Status::kInsufficientInput;
+ }
+ // Don't adapt if we're awaiting a previous adaptation to have an effect or
+ // if we switched degradation preference.
+ if (awaiting_frame_size_change_ &&
+ !awaiting_frame_size_change_->pixels_increased &&
+ degradation_preference_ == DegradationPreference::MAINTAIN_FRAMERATE &&
+ input_state.frame_size_pixels().value() >=
+ awaiting_frame_size_change_->frame_size_pixels) {
+ return Adaptation::Status::kAwaitingPreviousAdaptation;
+ }
+ // Maybe propose targets based on degradation preference.
+ switch (degradation_preference_) {
+ case DegradationPreference::BALANCED: {
+ // Try scale down framerate, if lower.
+ RestrictionsOrState decrease_frame_rate =
+ DecreaseFramerate(input_state, current_restrictions);
+ if (absl::holds_alternative<RestrictionsWithCounters>(
+ decrease_frame_rate)) {
+ return decrease_frame_rate;
+ }
+ // else, decrease resolution.
+ [[fallthrough]];
+ }
+ case DegradationPreference::MAINTAIN_FRAMERATE: {
+ return DecreaseResolution(input_state, current_restrictions);
+ }
+ case DegradationPreference::MAINTAIN_RESOLUTION: {
+ return DecreaseFramerate(input_state, current_restrictions);
+ }
+ case DegradationPreference::DISABLED:
+ return Adaptation::Status::kAdaptationDisabled;
+ }
+ RTC_CHECK_NOTREACHED();
+}
+
+VideoStreamAdapter::RestrictionsOrState VideoStreamAdapter::DecreaseResolution(
+ const VideoStreamInputState& input_state,
+ const RestrictionsWithCounters& current_restrictions) {
+ int target_pixels =
+ GetLowerResolutionThan(input_state.frame_size_pixels().value());
+ // Use single active stream if set, this stream could be lower than the input.
+ int target_pixels_min =
+ GetLowerResolutionThan(input_state.single_active_stream_pixels().value_or(
+ input_state.frame_size_pixels().value()));
+ if (!CanDecreaseResolutionTo(target_pixels, target_pixels_min, input_state,
+ current_restrictions.restrictions)) {
+ return Adaptation::Status::kLimitReached;
+ }
+ RestrictionsWithCounters new_restrictions = current_restrictions;
+ RTC_LOG(LS_INFO) << "Scaling down resolution, max pixels: " << target_pixels;
+ new_restrictions.restrictions.set_max_pixels_per_frame(
+ target_pixels != std::numeric_limits<int>::max()
+ ? absl::optional<size_t>(target_pixels)
+ : absl::nullopt);
+ new_restrictions.restrictions.set_target_pixels_per_frame(absl::nullopt);
+ ++new_restrictions.counters.resolution_adaptations;
+ return new_restrictions;
+}
+
+VideoStreamAdapter::RestrictionsOrState VideoStreamAdapter::DecreaseFramerate(
+ const VideoStreamInputState& input_state,
+ const RestrictionsWithCounters& current_restrictions) const {
+ int max_frame_rate;
+ if (degradation_preference_ == DegradationPreference::MAINTAIN_RESOLUTION) {
+ max_frame_rate = GetLowerFrameRateThan(input_state.frames_per_second());
+ } else if (degradation_preference_ == DegradationPreference::BALANCED) {
+ int frame_size_pixels = input_state.single_active_stream_pixels().value_or(
+ input_state.frame_size_pixels().value());
+ max_frame_rate = balanced_settings_.MinFps(input_state.video_codec_type(),
+ frame_size_pixels);
+ } else {
+ RTC_DCHECK_NOTREACHED();
+ max_frame_rate = GetLowerFrameRateThan(input_state.frames_per_second());
+ }
+ if (!CanDecreaseFrameRateTo(max_frame_rate,
+ current_restrictions.restrictions)) {
+ return Adaptation::Status::kLimitReached;
+ }
+ RestrictionsWithCounters new_restrictions = current_restrictions;
+ max_frame_rate = std::max(kMinFrameRateFps, max_frame_rate);
+ RTC_LOG(LS_INFO) << "Scaling down framerate: " << max_frame_rate;
+ new_restrictions.restrictions.set_max_frame_rate(
+ max_frame_rate != std::numeric_limits<int>::max()
+ ? absl::optional<double>(max_frame_rate)
+ : absl::nullopt);
+ ++new_restrictions.counters.fps_adaptations;
+ return new_restrictions;
+}
+
+VideoStreamAdapter::RestrictionsOrState VideoStreamAdapter::IncreaseResolution(
+ const VideoStreamInputState& input_state,
+ const RestrictionsWithCounters& current_restrictions) {
+ int target_pixels = input_state.frame_size_pixels().value();
+ if (current_restrictions.counters.resolution_adaptations == 1) {
+ RTC_LOG(LS_INFO) << "Removing resolution down-scaling setting.";
+ target_pixels = std::numeric_limits<int>::max();
+ }
+ target_pixels = GetHigherResolutionThan(target_pixels);
+ if (!CanIncreaseResolutionTo(target_pixels,
+ current_restrictions.restrictions)) {
+ return Adaptation::Status::kLimitReached;
+ }
+ int max_pixels_wanted = GetIncreasedMaxPixelsWanted(target_pixels);
+ RestrictionsWithCounters new_restrictions = current_restrictions;
+ RTC_LOG(LS_INFO) << "Scaling up resolution, max pixels: "
+ << max_pixels_wanted;
+ new_restrictions.restrictions.set_max_pixels_per_frame(
+ max_pixels_wanted != std::numeric_limits<int>::max()
+ ? absl::optional<size_t>(max_pixels_wanted)
+ : absl::nullopt);
+ new_restrictions.restrictions.set_target_pixels_per_frame(
+ max_pixels_wanted != std::numeric_limits<int>::max()
+ ? absl::optional<size_t>(target_pixels)
+ : absl::nullopt);
+ --new_restrictions.counters.resolution_adaptations;
+ RTC_DCHECK_GE(new_restrictions.counters.resolution_adaptations, 0);
+ return new_restrictions;
+}
+
+VideoStreamAdapter::RestrictionsOrState VideoStreamAdapter::IncreaseFramerate(
+ const VideoStreamInputState& input_state,
+ const RestrictionsWithCounters& current_restrictions) const {
+ int max_frame_rate;
+ if (degradation_preference_ == DegradationPreference::MAINTAIN_RESOLUTION) {
+ max_frame_rate = GetHigherFrameRateThan(input_state.frames_per_second());
+ } else if (degradation_preference_ == DegradationPreference::BALANCED) {
+ int frame_size_pixels = input_state.single_active_stream_pixels().value_or(
+ input_state.frame_size_pixels().value());
+ max_frame_rate = balanced_settings_.MaxFps(input_state.video_codec_type(),
+ frame_size_pixels);
+ // Temporary fix for cases when there are fewer framerate adaptation steps
+ // up than down. Make number of down/up steps equal.
+ if (max_frame_rate == std::numeric_limits<int>::max() &&
+ current_restrictions.counters.fps_adaptations > 1) {
+ // Do not unrestrict framerate to allow additional adaptation up steps.
+ RTC_LOG(LS_INFO) << "Modifying framerate due to remaining fps count.";
+ max_frame_rate -= current_restrictions.counters.fps_adaptations;
+ }
+ // In BALANCED, the max_frame_rate must be checked before proceeding. This
+ // is because the MaxFps might be the current Fps and so the balanced
+ // settings may want to scale up the resolution.
+ if (!CanIncreaseFrameRateTo(max_frame_rate,
+ current_restrictions.restrictions)) {
+ return Adaptation::Status::kLimitReached;
+ }
+ } else {
+ RTC_DCHECK_NOTREACHED();
+ max_frame_rate = GetHigherFrameRateThan(input_state.frames_per_second());
+ }
+ if (current_restrictions.counters.fps_adaptations == 1) {
+ RTC_LOG(LS_INFO) << "Removing framerate down-scaling setting.";
+ max_frame_rate = std::numeric_limits<int>::max();
+ }
+ if (!CanIncreaseFrameRateTo(max_frame_rate,
+ current_restrictions.restrictions)) {
+ return Adaptation::Status::kLimitReached;
+ }
+ RTC_LOG(LS_INFO) << "Scaling up framerate: " << max_frame_rate;
+ RestrictionsWithCounters new_restrictions = current_restrictions;
+ new_restrictions.restrictions.set_max_frame_rate(
+ max_frame_rate != std::numeric_limits<int>::max()
+ ? absl::optional<double>(max_frame_rate)
+ : absl::nullopt);
+ --new_restrictions.counters.fps_adaptations;
+ RTC_DCHECK_GE(new_restrictions.counters.fps_adaptations, 0);
+ return new_restrictions;
+}
+
+Adaptation VideoStreamAdapter::GetAdaptDownResolution() {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ VideoStreamInputState input_state = input_state_provider_->InputState();
+ switch (degradation_preference_) {
+ case DegradationPreference::DISABLED:
+ return RestrictionsOrStateToAdaptation(
+ Adaptation::Status::kAdaptationDisabled, input_state);
+ case DegradationPreference::MAINTAIN_RESOLUTION:
+ return RestrictionsOrStateToAdaptation(Adaptation::Status::kLimitReached,
+ input_state);
+ case DegradationPreference::MAINTAIN_FRAMERATE:
+ return GetAdaptationDown();
+ case DegradationPreference::BALANCED: {
+ return RestrictionsOrStateToAdaptation(
+ GetAdaptDownResolutionStepForBalanced(input_state), input_state);
+ }
+ }
+ RTC_CHECK_NOTREACHED();
+}
+
+VideoStreamAdapter::RestrictionsOrState
+VideoStreamAdapter::GetAdaptDownResolutionStepForBalanced(
+ const VideoStreamInputState& input_state) const {
+ // Adapt twice if the first adaptation did not decrease resolution.
+ auto first_step = GetAdaptationDownStep(input_state, current_restrictions_);
+ if (!absl::holds_alternative<RestrictionsWithCounters>(first_step)) {
+ return first_step;
+ }
+ auto first_restrictions = absl::get<RestrictionsWithCounters>(first_step);
+ if (first_restrictions.counters.resolution_adaptations >
+ current_restrictions_.counters.resolution_adaptations) {
+ return first_step;
+ }
+  // We didn't decrease resolution so force it; amend a resolution reduction
+ // to the existing framerate reduction in `first_restrictions`.
+ auto second_step = DecreaseResolution(input_state, first_restrictions);
+ if (absl::holds_alternative<RestrictionsWithCounters>(second_step)) {
+ return second_step;
+ }
+ // If the second step was not successful then settle for the first one.
+ return first_step;
+}
+
+void VideoStreamAdapter::ApplyAdaptation(
+ const Adaptation& adaptation,
+ rtc::scoped_refptr<Resource> resource) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ RTC_DCHECK_EQ(adaptation.validation_id_, adaptation_validation_id_);
+ if (adaptation.status() != Adaptation::Status::kValid)
+ return;
+ // Remember the input pixels and fps of this adaptation. Used to avoid
+ // adapting again before this adaptation has had an effect.
+ if (DidIncreaseResolution(current_restrictions_.restrictions,
+ adaptation.restrictions())) {
+ awaiting_frame_size_change_.emplace(
+ true, adaptation.input_state().frame_size_pixels().value());
+ } else if (DidDecreaseResolution(current_restrictions_.restrictions,
+ adaptation.restrictions())) {
+ awaiting_frame_size_change_.emplace(
+ false, adaptation.input_state().frame_size_pixels().value());
+ } else {
+ awaiting_frame_size_change_ = absl::nullopt;
+ }
+ current_restrictions_ = {adaptation.restrictions(), adaptation.counters()};
+ BroadcastVideoRestrictionsUpdate(adaptation.input_state(), resource);
+}
+
+Adaptation VideoStreamAdapter::GetAdaptationTo(
+ const VideoAdaptationCounters& counters,
+ const VideoSourceRestrictions& restrictions) {
+ // Adapts up/down from the current levels so counters are equal.
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ VideoStreamInputState input_state = input_state_provider_->InputState();
+ return Adaptation(adaptation_validation_id_, restrictions, counters,
+ input_state);
+}
+
+void VideoStreamAdapter::BroadcastVideoRestrictionsUpdate(
+ const VideoStreamInputState& input_state,
+ const rtc::scoped_refptr<Resource>& resource) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ VideoSourceRestrictions filtered = FilterRestrictionsByDegradationPreference(
+ source_restrictions(), degradation_preference_);
+ if (last_filtered_restrictions_ == filtered) {
+ return;
+ }
+ for (auto* restrictions_listener : restrictions_listeners_) {
+ restrictions_listener->OnVideoSourceRestrictionsUpdated(
+ filtered, current_restrictions_.counters, resource,
+ source_restrictions());
+ }
+ last_video_source_restrictions_ = current_restrictions_.restrictions;
+ last_filtered_restrictions_ = filtered;
+}
+
+bool VideoStreamAdapter::HasSufficientInputForAdaptation(
+ const VideoStreamInputState& input_state) const {
+ return input_state.HasInputFrameSizeAndFramesPerSecond() &&
+ (degradation_preference_ !=
+ DegradationPreference::MAINTAIN_RESOLUTION ||
+ input_state.frames_per_second() >= kMinFrameRateFps);
+}
+
+VideoStreamAdapter::AwaitingFrameSizeChange::AwaitingFrameSizeChange(
+ bool pixels_increased,
+ int frame_size_pixels)
+ : pixels_increased(pixels_increased),
+ frame_size_pixels(frame_size_pixels) {}
+
+absl::optional<uint32_t> VideoStreamAdapter::GetSingleActiveLayerPixels(
+ const VideoCodec& codec) {
+ int num_active = 0;
+ absl::optional<uint32_t> pixels;
+ if (codec.codecType == VideoCodecType::kVideoCodecAV1 &&
+ codec.GetScalabilityMode().has_value()) {
+ for (int i = 0;
+ i < ScalabilityModeToNumSpatialLayers(*(codec.GetScalabilityMode()));
+ ++i) {
+ if (codec.spatialLayers[i].active) {
+ ++num_active;
+ pixels = codec.spatialLayers[i].width * codec.spatialLayers[i].height;
+ }
+ }
+ } else if (codec.codecType == VideoCodecType::kVideoCodecVP9) {
+ for (int i = 0; i < codec.VP9().numberOfSpatialLayers; ++i) {
+ if (codec.spatialLayers[i].active) {
+ ++num_active;
+ pixels = codec.spatialLayers[i].width * codec.spatialLayers[i].height;
+ }
+ }
+ } else {
+ for (int i = 0; i < codec.numberOfSimulcastStreams; ++i) {
+ if (codec.simulcastStream[i].active) {
+ ++num_active;
+ pixels =
+ codec.simulcastStream[i].width * codec.simulcastStream[i].height;
+ }
+ }
+ }
+ return (num_active > 1) ? absl::nullopt : pixels;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/adaptation/video_stream_adapter.h b/third_party/libwebrtc/call/adaptation/video_stream_adapter.h
new file mode 100644
index 0000000000..5c174178e4
--- /dev/null
+++ b/third_party/libwebrtc/call/adaptation/video_stream_adapter.h
@@ -0,0 +1,271 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef CALL_ADAPTATION_VIDEO_STREAM_ADAPTER_H_
+#define CALL_ADAPTATION_VIDEO_STREAM_ADAPTER_H_
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "absl/types/variant.h"
+#include "api/adaptation/resource.h"
+#include "api/field_trials_view.h"
+#include "api/rtp_parameters.h"
+#include "api/video/video_adaptation_counters.h"
+#include "call/adaptation/adaptation_constraint.h"
+#include "call/adaptation/degradation_preference_provider.h"
+#include "call/adaptation/video_source_restrictions.h"
+#include "call/adaptation/video_stream_input_state.h"
+#include "call/adaptation/video_stream_input_state_provider.h"
+#include "modules/video_coding/utility/quality_scaler.h"
+#include "rtc_base/experiments/balanced_degradation_settings.h"
+#include "rtc_base/system/no_unique_address.h"
+#include "rtc_base/thread_annotations.h"
+#include "video/video_stream_encoder_observer.h"
+
+namespace webrtc {
+
+// The listener is responsible for carrying out the reconfiguration of the video
+// source such that the VideoSourceRestrictions are fulfilled.
+class VideoSourceRestrictionsListener {
+ public:
+ virtual ~VideoSourceRestrictionsListener();
+
+ // The `restrictions` are filtered by degradation preference but not the
+ // `adaptation_counters`, which are currently only reported for legacy stats
+ // calculation purposes.
+ virtual void OnVideoSourceRestrictionsUpdated(
+ VideoSourceRestrictions restrictions,
+ const VideoAdaptationCounters& adaptation_counters,
+ rtc::scoped_refptr<Resource> reason,
+ const VideoSourceRestrictions& unfiltered_restrictions) = 0;
+};
+
+class VideoStreamAdapter;
+
+extern const int kMinFrameRateFps;
+
+VideoSourceRestrictions FilterRestrictionsByDegradationPreference(
+ VideoSourceRestrictions source_restrictions,
+ DegradationPreference degradation_preference);
+
+int GetLowerResolutionThan(int pixel_count);
+int GetHigherResolutionThan(int pixel_count);
+
+// Either represents the next VideoSourceRestrictions the VideoStreamAdapter
+// will take, or provides a Status code indicating the reason for not adapting
+// if the adaptation is not valid.
+class Adaptation final {
+ public:
+ enum class Status {
+ // Applying this adaptation will have an effect. All other Status codes
+ // indicate that adaptation is not possible and why.
+ kValid,
+ // Cannot adapt. The minimum or maximum adaptation has already been reached.
+ // There are no more steps to take.
+ kLimitReached,
+ // Cannot adapt. The resolution or frame rate requested by a recent
+ // adaptation has not yet been reflected in the input resolution or frame
+ // rate; adaptation is refused to avoid "double-adapting".
+ kAwaitingPreviousAdaptation,
+ // Not enough input.
+ kInsufficientInput,
+ // Adaptation disabled via degradation preference.
+ kAdaptationDisabled,
+ // Adaptation up was rejected by a VideoAdaptationConstraint.
+ kRejectedByConstraint,
+ };
+
+ static const char* StatusToString(Status status);
+
+ Status status() const;
+ const VideoStreamInputState& input_state() const;
+ const VideoSourceRestrictions& restrictions() const;
+ const VideoAdaptationCounters& counters() const;
+
+ private:
+ friend class VideoStreamAdapter;
+
+ // Constructs with a valid adaptation. Status is kValid.
+ Adaptation(int validation_id,
+ VideoSourceRestrictions restrictions,
+ VideoAdaptationCounters counters,
+ VideoStreamInputState input_state);
+ // Constructor when adaptation is not valid. Status MUST NOT be kValid.
+ Adaptation(int validation_id, Status invalid_status);
+
+ // An Adaptation can become invalidated if the state of VideoStreamAdapter is
+ // modified before the Adaptation is applied. To guard against this, this ID
+ // has to match VideoStreamAdapter::adaptation_validation_id_ when applied.
+ // TODO(https://crbug.com/webrtc/11700): Remove the validation_id_.
+ const int validation_id_;
+ const Status status_;
+ // Input state when adaptation was made.
+ const VideoStreamInputState input_state_;
+ const VideoSourceRestrictions restrictions_;
+ const VideoAdaptationCounters counters_;
+};
+
+// Owns the VideoSourceRestriction for a single stream and is responsible for
+// adapting it up or down when told to do so. This class serves the following
+// purposes:
+// 1. Keep track of a stream's restrictions.
+// 2. Provide valid ways to adapt up or down the stream's restrictions.
+// 3. Modify the stream's restrictions in one of the valid ways.
+class VideoStreamAdapter {
+ public:
+ VideoStreamAdapter(VideoStreamInputStateProvider* input_state_provider,
+ VideoStreamEncoderObserver* encoder_stats_observer,
+ const FieldTrialsView& field_trials);
+ ~VideoStreamAdapter();
+
+ VideoSourceRestrictions source_restrictions() const;
+ const VideoAdaptationCounters& adaptation_counters() const;
+ void ClearRestrictions();
+
+ void AddRestrictionsListener(
+ VideoSourceRestrictionsListener* restrictions_listener);
+ void RemoveRestrictionsListener(
+ VideoSourceRestrictionsListener* restrictions_listener);
+ void AddAdaptationConstraint(AdaptationConstraint* adaptation_constraint);
+ void RemoveAdaptationConstraint(AdaptationConstraint* adaptation_constraint);
+
+ // TODO(hbos): Setting the degradation preference should not clear
+ // restrictions! This is not defined in the spec and is unexpected, there is a
+ // tiny risk that people would discover and rely on this behavior.
+ void SetDegradationPreference(DegradationPreference degradation_preference);
+
+ // Returns an adaptation that we are guaranteed to be able to apply, or a
+ // status code indicating the reason why we cannot adapt.
+ Adaptation GetAdaptationUp();
+ Adaptation GetAdaptationDown();
+ Adaptation GetAdaptationTo(const VideoAdaptationCounters& counters,
+ const VideoSourceRestrictions& restrictions);
+ // Tries to adapt the resolution one step. This is used for initial frame
+ // dropping. Does nothing if the degradation preference is not BALANCED or
+ // MAINTAIN_FRAMERATE. In the case of BALANCED, it will try twice to reduce
+ // the resolution. If it fails twice it gives up.
+ Adaptation GetAdaptDownResolution();
+
+  // Updates source_restrictions() with the given Adaptation.
+ void ApplyAdaptation(const Adaptation& adaptation,
+ rtc::scoped_refptr<Resource> resource);
+
+ struct RestrictionsWithCounters {
+ VideoSourceRestrictions restrictions;
+ VideoAdaptationCounters counters;
+ };
+
+ static absl::optional<uint32_t> GetSingleActiveLayerPixels(
+ const VideoCodec& codec);
+
+ private:
+ void BroadcastVideoRestrictionsUpdate(
+ const VideoStreamInputState& input_state,
+ const rtc::scoped_refptr<Resource>& resource);
+
+ bool HasSufficientInputForAdaptation(const VideoStreamInputState& input_state)
+ const RTC_RUN_ON(&sequence_checker_);
+
+ using RestrictionsOrState =
+ absl::variant<RestrictionsWithCounters, Adaptation::Status>;
+ RestrictionsOrState GetAdaptationUpStep(
+ const VideoStreamInputState& input_state) const
+ RTC_RUN_ON(&sequence_checker_);
+ RestrictionsOrState GetAdaptationDownStep(
+ const VideoStreamInputState& input_state,
+ const RestrictionsWithCounters& current_restrictions) const
+ RTC_RUN_ON(&sequence_checker_);
+ RestrictionsOrState GetAdaptDownResolutionStepForBalanced(
+ const VideoStreamInputState& input_state) const
+ RTC_RUN_ON(&sequence_checker_);
+ RestrictionsOrState AdaptIfFpsDiffInsufficient(
+ const VideoStreamInputState& input_state,
+ const RestrictionsWithCounters& restrictions) const
+ RTC_RUN_ON(&sequence_checker_);
+
+ Adaptation GetAdaptationUp(const VideoStreamInputState& input_state) const
+ RTC_RUN_ON(&sequence_checker_);
+ Adaptation GetAdaptationDown(const VideoStreamInputState& input_state) const
+ RTC_RUN_ON(&sequence_checker_);
+
+ static RestrictionsOrState DecreaseResolution(
+ const VideoStreamInputState& input_state,
+ const RestrictionsWithCounters& current_restrictions);
+ static RestrictionsOrState IncreaseResolution(
+ const VideoStreamInputState& input_state,
+ const RestrictionsWithCounters& current_restrictions);
+ // Framerate methods are member functions because they need internal state
+ // if the degradation preference is BALANCED.
+ RestrictionsOrState DecreaseFramerate(
+ const VideoStreamInputState& input_state,
+ const RestrictionsWithCounters& current_restrictions) const
+ RTC_RUN_ON(&sequence_checker_);
+ RestrictionsOrState IncreaseFramerate(
+ const VideoStreamInputState& input_state,
+ const RestrictionsWithCounters& current_restrictions) const
+ RTC_RUN_ON(&sequence_checker_);
+
+ struct RestrictionsOrStateVisitor;
+ Adaptation RestrictionsOrStateToAdaptation(
+ RestrictionsOrState step_or_state,
+ const VideoStreamInputState& input_state) const
+ RTC_RUN_ON(&sequence_checker_);
+
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker sequence_checker_
+ RTC_GUARDED_BY(&sequence_checker_);
+ // Gets the input state which is the basis of all adaptations.
+ // Thread safe.
+ VideoStreamInputStateProvider* input_state_provider_;
+ // Used to signal when min pixel limit has been reached.
+ VideoStreamEncoderObserver* const encoder_stats_observer_;
+ // Decides the next adaptation target in DegradationPreference::BALANCED.
+ const BalancedDegradationSettings balanced_settings_;
+ // To guard against applying adaptations that have become invalidated, an
+ // Adaptation that is applied has to have a matching validation ID.
+ int adaptation_validation_id_ RTC_GUARDED_BY(&sequence_checker_);
+ // When deciding the next target up or down, different strategies are used
+ // depending on the DegradationPreference.
+ // https://w3c.github.io/mst-content-hint/#dom-rtcdegradationpreference
+ DegradationPreference degradation_preference_
+ RTC_GUARDED_BY(&sequence_checker_);
+ // Used to avoid adapting twice. Stores the resolution at the time of the last
+ // adaptation.
+ // TODO(hbos): Can we implement a more general "cooldown" mechanism of
+  // resources instead? If we already have adapted it seems like we should wait
+ // a while before adapting again, so that we are not acting on usage
+ // measurements that are made obsolete/unreliable by an "ongoing" adaptation.
+ struct AwaitingFrameSizeChange {
+ AwaitingFrameSizeChange(bool pixels_increased, int frame_size);
+ const bool pixels_increased;
+ const int frame_size_pixels;
+ };
+ absl::optional<AwaitingFrameSizeChange> awaiting_frame_size_change_
+ RTC_GUARDED_BY(&sequence_checker_);
+ // The previous restrictions value. Starts as unrestricted.
+ VideoSourceRestrictions last_video_source_restrictions_
+ RTC_GUARDED_BY(&sequence_checker_);
+ VideoSourceRestrictions last_filtered_restrictions_
+ RTC_GUARDED_BY(&sequence_checker_);
+
+ std::vector<VideoSourceRestrictionsListener*> restrictions_listeners_
+ RTC_GUARDED_BY(&sequence_checker_);
+ std::vector<AdaptationConstraint*> adaptation_constraints_
+ RTC_GUARDED_BY(&sequence_checker_);
+
+ RestrictionsWithCounters current_restrictions_
+ RTC_GUARDED_BY(&sequence_checker_);
+};
+
+} // namespace webrtc
+
+#endif // CALL_ADAPTATION_VIDEO_STREAM_ADAPTER_H_
diff --git a/third_party/libwebrtc/call/adaptation/video_stream_adapter_unittest.cc b/third_party/libwebrtc/call/adaptation/video_stream_adapter_unittest.cc
new file mode 100644
index 0000000000..d4bc650856
--- /dev/null
+++ b/third_party/libwebrtc/call/adaptation/video_stream_adapter_unittest.cc
@@ -0,0 +1,951 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/adaptation/video_stream_adapter.h"
+
+#include <string>
+#include <utility>
+
+#include "absl/types/optional.h"
+#include "api/scoped_refptr.h"
+#include "api/video/video_adaptation_reason.h"
+#include "api/video_codecs/video_codec.h"
+#include "api/video_codecs/video_encoder.h"
+#include "call/adaptation/adaptation_constraint.h"
+#include "call/adaptation/encoder_settings.h"
+#include "call/adaptation/test/fake_frame_rate_provider.h"
+#include "call/adaptation/test/fake_resource.h"
+#include "call/adaptation/test/fake_video_stream_input_state_provider.h"
+#include "call/adaptation/video_source_restrictions.h"
+#include "call/adaptation/video_stream_input_state.h"
+#include "rtc_base/string_encode.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/scoped_key_value_config.h"
+#include "test/testsupport/rtc_expect_death.h"
+#include "video/config/video_encoder_config.h"
+
+namespace webrtc {
+
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::Return;
+using ::testing::SaveArg;
+
+namespace {
+
+const int kBalancedHighResolutionPixels = 1280 * 720;
+const int kBalancedHighFrameRateFps = 30;
+
+const int kBalancedMediumResolutionPixels = 640 * 480;
+const int kBalancedMediumFrameRateFps = 20;
+
+const int kBalancedLowResolutionPixels = 320 * 240;
+const int kBalancedLowFrameRateFps = 10;
+
+std::string BalancedFieldTrialConfig() {
+ return "WebRTC-Video-BalancedDegradationSettings/pixels:" +
+ rtc::ToString(kBalancedLowResolutionPixels) + "|" +
+ rtc::ToString(kBalancedMediumResolutionPixels) + "|" +
+ rtc::ToString(kBalancedHighResolutionPixels) +
+ ",fps:" + rtc::ToString(kBalancedLowFrameRateFps) + "|" +
+ rtc::ToString(kBalancedMediumFrameRateFps) + "|" +
+ rtc::ToString(kBalancedHighFrameRateFps) + "/";
+}
+
+// Responsible for adjusting the inputs to VideoStreamAdapter (SetInput), such
+// as pixels and frame rate, according to the most recent source restrictions.
+// This helps tests that apply adaptations multiple times: if the input is not
+// adjusted between adaptations, the subsequent adaptations fail with
+// kAwaitingPreviousAdaptation.
+class FakeVideoStream {
+ public:
+ FakeVideoStream(VideoStreamAdapter* adapter,
+ FakeVideoStreamInputStateProvider* provider,
+ int input_pixels,
+ int input_fps,
+ int min_pixels_per_frame)
+ : adapter_(adapter),
+ provider_(provider),
+ input_pixels_(input_pixels),
+ input_fps_(input_fps),
+ min_pixels_per_frame_(min_pixels_per_frame) {
+ provider_->SetInputState(input_pixels_, input_fps_, min_pixels_per_frame_);
+ }
+
+ int input_pixels() const { return input_pixels_; }
+ int input_fps() const { return input_fps_; }
+
+ // Performs ApplyAdaptation() followed by SetInput() with input pixels and
+ // frame rate adjusted according to the resulting restrictions.
+ void ApplyAdaptation(Adaptation adaptation) {
+ adapter_->ApplyAdaptation(adaptation, nullptr);
+ // Update input pixels and fps according to the resulting restrictions.
+ auto restrictions = adapter_->source_restrictions();
+ if (restrictions.target_pixels_per_frame().has_value()) {
+ RTC_DCHECK(!restrictions.max_pixels_per_frame().has_value() ||
+ restrictions.max_pixels_per_frame().value() >=
+ restrictions.target_pixels_per_frame().value());
+ input_pixels_ = restrictions.target_pixels_per_frame().value();
+ } else if (restrictions.max_pixels_per_frame().has_value()) {
+ input_pixels_ = restrictions.max_pixels_per_frame().value();
+ }
+ if (restrictions.max_frame_rate().has_value()) {
+ input_fps_ = restrictions.max_frame_rate().value();
+ }
+ provider_->SetInputState(input_pixels_, input_fps_, min_pixels_per_frame_);
+ }
+
+ private:
+ VideoStreamAdapter* adapter_;
+ FakeVideoStreamInputStateProvider* provider_;
+ int input_pixels_;
+ int input_fps_;
+ int min_pixels_per_frame_;
+};
+
+class FakeVideoStreamAdapterListner : public VideoSourceRestrictionsListener {
+ public:
+ void OnVideoSourceRestrictionsUpdated(
+ VideoSourceRestrictions restrictions,
+ const VideoAdaptationCounters& adaptation_counters,
+ rtc::scoped_refptr<Resource> reason,
+ const VideoSourceRestrictions& unfiltered_restrictions) override {
+ calls_++;
+ last_restrictions_ = unfiltered_restrictions;
+ }
+
+ int calls() const { return calls_; }
+
+ VideoSourceRestrictions last_restrictions() const {
+ return last_restrictions_;
+ }
+
+ private:
+ int calls_ = 0;
+ VideoSourceRestrictions last_restrictions_;
+};
+
+class MockAdaptationConstraint : public AdaptationConstraint {
+ public:
+ MOCK_METHOD(bool,
+ IsAdaptationUpAllowed,
+ (const VideoStreamInputState& input_state,
+ const VideoSourceRestrictions& restrictions_before,
+ const VideoSourceRestrictions& restrictions_after),
+ (const, override));
+
+ // MOCK_METHOD(std::string, Name, (), (const, override));
+ std::string Name() const override { return "MockAdaptationConstraint"; }
+};
+
+} // namespace
+
+class VideoStreamAdapterTest : public ::testing::Test {
+ public:
+ VideoStreamAdapterTest()
+ : field_trials_(BalancedFieldTrialConfig()),
+ resource_(FakeResource::Create("FakeResource")),
+ adapter_(&input_state_provider_,
+ &encoder_stats_observer_,
+ field_trials_) {}
+
+ protected:
+ webrtc::test::ScopedKeyValueConfig field_trials_;
+ FakeVideoStreamInputStateProvider input_state_provider_;
+ rtc::scoped_refptr<Resource> resource_;
+ testing::StrictMock<MockVideoStreamEncoderObserver> encoder_stats_observer_;
+ VideoStreamAdapter adapter_;
+};
+
+TEST_F(VideoStreamAdapterTest, NoRestrictionsByDefault) {
+ EXPECT_EQ(VideoSourceRestrictions(), adapter_.source_restrictions());
+ EXPECT_EQ(0, adapter_.adaptation_counters().Total());
+}
+
+TEST_F(VideoStreamAdapterTest, MaintainFramerate_DecreasesPixelsToThreeFifths) {
+ const int kInputPixels = 1280 * 720;
+ adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE);
+ input_state_provider_.SetInputState(kInputPixels, 30,
+ kDefaultMinPixelsPerFrame);
+ Adaptation adaptation = adapter_.GetAdaptationDown();
+ EXPECT_EQ(Adaptation::Status::kValid, adaptation.status());
+ adapter_.ApplyAdaptation(adaptation, nullptr);
+ EXPECT_EQ(static_cast<size_t>((kInputPixels * 3) / 5),
+ adapter_.source_restrictions().max_pixels_per_frame());
+ EXPECT_EQ(absl::nullopt,
+ adapter_.source_restrictions().target_pixels_per_frame());
+ EXPECT_EQ(absl::nullopt, adapter_.source_restrictions().max_frame_rate());
+ EXPECT_EQ(1, adapter_.adaptation_counters().resolution_adaptations);
+}
+
+TEST_F(VideoStreamAdapterTest,
+ MaintainFramerate_DecreasesPixelsToLimitReached) {
+ const int kMinPixelsPerFrame = 640 * 480;
+
+ adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE);
+ input_state_provider_.SetInputState(kMinPixelsPerFrame + 1, 30,
+ kMinPixelsPerFrame);
+ EXPECT_CALL(encoder_stats_observer_, OnMinPixelLimitReached());
+ // Even though we are above kMinPixelsPerFrame, because adapting down would
+ // have exceeded the limit, we are said to have reached the limit already.
+ // This differs from the frame rate adaptation logic, which would have clamped
+ // to the limit in the first step and reported kLimitReached in the second
+ // step.
+ Adaptation adaptation = adapter_.GetAdaptationDown();
+ EXPECT_EQ(Adaptation::Status::kLimitReached, adaptation.status());
+}
+
+TEST_F(VideoStreamAdapterTest, MaintainFramerate_IncreasePixelsToFiveThirds) {
+ adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE);
+ FakeVideoStream fake_stream(&adapter_, &input_state_provider_, 1280 * 720, 30,
+ kDefaultMinPixelsPerFrame);
+ // Go down twice, ensuring going back up is still a restricted resolution.
+ fake_stream.ApplyAdaptation(adapter_.GetAdaptationDown());
+ fake_stream.ApplyAdaptation(adapter_.GetAdaptationDown());
+ EXPECT_EQ(2, adapter_.adaptation_counters().resolution_adaptations);
+ int input_pixels = fake_stream.input_pixels();
+ // Go up once. The target is 5/3 and the max is 12/5 of the target.
+ const int target = (input_pixels * 5) / 3;
+ fake_stream.ApplyAdaptation(adapter_.GetAdaptationUp());
+ EXPECT_EQ(static_cast<size_t>((target * 12) / 5),
+ adapter_.source_restrictions().max_pixels_per_frame());
+ EXPECT_EQ(static_cast<size_t>(target),
+ adapter_.source_restrictions().target_pixels_per_frame());
+ EXPECT_EQ(absl::nullopt, adapter_.source_restrictions().max_frame_rate());
+ EXPECT_EQ(1, adapter_.adaptation_counters().resolution_adaptations);
+}
+
+TEST_F(VideoStreamAdapterTest, MaintainFramerate_IncreasePixelsToUnrestricted) {
+ adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE);
+ FakeVideoStream fake_stream(&adapter_, &input_state_provider_, 1280 * 720, 30,
+ kDefaultMinPixelsPerFrame);
+ // We are unrestricted by default and should not be able to adapt up.
+ EXPECT_EQ(Adaptation::Status::kLimitReached,
+ adapter_.GetAdaptationUp().status());
+ // If we go down once and then back up we should not have any restrictions.
+ fake_stream.ApplyAdaptation(adapter_.GetAdaptationDown());
+ EXPECT_EQ(1, adapter_.adaptation_counters().resolution_adaptations);
+ fake_stream.ApplyAdaptation(adapter_.GetAdaptationUp());
+ EXPECT_EQ(VideoSourceRestrictions(), adapter_.source_restrictions());
+ EXPECT_EQ(0, adapter_.adaptation_counters().Total());
+}
+
+TEST_F(VideoStreamAdapterTest, MaintainResolution_DecreasesFpsToTwoThirds) {
+ const int kInputFps = 30;
+
+ adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION);
+ input_state_provider_.SetInputState(1280 * 720, kInputFps,
+ kDefaultMinPixelsPerFrame);
+ Adaptation adaptation = adapter_.GetAdaptationDown();
+ EXPECT_EQ(Adaptation::Status::kValid, adaptation.status());
+ adapter_.ApplyAdaptation(adaptation, nullptr);
+ EXPECT_EQ(absl::nullopt,
+ adapter_.source_restrictions().max_pixels_per_frame());
+ EXPECT_EQ(absl::nullopt,
+ adapter_.source_restrictions().target_pixels_per_frame());
+ EXPECT_EQ(static_cast<double>((kInputFps * 2) / 3),
+ adapter_.source_restrictions().max_frame_rate());
+ EXPECT_EQ(1, adapter_.adaptation_counters().fps_adaptations);
+}
+
+TEST_F(VideoStreamAdapterTest, MaintainResolution_DecreasesFpsToLimitReached) {
+ adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION);
+ FakeVideoStream fake_stream(&adapter_, &input_state_provider_, 1280 * 720,
+ kMinFrameRateFps + 1, kDefaultMinPixelsPerFrame);
+ // If we are not yet at the limit and the next step would exceed it, the step
+ // is clamped such that we end up exactly on the limit.
+ Adaptation adaptation = adapter_.GetAdaptationDown();
+ EXPECT_EQ(Adaptation::Status::kValid, adaptation.status());
+ fake_stream.ApplyAdaptation(adaptation);
+ EXPECT_EQ(static_cast<double>(kMinFrameRateFps),
+ adapter_.source_restrictions().max_frame_rate());
+ EXPECT_EQ(1, adapter_.adaptation_counters().fps_adaptations);
+ // Having reached the limit, the next adaptation down is not valid.
+ EXPECT_EQ(Adaptation::Status::kLimitReached,
+ adapter_.GetAdaptationDown().status());
+}
+
+TEST_F(VideoStreamAdapterTest, MaintainResolution_IncreaseFpsToThreeHalves) {
+ adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION);
+ FakeVideoStream fake_stream(&adapter_, &input_state_provider_, 1280 * 720, 30,
+ kDefaultMinPixelsPerFrame);
+ // Go down twice, ensuring going back up is still a restricted frame rate.
+ fake_stream.ApplyAdaptation(adapter_.GetAdaptationDown());
+ fake_stream.ApplyAdaptation(adapter_.GetAdaptationDown());
+ EXPECT_EQ(2, adapter_.adaptation_counters().fps_adaptations);
+ int input_fps = fake_stream.input_fps();
+ // Go up once. The target is 3/2 of the input.
+ Adaptation adaptation = adapter_.GetAdaptationUp();
+ EXPECT_EQ(Adaptation::Status::kValid, adaptation.status());
+ fake_stream.ApplyAdaptation(adaptation);
+ EXPECT_EQ(absl::nullopt,
+ adapter_.source_restrictions().max_pixels_per_frame());
+ EXPECT_EQ(absl::nullopt,
+ adapter_.source_restrictions().target_pixels_per_frame());
+ EXPECT_EQ(static_cast<double>((input_fps * 3) / 2),
+ adapter_.source_restrictions().max_frame_rate());
+ EXPECT_EQ(1, adapter_.adaptation_counters().fps_adaptations);
+}
+
+TEST_F(VideoStreamAdapterTest, MaintainResolution_IncreaseFpsToUnrestricted) {
+ adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION);
+ FakeVideoStream fake_stream(&adapter_, &input_state_provider_, 1280 * 720, 30,
+ kDefaultMinPixelsPerFrame);
+ // We are unrestricted by default and should not be able to adapt up.
+ EXPECT_EQ(Adaptation::Status::kLimitReached,
+ adapter_.GetAdaptationUp().status());
+ // If we go down once and then back up we should not have any restrictions.
+ fake_stream.ApplyAdaptation(adapter_.GetAdaptationDown());
+ EXPECT_EQ(1, adapter_.adaptation_counters().fps_adaptations);
+ fake_stream.ApplyAdaptation(adapter_.GetAdaptationUp());
+ EXPECT_EQ(VideoSourceRestrictions(), adapter_.source_restrictions());
+ EXPECT_EQ(0, adapter_.adaptation_counters().Total());
+}
+
+TEST_F(VideoStreamAdapterTest, Balanced_DecreaseFrameRate) {
+ adapter_.SetDegradationPreference(DegradationPreference::BALANCED);
+ input_state_provider_.SetInputState(kBalancedMediumResolutionPixels,
+ kBalancedHighFrameRateFps,
+ kDefaultMinPixelsPerFrame);
+ // If our frame rate is higher than the frame rate associated with our
+ // resolution we should try to adapt to the frame rate associated with our
+ // resolution: kBalancedMediumFrameRateFps.
+ Adaptation adaptation = adapter_.GetAdaptationDown();
+ EXPECT_EQ(Adaptation::Status::kValid, adaptation.status());
+ adapter_.ApplyAdaptation(adaptation, nullptr);
+ EXPECT_EQ(absl::nullopt,
+ adapter_.source_restrictions().max_pixels_per_frame());
+ EXPECT_EQ(absl::nullopt,
+ adapter_.source_restrictions().target_pixels_per_frame());
+ EXPECT_EQ(static_cast<double>(kBalancedMediumFrameRateFps),
+ adapter_.source_restrictions().max_frame_rate());
+ EXPECT_EQ(0, adapter_.adaptation_counters().resolution_adaptations);
+ EXPECT_EQ(1, adapter_.adaptation_counters().fps_adaptations);
+}
+
+TEST_F(VideoStreamAdapterTest, Balanced_DecreaseResolution) {
+ adapter_.SetDegradationPreference(DegradationPreference::BALANCED);
+ FakeVideoStream fake_stream(
+ &adapter_, &input_state_provider_, kBalancedHighResolutionPixels,
+ kBalancedHighFrameRateFps, kDefaultMinPixelsPerFrame);
+ // If we are not below the current resolution's frame rate limit, we should
+ // adapt resolution according to "maintain-framerate" logic (three fifths).
+ //
+ // However, since we are unlimited at the start and input frame rate is not
+ // below kBalancedHighFrameRateFps, we first restrict the frame rate to
+ // kBalancedHighFrameRateFps even though that is our current frame rate. This
+ // does prevent the source from going higher, though, so it's technically not
+ // a NO-OP.
+ {
+ Adaptation adaptation = adapter_.GetAdaptationDown();
+ EXPECT_EQ(Adaptation::Status::kValid, adaptation.status());
+ fake_stream.ApplyAdaptation(adaptation);
+ }
+ EXPECT_EQ(absl::nullopt,
+ adapter_.source_restrictions().max_pixels_per_frame());
+ EXPECT_EQ(absl::nullopt,
+ adapter_.source_restrictions().target_pixels_per_frame());
+ EXPECT_EQ(static_cast<double>(kBalancedHighFrameRateFps),
+ adapter_.source_restrictions().max_frame_rate());
+ EXPECT_EQ(0, adapter_.adaptation_counters().resolution_adaptations);
+ EXPECT_EQ(1, adapter_.adaptation_counters().fps_adaptations);
+ // Verify "maintain-framerate" logic the second time we adapt: Frame rate
+ // restrictions remains the same and resolution goes down.
+ {
+ Adaptation adaptation = adapter_.GetAdaptationDown();
+ EXPECT_EQ(Adaptation::Status::kValid, adaptation.status());
+ fake_stream.ApplyAdaptation(adaptation);
+ }
+ constexpr size_t kReducedPixelsFirstStep =
+ static_cast<size_t>((kBalancedHighResolutionPixels * 3) / 5);
+ EXPECT_EQ(kReducedPixelsFirstStep,
+ adapter_.source_restrictions().max_pixels_per_frame());
+ EXPECT_EQ(absl::nullopt,
+ adapter_.source_restrictions().target_pixels_per_frame());
+ EXPECT_EQ(static_cast<double>(kBalancedHighFrameRateFps),
+ adapter_.source_restrictions().max_frame_rate());
+ EXPECT_EQ(1, adapter_.adaptation_counters().resolution_adaptations);
+ EXPECT_EQ(1, adapter_.adaptation_counters().fps_adaptations);
+ // If we adapt again, because the balanced settings' proposed frame rate is
+ // still kBalancedHighFrameRateFps, "maintain-framerate" will trigger again.
+ static_assert(kReducedPixelsFirstStep > kBalancedMediumResolutionPixels,
+ "The reduced resolution is still greater than the next lower "
+ "balanced setting resolution");
+ constexpr size_t kReducedPixelsSecondStep = (kReducedPixelsFirstStep * 3) / 5;
+ {
+ Adaptation adaptation = adapter_.GetAdaptationDown();
+ EXPECT_EQ(Adaptation::Status::kValid, adaptation.status());
+ fake_stream.ApplyAdaptation(adaptation);
+ }
+ EXPECT_EQ(kReducedPixelsSecondStep,
+ adapter_.source_restrictions().max_pixels_per_frame());
+ EXPECT_EQ(absl::nullopt,
+ adapter_.source_restrictions().target_pixels_per_frame());
+ EXPECT_EQ(static_cast<double>(kBalancedHighFrameRateFps),
+ adapter_.source_restrictions().max_frame_rate());
+ EXPECT_EQ(2, adapter_.adaptation_counters().resolution_adaptations);
+ EXPECT_EQ(1, adapter_.adaptation_counters().fps_adaptations);
+}
+
+// Testing when to adapt frame rate and when to adapt resolution is quite
+// entangled, so this test covers both cases.
+//
+// There is an asymmetry: When we adapt down we do it in one order, but when we
+// adapt up we don't do it in the reverse order. Instead we always try to adapt
+// frame rate first according to balanced settings' configs and only when the
+// frame rate is already achieved do we adjust the resolution.
+TEST_F(VideoStreamAdapterTest, Balanced_IncreaseFrameRateAndResolution) {
+ adapter_.SetDegradationPreference(DegradationPreference::BALANCED);
+ FakeVideoStream fake_stream(
+ &adapter_, &input_state_provider_, kBalancedHighResolutionPixels,
+ kBalancedHighFrameRateFps, kDefaultMinPixelsPerFrame);
+ // The desired starting point of this test is having adapted frame rate twice.
+ // This requires performing a number of adaptations.
+ constexpr size_t kReducedPixelsFirstStep =
+ static_cast<size_t>((kBalancedHighResolutionPixels * 3) / 5);
+ constexpr size_t kReducedPixelsSecondStep = (kReducedPixelsFirstStep * 3) / 5;
+ constexpr size_t kReducedPixelsThirdStep = (kReducedPixelsSecondStep * 3) / 5;
+ static_assert(kReducedPixelsFirstStep > kBalancedMediumResolutionPixels,
+ "The first pixel reduction is greater than the balanced "
+ "settings' medium pixel configuration");
+ static_assert(kReducedPixelsSecondStep > kBalancedMediumResolutionPixels,
+ "The second pixel reduction is greater than the balanced "
+ "settings' medium pixel configuration");
+ static_assert(kReducedPixelsThirdStep <= kBalancedMediumResolutionPixels,
+ "The third pixel reduction is NOT greater than the balanced "
+ "settings' medium pixel configuration");
+ // The first adaptation should affect the frame rate: See
+ // Balanced_DecreaseResolution for explanation why.
+ fake_stream.ApplyAdaptation(adapter_.GetAdaptationDown());
+ EXPECT_EQ(static_cast<double>(kBalancedHighFrameRateFps),
+ adapter_.source_restrictions().max_frame_rate());
+ // The next three adaptations affects the resolution, because we have to reach
+ // kBalancedMediumResolutionPixels before a lower frame rate is considered by
+ // BalancedDegradationSettings. The number three is derived from the
+ // static_asserts above.
+ fake_stream.ApplyAdaptation(adapter_.GetAdaptationDown());
+ EXPECT_EQ(kReducedPixelsFirstStep,
+ adapter_.source_restrictions().max_pixels_per_frame());
+ fake_stream.ApplyAdaptation(adapter_.GetAdaptationDown());
+ EXPECT_EQ(kReducedPixelsSecondStep,
+ adapter_.source_restrictions().max_pixels_per_frame());
+ fake_stream.ApplyAdaptation(adapter_.GetAdaptationDown());
+ EXPECT_EQ(kReducedPixelsThirdStep,
+ adapter_.source_restrictions().max_pixels_per_frame());
+ // Thus, the next adaptation will reduce frame rate to
+ // kBalancedMediumFrameRateFps.
+ fake_stream.ApplyAdaptation(adapter_.GetAdaptationDown());
+ EXPECT_EQ(static_cast<double>(kBalancedMediumFrameRateFps),
+ adapter_.source_restrictions().max_frame_rate());
+ EXPECT_EQ(3, adapter_.adaptation_counters().resolution_adaptations);
+ EXPECT_EQ(2, adapter_.adaptation_counters().fps_adaptations);
+ // Adapt up!
+ // While our resolution is in the medium-range, the frame rate associated with
+ // the next resolution configuration up ("high") is kBalancedHighFrameRateFps
+ // and "balanced" prefers adapting frame rate if not already applied.
+ {
+ Adaptation adaptation = adapter_.GetAdaptationUp();
+ EXPECT_EQ(Adaptation::Status::kValid, adaptation.status());
+ fake_stream.ApplyAdaptation(adaptation);
+ EXPECT_EQ(static_cast<double>(kBalancedHighFrameRateFps),
+ adapter_.source_restrictions().max_frame_rate());
+ EXPECT_EQ(3, adapter_.adaptation_counters().resolution_adaptations);
+ EXPECT_EQ(1, adapter_.adaptation_counters().fps_adaptations);
+ }
+ // Now that we have already achieved the next frame rate up, we act according
+ // to "maintain-framerate". We go back up in resolution. Due to rounding
+ // errors we don't end up back at kReducedPixelsSecondStep. Rather we get to
+ // kReducedPixelsSecondStepUp, which is off by one compared to
+ // kReducedPixelsSecondStep.
+ constexpr size_t kReducedPixelsSecondStepUp =
+ (kReducedPixelsThirdStep * 5) / 3;
+ {
+ Adaptation adaptation = adapter_.GetAdaptationUp();
+ EXPECT_EQ(Adaptation::Status::kValid, adaptation.status());
+ fake_stream.ApplyAdaptation(adaptation);
+ EXPECT_EQ(kReducedPixelsSecondStepUp,
+ adapter_.source_restrictions().target_pixels_per_frame());
+ EXPECT_EQ(2, adapter_.adaptation_counters().resolution_adaptations);
+ EXPECT_EQ(1, adapter_.adaptation_counters().fps_adaptations);
+ }
+ // Now that our resolution is back in the high-range, the next frame rate to
+ // try out is "unlimited".
+ {
+ Adaptation adaptation = adapter_.GetAdaptationUp();
+ EXPECT_EQ(Adaptation::Status::kValid, adaptation.status());
+ fake_stream.ApplyAdaptation(adaptation);
+ EXPECT_EQ(absl::nullopt, adapter_.source_restrictions().max_frame_rate());
+ EXPECT_EQ(2, adapter_.adaptation_counters().resolution_adaptations);
+ EXPECT_EQ(0, adapter_.adaptation_counters().fps_adaptations);
+ }
+ // Now only adapting resolution remains.
+ constexpr size_t kReducedPixelsFirstStepUp =
+ (kReducedPixelsSecondStepUp * 5) / 3;
+ {
+ Adaptation adaptation = adapter_.GetAdaptationUp();
+ EXPECT_EQ(Adaptation::Status::kValid, adaptation.status());
+ fake_stream.ApplyAdaptation(adaptation);
+ EXPECT_EQ(kReducedPixelsFirstStepUp,
+ adapter_.source_restrictions().target_pixels_per_frame());
+ EXPECT_EQ(1, adapter_.adaptation_counters().resolution_adaptations);
+ EXPECT_EQ(0, adapter_.adaptation_counters().fps_adaptations);
+ }
+ // The last step up should make us entirely unrestricted.
+ {
+ Adaptation adaptation = adapter_.GetAdaptationUp();
+ EXPECT_EQ(Adaptation::Status::kValid, adaptation.status());
+ fake_stream.ApplyAdaptation(adaptation);
+ EXPECT_EQ(VideoSourceRestrictions(), adapter_.source_restrictions());
+ EXPECT_EQ(0, adapter_.adaptation_counters().Total());
+ }
+}
+
+// Verifies BALANCED behavior at the lower limit: adapting up while already
+// unrestricted yields kLimitReached, and repeated down-adaptation from an
+// already-low input eventually bottoms out (kLimitReached) while the frame
+// rate restriction stays put.
+TEST_F(VideoStreamAdapterTest, Balanced_LimitReached) {
+  adapter_.SetDegradationPreference(DegradationPreference::BALANCED);
+  FakeVideoStream fake_stream(
+      &adapter_, &input_state_provider_, kBalancedLowResolutionPixels,
+      kBalancedLowFrameRateFps, kDefaultMinPixelsPerFrame);
+  // Attempting to adapt up while unrestricted should result in kLimitReached.
+  EXPECT_EQ(Adaptation::Status::kLimitReached,
+            adapter_.GetAdaptationUp().status());
+  // Adapting down once result in restricted frame rate, in this case we reach
+  // the lowest possible frame rate immediately: kBalancedLowFrameRateFps.
+  EXPECT_CALL(encoder_stats_observer_, OnMinPixelLimitReached()).Times(2);
+  fake_stream.ApplyAdaptation(adapter_.GetAdaptationDown());
+  EXPECT_EQ(static_cast<double>(kBalancedLowFrameRateFps),
+            adapter_.source_restrictions().max_frame_rate());
+  EXPECT_EQ(1, adapter_.adaptation_counters().fps_adaptations);
+  // Any further adaptation must follow "maintain-framerate" rules (these are
+  // covered in more depth by the MaintainFramerate tests). This test does not
+  // assert exactly how resolution is adjusted, only that resolution always
+  // decreases and that we eventually reach kLimitReached.
+  size_t previous_resolution = kBalancedLowResolutionPixels;
+  bool did_reach_limit = false;
+  // If we have not reached the limit within 5 adaptations something is wrong...
+  for (int i = 0; i < 5; i++) {
+    Adaptation adaptation = adapter_.GetAdaptationDown();
+    if (adaptation.status() == Adaptation::Status::kLimitReached) {
+      did_reach_limit = true;
+      break;
+    }
+    EXPECT_EQ(Adaptation::Status::kValid, adaptation.status());
+    fake_stream.ApplyAdaptation(adaptation);
+    EXPECT_LT(adapter_.source_restrictions().max_pixels_per_frame().value(),
+              previous_resolution);
+    previous_resolution =
+        adapter_.source_restrictions().max_pixels_per_frame().value();
+  }
+  EXPECT_TRUE(did_reach_limit);
+  // Frame rate restrictions are the same as before.
+  EXPECT_EQ(static_cast<double>(kBalancedLowFrameRateFps),
+            adapter_.source_restrictions().max_frame_rate());
+  EXPECT_EQ(1, adapter_.adaptation_counters().fps_adaptations);
+}
+
+// kAwaitingPreviousAdaptation is only supported in "maintain-framerate".
+// A down-adaptation that has been applied but not yet reflected in the input
+// state blocks further down-adaptation in the same direction.
+TEST_F(VideoStreamAdapterTest,
+       MaintainFramerate_AwaitingPreviousAdaptationDown) {
+  adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE);
+  input_state_provider_.SetInputState(1280 * 720, 30,
+                                      kDefaultMinPixelsPerFrame);
+  // Adapt down once, but don't update the input (nullptr = no resource).
+  adapter_.ApplyAdaptation(adapter_.GetAdaptationDown(), nullptr);
+  EXPECT_EQ(1, adapter_.adaptation_counters().resolution_adaptations);
+  {
+    // Having performed the adaptation, but not updated the input based on the
+    // new restrictions, adapting again in the same direction will not work.
+    Adaptation adaptation = adapter_.GetAdaptationDown();
+    EXPECT_EQ(Adaptation::Status::kAwaitingPreviousAdaptation,
+              adaptation.status());
+  }
+}
+
+// kAwaitingPreviousAdaptation is only supported in "maintain-framerate".
+// Same as the Down variant above, but for the up direction.
+TEST_F(VideoStreamAdapterTest, MaintainFramerate_AwaitingPreviousAdaptationUp) {
+  adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE);
+  FakeVideoStream fake_stream(&adapter_, &input_state_provider_, 1280 * 720, 30,
+                              kDefaultMinPixelsPerFrame);
+  // Perform two adaptation down so that adapting up twice is possible.
+  fake_stream.ApplyAdaptation(adapter_.GetAdaptationDown());
+  fake_stream.ApplyAdaptation(adapter_.GetAdaptationDown());
+  EXPECT_EQ(2, adapter_.adaptation_counters().resolution_adaptations);
+  // Adapt up once, but don't update the input.
+  adapter_.ApplyAdaptation(adapter_.GetAdaptationUp(), nullptr);
+  EXPECT_EQ(1, adapter_.adaptation_counters().resolution_adaptations);
+  {
+    // Having performed the adaptation, but not updated the input based on the
+    // new restrictions, adapting again in the same direction will not work.
+    Adaptation adaptation = adapter_.GetAdaptationUp();
+    EXPECT_EQ(Adaptation::Status::kAwaitingPreviousAdaptation,
+              adaptation.status());
+  }
+}
+
+// An fps restriction applied under MAINTAIN_RESOLUTION survives a round trip
+// through MAINTAIN_FRAMERATE and can still be adapted up (removed) after
+// switching back.
+TEST_F(VideoStreamAdapterTest,
+       MaintainResolution_AdaptsUpAfterSwitchingDegradationPreference) {
+  adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION);
+  FakeVideoStream fake_stream(&adapter_, &input_state_provider_, 1280 * 720, 30,
+                              kDefaultMinPixelsPerFrame);
+  // Adapt down in fps for later.
+  fake_stream.ApplyAdaptation(adapter_.GetAdaptationDown());
+  EXPECT_EQ(1, adapter_.adaptation_counters().fps_adaptations);
+
+  adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE);
+  fake_stream.ApplyAdaptation(adapter_.GetAdaptationDown());
+  fake_stream.ApplyAdaptation(adapter_.GetAdaptationUp());
+  EXPECT_EQ(1, adapter_.adaptation_counters().fps_adaptations);
+  EXPECT_EQ(0, adapter_.adaptation_counters().resolution_adaptations);
+
+  // We should be able to adapt in framerate one last time after the change of
+  // degradation preference.
+  adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION);
+  Adaptation adaptation = adapter_.GetAdaptationUp();
+  EXPECT_EQ(Adaptation::Status::kValid, adaptation.status());
+  fake_stream.ApplyAdaptation(adapter_.GetAdaptationUp());
+  EXPECT_EQ(0, adapter_.adaptation_counters().fps_adaptations);
+}
+
+// Mirror of the test above: a resolution restriction applied under
+// MAINTAIN_FRAMERATE survives a round trip through MAINTAIN_RESOLUTION and
+// can still be adapted up after switching back.
+TEST_F(VideoStreamAdapterTest,
+       MaintainFramerate_AdaptsUpAfterSwitchingDegradationPreference) {
+  adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE);
+  FakeVideoStream fake_stream(&adapter_, &input_state_provider_, 1280 * 720, 30,
+                              kDefaultMinPixelsPerFrame);
+  // Adapt down in resolution for later.
+  fake_stream.ApplyAdaptation(adapter_.GetAdaptationDown());
+  EXPECT_EQ(1, adapter_.adaptation_counters().resolution_adaptations);
+
+  adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION);
+  fake_stream.ApplyAdaptation(adapter_.GetAdaptationDown());
+  fake_stream.ApplyAdaptation(adapter_.GetAdaptationUp());
+  EXPECT_EQ(1, adapter_.adaptation_counters().resolution_adaptations);
+  EXPECT_EQ(0, adapter_.adaptation_counters().fps_adaptations);
+
+  // We should be able to adapt in framerate one last time after the change of
+  // degradation preference.
+  adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE);
+  Adaptation adaptation = adapter_.GetAdaptationUp();
+  EXPECT_EQ(Adaptation::Status::kValid, adaptation.status());
+  fake_stream.ApplyAdaptation(adapter_.GetAdaptationUp());
+  EXPECT_EQ(0, adapter_.adaptation_counters().resolution_adaptations);
+}
+
+// A pending (not yet reflected in input) resolution increase blocks adapting
+// up under MAINTAIN_FRAMERATE, but switching to MAINTAIN_RESOLUTION makes
+// adapting up valid again since fps is then the adapted dimension.
+TEST_F(VideoStreamAdapterTest,
+       PendingResolutionIncreaseAllowsAdaptUpAfterSwitchToMaintainResolution) {
+  adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION);
+  FakeVideoStream fake_stream(&adapter_, &input_state_provider_, 1280 * 720, 30,
+                              kDefaultMinPixelsPerFrame);
+  // Adapt fps down so we can adapt up later in the test.
+  fake_stream.ApplyAdaptation(adapter_.GetAdaptationDown());
+
+  adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE);
+  fake_stream.ApplyAdaptation(adapter_.GetAdaptationDown());
+  // Apply adaptation up but don't update input.
+  adapter_.ApplyAdaptation(adapter_.GetAdaptationUp(), nullptr);
+  EXPECT_EQ(Adaptation::Status::kAwaitingPreviousAdaptation,
+            adapter_.GetAdaptationUp().status());
+
+  adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION);
+  Adaptation adaptation = adapter_.GetAdaptationUp();
+  EXPECT_EQ(Adaptation::Status::kValid, adaptation.status());
+}
+
+// Adapting down still works right after the degradation preference changes
+// from MAINTAIN_RESOLUTION to MAINTAIN_FRAMERATE; the new down-step reduces
+// resolution while the earlier fps restriction is kept.
+TEST_F(VideoStreamAdapterTest,
+       MaintainFramerate_AdaptsDownAfterSwitchingDegradationPreference) {
+  adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION);
+  FakeVideoStream fake_stream(&adapter_, &input_state_provider_, 1280 * 720, 30,
+                              kDefaultMinPixelsPerFrame);
+  // Adapt down once, should change FPS.
+  fake_stream.ApplyAdaptation(adapter_.GetAdaptationDown());
+  EXPECT_EQ(1, adapter_.adaptation_counters().fps_adaptations);
+
+  adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE);
+  // Adaptation down should apply after the degradation prefs change.
+  Adaptation adaptation = adapter_.GetAdaptationDown();
+  EXPECT_EQ(Adaptation::Status::kValid, adaptation.status());
+  fake_stream.ApplyAdaptation(adaptation);
+  EXPECT_EQ(1, adapter_.adaptation_counters().fps_adaptations);
+  EXPECT_EQ(1, adapter_.adaptation_counters().resolution_adaptations);
+}
+
+// Mirror of the test above: switching from MAINTAIN_FRAMERATE to
+// MAINTAIN_RESOLUTION, the next down-step reduces fps while keeping the
+// earlier resolution restriction.
+TEST_F(VideoStreamAdapterTest,
+       MaintainResolution_AdaptsDownAfterSwitchingDegradationPreference) {
+  adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE);
+  FakeVideoStream fake_stream(&adapter_, &input_state_provider_, 1280 * 720, 30,
+                              kDefaultMinPixelsPerFrame);
+  // Adapt down once; under MAINTAIN_FRAMERATE this reduces resolution.
+  fake_stream.ApplyAdaptation(adapter_.GetAdaptationDown());
+  EXPECT_EQ(1, adapter_.adaptation_counters().resolution_adaptations);
+
+  adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION);
+  Adaptation adaptation = adapter_.GetAdaptationDown();
+  EXPECT_EQ(Adaptation::Status::kValid, adaptation.status());
+  fake_stream.ApplyAdaptation(adaptation);
+
+  EXPECT_EQ(1, adapter_.adaptation_counters().fps_adaptations);
+  EXPECT_EQ(1, adapter_.adaptation_counters().resolution_adaptations);
+}
+
+// A pending resolution decrease blocks further down-adaptation under
+// MAINTAIN_FRAMERATE, but after switching to MAINTAIN_RESOLUTION the next
+// down-step (fps) is valid again.
+TEST_F(
+    VideoStreamAdapterTest,
+    PendingResolutionDecreaseAllowsAdaptDownAfterSwitchToMaintainResolution) {
+  adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE);
+  FakeVideoStream fake_stream(&adapter_, &input_state_provider_, 1280 * 720, 30,
+                              kDefaultMinPixelsPerFrame);
+  // Apply adaptation but don't update the input.
+  adapter_.ApplyAdaptation(adapter_.GetAdaptationDown(), nullptr);
+  EXPECT_EQ(Adaptation::Status::kAwaitingPreviousAdaptation,
+            adapter_.GetAdaptationDown().status());
+  adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION);
+  Adaptation adaptation = adapter_.GetAdaptationDown();
+  EXPECT_EQ(Adaptation::Status::kValid, adaptation.status());
+}
+
+// Restrictions listeners are notified on every successful ApplyAdaptation and
+// on ClearRestrictions, but not when an invalid adaptation is applied.
+TEST_F(VideoStreamAdapterTest, RestrictionBroadcasted) {
+  FakeVideoStreamAdapterListner listener;
+  adapter_.AddRestrictionsListener(&listener);
+  adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE);
+  FakeVideoStream fake_stream(&adapter_, &input_state_provider_, 1280 * 720, 30,
+                              kDefaultMinPixelsPerFrame);
+  // Not broadcast on invalid ApplyAdaptation.
+  {
+    Adaptation adaptation = adapter_.GetAdaptationUp();
+    adapter_.ApplyAdaptation(adaptation, nullptr);
+    EXPECT_EQ(0, listener.calls());
+  }
+
+  // Broadcast on ApplyAdaptation.
+  {
+    Adaptation adaptation = adapter_.GetAdaptationDown();
+    fake_stream.ApplyAdaptation(adaptation);
+    EXPECT_EQ(1, listener.calls());
+    EXPECT_EQ(adaptation.restrictions(), listener.last_restrictions());
+  }
+
+  // Broadcast on ClearRestrictions().
+  adapter_.ClearRestrictions();
+  EXPECT_EQ(2, listener.calls());
+  EXPECT_EQ(VideoSourceRestrictions(), listener.last_restrictions());
+}
+
+// An Adaptation object carries the restrictions/counters that will take
+// effect once it is applied; after applying, the adapter's state matches.
+// (NOTE: "Restrcitions" typo is the upstream test name; kept as-is.)
+TEST_F(VideoStreamAdapterTest, AdaptationHasNextRestrcitions) {
+  // Any non-disabled DegradationPreference will do.
+  adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE);
+  FakeVideoStream fake_stream(&adapter_, &input_state_provider_, 1280 * 720, 30,
+                              kDefaultMinPixelsPerFrame);
+  // When adaptation is not possible.
+  {
+    Adaptation adaptation = adapter_.GetAdaptationUp();
+    EXPECT_EQ(Adaptation::Status::kLimitReached, adaptation.status());
+    EXPECT_EQ(adaptation.restrictions(), adapter_.source_restrictions());
+    EXPECT_EQ(0, adaptation.counters().Total());
+  }
+  // When we adapt down.
+  {
+    Adaptation adaptation = adapter_.GetAdaptationDown();
+    EXPECT_EQ(Adaptation::Status::kValid, adaptation.status());
+    fake_stream.ApplyAdaptation(adaptation);
+    EXPECT_EQ(adaptation.restrictions(), adapter_.source_restrictions());
+    EXPECT_EQ(adaptation.counters(), adapter_.adaptation_counters());
+  }
+  // When we adapt up.
+  {
+    Adaptation adaptation = adapter_.GetAdaptationUp();
+    EXPECT_EQ(Adaptation::Status::kValid, adaptation.status());
+    fake_stream.ApplyAdaptation(adaptation);
+    EXPECT_EQ(adaptation.restrictions(), adapter_.source_restrictions());
+    EXPECT_EQ(adaptation.counters(), adapter_.adaptation_counters());
+  }
+}
+
+// Switching into or out of BALANCED resets all restrictions and counters;
+// switches between the other preferences (tested elsewhere) do not.
+TEST_F(VideoStreamAdapterTest,
+       SetDegradationPreferenceToOrFromBalancedClearsRestrictions) {
+  adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE);
+  input_state_provider_.SetInputState(1280 * 720, 30,
+                                      kDefaultMinPixelsPerFrame);
+  adapter_.ApplyAdaptation(adapter_.GetAdaptationDown(), nullptr);
+  EXPECT_NE(VideoSourceRestrictions(), adapter_.source_restrictions());
+  EXPECT_NE(0, adapter_.adaptation_counters().Total());
+  // Changing from non-balanced to balanced clears the restrictions.
+  adapter_.SetDegradationPreference(DegradationPreference::BALANCED);
+  EXPECT_EQ(VideoSourceRestrictions(), adapter_.source_restrictions());
+  EXPECT_EQ(0, adapter_.adaptation_counters().Total());
+  // Apply adaptation again.
+  adapter_.ApplyAdaptation(adapter_.GetAdaptationDown(), nullptr);
+  EXPECT_NE(VideoSourceRestrictions(), adapter_.source_restrictions());
+  EXPECT_NE(0, adapter_.adaptation_counters().Total());
+  // Changing from balanced to non-balanced clears the restrictions.
+  adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION);
+  EXPECT_EQ(VideoSourceRestrictions(), adapter_.source_restrictions());
+  EXPECT_EQ(0, adapter_.adaptation_counters().Total());
+}
+
+// GetAdaptDownResolution() behaves like a plain resolution down-step under
+// MAINTAIN_FRAMERATE: one resolution adaptation, no fps adaptation.
+TEST_F(VideoStreamAdapterTest,
+       GetAdaptDownResolutionAdaptsResolutionInMaintainFramerate) {
+  adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE);
+  input_state_provider_.SetInputState(1280 * 720, 30,
+                                      kDefaultMinPixelsPerFrame);
+
+  auto adaptation = adapter_.GetAdaptDownResolution();
+  EXPECT_EQ(Adaptation::Status::kValid, adaptation.status());
+  EXPECT_EQ(1, adaptation.counters().resolution_adaptations);
+  EXPECT_EQ(0, adaptation.counters().fps_adaptations);
+}
+
+// GetAdaptDownResolution() is rejected outright when adaptation is DISABLED,
+// and reports kLimitReached under MAINTAIN_RESOLUTION (resolution may not be
+// reduced in that mode).
+TEST_F(VideoStreamAdapterTest,
+       GetAdaptDownResolutionReturnsWithStatusInDisabledAndMaintainResolution) {
+  adapter_.SetDegradationPreference(DegradationPreference::DISABLED);
+  input_state_provider_.SetInputState(1280 * 720, 30,
+                                      kDefaultMinPixelsPerFrame);
+  EXPECT_EQ(Adaptation::Status::kAdaptationDisabled,
+            adapter_.GetAdaptDownResolution().status());
+  adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION);
+  EXPECT_EQ(Adaptation::Status::kLimitReached,
+            adapter_.GetAdaptDownResolution().status());
+}
+
+TEST_F(VideoStreamAdapterTest,
+       GetAdaptDownResolutionAdaptsFpsAndResolutionInBalanced) {
+  // Note: This test depends on BALANCED implementation, but with current
+  // implementation and input state settings, BALANCED will adapt resolution and
+  // frame rate once.
+  adapter_.SetDegradationPreference(DegradationPreference::BALANCED);
+  input_state_provider_.SetInputState(1280 * 720, 30,
+                                      kDefaultMinPixelsPerFrame);
+
+  // A single GetAdaptDownResolution() step in BALANCED may touch both
+  // dimensions at once.
+  auto adaptation = adapter_.GetAdaptDownResolution();
+  EXPECT_EQ(Adaptation::Status::kValid, adaptation.status());
+  EXPECT_EQ(1, adaptation.counters().resolution_adaptations);
+  EXPECT_EQ(1, adaptation.counters().fps_adaptations);
+}
+
+TEST_F(
+    VideoStreamAdapterTest,
+    GetAdaptDownResolutionAdaptsOnlyResolutionIfFpsAlreadyAdapterInBalanced) {
+  // Note: This test depends on BALANCED implementation, but with current
+  // implementation and input state settings, BALANCED will adapt resolution
+  // only.
+  adapter_.SetDegradationPreference(DegradationPreference::BALANCED);
+  input_state_provider_.SetInputState(1280 * 720, 5, kDefaultMinPixelsPerFrame);
+  FakeVideoStream fake_stream(&adapter_, &input_state_provider_, 1280 * 720, 30,
+                              kDefaultMinPixelsPerFrame);
+
+  // First adaptation already restricts fps; the subsequent
+  // GetAdaptDownResolution() should only add a resolution step.
+  auto first_adaptation = adapter_.GetAdaptationDown();
+  fake_stream.ApplyAdaptation(first_adaptation);
+
+  auto adaptation = adapter_.GetAdaptDownResolution();
+  EXPECT_EQ(Adaptation::Status::kValid, adaptation.status());
+  EXPECT_EQ(1, adaptation.counters().resolution_adaptations);
+  EXPECT_EQ(first_adaptation.counters().fps_adaptations,
+            adaptation.counters().fps_adaptations);
+}
+
+TEST_F(VideoStreamAdapterTest,
+       GetAdaptDownResolutionAdaptsOnlyFpsIfResolutionLowInBalanced) {
+  // Note: This test depends on BALANCED implementation, but with current
+  // implementation and input state settings (resolution already at the
+  // minimum), BALANCED will adapt frame rate only.
+  adapter_.SetDegradationPreference(DegradationPreference::BALANCED);
+  input_state_provider_.SetInputState(kDefaultMinPixelsPerFrame, 30,
+                                      kDefaultMinPixelsPerFrame);
+
+  auto adaptation = adapter_.GetAdaptDownResolution();
+  EXPECT_EQ(Adaptation::Status::kValid, adaptation.status());
+  EXPECT_EQ(0, adaptation.counters().resolution_adaptations);
+  EXPECT_EQ(1, adaptation.counters().fps_adaptations);
+}
+
+// With DegradationPreference::DISABLED every adaptation query — up, down,
+// and GetAdaptDownResolution — reports kAdaptationDisabled.
+TEST_F(VideoStreamAdapterTest,
+       AdaptationDisabledStatusAlwaysWhenDegradationPreferenceDisabled) {
+  adapter_.SetDegradationPreference(DegradationPreference::DISABLED);
+  input_state_provider_.SetInputState(1280 * 720, 30,
+                                      kDefaultMinPixelsPerFrame);
+  EXPECT_EQ(Adaptation::Status::kAdaptationDisabled,
+            adapter_.GetAdaptationDown().status());
+  EXPECT_EQ(Adaptation::Status::kAdaptationDisabled,
+            adapter_.GetAdaptationUp().status());
+  EXPECT_EQ(Adaptation::Status::kAdaptationDisabled,
+            adapter_.GetAdaptDownResolution().status());
+}
+
+// A registered AdaptationConstraint that returns true from
+// IsAdaptationUpAllowed() lets GetAdaptationUp() produce a valid adaptation.
+TEST_F(VideoStreamAdapterTest, AdaptationConstraintAllowsAdaptationsUp) {
+  testing::StrictMock<MockAdaptationConstraint> adaptation_constraint;
+  adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE);
+  adapter_.AddAdaptationConstraint(&adaptation_constraint);
+  input_state_provider_.SetInputState(1280 * 720, 30,
+                                      kDefaultMinPixelsPerFrame);
+  FakeVideoStream fake_stream(&adapter_, &input_state_provider_, 1280 * 720, 30,
+                              kDefaultMinPixelsPerFrame);
+  // Adapt down once so we can adapt up later.
+  auto first_adaptation = adapter_.GetAdaptationDown();
+  fake_stream.ApplyAdaptation(first_adaptation);
+
+  EXPECT_CALL(adaptation_constraint,
+              IsAdaptationUpAllowed(_, first_adaptation.restrictions(), _))
+      .WillOnce(Return(true));
+  EXPECT_EQ(Adaptation::Status::kValid, adapter_.GetAdaptationUp().status());
+  adapter_.RemoveAdaptationConstraint(&adaptation_constraint);
+}
+
+// Mirror of the test above: a constraint returning false causes
+// GetAdaptationUp() to report kRejectedByConstraint.
+TEST_F(VideoStreamAdapterTest, AdaptationConstraintDisallowsAdaptationsUp) {
+  testing::StrictMock<MockAdaptationConstraint> adaptation_constraint;
+  adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE);
+  adapter_.AddAdaptationConstraint(&adaptation_constraint);
+  input_state_provider_.SetInputState(1280 * 720, 30,
+                                      kDefaultMinPixelsPerFrame);
+  FakeVideoStream fake_stream(&adapter_, &input_state_provider_, 1280 * 720, 30,
+                              kDefaultMinPixelsPerFrame);
+  // Adapt down once so we can adapt up later.
+  auto first_adaptation = adapter_.GetAdaptationDown();
+  fake_stream.ApplyAdaptation(first_adaptation);
+
+  EXPECT_CALL(adaptation_constraint,
+              IsAdaptationUpAllowed(_, first_adaptation.restrictions(), _))
+      .WillOnce(Return(false));
+  EXPECT_EQ(Adaptation::Status::kRejectedByConstraint,
+            adapter_.GetAdaptationUp().status());
+  adapter_.RemoveAdaptationConstraint(&adaptation_constraint);
+}
+
+// Death tests.
+// Disabled on Android because death tests misbehave on Android, see
+// base/test/gtest_util.h.
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+// Applying an Adaptation obtained before a degradation-preference change is a
+// programming error: the DCHECK in ApplyAdaptation should fire.
+TEST(VideoStreamAdapterDeathTest,
+     SetDegradationPreferenceInvalidatesAdaptations) {
+  webrtc::test::ScopedKeyValueConfig field_trials;
+  FakeVideoStreamInputStateProvider input_state_provider;
+  testing::StrictMock<MockVideoStreamEncoderObserver> encoder_stats_observer_;
+  VideoStreamAdapter adapter(&input_state_provider, &encoder_stats_observer_,
+                             field_trials);
+  adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE);
+  input_state_provider.SetInputState(1280 * 720, 30, kDefaultMinPixelsPerFrame);
+  Adaptation adaptation = adapter.GetAdaptationDown();
+  adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION);
+  EXPECT_DEATH(adapter.ApplyAdaptation(adaptation, nullptr), "");
+}
+
+// Requesting another adaptation invalidates any previously obtained
+// Adaptation object; applying the stale one should trip the DCHECK.
+TEST(VideoStreamAdapterDeathTest, AdaptDownInvalidatesAdaptations) {
+  webrtc::test::ScopedKeyValueConfig field_trials;
+  FakeVideoStreamInputStateProvider input_state_provider;
+  testing::StrictMock<MockVideoStreamEncoderObserver> encoder_stats_observer_;
+  VideoStreamAdapter adapter(&input_state_provider, &encoder_stats_observer_,
+                             field_trials);
+  adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION);
+  input_state_provider.SetInputState(1280 * 720, 30, kDefaultMinPixelsPerFrame);
+  Adaptation adaptation = adapter.GetAdaptationDown();
+  adapter.GetAdaptationDown();
+  EXPECT_DEATH(adapter.ApplyAdaptation(adaptation, nullptr), "");
+}
+
+#endif // RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/adaptation/video_stream_input_state.cc b/third_party/libwebrtc/call/adaptation/video_stream_input_state.cc
new file mode 100644
index 0000000000..9c0d475902
--- /dev/null
+++ b/third_party/libwebrtc/call/adaptation/video_stream_input_state.cc
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/adaptation/video_stream_input_state.h"
+
+#include "api/video_codecs/video_encoder.h"
+
+namespace webrtc {
+
+// Default state: no input yet, unknown frame size, generic codec, and the
+// library-wide default minimum pixel count.
+VideoStreamInputState::VideoStreamInputState()
+    : has_input_(false),
+      frame_size_pixels_(absl::nullopt),
+      frames_per_second_(0),
+      video_codec_type_(VideoCodecType::kVideoCodecGeneric),
+      min_pixels_per_frame_(kDefaultMinPixelsPerFrame),
+      single_active_stream_pixels_(absl::nullopt) {}
+
+// Trivial setters; each overwrites the corresponding cached value.
+void VideoStreamInputState::set_has_input(bool has_input) {
+  has_input_ = has_input;
+}
+
+void VideoStreamInputState::set_frame_size_pixels(
+    absl::optional<int> frame_size_pixels) {
+  frame_size_pixels_ = frame_size_pixels;
+}
+
+void VideoStreamInputState::set_frames_per_second(int frames_per_second) {
+  frames_per_second_ = frames_per_second;
+}
+
+void VideoStreamInputState::set_video_codec_type(
+    VideoCodecType video_codec_type) {
+  video_codec_type_ = video_codec_type;
+}
+
+void VideoStreamInputState::set_min_pixels_per_frame(int min_pixels_per_frame) {
+  min_pixels_per_frame_ = min_pixels_per_frame;
+}
+
+void VideoStreamInputState::set_single_active_stream_pixels(
+    absl::optional<int> single_active_stream_pixels) {
+  single_active_stream_pixels_ = single_active_stream_pixels;
+}
+
+// Trivial getters mirroring the setters above.
+bool VideoStreamInputState::has_input() const {
+  return has_input_;
+}
+
+absl::optional<int> VideoStreamInputState::frame_size_pixels() const {
+  return frame_size_pixels_;
+}
+
+int VideoStreamInputState::frames_per_second() const {
+  return frames_per_second_;
+}
+
+VideoCodecType VideoStreamInputState::video_codec_type() const {
+  return video_codec_type_;
+}
+
+int VideoStreamInputState::min_pixels_per_frame() const {
+  return min_pixels_per_frame_;
+}
+
+absl::optional<int> VideoStreamInputState::single_active_stream_pixels() const {
+  return single_active_stream_pixels_;
+}
+
+// True once both preconditions for adaptation decisions are met: input has
+// been reported and a frame size has been observed.
+bool VideoStreamInputState::HasInputFrameSizeAndFramesPerSecond() const {
+  return has_input_ && frame_size_pixels_.has_value();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/adaptation/video_stream_input_state.h b/third_party/libwebrtc/call/adaptation/video_stream_input_state.h
new file mode 100644
index 0000000000..191e22386a
--- /dev/null
+++ b/third_party/libwebrtc/call/adaptation/video_stream_input_state.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef CALL_ADAPTATION_VIDEO_STREAM_INPUT_STATE_H_
+#define CALL_ADAPTATION_VIDEO_STREAM_INPUT_STATE_H_
+
+#include "absl/types/optional.h"
+#include "api/video/video_codec_type.h"
+
+namespace webrtc {
+
+// The source resolution, frame rate and other properties of a
+// VideoStreamEncoder.
+class VideoStreamInputState {
+ public:
+  VideoStreamInputState();
+
+  // Setters overwrite the cached value; see the matching getters below.
+  void set_has_input(bool has_input);
+  void set_frame_size_pixels(absl::optional<int> frame_size_pixels);
+  void set_frames_per_second(int frames_per_second);
+  void set_video_codec_type(VideoCodecType video_codec_type);
+  void set_min_pixels_per_frame(int min_pixels_per_frame);
+  void set_single_active_stream_pixels(
+      absl::optional<int> single_active_stream_pixels);
+
+  bool has_input() const;
+  absl::optional<int> frame_size_pixels() const;
+  int frames_per_second() const;
+  VideoCodecType video_codec_type() const;
+  int min_pixels_per_frame() const;
+  absl::optional<int> single_active_stream_pixels() const;
+
+  // True when both has_input() and frame_size_pixels() are set.
+  bool HasInputFrameSizeAndFramesPerSecond() const;
+
+ private:
+  bool has_input_;                     // Whether any input has been reported.
+  absl::optional<int> frame_size_pixels_;  // Last observed frame size, if any.
+  int frames_per_second_;
+  VideoCodecType video_codec_type_;
+  int min_pixels_per_frame_;           // Lower bound for resolution adaptation.
+  absl::optional<int> single_active_stream_pixels_;
+};
+
+} // namespace webrtc
+
+#endif // CALL_ADAPTATION_VIDEO_STREAM_INPUT_STATE_H_
diff --git a/third_party/libwebrtc/call/adaptation/video_stream_input_state_provider.cc b/third_party/libwebrtc/call/adaptation/video_stream_input_state_provider.cc
new file mode 100644
index 0000000000..3261af39ea
--- /dev/null
+++ b/third_party/libwebrtc/call/adaptation/video_stream_input_state_provider.cc
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/adaptation/video_stream_input_state_provider.h"
+
+#include "call/adaptation/video_stream_adapter.h"
+
+namespace webrtc {
+
+VideoStreamInputStateProvider::VideoStreamInputStateProvider(
+    VideoStreamEncoderObserver* frame_rate_provider)
+    : frame_rate_provider_(frame_rate_provider) {}
+
+VideoStreamInputStateProvider::~VideoStreamInputStateProvider() {}
+
+// Records whether the stream currently has input. Mutex-protected because
+// the state is read by InputState() and written from observer callbacks.
+void VideoStreamInputStateProvider::OnHasInputChanged(bool has_input) {
+  MutexLock lock(&mutex_);
+  input_state_.set_has_input(has_input);
+}
+
+// Records the most recently observed frame size; must be positive.
+void VideoStreamInputStateProvider::OnFrameSizeObserved(int frame_size_pixels) {
+  RTC_DCHECK_GT(frame_size_pixels, 0);
+  MutexLock lock(&mutex_);
+  input_state_.set_frame_size_pixels(frame_size_pixels);
+}
+
+// Pulls codec type, minimum pixel count and single-active-stream resolution
+// out of the latest encoder settings.
+void VideoStreamInputStateProvider::OnEncoderSettingsChanged(
+    EncoderSettings encoder_settings) {
+  MutexLock lock(&mutex_);
+  input_state_.set_video_codec_type(
+      encoder_settings.encoder_config().codec_type);
+  input_state_.set_min_pixels_per_frame(
+      encoder_settings.encoder_info().scaling_settings.min_pixels_per_frame);
+  input_state_.set_single_active_stream_pixels(
+      VideoStreamAdapter::GetSingleActiveLayerPixels(
+          encoder_settings.video_codec()));
+}
+
+// Returns a snapshot of the current input state with the frame rate freshly
+// sampled from the frame-rate provider.
+VideoStreamInputState VideoStreamInputStateProvider::InputState() {
+  // GetInputFrameRate() is thread-safe.
+  int input_fps = frame_rate_provider_->GetInputFrameRate();
+  MutexLock lock(&mutex_);
+  input_state_.set_frames_per_second(input_fps);
+  return input_state_;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/adaptation/video_stream_input_state_provider.h b/third_party/libwebrtc/call/adaptation/video_stream_input_state_provider.h
new file mode 100644
index 0000000000..81996e6eb9
--- /dev/null
+++ b/third_party/libwebrtc/call/adaptation/video_stream_input_state_provider.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef CALL_ADAPTATION_VIDEO_STREAM_INPUT_STATE_PROVIDER_H_
+#define CALL_ADAPTATION_VIDEO_STREAM_INPUT_STATE_PROVIDER_H_
+
+#include "call/adaptation/encoder_settings.h"
+#include "call/adaptation/video_stream_input_state.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "video/video_stream_encoder_observer.h"
+
+namespace webrtc {
+
+class VideoStreamInputStateProvider {
+ public:
+ VideoStreamInputStateProvider(
+ VideoStreamEncoderObserver* frame_rate_provider);
+ virtual ~VideoStreamInputStateProvider();
+
+ void OnHasInputChanged(bool has_input);
+ void OnFrameSizeObserved(int frame_size_pixels);
+ void OnEncoderSettingsChanged(EncoderSettings encoder_settings);
+
+ virtual VideoStreamInputState InputState();
+
+ private:
+ Mutex mutex_;
+ VideoStreamEncoderObserver* const frame_rate_provider_;
+ VideoStreamInputState input_state_ RTC_GUARDED_BY(mutex_);
+};
+
+} // namespace webrtc
+
+#endif // CALL_ADAPTATION_VIDEO_STREAM_INPUT_STATE_PROVIDER_H_
diff --git a/third_party/libwebrtc/call/adaptation/video_stream_input_state_provider_unittest.cc b/third_party/libwebrtc/call/adaptation/video_stream_input_state_provider_unittest.cc
new file mode 100644
index 0000000000..5da2ef21cd
--- /dev/null
+++ b/third_party/libwebrtc/call/adaptation/video_stream_input_state_provider_unittest.cc
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/adaptation/video_stream_input_state_provider.h"
+
+#include <utility>
+
+#include "api/video_codecs/video_encoder.h"
+#include "call/adaptation/encoder_settings.h"
+#include "call/adaptation/test/fake_frame_rate_provider.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(VideoStreamInputStateProviderTest, DefaultValues) {
+ FakeFrameRateProvider frame_rate_provider;
+ VideoStreamInputStateProvider input_state_provider(&frame_rate_provider);
+ VideoStreamInputState input_state = input_state_provider.InputState();
+ EXPECT_EQ(false, input_state.has_input());
+ EXPECT_EQ(absl::nullopt, input_state.frame_size_pixels());
+ EXPECT_EQ(0, input_state.frames_per_second());
+ EXPECT_EQ(VideoCodecType::kVideoCodecGeneric, input_state.video_codec_type());
+ EXPECT_EQ(kDefaultMinPixelsPerFrame, input_state.min_pixels_per_frame());
+ EXPECT_EQ(absl::nullopt, input_state.single_active_stream_pixels());
+}
+
+TEST(VideoStreamInputStateProviderTest, ValuesSet) {
+ FakeFrameRateProvider frame_rate_provider;
+ VideoStreamInputStateProvider input_state_provider(&frame_rate_provider);
+ input_state_provider.OnHasInputChanged(true);
+ input_state_provider.OnFrameSizeObserved(42);
+ frame_rate_provider.set_fps(123);
+ VideoEncoder::EncoderInfo encoder_info;
+ encoder_info.scaling_settings.min_pixels_per_frame = 1337;
+ VideoEncoderConfig encoder_config;
+ encoder_config.codec_type = VideoCodecType::kVideoCodecVP9;
+ VideoCodec video_codec;
+ video_codec.codecType = VideoCodecType::kVideoCodecVP8;
+ video_codec.numberOfSimulcastStreams = 2;
+ video_codec.simulcastStream[0].active = false;
+ video_codec.simulcastStream[1].active = true;
+ video_codec.simulcastStream[1].width = 111;
+ video_codec.simulcastStream[1].height = 222;
+ input_state_provider.OnEncoderSettingsChanged(EncoderSettings(
+ std::move(encoder_info), std::move(encoder_config), video_codec));
+ VideoStreamInputState input_state = input_state_provider.InputState();
+ EXPECT_EQ(true, input_state.has_input());
+ EXPECT_EQ(42, input_state.frame_size_pixels());
+ EXPECT_EQ(123, input_state.frames_per_second());
+ EXPECT_EQ(VideoCodecType::kVideoCodecVP9, input_state.video_codec_type());
+ EXPECT_EQ(1337, input_state.min_pixels_per_frame());
+ EXPECT_EQ(111 * 222, input_state.single_active_stream_pixels());
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/audio_receive_stream.cc b/third_party/libwebrtc/call/audio_receive_stream.cc
new file mode 100644
index 0000000000..0766eb6bbb
--- /dev/null
+++ b/third_party/libwebrtc/call/audio_receive_stream.cc
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/audio_receive_stream.h"
+
+namespace webrtc {
+
+AudioReceiveStreamInterface::Stats::Stats() = default;
+AudioReceiveStreamInterface::Stats::~Stats() = default;
+
+AudioReceiveStreamInterface::Config::Config() = default;
+AudioReceiveStreamInterface::Config::~Config() = default;
+
+AudioReceiveStreamInterface::Config::Rtp::Rtp() = default;
+AudioReceiveStreamInterface::Config::Rtp::~Rtp() = default;
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/audio_receive_stream.h b/third_party/libwebrtc/call/audio_receive_stream.h
new file mode 100644
index 0000000000..88b74b44ac
--- /dev/null
+++ b/third_party/libwebrtc/call/audio_receive_stream.h
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef CALL_AUDIO_RECEIVE_STREAM_H_
+#define CALL_AUDIO_RECEIVE_STREAM_H_
+
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/audio_codecs/audio_decoder_factory.h"
+#include "api/call/transport.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "api/crypto/crypto_options.h"
+#include "api/rtp_parameters.h"
+#include "call/receive_stream.h"
+#include "call/rtp_config.h"
+
+namespace webrtc {
+class AudioSinkInterface;
+
+class AudioReceiveStreamInterface : public MediaReceiveStreamInterface {
+ public:
+ struct Stats {
+ Stats();
+ ~Stats();
+ uint32_t remote_ssrc = 0;
+ int64_t payload_bytes_received = 0;
+ int64_t header_and_padding_bytes_received = 0;
+ uint32_t packets_received = 0;
+ uint64_t fec_packets_received = 0;
+ uint64_t fec_packets_discarded = 0;
+ int32_t packets_lost = 0;
+ uint64_t packets_discarded = 0;
+ uint32_t nacks_sent = 0;
+ std::string codec_name;
+ absl::optional<int> codec_payload_type;
+ uint32_t jitter_ms = 0;
+ uint32_t jitter_buffer_ms = 0;
+ uint32_t jitter_buffer_preferred_ms = 0;
+ uint32_t delay_estimate_ms = 0;
+ int32_t audio_level = -1;
+ // Stats below correspond to similarly-named fields in the WebRTC stats
+ // spec. https://w3c.github.io/webrtc-stats/#dom-rtcmediastreamtrackstats
+ double total_output_energy = 0.0;
+ uint64_t total_samples_received = 0;
+ double total_output_duration = 0.0;
+ uint64_t concealed_samples = 0;
+ uint64_t silent_concealed_samples = 0;
+ uint64_t concealment_events = 0;
+ double jitter_buffer_delay_seconds = 0.0;
+ uint64_t jitter_buffer_emitted_count = 0;
+ double jitter_buffer_target_delay_seconds = 0.0;
+ double jitter_buffer_minimum_delay_seconds = 0.0;
+ uint64_t inserted_samples_for_deceleration = 0;
+ uint64_t removed_samples_for_acceleration = 0;
+ // Stats below DO NOT correspond directly to anything in the WebRTC stats
+ float expand_rate = 0.0f;
+ float speech_expand_rate = 0.0f;
+ float secondary_decoded_rate = 0.0f;
+ float secondary_discarded_rate = 0.0f;
+ float accelerate_rate = 0.0f;
+ float preemptive_expand_rate = 0.0f;
+ uint64_t delayed_packet_outage_samples = 0;
+ int32_t decoding_calls_to_silence_generator = 0;
+ int32_t decoding_calls_to_neteq = 0;
+ int32_t decoding_normal = 0;
+ // TODO(alexnarest): Consider decoding_neteq_plc for consistency
+ int32_t decoding_plc = 0;
+ int32_t decoding_codec_plc = 0;
+ int32_t decoding_cng = 0;
+ int32_t decoding_plc_cng = 0;
+ int32_t decoding_muted_output = 0;
+ int64_t capture_start_ntp_time_ms = 0;
+ // The timestamp at which the last packet was received, i.e. the time of the
+ // local clock when it was received - not the RTP timestamp of that packet.
+ // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-lastpacketreceivedtimestamp
+ absl::optional<Timestamp> last_packet_received;
+ uint64_t jitter_buffer_flushes = 0;
+ double relative_packet_arrival_delay_seconds = 0.0;
+ int32_t interruption_count = 0;
+ int32_t total_interruption_duration_ms = 0;
+ // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-estimatedplayouttimestamp
+ absl::optional<int64_t> estimated_playout_ntp_timestamp_ms;
+ // Remote outbound stats derived by the received RTCP sender reports.
+ // https://w3c.github.io/webrtc-stats/#remoteoutboundrtpstats-dict*
+ absl::optional<int64_t> last_sender_report_timestamp_ms;
+ absl::optional<int64_t> last_sender_report_remote_timestamp_ms;
+ uint64_t sender_reports_packets_sent = 0;
+ uint64_t sender_reports_bytes_sent = 0;
+ uint64_t sender_reports_reports_count = 0;
+ absl::optional<TimeDelta> round_trip_time;
+ TimeDelta total_round_trip_time = TimeDelta::Zero();
+ int round_trip_time_measurements = 0;
+ };
+
+ struct Config {
+ Config();
+ ~Config();
+
+ std::string ToString() const;
+
+ // Receive-stream specific RTP settings.
+ struct Rtp : public ReceiveStreamRtpConfig {
+ Rtp();
+ ~Rtp();
+
+ std::string ToString() const;
+
+ // See NackConfig for description.
+ NackConfig nack;
+
+ RtcpEventObserver* rtcp_event_observer = nullptr;
+ } rtp;
+
+ // Receive-side RTT.
+ bool enable_non_sender_rtt = false;
+
+ Transport* rtcp_send_transport = nullptr;
+
+ // NetEq settings.
+ size_t jitter_buffer_max_packets = 200;
+ bool jitter_buffer_fast_accelerate = false;
+ int jitter_buffer_min_delay_ms = 0;
+
+ // Identifier for an A/V synchronization group. Empty string to disable.
+ // TODO(pbos): Synchronize streams in a sync group, not just one video
+ // stream to one audio stream. Tracked by issue webrtc:4762.
+ std::string sync_group;
+
+ // Decoder specifications for every payload type that we can receive.
+ std::map<int, SdpAudioFormat> decoder_map;
+
+ rtc::scoped_refptr<AudioDecoderFactory> decoder_factory;
+
+ absl::optional<AudioCodecPairId> codec_pair_id;
+
+ // Per PeerConnection crypto options.
+ webrtc::CryptoOptions crypto_options;
+
+ // An optional custom frame decryptor that allows the entire frame to be
+    // decrypted in whatever way the caller chooses. This is not required by
+ // default.
+ // TODO(tommi): Remove this member variable from the struct. It's not
+ // a part of the AudioReceiveStreamInterface state but rather a pass through
+ // variable.
+ rtc::scoped_refptr<webrtc::FrameDecryptorInterface> frame_decryptor;
+
+ // An optional frame transformer used by insertable streams to transform
+ // encoded frames.
+ // TODO(tommi): Remove this member variable from the struct. It's not
+ // a part of the AudioReceiveStreamInterface state but rather a pass through
+ // variable.
+ rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer;
+ };
+
+ // Methods that support reconfiguring the stream post initialization.
+ virtual void SetDecoderMap(std::map<int, SdpAudioFormat> decoder_map) = 0;
+ virtual void SetNackHistory(int history_ms) = 0;
+ virtual void SetNonSenderRttMeasurement(bool enabled) = 0;
+
+ // Returns true if the stream has been started.
+ virtual bool IsRunning() const = 0;
+
+ virtual Stats GetStats(bool get_and_clear_legacy_stats) const = 0;
+ Stats GetStats() { return GetStats(/*get_and_clear_legacy_stats=*/true); }
+
+ // Sets an audio sink that receives unmixed audio from the receive stream.
+ // Ownership of the sink is managed by the caller.
+ // Only one sink can be set and passing a null sink clears an existing one.
+ // NOTE: Audio must still somehow be pulled through AudioTransport for audio
+ // to stream through this sink. In practice, this happens if mixed audio
+ // is being pulled+rendered and/or if audio is being pulled for the purposes
+ // of feeding to the AEC.
+ virtual void SetSink(AudioSinkInterface* sink) = 0;
+
+ // Sets playback gain of the stream, applied when mixing, and thus after it
+ // is potentially forwarded to any attached AudioSinkInterface implementation.
+ virtual void SetGain(float gain) = 0;
+
+ // Sets a base minimum for the playout delay. Base minimum delay sets lower
+ // bound on minimum delay value determining lower bound on playout delay.
+ //
+  // Returns true if value was successfully set, false otherwise.
+ virtual bool SetBaseMinimumPlayoutDelayMs(int delay_ms) = 0;
+
+ // Returns current value of base minimum delay in milliseconds.
+ virtual int GetBaseMinimumPlayoutDelayMs() const = 0;
+
+ // Synchronization source (stream identifier) to be received.
+ // This member will not change mid-stream and can be assumed to be const
+ // post initialization.
+ virtual uint32_t remote_ssrc() const = 0;
+
+ protected:
+ virtual ~AudioReceiveStreamInterface() {}
+};
+
+} // namespace webrtc
+
+#endif // CALL_AUDIO_RECEIVE_STREAM_H_
diff --git a/third_party/libwebrtc/call/audio_send_stream.cc b/third_party/libwebrtc/call/audio_send_stream.cc
new file mode 100644
index 0000000000..a36050a9f7
--- /dev/null
+++ b/third_party/libwebrtc/call/audio_send_stream.cc
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/audio_send_stream.h"
+
+#include <stddef.h>
+
+#include "rtc_base/strings/audio_format_to_string.h"
+#include "rtc_base/strings/string_builder.h"
+
+namespace webrtc {
+
+AudioSendStream::Stats::Stats() = default;
+AudioSendStream::Stats::~Stats() = default;
+
+AudioSendStream::Config::Config(Transport* send_transport)
+ : send_transport(send_transport) {}
+
+AudioSendStream::Config::~Config() = default;
+
+std::string AudioSendStream::Config::ToString() const {
+ rtc::StringBuilder ss;
+ ss << "{rtp: " << rtp.ToString();
+ ss << ", rtcp_report_interval_ms: " << rtcp_report_interval_ms;
+ ss << ", send_transport: " << (send_transport ? "(Transport)" : "null");
+ ss << ", min_bitrate_bps: " << min_bitrate_bps;
+ ss << ", max_bitrate_bps: " << max_bitrate_bps;
+ ss << ", has audio_network_adaptor_config: "
+ << (audio_network_adaptor_config ? "true" : "false");
+ ss << ", has_dscp: " << (has_dscp ? "true" : "false");
+ ss << ", send_codec_spec: "
+ << (send_codec_spec ? send_codec_spec->ToString() : "<unset>");
+ ss << "}";
+ return ss.Release();
+}
+
+AudioSendStream::Config::Rtp::Rtp() = default;
+
+AudioSendStream::Config::Rtp::~Rtp() = default;
+
+std::string AudioSendStream::Config::Rtp::ToString() const {
+ char buf[1024];
+ rtc::SimpleStringBuilder ss(buf);
+ ss << "{ssrc: " << ssrc;
+ if (!rid.empty()) {
+ ss << ", rid: " << rid;
+ }
+ if (!mid.empty()) {
+ ss << ", mid: " << mid;
+ }
+ ss << ", extmap-allow-mixed: " << (extmap_allow_mixed ? "true" : "false");
+ ss << ", extensions: [";
+ for (size_t i = 0; i < extensions.size(); ++i) {
+ ss << extensions[i].ToString();
+ if (i != extensions.size() - 1) {
+ ss << ", ";
+ }
+ }
+ ss << ']';
+ ss << ", c_name: " << c_name;
+ ss << '}';
+ return ss.str();
+}
+
+AudioSendStream::Config::SendCodecSpec::SendCodecSpec(
+ int payload_type,
+ const SdpAudioFormat& format)
+ : payload_type(payload_type), format(format) {}
+AudioSendStream::Config::SendCodecSpec::~SendCodecSpec() = default;
+
+std::string AudioSendStream::Config::SendCodecSpec::ToString() const {
+ char buf[1024];
+ rtc::SimpleStringBuilder ss(buf);
+ ss << "{nack_enabled: " << (nack_enabled ? "true" : "false");
+ ss << ", transport_cc_enabled: " << (transport_cc_enabled ? "true" : "false");
+ ss << ", enable_non_sender_rtt: "
+ << (enable_non_sender_rtt ? "true" : "false");
+ ss << ", cng_payload_type: "
+ << (cng_payload_type ? rtc::ToString(*cng_payload_type) : "<unset>");
+ ss << ", red_payload_type: "
+ << (red_payload_type ? rtc::ToString(*red_payload_type) : "<unset>");
+ ss << ", payload_type: " << payload_type;
+ ss << ", format: " << rtc::ToString(format);
+ ss << '}';
+ return ss.str();
+}
+
+bool AudioSendStream::Config::SendCodecSpec::operator==(
+ const AudioSendStream::Config::SendCodecSpec& rhs) const {
+ if (nack_enabled == rhs.nack_enabled &&
+ transport_cc_enabled == rhs.transport_cc_enabled &&
+ enable_non_sender_rtt == rhs.enable_non_sender_rtt &&
+ cng_payload_type == rhs.cng_payload_type &&
+ red_payload_type == rhs.red_payload_type &&
+ payload_type == rhs.payload_type && format == rhs.format &&
+ target_bitrate_bps == rhs.target_bitrate_bps) {
+ return true;
+ }
+ return false;
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/audio_send_stream.h b/third_party/libwebrtc/call/audio_send_stream.h
new file mode 100644
index 0000000000..5f4f871bf0
--- /dev/null
+++ b/third_party/libwebrtc/call/audio_send_stream.h
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef CALL_AUDIO_SEND_STREAM_H_
+#define CALL_AUDIO_SEND_STREAM_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/audio_codecs/audio_codec_pair_id.h"
+#include "api/audio_codecs/audio_encoder.h"
+#include "api/audio_codecs/audio_encoder_factory.h"
+#include "api/audio_codecs/audio_format.h"
+#include "api/call/transport.h"
+#include "api/crypto/crypto_options.h"
+#include "api/crypto/frame_encryptor_interface.h"
+#include "api/frame_transformer_interface.h"
+#include "api/rtp_parameters.h"
+#include "api/rtp_sender_setparameters_callback.h"
+#include "api/scoped_refptr.h"
+#include "call/audio_sender.h"
+#include "call/rtp_config.h"
+#include "modules/audio_processing/include/audio_processing_statistics.h"
+#include "modules/rtp_rtcp/include/report_block_data.h"
+#include "modules/rtp_rtcp/include/rtcp_statistics.h"
+
+namespace webrtc {
+
+class AudioSendStream : public AudioSender {
+ public:
+ struct Stats {
+ Stats();
+ ~Stats();
+
+ // TODO(solenberg): Harmonize naming and defaults with receive stream stats.
+ uint32_t local_ssrc = 0;
+ int64_t payload_bytes_sent = 0;
+ int64_t header_and_padding_bytes_sent = 0;
+ // https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-retransmittedbytessent
+ uint64_t retransmitted_bytes_sent = 0;
+ int32_t packets_sent = 0;
+ // https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-totalpacketsenddelay
+ TimeDelta total_packet_send_delay = TimeDelta::Zero();
+ // https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-retransmittedpacketssent
+ uint64_t retransmitted_packets_sent = 0;
+ int32_t packets_lost = -1;
+ float fraction_lost = -1.0f;
+ std::string codec_name;
+ absl::optional<int> codec_payload_type;
+ int32_t jitter_ms = -1;
+ int64_t rtt_ms = -1;
+ int16_t audio_level = 0;
+ // See description of "totalAudioEnergy" in the WebRTC stats spec:
+ // https://w3c.github.io/webrtc-stats/#dom-rtcmediastreamtrackstats-totalaudioenergy
+ double total_input_energy = 0.0;
+ double total_input_duration = 0.0;
+
+ ANAStats ana_statistics;
+ AudioProcessingStats apm_statistics;
+ RtcpPacketTypeCounter rtcp_packet_type_counts;
+
+ int64_t target_bitrate_bps = 0;
+ // A snapshot of Report Blocks with additional data of interest to
+ // statistics. Within this list, the sender-source SSRC pair is unique and
+ // per-pair the ReportBlockData represents the latest Report Block that was
+ // received for that pair.
+ std::vector<ReportBlockData> report_block_datas;
+ uint32_t nacks_received = 0;
+ };
+
+ struct Config {
+ Config() = delete;
+ explicit Config(Transport* send_transport);
+ ~Config();
+ std::string ToString() const;
+
+ // Send-stream specific RTP settings.
+ struct Rtp {
+ Rtp();
+ ~Rtp();
+ std::string ToString() const;
+
+ // Sender SSRC.
+ uint32_t ssrc = 0;
+
+ // The value to send in the RID RTP header extension if the extension is
+ // included in the list of extensions.
+ std::string rid;
+
+ // The value to send in the MID RTP header extension if the extension is
+ // included in the list of extensions.
+ std::string mid;
+
+ // Corresponds to the SDP attribute extmap-allow-mixed.
+ bool extmap_allow_mixed = false;
+
+ // RTP header extensions used for the sent stream.
+ std::vector<RtpExtension> extensions;
+
+ // RTCP CNAME, see RFC 3550.
+ std::string c_name;
+ } rtp;
+
+ // Time interval between RTCP report for audio
+ int rtcp_report_interval_ms = 5000;
+
+ // Transport for outgoing packets. The transport is expected to exist for
+ // the entire life of the AudioSendStream and is owned by the API client.
+ Transport* send_transport = nullptr;
+
+ // Bitrate limits used for variable audio bitrate streams. Set both to -1 to
+ // disable audio bitrate adaptation.
+ // Note: This is still an experimental feature and not ready for real usage.
+ int min_bitrate_bps = -1;
+ int max_bitrate_bps = -1;
+
+ double bitrate_priority = 1.0;
+ bool has_dscp = false;
+
+ // Defines whether to turn on audio network adaptor, and defines its config
+ // string.
+ absl::optional<std::string> audio_network_adaptor_config;
+
+ struct SendCodecSpec {
+ SendCodecSpec(int payload_type, const SdpAudioFormat& format);
+ ~SendCodecSpec();
+ std::string ToString() const;
+
+ bool operator==(const SendCodecSpec& rhs) const;
+ bool operator!=(const SendCodecSpec& rhs) const {
+ return !(*this == rhs);
+ }
+
+ int payload_type;
+ SdpAudioFormat format;
+ bool nack_enabled = false;
+ bool transport_cc_enabled = false;
+ bool enable_non_sender_rtt = false;
+ absl::optional<int> cng_payload_type;
+ absl::optional<int> red_payload_type;
+ // If unset, use the encoder's default target bitrate.
+ absl::optional<int> target_bitrate_bps;
+ };
+
+ absl::optional<SendCodecSpec> send_codec_spec;
+ rtc::scoped_refptr<AudioEncoderFactory> encoder_factory;
+ absl::optional<AudioCodecPairId> codec_pair_id;
+
+ // Track ID as specified during track creation.
+ std::string track_id;
+
+ // Per PeerConnection crypto options.
+ webrtc::CryptoOptions crypto_options;
+
+ // An optional custom frame encryptor that allows the entire frame to be
+    // encrypted in whatever way the caller chooses. This is not required by
+ // default.
+ rtc::scoped_refptr<webrtc::FrameEncryptorInterface> frame_encryptor;
+
+ // An optional frame transformer used by insertable streams to transform
+ // encoded frames.
+ rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer;
+ };
+
+ virtual ~AudioSendStream() = default;
+
+ virtual const webrtc::AudioSendStream::Config& GetConfig() const = 0;
+
+ // Reconfigure the stream according to the Configuration.
+ virtual void Reconfigure(const Config& config,
+ SetParametersCallback callback) = 0;
+
+ // Starts stream activity.
+ // When a stream is active, it can receive, process and deliver packets.
+ virtual void Start() = 0;
+ // Stops stream activity.
+ // When a stream is stopped, it can't receive, process or deliver packets.
+ virtual void Stop() = 0;
+
+ // TODO(solenberg): Make payload_type a config property instead.
+ virtual bool SendTelephoneEvent(int payload_type,
+ int payload_frequency,
+ int event,
+ int duration_ms) = 0;
+
+ virtual void SetMuted(bool muted) = 0;
+
+ virtual Stats GetStats() const = 0;
+ virtual Stats GetStats(bool has_remote_tracks) const = 0;
+};
+
+} // namespace webrtc
+
+#endif // CALL_AUDIO_SEND_STREAM_H_
diff --git a/third_party/libwebrtc/call/audio_sender.h b/third_party/libwebrtc/call/audio_sender.h
new file mode 100644
index 0000000000..daab070879
--- /dev/null
+++ b/third_party/libwebrtc/call/audio_sender.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef CALL_AUDIO_SENDER_H_
+#define CALL_AUDIO_SENDER_H_
+
+#include <memory>
+
+#include "api/audio/audio_frame.h"
+
+namespace webrtc {
+
+class AudioSender {
+ public:
+ // Encode and send audio.
+ virtual void SendAudioData(std::unique_ptr<AudioFrame> audio_frame) = 0;
+
+ virtual ~AudioSender() = default;
+};
+
+} // namespace webrtc
+
+#endif // CALL_AUDIO_SENDER_H_
diff --git a/third_party/libwebrtc/call/audio_sender_interface_gn/moz.build b/third_party/libwebrtc/call/audio_sender_interface_gn/moz.build
new file mode 100644
index 0000000000..2b42e8ebf9
--- /dev/null
+++ b/third_party/libwebrtc/call/audio_sender_interface_gn/moz.build
@@ -0,0 +1,216 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["RTC_ENABLE_WIN_WGC"] = True
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["TARGET_CPU"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["TARGET_CPU"] == "arm":
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["TARGET_CPU"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["TARGET_CPU"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["TARGET_CPU"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["TARGET_CPU"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android" and CONFIG["TARGET_CPU"] == "arm":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["OS_TARGET"] == "Android" and CONFIG["TARGET_CPU"] == "x86":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "aarch64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "arm":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "x86":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "x86_64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("audio_sender_interface_gn")
diff --git a/third_party/libwebrtc/call/audio_state.cc b/third_party/libwebrtc/call/audio_state.cc
new file mode 100644
index 0000000000..725d27f423
--- /dev/null
+++ b/third_party/libwebrtc/call/audio_state.cc
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/audio_state.h"
+
+namespace webrtc {
+
+// Out-of-line definitions of AudioState::Config's defaulted special members
+// (declared in call/audio_state.h).
+AudioState::Config::Config() = default;
+AudioState::Config::~Config() = default;
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/audio_state.h b/third_party/libwebrtc/call/audio_state.h
new file mode 100644
index 0000000000..79fb5cf981
--- /dev/null
+++ b/third_party/libwebrtc/call/audio_state.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef CALL_AUDIO_STATE_H_
+#define CALL_AUDIO_STATE_H_
+
+#include "api/audio/audio_mixer.h"
+#include "api/scoped_refptr.h"
+#include "modules/async_audio_processing/async_audio_processing.h"
+#include "modules/audio_device/include/audio_device.h"
+#include "modules/audio_processing/include/audio_processing.h"
+#include "rtc_base/ref_count.h"
+
+namespace webrtc {
+
+class AudioTransport;
+
+// AudioState holds the state which must be shared between multiple instances of
+// webrtc::Call for audio processing purposes.
+class AudioState : public rtc::RefCountInterface {
+ public:
+ struct Config {
+ Config();
+ ~Config();
+
+ // The audio mixer connected to active receive streams. One per
+ // AudioState.
+ rtc::scoped_refptr<AudioMixer> audio_mixer;
+
+ // The audio processing module.
+ rtc::scoped_refptr<webrtc::AudioProcessing> audio_processing;
+
+ // TODO(solenberg): Temporary: audio device module.
+ rtc::scoped_refptr<webrtc::AudioDeviceModule> audio_device_module;
+
+ // Factory for AsyncAudioProcessing (see modules/async_audio_processing).
+ rtc::scoped_refptr<AsyncAudioProcessing::Factory>
+ async_audio_processing_factory;
+ };
+
+ // Accessors for the shared audio processing module and audio transport.
+ virtual AudioProcessing* audio_processing() = 0;
+ virtual AudioTransport* audio_transport() = 0;
+
+ // Enable/disable playout of the audio channels. Enabled by default.
+ // This will stop playout of the underlying audio device but start a task
+ // which will poll for audio data every 10ms to ensure that audio processing
+ // happens and the audio stats are updated.
+ virtual void SetPlayout(bool enabled) = 0;
+
+ // Enable/disable recording of the audio channels. Enabled by default.
+ // This will stop recording of the underlying audio device and no audio
+ // packets will be encoded or transmitted.
+ virtual void SetRecording(bool enabled) = 0;
+
+ // Enables/disables swapping of the stereo channels. NOTE(review): presumably
+ // applies to captured (send-side) audio — confirm with the implementation.
+ virtual void SetStereoChannelSwapping(bool enable) = 0;
+
+ // Creates a concrete AudioState instance from `config`.
+ static rtc::scoped_refptr<AudioState> Create(
+ const AudioState::Config& config);
+
+ ~AudioState() override {}
+};
+} // namespace webrtc
+
+#endif // CALL_AUDIO_STATE_H_
diff --git a/third_party/libwebrtc/call/bitrate_allocator.cc b/third_party/libwebrtc/call/bitrate_allocator.cc
new file mode 100644
index 0000000000..2684a1650e
--- /dev/null
+++ b/third_party/libwebrtc/call/bitrate_allocator.cc
@@ -0,0 +1,593 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#include "call/bitrate_allocator.h"
+
+#include <algorithm>
+#include <cmath>
+#include <memory>
+#include <utility>
+
+#include "absl/algorithm/container.h"
+#include "api/units/data_rate.h"
+#include "api/units/time_delta.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_minmax.h"
+#include "system_wrappers/include/clock.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+
+namespace {
+using bitrate_allocator_impl::AllocatableTrack;
+
+// Allow packets to be transmitted in up to 2 times max video bitrate if the
+// bandwidth estimate allows it.
+const uint8_t kTransmissionMaxBitrateMultiplier = 2;
+const int kDefaultBitrateBps = 300000;
+
+// Require a bitrate increase of max(10%, 20kbps) to resume paused streams.
+const double kToggleFactor = 0.1;
+const uint32_t kMinToggleBitrateBps = 20000;
+
+const int64_t kBweLogIntervalMs = 5000;
+
+// Returns the fraction of `allocated_bitrate` that is left for media once
+// `protection_bitrate` is subtracted, in [0.0, 1.0]. `allocated_bitrate`
+// must be > 0; a zero protection bitrate yields 1.0.
+double MediaRatio(uint32_t allocated_bitrate, uint32_t protection_bitrate) {
+ RTC_DCHECK_GT(allocated_bitrate, 0);
+ if (protection_bitrate == 0)
+ return 1.0;
+
+ uint32_t media_bitrate = allocated_bitrate - protection_bitrate;
+ return media_bitrate / static_cast<double>(allocated_bitrate);
+}
+
+// True when `bitrate` covers `sum_min_bitrates` and, after splitting the
+// surplus evenly, every track still reaches its hysteresis-adjusted minimum.
+bool EnoughBitrateForAllObservers(
+ const std::vector<AllocatableTrack>& allocatable_tracks,
+ uint32_t bitrate,
+ uint32_t sum_min_bitrates) {
+ if (bitrate < sum_min_bitrates)
+ return false;
+
+ uint32_t extra_bitrate_per_observer =
+ (bitrate - sum_min_bitrates) /
+ static_cast<uint32_t>(allocatable_tracks.size());
+ for (const auto& observer_config : allocatable_tracks) {
+ if (observer_config.config.min_bitrate_bps + extra_bitrate_per_observer <
+ observer_config.MinBitrateWithHysteresis()) {
+ return false;
+ }
+ }
+ return true;
+}
+
+// Splits `bitrate` evenly to observers already in `allocation`.
+// `include_zero_allocations` decides if zero allocations should be part of
+// the distribution or not. The allowed max bitrate is `max_multiplier` x
+// observer max bitrate.
+void DistributeBitrateEvenly(
+ const std::vector<AllocatableTrack>& allocatable_tracks,
+ uint32_t bitrate,
+ bool include_zero_allocations,
+ int max_multiplier,
+ std::map<BitrateAllocatorObserver*, int>* allocation) {
+ RTC_DCHECK_EQ(allocation->size(), allocatable_tracks.size());
+
+ // Sorted ascending by max bitrate, so tracks that hit their cap are visited
+ // first and their unused share carries over to the larger-cap tracks.
+ std::multimap<uint32_t, const AllocatableTrack*> list_max_bitrates;
+ for (const auto& observer_config : allocatable_tracks) {
+ if (include_zero_allocations ||
+ allocation->at(observer_config.observer) != 0) {
+ list_max_bitrates.insert(
+ {observer_config.config.max_bitrate_bps, &observer_config});
+ }
+ }
+ auto it = list_max_bitrates.begin();
+ while (it != list_max_bitrates.end()) {
+ RTC_DCHECK_GT(bitrate, 0);
+ uint32_t extra_allocation =
+ bitrate / static_cast<uint32_t>(list_max_bitrates.size());
+ uint32_t total_allocation =
+ extra_allocation + allocation->at(it->second->observer);
+ bitrate -= extra_allocation;
+ if (total_allocation > max_multiplier * it->first) {
+ // There is more than we can fit for this observer, carry over to the
+ // remaining observers.
+ bitrate += total_allocation - max_multiplier * it->first;
+ total_allocation = max_multiplier * it->first;
+ }
+ // Finally, update the allocation for this observer.
+ allocation->at(it->second->observer) = total_allocation;
+ it = list_max_bitrates.erase(it);
+ }
+}
+
+// From the available `bitrate`, each observer will be allocated a
+// proportional amount based upon its bitrate priority. If that amount is
+// more than the observer's capacity, it will be allocated its capacity, and
+// the excess bitrate is still allocated proportionally to other observers.
+// Allocating the proportional amount means an observer with twice the
+// bitrate_priority of another will be allocated twice the bitrate.
+void DistributeBitrateRelatively(
+ const std::vector<AllocatableTrack>& allocatable_tracks,
+ uint32_t remaining_bitrate,
+ const std::map<BitrateAllocatorObserver*, int>& observers_capacities,
+ std::map<BitrateAllocatorObserver*, int>* allocation) {
+ RTC_DCHECK_EQ(allocation->size(), allocatable_tracks.size());
+ RTC_DCHECK_EQ(observers_capacities.size(), allocatable_tracks.size());
+
+ struct PriorityRateObserverConfig {
+ BitrateAllocatorObserver* allocation_key;
+ // The amount of bitrate bps that can be allocated to this observer.
+ int capacity_bps;
+ double bitrate_priority;
+ };
+
+ double bitrate_priority_sum = 0;
+ std::vector<PriorityRateObserverConfig> priority_rate_observers;
+ for (const auto& observer_config : allocatable_tracks) {
+ priority_rate_observers.push_back(PriorityRateObserverConfig{
+ observer_config.observer,
+ observers_capacities.at(observer_config.observer),
+ observer_config.config.bitrate_priority});
+ bitrate_priority_sum += observer_config.config.bitrate_priority;
+ }
+
+ // Iterate in the order observers can be allocated their full capacity.
+
+ // We want to sort by which observers will be allocated their full capacity
+ // first. By dividing each observer's capacity by its bitrate priority we
+ // are "normalizing" the capacity of an observer by the rate it will be
+ // filled. This is because the amount allocated is based upon bitrate
+ // priority. We allocate twice as much bitrate to an observer with twice the
+ // bitrate priority of another.
+ absl::c_sort(priority_rate_observers, [](const auto& a, const auto& b) {
+ return a.capacity_bps / a.bitrate_priority <
+ b.capacity_bps / b.bitrate_priority;
+ });
+ size_t i;
+ for (i = 0; i < priority_rate_observers.size(); ++i) {
+ const auto& priority_rate_observer = priority_rate_observers[i];
+ // We allocate the full capacity to an observer only if its relative
+ // portion from the remaining bitrate is sufficient to allocate its full
+ // capacity. This means we aren't greedily allocating the full capacity, but
+ // that it is only done when there is also enough bitrate to allocate the
+ // proportional amounts to all other observers.
+ double observer_share =
+ priority_rate_observer.bitrate_priority / bitrate_priority_sum;
+ double allocation_bps = observer_share * remaining_bitrate;
+ bool enough_bitrate = allocation_bps >= priority_rate_observer.capacity_bps;
+ if (!enough_bitrate)
+ break;
+ allocation->at(priority_rate_observer.allocation_key) +=
+ priority_rate_observer.capacity_bps;
+ remaining_bitrate -= priority_rate_observer.capacity_bps;
+ bitrate_priority_sum -= priority_rate_observer.bitrate_priority;
+ }
+
+ // From the remaining bitrate, allocate the proportional amounts to the
+ // observers that aren't allocated their max capacity.
+ for (; i < priority_rate_observers.size(); ++i) {
+ const auto& priority_rate_observer = priority_rate_observers[i];
+ double fraction_allocated =
+ priority_rate_observer.bitrate_priority / bitrate_priority_sum;
+ // Note: the double product below is truncated when stored as an int.
+ allocation->at(priority_rate_observer.allocation_key) +=
+ fraction_allocated * remaining_bitrate;
+ }
+}
+
+// Allocates bitrate to observers when there isn't enough to allocate the
+// minimum to all observers. The returned map holds an entry (possibly 0)
+// for every track.
+std::map<BitrateAllocatorObserver*, int> LowRateAllocation(
+ const std::vector<AllocatableTrack>& allocatable_tracks,
+ uint32_t bitrate) {
+ std::map<BitrateAllocatorObserver*, int> allocation;
+ // Start by allocating bitrate to observers enforcing a min bitrate, hence
+ // remaining_bitrate might turn negative.
+ int64_t remaining_bitrate = bitrate;
+ for (const auto& observer_config : allocatable_tracks) {
+ int32_t allocated_bitrate = 0;
+ if (observer_config.config.enforce_min_bitrate)
+ allocated_bitrate = observer_config.config.min_bitrate_bps;
+
+ allocation[observer_config.observer] = allocated_bitrate;
+ remaining_bitrate -= allocated_bitrate;
+ }
+
+ // Allocate bitrate to all previously active streams.
+ if (remaining_bitrate > 0) {
+ for (const auto& observer_config : allocatable_tracks) {
+ if (observer_config.config.enforce_min_bitrate ||
+ observer_config.LastAllocatedBitrate() == 0)
+ continue;
+
+ uint32_t required_bitrate = observer_config.MinBitrateWithHysteresis();
+ if (remaining_bitrate >= required_bitrate) {
+ allocation[observer_config.observer] = required_bitrate;
+ remaining_bitrate -= required_bitrate;
+ }
+ }
+ }
+
+ // Allocate bitrate to previously paused streams.
+ if (remaining_bitrate > 0) {
+ for (const auto& observer_config : allocatable_tracks) {
+ if (observer_config.LastAllocatedBitrate() != 0)
+ continue;
+
+ // Add a hysteresis to avoid toggling.
+ uint32_t required_bitrate = observer_config.MinBitrateWithHysteresis();
+ if (remaining_bitrate >= required_bitrate) {
+ allocation[observer_config.observer] = required_bitrate;
+ remaining_bitrate -= required_bitrate;
+ }
+ }
+ }
+
+ // Split a possible remainder evenly on all streams with an allocation.
+ if (remaining_bitrate > 0)
+ DistributeBitrateEvenly(allocatable_tracks, remaining_bitrate, false, 1,
+ &allocation);
+
+ RTC_DCHECK_EQ(allocation.size(), allocatable_tracks.size());
+ return allocation;
+}
+
+// Allocates bitrate to all observers when the available bandwidth is enough
+// to allocate the minimum to all observers but not enough to allocate the
+// max bitrate of each observer.
+
+// Allocates the bitrate based on the bitrate priority of each observer. This
+// bitrate priority defines the priority for bitrate to be allocated to that
+// observer in relation to other observers. For example with two observers, if
+// observer 1 had a bitrate_priority = 1.0, and observer 2 has a
+// bitrate_priority = 2.0, the expected behavior is that observer 2 will be
+// allocated twice the bitrate as observer 1 above the each observer's
+// min_bitrate_bps values, until one of the observers hits its max_bitrate_bps.
+std::map<BitrateAllocatorObserver*, int> NormalRateAllocation(
+ const std::vector<AllocatableTrack>& allocatable_tracks,
+ uint32_t bitrate,
+ uint32_t sum_min_bitrates) {
+ std::map<BitrateAllocatorObserver*, int> allocation;
+ std::map<BitrateAllocatorObserver*, int> observers_capacities;
+ for (const auto& observer_config : allocatable_tracks) {
+ allocation[observer_config.observer] =
+ observer_config.config.min_bitrate_bps;
+ observers_capacities[observer_config.observer] =
+ observer_config.config.max_bitrate_bps -
+ observer_config.config.min_bitrate_bps;
+ }
+
+ // Every track already holds its minimum; only the surplus is distributed.
+ bitrate -= sum_min_bitrates;
+
+ // TODO(srte): Implement fair sharing between prioritized streams, currently
+ // they are treated on a first come first serve basis.
+ for (const auto& observer_config : allocatable_tracks) {
+ int64_t priority_margin = observer_config.config.priority_bitrate_bps -
+ allocation[observer_config.observer];
+ if (priority_margin > 0 && bitrate > 0) {
+ int64_t extra_bitrate = std::min<int64_t>(priority_margin, bitrate);
+ allocation[observer_config.observer] +=
+ rtc::dchecked_cast<int>(extra_bitrate);
+ observers_capacities[observer_config.observer] -= extra_bitrate;
+ bitrate -= extra_bitrate;
+ }
+ }
+
+ // From the remaining bitrate, allocate a proportional amount to each observer
+ // above the min bitrate already allocated.
+ if (bitrate > 0)
+ DistributeBitrateRelatively(allocatable_tracks, bitrate,
+ observers_capacities, &allocation);
+
+ return allocation;
+}
+
+// Allocates bitrate to observers when there is enough available bandwidth
+// for all observers to be allocated their max bitrate.
+std::map<BitrateAllocatorObserver*, int> MaxRateAllocation(
+ const std::vector<AllocatableTrack>& allocatable_tracks,
+ uint32_t bitrate,
+ uint32_t sum_max_bitrates) {
+ std::map<BitrateAllocatorObserver*, int> allocation;
+
+ for (const auto& observer_config : allocatable_tracks) {
+ allocation[observer_config.observer] =
+ observer_config.config.max_bitrate_bps;
+ bitrate -= observer_config.config.max_bitrate_bps;
+ }
+ // The leftover is spread evenly, with each track capped at
+ // kTransmissionMaxBitrateMultiplier x its configured max.
+ DistributeBitrateEvenly(allocatable_tracks, bitrate, true,
+ kTransmissionMaxBitrateMultiplier, &allocation);
+ return allocation;
+}
+
+// Allocates zero bitrate to all observers. Used when the target bitrate is
+// zero (see AllocateBitrates()).
+std::map<BitrateAllocatorObserver*, int> ZeroRateAllocation(
+ const std::vector<AllocatableTrack>& allocatable_tracks) {
+ std::map<BitrateAllocatorObserver*, int> allocation;
+ for (const auto& observer_config : allocatable_tracks)
+ allocation[observer_config.observer] = 0;
+ return allocation;
+}
+
+// Top-level allocation: dispatches to Zero/Low/Normal/MaxRateAllocation
+// depending on how `bitrate` compares to the summed min/max bitrates.
+std::map<BitrateAllocatorObserver*, int> AllocateBitrates(
+ const std::vector<AllocatableTrack>& allocatable_tracks,
+ uint32_t bitrate) {
+ if (allocatable_tracks.empty())
+ return std::map<BitrateAllocatorObserver*, int>();
+
+ if (bitrate == 0)
+ return ZeroRateAllocation(allocatable_tracks);
+
+ uint32_t sum_min_bitrates = 0;
+ uint32_t sum_max_bitrates = 0;
+ for (const auto& observer_config : allocatable_tracks) {
+ sum_min_bitrates += observer_config.config.min_bitrate_bps;
+ sum_max_bitrates += observer_config.config.max_bitrate_bps;
+ }
+
+ // Not enough for all observers to get an allocation, allocate according to:
+ // enforced min bitrate -> allocated bitrate previous round -> restart paused
+ // streams.
+ if (!EnoughBitrateForAllObservers(allocatable_tracks, bitrate,
+ sum_min_bitrates))
+ return LowRateAllocation(allocatable_tracks, bitrate);
+
+ // All observers will get their min bitrate plus a share of the rest. This
+ // share is allocated to each observer based on its bitrate_priority.
+ if (bitrate <= sum_max_bitrates)
+ return NormalRateAllocation(allocatable_tracks, bitrate, sum_min_bitrates);
+
+ // All observers will get up to transmission_max_bitrate_multiplier_ x max.
+ return MaxRateAllocation(allocatable_tracks, bitrate, sum_max_bitrates);
+}
+
+} // namespace
+
+// `limit_observer` is stored as a raw pointer and must remain valid for the
+// lifetime of this allocator.
+BitrateAllocator::BitrateAllocator(LimitObserver* limit_observer)
+ : limit_observer_(limit_observer),
+ last_target_bps_(0),
+ last_stable_target_bps_(0),
+ last_non_zero_bitrate_bps_(kDefaultBitrateBps),
+ last_fraction_loss_(0),
+ last_rtt_(0),
+ last_bwe_period_ms_(1000),
+ num_pause_events_(0),
+ last_bwe_log_time_(0) {
+ // Detached so the checker re-attaches to the sequence of the first call.
+ sequenced_checker_.Detach();
+}
+
+// Reports the accumulated number of pause events to the metrics histogram.
+BitrateAllocator::~BitrateAllocator() {
+ RTC_HISTOGRAM_COUNTS_100("WebRTC.Call.NumberOfPauseEvents",
+ num_pause_events_);
+}
+
+// Seeds the last-known non-zero bitrate used by GetStartBitrate().
+void BitrateAllocator::UpdateStartRate(uint32_t start_rate_bps) {
+ RTC_DCHECK_RUN_ON(&sequenced_checker_);
+ last_non_zero_bitrate_bps_ = start_rate_bps;
+}
+
+// Applies a new network estimate: recomputes per-observer allocations for
+// both the target and the stable target rate, notifies every observer, logs
+// pause/resume transitions, and finally refreshes the allocation limits.
+void BitrateAllocator::OnNetworkEstimateChanged(TargetTransferRate msg) {
+ RTC_DCHECK_RUN_ON(&sequenced_checker_);
+ last_target_bps_ = msg.target_rate.bps();
+ last_stable_target_bps_ = msg.stable_target_rate.bps();
+ last_non_zero_bitrate_bps_ =
+ last_target_bps_ > 0 ? last_target_bps_ : last_non_zero_bitrate_bps_;
+
+ // Fraction loss is kept in Q8 (0..255).
+ int loss_ratio_255 = msg.network_estimate.loss_rate_ratio * 255;
+ last_fraction_loss_ =
+ rtc::dchecked_cast<uint8_t>(rtc::SafeClamp(loss_ratio_255, 0, 255));
+ last_rtt_ = msg.network_estimate.round_trip_time.ms();
+ last_bwe_period_ms_ = msg.network_estimate.bwe_period.ms();
+
+ // Periodically log the incoming BWE.
+ int64_t now = msg.at_time.ms();
+ if (now > last_bwe_log_time_ + kBweLogIntervalMs) {
+ RTC_LOG(LS_INFO) << "Current BWE " << last_target_bps_;
+ last_bwe_log_time_ = now;
+ }
+
+ auto allocation = AllocateBitrates(allocatable_tracks_, last_target_bps_);
+ auto stable_bitrate_allocation =
+ AllocateBitrates(allocatable_tracks_, last_stable_target_bps_);
+
+ for (auto& config : allocatable_tracks_) {
+ uint32_t allocated_bitrate = allocation[config.observer];
+ uint32_t allocated_stable_target_rate =
+ stable_bitrate_allocation[config.observer];
+ BitrateAllocationUpdate update;
+ update.target_bitrate = DataRate::BitsPerSec(allocated_bitrate);
+ update.stable_target_bitrate =
+ DataRate::BitsPerSec(allocated_stable_target_rate);
+ update.packet_loss_ratio = last_fraction_loss_ / 256.0;
+ update.round_trip_time = TimeDelta::Millis(last_rtt_);
+ update.bwe_period = TimeDelta::Millis(last_bwe_period_ms_);
+ update.cwnd_reduce_ratio = msg.cwnd_reduce_ratio;
+ uint32_t protection_bitrate = config.observer->OnBitrateUpdated(update);
+
+ if (allocated_bitrate == 0 && config.allocated_bitrate_bps > 0) {
+ if (last_target_bps_ > 0)
+ ++num_pause_events_;
+ // The protection bitrate is an estimate based on the ratio between media
+ // and protection used before this observer was muted.
+ uint32_t predicted_protection_bps =
+ (1.0 - config.media_ratio) * config.config.min_bitrate_bps;
+ RTC_LOG(LS_INFO) << "Pausing observer " << config.observer
+ << " with configured min bitrate "
+ << config.config.min_bitrate_bps
+ << " and current estimate of " << last_target_bps_
+ << " and protection bitrate "
+ << predicted_protection_bps;
+ } else if (allocated_bitrate > 0 && config.allocated_bitrate_bps == 0) {
+ if (last_target_bps_ > 0)
+ ++num_pause_events_;
+ RTC_LOG(LS_INFO) << "Resuming observer " << config.observer
+ << ", configured min bitrate "
+ << config.config.min_bitrate_bps
+ << ", current allocation " << allocated_bitrate
+ << " and protection bitrate " << protection_bitrate;
+ }
+
+ // Only update the media ratio if the observer got an allocation.
+ if (allocated_bitrate > 0)
+ config.media_ratio = MediaRatio(allocated_bitrate, protection_bitrate);
+ config.allocated_bitrate_bps = allocated_bitrate;
+ }
+ UpdateAllocationLimits();
+}
+
+// Adds or updates `observer` and immediately pushes a fresh allocation to all
+// observers, or a zero allocation to `observer` if no estimate exists yet.
+void BitrateAllocator::AddObserver(BitrateAllocatorObserver* observer,
+ MediaStreamAllocationConfig config) {
+ RTC_DCHECK_RUN_ON(&sequenced_checker_);
+ RTC_DCHECK_GT(config.bitrate_priority, 0);
+ RTC_DCHECK(std::isnormal(config.bitrate_priority));
+ auto it = absl::c_find_if(
+ allocatable_tracks_,
+ [observer](const auto& config) { return config.observer == observer; });
+ // Update settings if the observer already exists, create a new one otherwise.
+ if (it != allocatable_tracks_.end()) {
+ it->config = config;
+ } else {
+ allocatable_tracks_.push_back(AllocatableTrack(observer, config));
+ }
+
+ if (last_target_bps_ > 0) {
+ // Calculate a new allocation and update all observers.
+
+ auto allocation = AllocateBitrates(allocatable_tracks_, last_target_bps_);
+ auto stable_bitrate_allocation =
+ AllocateBitrates(allocatable_tracks_, last_stable_target_bps_);
+ // NOTE: the loop variable below shadows the `config` parameter.
+ for (auto& config : allocatable_tracks_) {
+ uint32_t allocated_bitrate = allocation[config.observer];
+ uint32_t allocated_stable_bitrate =
+ stable_bitrate_allocation[config.observer];
+ BitrateAllocationUpdate update;
+ update.target_bitrate = DataRate::BitsPerSec(allocated_bitrate);
+ update.stable_target_bitrate =
+ DataRate::BitsPerSec(allocated_stable_bitrate);
+ update.packet_loss_ratio = last_fraction_loss_ / 256.0;
+ update.round_trip_time = TimeDelta::Millis(last_rtt_);
+ update.bwe_period = TimeDelta::Millis(last_bwe_period_ms_);
+ uint32_t protection_bitrate = config.observer->OnBitrateUpdated(update);
+ config.allocated_bitrate_bps = allocated_bitrate;
+ if (allocated_bitrate > 0)
+ config.media_ratio = MediaRatio(allocated_bitrate, protection_bitrate);
+ }
+ } else {
+ // Currently, an encoder is not allowed to produce frames.
+ // But we still have to return the initial config bitrate + let the
+ // observer know that it can not produce frames.
+
+ BitrateAllocationUpdate update;
+ update.target_bitrate = DataRate::Zero();
+ update.stable_target_bitrate = DataRate::Zero();
+ update.packet_loss_ratio = last_fraction_loss_ / 256.0;
+ update.round_trip_time = TimeDelta::Millis(last_rtt_);
+ update.bwe_period = TimeDelta::Millis(last_bwe_period_ms_);
+ observer->OnBitrateUpdated(update);
+ }
+ UpdateAllocationLimits();
+}
+
+// Recomputes the aggregate min/max/padding limits over all tracks and, when
+// they changed, logs and forwards them to the limit observer.
+void BitrateAllocator::UpdateAllocationLimits() {
+ BitrateAllocationLimits limits;
+ for (const auto& config : allocatable_tracks_) {
+ uint32_t stream_padding = config.config.pad_up_bitrate_bps;
+ if (config.config.enforce_min_bitrate) {
+ limits.min_allocatable_rate +=
+ DataRate::BitsPerSec(config.config.min_bitrate_bps);
+ } else if (config.allocated_bitrate_bps == 0) {
+ stream_padding =
+ std::max(config.MinBitrateWithHysteresis(), stream_padding);
+ }
+ limits.max_padding_rate += DataRate::BitsPerSec(stream_padding);
+ limits.max_allocatable_rate +=
+ DataRate::BitsPerSec(config.config.max_bitrate_bps);
+ }
+
+ // Only notify the observer when the limits actually changed.
+ if (limits.min_allocatable_rate == current_limits_.min_allocatable_rate &&
+ limits.max_allocatable_rate == current_limits_.max_allocatable_rate &&
+ limits.max_padding_rate == current_limits_.max_padding_rate) {
+ return;
+ }
+ current_limits_ = limits;
+
+ RTC_LOG(LS_INFO) << "UpdateAllocationLimits : total_requested_min_bitrate: "
+ << ToString(limits.min_allocatable_rate)
+ << ", total_requested_padding_bitrate: "
+ << ToString(limits.max_padding_rate)
+ << ", total_requested_max_bitrate: "
+ << ToString(limits.max_allocatable_rate);
+
+ limit_observer_->OnAllocationLimitsChanged(limits);
+}
+
+// Erases `observer` (if present) and refreshes the allocation limits; does
+// not trigger a new bitrate allocation.
+void BitrateAllocator::RemoveObserver(BitrateAllocatorObserver* observer) {
+ RTC_DCHECK_RUN_ON(&sequenced_checker_);
+ for (auto it = allocatable_tracks_.begin(); it != allocatable_tracks_.end();
+ ++it) {
+ if (it->observer == observer) {
+ allocatable_tracks_.erase(it);
+ break;
+ }
+ }
+
+ UpdateAllocationLimits();
+}
+
+// Returns the bitrate `observer` should start at: its current allocation, or
+// an even share of the last non-zero estimate when none exists yet.
+int BitrateAllocator::GetStartBitrate(
+ BitrateAllocatorObserver* observer) const {
+ RTC_DCHECK_RUN_ON(&sequenced_checker_);
+ auto it = absl::c_find_if(
+ allocatable_tracks_,
+ [observer](const auto& config) { return config.observer == observer; });
+ if (it == allocatable_tracks_.end()) {
+ // This observer hasn't been added yet, just give it its fair share.
+ return last_non_zero_bitrate_bps_ /
+ static_cast<int>((allocatable_tracks_.size() + 1));
+ } else if (it->allocated_bitrate_bps == -1) {
+ // This observer hasn't received an allocation yet, so do the same.
+ return last_non_zero_bitrate_bps_ /
+ static_cast<int>(allocatable_tracks_.size());
+ } else {
+ // This observer already has an allocation.
+ return it->allocated_bitrate_bps;
+ }
+}
+
+// Allocated bitrate from the previous round, with min_bitrate_bps substituted
+// for never-allocated (-1) tracks.
+uint32_t bitrate_allocator_impl::AllocatableTrack::LastAllocatedBitrate()
+ const {
+ // Return the configured minimum bitrate for newly added observers, to avoid
+ // requiring an extra high bitrate for the observer to get an allocated
+ // bitrate.
+ return allocated_bitrate_bps == -1 ? config.min_bitrate_bps
+ : allocated_bitrate_bps;
+}
+
+// Minimum bitrate needed to (re)start this track: the configured minimum,
+// plus a toggle hysteresis when paused, plus headroom for protection overhead.
+uint32_t bitrate_allocator_impl::AllocatableTrack::MinBitrateWithHysteresis()
+ const {
+ uint32_t min_bitrate = config.min_bitrate_bps;
+ if (LastAllocatedBitrate() == 0) {
+ min_bitrate += std::max(static_cast<uint32_t>(kToggleFactor * min_bitrate),
+ kMinToggleBitrateBps);
+ }
+ // Account for protection bitrate used by this observer in the previous
+ // allocation.
+ // Note: the ratio will only be updated when the stream is active, meaning a
+ // paused stream won't get any ratio updates. This might lead to waiting a bit
+ // longer than necessary if the network condition improves, but this is to
+ // avoid too much toggling.
+ if (media_ratio > 0.0 && media_ratio < 1.0)
+ min_bitrate += min_bitrate * (1.0 - media_ratio);
+
+ return min_bitrate;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/bitrate_allocator.h b/third_party/libwebrtc/call/bitrate_allocator.h
new file mode 100644
index 0000000000..204fc6f94d
--- /dev/null
+++ b/third_party/libwebrtc/call/bitrate_allocator.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef CALL_BITRATE_ALLOCATOR_H_
+#define CALL_BITRATE_ALLOCATOR_H_
+
+#include <stdint.h>
+
+#include <map>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "api/call/bitrate_allocation.h"
+#include "api/sequence_checker.h"
+#include "api/transport/network_types.h"
+#include "rtc_base/system/no_unique_address.h"
+
+namespace webrtc {
+
+class Clock;
+
+// Used by all send streams with adaptive bitrate, to get the currently
+// allocated bitrate for the send stream. The current network properties are
+// given at the same time, to let the send stream decide about possible loss
+// protection.
+class BitrateAllocatorObserver {
+ public:
+ // Returns the amount of protection used by the BitrateAllocatorObserver
+ // implementation, as bitrate in bps.
+ virtual uint32_t OnBitrateUpdated(BitrateAllocationUpdate update) = 0;
+
+ protected:
+ // Protected: observers are not owned or deleted through this interface.
+ virtual ~BitrateAllocatorObserver() {}
+};
+
+// Struct describing parameters for how a media stream should get bitrate
+// allocated to it.
+
+struct MediaStreamAllocationConfig {
+ // Minimum bitrate supported by track. 0 equals no min bitrate.
+ uint32_t min_bitrate_bps;
+ // Maximum bitrate supported by track. 0 equals no max bitrate.
+ uint32_t max_bitrate_bps;
+ // Padding bitrate requested for this track; it feeds the max_padding_rate
+ // accounting in BitrateAllocator::UpdateAllocationLimits().
+ uint32_t pad_up_bitrate_bps;
+ // Bitrate filled with strict priority, before any proportional sharing
+ // among tracks (see NormalRateAllocation() in bitrate_allocator.cc).
+ int64_t priority_bitrate_bps;
+ // True means track may not be paused by allocating 0 bitrate will allocate at
+ // least `min_bitrate_bps` for this observer, even if the BWE is too low,
+ // false will allocate 0 to the observer if BWE doesn't allow
+ // `min_bitrate_bps`.
+ bool enforce_min_bitrate;
+ // The amount of bitrate allocated to this observer relative to all other
+ // observers. If an observer has twice the bitrate_priority of other
+ // observers, it should be allocated twice the bitrate above its min.
+ double bitrate_priority;
+};
+
+// Interface used for mocking
+class BitrateAllocatorInterface {
+ public:
+ // See BitrateAllocator for the semantics of these methods.
+ virtual void AddObserver(BitrateAllocatorObserver* observer,
+ MediaStreamAllocationConfig config) = 0;
+ virtual void RemoveObserver(BitrateAllocatorObserver* observer) = 0;
+ virtual int GetStartBitrate(BitrateAllocatorObserver* observer) const = 0;
+
+ protected:
+ virtual ~BitrateAllocatorInterface() = default;
+};
+
+namespace bitrate_allocator_impl {
+// Per-observer bookkeeping kept by BitrateAllocator.
+struct AllocatableTrack {
+ AllocatableTrack(BitrateAllocatorObserver* observer,
+ MediaStreamAllocationConfig allocation_config)
+ : observer(observer),
+ config(allocation_config),
+ allocated_bitrate_bps(-1),
+ media_ratio(1.0) {}
+ BitrateAllocatorObserver* observer;
+ MediaStreamAllocationConfig config;
+ // -1 until the first allocation round (see LastAllocatedBitrate()).
+ int64_t allocated_bitrate_bps;
+ double media_ratio; // Part of the total bitrate used for media [0.0, 1.0].
+
+ uint32_t LastAllocatedBitrate() const;
+ // The minimum bitrate required by this observer, including
+ // enable-hysteresis if the observer is in a paused state.
+ uint32_t MinBitrateWithHysteresis() const;
+};
+} // namespace bitrate_allocator_impl
+
+// Usage: this class will register multiple RtcpBitrateObserver's one at each
+// RTCP module. It will aggregate the results and run one bandwidth estimation
+// and push the result to the encoders via BitrateAllocatorObserver(s).
+class BitrateAllocator : public BitrateAllocatorInterface {
+ public:
+ // Used to get notified when send stream limits such as the minimum send
+ // bitrate and max padding bitrate is changed.
+ class LimitObserver {
+ public:
+ virtual void OnAllocationLimitsChanged(BitrateAllocationLimits limits) = 0;
+
+ protected:
+ virtual ~LimitObserver() = default;
+ };
+
+ explicit BitrateAllocator(LimitObserver* limit_observer);
+ ~BitrateAllocator() override;
+
+ void UpdateStartRate(uint32_t start_rate_bps);
+
+ // Allocate target_bitrate across the registered BitrateAllocatorObservers.
+ void OnNetworkEstimateChanged(TargetTransferRate msg);
+
+ // Set the configuration used by the bandwidth management.
+ // `observer` updates bitrates if already in use.
+ // `config` is the configuration to use for allocation.
+ // Note that `observer`->OnBitrateUpdated() will be called
+ // within the scope of this method with the current rtt, fraction_loss and
+ // available bitrate and that the bitrate in OnBitrateUpdated will be zero if
+ // the `observer` is currently not allowed to send data.
+ void AddObserver(BitrateAllocatorObserver* observer,
+ MediaStreamAllocationConfig config) override;
+
+ // Removes a previously added observer, but will not trigger a new bitrate
+ // allocation.
+ void RemoveObserver(BitrateAllocatorObserver* observer) override;
+
+ // Returns initial bitrate allocated for `observer`. If `observer` is not in
+ // the list of added observers, a best guess is returned.
+ int GetStartBitrate(BitrateAllocatorObserver* observer) const override;
+
+ private:
+ using AllocatableTrack = bitrate_allocator_impl::AllocatableTrack;
+
+ // Calculates the minimum requested send bitrate and max padding bitrate and
+ // calls LimitObserver::OnAllocationLimitsChanged.
+ void UpdateAllocationLimits() RTC_RUN_ON(&sequenced_checker_);
+
+ // Allow packets to be transmitted in up to 2 times max video bitrate if the
+ // bandwidth estimate allows it.
+ // TODO(bugs.webrtc.org/8541): May be worth to refactor to keep this logic in
+ // video send stream.
+ static uint8_t GetTransmissionMaxBitrateMultiplier();
+
+ // All mutable state below is confined to `sequenced_checker_`'s sequence.
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker sequenced_checker_;
+ LimitObserver* const limit_observer_ RTC_GUARDED_BY(&sequenced_checker_);
+ // Stored in a list to keep track of the insertion order.
+ std::vector<AllocatableTrack> allocatable_tracks_
+ RTC_GUARDED_BY(&sequenced_checker_);
+ uint32_t last_target_bps_ RTC_GUARDED_BY(&sequenced_checker_);
+ uint32_t last_stable_target_bps_ RTC_GUARDED_BY(&sequenced_checker_);
+ uint32_t last_non_zero_bitrate_bps_ RTC_GUARDED_BY(&sequenced_checker_);
+ uint8_t last_fraction_loss_ RTC_GUARDED_BY(&sequenced_checker_);
+ int64_t last_rtt_ RTC_GUARDED_BY(&sequenced_checker_);
+ int64_t last_bwe_period_ms_ RTC_GUARDED_BY(&sequenced_checker_);
+ // Number of mute events based on too low BWE, not network up/down.
+ int num_pause_events_ RTC_GUARDED_BY(&sequenced_checker_);
+ int64_t last_bwe_log_time_ RTC_GUARDED_BY(&sequenced_checker_);
+ BitrateAllocationLimits current_limits_ RTC_GUARDED_BY(&sequenced_checker_);
+};
+
+} // namespace webrtc
+#endif // CALL_BITRATE_ALLOCATOR_H_
diff --git a/third_party/libwebrtc/call/bitrate_allocator_gn/moz.build b/third_party/libwebrtc/call/bitrate_allocator_gn/moz.build
new file mode 100644
index 0000000000..a56b55faf3
--- /dev/null
+++ b/third_party/libwebrtc/call/bitrate_allocator_gn/moz.build
@@ -0,0 +1,236 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/call/bitrate_allocator.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["RTC_ENABLE_WIN_WGC"] = True
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["TARGET_CPU"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["TARGET_CPU"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["TARGET_CPU"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["TARGET_CPU"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["TARGET_CPU"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["TARGET_CPU"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android" and CONFIG["TARGET_CPU"] == "arm":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["OS_TARGET"] == "Android" and CONFIG["TARGET_CPU"] == "x86":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "aarch64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "arm":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "x86":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "x86_64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("bitrate_allocator_gn")
diff --git a/third_party/libwebrtc/call/bitrate_allocator_unittest.cc b/third_party/libwebrtc/call/bitrate_allocator_unittest.cc
new file mode 100644
index 0000000000..69bdd83397
--- /dev/null
+++ b/third_party/libwebrtc/call/bitrate_allocator_unittest.cc
@@ -0,0 +1,1037 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/bitrate_allocator.h"
+
+#include <algorithm>
+#include <memory>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "system_wrappers/include/clock.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+using ::testing::_;
+using ::testing::AllOf;
+using ::testing::Field;
+using ::testing::NiceMock;
+
+namespace webrtc {
+
+namespace {
+auto AllocationLimitsEq(uint32_t min_allocatable_rate_bps,
+ uint32_t max_padding_rate_bps,
+ uint32_t max_allocatable_rate_bps) {
+ return AllOf(Field(&BitrateAllocationLimits::min_allocatable_rate,
+ DataRate::BitsPerSec(min_allocatable_rate_bps)),
+ Field(&BitrateAllocationLimits::max_allocatable_rate,
+ DataRate::BitsPerSec(max_allocatable_rate_bps)),
+ Field(&BitrateAllocationLimits::max_padding_rate,
+ DataRate::BitsPerSec(max_padding_rate_bps)));
+}
+
+auto AllocationLimitsEq(uint32_t min_allocatable_rate_bps,
+ uint32_t max_padding_rate_bps) {
+ return AllOf(Field(&BitrateAllocationLimits::min_allocatable_rate,
+ DataRate::BitsPerSec(min_allocatable_rate_bps)),
+ Field(&BitrateAllocationLimits::max_padding_rate,
+ DataRate::BitsPerSec(max_padding_rate_bps)));
+}
+
+class MockLimitObserver : public BitrateAllocator::LimitObserver {
+ public:
+ MOCK_METHOD(void,
+ OnAllocationLimitsChanged,
+ (BitrateAllocationLimits),
+ (override));
+};
+
+class TestBitrateObserver : public BitrateAllocatorObserver {
+ public:
+ TestBitrateObserver()
+ : last_bitrate_bps_(0),
+ last_fraction_loss_(0),
+ last_rtt_ms_(0),
+ last_probing_interval_ms_(0),
+ protection_ratio_(0.0) {}
+
+ void SetBitrateProtectionRatio(double protection_ratio) {
+ protection_ratio_ = protection_ratio;
+ }
+
+ uint32_t OnBitrateUpdated(BitrateAllocationUpdate update) override {
+ last_bitrate_bps_ = update.target_bitrate.bps();
+ last_fraction_loss_ =
+ rtc::dchecked_cast<uint8_t>(update.packet_loss_ratio * 256);
+ last_rtt_ms_ = update.round_trip_time.ms();
+ last_probing_interval_ms_ = update.bwe_period.ms();
+ return update.target_bitrate.bps() * protection_ratio_;
+ }
+ uint32_t last_bitrate_bps_;
+ uint8_t last_fraction_loss_;
+ int64_t last_rtt_ms_;
+ int last_probing_interval_ms_;
+ double protection_ratio_;
+};
+
+constexpr int64_t kDefaultProbingIntervalMs = 3000;
+const double kDefaultBitratePriority = 1.0;
+
+TargetTransferRate CreateTargetRateMessage(uint32_t target_bitrate_bps,
+ uint8_t fraction_loss,
+ int64_t rtt_ms,
+ int64_t bwe_period_ms) {
+ TargetTransferRate msg;
+ // The timestamp is just for log output, keeping it fixed just means fewer log
+ // messages in the test.
+ msg.at_time = Timestamp::Seconds(10000);
+ msg.target_rate = DataRate::BitsPerSec(target_bitrate_bps);
+ msg.stable_target_rate = msg.target_rate;
+ msg.network_estimate.bandwidth = msg.target_rate;
+ msg.network_estimate.loss_rate_ratio = fraction_loss / 255.0;
+ msg.network_estimate.round_trip_time = TimeDelta::Millis(rtt_ms);
+ msg.network_estimate.bwe_period = TimeDelta::Millis(bwe_period_ms);
+ return msg;
+}
+} // namespace
+
+class BitrateAllocatorTest : public ::testing::Test {
+ protected:
+ BitrateAllocatorTest() : allocator_(new BitrateAllocator(&limit_observer_)) {
+ allocator_->OnNetworkEstimateChanged(
+ CreateTargetRateMessage(300000u, 0, 0, kDefaultProbingIntervalMs));
+ }
+ ~BitrateAllocatorTest() {}
+ void AddObserver(BitrateAllocatorObserver* observer,
+ uint32_t min_bitrate_bps,
+ uint32_t max_bitrate_bps,
+ uint32_t pad_up_bitrate_bps,
+ bool enforce_min_bitrate,
+ double bitrate_priority) {
+ allocator_->AddObserver(
+ observer,
+ {min_bitrate_bps, max_bitrate_bps, pad_up_bitrate_bps,
+ /* priority_bitrate */ 0, enforce_min_bitrate, bitrate_priority});
+ }
+ MediaStreamAllocationConfig DefaultConfig() const {
+ MediaStreamAllocationConfig default_config;
+ default_config.min_bitrate_bps = 0;
+ default_config.max_bitrate_bps = 1500000;
+ default_config.pad_up_bitrate_bps = 0;
+ default_config.priority_bitrate_bps = 0;
+ default_config.enforce_min_bitrate = true;
+ default_config.bitrate_priority = kDefaultBitratePriority;
+ return default_config;
+ }
+
+ NiceMock<MockLimitObserver> limit_observer_;
+ std::unique_ptr<BitrateAllocator> allocator_;
+};
+
+TEST_F(BitrateAllocatorTest, RespectsPriorityBitrate) {
+ TestBitrateObserver stream_a;
+ auto config_a = DefaultConfig();
+ config_a.min_bitrate_bps = 100000;
+ config_a.priority_bitrate_bps = 0;
+ allocator_->AddObserver(&stream_a, config_a);
+
+ TestBitrateObserver stream_b;
+ auto config_b = DefaultConfig();
+ config_b.min_bitrate_bps = 100000;
+ config_b.max_bitrate_bps = 300000;
+ config_b.priority_bitrate_bps = 300000;
+ allocator_->AddObserver(&stream_b, config_b);
+
+ allocator_->OnNetworkEstimateChanged(
+ CreateTargetRateMessage(100000, 0, 0, 0));
+ EXPECT_EQ(stream_a.last_bitrate_bps_, 100000u);
+ EXPECT_EQ(stream_b.last_bitrate_bps_, 100000u);
+
+ allocator_->OnNetworkEstimateChanged(
+ CreateTargetRateMessage(200000, 0, 0, 0));
+ EXPECT_EQ(stream_a.last_bitrate_bps_, 100000u);
+ EXPECT_EQ(stream_b.last_bitrate_bps_, 100000u);
+
+ allocator_->OnNetworkEstimateChanged(
+ CreateTargetRateMessage(300000, 0, 0, 0));
+ EXPECT_EQ(stream_a.last_bitrate_bps_, 100000u);
+ EXPECT_EQ(stream_b.last_bitrate_bps_, 200000u);
+
+ allocator_->OnNetworkEstimateChanged(
+ CreateTargetRateMessage(400000, 0, 0, 0));
+ EXPECT_EQ(stream_a.last_bitrate_bps_, 100000u);
+ EXPECT_EQ(stream_b.last_bitrate_bps_, 300000u);
+
+ allocator_->OnNetworkEstimateChanged(
+ CreateTargetRateMessage(800000, 0, 0, 0));
+ EXPECT_EQ(stream_a.last_bitrate_bps_, 500000u);
+ EXPECT_EQ(stream_b.last_bitrate_bps_, 300000u);
+}
+
+TEST_F(BitrateAllocatorTest, UpdatingBitrateObserver) {
+ TestBitrateObserver bitrate_observer;
+ const uint32_t kMinSendBitrateBps = 100000;
+ const uint32_t kPadUpToBitrateBps = 50000;
+ const uint32_t kMaxBitrateBps = 1500000;
+
+ EXPECT_CALL(limit_observer_,
+ OnAllocationLimitsChanged(AllocationLimitsEq(
+ kMinSendBitrateBps, kPadUpToBitrateBps, kMaxBitrateBps)));
+ AddObserver(&bitrate_observer, kMinSendBitrateBps, kMaxBitrateBps,
+ kPadUpToBitrateBps, true, kDefaultBitratePriority);
+ EXPECT_EQ(300000, allocator_->GetStartBitrate(&bitrate_observer));
+ allocator_->OnNetworkEstimateChanged(
+ CreateTargetRateMessage(200000, 0, 0, kDefaultProbingIntervalMs));
+ EXPECT_EQ(200000, allocator_->GetStartBitrate(&bitrate_observer));
+
+ // TODO(pbos): Expect capping to 1.5M instead of 3M when not boosting the max
+ // bitrate for FEC/retransmissions (see todo in BitrateAllocator).
+ allocator_->OnNetworkEstimateChanged(
+ CreateTargetRateMessage(4000000, 0, 0, kDefaultProbingIntervalMs));
+ EXPECT_EQ(3000000, allocator_->GetStartBitrate(&bitrate_observer));
+
+ // Expect `max_padding_bitrate_bps` to change to 0 if the observer is updated.
+ EXPECT_CALL(limit_observer_, OnAllocationLimitsChanged(
+ AllocationLimitsEq(kMinSendBitrateBps, 0)));
+ AddObserver(&bitrate_observer, kMinSendBitrateBps, 4000000, 0, true,
+ kDefaultBitratePriority);
+ EXPECT_CALL(limit_observer_, OnAllocationLimitsChanged(
+ AllocationLimitsEq(kMinSendBitrateBps, 0)));
+ EXPECT_EQ(4000000, allocator_->GetStartBitrate(&bitrate_observer));
+
+ AddObserver(&bitrate_observer, kMinSendBitrateBps, kMaxBitrateBps, 0, true,
+ kDefaultBitratePriority);
+ EXPECT_EQ(3000000, allocator_->GetStartBitrate(&bitrate_observer));
+ EXPECT_EQ(3000000u, bitrate_observer.last_bitrate_bps_);
+ allocator_->OnNetworkEstimateChanged(
+ CreateTargetRateMessage(kMaxBitrateBps, 0, 0, kDefaultProbingIntervalMs));
+ EXPECT_EQ(1500000u, bitrate_observer.last_bitrate_bps_);
+}
+
+TEST_F(BitrateAllocatorTest, TwoBitrateObserversOneRtcpObserver) {
+ TestBitrateObserver bitrate_observer_1;
+ TestBitrateObserver bitrate_observer_2;
+ const uint32_t kObs1StartBitrateBps = 100000;
+ const uint32_t kObs2StartBitrateBps = 200000;
+ const uint32_t kObs1MaxBitrateBps = 300000;
+ const uint32_t kObs2MaxBitrateBps = 300000;
+
+ EXPECT_CALL(limit_observer_,
+ OnAllocationLimitsChanged(AllocationLimitsEq(
+ kObs1StartBitrateBps, 0, kObs1MaxBitrateBps)));
+ AddObserver(&bitrate_observer_1, kObs1StartBitrateBps, kObs1MaxBitrateBps, 0,
+ true, kDefaultBitratePriority);
+ EXPECT_EQ(static_cast<int>(kObs1MaxBitrateBps),
+ allocator_->GetStartBitrate(&bitrate_observer_1));
+ EXPECT_CALL(limit_observer_,
+ OnAllocationLimitsChanged(AllocationLimitsEq(
+ kObs1StartBitrateBps + kObs2StartBitrateBps, 0,
+ kObs1MaxBitrateBps + kObs2MaxBitrateBps)));
+ AddObserver(&bitrate_observer_2, kObs2StartBitrateBps, kObs2MaxBitrateBps, 0,
+ true, kDefaultBitratePriority);
+ EXPECT_EQ(static_cast<int>(kObs2StartBitrateBps),
+ allocator_->GetStartBitrate(&bitrate_observer_2));
+
+ // Test too low start bitrate, hence lower than sum of min. Min bitrates
+ // will
+ // be allocated to all observers.
+ allocator_->OnNetworkEstimateChanged(CreateTargetRateMessage(
+ kObs2StartBitrateBps, 0, 50, kDefaultProbingIntervalMs));
+ EXPECT_EQ(100000u, bitrate_observer_1.last_bitrate_bps_);
+ EXPECT_EQ(0, bitrate_observer_1.last_fraction_loss_);
+ EXPECT_EQ(50, bitrate_observer_1.last_rtt_ms_);
+ EXPECT_EQ(200000u, bitrate_observer_2.last_bitrate_bps_);
+ EXPECT_EQ(0, bitrate_observer_2.last_fraction_loss_);
+ EXPECT_EQ(50, bitrate_observer_2.last_rtt_ms_);
+
+ // Test a bitrate which should be distributed equally.
+ allocator_->OnNetworkEstimateChanged(
+ CreateTargetRateMessage(500000, 0, 50, kDefaultProbingIntervalMs));
+ const uint32_t kBitrateToShare =
+ 500000 - kObs2StartBitrateBps - kObs1StartBitrateBps;
+ EXPECT_EQ(100000u + kBitrateToShare / 2,
+ bitrate_observer_1.last_bitrate_bps_);
+ EXPECT_EQ(200000u + kBitrateToShare / 2,
+ bitrate_observer_2.last_bitrate_bps_);
+
+ // Limited by 2x max bitrates since we leave room for FEC and
+ // retransmissions.
+ allocator_->OnNetworkEstimateChanged(
+ CreateTargetRateMessage(1500000, 0, 50, kDefaultProbingIntervalMs));
+ EXPECT_EQ(600000u, bitrate_observer_1.last_bitrate_bps_);
+ EXPECT_EQ(600000u, bitrate_observer_2.last_bitrate_bps_);
+
+ // Verify that if the bandwidth estimate is set to zero, the allocated
+ // rate is
+ // zero.
+ allocator_->OnNetworkEstimateChanged(
+ CreateTargetRateMessage(0, 0, 50, kDefaultProbingIntervalMs));
+ EXPECT_EQ(0u, bitrate_observer_1.last_bitrate_bps_);
+ EXPECT_EQ(0u, bitrate_observer_2.last_bitrate_bps_);
+}
+
+TEST_F(BitrateAllocatorTest, RemoveObserverTriggersLimitObserver) {
+ TestBitrateObserver bitrate_observer;
+ const uint32_t kMinSendBitrateBps = 100000;
+ const uint32_t kPadUpToBitrateBps = 50000;
+ const uint32_t kMaxBitrateBps = 1500000;
+
+ EXPECT_CALL(limit_observer_,
+ OnAllocationLimitsChanged(AllocationLimitsEq(
+ kMinSendBitrateBps, kPadUpToBitrateBps, kMaxBitrateBps)));
+ AddObserver(&bitrate_observer, kMinSendBitrateBps, kMaxBitrateBps,
+ kPadUpToBitrateBps, true, kDefaultBitratePriority);
+ EXPECT_CALL(limit_observer_,
+ OnAllocationLimitsChanged(AllocationLimitsEq(0, 0)));
+ allocator_->RemoveObserver(&bitrate_observer);
+}
+
+class BitrateAllocatorTestNoEnforceMin : public ::testing::Test {
+ protected:
+ BitrateAllocatorTestNoEnforceMin()
+ : allocator_(new BitrateAllocator(&limit_observer_)) {
+ allocator_->OnNetworkEstimateChanged(
+ CreateTargetRateMessage(300000u, 0, 0, kDefaultProbingIntervalMs));
+ }
+ ~BitrateAllocatorTestNoEnforceMin() {}
+ void AddObserver(BitrateAllocatorObserver* observer,
+ uint32_t min_bitrate_bps,
+ uint32_t max_bitrate_bps,
+ uint32_t pad_up_bitrate_bps,
+ bool enforce_min_bitrate,
+ absl::string_view track_id,
+ double bitrate_priority) {
+ allocator_->AddObserver(
+ observer, {min_bitrate_bps, max_bitrate_bps, pad_up_bitrate_bps, 0,
+ enforce_min_bitrate, bitrate_priority});
+ }
+ NiceMock<MockLimitObserver> limit_observer_;
+ std::unique_ptr<BitrateAllocator> allocator_;
+};
+
+// The following three tests verify enforcing a minimum bitrate works as
+// intended.
+TEST_F(BitrateAllocatorTestNoEnforceMin, OneBitrateObserver) {
+ TestBitrateObserver bitrate_observer_1;
+ // Expect OnAllocationLimitsChanged with `min_send_bitrate_bps` = 0 since
+ // AddObserver is called with `enforce_min_bitrate` = false.
+ EXPECT_CALL(limit_observer_,
+ OnAllocationLimitsChanged(AllocationLimitsEq(0, 0)));
+ EXPECT_CALL(limit_observer_,
+ OnAllocationLimitsChanged(AllocationLimitsEq(0, 120000)));
+ AddObserver(&bitrate_observer_1, 100000, 400000, 0, false, "",
+ kDefaultBitratePriority);
+ EXPECT_EQ(300000, allocator_->GetStartBitrate(&bitrate_observer_1));
+
+ // High BWE.
+ allocator_->OnNetworkEstimateChanged(
+ CreateTargetRateMessage(150000, 0, 0, kDefaultProbingIntervalMs));
+ EXPECT_EQ(150000u, bitrate_observer_1.last_bitrate_bps_);
+
+ // Low BWE.
+ allocator_->OnNetworkEstimateChanged(
+ CreateTargetRateMessage(10000, 0, 0, kDefaultProbingIntervalMs));
+ EXPECT_EQ(0u, bitrate_observer_1.last_bitrate_bps_);
+
+ EXPECT_CALL(limit_observer_,
+ OnAllocationLimitsChanged(AllocationLimitsEq(0, 0)));
+ allocator_->RemoveObserver(&bitrate_observer_1);
+}
+
+TEST_F(BitrateAllocatorTestNoEnforceMin, ThreeBitrateObservers) {
+ TestBitrateObserver bitrate_observer_1;
+ TestBitrateObserver bitrate_observer_2;
+ TestBitrateObserver bitrate_observer_3;
+ // Set up the observers with min bitrates at 100000, 200000, and 300000.
+ AddObserver(&bitrate_observer_1, 100000, 400000, 0, false, "",
+ kDefaultBitratePriority);
+ EXPECT_EQ(300000, allocator_->GetStartBitrate(&bitrate_observer_1));
+
+ AddObserver(&bitrate_observer_2, 200000, 400000, 0, false, "",
+ kDefaultBitratePriority);
+ EXPECT_EQ(200000, allocator_->GetStartBitrate(&bitrate_observer_2));
+ EXPECT_EQ(100000u, bitrate_observer_1.last_bitrate_bps_);
+
+ AddObserver(&bitrate_observer_3, 300000, 400000, 0, false, "",
+ kDefaultBitratePriority);
+ EXPECT_EQ(0, allocator_->GetStartBitrate(&bitrate_observer_3));
+ EXPECT_EQ(100000u, bitrate_observer_1.last_bitrate_bps_);
+ EXPECT_EQ(200000u, bitrate_observer_2.last_bitrate_bps_);
+
+ // High BWE. Make sure the controllers get a fair share of the surplus (i.e.,
+ // what is left after each controller gets its min rate).
+ allocator_->OnNetworkEstimateChanged(
+ CreateTargetRateMessage(690000, 0, 0, kDefaultProbingIntervalMs));
+ // Verify that each observer gets its min rate (sum of min rates is 600000),
+ // and that the remaining 90000 is divided equally among the three.
+ uint32_t bitrate_to_share = 690000u - 100000u - 200000u - 300000u;
+ EXPECT_EQ(100000u + bitrate_to_share / 3,
+ bitrate_observer_1.last_bitrate_bps_);
+ EXPECT_EQ(200000u + bitrate_to_share / 3,
+ bitrate_observer_2.last_bitrate_bps_);
+ EXPECT_EQ(300000u + bitrate_to_share / 3,
+ bitrate_observer_3.last_bitrate_bps_);
+
+ // BWE below the sum of observer's min bitrate.
+ allocator_->OnNetworkEstimateChanged(
+ CreateTargetRateMessage(300000, 0, 0, kDefaultProbingIntervalMs));
+ EXPECT_EQ(100000u, bitrate_observer_1.last_bitrate_bps_); // Min bitrate.
+ EXPECT_EQ(200000u, bitrate_observer_2.last_bitrate_bps_); // Min bitrate.
+ EXPECT_EQ(0u, bitrate_observer_3.last_bitrate_bps_); // Nothing.
+
+ // Increased BWE, but still below the sum of configured min bitrates for all
+ // observers and too little for observer 3. 1 and 2 will share the rest.
+ allocator_->OnNetworkEstimateChanged(
+ CreateTargetRateMessage(500000, 0, 0, kDefaultProbingIntervalMs));
+ EXPECT_EQ(200000u, bitrate_observer_1.last_bitrate_bps_); // Min + split.
+ EXPECT_EQ(300000u, bitrate_observer_2.last_bitrate_bps_); // Min + split.
+ EXPECT_EQ(0u, bitrate_observer_3.last_bitrate_bps_); // Nothing.
+
+ // Below min for all.
+ allocator_->OnNetworkEstimateChanged(
+ CreateTargetRateMessage(10000, 0, 0, kDefaultProbingIntervalMs));
+ EXPECT_EQ(0u, bitrate_observer_1.last_bitrate_bps_);
+ EXPECT_EQ(0u, bitrate_observer_2.last_bitrate_bps_);
+ EXPECT_EQ(0u, bitrate_observer_3.last_bitrate_bps_);
+
+ // Verify that zero estimated bandwidth, means that that all gets zero,
+ // regardless of set min bitrate.
+ allocator_->OnNetworkEstimateChanged(
+ CreateTargetRateMessage(0, 0, 0, kDefaultProbingIntervalMs));
+ EXPECT_EQ(0u, bitrate_observer_1.last_bitrate_bps_);
+ EXPECT_EQ(0u, bitrate_observer_2.last_bitrate_bps_);
+ EXPECT_EQ(0u, bitrate_observer_3.last_bitrate_bps_);
+
+ allocator_->RemoveObserver(&bitrate_observer_1);
+ allocator_->RemoveObserver(&bitrate_observer_2);
+ allocator_->RemoveObserver(&bitrate_observer_3);
+}
+
+TEST_F(BitrateAllocatorTestNoEnforceMin, OneBitrateObserverWithPacketLoss) {
+ const uint32_t kMinBitrateBps = 100000;
+ const uint32_t kMaxBitrateBps = 400000;
+ // Hysteresis adds another 10% or 20kbps to min bitrate.
+ const uint32_t kMinStartBitrateBps =
+ kMinBitrateBps + std::max(20000u, kMinBitrateBps / 10);
+
+ // Expect OnAllocationLimitsChanged with `min_send_bitrate_bps` = 0 since
+ // AddObserver is called with `enforce_min_bitrate` = false.
+ TestBitrateObserver bitrate_observer;
+ EXPECT_CALL(limit_observer_, OnAllocationLimitsChanged(
+ AllocationLimitsEq(0, 0, kMaxBitrateBps)));
+ AddObserver(&bitrate_observer, kMinBitrateBps, kMaxBitrateBps, 0, false, "",
+ kDefaultBitratePriority);
+ EXPECT_EQ(300000, allocator_->GetStartBitrate(&bitrate_observer));
+
+ // High BWE.
+ allocator_->OnNetworkEstimateChanged(
+ CreateTargetRateMessage(150000, 0, 0, kDefaultProbingIntervalMs));
+ EXPECT_EQ(150000u, bitrate_observer.last_bitrate_bps_);
+
+ // Add loss and use a part of the bitrate for protection.
+ const double kProtectionRatio = 0.4;
+ const uint8_t fraction_loss = kProtectionRatio * 256;
+ bitrate_observer.SetBitrateProtectionRatio(kProtectionRatio);
+ allocator_->OnNetworkEstimateChanged(CreateTargetRateMessage(
+ 200000, 0, fraction_loss, kDefaultProbingIntervalMs));
+ EXPECT_EQ(200000u, bitrate_observer.last_bitrate_bps_);
+
+ // Above the min threshold, but not enough given the protection used.
+ // Limits changed, as we will video is now off and we need to pad up to the
+ // start bitrate.
+ // Verify the hysteresis is added for the protection.
+ const uint32_t kMinStartBitrateWithProtectionBps =
+ static_cast<uint32_t>(kMinStartBitrateBps * (1 + kProtectionRatio));
+ EXPECT_CALL(limit_observer_,
+ OnAllocationLimitsChanged(AllocationLimitsEq(
+ 0, kMinStartBitrateWithProtectionBps, kMaxBitrateBps)));
+ allocator_->OnNetworkEstimateChanged(CreateTargetRateMessage(
+ kMinStartBitrateBps + 1000, 0, fraction_loss, kDefaultProbingIntervalMs));
+ EXPECT_EQ(0u, bitrate_observer.last_bitrate_bps_);
+
+ allocator_->OnNetworkEstimateChanged(
+ CreateTargetRateMessage(kMinStartBitrateWithProtectionBps - 1000, 0,
+ fraction_loss, kDefaultProbingIntervalMs));
+ EXPECT_EQ(0u, bitrate_observer.last_bitrate_bps_);
+
+ // Just enough to enable video again.
+ EXPECT_CALL(limit_observer_, OnAllocationLimitsChanged(
+ AllocationLimitsEq(0, 0, kMaxBitrateBps)));
+ allocator_->OnNetworkEstimateChanged(
+ CreateTargetRateMessage(kMinStartBitrateWithProtectionBps, 0,
+ fraction_loss, kDefaultProbingIntervalMs));
+ EXPECT_EQ(kMinStartBitrateWithProtectionBps,
+ bitrate_observer.last_bitrate_bps_);
+
+ // Remove all protection and make sure video is not paused as earlier.
+ bitrate_observer.SetBitrateProtectionRatio(0.0);
+ allocator_->OnNetworkEstimateChanged(
+ CreateTargetRateMessage(kMinStartBitrateWithProtectionBps - 1000, 0, 0,
+ kDefaultProbingIntervalMs));
+ EXPECT_EQ(kMinStartBitrateWithProtectionBps - 1000,
+ bitrate_observer.last_bitrate_bps_);
+
+ allocator_->OnNetworkEstimateChanged(CreateTargetRateMessage(
+ kMinStartBitrateBps, 0, 0, kDefaultProbingIntervalMs));
+ EXPECT_EQ(kMinStartBitrateBps, bitrate_observer.last_bitrate_bps_);
+
+ EXPECT_CALL(limit_observer_,
+ OnAllocationLimitsChanged(AllocationLimitsEq(0, 0, 0)));
+ allocator_->RemoveObserver(&bitrate_observer);
+}
+
+TEST_F(BitrateAllocatorTest,
+ TotalAllocationLimitsAreUnaffectedByProtectionRatio) {
+ TestBitrateObserver bitrate_observer;
+
+ const uint32_t kMinBitrateBps = 100000;
+ const uint32_t kMaxBitrateBps = 400000;
+
+ // Register `bitrate_observer` and expect total allocation limits to change.
+ EXPECT_CALL(limit_observer_, OnAllocationLimitsChanged(AllocationLimitsEq(
+ kMinBitrateBps, 0, kMaxBitrateBps)))
+ .Times(1);
+ MediaStreamAllocationConfig allocation_config = DefaultConfig();
+ allocation_config.min_bitrate_bps = kMinBitrateBps;
+ allocation_config.max_bitrate_bps = kMaxBitrateBps;
+ allocator_->AddObserver(&bitrate_observer, allocation_config);
+
+ // Observer uses 20% of it's allocated bitrate for protection.
+ bitrate_observer.SetBitrateProtectionRatio(/*protection_ratio=*/0.2);
+ // Total allocation limits are unaffected by the protection rate change.
+ EXPECT_CALL(limit_observer_, OnAllocationLimitsChanged(_)).Times(0);
+ allocator_->OnNetworkEstimateChanged(
+ CreateTargetRateMessage(200000u, 0, 100, kDefaultProbingIntervalMs));
+
+ // Observer uses 0% of it's allocated bitrate for protection.
+ bitrate_observer.SetBitrateProtectionRatio(/*protection_ratio=*/0.0);
+ // Total allocation limits are unaffected by the protection rate change.
+ EXPECT_CALL(limit_observer_, OnAllocationLimitsChanged(_)).Times(0);
+ allocator_->OnNetworkEstimateChanged(
+ CreateTargetRateMessage(200000u, 0, 100, kDefaultProbingIntervalMs));
+
+ // Observer again uses 20% of it's allocated bitrate for protection.
+ bitrate_observer.SetBitrateProtectionRatio(/*protection_ratio=*/0.2);
+ // Total allocation limits are unaffected by the protection rate change.
+ EXPECT_CALL(limit_observer_, OnAllocationLimitsChanged(_)).Times(0);
+ allocator_->OnNetworkEstimateChanged(
+ CreateTargetRateMessage(200000u, 0, 100, kDefaultProbingIntervalMs));
+}
+
+TEST_F(BitrateAllocatorTestNoEnforceMin, TwoBitrateObserverWithPacketLoss) {
+ TestBitrateObserver bitrate_observer_1;
+ TestBitrateObserver bitrate_observer_2;
+
+ AddObserver(&bitrate_observer_1, 100000, 400000, 0, false, "",
+ kDefaultBitratePriority);
+ EXPECT_EQ(300000, allocator_->GetStartBitrate(&bitrate_observer_1));
+ AddObserver(&bitrate_observer_2, 200000, 400000, 0, false, "",
+ kDefaultBitratePriority);
+ EXPECT_EQ(200000, allocator_->GetStartBitrate(&bitrate_observer_2));
+ EXPECT_EQ(100000u, bitrate_observer_1.last_bitrate_bps_);
+
+ // Enough bitrate for both.
+ bitrate_observer_2.SetBitrateProtectionRatio(0.5);
+ allocator_->OnNetworkEstimateChanged(
+ CreateTargetRateMessage(300000, 0, 0, kDefaultProbingIntervalMs));
+ EXPECT_EQ(100000u, bitrate_observer_1.last_bitrate_bps_);
+ EXPECT_EQ(200000u, bitrate_observer_2.last_bitrate_bps_);
+
+ // Above min for observer 2, but too little given the protection used.
+ allocator_->OnNetworkEstimateChanged(
+ CreateTargetRateMessage(330000, 0, 0, kDefaultProbingIntervalMs));
+ EXPECT_EQ(330000u, bitrate_observer_1.last_bitrate_bps_);
+ EXPECT_EQ(0u, bitrate_observer_2.last_bitrate_bps_);
+
+ allocator_->OnNetworkEstimateChanged(
+ CreateTargetRateMessage(100000, 0, 0, kDefaultProbingIntervalMs));
+ EXPECT_EQ(100000u, bitrate_observer_1.last_bitrate_bps_);
+ EXPECT_EQ(0u, bitrate_observer_2.last_bitrate_bps_);
+
+ allocator_->OnNetworkEstimateChanged(
+ CreateTargetRateMessage(99999, 0, 0, kDefaultProbingIntervalMs));
+ EXPECT_EQ(0u, bitrate_observer_1.last_bitrate_bps_);
+ EXPECT_EQ(0u, bitrate_observer_2.last_bitrate_bps_);
+
+ allocator_->OnNetworkEstimateChanged(
+ CreateTargetRateMessage(119000, 0, 0, kDefaultProbingIntervalMs));
+ EXPECT_EQ(0u, bitrate_observer_1.last_bitrate_bps_);
+ EXPECT_EQ(0u, bitrate_observer_2.last_bitrate_bps_);
+
+ allocator_->OnNetworkEstimateChanged(
+ CreateTargetRateMessage(120000, 0, 0, kDefaultProbingIntervalMs));
+ EXPECT_EQ(120000u, bitrate_observer_1.last_bitrate_bps_);
+ EXPECT_EQ(0u, bitrate_observer_2.last_bitrate_bps_);
+
+ // Verify the protection is accounted for before resuming observer 2.
+ allocator_->OnNetworkEstimateChanged(
+ CreateTargetRateMessage(429000, 0, 0, kDefaultProbingIntervalMs));
+ EXPECT_EQ(400000u, bitrate_observer_1.last_bitrate_bps_);
+ EXPECT_EQ(0u, bitrate_observer_2.last_bitrate_bps_);
+
+ allocator_->OnNetworkEstimateChanged(
+ CreateTargetRateMessage(430000, 0, 0, kDefaultProbingIntervalMs));
+ EXPECT_EQ(100000u, bitrate_observer_1.last_bitrate_bps_);
+ EXPECT_EQ(330000u, bitrate_observer_2.last_bitrate_bps_);
+
+ allocator_->RemoveObserver(&bitrate_observer_1);
+ allocator_->RemoveObserver(&bitrate_observer_2);
+}
+
+TEST_F(BitrateAllocatorTest, ThreeBitrateObserversLowBweEnforceMin) {  // Enforced mins are honored even when BWE is far below their sum.
+  TestBitrateObserver bitrate_observer_1;
+  TestBitrateObserver bitrate_observer_2;
+  TestBitrateObserver bitrate_observer_3;
+
+  AddObserver(&bitrate_observer_1, 100000, 400000, 0, true,
+              kDefaultBitratePriority);
+  EXPECT_EQ(300000, allocator_->GetStartBitrate(&bitrate_observer_1));
+
+  AddObserver(&bitrate_observer_2, 200000, 400000, 0, true,
+              kDefaultBitratePriority);
+  EXPECT_EQ(200000, allocator_->GetStartBitrate(&bitrate_observer_2));
+  EXPECT_EQ(100000u, bitrate_observer_1.last_bitrate_bps_);  // Pushed down to its min.
+
+  AddObserver(&bitrate_observer_3, 300000, 400000, 0, true,
+              kDefaultBitratePriority);
+  EXPECT_EQ(300000, allocator_->GetStartBitrate(&bitrate_observer_3));
+  EXPECT_EQ(100000, static_cast<int>(bitrate_observer_1.last_bitrate_bps_));
+  EXPECT_EQ(200000, static_cast<int>(bitrate_observer_2.last_bitrate_bps_));
+
+  // Low BWE. Verify that all observers still get their respective min
+  // bitrate.
+  allocator_->OnNetworkEstimateChanged(
+      CreateTargetRateMessage(1000, 0, 0, kDefaultProbingIntervalMs));
+  EXPECT_EQ(100000u, bitrate_observer_1.last_bitrate_bps_);  // Min cap.
+  EXPECT_EQ(200000u, bitrate_observer_2.last_bitrate_bps_);  // Min cap.
+  EXPECT_EQ(300000u, bitrate_observer_3.last_bitrate_bps_);  // Min cap.
+
+  allocator_->RemoveObserver(&bitrate_observer_1);
+  allocator_->RemoveObserver(&bitrate_observer_2);
+  allocator_->RemoveObserver(&bitrate_observer_3);
+}
+
+TEST_F(BitrateAllocatorTest, AddObserverWhileNetworkDown) {  // Allocation limits still update while the network is down.
+  TestBitrateObserver bitrate_observer_1;
+  EXPECT_CALL(limit_observer_,
+              OnAllocationLimitsChanged(AllocationLimitsEq(50000, 0)));
+
+  AddObserver(&bitrate_observer_1, 50000, 400000, 0, true,
+              kDefaultBitratePriority);
+  EXPECT_EQ(300000, allocator_->GetStartBitrate(&bitrate_observer_1));
+
+  // Set network down, i.e., no available bitrate.
+  allocator_->OnNetworkEstimateChanged(
+      CreateTargetRateMessage(0, 0, 0, kDefaultProbingIntervalMs));
+
+  EXPECT_EQ(0u, bitrate_observer_1.last_bitrate_bps_);
+
+  TestBitrateObserver bitrate_observer_2;
+  // Adding an observer while the network is down should not affect the limits.
+  EXPECT_CALL(limit_observer_,
+              OnAllocationLimitsChanged(AllocationLimitsEq(50000 + 50000, 0)));
+  AddObserver(&bitrate_observer_2, 50000, 400000, 0, true,
+              kDefaultBitratePriority);
+
+  // Expect the start_bitrate to be set as if the network was still up but that
+  // the new observer has been notified that the network is down.
+  EXPECT_EQ(300000 / 2, allocator_->GetStartBitrate(&bitrate_observer_2));
+  EXPECT_EQ(0u, bitrate_observer_1.last_bitrate_bps_);
+  EXPECT_EQ(0u, bitrate_observer_2.last_bitrate_bps_);
+
+  // Set network back up.
+  allocator_->OnNetworkEstimateChanged(
+      CreateTargetRateMessage(1500000, 0, 50, kDefaultProbingIntervalMs));
+  EXPECT_EQ(750000u, bitrate_observer_1.last_bitrate_bps_);  // 1.5 Mbps split evenly.
+  EXPECT_EQ(750000u, bitrate_observer_2.last_bitrate_bps_);
+}
+
+TEST_F(BitrateAllocatorTest, MixedEnforecedConfigs) {  // NOTE(review): "Enforeced" typo is upstream; kept to avoid renaming the test.
+  TestBitrateObserver enforced_observer;
+  AddObserver(&enforced_observer, 6000, 30000, 0, true,
+              kDefaultBitratePriority);
+  EXPECT_EQ(60000, allocator_->GetStartBitrate(&enforced_observer));
+
+  TestBitrateObserver not_enforced_observer;
+  AddObserver(&not_enforced_observer, 30000, 2500000, 0, false,
+              kDefaultBitratePriority);
+  EXPECT_EQ(270000, allocator_->GetStartBitrate(&not_enforced_observer));
+  EXPECT_EQ(30000u, enforced_observer.last_bitrate_bps_);
+
+  allocator_->OnNetworkEstimateChanged(
+      CreateTargetRateMessage(36000, 0, 50, kDefaultProbingIntervalMs));
+  EXPECT_EQ(6000u, enforced_observer.last_bitrate_bps_);
+  EXPECT_EQ(30000u, not_enforced_observer.last_bitrate_bps_);
+
+  allocator_->OnNetworkEstimateChanged(
+      CreateTargetRateMessage(35000, 0, 50, kDefaultProbingIntervalMs));
+  EXPECT_EQ(30000u, enforced_observer.last_bitrate_bps_);  // Non-enforced observer paused; enforced one takes up to its max.
+  EXPECT_EQ(0u, not_enforced_observer.last_bitrate_bps_);
+
+  allocator_->OnNetworkEstimateChanged(
+      CreateTargetRateMessage(5000, 0, 50, kDefaultProbingIntervalMs));
+  EXPECT_EQ(6000u, enforced_observer.last_bitrate_bps_);  // Enforced min even below BWE.
+  EXPECT_EQ(0u, not_enforced_observer.last_bitrate_bps_);
+
+  allocator_->OnNetworkEstimateChanged(
+      CreateTargetRateMessage(36000, 0, 50, kDefaultProbingIntervalMs));
+  EXPECT_EQ(30000u, enforced_observer.last_bitrate_bps_);
+  EXPECT_EQ(0u, not_enforced_observer.last_bitrate_bps_);
+
+  allocator_->OnNetworkEstimateChanged(
+      CreateTargetRateMessage(55000, 0, 50, kDefaultProbingIntervalMs));
+  EXPECT_EQ(30000u, enforced_observer.last_bitrate_bps_);  // 55000 not yet enough to resume the paused observer.
+  EXPECT_EQ(0u, not_enforced_observer.last_bitrate_bps_);
+
+  allocator_->OnNetworkEstimateChanged(
+      CreateTargetRateMessage(56000, 0, 50, kDefaultProbingIntervalMs));
+  EXPECT_EQ(6000u, enforced_observer.last_bitrate_bps_);
+  EXPECT_EQ(50000u, not_enforced_observer.last_bitrate_bps_);
+
+  allocator_->OnNetworkEstimateChanged(
+      CreateTargetRateMessage(56000, 0, 50, kDefaultProbingIntervalMs));
+  EXPECT_EQ(16000u, enforced_observer.last_bitrate_bps_);  // NOTE(review): same estimate, different split on second call — presumably toggle-state dependent; confirm against allocator hysteresis.
+  EXPECT_EQ(40000u, not_enforced_observer.last_bitrate_bps_);
+
+  allocator_->RemoveObserver(&enforced_observer);
+  allocator_->RemoveObserver(&not_enforced_observer);
+}
+
+TEST_F(BitrateAllocatorTest, AvoidToggleAbsolute) {  // Hysteresis: once paused below min, resuming requires more than min (50000 here).
+  TestBitrateObserver observer;
+  AddObserver(&observer, 30000, 300000, 0, false, kDefaultBitratePriority);
+  EXPECT_EQ(300000, allocator_->GetStartBitrate(&observer));
+
+  allocator_->OnNetworkEstimateChanged(
+      CreateTargetRateMessage(30000, 0, 50, kDefaultProbingIntervalMs));
+  EXPECT_EQ(30000u, observer.last_bitrate_bps_);
+
+  allocator_->OnNetworkEstimateChanged(
+      CreateTargetRateMessage(20000, 0, 50, kDefaultProbingIntervalMs));
+  EXPECT_EQ(0u, observer.last_bitrate_bps_);  // Below min: paused.
+
+  allocator_->OnNetworkEstimateChanged(
+      CreateTargetRateMessage(30000, 0, 50, kDefaultProbingIntervalMs));
+  EXPECT_EQ(0u, observer.last_bitrate_bps_);  // Back at min, but still paused.
+
+  allocator_->OnNetworkEstimateChanged(
+      CreateTargetRateMessage(49000, 0, 50, kDefaultProbingIntervalMs));
+  EXPECT_EQ(0u, observer.last_bitrate_bps_);  // Just under the resume threshold.
+
+  allocator_->OnNetworkEstimateChanged(
+      CreateTargetRateMessage(50000, 0, 50, kDefaultProbingIntervalMs));
+  EXPECT_EQ(50000u, observer.last_bitrate_bps_);  // Resumed.
+
+  allocator_->OnNetworkEstimateChanged(
+      CreateTargetRateMessage(30000, 0, 50, kDefaultProbingIntervalMs));
+  EXPECT_EQ(30000u, observer.last_bitrate_bps_);  // Once active, min does not re-pause it.
+
+  allocator_->RemoveObserver(&observer);
+}
+
+TEST_F(BitrateAllocatorTest, AvoidTogglePercent) {  // Hysteresis: resume threshold above min (330000 = 1.1 * min here).
+  TestBitrateObserver observer;
+  AddObserver(&observer, 300000, 600000, 0, false, kDefaultBitratePriority);
+  EXPECT_EQ(300000, allocator_->GetStartBitrate(&observer));
+
+  allocator_->OnNetworkEstimateChanged(
+      CreateTargetRateMessage(300000, 0, 50, kDefaultProbingIntervalMs));
+  EXPECT_EQ(300000u, observer.last_bitrate_bps_);
+
+  allocator_->OnNetworkEstimateChanged(
+      CreateTargetRateMessage(200000, 0, 50, kDefaultProbingIntervalMs));
+  EXPECT_EQ(0u, observer.last_bitrate_bps_);  // Below min: paused.
+
+  allocator_->OnNetworkEstimateChanged(
+      CreateTargetRateMessage(300000, 0, 50, kDefaultProbingIntervalMs));
+  EXPECT_EQ(0u, observer.last_bitrate_bps_);  // Back at min, but still paused.
+
+  allocator_->OnNetworkEstimateChanged(
+      CreateTargetRateMessage(329000, 0, 50, kDefaultProbingIntervalMs));
+  EXPECT_EQ(0u, observer.last_bitrate_bps_);  // Just under the resume threshold.
+
+  allocator_->OnNetworkEstimateChanged(
+      CreateTargetRateMessage(330000, 0, 50, kDefaultProbingIntervalMs));
+  EXPECT_EQ(330000u, observer.last_bitrate_bps_);  // Resumed.
+
+  allocator_->OnNetworkEstimateChanged(
+      CreateTargetRateMessage(300000, 0, 50, kDefaultProbingIntervalMs));
+  EXPECT_EQ(300000u, observer.last_bitrate_bps_);  // Once active, min does not re-pause it.
+
+  allocator_->RemoveObserver(&observer);
+}
+
+TEST_F(BitrateAllocatorTest, PassProbingInterval) {  // The probing interval from the estimate is forwarded to the observer.
+  TestBitrateObserver observer;
+  AddObserver(&observer, 300000, 600000, 0, false, kDefaultBitratePriority);
+  EXPECT_EQ(300000, allocator_->GetStartBitrate(&observer));
+
+  allocator_->OnNetworkEstimateChanged(
+      CreateTargetRateMessage(300000, 0, 50, 5000));
+  EXPECT_EQ(5000, observer.last_probing_interval_ms_);
+
+  allocator_->RemoveObserver(&observer);
+}
+
+TEST_F(BitrateAllocatorTest, PriorityRateOneObserverBasic) {  // A sole observer receives the full estimate (within min/max).
+  TestBitrateObserver observer;
+  const uint32_t kMinSendBitrateBps = 10;
+  const uint32_t kMaxSendBitrateBps = 60;
+  const uint32_t kNetworkBandwidthBps = 30;
+
+  AddObserver(&observer, kMinSendBitrateBps, kMaxSendBitrateBps, 0, true, 2.0);
+  allocator_->OnNetworkEstimateChanged(CreateTargetRateMessage(
+      kNetworkBandwidthBps, 0, 0, kDefaultProbingIntervalMs));
+
+  EXPECT_EQ(kNetworkBandwidthBps, observer.last_bitrate_bps_);
+
+  allocator_->RemoveObserver(&observer);
+}
+
+// Tests that two observers with the same bitrate priority are allocated
+// their bitrate evenly.
+TEST_F(BitrateAllocatorTest, PriorityRateTwoObserversBasic) {
+  TestBitrateObserver observer_low_1;
+  TestBitrateObserver observer_low_2;
+  const uint32_t kMinSendBitrateBps = 10;
+  const uint32_t kMaxSendBitrateBps = 60;
+  const uint32_t kNetworkBandwidthBps = 60;
+  AddObserver(&observer_low_1, kMinSendBitrateBps, kMaxSendBitrateBps, 0, false,
+              2.0);
+  AddObserver(&observer_low_2, kMinSendBitrateBps, kMaxSendBitrateBps, 0, false,
+              2.0);
+  allocator_->OnNetworkEstimateChanged(CreateTargetRateMessage(
+      kNetworkBandwidthBps, 0, 0, kDefaultProbingIntervalMs));
+
+  EXPECT_EQ(kNetworkBandwidthBps / 2, observer_low_1.last_bitrate_bps_);  // Equal priorities (2.0): 60 / 2 = 30 each.
+  EXPECT_EQ(kNetworkBandwidthBps / 2, observer_low_2.last_bitrate_bps_);
+
+  allocator_->RemoveObserver(&observer_low_1);
+  allocator_->RemoveObserver(&observer_low_2);
+}
+
+// Tests that there is no difference in functionality when the min bitrate is
+// enforced.
+TEST_F(BitrateAllocatorTest, PriorityRateTwoObserversBasicMinEnforced) {  // enforce_min=true variant of the test above.
+  TestBitrateObserver observer_low_1;
+  TestBitrateObserver observer_low_2;
+  const uint32_t kMinSendBitrateBps = 0;
+  const uint32_t kMaxSendBitrateBps = 60;
+  const uint32_t kNetworkBandwidthBps = 60;
+  AddObserver(&observer_low_1, kMinSendBitrateBps, kMaxSendBitrateBps, 0, true,
+              2.0);
+  AddObserver(&observer_low_2, kMinSendBitrateBps, kMaxSendBitrateBps, 0, true,
+              2.0);
+  allocator_->OnNetworkEstimateChanged(CreateTargetRateMessage(
+      kNetworkBandwidthBps, 0, 0, kDefaultProbingIntervalMs));
+
+  EXPECT_EQ(kNetworkBandwidthBps / 2, observer_low_1.last_bitrate_bps_);
+  EXPECT_EQ(kNetworkBandwidthBps / 2, observer_low_2.last_bitrate_bps_);
+
+  allocator_->RemoveObserver(&observer_low_1);
+  allocator_->RemoveObserver(&observer_low_2);
+}
+
+// Tests that if the available bandwidth is the sum of the max bitrate
+// of all observers, they will be allocated their max.
+TEST_F(BitrateAllocatorTest, PriorityRateTwoObserversBothAllocatedMax) {
+  TestBitrateObserver observer_low;
+  TestBitrateObserver observer_mid;
+  const uint32_t kMinSendBitrateBps = 0;
+  const uint32_t kMaxSendBitrateBps = 60;
+  const uint32_t kNetworkBandwidthBps = kMaxSendBitrateBps * 2;  // Exactly enough for both maxes.
+  AddObserver(&observer_low, kMinSendBitrateBps, kMaxSendBitrateBps, 0, true,
+              2.0);
+  AddObserver(&observer_mid, kMinSendBitrateBps, kMaxSendBitrateBps, 0, true,
+              4.0);
+  allocator_->OnNetworkEstimateChanged(CreateTargetRateMessage(
+      kNetworkBandwidthBps, 0, 0, kDefaultProbingIntervalMs));
+
+  EXPECT_EQ(kMaxSendBitrateBps, observer_low.last_bitrate_bps_);
+  EXPECT_EQ(kMaxSendBitrateBps, observer_mid.last_bitrate_bps_);
+
+  allocator_->RemoveObserver(&observer_low);
+  allocator_->RemoveObserver(&observer_mid);
+}
+
+// Tests that after a higher bitrate priority observer has been allocated its
+// max bitrate the lower priority observer will then be allocated the remaining
+// bitrate.
+TEST_F(BitrateAllocatorTest, PriorityRateTwoObserversOneAllocatedToMax) {
+  TestBitrateObserver observer_low;
+  TestBitrateObserver observer_mid;
+  AddObserver(&observer_low, 10, 50, 0, false, 2.0);
+  AddObserver(&observer_mid, 10, 50, 0, false, 4.0);
+  allocator_->OnNetworkEstimateChanged(
+      CreateTargetRateMessage(90, 0, 0, kDefaultProbingIntervalMs));
+
+  EXPECT_EQ(40u, observer_low.last_bitrate_bps_);  // Remainder after mid's max: 90 - 50 = 40.
+  EXPECT_EQ(50u, observer_mid.last_bitrate_bps_);  // Higher priority reaches its max first.
+
+  allocator_->RemoveObserver(&observer_low);
+  allocator_->RemoveObserver(&observer_mid);
+}
+
+// Tests that three observers with three different bitrate priorities will all
+// be allocated bitrate according to their relative bitrate priority.
+TEST_F(BitrateAllocatorTest,
+       PriorityRateThreeObserversAllocatedRelativeAmounts) {
+  TestBitrateObserver observer_low;
+  TestBitrateObserver observer_mid;
+  TestBitrateObserver observer_high;
+  const uint32_t kMaxBitrate = 100;
+  // Not enough bandwidth to fill any observer's max bitrate.
+  const uint32_t kNetworkBandwidthBps = 70;
+  const double kLowBitratePriority = 2.0;
+  const double kMidBitratePriority = 4.0;
+  const double kHighBitratePriority = 8.0;
+  const double kTotalBitratePriority =
+      kLowBitratePriority + kMidBitratePriority + kHighBitratePriority;  // = 14.0.
+  AddObserver(&observer_low, 0, kMaxBitrate, 0, false, kLowBitratePriority);
+  AddObserver(&observer_mid, 0, kMaxBitrate, 0, false, kMidBitratePriority);
+  AddObserver(&observer_high, 0, kMaxBitrate, 0, false, kHighBitratePriority);
+  allocator_->OnNetworkEstimateChanged(CreateTargetRateMessage(
+      kNetworkBandwidthBps, 0, 0, kDefaultProbingIntervalMs));
+
+  const double kLowFractionAllocated =
+      kLowBitratePriority / kTotalBitratePriority;
+  const double kMidFractionAllocated =
+      kMidBitratePriority / kTotalBitratePriority;
+  const double kHighFractionAllocated =
+      kHighBitratePriority / kTotalBitratePriority;
+  EXPECT_EQ(kLowFractionAllocated * kNetworkBandwidthBps,  // 70 * 2/14 = 10.
+            observer_low.last_bitrate_bps_);
+  EXPECT_EQ(kMidFractionAllocated * kNetworkBandwidthBps,  // 70 * 4/14 = 20.
+            observer_mid.last_bitrate_bps_);
+  EXPECT_EQ(kHighFractionAllocated * kNetworkBandwidthBps,  // 70 * 8/14 = 40.
+            observer_high.last_bitrate_bps_);
+
+  allocator_->RemoveObserver(&observer_low);
+  allocator_->RemoveObserver(&observer_mid);
+  allocator_->RemoveObserver(&observer_high);
+}
+
+// Tests that after the high priority observer has been allocated its maximum
+// bitrate, the other two observers are still allocated bitrate according to
+// their relative bitrate priority.
+TEST_F(BitrateAllocatorTest, PriorityRateThreeObserversHighAllocatedToMax) {
+  TestBitrateObserver observer_low;
+  const double kLowBitratePriority = 2.0;
+  TestBitrateObserver observer_mid;
+  const double kMidBitratePriority = 4.0;
+  TestBitrateObserver observer_high;
+  const double kHighBitratePriority = 8.0;
+
+  const uint32_t kAvailableBitrate = 90;
+  const uint32_t kMaxBitrate = 40;
+  const uint32_t kMinBitrate = 10;
+  // Remaining bitrate after allocating to all mins and knowing that the high
+  // priority observer will have its max bitrate allocated.
+  const uint32_t kRemainingBitrate =
+      kAvailableBitrate - kMaxBitrate - (2 * kMinBitrate);  // 90 - 40 - 20 = 30.
+
+  AddObserver(&observer_low, kMinBitrate, kMaxBitrate, 0, false,
+              kLowBitratePriority);
+  AddObserver(&observer_mid, kMinBitrate, kMaxBitrate, 0, false,
+              kMidBitratePriority);
+  AddObserver(&observer_high, kMinBitrate, kMaxBitrate, 0, false,
+              kHighBitratePriority);
+  allocator_->OnNetworkEstimateChanged(CreateTargetRateMessage(
+      kAvailableBitrate, 0, 0, kDefaultProbingIntervalMs));
+
+  const double kLowFractionAllocated =
+      kLowBitratePriority / (kLowBitratePriority + kMidBitratePriority);
+  const double kMidFractionAllocated =
+      kMidBitratePriority / (kLowBitratePriority + kMidBitratePriority);
+  EXPECT_EQ(kMinBitrate + (kRemainingBitrate * kLowFractionAllocated),  // 10 + 30 * 2/6 = 20.
+            observer_low.last_bitrate_bps_);
+  EXPECT_EQ(kMinBitrate + (kRemainingBitrate * kMidFractionAllocated),  // 10 + 30 * 4/6 = 30.
+            observer_mid.last_bitrate_bps_);
+  EXPECT_EQ(40u, observer_high.last_bitrate_bps_);  // Capped at kMaxBitrate.
+
+  allocator_->RemoveObserver(&observer_low);
+  allocator_->RemoveObserver(&observer_mid);
+  allocator_->RemoveObserver(&observer_high);
+}
+
+// Tests that after the low priority observer has been allocated its maximum
+// bitrate, the other two observers are still allocated bitrate according to
+// their relative bitrate priority.
+TEST_F(BitrateAllocatorTest, PriorityRateThreeObserversLowAllocatedToMax) {
+  TestBitrateObserver observer_low;
+  const double kLowBitratePriority = 2.0;
+  const uint32_t kLowMaxBitrate = 10;
+  TestBitrateObserver observer_mid;
+  const double kMidBitratePriority = 4.0;
+  TestBitrateObserver observer_high;
+  const double kHighBitratePriority = 8.0;
+
+  const uint32_t kMinBitrate = 0;
+  const uint32_t kMaxBitrate = 60;
+  const uint32_t kAvailableBitrate = 100;
+  // Remaining bitrate knowing that the low priority observer is allocated its
+  // max bitrate. We know this because it is allocated 2.0/14.0 (1/7) of the
+  // available bitrate, so 70 bps would be sufficient network bandwidth.
+  const uint32_t kRemainingBitrate = kAvailableBitrate - kLowMaxBitrate;  // = 90.
+
+  AddObserver(&observer_low, kMinBitrate, kLowMaxBitrate, 0, false,
+              kLowBitratePriority);
+  AddObserver(&observer_mid, kMinBitrate, kMaxBitrate, 0, false,
+              kMidBitratePriority);
+  AddObserver(&observer_high, kMinBitrate, kMaxBitrate, 0, false,
+              kHighBitratePriority);
+  allocator_->OnNetworkEstimateChanged(CreateTargetRateMessage(
+      kAvailableBitrate, 0, 0, kDefaultProbingIntervalMs));
+
+  const double kMidFractionAllocated =
+      kMidBitratePriority / (kMidBitratePriority + kHighBitratePriority);
+  const double kHighFractionAllocated =
+      kHighBitratePriority / (kMidBitratePriority + kHighBitratePriority);
+  EXPECT_EQ(kLowMaxBitrate, observer_low.last_bitrate_bps_);  // Capped at its 10 bps max.
+  EXPECT_EQ(kMinBitrate + (kRemainingBitrate * kMidFractionAllocated),  // 90 * 4/12 = 30.
+            observer_mid.last_bitrate_bps_);
+  EXPECT_EQ(kMinBitrate + (kRemainingBitrate * kHighFractionAllocated),  // 90 * 8/12 = 60.
+            observer_high.last_bitrate_bps_);
+
+  allocator_->RemoveObserver(&observer_low);
+  allocator_->RemoveObserver(&observer_mid);
+  allocator_->RemoveObserver(&observer_high);
+}
+
+// Tests that after two observers are allocated bitrate to their max,
+// the remaining observer is allocated what's left appropriately. This test
+// handles an edge case where the medium and high observer reach their
+// "relative" max allocation at the same time. The high has 40 to allocate
+// above its min, and the mid has 20 to allocate above its min, which scaled
+// by their bitrate priority is the same for each.
+TEST_F(BitrateAllocatorTest, PriorityRateThreeObserversTwoAllocatedToMax) {
+  TestBitrateObserver observer_low;
+  TestBitrateObserver observer_mid;
+  TestBitrateObserver observer_high;
+  AddObserver(&observer_low, 10, 40, 0, false, 2.0);
+  // Scaled allocation above the min allocation is the same for these two,
+  // meaning they will get allocated their max at the same time.
+  // Scaled (target allocation) = (max - min) / bitrate priority
+  AddObserver(&observer_mid, 10, 30, 0, false, 4.0);
+  AddObserver(&observer_high, 10, 50, 0, false, 8.0);
+  allocator_->OnNetworkEstimateChanged(
+      CreateTargetRateMessage(110, 0, 0, kDefaultProbingIntervalMs));
+
+  EXPECT_EQ(30u, observer_low.last_bitrate_bps_);  // Gets the 110 - 30 - 50 = 30 left over.
+  EXPECT_EQ(30u, observer_mid.last_bitrate_bps_);  // Capped at its max.
+  EXPECT_EQ(50u, observer_high.last_bitrate_bps_);  // Capped at its max.
+
+  allocator_->RemoveObserver(&observer_low);
+  allocator_->RemoveObserver(&observer_mid);
+  allocator_->RemoveObserver(&observer_high);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/bitrate_configurator_gn/moz.build b/third_party/libwebrtc/call/bitrate_configurator_gn/moz.build
new file mode 100644
index 0000000000..e6f73025aa
--- /dev/null
+++ b/third_party/libwebrtc/call/bitrate_configurator_gn/moz.build
@@ -0,0 +1,236 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/call/rtp_bitrate_configurator.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["RTC_ENABLE_WIN_WGC"] = True
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["TARGET_CPU"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["TARGET_CPU"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["TARGET_CPU"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["TARGET_CPU"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["TARGET_CPU"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["TARGET_CPU"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android" and CONFIG["TARGET_CPU"] == "arm":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["OS_TARGET"] == "Android" and CONFIG["TARGET_CPU"] == "x86":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "aarch64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "arm":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "x86":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "x86_64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("bitrate_configurator_gn")
diff --git a/third_party/libwebrtc/call/bitrate_estimator_tests.cc b/third_party/libwebrtc/call/bitrate_estimator_tests.cc
new file mode 100644
index 0000000000..f17a037ed2
--- /dev/null
+++ b/third_party/libwebrtc/call/bitrate_estimator_tests.cc
@@ -0,0 +1,329 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include <cstddef>
+#include <functional>
+#include <list>
+#include <memory>
+#include <string>
+
+#include "absl/strings/string_view.h"
+#include "api/test/create_frame_generator.h"
+#include "call/call.h"
+#include "call/simulated_network.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/event.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/task_queue_for_test.h"
+#include "rtc_base/thread_annotations.h"
+#include "test/call_test.h"
+#include "test/encoder_settings.h"
+#include "test/fake_decoder.h"
+#include "test/fake_encoder.h"
+#include "test/frame_generator_capturer.h"
+#include "test/gtest.h"
+#include "test/video_test_constants.h"
+
+namespace webrtc {
+namespace {
+// Note: If you consider to re-use this class, think twice and instead consider
+// writing tests that don't depend on the logging system.
+class LogObserver {  // Blocks in Wait() until all pushed expected log lines have been observed, in order.
+ public:
+  LogObserver() { rtc::LogMessage::AddLogToStream(&callback_, rtc::LS_INFO); }
+
+  ~LogObserver() { rtc::LogMessage::RemoveLogToStream(&callback_); }
+
+  void PushExpectedLogLine(absl::string_view expected_log_line) {
+    callback_.PushExpectedLogLine(expected_log_line);
+  }
+
+  bool Wait() { return callback_.Wait(); }
+
+ private:
+  class Callback : public rtc::LogSink {  // LogSink that matches received lines against the expected queue.
+   public:
+    void OnLogMessage(const std::string& message) override {
+      OnLogMessage(absl::string_view(message));
+    }
+
+    void OnLogMessage(absl::string_view message) override {
+      MutexLock lock(&mutex_);
+      // Ignore log lines that are due to missing AST extensions, these are
+      // logged when we switch back from AST to TOF until the wrapping bitrate
+      // estimator gives up on using AST.
+      if (message.find("BitrateEstimator") != absl::string_view::npos &&
+          message.find("packet is missing") == absl::string_view::npos) {
+        received_log_lines_.push_back(std::string(message));
+      }
+
+      int num_popped = 0;
+      while (!received_log_lines_.empty() && !expected_log_lines_.empty()) {
+        std::string a = received_log_lines_.front();
+        std::string b = expected_log_lines_.front();
+        received_log_lines_.pop_front();
+        expected_log_lines_.pop_front();
+        num_popped++;
+        EXPECT_TRUE(a.find(b) != absl::string_view::npos) << a << " != " << b;  // Substring match, in push order.
+      }
+      if (expected_log_lines_.empty()) {
+        if (num_popped > 0) {
+          done_.Set();  // Wake Wait() only once all expected lines are consumed.
+        }
+        return;
+      }
+    }
+
+    bool Wait() {
+      return done_.Wait(test::VideoTestConstants::kDefaultTimeout);  // False on timeout.
+    }
+
+    void PushExpectedLogLine(absl::string_view expected_log_line) {
+      MutexLock lock(&mutex_);
+      expected_log_lines_.emplace_back(expected_log_line);
+    }
+
+   private:
+    typedef std::list<std::string> Strings;
+    Mutex mutex_;
+    Strings received_log_lines_ RTC_GUARDED_BY(mutex_);
+    Strings expected_log_lines_ RTC_GUARDED_BY(mutex_);
+    rtc::Event done_;
+  };
+
+  Callback callback_;
+};
+} // namespace
+
+static const int kTOFExtensionId = 4;  // Transmission-offset RTP header extension ID (kTimestampOffsetUri).
+static const int kASTExtensionId = 5;  // Absolute-send-time RTP header extension ID (kAbsSendTimeUri).
+
+class BitrateEstimatorTest : public test::CallTest {
+ public:
+ BitrateEstimatorTest() : receive_config_(nullptr) {}
+
+ virtual ~BitrateEstimatorTest() { EXPECT_TRUE(streams_.empty()); }
+
+ virtual void SetUp() {
+ SendTask(task_queue(), [this]() {
+ RegisterRtpExtension(
+ RtpExtension(RtpExtension::kTimestampOffsetUri, kTOFExtensionId));
+ RegisterRtpExtension(
+ RtpExtension(RtpExtension::kAbsSendTimeUri, kASTExtensionId));
+
+ CreateCalls();
+
+ CreateSendTransport(BuiltInNetworkBehaviorConfig(), /*observer=*/nullptr);
+ CreateReceiveTransport(BuiltInNetworkBehaviorConfig(),
+ /*observer=*/nullptr);
+
+ VideoSendStream::Config video_send_config(send_transport_.get());
+ video_send_config.rtp.ssrcs.push_back(
+ test::VideoTestConstants::kVideoSendSsrcs[0]);
+ video_send_config.encoder_settings.encoder_factory =
+ &fake_encoder_factory_;
+ video_send_config.encoder_settings.bitrate_allocator_factory =
+ bitrate_allocator_factory_.get();
+ video_send_config.rtp.payload_name = "FAKE";
+ video_send_config.rtp.payload_type =
+ test::VideoTestConstants::kFakeVideoSendPayloadType;
+ SetVideoSendConfig(video_send_config);
+ VideoEncoderConfig video_encoder_config;
+ test::FillEncoderConfiguration(kVideoCodecVP8, 1, &video_encoder_config);
+ SetVideoEncoderConfig(video_encoder_config);
+
+ receive_config_ =
+ VideoReceiveStreamInterface::Config(receive_transport_.get());
+ // receive_config_.decoders will be set by every stream separately.
+ receive_config_.rtp.remote_ssrc = GetVideoSendConfig()->rtp.ssrcs[0];
+ receive_config_.rtp.local_ssrc =
+ test::VideoTestConstants::kReceiverLocalVideoSsrc;
+ });
+ }
+
+ virtual void TearDown() {
+ SendTask(task_queue(), [this]() {
+ for (auto* stream : streams_) {
+ stream->StopSending();
+ delete stream;
+ }
+ streams_.clear();
+ DestroyCalls();
+ });
+ }
+
+ protected:
+ friend class Stream;
+
+ class Stream {
+ public:
+ explicit Stream(BitrateEstimatorTest* test)
+ : test_(test),
+ is_sending_receiving_(false),
+ send_stream_(nullptr),
+ frame_generator_capturer_(),
+ decoder_factory_(
+ []() { return std::make_unique<test::FakeDecoder>(); }) {
+ test_->GetVideoSendConfig()->rtp.ssrcs[0]++;
+ send_stream_ = test_->sender_call_->CreateVideoSendStream(
+ test_->GetVideoSendConfig()->Copy(),
+ test_->GetVideoEncoderConfig()->Copy());
+ RTC_DCHECK_EQ(1, test_->GetVideoEncoderConfig()->number_of_streams);
+ frame_generator_capturer_ =
+ std::make_unique<test::FrameGeneratorCapturer>(
+ test->clock_,
+ test::CreateSquareFrameGenerator(
+ test::VideoTestConstants::kDefaultWidth,
+ test::VideoTestConstants::kDefaultHeight, absl::nullopt,
+ absl::nullopt),
+ test::VideoTestConstants::kDefaultFramerate,
+ *test->task_queue_factory_);
+ frame_generator_capturer_->Init();
+ frame_generator_capturer_->Start();
+ send_stream_->SetSource(frame_generator_capturer_.get(),
+ DegradationPreference::MAINTAIN_FRAMERATE);
+ send_stream_->Start();
+
+ VideoReceiveStreamInterface::Decoder decoder;
+ test_->receive_config_.decoder_factory = &decoder_factory_;
+ decoder.payload_type = test_->GetVideoSendConfig()->rtp.payload_type;
+ decoder.video_format =
+ SdpVideoFormat(test_->GetVideoSendConfig()->rtp.payload_name);
+ test_->receive_config_.decoders.clear();
+ test_->receive_config_.decoders.push_back(decoder);
+ test_->receive_config_.rtp.remote_ssrc =
+ test_->GetVideoSendConfig()->rtp.ssrcs[0];
+ test_->receive_config_.rtp.local_ssrc++;
+ test_->receive_config_.renderer = &test->fake_renderer_;
+ video_receive_stream_ = test_->receiver_call_->CreateVideoReceiveStream(
+ test_->receive_config_.Copy());
+ video_receive_stream_->Start();
+ is_sending_receiving_ = true;
+ }
+
+ ~Stream() {
+ EXPECT_FALSE(is_sending_receiving_);
+ test_->sender_call_->DestroyVideoSendStream(send_stream_);
+ frame_generator_capturer_.reset(nullptr);
+ send_stream_ = nullptr;
+ if (video_receive_stream_) {
+ test_->receiver_call_->DestroyVideoReceiveStream(video_receive_stream_);
+ video_receive_stream_ = nullptr;
+ }
+ }
+
+  // Stops the send stream and the receive stream (if any). Idempotent:
+  // calling it again after the first stop is a no-op.
+  void StopSending() {
+    if (is_sending_receiving_) {
+      send_stream_->Stop();
+      if (video_receive_stream_) {
+        video_receive_stream_->Stop();
+      }
+      is_sending_receiving_ = false;
+    }
+  }
+
+ private:
+  // Back-pointer to the owning test fixture; not owned.
+  BitrateEstimatorTest* test_;
+  // True from construction until StopSending() is called.
+  bool is_sending_receiving_;
+  // Created via the sender Call; destroyed in ~Stream().
+  VideoSendStream* send_stream_;
+  // Created via the receiver Call; null after destruction.
+  VideoReceiveStreamInterface* video_receive_stream_;
+  std::unique_ptr<test::FrameGeneratorCapturer> frame_generator_capturer_;
+
+  test::FunctionVideoDecoderFactory decoder_factory_;
+  };
+
+  // Matches expected estimator log lines emitted by the receive side.
+  LogObserver receiver_log_;
+  VideoReceiveStreamInterface::Config receive_config_;
+  // Streams created by the tests below; presumably cleaned up in the
+  // fixture's teardown (not visible here) -- NOTE(review): confirm.
+  std::vector<Stream*> streams_;
+};
+
+// Log lines emitted when the receive-side remote bitrate estimators are
+// instantiated; the tests below wait on these via `receiver_log_`.
+static const char* kAbsSendTimeLog =
+    "RemoteBitrateEstimatorAbsSendTime: Instantiating.";
+static const char* kSingleStreamLog =
+    "RemoteBitrateEstimatorSingleStream: Instantiating.";
+
+// With only the timestamp-offset (TOF) header extension configured, the
+// receiver instantiates the single-stream remote bitrate estimator.
+TEST_F(BitrateEstimatorTest, InstantiatesTOFPerDefaultForVideo) {
+  SendTask(task_queue(), [this]() {
+    GetVideoSendConfig()->rtp.extensions.push_back(
+        RtpExtension(RtpExtension::kTimestampOffsetUri, kTOFExtensionId));
+    // Two instantiation lines are expected -- NOTE(review): presumably one
+    // per receive stream created during setup; confirm against the fixture.
+    receiver_log_.PushExpectedLogLine(kSingleStreamLog);
+    receiver_log_.PushExpectedLogLine(kSingleStreamLog);
+    streams_.push_back(new Stream(this));
+  });
+  EXPECT_TRUE(receiver_log_.Wait());
+}
+
+// With the abs-send-time (AST) extension configured from the start, the
+// receiver first instantiates the single-stream estimator and then switches
+// to the absolute-send-time estimator right away.
+TEST_F(BitrateEstimatorTest, ImmediatelySwitchToASTForVideo) {
+  SendTask(task_queue(), [this]() {
+    GetVideoSendConfig()->rtp.extensions.push_back(
+        RtpExtension(RtpExtension::kAbsSendTimeUri, kASTExtensionId));
+    receiver_log_.PushExpectedLogLine(kSingleStreamLog);
+    receiver_log_.PushExpectedLogLine(kSingleStreamLog);
+    receiver_log_.PushExpectedLogLine("Switching to absolute send time RBE.");
+    receiver_log_.PushExpectedLogLine(kAbsSendTimeLog);
+    streams_.push_back(new Stream(this));
+  });
+  EXPECT_TRUE(receiver_log_.Wait());
+}
+
+// Starts with the TOF extension (single-stream estimator), then replaces it
+// with the AST extension on a second stream and verifies the receiver
+// switches to the absolute-send-time estimator.
+TEST_F(BitrateEstimatorTest, SwitchesToASTForVideo) {
+  SendTask(task_queue(), [this]() {
+    GetVideoSendConfig()->rtp.extensions.push_back(
+        RtpExtension(RtpExtension::kTimestampOffsetUri, kTOFExtensionId));
+    receiver_log_.PushExpectedLogLine(kSingleStreamLog);
+    receiver_log_.PushExpectedLogLine(kSingleStreamLog);
+    streams_.push_back(new Stream(this));
+  });
+  EXPECT_TRUE(receiver_log_.Wait());
+
+  SendTask(task_queue(), [this]() {
+    GetVideoSendConfig()->rtp.extensions[0] =
+        RtpExtension(RtpExtension::kAbsSendTimeUri, kASTExtensionId);
+    receiver_log_.PushExpectedLogLine("Switching to absolute send time RBE.");
+    receiver_log_.PushExpectedLogLine(kAbsSendTimeLog);
+    streams_.push_back(new Stream(this));
+  });
+  EXPECT_TRUE(receiver_log_.Wait());
+}
+
+// Exercises switching from TOF to AST and then back to TOF across three
+// streams, then stops the first two.
+// This test is flaky. See webrtc:5790.
+TEST_F(BitrateEstimatorTest, DISABLED_SwitchesToASTThenBackToTOFForVideo) {
+  SendTask(task_queue(), [this]() {
+    GetVideoSendConfig()->rtp.extensions.push_back(
+        RtpExtension(RtpExtension::kTimestampOffsetUri, kTOFExtensionId));
+    receiver_log_.PushExpectedLogLine(kSingleStreamLog);
+    receiver_log_.PushExpectedLogLine(kAbsSendTimeLog);
+    receiver_log_.PushExpectedLogLine(kSingleStreamLog);
+    streams_.push_back(new Stream(this));
+  });
+  EXPECT_TRUE(receiver_log_.Wait());
+
+  SendTask(task_queue(), [this]() {
+    GetVideoSendConfig()->rtp.extensions[0] =
+        RtpExtension(RtpExtension::kAbsSendTimeUri, kASTExtensionId);
+    receiver_log_.PushExpectedLogLine(kAbsSendTimeLog);
+    receiver_log_.PushExpectedLogLine("Switching to absolute send time RBE.");
+    streams_.push_back(new Stream(this));
+  });
+  EXPECT_TRUE(receiver_log_.Wait());
+
+  SendTask(task_queue(), [this]() {
+    GetVideoSendConfig()->rtp.extensions[0] =
+        RtpExtension(RtpExtension::kTimestampOffsetUri, kTOFExtensionId);
+    receiver_log_.PushExpectedLogLine(kAbsSendTimeLog);
+    receiver_log_.PushExpectedLogLine(
+        "WrappingBitrateEstimator: Switching to transmission time offset RBE.");
+    streams_.push_back(new Stream(this));
+    streams_[0]->StopSending();
+    streams_[1]->StopSending();
+  });
+  EXPECT_TRUE(receiver_log_.Wait());
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/call.cc b/third_party/libwebrtc/call/call.cc
new file mode 100644
index 0000000000..0f3699501e
--- /dev/null
+++ b/third_party/libwebrtc/call/call.cc
@@ -0,0 +1,1428 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/call.h"
+
+#include <string.h>
+
+#include <algorithm>
+#include <atomic>
+#include <cstdint>
+#include <map>
+#include <memory>
+#include <set>
+#include <utility>
+#include <vector>
+
+#include "absl/functional/bind_front.h"
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "api/media_types.h"
+#include "api/rtc_event_log/rtc_event_log.h"
+#include "api/sequence_checker.h"
+#include "api/task_queue/pending_task_safety_flag.h"
+#include "api/transport/network_control.h"
+#include "audio/audio_receive_stream.h"
+#include "audio/audio_send_stream.h"
+#include "audio/audio_state.h"
+#include "call/adaptation/broadcast_resource_listener.h"
+#include "call/bitrate_allocator.h"
+#include "call/flexfec_receive_stream_impl.h"
+#include "call/packet_receiver.h"
+#include "call/receive_time_calculator.h"
+#include "call/rtp_stream_receiver_controller.h"
+#include "call/rtp_transport_controller_send.h"
+#include "call/rtp_transport_controller_send_factory.h"
+#include "call/version.h"
+#include "logging/rtc_event_log/events/rtc_event_audio_receive_stream_config.h"
+#include "logging/rtc_event_log/events/rtc_event_rtcp_packet_incoming.h"
+#include "logging/rtc_event_log/events/rtc_event_rtp_packet_incoming.h"
+#include "logging/rtc_event_log/events/rtc_event_video_receive_stream_config.h"
+#include "logging/rtc_event_log/events/rtc_event_video_send_stream_config.h"
+#include "logging/rtc_event_log/rtc_stream_config.h"
+#include "modules/congestion_controller/include/receive_side_congestion_controller.h"
+#include "modules/rtp_rtcp/include/flexfec_receiver.h"
+#include "modules/rtp_rtcp/include/rtp_header_extension_map.h"
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "modules/rtp_rtcp/source/rtp_util.h"
+#include "modules/video_coding/fec_controller_default.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/strings/string_builder.h"
+#include "rtc_base/system/no_unique_address.h"
+#include "rtc_base/task_utils/repeating_task.h"
+#include "rtc_base/thread_annotations.h"
+#include "rtc_base/time_utils.h"
+#include "rtc_base/trace_event.h"
+#include "system_wrappers/include/clock.h"
+#include "system_wrappers/include/cpu_info.h"
+#include "system_wrappers/include/metrics.h"
+#include "video/call_stats2.h"
+#include "video/send_delay_stats.h"
+#include "video/stats_counter.h"
+#include "video/video_receive_stream2.h"
+#include "video/video_send_stream.h"
+
+namespace webrtc {
+
+namespace {
+
+// Returns a pointer to the first key in `m` whose mapped value equals `v`,
+// or nullptr if no such entry exists (reverse lookup).
+const int* FindKeyByValue(const std::map<int, int>& m, int v) {
+  for (const auto& kv : m) {
+    if (kv.second == v)
+      return &kv.first;
+  }
+  return nullptr;
+}
+
+// Builds an RTC event log stream config describing a video receive stream.
+// For each decoder, the associated RTX payload type is looked up by reverse
+// lookup in rtx_associated_payload_types (0 if not associated).
+std::unique_ptr<rtclog::StreamConfig> CreateRtcLogStreamConfig(
+    const VideoReceiveStreamInterface::Config& config) {
+  auto rtclog_config = std::make_unique<rtclog::StreamConfig>();
+  rtclog_config->remote_ssrc = config.rtp.remote_ssrc;
+  rtclog_config->local_ssrc = config.rtp.local_ssrc;
+  rtclog_config->rtx_ssrc = config.rtp.rtx_ssrc;
+  rtclog_config->rtcp_mode = config.rtp.rtcp_mode;
+
+  for (const auto& d : config.decoders) {
+    const int* search =
+        FindKeyByValue(config.rtp.rtx_associated_payload_types, d.payload_type);
+    rtclog_config->codecs.emplace_back(d.video_format.name, d.payload_type,
+                                       search ? *search : 0);
+  }
+  return rtclog_config;
+}
+
+// Builds an RTC event log stream config for the simulcast substream at
+// `ssrc_index` of a video send stream. The RTX SSRC is only set when an RTX
+// SSRC exists for that index.
+std::unique_ptr<rtclog::StreamConfig> CreateRtcLogStreamConfig(
+    const VideoSendStream::Config& config,
+    size_t ssrc_index) {
+  auto rtclog_config = std::make_unique<rtclog::StreamConfig>();
+  rtclog_config->local_ssrc = config.rtp.ssrcs[ssrc_index];
+  if (ssrc_index < config.rtp.rtx.ssrcs.size()) {
+    rtclog_config->rtx_ssrc = config.rtp.rtx.ssrcs[ssrc_index];
+  }
+  rtclog_config->rtcp_mode = config.rtp.rtcp_mode;
+  rtclog_config->rtp_extensions = config.rtp.extensions;
+
+  rtclog_config->codecs.emplace_back(config.rtp.payload_name,
+                                     config.rtp.payload_type,
+                                     config.rtp.rtx.payload_type);
+  return rtclog_config;
+}
+
+// Builds an RTC event log stream config for an audio receive stream; only
+// the SSRC pair is recorded.
+std::unique_ptr<rtclog::StreamConfig> CreateRtcLogStreamConfig(
+    const AudioReceiveStreamInterface::Config& config) {
+  auto rtclog_config = std::make_unique<rtclog::StreamConfig>();
+  rtclog_config->remote_ssrc = config.rtp.remote_ssrc;
+  rtclog_config->local_ssrc = config.rtp.local_ssrc;
+  return rtclog_config;
+}
+
+// Returns the current TaskQueue if one is active, otherwise falls back to
+// the current rtc::Thread. May return nullptr when neither is set on the
+// calling thread -- NOTE(review): callers appear to rely on a non-null
+// result; confirm.
+TaskQueueBase* GetCurrentTaskQueueOrThread() {
+  TaskQueueBase* current = TaskQueueBase::Current();
+  if (!current)
+    current = rtc::ThreadManager::Instance()->CurrentThread();
+  return current;
+}
+
+} // namespace
+
+namespace internal {
+
+// Wraps an injected resource in a BroadcastResourceListener and handles adding
+// and removing adapter resources to individual VideoSendStreams.
+class ResourceVideoSendStreamForwarder {
+ public:
+  ResourceVideoSendStreamForwarder(
+      rtc::scoped_refptr<webrtc::Resource> resource)
+      : broadcast_resource_listener_(resource) {
+    broadcast_resource_listener_.StartListening();
+  }
+  // All adapter resources must have been removed (via
+  // OnDestroyVideoSendStream) before destruction.
+  ~ResourceVideoSendStreamForwarder() {
+    RTC_DCHECK(adapter_resources_.empty());
+    broadcast_resource_listener_.StopListening();
+  }
+
+  // Returns the original injected resource being broadcast.
+  rtc::scoped_refptr<webrtc::Resource> Resource() const {
+    return broadcast_resource_listener_.SourceResource();
+  }
+
+  // Creates a per-stream adapter resource and attaches it to the stream.
+  // Must not be called twice for the same stream.
+  void OnCreateVideoSendStream(VideoSendStream* video_send_stream) {
+    RTC_DCHECK(adapter_resources_.find(video_send_stream) ==
+               adapter_resources_.end());
+    auto adapter_resource =
+        broadcast_resource_listener_.CreateAdapterResource();
+    video_send_stream->AddAdaptationResource(adapter_resource);
+    adapter_resources_.insert(
+        std::make_pair(video_send_stream, adapter_resource));
+  }
+
+  // Detaches and removes the adapter resource previously created for
+  // `video_send_stream`.
+  void OnDestroyVideoSendStream(VideoSendStream* video_send_stream) {
+    auto it = adapter_resources_.find(video_send_stream);
+    RTC_DCHECK(it != adapter_resources_.end());
+    broadcast_resource_listener_.RemoveAdapterResource(it->second);
+    adapter_resources_.erase(it);
+  }
+
+ private:
+  BroadcastResourceListener broadcast_resource_listener_;
+  // One adapter resource per send stream currently attached.
+  std::map<VideoSendStream*, rtc::scoped_refptr<webrtc::Resource>>
+      adapter_resources_;
+};
+
+// Concrete Call implementation. Tracks the audio/video send and receive
+// streams created by the client, forwards incoming packets, and bridges
+// congestion control (send side via `transport_send_`, receive side via
+// `receive_side_cc_`) with stats collection. Most state is confined to
+// `worker_thread_`; see the per-member RTC_GUARDED_BY annotations.
+class Call final : public webrtc::Call,
+                   public PacketReceiver,
+                   public TargetTransferRateObserver,
+                   public BitrateAllocator::LimitObserver {
+ public:
+  Call(Clock* clock,
+       const CallConfig& config,
+       std::unique_ptr<RtpTransportControllerSendInterface> transport_send,
+       TaskQueueFactory* task_queue_factory);
+  ~Call() override;
+
+  Call(const Call&) = delete;
+  Call& operator=(const Call&) = delete;
+
+  // Implements webrtc::Call.
+  PacketReceiver* Receiver() override;
+
+  webrtc::AudioSendStream* CreateAudioSendStream(
+      const webrtc::AudioSendStream::Config& config) override;
+  void DestroyAudioSendStream(webrtc::AudioSendStream* send_stream) override;
+
+  webrtc::AudioReceiveStreamInterface* CreateAudioReceiveStream(
+      const webrtc::AudioReceiveStreamInterface::Config& config) override;
+  void DestroyAudioReceiveStream(
+      webrtc::AudioReceiveStreamInterface* receive_stream) override;
+
+  webrtc::VideoSendStream* CreateVideoSendStream(
+      webrtc::VideoSendStream::Config config,
+      VideoEncoderConfig encoder_config) override;
+  webrtc::VideoSendStream* CreateVideoSendStream(
+      webrtc::VideoSendStream::Config config,
+      VideoEncoderConfig encoder_config,
+      std::unique_ptr<FecController> fec_controller) override;
+  void DestroyVideoSendStream(webrtc::VideoSendStream* send_stream) override;
+
+  webrtc::VideoReceiveStreamInterface* CreateVideoReceiveStream(
+      webrtc::VideoReceiveStreamInterface::Config configuration) override;
+  void DestroyVideoReceiveStream(
+      webrtc::VideoReceiveStreamInterface* receive_stream) override;
+
+  FlexfecReceiveStream* CreateFlexfecReceiveStream(
+      const FlexfecReceiveStream::Config config) override;
+  void DestroyFlexfecReceiveStream(
+      FlexfecReceiveStream* receive_stream) override;
+
+  void AddAdaptationResource(rtc::scoped_refptr<Resource> resource) override;
+
+  RtpTransportControllerSendInterface* GetTransportControllerSend() override;
+
+  Stats GetStats() const override;
+
+  const FieldTrialsView& trials() const override;
+
+  TaskQueueBase* network_thread() const override;
+  TaskQueueBase* worker_thread() const override;
+
+  void DeliverRtcpPacket(rtc::CopyOnWriteBuffer packet) override;
+
+  void DeliverRtpPacket(
+      MediaType media_type,
+      RtpPacketReceived packet,
+      OnUndemuxablePacketHandler undemuxable_packet_handler) override;
+
+  void SignalChannelNetworkState(MediaType media, NetworkState state) override;
+
+  void OnAudioTransportOverheadChanged(
+      int transport_overhead_per_packet) override;
+
+  void OnLocalSsrcUpdated(webrtc::AudioReceiveStreamInterface& stream,
+                          uint32_t local_ssrc) override;
+  void OnLocalSsrcUpdated(VideoReceiveStreamInterface& stream,
+                          uint32_t local_ssrc) override;
+  void OnLocalSsrcUpdated(FlexfecReceiveStream& stream,
+                          uint32_t local_ssrc) override;
+
+  void OnUpdateSyncGroup(webrtc::AudioReceiveStreamInterface& stream,
+                         absl::string_view sync_group) override;
+
+  void OnSentPacket(const rtc::SentPacket& sent_packet) override;
+
+  // Implements TargetTransferRateObserver,
+  void OnTargetTransferRate(TargetTransferRate msg) override;
+  void OnStartRateUpdate(DataRate start_rate) override;
+
+  // Implements BitrateAllocator::LimitObserver.
+  void OnAllocationLimitsChanged(BitrateAllocationLimits limits) override;
+
+  void SetClientBitratePreferences(const BitrateSettings& preferences) override;
+
+ private:
+  // Thread-compatible class that collects received packet stats and exposes
+  // them as UMA histograms on destruction.
+  class ReceiveStats {
+   public:
+    explicit ReceiveStats(Clock* clock);
+    ~ReceiveStats();
+
+    void AddReceivedRtcpBytes(int bytes);
+    void AddReceivedAudioBytes(int bytes, webrtc::Timestamp arrival_time);
+    void AddReceivedVideoBytes(int bytes, webrtc::Timestamp arrival_time);
+
+   private:
+    RTC_NO_UNIQUE_ADDRESS SequenceChecker sequence_checker_;
+    RateCounter received_bytes_per_second_counter_
+        RTC_GUARDED_BY(sequence_checker_);
+    RateCounter received_audio_bytes_per_second_counter_
+        RTC_GUARDED_BY(sequence_checker_);
+    RateCounter received_video_bytes_per_second_counter_
+        RTC_GUARDED_BY(sequence_checker_);
+    RateCounter received_rtcp_bytes_per_second_counter_
+        RTC_GUARDED_BY(sequence_checker_);
+    absl::optional<Timestamp> first_received_rtp_audio_timestamp_
+        RTC_GUARDED_BY(sequence_checker_);
+    absl::optional<Timestamp> last_received_rtp_audio_timestamp_
+        RTC_GUARDED_BY(sequence_checker_);
+    absl::optional<Timestamp> first_received_rtp_video_timestamp_
+        RTC_GUARDED_BY(sequence_checker_);
+    absl::optional<Timestamp> last_received_rtp_video_timestamp_
+        RTC_GUARDED_BY(sequence_checker_);
+  };
+
+  // Thread-compatible class that collects sent packet stats and exposes
+  // them as UMA histograms on destruction, provided SetFirstPacketTime was
+  // called with a non-empty packet timestamp before the destructor.
+  class SendStats {
+   public:
+    explicit SendStats(Clock* clock);
+    ~SendStats();
+
+    void SetFirstPacketTime(absl::optional<Timestamp> first_sent_packet_time);
+    void PauseSendAndPacerBitrateCounters();
+    void AddTargetBitrateSample(uint32_t target_bitrate_bps);
+    void SetMinAllocatableRate(BitrateAllocationLimits limits);
+
+   private:
+    RTC_NO_UNIQUE_ADDRESS SequenceChecker destructor_sequence_checker_;
+    RTC_NO_UNIQUE_ADDRESS SequenceChecker sequence_checker_;
+    Clock* const clock_ RTC_GUARDED_BY(destructor_sequence_checker_);
+    AvgCounter estimated_send_bitrate_kbps_counter_
+        RTC_GUARDED_BY(sequence_checker_);
+    AvgCounter pacer_bitrate_kbps_counter_ RTC_GUARDED_BY(sequence_checker_);
+    uint32_t min_allocated_send_bitrate_bps_ RTC_GUARDED_BY(sequence_checker_){
+        0};
+    absl::optional<Timestamp> first_sent_packet_time_
+        RTC_GUARDED_BY(destructor_sequence_checker_);
+  };
+
+  void DeliverRtcp(MediaType media_type, rtc::CopyOnWriteBuffer packet)
+      RTC_RUN_ON(network_thread_);
+
+  AudioReceiveStreamImpl* FindAudioStreamForSyncGroup(
+      absl::string_view sync_group) RTC_RUN_ON(worker_thread_);
+  void ConfigureSync(absl::string_view sync_group) RTC_RUN_ON(worker_thread_);
+
+  void NotifyBweOfReceivedPacket(const RtpPacketReceived& packet,
+                                 MediaType media_type)
+      RTC_RUN_ON(worker_thread_);
+
+  bool RegisterReceiveStream(uint32_t ssrc, ReceiveStreamInterface* stream);
+  bool UnregisterReceiveStream(uint32_t ssrc);
+
+  void UpdateAggregateNetworkState();
+
+  // Ensure that necessary process threads are started, and any required
+  // callbacks have been registered.
+  void EnsureStarted() RTC_RUN_ON(worker_thread_);
+
+  Clock* const clock_;
+  TaskQueueFactory* const task_queue_factory_;
+  TaskQueueBase* const worker_thread_;
+  TaskQueueBase* const network_thread_;
+  const std::unique_ptr<DecodeSynchronizer> decode_sync_;
+  RTC_NO_UNIQUE_ADDRESS SequenceChecker send_transport_sequence_checker_;
+
+  const int num_cpu_cores_;
+  const std::unique_ptr<CallStats> call_stats_;
+  const std::unique_ptr<BitrateAllocator> bitrate_allocator_;
+  const CallConfig config_ RTC_GUARDED_BY(worker_thread_);
+  // Maps to config_.trials, can be used from any thread via `trials()`.
+  const FieldTrialsView& trials_;
+
+  NetworkState audio_network_state_ RTC_GUARDED_BY(worker_thread_);
+  NetworkState video_network_state_ RTC_GUARDED_BY(worker_thread_);
+  // TODO(bugs.webrtc.org/11993): Move aggregate_network_up_ over to the
+  // network thread.
+  bool aggregate_network_up_ RTC_GUARDED_BY(worker_thread_);
+
+  // Schedules nack periodic processing on behalf of all streams.
+  NackPeriodicProcessor nack_periodic_processor_;
+
+  // Audio, Video, and FlexFEC receive streams are owned by the client that
+  // creates them.
+  // TODO(bugs.webrtc.org/11993): Move audio_receive_streams_,
+  // video_receive_streams_ over to the network thread.
+  std::set<AudioReceiveStreamImpl*> audio_receive_streams_
+      RTC_GUARDED_BY(worker_thread_);
+  std::set<VideoReceiveStream2*> video_receive_streams_
+      RTC_GUARDED_BY(worker_thread_);
+  // TODO(bugs.webrtc.org/7135, bugs.webrtc.org/9719): Should eventually be
+  // injected at creation, with a single object in the bundled case.
+  RtpStreamReceiverController audio_receiver_controller_
+      RTC_GUARDED_BY(worker_thread_);
+  RtpStreamReceiverController video_receiver_controller_
+      RTC_GUARDED_BY(worker_thread_);
+
+  // This extra map is used for receive processing which is
+  // independent of media type.
+
+  RTC_NO_UNIQUE_ADDRESS SequenceChecker receive_11993_checker_;
+
+  // Audio and Video send streams are owned by the client that creates them.
+  // TODO(bugs.webrtc.org/11993): `audio_send_ssrcs_` and `video_send_ssrcs_`
+  // should be accessed on the network thread.
+  std::map<uint32_t, AudioSendStream*> audio_send_ssrcs_
+      RTC_GUARDED_BY(worker_thread_);
+  std::map<uint32_t, VideoSendStream*> video_send_ssrcs_
+      RTC_GUARDED_BY(worker_thread_);
+  std::set<VideoSendStream*> video_send_streams_ RTC_GUARDED_BY(worker_thread_);
+  // True if `video_send_streams_` is empty, false if not. The atomic variable
+  // is used to decide UMA send statistics behavior and enables avoiding a
+  // PostTask().
+  std::atomic<bool> video_send_streams_empty_{true};
+
+  // Each forwarder wraps an adaptation resource that was added to the call.
+  std::vector<std::unique_ptr<ResourceVideoSendStreamForwarder>>
+      adaptation_resource_forwarders_ RTC_GUARDED_BY(worker_thread_);
+
+  using RtpStateMap = std::map<uint32_t, RtpState>;
+  RtpStateMap suspended_audio_send_ssrcs_ RTC_GUARDED_BY(worker_thread_);
+  RtpStateMap suspended_video_send_ssrcs_ RTC_GUARDED_BY(worker_thread_);
+
+  using RtpPayloadStateMap = std::map<uint32_t, RtpPayloadState>;
+  RtpPayloadStateMap suspended_video_payload_states_
+      RTC_GUARDED_BY(worker_thread_);
+
+  webrtc::RtcEventLog* const event_log_;
+
+  // TODO(bugs.webrtc.org/11993) ready to move stats access to the network
+  // thread.
+  ReceiveStats receive_stats_ RTC_GUARDED_BY(worker_thread_);
+  SendStats send_stats_ RTC_GUARDED_BY(send_transport_sequence_checker_);
+  // `last_bandwidth_bps_` and `configured_max_padding_bitrate_bps_` being
+  // atomic avoids a PostTask. The variables are used for stats gathering.
+  std::atomic<uint32_t> last_bandwidth_bps_{0};
+  std::atomic<uint32_t> configured_max_padding_bitrate_bps_{0};
+
+  ReceiveSideCongestionController receive_side_cc_;
+  RepeatingTaskHandle receive_side_cc_periodic_task_;
+
+  const std::unique_ptr<ReceiveTimeCalculator> receive_time_calculator_;
+
+  const std::unique_ptr<SendDelayStats> video_send_delay_stats_;
+  const Timestamp start_of_call_;
+
+  // Note that `task_safety_` needs to be at a greater scope than the task queue
+  // owned by `transport_send_` since calls might arrive on the network thread
+  // while Call is being deleted and the task queue is being torn down.
+  const ScopedTaskSafety task_safety_;
+
+  // Caches transport_send_.get(), to avoid racing with destructor.
+  // Note that this is declared before transport_send_ to ensure that it is not
+  // invalidated until no more tasks can be running on the transport_send_ task
+  // queue.
+  // For more details on the background of this member variable, see:
+  // https://webrtc-review.googlesource.com/c/src/+/63023/9/call/call.cc
+  // https://bugs.chromium.org/p/chromium/issues/detail?id=992640
+  RtpTransportControllerSendInterface* const transport_send_ptr_
+      RTC_GUARDED_BY(send_transport_sequence_checker_);
+  // Declared last since it will issue callbacks from a task queue. Declaring it
+  // last ensures that it is destroyed first and any running tasks are finished.
+  const std::unique_ptr<RtpTransportControllerSendInterface> transport_send_;
+
+  bool is_started_ RTC_GUARDED_BY(worker_thread_) = false;
+
+  // Sequence checker for outgoing network traffic. Could be the network thread.
+  // Could also be a pacer owned thread or TQ such as the TaskQueuePacedSender.
+  RTC_NO_UNIQUE_ADDRESS SequenceChecker sent_packet_sequence_checker_;
+  absl::optional<rtc::SentPacket> last_sent_packet_
+      RTC_GUARDED_BY(sent_packet_sequence_checker_);
+};
+} // namespace internal
+
+/* Mozilla: Avoid this since it could use GetRealTimeClock().
+std::unique_ptr<Call> Call::Create(const CallConfig& config) {
+ Clock* clock = Clock::GetRealTimeClock();
+ return Create(config, clock,
+ RtpTransportControllerSendFactory().Create(
+ config.ExtractTransportConfig(), clock));
+}
+ */
+
+// Factory with caller-injected clock and transport controller (the Mozilla
+// build avoids the upstream overload that uses Clock::GetRealTimeClock(),
+// see the commented-out variant above).
+std::unique_ptr<Call> Call::Create(
+    const CallConfig& config,
+    Clock* clock,
+    std::unique_ptr<RtpTransportControllerSendInterface>
+        transportControllerSend) {
+  RTC_DCHECK(config.task_queue_factory);
+  return std::make_unique<internal::Call>(clock, config,
+                                          std::move(transportControllerSend),
+                                          config.task_queue_factory);
+}
+
+// Default implementation so that subclasses do not have to implement this
+// overload. The Call perf tests use internal::Call::CreateVideoSendStream()
+// to inject a FecController; this base version deliberately returns nullptr.
+VideoSendStream* Call::CreateVideoSendStream(
+    VideoSendStream::Config config,
+    VideoEncoderConfig encoder_config,
+    std::unique_ptr<FecController> fec_controller) {
+  return nullptr;
+}
+
+namespace internal {
+
+Call::ReceiveStats::ReceiveStats(Clock* clock)
+    : received_bytes_per_second_counter_(clock, nullptr, false),
+      received_audio_bytes_per_second_counter_(clock, nullptr, false),
+      received_video_bytes_per_second_counter_(clock, nullptr, false),
+      received_rtcp_bytes_per_second_counter_(clock, nullptr, false) {
+  // Construction may happen on a different sequence than the one the stats
+  // are later updated on; detach so the first Add*/dtor call re-attaches.
+  sequence_checker_.Detach();
+}
+
+void Call::ReceiveStats::AddReceivedRtcpBytes(int bytes) {
+  RTC_DCHECK_RUN_ON(&sequence_checker_);
+  // RTCP bytes are only counted after the aggregate counter has its first
+  // sample, i.e. once media (RTP) has started flowing.
+  if (received_bytes_per_second_counter_.HasSample()) {
+    // First RTP packet has been received.
+    received_bytes_per_second_counter_.Add(static_cast<int>(bytes));
+    received_rtcp_bytes_per_second_counter_.Add(static_cast<int>(bytes));
+  }
+}
+
+// Records `bytes` of received audio RTP and tracks the first/last arrival
+// times, which feed the lifetime histograms emitted in the destructor.
+void Call::ReceiveStats::AddReceivedAudioBytes(int bytes,
+                                               webrtc::Timestamp arrival_time) {
+  RTC_DCHECK_RUN_ON(&sequence_checker_);
+  received_bytes_per_second_counter_.Add(bytes);
+  received_audio_bytes_per_second_counter_.Add(bytes);
+  if (!first_received_rtp_audio_timestamp_)
+    first_received_rtp_audio_timestamp_ = arrival_time;
+  last_received_rtp_audio_timestamp_ = arrival_time;
+}
+
+// Records `bytes` of received video RTP and tracks the first/last arrival
+// times, which feed the lifetime histograms emitted in the destructor.
+void Call::ReceiveStats::AddReceivedVideoBytes(int bytes,
+                                               webrtc::Timestamp arrival_time) {
+  RTC_DCHECK_RUN_ON(&sequence_checker_);
+  received_bytes_per_second_counter_.Add(bytes);
+  received_video_bytes_per_second_counter_.Add(bytes);
+  if (!first_received_rtp_video_timestamp_)
+    first_received_rtp_video_timestamp_ = arrival_time;
+  last_received_rtp_video_timestamp_ = arrival_time;
+}
+
+// Flushes the collected receive statistics to UMA histograms. Bitrate
+// histograms are only reported when enough periodic samples were collected.
+Call::ReceiveStats::~ReceiveStats() {
+  RTC_DCHECK_RUN_ON(&sequence_checker_);
+  if (first_received_rtp_audio_timestamp_) {
+    RTC_HISTOGRAM_COUNTS_100000(
+        "WebRTC.Call.TimeReceivingAudioRtpPacketsInSeconds",
+        (*last_received_rtp_audio_timestamp_ -
+         *first_received_rtp_audio_timestamp_)
+            .seconds());
+  }
+  if (first_received_rtp_video_timestamp_) {
+    RTC_HISTOGRAM_COUNTS_100000(
+        "WebRTC.Call.TimeReceivingVideoRtpPacketsInSeconds",
+        (*last_received_rtp_video_timestamp_ -
+         *first_received_rtp_video_timestamp_)
+            .seconds());
+  }
+  const int kMinRequiredPeriodicSamples = 5;
+  AggregatedStats video_bytes_per_sec =
+      received_video_bytes_per_second_counter_.GetStats();
+  if (video_bytes_per_sec.num_samples > kMinRequiredPeriodicSamples) {
+    RTC_HISTOGRAM_COUNTS_100000("WebRTC.Call.VideoBitrateReceivedInKbps",
+                                video_bytes_per_sec.average * 8 / 1000);
+    RTC_LOG(LS_INFO) << "WebRTC.Call.VideoBitrateReceivedInBps, "
+                     << video_bytes_per_sec.ToStringWithMultiplier(8);
+  }
+  AggregatedStats audio_bytes_per_sec =
+      received_audio_bytes_per_second_counter_.GetStats();
+  if (audio_bytes_per_sec.num_samples > kMinRequiredPeriodicSamples) {
+    RTC_HISTOGRAM_COUNTS_100000("WebRTC.Call.AudioBitrateReceivedInKbps",
+                                audio_bytes_per_sec.average * 8 / 1000);
+    RTC_LOG(LS_INFO) << "WebRTC.Call.AudioBitrateReceivedInBps, "
+                     << audio_bytes_per_sec.ToStringWithMultiplier(8);
+  }
+  AggregatedStats rtcp_bytes_per_sec =
+      received_rtcp_bytes_per_second_counter_.GetStats();
+  if (rtcp_bytes_per_sec.num_samples > kMinRequiredPeriodicSamples) {
+    // Note: RTCP is reported in bps (average * 8), not kbps like the others.
+    RTC_HISTOGRAM_COUNTS_100000("WebRTC.Call.RtcpBitrateReceivedInBps",
+                                rtcp_bytes_per_sec.average * 8);
+    RTC_LOG(LS_INFO) << "WebRTC.Call.RtcpBitrateReceivedInBps, "
+                     << rtcp_bytes_per_sec.ToStringWithMultiplier(8);
+  }
+  AggregatedStats recv_bytes_per_sec =
+      received_bytes_per_second_counter_.GetStats();
+  if (recv_bytes_per_sec.num_samples > kMinRequiredPeriodicSamples) {
+    RTC_HISTOGRAM_COUNTS_100000("WebRTC.Call.BitrateReceivedInKbps",
+                                recv_bytes_per_sec.average * 8 / 1000);
+    RTC_LOG(LS_INFO) << "WebRTC.Call.BitrateReceivedInBps, "
+                     << recv_bytes_per_sec.ToStringWithMultiplier(8);
+  }
+}
+
+Call::SendStats::SendStats(Clock* clock)
+    : clock_(clock),
+      estimated_send_bitrate_kbps_counter_(clock, nullptr, true),
+      pacer_bitrate_kbps_counter_(clock, nullptr, true) {
+  // Both checkers are detached: updates and destruction may each happen on
+  // sequences different from the constructing one.
+  destructor_sequence_checker_.Detach();
+  sequence_checker_.Detach();
+}
+
+// Flushes send-side bitrate statistics to UMA histograms. Nothing is
+// reported unless SetFirstPacketTime() was called with a value and the call
+// ran for at least the minimum metrics runtime.
+Call::SendStats::~SendStats() {
+  RTC_DCHECK_RUN_ON(&destructor_sequence_checker_);
+  if (!first_sent_packet_time_)
+    return;
+
+  TimeDelta elapsed = clock_->CurrentTime() - *first_sent_packet_time_;
+  if (elapsed.seconds() < metrics::kMinRunTimeInSeconds)
+    return;
+
+  const int kMinRequiredPeriodicSamples = 5;
+  AggregatedStats send_bitrate_stats =
+      estimated_send_bitrate_kbps_counter_.ProcessAndGetStats();
+  if (send_bitrate_stats.num_samples > kMinRequiredPeriodicSamples) {
+    RTC_HISTOGRAM_COUNTS_100000("WebRTC.Call.EstimatedSendBitrateInKbps",
+                                send_bitrate_stats.average);
+    RTC_LOG(LS_INFO) << "WebRTC.Call.EstimatedSendBitrateInKbps, "
+                     << send_bitrate_stats.ToString();
+  }
+  AggregatedStats pacer_bitrate_stats =
+      pacer_bitrate_kbps_counter_.ProcessAndGetStats();
+  if (pacer_bitrate_stats.num_samples > kMinRequiredPeriodicSamples) {
+    RTC_HISTOGRAM_COUNTS_100000("WebRTC.Call.PacerBitrateInKbps",
+                                pacer_bitrate_stats.average);
+    RTC_LOG(LS_INFO) << "WebRTC.Call.PacerBitrateInKbps, "
+                     << pacer_bitrate_stats.ToString();
+  }
+}
+
+// Records when the first packet was sent; gates histogram reporting in the
+// destructor. Must run on the same sequence as the destructor.
+void Call::SendStats::SetFirstPacketTime(
+    absl::optional<Timestamp> first_sent_packet_time) {
+  RTC_DCHECK_RUN_ON(&destructor_sequence_checker_);
+  first_sent_packet_time_ = first_sent_packet_time;
+}
+
+// Processes accumulated samples and pauses both bitrate counters (e.g. when
+// sending is temporarily inactive).
+void Call::SendStats::PauseSendAndPacerBitrateCounters() {
+  RTC_DCHECK_RUN_ON(&sequence_checker_);
+  estimated_send_bitrate_kbps_counter_.ProcessAndPause();
+  pacer_bitrate_kbps_counter_.ProcessAndPause();
+}
+
+// Adds one sample (in kbps) to the estimated-send and pacer bitrate
+// counters from a target bitrate given in bps.
+void Call::SendStats::AddTargetBitrateSample(uint32_t target_bitrate_bps) {
+  RTC_DCHECK_RUN_ON(&sequence_checker_);
+  estimated_send_bitrate_kbps_counter_.Add(target_bitrate_bps / 1000);
+  // Pacer bitrate may be higher than bitrate estimate if enforcing min
+  // bitrate.
+  uint32_t pacer_bitrate_bps =
+      std::max(target_bitrate_bps, min_allocated_send_bitrate_bps_);
+  pacer_bitrate_kbps_counter_.Add(pacer_bitrate_bps / 1000);
+}
+
+// Caches the minimum allocatable rate used as a floor for pacer bitrate
+// samples in AddTargetBitrateSample().
+void Call::SendStats::SetMinAllocatableRate(BitrateAllocationLimits limits) {
+  RTC_DCHECK_RUN_ON(&sequence_checker_);
+  min_allocated_send_bitrate_bps_ = limits.min_allocatable_rate.bps();
+}
+
+// Constructs the Call on the current thread/task queue, which becomes
+// `worker_thread_`. Wires receive-side congestion control to the packet
+// router of the injected transport controller and starts the periodic
+// receive-side CC processing task.
+Call::Call(Clock* clock,
+           const CallConfig& config,
+           std::unique_ptr<RtpTransportControllerSendInterface> transport_send,
+           TaskQueueFactory* task_queue_factory)
+    : clock_(clock),
+      task_queue_factory_(task_queue_factory),
+      worker_thread_(GetCurrentTaskQueueOrThread()),
+      // If `network_task_queue_` was set to nullptr, network related calls
+      // must be made on `worker_thread_` (i.e. they're one and the same).
+      network_thread_(config.network_task_queue_ ? config.network_task_queue_
+                                                 : worker_thread_),
+      decode_sync_(config.metronome
+                       ? std::make_unique<DecodeSynchronizer>(clock_,
+                                                              config.metronome,
+                                                              worker_thread_)
+                       : nullptr),
+      num_cpu_cores_(CpuInfo::DetectNumberOfCores()),
+      call_stats_(new CallStats(clock_, worker_thread_)),
+      bitrate_allocator_(new BitrateAllocator(this)),
+      config_(config),
+      trials_(*config.trials),
+      audio_network_state_(kNetworkDown),
+      video_network_state_(kNetworkDown),
+      aggregate_network_up_(false),
+      event_log_(config.event_log),
+      receive_stats_(clock_),
+      send_stats_(clock_),
+      receive_side_cc_(clock,
+                       absl::bind_front(&PacketRouter::SendCombinedRtcpPacket,
+                                        transport_send->packet_router()),
+                       absl::bind_front(&PacketRouter::SendRemb,
+                                        transport_send->packet_router()),
+                       /*network_state_estimator=*/nullptr),
+      receive_time_calculator_(
+          ReceiveTimeCalculator::CreateFromFieldTrial(*config.trials)),
+      video_send_delay_stats_(new SendDelayStats(clock_)),
+      start_of_call_(clock_->CurrentTime()),
+      transport_send_ptr_(transport_send.get()),
+      transport_send_(std::move(transport_send)) {
+  RTC_DCHECK(config.event_log != nullptr);
+  RTC_DCHECK(config.trials != nullptr);
+  RTC_DCHECK(network_thread_);
+  RTC_DCHECK(worker_thread_->IsCurrent());
+
+  receive_11993_checker_.Detach();
+  send_transport_sequence_checker_.Detach();
+  sent_packet_sequence_checker_.Detach();
+
+  // Do not remove this call; it is here to convince the compiler that the
+  // WebRTC source timestamp string needs to be in the final binary.
+  LoadWebRTCVersionInRegister();
+
+  call_stats_->RegisterStatsObserver(&receive_side_cc_);
+
+  ReceiveSideCongestionController* receive_side_cc = &receive_side_cc_;
+  receive_side_cc_periodic_task_ = RepeatingTaskHandle::Start(
+      worker_thread_,
+      [receive_side_cc] { return receive_side_cc->MaybeProcess(); },
+      TaskQueueBase::DelayPrecision::kLow, clock_);
+}
+
+// The client must have destroyed every send and receive stream before
+// deleting the Call; the CHECKs below enforce this contract.
+Call::~Call() {
+  RTC_DCHECK_RUN_ON(worker_thread_);
+
+  RTC_CHECK(audio_send_ssrcs_.empty());
+  RTC_CHECK(video_send_ssrcs_.empty());
+  RTC_CHECK(video_send_streams_.empty());
+  RTC_CHECK(audio_receive_streams_.empty());
+  RTC_CHECK(video_receive_streams_.empty());
+
+  receive_side_cc_periodic_task_.Stop();
+  call_stats_->DeregisterStatsObserver(&receive_side_cc_);
+  // Feeds SendStats so its destructor can report UMA histograms.
+  send_stats_.SetFirstPacketTime(transport_send_->GetFirstPacketTime());
+
+  RTC_HISTOGRAM_COUNTS_100000(
+      "WebRTC.Call.LifetimeInSeconds",
+      (clock_->CurrentTime() - start_of_call_).seconds());
+}
+
+// Lazily starts call stats and the send transport. Idempotent: guarded by
+// `is_started_`, so it is safe to call from every Create*Stream entry point.
+void Call::EnsureStarted() {
+  if (is_started_) {
+    return;
+  }
+  is_started_ = true;
+
+  call_stats_->EnsureStarted();
+
+  // This call seems to kick off a number of things, so probably better left
+  // off being kicked off on request rather than in the ctor.
+  transport_send_->RegisterTargetTransferRateObserver(this);
+
+  transport_send_->EnsureStarted();
+}
+
+// Forwards client-configured bitrate limits (min/start/max) to the send-side
+// transport controller. Worker thread only.
+void Call::SetClientBitratePreferences(const BitrateSettings& preferences) {
+  RTC_DCHECK_RUN_ON(worker_thread_);
+  GetTransportControllerSend()->SetClientBitratePreferences(preferences);
+}
+
+// Call implements PacketReceiver itself, so hand out `this` as the sink for
+// all incoming RTP/RTCP packets.
+PacketReceiver* Call::Receiver() {
+  return this;
+}
+
+// Creates an audio send stream for `config.rtp.ssrc`, resuming any RTP state
+// that was saved when a stream with the same SSRC was previously destroyed,
+// and associates it with receive streams sharing that local SSRC.
+webrtc::AudioSendStream* Call::CreateAudioSendStream(
+    const webrtc::AudioSendStream::Config& config) {
+  TRACE_EVENT0("webrtc", "Call::CreateAudioSendStream");
+  RTC_DCHECK_RUN_ON(worker_thread_);
+
+  EnsureStarted();
+
+  // Stream config is logged in AudioSendStream::ConfigureStream, as it may
+  // change during the stream's lifetime.
+  absl::optional<RtpState> suspended_rtp_state;
+  {
+    const auto& iter = suspended_audio_send_ssrcs_.find(config.rtp.ssrc);
+    if (iter != suspended_audio_send_ssrcs_.end()) {
+      suspended_rtp_state.emplace(iter->second);
+    }
+  }
+
+  AudioSendStream* send_stream = new AudioSendStream(
+      clock_, config, config_.audio_state, task_queue_factory_,
+      transport_send_.get(), bitrate_allocator_.get(), event_log_,
+      call_stats_->AsRtcpRttStats(), suspended_rtp_state, trials());
+  // Creating two send streams with the same SSRC is a caller error.
+  RTC_DCHECK(audio_send_ssrcs_.find(config.rtp.ssrc) ==
+             audio_send_ssrcs_.end());
+  audio_send_ssrcs_[config.rtp.ssrc] = send_stream;
+
+  // TODO(bugs.webrtc.org/11993): call AssociateSendStream and
+  // UpdateAggregateNetworkState asynchronously on the network thread.
+  for (AudioReceiveStreamImpl* stream : audio_receive_streams_) {
+    if (stream->local_ssrc() == config.rtp.ssrc) {
+      stream->AssociateSendStream(send_stream);
+    }
+  }
+
+  UpdateAggregateNetworkState();
+
+  return send_stream;
+}
+
+// Stops and deletes an audio send stream. Its RTP state is stashed in
+// `suspended_audio_send_ssrcs_` so a later stream with the same SSRC can
+// resume sequence numbers/timestamps; associated receive streams are
+// detached first.
+void Call::DestroyAudioSendStream(webrtc::AudioSendStream* send_stream) {
+  TRACE_EVENT0("webrtc", "Call::DestroyAudioSendStream");
+  RTC_DCHECK_RUN_ON(worker_thread_);
+  RTC_DCHECK(send_stream != nullptr);
+
+  send_stream->Stop();
+
+  const uint32_t ssrc = send_stream->GetConfig().rtp.ssrc;
+  webrtc::internal::AudioSendStream* audio_send_stream =
+      static_cast<webrtc::internal::AudioSendStream*>(send_stream);
+  suspended_audio_send_ssrcs_[ssrc] = audio_send_stream->GetRtpState();
+
+  size_t num_deleted = audio_send_ssrcs_.erase(ssrc);
+  RTC_DCHECK_EQ(1, num_deleted);
+
+  // TODO(bugs.webrtc.org/11993): call AssociateSendStream and
+  // UpdateAggregateNetworkState asynchronously on the network thread.
+  for (AudioReceiveStreamImpl* stream : audio_receive_streams_) {
+    if (stream->local_ssrc() == ssrc) {
+      stream->AssociateSendStream(nullptr);
+    }
+  }
+
+  UpdateAggregateNetworkState();
+
+  delete send_stream;
+}
+
+// Creates an audio receive stream, registers it with the audio RTP demuxer,
+// wires up A/V sync for its sync group, and associates it with a matching
+// send stream (by local SSRC) if one exists.
+webrtc::AudioReceiveStreamInterface* Call::CreateAudioReceiveStream(
+    const webrtc::AudioReceiveStreamInterface::Config& config) {
+  TRACE_EVENT0("webrtc", "Call::CreateAudioReceiveStream");
+  RTC_DCHECK_RUN_ON(worker_thread_);
+  EnsureStarted();
+  event_log_->Log(std::make_unique<RtcEventAudioReceiveStreamConfig>(
+      CreateRtcLogStreamConfig(config)));
+
+  AudioReceiveStreamImpl* receive_stream = new AudioReceiveStreamImpl(
+      clock_, transport_send_->packet_router(), config_.neteq_factory, config,
+      config_.audio_state, event_log_);
+  audio_receive_streams_.insert(receive_stream);
+
+  // TODO(bugs.webrtc.org/11993): Make the registration on the network thread
+  // (asynchronously). The registration and `audio_receiver_controller_` need
+  // to live on the network thread.
+  receive_stream->RegisterWithTransport(&audio_receiver_controller_);
+
+  ConfigureSync(config.sync_group);
+
+  auto it = audio_send_ssrcs_.find(config.rtp.local_ssrc);
+  if (it != audio_send_ssrcs_.end()) {
+    receive_stream->AssociateSendStream(it->second);
+  }
+
+  UpdateAggregateNetworkState();
+  return receive_stream;
+}
+
+// Unregisters and deletes an audio receive stream, removing it from the
+// receive-side congestion controller and re-resolving A/V sync for its
+// sync group.
+void Call::DestroyAudioReceiveStream(
+    webrtc::AudioReceiveStreamInterface* receive_stream) {
+  TRACE_EVENT0("webrtc", "Call::DestroyAudioReceiveStream");
+  RTC_DCHECK_RUN_ON(worker_thread_);
+  RTC_DCHECK(receive_stream != nullptr);
+  webrtc::AudioReceiveStreamImpl* audio_receive_stream =
+      static_cast<webrtc::AudioReceiveStreamImpl*>(receive_stream);
+
+  // TODO(bugs.webrtc.org/11993): Access the map, rtp config, call ConfigureSync
+  // and UpdateAggregateNetworkState on the network thread. The call to
+  // `UnregisterFromTransport` should also happen on the network thread.
+  audio_receive_stream->UnregisterFromTransport();
+
+  uint32_t ssrc = audio_receive_stream->remote_ssrc();
+  receive_side_cc_.RemoveStream(ssrc);
+
+  audio_receive_streams_.erase(audio_receive_stream);
+
+  // After calling erase(), call ConfigureSync. This will clear associated
+  // video streams or associate them with a different audio stream if one exists
+  // for this sync_group.
+  ConfigureSync(audio_receive_stream->sync_group());
+
+  UpdateAggregateNetworkState();
+  // TODO(bugs.webrtc.org/11993): Consider if deleting `audio_receive_stream`
+  // on the network thread would be better or if we'd need to tear down the
+  // state in two phases.
+  delete audio_receive_stream;
+}
+
+// This method can be used for Call tests with external fec controller factory.
+// Creates a video send stream with an explicit FecController, registers all
+// of its SSRCs, and forwards previously added adaptation resources to it.
+webrtc::VideoSendStream* Call::CreateVideoSendStream(
+    webrtc::VideoSendStream::Config config,
+    VideoEncoderConfig encoder_config,
+    std::unique_ptr<FecController> fec_controller) {
+  TRACE_EVENT0("webrtc", "Call::CreateVideoSendStream");
+  RTC_DCHECK_RUN_ON(worker_thread_);
+
+  EnsureStarted();
+
+  video_send_delay_stats_->AddSsrcs(config);
+  for (size_t ssrc_index = 0; ssrc_index < config.rtp.ssrcs.size();
+       ++ssrc_index) {
+    event_log_->Log(std::make_unique<RtcEventVideoSendStreamConfig>(
+        CreateRtcLogStreamConfig(config, ssrc_index)));
+  }
+
+  // TODO(mflodman): Base the start bitrate on a current bandwidth estimate, if
+  // the call has already started.
+  // Copy ssrcs from `config` since `config` is moved.
+  std::vector<uint32_t> ssrcs = config.rtp.ssrcs;
+
+  VideoSendStream* send_stream = new VideoSendStream(
+      clock_, num_cpu_cores_, task_queue_factory_, network_thread_,
+      call_stats_->AsRtcpRttStats(), transport_send_.get(),
+      bitrate_allocator_.get(), video_send_delay_stats_.get(), event_log_,
+      std::move(config), std::move(encoder_config), suspended_video_send_ssrcs_,
+      suspended_video_payload_states_, std::move(fec_controller),
+      *config_.trials);
+
+  for (uint32_t ssrc : ssrcs) {
+    RTC_DCHECK(video_send_ssrcs_.find(ssrc) == video_send_ssrcs_.end());
+    video_send_ssrcs_[ssrc] = send_stream;
+  }
+  video_send_streams_.insert(send_stream);
+  // Relaxed store: read racily from OnTargetTransferRate (see its comment).
+  video_send_streams_empty_.store(false, std::memory_order_relaxed);
+
+  // Forward resources that were previously added to the call to the new stream.
+  for (const auto& resource_forwarder : adaptation_resource_forwarders_) {
+    resource_forwarder->OnCreateVideoSendStream(send_stream);
+  }
+
+  UpdateAggregateNetworkState();
+
+  return send_stream;
+}
+
+// Public overload: picks the FEC controller (external factory if configured,
+// otherwise the default) and delegates to the three-argument overload above.
+webrtc::VideoSendStream* Call::CreateVideoSendStream(
+    webrtc::VideoSendStream::Config config,
+    VideoEncoderConfig encoder_config) {
+  RTC_DCHECK_RUN_ON(worker_thread_);
+  if (config_.fec_controller_factory) {
+    RTC_LOG(LS_INFO) << "External FEC Controller will be used.";
+  }
+  std::unique_ptr<FecController> fec_controller =
+      config_.fec_controller_factory
+          ? config_.fec_controller_factory->CreateFecController()
+          : std::make_unique<FecControllerDefault>(clock_);
+  return CreateVideoSendStream(std::move(config), std::move(encoder_config),
+                               std::move(fec_controller));
+}
+
+// Stops and deletes a video send stream. All SSRC map entries pointing at the
+// stream are erased, and its RTP/payload state is saved so a recreated stream
+// can resume from it.
+void Call::DestroyVideoSendStream(webrtc::VideoSendStream* send_stream) {
+  TRACE_EVENT0("webrtc", "Call::DestroyVideoSendStream");
+  RTC_DCHECK(send_stream != nullptr);
+  RTC_DCHECK_RUN_ON(worker_thread_);
+
+  VideoSendStream* send_stream_impl =
+      static_cast<VideoSendStream*>(send_stream);
+
+  // Erase every SSRC entry that maps to this stream (a stream may own
+  // several SSRCs, e.g. simulcast/RTX).
+  auto it = video_send_ssrcs_.begin();
+  while (it != video_send_ssrcs_.end()) {
+    if (it->second == static_cast<VideoSendStream*>(send_stream)) {
+      send_stream_impl = it->second;
+      video_send_ssrcs_.erase(it++);
+    } else {
+      ++it;
+    }
+  }
+
+  // Stop forwarding resources to the stream being destroyed.
+  for (const auto& resource_forwarder : adaptation_resource_forwarders_) {
+    resource_forwarder->OnDestroyVideoSendStream(send_stream_impl);
+  }
+  video_send_streams_.erase(send_stream_impl);
+  if (video_send_streams_.empty())
+    video_send_streams_empty_.store(true, std::memory_order_relaxed);
+
+  VideoSendStream::RtpStateMap rtp_states;
+  VideoSendStream::RtpPayloadStateMap rtp_payload_states;
+  send_stream_impl->StopPermanentlyAndGetRtpStates(&rtp_states,
+                                                   &rtp_payload_states);
+  for (const auto& kv : rtp_states) {
+    suspended_video_send_ssrcs_[kv.first] = kv.second;
+  }
+  for (const auto& kv : rtp_payload_states) {
+    suspended_video_payload_states_[kv.first] = kv.second;
+  }
+
+  UpdateAggregateNetworkState();
+  // TODO(tommi): consider deleting on the same thread as runs
+  // StopPermanentlyAndGetRtpStates.
+  delete send_stream_impl;
+}
+
+// Creates a video receive stream, registers it with the video RTP demuxer,
+// sets up A/V sync for its sync group and pushes the current network state
+// to the new stream.
+webrtc::VideoReceiveStreamInterface* Call::CreateVideoReceiveStream(
+    webrtc::VideoReceiveStreamInterface::Config configuration) {
+  TRACE_EVENT0("webrtc", "Call::CreateVideoReceiveStream");
+  RTC_DCHECK_RUN_ON(worker_thread_);
+
+  EnsureStarted();
+
+  event_log_->Log(std::make_unique<RtcEventVideoReceiveStreamConfig>(
+      CreateRtcLogStreamConfig(configuration)));
+
+  // TODO(bugs.webrtc.org/11993): Move the registration between `receive_stream`
+  // and `video_receiver_controller_` out of VideoReceiveStream2 construction
+  // and set it up asynchronously on the network thread (the registration and
+  // `video_receiver_controller_` need to live on the network thread).
+  // TODO(crbug.com/1381982): Re-enable decode synchronizer once the Chromium
+  // API has adapted to the new Metronome interface.
+  VideoReceiveStream2* receive_stream = new VideoReceiveStream2(
+      task_queue_factory_, this, num_cpu_cores_,
+      transport_send_->packet_router(), std::move(configuration),
+      call_stats_.get(), clock_, std::make_unique<VCMTiming>(clock_, trials()),
+      &nack_periodic_processor_, decode_sync_.get(), event_log_);
+  // TODO(bugs.webrtc.org/11993): Set this up asynchronously on the network
+  // thread.
+  receive_stream->RegisterWithTransport(&video_receiver_controller_);
+  video_receive_streams_.insert(receive_stream);
+
+  ConfigureSync(receive_stream->sync_group());
+
+  receive_stream->SignalNetworkState(video_network_state_);
+  UpdateAggregateNetworkState();
+  return receive_stream;
+}
+
+// Unregisters and deletes a video receive stream, re-resolving A/V sync for
+// its sync group and removing it from the receive-side congestion controller.
+void Call::DestroyVideoReceiveStream(
+    webrtc::VideoReceiveStreamInterface* receive_stream) {
+  TRACE_EVENT0("webrtc", "Call::DestroyVideoReceiveStream");
+  RTC_DCHECK_RUN_ON(worker_thread_);
+  RTC_DCHECK(receive_stream != nullptr);
+  VideoReceiveStream2* receive_stream_impl =
+      static_cast<VideoReceiveStream2*>(receive_stream);
+  // TODO(bugs.webrtc.org/11993): Unregister on the network thread.
+  receive_stream_impl->UnregisterFromTransport();
+  video_receive_streams_.erase(receive_stream_impl);
+  // Re-run sync after erase() so remaining streams pick up a new pairing.
+  ConfigureSync(receive_stream_impl->sync_group());
+
+  receive_side_cc_.RemoveStream(receive_stream_impl->remote_ssrc());
+
+  UpdateAggregateNetworkState();
+  delete receive_stream_impl;
+}
+
+// Creates a FlexFEC receive stream and registers it with the video RTP
+// demuxer so recovered packets protect the associated video stream.
+FlexfecReceiveStream* Call::CreateFlexfecReceiveStream(
+    const FlexfecReceiveStream::Config config) {
+  TRACE_EVENT0("webrtc", "Call::CreateFlexfecReceiveStream");
+  RTC_DCHECK_RUN_ON(worker_thread_);
+
+  // Unlike the video and audio receive streams, FlexfecReceiveStream implements
+  // RtpPacketSinkInterface itself, and hence its constructor passes its `this`
+  // pointer to video_receiver_controller_->CreateStream(). Calling the
+  // constructor while on the worker thread ensures that we don't call
+  // OnRtpPacket until the constructor is finished and the object is
+  // in a valid state, since OnRtpPacket runs on the same thread.
+  FlexfecReceiveStreamImpl* receive_stream = new FlexfecReceiveStreamImpl(
+      clock_, std::move(config), &video_receiver_controller_,
+      call_stats_->AsRtcpRttStats());
+
+  // TODO(bugs.webrtc.org/11993): Set this up asynchronously on the network
+  // thread.
+  receive_stream->RegisterWithTransport(&video_receiver_controller_);
+  // TODO(brandtr): Store config in RtcEventLog here.
+
+  return receive_stream;
+}
+
+// Unregisters and deletes a FlexFEC receive stream, removing its SSRC from
+// the receive-side congestion controller.
+void Call::DestroyFlexfecReceiveStream(FlexfecReceiveStream* receive_stream) {
+  TRACE_EVENT0("webrtc", "Call::DestroyFlexfecReceiveStream");
+  RTC_DCHECK_RUN_ON(worker_thread_);
+
+  FlexfecReceiveStreamImpl* receive_stream_impl =
+      static_cast<FlexfecReceiveStreamImpl*>(receive_stream);
+  // TODO(bugs.webrtc.org/11993): Unregister on the network thread.
+  receive_stream_impl->UnregisterFromTransport();
+
+  auto ssrc = receive_stream_impl->remote_ssrc();
+  // Remove all SSRCs pointing to the FlexfecReceiveStreamImpl to be
+  // destroyed.
+  receive_side_cc_.RemoveStream(ssrc);
+
+  delete receive_stream_impl;
+}
+
+// Registers an adaptation resource. A forwarder is kept so the resource is
+// attached to all current video send streams and, via the Create/Destroy
+// hooks, to any future ones.
+void Call::AddAdaptationResource(rtc::scoped_refptr<Resource> resource) {
+  RTC_DCHECK_RUN_ON(worker_thread_);
+  adaptation_resource_forwarders_.push_back(
+      std::make_unique<ResourceVideoSendStreamForwarder>(resource));
+  const auto& resource_forwarder = adaptation_resource_forwarders_.back();
+  for (VideoSendStream* send_stream : video_send_streams_) {
+    resource_forwarder->OnCreateVideoSendStream(send_stream);
+  }
+}
+
+// Exposes the send transport controller owned by this Call (see the TODO in
+// call.h about moving ownership out of Call).
+RtpTransportControllerSendInterface* Call::GetTransportControllerSend() {
+  return transport_send_.get();
+}
+
+// Returns a snapshot of call-level stats (bandwidth estimates, pacer delay,
+// RTT). The bitrate fields are read with relaxed atomics since they are
+// written from the send-transport sequence.
+Call::Stats Call::GetStats() const {
+  RTC_DCHECK_RUN_ON(worker_thread_);
+
+  Stats stats;
+  // TODO(srte): It is unclear if we only want to report queues if network is
+  // available.
+  stats.pacer_delay_ms =
+      aggregate_network_up_ ? transport_send_->GetPacerQueuingDelayMs() : 0;
+
+  stats.rtt_ms = call_stats_->LastProcessedRtt();
+
+  // Fetch available send/receive bitrates.
+  stats.recv_bandwidth_bps = receive_side_cc_.LatestReceiveSideEstimate().bps();
+  stats.send_bandwidth_bps =
+      last_bandwidth_bps_.load(std::memory_order_relaxed);
+  stats.max_padding_bitrate_bps =
+      configured_max_padding_bitrate_bps_.load(std::memory_order_relaxed);
+
+  return stats;
+}
+
+// Field-trial accessor; `trials_` is bound to `*config.trials` in the ctor.
+const FieldTrialsView& Call::trials() const {
+  return trials_;
+}
+
+// Network thread accessor (falls back to the worker thread when no separate
+// network task queue was configured — see the ctor initializer).
+TaskQueueBase* Call::network_thread() const {
+  return network_thread_;
+}
+
+// Worker thread accessor.
+TaskQueueBase* Call::worker_thread() const {
+  return worker_thread_;
+}
+
+// Records per-media-type network state. Called on the network thread; the
+// state update itself still lives on the worker thread, so the work is run
+// inline when the two threads are the same and posted otherwise.
+void Call::SignalChannelNetworkState(MediaType media, NetworkState state) {
+  RTC_DCHECK_RUN_ON(network_thread_);
+  RTC_DCHECK(media == MediaType::AUDIO || media == MediaType::VIDEO);
+
+  auto closure = [this, media, state]() {
+    // TODO(bugs.webrtc.org/11993): Move this over to the network thread.
+    RTC_DCHECK_RUN_ON(worker_thread_);
+    if (media == MediaType::AUDIO) {
+      audio_network_state_ = state;
+    } else {
+      RTC_DCHECK_EQ(media, MediaType::VIDEO);
+      video_network_state_ = state;
+    }
+
+    // TODO(tommi): Is it necessary to always do this, including if there
+    // was no change in state?
+    UpdateAggregateNetworkState();
+
+    // TODO(tommi): Is it right to do this if media == AUDIO?
+    for (VideoReceiveStream2* video_receive_stream : video_receive_streams_) {
+      video_receive_stream->SignalNetworkState(video_network_state_);
+    }
+  };
+
+  if (network_thread_ == worker_thread_) {
+    closure();
+  } else {
+    // TODO(bugs.webrtc.org/11993): Remove workaround when we no longer need to
+    // post to the worker thread.
+    worker_thread_->PostTask(SafeTask(task_safety_.flag(), std::move(closure)));
+  }
+}
+
+// Propagates a new per-packet transport overhead to every audio send stream.
+// Arrives on the network thread; hops to the worker thread where the send
+// stream map lives.
+void Call::OnAudioTransportOverheadChanged(int transport_overhead_per_packet) {
+  RTC_DCHECK_RUN_ON(network_thread_);
+  worker_thread_->PostTask(
+      SafeTask(task_safety_.flag(), [this, transport_overhead_per_packet]() {
+        // TODO(bugs.webrtc.org/11993): Move this over to the network thread.
+        RTC_DCHECK_RUN_ON(worker_thread_);
+        for (auto& kv : audio_send_ssrcs_) {
+          kv.second->SetTransportOverhead(transport_overhead_per_packet);
+        }
+      }));
+}
+
+// Recomputes whether the call as a whole is "up": true when at least one
+// media type both has streams and reports kNetworkUp. The result is pushed
+// to the send transport, which gates pacing/probing on availability.
+void Call::UpdateAggregateNetworkState() {
+  // TODO(bugs.webrtc.org/11993): Move this over to the network thread.
+  // RTC_DCHECK_RUN_ON(network_thread_);
+
+  RTC_DCHECK_RUN_ON(worker_thread_);
+
+  bool have_audio =
+      !audio_send_ssrcs_.empty() || !audio_receive_streams_.empty();
+  bool have_video =
+      !video_send_ssrcs_.empty() || !video_receive_streams_.empty();
+
+  bool aggregate_network_up =
+      ((have_video && video_network_state_ == kNetworkUp) ||
+       (have_audio && audio_network_state_ == kNetworkUp));
+
+  if (aggregate_network_up != aggregate_network_up_) {
+    RTC_LOG(LS_INFO)
+        << "UpdateAggregateNetworkState: aggregate_state change to "
+        << (aggregate_network_up ? "up" : "down");
+  } else {
+    RTC_LOG(LS_VERBOSE)
+        << "UpdateAggregateNetworkState: aggregate_state remains at "
+        << (aggregate_network_up ? "up" : "down");
+  }
+  aggregate_network_up_ = aggregate_network_up;
+
+  transport_send_->OnNetworkAvailability(aggregate_network_up);
+}
+
+// Updates an audio receive stream's local SSRC and re-associates it with the
+// send stream registered under that SSRC (or nullptr if none exists).
+void Call::OnLocalSsrcUpdated(webrtc::AudioReceiveStreamInterface& stream,
+                              uint32_t local_ssrc) {
+  RTC_DCHECK_RUN_ON(worker_thread_);
+  webrtc::AudioReceiveStreamImpl& receive_stream =
+      static_cast<webrtc::AudioReceiveStreamImpl&>(stream);
+
+  receive_stream.SetLocalSsrc(local_ssrc);
+  auto it = audio_send_ssrcs_.find(local_ssrc);
+  receive_stream.AssociateSendStream(it != audio_send_ssrcs_.end() ? it->second
+                                                                   : nullptr);
+}
+
+// Updates a video receive stream's local SSRC (no send-stream association
+// needed, unlike the audio overload above).
+void Call::OnLocalSsrcUpdated(VideoReceiveStreamInterface& stream,
+                              uint32_t local_ssrc) {
+  RTC_DCHECK_RUN_ON(worker_thread_);
+  static_cast<VideoReceiveStream2&>(stream).SetLocalSsrc(local_ssrc);
+}
+
+// Updates a FlexFEC receive stream's local SSRC.
+void Call::OnLocalSsrcUpdated(FlexfecReceiveStream& stream,
+                              uint32_t local_ssrc) {
+  RTC_DCHECK_RUN_ON(worker_thread_);
+  static_cast<FlexfecReceiveStreamImpl&>(stream).SetLocalSsrc(local_ssrc);
+}
+
+// Moves an audio receive stream to a new A/V sync group and re-resolves
+// which video streams sync against it.
+void Call::OnUpdateSyncGroup(webrtc::AudioReceiveStreamInterface& stream,
+                             absl::string_view sync_group) {
+  RTC_DCHECK_RUN_ON(worker_thread_);
+  webrtc::AudioReceiveStreamImpl& receive_stream =
+      static_cast<webrtc::AudioReceiveStreamImpl&>(stream);
+  receive_stream.SetSyncGroup(sync_group);
+  ConfigureSync(sync_group);
+}
+
+// Sent-packet notification from the transport. Deduplicates repeated
+// notifications for the same packet (possible under bundling) before
+// forwarding to delay stats and the send transport controller.
+void Call::OnSentPacket(const rtc::SentPacket& sent_packet) {
+  RTC_DCHECK_RUN_ON(&sent_packet_sequence_checker_);
+  // When bundling is in effect, multiple senders may be sharing the same
+  // transport. It means every |sent_packet| will be multiply notified from
+  // different channels, WebRtcVoiceMediaChannel or WebRtcVideoChannel. Record
+  // |last_sent_packet_| to deduplicate redundant notifications to downstream.
+  // (https://crbug.com/webrtc/13437): Pass all packets without a |packet_id| to
+  // downstream.
+  if (last_sent_packet_.has_value() && last_sent_packet_->packet_id != -1 &&
+      last_sent_packet_->packet_id == sent_packet.packet_id &&
+      last_sent_packet_->send_time_ms == sent_packet.send_time_ms) {
+    return;
+  }
+  last_sent_packet_ = sent_packet;
+
+  // In production and with most tests, this method will be called on the
+  // network thread. However some test classes such as DirectTransport don't
+  // incorporate a network thread. This means that tests for RtpSenderEgress
+  // and ModuleRtpRtcpImpl2 that use DirectTransport, will call this method
+  // on a ProcessThread. This is alright as is since we forward the call to
+  // implementations that either just do a PostTask or use locking.
+  video_send_delay_stats_->OnSentPacket(sent_packet.packet_id,
+                                        clock_->CurrentTime());
+  transport_send_->OnSentPacket(sent_packet);
+}
+
+// Feeds the congestion controller's start rate into the bitrate allocator.
+// Runs on the send-transport sequence.
+void Call::OnStartRateUpdate(DataRate start_rate) {
+  RTC_DCHECK_RUN_ON(&send_transport_sequence_checker_);
+  bitrate_allocator_->UpdateStartRate(start_rate.bps<uint32_t>());
+}
+
+// Target-rate update from the congestion controller: fans out to the
+// receive-side CC (feedback pacing), the bitrate allocator, and send-side
+// bitrate counters. Runs on the send-transport sequence.
+void Call::OnTargetTransferRate(TargetTransferRate msg) {
+  RTC_DCHECK_RUN_ON(&send_transport_sequence_checker_);
+
+  uint32_t target_bitrate_bps = msg.target_rate.bps();
+  // For controlling the rate of feedback messages.
+  receive_side_cc_.OnBitrateChanged(target_bitrate_bps);
+  bitrate_allocator_->OnNetworkEstimateChanged(msg);
+
+  // Published for GetStats(), which reads from the worker thread.
+  last_bandwidth_bps_.store(target_bitrate_bps, std::memory_order_relaxed);
+
+  // Ignore updates if bitrate is zero (the aggregate network state is
+  // down) or if we're not sending video.
+  // Using `video_send_streams_empty_` is racy but as the caller can't
+  // reasonably expect synchronize with changes in `video_send_streams_` (being
+  // on `send_transport_sequence_checker`), we can avoid a PostTask this way.
+  if (target_bitrate_bps == 0 ||
+      video_send_streams_empty_.load(std::memory_order_relaxed)) {
+    send_stats_.PauseSendAndPacerBitrateCounters();
+  } else {
+    send_stats_.AddTargetBitrateSample(target_bitrate_bps);
+  }
+}
+
+// Allocation-limit update from the bitrate allocator: forwards to the send
+// transport and stats, and publishes the max padding rate for GetStats().
+// Runs on the send-transport sequence.
+void Call::OnAllocationLimitsChanged(BitrateAllocationLimits limits) {
+  RTC_DCHECK_RUN_ON(&send_transport_sequence_checker_);
+
+  transport_send_ptr_->SetAllocatedSendBitrateLimits(limits);
+  send_stats_.SetMinAllocatableRate(limits);
+  configured_max_padding_bitrate_bps_.store(limits.max_padding_rate.bps(),
+                                            std::memory_order_relaxed);
+}
+
+// Returns the first audio receive stream belonging to `sync_group`, or
+// nullptr when the group is empty or has no audio stream.
+AudioReceiveStreamImpl* Call::FindAudioStreamForSyncGroup(
+    absl::string_view sync_group) {
+  RTC_DCHECK_RUN_ON(worker_thread_);
+  RTC_DCHECK_RUN_ON(&receive_11993_checker_);
+  if (!sync_group.empty()) {
+    for (AudioReceiveStreamImpl* stream : audio_receive_streams_) {
+      if (stream->sync_group() == sync_group)
+        return stream;
+    }
+  }
+
+  return nullptr;
+}
+
+// (Re)establishes A/V sync within `sync_group`: the first video stream in the
+// group is paired with the group's audio stream (if any); all other video
+// streams in the group have their sync cleared.
+void Call::ConfigureSync(absl::string_view sync_group) {
+  // TODO(bugs.webrtc.org/11993): Expect to be called on the network thread.
+  RTC_DCHECK_RUN_ON(worker_thread_);
+  // `audio_stream` may be nullptr when clearing the audio stream for a group.
+  AudioReceiveStreamImpl* audio_stream =
+      FindAudioStreamForSyncGroup(sync_group);
+
+  size_t num_synced_streams = 0;
+  for (VideoReceiveStream2* video_stream : video_receive_streams_) {
+    if (video_stream->sync_group() != sync_group)
+      continue;
+    ++num_synced_streams;
+    // TODO(bugs.webrtc.org/4762): Support synchronizing more than one A/V pair.
+    // Attempting to sync more than one audio/video pair within the same sync
+    // group is not supported in the current implementation.
+    // Only sync the first A/V pair within this sync group.
+    if (num_synced_streams == 1) {
+      // sync_audio_stream may be null and that's ok.
+      video_stream->SetSync(audio_stream);
+    } else {
+      video_stream->SetSync(nullptr);
+    }
+  }
+}
+
+// Fans an incoming RTCP packet out to every receive and send stream (compound
+// RTCP may contain reports for several of them) and logs it to the RTC event
+// log if at least one stream consumed it.
+void Call::DeliverRtcpPacket(rtc::CopyOnWriteBuffer packet) {
+  RTC_DCHECK_RUN_ON(worker_thread_);
+  RTC_DCHECK(IsRtcpPacket(packet));
+  TRACE_EVENT0("webrtc", "Call::DeliverRtcp");
+
+  receive_stats_.AddReceivedRtcpBytes(static_cast<int>(packet.size()));
+  bool rtcp_delivered = false;
+  // NOTE(review): only the video-receive loop checks DeliverRtcp's return
+  // value; the other loops set rtcp_delivered unconditionally.
+  for (VideoReceiveStream2* stream : video_receive_streams_) {
+    if (stream->DeliverRtcp(packet.cdata(), packet.size()))
+      rtcp_delivered = true;
+  }
+
+  for (AudioReceiveStreamImpl* stream : audio_receive_streams_) {
+    stream->DeliverRtcp(packet.cdata(), packet.size());
+    rtcp_delivered = true;
+  }
+
+  for (VideoSendStream* stream : video_send_streams_) {
+    stream->DeliverRtcp(packet.cdata(), packet.size());
+    rtcp_delivered = true;
+  }
+
+  for (auto& kv : audio_send_ssrcs_) {
+    kv.second->DeliverRtcp(packet.cdata(), packet.size());
+    rtcp_delivered = true;
+  }
+
+  if (rtcp_delivered) {
+    event_log_->Log(std::make_unique<RtcEventRtcpPacketIncoming>(packet));
+  }
+}
+
+// Delivers an incoming RTP packet: repairs its arrival time for clock resets,
+// notifies BWE, logs it, then demuxes it to the matching receive stream. On a
+// demux miss, `undemuxable_packet_handler` may create a stream for an
+// unsignalled SSRC, after which demuxing is retried once.
+void Call::DeliverRtpPacket(
+    MediaType media_type,
+    RtpPacketReceived packet,
+    OnUndemuxablePacketHandler undemuxable_packet_handler) {
+  RTC_DCHECK_RUN_ON(worker_thread_);
+  RTC_DCHECK(packet.arrival_time().IsFinite());
+
+  if (receive_time_calculator_) {
+    int64_t packet_time_us = packet.arrival_time().us();
+    // Repair packet_time_us for clock resets by comparing a new read of
+    // the same clock (TimeUTCMicros) to a monotonic clock reading.
+    packet_time_us = receive_time_calculator_->ReconcileReceiveTimes(
+        packet_time_us, rtc::TimeUTCMicros(), clock_->TimeInMicroseconds());
+    packet.set_arrival_time(Timestamp::Micros(packet_time_us));
+  }
+
+  NotifyBweOfReceivedPacket(packet, media_type);
+
+  event_log_->Log(std::make_unique<RtcEventRtpPacketIncoming>(packet));
+  if (media_type != MediaType::AUDIO && media_type != MediaType::VIDEO) {
+    return;
+  }
+
+  RtpStreamReceiverController& receiver_controller =
+      media_type == MediaType::AUDIO ? audio_receiver_controller_
+                                     : video_receiver_controller_;
+
+  if (!receiver_controller.OnRtpPacket(packet)) {
+    // Demuxing failed. Allow the caller to create a
+    // receive stream in order to handle unsignalled SSRCs and try again.
+    // Note that we dont want to call NotifyBweOfReceivedPacket twice per
+    // packet.
+    if (!undemuxable_packet_handler(packet)) {
+      return;
+    }
+    if (!receiver_controller.OnRtpPacket(packet)) {
+      RTC_LOG(LS_INFO) << "Failed to demux packet " << packet.Ssrc();
+      return;
+    }
+  }
+
+  // RateCounters expect input parameter as int, save it as int,
+  // instead of converting each time it is passed to RateCounter::Add below.
+  int length = static_cast<int>(packet.size());
+  if (media_type == MediaType::AUDIO) {
+    receive_stats_.AddReceivedAudioBytes(length, packet.arrival_time());
+  }
+  if (media_type == MediaType::VIDEO) {
+    receive_stats_.AddReceivedVideoBytes(length, packet.arrival_time());
+  }
+}
+
+// Feeds a received packet into bandwidth estimation: size/arrival time (plus
+// the abs-send-time extension when present) go to the send transport, and the
+// full packet goes to the receive-side congestion controller.
+void Call::NotifyBweOfReceivedPacket(const RtpPacketReceived& packet,
+                                     MediaType media_type) {
+  RTC_DCHECK_RUN_ON(worker_thread_);
+
+  ReceivedPacket packet_msg;
+  packet_msg.size = DataSize::Bytes(packet.payload_size());
+  packet_msg.receive_time = packet.arrival_time();
+  uint32_t time_24;
+  if (packet.GetExtension<AbsoluteSendTime>(&time_24)) {
+    packet_msg.send_time = AbsoluteSendTime::ToTimestamp(time_24);
+  }
+  transport_send_->OnReceivedPacket(packet_msg);
+
+  receive_side_cc_.OnReceivedPacket(packet, media_type);
+}
+
+} // namespace internal
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/call.h b/third_party/libwebrtc/call/call.h
new file mode 100644
index 0000000000..b36872f5b5
--- /dev/null
+++ b/third_party/libwebrtc/call/call.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef CALL_CALL_H_
+#define CALL_CALL_H_
+
+#include <algorithm>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "api/adaptation/resource.h"
+#include "api/media_types.h"
+#include "api/task_queue/task_queue_base.h"
+#include "call/audio_receive_stream.h"
+#include "call/audio_send_stream.h"
+#include "call/call_basic_stats.h"
+#include "call/call_config.h"
+#include "call/flexfec_receive_stream.h"
+#include "call/packet_receiver.h"
+#include "call/rtp_transport_controller_send_interface.h"
+#include "call/video_receive_stream.h"
+#include "call/video_send_stream.h"
+#include "rtc_base/copy_on_write_buffer.h"
+#include "rtc_base/network/sent_packet.h"
+#include "rtc_base/network_route.h"
+
+namespace webrtc {
+
+// A Call represents a two-way connection carrying zero or more outgoing
+// and incoming media streams, transported over one or more RTP transports.
+
+// A Call instance can contain several send and/or receive streams. All streams
+// are assumed to have the same remote endpoint and will share bitrate estimates
+// etc.
+
+// When using the PeerConnection API, there is a one-to-one relationship
+// between the PeerConnection and the Call.
+
+class Call {
+ public:
+  using Stats = CallBasicStats;
+
+  // Factory functions; the second overload allows injecting the clock and
+  // transport controller (used by tests).
+  static std::unique_ptr<Call> Create(const CallConfig& config);
+  static std::unique_ptr<Call> Create(
+      const CallConfig& config,
+      Clock* clock,
+      std::unique_ptr<RtpTransportControllerSendInterface>
+          transportControllerSend);
+
+  virtual AudioSendStream* CreateAudioSendStream(
+      const AudioSendStream::Config& config) = 0;
+
+  virtual void DestroyAudioSendStream(AudioSendStream* send_stream) = 0;
+
+  virtual AudioReceiveStreamInterface* CreateAudioReceiveStream(
+      const AudioReceiveStreamInterface::Config& config) = 0;
+  virtual void DestroyAudioReceiveStream(
+      AudioReceiveStreamInterface* receive_stream) = 0;
+
+  virtual VideoSendStream* CreateVideoSendStream(
+      VideoSendStream::Config config,
+      VideoEncoderConfig encoder_config) = 0;
+  // Overload with an explicit FEC controller; non-pure so only
+  // implementations used by FEC tests need to override it.
+  virtual VideoSendStream* CreateVideoSendStream(
+      VideoSendStream::Config config,
+      VideoEncoderConfig encoder_config,
+      std::unique_ptr<FecController> fec_controller);
+  virtual void DestroyVideoSendStream(VideoSendStream* send_stream) = 0;
+
+  virtual VideoReceiveStreamInterface* CreateVideoReceiveStream(
+      VideoReceiveStreamInterface::Config configuration) = 0;
+  virtual void DestroyVideoReceiveStream(
+      VideoReceiveStreamInterface* receive_stream) = 0;
+
+  // In order for a created VideoReceiveStreamInterface to be aware that it is
+  // protected by a FlexfecReceiveStream, the latter should be created before
+  // the former.
+  virtual FlexfecReceiveStream* CreateFlexfecReceiveStream(
+      const FlexfecReceiveStream::Config config) = 0;
+  virtual void DestroyFlexfecReceiveStream(
+      FlexfecReceiveStream* receive_stream) = 0;
+
+  // When a resource is overused, the Call will try to reduce the load on the
+  // system, for example by reducing the resolution or frame rate of encoded
+  // streams.
+  virtual void AddAdaptationResource(rtc::scoped_refptr<Resource> resource) = 0;
+
+  // All received RTP and RTCP packets for the call should be inserted to this
+  // PacketReceiver. The PacketReceiver pointer is valid as long as the
+  // Call instance exists.
+  virtual PacketReceiver* Receiver() = 0;
+
+  // This is used to access the transport controller send instance owned by
+  // Call. The send transport controller is currently owned by Call for legacy
+  // reasons. (for instance variants of call tests are built on this assumption)
+  // TODO(srte): Move ownership of transport controller send out of Call and
+  // remove this method interface.
+  virtual RtpTransportControllerSendInterface* GetTransportControllerSend() = 0;
+
+  // Returns the call statistics, such as estimated send and receive bandwidth,
+  // pacing delay, etc.
+  virtual Stats GetStats() const = 0;
+
+  // TODO(skvlad): When the unbundled case with multiple streams for the same
+  // media type going over different networks is supported, track the state
+  // for each stream separately. Right now it's global per media type.
+  virtual void SignalChannelNetworkState(MediaType media,
+                                         NetworkState state) = 0;
+
+  virtual void OnAudioTransportOverheadChanged(
+      int transport_overhead_per_packet) = 0;
+
+  // Called when a receive stream's local ssrc has changed and association with
+  // send streams needs to be updated.
+  virtual void OnLocalSsrcUpdated(AudioReceiveStreamInterface& stream,
+                                  uint32_t local_ssrc) = 0;
+  virtual void OnLocalSsrcUpdated(VideoReceiveStreamInterface& stream,
+                                  uint32_t local_ssrc) = 0;
+  virtual void OnLocalSsrcUpdated(FlexfecReceiveStream& stream,
+                                  uint32_t local_ssrc) = 0;
+
+  virtual void OnUpdateSyncGroup(AudioReceiveStreamInterface& stream,
+                                 absl::string_view sync_group) = 0;
+
+  virtual void OnSentPacket(const rtc::SentPacket& sent_packet) = 0;
+
+  virtual void SetClientBitratePreferences(
+      const BitrateSettings& preferences) = 0;
+
+  virtual const FieldTrialsView& trials() const = 0;
+
+  virtual TaskQueueBase* network_thread() const = 0;
+  virtual TaskQueueBase* worker_thread() const = 0;
+
+  virtual ~Call() {}
+};
+
+} // namespace webrtc
+
+#endif // CALL_CALL_H_
diff --git a/third_party/libwebrtc/call/call_basic_stats.cc b/third_party/libwebrtc/call/call_basic_stats.cc
new file mode 100644
index 0000000000..74333a663b
--- /dev/null
+++ b/third_party/libwebrtc/call/call_basic_stats.cc
@@ -0,0 +1,20 @@
+#include "call/call_basic_stats.h"
+
+#include "rtc_base/strings/string_builder.h"
+
+namespace webrtc {
+
+std::string CallBasicStats::ToString(int64_t time_ms) const {
+ char buf[1024];
+ rtc::SimpleStringBuilder ss(buf);
+ ss << "Call stats: " << time_ms << ", {";
+ ss << "send_bw_bps: " << send_bandwidth_bps << ", ";
+ ss << "recv_bw_bps: " << recv_bandwidth_bps << ", ";
+ ss << "max_pad_bps: " << max_padding_bitrate_bps << ", ";
+ ss << "pacer_delay_ms: " << pacer_delay_ms << ", ";
+ ss << "rtt_ms: " << rtt_ms;
+ ss << '}';
+ return ss.str();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/call_basic_stats.h b/third_party/libwebrtc/call/call_basic_stats.h
new file mode 100644
index 0000000000..98febe9405
--- /dev/null
+++ b/third_party/libwebrtc/call/call_basic_stats.h
@@ -0,0 +1,21 @@
+#ifndef CALL_CALL_BASIC_STATS_H_
+#define CALL_CALL_BASIC_STATS_H_
+
+#include <string>
+
+namespace webrtc {
+
+// named to avoid conflicts with video/call_stats.h
+struct CallBasicStats {
+ std::string ToString(int64_t time_ms) const;
+
+ int send_bandwidth_bps = 0; // Estimated available send bandwidth.
+ int max_padding_bitrate_bps = 0; // Cumulative configured max padding.
+ int recv_bandwidth_bps = 0; // Estimated available receive bandwidth.
+ int64_t pacer_delay_ms = 0;
+ int64_t rtt_ms = -1;
+};
+
+} // namespace webrtc
+
+#endif // CALL_CALL_BASIC_STATS_H_
diff --git a/third_party/libwebrtc/call/call_config.cc b/third_party/libwebrtc/call/call_config.cc
new file mode 100644
index 0000000000..93f6b1aec4
--- /dev/null
+++ b/third_party/libwebrtc/call/call_config.cc
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/call_config.h"
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+CallConfig::CallConfig(RtcEventLog* event_log,
+ TaskQueueBase* network_task_queue /* = nullptr*/)
+ : event_log(event_log), network_task_queue_(network_task_queue) {
+ RTC_DCHECK(event_log);
+}
+
+CallConfig::CallConfig(const CallConfig& config) = default;
+
+RtpTransportConfig CallConfig::ExtractTransportConfig() const {
+ RtpTransportConfig transportConfig;
+ transportConfig.bitrate_config = bitrate_config;
+ transportConfig.event_log = event_log;
+ transportConfig.network_controller_factory = network_controller_factory;
+ transportConfig.network_state_predictor_factory =
+ network_state_predictor_factory;
+ transportConfig.task_queue_factory = task_queue_factory;
+ transportConfig.trials = trials;
+ transportConfig.pacer_burst_interval = pacer_burst_interval;
+
+ return transportConfig;
+}
+
+CallConfig::~CallConfig() = default;
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/call_config.h b/third_party/libwebrtc/call/call_config.h
new file mode 100644
index 0000000000..918c077435
--- /dev/null
+++ b/third_party/libwebrtc/call/call_config.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef CALL_CALL_CONFIG_H_
+#define CALL_CALL_CONFIG_H_
+
+#include "api/fec_controller.h"
+#include "api/field_trials_view.h"
+#include "api/metronome/metronome.h"
+#include "api/neteq/neteq_factory.h"
+#include "api/network_state_predictor.h"
+#include "api/rtc_error.h"
+#include "api/task_queue/task_queue_factory.h"
+#include "api/transport/bitrate_settings.h"
+#include "api/transport/network_control.h"
+#include "call/audio_state.h"
+#include "call/rtp_transport_config.h"
+#include "call/rtp_transport_controller_send_factory_interface.h"
+
+namespace webrtc {
+
+class AudioProcessing;
+class RtcEventLog;
+
+struct CallConfig {
+ // If `network_task_queue` is set to nullptr, Call will assume that network
+ // related callbacks will be made on the same TQ as the Call instance was
+ // constructed on.
+ explicit CallConfig(RtcEventLog* event_log,
+ TaskQueueBase* network_task_queue = nullptr);
+ CallConfig(const CallConfig&);
+ RtpTransportConfig ExtractTransportConfig() const;
+ ~CallConfig();
+
+ // Bitrate config used until valid bitrate estimates are calculated. Also
+ // used to cap total bitrate used. This comes from the remote connection.
+ BitrateConstraints bitrate_config;
+
+ // AudioState which is possibly shared between multiple calls.
+ rtc::scoped_refptr<AudioState> audio_state;
+
+ // Audio Processing Module to be used in this call.
+ AudioProcessing* audio_processing = nullptr;
+
+ // RtcEventLog to use for this call. Required.
+ // Use webrtc::RtcEventLog::CreateNull() for a null implementation.
+ RtcEventLog* const event_log = nullptr;
+
+ // FecController to use for this call.
+ FecControllerFactoryInterface* fec_controller_factory = nullptr;
+
+ // Task Queue Factory to be used in this call. Required.
+ TaskQueueFactory* task_queue_factory = nullptr;
+
+ // NetworkStatePredictor to use for this call.
+ NetworkStatePredictorFactoryInterface* network_state_predictor_factory =
+ nullptr;
+
+ // Network controller factory to use for this call.
+ NetworkControllerFactoryInterface* network_controller_factory = nullptr;
+
+ // NetEq factory to use for this call.
+ NetEqFactory* neteq_factory = nullptr;
+
+ // Key-value mapping of internal configurations to apply,
+ // e.g. field trials.
+ const FieldTrialsView* trials = nullptr;
+
+ TaskQueueBase* const network_task_queue_ = nullptr;
+ // RtpTransportControllerSend to use for this call.
+ RtpTransportControllerSendFactoryInterface*
+ rtp_transport_controller_send_factory = nullptr;
+
+ Metronome* metronome = nullptr;
+
+ // The burst interval of the pacer, see TaskQueuePacedSender constructor.
+ absl::optional<TimeDelta> pacer_burst_interval;
+
+ // Enables send packet batching from the egress RTP sender.
+ bool enable_send_packet_batching = false;
+};
+
+} // namespace webrtc
+
+#endif // CALL_CALL_CONFIG_H_
diff --git a/third_party/libwebrtc/call/call_factory.cc b/third_party/libwebrtc/call/call_factory.cc
new file mode 100644
index 0000000000..78a4f1635f
--- /dev/null
+++ b/third_party/libwebrtc/call/call_factory.cc
@@ -0,0 +1,119 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/call_factory.h"
+
+#include <stdio.h>
+
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/test/simulated_network.h"
+#include "api/units/time_delta.h"
+#include "call/call.h"
+#include "call/degraded_call.h"
+#include "call/rtp_transport_config.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/experiments/field_trial_list.h"
+#include "rtc_base/experiments/field_trial_parser.h"
+
+namespace webrtc {
+namespace {
+using TimeScopedNetworkConfig = DegradedCall::TimeScopedNetworkConfig;
+
+std::vector<TimeScopedNetworkConfig> GetNetworkConfigs(
+ const FieldTrialsView& trials,
+ bool send) {
+ FieldTrialStructList<TimeScopedNetworkConfig> trials_list(
+ {FieldTrialStructMember("queue_length_packets",
+ [](TimeScopedNetworkConfig* p) {
+ // FieldTrialParser does not natively support
+ // size_t type, so use this ugly cast as
+ // workaround.
+ return reinterpret_cast<unsigned*>(
+ &p->queue_length_packets);
+ }),
+ FieldTrialStructMember(
+ "queue_delay_ms",
+ [](TimeScopedNetworkConfig* p) { return &p->queue_delay_ms; }),
+ FieldTrialStructMember("delay_standard_deviation_ms",
+ [](TimeScopedNetworkConfig* p) {
+ return &p->delay_standard_deviation_ms;
+ }),
+ FieldTrialStructMember(
+ "link_capacity_kbps",
+ [](TimeScopedNetworkConfig* p) { return &p->link_capacity_kbps; }),
+ FieldTrialStructMember(
+ "loss_percent",
+ [](TimeScopedNetworkConfig* p) { return &p->loss_percent; }),
+ FieldTrialStructMember(
+ "allow_reordering",
+ [](TimeScopedNetworkConfig* p) { return &p->allow_reordering; }),
+ FieldTrialStructMember("avg_burst_loss_length",
+ [](TimeScopedNetworkConfig* p) {
+ return &p->avg_burst_loss_length;
+ }),
+ FieldTrialStructMember(
+ "packet_overhead",
+ [](TimeScopedNetworkConfig* p) { return &p->packet_overhead; }),
+ FieldTrialStructMember(
+ "duration",
+ [](TimeScopedNetworkConfig* p) { return &p->duration; })},
+ {});
+ ParseFieldTrial({&trials_list},
+ trials.Lookup(send ? "WebRTC-FakeNetworkSendConfig"
+ : "WebRTC-FakeNetworkReceiveConfig"));
+ return trials_list.Get();
+}
+
+} // namespace
+
+CallFactory::CallFactory() {
+ call_thread_.Detach();
+}
+
+std::unique_ptr<Call> CallFactory::CreateCall(const CallConfig& config) {
+ RTC_DCHECK_RUN_ON(&call_thread_);
+ RTC_DCHECK(config.trials);
+
+ std::vector<DegradedCall::TimeScopedNetworkConfig> send_degradation_configs =
+ GetNetworkConfigs(*config.trials, /*send=*/true);
+ std::vector<DegradedCall::TimeScopedNetworkConfig>
+ receive_degradation_configs =
+ GetNetworkConfigs(*config.trials, /*send=*/false);
+
+ RtpTransportConfig transportConfig = config.ExtractTransportConfig();
+
+ RTC_CHECK(false);
+ return nullptr;
+ /* Mozilla: Avoid this since it could use GetRealTimeClock().
+ std::unique_ptr<Call> call =
+ Call::Create(config, Clock::GetRealTimeClock(),
+ config.rtp_transport_controller_send_factory->Create(
+ transportConfig, Clock::GetRealTimeClock()));
+
+ if (!send_degradation_configs.empty() ||
+ !receive_degradation_configs.empty()) {
+ return std::make_unique<DegradedCall>(
+ std::move(call), send_degradation_configs, receive_degradation_configs);
+ }
+
+ return call;
+ */
+}
+
+std::unique_ptr<CallFactoryInterface> CreateCallFactory() {
+ return std::make_unique<CallFactory>();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/call_factory.h b/third_party/libwebrtc/call/call_factory.h
new file mode 100644
index 0000000000..f75b1bd71b
--- /dev/null
+++ b/third_party/libwebrtc/call/call_factory.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef CALL_CALL_FACTORY_H_
+#define CALL_CALL_FACTORY_H_
+
+#include <memory>
+
+#include "api/call/call_factory_interface.h"
+#include "api/sequence_checker.h"
+#include "call/call.h"
+#include "call/call_config.h"
+#include "rtc_base/system/no_unique_address.h"
+
+namespace webrtc {
+
+class CallFactory : public CallFactoryInterface {
+ public:
+ CallFactory();
+ ~CallFactory() override = default;
+
+ private:
+ std::unique_ptr<Call> CreateCall(const CallConfig& config) override;
+
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker call_thread_;
+};
+
+} // namespace webrtc
+
+#endif // CALL_CALL_FACTORY_H_
diff --git a/third_party/libwebrtc/call/call_gn/moz.build b/third_party/libwebrtc/call/call_gn/moz.build
new file mode 100644
index 0000000000..25c1961b89
--- /dev/null
+++ b/third_party/libwebrtc/call/call_gn/moz.build
@@ -0,0 +1,239 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/call/call.cc",
+ "/third_party/libwebrtc/call/flexfec_receive_stream_impl.cc",
+ "/third_party/libwebrtc/call/receive_time_calculator.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["RTC_ENABLE_WIN_WGC"] = True
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["TARGET_CPU"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["TARGET_CPU"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["TARGET_CPU"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["TARGET_CPU"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["TARGET_CPU"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["TARGET_CPU"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android" and CONFIG["TARGET_CPU"] == "arm":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["OS_TARGET"] == "Android" and CONFIG["TARGET_CPU"] == "x86":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "aarch64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "arm":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "x86":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "x86_64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("call_gn")
diff --git a/third_party/libwebrtc/call/call_interfaces_gn/moz.build b/third_party/libwebrtc/call/call_interfaces_gn/moz.build
new file mode 100644
index 0000000000..a5796666d8
--- /dev/null
+++ b/third_party/libwebrtc/call/call_interfaces_gn/moz.build
@@ -0,0 +1,243 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/call/audio_receive_stream.cc",
+ "/third_party/libwebrtc/call/audio_send_stream.cc",
+ "/third_party/libwebrtc/call/audio_state.cc",
+ "/third_party/libwebrtc/call/call_basic_stats.cc",
+ "/third_party/libwebrtc/call/call_config.cc",
+ "/third_party/libwebrtc/call/flexfec_receive_stream.cc",
+ "/third_party/libwebrtc/call/syncable.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["RTC_ENABLE_WIN_WGC"] = True
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["TARGET_CPU"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["TARGET_CPU"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["TARGET_CPU"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["TARGET_CPU"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["TARGET_CPU"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["TARGET_CPU"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android" and CONFIG["TARGET_CPU"] == "arm":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["OS_TARGET"] == "Android" and CONFIG["TARGET_CPU"] == "x86":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "aarch64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "arm":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "x86":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "x86_64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("call_interfaces_gn")
diff --git a/third_party/libwebrtc/call/call_perf_tests.cc b/third_party/libwebrtc/call/call_perf_tests.cc
new file mode 100644
index 0000000000..0ba6d05b19
--- /dev/null
+++ b/third_party/libwebrtc/call/call_perf_tests.cc
@@ -0,0 +1,1209 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <algorithm>
+#include <limits>
+#include <memory>
+#include <string>
+
+#include "absl/flags/flag.h"
+#include "absl/strings/string_view.h"
+#include "api/audio_codecs/builtin_audio_encoder_factory.h"
+#include "api/numerics/samples_stats_counter.h"
+#include "api/rtc_event_log/rtc_event_log.h"
+#include "api/task_queue/pending_task_safety_flag.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/test/metrics/global_metrics_logger_and_exporter.h"
+#include "api/test/metrics/metric.h"
+#include "api/test/simulated_network.h"
+#include "api/video/builtin_video_bitrate_allocator_factory.h"
+#include "api/video/video_bitrate_allocation.h"
+#include "api/video_codecs/video_encoder.h"
+#include "call/call.h"
+#include "call/fake_network_pipe.h"
+#include "call/simulated_network.h"
+#include "media/engine/internal_encoder_factory.h"
+#include "media/engine/simulcast_encoder_adapter.h"
+#include "modules/audio_coding/include/audio_coding_module.h"
+#include "modules/audio_device/include/audio_device.h"
+#include "modules/audio_device/include/test_audio_device.h"
+#include "modules/audio_mixer/audio_mixer_impl.h"
+#include "modules/rtp_rtcp/source/rtp_packet.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/task_queue_for_test.h"
+#include "rtc_base/thread.h"
+#include "rtc_base/thread_annotations.h"
+#include "system_wrappers/include/metrics.h"
+#include "test/call_test.h"
+#include "test/direct_transport.h"
+#include "test/drifting_clock.h"
+#include "test/encoder_settings.h"
+#include "test/fake_encoder.h"
+#include "test/field_trial.h"
+#include "test/frame_generator_capturer.h"
+#include "test/gtest.h"
+#include "test/null_transport.h"
+#include "test/rtp_rtcp_observer.h"
+#include "test/test_flags.h"
+#include "test/testsupport/file_utils.h"
+#include "test/video_encoder_proxy_factory.h"
+#include "test/video_test_constants.h"
+#include "video/config/video_encoder_config.h"
+#include "video/transport_adapter.h"
+
+using webrtc::test::DriftingClock;
+
+namespace webrtc {
+namespace {
+
+using ::webrtc::test::GetGlobalMetricsLogger;
+using ::webrtc::test::ImprovementDirection;
+using ::webrtc::test::Unit;
+
+enum : int { // The first valid value is 1.
+ kTransportSequenceNumberExtensionId = 1,
+};
+
+} // namespace
+
+class CallPerfTest : public test::CallTest {
+ public:
+ CallPerfTest() {
+ RegisterRtpExtension(RtpExtension(RtpExtension::kTransportSequenceNumberUri,
+ kTransportSequenceNumberExtensionId));
+ }
+
+ protected:
+ enum class FecMode { kOn, kOff };
+ enum class CreateOrder { kAudioFirst, kVideoFirst };
+ void TestAudioVideoSync(FecMode fec,
+ CreateOrder create_first,
+ float video_ntp_speed,
+ float video_rtp_speed,
+ float audio_rtp_speed,
+ absl::string_view test_label);
+
+ void TestMinTransmitBitrate(bool pad_to_min_bitrate);
+
+ void TestCaptureNtpTime(const BuiltInNetworkBehaviorConfig& net_config,
+ int threshold_ms,
+ int start_time_ms,
+ int run_time_ms);
+ void TestMinAudioVideoBitrate(int test_bitrate_from,
+ int test_bitrate_to,
+ int test_bitrate_step,
+ int min_bwe,
+ int start_bwe,
+ int max_bwe);
+ void TestEncodeFramerate(VideoEncoderFactory* encoder_factory,
+ absl::string_view payload_name,
+ const std::vector<int>& max_framerates);
+};
+
+class VideoRtcpAndSyncObserver : public test::RtpRtcpObserver,
+ public rtc::VideoSinkInterface<VideoFrame> {
+ static const int kInSyncThresholdMs = 50;
+ static const int kStartupTimeMs = 2000;
+ static const int kMinRunTimeMs = 30000;
+
+ public:
+ explicit VideoRtcpAndSyncObserver(TaskQueueBase* task_queue,
+ Clock* clock,
+ absl::string_view test_label)
+ : test::RtpRtcpObserver(test::VideoTestConstants::kLongTimeout),
+ clock_(clock),
+ test_label_(test_label),
+ creation_time_ms_(clock_->TimeInMilliseconds()),
+ task_queue_(task_queue) {}
+
+ void OnFrame(const VideoFrame& video_frame) override {
+ task_queue_->PostTask([this]() { CheckStats(); });
+ }
+
+ void CheckStats() {
+ if (!receive_stream_)
+ return;
+
+ VideoReceiveStreamInterface::Stats stats = receive_stream_->GetStats();
+ if (stats.sync_offset_ms == std::numeric_limits<int>::max())
+ return;
+
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ int64_t time_since_creation = now_ms - creation_time_ms_;
+ // During the first couple of seconds audio and video can falsely be
+ // estimated as being synchronized. We don't want to trigger on those.
+ if (time_since_creation < kStartupTimeMs)
+ return;
+ if (std::abs(stats.sync_offset_ms) < kInSyncThresholdMs) {
+ if (first_time_in_sync_ == -1) {
+ first_time_in_sync_ = now_ms;
+ GetGlobalMetricsLogger()->LogSingleValueMetric(
+ "sync_convergence_time" + test_label_, "synchronization",
+ time_since_creation, Unit::kMilliseconds,
+ ImprovementDirection::kSmallerIsBetter);
+ }
+ if (time_since_creation > kMinRunTimeMs)
+ observation_complete_.Set();
+ }
+ if (first_time_in_sync_ != -1)
+ sync_offset_ms_list_.AddSample(stats.sync_offset_ms);
+ }
+
+ void set_receive_stream(VideoReceiveStreamInterface* receive_stream) {
+ RTC_DCHECK_EQ(task_queue_, TaskQueueBase::Current());
+ // Note that receive_stream may be nullptr.
+ receive_stream_ = receive_stream;
+ }
+
+ void PrintResults() {
+ GetGlobalMetricsLogger()->LogMetric(
+ "stream_offset" + test_label_, "synchronization", sync_offset_ms_list_,
+ Unit::kMilliseconds, ImprovementDirection::kNeitherIsBetter);
+ }
+
+ private:
+ Clock* const clock_;
+ const std::string test_label_;
+ const int64_t creation_time_ms_;
+ int64_t first_time_in_sync_ = -1;
+ VideoReceiveStreamInterface* receive_stream_ = nullptr;
+ SamplesStatsCounter sync_offset_ms_list_;
+ TaskQueueBase* const task_queue_;
+};
+
+// Sets up one audio and one video stream in the same sync group ("av_sync"),
+// sends them over simulated networks (the audio path gets 500 ms queueing
+// delay and 5% loss; video uses default network behavior), optionally with
+// drifting NTP/RTP clocks, and waits until VideoRtcpAndSyncObserver signals
+// that A/V sync was achieved. `create_first` controls whether the audio or
+// the video receive stream is created first.
+void CallPerfTest::TestAudioVideoSync(FecMode fec,
+ CreateOrder create_first,
+ float video_ntp_speed,
+ float video_rtp_speed,
+ float audio_rtp_speed,
+ absl::string_view test_label) {
+ const char* kSyncGroup = "av_sync";
+ const uint32_t kAudioSendSsrc = 1234;
+ const uint32_t kAudioRecvSsrc = 5678;
+
+ // Audio-only network impairments; video traffic uses defaults.
+ BuiltInNetworkBehaviorConfig audio_net_config;
+ audio_net_config.queue_delay_ms = 500;
+ audio_net_config.loss_percent = 5;
+
+ auto observer = std::make_unique<VideoRtcpAndSyncObserver>(
+ task_queue(), Clock::GetRealTimeClock(), test_label);
+
+ std::map<uint8_t, MediaType> audio_pt_map;
+ std::map<uint8_t, MediaType> video_pt_map;
+
+ std::unique_ptr<test::PacketTransport> audio_send_transport;
+ std::unique_ptr<test::PacketTransport> video_send_transport;
+ std::unique_ptr<test::PacketTransport> receive_transport;
+
+ AudioSendStream* audio_send_stream;
+ AudioReceiveStreamInterface* audio_receive_stream;
+ std::unique_ptr<DriftingClock> drifting_clock;
+
+ // All stream/call setup happens synchronously on the test task queue.
+ SendTask(task_queue(), [&]() {
+ metrics::Reset();
+ rtc::scoped_refptr<AudioDeviceModule> fake_audio_device =
+ TestAudioDeviceModule::Create(
+ task_queue_factory_.get(),
+ TestAudioDeviceModule::CreatePulsedNoiseCapturer(256, 48000),
+ TestAudioDeviceModule::CreateDiscardRenderer(48000),
+ audio_rtp_speed);
+ EXPECT_EQ(0, fake_audio_device->Init());
+
+ AudioState::Config send_audio_state_config;
+ send_audio_state_config.audio_mixer = AudioMixerImpl::Create();
+ send_audio_state_config.audio_processing =
+ AudioProcessingBuilder().Create();
+ send_audio_state_config.audio_device_module = fake_audio_device;
+ CallConfig sender_config(send_event_log_.get());
+
+ // The same AudioState is shared by the sender and receiver calls.
+ auto audio_state = AudioState::Create(send_audio_state_config);
+ fake_audio_device->RegisterAudioCallback(audio_state->audio_transport());
+ sender_config.audio_state = audio_state;
+ CallConfig receiver_config(recv_event_log_.get());
+ receiver_config.audio_state = audio_state;
+ CreateCalls(sender_config, receiver_config);
+
+ // Split the registered payload types into audio-only and video-only maps
+ // so each send transport demuxes only its own media type.
+ std::copy_if(std::begin(payload_type_map_), std::end(payload_type_map_),
+ std::inserter(audio_pt_map, audio_pt_map.end()),
+ [](const std::pair<const uint8_t, MediaType>& pair) {
+ return pair.second == MediaType::AUDIO;
+ });
+ std::copy_if(std::begin(payload_type_map_), std::end(payload_type_map_),
+ std::inserter(video_pt_map, video_pt_map.end()),
+ [](const std::pair<const uint8_t, MediaType>& pair) {
+ return pair.second == MediaType::VIDEO;
+ });
+
+ audio_send_transport = std::make_unique<test::PacketTransport>(
+ task_queue(), sender_call_.get(), observer.get(),
+ test::PacketTransport::kSender, audio_pt_map,
+ std::make_unique<FakeNetworkPipe>(
+ Clock::GetRealTimeClock(),
+ std::make_unique<SimulatedNetwork>(audio_net_config)),
+ GetRegisteredExtensions(), GetRegisteredExtensions());
+ audio_send_transport->SetReceiver(receiver_call_->Receiver());
+
+ video_send_transport = std::make_unique<test::PacketTransport>(
+ task_queue(), sender_call_.get(), observer.get(),
+ test::PacketTransport::kSender, video_pt_map,
+ std::make_unique<FakeNetworkPipe>(
+ Clock::GetRealTimeClock(),
+ std::make_unique<SimulatedNetwork>(BuiltInNetworkBehaviorConfig())),
+ GetRegisteredExtensions(), GetRegisteredExtensions());
+ video_send_transport->SetReceiver(receiver_call_->Receiver());
+
+ receive_transport = std::make_unique<test::PacketTransport>(
+ task_queue(), receiver_call_.get(), observer.get(),
+ test::PacketTransport::kReceiver, payload_type_map_,
+ std::make_unique<FakeNetworkPipe>(
+ Clock::GetRealTimeClock(),
+ std::make_unique<SimulatedNetwork>(BuiltInNetworkBehaviorConfig())),
+ GetRegisteredExtensions(), GetRegisteredExtensions());
+ receive_transport->SetReceiver(sender_call_->Receiver());
+
+ CreateSendConfig(1, 0, 0, video_send_transport.get());
+ CreateMatchingReceiveConfigs(receive_transport.get());
+
+ AudioSendStream::Config audio_send_config(audio_send_transport.get());
+ audio_send_config.rtp.ssrc = kAudioSendSsrc;
+ // TODO(bugs.webrtc.org/14683): Let the tests fail with invalid config.
+ audio_send_config.send_codec_spec = AudioSendStream::Config::SendCodecSpec(
+ test::VideoTestConstants::kAudioSendPayloadType, {"OPUS", 48000, 2});
+ audio_send_config.min_bitrate_bps = 6000;
+ audio_send_config.max_bitrate_bps = 510000;
+ audio_send_config.encoder_factory = CreateBuiltinAudioEncoderFactory();
+ audio_send_stream = sender_call_->CreateAudioSendStream(audio_send_config);
+
+ GetVideoSendConfig()->rtp.nack.rtp_history_ms =
+ test::VideoTestConstants::kNackRtpHistoryMs;
+ // When FEC is enabled, configure RED/ULPFEC symmetrically on both sides.
+ if (fec == FecMode::kOn) {
+ GetVideoSendConfig()->rtp.ulpfec.red_payload_type =
+ test::VideoTestConstants::kRedPayloadType;
+ GetVideoSendConfig()->rtp.ulpfec.ulpfec_payload_type =
+ test::VideoTestConstants::kUlpfecPayloadType;
+ video_receive_configs_[0].rtp.red_payload_type =
+ test::VideoTestConstants::kRedPayloadType;
+ video_receive_configs_[0].rtp.ulpfec_payload_type =
+ test::VideoTestConstants::kUlpfecPayloadType;
+ }
+ video_receive_configs_[0].rtp.nack.rtp_history_ms = 1000;
+ video_receive_configs_[0].renderer = observer.get();
+ video_receive_configs_[0].sync_group = kSyncGroup;
+
+ AudioReceiveStreamInterface::Config audio_recv_config;
+ audio_recv_config.rtp.remote_ssrc = kAudioSendSsrc;
+ audio_recv_config.rtp.local_ssrc = kAudioRecvSsrc;
+ audio_recv_config.rtcp_send_transport = receive_transport.get();
+ audio_recv_config.sync_group = kSyncGroup;
+ audio_recv_config.decoder_factory = audio_decoder_factory_;
+ audio_recv_config.decoder_map = {
+ {test::VideoTestConstants::kAudioSendPayloadType, {"OPUS", 48000, 2}}};
+
+ // Stream creation order is part of what this test exercises.
+ if (create_first == CreateOrder::kAudioFirst) {
+ audio_receive_stream =
+ receiver_call_->CreateAudioReceiveStream(audio_recv_config);
+ CreateVideoStreams();
+ } else {
+ CreateVideoStreams();
+ audio_receive_stream =
+ receiver_call_->CreateAudioReceiveStream(audio_recv_config);
+ }
+ EXPECT_EQ(1u, video_receive_streams_.size());
+ observer->set_receive_stream(video_receive_streams_[0]);
+ drifting_clock = std::make_unique<DriftingClock>(clock_, video_ntp_speed);
+ CreateFrameGeneratorCapturerWithDrift(
+ drifting_clock.get(), video_rtp_speed,
+ test::VideoTestConstants::kDefaultFramerate,
+ test::VideoTestConstants::kDefaultWidth,
+ test::VideoTestConstants::kDefaultHeight);
+
+ Start();
+
+ audio_send_stream->Start();
+ audio_receive_stream->Start();
+ });
+
+ EXPECT_TRUE(observer->Wait())
+ << "Timed out while waiting for audio and video to be synchronized.";
+
+ // Teardown, also on the task queue; the ordering below is deliberate.
+ SendTask(task_queue(), [&]() {
+ // Clear the pointer to the receive stream since it will now be deleted.
+ observer->set_receive_stream(nullptr);
+
+ audio_send_stream->Stop();
+ audio_receive_stream->Stop();
+
+ Stop();
+
+ DestroyStreams();
+
+ sender_call_->DestroyAudioSendStream(audio_send_stream);
+ receiver_call_->DestroyAudioReceiveStream(audio_receive_stream);
+
+ DestroyCalls();
+ // Call may post periodic rtcp packet to the transport on the process
+ // thread, thus transport should be destroyed after the call objects.
+ // Though transports keep pointers to the call objects, transports handle
+ // packets on the task_queue() and thus wouldn't create a race while current
+ // destruction happens in the same task as destruction of the call objects.
+ video_send_transport.reset();
+ audio_send_transport.reset();
+ receive_transport.reset();
+ });
+
+ observer->PrintResults();
+
+ // In quick test synchronization may not be achieved in time.
+ if (!absl::GetFlag(FLAGS_webrtc_quick_perf_test)) {
+// TODO(bugs.webrtc.org/10417): Reenable this for iOS
+#if !defined(WEBRTC_IOS)
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.AVSyncOffsetInMs"));
+#endif
+ }
+
+ // The observer is used from the task queue, so delete it there as well.
+ task_queue()->PostTask(
+ [to_delete = observer.release()]() { delete to_delete; });
+}
+
+// Baseline: no clock drift anywhere; sync must still be achieved.
+TEST_F(CallPerfTest, Synchronization_PlaysOutAudioAndVideoWithoutClockDrift) {
+ TestAudioVideoSync(FecMode::kOff, CreateOrder::kAudioFirst,
+ DriftingClock::kNoDrift, DriftingClock::kNoDrift,
+ DriftingClock::kNoDrift, "_video_no_drift");
+}
+
+// Video NTP clock runs 10% fast; RTP clocks are unaffected.
+TEST_F(CallPerfTest, Synchronization_PlaysOutAudioAndVideoWithVideoNtpDrift) {
+ TestAudioVideoSync(FecMode::kOff, CreateOrder::kAudioFirst,
+ DriftingClock::PercentsFaster(10.0f),
+ DriftingClock::kNoDrift, DriftingClock::kNoDrift,
+ "_video_ntp_drift");
+}
+
+// Audio RTP clock 30% fast while video RTP clock is 30% slow.
+TEST_F(CallPerfTest,
+ Synchronization_PlaysOutAudioAndVideoWithAudioFasterThanVideoDrift) {
+ TestAudioVideoSync(FecMode::kOff, CreateOrder::kAudioFirst,
+ DriftingClock::kNoDrift,
+ DriftingClock::PercentsSlower(30.0f),
+ DriftingClock::PercentsFaster(30.0f), "_audio_faster");
+}
+
+// Opposite drift direction, with FEC on and video stream created first.
+TEST_F(CallPerfTest,
+ Synchronization_PlaysOutAudioAndVideoWithVideoFasterThanAudioDrift) {
+ TestAudioVideoSync(FecMode::kOn, CreateOrder::kVideoFirst,
+ DriftingClock::kNoDrift,
+ DriftingClock::PercentsFaster(30.0f),
+ DriftingClock::PercentsSlower(30.0f), "_video_faster");
+}
+
+// Verifies that the receiver's estimated capture NTP time for rendered frames
+// stays within `threshold_ms` of the real capture time, over the given
+// simulated network. Measurement starts only after `start_time_ms` (to let
+// RTCP SR-based estimation settle) and the test completes after `run_time_ms`.
+void CallPerfTest::TestCaptureNtpTime(
+ const BuiltInNetworkBehaviorConfig& net_config,
+ int threshold_ms,
+ int start_time_ms,
+ int run_time_ms) {
+ class CaptureNtpTimeObserver : public test::EndToEndTest,
+ public rtc::VideoSinkInterface<VideoFrame> {
+ public:
+ CaptureNtpTimeObserver(const BuiltInNetworkBehaviorConfig& net_config,
+ int threshold_ms,
+ int start_time_ms,
+ int run_time_ms)
+ : EndToEndTest(test::VideoTestConstants::kLongTimeout),
+ net_config_(net_config),
+ clock_(Clock::GetRealTimeClock()),
+ threshold_ms_(threshold_ms),
+ start_time_ms_(start_time_ms),
+ run_time_ms_(run_time_ms),
+ creation_time_ms_(clock_->TimeInMilliseconds()),
+ capturer_(nullptr),
+ rtp_start_timestamp_set_(false),
+ rtp_start_timestamp_(0) {}
+
+ private:
+ // The same impairments are applied in both directions.
+ BuiltInNetworkBehaviorConfig GetSendTransportConfig() const override {
+ return net_config_;
+ }
+
+ BuiltInNetworkBehaviorConfig GetReceiveTransportConfig() const override {
+ return net_config_;
+ }
+
+ // Called for every rendered frame; compares the estimated capture time
+ // (from the frame's NTP timestamp) with the real capture time recorded in
+ // OnSendRtp().
+ void OnFrame(const VideoFrame& video_frame) override {
+ MutexLock lock(&mutex_);
+ if (video_frame.ntp_time_ms() <= 0) {
+ // Haven't got enough RTCP SR in order to calculate the capture ntp
+ // time.
+ return;
+ }
+
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ int64_t time_since_creation = now_ms - creation_time_ms_;
+ if (time_since_creation < start_time_ms_) {
+ // Wait for `start_time_ms_` before start measuring.
+ return;
+ }
+
+ if (time_since_creation > run_time_ms_) {
+ observation_complete_.Set();
+ }
+
+ FrameCaptureTimeList::iterator iter =
+ capture_time_list_.find(video_frame.timestamp());
+ EXPECT_TRUE(iter != capture_time_list_.end());
+
+ // The real capture time has been wrapped to uint32_t before converted
+ // to rtp timestamp in the sender side. So here we convert the estimated
+ // capture time to a uint32_t 90k timestamp also for comparing.
+ uint32_t estimated_capture_timestamp =
+ 90 * static_cast<uint32_t>(video_frame.ntp_time_ms());
+ uint32_t real_capture_timestamp = iter->second;
+ int time_offset_ms = real_capture_timestamp - estimated_capture_timestamp;
+ time_offset_ms = time_offset_ms / 90;
+ time_offset_ms_list_.AddSample(time_offset_ms);
+
+ EXPECT_TRUE(std::abs(time_offset_ms) < threshold_ms_);
+ }
+
+ // Records, per RTP timestamp, the real capture timestamp derived from the
+ // capturer's first-frame capture time.
+ Action OnSendRtp(rtc::ArrayView<const uint8_t> packet) override {
+ MutexLock lock(&mutex_);
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet));
+
+ if (!rtp_start_timestamp_set_) {
+ // Calculate the rtp timestamp offset in order to calculate the real
+ // capture time.
+ uint32_t first_capture_timestamp =
+ 90 * static_cast<uint32_t>(capturer_->first_frame_capture_time());
+ rtp_start_timestamp_ = rtp_packet.Timestamp() - first_capture_timestamp;
+ rtp_start_timestamp_set_ = true;
+ }
+
+ uint32_t capture_timestamp =
+ rtp_packet.Timestamp() - rtp_start_timestamp_;
+ capture_time_list_.insert(
+ capture_time_list_.end(),
+ std::make_pair(rtp_packet.Timestamp(), capture_timestamp));
+ return SEND_PACKET;
+ }
+
+ void OnFrameGeneratorCapturerCreated(
+ test::FrameGeneratorCapturer* frame_generator_capturer) override {
+ capturer_ = frame_generator_capturer;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ (*receive_configs)[0].renderer = this;
+ // Enable the receiver side rtt calculation.
+ (*receive_configs)[0].rtp.rtcp_xr.receiver_reference_time_report = true;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timed out while waiting for estimated capture "
+ "NTP time to be within bounds.";
+ GetGlobalMetricsLogger()->LogMetric(
+ "capture_ntp_time", "real - estimated", time_offset_ms_list_,
+ Unit::kMilliseconds, ImprovementDirection::kNeitherIsBetter);
+ }
+
+ Mutex mutex_;
+ const BuiltInNetworkBehaviorConfig net_config_;
+ Clock* const clock_;
+ const int threshold_ms_;
+ const int start_time_ms_;
+ const int run_time_ms_;
+ const int64_t creation_time_ms_;
+ test::FrameGeneratorCapturer* capturer_;
+ bool rtp_start_timestamp_set_;
+ uint32_t rtp_start_timestamp_;
+ typedef std::map<uint32_t, uint32_t> FrameCaptureTimeList;
+ FrameCaptureTimeList capture_time_list_ RTC_GUARDED_BY(&mutex_);
+ SamplesStatsCounter time_offset_ms_list_;
+ } test(net_config, threshold_ms, start_time_ms, run_time_ms);
+
+ RunBaseTest(&test);
+}
+
+// Flaky tests, disabled on Mac and Windows due to webrtc:8291.
+#if !(defined(WEBRTC_MAC) || defined(WEBRTC_WIN))
+// 100 ms fixed queueing delay; estimate must stay within 100 ms.
+TEST_F(CallPerfTest, Real_Estimated_CaptureNtpTimeWithNetworkDelay) {
+ BuiltInNetworkBehaviorConfig net_config;
+ net_config.queue_delay_ms = 100;
+ // TODO(wu): lower the threshold as the calculation/estimation becomes more
+ // accurate.
+ const int kThresholdMs = 100;
+ const int kStartTimeMs = 10000;
+ const int kRunTimeMs = 20000;
+ TestCaptureNtpTime(net_config, kThresholdMs, kStartTimeMs, kRunTimeMs);
+}
+
+// Same as above, but with 10 ms standard-deviation jitter added.
+TEST_F(CallPerfTest, Real_Estimated_CaptureNtpTimeWithNetworkJitter) {
+ BuiltInNetworkBehaviorConfig net_config;
+ net_config.queue_delay_ms = 100;
+ net_config.delay_standard_deviation_ms = 10;
+ // TODO(wu): lower the threshold as the calculation/estimation becomes more
+ // accurate.
+ const int kThresholdMs = 100;
+ const int kStartTimeMs = 10000;
+ const int kRunTimeMs = 20000;
+ TestCaptureNtpTime(net_config, kThresholdMs, kStartTimeMs, kRunTimeMs);
+}
+#endif
+
+// Drives simulated CPU overuse then underuse via a field trial and verifies
+// the sink-wants adaptation sequence: initial max-framerate constraint,
+// adapt-down (max pixel count set), then adapt-up (constraints reset).
+TEST_F(CallPerfTest, ReceivesCpuOveruseAndUnderuse) {
+ // Minimal normal usage at the start, then 30s overuse to allow filter to
+ // settle, and then 80s underuse to allow plenty of time for rampup again.
+ test::ScopedFieldTrials fake_overuse_settings(
+ "WebRTC-ForceSimulatedOveruseIntervalMs/1-30000-80000/");
+
+ class LoadObserver : public test::SendTest,
+ public test::FrameGeneratorCapturer::SinkWantsObserver {
+ public:
+ LoadObserver()
+ : SendTest(test::VideoTestConstants::kLongTimeout),
+ test_phase_(TestPhase::kInit) {}
+
+ void OnFrameGeneratorCapturerCreated(
+ test::FrameGeneratorCapturer* frame_generator_capturer) override {
+ frame_generator_capturer->SetSinkWantsObserver(this);
+ // Set a high initial resolution to be sure that we can scale down.
+ frame_generator_capturer->ChangeResolution(1920, 1080);
+ }
+
+ // OnSinkWantsChanged is called when FrameGeneratorCapturer::AddOrUpdateSink
+ // is called.
+ // TODO(sprang): Add integration test for maintain-framerate mode?
+ void OnSinkWantsChanged(rtc::VideoSinkInterface<VideoFrame>* sink,
+ const rtc::VideoSinkWants& wants) override {
+ // The sink wants can change either because an adaptation happened (i.e.
+ // the pixels or frame rate changed) or for other reasons, such as encoded
+ // resolutions being communicated (happens whenever we capture a new frame
+ // size). In this test, we only care about adaptations.
+ bool did_adapt =
+ last_wants_.max_pixel_count != wants.max_pixel_count ||
+ last_wants_.target_pixel_count != wants.target_pixel_count ||
+ last_wants_.max_framerate_fps != wants.max_framerate_fps;
+ last_wants_ = wants;
+ if (!did_adapt) {
+ return;
+ }
+ // At kStart expect CPU overuse. Then expect CPU underuse when the encoder
+ // delay has been decreased.
+ switch (test_phase_) {
+ case TestPhase::kInit:
+ // Max framerate should be set initially.
+ if (wants.max_framerate_fps != std::numeric_limits<int>::max() &&
+ wants.max_pixel_count == std::numeric_limits<int>::max()) {
+ test_phase_ = TestPhase::kStart;
+ } else {
+ ADD_FAILURE() << "Got unexpected adaptation request, max res = "
+ << wants.max_pixel_count << ", target res = "
+ << wants.target_pixel_count.value_or(-1)
+ << ", max fps = " << wants.max_framerate_fps;
+ }
+ break;
+ case TestPhase::kStart:
+ if (wants.max_pixel_count < std::numeric_limits<int>::max()) {
+ // On adapting down, VideoStreamEncoder::VideoSourceProxy will set
+ // only the max pixel count, leaving the target unset.
+ test_phase_ = TestPhase::kAdaptedDown;
+ } else {
+ ADD_FAILURE() << "Got unexpected adaptation request, max res = "
+ << wants.max_pixel_count << ", target res = "
+ << wants.target_pixel_count.value_or(-1)
+ << ", max fps = " << wants.max_framerate_fps;
+ }
+ break;
+ case TestPhase::kAdaptedDown:
+ // On adapting up, the adaptation counter will again be at zero, and
+ // so all constraints will be reset.
+ if (wants.max_pixel_count == std::numeric_limits<int>::max() &&
+ !wants.target_pixel_count) {
+ test_phase_ = TestPhase::kAdaptedUp;
+ observation_complete_.Set();
+ } else {
+ ADD_FAILURE() << "Got unexpected adaptation request, max res = "
+ << wants.max_pixel_count << ", target res = "
+ << wants.target_pixel_count.value_or(-1)
+ << ", max fps = " << wants.max_framerate_fps;
+ }
+ break;
+ case TestPhase::kAdaptedUp:
+ // No further adaptations are expected once ramped back up.
+ ADD_FAILURE() << "Got unexpected adaptation request, max res = "
+ << wants.max_pixel_count << ", target res = "
+ << wants.target_pixel_count.value_or(-1)
+ << ", max fps = " << wants.max_framerate_fps;
+ }
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {}
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timed out before receiving an overuse callback.";
+ }
+
+ // State machine advanced by OnSinkWantsChanged; the test completes once
+ // kAdaptedUp is reached.
+ enum class TestPhase {
+ kInit,
+ kStart,
+ kAdaptedDown,
+ kAdaptedUp
+ } test_phase_;
+
+ private:
+ rtc::VideoSinkWants last_wants_;
+ } test;
+
+ RunBaseTest(&test);
+}
+
+// With the fake encoder capped at 30 kbps, checks the actual transmit bitrate:
+// when `pad_to_min_bitrate` is set, padding should bring it into the
+// 130-170 kbps window around kMinTransmitBitrateBps; otherwise it should stay
+// near the encoder cap. Requires kNumBitrateObservationsInRange in-window
+// samples before completing.
+void CallPerfTest::TestMinTransmitBitrate(bool pad_to_min_bitrate) {
+ static const int kMaxEncodeBitrateKbps = 30;
+ static const int kMinTransmitBitrateBps = 150000;
+ static const int kMinAcceptableTransmitBitrate = 130;
+ static const int kMaxAcceptableTransmitBitrate = 170;
+ static const int kNumBitrateObservationsInRange = 100;
+ static const int kAcceptableBitrateErrorMargin = 15; // +- 7
+ class BitrateObserver : public test::EndToEndTest {
+ public:
+ explicit BitrateObserver(bool using_min_transmit_bitrate,
+ TaskQueueBase* task_queue)
+ : EndToEndTest(test::VideoTestConstants::kLongTimeout),
+ send_stream_(nullptr),
+ converged_(false),
+ pad_to_min_bitrate_(using_min_transmit_bitrate),
+ min_acceptable_bitrate_(using_min_transmit_bitrate
+ ? kMinAcceptableTransmitBitrate
+ : (kMaxEncodeBitrateKbps -
+ kAcceptableBitrateErrorMargin / 2)),
+ max_acceptable_bitrate_(using_min_transmit_bitrate
+ ? kMaxAcceptableTransmitBitrate
+ : (kMaxEncodeBitrateKbps +
+ kAcceptableBitrateErrorMargin / 2)),
+ num_bitrate_observations_in_range_(0),
+ task_queue_(task_queue),
+ task_safety_flag_(PendingTaskSafetyFlag::CreateDetached()) {}
+
+ private:
+ // TODO(holmer): Run this with a timer instead of once per packet.
+ // Samples the send stream's total bitrate (asynchronously, on the task
+ // queue) and counts samples that fall in the acceptable window.
+ Action OnSendRtp(rtc::ArrayView<const uint8_t> packet) override {
+ task_queue_->PostTask(SafeTask(task_safety_flag_, [this]() {
+ VideoSendStream::Stats stats = send_stream_->GetStats();
+
+ if (!stats.substreams.empty()) {
+ RTC_DCHECK_EQ(1, stats.substreams.size());
+ int bitrate_kbps =
+ stats.substreams.begin()->second.total_bitrate_bps / 1000;
+ if (bitrate_kbps > min_acceptable_bitrate_ &&
+ bitrate_kbps < max_acceptable_bitrate_) {
+ converged_ = true;
+ ++num_bitrate_observations_in_range_;
+ if (num_bitrate_observations_in_range_ ==
+ kNumBitrateObservationsInRange)
+ observation_complete_.Set();
+ }
+ // Only record samples once the bitrate has entered the window once.
+ if (converged_)
+ bitrate_kbps_list_.AddSample(bitrate_kbps);
+ }
+ }));
+ return SEND_PACKET;
+ }
+
+ void OnVideoStreamsCreated(VideoSendStream* send_stream,
+ const std::vector<VideoReceiveStreamInterface*>&
+ receive_streams) override {
+ send_stream_ = send_stream;
+ }
+
+ // Invalidates pending posted tasks once the streams are gone.
+ void OnStreamsStopped() override { task_safety_flag_->SetNotAlive(); }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ if (pad_to_min_bitrate_) {
+ encoder_config->min_transmit_bitrate_bps = kMinTransmitBitrateBps;
+ } else {
+ RTC_DCHECK_EQ(0, encoder_config->min_transmit_bitrate_bps);
+ }
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timeout while waiting for send-bitrate stats.";
+ GetGlobalMetricsLogger()->LogMetric(
+ std::string("bitrate_stats_") +
+ (pad_to_min_bitrate_ ? "min_transmit_bitrate"
+ : "without_min_transmit_bitrate"),
+ "bitrate_kbps", bitrate_kbps_list_, Unit::kUnitless,
+ ImprovementDirection::kNeitherIsBetter);
+ }
+
+ VideoSendStream* send_stream_;
+ bool converged_;
+ const bool pad_to_min_bitrate_;
+ const int min_acceptable_bitrate_;
+ const int max_acceptable_bitrate_;
+ int num_bitrate_observations_in_range_;
+ SamplesStatsCounter bitrate_kbps_list_;
+ TaskQueueBase* task_queue_;
+ rtc::scoped_refptr<PendingTaskSafetyFlag> task_safety_flag_;
+ } test(pad_to_min_bitrate, task_queue());
+
+ fake_encoder_max_bitrate_ = kMaxEncodeBitrateKbps;
+ RunBaseTest(&test);
+}
+
+// Padding enabled: transmit bitrate should reach the min-transmit window.
+TEST_F(CallPerfTest, Bitrate_Kbps_PadsToMinTransmitBitrate) {
+ TestMinTransmitBitrate(true);
+}
+
+// Padding disabled: transmit bitrate should stay near the encoder cap.
+TEST_F(CallPerfTest, Bitrate_Kbps_NoPadWithoutMinTransmitBitrate) {
+ TestMinTransmitBitrate(false);
+}
+
+// TODO(bugs.webrtc.org/8878)
+#if defined(WEBRTC_MAC)
+#define MAYBE_KeepsHighBitrateWhenReconfiguringSender \
+ DISABLED_KeepsHighBitrateWhenReconfiguringSender
+#else
+#define MAYBE_KeepsHighBitrateWhenReconfiguringSender \
+ KeepsHighBitrateWhenReconfiguringSender
+#endif
+// Verifies that when the sender is reconfigured (resolution doubled) after the
+// BWE has ramped above kReconfigureThresholdKbps, the encoder is re-initialized
+// with a start bitrate close to the last set rate instead of dropping back to
+// the initial bitrate.
+TEST_F(CallPerfTest, MAYBE_KeepsHighBitrateWhenReconfiguringSender) {
+ static const uint32_t kInitialBitrateKbps = 400;
+ // NOTE(review): "Kpbs" looks like a typo for "Kbps"; renaming is out of
+ // scope for this patch.
+ static const uint32_t kInitialBitrateOverheadKpbs = 6;
+ static const uint32_t kReconfigureThresholdKbps = 600;
+
+ class VideoStreamFactory
+ : public VideoEncoderConfig::VideoStreamFactoryInterface {
+ public:
+ VideoStreamFactory() {}
+
+ private:
+ // Widens the default stream's bitrate range so BWE can ramp well above
+ // the reconfigure threshold.
+ std::vector<VideoStream> CreateEncoderStreams(
+ int frame_width,
+ int frame_height,
+ const webrtc::VideoEncoderConfig& encoder_config) override {
+ std::vector<VideoStream> streams =
+ test::CreateVideoStreams(frame_width, frame_height, encoder_config);
+ streams[0].min_bitrate_bps = 50000;
+ streams[0].target_bitrate_bps = streams[0].max_bitrate_bps = 2000000;
+ return streams;
+ }
+ };
+
+ class BitrateObserver : public test::EndToEndTest, public test::FakeEncoder {
+ public:
+ explicit BitrateObserver(TaskQueueBase* task_queue)
+ : EndToEndTest(test::VideoTestConstants::kDefaultTimeout),
+ FakeEncoder(Clock::GetRealTimeClock()),
+ encoder_inits_(0),
+ last_set_bitrate_kbps_(0),
+ send_stream_(nullptr),
+ frame_generator_(nullptr),
+ encoder_factory_(this),
+ bitrate_allocator_factory_(
+ CreateBuiltinVideoBitrateAllocatorFactory()),
+ task_queue_(task_queue) {}
+
+ // First init: expect the configured initial bitrate (minus overhead) or
+ // the last set rate. Second init (after reconfigure): expect the doubled
+ // resolution and a start bitrate still above the threshold.
+ int32_t InitEncode(const VideoCodec* config,
+ const VideoEncoder::Settings& settings) override {
+ ++encoder_inits_;
+ if (encoder_inits_ == 1) {
+ // First time initialization. Frame size is known.
+ // `expected_bitrate` is affected by bandwidth estimation before the
+ // first frame arrives to the encoder.
+ uint32_t expected_bitrate =
+ last_set_bitrate_kbps_ > 0
+ ? last_set_bitrate_kbps_
+ : kInitialBitrateKbps - kInitialBitrateOverheadKpbs;
+ EXPECT_EQ(expected_bitrate, config->startBitrate)
+ << "Encoder not initialized at expected bitrate.";
+ EXPECT_EQ(test::VideoTestConstants::kDefaultWidth, config->width);
+ EXPECT_EQ(test::VideoTestConstants::kDefaultHeight, config->height);
+ } else if (encoder_inits_ == 2) {
+ EXPECT_EQ(2 * test::VideoTestConstants::kDefaultWidth, config->width);
+ EXPECT_EQ(2 * test::VideoTestConstants::kDefaultHeight, config->height);
+ EXPECT_GE(last_set_bitrate_kbps_, kReconfigureThresholdKbps);
+ EXPECT_GT(config->startBitrate, kReconfigureThresholdKbps)
+ << "Encoder reconfigured with bitrate too far away from last set.";
+ observation_complete_.Set();
+ }
+ return FakeEncoder::InitEncode(config, settings);
+ }
+
+ // Tracks the last rate set on the encoder and signals once BWE has ramped
+ // past the reconfigure threshold.
+ void SetRates(const RateControlParameters& parameters) override {
+ last_set_bitrate_kbps_ = parameters.bitrate.get_sum_kbps();
+ if (encoder_inits_ == 1 &&
+ parameters.bitrate.get_sum_kbps() > kReconfigureThresholdKbps) {
+ time_to_reconfigure_.Set();
+ }
+ FakeEncoder::SetRates(parameters);
+ }
+
+ void ModifySenderBitrateConfig(
+ BitrateConstraints* bitrate_config) override {
+ bitrate_config->start_bitrate_bps = kInitialBitrateKbps * 1000;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->encoder_settings.encoder_factory = &encoder_factory_;
+ send_config->encoder_settings.bitrate_allocator_factory =
+ bitrate_allocator_factory_.get();
+ encoder_config->max_bitrate_bps = 2 * kReconfigureThresholdKbps * 1000;
+ encoder_config->video_stream_factory =
+ rtc::make_ref_counted<VideoStreamFactory>();
+
+ // Keep a copy for the ReconfigureVideoEncoder() call in PerformTest().
+ encoder_config_ = encoder_config->Copy();
+ }
+
+ void OnVideoStreamsCreated(VideoSendStream* send_stream,
+ const std::vector<VideoReceiveStreamInterface*>&
+ receive_streams) override {
+ send_stream_ = send_stream;
+ }
+
+ void OnFrameGeneratorCapturerCreated(
+ test::FrameGeneratorCapturer* frame_generator_capturer) override {
+ frame_generator_ = frame_generator_capturer;
+ }
+
+ void PerformTest() override {
+ ASSERT_TRUE(
+ time_to_reconfigure_.Wait(test::VideoTestConstants::kDefaultTimeout))
+ << "Timed out before receiving an initial high bitrate.";
+ frame_generator_->ChangeResolution(
+ test::VideoTestConstants::kDefaultWidth * 2,
+ test::VideoTestConstants::kDefaultHeight * 2);
+ SendTask(task_queue_, [&]() {
+ send_stream_->ReconfigureVideoEncoder(encoder_config_.Copy());
+ });
+ EXPECT_TRUE(Wait())
+ << "Timed out while waiting for a couple of high bitrate estimates "
+ "after reconfiguring the send stream.";
+ }
+
+ private:
+ rtc::Event time_to_reconfigure_;
+ int encoder_inits_;
+ uint32_t last_set_bitrate_kbps_;
+ VideoSendStream* send_stream_;
+ test::FrameGeneratorCapturer* frame_generator_;
+ test::VideoEncoderProxyFactory encoder_factory_;
+ std::unique_ptr<VideoBitrateAllocatorFactory> bitrate_allocator_factory_;
+ VideoEncoderConfig encoder_config_;
+ TaskQueueBase* task_queue_;
+ } test(task_queue());
+
+ RunBaseTest(&test);
+}
+
+// Discovers the minimal supported audio+video bitrate. The test bitrate is
+// considered supported if RTT does not go above 400 ms with the network
+// constrained to the test bitrate.
+//
+// |test_bitrate_from test_bitrate_to| bitrate constraint range
+// `test_bitrate_step` bitrate constraint update step during the test
+// |min_bwe max_bwe| BWE range
+// `start_bwe` initial BWE
+// Sweeps the simulated link capacity from `test_bitrate_from` towards
+// `test_bitrate_to` in steps of `test_bitrate_step` (sign determines sweep
+// direction), measuring average RTT at each step; the lowest bitrate whose
+// average RTT stays at or below 400 ms is reported as the supported minimum.
+void CallPerfTest::TestMinAudioVideoBitrate(int test_bitrate_from,
+ int test_bitrate_to,
+ int test_bitrate_step,
+ int min_bwe,
+ int start_bwe,
+ int max_bwe) {
+ static const std::string kAudioTrackId = "audio_track_0";
+ static constexpr int kBitrateStabilizationMs = 10000;
+ static constexpr int kBitrateMeasurements = 10;
+ static constexpr int kBitrateMeasurementMs = 1000;
+ static constexpr int kShortDelayMs = 10;
+ static constexpr int kMinGoodRttMs = 400;
+
+ class MinVideoAndAudioBitrateTester : public test::EndToEndTest {
+ public:
+ MinVideoAndAudioBitrateTester(int test_bitrate_from,
+ int test_bitrate_to,
+ int test_bitrate_step,
+ int min_bwe,
+ int start_bwe,
+ int max_bwe,
+ TaskQueueBase* task_queue)
+ : EndToEndTest(),
+ test_bitrate_from_(test_bitrate_from),
+ test_bitrate_to_(test_bitrate_to),
+ test_bitrate_step_(test_bitrate_step),
+ min_bwe_(min_bwe),
+ start_bwe_(start_bwe),
+ max_bwe_(max_bwe),
+ task_queue_(task_queue) {}
+
+ protected:
+ // Initial link capacity; updated per step in PerformTest().
+ BuiltInNetworkBehaviorConfig GetFakeNetworkPipeConfig() const {
+ BuiltInNetworkBehaviorConfig pipe_config;
+ pipe_config.link_capacity_kbps = test_bitrate_from_;
+ return pipe_config;
+ }
+
+ BuiltInNetworkBehaviorConfig GetSendTransportConfig() const override {
+ return GetFakeNetworkPipeConfig();
+ }
+ BuiltInNetworkBehaviorConfig GetReceiveTransportConfig() const override {
+ return GetFakeNetworkPipeConfig();
+ }
+
+ void OnTransportCreated(
+ test::PacketTransport* to_receiver,
+ SimulatedNetworkInterface* sender_network,
+ test::PacketTransport* to_sender,
+ SimulatedNetworkInterface* receiver_network) override {
+ send_simulated_network_ = sender_network;
+ receive_simulated_network_ = receiver_network;
+ }
+
+ void PerformTest() override {
+ // Quick test mode, just to exercise all the code paths without actually
+ // caring about performance measurements.
+ const bool quick_perf_test = absl::GetFlag(FLAGS_webrtc_quick_perf_test);
+
+ int last_passed_test_bitrate = -1;
+ // The loop condition supports both increasing and decreasing sweeps,
+ // chosen by the relative order of from/to.
+ for (int test_bitrate = test_bitrate_from_;
+ test_bitrate_from_ < test_bitrate_to_
+ ? test_bitrate <= test_bitrate_to_
+ : test_bitrate >= test_bitrate_to_;
+ test_bitrate += test_bitrate_step_) {
+ BuiltInNetworkBehaviorConfig pipe_config;
+ pipe_config.link_capacity_kbps = test_bitrate;
+ send_simulated_network_->SetConfig(pipe_config);
+ receive_simulated_network_->SetConfig(pipe_config);
+
+ // Let BWE adapt to the new capacity before measuring.
+ rtc::Thread::SleepMs(quick_perf_test ? kShortDelayMs
+ : kBitrateStabilizationMs);
+
+ int64_t avg_rtt = 0;
+ for (int i = 0; i < kBitrateMeasurements; i++) {
+ Call::Stats call_stats;
+ SendTask(task_queue_, [this, &call_stats]() {
+ call_stats = sender_call_->GetStats();
+ });
+ avg_rtt += call_stats.rtt_ms;
+ rtc::Thread::SleepMs(quick_perf_test ? kShortDelayMs
+ : kBitrateMeasurementMs);
+ }
+ avg_rtt = avg_rtt / kBitrateMeasurements;
+ if (avg_rtt > kMinGoodRttMs) {
+ RTC_LOG(LS_WARNING)
+ << "Failed test bitrate: " << test_bitrate << " RTT: " << avg_rtt;
+ break;
+ } else {
+ RTC_LOG(LS_INFO) << "Passed test bitrate: " << test_bitrate
+ << " RTT: " << avg_rtt;
+ last_passed_test_bitrate = test_bitrate;
+ }
+ }
+ EXPECT_GT(last_passed_test_bitrate, -1)
+ << "Minimum supported bitrate out of the test scope";
+ GetGlobalMetricsLogger()->LogSingleValueMetric(
+ "min_test_bitrate_", "min_bitrate", last_passed_test_bitrate,
+ Unit::kUnitless, ImprovementDirection::kNeitherIsBetter);
+ }
+
+ void OnCallsCreated(Call* sender_call, Call* receiver_call) override {
+ sender_call_ = sender_call;
+ BitrateConstraints bitrate_config;
+ bitrate_config.min_bitrate_bps = min_bwe_;
+ bitrate_config.start_bitrate_bps = start_bwe_;
+ bitrate_config.max_bitrate_bps = max_bwe_;
+ sender_call->GetTransportControllerSend()->SetSdpBitrateParameters(
+ bitrate_config);
+ }
+
+ size_t GetNumVideoStreams() const override { return 1; }
+
+ size_t GetNumAudioStreams() const override { return 1; }
+
+ private:
+ const int test_bitrate_from_;
+ const int test_bitrate_to_;
+ const int test_bitrate_step_;
+ const int min_bwe_;
+ const int start_bwe_;
+ const int max_bwe_;
+ SimulatedNetworkInterface* send_simulated_network_;
+ SimulatedNetworkInterface* receive_simulated_network_;
+ Call* sender_call_;
+ TaskQueueBase* const task_queue_;
+ } test(test_bitrate_from, test_bitrate_to, test_bitrate_step, min_bwe,
+ start_bwe, max_bwe, task_queue());
+
+ RunBaseTest(&test);
+}
+
+// Decreasing sweep: 110 kbps down to 40 kbps in 10 kbps steps.
+TEST_F(CallPerfTest, Min_Bitrate_VideoAndAudio) {
+ TestMinAudioVideoBitrate(110, 40, -10, 10000, 70000, 200000);
+}
+
+void CallPerfTest::TestEncodeFramerate(VideoEncoderFactory* encoder_factory,
+ absl::string_view payload_name,
+ const std::vector<int>& max_framerates) {
+ static constexpr double kAllowedFpsDiff = 1.5;
+ static constexpr TimeDelta kMinGetStatsInterval = TimeDelta::Millis(400);
+ static constexpr TimeDelta kMinRunTime = TimeDelta::Seconds(15);
+ static constexpr DataRate kMaxBitrate = DataRate::KilobitsPerSec(1000);
+
+ class FramerateObserver
+ : public test::EndToEndTest,
+ public test::FrameGeneratorCapturer::SinkWantsObserver {
+ public:
+ FramerateObserver(VideoEncoderFactory* encoder_factory,
+ absl::string_view payload_name,
+ const std::vector<int>& max_framerates,
+ TaskQueueBase* task_queue)
+ : EndToEndTest(test::VideoTestConstants::kDefaultTimeout),
+ clock_(Clock::GetRealTimeClock()),
+ encoder_factory_(encoder_factory),
+ payload_name_(payload_name),
+ max_framerates_(max_framerates),
+ task_queue_(task_queue),
+ start_time_(clock_->CurrentTime()),
+ last_getstats_time_(start_time_),
+ send_stream_(nullptr) {}
+
+ void OnFrameGeneratorCapturerCreated(
+ test::FrameGeneratorCapturer* frame_generator_capturer) override {
+ frame_generator_capturer->ChangeResolution(640, 360);
+ }
+
+ void OnSinkWantsChanged(rtc::VideoSinkInterface<VideoFrame>* sink,
+ const rtc::VideoSinkWants& wants) override {}
+
+ void ModifySenderBitrateConfig(
+ BitrateConstraints* bitrate_config) override {
+ bitrate_config->start_bitrate_bps = kMaxBitrate.bps() / 2;
+ }
+
+ void OnVideoStreamsCreated(VideoSendStream* send_stream,
+ const std::vector<VideoReceiveStreamInterface*>&
+ receive_streams) override {
+ send_stream_ = send_stream;
+ }
+
+ size_t GetNumVideoStreams() const override {
+ return max_framerates_.size();
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->encoder_settings.encoder_factory = encoder_factory_;
+ send_config->rtp.payload_name = payload_name_;
+ send_config->rtp.payload_type =
+ test::VideoTestConstants::kVideoSendPayloadType;
+ encoder_config->video_format.name = payload_name_;
+ encoder_config->codec_type = PayloadStringToCodecType(payload_name_);
+ encoder_config->max_bitrate_bps = kMaxBitrate.bps();
+ for (size_t i = 0; i < max_framerates_.size(); ++i) {
+ encoder_config->simulcast_layers[i].max_framerate = max_framerates_[i];
+ configured_framerates_[send_config->rtp.ssrcs[i]] = max_framerates_[i];
+ }
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timeout while waiting for framerate stats.";
+ }
+
+ void VerifyStats() const {
+ const bool quick_perf_test = absl::GetFlag(FLAGS_webrtc_quick_perf_test);
+ double input_fps = 0.0;
+ for (const auto& configured_framerate : configured_framerates_) {
+ input_fps = std::max(configured_framerate.second, input_fps);
+ }
+ for (const auto& encode_frame_rate_list : encode_frame_rate_lists_) {
+ const SamplesStatsCounter& values = encode_frame_rate_list.second;
+ GetGlobalMetricsLogger()->LogMetric(
+ "substream_fps", "encode_frame_rate", values, Unit::kUnitless,
+ ImprovementDirection::kNeitherIsBetter);
+ if (values.IsEmpty()) {
+ continue;
+ }
+ double average_fps = values.GetAverage();
+ uint32_t ssrc = encode_frame_rate_list.first;
+ double expected_fps = configured_framerates_.find(ssrc)->second;
+ if (quick_perf_test && expected_fps != input_fps)
+ EXPECT_NEAR(expected_fps, average_fps, kAllowedFpsDiff);
+ }
+ }
+
+ Action OnSendRtp(rtc::ArrayView<const uint8_t> packet) override {
+ const Timestamp now = clock_->CurrentTime();
+ if (now - last_getstats_time_ > kMinGetStatsInterval) {
+ last_getstats_time_ = now;
+ task_queue_->PostTask([this, now]() {
+ VideoSendStream::Stats stats = send_stream_->GetStats();
+ for (const auto& stat : stats.substreams) {
+ encode_frame_rate_lists_[stat.first].AddSample(
+ stat.second.encode_frame_rate);
+ }
+ if (now - start_time_ > kMinRunTime) {
+ VerifyStats();
+ observation_complete_.Set();
+ }
+ });
+ }
+ return SEND_PACKET;
+ }
+
+ Clock* const clock_;
+ VideoEncoderFactory* const encoder_factory_;
+ const std::string payload_name_;
+ const std::vector<int> max_framerates_;
+ TaskQueueBase* const task_queue_;
+ const Timestamp start_time_;
+ Timestamp last_getstats_time_;
+ VideoSendStream* send_stream_;
+ std::map<uint32_t, SamplesStatsCounter> encode_frame_rate_lists_;
+ std::map<uint32_t, double> configured_framerates_;
+ } test(encoder_factory, payload_name, max_framerates, task_queue());
+
+ RunBaseTest(&test);
+}
+
+TEST_F(CallPerfTest, TestEncodeFramerateVp8Simulcast) {
+ InternalEncoderFactory internal_encoder_factory;
+ test::FunctionVideoEncoderFactory encoder_factory(
+ [&internal_encoder_factory]() {
+ return std::make_unique<SimulcastEncoderAdapter>(
+ &internal_encoder_factory, SdpVideoFormat("VP8"));
+ });
+
+ TestEncodeFramerate(&encoder_factory, "VP8",
+ /*max_framerates=*/{20, 30});
+}
+
+TEST_F(CallPerfTest, TestEncodeFramerateVp8SimulcastLowerInputFps) {
+ InternalEncoderFactory internal_encoder_factory;
+ test::FunctionVideoEncoderFactory encoder_factory(
+ [&internal_encoder_factory]() {
+ return std::make_unique<SimulcastEncoderAdapter>(
+ &internal_encoder_factory, SdpVideoFormat("VP8"));
+ });
+
+ TestEncodeFramerate(&encoder_factory, "VP8",
+ /*max_framerates=*/{14, 20});
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/call_unittest.cc b/third_party/libwebrtc/call/call_unittest.cc
new file mode 100644
index 0000000000..886a15aaf0
--- /dev/null
+++ b/third_party/libwebrtc/call/call_unittest.cc
@@ -0,0 +1,516 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/call.h"
+
+#include <list>
+#include <map>
+#include <memory>
+#include <utility>
+
+#include "absl/strings/string_view.h"
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "api/media_types.h"
+#include "api/rtc_event_log/rtc_event_log.h"
+#include "api/task_queue/default_task_queue_factory.h"
+#include "api/test/mock_audio_mixer.h"
+#include "api/test/video/function_video_encoder_factory.h"
+#include "api/transport/field_trial_based_config.h"
+#include "api/units/timestamp.h"
+#include "api/video/builtin_video_bitrate_allocator_factory.h"
+#include "audio/audio_receive_stream.h"
+#include "audio/audio_send_stream.h"
+#include "call/adaptation/test/fake_resource.h"
+#include "call/adaptation/test/mock_resource_listener.h"
+#include "call/audio_state.h"
+#include "modules/audio_device/include/mock_audio_device.h"
+#include "modules/audio_processing/include/mock_audio_processing.h"
+#include "modules/rtp_rtcp/source/rtp_rtcp_interface.h"
+#include "test/fake_encoder.h"
+#include "test/gtest.h"
+#include "test/mock_audio_decoder_factory.h"
+#include "test/mock_transport.h"
+#include "test/run_loop.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::_;
+using ::testing::Contains;
+using ::testing::MockFunction;
+using ::testing::NiceMock;
+using ::testing::StrictMock;
+using ::webrtc::test::MockAudioDeviceModule;
+using ::webrtc::test::MockAudioMixer;
+using ::webrtc::test::MockAudioProcessing;
+using ::webrtc::test::RunLoop;
+
+struct CallHelper {
+ explicit CallHelper(bool use_null_audio_processing) {
+ task_queue_factory_ = CreateDefaultTaskQueueFactory();
+ AudioState::Config audio_state_config;
+ audio_state_config.audio_mixer = rtc::make_ref_counted<MockAudioMixer>();
+ audio_state_config.audio_processing =
+ use_null_audio_processing
+ ? nullptr
+ : rtc::make_ref_counted<NiceMock<MockAudioProcessing>>();
+ audio_state_config.audio_device_module =
+ rtc::make_ref_counted<MockAudioDeviceModule>();
+ CallConfig config(&event_log_);
+ config.audio_state = AudioState::Create(audio_state_config);
+ config.task_queue_factory = task_queue_factory_.get();
+ config.trials = &field_trials_;
+ call_ = Call::Create(config);
+ }
+
+ Call* operator->() { return call_.get(); }
+
+ private:
+ RunLoop loop_;
+ RtcEventLogNull event_log_;
+ FieldTrialBasedConfig field_trials_;
+ std::unique_ptr<TaskQueueFactory> task_queue_factory_;
+ std::unique_ptr<Call> call_;
+};
+
+rtc::scoped_refptr<Resource> FindResourceWhoseNameContains(
+ const std::vector<rtc::scoped_refptr<Resource>>& resources,
+ absl::string_view name_contains) {
+ for (const auto& resource : resources) {
+ if (resource->Name().find(std::string(name_contains)) != std::string::npos)
+ return resource;
+ }
+ return nullptr;
+}
+
+} // namespace
+
+TEST(CallTest, ConstructDestruct) {
+ for (bool use_null_audio_processing : {false, true}) {
+ CallHelper call(use_null_audio_processing);
+ }
+}
+
+TEST(CallTest, CreateDestroy_AudioSendStream) {
+ for (bool use_null_audio_processing : {false, true}) {
+ CallHelper call(use_null_audio_processing);
+ MockTransport send_transport;
+ AudioSendStream::Config config(&send_transport);
+ config.rtp.ssrc = 42;
+ AudioSendStream* stream = call->CreateAudioSendStream(config);
+ EXPECT_NE(stream, nullptr);
+ call->DestroyAudioSendStream(stream);
+ }
+}
+
+TEST(CallTest, CreateDestroy_AudioReceiveStream) {
+ for (bool use_null_audio_processing : {false, true}) {
+ CallHelper call(use_null_audio_processing);
+ AudioReceiveStreamInterface::Config config;
+ MockTransport rtcp_send_transport;
+ config.rtp.remote_ssrc = 42;
+ config.rtcp_send_transport = &rtcp_send_transport;
+ config.decoder_factory =
+ rtc::make_ref_counted<webrtc::MockAudioDecoderFactory>();
+ AudioReceiveStreamInterface* stream =
+ call->CreateAudioReceiveStream(config);
+ EXPECT_NE(stream, nullptr);
+ call->DestroyAudioReceiveStream(stream);
+ }
+}
+
+TEST(CallTest, CreateDestroy_AudioSendStreams) {
+ for (bool use_null_audio_processing : {false, true}) {
+ CallHelper call(use_null_audio_processing);
+ MockTransport send_transport;
+ AudioSendStream::Config config(&send_transport);
+ std::list<AudioSendStream*> streams;
+ for (int i = 0; i < 2; ++i) {
+ for (uint32_t ssrc = 0; ssrc < 1234567; ssrc += 34567) {
+ config.rtp.ssrc = ssrc;
+ AudioSendStream* stream = call->CreateAudioSendStream(config);
+ EXPECT_NE(stream, nullptr);
+ if (ssrc & 1) {
+ streams.push_back(stream);
+ } else {
+ streams.push_front(stream);
+ }
+ }
+ for (auto s : streams) {
+ call->DestroyAudioSendStream(s);
+ }
+ streams.clear();
+ }
+ }
+}
+
+TEST(CallTest, CreateDestroy_AudioReceiveStreams) {
+ for (bool use_null_audio_processing : {false, true}) {
+ CallHelper call(use_null_audio_processing);
+ AudioReceiveStreamInterface::Config config;
+ MockTransport rtcp_send_transport;
+ config.rtcp_send_transport = &rtcp_send_transport;
+ config.decoder_factory =
+ rtc::make_ref_counted<webrtc::MockAudioDecoderFactory>();
+ std::list<AudioReceiveStreamInterface*> streams;
+ for (int i = 0; i < 2; ++i) {
+ for (uint32_t ssrc = 0; ssrc < 1234567; ssrc += 34567) {
+ config.rtp.remote_ssrc = ssrc;
+ AudioReceiveStreamInterface* stream =
+ call->CreateAudioReceiveStream(config);
+ EXPECT_NE(stream, nullptr);
+ if (ssrc & 1) {
+ streams.push_back(stream);
+ } else {
+ streams.push_front(stream);
+ }
+ }
+ for (auto s : streams) {
+ call->DestroyAudioReceiveStream(s);
+ }
+ streams.clear();
+ }
+ }
+}
+
+TEST(CallTest, CreateDestroy_AssociateAudioSendReceiveStreams_RecvFirst) {
+ for (bool use_null_audio_processing : {false, true}) {
+ CallHelper call(use_null_audio_processing);
+ AudioReceiveStreamInterface::Config recv_config;
+ MockTransport rtcp_send_transport;
+ recv_config.rtp.remote_ssrc = 42;
+ recv_config.rtp.local_ssrc = 777;
+ recv_config.rtcp_send_transport = &rtcp_send_transport;
+ recv_config.decoder_factory =
+ rtc::make_ref_counted<webrtc::MockAudioDecoderFactory>();
+ AudioReceiveStreamInterface* recv_stream =
+ call->CreateAudioReceiveStream(recv_config);
+ EXPECT_NE(recv_stream, nullptr);
+
+ MockTransport send_transport;
+ AudioSendStream::Config send_config(&send_transport);
+ send_config.rtp.ssrc = 777;
+ AudioSendStream* send_stream = call->CreateAudioSendStream(send_config);
+ EXPECT_NE(send_stream, nullptr);
+
+ AudioReceiveStreamImpl* internal_recv_stream =
+ static_cast<AudioReceiveStreamImpl*>(recv_stream);
+ EXPECT_EQ(send_stream,
+ internal_recv_stream->GetAssociatedSendStreamForTesting());
+
+ call->DestroyAudioSendStream(send_stream);
+ EXPECT_EQ(nullptr,
+ internal_recv_stream->GetAssociatedSendStreamForTesting());
+
+ call->DestroyAudioReceiveStream(recv_stream);
+ }
+}
+
+TEST(CallTest, CreateDestroy_AssociateAudioSendReceiveStreams_SendFirst) {
+ for (bool use_null_audio_processing : {false, true}) {
+ CallHelper call(use_null_audio_processing);
+ MockTransport send_transport;
+ AudioSendStream::Config send_config(&send_transport);
+ send_config.rtp.ssrc = 777;
+ AudioSendStream* send_stream = call->CreateAudioSendStream(send_config);
+ EXPECT_NE(send_stream, nullptr);
+
+ AudioReceiveStreamInterface::Config recv_config;
+ MockTransport rtcp_send_transport;
+ recv_config.rtp.remote_ssrc = 42;
+ recv_config.rtp.local_ssrc = 777;
+ recv_config.rtcp_send_transport = &rtcp_send_transport;
+ recv_config.decoder_factory =
+ rtc::make_ref_counted<webrtc::MockAudioDecoderFactory>();
+ AudioReceiveStreamInterface* recv_stream =
+ call->CreateAudioReceiveStream(recv_config);
+ EXPECT_NE(recv_stream, nullptr);
+
+ AudioReceiveStreamImpl* internal_recv_stream =
+ static_cast<AudioReceiveStreamImpl*>(recv_stream);
+ EXPECT_EQ(send_stream,
+ internal_recv_stream->GetAssociatedSendStreamForTesting());
+
+ call->DestroyAudioReceiveStream(recv_stream);
+
+ call->DestroyAudioSendStream(send_stream);
+ }
+}
+
+TEST(CallTest, CreateDestroy_FlexfecReceiveStream) {
+ for (bool use_null_audio_processing : {false, true}) {
+ CallHelper call(use_null_audio_processing);
+ MockTransport rtcp_send_transport;
+ FlexfecReceiveStream::Config config(&rtcp_send_transport);
+ config.payload_type = 118;
+ config.rtp.remote_ssrc = 38837212;
+ config.protected_media_ssrcs = {27273};
+
+ FlexfecReceiveStream* stream = call->CreateFlexfecReceiveStream(config);
+ EXPECT_NE(stream, nullptr);
+ call->DestroyFlexfecReceiveStream(stream);
+ }
+}
+
+TEST(CallTest, CreateDestroy_FlexfecReceiveStreams) {
+ for (bool use_null_audio_processing : {false, true}) {
+ CallHelper call(use_null_audio_processing);
+ MockTransport rtcp_send_transport;
+ FlexfecReceiveStream::Config config(&rtcp_send_transport);
+ config.payload_type = 118;
+ std::list<FlexfecReceiveStream*> streams;
+
+ for (int i = 0; i < 2; ++i) {
+ for (uint32_t ssrc = 0; ssrc < 1234567; ssrc += 34567) {
+ config.rtp.remote_ssrc = ssrc;
+ config.protected_media_ssrcs = {ssrc + 1};
+ FlexfecReceiveStream* stream = call->CreateFlexfecReceiveStream(config);
+ EXPECT_NE(stream, nullptr);
+ if (ssrc & 1) {
+ streams.push_back(stream);
+ } else {
+ streams.push_front(stream);
+ }
+ }
+ for (auto s : streams) {
+ call->DestroyFlexfecReceiveStream(s);
+ }
+ streams.clear();
+ }
+ }
+}
+
+TEST(CallTest, MultipleFlexfecReceiveStreamsProtectingSingleVideoStream) {
+ for (bool use_null_audio_processing : {false, true}) {
+ CallHelper call(use_null_audio_processing);
+ MockTransport rtcp_send_transport;
+ FlexfecReceiveStream::Config config(&rtcp_send_transport);
+ config.payload_type = 118;
+ config.protected_media_ssrcs = {1324234};
+ FlexfecReceiveStream* stream;
+ std::list<FlexfecReceiveStream*> streams;
+
+ config.rtp.remote_ssrc = 838383;
+ stream = call->CreateFlexfecReceiveStream(config);
+ EXPECT_NE(stream, nullptr);
+ streams.push_back(stream);
+
+ config.rtp.remote_ssrc = 424993;
+ stream = call->CreateFlexfecReceiveStream(config);
+ EXPECT_NE(stream, nullptr);
+ streams.push_back(stream);
+
+ config.rtp.remote_ssrc = 99383;
+ stream = call->CreateFlexfecReceiveStream(config);
+ EXPECT_NE(stream, nullptr);
+ streams.push_back(stream);
+
+ config.rtp.remote_ssrc = 5548;
+ stream = call->CreateFlexfecReceiveStream(config);
+ EXPECT_NE(stream, nullptr);
+ streams.push_back(stream);
+
+ for (auto s : streams) {
+ call->DestroyFlexfecReceiveStream(s);
+ }
+ }
+}
+
+TEST(CallTest,
+ DeliverRtpPacketOfTypeAudioTriggerOnUndemuxablePacketHandlerIfNotDemuxed) {
+ CallHelper call(/*use_null_audio_processing=*/false);
+ MockFunction<bool(const RtpPacketReceived& parsed_packet)>
+ un_demuxable_packet_handler;
+
+ RtpPacketReceived packet;
+ packet.set_arrival_time(Timestamp::Millis(1));
+ EXPECT_CALL(un_demuxable_packet_handler, Call);
+ call->Receiver()->DeliverRtpPacket(
+ MediaType::AUDIO, packet, un_demuxable_packet_handler.AsStdFunction());
+}
+
+TEST(CallTest,
+ DeliverRtpPacketOfTypeVideoTriggerOnUndemuxablePacketHandlerIfNotDemuxed) {
+ CallHelper call(/*use_null_audio_processing=*/false);
+ MockFunction<bool(const RtpPacketReceived& parsed_packet)>
+ un_demuxable_packet_handler;
+
+ RtpPacketReceived packet;
+ packet.set_arrival_time(Timestamp::Millis(1));
+ EXPECT_CALL(un_demuxable_packet_handler, Call);
+ call->Receiver()->DeliverRtpPacket(
+ MediaType::VIDEO, packet, un_demuxable_packet_handler.AsStdFunction());
+}
+
+TEST(CallTest,
+ DeliverRtpPacketOfTypeAnyDoesNotTriggerOnUndemuxablePacketHandler) {
+ CallHelper call(/*use_null_audio_processing=*/false);
+ MockFunction<bool(const RtpPacketReceived& parsed_packet)>
+ un_demuxable_packet_handler;
+
+ RtpPacketReceived packet;
+ packet.set_arrival_time(Timestamp::Millis(1));
+ EXPECT_CALL(un_demuxable_packet_handler, Call).Times(0);
+ call->Receiver()->DeliverRtpPacket(
+ MediaType::ANY, packet, un_demuxable_packet_handler.AsStdFunction());
+}
+
+TEST(CallTest, RecreatingAudioStreamWithSameSsrcReusesRtpState) {
+ constexpr uint32_t kSSRC = 12345;
+ for (bool use_null_audio_processing : {false, true}) {
+ CallHelper call(use_null_audio_processing);
+
+ auto create_stream_and_get_rtp_state = [&](uint32_t ssrc) {
+ MockTransport send_transport;
+ AudioSendStream::Config config(&send_transport);
+ config.rtp.ssrc = ssrc;
+ AudioSendStream* stream = call->CreateAudioSendStream(config);
+ const RtpState rtp_state =
+ static_cast<internal::AudioSendStream*>(stream)->GetRtpState();
+ call->DestroyAudioSendStream(stream);
+ return rtp_state;
+ };
+
+ const RtpState rtp_state1 = create_stream_and_get_rtp_state(kSSRC);
+ const RtpState rtp_state2 = create_stream_and_get_rtp_state(kSSRC);
+
+ EXPECT_EQ(rtp_state1.sequence_number, rtp_state2.sequence_number);
+ EXPECT_EQ(rtp_state1.start_timestamp, rtp_state2.start_timestamp);
+ EXPECT_EQ(rtp_state1.timestamp, rtp_state2.timestamp);
+ EXPECT_EQ(rtp_state1.capture_time, rtp_state2.capture_time);
+ EXPECT_EQ(rtp_state1.last_timestamp_time, rtp_state2.last_timestamp_time);
+ }
+}
+
+TEST(CallTest, AddAdaptationResourceAfterCreatingVideoSendStream) {
+ CallHelper call(true);
+ // Create a VideoSendStream.
+ test::FunctionVideoEncoderFactory fake_encoder_factory([]() {
+ return std::make_unique<test::FakeEncoder>(Clock::GetRealTimeClock());
+ });
+ auto bitrate_allocator_factory = CreateBuiltinVideoBitrateAllocatorFactory();
+ MockTransport send_transport;
+ VideoSendStream::Config config(&send_transport);
+ config.rtp.payload_type = 110;
+ config.rtp.ssrcs = {42};
+ config.encoder_settings.encoder_factory = &fake_encoder_factory;
+ config.encoder_settings.bitrate_allocator_factory =
+ bitrate_allocator_factory.get();
+ VideoEncoderConfig encoder_config;
+ encoder_config.max_bitrate_bps = 1337;
+ VideoSendStream* stream1 =
+ call->CreateVideoSendStream(config.Copy(), encoder_config.Copy());
+ EXPECT_NE(stream1, nullptr);
+ config.rtp.ssrcs = {43};
+ VideoSendStream* stream2 =
+ call->CreateVideoSendStream(config.Copy(), encoder_config.Copy());
+ EXPECT_NE(stream2, nullptr);
+ // Add a fake resource.
+ auto fake_resource = FakeResource::Create("FakeResource");
+ call->AddAdaptationResource(fake_resource);
+ // An adapter resource mirroring the `fake_resource` should now be present on
+ // both streams.
+ auto injected_resource1 = FindResourceWhoseNameContains(
+ stream1->GetAdaptationResources(), fake_resource->Name());
+ EXPECT_TRUE(injected_resource1);
+ auto injected_resource2 = FindResourceWhoseNameContains(
+ stream2->GetAdaptationResources(), fake_resource->Name());
+ EXPECT_TRUE(injected_resource2);
+ // Overwrite the real resource listeners with mock ones to verify the signal
+ // gets through.
+ injected_resource1->SetResourceListener(nullptr);
+ StrictMock<MockResourceListener> resource_listener1;
+ EXPECT_CALL(resource_listener1, OnResourceUsageStateMeasured(_, _))
+ .Times(1)
+ .WillOnce([injected_resource1](rtc::scoped_refptr<Resource> resource,
+ ResourceUsageState usage_state) {
+ EXPECT_EQ(injected_resource1, resource);
+ EXPECT_EQ(ResourceUsageState::kOveruse, usage_state);
+ });
+ injected_resource1->SetResourceListener(&resource_listener1);
+ injected_resource2->SetResourceListener(nullptr);
+ StrictMock<MockResourceListener> resource_listener2;
+ EXPECT_CALL(resource_listener2, OnResourceUsageStateMeasured(_, _))
+ .Times(1)
+ .WillOnce([injected_resource2](rtc::scoped_refptr<Resource> resource,
+ ResourceUsageState usage_state) {
+ EXPECT_EQ(injected_resource2, resource);
+ EXPECT_EQ(ResourceUsageState::kOveruse, usage_state);
+ });
+ injected_resource2->SetResourceListener(&resource_listener2);
+ // The kOveruse signal should get to our resource listeners.
+ fake_resource->SetUsageState(ResourceUsageState::kOveruse);
+ call->DestroyVideoSendStream(stream1);
+ call->DestroyVideoSendStream(stream2);
+}
+
+TEST(CallTest, AddAdaptationResourceBeforeCreatingVideoSendStream) {
+ CallHelper call(true);
+ // Add a fake resource.
+ auto fake_resource = FakeResource::Create("FakeResource");
+ call->AddAdaptationResource(fake_resource);
+ // Create a VideoSendStream.
+ test::FunctionVideoEncoderFactory fake_encoder_factory([]() {
+ return std::make_unique<test::FakeEncoder>(Clock::GetRealTimeClock());
+ });
+ auto bitrate_allocator_factory = CreateBuiltinVideoBitrateAllocatorFactory();
+ MockTransport send_transport;
+ VideoSendStream::Config config(&send_transport);
+ config.rtp.payload_type = 110;
+ config.rtp.ssrcs = {42};
+ config.encoder_settings.encoder_factory = &fake_encoder_factory;
+ config.encoder_settings.bitrate_allocator_factory =
+ bitrate_allocator_factory.get();
+ VideoEncoderConfig encoder_config;
+ encoder_config.max_bitrate_bps = 1337;
+ VideoSendStream* stream1 =
+ call->CreateVideoSendStream(config.Copy(), encoder_config.Copy());
+ EXPECT_NE(stream1, nullptr);
+ config.rtp.ssrcs = {43};
+ VideoSendStream* stream2 =
+ call->CreateVideoSendStream(config.Copy(), encoder_config.Copy());
+ EXPECT_NE(stream2, nullptr);
+ // An adapter resource mirroring the `fake_resource` should be present on both
+ // streams.
+ auto injected_resource1 = FindResourceWhoseNameContains(
+ stream1->GetAdaptationResources(), fake_resource->Name());
+ EXPECT_TRUE(injected_resource1);
+ auto injected_resource2 = FindResourceWhoseNameContains(
+ stream2->GetAdaptationResources(), fake_resource->Name());
+ EXPECT_TRUE(injected_resource2);
+ // Overwrite the real resource listeners with mock ones to verify the signal
+ // gets through.
+ injected_resource1->SetResourceListener(nullptr);
+ StrictMock<MockResourceListener> resource_listener1;
+ EXPECT_CALL(resource_listener1, OnResourceUsageStateMeasured(_, _))
+ .Times(1)
+ .WillOnce([injected_resource1](rtc::scoped_refptr<Resource> resource,
+ ResourceUsageState usage_state) {
+ EXPECT_EQ(injected_resource1, resource);
+ EXPECT_EQ(ResourceUsageState::kUnderuse, usage_state);
+ });
+ injected_resource1->SetResourceListener(&resource_listener1);
+ injected_resource2->SetResourceListener(nullptr);
+ StrictMock<MockResourceListener> resource_listener2;
+ EXPECT_CALL(resource_listener2, OnResourceUsageStateMeasured(_, _))
+ .Times(1)
+ .WillOnce([injected_resource2](rtc::scoped_refptr<Resource> resource,
+ ResourceUsageState usage_state) {
+ EXPECT_EQ(injected_resource2, resource);
+ EXPECT_EQ(ResourceUsageState::kUnderuse, usage_state);
+ });
+ injected_resource2->SetResourceListener(&resource_listener2);
+ // The kUnderuse signal should get to our resource listeners.
+ fake_resource->SetUsageState(ResourceUsageState::kUnderuse);
+ call->DestroyVideoSendStream(stream1);
+ call->DestroyVideoSendStream(stream2);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/degraded_call.cc b/third_party/libwebrtc/call/degraded_call.cc
new file mode 100644
index 0000000000..75a4a1cac0
--- /dev/null
+++ b/third_party/libwebrtc/call/degraded_call.cc
@@ -0,0 +1,382 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/degraded_call.h"
+
+#include <memory>
+#include <utility>
+
+#include "absl/strings/string_view.h"
+#include "api/sequence_checker.h"
+#include "modules/rtp_rtcp/source/rtp_util.h"
+#include "rtc_base/thread.h"
+
+namespace webrtc {
+
+DegradedCall::FakeNetworkPipeOnTaskQueue::FakeNetworkPipeOnTaskQueue(
+ TaskQueueBase* task_queue,
+ rtc::scoped_refptr<PendingTaskSafetyFlag> call_alive,
+ Clock* clock,
+ std::unique_ptr<NetworkBehaviorInterface> network_behavior)
+ : clock_(clock),
+ task_queue_(task_queue),
+ call_alive_(std::move(call_alive)),
+ pipe_(clock, std::move(network_behavior)) {}
+
+void DegradedCall::FakeNetworkPipeOnTaskQueue::SendRtp(
+ rtc::ArrayView<const uint8_t> packet,
+ const PacketOptions& options,
+ Transport* transport) {
+ pipe_.SendRtp(packet, options, transport);
+ Process();
+}
+
+void DegradedCall::FakeNetworkPipeOnTaskQueue::SendRtcp(
+ rtc::ArrayView<const uint8_t> packet,
+ Transport* transport) {
+ pipe_.SendRtcp(packet, transport);
+ Process();
+}
+
+void DegradedCall::FakeNetworkPipeOnTaskQueue::AddActiveTransport(
+ Transport* transport) {
+ pipe_.AddActiveTransport(transport);
+}
+
+void DegradedCall::FakeNetworkPipeOnTaskQueue::RemoveActiveTransport(
+ Transport* transport) {
+ pipe_.RemoveActiveTransport(transport);
+}
+
+bool DegradedCall::FakeNetworkPipeOnTaskQueue::Process() {
+ pipe_.Process();
+ auto time_to_next = pipe_.TimeUntilNextProcess();
+ if (!time_to_next) {
+ // Packet was probably sent immediately.
+ return false;
+ }
+
+ task_queue_->PostTask(SafeTask(call_alive_, [this, time_to_next] {
+ RTC_DCHECK_RUN_ON(task_queue_);
+ int64_t next_process_time = *time_to_next + clock_->TimeInMilliseconds();
+ if (!next_process_ms_ || next_process_time < *next_process_ms_) {
+ next_process_ms_ = next_process_time;
+ task_queue_->PostDelayedHighPrecisionTask(
+ SafeTask(call_alive_,
+ [this] {
+ RTC_DCHECK_RUN_ON(task_queue_);
+ if (!Process()) {
+ next_process_ms_.reset();
+ }
+ }),
+ TimeDelta::Millis(*time_to_next));
+ }
+ }));
+
+ return true;
+}
+
+DegradedCall::FakeNetworkPipeTransportAdapter::FakeNetworkPipeTransportAdapter(
+ FakeNetworkPipeOnTaskQueue* fake_network,
+ Call* call,
+ Clock* clock,
+ Transport* real_transport)
+ : network_pipe_(fake_network),
+ call_(call),
+ clock_(clock),
+ real_transport_(real_transport) {
+ network_pipe_->AddActiveTransport(real_transport);
+}
+
+DegradedCall::FakeNetworkPipeTransportAdapter::
+ ~FakeNetworkPipeTransportAdapter() {
+ network_pipe_->RemoveActiveTransport(real_transport_);
+}
+
+bool DegradedCall::FakeNetworkPipeTransportAdapter::SendRtp(
+ rtc::ArrayView<const uint8_t> packet,
+ const PacketOptions& options) {
+ // A call here comes from the RTP stack (probably pacer). We intercept it and
+ // put it in the fake network pipe instead, but report to Call that is has
+ // been sent, so that the bandwidth estimator sees the delay we add.
+ network_pipe_->SendRtp(packet, options, real_transport_);
+ if (options.packet_id != -1) {
+ rtc::SentPacket sent_packet;
+ sent_packet.packet_id = options.packet_id;
+ sent_packet.send_time_ms = clock_->TimeInMilliseconds();
+ sent_packet.info.included_in_feedback = options.included_in_feedback;
+ sent_packet.info.included_in_allocation = options.included_in_allocation;
+ sent_packet.info.packet_size_bytes = packet.size();
+ sent_packet.info.packet_type = rtc::PacketType::kData;
+ call_->OnSentPacket(sent_packet);
+ }
+ return true;
+}
+
+bool DegradedCall::FakeNetworkPipeTransportAdapter::SendRtcp(
+ rtc::ArrayView<const uint8_t> packet) {
+ network_pipe_->SendRtcp(packet, real_transport_);
+ return true;
+}
+
+/* Mozilla: Avoid this since it could use GetRealTimeClock().
+DegradedCall::DegradedCall(
+ std::unique_ptr<Call> call,
+ const std::vector<TimeScopedNetworkConfig>& send_configs,
+ const std::vector<TimeScopedNetworkConfig>& receive_configs)
+ : clock_(Clock::GetRealTimeClock()),
+ call_(std::move(call)),
+ call_alive_(PendingTaskSafetyFlag::CreateDetached()),
+ send_config_index_(0),
+ send_configs_(send_configs),
+ send_simulated_network_(nullptr),
+ receive_config_index_(0),
+ receive_configs_(receive_configs) {
+ if (!receive_configs_.empty()) {
+ auto network = std::make_unique<SimulatedNetwork>(receive_configs_[0]);
+ receive_simulated_network_ = network.get();
+ receive_pipe_ =
+ std::make_unique<webrtc::FakeNetworkPipe>(clock_, std::move(network));
+ receive_pipe_->SetReceiver(call_->Receiver());
+ if (receive_configs_.size() > 1) {
+ call_->network_thread()->PostDelayedTask(
+ SafeTask(call_alive_, [this] { UpdateReceiveNetworkConfig(); }),
+ receive_configs_[0].duration);
+ }
+ }
+ if (!send_configs_.empty()) {
+ auto network = std::make_unique<SimulatedNetwork>(send_configs_[0]);
+ send_simulated_network_ = network.get();
+ send_pipe_ = std::make_unique<FakeNetworkPipeOnTaskQueue>(
+ call_->network_thread(), call_alive_, clock_, std::move(network));
+ if (send_configs_.size() > 1) {
+ call_->network_thread()->PostDelayedTask(
+ SafeTask(call_alive_, [this] { UpdateSendNetworkConfig(); }),
+ send_configs_[0].duration);
+ }
+ }
+}
+*/
+
+DegradedCall::~DegradedCall() {
+ RTC_DCHECK_RUN_ON(call_->worker_thread());
+ // Thread synchronization is required to call `SetNotAlive`.
+ // Otherwise, when the `DegradedCall` object is destroyed but
+ // `SetNotAlive` has not yet been called,
+ // another Closure guarded by `call_alive_` may be called.
+ // TODO(https://crbug.com/webrtc/12649): Remove this block-invoke.
+ static_cast<rtc::Thread*>(call_->network_thread())
+ ->BlockingCall(
+ [flag = std::move(call_alive_)]() mutable { flag->SetNotAlive(); });
+}
+
+AudioSendStream* DegradedCall::CreateAudioSendStream(
+ const AudioSendStream::Config& config) {
+ if (!send_configs_.empty()) {
+ auto transport_adapter = std::make_unique<FakeNetworkPipeTransportAdapter>(
+ send_pipe_.get(), call_.get(), clock_, config.send_transport);
+ AudioSendStream::Config degrade_config = config;
+ degrade_config.send_transport = transport_adapter.get();
+ AudioSendStream* send_stream = call_->CreateAudioSendStream(degrade_config);
+ if (send_stream) {
+ audio_send_transport_adapters_[send_stream] =
+ std::move(transport_adapter);
+ }
+ return send_stream;
+ }
+ return call_->CreateAudioSendStream(config);
+}
+
+void DegradedCall::DestroyAudioSendStream(AudioSendStream* send_stream) {
+ call_->DestroyAudioSendStream(send_stream);
+ audio_send_transport_adapters_.erase(send_stream);
+}
+
+AudioReceiveStreamInterface* DegradedCall::CreateAudioReceiveStream(
+ const AudioReceiveStreamInterface::Config& config) {
+ return call_->CreateAudioReceiveStream(config);
+}
+
+void DegradedCall::DestroyAudioReceiveStream(
+ AudioReceiveStreamInterface* receive_stream) {
+ call_->DestroyAudioReceiveStream(receive_stream);
+}
+
+// Creates a video send stream. When send-side degradation is configured
+// (send_configs_ non-empty), the stream's Transport is wrapped in a
+// FakeNetworkPipeTransportAdapter so outgoing packets pass through the shared
+// send-side FakeNetworkPipe before reaching the real transport. The adapter
+// is kept alive in video_send_transport_adapters_ for the stream's lifetime.
+VideoSendStream* DegradedCall::CreateVideoSendStream(
+    VideoSendStream::Config config,
+    VideoEncoderConfig encoder_config) {
+  std::unique_ptr<FakeNetworkPipeTransportAdapter> transport_adapter;
+  if (!send_configs_.empty()) {
+    transport_adapter = std::make_unique<FakeNetworkPipeTransportAdapter>(
+        send_pipe_.get(), call_.get(), clock_, config.send_transport);
+    config.send_transport = transport_adapter.get();
+  }
+  VideoSendStream* send_stream = call_->CreateVideoSendStream(
+      std::move(config), std::move(encoder_config));
+  // Only retain the adapter if stream creation actually succeeded; otherwise
+  // it is destroyed when this function returns.
+  if (send_stream && transport_adapter) {
+    video_send_transport_adapters_[send_stream] = std::move(transport_adapter);
+  }
+  return send_stream;
+}
+
+// Same as the overload above, for streams created with an external
+// FecController.
+VideoSendStream* DegradedCall::CreateVideoSendStream(
+    VideoSendStream::Config config,
+    VideoEncoderConfig encoder_config,
+    std::unique_ptr<FecController> fec_controller) {
+  std::unique_ptr<FakeNetworkPipeTransportAdapter> transport_adapter;
+  if (!send_configs_.empty()) {
+    transport_adapter = std::make_unique<FakeNetworkPipeTransportAdapter>(
+        send_pipe_.get(), call_.get(), clock_, config.send_transport);
+    config.send_transport = transport_adapter.get();
+  }
+  VideoSendStream* send_stream = call_->CreateVideoSendStream(
+      std::move(config), std::move(encoder_config), std::move(fec_controller));
+  if (send_stream && transport_adapter) {
+    video_send_transport_adapters_[send_stream] = std::move(transport_adapter);
+  }
+  return send_stream;
+}
+
+// Destroys the stream, then drops its transport adapter (if any) — same
+// ordering rationale as DestroyAudioSendStream.
+void DegradedCall::DestroyVideoSendStream(VideoSendStream* send_stream) {
+  call_->DestroyVideoSendStream(send_stream);
+  video_send_transport_adapters_.erase(send_stream);
+}
+
+// Receive streams are plain pass-throughs; incoming degradation happens by
+// routing packets through receive_pipe_ (see Receiver()).
+VideoReceiveStreamInterface* DegradedCall::CreateVideoReceiveStream(
+    VideoReceiveStreamInterface::Config configuration) {
+  return call_->CreateVideoReceiveStream(std::move(configuration));
+}
+
+void DegradedCall::DestroyVideoReceiveStream(
+    VideoReceiveStreamInterface* receive_stream) {
+  call_->DestroyVideoReceiveStream(receive_stream);
+}
+
+FlexfecReceiveStream* DegradedCall::CreateFlexfecReceiveStream(
+    const FlexfecReceiveStream::Config config) {
+  // NOTE(review): `config` is const, so std::move degenerates to a copy here.
+  return call_->CreateFlexfecReceiveStream(std::move(config));
+}
+
+void DegradedCall::DestroyFlexfecReceiveStream(
+    FlexfecReceiveStream* receive_stream) {
+  call_->DestroyFlexfecReceiveStream(receive_stream);
+}
+
+// Pass-through: adaptation resources are managed by the wrapped call.
+void DegradedCall::AddAdaptationResource(
+    rtc::scoped_refptr<Resource> resource) {
+  call_->AddAdaptationResource(std::move(resource));
+}
+
+// When receive-side degradation is configured, DegradedCall itself acts as
+// the PacketReceiver so that incoming packets are funneled through
+// receive_pipe_ (see DeliverRtpPacket/DeliverRtcpPacket below). Otherwise
+// packets go directly to the wrapped call's receiver.
+PacketReceiver* DegradedCall::Receiver() {
+  if (!receive_configs_.empty()) {
+    return this;
+  }
+  return call_->Receiver();
+}
+
+// The accessors below are plain pass-throughs to the wrapped call.
+RtpTransportControllerSendInterface*
+DegradedCall::GetTransportControllerSend() {
+  return call_->GetTransportControllerSend();
+}
+
+Call::Stats DegradedCall::GetStats() const {
+  return call_->GetStats();
+}
+
+const FieldTrialsView& DegradedCall::trials() const {
+  return call_->trials();
+}
+
+TaskQueueBase* DegradedCall::network_thread() const {
+  return call_->network_thread();
+}
+
+TaskQueueBase* DegradedCall::worker_thread() const {
+  return call_->worker_thread();
+}
+
+// The notification methods below simply forward to the wrapped call; the
+// degraded path does not alter network-state or SSRC/sync-group signaling.
+void DegradedCall::SignalChannelNetworkState(MediaType media,
+                                             NetworkState state) {
+  call_->SignalChannelNetworkState(media, state);
+}
+
+void DegradedCall::OnAudioTransportOverheadChanged(
+    int transport_overhead_per_packet) {
+  call_->OnAudioTransportOverheadChanged(transport_overhead_per_packet);
+}
+
+void DegradedCall::OnLocalSsrcUpdated(AudioReceiveStreamInterface& stream,
+                                      uint32_t local_ssrc) {
+  call_->OnLocalSsrcUpdated(stream, local_ssrc);
+}
+
+void DegradedCall::OnLocalSsrcUpdated(VideoReceiveStreamInterface& stream,
+                                      uint32_t local_ssrc) {
+  call_->OnLocalSsrcUpdated(stream, local_ssrc);
+}
+
+void DegradedCall::OnLocalSsrcUpdated(FlexfecReceiveStream& stream,
+                                      uint32_t local_ssrc) {
+  call_->OnLocalSsrcUpdated(stream, local_ssrc);
+}
+
+void DegradedCall::OnUpdateSyncGroup(AudioReceiveStreamInterface& stream,
+                                     absl::string_view sync_group) {
+  call_->OnUpdateSyncGroup(stream, sync_group);
+}
+
+void DegradedCall::OnSentPacket(const rtc::SentPacket& sent_packet) {
+  if (!send_configs_.empty()) {
+    // If we have a degraded send-transport, we have already notified call
+    // about the supposed network send time. Discard the actual network send
+    // time in order to properly fool the BWE.
+    return;
+  }
+  call_->OnSentPacket(sent_packet);
+}
+
+// Implements PacketReceiver for the degraded receive path: feed the packet
+// into the receive-side pipe and immediately run Process() so anything whose
+// simulated delivery time has already passed is forwarded right away.
+void DegradedCall::DeliverRtpPacket(
+    MediaType media_type,
+    RtpPacketReceived packet,
+    OnUndemuxablePacketHandler undemuxable_packet_handler) {
+  RTC_DCHECK_RUN_ON(&received_packet_sequence_checker_);
+  receive_pipe_->DeliverRtpPacket(media_type, std::move(packet),
+                                  std::move(undemuxable_packet_handler));
+  receive_pipe_->Process();
+}
+
+void DegradedCall::DeliverRtcpPacket(rtc::CopyOnWriteBuffer packet) {
+  RTC_DCHECK_RUN_ON(&received_packet_sequence_checker_);
+  receive_pipe_->DeliverRtcpPacket(std::move(packet));
+  receive_pipe_->Process();
+}
+
+// Pass-through: bitrate preferences are handled by the wrapped call.
+void DegradedCall::SetClientBitratePreferences(
+    const webrtc::BitrateSettings& preferences) {
+  call_->SetClientBitratePreferences(preferences);
+}
+
+// Rotates to the next send-side network config (round-robin) and reschedules
+// itself after that config's duration. SafeTask with call_alive_ cancels the
+// chain when DegradedCall is destroyed.
+void DegradedCall::UpdateSendNetworkConfig() {
+  send_config_index_ = (send_config_index_ + 1) % send_configs_.size();
+  send_simulated_network_->SetConfig(send_configs_[send_config_index_]);
+  call_->network_thread()->PostDelayedTask(
+      SafeTask(call_alive_, [this] { UpdateSendNetworkConfig(); }),
+      send_configs_[send_config_index_].duration);
+}
+
+// Receive-side counterpart of UpdateSendNetworkConfig().
+void DegradedCall::UpdateReceiveNetworkConfig() {
+  receive_config_index_ = (receive_config_index_ + 1) % receive_configs_.size();
+  receive_simulated_network_->SetConfig(
+      receive_configs_[receive_config_index_]);
+  call_->network_thread()->PostDelayedTask(
+      SafeTask(call_alive_, [this] { UpdateReceiveNetworkConfig(); }),
+      receive_configs_[receive_config_index_].duration);
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/degraded_call.h b/third_party/libwebrtc/call/degraded_call.h
new file mode 100644
index 0000000000..14892f0607
--- /dev/null
+++ b/third_party/libwebrtc/call/degraded_call.h
@@ -0,0 +1,202 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef CALL_DEGRADED_CALL_H_
+#define CALL_DEGRADED_CALL_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "api/call/transport.h"
+#include "api/fec_controller.h"
+#include "api/media_types.h"
+#include "api/rtp_headers.h"
+#include "api/task_queue/pending_task_safety_flag.h"
+#include "api/test/simulated_network.h"
+#include "call/audio_receive_stream.h"
+#include "call/audio_send_stream.h"
+#include "call/call.h"
+#include "call/fake_network_pipe.h"
+#include "call/flexfec_receive_stream.h"
+#include "call/packet_receiver.h"
+#include "call/rtp_transport_controller_send_interface.h"
+#include "call/simulated_network.h"
+#include "call/video_receive_stream.h"
+#include "call/video_send_stream.h"
+#include "rtc_base/copy_on_write_buffer.h"
+#include "rtc_base/network/sent_packet.h"
+#include "rtc_base/task_queue.h"
+#include "system_wrappers/include/clock.h"
+#include "video/config/video_encoder_config.h"
+
+namespace webrtc {
+// A Call decorator that simulates network degradation for testing. Outgoing
+// packets from audio/video send streams are routed through a send-side
+// FakeNetworkPipe via per-stream Transport adapters; incoming packets are
+// routed through a receive-side FakeNetworkPipe by having this class act as
+// the PacketReceiver. Network configs may be supplied as time-limited
+// sequences that are cycled through (see TimeScopedNetworkConfig::duration).
+class DegradedCall : public Call, private PacketReceiver {
+ public:
+  // A network behavior config with a lifetime; when `duration` elapses, the
+  // next config in the supplied list is applied (round-robin).
+  struct TimeScopedNetworkConfig : public BuiltInNetworkBehaviorConfig {
+    TimeDelta duration = TimeDelta::PlusInfinity();
+  };
+
+  explicit DegradedCall(
+      std::unique_ptr<Call> call,
+      const std::vector<TimeScopedNetworkConfig>& send_configs,
+      const std::vector<TimeScopedNetworkConfig>& receive_configs);
+  ~DegradedCall() override;
+
+  // Implements Call.
+  AudioSendStream* CreateAudioSendStream(
+      const AudioSendStream::Config& config) override;
+  void DestroyAudioSendStream(AudioSendStream* send_stream) override;
+
+  AudioReceiveStreamInterface* CreateAudioReceiveStream(
+      const AudioReceiveStreamInterface::Config& config) override;
+  void DestroyAudioReceiveStream(
+      AudioReceiveStreamInterface* receive_stream) override;
+
+  VideoSendStream* CreateVideoSendStream(
+      VideoSendStream::Config config,
+      VideoEncoderConfig encoder_config) override;
+  VideoSendStream* CreateVideoSendStream(
+      VideoSendStream::Config config,
+      VideoEncoderConfig encoder_config,
+      std::unique_ptr<FecController> fec_controller) override;
+  void DestroyVideoSendStream(VideoSendStream* send_stream) override;
+
+  VideoReceiveStreamInterface* CreateVideoReceiveStream(
+      VideoReceiveStreamInterface::Config configuration) override;
+  void DestroyVideoReceiveStream(
+      VideoReceiveStreamInterface* receive_stream) override;
+
+  FlexfecReceiveStream* CreateFlexfecReceiveStream(
+      const FlexfecReceiveStream::Config config) override;
+  void DestroyFlexfecReceiveStream(
+      FlexfecReceiveStream* receive_stream) override;
+
+  void AddAdaptationResource(rtc::scoped_refptr<Resource> resource) override;
+
+  // Returns `this` when receive-side degradation is configured, so incoming
+  // packets pass through the receive pipe; otherwise the wrapped receiver.
+  PacketReceiver* Receiver() override;
+
+  RtpTransportControllerSendInterface* GetTransportControllerSend() override;
+
+  Stats GetStats() const override;
+
+  const FieldTrialsView& trials() const override;
+
+  TaskQueueBase* network_thread() const override;
+  TaskQueueBase* worker_thread() const override;
+
+  void SignalChannelNetworkState(MediaType media, NetworkState state) override;
+  void OnAudioTransportOverheadChanged(
+      int transport_overhead_per_packet) override;
+  void OnLocalSsrcUpdated(AudioReceiveStreamInterface& stream,
+                          uint32_t local_ssrc) override;
+  void OnLocalSsrcUpdated(VideoReceiveStreamInterface& stream,
+                          uint32_t local_ssrc) override;
+  void OnLocalSsrcUpdated(FlexfecReceiveStream& stream,
+                          uint32_t local_ssrc) override;
+  void OnUpdateSyncGroup(AudioReceiveStreamInterface& stream,
+                         absl::string_view sync_group) override;
+  void OnSentPacket(const rtc::SentPacket& sent_packet) override;
+
+ protected:
+  // Implements PacketReceiver.
+  void DeliverRtpPacket(
+      MediaType media_type,
+      RtpPacketReceived packet,
+      OnUndemuxablePacketHandler undemuxable_packet_handler) override;
+  void DeliverRtcpPacket(rtc::CopyOnWriteBuffer packet) override;
+
+ private:
+  // Wraps a FakeNetworkPipe and drives its Process() calls as (delayed) tasks
+  // on a given task queue, guarded by `call_alive` for safe teardown.
+  class FakeNetworkPipeOnTaskQueue {
+   public:
+    FakeNetworkPipeOnTaskQueue(
+        TaskQueueBase* task_queue,
+        rtc::scoped_refptr<PendingTaskSafetyFlag> call_alive,
+        Clock* clock,
+        std::unique_ptr<NetworkBehaviorInterface> network_behavior);
+
+    void SendRtp(rtc::ArrayView<const uint8_t> packet,
+                 const PacketOptions& options,
+                 Transport* transport);
+    void SendRtcp(rtc::ArrayView<const uint8_t> packet, Transport* transport);
+
+    void AddActiveTransport(Transport* transport);
+    void RemoveActiveTransport(Transport* transport);
+
+   private:
+    // Try to process packets on the fake network queue.
+    // Returns true if call resulted in a delayed process, false if queue empty.
+    bool Process();
+
+    Clock* const clock_;
+    TaskQueueBase* const task_queue_;
+    rtc::scoped_refptr<PendingTaskSafetyFlag> call_alive_;
+    FakeNetworkPipe pipe_;
+    absl::optional<int64_t> next_process_ms_ RTC_GUARDED_BY(&task_queue_);
+  };
+
+  // For audio/video send stream, a TransportAdapter instance is used to
+  // intercept packets to be sent, and put them into a common FakeNetworkPipe
+  // in such a way that they will eventually (unless dropped) be forwarded to
+  // the correct Transport for that stream.
+  class FakeNetworkPipeTransportAdapter : public Transport {
+   public:
+    FakeNetworkPipeTransportAdapter(FakeNetworkPipeOnTaskQueue* fake_network,
+                                    Call* call,
+                                    Clock* clock,
+                                    Transport* real_transport);
+    ~FakeNetworkPipeTransportAdapter();
+
+    bool SendRtp(rtc::ArrayView<const uint8_t> packet,
+                 const PacketOptions& options) override;
+    bool SendRtcp(rtc::ArrayView<const uint8_t> packet) override;
+
+   private:
+    FakeNetworkPipeOnTaskQueue* const network_pipe_;
+    Call* const call_;
+    Clock* const clock_;
+    Transport* const real_transport_;
+  };
+
+  void SetClientBitratePreferences(
+      const webrtc::BitrateSettings& preferences) override;
+  // Round-robin rotation of the active network configs; each reschedules
+  // itself after the active config's duration.
+  void UpdateSendNetworkConfig();
+  void UpdateReceiveNetworkConfig();
+
+  Clock* const clock_;
+  const std::unique_ptr<Call> call_;
+  // For cancelling tasks on the network thread when DegradedCall is destroyed
+  rtc::scoped_refptr<PendingTaskSafetyFlag> call_alive_;
+  // Index of the currently active entry in send_configs_.
+  size_t send_config_index_;
+  const std::vector<TimeScopedNetworkConfig> send_configs_;
+  // Raw pointer into the network behavior driving send_pipe_ — presumably
+  // owned by the pipe; confirm at the construction site (not in this view).
+  SimulatedNetwork* send_simulated_network_;
+  std::unique_ptr<FakeNetworkPipeOnTaskQueue> send_pipe_;
+  // Keeps per-stream transport adapters alive for the stream's lifetime.
+  std::map<AudioSendStream*, std::unique_ptr<FakeNetworkPipeTransportAdapter>>
+      audio_send_transport_adapters_;
+  std::map<VideoSendStream*, std::unique_ptr<FakeNetworkPipeTransportAdapter>>
+      video_send_transport_adapters_;
+
+  size_t receive_config_index_;
+  const std::vector<TimeScopedNetworkConfig> receive_configs_;
+  SimulatedNetwork* receive_simulated_network_;
+  SequenceChecker received_packet_sequence_checker_;
+  std::unique_ptr<FakeNetworkPipe> receive_pipe_
+      RTC_GUARDED_BY(received_packet_sequence_checker_);
+};
+
+} // namespace webrtc
+
+#endif // CALL_DEGRADED_CALL_H_
diff --git a/third_party/libwebrtc/call/fake_network_pipe.cc b/third_party/libwebrtc/call/fake_network_pipe.cc
new file mode 100644
index 0000000000..3c7207bd84
--- /dev/null
+++ b/third_party/libwebrtc/call/fake_network_pipe.cc
@@ -0,0 +1,382 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/fake_network_pipe.h"
+
+#include <string.h>
+
+#include <algorithm>
+#include <queue>
+#include <utility>
+#include <vector>
+
+#include "api/media_types.h"
+#include "api/units/timestamp.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+namespace {
+constexpr int64_t kLogIntervalMs = 5000;
+} // namespace
+
+// Constructor used on the send side: the packet will eventually be handed to
+// `transport` (Transport-based delivery), with optional PacketOptions.
+NetworkPacket::NetworkPacket(rtc::CopyOnWriteBuffer packet,
+                             int64_t send_time,
+                             int64_t arrival_time,
+                             absl::optional<PacketOptions> packet_options,
+                             bool is_rtcp,
+                             MediaType media_type,
+                             absl::optional<int64_t> packet_time_us,
+                             Transport* transport)
+    : packet_(std::move(packet)),
+      send_time_(send_time),
+      arrival_time_(arrival_time),
+      packet_options_(packet_options),
+      is_rtcp_(is_rtcp),
+      media_type_(media_type),
+      packet_time_us_(packet_time_us),
+      transport_(transport) {}
+
+// Constructor used on the receive side: keeps the parsed RtpPacketReceived so
+// it can later be forwarded to a PacketReceiver with an adjusted arrival time.
+NetworkPacket::NetworkPacket(RtpPacketReceived packet_received,
+                             MediaType media_type,
+                             int64_t send_time,
+                             int64_t arrival_time)
+    : packet_(packet_received.Buffer()),
+      send_time_(send_time),
+      arrival_time_(arrival_time),
+      is_rtcp_(false),
+      media_type_(media_type),
+      packet_time_us_(packet_received.arrival_time().us()),
+      packet_received_(std::move(packet_received)),
+      transport_(nullptr) {}
+
+NetworkPacket::NetworkPacket(NetworkPacket&& o)
+    : packet_(std::move(o.packet_)),
+      send_time_(o.send_time_),
+      arrival_time_(o.arrival_time_),
+      packet_options_(o.packet_options_),
+      is_rtcp_(o.is_rtcp_),
+      media_type_(o.media_type_),
+      packet_time_us_(o.packet_time_us_),
+      packet_received_(std::move(o.packet_received_)),
+      transport_(o.transport_) {}
+
+NetworkPacket::~NetworkPacket() = default;
+
+NetworkPacket& NetworkPacket::operator=(NetworkPacket&& o) {
+  packet_ = std::move(o.packet_);
+  send_time_ = o.send_time_;
+  arrival_time_ = o.arrival_time_;
+  packet_options_ = o.packet_options_;
+  is_rtcp_ = o.is_rtcp_;
+  media_type_ = o.media_type_;
+  packet_time_us_ = o.packet_time_us_;
+  // NOTE(review): unlike the move constructor above, packet_received_ is
+  // copied here rather than moved — functionally equivalent but more costly.
+  packet_received_ = o.packet_received_;
+  transport_ = o.transport_;
+
+  return *this;
+}
+
+// Convenience constructors delegate to the full form with no receiver and/or
+// the default seed of 1.
+FakeNetworkPipe::FakeNetworkPipe(
+    Clock* clock,
+    std::unique_ptr<NetworkBehaviorInterface> network_behavior)
+    : FakeNetworkPipe(clock, std::move(network_behavior), nullptr, 1) {}
+
+FakeNetworkPipe::FakeNetworkPipe(
+    Clock* clock,
+    std::unique_ptr<NetworkBehaviorInterface> network_behavior,
+    PacketReceiver* receiver)
+    : FakeNetworkPipe(clock, std::move(network_behavior), receiver, 1) {}
+
+// NOTE(review): the `seed` parameter is accepted but not referenced anywhere
+// in this constructor — it appears to exist for API compatibility only.
+FakeNetworkPipe::FakeNetworkPipe(
+    Clock* clock,
+    std::unique_ptr<NetworkBehaviorInterface> network_behavior,
+    PacketReceiver* receiver,
+    uint64_t seed)
+    : clock_(clock),
+      network_behavior_(std::move(network_behavior)),
+      receiver_(receiver),
+      clock_offset_ms_(0),
+      dropped_packets_(0),
+      sent_packets_(0),
+      total_packet_delay_us_(0),
+      last_log_time_us_(clock_->TimeInMicroseconds()) {}
+
+// All transports registered via AddActiveTransport must have been removed
+// (RemoveActiveTransport) before the pipe is destroyed.
+FakeNetworkPipe::~FakeNetworkPipe() {
+  RTC_DCHECK(active_transports_.empty());
+}
+
+void FakeNetworkPipe::SetReceiver(PacketReceiver* receiver) {
+  MutexLock lock(&config_lock_);
+  receiver_ = receiver;
+}
+
+// Reference-counted registration: the same Transport may be added multiple
+// times and is only forgotten when the count drops back to zero.
+void FakeNetworkPipe::AddActiveTransport(Transport* transport) {
+  MutexLock lock(&config_lock_);
+  active_transports_[transport]++;
+}
+
+void FakeNetworkPipe::RemoveActiveTransport(Transport* transport) {
+  MutexLock lock(&config_lock_);
+  auto it = active_transports_.find(transport);
+  RTC_CHECK(it != active_transports_.end());
+  if (--(it->second) == 0) {
+    active_transports_.erase(it);
+  }
+}
+
+// Transport-style entry points: always report success to the caller; actual
+// loss is simulated later when the network behavior dequeues the packet.
+bool FakeNetworkPipe::SendRtp(rtc::ArrayView<const uint8_t> packet,
+                              const PacketOptions& options,
+                              Transport* transport) {
+  RTC_DCHECK(transport);
+  EnqueuePacket(rtc::CopyOnWriteBuffer(packet), options, false, transport);
+  return true;
+}
+
+bool FakeNetworkPipe::SendRtcp(rtc::ArrayView<const uint8_t> packet,
+                               Transport* transport) {
+  RTC_DCHECK(transport);
+  EnqueuePacket(rtc::CopyOnWriteBuffer(packet), absl::nullopt, true, transport);
+  return true;
+}
+
+// PacketReceiver-style entry point for incoming RTP: wraps the parsed packet
+// and queues it for simulated delivery to the receiver set via SetReceiver().
+// The undemuxable-packet handler is not propagated through the pipe.
+void FakeNetworkPipe::DeliverRtpPacket(
+    MediaType media_type,
+    RtpPacketReceived packet,
+    OnUndemuxablePacketHandler undemuxable_packet_handler) {
+  MutexLock lock(&process_lock_);
+  int64_t time_now_us = clock_->TimeInMicroseconds();
+  EnqueuePacket(
+      NetworkPacket(std::move(packet), media_type, time_now_us, time_now_us));
+}
+
+void FakeNetworkPipe::DeliverRtcpPacket(rtc::CopyOnWriteBuffer packet) {
+  EnqueuePacket(std::move(packet), absl::nullopt, true, MediaType::ANY,
+                absl::nullopt);
+}
+
+void FakeNetworkPipe::SetClockOffset(int64_t offset_ms) {
+  MutexLock lock(&config_lock_);
+  clock_offset_ms_ = offset_ms;
+}
+
+FakeNetworkPipe::StoredPacket::StoredPacket(NetworkPacket&& packet)
+    : packet(std::move(packet)) {}
+
+// Receiver-bound variant: locks, timestamps, and enqueues.
+bool FakeNetworkPipe::EnqueuePacket(rtc::CopyOnWriteBuffer packet,
+                                    absl::optional<PacketOptions> options,
+                                    bool is_rtcp,
+                                    MediaType media_type,
+                                    absl::optional<int64_t> packet_time_us) {
+  MutexLock lock(&process_lock_);
+  int64_t time_now_us = clock_->TimeInMicroseconds();
+  return EnqueuePacket(NetworkPacket(std::move(packet), time_now_us,
+                                     time_now_us, options, is_rtcp, media_type,
+                                     packet_time_us, nullptr));
+}
+
+// Transport-bound variant: locks, timestamps, and enqueues.
+bool FakeNetworkPipe::EnqueuePacket(rtc::CopyOnWriteBuffer packet,
+                                    absl::optional<PacketOptions> options,
+                                    bool is_rtcp,
+                                    Transport* transport) {
+  MutexLock lock(&process_lock_);
+  int64_t time_now_us = clock_->TimeInMicroseconds();
+  return EnqueuePacket(NetworkPacket(std::move(packet), time_now_us,
+                                     time_now_us, options, is_rtcp,
+                                     MediaType::ANY, absl::nullopt, transport));
+}
+
+// Hands the packet to the network behavior. The address of the StoredPacket
+// serves as its unique id; std::deque does not invalidate references to the
+// remaining elements on push_back/pop_front, so the id stays valid while the
+// packet is in flight. Returns false (and counts a drop) if the behavior
+// refuses the packet.
+bool FakeNetworkPipe::EnqueuePacket(NetworkPacket&& net_packet) {
+  int64_t send_time_us = net_packet.send_time();
+  size_t packet_size = net_packet.data_length();
+
+  packets_in_flight_.emplace_back(StoredPacket(std::move(net_packet)));
+  int64_t packet_id = reinterpret_cast<uint64_t>(&packets_in_flight_.back());
+  bool sent = network_behavior_->EnqueuePacket(
+      PacketInFlightInfo(packet_size, send_time_us, packet_id));
+
+  if (!sent) {
+    packets_in_flight_.pop_back();
+    ++dropped_packets_;
+  }
+  return sent;
+}
+
+// Fraction of packets dropped so far, in [0, 1]; 0 before anything was sent.
+float FakeNetworkPipe::PercentageLoss() {
+  MutexLock lock(&process_lock_);
+  if (sent_packets_ == 0)
+    return 0;
+
+  return static_cast<float>(dropped_packets_) /
+         (sent_packets_ + dropped_packets_);
+}
+
+// Mean added delay of delivered packets, in milliseconds (truncated).
+int FakeNetworkPipe::AverageDelay() {
+  MutexLock lock(&process_lock_);
+  if (sent_packets_ == 0)
+    return 0;
+
+  return static_cast<int>(total_packet_delay_us_ /
+                          (1000 * static_cast<int64_t>(sent_packets_)));
+}
+
+size_t FakeNetworkPipe::DroppedPackets() {
+  MutexLock lock(&process_lock_);
+  return dropped_packets_;
+}
+
+size_t FakeNetworkPipe::SentPackets() {
+  MutexLock lock(&process_lock_);
+  return sent_packets_;
+}
+
+// Dequeues all packets whose simulated delivery time has passed and forwards
+// them. Runs in two phases to avoid holding both locks at once: first collect
+// deliverable packets under process_lock_, then deliver them under
+// config_lock_.
+void FakeNetworkPipe::Process() {
+  int64_t time_now_us;
+  std::queue<NetworkPacket> packets_to_deliver;
+  {
+    MutexLock lock(&process_lock_);
+    time_now_us = clock_->TimeInMicroseconds();
+    // Periodically (every kLogIntervalMs) log the current queueing delay.
+    if (time_now_us - last_log_time_us_ > kLogIntervalMs * 1000) {
+      int64_t queueing_delay_us = 0;
+      if (!packets_in_flight_.empty())
+        queueing_delay_us =
+            time_now_us - packets_in_flight_.front().packet.send_time();
+
+      RTC_LOG(LS_INFO) << "Network queue: " << queueing_delay_us / 1000
+                       << " ms.";
+      last_log_time_us_ = time_now_us;
+    }
+
+    std::vector<PacketDeliveryInfo> delivery_infos =
+        network_behavior_->DequeueDeliverablePackets(time_now_us);
+    for (auto& delivery_info : delivery_infos) {
+      // In the common case where no reordering happens, find will return early
+      // as the first packet will be a match.
+      auto packet_it =
+          std::find_if(packets_in_flight_.begin(), packets_in_flight_.end(),
+                       [&delivery_info](StoredPacket& packet_ref) {
+                         return reinterpret_cast<uint64_t>(&packet_ref) ==
+                                delivery_info.packet_id;
+                       });
+      // Check that the packet is in the deque of packets in flight.
+      RTC_CHECK(packet_it != packets_in_flight_.end());
+      // Check that the packet is not already removed.
+      RTC_DCHECK(!packet_it->removed);
+
+      NetworkPacket packet = std::move(packet_it->packet);
+      packet_it->removed = true;
+
+      // Cleanup of removed packets at the beginning of the deque.
+      while (!packets_in_flight_.empty() &&
+             packets_in_flight_.front().removed) {
+        packets_in_flight_.pop_front();
+      }
+
+      if (delivery_info.receive_time_us != PacketDeliveryInfo::kNotReceived) {
+        int64_t added_delay_us =
+            delivery_info.receive_time_us - packet.send_time();
+        packet.IncrementArrivalTime(added_delay_us);
+        packets_to_deliver.emplace(std::move(packet));
+        // `time_now_us` might be later than when the packet should have
+        // arrived, due to NetworkProcess being called too late. For stats, use
+        // the time it should have been on the link.
+        total_packet_delay_us_ += added_delay_us;
+        ++sent_packets_;
+      } else {
+        ++dropped_packets_;
+      }
+    }
+  }
+
+  MutexLock lock(&config_lock_);
+  while (!packets_to_deliver.empty()) {
+    NetworkPacket packet = std::move(packets_to_deliver.front());
+    packets_to_deliver.pop();
+    DeliverNetworkPacket(&packet);
+  }
+}
+
+// Forwards one dequeued packet either to its Transport (send-side mode) or to
+// receiver_ (receive-side mode). A pipe operates in exactly one of the two
+// modes: a packet carrying a Transport implies no receiver is configured.
+// Caller must hold config_lock_.
+void FakeNetworkPipe::DeliverNetworkPacket(NetworkPacket* packet) {
+  Transport* transport = packet->transport();
+  if (transport) {
+    RTC_DCHECK(!receiver_);
+    if (active_transports_.find(transport) == active_transports_.end()) {
+      // Transport has been destroyed, ignore this packet.
+      return;
+    }
+    if (packet->is_rtcp()) {
+      transport->SendRtcp(
+          rtc::MakeArrayView(packet->data(), packet->data_length()));
+    } else {
+      transport->SendRtp(
+          rtc::MakeArrayView(packet->data(), packet->data_length()),
+          packet->packet_options());
+    }
+  } else if (receiver_) {
+    // Shift the original packet time by the simulated queueing delay plus any
+    // configured clock offset, so the receiver sees a consistent timeline.
+    int64_t packet_time_us = packet->packet_time_us().value_or(-1);
+    if (packet_time_us != -1) {
+      int64_t queue_time_us = packet->arrival_time() - packet->send_time();
+      RTC_CHECK(queue_time_us >= 0);
+      packet_time_us += queue_time_us;
+      packet_time_us += (clock_offset_ms_ * 1000);
+    }
+    if (packet->is_rtcp()) {
+      receiver_->DeliverRtcpPacket(std::move(*packet->raw_packet()));
+    } else if (packet->packet_received()) {
+      packet->packet_received()->set_arrival_time(
+          Timestamp::Micros(packet_time_us));
+      receiver_->DeliverRtpPacket(
+          packet->media_type(), *packet->packet_received(),
+          [](const RtpPacketReceived& packet) {
+            RTC_LOG(LS_WARNING)
+                << "Unexpected failed demuxing packet in FakeNetworkPipe, "
+                   "Ssrc: "
+                << packet.Ssrc() << " seq : " << packet.SequenceNumber();
+            return false;
+          });
+    }
+  }
+}
+
+// Milliseconds (rounded to nearest, clamped at 0) until the next packet is
+// due for delivery, or nullopt if nothing is queued.
+absl::optional<int64_t> FakeNetworkPipe::TimeUntilNextProcess() {
+  MutexLock lock(&process_lock_);
+  absl::optional<int64_t> delivery_us = network_behavior_->NextDeliveryTimeUs();
+  if (delivery_us) {
+    int64_t delay_us = *delivery_us - clock_->TimeInMicroseconds();
+    return std::max<int64_t>((delay_us + 500) / 1000, 0);
+  }
+  return absl::nullopt;
+}
+
+bool FakeNetworkPipe::HasReceiver() const {
+  MutexLock lock(&config_lock_);
+  return receiver_ != nullptr;
+}
+
+// Convenience wrapper for subclasses: delivers a packet while taking
+// config_lock_ itself.
+void FakeNetworkPipe::DeliverPacketWithLock(NetworkPacket* packet) {
+  MutexLock lock(&config_lock_);
+  DeliverNetworkPacket(packet);
+}
+
+void FakeNetworkPipe::ResetStats() {
+  MutexLock lock(&process_lock_);
+  dropped_packets_ = 0;
+  sent_packets_ = 0;
+  total_packet_delay_us_ = 0;
+}
+
+int64_t FakeNetworkPipe::GetTimeInMicroseconds() const {
+  return clock_->TimeInMicroseconds();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/fake_network_pipe.h b/third_party/libwebrtc/call/fake_network_pipe.h
new file mode 100644
index 0000000000..7bc7e0f060
--- /dev/null
+++ b/third_party/libwebrtc/call/fake_network_pipe.h
@@ -0,0 +1,230 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef CALL_FAKE_NETWORK_PIPE_H_
+#define CALL_FAKE_NETWORK_PIPE_H_
+
+#include <deque>
+#include <map>
+#include <memory>
+#include <queue>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "api/call/transport.h"
+#include "api/test/simulated_network.h"
+#include "call/simulated_packet_receiver.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace webrtc {
+
+class Clock;
+class PacketReceiver;
+enum class MediaType;
+
+// A packet queued in the fake network, together with the metadata needed to
+// deliver it later to either a Transport (send-side) or a PacketReceiver
+// (receive-side). Move-only, so it can live in STL containers cheaply.
+class NetworkPacket {
+ public:
+  NetworkPacket(rtc::CopyOnWriteBuffer packet,
+                int64_t send_time,
+                int64_t arrival_time,
+                absl::optional<PacketOptions> packet_options,
+                bool is_rtcp,
+                MediaType media_type,
+                absl::optional<int64_t> packet_time_us,
+                Transport* transport);
+
+  NetworkPacket(RtpPacketReceived packet,
+                MediaType media_type,
+                int64_t send_time,
+                int64_t arrival_time);
+
+  // Disallow copy constructor and copy assignment (no deep copies of `data_`).
+  NetworkPacket(const NetworkPacket&) = delete;
+  ~NetworkPacket();
+  NetworkPacket& operator=(const NetworkPacket&) = delete;
+  // Allow move constructor/assignment, so that we can use in stl containers.
+  NetworkPacket(NetworkPacket&&);
+  NetworkPacket& operator=(NetworkPacket&&);
+
+  const uint8_t* data() const { return packet_.data(); }
+  size_t data_length() const { return packet_.size(); }
+  rtc::CopyOnWriteBuffer* raw_packet() { return &packet_; }
+  int64_t send_time() const { return send_time_; }
+  int64_t arrival_time() const { return arrival_time_; }
+  // Pushes the simulated delivery time back by `extra_delay` microseconds.
+  void IncrementArrivalTime(int64_t extra_delay) {
+    arrival_time_ += extra_delay;
+  }
+  // Returns the stored options, or default-constructed options if none were
+  // supplied (e.g. for RTCP).
+  PacketOptions packet_options() const {
+    return packet_options_.value_or(PacketOptions());
+  }
+  bool is_rtcp() const { return is_rtcp_; }
+  MediaType media_type() const { return media_type_; }
+  absl::optional<int64_t> packet_time_us() const { return packet_time_us_; }
+  // Mutable access to the parsed RTP packet, or nullptr if this packet was
+  // created from a raw buffer (send-side / RTCP path).
+  RtpPacketReceived* packet_received() {
+    return packet_received_ ? &packet_received_.value() : nullptr;
+  }
+  absl::optional<RtpPacketReceived> packet_received() const {
+    return packet_received_;
+  }
+  Transport* transport() const { return transport_; }
+
+ private:
+  rtc::CopyOnWriteBuffer packet_;
+  // The time the packet was sent out on the network.
+  int64_t send_time_;
+  // The time the packet should arrive at the receiver.
+  int64_t arrival_time_;
+  // If using a Transport for outgoing degradation, populate with
+  // PacketOptions (transport-wide sequence number) for RTP.
+  absl::optional<PacketOptions> packet_options_;
+  bool is_rtcp_;
+  // If using a PacketReceiver for incoming degradation, populate with
+  // appropriate MediaType and packet time. This type/timing will be kept and
+  // forwarded. The packet time might be altered to reflect time spent in fake
+  // network pipe.
+  MediaType media_type_;
+  absl::optional<int64_t> packet_time_us_;
+  absl::optional<RtpPacketReceived> packet_received_;
+  Transport* transport_;
+};
+
+// Class faking a network link; internally it uses an implementation of a
+// SimulatedNetworkInterface to simulate network behavior.
+class FakeNetworkPipe : public SimulatedPacketReceiverInterface {
+ public:
+  // Will keep `network_behavior` alive while pipe is alive itself.
+  FakeNetworkPipe(Clock* clock,
+                  std::unique_ptr<NetworkBehaviorInterface> network_behavior);
+  FakeNetworkPipe(Clock* clock,
+                  std::unique_ptr<NetworkBehaviorInterface> network_behavior,
+                  PacketReceiver* receiver);
+  FakeNetworkPipe(Clock* clock,
+                  std::unique_ptr<NetworkBehaviorInterface> network_behavior,
+                  PacketReceiver* receiver,
+                  uint64_t seed);
+
+  ~FakeNetworkPipe() override;
+
+  FakeNetworkPipe(const FakeNetworkPipe&) = delete;
+  FakeNetworkPipe& operator=(const FakeNetworkPipe&) = delete;
+
+  // Additional offset (in ms) applied to delivered packets' timestamps.
+  void SetClockOffset(int64_t offset_ms);
+
+  // Must not be called in parallel with DeliverPacket or Process.
+  void SetReceiver(PacketReceiver* receiver) override;
+
+  // Adds/subtracts references to Transport instances. If a Transport is
+  // destroyed we cannot use to forward a potential delayed packet, these
+  // methods are used to maintain a map of which instances are live.
+  void AddActiveTransport(Transport* transport);
+  void RemoveActiveTransport(Transport* transport);
+
+  // Methods for use with Transport interface. When/if packets are delivered,
+  // they will be passed to the instance specified by the `transport` parameter.
+  // Note that the instance must be in the map of active transports.
+  bool SendRtp(rtc::ArrayView<const uint8_t> packet,
+               const PacketOptions& options,
+               Transport* transport);
+  bool SendRtcp(rtc::ArrayView<const uint8_t> packet, Transport* transport);
+
+  // Implements the PacketReceiver interface. When/if packets are delivered,
+  // they will be passed directly to the receiver instance given in
+  // SetReceiver(). The receive time will be increased by the amount of time the
+  // packet spent in the fake network pipe.
+  void DeliverRtpPacket(
+      MediaType media_type,
+      RtpPacketReceived packet,
+      OnUndemuxablePacketHandler undemuxable_packet_handler) override;
+  void DeliverRtcpPacket(rtc::CopyOnWriteBuffer packet) override;
+
+  // Processes the network queues and trigger PacketReceiver::IncomingPacket for
+  // packets ready to be delivered.
+  void Process() override;
+  absl::optional<int64_t> TimeUntilNextProcess() override;
+
+  // Get statistics.
+  float PercentageLoss();
+  int AverageDelay() override;
+  size_t DroppedPackets();
+  size_t SentPackets();
+  void ResetStats();
+
+ protected:
+  void DeliverPacketWithLock(NetworkPacket* packet);
+  int64_t GetTimeInMicroseconds() const;
+  bool ShouldProcess(int64_t time_now_us) const;
+  void SetTimeToNextProcess(int64_t skip_us);
+
+ private:
+  // A NetworkPacket plus a tombstone flag; packets are marked `removed` on
+  // dequeue and physically erased lazily from the front of the deque so that
+  // addresses of still-in-flight packets (used as ids) remain stable.
+  struct StoredPacket {
+    NetworkPacket packet;
+    bool removed = false;
+    explicit StoredPacket(NetworkPacket&& packet);
+    StoredPacket(StoredPacket&&) = default;
+    StoredPacket(const StoredPacket&) = delete;
+    StoredPacket& operator=(const StoredPacket&) = delete;
+    StoredPacket() = delete;
+  };
+
+  // Returns true if enqueued, or false if packet was dropped. Use this method
+  // when enqueueing packets that should be received by PacketReceiver instance.
+  bool EnqueuePacket(rtc::CopyOnWriteBuffer packet,
+                     absl::optional<PacketOptions> options,
+                     bool is_rtcp,
+                     MediaType media_type,
+                     absl::optional<int64_t> packet_time_us);
+
+  // Returns true if enqueued, or false if packet was dropped. Use this method
+  // when enqueueing packets that should be received by Transport instance.
+  bool EnqueuePacket(rtc::CopyOnWriteBuffer packet,
+                     absl::optional<PacketOptions> options,
+                     bool is_rtcp,
+                     Transport* transport);
+
+  bool EnqueuePacket(NetworkPacket&& net_packet)
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(process_lock_);
+
+  void DeliverNetworkPacket(NetworkPacket* packet)
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(config_lock_);
+  bool HasReceiver() const;
+
+  Clock* const clock_;
+  // `config_lock_` guards the mostly constant things like the callbacks.
+  mutable Mutex config_lock_;
+  const std::unique_ptr<NetworkBehaviorInterface> network_behavior_;
+  PacketReceiver* receiver_ RTC_GUARDED_BY(config_lock_);
+
+  // `process_lock_` guards the data structures involved in delay and loss
+  // processes, such as the packet queues.
+  Mutex process_lock_;
+  // Packets are added at the back of the deque, this makes the deque ordered
+  // by increasing send time. The common case when removing packets from the
+  // deque is removing early packets, which will be close to the front of the
+  // deque. This makes finding the packets in the deque efficient in the common
+  // case.
+  std::deque<StoredPacket> packets_in_flight_ RTC_GUARDED_BY(process_lock_);
+
+  int64_t clock_offset_ms_ RTC_GUARDED_BY(config_lock_);
+
+  // Statistics.
+  size_t dropped_packets_ RTC_GUARDED_BY(process_lock_);
+  size_t sent_packets_ RTC_GUARDED_BY(process_lock_);
+  int64_t total_packet_delay_us_ RTC_GUARDED_BY(process_lock_);
+  int64_t last_log_time_us_;
+
+  // Reference counts per registered Transport; see AddActiveTransport.
+  std::map<Transport*, size_t> active_transports_ RTC_GUARDED_BY(config_lock_);
+};
+
+} // namespace webrtc
+
+#endif // CALL_FAKE_NETWORK_PIPE_H_
diff --git a/third_party/libwebrtc/call/fake_network_pipe_unittest.cc b/third_party/libwebrtc/call/fake_network_pipe_unittest.cc
new file mode 100644
index 0000000000..31f97fc85c
--- /dev/null
+++ b/third_party/libwebrtc/call/fake_network_pipe_unittest.cc
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/fake_network_pipe.h"
+
+#include <memory>
+#include <utility>
+
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "call/simulated_network.h"
+#include "modules/rtp_rtcp/include/rtp_header_extension_map.h"
+#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "rtc_base/checks.h"
+#include "system_wrappers/include/clock.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+using ::testing::_;
+using ::testing::Property;
+using ::testing::WithArg;
+
+namespace webrtc {
+class MockReceiver : public PacketReceiver {
+ public:
+ MOCK_METHOD(void,
+ DeliverRtcpPacket,
+ (rtc::CopyOnWriteBuffer packet),
+ (override));
+ MOCK_METHOD(void,
+ DeliverRtpPacket,
+ (MediaType media_type,
+ RtpPacketReceived packet,
+ OnUndemuxablePacketHandler undemuxable_packet_handler),
+ (override));
+ virtual ~MockReceiver() = default;
+};
+
+class ReorderTestReceiver : public MockReceiver {
+ public:
+ void DeliverRtpPacket(
+ MediaType media_type,
+ RtpPacketReceived packet,
+ OnUndemuxablePacketHandler undemuxable_packet_handler) override {
+ RTC_DCHECK_GE(packet.size(), sizeof(int));
+ delivered_sequence_numbers_.push_back(packet.SequenceNumber());
+ }
+ std::vector<int> delivered_sequence_numbers_;
+};
+
+class FakeNetworkPipeTest : public ::testing::Test {
+ public:
+ FakeNetworkPipeTest() : fake_clock_(12345) {}
+
+ protected:
+ void SendPackets(FakeNetworkPipe* pipe, int number_packets, int packet_size) {
+ RTC_DCHECK_GE(packet_size, sizeof(int));
+ for (int i = 0; i < number_packets; ++i) {
+ RtpPacketReceived packet;
+ constexpr size_t kFixedHeaderSize = 12;
+ packet.AllocatePayload(packet_size - kFixedHeaderSize);
+ packet.SetSequenceNumber(i);
+ packet.set_arrival_time(fake_clock_.CurrentTime());
+ RTC_DCHECK_EQ(packet.Buffer().size(), packet_size);
+ pipe->DeliverRtpPacket(MediaType::ANY, std::move(packet),
+ [](const RtpPacketReceived&) { return false; });
+ }
+ }
+
+ int PacketTimeMs(int capacity_kbps, int packet_size) const {
+ return 8 * packet_size / capacity_kbps;
+ }
+
+ SimulatedClock fake_clock_;
+};
+
+// Test the capacity link and verify we get as many packets as we expect.
+TEST_F(FakeNetworkPipeTest, CapacityTest) {
+ BuiltInNetworkBehaviorConfig config;
+ config.queue_length_packets = 20;
+ config.link_capacity_kbps = 80;
+ MockReceiver receiver;
+ auto simulated_network = std::make_unique<SimulatedNetwork>(config);
+ std::unique_ptr<FakeNetworkPipe> pipe(new FakeNetworkPipe(
+ &fake_clock_, std::move(simulated_network), &receiver));
+
+ // Add 10 packets of 1000 bytes, = 80 kb, and verify it takes one second to
+ // get through the pipe.
+ const int kNumPackets = 10;
+ const int kPacketSize = 1000;
+ SendPackets(pipe.get(), kNumPackets, kPacketSize);
+
+ // Time to get one packet through the link.
+ const int kPacketTimeMs =
+ PacketTimeMs(config.link_capacity_kbps, kPacketSize);
+
+  // Time hasn't increased yet, so we shouldn't get any packets.
+ EXPECT_CALL(receiver, DeliverRtpPacket).Times(0);
+ pipe->Process();
+
+ // Advance enough time to release one packet.
+ fake_clock_.AdvanceTimeMilliseconds(kPacketTimeMs);
+ EXPECT_CALL(receiver, DeliverRtpPacket).Times(1);
+ pipe->Process();
+
+  // Release all but one packet.
+ fake_clock_.AdvanceTimeMilliseconds(9 * kPacketTimeMs - 1);
+ EXPECT_CALL(receiver, DeliverRtpPacket).Times(8);
+ pipe->Process();
+
+ // And the last one.
+ fake_clock_.AdvanceTimeMilliseconds(1);
+ EXPECT_CALL(receiver, DeliverRtpPacket).Times(1);
+ pipe->Process();
+}
+
+// Test the extra network delay.
+TEST_F(FakeNetworkPipeTest, ExtraDelayTest) {
+ BuiltInNetworkBehaviorConfig config;
+ config.queue_length_packets = 20;
+ config.queue_delay_ms = 100;
+ config.link_capacity_kbps = 80;
+ MockReceiver receiver;
+ auto simulated_network = std::make_unique<SimulatedNetwork>(config);
+ std::unique_ptr<FakeNetworkPipe> pipe(new FakeNetworkPipe(
+ &fake_clock_, std::move(simulated_network), &receiver));
+
+ const int kNumPackets = 2;
+ const int kPacketSize = 1000;
+ SendPackets(pipe.get(), kNumPackets, kPacketSize);
+
+ // Time to get one packet through the link.
+ const int kPacketTimeMs =
+ PacketTimeMs(config.link_capacity_kbps, kPacketSize);
+
+ // Increase more than kPacketTimeMs, but not more than the extra delay.
+ fake_clock_.AdvanceTimeMilliseconds(kPacketTimeMs);
+ EXPECT_CALL(receiver, DeliverRtpPacket).Times(0);
+ pipe->Process();
+
+ // Advance the network delay to get the first packet.
+ fake_clock_.AdvanceTimeMilliseconds(config.queue_delay_ms);
+ EXPECT_CALL(receiver, DeliverRtpPacket).Times(1);
+ pipe->Process();
+
+ // Advance one more kPacketTimeMs to get the last packet.
+ fake_clock_.AdvanceTimeMilliseconds(kPacketTimeMs);
+ EXPECT_CALL(receiver, DeliverRtpPacket).Times(1);
+ pipe->Process();
+}
+
+// Test the number of buffers and packets are dropped when sending too many
+// packets too quickly.
+TEST_F(FakeNetworkPipeTest, QueueLengthTest) {
+ BuiltInNetworkBehaviorConfig config;
+ config.queue_length_packets = 2;
+ config.link_capacity_kbps = 80;
+ MockReceiver receiver;
+ auto simulated_network = std::make_unique<SimulatedNetwork>(config);
+ std::unique_ptr<FakeNetworkPipe> pipe(new FakeNetworkPipe(
+ &fake_clock_, std::move(simulated_network), &receiver));
+
+ const int kPacketSize = 1000;
+ const int kPacketTimeMs =
+ PacketTimeMs(config.link_capacity_kbps, kPacketSize);
+
+ // Send three packets and verify only 2 are delivered.
+ SendPackets(pipe.get(), 3, kPacketSize);
+
+ // Increase time enough to deliver all three packets, verify only two are
+ // delivered.
+ fake_clock_.AdvanceTimeMilliseconds(3 * kPacketTimeMs);
+ EXPECT_CALL(receiver, DeliverRtpPacket).Times(2);
+ pipe->Process();
+}
+
+// Test we get statistics as expected.
+TEST_F(FakeNetworkPipeTest, StatisticsTest) {
+ BuiltInNetworkBehaviorConfig config;
+ config.queue_length_packets = 2;
+ config.queue_delay_ms = 20;
+ config.link_capacity_kbps = 80;
+ MockReceiver receiver;
+ auto simulated_network = std::make_unique<SimulatedNetwork>(config);
+ std::unique_ptr<FakeNetworkPipe> pipe(new FakeNetworkPipe(
+ &fake_clock_, std::move(simulated_network), &receiver));
+
+ const int kPacketSize = 1000;
+ const int kPacketTimeMs =
+ PacketTimeMs(config.link_capacity_kbps, kPacketSize);
+
+ // Send three packets and verify only 2 are delivered.
+ SendPackets(pipe.get(), 3, kPacketSize);
+ fake_clock_.AdvanceTimeMilliseconds(3 * kPacketTimeMs +
+ config.queue_delay_ms);
+
+ EXPECT_CALL(receiver, DeliverRtpPacket).Times(2);
+ pipe->Process();
+
+ // Packet 1: kPacketTimeMs + config.queue_delay_ms,
+ // packet 2: 2 * kPacketTimeMs + config.queue_delay_ms => 170 ms average.
+ EXPECT_EQ(pipe->AverageDelay(), 170);
+ EXPECT_EQ(pipe->SentPackets(), 2u);
+ EXPECT_EQ(pipe->DroppedPackets(), 1u);
+ EXPECT_EQ(pipe->PercentageLoss(), 1 / 3.f);
+}
+
+// Change the link capacity half-way through the test and verify that the
+// delivery times change accordingly.
+TEST_F(FakeNetworkPipeTest, ChangingCapacityWithEmptyPipeTest) {
+ BuiltInNetworkBehaviorConfig config;
+ config.queue_length_packets = 20;
+ config.link_capacity_kbps = 80;
+ MockReceiver receiver;
+ std::unique_ptr<SimulatedNetwork> network(new SimulatedNetwork(config));
+ SimulatedNetwork* simulated_network = network.get();
+ std::unique_ptr<FakeNetworkPipe> pipe(
+ new FakeNetworkPipe(&fake_clock_, std::move(network), &receiver));
+
+ // Add 10 packets of 1000 bytes, = 80 kb, and verify it takes one second to
+ // get through the pipe.
+ const int kNumPackets = 10;
+ const int kPacketSize = 1000;
+ SendPackets(pipe.get(), kNumPackets, kPacketSize);
+
+ // Time to get one packet through the link.
+ int packet_time_ms = PacketTimeMs(config.link_capacity_kbps, kPacketSize);
+
+  // Time hasn't increased yet, so we shouldn't get any packets.
+ EXPECT_CALL(receiver, DeliverRtpPacket).Times(0);
+ pipe->Process();
+
+ // Advance time in steps to release one packet at a time.
+ for (int i = 0; i < kNumPackets; ++i) {
+ fake_clock_.AdvanceTimeMilliseconds(packet_time_ms);
+ EXPECT_CALL(receiver, DeliverRtpPacket).Times(1);
+ pipe->Process();
+ }
+
+ // Change the capacity.
+ config.link_capacity_kbps /= 2; // Reduce to 50%.
+ simulated_network->SetConfig(config);
+
+ // Add another 10 packets of 1000 bytes, = 80 kb, and verify it takes two
+ // seconds to get them through the pipe.
+ SendPackets(pipe.get(), kNumPackets, kPacketSize);
+
+ // Time to get one packet through the link.
+ packet_time_ms = PacketTimeMs(config.link_capacity_kbps, kPacketSize);
+
+  // Time hasn't increased yet, so we shouldn't get any packets.
+ EXPECT_CALL(receiver, DeliverRtpPacket).Times(0);
+ pipe->Process();
+
+ // Advance time in steps to release one packet at a time.
+ for (int i = 0; i < kNumPackets; ++i) {
+ fake_clock_.AdvanceTimeMilliseconds(packet_time_ms);
+ EXPECT_CALL(receiver, DeliverRtpPacket).Times(1);
+ pipe->Process();
+ }
+
+ // Check that all the packets were sent.
+ EXPECT_EQ(static_cast<size_t>(2 * kNumPackets), pipe->SentPackets());
+ EXPECT_FALSE(pipe->TimeUntilNextProcess().has_value());
+ fake_clock_.AdvanceTimeMilliseconds(1000);
+ EXPECT_CALL(receiver, DeliverRtpPacket).Times(0);
+ pipe->Process();
+}
+
+// Change the link capacity half-way through the test and verify that the
+// delivery times change accordingly.
+TEST_F(FakeNetworkPipeTest, ChangingCapacityWithPacketsInPipeTest) {
+ BuiltInNetworkBehaviorConfig config;
+ config.queue_length_packets = 20;
+ config.link_capacity_kbps = 80;
+ MockReceiver receiver;
+ std::unique_ptr<SimulatedNetwork> network(new SimulatedNetwork(config));
+ SimulatedNetwork* simulated_network = network.get();
+ std::unique_ptr<FakeNetworkPipe> pipe(
+ new FakeNetworkPipe(&fake_clock_, std::move(network), &receiver));
+
+ // Add 20 packets of 1000 bytes, = 160 kb.
+ const int kNumPackets = 20;
+ const int kPacketSize = 1000;
+ SendPackets(pipe.get(), kNumPackets, kPacketSize);
+
+  // Time hasn't increased yet, so we shouldn't get any packets.
+ EXPECT_CALL(receiver, DeliverRtpPacket).Times(0);
+ pipe->Process();
+
+ // Advance time in steps to release half of the packets one at a time.
+ int step_ms = PacketTimeMs(config.link_capacity_kbps, kPacketSize);
+ for (int i = 0; i < kNumPackets / 2; ++i) {
+ fake_clock_.AdvanceTimeMilliseconds(step_ms);
+ EXPECT_CALL(receiver, DeliverRtpPacket).Times(1);
+ pipe->Process();
+ }
+
+ // Change the capacity.
+ config.link_capacity_kbps *= 2; // Double the capacity.
+ simulated_network->SetConfig(config);
+
+ // Advance time in steps to release remaining packets one at a time.
+ step_ms = PacketTimeMs(config.link_capacity_kbps, kPacketSize);
+ for (int i = 0; i < kNumPackets / 2; ++i) {
+ fake_clock_.AdvanceTimeMilliseconds(step_ms);
+ EXPECT_CALL(receiver, DeliverRtpPacket).Times(1);
+ pipe->Process();
+ }
+
+ // Check that all the packets were sent.
+ EXPECT_EQ(static_cast<size_t>(kNumPackets), pipe->SentPackets());
+ EXPECT_FALSE(pipe->TimeUntilNextProcess().has_value());
+ fake_clock_.AdvanceTimeMilliseconds(1000);
+ EXPECT_CALL(receiver, DeliverRtpPacket).Times(0);
+ pipe->Process();
+}
+
+// At first disallow reordering and then allow reordering.
+TEST_F(FakeNetworkPipeTest, DisallowReorderingThenAllowReordering) {
+ BuiltInNetworkBehaviorConfig config;
+ config.queue_length_packets = 1000;
+ config.link_capacity_kbps = 800;
+ config.queue_delay_ms = 100;
+ config.delay_standard_deviation_ms = 10;
+ ReorderTestReceiver receiver;
+ std::unique_ptr<SimulatedNetwork> network(new SimulatedNetwork(config));
+ SimulatedNetwork* simulated_network = network.get();
+ std::unique_ptr<FakeNetworkPipe> pipe(
+ new FakeNetworkPipe(&fake_clock_, std::move(network), &receiver));
+
+ const uint32_t kNumPackets = 100;
+ const int kPacketSize = 10;
+ SendPackets(pipe.get(), kNumPackets, kPacketSize);
+ fake_clock_.AdvanceTimeMilliseconds(1000);
+ pipe->Process();
+
+ // Confirm that all packets have been delivered in order.
+ EXPECT_EQ(kNumPackets, receiver.delivered_sequence_numbers_.size());
+ int last_seq_num = -1;
+ for (int seq_num : receiver.delivered_sequence_numbers_) {
+ EXPECT_GT(seq_num, last_seq_num);
+ last_seq_num = seq_num;
+ }
+
+ config.allow_reordering = true;
+ simulated_network->SetConfig(config);
+ SendPackets(pipe.get(), kNumPackets, kPacketSize);
+ fake_clock_.AdvanceTimeMilliseconds(1000);
+ receiver.delivered_sequence_numbers_.clear();
+ pipe->Process();
+
+  // Confirm that all packets have been delivered
+  // and that reordering has occurred.
+ EXPECT_EQ(kNumPackets, receiver.delivered_sequence_numbers_.size());
+ bool reordering_has_occured = false;
+ last_seq_num = -1;
+ for (int seq_num : receiver.delivered_sequence_numbers_) {
+ if (last_seq_num > seq_num) {
+ reordering_has_occured = true;
+ break;
+ }
+ last_seq_num = seq_num;
+ }
+ EXPECT_TRUE(reordering_has_occured);
+}
+
+TEST_F(FakeNetworkPipeTest, BurstLoss) {
+ const int kLossPercent = 5;
+ const int kAvgBurstLength = 3;
+ const int kNumPackets = 10000;
+ const int kPacketSize = 10;
+
+ BuiltInNetworkBehaviorConfig config;
+ config.queue_length_packets = kNumPackets;
+ config.loss_percent = kLossPercent;
+ config.avg_burst_loss_length = kAvgBurstLength;
+ ReorderTestReceiver receiver;
+ auto simulated_network = std::make_unique<SimulatedNetwork>(config);
+ std::unique_ptr<FakeNetworkPipe> pipe(new FakeNetworkPipe(
+ &fake_clock_, std::move(simulated_network), &receiver));
+
+ SendPackets(pipe.get(), kNumPackets, kPacketSize);
+ fake_clock_.AdvanceTimeMilliseconds(1000);
+ pipe->Process();
+
+ // Check that the average loss is `kLossPercent` percent.
+ int lost_packets = kNumPackets - receiver.delivered_sequence_numbers_.size();
+ double loss_fraction = lost_packets / static_cast<double>(kNumPackets);
+
+ EXPECT_NEAR(kLossPercent / 100.0, loss_fraction, 0.05);
+
+  // Find the number of bursts that have occurred.
+ size_t received_packets = receiver.delivered_sequence_numbers_.size();
+ int num_bursts = 0;
+ for (size_t i = 0; i < received_packets - 1; ++i) {
+ int diff = receiver.delivered_sequence_numbers_[i + 1] -
+ receiver.delivered_sequence_numbers_[i];
+ if (diff > 1)
+ ++num_bursts;
+ }
+
+ double average_burst_length = static_cast<double>(lost_packets) / num_bursts;
+
+ EXPECT_NEAR(kAvgBurstLength, average_burst_length, 0.3);
+}
+
+TEST_F(FakeNetworkPipeTest, SetReceiver) {
+ BuiltInNetworkBehaviorConfig config;
+ config.link_capacity_kbps = 800;
+ MockReceiver receiver;
+ auto simulated_network = std::make_unique<SimulatedNetwork>(config);
+ std::unique_ptr<FakeNetworkPipe> pipe(new FakeNetworkPipe(
+ &fake_clock_, std::move(simulated_network), &receiver));
+
+ const int kPacketSize = 1000;
+ const int kPacketTimeMs =
+ PacketTimeMs(config.link_capacity_kbps, kPacketSize);
+ SendPackets(pipe.get(), 1, kPacketSize);
+ fake_clock_.AdvanceTimeMilliseconds(kPacketTimeMs);
+ EXPECT_CALL(receiver, DeliverRtpPacket).Times(1);
+ pipe->Process();
+
+ MockReceiver new_receiver;
+ pipe->SetReceiver(&new_receiver);
+
+ SendPackets(pipe.get(), 1, kPacketSize);
+ fake_clock_.AdvanceTimeMilliseconds(kPacketTimeMs);
+ EXPECT_CALL(receiver, DeliverRtpPacket).Times(0);
+ EXPECT_CALL(new_receiver, DeliverRtpPacket).Times(1);
+ pipe->Process();
+}
+
+TEST_F(FakeNetworkPipeTest, DeliverRtpPacketSetsCorrectArrivalTime) {
+ BuiltInNetworkBehaviorConfig config;
+ config.queue_delay_ms = 100;
+ MockReceiver receiver;
+ auto simulated_network = std::make_unique<SimulatedNetwork>(config);
+ std::unique_ptr<FakeNetworkPipe> pipe(new FakeNetworkPipe(
+ &fake_clock_, std::move(simulated_network), &receiver));
+
+ Timestamp send_time = fake_clock_.CurrentTime();
+ RtpPacketReceived packet(nullptr, send_time);
+ packet.SetExtension<TransportSequenceNumber>(123);
+ pipe->DeliverRtpPacket(MediaType::VIDEO, std::move(packet),
+ [](const RtpPacketReceived&) { return false; });
+
+ // Advance the network delay to get the first packet.
+ fake_clock_.AdvanceTimeMilliseconds(config.queue_delay_ms);
+ EXPECT_CALL(receiver, DeliverRtpPacket(MediaType::VIDEO, _, _))
+ .WillOnce(WithArg<1>([&](RtpPacketReceived packet) {
+ EXPECT_EQ(packet.arrival_time(),
+ send_time + TimeDelta::Millis(config.queue_delay_ms));
+ }));
+ pipe->Process();
+}
+
+TEST_F(FakeNetworkPipeTest, DeliverRtpPacketPropagatesExtensions) {
+ BuiltInNetworkBehaviorConfig config;
+ config.queue_delay_ms = 100;
+ MockReceiver receiver;
+ auto simulated_network = std::make_unique<SimulatedNetwork>(config);
+ std::unique_ptr<FakeNetworkPipe> pipe(new FakeNetworkPipe(
+ &fake_clock_, std::move(simulated_network), &receiver));
+ RtpHeaderExtensionMap extension_map;
+ extension_map.Register<TransportSequenceNumber>(/*id=*/7);
+
+ RtpPacketReceived packet(&extension_map, fake_clock_.CurrentTime());
+ packet.SetExtension<TransportSequenceNumber>(123);
+ pipe->DeliverRtpPacket(MediaType::VIDEO, std::move(packet),
+ [](const RtpPacketReceived&) { return false; });
+
+ // Advance the network delay to get the first packet.
+ fake_clock_.AdvanceTimeMilliseconds(config.queue_delay_ms);
+ EXPECT_CALL(receiver, DeliverRtpPacket(MediaType::VIDEO, _, _))
+ .WillOnce(WithArg<1>([](RtpPacketReceived packet) {
+ EXPECT_EQ(packet.GetExtension<TransportSequenceNumber>(), 123);
+ }));
+ pipe->Process();
+}
+
+TEST_F(FakeNetworkPipeTest, DeliverRtcpPacket) {
+ BuiltInNetworkBehaviorConfig config;
+ config.queue_delay_ms = 100;
+ MockReceiver receiver;
+ auto simulated_network = std::make_unique<SimulatedNetwork>(config);
+ std::unique_ptr<FakeNetworkPipe> pipe(new FakeNetworkPipe(
+ &fake_clock_, std::move(simulated_network), &receiver));
+
+ rtc::CopyOnWriteBuffer buffer(100);
+ memset(buffer.MutableData(), 0, 100);
+ pipe->DeliverRtcpPacket(std::move(buffer));
+
+ // Advance the network delay to get the first packet.
+ fake_clock_.AdvanceTimeMilliseconds(config.queue_delay_ms);
+ EXPECT_CALL(receiver,
+ DeliverRtcpPacket(Property(&rtc::CopyOnWriteBuffer::size, 100)));
+ pipe->Process();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/flexfec_receive_stream.cc b/third_party/libwebrtc/call/flexfec_receive_stream.cc
new file mode 100644
index 0000000000..ab6dde37b4
--- /dev/null
+++ b/third_party/libwebrtc/call/flexfec_receive_stream.cc
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/flexfec_receive_stream.h"
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+FlexfecReceiveStream::Config::Config(Transport* rtcp_send_transport)
+ : rtcp_send_transport(rtcp_send_transport) {
+ RTC_DCHECK(rtcp_send_transport);
+}
+
+FlexfecReceiveStream::Config::Config(const Config& config) = default;
+
+FlexfecReceiveStream::Config::~Config() = default;
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/flexfec_receive_stream.h b/third_party/libwebrtc/call/flexfec_receive_stream.h
new file mode 100644
index 0000000000..c5ac0f9fb6
--- /dev/null
+++ b/third_party/libwebrtc/call/flexfec_receive_stream.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef CALL_FLEXFEC_RECEIVE_STREAM_H_
+#define CALL_FLEXFEC_RECEIVE_STREAM_H_
+
+#include <stdint.h>
+
+#include <string>
+#include <vector>
+
+#include "api/call/transport.h"
+#include "api/rtp_headers.h"
+#include "api/rtp_parameters.h"
+#include "call/receive_stream.h"
+#include "call/rtp_packet_sink_interface.h"
+#include "modules/rtp_rtcp/include/receive_statistics.h"
+
+namespace webrtc {
+
+class FlexfecReceiveStream : public RtpPacketSinkInterface,
+ public ReceiveStreamInterface {
+ public:
+ ~FlexfecReceiveStream() override = default;
+
+ struct Config {
+ explicit Config(Transport* rtcp_send_transport);
+ Config(const Config&);
+ ~Config();
+
+ std::string ToString() const;
+
+ // Returns true if all RTP information is available in order to
+ // enable receiving FlexFEC.
+ bool IsCompleteAndEnabled() const;
+
+ // Payload type for FlexFEC.
+ int payload_type = -1;
+
+ ReceiveStreamRtpConfig rtp;
+
+ // Vector containing a single element, corresponding to the SSRC of the
+ // media stream being protected by this FlexFEC stream. The vector MUST have
+ // size 1.
+ //
+ // TODO(brandtr): Update comment above when we support multistream
+ // protection.
+ std::vector<uint32_t> protected_media_ssrcs;
+
+ // What RTCP mode to use in the reports.
+ RtcpMode rtcp_mode = RtcpMode::kCompound;
+
+ // Transport for outgoing RTCP packets.
+ Transport* rtcp_send_transport = nullptr;
+ };
+
+ // TODO(tommi): FlexfecReceiveStream inherits from ReceiveStreamInterface,
+ // not VideoReceiveStreamInterface where there's also a SetRtcpMode method.
+ // Perhaps this should be in ReceiveStreamInterface and apply to audio streams
+ // as well (although there's no logic that would use it at present).
+ virtual void SetRtcpMode(RtcpMode mode) = 0;
+
+ // Called to change the payload type after initialization.
+ virtual void SetPayloadType(int payload_type) = 0;
+ virtual int payload_type() const = 0;
+
+ virtual const ReceiveStatistics* GetStats() const = 0;
+};
+
+} // namespace webrtc
+
+#endif // CALL_FLEXFEC_RECEIVE_STREAM_H_
diff --git a/third_party/libwebrtc/call/flexfec_receive_stream_impl.cc b/third_party/libwebrtc/call/flexfec_receive_stream_impl.cc
new file mode 100644
index 0000000000..e20f1b6ac5
--- /dev/null
+++ b/third_party/libwebrtc/call/flexfec_receive_stream_impl.cc
@@ -0,0 +1,201 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/flexfec_receive_stream_impl.h"
+
+#include <stddef.h>
+
+#include <cstdint>
+#include <string>
+#include <utility>
+
+#include "api/array_view.h"
+#include "api/call/transport.h"
+#include "api/rtp_parameters.h"
+#include "call/rtp_stream_receiver_controller_interface.h"
+#include "modules/rtp_rtcp/include/flexfec_receiver.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/strings/string_builder.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+std::string FlexfecReceiveStream::Config::ToString() const {
+ char buf[1024];
+ rtc::SimpleStringBuilder ss(buf);
+ ss << "{payload_type: " << payload_type;
+ ss << ", remote_ssrc: " << rtp.remote_ssrc;
+ ss << ", local_ssrc: " << rtp.local_ssrc;
+ ss << ", protected_media_ssrcs: [";
+ size_t i = 0;
+ for (; i + 1 < protected_media_ssrcs.size(); ++i)
+ ss << protected_media_ssrcs[i] << ", ";
+ if (!protected_media_ssrcs.empty())
+ ss << protected_media_ssrcs[i];
+ ss << "}";
+ return ss.str();
+}
+
+bool FlexfecReceiveStream::Config::IsCompleteAndEnabled() const {
+ // Check if FlexFEC is enabled.
+ if (payload_type < 0)
+ return false;
+ // Do we have the necessary SSRC information?
+ if (rtp.remote_ssrc == 0)
+ return false;
+ // TODO(brandtr): Update this check when we support multistream protection.
+ if (protected_media_ssrcs.size() != 1u)
+ return false;
+ return true;
+}
+
+namespace {
+
+// TODO(brandtr): Update this function when we support multistream protection.
+std::unique_ptr<FlexfecReceiver> MaybeCreateFlexfecReceiver(
+ Clock* clock,
+ const FlexfecReceiveStream::Config& config,
+ RecoveredPacketReceiver* recovered_packet_receiver) {
+ if (config.payload_type < 0) {
+ RTC_LOG(LS_WARNING)
+ << "Invalid FlexFEC payload type given. "
+ "This FlexfecReceiveStream will therefore be useless.";
+ return nullptr;
+ }
+ RTC_DCHECK_GE(config.payload_type, 0);
+ RTC_DCHECK_LE(config.payload_type, 127);
+ if (config.rtp.remote_ssrc == 0) {
+ RTC_LOG(LS_WARNING)
+ << "Invalid FlexFEC SSRC given. "
+ "This FlexfecReceiveStream will therefore be useless.";
+ return nullptr;
+ }
+ if (config.protected_media_ssrcs.empty()) {
+ RTC_LOG(LS_WARNING)
+ << "No protected media SSRC supplied. "
+ "This FlexfecReceiveStream will therefore be useless.";
+ return nullptr;
+ }
+
+ if (config.protected_media_ssrcs.size() > 1) {
+ RTC_LOG(LS_WARNING)
+ << "The supplied FlexfecConfig contained multiple protected "
+ "media streams, but our implementation currently only "
+ "supports protecting a single media stream. "
+ "To avoid confusion, disabling FlexFEC completely.";
+ return nullptr;
+ }
+ RTC_DCHECK_EQ(1U, config.protected_media_ssrcs.size());
+ return std::unique_ptr<FlexfecReceiver>(new FlexfecReceiver(
+ clock, config.rtp.remote_ssrc, config.protected_media_ssrcs[0],
+ recovered_packet_receiver));
+}
+
+std::unique_ptr<ModuleRtpRtcpImpl2> CreateRtpRtcpModule(
+ Clock* clock,
+ ReceiveStatistics* receive_statistics,
+ const FlexfecReceiveStreamImpl::Config& config,
+ RtcpRttStats* rtt_stats) {
+ RtpRtcpInterface::Configuration configuration;
+ configuration.audio = false;
+ configuration.receiver_only = true;
+ configuration.clock = clock;
+ configuration.receive_statistics = receive_statistics;
+ configuration.outgoing_transport = config.rtcp_send_transport;
+ configuration.rtt_stats = rtt_stats;
+ configuration.local_media_ssrc = config.rtp.local_ssrc;
+ return ModuleRtpRtcpImpl2::Create(configuration);
+}
+
+} // namespace
+
+FlexfecReceiveStreamImpl::FlexfecReceiveStreamImpl(
+ Clock* clock,
+ Config config,
+ RecoveredPacketReceiver* recovered_packet_receiver,
+ RtcpRttStats* rtt_stats)
+ : remote_ssrc_(config.rtp.remote_ssrc),
+ payload_type_(config.payload_type),
+ receiver_(
+ MaybeCreateFlexfecReceiver(clock, config, recovered_packet_receiver)),
+ rtp_receive_statistics_(ReceiveStatistics::Create(clock)),
+ rtp_rtcp_(CreateRtpRtcpModule(clock,
+ rtp_receive_statistics_.get(),
+ config,
+ rtt_stats)) {
+ RTC_LOG(LS_INFO) << "FlexfecReceiveStreamImpl: " << config.ToString();
+ RTC_DCHECK_GE(payload_type_, -1);
+
+ packet_sequence_checker_.Detach();
+
+ // RTCP reporting.
+ rtp_rtcp_->SetRTCPStatus(config.rtcp_mode);
+}
+
+FlexfecReceiveStreamImpl::~FlexfecReceiveStreamImpl() {
+ RTC_DLOG(LS_INFO) << "~FlexfecReceiveStreamImpl: ssrc: " << remote_ssrc_;
+}
+
+void FlexfecReceiveStreamImpl::RegisterWithTransport(
+ RtpStreamReceiverControllerInterface* receiver_controller) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ RTC_DCHECK(!rtp_stream_receiver_);
+
+ if (!receiver_)
+ return;
+
+ // TODO(nisse): OnRtpPacket in this class delegates all real work to
+ // `receiver_`. So maybe we don't need to implement RtpPacketSinkInterface
+ // here at all, we'd then delete the OnRtpPacket method and instead register
+ // `receiver_` as the RtpPacketSinkInterface for this stream.
+ rtp_stream_receiver_ =
+ receiver_controller->CreateReceiver(remote_ssrc(), this);
+}
+
+void FlexfecReceiveStreamImpl::UnregisterFromTransport() {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ rtp_stream_receiver_.reset();
+}
+
+void FlexfecReceiveStreamImpl::OnRtpPacket(const RtpPacketReceived& packet) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ if (!receiver_)
+ return;
+
+ receiver_->OnRtpPacket(packet);
+
+ // Do not report media packets in the RTCP RRs generated by `rtp_rtcp_`.
+ if (packet.Ssrc() == remote_ssrc()) {
+ rtp_receive_statistics_->OnRtpPacket(packet);
+ }
+}
+
+void FlexfecReceiveStreamImpl::SetPayloadType(int payload_type) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ RTC_DCHECK_GE(payload_type, -1);
+ payload_type_ = payload_type;
+}
+
+int FlexfecReceiveStreamImpl::payload_type() const {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ return payload_type_;
+}
+
+void FlexfecReceiveStreamImpl::SetLocalSsrc(uint32_t local_ssrc) {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ if (local_ssrc == rtp_rtcp_->local_media_ssrc())
+ return;
+
+ rtp_rtcp_->SetLocalSsrc(local_ssrc);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/flexfec_receive_stream_impl.h b/third_party/libwebrtc/call/flexfec_receive_stream_impl.h
new file mode 100644
index 0000000000..5ce2cb6f0e
--- /dev/null
+++ b/third_party/libwebrtc/call/flexfec_receive_stream_impl.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef CALL_FLEXFEC_RECEIVE_STREAM_IMPL_H_
+#define CALL_FLEXFEC_RECEIVE_STREAM_IMPL_H_
+
+#include <memory>
+#include <vector>
+
+#include "call/flexfec_receive_stream.h"
+#include "call/rtp_packet_sink_interface.h"
+#include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h"
+#include "rtc_base/system/no_unique_address.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+class FlexfecReceiver;
+class ReceiveStatistics;
+class RecoveredPacketReceiver;
+class RtcpRttStats;
+class RtpPacketReceived;
+class RtpRtcp;
+class RtpStreamReceiverControllerInterface;
+class RtpStreamReceiverInterface;
+
+class FlexfecReceiveStreamImpl : public FlexfecReceiveStream {
+ public:
+ FlexfecReceiveStreamImpl(Clock* clock,
+ Config config,
+ RecoveredPacketReceiver* recovered_packet_receiver,
+ RtcpRttStats* rtt_stats);
+ // Destruction happens on the worker thread. Prior to destruction the caller
+ // must ensure that a registration with the transport has been cleared. See
+ // `RegisterWithTransport` for details.
+ // TODO(tommi): As a further improvement to this, performing the full
+ // destruction on the network thread could be made the default.
+ ~FlexfecReceiveStreamImpl() override;
+
+ // Called on the network thread to register/unregister with the network
+ // transport.
+ void RegisterWithTransport(
+ RtpStreamReceiverControllerInterface* receiver_controller);
+ // If registration has previously been done (via `RegisterWithTransport`) then
+ // `UnregisterFromTransport` must be called prior to destruction, on the
+ // network thread.
+ void UnregisterFromTransport();
+
+ // RtpPacketSinkInterface.
+ void OnRtpPacket(const RtpPacketReceived& packet) override;
+
+ void SetPayloadType(int payload_type) override;
+ int payload_type() const override;
+
+ // Updates the `rtp_video_stream_receiver_`'s `local_ssrc` when the default
+ // sender has been created, changed or removed.
+ void SetLocalSsrc(uint32_t local_ssrc);
+
+ uint32_t remote_ssrc() const { return remote_ssrc_; }
+
+ void SetRtcpMode(RtcpMode mode) override {
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_);
+ rtp_rtcp_->SetRTCPStatus(mode);
+ }
+
+ const ReceiveStatistics* GetStats() const override {
+ return rtp_receive_statistics_.get();
+ }
+
+ private:
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker packet_sequence_checker_;
+
+ const uint32_t remote_ssrc_;
+
+ // `payload_type_` is initially set to -1, indicating that FlexFec is
+ // disabled.
+ int payload_type_ RTC_GUARDED_BY(packet_sequence_checker_) = -1;
+
+ // Erasure code interfacing.
+ const std::unique_ptr<FlexfecReceiver> receiver_;
+
+ // RTCP reporting.
+ const std::unique_ptr<ReceiveStatistics> rtp_receive_statistics_;
+ const std::unique_ptr<ModuleRtpRtcpImpl2> rtp_rtcp_;
+
+ std::unique_ptr<RtpStreamReceiverInterface> rtp_stream_receiver_
+ RTC_GUARDED_BY(packet_sequence_checker_);
+};
+
+} // namespace webrtc
+
+#endif // CALL_FLEXFEC_RECEIVE_STREAM_IMPL_H_
diff --git a/third_party/libwebrtc/call/flexfec_receive_stream_unittest.cc b/third_party/libwebrtc/call/flexfec_receive_stream_unittest.cc
new file mode 100644
index 0000000000..c575a3f41d
--- /dev/null
+++ b/third_party/libwebrtc/call/flexfec_receive_stream_unittest.cc
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/flexfec_receive_stream.h"
+
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+#include "api/array_view.h"
+#include "api/call/transport.h"
+#include "api/rtp_headers.h"
+#include "api/rtp_parameters.h"
+#include "call/flexfec_receive_stream_impl.h"
+#include "call/rtp_stream_receiver_controller.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/mocks/mock_recovered_packet_receiver.h"
+#include "modules/rtp_rtcp/mocks/mock_rtcp_rtt_stats.h"
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "rtc_base/thread.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/mock_transport.h"
+
+namespace webrtc {
+
+namespace {
+
+using ::testing::_;
+using ::testing::Eq;
+using ::testing::Property;
+
+constexpr uint8_t kFlexfecPlType = 118;
+constexpr uint8_t kFlexfecSsrc[] = {0x00, 0x00, 0x00, 0x01};
+constexpr uint8_t kMediaSsrc[] = {0x00, 0x00, 0x00, 0x02};
+
+// Builds a FlexFEC receive-stream config that protects exactly one media
+// stream, asserting that the result is complete and enabled.
+FlexfecReceiveStream::Config CreateDefaultConfig(
+    Transport* rtcp_send_transport) {
+  FlexfecReceiveStream::Config config(rtcp_send_transport);
+  config.payload_type = kFlexfecPlType;
+  config.rtp.remote_ssrc = ByteReader<uint32_t>::ReadBigEndian(kFlexfecSsrc);
+  config.protected_media_ssrcs = {
+      ByteReader<uint32_t>::ReadBigEndian(kMediaSsrc)};
+  EXPECT_TRUE(config.IsCompleteAndEnabled());
+  return config;
+}
+
+// Parses a raw buffer into an RtpPacketReceived; fails the current test
+// (but still returns the partially parsed packet) if parsing fails.
+RtpPacketReceived ParsePacket(rtc::ArrayView<const uint8_t> packet) {
+  RtpPacketReceived parsed_packet(nullptr);
+  EXPECT_TRUE(parsed_packet.Parse(packet));
+  return parsed_packet;
+}
+
+} // namespace
+
+// Config::IsCompleteAndEnabled() requires a payload type, a remote SSRC
+// and exactly one protected media SSRC; a second protected SSRC makes the
+// config invalid again.
+TEST(FlexfecReceiveStreamConfigTest, IsCompleteAndEnabled) {
+  MockTransport rtcp_send_transport;
+  FlexfecReceiveStream::Config config(&rtcp_send_transport);
+
+  config.rtp.local_ssrc = 18374743;
+  config.rtcp_mode = RtcpMode::kCompound;
+  EXPECT_FALSE(config.IsCompleteAndEnabled());
+
+  config.payload_type = 123;
+  EXPECT_FALSE(config.IsCompleteAndEnabled());
+
+  config.rtp.remote_ssrc = 238423838;
+  EXPECT_FALSE(config.IsCompleteAndEnabled());
+
+  config.protected_media_ssrcs.push_back(138989393);
+  EXPECT_TRUE(config.IsCompleteAndEnabled());
+
+  // Only a single protected media SSRC is supported.
+  config.protected_media_ssrcs.push_back(33423423);
+  EXPECT_FALSE(config.IsCompleteAndEnabled());
+}
+
+// Fixture owning a FlexfecReceiveStreamImpl registered with a real
+// RtpStreamReceiverController; the destructor unregisters it from the
+// transport again.
+class FlexfecReceiveStreamTest : public ::testing::Test {
+ protected:
+  FlexfecReceiveStreamTest()
+      : config_(CreateDefaultConfig(&rtcp_send_transport_)) {
+    receive_stream_ = std::make_unique<FlexfecReceiveStreamImpl>(
+        Clock::GetRealTimeClock(), config_, &recovered_packet_receiver_,
+        &rtt_stats_);
+    receive_stream_->RegisterWithTransport(&rtp_stream_receiver_controller_);
+  }
+
+  ~FlexfecReceiveStreamTest() { receive_stream_->UnregisterFromTransport(); }
+
+  rtc::AutoThread main_thread_;
+  MockTransport rtcp_send_transport_;
+  FlexfecReceiveStream::Config config_;
+  MockRecoveredPacketReceiver recovered_packet_receiver_;
+  MockRtcpRttStats rtt_stats_;
+  RtpStreamReceiverController rtp_stream_receiver_controller_;
+  std::unique_ptr<FlexfecReceiveStreamImpl> receive_stream_;
+};
+
+TEST_F(FlexfecReceiveStreamTest, ConstructDestruct) {}
+
+// Create a FlexFEC packet that protects a single media packet and ensure
+// that the callback is called. Correctness of recovery is checked in the
+// FlexfecReceiver unit tests.
+TEST_F(FlexfecReceiveStreamTest, RecoversPacket) {
+  constexpr uint8_t kFlexfecSeqNum[] = {0x00, 0x01};
+  constexpr uint8_t kFlexfecTs[] = {0x00, 0x11, 0x22, 0x33};
+  constexpr uint8_t kMediaPlType = 107;
+  constexpr uint8_t kMediaSeqNum[] = {0x00, 0x02};
+  constexpr uint8_t kMediaTs[] = {0xaa, 0xbb, 0xcc, 0xdd};
+
+  // This packet mask protects a single media packet, i.e., the FlexFEC payload
+  // is a copy of that media packet. When inserted in the FlexFEC pipeline,
+  // it will thus trivially recover the lost media packet.
+  constexpr uint8_t kKBit0 = 1 << 7;
+  constexpr uint8_t kFlexfecPktMask[] = {kKBit0 | 0x00, 0x01};
+  constexpr uint8_t kPayloadLength[] = {0x00, 0x04};
+  constexpr uint8_t kSsrcCount = 1;
+  constexpr uint8_t kReservedBits = 0x00;
+  constexpr uint8_t kPayloadBits = 0x00;
+  // clang-format off
+  constexpr uint8_t kFlexfecPacket[] = {
+      // RTP header.
+      0x80,            kFlexfecPlType,  kFlexfecSeqNum[0],  kFlexfecSeqNum[1],
+      kFlexfecTs[0],   kFlexfecTs[1],   kFlexfecTs[2],      kFlexfecTs[3],
+      kFlexfecSsrc[0], kFlexfecSsrc[1], kFlexfecSsrc[2],    kFlexfecSsrc[3],
+      // FlexFEC header.
+      0x00,            kMediaPlType,    kPayloadLength[0],  kPayloadLength[1],
+      kMediaTs[0],     kMediaTs[1],     kMediaTs[2],        kMediaTs[3],
+      kSsrcCount,      kReservedBits,   kReservedBits,      kReservedBits,
+      kMediaSsrc[0],   kMediaSsrc[1],   kMediaSsrc[2],      kMediaSsrc[3],
+      kMediaSeqNum[0], kMediaSeqNum[1], kFlexfecPktMask[0], kFlexfecPktMask[1],
+      // FEC payload.
+      kPayloadBits,    kPayloadBits,    kPayloadBits,       kPayloadBits};
+  // clang-format on
+
+  // The recovered media packet's payload size must equal the length field
+  // encoded above (kPayloadLength is big-endian, so byte [1] is the value).
+  EXPECT_CALL(recovered_packet_receiver_,
+              OnRecoveredPacket(Property(&RtpPacketReceived::payload_size,
+                                         Eq(kPayloadLength[1]))));
+
+  receive_stream_->OnRtpPacket(ParsePacket(kFlexfecPacket));
+
+  // Tear-down
+  // NOTE(review): the fixture destructor calls UnregisterFromTransport()
+  // again after this — assumed safe to call twice; confirm.
+  receive_stream_->UnregisterFromTransport();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/packet_receiver.h b/third_party/libwebrtc/call/packet_receiver.h
new file mode 100644
index 0000000000..cdcf7bfc73
--- /dev/null
+++ b/third_party/libwebrtc/call/packet_receiver.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef CALL_PACKET_RECEIVER_H_
+#define CALL_PACKET_RECEIVER_H_
+
+#include "absl/functional/any_invocable.h"
+#include "api/media_types.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/copy_on_write_buffer.h"
+
+namespace webrtc {
+
+// Interface implemented by Call for delivering incoming RTP/RTCP packets
+// into the demuxing pipeline.
+class PacketReceiver {
+ public:
+  // Demux RTCP packets. Must be called on the worker thread.
+  virtual void DeliverRtcpPacket(rtc::CopyOnWriteBuffer packet) = 0;
+
+  // Invoked once when a packet is received that cannot be demuxed.
+  // If the method returns true, a new attempt is made to demux the packet.
+  using OnUndemuxablePacketHandler =
+      absl::AnyInvocable<bool(const RtpPacketReceived& parsed_packet)>;
+
+  // Must be called on the worker thread.
+  // If `media_type` is not Audio or Video, packets may be used for BWE
+  // calculations but are not demuxed.
+  virtual void DeliverRtpPacket(
+      MediaType media_type,
+      RtpPacketReceived packet,
+      OnUndemuxablePacketHandler undemuxable_packet_handler) = 0;
+
+ protected:
+  // Non-virtual-dtor deletion through this interface is not supported.
+  virtual ~PacketReceiver() {}
+};
+
+} // namespace webrtc
+
+#endif // CALL_PACKET_RECEIVER_H_
diff --git a/third_party/libwebrtc/call/rampup_tests.cc b/third_party/libwebrtc/call/rampup_tests.cc
new file mode 100644
index 0000000000..232fe0b3fe
--- /dev/null
+++ b/third_party/libwebrtc/call/rampup_tests.cc
@@ -0,0 +1,711 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/rampup_tests.h"
+
+#include <memory>
+
+#include "absl/flags/flag.h"
+#include "absl/strings/string_view.h"
+#include "api/rtc_event_log/rtc_event_log_factory.h"
+#include "api/rtc_event_log_output_file.h"
+#include "api/task_queue/default_task_queue_factory.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/task_queue/task_queue_factory.h"
+#include "api/test/metrics/global_metrics_logger_and_exporter.h"
+#include "api/test/metrics/metric.h"
+#include "call/fake_network_pipe.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/string_encode.h"
+#include "rtc_base/task_queue_for_test.h"
+#include "rtc_base/time_utils.h"
+#include "test/encoder_settings.h"
+#include "test/gtest.h"
+#include "test/video_test_constants.h"
+
+ABSL_FLAG(std::string,
+ ramp_dump_name,
+ "",
+ "Filename for dumped received RTP stream.");
+
+namespace webrtc {
+namespace {
+
+using ::webrtc::test::GetGlobalMetricsLogger;
+using ::webrtc::test::ImprovementDirection;
+using ::webrtc::test::Unit;
+
+constexpr TimeDelta kPollInterval = TimeDelta::Millis(20);
+static const int kExpectedHighVideoBitrateBps = 80000;
+static const int kExpectedHighAudioBitrateBps = 30000;
+static const int kLowBandwidthLimitBps = 20000;
+// Set target detected bitrate to slightly larger than the target bitrate to
+// avoid flakiness.
+static const int kLowBitrateMarginBps = 2000;
+
+// Returns `num_streams` consecutive SSRCs starting at `ssrc_offset`.
+std::vector<uint32_t> GenerateSsrcs(size_t num_streams, uint32_t ssrc_offset) {
+  std::vector<uint32_t> ssrcs;
+  for (size_t i = 0; i != num_streams; ++i)
+    ssrcs.push_back(static_cast<uint32_t>(ssrc_offset + i));
+  return ssrcs;
+}
+
+} // namespace
+
+// End-to-end tester that ramps the send bitrate up to a derived target.
+// RED and FlexFEC are mutually exclusive here, and at most one audio
+// stream is supported (enforced by the EXPECTs below).
+RampUpTester::RampUpTester(size_t num_video_streams,
+                           size_t num_audio_streams,
+                           size_t num_flexfec_streams,
+                           unsigned int start_bitrate_bps,
+                           int64_t min_run_time_ms,
+                           bool rtx,
+                           bool red,
+                           bool report_perf_stats,
+                           TaskQueueBase* task_queue)
+    : EndToEndTest(test::VideoTestConstants::kLongTimeout),
+      clock_(Clock::GetRealTimeClock()),
+      num_video_streams_(num_video_streams),
+      num_audio_streams_(num_audio_streams),
+      num_flexfec_streams_(num_flexfec_streams),
+      rtx_(rtx),
+      red_(red),
+      report_perf_stats_(report_perf_stats),
+      sender_call_(nullptr),
+      send_stream_(nullptr),
+      send_transport_(nullptr),
+      send_simulated_network_(nullptr),
+      start_bitrate_bps_(start_bitrate_bps),
+      min_run_time_ms_(min_run_time_ms),
+      expected_bitrate_bps_(0),
+      test_start_ms_(-1),
+      ramp_up_finished_ms_(-1),
+      // Video, RTX and audio SSRC ranges are kept disjoint (100/200/300).
+      video_ssrcs_(GenerateSsrcs(num_video_streams_, 100)),
+      video_rtx_ssrcs_(GenerateSsrcs(num_video_streams_, 200)),
+      audio_ssrcs_(GenerateSsrcs(num_audio_streams_, 300)),
+      task_queue_(task_queue) {
+  if (red_)
+    EXPECT_EQ(0u, num_flexfec_streams_);
+  EXPECT_LE(num_audio_streams_, 1u);
+}
+
+RampUpTester::~RampUpTester() = default;
+
+// Applies the configured start bitrate (if any) and a 10 kbps floor to
+// the sender's bitrate constraints.
+void RampUpTester::ModifySenderBitrateConfig(
+    BitrateConstraints* bitrate_config) {
+  if (start_bitrate_bps_ != 0) {
+    bitrate_config->start_bitrate_bps = start_bitrate_bps_;
+  }
+  bitrate_config->min_bitrate_bps = 10000;
+}
+
+// Captures the send stream so its stats can be polled later.
+void RampUpTester::OnVideoStreamsCreated(
+    VideoSendStream* send_stream,
+    const std::vector<VideoReceiveStreamInterface*>& receive_streams) {
+  send_stream_ = send_stream;
+}
+
+// Returns the simulated-network config used for the forward transport.
+BuiltInNetworkBehaviorConfig RampUpTester::GetSendTransportConfig() const {
+  return forward_transport_config_;
+}
+
+size_t RampUpTester::GetNumVideoStreams() const {
+  return num_video_streams_;
+}
+
+size_t RampUpTester::GetNumAudioStreams() const {
+  return num_audio_streams_;
+}
+
+size_t RampUpTester::GetNumFlexfecStreams() const {
+  return num_flexfec_streams_;
+}
+
+// Stream factory that, for the single-stream case, raises the stream's
+// target/max bitrate to 2 Mbps so the ramp-up has headroom.
+class RampUpTester::VideoStreamFactory
+    : public VideoEncoderConfig::VideoStreamFactoryInterface {
+ public:
+  VideoStreamFactory() {}
+
+ private:
+  std::vector<VideoStream> CreateEncoderStreams(
+      int frame_width,
+      int frame_height,
+      const VideoEncoderConfig& encoder_config) override {
+    std::vector<VideoStream> streams =
+        test::CreateVideoStreams(frame_width, frame_height, encoder_config);
+    if (encoder_config.number_of_streams == 1) {
+      streams[0].target_bitrate_bps = streams[0].max_bitrate_bps = 2000000;
+    }
+    return streams;
+  }
+};
+
+// Configures simulcast/single-stream video, RTX, RED/ULPFEC and FlexFEC
+// on the send side, mirrors the relevant settings on each receiver, and
+// derives the expected ramp-up target bitrate.
+void RampUpTester::ModifyVideoConfigs(
+    VideoSendStream::Config* send_config,
+    std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+    VideoEncoderConfig* encoder_config) {
+  send_config->suspend_below_min_bitrate = true;
+  encoder_config->number_of_streams = num_video_streams_;
+  encoder_config->max_bitrate_bps = 2000000;
+  encoder_config->video_stream_factory =
+      rtc::make_ref_counted<RampUpTester::VideoStreamFactory>();
+  if (num_video_streams_ == 1) {
+    // For single stream rampup until 1mbps
+    expected_bitrate_bps_ = kSingleStreamTargetBps;
+  } else {
+    // To ensure simulcast rate allocation.
+    send_config->rtp.payload_name = "VP8";
+    encoder_config->codec_type = kVideoCodecVP8;
+    std::vector<VideoStream> streams = test::CreateVideoStreams(
+        test::VideoTestConstants::kDefaultWidth,
+        test::VideoTestConstants::kDefaultHeight, *encoder_config);
+    // For multi stream rampup until all streams are being sent. That means
+    // enough bitrate to send all the target streams plus the min bitrate of
+    // the last one.
+    expected_bitrate_bps_ = streams.back().min_bitrate_bps;
+    for (size_t i = 0; i < streams.size() - 1; ++i) {
+      expected_bitrate_bps_ += streams[i].target_bitrate_bps;
+    }
+  }
+
+  send_config->rtp.nack.rtp_history_ms =
+      test::VideoTestConstants::kNackRtpHistoryMs;
+  send_config->rtp.ssrcs = video_ssrcs_;
+  if (rtx_) {
+    send_config->rtp.rtx.payload_type =
+        test::VideoTestConstants::kSendRtxPayloadType;
+    send_config->rtp.rtx.ssrcs = video_rtx_ssrcs_;
+  }
+  if (red_) {
+    send_config->rtp.ulpfec.ulpfec_payload_type =
+        test::VideoTestConstants::kUlpfecPayloadType;
+    send_config->rtp.ulpfec.red_payload_type =
+        test::VideoTestConstants::kRedPayloadType;
+    if (rtx_) {
+      send_config->rtp.ulpfec.red_rtx_payload_type =
+          test::VideoTestConstants::kRtxRedPayloadType;
+    }
+  }
+
+  // Mirror the send-side settings onto each receive config.
+  size_t i = 0;
+  for (VideoReceiveStreamInterface::Config& recv_config : *receive_configs) {
+    // NOTE(review): reserve() does not change decoders.size(); indexing
+    // decoders[0] assumes the test framework pre-populated one decoder
+    // entry per receive config — confirm.
+    recv_config.decoders.reserve(1);
+    recv_config.decoders[0].payload_type = send_config->rtp.payload_type;
+    recv_config.decoders[0].video_format =
+        SdpVideoFormat(send_config->rtp.payload_name);
+
+    recv_config.rtp.remote_ssrc = video_ssrcs_[i];
+    recv_config.rtp.nack.rtp_history_ms = send_config->rtp.nack.rtp_history_ms;
+
+    if (red_) {
+      recv_config.rtp.red_payload_type =
+          send_config->rtp.ulpfec.red_payload_type;
+      recv_config.rtp.ulpfec_payload_type =
+          send_config->rtp.ulpfec.ulpfec_payload_type;
+      if (rtx_) {
+        recv_config.rtp.rtx_associated_payload_types
+            [send_config->rtp.ulpfec.red_rtx_payload_type] =
+                send_config->rtp.ulpfec.red_payload_type;
+      }
+    }
+
+    if (rtx_) {
+      recv_config.rtp.rtx_ssrc = video_rtx_ssrcs_[i];
+      recv_config.rtp
+          .rtx_associated_payload_types[send_config->rtp.rtx.payload_type] =
+          send_config->rtp.payload_type;
+    }
+    ++i;
+  }
+
+  // FlexFEC (when enabled) protects only the first video stream.
+  RTC_DCHECK_LE(num_flexfec_streams_, 1);
+  if (num_flexfec_streams_ == 1) {
+    send_config->rtp.flexfec.payload_type =
+        test::VideoTestConstants::kFlexfecPayloadType;
+    send_config->rtp.flexfec.ssrc = test::VideoTestConstants::kFlexfecSendSsrc;
+    send_config->rtp.flexfec.protected_media_ssrcs = {video_ssrcs_[0]};
+  }
+}
+
+// Configures the (single) audio send stream's SSRC and bitrate bounds and
+// points every audio receiver at it. No-op when audio is disabled.
+void RampUpTester::ModifyAudioConfigs(
+    AudioSendStream::Config* send_config,
+    std::vector<AudioReceiveStreamInterface::Config>* receive_configs) {
+  if (num_audio_streams_ == 0)
+    return;
+
+  send_config->rtp.ssrc = audio_ssrcs_[0];
+  send_config->min_bitrate_bps = 6000;
+  send_config->max_bitrate_bps = 60000;
+
+  for (AudioReceiveStreamInterface::Config& recv_config : *receive_configs) {
+    recv_config.rtp.remote_ssrc = send_config->rtp.ssrc;
+  }
+}
+
+// Configures the single FlexFEC receive stream to protect the first video
+// SSRC. No-op when FlexFEC is disabled.
+void RampUpTester::ModifyFlexfecConfigs(
+    std::vector<FlexfecReceiveStream::Config>* receive_configs) {
+  if (num_flexfec_streams_ == 0)
+    return;
+  RTC_DCHECK_EQ(1, num_flexfec_streams_);
+  (*receive_configs)[0].payload_type =
+      test::VideoTestConstants::kFlexfecPayloadType;
+  (*receive_configs)[0].rtp.remote_ssrc =
+      test::VideoTestConstants::kFlexfecSendSsrc;
+  (*receive_configs)[0].protected_media_ssrcs = {video_ssrcs_[0]};
+  (*receive_configs)[0].rtp.local_ssrc = video_ssrcs_[0];
+}
+
+// Starts the repeating stats poll on `task_queue_` once the sender call
+// is available.
+void RampUpTester::OnCallsCreated(Call* sender_call, Call* receiver_call) {
+  RTC_DCHECK(sender_call);
+  sender_call_ = sender_call;
+  pending_task_ = RepeatingTaskHandle::Start(task_queue_, [this] {
+    PollStats();
+    return kPollInterval;
+  });
+}
+
+// Captures the forward transport and its simulated network for later
+// latency reporting and reconfiguration.
+void RampUpTester::OnTransportCreated(
+    test::PacketTransport* to_receiver,
+    SimulatedNetworkInterface* sender_network,
+    test::PacketTransport* to_sender,
+    SimulatedNetworkInterface* receiver_network) {
+  RTC_DCHECK_RUN_ON(task_queue_);
+
+  send_transport_ = to_receiver;
+  send_simulated_network_ = sender_network;
+}
+
+// Completes the observation once the estimated send bandwidth reaches the
+// expected bitrate and the minimum run time (if any) has elapsed.
+void RampUpTester::PollStats() {
+  RTC_DCHECK_RUN_ON(task_queue_);
+
+  Call::Stats stats = sender_call_->GetStats();
+  EXPECT_GE(expected_bitrate_bps_, 0);
+
+  if (stats.send_bandwidth_bps >= expected_bitrate_bps_ &&
+      (min_run_time_ms_ == -1 ||
+       clock_->TimeInMilliseconds() - test_start_ms_ >= min_run_time_ms_)) {
+    ramp_up_finished_ms_ = clock_->TimeInMilliseconds();
+    observation_complete_.Set();
+    pending_task_.Stop();
+  }
+}
+
+// Logs a single perf metric, keyed by the currently running gtest name.
+void RampUpTester::ReportResult(
+    absl::string_view measurement,
+    size_t value,
+    Unit unit,
+    ImprovementDirection improvement_direction) const {
+  GetGlobalMetricsLogger()->LogSingleValueMetric(
+      measurement,
+      ::testing::UnitTest::GetInstance()->current_test_info()->name(), value,
+      unit, improvement_direction);
+}
+
+// Accumulates one substream's packet/byte counters (transmitted +
+// retransmitted + FEC) into the caller-provided totals.
+void RampUpTester::AccumulateStats(const VideoSendStream::StreamStats& stream,
+                                   size_t* total_packets_sent,
+                                   size_t* total_sent,
+                                   size_t* padding_sent,
+                                   size_t* media_sent) const {
+  *total_packets_sent += stream.rtp_stats.transmitted.packets +
+                         stream.rtp_stats.retransmitted.packets +
+                         stream.rtp_stats.fec.packets;
+  *total_sent += stream.rtp_stats.transmitted.TotalBytes() +
+                 stream.rtp_stats.retransmitted.TotalBytes() +
+                 stream.rtp_stats.fec.TotalBytes();
+  *padding_sent += stream.rtp_stats.transmitted.padding_bytes +
+                   stream.rtp_stats.retransmitted.padding_bytes +
+                   stream.rtp_stats.fec.padding_bytes;
+  *media_sent += stream.rtp_stats.MediaPayloadBytes();
+}
+
+// Stops stats polling, snapshots the final send stats on the task queue,
+// and (optionally) reports perf metrics for media/padding/RTX bytes,
+// ramp-up time and average network latency.
+void RampUpTester::TriggerTestDone() {
+  RTC_DCHECK_GE(test_start_ms_, 0);
+
+  // Stop polling stats.
+  // Corner case for webrtc_quick_perf_test
+  SendTask(task_queue_, [this] { pending_task_.Stop(); });
+
+  // TODO(holmer): Add audio send stats here too when those APIs are available.
+  if (!send_stream_)
+    return;
+
+  VideoSendStream::Stats send_stats;
+  SendTask(task_queue_, [&] { send_stats = send_stream_->GetStats(); });
+
+  send_stream_ = nullptr;  // To avoid dereferencing a bad pointer.
+
+  size_t total_packets_sent = 0;
+  size_t total_sent = 0;
+  size_t padding_sent = 0;
+  size_t media_sent = 0;
+  for (uint32_t ssrc : video_ssrcs_) {
+    AccumulateStats(send_stats.substreams[ssrc], &total_packets_sent,
+                    &total_sent, &padding_sent, &media_sent);
+  }
+
+  size_t rtx_total_packets_sent = 0;
+  size_t rtx_total_sent = 0;
+  size_t rtx_padding_sent = 0;
+  size_t rtx_media_sent = 0;
+  for (uint32_t rtx_ssrc : video_rtx_ssrcs_) {
+    AccumulateStats(send_stats.substreams[rtx_ssrc], &rtx_total_packets_sent,
+                    &rtx_total_sent, &rtx_padding_sent, &rtx_media_sent);
+  }
+
+  if (report_perf_stats_) {
+    ReportResult("ramp-up-media-sent", media_sent, Unit::kBytes,
+                 ImprovementDirection::kBiggerIsBetter);
+    ReportResult("ramp-up-padding-sent", padding_sent, Unit::kBytes,
+                 ImprovementDirection::kSmallerIsBetter);
+    ReportResult("ramp-up-rtx-media-sent", rtx_media_sent, Unit::kBytes,
+                 ImprovementDirection::kBiggerIsBetter);
+    ReportResult("ramp-up-rtx-padding-sent", rtx_padding_sent, Unit::kBytes,
+                 ImprovementDirection::kSmallerIsBetter);
+    if (ramp_up_finished_ms_ >= 0) {
+      ReportResult("ramp-up-time", ramp_up_finished_ms_ - test_start_ms_,
+                   Unit::kMilliseconds, ImprovementDirection::kSmallerIsBetter);
+    }
+    ReportResult("ramp-up-average-network-latency",
+                 send_transport_->GetAverageDelayMs(), Unit::kMilliseconds,
+                 ImprovementDirection::kSmallerIsBetter);
+  }
+}
+
+// Runs the ramp-up: waits for PollStats() to signal completion, then
+// gathers and reports the final stats.
+void RampUpTester::PerformTest() {
+  test_start_ms_ = clock_->TimeInMilliseconds();
+  EXPECT_TRUE(Wait()) << "Timed out while waiting for ramp-up to complete.";
+  TriggerTestDone();
+}
+
+// Up/down/up variant: per-state link capacities (kbps, indexed by test
+// state) are 4/3 of the expected high bitrate for the ramp phases and
+// kLowBandwidthLimitBps for the constrained phase.
+RampUpDownUpTester::RampUpDownUpTester(size_t num_video_streams,
+                                       size_t num_audio_streams,
+                                       size_t num_flexfec_streams,
+                                       unsigned int start_bitrate_bps,
+                                       bool rtx,
+                                       bool red,
+                                       const std::vector<int>& loss_rates,
+                                       bool report_perf_stats,
+                                       TaskQueueBase* task_queue)
+    : RampUpTester(num_video_streams,
+                   num_audio_streams,
+                   num_flexfec_streams,
+                   start_bitrate_bps,
+                   0,
+                   rtx,
+                   red,
+                   report_perf_stats,
+                   task_queue),
+      link_rates_({4 * GetExpectedHighBitrate() / (3 * 1000),
+                   kLowBandwidthLimitBps / 1000,
+                   4 * GetExpectedHighBitrate() / (3 * 1000), 0}),
+      test_state_(kFirstRampup),
+      next_state_(kTransitionToNextState),
+      state_start_ms_(clock_->TimeInMilliseconds()),
+      interval_start_ms_(clock_->TimeInMilliseconds()),
+      sent_bytes_(0),
+      loss_rates_(loss_rates) {
+  forward_transport_config_.link_capacity_kbps = link_rates_[test_state_];
+  forward_transport_config_.queue_delay_ms = 100;
+  forward_transport_config_.loss_percent = loss_rates_[test_state_];
+}
+
+RampUpDownUpTester::~RampUpDownUpTester() {}
+
+// Polls the current transmit bitrate / suspension state and advances the
+// up/down/up state machine.
+void RampUpDownUpTester::PollStats() {
+  if (test_state_ == kTestEnd) {
+    pending_task_.Stop();
+  }
+
+  int transmit_bitrate_bps = 0;
+  bool suspended = false;
+  if (num_video_streams_ > 0 && send_stream_) {
+    webrtc::VideoSendStream::Stats stats = send_stream_->GetStats();
+    for (const auto& it : stats.substreams) {
+      transmit_bitrate_bps += it.second.total_bitrate_bps;
+    }
+    suspended = stats.suspended;
+  }
+  if (num_audio_streams_ > 0 && sender_call_) {
+    // An audio send stream doesn't have bitrate stats, so the call send BW is
+    // currently used instead. Note this overwrites (does not add to) any
+    // video bitrate summed above.
+    transmit_bitrate_bps = sender_call_->GetStats().send_bandwidth_bps;
+  }
+
+  EvolveTestState(transmit_bitrate_bps, suspended);
+}
+
+// The receiver side gets the same 10 kbps minimum-bitrate floor as the
+// sender.
+void RampUpDownUpTester::ModifyReceiverBitrateConfig(
+    BitrateConstraints* bitrate_config) {
+  bitrate_config->min_bitrate_bps = 10000;
+}
+
+// Builds a metric-name suffix like "_3streams_rtx_nored" describing the
+// stream counts and RTX/RED configuration.
+std::string RampUpDownUpTester::GetModifierString() const {
+  std::string str("_");
+  if (num_video_streams_ > 0) {
+    str += rtc::ToString(num_video_streams_);
+    str += "stream";
+    str += (num_video_streams_ > 1 ? "s" : "");
+    str += "_";
+  }
+  if (num_audio_streams_ > 0) {
+    str += rtc::ToString(num_audio_streams_);
+    str += "stream";
+    str += (num_audio_streams_ > 1 ? "s" : "");
+    str += "_";
+  }
+  str += (rtx_ ? "" : "no");
+  str += "rtx_";
+  str += (red_ ? "" : "no");
+  str += "red";
+  return str;
+}
+
+// Sum of the expected audio and video high-phase bitrates for the
+// configured streams.
+int RampUpDownUpTester::GetExpectedHighBitrate() const {
+  int expected_bitrate_bps = 0;
+  if (num_audio_streams_ > 0)
+    expected_bitrate_bps += kExpectedHighAudioBitrateBps;
+  if (num_video_streams_ > 0)
+    expected_bitrate_bps += kExpectedHighVideoBitrateBps;
+  return expected_bitrate_bps;
+}
+
+// Total FEC bytes sent so far across all substreams (0 when FlexFEC is
+// disabled).
+size_t RampUpDownUpTester::GetFecBytes() const {
+  size_t flex_fec_bytes = 0;
+  if (num_flexfec_streams_ > 0) {
+    webrtc::VideoSendStream::Stats stats = send_stream_->GetStats();
+    for (const auto& kv : stats.substreams)
+      flex_fec_bytes += kv.second.rtp_stats.fec.TotalBytes();
+  }
+  return flex_fec_bytes;
+}
+
+// FEC traffic is only expected when FlexFEC is enabled and loss is being
+// applied.
+bool RampUpDownUpTester::ExpectingFec() const {
+  return num_flexfec_streams_ > 0 && forward_transport_config_.loss_percent > 0;
+}
+
+// State machine: kFirstRampup -> kLowRate -> kSecondRampup -> kTestEnd,
+// interleaved with kTransitionToNextState which (when FEC is expected)
+// waits for FEC bytes before reconfiguring the simulated link.
+void RampUpDownUpTester::EvolveTestState(int bitrate_bps, bool suspended) {
+  int64_t now = clock_->TimeInMilliseconds();
+  switch (test_state_) {
+    case kFirstRampup:
+      EXPECT_FALSE(suspended);
+      if (bitrate_bps >= GetExpectedHighBitrate()) {
+        if (report_perf_stats_) {
+          GetGlobalMetricsLogger()->LogSingleValueMetric(
+              "ramp_up_down_up" + GetModifierString(), "first_rampup",
+              now - state_start_ms_, Unit::kMilliseconds,
+              ImprovementDirection::kSmallerIsBetter);
+        }
+        // Apply loss during the transition between states if FEC is enabled.
+        forward_transport_config_.loss_percent = loss_rates_[test_state_];
+        test_state_ = kTransitionToNextState;
+        next_state_ = kLowRate;
+      }
+      break;
+    case kLowRate: {
+      // Audio streams are never suspended.
+      bool check_suspend_state = num_video_streams_ > 0;
+      if (bitrate_bps < kLowBandwidthLimitBps + kLowBitrateMarginBps &&
+          suspended == check_suspend_state) {
+        if (report_perf_stats_) {
+          GetGlobalMetricsLogger()->LogSingleValueMetric(
+              "ramp_up_down_up" + GetModifierString(), "rampdown",
+              now - state_start_ms_, Unit::kMilliseconds,
+              ImprovementDirection::kSmallerIsBetter);
+        }
+        // Apply loss during the transition between states if FEC is enabled.
+        forward_transport_config_.loss_percent = loss_rates_[test_state_];
+        test_state_ = kTransitionToNextState;
+        next_state_ = kSecondRampup;
+      }
+      break;
+    }
+    case kSecondRampup:
+      if (bitrate_bps >= GetExpectedHighBitrate() && !suspended) {
+        if (report_perf_stats_) {
+          GetGlobalMetricsLogger()->LogSingleValueMetric(
+              "ramp_up_down_up" + GetModifierString(), "second_rampup",
+              now - state_start_ms_, Unit::kMilliseconds,
+              ImprovementDirection::kSmallerIsBetter);
+          ReportResult("ramp-up-down-up-average-network-latency",
+                       send_transport_->GetAverageDelayMs(),
+                       Unit::kMilliseconds,
+                       ImprovementDirection::kSmallerIsBetter);
+        }
+        // Apply loss during the transition between states if FEC is enabled.
+        forward_transport_config_.loss_percent = loss_rates_[test_state_];
+        test_state_ = kTransitionToNextState;
+        next_state_ = kTestEnd;
+      }
+      break;
+    case kTestEnd:
+      observation_complete_.Set();
+      break;
+    case kTransitionToNextState:
+      if (!ExpectingFec() || GetFecBytes() > 0) {
+        test_state_ = next_state_;
+        forward_transport_config_.link_capacity_kbps = link_rates_[test_state_];
+        // No loss while ramping up and down as it may affect the BWE
+        // negatively, making the test flaky.
+        forward_transport_config_.loss_percent = 0;
+        state_start_ms_ = now;
+        interval_start_ms_ = now;
+        sent_bytes_ = 0;
+        send_simulated_network_->SetConfig(forward_transport_config_);
+      }
+      break;
+  }
+}
+
+// CallTest specialization that, when --ramp_dump_name is set, starts
+// legacy-encoded RTC event logs for both directions (written to
+// <name>.send.rtc.dat / <name>.recv.rtc.dat).
+class RampUpTest : public test::CallTest {
+ public:
+  RampUpTest()
+      : task_queue_factory_(CreateDefaultTaskQueueFactory()),
+        rtc_event_log_factory_(task_queue_factory_.get()) {
+    std::string dump_name(absl::GetFlag(FLAGS_ramp_dump_name));
+    if (!dump_name.empty()) {
+      send_event_log_ = rtc_event_log_factory_.CreateRtcEventLog(
+          RtcEventLog::EncodingType::Legacy);
+      recv_event_log_ = rtc_event_log_factory_.CreateRtcEventLog(
+          RtcEventLog::EncodingType::Legacy);
+      bool event_log_started =
+          send_event_log_->StartLogging(
+              std::make_unique<RtcEventLogOutputFile>(
+                  dump_name + ".send.rtc.dat", RtcEventLog::kUnlimitedOutput),
+              RtcEventLog::kImmediateOutput) &&
+          recv_event_log_->StartLogging(
+              std::make_unique<RtcEventLogOutputFile>(
+                  dump_name + ".recv.rtc.dat", RtcEventLog::kUnlimitedOutput),
+              RtcEventLog::kImmediateOutput);
+      RTC_DCHECK(event_log_started);
+    }
+  }
+
+ private:
+  const std::unique_ptr<TaskQueueFactory> task_queue_factory_;
+  RtcEventLogFactory rtc_event_log_factory_;
+};
+
+// Start bitrate shared by the up/down/up test cases below.
+static const uint32_t kStartBitrateBps = 60000;
+
+TEST_F(RampUpTest, UpDownUpAbsSendTimeSimulcastRedRtx) {
+  std::vector<int> loss_rates = {0, 0, 0, 0};
+  RegisterRtpExtension(
+      RtpExtension(RtpExtension::kAbsSendTimeUri, kAbsSendTimeExtensionId));
+  RampUpDownUpTester test(3, 0, 0, kStartBitrateBps, true, true, loss_rates,
+                          true, task_queue());
+  RunBaseTest(&test);
+}
+
+// TODO(bugs.webrtc.org/8878)
+#if defined(WEBRTC_MAC)
+#define MAYBE_UpDownUpTransportSequenceNumberRtx \
+  DISABLED_UpDownUpTransportSequenceNumberRtx
+#else
+#define MAYBE_UpDownUpTransportSequenceNumberRtx \
+  UpDownUpTransportSequenceNumberRtx
+#endif
+TEST_F(RampUpTest, MAYBE_UpDownUpTransportSequenceNumberRtx) {
+  std::vector<int> loss_rates = {0, 0, 0, 0};
+  RegisterRtpExtension(RtpExtension(RtpExtension::kTransportSequenceNumberUri,
+                                    kTransportSequenceNumberExtensionId));
+  RampUpDownUpTester test(3, 0, 0, kStartBitrateBps, true, false, loss_rates,
+                          true, task_queue());
+  RunBaseTest(&test);
+}
+
+// TODO(holmer): Tests which don't report perf stats should be moved to a
+// different executable since they per definition are not perf tests.
+// This test is disabled because it crashes on Linux, and is flaky on other
+// platforms. See: crbug.com/webrtc/7919
+TEST_F(RampUpTest, DISABLED_UpDownUpTransportSequenceNumberPacketLoss) {
+  std::vector<int> loss_rates = {20, 0, 0, 0};
+  RegisterRtpExtension(RtpExtension(RtpExtension::kTransportSequenceNumberUri,
+                                    kTransportSequenceNumberExtensionId));
+  RampUpDownUpTester test(1, 0, 1, kStartBitrateBps, true, false, loss_rates,
+                          false, task_queue());
+  RunBaseTest(&test);
+}
+
+// TODO(bugs.webrtc.org/8878)
+#if defined(WEBRTC_MAC)
+#define MAYBE_UpDownUpAudioVideoTransportSequenceNumberRtx \
+  DISABLED_UpDownUpAudioVideoTransportSequenceNumberRtx
+#else
+#define MAYBE_UpDownUpAudioVideoTransportSequenceNumberRtx \
+  UpDownUpAudioVideoTransportSequenceNumberRtx
+#endif
+TEST_F(RampUpTest, MAYBE_UpDownUpAudioVideoTransportSequenceNumberRtx) {
+  std::vector<int> loss_rates = {0, 0, 0, 0};
+  RegisterRtpExtension(RtpExtension(RtpExtension::kTransportSequenceNumberUri,
+                                    kTransportSequenceNumberExtensionId));
+  RampUpDownUpTester test(3, 1, 0, kStartBitrateBps, true, false, loss_rates,
+                          false, task_queue());
+  RunBaseTest(&test);
+}
+
+TEST_F(RampUpTest, UpDownUpAudioTransportSequenceNumberRtx) {
+  std::vector<int> loss_rates = {0, 0, 0, 0};
+  RegisterRtpExtension(RtpExtension(RtpExtension::kTransportSequenceNumberUri,
+                                    kTransportSequenceNumberExtensionId));
+  RampUpDownUpTester test(0, 1, 0, kStartBitrateBps, true, false, loss_rates,
+                          false, task_queue());
+  RunBaseTest(&test);
+}
+
+TEST_F(RampUpTest, TOffsetSimulcastRedRtx) {
+  RegisterRtpExtension(RtpExtension(RtpExtension::kTimestampOffsetUri,
+                                    kTransmissionTimeOffsetExtensionId));
+  RampUpTester test(3, 0, 0, 0, 0, true, true, true, task_queue());
+  RunBaseTest(&test);
+}
+
+TEST_F(RampUpTest, AbsSendTime) {
+  RegisterRtpExtension(
+      RtpExtension(RtpExtension::kAbsSendTimeUri, kAbsSendTimeExtensionId));
+  RampUpTester test(1, 0, 0, 0, 0, false, false, false, task_queue());
+  RunBaseTest(&test);
+}
+
+TEST_F(RampUpTest, AbsSendTimeSimulcastRedRtx) {
+  RegisterRtpExtension(
+      RtpExtension(RtpExtension::kAbsSendTimeUri, kAbsSendTimeExtensionId));
+  RampUpTester test(3, 0, 0, 0, 0, true, true, true, task_queue());
+  RunBaseTest(&test);
+}
+
+TEST_F(RampUpTest, TransportSequenceNumber) {
+  RegisterRtpExtension(RtpExtension(RtpExtension::kTransportSequenceNumberUri,
+                                    kTransportSequenceNumberExtensionId));
+  RampUpTester test(1, 0, 0, 0, 0, false, false, false, task_queue());
+  RunBaseTest(&test);
+}
+
+TEST_F(RampUpTest, TransportSequenceNumberSimulcast) {
+  RegisterRtpExtension(RtpExtension(RtpExtension::kTransportSequenceNumberUri,
+                                    kTransportSequenceNumberExtensionId));
+  RampUpTester test(3, 0, 0, 0, 0, false, false, false, task_queue());
+  RunBaseTest(&test);
+}
+
+TEST_F(RampUpTest, TransportSequenceNumberSimulcastRedRtx) {
+  RegisterRtpExtension(RtpExtension(RtpExtension::kTransportSequenceNumberUri,
+                                    kTransportSequenceNumberExtensionId));
+  RampUpTester test(3, 0, 0, 0, 0, true, true, true, task_queue());
+  RunBaseTest(&test);
+}
+
+TEST_F(RampUpTest, AudioTransportSequenceNumber) {
+  RegisterRtpExtension(RtpExtension(RtpExtension::kTransportSequenceNumberUri,
+                                    kTransportSequenceNumberExtensionId));
+  RampUpTester test(0, 1, 0, 300000, 10000, false, false, false, task_queue());
+  RunBaseTest(&test);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/rampup_tests.h b/third_party/libwebrtc/call/rampup_tests.h
new file mode 100644
index 0000000000..ba9989d25c
--- /dev/null
+++ b/third_party/libwebrtc/call/rampup_tests.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef CALL_RAMPUP_TESTS_H_
+#define CALL_RAMPUP_TESTS_H_
+
+#include <map>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "api/rtc_event_log/rtc_event_log.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/test/metrics/metric.h"
+#include "api/test/simulated_network.h"
+#include "call/call.h"
+#include "call/simulated_network.h"
+#include "rtc_base/event.h"
+#include "rtc_base/task_utils/repeating_task.h"
+#include "test/call_test.h"
+
+namespace webrtc {
+
+static const int kTransmissionTimeOffsetExtensionId = 6;
+static const int kAbsSendTimeExtensionId = 7;
+static const int kTransportSequenceNumberExtensionId = 8;
+static const unsigned int kSingleStreamTargetBps = 1000000;
+
+class Clock;
+
+class RampUpTester : public test::EndToEndTest {
+ public:
+ RampUpTester(size_t num_video_streams,
+ size_t num_audio_streams,
+ size_t num_flexfec_streams,
+ unsigned int start_bitrate_bps,
+ int64_t min_run_time_ms,
+ bool rtx,
+ bool red,
+ bool report_perf_stats,
+ TaskQueueBase* task_queue);
+ ~RampUpTester() override;
+
+ size_t GetNumVideoStreams() const override;
+ size_t GetNumAudioStreams() const override;
+ size_t GetNumFlexfecStreams() const override;
+
+ void PerformTest() override;
+
+ protected:
+ virtual void PollStats();
+
+ void AccumulateStats(const VideoSendStream::StreamStats& stream,
+ size_t* total_packets_sent,
+ size_t* total_sent,
+ size_t* padding_sent,
+ size_t* media_sent) const;
+
+ void ReportResult(absl::string_view measurement,
+ size_t value,
+ test::Unit unit,
+ test::ImprovementDirection improvement_direction) const;
+ void TriggerTestDone();
+
+ Clock* const clock_;
+ BuiltInNetworkBehaviorConfig forward_transport_config_;
+ const size_t num_video_streams_;
+ const size_t num_audio_streams_;
+ const size_t num_flexfec_streams_;
+ const bool rtx_;
+ const bool red_;
+ const bool report_perf_stats_;
+ Call* sender_call_;
+ VideoSendStream* send_stream_;
+ test::PacketTransport* send_transport_;
+ SimulatedNetworkInterface* send_simulated_network_;
+
+ private:
+ typedef std::map<uint32_t, uint32_t> SsrcMap;
+ class VideoStreamFactory;
+
+ void ModifySenderBitrateConfig(BitrateConstraints* bitrate_config) override;
+ void OnVideoStreamsCreated(VideoSendStream* send_stream,
+ const std::vector<VideoReceiveStreamInterface*>&
+ receive_streams) override;
+ BuiltInNetworkBehaviorConfig GetSendTransportConfig() const override;
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStreamInterface::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override;
+ void ModifyAudioConfigs(AudioSendStream::Config* send_config,
+ std::vector<AudioReceiveStreamInterface::Config>*
+ receive_configs) override;
+ void ModifyFlexfecConfigs(
+ std::vector<FlexfecReceiveStream::Config>* receive_configs) override;
+ void OnCallsCreated(Call* sender_call, Call* receiver_call) override;
+ void OnTransportCreated(test::PacketTransport* to_receiver,
+ SimulatedNetworkInterface* sender_network,
+ test::PacketTransport* to_sender,
+ SimulatedNetworkInterface* receiver_network) override;
+
+ const int start_bitrate_bps_;
+ const int64_t min_run_time_ms_;
+ int expected_bitrate_bps_;
+ int64_t test_start_ms_;
+ int64_t ramp_up_finished_ms_;
+
+ std::vector<uint32_t> video_ssrcs_;
+ std::vector<uint32_t> video_rtx_ssrcs_;
+ std::vector<uint32_t> audio_ssrcs_;
+
+ protected:
+ TaskQueueBase* const task_queue_;
+ RepeatingTaskHandle pending_task_;
+};
+
+class RampUpDownUpTester : public RampUpTester {
+ public:
+ RampUpDownUpTester(size_t num_video_streams,
+ size_t num_audio_streams,
+ size_t num_flexfec_streams,
+ unsigned int start_bitrate_bps,
+ bool rtx,
+ bool red,
+ const std::vector<int>& loss_rates,
+ bool report_perf_stats,
+ TaskQueueBase* task_queue);
+ ~RampUpDownUpTester() override;
+
+ protected:
+ void PollStats() override;
+
+ private:
+ enum TestStates {
+ kFirstRampup = 0,
+ kLowRate,
+ kSecondRampup,
+ kTestEnd,
+ kTransitionToNextState,
+ };
+
+ void ModifyReceiverBitrateConfig(BitrateConstraints* bitrate_config) override;
+
+ std::string GetModifierString() const;
+ int GetExpectedHighBitrate() const;
+ int GetHighLinkCapacity() const;
+ size_t GetFecBytes() const;
+ bool ExpectingFec() const;
+ void EvolveTestState(int bitrate_bps, bool suspended);
+
+ const std::vector<int> link_rates_;
+ TestStates test_state_;
+ TestStates next_state_;
+ int64_t state_start_ms_;
+ int64_t interval_start_ms_;
+ int sent_bytes_;
+ std::vector<int> loss_rates_;
+};
+
+} // namespace webrtc
+#endif // CALL_RAMPUP_TESTS_H_
diff --git a/third_party/libwebrtc/call/receive_stream.h b/third_party/libwebrtc/call/receive_stream.h
new file mode 100644
index 0000000000..8a99059ec5
--- /dev/null
+++ b/third_party/libwebrtc/call/receive_stream.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef CALL_RECEIVE_STREAM_H_
+#define CALL_RECEIVE_STREAM_H_
+
+#include <vector>
+
+#include "api/crypto/frame_decryptor_interface.h"
+#include "api/frame_transformer_interface.h"
+#include "api/media_types.h"
+#include "api/scoped_refptr.h"
+#include "api/transport/rtp/rtp_source.h"
+
+namespace webrtc {
+
+// Common base interface for MediaReceiveStreamInterface based classes and
+// FlexfecReceiveStream.
+class ReceiveStreamInterface {
+ public:
+ // Receive-stream specific RTP settings.
+ // TODO(tommi): This struct isn't needed at this level anymore. Move it closer
+ // to where it's used.
+ struct ReceiveStreamRtpConfig {
+ // Synchronization source (stream identifier) to be received.
+ // This member will not change mid-stream and can be assumed to be const
+ // post initialization.
+ uint32_t remote_ssrc = 0;
+
+ // Sender SSRC used for sending RTCP (such as receiver reports).
+ // This value may change mid-stream and must be done on the same thread
+ // that the value is read on (i.e. packet delivery).
+ uint32_t local_ssrc = 0;
+ };
+
+ protected:
+ virtual ~ReceiveStreamInterface() {}
+};
+
+// Either an audio or video receive stream.
+class MediaReceiveStreamInterface : public ReceiveStreamInterface {
+ public:
+ // Starts stream activity.
+ // When a stream is active, it can receive, process and deliver packets.
+ virtual void Start() = 0;
+
+ // Stops stream activity. Must be called to match with a previous call to
+ // `Start()`. When a stream has been stopped, it won't receive, decode,
+ // process or deliver packets to downstream objects such as callback pointers
+ // set in the config struct.
+ virtual void Stop() = 0;
+
+ virtual void SetDepacketizerToDecoderFrameTransformer(
+ rtc::scoped_refptr<webrtc::FrameTransformerInterface>
+ frame_transformer) = 0;
+
+ virtual void SetFrameDecryptor(
+ rtc::scoped_refptr<webrtc::FrameDecryptorInterface> frame_decryptor) = 0;
+
+ virtual std::vector<RtpSource> GetSources() const = 0;
+};
+
+} // namespace webrtc
+
+#endif // CALL_RECEIVE_STREAM_H_
diff --git a/third_party/libwebrtc/call/receive_stream_interface_gn/moz.build b/third_party/libwebrtc/call/receive_stream_interface_gn/moz.build
new file mode 100644
index 0000000000..92973e6d7b
--- /dev/null
+++ b/third_party/libwebrtc/call/receive_stream_interface_gn/moz.build
@@ -0,0 +1,220 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["RTC_ENABLE_WIN_WGC"] = True
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["TARGET_CPU"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["TARGET_CPU"] == "arm":
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["TARGET_CPU"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["TARGET_CPU"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["TARGET_CPU"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["TARGET_CPU"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android" and CONFIG["TARGET_CPU"] == "arm":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["OS_TARGET"] == "Android" and CONFIG["TARGET_CPU"] == "x86":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "aarch64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "arm":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "x86":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "x86_64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("receive_stream_interface_gn")
diff --git a/third_party/libwebrtc/call/receive_time_calculator.cc b/third_party/libwebrtc/call/receive_time_calculator.cc
new file mode 100644
index 0000000000..417168b15d
--- /dev/null
+++ b/third_party/libwebrtc/call/receive_time_calculator.cc
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/receive_time_calculator.h"
+
+#include <memory>
+#include <string>
+#include <type_traits>
+
+#include "rtc_base/experiments/field_trial_parser.h"
+#include "rtc_base/numerics/safe_minmax.h"
+
+namespace webrtc {
+namespace {
+
+const char kBweReceiveTimeCorrection[] = "WebRTC-Bwe-ReceiveTimeFix";
+} // namespace
+
+ReceiveTimeCalculatorConfig::ReceiveTimeCalculatorConfig(
+ const FieldTrialsView& field_trials)
+ : max_packet_time_repair("maxrep", TimeDelta::Millis(2000)),
+ stall_threshold("stall", TimeDelta::Millis(5)),
+ tolerance("tol", TimeDelta::Millis(1)),
+ max_stall("maxstall", TimeDelta::Seconds(5)) {
+ std::string trial_string = field_trials.Lookup(kBweReceiveTimeCorrection);
+ ParseFieldTrial(
+ {&max_packet_time_repair, &stall_threshold, &tolerance, &max_stall},
+ trial_string);
+}
+ReceiveTimeCalculatorConfig::ReceiveTimeCalculatorConfig(
+ const ReceiveTimeCalculatorConfig&) = default;
+ReceiveTimeCalculatorConfig::~ReceiveTimeCalculatorConfig() = default;
+
+ReceiveTimeCalculator::ReceiveTimeCalculator(
+ const FieldTrialsView& field_trials)
+ : config_(field_trials) {}
+
+std::unique_ptr<ReceiveTimeCalculator>
+ReceiveTimeCalculator::CreateFromFieldTrial(
+ const FieldTrialsView& field_trials) {
+ if (!field_trials.IsEnabled(kBweReceiveTimeCorrection))
+ return nullptr;
+ return std::make_unique<ReceiveTimeCalculator>(field_trials);
+}
+
+int64_t ReceiveTimeCalculator::ReconcileReceiveTimes(int64_t packet_time_us,
+ int64_t system_time_us,
+ int64_t safe_time_us) {
+ int64_t stall_time_us = system_time_us - packet_time_us;
+ if (total_system_time_passed_us_ < config_.stall_threshold->us()) {
+ stall_time_us = rtc::SafeMin(stall_time_us, config_.max_stall->us());
+ }
+ int64_t corrected_time_us = safe_time_us - stall_time_us;
+
+ if (last_packet_time_us_ == -1 && stall_time_us < 0) {
+ static_clock_offset_us_ = stall_time_us;
+ corrected_time_us += static_clock_offset_us_;
+ } else if (last_packet_time_us_ > 0) {
+    // All repairs depend on variables being initialized.
+ int64_t packet_time_delta_us = packet_time_us - last_packet_time_us_;
+ int64_t system_time_delta_us = system_time_us - last_system_time_us_;
+ int64_t safe_time_delta_us = safe_time_us - last_safe_time_us_;
+
+ // Repair backwards clock resets during initial stall. In this case, the
+ // reset is observed only in packet time but never in system time.
+ if (system_time_delta_us < 0)
+ total_system_time_passed_us_ += config_.stall_threshold->us();
+ else
+ total_system_time_passed_us_ += system_time_delta_us;
+ if (packet_time_delta_us < 0 &&
+ total_system_time_passed_us_ < config_.stall_threshold->us()) {
+ static_clock_offset_us_ -= packet_time_delta_us;
+ }
+ corrected_time_us += static_clock_offset_us_;
+
+    // Detect resets in between clock readings in socket and app.
+ bool forward_clock_reset =
+ corrected_time_us + config_.tolerance->us() < last_corrected_time_us_;
+ bool obvious_backward_clock_reset = system_time_us < packet_time_us;
+
+ // Harder case with backward clock reset during stall, the reset being
+ // smaller than the stall. Compensate throughout the stall.
+ bool small_backward_clock_reset =
+ !obvious_backward_clock_reset &&
+ safe_time_delta_us > system_time_delta_us + config_.tolerance->us();
+ bool stall_start =
+ packet_time_delta_us >= 0 &&
+ system_time_delta_us > packet_time_delta_us + config_.tolerance->us();
+ bool stall_is_over = safe_time_delta_us > config_.stall_threshold->us();
+ bool packet_time_caught_up =
+ packet_time_delta_us < 0 && system_time_delta_us >= 0;
+ if (stall_start && small_backward_clock_reset)
+ small_reset_during_stall_ = true;
+ else if (stall_is_over || packet_time_caught_up)
+ small_reset_during_stall_ = false;
+
+ // If resets are detected, advance time by (capped) packet time increase.
+ if (forward_clock_reset || obvious_backward_clock_reset ||
+ small_reset_during_stall_) {
+ corrected_time_us = last_corrected_time_us_ +
+ rtc::SafeClamp(packet_time_delta_us, 0,
+ config_.max_packet_time_repair->us());
+ }
+ }
+
+ last_corrected_time_us_ = corrected_time_us;
+ last_packet_time_us_ = packet_time_us;
+ last_system_time_us_ = system_time_us;
+ last_safe_time_us_ = safe_time_us;
+ return corrected_time_us;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/receive_time_calculator.h b/third_party/libwebrtc/call/receive_time_calculator.h
new file mode 100644
index 0000000000..57ba331844
--- /dev/null
+++ b/third_party/libwebrtc/call/receive_time_calculator.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef CALL_RECEIVE_TIME_CALCULATOR_H_
+#define CALL_RECEIVE_TIME_CALCULATOR_H_
+
+#include <stdint.h>
+
+#include <memory>
+
+#include "api/field_trials_view.h"
+#include "api/units/time_delta.h"
+#include "rtc_base/experiments/field_trial_parser.h"
+
+namespace webrtc {
+
+struct ReceiveTimeCalculatorConfig {
+ explicit ReceiveTimeCalculatorConfig(const FieldTrialsView& field_trials);
+ ReceiveTimeCalculatorConfig(const ReceiveTimeCalculatorConfig&);
+ ReceiveTimeCalculatorConfig& operator=(const ReceiveTimeCalculatorConfig&) =
+ default;
+ ~ReceiveTimeCalculatorConfig();
+ FieldTrialParameter<TimeDelta> max_packet_time_repair;
+ FieldTrialParameter<TimeDelta> stall_threshold;
+ FieldTrialParameter<TimeDelta> tolerance;
+ FieldTrialParameter<TimeDelta> max_stall;
+};
+
+// The receive time calculator serves the purpose of combining packet time
+// stamps with a safely incremental clock. This assumes that the packet time
+// stamps are based on lower layer timestamps that have more accurate time
+// increments since they are based on the exact receive time. They might,
+// however, have large jumps due to clock resets in the system. To compensate
+// for this, they are combined with a safe clock source that is guaranteed to be
+// consistent, but it will not be able to measure the exact time when a packet
+// is received.
+class ReceiveTimeCalculator {
+ public:
+ static std::unique_ptr<ReceiveTimeCalculator> CreateFromFieldTrial(
+ const FieldTrialsView& field_trials);
+ explicit ReceiveTimeCalculator(const FieldTrialsView& field_trials);
+ int64_t ReconcileReceiveTimes(int64_t packet_time_us_,
+ int64_t system_time_us_,
+ int64_t safe_time_us_);
+
+ private:
+ int64_t last_corrected_time_us_ = -1;
+ int64_t last_packet_time_us_ = -1;
+ int64_t last_system_time_us_ = -1;
+ int64_t last_safe_time_us_ = -1;
+ int64_t total_system_time_passed_us_ = 0;
+ int64_t static_clock_offset_us_ = 0;
+ int64_t small_reset_during_stall_ = false;
+ ReceiveTimeCalculatorConfig config_;
+};
+} // namespace webrtc
+#endif // CALL_RECEIVE_TIME_CALCULATOR_H_
diff --git a/third_party/libwebrtc/call/receive_time_calculator_unittest.cc b/third_party/libwebrtc/call/receive_time_calculator_unittest.cc
new file mode 100644
index 0000000000..f2e3d54f0c
--- /dev/null
+++ b/third_party/libwebrtc/call/receive_time_calculator_unittest.cc
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/receive_time_calculator.h"
+
+#include <stdlib.h>
+
+#include <algorithm>
+#include <cmath>
+#include <cstdint>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "rtc_base/random.h"
+#include "rtc_base/time_utils.h"
+#include "test/gtest.h"
+#include "test/scoped_key_value_config.h"
+
+namespace webrtc {
+namespace test {
+namespace {
+
+class EmulatedClock {
+ public:
+ explicit EmulatedClock(int seed, float drift = 0.0f)
+ : random_(seed), clock_us_(random_.Rand<uint32_t>()), drift_(drift) {}
+ virtual ~EmulatedClock() = default;
+ int64_t GetClockUs() const { return clock_us_; }
+
+ protected:
+ int64_t UpdateClock(int64_t time_us) {
+ if (!last_query_us_)
+ last_query_us_ = time_us;
+ int64_t skip_us = time_us - *last_query_us_;
+ accumulated_drift_us_ += skip_us * drift_;
+ int64_t drift_correction_us = static_cast<int64_t>(accumulated_drift_us_);
+ accumulated_drift_us_ -= drift_correction_us;
+ clock_us_ += skip_us + drift_correction_us;
+ last_query_us_ = time_us;
+ return skip_us;
+ }
+ Random random_;
+
+ private:
+ int64_t clock_us_;
+ absl::optional<int64_t> last_query_us_;
+ float drift_;
+ float accumulated_drift_us_ = 0;
+};
+
+class EmulatedMonotoneousClock : public EmulatedClock {
+ public:
+ explicit EmulatedMonotoneousClock(int seed) : EmulatedClock(seed) {}
+ ~EmulatedMonotoneousClock() = default;
+
+ int64_t Query(int64_t time_us) {
+ int64_t skip_us = UpdateClock(time_us);
+
+ // In a stall
+ if (stall_recovery_time_us_ > 0) {
+ if (GetClockUs() > stall_recovery_time_us_) {
+ stall_recovery_time_us_ = 0;
+ return GetClockUs();
+ } else {
+ return stall_recovery_time_us_;
+ }
+ }
+
+ // Check if we enter a stall
+ for (int k = 0; k < skip_us; ++k) {
+ if (random_.Rand<double>() < kChanceOfStallPerUs) {
+ int64_t stall_duration_us =
+ static_cast<int64_t>(random_.Rand<float>() * kMaxStallDurationUs);
+ stall_recovery_time_us_ = GetClockUs() + stall_duration_us;
+ return stall_recovery_time_us_;
+ }
+ }
+ return GetClockUs();
+ }
+
+ void ForceStallUs() {
+ int64_t stall_duration_us =
+ static_cast<int64_t>(random_.Rand<float>() * kMaxStallDurationUs);
+ stall_recovery_time_us_ = GetClockUs() + stall_duration_us;
+ }
+
+ bool Stalled() const { return stall_recovery_time_us_ > 0; }
+
+ int64_t GetRemainingStall(int64_t time_us) const {
+ return stall_recovery_time_us_ > 0 ? stall_recovery_time_us_ - GetClockUs()
+ : 0;
+ }
+
+ const int64_t kMaxStallDurationUs = rtc::kNumMicrosecsPerSec;
+
+ private:
+ const float kChanceOfStallPerUs = 5e-6f;
+ int64_t stall_recovery_time_us_ = 0;
+};
+
+class EmulatedNonMonotoneousClock : public EmulatedClock {
+ public:
+ EmulatedNonMonotoneousClock(int seed, int64_t duration_us, float drift = 0)
+ : EmulatedClock(seed, drift) {
+ Pregenerate(duration_us);
+ }
+ ~EmulatedNonMonotoneousClock() = default;
+
+ void Pregenerate(int64_t duration_us) {
+ int64_t time_since_reset_us = kMinTimeBetweenResetsUs;
+ int64_t clock_offset_us = 0;
+ for (int64_t time_us = 0; time_us < duration_us; time_us += kResolutionUs) {
+ int64_t skip_us = UpdateClock(time_us);
+ time_since_reset_us += skip_us;
+ int64_t reset_us = 0;
+ if (time_since_reset_us >= kMinTimeBetweenResetsUs) {
+ for (int k = 0; k < skip_us; ++k) {
+ if (random_.Rand<double>() < kChanceOfResetPerUs) {
+ reset_us = static_cast<int64_t>(2 * random_.Rand<float>() *
+ kMaxAbsResetUs) -
+ kMaxAbsResetUs;
+ clock_offset_us += reset_us;
+ time_since_reset_us = 0;
+ break;
+ }
+ }
+ }
+ pregenerated_clock_.emplace_back(GetClockUs() + clock_offset_us);
+ resets_us_.emplace_back(reset_us);
+ }
+ }
+
+ int64_t Query(int64_t time_us) {
+ size_t ixStart =
+ (last_reset_query_time_us_ + (kResolutionUs >> 1)) / kResolutionUs + 1;
+ size_t ixEnd = (time_us + (kResolutionUs >> 1)) / kResolutionUs;
+ if (ixEnd >= pregenerated_clock_.size())
+ return -1;
+ last_reset_size_us_ = 0;
+ for (size_t ix = ixStart; ix <= ixEnd; ++ix) {
+ if (resets_us_[ix] != 0) {
+ last_reset_size_us_ = resets_us_[ix];
+ }
+ }
+ last_reset_query_time_us_ = time_us;
+ return pregenerated_clock_[ixEnd];
+ }
+
+ bool WasReset() const { return last_reset_size_us_ != 0; }
+ bool WasNegativeReset() const { return last_reset_size_us_ < 0; }
+ int64_t GetLastResetUs() const { return last_reset_size_us_; }
+
+ private:
+ const float kChanceOfResetPerUs = 1e-6f;
+ const int64_t kMaxAbsResetUs = rtc::kNumMicrosecsPerSec;
+ const int64_t kMinTimeBetweenResetsUs = 3 * rtc::kNumMicrosecsPerSec;
+ const int64_t kResolutionUs = rtc::kNumMicrosecsPerMillisec;
+ int64_t last_reset_query_time_us_ = 0;
+ int64_t last_reset_size_us_ = 0;
+ std::vector<int64_t> pregenerated_clock_;
+ std::vector<int64_t> resets_us_;
+};
+
+TEST(ClockRepair, NoClockDrift) {
+ webrtc::test::ScopedKeyValueConfig field_trials;
+ const int kSeeds = 10;
+ const int kFirstSeed = 1;
+ const int64_t kRuntimeUs = 10 * rtc::kNumMicrosecsPerSec;
+ const float kDrift = 0.0f;
+ const int64_t kMaxPacketInterarrivalUs = 50 * rtc::kNumMicrosecsPerMillisec;
+ for (int seed = kFirstSeed; seed < kSeeds + kFirstSeed; ++seed) {
+ EmulatedMonotoneousClock monotone_clock(seed);
+ EmulatedNonMonotoneousClock non_monotone_clock(
+ seed + 1, kRuntimeUs + rtc::kNumMicrosecsPerSec, kDrift);
+ ReceiveTimeCalculator reception_time_tracker(field_trials);
+ int64_t corrected_clock_0 = 0;
+ int64_t reset_during_stall_tol_us = 0;
+ bool initial_clock_stall = true;
+ int64_t accumulated_upper_bound_tolerance_us = 0;
+ int64_t accumulated_lower_bound_tolerance_us = 0;
+ Random random(1);
+ monotone_clock.ForceStallUs();
+ int64_t last_time_us = 0;
+ bool add_tolerance_on_next_packet = false;
+ int64_t monotone_noise_us = 1000;
+
+ for (int64_t time_us = 0; time_us < kRuntimeUs;
+ time_us += static_cast<int64_t>(random.Rand<float>() *
+ kMaxPacketInterarrivalUs)) {
+ int64_t socket_time_us = non_monotone_clock.Query(time_us);
+ int64_t monotone_us = monotone_clock.Query(time_us) +
+ 2 * random.Rand<float>() * monotone_noise_us -
+ monotone_noise_us;
+ int64_t system_time_us = non_monotone_clock.Query(
+ time_us + monotone_clock.GetRemainingStall(time_us));
+
+ int64_t corrected_clock_us = reception_time_tracker.ReconcileReceiveTimes(
+ socket_time_us, system_time_us, monotone_us);
+ if (time_us == 0)
+ corrected_clock_0 = corrected_clock_us;
+
+ if (add_tolerance_on_next_packet)
+ accumulated_lower_bound_tolerance_us -= (time_us - last_time_us);
+
+      // Perfect repair cannot be achieved if the non-monotone clock resets
+      // during a monotone clock stall.
+ add_tolerance_on_next_packet = false;
+ if (monotone_clock.Stalled() && non_monotone_clock.WasReset()) {
+ reset_during_stall_tol_us =
+ std::max(reset_during_stall_tol_us, time_us - last_time_us);
+ if (non_monotone_clock.WasNegativeReset()) {
+ add_tolerance_on_next_packet = true;
+ }
+ if (initial_clock_stall && !non_monotone_clock.WasNegativeReset()) {
+ // Positive resets during an initial clock stall cannot be repaired
+ // and error will propagate through rest of trace.
+ accumulated_upper_bound_tolerance_us +=
+ std::abs(non_monotone_clock.GetLastResetUs());
+ }
+ } else {
+ reset_during_stall_tol_us = 0;
+ initial_clock_stall = false;
+ }
+ int64_t err = corrected_clock_us - corrected_clock_0 - time_us;
+
+ // Resets during stalls may lead to small errors temporarily.
+ int64_t lower_tol_us = accumulated_lower_bound_tolerance_us -
+ reset_during_stall_tol_us - monotone_noise_us -
+ 2 * rtc::kNumMicrosecsPerMillisec;
+ EXPECT_GE(err, lower_tol_us);
+ int64_t upper_tol_us = accumulated_upper_bound_tolerance_us +
+ monotone_noise_us +
+ 2 * rtc::kNumMicrosecsPerMillisec;
+ EXPECT_LE(err, upper_tol_us);
+
+ last_time_us = time_us;
+ }
+ }
+}
+} // namespace
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/rtp_bitrate_configurator.cc b/third_party/libwebrtc/call/rtp_bitrate_configurator.cc
new file mode 100644
index 0000000000..264dcdcb81
--- /dev/null
+++ b/third_party/libwebrtc/call/rtp_bitrate_configurator.cc
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/rtp_bitrate_configurator.h"
+
+#include <algorithm>
+
+#include "rtc_base/checks.h"
+
+namespace {
+
+// Returns its smallest positive argument. If neither argument is positive,
+// returns an arbitrary nonpositive value.
+int MinPositive(int a, int b) {
+  if (a <= 0) {
+    // `a` is not a candidate; `b` wins regardless of its sign.
+    return b;
+  }
+  if (b <= 0) {
+    // `b` is not a candidate; `a` is known positive here.
+    return a;
+  }
+  // Both positive: take the smaller.
+  return std::min(a, b);
+}
+
+} // namespace
+
+namespace webrtc {
+RtpBitrateConfigurator::RtpBitrateConfigurator(
+    const BitrateConstraints& bitrate_config)
+    : bitrate_config_(bitrate_config), base_bitrate_config_(bitrate_config) {
+  // The initial config must satisfy 0 <= min <= start, and start <= max
+  // whenever a max is set (-1 means "no max").
+  RTC_DCHECK_GE(bitrate_config.min_bitrate_bps, 0);
+  RTC_DCHECK_GE(bitrate_config.start_bitrate_bps,
+                bitrate_config.min_bitrate_bps);
+  if (bitrate_config.max_bitrate_bps != -1) {
+    RTC_DCHECK_GE(bitrate_config.max_bitrate_bps,
+                  bitrate_config.start_bitrate_bps);
+  }
+}
+
+RtpBitrateConfigurator::~RtpBitrateConfigurator() = default;
+
+// Returns the currently effective constraints (the remote/base config
+// combined with the client-preference mask and any relay cap).
+BitrateConstraints RtpBitrateConfigurator::GetConfig() const {
+  return bitrate_config_;
+}
+
+// Replaces the SDP-derived base config, recomputes the effective constraints
+// and returns them if anything changed.
+absl::optional<BitrateConstraints>
+RtpBitrateConfigurator::UpdateWithSdpParameters(
+    const BitrateConstraints& bitrate_config) {
+  // Callers must respect: min >= 0, start != 0, max == -1 || max > 0.
+  RTC_DCHECK_GE(bitrate_config.min_bitrate_bps, 0);
+  RTC_DCHECK_NE(bitrate_config.start_bitrate_bps, 0);
+  if (bitrate_config.max_bitrate_bps != -1) {
+    RTC_DCHECK_GT(bitrate_config.max_bitrate_bps, 0);
+  }
+
+  absl::optional<int> new_start;
+  // Only update the "start" bitrate if it's set, and different from the old
+  // value. In practice, this value comes from the x-google-start-bitrate codec
+  // parameter in SDP, and setting the same remote description twice shouldn't
+  // restart bandwidth estimation.
+  if (bitrate_config.start_bitrate_bps != -1 &&
+      bitrate_config.start_bitrate_bps !=
+          base_bitrate_config_.start_bitrate_bps) {
+    new_start.emplace(bitrate_config.start_bitrate_bps);
+  }
+  base_bitrate_config_ = bitrate_config;
+  return UpdateConstraints(new_start);
+}
+
+// Replaces the client-preference mask, recomputes the effective constraints
+// and returns them if anything changed.
+absl::optional<BitrateConstraints>
+RtpBitrateConfigurator::UpdateWithClientPreferences(
+    const BitrateSettings& bitrate_mask) {
+  // Remember the mask so later SDP/relay updates keep honoring it.
+  bitrate_config_mask_ = bitrate_mask;
+  return UpdateConstraints(bitrate_mask.start_bitrate_bps);
+}
+
+// Relay cap can change only max bitrate; min and start are untouched.
+absl::optional<BitrateConstraints> RtpBitrateConfigurator::UpdateWithRelayCap(
+    DataRate cap) {
+  if (cap.IsFinite()) {
+    // A finite cap of zero is not allowed.
+    RTC_DCHECK(!cap.IsZero());
+  }
+  // PlusInfinity() means "no cap"; bps_or(-1) in UpdateConstraints() maps it
+  // to the -1 "unset" sentinel used by MinPositive().
+  max_bitrate_over_relay_ = cap;
+  return UpdateConstraints(absl::nullopt);
+}
+
+// Recomputes the effective constraints from the base config, the client mask
+// and the relay cap. Returns the new constraints when min/max changed or a
+// new start value was supplied; absl::nullopt otherwise.
+absl::optional<BitrateConstraints> RtpBitrateConfigurator::UpdateConstraints(
+    const absl::optional<int>& new_start) {
+  BitrateConstraints updated;
+  // Effective min: the larger of the client-mask min and the SDP-provided min.
+  updated.min_bitrate_bps =
+      std::max(bitrate_config_mask_.min_bitrate_bps.value_or(0),
+               base_bitrate_config_.min_bitrate_bps);
+
+  // Effective max: the smallest positive among mask max, SDP max and relay
+  // cap; -1 means "unset" and never wins over a positive value.
+  updated.max_bitrate_bps =
+      MinPositive(bitrate_config_mask_.max_bitrate_bps.value_or(-1),
+                  base_bitrate_config_.max_bitrate_bps);
+  updated.max_bitrate_bps =
+      MinPositive(updated.max_bitrate_bps, max_bitrate_over_relay_.bps_or(-1));
+
+  // If the combined min ends up greater than the combined max, the max takes
+  // priority.
+  if (updated.max_bitrate_bps != -1 &&
+      updated.min_bitrate_bps > updated.max_bitrate_bps) {
+    updated.min_bitrate_bps = updated.max_bitrate_bps;
+  }
+
+  // If there is nothing to update (min/max unchanged, no new bandwidth
+  // estimation start value), return early.
+  if (updated.min_bitrate_bps == bitrate_config_.min_bitrate_bps &&
+      updated.max_bitrate_bps == bitrate_config_.max_bitrate_bps &&
+      !new_start) {
+    return absl::nullopt;
+  }
+
+  if (new_start) {
+    // Clamp start by min and max.
+    updated.start_bitrate_bps = MinPositive(
+        std::max(*new_start, updated.min_bitrate_bps), updated.max_bitrate_bps);
+  } else {
+    updated.start_bitrate_bps = -1;
+  }
+  // The caller sees -1 ("no new start") when none was supplied, but the
+  // cached config keeps the previously applied start so GetConfig() still
+  // reports the last real start value.
+  BitrateConstraints config_to_return = updated;
+  if (!new_start) {
+    updated.start_bitrate_bps = bitrate_config_.start_bitrate_bps;
+  }
+  bitrate_config_ = updated;
+  return config_to_return;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/rtp_bitrate_configurator.h b/third_party/libwebrtc/call/rtp_bitrate_configurator.h
new file mode 100644
index 0000000000..5cb779a3b3
--- /dev/null
+++ b/third_party/libwebrtc/call/rtp_bitrate_configurator.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef CALL_RTP_BITRATE_CONFIGURATOR_H_
+#define CALL_RTP_BITRATE_CONFIGURATOR_H_
+
+#include "absl/types/optional.h"
+#include "api/transport/bitrate_settings.h"
+#include "api/units/data_rate.h"
+
+namespace webrtc {
+
+// RtpBitrateConfigurator calculates the bitrate configuration based on received
+// remote configuration combined with local overrides.
+class RtpBitrateConfigurator {
+ public:
+  explicit RtpBitrateConfigurator(const BitrateConstraints& bitrate_config);
+  ~RtpBitrateConfigurator();
+
+  RtpBitrateConfigurator(const RtpBitrateConfigurator&) = delete;
+  RtpBitrateConfigurator& operator=(const RtpBitrateConfigurator&) = delete;
+
+  BitrateConstraints GetConfig() const;
+
+  // The greater min and smaller max set by this and SetClientBitratePreferences
+  // will be used. The latest non-negative start value from either call will be
+  // used. Specifying a start bitrate (>0) will reset the current bitrate
+  // estimate. This is due to how the 'x-google-start-bitrate' flag is currently
+  // implemented. Passing -1 leaves the start bitrate unchanged. Behavior is not
+  // guaranteed for other negative values or 0.
+  // The optional return value is set with new configuration if it was updated.
+  absl::optional<BitrateConstraints> UpdateWithSdpParameters(
+      const BitrateConstraints& bitrate_config);
+
+  // The greater min and smaller max set by this and SetSdpBitrateParameters
+  // will be used. The latest non-negative start value from either call will be
+  // used. Specifying a start bitrate will reset the current bitrate estimate.
+  // Assumes 0 <= min <= start <= max holds for set parameters.
+  // Update the bitrate configuration
+  // The optional return value is set with new configuration if it was updated.
+  absl::optional<BitrateConstraints> UpdateWithClientPreferences(
+      const BitrateSettings& bitrate_mask);
+
+  // Apply a cap for relayed calls.
+  absl::optional<BitrateConstraints> UpdateWithRelayCap(DataRate cap);
+
+ private:
+  // Applies update to the BitrateConstraints cached in `bitrate_config_`,
+  // resetting with `new_start` if set.
+  absl::optional<BitrateConstraints> UpdateConstraints(
+      const absl::optional<int>& new_start);
+
+  // Bitrate config used until valid bitrate estimates are calculated. Also
+  // used to cap total bitrate used. This comes from the remote connection.
+  BitrateConstraints bitrate_config_;
+
+  // The config mask set by SetClientBitratePreferences.
+  // 0 <= min <= start <= max
+  BitrateSettings bitrate_config_mask_;
+
+  // The config set by SetSdpBitrateParameters.
+  // min >= 0, start != 0, max == -1 || max > 0
+  BitrateConstraints base_bitrate_config_;
+
+  // Bandwidth cap applied for relayed calls.
+  DataRate max_bitrate_over_relay_ = DataRate::PlusInfinity();
+};
+} // namespace webrtc
+
+#endif // CALL_RTP_BITRATE_CONFIGURATOR_H_
diff --git a/third_party/libwebrtc/call/rtp_bitrate_configurator_unittest.cc b/third_party/libwebrtc/call/rtp_bitrate_configurator_unittest.cc
new file mode 100644
index 0000000000..6449a1a0f5
--- /dev/null
+++ b/third_party/libwebrtc/call/rtp_bitrate_configurator_unittest.cc
@@ -0,0 +1,300 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "call/rtp_bitrate_configurator.h"
+
+#include <memory>
+
+#include "test/gtest.h"
+
+namespace webrtc {
+using absl::nullopt;
+
+// Fixture owning an RtpBitrateConfigurator, plus helpers that apply an update
+// and verify the returned constraints field by field. Passing nullopt for an
+// expectation skips the check of that field.
+class RtpBitrateConfiguratorTest : public ::testing::Test {
+ public:
+  RtpBitrateConfiguratorTest()
+      : configurator_(new RtpBitrateConfigurator(BitrateConstraints())) {}
+  std::unique_ptr<RtpBitrateConfigurator> configurator_;
+  // Applies `bitrate_config` via UpdateWithSdpParameters and expects a new
+  // config whose fields match the non-nullopt expectations.
+  void UpdateConfigMatches(BitrateConstraints bitrate_config,
+                           absl::optional<int> min_bitrate_bps,
+                           absl::optional<int> start_bitrate_bps,
+                           absl::optional<int> max_bitrate_bps) {
+    absl::optional<BitrateConstraints> result =
+        configurator_->UpdateWithSdpParameters(bitrate_config);
+    EXPECT_TRUE(result.has_value());
+    if (start_bitrate_bps.has_value())
+      EXPECT_EQ(result->start_bitrate_bps, start_bitrate_bps);
+    if (min_bitrate_bps.has_value())
+      EXPECT_EQ(result->min_bitrate_bps, min_bitrate_bps);
+    if (max_bitrate_bps.has_value())
+      EXPECT_EQ(result->max_bitrate_bps, max_bitrate_bps);
+  }
+
+  // Same as UpdateConfigMatches, but applies `bitrate_mask` via
+  // UpdateWithClientPreferences.
+  void UpdateMaskMatches(BitrateSettings bitrate_mask,
+                         absl::optional<int> min_bitrate_bps,
+                         absl::optional<int> start_bitrate_bps,
+                         absl::optional<int> max_bitrate_bps) {
+    absl::optional<BitrateConstraints> result =
+        configurator_->UpdateWithClientPreferences(bitrate_mask);
+    EXPECT_TRUE(result.has_value());
+    if (start_bitrate_bps.has_value())
+      EXPECT_EQ(result->start_bitrate_bps, start_bitrate_bps);
+    if (min_bitrate_bps.has_value())
+      EXPECT_EQ(result->min_bitrate_bps, min_bitrate_bps);
+    if (max_bitrate_bps.has_value())
+      EXPECT_EQ(result->max_bitrate_bps, max_bitrate_bps);
+  }
+};
+
+// All three fields differ from the default config, so a new config is
+// returned and echoes them.
+TEST_F(RtpBitrateConfiguratorTest, NewConfigWithValidConfigReturnsNewConfig) {
+  BitrateConstraints bitrate_config;
+  bitrate_config.min_bitrate_bps = 1;
+  bitrate_config.start_bitrate_bps = 2;
+  bitrate_config.max_bitrate_bps = 3;
+
+  UpdateConfigMatches(bitrate_config, 1, 2, 3);
+}
+
+// Changing only `min`: start is reported as -1 (no new start value).
+TEST_F(RtpBitrateConfiguratorTest, NewConfigWithDifferentMinReturnsNewConfig) {
+  BitrateConstraints bitrate_config;
+  bitrate_config.min_bitrate_bps = 10;
+  bitrate_config.start_bitrate_bps = 20;
+  bitrate_config.max_bitrate_bps = 30;
+  configurator_.reset(new RtpBitrateConfigurator(bitrate_config));
+
+  bitrate_config.min_bitrate_bps = 11;
+  UpdateConfigMatches(bitrate_config, 11, -1, 30);
+}
+
+TEST_F(RtpBitrateConfiguratorTest,
+       NewConfigWithDifferentStartReturnsNewConfig) {
+  BitrateConstraints bitrate_config;
+  bitrate_config.min_bitrate_bps = 10;
+  bitrate_config.start_bitrate_bps = 20;
+  bitrate_config.max_bitrate_bps = 30;
+  configurator_.reset(new RtpBitrateConfigurator(bitrate_config));
+
+  bitrate_config.start_bitrate_bps = 21;
+  UpdateConfigMatches(bitrate_config, 10, 21, 30);
+}
+
+TEST_F(RtpBitrateConfiguratorTest, NewConfigWithDifferentMaxReturnsNewConfig) {
+  BitrateConstraints bitrate_config;
+  bitrate_config.min_bitrate_bps = 10;
+  bitrate_config.start_bitrate_bps = 20;
+  bitrate_config.max_bitrate_bps = 30;
+  configurator_.reset(new RtpBitrateConfigurator(bitrate_config));
+
+  bitrate_config.max_bitrate_bps = 31;
+  UpdateConfigMatches(bitrate_config, 10, -1, 31);
+}
+
+// Re-applying the identical SDP config must not return a new config.
+TEST_F(RtpBitrateConfiguratorTest, NewConfigWithSameConfigElidesSecondCall) {
+  BitrateConstraints bitrate_config;
+  bitrate_config.min_bitrate_bps = 1;
+  bitrate_config.start_bitrate_bps = 2;
+  bitrate_config.max_bitrate_bps = 3;
+
+  UpdateConfigMatches(bitrate_config, 1, 2, 3);
+  EXPECT_FALSE(
+      configurator_->UpdateWithSdpParameters(bitrate_config).has_value());
+}
+
+TEST_F(RtpBitrateConfiguratorTest,
+       NewConfigWithSameMinMaxAndNegativeStartElidesSecondCall) {
+  BitrateConstraints bitrate_config;
+  bitrate_config.min_bitrate_bps = 1;
+  bitrate_config.start_bitrate_bps = 2;
+  bitrate_config.max_bitrate_bps = 3;
+
+  UpdateConfigMatches(bitrate_config, 1, 2, 3);
+
+  bitrate_config.start_bitrate_bps = -1;
+  EXPECT_FALSE(
+      configurator_->UpdateWithSdpParameters(bitrate_config).has_value());
+}
+
+TEST_F(RtpBitrateConfiguratorTest, BiggerMaskMinUsed) {
+  BitrateSettings mask;
+  mask.min_bitrate_bps = 1234;
+  UpdateMaskMatches(mask, *mask.min_bitrate_bps, nullopt, nullopt);
+}
+
+TEST_F(RtpBitrateConfiguratorTest, BiggerConfigMinUsed) {
+  BitrateSettings mask;
+  mask.min_bitrate_bps = 1000;
+  UpdateMaskMatches(mask, 1000, nullopt, nullopt);
+
+  BitrateConstraints config;
+  config.min_bitrate_bps = 1234;
+  UpdateConfigMatches(config, 1234, nullopt, nullopt);
+}
+
+// The last call to set start should be used.
+TEST_F(RtpBitrateConfiguratorTest, LatestStartMaskPreferred) {
+  BitrateSettings mask;
+  mask.start_bitrate_bps = 1300;
+  UpdateMaskMatches(mask, nullopt, *mask.start_bitrate_bps, nullopt);
+
+  BitrateConstraints bitrate_config;
+  bitrate_config.start_bitrate_bps = 1200;
+
+  UpdateConfigMatches(bitrate_config, nullopt, bitrate_config.start_bitrate_bps,
+                      nullopt);
+}
+
+TEST_F(RtpBitrateConfiguratorTest, SmallerMaskMaxUsed) {
+  BitrateConstraints bitrate_config;
+  bitrate_config.max_bitrate_bps = bitrate_config.start_bitrate_bps + 2000;
+  configurator_.reset(new RtpBitrateConfigurator(bitrate_config));
+
+  BitrateSettings mask;
+  mask.max_bitrate_bps = bitrate_config.start_bitrate_bps + 1000;
+
+  UpdateMaskMatches(mask, nullopt, nullopt, *mask.max_bitrate_bps);
+}
+
+TEST_F(RtpBitrateConfiguratorTest, SmallerConfigMaxUsed) {
+  BitrateConstraints bitrate_config;
+  bitrate_config.max_bitrate_bps = bitrate_config.start_bitrate_bps + 1000;
+  configurator_.reset(new RtpBitrateConfigurator(bitrate_config));
+
+  BitrateSettings mask;
+  mask.max_bitrate_bps = bitrate_config.start_bitrate_bps + 2000;
+
+  // Expect no return because nothing changes
+  EXPECT_FALSE(configurator_->UpdateWithClientPreferences(mask).has_value());
+}
+
+// The mask's start is clamped up to the config's min.
+TEST_F(RtpBitrateConfiguratorTest, MaskStartLessThanConfigMinClamped) {
+  BitrateConstraints bitrate_config;
+  bitrate_config.min_bitrate_bps = 2000;
+  configurator_.reset(new RtpBitrateConfigurator(bitrate_config));
+
+  BitrateSettings mask;
+  mask.start_bitrate_bps = 1000;
+  UpdateMaskMatches(mask, 2000, 2000, nullopt);
+}
+
+// The mask provides no new start value, so the returned start is -1 even
+// though the cached start exceeds the new max.
+TEST_F(RtpBitrateConfiguratorTest, MaskStartGreaterThanConfigMaxClamped) {
+  BitrateConstraints bitrate_config;
+  bitrate_config.start_bitrate_bps = 2000;
+  configurator_.reset(new RtpBitrateConfigurator(bitrate_config));
+
+  BitrateSettings mask;
+  mask.max_bitrate_bps = 1000;
+
+  UpdateMaskMatches(mask, nullopt, -1, 1000);
+}
+
+// When the combined min exceeds the combined max, the max takes priority.
+TEST_F(RtpBitrateConfiguratorTest, MaskMinGreaterThanConfigMaxClamped) {
+  BitrateConstraints bitrate_config;
+  bitrate_config.min_bitrate_bps = 2000;
+  configurator_.reset(new RtpBitrateConfigurator(bitrate_config));
+
+  BitrateSettings mask;
+  mask.max_bitrate_bps = 1000;
+
+  UpdateMaskMatches(mask, 1000, nullopt, 1000);
+}
+
+TEST_F(RtpBitrateConfiguratorTest, SettingMaskStartForcesUpdate) {
+  BitrateSettings mask;
+  mask.start_bitrate_bps = 1000;
+
+  // Config should be returned twice with the same params since
+  // start_bitrate_bps is set.
+  UpdateMaskMatches(mask, nullopt, 1000, nullopt);
+  UpdateMaskMatches(mask, nullopt, 1000, nullopt);
+}
+
+TEST_F(RtpBitrateConfiguratorTest, NewConfigWithNoChangesDoesNotCallNewConfig) {
+  BitrateConstraints config1;
+  config1.min_bitrate_bps = 0;
+  config1.start_bitrate_bps = 1000;
+  config1.max_bitrate_bps = -1;
+
+  BitrateConstraints config2;
+  config2.min_bitrate_bps = 0;
+  config2.start_bitrate_bps = -1;
+  config2.max_bitrate_bps = -1;
+
+  // The second call should not return anything because it doesn't
+  // change any values.
+  UpdateConfigMatches(config1, 0, 1000, -1);
+  EXPECT_FALSE(configurator_->UpdateWithSdpParameters(config2).has_value());
+}
+
+// If config changes the max, but not the effective max,
+// new config shouldn't be returned, to avoid unnecessary encoder
+// reconfigurations.
+TEST_F(RtpBitrateConfiguratorTest,
+       NewConfigNotReturnedWhenEffectiveMaxUnchanged) {
+  BitrateConstraints config;
+  config.min_bitrate_bps = 0;
+  config.start_bitrate_bps = -1;
+  config.max_bitrate_bps = 2000;
+  UpdateConfigMatches(config, nullopt, nullopt, 2000);
+
+  // Reduce effective max to 1000 with the mask.
+  BitrateSettings mask;
+  mask.max_bitrate_bps = 1000;
+  UpdateMaskMatches(mask, nullopt, nullopt, 1000);
+
+  // This leaves the effective max unchanged, so new config shouldn't be
+  // returned again.
+  config.max_bitrate_bps = 1000;
+  EXPECT_FALSE(configurator_->UpdateWithSdpParameters(config).has_value());
+}
+
+// When the "start bitrate" mask is removed, new config shouldn't be returned
+// again, since nothing's changing.
+TEST_F(RtpBitrateConfiguratorTest, NewConfigNotReturnedWhenStartMaskRemoved) {
+  BitrateSettings mask;
+  mask.start_bitrate_bps = 1000;
+  UpdateMaskMatches(mask, 0, 1000, -1);
+
+  mask.start_bitrate_bps.reset();
+  EXPECT_FALSE(configurator_->UpdateWithClientPreferences(mask).has_value());
+}
+
+// Test that if a new config is returned after BitrateSettings applies a
+// "start" value, the new config won't return that start value a
+// second time.
+TEST_F(RtpBitrateConfiguratorTest, NewConfigAfterBitrateConfigMaskWithStart) {
+  BitrateSettings mask;
+  mask.start_bitrate_bps = 1000;
+  UpdateMaskMatches(mask, 0, 1000, -1);
+
+  BitrateConstraints config;
+  config.min_bitrate_bps = 0;
+  config.start_bitrate_bps = -1;
+  config.max_bitrate_bps = 5000;
+  // The start value isn't changing, so new config should be returned with
+  // -1.
+  UpdateConfigMatches(config, 0, -1, 5000);
+}
+
+TEST_F(RtpBitrateConfiguratorTest,
+       NewConfigNotReturnedWhenClampedMinUnchanged) {
+  BitrateConstraints bitrate_config;
+  bitrate_config.start_bitrate_bps = 500;
+  bitrate_config.max_bitrate_bps = 1000;
+  configurator_.reset(new RtpBitrateConfigurator(bitrate_config));
+
+  // Set min to 2000; it is clamped to the max (1000).
+  BitrateSettings mask;
+  mask.min_bitrate_bps = 2000;
+  UpdateMaskMatches(mask, 1000, -1, 1000);
+
+  // Set min to 3000; the clamped value stays the same so nothing happens.
+  mask.min_bitrate_bps = 3000;
+  EXPECT_FALSE(configurator_->UpdateWithClientPreferences(mask).has_value());
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/rtp_config.cc b/third_party/libwebrtc/call/rtp_config.cc
new file mode 100644
index 0000000000..5457a94696
--- /dev/null
+++ b/third_party/libwebrtc/call/rtp_config.cc
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/rtp_config.h"
+
+#include <cstdint>
+
+#include "absl/algorithm/container.h"
+#include "api/array_view.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/strings/string_builder.h"
+
+namespace webrtc {
+
+namespace {
+
+// Returns associated_ssrcs[i] for the index i where ssrcs[i] == ssrc. The two
+// vectors must be parallel (same size) and `ssrc` must be present in `ssrcs`.
+uint32_t FindAssociatedSsrc(uint32_t ssrc,
+                            const std::vector<uint32_t>& ssrcs,
+                            const std::vector<uint32_t>& associated_ssrcs) {
+  RTC_DCHECK_EQ(ssrcs.size(), associated_ssrcs.size());
+  for (size_t i = 0; i < ssrcs.size(); ++i) {
+    if (ssrcs[i] == ssrc)
+      return associated_ssrcs[i];
+  }
+  // Callers guarantee `ssrc` is present; reaching here is a bug.
+  RTC_DCHECK_NOTREACHED();
+  return 0;
+}
+
+} // namespace
+
+// Logging helpers: render each sub-config as a compact one-line string.
+std::string LntfConfig::ToString() const {
+  return enabled ? "{enabled: true}" : "{enabled: false}";
+}
+
+std::string NackConfig::ToString() const {
+  char buf[1024];
+  rtc::SimpleStringBuilder ss(buf);
+  ss << "{rtp_history_ms: " << rtp_history_ms;
+  ss << '}';
+  return ss.str();
+}
+
+std::string UlpfecConfig::ToString() const {
+  char buf[1024];
+  rtc::SimpleStringBuilder ss(buf);
+  ss << "{ulpfec_payload_type: " << ulpfec_payload_type;
+  ss << ", red_payload_type: " << red_payload_type;
+  ss << ", red_rtx_payload_type: " << red_rtx_payload_type;
+  ss << '}';
+  return ss.str();
+}
+
+// Equality compares all three payload-type fields.
+bool UlpfecConfig::operator==(const UlpfecConfig& other) const {
+  return ulpfec_payload_type == other.ulpfec_payload_type &&
+         red_payload_type == other.red_payload_type &&
+         red_rtx_payload_type == other.red_rtx_payload_type;
+}
+
+// Defaulted special member functions, defined out of line.
+RtpConfig::RtpConfig() = default;
+RtpConfig::RtpConfig(const RtpConfig&) = default;
+RtpConfig::~RtpConfig() = default;
+
+RtpConfig::Flexfec::Flexfec() = default;
+RtpConfig::Flexfec::Flexfec(const Flexfec&) = default;
+RtpConfig::Flexfec::~Flexfec() = default;
+
+// Renders the full RTP send configuration as one human-readable string for
+// logging. NOTE(review): assumes the 2 KB buffer is enough for typical
+// ssrc/rid/extension list sizes — confirm SimpleStringBuilder's overflow
+// behavior if configs can be larger.
+std::string RtpConfig::ToString() const {
+  char buf[2 * 1024];
+  rtc::SimpleStringBuilder ss(buf);
+  ss << "{ssrcs: [";
+  for (size_t i = 0; i < ssrcs.size(); ++i) {
+    ss << ssrcs[i];
+    if (i != ssrcs.size() - 1)
+      ss << ", ";
+  }
+  ss << "], rids: [";
+  for (size_t i = 0; i < rids.size(); ++i) {
+    ss << rids[i];
+    if (i != rids.size() - 1)
+      ss << ", ";
+  }
+  ss << "], mid: '" << mid << "'";
+  ss << ", rtcp_mode: "
+     << (rtcp_mode == RtcpMode::kCompound ? "RtcpMode::kCompound"
+                                          : "RtcpMode::kReducedSize");
+  ss << ", max_packet_size: " << max_packet_size;
+  ss << ", extmap-allow-mixed: " << (extmap_allow_mixed ? "true" : "false");
+  ss << ", extensions: [";
+  for (size_t i = 0; i < extensions.size(); ++i) {
+    ss << extensions[i].ToString();
+    if (i != extensions.size() - 1)
+      ss << ", ";
+  }
+  ss << ']';
+
+  ss << ", lntf: " << lntf.ToString();
+  ss << ", nack: {rtp_history_ms: " << nack.rtp_history_ms << '}';
+  ss << ", ulpfec: " << ulpfec.ToString();
+  ss << ", payload_name: " << payload_name;
+  ss << ", payload_type: " << payload_type;
+  ss << ", raw_payload: " << (raw_payload ? "true" : "false");
+
+  ss << ", flexfec: {payload_type: " << flexfec.payload_type;
+  ss << ", ssrc: " << flexfec.ssrc;
+  ss << ", protected_media_ssrcs: [";
+  for (size_t i = 0; i < flexfec.protected_media_ssrcs.size(); ++i) {
+    ss << flexfec.protected_media_ssrcs[i];
+    if (i != flexfec.protected_media_ssrcs.size() - 1)
+      ss << ", ";
+  }
+  ss << "]}";
+
+  ss << ", rtx: " << rtx.ToString();
+  ss << ", c_name: " << c_name;
+  ss << '}';
+  return ss.str();
+}
+
+RtpConfig::Rtx::Rtx() = default;
+RtpConfig::Rtx::Rtx(const Rtx&) = default;
+RtpConfig::Rtx::~Rtx() = default;
+
+// Logging helper: renders the RTX SSRC list and payload type.
+std::string RtpConfig::Rtx::ToString() const {
+  char buf[1024];
+  rtc::SimpleStringBuilder ss(buf);
+  ss << "{ssrcs: [";
+  for (size_t i = 0; i < ssrcs.size(); ++i) {
+    ss << ssrcs[i];
+    if (i != ssrcs.size() - 1)
+      ss << ", ";
+  }
+  ss << ']';
+
+  ss << ", payload_type: " << payload_type;
+  ss << '}';
+  return ss.str();
+}
+
+// True if `ssrc` is one of the primary media SSRCs.
+bool RtpConfig::IsMediaSsrc(uint32_t ssrc) const {
+  return absl::c_linear_search(ssrcs, ssrc);
+}
+
+// True if `ssrc` is one of the RTX (retransmission) SSRCs.
+bool RtpConfig::IsRtxSsrc(uint32_t ssrc) const {
+  return absl::c_linear_search(rtx.ssrcs, ssrc);
+}
+
+// True if FlexFEC is enabled (payload type set) and `ssrc` is its SSRC.
+bool RtpConfig::IsFlexfecSsrc(uint32_t ssrc) const {
+  return flexfec.payload_type != -1 && ssrc == flexfec.ssrc;
+}
+
+// Maps a media SSRC to its paired RTX SSRC, or nullopt when RTX is unused.
+absl::optional<uint32_t> RtpConfig::GetRtxSsrcAssociatedWithMediaSsrc(
+    uint32_t media_ssrc) const {
+  RTC_DCHECK(IsMediaSsrc(media_ssrc));
+  // If we don't use RTX there is no association.
+  if (rtx.ssrcs.empty())
+    return absl::nullopt;
+  // If we use RTX there MUST be an association ssrcs[i] <-> rtx.ssrcs[i].
+  RTC_DCHECK_EQ(ssrcs.size(), rtx.ssrcs.size());
+  return FindAssociatedSsrc(media_ssrc, ssrcs, rtx.ssrcs);
+}
+
+// Inverse of the above: maps an RTX SSRC back to its media SSRC.
+uint32_t RtpConfig::GetMediaSsrcAssociatedWithRtxSsrc(uint32_t rtx_ssrc) const {
+  RTC_DCHECK(IsRtxSsrc(rtx_ssrc));
+  // If we use RTX there MUST be an association ssrcs[i] <-> rtx.ssrcs[i].
+  RTC_DCHECK_EQ(ssrcs.size(), rtx.ssrcs.size());
+  return FindAssociatedSsrc(rtx_ssrc, rtx.ssrcs, ssrcs);
+}
+
+// Returns the single media SSRC protected by the FlexFEC stream.
+uint32_t RtpConfig::GetMediaSsrcAssociatedWithFlexfecSsrc(
+    uint32_t flexfec_ssrc) const {
+  RTC_DCHECK(IsFlexfecSsrc(flexfec_ssrc));
+  // If we use FlexFEC there MUST be an associated media ssrc.
+  //
+  // TODO(brandtr/hbos): The current implementation only supports an association
+  // with a single media ssrc. If multiple ssrcs are to be supported in the
+  // future, in order not to break GetStats()'s packet and byte counters, we
+  // must be able to tell how many packets and bytes have contributed to which
+  // SSRC.
+  RTC_DCHECK_EQ(1u, flexfec.protected_media_ssrcs.size());
+  uint32_t media_ssrc = flexfec.protected_media_ssrcs[0];
+  RTC_DCHECK(IsMediaSsrc(media_ssrc));
+  return media_ssrc;
+}
+
+// Returns the RID at the same index as `ssrc` in `ssrcs`, if any. `rids` may
+// be shorter than `ssrcs`, in which case trailing SSRCs carry no RID.
+absl::optional<std::string> RtpConfig::GetRidForSsrc(uint32_t ssrc) const {
+  auto it = std::find(ssrcs.begin(), ssrcs.end(), ssrc);
+  if (it != ssrcs.end()) {
+    size_t ssrc_index = std::distance(ssrcs.begin(), it);
+    if (ssrc_index < rids.size()) {
+      return rids[ssrc_index];
+    }
+  }
+  return absl::nullopt;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/rtp_config.h b/third_party/libwebrtc/call/rtp_config.h
new file mode 100644
index 0000000000..a01a902ba9
--- /dev/null
+++ b/third_party/libwebrtc/call/rtp_config.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef CALL_RTP_CONFIG_H_
+#define CALL_RTP_CONFIG_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <string>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/rtp_headers.h"
+#include "api/rtp_parameters.h"
+
+namespace webrtc {
+// Currently only VP8/VP9 specific.
+struct RtpPayloadState {
+  // -1 means unset; the meaning of the index values is codec-specific
+  // (VP8/VP9 per the comment above).
+  int16_t picture_id = -1;
+  // VP8 temporal-layer-0 picture index (TL0PICIDX) state.
+  uint8_t tl0_pic_idx = 0;
+  int64_t shared_frame_id = 0;
+};
+
+// Settings for LNTF (LossNotification). Still highly experimental.
+struct LntfConfig {
+  std::string ToString() const;
+
+  bool enabled{false};
+};
+
+// Settings for NACK, see RFC 4585 for details.
+struct NackConfig {
+  NackConfig() : rtp_history_ms(0) {}
+  std::string ToString() const;
+  // Send side: the time RTP packets are stored for retransmissions.
+  // Receive side: the time the receiver is prepared to wait for
+  // retransmissions.
+  // Set to '0' to disable.
+  int rtp_history_ms;
+};
+
+// Settings for ULPFEC forward error correction.
+// Set the payload types to '-1' to disable.
+struct UlpfecConfig {
+  UlpfecConfig()
+      : ulpfec_payload_type(-1),
+        red_payload_type(-1),
+        red_rtx_payload_type(-1) {}
+  std::string ToString() const;
+  bool operator==(const UlpfecConfig& other) const;
+
+  // Payload type used for ULPFEC packets.
+  int ulpfec_payload_type;
+
+  // Payload type used for RED packets.
+  int red_payload_type;
+
+  // RTX payload type for RED payload.
+  int red_rtx_payload_type;
+};
+
+static const size_t kDefaultMaxPacketSize = 1500 - 40;  // TCP over IPv4.
+// Per-stream RTP send configuration: SSRCs, header extensions, FEC/NACK/RTX
+// settings and payload mapping for one send stream.
+struct RtpConfig {
+  RtpConfig();
+  RtpConfig(const RtpConfig&);
+  ~RtpConfig();
+  std::string ToString() const;
+
+  std::vector<uint32_t> ssrcs;
+
+  // The Rtp Stream Ids (aka RIDs) to send in the RID RTP header extension
+  // if the extension is included in the list of extensions.
+  // If rids are specified, they should correspond to the `ssrcs` vector.
+  // This means that:
+  // 1. rids.size() == 0 || rids.size() == ssrcs.size().
+  // 2. If rids is not empty, then `rids[i]` should use `ssrcs[i]`.
+  std::vector<std::string> rids;
+
+  // The value to send in the MID RTP header extension if the extension is
+  // included in the list of extensions.
+  std::string mid;
+
+  // See RtcpMode for description.
+  RtcpMode rtcp_mode = RtcpMode::kCompound;
+
+  // Max RTP packet size delivered to send transport from VideoEngine.
+  size_t max_packet_size = kDefaultMaxPacketSize;
+
+  // Corresponds to the SDP attribute extmap-allow-mixed.
+  bool extmap_allow_mixed = false;
+
+  // RTP header extensions to use for this send stream.
+  std::vector<RtpExtension> extensions;
+
+  // TODO(nisse): For now, these are fixed, but we'd like to support
+  // changing codec without recreating the VideoSendStream. Then these
+  // fields must be removed, and association between payload type and codec
+  // must move above the per-stream level. Ownership could be with
+  // RtpTransportControllerSend, with a reference from RtpVideoSender, where
+  // the latter would be responsible for mapping the codec type of encoded
+  // images to the right payload type.
+  std::string payload_name;
+  int payload_type = -1;
+  // Payload should be packetized using raw packetizer (payload header will
+  // not be added, additional meta data is expected to be present in generic
+  // frame descriptor RTP header extension).
+  bool raw_payload = false;
+
+  // See LntfConfig for description.
+  LntfConfig lntf;
+
+  // See NackConfig for description.
+  NackConfig nack;
+
+  // See UlpfecConfig for description.
+  UlpfecConfig ulpfec;
+
+  struct Flexfec {
+    Flexfec();
+    Flexfec(const Flexfec&);
+    ~Flexfec();
+    // Payload type of FlexFEC. Set to -1 to disable sending FlexFEC.
+    int payload_type = -1;
+
+    // SSRC of FlexFEC stream.
+    uint32_t ssrc = 0;
+
+    // Vector containing a single element, corresponding to the SSRC of the
+    // media stream being protected by this FlexFEC stream.
+    // The vector MUST have size 1.
+    //
+    // TODO(brandtr): Update comment above when we support
+    // multistream protection.
+    std::vector<uint32_t> protected_media_ssrcs;
+  } flexfec;
+
+  // Settings for RTP retransmission payload format, see RFC 4588 for
+  // details.
+  struct Rtx {
+    Rtx();
+    Rtx(const Rtx&);
+    ~Rtx();
+    std::string ToString() const;
+    // SSRCs to use for the RTX streams.
+    std::vector<uint32_t> ssrcs;
+
+    // Payload type to use for the RTX stream.
+    int payload_type = -1;
+  } rtx;
+
+  // RTCP CNAME, see RFC 3550.
+  std::string c_name;
+
+  // Enables send packet batching from the egress RTP sender.
+  bool enable_send_packet_batching = false;
+
+  bool IsMediaSsrc(uint32_t ssrc) const;
+  bool IsRtxSsrc(uint32_t ssrc) const;
+  bool IsFlexfecSsrc(uint32_t ssrc) const;
+  absl::optional<uint32_t> GetRtxSsrcAssociatedWithMediaSsrc(
+      uint32_t media_ssrc) const;
+  uint32_t GetMediaSsrcAssociatedWithRtxSsrc(uint32_t rtx_ssrc) const;
+  uint32_t GetMediaSsrcAssociatedWithFlexfecSsrc(uint32_t flexfec_ssrc) const;
+  absl::optional<std::string> GetRidForSsrc(uint32_t ssrc) const;
+};
+} // namespace webrtc
+#endif // CALL_RTP_CONFIG_H_
diff --git a/third_party/libwebrtc/call/rtp_demuxer.cc b/third_party/libwebrtc/call/rtp_demuxer.cc
new file mode 100644
index 0000000000..5c53f48144
--- /dev/null
+++ b/third_party/libwebrtc/call/rtp_demuxer.cc
@@ -0,0 +1,444 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/rtp_demuxer.h"
+
+#include "absl/strings/string_view.h"
+#include "call/rtp_packet_sink_interface.h"
+#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/strings/string_builder.h"
+
+namespace webrtc {
+namespace {
+
+// Erases every entry of `multimap` whose mapped value equals `value` and
+// returns the number of entries removed. Uses the erase-returns-next-iterator
+// idiom so the iterator stays valid across erasures.
+template <typename Container, typename Value>
+size_t RemoveFromMultimapByValue(Container* multimap, const Value& value) {
+  size_t count = 0;
+  for (auto it = multimap->begin(); it != multimap->end();) {
+    if (it->second == value) {
+      it = multimap->erase(it);
+      ++count;
+    } else {
+      ++it;
+    }
+  }
+  return count;
+}
+
+// Erases every entry of `map` whose mapped value equals `value`; returns the
+// count reported by EraseIf (the number of entries removed).
+template <typename Map, typename Value>
+size_t RemoveFromMapByValue(Map* map, const Value& value) {
+  return EraseIf(*map, [&](const auto& elem) { return elem.second == value; });
+}
+
+// Temp fix: MID in SDP is allowed to be slightly longer than what's allowed
+// in the RTP demuxer. Truncate if needed; this won't match, but it only
+// makes sense in places that wouldn't use this for matching anyway.
+// TODO(bugs.webrtc.org/12517): remove when length 16 is policed by parser.
+// Returns `mid` unchanged when it already fits, otherwise a copy truncated to
+// BaseRtpStringExtension::kMaxValueSizeBytes (with a warning logged).
+std::string CheckMidLength(absl::string_view mid) {
+  std::string new_mid(mid);
+  if (new_mid.length() > BaseRtpStringExtension::kMaxValueSizeBytes) {
+    RTC_LOG(LS_WARNING) << "`mid` attribute too long. Truncating.";
+    new_mid.resize(BaseRtpStringExtension::kMaxValueSizeBytes);
+  }
+  return new_mid;
+}
+
+} // namespace
+
+// The MID is length-capped at construction time (see CheckMidLength above);
+// the RSID is stored as given.
+RtpDemuxerCriteria::RtpDemuxerCriteria(
+    absl::string_view mid,
+    absl::string_view rsid /*= absl::string_view()*/)
+    : mid_(CheckMidLength(mid)), rsid_(rsid) {}
+
+RtpDemuxerCriteria::RtpDemuxerCriteria() = default;
+RtpDemuxerCriteria::~RtpDemuxerCriteria() = default;
+
+// Two criteria are equal iff all four attributes (MID, RSID, SSRC set and
+// payload-type set) match.
+bool RtpDemuxerCriteria::operator==(const RtpDemuxerCriteria& other) const {
+  return mid_ == other.mid_ && rsid_ == other.rsid_ && ssrcs_ == other.ssrcs_ &&
+         payload_types_ == other.payload_types_;
+}
+
+bool RtpDemuxerCriteria::operator!=(const RtpDemuxerCriteria& other) const {
+  return !(*this == other);
+}
+
+// Builds a log-friendly dump of the criteria, e.g.
+// "{mid: v, rsid: <empty>, ssrcs: [101, ], payload_types = [8, ]}".
+// Note: each list element is followed by ", ", including the last one; this
+// output is for logging only, not for parsing.
+std::string RtpDemuxerCriteria::ToString() const {
+  rtc::StringBuilder sb;
+  sb << "{mid: " << (mid_.empty() ? "<empty>" : mid_)
+     << ", rsid: " << (rsid_.empty() ? "<empty>" : rsid_) << ", ssrcs: [";
+
+  for (auto ssrc : ssrcs_) {
+    sb << ssrc << ", ";
+  }
+
+  sb << "], payload_types = [";
+
+  for (auto pt : payload_types_) {
+    sb << pt << ", ";
+  }
+
+  sb << "]}";
+  return sb.Release();
+}
+
+// static
+// Summarizes the demux-relevant attributes of `packet` for logging: payload
+// type and SSRC always, plus MID/RSID/RRSID when the corresponding header
+// extension is present on the packet.
+std::string RtpDemuxer::DescribePacket(const RtpPacketReceived& packet) {
+  rtc::StringBuilder sb;
+  sb << "PT=" << packet.PayloadType() << " SSRC=" << packet.Ssrc();
+  std::string mid;
+  if (packet.GetExtension<RtpMid>(&mid)) {
+    sb << " MID=" << mid;
+  }
+  std::string rsid;
+  if (packet.GetExtension<RtpStreamId>(&rsid)) {
+    sb << " RSID=" << rsid;
+  }
+  std::string rrsid;
+  if (packet.GetExtension<RepairedRtpStreamId>(&rrsid)) {
+    sb << " RRSID=" << rrsid;
+  }
+  return sb.Release();
+}
+
+RtpDemuxer::RtpDemuxer(bool use_mid /* = true*/) : use_mid_(use_mid) {}
+
+// All sinks must have been removed via RemoveSink() before destruction; the
+// destructor only verifies this, and only in debug builds.
+RtpDemuxer::~RtpDemuxer() {
+  RTC_DCHECK(sink_by_mid_.empty());
+  RTC_DCHECK(sink_by_ssrc_.empty());
+  RTC_DCHECK(sinks_by_pt_.empty());
+  RTC_DCHECK(sink_by_mid_and_rsid_.empty());
+  RTC_DCHECK(sink_by_rsid_.empty());
+}
+
+// Registers `sink` under every attribute of `criteria`. Returns false (with
+// no state change) when CriteriaWouldConflict() rejects the criteria;
+// otherwise populates the per-attribute lookup maps, refreshes the known-MID
+// set and returns true. `criteria` must name at least one attribute and any
+// MID/RSID must be syntactically legal (DCHECKed).
+bool RtpDemuxer::AddSink(const RtpDemuxerCriteria& criteria,
+                         RtpPacketSinkInterface* sink) {
+  RTC_DCHECK(!criteria.payload_types().empty() || !criteria.ssrcs().empty() ||
+             !criteria.mid().empty() || !criteria.rsid().empty());
+  RTC_DCHECK(criteria.mid().empty() || IsLegalMidName(criteria.mid()));
+  RTC_DCHECK(criteria.rsid().empty() || IsLegalRsidName(criteria.rsid()));
+  RTC_DCHECK(sink);
+
+  // We return false instead of DCHECKing for logical conflicts with the new
+  // criteria because new sinks are created according to user-specified SDP and
+  // we do not want to crash due to a data validation error.
+  if (CriteriaWouldConflict(criteria)) {
+    RTC_LOG(LS_ERROR) << "Unable to add sink=" << sink
+                      << " due to conflicting criteria " << criteria.ToString();
+    return false;
+  }
+
+  // A MID+RSID pair is registered as a single composite key; a bare RSID is
+  // only registered when no MID is given.
+  if (!criteria.mid().empty()) {
+    if (criteria.rsid().empty()) {
+      sink_by_mid_.emplace(criteria.mid(), sink);
+    } else {
+      sink_by_mid_and_rsid_.emplace(
+          std::make_pair(criteria.mid(), criteria.rsid()), sink);
+    }
+  } else {
+    if (!criteria.rsid().empty()) {
+      sink_by_rsid_.emplace(criteria.rsid(), sink);
+    }
+  }
+
+  for (uint32_t ssrc : criteria.ssrcs()) {
+    sink_by_ssrc_.emplace(ssrc, sink);
+  }
+
+  for (uint8_t payload_type : criteria.payload_types()) {
+    sinks_by_pt_.emplace(payload_type, sink);
+  }
+
+  RefreshKnownMids();
+
+  RTC_DLOG(LS_INFO) << "Added sink = " << sink << " for criteria "
+                    << criteria.ToString();
+
+  return true;
+}
+
+// Returns true when registering `criteria` would shadow or duplicate an
+// existing rule: a bare MID that is already known, an exact MID+RSID
+// duplicate, a MID+RSID under an already-bound bare MID, or an SSRC that is
+// already claimed. Payload types are not checked (see TODO at the bottom).
+bool RtpDemuxer::CriteriaWouldConflict(
+    const RtpDemuxerCriteria& criteria) const {
+  if (!criteria.mid().empty()) {
+    if (criteria.rsid().empty()) {
+      // If the MID is in the known_mids_ set, then there is already a sink
+      // added for this MID directly, or there is a sink already added with a
+      // MID, RSID pair for our MID and some RSID.
+      // Adding this criteria would cause one of these rules to be shadowed, so
+      // reject this new criteria.
+      if (known_mids_.find(criteria.mid()) != known_mids_.end()) {
+        RTC_LOG(LS_INFO) << criteria.ToString()
+                         << " would conflict with known mid";
+        return true;
+      }
+    } else {
+      // If the exact rule already exists, then reject this duplicate.
+      const auto sink_by_mid_and_rsid = sink_by_mid_and_rsid_.find(
+          std::make_pair(criteria.mid(), criteria.rsid()));
+      if (sink_by_mid_and_rsid != sink_by_mid_and_rsid_.end()) {
+        RTC_LOG(LS_INFO) << criteria.ToString()
+                         << " would conflict with existing sink = "
+                         << sink_by_mid_and_rsid->second
+                         << " by mid+rsid binding";
+        return true;
+      }
+      // If there is already a sink registered for the bare MID, then this
+      // criteria will never receive any packets because they will just be
+      // directed to that MID sink, so reject this new criteria.
+      const auto sink_by_mid = sink_by_mid_.find(criteria.mid());
+      if (sink_by_mid != sink_by_mid_.end()) {
+        RTC_LOG(LS_INFO) << criteria.ToString()
+                         << " would conflict with existing sink = "
+                         << sink_by_mid->second << " by mid binding";
+        return true;
+      }
+    }
+  }
+
+  for (uint32_t ssrc : criteria.ssrcs()) {
+    const auto sink_by_ssrc = sink_by_ssrc_.find(ssrc);
+    if (sink_by_ssrc != sink_by_ssrc_.end()) {
+      RTC_LOG(LS_INFO) << criteria.ToString()
+                       << " would conflict with existing sink = "
+                       << sink_by_ssrc->second << " binding by SSRC=" << ssrc;
+      return true;
+    }
+  }
+
+  // TODO(steveanton): May also sanity check payload types.
+
+  return false;
+}
+
+// Rebuilds known_mids_ from scratch out of the MID keys of sink_by_mid_ and
+// the MID halves of sink_by_mid_and_rsid_. Called after every AddSink /
+// RemoveSink so the set always mirrors the current sink registrations.
+void RtpDemuxer::RefreshKnownMids() {
+  known_mids_.clear();
+
+  for (auto const& item : sink_by_mid_) {
+    const std::string& mid = item.first;
+    known_mids_.insert(mid);
+  }
+
+  for (auto const& item : sink_by_mid_and_rsid_) {
+    const std::string& mid = item.first.first;
+    known_mids_.insert(mid);
+  }
+}
+
+// Convenience overload: registers `sink` for a single SSRC by delegating to
+// the criteria-based AddSink.
+bool RtpDemuxer::AddSink(uint32_t ssrc, RtpPacketSinkInterface* sink) {
+  RtpDemuxerCriteria criteria;
+  criteria.ssrcs().insert(ssrc);
+  return AddSink(criteria, sink);
+}
+
+// Convenience overload: registers `sink` for an RSID only (no MID). The
+// success/failure result of the underlying AddSink is intentionally dropped.
+void RtpDemuxer::AddSink(absl::string_view rsid, RtpPacketSinkInterface* sink) {
+  RtpDemuxerCriteria criteria(absl::string_view() /* mid */, rsid);
+  AddSink(criteria, sink);
+}
+
+// Removes `sink` from every lookup map (including any SSRC bindings learned
+// while demuxing) and refreshes the known-MID set. Returns true iff at least
+// one mapping was removed. The learned mid_by_ssrc_/rsid_by_ssrc_ caches are
+// deliberately left intact.
+bool RtpDemuxer::RemoveSink(const RtpPacketSinkInterface* sink) {
+  RTC_DCHECK(sink);
+  size_t num_removed = RemoveFromMapByValue(&sink_by_mid_, sink) +
+                       RemoveFromMapByValue(&sink_by_ssrc_, sink) +
+                       RemoveFromMultimapByValue(&sinks_by_pt_, sink) +
+                       RemoveFromMapByValue(&sink_by_mid_and_rsid_, sink) +
+                       RemoveFromMapByValue(&sink_by_rsid_, sink);
+  RefreshKnownMids();
+  return num_removed > 0;
+}
+
+// Routes `packet` to the sink chosen by ResolveSink. Returns true when a
+// sink received the packet, false when it was dropped.
+bool RtpDemuxer::OnRtpPacket(const RtpPacketReceived& packet) {
+  RtpPacketSinkInterface* sink = ResolveSink(packet);
+  if (sink != nullptr) {
+    sink->OnRtpPacket(packet);
+    return true;
+  }
+  return false;
+}
+
+// Chooses the sink for `packet`. In this branch the MID/RSID routing path is
+// compiled out (see the `#if 0` block below and the note above it), so the
+// live resolution order is: signaled SSRC first, then a unique payload-type
+// match as a last resort. Returns null when the packet should be dropped.
+RtpPacketSinkInterface* RtpDemuxer::ResolveSink(
+    const RtpPacketReceived& packet) {
+  // See the BUNDLE spec for high level reference to this algorithm:
+  // https://tools.ietf.org/html/draft-ietf-mmusic-sdp-bundle-negotiation-38#section-10.2
+
+  // RSID and RRID are routed to the same sinks. If an RSID is specified on a
+  // repair packet, it should be ignored and the RRID should be used.
+  std::string packet_mid, packet_rsid;
+  //bool has_mid = use_mid_ && packet.GetExtension<RtpMid>(&packet_mid);
+  bool has_rsid = packet.GetExtension<RepairedRtpStreamId>(&packet_rsid);
+  if (!has_rsid) {
+    has_rsid = packet.GetExtension<RtpStreamId>(&packet_rsid);
+  }
+  uint32_t ssrc = packet.Ssrc();
+
+  // Mid support is half-baked in branch 64. RtpStreamReceiverController only
+  // supports adding sinks by ssrc, so our mids will never show up in
+  // known_mids_, causing us to drop packets here.
+#if 0
+  // The BUNDLE spec says to drop any packets with unknown MIDs, even if the
+  // SSRC is known/latched.
+  if (has_mid && known_mids_.find(packet_mid) == known_mids_.end()) {
+    return nullptr;
+  }
+
+  // Cache information we learn about SSRCs and IDs. We need to do this even if
+  // there isn't a rule/sink yet because we might add an MID/RSID rule after
+  // learning an MID/RSID<->SSRC association.
+
+  std::string* mid = nullptr;
+  if (has_mid) {
+    mid_by_ssrc_[ssrc] = packet_mid;
+    mid = &packet_mid;
+  } else {
+    // If the packet does not include a MID header extension, check if there is
+    // a latched MID for the SSRC.
+    const auto it = mid_by_ssrc_.find(ssrc);
+    if (it != mid_by_ssrc_.end()) {
+      mid = &it->second;
+    }
+  }
+
+  std::string* rsid = nullptr;
+  if (has_rsid) {
+    rsid_by_ssrc_[ssrc] = packet_rsid;
+    rsid = &packet_rsid;
+  } else {
+    // If the packet does not include an RRID/RSID header extension, check if
+    // there is a latched RSID for the SSRC.
+    const auto it = rsid_by_ssrc_.find(ssrc);
+    if (it != rsid_by_ssrc_.end()) {
+      rsid = &it->second;
+    }
+  }
+
+  // If MID and/or RSID is specified, prioritize that for demuxing the packet.
+  // The motivation behind the BUNDLE algorithm is that we trust these are used
+  // deliberately by senders and are more likely to be correct than SSRC/payload
+  // type which are included with every packet.
+  // TODO(steveanton): According to the BUNDLE spec, new SSRC mappings are only
+  //                   accepted if the packet's extended sequence number is
+  //                   greater than that of the last SSRC mapping update.
+  //                   https://tools.ietf.org/html/rfc7941#section-4.2.6
+  if (mid != nullptr) {
+    RtpPacketSinkInterface* sink_by_mid = ResolveSinkByMid(*mid, ssrc);
+    if (sink_by_mid != nullptr) {
+      return sink_by_mid;
+    }
+
+    // RSID is scoped to a given MID if both are included.
+    if (rsid != nullptr) {
+      RtpPacketSinkInterface* sink_by_mid_rsid =
+          ResolveSinkByMidRsid(*mid, *rsid, ssrc);
+      if (sink_by_mid_rsid != nullptr) {
+        return sink_by_mid_rsid;
+      }
+    }
+
+    // At this point, there is at least one sink added for this MID and an RSID
+    // but either the packet does not have an RSID or it is for a different
+    // RSID. This falls outside the BUNDLE spec so drop the packet.
+    return nullptr;
+  }
+
+  // RSID can be used without MID as long as they are unique.
+  if (rsid != nullptr) {
+    RtpPacketSinkInterface* sink_by_rsid = ResolveSinkByRsid(*rsid, ssrc);
+    if (sink_by_rsid != nullptr) {
+      return sink_by_rsid;
+    }
+  }
+
+#endif
+  // We trust signaled SSRC more than payload type which is likely to conflict
+  // between streams.
+  const auto ssrc_sink_it = sink_by_ssrc_.find(ssrc);
+  if (ssrc_sink_it != sink_by_ssrc_.end()) {
+    return ssrc_sink_it->second;
+  }
+
+  // Legacy senders will only signal payload type, support that as last resort.
+  return ResolveSinkByPayloadType(packet.PayloadType(), ssrc);
+}
+
+// Looks up the sink bound to `mid`; on a hit, latches `ssrc` to that sink so
+// later packets with the same SSRC route directly. Returns null on a miss.
+RtpPacketSinkInterface* RtpDemuxer::ResolveSinkByMid(absl::string_view mid,
+                                                     uint32_t ssrc) {
+  const auto it = sink_by_mid_.find(mid);
+  if (it != sink_by_mid_.end()) {
+    RtpPacketSinkInterface* sink = it->second;
+    AddSsrcSinkBinding(ssrc, sink);
+    return sink;
+  }
+  return nullptr;
+}
+
+// Same as ResolveSinkByMid but for the composite (MID, RSID) key.
+RtpPacketSinkInterface* RtpDemuxer::ResolveSinkByMidRsid(absl::string_view mid,
+                                                         absl::string_view rsid,
+                                                         uint32_t ssrc) {
+  const auto it = sink_by_mid_and_rsid_.find(
+      std::make_pair(std::string(mid), std::string(rsid)));
+  if (it != sink_by_mid_and_rsid_.end()) {
+    RtpPacketSinkInterface* sink = it->second;
+    AddSsrcSinkBinding(ssrc, sink);
+    return sink;
+  }
+  return nullptr;
+}
+
+// Same as ResolveSinkByMid but for a bare RSID binding.
+RtpPacketSinkInterface* RtpDemuxer::ResolveSinkByRsid(absl::string_view rsid,
+                                                      uint32_t ssrc) {
+  const auto it = sink_by_rsid_.find(rsid);
+  if (it != sink_by_rsid_.end()) {
+    RtpPacketSinkInterface* sink = it->second;
+    AddSsrcSinkBinding(ssrc, sink);
+    return sink;
+  }
+  return nullptr;
+}
+
+// Routes by payload type only when exactly one sink is registered for it
+// (the std::next(it) == end check); an ambiguous payload type — two or more
+// sinks — matches nothing. On success the SSRC is latched to the sink.
+RtpPacketSinkInterface* RtpDemuxer::ResolveSinkByPayloadType(
+    uint8_t payload_type,
+    uint32_t ssrc) {
+  const auto range = sinks_by_pt_.equal_range(payload_type);
+  if (range.first != range.second) {
+    auto it = range.first;
+    const auto end = range.second;
+    if (std::next(it) == end) {
+      RtpPacketSinkInterface* sink = it->second;
+      AddSsrcSinkBinding(ssrc, sink);
+      return sink;
+    }
+  }
+  return nullptr;
+}
+
+// Latches `ssrc` to `sink` so later packets with this SSRC route directly.
+// The number of bindings is capped at kMaxSsrcBindings (to bound memory use
+// against a peer cycling through SSRCs); once the cap is reached, new
+// bindings are dropped with a warning. Rebinding an existing SSRC to a
+// different sink overwrites the old binding.
+void RtpDemuxer::AddSsrcSinkBinding(uint32_t ssrc,
+                                    RtpPacketSinkInterface* sink) {
+  if (sink_by_ssrc_.size() >= kMaxSsrcBindings) {
+    // Fix: the original message read "limit of1000 bindings" because the
+    // literal lacked a trailing space before kMaxSsrcBindings.
+    RTC_LOG(LS_WARNING) << "New SSRC=" << ssrc
+                        << " sink binding ignored; limit of "
+                        << kMaxSsrcBindings << " bindings has been reached.";
+    return;
+  }
+
+  auto result = sink_by_ssrc_.emplace(ssrc, sink);
+  auto it = result.first;
+  bool inserted = result.second;
+  if (inserted) {
+    RTC_DLOG(LS_INFO) << "Added sink = " << sink
+                      << " binding with SSRC=" << ssrc;
+  } else if (it->second != sink) {
+    RTC_DLOG(LS_INFO) << "Updated sink = " << sink
+                      << " binding with SSRC=" << ssrc;
+    it->second = sink;
+  }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/rtp_demuxer.h b/third_party/libwebrtc/call/rtp_demuxer.h
new file mode 100644
index 0000000000..53eeb0b6b6
--- /dev/null
+++ b/third_party/libwebrtc/call/rtp_demuxer.h
@@ -0,0 +1,218 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef CALL_RTP_DEMUXER_H_
+#define CALL_RTP_DEMUXER_H_
+
+#include <map>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "rtc_base/containers/flat_map.h"
+#include "rtc_base/containers/flat_set.h"
+
+namespace webrtc {
+
+class RtpPacketReceived;
+class RtpPacketSinkInterface;
+
+// This class describes the criteria that will be used to match packets to a
+// specific sink.
+class RtpDemuxerCriteria {
+ public:
+  explicit RtpDemuxerCriteria(absl::string_view mid,
+                              absl::string_view rsid = absl::string_view());
+  RtpDemuxerCriteria();
+  ~RtpDemuxerCriteria();
+
+  bool operator==(const RtpDemuxerCriteria& other) const;
+  bool operator!=(const RtpDemuxerCriteria& other) const;
+
+  // If not the empty string, will match packets with this MID.
+  const std::string& mid() const { return mid_; }
+
+  // Return string representation of demux criteria to facilitate logging
+  std::string ToString() const;
+
+  // If not the empty string, will match packets with this as their RTP stream
+  // ID or repaired RTP stream ID.
+  // Note that if both MID and RSID are specified, this will only match packets
+  // that have both specified (either through RTP header extensions, SSRC
+  // latching or RTCP).
+  const std::string& rsid() const { return rsid_; }
+
+  // The criteria will match packets with any of these SSRCs.
+  const flat_set<uint32_t>& ssrcs() const { return ssrcs_; }
+
+  // Writable accessor for directly modifying the list of ssrcs.
+  flat_set<uint32_t>& ssrcs() { return ssrcs_; }
+
+  // The criteria will match packets with any of these payload types.
+  const flat_set<uint8_t>& payload_types() const { return payload_types_; }
+
+  // Writable accessor for directly modifying the list of payload types.
+  flat_set<uint8_t>& payload_types() { return payload_types_; }
+
+ private:
+  // Intentionally private member variables to encourage specifying them via the
+  // constructor and consider them to be const as much as possible.
+  // mid_ and rsid_ are const: a criteria's identifiers are fixed at
+  // construction; only the SSRC and payload-type sets are mutable afterwards.
+  const std::string mid_;
+  const std::string rsid_;
+  flat_set<uint32_t> ssrcs_;
+  flat_set<uint8_t> payload_types_;
+};
+
+// This class represents the RTP demuxing, for a single RTP session (i.e., one
+// SSRC space, see RFC 7656). It isn't thread aware, leaving responsibility of
+// multithreading issues to the user of this class.
+// The demuxing algorithm follows the sketch given in the BUNDLE draft:
+// https://tools.ietf.org/html/draft-ietf-mmusic-sdp-bundle-negotiation-38#section-10.2
+// with modifications to support RTP stream IDs also.
+//
+// When a packet is received, the RtpDemuxer will route according to the
+// following rules:
+// 1. If the packet contains the MID header extension, and no sink has been
+// added with that MID as a criteria, the packet is not routed.
+// 2. If the packet has the MID header extension, but no RSID or RRID extension,
+// and the MID is bound to a sink, then bind its SSRC to the same sink and
+// forward the packet to that sink. Note that rebinding to the same sink is
+// not an error. (Later packets with that SSRC would therefore be forwarded
+// to the same sink, whether they have the MID header extension or not.)
+// 3. If the packet has the MID header extension and either the RSID or RRID
+// extension, and the MID, RSID (or RRID) pair is bound to a sink, then bind
+// its SSRC to the same sink and forward the packet to that sink. Later
+// packets with that SSRC will be forwarded to the same sink.
+// 4. If the packet has the RSID or RRID header extension, but no MID extension,
+// and the RSID or RRID is bound to an RSID sink, then bind its SSRC to the
+// same sink and forward the packet to that sink. Later packets with that
+// SSRC will be forwarded to the same sink.
+// 5. If the packet's SSRC is bound to an SSRC through a previous call to
+// AddSink, then forward the packet to that sink. Note that the RtpDemuxer
+// will not verify the payload type even if included in the sink's criteria.
+// The sink is expected to do the check in its handler.
+// 6. If the packet's payload type is bound to exactly one payload type sink
+// through an earlier call to AddSink, then forward the packet to that sink.
+// 7. Otherwise, the packet is not routed.
+//
+// In summary, the routing algorithm will always try to first match MID and RSID
+// (including through SSRC binding), match SSRC directly as needed, and use
+// payload types only if all else fails.
+class RtpDemuxer {
+ public:
+  // Maximum number of unique SSRC bindings allowed. This limit is to prevent
+  // memory overuse attacks due to a malicious peer sending many packets with
+  // different SSRCs.
+  static constexpr int kMaxSsrcBindings = 1000;
+
+  // Returns a string that contains all the attributes of the given packet
+  // relevant for demuxing.
+  static std::string DescribePacket(const RtpPacketReceived& packet);
+
+  explicit RtpDemuxer(bool use_mid = true);
+  ~RtpDemuxer();
+
+  RtpDemuxer(const RtpDemuxer&) = delete;
+  void operator=(const RtpDemuxer&) = delete;
+
+  // Registers a sink that will be notified when RTP packets match its given
+  // criteria according to the algorithm described in the class description.
+  // Returns true if the sink was successfully added.
+  // Returns false in the following situations:
+  // - Only MID is specified and the MID is already registered.
+  // - Only RSID is specified and the RSID is already registered.
+  // - Both MID and RSID is specified and the (MID, RSID) pair is already
+  //   registered.
+  // - Any of the criteria SSRCs are already registered.
+  // If false is returned, no changes are made to the demuxer state.
+  bool AddSink(const RtpDemuxerCriteria& criteria,
+               RtpPacketSinkInterface* sink);
+
+  // Registers a sink. Multiple SSRCs may be mapped to the same sink, but
+  // each SSRC may only be mapped to one sink. The return value reports
+  // whether the association has been recorded or rejected. Rejection may occur
+  // if the SSRC has already been associated with a sink. The previously added
+  // sink is *not* forgotten.
+  bool AddSink(uint32_t ssrc, RtpPacketSinkInterface* sink);
+
+  // Registers a sink's association to an RSID. Only one sink may be associated
+  // with a given RSID. Null pointer is not allowed.
+  void AddSink(absl::string_view rsid, RtpPacketSinkInterface* sink);
+
+  // Removes a sink. Return value reports if anything was actually removed.
+  // Null pointer is not allowed.
+  bool RemoveSink(const RtpPacketSinkInterface* sink);
+
+  // Demuxes the given packet and forwards it to the chosen sink. Returns true
+  // if the packet was forwarded and false if the packet was dropped.
+  bool OnRtpPacket(const RtpPacketReceived& packet);
+
+ private:
+  // Returns true if adding a sink with the given criteria would cause conflicts
+  // with the existing criteria and should be rejected.
+  bool CriteriaWouldConflict(const RtpDemuxerCriteria& criteria) const;
+
+  // Runs the demux algorithm on the given packet and returns the sink that
+  // should receive the packet.
+  // Will record any SSRC<->ID associations along the way.
+  // If the packet should be dropped, this method returns null.
+  RtpPacketSinkInterface* ResolveSink(const RtpPacketReceived& packet);
+
+  // Used by the ResolveSink algorithm.
+  RtpPacketSinkInterface* ResolveSinkByMid(absl::string_view mid,
+                                           uint32_t ssrc);
+  RtpPacketSinkInterface* ResolveSinkByMidRsid(absl::string_view mid,
+                                               absl::string_view rsid,
+                                               uint32_t ssrc);
+  RtpPacketSinkInterface* ResolveSinkByRsid(absl::string_view rsid,
+                                            uint32_t ssrc);
+  RtpPacketSinkInterface* ResolveSinkByPayloadType(uint8_t payload_type,
+                                                   uint32_t ssrc);
+
+  // Regenerate the known_mids_ set from information in the sink_by_mid_ and
+  // sink_by_mid_and_rsid_ maps.
+  void RefreshKnownMids();
+
+  // Map each sink by its component attributes to facilitate quick lookups.
+  // Payload Type mapping is a multimap because if two sinks register for the
+  // same payload type, both AddSinks succeed but we must know not to demux on
+  // that attribute since it is ambiguous.
+  // Note: Mappings are only modified by AddSink/RemoveSink (except for
+  // SSRC mapping which receives all MID, payload type, or RSID to SSRC bindings
+  // discovered when demuxing packets).
+  flat_map<std::string, RtpPacketSinkInterface*> sink_by_mid_;
+  flat_map<uint32_t, RtpPacketSinkInterface*> sink_by_ssrc_;
+  std::multimap<uint8_t, RtpPacketSinkInterface*> sinks_by_pt_;
+  flat_map<std::pair<std::string, std::string>, RtpPacketSinkInterface*>
+      sink_by_mid_and_rsid_;
+  flat_map<std::string, RtpPacketSinkInterface*> sink_by_rsid_;
+
+  // Tracks all the MIDs that have been identified in added criteria. Used to
+  // determine if a packet should be dropped right away because the MID is
+  // unknown.
+  flat_set<std::string> known_mids_;
+
+  // Records learned mappings of MID --> SSRC and RSID --> SSRC as packets are
+  // received.
+  // This is stored separately from the sink mappings because if a sink is
+  // removed we want to still remember these associations.
+  flat_map<uint32_t, std::string> mid_by_ssrc_;
+  flat_map<uint32_t, std::string> rsid_by_ssrc_;
+
+  // Adds a binding from the SSRC to the given sink.
+  void AddSsrcSinkBinding(uint32_t ssrc, RtpPacketSinkInterface* sink);
+
+  // Whether the MID header extension participates in demuxing. NOTE(review):
+  // in this branch the MID routing path in rtp_demuxer.cc is compiled out
+  // (#if 0), so this flag currently has no effect — confirm before relying
+  // on it.
+  const bool use_mid_;
+};
+
+} // namespace webrtc
+
+#endif // CALL_RTP_DEMUXER_H_
diff --git a/third_party/libwebrtc/call/rtp_demuxer_unittest.cc b/third_party/libwebrtc/call/rtp_demuxer_unittest.cc
new file mode 100644
index 0000000000..e85052810a
--- /dev/null
+++ b/third_party/libwebrtc/call/rtp_demuxer_unittest.cc
@@ -0,0 +1,1286 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/rtp_demuxer.h"
+
+#include <memory>
+#include <set>
+#include <string>
+
+#include "absl/strings/string_view.h"
+#include "call/test/mock_rtp_packet_sink_interface.h"
+#include "modules/rtp_rtcp/include/rtp_header_extension_map.h"
+#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+using ::testing::_;
+using ::testing::AtLeast;
+using ::testing::InSequence;
+using ::testing::NiceMock;
+
+// Test fixture owning a demuxer plus the set of sinks registered through the
+// Add* helpers; the destructor unregisters every tracked sink so the
+// demuxer's own destructor DCHECKs (which require all maps empty) pass.
+class RtpDemuxerTest : public ::testing::Test {
+ protected:
+  ~RtpDemuxerTest() {
+    for (auto* sink : sinks_to_tear_down_) {
+      demuxer_.RemoveSink(sink);
+    }
+  }
+
+  // These are convenience methods for calling demuxer.AddSink with different
+  // parameters and will ensure that the sink is automatically removed when the
+  // test case finishes.
+
+  bool AddSink(const RtpDemuxerCriteria& criteria,
+               RtpPacketSinkInterface* sink) {
+    bool added = demuxer_.AddSink(criteria, sink);
+    if (added) {
+      sinks_to_tear_down_.insert(sink);
+    }
+    return added;
+  }
+
+  bool AddSinkOnlySsrc(uint32_t ssrc, RtpPacketSinkInterface* sink) {
+    RtpDemuxerCriteria criteria;
+    criteria.ssrcs().insert(ssrc);
+    return AddSink(criteria, sink);
+  }
+
+  bool AddSinkOnlyRsid(absl::string_view rsid, RtpPacketSinkInterface* sink) {
+    RtpDemuxerCriteria criteria(absl::string_view(), rsid);
+    return AddSink(criteria, sink);
+  }
+
+  bool AddSinkOnlyMid(absl::string_view mid, RtpPacketSinkInterface* sink) {
+    RtpDemuxerCriteria criteria(mid);
+    return AddSink(criteria, sink);
+  }
+
+  bool AddSinkBothMidRsid(absl::string_view mid,
+                          absl::string_view rsid,
+                          RtpPacketSinkInterface* sink) {
+    RtpDemuxerCriteria criteria(mid, rsid);
+    return AddSink(criteria, sink);
+  }
+
+  bool RemoveSink(RtpPacketSinkInterface* sink) {
+    sinks_to_tear_down_.erase(sink);
+    return demuxer_.RemoveSink(sink);
+  }
+
+  // The CreatePacket* methods are helpers for creating new RTP packets with
+  // various attributes set. Tests should use the helper that provides the
+  // minimum information needed to exercise the behavior under test. Tests also
+  // should not rely on any behavior which is not clearly described in the
+  // helper name/arguments. Any additional settings that are not covered by the
+  // helper should be set manually on the packet once it has been returned.
+  // For example, most tests in this file do not care about the RTP sequence
+  // number, but to ensure that the returned packets are valid the helpers will
+  // auto-increment the sequence number starting with 1. Tests that rely on
+  // specific sequence number behavior should call SetSequenceNumber manually on
+  // the returned packet.
+
+  // Intended for use only by other CreatePacket* helpers.
+  std::unique_ptr<RtpPacketReceived> CreatePacket(
+      uint32_t ssrc,
+      RtpPacketReceived::ExtensionManager* extension_manager) {
+    auto packet = std::make_unique<RtpPacketReceived>(extension_manager);
+    packet->SetSsrc(ssrc);
+    packet->SetSequenceNumber(next_sequence_number_++);
+    return packet;
+  }
+
+  std::unique_ptr<RtpPacketReceived> CreatePacketWithSsrc(uint32_t ssrc) {
+    return CreatePacket(ssrc, nullptr);
+  }
+
+  std::unique_ptr<RtpPacketReceived> CreatePacketWithSsrcMid(
+      uint32_t ssrc,
+      absl::string_view mid) {
+    RtpPacketReceived::ExtensionManager extension_manager;
+    extension_manager.Register<RtpMid>(11);
+
+    auto packet = CreatePacket(ssrc, &extension_manager);
+    packet->SetExtension<RtpMid>(mid);
+    return packet;
+  }
+
+  std::unique_ptr<RtpPacketReceived> CreatePacketWithSsrcRsid(
+      uint32_t ssrc,
+      absl::string_view rsid) {
+    RtpPacketReceived::ExtensionManager extension_manager;
+    extension_manager.Register<RtpStreamId>(6);
+
+    auto packet = CreatePacket(ssrc, &extension_manager);
+    packet->SetExtension<RtpStreamId>(rsid);
+    return packet;
+  }
+
+  std::unique_ptr<RtpPacketReceived> CreatePacketWithSsrcRrid(
+      uint32_t ssrc,
+      absl::string_view rrid) {
+    RtpPacketReceived::ExtensionManager extension_manager;
+    extension_manager.Register<RepairedRtpStreamId>(7);
+
+    auto packet = CreatePacket(ssrc, &extension_manager);
+    packet->SetExtension<RepairedRtpStreamId>(rrid);
+    return packet;
+  }
+
+  std::unique_ptr<RtpPacketReceived> CreatePacketWithSsrcMidRsid(
+      uint32_t ssrc,
+      absl::string_view mid,
+      absl::string_view rsid) {
+    RtpPacketReceived::ExtensionManager extension_manager;
+    extension_manager.Register<RtpMid>(11);
+    extension_manager.Register<RtpStreamId>(6);
+
+    auto packet = CreatePacket(ssrc, &extension_manager);
+    packet->SetExtension<RtpMid>(mid);
+    packet->SetExtension<RtpStreamId>(rsid);
+    return packet;
+  }
+
+  std::unique_ptr<RtpPacketReceived> CreatePacketWithSsrcRsidRrid(
+      uint32_t ssrc,
+      absl::string_view rsid,
+      absl::string_view rrid) {
+    RtpPacketReceived::ExtensionManager extension_manager;
+    extension_manager.Register<RtpStreamId>(6);
+    extension_manager.Register<RepairedRtpStreamId>(7);
+
+    auto packet = CreatePacket(ssrc, &extension_manager);
+    packet->SetExtension<RtpStreamId>(rsid);
+    packet->SetExtension<RepairedRtpStreamId>(rrid);
+    return packet;
+  }
+
+  RtpDemuxer demuxer_;
+  std::set<RtpPacketSinkInterface*> sinks_to_tear_down_;
+  // Auto-incremented by CreatePacket so every created packet is distinct.
+  uint16_t next_sequence_number_ = 1;
+};
+
+class RtpDemuxerDeathTest : public RtpDemuxerTest {};
+
+// Matches a packet by SSRC and sequence number only; other fields (payload,
+// extensions) are deliberately ignored.
+MATCHER_P(SamePacketAs, other, "") {
+  return arg.Ssrc() == other.Ssrc() &&
+         arg.SequenceNumber() == other.SequenceNumber();
+}
+
+// AddSink criteria-validation tests: exercise the accept/reject logic of
+// RtpDemuxer::AddSink / CriteriaWouldConflict without routing any packets.
+
+TEST_F(RtpDemuxerTest, CanAddSinkBySsrc) {
+  MockRtpPacketSink sink;
+  constexpr uint32_t ssrc = 1;
+
+  EXPECT_TRUE(AddSinkOnlySsrc(ssrc, &sink));
+}
+
+// Overlapping payload types are allowed when the sinks are distinguished by
+// different MIDs.
+TEST_F(RtpDemuxerTest, AllowAddSinkWithOverlappingPayloadTypesIfDifferentMid) {
+  const std::string mid1 = "v";
+  const std::string mid2 = "a";
+  constexpr uint8_t pt1 = 30;
+  constexpr uint8_t pt2 = 31;
+  constexpr uint8_t pt3 = 32;
+
+  RtpDemuxerCriteria pt1_pt2(mid1);
+  pt1_pt2.payload_types() = {pt1, pt2};
+  MockRtpPacketSink sink1;
+  AddSink(pt1_pt2, &sink1);
+
+  RtpDemuxerCriteria pt1_pt3(mid2);
+  pt1_pt3.payload_types() = {pt1, pt3};
+  MockRtpPacketSink sink2;
+  EXPECT_TRUE(AddSink(pt1_pt3, &sink2));
+}
+
+TEST_F(RtpDemuxerTest, RejectAddSinkForSameMidOnly) {
+  const std::string mid = "mid";
+
+  MockRtpPacketSink sink;
+  AddSinkOnlyMid(mid, &sink);
+  EXPECT_FALSE(AddSinkOnlyMid(mid, &sink));
+}
+
+TEST_F(RtpDemuxerTest, RejectAddSinkForSameMidRsid) {
+  const std::string mid = "v";
+  const std::string rsid = "1";
+
+  MockRtpPacketSink sink1;
+  AddSinkBothMidRsid(mid, rsid, &sink1);
+
+  MockRtpPacketSink sink2;
+  EXPECT_FALSE(AddSinkBothMidRsid(mid, rsid, &sink2));
+}
+
+TEST_F(RtpDemuxerTest, RejectAddSinkForConflictingMidAndMidRsid) {
+  const std::string mid = "v";
+  const std::string rsid = "1";
+
+  MockRtpPacketSink mid_sink;
+  AddSinkOnlyMid(mid, &mid_sink);
+
+  // This sink would never get any packets routed to it because the above sink
+  // would receive them all.
+  MockRtpPacketSink mid_rsid_sink;
+  EXPECT_FALSE(AddSinkBothMidRsid(mid, rsid, &mid_rsid_sink));
+}
+
+TEST_F(RtpDemuxerTest, RejectAddSinkForConflictingMidRsidAndMid) {
+  const std::string mid = "v";
+  const std::string rsid = "";
+
+  MockRtpPacketSink mid_rsid_sink;
+  AddSinkBothMidRsid(mid, rsid, &mid_rsid_sink);
+
+  // This sink would shadow the above sink.
+  MockRtpPacketSink mid_sink;
+  EXPECT_FALSE(AddSinkOnlyMid(mid, &mid_sink));
+}
+
+TEST_F(RtpDemuxerTest, AddSinkFailsIfCalledForTwoSinksWithSameSsrc) {
+  MockRtpPacketSink sink_a;
+  MockRtpPacketSink sink_b;
+  constexpr uint32_t ssrc = 1;
+  ASSERT_TRUE(AddSinkOnlySsrc(ssrc, &sink_a));
+
+  EXPECT_FALSE(AddSinkOnlySsrc(ssrc, &sink_b));
+}
+
+// An SSRC conflict is rejected even when the duplicate registration names the
+// very same sink.
+TEST_F(RtpDemuxerTest, AddSinkFailsIfCalledTwiceEvenIfSameSinkWithSameSsrc) {
+  MockRtpPacketSink sink;
+  constexpr uint32_t ssrc = 1;
+  ASSERT_TRUE(AddSinkOnlySsrc(ssrc, &sink));
+
+  EXPECT_FALSE(AddSinkOnlySsrc(ssrc, &sink));
+}
+
+// TODO(steveanton): Currently fails because payload type validation is not
+// complete in AddSink (see note in rtp_demuxer.cc).
+TEST_F(RtpDemuxerTest, DISABLED_RejectAddSinkForSamePayloadTypes) {
+  constexpr uint8_t pt1 = 30;
+  constexpr uint8_t pt2 = 31;
+
+  RtpDemuxerCriteria pt1_pt2;
+  pt1_pt2.payload_types() = {pt1, pt2};
+  MockRtpPacketSink sink1;
+  AddSink(pt1_pt2, &sink1);
+
+  RtpDemuxerCriteria pt2_pt1;
+  pt2_pt1.payload_types() = {pt2, pt1};
+  MockRtpPacketSink sink2;
+  EXPECT_FALSE(AddSink(pt2_pt1, &sink2));
+}
+
+// Routing Tests
+
+TEST_F(RtpDemuxerTest, OnRtpPacketCalledOnCorrectSinkBySsrc) {
+ constexpr uint32_t ssrcs[] = {101, 202, 303};
+ MockRtpPacketSink sinks[arraysize(ssrcs)];
+ for (size_t i = 0; i < arraysize(ssrcs); i++) {
+ AddSinkOnlySsrc(ssrcs[i], &sinks[i]);
+ }
+
+ for (size_t i = 0; i < arraysize(ssrcs); i++) {
+ auto packet = CreatePacketWithSsrc(ssrcs[i]);
+ EXPECT_CALL(sinks[i], OnRtpPacket(SamePacketAs(*packet))).Times(1);
+ EXPECT_TRUE(demuxer_.OnRtpPacket(*packet));
+ }
+}
+
+TEST_F(RtpDemuxerTest, OnRtpPacketCalledOnCorrectSinkByRsid) {
+ const std::string rsids[] = {"a", "b", "c"};
+ MockRtpPacketSink sinks[arraysize(rsids)];
+ for (size_t i = 0; i < arraysize(rsids); i++) {
+ AddSinkOnlyRsid(rsids[i], &sinks[i]);
+ }
+
+ for (size_t i = 0; i < arraysize(rsids); i++) {
+ auto packet =
+ CreatePacketWithSsrcRsid(rtc::checked_cast<uint32_t>(i), rsids[i]);
+ EXPECT_CALL(sinks[i], OnRtpPacket(SamePacketAs(*packet))).Times(1);
+ EXPECT_TRUE(demuxer_.OnRtpPacket(*packet));
+ }
+}
+
+TEST_F(RtpDemuxerTest, OnRtpPacketCalledOnCorrectSinkByMid) {
+ const std::string mids[] = {"a", "v", "s"};
+ MockRtpPacketSink sinks[arraysize(mids)];
+ for (size_t i = 0; i < arraysize(mids); i++) {
+ AddSinkOnlyMid(mids[i], &sinks[i]);
+ }
+
+ for (size_t i = 0; i < arraysize(mids); i++) {
+ auto packet =
+ CreatePacketWithSsrcMid(rtc::checked_cast<uint32_t>(i), mids[i]);
+ EXPECT_CALL(sinks[i], OnRtpPacket(SamePacketAs(*packet))).Times(1);
+ EXPECT_TRUE(demuxer_.OnRtpPacket(*packet));
+ }
+}
+
+TEST_F(RtpDemuxerTest, OnRtpPacketCalledOnCorrectSinkByMidAndRsid) {
+ const std::string mid = "v";
+ const std::string rsid = "1";
+ constexpr uint32_t ssrc = 10;
+
+ MockRtpPacketSink sink;
+ AddSinkBothMidRsid(mid, rsid, &sink);
+
+ auto packet = CreatePacketWithSsrcMidRsid(ssrc, mid, rsid);
+ EXPECT_CALL(sink, OnRtpPacket(SamePacketAs(*packet))).Times(1);
+ EXPECT_TRUE(demuxer_.OnRtpPacket(*packet));
+}
+
+TEST_F(RtpDemuxerTest, OnRtpPacketCalledOnCorrectSinkByRepairedRsid) {
+ const std::string rrid = "1";
+ constexpr uint32_t ssrc = 10;
+
+ MockRtpPacketSink sink;
+ AddSinkOnlyRsid(rrid, &sink);
+
+ auto packet_with_rrid = CreatePacketWithSsrcRrid(ssrc, rrid);
+ EXPECT_CALL(sink, OnRtpPacket(SamePacketAs(*packet_with_rrid))).Times(1);
+ EXPECT_TRUE(demuxer_.OnRtpPacket(*packet_with_rrid));
+}
+
+TEST_F(RtpDemuxerTest, OnRtpPacketCalledOnCorrectSinkByPayloadType) {
+ constexpr uint32_t ssrc = 10;
+ constexpr uint8_t payload_type = 30;
+
+ MockRtpPacketSink sink;
+ RtpDemuxerCriteria criteria;
+ criteria.payload_types() = {payload_type};
+ AddSink(criteria, &sink);
+
+ auto packet = CreatePacketWithSsrc(ssrc);
+ packet->SetPayloadType(payload_type);
+ EXPECT_CALL(sink, OnRtpPacket(SamePacketAs(*packet))).Times(1);
+ EXPECT_TRUE(demuxer_.OnRtpPacket(*packet));
+}
+
+TEST_F(RtpDemuxerTest, PacketsDeliveredInRightOrder) {
+ constexpr uint32_t ssrc = 101;
+ MockRtpPacketSink sink;
+ AddSinkOnlySsrc(ssrc, &sink);
+
+ std::unique_ptr<RtpPacketReceived> packets[5];
+ for (size_t i = 0; i < arraysize(packets); i++) {
+ packets[i] = CreatePacketWithSsrc(ssrc);
+ packets[i]->SetSequenceNumber(rtc::checked_cast<uint16_t>(i));
+ }
+
+ InSequence sequence;
+ for (const auto& packet : packets) {
+ EXPECT_CALL(sink, OnRtpPacket(SamePacketAs(*packet))).Times(1);
+ }
+
+ for (const auto& packet : packets) {
+ EXPECT_TRUE(demuxer_.OnRtpPacket(*packet));
+ }
+}
+
+TEST_F(RtpDemuxerTest, SinkMappedToMultipleSsrcs) {
+ constexpr uint32_t ssrcs[] = {404, 505, 606};
+ MockRtpPacketSink sink;
+ for (uint32_t ssrc : ssrcs) {
+ AddSinkOnlySsrc(ssrc, &sink);
+ }
+
+ // The sink which is associated with multiple SSRCs gets the callback
+ // triggered for each of those SSRCs.
+ for (uint32_t ssrc : ssrcs) {
+ auto packet = CreatePacketWithSsrc(ssrc);
+ EXPECT_CALL(sink, OnRtpPacket(SamePacketAs(*packet))).Times(1);
+ EXPECT_TRUE(demuxer_.OnRtpPacket(*packet));
+ }
+}
+
+TEST_F(RtpDemuxerTest, NoCallbackOnSsrcSinkRemovedBeforeFirstPacket) {
+ constexpr uint32_t ssrc = 404;
+ MockRtpPacketSink sink;
+ AddSinkOnlySsrc(ssrc, &sink);
+
+ ASSERT_TRUE(RemoveSink(&sink));
+
+ // The removed sink does not get callbacks.
+ auto packet = CreatePacketWithSsrc(ssrc);
+ EXPECT_CALL(sink, OnRtpPacket(_)).Times(0); // Not called.
+ EXPECT_FALSE(demuxer_.OnRtpPacket(*packet));
+}
+
+TEST_F(RtpDemuxerTest, NoCallbackOnSsrcSinkRemovedAfterFirstPacket) {
+ constexpr uint32_t ssrc = 404;
+ NiceMock<MockRtpPacketSink> sink;
+ AddSinkOnlySsrc(ssrc, &sink);
+
+ InSequence sequence;
+ for (size_t i = 0; i < 10; i++) {
+ ASSERT_TRUE(demuxer_.OnRtpPacket(*CreatePacketWithSsrc(ssrc)));
+ }
+
+ ASSERT_TRUE(RemoveSink(&sink));
+
+ // The removed sink does not get callbacks.
+ auto packet = CreatePacketWithSsrc(ssrc);
+ EXPECT_CALL(sink, OnRtpPacket(_)).Times(0); // Not called.
+ EXPECT_FALSE(demuxer_.OnRtpPacket(*packet));
+}
+
+// An SSRC may only be mapped to a single sink. However, since configuration
+// of these associations might come from the network, we need to fail gracefully.
+TEST_F(RtpDemuxerTest, OnlyOneSinkPerSsrcGetsOnRtpPacketTriggered) {
+ MockRtpPacketSink sinks[3];
+ constexpr uint32_t ssrc = 404;
+ ASSERT_TRUE(AddSinkOnlySsrc(ssrc, &sinks[0]));
+ ASSERT_FALSE(AddSinkOnlySsrc(ssrc, &sinks[1]));
+ ASSERT_FALSE(AddSinkOnlySsrc(ssrc, &sinks[2]));
+
+ // The first sink associated with the SSRC remains active; other sinks
+ // were not really added, and so do not get OnRtpPacket() called.
+ auto packet = CreatePacketWithSsrc(ssrc);
+ EXPECT_CALL(sinks[0], OnRtpPacket(SamePacketAs(*packet))).Times(1);
+ EXPECT_CALL(sinks[1], OnRtpPacket(_)).Times(0);
+ EXPECT_CALL(sinks[2], OnRtpPacket(_)).Times(0);
+ ASSERT_TRUE(demuxer_.OnRtpPacket(*packet));
+}
+
+TEST_F(RtpDemuxerTest, NoRepeatedCallbackOnRepeatedAddSinkForSameSink) {
+ constexpr uint32_t ssrc = 111;
+ MockRtpPacketSink sink;
+
+ ASSERT_TRUE(AddSinkOnlySsrc(ssrc, &sink));
+ ASSERT_FALSE(AddSinkOnlySsrc(ssrc, &sink));
+
+ auto packet = CreatePacketWithSsrc(ssrc);
+ EXPECT_CALL(sink, OnRtpPacket(SamePacketAs(*packet))).Times(1);
+ EXPECT_TRUE(demuxer_.OnRtpPacket(*packet));
+}
+
+TEST_F(RtpDemuxerTest, RemoveSinkReturnsFalseForNeverAddedSink) {
+ MockRtpPacketSink sink;
+ EXPECT_FALSE(RemoveSink(&sink));
+}
+
+TEST_F(RtpDemuxerTest, RemoveSinkReturnsTrueForPreviouslyAddedSsrcSink) {
+ constexpr uint32_t ssrc = 101;
+ MockRtpPacketSink sink;
+ AddSinkOnlySsrc(ssrc, &sink);
+
+ EXPECT_TRUE(RemoveSink(&sink));
+}
+
+TEST_F(RtpDemuxerTest,
+ RemoveSinkReturnsTrueForUnresolvedPreviouslyAddedRsidSink) {
+ const std::string rsid = "a";
+ MockRtpPacketSink sink;
+ AddSinkOnlyRsid(rsid, &sink);
+
+ EXPECT_TRUE(RemoveSink(&sink));
+}
+
+TEST_F(RtpDemuxerTest,
+ RemoveSinkReturnsTrueForResolvedPreviouslyAddedRsidSink) {
+ const std::string rsid = "a";
+ constexpr uint32_t ssrc = 101;
+ NiceMock<MockRtpPacketSink> sink;
+ AddSinkOnlyRsid(rsid, &sink);
+ ASSERT_TRUE(demuxer_.OnRtpPacket(*CreatePacketWithSsrcRsid(ssrc, rsid)));
+
+ EXPECT_TRUE(RemoveSink(&sink));
+}
+
+TEST_F(RtpDemuxerTest, RsidLearnedAndLaterPacketsDeliveredWithOnlySsrc) {
+ MockRtpPacketSink sink;
+ const std::string rsid = "a";
+ AddSinkOnlyRsid(rsid, &sink);
+
+ // Create a sequence of RTP packets, where only the first one actually
+ // mentions the RSID.
+ std::unique_ptr<RtpPacketReceived> packets[5];
+ constexpr uint32_t rsid_ssrc = 111;
+ packets[0] = CreatePacketWithSsrcRsid(rsid_ssrc, rsid);
+ for (size_t i = 1; i < arraysize(packets); i++) {
+ packets[i] = CreatePacketWithSsrc(rsid_ssrc);
+ }
+
+ // The first packet associates the RSID with the SSRC, thereby allowing the
+ // demuxer to correctly demux all of the packets.
+ InSequence sequence;
+ for (const auto& packet : packets) {
+ EXPECT_CALL(sink, OnRtpPacket(SamePacketAs(*packet))).Times(1);
+ }
+ for (const auto& packet : packets) {
+ EXPECT_TRUE(demuxer_.OnRtpPacket(*packet));
+ }
+}
+
+TEST_F(RtpDemuxerTest, NoCallbackOnRsidSinkRemovedBeforeFirstPacket) {
+ MockRtpPacketSink sink;
+ const std::string rsid = "a";
+ AddSinkOnlyRsid(rsid, &sink);
+
+ // Sink removed - it won't get triggers even if packets with its RSID arrive.
+ ASSERT_TRUE(RemoveSink(&sink));
+
+ constexpr uint32_t ssrc = 111;
+ auto packet = CreatePacketWithSsrcRsid(ssrc, rsid);
+ EXPECT_CALL(sink, OnRtpPacket(_)).Times(0); // Not called.
+ EXPECT_FALSE(demuxer_.OnRtpPacket(*packet));
+}
+
+TEST_F(RtpDemuxerTest, NoCallbackOnRsidSinkRemovedAfterFirstPacket) {
+ NiceMock<MockRtpPacketSink> sink;
+ const std::string rsid = "a";
+ AddSinkOnlyRsid(rsid, &sink);
+
+ InSequence sequence;
+ constexpr uint32_t ssrc = 111;
+ for (size_t i = 0; i < 10; i++) {
+ auto packet = CreatePacketWithSsrcRsid(ssrc, rsid);
+ ASSERT_TRUE(demuxer_.OnRtpPacket(*packet));
+ }
+
+ // Sink removed - it won't get triggers even if packets with its RSID arrive.
+ ASSERT_TRUE(RemoveSink(&sink));
+
+ auto packet = CreatePacketWithSsrcRsid(ssrc, rsid);
+ EXPECT_CALL(sink, OnRtpPacket(_)).Times(0); // Not called.
+ EXPECT_FALSE(demuxer_.OnRtpPacket(*packet));
+}
+
+TEST_F(RtpDemuxerTest, NoCallbackOnMidSinkRemovedBeforeFirstPacket) {
+ const std::string mid = "v";
+ constexpr uint32_t ssrc = 10;
+
+ MockRtpPacketSink sink;
+ AddSinkOnlyMid(mid, &sink);
+ RemoveSink(&sink);
+
+ auto packet = CreatePacketWithSsrcMid(ssrc, mid);
+ EXPECT_CALL(sink, OnRtpPacket(_)).Times(0);
+ EXPECT_FALSE(demuxer_.OnRtpPacket(*packet));
+}
+
+TEST_F(RtpDemuxerTest, NoCallbackOnMidSinkRemovedAfterFirstPacket) {
+ const std::string mid = "v";
+ constexpr uint32_t ssrc = 10;
+
+ NiceMock<MockRtpPacketSink> sink;
+ AddSinkOnlyMid(mid, &sink);
+
+ auto p1 = CreatePacketWithSsrcMid(ssrc, mid);
+ demuxer_.OnRtpPacket(*p1);
+
+ RemoveSink(&sink);
+
+ auto p2 = CreatePacketWithSsrcMid(ssrc, mid);
+ EXPECT_CALL(sink, OnRtpPacket(_)).Times(0);
+ EXPECT_FALSE(demuxer_.OnRtpPacket(*p2));
+}
+
+TEST_F(RtpDemuxerTest, NoCallbackOnMidRsidSinkRemovedAfterFirstPacket) {
+ const std::string mid = "v";
+ const std::string rsid = "1";
+ constexpr uint32_t ssrc = 10;
+
+ NiceMock<MockRtpPacketSink> sink;
+ AddSinkBothMidRsid(mid, rsid, &sink);
+
+ auto p1 = CreatePacketWithSsrcMidRsid(ssrc, mid, rsid);
+ demuxer_.OnRtpPacket(*p1);
+
+ RemoveSink(&sink);
+
+ auto p2 = CreatePacketWithSsrcMidRsid(ssrc, mid, rsid);
+ EXPECT_CALL(sink, OnRtpPacket(_)).Times(0);
+ EXPECT_FALSE(demuxer_.OnRtpPacket(*p2));
+}
+
+// The RSID to SSRC mapping should be one-to-one. If we end up receiving
+// two (or more) packets with the same SSRC, but different RSIDs, we guarantee
+// delivery to one of them but not both.
+TEST_F(RtpDemuxerTest, FirstSsrcAssociatedWithAnRsidIsNotForgotten) {
+ // Each sink has a distinct RSID.
+ MockRtpPacketSink sink_a;
+ const std::string rsid_a = "a";
+ AddSinkOnlyRsid(rsid_a, &sink_a);
+
+ MockRtpPacketSink sink_b;
+ const std::string rsid_b = "b";
+ AddSinkOnlyRsid(rsid_b, &sink_b);
+
+ InSequence sequence; // Verify that the order of delivery is unchanged.
+
+ constexpr uint32_t shared_ssrc = 100;
+
+ // First a packet with `rsid_a` is received, and `sink_a` is associated with
+ // its SSRC.
+ auto packet_a = CreatePacketWithSsrcRsid(shared_ssrc, rsid_a);
+ EXPECT_CALL(sink_a, OnRtpPacket(SamePacketAs(*packet_a))).Times(1);
+ EXPECT_TRUE(demuxer_.OnRtpPacket(*packet_a));
+
+ // Second, a packet with `rsid_b` is received. We guarantee that `sink_b`
+ // receives it.
+ auto packet_b = CreatePacketWithSsrcRsid(shared_ssrc, rsid_b);
+ EXPECT_CALL(sink_a, OnRtpPacket(_)).Times(0);
+ EXPECT_CALL(sink_b, OnRtpPacket(SamePacketAs(*packet_b))).Times(1);
+ EXPECT_TRUE(demuxer_.OnRtpPacket(*packet_b));
+
+ // Known edge-case; adding a new RSID association makes us re-examine all
+ // SSRCs. `sink_b` may or may not be associated with the SSRC now; we make
+ // no promises on that. However, since the RSID is specified and it cannot be
+ // found the packet should be dropped.
+ MockRtpPacketSink sink_c;
+ const std::string rsid_c = "c";
+ constexpr uint32_t some_other_ssrc = shared_ssrc + 1;
+ AddSinkOnlySsrc(some_other_ssrc, &sink_c);
+
+ auto packet_c = CreatePacketWithSsrcMid(shared_ssrc, rsid_c);
+ EXPECT_CALL(sink_a, OnRtpPacket(_)).Times(0);
+ EXPECT_CALL(sink_b, OnRtpPacket(_)).Times(0);
+ EXPECT_CALL(sink_c, OnRtpPacket(_)).Times(0);
+ EXPECT_FALSE(demuxer_.OnRtpPacket(*packet_c));
+}
+
+TEST_F(RtpDemuxerTest, MultipleRsidsOnSameSink) {
+ MockRtpPacketSink sink;
+ const std::string rsids[] = {"a", "b", "c"};
+
+ for (const std::string& rsid : rsids) {
+ AddSinkOnlyRsid(rsid, &sink);
+ }
+
+ InSequence sequence;
+ for (size_t i = 0; i < arraysize(rsids); i++) {
+ // Assign different SSRCs and sequence numbers to all packets.
+ const uint32_t ssrc = 1000 + static_cast<uint32_t>(i);
+ const uint16_t sequence_number = 50 + static_cast<uint16_t>(i);
+ auto packet = CreatePacketWithSsrcRsid(ssrc, rsids[i]);
+ packet->SetSequenceNumber(sequence_number);
+ EXPECT_CALL(sink, OnRtpPacket(SamePacketAs(*packet))).Times(1);
+ EXPECT_TRUE(demuxer_.OnRtpPacket(*packet));
+ }
+}
+
+// RSIDs are given higher priority than SSRC because we believe senders are less
+// likely to mislabel packets with RSID than mislabel them with SSRCs.
+TEST_F(RtpDemuxerTest, SinkWithBothRsidAndSsrcAssociations) {
+ MockRtpPacketSink sink;
+ constexpr uint32_t standalone_ssrc = 10101;
+ constexpr uint32_t rsid_ssrc = 20202;
+ const std::string rsid = "1";
+
+ AddSinkOnlySsrc(standalone_ssrc, &sink);
+ AddSinkOnlyRsid(rsid, &sink);
+
+ InSequence sequence;
+
+ auto ssrc_packet = CreatePacketWithSsrc(standalone_ssrc);
+ EXPECT_CALL(sink, OnRtpPacket(SamePacketAs(*ssrc_packet))).Times(1);
+ EXPECT_TRUE(demuxer_.OnRtpPacket(*ssrc_packet));
+
+ auto rsid_packet = CreatePacketWithSsrcRsid(rsid_ssrc, rsid);
+ EXPECT_CALL(sink, OnRtpPacket(SamePacketAs(*rsid_packet))).Times(1);
+ EXPECT_TRUE(demuxer_.OnRtpPacket(*rsid_packet));
+}
+
+// Packets are always guaranteed to be routed to only one sink.
+TEST_F(RtpDemuxerTest, AssociatingByRsidAndBySsrcCannotTriggerDoubleCall) {
+ constexpr uint32_t ssrc = 10101;
+ const std::string rsid = "a";
+
+ MockRtpPacketSink sink;
+ AddSinkOnlySsrc(ssrc, &sink);
+ AddSinkOnlyRsid(rsid, &sink);
+
+ auto packet = CreatePacketWithSsrcRsid(ssrc, rsid);
+ EXPECT_CALL(sink, OnRtpPacket(SamePacketAs(*packet))).Times(1);
+ EXPECT_TRUE(demuxer_.OnRtpPacket(*packet));
+}
+
+// If one sink is associated with SSRC x, and another sink with RSID y, then if
+// we receive a packet with both SSRC x and RSID y, route that to only the sink
+// for RSID y since we believe RSID tags to be more trustworthy than signaled
+// SSRCs.
+TEST_F(RtpDemuxerTest,
+ PacketFittingBothRsidSinkAndSsrcSinkGivenOnlyToRsidSink) {
+ constexpr uint32_t ssrc = 111;
+ MockRtpPacketSink ssrc_sink;
+ AddSinkOnlySsrc(ssrc, &ssrc_sink);
+
+ const std::string rsid = "a";
+ MockRtpPacketSink rsid_sink;
+ AddSinkOnlyRsid(rsid, &rsid_sink);
+
+ auto packet = CreatePacketWithSsrcRsid(ssrc, rsid);
+
+ EXPECT_CALL(ssrc_sink, OnRtpPacket(_)).Times(0);
+ EXPECT_CALL(rsid_sink, OnRtpPacket(SamePacketAs(*packet))).Times(1);
+ EXPECT_TRUE(demuxer_.OnRtpPacket(*packet));
+}
+
+// We're not expecting RSIDs to be resolved to SSRCs which were previously
+// mapped to sinks, and make no guarantees except for graceful handling.
+TEST_F(RtpDemuxerTest,
+ GracefullyHandleRsidBeingMappedToPrevouslyAssociatedSsrc) {
+ constexpr uint32_t ssrc = 111;
+ NiceMock<MockRtpPacketSink> ssrc_sink;
+ AddSinkOnlySsrc(ssrc, &ssrc_sink);
+
+ const std::string rsid = "a";
+ NiceMock<MockRtpPacketSink> rsid_sink;
+ AddSinkOnlyRsid(rsid, &rsid_sink);
+
+  // The SSRC was mapped to an SSRC sink, and was even active (packets flowed
+  // over it).
+ auto packet = CreatePacketWithSsrcRsid(ssrc, rsid);
+ demuxer_.OnRtpPacket(*packet);
+
+ // If the SSRC sink is ever removed, the RSID sink *might* receive indications
+ // of packets, and observers *might* be informed. Only graceful handling
+ // is guaranteed.
+ RemoveSink(&ssrc_sink);
+ EXPECT_CALL(rsid_sink, OnRtpPacket(SamePacketAs(*packet))).Times(AtLeast(0));
+ EXPECT_TRUE(demuxer_.OnRtpPacket(*packet));
+}
+
+// Tests that when one MID sink is configured, packets that include the MID
+// extension will get routed to that sink and any packets that use the same
+// SSRC as one of those packets later will also get routed to the sink, even
+// if a new SSRC is introduced for the same MID.
+TEST_F(RtpDemuxerTest, RoutedByMidWhenSsrcAdded) {
+ const std::string mid = "v";
+ NiceMock<MockRtpPacketSink> sink;
+ AddSinkOnlyMid(mid, &sink);
+
+ constexpr uint32_t ssrc1 = 10;
+ constexpr uint32_t ssrc2 = 11;
+
+ auto packet_ssrc1_mid = CreatePacketWithSsrcMid(ssrc1, mid);
+ demuxer_.OnRtpPacket(*packet_ssrc1_mid);
+ auto packet_ssrc2_mid = CreatePacketWithSsrcMid(ssrc2, mid);
+ demuxer_.OnRtpPacket(*packet_ssrc2_mid);
+
+ auto packet_ssrc1_only = CreatePacketWithSsrc(ssrc1);
+ EXPECT_CALL(sink, OnRtpPacket(SamePacketAs(*packet_ssrc1_only))).Times(1);
+ EXPECT_TRUE(demuxer_.OnRtpPacket(*packet_ssrc1_only));
+
+ auto packet_ssrc2_only = CreatePacketWithSsrc(ssrc2);
+ EXPECT_CALL(sink, OnRtpPacket(SamePacketAs(*packet_ssrc2_only))).Times(1);
+ EXPECT_TRUE(demuxer_.OnRtpPacket(*packet_ssrc2_only));
+}
+
+TEST_F(RtpDemuxerTest, DontLearnMidSsrcBindingBeforeSinkAdded) {
+ const std::string mid = "v";
+ constexpr uint32_t ssrc = 10;
+
+ auto packet_ssrc_mid = CreatePacketWithSsrcMid(ssrc, mid);
+ ASSERT_FALSE(demuxer_.OnRtpPacket(*packet_ssrc_mid));
+
+ MockRtpPacketSink sink;
+ AddSinkOnlyMid(mid, &sink);
+
+ auto packet_ssrc_only = CreatePacketWithSsrc(ssrc);
+ EXPECT_CALL(sink, OnRtpPacket(_)).Times(0);
+ EXPECT_FALSE(demuxer_.OnRtpPacket(*packet_ssrc_only));
+}
+
+TEST_F(RtpDemuxerTest, DontForgetMidSsrcBindingWhenSinkRemoved) {
+ const std::string mid = "v";
+ constexpr uint32_t ssrc = 10;
+
+ NiceMock<MockRtpPacketSink> sink1;
+ AddSinkOnlyMid(mid, &sink1);
+
+ auto packet_with_mid = CreatePacketWithSsrcMid(ssrc, mid);
+ demuxer_.OnRtpPacket(*packet_with_mid);
+
+ RemoveSink(&sink1);
+
+ MockRtpPacketSink sink2;
+ AddSinkOnlyMid(mid, &sink2);
+
+ auto packet_with_ssrc = CreatePacketWithSsrc(ssrc);
+ EXPECT_CALL(sink2, OnRtpPacket(SamePacketAs(*packet_with_ssrc)));
+ EXPECT_TRUE(demuxer_.OnRtpPacket(*packet_with_ssrc));
+}
+
+// If a sink is added with only a MID, then any packet with that MID no matter
+// the RSID should be routed to that sink.
+TEST_F(RtpDemuxerTest, RoutedByMidWithAnyRsid) {
+ const std::string mid = "v";
+ const std::string rsid1 = "1";
+ const std::string rsid2 = "2";
+ constexpr uint32_t ssrc1 = 10;
+ constexpr uint32_t ssrc2 = 11;
+
+ MockRtpPacketSink sink;
+ AddSinkOnlyMid(mid, &sink);
+
+ InSequence sequence;
+
+ auto packet_ssrc1_rsid1 = CreatePacketWithSsrcMidRsid(ssrc1, mid, rsid1);
+ EXPECT_CALL(sink, OnRtpPacket(SamePacketAs(*packet_ssrc1_rsid1))).Times(1);
+ EXPECT_TRUE(demuxer_.OnRtpPacket(*packet_ssrc1_rsid1));
+
+ auto packet_ssrc2_rsid2 = CreatePacketWithSsrcMidRsid(ssrc2, mid, rsid2);
+ EXPECT_CALL(sink, OnRtpPacket(SamePacketAs(*packet_ssrc2_rsid2))).Times(1);
+ EXPECT_TRUE(demuxer_.OnRtpPacket(*packet_ssrc2_rsid2));
+}
+
+// These two tests verify that for a sink added with a MID, RSID pair, if the
+// MID and RSID are learned in separate packets (e.g., because the header
+// extensions are sent separately), then a later packet with just SSRC will get
+// routed to that sink.
+// The first test checks that the functionality works when MID is learned first.
+// The second test checks that the functionality works when RSID is learned
+// first.
+TEST_F(RtpDemuxerTest, LearnMidThenRsidSeparatelyAndRouteBySsrc) {
+ const std::string mid = "v";
+ const std::string rsid = "1";
+ constexpr uint32_t ssrc = 10;
+
+ NiceMock<MockRtpPacketSink> sink;
+ AddSinkBothMidRsid(mid, rsid, &sink);
+
+ auto packet_with_mid = CreatePacketWithSsrcMid(ssrc, mid);
+ ASSERT_FALSE(demuxer_.OnRtpPacket(*packet_with_mid));
+
+ auto packet_with_rsid = CreatePacketWithSsrcRsid(ssrc, rsid);
+ ASSERT_TRUE(demuxer_.OnRtpPacket(*packet_with_rsid));
+
+ auto packet_with_ssrc = CreatePacketWithSsrc(ssrc);
+ EXPECT_CALL(sink, OnRtpPacket(SamePacketAs(*packet_with_ssrc))).Times(1);
+ EXPECT_TRUE(demuxer_.OnRtpPacket(*packet_with_ssrc));
+}
+
+TEST_F(RtpDemuxerTest, LearnRsidThenMidSeparatelyAndRouteBySsrc) {
+ const std::string mid = "v";
+ const std::string rsid = "1";
+ constexpr uint32_t ssrc = 10;
+
+ NiceMock<MockRtpPacketSink> sink;
+ AddSinkBothMidRsid(mid, rsid, &sink);
+
+ auto packet_with_rsid = CreatePacketWithSsrcRsid(ssrc, rsid);
+ ASSERT_FALSE(demuxer_.OnRtpPacket(*packet_with_rsid));
+
+ auto packet_with_mid = CreatePacketWithSsrcMid(ssrc, mid);
+ ASSERT_TRUE(demuxer_.OnRtpPacket(*packet_with_mid));
+
+ auto packet_with_ssrc = CreatePacketWithSsrc(ssrc);
+ EXPECT_CALL(sink, OnRtpPacket(SamePacketAs(*packet_with_ssrc))).Times(1);
+ EXPECT_TRUE(demuxer_.OnRtpPacket(*packet_with_ssrc));
+}
+
+TEST_F(RtpDemuxerTest, DontLearnMidRsidBindingBeforeSinkAdded) {
+ const std::string mid = "v";
+ const std::string rsid = "1";
+ constexpr uint32_t ssrc = 10;
+
+ auto packet_with_both = CreatePacketWithSsrcMidRsid(ssrc, mid, rsid);
+ ASSERT_FALSE(demuxer_.OnRtpPacket(*packet_with_both));
+
+ MockRtpPacketSink sink;
+ AddSinkBothMidRsid(mid, rsid, &sink);
+
+ auto packet_with_ssrc = CreatePacketWithSsrc(ssrc);
+ EXPECT_CALL(sink, OnRtpPacket(_)).Times(0);
+ EXPECT_FALSE(demuxer_.OnRtpPacket(*packet_with_ssrc));
+}
+
+TEST_F(RtpDemuxerTest, DontForgetMidRsidBindingWhenSinkRemoved) {
+ const std::string mid = "v";
+ const std::string rsid = "1";
+ constexpr uint32_t ssrc = 10;
+
+ NiceMock<MockRtpPacketSink> sink1;
+ AddSinkBothMidRsid(mid, rsid, &sink1);
+
+ auto packet_with_both = CreatePacketWithSsrcMidRsid(ssrc, mid, rsid);
+ demuxer_.OnRtpPacket(*packet_with_both);
+
+ RemoveSink(&sink1);
+
+ MockRtpPacketSink sink2;
+ AddSinkBothMidRsid(mid, rsid, &sink2);
+
+ auto packet_with_ssrc = CreatePacketWithSsrc(ssrc);
+ EXPECT_CALL(sink2, OnRtpPacket(SamePacketAs(*packet_with_ssrc)));
+ EXPECT_TRUE(demuxer_.OnRtpPacket(*packet_with_ssrc));
+}
+
+TEST_F(RtpDemuxerTest, LearnMidRsidBindingAfterSinkAdded) {
+ const std::string mid = "v";
+ const std::string rsid = "1";
+ constexpr uint32_t ssrc = 10;
+
+ NiceMock<MockRtpPacketSink> sink;
+ AddSinkBothMidRsid(mid, rsid, &sink);
+
+ auto packet_with_both = CreatePacketWithSsrcMidRsid(ssrc, mid, rsid);
+ demuxer_.OnRtpPacket(*packet_with_both);
+
+ auto packet_with_ssrc = CreatePacketWithSsrc(ssrc);
+ EXPECT_CALL(sink, OnRtpPacket(SamePacketAs(*packet_with_ssrc)));
+ EXPECT_TRUE(demuxer_.OnRtpPacket(*packet_with_ssrc));
+}
+
+TEST_F(RtpDemuxerTest, DropByPayloadTypeIfNoSink) {
+ constexpr uint8_t payload_type = 30;
+ constexpr uint32_t ssrc = 10;
+
+ auto packet = CreatePacketWithSsrc(ssrc);
+ packet->SetPayloadType(payload_type);
+ EXPECT_FALSE(demuxer_.OnRtpPacket(*packet));
+}
+
+// For legacy applications, it's possible for us to demux if the payload type is
+// unique. But if multiple sinks are registered with different MIDs and the same
+// payload types, then we cannot route a packet with just payload type because
+// it is ambiguous which sink it should be sent to.
+TEST_F(RtpDemuxerTest, DropByPayloadTypeIfAddedInMultipleSinks) {
+ const std::string mid1 = "v";
+ const std::string mid2 = "a";
+ constexpr uint8_t payload_type = 30;
+ constexpr uint32_t ssrc = 10;
+
+ RtpDemuxerCriteria mid1_pt(mid1);
+ mid1_pt.payload_types() = {payload_type};
+ MockRtpPacketSink sink1;
+ AddSink(mid1_pt, &sink1);
+
+ RtpDemuxerCriteria mid2_pt(mid2);
+ mid2_pt.payload_types() = {payload_type};
+ MockRtpPacketSink sink2;
+ AddSink(mid2_pt, &sink2);
+
+ auto packet = CreatePacketWithSsrc(ssrc);
+ packet->SetPayloadType(payload_type);
+
+ EXPECT_CALL(sink1, OnRtpPacket(_)).Times(0);
+ EXPECT_CALL(sink2, OnRtpPacket(_)).Times(0);
+ EXPECT_FALSE(demuxer_.OnRtpPacket(*packet));
+}
+
+// If two sinks are added with different MIDs but the same payload types, then
+// we cannot demux on the payload type only unless one of the sinks is removed.
+TEST_F(RtpDemuxerTest, RoutedByPayloadTypeIfAmbiguousSinkRemoved) {
+ const std::string mid1 = "v";
+ const std::string mid2 = "a";
+ constexpr uint8_t payload_type = 30;
+ constexpr uint32_t ssrc = 10;
+
+ RtpDemuxerCriteria mid1_pt(mid1);
+ mid1_pt.payload_types().insert(payload_type);
+ MockRtpPacketSink sink1;
+ AddSink(mid1_pt, &sink1);
+
+ RtpDemuxerCriteria mid2_pt(mid2);
+ mid2_pt.payload_types().insert(payload_type);
+ MockRtpPacketSink sink2;
+ AddSink(mid2_pt, &sink2);
+
+ RemoveSink(&sink1);
+
+ auto packet = CreatePacketWithSsrc(ssrc);
+ packet->SetPayloadType(payload_type);
+
+ EXPECT_CALL(sink1, OnRtpPacket(_)).Times(0);
+ EXPECT_CALL(sink2, OnRtpPacket(SamePacketAs(*packet))).Times(1);
+
+ EXPECT_TRUE(demuxer_.OnRtpPacket(*packet));
+}
+
+TEST_F(RtpDemuxerTest, RoutedByPayloadTypeLatchesSsrc) {
+ constexpr uint8_t payload_type = 30;
+ constexpr uint32_t ssrc = 10;
+
+ RtpDemuxerCriteria pt;
+ pt.payload_types().insert(payload_type);
+ NiceMock<MockRtpPacketSink> sink;
+ AddSink(pt, &sink);
+
+ auto packet_with_pt = CreatePacketWithSsrc(ssrc);
+ packet_with_pt->SetPayloadType(payload_type);
+ ASSERT_TRUE(demuxer_.OnRtpPacket(*packet_with_pt));
+
+ auto packet_with_ssrc = CreatePacketWithSsrc(ssrc);
+ EXPECT_CALL(sink, OnRtpPacket(SamePacketAs(*packet_with_ssrc))).Times(1);
+ EXPECT_TRUE(demuxer_.OnRtpPacket(*packet_with_ssrc));
+}
+
+// RSIDs are scoped within MID, so if two sinks are registered with the same
+// RSIDs but different MIDs, then packets containing both extensions should be
+// routed to the correct one.
+TEST_F(RtpDemuxerTest, PacketWithSameRsidDifferentMidRoutedToProperSink) {
+ const std::string mid1 = "mid1";
+ const std::string mid2 = "mid2";
+ const std::string rsid = "rsid";
+ constexpr uint32_t ssrc1 = 10;
+ constexpr uint32_t ssrc2 = 11;
+
+ NiceMock<MockRtpPacketSink> mid1_sink;
+ AddSinkBothMidRsid(mid1, rsid, &mid1_sink);
+
+ MockRtpPacketSink mid2_sink;
+ AddSinkBothMidRsid(mid2, rsid, &mid2_sink);
+
+ auto packet_mid1 = CreatePacketWithSsrcMidRsid(ssrc1, mid1, rsid);
+ ASSERT_TRUE(demuxer_.OnRtpPacket(*packet_mid1));
+
+ auto packet_mid2 = CreatePacketWithSsrcMidRsid(ssrc2, mid2, rsid);
+ EXPECT_CALL(mid2_sink, OnRtpPacket(SamePacketAs(*packet_mid2))).Times(1);
+ EXPECT_TRUE(demuxer_.OnRtpPacket(*packet_mid2));
+}
+
+// If a sink is first bound to a given SSRC by signaling but later a new sink is
+// bound to a given MID by a later signaling, then when a packet arrives with
+// both the SSRC and MID, then the signaled MID sink should take precedence.
+TEST_F(RtpDemuxerTest, SignaledMidShouldOverwriteSignaledSsrc) {
+ constexpr uint32_t ssrc = 11;
+ const std::string mid = "mid";
+
+ MockRtpPacketSink ssrc_sink;
+ AddSinkOnlySsrc(ssrc, &ssrc_sink);
+
+ MockRtpPacketSink mid_sink;
+ AddSinkOnlyMid(mid, &mid_sink);
+
+ auto p = CreatePacketWithSsrcMid(ssrc, mid);
+ EXPECT_CALL(ssrc_sink, OnRtpPacket(_)).Times(0);
+ EXPECT_CALL(mid_sink, OnRtpPacket(SamePacketAs(*p))).Times(1);
+ EXPECT_TRUE(demuxer_.OnRtpPacket(*p));
+}
+
+// Extends the previous test to also ensure that later packets that do not
+// specify MID are still routed to the MID sink rather than the overwritten SSRC
+// sink.
+TEST_F(RtpDemuxerTest, SignaledMidShouldOverwriteSignalledSsrcPersistent) {
+ constexpr uint32_t ssrc = 11;
+ const std::string mid = "mid";
+
+ MockRtpPacketSink ssrc_sink;
+ AddSinkOnlySsrc(ssrc, &ssrc_sink);
+
+ NiceMock<MockRtpPacketSink> mid_sink;
+ AddSinkOnlyMid(mid, &mid_sink);
+
+ EXPECT_CALL(ssrc_sink, OnRtpPacket(_)).Times(0);
+
+ auto packet_with_mid = CreatePacketWithSsrcMid(ssrc, mid);
+ demuxer_.OnRtpPacket(*packet_with_mid);
+
+ auto packet_without_mid = CreatePacketWithSsrc(ssrc);
+ EXPECT_CALL(mid_sink, OnRtpPacket(SamePacketAs(*packet_without_mid)))
+ .Times(1);
+ EXPECT_TRUE(demuxer_.OnRtpPacket(*packet_without_mid));
+}
+
+TEST_F(RtpDemuxerTest, RouteByPayloadTypeMultipleMatch) {
+ constexpr uint32_t ssrc = 10;
+ constexpr uint8_t pt1 = 30;
+ constexpr uint8_t pt2 = 31;
+
+ MockRtpPacketSink sink;
+ RtpDemuxerCriteria criteria;
+ criteria.payload_types() = {pt1, pt2};
+ AddSink(criteria, &sink);
+
+ auto packet_with_pt1 = CreatePacketWithSsrc(ssrc);
+ packet_with_pt1->SetPayloadType(pt1);
+ EXPECT_CALL(sink, OnRtpPacket(SamePacketAs(*packet_with_pt1)));
+ EXPECT_TRUE(demuxer_.OnRtpPacket(*packet_with_pt1));
+
+ auto packet_with_pt2 = CreatePacketWithSsrc(ssrc);
+ packet_with_pt2->SetPayloadType(pt2);
+ EXPECT_CALL(sink, OnRtpPacket(SamePacketAs(*packet_with_pt2)));
+ EXPECT_TRUE(demuxer_.OnRtpPacket(*packet_with_pt2));
+}
+
+TEST_F(RtpDemuxerTest, DontDemuxOnMidAloneIfAddedWithRsid) {
+ const std::string mid = "v";
+ const std::string rsid = "1";
+ constexpr uint32_t ssrc = 10;
+
+ MockRtpPacketSink sink;
+ AddSinkBothMidRsid(mid, rsid, &sink);
+
+ EXPECT_CALL(sink, OnRtpPacket(_)).Times(0);
+
+ auto packet = CreatePacketWithSsrcMid(ssrc, mid);
+ EXPECT_FALSE(demuxer_.OnRtpPacket(*packet));
+}
+
+TEST_F(RtpDemuxerTest, DemuxBySsrcEvenWithMidAndRsid) {
+ const std::string mid = "v";
+ const std::string rsid = "1";
+ constexpr uint32_t ssrc = 10;
+
+ RtpDemuxerCriteria criteria(mid, rsid);
+ criteria.ssrcs().insert(ssrc);
+ MockRtpPacketSink sink;
+ AddSink(criteria, &sink);
+
+ auto packet = CreatePacketWithSsrc(ssrc);
+ EXPECT_CALL(sink, OnRtpPacket(SamePacketAs(*packet))).Times(1);
+ EXPECT_TRUE(demuxer_.OnRtpPacket(*packet));
+}
+
+// In slight deviation from the BUNDLE spec, if we match a sink according to
+// SSRC, then we do not verify payload type against the criteria and defer to
+// the sink to check that it is correct.
+TEST_F(RtpDemuxerTest, DoNotCheckPayloadTypeIfMatchedByOtherCriteria) {
+ constexpr uint32_t ssrc = 10;
+ constexpr uint8_t payload_type = 30;
+ constexpr uint8_t different_payload_type = payload_type + 1;
+
+ RtpDemuxerCriteria criteria;
+ criteria.ssrcs().insert(ssrc);
+ criteria.payload_types().insert(payload_type);
+ MockRtpPacketSink sink;
+ AddSink(criteria, &sink);
+
+ auto packet = CreatePacketWithSsrc(ssrc);
+ packet->SetPayloadType(different_payload_type);
+ EXPECT_CALL(sink, OnRtpPacket(SamePacketAs(*packet))).Times(1);
+ EXPECT_TRUE(demuxer_.OnRtpPacket(*packet));
+}
+
+// If a repair packet includes an RSID it should be ignored and the packet
+// should be routed by its RRID.
+TEST_F(RtpDemuxerTest, PacketWithRsidAndRridRoutedByRrid) {
+ const std::string rsid = "1";
+ const std::string rrid = "1r";
+ constexpr uint32_t ssrc = 10;
+
+ MockRtpPacketSink sink_rsid;
+ AddSinkOnlyRsid(rsid, &sink_rsid);
+
+ MockRtpPacketSink sink_rrid;
+ AddSinkOnlyRsid(rrid, &sink_rrid);
+
+ auto packet = CreatePacketWithSsrcRsidRrid(ssrc, rsid, rrid);
+ EXPECT_CALL(sink_rsid, OnRtpPacket(_)).Times(0);
+ EXPECT_CALL(sink_rrid, OnRtpPacket(SamePacketAs(*packet))).Times(1);
+ EXPECT_TRUE(demuxer_.OnRtpPacket(*packet));
+}
+
+// Same test as above but checks that the latched SSRC routes to the RRID sink.
+TEST_F(RtpDemuxerTest, PacketWithRsidAndRridLatchesSsrcToRrid) {
+ const std::string rsid = "1";
+ const std::string rrid = "1r";
+ constexpr uint32_t ssrc = 10;
+
+ MockRtpPacketSink sink_rsid;
+ AddSinkOnlyRsid(rsid, &sink_rsid);
+
+ NiceMock<MockRtpPacketSink> sink_rrid;
+ AddSinkOnlyRsid(rrid, &sink_rrid);
+
+ auto packet_rsid_rrid = CreatePacketWithSsrcRsidRrid(ssrc, rsid, rrid);
+ demuxer_.OnRtpPacket(*packet_rsid_rrid);
+
+ auto packet_ssrc_only = CreatePacketWithSsrc(ssrc);
+ EXPECT_CALL(sink_rsid, OnRtpPacket(_)).Times(0);
+ EXPECT_CALL(sink_rrid, OnRtpPacket(SamePacketAs(*packet_ssrc_only))).Times(1);
+ EXPECT_TRUE(demuxer_.OnRtpPacket(*packet_ssrc_only));
+}
+
+// Tests that a packet which includes MID and RSID is dropped and not routed by
+// SSRC if the MID and RSID do not match an added sink.
+TEST_F(RtpDemuxerTest, PacketWithMidAndUnknownRsidIsNotRoutedBySsrc) {
+ constexpr uint32_t ssrc = 10;
+ const std::string mid = "v";
+ const std::string rsid = "1";
+ const std::string wrong_rsid = "2";
+
+ RtpDemuxerCriteria criteria(mid, rsid);
+ criteria.ssrcs().insert(ssrc);
+ MockRtpPacketSink sink;
+ AddSink(criteria, &sink);
+
+ auto packet = CreatePacketWithSsrcMidRsid(ssrc, mid, wrong_rsid);
+ EXPECT_CALL(sink, OnRtpPacket(_)).Times(0);
+ EXPECT_FALSE(demuxer_.OnRtpPacket(*packet));
+}
+
+// Tests that a packet which includes MID and RSID is dropped and not routed by
+// payload type if the MID and RSID do not match an added sink.
+TEST_F(RtpDemuxerTest, PacketWithMidAndUnknownRsidIsNotRoutedByPayloadType) {
+ constexpr uint32_t ssrc = 10;
+ const std::string mid = "v";
+ const std::string rsid = "1";
+ const std::string wrong_rsid = "2";
+ constexpr uint8_t payload_type = 30;
+
+ RtpDemuxerCriteria criteria(mid, rsid);
+ criteria.payload_types().insert(payload_type);
+ MockRtpPacketSink sink;
+ AddSink(criteria, &sink);
+
+ auto packet = CreatePacketWithSsrcMidRsid(ssrc, mid, wrong_rsid);
+ packet->SetPayloadType(payload_type);
+ EXPECT_CALL(sink, OnRtpPacket(_)).Times(0);
+ EXPECT_FALSE(demuxer_.OnRtpPacket(*packet));
+}
+
+TEST_F(RtpDemuxerTest, MidMustNotExceedMaximumLength) {
+ MockRtpPacketSink sink1;
+ std::string mid1(BaseRtpStringExtension::kMaxValueSizeBytes + 1, 'a');
+ // Adding the sink should pass even though the supplied mid is too long.
+ // The mid will be truncated though.
+ EXPECT_TRUE(AddSinkOnlyMid(mid1, &sink1));
+
+ // Adding a second sink with a mid that matches the truncated mid that was
+ // just added, should fail.
+ MockRtpPacketSink sink2;
+ std::string mid2(mid1.substr(0, BaseRtpStringExtension::kMaxValueSizeBytes));
+ EXPECT_FALSE(AddSinkOnlyMid(mid2, &sink2));
+ EXPECT_FALSE(RemoveSink(&sink2));
+
+ // Remove the original sink.
+ EXPECT_TRUE(RemoveSink(&sink1));
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+TEST_F(RtpDemuxerDeathTest, CriteriaMustBeNonEmpty) {
+ MockRtpPacketSink sink;
+ RtpDemuxerCriteria criteria;
+ EXPECT_DEATH(AddSink(criteria, &sink), "");
+}
+
+TEST_F(RtpDemuxerDeathTest, RsidMustBeAlphaNumeric) {
+ MockRtpPacketSink sink;
+ EXPECT_DEATH(AddSinkOnlyRsid("a_3", &sink), "");
+}
+
+TEST_F(RtpDemuxerDeathTest, MidMustBeToken) {
+ MockRtpPacketSink sink;
+ EXPECT_DEATH(AddSinkOnlyMid("a(3)", &sink), "");
+}
+
+TEST_F(RtpDemuxerDeathTest, RsidMustNotExceedMaximumLength) {
+ MockRtpPacketSink sink;
+ std::string rsid(BaseRtpStringExtension::kMaxValueSizeBytes + 1, 'a');
+ EXPECT_DEATH(AddSinkOnlyRsid(rsid, &sink), "");
+}
+
+#endif
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/rtp_interfaces_gn/moz.build b/third_party/libwebrtc/call/rtp_interfaces_gn/moz.build
new file mode 100644
index 0000000000..c83031d5b5
--- /dev/null
+++ b/third_party/libwebrtc/call/rtp_interfaces_gn/moz.build
@@ -0,0 +1,236 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/call/rtp_config.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["RTC_ENABLE_WIN_WGC"] = True
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["TARGET_CPU"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["TARGET_CPU"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["TARGET_CPU"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["TARGET_CPU"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["TARGET_CPU"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["TARGET_CPU"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android" and CONFIG["TARGET_CPU"] == "arm":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["OS_TARGET"] == "Android" and CONFIG["TARGET_CPU"] == "x86":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "aarch64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "arm":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "x86":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "x86_64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("rtp_interfaces_gn")
diff --git a/third_party/libwebrtc/call/rtp_packet_sink_interface.h b/third_party/libwebrtc/call/rtp_packet_sink_interface.h
new file mode 100644
index 0000000000..ffbd58c398
--- /dev/null
+++ b/third_party/libwebrtc/call/rtp_packet_sink_interface.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef CALL_RTP_PACKET_SINK_INTERFACE_H_
+#define CALL_RTP_PACKET_SINK_INTERFACE_H_
+
+namespace webrtc {
+
+class RtpPacketReceived;
+
+// This class represents a receiver of already parsed RTP packets.
+class RtpPacketSinkInterface {
+ public:
+ virtual ~RtpPacketSinkInterface() = default;
+ virtual void OnRtpPacket(const RtpPacketReceived& packet) = 0;
+};
+
+} // namespace webrtc
+
+#endif // CALL_RTP_PACKET_SINK_INTERFACE_H_
diff --git a/third_party/libwebrtc/call/rtp_payload_params.cc b/third_party/libwebrtc/call/rtp_payload_params.cc
new file mode 100644
index 0000000000..4b63ebefb3
--- /dev/null
+++ b/third_party/libwebrtc/call/rtp_payload_params.cc
@@ -0,0 +1,790 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/rtp_payload_params.h"
+
+#include <stddef.h>
+
+#include <algorithm>
+
+#include "absl/container/inlined_vector.h"
+#include "absl/strings/match.h"
+#include "absl/types/variant.h"
+#include "api/video/video_timing.h"
+#include "modules/video_coding/codecs/h264/include/h264_globals.h"
+#include "modules/video_coding/codecs/interface/common_constants.h"
+#include "modules/video_coding/codecs/vp8/include/vp8_globals.h"
+#include "modules/video_coding/codecs/vp9/include/vp9_globals.h"
+#include "modules/video_coding/frame_dependencies_calculator.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/random.h"
+#include "rtc_base/time_utils.h"
+
+namespace webrtc {
+namespace {
+
+constexpr int kMaxSimulatedSpatialLayers = 3;
+
+void PopulateRtpWithCodecSpecifics(const CodecSpecificInfo& info,
+ absl::optional<int> spatial_index,
+ RTPVideoHeader* rtp) {
+ rtp->codec = info.codecType;
+ rtp->is_last_frame_in_picture = info.end_of_picture;
+ switch (info.codecType) {
+ case kVideoCodecVP8: {
+ auto& vp8_header = rtp->video_type_header.emplace<RTPVideoHeaderVP8>();
+ vp8_header.InitRTPVideoHeaderVP8();
+ vp8_header.nonReference = info.codecSpecific.VP8.nonReference;
+ vp8_header.temporalIdx = info.codecSpecific.VP8.temporalIdx;
+ vp8_header.layerSync = info.codecSpecific.VP8.layerSync;
+ vp8_header.keyIdx = info.codecSpecific.VP8.keyIdx;
+ return;
+ }
+ case kVideoCodecVP9: {
+ auto& vp9_header = rtp->video_type_header.emplace<RTPVideoHeaderVP9>();
+ vp9_header.InitRTPVideoHeaderVP9();
+ vp9_header.inter_pic_predicted =
+ info.codecSpecific.VP9.inter_pic_predicted;
+ vp9_header.flexible_mode = info.codecSpecific.VP9.flexible_mode;
+ vp9_header.ss_data_available = info.codecSpecific.VP9.ss_data_available;
+ vp9_header.non_ref_for_inter_layer_pred =
+ info.codecSpecific.VP9.non_ref_for_inter_layer_pred;
+ vp9_header.temporal_idx = info.codecSpecific.VP9.temporal_idx;
+ vp9_header.temporal_up_switch = info.codecSpecific.VP9.temporal_up_switch;
+ vp9_header.inter_layer_predicted =
+ info.codecSpecific.VP9.inter_layer_predicted;
+ vp9_header.gof_idx = info.codecSpecific.VP9.gof_idx;
+ vp9_header.num_spatial_layers = info.codecSpecific.VP9.num_spatial_layers;
+ vp9_header.first_active_layer = info.codecSpecific.VP9.first_active_layer;
+ if (vp9_header.num_spatial_layers > 1) {
+ vp9_header.spatial_idx = spatial_index.value_or(kNoSpatialIdx);
+ } else {
+ vp9_header.spatial_idx = kNoSpatialIdx;
+ }
+ if (info.codecSpecific.VP9.ss_data_available) {
+ vp9_header.spatial_layer_resolution_present =
+ info.codecSpecific.VP9.spatial_layer_resolution_present;
+ if (info.codecSpecific.VP9.spatial_layer_resolution_present) {
+ for (size_t i = 0; i < info.codecSpecific.VP9.num_spatial_layers;
+ ++i) {
+ vp9_header.width[i] = info.codecSpecific.VP9.width[i];
+ vp9_header.height[i] = info.codecSpecific.VP9.height[i];
+ }
+ }
+ vp9_header.gof.CopyGofInfoVP9(info.codecSpecific.VP9.gof);
+ }
+
+ vp9_header.num_ref_pics = info.codecSpecific.VP9.num_ref_pics;
+ for (int i = 0; i < info.codecSpecific.VP9.num_ref_pics; ++i) {
+ vp9_header.pid_diff[i] = info.codecSpecific.VP9.p_diff[i];
+ }
+ vp9_header.end_of_picture = info.end_of_picture;
+ return;
+ }
+ case kVideoCodecH264: {
+ auto& h264_header = rtp->video_type_header.emplace<RTPVideoHeaderH264>();
+ h264_header.packetization_mode =
+ info.codecSpecific.H264.packetization_mode;
+ return;
+ }
+ case kVideoCodecMultiplex:
+ case kVideoCodecGeneric:
+ rtp->codec = kVideoCodecGeneric;
+ return;
+ // TODO(bugs.webrtc.org/13485): Implement H265 codec specific info
+ default:
+ return;
+ }
+}
+
+void SetVideoTiming(const EncodedImage& image, VideoSendTiming* timing) {
+ if (image.timing_.flags == VideoSendTiming::TimingFrameFlags::kInvalid ||
+ image.timing_.flags == VideoSendTiming::TimingFrameFlags::kNotTriggered) {
+ timing->flags = VideoSendTiming::TimingFrameFlags::kInvalid;
+ return;
+ }
+
+ timing->encode_start_delta_ms = VideoSendTiming::GetDeltaCappedMs(
+ image.capture_time_ms_, image.timing_.encode_start_ms);
+ timing->encode_finish_delta_ms = VideoSendTiming::GetDeltaCappedMs(
+ image.capture_time_ms_, image.timing_.encode_finish_ms);
+ timing->packetization_finish_delta_ms = 0;
+ timing->pacer_exit_delta_ms = 0;
+ timing->network_timestamp_delta_ms = 0;
+ timing->network2_timestamp_delta_ms = 0;
+ timing->flags = image.timing_.flags;
+}
+
+// Returns structure that aligns with simulated generic info. The templates
+// allow to produce valid dependency descriptor for any stream where
+// `num_spatial_layers` * `num_temporal_layers` <= 32 (limited by
+// https://aomediacodec.github.io/av1-rtp-spec/#a82-syntax, see
+// template_fdiffs()). The set of the templates is not tuned for any particular
+// structure thus dependency descriptor would use more bytes on the wire than
+// with tuned templates.
+FrameDependencyStructure MinimalisticStructure(int num_spatial_layers,
+ int num_temporal_layers) {
+ RTC_DCHECK_LE(num_spatial_layers, DependencyDescriptor::kMaxSpatialIds);
+ RTC_DCHECK_LE(num_temporal_layers, DependencyDescriptor::kMaxTemporalIds);
+ RTC_DCHECK_LE(num_spatial_layers * num_temporal_layers, 32);
+ FrameDependencyStructure structure;
+ structure.num_decode_targets = num_spatial_layers * num_temporal_layers;
+ structure.num_chains = num_spatial_layers;
+ structure.templates.reserve(num_spatial_layers * num_temporal_layers);
+ for (int sid = 0; sid < num_spatial_layers; ++sid) {
+ for (int tid = 0; tid < num_temporal_layers; ++tid) {
+ FrameDependencyTemplate a_template;
+ a_template.spatial_id = sid;
+ a_template.temporal_id = tid;
+ for (int s = 0; s < num_spatial_layers; ++s) {
+ for (int t = 0; t < num_temporal_layers; ++t) {
+          // Prefer kSwitch indication for frames that are part of the decode
+ // target because dependency descriptor information generated in this
+          // class uses kSwitch indications more often than kRequired, increasing
+ // the chance of a good (or complete) template match.
+ a_template.decode_target_indications.push_back(
+ sid <= s && tid <= t ? DecodeTargetIndication::kSwitch
+ : DecodeTargetIndication::kNotPresent);
+ }
+ }
+ a_template.frame_diffs.push_back(tid == 0 ? num_spatial_layers *
+ num_temporal_layers
+ : num_spatial_layers);
+ a_template.chain_diffs.assign(structure.num_chains, 1);
+ structure.templates.push_back(a_template);
+
+ structure.decode_target_protected_by_chain.push_back(sid);
+ }
+ }
+ return structure;
+}
+} // namespace
+
+RtpPayloadParams::RtpPayloadParams(const uint32_t ssrc,
+ const RtpPayloadState* state,
+ const FieldTrialsView& trials)
+ : ssrc_(ssrc),
+ generic_picture_id_experiment_(
+ absl::StartsWith(trials.Lookup("WebRTC-GenericPictureId"),
+ "Enabled")),
+ simulate_generic_structure_(absl::StartsWith(
+ trials.Lookup("WebRTC-GenericCodecDependencyDescriptor"),
+ "Enabled")) {
+ for (auto& spatial_layer : last_shared_frame_id_)
+ spatial_layer.fill(-1);
+
+ chain_last_frame_id_.fill(-1);
+ buffer_id_to_frame_id_.fill(-1);
+
+ Random random(rtc::TimeMicros());
+ state_.picture_id =
+ state ? state->picture_id : (random.Rand<int16_t>() & 0x7FFF);
+ state_.tl0_pic_idx = state ? state->tl0_pic_idx : (random.Rand<uint8_t>());
+}
+
+RtpPayloadParams::RtpPayloadParams(const RtpPayloadParams& other) = default;
+
+RtpPayloadParams::~RtpPayloadParams() {}
+
+RTPVideoHeader RtpPayloadParams::GetRtpVideoHeader(
+ const EncodedImage& image,
+ const CodecSpecificInfo* codec_specific_info,
+ int64_t shared_frame_id) {
+ RTPVideoHeader rtp_video_header;
+ if (codec_specific_info) {
+ PopulateRtpWithCodecSpecifics(*codec_specific_info, image.SpatialIndex(),
+ &rtp_video_header);
+ }
+ rtp_video_header.simulcastIdx = image.SimulcastIndex().value_or(0);
+ rtp_video_header.frame_type = image._frameType;
+ rtp_video_header.rotation = image.rotation_;
+ rtp_video_header.content_type = image.content_type_;
+ rtp_video_header.playout_delay = image.PlayoutDelay();
+ rtp_video_header.width = image._encodedWidth;
+ rtp_video_header.height = image._encodedHeight;
+ rtp_video_header.color_space = image.ColorSpace()
+ ? absl::make_optional(*image.ColorSpace())
+ : absl::nullopt;
+ rtp_video_header.video_frame_tracking_id = image.VideoFrameTrackingId();
+ SetVideoTiming(image, &rtp_video_header.video_timing);
+
+ const bool is_keyframe = image._frameType == VideoFrameType::kVideoFrameKey;
+ const bool first_frame_in_picture =
+ (codec_specific_info && codec_specific_info->codecType == kVideoCodecVP9)
+ ? codec_specific_info->codecSpecific.VP9.first_frame_in_picture
+ : true;
+
+ SetCodecSpecific(&rtp_video_header, first_frame_in_picture);
+
+ SetGeneric(codec_specific_info, shared_frame_id, is_keyframe,
+ &rtp_video_header);
+
+ return rtp_video_header;
+}
+
+uint32_t RtpPayloadParams::ssrc() const {
+ return ssrc_;
+}
+
+RtpPayloadState RtpPayloadParams::state() const {
+ return state_;
+}
+
+void RtpPayloadParams::SetCodecSpecific(RTPVideoHeader* rtp_video_header,
+ bool first_frame_in_picture) {
+ // Always set picture id. Set tl0_pic_idx iff temporal index is set.
+ if (first_frame_in_picture) {
+ state_.picture_id = (static_cast<uint16_t>(state_.picture_id) + 1) & 0x7FFF;
+ }
+ if (rtp_video_header->codec == kVideoCodecVP8) {
+ auto& vp8_header =
+ absl::get<RTPVideoHeaderVP8>(rtp_video_header->video_type_header);
+ vp8_header.pictureId = state_.picture_id;
+
+ if (vp8_header.temporalIdx != kNoTemporalIdx) {
+ if (vp8_header.temporalIdx == 0) {
+ ++state_.tl0_pic_idx;
+ }
+ vp8_header.tl0PicIdx = state_.tl0_pic_idx;
+ }
+ }
+ if (rtp_video_header->codec == kVideoCodecVP9) {
+ auto& vp9_header =
+ absl::get<RTPVideoHeaderVP9>(rtp_video_header->video_type_header);
+ vp9_header.picture_id = state_.picture_id;
+
+ // Note that in the case that we have no temporal layers but we do have
+ // spatial layers, packets will carry layering info with a temporal_idx of
+ // zero, and we then have to set and increment tl0_pic_idx.
+ if (vp9_header.temporal_idx != kNoTemporalIdx ||
+ vp9_header.spatial_idx != kNoSpatialIdx) {
+ if (first_frame_in_picture &&
+ (vp9_header.temporal_idx == 0 ||
+ vp9_header.temporal_idx == kNoTemporalIdx)) {
+ ++state_.tl0_pic_idx;
+ }
+ vp9_header.tl0_pic_idx = state_.tl0_pic_idx;
+ }
+ }
+ if (generic_picture_id_experiment_ &&
+ rtp_video_header->codec == kVideoCodecGeneric) {
+ rtp_video_header->video_type_header.emplace<RTPVideoHeaderLegacyGeneric>()
+ .picture_id = state_.picture_id;
+ }
+}
+
+RTPVideoHeader::GenericDescriptorInfo
+RtpPayloadParams::GenericDescriptorFromFrameInfo(
+ const GenericFrameInfo& frame_info,
+ int64_t frame_id) {
+ RTPVideoHeader::GenericDescriptorInfo generic;
+ generic.frame_id = frame_id;
+ generic.dependencies = dependencies_calculator_.FromBuffersUsage(
+ frame_id, frame_info.encoder_buffers);
+ generic.chain_diffs =
+ chains_calculator_.From(frame_id, frame_info.part_of_chain);
+ generic.spatial_index = frame_info.spatial_id;
+ generic.temporal_index = frame_info.temporal_id;
+ generic.decode_target_indications = frame_info.decode_target_indications;
+ generic.active_decode_targets = frame_info.active_decode_targets;
+ return generic;
+}
+
+void RtpPayloadParams::SetGeneric(const CodecSpecificInfo* codec_specific_info,
+ int64_t frame_id,
+ bool is_keyframe,
+ RTPVideoHeader* rtp_video_header) {
+ if (codec_specific_info && codec_specific_info->generic_frame_info &&
+ !codec_specific_info->generic_frame_info->encoder_buffers.empty()) {
+ if (is_keyframe) {
+ // Key frame resets all chains it is in.
+ chains_calculator_.Reset(
+ codec_specific_info->generic_frame_info->part_of_chain);
+ }
+ rtp_video_header->generic = GenericDescriptorFromFrameInfo(
+ *codec_specific_info->generic_frame_info, frame_id);
+ return;
+ }
+
+ switch (rtp_video_header->codec) {
+ case VideoCodecType::kVideoCodecGeneric:
+ GenericToGeneric(frame_id, is_keyframe, rtp_video_header);
+ return;
+ case VideoCodecType::kVideoCodecVP8:
+ if (codec_specific_info) {
+ Vp8ToGeneric(codec_specific_info->codecSpecific.VP8, frame_id,
+ is_keyframe, rtp_video_header);
+ }
+ return;
+ case VideoCodecType::kVideoCodecVP9:
+ if (codec_specific_info != nullptr) {
+ Vp9ToGeneric(codec_specific_info->codecSpecific.VP9, frame_id,
+ *rtp_video_header);
+ }
+ return;
+ case VideoCodecType::kVideoCodecAV1:
+ // TODO(philipel): Implement AV1 to generic descriptor.
+ return;
+ case VideoCodecType::kVideoCodecH264:
+ if (codec_specific_info) {
+ H264ToGeneric(codec_specific_info->codecSpecific.H264, frame_id,
+ is_keyframe, rtp_video_header);
+ }
+ return;
+ case VideoCodecType::kVideoCodecMultiplex:
+ return;
+ case VideoCodecType::kVideoCodecH265:
+ // TODO(bugs.webrtc.org/13485): Implement H265 to generic descriptor.
+ return;
+ }
+ RTC_DCHECK_NOTREACHED() << "Unsupported codec.";
+}
+
+absl::optional<FrameDependencyStructure> RtpPayloadParams::GenericStructure(
+ const CodecSpecificInfo* codec_specific_info) {
+ if (codec_specific_info == nullptr) {
+ return absl::nullopt;
+ }
+ // This helper shouldn't be used when template structure is specified
+  // explicitly.
+ RTC_DCHECK(!codec_specific_info->template_structure.has_value());
+ switch (codec_specific_info->codecType) {
+ case VideoCodecType::kVideoCodecGeneric:
+ if (simulate_generic_structure_) {
+ return MinimalisticStructure(/*num_spatial_layers=*/1,
+ /*num_temporal_layer=*/1);
+ }
+ return absl::nullopt;
+ case VideoCodecType::kVideoCodecVP8:
+ return MinimalisticStructure(/*num_spatial_layers=*/1,
+ /*num_temporal_layer=*/kMaxTemporalStreams);
+ case VideoCodecType::kVideoCodecVP9: {
+ absl::optional<FrameDependencyStructure> structure =
+ MinimalisticStructure(
+ /*num_spatial_layers=*/kMaxSimulatedSpatialLayers,
+ /*num_temporal_layer=*/kMaxTemporalStreams);
+ const CodecSpecificInfoVP9& vp9 = codec_specific_info->codecSpecific.VP9;
+ if (vp9.ss_data_available && vp9.spatial_layer_resolution_present) {
+ RenderResolution first_valid;
+ RenderResolution last_valid;
+ for (size_t i = 0; i < vp9.num_spatial_layers; ++i) {
+ RenderResolution r(vp9.width[i], vp9.height[i]);
+ if (r.Valid()) {
+ if (!first_valid.Valid()) {
+ first_valid = r;
+ }
+ last_valid = r;
+ }
+ structure->resolutions.push_back(r);
+ }
+ if (!last_valid.Valid()) {
+ // No valid resolution found. Do not send resolutions.
+ structure->resolutions.clear();
+ } else {
+ structure->resolutions.resize(kMaxSimulatedSpatialLayers, last_valid);
+ // VP9 encoder wrapper may disable first few spatial layers by
+ // setting invalid resolution (0,0). `structure->resolutions`
+ // doesn't support invalid resolution, so reset them to something
+ // valid.
+ for (RenderResolution& r : structure->resolutions) {
+ if (!r.Valid()) {
+ r = first_valid;
+ }
+ }
+ }
+ }
+ return structure;
+ }
+ case VideoCodecType::kVideoCodecAV1:
+ case VideoCodecType::kVideoCodecH264:
+ case VideoCodecType::kVideoCodecH265:
+ case VideoCodecType::kVideoCodecMultiplex:
+ return absl::nullopt;
+ }
+ RTC_DCHECK_NOTREACHED() << "Unsupported codec.";
+}
+
+void RtpPayloadParams::GenericToGeneric(int64_t shared_frame_id,
+ bool is_keyframe,
+ RTPVideoHeader* rtp_video_header) {
+ RTPVideoHeader::GenericDescriptorInfo& generic =
+ rtp_video_header->generic.emplace();
+
+ generic.frame_id = shared_frame_id;
+ generic.decode_target_indications.push_back(DecodeTargetIndication::kSwitch);
+
+ if (is_keyframe) {
+ generic.chain_diffs.push_back(0);
+ last_shared_frame_id_[0].fill(-1);
+ } else {
+ int64_t frame_id = last_shared_frame_id_[0][0];
+ RTC_DCHECK_NE(frame_id, -1);
+ RTC_DCHECK_LT(frame_id, shared_frame_id);
+ generic.chain_diffs.push_back(shared_frame_id - frame_id);
+ generic.dependencies.push_back(frame_id);
+ }
+
+ last_shared_frame_id_[0][0] = shared_frame_id;
+}
+
+void RtpPayloadParams::H264ToGeneric(const CodecSpecificInfoH264& h264_info,
+ int64_t shared_frame_id,
+ bool is_keyframe,
+ RTPVideoHeader* rtp_video_header) {
+ const int temporal_index =
+ h264_info.temporal_idx != kNoTemporalIdx ? h264_info.temporal_idx : 0;
+
+ if (temporal_index >= RtpGenericFrameDescriptor::kMaxTemporalLayers) {
+ RTC_LOG(LS_WARNING) << "Temporal and/or spatial index is too high to be "
+ "used with generic frame descriptor.";
+ return;
+ }
+
+ RTPVideoHeader::GenericDescriptorInfo& generic =
+ rtp_video_header->generic.emplace();
+
+ generic.frame_id = shared_frame_id;
+ generic.temporal_index = temporal_index;
+
+ if (is_keyframe) {
+ RTC_DCHECK_EQ(temporal_index, 0);
+ last_shared_frame_id_[/*spatial index*/ 0].fill(-1);
+ last_shared_frame_id_[/*spatial index*/ 0][temporal_index] =
+ shared_frame_id;
+ return;
+ }
+
+ if (h264_info.base_layer_sync) {
+ int64_t tl0_frame_id = last_shared_frame_id_[/*spatial index*/ 0][0];
+
+ for (int i = 1; i < RtpGenericFrameDescriptor::kMaxTemporalLayers; ++i) {
+ if (last_shared_frame_id_[/*spatial index*/ 0][i] < tl0_frame_id) {
+ last_shared_frame_id_[/*spatial index*/ 0][i] = -1;
+ }
+ }
+
+ RTC_DCHECK_GE(tl0_frame_id, 0);
+ RTC_DCHECK_LT(tl0_frame_id, shared_frame_id);
+ generic.dependencies.push_back(tl0_frame_id);
+ } else {
+ for (int i = 0; i <= temporal_index; ++i) {
+ int64_t frame_id = last_shared_frame_id_[/*spatial index*/ 0][i];
+
+ if (frame_id != -1) {
+ RTC_DCHECK_LT(frame_id, shared_frame_id);
+ generic.dependencies.push_back(frame_id);
+ }
+ }
+ }
+
+ last_shared_frame_id_[/*spatial_index*/ 0][temporal_index] = shared_frame_id;
+}
+
+void RtpPayloadParams::Vp8ToGeneric(const CodecSpecificInfoVP8& vp8_info,
+ int64_t shared_frame_id,
+ bool is_keyframe,
+ RTPVideoHeader* rtp_video_header) {
+ const auto& vp8_header =
+ absl::get<RTPVideoHeaderVP8>(rtp_video_header->video_type_header);
+ const int spatial_index = 0;
+ const int temporal_index =
+ vp8_header.temporalIdx != kNoTemporalIdx ? vp8_header.temporalIdx : 0;
+
+ if (temporal_index >= RtpGenericFrameDescriptor::kMaxTemporalLayers ||
+ spatial_index >= RtpGenericFrameDescriptor::kMaxSpatialLayers) {
+ RTC_LOG(LS_WARNING) << "Temporal and/or spatial index is too high to be "
+ "used with generic frame descriptor.";
+ return;
+ }
+
+ RTPVideoHeader::GenericDescriptorInfo& generic =
+ rtp_video_header->generic.emplace();
+
+ generic.frame_id = shared_frame_id;
+ generic.spatial_index = spatial_index;
+ generic.temporal_index = temporal_index;
+
+ // Generate decode target indications.
+ RTC_DCHECK_LT(temporal_index, kMaxTemporalStreams);
+ generic.decode_target_indications.resize(kMaxTemporalStreams);
+ auto it = std::fill_n(generic.decode_target_indications.begin(),
+ temporal_index, DecodeTargetIndication::kNotPresent);
+ std::fill(it, generic.decode_target_indications.end(),
+ DecodeTargetIndication::kSwitch);
+
+ // Frame dependencies.
+ if (vp8_info.useExplicitDependencies) {
+ SetDependenciesVp8New(vp8_info, shared_frame_id, is_keyframe,
+ vp8_header.layerSync, &generic);
+ } else {
+ SetDependenciesVp8Deprecated(vp8_info, shared_frame_id, is_keyframe,
+ spatial_index, temporal_index,
+ vp8_header.layerSync, &generic);
+ }
+
+ // Calculate chains.
+ generic.chain_diffs = {
+ (is_keyframe || chain_last_frame_id_[0] < 0)
+ ? 0
+ : static_cast<int>(shared_frame_id - chain_last_frame_id_[0])};
+ if (temporal_index == 0) {
+ chain_last_frame_id_[0] = shared_frame_id;
+ }
+}
+
+void RtpPayloadParams::Vp9ToGeneric(const CodecSpecificInfoVP9& vp9_info,
+ int64_t shared_frame_id,
+ RTPVideoHeader& rtp_video_header) {
+ const auto& vp9_header =
+ absl::get<RTPVideoHeaderVP9>(rtp_video_header.video_type_header);
+ const int num_spatial_layers = kMaxSimulatedSpatialLayers;
+ const int first_active_spatial_id = vp9_header.first_active_layer;
+ const int last_active_spatial_id = vp9_header.num_spatial_layers - 1;
+ const int num_temporal_layers = kMaxTemporalStreams;
+ static_assert(num_spatial_layers <=
+ RtpGenericFrameDescriptor::kMaxSpatialLayers);
+ static_assert(num_temporal_layers <=
+ RtpGenericFrameDescriptor::kMaxTemporalLayers);
+ static_assert(num_spatial_layers <= DependencyDescriptor::kMaxSpatialIds);
+ static_assert(num_temporal_layers <= DependencyDescriptor::kMaxTemporalIds);
+
+ int spatial_index =
+ vp9_header.spatial_idx != kNoSpatialIdx ? vp9_header.spatial_idx : 0;
+ int temporal_index =
+ vp9_header.temporal_idx != kNoTemporalIdx ? vp9_header.temporal_idx : 0;
+
+ if (!(temporal_index < num_temporal_layers &&
+ first_active_spatial_id <= spatial_index &&
+ spatial_index <= last_active_spatial_id &&
+ last_active_spatial_id < num_spatial_layers)) {
+ // Prefer to generate no generic layering than an inconsistent one.
+ RTC_LOG(LS_ERROR) << "Inconsistent layer id sid=" << spatial_index
+ << ",tid=" << temporal_index
+ << " in VP9 header. Active spatial ids: ["
+ << first_active_spatial_id << ","
+ << last_active_spatial_id << "]";
+ return;
+ }
+
+ RTPVideoHeader::GenericDescriptorInfo& result =
+ rtp_video_header.generic.emplace();
+
+ result.frame_id = shared_frame_id;
+ result.spatial_index = spatial_index;
+ result.temporal_index = temporal_index;
+
+ result.decode_target_indications.reserve(num_spatial_layers *
+ num_temporal_layers);
+ for (int sid = 0; sid < num_spatial_layers; ++sid) {
+ for (int tid = 0; tid < num_temporal_layers; ++tid) {
+ DecodeTargetIndication dti;
+ if (sid < spatial_index || tid < temporal_index) {
+ dti = DecodeTargetIndication::kNotPresent;
+ } else if (spatial_index != sid &&
+ vp9_header.non_ref_for_inter_layer_pred) {
+ dti = DecodeTargetIndication::kNotPresent;
+ } else if (sid == spatial_index && tid == temporal_index) {
+ // Assume that if frame is decodable, all of its own layer is decodable.
+ dti = DecodeTargetIndication::kSwitch;
+ } else if (sid == spatial_index && vp9_header.temporal_up_switch) {
+ dti = DecodeTargetIndication::kSwitch;
+ } else if (!vp9_header.inter_pic_predicted) {
+ // Key frame or spatial upswitch
+ dti = DecodeTargetIndication::kSwitch;
+ } else {
+ // Make no other assumptions. That should be safe, though suboptimal.
+ // To provide more accurate dti, encoder wrapper should fill in
+ // CodecSpecificInfo::generic_frame_info
+ dti = DecodeTargetIndication::kRequired;
+ }
+ result.decode_target_indications.push_back(dti);
+ }
+ }
+
+ // Calculate frame dependencies.
+ static constexpr int kPictureDiffLimit = 128;
+ if (last_vp9_frame_id_.empty()) {
+ // Create the array only if it is ever used.
+ last_vp9_frame_id_.resize(kPictureDiffLimit);
+ }
+
+ if (vp9_header.flexible_mode) {
+ if (vp9_header.inter_layer_predicted && spatial_index > 0) {
+ result.dependencies.push_back(
+ last_vp9_frame_id_[vp9_header.picture_id % kPictureDiffLimit]
+ [spatial_index - 1]);
+ }
+ if (vp9_header.inter_pic_predicted) {
+ for (size_t i = 0; i < vp9_header.num_ref_pics; ++i) {
+        // picture_id is a 15 bit number that wraps around. Though underflow may
+ // produce picture that exceeds 2^15, it is ok because in this
+ // code block only last 7 bits of the picture_id are used.
+ uint16_t depend_on = vp9_header.picture_id - vp9_header.pid_diff[i];
+ result.dependencies.push_back(
+ last_vp9_frame_id_[depend_on % kPictureDiffLimit][spatial_index]);
+ }
+ }
+ last_vp9_frame_id_[vp9_header.picture_id % kPictureDiffLimit]
+ [spatial_index] = shared_frame_id;
+ } else {
+ // Implementing general conversion logic for non-flexible mode requires some
+ // work and we will almost certainly never need it, so for now support only
+    // non-layered streams.
+ if (spatial_index > 0 || temporal_index > 0) {
+ // Prefer to generate no generic layering than an inconsistent one.
+ rtp_video_header.generic.reset();
+ return;
+ }
+
+ if (vp9_header.inter_pic_predicted) {
+ // Since we only support non-scalable streams we only need to save the
+ // last frame id.
+ result.dependencies.push_back(last_vp9_frame_id_[0][0]);
+ }
+ last_vp9_frame_id_[0][0] = shared_frame_id;
+ }
+
+ result.active_decode_targets =
+ ((uint32_t{1} << num_temporal_layers * (last_active_spatial_id + 1)) -
+ 1) ^
+ ((uint32_t{1} << num_temporal_layers * first_active_spatial_id) - 1);
+
+  // Calculate chains, assuming chain includes all frames with temporal_id = 0
+ if (!vp9_header.inter_pic_predicted && !vp9_header.inter_layer_predicted) {
+ // Assume frames without dependencies also reset chains.
+ for (int sid = spatial_index; sid <= last_active_spatial_id; ++sid) {
+ chain_last_frame_id_[sid] = -1;
+ }
+ }
+ result.chain_diffs.resize(num_spatial_layers, 0);
+ for (int sid = first_active_spatial_id; sid <= last_active_spatial_id;
+ ++sid) {
+ if (chain_last_frame_id_[sid] == -1) {
+ result.chain_diffs[sid] = 0;
+ continue;
+ }
+ int64_t chain_diff = shared_frame_id - chain_last_frame_id_[sid];
+ if (chain_diff >= 256) {
+ RTC_LOG(LS_ERROR)
+ << "Too many frames since last VP9 T0 frame for spatial layer #"
+ << sid << " at frame#" << shared_frame_id;
+ chain_last_frame_id_[sid] = -1;
+ chain_diff = 0;
+ }
+ result.chain_diffs[sid] = chain_diff;
+ }
+
+ if (temporal_index == 0) {
+ chain_last_frame_id_[spatial_index] = shared_frame_id;
+ if (!vp9_header.non_ref_for_inter_layer_pred) {
+ for (int sid = spatial_index + 1; sid <= last_active_spatial_id; ++sid) {
+ chain_last_frame_id_[sid] = shared_frame_id;
+ }
+ }
+ }
+}
+
+void RtpPayloadParams::SetDependenciesVp8Deprecated(
+ const CodecSpecificInfoVP8& vp8_info,
+ int64_t shared_frame_id,
+ bool is_keyframe,
+ int spatial_index,
+ int temporal_index,
+ bool layer_sync,
+ RTPVideoHeader::GenericDescriptorInfo* generic) {
+ RTC_DCHECK(!vp8_info.useExplicitDependencies);
+ RTC_DCHECK(!new_version_used_.has_value() || !new_version_used_.value());
+ new_version_used_ = false;
+
+ if (is_keyframe) {
+ RTC_DCHECK_EQ(temporal_index, 0);
+ last_shared_frame_id_[spatial_index].fill(-1);
+ last_shared_frame_id_[spatial_index][temporal_index] = shared_frame_id;
+ return;
+ }
+
+ if (layer_sync) {
+ int64_t tl0_frame_id = last_shared_frame_id_[spatial_index][0];
+
+ for (int i = 1; i < RtpGenericFrameDescriptor::kMaxTemporalLayers; ++i) {
+ if (last_shared_frame_id_[spatial_index][i] < tl0_frame_id) {
+ last_shared_frame_id_[spatial_index][i] = -1;
+ }
+ }
+
+ RTC_DCHECK_GE(tl0_frame_id, 0);
+ RTC_DCHECK_LT(tl0_frame_id, shared_frame_id);
+ generic->dependencies.push_back(tl0_frame_id);
+ } else {
+ for (int i = 0; i <= temporal_index; ++i) {
+ int64_t frame_id = last_shared_frame_id_[spatial_index][i];
+
+ if (frame_id != -1) {
+ RTC_DCHECK_LT(frame_id, shared_frame_id);
+ generic->dependencies.push_back(frame_id);
+ }
+ }
+ }
+
+ last_shared_frame_id_[spatial_index][temporal_index] = shared_frame_id;
+}
+
+void RtpPayloadParams::SetDependenciesVp8New(
+ const CodecSpecificInfoVP8& vp8_info,
+ int64_t shared_frame_id,
+ bool is_keyframe,
+ bool layer_sync,
+ RTPVideoHeader::GenericDescriptorInfo* generic) {
+ RTC_DCHECK(vp8_info.useExplicitDependencies);
+ RTC_DCHECK(!new_version_used_.has_value() || new_version_used_.value());
+ new_version_used_ = true;
+
+ if (is_keyframe) {
+ RTC_DCHECK_EQ(vp8_info.referencedBuffersCount, 0u);
+ buffer_id_to_frame_id_.fill(shared_frame_id);
+ return;
+ }
+
+ constexpr size_t kBuffersCountVp8 = CodecSpecificInfoVP8::kBuffersCount;
+
+ RTC_DCHECK_GT(vp8_info.referencedBuffersCount, 0u);
+ RTC_DCHECK_LE(vp8_info.referencedBuffersCount,
+ arraysize(vp8_info.referencedBuffers));
+
+ for (size_t i = 0; i < vp8_info.referencedBuffersCount; ++i) {
+ const size_t referenced_buffer = vp8_info.referencedBuffers[i];
+ RTC_DCHECK_LT(referenced_buffer, kBuffersCountVp8);
+ RTC_DCHECK_LT(referenced_buffer, buffer_id_to_frame_id_.size());
+
+ const int64_t dependency_frame_id =
+ buffer_id_to_frame_id_[referenced_buffer];
+ RTC_DCHECK_GE(dependency_frame_id, 0);
+ RTC_DCHECK_LT(dependency_frame_id, shared_frame_id);
+
+ const bool is_new_dependency =
+ std::find(generic->dependencies.begin(), generic->dependencies.end(),
+ dependency_frame_id) == generic->dependencies.end();
+ if (is_new_dependency) {
+ generic->dependencies.push_back(dependency_frame_id);
+ }
+ }
+
+ RTC_DCHECK_LE(vp8_info.updatedBuffersCount, kBuffersCountVp8);
+ for (size_t i = 0; i < vp8_info.updatedBuffersCount; ++i) {
+ const size_t updated_id = vp8_info.updatedBuffers[i];
+ buffer_id_to_frame_id_[updated_id] = shared_frame_id;
+ }
+
+ RTC_DCHECK_LE(buffer_id_to_frame_id_.size(), kBuffersCountVp8);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/rtp_payload_params.h b/third_party/libwebrtc/call/rtp_payload_params.h
new file mode 100644
index 0000000000..5feee11ab0
--- /dev/null
+++ b/third_party/libwebrtc/call/rtp_payload_params.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef CALL_RTP_PAYLOAD_PARAMS_H_
+#define CALL_RTP_PAYLOAD_PARAMS_H_
+
+#include <array>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/field_trials_view.h"
+#include "api/video_codecs/video_encoder.h"
+#include "call/rtp_config.h"
+#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor.h"
+#include "modules/rtp_rtcp/source/rtp_video_header.h"
+#include "modules/video_coding/chain_diff_calculator.h"
+#include "modules/video_coding/frame_dependencies_calculator.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+
+namespace webrtc {
+
+// State for setting picture id and tl0 pic idx, for VP8 and VP9
+// TODO(nisse): Make these properties not codec specific.
+class RtpPayloadParams final {
+ public:
+ RtpPayloadParams(uint32_t ssrc,
+ const RtpPayloadState* state,
+ const FieldTrialsView& trials);
+ RtpPayloadParams(const RtpPayloadParams& other);
+ ~RtpPayloadParams();
+
+ RTPVideoHeader GetRtpVideoHeader(const EncodedImage& image,
+ const CodecSpecificInfo* codec_specific_info,
+ int64_t shared_frame_id);
+
+  // Returns a structure that aligns with the simulated generic info generated
+  // by `GetRtpVideoHeader` for the given `codec_specific_info`.
+ absl::optional<FrameDependencyStructure> GenericStructure(
+ const CodecSpecificInfo* codec_specific_info);
+
+ uint32_t ssrc() const;
+
+ RtpPayloadState state() const;
+
+ private:
+ void SetCodecSpecific(RTPVideoHeader* rtp_video_header,
+ bool first_frame_in_picture);
+ RTPVideoHeader::GenericDescriptorInfo GenericDescriptorFromFrameInfo(
+ const GenericFrameInfo& frame_info,
+ int64_t frame_id);
+ void SetGeneric(const CodecSpecificInfo* codec_specific_info,
+ int64_t frame_id,
+ bool is_keyframe,
+ RTPVideoHeader* rtp_video_header);
+
+ void Vp8ToGeneric(const CodecSpecificInfoVP8& vp8_info,
+ int64_t shared_frame_id,
+ bool is_keyframe,
+ RTPVideoHeader* rtp_video_header);
+
+ void Vp9ToGeneric(const CodecSpecificInfoVP9& vp9_info,
+ int64_t shared_frame_id,
+ RTPVideoHeader& rtp_video_header);
+
+ void H264ToGeneric(const CodecSpecificInfoH264& h264_info,
+ int64_t shared_frame_id,
+ bool is_keyframe,
+ RTPVideoHeader* rtp_video_header);
+
+ void GenericToGeneric(int64_t shared_frame_id,
+ bool is_keyframe,
+ RTPVideoHeader* rtp_video_header);
+
+ // TODO(bugs.webrtc.org/10242): Delete SetDependenciesVp8Deprecated() and move
+ // the logic in SetDependenciesVp8New() into Vp8ToGeneric() once all hardware
+ // wrappers have been updated.
+ void SetDependenciesVp8Deprecated(
+ const CodecSpecificInfoVP8& vp8_info,
+ int64_t shared_frame_id,
+ bool is_keyframe,
+ int spatial_index,
+ int temporal_index,
+ bool layer_sync,
+ RTPVideoHeader::GenericDescriptorInfo* generic);
+ void SetDependenciesVp8New(const CodecSpecificInfoVP8& vp8_info,
+ int64_t shared_frame_id,
+ bool is_keyframe,
+ bool layer_sync,
+ RTPVideoHeader::GenericDescriptorInfo* generic);
+
+ FrameDependenciesCalculator dependencies_calculator_;
+ ChainDiffCalculator chains_calculator_;
+ // TODO(bugs.webrtc.org/10242): Remove once all encoder-wrappers are updated.
+ // Holds the last shared frame id for a given (spatial, temporal) layer.
+ std::array<std::array<int64_t, RtpGenericFrameDescriptor::kMaxTemporalLayers>,
+ RtpGenericFrameDescriptor::kMaxSpatialLayers>
+ last_shared_frame_id_;
+  // Circular buffer of frame ids for the last 128 VP9 pictures.
+ // ids for the `picture_id` are stored at the index `picture_id % 128`.
+ std::vector<std::array<int64_t, RtpGenericFrameDescriptor::kMaxSpatialLayers>>
+ last_vp9_frame_id_;
+ // Last frame id for each chain
+ std::array<int64_t, RtpGenericFrameDescriptor::kMaxSpatialLayers>
+ chain_last_frame_id_;
+
+ // TODO(eladalon): When additional codecs are supported,
+ // set kMaxCodecBuffersCount to the max() of these codecs' buffer count.
+ static constexpr size_t kMaxCodecBuffersCount =
+ CodecSpecificInfoVP8::kBuffersCount;
+
+ // Maps buffer IDs to the frame-ID stored in them.
+ std::array<int64_t, kMaxCodecBuffersCount> buffer_id_to_frame_id_;
+
+ // Until we remove SetDependenciesVp8Deprecated(), we should make sure
+ // that, for a given object, we either always use
+ // SetDependenciesVp8Deprecated(), or always use SetDependenciesVp8New().
+ // TODO(bugs.webrtc.org/10242): Remove.
+ absl::optional<bool> new_version_used_;
+
+ const uint32_t ssrc_;
+ RtpPayloadState state_;
+
+ const bool generic_picture_id_experiment_;
+ const bool simulate_generic_structure_;
+};
+} // namespace webrtc
+#endif // CALL_RTP_PAYLOAD_PARAMS_H_
diff --git a/third_party/libwebrtc/call/rtp_payload_params_unittest.cc b/third_party/libwebrtc/call/rtp_payload_params_unittest.cc
new file mode 100644
index 0000000000..45f00061ee
--- /dev/null
+++ b/third_party/libwebrtc/call/rtp_payload_params_unittest.cc
@@ -0,0 +1,1398 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/rtp_payload_params.h"
+
+#include <string.h>
+
+#include <map>
+#include <set>
+
+#include "absl/container/inlined_vector.h"
+#include "absl/types/optional.h"
+#include "absl/types/variant.h"
+#include "api/transport/field_trial_based_config.h"
+#include "api/video/video_content_type.h"
+#include "api/video/video_rotation.h"
+#include "modules/video_coding/codecs/h264/include/h264_globals.h"
+#include "modules/video_coding/codecs/interface/common_constants.h"
+#include "modules/video_coding/codecs/vp8/include/vp8_globals.h"
+#include "modules/video_coding/codecs/vp9/include/vp9_globals.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "test/explicit_key_value_config.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/scoped_key_value_config.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::AllOf;
+using ::testing::Each;
+using ::testing::ElementsAre;
+using ::testing::Eq;
+using ::testing::Field;
+using ::testing::IsEmpty;
+using ::testing::Optional;
+using ::testing::SizeIs;
+
+using GenericDescriptorInfo = RTPVideoHeader::GenericDescriptorInfo;
+
+const uint32_t kSsrc1 = 12345;
+const uint32_t kSsrc2 = 23456;
+const int16_t kPictureId = 123;
+const int16_t kTl0PicIdx = 20;
+const uint8_t kTemporalIdx = 1;
+const int16_t kInitialPictureId1 = 222;
+const int16_t kInitialTl0PicIdx1 = 99;
+const int64_t kDontCare = 0;
+
+TEST(RtpPayloadParamsTest, InfoMappedToRtpVideoHeader_Vp8) {
+ RtpPayloadState state2;
+ state2.picture_id = kPictureId;
+ state2.tl0_pic_idx = kTl0PicIdx;
+ std::map<uint32_t, RtpPayloadState> states = {{kSsrc2, state2}};
+
+ RtpPayloadParams params(kSsrc2, &state2, FieldTrialBasedConfig());
+ EncodedImage encoded_image;
+ encoded_image.rotation_ = kVideoRotation_90;
+ encoded_image.content_type_ = VideoContentType::SCREENSHARE;
+ encoded_image.SetSimulcastIndex(1);
+
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecVP8;
+ codec_info.codecSpecific.VP8.temporalIdx = 0;
+ codec_info.codecSpecific.VP8.keyIdx = kNoKeyIdx;
+ codec_info.codecSpecific.VP8.layerSync = false;
+ codec_info.codecSpecific.VP8.nonReference = true;
+
+ RTPVideoHeader header =
+ params.GetRtpVideoHeader(encoded_image, &codec_info, kDontCare);
+
+ codec_info.codecType = kVideoCodecVP8;
+ codec_info.codecSpecific.VP8.temporalIdx = 1;
+ codec_info.codecSpecific.VP8.layerSync = true;
+
+ header = params.GetRtpVideoHeader(encoded_image, &codec_info, 1);
+
+ EXPECT_EQ(kVideoRotation_90, header.rotation);
+ EXPECT_EQ(VideoContentType::SCREENSHARE, header.content_type);
+ EXPECT_EQ(1, header.simulcastIdx);
+ EXPECT_EQ(kVideoCodecVP8, header.codec);
+ const auto& vp8_header =
+ absl::get<RTPVideoHeaderVP8>(header.video_type_header);
+ EXPECT_EQ(kPictureId + 2, vp8_header.pictureId);
+ EXPECT_EQ(kTemporalIdx, vp8_header.temporalIdx);
+ EXPECT_EQ(kTl0PicIdx + 1, vp8_header.tl0PicIdx);
+ EXPECT_EQ(kNoKeyIdx, vp8_header.keyIdx);
+ EXPECT_TRUE(vp8_header.layerSync);
+ EXPECT_TRUE(vp8_header.nonReference);
+}
+
+TEST(RtpPayloadParamsTest, InfoMappedToRtpVideoHeader_Vp9) {
+ RtpPayloadState state;
+ state.picture_id = kPictureId;
+ state.tl0_pic_idx = kTl0PicIdx;
+ RtpPayloadParams params(kSsrc1, &state, FieldTrialBasedConfig());
+
+ EncodedImage encoded_image;
+ encoded_image.rotation_ = kVideoRotation_90;
+ encoded_image.content_type_ = VideoContentType::SCREENSHARE;
+ encoded_image.SetSpatialIndex(0);
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecVP9;
+ codec_info.codecSpecific.VP9.num_spatial_layers = 3;
+ codec_info.codecSpecific.VP9.first_frame_in_picture = true;
+ codec_info.codecSpecific.VP9.temporal_idx = 2;
+ codec_info.end_of_picture = false;
+
+ RTPVideoHeader header =
+ params.GetRtpVideoHeader(encoded_image, &codec_info, kDontCare);
+
+ EXPECT_EQ(kVideoRotation_90, header.rotation);
+ EXPECT_EQ(VideoContentType::SCREENSHARE, header.content_type);
+ EXPECT_EQ(kVideoCodecVP9, header.codec);
+ EXPECT_FALSE(header.color_space);
+ const auto& vp9_header =
+ absl::get<RTPVideoHeaderVP9>(header.video_type_header);
+ EXPECT_EQ(kPictureId + 1, vp9_header.picture_id);
+ EXPECT_EQ(kTl0PicIdx, vp9_header.tl0_pic_idx);
+ EXPECT_EQ(vp9_header.temporal_idx, codec_info.codecSpecific.VP9.temporal_idx);
+ EXPECT_EQ(vp9_header.spatial_idx, encoded_image.SpatialIndex());
+ EXPECT_EQ(vp9_header.num_spatial_layers,
+ codec_info.codecSpecific.VP9.num_spatial_layers);
+ EXPECT_EQ(vp9_header.end_of_picture, codec_info.end_of_picture);
+
+ // Next spatial layer.
+ codec_info.codecSpecific.VP9.first_frame_in_picture = false;
+ codec_info.end_of_picture = true;
+
+ encoded_image.SetSpatialIndex(1);
+ ColorSpace color_space(
+ ColorSpace::PrimaryID::kSMPTE170M, ColorSpace::TransferID::kSMPTE170M,
+ ColorSpace::MatrixID::kSMPTE170M, ColorSpace::RangeID::kFull);
+ encoded_image.SetColorSpace(color_space);
+ header = params.GetRtpVideoHeader(encoded_image, &codec_info, kDontCare);
+
+ EXPECT_EQ(kVideoRotation_90, header.rotation);
+ EXPECT_EQ(VideoContentType::SCREENSHARE, header.content_type);
+ EXPECT_EQ(kVideoCodecVP9, header.codec);
+ EXPECT_EQ(absl::make_optional(color_space), header.color_space);
+ EXPECT_EQ(kPictureId + 1, vp9_header.picture_id);
+ EXPECT_EQ(kTl0PicIdx, vp9_header.tl0_pic_idx);
+ EXPECT_EQ(vp9_header.temporal_idx, codec_info.codecSpecific.VP9.temporal_idx);
+ EXPECT_EQ(vp9_header.spatial_idx, encoded_image.SpatialIndex());
+ EXPECT_EQ(vp9_header.num_spatial_layers,
+ codec_info.codecSpecific.VP9.num_spatial_layers);
+ EXPECT_EQ(vp9_header.end_of_picture, codec_info.end_of_picture);
+}
+
+TEST(RtpPayloadParamsTest, PictureIdIsSetForVp8) {
+ RtpPayloadState state;
+ state.picture_id = kInitialPictureId1;
+ state.tl0_pic_idx = kInitialTl0PicIdx1;
+
+ EncodedImage encoded_image;
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecVP8;
+
+ RtpPayloadParams params(kSsrc1, &state, FieldTrialBasedConfig());
+ RTPVideoHeader header =
+ params.GetRtpVideoHeader(encoded_image, &codec_info, kDontCare);
+ EXPECT_EQ(kVideoCodecVP8, header.codec);
+ EXPECT_EQ(kInitialPictureId1 + 1,
+ absl::get<RTPVideoHeaderVP8>(header.video_type_header).pictureId);
+
+ // State should hold latest used picture id and tl0_pic_idx.
+ state = params.state();
+ EXPECT_EQ(kInitialPictureId1 + 1, state.picture_id);
+ EXPECT_EQ(kInitialTl0PicIdx1 + 1, state.tl0_pic_idx);
+}
+
+TEST(RtpPayloadParamsTest, PictureIdWraps) {
+ RtpPayloadState state;
+ state.picture_id = kMaxTwoBytePictureId;
+ state.tl0_pic_idx = kInitialTl0PicIdx1;
+
+ EncodedImage encoded_image;
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecVP8;
+ codec_info.codecSpecific.VP8.temporalIdx = kNoTemporalIdx;
+
+ RtpPayloadParams params(kSsrc1, &state, FieldTrialBasedConfig());
+ RTPVideoHeader header =
+ params.GetRtpVideoHeader(encoded_image, &codec_info, kDontCare);
+ EXPECT_EQ(kVideoCodecVP8, header.codec);
+ EXPECT_EQ(0,
+ absl::get<RTPVideoHeaderVP8>(header.video_type_header).pictureId);
+
+ // State should hold latest used picture id and tl0_pic_idx.
+ EXPECT_EQ(0, params.state().picture_id); // Wrapped.
+ EXPECT_EQ(kInitialTl0PicIdx1, params.state().tl0_pic_idx);
+}
+
+TEST(RtpPayloadParamsTest, CreatesGenericDescriptorForVp8) {
+ constexpr auto kSwitch = DecodeTargetIndication::kSwitch;
+ constexpr auto kNotPresent = DecodeTargetIndication::kNotPresent;
+
+ RtpPayloadState state;
+ RtpPayloadParams params(kSsrc1, &state, FieldTrialBasedConfig());
+
+ EncodedImage key_frame_image;
+ key_frame_image._frameType = VideoFrameType::kVideoFrameKey;
+ CodecSpecificInfo key_frame_info;
+ key_frame_info.codecType = kVideoCodecVP8;
+ key_frame_info.codecSpecific.VP8.temporalIdx = 0;
+ RTPVideoHeader key_frame_header = params.GetRtpVideoHeader(
+ key_frame_image, &key_frame_info, /*shared_frame_id=*/123);
+
+ EncodedImage delta_t1_image;
+ delta_t1_image._frameType = VideoFrameType::kVideoFrameDelta;
+ CodecSpecificInfo delta_t1_info;
+ delta_t1_info.codecType = kVideoCodecVP8;
+ delta_t1_info.codecSpecific.VP8.temporalIdx = 1;
+ RTPVideoHeader delta_t1_header = params.GetRtpVideoHeader(
+ delta_t1_image, &delta_t1_info, /*shared_frame_id=*/124);
+
+ EncodedImage delta_t0_image;
+ delta_t0_image._frameType = VideoFrameType::kVideoFrameDelta;
+ CodecSpecificInfo delta_t0_info;
+ delta_t0_info.codecType = kVideoCodecVP8;
+ delta_t0_info.codecSpecific.VP8.temporalIdx = 0;
+ RTPVideoHeader delta_t0_header = params.GetRtpVideoHeader(
+ delta_t0_image, &delta_t0_info, /*shared_frame_id=*/125);
+
+ EXPECT_THAT(
+ key_frame_header,
+ AllOf(Field(&RTPVideoHeader::codec, kVideoCodecVP8),
+ Field(&RTPVideoHeader::frame_type, VideoFrameType::kVideoFrameKey),
+ Field(&RTPVideoHeader::generic,
+ Optional(AllOf(
+ Field(&GenericDescriptorInfo::frame_id, 123),
+ Field(&GenericDescriptorInfo::spatial_index, 0),
+ Field(&GenericDescriptorInfo::temporal_index, 0),
+ Field(&GenericDescriptorInfo::decode_target_indications,
+ ElementsAre(kSwitch, kSwitch, kSwitch, kSwitch)),
+ Field(&GenericDescriptorInfo::dependencies, IsEmpty()),
+ Field(&GenericDescriptorInfo::chain_diffs,
+ ElementsAre(0)))))));
+
+ EXPECT_THAT(
+ delta_t1_header,
+ AllOf(
+ Field(&RTPVideoHeader::codec, kVideoCodecVP8),
+ Field(&RTPVideoHeader::frame_type, VideoFrameType::kVideoFrameDelta),
+ Field(
+ &RTPVideoHeader::generic,
+ Optional(AllOf(
+ Field(&GenericDescriptorInfo::frame_id, 124),
+ Field(&GenericDescriptorInfo::spatial_index, 0),
+ Field(&GenericDescriptorInfo::temporal_index, 1),
+ Field(&GenericDescriptorInfo::decode_target_indications,
+ ElementsAre(kNotPresent, kSwitch, kSwitch, kSwitch)),
+ Field(&GenericDescriptorInfo::dependencies, ElementsAre(123)),
+ Field(&GenericDescriptorInfo::chain_diffs,
+ ElementsAre(1)))))));
+
+ EXPECT_THAT(
+ delta_t0_header,
+ AllOf(
+ Field(&RTPVideoHeader::codec, kVideoCodecVP8),
+ Field(&RTPVideoHeader::frame_type, VideoFrameType::kVideoFrameDelta),
+ Field(
+ &RTPVideoHeader::generic,
+ Optional(AllOf(
+ Field(&GenericDescriptorInfo::frame_id, 125),
+ Field(&GenericDescriptorInfo::spatial_index, 0),
+ Field(&GenericDescriptorInfo::temporal_index, 0),
+ Field(&GenericDescriptorInfo::decode_target_indications,
+ ElementsAre(kSwitch, kSwitch, kSwitch, kSwitch)),
+ Field(&GenericDescriptorInfo::dependencies, ElementsAre(123)),
+ Field(&GenericDescriptorInfo::chain_diffs,
+ ElementsAre(2)))))));
+}
+
+TEST(RtpPayloadParamsTest, Tl0PicIdxUpdatedForVp8) {
+ RtpPayloadState state;
+ state.picture_id = kInitialPictureId1;
+ state.tl0_pic_idx = kInitialTl0PicIdx1;
+
+ EncodedImage encoded_image;
+ // Modules are sending for this test.
+ // OnEncodedImage, temporalIdx: 1.
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecVP8;
+ codec_info.codecSpecific.VP8.temporalIdx = 1;
+
+ RtpPayloadParams params(kSsrc1, &state, FieldTrialBasedConfig());
+ RTPVideoHeader header =
+ params.GetRtpVideoHeader(encoded_image, &codec_info, kDontCare);
+
+ EXPECT_EQ(kVideoCodecVP8, header.codec);
+ const auto& vp8_header =
+ absl::get<RTPVideoHeaderVP8>(header.video_type_header);
+ EXPECT_EQ(kInitialPictureId1 + 1, vp8_header.pictureId);
+ EXPECT_EQ(kInitialTl0PicIdx1, vp8_header.tl0PicIdx);
+
+ // OnEncodedImage, temporalIdx: 0.
+ codec_info.codecSpecific.VP8.temporalIdx = 0;
+
+ header = params.GetRtpVideoHeader(encoded_image, &codec_info, kDontCare);
+ EXPECT_EQ(kVideoCodecVP8, header.codec);
+ EXPECT_EQ(kInitialPictureId1 + 2, vp8_header.pictureId);
+ EXPECT_EQ(kInitialTl0PicIdx1 + 1, vp8_header.tl0PicIdx);
+
+ // State should hold latest used picture id and tl0_pic_idx.
+ EXPECT_EQ(kInitialPictureId1 + 2, params.state().picture_id);
+ EXPECT_EQ(kInitialTl0PicIdx1 + 1, params.state().tl0_pic_idx);
+}
+
+TEST(RtpPayloadParamsTest, Tl0PicIdxUpdatedForVp9) {
+ RtpPayloadState state;
+ state.picture_id = kInitialPictureId1;
+ state.tl0_pic_idx = kInitialTl0PicIdx1;
+
+ EncodedImage encoded_image;
+ // Modules are sending for this test.
+ // OnEncodedImage, temporalIdx: 1.
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecVP9;
+ codec_info.codecSpecific.VP9.temporal_idx = 1;
+ codec_info.codecSpecific.VP9.first_frame_in_picture = true;
+
+ RtpPayloadParams params(kSsrc1, &state, FieldTrialBasedConfig());
+ RTPVideoHeader header =
+ params.GetRtpVideoHeader(encoded_image, &codec_info, kDontCare);
+
+ EXPECT_EQ(kVideoCodecVP9, header.codec);
+ const auto& vp9_header =
+ absl::get<RTPVideoHeaderVP9>(header.video_type_header);
+ EXPECT_EQ(kInitialPictureId1 + 1, vp9_header.picture_id);
+ EXPECT_EQ(kInitialTl0PicIdx1, vp9_header.tl0_pic_idx);
+
+ // OnEncodedImage, temporalIdx: 0.
+ codec_info.codecSpecific.VP9.temporal_idx = 0;
+
+ header = params.GetRtpVideoHeader(encoded_image, &codec_info, kDontCare);
+
+ EXPECT_EQ(kVideoCodecVP9, header.codec);
+ EXPECT_EQ(kInitialPictureId1 + 2, vp9_header.picture_id);
+ EXPECT_EQ(kInitialTl0PicIdx1 + 1, vp9_header.tl0_pic_idx);
+
+ // OnEncodedImage, first_frame_in_picture = false
+ codec_info.codecSpecific.VP9.first_frame_in_picture = false;
+
+ header = params.GetRtpVideoHeader(encoded_image, &codec_info, kDontCare);
+
+ EXPECT_EQ(kVideoCodecVP9, header.codec);
+ EXPECT_EQ(kInitialPictureId1 + 2, vp9_header.picture_id);
+ EXPECT_EQ(kInitialTl0PicIdx1 + 1, vp9_header.tl0_pic_idx);
+
+ // State should hold latest used picture id and tl0_pic_idx.
+ EXPECT_EQ(kInitialPictureId1 + 2, params.state().picture_id);
+ EXPECT_EQ(kInitialTl0PicIdx1 + 1, params.state().tl0_pic_idx);
+}
+
+TEST(RtpPayloadParamsTest, PictureIdForOldGenericFormat) {
+ test::ScopedKeyValueConfig field_trials("WebRTC-GenericPictureId/Enabled/");
+ RtpPayloadState state{};
+
+ EncodedImage encoded_image;
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecGeneric;
+ encoded_image._frameType = VideoFrameType::kVideoFrameKey;
+
+ RtpPayloadParams params(kSsrc1, &state, field_trials);
+ RTPVideoHeader header =
+ params.GetRtpVideoHeader(encoded_image, &codec_info, 10);
+
+ EXPECT_EQ(kVideoCodecGeneric, header.codec);
+ const auto* generic =
+ absl::get_if<RTPVideoHeaderLegacyGeneric>(&header.video_type_header);
+ ASSERT_TRUE(generic);
+ EXPECT_EQ(0, generic->picture_id);
+
+ encoded_image._frameType = VideoFrameType::kVideoFrameDelta;
+ header = params.GetRtpVideoHeader(encoded_image, &codec_info, 20);
+ generic =
+ absl::get_if<RTPVideoHeaderLegacyGeneric>(&header.video_type_header);
+ ASSERT_TRUE(generic);
+ EXPECT_EQ(1, generic->picture_id);
+}
+
+TEST(RtpPayloadParamsTest, GenericDescriptorForGenericCodec) {
+ RtpPayloadState state;
+
+ EncodedImage encoded_image;
+ encoded_image._frameType = VideoFrameType::kVideoFrameKey;
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecGeneric;
+
+ RtpPayloadParams params(kSsrc1, &state, FieldTrialBasedConfig());
+ RTPVideoHeader header =
+ params.GetRtpVideoHeader(encoded_image, &codec_info, 0);
+
+ EXPECT_THAT(header.codec, Eq(kVideoCodecGeneric));
+
+ ASSERT_TRUE(header.generic);
+ EXPECT_THAT(header.generic->frame_id, Eq(0));
+ EXPECT_THAT(header.generic->spatial_index, Eq(0));
+ EXPECT_THAT(header.generic->temporal_index, Eq(0));
+ EXPECT_THAT(header.generic->decode_target_indications,
+ ElementsAre(DecodeTargetIndication::kSwitch));
+ EXPECT_THAT(header.generic->dependencies, IsEmpty());
+ EXPECT_THAT(header.generic->chain_diffs, ElementsAre(0));
+
+ encoded_image._frameType = VideoFrameType::kVideoFrameDelta;
+ header = params.GetRtpVideoHeader(encoded_image, &codec_info, 3);
+ ASSERT_TRUE(header.generic);
+ EXPECT_THAT(header.generic->frame_id, Eq(3));
+ EXPECT_THAT(header.generic->spatial_index, Eq(0));
+ EXPECT_THAT(header.generic->temporal_index, Eq(0));
+ EXPECT_THAT(header.generic->dependencies, ElementsAre(0));
+ EXPECT_THAT(header.generic->decode_target_indications,
+ ElementsAre(DecodeTargetIndication::kSwitch));
+ EXPECT_THAT(header.generic->chain_diffs, ElementsAre(3));
+}
+
+TEST(RtpPayloadParamsTest, SetsGenericFromGenericFrameInfo) {
+ RtpPayloadState state;
+ EncodedImage encoded_image;
+ CodecSpecificInfo codec_info;
+
+ RtpPayloadParams params(kSsrc1, &state, FieldTrialBasedConfig());
+
+ encoded_image._frameType = VideoFrameType::kVideoFrameKey;
+ codec_info.generic_frame_info =
+ GenericFrameInfo::Builder().S(1).T(0).Dtis("S").Build();
+ codec_info.generic_frame_info->encoder_buffers = {
+ {/*id=*/0, /*referenced=*/false, /*updated=*/true}};
+ codec_info.generic_frame_info->part_of_chain = {true, false};
+ RTPVideoHeader key_header =
+ params.GetRtpVideoHeader(encoded_image, &codec_info, /*frame_id=*/1);
+
+ ASSERT_TRUE(key_header.generic);
+ EXPECT_EQ(key_header.generic->spatial_index, 1);
+ EXPECT_EQ(key_header.generic->temporal_index, 0);
+ EXPECT_EQ(key_header.generic->frame_id, 1);
+ EXPECT_THAT(key_header.generic->dependencies, IsEmpty());
+ EXPECT_THAT(key_header.generic->decode_target_indications,
+ ElementsAre(DecodeTargetIndication::kSwitch));
+ EXPECT_THAT(key_header.generic->chain_diffs, SizeIs(2));
+
+ encoded_image._frameType = VideoFrameType::kVideoFrameDelta;
+ codec_info.generic_frame_info =
+ GenericFrameInfo::Builder().S(2).T(3).Dtis("D").Build();
+ codec_info.generic_frame_info->encoder_buffers = {
+ {/*id=*/0, /*referenced=*/true, /*updated=*/false}};
+ codec_info.generic_frame_info->part_of_chain = {false, false};
+ RTPVideoHeader delta_header =
+ params.GetRtpVideoHeader(encoded_image, &codec_info, /*frame_id=*/3);
+
+ ASSERT_TRUE(delta_header.generic);
+ EXPECT_EQ(delta_header.generic->spatial_index, 2);
+ EXPECT_EQ(delta_header.generic->temporal_index, 3);
+ EXPECT_EQ(delta_header.generic->frame_id, 3);
+ EXPECT_THAT(delta_header.generic->dependencies, ElementsAre(1));
+ EXPECT_THAT(delta_header.generic->decode_target_indications,
+ ElementsAre(DecodeTargetIndication::kDiscardable));
+ EXPECT_THAT(delta_header.generic->chain_diffs, SizeIs(2));
+}
+
+class RtpPayloadParamsVp8ToGenericTest : public ::testing::Test {
+ public:
+ enum LayerSync { kNoSync, kSync };
+
+ RtpPayloadParamsVp8ToGenericTest()
+ : state_(), params_(123, &state_, trials_config_) {}
+
+ void ConvertAndCheck(int temporal_index,
+ int64_t shared_frame_id,
+ VideoFrameType frame_type,
+ LayerSync layer_sync,
+ const std::set<int64_t>& expected_deps,
+ uint16_t width = 0,
+ uint16_t height = 0) {
+ EncodedImage encoded_image;
+ encoded_image._frameType = frame_type;
+ encoded_image._encodedWidth = width;
+ encoded_image._encodedHeight = height;
+
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecVP8;
+ codec_info.codecSpecific.VP8.temporalIdx = temporal_index;
+ codec_info.codecSpecific.VP8.layerSync = layer_sync == kSync;
+
+ RTPVideoHeader header =
+ params_.GetRtpVideoHeader(encoded_image, &codec_info, shared_frame_id);
+
+ ASSERT_TRUE(header.generic);
+ EXPECT_EQ(header.generic->spatial_index, 0);
+
+ EXPECT_EQ(header.generic->frame_id, shared_frame_id);
+ EXPECT_EQ(header.generic->temporal_index, temporal_index);
+ std::set<int64_t> actual_deps(header.generic->dependencies.begin(),
+ header.generic->dependencies.end());
+ EXPECT_EQ(expected_deps, actual_deps);
+
+ EXPECT_EQ(header.width, width);
+ EXPECT_EQ(header.height, height);
+ }
+
+ protected:
+ FieldTrialBasedConfig trials_config_;
+ RtpPayloadState state_;
+ RtpPayloadParams params_;
+};
+
+TEST_F(RtpPayloadParamsVp8ToGenericTest, Keyframe) {
+ ConvertAndCheck(0, 0, VideoFrameType::kVideoFrameKey, kNoSync, {}, 480, 360);
+ ConvertAndCheck(0, 1, VideoFrameType::kVideoFrameDelta, kNoSync, {0});
+ ConvertAndCheck(0, 2, VideoFrameType::kVideoFrameKey, kNoSync, {}, 480, 360);
+}
+
+TEST_F(RtpPayloadParamsVp8ToGenericTest, TooHighTemporalIndex) {
+ ConvertAndCheck(0, 0, VideoFrameType::kVideoFrameKey, kNoSync, {}, 480, 360);
+
+ EncodedImage encoded_image;
+ encoded_image._frameType = VideoFrameType::kVideoFrameDelta;
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecVP8;
+ codec_info.codecSpecific.VP8.temporalIdx =
+ RtpGenericFrameDescriptor::kMaxTemporalLayers;
+ codec_info.codecSpecific.VP8.layerSync = false;
+
+ RTPVideoHeader header =
+ params_.GetRtpVideoHeader(encoded_image, &codec_info, 1);
+ EXPECT_FALSE(header.generic);
+}
+
+TEST_F(RtpPayloadParamsVp8ToGenericTest, LayerSync) {
+ // 02120212 pattern
+ ConvertAndCheck(0, 0, VideoFrameType::kVideoFrameKey, kNoSync, {}, 480, 360);
+ ConvertAndCheck(2, 1, VideoFrameType::kVideoFrameDelta, kNoSync, {0});
+ ConvertAndCheck(1, 2, VideoFrameType::kVideoFrameDelta, kNoSync, {0});
+ ConvertAndCheck(2, 3, VideoFrameType::kVideoFrameDelta, kNoSync, {0, 1, 2});
+
+ ConvertAndCheck(0, 4, VideoFrameType::kVideoFrameDelta, kNoSync, {0});
+ ConvertAndCheck(2, 5, VideoFrameType::kVideoFrameDelta, kNoSync, {2, 3, 4});
+ ConvertAndCheck(1, 6, VideoFrameType::kVideoFrameDelta, kSync,
+ {4}); // layer sync
+ ConvertAndCheck(2, 7, VideoFrameType::kVideoFrameDelta, kNoSync, {4, 5, 6});
+}
+
+TEST_F(RtpPayloadParamsVp8ToGenericTest, FrameIdGaps) {
+ // 0101 pattern
+ ConvertAndCheck(0, 0, VideoFrameType::kVideoFrameKey, kNoSync, {}, 480, 360);
+ ConvertAndCheck(1, 1, VideoFrameType::kVideoFrameDelta, kNoSync, {0});
+
+ ConvertAndCheck(0, 5, VideoFrameType::kVideoFrameDelta, kNoSync, {0});
+ ConvertAndCheck(1, 10, VideoFrameType::kVideoFrameDelta, kNoSync, {1, 5});
+
+ ConvertAndCheck(0, 15, VideoFrameType::kVideoFrameDelta, kNoSync, {5});
+ ConvertAndCheck(1, 20, VideoFrameType::kVideoFrameDelta, kNoSync, {10, 15});
+}
+
+TEST(RtpPayloadParamsVp9ToGenericTest, NoScalability) {
+ RtpPayloadState state;
+ RtpPayloadParams params(/*ssrc=*/123, &state, FieldTrialBasedConfig());
+
+ EncodedImage encoded_image;
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecVP9;
+ codec_info.codecSpecific.VP9.flexible_mode = true;
+ codec_info.codecSpecific.VP9.num_spatial_layers = 1;
+ codec_info.codecSpecific.VP9.temporal_idx = kNoTemporalIdx;
+ codec_info.codecSpecific.VP9.first_frame_in_picture = true;
+ codec_info.end_of_picture = true;
+
+ // Key frame.
+ encoded_image._frameType = VideoFrameType::kVideoFrameKey;
+ codec_info.codecSpecific.VP9.inter_pic_predicted = false;
+ codec_info.codecSpecific.VP9.num_ref_pics = 0;
+ RTPVideoHeader header = params.GetRtpVideoHeader(encoded_image, &codec_info,
+ /*shared_frame_id=*/1);
+
+ ASSERT_TRUE(header.generic);
+ EXPECT_EQ(header.generic->spatial_index, 0);
+ EXPECT_EQ(header.generic->temporal_index, 0);
+ EXPECT_EQ(header.generic->frame_id, 1);
+ ASSERT_THAT(header.generic->decode_target_indications, Not(IsEmpty()));
+ EXPECT_EQ(header.generic->decode_target_indications[0],
+ DecodeTargetIndication::kSwitch);
+ EXPECT_THAT(header.generic->dependencies, IsEmpty());
+ ASSERT_THAT(header.generic->chain_diffs, Not(IsEmpty()));
+ EXPECT_EQ(header.generic->chain_diffs[0], 0);
+
+ // Delta frame.
+ encoded_image._frameType = VideoFrameType::kVideoFrameDelta;
+ codec_info.codecSpecific.VP9.inter_pic_predicted = true;
+ codec_info.codecSpecific.VP9.num_ref_pics = 1;
+ codec_info.codecSpecific.VP9.p_diff[0] = 1;
+ header = params.GetRtpVideoHeader(encoded_image, &codec_info,
+ /*shared_frame_id=*/3);
+
+ ASSERT_TRUE(header.generic);
+ EXPECT_EQ(header.generic->spatial_index, 0);
+ EXPECT_EQ(header.generic->temporal_index, 0);
+ EXPECT_EQ(header.generic->frame_id, 3);
+ ASSERT_THAT(header.generic->decode_target_indications, Not(IsEmpty()));
+ EXPECT_EQ(header.generic->decode_target_indications[0],
+ DecodeTargetIndication::kSwitch);
+ EXPECT_THAT(header.generic->dependencies, ElementsAre(1));
+ ASSERT_THAT(header.generic->chain_diffs, Not(IsEmpty()));
+ // previous frame in the chain was frame#1,
+ EXPECT_EQ(header.generic->chain_diffs[0], 3 - 1);
+}
+
+TEST(RtpPayloadParamsVp9ToGenericTest, NoScalabilityNonFlexibleMode) {
+ RtpPayloadState state;
+ RtpPayloadParams params(/*ssrc=*/123, &state, FieldTrialBasedConfig());
+
+ EncodedImage encoded_image;
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecVP9;
+ codec_info.codecSpecific.VP9.flexible_mode = false;
+ codec_info.codecSpecific.VP9.num_spatial_layers = 1;
+ codec_info.codecSpecific.VP9.temporal_idx = kNoTemporalIdx;
+ codec_info.codecSpecific.VP9.first_frame_in_picture = true;
+ codec_info.end_of_picture = true;
+
+ // Key frame.
+ encoded_image._frameType = VideoFrameType::kVideoFrameKey;
+ codec_info.codecSpecific.VP9.inter_pic_predicted = false;
+ RTPVideoHeader key_header =
+ params.GetRtpVideoHeader(encoded_image, &codec_info,
+ /*shared_frame_id=*/1);
+
+ ASSERT_TRUE(key_header.generic);
+ EXPECT_EQ(key_header.generic->spatial_index, 0);
+ EXPECT_EQ(key_header.generic->temporal_index, 0);
+ EXPECT_EQ(key_header.generic->frame_id, 1);
+ ASSERT_THAT(key_header.generic->decode_target_indications, Not(IsEmpty()));
+ EXPECT_EQ(key_header.generic->decode_target_indications[0],
+ DecodeTargetIndication::kSwitch);
+ EXPECT_THAT(key_header.generic->dependencies, IsEmpty());
+ ASSERT_THAT(key_header.generic->chain_diffs, Not(IsEmpty()));
+ EXPECT_EQ(key_header.generic->chain_diffs[0], 0);
+
+ encoded_image._frameType = VideoFrameType::kVideoFrameDelta;
+ codec_info.codecSpecific.VP9.inter_pic_predicted = true;
+ RTPVideoHeader delta_header =
+ params.GetRtpVideoHeader(encoded_image, &codec_info,
+ /*shared_frame_id=*/3);
+
+ ASSERT_TRUE(delta_header.generic);
+ EXPECT_EQ(delta_header.generic->spatial_index, 0);
+ EXPECT_EQ(delta_header.generic->temporal_index, 0);
+ EXPECT_EQ(delta_header.generic->frame_id, 3);
+ ASSERT_THAT(delta_header.generic->decode_target_indications, Not(IsEmpty()));
+ EXPECT_EQ(delta_header.generic->decode_target_indications[0],
+ DecodeTargetIndication::kSwitch);
+ EXPECT_THAT(delta_header.generic->dependencies, ElementsAre(1));
+ ASSERT_THAT(delta_header.generic->chain_diffs, Not(IsEmpty()));
+ EXPECT_EQ(delta_header.generic->chain_diffs[0], 3 - 1);
+}
+
+TEST(RtpPayloadParamsVp9ToGenericTest, TemporalScalabilityWith2Layers) {
+ // Test with 2 temporal layers structure that is not used by webrtc:
+ // 1---3 5
+ // / / / ...
+ // 0---2---4---
+ RtpPayloadState state;
+ RtpPayloadParams params(/*ssrc=*/123, &state, FieldTrialBasedConfig());
+
+ EncodedImage image;
+ CodecSpecificInfo info;
+ info.codecType = kVideoCodecVP9;
+ info.codecSpecific.VP9.flexible_mode = true;
+ info.codecSpecific.VP9.num_spatial_layers = 1;
+ info.codecSpecific.VP9.first_frame_in_picture = true;
+ info.end_of_picture = true;
+
+ RTPVideoHeader headers[6];
+ // Key frame.
+ image._frameType = VideoFrameType::kVideoFrameKey;
+ info.codecSpecific.VP9.inter_pic_predicted = false;
+ info.codecSpecific.VP9.num_ref_pics = 0;
+ info.codecSpecific.VP9.temporal_up_switch = true;
+ info.codecSpecific.VP9.temporal_idx = 0;
+ headers[0] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/1);
+
+ // Delta frames.
+ info.codecSpecific.VP9.inter_pic_predicted = true;
+ image._frameType = VideoFrameType::kVideoFrameDelta;
+
+ info.codecSpecific.VP9.temporal_up_switch = true;
+ info.codecSpecific.VP9.temporal_idx = 1;
+ info.codecSpecific.VP9.num_ref_pics = 1;
+ info.codecSpecific.VP9.p_diff[0] = 1;
+ headers[1] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/3);
+
+ info.codecSpecific.VP9.temporal_up_switch = false;
+ info.codecSpecific.VP9.temporal_idx = 0;
+ info.codecSpecific.VP9.num_ref_pics = 1;
+ info.codecSpecific.VP9.p_diff[0] = 2;
+ headers[2] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/5);
+
+ info.codecSpecific.VP9.temporal_up_switch = false;
+ info.codecSpecific.VP9.temporal_idx = 1;
+ info.codecSpecific.VP9.num_ref_pics = 2;
+ info.codecSpecific.VP9.p_diff[0] = 1;
+ info.codecSpecific.VP9.p_diff[1] = 2;
+ headers[3] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/7);
+
+ info.codecSpecific.VP9.temporal_up_switch = true;
+ info.codecSpecific.VP9.temporal_idx = 0;
+ info.codecSpecific.VP9.num_ref_pics = 1;
+ info.codecSpecific.VP9.p_diff[0] = 2;
+ headers[4] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/9);
+
+ info.codecSpecific.VP9.temporal_up_switch = true;
+ info.codecSpecific.VP9.temporal_idx = 1;
+ info.codecSpecific.VP9.num_ref_pics = 1;
+ info.codecSpecific.VP9.p_diff[0] = 1;
+ headers[5] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/11);
+
+ ASSERT_TRUE(headers[0].generic);
+ int num_decode_targets = headers[0].generic->decode_target_indications.size();
+ int num_chains = headers[0].generic->chain_diffs.size();
+ ASSERT_GE(num_decode_targets, 2);
+ ASSERT_GE(num_chains, 1);
+
+ for (int frame_idx = 0; frame_idx < 6; ++frame_idx) {
+ const RTPVideoHeader& header = headers[frame_idx];
+ ASSERT_TRUE(header.generic);
+ EXPECT_EQ(header.generic->spatial_index, 0);
+ EXPECT_EQ(header.generic->temporal_index, frame_idx % 2);
+ EXPECT_EQ(header.generic->frame_id, 1 + 2 * frame_idx);
+ ASSERT_THAT(header.generic->decode_target_indications,
+ SizeIs(num_decode_targets));
+ ASSERT_THAT(header.generic->chain_diffs, SizeIs(num_chains));
+ // Expect only T0 frames are needed for the 1st decode target.
+ if (header.generic->temporal_index == 0) {
+ EXPECT_NE(header.generic->decode_target_indications[0],
+ DecodeTargetIndication::kNotPresent);
+ } else {
+ EXPECT_EQ(header.generic->decode_target_indications[0],
+ DecodeTargetIndication::kNotPresent);
+ }
+ // Expect all frames are needed for the 2nd decode target.
+ EXPECT_NE(header.generic->decode_target_indications[1],
+ DecodeTargetIndication::kNotPresent);
+ }
+
+ // Expect switch at every beginning of the pattern.
+ EXPECT_THAT(headers[0].generic->decode_target_indications[0],
+ DecodeTargetIndication::kSwitch);
+ EXPECT_THAT(headers[0].generic->decode_target_indications[1],
+ DecodeTargetIndication::kSwitch);
+ EXPECT_THAT(headers[4].generic->decode_target_indications[0],
+ DecodeTargetIndication::kSwitch);
+ EXPECT_THAT(headers[4].generic->decode_target_indications[1],
+ DecodeTargetIndication::kSwitch);
+
+ EXPECT_THAT(headers[0].generic->dependencies, IsEmpty()); // T0, 1
+ EXPECT_THAT(headers[1].generic->dependencies, ElementsAre(1)); // T1, 3
+ EXPECT_THAT(headers[2].generic->dependencies, ElementsAre(1)); // T0, 5
+ EXPECT_THAT(headers[3].generic->dependencies, ElementsAre(5, 3)); // T1, 7
+ EXPECT_THAT(headers[4].generic->dependencies, ElementsAre(5)); // T0, 9
+ EXPECT_THAT(headers[5].generic->dependencies, ElementsAre(9)); // T1, 11
+
+ EXPECT_THAT(headers[0].generic->chain_diffs[0], Eq(0));
+ EXPECT_THAT(headers[1].generic->chain_diffs[0], Eq(2));
+ EXPECT_THAT(headers[2].generic->chain_diffs[0], Eq(4));
+ EXPECT_THAT(headers[3].generic->chain_diffs[0], Eq(2));
+ EXPECT_THAT(headers[4].generic->chain_diffs[0], Eq(4));
+ EXPECT_THAT(headers[5].generic->chain_diffs[0], Eq(2));
+}
+
+TEST(RtpPayloadParamsVp9ToGenericTest, TemporalScalabilityWith3Layers) {
+ // Test with 3 temporal layers structure that is not used by webrtc, but used
+ // by chromium: https://imgur.com/pURAGvp
+ RtpPayloadState state;
+ RtpPayloadParams params(/*ssrc=*/123, &state, FieldTrialBasedConfig());
+
+ EncodedImage image;
+ CodecSpecificInfo info;
+ info.codecType = kVideoCodecVP9;
+ info.codecSpecific.VP9.flexible_mode = true;
+ info.codecSpecific.VP9.num_spatial_layers = 1;
+ info.codecSpecific.VP9.first_frame_in_picture = true;
+ info.end_of_picture = true;
+
+ RTPVideoHeader headers[9];
+ // Key frame.
+ image._frameType = VideoFrameType::kVideoFrameKey;
+ info.codecSpecific.VP9.inter_pic_predicted = false;
+ info.codecSpecific.VP9.num_ref_pics = 0;
+ info.codecSpecific.VP9.temporal_up_switch = true;
+ info.codecSpecific.VP9.temporal_idx = 0;
+ headers[0] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/1);
+
+ // Delta frames.
+ info.codecSpecific.VP9.inter_pic_predicted = true;
+ image._frameType = VideoFrameType::kVideoFrameDelta;
+
+ info.codecSpecific.VP9.temporal_up_switch = true;
+ info.codecSpecific.VP9.temporal_idx = 2;
+ info.codecSpecific.VP9.num_ref_pics = 1;
+ info.codecSpecific.VP9.p_diff[0] = 1;
+ headers[1] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/3);
+
+ info.codecSpecific.VP9.temporal_up_switch = true;
+ info.codecSpecific.VP9.temporal_idx = 1;
+ info.codecSpecific.VP9.num_ref_pics = 1;
+ info.codecSpecific.VP9.p_diff[0] = 2;
+ headers[2] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/5);
+
+ info.codecSpecific.VP9.temporal_up_switch = true;
+ info.codecSpecific.VP9.temporal_idx = 2;
+ info.codecSpecific.VP9.num_ref_pics = 1;
+ info.codecSpecific.VP9.p_diff[0] = 1;
+ headers[3] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/7);
+
+ info.codecSpecific.VP9.temporal_up_switch = false;
+ info.codecSpecific.VP9.temporal_idx = 0;
+ info.codecSpecific.VP9.num_ref_pics = 1;
+ info.codecSpecific.VP9.p_diff[0] = 4;
+ headers[4] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/9);
+
+ info.codecSpecific.VP9.temporal_up_switch = true;
+ info.codecSpecific.VP9.temporal_idx = 2;
+ info.codecSpecific.VP9.num_ref_pics = 2;
+ info.codecSpecific.VP9.p_diff[0] = 1;
+ info.codecSpecific.VP9.p_diff[1] = 3;
+ headers[5] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/11);
+
+ info.codecSpecific.VP9.temporal_up_switch = false;
+ info.codecSpecific.VP9.temporal_idx = 1;
+ info.codecSpecific.VP9.num_ref_pics = 2;
+ info.codecSpecific.VP9.p_diff[0] = 2;
+ info.codecSpecific.VP9.p_diff[1] = 4;
+ headers[6] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/13);
+
+ info.codecSpecific.VP9.temporal_up_switch = true;
+ info.codecSpecific.VP9.temporal_idx = 2;
+ info.codecSpecific.VP9.num_ref_pics = 1;
+ info.codecSpecific.VP9.p_diff[0] = 1;
+ headers[7] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/15);
+
+ info.codecSpecific.VP9.temporal_up_switch = true;
+ info.codecSpecific.VP9.temporal_idx = 0;
+ info.codecSpecific.VP9.num_ref_pics = 1;
+ info.codecSpecific.VP9.p_diff[0] = 4;
+ headers[8] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/17);
+
+ ASSERT_TRUE(headers[0].generic);
+ int num_decode_targets = headers[0].generic->decode_target_indications.size();
+ int num_chains = headers[0].generic->chain_diffs.size();
+ ASSERT_GE(num_decode_targets, 3);
+ ASSERT_GE(num_chains, 1);
+
+ for (int frame_idx = 0; frame_idx < 9; ++frame_idx) {
+ const RTPVideoHeader& header = headers[frame_idx];
+ ASSERT_TRUE(header.generic);
+ EXPECT_EQ(header.generic->spatial_index, 0);
+ EXPECT_EQ(header.generic->frame_id, 1 + 2 * frame_idx);
+ ASSERT_THAT(header.generic->decode_target_indications,
+ SizeIs(num_decode_targets));
+ ASSERT_THAT(header.generic->chain_diffs, SizeIs(num_chains));
+ // Expect only T0 frames are needed for the 1st decode target.
+ if (header.generic->temporal_index == 0) {
+ EXPECT_NE(header.generic->decode_target_indications[0],
+ DecodeTargetIndication::kNotPresent);
+ } else {
+ EXPECT_EQ(header.generic->decode_target_indications[0],
+ DecodeTargetIndication::kNotPresent);
+ }
+ // Expect only T0 and T1 frames are needed for the 2nd decode target.
+ if (header.generic->temporal_index <= 1) {
+ EXPECT_NE(header.generic->decode_target_indications[1],
+ DecodeTargetIndication::kNotPresent);
+ } else {
+ EXPECT_EQ(header.generic->decode_target_indications[1],
+ DecodeTargetIndication::kNotPresent);
+ }
+ // Expect all frames are needed for the 3rd decode target.
+ EXPECT_NE(header.generic->decode_target_indications[2],
+ DecodeTargetIndication::kNotPresent);
+ }
+
+ EXPECT_EQ(headers[0].generic->temporal_index, 0);
+ EXPECT_EQ(headers[1].generic->temporal_index, 2);
+ EXPECT_EQ(headers[2].generic->temporal_index, 1);
+ EXPECT_EQ(headers[3].generic->temporal_index, 2);
+ EXPECT_EQ(headers[4].generic->temporal_index, 0);
+ EXPECT_EQ(headers[5].generic->temporal_index, 2);
+ EXPECT_EQ(headers[6].generic->temporal_index, 1);
+ EXPECT_EQ(headers[7].generic->temporal_index, 2);
+ EXPECT_EQ(headers[8].generic->temporal_index, 0);
+
+ // Expect switch at every beginning of the pattern.
+ EXPECT_THAT(headers[0].generic->decode_target_indications,
+ Each(DecodeTargetIndication::kSwitch));
+ EXPECT_THAT(headers[8].generic->decode_target_indications[0],
+ DecodeTargetIndication::kSwitch);
+ EXPECT_THAT(headers[8].generic->decode_target_indications[1],
+ DecodeTargetIndication::kSwitch);
+ EXPECT_THAT(headers[8].generic->decode_target_indications[2],
+ DecodeTargetIndication::kSwitch);
+
+ EXPECT_THAT(headers[0].generic->dependencies, IsEmpty()); // T0, 1
+ EXPECT_THAT(headers[1].generic->dependencies, ElementsAre(1)); // T2, 3
+ EXPECT_THAT(headers[2].generic->dependencies, ElementsAre(1)); // T1, 5
+ EXPECT_THAT(headers[3].generic->dependencies, ElementsAre(5)); // T2, 7
+ EXPECT_THAT(headers[4].generic->dependencies, ElementsAre(1)); // T0, 9
+ EXPECT_THAT(headers[5].generic->dependencies, ElementsAre(9, 5)); // T2, 11
+ EXPECT_THAT(headers[6].generic->dependencies, ElementsAre(9, 5)); // T1, 13
+ EXPECT_THAT(headers[7].generic->dependencies, ElementsAre(13)); // T2, 15
+ EXPECT_THAT(headers[8].generic->dependencies, ElementsAre(9)); // T0, 17
+
+ EXPECT_THAT(headers[0].generic->chain_diffs[0], Eq(0));
+ EXPECT_THAT(headers[1].generic->chain_diffs[0], Eq(2));
+ EXPECT_THAT(headers[2].generic->chain_diffs[0], Eq(4));
+ EXPECT_THAT(headers[3].generic->chain_diffs[0], Eq(6));
+ EXPECT_THAT(headers[4].generic->chain_diffs[0], Eq(8));
+ EXPECT_THAT(headers[5].generic->chain_diffs[0], Eq(2));
+ EXPECT_THAT(headers[6].generic->chain_diffs[0], Eq(4));
+ EXPECT_THAT(headers[7].generic->chain_diffs[0], Eq(6));
+ EXPECT_THAT(headers[8].generic->chain_diffs[0], Eq(8));
+}
+
+TEST(RtpPayloadParamsVp9ToGenericTest, SpatialScalabilityKSvc) {
+ // 1---3--
+ // | ...
+ // 0---2--
+ RtpPayloadState state;
+ RtpPayloadParams params(/*ssrc=*/123, &state, FieldTrialBasedConfig());
+
+ EncodedImage image;
+ CodecSpecificInfo info;
+ info.codecType = kVideoCodecVP9;
+ info.codecSpecific.VP9.flexible_mode = true;
+ info.codecSpecific.VP9.num_spatial_layers = 2;
+ info.codecSpecific.VP9.first_frame_in_picture = true;
+
+ RTPVideoHeader headers[4];
+ // Key frame.
+ image._frameType = VideoFrameType::kVideoFrameKey;
+ image.SetSpatialIndex(0);
+ info.codecSpecific.VP9.inter_pic_predicted = false;
+ info.codecSpecific.VP9.inter_layer_predicted = false;
+ info.codecSpecific.VP9.non_ref_for_inter_layer_pred = false;
+ info.codecSpecific.VP9.num_ref_pics = 0;
+ info.codecSpecific.VP9.first_frame_in_picture = true;
+ info.end_of_picture = false;
+ headers[0] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/1);
+
+ image.SetSpatialIndex(1);
+ info.codecSpecific.VP9.inter_layer_predicted = true;
+ info.codecSpecific.VP9.non_ref_for_inter_layer_pred = true;
+ info.codecSpecific.VP9.first_frame_in_picture = false;
+ info.end_of_picture = true;
+ headers[1] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/3);
+
+ // Delta frames.
+ info.codecSpecific.VP9.inter_pic_predicted = true;
+ image._frameType = VideoFrameType::kVideoFrameDelta;
+ info.codecSpecific.VP9.num_ref_pics = 1;
+ info.codecSpecific.VP9.p_diff[0] = 1;
+
+ image.SetSpatialIndex(0);
+ info.codecSpecific.VP9.inter_layer_predicted = false;
+ info.codecSpecific.VP9.non_ref_for_inter_layer_pred = true;
+ info.codecSpecific.VP9.first_frame_in_picture = true;
+ info.end_of_picture = false;
+ headers[2] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/5);
+
+ image.SetSpatialIndex(1);
+ info.codecSpecific.VP9.inter_layer_predicted = false;
+ info.codecSpecific.VP9.non_ref_for_inter_layer_pred = true;
+ info.codecSpecific.VP9.first_frame_in_picture = false;
+ info.end_of_picture = true;
+ headers[3] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/7);
+
+ ASSERT_TRUE(headers[0].generic);
+ int num_decode_targets = headers[0].generic->decode_target_indications.size();
+ // Rely on implementation detail there are always kMaxTemporalStreams temporal
+ // layers assumed, in particular assume Decode Target#0 matches layer S0T0,
+ // and Decode Target#kMaxTemporalStreams matches layer S1T0.
+ ASSERT_GE(num_decode_targets, kMaxTemporalStreams * 2);
+ int num_chains = headers[0].generic->chain_diffs.size();
+ ASSERT_GE(num_chains, 2);
+
+ for (int frame_idx = 0; frame_idx < 4; ++frame_idx) {
+ const RTPVideoHeader& header = headers[frame_idx];
+ ASSERT_TRUE(header.generic);
+ EXPECT_EQ(header.generic->spatial_index, frame_idx % 2);
+ EXPECT_EQ(header.generic->temporal_index, 0);
+ EXPECT_EQ(header.generic->frame_id, 1 + 2 * frame_idx);
+ ASSERT_THAT(header.generic->decode_target_indications,
+ SizeIs(num_decode_targets));
+ ASSERT_THAT(header.generic->chain_diffs, SizeIs(num_chains));
+ }
+
+ // Expect S0 key frame is switch for both Decode Targets.
+ EXPECT_EQ(headers[0].generic->decode_target_indications[0],
+ DecodeTargetIndication::kSwitch);
+ EXPECT_EQ(headers[0].generic->decode_target_indications[kMaxTemporalStreams],
+ DecodeTargetIndication::kSwitch);
+ // S1 key frame is only needed for the 2nd Decode Targets.
+ EXPECT_EQ(headers[1].generic->decode_target_indications[0],
+ DecodeTargetIndication::kNotPresent);
+ EXPECT_NE(headers[1].generic->decode_target_indications[kMaxTemporalStreams],
+ DecodeTargetIndication::kNotPresent);
+ // Delta frames are only needed for their own Decode Targets.
+ EXPECT_NE(headers[2].generic->decode_target_indications[0],
+ DecodeTargetIndication::kNotPresent);
+ EXPECT_EQ(headers[2].generic->decode_target_indications[kMaxTemporalStreams],
+ DecodeTargetIndication::kNotPresent);
+ EXPECT_EQ(headers[3].generic->decode_target_indications[0],
+ DecodeTargetIndication::kNotPresent);
+ EXPECT_NE(headers[3].generic->decode_target_indications[kMaxTemporalStreams],
+ DecodeTargetIndication::kNotPresent);
+
+ EXPECT_THAT(headers[0].generic->dependencies, IsEmpty()); // S0, 1
+ EXPECT_THAT(headers[1].generic->dependencies, ElementsAre(1)); // S1, 3
+ EXPECT_THAT(headers[2].generic->dependencies, ElementsAre(1)); // S0, 5
+ EXPECT_THAT(headers[3].generic->dependencies, ElementsAre(3)); // S1, 7
+
+ EXPECT_THAT(headers[0].generic->chain_diffs[0], Eq(0));
+ EXPECT_THAT(headers[0].generic->chain_diffs[1], Eq(0));
+ EXPECT_THAT(headers[1].generic->chain_diffs[0], Eq(2));
+ EXPECT_THAT(headers[1].generic->chain_diffs[1], Eq(2));
+ EXPECT_THAT(headers[2].generic->chain_diffs[0], Eq(4));
+ EXPECT_THAT(headers[2].generic->chain_diffs[1], Eq(2));
+ EXPECT_THAT(headers[3].generic->chain_diffs[0], Eq(2));
+ EXPECT_THAT(headers[3].generic->chain_diffs[1], Eq(4));
+}
+
+TEST(RtpPayloadParamsVp9ToGenericTest,
+ IncreaseNumberOfSpatialLayersOnDeltaFrame) {
+ // S1 5--
+ // | ...
+ // S0 1---3--
+ RtpPayloadState state;
+ RtpPayloadParams params(/*ssrc=*/123, &state, FieldTrialBasedConfig());
+
+ EncodedImage image;
+ CodecSpecificInfo info;
+ info.codecType = kVideoCodecVP9;
+ info.codecSpecific.VP9.flexible_mode = true;
+ info.codecSpecific.VP9.num_spatial_layers = 1;
+ info.codecSpecific.VP9.first_frame_in_picture = true;
+
+ RTPVideoHeader headers[3];
+ // Key frame.
+ image._frameType = VideoFrameType::kVideoFrameKey;
+ image.SetSpatialIndex(0);
+ info.codecSpecific.VP9.inter_pic_predicted = false;
+ info.codecSpecific.VP9.inter_layer_predicted = false;
+ info.codecSpecific.VP9.non_ref_for_inter_layer_pred = true;
+ info.codecSpecific.VP9.num_ref_pics = 0;
+ info.codecSpecific.VP9.first_frame_in_picture = true;
+ info.end_of_picture = true;
+ headers[0] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/1);
+
+ // S0 delta frame.
+ image._frameType = VideoFrameType::kVideoFrameDelta;
+ info.codecSpecific.VP9.num_spatial_layers = 2;
+ info.codecSpecific.VP9.non_ref_for_inter_layer_pred = false;
+ info.codecSpecific.VP9.first_frame_in_picture = true;
+ info.codecSpecific.VP9.inter_pic_predicted = true;
+ info.codecSpecific.VP9.num_ref_pics = 1;
+ info.codecSpecific.VP9.p_diff[0] = 1;
+ info.end_of_picture = false;
+ headers[1] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/3);
+
+ // S1 delta frame.
+ image.SetSpatialIndex(1);
+ info.codecSpecific.VP9.inter_layer_predicted = true;
+ info.codecSpecific.VP9.non_ref_for_inter_layer_pred = true;
+ info.codecSpecific.VP9.first_frame_in_picture = false;
+ info.codecSpecific.VP9.inter_pic_predicted = false;
+ info.end_of_picture = true;
+ headers[2] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/5);
+
+ ASSERT_TRUE(headers[0].generic);
+ int num_decode_targets = headers[0].generic->decode_target_indications.size();
+ int num_chains = headers[0].generic->chain_diffs.size();
+ // Rely on implementation detail there are always kMaxTemporalStreams temporal
+ // layers. In particular assume Decode Target#0 matches layer S0T0, and
+ // Decode Target#kMaxTemporalStreams matches layer S1T0.
+ static constexpr int kS0T0 = 0;
+ static constexpr int kS1T0 = kMaxTemporalStreams;
+ ASSERT_GE(num_decode_targets, 2);
+ ASSERT_GE(num_chains, 2);
+
+ for (int frame_idx = 0; frame_idx < 3; ++frame_idx) {
+ const RTPVideoHeader& header = headers[frame_idx];
+ ASSERT_TRUE(header.generic);
+ EXPECT_EQ(header.generic->temporal_index, 0);
+ EXPECT_EQ(header.generic->frame_id, 1 + 2 * frame_idx);
+ ASSERT_THAT(header.generic->decode_target_indications,
+ SizeIs(num_decode_targets));
+ ASSERT_THAT(header.generic->chain_diffs, SizeIs(num_chains));
+ }
+
+ EXPECT_TRUE(headers[0].generic->active_decode_targets[kS0T0]);
+ EXPECT_FALSE(headers[0].generic->active_decode_targets[kS1T0]);
+
+ EXPECT_TRUE(headers[1].generic->active_decode_targets[kS0T0]);
+ EXPECT_TRUE(headers[1].generic->active_decode_targets[kS1T0]);
+
+ EXPECT_TRUE(headers[2].generic->active_decode_targets[kS0T0]);
+ EXPECT_TRUE(headers[2].generic->active_decode_targets[kS1T0]);
+
+ EXPECT_EQ(headers[0].generic->decode_target_indications[kS0T0],
+ DecodeTargetIndication::kSwitch);
+
+ EXPECT_EQ(headers[1].generic->decode_target_indications[kS0T0],
+ DecodeTargetIndication::kSwitch);
+
+ EXPECT_EQ(headers[2].generic->decode_target_indications[kS0T0],
+ DecodeTargetIndication::kNotPresent);
+ EXPECT_EQ(headers[2].generic->decode_target_indications[kS1T0],
+ DecodeTargetIndication::kSwitch);
+
+ EXPECT_THAT(headers[0].generic->dependencies, IsEmpty()); // S0, 1
+ EXPECT_THAT(headers[1].generic->dependencies, ElementsAre(1)); // S0, 3
+ EXPECT_THAT(headers[2].generic->dependencies, ElementsAre(3)); // S1, 5
+
+ EXPECT_EQ(headers[0].generic->chain_diffs[0], 0);
+
+ EXPECT_EQ(headers[1].generic->chain_diffs[0], 2);
+ EXPECT_EQ(headers[1].generic->chain_diffs[1], 0);
+
+ EXPECT_EQ(headers[2].generic->chain_diffs[0], 2);
+ EXPECT_EQ(headers[2].generic->chain_diffs[1], 2);
+}
+
+TEST(RtpPayloadParamsVp9ToGenericTest, ChangeFirstActiveLayer) {
+ // S2 4---5
+ //
+ // S1 1---3 7
+ //
+ // S0 0---2 6
+ RtpPayloadState state;
+ RtpPayloadParams params(/*ssrc=*/123, &state, FieldTrialBasedConfig());
+
+ EncodedImage image;
+ CodecSpecificInfo info;
+ info.codecType = kVideoCodecVP9;
+ info.codecSpecific.VP9.flexible_mode = true;
+ info.codecSpecific.VP9.first_frame_in_picture = true;
+ info.codecSpecific.VP9.inter_layer_predicted = false;
+ info.codecSpecific.VP9.non_ref_for_inter_layer_pred = true;
+ info.codecSpecific.VP9.first_frame_in_picture = true;
+ info.end_of_picture = true;
+
+ RTPVideoHeader headers[8];
+ // S0 key frame.
+ info.codecSpecific.VP9.num_spatial_layers = 2;
+ info.codecSpecific.VP9.first_active_layer = 0;
+ image._frameType = VideoFrameType::kVideoFrameKey;
+ image.SetSpatialIndex(0);
+ info.codecSpecific.VP9.inter_pic_predicted = false;
+ info.codecSpecific.VP9.num_ref_pics = 0;
+ headers[0] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/0);
+
+ // S1 key frame.
+ image._frameType = VideoFrameType::kVideoFrameKey;
+ image.SetSpatialIndex(1);
+ info.codecSpecific.VP9.inter_pic_predicted = false;
+ info.codecSpecific.VP9.num_ref_pics = 0;
+ headers[1] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/1);
+
+ // S0 delta frame.
+ image._frameType = VideoFrameType::kVideoFrameDelta;
+ image.SetSpatialIndex(0);
+ info.codecSpecific.VP9.inter_pic_predicted = true;
+ info.codecSpecific.VP9.num_ref_pics = 1;
+ info.codecSpecific.VP9.p_diff[0] = 1;
+ headers[2] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/2);
+
+ // S1 delta frame.
+ image._frameType = VideoFrameType::kVideoFrameDelta;
+ info.codecSpecific.VP9.inter_pic_predicted = true;
+ info.codecSpecific.VP9.num_ref_pics = 1;
+ info.codecSpecific.VP9.p_diff[0] = 1;
+ headers[3] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/3);
+
+ // S2 key frame
+ info.codecSpecific.VP9.num_spatial_layers = 3;
+ info.codecSpecific.VP9.first_active_layer = 2;
+ image._frameType = VideoFrameType::kVideoFrameKey;
+ image.SetSpatialIndex(2);
+ info.codecSpecific.VP9.inter_pic_predicted = false;
+ info.codecSpecific.VP9.num_ref_pics = 0;
+ headers[4] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/4);
+
+ // S2 delta frame.
+ image._frameType = VideoFrameType::kVideoFrameDelta;
+ info.codecSpecific.VP9.inter_pic_predicted = true;
+ info.codecSpecific.VP9.num_ref_pics = 1;
+ info.codecSpecific.VP9.p_diff[0] = 1;
+ headers[5] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/5);
+
+ // S0 key frame after pause.
+ info.codecSpecific.VP9.num_spatial_layers = 2;
+ info.codecSpecific.VP9.first_active_layer = 0;
+ image._frameType = VideoFrameType::kVideoFrameKey;
+ image.SetSpatialIndex(0);
+ info.codecSpecific.VP9.inter_pic_predicted = false;
+ info.codecSpecific.VP9.num_ref_pics = 0;
+ headers[6] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/6);
+
+ // S1 key frame.
+ image._frameType = VideoFrameType::kVideoFrameKey;
+ image.SetSpatialIndex(1);
+ info.codecSpecific.VP9.inter_pic_predicted = false;
+ info.codecSpecific.VP9.num_ref_pics = 0;
+ headers[7] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/7);
+
+ ASSERT_TRUE(headers[0].generic);
+ int num_decode_targets = headers[0].generic->decode_target_indications.size();
+ int num_chains = headers[0].generic->chain_diffs.size();
+ // Rely on implementation detail there are always kMaxTemporalStreams temporal
+ // layers. In particular assume Decode Target#0 matches layer S0T0, and
+ // Decode Target#kMaxTemporalStreams matches layer S1T0.
+ static constexpr int kS0T0 = 0;
+ static constexpr int kS1T0 = kMaxTemporalStreams;
+ static constexpr int kS2T0 = 2 * kMaxTemporalStreams;
+ ASSERT_GE(num_decode_targets, 3);
+ ASSERT_GE(num_chains, 3);
+
+ for (int frame_idx = 0; frame_idx < int{std::size(headers)}; ++frame_idx) {
+ const RTPVideoHeader& header = headers[frame_idx];
+ ASSERT_TRUE(header.generic);
+ EXPECT_EQ(header.generic->temporal_index, 0);
+ ASSERT_THAT(header.generic->decode_target_indications,
+ SizeIs(num_decode_targets));
+ ASSERT_THAT(header.generic->chain_diffs, SizeIs(num_chains));
+ EXPECT_EQ(header.generic->frame_id, frame_idx);
+ }
+
+ EXPECT_TRUE(headers[0].generic->active_decode_targets[kS0T0]);
+ EXPECT_TRUE(headers[0].generic->active_decode_targets[kS1T0]);
+ EXPECT_FALSE(headers[0].generic->active_decode_targets[kS2T0]);
+
+ EXPECT_FALSE(headers[4].generic->active_decode_targets[kS0T0]);
+ EXPECT_FALSE(headers[4].generic->active_decode_targets[kS1T0]);
+ EXPECT_TRUE(headers[4].generic->active_decode_targets[kS2T0]);
+
+ EXPECT_EQ(headers[1].generic->active_decode_targets,
+ headers[0].generic->active_decode_targets);
+
+ EXPECT_EQ(headers[2].generic->active_decode_targets,
+ headers[0].generic->active_decode_targets);
+
+ EXPECT_EQ(headers[3].generic->active_decode_targets,
+ headers[0].generic->active_decode_targets);
+
+ EXPECT_EQ(headers[5].generic->active_decode_targets,
+ headers[4].generic->active_decode_targets);
+
+ EXPECT_EQ(headers[6].generic->active_decode_targets,
+ headers[0].generic->active_decode_targets);
+
+ EXPECT_EQ(headers[7].generic->active_decode_targets,
+ headers[0].generic->active_decode_targets);
+
+ EXPECT_EQ(headers[0].generic->chain_diffs[0], 0);
+ EXPECT_EQ(headers[0].generic->chain_diffs[1], 0);
+ EXPECT_EQ(headers[0].generic->chain_diffs[2], 0);
+
+ EXPECT_EQ(headers[1].generic->chain_diffs[0], 1);
+ EXPECT_EQ(headers[1].generic->chain_diffs[1], 0);
+ EXPECT_EQ(headers[1].generic->chain_diffs[2], 0);
+
+ EXPECT_EQ(headers[2].generic->chain_diffs[0], 2);
+ EXPECT_EQ(headers[2].generic->chain_diffs[1], 1);
+ EXPECT_EQ(headers[2].generic->chain_diffs[2], 0);
+
+ EXPECT_EQ(headers[3].generic->chain_diffs[0], 1);
+ EXPECT_EQ(headers[3].generic->chain_diffs[1], 2);
+ EXPECT_EQ(headers[3].generic->chain_diffs[2], 0);
+
+ EXPECT_EQ(headers[4].generic->chain_diffs[0], 0);
+ EXPECT_EQ(headers[4].generic->chain_diffs[1], 0);
+ EXPECT_EQ(headers[4].generic->chain_diffs[2], 0);
+
+ EXPECT_EQ(headers[5].generic->chain_diffs[0], 0);
+ EXPECT_EQ(headers[5].generic->chain_diffs[1], 0);
+ EXPECT_EQ(headers[5].generic->chain_diffs[2], 1);
+
+ EXPECT_EQ(headers[6].generic->chain_diffs[0], 0);
+ EXPECT_EQ(headers[6].generic->chain_diffs[1], 0);
+ EXPECT_EQ(headers[6].generic->chain_diffs[2], 0);
+
+ EXPECT_EQ(headers[7].generic->chain_diffs[0], 1);
+ EXPECT_EQ(headers[7].generic->chain_diffs[1], 0);
+ EXPECT_EQ(headers[7].generic->chain_diffs[2], 0);
+}
+
+class RtpPayloadParamsH264ToGenericTest : public ::testing::Test {
+ public:
+ enum LayerSync { kNoSync, kSync };
+
+ RtpPayloadParamsH264ToGenericTest()
+ : state_(), params_(123, &state_, trials_config_) {}
+
+ void ConvertAndCheck(int temporal_index,
+ int64_t shared_frame_id,
+ VideoFrameType frame_type,
+ LayerSync layer_sync,
+ const std::set<int64_t>& expected_deps,
+ uint16_t width = 0,
+ uint16_t height = 0) {
+ EncodedImage encoded_image;
+ encoded_image._frameType = frame_type;
+ encoded_image._encodedWidth = width;
+ encoded_image._encodedHeight = height;
+
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecH264;
+ codec_info.codecSpecific.H264.temporal_idx = temporal_index;
+ codec_info.codecSpecific.H264.base_layer_sync = layer_sync == kSync;
+
+ RTPVideoHeader header =
+ params_.GetRtpVideoHeader(encoded_image, &codec_info, shared_frame_id);
+
+ ASSERT_TRUE(header.generic);
+ EXPECT_EQ(header.generic->spatial_index, 0);
+
+ EXPECT_EQ(header.generic->frame_id, shared_frame_id);
+ EXPECT_EQ(header.generic->temporal_index, temporal_index);
+ std::set<int64_t> actual_deps(header.generic->dependencies.begin(),
+ header.generic->dependencies.end());
+ EXPECT_EQ(expected_deps, actual_deps);
+
+ EXPECT_EQ(header.width, width);
+ EXPECT_EQ(header.height, height);
+ }
+
+ protected:
+ FieldTrialBasedConfig trials_config_;
+ RtpPayloadState state_;
+ RtpPayloadParams params_;
+};
+
+TEST_F(RtpPayloadParamsH264ToGenericTest, Keyframe) {
+ ConvertAndCheck(0, 0, VideoFrameType::kVideoFrameKey, kNoSync, {}, 480, 360);
+ ConvertAndCheck(0, 1, VideoFrameType::kVideoFrameDelta, kNoSync, {0});
+ ConvertAndCheck(0, 2, VideoFrameType::kVideoFrameKey, kNoSync, {}, 480, 360);
+}
+
+TEST_F(RtpPayloadParamsH264ToGenericTest, TooHighTemporalIndex) {
+ ConvertAndCheck(0, 0, VideoFrameType::kVideoFrameKey, kNoSync, {}, 480, 360);
+
+ EncodedImage encoded_image;
+ encoded_image._frameType = VideoFrameType::kVideoFrameDelta;
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecH264;
+ codec_info.codecSpecific.H264.temporal_idx =
+ RtpGenericFrameDescriptor::kMaxTemporalLayers;
+ codec_info.codecSpecific.H264.base_layer_sync = false;
+
+ RTPVideoHeader header =
+ params_.GetRtpVideoHeader(encoded_image, &codec_info, 1);
+ EXPECT_FALSE(header.generic);
+}
+
+TEST_F(RtpPayloadParamsH264ToGenericTest, LayerSync) {
+ // 02120212 pattern
+ ConvertAndCheck(0, 0, VideoFrameType::kVideoFrameKey, kNoSync, {}, 480, 360);
+ ConvertAndCheck(2, 1, VideoFrameType::kVideoFrameDelta, kNoSync, {0});
+ ConvertAndCheck(1, 2, VideoFrameType::kVideoFrameDelta, kNoSync, {0});
+ ConvertAndCheck(2, 3, VideoFrameType::kVideoFrameDelta, kNoSync, {0, 1, 2});
+
+ ConvertAndCheck(0, 4, VideoFrameType::kVideoFrameDelta, kNoSync, {0});
+ ConvertAndCheck(2, 5, VideoFrameType::kVideoFrameDelta, kNoSync, {2, 3, 4});
+ ConvertAndCheck(1, 6, VideoFrameType::kVideoFrameDelta, kSync,
+ {4}); // layer sync
+ ConvertAndCheck(2, 7, VideoFrameType::kVideoFrameDelta, kNoSync, {4, 5, 6});
+}
+
+TEST_F(RtpPayloadParamsH264ToGenericTest, FrameIdGaps) {
+ // 0101 pattern
+ ConvertAndCheck(0, 0, VideoFrameType::kVideoFrameKey, kNoSync, {}, 480, 360);
+ ConvertAndCheck(1, 1, VideoFrameType::kVideoFrameDelta, kNoSync, {0});
+
+ ConvertAndCheck(0, 5, VideoFrameType::kVideoFrameDelta, kNoSync, {0});
+ ConvertAndCheck(1, 10, VideoFrameType::kVideoFrameDelta, kNoSync, {1, 5});
+
+ ConvertAndCheck(0, 15, VideoFrameType::kVideoFrameDelta, kNoSync, {5});
+ ConvertAndCheck(1, 20, VideoFrameType::kVideoFrameDelta, kNoSync, {10, 15});
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/rtp_receiver_gn/moz.build b/third_party/libwebrtc/call/rtp_receiver_gn/moz.build
new file mode 100644
index 0000000000..8809c7664f
--- /dev/null
+++ b/third_party/libwebrtc/call/rtp_receiver_gn/moz.build
@@ -0,0 +1,239 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/call/rtp_demuxer.cc",
+ "/third_party/libwebrtc/call/rtp_stream_receiver_controller.cc",
+ "/third_party/libwebrtc/call/rtx_receive_stream.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["RTC_ENABLE_WIN_WGC"] = True
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["TARGET_CPU"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["TARGET_CPU"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["TARGET_CPU"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["TARGET_CPU"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["TARGET_CPU"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["TARGET_CPU"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android" and CONFIG["TARGET_CPU"] == "arm":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["OS_TARGET"] == "Android" and CONFIG["TARGET_CPU"] == "x86":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "aarch64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "arm":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "x86":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "x86_64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("rtp_receiver_gn")
diff --git a/third_party/libwebrtc/call/rtp_sender_gn/moz.build b/third_party/libwebrtc/call/rtp_sender_gn/moz.build
new file mode 100644
index 0000000000..09560bbaab
--- /dev/null
+++ b/third_party/libwebrtc/call/rtp_sender_gn/moz.build
@@ -0,0 +1,239 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/call/rtp_payload_params.cc",
+ "/third_party/libwebrtc/call/rtp_transport_controller_send.cc",
+ "/third_party/libwebrtc/call/rtp_video_sender.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["RTC_ENABLE_WIN_WGC"] = True
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["TARGET_CPU"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["TARGET_CPU"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["TARGET_CPU"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["TARGET_CPU"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["TARGET_CPU"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["TARGET_CPU"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android" and CONFIG["TARGET_CPU"] == "arm":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["OS_TARGET"] == "Android" and CONFIG["TARGET_CPU"] == "x86":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "aarch64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "arm":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "x86":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "x86_64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("rtp_sender_gn")
diff --git a/third_party/libwebrtc/call/rtp_stream_receiver_controller.cc b/third_party/libwebrtc/call/rtp_stream_receiver_controller.cc
new file mode 100644
index 0000000000..993a4fc76e
--- /dev/null
+++ b/third_party/libwebrtc/call/rtp_stream_receiver_controller.cc
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/rtp_stream_receiver_controller.h"
+
+#include <memory>
+
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+RtpStreamReceiverController::Receiver::Receiver(
+ RtpStreamReceiverController* controller,
+ uint32_t ssrc,
+ RtpPacketSinkInterface* sink)
+ : controller_(controller), sink_(sink) {
+ const bool sink_added = controller_->AddSink(ssrc, sink_);
+ if (!sink_added) {
+ RTC_LOG(LS_ERROR)
+ << "RtpStreamReceiverController::Receiver::Receiver: Sink "
+ "could not be added for SSRC="
+ << ssrc << ".";
+ }
+}
+
+RtpStreamReceiverController::Receiver::~Receiver() {
+ // This may fail, if corresponding AddSink in the constructor failed.
+ controller_->RemoveSink(sink_);
+}
+
+RtpStreamReceiverController::RtpStreamReceiverController() {}
+
+RtpStreamReceiverController::~RtpStreamReceiverController() = default;
+
+std::unique_ptr<RtpStreamReceiverInterface>
+RtpStreamReceiverController::CreateReceiver(uint32_t ssrc,
+ RtpPacketSinkInterface* sink) {
+ return std::make_unique<Receiver>(this, ssrc, sink);
+}
+
+bool RtpStreamReceiverController::OnRtpPacket(const RtpPacketReceived& packet) {
+ RTC_DCHECK_RUN_ON(&demuxer_sequence_);
+ return demuxer_.OnRtpPacket(packet);
+}
+
+void RtpStreamReceiverController::OnRecoveredPacket(
+ const RtpPacketReceived& packet) {
+ RTC_DCHECK_RUN_ON(&demuxer_sequence_);
+ demuxer_.OnRtpPacket(packet);
+}
+
+bool RtpStreamReceiverController::AddSink(uint32_t ssrc,
+ RtpPacketSinkInterface* sink) {
+ RTC_DCHECK_RUN_ON(&demuxer_sequence_);
+ return demuxer_.AddSink(ssrc, sink);
+}
+
+bool RtpStreamReceiverController::RemoveSink(
+ const RtpPacketSinkInterface* sink) {
+ RTC_DCHECK_RUN_ON(&demuxer_sequence_);
+ return demuxer_.RemoveSink(sink);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/rtp_stream_receiver_controller.h b/third_party/libwebrtc/call/rtp_stream_receiver_controller.h
new file mode 100644
index 0000000000..1040632639
--- /dev/null
+++ b/third_party/libwebrtc/call/rtp_stream_receiver_controller.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef CALL_RTP_STREAM_RECEIVER_CONTROLLER_H_
+#define CALL_RTP_STREAM_RECEIVER_CONTROLLER_H_
+
+#include <memory>
+
+#include "api/sequence_checker.h"
+#include "call/rtp_demuxer.h"
+#include "call/rtp_stream_receiver_controller_interface.h"
+#include "modules/rtp_rtcp/include/recovered_packet_receiver.h"
+
+namespace webrtc {
+
+class RtpPacketReceived;
+
+// This class represents the RTP receive parsing and demuxing, for a
+// single RTP session.
+// TODO(bugs.webrtc.org/7135): Add RTCP processing, we should aim to terminate
+// RTCP and not leave any RTCP processing to individual receive streams.
+class RtpStreamReceiverController : public RtpStreamReceiverControllerInterface,
+ public RecoveredPacketReceiver {
+ public:
+ RtpStreamReceiverController();
+ ~RtpStreamReceiverController() override;
+
+ // Implements RtpStreamReceiverControllerInterface.
+ std::unique_ptr<RtpStreamReceiverInterface> CreateReceiver(
+ uint32_t ssrc,
+ RtpPacketSinkInterface* sink) override;
+
+ // TODO(bugs.webrtc.org/7135): Not yet responsible for parsing.
+ bool OnRtpPacket(const RtpPacketReceived& packet);
+
+ // Implements RecoveredPacketReceiver.
+ // Responsible for demuxing recovered FLEXFEC packets.
+ void OnRecoveredPacket(const RtpPacketReceived& packet) override;
+
+ private:
+ class Receiver : public RtpStreamReceiverInterface {
+ public:
+ Receiver(RtpStreamReceiverController* controller,
+ uint32_t ssrc,
+ RtpPacketSinkInterface* sink);
+
+ ~Receiver() override;
+
+ private:
+ RtpStreamReceiverController* const controller_;
+ RtpPacketSinkInterface* const sink_;
+ };
+
+ // Thread-safe wrappers for the corresponding RtpDemuxer methods.
+ bool AddSink(uint32_t ssrc, RtpPacketSinkInterface* sink);
+ bool RemoveSink(const RtpPacketSinkInterface* sink);
+
+ // TODO(bugs.webrtc.org/11993): We expect construction and all methods to be
+ // called on the same thread/tq. Currently this is the worker thread
+ // (including OnRtpPacket) but a more natural fit would be the network thread.
+ // Using a sequence checker to ensure that usage is correct but at the same
+ // time not require a specific thread/tq, an instance of this class + the
+ // associated functionality should be easily moved from one execution context
+ // to another (i.e. when network packets don't hop to the worker thread inside
+ // of Call).
+ SequenceChecker demuxer_sequence_;
+ // At this level the demuxer is only configured to demux by SSRC, so don't
+ // worry about MIDs (MIDs are handled by upper layers).
+ RtpDemuxer demuxer_ RTC_GUARDED_BY(&demuxer_sequence_){false /*use_mid*/};
+};
+
+} // namespace webrtc
+
+#endif // CALL_RTP_STREAM_RECEIVER_CONTROLLER_H_
diff --git a/third_party/libwebrtc/call/rtp_stream_receiver_controller_interface.h b/third_party/libwebrtc/call/rtp_stream_receiver_controller_interface.h
new file mode 100644
index 0000000000..793d0bc145
--- /dev/null
+++ b/third_party/libwebrtc/call/rtp_stream_receiver_controller_interface.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef CALL_RTP_STREAM_RECEIVER_CONTROLLER_INTERFACE_H_
+#define CALL_RTP_STREAM_RECEIVER_CONTROLLER_INTERFACE_H_
+
+#include <memory>
+
+#include "call/rtp_packet_sink_interface.h"
+
+namespace webrtc {
+
+// An RtpStreamReceiver is responsible for the rtp-specific but
+// media-independent state needed for receiving an RTP stream.
+// TODO(bugs.webrtc.org/7135): Currently, only owns the association between ssrc
+// and the stream's RtpPacketSinkInterface. Ownership of corresponding objects
+// from modules/rtp_rtcp/ should move to this class (or rather, the
+// corresponding implementation class). We should add methods for getting rtp
+// receive stats, and for sending RTCP messages related to the receive stream.
+class RtpStreamReceiverInterface {
+ public:
+ virtual ~RtpStreamReceiverInterface() {}
+};
+
+// This class acts as a factory for RtpStreamReceiver objects.
+class RtpStreamReceiverControllerInterface {
+ public:
+ virtual ~RtpStreamReceiverControllerInterface() {}
+
+ virtual std::unique_ptr<RtpStreamReceiverInterface> CreateReceiver(
+ uint32_t ssrc,
+ RtpPacketSinkInterface* sink) = 0;
+};
+
+} // namespace webrtc
+
+#endif // CALL_RTP_STREAM_RECEIVER_CONTROLLER_INTERFACE_H_
diff --git a/third_party/libwebrtc/call/rtp_transport_config.h b/third_party/libwebrtc/call/rtp_transport_config.h
new file mode 100644
index 0000000000..6c94f7d911
--- /dev/null
+++ b/third_party/libwebrtc/call/rtp_transport_config.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef CALL_RTP_TRANSPORT_CONFIG_H_
+#define CALL_RTP_TRANSPORT_CONFIG_H_
+
+#include <memory>
+
+#include "api/field_trials_view.h"
+#include "api/network_state_predictor.h"
+#include "api/rtc_event_log/rtc_event_log.h"
+#include "api/transport/bitrate_settings.h"
+#include "api/transport/network_control.h"
+#include "rtc_base/task_queue.h"
+
+namespace webrtc {
+
+struct RtpTransportConfig {
+ // Bitrate config used until valid bitrate estimates are calculated. Also
+ // used to cap total bitrate used. This comes from the remote connection.
+ BitrateConstraints bitrate_config;
+
+ // RtcEventLog to use for this call. Required.
+ // Use webrtc::RtcEventLog::CreateNull() for a null implementation.
+ RtcEventLog* event_log = nullptr;
+
+ // Task Queue Factory to be used in this call. Required.
+ TaskQueueFactory* task_queue_factory = nullptr;
+
+ // NetworkStatePredictor to use for this call.
+ NetworkStatePredictorFactoryInterface* network_state_predictor_factory =
+ nullptr;
+
+ // Network controller factory to use for this call.
+ NetworkControllerFactoryInterface* network_controller_factory = nullptr;
+
+ // Key-value mapping of internal configurations to apply,
+ // e.g. field trials.
+ const FieldTrialsView* trials = nullptr;
+
+ // The burst interval of the pacer, see TaskQueuePacedSender constructor.
+ absl::optional<TimeDelta> pacer_burst_interval;
+};
+} // namespace webrtc
+
+#endif // CALL_RTP_TRANSPORT_CONFIG_H_
diff --git a/third_party/libwebrtc/call/rtp_transport_controller_send.cc b/third_party/libwebrtc/call/rtp_transport_controller_send.cc
new file mode 100644
index 0000000000..556a4dd89a
--- /dev/null
+++ b/third_party/libwebrtc/call/rtp_transport_controller_send.cc
@@ -0,0 +1,708 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "call/rtp_transport_controller_send.h"
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "absl/strings/match.h"
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "api/task_queue/pending_task_safety_flag.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/transport/goog_cc_factory.h"
+#include "api/transport/network_types.h"
+#include "api/units/data_rate.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "call/rtp_video_sender.h"
+#include "logging/rtc_event_log/events/rtc_event_remote_estimate.h"
+#include "logging/rtc_event_log/events/rtc_event_route_change.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/rate_limiter.h"
+
+namespace webrtc {
+namespace {
+static const int64_t kRetransmitWindowSizeMs = 500;
+static const size_t kMaxOverheadBytes = 500;
+
+constexpr TimeDelta kPacerQueueUpdateInterval = TimeDelta::Millis(25);
+
+TargetRateConstraints ConvertConstraints(int min_bitrate_bps,
+ int max_bitrate_bps,
+ int start_bitrate_bps,
+ Clock* clock) {
+ TargetRateConstraints msg;
+ msg.at_time = Timestamp::Millis(clock->TimeInMilliseconds());
+ msg.min_data_rate = min_bitrate_bps >= 0
+ ? DataRate::BitsPerSec(min_bitrate_bps)
+ : DataRate::Zero();
+ msg.max_data_rate = max_bitrate_bps > 0
+ ? DataRate::BitsPerSec(max_bitrate_bps)
+ : DataRate::Infinity();
+ if (start_bitrate_bps > 0)
+ msg.starting_rate = DataRate::BitsPerSec(start_bitrate_bps);
+ return msg;
+}
+
+TargetRateConstraints ConvertConstraints(const BitrateConstraints& contraints,
+ Clock* clock) {
+ return ConvertConstraints(contraints.min_bitrate_bps,
+ contraints.max_bitrate_bps,
+ contraints.start_bitrate_bps, clock);
+}
+
+bool IsEnabled(const FieldTrialsView& trials, absl::string_view key) {
+ return absl::StartsWith(trials.Lookup(key), "Enabled");
+}
+
+bool IsRelayed(const rtc::NetworkRoute& route) {
+ return route.local.uses_turn() || route.remote.uses_turn();
+}
+} // namespace
+
+RtpTransportControllerSend::RtpTransportControllerSend(
+ Clock* clock,
+ const RtpTransportConfig& config)
+ : clock_(clock),
+ event_log_(config.event_log),
+ task_queue_factory_(config.task_queue_factory),
+ task_queue_(TaskQueueBase::Current()),
+ bitrate_configurator_(config.bitrate_config),
+ pacer_started_(false),
+ pacer_(clock,
+ &packet_router_,
+ *config.trials,
+ TimeDelta::Millis(5),
+ 3,
+ config.pacer_burst_interval),
+ observer_(nullptr),
+ controller_factory_override_(config.network_controller_factory),
+ controller_factory_fallback_(
+ std::make_unique<GoogCcNetworkControllerFactory>(
+ config.network_state_predictor_factory)),
+ process_interval_(controller_factory_fallback_->GetProcessInterval()),
+ last_report_block_time_(Timestamp::Millis(clock_->TimeInMilliseconds())),
+ reset_feedback_on_route_change_(
+ !IsEnabled(*config.trials, "WebRTC-Bwe-NoFeedbackReset")),
+ add_pacing_to_cwin_(
+ IsEnabled(*config.trials,
+ "WebRTC-AddPacingToCongestionWindowPushback")),
+ relay_bandwidth_cap_("relay_cap", DataRate::PlusInfinity()),
+ transport_overhead_bytes_per_packet_(0),
+ network_available_(false),
+ congestion_window_size_(DataSize::PlusInfinity()),
+ is_congested_(false),
+ retransmission_rate_limiter_(clock, kRetransmitWindowSizeMs),
+ field_trials_(*config.trials) {
+ ParseFieldTrial({&relay_bandwidth_cap_},
+ config.trials->Lookup("WebRTC-Bwe-NetworkRouteConstraints"));
+ initial_config_.constraints =
+ ConvertConstraints(config.bitrate_config, clock_);
+ initial_config_.event_log = config.event_log;
+ initial_config_.key_value_config = config.trials;
+ RTC_DCHECK(config.bitrate_config.start_bitrate_bps > 0);
+
+ pacer_.SetPacingRates(
+ DataRate::BitsPerSec(config.bitrate_config.start_bitrate_bps),
+ DataRate::Zero());
+}
+
+RtpTransportControllerSend::~RtpTransportControllerSend() {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ RTC_DCHECK(video_rtp_senders_.empty());
+ pacer_queue_update_task_.Stop();
+ controller_task_.Stop();
+}
+
+RtpVideoSenderInterface* RtpTransportControllerSend::CreateRtpVideoSender(
+ const std::map<uint32_t, RtpState>& suspended_ssrcs,
+ const std::map<uint32_t, RtpPayloadState>& states,
+ const RtpConfig& rtp_config,
+ int rtcp_report_interval_ms,
+ Transport* send_transport,
+ const RtpSenderObservers& observers,
+ RtcEventLog* event_log,
+ std::unique_ptr<FecController> fec_controller,
+ const RtpSenderFrameEncryptionConfig& frame_encryption_config,
+ rtc::scoped_refptr<FrameTransformerInterface> frame_transformer) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ video_rtp_senders_.push_back(std::make_unique<RtpVideoSender>(
+ clock_, suspended_ssrcs, states, rtp_config, rtcp_report_interval_ms,
+ send_transport, observers,
+ // TODO(holmer): Remove this circular dependency by injecting
+ // the parts of RtpTransportControllerSendInterface that are really used.
+ this, event_log, &retransmission_rate_limiter_, std::move(fec_controller),
+ frame_encryption_config.frame_encryptor,
+ frame_encryption_config.crypto_options, std::move(frame_transformer),
+ field_trials_, task_queue_factory_));
+ return video_rtp_senders_.back().get();
+}
+
+void RtpTransportControllerSend::DestroyRtpVideoSender(
+ RtpVideoSenderInterface* rtp_video_sender) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ std::vector<std::unique_ptr<RtpVideoSenderInterface>>::iterator it =
+ video_rtp_senders_.end();
+ for (it = video_rtp_senders_.begin(); it != video_rtp_senders_.end(); ++it) {
+ if (it->get() == rtp_video_sender) {
+ break;
+ }
+ }
+ RTC_DCHECK(it != video_rtp_senders_.end());
+ video_rtp_senders_.erase(it);
+}
+
+void RtpTransportControllerSend::UpdateControlState() {
+ absl::optional<TargetTransferRate> update = control_handler_->GetUpdate();
+ if (!update)
+ return;
+ retransmission_rate_limiter_.SetMaxRate(update->target_rate.bps());
+ // We won't create control_handler_ until we have an observers.
+ RTC_DCHECK(observer_ != nullptr);
+ observer_->OnTargetTransferRate(*update);
+}
+
+void RtpTransportControllerSend::UpdateCongestedState() {
+ if (auto update = GetCongestedStateUpdate()) {
+ is_congested_ = update.value();
+ pacer_.SetCongested(update.value());
+ }
+}
+
+absl::optional<bool> RtpTransportControllerSend::GetCongestedStateUpdate()
+ const {
+ bool congested = transport_feedback_adapter_.GetOutstandingData() >=
+ congestion_window_size_;
+ if (congested != is_congested_)
+ return congested;
+ return absl::nullopt;
+}
+
+PacketRouter* RtpTransportControllerSend::packet_router() {
+ return &packet_router_;
+}
+
+NetworkStateEstimateObserver*
+RtpTransportControllerSend::network_state_estimate_observer() {
+ return this;
+}
+
+TransportFeedbackObserver*
+RtpTransportControllerSend::transport_feedback_observer() {
+ return this;
+}
+
+RtpPacketSender* RtpTransportControllerSend::packet_sender() {
+ return &pacer_;
+}
+
+void RtpTransportControllerSend::SetAllocatedSendBitrateLimits(
+ BitrateAllocationLimits limits) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ streams_config_.min_total_allocated_bitrate = limits.min_allocatable_rate;
+ streams_config_.max_padding_rate = limits.max_padding_rate;
+ streams_config_.max_total_allocated_bitrate = limits.max_allocatable_rate;
+ UpdateStreamsConfig();
+}
+void RtpTransportControllerSend::SetPacingFactor(float pacing_factor) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ streams_config_.pacing_factor = pacing_factor;
+ UpdateStreamsConfig();
+}
+void RtpTransportControllerSend::SetQueueTimeLimit(int limit_ms) {
+ pacer_.SetQueueTimeLimit(TimeDelta::Millis(limit_ms));
+}
+StreamFeedbackProvider*
+RtpTransportControllerSend::GetStreamFeedbackProvider() {
+ return &feedback_demuxer_;
+}
+
+void RtpTransportControllerSend::RegisterTargetTransferRateObserver(
+ TargetTransferRateObserver* observer) {
+ RTC_DCHECK_RUN_ON(&sequence_checker_);
+ RTC_DCHECK(observer_ == nullptr);
+ observer_ = observer;
+ observer_->OnStartRateUpdate(*initial_config_.constraints.starting_rate);
+ MaybeCreateControllers();
+}
+
+// Decides whether a route change is significant enough to reset bandwidth
+// estimation: connectivity or network-id changes always count; a change in
+// relaying status only counts when a finite relay bandwidth cap is configured.
+bool RtpTransportControllerSend::IsRelevantRouteChange(
+    const rtc::NetworkRoute& old_route,
+    const rtc::NetworkRoute& new_route) const {
+  // TODO(bugs.webrtc.org/11438): Experiment with using more information/
+  // other conditions.
+  bool connected_changed = old_route.connected != new_route.connected;
+  bool route_ids_changed =
+      old_route.local.network_id() != new_route.local.network_id() ||
+      old_route.remote.network_id() != new_route.remote.network_id();
+  if (relay_bandwidth_cap_->IsFinite()) {
+    bool relaying_changed = IsRelayed(old_route) != IsRelayed(new_route);
+    return connected_changed || route_ids_changed || relaying_changed;
+  } else {
+    return connected_changed || route_ids_changed;
+  }
+}
+
+// Handles a network route change for one transport. Disconnected routes are
+// ignored. A first-seen transport only records the route (no BWE reset); for
+// known transports, a "relevant" change (see IsRelevantRouteChange) resets
+// bitrates to the configured values, logs a route-change event, optionally
+// resets the feedback adapter, and clears congestion state.
+void RtpTransportControllerSend::OnNetworkRouteChanged(
+    absl::string_view transport_name,
+    const rtc::NetworkRoute& network_route) {
+  RTC_DCHECK_RUN_ON(&sequence_checker_);
+  // Check if the network route is connected.
+  if (!network_route.connected) {
+    // TODO(honghaiz): Perhaps handle this in SignalChannelNetworkState and
+    // consider merging these two methods.
+    return;
+  }
+
+  // Apply or lift the relay bandwidth cap depending on whether the new route
+  // is relayed; a non-empty result means the configured constraints changed.
+  absl::optional<BitrateConstraints> relay_constraint_update =
+      ApplyOrLiftRelayCap(IsRelayed(network_route));
+
+  // Check whether the network route has changed on each transport.
+  auto result = network_routes_.insert(
+      // Explicit conversion of transport_name to std::string here is necessary
+      // to support some platforms that cannot yet deal with implicit
+      // conversion in these types of situations.
+      std::make_pair(std::string(transport_name), network_route));
+  auto kv = result.first;
+  bool inserted = result.second;
+  if (inserted || !(kv->second == network_route)) {
+    RTC_LOG(LS_INFO) << "Network route changed on transport " << transport_name
+                     << ": new_route = " << network_route.DebugString();
+    if (!inserted) {
+      RTC_LOG(LS_INFO) << "old_route = " << kv->second.DebugString();
+    }
+  }
+
+  if (inserted) {
+    if (relay_constraint_update.has_value()) {
+      UpdateBitrateConstraints(*relay_constraint_update);
+    }
+    transport_overhead_bytes_per_packet_ = network_route.packet_overhead;
+    // No need to reset BWE if this is the first time the network connects.
+    return;
+  }
+
+  const rtc::NetworkRoute old_route = kv->second;
+  kv->second = network_route;
+
+  // Check if enough conditions of the new/old route has changed
+  // to trigger resetting of bitrates (and a probe).
+  if (IsRelevantRouteChange(old_route, network_route)) {
+    BitrateConstraints bitrate_config = bitrate_configurator_.GetConfig();
+    RTC_LOG(LS_INFO) << "Reset bitrates to min: "
+                     << bitrate_config.min_bitrate_bps
+                     << " bps, start: " << bitrate_config.start_bitrate_bps
+                     << " bps, max: " << bitrate_config.max_bitrate_bps
+                     << " bps.";
+    RTC_DCHECK_GT(bitrate_config.start_bitrate_bps, 0);
+
+    if (event_log_) {
+      event_log_->Log(std::make_unique<RtcEventRouteChange>(
+          network_route.connected, network_route.packet_overhead));
+    }
+    NetworkRouteChange msg;
+    msg.at_time = Timestamp::Millis(clock_->TimeInMilliseconds());
+    msg.constraints = ConvertConstraints(bitrate_config, clock_);
+    transport_overhead_bytes_per_packet_ = network_route.packet_overhead;
+    if (reset_feedback_on_route_change_) {
+      transport_feedback_adapter_.SetNetworkRoute(network_route);
+    }
+    if (controller_) {
+      PostUpdates(controller_->OnNetworkRouteChange(msg));
+    } else {
+      // Controller not created yet; fold the new constraints into the initial
+      // config it will be created with.
+      UpdateInitialConstraints(msg.constraints);
+    }
+    // Route changed: forget any congestion signal tied to the old path.
+    is_congested_ = false;
+    pacer_.SetCongested(false);
+  }
+}
+// Reacts to network up/down transitions: pauses/resumes the pacer, clears
+// congestion state, lazily creates the congestion controller on first
+// availability, informs it of the change, and notifies all video RTP senders.
+void RtpTransportControllerSend::OnNetworkAvailability(bool network_available) {
+  RTC_DCHECK_RUN_ON(&sequence_checker_);
+  RTC_LOG(LS_VERBOSE) << "SignalNetworkState "
+                      << (network_available ? "Up" : "Down");
+  NetworkAvailability msg;
+  msg.at_time = Timestamp::Millis(clock_->TimeInMilliseconds());
+  msg.network_available = network_available;
+  network_available_ = network_available;
+  if (network_available) {
+    pacer_.Resume();
+  } else {
+    pacer_.Pause();
+  }
+  // Congestion state from the previous connectivity period no longer applies.
+  is_congested_ = false;
+  pacer_.SetCongested(false);
+
+  if (!controller_) {
+    MaybeCreateControllers();
+  }
+  if (controller_) {
+    control_handler_->SetNetworkAvailability(network_available);
+    PostUpdates(controller_->OnNetworkAvailability(msg));
+    UpdateControlState();
+  }
+  for (auto& rtp_sender : video_rtp_senders_) {
+    rtp_sender->OnNetworkAvailability(network_available);
+  }
+}
+// This object implements NetworkLinkRtcpObserver itself.
+NetworkLinkRtcpObserver* RtpTransportControllerSend::GetRtcpObserver() {
+  return this;
+}
+// Returns how long (ms) the oldest packet has been waiting in the pacer queue.
+int64_t RtpTransportControllerSend::GetPacerQueuingDelayMs() const {
+  return pacer_.OldestPacketWaitTime().ms();
+}
+// Returns the send time of the first packet the pacer sent, if any.
+absl::optional<Timestamp> RtpTransportControllerSend::GetFirstPacketTime()
+    const {
+  return pacer_.FirstSentPacketTime();
+}
+// Toggles periodic probing while in application-limited regions (ALR) and
+// propagates the setting to the congestion controller.
+void RtpTransportControllerSend::EnablePeriodicAlrProbing(bool enable) {
+  RTC_DCHECK_RUN_ON(&sequence_checker_);
+
+  streams_config_.requests_alr_probing = enable;
+  UpdateStreamsConfig();
+}
+// Entry point for sent-packet notifications. May be invoked off the task
+// queue; in that case the work is re-posted (guarded by `safety_` so it is
+// dropped after destruction), otherwise handled inline.
+void RtpTransportControllerSend::OnSentPacket(
+    const rtc::SentPacket& sent_packet) {
+  // Normally called on the network thread!
+  // TODO(crbug.com/1373439): Clarify other thread contexts calling in,
+  // and simplify task posting logic when the combined network/worker project
+  // launches.
+  if (TaskQueueBase::Current() != task_queue_) {
+    task_queue_->PostTask(SafeTask(safety_.flag(), [this, sent_packet]() {
+      RTC_DCHECK_RUN_ON(&sequence_checker_);
+      ProcessSentPacket(sent_packet);
+    }));
+    return;
+  }
+
+  RTC_DCHECK_RUN_ON(&sequence_checker_);
+  ProcessSentPacket(sent_packet);
+}
+
+// Matches the sent packet against the feedback adapter's bookkeeping. If the
+// packet was tracked, computes any congestion-state change and forwards the
+// event to the congestion controller; skips further work when nothing changed.
+void RtpTransportControllerSend::ProcessSentPacket(
+    const rtc::SentPacket& sent_packet) {
+  RTC_DCHECK_RUN_ON(&sequence_checker_);
+  absl::optional<SentPacket> packet_msg =
+      transport_feedback_adapter_.ProcessSentPacket(sent_packet);
+  if (!packet_msg)
+    return;
+
+  auto congestion_update = GetCongestedStateUpdate();
+  NetworkControlUpdate control_update;
+  if (controller_)
+    control_update = controller_->OnSentPacket(*packet_msg);
+  // Nothing to propagate: neither congestion state nor controller output
+  // changed.
+  if (!congestion_update && !control_update.has_updates())
+    return;
+  ProcessSentPacketUpdates(std::move(control_update));
+}
+
+// Runs on task_queue_ (also enforced below via sequence_checker_).
+// Applies the congested-state change computed in ProcessSentPacket and, if a
+// controller exists, publishes its pending updates.
+void RtpTransportControllerSend::ProcessSentPacketUpdates(
+    NetworkControlUpdate updates) {
+  RTC_DCHECK_RUN_ON(&sequence_checker_);
+  // Only update outstanding data if:
+  // 1. Packet feedback is used.
+  // 2. The packet has not yet received an acknowledgement.
+  // 3. It is not a retransmission of an earlier packet.
+  UpdateCongestedState();
+  if (controller_) {
+    PostUpdates(std::move(updates));
+  }
+}
+
+// Forwards an incoming-packet notification to the congestion controller.
+void RtpTransportControllerSend::OnReceivedPacket(
+    const ReceivedPacket& packet_msg) {
+  RTC_DCHECK_RUN_ON(&sequence_checker_);
+  if (controller_)
+    PostUpdates(controller_->OnReceivedPacket(packet_msg));
+}
+
+// Pushes new bitrate constraints either to the live congestion controller or,
+// if none exists yet, into the initial config used when one is created.
+void RtpTransportControllerSend::UpdateBitrateConstraints(
+    const BitrateConstraints& updated) {
+  RTC_DCHECK_RUN_ON(&sequence_checker_);
+  TargetRateConstraints msg = ConvertConstraints(updated, clock_);
+  if (controller_) {
+    PostUpdates(controller_->OnTargetRateConstraints(msg));
+  } else {
+    UpdateInitialConstraints(msg);
+  }
+}
+
+// Merges SDP-signaled bitrate constraints via the configurator; only acts if
+// the effective configuration actually changed.
+void RtpTransportControllerSend::SetSdpBitrateParameters(
+    const BitrateConstraints& constraints) {
+  RTC_DCHECK_RUN_ON(&sequence_checker_);
+  absl::optional<BitrateConstraints> updated =
+      bitrate_configurator_.UpdateWithSdpParameters(constraints);
+  if (updated.has_value()) {
+    UpdateBitrateConstraints(*updated);
+  } else {
+    RTC_LOG(LS_VERBOSE)
+        << "WebRTC.RtpTransportControllerSend.SetSdpBitrateParameters: "
+           "nothing to update";
+  }
+}
+
+// Merges application-provided bitrate preferences via the configurator; only
+// acts if the effective configuration actually changed.
+void RtpTransportControllerSend::SetClientBitratePreferences(
+    const BitrateSettings& preferences) {
+  RTC_DCHECK_RUN_ON(&sequence_checker_);
+  absl::optional<BitrateConstraints> updated =
+      bitrate_configurator_.UpdateWithClientPreferences(preferences);
+  if (updated.has_value()) {
+    UpdateBitrateConstraints(*updated);
+  } else {
+    RTC_LOG(LS_VERBOSE)
+        << "WebRTC.RtpTransportControllerSend.SetClientBitratePreferences: "
+           "nothing to update";
+  }
+}
+
+// Applies the configured relay bandwidth cap when the route is relayed, lifts
+// it (infinite cap) otherwise. Returns new constraints only if they changed.
+absl::optional<BitrateConstraints>
+RtpTransportControllerSend::ApplyOrLiftRelayCap(bool is_relayed) {
+  DataRate cap = is_relayed ? relay_bandwidth_cap_ : DataRate::PlusInfinity();
+  return bitrate_configurator_.UpdateWithRelayCap(cap);
+}
+
+// Propagates a new per-packet transport overhead to the pacer and all video
+// RTP senders. Values at or above kMaxOverheadBytes are rejected with a log.
+void RtpTransportControllerSend::OnTransportOverheadChanged(
+    size_t transport_overhead_bytes_per_packet) {
+  RTC_DCHECK_RUN_ON(&sequence_checker_);
+  if (transport_overhead_bytes_per_packet >= kMaxOverheadBytes) {
+    RTC_LOG(LS_ERROR) << "Transport overhead exceeds " << kMaxOverheadBytes;
+    return;
+  }
+
+  pacer_.SetTransportOverhead(
+      DataSize::Bytes(transport_overhead_bytes_per_packet));
+
+  // TODO(holmer): Call AudioRtpSenders when they have been moved to
+  // RtpTransportControllerSend.
+  for (auto& rtp_video_sender : video_rtp_senders_) {
+    rtp_video_sender->OnTransportOverheadChanged(
+        transport_overhead_bytes_per_packet);
+  }
+}
+
+// Tells the pacer whether audio packets should count toward pacing budgets.
+void RtpTransportControllerSend::AccountForAudioPacketsInPacedSender(
+    bool account_for_audio) {
+  pacer_.SetAccountForAudioPackets(account_for_audio);
+}
+
+// Tells the pacer to include transport overhead in its packet size accounting.
+void RtpTransportControllerSend::IncludeOverheadInPacedSender() {
+  pacer_.SetIncludeOverhead();
+}
+
+// Idempotently starts the paced sender; subsequent calls are no-ops.
+void RtpTransportControllerSend::EnsureStarted() {
+  RTC_DCHECK_RUN_ON(&sequence_checker_);
+  if (!pacer_started_) {
+    pacer_started_ = true;
+    pacer_.EnsureStarted();
+  }
+}
+
+// Feeds a REMB (receiver estimated max bitrate) report into the congestion
+// controller.
+void RtpTransportControllerSend::OnReceiverEstimatedMaxBitrate(
+    Timestamp receive_time,
+    DataRate bitrate) {
+  RTC_DCHECK_RUN_ON(&sequence_checker_);
+  RemoteBitrateReport msg;
+  msg.receive_time = receive_time;
+  msg.bandwidth = bitrate;
+  if (controller_)
+    PostUpdates(controller_->OnRemoteBitrateReport(msg));
+}
+
+// Feeds an RTT measurement into the congestion controller. The value is
+// rounded to whole milliseconds; zero RTTs (sub-millisecond after rounding)
+// are dropped.
+void RtpTransportControllerSend::OnRttUpdate(Timestamp receive_time,
+                                             TimeDelta rtt) {
+  RTC_DCHECK_RUN_ON(&sequence_checker_);
+  RoundTripTimeUpdate report;
+  report.receive_time = receive_time;
+  report.round_trip_time = rtt.RoundTo(TimeDelta::Millis(1));
+  report.smoothed = false;
+  if (controller_ && !report.round_trip_time.IsZero())
+    PostUpdates(controller_->OnRoundTripTimeUpdate(report));
+}
+
+// Records a packet scheduled for sending so later transport feedback can be
+// matched to it, both in the demuxer and the feedback adapter.
+void RtpTransportControllerSend::OnAddPacket(
+    const RtpPacketSendInfo& packet_info) {
+  RTC_DCHECK_RUN_ON(&sequence_checker_);
+  Timestamp creation_time = Timestamp::Millis(clock_->TimeInMilliseconds());
+  feedback_demuxer_.AddPacket(packet_info);
+  transport_feedback_adapter_.AddPacket(
+      packet_info, transport_overhead_bytes_per_packet_, creation_time);
+}
+
+// Handles an incoming transport-wide feedback RTCP packet: demuxes it to
+// per-stream observers, converts it via the feedback adapter, forwards the
+// result to the congestion controller, and refreshes congestion state.
+void RtpTransportControllerSend::OnTransportFeedback(
+    Timestamp receive_time,
+    const rtcp::TransportFeedback& feedback) {
+  RTC_DCHECK_RUN_ON(&sequence_checker_);
+  feedback_demuxer_.OnTransportFeedback(feedback);
+  absl::optional<TransportPacketsFeedback> feedback_msg =
+      transport_feedback_adapter_.ProcessTransportFeedback(feedback,
+                                                           receive_time);
+  if (feedback_msg) {
+    if (controller_)
+      PostUpdates(controller_->OnTransportPacketsFeedback(*feedback_msg));
+
+    // Only update outstanding data if any packet is first time acked.
+    UpdateCongestedState();
+  }
+}
+
+// Handles a remote network state estimate: logs it to the event log, stamps
+// it with the local time, and forwards it to the congestion controller.
+void RtpTransportControllerSend::OnRemoteNetworkEstimate(
+    NetworkStateEstimate estimate) {
+  RTC_DCHECK_RUN_ON(&sequence_checker_);
+  if (event_log_) {
+    event_log_->Log(std::make_unique<RtcEventRemoteEstimate>(
+        estimate.link_capacity_lower, estimate.link_capacity_upper));
+  }
+  estimate.update_time = Timestamp::Millis(clock_->TimeInMilliseconds());
+  if (controller_)
+    PostUpdates(controller_->OnNetworkStateEstimate(estimate));
+}
+
+// Creates the congestion controller and its control handler once both
+// preconditions hold: the network is available and a target-rate observer is
+// registered. Uses the override factory when provided, otherwise the fallback
+// factory, then runs an initial process interval and starts periodic tasks.
+void RtpTransportControllerSend::MaybeCreateControllers() {
+  RTC_DCHECK(!controller_);
+  RTC_DCHECK(!control_handler_);
+
+  if (!network_available_ || !observer_)
+    return;
+  control_handler_ = std::make_unique<CongestionControlHandler>();
+
+  initial_config_.constraints.at_time =
+      Timestamp::Millis(clock_->TimeInMilliseconds());
+  initial_config_.stream_based_config = streams_config_;
+
+  // TODO(srte): Use fallback controller if no feedback is available.
+  if (controller_factory_override_) {
+    RTC_LOG(LS_INFO) << "Creating overridden congestion controller";
+    controller_ = controller_factory_override_->Create(initial_config_);
+    process_interval_ = controller_factory_override_->GetProcessInterval();
+  } else {
+    RTC_LOG(LS_INFO) << "Creating fallback congestion controller";
+    controller_ = controller_factory_fallback_->Create(initial_config_);
+    process_interval_ = controller_factory_fallback_->GetProcessInterval();
+  }
+  UpdateControllerWithTimeInterval();
+  StartProcessPeriodicTasks();
+}
+
+// Folds `new_constraints` into the initial configuration handed to the
+// congestion controller when it is (re)created. If the update does not carry
+// a starting rate, the previously configured one is kept so that the
+// DCHECKed invariant (a starting rate is always present) holds.
+void RtpTransportControllerSend::UpdateInitialConstraints(
+    TargetRateConstraints new_constraints) {
+  if (!new_constraints.starting_rate)
+    new_constraints.starting_rate = initial_config_.constraints.starting_rate;
+  RTC_DCHECK(new_constraints.starting_rate);
+  initial_config_.constraints = new_constraints;
+}
+
+// Starts (or restarts) the two repeating tasks: one that reports the pacer's
+// expected queue time to the control handler at kPacerQueueUpdateInterval,
+// and one that runs the controller's process interval when it is finite. The
+// controller task is stopped first so re-creation does not double-schedule.
+void RtpTransportControllerSend::StartProcessPeriodicTasks() {
+  RTC_DCHECK_RUN_ON(&sequence_checker_);
+  if (!pacer_queue_update_task_.Running()) {
+    pacer_queue_update_task_ = RepeatingTaskHandle::DelayedStart(
+        task_queue_, kPacerQueueUpdateInterval, [this]() {
+          RTC_DCHECK_RUN_ON(&sequence_checker_);
+          TimeDelta expected_queue_time = pacer_.ExpectedQueueTime();
+          control_handler_->SetPacerQueue(expected_queue_time);
+          UpdateControlState();
+          return kPacerQueueUpdateInterval;
+        });
+  }
+  controller_task_.Stop();
+  if (process_interval_.IsFinite()) {
+    controller_task_ = RepeatingTaskHandle::DelayedStart(
+        task_queue_, process_interval_, [this]() {
+          RTC_DCHECK_RUN_ON(&sequence_checker_);
+          UpdateControllerWithTimeInterval();
+          return process_interval_;
+        });
+  }
+}
+
+// Runs one controller process interval, optionally including the pacer queue
+// size in the congestion window accounting (field-trial controlled).
+void RtpTransportControllerSend::UpdateControllerWithTimeInterval() {
+  RTC_DCHECK(controller_);
+  ProcessInterval msg;
+  msg.at_time = Timestamp::Millis(clock_->TimeInMilliseconds());
+  if (add_pacing_to_cwin_)
+    msg.pacer_queue = pacer_.QueueSizeData();
+  PostUpdates(controller_->OnProcessInterval(msg));
+}
+
+// Timestamps the current streams config and pushes it to the controller.
+void RtpTransportControllerSend::UpdateStreamsConfig() {
+  streams_config_.at_time = Timestamp::Millis(clock_->TimeInMilliseconds());
+  if (controller_)
+    PostUpdates(controller_->OnStreamsConfig(streams_config_));
+}
+
+// Applies a NetworkControlUpdate produced by the congestion controller:
+// congestion window -> congested-state bookkeeping, pacer config -> pacing/
+// padding rates, probe clusters -> pacer probes, target rate -> control
+// handler (which in turn notifies the registered observer).
+void RtpTransportControllerSend::PostUpdates(NetworkControlUpdate update) {
+  if (update.congestion_window) {
+    congestion_window_size_ = *update.congestion_window;
+    UpdateCongestedState();
+  }
+  if (update.pacer_config) {
+    pacer_.SetPacingRates(update.pacer_config->data_rate(),
+                          update.pacer_config->pad_rate());
+  }
+  if (!update.probe_cluster_configs.empty()) {
+    pacer_.CreateProbeClusters(std::move(update.probe_cluster_configs));
+  }
+  if (update.target_rate) {
+    control_handler_->SetTargetRate(*update.target_rate);
+    UpdateControlState();
+  }
+}
+
+// Derives a transport-wide loss report from RTCP report blocks by diffing
+// cumulative-lost and extended-highest-sequence-number counters against the
+// previous report per SSRC, then forwards the aggregate deltas to the
+// congestion controller.
+void RtpTransportControllerSend::OnReport(
+    Timestamp receive_time,
+    rtc::ArrayView<const ReportBlockData> report_blocks) {
+  RTC_DCHECK_RUN_ON(&sequence_checker_);
+  if (report_blocks.empty())
+    return;
+
+  int total_packets_lost_delta = 0;
+  int total_packets_delta = 0;
+
+  // Compute the packet loss from all report blocks.
+  for (const ReportBlockData& report_block : report_blocks) {
+    auto [it, inserted] =
+        last_report_blocks_.try_emplace(report_block.source_ssrc());
+    LossReport& last_loss_report = it->second;
+    if (!inserted) {
+      total_packets_delta += report_block.extended_highest_sequence_number() -
+                             last_loss_report.extended_highest_sequence_number;
+      total_packets_lost_delta +=
+          report_block.cumulative_lost() - last_loss_report.cumulative_lost;
+    }
+    // Remember the latest counters for the next delta computation.
+    last_loss_report.extended_highest_sequence_number =
+        report_block.extended_highest_sequence_number();
+    last_loss_report.cumulative_lost = report_block.cumulative_lost();
+  }
+  // Can only compute delta if there has been previous blocks to compare to. If
+  // not, total_packets_delta will be unchanged and there's nothing more to do.
+  if (!total_packets_delta)
+    return;
+  int packets_received_delta = total_packets_delta - total_packets_lost_delta;
+  // To detect lost packets, at least one packet has to be received. This check
+  // is needed to avoid bandwidth detection update in
+  // VideoSendStreamTest.SuspendBelowMinBitrate
+
+  if (packets_received_delta < 1)
+    return;
+  TransportLossReport msg;
+  msg.packets_lost_delta = total_packets_lost_delta;
+  msg.packets_received_delta = packets_received_delta;
+  msg.receive_time = receive_time;
+  msg.start_time = last_report_block_time_;
+  msg.end_time = receive_time;
+  if (controller_)
+    PostUpdates(controller_->OnTransportLossReport(msg));
+  last_report_block_time_ = receive_time;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/rtp_transport_controller_send.h b/third_party/libwebrtc/call/rtp_transport_controller_send.h
new file mode 100644
index 0000000000..1aace1ce65
--- /dev/null
+++ b/third_party/libwebrtc/call/rtp_transport_controller_send.h
@@ -0,0 +1,216 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef CALL_RTP_TRANSPORT_CONTROLLER_SEND_H_
+#define CALL_RTP_TRANSPORT_CONTROLLER_SEND_H_
+
+#include <atomic>
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "api/network_state_predictor.h"
+#include "api/sequence_checker.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/task_queue/task_queue_factory.h"
+#include "api/transport/network_control.h"
+#include "api/units/data_rate.h"
+#include "call/rtp_bitrate_configurator.h"
+#include "call/rtp_transport_config.h"
+#include "call/rtp_transport_controller_send_interface.h"
+#include "call/rtp_video_sender.h"
+#include "modules/congestion_controller/rtp/control_handler.h"
+#include "modules/congestion_controller/rtp/transport_feedback_adapter.h"
+#include "modules/congestion_controller/rtp/transport_feedback_demuxer.h"
+#include "modules/pacing/packet_router.h"
+#include "modules/pacing/rtp_packet_pacer.h"
+#include "modules/pacing/task_queue_paced_sender.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "rtc_base/network_route.h"
+#include "rtc_base/race_checker.h"
+#include "rtc_base/task_queue.h"
+#include "rtc_base/task_utils/repeating_task.h"
+
+namespace webrtc {
+class Clock;
+class FrameEncryptorInterface;
+class RtcEventLog;
+
+// Owns the send-side RTP transport machinery: the pacer, packet router,
+// transport feedback handling, and the congestion controller lifecycle.
+// All state is used on `sequence_checker_` unless noted otherwise.
+class RtpTransportControllerSend final
+    : public RtpTransportControllerSendInterface,
+      public NetworkLinkRtcpObserver,
+      public TransportFeedbackObserver,
+      public NetworkStateEstimateObserver {
+ public:
+  RtpTransportControllerSend(Clock* clock, const RtpTransportConfig& config);
+  ~RtpTransportControllerSend() override;
+
+  RtpTransportControllerSend(const RtpTransportControllerSend&) = delete;
+  RtpTransportControllerSend& operator=(const RtpTransportControllerSend&) =
+      delete;
+
+  // TODO(tommi): Change to std::unique_ptr<>.
+  RtpVideoSenderInterface* CreateRtpVideoSender(
+      const std::map<uint32_t, RtpState>& suspended_ssrcs,
+      const std::map<uint32_t, RtpPayloadState>&
+          states,  // move states into RtpTransportControllerSend
+      const RtpConfig& rtp_config,
+      int rtcp_report_interval_ms,
+      Transport* send_transport,
+      const RtpSenderObservers& observers,
+      RtcEventLog* event_log,
+      std::unique_ptr<FecController> fec_controller,
+      const RtpSenderFrameEncryptionConfig& frame_encryption_config,
+      rtc::scoped_refptr<FrameTransformerInterface> frame_transformer) override;
+  void DestroyRtpVideoSender(
+      RtpVideoSenderInterface* rtp_video_sender) override;
+
+  // Implements RtpTransportControllerSendInterface
+  PacketRouter* packet_router() override;
+
+  NetworkStateEstimateObserver* network_state_estimate_observer() override;
+  TransportFeedbackObserver* transport_feedback_observer() override;
+  RtpPacketSender* packet_sender() override;
+
+  void SetAllocatedSendBitrateLimits(BitrateAllocationLimits limits) override;
+
+  void SetPacingFactor(float pacing_factor) override;
+  void SetQueueTimeLimit(int limit_ms) override;
+  StreamFeedbackProvider* GetStreamFeedbackProvider() override;
+  void RegisterTargetTransferRateObserver(
+      TargetTransferRateObserver* observer) override;
+  void OnNetworkRouteChanged(absl::string_view transport_name,
+                             const rtc::NetworkRoute& network_route) override;
+  void OnNetworkAvailability(bool network_available) override;
+  NetworkLinkRtcpObserver* GetRtcpObserver() override;
+  int64_t GetPacerQueuingDelayMs() const override;
+  absl::optional<Timestamp> GetFirstPacketTime() const override;
+  void EnablePeriodicAlrProbing(bool enable) override;
+  void OnSentPacket(const rtc::SentPacket& sent_packet) override;
+  void OnReceivedPacket(const ReceivedPacket& packet_msg) override;
+
+  void SetSdpBitrateParameters(const BitrateConstraints& constraints) override;
+  void SetClientBitratePreferences(const BitrateSettings& preferences) override;
+
+  void OnTransportOverheadChanged(
+      size_t transport_overhead_bytes_per_packet) override;
+
+  void AccountForAudioPacketsInPacedSender(bool account_for_audio) override;
+  void IncludeOverheadInPacedSender() override;
+  void EnsureStarted() override;
+
+  // Implements NetworkLinkRtcpObserver interface
+  void OnReceiverEstimatedMaxBitrate(Timestamp receive_time,
+                                     DataRate bitrate) override;
+  void OnReport(Timestamp receive_time,
+                rtc::ArrayView<const ReportBlockData> report_blocks) override;
+  void OnRttUpdate(Timestamp receive_time, TimeDelta rtt) override;
+  void OnTransportFeedback(Timestamp receive_time,
+                           const rtcp::TransportFeedback& feedback) override;
+
+  // Implements TransportFeedbackObserver interface
+  void OnAddPacket(const RtpPacketSendInfo& packet_info) override;
+
+  // Implements NetworkStateEstimateObserver interface
+  void OnRemoteNetworkEstimate(NetworkStateEstimate estimate) override;
+
+ private:
+  void MaybeCreateControllers() RTC_RUN_ON(sequence_checker_);
+  void UpdateInitialConstraints(TargetRateConstraints new_contraints)
+      RTC_RUN_ON(sequence_checker_);
+
+  void StartProcessPeriodicTasks() RTC_RUN_ON(sequence_checker_);
+  void UpdateControllerWithTimeInterval() RTC_RUN_ON(sequence_checker_);
+
+  absl::optional<BitrateConstraints> ApplyOrLiftRelayCap(bool is_relayed);
+  bool IsRelevantRouteChange(const rtc::NetworkRoute& old_route,
+                             const rtc::NetworkRoute& new_route) const;
+  void UpdateBitrateConstraints(const BitrateConstraints& updated);
+  void UpdateStreamsConfig() RTC_RUN_ON(sequence_checker_);
+  void PostUpdates(NetworkControlUpdate update) RTC_RUN_ON(sequence_checker_);
+  void UpdateControlState() RTC_RUN_ON(sequence_checker_);
+  void UpdateCongestedState() RTC_RUN_ON(sequence_checker_);
+  absl::optional<bool> GetCongestedStateUpdate() const
+      RTC_RUN_ON(sequence_checker_);
+  void ProcessSentPacket(const rtc::SentPacket& sent_packet)
+      RTC_RUN_ON(sequence_checker_);
+  void ProcessSentPacketUpdates(NetworkControlUpdate updates)
+      RTC_RUN_ON(sequence_checker_);
+
+  Clock* const clock_;
+  RtcEventLog* const event_log_;
+  TaskQueueFactory* const task_queue_factory_;
+  SequenceChecker sequence_checker_;
+  TaskQueueBase* task_queue_;
+  PacketRouter packet_router_;
+  std::vector<std::unique_ptr<RtpVideoSenderInterface>> video_rtp_senders_
+      RTC_GUARDED_BY(&sequence_checker_);
+  RtpBitrateConfigurator bitrate_configurator_;
+  // Last-seen network route per transport name; see OnNetworkRouteChanged.
+  std::map<std::string, rtc::NetworkRoute> network_routes_
+      RTC_GUARDED_BY(sequence_checker_);
+  bool pacer_started_ RTC_GUARDED_BY(sequence_checker_);
+  TaskQueuePacedSender pacer_;
+
+  TargetTransferRateObserver* observer_ RTC_GUARDED_BY(sequence_checker_);
+  TransportFeedbackDemuxer feedback_demuxer_;
+
+  TransportFeedbackAdapter transport_feedback_adapter_
+      RTC_GUARDED_BY(sequence_checker_);
+
+  NetworkControllerFactoryInterface* const controller_factory_override_
+      RTC_PT_GUARDED_BY(sequence_checker_);
+  const std::unique_ptr<NetworkControllerFactoryInterface>
+      controller_factory_fallback_ RTC_PT_GUARDED_BY(sequence_checker_);
+
+  std::unique_ptr<CongestionControlHandler> control_handler_
+      RTC_GUARDED_BY(sequence_checker_) RTC_PT_GUARDED_BY(sequence_checker_);
+
+  // Created lazily by MaybeCreateControllers(); null until the network is
+  // available and a target-rate observer is registered.
+  std::unique_ptr<NetworkControllerInterface> controller_
+      RTC_GUARDED_BY(sequence_checker_) RTC_PT_GUARDED_BY(sequence_checker_);
+
+  TimeDelta process_interval_ RTC_GUARDED_BY(sequence_checker_);
+
+  // Per-SSRC counters from the previous RTCP report block, used by OnReport()
+  // to compute loss deltas.
+  struct LossReport {
+    uint32_t extended_highest_sequence_number = 0;
+    int cumulative_lost = 0;
+  };
+  std::map<uint32_t, LossReport> last_report_blocks_
+      RTC_GUARDED_BY(sequence_checker_);
+  Timestamp last_report_block_time_ RTC_GUARDED_BY(sequence_checker_);
+
+  NetworkControllerConfig initial_config_ RTC_GUARDED_BY(sequence_checker_);
+  StreamsConfig streams_config_ RTC_GUARDED_BY(sequence_checker_);
+
+  const bool reset_feedback_on_route_change_;
+  const bool add_pacing_to_cwin_;
+  FieldTrialParameter<DataRate> relay_bandwidth_cap_;
+
+  size_t transport_overhead_bytes_per_packet_ RTC_GUARDED_BY(sequence_checker_);
+  bool network_available_ RTC_GUARDED_BY(sequence_checker_);
+  RepeatingTaskHandle pacer_queue_update_task_
+      RTC_GUARDED_BY(sequence_checker_);
+  RepeatingTaskHandle controller_task_ RTC_GUARDED_BY(sequence_checker_);
+
+  DataSize congestion_window_size_ RTC_GUARDED_BY(sequence_checker_);
+  bool is_congested_ RTC_GUARDED_BY(sequence_checker_);
+
+  // Protected by internal locks.
+  RateLimiter retransmission_rate_limiter_;
+
+  ScopedTaskSafety safety_;
+
+  const FieldTrialsView& field_trials_;
+};
+
+} // namespace webrtc
+
+#endif // CALL_RTP_TRANSPORT_CONTROLLER_SEND_H_
diff --git a/third_party/libwebrtc/call/rtp_transport_controller_send_factory.h b/third_party/libwebrtc/call/rtp_transport_controller_send_factory.h
new file mode 100644
index 0000000000..6349302e45
--- /dev/null
+++ b/third_party/libwebrtc/call/rtp_transport_controller_send_factory.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef CALL_RTP_TRANSPORT_CONTROLLER_SEND_FACTORY_H_
+#define CALL_RTP_TRANSPORT_CONTROLLER_SEND_FACTORY_H_
+
+#include <memory>
+#include <utility>
+
+#include "call/rtp_transport_controller_send.h"
+#include "call/rtp_transport_controller_send_factory_interface.h"
+
+namespace webrtc {
+// Default factory: produces the concrete RtpTransportControllerSend.
+class RtpTransportControllerSendFactory
+    : public RtpTransportControllerSendFactoryInterface {
+ public:
+  // Creates a transport controller; `config.trials` must be non-null.
+  std::unique_ptr<RtpTransportControllerSendInterface> Create(
+      const RtpTransportConfig& config,
+      Clock* clock) override {
+    RTC_CHECK(config.trials);
+    return std::make_unique<RtpTransportControllerSend>(clock, config);
+  }
+
+  virtual ~RtpTransportControllerSendFactory() {}
+};
+} // namespace webrtc
+#endif // CALL_RTP_TRANSPORT_CONTROLLER_SEND_FACTORY_H_
diff --git a/third_party/libwebrtc/call/rtp_transport_controller_send_factory_interface.h b/third_party/libwebrtc/call/rtp_transport_controller_send_factory_interface.h
new file mode 100644
index 0000000000..0f4c36c221
--- /dev/null
+++ b/third_party/libwebrtc/call/rtp_transport_controller_send_factory_interface.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef CALL_RTP_TRANSPORT_CONTROLLER_SEND_FACTORY_INTERFACE_H_
+#define CALL_RTP_TRANSPORT_CONTROLLER_SEND_FACTORY_INTERFACE_H_
+
+#include <memory>
+
+#include "call/rtp_transport_config.h"
+#include "call/rtp_transport_controller_send_interface.h"
+
+namespace webrtc {
+// A factory used for dependency injection on the send side of the transport
+// controller.
+class RtpTransportControllerSendFactoryInterface {
+ public:
+  // Creates a send-side transport controller from `config` and `clock`.
+  virtual std::unique_ptr<RtpTransportControllerSendInterface> Create(
+      const RtpTransportConfig& config,
+      Clock* clock) = 0;
+
+  virtual ~RtpTransportControllerSendFactoryInterface() {}
+};
+} // namespace webrtc
+#endif // CALL_RTP_TRANSPORT_CONTROLLER_SEND_FACTORY_INTERFACE_H_
diff --git a/third_party/libwebrtc/call/rtp_transport_controller_send_interface.h b/third_party/libwebrtc/call/rtp_transport_controller_send_interface.h
new file mode 100644
index 0000000000..7edc135037
--- /dev/null
+++ b/third_party/libwebrtc/call/rtp_transport_controller_send_interface.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef CALL_RTP_TRANSPORT_CONTROLLER_SEND_INTERFACE_H_
+#define CALL_RTP_TRANSPORT_CONTROLLER_SEND_INTERFACE_H_
+#include <stddef.h>
+#include <stdint.h>
+
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "api/crypto/crypto_options.h"
+#include "api/fec_controller.h"
+#include "api/frame_transformer_interface.h"
+#include "api/rtc_event_log/rtc_event_log.h"
+#include "api/transport/bitrate_settings.h"
+#include "api/units/timestamp.h"
+#include "call/rtp_config.h"
+#include "common_video/frame_counts.h"
+#include "modules/rtp_rtcp/include/report_block_data.h"
+#include "modules/rtp_rtcp/include/rtcp_statistics.h"
+#include "modules/rtp_rtcp/include/rtp_packet_sender.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+
+namespace rtc {
+struct SentPacket;
+struct NetworkRoute;
+class TaskQueue;
+} // namespace rtc
+namespace webrtc {
+
+class FrameEncryptorInterface;
+class TargetTransferRateObserver;
+class Transport;
+class PacketRouter;
+class RtpVideoSenderInterface;
+class RtpPacketSender;
+
+// Bundle of observer pointers wired into each created RTP video sender.
+// Raw pointers; presumably owned by the caller — lifetime must outlive the
+// sender (TODO confirm against call sites).
+struct RtpSenderObservers {
+  RtcpRttStats* rtcp_rtt_stats;
+  RtcpIntraFrameObserver* intra_frame_callback;
+  RtcpLossNotificationObserver* rtcp_loss_notification_observer;
+  ReportBlockDataObserver* report_block_data_observer;
+  StreamDataCountersCallback* rtp_stats;
+  BitrateStatisticsObserver* bitrate_observer;
+  FrameCountObserver* frame_count_observer;
+  RtcpPacketTypeCounterObserver* rtcp_type_observer;
+  SendPacketObserver* send_packet_observer;
+};
+
+// Optional frame-encryption settings for an RTP video sender; a null
+// `frame_encryptor` means frames are sent unencrypted at this layer.
+struct RtpSenderFrameEncryptionConfig {
+  FrameEncryptorInterface* frame_encryptor = nullptr;
+  CryptoOptions crypto_options;
+};
+
+// An RtpTransportController should own everything related to the RTP
+// transport to/from a remote endpoint. We should have separate
+// interfaces for send and receive side, even if they are implemented
+// by the same class. This is an ongoing refactoring project. At some
+// point, this class should be promoted to a public api under
+// webrtc/api/rtp/.
+//
+// For a start, this object is just a collection of the objects needed
+// by the VideoSendStream constructor. The plan is to move ownership
+// of all RTP-related objects here, and add methods to create per-ssrc
+// objects which would then be passed to VideoSendStream. Eventually,
+// direct accessors like packet_router() should be removed.
+//
+// This should also have a reference to the underlying
+// webrtc::Transport(s). Currently, webrtc::Transport is implemented by
+// WebRtcVideoChannel and WebRtcVoiceMediaChannel, and owned by
+// WebrtcSession. Video and audio always uses different transport
+// objects, even in the common case where they are bundled over the
+// same underlying transport.
+//
+// Extracting the logic of the webrtc::Transport from BaseChannel and
+// subclasses into a separate class seems to be a prerequesite for
+// moving the transport here.
+class RtpTransportControllerSendInterface {
+ public:
+  virtual ~RtpTransportControllerSendInterface() {}
+  virtual PacketRouter* packet_router() = 0;
+
+  // Creates a video sender bound to this transport controller; ownership of
+  // the returned object stays with the controller (destroy via
+  // DestroyRtpVideoSender).
+  virtual RtpVideoSenderInterface* CreateRtpVideoSender(
+      const std::map<uint32_t, RtpState>& suspended_ssrcs,
+      // TODO(holmer): Move states into RtpTransportControllerSend.
+      const std::map<uint32_t, RtpPayloadState>& states,
+      const RtpConfig& rtp_config,
+      int rtcp_report_interval_ms,
+      Transport* send_transport,
+      const RtpSenderObservers& observers,
+      RtcEventLog* event_log,
+      std::unique_ptr<FecController> fec_controller,
+      const RtpSenderFrameEncryptionConfig& frame_encryption_config,
+      rtc::scoped_refptr<FrameTransformerInterface> frame_transformer) = 0;
+  virtual void DestroyRtpVideoSender(
+      RtpVideoSenderInterface* rtp_video_sender) = 0;
+
+  virtual NetworkStateEstimateObserver* network_state_estimate_observer() = 0;
+  virtual TransportFeedbackObserver* transport_feedback_observer() = 0;
+
+  virtual RtpPacketSender* packet_sender() = 0;
+
+  // SetAllocatedSendBitrateLimits sets bitrates limits imposed by send codec
+  // settings.
+  virtual void SetAllocatedSendBitrateLimits(
+      BitrateAllocationLimits limits) = 0;
+
+  virtual void SetPacingFactor(float pacing_factor) = 0;
+  virtual void SetQueueTimeLimit(int limit_ms) = 0;
+
+  virtual StreamFeedbackProvider* GetStreamFeedbackProvider() = 0;
+  virtual void RegisterTargetTransferRateObserver(
+      TargetTransferRateObserver* observer) = 0;
+  virtual void OnNetworkRouteChanged(
+      absl::string_view transport_name,
+      const rtc::NetworkRoute& network_route) = 0;
+  virtual void OnNetworkAvailability(bool network_available) = 0;
+  virtual NetworkLinkRtcpObserver* GetRtcpObserver() = 0;
+  virtual int64_t GetPacerQueuingDelayMs() const = 0;
+  virtual absl::optional<Timestamp> GetFirstPacketTime() const = 0;
+  virtual void EnablePeriodicAlrProbing(bool enable) = 0;
+
+  // Called when a packet has been sent.
+  // The call should arrive on the network thread, but may not in all cases
+  // (some tests don't adhere to this). Implementations today should not block
+  // the calling thread or make assumptions about the thread context.
+  virtual void OnSentPacket(const rtc::SentPacket& sent_packet) = 0;
+
+  virtual void OnReceivedPacket(const ReceivedPacket& received_packet) = 0;
+
+  virtual void SetSdpBitrateParameters(
+      const BitrateConstraints& constraints) = 0;
+  virtual void SetClientBitratePreferences(
+      const BitrateSettings& preferences) = 0;
+
+  virtual void OnTransportOverheadChanged(
+      size_t transport_overhead_per_packet) = 0;
+
+  virtual void AccountForAudioPacketsInPacedSender(bool account_for_audio) = 0;
+  virtual void IncludeOverheadInPacedSender() = 0;
+
+  virtual void EnsureStarted() = 0;
+};
+
+} // namespace webrtc
+
+#endif // CALL_RTP_TRANSPORT_CONTROLLER_SEND_INTERFACE_H_
diff --git a/third_party/libwebrtc/call/rtp_video_sender.cc b/third_party/libwebrtc/call/rtp_video_sender.cc
new file mode 100644
index 0000000000..4d99c61bb4
--- /dev/null
+++ b/third_party/libwebrtc/call/rtp_video_sender.cc
@@ -0,0 +1,1031 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/rtp_video_sender.h"
+
+#include <algorithm>
+#include <memory>
+#include <string>
+#include <utility>
+
+#include "absl/algorithm/container.h"
+#include "absl/strings/match.h"
+#include "absl/strings/string_view.h"
+#include "api/array_view.h"
+#include "api/task_queue/task_queue_factory.h"
+#include "api/transport/field_trial_based_config.h"
+#include "api/units/time_delta.h"
+#include "api/video_codecs/video_codec.h"
+#include "call/rtp_transport_controller_send_interface.h"
+#include "modules/pacing/packet_router.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h"
+#include "modules/rtp_rtcp/source/rtp_sender.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/task_queue.h"
+#include "rtc_base/trace_event.h"
+
+namespace webrtc {
+
+namespace webrtc_internal_rtp_video_sender {
+
+// RtpStreamSender bundles the per-simulcast-stream sending components:
+// the RTP/RTCP module, the video payload packetizer, and an optional FEC
+// generator. It only takes ownership of the three parts; all real work
+// happens inside them.
+RtpStreamSender::RtpStreamSender(
+ std::unique_ptr<ModuleRtpRtcpImpl2> rtp_rtcp,
+ std::unique_ptr<RTPSenderVideo> sender_video,
+ std::unique_ptr<VideoFecGenerator> fec_generator)
+ : rtp_rtcp(std::move(rtp_rtcp)),
+ sender_video(std::move(sender_video)),
+ fec_generator(std::move(fec_generator)) {}
+
+RtpStreamSender::~RtpStreamSender() = default;
+
+} // namespace webrtc_internal_rtp_video_sender
+
+namespace {
+// Number of packets kept in the send-side packet history, used for
+// NACK-triggered retransmissions (see SetStorePacketsStatus below).
+static const int kMinSendSidePacketHistorySize = 600;
+// We don't do MTU discovery, so assume that we have the standard ethernet MTU.
+static const size_t kPathMTU = 1500;
+
+using webrtc_internal_rtp_video_sender::RtpStreamSender;
+
+// Returns true if `payload_name` maps to a codec whose payload format carries
+// a picture ID (or equivalent), which lets the receiver decide a frame is
+// complete without retransmitting lost FEC packets.
+bool PayloadTypeSupportsSkippingFecPackets(absl::string_view payload_name,
+ const FieldTrialsView& trials) {
+ const VideoCodecType codecType =
+ PayloadStringToCodecType(std::string(payload_name));
+ if (codecType == kVideoCodecVP8 || codecType == kVideoCodecVP9) {
+ return true;
+ }
+ // The generic payload only gains a picture ID under this field trial.
+ if (codecType == kVideoCodecGeneric &&
+ absl::StartsWith(trials.Lookup("WebRTC-GenericPictureId"), "Enabled")) {
+ return true;
+ }
+ return false;
+}
+
+// Decides whether the configured RED+ULPFEC protection should be disabled,
+// based on field trials, FlexFEC presence, NACK configuration and payload
+// type consistency. Returns true if RED+ULPFEC must not be used.
+bool ShouldDisableRedAndUlpfec(bool flexfec_enabled,
+ const RtpConfig& rtp_config,
+ const FieldTrialsView& trials) {
+ // Consistency of NACK and RED+ULPFEC parameters is checked in this function.
+ const bool nack_enabled = rtp_config.nack.rtp_history_ms > 0;
+
+ // Shorthands.
+ auto IsRedEnabled = [&]() { return rtp_config.ulpfec.red_payload_type >= 0; };
+ auto IsUlpfecEnabled = [&]() {
+ return rtp_config.ulpfec.ulpfec_payload_type >= 0;
+ };
+
+ bool should_disable_red_and_ulpfec = false;
+
+ // A kill-switch field trial that disables ULPFEC unconditionally.
+ if (absl::StartsWith(trials.Lookup("WebRTC-DisableUlpFecExperiment"),
+ "Enabled")) {
+ RTC_LOG(LS_INFO) << "Experiment to disable sending ULPFEC is enabled.";
+ should_disable_red_and_ulpfec = true;
+ }
+
+ // If enabled, FlexFEC takes priority over RED+ULPFEC.
+ if (flexfec_enabled) {
+ if (IsUlpfecEnabled()) {
+ RTC_LOG(LS_INFO)
+ << "Both FlexFEC and ULPFEC are configured. Disabling ULPFEC.";
+ }
+ should_disable_red_and_ulpfec = true;
+ }
+
+ // Payload types without picture ID cannot determine that a stream is complete
+ // without retransmitting FEC, so using ULPFEC + NACK for H.264 (for instance)
+ // is a waste of bandwidth since FEC packets still have to be transmitted.
+ // Note that this is not the case with FlexFEC.
+ if (nack_enabled && IsUlpfecEnabled() &&
+ !PayloadTypeSupportsSkippingFecPackets(rtp_config.payload_name, trials)) {
+ RTC_LOG(LS_WARNING)
+ << "Transmitting payload type without picture ID using "
+ "NACK+ULPFEC is a waste of bandwidth since ULPFEC packets "
+ "also have to be retransmitted. Disabling ULPFEC.";
+ should_disable_red_and_ulpfec = true;
+ }
+
+ // Verify payload types. RED and ULPFEC are only usable as a pair; having
+ // exactly one of them configured is an inconsistent setup.
+ if (IsUlpfecEnabled() ^ IsRedEnabled()) {
+ RTC_LOG(LS_WARNING)
+ << "Only RED or only ULPFEC enabled, but not both. Disabling both.";
+ should_disable_red_and_ulpfec = true;
+ }
+
+ return should_disable_red_and_ulpfec;
+}
+
+// TODO(brandtr): Update this function when we support multistream protection.
+// Creates the FEC generator for the stream at `simulcast_index`, or nullptr
+// if no usable FEC is configured. FlexFEC takes priority over RED+ULPFEC;
+// any inconsistency in the FlexFEC config disables FEC entirely.
+std::unique_ptr<VideoFecGenerator> MaybeCreateFecGenerator(
+ Clock* clock,
+ const RtpConfig& rtp,
+ const std::map<uint32_t, RtpState>& suspended_ssrcs,
+ int simulcast_index,
+ const FieldTrialsView& trials) {
+ // If flexfec is configured that takes priority.
+ if (rtp.flexfec.payload_type >= 0) {
+ RTC_DCHECK_GE(rtp.flexfec.payload_type, 0);
+ RTC_DCHECK_LE(rtp.flexfec.payload_type, 127);
+ if (rtp.flexfec.ssrc == 0) {
+ RTC_LOG(LS_WARNING) << "FlexFEC is enabled, but no FlexFEC SSRC given. "
+ "Therefore disabling FlexFEC.";
+ return nullptr;
+ }
+ if (rtp.flexfec.protected_media_ssrcs.empty()) {
+ RTC_LOG(LS_WARNING)
+ << "FlexFEC is enabled, but no protected media SSRC given. "
+ "Therefore disabling FlexFEC.";
+ return nullptr;
+ }
+
+ if (rtp.flexfec.protected_media_ssrcs.size() > 1) {
+ RTC_LOG(LS_WARNING)
+ << "The supplied FlexfecConfig contained multiple protected "
+ "media streams, but our implementation currently only "
+ "supports protecting a single media stream. "
+ "To avoid confusion, disabling FlexFEC completely.";
+ return nullptr;
+ }
+
+ if (absl::c_find(rtp.flexfec.protected_media_ssrcs,
+ rtp.ssrcs[simulcast_index]) ==
+ rtp.flexfec.protected_media_ssrcs.end()) {
+ // Media SSRC not among flexfec protected SSRCs.
+ return nullptr;
+ }
+
+ // Restore previously suspended RTP state (e.g. sequence numbers) for the
+ // FlexFEC SSRC, if any.
+ const RtpState* rtp_state = nullptr;
+ auto it = suspended_ssrcs.find(rtp.flexfec.ssrc);
+ if (it != suspended_ssrcs.end()) {
+ rtp_state = &it->second;
+ }
+
+ RTC_DCHECK_EQ(1U, rtp.flexfec.protected_media_ssrcs.size());
+ return std::make_unique<FlexfecSender>(
+ rtp.flexfec.payload_type, rtp.flexfec.ssrc,
+ rtp.flexfec.protected_media_ssrcs[0], rtp.mid, rtp.extensions,
+ RTPSender::FecExtensionSizes(), rtp_state, clock);
+ } else if (rtp.ulpfec.red_payload_type >= 0 &&
+ rtp.ulpfec.ulpfec_payload_type >= 0 &&
+ !ShouldDisableRedAndUlpfec(/*flexfec_enabled=*/false, rtp,
+ trials)) {
+ // Flexfec not configured, but ulpfec is and is not disabled.
+ return std::make_unique<UlpfecGenerator>(
+ rtp.ulpfec.red_payload_type, rtp.ulpfec.ulpfec_payload_type, clock);
+ }
+
+ // Not a single FEC is given.
+ return nullptr;
+}
+
+// Builds one RtpStreamSender (RTP/RTCP module + video packetizer + optional
+// FEC generator) per configured media SSRC. The shared `configuration` is
+// reused across iterations, with per-stream fields overwritten each pass.
+std::vector<RtpStreamSender> CreateRtpStreamSenders(
+ Clock* clock,
+ const RtpConfig& rtp_config,
+ const RtpSenderObservers& observers,
+ int rtcp_report_interval_ms,
+ Transport* send_transport,
+ RtpTransportControllerSendInterface* transport,
+ const std::map<uint32_t, RtpState>& suspended_ssrcs,
+ RtcEventLog* event_log,
+ RateLimiter* retransmission_rate_limiter,
+ FrameEncryptorInterface* frame_encryptor,
+ const CryptoOptions& crypto_options,
+ rtc::scoped_refptr<FrameTransformerInterface> frame_transformer,
+ const FieldTrialsView& trials,
+ TaskQueueFactory* task_queue_factory) {
+ RTC_DCHECK_GT(rtp_config.ssrcs.size(), 0);
+ RTC_DCHECK(task_queue_factory);
+
+ // Fields common to all streams; per-stream fields are set in the loop below.
+ RtpRtcpInterface::Configuration configuration;
+ configuration.clock = clock;
+ configuration.audio = false;
+ configuration.receiver_only = false;
+ configuration.outgoing_transport = send_transport;
+ configuration.intra_frame_callback = observers.intra_frame_callback;
+ configuration.rtcp_loss_notification_observer =
+ observers.rtcp_loss_notification_observer;
+ configuration.network_link_rtcp_observer = transport->GetRtcpObserver();
+ configuration.network_state_estimate_observer =
+ transport->network_state_estimate_observer();
+ configuration.transport_feedback_callback =
+ transport->transport_feedback_observer();
+ configuration.rtt_stats = observers.rtcp_rtt_stats;
+ configuration.rtcp_packet_type_counter_observer =
+ observers.rtcp_type_observer;
+ configuration.report_block_data_observer =
+ observers.report_block_data_observer;
+ configuration.paced_sender = transport->packet_sender();
+ configuration.send_bitrate_observer = observers.bitrate_observer;
+ configuration.send_packet_observer = observers.send_packet_observer;
+ configuration.event_log = event_log;
+ // The RTX rate limiter is applied unless explicitly killed by field trial.
+ if (trials.IsDisabled("WebRTC-DisableRtxRateLimiter")) {
+ configuration.retransmission_rate_limiter = retransmission_rate_limiter;
+ }
+ configuration.rtp_stats_callback = observers.rtp_stats;
+ configuration.frame_encryptor = frame_encryptor;
+ configuration.require_frame_encryption =
+ crypto_options.sframe.require_frame_encryption;
+ configuration.extmap_allow_mixed = rtp_config.extmap_allow_mixed;
+ configuration.rtcp_report_interval_ms = rtcp_report_interval_ms;
+ configuration.field_trials = &trials;
+ configuration.enable_send_packet_batching =
+ rtp_config.enable_send_packet_batching;
+
+ std::vector<RtpStreamSender> rtp_streams;
+
+ // RTX SSRCs, when present, must pair one-to-one with the media SSRCs.
+ RTC_DCHECK(rtp_config.rtx.ssrcs.empty() ||
+ rtp_config.rtx.ssrcs.size() == rtp_config.ssrcs.size());
+
+ // Some streams could have been disabled, but the rids are still there.
+ // This will occur when simulcast has been disabled for a codec (e.g. VP9)
+ RTC_DCHECK(rtp_config.rids.empty() ||
+ rtp_config.rids.size() >= rtp_config.ssrcs.size());
+
+ for (size_t i = 0; i < rtp_config.ssrcs.size(); ++i) {
+ RTPSenderVideo::Config video_config;
+ configuration.local_media_ssrc = rtp_config.ssrcs[i];
+
+ std::unique_ptr<VideoFecGenerator> fec_generator =
+ MaybeCreateFecGenerator(clock, rtp_config, suspended_ssrcs, i, trials);
+ configuration.fec_generator = fec_generator.get();
+
+ configuration.rtx_send_ssrc =
+ rtp_config.GetRtxSsrcAssociatedWithMediaSsrc(rtp_config.ssrcs[i]);
+ RTC_DCHECK_EQ(configuration.rtx_send_ssrc.has_value(),
+ !rtp_config.rtx.ssrcs.empty());
+
+ configuration.rid = (i < rtp_config.rids.size()) ? rtp_config.rids[i] : "";
+
+ // Packet info tracking is only needed for loss notification (LNTF).
+ configuration.need_rtp_packet_infos = rtp_config.lntf.enabled;
+
+ std::unique_ptr<ModuleRtpRtcpImpl2> rtp_rtcp(
+ ModuleRtpRtcpImpl2::Create(configuration));
+ // Modules start inactive; RtpVideoSender::SetActiveModules enables them.
+ rtp_rtcp->SetSendingStatus(false);
+ rtp_rtcp->SetSendingMediaStatus(false);
+ rtp_rtcp->SetRTCPStatus(RtcpMode::kCompound);
+ // Set NACK.
+ rtp_rtcp->SetStorePacketsStatus(true, kMinSendSidePacketHistorySize);
+
+ video_config.clock = configuration.clock;
+ video_config.rtp_sender = rtp_rtcp->RtpSender();
+ video_config.frame_encryptor = frame_encryptor;
+ video_config.require_frame_encryption =
+ crypto_options.sframe.require_frame_encryption;
+ video_config.field_trials = &trials;
+ video_config.enable_retransmit_all_layers =
+ !video_config.field_trials->IsDisabled(
+ "WebRTC-Video-EnableRetransmitAllLayers");
+
+ const bool using_flexfec =
+ fec_generator &&
+ fec_generator->GetFecType() == VideoFecGenerator::FecType::kFlexFec;
+ const bool should_disable_red_and_ulpfec =
+ ShouldDisableRedAndUlpfec(using_flexfec, rtp_config, trials);
+ if (!should_disable_red_and_ulpfec &&
+ rtp_config.ulpfec.red_payload_type != -1) {
+ video_config.red_payload_type = rtp_config.ulpfec.red_payload_type;
+ }
+ if (fec_generator) {
+ video_config.fec_type = fec_generator->GetFecType();
+ video_config.fec_overhead_bytes = fec_generator->MaxPacketOverhead();
+ }
+ video_config.frame_transformer = frame_transformer;
+ video_config.task_queue_factory = task_queue_factory;
+ auto sender_video = std::make_unique<RTPSenderVideo>(video_config);
+ rtp_streams.emplace_back(std::move(rtp_rtcp), std::move(sender_video),
+ std::move(fec_generator));
+ }
+ return rtp_streams;
+}
+
+// Maps the configured payload name to a codec type; nullopt for raw payloads
+// (no codec-specific packetization).
+absl::optional<VideoCodecType> GetVideoCodecType(const RtpConfig& config) {
+ if (config.raw_payload) {
+ return absl::nullopt;
+ }
+ return PayloadStringToCodecType(config.payload_name);
+}
+// True if the transport-wide sequence number header extension is configured,
+// i.e. send-side bandwidth estimation feedback is available.
+bool TransportSeqNumExtensionConfigured(const RtpConfig& config) {
+ return absl::c_any_of(config.extensions, [](const RtpExtension& ext) {
+ return ext.uri == RtpExtension::kTransportSequenceNumberUri;
+ });
+}
+
+// Returns true when some coded video sequence can be decoded starting with
+// this frame without requiring any previous frames.
+// e.g. it is the same as a key frame when spatial scalability is not used.
+// When spatial scalability is used, then it is true for layer frames of
+// a key frame without inter-layer dependencies.
+bool IsFirstFrameOfACodedVideoSequence(
+ const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info) {
+ // Only key frames can start a decodable sequence.
+ if (encoded_image._frameType != VideoFrameType::kVideoFrameKey) {
+ return false;
+ }
+
+ if (codec_specific_info != nullptr) {
+ if (codec_specific_info->generic_frame_info.has_value()) {
+ // This function is used before
+ // `codec_specific_info->generic_frame_info->frame_diffs` are calculated,
+ // so need to use a more complicated way to check for presence of the
+ // dependencies.
+ return absl::c_none_of(
+ codec_specific_info->generic_frame_info->encoder_buffers,
+ [](const CodecBufferUsage& buffer) { return buffer.referenced; });
+ }
+
+ if (codec_specific_info->codecType == VideoCodecType::kVideoCodecVP8 ||
+ codec_specific_info->codecType == VideoCodecType::kVideoCodecH264 ||
+ codec_specific_info->codecType == VideoCodecType::kVideoCodecGeneric) {
+ // These codecs do not support intra picture dependencies, so a frame
+ // marked as a key frame should be a key frame.
+ return true;
+ }
+ }
+
+ // Without dependencies described in generic format do an educated guess.
+ // It might be wrong for VP9 with spatial layer 0 skipped or higher spatial
+ // layer not depending on the spatial layer 0. This corner case is unimportant
+ // for current usage of this helper function.
+
+ // Use <= to accept both 0 (i.e. the first) and nullopt (i.e. the only).
+ return encoded_image.SpatialIndex() <= 0;
+}
+
+} // namespace
+
+// Constructs the sender: creates one RTP stream per configured SSRC,
+// restores any suspended RTP/payload state, registers header extensions,
+// configures RTX/FEC, and wires up the FEC controller.
+RtpVideoSender::RtpVideoSender(
+ Clock* clock,
+ const std::map<uint32_t, RtpState>& suspended_ssrcs,
+ const std::map<uint32_t, RtpPayloadState>& states,
+ const RtpConfig& rtp_config,
+ int rtcp_report_interval_ms,
+ Transport* send_transport,
+ const RtpSenderObservers& observers,
+ RtpTransportControllerSendInterface* transport,
+ RtcEventLog* event_log,
+ RateLimiter* retransmission_limiter,
+ std::unique_ptr<FecController> fec_controller,
+ FrameEncryptorInterface* frame_encryptor,
+ const CryptoOptions& crypto_options,
+ rtc::scoped_refptr<FrameTransformerInterface> frame_transformer,
+ const FieldTrialsView& field_trials,
+ TaskQueueFactory* task_queue_factory)
+ : field_trials_(field_trials),
+ use_frame_rate_for_overhead_(absl::StartsWith(
+ field_trials_.Lookup("WebRTC-Video-UseFrameRateForOverhead"),
+ "Enabled")),
+ has_packet_feedback_(TransportSeqNumExtensionConfigured(rtp_config)),
+ active_(false),
+ fec_controller_(std::move(fec_controller)),
+ fec_allowed_(true),
+ rtp_streams_(CreateRtpStreamSenders(clock,
+ rtp_config,
+ observers,
+ rtcp_report_interval_ms,
+ send_transport,
+ transport,
+ suspended_ssrcs,
+ event_log,
+ retransmission_limiter,
+ frame_encryptor,
+ crypto_options,
+ std::move(frame_transformer),
+ field_trials_,
+ task_queue_factory)),
+ rtp_config_(rtp_config),
+ codec_type_(GetVideoCodecType(rtp_config)),
+ transport_(transport),
+ transport_overhead_bytes_per_packet_(0),
+ encoder_target_rate_bps_(0),
+ frame_counts_(rtp_config.ssrcs.size()),
+ frame_count_observer_(observers.frame_count_observer) {
+ transport_checker_.Detach();
+ RTC_DCHECK_EQ(rtp_config_.ssrcs.size(), rtp_streams_.size());
+ // With transport feedback, overhead is accounted for in the pacer.
+ if (has_packet_feedback_)
+ transport_->IncludeOverheadInPacedSender();
+ // SSRCs are assumed to be sorted in the same order as `rtp_modules`.
+ for (uint32_t ssrc : rtp_config_.ssrcs) {
+ // Restore state if it previously existed.
+ const RtpPayloadState* state = nullptr;
+ auto it = states.find(ssrc);
+ if (it != states.end()) {
+ state = &it->second;
+ shared_frame_id_ = std::max(shared_frame_id_, state->shared_frame_id);
+ }
+ params_.push_back(RtpPayloadParams(ssrc, state, field_trials_));
+ }
+
+ // RTP/RTCP initialization.
+
+ for (size_t i = 0; i < rtp_config_.extensions.size(); ++i) {
+ const std::string& extension = rtp_config_.extensions[i].uri;
+ int id = rtp_config_.extensions[i].id;
+ RTC_DCHECK(RtpExtension::IsSupportedForVideo(extension));
+ for (const RtpStreamSender& stream : rtp_streams_) {
+ stream.rtp_rtcp->RegisterRtpHeaderExtension(extension, id);
+ }
+ }
+
+ ConfigureSsrcs(suspended_ssrcs);
+
+ if (!rtp_config_.mid.empty()) {
+ for (const RtpStreamSender& stream : rtp_streams_) {
+ stream.rtp_rtcp->SetMid(rtp_config_.mid);
+ }
+ }
+
+ bool fec_enabled = false;
+ for (const RtpStreamSender& stream : rtp_streams_) {
+ // Simulcast has one module for each layer. Set the CNAME on all modules.
+ stream.rtp_rtcp->SetCNAME(rtp_config_.c_name.c_str());
+ stream.rtp_rtcp->SetMaxRtpPacketSize(rtp_config_.max_packet_size);
+ stream.rtp_rtcp->RegisterSendPayloadFrequency(rtp_config_.payload_type,
+ kVideoPayloadTypeFrequency);
+ if (stream.fec_generator != nullptr) {
+ fec_enabled = true;
+ }
+ }
+ // Currently, both ULPFEC and FlexFEC use the same FEC rate calculation logic,
+ // so enable that logic if either of those FEC schemes are enabled.
+ fec_controller_->SetProtectionMethod(fec_enabled, NackEnabled());
+
+ fec_controller_->SetProtectionCallback(this);
+
+ // Construction happens on the worker thread (see Call::CreateVideoSendStream)
+ // but subsequent calls to the RTP state will happen on one of two threads:
+ // * The pacer thread for actually sending packets.
+ // * The transport thread when tearing down and querying GetRtpState().
+ // Detach thread checkers.
+ for (const RtpStreamSender& stream : rtp_streams_) {
+ stream.rtp_rtcp->OnPacketSendingThreadSwitched();
+ }
+}
+
+// Deactivates all RTP modules on destruction; this also deregisters the
+// stream feedback observer (see SetActiveModulesLocked).
+RtpVideoSender::~RtpVideoSender() {
+ // TODO(bugs.webrtc.org/13517): Remove once RtpVideoSender gets deleted on the
+ // transport task queue.
+ transport_checker_.Detach();
+
+ SetActiveModulesLocked(
+ std::vector<bool>(rtp_streams_.size(), /*active=*/false));
+
+ RTC_DCHECK(!registered_for_feedback_);
+}
+
+// Deactivates all RTP modules. No-op if already inactive.
+void RtpVideoSender::Stop() {
+ RTC_DCHECK_RUN_ON(&transport_checker_);
+ MutexLock lock(&mutex_);
+ if (!active_)
+ return;
+
+ const std::vector<bool> active_modules(rtp_streams_.size(), false);
+ SetActiveModulesLocked(active_modules);
+}
+
+// Public entry point: takes the lock and delegates to the locked variant.
+// `active_modules` must have one entry per RTP stream.
+void RtpVideoSender::SetActiveModules(const std::vector<bool>& active_modules) {
+ RTC_DCHECK_RUN_ON(&transport_checker_);
+ MutexLock lock(&mutex_);
+ return SetActiveModulesLocked(active_modules);
+}
+
+// Activates/deactivates each RTP module, keeping the packet router and pacer
+// consistent, and (de)registers this sender as a stream feedback observer
+// depending on whether any module remains active. Caller must hold `mutex_`.
+void RtpVideoSender::SetActiveModulesLocked(
+ const std::vector<bool>& active_modules) {
+ RTC_DCHECK_RUN_ON(&transport_checker_);
+ RTC_CHECK_EQ(rtp_streams_.size(), active_modules.size());
+ // `active_` is true if at least one module is active.
+ active_ = false;
+ for (size_t i = 0; i < active_modules.size(); ++i) {
+ if (active_modules[i]) {
+ active_ = true;
+ }
+
+ RtpRtcpInterface& rtp_module = *rtp_streams_[i].rtp_rtcp;
+ const bool was_active = rtp_module.Sending();
+ const bool should_be_active = active_modules[i];
+
+ // Sends a kRtcpByeCode when going from true to false.
+ rtp_module.SetSendingStatus(active_modules[i]);
+
+ if (was_active && !should_be_active) {
+ // Disabling media, remove from packet router map to reduce size and
+ // prevent any stray packets in the pacer from asynchronously arriving
+ // to a disabled module.
+ transport_->packet_router()->RemoveSendRtpModule(&rtp_module);
+
+ // Clear the pacer queue of any packets pertaining to this module.
+ transport_->packet_sender()->RemovePacketsForSsrc(rtp_module.SSRC());
+ if (rtp_module.RtxSsrc().has_value()) {
+ transport_->packet_sender()->RemovePacketsForSsrc(
+ *rtp_module.RtxSsrc());
+ }
+ if (rtp_module.FlexfecSsrc().has_value()) {
+ transport_->packet_sender()->RemovePacketsForSsrc(
+ *rtp_module.FlexfecSsrc());
+ }
+ }
+
+ // If set to false this module won't send media.
+ rtp_module.SetSendingMediaStatus(active_modules[i]);
+
+ if (!was_active && should_be_active) {
+ // Turning on media, register with packet router.
+ transport_->packet_router()->AddSendRtpModule(&rtp_module,
+ /*remb_candidate=*/true);
+ }
+ }
+ // Keep feedback-observer registration in sync with overall activity.
+ if (!active_) {
+ auto* feedback_provider = transport_->GetStreamFeedbackProvider();
+ if (registered_for_feedback_) {
+ feedback_provider->DeRegisterStreamFeedbackObserver(this);
+ registered_for_feedback_ = false;
+ }
+ } else if (!registered_for_feedback_) {
+ auto* feedback_provider = transport_->GetStreamFeedbackProvider();
+ feedback_provider->RegisterStreamFeedbackObserver(rtp_config_.ssrcs, this);
+ registered_for_feedback_ = true;
+ }
+}
+
+// Thread-safe query: true if at least one RTP stream is active.
+bool RtpVideoSender::IsActive() {
+ RTC_DCHECK_RUN_ON(&transport_checker_);
+ MutexLock lock(&mutex_);
+ return IsActiveLocked();
+}
+
+// Caller must hold `mutex_`. Active requires at least one stream to exist.
+bool RtpVideoSender::IsActiveLocked() {
+ return active_ && !rtp_streams_.empty();
+}
+
+// Entry point for encoded frames from the encoder. Routes the frame to the
+// RTP stream matching its simulcast index, updates FEC and frame-count
+// bookkeeping, and returns the RTP timestamp used on success.
+EncodedImageCallback::Result RtpVideoSender::OnEncodedImage(
+ const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info) {
+ // Feed the FEC controller before taking the lock; it has its own locking.
+ fec_controller_->UpdateWithEncodedData(encoded_image.size(),
+ encoded_image._frameType);
+ MutexLock lock(&mutex_);
+ RTC_DCHECK(!rtp_streams_.empty());
+ if (!active_)
+ return Result(Result::ERROR_SEND_FAILED);
+
+ shared_frame_id_++;
+ size_t simulcast_index = encoded_image.SimulcastIndex().value_or(0);
+ RTC_DCHECK_LT(simulcast_index, rtp_streams_.size());
+
+ // Translate the capture timestamp into the stream's RTP timestamp space.
+ uint32_t rtp_timestamp =
+ encoded_image.RtpTimestamp() +
+ rtp_streams_[simulcast_index].rtp_rtcp->StartTimestamp();
+
+ // RTCPSender has its own copy of the timestamp offset, added in
+ // RTCPSender::BuildSR, hence we must not add in the offset for this call.
+ // TODO(nisse): Delete RTCPSender:timestamp_offset_, and see if we can confine
+ // knowledge of the offset to a single place.
+ if (!rtp_streams_[simulcast_index].rtp_rtcp->OnSendingRtpFrame(
+ encoded_image.RtpTimestamp(), encoded_image.capture_time_ms_,
+ rtp_config_.payload_type,
+ encoded_image._frameType == VideoFrameType::kVideoFrameKey)) {
+ // The payload router could be active but this module isn't sending.
+ return Result(Result::ERROR_SEND_FAILED);
+ }
+
+ // PlusInfinity means retransmission is not allowed for this frame.
+ TimeDelta expected_retransmission_time = TimeDelta::PlusInfinity();
+ if (encoded_image.RetransmissionAllowed()) {
+ expected_retransmission_time =
+ rtp_streams_[simulcast_index].rtp_rtcp->ExpectedRetransmissionTime();
+ }
+
+ if (IsFirstFrameOfACodedVideoSequence(encoded_image, codec_specific_info)) {
+ // In order to use the dependency descriptor RTP header extension:
+ // - Pass along any `FrameDependencyStructure` templates produced by the
+ // encoder adapter.
+ // - If none were produced the `RtpPayloadParams::*ToGeneric` for the
+ // particular codec have simulated a dependency structure, so provide a
+ // minimal set of templates.
+ // - Otherwise, don't pass along any templates at all which will disable
+ // the generation of a dependency descriptor.
+ RTPSenderVideo& sender_video = *rtp_streams_[simulcast_index].sender_video;
+ if (codec_specific_info && codec_specific_info->template_structure) {
+ sender_video.SetVideoStructure(&*codec_specific_info->template_structure);
+ } else if (absl::optional<FrameDependencyStructure> structure =
+ params_[simulcast_index].GenericStructure(
+ codec_specific_info)) {
+ sender_video.SetVideoStructure(&*structure);
+ } else {
+ sender_video.SetVideoStructure(nullptr);
+ }
+ }
+
+ bool send_result =
+ rtp_streams_[simulcast_index].sender_video->SendEncodedImage(
+ rtp_config_.payload_type, codec_type_, rtp_timestamp, encoded_image,
+ params_[simulcast_index].GetRtpVideoHeader(
+ encoded_image, codec_specific_info, shared_frame_id_),
+ expected_retransmission_time);
+ // Update per-stream key/delta frame counters for the stats observer.
+ if (frame_count_observer_) {
+ FrameCounts& counts = frame_counts_[simulcast_index];
+ if (encoded_image._frameType == VideoFrameType::kVideoFrameKey) {
+ ++counts.key_frames;
+ } else if (encoded_image._frameType == VideoFrameType::kVideoFrameDelta) {
+ ++counts.delta_frames;
+ } else {
+ RTC_DCHECK(encoded_image._frameType == VideoFrameType::kEmptyFrame);
+ }
+ frame_count_observer_->FrameCountUpdated(
+ counts, rtp_config_.ssrcs[simulcast_index]);
+ }
+ if (!send_result)
+ return Result(Result::ERROR_SEND_FAILED);
+
+ return Result(Result::OK, rtp_timestamp);
+}
+
+// Forwards the encoder's bitrate allocation to the RTP modules, splitting it
+// per simulcast stream when more than one stream is configured.
+void RtpVideoSender::OnBitrateAllocationUpdated(
+ const VideoBitrateAllocation& bitrate) {
+ RTC_DCHECK_RUN_ON(&transport_checker_);
+ MutexLock lock(&mutex_);
+ if (IsActiveLocked()) {
+ if (rtp_streams_.size() == 1) {
+ // If spatial scalability is enabled, it is covered by a single stream.
+ rtp_streams_[0].rtp_rtcp->SetVideoBitrateAllocation(bitrate);
+ } else {
+ std::vector<absl::optional<VideoBitrateAllocation>> layer_bitrates =
+ bitrate.GetSimulcastAllocations();
+ // Simulcast is in use, split the VideoBitrateAllocation into one struct
+ // per rtp stream, moving over the temporal layer allocation.
+ for (size_t i = 0; i < rtp_streams_.size(); ++i) {
+ // The next spatial layer could be used if the current one is
+ // inactive.
+ if (layer_bitrates[i]) {
+ rtp_streams_[i].rtp_rtcp->SetVideoBitrateAllocation(
+ *layer_bitrates[i]);
+ } else {
+ // Signal a 0 bitrate on a simulcast stream.
+ rtp_streams_[i].rtp_rtcp->SetVideoBitrateAllocation(
+ VideoBitrateAllocation());
+ }
+ }
+ }
+ }
+}
+// Forwards a layers allocation to each stream (tagged with its stream index)
+// and toggles per-stream media sending based on which layers are active.
+void RtpVideoSender::OnVideoLayersAllocationUpdated(
+ const VideoLayersAllocation& allocation) {
+ MutexLock lock(&mutex_);
+ if (IsActiveLocked()) {
+ for (size_t i = 0; i < rtp_streams_.size(); ++i) {
+ VideoLayersAllocation stream_allocation = allocation;
+ stream_allocation.rtp_stream_index = i;
+ rtp_streams_[i].sender_video->SetVideoLayersAllocation(
+ std::move(stream_allocation));
+ // Only send video frames on the rtp module if the encoder is configured
+ // to send. This is to prevent stray frames to be sent after an encoder
+ // has been reconfigured.
+ rtp_streams_[i].rtp_rtcp->SetSendingMediaStatus(
+ absl::c_any_of(allocation.active_spatial_layers,
+ [&i](const VideoLayersAllocation::SpatialLayer layer) {
+ return layer.rtp_stream_index == static_cast<int>(i);
+ }));
+ }
+ }
+}
+
+// NACK is enabled when a positive RTP history duration is configured.
+bool RtpVideoSender::NackEnabled() const {
+ const bool nack_enabled = rtp_config_.nack.rtp_history_ms > 0;
+ return nack_enabled;
+}
+
+// Sums the post-encode (packetization) overhead rate across all streams that
+// are currently sending media.
+DataRate RtpVideoSender::GetPostEncodeOverhead() const {
+ DataRate post_encode_overhead = DataRate::Zero();
+ for (size_t i = 0; i < rtp_streams_.size(); ++i) {
+ if (rtp_streams_[i].rtp_rtcp->SendingMedia()) {
+ post_encode_overhead +=
+ rtp_streams_[i].sender_video->PostEncodeOverhead();
+ }
+ }
+ return post_encode_overhead;
+}
+
+// Dispatches an incoming RTCP packet to every RTP module; each module picks
+// out the parts addressed to it.
+void RtpVideoSender::DeliverRtcp(const uint8_t* packet, size_t length) {
+ // Runs on a network thread.
+ for (const RtpStreamSender& stream : rtp_streams_)
+ stream.rtp_rtcp->IncomingRtcpPacket(rtc::MakeArrayView(packet, length));
+}
+
+// Maps media SSRCs to their RTP modules, restores suspended RTP/RTX state,
+// and configures RTX payload types (including RED-over-RTX when applicable).
+void RtpVideoSender::ConfigureSsrcs(
+ const std::map<uint32_t, RtpState>& suspended_ssrcs) {
+ // Configure regular SSRCs.
+ RTC_CHECK(ssrc_to_rtp_module_.empty());
+ for (size_t i = 0; i < rtp_config_.ssrcs.size(); ++i) {
+ uint32_t ssrc = rtp_config_.ssrcs[i];
+ RtpRtcpInterface* const rtp_rtcp = rtp_streams_[i].rtp_rtcp.get();
+
+ // Restore RTP state if previous existed.
+ auto it = suspended_ssrcs.find(ssrc);
+ if (it != suspended_ssrcs.end())
+ rtp_rtcp->SetRtpState(it->second);
+
+ ssrc_to_rtp_module_[ssrc] = rtp_rtcp;
+ }
+
+ // Set up RTX if available.
+ if (rtp_config_.rtx.ssrcs.empty())
+ return;
+
+ // RTX SSRCs pair one-to-one with media SSRCs (same index).
+ RTC_DCHECK_EQ(rtp_config_.rtx.ssrcs.size(), rtp_config_.ssrcs.size());
+ for (size_t i = 0; i < rtp_config_.rtx.ssrcs.size(); ++i) {
+ uint32_t ssrc = rtp_config_.rtx.ssrcs[i];
+ RtpRtcpInterface* const rtp_rtcp = rtp_streams_[i].rtp_rtcp.get();
+ auto it = suspended_ssrcs.find(ssrc);
+ if (it != suspended_ssrcs.end())
+ rtp_rtcp->SetRtxState(it->second);
+ }
+
+ // Configure RTX payload types.
+ RTC_DCHECK_GE(rtp_config_.rtx.payload_type, 0);
+ for (const RtpStreamSender& stream : rtp_streams_) {
+ stream.rtp_rtcp->SetRtxSendPayloadType(rtp_config_.rtx.payload_type,
+ rtp_config_.payload_type);
+ stream.rtp_rtcp->SetRtxSendStatus(kRtxRetransmitted |
+ kRtxRedundantPayloads);
+ }
+ // If RED is in use, also map its RTX payload type.
+ if (rtp_config_.ulpfec.red_payload_type != -1 &&
+ rtp_config_.ulpfec.red_rtx_payload_type != -1) {
+ for (const RtpStreamSender& stream : rtp_streams_) {
+ stream.rtp_rtcp->SetRtxSendPayloadType(
+ rtp_config_.ulpfec.red_rtx_payload_type,
+ rtp_config_.ulpfec.red_payload_type);
+ }
+ }
+}
+
+// Suspends RTCP while the network is down; restores the configured RTCP mode
+// when it comes back.
+void RtpVideoSender::OnNetworkAvailability(bool network_available) {
+ for (const RtpStreamSender& stream : rtp_streams_) {
+ stream.rtp_rtcp->SetRTCPStatus(network_available ? rtp_config_.rtcp_mode
+ : RtcpMode::kOff);
+ }
+}
+
+// Snapshots RTP state (sequence numbers etc.) for all media, FlexFEC and RTX
+// SSRCs so the state can be restored after a stream restart.
+std::map<uint32_t, RtpState> RtpVideoSender::GetRtpStates() const {
+ std::map<uint32_t, RtpState> rtp_states;
+
+ for (size_t i = 0; i < rtp_config_.ssrcs.size(); ++i) {
+ uint32_t ssrc = rtp_config_.ssrcs[i];
+ RTC_DCHECK_EQ(ssrc, rtp_streams_[i].rtp_rtcp->SSRC());
+ rtp_states[ssrc] = rtp_streams_[i].rtp_rtcp->GetRtpState();
+
+ // Only happens during shutdown, when RTP module is already inactive,
+ // so OK to call fec generator here.
+ if (rtp_streams_[i].fec_generator) {
+ absl::optional<RtpState> fec_state =
+ rtp_streams_[i].fec_generator->GetRtpState();
+ if (fec_state) {
+ // NOTE: this inner `ssrc` shadows the loop's media ssrc on purpose;
+ // the FEC state is stored under the FlexFEC SSRC.
+ uint32_t ssrc = rtp_config_.flexfec.ssrc;
+ rtp_states[ssrc] = *fec_state;
+ }
+ }
+ }
+
+ for (size_t i = 0; i < rtp_config_.rtx.ssrcs.size(); ++i) {
+ uint32_t ssrc = rtp_config_.rtx.ssrcs[i];
+ rtp_states[ssrc] = rtp_streams_[i].rtp_rtcp->GetRtxState();
+ }
+
+ return rtp_states;
+}
+
+// Snapshots per-SSRC payload state (picture IDs etc.), stamping each entry
+// with the current shared frame id so a successor sender continues from it.
+std::map<uint32_t, RtpPayloadState> RtpVideoSender::GetRtpPayloadStates()
+ const {
+ MutexLock lock(&mutex_);
+ std::map<uint32_t, RtpPayloadState> payload_states;
+ for (const auto& param : params_) {
+ payload_states[param.ssrc()] = param.state();
+ payload_states[param.ssrc()].shared_frame_id = shared_frame_id_;
+ }
+ return payload_states;
+}
+
+// Records the new per-packet transport overhead and shrinks the maximum RTP
+// packet size so that packets still fit within the assumed path MTU.
+void RtpVideoSender::OnTransportOverheadChanged(
+ size_t transport_overhead_bytes_per_packet) {
+ MutexLock lock(&mutex_);
+ transport_overhead_bytes_per_packet_ = transport_overhead_bytes_per_packet;
+
+ size_t max_rtp_packet_size =
+ std::min(rtp_config_.max_packet_size,
+ kPathMTU - transport_overhead_bytes_per_packet_);
+ for (const RtpStreamSender& stream : rtp_streams_) {
+ stream.rtp_rtcp->SetMaxRtpPacketSize(max_rtp_packet_size);
+ }
+}
+
+// Splits the network target bitrate into encoder target, packetization and
+// protection (FEC/RTX) parts. Updates `encoder_target_rate_bps_` and
+// `protection_bitrate_bps_`.
+void RtpVideoSender::OnBitrateUpdated(BitrateAllocationUpdate update,
+ int framerate) {
+ // Subtract overhead from bitrate.
+ MutexLock lock(&mutex_);
+ // Average the expected per-packet overhead over the active streams.
+ size_t num_active_streams = 0;
+ size_t overhead_bytes_per_packet = 0;
+ for (const auto& stream : rtp_streams_) {
+ if (stream.rtp_rtcp->SendingMedia()) {
+ overhead_bytes_per_packet += stream.rtp_rtcp->ExpectedPerPacketOverhead();
+ ++num_active_streams;
+ }
+ }
+ if (num_active_streams > 1) {
+ overhead_bytes_per_packet /= num_active_streams;
+ }
+
+ DataSize packet_overhead = DataSize::Bytes(
+ overhead_bytes_per_packet + transport_overhead_bytes_per_packet_);
+ DataSize max_total_packet_size = DataSize::Bytes(
+ rtp_config_.max_packet_size + transport_overhead_bytes_per_packet_);
+ uint32_t payload_bitrate_bps = update.target_bitrate.bps();
+ // With transport feedback, overhead is paced too and must come out of the
+ // payload budget.
+ if (has_packet_feedback_) {
+ DataRate overhead_rate =
+ CalculateOverheadRate(update.target_bitrate, max_total_packet_size,
+ packet_overhead, Frequency::Hertz(framerate));
+ // TODO(srte): We probably should not accept 0 payload bitrate here.
+ payload_bitrate_bps = rtc::saturated_cast<uint32_t>(payload_bitrate_bps -
+ overhead_rate.bps());
+ }
+
+ // Get the encoder target rate. It is the estimated network rate -
+ // protection overhead.
+ // TODO(srte): We should multiply with 255 here.
+ encoder_target_rate_bps_ = fec_controller_->UpdateFecRates(
+ payload_bitrate_bps, framerate,
+ rtc::saturated_cast<uint8_t>(update.packet_loss_ratio * 256),
+ loss_mask_vector_, update.round_trip_time.ms());
+ if (!fec_allowed_) {
+ encoder_target_rate_bps_ = payload_bitrate_bps;
+ // fec_controller_->UpdateFecRates() was still called so as to allow
+ // `fec_controller_` to update whatever internal state it might have,
+ // since `fec_allowed_` may be toggled back on at any moment.
+ }
+
+ // Subtract post encode overhead from the encoder target. If target rate
+ // is really low, cap the overhead at 50%. This also avoids the case where
+ // `encoder_target_rate_bps_` is 0 due to encoder pause event while the
+ // packetization rate is positive since packets are still flowing.
+ uint32_t post_encode_overhead_bps = std::min(
+ GetPostEncodeOverhead().bps<uint32_t>(), encoder_target_rate_bps_ / 2);
+ encoder_target_rate_bps_ -= post_encode_overhead_bps;
+
+ // Loss mask was consumed by UpdateFecRates above.
+ loss_mask_vector_.clear();
+
+ uint32_t encoder_overhead_rate_bps = 0;
+ if (has_packet_feedback_) {
+ // TODO(srte): The packet size should probably be the same as in the
+ // CalculateOverheadRate call above (just max_total_packet_size), it doesn't
+ // make sense to use different packet rates for different overhead
+ // calculations.
+ DataRate encoder_overhead_rate = CalculateOverheadRate(
+ DataRate::BitsPerSec(encoder_target_rate_bps_),
+ max_total_packet_size - DataSize::Bytes(overhead_bytes_per_packet),
+ packet_overhead, Frequency::Hertz(framerate));
+ encoder_overhead_rate_bps = std::min(
+ encoder_overhead_rate.bps<uint32_t>(),
+ update.target_bitrate.bps<uint32_t>() - encoder_target_rate_bps_);
+ }
+ const uint32_t media_rate = encoder_target_rate_bps_ +
+ encoder_overhead_rate_bps +
+ post_encode_overhead_bps;
+ RTC_DCHECK_GE(update.target_bitrate, DataRate::BitsPerSec(media_rate));
+ // `protection_bitrate_bps_` includes overhead.
+ protection_bitrate_bps_ = update.target_bitrate.bps() - media_rate;
+}
+
+uint32_t RtpVideoSender::GetPayloadBitrateBps() const {
+ return encoder_target_rate_bps_;
+}
+
+uint32_t RtpVideoSender::GetProtectionBitrateBps() const {
+ return protection_bitrate_bps_;
+}
+
+std::vector<RtpSequenceNumberMap::Info> RtpVideoSender::GetSentRtpPacketInfos(
+ uint32_t ssrc,
+ rtc::ArrayView<const uint16_t> sequence_numbers) const {
+ for (const auto& rtp_stream : rtp_streams_) {
+ if (ssrc == rtp_stream.rtp_rtcp->SSRC()) {
+ return rtp_stream.rtp_rtcp->GetSentRtpPacketInfos(sequence_numbers);
+ }
+ }
+ return std::vector<RtpSequenceNumberMap::Info>();
+}
+
+int RtpVideoSender::ProtectionRequest(const FecProtectionParams* delta_params,
+ const FecProtectionParams* key_params,
+ uint32_t* sent_video_rate_bps,
+ uint32_t* sent_nack_rate_bps,
+ uint32_t* sent_fec_rate_bps) {
+ *sent_video_rate_bps = 0;
+ *sent_nack_rate_bps = 0;
+ *sent_fec_rate_bps = 0;
+ for (const RtpStreamSender& stream : rtp_streams_) {
+ stream.rtp_rtcp->SetFecProtectionParams(*delta_params, *key_params);
+
+ auto send_bitrate = stream.rtp_rtcp->GetSendRates();
+ *sent_video_rate_bps += send_bitrate[RtpPacketMediaType::kVideo].bps();
+ *sent_fec_rate_bps +=
+ send_bitrate[RtpPacketMediaType::kForwardErrorCorrection].bps();
+ *sent_nack_rate_bps +=
+ send_bitrate[RtpPacketMediaType::kRetransmission].bps();
+ }
+ return 0;
+}
+
+void RtpVideoSender::SetRetransmissionMode(int retransmission_mode) {
+ MutexLock lock(&mutex_);
+ for (const RtpStreamSender& stream : rtp_streams_) {
+ stream.sender_video->SetRetransmissionSetting(retransmission_mode);
+ }
+}
+
+void RtpVideoSender::SetFecAllowed(bool fec_allowed) {
+ MutexLock lock(&mutex_);
+ fec_allowed_ = fec_allowed;
+}
+
+void RtpVideoSender::OnPacketFeedbackVector(
+ std::vector<StreamPacketInfo> packet_feedback_vector) {
+ if (fec_controller_->UseLossVectorMask()) {
+ MutexLock lock(&mutex_);
+ for (const StreamPacketInfo& packet : packet_feedback_vector) {
+ loss_mask_vector_.push_back(!packet.received);
+ }
+ }
+
+ // Map from SSRC to all acked packets for that RTP module.
+ std::map<uint32_t, std::vector<uint16_t>> acked_packets_per_ssrc;
+ for (const StreamPacketInfo& packet : packet_feedback_vector) {
+ if (packet.received && packet.ssrc) {
+ acked_packets_per_ssrc[*packet.ssrc].push_back(
+ packet.rtp_sequence_number);
+ }
+ }
+
+ // Map from SSRC to vector of RTP sequence numbers that are indicated as
+ // lost by feedback, without being trailed by any received packets.
+ std::map<uint32_t, std::vector<uint16_t>> early_loss_detected_per_ssrc;
+
+ for (const StreamPacketInfo& packet : packet_feedback_vector) {
+ // Only include new media packets, not retransmissions/padding/fec.
+ if (!packet.received && packet.ssrc && !packet.is_retransmission) {
+ // Last known lost packet, might not be detectable as lost by remote
+ // jitter buffer.
+ early_loss_detected_per_ssrc[*packet.ssrc].push_back(
+ packet.rtp_sequence_number);
+ } else {
+ // Packet received, so any loss prior to this is already detectable.
+ early_loss_detected_per_ssrc.erase(*packet.ssrc);
+ }
+ }
+
+ for (const auto& kv : early_loss_detected_per_ssrc) {
+ const uint32_t ssrc = kv.first;
+ auto it = ssrc_to_rtp_module_.find(ssrc);
+ RTC_CHECK(it != ssrc_to_rtp_module_.end());
+ RTPSender* rtp_sender = it->second->RtpSender();
+ for (uint16_t sequence_number : kv.second) {
+ rtp_sender->ReSendPacket(sequence_number);
+ }
+ }
+
+ for (const auto& kv : acked_packets_per_ssrc) {
+ const uint32_t ssrc = kv.first;
+ auto it = ssrc_to_rtp_module_.find(ssrc);
+ if (it == ssrc_to_rtp_module_.end()) {
+ // No media, likely FEC or padding. Ignore since there's no RTP history to
+ // clean up anyway.
+ continue;
+ }
+ rtc::ArrayView<const uint16_t> rtp_sequence_numbers(kv.second);
+ it->second->OnPacketsAcknowledged(rtp_sequence_numbers);
+ }
+}
+
+void RtpVideoSender::SetEncodingData(size_t width,
+ size_t height,
+ size_t num_temporal_layers) {
+ fec_controller_->SetEncodingData(width, height, num_temporal_layers,
+ rtp_config_.max_packet_size);
+}
+
+DataRate RtpVideoSender::CalculateOverheadRate(DataRate data_rate,
+ DataSize packet_size,
+ DataSize overhead_per_packet,
+ Frequency framerate) const {
+ Frequency packet_rate = data_rate / packet_size;
+ if (use_frame_rate_for_overhead_) {
+ framerate = std::max(framerate, Frequency::Hertz(1));
+ DataSize frame_size = data_rate / framerate;
+ int packets_per_frame = ceil(frame_size / packet_size);
+ packet_rate = packets_per_frame * framerate;
+ }
+ return packet_rate.RoundUpTo(Frequency::Hertz(1)) * overhead_per_packet;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/rtp_video_sender.h b/third_party/libwebrtc/call/rtp_video_sender.h
new file mode 100644
index 0000000000..10b0d19d05
--- /dev/null
+++ b/third_party/libwebrtc/call/rtp_video_sender.h
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef CALL_RTP_VIDEO_SENDER_H_
+#define CALL_RTP_VIDEO_SENDER_H_
+
+#include <map>
+#include <memory>
+#include <unordered_set>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/call/transport.h"
+#include "api/fec_controller.h"
+#include "api/fec_controller_override.h"
+#include "api/field_trials_view.h"
+#include "api/rtc_event_log/rtc_event_log.h"
+#include "api/sequence_checker.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/task_queue/task_queue_factory.h"
+#include "api/video_codecs/video_encoder.h"
+#include "call/rtp_config.h"
+#include "call/rtp_payload_params.h"
+#include "call/rtp_transport_controller_send_interface.h"
+#include "call/rtp_video_sender_interface.h"
+#include "modules/rtp_rtcp/include/flexfec_sender.h"
+#include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h"
+#include "modules/rtp_rtcp/source/rtp_sender.h"
+#include "modules/rtp_rtcp/source/rtp_sender_video.h"
+#include "modules/rtp_rtcp/source/rtp_sequence_number_map.h"
+#include "modules/rtp_rtcp/source/rtp_video_header.h"
+#include "rtc_base/rate_limiter.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace webrtc {
+
+class FrameEncryptorInterface;
+class RtpTransportControllerSendInterface;
+
+namespace webrtc_internal_rtp_video_sender {
+// RTP state for a single simulcast stream. Internal to the implementation of
+// RtpVideoSender.
+struct RtpStreamSender {
+ RtpStreamSender(std::unique_ptr<ModuleRtpRtcpImpl2> rtp_rtcp,
+ std::unique_ptr<RTPSenderVideo> sender_video,
+ std::unique_ptr<VideoFecGenerator> fec_generator);
+ ~RtpStreamSender();
+
+ RtpStreamSender(RtpStreamSender&&) = default;
+ RtpStreamSender& operator=(RtpStreamSender&&) = default;
+
+ // Note: Needs pointer stability.
+ std::unique_ptr<ModuleRtpRtcpImpl2> rtp_rtcp;
+ std::unique_ptr<RTPSenderVideo> sender_video;
+ std::unique_ptr<VideoFecGenerator> fec_generator;
+};
+
+} // namespace webrtc_internal_rtp_video_sender
+
+// RtpVideoSender routes outgoing data to the correct sending RTP module, based
+// on the simulcast layer in RTPVideoHeader.
+class RtpVideoSender : public RtpVideoSenderInterface,
+ public VCMProtectionCallback,
+ public StreamFeedbackObserver {
+ public:
+ // Rtp modules are assumed to be sorted in simulcast index order.
+ RtpVideoSender(
+ Clock* clock,
+ const std::map<uint32_t, RtpState>& suspended_ssrcs,
+ const std::map<uint32_t, RtpPayloadState>& states,
+ const RtpConfig& rtp_config,
+ int rtcp_report_interval_ms,
+ Transport* send_transport,
+ const RtpSenderObservers& observers,
+ RtpTransportControllerSendInterface* transport,
+ RtcEventLog* event_log,
+ RateLimiter* retransmission_limiter, // move inside RtpTransport
+ std::unique_ptr<FecController> fec_controller,
+ FrameEncryptorInterface* frame_encryptor,
+ const CryptoOptions& crypto_options, // move inside RtpTransport
+ rtc::scoped_refptr<FrameTransformerInterface> frame_transformer,
+ const FieldTrialsView& field_trials,
+ TaskQueueFactory* task_queue_factory);
+ ~RtpVideoSender() override;
+
+ RtpVideoSender(const RtpVideoSender&) = delete;
+ RtpVideoSender& operator=(const RtpVideoSender&) = delete;
+
+ // Sets the sending status of the rtp modules and appropriately sets the
+ // payload router to active if any rtp modules are active.
+ void SetActiveModules(const std::vector<bool>& active_modules)
+ RTC_LOCKS_EXCLUDED(mutex_) override;
+ void Stop() RTC_LOCKS_EXCLUDED(mutex_) override;
+ bool IsActive() RTC_LOCKS_EXCLUDED(mutex_) override;
+
+ void OnNetworkAvailability(bool network_available)
+ RTC_LOCKS_EXCLUDED(mutex_) override;
+ std::map<uint32_t, RtpState> GetRtpStates() const
+ RTC_LOCKS_EXCLUDED(mutex_) override;
+ std::map<uint32_t, RtpPayloadState> GetRtpPayloadStates() const
+ RTC_LOCKS_EXCLUDED(mutex_) override;
+
+ void DeliverRtcp(const uint8_t* packet, size_t length)
+ RTC_LOCKS_EXCLUDED(mutex_) override;
+
+ // Implements webrtc::VCMProtectionCallback.
+ int ProtectionRequest(const FecProtectionParams* delta_params,
+ const FecProtectionParams* key_params,
+ uint32_t* sent_video_rate_bps,
+ uint32_t* sent_nack_rate_bps,
+ uint32_t* sent_fec_rate_bps)
+ RTC_LOCKS_EXCLUDED(mutex_) override;
+
+ // 'retransmission_mode' is either a value of enum RetransmissionMode, or
+ // computed with bitwise operators on values of enum RetransmissionMode.
+ void SetRetransmissionMode(int retransmission_mode)
+ RTC_LOCKS_EXCLUDED(mutex_) override;
+
+ // Implements FecControllerOverride.
+ void SetFecAllowed(bool fec_allowed) RTC_LOCKS_EXCLUDED(mutex_) override;
+
+ // Implements EncodedImageCallback.
+ // Returns 0 if the packet was routed / sent, -1 otherwise.
+ EncodedImageCallback::Result OnEncodedImage(
+ const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info)
+ RTC_LOCKS_EXCLUDED(mutex_) override;
+
+ void OnBitrateAllocationUpdated(const VideoBitrateAllocation& bitrate)
+ RTC_LOCKS_EXCLUDED(mutex_) override;
+ void OnVideoLayersAllocationUpdated(
+ const VideoLayersAllocation& layers) override;
+ void OnTransportOverheadChanged(size_t transport_overhead_bytes_per_packet)
+ RTC_LOCKS_EXCLUDED(mutex_) override;
+ void OnBitrateUpdated(BitrateAllocationUpdate update, int framerate)
+ RTC_LOCKS_EXCLUDED(mutex_) override;
+ uint32_t GetPayloadBitrateBps() const RTC_LOCKS_EXCLUDED(mutex_) override;
+ uint32_t GetProtectionBitrateBps() const RTC_LOCKS_EXCLUDED(mutex_) override;
+ void SetEncodingData(size_t width, size_t height, size_t num_temporal_layers)
+ RTC_LOCKS_EXCLUDED(mutex_) override;
+
+ std::vector<RtpSequenceNumberMap::Info> GetSentRtpPacketInfos(
+ uint32_t ssrc,
+ rtc::ArrayView<const uint16_t> sequence_numbers) const
+ RTC_LOCKS_EXCLUDED(mutex_) override;
+
+ // From StreamFeedbackObserver.
+ void OnPacketFeedbackVector(
+ std::vector<StreamPacketInfo> packet_feedback_vector)
+ RTC_LOCKS_EXCLUDED(mutex_) override;
+
+ private:
+ bool IsActiveLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ void SetActiveModulesLocked(const std::vector<bool>& active_modules)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ void UpdateModuleSendingState() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ void ConfigureProtection();
+ void ConfigureSsrcs(const std::map<uint32_t, RtpState>& suspended_ssrcs);
+ bool NackEnabled() const;
+ DataRate GetPostEncodeOverhead() const;
+ DataRate CalculateOverheadRate(DataRate data_rate,
+ DataSize packet_size,
+ DataSize overhead_per_packet,
+ Frequency framerate) const;
+
+ const FieldTrialsView& field_trials_;
+ const bool use_frame_rate_for_overhead_;
+ const bool has_packet_feedback_;
+
+ // Semantically equivalent to checking for `transport_->GetWorkerQueue()`
+ // but some tests need to be updated to call from the correct context.
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker transport_checker_;
+
+ // TODO(bugs.webrtc.org/13517): Remove mutex_ once RtpVideoSender runs on the
+ // transport task queue.
+ mutable Mutex mutex_;
+ bool active_ RTC_GUARDED_BY(mutex_);
+ bool registered_for_feedback_ RTC_GUARDED_BY(transport_checker_) = false;
+
+ const std::unique_ptr<FecController> fec_controller_;
+ bool fec_allowed_ RTC_GUARDED_BY(mutex_);
+
+ // Rtp modules are assumed to be sorted in simulcast index order.
+ const std::vector<webrtc_internal_rtp_video_sender::RtpStreamSender>
+ rtp_streams_;
+ const RtpConfig rtp_config_;
+ const absl::optional<VideoCodecType> codec_type_;
+ RtpTransportControllerSendInterface* const transport_;
+
+ // When using the generic descriptor we want all simulcast streams to share
+ // one frame id space (so that the SFU can switch stream without having to
+ // rewrite the frame id), therefore `shared_frame_id` has to live in a place
+ // where we are aware of all the different streams.
+ int64_t shared_frame_id_ = 0;
+ std::vector<RtpPayloadParams> params_ RTC_GUARDED_BY(mutex_);
+
+ size_t transport_overhead_bytes_per_packet_ RTC_GUARDED_BY(mutex_);
+ uint32_t protection_bitrate_bps_;
+ uint32_t encoder_target_rate_bps_;
+
+ std::vector<bool> loss_mask_vector_ RTC_GUARDED_BY(mutex_);
+
+ std::vector<FrameCounts> frame_counts_ RTC_GUARDED_BY(mutex_);
+ FrameCountObserver* const frame_count_observer_;
+
+ // Effectively const map from SSRC to RtpRtcp, for all media SSRCs.
+ // This map is set at construction time and never changed, but it's
+ // non-trivial to make it properly const.
+ std::map<uint32_t, RtpRtcpInterface*> ssrc_to_rtp_module_;
+};
+
+} // namespace webrtc
+
+#endif // CALL_RTP_VIDEO_SENDER_H_
diff --git a/third_party/libwebrtc/call/rtp_video_sender_interface.h b/third_party/libwebrtc/call/rtp_video_sender_interface.h
new file mode 100644
index 0000000000..3f2877155a
--- /dev/null
+++ b/third_party/libwebrtc/call/rtp_video_sender_interface.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef CALL_RTP_VIDEO_SENDER_INTERFACE_H_
+#define CALL_RTP_VIDEO_SENDER_INTERFACE_H_
+
+#include <map>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/call/bitrate_allocation.h"
+#include "api/fec_controller_override.h"
+#include "api/video/video_layers_allocation.h"
+#include "call/rtp_config.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/rtp_sequence_number_map.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+
+namespace webrtc {
+class VideoBitrateAllocation;
+struct FecProtectionParams;
+
+class RtpVideoSenderInterface : public EncodedImageCallback,
+ public FecControllerOverride {
+ public:
+ // Sets the sending status of the rtp modules and appropriately sets the
+ // RtpVideoSender to active if any rtp modules are active.
+  // A module will only send packets if it is active.
+ virtual void SetActiveModules(const std::vector<bool>& active_modules) = 0;
+ // Set the sending status of all rtp modules to inactive.
+ virtual void Stop() = 0;
+ virtual bool IsActive() = 0;
+
+ virtual void OnNetworkAvailability(bool network_available) = 0;
+ virtual std::map<uint32_t, RtpState> GetRtpStates() const = 0;
+ virtual std::map<uint32_t, RtpPayloadState> GetRtpPayloadStates() const = 0;
+
+ virtual void DeliverRtcp(const uint8_t* packet, size_t length) = 0;
+
+ virtual void OnBitrateAllocationUpdated(
+ const VideoBitrateAllocation& bitrate) = 0;
+ virtual void OnVideoLayersAllocationUpdated(
+ const VideoLayersAllocation& allocation) = 0;
+ virtual void OnBitrateUpdated(BitrateAllocationUpdate update,
+ int framerate) = 0;
+ virtual void OnTransportOverheadChanged(
+ size_t transport_overhead_bytes_per_packet) = 0;
+ virtual uint32_t GetPayloadBitrateBps() const = 0;
+ virtual uint32_t GetProtectionBitrateBps() const = 0;
+ virtual void SetEncodingData(size_t width,
+ size_t height,
+ size_t num_temporal_layers) = 0;
+ virtual std::vector<RtpSequenceNumberMap::Info> GetSentRtpPacketInfos(
+ uint32_t ssrc,
+ rtc::ArrayView<const uint16_t> sequence_numbers) const = 0;
+
+ // Implements FecControllerOverride.
+ void SetFecAllowed(bool fec_allowed) override = 0;
+};
+} // namespace webrtc
+#endif // CALL_RTP_VIDEO_SENDER_INTERFACE_H_
diff --git a/third_party/libwebrtc/call/rtp_video_sender_unittest.cc b/third_party/libwebrtc/call/rtp_video_sender_unittest.cc
new file mode 100644
index 0000000000..cd2f1efbcf
--- /dev/null
+++ b/third_party/libwebrtc/call/rtp_video_sender_unittest.cc
@@ -0,0 +1,1232 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/rtp_video_sender.h"
+
+#include <atomic>
+#include <memory>
+#include <string>
+#include <utility>
+
+#include "absl/functional/any_invocable.h"
+#include "call/rtp_transport_controller_send.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/nack.h"
+#include "modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.h"
+#include "modules/rtp_rtcp/source/rtp_packet.h"
+#include "modules/video_coding/fec_controller_default.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "rtc_base/rate_limiter.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/mock_frame_transformer.h"
+#include "test/mock_transport.h"
+#include "test/scenario/scenario.h"
+#include "test/scoped_key_value_config.h"
+#include "test/time_controller/simulated_time_controller.h"
+#include "video/send_statistics_proxy.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::_;
+using ::testing::NiceMock;
+using ::testing::SaveArg;
+using ::testing::SizeIs;
+
+const int8_t kPayloadType = 96;
+const uint32_t kSsrc1 = 12345;
+const uint32_t kSsrc2 = 23456;
+const uint32_t kRtxSsrc1 = 34567;
+const uint32_t kRtxSsrc2 = 45678;
+const int16_t kInitialPictureId1 = 222;
+const int16_t kInitialPictureId2 = 44;
+const int16_t kInitialTl0PicIdx1 = 99;
+const int16_t kInitialTl0PicIdx2 = 199;
+const int64_t kRetransmitWindowSizeMs = 500;
+const int kTransportsSequenceExtensionId = 7;
+const int kDependencyDescriptorExtensionId = 8;
+
+class MockRtcpIntraFrameObserver : public RtcpIntraFrameObserver {
+ public:
+ MOCK_METHOD(void, OnReceivedIntraFrameRequest, (uint32_t), (override));
+};
+
+RtpSenderObservers CreateObservers(
+ RtcpIntraFrameObserver* intra_frame_callback,
+ ReportBlockDataObserver* report_block_data_observer,
+ StreamDataCountersCallback* rtp_stats,
+ BitrateStatisticsObserver* bitrate_observer,
+ FrameCountObserver* frame_count_observer,
+ RtcpPacketTypeCounterObserver* rtcp_type_observer) {
+ RtpSenderObservers observers;
+ observers.rtcp_rtt_stats = nullptr;
+ observers.intra_frame_callback = intra_frame_callback;
+ observers.rtcp_loss_notification_observer = nullptr;
+ observers.report_block_data_observer = report_block_data_observer;
+ observers.rtp_stats = rtp_stats;
+ observers.bitrate_observer = bitrate_observer;
+ observers.frame_count_observer = frame_count_observer;
+ observers.rtcp_type_observer = rtcp_type_observer;
+ observers.send_packet_observer = nullptr;
+ return observers;
+}
+
+BitrateConstraints GetBitrateConfig() {
+ BitrateConstraints bitrate_config;
+ bitrate_config.min_bitrate_bps = 30000;
+ bitrate_config.start_bitrate_bps = 300000;
+ bitrate_config.max_bitrate_bps = 3000000;
+ return bitrate_config;
+}
+
+VideoSendStream::Config CreateVideoSendStreamConfig(
+ Transport* transport,
+ const std::vector<uint32_t>& ssrcs,
+ const std::vector<uint32_t>& rtx_ssrcs,
+ int payload_type) {
+ VideoSendStream::Config config(transport);
+ config.rtp.ssrcs = ssrcs;
+ config.rtp.rtx.ssrcs = rtx_ssrcs;
+ config.rtp.payload_type = payload_type;
+ config.rtp.rtx.payload_type = payload_type + 1;
+ config.rtp.nack.rtp_history_ms = 1000;
+ config.rtp.extensions.emplace_back(RtpExtension::kTransportSequenceNumberUri,
+ kTransportsSequenceExtensionId);
+ config.rtp.extensions.emplace_back(RtpDependencyDescriptorExtension::Uri(),
+ kDependencyDescriptorExtensionId);
+ config.rtp.extmap_allow_mixed = true;
+ return config;
+}
+
+class RtpVideoSenderTestFixture {
+ public:
+ RtpVideoSenderTestFixture(
+ const std::vector<uint32_t>& ssrcs,
+ const std::vector<uint32_t>& rtx_ssrcs,
+ int payload_type,
+ const std::map<uint32_t, RtpPayloadState>& suspended_payload_states,
+ FrameCountObserver* frame_count_observer,
+ rtc::scoped_refptr<FrameTransformerInterface> frame_transformer,
+ const FieldTrialsView* field_trials = nullptr)
+ : time_controller_(Timestamp::Millis(1000000)),
+ config_(CreateVideoSendStreamConfig(&transport_,
+ ssrcs,
+ rtx_ssrcs,
+ payload_type)),
+ bitrate_config_(GetBitrateConfig()),
+ transport_controller_(
+ time_controller_.GetClock(),
+ RtpTransportConfig{
+ .bitrate_config = bitrate_config_,
+ .event_log = &event_log_,
+ .task_queue_factory = time_controller_.GetTaskQueueFactory(),
+ .trials = field_trials ? field_trials : &field_trials_,
+ }),
+ stats_proxy_(time_controller_.GetClock(),
+ config_,
+ VideoEncoderConfig::ContentType::kRealtimeVideo,
+ field_trials ? *field_trials : field_trials_),
+ retransmission_rate_limiter_(time_controller_.GetClock(),
+ kRetransmitWindowSizeMs) {
+ transport_controller_.EnsureStarted();
+ std::map<uint32_t, RtpState> suspended_ssrcs;
+ router_ = std::make_unique<RtpVideoSender>(
+ time_controller_.GetClock(), suspended_ssrcs, suspended_payload_states,
+ config_.rtp, config_.rtcp_report_interval_ms, &transport_,
+ CreateObservers(&encoder_feedback_, &stats_proxy_, &stats_proxy_,
+ &stats_proxy_, frame_count_observer, &stats_proxy_),
+ &transport_controller_, &event_log_, &retransmission_rate_limiter_,
+ std::make_unique<FecControllerDefault>(time_controller_.GetClock()),
+ nullptr, CryptoOptions{}, frame_transformer,
+ field_trials ? *field_trials : field_trials_,
+ time_controller_.GetTaskQueueFactory());
+ }
+
+ RtpVideoSenderTestFixture(
+ const std::vector<uint32_t>& ssrcs,
+ const std::vector<uint32_t>& rtx_ssrcs,
+ int payload_type,
+ const std::map<uint32_t, RtpPayloadState>& suspended_payload_states,
+ FrameCountObserver* frame_count_observer,
+ const FieldTrialsView* field_trials = nullptr)
+ : RtpVideoSenderTestFixture(ssrcs,
+ rtx_ssrcs,
+ payload_type,
+ suspended_payload_states,
+ frame_count_observer,
+ /*frame_transformer=*/nullptr,
+ field_trials) {}
+
+ RtpVideoSenderTestFixture(
+ const std::vector<uint32_t>& ssrcs,
+ const std::vector<uint32_t>& rtx_ssrcs,
+ int payload_type,
+ const std::map<uint32_t, RtpPayloadState>& suspended_payload_states,
+ const FieldTrialsView* field_trials = nullptr)
+ : RtpVideoSenderTestFixture(ssrcs,
+ rtx_ssrcs,
+ payload_type,
+ suspended_payload_states,
+ /*frame_count_observer=*/nullptr,
+ /*frame_transformer=*/nullptr,
+ field_trials) {}
+
+ ~RtpVideoSenderTestFixture() { Stop(); }
+
+ RtpVideoSender* router() { return router_.get(); }
+ MockTransport& transport() { return transport_; }
+ void AdvanceTime(TimeDelta delta) { time_controller_.AdvanceTime(delta); }
+
+ void Stop() { router_->Stop(); }
+
+ void SetActiveModules(const std::vector<bool>& active_modules) {
+ router_->SetActiveModules(active_modules);
+ }
+
+ private:
+ test::ScopedKeyValueConfig field_trials_;
+ NiceMock<MockTransport> transport_;
+ NiceMock<MockRtcpIntraFrameObserver> encoder_feedback_;
+ GlobalSimulatedTimeController time_controller_;
+ RtcEventLogNull event_log_;
+ VideoSendStream::Config config_;
+ BitrateConstraints bitrate_config_;
+ RtpTransportControllerSend transport_controller_;
+ SendStatisticsProxy stats_proxy_;
+ RateLimiter retransmission_rate_limiter_;
+ std::unique_ptr<RtpVideoSender> router_;
+};
+
+BitrateAllocationUpdate CreateBitrateAllocationUpdate(int target_bitrate_bps) {
+ BitrateAllocationUpdate update;
+ update.target_bitrate = DataRate::BitsPerSec(target_bitrate_bps);
+ update.round_trip_time = TimeDelta::Zero();
+ return update;
+}
+
+} // namespace
+
+TEST(RtpVideoSenderTest, SendOnOneModule) {
+ constexpr uint8_t kPayload = 'a';
+ EncodedImage encoded_image;
+ encoded_image.SetRtpTimestamp(1);
+ encoded_image.capture_time_ms_ = 2;
+ encoded_image._frameType = VideoFrameType::kVideoFrameKey;
+ encoded_image.SetEncodedData(EncodedImageBuffer::Create(&kPayload, 1));
+
+ RtpVideoSenderTestFixture test({kSsrc1}, {kRtxSsrc1}, kPayloadType, {});
+ EXPECT_NE(EncodedImageCallback::Result::OK,
+ test.router()->OnEncodedImage(encoded_image, nullptr).error);
+
+ test.SetActiveModules({true});
+ EXPECT_EQ(EncodedImageCallback::Result::OK,
+ test.router()->OnEncodedImage(encoded_image, nullptr).error);
+
+ test.SetActiveModules({false});
+ EXPECT_NE(EncodedImageCallback::Result::OK,
+ test.router()->OnEncodedImage(encoded_image, nullptr).error);
+
+ test.SetActiveModules({true});
+ EXPECT_EQ(EncodedImageCallback::Result::OK,
+ test.router()->OnEncodedImage(encoded_image, nullptr).error);
+}
+
+TEST(RtpVideoSenderTest, SendSimulcastSetActive) {
+ constexpr uint8_t kPayload = 'a';
+ EncodedImage encoded_image_1;
+ encoded_image_1.SetRtpTimestamp(1);
+ encoded_image_1.capture_time_ms_ = 2;
+ encoded_image_1._frameType = VideoFrameType::kVideoFrameKey;
+ encoded_image_1.SetEncodedData(EncodedImageBuffer::Create(&kPayload, 1));
+
+ RtpVideoSenderTestFixture test({kSsrc1, kSsrc2}, {kRtxSsrc1, kRtxSsrc2},
+ kPayloadType, {});
+
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecVP8;
+
+ test.SetActiveModules({true, true});
+ EXPECT_EQ(EncodedImageCallback::Result::OK,
+ test.router()->OnEncodedImage(encoded_image_1, &codec_info).error);
+
+ EncodedImage encoded_image_2(encoded_image_1);
+ encoded_image_2.SetSimulcastIndex(1);
+ EXPECT_EQ(EncodedImageCallback::Result::OK,
+ test.router()->OnEncodedImage(encoded_image_2, &codec_info).error);
+
+ // Inactive.
+ test.Stop();
+ EXPECT_NE(EncodedImageCallback::Result::OK,
+ test.router()->OnEncodedImage(encoded_image_1, &codec_info).error);
+ EXPECT_NE(EncodedImageCallback::Result::OK,
+ test.router()->OnEncodedImage(encoded_image_2, &codec_info).error);
+}
+
+// Tests how setting individual rtp modules to active affects the overall
+// behavior of the payload router. First sets one module to active and checks
+// that outgoing data can be sent on this module, and checks that no data can
+// be sent if both modules are inactive.
+TEST(RtpVideoSenderTest, SendSimulcastSetActiveModules) {
+ constexpr uint8_t kPayload = 'a';
+ EncodedImage encoded_image_1;
+ encoded_image_1.SetRtpTimestamp(1);
+ encoded_image_1.capture_time_ms_ = 2;
+ encoded_image_1._frameType = VideoFrameType::kVideoFrameKey;
+ encoded_image_1.SetEncodedData(EncodedImageBuffer::Create(&kPayload, 1));
+
+ EncodedImage encoded_image_2(encoded_image_1);
+ encoded_image_2.SetSimulcastIndex(1);
+
+ RtpVideoSenderTestFixture test({kSsrc1, kSsrc2}, {kRtxSsrc1, kRtxSsrc2},
+ kPayloadType, {});
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecVP8;
+
+ // Only setting one stream to active will still set the payload router to
+ // active and allow sending data on the active stream.
+ std::vector<bool> active_modules({true, false});
+ test.SetActiveModules(active_modules);
+ EXPECT_EQ(EncodedImageCallback::Result::OK,
+ test.router()->OnEncodedImage(encoded_image_1, &codec_info).error);
+
+ // Setting both streams to inactive will turn the payload router to
+ // inactive.
+ active_modules = {false, false};
+ test.SetActiveModules(active_modules);
+ // An incoming encoded image will not ask the module to send outgoing data
+ // because the payload router is inactive.
+ EXPECT_NE(EncodedImageCallback::Result::OK,
+ test.router()->OnEncodedImage(encoded_image_1, &codec_info).error);
+ EXPECT_NE(EncodedImageCallback::Result::OK,
+ test.router()->OnEncodedImage(encoded_image_1, &codec_info).error);
+}
+
+TEST(RtpVideoSenderTest,
+ DiscardsHigherSimulcastFramesAfterLayerDisabledInVideoLayersAllocation) {
+ constexpr uint8_t kPayload = 'a';
+ EncodedImage encoded_image_1;
+ encoded_image_1.SetRtpTimestamp(1);
+ encoded_image_1.capture_time_ms_ = 2;
+ encoded_image_1._frameType = VideoFrameType::kVideoFrameKey;
+ encoded_image_1.SetEncodedData(EncodedImageBuffer::Create(&kPayload, 1));
+ EncodedImage encoded_image_2(encoded_image_1);
+ encoded_image_2.SetSimulcastIndex(1);
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecVP8;
+ RtpVideoSenderTestFixture test({kSsrc1, kSsrc2}, {kRtxSsrc1, kRtxSsrc2},
+ kPayloadType, {});
+ test.SetActiveModules({true, true});
+ // A layer is sent on both rtp streams.
+ test.router()->OnVideoLayersAllocationUpdated(
+ {.active_spatial_layers = {{.rtp_stream_index = 0},
+ {.rtp_stream_index = 1}}});
+
+ EXPECT_EQ(EncodedImageCallback::Result::OK,
+ test.router()->OnEncodedImage(encoded_image_1, &codec_info).error);
+ EXPECT_EQ(EncodedImageCallback::Result::OK,
+ test.router()->OnEncodedImage(encoded_image_2, &codec_info).error);
+
+ // Only rtp stream index 0 is configured to send a stream.
+ test.router()->OnVideoLayersAllocationUpdated(
+ {.active_spatial_layers = {{.rtp_stream_index = 0}}});
+ EXPECT_EQ(EncodedImageCallback::Result::OK,
+ test.router()->OnEncodedImage(encoded_image_1, &codec_info).error);
+ EXPECT_NE(EncodedImageCallback::Result::OK,
+ test.router()->OnEncodedImage(encoded_image_2, &codec_info).error);
+}
+
+TEST(RtpVideoSenderTest, CreateWithNoPreviousStates) {
+ RtpVideoSenderTestFixture test({kSsrc1, kSsrc2}, {kRtxSsrc1, kRtxSsrc2},
+ kPayloadType, {});
+ test.SetActiveModules({true, true});
+
+ std::map<uint32_t, RtpPayloadState> initial_states =
+ test.router()->GetRtpPayloadStates();
+ EXPECT_EQ(2u, initial_states.size());
+ EXPECT_NE(initial_states.find(kSsrc1), initial_states.end());
+ EXPECT_NE(initial_states.find(kSsrc2), initial_states.end());
+}
+
+TEST(RtpVideoSenderTest, CreateWithPreviousStates) {
+ const int64_t kState1SharedFrameId = 123;
+ const int64_t kState2SharedFrameId = 234;
+ RtpPayloadState state1;
+ state1.picture_id = kInitialPictureId1;
+ state1.tl0_pic_idx = kInitialTl0PicIdx1;
+ state1.shared_frame_id = kState1SharedFrameId;
+ RtpPayloadState state2;
+ state2.picture_id = kInitialPictureId2;
+ state2.tl0_pic_idx = kInitialTl0PicIdx2;
+ state2.shared_frame_id = kState2SharedFrameId;
+ std::map<uint32_t, RtpPayloadState> states = {{kSsrc1, state1},
+ {kSsrc2, state2}};
+
+ RtpVideoSenderTestFixture test({kSsrc1, kSsrc2}, {kRtxSsrc1, kRtxSsrc2},
+ kPayloadType, states);
+ test.SetActiveModules({true, true});
+
+ std::map<uint32_t, RtpPayloadState> initial_states =
+ test.router()->GetRtpPayloadStates();
+ EXPECT_EQ(2u, initial_states.size());
+ EXPECT_EQ(kInitialPictureId1, initial_states[kSsrc1].picture_id);
+ EXPECT_EQ(kInitialTl0PicIdx1, initial_states[kSsrc1].tl0_pic_idx);
+ EXPECT_EQ(kInitialPictureId2, initial_states[kSsrc2].picture_id);
+ EXPECT_EQ(kInitialTl0PicIdx2, initial_states[kSsrc2].tl0_pic_idx);
+ EXPECT_EQ(kState2SharedFrameId, initial_states[kSsrc1].shared_frame_id);
+ EXPECT_EQ(kState2SharedFrameId, initial_states[kSsrc2].shared_frame_id);
+}
+
+TEST(RtpVideoSenderTest, FrameCountCallbacks) {
+ class MockFrameCountObserver : public FrameCountObserver {
+ public:
+ MOCK_METHOD(void,
+ FrameCountUpdated,
+ (const FrameCounts& frame_counts, uint32_t ssrc),
+ (override));
+ } callback;
+
+ RtpVideoSenderTestFixture test({kSsrc1}, {kRtxSsrc1}, kPayloadType, {},
+ &callback);
+
+ constexpr uint8_t kPayload = 'a';
+ EncodedImage encoded_image;
+ encoded_image.SetRtpTimestamp(1);
+ encoded_image.capture_time_ms_ = 2;
+ encoded_image._frameType = VideoFrameType::kVideoFrameKey;
+ encoded_image.SetEncodedData(EncodedImageBuffer::Create(&kPayload, 1));
+
+ encoded_image._frameType = VideoFrameType::kVideoFrameKey;
+
+ // No callbacks when not active.
+ EXPECT_CALL(callback, FrameCountUpdated).Times(0);
+ EXPECT_NE(EncodedImageCallback::Result::OK,
+ test.router()->OnEncodedImage(encoded_image, nullptr).error);
+ ::testing::Mock::VerifyAndClearExpectations(&callback);
+
+ test.SetActiveModules({true});
+
+ FrameCounts frame_counts;
+ EXPECT_CALL(callback, FrameCountUpdated(_, kSsrc1))
+ .WillOnce(SaveArg<0>(&frame_counts));
+ EXPECT_EQ(EncodedImageCallback::Result::OK,
+ test.router()->OnEncodedImage(encoded_image, nullptr).error);
+
+ EXPECT_EQ(1, frame_counts.key_frames);
+ EXPECT_EQ(0, frame_counts.delta_frames);
+
+ ::testing::Mock::VerifyAndClearExpectations(&callback);
+
+ encoded_image._frameType = VideoFrameType::kVideoFrameDelta;
+ EXPECT_CALL(callback, FrameCountUpdated(_, kSsrc1))
+ .WillOnce(SaveArg<0>(&frame_counts));
+ EXPECT_EQ(EncodedImageCallback::Result::OK,
+ test.router()->OnEncodedImage(encoded_image, nullptr).error);
+
+ EXPECT_EQ(1, frame_counts.key_frames);
+ EXPECT_EQ(1, frame_counts.delta_frames);
+}
+
+// Integration test verifying that ack of packet via TransportFeedback means
+// that the packet is removed from RtpPacketHistory and won't be retransmitted
+// again.
+TEST(RtpVideoSenderTest, DoesNotRetrasmitAckedPackets) {
+ RtpVideoSenderTestFixture test({kSsrc1, kSsrc2}, {kRtxSsrc1, kRtxSsrc2},
+ kPayloadType, {});
+ test.SetActiveModules({true, true});
+
+ constexpr uint8_t kPayload = 'a';
+ EncodedImage encoded_image;
+ encoded_image.SetRtpTimestamp(1);
+ encoded_image.capture_time_ms_ = 2;
+ encoded_image._frameType = VideoFrameType::kVideoFrameKey;
+ encoded_image.SetEncodedData(EncodedImageBuffer::Create(&kPayload, 1));
+
+ // Send two tiny images, mapping to two RTP packets. Capture sequence numbers.
+ std::vector<uint16_t> rtp_sequence_numbers;
+ std::vector<uint16_t> transport_sequence_numbers;
+ EXPECT_CALL(test.transport(), SendRtp)
+ .Times(2)
+ .WillRepeatedly([&rtp_sequence_numbers, &transport_sequence_numbers](
+ rtc::ArrayView<const uint8_t> packet,
+ const PacketOptions& options) {
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet));
+ rtp_sequence_numbers.push_back(rtp_packet.SequenceNumber());
+ transport_sequence_numbers.push_back(options.packet_id);
+ return true;
+ });
+ EXPECT_EQ(EncodedImageCallback::Result::OK,
+ test.router()->OnEncodedImage(encoded_image, nullptr).error);
+ encoded_image.SetRtpTimestamp(2);
+ encoded_image.capture_time_ms_ = 3;
+ EXPECT_EQ(EncodedImageCallback::Result::OK,
+ test.router()->OnEncodedImage(encoded_image, nullptr).error);
+
+ test.AdvanceTime(TimeDelta::Millis(33));
+
+  // Construct a NACK message for requesting retransmission of both packets.
+ rtcp::Nack nack;
+ nack.SetMediaSsrc(kSsrc1);
+ nack.SetPacketIds(rtp_sequence_numbers);
+ rtc::Buffer nack_buffer = nack.Build();
+
+ std::vector<uint16_t> retransmitted_rtp_sequence_numbers;
+ EXPECT_CALL(test.transport(), SendRtp)
+ .Times(2)
+ .WillRepeatedly([&retransmitted_rtp_sequence_numbers](
+ rtc::ArrayView<const uint8_t> packet,
+ const PacketOptions& options) {
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet));
+ EXPECT_EQ(rtp_packet.Ssrc(), kRtxSsrc1);
+ // Capture the retransmitted sequence number from the RTX header.
+ rtc::ArrayView<const uint8_t> payload = rtp_packet.payload();
+ retransmitted_rtp_sequence_numbers.push_back(
+ ByteReader<uint16_t>::ReadBigEndian(payload.data()));
+ return true;
+ });
+ test.router()->DeliverRtcp(nack_buffer.data(), nack_buffer.size());
+ test.AdvanceTime(TimeDelta::Millis(33));
+
+ // Verify that both packets were retransmitted.
+ EXPECT_EQ(retransmitted_rtp_sequence_numbers, rtp_sequence_numbers);
+
+  // Simulate transport feedback indicating first packet received, next packet
+ // lost (not other way around as that would trigger early retransmit).
+ StreamFeedbackObserver::StreamPacketInfo lost_packet_feedback;
+ lost_packet_feedback.rtp_sequence_number = rtp_sequence_numbers[0];
+ lost_packet_feedback.ssrc = kSsrc1;
+ lost_packet_feedback.received = false;
+ lost_packet_feedback.is_retransmission = false;
+
+ StreamFeedbackObserver::StreamPacketInfo received_packet_feedback;
+ received_packet_feedback.rtp_sequence_number = rtp_sequence_numbers[1];
+ received_packet_feedback.ssrc = kSsrc1;
+ received_packet_feedback.received = true;
+  received_packet_feedback.is_retransmission = false;
+
+ test.router()->OnPacketFeedbackVector(
+ {lost_packet_feedback, received_packet_feedback});
+
+ // Advance time to make sure retransmission would be allowed and try again.
+ // This time the retransmission should not happen for the first packet since
+ // the history has been notified of the ack and removed the packet. The
+ // second packet, included in the feedback but not marked as received, should
+ // still be retransmitted.
+ test.AdvanceTime(TimeDelta::Millis(33));
+ EXPECT_CALL(test.transport(), SendRtp)
+ .WillOnce([&lost_packet_feedback](rtc::ArrayView<const uint8_t> packet,
+ const PacketOptions& options) {
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet));
+ EXPECT_EQ(rtp_packet.Ssrc(), kRtxSsrc1);
+ // Capture the retransmitted sequence number from the RTX header.
+ rtc::ArrayView<const uint8_t> payload = rtp_packet.payload();
+ EXPECT_EQ(lost_packet_feedback.rtp_sequence_number,
+ ByteReader<uint16_t>::ReadBigEndian(payload.data()));
+ return true;
+ });
+ test.router()->DeliverRtcp(nack_buffer.data(), nack_buffer.size());
+ test.AdvanceTime(TimeDelta::Millis(33));
+}
+
+// This tests that we utilize transport wide feedback to retransmit lost
+// packets. This is tested by dropping all ordinary packets from a "lossy"
+// stream sent along with a secondary untouched stream. The transport wide
+// feedback packets from the secondary stream allows the sending side to
+// detect and retransmit the lost packets from the lossy stream.
+TEST(RtpVideoSenderTest, RetransmitsOnTransportWideLossInfo) {
+  int rtx_packets = 0;
+ test::Scenario s(test_info_);
+ test::CallClientConfig call_conf;
+ // Keeping the bitrate fixed to avoid RTX due to probing.
+ call_conf.transport.rates.max_rate = DataRate::KilobitsPerSec(300);
+ call_conf.transport.rates.start_rate = DataRate::KilobitsPerSec(300);
+ test::NetworkSimulationConfig net_conf;
+ net_conf.bandwidth = DataRate::KilobitsPerSec(300);
+ auto send_node = s.CreateSimulationNode(net_conf);
+ auto* callee = s.CreateClient("return", call_conf);
+ auto* route = s.CreateRoutes(s.CreateClient("send", call_conf), {send_node},
+ callee, {s.CreateSimulationNode(net_conf)});
+
+ test::VideoStreamConfig lossy_config;
+ lossy_config.source.framerate = 5;
+ auto* lossy = s.CreateVideoStream(route->forward(), lossy_config);
+  // The secondary stream acts as a driver for transport feedback messages,
+ // ensuring that lost packets on the lossy stream are retransmitted.
+ s.CreateVideoStream(route->forward(), test::VideoStreamConfig());
+
+ send_node->router()->SetFilter([&](const EmulatedIpPacket& packet) {
+ RtpPacket rtp;
+ if (rtp.Parse(packet.data)) {
+ // Drops all regular packets for the lossy stream and counts all RTX
+      // packets. Since no packets are let through, NACKs can't be triggered
+ // by the receiving side.
+ if (lossy->send()->UsingSsrc(rtp.Ssrc())) {
+ return false;
+ } else if (lossy->send()->UsingRtxSsrc(rtp.Ssrc())) {
+ ++rtx_packets;
+ }
+ }
+ return true;
+ });
+
+ // Run for a short duration and reset counters to avoid counting RTX packets
+ // from initial probing.
+ s.RunFor(TimeDelta::Seconds(1));
+ rtx_packets = 0;
+ int decoded_baseline = 0;
+ callee->SendTask([&decoded_baseline, &lossy]() {
+ decoded_baseline = lossy->receive()->GetStats().frames_decoded;
+ });
+ s.RunFor(TimeDelta::Seconds(1));
+ // We expect both that RTX packets were sent and that an appropriate number of
+ // frames were received. This is somewhat redundant but reduces the risk of
+  // false positives in future regressions (e.g. RTX is sent due to probing).
+ EXPECT_GE(rtx_packets, 1);
+ int frames_decoded = 0;
+ callee->SendTask([&decoded_baseline, &frames_decoded, &lossy]() {
+ frames_decoded =
+ lossy->receive()->GetStats().frames_decoded - decoded_baseline;
+ });
+ EXPECT_EQ(frames_decoded, 5);
+}
+
+// Integration test verifying that retransmissions are sent for packets which
+// can be detected as lost early, using transport wide feedback.
+TEST(RtpVideoSenderTest, EarlyRetransmits) {
+ RtpVideoSenderTestFixture test({kSsrc1, kSsrc2}, {kRtxSsrc1, kRtxSsrc2},
+ kPayloadType, {});
+ test.SetActiveModules({true, true});
+
+ const uint8_t kPayload[1] = {'a'};
+ EncodedImage encoded_image;
+ encoded_image.SetRtpTimestamp(1);
+ encoded_image.capture_time_ms_ = 2;
+ encoded_image._frameType = VideoFrameType::kVideoFrameKey;
+ encoded_image.SetEncodedData(
+ EncodedImageBuffer::Create(kPayload, sizeof(kPayload)));
+ encoded_image.SetSimulcastIndex(0);
+
+ CodecSpecificInfo codec_specific;
+ codec_specific.codecType = VideoCodecType::kVideoCodecGeneric;
+
+ // Send two tiny images, mapping to single RTP packets. Capture sequence
+ // numbers.
+ uint16_t frame1_rtp_sequence_number = 0;
+ uint16_t frame1_transport_sequence_number = 0;
+ EXPECT_CALL(test.transport(), SendRtp)
+ .WillOnce(
+ [&frame1_rtp_sequence_number, &frame1_transport_sequence_number](
+ rtc::ArrayView<const uint8_t> packet,
+ const PacketOptions& options) {
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet));
+ frame1_rtp_sequence_number = rtp_packet.SequenceNumber();
+ frame1_transport_sequence_number = options.packet_id;
+ EXPECT_EQ(rtp_packet.Ssrc(), kSsrc1);
+ return true;
+ });
+ EXPECT_EQ(test.router()->OnEncodedImage(encoded_image, &codec_specific).error,
+ EncodedImageCallback::Result::OK);
+
+ test.AdvanceTime(TimeDelta::Millis(33));
+
+ uint16_t frame2_rtp_sequence_number = 0;
+ uint16_t frame2_transport_sequence_number = 0;
+ encoded_image.SetSimulcastIndex(1);
+ EXPECT_CALL(test.transport(), SendRtp)
+ .WillOnce(
+ [&frame2_rtp_sequence_number, &frame2_transport_sequence_number](
+ rtc::ArrayView<const uint8_t> packet,
+ const PacketOptions& options) {
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet));
+ frame2_rtp_sequence_number = rtp_packet.SequenceNumber();
+ frame2_transport_sequence_number = options.packet_id;
+ EXPECT_EQ(rtp_packet.Ssrc(), kSsrc2);
+ return true;
+ });
+ EXPECT_EQ(test.router()->OnEncodedImage(encoded_image, &codec_specific).error,
+ EncodedImageCallback::Result::OK);
+ test.AdvanceTime(TimeDelta::Millis(33));
+
+ EXPECT_NE(frame1_transport_sequence_number, frame2_transport_sequence_number);
+
+ // Inject a transport feedback where the packet for the first frame is lost,
+ // expect a retransmission for it.
+ EXPECT_CALL(test.transport(), SendRtp)
+ .WillOnce([&frame1_rtp_sequence_number](
+ rtc::ArrayView<const uint8_t> packet,
+ const PacketOptions& options) {
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet));
+ EXPECT_EQ(rtp_packet.Ssrc(), kRtxSsrc1);
+
+ // Retransmitted sequence number from the RTX header should match
+ // the lost packet.
+ rtc::ArrayView<const uint8_t> payload = rtp_packet.payload();
+ EXPECT_EQ(ByteReader<uint16_t>::ReadBigEndian(payload.data()),
+ frame1_rtp_sequence_number);
+ return true;
+ });
+
+ StreamFeedbackObserver::StreamPacketInfo first_packet_feedback;
+ first_packet_feedback.rtp_sequence_number = frame1_rtp_sequence_number;
+ first_packet_feedback.ssrc = kSsrc1;
+ first_packet_feedback.received = false;
+ first_packet_feedback.is_retransmission = false;
+
+ StreamFeedbackObserver::StreamPacketInfo second_packet_feedback;
+ second_packet_feedback.rtp_sequence_number = frame2_rtp_sequence_number;
+ second_packet_feedback.ssrc = kSsrc2;
+ second_packet_feedback.received = true;
+  second_packet_feedback.is_retransmission = false;
+
+ test.router()->OnPacketFeedbackVector(
+ {first_packet_feedback, second_packet_feedback});
+
+ // Wait for pacer to run and send the RTX packet.
+ test.AdvanceTime(TimeDelta::Millis(33));
+}
+
+TEST(RtpVideoSenderTest, SupportsDependencyDescriptor) {
+ RtpVideoSenderTestFixture test({kSsrc1}, {}, kPayloadType, {});
+ test.SetActiveModules({true});
+
+ RtpHeaderExtensionMap extensions;
+ extensions.Register<RtpDependencyDescriptorExtension>(
+ kDependencyDescriptorExtensionId);
+ std::vector<RtpPacket> sent_packets;
+ ON_CALL(test.transport(), SendRtp)
+ .WillByDefault([&](rtc::ArrayView<const uint8_t> packet,
+ const PacketOptions& options) {
+ sent_packets.emplace_back(&extensions);
+ EXPECT_TRUE(sent_packets.back().Parse(packet));
+ return true;
+ });
+
+ const uint8_t kPayload[1] = {'a'};
+ EncodedImage encoded_image;
+ encoded_image.SetRtpTimestamp(1);
+ encoded_image.capture_time_ms_ = 2;
+ encoded_image.SetEncodedData(
+ EncodedImageBuffer::Create(kPayload, sizeof(kPayload)));
+
+ CodecSpecificInfo codec_specific;
+ codec_specific.codecType = VideoCodecType::kVideoCodecGeneric;
+ codec_specific.template_structure.emplace();
+ codec_specific.template_structure->num_decode_targets = 1;
+ codec_specific.template_structure->templates = {
+ FrameDependencyTemplate().T(0).Dtis("S"),
+ FrameDependencyTemplate().T(0).Dtis("S").FrameDiffs({2}),
+ FrameDependencyTemplate().T(1).Dtis("D").FrameDiffs({1}),
+ };
+
+ // Send two tiny images, mapping to single RTP packets.
+ // Send in key frame.
+ encoded_image._frameType = VideoFrameType::kVideoFrameKey;
+ codec_specific.generic_frame_info =
+ GenericFrameInfo::Builder().T(0).Dtis("S").Build();
+ codec_specific.generic_frame_info->encoder_buffers = {{0, false, true}};
+ EXPECT_EQ(test.router()->OnEncodedImage(encoded_image, &codec_specific).error,
+ EncodedImageCallback::Result::OK);
+ test.AdvanceTime(TimeDelta::Millis(33));
+ ASSERT_THAT(sent_packets, SizeIs(1));
+ EXPECT_TRUE(
+ sent_packets.back().HasExtension<RtpDependencyDescriptorExtension>());
+
+ // Send in delta frame.
+ encoded_image._frameType = VideoFrameType::kVideoFrameDelta;
+ codec_specific.template_structure = absl::nullopt;
+ codec_specific.generic_frame_info =
+ GenericFrameInfo::Builder().T(1).Dtis("D").Build();
+ codec_specific.generic_frame_info->encoder_buffers = {{0, true, false}};
+ EXPECT_EQ(test.router()->OnEncodedImage(encoded_image, &codec_specific).error,
+ EncodedImageCallback::Result::OK);
+ test.AdvanceTime(TimeDelta::Millis(33));
+ ASSERT_THAT(sent_packets, SizeIs(2));
+ EXPECT_TRUE(
+ sent_packets.back().HasExtension<RtpDependencyDescriptorExtension>());
+}
+
+TEST(RtpVideoSenderTest,
+ SupportsDependencyDescriptorForVp8NotProvidedByEncoder) {
+ constexpr uint8_t kPayload[1] = {'a'};
+ RtpVideoSenderTestFixture test({kSsrc1}, {}, kPayloadType, {});
+ RtpHeaderExtensionMap extensions;
+ extensions.Register<RtpDependencyDescriptorExtension>(
+ kDependencyDescriptorExtensionId);
+ std::vector<RtpPacket> sent_packets;
+ ON_CALL(test.transport(), SendRtp)
+ .WillByDefault(
+ [&](rtc::ArrayView<const uint8_t> packet, const PacketOptions&) {
+ EXPECT_TRUE(sent_packets.emplace_back(&extensions).Parse(packet));
+ return true;
+ });
+ test.SetActiveModules({true});
+
+ EncodedImage key_frame_image;
+ key_frame_image._frameType = VideoFrameType::kVideoFrameKey;
+ key_frame_image.SetEncodedData(
+ EncodedImageBuffer::Create(kPayload, sizeof(kPayload)));
+ CodecSpecificInfo key_frame_info;
+ key_frame_info.codecType = VideoCodecType::kVideoCodecVP8;
+ ASSERT_EQ(
+ test.router()->OnEncodedImage(key_frame_image, &key_frame_info).error,
+ EncodedImageCallback::Result::OK);
+
+ EncodedImage delta_image;
+ delta_image._frameType = VideoFrameType::kVideoFrameDelta;
+ delta_image.SetEncodedData(
+ EncodedImageBuffer::Create(kPayload, sizeof(kPayload)));
+ CodecSpecificInfo delta_info;
+ delta_info.codecType = VideoCodecType::kVideoCodecVP8;
+ ASSERT_EQ(test.router()->OnEncodedImage(delta_image, &delta_info).error,
+ EncodedImageCallback::Result::OK);
+
+ test.AdvanceTime(TimeDelta::Millis(123));
+
+ DependencyDescriptor key_frame_dd;
+ DependencyDescriptor delta_dd;
+ ASSERT_THAT(sent_packets, SizeIs(2));
+ EXPECT_TRUE(sent_packets[0].GetExtension<RtpDependencyDescriptorExtension>(
+ /*structure=*/nullptr, &key_frame_dd));
+ EXPECT_TRUE(sent_packets[1].GetExtension<RtpDependencyDescriptorExtension>(
+ key_frame_dd.attached_structure.get(), &delta_dd));
+}
+
+TEST(RtpVideoSenderTest, SupportsDependencyDescriptorForVp9) {
+ RtpVideoSenderTestFixture test({kSsrc1}, {}, kPayloadType, {});
+ test.SetActiveModules({true});
+
+ RtpHeaderExtensionMap extensions;
+ extensions.Register<RtpDependencyDescriptorExtension>(
+ kDependencyDescriptorExtensionId);
+ std::vector<RtpPacket> sent_packets;
+ ON_CALL(test.transport(), SendRtp)
+ .WillByDefault([&](rtc::ArrayView<const uint8_t> packet,
+ const PacketOptions& options) {
+ sent_packets.emplace_back(&extensions);
+ EXPECT_TRUE(sent_packets.back().Parse(packet));
+ return true;
+ });
+
+ const uint8_t kPayload[1] = {'a'};
+ EncodedImage encoded_image;
+ encoded_image.SetRtpTimestamp(1);
+ encoded_image.capture_time_ms_ = 2;
+ encoded_image._frameType = VideoFrameType::kVideoFrameKey;
+ encoded_image.SetEncodedData(
+ EncodedImageBuffer::Create(kPayload, sizeof(kPayload)));
+
+ CodecSpecificInfo codec_specific;
+ codec_specific.codecType = VideoCodecType::kVideoCodecVP9;
+ codec_specific.template_structure.emplace();
+ codec_specific.template_structure->num_decode_targets = 2;
+ codec_specific.template_structure->templates = {
+ FrameDependencyTemplate().S(0).Dtis("SS"),
+ FrameDependencyTemplate().S(1).Dtis("-S").FrameDiffs({1}),
+ };
+
+ // Send two tiny images, each mapping to single RTP packet.
+ // Send in key frame for the base spatial layer.
+ codec_specific.generic_frame_info =
+ GenericFrameInfo::Builder().S(0).Dtis("SS").Build();
+ codec_specific.generic_frame_info->encoder_buffers = {{0, false, true}};
+ EXPECT_EQ(test.router()->OnEncodedImage(encoded_image, &codec_specific).error,
+ EncodedImageCallback::Result::OK);
+ // Send in 2nd spatial layer.
+ codec_specific.template_structure = absl::nullopt;
+ codec_specific.generic_frame_info =
+ GenericFrameInfo::Builder().S(1).Dtis("-S").Build();
+ codec_specific.generic_frame_info->encoder_buffers = {{0, true, false},
+ {1, false, true}};
+ EXPECT_EQ(test.router()->OnEncodedImage(encoded_image, &codec_specific).error,
+ EncodedImageCallback::Result::OK);
+
+ test.AdvanceTime(TimeDelta::Millis(33));
+ ASSERT_THAT(sent_packets, SizeIs(2));
+ EXPECT_TRUE(sent_packets[0].HasExtension<RtpDependencyDescriptorExtension>());
+ EXPECT_TRUE(sent_packets[1].HasExtension<RtpDependencyDescriptorExtension>());
+}
+
+TEST(RtpVideoSenderTest,
+ SupportsDependencyDescriptorForVp9NotProvidedByEncoder) {
+ RtpVideoSenderTestFixture test({kSsrc1}, {}, kPayloadType, {});
+ test.SetActiveModules({true});
+
+ RtpHeaderExtensionMap extensions;
+ extensions.Register<RtpDependencyDescriptorExtension>(
+ kDependencyDescriptorExtensionId);
+ std::vector<RtpPacket> sent_packets;
+ ON_CALL(test.transport(), SendRtp)
+ .WillByDefault([&](rtc::ArrayView<const uint8_t> packet,
+ const PacketOptions& options) {
+ sent_packets.emplace_back(&extensions);
+ EXPECT_TRUE(sent_packets.back().Parse(packet));
+ return true;
+ });
+
+ const uint8_t kPayload[1] = {'a'};
+ EncodedImage encoded_image;
+ encoded_image.SetRtpTimestamp(1);
+ encoded_image.capture_time_ms_ = 2;
+ encoded_image._frameType = VideoFrameType::kVideoFrameKey;
+ encoded_image._encodedWidth = 320;
+ encoded_image._encodedHeight = 180;
+ encoded_image.SetEncodedData(
+ EncodedImageBuffer::Create(kPayload, sizeof(kPayload)));
+
+ CodecSpecificInfo codec_specific;
+ codec_specific.codecType = VideoCodecType::kVideoCodecVP9;
+ codec_specific.codecSpecific.VP9.num_spatial_layers = 1;
+ codec_specific.codecSpecific.VP9.temporal_idx = kNoTemporalIdx;
+ codec_specific.codecSpecific.VP9.first_frame_in_picture = true;
+ codec_specific.end_of_picture = true;
+ codec_specific.codecSpecific.VP9.inter_pic_predicted = false;
+
+ // Send two tiny images, each mapping to single RTP packet.
+ EXPECT_EQ(test.router()->OnEncodedImage(encoded_image, &codec_specific).error,
+ EncodedImageCallback::Result::OK);
+
+ // Send in 2nd picture.
+ encoded_image._frameType = VideoFrameType::kVideoFrameDelta;
+ encoded_image.SetRtpTimestamp(3000);
+ codec_specific.codecSpecific.VP9.inter_pic_predicted = true;
+ codec_specific.codecSpecific.VP9.num_ref_pics = 1;
+ codec_specific.codecSpecific.VP9.p_diff[0] = 1;
+ EXPECT_EQ(test.router()->OnEncodedImage(encoded_image, &codec_specific).error,
+ EncodedImageCallback::Result::OK);
+
+ test.AdvanceTime(TimeDelta::Millis(33));
+ ASSERT_THAT(sent_packets, SizeIs(2));
+ EXPECT_TRUE(sent_packets[0].HasExtension<RtpDependencyDescriptorExtension>());
+ EXPECT_TRUE(sent_packets[1].HasExtension<RtpDependencyDescriptorExtension>());
+}
+
+TEST(RtpVideoSenderTest, GenerateDependecyDescriptorForGenericCodecs) {
+ test::ScopedKeyValueConfig field_trials(
+ "WebRTC-GenericCodecDependencyDescriptor/Enabled/");
+ RtpVideoSenderTestFixture test({kSsrc1}, {}, kPayloadType, {}, &field_trials);
+ test.SetActiveModules({true});
+
+ RtpHeaderExtensionMap extensions;
+ extensions.Register<RtpDependencyDescriptorExtension>(
+ kDependencyDescriptorExtensionId);
+ std::vector<RtpPacket> sent_packets;
+ ON_CALL(test.transport(), SendRtp)
+ .WillByDefault([&](rtc::ArrayView<const uint8_t> packet,
+ const PacketOptions& options) {
+ sent_packets.emplace_back(&extensions);
+ EXPECT_TRUE(sent_packets.back().Parse(packet));
+ return true;
+ });
+
+ const uint8_t kPayload[1] = {'a'};
+ EncodedImage encoded_image;
+ encoded_image.SetRtpTimestamp(1);
+ encoded_image.capture_time_ms_ = 2;
+ encoded_image._frameType = VideoFrameType::kVideoFrameKey;
+ encoded_image._encodedWidth = 320;
+ encoded_image._encodedHeight = 180;
+ encoded_image.SetEncodedData(
+ EncodedImageBuffer::Create(kPayload, sizeof(kPayload)));
+
+ CodecSpecificInfo codec_specific;
+ codec_specific.codecType = VideoCodecType::kVideoCodecGeneric;
+ codec_specific.end_of_picture = true;
+
+ // Send two tiny images, each mapping to single RTP packet.
+ EXPECT_EQ(test.router()->OnEncodedImage(encoded_image, &codec_specific).error,
+ EncodedImageCallback::Result::OK);
+
+ // Send in 2nd picture.
+ encoded_image._frameType = VideoFrameType::kVideoFrameDelta;
+ encoded_image.SetRtpTimestamp(3000);
+ EXPECT_EQ(test.router()->OnEncodedImage(encoded_image, &codec_specific).error,
+ EncodedImageCallback::Result::OK);
+
+ test.AdvanceTime(TimeDelta::Millis(33));
+ ASSERT_THAT(sent_packets, SizeIs(2));
+ EXPECT_TRUE(sent_packets[0].HasExtension<RtpDependencyDescriptorExtension>());
+ EXPECT_TRUE(sent_packets[1].HasExtension<RtpDependencyDescriptorExtension>());
+}
+
+TEST(RtpVideoSenderTest, SupportsStoppingUsingDependencyDescriptor) {
+ RtpVideoSenderTestFixture test({kSsrc1}, {}, kPayloadType, {});
+ test.SetActiveModules({true});
+
+ RtpHeaderExtensionMap extensions;
+ extensions.Register<RtpDependencyDescriptorExtension>(
+ kDependencyDescriptorExtensionId);
+ std::vector<RtpPacket> sent_packets;
+ ON_CALL(test.transport(), SendRtp)
+ .WillByDefault([&](rtc::ArrayView<const uint8_t> packet,
+ const PacketOptions& options) {
+ sent_packets.emplace_back(&extensions);
+ EXPECT_TRUE(sent_packets.back().Parse(packet));
+ return true;
+ });
+
+ const uint8_t kPayload[1] = {'a'};
+ EncodedImage encoded_image;
+ encoded_image.SetRtpTimestamp(1);
+ encoded_image.capture_time_ms_ = 2;
+ encoded_image.SetEncodedData(
+ EncodedImageBuffer::Create(kPayload, sizeof(kPayload)));
+
+ CodecSpecificInfo codec_specific;
+ codec_specific.codecType = VideoCodecType::kVideoCodecGeneric;
+ codec_specific.template_structure.emplace();
+ codec_specific.template_structure->num_decode_targets = 1;
+ codec_specific.template_structure->templates = {
+ FrameDependencyTemplate().T(0).Dtis("S"),
+ FrameDependencyTemplate().T(0).Dtis("S").FrameDiffs({2}),
+ FrameDependencyTemplate().T(1).Dtis("D").FrameDiffs({1}),
+ };
+
+ // Send two tiny images, mapping to single RTP packets.
+ // Send in a key frame.
+ encoded_image._frameType = VideoFrameType::kVideoFrameKey;
+ codec_specific.generic_frame_info =
+ GenericFrameInfo::Builder().T(0).Dtis("S").Build();
+ codec_specific.generic_frame_info->encoder_buffers = {{0, false, true}};
+ EXPECT_EQ(test.router()->OnEncodedImage(encoded_image, &codec_specific).error,
+ EncodedImageCallback::Result::OK);
+ test.AdvanceTime(TimeDelta::Millis(33));
+ ASSERT_THAT(sent_packets, SizeIs(1));
+ EXPECT_TRUE(
+ sent_packets.back().HasExtension<RtpDependencyDescriptorExtension>());
+
+ // Send in a new key frame without the support for the dependency descriptor.
+ encoded_image._frameType = VideoFrameType::kVideoFrameKey;
+ codec_specific.template_structure = absl::nullopt;
+ EXPECT_EQ(test.router()->OnEncodedImage(encoded_image, &codec_specific).error,
+ EncodedImageCallback::Result::OK);
+ test.AdvanceTime(TimeDelta::Millis(33));
+ ASSERT_THAT(sent_packets, SizeIs(2));
+ EXPECT_FALSE(
+ sent_packets.back().HasExtension<RtpDependencyDescriptorExtension>());
+}
+
+TEST(RtpVideoSenderTest, CanSetZeroBitrate) {
+ RtpVideoSenderTestFixture test({kSsrc1}, {kRtxSsrc1}, kPayloadType, {});
+ test.router()->OnBitrateUpdated(CreateBitrateAllocationUpdate(0),
+ /*framerate*/ 0);
+}
+
+TEST(RtpVideoSenderTest, SimulcastSenderRegistersFrameTransformers) {
+ rtc::scoped_refptr<MockFrameTransformer> transformer =
+ rtc::make_ref_counted<MockFrameTransformer>();
+
+ EXPECT_CALL(*transformer, RegisterTransformedFrameSinkCallback(_, kSsrc1));
+ EXPECT_CALL(*transformer, RegisterTransformedFrameSinkCallback(_, kSsrc2));
+ RtpVideoSenderTestFixture test({kSsrc1, kSsrc2}, {kRtxSsrc1, kRtxSsrc2},
+ kPayloadType, {}, nullptr, transformer);
+
+ EXPECT_CALL(*transformer, UnregisterTransformedFrameSinkCallback(kSsrc1));
+ EXPECT_CALL(*transformer, UnregisterTransformedFrameSinkCallback(kSsrc2));
+}
+
+TEST(RtpVideoSenderTest, OverheadIsSubtractedFromTargetBitrate) {
+ test::ScopedKeyValueConfig field_trials(
+ "WebRTC-Video-UseFrameRateForOverhead/Enabled/");
+
+ // TODO(jakobi): RTP header size should not be hard coded.
+ constexpr uint32_t kRtpHeaderSizeBytes = 20;
+ constexpr uint32_t kTransportPacketOverheadBytes = 40;
+ constexpr uint32_t kOverheadPerPacketBytes =
+ kRtpHeaderSizeBytes + kTransportPacketOverheadBytes;
+ RtpVideoSenderTestFixture test({kSsrc1}, {}, kPayloadType, {}, &field_trials);
+ test.router()->OnTransportOverheadChanged(kTransportPacketOverheadBytes);
+ test.SetActiveModules({true});
+
+ {
+ test.router()->OnBitrateUpdated(CreateBitrateAllocationUpdate(300000),
+ /*framerate*/ 15);
+ // 1 packet per frame.
+ EXPECT_EQ(test.router()->GetPayloadBitrateBps(),
+ 300000 - kOverheadPerPacketBytes * 8 * 30);
+ }
+ {
+ test.router()->OnBitrateUpdated(CreateBitrateAllocationUpdate(150000),
+ /*framerate*/ 15);
+ // 1 packet per frame.
+ EXPECT_EQ(test.router()->GetPayloadBitrateBps(),
+ 150000 - kOverheadPerPacketBytes * 8 * 15);
+ }
+ {
+ test.router()->OnBitrateUpdated(CreateBitrateAllocationUpdate(1000000),
+ /*framerate*/ 30);
+ // 3 packets per frame.
+ EXPECT_EQ(test.router()->GetPayloadBitrateBps(),
+ 1000000 - kOverheadPerPacketBytes * 8 * 30 * 3);
+ }
+}
+
+TEST(RtpVideoSenderTest, ClearsPendingPacketsOnInactivation) {
+ RtpVideoSenderTestFixture test({kSsrc1}, {kRtxSsrc1}, kPayloadType, {});
+ test.SetActiveModules({true});
+
+ RtpHeaderExtensionMap extensions;
+ extensions.Register<RtpDependencyDescriptorExtension>(
+ kDependencyDescriptorExtensionId);
+ std::vector<RtpPacket> sent_packets;
+ ON_CALL(test.transport(), SendRtp)
+ .WillByDefault([&](rtc::ArrayView<const uint8_t> packet,
+ const PacketOptions& options) {
+ sent_packets.emplace_back(&extensions);
+ EXPECT_TRUE(sent_packets.back().Parse(packet));
+ return true;
+ });
+
+ // Set a very low bitrate.
+ test.router()->OnBitrateUpdated(
+ CreateBitrateAllocationUpdate(/*rate_bps=*/30'000),
+ /*framerate=*/30);
+
+ // Create and send a large keyframe.
+ const size_t kImageSizeBytes = 10000;
+ constexpr uint8_t kPayload[kImageSizeBytes] = {'a'};
+ EncodedImage encoded_image;
+ encoded_image.SetRtpTimestamp(1);
+ encoded_image.capture_time_ms_ = 2;
+ encoded_image._frameType = VideoFrameType::kVideoFrameKey;
+ encoded_image.SetEncodedData(
+ EncodedImageBuffer::Create(kPayload, sizeof(kPayload)));
+ EXPECT_EQ(test.router()
+ ->OnEncodedImage(encoded_image, /*codec_specific=*/nullptr)
+ .error,
+ EncodedImageCallback::Result::OK);
+
+ // Advance time a small amount, check that sent data is only part of the
+ // image.
+ test.AdvanceTime(TimeDelta::Millis(5));
+ DataSize transmittedPayload = DataSize::Zero();
+ for (const RtpPacket& packet : sent_packets) {
+ transmittedPayload += DataSize::Bytes(packet.payload_size());
+ // Make sure we don't see the end of the frame.
+ EXPECT_FALSE(packet.Marker());
+ }
+ EXPECT_GT(transmittedPayload, DataSize::Zero());
+ EXPECT_LT(transmittedPayload, DataSize::Bytes(kImageSizeBytes / 4));
+
+ // Record the RTP timestamp of the first frame.
+ const uint32_t first_frame_timestamp = sent_packets[0].Timestamp();
+ sent_packets.clear();
+
+ // Disable the sending module and advance time slightly. No packets should be
+ // sent.
+ test.SetActiveModules({false});
+ test.AdvanceTime(TimeDelta::Millis(20));
+ EXPECT_TRUE(sent_packets.empty());
+
+  // Reactivate the send module - any packets should have been removed, so
+ // should be transmitted.
+ test.SetActiveModules({true});
+ test.AdvanceTime(TimeDelta::Millis(33));
+ EXPECT_TRUE(sent_packets.empty());
+
+ // Send a new frame.
+ encoded_image.SetRtpTimestamp(3);
+ encoded_image.capture_time_ms_ = 4;
+ EXPECT_EQ(test.router()
+ ->OnEncodedImage(encoded_image, /*codec_specific=*/nullptr)
+ .error,
+ EncodedImageCallback::Result::OK);
+ test.AdvanceTime(TimeDelta::Millis(33));
+
+ // Advance time, check we get new packets - but only for the second frame.
+ EXPECT_FALSE(sent_packets.empty());
+ EXPECT_NE(sent_packets[0].Timestamp(), first_frame_timestamp);
+}
+
+// Integration test verifying that when retransmission mode is set to
+// kRetransmitBaseLayer, only the base layer is retransmitted.
+TEST(RtpVideoSenderTest, RetransmitsBaseLayerOnly) {
+ RtpVideoSenderTestFixture test({kSsrc1, kSsrc2}, {kRtxSsrc1, kRtxSsrc2},
+ kPayloadType, {});
+ test.SetActiveModules({true, true});
+
+ test.router()->SetRetransmissionMode(kRetransmitBaseLayer);
+ constexpr uint8_t kPayload = 'a';
+ EncodedImage encoded_image;
+ encoded_image.SetRtpTimestamp(1);
+ encoded_image.capture_time_ms_ = 2;
+ encoded_image._frameType = VideoFrameType::kVideoFrameKey;
+ encoded_image.SetEncodedData(EncodedImageBuffer::Create(&kPayload, 1));
+
+ // Send two tiny images, mapping to two RTP packets. Capture sequence numbers.
+ std::vector<uint16_t> rtp_sequence_numbers;
+ std::vector<uint16_t> transport_sequence_numbers;
+ std::vector<uint16_t> base_sequence_numbers;
+ EXPECT_CALL(test.transport(), SendRtp)
+ .Times(2)
+ .WillRepeatedly([&rtp_sequence_numbers, &transport_sequence_numbers](
+ rtc::ArrayView<const uint8_t> packet,
+ const PacketOptions& options) {
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet));
+ rtp_sequence_numbers.push_back(rtp_packet.SequenceNumber());
+ transport_sequence_numbers.push_back(options.packet_id);
+ return true;
+ });
+ CodecSpecificInfo key_codec_info;
+ key_codec_info.codecType = kVideoCodecVP8;
+ key_codec_info.codecSpecific.VP8.temporalIdx = 0;
+ EXPECT_EQ(EncodedImageCallback::Result::OK,
+ test.router()->OnEncodedImage(
+ encoded_image, &key_codec_info).error);
+ encoded_image.SetRtpTimestamp(2);
+ encoded_image.capture_time_ms_ = 3;
+ encoded_image._frameType = VideoFrameType::kVideoFrameDelta;
+ CodecSpecificInfo delta_codec_info;
+ delta_codec_info.codecType = kVideoCodecVP8;
+ delta_codec_info.codecSpecific.VP8.temporalIdx = 1;
+ EXPECT_EQ(EncodedImageCallback::Result::OK,
+ test.router()->OnEncodedImage(
+ encoded_image, &delta_codec_info).error);
+
+ test.AdvanceTime(TimeDelta::Millis(33));
+
+  // Construct a NACK message for requesting retransmission of both packets.
+ rtcp::Nack nack;
+ nack.SetMediaSsrc(kSsrc1);
+ nack.SetPacketIds(rtp_sequence_numbers);
+ rtc::Buffer nack_buffer = nack.Build();
+
+ std::vector<uint16_t> retransmitted_rtp_sequence_numbers;
+ EXPECT_CALL(test.transport(), SendRtp)
+ .Times(1)
+ .WillRepeatedly([&retransmitted_rtp_sequence_numbers](
+ rtc::ArrayView<const uint8_t> packet,
+ const PacketOptions& options) {
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet));
+ EXPECT_EQ(rtp_packet.Ssrc(), kRtxSsrc1);
+ // Capture the retransmitted sequence number from the RTX header.
+ rtc::ArrayView<const uint8_t> payload = rtp_packet.payload();
+ retransmitted_rtp_sequence_numbers.push_back(
+ ByteReader<uint16_t>::ReadBigEndian(payload.data()));
+ return true;
+ });
+ test.router()->DeliverRtcp(nack_buffer.data(), nack_buffer.size());
+ test.AdvanceTime(TimeDelta::Millis(33));
+
+ // Verify that only base layer packet was retransmitted.
+ std::vector<uint16_t> base_rtp_sequence_numbers(rtp_sequence_numbers.begin(),
+ rtp_sequence_numbers.begin() + 1);
+ EXPECT_EQ(retransmitted_rtp_sequence_numbers, base_rtp_sequence_numbers);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/rtx_receive_stream.cc b/third_party/libwebrtc/call/rtx_receive_stream.cc
new file mode 100644
index 0000000000..6c5fa3f859
--- /dev/null
+++ b/third_party/libwebrtc/call/rtx_receive_stream.cc
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/rtx_receive_stream.h"
+
+#include <string.h>
+
+#include <utility>
+
+#include "api/array_view.h"
+#include "modules/rtp_rtcp/include/receive_statistics.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+// Constructs an RTX receive stream that restores media packets and forwards
+// them to `media_sink`. `associated_payload_types` maps RTX payload type ->
+// media payload type; `media_ssrc` is stamped onto restored packets.
+RtxReceiveStream::RtxReceiveStream(
+    RtpPacketSinkInterface* media_sink,
+    std::map<int, int> associated_payload_types,
+    uint32_t media_ssrc,
+    ReceiveStatistics* rtp_receive_statistics /* = nullptr */)
+    : media_sink_(media_sink),
+      associated_payload_types_(std::move(associated_payload_types)),
+      media_ssrc_(media_ssrc),
+      rtp_receive_statistics_(rtp_receive_statistics) {
+  // Detach so the checker re-binds to whichever sequence first calls
+  // OnRtpPacket()/SetAssociatedPayloadTypes(), which may differ from the
+  // construction sequence.
+  packet_checker_.Detach();
+  // An empty mapping means every RTX packet will be dropped as "unknown
+  // payload type"; warn since that is almost certainly a misconfiguration.
+  if (associated_payload_types_.empty()) {
+    RTC_LOG(LS_WARNING)
+        << "RtxReceiveStream created with empty payload type mapping.";
+  }
+}
+
+RtxReceiveStream::~RtxReceiveStream() = default;
+
+// Replaces the rtx->media payload type mapping. Must run on the same
+// sequence as OnRtpPacket (enforced by `packet_checker_`).
+void RtxReceiveStream::SetAssociatedPayloadTypes(
+    std::map<int, int> associated_payload_types) {
+  RTC_DCHECK_RUN_ON(&packet_checker_);
+  associated_payload_types_ = std::move(associated_payload_types);
+}
+
+// Restores the original media packet carried inside `rtx_packet` and forwards
+// it to the media sink. RTX format (RFC 4588): the first two payload bytes
+// hold the original sequence number (big endian); the rest is the original
+// media payload.
+void RtxReceiveStream::OnRtpPacket(const RtpPacketReceived& rtx_packet) {
+  RTC_DCHECK_RUN_ON(&packet_checker_);
+  if (rtp_receive_statistics_) {
+    rtp_receive_statistics_->OnRtpPacket(rtx_packet);
+  }
+  rtc::ArrayView<const uint8_t> payload = rtx_packet.payload();
+
+  // Too short to contain the RTX header; nothing to restore.
+  if (payload.size() < kRtxHeaderSize) {
+    return;
+  }
+
+  auto it = associated_payload_types_.find(rtx_packet.PayloadType());
+  if (it == associated_payload_types_.end()) {
+    RTC_DLOG(LS_VERBOSE) << "Unknown payload type "
+                         << static_cast<int>(rtx_packet.PayloadType())
+                         << " on rtx ssrc " << rtx_packet.Ssrc();
+    return;
+  }
+  // Copies header fields and extensions; ssrc/seqno/payload type are then
+  // overwritten with the restored media values below.
+  RtpPacketReceived media_packet;
+  media_packet.CopyHeaderFrom(rtx_packet);
+
+  media_packet.SetSsrc(media_ssrc_);
+  // Original sequence number, big endian, from the RTX header.
+  media_packet.SetSequenceNumber((payload[0] << 8) + payload[1]);
+  media_packet.SetPayloadType(it->second);
+  media_packet.set_recovered(true);
+  media_packet.set_arrival_time(rtx_packet.arrival_time());
+
+  // Skip the RTX header.
+  rtc::ArrayView<const uint8_t> rtx_payload = payload.subview(kRtxHeaderSize);
+
+  uint8_t* media_payload = media_packet.AllocatePayload(rtx_payload.size());
+  RTC_DCHECK(media_payload != nullptr);
+
+  memcpy(media_payload, rtx_payload.data(), rtx_payload.size());
+
+  media_sink_->OnRtpPacket(media_packet);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/rtx_receive_stream.h b/third_party/libwebrtc/call/rtx_receive_stream.h
new file mode 100644
index 0000000000..79b03d306b
--- /dev/null
+++ b/third_party/libwebrtc/call/rtx_receive_stream.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef CALL_RTX_RECEIVE_STREAM_H_
+#define CALL_RTX_RECEIVE_STREAM_H_
+
+#include <cstdint>
+#include <map>
+
+#include "api/sequence_checker.h"
+#include "call/rtp_packet_sink_interface.h"
+#include "rtc_base/system/no_unique_address.h"
+
+namespace webrtc {
+
+class ReceiveStatistics;
+
+// This class is responsible for RTX decapsulation. The resulting media packets
+// are passed on to a sink representing the associated media stream.
+class RtxReceiveStream : public RtpPacketSinkInterface {
+ public:
+  RtxReceiveStream(RtpPacketSinkInterface* media_sink,
+                   std::map<int, int> associated_payload_types,
+                   uint32_t media_ssrc,
+                   // TODO(nisse): Delete this argument, and
+                   // corresponding member variable, by moving the
+                   // responsibility for rtcp feedback to
+                   // RtpStreamReceiverController.
+                   ReceiveStatistics* rtp_receive_statistics = nullptr);
+  ~RtxReceiveStream() override;
+
+  // Update payload types post construction. Must be called from the same
+  // calling context as `OnRtpPacket` is called on.
+  void SetAssociatedPayloadTypes(std::map<int, int> associated_payload_types);
+
+  // RtpPacketSinkInterface.
+  // Restores the media packet carried in `packet` (an RTX packet) and
+  // forwards it to `media_sink_`; packets with unknown rtx payload types or
+  // truncated payloads are dropped.
+  void OnRtpPacket(const RtpPacketReceived& packet) override;
+
+ private:
+  // Binds OnRtpPacket/SetAssociatedPayloadTypes to a single sequence.
+  RTC_NO_UNIQUE_ADDRESS SequenceChecker packet_checker_;
+  RtpPacketSinkInterface* const media_sink_;
+  // Map from rtx payload type -> media payload type.
+  std::map<int, int> associated_payload_types_ RTC_GUARDED_BY(&packet_checker_);
+  // TODO(nisse): Ultimately, the media receive stream shouldn't care about the
+  // ssrc, and we should delete this.
+  const uint32_t media_ssrc_;
+  ReceiveStatistics* const rtp_receive_statistics_;
+};
+
+} // namespace webrtc
+
+#endif // CALL_RTX_RECEIVE_STREAM_H_
diff --git a/third_party/libwebrtc/call/rtx_receive_stream_unittest.cc b/third_party/libwebrtc/call/rtx_receive_stream_unittest.cc
new file mode 100644
index 0000000000..b06990820f
--- /dev/null
+++ b/third_party/libwebrtc/call/rtx_receive_stream_unittest.cc
@@ -0,0 +1,271 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/rtx_receive_stream.h"
+
+#include "call/test/mock_rtp_packet_sink_interface.h"
+#include "modules/rtp_rtcp/include/rtp_header_extension_map.h"
+#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+using ::testing::_;
+using ::testing::Property;
+using ::testing::StrictMock;
+
+constexpr int kMediaPayloadType = 100;
+constexpr int kRtxPayloadType = 98;
+constexpr int kUnknownPayloadType = 90;
+constexpr uint32_t kMediaSSRC = 0x3333333;
+constexpr uint16_t kMediaSeqno = 0x5657;
+
+constexpr uint8_t kRtxPacket[] = {
+ 0x80, // Version 2.
+ 98, // Payload type.
+ 0x12,
+ 0x34, // Seqno.
+ 0x11,
+ 0x11,
+ 0x11,
+ 0x11, // Timestamp.
+ 0x22,
+ 0x22,
+ 0x22,
+ 0x22, // SSRC.
+ // RTX header.
+ 0x56,
+ 0x57, // Orig seqno.
+ // Payload.
+ 0xee,
+};
+
+constexpr uint8_t kRtxPacketWithPadding[] = {
+ 0xa0, // Version 2, P set
+ 98, // Payload type.
+ 0x12,
+ 0x34, // Seqno.
+ 0x11,
+ 0x11,
+ 0x11,
+ 0x11, // Timestamp.
+ 0x22,
+ 0x22,
+ 0x22,
+ 0x22, // SSRC.
+ // RTX header.
+ 0x56,
+ 0x57, // Orig seqno.
+ // Padding
+ 0x1,
+};
+
+constexpr uint8_t kRtxPacketWithCVO[] = {
+ 0x90, // Version 2, X set.
+ 98, // Payload type.
+ 0x12,
+ 0x34, // Seqno.
+ 0x11,
+ 0x11,
+ 0x11,
+ 0x11, // Timestamp.
+ 0x22,
+ 0x22,
+ 0x22,
+ 0x22, // SSRC.
+ 0xbe,
+ 0xde,
+ 0x00,
+ 0x01, // Extension header.
+ 0x30,
+ 0x01,
+ 0x00,
+ 0x00, // 90 degree rotation.
+ // RTX header.
+ 0x56,
+ 0x57, // Orig seqno.
+ // Payload.
+ 0xee,
+};
+
+// Returns the rtx payload type -> media payload type mapping shared by the
+// tests below.
+std::map<int, int> PayloadTypeMapping() {
+  return {{kRtxPayloadType, kMediaPayloadType}};
+}
+
+// Returns a view of `a` with the last `drop` elements removed.
+template <typename T>
+rtc::ArrayView<T> Truncate(rtc::ArrayView<T> a, size_t drop) {
+  return a.subview(0, a.size() - drop);
+}
+
+} // namespace
+
+// The restored packet must carry the original seqno, the media ssrc, the
+// mapped media payload type, and the payload with the RTX header stripped.
+TEST(RtxReceiveStreamTest, RestoresPacketPayload) {
+  StrictMock<MockRtpPacketSink> media_sink;
+  RtxReceiveStream rtx_sink(&media_sink, PayloadTypeMapping(), kMediaSSRC);
+  RtpPacketReceived rtx_packet;
+  EXPECT_TRUE(rtx_packet.Parse(rtc::ArrayView<const uint8_t>(kRtxPacket)));
+
+  EXPECT_CALL(media_sink, OnRtpPacket)
+      .WillOnce([](const RtpPacketReceived& packet) {
+        EXPECT_EQ(packet.SequenceNumber(), kMediaSeqno);
+        EXPECT_EQ(packet.Ssrc(), kMediaSSRC);
+        EXPECT_EQ(packet.PayloadType(), kMediaPayloadType);
+        EXPECT_THAT(packet.payload(), ::testing::ElementsAre(0xee));
+      });
+
+  rtx_sink.OnRtpPacket(rtx_packet);
+}
+
+// Restored packets must be flagged as recovered even though the incoming RTX
+// packet is not.
+TEST(RtxReceiveStreamTest, SetsRecoveredFlag) {
+  StrictMock<MockRtpPacketSink> media_sink;
+  RtxReceiveStream rtx_sink(&media_sink, PayloadTypeMapping(), kMediaSSRC);
+  RtpPacketReceived rtx_packet;
+  EXPECT_TRUE(rtx_packet.Parse(rtc::ArrayView<const uint8_t>(kRtxPacket)));
+  EXPECT_FALSE(rtx_packet.recovered());
+  EXPECT_CALL(media_sink, OnRtpPacket)
+      .WillOnce([](const RtpPacketReceived& packet) {
+        EXPECT_TRUE(packet.recovered());
+      });
+
+  rtx_sink.OnRtpPacket(rtx_packet);
+}
+
+// A packet whose rtx payload type is not in the mapping is dropped; the
+// StrictMock fails the test if OnRtpPacket is forwarded anyway.
+TEST(RtxReceiveStreamTest, IgnoresUnknownPayloadType) {
+  StrictMock<MockRtpPacketSink> media_sink;
+  const std::map<int, int> payload_type_mapping = {
+      {kUnknownPayloadType, kMediaPayloadType}};
+
+  RtxReceiveStream rtx_sink(&media_sink, payload_type_mapping, kMediaSSRC);
+  RtpPacketReceived rtx_packet;
+  EXPECT_TRUE(rtx_packet.Parse(rtc::ArrayView<const uint8_t>(kRtxPacket)));
+  rtx_sink.OnRtpPacket(rtx_packet);
+}
+
+// A packet too short to contain the 2-byte RTX header is dropped silently;
+// the StrictMock fails the test if it is forwarded.
+TEST(RtxReceiveStreamTest, IgnoresTruncatedPacket) {
+  StrictMock<MockRtpPacketSink> media_sink;
+  RtxReceiveStream rtx_sink(&media_sink, PayloadTypeMapping(), kMediaSSRC);
+  RtpPacketReceived rtx_packet;
+  EXPECT_TRUE(
+      rtx_packet.Parse(Truncate(rtc::ArrayView<const uint8_t>(kRtxPacket), 2)));
+  rtx_sink.OnRtpPacket(rtx_packet);
+}
+
+// Header extensions (here: video rotation / CVO) must survive the
+// RTX -> media packet restoration via CopyHeaderFrom.
+TEST(RtxReceiveStreamTest, CopiesRtpHeaderExtensions) {
+  StrictMock<MockRtpPacketSink> media_sink;
+  RtxReceiveStream rtx_sink(&media_sink, PayloadTypeMapping(), kMediaSSRC);
+  RtpHeaderExtensionMap extension_map;
+  extension_map.RegisterByType(3, kRtpExtensionVideoRotation);
+  RtpPacketReceived rtx_packet(&extension_map);
+  EXPECT_TRUE(
+      rtx_packet.Parse(rtc::ArrayView<const uint8_t>(kRtxPacketWithCVO)));
+
+  VideoRotation rotation = kVideoRotation_0;
+  EXPECT_TRUE(rtx_packet.GetExtension<VideoOrientation>(&rotation));
+  EXPECT_EQ(kVideoRotation_90, rotation);
+
+  EXPECT_CALL(media_sink, OnRtpPacket)
+      .WillOnce([](const RtpPacketReceived& packet) {
+        EXPECT_EQ(packet.SequenceNumber(), kMediaSeqno);
+        EXPECT_EQ(packet.Ssrc(), kMediaSSRC);
+        EXPECT_EQ(packet.PayloadType(), kMediaPayloadType);
+        EXPECT_THAT(packet.payload(), ::testing::ElementsAre(0xee));
+        VideoRotation rotation = kVideoRotation_0;
+        EXPECT_TRUE(packet.GetExtension<VideoOrientation>(&rotation));
+        EXPECT_EQ(rotation, kVideoRotation_90);
+      });
+
+  rtx_sink.OnRtpPacket(rtx_packet);
+}
+
+// The arrival time of the RTX packet is copied onto the restored packet.
+TEST(RtxReceiveStreamTest, PropagatesArrivalTime) {
+  StrictMock<MockRtpPacketSink> media_sink;
+  RtxReceiveStream rtx_sink(&media_sink, PayloadTypeMapping(), kMediaSSRC);
+  RtpPacketReceived rtx_packet(nullptr);
+  EXPECT_TRUE(rtx_packet.Parse(rtc::ArrayView<const uint8_t>(kRtxPacket)));
+  rtx_packet.set_arrival_time(Timestamp::Millis(123));
+  EXPECT_CALL(media_sink, OnRtpPacket(Property(&RtpPacketReceived::arrival_time,
+                                               Timestamp::Millis(123))));
+  rtx_sink.OnRtpPacket(rtx_packet);
+}
+
+// The full payload of a large (2000-byte) RTX packet must be restored intact.
+TEST(RtxReceiveStreamTest, SupportsLargePacket) {
+  StrictMock<MockRtpPacketSink> media_sink;
+  RtxReceiveStream rtx_sink(&media_sink, PayloadTypeMapping(), kMediaSSRC);
+  RtpPacketReceived rtx_packet;
+  constexpr int kRtxPacketSize = 2000;
+  // 12-byte RTP header + 2-byte RTX header.
+  constexpr int kRtxPayloadOffset = 14;
+  uint8_t large_rtx_packet[kRtxPacketSize];
+  memcpy(large_rtx_packet, kRtxPacket, sizeof(kRtxPacket));
+  rtc::ArrayView<uint8_t> payload(large_rtx_packet + kRtxPayloadOffset,
+                                  kRtxPacketSize - kRtxPayloadOffset);
+
+  // Fill payload.
+  for (size_t i = 0; i < payload.size(); i++) {
+    payload[i] = i;
+  }
+  EXPECT_TRUE(
+      rtx_packet.Parse(rtc::ArrayView<const uint8_t>(large_rtx_packet)));
+
+  EXPECT_CALL(media_sink, OnRtpPacket)
+      .WillOnce([&](const RtpPacketReceived& packet) {
+        EXPECT_EQ(packet.SequenceNumber(), kMediaSeqno);
+        EXPECT_EQ(packet.Ssrc(), kMediaSSRC);
+        EXPECT_EQ(packet.PayloadType(), kMediaPayloadType);
+        EXPECT_THAT(packet.payload(), ::testing::ElementsAreArray(payload));
+      });
+
+  rtx_sink.OnRtpPacket(rtx_packet);
+}
+
+// With the RTP padding bit set, the trailing padding must be stripped by
+// Parse so that only the real payload is restored and forwarded.
+TEST(RtxReceiveStreamTest, SupportsLargePacketWithPadding) {
+  StrictMock<MockRtpPacketSink> media_sink;
+  RtxReceiveStream rtx_sink(&media_sink, PayloadTypeMapping(), kMediaSSRC);
+  RtpPacketReceived rtx_packet;
+  constexpr int kRtxPacketSize = 2000;
+  // 12-byte RTP header + 2-byte RTX header.
+  constexpr int kRtxPayloadOffset = 14;
+  constexpr int kRtxPaddingSize = 50;
+  uint8_t large_rtx_packet[kRtxPacketSize];
+  memcpy(large_rtx_packet, kRtxPacketWithPadding,
+         sizeof(kRtxPacketWithPadding));
+  rtc::ArrayView<uint8_t> payload(
+      large_rtx_packet + kRtxPayloadOffset,
+      kRtxPacketSize - kRtxPayloadOffset - kRtxPaddingSize);
+  rtc::ArrayView<uint8_t> padding(
+      large_rtx_packet + kRtxPacketSize - kRtxPaddingSize, kRtxPaddingSize);
+
+  // Fill payload.
+  for (size_t i = 0; i < payload.size(); i++) {
+    payload[i] = i;
+  }
+  // Fill padding. Only value of last padding byte matters.
+  for (size_t i = 0; i < padding.size(); i++) {
+    padding[i] = kRtxPaddingSize;
+  }
+
+  EXPECT_TRUE(
+      rtx_packet.Parse(rtc::ArrayView<const uint8_t>(large_rtx_packet)));
+
+  EXPECT_CALL(media_sink, OnRtpPacket)
+      .WillOnce([&](const RtpPacketReceived& packet) {
+        EXPECT_EQ(packet.SequenceNumber(), kMediaSeqno);
+        EXPECT_EQ(packet.Ssrc(), kMediaSSRC);
+        EXPECT_EQ(packet.PayloadType(), kMediaPayloadType);
+        EXPECT_THAT(packet.payload(), ::testing::ElementsAreArray(payload));
+      });
+
+  rtx_sink.OnRtpPacket(rtx_packet);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/simulated_network.cc b/third_party/libwebrtc/call/simulated_network.cc
new file mode 100644
index 0000000000..8f9d76dfe3
--- /dev/null
+++ b/third_party/libwebrtc/call/simulated_network.cc
@@ -0,0 +1,276 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/simulated_network.h"
+
+#include <algorithm>
+#include <cmath>
+#include <cstdint>
+#include <utility>
+
+#include "api/units/data_rate.h"
+#include "api/units/data_size.h"
+#include "api/units/time_delta.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+// Calculate the time (in microseconds) that takes to send N `bits` on a
+// network with link capacity equal to `capacity_kbps` starting at time
+// `start_time_us`.
+// Calculate the time (in microseconds) that takes to send N `bits` on a
+// network with link capacity equal to `capacity_kbps` starting at time
+// `start_time_us`.
+int64_t CalculateArrivalTimeUs(int64_t start_time_us,
+                               int64_t bits,
+                               int capacity_kbps) {
+  // If capacity is 0, the link capacity is assumed to be infinite.
+  if (capacity_kbps == 0) {
+    return start_time_us;
+  }
+  // Adding `capacity - 1` to the numerator rounds the extra delay caused by
+  // capacity constraints up to an integral microsecond. Sending 0 bits takes 0
+  // extra time, while sending 1 bit gets rounded up to 1 (the multiplication by
+  // 1000 is because capacity is in kbps).
+  // The factor 1000 comes from 10^6 / 10^3, where 10^6 is due to the time unit
+  // being us and 10^3 is due to the rate unit being kbps.
+  return start_time_us + ((1000 * bits + capacity_kbps - 1) / capacity_kbps);
+}
+
+} // namespace
+
+// `random_seed` defaults to 1 (see header) so simulations are reproducible;
+// SetConfig derives the loss-model probabilities from `config`.
+SimulatedNetwork::SimulatedNetwork(Config config, uint64_t random_seed)
+    : random_(random_seed),
+      bursting_(false),
+      last_enqueue_time_us_(0),
+      last_capacity_link_exit_time_(0) {
+  SetConfig(config);
+}
+
+SimulatedNetwork::~SimulatedNetwork() = default;
+
+// Installs `config` and precomputes the Gilbert-Elliott loss-model
+// probabilities (`prob_loss_bursting`, `prob_start_bursting`) used by
+// UpdateCapacityQueue.
+void SimulatedNetwork::SetConfig(const Config& config) {
+  MutexLock lock(&config_lock_);
+  config_state_.config = config;  // Shallow copy of the struct.
+  double prob_loss = config.loss_percent / 100.0;
+  if (config_state_.config.avg_burst_loss_length == -1) {
+    // Uniform loss
+    config_state_.prob_loss_bursting = prob_loss;
+    config_state_.prob_start_bursting = prob_loss;
+  } else {
+    // Lose packets according to a gilbert-elliot model.
+    int avg_burst_loss_length = config.avg_burst_loss_length;
+    // Minimum average burst length for the requested overall loss rate to be
+    // achievable.
+    int min_avg_burst_loss_length = std::ceil(prob_loss / (1 - prob_loss));
+
+    // NOTE(review): "%%" is streamed literally by operator<< (it is not a
+    // printf format), so the message prints two percent signs; a single "%"
+    // was probably intended — confirm upstream.
+    RTC_CHECK_GT(avg_burst_loss_length, min_avg_burst_loss_length)
+        << "For a total packet loss of " << config.loss_percent
+        << "%% then"
+           " avg_burst_loss_length must be "
+        << min_avg_burst_loss_length + 1 << " or higher.";
+
+    config_state_.prob_loss_bursting = (1.0 - 1.0 / avg_burst_loss_length);
+    config_state_.prob_start_bursting =
+        prob_loss / (1 - prob_loss) / avg_burst_loss_length;
+  }
+}
+
+// Applies `config_modifier` to the current config under the config lock.
+// NOTE(review): unlike SetConfig, this does not recompute the loss-model
+// probabilities — presumably callers only modify non-loss fields; confirm.
+void SimulatedNetwork::UpdateConfig(
+    std::function<void(BuiltInNetworkBehaviorConfig*)> config_modifier) {
+  MutexLock lock(&config_lock_);
+  config_modifier(&config_state_.config);
+}
+
+// Delays delivery of packets leaving the capacity link until `until_us`
+// (implemented as extra time in the delay queue; see UpdateCapacityQueue).
+void SimulatedNetwork::PauseTransmissionUntil(int64_t until_us) {
+  MutexLock lock(&config_lock_);
+  config_state_.pause_transmission_until_us = until_us;
+}
+
+// Enqueues `packet` onto the capacity link. Returns false (packet dropped)
+// when the bounded queue is full; otherwise computes the packet's exit time
+// from the link capacity and schedules the next processing time.
+bool SimulatedNetwork::EnqueuePacket(PacketInFlightInfo packet) {
+  RTC_DCHECK_RUNS_SERIALIZED(&process_checker_);
+
+  // Check that old packets don't get enqueued, the SimulatedNetwork expects
+  // that the packets' send time is monotonically increasing. The tolerance for
+  // non-monotonic enqueue events is 0.5 ms because on multi core systems
+  // clock_gettime(CLOCK_MONOTONIC) can show non-monotonic behaviour between
+  // threads running on different cores.
+  // TODO(bugs.webrtc.org/14525): Open a bug on this with the goal to re-enable
+  // the DCHECK.
+  // At the moment, we see more than 130ms between non-monotonic events, which
+  // is more than expected.
+  // RTC_DCHECK_GE(packet.send_time_us - last_enqueue_time_us_, -2000);
+
+  ConfigState state = GetConfigState();
+
+  // If the network config requires packet overhead, let's apply it as early as
+  // possible.
+  packet.size += state.config.packet_overhead;
+
+  // If `queue_length_packets` is 0, the queue size is infinite.
+  if (state.config.queue_length_packets > 0 &&
+      capacity_link_.size() >= state.config.queue_length_packets) {
+    // Too many packets on the link, drop this one.
+    return false;
+  }
+
+  // If the packet has been sent before the previous packet in the network left
+  // the capacity queue, let's ensure the new packet will start its trip in the
+  // network after the last bit of the previous packet has left it.
+  int64_t packet_send_time_us = packet.send_time_us;
+  if (!capacity_link_.empty()) {
+    packet_send_time_us =
+        std::max(packet_send_time_us, capacity_link_.back().arrival_time_us);
+  }
+  capacity_link_.push({.packet = packet,
+                       .arrival_time_us = CalculateArrivalTimeUs(
+                           packet_send_time_us, packet.size * 8,
+                           state.config.link_capacity_kbps)});
+
+  // Only update `next_process_time_us_` if not already set (if set, there is no
+  // way that a new packet will make the `next_process_time_us_` change).
+  if (!next_process_time_us_) {
+    RTC_DCHECK_EQ(capacity_link_.size(), 1);
+    next_process_time_us_ = capacity_link_.front().arrival_time_us;
+  }
+
+  last_enqueue_time_us_ = packet.send_time_us;
+  return true;
+}
+
+// Returns the next time DequeueDeliverablePackets may yield packets, or
+// nullopt when the network holds no packets at all.
+absl::optional<int64_t> SimulatedNetwork::NextDeliveryTimeUs() const {
+  RTC_DCHECK_RUNS_SERIALIZED(&process_checker_);
+  return next_process_time_us_;
+}
+
+// Moves every packet whose capacity-link exit time is <= `time_now_us` from
+// `capacity_link_` to `delay_link_`, applying pause, loss (Gilbert-Elliott)
+// and jitter on the way, and re-deriving arrival times from the current
+// `state` (the config may have changed since the packets were enqueued).
+void SimulatedNetwork::UpdateCapacityQueue(ConfigState state,
+                                           int64_t time_now_us) {
+  // If there is at least one packet in the `capacity_link_`, let's update its
+  // arrival time to take into account changes in the network configuration
+  // since the last call to UpdateCapacityQueue.
+  if (!capacity_link_.empty()) {
+    capacity_link_.front().arrival_time_us = CalculateArrivalTimeUs(
+        std::max(capacity_link_.front().packet.send_time_us,
+                 last_capacity_link_exit_time_),
+        capacity_link_.front().packet.size * 8,
+        state.config.link_capacity_kbps);
+  }
+
+  // The capacity link is empty or the first packet is not expected to exit yet.
+  if (capacity_link_.empty() ||
+      time_now_us < capacity_link_.front().arrival_time_us) {
+    return;
+  }
+  bool reorder_packets = false;
+
+  do {
+    // Time to get this packet (the original or just updated arrival_time_us is
+    // smaller or equal to time_now_us).
+    PacketInfo packet = capacity_link_.front();
+    capacity_link_.pop();
+
+    // If the network is paused, the pause will be implemented as an extra delay
+    // to be spent in the `delay_link_` queue.
+    if (state.pause_transmission_until_us > packet.arrival_time_us) {
+      packet.arrival_time_us = state.pause_transmission_until_us;
+    }
+
+    // Store the original arrival time, before applying packet loss or extra
+    // delay. This is needed to know when it is the first available time the
+    // next packet in the `capacity_link_` queue can start transmitting.
+    last_capacity_link_exit_time_ = packet.arrival_time_us;
+
+    // Drop packets at an average rate of `state.config.loss_percent` with
+    // and average loss burst length of `state.config.avg_burst_loss_length`.
+    if ((bursting_ && random_.Rand<double>() < state.prob_loss_bursting) ||
+        (!bursting_ && random_.Rand<double>() < state.prob_start_bursting)) {
+      bursting_ = true;
+      // Mark as lost; DequeueDeliverablePackets still reports the packet with
+      // this sentinel receive time.
+      packet.arrival_time_us = PacketDeliveryInfo::kNotReceived;
+    } else {
+      // If packets are not dropped, apply extra delay as configured.
+      bursting_ = false;
+      // Gaussian jitter, clamped to be non-negative.
+      int64_t arrival_time_jitter_us = std::max(
+          random_.Gaussian(state.config.queue_delay_ms * 1000,
+                           state.config.delay_standard_deviation_ms * 1000),
+          0.0);
+
+      // If reordering is not allowed then adjust arrival_time_jitter
+      // to make sure all packets are sent in order.
+      int64_t last_arrival_time_us =
+          delay_link_.empty() ? -1 : delay_link_.back().arrival_time_us;
+      if (!state.config.allow_reordering && !delay_link_.empty() &&
+          packet.arrival_time_us + arrival_time_jitter_us <
+              last_arrival_time_us) {
+        arrival_time_jitter_us = last_arrival_time_us - packet.arrival_time_us;
+      }
+      packet.arrival_time_us += arrival_time_jitter_us;
+
+      // Optimization: Schedule a reorder only when a packet will exit before
+      // the one in front.
+      if (last_arrival_time_us > packet.arrival_time_us) {
+        reorder_packets = true;
+      }
+    }
+    delay_link_.emplace_back(packet);
+
+    // If there are no packets in the queue, there is nothing else to do.
+    if (capacity_link_.empty()) {
+      break;
+    }
+    // If instead there is another packet in the `capacity_link_` queue, let's
+    // calculate its arrival_time_us based on the latest config (which might
+    // have been changed since it was enqueued).
+    int64_t next_start = std::max(last_capacity_link_exit_time_,
+                                  capacity_link_.front().packet.send_time_us);
+    capacity_link_.front().arrival_time_us = CalculateArrivalTimeUs(
+        next_start, capacity_link_.front().packet.size * 8,
+        state.config.link_capacity_kbps);
+    // And if the next packet in the queue needs to exit, let's dequeue it.
+  } while (capacity_link_.front().arrival_time_us <= time_now_us);
+
+  if (state.config.allow_reordering && reorder_packets) {
+    // Packets arrived out of order and since the network config allows
+    // reordering, sort them by arrival_time_us so they are delivered in
+    // arrival order (which may differ from send order).
+    std::stable_sort(delay_link_.begin(), delay_link_.end(),
+                     [](const PacketInfo& p1, const PacketInfo& p2) {
+                       return p1.arrival_time_us < p2.arrival_time_us;
+                     });
+  }
+}
+
+// Returns a snapshot of the current config state under the config lock, so
+// processing can proceed without holding the lock.
+SimulatedNetwork::ConfigState SimulatedNetwork::GetConfigState() const {
+  MutexLock lock(&config_lock_);
+  return config_state_;
+}
+
+// Returns every packet whose arrival time is <= `receive_time_us` (lost
+// packets are included with the kNotReceived sentinel) and updates
+// `next_process_time_us_` for NextDeliveryTimeUs.
+std::vector<PacketDeliveryInfo> SimulatedNetwork::DequeueDeliverablePackets(
+    int64_t receive_time_us) {
+  RTC_DCHECK_RUNS_SERIALIZED(&process_checker_);
+
+  // First move due packets out of the capacity link, applying loss/jitter.
+  UpdateCapacityQueue(GetConfigState(), receive_time_us);
+  std::vector<PacketDeliveryInfo> packets_to_deliver;
+
+  // Check the extra delay queue.
+  while (!delay_link_.empty() &&
+         receive_time_us >= delay_link_.front().arrival_time_us) {
+    PacketInfo packet_info = delay_link_.front();
+    packets_to_deliver.emplace_back(
+        PacketDeliveryInfo(packet_info.packet, packet_info.arrival_time_us));
+    delay_link_.pop_front();
+  }
+
+  // Schedule the next processing time from whichever queue is due first
+  // (delay queue takes precedence), or clear it when the network is empty.
+  if (!delay_link_.empty()) {
+    next_process_time_us_ = delay_link_.front().arrival_time_us;
+  } else if (!capacity_link_.empty()) {
+    next_process_time_us_ = capacity_link_.front().arrival_time_us;
+  } else {
+    next_process_time_us_.reset();
+  }
+  return packets_to_deliver;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/simulated_network.h b/third_party/libwebrtc/call/simulated_network.h
new file mode 100644
index 0000000000..8597367add
--- /dev/null
+++ b/third_party/libwebrtc/call/simulated_network.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef CALL_SIMULATED_NETWORK_H_
+#define CALL_SIMULATED_NETWORK_H_
+
+#include <stdint.h>
+
+#include <deque>
+#include <queue>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/sequence_checker.h"
+#include "api/test/simulated_network.h"
+#include "api/units/data_size.h"
+#include "api/units/timestamp.h"
+#include "rtc_base/race_checker.h"
+#include "rtc_base/random.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace webrtc {
+
+// Class simulating a network link.
+//
+// This is a basic implementation of NetworkBehaviorInterface that supports:
+// - Packet loss
+// - Capacity delay
+// - Extra delay with or without packets reorder
+// - Packet overhead
+// - Queue max capacity
+class SimulatedNetwork : public SimulatedNetworkInterface {
+ public:
+  using Config = BuiltInNetworkBehaviorConfig;
+  explicit SimulatedNetwork(Config config, uint64_t random_seed = 1);
+  ~SimulatedNetwork() override;
+
+  // Sets a new configuration. This will affect packets that will be sent with
+  // EnqueuePacket but also packets in the network that have not left the
+  // network emulation. Packets that are ready to be retrieved by
+  // DequeueDeliverablePackets are not affected by the new configuration.
+  // TODO(bugs.webrtc.org/14525): Fix SetConfig and make it apply only to the
+  // part of the packet that is currently being sent (instead of applying to
+  // all of it).
+  void SetConfig(const Config& config) override;
+  void UpdateConfig(std::function<void(BuiltInNetworkBehaviorConfig*)>
+                        config_modifier) override;
+  void PauseTransmissionUntil(int64_t until_us) override;
+
+  // NetworkBehaviorInterface
+  bool EnqueuePacket(PacketInFlightInfo packet) override;
+  std::vector<PacketDeliveryInfo> DequeueDeliverablePackets(
+      int64_t receive_time_us) override;
+
+  absl::optional<int64_t> NextDeliveryTimeUs() const override;
+
+ private:
+  struct PacketInfo {
+    PacketInFlightInfo packet;
+    // Time when the packet has left (or will leave) the network.
+    int64_t arrival_time_us;
+  };
+  // Contains current configuration state.
+  struct ConfigState {
+    // Static link configuration.
+    Config config;
+    // The probability to drop the packet if we are currently dropping a
+    // burst of packets.
+    double prob_loss_bursting;
+    // The probability to drop a burst of packets.
+    double prob_start_bursting;
+    // Used for temporary delay spikes.
+    int64_t pause_transmission_until_us = 0;
+  };
+
+  // Moves packets from capacity- to delay link.
+  void UpdateCapacityQueue(ConfigState state, int64_t time_now_us)
+      RTC_RUN_ON(&process_checker_);
+  // Returns a snapshot of the config state under `config_lock_`.
+  ConfigState GetConfigState() const;
+
+  mutable Mutex config_lock_;
+
+  // Guards the data structures involved in delay and loss processing, such as
+  // the packet queues.
+  rtc::RaceChecker process_checker_;
+  // Models the capacity of the network by rejecting packets if the queue is
+  // full and keeping them in the queue until they are ready to exit (according
+  // to the link capacity, which cannot be violated, e.g. a 1 kbps link will
+  // only be able to deliver 1000 bits per second).
+  //
+  // Invariant:
+  // The head of the `capacity_link_` has arrival_time_us correctly set to the
+  // time when the packet is supposed to be delivered (without accounting
+  // potential packet loss or potential extra delay and without accounting for a
+  // new configuration of the network, which requires a re-computation of the
+  // arrival_time_us).
+  std::queue<PacketInfo> capacity_link_ RTC_GUARDED_BY(process_checker_);
+  // Models the extra delay of the network (see `queue_delay_ms`
+  // and `delay_standard_deviation_ms` in BuiltInNetworkBehaviorConfig), packets
+  // in the `delay_link_` have technically already left the network and don't
+  // use its capacity but they are not delivered yet.
+  std::deque<PacketInfo> delay_link_ RTC_GUARDED_BY(process_checker_);
+  // Represents the next moment in time when the network is supposed to deliver
+  // packets to the client (either by pulling them from `delay_link_` or
+  // `capacity_link_` or both).
+  absl::optional<int64_t> next_process_time_us_
+      RTC_GUARDED_BY(process_checker_);
+
+  ConfigState config_state_ RTC_GUARDED_BY(config_lock_);
+
+  Random random_ RTC_GUARDED_BY(process_checker_);
+  // Are we currently dropping a burst of packets?
+  bool bursting_;
+
+  // The send time of the last enqueued packet, this is only used to check that
+  // the send time of enqueued packets is monotonically increasing.
+  int64_t last_enqueue_time_us_;
+
+  // The last time a packet left the capacity_link_ (used to enforce
+  // the capacity of the link and avoid packets being sent before
+  // the link is free).
+  int64_t last_capacity_link_exit_time_;
+};
+
+} // namespace webrtc
+
+#endif // CALL_SIMULATED_NETWORK_H_
diff --git a/third_party/libwebrtc/call/simulated_network_unittest.cc b/third_party/libwebrtc/call/simulated_network_unittest.cc
new file mode 100644
index 0000000000..825dd6d065
--- /dev/null
+++ b/third_party/libwebrtc/call/simulated_network_unittest.cc
@@ -0,0 +1,513 @@
+/*
+ * Copyright 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "call/simulated_network.h"
+
+#include <algorithm>
+#include <map>
+#include <optional>
+#include <set>
+#include <vector>
+
+#include "absl/algorithm/container.h"
+#include "api/test/simulated_network.h"
+#include "api/units/data_rate.h"
+#include "api/units/time_delta.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::ElementsAre;
+
+PacketInFlightInfo PacketWithSize(size_t size) {
+ return PacketInFlightInfo(/*size=*/size, /*send_time_us=*/0, /*packet_id=*/1);
+}
+
+TEST(SimulatedNetworkTest, NextDeliveryTimeIsUnknownOnEmptyNetwork) {
+ SimulatedNetwork network = SimulatedNetwork({});
+ EXPECT_EQ(network.NextDeliveryTimeUs(), absl::nullopt);
+}
+
+TEST(SimulatedNetworkTest, EnqueueFirstPacketOnNetworkWithInfiniteCapacity) {
+ // A packet of 1 kB that gets enqueued on a network with infinite capacity
+ // should be ready to exit the network immediately.
+ SimulatedNetwork network = SimulatedNetwork({});
+ ASSERT_TRUE(network.EnqueuePacket(PacketWithSize(1'000)));
+
+ EXPECT_EQ(network.NextDeliveryTimeUs(), 0);
+}
+
+TEST(SimulatedNetworkTest, EnqueueFirstPacketOnNetworkWithLimitedCapacity) {
+ // A packet of 125 bytes that gets enqueued on a network with 1 kbps capacity
+ // should be ready to exit the network in 1 second.
+ SimulatedNetwork network = SimulatedNetwork({.link_capacity_kbps = 1});
+ ASSERT_TRUE(network.EnqueuePacket(PacketWithSize(125)));
+
+ EXPECT_EQ(network.NextDeliveryTimeUs(), TimeDelta::Seconds(1).us());
+}
+
+TEST(SimulatedNetworkTest,
+ EnqueuePacketsButNextDeliveryIsBasedOnFirstEnqueuedPacket) {
+ // A packet of 125 bytes that gets enqueued on a network with 1 kbps capacity
+ // should be ready to exit the network in 1 second.
+ SimulatedNetwork network = SimulatedNetwork({.link_capacity_kbps = 1});
+ ASSERT_TRUE(network.EnqueuePacket(
+ PacketInFlightInfo(/*size=*/125, /*send_time_us=*/0, /*packet_id=*/1)));
+ EXPECT_EQ(network.NextDeliveryTimeUs(), TimeDelta::Seconds(1).us());
+
+ // Enqueuing another packet after 100 us doesn't change the next delivery
+ // time.
+ ASSERT_TRUE(network.EnqueuePacket(
+ PacketInFlightInfo(/*size=*/125, /*send_time_us=*/100, /*packet_id=*/2)));
+ EXPECT_EQ(network.NextDeliveryTimeUs(), TimeDelta::Seconds(1).us());
+
+ // Enqueuing another packet after 2 seconds doesn't change the next delivery
+ // time since the first packet has not left the network yet.
+ ASSERT_TRUE(network.EnqueuePacket(PacketInFlightInfo(
+ /*size=*/125, /*send_time_us=*/TimeDelta::Seconds(2).us(),
+ /*packet_id=*/3)));
+ EXPECT_EQ(network.NextDeliveryTimeUs(), TimeDelta::Seconds(1).us());
+}
+
+TEST(SimulatedNetworkTest, EnqueueFailsWhenQueueLengthIsReached) {
+  SimulatedNetwork network =
+      SimulatedNetwork({.queue_length_packets = 1, .link_capacity_kbps = 1});
+  ASSERT_TRUE(network.EnqueuePacket(
+      PacketInFlightInfo(/*size=*/125, /*send_time_us=*/0, /*packet_id=*/1)));
+
+  // As long as there is 1 packet in the queue, no other packets can be
+  // enqueued; the only way to make space for new packets is calling
+  // DequeueDeliverablePackets at a time greater than or equal to
+  // NextDeliveryTimeUs.
+  EXPECT_FALSE(network.EnqueuePacket(
+      PacketInFlightInfo(/*size=*/125,
+                         /*send_time_us=*/TimeDelta::Seconds(0.5).us(),
+                         /*packet_id=*/2)));
+
+  // Even if the send_time_us is after NextDeliveryTimeUs, it is still not
+  // possible to enqueue a new packet since the client didn't dequeue any
+  // packet from the queue (in this case the client is introducing unbounded
+  // delay but the network cannot do anything about it).
+  EXPECT_FALSE(network.EnqueuePacket(
+      PacketInFlightInfo(/*size=*/125,
+                         /*send_time_us=*/TimeDelta::Seconds(2).us(),
+                         /*packet_id=*/3)));
+}
+
+TEST(SimulatedNetworkTest, PacketOverhead) {
+ // A packet of 125 bytes that gets enqueued on a network with 1 kbps capacity
+ // should be ready to exit the network in 1 second, but since there is an
+ // overhead per packet of 125 bytes, it will exit the network after 2 seconds.
+ SimulatedNetwork network =
+ SimulatedNetwork({.link_capacity_kbps = 1, .packet_overhead = 125});
+ ASSERT_TRUE(network.EnqueuePacket(PacketWithSize(125)));
+
+ EXPECT_EQ(network.NextDeliveryTimeUs(), TimeDelta::Seconds(2).us());
+}
+
+TEST(SimulatedNetworkTest,
+     DequeueDeliverablePacketsLeavesPacketsInCapacityLink) {
+  // A packet of 125 bytes that gets enqueued on a network with 1 kbps capacity
+  // should be ready to exit the network in 1 second.
+  SimulatedNetwork network = SimulatedNetwork({.link_capacity_kbps = 1});
+  ASSERT_TRUE(network.EnqueuePacket(
+      PacketInFlightInfo(/*size=*/125, /*send_time_us=*/0, /*packet_id=*/1)));
+  // Enqueue another packet of 125 bytes (this one should exit after 2 seconds).
+  ASSERT_TRUE(network.EnqueuePacket(
+      PacketInFlightInfo(/*size=*/125,
+                         /*send_time_us=*/TimeDelta::Seconds(1).us(),
+                         /*packet_id=*/2)));
+
+  // The first packet will exit after 1 second, so that is the next delivery
+  // time.
+  EXPECT_EQ(network.NextDeliveryTimeUs(), TimeDelta::Seconds(1).us());
+
+  // After 1 second, we collect the delivered packets...
+  std::vector<PacketDeliveryInfo> delivered_packets =
+      network.DequeueDeliverablePackets(
+          /*receive_time_us=*/TimeDelta::Seconds(1).us());
+  ASSERT_EQ(delivered_packets.size(), 1ul);
+  EXPECT_EQ(delivered_packets[0].packet_id, 1ul);
+  EXPECT_EQ(delivered_packets[0].receive_time_us, TimeDelta::Seconds(1).us());
+
+  // ... And after the first enqueued packet has left the network, the next
+  // delivery time reflects the delivery time of the next packet.
+  EXPECT_EQ(network.NextDeliveryTimeUs(), TimeDelta::Seconds(2).us());
+}
+
+TEST(SimulatedNetworkTest,
+ DequeueDeliverablePacketsAppliesConfigChangesToCapacityLink) {
+ // A packet of 125 bytes that gets enqueued on a network with 1 kbps capacity
+ // should be ready to exit the network in 1 second.
+ SimulatedNetwork network = SimulatedNetwork({.link_capacity_kbps = 1});
+ const PacketInFlightInfo packet_1 =
+ PacketInFlightInfo(/*size=*/125, /*send_time_us=*/0, /*packet_id=*/1);
+ ASSERT_TRUE(network.EnqueuePacket(packet_1));
+
+ // Enqueue another packet of 125 bytes with send time 1 second so this should
+ // exit after 2 seconds.
+ PacketInFlightInfo packet_2 =
+ PacketInFlightInfo(/*size=*/125,
+ /*send_time_us=*/TimeDelta::Seconds(1).us(),
+ /*packet_id=*/2);
+ ASSERT_TRUE(network.EnqueuePacket(packet_2));
+
+ // The first packet will exit after 1 second, so that is the next delivery
+ // time.
+ EXPECT_EQ(network.NextDeliveryTimeUs(), TimeDelta::Seconds(1).us());
+
+ // Since the link capacity changes from 1 kbps to 10 kbps, packets will take
+ // 100 ms each to leave the network.
+ network.SetConfig({.link_capacity_kbps = 10});
+
+ // The next delivery time doesn't change (it will be updated, if needed at
+ // DequeueDeliverablePackets time).
+ EXPECT_EQ(network.NextDeliveryTimeUs(), TimeDelta::Seconds(1).us());
+
+ // Getting the first enqueued packet after 100 ms.
+ std::vector<PacketDeliveryInfo> delivered_packets =
+ network.DequeueDeliverablePackets(
+ /*receive_time_us=*/TimeDelta::Millis(100).us());
+ ASSERT_EQ(delivered_packets.size(), 1ul);
+ EXPECT_THAT(delivered_packets,
+ ElementsAre(PacketDeliveryInfo(
+ /*source=*/packet_1,
+ /*receive_time_us=*/TimeDelta::Millis(100).us())));
+
+ // Getting the second enqueued packet that cannot be delivered before its send
+ // time, hence it will be delivered after 1.1 seconds.
+ EXPECT_EQ(network.NextDeliveryTimeUs(), TimeDelta::Millis(1100).us());
+ delivered_packets = network.DequeueDeliverablePackets(
+ /*receive_time_us=*/TimeDelta::Millis(1100).us());
+ ASSERT_EQ(delivered_packets.size(), 1ul);
+ EXPECT_THAT(delivered_packets,
+ ElementsAre(PacketDeliveryInfo(
+ /*source=*/packet_2,
+ /*receive_time_us=*/TimeDelta::Millis(1100).us())));
+}
+
+TEST(SimulatedNetworkTest, NetworkEmptyAfterLastPacketDequeued) {
+ // A packet of 125 bytes that gets enqueued on a network with 1 kbps capacity
+ // should be ready to exit the network in 1 second.
+ SimulatedNetwork network = SimulatedNetwork({.link_capacity_kbps = 1});
+ ASSERT_TRUE(network.EnqueuePacket(PacketWithSize(125)));
+
+ // Collecting all the delivered packets ...
+ std::vector<PacketDeliveryInfo> delivered_packets =
+ network.DequeueDeliverablePackets(
+ /*receive_time_us=*/TimeDelta::Seconds(1).us());
+ EXPECT_EQ(delivered_packets.size(), 1ul);
+
+ // ... leaves the network empty.
+ EXPECT_EQ(network.NextDeliveryTimeUs(), absl::nullopt);
+}
+
+TEST(SimulatedNetworkTest, DequeueDeliverablePacketsOnLateCall) {
+ // A packet of 125 bytes that gets enqueued on a network with 1 kbps capacity
+ // should be ready to exit the network in 1 second.
+ SimulatedNetwork network = SimulatedNetwork({.link_capacity_kbps = 1});
+ ASSERT_TRUE(network.EnqueuePacket(
+ PacketInFlightInfo(/*size=*/125, /*send_time_us=*/0, /*packet_id=*/1)));
+
+ // Enqueue another packet of 125 bytes with send time 1 second so this should
+ // exit after 2 seconds.
+ ASSERT_TRUE(network.EnqueuePacket(
+ PacketInFlightInfo(/*size=*/125,
+ /*send_time_us=*/TimeDelta::Seconds(1).us(),
+ /*packet_id=*/2)));
+
+ // Collecting delivered packets after 3 seconds will result in the delivery of
+ // both the enqueued packets.
+ std::vector<PacketDeliveryInfo> delivered_packets =
+ network.DequeueDeliverablePackets(
+ /*receive_time_us=*/TimeDelta::Seconds(3).us());
+ EXPECT_EQ(delivered_packets.size(), 2ul);
+}
+
+TEST(SimulatedNetworkTest,
+ DequeueDeliverablePacketsOnEarlyCallReturnsNoPackets) {
+ // A packet of 125 bytes that gets enqueued on a network with 1 kbps capacity
+ // should be ready to exit the network in 1 second.
+ SimulatedNetwork network = SimulatedNetwork({.link_capacity_kbps = 1});
+ ASSERT_TRUE(network.EnqueuePacket(PacketWithSize(125)));
+
+ // Collecting delivered packets after 0.5 seconds will result in the delivery
+ // of 0 packets.
+ std::vector<PacketDeliveryInfo> delivered_packets =
+ network.DequeueDeliverablePackets(
+ /*receive_time_us=*/TimeDelta::Seconds(0.5).us());
+ EXPECT_EQ(delivered_packets.size(), 0ul);
+
+ // Since the first enqueued packet was supposed to exit after 1 second.
+ EXPECT_EQ(network.NextDeliveryTimeUs(), TimeDelta::Seconds(1).us());
+}
+
+TEST(SimulatedNetworkTest, QueueDelayMsWithoutStandardDeviation) {
+ // A packet of 125 bytes that gets enqueued on a network with 1 kbps capacity
+ // should be ready to exit the network in 1 second.
+ SimulatedNetwork network =
+ SimulatedNetwork({.queue_delay_ms = 100, .link_capacity_kbps = 1});
+ ASSERT_TRUE(network.EnqueuePacket(PacketWithSize(125)));
+ // The next delivery time is still 1 second even if there are 100 ms of
+ // extra delay but this will be applied at DequeueDeliverablePackets time.
+ ASSERT_EQ(network.NextDeliveryTimeUs(), TimeDelta::Seconds(1).us());
+
+ // Since all packets are delayed by 100 ms, after 1 second, no packets will
+ // exit the network.
+ std::vector<PacketDeliveryInfo> delivered_packets =
+ network.DequeueDeliverablePackets(
+ /*receive_time_us=*/TimeDelta::Seconds(1).us());
+ EXPECT_EQ(delivered_packets.size(), 0ul);
+
+ // And the updated next delivery time takes into account the extra delay of
+ // 100 ms so the first packet in the network will be delivered after 1.1
+ // seconds.
+ EXPECT_EQ(network.NextDeliveryTimeUs(), TimeDelta::Millis(1100).us());
+ delivered_packets = network.DequeueDeliverablePackets(
+ /*receive_time_us=*/TimeDelta::Millis(1100).us());
+ EXPECT_EQ(delivered_packets.size(), 1ul);
+}
+
+TEST(SimulatedNetworkTest,
+ QueueDelayMsWithStandardDeviationAndReorderNotAllowed) {
+ SimulatedNetwork network =
+ SimulatedNetwork({.queue_delay_ms = 100,
+ .delay_standard_deviation_ms = 90,
+ .link_capacity_kbps = 1,
+ .allow_reordering = false});
+ // A packet of 125 bytes that gets enqueued on a network with 1 kbps capacity
+ // should be ready to exit the network in 1 second.
+ ASSERT_TRUE(network.EnqueuePacket(
+ PacketInFlightInfo(/*size=*/125, /*send_time_us=*/0, /*packet_id=*/1)));
+
+ // But 3 more packets of size 1 byte are enqueued at the same time.
+ ASSERT_TRUE(network.EnqueuePacket(
+ PacketInFlightInfo(/*size=*/1, /*send_time_us=*/0, /*packet_id=*/2)));
+ ASSERT_TRUE(network.EnqueuePacket(
+ PacketInFlightInfo(/*size=*/1, /*send_time_us=*/0, /*packet_id=*/3)));
+ ASSERT_TRUE(network.EnqueuePacket(
+ PacketInFlightInfo(/*size=*/1, /*send_time_us=*/0, /*packet_id=*/4)));
+
+ // After 5 seconds all of them exit the network.
+ std::vector<PacketDeliveryInfo> delivered_packets =
+ network.DequeueDeliverablePackets(
+ /*receive_time_us=*/TimeDelta::Seconds(5).us());
+ ASSERT_EQ(delivered_packets.size(), 4ul);
+
+ // And they are still in order even if the delay was applied.
+ EXPECT_EQ(delivered_packets[0].packet_id, 1ul);
+ EXPECT_EQ(delivered_packets[1].packet_id, 2ul);
+ EXPECT_GE(delivered_packets[1].receive_time_us,
+ delivered_packets[0].receive_time_us);
+ EXPECT_EQ(delivered_packets[2].packet_id, 3ul);
+ EXPECT_GE(delivered_packets[2].receive_time_us,
+ delivered_packets[1].receive_time_us);
+ EXPECT_EQ(delivered_packets[3].packet_id, 4ul);
+ EXPECT_GE(delivered_packets[3].receive_time_us,
+ delivered_packets[2].receive_time_us);
+}
+
+TEST(SimulatedNetworkTest, QueueDelayMsWithStandardDeviationAndReorderAllowed) {
+  SimulatedNetwork network =
+      SimulatedNetwork({.queue_delay_ms = 100,
+                        .delay_standard_deviation_ms = 90,
+                        .link_capacity_kbps = 1,
+                        .allow_reordering = true},
+                       /*random_seed=*/1);
+  // A packet of 125 bytes that gets enqueued on a network with 1 kbps capacity
+  // should be ready to exit the network in 1 second.
+  ASSERT_TRUE(network.EnqueuePacket(
+      PacketInFlightInfo(/*size=*/125, /*send_time_us=*/0, /*packet_id=*/1)));
+
+  // But 3 more packets of size 1 byte are enqueued at the same time.
+  ASSERT_TRUE(network.EnqueuePacket(
+      PacketInFlightInfo(/*size=*/1, /*send_time_us=*/0, /*packet_id=*/2)));
+  ASSERT_TRUE(network.EnqueuePacket(
+      PacketInFlightInfo(/*size=*/1, /*send_time_us=*/0, /*packet_id=*/3)));
+  ASSERT_TRUE(network.EnqueuePacket(
+      PacketInFlightInfo(/*size=*/1, /*send_time_us=*/0, /*packet_id=*/4)));
+
+  // After 5 seconds all of them exit the network.
+  std::vector<PacketDeliveryInfo> delivered_packets =
+      network.DequeueDeliverablePackets(
+          /*receive_time_us=*/TimeDelta::Seconds(5).us());
+  ASSERT_EQ(delivered_packets.size(), 4ul);
+
+  // And they have been reordered according to the applied extra delay.
+  EXPECT_EQ(delivered_packets[0].packet_id, 3ul);
+  EXPECT_EQ(delivered_packets[1].packet_id, 1ul);
+  EXPECT_GE(delivered_packets[1].receive_time_us,
+            delivered_packets[0].receive_time_us);
+  EXPECT_EQ(delivered_packets[2].packet_id, 2ul);
+  EXPECT_GE(delivered_packets[2].receive_time_us,
+            delivered_packets[1].receive_time_us);
+  EXPECT_EQ(delivered_packets[3].packet_id, 4ul);
+  EXPECT_GE(delivered_packets[3].receive_time_us,
+            delivered_packets[2].receive_time_us);
+}
+
+TEST(SimulatedNetworkTest, PacketLoss) {
+  // On a network with 50% probability of packet loss ...
+  SimulatedNetwork network = SimulatedNetwork({.loss_percent = 50});
+
+  // Enqueueing 8 packets ...
+  for (int i = 0; i < 8; i++) {
+    ASSERT_TRUE(network.EnqueuePacket(PacketInFlightInfo(
+        /*size=*/1, /*send_time_us=*/0, /*packet_id=*/i + 1)));
+  }
+
+  std::vector<PacketDeliveryInfo> delivered_packets =
+      network.DequeueDeliverablePackets(
+          /*receive_time_us=*/TimeDelta::Seconds(5).us());
+  EXPECT_EQ(delivered_packets.size(), 8ul);
+
+  // Results in the loss of 4 of them.
+  int lost_packets = 0;
+  for (const auto& packet : delivered_packets) {
+    if (packet.receive_time_us == PacketDeliveryInfo::kNotReceived) {
+      lost_packets++;
+    }
+  }
+  EXPECT_EQ(lost_packets, 4);
+}
+
+TEST(SimulatedNetworkTest, PacketLossBurst) {
+  // On a network with 50% probability of packet loss and an average burst loss
+  // length of 100 ...
+  SimulatedNetwork network = SimulatedNetwork(
+      {.loss_percent = 50, .avg_burst_loss_length = 100}, /*random_seed=*/1);
+
+  // Enqueueing 20 packets ...
+  for (int i = 0; i < 20; i++) {
+    ASSERT_TRUE(network.EnqueuePacket(PacketInFlightInfo(
+        /*size=*/1, /*send_time_us=*/0, /*packet_id=*/i + 1)));
+  }
+
+  std::vector<PacketDeliveryInfo> delivered_packets =
+      network.DequeueDeliverablePackets(
+          /*receive_time_us=*/TimeDelta::Seconds(5).us());
+  EXPECT_EQ(delivered_packets.size(), 20ul);
+
+  // Results in a burst of lost packets after the first packet lost.
+  // With the current random seed, the first 12 are not lost, while the
+  // last 8 are.
+  int current_packet = 0;
+  for (const auto& packet : delivered_packets) {
+    if (current_packet < 12) {
+      EXPECT_NE(packet.receive_time_us, PacketDeliveryInfo::kNotReceived);
+      current_packet++;
+    } else {
+      EXPECT_EQ(packet.receive_time_us, PacketDeliveryInfo::kNotReceived);
+      current_packet++;
+    }
+  }
+}
+
+TEST(SimulatedNetworkTest, PauseTransmissionUntil) {
+ // 3 packets of 125 bytes that gets enqueued on a network with 1 kbps capacity
+ // should be ready to exit the network after 1, 2 and 3 seconds respectively.
+ SimulatedNetwork network = SimulatedNetwork({.link_capacity_kbps = 1});
+ ASSERT_TRUE(network.EnqueuePacket(
+ PacketInFlightInfo(/*size=*/125, /*send_time_us=*/0, /*packet_id=*/1)));
+ ASSERT_TRUE(network.EnqueuePacket(
+ PacketInFlightInfo(/*size=*/125, /*send_time_us=*/0, /*packet_id=*/2)));
+ ASSERT_TRUE(network.EnqueuePacket(
+ PacketInFlightInfo(/*size=*/125, /*send_time_us=*/0, /*packet_id=*/3)));
+ ASSERT_EQ(network.NextDeliveryTimeUs(), TimeDelta::Seconds(1).us());
+
+ // The network gets paused for 5 seconds, which means that the first packet
+ // can exit after 5 seconds instead of 1 second.
+ network.PauseTransmissionUntil(TimeDelta::Seconds(5).us());
+
+ // No packets after 1 second.
+ std::vector<PacketDeliveryInfo> delivered_packets =
+ network.DequeueDeliverablePackets(
+ /*receive_time_us=*/TimeDelta::Seconds(1).us());
+ EXPECT_EQ(delivered_packets.size(), 0ul);
+ EXPECT_EQ(network.NextDeliveryTimeUs(), TimeDelta::Seconds(5).us());
+
+ // The first packet exits after 5 seconds.
+ delivered_packets = network.DequeueDeliverablePackets(
+ /*receive_time_us=*/TimeDelta::Seconds(5).us());
+ EXPECT_EQ(delivered_packets.size(), 1ul);
+
+ // After the first packet is exited, the next delivery time reflects the
+ // delivery time of the next packet which accounts for the network pause.
+ EXPECT_EQ(network.NextDeliveryTimeUs(), TimeDelta::Seconds(6).us());
+
+ // And 2 seconds after the exit of the first enqueued packet, the following 2
+ // packets are also delivered.
+ delivered_packets = network.DequeueDeliverablePackets(
+ /*receive_time_us=*/TimeDelta::Seconds(7).us());
+ EXPECT_EQ(delivered_packets.size(), 2ul);
+}
+
+TEST(SimulatedNetworkTest, CongestedNetworkRespectsLinkCapacity) {
+  SimulatedNetwork network = SimulatedNetwork({.link_capacity_kbps = 1});
+  for (size_t i = 0; i < 1'000; ++i) {
+    ASSERT_TRUE(network.EnqueuePacket(
+        PacketInFlightInfo(/*size=*/125, /*send_time_us=*/0, /*packet_id=*/i)));
+  }
+  PacketDeliveryInfo last_delivered_packet{
+      PacketInFlightInfo(/*size=*/0, /*send_time_us=*/0, /*packet_id=*/0), 0};
+  while (network.NextDeliveryTimeUs().has_value()) {
+    std::vector<PacketDeliveryInfo> delivered_packets =
+        network.DequeueDeliverablePackets(
+            /*receive_time_us=*/network.NextDeliveryTimeUs().value());
+    if (!delivered_packets.empty()) {
+      last_delivered_packet = delivered_packets.back();
+    }
+  }
+  // 1000 packets of 1000 bits each will take 1000 seconds to exit a 1 kbps
+  // network.
+  EXPECT_EQ(last_delivered_packet.receive_time_us,
+            TimeDelta::Seconds(1000).us());
+  EXPECT_EQ(last_delivered_packet.packet_id, 999ul);
+}
+
+TEST(SimulatedNetworkTest, EnqueuePacketWithSubSecondNonMonotonicBehaviour) {
+  // On multi-core systems, different threads can experience sub-millisecond
+  // non-monotonic behaviour when running on different cores. This test checks
+  // that when a non-monotonic packet enqueue happens, the network continues to
+  // work and the out-of-order packet is sent anyway.
+  SimulatedNetwork network = SimulatedNetwork({.link_capacity_kbps = 1});
+  ASSERT_TRUE(network.EnqueuePacket(PacketInFlightInfo(
+      /*size=*/125, /*send_time_us=*/TimeDelta::Seconds(1).us(),
+      /*packet_id=*/0)));
+  ASSERT_TRUE(network.EnqueuePacket(PacketInFlightInfo(
+      /*size=*/125, /*send_time_us=*/TimeDelta::Seconds(1).us() - 1,
+      /*packet_id=*/1)));
+
+  std::vector<PacketDeliveryInfo> delivered_packets =
+      network.DequeueDeliverablePackets(
+          /*receive_time_us=*/TimeDelta::Seconds(2).us());
+  ASSERT_EQ(delivered_packets.size(), 1ul);
+  EXPECT_EQ(delivered_packets[0].packet_id, 0ul);
+  EXPECT_EQ(delivered_packets[0].receive_time_us, TimeDelta::Seconds(2).us());
+
+  delivered_packets = network.DequeueDeliverablePackets(
+      /*receive_time_us=*/TimeDelta::Seconds(3).us());
+  ASSERT_EQ(delivered_packets.size(), 1ul);
+  EXPECT_EQ(delivered_packets[0].packet_id, 1ul);
+  EXPECT_EQ(delivered_packets[0].receive_time_us, TimeDelta::Seconds(3).us());
+}
+
+// TODO(bugs.webrtc.org/14525): Re-enable when the DCHECK will be uncommented
+// and the non-monotonic events on real time clock tests is solved/understood.
+// TEST(SimulatedNetworkDeathTest, EnqueuePacketExpectMonotonicSendTime) {
+// SimulatedNetwork network = SimulatedNetwork({.link_capacity_kbps = 1});
+// ASSERT_TRUE(network.EnqueuePacket(PacketInFlightInfo(
+// /*size=*/125, /*send_time_us=*/2'000'000, /*packet_id=*/0)));
+// EXPECT_DEATH_IF_SUPPORTED(network.EnqueuePacket(PacketInFlightInfo(
+// /*size=*/125, /*send_time_us=*/900'000, /*packet_id=*/1)), "");
+// }
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/simulated_packet_receiver.h b/third_party/libwebrtc/call/simulated_packet_receiver.h
new file mode 100644
index 0000000000..2db46e8c38
--- /dev/null
+++ b/third_party/libwebrtc/call/simulated_packet_receiver.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef CALL_SIMULATED_PACKET_RECEIVER_H_
+#define CALL_SIMULATED_PACKET_RECEIVER_H_
+
+#include "api/test/simulated_network.h"
+#include "call/packet_receiver.h"
+
+namespace webrtc {
+
+// Private API that is fixing surface between DirectTransport and underlying
+// network conditions simulation implementation.
+class SimulatedPacketReceiverInterface : public PacketReceiver {
+ public:
+ // Must not be called in parallel with DeliverPacket or Process.
+ // Destination receiver will be injected with this method
+ virtual void SetReceiver(PacketReceiver* receiver) = 0;
+
+ // Reports average packet delay.
+ virtual int AverageDelay() = 0;
+
+ // Process any pending tasks such as timeouts.
+ // Called on a worker thread.
+ virtual void Process() = 0;
+
+ // Returns the time until next process or nullopt to indicate that the next
+ // process time is unknown. If the next process time is unknown, this should
+ // be checked again any time a packet is enqueued.
+ virtual absl::optional<int64_t> TimeUntilNextProcess() = 0;
+};
+
+} // namespace webrtc
+
+#endif // CALL_SIMULATED_PACKET_RECEIVER_H_
diff --git a/third_party/libwebrtc/call/syncable.cc b/third_party/libwebrtc/call/syncable.cc
new file mode 100644
index 0000000000..a821881884
--- /dev/null
+++ b/third_party/libwebrtc/call/syncable.cc
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/syncable.h"
+
+namespace webrtc {
+
+Syncable::~Syncable() = default;
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/syncable.h b/third_party/libwebrtc/call/syncable.h
new file mode 100644
index 0000000000..6817be9c55
--- /dev/null
+++ b/third_party/libwebrtc/call/syncable.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Syncable is used by RtpStreamsSynchronizer in VideoReceiveStreamInterface,
+// and implemented by AudioReceiveStreamInterface.
+
+#ifndef CALL_SYNCABLE_H_
+#define CALL_SYNCABLE_H_
+
+#include <stdint.h>
+
+#include "absl/types/optional.h"
+
+namespace webrtc {
+
+class Syncable {
+ public:
+ struct Info {
+ int64_t latest_receive_time_ms = 0;
+ uint32_t latest_received_capture_timestamp = 0;
+ uint32_t capture_time_ntp_secs = 0;
+ uint32_t capture_time_ntp_frac = 0;
+ uint32_t capture_time_source_clock = 0;
+ int current_delay_ms = 0;
+ };
+
+ virtual ~Syncable();
+
+ virtual uint32_t id() const = 0;
+ virtual absl::optional<Info> GetInfo() const = 0;
+ virtual bool GetPlayoutRtpTimestamp(uint32_t* rtp_timestamp,
+ int64_t* time_ms) const = 0;
+ virtual bool SetMinimumPlayoutDelay(int delay_ms) = 0;
+ virtual void SetEstimatedPlayoutNtpTimestampMs(int64_t ntp_timestamp_ms,
+ int64_t time_ms) = 0;
+};
+} // namespace webrtc
+
+#endif // CALL_SYNCABLE_H_
diff --git a/third_party/libwebrtc/call/test/mock_audio_send_stream.h b/third_party/libwebrtc/call/test/mock_audio_send_stream.h
new file mode 100644
index 0000000000..1993de8de0
--- /dev/null
+++ b/third_party/libwebrtc/call/test/mock_audio_send_stream.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef CALL_TEST_MOCK_AUDIO_SEND_STREAM_H_
+#define CALL_TEST_MOCK_AUDIO_SEND_STREAM_H_
+
+#include <memory>
+
+#include "call/audio_send_stream.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+namespace test {
+
+class MockAudioSendStream : public AudioSendStream {
+ public:
+ MOCK_METHOD(const webrtc::AudioSendStream::Config&,
+ GetConfig,
+ (),
+ (const, override));
+ MOCK_METHOD(void,
+ Reconfigure,
+ (const Config& config, SetParametersCallback callback),
+ (override));
+ MOCK_METHOD(void, Start, (), (override));
+ MOCK_METHOD(void, Stop, (), (override));
+ // GMock doesn't like move-only types, such as std::unique_ptr.
+ void SendAudioData(std::unique_ptr<webrtc::AudioFrame> audio_frame) override {
+ SendAudioDataForMock(audio_frame.get());
+ }
+ MOCK_METHOD(void, SendAudioDataForMock, (webrtc::AudioFrame*));
+ MOCK_METHOD(
+ bool,
+ SendTelephoneEvent,
+ (int payload_type, int payload_frequency, int event, int duration_ms),
+ (override));
+ MOCK_METHOD(void, SetMuted, (bool muted), (override));
+ MOCK_METHOD(Stats, GetStats, (), (const, override));
+ MOCK_METHOD(Stats, GetStats, (bool has_remote_tracks), (const, override));
+};
+} // namespace test
+} // namespace webrtc
+
+#endif // CALL_TEST_MOCK_AUDIO_SEND_STREAM_H_
diff --git a/third_party/libwebrtc/call/test/mock_bitrate_allocator.h b/third_party/libwebrtc/call/test/mock_bitrate_allocator.h
new file mode 100644
index 0000000000..b08916fe4f
--- /dev/null
+++ b/third_party/libwebrtc/call/test/mock_bitrate_allocator.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef CALL_TEST_MOCK_BITRATE_ALLOCATOR_H_
+#define CALL_TEST_MOCK_BITRATE_ALLOCATOR_H_
+
+#include <string>
+
+#include "call/bitrate_allocator.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+class MockBitrateAllocator : public BitrateAllocatorInterface {
+ public:
+ MOCK_METHOD(void,
+ AddObserver,
+ (BitrateAllocatorObserver*, MediaStreamAllocationConfig),
+ (override));
+ MOCK_METHOD(void, RemoveObserver, (BitrateAllocatorObserver*), (override));
+ MOCK_METHOD(int,
+ GetStartBitrate,
+ (BitrateAllocatorObserver*),
+ (const, override));
+};
+} // namespace webrtc
+#endif // CALL_TEST_MOCK_BITRATE_ALLOCATOR_H_
diff --git a/third_party/libwebrtc/call/test/mock_rtp_packet_sink_interface.h b/third_party/libwebrtc/call/test/mock_rtp_packet_sink_interface.h
new file mode 100644
index 0000000000..e6d14f05c5
--- /dev/null
+++ b/third_party/libwebrtc/call/test/mock_rtp_packet_sink_interface.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef CALL_TEST_MOCK_RTP_PACKET_SINK_INTERFACE_H_
+#define CALL_TEST_MOCK_RTP_PACKET_SINK_INTERFACE_H_
+
+#include "call/rtp_packet_sink_interface.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+
+class MockRtpPacketSink : public RtpPacketSinkInterface {
+ public:
+ MOCK_METHOD(void, OnRtpPacket, (const RtpPacketReceived&), (override));
+};
+
+} // namespace webrtc
+
+#endif // CALL_TEST_MOCK_RTP_PACKET_SINK_INTERFACE_H_
diff --git a/third_party/libwebrtc/call/test/mock_rtp_transport_controller_send.h b/third_party/libwebrtc/call/test/mock_rtp_transport_controller_send.h
new file mode 100644
index 0000000000..b24e5a59ec
--- /dev/null
+++ b/third_party/libwebrtc/call/test/mock_rtp_transport_controller_send.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef CALL_TEST_MOCK_RTP_TRANSPORT_CONTROLLER_SEND_H_
+#define CALL_TEST_MOCK_RTP_TRANSPORT_CONTROLLER_SEND_H_
+
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "api/crypto/crypto_options.h"
+#include "api/crypto/frame_encryptor_interface.h"
+#include "api/frame_transformer_interface.h"
+#include "api/transport/bitrate_settings.h"
+#include "call/rtp_transport_controller_send_interface.h"
+#include "modules/pacing/packet_router.h"
+#include "rtc_base/network/sent_packet.h"
+#include "rtc_base/network_route.h"
+#include "rtc_base/rate_limiter.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+
+class MockRtpTransportControllerSend
+ : public RtpTransportControllerSendInterface {
+ public:
+ MOCK_METHOD(RtpVideoSenderInterface*,
+ CreateRtpVideoSender,
+ ((const std::map<uint32_t, RtpState>&),
+ (const std::map<uint32_t, RtpPayloadState>&),
+ const RtpConfig&,
+ int rtcp_report_interval_ms,
+ Transport*,
+ const RtpSenderObservers&,
+ RtcEventLog*,
+ std::unique_ptr<FecController>,
+ const RtpSenderFrameEncryptionConfig&,
+ rtc::scoped_refptr<FrameTransformerInterface>),
+ (override));
+ MOCK_METHOD(void,
+ DestroyRtpVideoSender,
+ (RtpVideoSenderInterface*),
+ (override));
+ MOCK_METHOD(PacketRouter*, packet_router, (), (override));
+ MOCK_METHOD(NetworkStateEstimateObserver*,
+ network_state_estimate_observer,
+ (),
+ (override));
+ MOCK_METHOD(TransportFeedbackObserver*,
+ transport_feedback_observer,
+ (),
+ (override));
+ MOCK_METHOD(RtpPacketSender*, packet_sender, (), (override));
+ MOCK_METHOD(void,
+ SetAllocatedSendBitrateLimits,
+ (BitrateAllocationLimits),
+ (override));
+ MOCK_METHOD(void, SetPacingFactor, (float), (override));
+ MOCK_METHOD(void, SetQueueTimeLimit, (int), (override));
+ MOCK_METHOD(StreamFeedbackProvider*,
+ GetStreamFeedbackProvider,
+ (),
+ (override));
+ MOCK_METHOD(void,
+ RegisterTargetTransferRateObserver,
+ (TargetTransferRateObserver*),
+ (override));
+ MOCK_METHOD(void,
+ OnNetworkRouteChanged,
+ (absl::string_view, const rtc::NetworkRoute&),
+ (override));
+ MOCK_METHOD(void, OnNetworkAvailability, (bool), (override));
+ MOCK_METHOD(NetworkLinkRtcpObserver*, GetRtcpObserver, (), (override));
+ MOCK_METHOD(int64_t, GetPacerQueuingDelayMs, (), (const, override));
+ MOCK_METHOD(absl::optional<Timestamp>,
+ GetFirstPacketTime,
+ (),
+ (const, override));
+ MOCK_METHOD(void, EnablePeriodicAlrProbing, (bool), (override));
+ MOCK_METHOD(void, OnSentPacket, (const rtc::SentPacket&), (override));
+ MOCK_METHOD(void,
+ SetSdpBitrateParameters,
+ (const BitrateConstraints&),
+ (override));
+ MOCK_METHOD(void,
+ SetClientBitratePreferences,
+ (const BitrateSettings&),
+ (override));
+ MOCK_METHOD(void, OnTransportOverheadChanged, (size_t), (override));
+ MOCK_METHOD(void, AccountForAudioPacketsInPacedSender, (bool), (override));
+ MOCK_METHOD(void, IncludeOverheadInPacedSender, (), (override));
+ MOCK_METHOD(void, OnReceivedPacket, (const ReceivedPacket&), (override));
+ MOCK_METHOD(void, EnsureStarted, (), (override));
+};
+} // namespace webrtc
+#endif // CALL_TEST_MOCK_RTP_TRANSPORT_CONTROLLER_SEND_H_
diff --git a/third_party/libwebrtc/call/version.cc b/third_party/libwebrtc/call/version.cc
new file mode 100644
index 0000000000..44c8c77156
--- /dev/null
+++ b/third_party/libwebrtc/call/version.cc
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/version.h"
+
+namespace webrtc {
+
+// The timestamp is always in UTC.
+const char* const kSourceTimestamp = "WebRTC source stamp 2023-10-30T04:03:42";
+
+void LoadWebRTCVersionInRegister() {
+ // Using volatile to instruct the compiler to not optimize `p` away even
+ // if it looks unused.
+ const char* volatile p = kSourceTimestamp;
+ static_cast<void>(p);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/version.h b/third_party/libwebrtc/call/version.h
new file mode 100644
index 0000000000..d476e0e108
--- /dev/null
+++ b/third_party/libwebrtc/call/version.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef CALL_VERSION_H_
+#define CALL_VERSION_H_
+
+// LoadWebRTCVersionInRegistry is a helper function that loads the pointer to
+// the WebRTC version string into a register. While this function doesn't do
+// anything useful, it is needed in order to avoid that compiler optimizations
+// remove the WebRTC version string from the final binary.
+
+namespace webrtc {
+
+void LoadWebRTCVersionInRegister();
+
+} // namespace webrtc
+
+#endif // CALL_VERSION_H_
diff --git a/third_party/libwebrtc/call/version_gn/moz.build b/third_party/libwebrtc/call/version_gn/moz.build
new file mode 100644
index 0000000000..e2e087a17f
--- /dev/null
+++ b/third_party/libwebrtc/call/version_gn/moz.build
@@ -0,0 +1,221 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/call/version.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["RTC_ENABLE_WIN_WGC"] = True
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["TARGET_CPU"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["TARGET_CPU"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["TARGET_CPU"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["TARGET_CPU"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["TARGET_CPU"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["TARGET_CPU"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android" and CONFIG["TARGET_CPU"] == "arm":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["OS_TARGET"] == "Android" and CONFIG["TARGET_CPU"] == "x86":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "aarch64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "arm":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "x86":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "x86_64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("version_gn")
diff --git a/third_party/libwebrtc/call/video_receive_stream.cc b/third_party/libwebrtc/call/video_receive_stream.cc
new file mode 100644
index 0000000000..9ee9ed3e76
--- /dev/null
+++ b/third_party/libwebrtc/call/video_receive_stream.cc
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/video_receive_stream.h"
+
+#include "rtc_base/strings/string_builder.h"
+
+namespace webrtc {
+
+VideoReceiveStreamInterface::Decoder::Decoder(SdpVideoFormat video_format,
+ int payload_type)
+ : video_format(std::move(video_format)), payload_type(payload_type) {}
+VideoReceiveStreamInterface::Decoder::Decoder() : video_format("Unset") {}
+VideoReceiveStreamInterface::Decoder::Decoder(const Decoder&) = default;
+VideoReceiveStreamInterface::Decoder::~Decoder() = default;
+
+bool VideoReceiveStreamInterface::Decoder::operator==(
+ const Decoder& other) const {
+ return payload_type == other.payload_type &&
+ video_format == other.video_format;
+}
+
+std::string VideoReceiveStreamInterface::Decoder::ToString() const {
+ char buf[1024];
+ rtc::SimpleStringBuilder ss(buf);
+ ss << "{payload_type: " << payload_type;
+ ss << ", payload_name: " << video_format.name;
+ ss << ", codec_params: {";
+ for (auto it = video_format.parameters.begin();
+ it != video_format.parameters.end(); ++it) {
+ if (it != video_format.parameters.begin()) {
+ ss << ", ";
+ }
+ ss << it->first << ": " << it->second;
+ }
+ ss << '}';
+ ss << '}';
+
+ return ss.str();
+}
+
+VideoReceiveStreamInterface::Stats::Stats() = default;
+VideoReceiveStreamInterface::Stats::~Stats() = default;
+
+std::string VideoReceiveStreamInterface::Stats::ToString(
+ int64_t time_ms) const {
+ char buf[2048];
+ rtc::SimpleStringBuilder ss(buf);
+ ss << "VideoReceiveStreamInterface stats: " << time_ms << ", {ssrc: " << ssrc
+ << ", ";
+ ss << "total_bps: " << total_bitrate_bps << ", ";
+ // Spec-compliant stats are camelCased to distinguish them from
+ // the legacy and internal stats.
+ ss << "frameWidth: " << width << ", ";
+ ss << "frameHeight: " << height << ", ";
+ // TODO(crbug.com/webrtc/15166): `key` and `delta` will not
+ // perfectly match the other frame counters.
+ ss << "key: " << frame_counts.key_frames << ", ";
+ ss << "delta: " << frame_counts.delta_frames << ", ";
+ ss << "framesAssembledFromMultiplePackets: "
+ << frames_assembled_from_multiple_packets << ", ";
+ ss << "framesDecoded: " << frames_decoded << ", ";
+ ss << "framesDropped: " << frames_dropped << ", ";
+ ss << "network_fps: " << network_frame_rate << ", ";
+ ss << "decode_fps: " << decode_frame_rate << ", ";
+ ss << "render_fps: " << render_frame_rate << ", ";
+ ss << "decode_ms: " << decode_ms << ", ";
+ ss << "max_decode_ms: " << max_decode_ms << ", ";
+ ss << "first_frame_received_to_decoded_ms: "
+ << first_frame_received_to_decoded_ms << ", ";
+ ss << "current_delay_ms: " << current_delay_ms << ", ";
+ ss << "target_delay_ms: " << target_delay_ms << ", ";
+ ss << "jitter_delay_ms: " << jitter_buffer_ms << ", ";
+ ss << "totalAssemblyTime: " << total_assembly_time.seconds<double>() << ", ";
+ ss << "jitterBufferDelay: " << jitter_buffer_delay.seconds<double>() << ", ";
+ ss << "jitterBufferTargetDelay: "
+ << jitter_buffer_target_delay.seconds<double>() << ", ";
+ ss << "jitterBufferEmittedCount: " << jitter_buffer_emitted_count << ", ";
+ ss << "jitterBufferMinimumDelay: "
+ << jitter_buffer_minimum_delay.seconds<double>() << ", ";
+ ss << "totalDecodeTime: " << total_decode_time.seconds<double>() << ", ";
+ ss << "totalProcessingDelay: " << total_processing_delay.seconds<double>()
+ << ", ";
+ ss << "min_playout_delay_ms: " << min_playout_delay_ms << ", ";
+ ss << "sync_offset_ms: " << sync_offset_ms << ", ";
+ ss << "cum_loss: " << rtp_stats.packets_lost << ", ";
+ ss << "nackCount: " << rtcp_packet_type_counts.nack_packets << ", ";
+ ss << "firCount: " << rtcp_packet_type_counts.fir_packets << ", ";
+ ss << "pliCount: " << rtcp_packet_type_counts.pli_packets;
+ ss << '}';
+ return ss.str();
+}
+
+VideoReceiveStreamInterface::Config::Config(const Config&) = default;
+VideoReceiveStreamInterface::Config::Config(Config&&) = default;
+VideoReceiveStreamInterface::Config::Config(
+ Transport* rtcp_send_transport,
+ VideoDecoderFactory* decoder_factory)
+ : decoder_factory(decoder_factory),
+ rtcp_send_transport(rtcp_send_transport) {}
+
+VideoReceiveStreamInterface::Config&
+VideoReceiveStreamInterface::Config::operator=(Config&&) = default;
+VideoReceiveStreamInterface::Config::Config::~Config() = default;
+
+std::string VideoReceiveStreamInterface::Config::ToString() const {
+ char buf[4 * 1024];
+ rtc::SimpleStringBuilder ss(buf);
+ ss << "{decoders: [";
+ for (size_t i = 0; i < decoders.size(); ++i) {
+ ss << decoders[i].ToString();
+ if (i != decoders.size() - 1)
+ ss << ", ";
+ }
+ ss << ']';
+ ss << ", rtp: " << rtp.ToString();
+ ss << ", renderer: " << (renderer ? "(renderer)" : "nullptr");
+ ss << ", render_delay_ms: " << render_delay_ms;
+ if (!sync_group.empty())
+ ss << ", sync_group: " << sync_group;
+ ss << '}';
+
+ return ss.str();
+}
+
+VideoReceiveStreamInterface::Config::Rtp::Rtp() = default;
+VideoReceiveStreamInterface::Config::Rtp::Rtp(const Rtp&) = default;
+VideoReceiveStreamInterface::Config::Rtp::~Rtp() = default;
+
+std::string VideoReceiveStreamInterface::Config::Rtp::ToString() const {
+ char buf[2 * 1024];
+ rtc::SimpleStringBuilder ss(buf);
+ ss << "{remote_ssrc: " << remote_ssrc;
+ ss << ", local_ssrc: " << local_ssrc;
+ ss << ", rtcp_mode: "
+ << (rtcp_mode == RtcpMode::kCompound ? "RtcpMode::kCompound"
+ : "RtcpMode::kReducedSize");
+ ss << ", rtcp_xr: ";
+ ss << "{receiver_reference_time_report: "
+ << (rtcp_xr.receiver_reference_time_report ? "on" : "off");
+ ss << '}';
+ ss << ", lntf: {enabled: " << (lntf.enabled ? "true" : "false") << '}';
+ ss << ", nack: {rtp_history_ms: " << nack.rtp_history_ms << '}';
+ ss << ", ulpfec_payload_type: " << ulpfec_payload_type;
+ ss << ", red_type: " << red_payload_type;
+ ss << ", rtx_ssrc: " << rtx_ssrc;
+ ss << ", rtx_payload_types: {";
+ for (auto& kv : rtx_associated_payload_types) {
+ ss << kv.first << " (pt) -> " << kv.second << " (apt), ";
+ }
+ ss << '}';
+ ss << ", raw_payload_types: {";
+ for (const auto& pt : raw_payload_types) {
+ ss << pt << ", ";
+ }
+ ss << '}';
+ ss << ", rtcp_event_observer: "
+ << (rtcp_event_observer ? "(rtcp_event_observer)" : "nullptr");
+ ss << '}';
+ return ss.str();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/video_receive_stream.h b/third_party/libwebrtc/call/video_receive_stream.h
new file mode 100644
index 0000000000..0dc7dee71d
--- /dev/null
+++ b/third_party/libwebrtc/call/video_receive_stream.h
@@ -0,0 +1,344 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef CALL_VIDEO_RECEIVE_STREAM_H_
+#define CALL_VIDEO_RECEIVE_STREAM_H_
+
+#include <cstdint>
+#include <limits>
+#include <map>
+#include <set>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "api/call/transport.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "api/crypto/crypto_options.h"
+#include "api/rtp_headers.h"
+#include "api/rtp_parameters.h"
+#include "api/video/recordable_encoded_frame.h"
+#include "api/video/video_content_type.h"
+#include "api/video/video_frame.h"
+#include "api/video/video_sink_interface.h"
+#include "api/video/video_timing.h"
+#include "api/video_codecs/sdp_video_format.h"
+#include "call/receive_stream.h"
+#include "call/rtp_config.h"
+#include "common_video/frame_counts.h"
+#include "modules/rtp_rtcp/include/rtcp_statistics.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+class RtpPacketSinkInterface;
+class VideoDecoderFactory;
+
+class VideoReceiveStreamInterface : public MediaReceiveStreamInterface {
+ public:
+ // Class for handling moving in/out recording state.
+ struct RecordingState {
+ RecordingState() = default;
+ explicit RecordingState(
+ std::function<void(const RecordableEncodedFrame&)> callback)
+ : callback(std::move(callback)) {}
+
+ // Callback stored from the VideoReceiveStreamInterface. The
+ // VideoReceiveStreamInterface client should not interpret the attribute.
+ std::function<void(const RecordableEncodedFrame&)> callback;
+ // Memento of when a keyframe request was last sent. The
+ // VideoReceiveStreamInterface client should not interpret the attribute.
+ absl::optional<int64_t> last_keyframe_request_ms;
+ };
+
+ // TODO(mflodman) Move all these settings to VideoDecoder and move the
+ // declaration to common_types.h.
+ struct Decoder {
+ Decoder(SdpVideoFormat video_format, int payload_type);
+ Decoder();
+ Decoder(const Decoder&);
+ ~Decoder();
+
+ bool operator==(const Decoder& other) const;
+
+ std::string ToString() const;
+
+ SdpVideoFormat video_format;
+
+ // Received RTP packets with this payload type will be sent to this decoder
+ // instance.
+ int payload_type = 0;
+ };
+
+ struct Stats {
+ Stats();
+ ~Stats();
+ std::string ToString(int64_t time_ms) const;
+
+ int network_frame_rate = 0;
+ int decode_frame_rate = 0;
+ int render_frame_rate = 0;
+ uint32_t frames_rendered = 0;
+
+ // Decoder stats.
+ absl::optional<std::string> decoder_implementation_name;
+ absl::optional<bool> power_efficient_decoder;
+ FrameCounts frame_counts;
+ int decode_ms = 0;
+ int max_decode_ms = 0;
+ int current_delay_ms = 0;
+ int target_delay_ms = 0;
+ int jitter_buffer_ms = 0;
+ // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-jitterbufferdelay
+ TimeDelta jitter_buffer_delay = TimeDelta::Zero();
+ // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-jitterbuffertargetdelay
+ TimeDelta jitter_buffer_target_delay = TimeDelta::Zero();
+ // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-jitterbufferemittedcount
+ uint64_t jitter_buffer_emitted_count = 0;
+ // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-jitterbufferminimumdelay
+ TimeDelta jitter_buffer_minimum_delay = TimeDelta::Zero();
+ int min_playout_delay_ms = 0;
+ int render_delay_ms = 10;
+ int64_t interframe_delay_max_ms = -1;
+ // Frames dropped due to decoding failures or if the system is too slow.
+ // https://www.w3.org/TR/webrtc-stats/#dom-rtcvideoreceiverstats-framesdropped
+ uint32_t frames_dropped = 0;
+ uint32_t frames_decoded = 0;
+ // https://w3c.github.io/webrtc-stats/#dom-rtcreceivedrtpstreamstats-packetsdiscarded
+ uint64_t packets_discarded = 0;
+ // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-totaldecodetime
+ TimeDelta total_decode_time = TimeDelta::Zero();
+ // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-totalprocessingdelay
+ TimeDelta total_processing_delay = TimeDelta::Zero();
+ // TODO(bugs.webrtc.org/13986): standardize
+ TimeDelta total_assembly_time = TimeDelta::Zero();
+ uint32_t frames_assembled_from_multiple_packets = 0;
+ // Total inter frame delay in seconds.
+ // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-totalinterframedelay
+ double total_inter_frame_delay = 0;
+ // Total squared inter frame delay in seconds^2.
+ // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-totalsqauredinterframedelay
+ double total_squared_inter_frame_delay = 0;
+ int64_t first_frame_received_to_decoded_ms = -1;
+ absl::optional<uint64_t> qp_sum;
+
+ int current_payload_type = -1;
+
+ int total_bitrate_bps = 0;
+
+ int width = 0;
+ int height = 0;
+
+ uint32_t freeze_count = 0;
+ uint32_t pause_count = 0;
+ uint32_t total_freezes_duration_ms = 0;
+ uint32_t total_pauses_duration_ms = 0;
+
+ VideoContentType content_type = VideoContentType::UNSPECIFIED;
+
+ // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-estimatedplayouttimestamp
+ absl::optional<int64_t> estimated_playout_ntp_timestamp_ms;
+ int sync_offset_ms = std::numeric_limits<int>::max();
+
+ uint32_t ssrc = 0;
+ std::string c_name;
+ RtpReceiveStats rtp_stats;
+ RtcpPacketTypeCounter rtcp_packet_type_counts;
+ absl::optional<RtpReceiveStats> rtx_rtp_stats;
+
+ // Mozilla modification: Init these.
+ uint32_t rtcp_sender_packets_sent = 0;
+ uint32_t rtcp_sender_octets_sent = 0;
+ int64_t rtcp_sender_ntp_timestamp_ms = 0;
+ int64_t rtcp_sender_remote_ntp_timestamp_ms = 0;
+
+ // Timing frame info: all important timestamps for a full lifetime of a
+ // single 'timing frame'.
+ absl::optional<webrtc::TimingFrameInfo> timing_frame_info;
+ };
+
+ struct Config {
+ private:
+ // Access to the copy constructor is private to force use of the Copy()
+ // method for those exceptional cases where we do use it.
+ Config(const Config&);
+
+ public:
+ Config() = delete;
+ Config(Config&&);
+ Config(Transport* rtcp_send_transport,
+ VideoDecoderFactory* decoder_factory = nullptr);
+ Config& operator=(Config&&);
+ Config& operator=(const Config&) = delete;
+ ~Config();
+
+ // Mostly used by tests. Avoid creating copies if you can.
+ Config Copy() const { return Config(*this); }
+
+ std::string ToString() const;
+
+ // Decoders for every payload that we can receive.
+ std::vector<Decoder> decoders;
+
+ // Ownership stays with WebrtcVideoEngine (delegated from PeerConnection).
+ VideoDecoderFactory* decoder_factory = nullptr;
+
+ // Receive-stream specific RTP settings.
+ struct Rtp : public ReceiveStreamRtpConfig {
+ Rtp();
+ Rtp(const Rtp&);
+ ~Rtp();
+ std::string ToString() const;
+
+ // See NackConfig for description.
+ NackConfig nack;
+
+ // See RtcpMode for description.
+ RtcpMode rtcp_mode = RtcpMode::kCompound;
+
+ // Extended RTCP settings.
+ struct RtcpXr {
+ // True if RTCP Receiver Reference Time Report Block extension
+ // (RFC 3611) should be enabled.
+ bool receiver_reference_time_report = false;
+ } rtcp_xr;
+
+ // How to request keyframes from a remote sender. Applies only if lntf is
+ // disabled.
+ KeyFrameReqMethod keyframe_method = KeyFrameReqMethod::kPliRtcp;
+
+ // See draft-alvestrand-rmcat-remb for information.
+ bool remb = false;
+
+ bool tmmbr = false;
+
+ // See LntfConfig for description.
+ LntfConfig lntf;
+
+ // Payload types for ULPFEC and RED, respectively.
+ int ulpfec_payload_type = -1;
+ int red_payload_type = -1;
+
+ // SSRC for retransmissions.
+ uint32_t rtx_ssrc = 0;
+
+ // Set if the stream is protected using FlexFEC.
+ bool protected_by_flexfec = false;
+
+ // Optional callback sink to support additional packet handlers such as
+ // FlexFec.
+ RtpPacketSinkInterface* packet_sink_ = nullptr;
+
+ // Map from rtx payload type -> media payload type.
+ // For RTX to be enabled, both an SSRC and this mapping are needed.
+ std::map<int, int> rtx_associated_payload_types;
+
+ // Payload types that should be depacketized using raw depacketizer
+ // (payload header will not be parsed and must not be present, additional
+ // meta data is expected to be present in generic frame descriptor
+ // RTP header extension).
+ std::set<int> raw_payload_types;
+
+ RtcpEventObserver* rtcp_event_observer = nullptr;
+ } rtp;
+
+ // Transport for outgoing packets (RTCP).
+ Transport* rtcp_send_transport = nullptr;
+
+ // Must always be set.
+ rtc::VideoSinkInterface<VideoFrame>* renderer = nullptr;
+
+ // Expected delay needed by the renderer, i.e. the frame will be delivered
+ // this many milliseconds, if possible, earlier than the ideal render time.
+ int render_delay_ms = 10;
+
+ // If false, pass frames on to the renderer as soon as they are
+ // available.
+ bool enable_prerenderer_smoothing = true;
+
+ // Identifier for an A/V synchronization group. Empty string to disable.
+ // TODO(pbos): Synchronize streams in a sync group, not just video streams
+ // to one of the audio streams.
+ std::string sync_group;
+
+ // An optional custom frame decryptor that allows the entire frame to be
+ // decrypted in whatever way the caller choses. This is not required by
+ // default.
+ rtc::scoped_refptr<webrtc::FrameDecryptorInterface> frame_decryptor;
+
+ // Per PeerConnection cryptography options.
+ CryptoOptions crypto_options;
+
+ rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer;
+ };
+
+ // TODO(pbos): Add info on currently-received codec to Stats.
+ virtual Stats GetStats() const = 0;
+
+ // Sets a base minimum for the playout delay. Base minimum delay sets lower
+ // bound on minimum delay value determining lower bound on playout delay.
+ //
+ // Returns true if value was successfully set, false overwise.
+ virtual bool SetBaseMinimumPlayoutDelayMs(int delay_ms) = 0;
+
+ // Returns current value of base minimum delay in milliseconds.
+ virtual int GetBaseMinimumPlayoutDelayMs() const = 0;
+
+ // Sets and returns recording state. The old state is moved out
+ // of the video receive stream and returned to the caller, and `state`
+ // is moved in. If the state's callback is set, it will be called with
+ // recordable encoded frames as they arrive.
+ // If `generate_key_frame` is true, the method will generate a key frame.
+ // When the function returns, it's guaranteed that all old callouts
+ // to the returned callback has ceased.
+ // Note: the client should not interpret the returned state's attributes, but
+ // instead treat it as opaque data.
+ virtual RecordingState SetAndGetRecordingState(RecordingState state,
+ bool generate_key_frame) = 0;
+
+ // Cause eventual generation of a key frame from the sender.
+ virtual void GenerateKeyFrame() = 0;
+
+ virtual void SetRtcpMode(RtcpMode mode) = 0;
+
+ // Sets or clears a flexfec RTP sink. This affects `rtp.packet_sink_` and
+ // `rtp.protected_by_flexfec` parts of the configuration. Must be called on
+ // the packet delivery thread.
+ // TODO(bugs.webrtc.org/11993): Packet delivery thread today means `worker
+ // thread` but will be `network thread`.
+ virtual void SetFlexFecProtection(RtpPacketSinkInterface* flexfec_sink) = 0;
+
+ // Turns on/off loss notifications. Must be called on the packet delivery
+ // thread.
+ virtual void SetLossNotificationEnabled(bool enabled) = 0;
+
+ // Modify `rtp.nack.rtp_history_ms` post construction. Setting this value
+ // to 0 disables nack.
+ // Must be called on the packet delivery thread.
+ virtual void SetNackHistory(TimeDelta history) = 0;
+
+ virtual void SetProtectionPayloadTypes(int red_payload_type,
+ int ulpfec_payload_type) = 0;
+
+ virtual void SetRtcpXr(Config::Rtp::RtcpXr rtcp_xr) = 0;
+
+ virtual void SetAssociatedPayloadTypes(
+ std::map<int, int> associated_payload_types) = 0;
+
+ virtual void UpdateRtxSsrc(uint32_t ssrc) = 0;
+
+ protected:
+ virtual ~VideoReceiveStreamInterface() {}
+};
+
+} // namespace webrtc
+
+#endif // CALL_VIDEO_RECEIVE_STREAM_H_
diff --git a/third_party/libwebrtc/call/video_send_stream.cc b/third_party/libwebrtc/call/video_send_stream.cc
new file mode 100644
index 0000000000..e8532a7a26
--- /dev/null
+++ b/third_party/libwebrtc/call/video_send_stream.cc
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "call/video_send_stream.h"
+
+#include <utility>
+
+#include "api/crypto/frame_encryptor_interface.h"
+#include "rtc_base/strings/string_builder.h"
+#include "rtc_base/strings/string_format.h"
+
+namespace webrtc {
+
+namespace {
+
+const char* StreamTypeToString(VideoSendStream::StreamStats::StreamType type) {
+ switch (type) {
+ case VideoSendStream::StreamStats::StreamType::kMedia:
+ return "media";
+ case VideoSendStream::StreamStats::StreamType::kRtx:
+ return "rtx";
+ case VideoSendStream::StreamStats::StreamType::kFlexfec:
+ return "flexfec";
+ }
+ RTC_CHECK_NOTREACHED();
+}
+
+} // namespace
+
+VideoSendStream::StreamStats::StreamStats() = default;
+VideoSendStream::StreamStats::~StreamStats() = default;
+
+std::string VideoSendStream::StreamStats::ToString() const {
+ char buf[1024];
+ rtc::SimpleStringBuilder ss(buf);
+ ss << "type: " << StreamTypeToString(type);
+ if (referenced_media_ssrc.has_value())
+ ss << " (for: " << referenced_media_ssrc.value() << ")";
+ ss << ", ";
+ ss << "width: " << width << ", ";
+ ss << "height: " << height << ", ";
+ ss << "key: " << frame_counts.key_frames << ", ";
+ ss << "delta: " << frame_counts.delta_frames << ", ";
+ ss << "total_bps: " << total_bitrate_bps << ", ";
+ ss << "retransmit_bps: " << retransmit_bitrate_bps << ", ";
+ ss << "avg_delay_ms: " << avg_delay_ms << ", ";
+ ss << "max_delay_ms: " << max_delay_ms << ", ";
+ if (report_block_data) {
+ ss << "cum_loss: " << report_block_data->cumulative_lost() << ", ";
+ ss << "max_ext_seq: "
+ << report_block_data->extended_highest_sequence_number() << ", ";
+ }
+ ss << "nack: " << rtcp_packet_type_counts.nack_packets << ", ";
+ ss << "fir: " << rtcp_packet_type_counts.fir_packets << ", ";
+ ss << "pli: " << rtcp_packet_type_counts.pli_packets;
+ return ss.str();
+}
+
+VideoSendStream::Stats::Stats() = default;
+VideoSendStream::Stats::~Stats() = default;
+
+std::string VideoSendStream::Stats::ToString(int64_t time_ms) const {
+ char buf[2048];
+ rtc::SimpleStringBuilder ss(buf);
+ ss << "VideoSendStream stats: " << time_ms << ", {";
+ ss << "input_fps: " << rtc::StringFormat("%.1f", input_frame_rate) << ", ";
+ ss << "encode_fps: " << encode_frame_rate << ", ";
+ ss << "encode_ms: " << avg_encode_time_ms << ", ";
+ ss << "encode_usage_perc: " << encode_usage_percent << ", ";
+ ss << "target_bps: " << target_media_bitrate_bps << ", ";
+ ss << "media_bps: " << media_bitrate_bps << ", ";
+ ss << "suspended: " << (suspended ? "true" : "false") << ", ";
+ ss << "bw_adapted_res: " << (bw_limited_resolution ? "true" : "false")
+ << ", ";
+ ss << "cpu_adapted_res: " << (cpu_limited_resolution ? "true" : "false")
+ << ", ";
+ ss << "bw_adapted_fps: " << (bw_limited_framerate ? "true" : "false") << ", ";
+ ss << "cpu_adapted_fps: " << (cpu_limited_framerate ? "true" : "false")
+ << ", ";
+ ss << "#cpu_adaptations: " << number_of_cpu_adapt_changes << ", ";
+ ss << "#quality_adaptations: " << number_of_quality_adapt_changes;
+ ss << '}';
+ for (const auto& substream : substreams) {
+ if (substream.second.type ==
+ VideoSendStream::StreamStats::StreamType::kMedia) {
+ ss << " {ssrc: " << substream.first << ", ";
+ ss << substream.second.ToString();
+ ss << '}';
+ }
+ }
+ return ss.str();
+}
+
+VideoSendStream::Config::Config(const Config&) = default;
+VideoSendStream::Config::Config(Config&&) = default;
+VideoSendStream::Config::Config(Transport* send_transport)
+ : rtp(),
+ encoder_settings(VideoEncoder::Capabilities(rtp.lntf.enabled)),
+ send_transport(send_transport) {}
+
+VideoSendStream::Config& VideoSendStream::Config::operator=(Config&&) = default;
+VideoSendStream::Config::Config::~Config() = default;
+
+std::string VideoSendStream::Config::ToString() const {
+ char buf[2 * 1024];
+ rtc::SimpleStringBuilder ss(buf);
+ ss << "{encoder_settings: { experiment_cpu_load_estimator: "
+ << (encoder_settings.experiment_cpu_load_estimator ? "on" : "off") << "}}";
+ ss << ", rtp: " << rtp.ToString();
+ ss << ", rtcp_report_interval_ms: " << rtcp_report_interval_ms;
+ ss << ", send_transport: " << (send_transport ? "(Transport)" : "nullptr");
+ ss << ", render_delay_ms: " << render_delay_ms;
+ ss << ", target_delay_ms: " << target_delay_ms;
+ ss << ", suspend_below_min_bitrate: "
+ << (suspend_below_min_bitrate ? "on" : "off");
+ ss << '}';
+ return ss.str();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/call/video_send_stream.h b/third_party/libwebrtc/call/video_send_stream.h
new file mode 100644
index 0000000000..1a0261be1b
--- /dev/null
+++ b/third_party/libwebrtc/call/video_send_stream.h
@@ -0,0 +1,274 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef CALL_VIDEO_SEND_STREAM_H_
+#define CALL_VIDEO_SEND_STREAM_H_
+
+#include <stdint.h>
+
+#include <map>
+#include <string>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/adaptation/resource.h"
+#include "api/call/transport.h"
+#include "api/crypto/crypto_options.h"
+#include "api/frame_transformer_interface.h"
+#include "api/rtp_parameters.h"
+#include "api/rtp_sender_setparameters_callback.h"
+#include "api/scoped_refptr.h"
+#include "api/video/video_content_type.h"
+#include "api/video/video_frame.h"
+#include "api/video/video_sink_interface.h"
+#include "api/video/video_source_interface.h"
+#include "api/video/video_stream_encoder_settings.h"
+#include "api/video_codecs/scalability_mode.h"
+#include "call/rtp_config.h"
+#include "common_video/frame_counts.h"
+#include "common_video/include/quality_limitation_reason.h"
+#include "modules/rtp_rtcp/include/report_block_data.h"
+#include "modules/rtp_rtcp/include/rtcp_statistics.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "video/config/video_encoder_config.h"
+
+namespace webrtc {
+
+class FrameEncryptorInterface;
+
+class VideoSendStream {
+ public:
+ // Multiple StreamStats objects are present if simulcast is used (multiple
+ // kMedia streams) or if RTX or FlexFEC is negotiated. Multiple SVC layers, on
+ // the other hand, does not cause additional StreamStats.
+ struct StreamStats {
+ enum class StreamType {
+ // A media stream is an RTP stream for audio or video. Retransmissions and
+ // FEC is either sent over the same SSRC or negotiated to be sent over
+ // separate SSRCs, in which case separate StreamStats objects exist with
+ // references to this media stream's SSRC.
+ kMedia,
+ // RTX streams are streams dedicated to retransmissions. They have a
+ // dependency on a single kMedia stream: `referenced_media_ssrc`.
+ kRtx,
+ // FlexFEC streams are streams dedicated to FlexFEC. They have a
+ // dependency on a single kMedia stream: `referenced_media_ssrc`.
+ kFlexfec,
+ };
+
+ StreamStats();
+ ~StreamStats();
+
+ std::string ToString() const;
+
+ StreamType type = StreamType::kMedia;
+ // If `type` is kRtx or kFlexfec this value is present. The referenced SSRC
+ // is the kMedia stream that this stream is performing retransmissions or
+ // FEC for. If `type` is kMedia, this value is null.
+ absl::optional<uint32_t> referenced_media_ssrc;
+ FrameCounts frame_counts;
+ int width = 0;
+ int height = 0;
+ // TODO(holmer): Move bitrate_bps out to the webrtc::Call layer.
+ int total_bitrate_bps = 0;
+ int retransmit_bitrate_bps = 0;
+ // `avg_delay_ms` and `max_delay_ms` are only used in tests. Consider
+ // deleting.
+ int avg_delay_ms = 0;
+ int max_delay_ms = 0;
+ StreamDataCounters rtp_stats;
+ RtcpPacketTypeCounter rtcp_packet_type_counts;
+ // A snapshot of the most recent Report Block with additional data of
+ // interest to statistics. Used to implement RTCRemoteInboundRtpStreamStats.
+ absl::optional<ReportBlockData> report_block_data;
+ double encode_frame_rate = 0.0;
+ int frames_encoded = 0;
+ absl::optional<uint64_t> qp_sum;
+ uint64_t total_encode_time_ms = 0;
+ uint64_t total_encoded_bytes_target = 0;
+ uint32_t huge_frames_sent = 0;
+ absl::optional<ScalabilityMode> scalability_mode;
+ };
+
+ struct Stats {
+ Stats();
+ ~Stats();
+ std::string ToString(int64_t time_ms) const;
+ absl::optional<std::string> encoder_implementation_name;
+ double input_frame_rate = 0;
+ int encode_frame_rate = 0;
+ int avg_encode_time_ms = 0;
+ int encode_usage_percent = 0;
+ uint32_t frames_encoded = 0;
+ // https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-totalencodetime
+ uint64_t total_encode_time_ms = 0;
+ // https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-totalencodedbytestarget
+ uint64_t total_encoded_bytes_target = 0;
+ uint32_t frames = 0;
+ uint32_t frames_dropped_by_capturer = 0;
+ uint32_t frames_dropped_by_encoder_queue = 0;
+ uint32_t frames_dropped_by_rate_limiter = 0;
+ uint32_t frames_dropped_by_congestion_window = 0;
+ uint32_t frames_dropped_by_encoder = 0;
+ // Bitrate the encoder is currently configured to use due to bandwidth
+ // limitations.
+ int target_media_bitrate_bps = 0;
+ // Bitrate the encoder is actually producing.
+ int media_bitrate_bps = 0;
+ bool suspended = false;
+ bool bw_limited_resolution = false;
+ bool cpu_limited_resolution = false;
+ bool bw_limited_framerate = false;
+ bool cpu_limited_framerate = false;
+ // https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-qualitylimitationreason
+ QualityLimitationReason quality_limitation_reason =
+ QualityLimitationReason::kNone;
+ // https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-qualitylimitationdurations
+ std::map<QualityLimitationReason, int64_t> quality_limitation_durations_ms;
+ // https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-qualitylimitationresolutionchanges
+ uint32_t quality_limitation_resolution_changes = 0;
+ // Total number of times resolution as been requested to be changed due to
+ // CPU/quality adaptation.
+ int number_of_cpu_adapt_changes = 0;
+ int number_of_quality_adapt_changes = 0;
+ bool has_entered_low_resolution = false;
+ std::map<uint32_t, StreamStats> substreams;
+ webrtc::VideoContentType content_type =
+ webrtc::VideoContentType::UNSPECIFIED;
+ uint32_t frames_sent = 0;
+ uint32_t huge_frames_sent = 0;
+ absl::optional<bool> power_efficient_encoder;
+ };
+
+ struct Config {
+ public:
+ Config() = delete;
+ Config(Config&&);
+ explicit Config(Transport* send_transport);
+
+ Config& operator=(Config&&);
+ Config& operator=(const Config&) = delete;
+
+ ~Config();
+
+ // Mostly used by tests. Avoid creating copies if you can.
+ Config Copy() const { return Config(*this); }
+
+ std::string ToString() const;
+
+ RtpConfig rtp;
+
+ VideoStreamEncoderSettings encoder_settings;
+
+ // Time interval between RTCP report for video
+ int rtcp_report_interval_ms = 1000;
+
+ // Transport for outgoing packets.
+ Transport* send_transport = nullptr;
+
+ // Expected delay needed by the renderer, i.e. the frame will be delivered
+ // this many milliseconds, if possible, earlier than expected render time.
+ // Only valid if `local_renderer` is set.
+ int render_delay_ms = 0;
+
+ // Target delay in milliseconds. A positive value indicates this stream is
+ // used for streaming instead of a real-time call.
+ int target_delay_ms = 0;
+
+ // True if the stream should be suspended when the available bitrate fall
+ // below the minimum configured bitrate. If this variable is false, the
+ // stream may send at a rate higher than the estimated available bitrate.
+ bool suspend_below_min_bitrate = false;
+
+ // Enables periodic bandwidth probing in application-limited region.
+ bool periodic_alr_bandwidth_probing = false;
+
+ // An optional custom frame encryptor that allows the entire frame to be
+ // encrypted in whatever way the caller chooses. This is not required by
+ // default.
+ rtc::scoped_refptr<webrtc::FrameEncryptorInterface> frame_encryptor;
+
+ // An optional encoder selector provided by the user.
+ // Overrides VideoEncoderFactory::GetEncoderSelector().
+ // Owned by RtpSenderBase.
+ VideoEncoderFactory::EncoderSelectorInterface* encoder_selector = nullptr;
+
+ // Per PeerConnection cryptography options.
+ CryptoOptions crypto_options;
+
+ rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer;
+
+ private:
+ // Access to the copy constructor is private to force use of the Copy()
+ // method for those exceptional cases where we do use it.
+ Config(const Config&);
+ };
+
+ // Updates the sending state for all simulcast layers that the video send
+ // stream owns. This can mean updating the activity one or for multiple
+ // layers. The ordering of active layers is the order in which the
+ // rtp modules are stored in the VideoSendStream.
+ // Note: This starts stream activity if it is inactive and one of the layers
+ // is active. This stops stream activity if it is active and all layers are
+ // inactive.
+ // `active_layers` should have the same size as the number of configured
+ // simulcast layers or one if only one rtp stream is used.
+ virtual void StartPerRtpStream(std::vector<bool> active_layers) = 0;
+
+ // Starts stream activity.
+ // When a stream is active, it can receive, process and deliver packets.
+ // Prefer to use StartPerRtpStream.
+ virtual void Start() = 0;
+
+ // Stops stream activity.
+ // When a stream is stopped, it can't receive, process or deliver packets.
+ virtual void Stop() = 0;
+
+ // Accessor for determining if the stream is active. This is an inexpensive
+ // call that must be made on the same thread as `Start()` and `Stop()` methods
+ // are called on and will return `true` iff activity has been started either
+ // via `Start()` or `StartPerRtpStream()`. If activity is either
+ // stopped or is in the process of being stopped as a result of a call to
+ // either `Stop()` or `StartPerRtpStream()` where all layers were
+ // deactivated, the return value will be `false`.
+ virtual bool started() = 0;
+
+ // If the resource is overusing, the VideoSendStream will try to reduce
+ // resolution or frame rate until no resource is overusing.
+ // TODO(https://crbug.com/webrtc/11565): When the ResourceAdaptationProcessor
+ // is moved to Call this method could be deleted altogether in favor of
+ // Call-level APIs only.
+ virtual void AddAdaptationResource(rtc::scoped_refptr<Resource> resource) = 0;
+ virtual std::vector<rtc::scoped_refptr<Resource>>
+ GetAdaptationResources() = 0;
+
+ virtual void SetSource(
+ rtc::VideoSourceInterface<webrtc::VideoFrame>* source,
+ const DegradationPreference& degradation_preference) = 0;
+
+ // Set which streams to send. Must have at least as many SSRCs as configured
+ // in the config. Encoder settings are passed on to the encoder instance along
+ // with the VideoStream settings.
+ virtual void ReconfigureVideoEncoder(VideoEncoderConfig config) = 0;
+
+ virtual void ReconfigureVideoEncoder(VideoEncoderConfig config,
+ SetParametersCallback callback) = 0;
+
+ virtual Stats GetStats() = 0;
+
+ virtual void GenerateKeyFrame(const std::vector<std::string>& rids) = 0;
+
+ protected:
+ virtual ~VideoSendStream() {}
+};
+
+} // namespace webrtc
+
+#endif // CALL_VIDEO_SEND_STREAM_H_
diff --git a/third_party/libwebrtc/call/video_stream_api_gn/moz.build b/third_party/libwebrtc/call/video_stream_api_gn/moz.build
new file mode 100644
index 0000000000..f2ec65de01
--- /dev/null
+++ b/third_party/libwebrtc/call/video_stream_api_gn/moz.build
@@ -0,0 +1,237 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/call/video_receive_stream.cc",
+ "/third_party/libwebrtc/call/video_send_stream.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["RTC_ENABLE_WIN_WGC"] = True
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["TARGET_CPU"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["TARGET_CPU"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["TARGET_CPU"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["TARGET_CPU"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["TARGET_CPU"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["TARGET_CPU"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android" and CONFIG["TARGET_CPU"] == "arm":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["OS_TARGET"] == "Android" and CONFIG["TARGET_CPU"] == "x86":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "aarch64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "arm":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "x86":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "x86_64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("video_stream_api_gn")