summaryrefslogtreecommitdiffstats
path: root/third_party/libwebrtc/media
diff options
context:
space:
mode:
Diffstat (limited to 'third_party/libwebrtc/media')
-rw-r--r--third_party/libwebrtc/media/BUILD.gn996
-rw-r--r--third_party/libwebrtc/media/DEPS37
-rw-r--r--third_party/libwebrtc/media/OWNERS13
-rw-r--r--third_party/libwebrtc/media/base/adapted_video_track_source.cc126
-rw-r--r--third_party/libwebrtc/media/base/adapted_video_track_source.h104
-rw-r--r--third_party/libwebrtc/media/base/audio_source.h58
-rw-r--r--third_party/libwebrtc/media/base/codec.cc494
-rw-r--r--third_party/libwebrtc/media/base/codec.h229
-rw-r--r--third_party/libwebrtc/media/base/codec_unittest.cc538
-rw-r--r--third_party/libwebrtc/media/base/fake_frame_source.cc88
-rw-r--r--third_party/libwebrtc/media/base/fake_frame_source.h50
-rw-r--r--third_party/libwebrtc/media/base/fake_media_engine.cc705
-rw-r--r--third_party/libwebrtc/media/base/fake_media_engine.h876
-rw-r--r--third_party/libwebrtc/media/base/fake_network_interface.h232
-rw-r--r--third_party/libwebrtc/media/base/fake_rtp.cc71
-rw-r--r--third_party/libwebrtc/media/base/fake_rtp.h301
-rw-r--r--third_party/libwebrtc/media/base/fake_video_renderer.cc87
-rw-r--r--third_party/libwebrtc/media/base/fake_video_renderer.h73
-rw-r--r--third_party/libwebrtc/media/base/media_channel.h1002
-rw-r--r--third_party/libwebrtc/media/base/media_channel_impl.cc310
-rw-r--r--third_party/libwebrtc/media/base/media_channel_impl.h195
-rw-r--r--third_party/libwebrtc/media/base/media_config.h98
-rw-r--r--third_party/libwebrtc/media/base/media_constants.cc143
-rw-r--r--third_party/libwebrtc/media/base/media_constants.h164
-rw-r--r--third_party/libwebrtc/media/base/media_engine.cc291
-rw-r--r--third_party/libwebrtc/media/base/media_engine.h239
-rw-r--r--third_party/libwebrtc/media/base/media_engine_unittest.cc86
-rw-r--r--third_party/libwebrtc/media/base/rid_description.cc28
-rw-r--r--third_party/libwebrtc/media/base/rid_description.h93
-rw-r--r--third_party/libwebrtc/media/base/rtp_utils.cc401
-rw-r--r--third_party/libwebrtc/media/base/rtp_utils.h80
-rw-r--r--third_party/libwebrtc/media/base/rtp_utils_unittest.cc303
-rw-r--r--third_party/libwebrtc/media/base/sdp_video_format_utils.cc121
-rw-r--r--third_party/libwebrtc/media/base/sdp_video_format_utils.h52
-rw-r--r--third_party/libwebrtc/media/base/sdp_video_format_utils_unittest.cc115
-rw-r--r--third_party/libwebrtc/media/base/stream_params.cc240
-rw-r--r--third_party/libwebrtc/media/base/stream_params.h321
-rw-r--r--third_party/libwebrtc/media/base/stream_params_unittest.cc301
-rw-r--r--third_party/libwebrtc/media/base/test_utils.cc55
-rw-r--r--third_party/libwebrtc/media/base/test_utils.h56
-rw-r--r--third_party/libwebrtc/media/base/turn_utils.cc126
-rw-r--r--third_party/libwebrtc/media/base/turn_utils.h30
-rw-r--r--third_party/libwebrtc/media/base/turn_utils_unittest.cc127
-rw-r--r--third_party/libwebrtc/media/base/video_adapter.cc470
-rw-r--r--third_party/libwebrtc/media/base/video_adapter.h172
-rw-r--r--third_party/libwebrtc/media/base/video_adapter_unittest.cc1336
-rw-r--r--third_party/libwebrtc/media/base/video_broadcaster.cc214
-rw-r--r--third_party/libwebrtc/media/base/video_broadcaster.h82
-rw-r--r--third_party/libwebrtc/media/base/video_broadcaster_unittest.cc438
-rw-r--r--third_party/libwebrtc/media/base/video_common.cc97
-rw-r--r--third_party/libwebrtc/media/base/video_common.h224
-rw-r--r--third_party/libwebrtc/media/base/video_common_unittest.cc108
-rw-r--r--third_party/libwebrtc/media/base/video_source_base.cc104
-rw-r--r--third_party/libwebrtc/media/base/video_source_base.h83
-rw-r--r--third_party/libwebrtc/media/codec_gn/moz.build232
-rw-r--r--third_party/libwebrtc/media/engine/adm_helpers.cc82
-rw-r--r--third_party/libwebrtc/media/engine/adm_helpers.h25
-rw-r--r--third_party/libwebrtc/media/engine/fake_video_codec_factory.cc69
-rw-r--r--third_party/libwebrtc/media/engine/fake_video_codec_factory.h53
-rw-r--r--third_party/libwebrtc/media/engine/fake_webrtc_call.cc774
-rw-r--r--third_party/libwebrtc/media/engine/fake_webrtc_call.h519
-rw-r--r--third_party/libwebrtc/media/engine/fake_webrtc_video_engine.cc304
-rw-r--r--third_party/libwebrtc/media/engine/fake_webrtc_video_engine.h144
-rw-r--r--third_party/libwebrtc/media/engine/internal_decoder_factory.cc106
-rw-r--r--third_party/libwebrtc/media/engine/internal_decoder_factory.h35
-rw-r--r--third_party/libwebrtc/media/engine/internal_decoder_factory_unittest.cc163
-rw-r--r--third_party/libwebrtc/media/engine/internal_encoder_factory.cc66
-rw-r--r--third_party/libwebrtc/media/engine/internal_encoder_factory.h34
-rw-r--r--third_party/libwebrtc/media/engine/internal_encoder_factory_unittest.cc140
-rw-r--r--third_party/libwebrtc/media/engine/multiplex_codec_factory.cc114
-rw-r--r--third_party/libwebrtc/media/engine/multiplex_codec_factory.h79
-rw-r--r--third_party/libwebrtc/media/engine/multiplex_codec_factory_unittest.cc47
-rw-r--r--third_party/libwebrtc/media/engine/null_webrtc_video_engine.h54
-rw-r--r--third_party/libwebrtc/media/engine/null_webrtc_video_engine_unittest.cc47
-rw-r--r--third_party/libwebrtc/media/engine/payload_type_mapper.cc160
-rw-r--r--third_party/libwebrtc/media/engine/payload_type_mapper.h57
-rw-r--r--third_party/libwebrtc/media/engine/payload_type_mapper_unittest.cc139
-rw-r--r--third_party/libwebrtc/media/engine/simulcast_encoder_adapter.cc981
-rw-r--r--third_party/libwebrtc/media/engine/simulcast_encoder_adapter.h200
-rw-r--r--third_party/libwebrtc/media/engine/simulcast_encoder_adapter_unittest.cc1902
-rw-r--r--third_party/libwebrtc/media/engine/webrtc_media_engine.cc223
-rw-r--r--third_party/libwebrtc/media/engine/webrtc_media_engine.h89
-rw-r--r--third_party/libwebrtc/media/engine/webrtc_media_engine_defaults.cc43
-rw-r--r--third_party/libwebrtc/media/engine/webrtc_media_engine_defaults.h24
-rw-r--r--third_party/libwebrtc/media/engine/webrtc_media_engine_unittest.cc337
-rw-r--r--third_party/libwebrtc/media/engine/webrtc_video_engine.cc3943
-rw-r--r--third_party/libwebrtc/media/engine/webrtc_video_engine.h906
-rw-r--r--third_party/libwebrtc/media/engine/webrtc_video_engine_unittest.cc10194
-rw-r--r--third_party/libwebrtc/media/engine/webrtc_voice_engine.cc2725
-rw-r--r--third_party/libwebrtc/media/engine/webrtc_voice_engine.h522
-rw-r--r--third_party/libwebrtc/media/engine/webrtc_voice_engine_unittest.cc4017
-rw-r--r--third_party/libwebrtc/media/media_channel_gn/moz.build221
-rw-r--r--third_party/libwebrtc/media/media_channel_impl_gn/moz.build205
-rw-r--r--third_party/libwebrtc/media/media_constants_gn/moz.build221
-rw-r--r--third_party/libwebrtc/media/rid_description_gn/moz.build205
-rw-r--r--third_party/libwebrtc/media/rtc_media_base_gn/moz.build240
-rw-r--r--third_party/libwebrtc/media/rtc_media_config_gn/moz.build205
-rw-r--r--third_party/libwebrtc/media/rtc_simulcast_encoder_adapter_gn/moz.build237
-rw-r--r--third_party/libwebrtc/media/rtp_utils_gn/moz.build205
-rw-r--r--third_party/libwebrtc/media/sctp/OWNERS3
-rw-r--r--third_party/libwebrtc/media/sctp/dcsctp_transport.cc668
-rw-r--r--third_party/libwebrtc/media/sctp/dcsctp_transport.h141
-rw-r--r--third_party/libwebrtc/media/sctp/dcsctp_transport_unittest.cc251
-rw-r--r--third_party/libwebrtc/media/sctp/sctp_transport_factory.cc38
-rw-r--r--third_party/libwebrtc/media/sctp/sctp_transport_factory.h35
-rw-r--r--third_party/libwebrtc/media/sctp/sctp_transport_internal.h150
-rw-r--r--third_party/libwebrtc/media/stream_params_gn/moz.build205
107 files changed, 46162 insertions, 0 deletions
diff --git a/third_party/libwebrtc/media/BUILD.gn b/third_party/libwebrtc/media/BUILD.gn
new file mode 100644
index 0000000000..97ad4a889a
--- /dev/null
+++ b/third_party/libwebrtc/media/BUILD.gn
@@ -0,0 +1,996 @@
+# Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("//build/config/linux/pkg_config.gni")
+import("//third_party/libaom/options.gni")
+import("../webrtc.gni")
+
+group("media") {
+ deps = []
+ if (!build_with_mozilla) {
+ deps += [
+ ":rtc_media",
+ ":rtc_media_base",
+ ]
+ }
+}
+
+config("rtc_media_defines_config") {
+ defines = [ "HAVE_WEBRTC_VIDEO" ]
+}
+
+rtc_source_set("rtc_media_config") {
+ visibility = [ "*" ]
+ sources = [ "base/media_config.h" ]
+}
+
+rtc_library("rtc_sdp_video_format_utils") {
+ visibility = [ "*" ]
+ sources = [
+ "base/sdp_video_format_utils.cc",
+ "base/sdp_video_format_utils.h",
+ ]
+
+ deps = [
+ "../api/video_codecs:video_codecs_api",
+ "../rtc_base:checks",
+ "../rtc_base:stringutils",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+}
+
+rtc_library("rtc_media_base") {
+ visibility = [ "*" ]
+ defines = []
+ libs = []
+ deps = [
+ ":codec",
+ ":media_channel",
+ ":media_channel_impl",
+ ":rid_description",
+ ":rtc_media_config",
+ ":rtp_utils",
+ ":stream_params",
+ "../api:array_view",
+ "../api:audio_options_api",
+ "../api:call_api",
+ "../api:field_trials_view",
+ "../api:frame_transformer_interface",
+ "../api:media_stream_interface",
+ "../api:rtc_error",
+ "../api:rtp_parameters",
+ "../api:rtp_sender_setparameters_callback",
+ "../api:scoped_refptr",
+ "../api:sequence_checker",
+ "../api:transport_api",
+ "../api/audio:audio_frame_processor",
+ "../api/audio_codecs:audio_codecs_api",
+ "../api/crypto:frame_decryptor_interface",
+ "../api/crypto:frame_encryptor_interface",
+ "../api/crypto:options",
+ "../api/task_queue",
+ "../api/task_queue:pending_task_safety_flag",
+ "../api/transport:datagram_transport_interface",
+ "../api/transport:stun_types",
+ "../api/transport/rtp:rtp_source",
+ "../api/units:time_delta",
+ "../api/video:recordable_encoded_frame",
+ "../api/video:video_bitrate_allocation",
+ "../api/video:video_bitrate_allocator_factory",
+ "../api/video:video_frame",
+ "../api/video:video_rtp_headers",
+ "../api/video_codecs:scalability_mode",
+ "../api/video_codecs:video_codecs_api",
+ "../call:call_interfaces",
+ "../call:video_stream_api",
+ "../common_video",
+ "../modules/async_audio_processing",
+ "../modules/audio_device",
+ "../modules/audio_processing:audio_processing_statistics",
+ "../modules/rtp_rtcp:rtp_rtcp_format",
+ "../rtc_base:async_packet_socket",
+ "../rtc_base:buffer",
+ "../rtc_base:byte_order",
+ "../rtc_base:checks",
+ "../rtc_base:copy_on_write_buffer",
+ "../rtc_base:dscp",
+ "../rtc_base:logging",
+ "../rtc_base:macromagic",
+ "../rtc_base:network_route",
+ "../rtc_base:rtc_task_queue",
+ "../rtc_base:sanitizer",
+ "../rtc_base:socket",
+ "../rtc_base:stringutils",
+ "../rtc_base:timeutils",
+ "../rtc_base:unique_id_generator",
+ "../rtc_base/network:sent_packet",
+ "../rtc_base/synchronization:mutex",
+ "../rtc_base/system:file_wrapper",
+ "../rtc_base/system:no_unique_address",
+ "../rtc_base/system:rtc_export",
+ "../rtc_base/third_party/sigslot",
+ "../system_wrappers:field_trial",
+ "../video/config:encoder_config",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/algorithm:container",
+ "//third_party/abseil-cpp/absl/container:inlined_vector",
+ "//third_party/abseil-cpp/absl/functional:any_invocable",
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+ sources = [
+ "base/adapted_video_track_source.cc",
+ "base/adapted_video_track_source.h",
+ "base/audio_source.h",
+ "base/media_engine.cc",
+ "base/media_engine.h",
+ "base/video_adapter.cc",
+ "base/video_adapter.h",
+ "base/video_broadcaster.cc",
+ "base/video_broadcaster.h",
+ "base/video_common.cc",
+ "base/video_common.h",
+ "base/video_source_base.cc",
+ "base/video_source_base.h",
+ ]
+ if (build_with_mozilla) {
+ sources -= [
+ "base/adapted_video_track_source.cc",
+ "base/adapted_video_track_source.h",
+ "base/audio_source.h",
+ "base/media_engine.cc",
+ "base/media_engine.h",
+ ]
+ }
+}
+
+rtc_library("media_channel_impl") {
+if (!build_with_mozilla) {
+ sources = [
+ "base/media_channel_impl.cc",
+ "base/media_channel_impl.h",
+ ]
+ deps = [
+ ":codec",
+ ":media_channel",
+ ":rtp_utils",
+ ":stream_params",
+ "../api:audio_options_api",
+ "../api:call_api",
+ "../api:frame_transformer_interface",
+ "../api:media_stream_interface",
+ "../api:rtc_error",
+ "../api:rtp_headers",
+ "../api:rtp_parameters",
+ "../api:rtp_sender_interface",
+ "../api:scoped_refptr",
+ "../api:sequence_checker",
+ "../api:transport_api",
+ "../api/crypto:frame_decryptor_interface",
+ "../api/crypto:frame_encryptor_interface",
+ "../api/task_queue",
+ "../api/task_queue:pending_task_safety_flag",
+ "../api/transport/rtp:rtp_source",
+ "../api/units:time_delta",
+ "../api/video:recordable_encoded_frame",
+ "../api/video:video_frame",
+ "../api/video:video_rtp_headers",
+ "../api/video_codecs:scalability_mode",
+ "../api/video_codecs:video_codecs_api",
+ "../common_video",
+ "../modules/rtp_rtcp:rtp_rtcp_format",
+ "../rtc_base:async_packet_socket",
+ "../rtc_base:checks",
+ "../rtc_base:copy_on_write_buffer",
+ "../rtc_base:dscp",
+ "../rtc_base:logging",
+ "../rtc_base:macromagic",
+ "../rtc_base:network_route",
+ "../rtc_base:socket",
+ "../rtc_base/network:sent_packet",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/functional:any_invocable",
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+}
+
+rtc_source_set("media_channel") {
+ sources = [ "base/media_channel.h" ]
+ deps = [
+ ":codec",
+ ":media_constants",
+ ":rtp_utils",
+ ":stream_params",
+ "../api:audio_options_api",
+ "../api:call_api",
+ "../api:frame_transformer_interface",
+ "../api:media_stream_interface",
+ "../api:rtc_error",
+ "../api:rtp_headers",
+ "../api:rtp_parameters",
+ "../api:rtp_sender_interface",
+ "../api:scoped_refptr",
+ "../api/audio_codecs:audio_codecs_api",
+ "../api/crypto:frame_decryptor_interface",
+ "../api/crypto:frame_encryptor_interface",
+ "../api/task_queue:pending_task_safety_flag",
+ "../api/transport:datagram_transport_interface",
+ "../api/transport/rtp:rtp_source",
+ "../api/units:time_delta",
+ "../api/units:timestamp",
+ "../api/video:recordable_encoded_frame",
+ "../api/video:video_frame",
+ "../api/video:video_rtp_headers",
+ "../api/video_codecs:scalability_mode",
+ "../api/video_codecs:video_codecs_api",
+ "../call:video_stream_api",
+ "../common_video",
+ "../modules/audio_processing:audio_processing_statistics",
+ "../modules/rtp_rtcp:rtp_rtcp_format",
+ "../rtc_base:async_packet_socket",
+ "../rtc_base:buffer",
+ "../rtc_base:checks",
+ "../rtc_base:copy_on_write_buffer",
+ "../rtc_base:dscp",
+ "../rtc_base:logging",
+ "../rtc_base:network_route",
+ "../rtc_base:socket",
+ "../rtc_base:stringutils",
+ "../rtc_base/network:sent_packet",
+ "../video/config:encoder_config",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/functional:any_invocable",
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+rtc_library("codec") {
+ sources = [
+ "base/codec.cc",
+ "base/codec.h",
+ ]
+ deps = [
+ ":media_constants",
+ "../api:field_trials_view",
+ "../api:rtp_parameters",
+ "../api/audio_codecs:audio_codecs_api",
+ "../api/video_codecs:video_codecs_api",
+ "../rtc_base:checks",
+ "../rtc_base:logging",
+ "../rtc_base:stringutils",
+ "../rtc_base/system:rtc_export",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/algorithm:container",
+ "//third_party/abseil-cpp/absl/container:inlined_vector",
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+rtc_library("rtp_utils") {
+if (!build_with_mozilla) {
+ sources = [
+ "base/rtp_utils.cc",
+ "base/rtp_utils.h",
+ ]
+ deps = [
+ ":turn_utils",
+ "../api:array_view",
+ "../modules/rtp_rtcp:rtp_rtcp_format",
+ "../rtc_base:async_packet_socket",
+ "../rtc_base:byte_order",
+ "../rtc_base:checks",
+ "../rtc_base:ssl",
+ "../rtc_base/system:rtc_export",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/strings" ]
+}
+}
+
+rtc_library("stream_params") {
+if (!build_with_mozilla) {
+ sources = [
+ "base/stream_params.cc",
+ "base/stream_params.h",
+ ]
+ deps = [
+ ":rid_description",
+ "../api:array_view",
+ "../rtc_base:stringutils",
+ "../rtc_base:unique_id_generator",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/algorithm:container" ]
+}
+}
+
+rtc_library("media_constants") {
+ sources = [
+ "base/media_constants.cc",
+ "base/media_constants.h",
+ ]
+ deps = [ "../rtc_base/system:rtc_export" ]
+}
+
+rtc_library("turn_utils") {
+if (!build_with_mozilla) {
+ sources = [
+ "base/turn_utils.cc",
+ "base/turn_utils.h",
+ ]
+ deps = [
+ "../api/transport:stun_types",
+ "../rtc_base:byte_order",
+ "../rtc_base/system:rtc_export",
+ ]
+}
+}
+
+rtc_library("rid_description") {
+if (!build_with_mozilla) {
+ sources = [
+ "base/rid_description.cc",
+ "base/rid_description.h",
+ ]
+ deps = []
+}
+}
+
+rtc_library("rtc_simulcast_encoder_adapter") {
+ visibility = [ "*" ]
+ defines = []
+ libs = []
+ sources = [
+ "engine/simulcast_encoder_adapter.cc",
+ "engine/simulcast_encoder_adapter.h",
+ ]
+ deps = [
+ ":rtc_media_base",
+ "../api:fec_controller_api",
+ "../api:field_trials_view",
+ "../api:scoped_refptr",
+ "../api:sequence_checker",
+ "../api/transport:field_trial_based_config",
+ "../api/video:video_codec_constants",
+ "../api/video:video_frame",
+ "../api/video:video_rtp_headers",
+ "../api/video_codecs:rtc_software_fallback_wrappers",
+ "../api/video_codecs:video_codecs_api",
+ "../call:video_stream_api",
+ "../common_video",
+ "../media:media_constants",
+ "../modules/video_coding:video_codec_interface",
+ "../modules/video_coding:video_coding_utility",
+ "../rtc_base:checks",
+ "../rtc_base:logging",
+ "../rtc_base/experiments:encoder_info_settings",
+ "../rtc_base/experiments:rate_control_settings",
+ "../rtc_base/system:no_unique_address",
+ "../rtc_base/system:rtc_export",
+ "../system_wrappers",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/algorithm:container",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+rtc_library("rtc_internal_video_codecs") {
+ visibility = [ "*" ]
+ allow_poison = [ "software_video_codecs" ]
+ defines = []
+ libs = []
+ deps = [
+ ":codec",
+ ":media_constants",
+ ":rtc_media_base",
+ ":rtc_simulcast_encoder_adapter",
+ "../api/video:encoded_image",
+ "../api/video:video_bitrate_allocation",
+ "../api/video:video_frame",
+ "../api/video:video_rtp_headers",
+ "../api/video_codecs:rtc_software_fallback_wrappers",
+ "../api/video_codecs:video_codecs_api",
+ "../api/video_codecs:video_encoder_factory_template",
+ "../api/video_codecs:video_encoder_factory_template_libvpx_vp8_adapter",
+ "../api/video_codecs:video_encoder_factory_template_libvpx_vp9_adapter",
+ "../api/video_codecs:video_encoder_factory_template_open_h264_adapter",
+ "../call:call_interfaces",
+ "../call:video_stream_api",
+ "../modules/video_coding:video_codec_interface",
+ "../modules/video_coding:webrtc_h264",
+ "../modules/video_coding:webrtc_multiplex",
+ "../modules/video_coding:webrtc_vp8",
+ "../modules/video_coding:webrtc_vp9",
+ "../rtc_base:checks",
+ "../rtc_base:logging",
+ "../rtc_base/system:rtc_export",
+ "../system_wrappers:field_trial",
+ "../test:fake_video_codecs",
+ ]
+ if (build_with_mozilla) {
+ deps -= [
+ "../modules/video_coding:webrtc_multiplex", # See Bug 1820869.
+ "../test:fake_video_codecs",
+ ]
+ }
+
+ if (enable_libaom) {
+ defines += [ "RTC_USE_LIBAOM_AV1_ENCODER" ]
+ deps += [
+ "../api/video_codecs:video_encoder_factory_template_libaom_av1_adapter",
+ ]
+ }
+
+ if (rtc_include_dav1d_in_internal_decoder_factory) {
+ deps += [ "../modules/video_coding/codecs/av1:dav1d_decoder" ]
+ }
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+ sources = [
+ "engine/fake_video_codec_factory.cc",
+ "engine/fake_video_codec_factory.h",
+ "engine/internal_decoder_factory.cc",
+ "engine/internal_decoder_factory.h",
+ "engine/internal_encoder_factory.cc",
+ "engine/internal_encoder_factory.h",
+ "engine/multiplex_codec_factory.cc",
+ "engine/multiplex_codec_factory.h",
+ ]
+ if (build_with_mozilla) {
+ sources -= [
+ "engine/fake_video_codec_factory.cc",
+ "engine/fake_video_codec_factory.h",
+ "engine/internal_encoder_factory.cc", # See Bug 1820869.
+ "engine/multiplex_codec_factory.cc", # See Bug 1820869.
+ ]
+ }
+}
+
+rtc_library("rtc_audio_video") {
+ visibility = [ "*" ]
+ allow_poison = [ "audio_codecs" ] # TODO(bugs.webrtc.org/8396): Remove.
+ defines = []
+ libs = []
+ deps = [
+ ":codec",
+ ":media_channel",
+ ":media_channel_impl",
+ ":media_constants",
+ ":rid_description",
+ ":rtc_media_base",
+ ":rtc_media_config",
+ ":rtp_utils",
+ ":stream_params",
+ "../api:array_view",
+ "../api:audio_options_api",
+ "../api:call_api",
+ "../api:field_trials_view",
+ "../api:frame_transformer_interface",
+ "../api:libjingle_peerconnection_api",
+ "../api:make_ref_counted",
+ "../api:media_stream_interface",
+ "../api:priority",
+ "../api:rtc_error",
+ "../api:rtp_headers",
+ "../api:rtp_parameters",
+ "../api:rtp_transceiver_direction",
+ "../api:scoped_refptr",
+ "../api:sequence_checker",
+ "../api:transport_api",
+ "../api/audio:audio_frame_api",
+ "../api/audio:audio_frame_processor",
+ "../api/audio:audio_mixer_api",
+ "../api/audio_codecs:audio_codecs_api",
+ "../api/crypto:frame_decryptor_interface",
+ "../api/crypto:frame_encryptor_interface",
+ "../api/crypto:options",
+ "../api/task_queue",
+ "../api/task_queue:pending_task_safety_flag",
+ "../api/transport:bitrate_settings",
+ "../api/transport:field_trial_based_config",
+ "../api/transport/rtp:rtp_source",
+ "../api/units:data_rate",
+ "../api/units:time_delta",
+ "../api/units:timestamp",
+ "../api/video:recordable_encoded_frame",
+ "../api/video:resolution",
+ "../api/video:video_bitrate_allocation",
+ "../api/video:video_bitrate_allocator_factory",
+ "../api/video:video_codec_constants",
+ "../api/video:video_frame",
+ "../api/video:video_rtp_headers",
+ "../api/video:video_stream_encoder",
+ "../api/video_codecs:rtc_software_fallback_wrappers",
+ "../api/video_codecs:scalability_mode",
+ "../api/video_codecs:video_codecs_api",
+ "../call",
+ "../call:call_interfaces",
+ "../call:receive_stream_interface",
+ "../call:rtp_interfaces",
+ "../call:video_stream_api",
+ "../common_video",
+ "../common_video:frame_counts",
+ "../modules/async_audio_processing:async_audio_processing",
+ "../modules/audio_device",
+ "../modules/audio_device:audio_device_impl",
+ "../modules/audio_mixer:audio_mixer_impl",
+ "../modules/audio_processing:api",
+ "../modules/audio_processing:audio_processing_statistics",
+ "../modules/audio_processing/aec_dump",
+ "../modules/audio_processing/agc:gain_control_interface",
+ "../modules/rtp_rtcp",
+ "../modules/rtp_rtcp:rtp_rtcp_format",
+ "../modules/video_coding",
+ "../modules/video_coding:video_codec_interface",
+ "../modules/video_coding:video_coding_utility",
+ "../modules/video_coding:webrtc_vp9_helpers",
+ "../modules/video_coding/svc:scalability_mode_util",
+ "../rtc_base:audio_format_to_string",
+ "../rtc_base:buffer",
+ "../rtc_base:byte_order",
+ "../rtc_base:checks",
+ "../rtc_base:copy_on_write_buffer",
+ "../rtc_base:dscp",
+ "../rtc_base:event_tracer",
+ "../rtc_base:ignore_wundef",
+ "../rtc_base:logging",
+ "../rtc_base:macromagic",
+ "../rtc_base:network_route",
+ "../rtc_base:race_checker",
+ "../rtc_base:rtc_task_queue",
+ "../rtc_base:safe_conversions",
+ "../rtc_base:socket",
+ "../rtc_base:ssl",
+ "../rtc_base:stringutils",
+ "../rtc_base:threading",
+ "../rtc_base:timeutils",
+ "../rtc_base/experiments:field_trial_parser",
+ "../rtc_base/experiments:min_video_bitrate_experiment",
+ "../rtc_base/experiments:normalize_simulcast_size_experiment",
+ "../rtc_base/experiments:rate_control_settings",
+ "../rtc_base/network:sent_packet",
+ "../rtc_base/synchronization:mutex",
+ "../rtc_base/system:file_wrapper",
+ "../rtc_base/system:no_unique_address",
+ "../rtc_base/system:rtc_export",
+ "../rtc_base/third_party/base64",
+ "../system_wrappers",
+ "../system_wrappers:metrics",
+ "../video/config:encoder_config",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/algorithm",
+ "//third_party/abseil-cpp/absl/algorithm:container",
+ "//third_party/abseil-cpp/absl/container:inlined_vector",
+ "//third_party/abseil-cpp/absl/functional:any_invocable",
+ "//third_party/abseil-cpp/absl/functional:bind_front",
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+
+ sources = [
+ "engine/adm_helpers.cc",
+ "engine/adm_helpers.h",
+ "engine/null_webrtc_video_engine.h",
+ "engine/payload_type_mapper.cc",
+ "engine/payload_type_mapper.h",
+ "engine/webrtc_media_engine.cc",
+ "engine/webrtc_media_engine.h",
+ "engine/webrtc_video_engine.cc",
+ "engine/webrtc_video_engine.h",
+ "engine/webrtc_voice_engine.cc",
+ "engine/webrtc_voice_engine.h",
+ ]
+
+ public_configs = []
+ if (!build_with_chromium) {
+ public_configs += [ ":rtc_media_defines_config" ]
+ deps += [ "../modules/video_capture:video_capture_internal_impl" ]
+ }
+ if (rtc_enable_protobuf) {
+ deps += [
+ "../modules/audio_coding:ana_config_proto",
+ "../modules/audio_processing/aec_dump:aec_dump_impl",
+ ]
+ } else {
+ deps += [ "../modules/audio_processing/aec_dump:null_aec_dump_factory" ]
+ }
+}
+
+# Heavy but optional helper for unittests and webrtc users who prefer to use
+# defaults factories or do not worry about extra dependencies and binary size.
+rtc_library("rtc_media_engine_defaults") {
+ visibility = [ "*" ]
+ allow_poison = [
+ "audio_codecs",
+ "default_task_queue",
+ "software_video_codecs",
+ ]
+ sources = [
+ "engine/webrtc_media_engine_defaults.cc",
+ "engine/webrtc_media_engine_defaults.h",
+ ]
+ deps = [
+ ":rtc_audio_video",
+ "../api/audio_codecs:builtin_audio_decoder_factory",
+ "../api/audio_codecs:builtin_audio_encoder_factory",
+ "../api/task_queue:default_task_queue_factory",
+ "../api/video:builtin_video_bitrate_allocator_factory",
+ "../api/video_codecs:builtin_video_decoder_factory",
+ "../api/video_codecs:builtin_video_encoder_factory",
+ "../modules/audio_processing:api",
+ "../rtc_base:checks",
+ "../rtc_base/system:rtc_export",
+ ]
+}
+
+rtc_source_set("rtc_data_sctp_transport_internal") {
+ sources = [ "sctp/sctp_transport_internal.h" ]
+ deps = [
+ ":media_channel",
+ "../api:rtc_error",
+ "../api/transport:datagram_transport_interface",
+ "../media:rtc_media_base",
+ "../p2p:rtc_p2p",
+ "../rtc_base:copy_on_write_buffer",
+ "../rtc_base:threading",
+ ]
+}
+
+if (rtc_build_dcsctp) {
+ rtc_library("rtc_data_dcsctp_transport") {
+ sources = [
+ "sctp/dcsctp_transport.cc",
+ "sctp/dcsctp_transport.h",
+ ]
+ deps = [
+ ":media_channel",
+ ":rtc_data_sctp_transport_internal",
+ "../api:array_view",
+ "../api/task_queue:pending_task_safety_flag",
+ "../api/task_queue:task_queue",
+ "../media:rtc_media_base",
+ "../net/dcsctp/public:factory",
+ "../net/dcsctp/public:socket",
+ "../net/dcsctp/public:types",
+ "../net/dcsctp/public:utils",
+ "../net/dcsctp/timer:task_queue_timeout",
+ "../p2p:rtc_p2p",
+ "../rtc_base:checks",
+ "../rtc_base:copy_on_write_buffer",
+ "../rtc_base:event_tracer",
+ "../rtc_base:logging",
+ "../rtc_base:macromagic",
+ "../rtc_base:random",
+ "../rtc_base:socket",
+ "../rtc_base:stringutils",
+ "../rtc_base:threading",
+ "../rtc_base/containers:flat_map",
+ "../rtc_base/third_party/sigslot:sigslot",
+ "../system_wrappers",
+ ]
+ absl_deps += [
+ "//third_party/abseil-cpp/absl/strings:strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+ }
+}
+
+rtc_library("rtc_data_sctp_transport_factory") {
+ defines = []
+ sources = [
+ "sctp/sctp_transport_factory.cc",
+ "sctp/sctp_transport_factory.h",
+ ]
+ deps = [
+ ":rtc_data_sctp_transport_internal",
+ "../api/transport:sctp_transport_factory_interface",
+ "../rtc_base:threading",
+ "../rtc_base/system:unused",
+ ]
+
+ if (rtc_enable_sctp) {
+ assert(rtc_build_dcsctp, "An SCTP backend is required to enable SCTP")
+ }
+
+ if (rtc_build_dcsctp) {
+ defines += [ "WEBRTC_HAVE_DCSCTP" ]
+ deps += [
+ ":rtc_data_dcsctp_transport",
+ "../system_wrappers",
+ "../system_wrappers:field_trial",
+ ]
+ }
+}
+
+rtc_source_set("rtc_media") {
+ visibility = [ "*" ]
+ allow_poison = [ "audio_codecs" ] # TODO(bugs.webrtc.org/8396): Remove.
+ deps = [ ":rtc_audio_video" ]
+}
+
+if (rtc_include_tests) {
+ rtc_library("rtc_media_tests_utils") {
+ testonly = true
+
+ defines = []
+ deps = [
+ ":codec",
+ ":media_channel",
+ ":media_channel_impl",
+ ":media_constants",
+ ":rtc_audio_video",
+ ":rtc_internal_video_codecs",
+ ":rtc_media",
+ ":rtc_media_base",
+ ":rtc_simulcast_encoder_adapter",
+ ":rtp_utils",
+ ":stream_params",
+ "../api:call_api",
+ "../api:fec_controller_api",
+ "../api:rtp_parameters",
+ "../api:scoped_refptr",
+ "../api/task_queue",
+ "../api/task_queue:pending_task_safety_flag",
+ "../api/transport:field_trial_based_config",
+ "../api/units:timestamp",
+ "../api/video:encoded_image",
+ "../api/video:video_bitrate_allocation",
+ "../api/video:video_frame",
+ "../api/video:video_rtp_headers",
+ "../api/video_codecs:video_codecs_api",
+ "../call:call_interfaces",
+ "../call:mock_rtp_interfaces",
+ "../call:video_stream_api",
+ "../common_video",
+ "../modules/audio_processing",
+ "../modules/audio_processing:api",
+ "../modules/rtp_rtcp:rtp_rtcp_format",
+ "../modules/video_coding:video_codec_interface",
+ "../modules/video_coding:video_coding_utility",
+ "../modules/video_coding/svc:scalability_mode_util",
+ "../p2p:rtc_p2p",
+ "../rtc_base:buffer",
+ "../rtc_base:byte_order",
+ "../rtc_base:checks",
+ "../rtc_base:copy_on_write_buffer",
+ "../rtc_base:dscp",
+ "../rtc_base:gunit_helpers",
+ "../rtc_base:macromagic",
+ "../rtc_base:network_route",
+ "../rtc_base:rtc_event",
+ "../rtc_base:rtc_task_queue",
+ "../rtc_base:stringutils",
+ "../rtc_base:threading",
+ "../rtc_base:timeutils",
+ "../rtc_base/synchronization:mutex",
+ "../rtc_base/third_party/sigslot",
+ "../test:scoped_key_value_config",
+ "../test:test_support",
+ "../video/config:streams_config",
+ "//testing/gtest",
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/algorithm:container",
+ "//third_party/abseil-cpp/absl/functional:any_invocable",
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+ sources = [
+ "base/fake_frame_source.cc",
+ "base/fake_frame_source.h",
+ "base/fake_media_engine.cc",
+ "base/fake_media_engine.h",
+ "base/fake_network_interface.h",
+ "base/fake_rtp.cc",
+ "base/fake_rtp.h",
+ "base/fake_video_renderer.cc",
+ "base/fake_video_renderer.h",
+ "base/test_utils.cc",
+ "base/test_utils.h",
+ "engine/fake_webrtc_call.cc",
+ "engine/fake_webrtc_call.h",
+ "engine/fake_webrtc_video_engine.cc",
+ "engine/fake_webrtc_video_engine.h",
+ ]
+ }
+
+ if (!build_with_chromium) {
+ rtc_media_unittests_resources = [
+ "../resources/media/captured-320x240-2s-48.frames",
+ "../resources/media/faces.1280x720_P420.yuv",
+ "../resources/media/faces_I400.jpg",
+ "../resources/media/faces_I411.jpg",
+ "../resources/media/faces_I420.jpg",
+ "../resources/media/faces_I422.jpg",
+ "../resources/media/faces_I444.jpg",
+ ]
+
+ if (is_ios) {
+ bundle_data("rtc_media_unittests_bundle_data") {
+ testonly = true
+ sources = rtc_media_unittests_resources
+ outputs = [ "{{bundle_resources_dir}}/{{source_file_part}}" ]
+ }
+ }
+
+ rtc_test("rtc_media_unittests") {
+ testonly = true
+
+ defines = []
+ deps = [
+ ":codec",
+ ":media_channel",
+ ":media_constants",
+ ":rtc_audio_video",
+ ":rtc_internal_video_codecs",
+ ":rtc_media",
+ ":rtc_media_base",
+ ":rtc_media_engine_defaults",
+ ":rtc_media_tests_utils",
+ ":rtc_sdp_video_format_utils",
+ ":rtc_simulcast_encoder_adapter",
+ ":rtp_utils",
+ ":stream_params",
+ ":turn_utils",
+ "../api:create_simulcast_test_fixture_api",
+ "../api:field_trials_view",
+ "../api:libjingle_peerconnection_api",
+ "../api:mock_encoder_selector",
+ "../api:mock_video_bitrate_allocator",
+ "../api:mock_video_bitrate_allocator_factory",
+ "../api:mock_video_codec_factory",
+ "../api:mock_video_encoder",
+ "../api:rtp_parameters",
+ "../api:scoped_refptr",
+ "../api:simulcast_test_fixture_api",
+ "../api/audio_codecs:builtin_audio_decoder_factory",
+ "../api/audio_codecs:builtin_audio_encoder_factory",
+ "../api/rtc_event_log",
+ "../api/task_queue",
+ "../api/task_queue:default_task_queue_factory",
+ "../api/test/video:function_video_factory",
+ "../api/transport:field_trial_based_config",
+ "../api/units:time_delta",
+ "../api/units:timestamp",
+ "../api/video:builtin_video_bitrate_allocator_factory",
+ "../api/video:resolution",
+ "../api/video:video_bitrate_allocation",
+ "../api/video:video_codec_constants",
+ "../api/video:video_frame",
+ "../api/video:video_rtp_headers",
+ "../api/video_codecs:video_codecs_api",
+ "../api/video_codecs:video_decoder_factory_template",
+ "../api/video_codecs:video_decoder_factory_template_dav1d_adapter",
+ "../api/video_codecs:video_decoder_factory_template_libvpx_vp8_adapter",
+ "../api/video_codecs:video_decoder_factory_template_libvpx_vp9_adapter",
+ "../api/video_codecs:video_decoder_factory_template_open_h264_adapter",
+ "../api/video_codecs:video_encoder_factory_template",
+ "../api/video_codecs:video_encoder_factory_template_libaom_av1_adapter",
+ "../api/video_codecs:video_encoder_factory_template_libvpx_vp8_adapter",
+ "../api/video_codecs:video_encoder_factory_template_libvpx_vp9_adapter",
+ "../api/video_codecs:video_encoder_factory_template_open_h264_adapter",
+ "../audio",
+ "../call:call_interfaces",
+ "../common_video",
+ "../modules/audio_device:mock_audio_device",
+ "../modules/audio_mixer:audio_mixer_impl",
+ "../modules/audio_processing",
+ "../modules/audio_processing:api",
+ "../modules/audio_processing:mocks",
+ "../modules/rtp_rtcp",
+ "../modules/rtp_rtcp:rtp_rtcp_format",
+ "../modules/video_coding:simulcast_test_fixture_impl",
+ "../modules/video_coding:video_codec_interface",
+ "../modules/video_coding:webrtc_h264",
+ "../modules/video_coding:webrtc_vp8",
+ "../modules/video_coding/svc:scalability_mode_util",
+ "../p2p:p2p_test_utils",
+ "../rtc_base:async_packet_socket",
+ "../rtc_base:byte_order",
+ "../rtc_base:checks",
+ "../rtc_base:gunit_helpers",
+ "../rtc_base:logging",
+ "../rtc_base:macromagic",
+ "../rtc_base:rtc_base_tests_utils",
+ "../rtc_base:rtc_event",
+ "../rtc_base:rtc_task_queue",
+ "../rtc_base:safe_conversions",
+ "../rtc_base:stringutils",
+ "../rtc_base:threading",
+ "../rtc_base:timeutils",
+ "../rtc_base/experiments:min_video_bitrate_experiment",
+ "../rtc_base/synchronization:mutex",
+ "../rtc_base/third_party/sigslot",
+ "../system_wrappers:field_trial",
+ "../test:audio_codec_mocks",
+ "../test:fake_video_codecs",
+ "../test:field_trial",
+ "../test:rtp_test_utils",
+ "../test:scoped_key_value_config",
+ "../test:test_main",
+ "../test:test_support",
+ "../test:video_test_common",
+ "../test/time_controller",
+ "../video/config:streams_config",
+ ]
+
+ if (enable_libaom) {
+ defines += [ "RTC_USE_LIBAOM_AV1_ENCODER" ]
+ }
+
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/algorithm:container",
+ "//third_party/abseil-cpp/absl/memory",
+ "//third_party/abseil-cpp/absl/strings",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+ sources = [
+ "base/codec_unittest.cc",
+ "base/media_engine_unittest.cc",
+ "base/rtp_utils_unittest.cc",
+ "base/sdp_video_format_utils_unittest.cc",
+ "base/stream_params_unittest.cc",
+ "base/turn_utils_unittest.cc",
+ "base/video_adapter_unittest.cc",
+ "base/video_broadcaster_unittest.cc",
+ "base/video_common_unittest.cc",
+ "engine/internal_decoder_factory_unittest.cc",
+ "engine/internal_encoder_factory_unittest.cc",
+ "engine/multiplex_codec_factory_unittest.cc",
+ "engine/null_webrtc_video_engine_unittest.cc",
+ "engine/payload_type_mapper_unittest.cc",
+ "engine/simulcast_encoder_adapter_unittest.cc",
+ "engine/webrtc_media_engine_unittest.cc",
+ "engine/webrtc_video_engine_unittest.cc",
+ ]
+
+ # TODO(kthelgason): Reenable this test on iOS.
+ # See bugs.webrtc.org/5569
+ if (!is_ios) {
+ sources += [ "engine/webrtc_voice_engine_unittest.cc" ]
+ }
+
+ if (rtc_opus_support_120ms_ptime) {
+ defines += [ "WEBRTC_OPUS_SUPPORT_120MS_PTIME=1" ]
+ } else {
+ defines += [ "WEBRTC_OPUS_SUPPORT_120MS_PTIME=0" ]
+ }
+
+ data = rtc_media_unittests_resources
+
+ if (is_android) {
+ deps += [ "//testing/android/native_test:native_test_support" ]
+ shard_timeout = 900
+ }
+
+ if (is_ios) {
+ deps += [ ":rtc_media_unittests_bundle_data" ]
+ }
+
+ if (rtc_build_dcsctp) {
+ sources += [ "sctp/dcsctp_transport_unittest.cc" ]
+ deps += [
+ ":rtc_data_dcsctp_transport",
+ "../net/dcsctp/public:factory",
+ "../net/dcsctp/public:mocks",
+ "../net/dcsctp/public:socket",
+ ]
+ }
+ }
+ }
+}
diff --git a/third_party/libwebrtc/media/DEPS b/third_party/libwebrtc/media/DEPS
new file mode 100644
index 0000000000..7fbbc0fd47
--- /dev/null
+++ b/third_party/libwebrtc/media/DEPS
@@ -0,0 +1,37 @@
+include_rules = [
+ "+call",
+ "+common_video",
+ "+logging/rtc_event_log",
+ "+modules/async_audio_processing",
+ "+modules/audio_coding",
+ "+modules/audio_device",
+ "+modules/audio_mixer",
+ "+modules/audio_processing",
+ "+modules/rtp_rtcp",
+ "+modules/video_capture",
+ "+modules/video_coding",
+ "+modules/video_coding/utility",
+ "+net/dcsctp",
+ "+p2p",
+ "+sound",
+ "+system_wrappers",
+ "+third_party/libyuv",
+]
+
+specific_include_rules = {
+ "win32devicemanager\.cc": [
+ "+third_party/logitech/files/logitechquickcam.h",
+ ],
+ ".*webrtc_video_engine\.h": [
+ "+video/config",
+ ],
+ ".*media_channel\.h": [
+ "+video/config",
+ ],
+ ".*webrtc_video_engine_unittest\.cc": [
+ "+video/config",
+ ],
+ ".*fake_webrtc_call\.cc": [
+ "+video/config",
+ ],
+}
diff --git a/third_party/libwebrtc/media/OWNERS b/third_party/libwebrtc/media/OWNERS
new file mode 100644
index 0000000000..5d8ec5aba6
--- /dev/null
+++ b/third_party/libwebrtc/media/OWNERS
@@ -0,0 +1,13 @@
+brandtr@webrtc.org
+ilnik@webrtc.org
+sprang@webrtc.org
+magjed@webrtc.org
+mflodman@webrtc.org
+perkj@webrtc.org
+
+# Audio-related changes:
+peah@webrtc.org
+saza@webrtc.org
+
+# Datachannel-related changes:
+orphis@webrtc.org
diff --git a/third_party/libwebrtc/media/base/adapted_video_track_source.cc b/third_party/libwebrtc/media/base/adapted_video_track_source.cc
new file mode 100644
index 0000000000..816ada5f16
--- /dev/null
+++ b/third_party/libwebrtc/media/base/adapted_video_track_source.cc
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/base/adapted_video_track_source.h"
+
+#include "api/scoped_refptr.h"
+#include "api/video/i420_buffer.h"
+#include "api/video/video_frame_buffer.h"
+#include "api/video/video_rotation.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/time_utils.h"
+
+namespace rtc {
+
+AdaptedVideoTrackSource::AdaptedVideoTrackSource() = default;
+
+AdaptedVideoTrackSource::AdaptedVideoTrackSource(int required_alignment)
+ : video_adapter_(required_alignment) {}
+
+AdaptedVideoTrackSource::~AdaptedVideoTrackSource() = default;
+
+bool AdaptedVideoTrackSource::GetStats(Stats* stats) {
+ webrtc::MutexLock lock(&stats_mutex_);
+
+ if (!stats_) {
+ return false;
+ }
+
+ *stats = *stats_;
+ return true;
+}
+
+void AdaptedVideoTrackSource::OnFrame(const webrtc::VideoFrame& frame) {
+ rtc::scoped_refptr<webrtc::VideoFrameBuffer> buffer(
+ frame.video_frame_buffer());
+ /* Note that this is a "best effort" approach to
+ wants.rotation_applied; apply_rotation_ can change from false to
+ true between the check of apply_rotation() and the call to
+ broadcaster_.OnFrame(), in which case we generate a frame with
+ pending rotation despite some sink with wants.rotation_applied ==
+ true was just added. The VideoBroadcaster enforces
+ synchronization for us in this case, by not passing the frame on
+ to sinks which don't want it. */
+ if (apply_rotation() && frame.rotation() != webrtc::kVideoRotation_0 &&
+ buffer->type() == webrtc::VideoFrameBuffer::Type::kI420) {
+ /* Apply pending rotation. */
+ webrtc::VideoFrame rotated_frame(frame);
+ rotated_frame.set_video_frame_buffer(
+ webrtc::I420Buffer::Rotate(*buffer->GetI420(), frame.rotation()));
+ rotated_frame.set_rotation(webrtc::kVideoRotation_0);
+ broadcaster_.OnFrame(rotated_frame);
+ } else {
+ broadcaster_.OnFrame(frame);
+ }
+}
+
+void AdaptedVideoTrackSource::OnFrameDropped() {
+ broadcaster_.OnDiscardedFrame();
+}
+
+void AdaptedVideoTrackSource::AddOrUpdateSink(
+ rtc::VideoSinkInterface<webrtc::VideoFrame>* sink,
+ const rtc::VideoSinkWants& wants) {
+ broadcaster_.AddOrUpdateSink(sink, wants);
+ OnSinkWantsChanged(broadcaster_.wants());
+}
+
+void AdaptedVideoTrackSource::RemoveSink(
+ rtc::VideoSinkInterface<webrtc::VideoFrame>* sink) {
+ broadcaster_.RemoveSink(sink);
+ OnSinkWantsChanged(broadcaster_.wants());
+}
+
+bool AdaptedVideoTrackSource::apply_rotation() {
+ return broadcaster_.wants().rotation_applied;
+}
+
+void AdaptedVideoTrackSource::OnSinkWantsChanged(
+ const rtc::VideoSinkWants& wants) {
+ video_adapter_.OnSinkWants(wants);
+}
+
+bool AdaptedVideoTrackSource::AdaptFrame(int width,
+ int height,
+ int64_t time_us,
+ int* out_width,
+ int* out_height,
+ int* crop_width,
+ int* crop_height,
+ int* crop_x,
+ int* crop_y) {
+ {
+ webrtc::MutexLock lock(&stats_mutex_);
+ stats_ = Stats{width, height};
+ }
+
+ if (!broadcaster_.frame_wanted()) {
+ return false;
+ }
+
+ if (!video_adapter_.AdaptFrameResolution(
+ width, height, time_us * rtc::kNumNanosecsPerMicrosec, crop_width,
+ crop_height, out_width, out_height)) {
+ broadcaster_.OnDiscardedFrame();
+ // VideoAdapter dropped the frame.
+ return false;
+ }
+
+ *crop_x = (width - *crop_width) / 2;
+ *crop_y = (height - *crop_height) / 2;
+ return true;
+}
+
+void AdaptedVideoTrackSource::ProcessConstraints(
+ const webrtc::VideoTrackSourceConstraints& constraints) {
+ broadcaster_.ProcessConstraints(constraints);
+}
+
+} // namespace rtc
diff --git a/third_party/libwebrtc/media/base/adapted_video_track_source.h b/third_party/libwebrtc/media/base/adapted_video_track_source.h
new file mode 100644
index 0000000000..1c3e0b68d3
--- /dev/null
+++ b/third_party/libwebrtc/media/base/adapted_video_track_source.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MEDIA_BASE_ADAPTED_VIDEO_TRACK_SOURCE_H_
+#define MEDIA_BASE_ADAPTED_VIDEO_TRACK_SOURCE_H_
+
+#include <stdint.h>
+
+#include "absl/types/optional.h"
+#include "api/media_stream_interface.h"
+#include "api/notifier.h"
+#include "api/video/video_frame.h"
+#include "api/video/video_sink_interface.h"
+#include "api/video/video_source_interface.h"
+#include "media/base/video_adapter.h"
+#include "media/base/video_broadcaster.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/system/rtc_export.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace rtc {
+
+// Base class for sources which needs video adaptation, e.g., video
+// capture sources. Sinks must be added and removed on one and only
+// one thread, while AdaptFrame and OnFrame may be called on any
+// thread.
+class RTC_EXPORT AdaptedVideoTrackSource
+ : public webrtc::Notifier<webrtc::VideoTrackSourceInterface> {
+ public:
+ AdaptedVideoTrackSource();
+ ~AdaptedVideoTrackSource() override;
+
+ protected:
+ // Allows derived classes to initialize `video_adapter_` with a custom
+ // alignment.
+ explicit AdaptedVideoTrackSource(int required_alignment);
+ // Checks the apply_rotation() flag. If the frame needs rotation, and it is a
+ // plain memory frame, it is rotated. Subclasses producing native frames must
+ // handle apply_rotation() themselves.
+ void OnFrame(const webrtc::VideoFrame& frame);
+ // Indication from source that a frame was dropped.
+ void OnFrameDropped();
+
+ // Reports the appropriate frame size after adaptation. Returns true
+ // if a frame is wanted. Returns false if there are no interested
+ // sinks, or if the VideoAdapter decides to drop the frame.
+ bool AdaptFrame(int width,
+ int height,
+ int64_t time_us,
+ int* out_width,
+ int* out_height,
+ int* crop_width,
+ int* crop_height,
+ int* crop_x,
+ int* crop_y);
+
+ // Returns the current value of the apply_rotation flag, derived
+ // from the VideoSinkWants of registered sinks. The value is derived
+ // from sinks' wants, in AddOrUpdateSink and RemoveSink. Beware that
+ // when using this method from a different thread, the value may
+ // become stale before it is used.
+ bool apply_rotation();
+
+ cricket::VideoAdapter* video_adapter() { return &video_adapter_; }
+
+ private:
+ // Implements rtc::VideoSourceInterface.
+ void AddOrUpdateSink(rtc::VideoSinkInterface<webrtc::VideoFrame>* sink,
+ const rtc::VideoSinkWants& wants) override;
+ void RemoveSink(rtc::VideoSinkInterface<webrtc::VideoFrame>* sink) override;
+
+ // Part of VideoTrackSourceInterface.
+ bool GetStats(Stats* stats) override;
+
+ void OnSinkWantsChanged(const rtc::VideoSinkWants& wants);
+
+ // Encoded sinks not implemented for AdaptedVideoTrackSource.
+ bool SupportsEncodedOutput() const override { return false; }
+ void GenerateKeyFrame() override {}
+ void AddEncodedSink(
+ rtc::VideoSinkInterface<webrtc::RecordableEncodedFrame>* sink) override {}
+ void RemoveEncodedSink(
+ rtc::VideoSinkInterface<webrtc::RecordableEncodedFrame>* sink) override {}
+ void ProcessConstraints(
+ const webrtc::VideoTrackSourceConstraints& constraints) override;
+
+ cricket::VideoAdapter video_adapter_;
+
+ webrtc::Mutex stats_mutex_;
+ absl::optional<Stats> stats_ RTC_GUARDED_BY(stats_mutex_);
+
+ VideoBroadcaster broadcaster_;
+};
+
+} // namespace rtc
+
+#endif // MEDIA_BASE_ADAPTED_VIDEO_TRACK_SOURCE_H_
diff --git a/third_party/libwebrtc/media/base/audio_source.h b/third_party/libwebrtc/media/base/audio_source.h
new file mode 100644
index 0000000000..51fe0e13e1
--- /dev/null
+++ b/third_party/libwebrtc/media/base/audio_source.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MEDIA_BASE_AUDIO_SOURCE_H_
+#define MEDIA_BASE_AUDIO_SOURCE_H_
+
+#include <cstddef>
+
+#include "absl/types/optional.h"
+
+namespace cricket {
+
+// Abstract interface for providing the audio data.
+// TODO(deadbeef): Rename this to AudioSourceInterface, and rename
+// webrtc::AudioSourceInterface to AudioTrackSourceInterface.
+class AudioSource {
+ public:
+ class Sink {
+ public:
+ // Callback to receive data from the AudioSource.
+ virtual void OnData(
+ const void* audio_data,
+ int bits_per_sample,
+ int sample_rate,
+ size_t number_of_channels,
+ size_t number_of_frames,
+ absl::optional<int64_t> absolute_capture_timestamp_ms) = 0;
+
+ // Called when the AudioSource is going away.
+ virtual void OnClose() = 0;
+
+ // Returns the number of channels encoded by the sink. This can be less than
+ // the number_of_channels if down-mixing occur. A value of -1 means an
+ // unknown number.
+ virtual int NumPreferredChannels() const = 0;
+
+ protected:
+ virtual ~Sink() {}
+ };
+
+ // Sets a sink to the AudioSource. There can be only one sink connected
+ // to the source at a time.
+ virtual void SetSink(Sink* sink) = 0;
+
+ protected:
+ virtual ~AudioSource() {}
+};
+
+} // namespace cricket
+
+#endif // MEDIA_BASE_AUDIO_SOURCE_H_
diff --git a/third_party/libwebrtc/media/base/codec.cc b/third_party/libwebrtc/media/base/codec.cc
new file mode 100644
index 0000000000..b819707702
--- /dev/null
+++ b/third_party/libwebrtc/media/base/codec.cc
@@ -0,0 +1,494 @@
+/*
+ * Copyright (c) 2004 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/base/codec.h"
+
+#include "absl/algorithm/container.h"
+#include "absl/strings/match.h"
+#include "api/audio_codecs/audio_format.h"
+#include "api/video_codecs/av1_profile.h"
+#include "api/video_codecs/h264_profile_level_id.h"
+#include "api/video_codecs/vp9_profile.h"
+#include "media/base/media_constants.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/string_encode.h"
+#include "rtc_base/strings/string_builder.h"
+
+namespace cricket {
+namespace {
+
+std::string GetH264PacketizationModeOrDefault(const CodecParameterMap& params) {
+ auto it = params.find(kH264FmtpPacketizationMode);
+ if (it != params.end()) {
+ return it->second;
+ }
+ // If packetization-mode is not present, default to "0".
+ // https://tools.ietf.org/html/rfc6184#section-6.2
+ return "0";
+}
+
+bool IsSameH264PacketizationMode(const CodecParameterMap& left,
+ const CodecParameterMap& right) {
+ return GetH264PacketizationModeOrDefault(left) ==
+ GetH264PacketizationModeOrDefault(right);
+}
+
+// Some (video) codecs are actually families of codecs and rely on parameters
+// to distinguish different incompatible family members.
+bool IsSameCodecSpecific(const std::string& name1,
+ const CodecParameterMap& params1,
+ const std::string& name2,
+ const CodecParameterMap& params2) {
+ // The names might not necessarily match, so check both.
+ auto either_name_matches = [&](const std::string name) {
+ return absl::EqualsIgnoreCase(name, name1) ||
+ absl::EqualsIgnoreCase(name, name2);
+ };
+ if (either_name_matches(kH264CodecName))
+ return webrtc::H264IsSameProfile(params1, params2) &&
+ IsSameH264PacketizationMode(params1, params2);
+ if (either_name_matches(kVp9CodecName))
+ return webrtc::VP9IsSameProfile(params1, params2);
+ if (either_name_matches(kAv1CodecName))
+ return webrtc::AV1IsSameProfile(params1, params2);
+ return true;
+}
+
+} // namespace
+
+FeedbackParams::FeedbackParams() = default;
+FeedbackParams::~FeedbackParams() = default;
+
+bool FeedbackParam::operator==(const FeedbackParam& other) const {
+ return absl::EqualsIgnoreCase(other.id(), id()) &&
+ absl::EqualsIgnoreCase(other.param(), param());
+}
+
+bool FeedbackParams::operator==(const FeedbackParams& other) const {
+ return params_ == other.params_;
+}
+
+bool FeedbackParams::Has(const FeedbackParam& param) const {
+ return absl::c_linear_search(params_, param);
+}
+
+void FeedbackParams::Add(const FeedbackParam& param) {
+ if (param.id().empty()) {
+ return;
+ }
+ if (Has(param)) {
+ // Param already in `this`.
+ return;
+ }
+ params_.push_back(param);
+ RTC_CHECK(!HasDuplicateEntries());
+}
+
+void FeedbackParams::Intersect(const FeedbackParams& from) {
+ std::vector<FeedbackParam>::iterator iter_to = params_.begin();
+ while (iter_to != params_.end()) {
+ if (!from.Has(*iter_to)) {
+ iter_to = params_.erase(iter_to);
+ } else {
+ ++iter_to;
+ }
+ }
+}
+
+bool FeedbackParams::HasDuplicateEntries() const {
+ for (std::vector<FeedbackParam>::const_iterator iter = params_.begin();
+ iter != params_.end(); ++iter) {
+ for (std::vector<FeedbackParam>::const_iterator found = iter + 1;
+ found != params_.end(); ++found) {
+ if (*found == *iter) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+Codec::Codec(Type type, int id, const std::string& name, int clockrate)
+ : Codec(type, id, name, clockrate, 0) {}
+Codec::Codec(Type type,
+ int id,
+ const std::string& name,
+ int clockrate,
+ size_t channels)
+ : type(type),
+ id(id),
+ name(name),
+ clockrate(clockrate),
+ bitrate(0),
+ channels(channels) {}
+
+Codec::Codec(Type type) : Codec(type, 0, "", 0) {}
+
+Codec::Codec(const webrtc::SdpAudioFormat& c)
+ : Codec(Type::kAudio, 0, c.name, c.clockrate_hz, c.num_channels) {
+ params = c.parameters;
+}
+
+Codec::Codec(const webrtc::SdpVideoFormat& c)
+ : Codec(Type::kVideo, 0, c.name, kVideoCodecClockrate) {
+ params = c.parameters;
+ scalability_modes = c.scalability_modes;
+}
+
+Codec::Codec(const Codec& c) = default;
+Codec::Codec(Codec&& c) = default;
+Codec::~Codec() = default;
+Codec& Codec::operator=(const Codec& c) = default;
+Codec& Codec::operator=(Codec&& c) = default;
+
+bool Codec::operator==(const Codec& c) const {
+ return type == c.type && this->id == c.id && // id is reserved in objective-c
+ name == c.name && clockrate == c.clockrate && params == c.params &&
+ feedback_params == c.feedback_params &&
+ (type == Type::kAudio
+ ? (bitrate == c.bitrate && channels == c.channels)
+ : (packetization == c.packetization));
+}
+
+bool Codec::Matches(const Codec& codec,
+ const webrtc::FieldTrialsView* field_trials) const {
+ // Match the codec id/name based on the typical static/dynamic name rules.
+ // Matching is case-insensitive.
+
+ // We support the ranges [96, 127] and more recently [35, 65].
+ // https://www.iana.org/assignments/rtp-parameters/rtp-parameters.xhtml#rtp-parameters-1
+ // Within those ranges we match by codec name, outside by codec id.
+ // Since no codecs are assigned an id in the range [66, 95] by us, these will
+ // never match.
+ const int kLowerDynamicRangeMin = 35;
+ const int kLowerDynamicRangeMax = 65;
+ const int kUpperDynamicRangeMin = 96;
+ const int kUpperDynamicRangeMax = 127;
+ const bool is_id_in_dynamic_range =
+ (id >= kLowerDynamicRangeMin && id <= kLowerDynamicRangeMax) ||
+ (id >= kUpperDynamicRangeMin && id <= kUpperDynamicRangeMax);
+ const bool is_codec_id_in_dynamic_range =
+ (codec.id >= kLowerDynamicRangeMin &&
+ codec.id <= kLowerDynamicRangeMax) ||
+ (codec.id >= kUpperDynamicRangeMin && codec.id <= kUpperDynamicRangeMax);
+ bool matches_id = is_id_in_dynamic_range && is_codec_id_in_dynamic_range
+ ? (absl::EqualsIgnoreCase(name, codec.name))
+ : (id == codec.id);
+
+ auto matches_type_specific = [&]() {
+ switch (type) {
+ case Type::kAudio:
+ // If a nonzero clockrate is specified, it must match the actual
+ // clockrate. If a nonzero bitrate is specified, it must match the
+ // actual bitrate, unless the codec is VBR (0), where we just force the
+ // supplied value. The number of channels must match exactly, with the
+ // exception that channels=0 is treated synonymously as channels=1, per
+ // RFC 4566 section 6: " [The channels] parameter is OPTIONAL and may be
+ // omitted if the number of channels is one."
+ // Preference is ignored.
+ // TODO(juberti): Treat a zero clockrate as 8000Hz, the RTP default
+ // clockrate.
+ return ((codec.clockrate == 0 /*&& clockrate == 8000*/) ||
+ clockrate == codec.clockrate) &&
+ (codec.bitrate == 0 || bitrate <= 0 ||
+ bitrate == codec.bitrate) &&
+ ((codec.channels < 2 && channels < 2) ||
+ channels == codec.channels);
+
+ case Type::kVideo:
+ return IsSameCodecSpecific(name, params, codec.name, codec.params);
+ }
+ };
+
+ return matches_id && matches_type_specific();
+}
+
+bool Codec::MatchesRtpCodec(const webrtc::RtpCodec& codec_capability) const {
+ webrtc::RtpCodecParameters codec_parameters = ToCodecParameters();
+
+ return codec_parameters.name == codec_capability.name &&
+ codec_parameters.kind == codec_capability.kind &&
+ (codec_parameters.name == cricket::kRtxCodecName ||
+ (codec_parameters.num_channels == codec_capability.num_channels &&
+ codec_parameters.clock_rate == codec_capability.clock_rate &&
+ codec_parameters.parameters == codec_capability.parameters));
+}
+
+bool Codec::GetParam(const std::string& name, std::string* out) const {
+ CodecParameterMap::const_iterator iter = params.find(name);
+ if (iter == params.end())
+ return false;
+ *out = iter->second;
+ return true;
+}
+
+bool Codec::GetParam(const std::string& name, int* out) const {
+ CodecParameterMap::const_iterator iter = params.find(name);
+ if (iter == params.end())
+ return false;
+ return rtc::FromString(iter->second, out);
+}
+
+void Codec::SetParam(const std::string& name, const std::string& value) {
+ params[name] = value;
+}
+
+void Codec::SetParam(const std::string& name, int value) {
+ params[name] = rtc::ToString(value);
+}
+
+bool Codec::RemoveParam(const std::string& name) {
+ return params.erase(name) == 1;
+}
+
+void Codec::AddFeedbackParam(const FeedbackParam& param) {
+ feedback_params.Add(param);
+}
+
+bool Codec::HasFeedbackParam(const FeedbackParam& param) const {
+ return feedback_params.Has(param);
+}
+
+void Codec::IntersectFeedbackParams(const Codec& other) {
+ feedback_params.Intersect(other.feedback_params);
+}
+
+webrtc::RtpCodecParameters Codec::ToCodecParameters() const {
+ webrtc::RtpCodecParameters codec_params;
+ codec_params.payload_type = id;
+ codec_params.name = name;
+ codec_params.clock_rate = clockrate;
+ codec_params.parameters.insert(params.begin(), params.end());
+
+ switch (type) {
+ case Type::kAudio: {
+ codec_params.num_channels = static_cast<int>(channels);
+ codec_params.kind = MEDIA_TYPE_AUDIO;
+ break;
+ }
+ case Type::kVideo: {
+ codec_params.kind = MEDIA_TYPE_VIDEO;
+ break;
+ }
+ }
+
+ return codec_params;
+}
+
+bool Codec::IsMediaCodec() const {
+ return !IsResiliencyCodec();
+}
+
+bool Codec::IsResiliencyCodec() const {
+ return GetResiliencyType() != ResiliencyType::kNone;
+}
+
+Codec::ResiliencyType Codec::GetResiliencyType() const {
+ if (absl::EqualsIgnoreCase(name, kRedCodecName)) {
+ return ResiliencyType::kRed;
+ }
+ if (absl::EqualsIgnoreCase(name, kUlpfecCodecName)) {
+ return ResiliencyType::kUlpfec;
+ }
+ if (absl::EqualsIgnoreCase(name, kFlexfecCodecName)) {
+ return ResiliencyType::kFlexfec;
+ }
+ if (absl::EqualsIgnoreCase(name, kRtxCodecName)) {
+ return ResiliencyType::kRtx;
+ }
+ return ResiliencyType::kNone;
+}
+
+bool Codec::ValidateCodecFormat() const {
+ if (id < 0 || id > 127) {
+ RTC_LOG(LS_ERROR) << "Codec with invalid payload type: " << ToString();
+ return false;
+ }
+ if (IsResiliencyCodec()) {
+ return true;
+ }
+
+ int min_bitrate = -1;
+ int max_bitrate = -1;
+ if (GetParam(kCodecParamMinBitrate, &min_bitrate) &&
+ GetParam(kCodecParamMaxBitrate, &max_bitrate)) {
+ if (max_bitrate < min_bitrate) {
+ RTC_LOG(LS_ERROR) << "Codec with max < min bitrate: " << ToString();
+ return false;
+ }
+ }
+ return true;
+}
+
+std::string Codec::ToString() const {
+ char buf[256];
+
+ rtc::SimpleStringBuilder sb(buf);
+ switch (type) {
+ case Type::kAudio: {
+ sb << "AudioCodec[" << id << ":" << name << ":" << clockrate << ":"
+ << bitrate << ":" << channels << "]";
+ break;
+ }
+ case Type::kVideo: {
+ sb << "VideoCodec[" << id << ":" << name;
+ if (packetization.has_value()) {
+ sb << ":" << *packetization;
+ }
+ sb << "]";
+ break;
+ }
+ }
+ return sb.str();
+}
+
+Codec CreateAudioRtxCodec(int rtx_payload_type, int associated_payload_type) {
+ Codec rtx_codec = CreateAudioCodec(rtx_payload_type, kRtxCodecName, 0, 1);
+ rtx_codec.SetParam(kCodecParamAssociatedPayloadType, associated_payload_type);
+ return rtx_codec;
+}
+
+Codec CreateVideoRtxCodec(int rtx_payload_type, int associated_payload_type) {
+ Codec rtx_codec = CreateVideoCodec(rtx_payload_type, kRtxCodecName);
+ rtx_codec.SetParam(kCodecParamAssociatedPayloadType, associated_payload_type);
+ return rtx_codec;
+}
+
+const Codec* FindCodecById(const std::vector<Codec>& codecs, int payload_type) {
+ for (const auto& codec : codecs) {
+ if (codec.id == payload_type)
+ return &codec;
+ }
+ return nullptr;
+}
+
+bool HasLntf(const Codec& codec) {
+ return codec.HasFeedbackParam(
+ FeedbackParam(kRtcpFbParamLntf, kParamValueEmpty));
+}
+
+bool HasNack(const Codec& codec) {
+ return codec.HasFeedbackParam(
+ FeedbackParam(kRtcpFbParamNack, kParamValueEmpty));
+}
+
+bool HasRemb(const Codec& codec) {
+ return codec.HasFeedbackParam(
+ FeedbackParam(kRtcpFbParamRemb, kParamValueEmpty));
+}
+
+bool HasRrtr(const Codec& codec) {
+ return codec.HasFeedbackParam(
+ FeedbackParam(kRtcpFbParamRrtr, kParamValueEmpty));
+}
+
+bool HasTransportCc(const Codec& codec) {
+ return codec.HasFeedbackParam(
+ FeedbackParam(kRtcpFbParamTransportCc, kParamValueEmpty));
+}
+
+const Codec* FindMatchingVideoCodec(const std::vector<Codec>& supported_codecs,
+ const Codec& codec) {
+ webrtc::SdpVideoFormat sdp_video_format{codec.name, codec.params};
+ for (const Codec& supported_codec : supported_codecs) {
+ if (sdp_video_format.IsSameCodec(
+ {supported_codec.name, supported_codec.params})) {
+ return &supported_codec;
+ }
+ }
+ return nullptr;
+}
+
+std::vector<const Codec*> FindAllMatchingCodecs(
+ const std::vector<Codec>& supported_codecs,
+ const Codec& codec) {
+ std::vector<const Codec*> result;
+ webrtc::SdpVideoFormat sdp(codec.name, codec.params);
+ for (const Codec& supported_codec : supported_codecs) {
+ if (sdp.IsSameCodec({supported_codec.name, supported_codec.params})) {
+ result.push_back(&supported_codec);
+ }
+ }
+ return result;
+}
+
+// If a decoder supports any H264 profile, it is implicitly assumed to also
+// support constrained base line even though it's not explicitly listed.
+void AddH264ConstrainedBaselineProfileToSupportedFormats(
+ std::vector<webrtc::SdpVideoFormat>* supported_formats) {
+ std::vector<webrtc::SdpVideoFormat> cbr_supported_formats;
+
+ // For any H264 supported profile, add the corresponding constrained baseline
+ // profile.
+ for (auto it = supported_formats->cbegin(); it != supported_formats->cend();
+ ++it) {
+ if (it->name == cricket::kH264CodecName) {
+ const absl::optional<webrtc::H264ProfileLevelId> profile_level_id =
+ webrtc::ParseSdpForH264ProfileLevelId(it->parameters);
+ if (profile_level_id &&
+ profile_level_id->profile !=
+ webrtc::H264Profile::kProfileConstrainedBaseline) {
+ webrtc::SdpVideoFormat cbp_format = *it;
+ webrtc::H264ProfileLevelId cbp_profile = *profile_level_id;
+ cbp_profile.profile = webrtc::H264Profile::kProfileConstrainedBaseline;
+ cbp_format.parameters[cricket::kH264FmtpProfileLevelId] =
+ *webrtc::H264ProfileLevelIdToString(cbp_profile);
+ cbr_supported_formats.push_back(cbp_format);
+ }
+ }
+ }
+
+ size_t original_size = supported_formats->size();
+ // ...if it's not already in the list.
+ std::copy_if(cbr_supported_formats.begin(), cbr_supported_formats.end(),
+ std::back_inserter(*supported_formats),
+ [supported_formats](const webrtc::SdpVideoFormat& format) {
+ return !format.IsCodecInList(*supported_formats);
+ });
+
+ if (supported_formats->size() > original_size) {
+ RTC_LOG(LS_WARNING) << "Explicitly added H264 constrained baseline to list "
+ "of supported formats.";
+ }
+}
+
+Codec CreateAudioCodec(int id,
+ const std::string& name,
+ int clockrate,
+ size_t channels) {
+ return Codec(Codec::Type::kAudio, id, name, clockrate, channels);
+}
+
+Codec CreateAudioCodec(const webrtc::SdpAudioFormat& c) {
+ return Codec(c);
+}
+
+Codec CreateVideoCodec(const std::string& name) {
+ return CreateVideoCodec(0, name);
+}
+
+Codec CreateVideoCodec(int id, const std::string& name) {
+ Codec c(Codec::Type::kVideo, id, name, kVideoCodecClockrate);
+ if (absl::EqualsIgnoreCase(kH264CodecName, name)) {
+ // This default is set for all H.264 codecs created because
+ // that was the default before packetization mode support was added.
+ // TODO(hta): Move this to the places that create VideoCodecs from
+ // SDP or from knowledge of implementation capabilities.
+ c.SetParam(kH264FmtpPacketizationMode, "1");
+ }
+ return c;
+}
+
+Codec CreateVideoCodec(const webrtc::SdpVideoFormat& c) {
+ return Codec(c);
+}
+
+} // namespace cricket
diff --git a/third_party/libwebrtc/media/base/codec.h b/third_party/libwebrtc/media/base/codec.h
new file mode 100644
index 0000000000..228acad07a
--- /dev/null
+++ b/third_party/libwebrtc/media/base/codec.h
@@ -0,0 +1,229 @@
+/*
+ * Copyright (c) 2004 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MEDIA_BASE_CODEC_H_
+#define MEDIA_BASE_CODEC_H_
+
+#include <map>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "absl/container/inlined_vector.h"
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "api/audio_codecs/audio_format.h"
+#include "api/field_trials_view.h"
+#include "api/rtp_parameters.h"
+#include "api/video_codecs/sdp_video_format.h"
+#include "media/base/media_constants.h"
+#include "rtc_base/system/rtc_export.h"
+
+namespace cricket {
+
+using CodecParameterMap = std::map<std::string, std::string>;
+
+class FeedbackParam {
+ public:
+ FeedbackParam() = default;
+ FeedbackParam(absl::string_view id, const std::string& param)
+ : id_(id), param_(param) {}
+ explicit FeedbackParam(absl::string_view id)
+ : id_(id), param_(kParamValueEmpty) {}
+
+ bool operator==(const FeedbackParam& other) const;
+ bool operator!=(const FeedbackParam& c) const { return !(*this == c); }
+
+ const std::string& id() const { return id_; }
+ const std::string& param() const { return param_; }
+
+ private:
+ std::string id_; // e.g. "nack", "ccm"
+ std::string param_; // e.g. "", "rpsi", "fir"
+};
+
+class FeedbackParams {
+ public:
+ FeedbackParams();
+ ~FeedbackParams();
+ bool operator==(const FeedbackParams& other) const;
+ bool operator!=(const FeedbackParams& c) const { return !(*this == c); }
+
+ bool Has(const FeedbackParam& param) const;
+ void Add(const FeedbackParam& param);
+
+ void Intersect(const FeedbackParams& from);
+
+ const std::vector<FeedbackParam>& params() const { return params_; }
+
+ private:
+ bool HasDuplicateEntries() const;
+
+ std::vector<FeedbackParam> params_;
+};
+
+struct RTC_EXPORT Codec {
+ enum class Type {
+ kAudio,
+ kVideo,
+ };
+
+ enum class ResiliencyType {
+ kNone,
+ kRed,
+ kUlpfec,
+ kFlexfec,
+ kRtx,
+ };
+
+ Type type;
+ int id;
+ std::string name;
+ int clockrate;
+
+ // Audio only
+ // Can be used to override the target bitrate in the encoder.
+ // TODO(orphis): Remove in favor of alternative APIs
+ int bitrate;
+ size_t channels;
+
+ // Video only
+ absl::optional<std::string> packetization;
+ absl::InlinedVector<webrtc::ScalabilityMode, webrtc::kScalabilityModeCount>
+ scalability_modes;
+
+  // Non key-value parameters such as the telephone-event "0-15" are
+ // represented using an empty string as key, i.e. {"": "0-15"}.
+ CodecParameterMap params;
+ FeedbackParams feedback_params;
+
+ Codec(const Codec& c);
+ Codec(Codec&& c);
+
+ virtual ~Codec();
+
+ // Indicates if this codec is compatible with the specified codec by
+ // checking the assigned id and profile values for the relevant video codecs.
+ // H264 levels are not compared.
+ bool Matches(const Codec& codec,
+ const webrtc::FieldTrialsView* field_trials = nullptr) const;
+ bool MatchesRtpCodec(const webrtc::RtpCodec& capability) const;
+
+ // Find the parameter for `name` and write the value to `out`.
+ bool GetParam(const std::string& name, std::string* out) const;
+ bool GetParam(const std::string& name, int* out) const;
+
+ void SetParam(const std::string& name, const std::string& value);
+ void SetParam(const std::string& name, int value);
+
+ // It is safe to input a non-existent parameter.
+ // Returns true if the parameter existed, false if it did not exist.
+ bool RemoveParam(const std::string& name);
+
+ bool HasFeedbackParam(const FeedbackParam& param) const;
+ void AddFeedbackParam(const FeedbackParam& param);
+
+  // Filter `this` feedback params such that only those shared by both `this`
+ // and `other` are kept.
+ void IntersectFeedbackParams(const Codec& other);
+
+ virtual webrtc::RtpCodecParameters ToCodecParameters() const;
+
+  // The codec represents an actual media codec, and not a resiliency codec.
+ bool IsMediaCodec() const;
+  // The codec represents a resiliency codec such as RED, RTX or FEC variants.
+ bool IsResiliencyCodec() const;
+ ResiliencyType GetResiliencyType() const;
+
+ // Validates a VideoCodec's payload type, dimensions and bitrates etc. If they
+  // don't make sense (such as max < min bitrate), an error is logged and
+ // ValidateCodecFormat returns false.
+ bool ValidateCodecFormat() const;
+
+ std::string ToString() const;
+
+ Codec& operator=(const Codec& c);
+ Codec& operator=(Codec&& c);
+
+ bool operator==(const Codec& c) const;
+
+ bool operator!=(const Codec& c) const { return !(*this == c); }
+
+ protected:
+ // Creates an empty codec.
+ explicit Codec(Type type);
+ // Creates a codec with the given parameters.
+ Codec(Type type, int id, const std::string& name, int clockrate);
+ Codec(Type type,
+ int id,
+ const std::string& name,
+ int clockrate,
+ size_t channels);
+
+ explicit Codec(const webrtc::SdpAudioFormat& c);
+ explicit Codec(const webrtc::SdpVideoFormat& c);
+
+ friend Codec CreateAudioCodec(int id,
+ const std::string& name,
+ int clockrate,
+ size_t channels);
+ friend Codec CreateAudioCodec(const webrtc::SdpAudioFormat& c);
+ friend Codec CreateAudioRtxCodec(int rtx_payload_type,
+ int associated_payload_type);
+ friend Codec CreateVideoCodec(int id, const std::string& name);
+ friend Codec CreateVideoCodec(const webrtc::SdpVideoFormat& c);
+ friend Codec CreateVideoRtxCodec(int rtx_payload_type,
+ int associated_payload_type);
+};
+
+// TODO(webrtc:15214): Compatibility names, to be migrated away and removed.
+using VideoCodec = Codec;
+using AudioCodec = Codec;
+
+using VideoCodecs = std::vector<Codec>;
+using AudioCodecs = std::vector<Codec>;
+
+Codec CreateAudioCodec(int id,
+ const std::string& name,
+ int clockrate,
+ size_t channels);
+Codec CreateAudioCodec(const webrtc::SdpAudioFormat& c);
+Codec CreateAudioRtxCodec(int rtx_payload_type, int associated_payload_type);
+Codec CreateVideoCodec(const std::string& name);
+Codec CreateVideoCodec(int id, const std::string& name);
+Codec CreateVideoCodec(const webrtc::SdpVideoFormat& c);
+Codec CreateVideoRtxCodec(int rtx_payload_type, int associated_payload_type);
+
+// Get the codec setting associated with `payload_type`. If there
+// is no codec associated with that payload type it returns nullptr.
+const Codec* FindCodecById(const std::vector<Codec>& codecs, int payload_type);
+
+bool HasLntf(const Codec& codec);
+bool HasNack(const Codec& codec);
+bool HasRemb(const Codec& codec);
+bool HasRrtr(const Codec& codec);
+bool HasTransportCc(const Codec& codec);
+
+// Returns the first codec in `supported_codecs` that matches `codec`, or
+// nullptr if no codec matches.
+const Codec* FindMatchingVideoCodec(const std::vector<Codec>& supported_codecs,
+ const Codec& codec);
+
+// Returns all codecs in `supported_codecs` that matches `codec`.
+std::vector<const Codec*> FindAllMatchingCodecs(
+ const std::vector<Codec>& supported_codecs,
+ const Codec& codec);
+
+RTC_EXPORT void AddH264ConstrainedBaselineProfileToSupportedFormats(
+ std::vector<webrtc::SdpVideoFormat>* supported_formats);
+
+} // namespace cricket
+
+#endif // MEDIA_BASE_CODEC_H_
diff --git a/third_party/libwebrtc/media/base/codec_unittest.cc b/third_party/libwebrtc/media/base/codec_unittest.cc
new file mode 100644
index 0000000000..eb34530c38
--- /dev/null
+++ b/third_party/libwebrtc/media/base/codec_unittest.cc
@@ -0,0 +1,538 @@
+/*
+ * Copyright (c) 2009 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/base/codec.h"
+
+#include <tuple>
+
+#include "api/video_codecs/av1_profile.h"
+#include "api/video_codecs/h264_profile_level_id.h"
+#include "api/video_codecs/vp9_profile.h"
+#include "modules/video_coding/codecs/h264/include/h264.h"
+#include "rtc_base/gunit.h"
+
+using cricket::AudioCodec;
+using cricket::Codec;
+using cricket::FeedbackParam;
+using cricket::kCodecParamAssociatedPayloadType;
+using cricket::kCodecParamMaxBitrate;
+using cricket::kCodecParamMinBitrate;
+using cricket::VideoCodec;
+
+class TestCodec : public Codec {
+ public:
+ TestCodec(int id, const std::string& name, int clockrate)
+ : Codec(Type::kAudio, id, name, clockrate) {}
+ TestCodec() : Codec(Type::kAudio) {}
+ TestCodec(const TestCodec& c) = default;
+ TestCodec& operator=(const TestCodec& c) = default;
+};
+
+TEST(CodecTest, TestCodecOperators) {
+ TestCodec c0(96, "D", 1000);
+ c0.SetParam("a", 1);
+
+ TestCodec c1 = c0;
+ EXPECT_TRUE(c1 == c0);
+
+ int param_value0;
+ int param_value1;
+ EXPECT_TRUE(c0.GetParam("a", &param_value0));
+ EXPECT_TRUE(c1.GetParam("a", &param_value1));
+ EXPECT_EQ(param_value0, param_value1);
+
+ c1.id = 86;
+ EXPECT_TRUE(c0 != c1);
+
+ c1 = c0;
+ c1.name = "x";
+ EXPECT_TRUE(c0 != c1);
+
+ c1 = c0;
+ c1.clockrate = 2000;
+ EXPECT_TRUE(c0 != c1);
+
+ c1 = c0;
+ c1.SetParam("a", 2);
+ EXPECT_TRUE(c0 != c1);
+
+ TestCodec c5;
+ TestCodec c6(0, "", 0);
+ EXPECT_TRUE(c5 == c6);
+}
+
+TEST(CodecTest, TestAudioCodecOperators) {
+ AudioCodec c0 = cricket::CreateAudioCodec(96, "A", 44100, 2);
+ AudioCodec c1 = cricket::CreateAudioCodec(95, "A", 44100, 2);
+ AudioCodec c2 = cricket::CreateAudioCodec(96, "x", 44100, 2);
+ AudioCodec c3 = cricket::CreateAudioCodec(96, "A", 48000, 2);
+ AudioCodec c4 = cricket::CreateAudioCodec(96, "A", 44100, 2);
+ c4.bitrate = 10000;
+ AudioCodec c5 = cricket::CreateAudioCodec(96, "A", 44100, 1);
+ EXPECT_NE(c0, c1);
+ EXPECT_NE(c0, c2);
+ EXPECT_NE(c0, c3);
+ EXPECT_NE(c0, c4);
+ EXPECT_NE(c0, c5);
+
+ AudioCodec c8 = cricket::CreateAudioCodec(0, "", 0, 0);
+ AudioCodec c9 = c0;
+ EXPECT_EQ(c9, c0);
+
+ AudioCodec c10(c0);
+ AudioCodec c11(c0);
+ AudioCodec c12(c0);
+ AudioCodec c13(c0);
+ c10.params["x"] = "abc";
+ c11.params["x"] = "def";
+ c12.params["y"] = "abc";
+ c13.params["x"] = "abc";
+ EXPECT_NE(c10, c0);
+ EXPECT_NE(c11, c0);
+ EXPECT_NE(c11, c10);
+ EXPECT_NE(c12, c0);
+ EXPECT_NE(c12, c10);
+ EXPECT_NE(c12, c11);
+ EXPECT_EQ(c13, c10);
+}
+
+TEST(CodecTest, TestAudioCodecMatches) {
+ // Test a codec with a static payload type.
+ AudioCodec c0 = cricket::CreateAudioCodec(34, "A", 44100, 1);
+ EXPECT_TRUE(c0.Matches(cricket::CreateAudioCodec(34, "", 44100, 1)));
+ EXPECT_TRUE(c0.Matches(cricket::CreateAudioCodec(34, "", 44100, 0)));
+ EXPECT_TRUE(c0.Matches(cricket::CreateAudioCodec(34, "", 44100, 0)));
+ EXPECT_TRUE(c0.Matches(cricket::CreateAudioCodec(34, "", 0, 0)));
+ EXPECT_FALSE(c0.Matches(cricket::CreateAudioCodec(96, "A", 44100, 1)));
+ EXPECT_FALSE(c0.Matches(cricket::CreateAudioCodec(96, "", 44100, 1)));
+ EXPECT_FALSE(c0.Matches(cricket::CreateAudioCodec(95, "", 55100, 1)));
+ EXPECT_FALSE(c0.Matches(cricket::CreateAudioCodec(95, "", 44100, 1)));
+ EXPECT_FALSE(c0.Matches(cricket::CreateAudioCodec(95, "", 44100, 2)));
+ EXPECT_FALSE(c0.Matches(cricket::CreateAudioCodec(95, "", 55100, 2)));
+
+ // Test a codec with a dynamic payload type.
+ AudioCodec c1 = cricket::CreateAudioCodec(96, "A", 44100, 1);
+ EXPECT_TRUE(c1.Matches(cricket::CreateAudioCodec(96, "A", 0, 0)));
+ EXPECT_TRUE(c1.Matches(cricket::CreateAudioCodec(97, "A", 0, 0)));
+ EXPECT_TRUE(c1.Matches(cricket::CreateAudioCodec(96, "a", 0, 0)));
+ EXPECT_TRUE(c1.Matches(cricket::CreateAudioCodec(97, "a", 0, 0)));
+ EXPECT_TRUE(c1.Matches(cricket::CreateAudioCodec(35, "a", 0, 0)));
+ EXPECT_TRUE(c1.Matches(cricket::CreateAudioCodec(42, "a", 0, 0)));
+ EXPECT_TRUE(c1.Matches(cricket::CreateAudioCodec(65, "a", 0, 0)));
+ EXPECT_FALSE(c1.Matches(cricket::CreateAudioCodec(95, "A", 0, 0)));
+ EXPECT_FALSE(c1.Matches(cricket::CreateAudioCodec(34, "A", 0, 0)));
+ EXPECT_FALSE(c1.Matches(cricket::CreateAudioCodec(96, "", 44100, 2)));
+ EXPECT_FALSE(c1.Matches(cricket::CreateAudioCodec(96, "A", 55100, 1)));
+
+ // Test a codec with a dynamic payload type, and auto bitrate.
+ AudioCodec c2 = cricket::CreateAudioCodec(97, "A", 16000, 1);
+ // Use default bitrate.
+ EXPECT_TRUE(c2.Matches(cricket::CreateAudioCodec(97, "A", 16000, 1)));
+ EXPECT_TRUE(c2.Matches(cricket::CreateAudioCodec(97, "A", 16000, 0)));
+ // Use explicit bitrate.
+ EXPECT_TRUE(c2.Matches(cricket::CreateAudioCodec(97, "A", 16000, 1)));
+ // Backward compatibility with clients that might send "-1" (for default).
+ EXPECT_TRUE(c2.Matches(cricket::CreateAudioCodec(97, "A", 16000, 1)));
+
+ // Stereo doesn't match channels = 0.
+ AudioCodec c3 = cricket::CreateAudioCodec(96, "A", 44100, 2);
+ EXPECT_TRUE(c3.Matches(cricket::CreateAudioCodec(96, "A", 44100, 2)));
+ EXPECT_FALSE(c3.Matches(cricket::CreateAudioCodec(96, "A", 44100, 1)));
+ EXPECT_FALSE(c3.Matches(cricket::CreateAudioCodec(96, "A", 44100, 0)));
+}
+
+TEST(CodecTest, TestVideoCodecOperators) {
+ VideoCodec c0 = cricket::CreateVideoCodec(96, "V");
+ VideoCodec c1 = cricket::CreateVideoCodec(95, "V");
+ VideoCodec c2 = cricket::CreateVideoCodec(96, "x");
+
+ EXPECT_TRUE(c0 != c1);
+ EXPECT_TRUE(c0 != c2);
+
+ VideoCodec c8 = cricket::CreateVideoCodec(0, "");
+ VideoCodec c9 = c0;
+ EXPECT_TRUE(c9 == c0);
+
+ VideoCodec c10(c0);
+ VideoCodec c11(c0);
+ VideoCodec c12(c0);
+ VideoCodec c13(c0);
+ c10.params["x"] = "abc";
+ c11.params["x"] = "def";
+ c12.params["y"] = "abc";
+ c13.params["x"] = "abc";
+ EXPECT_TRUE(c10 != c0);
+ EXPECT_TRUE(c11 != c0);
+ EXPECT_TRUE(c11 != c10);
+ EXPECT_TRUE(c12 != c0);
+ EXPECT_TRUE(c12 != c10);
+ EXPECT_TRUE(c12 != c11);
+ EXPECT_TRUE(c13 == c10);
+}
+
+TEST(CodecTest, TestVideoCodecEqualsWithDifferentPacketization) {
+ VideoCodec c0 = cricket::CreateVideoCodec(100, cricket::kVp8CodecName);
+ VideoCodec c1 = cricket::CreateVideoCodec(100, cricket::kVp8CodecName);
+ VideoCodec c2 = cricket::CreateVideoCodec(100, cricket::kVp8CodecName);
+ c2.packetization = "raw";
+
+ EXPECT_EQ(c0, c1);
+ EXPECT_NE(c0, c2);
+ EXPECT_NE(c2, c0);
+ EXPECT_EQ(c2, c2);
+}
+
+TEST(CodecTest, TestVideoCodecMatches) {
+ // Test a codec with a static payload type.
+ VideoCodec c0 = cricket::CreateVideoCodec(34, "V");
+ EXPECT_TRUE(c0.Matches(cricket::CreateVideoCodec(34, "")));
+ EXPECT_FALSE(c0.Matches(cricket::CreateVideoCodec(96, "")));
+ EXPECT_FALSE(c0.Matches(cricket::CreateVideoCodec(96, "V")));
+
+ // Test a codec with a dynamic payload type.
+ VideoCodec c1 = cricket::CreateVideoCodec(96, "V");
+ EXPECT_TRUE(c1.Matches(cricket::CreateVideoCodec(96, "V")));
+ EXPECT_TRUE(c1.Matches(cricket::CreateVideoCodec(97, "V")));
+ EXPECT_TRUE(c1.Matches(cricket::CreateVideoCodec(96, "v")));
+ EXPECT_TRUE(c1.Matches(cricket::CreateVideoCodec(97, "v")));
+ EXPECT_TRUE(c1.Matches(cricket::CreateVideoCodec(35, "v")));
+ EXPECT_TRUE(c1.Matches(cricket::CreateVideoCodec(42, "v")));
+ EXPECT_TRUE(c1.Matches(cricket::CreateVideoCodec(65, "v")));
+ EXPECT_FALSE(c1.Matches(cricket::CreateVideoCodec(96, "")));
+ EXPECT_FALSE(c1.Matches(cricket::CreateVideoCodec(95, "V")));
+ EXPECT_FALSE(c1.Matches(cricket::CreateVideoCodec(34, "V")));
+}
+
+TEST(CodecTest, TestVideoCodecMatchesWithDifferentPacketization) {
+ VideoCodec c0 = cricket::CreateVideoCodec(100, cricket::kVp8CodecName);
+ VideoCodec c1 = cricket::CreateVideoCodec(101, cricket::kVp8CodecName);
+ c1.packetization = "raw";
+
+ EXPECT_TRUE(c0.Matches(c1));
+ EXPECT_TRUE(c1.Matches(c0));
+}
+
+// AV1 codecs compare profile information.
+TEST(CodecTest, TestAV1CodecMatches) {
+ const char kProfile0[] = "0";
+ const char kProfile1[] = "1";
+ const char kProfile2[] = "2";
+
+ VideoCodec c_no_profile =
+ cricket::CreateVideoCodec(95, cricket::kAv1CodecName);
+ VideoCodec c_profile0 = cricket::CreateVideoCodec(95, cricket::kAv1CodecName);
+ c_profile0.params[webrtc::kAV1FmtpProfile] = kProfile0;
+ VideoCodec c_profile1 = cricket::CreateVideoCodec(95, cricket::kAv1CodecName);
+ c_profile1.params[webrtc::kAV1FmtpProfile] = kProfile1;
+ VideoCodec c_profile2 = cricket::CreateVideoCodec(95, cricket::kAv1CodecName);
+ c_profile2.params[webrtc::kAV1FmtpProfile] = kProfile2;
+
+ // An AV1 entry with no profile specified should be treated as profile-0.
+ EXPECT_TRUE(c_profile0.Matches(c_no_profile));
+
+ {
+ // Two AV1 entries without a profile specified are treated as duplicates.
+ VideoCodec c_no_profile_eq =
+ cricket::CreateVideoCodec(95, cricket::kAv1CodecName);
+ EXPECT_TRUE(c_no_profile.Matches(c_no_profile_eq));
+ }
+
+ {
+ // Two AV1 entries with profile 0 specified are treated as duplicates.
+ VideoCodec c_profile0_eq =
+ cricket::CreateVideoCodec(95, cricket::kAv1CodecName);
+ c_profile0_eq.params[webrtc::kAV1FmtpProfile] = kProfile0;
+ EXPECT_TRUE(c_profile0.Matches(c_profile0_eq));
+ }
+
+ {
+ // Two AV1 entries with profile 1 specified are treated as duplicates.
+ VideoCodec c_profile1_eq =
+ cricket::CreateVideoCodec(95, cricket::kAv1CodecName);
+ c_profile1_eq.params[webrtc::kAV1FmtpProfile] = kProfile1;
+ EXPECT_TRUE(c_profile1.Matches(c_profile1_eq));
+ }
+
+ // AV1 entries with different profiles (0 and 1) are seen as distinct.
+ EXPECT_FALSE(c_profile0.Matches(c_profile1));
+ EXPECT_FALSE(c_no_profile.Matches(c_profile1));
+
+ // AV1 entries with different profiles (0 and 2) are seen as distinct.
+ EXPECT_FALSE(c_profile0.Matches(c_profile2));
+ EXPECT_FALSE(c_no_profile.Matches(c_profile2));
+}
+
+// VP9 codecs compare profile information.
+TEST(CodecTest, TestVP9CodecMatches) {
+ const char kProfile0[] = "0";
+ const char kProfile2[] = "2";
+
+ VideoCodec c_no_profile =
+ cricket::CreateVideoCodec(95, cricket::kVp9CodecName);
+ VideoCodec c_profile0 = cricket::CreateVideoCodec(95, cricket::kVp9CodecName);
+ c_profile0.params[webrtc::kVP9FmtpProfileId] = kProfile0;
+
+ EXPECT_TRUE(c_profile0.Matches(c_no_profile));
+
+ {
+ VideoCodec c_profile0_eq =
+ cricket::CreateVideoCodec(95, cricket::kVp9CodecName);
+ c_profile0_eq.params[webrtc::kVP9FmtpProfileId] = kProfile0;
+ EXPECT_TRUE(c_profile0.Matches(c_profile0_eq));
+ }
+
+ {
+ VideoCodec c_profile2 =
+ cricket::CreateVideoCodec(95, cricket::kVp9CodecName);
+ c_profile2.params[webrtc::kVP9FmtpProfileId] = kProfile2;
+ EXPECT_FALSE(c_profile0.Matches(c_profile2));
+ EXPECT_FALSE(c_no_profile.Matches(c_profile2));
+ }
+
+ {
+ VideoCodec c_no_profile_eq =
+ cricket::CreateVideoCodec(95, cricket::kVp9CodecName);
+ EXPECT_TRUE(c_no_profile.Matches(c_no_profile_eq));
+ }
+}
+
+// Matching H264 codecs also need to have matching profile-level-id and
+// packetization-mode.
+TEST(CodecTest, TestH264CodecMatches) {
+ const char kProfileLevelId1[] = "42e01f";
+ const char kProfileLevelId2[] = "42a01e";
+
+ VideoCodec pli_1_pm_0 = cricket::CreateVideoCodec(95, "H264");
+ pli_1_pm_0.params[cricket::kH264FmtpProfileLevelId] = kProfileLevelId1;
+ pli_1_pm_0.params[cricket::kH264FmtpPacketizationMode] = "0";
+
+ {
+ VideoCodec pli_1_pm_blank = cricket::CreateVideoCodec(95, "H264");
+ pli_1_pm_blank.params[cricket::kH264FmtpProfileLevelId] = kProfileLevelId1;
+ pli_1_pm_blank.params.erase(
+ pli_1_pm_blank.params.find(cricket::kH264FmtpPacketizationMode));
+
+ // Matches since if packetization-mode is not specified it defaults to "0".
+ EXPECT_TRUE(pli_1_pm_0.Matches(pli_1_pm_blank));
+ }
+
+ {
+ VideoCodec pli_1_pm_1 = cricket::CreateVideoCodec(95, "H264");
+ pli_1_pm_1.params[cricket::kH264FmtpProfileLevelId] = kProfileLevelId1;
+ pli_1_pm_1.params[cricket::kH264FmtpPacketizationMode] = "1";
+
+ // Does not match since packetization-mode is different.
+ EXPECT_FALSE(pli_1_pm_0.Matches(pli_1_pm_1));
+ }
+
+ {
+ VideoCodec pli_2_pm_0 = cricket::CreateVideoCodec(95, "H264");
+ pli_2_pm_0.params[cricket::kH264FmtpProfileLevelId] = kProfileLevelId2;
+ pli_2_pm_0.params[cricket::kH264FmtpPacketizationMode] = "0";
+
+ // Does not match since profile-level-id is different.
+ EXPECT_FALSE(pli_1_pm_0.Matches(pli_2_pm_0));
+ }
+}
+
+TEST(CodecTest, TestSetParamGetParamAndRemoveParam) {
+ AudioCodec codec = cricket::CreateAudioCodec(0, "foo", 22222, 2);
+ codec.SetParam("a", "1");
+ codec.SetParam("b", "x");
+
+ int int_value = 0;
+ EXPECT_TRUE(codec.GetParam("a", &int_value));
+ EXPECT_EQ(1, int_value);
+ EXPECT_FALSE(codec.GetParam("b", &int_value));
+ EXPECT_FALSE(codec.GetParam("c", &int_value));
+
+ std::string str_value;
+ EXPECT_TRUE(codec.GetParam("a", &str_value));
+ EXPECT_EQ("1", str_value);
+ EXPECT_TRUE(codec.GetParam("b", &str_value));
+ EXPECT_EQ("x", str_value);
+ EXPECT_FALSE(codec.GetParam("c", &str_value));
+ EXPECT_TRUE(codec.RemoveParam("a"));
+ EXPECT_FALSE(codec.RemoveParam("c"));
+}
+
+TEST(CodecTest, TestIntersectFeedbackParams) {
+ const FeedbackParam a1("a", "1");
+ const FeedbackParam b2("b", "2");
+ const FeedbackParam b3("b", "3");
+ const FeedbackParam c3("c", "3");
+ TestCodec c1;
+ c1.AddFeedbackParam(a1); // Only match with c2.
+ c1.AddFeedbackParam(b2); // Same param different values.
+ c1.AddFeedbackParam(c3); // Not in c2.
+ TestCodec c2;
+ c2.AddFeedbackParam(a1);
+ c2.AddFeedbackParam(b3);
+
+ c1.IntersectFeedbackParams(c2);
+ EXPECT_TRUE(c1.HasFeedbackParam(a1));
+ EXPECT_FALSE(c1.HasFeedbackParam(b2));
+ EXPECT_FALSE(c1.HasFeedbackParam(c3));
+}
+
+TEST(CodecTest, TestGetCodecType) {
+  // Codec type comparison should be case insensitive on names.
+ const VideoCodec codec = cricket::CreateVideoCodec(96, "V");
+ const VideoCodec rtx_codec = cricket::CreateVideoCodec(96, "rTx");
+ const VideoCodec ulpfec_codec = cricket::CreateVideoCodec(96, "ulpFeC");
+ const VideoCodec flexfec_codec = cricket::CreateVideoCodec(96, "FlExFeC-03");
+ const VideoCodec red_codec = cricket::CreateVideoCodec(96, "ReD");
+ EXPECT_TRUE(codec.IsMediaCodec());
+ EXPECT_EQ(codec.GetResiliencyType(), Codec::ResiliencyType::kNone);
+ EXPECT_EQ(rtx_codec.GetResiliencyType(), Codec::ResiliencyType::kRtx);
+ EXPECT_EQ(ulpfec_codec.GetResiliencyType(), Codec::ResiliencyType::kUlpfec);
+ EXPECT_EQ(flexfec_codec.GetResiliencyType(), Codec::ResiliencyType::kFlexfec);
+ EXPECT_EQ(red_codec.GetResiliencyType(), Codec::ResiliencyType::kRed);
+}
+
+TEST(CodecTest, TestCreateRtxCodec) {
+ VideoCodec rtx_codec = cricket::CreateVideoRtxCodec(96, 120);
+ EXPECT_EQ(96, rtx_codec.id);
+ EXPECT_EQ(rtx_codec.GetResiliencyType(), Codec::ResiliencyType::kRtx);
+ int associated_payload_type;
+ ASSERT_TRUE(rtx_codec.GetParam(kCodecParamAssociatedPayloadType,
+ &associated_payload_type));
+ EXPECT_EQ(120, associated_payload_type);
+}
+
+TEST(CodecTest, TestValidateCodecFormat) {
+ const VideoCodec codec = cricket::CreateVideoCodec(96, "V");
+ ASSERT_TRUE(codec.ValidateCodecFormat());
+
+ // Accept 0-127 as payload types.
+ VideoCodec low_payload_type = codec;
+ low_payload_type.id = 0;
+ VideoCodec high_payload_type = codec;
+ high_payload_type.id = 127;
+ ASSERT_TRUE(low_payload_type.ValidateCodecFormat());
+ EXPECT_TRUE(high_payload_type.ValidateCodecFormat());
+
+ // Reject negative payloads.
+ VideoCodec negative_payload_type = codec;
+ negative_payload_type.id = -1;
+ EXPECT_FALSE(negative_payload_type.ValidateCodecFormat());
+
+ // Reject too-high payloads.
+ VideoCodec too_high_payload_type = codec;
+ too_high_payload_type.id = 128;
+ EXPECT_FALSE(too_high_payload_type.ValidateCodecFormat());
+
+ // Reject codecs with min bitrate > max bitrate.
+ VideoCodec incorrect_bitrates = codec;
+ incorrect_bitrates.params[kCodecParamMinBitrate] = "100";
+ incorrect_bitrates.params[kCodecParamMaxBitrate] = "80";
+ EXPECT_FALSE(incorrect_bitrates.ValidateCodecFormat());
+
+ // Accept min bitrate == max bitrate.
+ VideoCodec equal_bitrates = codec;
+ equal_bitrates.params[kCodecParamMinBitrate] = "100";
+ equal_bitrates.params[kCodecParamMaxBitrate] = "100";
+ EXPECT_TRUE(equal_bitrates.ValidateCodecFormat());
+
+ // Accept min bitrate < max bitrate.
+ VideoCodec different_bitrates = codec;
+ different_bitrates.params[kCodecParamMinBitrate] = "99";
+ different_bitrates.params[kCodecParamMaxBitrate] = "100";
+ EXPECT_TRUE(different_bitrates.ValidateCodecFormat());
+}
+
+TEST(CodecTest, TestToCodecParameters) {
+ VideoCodec v = cricket::CreateVideoCodec(96, "V");
+ v.SetParam("p1", "v1");
+ webrtc::RtpCodecParameters codec_params_1 = v.ToCodecParameters();
+ EXPECT_EQ(96, codec_params_1.payload_type);
+ EXPECT_EQ(cricket::MEDIA_TYPE_VIDEO, codec_params_1.kind);
+ EXPECT_EQ("V", codec_params_1.name);
+ EXPECT_EQ(cricket::kVideoCodecClockrate, codec_params_1.clock_rate);
+ EXPECT_EQ(absl::nullopt, codec_params_1.num_channels);
+ ASSERT_EQ(1u, codec_params_1.parameters.size());
+ EXPECT_EQ("p1", codec_params_1.parameters.begin()->first);
+ EXPECT_EQ("v1", codec_params_1.parameters.begin()->second);
+
+ AudioCodec a = cricket::CreateAudioCodec(97, "A", 44100, 2);
+ a.SetParam("p1", "a1");
+ webrtc::RtpCodecParameters codec_params_2 = a.ToCodecParameters();
+ EXPECT_EQ(97, codec_params_2.payload_type);
+ EXPECT_EQ(cricket::MEDIA_TYPE_AUDIO, codec_params_2.kind);
+ EXPECT_EQ("A", codec_params_2.name);
+ EXPECT_EQ(44100, codec_params_2.clock_rate);
+ EXPECT_EQ(2, codec_params_2.num_channels);
+ ASSERT_EQ(1u, codec_params_2.parameters.size());
+ EXPECT_EQ("p1", codec_params_2.parameters.begin()->first);
+ EXPECT_EQ("a1", codec_params_2.parameters.begin()->second);
+}
+
+TEST(CodecTest, H264CostrainedBaselineIsAddedIfH264IsSupported) {
+ const std::vector<webrtc::SdpVideoFormat> kExplicitlySupportedFormats = {
+ webrtc::CreateH264Format(webrtc::H264Profile::kProfileBaseline,
+ webrtc::H264Level::kLevel3_1, "1"),
+ webrtc::CreateH264Format(webrtc::H264Profile::kProfileBaseline,
+ webrtc::H264Level::kLevel3_1, "0")};
+
+ std::vector<webrtc::SdpVideoFormat> supported_formats =
+ kExplicitlySupportedFormats;
+ cricket::AddH264ConstrainedBaselineProfileToSupportedFormats(
+ &supported_formats);
+
+ const webrtc::SdpVideoFormat kH264ConstrainedBasedlinePacketization1 =
+ webrtc::CreateH264Format(webrtc::H264Profile::kProfileConstrainedBaseline,
+ webrtc::H264Level::kLevel3_1, "1");
+ const webrtc::SdpVideoFormat kH264ConstrainedBasedlinePacketization0 =
+ webrtc::CreateH264Format(webrtc::H264Profile::kProfileConstrainedBaseline,
+ webrtc::H264Level::kLevel3_1, "0");
+
+ EXPECT_EQ(supported_formats[0], kExplicitlySupportedFormats[0]);
+ EXPECT_EQ(supported_formats[1], kExplicitlySupportedFormats[1]);
+ EXPECT_EQ(supported_formats[2], kH264ConstrainedBasedlinePacketization1);
+ EXPECT_EQ(supported_formats[3], kH264ConstrainedBasedlinePacketization0);
+}
+
+TEST(CodecTest, H264CostrainedBaselineIsNotAddedIfH264IsUnsupported) {
+ const std::vector<webrtc::SdpVideoFormat> kExplicitlySupportedFormats = {
+ {cricket::kVp9CodecName,
+ {{webrtc::kVP9FmtpProfileId,
+ VP9ProfileToString(webrtc::VP9Profile::kProfile0)}}}};
+
+ std::vector<webrtc::SdpVideoFormat> supported_formats =
+ kExplicitlySupportedFormats;
+ cricket::AddH264ConstrainedBaselineProfileToSupportedFormats(
+ &supported_formats);
+
+ EXPECT_EQ(supported_formats[0], kExplicitlySupportedFormats[0]);
+ EXPECT_EQ(supported_formats.size(), kExplicitlySupportedFormats.size());
+}
+
+TEST(CodecTest, H264CostrainedBaselineNotAddedIfAlreadySpecified) {
+ const std::vector<webrtc::SdpVideoFormat> kExplicitlySupportedFormats = {
+ webrtc::CreateH264Format(webrtc::H264Profile::kProfileBaseline,
+ webrtc::H264Level::kLevel3_1, "1"),
+ webrtc::CreateH264Format(webrtc::H264Profile::kProfileBaseline,
+ webrtc::H264Level::kLevel3_1, "0"),
+ webrtc::CreateH264Format(webrtc::H264Profile::kProfileConstrainedBaseline,
+ webrtc::H264Level::kLevel3_1, "1"),
+ webrtc::CreateH264Format(webrtc::H264Profile::kProfileConstrainedBaseline,
+ webrtc::H264Level::kLevel3_1, "0")};
+
+ std::vector<webrtc::SdpVideoFormat> supported_formats =
+ kExplicitlySupportedFormats;
+ cricket::AddH264ConstrainedBaselineProfileToSupportedFormats(
+ &supported_formats);
+
+ EXPECT_EQ(supported_formats[0], kExplicitlySupportedFormats[0]);
+ EXPECT_EQ(supported_formats[1], kExplicitlySupportedFormats[1]);
+ EXPECT_EQ(supported_formats[2], kExplicitlySupportedFormats[2]);
+ EXPECT_EQ(supported_formats[3], kExplicitlySupportedFormats[3]);
+ EXPECT_EQ(supported_formats.size(), kExplicitlySupportedFormats.size());
+}
diff --git a/third_party/libwebrtc/media/base/fake_frame_source.cc b/third_party/libwebrtc/media/base/fake_frame_source.cc
new file mode 100644
index 0000000000..61bc5857d9
--- /dev/null
+++ b/third_party/libwebrtc/media/base/fake_frame_source.cc
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/base/fake_frame_source.h"
+
+#include "api/scoped_refptr.h"
+#include "api/video/i420_buffer.h"
+#include "api/video/video_frame_buffer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/time_utils.h"
+
+namespace cricket {
+
+FakeFrameSource::FakeFrameSource(int width,
+ int height,
+ int interval_us,
+ int64_t timestamp_offset_us)
+ : width_(width),
+ height_(height),
+ interval_us_(interval_us),
+ next_timestamp_us_(timestamp_offset_us) {
+ RTC_CHECK_GT(width_, 0);
+ RTC_CHECK_GT(height_, 0);
+ RTC_CHECK_GT(interval_us_, 0);
+ RTC_CHECK_GE(next_timestamp_us_, 0);
+}
+
+FakeFrameSource::FakeFrameSource(int width, int height, int interval_us)
+ : FakeFrameSource(width, height, interval_us, rtc::TimeMicros()) {}
+
+webrtc::VideoRotation FakeFrameSource::GetRotation() const {
+ return rotation_;
+}
+
+void FakeFrameSource::SetRotation(webrtc::VideoRotation rotation) {
+ rotation_ = rotation;
+}
+
+webrtc::VideoFrame FakeFrameSource::GetFrameRotationApplied() {
+ switch (rotation_) {
+ case webrtc::kVideoRotation_0:
+ case webrtc::kVideoRotation_180:
+ return GetFrame(width_, height_, webrtc::kVideoRotation_0, interval_us_);
+ case webrtc::kVideoRotation_90:
+ case webrtc::kVideoRotation_270:
+ return GetFrame(height_, width_, webrtc::kVideoRotation_0, interval_us_);
+ }
+ RTC_DCHECK_NOTREACHED() << "Invalid rotation value: "
+ << static_cast<int>(rotation_);
+ // Without this return, the Windows Visual Studio compiler complains
+ // "not all control paths return a value".
+ return GetFrame();
+}
+
+webrtc::VideoFrame FakeFrameSource::GetFrame() {
+ return GetFrame(width_, height_, rotation_, interval_us_);
+}
+
+webrtc::VideoFrame FakeFrameSource::GetFrame(int width,
+ int height,
+ webrtc::VideoRotation rotation,
+ int interval_us) {
+ RTC_CHECK_GT(width, 0);
+ RTC_CHECK_GT(height, 0);
+ RTC_CHECK_GT(interval_us, 0);
+
+ rtc::scoped_refptr<webrtc::I420Buffer> buffer(
+ webrtc::I420Buffer::Create(width, height));
+
+ buffer->InitializeData();
+ webrtc::VideoFrame frame = webrtc::VideoFrame::Builder()
+ .set_video_frame_buffer(buffer)
+ .set_rotation(rotation)
+ .set_timestamp_us(next_timestamp_us_)
+ .build();
+
+ next_timestamp_us_ += interval_us;
+ return frame;
+}
+
+} // namespace cricket
diff --git a/third_party/libwebrtc/media/base/fake_frame_source.h b/third_party/libwebrtc/media/base/fake_frame_source.h
new file mode 100644
index 0000000000..4c56204e69
--- /dev/null
+++ b/third_party/libwebrtc/media/base/fake_frame_source.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MEDIA_BASE_FAKE_FRAME_SOURCE_H_
+#define MEDIA_BASE_FAKE_FRAME_SOURCE_H_
+
+#include "api/video/video_frame.h"
+#include "rtc_base/time_utils.h"
+
+namespace cricket {
+
+class FakeFrameSource {
+ public:
+ FakeFrameSource(int width,
+ int height,
+ int interval_us,
+ int64_t timestamp_offset_us);
+ FakeFrameSource(int width, int height, int interval_us);
+
+ webrtc::VideoRotation GetRotation() const;
+ void SetRotation(webrtc::VideoRotation rotation);
+
+ webrtc::VideoFrame GetFrame();
+ webrtc::VideoFrame GetFrameRotationApplied();
+
+ // Override configuration.
+ webrtc::VideoFrame GetFrame(int width,
+ int height,
+ webrtc::VideoRotation rotation,
+ int interval_us);
+
+ private:
+ const int width_;
+ const int height_;
+ const int interval_us_;
+
+ webrtc::VideoRotation rotation_ = webrtc::kVideoRotation_0;
+ int64_t next_timestamp_us_;
+};
+
+} // namespace cricket
+
+#endif // MEDIA_BASE_FAKE_FRAME_SOURCE_H_
diff --git a/third_party/libwebrtc/media/base/fake_media_engine.cc b/third_party/libwebrtc/media/base/fake_media_engine.cc
new file mode 100644
index 0000000000..5a1da3326e
--- /dev/null
+++ b/third_party/libwebrtc/media/base/fake_media_engine.cc
@@ -0,0 +1,705 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/base/fake_media_engine.h"
+
+#include <memory>
+#include <utility>
+
+#include "absl/algorithm/container.h"
+#include "absl/strings/match.h"
+#include "absl/types/optional.h"
+#include "media/base/media_channel.h"
+#include "rtc_base/checks.h"
+
+namespace cricket {
+using webrtc::TaskQueueBase;
+
+FakeVoiceMediaReceiveChannel::DtmfInfo::DtmfInfo(uint32_t ssrc,
+ int event_code,
+ int duration)
+ : ssrc(ssrc), event_code(event_code), duration(duration) {}
+
+FakeVoiceMediaReceiveChannel::VoiceChannelAudioSink::VoiceChannelAudioSink(
+ AudioSource* source)
+ : source_(source) {
+ source_->SetSink(this);
+}
+FakeVoiceMediaReceiveChannel::VoiceChannelAudioSink::~VoiceChannelAudioSink() {
+ if (source_) {
+ source_->SetSink(nullptr);
+ }
+}
+void FakeVoiceMediaReceiveChannel::VoiceChannelAudioSink::OnData(
+ const void* audio_data,
+ int bits_per_sample,
+ int sample_rate,
+ size_t number_of_channels,
+ size_t number_of_frames,
+ absl::optional<int64_t> absolute_capture_timestamp_ms) {}
+void FakeVoiceMediaReceiveChannel::VoiceChannelAudioSink::OnClose() {
+ source_ = nullptr;
+}
+AudioSource* FakeVoiceMediaReceiveChannel::VoiceChannelAudioSink::source()
+ const {
+ return source_;
+}
+
+FakeVoiceMediaReceiveChannel::FakeVoiceMediaReceiveChannel(
+ const AudioOptions& options,
+ TaskQueueBase* network_thread)
+ : RtpReceiveChannelHelper<VoiceMediaReceiveChannelInterface>(
+ network_thread),
+ max_bps_(-1) {
+ output_scalings_[0] = 1.0; // For default channel.
+ SetOptions(options);
+}
+FakeVoiceMediaReceiveChannel::~FakeVoiceMediaReceiveChannel() = default;
+const std::vector<AudioCodec>& FakeVoiceMediaReceiveChannel::recv_codecs()
+ const {
+ return recv_codecs_;
+}
+const std::vector<FakeVoiceMediaReceiveChannel::DtmfInfo>&
+FakeVoiceMediaReceiveChannel::dtmf_info_queue() const {
+ return dtmf_info_queue_;
+}
+const AudioOptions& FakeVoiceMediaReceiveChannel::options() const {
+ return options_;
+}
+int FakeVoiceMediaReceiveChannel::max_bps() const {
+ return max_bps_;
+}
+bool FakeVoiceMediaReceiveChannel::SetReceiverParameters(
+ const AudioReceiverParameters& params) {
+ set_recv_rtcp_parameters(params.rtcp);
+ return (SetRecvCodecs(params.codecs) &&
+ SetRecvRtpHeaderExtensions(params.extensions));
+}
+void FakeVoiceMediaReceiveChannel::SetPlayout(bool playout) {
+ set_playout(playout);
+}
+bool FakeVoiceMediaReceiveChannel::HasSource(uint32_t ssrc) const {
+ return local_sinks_.find(ssrc) != local_sinks_.end();
+}
+bool FakeVoiceMediaReceiveChannel::AddRecvStream(const StreamParams& sp) {
+ if (!RtpReceiveChannelHelper<
+ VoiceMediaReceiveChannelInterface>::AddRecvStream(sp))
+ return false;
+ output_scalings_[sp.first_ssrc()] = 1.0;
+ output_delays_[sp.first_ssrc()] = 0;
+ return true;
+}
+bool FakeVoiceMediaReceiveChannel::RemoveRecvStream(uint32_t ssrc) {
+ if (!RtpReceiveChannelHelper<
+ VoiceMediaReceiveChannelInterface>::RemoveRecvStream(ssrc))
+ return false;
+ output_scalings_.erase(ssrc);
+ output_delays_.erase(ssrc);
+ return true;
+}
+bool FakeVoiceMediaReceiveChannel::SetOutputVolume(uint32_t ssrc,
+ double volume) {
+ if (output_scalings_.find(ssrc) != output_scalings_.end()) {
+ output_scalings_[ssrc] = volume;
+ return true;
+ }
+ return false;
+}
+bool FakeVoiceMediaReceiveChannel::SetDefaultOutputVolume(double volume) {
+ for (auto& entry : output_scalings_) {
+ entry.second = volume;
+ }
+ return true;
+}
+bool FakeVoiceMediaReceiveChannel::GetOutputVolume(uint32_t ssrc,
+ double* volume) {
+ if (output_scalings_.find(ssrc) == output_scalings_.end())
+ return false;
+ *volume = output_scalings_[ssrc];
+ return true;
+}
+bool FakeVoiceMediaReceiveChannel::SetBaseMinimumPlayoutDelayMs(uint32_t ssrc,
+ int delay_ms) {
+ if (output_delays_.find(ssrc) == output_delays_.end()) {
+ return false;
+ } else {
+ output_delays_[ssrc] = delay_ms;
+ return true;
+ }
+}
+absl::optional<int> FakeVoiceMediaReceiveChannel::GetBaseMinimumPlayoutDelayMs(
+ uint32_t ssrc) const {
+ const auto it = output_delays_.find(ssrc);
+ if (it != output_delays_.end()) {
+ return it->second;
+ }
+ return absl::nullopt;
+}
+bool FakeVoiceMediaReceiveChannel::GetStats(VoiceMediaReceiveInfo* info,
+ bool get_and_clear_legacy_stats) {
+ return false;
+}
+void FakeVoiceMediaReceiveChannel::SetRawAudioSink(
+ uint32_t ssrc,
+ std::unique_ptr<webrtc::AudioSinkInterface> sink) {
+ sink_ = std::move(sink);
+}
+void FakeVoiceMediaReceiveChannel::SetDefaultRawAudioSink(
+ std::unique_ptr<webrtc::AudioSinkInterface> sink) {
+ sink_ = std::move(sink);
+}
+std::vector<webrtc::RtpSource> FakeVoiceMediaReceiveChannel::GetSources(
+ uint32_t ssrc) const {
+ return std::vector<webrtc::RtpSource>();
+}
+bool FakeVoiceMediaReceiveChannel::SetRecvCodecs(
+ const std::vector<AudioCodec>& codecs) {
+ if (fail_set_recv_codecs()) {
+ // Fake the failure in SetRecvCodecs.
+ return false;
+ }
+ recv_codecs_ = codecs;
+ return true;
+}
+bool FakeVoiceMediaReceiveChannel::SetMaxSendBandwidth(int bps) {
+ max_bps_ = bps;
+ return true;
+}
+bool FakeVoiceMediaReceiveChannel::SetOptions(const AudioOptions& options) {
+ // Does a "merge" of current options and set options.
+ options_.SetAll(options);
+ return true;
+}
+
+FakeVoiceMediaSendChannel::DtmfInfo::DtmfInfo(uint32_t ssrc,
+ int event_code,
+ int duration)
+ : ssrc(ssrc), event_code(event_code), duration(duration) {}
+
+FakeVoiceMediaSendChannel::VoiceChannelAudioSink::VoiceChannelAudioSink(
+ AudioSource* source)
+ : source_(source) {
+ source_->SetSink(this);
+}
+FakeVoiceMediaSendChannel::VoiceChannelAudioSink::~VoiceChannelAudioSink() {
+ if (source_) {
+ source_->SetSink(nullptr);
+ }
+}
+void FakeVoiceMediaSendChannel::VoiceChannelAudioSink::OnData(
+ const void* audio_data,
+ int bits_per_sample,
+ int sample_rate,
+ size_t number_of_channels,
+ size_t number_of_frames,
+ absl::optional<int64_t> absolute_capture_timestamp_ms) {}
+void FakeVoiceMediaSendChannel::VoiceChannelAudioSink::OnClose() {
+ source_ = nullptr;
+}
+AudioSource* FakeVoiceMediaSendChannel::VoiceChannelAudioSink::source() const {
+ return source_;
+}
+
+FakeVoiceMediaSendChannel::FakeVoiceMediaSendChannel(
+ const AudioOptions& options,
+ TaskQueueBase* network_thread)
+ : RtpSendChannelHelper<VoiceMediaSendChannelInterface>(network_thread),
+ max_bps_(-1) {
+ output_scalings_[0] = 1.0; // For default channel.
+ SetOptions(options);
+}
+FakeVoiceMediaSendChannel::~FakeVoiceMediaSendChannel() = default;
+const std::vector<AudioCodec>& FakeVoiceMediaSendChannel::send_codecs() const {
+ return send_codecs_;
+}
+absl::optional<Codec> FakeVoiceMediaSendChannel::GetSendCodec() const {
+ if (!send_codecs_.empty()) {
+ return send_codecs_.front();
+ }
+ return absl::nullopt;
+}
+const std::vector<FakeVoiceMediaSendChannel::DtmfInfo>&
+FakeVoiceMediaSendChannel::dtmf_info_queue() const {
+ return dtmf_info_queue_;
+}
+const AudioOptions& FakeVoiceMediaSendChannel::options() const {
+ return options_;
+}
+int FakeVoiceMediaSendChannel::max_bps() const {
+ return max_bps_;
+}
+bool FakeVoiceMediaSendChannel::SetSenderParameters(
+ const AudioSenderParameter& params) {
+ set_send_rtcp_parameters(params.rtcp);
+ SetExtmapAllowMixed(params.extmap_allow_mixed);
+ return (SetSendCodecs(params.codecs) &&
+ SetSendRtpHeaderExtensions(params.extensions) &&
+ SetMaxSendBandwidth(params.max_bandwidth_bps) &&
+ SetOptions(params.options));
+}
+void FakeVoiceMediaSendChannel::SetSend(bool send) {
+ set_sending(send);
+}
+bool FakeVoiceMediaSendChannel::SetAudioSend(uint32_t ssrc,
+ bool enable,
+ const AudioOptions* options,
+ AudioSource* source) {
+ if (!SetLocalSource(ssrc, source)) {
+ return false;
+ }
+ if (!RtpSendChannelHelper<VoiceMediaSendChannelInterface>::MuteStream(
+ ssrc, !enable)) {
+ return false;
+ }
+ if (enable && options) {
+ return SetOptions(*options);
+ }
+ return true;
+}
+bool FakeVoiceMediaSendChannel::HasSource(uint32_t ssrc) const {
+ return local_sinks_.find(ssrc) != local_sinks_.end();
+}
+bool FakeVoiceMediaSendChannel::CanInsertDtmf() {
+ for (std::vector<AudioCodec>::const_iterator it = send_codecs_.begin();
+ it != send_codecs_.end(); ++it) {
+ // Find the DTMF telephone event "codec".
+ if (absl::EqualsIgnoreCase(it->name, "telephone-event")) {
+ return true;
+ }
+ }
+ return false;
+}
+bool FakeVoiceMediaSendChannel::InsertDtmf(uint32_t ssrc,
+ int event_code,
+ int duration) {
+ dtmf_info_queue_.push_back(DtmfInfo(ssrc, event_code, duration));
+ return true;
+}
+bool FakeVoiceMediaSendChannel::GetOutputVolume(uint32_t ssrc, double* volume) {
+ if (output_scalings_.find(ssrc) == output_scalings_.end())
+ return false;
+ *volume = output_scalings_[ssrc];
+ return true;
+}
+bool FakeVoiceMediaSendChannel::GetStats(VoiceMediaSendInfo* info) {
+ return false;
+}
+bool FakeVoiceMediaSendChannel::SetSendCodecs(
+ const std::vector<AudioCodec>& codecs) {
+ if (fail_set_send_codecs()) {
+ // Fake the failure in SetSendCodecs.
+ return false;
+ }
+ send_codecs_ = codecs;
+ return true;
+}
+bool FakeVoiceMediaSendChannel::SetMaxSendBandwidth(int bps) {
+ max_bps_ = bps;
+ return true;
+}
+bool FakeVoiceMediaSendChannel::SetOptions(const AudioOptions& options) {
+ // Does a "merge" of current options and set options.
+ options_.SetAll(options);
+ return true;
+}
+bool FakeVoiceMediaSendChannel::SetLocalSource(uint32_t ssrc,
+ AudioSource* source) {
+ auto it = local_sinks_.find(ssrc);
+ if (source) {
+ if (it != local_sinks_.end()) {
+ RTC_CHECK(it->second->source() == source);
+ } else {
+ local_sinks_.insert(std::make_pair(
+ ssrc, std::make_unique<VoiceChannelAudioSink>(source)));
+ }
+ } else {
+ if (it != local_sinks_.end()) {
+ local_sinks_.erase(it);
+ }
+ }
+ return true;
+}
+
+bool CompareDtmfInfo(const FakeVoiceMediaSendChannel::DtmfInfo& info,
+ uint32_t ssrc,
+ int event_code,
+ int duration) {
+ return (info.duration == duration && info.event_code == event_code &&
+ info.ssrc == ssrc);
+}
+
+FakeVideoMediaSendChannel::FakeVideoMediaSendChannel(
+ const VideoOptions& options,
+ TaskQueueBase* network_thread)
+ : RtpSendChannelHelper<VideoMediaSendChannelInterface>(network_thread),
+ max_bps_(-1) {
+ SetOptions(options);
+}
+FakeVideoMediaSendChannel::~FakeVideoMediaSendChannel() = default;
+const std::vector<VideoCodec>& FakeVideoMediaSendChannel::send_codecs() const {
+ return send_codecs_;
+}
+const std::vector<VideoCodec>& FakeVideoMediaSendChannel::codecs() const {
+ return send_codecs();
+}
+const VideoOptions& FakeVideoMediaSendChannel::options() const {
+ return options_;
+}
+int FakeVideoMediaSendChannel::max_bps() const {
+ return max_bps_;
+}
+bool FakeVideoMediaSendChannel::SetSenderParameters(
+ const VideoSenderParameters& params) {
+ set_send_rtcp_parameters(params.rtcp);
+ SetExtmapAllowMixed(params.extmap_allow_mixed);
+ return (SetSendCodecs(params.codecs) &&
+ SetSendRtpHeaderExtensions(params.extensions) &&
+ SetMaxSendBandwidth(params.max_bandwidth_bps));
+}
+absl::optional<Codec> FakeVideoMediaSendChannel::GetSendCodec() const {
+ if (send_codecs_.empty()) {
+ return absl::nullopt;
+ }
+ return send_codecs_[0];
+}
+bool FakeVideoMediaSendChannel::SetSend(bool send) {
+ return set_sending(send);
+}
+bool FakeVideoMediaSendChannel::SetVideoSend(
+ uint32_t ssrc,
+ const VideoOptions* options,
+ rtc::VideoSourceInterface<webrtc::VideoFrame>* source) {
+ if (options) {
+ if (!SetOptions(*options)) {
+ return false;
+ }
+ }
+ sources_[ssrc] = source;
+ return true;
+}
+bool FakeVideoMediaSendChannel::HasSource(uint32_t ssrc) const {
+ return sources_.find(ssrc) != sources_.end() && sources_.at(ssrc) != nullptr;
+}
+void FakeVideoMediaSendChannel::FillBitrateInfo(
+ BandwidthEstimationInfo* bwe_info) {}
+bool FakeVideoMediaSendChannel::GetStats(VideoMediaSendInfo* info) {
+ return false;
+}
+bool FakeVideoMediaSendChannel::SetSendCodecs(
+ const std::vector<VideoCodec>& codecs) {
+ if (fail_set_send_codecs()) {
+ // Fake the failure in SetSendCodecs.
+ return false;
+ }
+ send_codecs_ = codecs;
+
+ return true;
+}
+bool FakeVideoMediaSendChannel::SetOptions(const VideoOptions& options) {
+ options_ = options;
+ return true;
+}
+
+bool FakeVideoMediaSendChannel::SetMaxSendBandwidth(int bps) {
+ max_bps_ = bps;
+ return true;
+}
+void FakeVideoMediaSendChannel::GenerateSendKeyFrame(
+ uint32_t ssrc,
+ const std::vector<std::string>& rids) {}
+
+FakeVideoMediaReceiveChannel::FakeVideoMediaReceiveChannel(
+ const VideoOptions& options,
+ TaskQueueBase* network_thread)
+ : RtpReceiveChannelHelper<VideoMediaReceiveChannelInterface>(
+ network_thread),
+ max_bps_(-1) {
+ SetOptions(options);
+}
+FakeVideoMediaReceiveChannel::~FakeVideoMediaReceiveChannel() = default;
+const std::vector<VideoCodec>& FakeVideoMediaReceiveChannel::recv_codecs()
+ const {
+ return recv_codecs_;
+}
+bool FakeVideoMediaReceiveChannel::rendering() const {
+ return playout();
+}
+const VideoOptions& FakeVideoMediaReceiveChannel::options() const {
+ return options_;
+}
+const std::map<uint32_t, rtc::VideoSinkInterface<webrtc::VideoFrame>*>&
+FakeVideoMediaReceiveChannel::sinks() const {
+ return sinks_;
+}
+int FakeVideoMediaReceiveChannel::max_bps() const {
+ return max_bps_;
+}
+bool FakeVideoMediaReceiveChannel::SetReceiverParameters(
+ const VideoReceiverParameters& params) {
+ set_recv_rtcp_parameters(params.rtcp);
+ return (SetRecvCodecs(params.codecs) &&
+ SetRecvRtpHeaderExtensions(params.extensions));
+}
+bool FakeVideoMediaReceiveChannel::SetSink(
+ uint32_t ssrc,
+ rtc::VideoSinkInterface<webrtc::VideoFrame>* sink) {
+ auto it = sinks_.find(ssrc);
+ if (it == sinks_.end()) {
+ return false;
+ }
+ it->second = sink;
+ return true;
+}
+void FakeVideoMediaReceiveChannel::SetDefaultSink(
+ rtc::VideoSinkInterface<webrtc::VideoFrame>* sink) {}
+bool FakeVideoMediaReceiveChannel::HasSink(uint32_t ssrc) const {
+ return sinks_.find(ssrc) != sinks_.end() && sinks_.at(ssrc) != nullptr;
+}
+bool FakeVideoMediaReceiveChannel::HasSource(uint32_t ssrc) const {
+ return sources_.find(ssrc) != sources_.end() && sources_.at(ssrc) != nullptr;
+}
+bool FakeVideoMediaReceiveChannel::AddRecvStream(const StreamParams& sp) {
+ if (!RtpReceiveChannelHelper<
+ VideoMediaReceiveChannelInterface>::AddRecvStream(sp))
+ return false;
+ sinks_[sp.first_ssrc()] = NULL;
+ output_delays_[sp.first_ssrc()] = 0;
+ return true;
+}
+bool FakeVideoMediaReceiveChannel::RemoveRecvStream(uint32_t ssrc) {
+ if (!RtpReceiveChannelHelper<
+ VideoMediaReceiveChannelInterface>::RemoveRecvStream(ssrc))
+ return false;
+ sinks_.erase(ssrc);
+ output_delays_.erase(ssrc);
+ return true;
+}
+std::vector<webrtc::RtpSource> FakeVideoMediaReceiveChannel::GetSources(
+ uint32_t ssrc) const {
+ return {};
+}
+bool FakeVideoMediaReceiveChannel::SetBaseMinimumPlayoutDelayMs(uint32_t ssrc,
+ int delay_ms) {
+ if (output_delays_.find(ssrc) == output_delays_.end()) {
+ return false;
+ } else {
+ output_delays_[ssrc] = delay_ms;
+ return true;
+ }
+}
+absl::optional<int> FakeVideoMediaReceiveChannel::GetBaseMinimumPlayoutDelayMs(
+ uint32_t ssrc) const {
+ const auto it = output_delays_.find(ssrc);
+ if (it != output_delays_.end()) {
+ return it->second;
+ }
+ return absl::nullopt;
+}
+bool FakeVideoMediaReceiveChannel::SetRecvCodecs(
+ const std::vector<VideoCodec>& codecs) {
+ if (fail_set_recv_codecs()) {
+ // Fake the failure in SetRecvCodecs.
+ return false;
+ }
+ recv_codecs_ = codecs;
+ return true;
+}
+bool FakeVideoMediaReceiveChannel::SetOptions(const VideoOptions& options) {
+ options_ = options;
+ return true;
+}
+
+bool FakeVideoMediaReceiveChannel::SetMaxSendBandwidth(int bps) {
+ max_bps_ = bps;
+ return true;
+}
+
+void FakeVideoMediaReceiveChannel::SetRecordableEncodedFrameCallback(
+ uint32_t ssrc,
+ std::function<void(const webrtc::RecordableEncodedFrame&)> callback) {}
+
+void FakeVideoMediaReceiveChannel::ClearRecordableEncodedFrameCallback(
+ uint32_t ssrc) {}
+
+void FakeVideoMediaReceiveChannel::RequestRecvKeyFrame(uint32_t ssrc) {}
+
+bool FakeVideoMediaReceiveChannel::GetStats(VideoMediaReceiveInfo* info) {
+ return false;
+}
+
+FakeVoiceEngine::FakeVoiceEngine() : fail_create_channel_(false) {
+ // Add a fake audio codec. Note that the name must not be "" as there are
+ // sanity checks against that.
+ SetCodecs({cricket::CreateAudioCodec(101, "fake_audio_codec", 8000, 1)});
+}
+void FakeVoiceEngine::Init() {}
+rtc::scoped_refptr<webrtc::AudioState> FakeVoiceEngine::GetAudioState() const {
+ return rtc::scoped_refptr<webrtc::AudioState>();
+}
+std::unique_ptr<VoiceMediaSendChannelInterface>
+FakeVoiceEngine::CreateSendChannel(webrtc::Call* call,
+ const MediaConfig& config,
+ const AudioOptions& options,
+ const webrtc::CryptoOptions& crypto_options,
+ webrtc::AudioCodecPairId codec_pair_id) {
+ std::unique_ptr<FakeVoiceMediaSendChannel> ch =
+ std::make_unique<FakeVoiceMediaSendChannel>(options,
+ call->network_thread());
+ return ch;
+}
+std::unique_ptr<VoiceMediaReceiveChannelInterface>
+FakeVoiceEngine::CreateReceiveChannel(
+ webrtc::Call* call,
+ const MediaConfig& config,
+ const AudioOptions& options,
+ const webrtc::CryptoOptions& crypto_options,
+ webrtc::AudioCodecPairId codec_pair_id) {
+ std::unique_ptr<FakeVoiceMediaReceiveChannel> ch =
+ std::make_unique<FakeVoiceMediaReceiveChannel>(options,
+ call->network_thread());
+ return ch;
+}
+const std::vector<AudioCodec>& FakeVoiceEngine::send_codecs() const {
+ return send_codecs_;
+}
+const std::vector<AudioCodec>& FakeVoiceEngine::recv_codecs() const {
+ return recv_codecs_;
+}
+void FakeVoiceEngine::SetCodecs(const std::vector<AudioCodec>& codecs) {
+ send_codecs_ = codecs;
+ recv_codecs_ = codecs;
+}
+void FakeVoiceEngine::SetRecvCodecs(const std::vector<AudioCodec>& codecs) {
+ recv_codecs_ = codecs;
+}
+void FakeVoiceEngine::SetSendCodecs(const std::vector<AudioCodec>& codecs) {
+ send_codecs_ = codecs;
+}
+int FakeVoiceEngine::GetInputLevel() {
+ return 0;
+}
+bool FakeVoiceEngine::StartAecDump(webrtc::FileWrapper file,
+ int64_t max_size_bytes) {
+ return false;
+}
+absl::optional<webrtc::AudioDeviceModule::Stats>
+FakeVoiceEngine::GetAudioDeviceStats() {
+ return absl::nullopt;
+}
+void FakeVoiceEngine::StopAecDump() {}
+
+std::vector<webrtc::RtpHeaderExtensionCapability>
+FakeVoiceEngine::GetRtpHeaderExtensions() const {
+ return header_extensions_;
+}
+
+void FakeVoiceEngine::SetRtpHeaderExtensions(
+ std::vector<webrtc::RtpHeaderExtensionCapability> header_extensions) {
+ header_extensions_ = std::move(header_extensions);
+}
+
+FakeVideoEngine::FakeVideoEngine()
+ : capture_(false), fail_create_channel_(false) {
+ // Add a fake video codec. Note that the name must not be "" as there are
+ // sanity checks against that.
+ send_codecs_.push_back(cricket::CreateVideoCodec(111, "fake_video_codec"));
+ recv_codecs_.push_back(cricket::CreateVideoCodec(111, "fake_video_codec"));
+}
+bool FakeVideoEngine::SetOptions(const VideoOptions& options) {
+ options_ = options;
+ return true;
+}
+std::unique_ptr<VideoMediaSendChannelInterface>
+FakeVideoEngine::CreateSendChannel(
+ webrtc::Call* call,
+ const MediaConfig& config,
+ const VideoOptions& options,
+ const webrtc::CryptoOptions& crypto_options,
+ webrtc::VideoBitrateAllocatorFactory* video_bitrate_allocator_factory) {
+ if (fail_create_channel_) {
+ return nullptr;
+ }
+
+ std::unique_ptr<FakeVideoMediaSendChannel> ch =
+ std::make_unique<FakeVideoMediaSendChannel>(options,
+ call->network_thread());
+ return ch;
+}
+std::unique_ptr<VideoMediaReceiveChannelInterface>
+FakeVideoEngine::CreateReceiveChannel(
+ webrtc::Call* call,
+ const MediaConfig& config,
+ const VideoOptions& options,
+ const webrtc::CryptoOptions& crypto_options) {
+ if (fail_create_channel_) {
+ return nullptr;
+ }
+
+ std::unique_ptr<FakeVideoMediaReceiveChannel> ch =
+ std::make_unique<FakeVideoMediaReceiveChannel>(options,
+ call->network_thread());
+ return ch;
+}
+std::vector<VideoCodec> FakeVideoEngine::send_codecs(bool use_rtx) const {
+ return send_codecs_;
+}
+
+std::vector<VideoCodec> FakeVideoEngine::recv_codecs(bool use_rtx) const {
+ return recv_codecs_;
+}
+
+void FakeVideoEngine::SetSendCodecs(const std::vector<VideoCodec>& codecs) {
+ send_codecs_ = codecs;
+}
+
+void FakeVideoEngine::SetRecvCodecs(const std::vector<VideoCodec>& codecs) {
+ recv_codecs_ = codecs;
+}
+
+bool FakeVideoEngine::SetCapture(bool capture) {
+ capture_ = capture;
+ return true;
+}
+std::vector<webrtc::RtpHeaderExtensionCapability>
+FakeVideoEngine::GetRtpHeaderExtensions() const {
+ return header_extensions_;
+}
+void FakeVideoEngine::SetRtpHeaderExtensions(
+ std::vector<webrtc::RtpHeaderExtensionCapability> header_extensions) {
+ header_extensions_ = std::move(header_extensions);
+}
+
+FakeMediaEngine::FakeMediaEngine()
+ : CompositeMediaEngine(std::make_unique<FakeVoiceEngine>(),
+ std::make_unique<FakeVideoEngine>()),
+ voice_(static_cast<FakeVoiceEngine*>(&voice())),
+ video_(static_cast<FakeVideoEngine*>(&video())) {}
+FakeMediaEngine::~FakeMediaEngine() {}
+void FakeMediaEngine::SetAudioCodecs(const std::vector<AudioCodec>& codecs) {
+ voice_->SetCodecs(codecs);
+}
+void FakeMediaEngine::SetAudioRecvCodecs(
+ const std::vector<AudioCodec>& codecs) {
+ voice_->SetRecvCodecs(codecs);
+}
+void FakeMediaEngine::SetAudioSendCodecs(
+ const std::vector<AudioCodec>& codecs) {
+ voice_->SetSendCodecs(codecs);
+}
+void FakeMediaEngine::SetVideoCodecs(const std::vector<VideoCodec>& codecs) {
+ video_->SetSendCodecs(codecs);
+ video_->SetRecvCodecs(codecs);
+}
+void FakeMediaEngine::set_fail_create_channel(bool fail) {
+ voice_->fail_create_channel_ = fail;
+ video_->fail_create_channel_ = fail;
+}
+
+} // namespace cricket
diff --git a/third_party/libwebrtc/media/base/fake_media_engine.h b/third_party/libwebrtc/media/base/fake_media_engine.h
new file mode 100644
index 0000000000..eddc76057d
--- /dev/null
+++ b/third_party/libwebrtc/media/base/fake_media_engine.h
@@ -0,0 +1,876 @@
+/*
+ * Copyright (c) 2004 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MEDIA_BASE_FAKE_MEDIA_ENGINE_H_
+#define MEDIA_BASE_FAKE_MEDIA_ENGINE_H_
+
+#include <atomic>
+#include <list>
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+#include "absl/algorithm/container.h"
+#include "absl/functional/any_invocable.h"
+#include "api/call/audio_sink.h"
+#include "api/media_types.h"
+#include "media/base/audio_source.h"
+#include "media/base/media_channel.h"
+#include "media/base/media_channel_impl.h"
+#include "media/base/media_engine.h"
+#include "media/base/rtp_utils.h"
+#include "media/base/stream_params.h"
+#include "media/engine/webrtc_video_engine.h"
+#include "modules/audio_processing/include/audio_processing.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "rtc_base/copy_on_write_buffer.h"
+#include "rtc_base/network_route.h"
+#include "rtc_base/thread.h"
+
+using webrtc::RtpExtension;
+
+namespace cricket {
+
+class FakeMediaEngine;
+class FakeVideoEngine;
+class FakeVoiceEngine;
+
+// A common helper class that handles sending and receiving RTP/RTCP packets.
+template <class Base>
+class RtpReceiveChannelHelper : public Base, public MediaChannelUtil {
+ public:
+ explicit RtpReceiveChannelHelper(webrtc::TaskQueueBase* network_thread)
+ : MediaChannelUtil(network_thread),
+ playout_(false),
+ fail_set_recv_codecs_(false),
+ transport_overhead_per_packet_(0),
+ num_network_route_changes_(0) {}
+ virtual ~RtpReceiveChannelHelper() = default;
+ const std::vector<RtpExtension>& recv_extensions() {
+ return recv_extensions_;
+ }
+ bool playout() const { return playout_; }
+ const std::list<std::string>& rtp_packets() const { return rtp_packets_; }
+ const std::list<std::string>& rtcp_packets() const { return rtcp_packets_; }
+
+ bool SendRtcp(const void* data, size_t len) {
+ rtc::CopyOnWriteBuffer packet(reinterpret_cast<const uint8_t*>(data), len,
+ kMaxRtpPacketLen);
+ return Base::SendRtcp(&packet, rtc::PacketOptions());
+ }
+
+ bool CheckRtp(const void* data, size_t len) {
+ bool success = !rtp_packets_.empty();
+ if (success) {
+ std::string packet = rtp_packets_.front();
+ rtp_packets_.pop_front();
+ success = (packet == std::string(static_cast<const char*>(data), len));
+ }
+ return success;
+ }
+ bool CheckRtcp(const void* data, size_t len) {
+ bool success = !rtcp_packets_.empty();
+ if (success) {
+ std::string packet = rtcp_packets_.front();
+ rtcp_packets_.pop_front();
+ success = (packet == std::string(static_cast<const char*>(data), len));
+ }
+ return success;
+ }
+ bool CheckNoRtp() { return rtp_packets_.empty(); }
+ bool CheckNoRtcp() { return rtcp_packets_.empty(); }
+ void set_fail_set_recv_codecs(bool fail) { fail_set_recv_codecs_ = fail; }
+ void ResetUnsignaledRecvStream() override {}
+ absl::optional<uint32_t> GetUnsignaledSsrc() const override {
+ return absl::nullopt;
+ }
+ void ChooseReceiverReportSsrc(const std::set<uint32_t>& choices) override {}
+
+ virtual bool SetLocalSsrc(const StreamParams& sp) { return true; }
+ void OnDemuxerCriteriaUpdatePending() override {}
+ void OnDemuxerCriteriaUpdateComplete() override {}
+
+ bool AddRecvStream(const StreamParams& sp) override {
+ if (absl::c_linear_search(receive_streams_, sp)) {
+ return false;
+ }
+ receive_streams_.push_back(sp);
+ rtp_receive_parameters_[sp.first_ssrc()] =
+ CreateRtpParametersWithEncodings(sp);
+ return true;
+ }
+ bool RemoveRecvStream(uint32_t ssrc) override {
+ auto parameters_iterator = rtp_receive_parameters_.find(ssrc);
+ if (parameters_iterator != rtp_receive_parameters_.end()) {
+ rtp_receive_parameters_.erase(parameters_iterator);
+ }
+ return RemoveStreamBySsrc(&receive_streams_, ssrc);
+ }
+
+ webrtc::RtpParameters GetRtpReceiverParameters(uint32_t ssrc) const override {
+ auto parameters_iterator = rtp_receive_parameters_.find(ssrc);
+ if (parameters_iterator != rtp_receive_parameters_.end()) {
+ return parameters_iterator->second;
+ }
+ return webrtc::RtpParameters();
+ }
+ webrtc::RtpParameters GetDefaultRtpReceiveParameters() const override {
+ return webrtc::RtpParameters();
+ }
+
+ const std::vector<StreamParams>& recv_streams() const {
+ return receive_streams_;
+ }
+ bool HasRecvStream(uint32_t ssrc) const {
+ return GetStreamBySsrc(receive_streams_, ssrc) != nullptr;
+ }
+
+ const RtcpParameters& recv_rtcp_parameters() { return recv_rtcp_parameters_; }
+
+ int transport_overhead_per_packet() const {
+ return transport_overhead_per_packet_;
+ }
+
+ rtc::NetworkRoute last_network_route() const { return last_network_route_; }
+ int num_network_route_changes() const { return num_network_route_changes_; }
+ void set_num_network_route_changes(int changes) {
+ num_network_route_changes_ = changes;
+ }
+
+ void OnRtcpPacketReceived(rtc::CopyOnWriteBuffer* packet,
+ int64_t packet_time_us) {
+ rtcp_packets_.push_back(std::string(packet->cdata<char>(), packet->size()));
+ }
+
+ void SetFrameDecryptor(uint32_t ssrc,
+ rtc::scoped_refptr<webrtc::FrameDecryptorInterface>
+ frame_decryptor) override {}
+
+ void SetDepacketizerToDecoderFrameTransformer(
+ uint32_t ssrc,
+ rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer)
+ override {}
+
+ void SetInterface(MediaChannelNetworkInterface* iface) override {
+ network_interface_ = iface;
+ MediaChannelUtil::SetInterface(iface);
+ }
+
+ protected:
+ void set_playout(bool playout) { playout_ = playout; }
+ bool SetRecvRtpHeaderExtensions(const std::vector<RtpExtension>& extensions) {
+ recv_extensions_ = extensions;
+ return true;
+ }
+ void set_recv_rtcp_parameters(const RtcpParameters& params) {
+ recv_rtcp_parameters_ = params;
+ }
+ void OnPacketReceived(const webrtc::RtpPacketReceived& packet) override {
+ rtp_packets_.push_back(
+ std::string(packet.Buffer().cdata<char>(), packet.size()));
+ }
+ bool fail_set_recv_codecs() const { return fail_set_recv_codecs_; }
+
+ private:
+ bool playout_;
+ std::vector<RtpExtension> recv_extensions_;
+ std::list<std::string> rtp_packets_;
+ std::list<std::string> rtcp_packets_;
+ std::vector<StreamParams> receive_streams_;
+ RtcpParameters recv_rtcp_parameters_;
+ std::map<uint32_t, webrtc::RtpParameters> rtp_receive_parameters_;
+ bool fail_set_recv_codecs_;
+ std::string rtcp_cname_;
+ int transport_overhead_per_packet_;
+ rtc::NetworkRoute last_network_route_;
+ int num_network_route_changes_;
+ MediaChannelNetworkInterface* network_interface_ = nullptr;
+};
+
+// A common helper class that handles sending and receiving RTP/RTCP packets.
+template <class Base>
+class RtpSendChannelHelper : public Base, public MediaChannelUtil {
+ public:
+ explicit RtpSendChannelHelper(webrtc::TaskQueueBase* network_thread)
+ : MediaChannelUtil(network_thread),
+ sending_(false),
+ fail_set_send_codecs_(false),
+ send_ssrc_(0),
+ ready_to_send_(false),
+ transport_overhead_per_packet_(0),
+ num_network_route_changes_(0) {}
+ virtual ~RtpSendChannelHelper() = default;
+ const std::vector<RtpExtension>& send_extensions() {
+ return send_extensions_;
+ }
+ bool sending() const { return sending_; }
+ const std::list<std::string>& rtp_packets() const { return rtp_packets_; }
+ const std::list<std::string>& rtcp_packets() const { return rtcp_packets_; }
+
+ bool SendPacket(const void* data,
+ size_t len,
+ const rtc::PacketOptions& options) {
+ if (!sending_) {
+ return false;
+ }
+ rtc::CopyOnWriteBuffer packet(reinterpret_cast<const uint8_t*>(data), len,
+ kMaxRtpPacketLen);
+ return MediaChannelUtil::SendPacket(&packet, options);
+ }
+ bool SendRtcp(const void* data, size_t len) {
+ rtc::CopyOnWriteBuffer packet(reinterpret_cast<const uint8_t*>(data), len,
+ kMaxRtpPacketLen);
+ return MediaChannelUtil::SendRtcp(&packet, rtc::PacketOptions());
+ }
+
+ bool CheckRtp(const void* data, size_t len) {
+ bool success = !rtp_packets_.empty();
+ if (success) {
+ std::string packet = rtp_packets_.front();
+ rtp_packets_.pop_front();
+ success = (packet == std::string(static_cast<const char*>(data), len));
+ }
+ return success;
+ }
+ bool CheckRtcp(const void* data, size_t len) {
+ bool success = !rtcp_packets_.empty();
+ if (success) {
+ std::string packet = rtcp_packets_.front();
+ rtcp_packets_.pop_front();
+ success = (packet == std::string(static_cast<const char*>(data), len));
+ }
+ return success;
+ }
+ bool CheckNoRtp() { return rtp_packets_.empty(); }
+ bool CheckNoRtcp() { return rtcp_packets_.empty(); }
+ void set_fail_set_send_codecs(bool fail) { fail_set_send_codecs_ = fail; }
+ bool AddSendStream(const StreamParams& sp) override {
+ if (absl::c_linear_search(send_streams_, sp)) {
+ return false;
+ }
+ send_streams_.push_back(sp);
+ rtp_send_parameters_[sp.first_ssrc()] =
+ CreateRtpParametersWithEncodings(sp);
+
+ if (ssrc_list_changed_callback_) {
+ std::set<uint32_t> ssrcs_in_use;
+ for (const auto& send_stream : send_streams_) {
+ ssrcs_in_use.insert(send_stream.first_ssrc());
+ }
+ ssrc_list_changed_callback_(ssrcs_in_use);
+ }
+
+ return true;
+ }
+ bool RemoveSendStream(uint32_t ssrc) override {
+ auto parameters_iterator = rtp_send_parameters_.find(ssrc);
+ if (parameters_iterator != rtp_send_parameters_.end()) {
+ rtp_send_parameters_.erase(parameters_iterator);
+ }
+ return RemoveStreamBySsrc(&send_streams_, ssrc);
+ }
+ void SetSsrcListChangedCallback(
+ absl::AnyInvocable<void(const std::set<uint32_t>&)> callback) override {
+ ssrc_list_changed_callback_ = std::move(callback);
+ }
+
+ void SetExtmapAllowMixed(bool extmap_allow_mixed) override {
+ return MediaChannelUtil::SetExtmapAllowMixed(extmap_allow_mixed);
+ }
+ bool ExtmapAllowMixed() const override {
+ return MediaChannelUtil::ExtmapAllowMixed();
+ }
+
+ webrtc::RtpParameters GetRtpSendParameters(uint32_t ssrc) const override {
+ auto parameters_iterator = rtp_send_parameters_.find(ssrc);
+ if (parameters_iterator != rtp_send_parameters_.end()) {
+ return parameters_iterator->second;
+ }
+ return webrtc::RtpParameters();
+ }
+ webrtc::RTCError SetRtpSendParameters(
+ uint32_t ssrc,
+ const webrtc::RtpParameters& parameters,
+ webrtc::SetParametersCallback callback) override {
+ auto parameters_iterator = rtp_send_parameters_.find(ssrc);
+ if (parameters_iterator != rtp_send_parameters_.end()) {
+ auto result = CheckRtpParametersInvalidModificationAndValues(
+ parameters_iterator->second, parameters);
+ if (!result.ok()) {
+ return webrtc::InvokeSetParametersCallback(callback, result);
+ }
+
+ parameters_iterator->second = parameters;
+
+ return webrtc::InvokeSetParametersCallback(callback,
+ webrtc::RTCError::OK());
+ }
+ // Replicate the behavior of the real media channel: return false
+ // when setting parameters for unknown SSRCs.
+ return InvokeSetParametersCallback(
+ callback, webrtc::RTCError(webrtc::RTCErrorType::INTERNAL_ERROR));
+ }
+
+ bool IsStreamMuted(uint32_t ssrc) const {
+ bool ret = muted_streams_.find(ssrc) != muted_streams_.end();
+ // If |ssrc = 0| check if the first send stream is muted.
+ if (!ret && ssrc == 0 && !send_streams_.empty()) {
+ return muted_streams_.find(send_streams_[0].first_ssrc()) !=
+ muted_streams_.end();
+ }
+ return ret;
+ }
+ const std::vector<StreamParams>& send_streams() const {
+ return send_streams_;
+ }
+ bool HasSendStream(uint32_t ssrc) const {
+ return GetStreamBySsrc(send_streams_, ssrc) != nullptr;
+ }
+ // TODO(perkj): This is to support legacy unit test that only check one
+ // sending stream.
+ uint32_t send_ssrc() const {
+ if (send_streams_.empty())
+ return 0;
+ return send_streams_[0].first_ssrc();
+ }
+
+ const RtcpParameters& send_rtcp_parameters() { return send_rtcp_parameters_; }
+
+ bool ready_to_send() const { return ready_to_send_; }
+
+ int transport_overhead_per_packet() const {
+ return transport_overhead_per_packet_;
+ }
+
+ rtc::NetworkRoute last_network_route() const { return last_network_route_; }
+ int num_network_route_changes() const { return num_network_route_changes_; }
+ void set_num_network_route_changes(int changes) {
+ num_network_route_changes_ = changes;
+ }
+
+ void OnRtcpPacketReceived(rtc::CopyOnWriteBuffer* packet,
+ int64_t packet_time_us) {
+ rtcp_packets_.push_back(std::string(packet->cdata<char>(), packet->size()));
+ }
+
+ // Stuff that deals with encryptors, transformers and the like
+ void SetFrameEncryptor(uint32_t ssrc,
+ rtc::scoped_refptr<webrtc::FrameEncryptorInterface>
+ frame_encryptor) override {}
+ void SetEncoderToPacketizerFrameTransformer(
+ uint32_t ssrc,
+ rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer)
+ override {}
+
+ void SetInterface(MediaChannelNetworkInterface* iface) override {
+ network_interface_ = iface;
+ MediaChannelUtil::SetInterface(iface);
+ }
+ bool HasNetworkInterface() const override {
+ return network_interface_ != nullptr;
+ }
+
+ protected:
+ bool MuteStream(uint32_t ssrc, bool mute) {
+ if (!HasSendStream(ssrc) && ssrc != 0) {
+ return false;
+ }
+ if (mute) {
+ muted_streams_.insert(ssrc);
+ } else {
+ muted_streams_.erase(ssrc);
+ }
+ return true;
+ }
+ bool set_sending(bool send) {
+ sending_ = send;
+ return true;
+ }
+ bool SetSendRtpHeaderExtensions(const std::vector<RtpExtension>& extensions) {
+ send_extensions_ = extensions;
+ return true;
+ }
+ void set_send_rtcp_parameters(const RtcpParameters& params) {
+ send_rtcp_parameters_ = params;
+ }
+ void OnPacketSent(const rtc::SentPacket& sent_packet) override {}
+ void OnReadyToSend(bool ready) override { ready_to_send_ = ready; }
+ void OnNetworkRouteChanged(absl::string_view transport_name,
+ const rtc::NetworkRoute& network_route) override {
+ last_network_route_ = network_route;
+ ++num_network_route_changes_;
+ transport_overhead_per_packet_ = network_route.packet_overhead;
+ }
+ bool fail_set_send_codecs() const { return fail_set_send_codecs_; }
+
+ private:
+ // TODO(bugs.webrtc.org/12783): This flag is used from more than one thread.
+ // As a workaround for tsan, it's currently std::atomic but that might not
+ // be the appropriate fix.
+ std::atomic<bool> sending_;
+ std::vector<RtpExtension> send_extensions_;
+ std::list<std::string> rtp_packets_;
+ std::list<std::string> rtcp_packets_;
+ std::vector<StreamParams> send_streams_;
+ RtcpParameters send_rtcp_parameters_;
+ std::set<uint32_t> muted_streams_;
+ std::map<uint32_t, webrtc::RtpParameters> rtp_send_parameters_;
+ bool fail_set_send_codecs_;
+ uint32_t send_ssrc_;
+ std::string rtcp_cname_;
+ bool ready_to_send_;
+ int transport_overhead_per_packet_;
+ rtc::NetworkRoute last_network_route_;
+ int num_network_route_changes_;
+ MediaChannelNetworkInterface* network_interface_ = nullptr;
+ absl::AnyInvocable<void(const std::set<uint32_t>&)>
+ ssrc_list_changed_callback_ = nullptr;
+};
+
+class FakeVoiceMediaReceiveChannel
+ : public RtpReceiveChannelHelper<VoiceMediaReceiveChannelInterface> {
+ public:
+ struct DtmfInfo {
+ DtmfInfo(uint32_t ssrc, int event_code, int duration);
+ uint32_t ssrc;
+ int event_code;
+ int duration;
+ };
+ FakeVoiceMediaReceiveChannel(const AudioOptions& options,
+ webrtc::TaskQueueBase* network_thread);
+ virtual ~FakeVoiceMediaReceiveChannel();
+
+ // Test methods
+ const std::vector<AudioCodec>& recv_codecs() const;
+ const std::vector<DtmfInfo>& dtmf_info_queue() const;
+ const AudioOptions& options() const;
+ int max_bps() const;
+ bool HasSource(uint32_t ssrc) const;
+
+ // Overrides
+ VideoMediaReceiveChannelInterface* AsVideoReceiveChannel() override {
+ return nullptr;
+ }
+ VoiceMediaReceiveChannelInterface* AsVoiceReceiveChannel() override {
+ return this;
+ }
+ cricket::MediaType media_type() const override {
+ return cricket::MEDIA_TYPE_AUDIO;
+ }
+
+ bool SetReceiverParameters(const AudioReceiverParameters& params) override;
+ void SetPlayout(bool playout) override;
+
+ bool AddRecvStream(const StreamParams& sp) override;
+ bool RemoveRecvStream(uint32_t ssrc) override;
+
+ bool SetOutputVolume(uint32_t ssrc, double volume) override;
+ bool SetDefaultOutputVolume(double volume) override;
+
+ bool GetOutputVolume(uint32_t ssrc, double* volume);
+
+ bool SetBaseMinimumPlayoutDelayMs(uint32_t ssrc, int delay_ms) override;
+ absl::optional<int> GetBaseMinimumPlayoutDelayMs(
+ uint32_t ssrc) const override;
+
+ bool GetStats(VoiceMediaReceiveInfo* info,
+ bool get_and_clear_legacy_stats) override;
+
+ void SetRawAudioSink(
+ uint32_t ssrc,
+ std::unique_ptr<webrtc::AudioSinkInterface> sink) override;
+ void SetDefaultRawAudioSink(
+ std::unique_ptr<webrtc::AudioSinkInterface> sink) override;
+
+ std::vector<webrtc::RtpSource> GetSources(uint32_t ssrc) const override;
+ void SetReceiveNackEnabled(bool enabled) override {}
+ void SetReceiveNonSenderRttEnabled(bool enabled) override {}
+
+ private:
+ class VoiceChannelAudioSink : public AudioSource::Sink {
+ public:
+ explicit VoiceChannelAudioSink(AudioSource* source);
+ ~VoiceChannelAudioSink() override;
+ void OnData(const void* audio_data,
+ int bits_per_sample,
+ int sample_rate,
+ size_t number_of_channels,
+ size_t number_of_frames,
+ absl::optional<int64_t> absolute_capture_timestamp_ms) override;
+ void OnClose() override;
+ int NumPreferredChannels() const override { return -1; }
+ AudioSource* source() const;
+
+ private:
+ AudioSource* source_;
+ };
+
+ bool SetRecvCodecs(const std::vector<AudioCodec>& codecs);
+ bool SetMaxSendBandwidth(int bps);
+ bool SetOptions(const AudioOptions& options);
+
+ std::vector<AudioCodec> recv_codecs_;
+ std::map<uint32_t, double> output_scalings_;
+ std::map<uint32_t, int> output_delays_;
+ std::vector<DtmfInfo> dtmf_info_queue_;
+ AudioOptions options_;
+ std::map<uint32_t, std::unique_ptr<VoiceChannelAudioSink>> local_sinks_;
+ std::unique_ptr<webrtc::AudioSinkInterface> sink_;
+ int max_bps_;
+};
+
+class FakeVoiceMediaSendChannel
+ : public RtpSendChannelHelper<VoiceMediaSendChannelInterface> {
+ public:
+ struct DtmfInfo {
+ DtmfInfo(uint32_t ssrc, int event_code, int duration);
+ uint32_t ssrc;
+ int event_code;
+ int duration;
+ };
+ FakeVoiceMediaSendChannel(const AudioOptions& options,
+ webrtc::TaskQueueBase* network_thread);
+ ~FakeVoiceMediaSendChannel() override;
+
+ const std::vector<AudioCodec>& send_codecs() const;
+ const std::vector<DtmfInfo>& dtmf_info_queue() const;
+ const AudioOptions& options() const;
+ int max_bps() const;
+ bool HasSource(uint32_t ssrc) const;
+ bool GetOutputVolume(uint32_t ssrc, double* volume);
+
+ // Overrides
+ VideoMediaSendChannelInterface* AsVideoSendChannel() override {
+ return nullptr;
+ }
+ VoiceMediaSendChannelInterface* AsVoiceSendChannel() override { return this; }
+ cricket::MediaType media_type() const override {
+ return cricket::MEDIA_TYPE_AUDIO;
+ }
+
+ bool SetSenderParameters(const AudioSenderParameter& params) override;
+ void SetSend(bool send) override;
+ bool SetAudioSend(uint32_t ssrc,
+ bool enable,
+ const AudioOptions* options,
+ AudioSource* source) override;
+
+ bool CanInsertDtmf() override;
+ bool InsertDtmf(uint32_t ssrc, int event_code, int duration) override;
+
+ bool SenderNackEnabled() const override { return false; }
+ bool SenderNonSenderRttEnabled() const override { return false; }
+ void SetReceiveNackEnabled(bool enabled) {}
+ void SetReceiveNonSenderRttEnabled(bool enabled) {}
+ bool SendCodecHasNack() const override { return false; }
+ void SetSendCodecChangedCallback(
+ absl::AnyInvocable<void()> callback) override {}
+ absl::optional<Codec> GetSendCodec() const override;
+
+ bool GetStats(VoiceMediaSendInfo* stats) override;
+
+ private:
+ class VoiceChannelAudioSink : public AudioSource::Sink {
+ public:
+ explicit VoiceChannelAudioSink(AudioSource* source);
+ ~VoiceChannelAudioSink() override;
+ void OnData(const void* audio_data,
+ int bits_per_sample,
+ int sample_rate,
+ size_t number_of_channels,
+ size_t number_of_frames,
+ absl::optional<int64_t> absolute_capture_timestamp_ms) override;
+ void OnClose() override;
+ int NumPreferredChannels() const override { return -1; }
+ AudioSource* source() const;
+
+ private:
+ AudioSource* source_;
+ };
+
+ bool SetSendCodecs(const std::vector<AudioCodec>& codecs);
+ bool SetMaxSendBandwidth(int bps);
+ bool SetOptions(const AudioOptions& options);
+ bool SetLocalSource(uint32_t ssrc, AudioSource* source);
+
+ std::vector<AudioCodec> send_codecs_;
+ std::map<uint32_t, double> output_scalings_;
+ std::map<uint32_t, int> output_delays_;
+ std::vector<DtmfInfo> dtmf_info_queue_;
+ AudioOptions options_;
+ std::map<uint32_t, std::unique_ptr<VoiceChannelAudioSink>> local_sinks_;
+ int max_bps_;
+};
+
+// A helper function to compare the FakeVoiceMediaChannel::DtmfInfo.
+bool CompareDtmfInfo(const FakeVoiceMediaSendChannel::DtmfInfo& info,
+ uint32_t ssrc,
+ int event_code,
+ int duration);
+
+class FakeVideoMediaReceiveChannel
+ : public RtpReceiveChannelHelper<VideoMediaReceiveChannelInterface> {
+ public:
+ FakeVideoMediaReceiveChannel(const VideoOptions& options,
+ webrtc::TaskQueueBase* network_thread);
+
+ virtual ~FakeVideoMediaReceiveChannel();
+
+ VideoMediaReceiveChannelInterface* AsVideoReceiveChannel() override {
+ return this;
+ }
+ VoiceMediaReceiveChannelInterface* AsVoiceReceiveChannel() override {
+ return nullptr;
+ }
+ cricket::MediaType media_type() const override {
+ return cricket::MEDIA_TYPE_VIDEO;
+ }
+
+ const std::vector<VideoCodec>& recv_codecs() const;
+ const std::vector<VideoCodec>& send_codecs() const;
+ bool rendering() const;
+ const VideoOptions& options() const;
+ const std::map<uint32_t, rtc::VideoSinkInterface<webrtc::VideoFrame>*>&
+ sinks() const;
+ int max_bps() const;
+ bool SetReceiverParameters(const VideoReceiverParameters& params) override;
+
+ bool SetSink(uint32_t ssrc,
+ rtc::VideoSinkInterface<webrtc::VideoFrame>* sink) override;
+ void SetDefaultSink(
+ rtc::VideoSinkInterface<webrtc::VideoFrame>* sink) override;
+ bool HasSink(uint32_t ssrc) const;
+
+ void SetReceive(bool receive) override {}
+
+ bool HasSource(uint32_t ssrc) const;
+ bool AddRecvStream(const StreamParams& sp) override;
+ bool RemoveRecvStream(uint32_t ssrc) override;
+
+ std::vector<webrtc::RtpSource> GetSources(uint32_t ssrc) const override;
+
+ bool SetBaseMinimumPlayoutDelayMs(uint32_t ssrc, int delay_ms) override;
+ absl::optional<int> GetBaseMinimumPlayoutDelayMs(
+ uint32_t ssrc) const override;
+
+ void SetRecordableEncodedFrameCallback(
+ uint32_t ssrc,
+ std::function<void(const webrtc::RecordableEncodedFrame&)> callback)
+ override;
+ void ClearRecordableEncodedFrameCallback(uint32_t ssrc) override;
+ void RequestRecvKeyFrame(uint32_t ssrc) override;
+ void SetReceiverFeedbackParameters(bool lntf_enabled,
+ bool nack_enabled,
+ webrtc::RtcpMode rtcp_mode,
+ absl::optional<int> rtx_time) override {}
+ bool GetStats(VideoMediaReceiveInfo* info) override;
+
+ bool AddDefaultRecvStreamForTesting(const StreamParams& sp) override {
+ RTC_CHECK_NOTREACHED();
+ return false;
+ }
+
+ private:
+ bool SetRecvCodecs(const std::vector<VideoCodec>& codecs);
+ bool SetSendCodecs(const std::vector<VideoCodec>& codecs);
+ bool SetOptions(const VideoOptions& options);
+ bool SetMaxSendBandwidth(int bps);
+
+ std::vector<VideoCodec> recv_codecs_;
+ std::map<uint32_t, rtc::VideoSinkInterface<webrtc::VideoFrame>*> sinks_;
+ std::map<uint32_t, rtc::VideoSourceInterface<webrtc::VideoFrame>*> sources_;
+ std::map<uint32_t, int> output_delays_;
+ VideoOptions options_;
+ int max_bps_;
+};
+
+class FakeVideoMediaSendChannel
+ : public RtpSendChannelHelper<VideoMediaSendChannelInterface> {
+ public:
+ FakeVideoMediaSendChannel(const VideoOptions& options,
+ webrtc::TaskQueueBase* network_thread);
+
+ virtual ~FakeVideoMediaSendChannel();
+
+ VideoMediaSendChannelInterface* AsVideoSendChannel() override { return this; }
+ VoiceMediaSendChannelInterface* AsVoiceSendChannel() override {
+ return nullptr;
+ }
+ cricket::MediaType media_type() const override {
+ return cricket::MEDIA_TYPE_VIDEO;
+ }
+
+ const std::vector<VideoCodec>& send_codecs() const;
+ const std::vector<VideoCodec>& codecs() const;
+ const VideoOptions& options() const;
+ const std::map<uint32_t, rtc::VideoSinkInterface<webrtc::VideoFrame>*>&
+ sinks() const;
+ int max_bps() const;
+ bool SetSenderParameters(const VideoSenderParameters& params) override;
+
+ absl::optional<Codec> GetSendCodec() const override;
+
+ bool SetSend(bool send) override;
+ bool SetVideoSend(
+ uint32_t ssrc,
+ const VideoOptions* options,
+ rtc::VideoSourceInterface<webrtc::VideoFrame>* source) override;
+
+ bool HasSource(uint32_t ssrc) const;
+
+ void FillBitrateInfo(BandwidthEstimationInfo* bwe_info) override;
+
+ void GenerateSendKeyFrame(uint32_t ssrc,
+ const std::vector<std::string>& rids) override;
+ webrtc::RtcpMode SendCodecRtcpMode() const override {
+ return webrtc::RtcpMode::kCompound;
+ }
+ void SetSendCodecChangedCallback(
+ absl::AnyInvocable<void()> callback) override {}
+ void SetSsrcListChangedCallback(
+ absl::AnyInvocable<void(const std::set<uint32_t>&)> callback) override {}
+
+ bool SendCodecHasLntf() const override { return false; }
+ bool SendCodecHasNack() const override { return false; }
+ absl::optional<int> SendCodecRtxTime() const override {
+ return absl::nullopt;
+ }
+ bool GetStats(VideoMediaSendInfo* info) override;
+
+ private:
+ bool SetSendCodecs(const std::vector<VideoCodec>& codecs);
+ bool SetOptions(const VideoOptions& options);
+ bool SetMaxSendBandwidth(int bps);
+
+ std::vector<VideoCodec> send_codecs_;
+ std::map<uint32_t, rtc::VideoSourceInterface<webrtc::VideoFrame>*> sources_;
+ VideoOptions options_;
+ int max_bps_;
+};
+
+class FakeVoiceEngine : public VoiceEngineInterface {
+ public:
+ FakeVoiceEngine();
+ void Init() override;
+ rtc::scoped_refptr<webrtc::AudioState> GetAudioState() const override;
+
+ std::unique_ptr<VoiceMediaSendChannelInterface> CreateSendChannel(
+ webrtc::Call* call,
+ const MediaConfig& config,
+ const AudioOptions& options,
+ const webrtc::CryptoOptions& crypto_options,
+ webrtc::AudioCodecPairId codec_pair_id) override;
+ std::unique_ptr<VoiceMediaReceiveChannelInterface> CreateReceiveChannel(
+ webrtc::Call* call,
+ const MediaConfig& config,
+ const AudioOptions& options,
+ const webrtc::CryptoOptions& crypto_options,
+ webrtc::AudioCodecPairId codec_pair_id) override;
+
+ // TODO(ossu): For proper testing, These should either individually settable
+ // or the voice engine should reference mockable factories.
+ const std::vector<AudioCodec>& send_codecs() const override;
+ const std::vector<AudioCodec>& recv_codecs() const override;
+ void SetCodecs(const std::vector<AudioCodec>& codecs);
+ void SetRecvCodecs(const std::vector<AudioCodec>& codecs);
+ void SetSendCodecs(const std::vector<AudioCodec>& codecs);
+ int GetInputLevel();
+ bool StartAecDump(webrtc::FileWrapper file, int64_t max_size_bytes) override;
+ void StopAecDump() override;
+ absl::optional<webrtc::AudioDeviceModule::Stats> GetAudioDeviceStats()
+ override;
+ std::vector<webrtc::RtpHeaderExtensionCapability> GetRtpHeaderExtensions()
+ const override;
+ void SetRtpHeaderExtensions(
+ std::vector<webrtc::RtpHeaderExtensionCapability> header_extensions);
+
+ private:
+ std::vector<AudioCodec> recv_codecs_;
+ std::vector<AudioCodec> send_codecs_;
+ bool fail_create_channel_;
+ std::vector<webrtc::RtpHeaderExtensionCapability> header_extensions_;
+
+ friend class FakeMediaEngine;
+};
+
+class FakeVideoEngine : public VideoEngineInterface {
+ public:
+ FakeVideoEngine();
+ bool SetOptions(const VideoOptions& options);
+ std::unique_ptr<VideoMediaSendChannelInterface> CreateSendChannel(
+ webrtc::Call* call,
+ const MediaConfig& config,
+ const VideoOptions& options,
+ const webrtc::CryptoOptions& crypto_options,
+ webrtc::VideoBitrateAllocatorFactory* video_bitrate_allocator_factory)
+ override;
+ std::unique_ptr<VideoMediaReceiveChannelInterface> CreateReceiveChannel(
+ webrtc::Call* call,
+ const MediaConfig& config,
+ const VideoOptions& options,
+ const webrtc::CryptoOptions& crypto_options) override;
+ FakeVideoMediaSendChannel* GetSendChannel(size_t index);
+ FakeVideoMediaReceiveChannel* GetReceiveChannel(size_t index);
+
+ std::vector<VideoCodec> send_codecs() const override {
+ return send_codecs(true);
+ }
+ std::vector<VideoCodec> recv_codecs() const override {
+ return recv_codecs(true);
+ }
+ std::vector<VideoCodec> send_codecs(bool include_rtx) const override;
+ std::vector<VideoCodec> recv_codecs(bool include_rtx) const override;
+ void SetSendCodecs(const std::vector<VideoCodec>& codecs);
+ void SetRecvCodecs(const std::vector<VideoCodec>& codecs);
+ bool SetCapture(bool capture);
+ std::vector<webrtc::RtpHeaderExtensionCapability> GetRtpHeaderExtensions()
+ const override;
+ void SetRtpHeaderExtensions(
+ std::vector<webrtc::RtpHeaderExtensionCapability> header_extensions);
+
+ private:
+ std::vector<VideoCodec> send_codecs_;
+ std::vector<VideoCodec> recv_codecs_;
+ bool capture_;
+ VideoOptions options_;
+ bool fail_create_channel_;
+ std::vector<webrtc::RtpHeaderExtensionCapability> header_extensions_;
+
+ friend class FakeMediaEngine;
+};
+
+class FakeMediaEngine : public CompositeMediaEngine {
+ public:
+ FakeMediaEngine();
+
+ ~FakeMediaEngine() override;
+
+ void SetAudioCodecs(const std::vector<AudioCodec>& codecs);
+ void SetAudioRecvCodecs(const std::vector<AudioCodec>& codecs);
+ void SetAudioSendCodecs(const std::vector<AudioCodec>& codecs);
+ void SetVideoCodecs(const std::vector<VideoCodec>& codecs);
+
+ void set_fail_create_channel(bool fail);
+
+ FakeVoiceEngine* fake_voice_engine() { return voice_; }
+ FakeVideoEngine* fake_video_engine() { return video_; }
+
+ private:
+ FakeVoiceEngine* const voice_;
+ FakeVideoEngine* const video_;
+};
+
+} // namespace cricket
+
+#endif // MEDIA_BASE_FAKE_MEDIA_ENGINE_H_
diff --git a/third_party/libwebrtc/media/base/fake_network_interface.h b/third_party/libwebrtc/media/base/fake_network_interface.h
new file mode 100644
index 0000000000..d0763fe533
--- /dev/null
+++ b/third_party/libwebrtc/media/base/fake_network_interface.h
@@ -0,0 +1,232 @@
+/*
+ * Copyright (c) 2004 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MEDIA_BASE_FAKE_NETWORK_INTERFACE_H_
+#define MEDIA_BASE_FAKE_NETWORK_INTERFACE_H_
+
+#include <map>
+#include <set>
+#include <utility>
+#include <vector>
+
+#include "api/task_queue/pending_task_safety_flag.h"
+#include "api/task_queue/task_queue_base.h"
+#include "media/base/media_channel.h"
+#include "media/base/rtp_utils.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "modules/rtp_rtcp/source/rtp_util.h"
+#include "rtc_base/byte_order.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/copy_on_write_buffer.h"
+#include "rtc_base/dscp.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread.h"
+#include "rtc_base/time_utils.h"
+
+namespace cricket {
+
+// Fake NetworkInterface that sends/receives RTP/RTCP packets.
+class FakeNetworkInterface : public MediaChannelNetworkInterface {
+ public:
+ FakeNetworkInterface()
+ : thread_(rtc::Thread::Current()),
+ dest_(NULL),
+ conf_(false),
+ sendbuf_size_(-1),
+ recvbuf_size_(-1),
+ dscp_(rtc::DSCP_NO_CHANGE) {}
+
+ void SetDestination(MediaReceiveChannelInterface* dest) { dest_ = dest; }
+
+ // Conference mode is a mode where instead of simply forwarding the packets,
+ // the transport will send multiple copies of the packet with the specified
+ // SSRCs. This allows us to simulate receiving media from multiple sources.
+ void SetConferenceMode(bool conf, const std::vector<uint32_t>& ssrcs)
+ RTC_LOCKS_EXCLUDED(mutex_) {
+ webrtc::MutexLock lock(&mutex_);
+ conf_ = conf;
+ conf_sent_ssrcs_ = ssrcs;
+ }
+
+ int NumRtpBytes() RTC_LOCKS_EXCLUDED(mutex_) {
+ webrtc::MutexLock lock(&mutex_);
+ int bytes = 0;
+ for (size_t i = 0; i < rtp_packets_.size(); ++i) {
+ bytes += static_cast<int>(rtp_packets_[i].size());
+ }
+ return bytes;
+ }
+
+ int NumRtpBytes(uint32_t ssrc) RTC_LOCKS_EXCLUDED(mutex_) {
+ webrtc::MutexLock lock(&mutex_);
+ int bytes = 0;
+ GetNumRtpBytesAndPackets(ssrc, &bytes, NULL);
+ return bytes;
+ }
+
+ int NumRtpPackets() RTC_LOCKS_EXCLUDED(mutex_) {
+ webrtc::MutexLock lock(&mutex_);
+ return static_cast<int>(rtp_packets_.size());
+ }
+
+ int NumRtpPackets(uint32_t ssrc) RTC_LOCKS_EXCLUDED(mutex_) {
+ webrtc::MutexLock lock(&mutex_);
+ int packets = 0;
+ GetNumRtpBytesAndPackets(ssrc, NULL, &packets);
+ return packets;
+ }
+
+ int NumSentSsrcs() RTC_LOCKS_EXCLUDED(mutex_) {
+ webrtc::MutexLock lock(&mutex_);
+ return static_cast<int>(sent_ssrcs_.size());
+ }
+
+ rtc::CopyOnWriteBuffer GetRtpPacket(int index) RTC_LOCKS_EXCLUDED(mutex_) {
+ webrtc::MutexLock lock(&mutex_);
+ if (index >= static_cast<int>(rtp_packets_.size())) {
+ return {};
+ }
+ return rtp_packets_[index];
+ }
+
+ int NumRtcpPackets() RTC_LOCKS_EXCLUDED(mutex_) {
+ webrtc::MutexLock lock(&mutex_);
+ return static_cast<int>(rtcp_packets_.size());
+ }
+
+ // Note: callers are responsible for deleting the returned buffer.
+ const rtc::CopyOnWriteBuffer* GetRtcpPacket(int index)
+ RTC_LOCKS_EXCLUDED(mutex_) {
+ webrtc::MutexLock lock(&mutex_);
+ if (index >= static_cast<int>(rtcp_packets_.size())) {
+ return NULL;
+ }
+ return new rtc::CopyOnWriteBuffer(rtcp_packets_[index]);
+ }
+
+ int sendbuf_size() const { return sendbuf_size_; }
+ int recvbuf_size() const { return recvbuf_size_; }
+ rtc::DiffServCodePoint dscp() const { return dscp_; }
+ rtc::PacketOptions options() const { return options_; }
+
+ protected:
+ virtual bool SendPacket(rtc::CopyOnWriteBuffer* packet,
+ const rtc::PacketOptions& options)
+ RTC_LOCKS_EXCLUDED(mutex_) {
+ if (!webrtc::IsRtpPacket(*packet)) {
+ return false;
+ }
+
+ webrtc::MutexLock lock(&mutex_);
+ sent_ssrcs_[webrtc::ParseRtpSsrc(*packet)]++;
+ options_ = options;
+
+ rtp_packets_.push_back(*packet);
+ if (conf_) {
+ for (size_t i = 0; i < conf_sent_ssrcs_.size(); ++i) {
+ SetRtpSsrc(conf_sent_ssrcs_[i], *packet);
+ PostPacket(*packet);
+ }
+ } else {
+ PostPacket(*packet);
+ }
+ return true;
+ }
+
+ virtual bool SendRtcp(rtc::CopyOnWriteBuffer* packet,
+ const rtc::PacketOptions& options)
+ RTC_LOCKS_EXCLUDED(mutex_) {
+ webrtc::MutexLock lock(&mutex_);
+ rtcp_packets_.push_back(*packet);
+ options_ = options;
+ if (!conf_) {
+ // don't worry about RTCP in conf mode for now
+ RTC_LOG(LS_VERBOSE) << "Dropping RTCP packet, they are not handled by "
+ "MediaChannel anymore.";
+ }
+ return true;
+ }
+
+ virtual int SetOption(SocketType type, rtc::Socket::Option opt, int option) {
+ if (opt == rtc::Socket::OPT_SNDBUF) {
+ sendbuf_size_ = option;
+ } else if (opt == rtc::Socket::OPT_RCVBUF) {
+ recvbuf_size_ = option;
+ } else if (opt == rtc::Socket::OPT_DSCP) {
+ dscp_ = static_cast<rtc::DiffServCodePoint>(option);
+ }
+ return 0;
+ }
+
+ void PostPacket(rtc::CopyOnWriteBuffer packet) {
+ thread_->PostTask(
+ SafeTask(safety_.flag(), [this, packet = std::move(packet)]() mutable {
+ if (dest_) {
+ webrtc::RtpPacketReceived parsed_packet;
+ if (parsed_packet.Parse(packet)) {
+ parsed_packet.set_arrival_time(
+ webrtc::Timestamp::Micros(rtc::TimeMicros()));
+ dest_->OnPacketReceived(std::move(parsed_packet));
+ } else {
+ RTC_DCHECK_NOTREACHED();
+ }
+ }
+ }));
+ }
+
+ private:
+ void SetRtpSsrc(uint32_t ssrc, rtc::CopyOnWriteBuffer& buffer) {
+ RTC_CHECK_GE(buffer.size(), 12);
+ rtc::SetBE32(buffer.MutableData() + 8, ssrc);
+ }
+
+ void GetNumRtpBytesAndPackets(uint32_t ssrc, int* bytes, int* packets) {
+ if (bytes) {
+ *bytes = 0;
+ }
+ if (packets) {
+ *packets = 0;
+ }
+ for (size_t i = 0; i < rtp_packets_.size(); ++i) {
+ if (ssrc == webrtc::ParseRtpSsrc(rtp_packets_[i])) {
+ if (bytes) {
+ *bytes += static_cast<int>(rtp_packets_[i].size());
+ }
+ if (packets) {
+ ++(*packets);
+ }
+ }
+ }
+ }
+
+ webrtc::TaskQueueBase* thread_;
+ MediaReceiveChannelInterface* dest_;
+ bool conf_;
+ // The ssrcs used in sending out packets in conference mode.
+ std::vector<uint32_t> conf_sent_ssrcs_;
+ // Map to track counts of packets that have been sent per ssrc.
+ // This includes packets that are dropped.
+ std::map<uint32_t, uint32_t> sent_ssrcs_;
+ // Map to track packet-number that needs to be dropped per ssrc.
+ std::map<uint32_t, std::set<uint32_t> > drop_map_;
+ webrtc::Mutex mutex_;
+ std::vector<rtc::CopyOnWriteBuffer> rtp_packets_;
+ std::vector<rtc::CopyOnWriteBuffer> rtcp_packets_;
+ int sendbuf_size_;
+ int recvbuf_size_;
+ rtc::DiffServCodePoint dscp_;
+ // Options of the most recently sent packet.
+ rtc::PacketOptions options_;
+ webrtc::ScopedTaskSafety safety_;
+};
+
+} // namespace cricket
+
+#endif // MEDIA_BASE_FAKE_NETWORK_INTERFACE_H_
diff --git a/third_party/libwebrtc/media/base/fake_rtp.cc b/third_party/libwebrtc/media/base/fake_rtp.cc
new file mode 100644
index 0000000000..21322419e1
--- /dev/null
+++ b/third_party/libwebrtc/media/base/fake_rtp.cc
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/base/fake_rtp.h"
+
+#include <stdint.h>
+#include <string.h>
+
+#include "absl/algorithm/container.h"
+#include "rtc_base/checks.h"
+#include "test/gtest.h"
+
+void CompareHeaderExtensions(const char* packet1,
+ size_t packet1_size,
+ const char* packet2,
+ size_t packet2_size,
+ const std::vector<int>& encrypted_headers,
+ bool expect_equal) {
+ // Sanity check: packets must be large enough to contain the RTP header and
+ // extensions header.
+ RTC_CHECK_GE(packet1_size, 12 + 4);
+ RTC_CHECK_GE(packet2_size, 12 + 4);
+ // RTP extension headers are the same.
+ EXPECT_EQ(0, memcmp(packet1 + 12, packet2 + 12, 4));
+ // Check for one-byte header extensions.
+ EXPECT_EQ('\xBE', packet1[12]);
+ EXPECT_EQ('\xDE', packet1[13]);
+ // Determine position and size of extension headers.
+ size_t extension_words = packet1[14] << 8 | packet1[15];
+ const char* extension_data1 = packet1 + 12 + 4;
+ const char* extension_end1 = extension_data1 + extension_words * 4;
+ const char* extension_data2 = packet2 + 12 + 4;
+ // Sanity check: packets must be large enough to contain the RTP header
+ // extensions.
+ RTC_CHECK_GE(packet1_size, 12 + 4 + extension_words * 4);
+ RTC_CHECK_GE(packet2_size, 12 + 4 + extension_words * 4);
+ while (extension_data1 < extension_end1) {
+ uint8_t id = (*extension_data1 & 0xf0) >> 4;
+ uint8_t len = (*extension_data1 & 0x0f) + 1;
+ extension_data1++;
+ extension_data2++;
+ EXPECT_LE(extension_data1, extension_end1);
+ if (id == 15) {
+ // Finished parsing.
+ break;
+ }
+
+ // The header extension doesn't get encrypted if the id is not in the
+ // list of header extensions to encrypt.
+ if (expect_equal || !absl::c_linear_search(encrypted_headers, id)) {
+ EXPECT_EQ(0, memcmp(extension_data1, extension_data2, len));
+ } else {
+ EXPECT_NE(0, memcmp(extension_data1, extension_data2, len));
+ }
+
+ extension_data1 += len;
+ extension_data2 += len;
+ // Skip padding.
+ while (extension_data1 < extension_end1 && *extension_data1 == 0) {
+ extension_data1++;
+ extension_data2++;
+ }
+ }
+}
diff --git a/third_party/libwebrtc/media/base/fake_rtp.h b/third_party/libwebrtc/media/base/fake_rtp.h
new file mode 100644
index 0000000000..8a176038cb
--- /dev/null
+++ b/third_party/libwebrtc/media/base/fake_rtp.h
@@ -0,0 +1,301 @@
+/*
+ * Copyright (c) 2004 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Fake RTP and RTCP packets to use in unit tests.
+
+#ifndef MEDIA_BASE_FAKE_RTP_H_
+#define MEDIA_BASE_FAKE_RTP_H_
+
+#include <cstddef> // size_t
+#include <vector>
+
+// A typical PCMU RTP packet.
+// PT=0, SN=1, TS=0, SSRC=1
+// all data FF
+static const unsigned char kPcmuFrame[] = {
+ 0x80, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+};
+
+static const int kHeaderExtensionIDs[] = {1, 4};
+
+// A typical PCMU RTP packet with header extensions.
+// PT=0, SN=1, TS=0, SSRC=1
+// all data FF
+static const unsigned char kPcmuFrameWithExtensions[] = {
+ 0x90,
+ 0x00,
+ 0x00,
+ 0x01,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x01,
+ // RFC 5285, section 4.2. One-Byte Header.
+ 0xBE,
+ 0xDE,
+ // Header extension length 6 * 32 bits.
+ 0x00,
+ 0x06,
+ // 8 bytes header id 1.
+ 0x17,
+ 0x41,
+ 0x42,
+ 0x73,
+ 0xA4,
+ 0x75,
+ 0x26,
+ 0x27,
+ 0x48,
+ // 3 bytes header id 2.
+ 0x22,
+ 0x00,
+ 0x00,
+ 0xC8,
+ // 1 byte header id 3.
+ 0x30,
+ 0x8E,
+ // 7 bytes header id 4.
+ 0x46,
+ 0x55,
+ 0x99,
+ 0x63,
+ 0x86,
+ 0xB3,
+ 0x95,
+ 0xFB,
+ // 1 byte header padding.
+ 0x00,
+ // Payload data.
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+};
+
+// A typical Receiver Report RTCP packet.
+// PT=RR, LN=1, SSRC=1
+// send SSRC=2, all other fields 0
+static const unsigned char kRtcpReport[] = {
+ 0x80, 0xc9, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+// PT = 97, TS = 0, Seq = 1, SSRC = 2
+// H264 - NRI = 1, Type = 1, bit stream = FF
+
+static const unsigned char kH264Packet[] = {
+ 0x80, 0x61, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+ 0x21, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+};
+
+// PT= 101, SN=2, TS=3, SSRC = 4
+static const unsigned char kDataPacket[] = {
+ 0x80, 0x65, 0x00, 0x02, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x04,
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+};
+
+// This expects both packets to be based on kPcmuFrameWithExtensions.
+// Header extensions with an id in "encrypted_headers" are expected to be
+// different in the packets unless "expect_equal" is set to "true".
+void CompareHeaderExtensions(const char* packet1,
+ size_t packet1_size,
+ const char* packet2,
+ size_t packet2_size,
+ const std::vector<int>& encrypted_headers,
+ bool expect_equal);
+
+#endif // MEDIA_BASE_FAKE_RTP_H_
diff --git a/third_party/libwebrtc/media/base/fake_video_renderer.cc b/third_party/libwebrtc/media/base/fake_video_renderer.cc
new file mode 100644
index 0000000000..b235738d24
--- /dev/null
+++ b/third_party/libwebrtc/media/base/fake_video_renderer.cc
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/base/fake_video_renderer.h"
+
+namespace cricket {
+namespace {
+bool CheckFrameColorYuv(const webrtc::VideoFrame& frame) {
+ // TODO(zhurunz) Check with VP8 team to see if we can remove this
+ // tolerance on Y values. Some unit tests produce Y values close
+ // to 16 rather than close to zero, for supposedly black frames.
+ // Largest value observed is 34, e.g., running
+ // PeerConnectionIntegrationTest.SendAndReceive16To9AspectRatio.
+ static constexpr uint8_t y_min = 0;
+ static constexpr uint8_t y_max = 48;
+ static constexpr uint8_t u_min = 128;
+ static constexpr uint8_t u_max = 128;
+ static constexpr uint8_t v_min = 128;
+ static constexpr uint8_t v_max = 128;
+
+ if (!frame.video_frame_buffer()) {
+ return false;
+ }
+ rtc::scoped_refptr<const webrtc::I420BufferInterface> i420_buffer =
+ frame.video_frame_buffer()->ToI420();
+ // Y
+ int y_width = frame.width();
+ int y_height = frame.height();
+ const uint8_t* y_plane = i420_buffer->DataY();
+ const uint8_t* y_pos = y_plane;
+ int32_t y_pitch = i420_buffer->StrideY();
+ for (int i = 0; i < y_height; ++i) {
+ for (int j = 0; j < y_width; ++j) {
+ uint8_t y_value = *(y_pos + j);
+ if (y_value < y_min || y_value > y_max) {
+ return false;
+ }
+ }
+ y_pos += y_pitch;
+ }
+ // U and V
+ int chroma_width = i420_buffer->ChromaWidth();
+ int chroma_height = i420_buffer->ChromaHeight();
+ const uint8_t* u_plane = i420_buffer->DataU();
+ const uint8_t* v_plane = i420_buffer->DataV();
+ const uint8_t* u_pos = u_plane;
+ const uint8_t* v_pos = v_plane;
+ int32_t u_pitch = i420_buffer->StrideU();
+ int32_t v_pitch = i420_buffer->StrideV();
+ for (int i = 0; i < chroma_height; ++i) {
+ for (int j = 0; j < chroma_width; ++j) {
+ uint8_t u_value = *(u_pos + j);
+ if (u_value < u_min || u_value > u_max) {
+ return false;
+ }
+ uint8_t v_value = *(v_pos + j);
+ if (v_value < v_min || v_value > v_max) {
+ return false;
+ }
+ }
+ u_pos += u_pitch;
+ v_pos += v_pitch;
+ }
+ return true;
+}
+} // namespace
+
+FakeVideoRenderer::FakeVideoRenderer() = default;
+
+void FakeVideoRenderer::OnFrame(const webrtc::VideoFrame& frame) {
+ webrtc::MutexLock lock(&mutex_);
+ black_frame_ = CheckFrameColorYuv(frame);
+ ++num_rendered_frames_;
+ width_ = frame.width();
+ height_ = frame.height();
+ rotation_ = frame.rotation();
+ timestamp_us_ = frame.timestamp_us();
+}
+
+} // namespace cricket
diff --git a/third_party/libwebrtc/media/base/fake_video_renderer.h b/third_party/libwebrtc/media/base/fake_video_renderer.h
new file mode 100644
index 0000000000..33d99a2668
--- /dev/null
+++ b/third_party/libwebrtc/media/base/fake_video_renderer.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MEDIA_BASE_FAKE_VIDEO_RENDERER_H_
+#define MEDIA_BASE_FAKE_VIDEO_RENDERER_H_
+
+#include <stdint.h>
+
+#include "api/scoped_refptr.h"
+#include "api/video/video_frame.h"
+#include "api/video/video_frame_buffer.h"
+#include "api/video/video_rotation.h"
+#include "api/video/video_sink_interface.h"
+#include "rtc_base/synchronization/mutex.h"
+
+namespace cricket {
+
+// Faked video renderer that has a callback for actions on rendering.
+class FakeVideoRenderer : public rtc::VideoSinkInterface<webrtc::VideoFrame> {
+ public:
+ FakeVideoRenderer();
+
+ void OnFrame(const webrtc::VideoFrame& frame) override;
+
+ int width() const {
+ webrtc::MutexLock lock(&mutex_);
+ return width_;
+ }
+ int height() const {
+ webrtc::MutexLock lock(&mutex_);
+ return height_;
+ }
+
+ webrtc::VideoRotation rotation() const {
+ webrtc::MutexLock lock(&mutex_);
+ return rotation_;
+ }
+
+ int64_t timestamp_us() const {
+ webrtc::MutexLock lock(&mutex_);
+ return timestamp_us_;
+ }
+
+ int num_rendered_frames() const {
+ webrtc::MutexLock lock(&mutex_);
+ return num_rendered_frames_;
+ }
+
+ bool black_frame() const {
+ webrtc::MutexLock lock(&mutex_);
+ return black_frame_;
+ }
+
+ private:
+ int width_ = 0;
+ int height_ = 0;
+ webrtc::VideoRotation rotation_ = webrtc::kVideoRotation_0;
+ int64_t timestamp_us_ = 0;
+ int num_rendered_frames_ = 0;
+ bool black_frame_ = false;
+ mutable webrtc::Mutex mutex_;
+};
+
+} // namespace cricket
+
+#endif // MEDIA_BASE_FAKE_VIDEO_RENDERER_H_
diff --git a/third_party/libwebrtc/media/base/media_channel.h b/third_party/libwebrtc/media/base/media_channel.h
new file mode 100644
index 0000000000..1fe86f9588
--- /dev/null
+++ b/third_party/libwebrtc/media/base/media_channel.h
@@ -0,0 +1,1002 @@
+/*
+ * Copyright (c) 2004 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MEDIA_BASE_MEDIA_CHANNEL_H_
+#define MEDIA_BASE_MEDIA_CHANNEL_H_
+
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/audio_codecs/audio_encoder.h"
+#include "api/audio_options.h"
+#include "api/call/audio_sink.h"
+#include "api/crypto/frame_decryptor_interface.h"
+#include "api/crypto/frame_encryptor_interface.h"
+#include "api/frame_transformer_interface.h"
+#include "api/media_stream_interface.h"
+#include "api/rtc_error.h"
+#include "api/rtp_parameters.h"
+#include "api/rtp_sender_interface.h"
+#include "api/task_queue/pending_task_safety_flag.h"
+#include "api/transport/data_channel_transport_interface.h"
+#include "api/transport/rtp/rtp_source.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "api/video/video_content_type.h"
+#include "api/video/video_sink_interface.h"
+#include "api/video/video_source_interface.h"
+#include "api/video/video_timing.h"
+#include "api/video_codecs/scalability_mode.h"
+#include "api/video_codecs/video_encoder_factory.h"
+#include "call/video_receive_stream.h"
+#include "common_video/include/quality_limitation_reason.h"
+#include "media/base/codec.h"
+#include "media/base/media_constants.h"
+#include "media/base/stream_params.h"
+#include "modules/audio_processing/include/audio_processing_statistics.h"
+#include "modules/rtp_rtcp/include/report_block_data.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "rtc_base/async_packet_socket.h"
+#include "rtc_base/buffer.h"
+#include "rtc_base/copy_on_write_buffer.h"
+#include "rtc_base/dscp.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/network_route.h"
+#include "rtc_base/socket.h"
+#include "rtc_base/string_encode.h"
+#include "rtc_base/strings/string_builder.h"
+#include "video/config/video_encoder_config.h"
+
+namespace rtc {
+class Timing;
+}
+
+namespace webrtc {
+class VideoFrame;
+} // namespace webrtc
+
+namespace cricket {
+
+class AudioSource;
+class VideoCapturer;
+struct RtpHeader;
+struct VideoFormat;
+class VideoMediaSendChannelInterface;
+class VideoMediaReceiveChannelInterface;
+class VoiceMediaSendChannelInterface;
+class VoiceMediaReceiveChannelInterface;
+
+const int kScreencastDefaultFps = 5;
+
+template <class T>
+static std::string ToStringIfSet(const char* key,
+ const absl::optional<T>& val) {
+ std::string str;
+ if (val) {
+ str = key;
+ str += ": ";
+ str += val ? rtc::ToString(*val) : "";
+ str += ", ";
+ }
+ return str;
+}
+
+template <class T>
+static std::string VectorToString(const std::vector<T>& vals) {
+ rtc::StringBuilder ost; // no-presubmit-check TODO(webrtc:8982)
+ ost << "[";
+ for (size_t i = 0; i < vals.size(); ++i) {
+ if (i > 0) {
+ ost << ", ";
+ }
+ ost << vals[i].ToString();
+ }
+ ost << "]";
+ return ost.Release();
+}
+
+// Options that can be applied to a VideoMediaChannel or a VideoMediaEngine.
+// Used to be flags, but that makes it hard to selectively apply options.
+// We are moving all of the setting of options to structs like this,
+// but some things currently still use flags.
+struct VideoOptions {
+ VideoOptions();
+ ~VideoOptions();
+
+ void SetAll(const VideoOptions& change) {
+ SetFrom(&video_noise_reduction, change.video_noise_reduction);
+ SetFrom(&screencast_min_bitrate_kbps, change.screencast_min_bitrate_kbps);
+ SetFrom(&is_screencast, change.is_screencast);
+ }
+
+ bool operator==(const VideoOptions& o) const {
+ return video_noise_reduction == o.video_noise_reduction &&
+ screencast_min_bitrate_kbps == o.screencast_min_bitrate_kbps &&
+ is_screencast == o.is_screencast;
+ }
+ bool operator!=(const VideoOptions& o) const { return !(*this == o); }
+
+ std::string ToString() const {
+ rtc::StringBuilder ost;
+ ost << "VideoOptions {";
+ ost << ToStringIfSet("noise reduction", video_noise_reduction);
+ ost << ToStringIfSet("screencast min bitrate kbps",
+ screencast_min_bitrate_kbps);
+ ost << ToStringIfSet("is_screencast ", is_screencast);
+ ost << "}";
+ return ost.Release();
+ }
+
+ // Enable denoising? This flag comes from the getUserMedia
+ // constraint 'googNoiseReduction', and WebRtcVideoEngine passes it
+ // on to the codec options. Disabled by default.
+ absl::optional<bool> video_noise_reduction;
+ // Force screencast to use a minimum bitrate. This flag comes from
+ // the PeerConnection constraint 'googScreencastMinBitrate'. It is
+ // copied to the encoder config by WebRtcVideoChannel.
+ // TODO(https://crbug.com/1315155): Remove the ability to set it in Chromium
+ // and delete this flag (it should default to 100 kbps).
+ absl::optional<int> screencast_min_bitrate_kbps;
+ // Set by screencast sources. Implies selection of encoding settings
+ // suitable for screencast. Most likely not the right way to do
+ // things, e.g., screencast of a text document and screencast of a
+ // youtube video have different needs.
+ absl::optional<bool> is_screencast;
+ webrtc::VideoTrackInterface::ContentHint content_hint;
+
+ private:
+ template <typename T>
+ static void SetFrom(absl::optional<T>* s, const absl::optional<T>& o) {
+ if (o) {
+ *s = o;
+ }
+ }
+};
+
+class MediaChannelNetworkInterface {
+ public:
+ enum SocketType { ST_RTP, ST_RTCP };
+ virtual bool SendPacket(rtc::CopyOnWriteBuffer* packet,
+ const rtc::PacketOptions& options) = 0;
+ virtual bool SendRtcp(rtc::CopyOnWriteBuffer* packet,
+ const rtc::PacketOptions& options) = 0;
+ virtual int SetOption(SocketType type,
+ rtc::Socket::Option opt,
+ int option) = 0;
+ virtual ~MediaChannelNetworkInterface() {}
+};
+
+class MediaSendChannelInterface {
+ public:
+ virtual ~MediaSendChannelInterface() = default;
+
+ virtual VideoMediaSendChannelInterface* AsVideoSendChannel() = 0;
+
+ virtual VoiceMediaSendChannelInterface* AsVoiceSendChannel() = 0;
+ virtual cricket::MediaType media_type() const = 0;
+
+ // Gets the currently set codecs/payload types to be used for outgoing media.
+ virtual absl::optional<Codec> GetSendCodec() const = 0;
+
+ // Creates a new outgoing media stream with SSRCs and CNAME as described
+ // by sp.
+ virtual bool AddSendStream(const StreamParams& sp) = 0;
+ // Removes an outgoing media stream.
+ // SSRC must be the first SSRC of the media stream if the stream uses
+ // multiple SSRCs. In the case of an ssrc of 0, the possibly cached
+ // StreamParams is removed.
+ virtual bool RemoveSendStream(uint32_t ssrc) = 0;
+ // Called on the network thread after a transport has finished sending a
+ // packet.
+ virtual void OnPacketSent(const rtc::SentPacket& sent_packet) = 0;
+ // Called when the socket's ability to send has changed.
+ virtual void OnReadyToSend(bool ready) = 0;
+ // Called when the network route used for sending packets changed.
+ virtual void OnNetworkRouteChanged(
+ absl::string_view transport_name,
+ const rtc::NetworkRoute& network_route) = 0;
+ // Sets the abstract interface class for sending RTP/RTCP data.
+ virtual void SetInterface(MediaChannelNetworkInterface* iface) = 0;
+
+ // Returns `true` if a non-null MediaChannelNetworkInterface pointer is held.
+ // Must be called on the network thread.
+ virtual bool HasNetworkInterface() const = 0;
+
+ // Corresponds to the SDP attribute extmap-allow-mixed, see RFC8285.
+ // Set to true if it's allowed to mix one- and two-byte RTP header extensions
+ // in the same stream. The setter and getter must only be called from
+ // worker_thread.
+ virtual void SetExtmapAllowMixed(bool extmap_allow_mixed) = 0;
+ virtual bool ExtmapAllowMixed() const = 0;
+
+ // Set the frame encryptor to use on all outgoing frames. This is optional.
+  // This pointer's lifetime is managed by the set of RtpSenders it is attached
+ // to.
+ virtual void SetFrameEncryptor(
+ uint32_t ssrc,
+ rtc::scoped_refptr<webrtc::FrameEncryptorInterface> frame_encryptor) = 0;
+
+ virtual webrtc::RTCError SetRtpSendParameters(
+ uint32_t ssrc,
+ const webrtc::RtpParameters& parameters,
+ webrtc::SetParametersCallback callback = nullptr) = 0;
+
+ virtual void SetEncoderToPacketizerFrameTransformer(
+ uint32_t ssrc,
+ rtc::scoped_refptr<webrtc::FrameTransformerInterface>
+ frame_transformer) = 0;
+
+ // note: The encoder_selector object must remain valid for the lifetime of the
+ // MediaChannel, unless replaced.
+ virtual void SetEncoderSelector(
+ uint32_t ssrc,
+ webrtc::VideoEncoderFactory::EncoderSelectorInterface* encoder_selector) {
+ }
+ virtual webrtc::RtpParameters GetRtpSendParameters(uint32_t ssrc) const = 0;
+ virtual bool SendCodecHasNack() const = 0;
+ // Called whenever the list of sending SSRCs changes.
+ virtual void SetSsrcListChangedCallback(
+ absl::AnyInvocable<void(const std::set<uint32_t>&)> callback) = 0;
+ // TODO(bugs.webrtc.org/13931): Remove when configuration is more sensible
+ virtual void SetSendCodecChangedCallback(
+ absl::AnyInvocable<void()> callback) = 0;
+};
+
+class MediaReceiveChannelInterface {
+ public:
+ virtual ~MediaReceiveChannelInterface() = default;
+
+ virtual VideoMediaReceiveChannelInterface* AsVideoReceiveChannel() = 0;
+ virtual VoiceMediaReceiveChannelInterface* AsVoiceReceiveChannel() = 0;
+
+ virtual cricket::MediaType media_type() const = 0;
+ // Creates a new incoming media stream with SSRCs, CNAME as described
+ // by sp. In the case of a sp without SSRCs, the unsignaled sp is cached
+ // to be used later for unsignaled streams received.
+ virtual bool AddRecvStream(const StreamParams& sp) = 0;
+ // Removes an incoming media stream.
+ // ssrc must be the first SSRC of the media stream if the stream uses
+ // multiple SSRCs.
+ virtual bool RemoveRecvStream(uint32_t ssrc) = 0;
+ // Resets any cached StreamParams for an unsignaled RecvStream, and removes
+ // any existing unsignaled streams.
+ virtual void ResetUnsignaledRecvStream() = 0;
+ // Sets the abstract interface class for sending RTP/RTCP data.
+ virtual void SetInterface(MediaChannelNetworkInterface* iface) = 0;
+ // Called on the network when an RTP packet is received.
+ virtual void OnPacketReceived(const webrtc::RtpPacketReceived& packet) = 0;
+ // Gets the current unsignaled receive stream's SSRC, if there is one.
+ virtual absl::optional<uint32_t> GetUnsignaledSsrc() const = 0;
+ // Sets the local SSRC for listening to incoming RTCP reports.
+ virtual void ChooseReceiverReportSsrc(const std::set<uint32_t>& choices) = 0;
+ // This is currently a workaround because of the demuxer state being managed
+ // across two separate threads. Once the state is consistently managed on
+ // the same thread (network), this workaround can be removed.
+ // These two notifications inform the media channel when the transport's
+ // demuxer criteria is being updated.
+ // * OnDemuxerCriteriaUpdatePending() happens on the same thread that the
+ // channel's streams are added and removed (worker thread).
+ // * OnDemuxerCriteriaUpdateComplete() happens on the same thread.
+ // Because the demuxer is updated asynchronously, there is a window of time
+ // where packets are arriving to the channel for streams that have already
+ // been removed on the worker thread. It is important NOT to treat these as
+ // new unsignalled ssrcs.
+ virtual void OnDemuxerCriteriaUpdatePending() = 0;
+ virtual void OnDemuxerCriteriaUpdateComplete() = 0;
+ // Set the frame decryptor to use on all incoming frames. This is optional.
+  // This pointer's lifetime is managed by the set of RtpReceivers it is
+ // attached to.
+ virtual void SetFrameDecryptor(
+ uint32_t ssrc,
+ rtc::scoped_refptr<webrtc::FrameDecryptorInterface> frame_decryptor) = 0;
+
+ virtual void SetDepacketizerToDecoderFrameTransformer(
+ uint32_t ssrc,
+ rtc::scoped_refptr<webrtc::FrameTransformerInterface>
+ frame_transformer) = 0;
+
+ // Set base minimum delay of the receive stream with specified ssrc.
+ // Base minimum delay sets lower bound on minimum delay value which
+ // determines minimum delay until audio playout.
+ // Returns false if there is no stream with given ssrc.
+ virtual bool SetBaseMinimumPlayoutDelayMs(uint32_t ssrc, int delay_ms) = 0;
+
+ // Returns current value of base minimum delay in milliseconds.
+ virtual absl::optional<int> GetBaseMinimumPlayoutDelayMs(
+ uint32_t ssrc) const = 0;
+};
+
+// The stats information is structured as follows:
+// Media are represented by either MediaSenderInfo or MediaReceiverInfo.
+// Media contains a vector of SSRC infos that are exclusively used by this
+// media. (SSRCs shared between media streams can't be represented.)
+
+// Information about an SSRC.
+// This data may be locally recorded, or received in an RTCP SR or RR.
+struct SsrcSenderInfo {
+ uint32_t ssrc = 0;
+ double timestamp = 0.0; // NTP timestamp, represented as seconds since epoch.
+};
+
+struct SsrcReceiverInfo {
+ uint32_t ssrc = 0;
+ double timestamp = 0.0;
+};
+
+struct MediaSenderInfo {
+ MediaSenderInfo();
+ ~MediaSenderInfo();
+ void add_ssrc(const SsrcSenderInfo& stat) { local_stats.push_back(stat); }
+ // Temporary utility function for call sites that only provide SSRC.
+ // As more info is added into SsrcSenderInfo, this function should go away.
+ void add_ssrc(uint32_t ssrc) {
+ SsrcSenderInfo stat;
+ stat.ssrc = ssrc;
+ add_ssrc(stat);
+ }
+ // Utility accessor for clients that are only interested in ssrc numbers.
+ std::vector<uint32_t> ssrcs() const {
+ std::vector<uint32_t> retval;
+ for (std::vector<SsrcSenderInfo>::const_iterator it = local_stats.begin();
+ it != local_stats.end(); ++it) {
+ retval.push_back(it->ssrc);
+ }
+ return retval;
+ }
+ // Returns true if the media has been connected.
+ bool connected() const { return local_stats.size() > 0; }
+ // Utility accessor for clients that make the assumption only one ssrc
+ // exists per media.
+ // This will eventually go away.
+ // Call sites that compare this to zero should use connected() instead.
+ // https://bugs.webrtc.org/8694
+ uint32_t ssrc() const {
+ if (connected()) {
+ return local_stats[0].ssrc;
+ } else {
+ return 0;
+ }
+ }
+ // https://w3c.github.io/webrtc-stats/#dom-rtcsentrtpstreamstats-bytessent
+ int64_t payload_bytes_sent = 0;
+ // https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-headerbytessent
+ int64_t header_and_padding_bytes_sent = 0;
+ // https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-retransmittedbytessent
+ uint64_t retransmitted_bytes_sent = 0;
+ int packets_sent = 0;
+ // https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-retransmittedpacketssent
+ uint64_t retransmitted_packets_sent = 0;
+ // https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-nackcount
+ uint32_t nacks_received = 0;
+ // https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-targetbitrate
+ absl::optional<double> target_bitrate;
+ int packets_lost = 0;
+ float fraction_lost = 0.0f;
+ int64_t rtt_ms = 0;
+ std::string codec_name;
+ absl::optional<int> codec_payload_type;
+ std::vector<SsrcSenderInfo> local_stats;
+ std::vector<SsrcReceiverInfo> remote_stats;
+ // A snapshot of the most recent Report Block with additional data of interest
+ // to statistics. Used to implement RTCRemoteInboundRtpStreamStats. Within
+ // this list, the `ReportBlockData::source_ssrc()`, which is the SSRC of the
+ // corresponding outbound RTP stream, is unique.
+ std::vector<webrtc::ReportBlockData> report_block_datas;
+ absl::optional<bool> active;
+ // https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-totalpacketsenddelay
+ webrtc::TimeDelta total_packet_send_delay = webrtc::TimeDelta::Zero();
+};
+
+struct MediaReceiverInfo {
+ MediaReceiverInfo();
+ ~MediaReceiverInfo();
+
+ void add_ssrc(const SsrcReceiverInfo& stat) { local_stats.push_back(stat); }
+ // Temporary utility function for call sites that only provide SSRC.
+  // As more info is added into SsrcReceiverInfo, this function should go away.
+ void add_ssrc(uint32_t ssrc) {
+ SsrcReceiverInfo stat;
+ stat.ssrc = ssrc;
+ add_ssrc(stat);
+ }
+ std::vector<uint32_t> ssrcs() const {
+ std::vector<uint32_t> retval;
+ for (std::vector<SsrcReceiverInfo>::const_iterator it = local_stats.begin();
+ it != local_stats.end(); ++it) {
+ retval.push_back(it->ssrc);
+ }
+ return retval;
+ }
+ // Returns true if the media has been connected.
+ bool connected() const { return local_stats.size() > 0; }
+ // Utility accessor for clients that make the assumption only one ssrc
+ // exists per media.
+ // This will eventually go away.
+  // Call sites that compare this to zero should use connected() instead.
+ // https://bugs.webrtc.org/8694
+ uint32_t ssrc() const {
+ if (connected()) {
+ return local_stats[0].ssrc;
+ } else {
+ return 0;
+ }
+ }
+
+ // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-bytesreceived
+ int64_t payload_bytes_received = 0;
+ // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-headerbytesreceived
+ int64_t header_and_padding_bytes_received = 0;
+ int packets_received = 0;
+ int packets_lost = 0;
+
+ absl::optional<uint64_t> retransmitted_bytes_received;
+ absl::optional<uint64_t> retransmitted_packets_received;
+ absl::optional<uint32_t> nacks_sent;
+ // Jitter (network-related) latency (cumulative).
+ // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-jitterbufferdelay
+ double jitter_buffer_delay_seconds = 0.0;
+ // Target delay for the jitter buffer (cumulative).
+ // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-jitterbuffertargetdelay
+ double jitter_buffer_target_delay_seconds = 0.0;
+ // Minimum obtainable delay for the jitter buffer (cumulative).
+ // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-jitterbufferminimumdelay
+ double jitter_buffer_minimum_delay_seconds = 0.0;
+ // Number of observations for cumulative jitter latency.
+ // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-jitterbufferemittedcount
+ uint64_t jitter_buffer_emitted_count = 0;
+ // The timestamp at which the last packet was received, i.e. the time of the
+ // local clock when it was received - not the RTP timestamp of that packet.
+ // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-lastpacketreceivedtimestamp
+ absl::optional<webrtc::Timestamp> last_packet_received;
+ // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-estimatedplayouttimestamp
+ absl::optional<int64_t> estimated_playout_ntp_timestamp_ms;
+ std::string codec_name;
+ absl::optional<int> codec_payload_type;
+ std::vector<SsrcReceiverInfo> local_stats;
+ std::vector<SsrcSenderInfo> remote_stats;
+ // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-fecpacketsreceived
+ absl::optional<uint64_t> fec_packets_received;
+ // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-fecpacketsdiscarded
+ absl::optional<uint64_t> fec_packets_discarded;
+ // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-fecbytesreceived
+ absl::optional<uint64_t> fec_bytes_received;
+};
+
+struct VoiceSenderInfo : public MediaSenderInfo {
+ VoiceSenderInfo();
+ ~VoiceSenderInfo();
+ int jitter_ms = 0;
+ // Current audio level, expressed linearly [0,32767].
+ int audio_level = 0;
+ // See description of "totalAudioEnergy" in the WebRTC stats spec:
+ // https://w3c.github.io/webrtc-stats/#dom-rtcmediastreamtrackstats-totalaudioenergy
+ double total_input_energy = 0.0;
+ double total_input_duration = 0.0;
+ webrtc::ANAStats ana_statistics;
+ webrtc::AudioProcessingStats apm_statistics;
+};
+
+struct VoiceReceiverInfo : public MediaReceiverInfo {
+ VoiceReceiverInfo();
+ ~VoiceReceiverInfo();
+ int jitter_ms = 0;
+ int jitter_buffer_ms = 0;
+ int jitter_buffer_preferred_ms = 0;
+ int delay_estimate_ms = 0;
+ int audio_level = 0;
+ // Stats below correspond to similarly-named fields in the WebRTC stats spec.
+ // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats
+ double total_output_energy = 0.0;
+ uint64_t total_samples_received = 0;
+ double total_output_duration = 0.0;
+ uint64_t concealed_samples = 0;
+ uint64_t silent_concealed_samples = 0;
+ uint64_t concealment_events = 0;
+ uint64_t inserted_samples_for_deceleration = 0;
+ uint64_t removed_samples_for_acceleration = 0;
+ // Stats below correspond to similarly-named fields in the WebRTC stats spec.
+ // https://w3c.github.io/webrtc-stats/#dom-rtcreceivedrtpstreamstats
+ uint64_t packets_discarded = 0;
+  // Stats below DO NOT correspond directly to anything in the WebRTC stats spec.
+ // fraction of synthesized audio inserted through expansion.
+ float expand_rate = 0.0f;
+ // fraction of synthesized speech inserted through expansion.
+ float speech_expand_rate = 0.0f;
+ // fraction of data out of secondary decoding, including FEC and RED.
+ float secondary_decoded_rate = 0.0f;
+ // Fraction of secondary data, including FEC and RED, that is discarded.
+ // Discarding of secondary data can be caused by the reception of the primary
+ // data, obsoleting the secondary data. It can also be caused by early
+ // or late arrival of secondary data. This metric is the percentage of
+ // discarded secondary data since last query of receiver info.
+ float secondary_discarded_rate = 0.0f;
+ // Fraction of data removed through time compression.
+ float accelerate_rate = 0.0f;
+ // Fraction of data inserted through time stretching.
+ float preemptive_expand_rate = 0.0f;
+ int decoding_calls_to_silence_generator = 0;
+ int decoding_calls_to_neteq = 0;
+ int decoding_normal = 0;
+ // TODO(alexnarest): Consider decoding_neteq_plc for consistency
+ int decoding_plc = 0;
+ int decoding_codec_plc = 0;
+ int decoding_cng = 0;
+ int decoding_plc_cng = 0;
+ int decoding_muted_output = 0;
+ // Estimated capture start time in NTP time in ms.
+ int64_t capture_start_ntp_time_ms = -1;
+ // Count of the number of buffer flushes.
+ uint64_t jitter_buffer_flushes = 0;
+ // Number of samples expanded due to delayed packets.
+ uint64_t delayed_packet_outage_samples = 0;
+ // Arrival delay of received audio packets.
+ double relative_packet_arrival_delay_seconds = 0.0;
+  // Count and total duration of audio interruptions (loss-concealment periods
+ // longer than 150 ms).
+ int32_t interruption_count = 0;
+ int32_t total_interruption_duration_ms = 0;
+ // Remote outbound stats derived by the received RTCP sender reports.
+ // https://w3c.github.io/webrtc-stats/#remoteoutboundrtpstats-dict*
+ absl::optional<int64_t> last_sender_report_timestamp_ms;
+ absl::optional<int64_t> last_sender_report_remote_timestamp_ms;
+ uint64_t sender_reports_packets_sent = 0;
+ uint64_t sender_reports_bytes_sent = 0;
+ uint64_t sender_reports_reports_count = 0;
+ absl::optional<webrtc::TimeDelta> round_trip_time;
+ webrtc::TimeDelta total_round_trip_time = webrtc::TimeDelta::Zero();
+ int round_trip_time_measurements = 0;
+};
+
+struct VideoSenderInfo : public MediaSenderInfo {
+ VideoSenderInfo();
+ ~VideoSenderInfo();
+ std::vector<SsrcGroup> ssrc_groups;
+ absl::optional<std::string> encoder_implementation_name;
+ int firs_received = 0;
+ int plis_received = 0;
+ int send_frame_width = 0;
+ int send_frame_height = 0;
+ int frames = 0;
+ double framerate_input = 0;
+ int framerate_sent = 0;
+ int aggregated_framerate_sent = 0;
+ int nominal_bitrate = 0;
+ int adapt_reason = 0;
+ int adapt_changes = 0;
+ // https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-qualitylimitationreason
+ webrtc::QualityLimitationReason quality_limitation_reason =
+ webrtc::QualityLimitationReason::kNone;
+ // https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-qualitylimitationdurations
+ std::map<webrtc::QualityLimitationReason, int64_t>
+ quality_limitation_durations_ms;
+ // https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-qualitylimitationresolutionchanges
+ uint32_t quality_limitation_resolution_changes = 0;
+ int avg_encode_ms = 0;
+ int encode_usage_percent = 0;
+ uint32_t frames_encoded = 0;
+ uint32_t key_frames_encoded = 0;
+ // https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-totalencodetime
+ uint64_t total_encode_time_ms = 0;
+ // https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-totalencodedbytestarget
+ uint64_t total_encoded_bytes_target = 0;
+ bool has_entered_low_resolution = false;
+ absl::optional<uint64_t> qp_sum;
+ webrtc::VideoContentType content_type = webrtc::VideoContentType::UNSPECIFIED;
+ uint32_t frames_sent = 0;
+ // https://w3c.github.io/webrtc-stats/#dom-rtcvideosenderstats-hugeframessent
+ uint32_t huge_frames_sent = 0;
+ uint32_t aggregated_huge_frames_sent = 0;
+ absl::optional<std::string> rid;
+ absl::optional<bool> power_efficient_encoder;
+ absl::optional<webrtc::ScalabilityMode> scalability_mode;
+};
+
+struct VideoReceiverInfo : public MediaReceiverInfo {
+ VideoReceiverInfo();
+ ~VideoReceiverInfo();
+ std::vector<SsrcGroup> ssrc_groups;
+ absl::optional<std::string> decoder_implementation_name;
+ absl::optional<bool> power_efficient_decoder;
+ int packets_concealed = 0;
+ int firs_sent = 0;
+ int plis_sent = 0;
+ int frame_width = 0;
+ int frame_height = 0;
+ int framerate_received = 0;
+ int framerate_decoded = 0;
+ int framerate_output = 0;
+ // Framerate as sent to the renderer.
+ int framerate_render_input = 0;
+ // Framerate that the renderer reports.
+ int framerate_render_output = 0;
+ uint32_t frames_received = 0;
+ uint32_t frames_dropped = 0;
+ uint32_t frames_decoded = 0;
+ uint32_t key_frames_decoded = 0;
+ uint32_t frames_rendered = 0;
+ absl::optional<uint64_t> qp_sum;
+ // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-totaldecodetime
+ webrtc::TimeDelta total_decode_time = webrtc::TimeDelta::Zero();
+ // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-totalprocessingdelay
+ webrtc::TimeDelta total_processing_delay = webrtc::TimeDelta::Zero();
+ webrtc::TimeDelta total_assembly_time = webrtc::TimeDelta::Zero();
+ uint32_t frames_assembled_from_multiple_packets = 0;
+ double total_inter_frame_delay = 0;
+ double total_squared_inter_frame_delay = 0;
+ int64_t interframe_delay_max_ms = -1;
+ uint32_t freeze_count = 0;
+ uint32_t pause_count = 0;
+ uint32_t total_freezes_duration_ms = 0;
+ uint32_t total_pauses_duration_ms = 0;
+ uint32_t jitter_ms = 0;
+
+ webrtc::VideoContentType content_type = webrtc::VideoContentType::UNSPECIFIED;
+
+ // All stats below are gathered per-VideoReceiver, but some will be correlated
+ // across MediaStreamTracks. NOTE(hta): when sinking stats into per-SSRC
+ // structures, reflect this in the new layout.
+
+ // Current frame decode latency.
+ int decode_ms = 0;
+ // Maximum observed frame decode latency.
+ int max_decode_ms = 0;
+ // Jitter (network-related) latency.
+ int jitter_buffer_ms = 0;
+ // Requested minimum playout latency.
+ int min_playout_delay_ms = 0;
+ // Requested latency to account for rendering delay.
+ int render_delay_ms = 0;
+ // Target overall delay: network+decode+render, accounting for
+ // min_playout_delay_ms.
+ int target_delay_ms = 0;
+ // Current overall delay, possibly ramping towards target_delay_ms.
+ int current_delay_ms = 0;
+
+ // Estimated capture start time in NTP time in ms.
+ int64_t capture_start_ntp_time_ms = -1;
+
+ // First frame received to first frame decoded latency.
+ int64_t first_frame_received_to_decoded_ms = -1;
+
+ // Timing frame info: all important timestamps for a full lifetime of a
+ // single 'timing frame'.
+ absl::optional<webrtc::TimingFrameInfo> timing_frame_info;
+};
+
+struct BandwidthEstimationInfo {
+ int available_send_bandwidth = 0;
+ int available_recv_bandwidth = 0;
+ int target_enc_bitrate = 0;
+ int actual_enc_bitrate = 0;
+ int retransmit_bitrate = 0;
+ int transmit_bitrate = 0;
+ int64_t bucket_delay = 0;
+};
+
+// Maps from payload type to `RtpCodecParameters`.
+typedef std::map<int, webrtc::RtpCodecParameters> RtpCodecParametersMap;
+
+// Stats returned from VoiceMediaSendChannel.GetStats()
+struct VoiceMediaSendInfo {
+ VoiceMediaSendInfo();
+ ~VoiceMediaSendInfo();
+ void Clear() {
+ senders.clear();
+ send_codecs.clear();
+ }
+ std::vector<VoiceSenderInfo> senders;
+ RtpCodecParametersMap send_codecs;
+};
+
+// Stats returned from VoiceMediaReceiveChannel.GetStats()
+struct VoiceMediaReceiveInfo {
+ VoiceMediaReceiveInfo();
+ ~VoiceMediaReceiveInfo();
+ void Clear() {
+ receivers.clear();
+ receive_codecs.clear();
+ }
+ std::vector<VoiceReceiverInfo> receivers;
+ RtpCodecParametersMap receive_codecs;
+ int32_t device_underrun_count = 0;
+};
+
+// Combined VoiceMediaSendInfo and VoiceMediaReceiveInfo
+// Returned from Transceiver.getStats()
+struct VoiceMediaInfo {
+ VoiceMediaInfo();
+ VoiceMediaInfo(VoiceMediaSendInfo&& send, VoiceMediaReceiveInfo&& receive)
+ : senders(std::move(send.senders)),
+ receivers(std::move(receive.receivers)),
+ send_codecs(std::move(send.send_codecs)),
+ receive_codecs(std::move(receive.receive_codecs)),
+ device_underrun_count(receive.device_underrun_count) {}
+ ~VoiceMediaInfo();
+ void Clear() {
+ senders.clear();
+ receivers.clear();
+ send_codecs.clear();
+ receive_codecs.clear();
+ }
+ std::vector<VoiceSenderInfo> senders;
+ std::vector<VoiceReceiverInfo> receivers;
+ RtpCodecParametersMap send_codecs;
+ RtpCodecParametersMap receive_codecs;
+ int32_t device_underrun_count = 0;
+};
+
+// Stats for a VideoMediaSendChannel
+struct VideoMediaSendInfo {
+ VideoMediaSendInfo();
+ ~VideoMediaSendInfo();
+ void Clear() {
+ senders.clear();
+ aggregated_senders.clear();
+ send_codecs.clear();
+ }
+  // Each sender info represents one "outbound-rtp" stream. In non-simulcast,
+ // this means one info per RtpSender but if simulcast is used this means
+ // one info per simulcast layer.
+ std::vector<VideoSenderInfo> senders;
+ // Used for legacy getStats() API's "ssrc" stats and modern getStats() API's
+ // "track" stats. If simulcast is used, instead of having one sender info per
+ // simulcast layer, the metrics of all layers of an RtpSender are aggregated
+ // into a single sender info per RtpSender.
+ std::vector<VideoSenderInfo> aggregated_senders;
+ RtpCodecParametersMap send_codecs;
+};
+
+// Stats for a VideoMediaReceiveChannel
+struct VideoMediaReceiveInfo {
+ VideoMediaReceiveInfo();
+ ~VideoMediaReceiveInfo();
+ void Clear() {
+ receivers.clear();
+ receive_codecs.clear();
+ }
+ std::vector<VideoReceiverInfo> receivers;
+ RtpCodecParametersMap receive_codecs;
+};
+
+// Combined VideoMediaSendInfo and VideoMediaReceiveInfo.
+// Returned from channel.GetStats()
+struct VideoMediaInfo {
+ VideoMediaInfo();
+ VideoMediaInfo(VideoMediaSendInfo&& send, VideoMediaReceiveInfo&& receive)
+ : senders(std::move(send.senders)),
+ aggregated_senders(std::move(send.aggregated_senders)),
+ receivers(std::move(receive.receivers)),
+ send_codecs(std::move(send.send_codecs)),
+ receive_codecs(std::move(receive.receive_codecs)) {}
+ ~VideoMediaInfo();
+ void Clear() {
+ senders.clear();
+ aggregated_senders.clear();
+ receivers.clear();
+ send_codecs.clear();
+ receive_codecs.clear();
+ }
+ // Each sender info represents one "outbound-rtp" stream. In non-simulcast,
+ // this means one info per RtpSender but if simulcast is used this means
+ // one info per simulcast layer.
+ std::vector<VideoSenderInfo> senders;
+ // Used for legacy getStats() API's "ssrc" stats and modern getStats() API's
+ // "track" stats. If simulcast is used, instead of having one sender info per
+ // simulcast layer, the metrics of all layers of an RtpSender are aggregated
+ // into a single sender info per RtpSender.
+ std::vector<VideoSenderInfo> aggregated_senders;
+ std::vector<VideoReceiverInfo> receivers;
+ RtpCodecParametersMap send_codecs;
+ RtpCodecParametersMap receive_codecs;
+};
+
+struct RtcpParameters {
+ bool reduced_size = false;
+ bool remote_estimate = false;
+};
+
+struct MediaChannelParameters {
+ virtual ~MediaChannelParameters() = default;
+
+ std::vector<Codec> codecs;
+ std::vector<webrtc::RtpExtension> extensions;
+  // For a send stream this is true if we've negotiated a send direction,
+ // for a receive stream this is true if we've negotiated a receive direction.
+ bool is_stream_active = true;
+
+ // TODO(pthatcher): Add streams.
+ RtcpParameters rtcp;
+
+ std::string ToString() const {
+ rtc::StringBuilder ost;
+ ost << "{";
+ const char* separator = "";
+ for (const auto& entry : ToStringMap()) {
+ ost << separator << entry.first << ": " << entry.second;
+ separator = ", ";
+ }
+ ost << "}";
+ return ost.Release();
+ }
+
+ protected:
+ virtual std::map<std::string, std::string> ToStringMap() const {
+ return {{"codecs", VectorToString(codecs)},
+ {"extensions", VectorToString(extensions)}};
+ }
+};
+
+struct SenderParameters : MediaChannelParameters {
+ int max_bandwidth_bps = -1;
+ // This is the value to be sent in the MID RTP header extension (if the header
+  // extension is included in the list of extensions).
+ std::string mid;
+ bool extmap_allow_mixed = false;
+
+ protected:
+ std::map<std::string, std::string> ToStringMap() const override {
+ auto params = MediaChannelParameters::ToStringMap();
+ params["max_bandwidth_bps"] = rtc::ToString(max_bandwidth_bps);
+ params["mid"] = (mid.empty() ? "<not set>" : mid);
+ params["extmap-allow-mixed"] = extmap_allow_mixed ? "true" : "false";
+ return params;
+ }
+};
+
+struct AudioSenderParameter : SenderParameters {
+ AudioSenderParameter();
+ ~AudioSenderParameter() override;
+ AudioOptions options;
+
+ protected:
+ std::map<std::string, std::string> ToStringMap() const override;
+};
+
+struct AudioReceiverParameters : MediaChannelParameters {};
+
+class VoiceMediaSendChannelInterface : public MediaSendChannelInterface {
+ public:
+ virtual bool SetSenderParameters(const AudioSenderParameter& params) = 0;
+ // Starts or stops sending (and potentially capture) of local audio.
+ virtual void SetSend(bool send) = 0;
+ // Configure stream for sending.
+ virtual bool SetAudioSend(uint32_t ssrc,
+ bool enable,
+ const AudioOptions* options,
+ AudioSource* source) = 0;
+ // Returns if the telephone-event has been negotiated.
+ virtual bool CanInsertDtmf() = 0;
+ // Send a DTMF `event`. The DTMF out-of-band signal will be used.
+ // The `ssrc` should be either 0 or a valid send stream ssrc.
+ // The valid value for the `event` are 0 to 15 which corresponding to
+ // DTMF event 0-9, *, #, A-D.
+ virtual bool InsertDtmf(uint32_t ssrc, int event, int duration) = 0;
+ virtual bool GetStats(VoiceMediaSendInfo* stats) = 0;
+ virtual bool SenderNackEnabled() const = 0;
+ virtual bool SenderNonSenderRttEnabled() const = 0;
+};
+
+class VoiceMediaReceiveChannelInterface : public MediaReceiveChannelInterface {
+ public:
+ virtual bool SetReceiverParameters(const AudioReceiverParameters& params) = 0;
+ // Get the receive parameters for the incoming stream identified by `ssrc`.
+ virtual webrtc::RtpParameters GetRtpReceiverParameters(
+ uint32_t ssrc) const = 0;
+ virtual std::vector<webrtc::RtpSource> GetSources(uint32_t ssrc) const = 0;
+ // Retrieve the receive parameters for the default receive
+ // stream, which is used when SSRCs are not signaled.
+ virtual webrtc::RtpParameters GetDefaultRtpReceiveParameters() const = 0;
+ // Starts or stops playout of received audio.
+ virtual void SetPlayout(bool playout) = 0;
+ // Set speaker output volume of the specified ssrc.
+ virtual bool SetOutputVolume(uint32_t ssrc, double volume) = 0;
+ // Set speaker output volume for future unsignaled streams.
+ virtual bool SetDefaultOutputVolume(double volume) = 0;
+ virtual void SetRawAudioSink(
+ uint32_t ssrc,
+ std::unique_ptr<webrtc::AudioSinkInterface> sink) = 0;
+ virtual void SetDefaultRawAudioSink(
+ std::unique_ptr<webrtc::AudioSinkInterface> sink) = 0;
+ virtual bool GetStats(VoiceMediaReceiveInfo* stats, bool reset_legacy) = 0;
+ virtual void SetReceiveNackEnabled(bool enabled) = 0;
+ virtual void SetReceiveNonSenderRttEnabled(bool enabled) = 0;
+};
+
+struct VideoSenderParameters : SenderParameters {
+ VideoSenderParameters();
+ ~VideoSenderParameters() override;
+ // Use conference mode? This flag comes from the remote
+ // description's SDP line 'a=x-google-flag:conference', copied over
+ // by VideoChannel::SetRemoteContent_w, and ultimately used by
+ // conference mode screencast logic in
+ // WebRtcVideoChannel::WebRtcVideoSendStream::CreateVideoEncoderConfig.
+ // The special screencast behaviour is disabled by default.
+ bool conference_mode = false;
+
+ protected:
+ std::map<std::string, std::string> ToStringMap() const override;
+};
+
+struct VideoReceiverParameters : MediaChannelParameters {};
+
+class VideoMediaSendChannelInterface : public MediaSendChannelInterface {
+ public:
+ virtual bool SetSenderParameters(const VideoSenderParameters& params) = 0;
+ // Starts or stops transmission (and potentially capture) of local video.
+ virtual bool SetSend(bool send) = 0;
+ // Configure stream for sending and register a source.
+ // The `ssrc` must correspond to a registered send stream.
+ virtual bool SetVideoSend(
+ uint32_t ssrc,
+ const VideoOptions* options,
+ rtc::VideoSourceInterface<webrtc::VideoFrame>* source) = 0;
+ // Cause generation of a keyframe for `ssrc` on a sending channel.
+ virtual void GenerateSendKeyFrame(uint32_t ssrc,
+ const std::vector<std::string>& rids) = 0;
+ virtual bool GetStats(VideoMediaSendInfo* stats) = 0;
+ // This fills the "bitrate parts" (rtx, video bitrate) of the
+ // BandwidthEstimationInfo, since that part that isn't possible to get
+ // through webrtc::Call::GetStats, as they are statistics of the send
+ // streams.
+ // TODO(holmer): We should change this so that either BWE graphs doesn't
+ // need access to bitrates of the streams, or change the (RTC)StatsCollector
+ // so that it's getting the send stream stats separately by calling
+ // GetStats(), and merges with BandwidthEstimationInfo by itself.
+ virtual void FillBitrateInfo(BandwidthEstimationInfo* bwe_info) = 0;
+ // Information queries to support SetReceiverFeedbackParameters
+ virtual webrtc::RtcpMode SendCodecRtcpMode() const = 0;
+ virtual bool SendCodecHasLntf() const = 0;
+ virtual absl::optional<int> SendCodecRtxTime() const = 0;
+};
+
+class VideoMediaReceiveChannelInterface : public MediaReceiveChannelInterface {
+ public:
+ virtual bool SetReceiverParameters(const VideoReceiverParameters& params) = 0;
+ // Get the receive parameters for the incoming stream identified by `ssrc`.
+ virtual webrtc::RtpParameters GetRtpReceiverParameters(
+ uint32_t ssrc) const = 0;
+ // Starts or stops decoding of remote video.
+ virtual void SetReceive(bool receive) = 0;
+ // Retrieve the receive parameters for the default receive
+ // stream, which is used when SSRCs are not signaled.
+ virtual webrtc::RtpParameters GetDefaultRtpReceiveParameters() const = 0;
+ // Sets the sink object to be used for the specified stream.
+ virtual bool SetSink(uint32_t ssrc,
+ rtc::VideoSinkInterface<webrtc::VideoFrame>* sink) = 0;
+ // The sink is used for the 'default' stream.
+ virtual void SetDefaultSink(
+ rtc::VideoSinkInterface<webrtc::VideoFrame>* sink) = 0;
+ // Request generation of a keyframe for `ssrc` on a receiving channel via
+ // RTCP feedback.
+ virtual void RequestRecvKeyFrame(uint32_t ssrc) = 0;
+
+ virtual std::vector<webrtc::RtpSource> GetSources(uint32_t ssrc) const = 0;
+ // Set recordable encoded frame callback for `ssrc`
+ virtual void SetRecordableEncodedFrameCallback(
+ uint32_t ssrc,
+ std::function<void(const webrtc::RecordableEncodedFrame&)> callback) = 0;
+ // Clear recordable encoded frame callback for `ssrc`
+ virtual void ClearRecordableEncodedFrameCallback(uint32_t ssrc) = 0;
+ virtual bool GetStats(VideoMediaReceiveInfo* stats) = 0;
+ virtual void SetReceiverFeedbackParameters(bool lntf_enabled,
+ bool nack_enabled,
+ webrtc::RtcpMode rtcp_mode,
+ absl::optional<int> rtx_time) = 0;
+ virtual bool AddDefaultRecvStreamForTesting(const StreamParams& sp) = 0;
+};
+
+} // namespace cricket
+
+#endif // MEDIA_BASE_MEDIA_CHANNEL_H_
diff --git a/third_party/libwebrtc/media/base/media_channel_impl.cc b/third_party/libwebrtc/media/base/media_channel_impl.cc
new file mode 100644
index 0000000000..5b41a9ccda
--- /dev/null
+++ b/third_party/libwebrtc/media/base/media_channel_impl.cc
@@ -0,0 +1,310 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/base/media_channel_impl.h"
+
+#include <map>
+#include <string>
+#include <type_traits>
+#include <utility>
+
+#include "absl/functional/any_invocable.h"
+#include "api/audio_options.h"
+#include "api/media_stream_interface.h"
+#include "api/rtc_error.h"
+#include "api/rtp_sender_interface.h"
+#include "api/units/time_delta.h"
+#include "api/video/video_timing.h"
+#include "api/video_codecs/scalability_mode.h"
+#include "common_video/include/quality_limitation_reason.h"
+#include "media/base/codec.h"
+#include "media/base/media_channel.h"
+#include "media/base/rtp_utils.h"
+#include "media/base/stream_params.h"
+#include "modules/rtp_rtcp/include/report_block_data.h"
+#include "rtc_base/checks.h"
+
+namespace cricket {
+using webrtc::FrameDecryptorInterface;
+using webrtc::FrameEncryptorInterface;
+using webrtc::FrameTransformerInterface;
+using webrtc::PendingTaskSafetyFlag;
+using webrtc::SafeTask;
+using webrtc::TaskQueueBase;
+using webrtc::VideoTrackInterface;
+
+VideoOptions::VideoOptions()
+ : content_hint(VideoTrackInterface::ContentHint::kNone) {}
+VideoOptions::~VideoOptions() = default;
+
+MediaChannelUtil::MediaChannelUtil(TaskQueueBase* network_thread,
+ bool enable_dscp)
+ : transport_(network_thread, enable_dscp) {}
+
+MediaChannelUtil::~MediaChannelUtil() {}
+
+void MediaChannelUtil::SetInterface(MediaChannelNetworkInterface* iface) {
+ transport_.SetInterface(iface);
+}
+
+int MediaChannelUtil::GetRtpSendTimeExtnId() const {
+ return -1;
+}
+
+void MediaChannelUtil::SetFrameEncryptor(
+ uint32_t ssrc,
+ rtc::scoped_refptr<FrameEncryptorInterface> frame_encryptor) {
+ // Placeholder should be pure virtual once internal supports it.
+}
+
+void MediaChannelUtil::SetFrameDecryptor(
+ uint32_t ssrc,
+ rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor) {
+ // Placeholder should be pure virtual once internal supports it.
+}
+
+bool MediaChannelUtil::SendPacket(rtc::CopyOnWriteBuffer* packet,
+ const rtc::PacketOptions& options) {
+ return transport_.DoSendPacket(packet, false, options);
+}
+
+bool MediaChannelUtil::SendRtcp(rtc::CopyOnWriteBuffer* packet,
+ const rtc::PacketOptions& options) {
+ return transport_.DoSendPacket(packet, true, options);
+}
+
+int MediaChannelUtil::SetOption(MediaChannelNetworkInterface::SocketType type,
+ rtc::Socket::Option opt,
+ int option) {
+ return transport_.SetOption(type, opt, option);
+}
+
+// Corresponds to the SDP attribute extmap-allow-mixed, see RFC8285.
+// Set to true if it's allowed to mix one- and two-byte RTP header extensions
+// in the same stream. The setter and getter must only be called from
+// worker_thread.
+void MediaChannelUtil::SetExtmapAllowMixed(bool extmap_allow_mixed) {
+ extmap_allow_mixed_ = extmap_allow_mixed;
+}
+
+bool MediaChannelUtil::ExtmapAllowMixed() const {
+ return extmap_allow_mixed_;
+}
+
+bool MediaChannelUtil::HasNetworkInterface() const {
+ return transport_.HasNetworkInterface();
+}
+
+void MediaChannelUtil::SetEncoderToPacketizerFrameTransformer(
+ uint32_t ssrc,
+ rtc::scoped_refptr<FrameTransformerInterface> frame_transformer) {}
+
+void MediaChannelUtil::SetDepacketizerToDecoderFrameTransformer(
+ uint32_t ssrc,
+ rtc::scoped_refptr<FrameTransformerInterface> frame_transformer) {}
+
+bool MediaChannelUtil::DscpEnabled() const {
+ return transport_.DscpEnabled();
+}
+
+void MediaChannelUtil::SetPreferredDscp(rtc::DiffServCodePoint new_dscp) {
+ transport_.SetPreferredDscp(new_dscp);
+}
+
+MediaSenderInfo::MediaSenderInfo() = default;
+MediaSenderInfo::~MediaSenderInfo() = default;
+
+MediaReceiverInfo::MediaReceiverInfo() = default;
+MediaReceiverInfo::~MediaReceiverInfo() = default;
+
+VoiceSenderInfo::VoiceSenderInfo() = default;
+VoiceSenderInfo::~VoiceSenderInfo() = default;
+
+VoiceReceiverInfo::VoiceReceiverInfo() = default;
+VoiceReceiverInfo::~VoiceReceiverInfo() = default;
+
+VideoSenderInfo::VideoSenderInfo() = default;
+VideoSenderInfo::~VideoSenderInfo() = default;
+
+VideoReceiverInfo::VideoReceiverInfo() = default;
+VideoReceiverInfo::~VideoReceiverInfo() = default;
+
+VoiceMediaInfo::VoiceMediaInfo() = default;
+VoiceMediaInfo::~VoiceMediaInfo() = default;
+
+VideoMediaInfo::VideoMediaInfo() = default;
+VideoMediaInfo::~VideoMediaInfo() = default;
+
+VideoMediaSendInfo::VideoMediaSendInfo() = default;
+VideoMediaSendInfo::~VideoMediaSendInfo() = default;
+
+VoiceMediaSendInfo::VoiceMediaSendInfo() = default;
+VoiceMediaSendInfo::~VoiceMediaSendInfo() = default;
+
+VideoMediaReceiveInfo::VideoMediaReceiveInfo() = default;
+VideoMediaReceiveInfo::~VideoMediaReceiveInfo() = default;
+
+VoiceMediaReceiveInfo::VoiceMediaReceiveInfo() = default;
+VoiceMediaReceiveInfo::~VoiceMediaReceiveInfo() = default;
+
+AudioSenderParameter::AudioSenderParameter() = default;
+AudioSenderParameter::~AudioSenderParameter() = default;
+
+std::map<std::string, std::string> AudioSenderParameter::ToStringMap() const {
+ auto params = SenderParameters::ToStringMap();
+ params["options"] = options.ToString();
+ return params;
+}
+
+VideoSenderParameters::VideoSenderParameters() = default;
+VideoSenderParameters::~VideoSenderParameters() = default;
+
+std::map<std::string, std::string> VideoSenderParameters::ToStringMap() const {
+ auto params = SenderParameters::ToStringMap();
+ params["conference_mode"] = (conference_mode ? "yes" : "no");
+ return params;
+}
+
+// --------------------- MediaChannelUtil::TransportForMediaChannels -----
+
+MediaChannelUtil::TransportForMediaChannels::TransportForMediaChannels(
+ webrtc::TaskQueueBase* network_thread,
+ bool enable_dscp)
+ : network_safety_(webrtc::PendingTaskSafetyFlag::CreateDetachedInactive()),
+ network_thread_(network_thread),
+
+ enable_dscp_(enable_dscp) {}
+
+MediaChannelUtil::TransportForMediaChannels::~TransportForMediaChannels() {
+ RTC_DCHECK(!network_interface_);
+}
+
+bool MediaChannelUtil::TransportForMediaChannels::SendRtcp(
+ rtc::ArrayView<const uint8_t> packet) {
+ auto send = [this, packet = rtc::CopyOnWriteBuffer(
+ packet, kMaxRtpPacketLen)]() mutable {
+ rtc::PacketOptions rtc_options;
+ if (DscpEnabled()) {
+ rtc_options.dscp = PreferredDscp();
+ }
+ DoSendPacket(&packet, true, rtc_options);
+ };
+
+ if (network_thread_->IsCurrent()) {
+ send();
+ } else {
+ network_thread_->PostTask(SafeTask(network_safety_, std::move(send)));
+ }
+ return true;
+}
+
+bool MediaChannelUtil::TransportForMediaChannels::SendRtp(
+ rtc::ArrayView<const uint8_t> packet,
+ const webrtc::PacketOptions& options) {
+ auto send =
+ [this, packet_id = options.packet_id,
+ included_in_feedback = options.included_in_feedback,
+ included_in_allocation = options.included_in_allocation,
+ batchable = options.batchable,
+ last_packet_in_batch = options.last_packet_in_batch,
+ packet = rtc::CopyOnWriteBuffer(packet, kMaxRtpPacketLen)]() mutable {
+ rtc::PacketOptions rtc_options;
+ rtc_options.packet_id = packet_id;
+ if (DscpEnabled()) {
+ rtc_options.dscp = PreferredDscp();
+ }
+ rtc_options.info_signaled_after_sent.included_in_feedback =
+ included_in_feedback;
+ rtc_options.info_signaled_after_sent.included_in_allocation =
+ included_in_allocation;
+ rtc_options.batchable = batchable;
+ rtc_options.last_packet_in_batch = last_packet_in_batch;
+ DoSendPacket(&packet, false, rtc_options);
+ };
+
+ // TODO(bugs.webrtc.org/11993): ModuleRtpRtcpImpl2 and related classes (e.g.
+ // RTCPSender) aren't aware of the network thread and may trigger calls to
+ // this function from different threads. Update those classes to keep
+ // network traffic on the network thread.
+ if (network_thread_->IsCurrent()) {
+ send();
+ } else {
+ network_thread_->PostTask(SafeTask(network_safety_, std::move(send)));
+ }
+ return true;
+}
+
+void MediaChannelUtil::TransportForMediaChannels::SetInterface(
+ MediaChannelNetworkInterface* iface) {
+ RTC_DCHECK_RUN_ON(network_thread_);
+ iface ? network_safety_->SetAlive() : network_safety_->SetNotAlive();
+ network_interface_ = iface;
+ UpdateDscp();
+}
+
+void MediaChannelUtil::TransportForMediaChannels::UpdateDscp() {
+ rtc::DiffServCodePoint value =
+ enable_dscp_ ? preferred_dscp_ : rtc::DSCP_DEFAULT;
+ int ret = SetOptionLocked(MediaChannelNetworkInterface::ST_RTP,
+ rtc::Socket::OPT_DSCP, value);
+ if (ret == 0)
+ SetOptionLocked(MediaChannelNetworkInterface::ST_RTCP,
+ rtc::Socket::OPT_DSCP, value);
+}
+
+bool MediaChannelUtil::TransportForMediaChannels::DoSendPacket(
+ rtc::CopyOnWriteBuffer* packet,
+ bool rtcp,
+ const rtc::PacketOptions& options) {
+ RTC_DCHECK_RUN_ON(network_thread_);
+ if (!network_interface_)
+ return false;
+
+ return (!rtcp) ? network_interface_->SendPacket(packet, options)
+ : network_interface_->SendRtcp(packet, options);
+}
+
+int MediaChannelUtil::TransportForMediaChannels::SetOption(
+ MediaChannelNetworkInterface::SocketType type,
+ rtc::Socket::Option opt,
+ int option) {
+ RTC_DCHECK_RUN_ON(network_thread_);
+ return SetOptionLocked(type, opt, option);
+}
+
+int MediaChannelUtil::TransportForMediaChannels::SetOptionLocked(
+ MediaChannelNetworkInterface::SocketType type,
+ rtc::Socket::Option opt,
+ int option) {
+ if (!network_interface_)
+ return -1;
+ return network_interface_->SetOption(type, opt, option);
+}
+
+void MediaChannelUtil::TransportForMediaChannels::SetPreferredDscp(
+ rtc::DiffServCodePoint new_dscp) {
+ if (!network_thread_->IsCurrent()) {
+ // This is currently the common path as the derived channel classes
+ // get called on the worker thread. There are still some tests though
+ // that call directly on the network thread.
+ network_thread_->PostTask(SafeTask(
+ network_safety_, [this, new_dscp]() { SetPreferredDscp(new_dscp); }));
+ return;
+ }
+
+ RTC_DCHECK_RUN_ON(network_thread_);
+ if (new_dscp == preferred_dscp_)
+ return;
+
+ preferred_dscp_ = new_dscp;
+ UpdateDscp();
+}
+
+} // namespace cricket
diff --git a/third_party/libwebrtc/media/base/media_channel_impl.h b/third_party/libwebrtc/media/base/media_channel_impl.h
new file mode 100644
index 0000000000..f8c8174efa
--- /dev/null
+++ b/third_party/libwebrtc/media/base/media_channel_impl.h
@@ -0,0 +1,195 @@
+/*
+ * Copyright 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MEDIA_BASE_MEDIA_CHANNEL_IMPL_H_
+#define MEDIA_BASE_MEDIA_CHANNEL_IMPL_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <functional>
+#include <memory>
+#include <set>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "absl/functional/any_invocable.h"
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "api/audio_options.h"
+#include "api/call/audio_sink.h"
+#include "api/call/transport.h"
+#include "api/crypto/frame_decryptor_interface.h"
+#include "api/crypto/frame_encryptor_interface.h"
+#include "api/frame_transformer_interface.h"
+#include "api/media_types.h"
+#include "api/rtc_error.h"
+#include "api/rtp_headers.h"
+#include "api/rtp_parameters.h"
+#include "api/rtp_sender_interface.h"
+#include "api/scoped_refptr.h"
+#include "api/sequence_checker.h"
+#include "api/task_queue/pending_task_safety_flag.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/transport/rtp/rtp_source.h"
+#include "api/video/recordable_encoded_frame.h"
+#include "api/video/video_frame.h"
+#include "api/video/video_sink_interface.h"
+#include "api/video/video_source_interface.h"
+#include "api/video_codecs/video_encoder_factory.h"
+#include "media/base/codec.h"
+#include "media/base/media_channel.h"
+#include "media/base/stream_params.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "rtc_base/async_packet_socket.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/copy_on_write_buffer.h"
+#include "rtc_base/dscp.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/network/sent_packet.h"
+#include "rtc_base/network_route.h"
+#include "rtc_base/socket.h"
+#include "rtc_base/thread_annotations.h"
+// This file contains the base classes for classes that implement
+// the channel interfaces.
+// These implementation classes used to be the exposed interface names,
+// but this is in the process of being changed.
+
+namespace cricket {
+
+// The `MediaChannelUtil` class provides functionality that is used by
+// multiple MediaChannel-like objects, of both sending and receiving
+// types.
+class MediaChannelUtil {
+ public:
+ MediaChannelUtil(webrtc::TaskQueueBase* network_thread,
+ bool enable_dscp = false);
+ virtual ~MediaChannelUtil();
+ // Returns the absolute sendtime extension id value from media channel.
+ virtual int GetRtpSendTimeExtnId() const;
+
+ webrtc::Transport* transport() { return &transport_; }
+
+ // Base methods to send packet using MediaChannelNetworkInterface.
+ // These methods are used by some tests only.
+ bool SendPacket(rtc::CopyOnWriteBuffer* packet,
+ const rtc::PacketOptions& options);
+
+ bool SendRtcp(rtc::CopyOnWriteBuffer* packet,
+ const rtc::PacketOptions& options);
+
+ int SetOption(MediaChannelNetworkInterface::SocketType type,
+ rtc::Socket::Option opt,
+ int option);
+
+ // Functions that form part of one or more interface classes.
+ // Not marked override, since this class does not inherit from the
+ // interfaces.
+
+ // Corresponds to the SDP attribute extmap-allow-mixed, see RFC8285.
+ // Set to true if it's allowed to mix one- and two-byte RTP header extensions
+ // in the same stream. The setter and getter must only be called from
+ // worker_thread.
+ void SetExtmapAllowMixed(bool extmap_allow_mixed);
+ bool ExtmapAllowMixed() const;
+
+ void SetInterface(MediaChannelNetworkInterface* iface);
+ // Returns `true` if a non-null MediaChannelNetworkInterface pointer is held.
+ // Must be called on the network thread.
+ bool HasNetworkInterface() const;
+
+ void SetFrameEncryptor(
+ uint32_t ssrc,
+ rtc::scoped_refptr<webrtc::FrameEncryptorInterface> frame_encryptor);
+ void SetFrameDecryptor(
+ uint32_t ssrc,
+ rtc::scoped_refptr<webrtc::FrameDecryptorInterface> frame_decryptor);
+
+ void SetEncoderToPacketizerFrameTransformer(
+ uint32_t ssrc,
+ rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer);
+ void SetDepacketizerToDecoderFrameTransformer(
+ uint32_t ssrc,
+ rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer);
+
+ protected:
+ bool DscpEnabled() const;
+
+ void SetPreferredDscp(rtc::DiffServCodePoint new_dscp);
+
+ private:
+ // Implementation of the webrtc::Transport interface required
+ // by Call().
+ class TransportForMediaChannels : public webrtc::Transport {
+ public:
+ TransportForMediaChannels(webrtc::TaskQueueBase* network_thread,
+ bool enable_dscp);
+
+ virtual ~TransportForMediaChannels();
+
+ // Implementation of webrtc::Transport
+ bool SendRtp(rtc::ArrayView<const uint8_t> packet,
+ const webrtc::PacketOptions& options) override;
+ bool SendRtcp(rtc::ArrayView<const uint8_t> packet) override;
+
+ // Not implementation of webrtc::Transport
+ void SetInterface(MediaChannelNetworkInterface* iface);
+
+ int SetOption(MediaChannelNetworkInterface::SocketType type,
+ rtc::Socket::Option opt,
+ int option);
+
+ bool DoSendPacket(rtc::CopyOnWriteBuffer* packet,
+ bool rtcp,
+ const rtc::PacketOptions& options);
+
+ bool HasNetworkInterface() const {
+ RTC_DCHECK_RUN_ON(network_thread_);
+ return network_interface_ != nullptr;
+ }
+ bool DscpEnabled() const { return enable_dscp_; }
+
+ void SetPreferredDscp(rtc::DiffServCodePoint new_dscp);
+
+ private:
+ // This is the DSCP value used for both RTP and RTCP channels if DSCP is
+ // enabled. It can be changed at any time via `SetPreferredDscp`.
+ rtc::DiffServCodePoint PreferredDscp() const {
+ RTC_DCHECK_RUN_ON(network_thread_);
+ return preferred_dscp_;
+ }
+
+ // Apply the preferred DSCP setting to the underlying network interface RTP
+ // and RTCP channels. If DSCP is disabled, then apply the default DSCP
+ // value.
+ void UpdateDscp() RTC_RUN_ON(network_thread_);
+
+ int SetOptionLocked(MediaChannelNetworkInterface::SocketType type,
+ rtc::Socket::Option opt,
+ int option) RTC_RUN_ON(network_thread_);
+
+ const rtc::scoped_refptr<webrtc::PendingTaskSafetyFlag> network_safety_
+ RTC_PT_GUARDED_BY(network_thread_);
+ webrtc::TaskQueueBase* const network_thread_;
+ const bool enable_dscp_;
+ MediaChannelNetworkInterface* network_interface_
+ RTC_GUARDED_BY(network_thread_) = nullptr;
+ rtc::DiffServCodePoint preferred_dscp_ RTC_GUARDED_BY(network_thread_) =
+ rtc::DSCP_DEFAULT;
+ };
+
+ bool extmap_allow_mixed_ = false;
+ TransportForMediaChannels transport_;
+};
+
+} // namespace cricket
+
+#endif // MEDIA_BASE_MEDIA_CHANNEL_IMPL_H_
diff --git a/third_party/libwebrtc/media/base/media_config.h b/third_party/libwebrtc/media/base/media_config.h
new file mode 100644
index 0000000000..782770569c
--- /dev/null
+++ b/third_party/libwebrtc/media/base/media_config.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MEDIA_BASE_MEDIA_CONFIG_H_
+#define MEDIA_BASE_MEDIA_CONFIG_H_
+
+namespace cricket {
+
+// Construction-time settings, passed on when creating
+// MediaChannels.
+struct MediaConfig {
+ // Set DSCP value on packets. This flag comes from the
+ // PeerConnection constraint 'googDscp'.
+ // TODO(https://crbug.com/1315574): Remove the ability to set it in Chromium
+ // and delete this flag.
+ bool enable_dscp = true;
+
+ // Video-specific config.
+ struct Video {
+ // Enable WebRTC CPU Overuse Detection. This flag comes from the
+ // PeerConnection constraint 'googCpuOveruseDetection'.
+ // TODO(https://crbug.com/1315569): Remove the ability to set it in Chromium
+ // and delete this flag.
+ bool enable_cpu_adaptation = true;
+
+ // Enable WebRTC suspension of video. No video frames will be sent
+ // when the bitrate is below the configured minimum bitrate. This
+ // flag comes from the PeerConnection constraint
+ // 'googSuspendBelowMinBitrate', and WebRtcVideoChannel copies it
+ // to VideoSendStream::Config::suspend_below_min_bitrate.
+ // TODO(https://crbug.com/1315564): Remove the ability to set it in Chromium
+ // and delete this flag.
+ bool suspend_below_min_bitrate = false;
+
+ // Enable buffering and playout timing smoothing of decoded frames.
+ // If set to true, then WebRTC will buffer and potentially drop decoded
+ // frames in order to keep a smooth rendering.
+ // If set to false, then WebRTC will hand over the frame from the decoder
+ // to the renderer as soon as possible, meaning that the renderer is
+ // responsible for smooth rendering.
+ // Note that even if this flag is set to false, dropping of frames can
+ // still happen pre-decode, e.g., dropping of higher temporal layers.
+ // This flag comes from the PeerConnection RtcConfiguration.
+ bool enable_prerenderer_smoothing = true;
+
+ // Enables periodic bandwidth probing in application-limited region.
+ bool periodic_alr_bandwidth_probing = false;
+
+ // Enables the new method to estimate the cpu load from encoding, used for
+ // cpu adaptation. This flag is intended to be controlled primarily by a
+ // Chrome origin-trial.
+ // TODO(bugs.webrtc.org/8504): If all goes well, the flag will be removed
+ // together with the old method of estimation.
+ bool experiment_cpu_load_estimator = false;
+
+ // Time interval between RTCP report for video
+ int rtcp_report_interval_ms = 1000;
+
+ // Enables send packet batching from the egress RTP sender.
+ bool enable_send_packet_batching = false;
+ } video;
+
+ // Audio-specific config.
+ struct Audio {
+ // Time interval between RTCP report for audio
+ int rtcp_report_interval_ms = 5000;
+ } audio;
+
+ bool operator==(const MediaConfig& o) const {
+ return enable_dscp == o.enable_dscp &&
+ video.enable_cpu_adaptation == o.video.enable_cpu_adaptation &&
+ video.suspend_below_min_bitrate ==
+ o.video.suspend_below_min_bitrate &&
+ video.enable_prerenderer_smoothing ==
+ o.video.enable_prerenderer_smoothing &&
+ video.periodic_alr_bandwidth_probing ==
+ o.video.periodic_alr_bandwidth_probing &&
+ video.experiment_cpu_load_estimator ==
+ o.video.experiment_cpu_load_estimator &&
+ video.rtcp_report_interval_ms == o.video.rtcp_report_interval_ms &&
+ video.enable_send_packet_batching ==
+ o.video.enable_send_packet_batching &&
+ audio.rtcp_report_interval_ms == o.audio.rtcp_report_interval_ms;
+ }
+
+ bool operator!=(const MediaConfig& o) const { return !(*this == o); }
+};
+
+} // namespace cricket
+
+#endif // MEDIA_BASE_MEDIA_CONFIG_H_
diff --git a/third_party/libwebrtc/media/base/media_constants.cc b/third_party/libwebrtc/media/base/media_constants.cc
new file mode 100644
index 0000000000..2af0295a5a
--- /dev/null
+++ b/third_party/libwebrtc/media/base/media_constants.cc
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/base/media_constants.h"
+
+namespace cricket {
+
+const int kVideoCodecClockrate = 90000;
+
+const int kVideoMtu = 1200;
+const int kVideoRtpSendBufferSize = 262144;
+const int kVideoRtpRecvBufferSize = 262144;
+
+const float kHighSystemCpuThreshold = 0.85f;
+const float kLowSystemCpuThreshold = 0.65f;
+const float kProcessCpuThreshold = 0.10f;
+
+const char kRedCodecName[] = "red";
+const char kUlpfecCodecName[] = "ulpfec";
+const char kMultiplexCodecName[] = "multiplex";
+
+// TODO(brandtr): Change this to 'flexfec' when we are confident that the
+// header format is not changing anymore.
+const char kFlexfecCodecName[] = "flexfec-03";
+
+// draft-ietf-payload-flexible-fec-scheme-02.txt
+const char kFlexfecFmtpRepairWindow[] = "repair-window";
+
+// RFC 4588 RTP Retransmission Payload Format
+const char kRtxCodecName[] = "rtx";
+const char kCodecParamRtxTime[] = "rtx-time";
+const char kCodecParamAssociatedPayloadType[] = "apt";
+
+const char kCodecParamAssociatedCodecName[] = "acn";
+// Parameters that do not follow the key-value convention
+// are treated as having the empty string as key.
+const char kCodecParamNotInNameValueFormat[] = "";
+
+const char kOpusCodecName[] = "opus";
+const char kL16CodecName[] = "L16";
+const char kG722CodecName[] = "G722";
+const char kIlbcCodecName[] = "ILBC";
+const char kPcmuCodecName[] = "PCMU";
+const char kPcmaCodecName[] = "PCMA";
+const char kCnCodecName[] = "CN";
+const char kDtmfCodecName[] = "telephone-event";
+
+// draft-spittka-payload-rtp-opus-03.txt
+const char kCodecParamPTime[] = "ptime";
+const char kCodecParamMaxPTime[] = "maxptime";
+const char kCodecParamMinPTime[] = "minptime";
+const char kCodecParamSPropStereo[] = "sprop-stereo";
+const char kCodecParamStereo[] = "stereo";
+const char kCodecParamUseInbandFec[] = "useinbandfec";
+const char kCodecParamUseDtx[] = "usedtx";
+const char kCodecParamMaxAverageBitrate[] = "maxaveragebitrate";
+const char kCodecParamMaxPlaybackRate[] = "maxplaybackrate";
+
+const char kParamValueTrue[] = "1";
+const char kParamValueEmpty[] = "";
+
+const int kOpusDefaultMaxPTime = 120;
+const int kOpusDefaultPTime = 20;
+const int kOpusDefaultMinPTime = 3;
+const int kOpusDefaultSPropStereo = 0;
+const int kOpusDefaultStereo = 0;
+const int kOpusDefaultUseInbandFec = 0;
+const int kOpusDefaultUseDtx = 0;
+const int kOpusDefaultMaxPlaybackRate = 48000;
+
+const int kPreferredMaxPTime = 120;
+const int kPreferredMinPTime = 10;
+const int kPreferredSPropStereo = 0;
+const int kPreferredStereo = 0;
+const int kPreferredUseInbandFec = 0;
+
+const char kPacketizationParamRaw[] = "raw";
+
+const char kRtcpFbParamLntf[] = "goog-lntf";
+const char kRtcpFbParamNack[] = "nack";
+const char kRtcpFbNackParamPli[] = "pli";
+const char kRtcpFbParamRemb[] = "goog-remb";
+const char kRtcpFbParamTransportCc[] = "transport-cc";
+
+const char kRtcpFbParamCcm[] = "ccm";
+const char kRtcpFbCcmParamFir[] = "fir";
+const char kRtcpFbParamRrtr[] = "rrtr";
+const char kCodecParamMaxBitrate[] = "x-google-max-bitrate";
+const char kCodecParamMinBitrate[] = "x-google-min-bitrate";
+const char kCodecParamStartBitrate[] = "x-google-start-bitrate";
+const char kCodecParamMaxQuantization[] = "x-google-max-quantization";
+
+const char kComfortNoiseCodecName[] = "CN";
+
+const char kVp8CodecName[] = "VP8";
+const char kVp9CodecName[] = "VP9";
+const char kAv1CodecName[] = "AV1";
+const char kH264CodecName[] = "H264";
+const char kH265CodecName[] = "H265";
+
+// RFC 6184 RTP Payload Format for H.264 video
+const char kH264FmtpProfileLevelId[] = "profile-level-id";
+const char kH264FmtpLevelAsymmetryAllowed[] = "level-asymmetry-allowed";
+const char kH264FmtpPacketizationMode[] = "packetization-mode";
+const char kH264FmtpSpropParameterSets[] = "sprop-parameter-sets";
+const char kH264FmtpSpsPpsIdrInKeyframe[] = "sps-pps-idr-in-keyframe";
+const char kH264ProfileLevelConstrainedBaseline[] = "42e01f";
+const char kH264ProfileLevelConstrainedHigh[] = "640c1f";
+
+// RFC 7798 RTP Payload Format for H.265 video
+const char kH265FmtpProfileSpace[] = "profile-space";
+const char kH265FmtpTierFlag[] = "tier-flag";
+const char kH265FmtpProfileId[] = "profile-id";
+const char kH265FmtpLevelId[] = "level-id";
+const char kH265FmtpProfileCompatibilityIndicator[] =
+ "profile-compatibility-indicator";
+const char kH265FmtpInteropConstraints[] = "interop-constraints";
+const char kH265FmtpTxMode[] = "tx-mode";
+
+const char kVP9ProfileId[] = "profile-id";
+
+const int kDefaultVideoMaxFramerate = 60;
+// Max encode quantizer for VP8/9 and AV1 encoders assuming libvpx/libaom API
+// range [0, 63]
+const int kDefaultVideoMaxQpVpx = 56;
+// Max encode quantizer for H264/5 assuming the bitstream range [0, 51].
+const int kDefaultVideoMaxQpH26x = 51;
+
+const size_t kConferenceMaxNumSpatialLayers = 3;
+const size_t kConferenceMaxNumTemporalLayers = 3;
+const size_t kConferenceDefaultNumTemporalLayers = 3;
+
+// RFC 3556 and RFC 3890
+const char kApplicationSpecificBandwidth[] = "AS";
+const char kTransportSpecificBandwidth[] = "TIAS";
+} // namespace cricket
diff --git a/third_party/libwebrtc/media/base/media_constants.h b/third_party/libwebrtc/media/base/media_constants.h
new file mode 100644
index 0000000000..877cc7a296
--- /dev/null
+++ b/third_party/libwebrtc/media/base/media_constants.h
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MEDIA_BASE_MEDIA_CONSTANTS_H_
+#define MEDIA_BASE_MEDIA_CONSTANTS_H_
+
+#include <stddef.h>
+
+#include "rtc_base/system/rtc_export.h"
+
+// This file contains constants related to media.
+
+namespace cricket {
+
+extern const int kVideoCodecClockrate;
+
+extern const int kVideoMtu;
+extern const int kVideoRtpSendBufferSize;
+extern const int kVideoRtpRecvBufferSize;
+
+// Default CPU thresholds.
+extern const float kHighSystemCpuThreshold;
+extern const float kLowSystemCpuThreshold;
+extern const float kProcessCpuThreshold;
+
+extern const char kRedCodecName[];
+extern const char kUlpfecCodecName[];
+extern const char kFlexfecCodecName[];
+extern const char kMultiplexCodecName[];
+
+extern const char kFlexfecFmtpRepairWindow[];
+
+extern const char kRtxCodecName[];
+extern const char kCodecParamRtxTime[];
+extern const char kCodecParamAssociatedPayloadType[];
+
+extern const char kCodecParamAssociatedCodecName[];
+extern const char kCodecParamNotInNameValueFormat[];
+
+extern const char kOpusCodecName[];
+extern const char kL16CodecName[];
+extern const char kG722CodecName[];
+extern const char kIlbcCodecName[];
+extern const char kPcmuCodecName[];
+extern const char kPcmaCodecName[];
+extern const char kCnCodecName[];
+extern const char kDtmfCodecName[];
+
+// Attribute parameters
+extern const char kCodecParamPTime[];
+extern const char kCodecParamMaxPTime[];
+// fmtp parameters
+extern const char kCodecParamMinPTime[];
+extern const char kCodecParamSPropStereo[];
+extern const char kCodecParamStereo[];
+extern const char kCodecParamUseInbandFec[];
+extern const char kCodecParamUseDtx[];
+extern const char kCodecParamMaxAverageBitrate[];
+extern const char kCodecParamMaxPlaybackRate[];
+
+extern const char kParamValueTrue[];
+// Parameters are stored as parameter/value pairs. For parameters that do not
+// have a value, `kParamValueEmpty` should be used as value.
+extern const char kParamValueEmpty[];
+
+// opus parameters.
+// Default value for maxptime according to
+// http://tools.ietf.org/html/draft-spittka-payload-rtp-opus-03
+extern const int kOpusDefaultMaxPTime;
+extern const int kOpusDefaultPTime;
+extern const int kOpusDefaultMinPTime;
+extern const int kOpusDefaultSPropStereo;
+extern const int kOpusDefaultStereo;
+extern const int kOpusDefaultUseInbandFec;
+extern const int kOpusDefaultUseDtx;
+extern const int kOpusDefaultMaxPlaybackRate;
+
+// Preferred values in this code base. Note that they may differ from the default
+// values in http://tools.ietf.org/html/draft-spittka-payload-rtp-opus-03
+// Only frame durations of 10 ms or longer are currently supported in this code
+// base.
+extern const int kPreferredMaxPTime;
+extern const int kPreferredMinPTime;
+extern const int kPreferredSPropStereo;
+extern const int kPreferredStereo;
+extern const int kPreferredUseInbandFec;
+
+extern const char kPacketizationParamRaw[];
+
+// rtcp-fb message in its first experimental stages. Documentation pending.
+extern const char kRtcpFbParamLntf[];
+// rtcp-fb messages according to RFC 4585
+extern const char kRtcpFbParamNack[];
+extern const char kRtcpFbNackParamPli[];
+// rtcp-fb messages according to
+// http://tools.ietf.org/html/draft-alvestrand-rmcat-remb-00
+extern const char kRtcpFbParamRemb[];
+// rtcp-fb messages according to
+// https://tools.ietf.org/html/draft-holmer-rmcat-transport-wide-cc-extensions-01
+extern const char kRtcpFbParamTransportCc[];
+// ccm submessages according to RFC 5104
+extern const char kRtcpFbParamCcm[];
+extern const char kRtcpFbCcmParamFir[];
+// Receiver reference time report
+// https://tools.ietf.org/html/rfc3611 section 4.4
+extern const char kRtcpFbParamRrtr[];
+// Google specific parameters
+extern const char kCodecParamMaxBitrate[];
+extern const char kCodecParamMinBitrate[];
+extern const char kCodecParamStartBitrate[];
+extern const char kCodecParamMaxQuantization[];
+
+extern const char kComfortNoiseCodecName[];
+
+RTC_EXPORT extern const char kVp8CodecName[];
+RTC_EXPORT extern const char kVp9CodecName[];
+RTC_EXPORT extern const char kAv1CodecName[];
+RTC_EXPORT extern const char kH264CodecName[];
+RTC_EXPORT extern const char kH265CodecName[];
+
+// RFC 6184 RTP Payload Format for H.264 video
+RTC_EXPORT extern const char kH264FmtpProfileLevelId[];
+RTC_EXPORT extern const char kH264FmtpLevelAsymmetryAllowed[];
+RTC_EXPORT extern const char kH264FmtpPacketizationMode[];
+extern const char kH264FmtpSpropParameterSets[];
+extern const char kH264FmtpSpsPpsIdrInKeyframe[];
+extern const char kH264ProfileLevelConstrainedBaseline[];
+extern const char kH264ProfileLevelConstrainedHigh[];
+
+// RFC 7798 RTP Payload Format for H.265 video.
+// According to RFC 7742, the sprop parameters MUST NOT be included
+// in SDP generated by WebRTC, so for H.265 we don't handle them, though
+// the current H.264 implementation honors them when receiving
+// sprop-parameter-sets in SDP.
+RTC_EXPORT extern const char kH265FmtpProfileSpace[];
+RTC_EXPORT extern const char kH265FmtpTierFlag[];
+RTC_EXPORT extern const char kH265FmtpProfileId[];
+RTC_EXPORT extern const char kH265FmtpLevelId[];
+RTC_EXPORT extern const char kH265FmtpProfileCompatibilityIndicator[];
+RTC_EXPORT extern const char kH265FmtpInteropConstraints[];
+RTC_EXPORT extern const char kH265FmtpTxMode[];
+
+extern const char kVP9ProfileId[];
+
+extern const int kDefaultVideoMaxFramerate;
+extern const int kDefaultVideoMaxQpVpx;
+extern const int kDefaultVideoMaxQpH26x;
+
+extern const size_t kConferenceMaxNumSpatialLayers;
+extern const size_t kConferenceMaxNumTemporalLayers;
+extern const size_t kConferenceDefaultNumTemporalLayers;
+
+extern const char kApplicationSpecificBandwidth[];
+extern const char kTransportSpecificBandwidth[];
+} // namespace cricket
+
+#endif // MEDIA_BASE_MEDIA_CONSTANTS_H_
diff --git a/third_party/libwebrtc/media/base/media_engine.cc b/third_party/libwebrtc/media/base/media_engine.cc
new file mode 100644
index 0000000000..7304ab03d7
--- /dev/null
+++ b/third_party/libwebrtc/media/base/media_engine.cc
@@ -0,0 +1,291 @@
+/*
+ * Copyright (c) 2004 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/base/media_engine.h"
+
+#include <stddef.h>
+
+#include <cstdint>
+#include <string>
+#include <utility>
+
+#include "absl/algorithm/container.h"
+#include "api/video/video_bitrate_allocation.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/string_encode.h"
+
+namespace cricket {
+
+RtpCapabilities::RtpCapabilities() = default;
+RtpCapabilities::~RtpCapabilities() = default;
+
+webrtc::RtpParameters CreateRtpParametersWithOneEncoding() {
+ webrtc::RtpParameters parameters;
+ webrtc::RtpEncodingParameters encoding;
+ parameters.encodings.push_back(encoding);
+ return parameters;
+}
+
+webrtc::RtpParameters CreateRtpParametersWithEncodings(StreamParams sp) {
+ std::vector<uint32_t> primary_ssrcs;
+ sp.GetPrimarySsrcs(&primary_ssrcs);
+ size_t encoding_count = primary_ssrcs.size();
+
+ std::vector<webrtc::RtpEncodingParameters> encodings(encoding_count);
+ for (size_t i = 0; i < encodings.size(); ++i) {
+ encodings[i].ssrc = primary_ssrcs[i];
+ }
+
+ const std::vector<RidDescription>& rids = sp.rids();
+ RTC_DCHECK(rids.size() == 0 || rids.size() == encoding_count);
+ for (size_t i = 0; i < rids.size(); ++i) {
+ encodings[i].rid = rids[i].rid;
+ }
+
+ webrtc::RtpParameters parameters;
+ parameters.encodings = encodings;
+ parameters.rtcp.cname = sp.cname;
+ return parameters;
+}
+
+std::vector<webrtc::RtpExtension> GetDefaultEnabledRtpHeaderExtensions(
+ const RtpHeaderExtensionQueryInterface& query_interface) {
+ std::vector<webrtc::RtpExtension> extensions;
+ for (const auto& entry : query_interface.GetRtpHeaderExtensions()) {
+ if (entry.direction != webrtc::RtpTransceiverDirection::kStopped)
+ extensions.emplace_back(entry.uri, *entry.preferred_id);
+ }
+ return extensions;
+}
+
+webrtc::RTCError CheckScalabilityModeValues(
+ const webrtc::RtpParameters& rtp_parameters,
+ rtc::ArrayView<cricket::Codec> codec_preferences,
+ absl::optional<cricket::Codec> send_codec) {
+ using webrtc::RTCErrorType;
+
+ if (codec_preferences.empty()) {
+ // This is an audio sender or an extra check in the stack where the codec
+ // list is not available and we can't check the scalability_mode values.
+ return webrtc::RTCError::OK();
+ }
+
+ for (size_t i = 0; i < rtp_parameters.encodings.size(); ++i) {
+ if (rtp_parameters.encodings[i].codec) {
+ bool codecFound = false;
+ for (const cricket::VideoCodec& codec : codec_preferences) {
+ if (codec.MatchesRtpCodec(*rtp_parameters.encodings[i].codec)) {
+ codecFound = true;
+ send_codec = codec;
+ break;
+ }
+ }
+ if (!codecFound) {
+ LOG_AND_RETURN_ERROR(
+ RTCErrorType::INVALID_MODIFICATION,
+ "Attempted to use an unsupported codec for layer " +
+ std::to_string(i));
+ }
+ }
+ if (rtp_parameters.encodings[i].scalability_mode) {
+ if (!send_codec) {
+ bool scalabilityModeFound = false;
+ for (const cricket::VideoCodec& codec : codec_preferences) {
+ for (const auto& scalability_mode : codec.scalability_modes) {
+ if (ScalabilityModeToString(scalability_mode) ==
+ *rtp_parameters.encodings[i].scalability_mode) {
+ scalabilityModeFound = true;
+ break;
+ }
+ }
+ if (scalabilityModeFound)
+ break;
+ }
+
+ if (!scalabilityModeFound) {
+ LOG_AND_RETURN_ERROR(
+ RTCErrorType::INVALID_MODIFICATION,
+ "Attempted to set RtpParameters scalabilityMode "
+ "to an unsupported value for the current codecs.");
+ }
+ } else {
+ bool scalabilityModeFound = false;
+ for (const auto& scalability_mode : send_codec->scalability_modes) {
+ if (ScalabilityModeToString(scalability_mode) ==
+ *rtp_parameters.encodings[i].scalability_mode) {
+ scalabilityModeFound = true;
+ break;
+ }
+ }
+ if (!scalabilityModeFound) {
+ LOG_AND_RETURN_ERROR(
+ RTCErrorType::INVALID_MODIFICATION,
+ "Attempted to set RtpParameters scalabilityMode "
+ "to an unsupported value for the current codecs.");
+ }
+ }
+ }
+ }
+
+ return webrtc::RTCError::OK();
+}
+
+webrtc::RTCError CheckRtpParametersValues(
+ const webrtc::RtpParameters& rtp_parameters,
+ rtc::ArrayView<cricket::Codec> codec_preferences,
+ absl::optional<cricket::Codec> send_codec) {
+ using webrtc::RTCErrorType;
+
+ for (size_t i = 0; i < rtp_parameters.encodings.size(); ++i) {
+ if (rtp_parameters.encodings[i].bitrate_priority <= 0) {
+ LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_RANGE,
+ "Attempted to set RtpParameters bitrate_priority to "
+ "an invalid number. bitrate_priority must be > 0.");
+ }
+ if (rtp_parameters.encodings[i].scale_resolution_down_by &&
+ *rtp_parameters.encodings[i].scale_resolution_down_by < 1.0) {
+ LOG_AND_RETURN_ERROR(
+ RTCErrorType::INVALID_RANGE,
+ "Attempted to set RtpParameters scale_resolution_down_by to an "
+ "invalid value. scale_resolution_down_by must be >= 1.0");
+ }
+ if (rtp_parameters.encodings[i].max_framerate &&
+ *rtp_parameters.encodings[i].max_framerate < 0.0) {
+ LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_RANGE,
+ "Attempted to set RtpParameters max_framerate to an "
+ "invalid value. max_framerate must be >= 0.0");
+ }
+ if (rtp_parameters.encodings[i].min_bitrate_bps &&
+ rtp_parameters.encodings[i].max_bitrate_bps) {
+ if (*rtp_parameters.encodings[i].max_bitrate_bps <
+ *rtp_parameters.encodings[i].min_bitrate_bps) {
+ LOG_AND_RETURN_ERROR(webrtc::RTCErrorType::INVALID_RANGE,
+ "Attempted to set RtpParameters min bitrate "
+ "larger than max bitrate.");
+ }
+ }
+ if (rtp_parameters.encodings[i].num_temporal_layers) {
+ if (*rtp_parameters.encodings[i].num_temporal_layers < 1 ||
+ *rtp_parameters.encodings[i].num_temporal_layers >
+ webrtc::kMaxTemporalStreams) {
+ LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_RANGE,
+ "Attempted to set RtpParameters "
+ "num_temporal_layers to an invalid number.");
+ }
+ }
+
+ if (rtp_parameters.encodings[i].requested_resolution &&
+ rtp_parameters.encodings[i].scale_resolution_down_by) {
+ LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_RANGE,
+ "Attempted to set scale_resolution_down_by and "
+ "requested_resolution simultaniously.");
+ }
+
+ if (i > 0 && rtp_parameters.encodings[i - 1].codec !=
+ rtp_parameters.encodings[i].codec) {
+ LOG_AND_RETURN_ERROR(RTCErrorType::UNSUPPORTED_OPERATION,
+ "Attempted to use different codec values for "
+ "different encodings.");
+ }
+ }
+
+ return CheckScalabilityModeValues(rtp_parameters, codec_preferences,
+ send_codec);
+}
+
+webrtc::RTCError CheckRtpParametersInvalidModificationAndValues(
+ const webrtc::RtpParameters& old_rtp_parameters,
+ const webrtc::RtpParameters& rtp_parameters) {
+ return CheckRtpParametersInvalidModificationAndValues(
+ old_rtp_parameters, rtp_parameters, {}, absl::nullopt);
+}
+
+webrtc::RTCError CheckRtpParametersInvalidModificationAndValues(
+ const webrtc::RtpParameters& old_rtp_parameters,
+ const webrtc::RtpParameters& rtp_parameters,
+ rtc::ArrayView<cricket::Codec> codec_preferences,
+ absl::optional<cricket::Codec> send_codec) {
+ using webrtc::RTCErrorType;
+ if (rtp_parameters.encodings.size() != old_rtp_parameters.encodings.size()) {
+ LOG_AND_RETURN_ERROR(
+ RTCErrorType::INVALID_MODIFICATION,
+ "Attempted to set RtpParameters with different encoding count");
+ }
+ if (rtp_parameters.rtcp != old_rtp_parameters.rtcp) {
+ LOG_AND_RETURN_ERROR(
+ RTCErrorType::INVALID_MODIFICATION,
+ "Attempted to set RtpParameters with modified RTCP parameters");
+ }
+ if (rtp_parameters.header_extensions !=
+ old_rtp_parameters.header_extensions) {
+ LOG_AND_RETURN_ERROR(
+ RTCErrorType::INVALID_MODIFICATION,
+ "Attempted to set RtpParameters with modified header extensions");
+ }
+ if (!absl::c_equal(old_rtp_parameters.encodings, rtp_parameters.encodings,
+ [](const webrtc::RtpEncodingParameters& encoding1,
+ const webrtc::RtpEncodingParameters& encoding2) {
+ return encoding1.rid == encoding2.rid;
+ })) {
+ LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_MODIFICATION,
+ "Attempted to change RID values in the encodings.");
+ }
+ if (!absl::c_equal(old_rtp_parameters.encodings, rtp_parameters.encodings,
+ [](const webrtc::RtpEncodingParameters& encoding1,
+ const webrtc::RtpEncodingParameters& encoding2) {
+ return encoding1.ssrc == encoding2.ssrc;
+ })) {
+ LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_MODIFICATION,
+ "Attempted to set RtpParameters with modified SSRC");
+ }
+
+ return CheckRtpParametersValues(rtp_parameters, codec_preferences,
+ send_codec);
+}
+
+CompositeMediaEngine::CompositeMediaEngine(
+ std::unique_ptr<webrtc::FieldTrialsView> trials,
+ std::unique_ptr<VoiceEngineInterface> audio_engine,
+ std::unique_ptr<VideoEngineInterface> video_engine)
+ : trials_(std::move(trials)),
+ voice_engine_(std::move(audio_engine)),
+ video_engine_(std::move(video_engine)) {}
+
+CompositeMediaEngine::CompositeMediaEngine(
+ std::unique_ptr<VoiceEngineInterface> audio_engine,
+ std::unique_ptr<VideoEngineInterface> video_engine)
+ : CompositeMediaEngine(nullptr,
+ std::move(audio_engine),
+ std::move(video_engine)) {}
+
+CompositeMediaEngine::~CompositeMediaEngine() = default;
+
+bool CompositeMediaEngine::Init() {
+ voice().Init();
+ return true;
+}
+
+VoiceEngineInterface& CompositeMediaEngine::voice() {
+ return *voice_engine_.get();
+}
+
+VideoEngineInterface& CompositeMediaEngine::video() {
+ return *video_engine_.get();
+}
+
+const VoiceEngineInterface& CompositeMediaEngine::voice() const {
+ return *voice_engine_.get();
+}
+
+const VideoEngineInterface& CompositeMediaEngine::video() const {
+ return *video_engine_.get();
+}
+
+} // namespace cricket
diff --git a/third_party/libwebrtc/media/base/media_engine.h b/third_party/libwebrtc/media/base/media_engine.h
new file mode 100644
index 0000000000..428123516f
--- /dev/null
+++ b/third_party/libwebrtc/media/base/media_engine.h
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2004 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MEDIA_BASE_MEDIA_ENGINE_H_
+#define MEDIA_BASE_MEDIA_ENGINE_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "api/audio_codecs/audio_decoder_factory.h"
+#include "api/audio_codecs/audio_encoder_factory.h"
+#include "api/crypto/crypto_options.h"
+#include "api/field_trials_view.h"
+#include "api/rtp_parameters.h"
+#include "api/video/video_bitrate_allocator_factory.h"
+#include "call/audio_state.h"
+#include "media/base/codec.h"
+#include "media/base/media_channel.h"
+#include "media/base/media_channel_impl.h"
+#include "media/base/media_config.h"
+#include "media/base/video_common.h"
+#include "rtc_base/system/file_wrapper.h"
+
+namespace webrtc {
+class AudioDeviceModule;
+class AudioMixer;
+class AudioProcessing;
+class Call;
+} // namespace webrtc
+
+namespace cricket {
+
+// Checks that the scalability_mode value of each encoding is supported by at
+// least one video codec of the list. If the list is empty, no check is done.
+webrtc::RTCError CheckScalabilityModeValues(
+ const webrtc::RtpParameters& new_parameters,
+ rtc::ArrayView<cricket::Codec> codec_preferences,
+ absl::optional<cricket::Codec> send_codec);
+
+// Checks the parameters have valid and supported values, and checks parameters
+// with CheckScalabilityModeValues().
+webrtc::RTCError CheckRtpParametersValues(
+ const webrtc::RtpParameters& new_parameters,
+ rtc::ArrayView<cricket::Codec> codec_preferences,
+ absl::optional<cricket::Codec> send_codec);
+
+// Checks that the immutable values have not changed in new_parameters and
+// checks all parameters with CheckRtpParametersValues().
+webrtc::RTCError CheckRtpParametersInvalidModificationAndValues(
+ const webrtc::RtpParameters& old_parameters,
+ const webrtc::RtpParameters& new_parameters,
+ rtc::ArrayView<cricket::Codec> codec_preferences,
+ absl::optional<cricket::Codec> send_codec);
+
+// Checks that the immutable values have not changed in new_parameters and
+// checks parameters (except SVC) with CheckRtpParametersValues(). It should
+// usually be paired with a call to CheckScalabilityModeValues().
+webrtc::RTCError CheckRtpParametersInvalidModificationAndValues(
+ const webrtc::RtpParameters& old_parameters,
+ const webrtc::RtpParameters& new_parameters);
+
+struct RtpCapabilities {
+ RtpCapabilities();
+ ~RtpCapabilities();
+ std::vector<webrtc::RtpExtension> header_extensions;
+};
+
+class RtpHeaderExtensionQueryInterface {
+ public:
+ virtual ~RtpHeaderExtensionQueryInterface() = default;
+
+ // Returns a vector of RtpHeaderExtensionCapability, whose direction is
+ // kStopped if the extension is stopped (not used) by default.
+ virtual std::vector<webrtc::RtpHeaderExtensionCapability>
+ GetRtpHeaderExtensions() const = 0;
+};
+
+class VoiceEngineInterface : public RtpHeaderExtensionQueryInterface {
+ public:
+ VoiceEngineInterface() = default;
+ virtual ~VoiceEngineInterface() = default;
+
+ VoiceEngineInterface(const VoiceEngineInterface&) = delete;
+ VoiceEngineInterface& operator=(const VoiceEngineInterface&) = delete;
+
+ // Initialization
+ // Starts the engine.
+ virtual void Init() = 0;
+
+ // TODO(solenberg): Remove once VoE API refactoring is done.
+ virtual rtc::scoped_refptr<webrtc::AudioState> GetAudioState() const = 0;
+
+ virtual std::unique_ptr<VoiceMediaSendChannelInterface> CreateSendChannel(
+ webrtc::Call* call,
+ const MediaConfig& config,
+ const AudioOptions& options,
+ const webrtc::CryptoOptions& crypto_options,
+ webrtc::AudioCodecPairId codec_pair_id) {
+ // TODO(hta): Make pure virtual when all downstream has updated
+ RTC_CHECK_NOTREACHED();
+ return nullptr;
+ }
+
+ virtual std::unique_ptr<VoiceMediaReceiveChannelInterface>
+ CreateReceiveChannel(webrtc::Call* call,
+ const MediaConfig& config,
+ const AudioOptions& options,
+ const webrtc::CryptoOptions& crypto_options,
+ webrtc::AudioCodecPairId codec_pair_id) {
+ // TODO(hta): Make pure virtual when all downstream has updated
+ RTC_CHECK_NOTREACHED();
+ return nullptr;
+ }
+
+ virtual const std::vector<AudioCodec>& send_codecs() const = 0;
+ virtual const std::vector<AudioCodec>& recv_codecs() const = 0;
+
+ // Starts AEC dump using existing file, a maximum file size in bytes can be
+ // specified. Logging is stopped just before the size limit is exceeded.
+ // If max_size_bytes is set to a value <= 0, no limit will be used.
+ virtual bool StartAecDump(webrtc::FileWrapper file,
+ int64_t max_size_bytes) = 0;
+
+ // Stops recording AEC dump.
+ virtual void StopAecDump() = 0;
+
+ virtual absl::optional<webrtc::AudioDeviceModule::Stats>
+ GetAudioDeviceStats() = 0;
+};
+
+class VideoEngineInterface : public RtpHeaderExtensionQueryInterface {
+ public:
+ VideoEngineInterface() = default;
+ virtual ~VideoEngineInterface() = default;
+
+ VideoEngineInterface(const VideoEngineInterface&) = delete;
+ VideoEngineInterface& operator=(const VideoEngineInterface&) = delete;
+
+ virtual std::unique_ptr<VideoMediaSendChannelInterface> CreateSendChannel(
+ webrtc::Call* call,
+ const MediaConfig& config,
+ const VideoOptions& options,
+ const webrtc::CryptoOptions& crypto_options,
+ webrtc::VideoBitrateAllocatorFactory* video_bitrate_allocator_factory) {
+ // Default implementation, delete when all is updated
+ RTC_CHECK_NOTREACHED();
+ return nullptr;
+ }
+
+ virtual std::unique_ptr<VideoMediaReceiveChannelInterface>
+ CreateReceiveChannel(webrtc::Call* call,
+ const MediaConfig& config,
+ const VideoOptions& options,
+ const webrtc::CryptoOptions& crypto_options) {
+ // Default implementation, delete when all is updated
+ RTC_CHECK_NOTREACHED();
+ return nullptr;
+ }
+
+ // Retrieve list of supported codecs.
+ virtual std::vector<VideoCodec> send_codecs() const = 0;
+ virtual std::vector<VideoCodec> recv_codecs() const = 0;
+ // As above, but if include_rtx is false, don't include RTX codecs.
+ // TODO(bugs.webrtc.org/13931): Remove default implementation once
+ // upstream subclasses have converted.
+ virtual std::vector<VideoCodec> send_codecs(bool include_rtx) const {
+ RTC_DCHECK(include_rtx);
+ return send_codecs();
+ }
+ virtual std::vector<VideoCodec> recv_codecs(bool include_rtx) const {
+ RTC_DCHECK(include_rtx);
+ return recv_codecs();
+ }
+};
+
+// MediaEngineInterface is an abstraction of a media engine which can be
+// subclassed to support different media componentry backends.
+// It supports voice and video operations in the same class to facilitate
+// proper synchronization between both media types.
+class MediaEngineInterface {
+ public:
+ virtual ~MediaEngineInterface() {}
+
+ // Initialization. Needs to be called on the worker thread.
+ virtual bool Init() = 0;
+
+ virtual VoiceEngineInterface& voice() = 0;
+ virtual VideoEngineInterface& video() = 0;
+ virtual const VoiceEngineInterface& voice() const = 0;
+ virtual const VideoEngineInterface& video() const = 0;
+};
+
+// CompositeMediaEngine constructs a MediaEngine from separate
+// voice and video engine classes.
+// Optionally owns a FieldTrialsView trials map.
+class CompositeMediaEngine : public MediaEngineInterface {
+ public:
+ CompositeMediaEngine(std::unique_ptr<webrtc::FieldTrialsView> trials,
+ std::unique_ptr<VoiceEngineInterface> audio_engine,
+ std::unique_ptr<VideoEngineInterface> video_engine);
+ CompositeMediaEngine(std::unique_ptr<VoiceEngineInterface> audio_engine,
+ std::unique_ptr<VideoEngineInterface> video_engine);
+ ~CompositeMediaEngine() override;
+
+ // Always succeeds.
+ bool Init() override;
+
+ VoiceEngineInterface& voice() override;
+ VideoEngineInterface& video() override;
+ const VoiceEngineInterface& voice() const override;
+ const VideoEngineInterface& video() const override;
+
+ private:
+ const std::unique_ptr<webrtc::FieldTrialsView> trials_;
+ const std::unique_ptr<VoiceEngineInterface> voice_engine_;
+ const std::unique_ptr<VideoEngineInterface> video_engine_;
+};
+
+webrtc::RtpParameters CreateRtpParametersWithOneEncoding();
+webrtc::RtpParameters CreateRtpParametersWithEncodings(StreamParams sp);
+
+// Returns a vector of RTP extensions as visible from RtpSender/Receiver
+// GetCapabilities(). The returned vector only shows what will definitely be
+// offered by default, i.e. the list of extensions returned from
+// GetRtpHeaderExtensions() that are not kStopped.
+std::vector<webrtc::RtpExtension> GetDefaultEnabledRtpHeaderExtensions(
+ const RtpHeaderExtensionQueryInterface& query_interface);
+
+} // namespace cricket
+
+#endif // MEDIA_BASE_MEDIA_ENGINE_H_
diff --git a/third_party/libwebrtc/media/base/media_engine_unittest.cc b/third_party/libwebrtc/media/base/media_engine_unittest.cc
new file mode 100644
index 0000000000..b8db32a2d5
--- /dev/null
+++ b/third_party/libwebrtc/media/base/media_engine_unittest.cc
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/base/media_engine.h"
+
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+using ::testing::ElementsAre;
+using ::testing::Field;
+using ::testing::Return;
+using ::testing::StrEq;
+using ::webrtc::RtpExtension;
+using ::webrtc::RtpHeaderExtensionCapability;
+using ::webrtc::RtpTransceiverDirection;
+
+namespace cricket {
+namespace {
+
+class MockRtpHeaderExtensionQueryInterface
+ : public RtpHeaderExtensionQueryInterface {
+ public:
+ MOCK_METHOD(std::vector<RtpHeaderExtensionCapability>,
+ GetRtpHeaderExtensions,
+ (),
+ (const, override));
+};
+
+} // namespace
+
+TEST(MediaEngineTest, ReturnsNotStoppedHeaderExtensions) {
+ MockRtpHeaderExtensionQueryInterface mock;
+ std::vector<RtpHeaderExtensionCapability> extensions(
+ {RtpHeaderExtensionCapability("uri1", 1,
+ RtpTransceiverDirection::kInactive),
+ RtpHeaderExtensionCapability("uri2", 2,
+ RtpTransceiverDirection::kSendRecv),
+ RtpHeaderExtensionCapability("uri3", 3,
+ RtpTransceiverDirection::kStopped),
+ RtpHeaderExtensionCapability("uri4", 4,
+ RtpTransceiverDirection::kSendOnly),
+ RtpHeaderExtensionCapability("uri5", 5,
+ RtpTransceiverDirection::kRecvOnly)});
+ EXPECT_CALL(mock, GetRtpHeaderExtensions).WillOnce(Return(extensions));
+ EXPECT_THAT(GetDefaultEnabledRtpHeaderExtensions(mock),
+ ElementsAre(Field(&RtpExtension::uri, StrEq("uri1")),
+ Field(&RtpExtension::uri, StrEq("uri2")),
+ Field(&RtpExtension::uri, StrEq("uri4")),
+ Field(&RtpExtension::uri, StrEq("uri5"))));
+}
+
+// This class mocks methods declared as pure virtual in the interface.
+// Since the tests are aiming to check the patterns of overrides, the
+// functions with default implementations are not mocked.
+class MostlyMockVoiceEngineInterface : public VoiceEngineInterface {
+ public:
+ MOCK_METHOD(std::vector<webrtc::RtpHeaderExtensionCapability>,
+ GetRtpHeaderExtensions,
+ (),
+ (const, override));
+ MOCK_METHOD(void, Init, (), (override));
+ MOCK_METHOD(rtc::scoped_refptr<webrtc::AudioState>,
+ GetAudioState,
+ (),
+ (const, override));
+ MOCK_METHOD(std::vector<AudioCodec>&, send_codecs, (), (const, override));
+ MOCK_METHOD(std::vector<AudioCodec>&, recv_codecs, (), (const, override));
+ MOCK_METHOD(bool,
+ StartAecDump,
+ (webrtc::FileWrapper file, int64_t max_size_bytes),
+ (override));
+ MOCK_METHOD(void, StopAecDump, (), (override));
+ MOCK_METHOD(absl::optional<webrtc::AudioDeviceModule::Stats>,
+ GetAudioDeviceStats,
+ (),
+ (override));
+};
+
+} // namespace cricket
diff --git a/third_party/libwebrtc/media/base/rid_description.cc b/third_party/libwebrtc/media/base/rid_description.cc
new file mode 100644
index 0000000000..b3eae272f9
--- /dev/null
+++ b/third_party/libwebrtc/media/base/rid_description.cc
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/base/rid_description.h"
+
+namespace cricket {
+
+RidDescription::RidDescription() = default;
+RidDescription::RidDescription(const std::string& rid, RidDirection direction)
+ : rid{rid}, direction{direction} {}
+RidDescription::RidDescription(const RidDescription& other) = default;
+RidDescription::~RidDescription() = default;
+RidDescription& RidDescription::operator=(const RidDescription& other) =
+ default;
+bool RidDescription::operator==(const RidDescription& other) const {
+ return rid == other.rid && direction == other.direction &&
+ payload_types == other.payload_types &&
+ restrictions == other.restrictions;
+}
+
+} // namespace cricket
diff --git a/third_party/libwebrtc/media/base/rid_description.h b/third_party/libwebrtc/media/base/rid_description.h
new file mode 100644
index 0000000000..04c0f3d4bc
--- /dev/null
+++ b/third_party/libwebrtc/media/base/rid_description.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MEDIA_BASE_RID_DESCRIPTION_H_
+#define MEDIA_BASE_RID_DESCRIPTION_H_
+
+#include <map>
+#include <string>
+#include <vector>
+
+namespace cricket {
+
+enum class RidDirection { kSend, kReceive };
+
+// Description of a Restriction Id (RID) according to:
+// https://tools.ietf.org/html/draft-ietf-mmusic-rid-15
+// A Restriction Identifier serves two purposes:
+// 1. Uniquely identifies an RTP stream inside an RTP session.
+// When combined with MIDs (https://tools.ietf.org/html/rfc5888),
+// RIDs uniquely identify an RTP stream within an RTP session.
+// The MID will identify the media section and the RID will identify
+// the stream within the section.
+// RID identifiers must be unique within the media section.
+// 2. Allows indicating further restrictions to the stream.
+// These restrictions are added according to the direction specified.
+// The direction field identifies the direction of the RTP stream packets
+// to which the restrictions apply. The direction is independent of the
+// transceiver direction and can be one of {send, recv}.
+// The following are some examples of these restrictions:
+// a. max-width, max-height, max-fps, max-br, ...
+// b. further restricting the codec set (from what m= section specified)
+//
+// Note: Indicating dependencies between streams (using depend) will not be
+// supported, since the WG is adopting a different approach to achieve this.
+// As of 2018-12-04, the new SVC (Scalable Video Coder) approach is still not
+// mature enough to be implemented as part of this work.
+// See: https://w3c.github.io/webrtc-svc/ for more details.
+struct RidDescription final {
+ RidDescription();
+ RidDescription(const std::string& rid, RidDirection direction);
+ RidDescription(const RidDescription& other);
+ ~RidDescription();
+ RidDescription& operator=(const RidDescription& other);
+
+ // This is currently required for unit tests of StreamParams which contains
+ // RidDescription objects and checks for equality using operator==.
+ bool operator==(const RidDescription& other) const;
+ bool operator!=(const RidDescription& other) const {
+ return !(*this == other);
+ }
+
+ // The RID identifier that uniquely identifies the stream within the session.
+ std::string rid;
+
+ // Specifies the direction for which the specified restrictions hold.
+ // This direction is either send or receive and is independent of the
+ // direction of the transceiver.
+ // https://tools.ietf.org/html/draft-ietf-mmusic-rid-15#section-4 :
+ // The "direction" field identifies the direction of the RTP Stream
+ // packets to which the indicated restrictions are applied. It may be
+ // either "send" or "recv". Note that these restriction directions are
+ // expressed independently of any "inactive", "sendonly", "recvonly", or
+ // "sendrecv" attributes associated with the media section. It is, for
+ // example, valid to indicate "recv" restrictions on a "sendonly"
+ // stream; those restrictions would apply if, at a future point in time,
+ // the stream were changed to "sendrecv" or "recvonly".
+ RidDirection direction;
+
+ // The list of codec payload types for this stream.
+ // It should be a subset of the payloads supported for the media section.
+ std::vector<int> payload_types;
+
+ // Contains key-value pairs for restrictions.
+ // The keys are not validated against a known set.
+ // The meaning to infer for the values depends on each key.
+ // Examples:
+ // 1. An entry for max-width will have a value that is interpreted as an int.
+ // 2. An entry for max-bpp (bits per pixel) will have a float value.
+ // Interpretation (and validation of value) is left for the implementation.
+ // I.E. the media engines should validate values for parameters they support.
+ std::map<std::string, std::string> restrictions;
+};
+
+} // namespace cricket
+
+#endif // MEDIA_BASE_RID_DESCRIPTION_H_
diff --git a/third_party/libwebrtc/media/base/rtp_utils.cc b/third_party/libwebrtc/media/base/rtp_utils.cc
new file mode 100644
index 0000000000..c630cbc7e4
--- /dev/null
+++ b/third_party/libwebrtc/media/base/rtp_utils.cc
@@ -0,0 +1,401 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/base/rtp_utils.h"
+
+#include <string.h>
+
+#include <vector>
+
+// PacketTimeUpdateParams is defined in asyncpacketsocket.h.
+// TODO(sergeyu): Find more appropriate place for PacketTimeUpdateParams.
+#include "media/base/turn_utils.h"
+#include "modules/rtp_rtcp/source/rtp_util.h"
+#include "rtc_base/async_packet_socket.h"
+#include "rtc_base/byte_order.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/message_digest.h"
+
+namespace cricket {
+
+static const size_t kRtcpPayloadTypeOffset = 1;
+static const size_t kRtpExtensionHeaderLen = 4;
+static const size_t kAbsSendTimeExtensionLen = 3;
+static const size_t kOneByteExtensionHeaderLen = 1;
+static const size_t kTwoByteExtensionHeaderLen = 2;
+
+namespace {
+
+// Fake auth tag written by the sender when external authentication is enabled.
+// HMAC in packet will be compared against this value before updating packet
+// with actual HMAC value.
+static const uint8_t kFakeAuthTag[10] = {0xba, 0xdd, 0xba, 0xdd, 0xba,
+ 0xdd, 0xba, 0xdd, 0xba, 0xdd};
+
+void UpdateAbsSendTimeExtensionValue(uint8_t* extension_data,
+ size_t length,
+ uint64_t time_us) {
+ // Absolute send time in RTP streams.
+ //
+ // The absolute send time is signaled to the receiver in-band using the
+ // general mechanism for RTP header extensions [RFC5285]. The payload
+ // of this extension (the transmitted value) is a 24-bit unsigned integer
+ // containing the sender's current time in seconds as a fixed point number
+ // with 18 bits fractional part.
+ //
+ // The form of the absolute send time extension block:
+ //
+ // 0 1 2 3
+ // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ // | ID | len=2 | absolute send time |
+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ if (length != kAbsSendTimeExtensionLen) {
+ RTC_DCHECK_NOTREACHED();
+ return;
+ }
+
+ // Convert microseconds to a 6.18 fixed point value in seconds.
+ uint32_t send_time = ((time_us << 18) / 1000000) & 0x00FFFFFF;
+ extension_data[0] = static_cast<uint8_t>(send_time >> 16);
+ extension_data[1] = static_cast<uint8_t>(send_time >> 8);
+ extension_data[2] = static_cast<uint8_t>(send_time);
+}
+
+// Assumes `length` is actual packet length + tag length. Updates HMAC at end of
+// the RTP packet.
+void UpdateRtpAuthTag(uint8_t* rtp,
+ size_t length,
+ const rtc::PacketTimeUpdateParams& packet_time_params) {
+ // If there is no key, return.
+ if (packet_time_params.srtp_auth_key.empty()) {
+ return;
+ }
+
+ size_t tag_length = packet_time_params.srtp_auth_tag_len;
+
+ // ROC (rollover counter) is at the beginning of the auth tag.
+ const size_t kRocLength = 4;
+ if (tag_length < kRocLength || tag_length > length) {
+ RTC_DCHECK_NOTREACHED();
+ return;
+ }
+
+ uint8_t* auth_tag = rtp + (length - tag_length);
+
+ // We should have a fake HMAC value @ auth_tag.
+ RTC_DCHECK_EQ(0, memcmp(auth_tag, kFakeAuthTag, tag_length));
+
+ // Copy ROC after end of rtp packet.
+ memcpy(auth_tag, &packet_time_params.srtp_packet_index, kRocLength);
+ // Authentication of a RTP packet will have RTP packet + ROC size.
+ size_t auth_required_length = length - tag_length + kRocLength;
+
+ uint8_t output[64];
+ size_t result =
+ rtc::ComputeHmac(rtc::DIGEST_SHA_1, &packet_time_params.srtp_auth_key[0],
+ packet_time_params.srtp_auth_key.size(), rtp,
+ auth_required_length, output, sizeof(output));
+
+ if (result < tag_length) {
+ RTC_DCHECK_NOTREACHED();
+ return;
+ }
+
+ // Copy HMAC from output to packet. This is required as auth tag length
+ // may not be equal to the actual HMAC length.
+ memcpy(auth_tag, output, tag_length);
+}
+
+bool GetUint8(const void* data, size_t offset, int* value) {
+ if (!data || !value) {
+ return false;
+ }
+ *value = *(static_cast<const uint8_t*>(data) + offset);
+ return true;
+}
+
+} // namespace
+
+bool GetRtcpType(const void* data, size_t len, int* value) {
+ if (len < kMinRtcpPacketLen) {
+ return false;
+ }
+ return GetUint8(data, kRtcpPayloadTypeOffset, value);
+}
+
+// This method returns SSRC first of RTCP packet, except if packet is SDES.
+// TODO(mallinath) - Fully implement RFC 5506. This standard doesn't restrict
+// to send non-compound packets only to feedback messages.
+bool GetRtcpSsrc(const void* data, size_t len, uint32_t* value) {
+ // Packet should be at least of 8 bytes, to get SSRC from a RTCP packet.
+ if (!data || len < kMinRtcpPacketLen + 4 || !value)
+ return false;
+ int pl_type;
+ if (!GetRtcpType(data, len, &pl_type))
+ return false;
+ // SDES packet parsing is not supported.
+ if (pl_type == kRtcpTypeSDES)
+ return false;
+ *value = rtc::GetBE32(static_cast<const uint8_t*>(data) + 4);
+ return true;
+}
+
+bool IsValidRtpPayloadType(int payload_type) {
+ return payload_type >= 0 && payload_type <= 127;
+}
+
+bool IsValidRtpPacketSize(RtpPacketType packet_type, size_t size) {
+ RTC_DCHECK_NE(RtpPacketType::kUnknown, packet_type);
+ size_t min_packet_length = packet_type == RtpPacketType::kRtcp
+ ? kMinRtcpPacketLen
+ : kMinRtpPacketLen;
+ return size >= min_packet_length && size <= kMaxRtpPacketLen;
+}
+
+absl::string_view RtpPacketTypeToString(RtpPacketType packet_type) {
+ switch (packet_type) {
+ case RtpPacketType::kRtp:
+ return "RTP";
+ case RtpPacketType::kRtcp:
+ return "RTCP";
+ case RtpPacketType::kUnknown:
+ return "Unknown";
+ }
+ RTC_CHECK_NOTREACHED();
+}
+
+RtpPacketType InferRtpPacketType(rtc::ArrayView<const char> packet) {
+ if (webrtc::IsRtcpPacket(
+ rtc::reinterpret_array_view<const uint8_t>(packet))) {
+ return RtpPacketType::kRtcp;
+ }
+ if (webrtc::IsRtpPacket(rtc::reinterpret_array_view<const uint8_t>(packet))) {
+ return RtpPacketType::kRtp;
+ }
+ return RtpPacketType::kUnknown;
+}
+
+bool ValidateRtpHeader(const uint8_t* rtp,
+ size_t length,
+ size_t* header_length) {
+ if (header_length) {
+ *header_length = 0;
+ }
+
+ if (length < kMinRtpPacketLen) {
+ return false;
+ }
+
+ size_t cc_count = rtp[0] & 0x0F;
+ size_t header_length_without_extension = kMinRtpPacketLen + 4 * cc_count;
+ if (header_length_without_extension > length) {
+ return false;
+ }
+
+ // If extension bit is not set, we are done with header processing, as input
+ // length is verified above.
+ if (!(rtp[0] & 0x10)) {
+ if (header_length)
+ *header_length = header_length_without_extension;
+
+ return true;
+ }
+
+ rtp += header_length_without_extension;
+
+ if (header_length_without_extension + kRtpExtensionHeaderLen > length) {
+ return false;
+ }
+
+ // Getting extension profile length.
+ // Length is in 32 bit words.
+ uint16_t extension_length_in_32bits = rtc::GetBE16(rtp + 2);
+ size_t extension_length = extension_length_in_32bits * 4;
+
+ size_t rtp_header_length = extension_length +
+ header_length_without_extension +
+ kRtpExtensionHeaderLen;
+
+ // Verify input length against total header size.
+ if (rtp_header_length > length) {
+ return false;
+ }
+
+ if (header_length) {
+ *header_length = rtp_header_length;
+ }
+ return true;
+}
+
+// ValidateRtpHeader() must be called before this method to make sure, we have
+// a sane rtp packet.
+bool UpdateRtpAbsSendTimeExtension(uint8_t* rtp,
+ size_t length,
+ int extension_id,
+ uint64_t time_us) {
+ // 0 1 2 3
+ // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ // |V=2|P|X| CC |M| PT | sequence number |
+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ // | timestamp |
+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ // | synchronization source (SSRC) identifier |
+ // +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
+ // | contributing source (CSRC) identifiers |
+ // | .... |
+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ // Return if extension bit is not set.
+ if (!(rtp[0] & 0x10)) {
+ return true;
+ }
+
+ size_t cc_count = rtp[0] & 0x0F;
+ size_t header_length_without_extension = kMinRtpPacketLen + 4 * cc_count;
+
+ rtp += header_length_without_extension;
+
+ // Getting extension profile ID and length.
+ uint16_t profile_id = rtc::GetBE16(rtp);
+ // Length is in 32 bit words.
+ uint16_t extension_length_in_32bits = rtc::GetBE16(rtp + 2);
+ size_t extension_length = extension_length_in_32bits * 4;
+
+ rtp += kRtpExtensionHeaderLen; // Moving past extension header.
+
+ constexpr uint16_t kOneByteExtensionProfileId = 0xBEDE;
+ constexpr uint16_t kTwoByteExtensionProfileId = 0x1000;
+
+ bool found = false;
+ if (profile_id == kOneByteExtensionProfileId ||
+ profile_id == kTwoByteExtensionProfileId) {
+ // OneByte extension header
+ // 0
+ // 0 1 2 3 4 5 6 7
+ // +-+-+-+-+-+-+-+-+
+ // | ID |length |
+ // +-+-+-+-+-+-+-+-+
+
+ // 0 1 2 3
+ // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ // | 0xBE | 0xDE | length=3 |
+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ // | ID | L=0 | data | ID | L=1 | data...
+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ // ...data | 0 (pad) | 0 (pad) | ID | L=3 |
+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ // | data |
+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ // TwoByte extension header
+ // 0
+ // 0 1 2 3 4 5 6 7
+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ // | ID | length |
+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ // 0 1 2 3
+ // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ // | 0x10 | 0x00 | length=3 |
+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ // | ID | L=1 | data | ID |
+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ // | L=2 | data | 0 (pad) |
+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ // | ID | L=2 | data |
+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ size_t extension_header_length = profile_id == kOneByteExtensionProfileId
+ ? kOneByteExtensionHeaderLen
+ : kTwoByteExtensionHeaderLen;
+
+ const uint8_t* extension_start = rtp;
+ const uint8_t* extension_end = extension_start + extension_length;
+
+ // rtp + 1 since the minimum size per header extension is two bytes for both
+ // one- and two-byte header extensions.
+ while (rtp + 1 < extension_end) {
+ // See RFC8285 Section 4.2-4.3 for more information about one- and
+ // two-byte header extensions.
+ const int id =
+ profile_id == kOneByteExtensionProfileId ? (*rtp & 0xF0) >> 4 : *rtp;
+ const size_t length = profile_id == kOneByteExtensionProfileId
+ ? (*rtp & 0x0F) + 1
+ : *(rtp + 1);
+ if (rtp + extension_header_length + length > extension_end) {
+ return false;
+ }
+ if (id == extension_id) {
+ UpdateAbsSendTimeExtensionValue(rtp + extension_header_length, length,
+ time_us);
+ found = true;
+ break;
+ }
+ rtp += extension_header_length + length;
+ // Counting padding bytes.
+ while ((rtp < extension_end) && (*rtp == 0)) {
+ ++rtp;
+ }
+ }
+ }
+ return found;
+}
+
+bool ApplyPacketOptions(uint8_t* data,
+ size_t length,
+ const rtc::PacketTimeUpdateParams& packet_time_params,
+ uint64_t time_us) {
+ RTC_DCHECK(data);
+ RTC_DCHECK(length);
+
+ // if there is no valid `rtp_sendtime_extension_id` and `srtp_auth_key` in
+ // PacketOptions, nothing to be updated in this packet.
+ if (packet_time_params.rtp_sendtime_extension_id == -1 &&
+ packet_time_params.srtp_auth_key.empty()) {
+ return true;
+ }
+
+ // If there is a srtp auth key present then the packet must be an RTP packet.
+ // RTP packet may have been wrapped in a TURN Channel Data or TURN send
+ // indication.
+ size_t rtp_start_pos;
+ size_t rtp_length;
+ if (!UnwrapTurnPacket(data, length, &rtp_start_pos, &rtp_length)) {
+ RTC_DCHECK_NOTREACHED();
+ return false;
+ }
+
+ // Making sure we have a valid RTP packet at the end.
+ auto packet = rtc::MakeArrayView(data + rtp_start_pos, rtp_length);
+ if (!webrtc::IsRtpPacket(packet) ||
+ !ValidateRtpHeader(data + rtp_start_pos, rtp_length, nullptr)) {
+ RTC_DCHECK_NOTREACHED();
+ return false;
+ }
+
+ uint8_t* start = data + rtp_start_pos;
+ // If packet option has non default value (-1) for sendtime extension id,
+ // then we should parse the rtp packet to update the timestamp. Otherwise
+ // just calculate HMAC and update packet with it.
+ if (packet_time_params.rtp_sendtime_extension_id != -1) {
+ UpdateRtpAbsSendTimeExtension(start, rtp_length,
+ packet_time_params.rtp_sendtime_extension_id,
+ time_us);
+ }
+
+ UpdateRtpAuthTag(start, rtp_length, packet_time_params);
+ return true;
+}
+
+} // namespace cricket
diff --git a/third_party/libwebrtc/media/base/rtp_utils.h b/third_party/libwebrtc/media/base/rtp_utils.h
new file mode 100644
index 0000000000..a501fd7af3
--- /dev/null
+++ b/third_party/libwebrtc/media/base/rtp_utils.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MEDIA_BASE_RTP_UTILS_H_
+#define MEDIA_BASE_RTP_UTILS_H_
+
+#include "absl/strings/string_view.h"
+#include "api/array_view.h"
+#include "rtc_base/byte_order.h"
+#include "rtc_base/system/rtc_export.h"
+
+namespace rtc {
+struct PacketTimeUpdateParams;
+} // namespace rtc
+
+namespace cricket {
+
+const size_t kMinRtpPacketLen = 12;
+const size_t kMaxRtpPacketLen = 2048;
+const size_t kMinRtcpPacketLen = 4;
+
+enum RtcpTypes {
+ kRtcpTypeSR = 200, // Sender report payload type.
+ kRtcpTypeRR = 201, // Receiver report payload type.
+ kRtcpTypeSDES = 202, // SDES payload type.
+ kRtcpTypeBye = 203, // BYE payload type.
+ kRtcpTypeApp = 204, // APP payload type.
+ kRtcpTypeRTPFB = 205, // Transport layer Feedback message payload type.
+ kRtcpTypePSFB = 206, // Payload-specific Feedback message payload type.
+};
+
+enum class RtpPacketType {
+ kRtp,
+ kRtcp,
+ kUnknown,
+};
+
+bool GetRtcpType(const void* data, size_t len, int* value);
+bool GetRtcpSsrc(const void* data, size_t len, uint32_t* value);
+
+// Checks the packet header to determine if it can be an RTP or RTCP packet.
+RtpPacketType InferRtpPacketType(rtc::ArrayView<const char> packet);
+// True if `payload_type` is 0-127.
+bool IsValidRtpPayloadType(int payload_type);
+
+// True if `size` is appropriate for the indicated packet type.
+bool IsValidRtpPacketSize(RtpPacketType packet_type, size_t size);
+
+// Returns "RTCP", "RTP" or "Unknown" according to `packet_type`.
+absl::string_view RtpPacketTypeToString(RtpPacketType packet_type);
+
+// Verifies that a packet has a valid RTP header.
+bool RTC_EXPORT ValidateRtpHeader(const uint8_t* rtp,
+ size_t length,
+ size_t* header_length);
+
+// Helper method which updates the absolute send time extension if present.
+bool UpdateRtpAbsSendTimeExtension(uint8_t* rtp,
+ size_t length,
+ int extension_id,
+ uint64_t time_us);
+
+// Applies specified `options` to the packet. It updates the absolute send time
+// extension header if it is present, then updates the HMAC.
+bool RTC_EXPORT
+ApplyPacketOptions(uint8_t* data,
+ size_t length,
+ const rtc::PacketTimeUpdateParams& packet_time_params,
+ uint64_t time_us);
+
+} // namespace cricket
+
+#endif // MEDIA_BASE_RTP_UTILS_H_
diff --git a/third_party/libwebrtc/media/base/rtp_utils_unittest.cc b/third_party/libwebrtc/media/base/rtp_utils_unittest.cc
new file mode 100644
index 0000000000..a594f944c0
--- /dev/null
+++ b/third_party/libwebrtc/media/base/rtp_utils_unittest.cc
@@ -0,0 +1,303 @@
+/*
+ * Copyright (c) 2004 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/base/rtp_utils.h"
+
+#include <string.h>
+
+#include <cstdint>
+#include <vector>
+
+#include "media/base/fake_rtp.h"
+#include "rtc_base/async_packet_socket.h"
+#include "test/gtest.h"
+
+namespace cricket {
+
+static const uint8_t kInvalidPacket[] = {0x80, 0x00};
+
+// PT = 206, FMT = 1, Sender SSRC = 0x1111, Media SSRC = 0x1111
+// No FCI information is needed for PLI.
+static const uint8_t kNonCompoundRtcpPliFeedbackPacket[] = {
+ 0x81, 0xCE, 0x00, 0x0C, 0x00, 0x00, 0x11, 0x11, 0x00, 0x00, 0x11, 0x11};
+
+// Packet has only mandatory fixed RTCP header
+// PT = 204, SSRC = 0x1111
+static const uint8_t kNonCompoundRtcpAppPacket[] = {0x81, 0xCC, 0x00, 0x0C,
+ 0x00, 0x00, 0x11, 0x11};
+
+// PT = 202, Source count = 0
+static const uint8_t kNonCompoundRtcpSDESPacket[] = {0x80, 0xCA, 0x00, 0x00};
+
+static uint8_t kFakeTag[4] = {0xba, 0xdd, 0xba, 0xdd};
+static uint8_t kTestKey[] = "12345678901234567890";
+static uint8_t kTestAstValue[3] = {0xaa, 0xbb, 0xcc};
+
+// Valid rtp Message with 2 byte header extension.
+static uint8_t kRtpMsgWith2ByteExtnHeader[] = {
+ // clang-format off
+ // clang formatting doesn't respect inline comments.
+ 0x90, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0xAA, 0xBB, 0xCC, 0XDD, // SSRC
+ 0x10, 0x00, 0x00, 0x01, // 2 Byte header extension
+ 0x01, 0x00, 0x00, 0x00
+ // clang-format on
+};
+
+// RTP packet with two one-byte header extensions. The last 4 bytes consist of
+// abs-send-time with extension id = 3 and length = 3.
+static uint8_t kRtpMsgWithOneByteAbsSendTimeExtension[] = {
+ 0x90, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xBE, 0xDE, 0x00, 0x02, 0x22, 0x00, 0x02, 0x1c, 0x32, 0xaa, 0xbb, 0xcc,
+};
+
+// RTP packet with two two-byte header extensions. The last 5 bytes consist of
+// abs-send-time with extension id = 3 and length = 3.
+static uint8_t kRtpMsgWithTwoByteAbsSendTimeExtension[] = {
+ 0x90, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x10, 0x00, 0x00, 0x02, 0x02, 0x01, 0x02, 0x03, 0x03, 0xaa, 0xbb, 0xcc,
+};
+
+// Index of AbsSendTimeExtn data in message
+// `kRtpMsgWithOneByteAbsSendTimeExtension`.
+static const int kAstIndexInOneByteRtpMsg = 21;
+// and in message `kRtpMsgWithTwoByteAbsSendTimeExtension`.
+static const int kAstIndexInTwoByteRtpMsg = 21;
+
+static const rtc::ArrayView<const char> kPcmuFrameArrayView =
+ rtc::MakeArrayView(reinterpret_cast<const char*>(kPcmuFrame),
+ sizeof(kPcmuFrame));
+static const rtc::ArrayView<const char> kRtcpReportArrayView =
+ rtc::MakeArrayView(reinterpret_cast<const char*>(kRtcpReport),
+ sizeof(kRtcpReport));
+static const rtc::ArrayView<const char> kInvalidPacketArrayView =
+ rtc::MakeArrayView(reinterpret_cast<const char*>(kInvalidPacket),
+ sizeof(kInvalidPacket));
+
+TEST(RtpUtilsTest, GetRtcp) {
+ int pt;
+ EXPECT_TRUE(GetRtcpType(kRtcpReport, sizeof(kRtcpReport), &pt));
+ EXPECT_EQ(0xc9, pt);
+
+ EXPECT_FALSE(GetRtcpType(kInvalidPacket, sizeof(kInvalidPacket), &pt));
+
+ uint32_t ssrc;
+ EXPECT_TRUE(GetRtcpSsrc(kNonCompoundRtcpPliFeedbackPacket,
+ sizeof(kNonCompoundRtcpPliFeedbackPacket), &ssrc));
+ EXPECT_TRUE(GetRtcpSsrc(kNonCompoundRtcpAppPacket,
+ sizeof(kNonCompoundRtcpAppPacket), &ssrc));
+ EXPECT_FALSE(GetRtcpSsrc(kNonCompoundRtcpSDESPacket,
+ sizeof(kNonCompoundRtcpSDESPacket), &ssrc));
+}
+
+// Invalid RTP packets.
+TEST(RtpUtilsTest, InvalidRtpHeader) {
+ // Rtp message with invalid length.
+ const uint8_t kRtpMsgWithInvalidLength[] = {
+ // clang-format off
+ // clang formatting doesn't respect inline comments.
+ 0x94, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xAA, 0xBB, 0xCC, 0XDD, // SSRC
+ 0xDD, 0xCC, 0xBB, 0xAA, // Only 1 CSRC, but CC count is 4.
+ // clang-format on
+ };
+ EXPECT_FALSE(ValidateRtpHeader(kRtpMsgWithInvalidLength,
+ sizeof(kRtpMsgWithInvalidLength), nullptr));
+
+ // Rtp message with single byte header extension, invalid extension length.
+ const uint8_t kRtpMsgWithInvalidExtnLength[] = {
+ 0x90, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xBE, 0xDE, 0x0A, 0x00, // Extn length - 0x0A00
+ };
+ EXPECT_FALSE(ValidateRtpHeader(kRtpMsgWithInvalidExtnLength,
+ sizeof(kRtpMsgWithInvalidExtnLength),
+ nullptr));
+}
+
+// Valid RTP packet with a 2byte header extension.
+TEST(RtpUtilsTest, Valid2ByteExtnHdrRtpMessage) {
+ EXPECT_TRUE(ValidateRtpHeader(kRtpMsgWith2ByteExtnHeader,
+ sizeof(kRtpMsgWith2ByteExtnHeader), nullptr));
+}
+
+// Valid RTP packet which has 1 byte header AbsSendTime extension in it.
+TEST(RtpUtilsTest, ValidRtpPacketWithOneByteAbsSendTimeExtension) {
+ EXPECT_TRUE(ValidateRtpHeader(kRtpMsgWithOneByteAbsSendTimeExtension,
+ sizeof(kRtpMsgWithOneByteAbsSendTimeExtension),
+ nullptr));
+}
+
+// Valid RTP packet which has 2 byte header AbsSendTime extension in it.
+TEST(RtpUtilsTest, ValidRtpPacketWithTwoByteAbsSendTimeExtension) {
+ EXPECT_TRUE(ValidateRtpHeader(kRtpMsgWithTwoByteAbsSendTimeExtension,
+ sizeof(kRtpMsgWithTwoByteAbsSendTimeExtension),
+ nullptr));
+}
+
+// Verify finding an extension ID in the TURN send indication message.
+TEST(RtpUtilsTest, UpdateAbsSendTimeExtensionInTurnSendIndication) {
+ // A valid STUN indication message with a valid RTP header in data attribute
+ // payload field and no extension bit set.
+ uint8_t message_without_extension[] = {
+ // clang-format off
+ // clang formatting doesn't respect inline comments.
+ 0x00, 0x16, 0x00, 0x18, // length of
+ 0x21, 0x12, 0xA4, 0x42, // magic cookie
+ '0', '1', '2', '3', // transaction id
+ '4', '5', '6', '7',
+ '8', '9', 'a', 'b',
+ 0x00, 0x20, 0x00, 0x04, // Mapped address.
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x13, 0x00, 0x0C, // Data attribute.
+ 0x80, 0x00, 0x00, 0x00, // RTP packet.
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ // clang-format on
+ };
+ EXPECT_TRUE(UpdateRtpAbsSendTimeExtension(
+ message_without_extension, sizeof(message_without_extension), 3, 0));
+
+ // A valid STUN indication message with a valid RTP header and a extension
+ // header.
+ uint8_t message[] = {
+ // clang-format off
+ // clang formatting doesn't respect inline comments.
+ 0x00, 0x16, 0x00, 0x24, // length of
+ 0x21, 0x12, 0xA4, 0x42, // magic cookie
+ '0', '1', '2', '3', // transaction id
+ '4', '5', '6', '7',
+ '8', '9', 'a', 'b',
+ 0x00, 0x20, 0x00, 0x04, // Mapped address.
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x13, 0x00, 0x18, // Data attribute.
+ 0x90, 0x00, 0x00, 0x00, // RTP packet.
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xBE, 0xDE,
+ 0x00, 0x02, 0x22, 0xaa, 0xbb, 0xcc, 0x32, 0xaa, 0xbb, 0xcc,
+ // clang-format on
+ };
+ EXPECT_TRUE(UpdateRtpAbsSendTimeExtension(message, sizeof(message), 3, 0));
+}
+
+// Test without any packet option variables set. This method should return
+// without updating the HMAC value in the packet.
+TEST(RtpUtilsTest, ApplyPacketOptionsWithDefaultValues) {
+ rtc::PacketTimeUpdateParams packet_time_params;
+ std::vector<uint8_t> rtp_packet(
+ kRtpMsgWithOneByteAbsSendTimeExtension,
+ kRtpMsgWithOneByteAbsSendTimeExtension +
+ sizeof(kRtpMsgWithOneByteAbsSendTimeExtension));
+ rtp_packet.insert(rtp_packet.end(), kFakeTag, kFakeTag + sizeof(kFakeTag));
+ EXPECT_TRUE(ApplyPacketOptions(&rtp_packet[0], rtp_packet.size(),
+ packet_time_params, 0));
+
+  // Making sure the HMAC wasn't updated.
+ EXPECT_EQ(0,
+ memcmp(&rtp_packet[sizeof(kRtpMsgWithOneByteAbsSendTimeExtension)],
+ kFakeTag, 4));
+
+  // Verify AbsoluteSendTime extension field wasn't modified.
+ EXPECT_EQ(0, memcmp(&rtp_packet[kAstIndexInOneByteRtpMsg], kTestAstValue,
+ sizeof(kTestAstValue)));
+}
+
+// Verify HMAC is updated when packet option parameters are set.
+TEST(RtpUtilsTest, ApplyPacketOptionsWithAuthParams) {
+ rtc::PacketTimeUpdateParams packet_time_params;
+ packet_time_params.srtp_auth_key.assign(kTestKey,
+ kTestKey + sizeof(kTestKey));
+ packet_time_params.srtp_auth_tag_len = 4;
+
+ std::vector<uint8_t> rtp_packet(
+ kRtpMsgWithOneByteAbsSendTimeExtension,
+ kRtpMsgWithOneByteAbsSendTimeExtension +
+ sizeof(kRtpMsgWithOneByteAbsSendTimeExtension));
+ rtp_packet.insert(rtp_packet.end(), kFakeTag, kFakeTag + sizeof(kFakeTag));
+ EXPECT_TRUE(ApplyPacketOptions(&rtp_packet[0], rtp_packet.size(),
+ packet_time_params, 0));
+
+ uint8_t kExpectedTag[] = {0xc1, 0x7a, 0x8c, 0xa0};
+ EXPECT_EQ(0,
+ memcmp(&rtp_packet[sizeof(kRtpMsgWithOneByteAbsSendTimeExtension)],
+ kExpectedTag, sizeof(kExpectedTag)));
+
+  // Verify AbsoluteSendTime extension field is not modified.
+ EXPECT_EQ(0, memcmp(&rtp_packet[kAstIndexInOneByteRtpMsg], kTestAstValue,
+ sizeof(kTestAstValue)));
+}
+
+// Verify finding an extension ID in a raw rtp message.
+TEST(RtpUtilsTest, UpdateOneByteAbsSendTimeExtensionInRtpPacket) {
+ std::vector<uint8_t> rtp_packet(
+ kRtpMsgWithOneByteAbsSendTimeExtension,
+ kRtpMsgWithOneByteAbsSendTimeExtension +
+ sizeof(kRtpMsgWithOneByteAbsSendTimeExtension));
+
+ EXPECT_TRUE(UpdateRtpAbsSendTimeExtension(&rtp_packet[0], rtp_packet.size(),
+ 3, 51183266));
+
+ // Verify that the timestamp was updated.
+ const uint8_t kExpectedTimestamp[3] = {0xcc, 0xbb, 0xaa};
+ EXPECT_EQ(0, memcmp(&rtp_packet[kAstIndexInOneByteRtpMsg], kExpectedTimestamp,
+ sizeof(kExpectedTimestamp)));
+}
+
+// Verify finding an extension ID in a raw rtp message.
+TEST(RtpUtilsTest, UpdateTwoByteAbsSendTimeExtensionInRtpPacket) {
+ std::vector<uint8_t> rtp_packet(
+ kRtpMsgWithTwoByteAbsSendTimeExtension,
+ kRtpMsgWithTwoByteAbsSendTimeExtension +
+ sizeof(kRtpMsgWithTwoByteAbsSendTimeExtension));
+
+ EXPECT_TRUE(UpdateRtpAbsSendTimeExtension(&rtp_packet[0], rtp_packet.size(),
+ 3, 51183266));
+
+ // Verify that the timestamp was updated.
+ const uint8_t kExpectedTimestamp[3] = {0xcc, 0xbb, 0xaa};
+ EXPECT_EQ(0, memcmp(&rtp_packet[kAstIndexInTwoByteRtpMsg], kExpectedTimestamp,
+ sizeof(kExpectedTimestamp)));
+}
+
+// Verify we update both AbsSendTime extension header and HMAC.
+TEST(RtpUtilsTest, ApplyPacketOptionsWithAuthParamsAndAbsSendTime) {
+ rtc::PacketTimeUpdateParams packet_time_params;
+ packet_time_params.srtp_auth_key.assign(kTestKey,
+ kTestKey + sizeof(kTestKey));
+ packet_time_params.srtp_auth_tag_len = 4;
+ packet_time_params.rtp_sendtime_extension_id = 3;
+ // 3 is also present in the test message.
+
+ std::vector<uint8_t> rtp_packet(
+ kRtpMsgWithOneByteAbsSendTimeExtension,
+ kRtpMsgWithOneByteAbsSendTimeExtension +
+ sizeof(kRtpMsgWithOneByteAbsSendTimeExtension));
+ rtp_packet.insert(rtp_packet.end(), kFakeTag, kFakeTag + sizeof(kFakeTag));
+ EXPECT_TRUE(ApplyPacketOptions(&rtp_packet[0], rtp_packet.size(),
+ packet_time_params, 51183266));
+
+ const uint8_t kExpectedTag[] = {0x81, 0xd1, 0x2c, 0x0e};
+ EXPECT_EQ(0,
+ memcmp(&rtp_packet[sizeof(kRtpMsgWithOneByteAbsSendTimeExtension)],
+ kExpectedTag, sizeof(kExpectedTag)));
+
+ // Verify that the timestamp was updated.
+ const uint8_t kExpectedTimestamp[3] = {0xcc, 0xbb, 0xaa};
+ EXPECT_EQ(0, memcmp(&rtp_packet[kAstIndexInOneByteRtpMsg], kExpectedTimestamp,
+ sizeof(kExpectedTimestamp)));
+}
+
+TEST(RtpUtilsTest, InferRtpPacketType) {
+ EXPECT_EQ(RtpPacketType::kRtp, InferRtpPacketType(kPcmuFrameArrayView));
+ EXPECT_EQ(RtpPacketType::kRtcp, InferRtpPacketType(kRtcpReportArrayView));
+ EXPECT_EQ(RtpPacketType::kUnknown,
+ InferRtpPacketType(kInvalidPacketArrayView));
+}
+
+} // namespace cricket
diff --git a/third_party/libwebrtc/media/base/sdp_video_format_utils.cc b/third_party/libwebrtc/media/base/sdp_video_format_utils.cc
new file mode 100644
index 0000000000..a156afdc02
--- /dev/null
+++ b/third_party/libwebrtc/media/base/sdp_video_format_utils.cc
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/base/sdp_video_format_utils.h"
+
+#include <cstring>
+#include <map>
+#include <utility>
+
+#include "api/video_codecs/h264_profile_level_id.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/string_to_number.h"
+
+namespace webrtc {
+namespace {
+const char kProfileLevelId[] = "profile-level-id";
+const char kH264LevelAsymmetryAllowed[] = "level-asymmetry-allowed";
+// Max frame rate for VP8 and VP9 video.
+const char kVPxFmtpMaxFrameRate[] = "max-fr";
+// Max frame size for VP8 and VP9 video.
+const char kVPxFmtpMaxFrameSize[] = "max-fs";
+const int kVPxFmtpFrameSizeSubBlockPixels = 256;
+
+bool IsH264LevelAsymmetryAllowed(const SdpVideoFormat::Parameters& params) {
+ const auto it = params.find(kH264LevelAsymmetryAllowed);
+ return it != params.end() && strcmp(it->second.c_str(), "1") == 0;
+}
+
+// Compare H264 levels and handle the level 1b case.
+bool H264LevelIsLess(H264Level a, H264Level b) {
+ if (a == H264Level::kLevel1_b)
+ return b != H264Level::kLevel1 && b != H264Level::kLevel1_b;
+ if (b == H264Level::kLevel1_b)
+ return a == H264Level::kLevel1;
+ return a < b;
+}
+
+H264Level H264LevelMin(H264Level a, H264Level b) {
+ return H264LevelIsLess(a, b) ? a : b;
+}
+
+absl::optional<int> ParsePositiveNumberFromParams(
+ const SdpVideoFormat::Parameters& params,
+ const char* parameter_name) {
+ const auto max_frame_rate_it = params.find(parameter_name);
+ if (max_frame_rate_it == params.end())
+ return absl::nullopt;
+
+ const absl::optional<int> i =
+ rtc::StringToNumber<int>(max_frame_rate_it->second);
+ if (!i.has_value() || i.value() <= 0)
+ return absl::nullopt;
+ return i;
+}
+
+} // namespace
+
+// Set level according to https://tools.ietf.org/html/rfc6184#section-8.2.2.
+void H264GenerateProfileLevelIdForAnswer(
+ const SdpVideoFormat::Parameters& local_supported_params,
+ const SdpVideoFormat::Parameters& remote_offered_params,
+ SdpVideoFormat::Parameters* answer_params) {
+ // If both local and remote haven't set profile-level-id, they are both using
+ // the default profile. In this case, don't set profile-level-id in answer
+ // either.
+ if (!local_supported_params.count(kProfileLevelId) &&
+ !remote_offered_params.count(kProfileLevelId)) {
+ return;
+ }
+
+ // Parse profile-level-ids.
+ const absl::optional<H264ProfileLevelId> local_profile_level_id =
+ ParseSdpForH264ProfileLevelId(local_supported_params);
+ const absl::optional<H264ProfileLevelId> remote_profile_level_id =
+ ParseSdpForH264ProfileLevelId(remote_offered_params);
+ // The local and remote codec must have valid and equal H264 Profiles.
+ RTC_DCHECK(local_profile_level_id);
+ RTC_DCHECK(remote_profile_level_id);
+ RTC_DCHECK_EQ(local_profile_level_id->profile,
+ remote_profile_level_id->profile);
+
+ // Parse level information.
+ const bool level_asymmetry_allowed =
+ IsH264LevelAsymmetryAllowed(local_supported_params) &&
+ IsH264LevelAsymmetryAllowed(remote_offered_params);
+ const H264Level local_level = local_profile_level_id->level;
+ const H264Level remote_level = remote_profile_level_id->level;
+ const H264Level min_level = H264LevelMin(local_level, remote_level);
+
+ // Determine answer level. When level asymmetry is not allowed, level upgrade
+ // is not allowed, i.e., the level in the answer must be equal to or lower
+ // than the level in the offer.
+ const H264Level answer_level =
+ level_asymmetry_allowed ? local_level : min_level;
+
+ // Set the resulting profile-level-id in the answer parameters.
+ (*answer_params)[kProfileLevelId] = *H264ProfileLevelIdToString(
+ H264ProfileLevelId(local_profile_level_id->profile, answer_level));
+}
+
+absl::optional<int> ParseSdpForVPxMaxFrameRate(
+ const SdpVideoFormat::Parameters& params) {
+ return ParsePositiveNumberFromParams(params, kVPxFmtpMaxFrameRate);
+}
+
+absl::optional<int> ParseSdpForVPxMaxFrameSize(
+ const SdpVideoFormat::Parameters& params) {
+ const absl::optional<int> i =
+ ParsePositiveNumberFromParams(params, kVPxFmtpMaxFrameSize);
+ return i ? absl::make_optional(i.value() * kVPxFmtpFrameSizeSubBlockPixels)
+ : absl::nullopt;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/media/base/sdp_video_format_utils.h b/third_party/libwebrtc/media/base/sdp_video_format_utils.h
new file mode 100644
index 0000000000..80c1e4d501
--- /dev/null
+++ b/third_party/libwebrtc/media/base/sdp_video_format_utils.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MEDIA_BASE_SDP_VIDEO_FORMAT_UTILS_H_
+#define MEDIA_BASE_SDP_VIDEO_FORMAT_UTILS_H_
+
+#include "absl/types/optional.h"
+#include "api/video_codecs/sdp_video_format.h"
+
+namespace webrtc {
+// Generate codec parameters that will be used as answer in an SDP negotiation
+// based on local supported parameters and remote offered parameters. Both
+// `local_supported_params`, `remote_offered_params`, and `answer_params`
+// represent sendrecv media descriptions, i.e they are a mix of both encode and
+// decode capabilities. In theory, when the profile in `local_supported_params`
+// represent a strict superset of the profile in `remote_offered_params`, we
+// could limit the profile in `answer_params` to the profile in
+// `remote_offered_params`. However, to simplify the code, each supported H264
+// profile should be listed explicitly in the list of local supported codecs,
+// even if they are redundant. Then each local codec in the list should be
+// tested one at a time against the remote codec, and only when the profiles are
+// equal should this function be called. Therefore, this function does not need
+// to handle profile intersection, and the profile of `local_supported_params`
+// and `remote_offered_params` must be equal before calling this function. The
+// parameters that are used when negotiating are the level part of
+// profile-level-id and level-asymmetry-allowed.
+void H264GenerateProfileLevelIdForAnswer(
+ const SdpVideoFormat::Parameters& local_supported_params,
+ const SdpVideoFormat::Parameters& remote_offered_params,
+ SdpVideoFormat::Parameters* answer_params);
+
+// Parse max frame rate from SDP FMTP line. absl::nullopt is returned if the
+// field is missing or not a number.
+absl::optional<int> ParseSdpForVPxMaxFrameRate(
+ const SdpVideoFormat::Parameters& params);
+
+// Parse max frame size from SDP FMTP line. absl::nullopt is returned if the
+// field is missing or not a number. Please note that the value is stored in sub
+// blocks but the returned value is in total number of pixels.
+absl::optional<int> ParseSdpForVPxMaxFrameSize(
+ const SdpVideoFormat::Parameters& params);
+
+} // namespace webrtc
+
+#endif // MEDIA_BASE_SDP_VIDEO_FORMAT_UTILS_H_
diff --git a/third_party/libwebrtc/media/base/sdp_video_format_utils_unittest.cc b/third_party/libwebrtc/media/base/sdp_video_format_utils_unittest.cc
new file mode 100644
index 0000000000..d8ef9ab827
--- /dev/null
+++ b/third_party/libwebrtc/media/base/sdp_video_format_utils_unittest.cc
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/base/sdp_video_format_utils.h"
+
+#include <string.h>
+
+#include <map>
+#include <utility>
+
+#include "rtc_base/string_to_number.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+// Max frame rate for VP8 and VP9 video.
+const char kVPxFmtpMaxFrameRate[] = "max-fr";
+// Max frame size for VP8 and VP9 video.
+const char kVPxFmtpMaxFrameSize[] = "max-fs";
+} // namespace
+
+TEST(SdpVideoFormatUtilsTest, TestH264GenerateProfileLevelIdForAnswerEmpty) {
+ SdpVideoFormat::Parameters answer_params;
+ H264GenerateProfileLevelIdForAnswer(SdpVideoFormat::Parameters(),
+ SdpVideoFormat::Parameters(),
+ &answer_params);
+ EXPECT_TRUE(answer_params.empty());
+}
+
+TEST(SdpVideoFormatUtilsTest,
+ TestH264GenerateProfileLevelIdForAnswerLevelSymmetryCapped) {
+ SdpVideoFormat::Parameters low_level;
+ low_level["profile-level-id"] = "42e015";
+ SdpVideoFormat::Parameters high_level;
+ high_level["profile-level-id"] = "42e01f";
+
+ // Level asymmetry is not allowed; test that answer level is the lower of the
+ // local and remote levels.
+ SdpVideoFormat::Parameters answer_params;
+ H264GenerateProfileLevelIdForAnswer(low_level /* local_supported */,
+ high_level /* remote_offered */,
+ &answer_params);
+ EXPECT_EQ("42e015", answer_params["profile-level-id"]);
+
+ SdpVideoFormat::Parameters answer_params2;
+ H264GenerateProfileLevelIdForAnswer(high_level /* local_supported */,
+ low_level /* remote_offered */,
+ &answer_params2);
+ EXPECT_EQ("42e015", answer_params2["profile-level-id"]);
+}
+
+TEST(SdpVideoFormatUtilsTest,
+ TestH264GenerateProfileLevelIdForAnswerConstrainedBaselineLevelAsymmetry) {
+ SdpVideoFormat::Parameters local_params;
+ local_params["profile-level-id"] = "42e01f";
+ local_params["level-asymmetry-allowed"] = "1";
+ SdpVideoFormat::Parameters remote_params;
+ remote_params["profile-level-id"] = "42e015";
+ remote_params["level-asymmetry-allowed"] = "1";
+ SdpVideoFormat::Parameters answer_params;
+ H264GenerateProfileLevelIdForAnswer(local_params, remote_params,
+ &answer_params);
+ // When level asymmetry is allowed, we can answer a higher level than what was
+ // offered.
+ EXPECT_EQ("42e01f", answer_params["profile-level-id"]);
+}
+
+TEST(SdpVideoFormatUtilsTest, MaxFrameRateIsMissingOrInvalid) {
+ SdpVideoFormat::Parameters params;
+ absl::optional<int> empty = ParseSdpForVPxMaxFrameRate(params);
+ EXPECT_FALSE(empty);
+ params[kVPxFmtpMaxFrameRate] = "-1";
+ EXPECT_FALSE(ParseSdpForVPxMaxFrameRate(params));
+ params[kVPxFmtpMaxFrameRate] = "0";
+ EXPECT_FALSE(ParseSdpForVPxMaxFrameRate(params));
+ params[kVPxFmtpMaxFrameRate] = "abcde";
+ EXPECT_FALSE(ParseSdpForVPxMaxFrameRate(params));
+}
+
+TEST(SdpVideoFormatUtilsTest, MaxFrameRateIsSpecified) {
+ SdpVideoFormat::Parameters params;
+ params[kVPxFmtpMaxFrameRate] = "30";
+ EXPECT_EQ(ParseSdpForVPxMaxFrameRate(params), 30);
+ params[kVPxFmtpMaxFrameRate] = "60";
+ EXPECT_EQ(ParseSdpForVPxMaxFrameRate(params), 60);
+}
+
+TEST(SdpVideoFormatUtilsTest, MaxFrameSizeIsMissingOrInvalid) {
+ SdpVideoFormat::Parameters params;
+ absl::optional<int> empty = ParseSdpForVPxMaxFrameSize(params);
+ EXPECT_FALSE(empty);
+ params[kVPxFmtpMaxFrameSize] = "-1";
+ EXPECT_FALSE(ParseSdpForVPxMaxFrameSize(params));
+ params[kVPxFmtpMaxFrameSize] = "0";
+ EXPECT_FALSE(ParseSdpForVPxMaxFrameSize(params));
+ params[kVPxFmtpMaxFrameSize] = "abcde";
+ EXPECT_FALSE(ParseSdpForVPxMaxFrameSize(params));
+}
+
+TEST(SdpVideoFormatUtilsTest, MaxFrameSizeIsSpecified) {
+ SdpVideoFormat::Parameters params;
+ params[kVPxFmtpMaxFrameSize] = "8100"; // 1920 x 1080 / (16^2)
+ EXPECT_EQ(ParseSdpForVPxMaxFrameSize(params), 1920 * 1080);
+ params[kVPxFmtpMaxFrameSize] = "32400"; // 3840 x 2160 / (16^2)
+ EXPECT_EQ(ParseSdpForVPxMaxFrameSize(params), 3840 * 2160);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/media/base/stream_params.cc b/third_party/libwebrtc/media/base/stream_params.cc
new file mode 100644
index 0000000000..ac9daee200
--- /dev/null
+++ b/third_party/libwebrtc/media/base/stream_params.cc
@@ -0,0 +1,240 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/base/stream_params.h"
+
+#include <stdint.h>
+
+#include <list>
+
+#include "absl/algorithm/container.h"
+#include "api/array_view.h"
+#include "rtc_base/strings/string_builder.h"
+
+namespace cricket {
+namespace {
+
+void AppendSsrcs(rtc::ArrayView<const uint32_t> ssrcs,
+ rtc::SimpleStringBuilder* sb) {
+ *sb << "ssrcs:[";
+ const char* delimiter = "";
+ for (uint32_t ssrc : ssrcs) {
+ *sb << delimiter << ssrc;
+ delimiter = ",";
+ }
+ *sb << "]";
+}
+
+void AppendSsrcGroups(rtc::ArrayView<const SsrcGroup> ssrc_groups,
+ rtc::SimpleStringBuilder* sb) {
+ *sb << "ssrc_groups:";
+ const char* delimiter = "";
+ for (const SsrcGroup& ssrc_group : ssrc_groups) {
+ *sb << delimiter << ssrc_group.ToString();
+ delimiter = ",";
+ }
+}
+
+void AppendStreamIds(rtc::ArrayView<const std::string> stream_ids,
+ rtc::SimpleStringBuilder* sb) {
+ *sb << "stream_ids:";
+ const char* delimiter = "";
+ for (const std::string& stream_id : stream_ids) {
+ *sb << delimiter << stream_id;
+ delimiter = ",";
+ }
+}
+
+void AppendRids(rtc::ArrayView<const RidDescription> rids,
+ rtc::SimpleStringBuilder* sb) {
+ *sb << "rids:[";
+ const char* delimiter = "";
+ for (const RidDescription& rid : rids) {
+ *sb << delimiter << rid.rid;
+ delimiter = ",";
+ }
+ *sb << "]";
+}
+
+} // namespace
+
+const char kFecSsrcGroupSemantics[] = "FEC";
+const char kFecFrSsrcGroupSemantics[] = "FEC-FR";
+const char kFidSsrcGroupSemantics[] = "FID";
+const char kSimSsrcGroupSemantics[] = "SIM";
+
+bool GetStream(const StreamParamsVec& streams,
+ const StreamSelector& selector,
+ StreamParams* stream_out) {
+ const StreamParams* found = GetStream(streams, selector);
+ if (found && stream_out)
+ *stream_out = *found;
+ return found != nullptr;
+}
+
+SsrcGroup::SsrcGroup(const std::string& usage,
+ const std::vector<uint32_t>& ssrcs)
+ : semantics(usage), ssrcs(ssrcs) {}
+SsrcGroup::SsrcGroup(const SsrcGroup&) = default;
+SsrcGroup::SsrcGroup(SsrcGroup&&) = default;
+SsrcGroup::~SsrcGroup() = default;
+
+SsrcGroup& SsrcGroup::operator=(const SsrcGroup&) = default;
+SsrcGroup& SsrcGroup::operator=(SsrcGroup&&) = default;
+
+bool SsrcGroup::has_semantics(const std::string& semantics_in) const {
+ return (semantics == semantics_in && ssrcs.size() > 0);
+}
+
+std::string SsrcGroup::ToString() const {
+ char buf[1024];
+ rtc::SimpleStringBuilder sb(buf);
+ sb << "{";
+ sb << "semantics:" << semantics << ";";
+ AppendSsrcs(ssrcs, &sb);
+ sb << "}";
+ return sb.str();
+}
+
+StreamParams::StreamParams() = default;
+StreamParams::StreamParams(const StreamParams&) = default;
+StreamParams::StreamParams(StreamParams&&) = default;
+StreamParams::~StreamParams() = default;
+StreamParams& StreamParams::operator=(const StreamParams&) = default;
+StreamParams& StreamParams::operator=(StreamParams&&) = default;
+
+bool StreamParams::operator==(const StreamParams& other) const {
+ return (id == other.id && ssrcs == other.ssrcs &&
+ ssrc_groups == other.ssrc_groups && cname == other.cname &&
+ stream_ids_ == other.stream_ids_ &&
+ // RIDs are not required to be in the same order for equality.
+ absl::c_is_permutation(rids_, other.rids_));
+}
+
+std::string StreamParams::ToString() const {
+ char buf[2 * 1024];
+ rtc::SimpleStringBuilder sb(buf);
+ sb << "{";
+ if (!id.empty()) {
+ sb << "id:" << id << ";";
+ }
+ AppendSsrcs(ssrcs, &sb);
+ sb << ";";
+ AppendSsrcGroups(ssrc_groups, &sb);
+ sb << ";";
+ if (!cname.empty()) {
+ sb << "cname:" << cname << ";";
+ }
+ AppendStreamIds(stream_ids_, &sb);
+ sb << ";";
+ if (!rids_.empty()) {
+ AppendRids(rids_, &sb);
+ sb << ";";
+ }
+ sb << "}";
+ return sb.str();
+}
+
+void StreamParams::GenerateSsrcs(int num_layers,
+ bool generate_fid,
+ bool generate_fec_fr,
+ rtc::UniqueRandomIdGenerator* ssrc_generator) {
+ RTC_DCHECK_GE(num_layers, 0);
+ RTC_DCHECK(ssrc_generator);
+ std::vector<uint32_t> primary_ssrcs;
+ for (int i = 0; i < num_layers; ++i) {
+ uint32_t ssrc = ssrc_generator->GenerateId();
+ primary_ssrcs.push_back(ssrc);
+ add_ssrc(ssrc);
+ }
+
+ if (num_layers > 1) {
+ SsrcGroup simulcast(kSimSsrcGroupSemantics, primary_ssrcs);
+ ssrc_groups.push_back(simulcast);
+ }
+
+ if (generate_fid) {
+ for (uint32_t ssrc : primary_ssrcs) {
+ AddFidSsrc(ssrc, ssrc_generator->GenerateId());
+ }
+ }
+
+ if (generate_fec_fr) {
+ for (uint32_t ssrc : primary_ssrcs) {
+ AddFecFrSsrc(ssrc, ssrc_generator->GenerateId());
+ }
+ }
+}
+
+void StreamParams::GetPrimarySsrcs(std::vector<uint32_t>* ssrcs) const {
+ const SsrcGroup* sim_group = get_ssrc_group(kSimSsrcGroupSemantics);
+ if (sim_group == NULL) {
+ ssrcs->push_back(first_ssrc());
+ } else {
+ ssrcs->insert(ssrcs->end(), sim_group->ssrcs.begin(),
+ sim_group->ssrcs.end());
+ }
+}
+
+void StreamParams::GetSecondarySsrcs(
+ const std::string& semantics,
+ const std::vector<uint32_t>& primary_ssrcs,
+ std::vector<uint32_t>* secondary_ssrcs) const {
+ for (uint32_t primary_ssrc : primary_ssrcs) {
+ uint32_t secondary_ssrc;
+ if (GetSecondarySsrc(semantics, primary_ssrc, &secondary_ssrc)) {
+ secondary_ssrcs->push_back(secondary_ssrc);
+ }
+ }
+}
+
+void StreamParams::GetFidSsrcs(const std::vector<uint32_t>& primary_ssrcs,
+ std::vector<uint32_t>* fid_ssrcs) const {
+ return GetSecondarySsrcs(kFidSsrcGroupSemantics, primary_ssrcs, fid_ssrcs);
+}
+
+bool StreamParams::AddSecondarySsrc(const std::string& semantics,
+ uint32_t primary_ssrc,
+ uint32_t secondary_ssrc) {
+ if (!has_ssrc(primary_ssrc)) {
+ return false;
+ }
+
+ ssrcs.push_back(secondary_ssrc);
+ ssrc_groups.push_back(SsrcGroup(semantics, {primary_ssrc, secondary_ssrc}));
+ return true;
+}
+
+bool StreamParams::GetSecondarySsrc(const std::string& semantics,
+ uint32_t primary_ssrc,
+ uint32_t* secondary_ssrc) const {
+ for (const SsrcGroup& ssrc_group : ssrc_groups) {
+ if (ssrc_group.has_semantics(semantics) && ssrc_group.ssrcs.size() >= 2 &&
+ ssrc_group.ssrcs[0] == primary_ssrc) {
+ *secondary_ssrc = ssrc_group.ssrcs[1];
+ return true;
+ }
+ }
+ return false;
+}
+
+std::vector<std::string> StreamParams::stream_ids() const {
+ return stream_ids_;
+}
+
+void StreamParams::set_stream_ids(const std::vector<std::string>& stream_ids) {
+ stream_ids_ = stream_ids;
+}
+
+std::string StreamParams::first_stream_id() const {
+ return stream_ids_.empty() ? "" : stream_ids_[0];
+}
+
+} // namespace cricket
diff --git a/third_party/libwebrtc/media/base/stream_params.h b/third_party/libwebrtc/media/base/stream_params.h
new file mode 100644
index 0000000000..89fc1554cc
--- /dev/null
+++ b/third_party/libwebrtc/media/base/stream_params.h
@@ -0,0 +1,321 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This file contains structures for describing SSRCs from a media source such
+// as a MediaStreamTrack when it is sent across an RTP session. Multiple media
+// sources may be sent across the same RTP session, each of them will be
+// described by one StreamParams object
+// SsrcGroup is used to describe the relationship between the SSRCs that
+// are used for this media source.
+// E.g.: Consider a source that is sent as 3 simulcast streams.
+// Let the simulcast elements have SSRC 10, 20, 30.
+// Let each simulcast element use FEC and let the protection packets have
+// SSRC 11, 21, 31.
+// To describe this, 4 SsrcGroups are needed;
+// StreamParams would then contain ssrcs = {10,11,20,21,30,31} and
+// ssrc_groups = {{SIM,{10,20,30}}, {FEC,{10,11}}, {FEC,{20,21}}, {FEC,{30,31}}}.
+// Please see RFC 5576.
+// A spec-compliant way to achieve this is to use RIDs and Simulcast attribute
+// instead of the ssrc-group. In this method, the StreamParam object will
+// have multiple RidDescriptions, each corresponding to a simulcast layer
+// and the media section will have a simulcast attribute that indicates
+// that these layers are for the same source. This also removes the extra
+// lines for redundancy streams, as the same RIDs appear in the redundancy
+// packets.
+// Note: in the spec compliant simulcast scenario, some of the RIDs might be
+// alternatives for one another (such as different encodings for same data).
+// In the context of the StreamParams class, the notion of alternatives does
+// not exist and all the RIDs will describe different layers of the same source.
+// When the StreamParams class is used to configure the media engine, simulcast
+// considerations will be used to remove the alternative layers outside of this
+// class.
+// As an example, let the simulcast layers have RID 10, 20, 30.
+// StreamParams would contain rid = { 10, 20, 30 }.
+// MediaSection would contain SimulcastDescription specifying these rids.
+// a=simulcast:send 10;20;30 (or a=simulcast:send 10,20;30 or similar).
+// See https://tools.ietf.org/html/draft-ietf-mmusic-sdp-simulcast-13
+// and https://tools.ietf.org/html/draft-ietf-mmusic-rid-15.
+
+#ifndef MEDIA_BASE_STREAM_PARAMS_H_
+#define MEDIA_BASE_STREAM_PARAMS_H_
+
+#include <stddef.h>
+
+#include <cstdint>
+#include <string>
+#include <vector>
+
+#include "absl/algorithm/container.h"
+#include "media/base/rid_description.h"
+#include "rtc_base/unique_id_generator.h"
+
+namespace cricket {
+
+extern const char kFecSsrcGroupSemantics[];
+extern const char kFecFrSsrcGroupSemantics[];
+extern const char kFidSsrcGroupSemantics[];
+extern const char kSimSsrcGroupSemantics[];
+
+// A named association between SSRCs, e.g. SIM (simulcast layers) or
+// FID (RTX retransmission pairing). See RFC 5576 section 4.2.
+struct SsrcGroup {
+  SsrcGroup(const std::string& usage, const std::vector<uint32_t>& ssrcs);
+  SsrcGroup(const SsrcGroup&);
+  SsrcGroup(SsrcGroup&&);
+  ~SsrcGroup();
+  SsrcGroup& operator=(const SsrcGroup&);
+  SsrcGroup& operator=(SsrcGroup&&);
+
+  // Equal iff both the semantics string (case-sensitive) and the ordered
+  // SSRC list match exactly.
+  bool operator==(const SsrcGroup& other) const {
+    return (semantics == other.semantics && ssrcs == other.ssrcs);
+  }
+  bool operator!=(const SsrcGroup& other) const { return !(*this == other); }
+
+  // True when this group's semantics equals `semantics` (case-sensitive).
+  bool has_semantics(const std::string& semantics) const;
+
+  // Debug representation, e.g. "{semantics:FID;ssrcs:[1,2]}".
+  std::string ToString() const;
+
+  std::string semantics;  // e.g. FID, FEC, SIM.
+  std::vector<uint32_t> ssrcs;  // SSRCs of this type.
+};
+
+// StreamParams is used to represent a sender/track in a SessionDescription.
+// In Plan B, this means that multiple StreamParams can exist within one
+// MediaContentDescription, while in UnifiedPlan this means that there is one
+// StreamParams per MediaContentDescription.
+struct StreamParams {
+  StreamParams();
+  StreamParams(const StreamParams&);
+  StreamParams(StreamParams&&);
+  ~StreamParams();
+  StreamParams& operator=(const StreamParams&);
+  StreamParams& operator=(StreamParams&&);
+
+  // Creates a StreamParams that signals a single SSRC and nothing else.
+  static StreamParams CreateLegacy(uint32_t ssrc) {
+    StreamParams stream;
+    stream.ssrcs.push_back(ssrc);
+    return stream;
+  }
+
+  bool operator==(const StreamParams& other) const;
+  bool operator!=(const StreamParams& other) const { return !(*this == other); }
+
+  // Returns the first signaled SSRC, or 0 when no SSRCs are signaled.
+  uint32_t first_ssrc() const {
+    if (ssrcs.empty()) {
+      return 0;
+    }
+
+    return ssrcs[0];
+  }
+  bool has_ssrcs() const { return !ssrcs.empty(); }
+  // Linear search; `ssrcs` lists are small in practice.
+  bool has_ssrc(uint32_t ssrc) const {
+    return absl::c_linear_search(ssrcs, ssrc);
+  }
+  void add_ssrc(uint32_t ssrc) { ssrcs.push_back(ssrc); }
+  bool has_ssrc_groups() const { return !ssrc_groups.empty(); }
+  bool has_ssrc_group(const std::string& semantics) const {
+    return (get_ssrc_group(semantics) != NULL);
+  }
+  // Returns a pointer into `ssrc_groups` for the first group with the given
+  // semantics (case-sensitive), or NULL when there is none. The pointer is
+  // invalidated by any mutation of `ssrc_groups`.
+  const SsrcGroup* get_ssrc_group(const std::string& semantics) const {
+    for (const SsrcGroup& ssrc_group : ssrc_groups) {
+      if (ssrc_group.has_semantics(semantics)) {
+        return &ssrc_group;
+      }
+    }
+    return NULL;
+  }
+
+  // Convenience function to add an FID ssrc for a primary_ssrc
+  // that's already been added.
+  bool AddFidSsrc(uint32_t primary_ssrc, uint32_t fid_ssrc) {
+    return AddSecondarySsrc(kFidSsrcGroupSemantics, primary_ssrc, fid_ssrc);
+  }
+
+  // Convenience function to lookup the FID ssrc for a primary_ssrc.
+  // Returns false if primary_ssrc not found or FID not defined for it.
+  bool GetFidSsrc(uint32_t primary_ssrc, uint32_t* fid_ssrc) const {
+    return GetSecondarySsrc(kFidSsrcGroupSemantics, primary_ssrc, fid_ssrc);
+  }
+
+  // Convenience function to add an FEC-FR ssrc for a primary_ssrc
+  // that's already been added.
+  bool AddFecFrSsrc(uint32_t primary_ssrc, uint32_t fecfr_ssrc) {
+    return AddSecondarySsrc(kFecFrSsrcGroupSemantics, primary_ssrc, fecfr_ssrc);
+  }
+
+  // Convenience function to lookup the FEC-FR ssrc for a primary_ssrc.
+  // Returns false if primary_ssrc not found or FEC-FR not defined for it.
+  bool GetFecFrSsrc(uint32_t primary_ssrc, uint32_t* fecfr_ssrc) const {
+    return GetSecondarySsrc(kFecFrSsrcGroupSemantics, primary_ssrc, fecfr_ssrc);
+  }
+
+  // Convenience function to populate the StreamParams with the requested number
+  // of SSRCs along with accompanying FID and FEC-FR ssrcs if requested.
+  // SSRCs are generated using the given generator.
+  void GenerateSsrcs(int num_layers,
+                     bool generate_fid,
+                     bool generate_fec_fr,
+                     rtc::UniqueRandomIdGenerator* ssrc_generator);
+
+  // Convenience to get all the SIM SSRCs if there are SIM ssrcs, or
+  // the first SSRC otherwise.
+  void GetPrimarySsrcs(std::vector<uint32_t>* ssrcs) const;
+
+  // Convenience to get all the secondary SSRCs for the given primary ssrcs
+  // of a particular semantic.
+  // If a given primary SSRC does not have a secondary SSRC, the list of
+  // secondary SSRCS will be smaller than the list of primary SSRCs.
+  void GetSecondarySsrcs(const std::string& semantic,
+                         const std::vector<uint32_t>& primary_ssrcs,
+                         std::vector<uint32_t>* fid_ssrcs) const;
+
+  // Convenience to get all the FID SSRCs for the given primary ssrcs.
+  // If a given primary SSRC does not have a FID SSRC, the list of FID
+  // SSRCS will be smaller than the list of primary SSRCs.
+  void GetFidSsrcs(const std::vector<uint32_t>& primary_ssrcs,
+                   std::vector<uint32_t>* fid_ssrcs) const;
+
+  // Stream ids serialized to SDP.
+  std::vector<std::string> stream_ids() const;
+  void set_stream_ids(const std::vector<std::string>& stream_ids);
+
+  // Returns the first stream id or "" if none exist. This method exists only
+  // as temporary backwards compatibility with the old sync_label.
+  std::string first_stream_id() const;
+
+  std::string ToString() const;
+
+  // A unique identifier of the StreamParams object. When the SDP is created,
+  // this comes from the track ID of the sender that the StreamParams object
+  // is associated with.
+  std::string id;
+  // There may be no SSRCs stored in unsignaled case when stream_ids are
+  // signaled with a=msid lines.
+  std::vector<uint32_t> ssrcs;         // All SSRCs for this source
+  std::vector<SsrcGroup> ssrc_groups;  // e.g. FID, FEC, SIM
+  std::string cname;                   // RTCP CNAME
+
+  // RID functionality according to
+  // https://tools.ietf.org/html/draft-ietf-mmusic-rid-15
+  // Each layer can be represented by a RID identifier and can also have
+  // restrictions (such as max-width, max-height, etc.)
+  // If the track has multiple layers (ex. Simulcast), each layer will be
+  // represented by a RID.
+  bool has_rids() const { return !rids_.empty(); }
+  const std::vector<RidDescription>& rids() const { return rids_; }
+  void set_rids(const std::vector<RidDescription>& rids) { rids_ = rids; }
+
+ private:
+  // Shared implementation behind the FID / FEC-FR convenience wrappers above.
+  bool AddSecondarySsrc(const std::string& semantics,
+                        uint32_t primary_ssrc,
+                        uint32_t secondary_ssrc);
+  bool GetSecondarySsrc(const std::string& semantics,
+                        uint32_t primary_ssrc,
+                        uint32_t* secondary_ssrc) const;
+
+  // The stream IDs of the sender that the StreamParams object is associated
+  // with. In Plan B this should always be size of 1, while in Unified Plan this
+  // could be none or multiple stream IDs.
+  std::vector<std::string> stream_ids_;
+
+  std::vector<RidDescription> rids_;
+};
+
+// Identifies a stream either by SSRC (ssrc != 0) or by stream id (ssrc == 0).
+struct StreamSelector {
+  explicit StreamSelector(uint32_t ssrc) : ssrc(ssrc) {}
+
+  explicit StreamSelector(const std::string& streamid)
+      : ssrc(0), streamid(streamid) {}
+
+  // An ssrc of 0 means "match by id"; otherwise match by SSRC membership.
+  bool Matches(const StreamParams& stream) const {
+    return ssrc == 0 ? stream.id == streamid : stream.has_ssrc(ssrc);
+  }
+
+  uint32_t ssrc;
+  std::string streamid;
+};
+
+typedef std::vector<StreamParams> StreamParamsVec;
+
+// Returns a pointer to the first stream satisfying `condition`, or nullptr.
+template <class Condition>
+const StreamParams* GetStream(const StreamParamsVec& streams,
+                              Condition condition) {
+  auto it = absl::c_find_if(streams, condition);
+  if (it == streams.end()) {
+    return nullptr;
+  }
+  return &*it;
+}
+
+// Mutable overload: returns the first stream satisfying `condition`, or
+// nullptr.
+template <class Condition>
+StreamParams* GetStream(StreamParamsVec& streams, Condition condition) {
+  auto it = absl::c_find_if(streams, condition);
+  if (it == streams.end()) {
+    return nullptr;
+  }
+  return &*it;
+}
+
+// True when any stream in `streams` has no signaled SSRCs (the unsignaled
+// a=msid-only case).
+inline bool HasStreamWithNoSsrcs(const StreamParamsVec& streams) {
+  // Explicit nullptr comparison instead of relying on implicit
+  // pointer-to-bool conversion in the return.
+  return GetStream(streams, [](const StreamParams& sp) {
+           return !sp.has_ssrcs();
+         }) != nullptr;
+}
+
+// Finds the stream that lists `ssrc` among its SSRCs, or nullptr.
+inline const StreamParams* GetStreamBySsrc(const StreamParamsVec& streams,
+                                           uint32_t ssrc) {
+  auto matches_ssrc = [&ssrc](const StreamParams& sp) {
+    return sp.has_ssrc(ssrc);
+  };
+  return GetStream(streams, matches_ssrc);
+}
+
+// Finds the stream whose id equals `id`, or nullptr.
+inline const StreamParams* GetStreamByIds(const StreamParamsVec& streams,
+                                          const std::string& id) {
+  auto matches_id = [&id](const StreamParams& sp) { return sp.id == id; };
+  return GetStream(streams, matches_id);
+}
+
+// Mutable overload: finds the stream whose id equals `id`, or nullptr.
+inline StreamParams* GetStreamByIds(StreamParamsVec& streams,
+                                    const std::string& id) {
+  auto matches_id = [&id](const StreamParams& sp) { return sp.id == id; };
+  return GetStream(streams, matches_id);
+}
+
+// Finds the stream matched by `selector` (by SSRC or by id), or nullptr.
+inline const StreamParams* GetStream(const StreamParamsVec& streams,
+                                     const StreamSelector& selector) {
+  auto matches_selector = [&selector](const StreamParams& sp) {
+    return selector.Matches(sp);
+  };
+  return GetStream(streams, matches_selector);
+}
+
+// Erases every stream satisfying `condition`; returns true when at least one
+// stream was removed.
+template <class Condition>
+bool RemoveStream(StreamParamsVec* streams, Condition condition) {
+  auto first_removed =
+      std::remove_if(streams->begin(), streams->end(), condition);
+  const bool removed_any = first_removed != streams->end();
+  streams->erase(first_removed, streams->end());
+  return removed_any;
+}
+
+// Removes every stream matched by `selector`. Returns true if at least one
+// stream was found and removed.
+inline bool RemoveStream(StreamParamsVec* streams,
+                         const StreamSelector& selector) {
+  auto matches_selector = [&selector](const StreamParams& sp) {
+    return selector.Matches(sp);
+  };
+  return RemoveStream(streams, matches_selector);
+}
+// Removes every stream that lists `ssrc`; returns true if any was removed.
+inline bool RemoveStreamBySsrc(StreamParamsVec* streams, uint32_t ssrc) {
+  auto matches_ssrc = [&ssrc](const StreamParams& sp) {
+    return sp.has_ssrc(ssrc);
+  };
+  return RemoveStream(streams, matches_ssrc);
+}
+// Removes every stream whose id equals `id`; returns true if any was removed.
+inline bool RemoveStreamByIds(StreamParamsVec* streams, const std::string& id) {
+  auto matches_id = [&id](const StreamParams& sp) { return sp.id == id; };
+  return RemoveStream(streams, matches_id);
+}
+
+} // namespace cricket
+
+#endif // MEDIA_BASE_STREAM_PARAMS_H_
diff --git a/third_party/libwebrtc/media/base/stream_params_unittest.cc b/third_party/libwebrtc/media/base/stream_params_unittest.cc
new file mode 100644
index 0000000000..7adf0f517d
--- /dev/null
+++ b/third_party/libwebrtc/media/base/stream_params_unittest.cc
@@ -0,0 +1,301 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/base/stream_params.h"
+
+#include <stdint.h>
+
+#include "media/base/test_utils.h"
+#include "rtc_base/arraysize.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+using ::testing::Each;
+using ::testing::Ne;
+
+static const uint32_t kSsrcs1[] = {1};
+static const uint32_t kSsrcs2[] = {1, 2};
+
+// Builds a StreamParams whose first `len` SSRCs from `ssrcs_in` are all
+// members of a single SsrcGroup with the given `semantics`.
+static cricket::StreamParams CreateStreamParamsWithSsrcGroup(
+    const std::string& semantics,
+    const uint32_t ssrcs_in[],
+    size_t len) {
+  cricket::StreamParams stream;
+  std::vector<uint32_t> ssrcs(ssrcs_in, ssrcs_in + len);
+  cricket::SsrcGroup sg(semantics, ssrcs);
+  stream.ssrcs = ssrcs;
+  stream.ssrc_groups.push_back(sg);
+  return stream;
+}
+
+// SsrcGroup equality must compare both semantics (case-sensitively) and the
+// SSRC list: each group equals only itself in this matrix.
+TEST(SsrcGroup, EqualNotEqual) {
+  cricket::SsrcGroup ssrc_groups[] = {
+      cricket::SsrcGroup("ABC", MAKE_VECTOR(kSsrcs1)),
+      cricket::SsrcGroup("ABC", MAKE_VECTOR(kSsrcs2)),
+      cricket::SsrcGroup("Abc", MAKE_VECTOR(kSsrcs2)),
+      cricket::SsrcGroup("abc", MAKE_VECTOR(kSsrcs2)),
+  };
+
+  for (size_t i = 0; i < arraysize(ssrc_groups); ++i) {
+    for (size_t j = 0; j < arraysize(ssrc_groups); ++j) {
+      EXPECT_EQ((ssrc_groups[i] == ssrc_groups[j]), (i == j));
+      EXPECT_EQ((ssrc_groups[i] != ssrc_groups[j]), (i != j));
+    }
+  }
+}
+
+// has_semantics() is case-sensitive: only an exact match succeeds.
+TEST(SsrcGroup, HasSemantics) {
+  cricket::SsrcGroup sg1("ABC", MAKE_VECTOR(kSsrcs1));
+  EXPECT_TRUE(sg1.has_semantics("ABC"));
+
+  cricket::SsrcGroup sg2("Abc", MAKE_VECTOR(kSsrcs1));
+  EXPECT_FALSE(sg2.has_semantics("ABC"));
+
+  cricket::SsrcGroup sg3("abc", MAKE_VECTOR(kSsrcs1));
+  EXPECT_FALSE(sg3.has_semantics("ABC"));
+}
+
+// Pins the exact debug-string format of SsrcGroup::ToString().
+TEST(SsrcGroup, ToString) {
+  cricket::SsrcGroup sg1("ABC", MAKE_VECTOR(kSsrcs1));
+  EXPECT_STREQ("{semantics:ABC;ssrcs:[1]}", sg1.ToString().c_str());
+}
+
+// CreateLegacy() signals exactly one SSRC and no SSRC groups.
+TEST(StreamParams, CreateLegacy) {
+  const uint32_t ssrc = 7;
+  cricket::StreamParams one_sp = cricket::StreamParams::CreateLegacy(ssrc);
+  EXPECT_EQ(1U, one_sp.ssrcs.size());
+  EXPECT_EQ(ssrc, one_sp.first_ssrc());
+  EXPECT_TRUE(one_sp.has_ssrcs());
+  EXPECT_TRUE(one_sp.has_ssrc(ssrc));
+  EXPECT_FALSE(one_sp.has_ssrc(ssrc + 1));
+  EXPECT_FALSE(one_sp.has_ssrc_groups());
+  EXPECT_EQ(0U, one_sp.ssrc_groups.size());
+}
+
+// A StreamParams built around one SsrcGroup reports that group and its SSRCs.
+TEST(StreamParams, HasSsrcGroup) {
+  cricket::StreamParams sp =
+      CreateStreamParamsWithSsrcGroup("XYZ", kSsrcs2, arraysize(kSsrcs2));
+  EXPECT_EQ(2U, sp.ssrcs.size());
+  EXPECT_EQ(kSsrcs2[0], sp.first_ssrc());
+  EXPECT_TRUE(sp.has_ssrcs());
+  EXPECT_TRUE(sp.has_ssrc(kSsrcs2[0]));
+  EXPECT_TRUE(sp.has_ssrc(kSsrcs2[1]));
+  EXPECT_TRUE(sp.has_ssrc_group("XYZ"));
+  EXPECT_EQ(1U, sp.ssrc_groups.size());
+  EXPECT_EQ(2U, sp.ssrc_groups[0].ssrcs.size());
+  EXPECT_EQ(kSsrcs2[0], sp.ssrc_groups[0].ssrcs[0]);
+  EXPECT_EQ(kSsrcs2[1], sp.ssrc_groups[0].ssrcs[1]);
+}
+
+// get_ssrc_group() is case-sensitive and returns a pointer into ssrc_groups.
+TEST(StreamParams, GetSsrcGroup) {
+  cricket::StreamParams sp =
+      CreateStreamParamsWithSsrcGroup("XYZ", kSsrcs2, arraysize(kSsrcs2));
+  EXPECT_EQ(NULL, sp.get_ssrc_group("xyz"));
+  EXPECT_EQ(&sp.ssrc_groups[0], sp.get_ssrc_group("XYZ"));
+}
+
+// HasStreamWithNoSsrcs() only fires once an unsignaled (SSRC-less) stream is
+// present in the vector.
+TEST(StreamParams, HasStreamWithNoSsrcs) {
+  cricket::StreamParams sp_1 = cricket::StreamParams::CreateLegacy(kSsrcs1[0]);
+  cricket::StreamParams sp_2 = cricket::StreamParams::CreateLegacy(kSsrcs2[0]);
+  std::vector<cricket::StreamParams> streams({sp_1, sp_2});
+  EXPECT_FALSE(HasStreamWithNoSsrcs(streams));
+
+  cricket::StreamParams unsignaled_stream;
+  streams.push_back(unsignaled_stream);
+  EXPECT_TRUE(HasStreamWithNoSsrcs(streams));
+}
+
+// StreamParams equality covers SSRCs and groups (including group semantics
+// case): every entry equals only itself.
+TEST(StreamParams, EqualNotEqual) {
+  cricket::StreamParams l1 = cricket::StreamParams::CreateLegacy(1);
+  cricket::StreamParams l2 = cricket::StreamParams::CreateLegacy(2);
+  cricket::StreamParams sg1 =
+      CreateStreamParamsWithSsrcGroup("ABC", kSsrcs1, arraysize(kSsrcs1));
+  cricket::StreamParams sg2 =
+      CreateStreamParamsWithSsrcGroup("ABC", kSsrcs2, arraysize(kSsrcs2));
+  cricket::StreamParams sg3 =
+      CreateStreamParamsWithSsrcGroup("Abc", kSsrcs2, arraysize(kSsrcs2));
+  cricket::StreamParams sg4 =
+      CreateStreamParamsWithSsrcGroup("abc", kSsrcs2, arraysize(kSsrcs2));
+  cricket::StreamParams sps[] = {l1, l2, sg1, sg2, sg3, sg4};
+
+  for (size_t i = 0; i < arraysize(sps); ++i) {
+    for (size_t j = 0; j < arraysize(sps); ++j) {
+      EXPECT_EQ((sps[i] == sps[j]), (i == j));
+      EXPECT_EQ((sps[i] != sps[j]), (i != j));
+    }
+  }
+}
+
+// AddFidSsrc() must reject unknown primaries; GetFidSsrc() must reject both
+// unknown primaries and malformed (single-SSRC) FID groups.
+TEST(StreamParams, FidFunctions) {
+  uint32_t fid_ssrc;
+
+  cricket::StreamParams sp = cricket::StreamParams::CreateLegacy(1);
+  EXPECT_FALSE(sp.AddFidSsrc(10, 20));
+  EXPECT_TRUE(sp.AddFidSsrc(1, 2));
+  EXPECT_TRUE(sp.GetFidSsrc(1, &fid_ssrc));
+  EXPECT_EQ(2u, fid_ssrc);
+  EXPECT_FALSE(sp.GetFidSsrc(15, &fid_ssrc));
+
+  sp.add_ssrc(20);
+  EXPECT_TRUE(sp.AddFidSsrc(20, 30));
+  EXPECT_TRUE(sp.GetFidSsrc(20, &fid_ssrc));
+  EXPECT_EQ(30u, fid_ssrc);
+
+  // Manually create SsrcGroup to test bounds-checking
+  // in GetSecondarySsrc. We construct an invalid StreamParams
+  // for this.
+  std::vector<uint32_t> fid_vector;
+  fid_vector.push_back(13);
+  cricket::SsrcGroup invalid_fid_group(cricket::kFidSsrcGroupSemantics,
+                                       fid_vector);
+  cricket::StreamParams sp_invalid;
+  sp_invalid.add_ssrc(13);
+  sp_invalid.ssrc_groups.push_back(invalid_fid_group);
+  EXPECT_FALSE(sp_invalid.GetFidSsrc(13, &fid_ssrc));
+}
+
+// Without a SIM group, GetPrimarySsrcs() returns only the first SSRC; with
+// one, it returns the whole SIM list, and GetFidSsrcs() may return fewer
+// entries than primaries (here 2 FIDs for 3 primaries).
+TEST(StreamParams, GetPrimaryAndFidSsrcs) {
+  cricket::StreamParams sp;
+  sp.ssrcs.push_back(1);
+  sp.ssrcs.push_back(2);
+  sp.ssrcs.push_back(3);
+
+  std::vector<uint32_t> primary_ssrcs;
+  sp.GetPrimarySsrcs(&primary_ssrcs);
+  std::vector<uint32_t> fid_ssrcs;
+  sp.GetFidSsrcs(primary_ssrcs, &fid_ssrcs);
+  ASSERT_EQ(1u, primary_ssrcs.size());
+  EXPECT_EQ(1u, primary_ssrcs[0]);
+  ASSERT_EQ(0u, fid_ssrcs.size());
+
+  sp.ssrc_groups.push_back(
+      cricket::SsrcGroup(cricket::kSimSsrcGroupSemantics, sp.ssrcs));
+  sp.AddFidSsrc(1, 10);
+  sp.AddFidSsrc(2, 20);
+
+  primary_ssrcs.clear();
+  sp.GetPrimarySsrcs(&primary_ssrcs);
+  fid_ssrcs.clear();
+  sp.GetFidSsrcs(primary_ssrcs, &fid_ssrcs);
+  ASSERT_EQ(3u, primary_ssrcs.size());
+  EXPECT_EQ(1u, primary_ssrcs[0]);
+  EXPECT_EQ(2u, primary_ssrcs[1]);
+  EXPECT_EQ(3u, primary_ssrcs[2]);
+  ASSERT_EQ(2u, fid_ssrcs.size());
+  EXPECT_EQ(10u, fid_ssrcs[0]);
+  EXPECT_EQ(20u, fid_ssrcs[1]);
+}
+
+// FEC-FR mirror of FidFunctions: add/get round-trip, rejection of unknown
+// primaries, and rejection of a malformed single-SSRC FEC-FR group.
+TEST(StreamParams, FecFrFunctions) {
+  uint32_t fecfr_ssrc;
+
+  cricket::StreamParams sp = cricket::StreamParams::CreateLegacy(1);
+  EXPECT_FALSE(sp.AddFecFrSsrc(10, 20));
+  EXPECT_TRUE(sp.AddFecFrSsrc(1, 2));
+  EXPECT_TRUE(sp.GetFecFrSsrc(1, &fecfr_ssrc));
+  EXPECT_EQ(2u, fecfr_ssrc);
+  EXPECT_FALSE(sp.GetFecFrSsrc(15, &fecfr_ssrc));
+
+  sp.add_ssrc(20);
+  EXPECT_TRUE(sp.AddFecFrSsrc(20, 30));
+  EXPECT_TRUE(sp.GetFecFrSsrc(20, &fecfr_ssrc));
+  EXPECT_EQ(30u, fecfr_ssrc);
+
+  // Manually create SsrcGroup to test bounds-checking
+  // in GetSecondarySsrc. We construct an invalid StreamParams
+  // for this.
+  std::vector<uint32_t> fecfr_vector;
+  fecfr_vector.push_back(13);
+  cricket::SsrcGroup invalid_fecfr_group(cricket::kFecFrSsrcGroupSemantics,
+                                         fecfr_vector);
+  cricket::StreamParams sp_invalid;
+  sp_invalid.add_ssrc(13);
+  sp_invalid.ssrc_groups.push_back(invalid_fecfr_group);
+  EXPECT_FALSE(sp_invalid.GetFecFrSsrc(13, &fecfr_ssrc));
+}
+
+// Pins the exact debug-string format of StreamParams::ToString(), including
+// SSRC groups and stream ids.
+TEST(StreamParams, ToString) {
+  cricket::StreamParams sp =
+      CreateStreamParamsWithSsrcGroup("XYZ", kSsrcs2, arraysize(kSsrcs2));
+  sp.set_stream_ids({"stream_id"});
+  EXPECT_STREQ(
+      "{ssrcs:[1,2];ssrc_groups:{semantics:XYZ;ssrcs:[1,2]};stream_ids:stream_"
+      "id;}",
+      sp.ToString().c_str());
+}
+
+// GenerateSsrcs(1, fid, fecfr) must yield 3 SSRCs: primary + RTX (FID group)
+// + FlexFEC (FEC-FR group), and no SIM group for a single layer.
+TEST(StreamParams, TestGenerateSsrcs_SingleStreamWithRtxAndFlex) {
+  rtc::UniqueRandomIdGenerator generator;
+  cricket::StreamParams stream;
+  stream.GenerateSsrcs(1, true, true, &generator);
+  uint32_t primary_ssrc = stream.first_ssrc();
+  ASSERT_NE(0u, primary_ssrc);
+  uint32_t rtx_ssrc = 0;
+  uint32_t flex_ssrc = 0;
+  EXPECT_EQ(3u, stream.ssrcs.size());
+  EXPECT_TRUE(stream.GetFidSsrc(primary_ssrc, &rtx_ssrc));
+  EXPECT_NE(0u, rtx_ssrc);
+  EXPECT_TRUE(stream.GetFecFrSsrc(primary_ssrc, &flex_ssrc));
+  EXPECT_NE(0u, flex_ssrc);
+  EXPECT_FALSE(stream.has_ssrc_group(cricket::kSimSsrcGroupSemantics));
+  EXPECT_TRUE(stream.has_ssrc_group(cricket::kFidSsrcGroupSemantics));
+  EXPECT_TRUE(stream.has_ssrc_group(cricket::kFecFrSsrcGroupSemantics));
+}
+
+// RTX only: 2 SSRCs and a FID group, but no FEC-FR and no SIM group.
+TEST(StreamParams, TestGenerateSsrcs_SingleStreamWithRtx) {
+  rtc::UniqueRandomIdGenerator generator;
+  cricket::StreamParams stream;
+  stream.GenerateSsrcs(1, true, false, &generator);
+  uint32_t primary_ssrc = stream.first_ssrc();
+  ASSERT_NE(0u, primary_ssrc);
+  uint32_t rtx_ssrc = 0;
+  uint32_t flex_ssrc = 0;
+  EXPECT_EQ(2u, stream.ssrcs.size());
+  EXPECT_TRUE(stream.GetFidSsrc(primary_ssrc, &rtx_ssrc));
+  EXPECT_NE(0u, rtx_ssrc);
+  EXPECT_FALSE(stream.GetFecFrSsrc(primary_ssrc, &flex_ssrc));
+  EXPECT_EQ(0u, flex_ssrc);
+  EXPECT_FALSE(stream.has_ssrc_group(cricket::kSimSsrcGroupSemantics));
+  EXPECT_TRUE(stream.has_ssrc_group(cricket::kFidSsrcGroupSemantics));
+}
+
+// FlexFEC only: 2 SSRCs and a FEC-FR group, but no FID and no SIM group.
+TEST(StreamParams, TestGenerateSsrcs_SingleStreamWithFlex) {
+  rtc::UniqueRandomIdGenerator generator;
+  cricket::StreamParams stream;
+  stream.GenerateSsrcs(1, false, true, &generator);
+  uint32_t primary_ssrc = stream.first_ssrc();
+  ASSERT_NE(0u, primary_ssrc);
+  uint32_t rtx_ssrc = 0;
+  uint32_t flex_ssrc = 0;
+  EXPECT_EQ(2u, stream.ssrcs.size());
+  EXPECT_FALSE(stream.GetFidSsrc(primary_ssrc, &rtx_ssrc));
+  EXPECT_EQ(0u, rtx_ssrc);
+  EXPECT_TRUE(stream.GetFecFrSsrc(primary_ssrc, &flex_ssrc));
+  EXPECT_NE(0u, flex_ssrc);
+  EXPECT_FALSE(stream.has_ssrc_group(cricket::kSimSsrcGroupSemantics));
+  EXPECT_TRUE(stream.has_ssrc_group(cricket::kFecFrSsrcGroupSemantics));
+}
+
+// Multiple layers with RTX: 2 SSRCs per layer, a SIM group over the primaries
+// and a FID pairing for each primary; all generated SSRCs are nonzero.
+TEST(StreamParams, TestGenerateSsrcs_SimulcastLayersAndRtx) {
+  const size_t kNumStreams = 3;
+  rtc::UniqueRandomIdGenerator generator;
+  cricket::StreamParams stream;
+  stream.GenerateSsrcs(kNumStreams, true, false, &generator);
+  EXPECT_EQ(kNumStreams * 2, stream.ssrcs.size());
+  std::vector<uint32_t> primary_ssrcs, rtx_ssrcs;
+  stream.GetPrimarySsrcs(&primary_ssrcs);
+  EXPECT_EQ(kNumStreams, primary_ssrcs.size());
+  EXPECT_THAT(primary_ssrcs, Each(Ne(0u)));
+  stream.GetFidSsrcs(primary_ssrcs, &rtx_ssrcs);
+  EXPECT_EQ(kNumStreams, rtx_ssrcs.size());
+  EXPECT_THAT(rtx_ssrcs, Each(Ne(0u)));
+  EXPECT_TRUE(stream.has_ssrc_group(cricket::kSimSsrcGroupSemantics));
+  EXPECT_TRUE(stream.has_ssrc_group(cricket::kFidSsrcGroupSemantics));
+}
diff --git a/third_party/libwebrtc/media/base/test_utils.cc b/third_party/libwebrtc/media/base/test_utils.cc
new file mode 100644
index 0000000000..1b288735be
--- /dev/null
+++ b/third_party/libwebrtc/media/base/test_utils.cc
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2004 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/base/test_utils.h"
+
+#include <cstdint>
+
+#include "api/video/video_frame.h"
+#include "api/video/video_source_interface.h"
+
+namespace cricket {
+
+// Builds a StreamParams carrying `ssrcs` as a single SIM (simulcast) group,
+// with RTCP cname `cname`.
+cricket::StreamParams CreateSimStreamParams(
+    const std::string& cname,
+    const std::vector<uint32_t>& ssrcs) {
+  cricket::StreamParams params;
+  params.ssrcs = ssrcs;
+  params.ssrc_groups.push_back(
+      cricket::SsrcGroup(cricket::kSimSsrcGroupSemantics, ssrcs));
+  params.cname = cname;
+  return params;
+}
+
+// Builds a simulcast StreamParams and pairs each primary SSRC with the RTX
+// SSRC at the same index. `rtx_ssrcs` must be at least as long as `ssrcs`
+// (indexing is unchecked).
+cricket::StreamParams CreateSimWithRtxStreamParams(
+    const std::string& cname,
+    const std::vector<uint32_t>& ssrcs,
+    const std::vector<uint32_t>& rtx_ssrcs) {
+  cricket::StreamParams params = CreateSimStreamParams(cname, ssrcs);
+  for (size_t i = 0, n = ssrcs.size(); i != n; ++i) {
+    params.AddFidSsrc(ssrcs[i], rtx_ssrcs[i]);
+  }
+  return params;
+}
+
+// Builds a StreamParams with a single primary SSRC protected by one FlexFEC
+// (FEC-FR) SSRC.
+cricket::StreamParams CreatePrimaryWithFecFrStreamParams(
+    const std::string& cname,
+    uint32_t primary_ssrc,
+    uint32_t flexfec_ssrc) {
+  cricket::StreamParams params;
+  params.cname = cname;
+  params.add_ssrc(primary_ssrc);
+  params.AddFecFrSsrc(primary_ssrc, flexfec_ssrc);
+  return params;
+}
+
+} // namespace cricket
diff --git a/third_party/libwebrtc/media/base/test_utils.h b/third_party/libwebrtc/media/base/test_utils.h
new file mode 100644
index 0000000000..dc14e44046
--- /dev/null
+++ b/third_party/libwebrtc/media/base/test_utils.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2004 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MEDIA_BASE_TEST_UTILS_H_
+#define MEDIA_BASE_TEST_UTILS_H_
+
+#include <string>
+#include <vector>
+
+#include "media/base/media_channel.h"
+#include "media/base/video_common.h"
+#include "rtc_base/arraysize.h"
+
+namespace webrtc {
+class VideoFrame;
+}
+
+namespace cricket {
+
+// Returns size in bytes of an I420 image, rounding the chroma planes up for
+// odd dimensions. Arguments are fully parenthesized so expressions (e.g.
+// I420_SIZE(w + 1, h / 2)) expand correctly.
+#define I420_SIZE(w, h) \
+  ((w) * (h) + ((((w) + 1) / 2) * (((h) + 1) / 2)) * 2)
+// Returns size in bytes of a 32-bit ARGB image.
+#define ARGB_SIZE(w, h) ((w) * (h) * 4)
+
+// Copies the first `s` elements of C array `a` into a std::vector<T>.
+template <class T>
+inline std::vector<T> MakeVector(const T a[], size_t s) {
+  return std::vector<T>(a, a + s);
+}
+// Converts a fixed-size C array into a vector, deducing the length.
+#define MAKE_VECTOR(a) cricket::MakeVector(a, arraysize(a))
+
+// Create a simulcast StreamParams with the given `ssrcs` and `cname`.
+cricket::StreamParams CreateSimStreamParams(const std::string& cname,
+                                            const std::vector<uint32_t>& ssrcs);
+// Create a simulcast stream with given `ssrcs` and `rtx_ssrcs`.
+// The number of `rtx_ssrcs` must match the number of `ssrcs`.
+cricket::StreamParams CreateSimWithRtxStreamParams(
+    const std::string& cname,
+    const std::vector<uint32_t>& ssrcs,
+    const std::vector<uint32_t>& rtx_ssrcs);
+
+// Create StreamParams with single primary SSRC and corresponding FlexFEC SSRC.
+cricket::StreamParams CreatePrimaryWithFecFrStreamParams(
+    const std::string& cname,
+    uint32_t primary_ssrc,
+    uint32_t flexfec_ssrc);
+
+} // namespace cricket
+
+#endif // MEDIA_BASE_TEST_UTILS_H_
diff --git a/third_party/libwebrtc/media/base/turn_utils.cc b/third_party/libwebrtc/media/base/turn_utils.cc
new file mode 100644
index 0000000000..c413117fb6
--- /dev/null
+++ b/third_party/libwebrtc/media/base/turn_utils.cc
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/base/turn_utils.h"
+
+#include "api/transport/stun.h"
+#include "rtc_base/byte_order.h"
+
+namespace cricket {
+
+namespace {
+
+const size_t kTurnChannelHeaderLength = 4;
+
+bool IsTurnChannelData(const uint8_t* data, size_t length) {
+ return length >= kTurnChannelHeaderLength && ((*data & 0xC0) == 0x40);
+}
+
+// True when `data` holds at least a full STUN header whose message type is
+// TURN Send Indication.
+bool IsTurnSendIndicationPacket(const uint8_t* data, size_t length) {
+  return length >= kStunHeaderSize &&
+         rtc::GetBE16(data) == TURN_SEND_INDICATION;
+}
+
+} // namespace
+
+// Locates the application payload inside `packet`:
+// - TURN ChannelData: payload follows the 4-byte channel header.
+// - TURN Send Indication: payload is the value of the STUN DATA attribute.
+// - Anything else: the whole packet is treated as payload.
+// On success, `*content_position` and `*content_size` delimit the payload
+// within `packet`. Returns false when TURN framing is detected but malformed
+// (bad length or missing DATA attribute); the out-params are untouched then.
+bool UnwrapTurnPacket(const uint8_t* packet,
+                      size_t packet_size,
+                      size_t* content_position,
+                      size_t* content_size) {
+  if (IsTurnChannelData(packet, packet_size)) {
+    // Turn Channel Message header format.
+    //   0                   1                   2                   3
+    //   0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+    //  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+    //  |         Channel Number        |            Length             |
+    //  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+    //  |                                                               |
+    //  /                       Application Data                        /
+    //  /                                                               /
+    //  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+    // The 16-bit Length counts only the application data; reject packets
+    // whose declared payload would run past the buffer.
+    size_t length = rtc::GetBE16(&packet[2]);
+    if (length + kTurnChannelHeaderLength > packet_size) {
+      return false;
+    }
+
+    *content_position = kTurnChannelHeaderLength;
+    *content_size = length;
+    return true;
+  }
+
+  if (IsTurnSendIndicationPacket(packet, packet_size)) {
+    // Validate STUN message length (header length field excludes the 20-byte
+    // header itself and must match the buffer exactly).
+    const size_t stun_message_length = rtc::GetBE16(&packet[2]);
+    if (stun_message_length + kStunHeaderSize != packet_size) {
+      return false;
+    }
+
+    // First skip mandatory stun header which is of 20 bytes.
+    size_t pos = kStunHeaderSize;
+    // Loop through STUN attributes until we find STUN DATA attribute.
+    while (pos < packet_size) {
+      // Keep reading STUN attributes until we hit DATA attribute.
+      // Attribute will be a TLV structure.
+      // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+      // |         Type                  |            Length             |
+      // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+      // |                         Value (variable)                ....
+      // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+      // The value in the length field MUST contain the length of the Value
+      // part of the attribute, prior to padding, measured in bytes.  Since
+      // STUN aligns attributes on 32-bit boundaries, attributes whose content
+      // is not a multiple of 4 bytes are padded with 1, 2, or 3 bytes of
+      // padding so that its value contains a multiple of 4 bytes.  The
+      // padding bits are ignored, and may be any value.
+      uint16_t attr_type, attr_length;
+      const int kAttrHeaderLength = sizeof(attr_type) + sizeof(attr_length);
+
+      if (packet_size < pos + kAttrHeaderLength) {
+        return false;
+      }
+
+      // Getting attribute type and length.
+      attr_type = rtc::GetBE16(&packet[pos]);
+      attr_length = rtc::GetBE16(&packet[pos + sizeof(attr_type)]);
+
+      pos += kAttrHeaderLength;  // Skip STUN_DATA_ATTR header.
+
+      // Checking for bogus attribute length.
+      if (pos + attr_length > packet_size) {
+        return false;
+      }
+
+      if (attr_type == STUN_ATTR_DATA) {
+        *content_position = pos;
+        *content_size = attr_length;
+        return true;
+      }
+
+      // Advance past the value plus its 32-bit alignment padding.
+      pos += attr_length;
+      if ((attr_length % 4) != 0) {
+        pos += (4 - (attr_length % 4));
+      }
+    }
+
+    // There is no data attribute present in the message.
+    return false;
+  }
+
+  // This is not a TURN packet.
+  *content_position = 0;
+  *content_size = packet_size;
+  return true;
+}
+
+} // namespace cricket
diff --git a/third_party/libwebrtc/media/base/turn_utils.h b/third_party/libwebrtc/media/base/turn_utils.h
new file mode 100644
index 0000000000..82e492c028
--- /dev/null
+++ b/third_party/libwebrtc/media/base/turn_utils.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MEDIA_BASE_TURN_UTILS_H_
+#define MEDIA_BASE_TURN_UTILS_H_
+
+#include <cstddef>
+#include <cstdint>
+
+#include "rtc_base/system/rtc_export.h"
+
+namespace cricket {
+
+// Finds the application data within a TURN Channel Message or TURN Send
+// Indication message. On success, `*content_position` and `*content_size`
+// delimit the payload inside `packet`; non-TURN packets are passed through
+// whole. Returns false for malformed TURN framing.
+bool RTC_EXPORT UnwrapTurnPacket(const uint8_t* packet,
+                                 size_t packet_size,
+                                 size_t* content_position,
+                                 size_t* content_size);
+
+} // namespace cricket
+
+#endif // MEDIA_BASE_TURN_UTILS_H_
diff --git a/third_party/libwebrtc/media/base/turn_utils_unittest.cc b/third_party/libwebrtc/media/base/turn_utils_unittest.cc
new file mode 100644
index 0000000000..f7bbf8b8d4
--- /dev/null
+++ b/third_party/libwebrtc/media/base/turn_utils_unittest.cc
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/base/turn_utils.h"
+
+#include "test/gtest.h"
+
+namespace cricket {
+
+// Invalid TURN send indication messages. Messages are proper STUN
+// messages with incorrect values in attributes. UnwrapTurnPacket() must
+// return false and, per the assertions below, leave the output parameters
+// untouched.
+TEST(TurnUtilsTest, InvalidTurnSendIndicationMessages) {
+  size_t content_pos = SIZE_MAX;
+  size_t content_size = SIZE_MAX;
+
+  // Stun Indication message with Zero length.
+  // (Declared const for consistency with the other test vectors.)
+  const uint8_t kTurnSendIndicationMsgWithNoAttributes[] = {
+      0x00, 0x16, 0x00, 0x00,  // Zero length
+      0x21, 0x12, 0xA4, 0x42,  // magic cookie
+      '0', '1', '2', '3',      // transaction id
+      '4', '5', '6', '7', '8', '9', 'a', 'b',
+  };
+  EXPECT_FALSE(UnwrapTurnPacket(kTurnSendIndicationMsgWithNoAttributes,
+                                sizeof(kTurnSendIndicationMsgWithNoAttributes),
+                                &content_pos, &content_size));
+  EXPECT_EQ(SIZE_MAX, content_pos);
+  EXPECT_EQ(SIZE_MAX, content_size);
+
+  // Stun Send Indication message with invalid length in stun header.
+  const uint8_t kTurnSendIndicationMsgWithInvalidLength[] = {
+      0x00, 0x16, 0xFF, 0x00,  // length of 0xFF00
+      0x21, 0x12, 0xA4, 0x42,  // magic cookie
+      '0', '1', '2', '3',      // transaction id
+      '4', '5', '6', '7', '8', '9', 'a', 'b',
+  };
+  EXPECT_FALSE(UnwrapTurnPacket(kTurnSendIndicationMsgWithInvalidLength,
+                                sizeof(kTurnSendIndicationMsgWithInvalidLength),
+                                &content_pos, &content_size));
+  EXPECT_EQ(SIZE_MAX, content_pos);
+  EXPECT_EQ(SIZE_MAX, content_size);
+
+  // Stun Send Indication message with no DATA attribute in message.
+  // (Identifier spelling fixed: "Indicatin" -> "Indication".)
+  const uint8_t kTurnSendIndicationMsgWithNoDataAttribute[] = {
+      // clang-format off
+      // clang formatting doesn't respect inline comments.
+      0x00, 0x16, 0x00, 0x08,  // length of
+      0x21, 0x12, 0xA4, 0x42,  // magic cookie
+      '0', '1', '2', '3',  // transaction id
+      '4', '5', '6', '7', '8', '9', 'a', 'b',
+      0x00, 0x20, 0x00, 0x04,  // Mapped address.
+      0x00, 0x00, 0x00, 0x00,
+      // clang-format on
+  };
+  EXPECT_FALSE(
+      UnwrapTurnPacket(kTurnSendIndicationMsgWithNoDataAttribute,
+                       sizeof(kTurnSendIndicationMsgWithNoDataAttribute),
+                       &content_pos, &content_size));
+  EXPECT_EQ(SIZE_MAX, content_pos);
+  EXPECT_EQ(SIZE_MAX, content_size);
+}
+
+// Valid TURN Send Indication messages.
+TEST(TurnUtilsTest, ValidTurnSendIndicationMessage) {
+  size_t content_pos = SIZE_MAX;
+  size_t content_size = SIZE_MAX;
+  // A valid STUN indication message with a valid RTP header in data attribute
+  // payload field and no extension bit set.
+  const uint8_t kTurnSendIndicationMsgWithoutRtpExtension[] = {
+      // clang-format off
+      // clang formatting doesn't respect inline comments.
+      0x00, 0x16, 0x00, 0x18,  // length of
+      0x21, 0x12, 0xA4, 0x42,  // magic cookie
+      '0', '1', '2', '3',  // transaction id
+      '4', '5', '6', '7', '8', '9', 'a', 'b',
+      0x00, 0x20, 0x00, 0x04,  // Mapped address.
+      0x00, 0x00, 0x00, 0x00,
+      0x00, 0x13, 0x00, 0x0C,  // Data attribute.
+      0x80, 0x00, 0x00, 0x00,  // RTP packet.
+      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+      // clang-format on
+  };
+  EXPECT_TRUE(
+      UnwrapTurnPacket(kTurnSendIndicationMsgWithoutRtpExtension,
+                       sizeof(kTurnSendIndicationMsgWithoutRtpExtension),
+                       &content_pos, &content_size));
+  // Payload is the 12-byte DATA attribute value, which starts at offset 32:
+  // 20 (STUN header) + 8 (mapped-address attribute) + 4 (DATA attr header).
+  EXPECT_EQ(12U, content_size);
+  EXPECT_EQ(32U, content_pos);
+}
+
+// Verify parsing of a valid TURN Channel Message.
+TEST(TurnUtilsTest, ValidTurnChannelMessages) {
+  const uint8_t kTurnChannelMsgWithRtpPacket[] = {
+      // clang-format off
+      // clang formatting doesn't respect inline comments.
+      0x40, 0x00, 0x00, 0x0C,
+      0x80, 0x00, 0x00, 0x00,  // RTP packet.
+      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+      // clang-format on
+  };
+
+  size_t content_pos = 0, content_size = 0;
+  EXPECT_TRUE(UnwrapTurnPacket(kTurnChannelMsgWithRtpPacket,
+                               sizeof(kTurnChannelMsgWithRtpPacket),
+                               &content_pos, &content_size));
+  // Payload starts right after the 4-byte ChannelData header
+  // (channel number + length) and matches the declared length of 0x0C.
+  EXPECT_EQ(12U, content_size);
+  EXPECT_EQ(4U, content_pos);
+}
+
+// A ChannelData message with a zero-length payload is valid: unwrapping
+// succeeds with an empty content span just past the 4-byte header.
+TEST(TurnUtilsTest, ChannelMessageZeroLength) {
+  const uint8_t kTurnChannelMsgWithZeroLength[] = {0x40, 0x00, 0x00, 0x00};
+  size_t content_pos = SIZE_MAX;
+  size_t content_size = SIZE_MAX;
+  EXPECT_TRUE(UnwrapTurnPacket(kTurnChannelMsgWithZeroLength,
+                               sizeof(kTurnChannelMsgWithZeroLength),
+                               &content_pos, &content_size));
+  EXPECT_EQ(4u, content_pos);
+  EXPECT_EQ(0u, content_size);
+}
+
+} // namespace cricket
diff --git a/third_party/libwebrtc/media/base/video_adapter.cc b/third_party/libwebrtc/media/base/video_adapter.cc
new file mode 100644
index 0000000000..daac8cf856
--- /dev/null
+++ b/third_party/libwebrtc/media/base/video_adapter.cc
@@ -0,0 +1,470 @@
+/*
+ * Copyright (c) 2010 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/base/video_adapter.h"
+
+#include <algorithm>
+#include <cmath>
+#include <cstdint>
+#include <cstdlib>
+#include <limits>
+#include <utility>
+
+#include "absl/types/optional.h"
+#include "media/base/video_common.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/strings/string_builder.h"
+#include "rtc_base/time_utils.h"
+#include "system_wrappers/include/field_trial.h"
+
+namespace {
+
+// A rational scale factor applied symmetrically to width and height.
+struct Fraction {
+  int numerator;
+  int denominator;
+
+  // Reduce the fraction in place by the greatest common divisor of
+  // `numerator` and `denominator`.
+  void DivideByGcd() {
+    int g = cricket::GreatestCommonDivisor(numerator, denominator);
+    numerator /= g;
+    denominator /= g;
+  }
+
+  // Determines number of output pixels if both width and height of an input of
+  // `input_pixels` pixels is scaled with the fraction numerator / denominator.
+  // The 64-bit intermediate avoids overflow for large pixel counts; the
+  // integer division truncates toward zero.
+  int scale_pixel_count(int input_pixels) {
+    return (numerator * numerator * static_cast<int64_t>(input_pixels)) /
+           (denominator * denominator);
+  }
+};
+
+// Round `value_to_round` to a multiple of `multiple`. Prefer rounding upwards,
+// but never more than `max_value`.
+int roundUp(int value_to_round, int multiple, int max_value) {
+  const int rounded_value =
+      (value_to_round + multiple - 1) / multiple * multiple;
+  // If rounding up overshoots `max_value`, fall back to the largest multiple
+  // of `multiple` that does not exceed it.
+  return rounded_value <= max_value ? rounded_value
+                                    : (max_value / multiple * multiple);
+}
+
+// Generates a scale factor that makes `input_pixels` close to `target_pixels`,
+// but no higher than `max_pixels`.
+Fraction FindScale(int input_width,
+                   int input_height,
+                   int target_pixels,
+                   int max_pixels,
+                   bool variable_start_scale_factor) {
+  // This function only makes sense for a positive target.
+  RTC_DCHECK_GT(target_pixels, 0);
+  RTC_DCHECK_GT(max_pixels, 0);
+  RTC_DCHECK_GE(max_pixels, target_pixels);
+
+  const int input_pixels = input_width * input_height;
+
+  // Don't scale up original.
+  if (target_pixels >= input_pixels)
+    return Fraction{1, 1};
+
+  Fraction current_scale = Fraction{1, 1};
+  Fraction best_scale = Fraction{1, 1};
+
+  if (variable_start_scale_factor) {
+    // Start scaling down by 2/3 depending on `input_width` and `input_height`.
+    // Note: 6/6 and 36/36 both equal 1, but the unreduced form steers the loop
+    // below — a numerator divisible by 3 together with an even denominator
+    // makes the first step(s) a 2/3 reduction instead of 3/4.
+    if (input_width % 3 == 0 && input_height % 3 == 0) {
+      // 2/3 (then alternates 3/4, 2/3, 3/4,...).
+      current_scale = Fraction{6, 6};
+    }
+    if (input_width % 9 == 0 && input_height % 9 == 0) {
+      // 2/3, 2/3 (then alternates 3/4, 2/3, 3/4,...).
+      current_scale = Fraction{36, 36};
+    }
+  }
+
+  // The minimum (absolute) difference between the number of output pixels and
+  // the target pixel count.
+  int min_pixel_diff = std::numeric_limits<int>::max();
+  if (input_pixels <= max_pixels) {
+    // Start condition for 1/1 case, if it is less than max.
+    min_pixel_diff = std::abs(input_pixels - target_pixels);
+  }
+
+  // Alternately scale down by 3/4 and 2/3. This results in fractions which are
+  // effectively scalable. For instance, starting at 1280x720 will result in
+  // the series (3/4) => 960x540, (1/2) => 640x360, (3/8) => 480x270,
+  // (1/4) => 320x180, (3/16) => 240x135, (1/8) => 160x90.
+  while (current_scale.scale_pixel_count(input_pixels) > target_pixels) {
+    if (current_scale.numerator % 3 == 0 &&
+        current_scale.denominator % 2 == 0) {
+      // Multiply by 2/3.
+      current_scale.numerator /= 3;
+      current_scale.denominator /= 2;
+    } else {
+      // Multiply by 3/4.
+      current_scale.numerator *= 3;
+      current_scale.denominator *= 4;
+    }
+
+    // Track the candidate (within `max_pixels`) closest to `target_pixels`.
+    int output_pixels = current_scale.scale_pixel_count(input_pixels);
+    if (output_pixels <= max_pixels) {
+      int diff = std::abs(target_pixels - output_pixels);
+      if (diff < min_pixel_diff) {
+        min_pixel_diff = diff;
+        best_scale = current_scale;
+      }
+    }
+  }
+  best_scale.DivideByGcd();
+
+  return best_scale;
+}
+
+// Returns the pair with its elements swapped, propagating nullopt. Used to
+// compare a landscape aspect ratio against its portrait mirror image.
+absl::optional<std::pair<int, int>> Swap(
+    const absl::optional<std::pair<int, int>>& in) {
+  if (!in) {
+    return absl::nullopt;
+  }
+  return std::make_pair(in->second, in->first);
+}
+
+} // namespace
+
+namespace cricket {
+
+// `source_resolution_alignment` is the fixed alignment requirement of the
+// source; the effective alignment may later grow via OnSinkWants(). All
+// pixel/framerate limits start at INT_MAX, i.e. "unlimited".
+VideoAdapter::VideoAdapter(int source_resolution_alignment)
+    : frames_in_(0),
+      frames_out_(0),
+      frames_scaled_(0),
+      adaption_changes_(0),
+      previous_width_(0),
+      previous_height_(0),
+      // On unless explicitly disabled by field trial.
+      variable_start_scale_factor_(!webrtc::field_trial::IsDisabled(
+          "WebRTC-Video-VariableStartScaleFactor")),
+      source_resolution_alignment_(source_resolution_alignment),
+      resolution_alignment_(source_resolution_alignment),
+      resolution_request_target_pixel_count_(std::numeric_limits<int>::max()),
+      resolution_request_max_pixel_count_(std::numeric_limits<int>::max()),
+      max_framerate_request_(std::numeric_limits<int>::max()) {}
+
+// Default: no source alignment requirement (alignment of 1).
+VideoAdapter::VideoAdapter() : VideoAdapter(1) {}
+
+VideoAdapter::~VideoAdapter() {}
+
+// Returns true if the frame at `in_timestamp_ns` should be dropped to satisfy
+// the current frame-rate limit, which is the minimum of
+// `max_framerate_request_` (from OnSinkWants) and
+// `output_format_request_.max_fps` (from OnOutputFormatRequest).
+bool VideoAdapter::DropFrame(int64_t in_timestamp_ns) {
+  int max_fps = max_framerate_request_;
+  if (output_format_request_.max_fps)
+    max_fps = std::min(max_fps, *output_format_request_.max_fps);
+
+  framerate_controller_.SetMaxFramerate(max_fps);
+  return framerate_controller_.ShouldDropFrame(in_timestamp_ns);
+}
+
+// Computes crop and output dimensions for one input frame. Returns false when
+// the frame should be dropped (framerate throttling or a non-positive pixel
+// budget); returns true and fills the four out-params otherwise.
+bool VideoAdapter::AdaptFrameResolution(int in_width,
+                                        int in_height,
+                                        int64_t in_timestamp_ns,
+                                        int* cropped_width,
+                                        int* cropped_height,
+                                        int* out_width,
+                                        int* out_height) {
+  webrtc::MutexLock lock(&mutex_);
+  ++frames_in_;
+
+  // The max output pixel count is the minimum of the requests from
+  // OnOutputFormatRequest and OnResolutionFramerateRequest.
+  int max_pixel_count = resolution_request_max_pixel_count_;
+
+  // Select target aspect ratio and max pixel count depending on input frame
+  // orientation.
+  absl::optional<std::pair<int, int>> target_aspect_ratio;
+  if (in_width > in_height) {
+    target_aspect_ratio = output_format_request_.target_landscape_aspect_ratio;
+    if (output_format_request_.max_landscape_pixel_count)
+      max_pixel_count = std::min(
+          max_pixel_count, *output_format_request_.max_landscape_pixel_count);
+  } else {
+    // Square frames (in_width == in_height) take the portrait limits.
+    target_aspect_ratio = output_format_request_.target_portrait_aspect_ratio;
+    if (output_format_request_.max_portrait_pixel_count)
+      max_pixel_count = std::min(
+          max_pixel_count, *output_format_request_.max_portrait_pixel_count);
+  }
+
+  int target_pixel_count =
+      std::min(resolution_request_target_pixel_count_, max_pixel_count);
+
+  // Drop the input frame if necessary.
+  if (max_pixel_count <= 0 || DropFrame(in_timestamp_ns)) {
+    // Show VAdapt log every 90 frames dropped. (3 seconds)
+    if ((frames_in_ - frames_out_) % 90 == 0) {
+      // TODO(fbarchard): Reduce to LS_VERBOSE when adapter info is not needed
+      // in default calls.
+      RTC_LOG(LS_INFO) << "VAdapt Drop Frame: scaled " << frames_scaled_
+                       << " / out " << frames_out_ << " / in " << frames_in_
+                       << " Changes: " << adaption_changes_
+                       << " Input: " << in_width << "x" << in_height
+                       << " timestamp: " << in_timestamp_ns
+                       << " Output fps: " << max_framerate_request_ << "/"
+                       << output_format_request_.max_fps.value_or(-1)
+                       << " alignment: " << resolution_alignment_;
+    }
+
+    // Drop frame.
+    return false;
+  }
+
+  // Calculate how the input should be cropped.
+  if (!target_aspect_ratio || target_aspect_ratio->first <= 0 ||
+      target_aspect_ratio->second <= 0) {
+    *cropped_width = in_width;
+    *cropped_height = in_height;
+  } else {
+    // Crop (never pad) toward the requested aspect ratio; the float->int
+    // casts truncate, so the crop may undershoot by up to one pixel.
+    const float requested_aspect =
+        target_aspect_ratio->first /
+        static_cast<float>(target_aspect_ratio->second);
+    *cropped_width =
+        std::min(in_width, static_cast<int>(in_height * requested_aspect));
+    *cropped_height =
+        std::min(in_height, static_cast<int>(in_width / requested_aspect));
+  }
+  const Fraction scale =
+      FindScale(*cropped_width, *cropped_height, target_pixel_count,
+                max_pixel_count, variable_start_scale_factor_);
+  // Adjust cropping slightly to get correctly aligned output size and a perfect
+  // scale factor.
+  *cropped_width = roundUp(*cropped_width,
+                           scale.denominator * resolution_alignment_, in_width);
+  *cropped_height = roundUp(
+      *cropped_height, scale.denominator * resolution_alignment_, in_height);
+  RTC_DCHECK_EQ(0, *cropped_width % scale.denominator);
+  RTC_DCHECK_EQ(0, *cropped_height % scale.denominator);
+
+  // Calculate final output size.
+  *out_width = *cropped_width / scale.denominator * scale.numerator;
+  *out_height = *cropped_height / scale.denominator * scale.numerator;
+  RTC_DCHECK_EQ(0, *out_width % resolution_alignment_);
+  RTC_DCHECK_EQ(0, *out_height % resolution_alignment_);
+
+  ++frames_out_;
+  if (scale.numerator != scale.denominator)
+    ++frames_scaled_;
+
+  // Log whenever the adapted output size changes.
+  if (previous_width_ &&
+      (previous_width_ != *out_width || previous_height_ != *out_height)) {
+    ++adaption_changes_;
+    RTC_LOG(LS_INFO) << "Frame size changed: scaled " << frames_scaled_
+                     << " / out " << frames_out_ << " / in " << frames_in_
+                     << " Changes: " << adaption_changes_
+                     << " Input: " << in_width << "x" << in_height
+                     << " Scale: " << scale.numerator << "/"
+                     << scale.denominator << " Output: " << *out_width << "x"
+                     << *out_height << " fps: " << max_framerate_request_ << "/"
+                     << output_format_request_.max_fps.value_or(-1)
+                     << " alignment: " << resolution_alignment_;
+  }
+
+  previous_width_ = *out_width;
+  previous_height_ = *out_height;
+
+  return true;
+}
+
+// Deprecated VideoFormat-based entry point: converts the format's
+// width/height/interval into an aspect ratio, a pixel-count cap, and an fps
+// cap, then forwards to the three-argument overload.
+void VideoAdapter::OnOutputFormatRequest(
+    const absl::optional<VideoFormat>& format) {
+  absl::optional<std::pair<int, int>> target_aspect_ratio;
+  absl::optional<int> max_pixel_count;
+  absl::optional<int> max_fps;
+  if (format) {
+    target_aspect_ratio = std::make_pair(format->width, format->height);
+    max_pixel_count = format->width * format->height;
+    // Convert the frame interval (ns) to frames per second.
+    if (format->interval > 0)
+      max_fps = rtc::kNumNanosecsPerSec / format->interval;
+  }
+  OnOutputFormatRequest(target_aspect_ratio, max_pixel_count, max_fps);
+}
+
+// Orientation-agnostic entry point: derives a landscape and a portrait
+// variant of `target_aspect_ratio` (so the input orientation is preserved)
+// and forwards to the five-argument overload with the same pixel cap for
+// both orientations.
+void VideoAdapter::OnOutputFormatRequest(
+    const absl::optional<std::pair<int, int>>& target_aspect_ratio,
+    const absl::optional<int>& max_pixel_count,
+    const absl::optional<int>& max_fps) {
+  absl::optional<std::pair<int, int>> target_landscape_aspect_ratio;
+  absl::optional<std::pair<int, int>> target_portrait_aspect_ratio;
+  if (target_aspect_ratio && target_aspect_ratio->first > 0 &&
+      target_aspect_ratio->second > 0) {
+    // Maintain input orientation.
+    const int max_side =
+        std::max(target_aspect_ratio->first, target_aspect_ratio->second);
+    const int min_side =
+        std::min(target_aspect_ratio->first, target_aspect_ratio->second);
+    target_landscape_aspect_ratio = std::make_pair(max_side, min_side);
+    target_portrait_aspect_ratio = std::make_pair(min_side, max_side);
+  }
+  OnOutputFormatRequest(target_landscape_aspect_ratio, max_pixel_count,
+                        target_portrait_aspect_ratio, max_pixel_count, max_fps);
+}
+
+// Full entry point: records the request either as the active
+// `output_format_request_` or, while requested_resolution overrides are in
+// effect (see OnSinkWants), into the stash for later restoration.
+void VideoAdapter::OnOutputFormatRequest(
+    const absl::optional<std::pair<int, int>>& target_landscape_aspect_ratio,
+    const absl::optional<int>& max_landscape_pixel_count,
+    const absl::optional<std::pair<int, int>>& target_portrait_aspect_ratio,
+    const absl::optional<int>& max_portrait_pixel_count,
+    const absl::optional<int>& max_fps) {
+  webrtc::MutexLock lock(&mutex_);
+
+  OutputFormatRequest request = {
+      .target_landscape_aspect_ratio = target_landscape_aspect_ratio,
+      .max_landscape_pixel_count = max_landscape_pixel_count,
+      .target_portrait_aspect_ratio = target_portrait_aspect_ratio,
+      .max_portrait_pixel_count = max_portrait_pixel_count,
+      .max_fps = max_fps};
+
+  if (stashed_output_format_request_) {
+    // Save the output format request for later use in case the encoder making
+    // this call would become active, because currently all active encoders use
+    // requested_resolution instead.
+    stashed_output_format_request_ = request;
+    RTC_LOG(LS_INFO) << "Stashing OnOutputFormatRequest: "
+                     << stashed_output_format_request_->ToString();
+  } else {
+    output_format_request_ = request;
+    RTC_LOG(LS_INFO) << "Setting output_format_request_: "
+                     << output_format_request_.ToString();
+  }
+
+  // A new format request restarts frame-rate throttling.
+  framerate_controller_.Reset();
+}
+
+// Applies aggregated sink constraints. Pixel-count, framerate and alignment
+// limits always take effect; additionally, when every active encoder uses
+// `requested_resolution`, that resolution overrides the source's
+// OnOutputFormatRequest (the original request is stashed and restored later).
+void VideoAdapter::OnSinkWants(const rtc::VideoSinkWants& sink_wants) {
+  webrtc::MutexLock lock(&mutex_);
+  resolution_request_max_pixel_count_ = sink_wants.max_pixel_count;
+  // Fall back to the max pixel count when no target is given.
+  resolution_request_target_pixel_count_ =
+      sink_wants.target_pixel_count.value_or(
+          resolution_request_max_pixel_count_);
+  max_framerate_request_ = sink_wants.max_framerate_fps;
+  // The effective alignment must satisfy both the source and the sink.
+  resolution_alignment_ = cricket::LeastCommonMultiple(
+      source_resolution_alignment_, sink_wants.resolution_alignment);
+
+  if (!sink_wants.aggregates) {
+    RTC_LOG(LS_WARNING)
+        << "These should always be created by VideoBroadcaster!";
+    return;
+  }
+
+  // If requested_resolution is used, and there are no active encoders
+  // that are NOT using requested_resolution (aka newapi), then override
+  // calls to OnOutputFormatRequest and use values from requested_resolution
+  // instead (combined with qualityscaling based on pixel counts above).
+  if (webrtc::field_trial::IsDisabled(
+          "WebRTC-Video-RequestedResolutionOverrideOutputFormatRequest")) {
+    // kill-switch...
+    return;
+  }
+
+  if (!sink_wants.requested_resolution) {
+    if (stashed_output_format_request_) {
+      // because current active_output_format_request is based on
+      // requested_resolution logic, while current encoder(s) doesn't want that,
+      // we have to restore the stashed request.
+      RTC_LOG(LS_INFO) << "Unstashing OnOutputFormatRequest: "
+                       << stashed_output_format_request_->ToString();
+      output_format_request_ = *stashed_output_format_request_;
+      stashed_output_format_request_.reset();
+    }
+    return;
+  }
+
+  // Some active encoder still relies on OnOutputFormatRequest; don't override.
+  if (sink_wants.aggregates->any_active_without_requested_resolution) {
+    return;
+  }
+
+  if (!stashed_output_format_request_) {
+    // The active output format request is about to be rewritten by
+    // request_resolution. We need to save it for later use in case the encoder
+    // which doesn't use request_resolution logic become active in the future.
+    stashed_output_format_request_ = output_format_request_;
+    RTC_LOG(LS_INFO) << "Stashing OnOutputFormatRequest: "
+                     << stashed_output_format_request_->ToString();
+  }
+
+  // Derive an orientation-agnostic output format request from the requested
+  // resolution.
+  auto res = *sink_wants.requested_resolution;
+  auto pixel_count = res.width * res.height;
+  output_format_request_.target_landscape_aspect_ratio =
+      std::make_pair(res.width, res.height);
+  output_format_request_.max_landscape_pixel_count = pixel_count;
+  output_format_request_.target_portrait_aspect_ratio =
+      std::make_pair(res.height, res.width);
+  output_format_request_.max_portrait_pixel_count = pixel_count;
+  output_format_request_.max_fps = max_framerate_request_;
+  RTC_LOG(LS_INFO) << "Setting output_format_request_ based on sink_wants: "
+                   << output_format_request_.ToString();
+}
+
+int VideoAdapter::GetTargetPixels() const {
+  webrtc::MutexLock lock(&mutex_);
+  // Only the sink-requested target (OnSinkWants) is reported here; limits
+  // from OnOutputFormatRequest are not folded in.
+  return resolution_request_target_pixel_count_;
+}
+
+// Returns the current frame-rate limit, or +infinity when unlimited.
+float VideoAdapter::GetMaxFramerate() const {
+  webrtc::MutexLock lock(&mutex_);
+  // Minimum of `output_format_request_.max_fps` and `max_framerate_request_` is
+  // used to throttle frame-rate.
+  int framerate =
+      std::min(max_framerate_request_,
+               output_format_request_.max_fps.value_or(max_framerate_request_));
+  if (framerate == std::numeric_limits<int>::max()) {
+    // Neither source has set a limit.
+    return std::numeric_limits<float>::infinity();
+  } else {
+    // Fix: return the combined (minimum) limit computed above. The previous
+    // code returned `max_framerate_request_`, ignoring a lower
+    // `output_format_request_.max_fps` and contradicting both the comment
+    // above and the throttling actually applied in DropFrame().
+    return framerate;
+  }
+}
+
+// Renders the request for logging. A symmetric request (portrait is the
+// mirror of landscape with equal pixel caps) is printed once; otherwise both
+// orientations are printed separately.
+std::string VideoAdapter::OutputFormatRequest::ToString() const {
+  rtc::StringBuilder oss;
+  oss << "[ ";
+  if (target_landscape_aspect_ratio == Swap(target_portrait_aspect_ratio) &&
+      max_landscape_pixel_count == max_portrait_pixel_count) {
+    if (target_landscape_aspect_ratio) {
+      oss << target_landscape_aspect_ratio->first << "x"
+          << target_landscape_aspect_ratio->second;
+    } else {
+      oss << "unset-resolution";
+    }
+    if (max_landscape_pixel_count) {
+      oss << " max_pixel_count: " << *max_landscape_pixel_count;
+    }
+  } else {
+    oss << "[ landscape: ";
+    if (target_landscape_aspect_ratio) {
+      oss << target_landscape_aspect_ratio->first << "x"
+          << target_landscape_aspect_ratio->second;
+    } else {
+      oss << "unset";
+    }
+    if (max_landscape_pixel_count) {
+      oss << " max_pixel_count: " << *max_landscape_pixel_count;
+    }
+    oss << " ] [ portrait: ";
+    if (target_portrait_aspect_ratio) {
+      oss << target_portrait_aspect_ratio->first << "x"
+          << target_portrait_aspect_ratio->second;
+    } else {
+      // Fix: mirror the landscape branch. Previously an unset portrait aspect
+      // ratio printed nothing, producing inconsistent log output.
+      oss << "unset";
+    }
+    if (max_portrait_pixel_count) {
+      oss << " max_pixel_count: " << *max_portrait_pixel_count;
+    }
+    oss << " ]";
+  }
+  oss << " max_fps: ";
+  if (max_fps) {
+    oss << *max_fps;
+  } else {
+    oss << "unset";
+  }
+  oss << " ]";
+  return oss.Release();
+}
+
+} // namespace cricket
diff --git a/third_party/libwebrtc/media/base/video_adapter.h b/third_party/libwebrtc/media/base/video_adapter.h
new file mode 100644
index 0000000000..b3e69c492b
--- /dev/null
+++ b/third_party/libwebrtc/media/base/video_adapter.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2010 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MEDIA_BASE_VIDEO_ADAPTER_H_
+#define MEDIA_BASE_VIDEO_ADAPTER_H_
+
+#include <stdint.h>
+
+#include <string>
+#include <utility>
+
+#include "absl/types/optional.h"
+#include "api/video/video_source_interface.h"
+#include "common_video/framerate_controller.h"
+#include "media/base/video_common.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/system/rtc_export.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace cricket {
+
+// VideoAdapter adapts an input video frame to an output frame based on the
+// specified input and output formats. The adaptation includes dropping frames
+// to reduce frame rate and scaling frames.
+// VideoAdapter is thread safe.
+class RTC_EXPORT VideoAdapter {
+ public:
+  // Constructs an adapter with a source resolution alignment of 1.
+  VideoAdapter();
+  // The source requests output frames whose width and height are divisible
+  // by `source_resolution_alignment`.
+  explicit VideoAdapter(int source_resolution_alignment);
+  virtual ~VideoAdapter();
+
+  VideoAdapter(const VideoAdapter&) = delete;
+  VideoAdapter& operator=(const VideoAdapter&) = delete;
+
+  // Return the adapted resolution and cropping parameters given the
+  // input resolution. The input frame should first be cropped, then
+  // scaled to the final output resolution. Returns true if the frame
+  // should be adapted, and false if it should be dropped.
+  bool AdaptFrameResolution(int in_width,
+                            int in_height,
+                            int64_t in_timestamp_ns,
+                            int* cropped_width,
+                            int* cropped_height,
+                            int* out_width,
+                            int* out_height) RTC_LOCKS_EXCLUDED(mutex_);
+
+  // DEPRECATED. Please use OnOutputFormatRequest below.
+  // TODO(asapersson): Remove this once it is no longer used.
+  // Requests the output frame size and frame interval from
+  // `AdaptFrameResolution` to not be larger than `format`. Also, the input
+  // frame size will be cropped to match the requested aspect ratio. The
+  // requested aspect ratio is orientation agnostic and will be adjusted to
+  // maintain the input orientation, so it doesn't matter if e.g. 1280x720 or
+  // 720x1280 is requested.
+  // Note: Should be called from the source only.
+  void OnOutputFormatRequest(const absl::optional<VideoFormat>& format)
+      RTC_LOCKS_EXCLUDED(mutex_);
+
+  // Requests output frame size and frame interval from `AdaptFrameResolution`.
+  // `target_aspect_ratio`: The input frame size will be cropped to match the
+  // requested aspect ratio. The aspect ratio is orientation agnostic and will
+  // be adjusted to maintain the input orientation (i.e. it doesn't matter if
+  // e.g. <1280,720> or <720,1280> is requested).
+  // `max_pixel_count`: The maximum output frame size.
+  // `max_fps`: The maximum output framerate.
+  // Note: Should be called from the source only.
+  void OnOutputFormatRequest(
+      const absl::optional<std::pair<int, int>>& target_aspect_ratio,
+      const absl::optional<int>& max_pixel_count,
+      const absl::optional<int>& max_fps) RTC_LOCKS_EXCLUDED(mutex_);
+
+  // Same as above, but allows setting two different target aspect ratios
+  // depending on incoming frame orientation. This gives more fine-grained
+  // control and can e.g. be used to force landscape video to be cropped to
+  // portrait video.
+  void OnOutputFormatRequest(
+      const absl::optional<std::pair<int, int>>& target_landscape_aspect_ratio,
+      const absl::optional<int>& max_landscape_pixel_count,
+      const absl::optional<std::pair<int, int>>& target_portrait_aspect_ratio,
+      const absl::optional<int>& max_portrait_pixel_count,
+      const absl::optional<int>& max_fps) RTC_LOCKS_EXCLUDED(mutex_);
+
+  // Requests the output frame size from `AdaptFrameResolution` to have as close
+  // as possible to `sink_wants.target_pixel_count` pixels (if set)
+  // but no more than `sink_wants.max_pixel_count`.
+  // `sink_wants.max_framerate_fps` is essentially analogous to
+  // `sink_wants.max_pixel_count`, but for framerate rather than resolution.
+  // Set `sink_wants.max_pixel_count` and/or `sink_wants.max_framerate_fps` to
+  // std::numeric_limit<int>::max() if no upper limit is desired.
+  // The sink resolution alignment requirement is given by
+  // `sink_wants.resolution_alignment`.
+  // Note: Should be called from the sink only.
+  void OnSinkWants(const rtc::VideoSinkWants& sink_wants)
+      RTC_LOCKS_EXCLUDED(mutex_);
+
+  // Returns maximum image area, which shouldn't impose any adaptations.
+  // Can return `numeric_limits<int>::max()` if no limit is set.
+  int GetTargetPixels() const;
+
+  // Returns current frame-rate limit.
+  // Can return `numeric_limits<float>::infinity()` if no limit is set.
+  float GetMaxFramerate() const;
+
+ private:
+  // Determine if frame should be dropped based on input fps and requested fps.
+  bool DropFrame(int64_t in_timestamp_ns) RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+  int frames_in_ RTC_GUARDED_BY(mutex_);      // Number of input frames.
+  int frames_out_ RTC_GUARDED_BY(mutex_);     // Number of output frames.
+  int frames_scaled_ RTC_GUARDED_BY(mutex_);  // Number of frames scaled.
+  int adaption_changes_
+      RTC_GUARDED_BY(mutex_);  // Number of changes in scale factor.
+  int previous_width_ RTC_GUARDED_BY(mutex_);  // Previous adapter output width.
+  int previous_height_
+      RTC_GUARDED_BY(mutex_);  // Previous adapter output height.
+  // Whether FindScale may start from a 2/3 factor (field-trial controlled).
+  const bool variable_start_scale_factor_;
+
+  // The fixed source resolution alignment requirement.
+  const int source_resolution_alignment_;
+  // The currently applied resolution alignment, as given by the requirements:
+  // - the fixed `source_resolution_alignment_`; and
+  // - the latest `sink_wants.resolution_alignment`.
+  int resolution_alignment_ RTC_GUARDED_BY(mutex_);
+
+  // Max number of pixels/fps requested via calls to OnOutputFormatRequest,
+  // OnResolutionFramerateRequest respectively.
+  // The adapted output format is the minimum of these.
+  struct OutputFormatRequest {
+    absl::optional<std::pair<int, int>> target_landscape_aspect_ratio;
+    absl::optional<int> max_landscape_pixel_count;
+    absl::optional<std::pair<int, int>> target_portrait_aspect_ratio;
+    absl::optional<int> max_portrait_pixel_count;
+    absl::optional<int> max_fps;
+
+    // For logging.
+    std::string ToString() const;
+  };
+
+  OutputFormatRequest output_format_request_ RTC_GUARDED_BY(mutex_);
+  int resolution_request_target_pixel_count_ RTC_GUARDED_BY(mutex_);
+  int resolution_request_max_pixel_count_ RTC_GUARDED_BY(mutex_);
+  int max_framerate_request_ RTC_GUARDED_BY(mutex_);
+
+  // Stashed OutputFormatRequest that is used to save value of
+  // OnOutputFormatRequest in case all active encoders are using
+  // requested_resolution. I.e when all active encoders are using
+  // requested_resolution, the call to OnOutputFormatRequest is ignored
+  // and the value from requested_resolution is used instead (to scale/crop
+  // frame). This allows for an application to only use
+  // RtpEncodingParameters::request_resolution and get the same behavior as if
+  // it had used VideoAdapter::OnOutputFormatRequest.
+  absl::optional<OutputFormatRequest> stashed_output_format_request_
+      RTC_GUARDED_BY(mutex_);
+
+  webrtc::FramerateController framerate_controller_ RTC_GUARDED_BY(mutex_);
+
+  // The critical section to protect the above variables.
+  mutable webrtc::Mutex mutex_;
+};
+
+} // namespace cricket
+
+#endif // MEDIA_BASE_VIDEO_ADAPTER_H_
diff --git a/third_party/libwebrtc/media/base/video_adapter_unittest.cc b/third_party/libwebrtc/media/base/video_adapter_unittest.cc
new file mode 100644
index 0000000000..778e61e74c
--- /dev/null
+++ b/third_party/libwebrtc/media/base/video_adapter_unittest.cc
@@ -0,0 +1,1336 @@
+/*
+ * Copyright (c) 2010 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/base/video_adapter.h"
+
+#include <limits>
+#include <memory>
+#include <string>
+#include <utility>
+
+#include "api/video/resolution.h"
+#include "api/video/video_frame.h"
+#include "api/video/video_source_interface.h"
+#include "media/base/fake_frame_source.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/time_utils.h"
+#include "test/field_trial.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace cricket {
+namespace {
+// Source frame geometry and capture rate shared by all tests: 720p @ 30fps.
+const int kWidth = 1280;
+const int kHeight = 720;
+const int kDefaultFps = 30;
+
+using ::testing::_;
+using ::testing::Eq;
+using ::testing::Pair;
+using webrtc::Resolution;
+
+// Builds active VideoSinkWants with explicit pixel-count, framerate and
+// alignment limits. Aggregates are populated with
+// any_active_without_requested_resolution = false.
+rtc::VideoSinkWants BuildSinkWants(absl::optional<int> target_pixel_count,
+                                   int max_pixel_count,
+                                   int max_framerate_fps,
+                                   int sink_alignment = 1) {
+  rtc::VideoSinkWants wants;
+  wants.target_pixel_count = target_pixel_count;
+  wants.max_pixel_count = max_pixel_count;
+  wants.max_framerate_fps = max_framerate_fps;
+  wants.resolution_alignment = sink_alignment;
+  wants.is_active = true;
+  wants.aggregates.emplace(rtc::VideoSinkWants::Aggregates());
+  wants.aggregates->any_active_without_requested_resolution = false;
+  return wants;
+}
+
+// Builds active VideoSinkWants from an optional requested resolution. With a
+// resolution, the pixel-count limits match it exactly and
+// requested_resolution is set; without one, the limits default to the test
+// source size (kWidth x kHeight).
+rtc::VideoSinkWants BuildSinkWants(
+    absl::optional<webrtc::Resolution> requested_resolution,
+    bool any_active_without_requested_resolution) {
+  rtc::VideoSinkWants wants;
+  wants.max_framerate_fps = kDefaultFps;
+  wants.resolution_alignment = 1;
+  wants.is_active = true;
+  if (requested_resolution) {
+    wants.target_pixel_count = requested_resolution->PixelCount();
+    wants.max_pixel_count = requested_resolution->PixelCount();
+    wants.requested_resolution.emplace(rtc::VideoSinkWants::FrameSize(
+        requested_resolution->width, requested_resolution->height));
+  } else {
+    wants.target_pixel_count = kWidth * kHeight;
+    wants.max_pixel_count = kWidth * kHeight;
+  }
+  wants.aggregates.emplace(rtc::VideoSinkWants::Aggregates());
+  wants.aggregates->any_active_without_requested_resolution =
+      any_active_without_requested_resolution;
+  return wants;
+}
+
+}  // namespace
+
+// Parameterized fixture for cricket::VideoAdapter. The bool test parameter
+// selects which OnOutputFormatRequest() overload the helper below uses: the
+// new aspect-ratio/pixel-count API (true) or the legacy VideoFormat API
+// (false).
+class VideoAdapterTest : public ::testing::Test,
+                         public ::testing::WithParamInterface<bool> {
+ public:
+  VideoAdapterTest() : VideoAdapterTest("", 1) {}
+  explicit VideoAdapterTest(const std::string& field_trials,
+                            int source_resolution_alignment)
+      : override_field_trials_(field_trials),
+        frame_source_(std::make_unique<FakeFrameSource>(
+            kWidth,
+            kHeight,
+            VideoFormat::FpsToInterval(kDefaultFps) /
+                rtc::kNumNanosecsPerMicrosec)),
+        adapter_(source_resolution_alignment),
+        adapter_wrapper_(std::make_unique<VideoAdapterWrapper>(&adapter_)),
+        use_new_format_request_(GetParam()) {}
+
+ protected:
+  // Wrap a VideoAdapter and collect stats.
+  class VideoAdapterWrapper {
+   public:
+    // Counters and the most recent adaptation result.
+    struct Stats {
+      int captured_frames = 0;
+      int dropped_frames = 0;
+      bool last_adapt_was_no_op = false;
+
+      int cropped_width = 0;
+      int cropped_height = 0;
+      int out_width = 0;
+      int out_height = 0;
+    };
+
+    explicit VideoAdapterWrapper(VideoAdapter* adapter)
+        : video_adapter_(adapter) {}
+
+    // Feeds one frame through the adapter; records the resulting
+    // crop/output resolution on success, or counts a drop on failure.
+    void AdaptFrame(const webrtc::VideoFrame& frame) {
+      const int in_width = frame.width();
+      const int in_height = frame.height();
+      int cropped_width;
+      int cropped_height;
+      int out_width;
+      int out_height;
+      if (video_adapter_->AdaptFrameResolution(
+              in_width, in_height,
+              frame.timestamp_us() * rtc::kNumNanosecsPerMicrosec,
+              &cropped_width, &cropped_height, &out_width, &out_height)) {
+        stats_.cropped_width = cropped_width;
+        stats_.cropped_height = cropped_height;
+        stats_.out_width = out_width;
+        stats_.out_height = out_height;
+        stats_.last_adapt_was_no_op =
+            (in_width == cropped_width && in_height == cropped_height &&
+             in_width == out_width && in_height == out_height);
+      } else {
+        ++stats_.dropped_frames;
+      }
+      ++stats_.captured_frames;
+    }
+
+    Stats GetStats() const { return stats_; }
+
+   private:
+    VideoAdapter* video_adapter_;
+    Stats stats_;
+  };
+
+  void VerifyAdaptedResolution(const VideoAdapterWrapper::Stats& stats,
+                               int cropped_width,
+                               int cropped_height,
+                               int out_width,
+                               int out_height) {
+    EXPECT_EQ(cropped_width, stats.cropped_width);
+    EXPECT_EQ(cropped_height, stats.cropped_height);
+    EXPECT_EQ(out_width, stats.out_width);
+    EXPECT_EQ(out_height, stats.out_height);
+  }
+
+  // Issues a format request through either the new pair-based overload or the
+  // legacy VideoFormat overload, depending on the test parameter. Both paths
+  // express the same constraint: width x height aspect/pixel cap plus an
+  // optional fps cap.
+  void OnOutputFormatRequest(int width,
+                             int height,
+                             const absl::optional<int>& fps) {
+    if (use_new_format_request_) {
+      absl::optional<std::pair<int, int>> target_aspect_ratio =
+          std::make_pair(width, height);
+      absl::optional<int> max_pixel_count = width * height;
+      absl::optional<int> max_fps = fps;
+      adapter_.OnOutputFormatRequest(target_aspect_ratio, max_pixel_count,
+                                     max_fps);
+      return;
+    }
+    adapter_.OnOutputFormatRequest(
+        VideoFormat(width, height, fps ? VideoFormat::FpsToInterval(*fps) : 0,
+                    cricket::FOURCC_I420));
+  }
+
+  // Return pair of <out resolution, cropping>. Advances the internal
+  // timestamp by one second per call so frames are never rate-dropped.
+  std::pair<webrtc::Resolution, webrtc::Resolution> AdaptFrameResolution(
+      webrtc::Resolution res) {
+    webrtc::Resolution out;
+    webrtc::Resolution cropped;
+    timestamp_ns_ += 1000000000;
+    EXPECT_TRUE(adapter_.AdaptFrameResolution(
+        res.width, res.height, timestamp_ns_, &cropped.width, &cropped.height,
+        &out.width, &out.height));
+    return std::make_pair(out, cropped);
+  }
+
+  webrtc::test::ScopedFieldTrials override_field_trials_;
+  const std::unique_ptr<FakeFrameSource> frame_source_;
+  VideoAdapter adapter_;
+  int64_t timestamp_ns_ = 0;
+  // Outputs of the most recent direct AdaptFrameResolution() call made by a
+  // test body.
+  int cropped_width_;
+  int cropped_height_;
+  int out_width_;
+  int out_height_;
+  const std::unique_ptr<VideoAdapterWrapper> adapter_wrapper_;
+  const bool use_new_format_request_;
+};
+
+// Run every test twice: once with the new OnOutputFormatRequest API and once
+// with the legacy VideoFormat-based API (see use_new_format_request_).
+INSTANTIATE_TEST_SUITE_P(OnOutputFormatRequests,
+                         VideoAdapterTest,
+                         ::testing::Values(true, false));
+
+// Do not adapt the frame rate or the resolution. Expect no frame drop, no
+// cropping, and no resolution change (pure pass-through).
+TEST_P(VideoAdapterTest, AdaptNothing) {
+  for (int i = 0; i < 10; ++i)
+    adapter_wrapper_->AdaptFrame(frame_source_->GetFrame());
+
+  // Verify no frame drop and no resolution change.
+  VideoAdapterWrapper::Stats stats = adapter_wrapper_->GetStats();
+  EXPECT_GE(stats.captured_frames, 10);
+  EXPECT_EQ(0, stats.dropped_frames);
+  VerifyAdaptedResolution(stats, kWidth, kHeight, kWidth, kHeight);
+  EXPECT_TRUE(stats.last_adapt_was_no_op);
+}
+
+// A format request without an fps cap (zero frame interval in the legacy API)
+// must neither crash nor drop frames.
+TEST_P(VideoAdapterTest, AdaptZeroInterval) {
+  OnOutputFormatRequest(kWidth, kHeight, absl::nullopt);
+  for (int i = 0; i < 40; ++i)
+    adapter_wrapper_->AdaptFrame(frame_source_->GetFrame());
+
+  // Verify no crash and that frames aren't dropped.
+  VideoAdapterWrapper::Stats stats = adapter_wrapper_->GetStats();
+  EXPECT_GE(stats.captured_frames, 40);
+  EXPECT_EQ(0, stats.dropped_frames);
+  VerifyAdaptedResolution(stats, kWidth, kHeight, kWidth, kHeight);
+}
+
+// Adapt the frame rate to be half of the capture rate at the beginning. Expect
+// the number of dropped frames to be half of the number of captured frames.
+TEST_P(VideoAdapterTest, AdaptFramerateToHalf) {
+  OnOutputFormatRequest(kWidth, kHeight, kDefaultFps / 2);
+
+  // Capture 10 frames and verify that every other frame is dropped. The first
+  // frame should not be dropped.
+  for (int i = 0; i < 10; ++i)
+    adapter_wrapper_->AdaptFrame(frame_source_->GetFrame());
+  EXPECT_GE(adapter_wrapper_->GetStats().captured_frames, 10);
+  EXPECT_EQ(5, adapter_wrapper_->GetStats().dropped_frames);
+}
+
+// Adapt the frame rate to be two thirds of the capture rate at the beginning.
+// Expect the number of dropped frames to be one third of the number of
+// captured frames.
+TEST_P(VideoAdapterTest, AdaptFramerateToTwoThirds) {
+  OnOutputFormatRequest(kWidth, kHeight, kDefaultFps * 2 / 3);
+
+  // Capture 10 frames and verify that every third frame is dropped. The first
+  // frame should not be dropped.
+  for (int i = 0; i < 10; ++i)
+    adapter_wrapper_->AdaptFrame(frame_source_->GetFrame());
+  EXPECT_GE(adapter_wrapper_->GetStats().captured_frames, 10);
+  EXPECT_EQ(3, adapter_wrapper_->GetStats().dropped_frames);
+}
+
+// Request frame rate twice as high as captured frame rate. The limiter never
+// kicks in, so expect no frame drop.
+TEST_P(VideoAdapterTest, AdaptFramerateHighLimit) {
+  OnOutputFormatRequest(kWidth, kHeight, kDefaultFps * 2);
+
+  for (int i = 0; i < 10; ++i)
+    adapter_wrapper_->AdaptFrame(frame_source_->GetFrame());
+
+  // Verify no frame drop.
+  EXPECT_EQ(0, adapter_wrapper_->GetStats().dropped_frames);
+}
+
+// Adapt the frame rate to be half of the capture rate. No resolution limit
+// set. Expect the number of dropped frames to be half of the number of
+// captured frames.
+TEST_P(VideoAdapterTest, AdaptFramerateToHalfWithNoPixelLimit) {
+  adapter_.OnOutputFormatRequest(absl::nullopt, absl::nullopt, kDefaultFps / 2);
+
+  // Capture 10 frames and verify that every other frame is dropped. The first
+  // frame should not be dropped.
+  int expected_dropped_frames = 0;
+  for (int i = 0; i < 10; ++i) {
+    adapter_wrapper_->AdaptFrame(frame_source_->GetFrame());
+    EXPECT_GE(adapter_wrapper_->GetStats().captured_frames, i + 1);
+    if (i % 2 == 1)
+      ++expected_dropped_frames;
+    EXPECT_EQ(expected_dropped_frames,
+              adapter_wrapper_->GetStats().dropped_frames);
+    VerifyAdaptedResolution(adapter_wrapper_->GetStats(), kWidth, kHeight,
+                            kWidth, kHeight);
+  }
+}
+
+// Adapt the frame rate to be half of the capture rate after capturing no less
+// than 10 frames. Expect no frames dropped before adaptation and frames
+// dropped after adaptation.
+TEST_P(VideoAdapterTest, AdaptFramerateOntheFly) {
+  OnOutputFormatRequest(kWidth, kHeight, kDefaultFps);
+  for (int i = 0; i < 10; ++i)
+    adapter_wrapper_->AdaptFrame(frame_source_->GetFrame());
+
+  // Verify no frame drop before adaptation.
+  EXPECT_EQ(0, adapter_wrapper_->GetStats().dropped_frames);
+
+  // Adapt the frame rate.
+  OnOutputFormatRequest(kWidth, kHeight, kDefaultFps / 2);
+  for (int i = 0; i < 20; ++i)
+    adapter_wrapper_->AdaptFrame(frame_source_->GetFrame());
+
+  // Verify frame drop after adaptation.
+  EXPECT_GT(adapter_wrapper_->GetStats().dropped_frames, 0);
+}
+
+// Do not adapt the frame rate or the resolution: sink wants with unbounded
+// pixel and framerate limits. Expect no frame drop, no cropping, and no
+// resolution change.
+TEST_P(VideoAdapterTest, AdaptFramerateRequestMax) {
+  adapter_.OnSinkWants(BuildSinkWants(absl::nullopt,
+                                      std::numeric_limits<int>::max(),
+                                      std::numeric_limits<int>::max()));
+
+  for (int i = 0; i < 10; ++i)
+    adapter_wrapper_->AdaptFrame(frame_source_->GetFrame());
+
+  // Verify no frame drop and no resolution change.
+  VideoAdapterWrapper::Stats stats = adapter_wrapper_->GetStats();
+  EXPECT_GE(stats.captured_frames, 10);
+  EXPECT_EQ(0, stats.dropped_frames);
+  VerifyAdaptedResolution(stats, kWidth, kHeight, kWidth, kHeight);
+  EXPECT_TRUE(stats.last_adapt_was_no_op);
+}
+
+// A max framerate of zero must not crash, and must drop every frame.
+TEST_P(VideoAdapterTest, AdaptFramerateRequestZero) {
+  adapter_.OnSinkWants(
+      BuildSinkWants(absl::nullopt, std::numeric_limits<int>::max(), 0));
+  for (int i = 0; i < 10; ++i)
+    adapter_wrapper_->AdaptFrame(frame_source_->GetFrame());
+
+  // Verify no crash and that all frames are dropped.
+  VideoAdapterWrapper::Stats stats = adapter_wrapper_->GetStats();
+  EXPECT_GE(stats.captured_frames, 10);
+  EXPECT_EQ(10, stats.dropped_frames);
+}
+
+// Adapt the frame rate to be half of the capture rate at the beginning. Expect
+// the number of dropped frames to be half of the number of captured frames.
+TEST_P(VideoAdapterTest, AdaptFramerateRequestHalf) {
+  adapter_.OnSinkWants(BuildSinkWants(
+      absl::nullopt, std::numeric_limits<int>::max(), kDefaultFps / 2));
+  for (int i = 0; i < 10; ++i)
+    adapter_wrapper_->AdaptFrame(frame_source_->GetFrame());
+
+  // Verify no crash and that every other frame is dropped, with no resolution
+  // change.
+  VideoAdapterWrapper::Stats stats = adapter_wrapper_->GetStats();
+  EXPECT_GE(stats.captured_frames, 10);
+  EXPECT_EQ(5, stats.dropped_frames);
+  VerifyAdaptedResolution(stats, kWidth, kHeight, kWidth, kHeight);
+}
+
+// Set a very high output pixel resolution (10x the input in each dimension).
+// Expect no cropping or resolution change.
+TEST_P(VideoAdapterTest, AdaptFrameResolutionHighLimit) {
+  OnOutputFormatRequest(kWidth * 10, kHeight * 10, kDefaultFps);
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(kWidth, kHeight, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  EXPECT_EQ(kWidth, cropped_width_);
+  EXPECT_EQ(kHeight, cropped_height_);
+  EXPECT_EQ(kWidth, out_width_);
+  EXPECT_EQ(kHeight, out_height_);
+}
+
+// Adapt the frame resolution to be the same as capture resolution. Expect no
+// cropping or resolution change (identity request).
+TEST_P(VideoAdapterTest, AdaptFrameResolutionIdentical) {
+  OnOutputFormatRequest(kWidth, kHeight, kDefaultFps);
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(kWidth, kHeight, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  EXPECT_EQ(kWidth, cropped_width_);
+  EXPECT_EQ(kHeight, cropped_height_);
+  EXPECT_EQ(kWidth, out_width_);
+  EXPECT_EQ(kHeight, out_height_);
+}
+
+// Adapt the frame resolution to be a quarter of the capture resolution (half
+// in each dimension). Expect no cropping, but a resolution change.
+TEST_P(VideoAdapterTest, AdaptFrameResolutionQuarter) {
+  OnOutputFormatRequest(kWidth / 2, kHeight / 2, kDefaultFps);
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(kWidth, kHeight, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  EXPECT_EQ(kWidth, cropped_width_);
+  EXPECT_EQ(kHeight, cropped_height_);
+  EXPECT_EQ(kWidth / 2, out_width_);
+  EXPECT_EQ(kHeight / 2, out_height_);
+}
+
+// Adapt the pixel resolution to 0. Expect frame drop (AdaptFrameResolution
+// returns false).
+TEST_P(VideoAdapterTest, AdaptFrameResolutionDrop) {
+  OnOutputFormatRequest(kWidth * 0, kHeight * 0, kDefaultFps);
+  EXPECT_FALSE(adapter_.AdaptFrameResolution(kWidth, kHeight, 0,
+                                             &cropped_width_, &cropped_height_,
+                                             &out_width_, &out_height_));
+}
+
+// Adapt the frame resolution to be a quarter of the capture resolution at the
+// beginning. Expect no cropping but a resolution change (scaling only).
+TEST_P(VideoAdapterTest, AdaptResolution) {
+  OnOutputFormatRequest(kWidth / 2, kHeight / 2, kDefaultFps);
+  for (int i = 0; i < 10; ++i)
+    adapter_wrapper_->AdaptFrame(frame_source_->GetFrame());
+
+  // Verify no frame drop, no cropping, and resolution change.
+  VideoAdapterWrapper::Stats stats = adapter_wrapper_->GetStats();
+  EXPECT_EQ(0, stats.dropped_frames);
+  VerifyAdaptedResolution(stats, kWidth, kHeight, kWidth / 2, kHeight / 2);
+}
+
+// Adapt the frame resolution to be a quarter of the capture resolution after
+// capturing no less than 10 frames. Expect no resolution change before
+// adaptation and a resolution change after adaptation.
+TEST_P(VideoAdapterTest, AdaptResolutionOnTheFly) {
+  OnOutputFormatRequest(kWidth, kHeight, kDefaultFps);
+  for (int i = 0; i < 10; ++i)
+    adapter_wrapper_->AdaptFrame(frame_source_->GetFrame());
+
+  // Verify no resolution change before adaptation.
+  VerifyAdaptedResolution(adapter_wrapper_->GetStats(), kWidth, kHeight, kWidth,
+                          kHeight);
+
+  // Adapt the frame resolution.
+  OnOutputFormatRequest(kWidth / 2, kHeight / 2, kDefaultFps);
+  for (int i = 0; i < 10; ++i)
+    adapter_wrapper_->AdaptFrame(frame_source_->GetFrame());
+
+  // Verify resolution change after adaptation.
+  VerifyAdaptedResolution(adapter_wrapper_->GetStats(), kWidth, kHeight,
+                          kWidth / 2, kHeight / 2);
+}
+
+// Drop all frames for resolution 0x0 (every captured frame counts as
+// dropped).
+TEST_P(VideoAdapterTest, DropAllFrames) {
+  OnOutputFormatRequest(kWidth * 0, kHeight * 0, kDefaultFps);
+  for (int i = 0; i < 10; ++i)
+    adapter_wrapper_->AdaptFrame(frame_source_->GetFrame());
+
+  // Verify all frames are dropped.
+  VideoAdapterWrapper::Stats stats = adapter_wrapper_->GetStats();
+  EXPECT_GE(stats.captured_frames, 10);
+  EXPECT_EQ(stats.captured_frames, stats.dropped_frames);
+}
+
+// Walks a 640x400 input through a sequence of format requests, verifying the
+// adapter's cropping and scaling decision at each step.
+TEST_P(VideoAdapterTest, TestOnOutputFormatRequest) {
+  // No request yet: pass-through.
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(640, 400, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  EXPECT_EQ(640, cropped_width_);
+  EXPECT_EQ(400, cropped_height_);
+  EXPECT_EQ(640, out_width_);
+  EXPECT_EQ(400, out_height_);
+
+  // Format request 640x400.
+  OnOutputFormatRequest(640, 400, absl::nullopt);
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(640, 400, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  EXPECT_EQ(640, cropped_width_);
+  EXPECT_EQ(400, cropped_height_);
+  EXPECT_EQ(640, out_width_);
+  EXPECT_EQ(400, out_height_);
+
+  // Request 1280x720, higher than input, but aspect 16:9. Expect cropping but
+  // no scaling.
+  OnOutputFormatRequest(1280, 720, absl::nullopt);
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(640, 400, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  EXPECT_EQ(640, cropped_width_);
+  EXPECT_EQ(360, cropped_height_);
+  EXPECT_EQ(640, out_width_);
+  EXPECT_EQ(360, out_height_);
+
+  // Request 0x0. Expect the frame to be dropped.
+  OnOutputFormatRequest(0, 0, absl::nullopt);
+  EXPECT_FALSE(adapter_.AdaptFrameResolution(640, 400, 0, &cropped_width_,
+                                             &cropped_height_, &out_width_,
+                                             &out_height_));
+
+  // Request 320x200. Expect scaling, but no cropping.
+  OnOutputFormatRequest(320, 200, absl::nullopt);
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(640, 400, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  EXPECT_EQ(640, cropped_width_);
+  EXPECT_EQ(400, cropped_height_);
+  EXPECT_EQ(320, out_width_);
+  EXPECT_EQ(200, out_height_);
+
+  // Request resolution close to 2/3 scale. Expect adapt down. Scaling to 2/3
+  // is not optimized and not allowed, therefore 1/2 scaling will be used
+  // instead.
+  OnOutputFormatRequest(424, 265, absl::nullopt);
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(640, 400, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  EXPECT_EQ(640, cropped_width_);
+  EXPECT_EQ(400, cropped_height_);
+  EXPECT_EQ(320, out_width_);
+  EXPECT_EQ(200, out_height_);
+
+  // Request resolution of 3 / 8. Expect adapt down.
+  OnOutputFormatRequest(640 * 3 / 8, 400 * 3 / 8, absl::nullopt);
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(640, 400, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  EXPECT_EQ(640, cropped_width_);
+  EXPECT_EQ(400, cropped_height_);
+  EXPECT_EQ(640 * 3 / 8, out_width_);
+  EXPECT_EQ(400 * 3 / 8, out_height_);
+
+  // Switch back up. Expect adapt.
+  OnOutputFormatRequest(320, 200, absl::nullopt);
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(640, 400, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  EXPECT_EQ(640, cropped_width_);
+  EXPECT_EQ(400, cropped_height_);
+  EXPECT_EQ(320, out_width_);
+  EXPECT_EQ(200, out_height_);
+
+  // Format request 480x300.
+  OnOutputFormatRequest(480, 300, absl::nullopt);
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(640, 400, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  EXPECT_EQ(640, cropped_width_);
+  EXPECT_EQ(400, cropped_height_);
+  EXPECT_EQ(480, out_width_);
+  EXPECT_EQ(300, out_height_);
+}
+
+// A view request for VGA followed by the camera reopening at VGA: the adapter
+// must settle at 640x360 with no further adaptation.
+TEST_P(VideoAdapterTest, TestViewRequestPlusCameraSwitch) {
+  // Start at HD.
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(1280, 720, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  EXPECT_EQ(1280, cropped_width_);
+  EXPECT_EQ(720, cropped_height_);
+  EXPECT_EQ(1280, out_width_);
+  EXPECT_EQ(720, out_height_);
+
+  // Format request for VGA.
+  OnOutputFormatRequest(640, 360, absl::nullopt);
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(1280, 720, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  EXPECT_EQ(1280, cropped_width_);
+  EXPECT_EQ(720, cropped_height_);
+  EXPECT_EQ(640, out_width_);
+  EXPECT_EQ(360, out_height_);
+
+  // Now, the camera reopens at VGA.
+  // Both the frame and the output format should be 640x360.
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(640, 360, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  EXPECT_EQ(640, cropped_width_);
+  EXPECT_EQ(360, cropped_height_);
+  EXPECT_EQ(640, out_width_);
+  EXPECT_EQ(360, out_height_);
+
+  // And another view request comes in for 640x360, which should have no
+  // real impact.
+  OnOutputFormatRequest(640, 360, absl::nullopt);
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(640, 360, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  EXPECT_EQ(640, cropped_width_);
+  EXPECT_EQ(360, cropped_height_);
+  EXPECT_EQ(640, out_width_);
+  EXPECT_EQ(360, out_height_);
+}
+
+// A 16:9 request against a 4:3 source crops height only; a source that already
+// matches the request passes through untouched.
+TEST_P(VideoAdapterTest, TestVgaWidth) {
+  // Requested output format is 640x360.
+  OnOutputFormatRequest(640, 360, absl::nullopt);
+
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(640, 480, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  // Expect cropping.
+  EXPECT_EQ(640, cropped_width_);
+  EXPECT_EQ(360, cropped_height_);
+  EXPECT_EQ(640, out_width_);
+  EXPECT_EQ(360, out_height_);
+
+  // But if frames come in at 640x360, we shouldn't adapt them down.
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(640, 360, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  EXPECT_EQ(640, cropped_width_);
+  EXPECT_EQ(360, cropped_height_);
+  EXPECT_EQ(640, out_width_);
+  EXPECT_EQ(360, out_height_);
+
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(640, 480, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  EXPECT_EQ(640, cropped_width_);
+  EXPECT_EQ(360, cropped_height_);
+  EXPECT_EQ(640, out_width_);
+  EXPECT_EQ(360, out_height_);
+}
+
+// Step the sink-wants pixel limits down one notch at a time
+// (720p -> 540p -> 360p -> 270p), then back up, verifying the adapter picks
+// the adjacent scale at each step.
+TEST_P(VideoAdapterTest, TestOnResolutionRequestInSmallSteps) {
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(1280, 720, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  EXPECT_EQ(1280, cropped_width_);
+  EXPECT_EQ(720, cropped_height_);
+  EXPECT_EQ(1280, out_width_);
+  EXPECT_EQ(720, out_height_);
+
+  // Adapt down one step.
+  adapter_.OnSinkWants(BuildSinkWants(absl::nullopt, 1280 * 720 - 1,
+                                      std::numeric_limits<int>::max()));
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(1280, 720, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  EXPECT_EQ(1280, cropped_width_);
+  EXPECT_EQ(720, cropped_height_);
+  EXPECT_EQ(960, out_width_);
+  EXPECT_EQ(540, out_height_);
+
+  // Adapt down one step more.
+  adapter_.OnSinkWants(BuildSinkWants(absl::nullopt, 960 * 540 - 1,
+                                      std::numeric_limits<int>::max()));
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(1280, 720, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  EXPECT_EQ(1280, cropped_width_);
+  EXPECT_EQ(720, cropped_height_);
+  EXPECT_EQ(640, out_width_);
+  EXPECT_EQ(360, out_height_);
+
+  // Adapt down one step more.
+  adapter_.OnSinkWants(BuildSinkWants(absl::nullopt, 640 * 360 - 1,
+                                      std::numeric_limits<int>::max()));
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(1280, 720, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  EXPECT_EQ(1280, cropped_width_);
+  EXPECT_EQ(720, cropped_height_);
+  EXPECT_EQ(480, out_width_);
+  EXPECT_EQ(270, out_height_);
+
+  // Adapt up one step.
+  adapter_.OnSinkWants(
+      BuildSinkWants(640 * 360, 960 * 540, std::numeric_limits<int>::max()));
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(1280, 720, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  EXPECT_EQ(1280, cropped_width_);
+  EXPECT_EQ(720, cropped_height_);
+  EXPECT_EQ(640, out_width_);
+  EXPECT_EQ(360, out_height_);
+
+  // Adapt up one step more.
+  adapter_.OnSinkWants(
+      BuildSinkWants(960 * 540, 1280 * 720, std::numeric_limits<int>::max()));
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(1280, 720, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  EXPECT_EQ(1280, cropped_width_);
+  EXPECT_EQ(720, cropped_height_);
+  EXPECT_EQ(960, out_width_);
+  EXPECT_EQ(540, out_height_);
+
+  // Adapt up one step more.
+  adapter_.OnSinkWants(
+      BuildSinkWants(1280 * 720, 1920 * 1080, std::numeric_limits<int>::max()));
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(1280, 720, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  EXPECT_EQ(1280, cropped_width_);
+  EXPECT_EQ(720, cropped_height_);
+  EXPECT_EQ(1280, out_width_);
+  EXPECT_EQ(720, out_height_);
+}
+
+// A max_pixel_count of zero must make the adapter drop every frame.
+TEST_P(VideoAdapterTest, TestOnResolutionRequestMaxZero) {
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(1280, 720, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  EXPECT_EQ(1280, cropped_width_);
+  EXPECT_EQ(720, cropped_height_);
+  EXPECT_EQ(1280, out_width_);
+  EXPECT_EQ(720, out_height_);
+
+  adapter_.OnSinkWants(
+      BuildSinkWants(absl::nullopt, 0, std::numeric_limits<int>::max()));
+  EXPECT_FALSE(adapter_.AdaptFrameResolution(1280, 720, 0, &cropped_width_,
+                                             &cropped_height_, &out_width_,
+                                             &out_height_));
+}
+
+// Jump the sink-wants limits several scale steps at once (720p -> 270p and
+// back); the adapter must land directly on the requested scale.
+TEST_P(VideoAdapterTest, TestOnResolutionRequestInLargeSteps) {
+  // Large step down.
+  adapter_.OnSinkWants(BuildSinkWants(absl::nullopt, 640 * 360 - 1,
+                                      std::numeric_limits<int>::max()));
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(1280, 720, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  EXPECT_EQ(1280, cropped_width_);
+  EXPECT_EQ(720, cropped_height_);
+  EXPECT_EQ(480, out_width_);
+  EXPECT_EQ(270, out_height_);
+
+  // Large step up.
+  adapter_.OnSinkWants(
+      BuildSinkWants(1280 * 720, 1920 * 1080, std::numeric_limits<int>::max()));
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(1280, 720, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  EXPECT_EQ(1280, cropped_width_);
+  EXPECT_EQ(720, cropped_height_);
+  EXPECT_EQ(1280, out_width_);
+  EXPECT_EQ(720, out_height_);
+}
+
+// An output format request acts as an upper bound: when sink wants later allow
+// more pixels, the output may not exceed the requested 640x360.
+TEST_P(VideoAdapterTest, TestOnOutputFormatRequestCapsMaxResolution) {
+  adapter_.OnSinkWants(BuildSinkWants(absl::nullopt, 640 * 360 - 1,
+                                      std::numeric_limits<int>::max()));
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(1280, 720, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  EXPECT_EQ(1280, cropped_width_);
+  EXPECT_EQ(720, cropped_height_);
+  EXPECT_EQ(480, out_width_);
+  EXPECT_EQ(270, out_height_);
+
+  // The format request is below the current sink limit: no change.
+  OnOutputFormatRequest(640, 360, absl::nullopt);
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(1280, 720, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  EXPECT_EQ(1280, cropped_width_);
+  EXPECT_EQ(720, cropped_height_);
+  EXPECT_EQ(480, out_width_);
+  EXPECT_EQ(270, out_height_);
+
+  // Relax the sink limit; the output is still capped by the format request.
+  adapter_.OnSinkWants(BuildSinkWants(absl::nullopt, 960 * 720,
+                                      std::numeric_limits<int>::max()));
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(1280, 720, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  EXPECT_EQ(1280, cropped_width_);
+  EXPECT_EQ(720, cropped_height_);
+  EXPECT_EQ(640, out_width_);
+  EXPECT_EQ(360, out_height_);
+}
+
+// Resetting sink wants to unlimited restores the original (unadapted)
+// resolution.
+TEST_P(VideoAdapterTest, TestOnResolutionRequestReset) {
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(1280, 720, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  EXPECT_EQ(1280, cropped_width_);
+  EXPECT_EQ(720, cropped_height_);
+  EXPECT_EQ(1280, out_width_);
+  EXPECT_EQ(720, out_height_);
+
+  // Adapt down.
+  adapter_.OnSinkWants(BuildSinkWants(absl::nullopt, 640 * 360 - 1,
+                                      std::numeric_limits<int>::max()));
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(1280, 720, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  EXPECT_EQ(1280, cropped_width_);
+  EXPECT_EQ(720, cropped_height_);
+  EXPECT_EQ(480, out_width_);
+  EXPECT_EQ(270, out_height_);
+
+  // Remove all limits: back to pass-through.
+  adapter_.OnSinkWants(BuildSinkWants(absl::nullopt,
+                                      std::numeric_limits<int>::max(),
+                                      std::numeric_limits<int>::max()));
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(1280, 720, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  EXPECT_EQ(1280, cropped_width_);
+  EXPECT_EQ(720, cropped_height_);
+  EXPECT_EQ(1280, out_width_);
+  EXPECT_EQ(720, out_height_);
+}
+
+// Clearing the output format request (all nullopt) restores the original
+// resolution.
+TEST_P(VideoAdapterTest, TestOnOutputFormatRequestResolutionReset) {
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(1280, 720, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  EXPECT_EQ(1280, cropped_width_);
+  EXPECT_EQ(720, cropped_height_);
+  EXPECT_EQ(1280, out_width_);
+  EXPECT_EQ(720, out_height_);
+
+  // Request a pixel cap below 360p: expect one scale step down.
+  adapter_.OnOutputFormatRequest(absl::nullopt, 640 * 360 - 1, absl::nullopt);
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(1280, 720, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  EXPECT_EQ(1280, cropped_width_);
+  EXPECT_EQ(720, cropped_height_);
+  EXPECT_EQ(480, out_width_);
+  EXPECT_EQ(270, out_height_);
+
+  // Clear the request: back to pass-through.
+  adapter_.OnOutputFormatRequest(absl::nullopt, absl::nullopt, absl::nullopt);
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(1280, 720, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  EXPECT_EQ(1280, cropped_width_);
+  EXPECT_EQ(720, cropped_height_);
+  EXPECT_EQ(1280, out_width_);
+  EXPECT_EQ(720, out_height_);
+}
+
+// After an fps cap causes drops, removing the cap must stop further drops.
+TEST_P(VideoAdapterTest, TestOnOutputFormatRequestFpsReset) {
+  OnOutputFormatRequest(kWidth, kHeight, kDefaultFps / 2);
+  for (int i = 0; i < 10; ++i)
+    adapter_wrapper_->AdaptFrame(frame_source_->GetFrame());
+
+  // Verify frame drop.
+  const int dropped_frames = adapter_wrapper_->GetStats().dropped_frames;
+  EXPECT_GT(dropped_frames, 0);
+
+  // Reset frame rate.
+  OnOutputFormatRequest(kWidth, kHeight, absl::nullopt);
+  for (int i = 0; i < 20; ++i)
+    adapter_wrapper_->AdaptFrame(frame_source_->GetFrame());
+
+  // Verify no frame drop after reset.
+  EXPECT_EQ(dropped_frames, adapter_wrapper_->GetStats().dropped_frames);
+}
+
+TEST_P(VideoAdapterTest, RequestAspectRatio) {
+  // Request aspect ratio 320/180 (16:9), smaller than input, but no resolution
+  // limit. Expect cropping but no scaling.
+  adapter_.OnOutputFormatRequest(std::make_pair(320, 180), absl::nullopt,
+                                 absl::nullopt);
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(640, 400, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  EXPECT_EQ(640, cropped_width_);
+  EXPECT_EQ(360, cropped_height_);
+  EXPECT_EQ(640, out_width_);
+  EXPECT_EQ(360, out_height_);
+
+  // Request 16:9 aspect plus a pixel cap just below 720p. Expect cropping to
+  // 16:9 and scaling down under the cap.
+  adapter_.OnOutputFormatRequest(std::make_pair(1280, 720), 1280 * 720 - 1,
+                                 absl::nullopt);
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(2592, 1944, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  EXPECT_EQ(2592, cropped_width_);
+  EXPECT_EQ(1458, cropped_height_);
+  EXPECT_EQ(1152, out_width_);
+  EXPECT_EQ(648, out_height_);
+}
+
+// The aspect-ratio request is orientation-agnostic: a portrait request against
+// a landscape source behaves like the equivalent landscape request.
+TEST_P(VideoAdapterTest, RequestAspectRatioWithDifferentOrientation) {
+  // Request 720x1280, higher than input, but aspect 16:9. Orientation should
+  // not matter, expect cropping but no scaling.
+  OnOutputFormatRequest(720, 1280, absl::nullopt);
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(640, 400, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  EXPECT_EQ(640, cropped_width_);
+  EXPECT_EQ(360, cropped_height_);
+  EXPECT_EQ(640, out_width_);
+  EXPECT_EQ(360, out_height_);
+}
+
+// An aspect ratio with a zero component is invalid and must be ignored.
+TEST_P(VideoAdapterTest, InvalidAspectRatioIgnored) {
+  // Request aspect ratio 320/0. Expect no cropping.
+  adapter_.OnOutputFormatRequest(std::make_pair(320, 0), absl::nullopt,
+                                 absl::nullopt);
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(640, 400, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  EXPECT_EQ(640, cropped_width_);
+  EXPECT_EQ(400, cropped_height_);
+  EXPECT_EQ(640, out_width_);
+  EXPECT_EQ(400, out_height_);
+}
+
+// Steps the sink's pixel-count limits down twice and back up twice and checks
+// that cropping to the requested 16:9 aspect is kept constant while only the
+// scale factor changes (1 -> 3/4 -> 1/2 -> 3/4 -> 1, then clamped at 1).
+TEST_P(VideoAdapterTest, TestCroppingWithResolutionRequest) {
+  // Ask for 640x360 (16:9 aspect).
+  OnOutputFormatRequest(640, 360, absl::nullopt);
+  // Send 640x480 (4:3 aspect).
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(640, 480, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  // Expect cropping to 16:9 format and no scaling.
+  EXPECT_EQ(640, cropped_width_);
+  EXPECT_EQ(360, cropped_height_);
+  EXPECT_EQ(640, out_width_);
+  EXPECT_EQ(360, out_height_);
+
+  // Adapt down one step.
+  adapter_.OnSinkWants(BuildSinkWants(absl::nullopt, 640 * 360 - 1,
+                                      std::numeric_limits<int>::max()));
+  // Expect cropping to 16:9 format and 3/4 scaling.
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(640, 480, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  EXPECT_EQ(640, cropped_width_);
+  EXPECT_EQ(360, cropped_height_);
+  EXPECT_EQ(480, out_width_);
+  EXPECT_EQ(270, out_height_);
+
+  // Adapt down one step more.
+  adapter_.OnSinkWants(BuildSinkWants(absl::nullopt, 480 * 270 - 1,
+                                      std::numeric_limits<int>::max()));
+  // Expect cropping to 16:9 format and 1/2 scaling.
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(640, 480, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  EXPECT_EQ(640, cropped_width_);
+  EXPECT_EQ(360, cropped_height_);
+  EXPECT_EQ(320, out_width_);
+  EXPECT_EQ(180, out_height_);
+
+  // Adapt up one step.
+  adapter_.OnSinkWants(
+      BuildSinkWants(480 * 270, 640 * 360, std::numeric_limits<int>::max()));
+  // Expect cropping to 16:9 format and 3/4 scaling.
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(640, 480, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  EXPECT_EQ(640, cropped_width_);
+  EXPECT_EQ(360, cropped_height_);
+  EXPECT_EQ(480, out_width_);
+  EXPECT_EQ(270, out_height_);
+
+  // Adapt up one step more.
+  adapter_.OnSinkWants(
+      BuildSinkWants(640 * 360, 960 * 540, std::numeric_limits<int>::max()));
+  // Expect cropping to 16:9 format and no scaling.
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(640, 480, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  EXPECT_EQ(640, cropped_width_);
+  EXPECT_EQ(360, cropped_height_);
+  EXPECT_EQ(640, out_width_);
+  EXPECT_EQ(360, out_height_);
+
+  // Try to adapt up one step more.
+  adapter_.OnSinkWants(
+      BuildSinkWants(960 * 540, 1280 * 720, std::numeric_limits<int>::max()));
+  // Expect cropping to 16:9 format and no scaling: output never exceeds the
+  // 640x360 requested via OnOutputFormatRequest.
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(640, 480, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  EXPECT_EQ(640, cropped_width_);
+  EXPECT_EQ(360, cropped_height_);
+  EXPECT_EQ(640, out_width_);
+  EXPECT_EQ(360, out_height_);
+}
+
+TEST_P(VideoAdapterTest, TestCroppingOddResolution) {
+  // Ask for 640x360 (16:9 aspect), with 3/16 scaling.
+  OnOutputFormatRequest(640, 360, absl::nullopt);
+  adapter_.OnSinkWants(BuildSinkWants(absl::nullopt,
+                                      640 * 360 * 3 / 16 * 3 / 16,
+                                      std::numeric_limits<int>::max()));
+
+  // Send 640x480 (4:3 aspect).
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(640, 480, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+
+  // Instead of getting the exact aspect ratio with cropped resolution 640x360,
+  // the resolution should be adjusted to get a perfect scale factor instead.
+  // 368 is the nearest multiple of 16, so 3/16 scaling yields integer
+  // dimensions: 640*3/16 = 120, 368*3/16 = 69.
+  EXPECT_EQ(640, cropped_width_);
+  EXPECT_EQ(368, cropped_height_);
+  EXPECT_EQ(120, out_width_);
+  EXPECT_EQ(69, out_height_);
+}
+
+TEST_P(VideoAdapterTest, TestAdaptToVerySmallResolution) {
+  // Ask for 1920x1080 (16:9 aspect), with 1/16 scaling.
+  const int w = 1920;
+  const int h = 1080;
+  OnOutputFormatRequest(w, h, absl::nullopt);
+  adapter_.OnSinkWants(BuildSinkWants(absl::nullopt, w * h * 1 / 16 * 1 / 16,
+                                      std::numeric_limits<int>::max()));
+
+  // Send 1920x1080 (16:9 aspect).
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(
+      w, h, 0, &cropped_width_, &cropped_height_, &out_width_, &out_height_));
+
+  // Instead of getting the exact aspect ratio with cropped resolution 1920x1080
+  // the resolution should be adjusted to get a perfect scale factor instead.
+  // 1072 = 67*16, so 1/16 scaling yields integer dimensions 120x67.
+  EXPECT_EQ(1920, cropped_width_);
+  EXPECT_EQ(1072, cropped_height_);
+  EXPECT_EQ(120, out_width_);
+  EXPECT_EQ(67, out_height_);
+
+  // Adapt back up one step to 3/32.
+  adapter_.OnSinkWants(BuildSinkWants(w * h * 3 / 32 * 3 / 32,
+                                      w * h * 1 / 8 * 1 / 8,
+                                      std::numeric_limits<int>::max()));
+
+  // Send 1920x1080 (16:9 aspect).
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(
+      w, h, 0, &cropped_width_, &cropped_height_, &out_width_, &out_height_));
+
+  // 3/32 of 1920x1080 is not exact; expect the nearby clean 1/12 step 160x90.
+  EXPECT_EQ(160, out_width_);
+  EXPECT_EQ(90, out_height_);
+}
+
+// A 0x0 output format request means "drop everything"; later sink wants must
+// not resurrect frame delivery.
+TEST_P(VideoAdapterTest, AdaptFrameResolutionDropWithResolutionRequest) {
+  OnOutputFormatRequest(0, 0, kDefaultFps);
+  EXPECT_FALSE(adapter_.AdaptFrameResolution(kWidth, kHeight, 0,
+                                             &cropped_width_, &cropped_height_,
+                                             &out_width_, &out_height_));
+
+  adapter_.OnSinkWants(BuildSinkWants(960 * 540,
+                                      std::numeric_limits<int>::max(),
+                                      std::numeric_limits<int>::max()));
+
+  // Still expect all frames to be dropped
+  EXPECT_FALSE(adapter_.AdaptFrameResolution(kWidth, kHeight, 0,
+                                             &cropped_width_, &cropped_height_,
+                                             &out_width_, &out_height_));
+
+  adapter_.OnSinkWants(BuildSinkWants(absl::nullopt, 640 * 480 - 1,
+                                      std::numeric_limits<int>::max()));
+
+  // Still expect all frames to be dropped
+  EXPECT_FALSE(adapter_.AdaptFrameResolution(kWidth, kHeight, 0,
+                                             &cropped_width_, &cropped_height_,
+                                             &out_width_, &out_height_));
+}
+
+// Test that we will adapt to max given a target pixel count close to max.
+// (target = max - 1 pixel should round to full resolution, not a step down.)
+TEST_P(VideoAdapterTest, TestAdaptToMax) {
+  OnOutputFormatRequest(640, 360, kDefaultFps);
+  adapter_.OnSinkWants(BuildSinkWants(640 * 360 - 1 /* target */,
+                                      std::numeric_limits<int>::max(),
+                                      std::numeric_limits<int>::max()));
+
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(640, 360, 0, &cropped_width_,
+                                            &cropped_height_, &out_width_,
+                                            &out_height_));
+  EXPECT_EQ(640, out_width_);
+  EXPECT_EQ(360, out_height_);
+}
+
+// Test adjusting to 16:9 in landscape, and 9:16 in portrait.
+TEST(VideoAdapterTestMultipleOrientation, TestNormal) {
+  VideoAdapter video_adapter;
+  // Landscape request 640x360 and portrait request 360x640, both at 30 fps.
+  video_adapter.OnOutputFormatRequest(std::make_pair(640, 360), 640 * 360,
+                                      std::make_pair(360, 640), 360 * 640, 30);
+
+  int cropped_width;
+  int cropped_height;
+  int out_width;
+  int out_height;
+  // Landscape input: cropped to 16:9.
+  EXPECT_TRUE(video_adapter.AdaptFrameResolution(
+      /* in_width= */ 640, /* in_height= */ 480, /* in_timestamp_ns= */ 0,
+      &cropped_width, &cropped_height, &out_width, &out_height));
+  EXPECT_EQ(640, cropped_width);
+  EXPECT_EQ(360, cropped_height);
+  EXPECT_EQ(640, out_width);
+  EXPECT_EQ(360, out_height);
+
+  // Portrait input: cropped to 9:16.
+  EXPECT_TRUE(video_adapter.AdaptFrameResolution(
+      /* in_width= */ 480, /* in_height= */ 640,
+      /* in_timestamp_ns= */ rtc::kNumNanosecsPerSec / 30, &cropped_width,
+      &cropped_height, &out_width, &out_height));
+  EXPECT_EQ(360, cropped_width);
+  EXPECT_EQ(640, cropped_height);
+  EXPECT_EQ(360, out_width);
+  EXPECT_EQ(640, out_height);
+}
+
+// Force output to be 9:16, even for landscape input.
+TEST(VideoAdapterTestMultipleOrientation, TestForcePortrait) {
+  VideoAdapter video_adapter;
+  // Both orientation slots request portrait 360x640.
+  video_adapter.OnOutputFormatRequest(std::make_pair(360, 640), 640 * 360,
+                                      std::make_pair(360, 640), 360 * 640, 30);
+
+  int cropped_width;
+  int cropped_height;
+  int out_width;
+  int out_height;
+  // Landscape input is center-cropped to the 9:16 aspect (270x480).
+  EXPECT_TRUE(video_adapter.AdaptFrameResolution(
+      /* in_width= */ 640, /* in_height= */ 480, /* in_timestamp_ns= */ 0,
+      &cropped_width, &cropped_height, &out_width, &out_height));
+  EXPECT_EQ(270, cropped_width);
+  EXPECT_EQ(480, cropped_height);
+  EXPECT_EQ(270, out_width);
+  EXPECT_EQ(480, out_height);
+
+  // Portrait input already matches; only cropped to exact 9:16.
+  EXPECT_TRUE(video_adapter.AdaptFrameResolution(
+      /* in_width= */ 480, /* in_height= */ 640,
+      /* in_timestamp_ns= */ rtc::kNumNanosecsPerSec / 30, &cropped_width,
+      &cropped_height, &out_width, &out_height));
+  EXPECT_EQ(360, cropped_width);
+  EXPECT_EQ(640, cropped_height);
+  EXPECT_EQ(360, out_width);
+  EXPECT_EQ(640, out_height);
+}
+
+// Repeatedly requests "anything smaller than the current output" and checks
+// the adapter walks down the expected alternating 3/4, 2/3 ladder from 720p.
+TEST_P(VideoAdapterTest, AdaptResolutionInStepsFirst3_4) {
+  const int kWidth = 1280;
+  const int kHeight = 720;
+  OnOutputFormatRequest(kWidth, kHeight, absl::nullopt);  // 16:9 aspect.
+
+  // Scale factors: 3/4, 2/3, 3/4, 2/3, ...
+  // Scale      : 3/4, 1/2, 3/8, 1/4, 3/16, 1/8.
+  const int kExpectedWidths[] = {960, 640, 480, 320, 240, 160};
+  const int kExpectedHeights[] = {540, 360, 270, 180, 135, 90};
+
+  int request_width = kWidth;
+  int request_height = kHeight;
+
+  for (size_t i = 0; i < arraysize(kExpectedWidths); ++i) {
+    // Adapt down one step.
+    adapter_.OnSinkWants(BuildSinkWants(absl::nullopt,
+                                        request_width * request_height - 1,
+                                        std::numeric_limits<int>::max()));
+    EXPECT_TRUE(adapter_.AdaptFrameResolution(kWidth, kHeight, 0,
+                                              &cropped_width_, &cropped_height_,
+                                              &out_width_, &out_height_));
+    EXPECT_EQ(kExpectedWidths[i], out_width_);
+    EXPECT_EQ(kExpectedHeights[i], out_height_);
+    request_width = out_width_;
+    request_height = out_height_;
+  }
+}
+
+// Same down-stepping as above but starting from 1080p, where the first step
+// is 2/3 instead of 3/4.
+TEST_P(VideoAdapterTest, AdaptResolutionInStepsFirst2_3) {
+  const int kWidth = 1920;
+  const int kHeight = 1080;
+  OnOutputFormatRequest(kWidth, kHeight, absl::nullopt);  // 16:9 aspect.
+
+  // Scale factors: 2/3, 3/4, 2/3, 3/4, ...
+  // Scale: 2/3, 1/2, 1/3, 1/4, 1/6, 1/8, 1/12.
+  const int kExpectedWidths[] = {1280, 960, 640, 480, 320, 240, 160};
+  const int kExpectedHeights[] = {720, 540, 360, 270, 180, 135, 90};
+
+  int request_width = kWidth;
+  int request_height = kHeight;
+
+  for (size_t i = 0; i < arraysize(kExpectedWidths); ++i) {
+    // Adapt down one step.
+    adapter_.OnSinkWants(BuildSinkWants(absl::nullopt,
+                                        request_width * request_height - 1,
+                                        std::numeric_limits<int>::max()));
+    EXPECT_TRUE(adapter_.AdaptFrameResolution(kWidth, kHeight, 0,
+                                              &cropped_width_, &cropped_height_,
+                                              &out_width_, &out_height_));
+    EXPECT_EQ(kExpectedWidths[i], out_width_);
+    EXPECT_EQ(kExpectedHeights[i], out_height_);
+    request_width = out_width_;
+    request_height = out_height_;
+  }
+}
+
+// Down-stepping from 4:3 1440x1080, where the ladder starts with two 2/3
+// steps in a row.
+TEST_P(VideoAdapterTest, AdaptResolutionInStepsFirst2x2_3) {
+  const int kWidth = 1440;
+  const int kHeight = 1080;
+  OnOutputFormatRequest(kWidth, kHeight, absl::nullopt);  // 4:3 aspect.
+
+  // Scale factors: 2/3, 2/3, 3/4, 2/3, 3/4, ...
+  // Scale      : 2/3, 4/9, 1/3, 2/9, 1/6, 1/9, 1/12, 1/18, 1/24, 1/36.
+  const int kExpectedWidths[] = {960, 640, 480, 320, 240, 160, 120, 80, 60, 40};
+  const int kExpectedHeights[] = {720, 480, 360, 240, 180, 120, 90, 60, 45, 30};
+
+  int request_width = kWidth;
+  int request_height = kHeight;
+
+  for (size_t i = 0; i < arraysize(kExpectedWidths); ++i) {
+    // Adapt down one step.
+    adapter_.OnSinkWants(BuildSinkWants(absl::nullopt,
+                                        request_width * request_height - 1,
+                                        std::numeric_limits<int>::max()));
+    EXPECT_TRUE(adapter_.AdaptFrameResolution(kWidth, kHeight, 0,
+                                              &cropped_width_, &cropped_height_,
+                                              &out_width_, &out_height_));
+    EXPECT_EQ(kExpectedWidths[i], out_width_);
+    EXPECT_EQ(kExpectedHeights[i], out_height_);
+    request_width = out_width_;
+    request_height = out_height_;
+  }
+}
+
+// Output dimensions must be divisible by whatever alignment the sink asks
+// for, for each of several alignment values.
+TEST_P(VideoAdapterTest, AdaptResolutionWithSinkAlignment) {
+  constexpr int kSourceWidth = 1280;
+  constexpr int kSourceHeight = 720;
+  constexpr int kSourceFramerate = 30;
+  constexpr int kRequestedWidth = 480;
+  constexpr int kRequestedHeight = 270;
+  constexpr int kRequestedFramerate = 30;
+
+  OnOutputFormatRequest(kRequestedWidth, kRequestedHeight, kRequestedFramerate);
+
+  int frame_num = 1;
+  for (const int sink_alignment : {2, 3, 4, 5}) {
+    adapter_.OnSinkWants(
+        BuildSinkWants(absl::nullopt, std::numeric_limits<int>::max(),
+                       std::numeric_limits<int>::max(), sink_alignment));
+    // Advance the timestamp one frame interval per iteration.
+    EXPECT_TRUE(adapter_.AdaptFrameResolution(
+        kSourceWidth, kSourceHeight,
+        frame_num * rtc::kNumNanosecsPerSec / kSourceFramerate, &cropped_width_,
+        &cropped_height_, &out_width_, &out_height_));
+    EXPECT_EQ(out_width_ % sink_alignment, 0);
+    EXPECT_EQ(out_height_ % sink_alignment, 0);
+
+    ++frame_num;
+  }
+}
+
+// Verify the cases where OnOutputFormatRequest is ignored and
+// requested_resolution (the newer per-sink API) is used instead. The
+// any_active_without_requested_resolution flag models whether some active
+// encoder still relies on the old API.
+TEST_P(VideoAdapterTest, UseRequestedResolutionInsteadOfOnOutputFormatRequest) {
+  {
+    // Both new and old API active => Use OnOutputFormatRequest
+    OnOutputFormatRequest(640, 360, kDefaultFps);
+    adapter_.OnSinkWants(
+        BuildSinkWants(Resolution{.width = 960, .height = 540},
+                       /* any_active_without_requested_resolution= */ true));
+
+    EXPECT_THAT(
+        AdaptFrameResolution(/* input frame */ {.width = 1280, .height = 720})
+            .first,
+        Eq(Resolution{.width = 640, .height = 360}));
+  }
+  {
+    // New API active, old API inactive, ignore OnOutputFormatRequest and use
+    // requested_resolution.
+    OnOutputFormatRequest(640, 360, kDefaultFps);
+    adapter_.OnSinkWants(
+        BuildSinkWants(Resolution{.width = 960, .height = 540},
+                       /* any_active_without_requested_resolution= */ false));
+
+    EXPECT_THAT(
+        AdaptFrameResolution(/* input frame */ {.width = 1280, .height = 720})
+            .first,
+        Eq(Resolution{.width = 960, .height = 540}));
+  }
+
+  {
+    // New API inactive, old API inactive, use OnOutputFormatRequest.
+    OnOutputFormatRequest(640, 360, kDefaultFps);
+    adapter_.OnSinkWants(
+        BuildSinkWants(absl::nullopt,
+                       /* any_active_without_requested_resolution= */ false));
+
+    EXPECT_THAT(
+        AdaptFrameResolution(/* input frame */ {.width = 1280, .height = 720})
+            .first,
+        Eq(Resolution{.width = 640, .height = 360}));
+  }
+
+  {
+    // New API active, old API inactive, remember OnOutputFormatRequest.
+    OnOutputFormatRequest(640, 360, kDefaultFps);
+    adapter_.OnSinkWants(
+        BuildSinkWants(Resolution{.width = 960, .height = 540},
+                       /* any_active_without_requested_resolution= */ false));
+
+    EXPECT_THAT(
+        AdaptFrameResolution(/* input frame */ {.width = 1280, .height = 720})
+            .first,
+        Eq(Resolution{.width = 960, .height = 540}));
+
+    // This is ignored since there is not any active NOT using
+    // requested_resolution.
+    OnOutputFormatRequest(320, 180, kDefaultFps);
+
+    EXPECT_THAT(
+        AdaptFrameResolution(/* input frame */ {.width = 1280, .height = 720})
+            .first,
+        Eq(Resolution{.width = 960, .height = 540}));
+
+    // Disable new API => fallback to last OnOutputFormatRequest.
+    adapter_.OnSinkWants(
+        BuildSinkWants(absl::nullopt,
+                       /* any_active_without_requested_resolution= */ false));
+
+    EXPECT_THAT(
+        AdaptFrameResolution(/* input frame */ {.width = 1280, .height = 720})
+            .first,
+        Eq(Resolution{.width = 320, .height = 180}));
+  }
+}
+
+// Fixture that constructs the adapter with a source-side resolution
+// alignment requirement (7, deliberately coprime with common resolutions).
+class VideoAdapterWithSourceAlignmentTest : public VideoAdapterTest {
+ protected:
+  static constexpr int kSourceResolutionAlignment = 7;
+
+  VideoAdapterWithSourceAlignmentTest()
+      : VideoAdapterTest(/*field_trials=*/"", kSourceResolutionAlignment) {}
+};
+
+// Output dimensions must honor the source-side alignment (7) even though the
+// requested 480x270 is not divisible by it.
+TEST_P(VideoAdapterWithSourceAlignmentTest, AdaptResolution) {
+  constexpr int kSourceWidth = 1280;
+  constexpr int kSourceHeight = 720;
+  constexpr int kRequestedWidth = 480;
+  constexpr int kRequestedHeight = 270;
+  constexpr int kRequestedFramerate = 30;
+
+  OnOutputFormatRequest(kRequestedWidth, kRequestedHeight, kRequestedFramerate);
+
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(
+      kSourceWidth, kSourceHeight, /*in_timestamp_ns=*/0, &cropped_width_,
+      &cropped_height_, &out_width_, &out_height_));
+  EXPECT_EQ(out_width_ % kSourceResolutionAlignment, 0);
+  EXPECT_EQ(out_height_ % kSourceResolutionAlignment, 0);
+}
+
+// When both source (7) and sink (8) alignments apply, the output must be
+// divisible by both (i.e. by their least common multiple, 56).
+TEST_P(VideoAdapterWithSourceAlignmentTest, AdaptResolutionWithSinkAlignment) {
+  constexpr int kSourceWidth = 1280;
+  constexpr int kSourceHeight = 720;
+  // 7 and 8 neither divide 480 nor 270.
+  constexpr int kRequestedWidth = 480;
+  constexpr int kRequestedHeight = 270;
+  constexpr int kRequestedFramerate = 30;
+  constexpr int kSinkResolutionAlignment = 8;
+
+  OnOutputFormatRequest(kRequestedWidth, kRequestedHeight, kRequestedFramerate);
+
+  adapter_.OnSinkWants(BuildSinkWants(
+      absl::nullopt, std::numeric_limits<int>::max(),
+      std::numeric_limits<int>::max(), kSinkResolutionAlignment));
+  EXPECT_TRUE(adapter_.AdaptFrameResolution(
+      kSourceWidth, kSourceHeight, /*in_timestamp_ns=*/0, &cropped_width_,
+      &cropped_height_, &out_width_, &out_height_));
+  EXPECT_EQ(out_width_ % kSourceResolutionAlignment, 0);
+  EXPECT_EQ(out_height_ % kSourceResolutionAlignment, 0);
+  EXPECT_EQ(out_width_ % kSinkResolutionAlignment, 0);
+  EXPECT_EQ(out_height_ % kSinkResolutionAlignment, 0);
+}
+
+// Run the alignment suite for both bool parameter values. NOTE(review): the
+// parameter's meaning is defined in the VideoAdapterTest fixture (not visible
+// here); the suite name suggests it toggles how OnOutputFormatRequest is
+// issued — confirm against the fixture.
+INSTANTIATE_TEST_SUITE_P(OnOutputFormatRequests,
+                         VideoAdapterWithSourceAlignmentTest,
+                         ::testing::Values(true, false));
+
+} // namespace cricket
diff --git a/third_party/libwebrtc/media/base/video_broadcaster.cc b/third_party/libwebrtc/media/base/video_broadcaster.cc
new file mode 100644
index 0000000000..43c17734e3
--- /dev/null
+++ b/third_party/libwebrtc/media/base/video_broadcaster.cc
@@ -0,0 +1,214 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/base/video_broadcaster.h"
+
+#include <algorithm>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/video/i420_buffer.h"
+#include "api/video/video_rotation.h"
+#include "media/base/video_common.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace rtc {
+
+// Defaulted out-of-line; all members are default-initialized.
+VideoBroadcaster::VideoBroadcaster() = default;
+VideoBroadcaster::~VideoBroadcaster() = default;
+
+// Registers `sink` or updates its wants, then recomputes the aggregated
+// wants. A brand-new sink immediately receives the last constraints seen by
+// ProcessConstraints (if any), and resets the "previous frame reached every
+// sink" flag so the next frame's update rect is cleared for it.
+void VideoBroadcaster::AddOrUpdateSink(
+    VideoSinkInterface<webrtc::VideoFrame>* sink,
+    const VideoSinkWants& wants) {
+  RTC_DCHECK(sink != nullptr);
+  webrtc::MutexLock lock(&sinks_and_wants_lock_);
+  if (!FindSinkPair(sink)) {
+    // `Sink` is a new sink, which didn't receive previous frame.
+    previous_frame_sent_to_all_sinks_ = false;
+
+    if (last_constraints_.has_value()) {
+      RTC_LOG(LS_INFO) << __func__ << " forwarding stored constraints min_fps "
+                       << last_constraints_->min_fps.value_or(-1) << " max_fps "
+                       << last_constraints_->max_fps.value_or(-1);
+      sink->OnConstraintsChanged(*last_constraints_);
+    }
+  }
+  VideoSourceBase::AddOrUpdateSink(sink, wants);
+  UpdateWants();
+}
+
+// Unregisters `sink` and recomputes the aggregated wants without it.
+void VideoBroadcaster::RemoveSink(
+    VideoSinkInterface<webrtc::VideoFrame>* sink) {
+  RTC_DCHECK(sink != nullptr);
+  webrtc::MutexLock lock(&sinks_and_wants_lock_);
+  VideoSourceBase::RemoveSink(sink);
+  UpdateWants();
+}
+
+// True iff at least one sink is registered, i.e. the next frame has a
+// receiver.
+bool VideoBroadcaster::frame_wanted() const {
+  webrtc::MutexLock lock(&sinks_and_wants_lock_);
+  return !sink_pairs().empty();
+}
+
+// Returns a copy of the wants aggregated over all sinks by UpdateWants().
+VideoSinkWants VideoBroadcaster::wants() const {
+  webrtc::MutexLock lock(&sinks_and_wants_lock_);
+  return current_wants_;
+}
+
+// Fans `frame` out to every sink, with three per-sink variations: sinks that
+// require rotation_applied get the frame discarded if rotation is still
+// pending; sinks wanting black frames get a black frame with matching
+// metadata; and if the previous frame did not reach every sink, the update
+// rect is cleared because it is no longer a valid delta for them.
+void VideoBroadcaster::OnFrame(const webrtc::VideoFrame& frame) {
+  webrtc::MutexLock lock(&sinks_and_wants_lock_);
+  bool current_frame_was_discarded = false;
+  for (auto& sink_pair : sink_pairs()) {
+    if (sink_pair.wants.rotation_applied &&
+        frame.rotation() != webrtc::kVideoRotation_0) {
+      // Calls to OnFrame are not synchronized with changes to the sink wants.
+      // When rotation_applied is set to true, one or a few frames may get here
+      // with rotation still pending. Protect sinks that don't expect any
+      // pending rotation.
+      RTC_LOG(LS_VERBOSE) << "Discarding frame with unexpected rotation.";
+      sink_pair.sink->OnDiscardedFrame();
+      current_frame_was_discarded = true;
+      continue;
+    }
+    if (sink_pair.wants.black_frames) {
+      // Preserve rotation, timestamp and id so downstream timing still works.
+      webrtc::VideoFrame black_frame =
+          webrtc::VideoFrame::Builder()
+              .set_video_frame_buffer(
+                  GetBlackFrameBuffer(frame.width(), frame.height()))
+              .set_rotation(frame.rotation())
+              .set_timestamp_us(frame.timestamp_us())
+              .set_id(frame.id())
+              .build();
+      sink_pair.sink->OnFrame(black_frame);
+    } else if (!previous_frame_sent_to_all_sinks_ && frame.has_update_rect()) {
+      // Since last frame was not sent to some sinks, no reliable update
+      // information is available, so we need to clear the update rect.
+      webrtc::VideoFrame copy = frame;
+      copy.clear_update_rect();
+      sink_pair.sink->OnFrame(copy);
+    } else {
+      sink_pair.sink->OnFrame(frame);
+    }
+  }
+  previous_frame_sent_to_all_sinks_ = !current_frame_was_discarded;
+}
+
+// Propagates a discarded-frame notification from the source to every sink.
+void VideoBroadcaster::OnDiscardedFrame() {
+  webrtc::MutexLock lock(&sinks_and_wants_lock_);
+  for (auto& sink_pair : sink_pairs()) {
+    sink_pair.sink->OnDiscardedFrame();
+  }
+}
+
+// Stores `constraints` (so future sinks receive them in AddOrUpdateSink) and
+// forwards them to all currently registered sinks.
+void VideoBroadcaster::ProcessConstraints(
+    const webrtc::VideoTrackSourceConstraints& constraints) {
+  webrtc::MutexLock lock(&sinks_and_wants_lock_);
+  RTC_LOG(LS_INFO) << __func__ << " min_fps "
+                   << constraints.min_fps.value_or(-1) << " max_fps "
+                   << constraints.max_fps.value_or(-1) << " broadcasting to "
+                   << sink_pairs().size() << " sinks.";
+  last_constraints_ = constraints;
+  for (auto& sink_pair : sink_pairs())
+    sink_pair.sink->OnConstraintsChanged(constraints);
+}
+
+// Recomputes `current_wants_` as the aggregate over all sinks' wants:
+// rotation_applied is OR-ed; max_pixel_count, target_pixel_count and
+// max_framerate_fps take the minimum; resolution_alignment takes the least
+// common multiple; requested_resolution takes the per-dimension maximum.
+// Caller must hold `sinks_and_wants_lock_`.
+void VideoBroadcaster::UpdateWants() {
+  VideoSinkWants wants;
+  wants.rotation_applied = false;
+  wants.resolution_alignment = 1;
+  wants.aggregates.emplace(VideoSinkWants::Aggregates());
+  wants.is_active = false;
+
+  // TODO(webrtc:14451) : I think it makes sense to always
+  // "ignore" encoders that are not active. But that would
+  // probably require a controlled roll out with a field trial?
+  // To play it safe, only ignore inactive encoders if there is an
+  // active encoder using the new api (requested_resolution),
+  // this means that there is only a behavioural change when using the new
+  // api.
+  bool ignore_inactive_encoders_old_api = false;
+  for (auto& sink : sink_pairs()) {
+    if (sink.wants.is_active && sink.wants.requested_resolution.has_value()) {
+      ignore_inactive_encoders_old_api = true;
+      break;
+    }
+  }
+
+  for (auto& sink : sink_pairs()) {
+    // Skip inactive sinks when allowed (see TODO above).
+    if (!sink.wants.is_active &&
+        (sink.wants.requested_resolution || ignore_inactive_encoders_old_api)) {
+      continue;
+    }
+    // wants.rotation_applied == ANY(sink.wants.rotation_applied)
+    if (sink.wants.rotation_applied) {
+      wants.rotation_applied = true;
+    }
+    // wants.max_pixel_count == MIN(sink.wants.max_pixel_count)
+    if (sink.wants.max_pixel_count < wants.max_pixel_count) {
+      wants.max_pixel_count = sink.wants.max_pixel_count;
+    }
+    // Select the minimum requested target_pixel_count, if any, of all sinks so
+    // that we don't over utilize the resources for any one.
+    // TODO(sprang): Consider using the median instead, since the limit can be
+    // expressed by max_pixel_count.
+    if (sink.wants.target_pixel_count &&
+        (!wants.target_pixel_count ||
+         (*sink.wants.target_pixel_count < *wants.target_pixel_count))) {
+      wants.target_pixel_count = sink.wants.target_pixel_count;
+    }
+    // Select the minimum for the requested max framerates.
+    if (sink.wants.max_framerate_fps < wants.max_framerate_fps) {
+      wants.max_framerate_fps = sink.wants.max_framerate_fps;
+    }
+    // Alignment must satisfy every sink simultaneously => LCM.
+    wants.resolution_alignment = cricket::LeastCommonMultiple(
+        wants.resolution_alignment, sink.wants.resolution_alignment);
+
+    // Pick MAX(requested_resolution) since the actual can be downscaled
+    // in encoder instead.
+    if (sink.wants.requested_resolution) {
+      if (!wants.requested_resolution) {
+        wants.requested_resolution = sink.wants.requested_resolution;
+      } else {
+        wants.requested_resolution->width =
+            std::max(wants.requested_resolution->width,
+                     sink.wants.requested_resolution->width);
+        wants.requested_resolution->height =
+            std::max(wants.requested_resolution->height,
+                     sink.wants.requested_resolution->height);
+      }
+    } else if (sink.wants.is_active) {
+      wants.aggregates->any_active_without_requested_resolution = true;
+    }
+
+    wants.is_active |= sink.wants.is_active;
+  }
+
+  // target must stay strictly below max; clamp it if a sink asked for more.
+  if (wants.target_pixel_count &&
+      *wants.target_pixel_count >= wants.max_pixel_count) {
+    wants.target_pixel_count.emplace(wants.max_pixel_count);
+  }
+  current_wants_ = wants;
+}
+
+// Returns a cached all-black I420 buffer of the given size, (re)allocating
+// only when the requested dimensions differ from the cached buffer's.
+const rtc::scoped_refptr<webrtc::VideoFrameBuffer>&
+VideoBroadcaster::GetBlackFrameBuffer(int width, int height) {
+  if (!black_frame_buffer_ || black_frame_buffer_->width() != width ||
+      black_frame_buffer_->height() != height) {
+    rtc::scoped_refptr<webrtc::I420Buffer> buffer =
+        webrtc::I420Buffer::Create(width, height);
+    webrtc::I420Buffer::SetBlack(buffer.get());
+    black_frame_buffer_ = buffer;
+  }
+
+  return black_frame_buffer_;
+}
+
+} // namespace rtc
diff --git a/third_party/libwebrtc/media/base/video_broadcaster.h b/third_party/libwebrtc/media/base/video_broadcaster.h
new file mode 100644
index 0000000000..c253d44b09
--- /dev/null
+++ b/third_party/libwebrtc/media/base/video_broadcaster.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MEDIA_BASE_VIDEO_BROADCASTER_H_
+#define MEDIA_BASE_VIDEO_BROADCASTER_H_
+
+#include "api/media_stream_interface.h"
+#include "api/scoped_refptr.h"
+#include "api/sequence_checker.h"
+#include "api/video/video_frame_buffer.h"
+#include "api/video/video_source_interface.h"
+#include "media/base/video_source_base.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace rtc {
+
+// VideoBroadcaster broadcast video frames to sinks and combines VideoSinkWants
+// from its sinks. It does that by implementing rtc::VideoSourceInterface and
+// rtc::VideoSinkInterface. The class is threadsafe; methods may be called on
+// any thread. This is needed because VideoStreamEncoder calls AddOrUpdateSink
+// both on the worker thread and on the encoder task queue.
+class VideoBroadcaster : public VideoSourceBase,
+                         public VideoSinkInterface<webrtc::VideoFrame> {
+ public:
+  VideoBroadcaster();
+  ~VideoBroadcaster() override;
+
+  // Adds a new, or updates an already existing sink. If the sink is new and
+  // ProcessConstraints has been called previously, the new sink's
+  // OnConstraintsChanged method will be invoked with the most recent
+  // constraints.
+  void AddOrUpdateSink(VideoSinkInterface<webrtc::VideoFrame>* sink,
+                       const VideoSinkWants& wants) override;
+  void RemoveSink(VideoSinkInterface<webrtc::VideoFrame>* sink) override;
+
+  // Returns true if the next frame will be delivered to at least one sink.
+  bool frame_wanted() const;
+
+  // Returns VideoSinkWants a source is requested to fulfill. They are
+  // aggregated by all VideoSinkWants from all sinks.
+  VideoSinkWants wants() const;
+
+  // This method ensures that if a sink sets rotation_applied == true,
+  // it will never receive a frame with pending rotation. Our caller
+  // may pass in frames without precise synchronization with changes
+  // to the VideoSinkWants.
+  void OnFrame(const webrtc::VideoFrame& frame) override;
+
+  void OnDiscardedFrame() override;
+
+  // Called on the network thread when constraints change. Forwards the
+  // constraints to sinks added with AddOrUpdateSink via OnConstraintsChanged.
+  void ProcessConstraints(
+      const webrtc::VideoTrackSourceConstraints& constraints);
+
+ protected:
+  // Recomputes current_wants_ as the aggregate of all sinks' wants.
+  void UpdateWants() RTC_EXCLUSIVE_LOCKS_REQUIRED(sinks_and_wants_lock_);
+  // Lazily (re)creates a cached black frame buffer of the given size.
+  const rtc::scoped_refptr<webrtc::VideoFrameBuffer>& GetBlackFrameBuffer(
+      int width,
+      int height) RTC_EXCLUSIVE_LOCKS_REQUIRED(sinks_and_wants_lock_);
+
+  // Guards sink registration, aggregated wants and frame-delivery state.
+  mutable webrtc::Mutex sinks_and_wants_lock_;
+
+  VideoSinkWants current_wants_ RTC_GUARDED_BY(sinks_and_wants_lock_);
+  // NOTE(review): only touched under the lock in the .cc but carries no
+  // RTC_GUARDED_BY annotation — consider adding one.
+  rtc::scoped_refptr<webrtc::VideoFrameBuffer> black_frame_buffer_;
+  // True when the previous frame reached every sink, so update rects are
+  // valid deltas for all of them.
+  bool previous_frame_sent_to_all_sinks_ RTC_GUARDED_BY(sinks_and_wants_lock_) =
+      true;
+  // Last constraints seen by ProcessConstraints; replayed to new sinks.
+  absl::optional<webrtc::VideoTrackSourceConstraints> last_constraints_
+      RTC_GUARDED_BY(sinks_and_wants_lock_);
+};
+
+} // namespace rtc
+
+#endif // MEDIA_BASE_VIDEO_BROADCASTER_H_
diff --git a/third_party/libwebrtc/media/base/video_broadcaster_unittest.cc b/third_party/libwebrtc/media/base/video_broadcaster_unittest.cc
new file mode 100644
index 0000000000..bb80c11930
--- /dev/null
+++ b/third_party/libwebrtc/media/base/video_broadcaster_unittest.cc
@@ -0,0 +1,438 @@
+/*
+ * Copyright 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/base/video_broadcaster.h"
+
+#include <limits>
+
+#include "absl/types/optional.h"
+#include "api/video/i420_buffer.h"
+#include "api/video/video_frame.h"
+#include "api/video/video_rotation.h"
+#include "api/video/video_source_interface.h"
+#include "media/base/fake_video_renderer.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+using cricket::FakeVideoRenderer;
+using rtc::VideoBroadcaster;
+using rtc::VideoSinkWants;
+using FrameSize = rtc::VideoSinkWants::FrameSize;
+
+using ::testing::AllOf;
+using ::testing::Eq;
+using ::testing::Field;
+using ::testing::Mock;
+using ::testing::Optional;
+
// Sink stub used to observe OnConstraintsChanged callbacks; delivered
// frames are accepted and discarded.
class MockSink : public rtc::VideoSinkInterface<webrtc::VideoFrame> {
 public:
  void OnFrame(const webrtc::VideoFrame&) override {}

  MOCK_METHOD(void,
              OnConstraintsChanged,
              (const webrtc::VideoTrackSourceConstraints& constraints),
              (override));
};
+
// frame_wanted() is true exactly while at least one sink is attached.
TEST(VideoBroadcasterTest, frame_wanted) {
  VideoBroadcaster broadcaster;
  EXPECT_FALSE(broadcaster.frame_wanted());

  FakeVideoRenderer sink;
  broadcaster.AddOrUpdateSink(&sink, rtc::VideoSinkWants());
  EXPECT_TRUE(broadcaster.frame_wanted());

  broadcaster.RemoveSink(&sink);
  EXPECT_FALSE(broadcaster.frame_wanted());
}
+
// Frames given to the broadcaster are fanned out to every attached sink,
// and stop reaching a sink once it is removed.
TEST(VideoBroadcasterTest, OnFrame) {
  VideoBroadcaster broadcaster;

  FakeVideoRenderer sink1;
  FakeVideoRenderer sink2;
  broadcaster.AddOrUpdateSink(&sink1, rtc::VideoSinkWants());
  broadcaster.AddOrUpdateSink(&sink2, rtc::VideoSinkWants());
  static int kWidth = 100;
  static int kHeight = 50;

  rtc::scoped_refptr<webrtc::I420Buffer> buffer(
      webrtc::I420Buffer::Create(kWidth, kHeight));
  // Initialize the pixel data, to avoid warnings on use of uninitialized
  // values.
  webrtc::I420Buffer::SetBlack(buffer.get());

  webrtc::VideoFrame frame = webrtc::VideoFrame::Builder()
                                 .set_video_frame_buffer(buffer)
                                 .set_rotation(webrtc::kVideoRotation_0)
                                 .set_timestamp_us(0)
                                 .build();

  broadcaster.OnFrame(frame);
  EXPECT_EQ(1, sink1.num_rendered_frames());
  EXPECT_EQ(1, sink2.num_rendered_frames());

  // A removed sink no longer receives frames.
  broadcaster.RemoveSink(&sink1);
  broadcaster.OnFrame(frame);
  EXPECT_EQ(1, sink1.num_rendered_frames());
  EXPECT_EQ(2, sink2.num_rendered_frames());

  // Re-adding the sink resumes delivery.
  broadcaster.AddOrUpdateSink(&sink1, rtc::VideoSinkWants());
  broadcaster.OnFrame(frame);
  EXPECT_EQ(2, sink1.num_rendered_frames());
  EXPECT_EQ(3, sink2.num_rendered_frames());
}
+
// rotation_applied aggregates as the OR over all sinks: one sink asking
// for applied rotation is enough to enable it.
TEST(VideoBroadcasterTest, AppliesRotationIfAnySinkWantsRotationApplied) {
  VideoBroadcaster broadcaster;
  EXPECT_FALSE(broadcaster.wants().rotation_applied);

  FakeVideoRenderer sink1;
  VideoSinkWants wants1;
  wants1.rotation_applied = false;

  broadcaster.AddOrUpdateSink(&sink1, wants1);
  EXPECT_FALSE(broadcaster.wants().rotation_applied);

  FakeVideoRenderer sink2;
  VideoSinkWants wants2;
  wants2.rotation_applied = true;

  broadcaster.AddOrUpdateSink(&sink2, wants2);
  EXPECT_TRUE(broadcaster.wants().rotation_applied);

  // Removing the requesting sink clears the aggregate again.
  broadcaster.RemoveSink(&sink2);
  EXPECT_FALSE(broadcaster.wants().rotation_applied);
}
+
// max_pixel_count aggregates as the minimum over all sinks.
TEST(VideoBroadcasterTest, AppliesMinOfSinkWantsMaxPixelCount) {
  VideoBroadcaster broadcaster;
  // With no sinks the wants are unconstrained.
  EXPECT_EQ(std::numeric_limits<int>::max(),
            broadcaster.wants().max_pixel_count);

  FakeVideoRenderer sink1;
  VideoSinkWants wants1;
  wants1.max_pixel_count = 1280 * 720;

  broadcaster.AddOrUpdateSink(&sink1, wants1);
  EXPECT_EQ(1280 * 720, broadcaster.wants().max_pixel_count);

  FakeVideoRenderer sink2;
  VideoSinkWants wants2;
  wants2.max_pixel_count = 640 * 360;
  broadcaster.AddOrUpdateSink(&sink2, wants2);
  EXPECT_EQ(640 * 360, broadcaster.wants().max_pixel_count);

  broadcaster.RemoveSink(&sink2);
  EXPECT_EQ(1280 * 720, broadcaster.wants().max_pixel_count);
}
+
// target_pixel_count aggregates as the minimum over all sinks that set it;
// it is unset while no sink provides one.
TEST(VideoBroadcasterTest, AppliesMinOfSinkWantsMaxAndTargetPixelCount) {
  VideoBroadcaster broadcaster;
  EXPECT_TRUE(!broadcaster.wants().target_pixel_count);

  FakeVideoRenderer sink1;
  VideoSinkWants wants1;
  wants1.target_pixel_count = 1280 * 720;

  broadcaster.AddOrUpdateSink(&sink1, wants1);
  EXPECT_EQ(1280 * 720, *broadcaster.wants().target_pixel_count);

  FakeVideoRenderer sink2;
  VideoSinkWants wants2;
  wants2.target_pixel_count = 640 * 360;
  broadcaster.AddOrUpdateSink(&sink2, wants2);
  EXPECT_EQ(640 * 360, *broadcaster.wants().target_pixel_count);

  broadcaster.RemoveSink(&sink2);
  EXPECT_EQ(1280 * 720, *broadcaster.wants().target_pixel_count);
}
+
// max_framerate_fps aggregates as the minimum over all sinks.
TEST(VideoBroadcasterTest, AppliesMinOfSinkWantsMaxFramerate) {
  VideoBroadcaster broadcaster;
  // Unconstrained while no sink is attached.
  EXPECT_EQ(std::numeric_limits<int>::max(),
            broadcaster.wants().max_framerate_fps);

  FakeVideoRenderer sink1;
  VideoSinkWants wants1;
  wants1.max_framerate_fps = 30;

  broadcaster.AddOrUpdateSink(&sink1, wants1);
  EXPECT_EQ(30, broadcaster.wants().max_framerate_fps);

  FakeVideoRenderer sink2;
  VideoSinkWants wants2;
  wants2.max_framerate_fps = 15;
  broadcaster.AddOrUpdateSink(&sink2, wants2);
  EXPECT_EQ(15, broadcaster.wants().max_framerate_fps);

  broadcaster.RemoveSink(&sink2);
  EXPECT_EQ(30, broadcaster.wants().max_framerate_fps);
}
+
// resolution_alignment aggregates as the least common multiple of all
// sinks' requested alignments (LCM(2, 3) = 6; LCM(2, 3, 4) = 12).
TEST(VideoBroadcasterTest,
     AppliesLeastCommonMultipleOfSinkWantsResolutionAlignment) {
  VideoBroadcaster broadcaster;
  EXPECT_EQ(broadcaster.wants().resolution_alignment, 1);

  FakeVideoRenderer sink1;
  VideoSinkWants wants1;
  wants1.resolution_alignment = 2;
  broadcaster.AddOrUpdateSink(&sink1, wants1);
  EXPECT_EQ(broadcaster.wants().resolution_alignment, 2);

  FakeVideoRenderer sink2;
  VideoSinkWants wants2;
  wants2.resolution_alignment = 3;
  broadcaster.AddOrUpdateSink(&sink2, wants2);
  EXPECT_EQ(broadcaster.wants().resolution_alignment, 6);

  FakeVideoRenderer sink3;
  VideoSinkWants wants3;
  wants3.resolution_alignment = 4;
  broadcaster.AddOrUpdateSink(&sink3, wants3);
  EXPECT_EQ(broadcaster.wants().resolution_alignment, 12);

  // With sink2 gone, LCM(2, 4) = 4.
  broadcaster.RemoveSink(&sink2);
  EXPECT_EQ(broadcaster.wants().resolution_alignment, 4);
}
+
// black_frames is honored per sink: a sink asking for black frames gets a
// blacked-out copy with the original timestamp, while other sinks get the
// real frame.
TEST(VideoBroadcasterTest, SinkWantsBlackFrames) {
  VideoBroadcaster broadcaster;
  EXPECT_TRUE(!broadcaster.wants().black_frames);

  FakeVideoRenderer sink1;
  VideoSinkWants wants1;
  wants1.black_frames = true;
  broadcaster.AddOrUpdateSink(&sink1, wants1);

  FakeVideoRenderer sink2;
  VideoSinkWants wants2;
  wants2.black_frames = false;
  broadcaster.AddOrUpdateSink(&sink2, wants2);

  rtc::scoped_refptr<webrtc::I420Buffer> buffer(
      webrtc::I420Buffer::Create(100, 200));
  // Makes it not all black.
  buffer->InitializeData();

  webrtc::VideoFrame frame1 = webrtc::VideoFrame::Builder()
                                  .set_video_frame_buffer(buffer)
                                  .set_rotation(webrtc::kVideoRotation_0)
                                  .set_timestamp_us(10)
                                  .build();
  broadcaster.OnFrame(frame1);
  EXPECT_TRUE(sink1.black_frame());
  EXPECT_EQ(10, sink1.timestamp_us());
  EXPECT_FALSE(sink2.black_frame());
  EXPECT_EQ(10, sink2.timestamp_us());

  // Switch the sink wants.
  wants1.black_frames = false;
  broadcaster.AddOrUpdateSink(&sink1, wants1);
  wants2.black_frames = true;
  broadcaster.AddOrUpdateSink(&sink2, wants2);

  webrtc::VideoFrame frame2 = webrtc::VideoFrame::Builder()
                                  .set_video_frame_buffer(buffer)
                                  .set_rotation(webrtc::kVideoRotation_0)
                                  .set_timestamp_us(30)
                                  .build();
  broadcaster.OnFrame(frame2);
  EXPECT_FALSE(sink1.black_frame());
  EXPECT_EQ(30, sink1.timestamp_us());
  EXPECT_TRUE(sink2.black_frame());
  EXPECT_EQ(30, sink2.timestamp_us());
}
+
// Adding a sink before any constraints have been processed must not
// trigger OnConstraintsChanged.
TEST(VideoBroadcasterTest, ConstraintsChangedNotCalledOnSinkAddition) {
  MockSink sink;
  VideoBroadcaster broadcaster;
  EXPECT_CALL(sink, OnConstraintsChanged).Times(0);
  broadcaster.AddOrUpdateSink(&sink, VideoSinkWants());
}
+
// Only the most recently processed constraints ({1, 4}, not {2, 3}) are
// replayed to a newly added sink.
TEST(VideoBroadcasterTest, ForwardsLastConstraintsOnAdd) {
  MockSink sink;
  VideoBroadcaster broadcaster;
  broadcaster.ProcessConstraints(webrtc::VideoTrackSourceConstraints{2, 3});
  broadcaster.ProcessConstraints(webrtc::VideoTrackSourceConstraints{1, 4});
  EXPECT_CALL(
      sink,
      OnConstraintsChanged(AllOf(
          Field(&webrtc::VideoTrackSourceConstraints::min_fps, Optional(1)),
          Field(&webrtc::VideoTrackSourceConstraints::max_fps, Optional(4)))));
  broadcaster.AddOrUpdateSink(&sink, VideoSinkWants());
}
+
// Already-registered sinks are not re-notified when another sink is added;
// only the new sink receives the replayed constraints.
TEST(VideoBroadcasterTest, UpdatesOnlyNewSinksWithConstraints) {
  MockSink sink1;
  VideoBroadcaster broadcaster;
  broadcaster.AddOrUpdateSink(&sink1, VideoSinkWants());
  broadcaster.ProcessConstraints(webrtc::VideoTrackSourceConstraints{1, 4});
  Mock::VerifyAndClearExpectations(&sink1);
  EXPECT_CALL(sink1, OnConstraintsChanged).Times(0);
  MockSink sink2;
  EXPECT_CALL(
      sink2,
      OnConstraintsChanged(AllOf(
          Field(&webrtc::VideoTrackSourceConstraints::min_fps, Optional(1)),
          Field(&webrtc::VideoTrackSourceConstraints::max_fps, Optional(4)))));
  broadcaster.AddOrUpdateSink(&sink2, VideoSinkWants());
}
+
// ProcessConstraints forwards each update to registered sinks, covering all
// four combinations of present/absent (nullopt) min and max fps.
TEST(VideoBroadcasterTest, ForwardsConstraintsToSink) {
  MockSink sink;
  VideoBroadcaster broadcaster;
  // Sink addition alone must not notify (no constraints processed yet).
  EXPECT_CALL(sink, OnConstraintsChanged).Times(0);
  broadcaster.AddOrUpdateSink(&sink, VideoSinkWants());
  Mock::VerifyAndClearExpectations(&sink);

  EXPECT_CALL(sink, OnConstraintsChanged(AllOf(
                        Field(&webrtc::VideoTrackSourceConstraints::min_fps,
                              Eq(absl::nullopt)),
                        Field(&webrtc::VideoTrackSourceConstraints::max_fps,
                              Eq(absl::nullopt)))));
  broadcaster.ProcessConstraints(
      webrtc::VideoTrackSourceConstraints{absl::nullopt, absl::nullopt});
  Mock::VerifyAndClearExpectations(&sink);

  EXPECT_CALL(
      sink,
      OnConstraintsChanged(AllOf(
          Field(&webrtc::VideoTrackSourceConstraints::min_fps,
                Eq(absl::nullopt)),
          Field(&webrtc::VideoTrackSourceConstraints::max_fps, Optional(3)))));
  broadcaster.ProcessConstraints(
      webrtc::VideoTrackSourceConstraints{absl::nullopt, 3});
  Mock::VerifyAndClearExpectations(&sink);

  EXPECT_CALL(
      sink,
      OnConstraintsChanged(AllOf(
          Field(&webrtc::VideoTrackSourceConstraints::min_fps, Optional(2)),
          Field(&webrtc::VideoTrackSourceConstraints::max_fps,
                Eq(absl::nullopt)))));
  broadcaster.ProcessConstraints(
      webrtc::VideoTrackSourceConstraints{2, absl::nullopt});
  Mock::VerifyAndClearExpectations(&sink);

  EXPECT_CALL(
      sink,
      OnConstraintsChanged(AllOf(
          Field(&webrtc::VideoTrackSourceConstraints::min_fps, Optional(2)),
          Field(&webrtc::VideoTrackSourceConstraints::max_fps, Optional(3)))));
  broadcaster.ProcessConstraints(webrtc::VideoTrackSourceConstraints{2, 3});
}
+
// requested_resolution aggregates as the per-dimension maximum over active
// sinks: max(640, 650) x max(360, 350) == 650x360.
TEST(VideoBroadcasterTest, AppliesMaxOfSinkWantsRequestedResolution) {
  VideoBroadcaster broadcaster;

  FakeVideoRenderer sink1;
  VideoSinkWants wants1;
  wants1.is_active = true;
  wants1.requested_resolution = FrameSize(640, 360);

  broadcaster.AddOrUpdateSink(&sink1, wants1);
  EXPECT_EQ(FrameSize(640, 360), *broadcaster.wants().requested_resolution);

  FakeVideoRenderer sink2;
  VideoSinkWants wants2;
  wants2.is_active = true;
  wants2.requested_resolution = FrameSize(650, 350);
  broadcaster.AddOrUpdateSink(&sink2, wants2);
  EXPECT_EQ(FrameSize(650, 360), *broadcaster.wants().requested_resolution);

  broadcaster.RemoveSink(&sink2);
  EXPECT_EQ(FrameSize(640, 360), *broadcaster.wants().requested_resolution);
}
+
// is_active aggregates as the OR over all sinks.
TEST(VideoBroadcasterTest, AnyActive) {
  VideoBroadcaster broadcaster;

  FakeVideoRenderer sink1;
  VideoSinkWants wants1;
  wants1.is_active = false;

  broadcaster.AddOrUpdateSink(&sink1, wants1);
  EXPECT_EQ(false, broadcaster.wants().is_active);

  FakeVideoRenderer sink2;
  VideoSinkWants wants2;
  wants2.is_active = true;
  broadcaster.AddOrUpdateSink(&sink2, wants2);
  EXPECT_EQ(true, broadcaster.wants().is_active);

  broadcaster.RemoveSink(&sink2);
  EXPECT_EQ(false, broadcaster.wants().is_active);
}
+
// aggregates->any_active_without_requested_resolution is true iff some
// active sink did not set requested_resolution.
TEST(VideoBroadcasterTest, AnyActiveWithoutRequestedResolution) {
  VideoBroadcaster broadcaster;

  FakeVideoRenderer sink1;
  VideoSinkWants wants1;
  wants1.is_active = true;
  wants1.requested_resolution = FrameSize(640, 360);

  broadcaster.AddOrUpdateSink(&sink1, wants1);
  EXPECT_EQ(
      false,
      broadcaster.wants().aggregates->any_active_without_requested_resolution);

  // sink2 is active but sets no requested_resolution.
  FakeVideoRenderer sink2;
  VideoSinkWants wants2;
  wants2.is_active = true;
  broadcaster.AddOrUpdateSink(&sink2, wants2);
  EXPECT_EQ(
      true,
      broadcaster.wants().aggregates->any_active_without_requested_resolution);

  broadcaster.RemoveSink(&sink2);
  EXPECT_EQ(
      false,
      broadcaster.wants().aggregates->any_active_without_requested_resolution);
}
+
// This verifies that the VideoSinkWants from a sink with is_active == false
// is ignored IF there is an active sink using the new api
// (requested_resolution). The test uses resolution_alignment for
// verification.
TEST(VideoBroadcasterTest, IgnoreInactiveSinkIfNewApiUsed) {
  VideoBroadcaster broadcaster;

  FakeVideoRenderer sink1;
  VideoSinkWants wants1;
  wants1.is_active = true;
  wants1.requested_resolution = FrameSize(640, 360);
  wants1.resolution_alignment = 2;
  broadcaster.AddOrUpdateSink(&sink1, wants1);
  EXPECT_EQ(broadcaster.wants().resolution_alignment, 2);

  FakeVideoRenderer sink2;
  VideoSinkWants wants2;
  wants2.is_active = true;
  wants2.resolution_alignment = 8;
  broadcaster.AddOrUpdateSink(&sink2, wants2);
  EXPECT_EQ(broadcaster.wants().resolution_alignment, 8);

  // Now wants2 will be ignored.
  wants2.is_active = false;
  broadcaster.AddOrUpdateSink(&sink2, wants2);
  EXPECT_EQ(broadcaster.wants().resolution_alignment, 2);

  // But when wants1 is inactive, wants2 matters again.
  wants1.is_active = false;
  broadcaster.AddOrUpdateSink(&sink1, wants1);
  EXPECT_EQ(broadcaster.wants().resolution_alignment, 8);

  // An inactive wants1 (new api) is always ignored.
  broadcaster.RemoveSink(&sink2);
  EXPECT_EQ(broadcaster.wants().resolution_alignment, 1);
}
diff --git a/third_party/libwebrtc/media/base/video_common.cc b/third_party/libwebrtc/media/base/video_common.cc
new file mode 100644
index 0000000000..0ac3b3790e
--- /dev/null
+++ b/third_party/libwebrtc/media/base/video_common.cc
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2010 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/base/video_common.h"
+
+#include "api/array_view.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/strings/string_builder.h"
+
+namespace cricket {
+
// Maps a non-canonical FourCC alias onto its canonical equivalent.
struct FourCCAliasEntry {
  uint32_t alias;
  uint32_t canonical;
};

// Lookup table consulted by CanonicalFourCC() below.
static const FourCCAliasEntry kFourCCAliases[] = {
    {FOURCC_IYUV, FOURCC_I420},
    {FOURCC_YU16, FOURCC_I422},
    {FOURCC_YU24, FOURCC_I444},
    {FOURCC_YUYV, FOURCC_YUY2},
    {FOURCC_YUVS, FOURCC_YUY2},
    {FOURCC_HDYC, FOURCC_UYVY},
    {FOURCC_2VUY, FOURCC_UYVY},
    {FOURCC_JPEG, FOURCC_MJPG},  // Note: JPEG has DHT while MJPG does not.
    {FOURCC_DMB1, FOURCC_MJPG},
    {FOURCC_BA81, FOURCC_BGGR},
    {FOURCC_RGB3, FOURCC_RAW},
    {FOURCC_BGR3, FOURCC_24BG},
    {FOURCC_CM32, FOURCC_BGRA},
    {FOURCC_CM24, FOURCC_RAW},
};
+
+uint32_t CanonicalFourCC(uint32_t fourcc) {
+ for (uint32_t i = 0; i < arraysize(kFourCCAliases); ++i) {
+ if (kFourCCAliases[i].alias == fourcc) {
+ return kFourCCAliases[i].canonical;
+ }
+ }
+ // Not an alias, so return it as-is.
+ return fourcc;
+}
+
+// The C++ standard requires a namespace-scope definition of static const
+// integral types even when they are initialized in the declaration (see
+// [class.static.data]/4), but MSVC with /Ze is non-conforming and treats that
+// as a multiply defined symbol error. See Also:
+// http://msdn.microsoft.com/en-us/library/34h23df8.aspx
+#ifndef _MSC_EXTENSIONS
+const int64_t VideoFormat::kMinimumInterval; // Initialized in header.
+#endif
+
// Renders the format as "fourcc width x height x fps"; the fourcc name is
// dropped when it contains non-printable characters (e.g. FOURCC_ANY).
std::string VideoFormat::ToString() const {
  std::string fourcc_name = GetFourccName(fourcc) + " ";
  for (std::string::const_iterator i = fourcc_name.begin();
       i < fourcc_name.end(); ++i) {
    // Test character is printable; Avoid isprint() which asserts on negatives.
    if (*i < 32 || *i >= 127) {
      // Non-printable: omit the fourcc name entirely.
      fourcc_name = "";
      break;
    }
  }

  char buf[256];
  rtc::SimpleStringBuilder sb(buf);
  sb << fourcc_name << width << "x" << height << "x"
     << IntervalToFpsFloat(interval);
  return sb.str();
}
+
+int GreatestCommonDivisor(int a, int b) {
+ RTC_DCHECK_GE(a, 0);
+ RTC_DCHECK_GT(b, 0);
+ int c = a % b;
+ while (c != 0) {
+ a = b;
+ b = c;
+ c = a % b;
+ }
+ return b;
+}
+
+int LeastCommonMultiple(int a, int b) {
+ RTC_DCHECK_GT(a, 0);
+ RTC_DCHECK_GT(b, 0);
+ return a * (b / GreatestCommonDivisor(a, b));
+}
+
+} // namespace cricket
diff --git a/third_party/libwebrtc/media/base/video_common.h b/third_party/libwebrtc/media/base/video_common.h
new file mode 100644
index 0000000000..f27e008d26
--- /dev/null
+++ b/third_party/libwebrtc/media/base/video_common.h
@@ -0,0 +1,224 @@
+/*
+ * Copyright (c) 2004 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Common definition for video, including fourcc and VideoFormat.
+
+#ifndef MEDIA_BASE_VIDEO_COMMON_H_
+#define MEDIA_BASE_VIDEO_COMMON_H_
+
+#include <stdint.h>
+
+#include <string>
+
+#include "rtc_base/system/rtc_export.h"
+#include "rtc_base/time_utils.h"
+
+namespace cricket {
+
+//////////////////////////////////////////////////////////////////////////////
+// Definition of FourCC codes
+//////////////////////////////////////////////////////////////////////////////
+// Convert four characters to a FourCC code.
+// Needs to be a macro otherwise the OS X compiler complains when the kFormat*
+// constants are used in a switch.
+#define CRICKET_FOURCC(a, b, c, d) \
+ ((static_cast<uint32_t>(a)) | (static_cast<uint32_t>(b) << 8) | \
+ (static_cast<uint32_t>(c) << 16) | (static_cast<uint32_t>(d) << 24))
+// Some pages discussing FourCC codes:
+// http://www.fourcc.org/yuv.php
+// http://v4l2spec.bytesex.org/spec/book1.htm
+// http://developer.apple.com/quicktime/icefloe/dispatch020.html
+// http://msdn.microsoft.com/library/windows/desktop/dd206750.aspx#nv12
+// http://people.xiph.org/~xiphmont/containers/nut/nut4cc.txt
+
// FourCC codes grouped according to implementation efficiency.
// Primary formats should convert in 1 efficient step.
// Secondary formats are converted in 2 steps.
// Auxiliary formats call primary converters.
// All values are little-endian: the first character is the low-order byte.
enum FourCC {
  // 9 Primary YUV formats: 5 planar, 2 biplanar, 2 packed.
  FOURCC_I420 = CRICKET_FOURCC('I', '4', '2', '0'),
  FOURCC_I422 = CRICKET_FOURCC('I', '4', '2', '2'),
  FOURCC_I444 = CRICKET_FOURCC('I', '4', '4', '4'),
  FOURCC_I411 = CRICKET_FOURCC('I', '4', '1', '1'),
  FOURCC_I400 = CRICKET_FOURCC('I', '4', '0', '0'),
  FOURCC_NV21 = CRICKET_FOURCC('N', 'V', '2', '1'),
  FOURCC_NV12 = CRICKET_FOURCC('N', 'V', '1', '2'),
  FOURCC_YUY2 = CRICKET_FOURCC('Y', 'U', 'Y', '2'),
  FOURCC_UYVY = CRICKET_FOURCC('U', 'Y', 'V', 'Y'),

  // 1 Secondary YUV format: row biplanar.
  FOURCC_M420 = CRICKET_FOURCC('M', '4', '2', '0'),

  // 9 Primary RGB formats: 4 32 bpp, 2 24 bpp, 3 16 bpp.
  FOURCC_ARGB = CRICKET_FOURCC('A', 'R', 'G', 'B'),
  FOURCC_BGRA = CRICKET_FOURCC('B', 'G', 'R', 'A'),
  FOURCC_ABGR = CRICKET_FOURCC('A', 'B', 'G', 'R'),
  FOURCC_24BG = CRICKET_FOURCC('2', '4', 'B', 'G'),
  FOURCC_RAW = CRICKET_FOURCC('r', 'a', 'w', ' '),
  FOURCC_RGBA = CRICKET_FOURCC('R', 'G', 'B', 'A'),
  FOURCC_RGBP = CRICKET_FOURCC('R', 'G', 'B', 'P'),  // bgr565.
  FOURCC_RGBO = CRICKET_FOURCC('R', 'G', 'B', 'O'),  // abgr1555.
  FOURCC_R444 = CRICKET_FOURCC('R', '4', '4', '4'),  // argb4444.

  // 4 Secondary RGB formats: 4 Bayer Patterns.
  FOURCC_RGGB = CRICKET_FOURCC('R', 'G', 'G', 'B'),
  FOURCC_BGGR = CRICKET_FOURCC('B', 'G', 'G', 'R'),
  FOURCC_GRBG = CRICKET_FOURCC('G', 'R', 'B', 'G'),
  FOURCC_GBRG = CRICKET_FOURCC('G', 'B', 'R', 'G'),

  // 1 Primary Compressed YUV format.
  FOURCC_MJPG = CRICKET_FOURCC('M', 'J', 'P', 'G'),

  // 6 Auxiliary YUV variations: 3 with U and V planes are swapped, 1 alias.
  FOURCC_YV12 = CRICKET_FOURCC('Y', 'V', '1', '2'),
  FOURCC_YV16 = CRICKET_FOURCC('Y', 'V', '1', '6'),
  FOURCC_YV24 = CRICKET_FOURCC('Y', 'V', '2', '4'),
  FOURCC_YU12 = CRICKET_FOURCC('Y', 'U', '1', '2'),  // Linux version of I420.
  FOURCC_J420 = CRICKET_FOURCC('J', '4', '2', '0'),
  FOURCC_J400 = CRICKET_FOURCC('J', '4', '0', '0'),

  // 14 Auxiliary aliases. CanonicalFourCC() maps these to canonical FOURCC.
  FOURCC_IYUV = CRICKET_FOURCC('I', 'Y', 'U', 'V'),  // Alias for I420.
  FOURCC_YU16 = CRICKET_FOURCC('Y', 'U', '1', '6'),  // Alias for I422.
  FOURCC_YU24 = CRICKET_FOURCC('Y', 'U', '2', '4'),  // Alias for I444.
  FOURCC_YUYV = CRICKET_FOURCC('Y', 'U', 'Y', 'V'),  // Alias for YUY2.
  FOURCC_YUVS = CRICKET_FOURCC('y', 'u', 'v', 's'),  // Alias for YUY2 on Mac.
  FOURCC_HDYC = CRICKET_FOURCC('H', 'D', 'Y', 'C'),  // Alias for UYVY.
  FOURCC_2VUY = CRICKET_FOURCC('2', 'v', 'u', 'y'),  // Alias for UYVY on Mac.
  FOURCC_JPEG = CRICKET_FOURCC('J', 'P', 'E', 'G'),  // Alias for MJPG.
  FOURCC_DMB1 = CRICKET_FOURCC('d', 'm', 'b', '1'),  // Alias for MJPG on Mac.
  FOURCC_BA81 = CRICKET_FOURCC('B', 'A', '8', '1'),  // Alias for BGGR.
  FOURCC_RGB3 = CRICKET_FOURCC('R', 'G', 'B', '3'),  // Alias for RAW.
  FOURCC_BGR3 = CRICKET_FOURCC('B', 'G', 'R', '3'),  // Alias for 24BG.
  FOURCC_CM32 = CRICKET_FOURCC(0, 0, 0, 32),  // BGRA kCMPixelFormat_32ARGB
  FOURCC_CM24 = CRICKET_FOURCC(0, 0, 0, 24),  // RAW kCMPixelFormat_24RGB

  // 1 Auxiliary compressed YUV format set aside for capturer.
  FOURCC_H264 = CRICKET_FOURCC('H', '2', '6', '4'),
};
+
+#undef CRICKET_FOURCC
+
+// Match any fourcc.
+
+// We move this out of the enum because using it in many places caused
+// the compiler to get grumpy, presumably since the above enum is
+// backed by an int.
+static const uint32_t FOURCC_ANY = 0xFFFFFFFF;
+
+// Converts fourcc aliases into canonical ones.
+uint32_t CanonicalFourCC(uint32_t fourcc);
+
+// Get FourCC code as a string.
// Get FourCC code as a string. Bytes are stored little-endian, so the
// low-order byte becomes the first character of the name.
inline std::string GetFourccName(uint32_t fourcc) {
  std::string name;
  for (int shift = 0; shift < 32; shift += 8) {
    name.push_back(static_cast<char>((fourcc >> shift) & 0xFF));
  }
  return name;
}
+
+//////////////////////////////////////////////////////////////////////////////
+// Definition of VideoFormat.
+//////////////////////////////////////////////////////////////////////////////
+
+// VideoFormat with Plain Old Data for global variables.
// VideoFormat with Plain Old Data for global variables.
struct VideoFormatPod {
  int width;         // Number of pixels.
  int height;        // Number of pixels.
  int64_t interval;  // Nanoseconds between frames.
  uint32_t fourcc;   // Color space. FOURCC_ANY means that any color space is
                     // OK.
};
+
// A capture format: resolution, frame interval and color space, with
// conversion helpers between frame interval (ns) and frames per second.
struct RTC_EXPORT VideoFormat : VideoFormatPod {
  // Smallest representable interval, i.e. the largest supported fps.
  static const int64_t kMinimumInterval =
      rtc::kNumNanosecsPerSec / 10000;  // 10k fps.

  VideoFormat() { Construct(0, 0, 0, 0); }

  VideoFormat(int w, int h, int64_t interval_ns, uint32_t cc) {
    Construct(w, h, interval_ns, cc);
  }

  explicit VideoFormat(const VideoFormatPod& format) {
    Construct(format.width, format.height, format.interval, format.fourcc);
  }

  void Construct(int w, int h, int64_t interval_ns, uint32_t cc) {
    width = w;
    height = h;
    interval = interval_ns;
    fourcc = cc;
  }

  // fps == 0 maps to kMinimumInterval rather than dividing by zero.
  static int64_t FpsToInterval(int fps) {
    return fps ? rtc::kNumNanosecsPerSec / fps : kMinimumInterval;
  }

  // Integer fps; interval == 0 maps to 0 fps.
  static int IntervalToFps(int64_t interval) {
    if (!interval) {
      return 0;
    }
    return static_cast<int>(rtc::kNumNanosecsPerSec / interval);
  }

  // Fractional fps, used for display purposes (see ToString()).
  static float IntervalToFpsFloat(int64_t interval) {
    if (!interval) {
      return 0.f;
    }
    return static_cast<float>(rtc::kNumNanosecsPerSec) /
           static_cast<float>(interval);
  }

  bool operator==(const VideoFormat& format) const {
    return width == format.width && height == format.height &&
           interval == format.interval && fourcc == format.fourcc;
  }

  bool operator!=(const VideoFormat& format) const {
    return !(*this == format);
  }

  // Lexicographic ordering on (fourcc, width, height, interval). Note the
  // interval is compared descending, so higher frame rates sort first.
  bool operator<(const VideoFormat& format) const {
    return (fourcc < format.fourcc) ||
           (fourcc == format.fourcc && width < format.width) ||
           (fourcc == format.fourcc && width == format.width &&
            height < format.height) ||
           (fourcc == format.fourcc && width == format.width &&
            height == format.height && interval > format.interval);
  }

  int framerate() const { return IntervalToFps(interval); }

  // Check if both width and height are 0.
  bool IsSize0x0() const { return 0 == width && 0 == height; }

  // Check if this format is less than another one by comparing the resolution
  // and frame rate (pixels per second).
  bool IsPixelRateLess(const VideoFormat& format) const {
    return width * height * framerate() <
           format.width * format.height * format.framerate();
  }

  // Get a string presentation in the form of "fourcc width x height x fps"
  std::string ToString() const;
};
+
+// Returns the largest positive integer that divides both `a` and `b`.
+int GreatestCommonDivisor(int a, int b);
+
+// Returns the smallest positive integer that is divisible by both `a` and `b`.
+int LeastCommonMultiple(int a, int b);
+
+} // namespace cricket
+
+#endif // MEDIA_BASE_VIDEO_COMMON_H_
diff --git a/third_party/libwebrtc/media/base/video_common_unittest.cc b/third_party/libwebrtc/media/base/video_common_unittest.cc
new file mode 100644
index 0000000000..3f445c7769
--- /dev/null
+++ b/third_party/libwebrtc/media/base/video_common_unittest.cc
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2008 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/base/video_common.h"
+
+#include "test/gtest.h"
+
+namespace cricket {
+
// Exercises every entry of the alias table plus the identity cases.
TEST(VideoCommonTest, TestCanonicalFourCC) {
  // Canonical fourccs are not changed.
  EXPECT_EQ(FOURCC_I420, CanonicalFourCC(FOURCC_I420));
  // The special FOURCC_ANY value is not changed.
  EXPECT_EQ(FOURCC_ANY, CanonicalFourCC(FOURCC_ANY));
  // Aliases are translated to the canonical equivalent.
  EXPECT_EQ(FOURCC_I420, CanonicalFourCC(FOURCC_IYUV));
  EXPECT_EQ(FOURCC_I422, CanonicalFourCC(FOURCC_YU16));
  EXPECT_EQ(FOURCC_I444, CanonicalFourCC(FOURCC_YU24));
  EXPECT_EQ(FOURCC_YUY2, CanonicalFourCC(FOURCC_YUYV));
  EXPECT_EQ(FOURCC_YUY2, CanonicalFourCC(FOURCC_YUVS));
  EXPECT_EQ(FOURCC_UYVY, CanonicalFourCC(FOURCC_HDYC));
  EXPECT_EQ(FOURCC_UYVY, CanonicalFourCC(FOURCC_2VUY));
  EXPECT_EQ(FOURCC_MJPG, CanonicalFourCC(FOURCC_JPEG));
  EXPECT_EQ(FOURCC_MJPG, CanonicalFourCC(FOURCC_DMB1));
  EXPECT_EQ(FOURCC_BGGR, CanonicalFourCC(FOURCC_BA81));
  EXPECT_EQ(FOURCC_RAW, CanonicalFourCC(FOURCC_RGB3));
  EXPECT_EQ(FOURCC_24BG, CanonicalFourCC(FOURCC_BGR3));
  EXPECT_EQ(FOURCC_BGRA, CanonicalFourCC(FOURCC_CM32));
  EXPECT_EQ(FOURCC_RAW, CanonicalFourCC(FOURCC_CM24));
}
+
+// Test conversion between interval and fps
// Test conversion between interval and fps, including the 0 edge cases
// (0 fps maps to kMinimumInterval; 0 interval maps to 0 fps).
TEST(VideoCommonTest, TestVideoFormatFps) {
  EXPECT_EQ(VideoFormat::kMinimumInterval, VideoFormat::FpsToInterval(0));
  EXPECT_EQ(rtc::kNumNanosecsPerSec / 20, VideoFormat::FpsToInterval(20));
  EXPECT_EQ(20, VideoFormat::IntervalToFps(rtc::kNumNanosecsPerSec / 20));
  EXPECT_EQ(0, VideoFormat::IntervalToFps(0));
}
+
+// Test IsSize0x0
// Test IsSize0x0: true only while both dimensions are zero.
TEST(VideoCommonTest, TestVideoFormatIsSize0x0) {
  VideoFormat format;
  EXPECT_TRUE(format.IsSize0x0());
  format.width = 320;
  EXPECT_FALSE(format.IsSize0x0());
}
+
+// Test ToString: print fourcc when it is printable.
// Test ToString: print the fourcc name only when it is printable
// (FOURCC_ANY is 0xFFFFFFFF and therefore omitted).
TEST(VideoCommonTest, TestVideoFormatToString) {
  VideoFormat format;
  EXPECT_EQ("0x0x0", format.ToString());

  format.fourcc = FOURCC_I420;
  format.width = 640;
  format.height = 480;
  format.interval = VideoFormat::FpsToInterval(20);
  EXPECT_EQ("I420 640x480x20", format.ToString());

  format.fourcc = FOURCC_ANY;
  format.width = 640;
  format.height = 480;
  format.interval = VideoFormat::FpsToInterval(20);
  EXPECT_EQ("640x480x20", format.ToString());
}
+
+// Test comparison.
+TEST(VideoCommonTest, TestVideoFormatCompare) {
+ VideoFormat format(640, 480, VideoFormat::FpsToInterval(20), FOURCC_I420);
+ VideoFormat format2;
+ EXPECT_NE(format, format2);
+
+ // Same pixelrate, different fourcc.
+ format2 = format;
+ format2.fourcc = FOURCC_YUY2;
+ EXPECT_NE(format, format2);
+ EXPECT_FALSE(format.IsPixelRateLess(format2) ||
+ format2.IsPixelRateLess(format2));
+
+ format2 = format;
+ format2.interval /= 2;
+ EXPECT_TRUE(format.IsPixelRateLess(format2));
+
+ format2 = format;
+ format2.width *= 2;
+ EXPECT_TRUE(format.IsPixelRateLess(format2));
+}
+
// GCD basics, including the GCD(0, b) == b convention.
TEST(VideoCommonTest, GreatestCommonDivisor) {
  EXPECT_EQ(GreatestCommonDivisor(0, 1000), 1000);
  EXPECT_EQ(GreatestCommonDivisor(1, 1), 1);
  EXPECT_EQ(GreatestCommonDivisor(8, 12), 4);
  EXPECT_EQ(GreatestCommonDivisor(24, 54), 6);
}
+
// LCM basics: coprime, identical, and divisor pairs.
TEST(VideoCommonTest, LeastCommonMultiple) {
  EXPECT_EQ(LeastCommonMultiple(1, 1), 1);
  EXPECT_EQ(LeastCommonMultiple(2, 3), 6);
  EXPECT_EQ(LeastCommonMultiple(16, 32), 32);
}
+
+} // namespace cricket
diff --git a/third_party/libwebrtc/media/base/video_source_base.cc b/third_party/libwebrtc/media/base/video_source_base.cc
new file mode 100644
index 0000000000..2454902069
--- /dev/null
+++ b/third_party/libwebrtc/media/base/video_source_base.cc
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/base/video_source_base.h"
+
+#include <algorithm>
+
+#include "absl/algorithm/container.h"
+#include "rtc_base/checks.h"
+
+namespace rtc {
+
// No setup or teardown needed beyond the members' own defaults.
VideoSourceBase::VideoSourceBase() = default;
VideoSourceBase::~VideoSourceBase() = default;
+
+void VideoSourceBase::AddOrUpdateSink(
+ VideoSinkInterface<webrtc::VideoFrame>* sink,
+ const VideoSinkWants& wants) {
+ RTC_DCHECK(sink != nullptr);
+
+ SinkPair* sink_pair = FindSinkPair(sink);
+ if (!sink_pair) {
+ sinks_.push_back(SinkPair(sink, wants));
+ } else {
+ sink_pair->wants = wants;
+ }
+}
+
+void VideoSourceBase::RemoveSink(VideoSinkInterface<webrtc::VideoFrame>* sink) {
+ RTC_DCHECK(sink != nullptr);
+ RTC_DCHECK(FindSinkPair(sink));
+ sinks_.erase(std::remove_if(sinks_.begin(), sinks_.end(),
+ [sink](const SinkPair& sink_pair) {
+ return sink_pair.sink == sink;
+ }),
+ sinks_.end());
+}
+
+VideoSourceBase::SinkPair* VideoSourceBase::FindSinkPair(
+ const VideoSinkInterface<webrtc::VideoFrame>* sink) {
+ auto sink_pair_it = absl::c_find_if(
+ sinks_,
+ [sink](const SinkPair& sink_pair) { return sink_pair.sink == sink; });
+ if (sink_pair_it != sinks_.end()) {
+ return &*sink_pair_it;
+ }
+ return nullptr;
+}
+
// No setup or teardown needed beyond the members' own defaults.
VideoSourceBaseGuarded::VideoSourceBaseGuarded() = default;
VideoSourceBaseGuarded::~VideoSourceBaseGuarded() = default;
+
// Registers `sink`, or refreshes its wants when it is already registered.
void VideoSourceBaseGuarded::AddOrUpdateSink(
    VideoSinkInterface<webrtc::VideoFrame>* sink,
    const VideoSinkWants& wants) {
  // Must run on the sequence the object was constructed on.
  RTC_DCHECK_RUN_ON(&source_sequence_);
  RTC_DCHECK(sink != nullptr);

  SinkPair* sink_pair = FindSinkPair(sink);
  if (!sink_pair) {
    // New sink: register it together with its wants.
    sinks_.push_back(SinkPair(sink, wants));
  } else {
    // Known sink: only update the stored wants.
    sink_pair->wants = wants;
  }
}
+
// Unregisters `sink`; the sink must currently be registered.
void VideoSourceBaseGuarded::RemoveSink(
    VideoSinkInterface<webrtc::VideoFrame>* sink) {
  // Must run on the sequence the object was constructed on.
  RTC_DCHECK_RUN_ON(&source_sequence_);
  RTC_DCHECK(sink != nullptr);
  RTC_DCHECK(FindSinkPair(sink));
  // Erase-remove idiom: drop every entry belonging to this sink.
  sinks_.erase(std::remove_if(sinks_.begin(), sinks_.end(),
                              [sink](const SinkPair& sink_pair) {
                                return sink_pair.sink == sink;
                              }),
               sinks_.end());
}
+
// Returns the registration entry for `sink`, or nullptr when the sink is
// not registered. Linear scan over the (typically small) sink list.
VideoSourceBaseGuarded::SinkPair* VideoSourceBaseGuarded::FindSinkPair(
    const VideoSinkInterface<webrtc::VideoFrame>* sink) {
  RTC_DCHECK_RUN_ON(&source_sequence_);
  auto sink_pair_it = absl::c_find_if(
      sinks_,
      [sink](const SinkPair& sink_pair) { return sink_pair.sink == sink; });
  if (sink_pair_it != sinks_.end()) {
    return &*sink_pair_it;
  }
  return nullptr;
}
+
// Exposes the registered sinks to subclasses; sequence-checked like all
// other sink operations.
const std::vector<VideoSourceBaseGuarded::SinkPair>&
VideoSourceBaseGuarded::sink_pairs() const {
  RTC_DCHECK_RUN_ON(&source_sequence_);
  return sinks_;
}
+
+} // namespace rtc
diff --git a/third_party/libwebrtc/media/base/video_source_base.h b/third_party/libwebrtc/media/base/video_source_base.h
new file mode 100644
index 0000000000..2644723aa7
--- /dev/null
+++ b/third_party/libwebrtc/media/base/video_source_base.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MEDIA_BASE_VIDEO_SOURCE_BASE_H_
+#define MEDIA_BASE_VIDEO_SOURCE_BASE_H_
+
+#include <vector>
+
+#include "api/sequence_checker.h"
+#include "api/video/video_frame.h"
+#include "api/video/video_sink_interface.h"
+#include "api/video/video_source_interface.h"
+#include "rtc_base/system/no_unique_address.h"
+
+namespace rtc {
+
+// VideoSourceBase is not thread safe. Before using this class, consider using
+// VideoSourceBaseGuarded below instead, which is an identical implementation
+// but applies a sequence checker to help protect internal state.
+// TODO(bugs.webrtc.org/12780): Delete this class.
+class VideoSourceBase : public VideoSourceInterface<webrtc::VideoFrame> {
+ public:
+ VideoSourceBase();
+ ~VideoSourceBase() override;
+ void AddOrUpdateSink(VideoSinkInterface<webrtc::VideoFrame>* sink,
+ const VideoSinkWants& wants) override;
+ void RemoveSink(VideoSinkInterface<webrtc::VideoFrame>* sink) override;
+
+ protected:
+ struct SinkPair {
+ SinkPair(VideoSinkInterface<webrtc::VideoFrame>* sink, VideoSinkWants wants)
+ : sink(sink), wants(wants) {}
+ VideoSinkInterface<webrtc::VideoFrame>* sink;
+ VideoSinkWants wants;
+ };
+ SinkPair* FindSinkPair(const VideoSinkInterface<webrtc::VideoFrame>* sink);
+
+ const std::vector<SinkPair>& sink_pairs() const { return sinks_; }
+
+ private:
+ std::vector<SinkPair> sinks_;
+};
+
+// VideoSourceBaseGuarded assumes that operations related to sinks occur on
+// the same TQ/thread that the object was constructed on.
+class VideoSourceBaseGuarded : public VideoSourceInterface<webrtc::VideoFrame> {
+ public:
+ VideoSourceBaseGuarded();
+ ~VideoSourceBaseGuarded() override;
+
+ void AddOrUpdateSink(VideoSinkInterface<webrtc::VideoFrame>* sink,
+ const VideoSinkWants& wants) override;
+ void RemoveSink(VideoSinkInterface<webrtc::VideoFrame>* sink) override;
+
+ protected:
+ struct SinkPair {
+ SinkPair(VideoSinkInterface<webrtc::VideoFrame>* sink, VideoSinkWants wants)
+ : sink(sink), wants(wants) {}
+ VideoSinkInterface<webrtc::VideoFrame>* sink;
+ VideoSinkWants wants;
+ };
+
+ SinkPair* FindSinkPair(const VideoSinkInterface<webrtc::VideoFrame>* sink);
+ const std::vector<SinkPair>& sink_pairs() const;
+
+  // Keep the `source_sequence_` checker protected to allow subclasses the
+  // ability to call Detach() if/when appropriate.
+ RTC_NO_UNIQUE_ADDRESS webrtc::SequenceChecker source_sequence_;
+
+ private:
+ std::vector<SinkPair> sinks_ RTC_GUARDED_BY(&source_sequence_);
+};
+
+} // namespace rtc
+
+#endif // MEDIA_BASE_VIDEO_SOURCE_BASE_H_
diff --git a/third_party/libwebrtc/media/codec_gn/moz.build b/third_party/libwebrtc/media/codec_gn/moz.build
new file mode 100644
index 0000000000..a6fa3b4063
--- /dev/null
+++ b/third_party/libwebrtc/media/codec_gn/moz.build
@@ -0,0 +1,232 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/media/base/codec.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["RTC_ENABLE_WIN_WGC"] = True
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["TARGET_CPU"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["TARGET_CPU"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["TARGET_CPU"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["TARGET_CPU"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["TARGET_CPU"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["TARGET_CPU"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android" and CONFIG["TARGET_CPU"] == "arm":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["OS_TARGET"] == "Android" and CONFIG["TARGET_CPU"] == "x86":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "aarch64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "arm":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "x86":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "x86_64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("codec_gn")
diff --git a/third_party/libwebrtc/media/engine/adm_helpers.cc b/third_party/libwebrtc/media/engine/adm_helpers.cc
new file mode 100644
index 0000000000..c349b7ce06
--- /dev/null
+++ b/third_party/libwebrtc/media/engine/adm_helpers.cc
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/engine/adm_helpers.h"
+
+#include "modules/audio_device/include/audio_device.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace adm_helpers {
+
+// On Windows Vista and newer, Microsoft introduced the concept of "Default
+// Communications Device". This means that there are two types of default
+// devices (old Wave Audio style default and Default Communications Device).
+//
+// On Windows systems that only support the Wave Audio style default, either
+// -1 or 0 selects the default device.
+//
+// Using a #define for AUDIO_DEVICE since we will call *different* versions of
+// the ADM functions, depending on the ID type.
+#if defined(WEBRTC_WIN)
+#define AUDIO_DEVICE_ID \
+ (AudioDeviceModule::WindowsDeviceType::kDefaultCommunicationDevice)
+#else
+#define AUDIO_DEVICE_ID (0u)
+#endif // defined(WEBRTC_WIN)
+
+void Init(AudioDeviceModule* adm) {
+ RTC_DCHECK(adm);
+
+ RTC_CHECK_EQ(0, adm->Init()) << "Failed to initialize the ADM.";
+
+ // Playout device.
+ {
+ if (adm->SetPlayoutDevice(AUDIO_DEVICE_ID) != 0) {
+ RTC_LOG(LS_ERROR) << "Unable to set playout device.";
+ return;
+ }
+ if (adm->InitSpeaker() != 0) {
+ RTC_LOG(LS_ERROR) << "Unable to access speaker.";
+ }
+
+ // Set number of channels
+ bool available = false;
+ if (adm->StereoPlayoutIsAvailable(&available) != 0) {
+ RTC_LOG(LS_ERROR) << "Failed to query stereo playout.";
+ }
+ if (adm->SetStereoPlayout(available) != 0) {
+ RTC_LOG(LS_ERROR) << "Failed to set stereo playout mode.";
+ }
+ }
+
+ // Recording device.
+ {
+ if (adm->SetRecordingDevice(AUDIO_DEVICE_ID) != 0) {
+ RTC_LOG(LS_ERROR) << "Unable to set recording device.";
+ return;
+ }
+ if (adm->InitMicrophone() != 0) {
+ RTC_LOG(LS_ERROR) << "Unable to access microphone.";
+ }
+
+ // Set number of channels
+ bool available = false;
+ if (adm->StereoRecordingIsAvailable(&available) != 0) {
+ RTC_LOG(LS_ERROR) << "Failed to query stereo recording.";
+ }
+ if (adm->SetStereoRecording(available) != 0) {
+ RTC_LOG(LS_ERROR) << "Failed to set stereo recording mode.";
+ }
+ }
+}
+} // namespace adm_helpers
+} // namespace webrtc
diff --git a/third_party/libwebrtc/media/engine/adm_helpers.h b/third_party/libwebrtc/media/engine/adm_helpers.h
new file mode 100644
index 0000000000..2a35d26b47
--- /dev/null
+++ b/third_party/libwebrtc/media/engine/adm_helpers.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MEDIA_ENGINE_ADM_HELPERS_H_
+#define MEDIA_ENGINE_ADM_HELPERS_H_
+
+namespace webrtc {
+
+class AudioDeviceModule;
+
+namespace adm_helpers {
+
+void Init(AudioDeviceModule* adm);
+
+} // namespace adm_helpers
+} // namespace webrtc
+
+#endif // MEDIA_ENGINE_ADM_HELPERS_H_
diff --git a/third_party/libwebrtc/media/engine/fake_video_codec_factory.cc b/third_party/libwebrtc/media/engine/fake_video_codec_factory.cc
new file mode 100644
index 0000000000..6f4f796b16
--- /dev/null
+++ b/third_party/libwebrtc/media/engine/fake_video_codec_factory.cc
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/engine/fake_video_codec_factory.h"
+
+#include <memory>
+
+#include "api/video_codecs/sdp_video_format.h"
+#include "api/video_codecs/video_decoder.h"
+#include "api/video_codecs/video_encoder.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "modules/video_coding/include/video_error_codes.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "test/fake_decoder.h"
+#include "test/fake_encoder.h"
+
+namespace {
+
+static const char kFakeCodecFactoryCodecName[] = "FakeCodec";
+
+} // anonymous namespace
+
+namespace webrtc {
+
+FakeVideoEncoderFactory::FakeVideoEncoderFactory() = default;
+
+// static
+std::unique_ptr<VideoEncoder> FakeVideoEncoderFactory::CreateVideoEncoder() {
+ return std::make_unique<test::FakeEncoder>(Clock::GetRealTimeClock());
+}
+
+std::vector<SdpVideoFormat> FakeVideoEncoderFactory::GetSupportedFormats()
+ const {
+ return std::vector<SdpVideoFormat>(
+ 1, SdpVideoFormat(kFakeCodecFactoryCodecName));
+}
+
+std::unique_ptr<VideoEncoder> FakeVideoEncoderFactory::CreateVideoEncoder(
+ const SdpVideoFormat& format) {
+ return std::make_unique<test::FakeEncoder>(Clock::GetRealTimeClock());
+}
+
+FakeVideoDecoderFactory::FakeVideoDecoderFactory() = default;
+
+// static
+std::unique_ptr<VideoDecoder> FakeVideoDecoderFactory::CreateVideoDecoder() {
+ return std::make_unique<test::FakeDecoder>();
+}
+
+std::vector<SdpVideoFormat> FakeVideoDecoderFactory::GetSupportedFormats()
+ const {
+ return std::vector<SdpVideoFormat>(
+ 1, SdpVideoFormat(kFakeCodecFactoryCodecName));
+}
+
+std::unique_ptr<VideoDecoder> FakeVideoDecoderFactory::CreateVideoDecoder(
+ const SdpVideoFormat& format) {
+ return std::make_unique<test::FakeDecoder>();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/media/engine/fake_video_codec_factory.h b/third_party/libwebrtc/media/engine/fake_video_codec_factory.h
new file mode 100644
index 0000000000..4a99120467
--- /dev/null
+++ b/third_party/libwebrtc/media/engine/fake_video_codec_factory.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MEDIA_ENGINE_FAKE_VIDEO_CODEC_FACTORY_H_
+#define MEDIA_ENGINE_FAKE_VIDEO_CODEC_FACTORY_H_
+
+#include <memory>
+#include <vector>
+
+#include "api/video_codecs/video_decoder_factory.h"
+#include "api/video_codecs/video_encoder_factory.h"
+#include "rtc_base/system/rtc_export.h"
+
+namespace webrtc {
+
+// Provides a fake video encoder instance that produces frames large enough for
+// the given bitrate constraints.
+class RTC_EXPORT FakeVideoEncoderFactory : public VideoEncoderFactory {
+ public:
+ FakeVideoEncoderFactory();
+
+ static std::unique_ptr<VideoEncoder> CreateVideoEncoder();
+
+ // VideoEncoderFactory implementation
+ std::vector<SdpVideoFormat> GetSupportedFormats() const override;
+ std::unique_ptr<VideoEncoder> CreateVideoEncoder(
+ const SdpVideoFormat& format) override;
+};
+
+// Provides a fake video decoder instance that ignores the given bitstream and
+// produces frames.
+class RTC_EXPORT FakeVideoDecoderFactory : public VideoDecoderFactory {
+ public:
+ FakeVideoDecoderFactory();
+
+ static std::unique_ptr<VideoDecoder> CreateVideoDecoder();
+
+ // VideoDecoderFactory implementation
+ std::vector<SdpVideoFormat> GetSupportedFormats() const override;
+ std::unique_ptr<VideoDecoder> CreateVideoDecoder(
+ const SdpVideoFormat& format) override;
+};
+
+} // namespace webrtc
+
+#endif // MEDIA_ENGINE_FAKE_VIDEO_CODEC_FACTORY_H_
diff --git a/third_party/libwebrtc/media/engine/fake_webrtc_call.cc b/third_party/libwebrtc/media/engine/fake_webrtc_call.cc
new file mode 100644
index 0000000000..16e7169b21
--- /dev/null
+++ b/third_party/libwebrtc/media/engine/fake_webrtc_call.cc
@@ -0,0 +1,774 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/engine/fake_webrtc_call.h"
+
+#include <cstdint>
+#include <utility>
+
+#include "absl/algorithm/container.h"
+#include "absl/strings/string_view.h"
+#include "api/call/audio_sink.h"
+#include "api/units/timestamp.h"
+#include "call/packet_receiver.h"
+#include "media/base/media_channel.h"
+#include "modules/rtp_rtcp/source/rtp_util.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/gunit.h"
+#include "rtc_base/thread.h"
+#include "video/config/encoder_stream_factory.h"
+
+namespace cricket {
+
+using ::webrtc::ParseRtpSsrc;
+
+FakeAudioSendStream::FakeAudioSendStream(
+ int id,
+ const webrtc::AudioSendStream::Config& config)
+ : id_(id), config_(config) {}
+
+void FakeAudioSendStream::Reconfigure(
+ const webrtc::AudioSendStream::Config& config,
+ webrtc::SetParametersCallback callback) {
+ config_ = config;
+ webrtc::InvokeSetParametersCallback(callback, webrtc::RTCError::OK());
+}
+
+const webrtc::AudioSendStream::Config& FakeAudioSendStream::GetConfig() const {
+ return config_;
+}
+
+void FakeAudioSendStream::SetStats(
+ const webrtc::AudioSendStream::Stats& stats) {
+ stats_ = stats;
+}
+
+FakeAudioSendStream::TelephoneEvent
+FakeAudioSendStream::GetLatestTelephoneEvent() const {
+ return latest_telephone_event_;
+}
+
+bool FakeAudioSendStream::SendTelephoneEvent(int payload_type,
+ int payload_frequency,
+ int event,
+ int duration_ms) {
+ latest_telephone_event_.payload_type = payload_type;
+ latest_telephone_event_.payload_frequency = payload_frequency;
+ latest_telephone_event_.event_code = event;
+ latest_telephone_event_.duration_ms = duration_ms;
+ return true;
+}
+
+void FakeAudioSendStream::SetMuted(bool muted) {
+ muted_ = muted;
+}
+
+webrtc::AudioSendStream::Stats FakeAudioSendStream::GetStats() const {
+ return stats_;
+}
+
+webrtc::AudioSendStream::Stats FakeAudioSendStream::GetStats(
+ bool /*has_remote_tracks*/) const {
+ return stats_;
+}
+
+FakeAudioReceiveStream::FakeAudioReceiveStream(
+ int id,
+ const webrtc::AudioReceiveStreamInterface::Config& config)
+ : id_(id), config_(config) {}
+
+const webrtc::AudioReceiveStreamInterface::Config&
+FakeAudioReceiveStream::GetConfig() const {
+ return config_;
+}
+
+void FakeAudioReceiveStream::SetStats(
+ const webrtc::AudioReceiveStreamInterface::Stats& stats) {
+ stats_ = stats;
+}
+
+bool FakeAudioReceiveStream::VerifyLastPacket(const uint8_t* data,
+ size_t length) const {
+ return last_packet_ == rtc::Buffer(data, length);
+}
+
+bool FakeAudioReceiveStream::DeliverRtp(const uint8_t* packet,
+ size_t length,
+ int64_t /* packet_time_us */) {
+ ++received_packets_;
+ last_packet_.SetData(packet, length);
+ return true;
+}
+
+void FakeAudioReceiveStream::SetDepacketizerToDecoderFrameTransformer(
+ rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer) {
+ config_.frame_transformer = std::move(frame_transformer);
+}
+
+void FakeAudioReceiveStream::SetDecoderMap(
+ std::map<int, webrtc::SdpAudioFormat> decoder_map) {
+ config_.decoder_map = std::move(decoder_map);
+}
+
+void FakeAudioReceiveStream::SetNackHistory(int history_ms) {
+ config_.rtp.nack.rtp_history_ms = history_ms;
+}
+
+void FakeAudioReceiveStream::SetNonSenderRttMeasurement(bool enabled) {
+ config_.enable_non_sender_rtt = enabled;
+}
+
+void FakeAudioReceiveStream::SetFrameDecryptor(
+ rtc::scoped_refptr<webrtc::FrameDecryptorInterface> frame_decryptor) {
+ config_.frame_decryptor = std::move(frame_decryptor);
+}
+
+webrtc::AudioReceiveStreamInterface::Stats FakeAudioReceiveStream::GetStats(
+ bool get_and_clear_legacy_stats) const {
+ return stats_;
+}
+
+void FakeAudioReceiveStream::SetSink(webrtc::AudioSinkInterface* sink) {
+ sink_ = sink;
+}
+
+void FakeAudioReceiveStream::SetGain(float gain) {
+ gain_ = gain;
+}
+
+FakeVideoSendStream::FakeVideoSendStream(
+ webrtc::VideoSendStream::Config config,
+ webrtc::VideoEncoderConfig encoder_config)
+ : sending_(false),
+ config_(std::move(config)),
+ codec_settings_set_(false),
+ resolution_scaling_enabled_(false),
+ framerate_scaling_enabled_(false),
+ source_(nullptr),
+ num_swapped_frames_(0) {
+ RTC_DCHECK(config.encoder_settings.encoder_factory != nullptr);
+ RTC_DCHECK(config.encoder_settings.bitrate_allocator_factory != nullptr);
+ ReconfigureVideoEncoder(std::move(encoder_config));
+}
+
+FakeVideoSendStream::~FakeVideoSendStream() {
+ if (source_)
+ source_->RemoveSink(this);
+}
+
+const webrtc::VideoSendStream::Config& FakeVideoSendStream::GetConfig() const {
+ return config_;
+}
+
+const webrtc::VideoEncoderConfig& FakeVideoSendStream::GetEncoderConfig()
+ const {
+ return encoder_config_;
+}
+
+const std::vector<webrtc::VideoStream>& FakeVideoSendStream::GetVideoStreams()
+ const {
+ return video_streams_;
+}
+
+bool FakeVideoSendStream::IsSending() const {
+ return sending_;
+}
+
+bool FakeVideoSendStream::GetVp8Settings(
+ webrtc::VideoCodecVP8* settings) const {
+ if (!codec_settings_set_) {
+ return false;
+ }
+
+ *settings = codec_specific_settings_.vp8;
+ return true;
+}
+
+bool FakeVideoSendStream::GetVp9Settings(
+ webrtc::VideoCodecVP9* settings) const {
+ if (!codec_settings_set_) {
+ return false;
+ }
+
+ *settings = codec_specific_settings_.vp9;
+ return true;
+}
+
+bool FakeVideoSendStream::GetH264Settings(
+ webrtc::VideoCodecH264* settings) const {
+ if (!codec_settings_set_) {
+ return false;
+ }
+
+ *settings = codec_specific_settings_.h264;
+ return true;
+}
+
+bool FakeVideoSendStream::GetAv1Settings(
+ webrtc::VideoCodecAV1* settings) const {
+ if (!codec_settings_set_) {
+ return false;
+ }
+
+ *settings = codec_specific_settings_.av1;
+ return true;
+}
+
+int FakeVideoSendStream::GetNumberOfSwappedFrames() const {
+ return num_swapped_frames_;
+}
+
+int FakeVideoSendStream::GetLastWidth() const {
+ return last_frame_->width();
+}
+
+int FakeVideoSendStream::GetLastHeight() const {
+ return last_frame_->height();
+}
+
+int64_t FakeVideoSendStream::GetLastTimestamp() const {
+ RTC_DCHECK(last_frame_->ntp_time_ms() == 0);
+ return last_frame_->render_time_ms();
+}
+
+void FakeVideoSendStream::OnFrame(const webrtc::VideoFrame& frame) {
+ ++num_swapped_frames_;
+ if (!last_frame_ || frame.width() != last_frame_->width() ||
+ frame.height() != last_frame_->height() ||
+ frame.rotation() != last_frame_->rotation()) {
+ if (encoder_config_.video_stream_factory) {
+ // Note: only tests set their own EncoderStreamFactory...
+ video_streams_ =
+ encoder_config_.video_stream_factory->CreateEncoderStreams(
+ frame.width(), frame.height(), encoder_config_);
+ } else {
+ webrtc::VideoEncoder::EncoderInfo encoder_info;
+ rtc::scoped_refptr<
+ webrtc::VideoEncoderConfig::VideoStreamFactoryInterface>
+ factory = rtc::make_ref_counted<cricket::EncoderStreamFactory>(
+ encoder_config_.video_format.name, encoder_config_.max_qp,
+ encoder_config_.content_type ==
+ webrtc::VideoEncoderConfig::ContentType::kScreen,
+ encoder_config_.legacy_conference_mode, encoder_info);
+
+ video_streams_ = factory->CreateEncoderStreams(
+ frame.width(), frame.height(), encoder_config_);
+ }
+ }
+ last_frame_ = frame;
+}
+
+void FakeVideoSendStream::SetStats(
+ const webrtc::VideoSendStream::Stats& stats) {
+ stats_ = stats;
+}
+
+webrtc::VideoSendStream::Stats FakeVideoSendStream::GetStats() {
+ return stats_;
+}
+
+void FakeVideoSendStream::ReconfigureVideoEncoder(
+ webrtc::VideoEncoderConfig config) {
+ ReconfigureVideoEncoder(std::move(config), nullptr);
+}
+
+void FakeVideoSendStream::ReconfigureVideoEncoder(
+ webrtc::VideoEncoderConfig config,
+ webrtc::SetParametersCallback callback) {
+ int width, height;
+ if (last_frame_) {
+ width = last_frame_->width();
+ height = last_frame_->height();
+ } else {
+ width = height = 0;
+ }
+ if (config.video_stream_factory) {
+ // Note: only tests set their own EncoderStreamFactory...
+ video_streams_ = config.video_stream_factory->CreateEncoderStreams(
+ width, height, config);
+ } else {
+ webrtc::VideoEncoder::EncoderInfo encoder_info;
+ rtc::scoped_refptr<webrtc::VideoEncoderConfig::VideoStreamFactoryInterface>
+ factory = rtc::make_ref_counted<cricket::EncoderStreamFactory>(
+ config.video_format.name, config.max_qp,
+ config.content_type ==
+ webrtc::VideoEncoderConfig::ContentType::kScreen,
+ config.legacy_conference_mode, encoder_info);
+
+ video_streams_ = factory->CreateEncoderStreams(width, height, config);
+ }
+
+ if (config.encoder_specific_settings != nullptr) {
+ const unsigned char num_temporal_layers = static_cast<unsigned char>(
+ video_streams_.back().num_temporal_layers.value_or(1));
+ if (config_.rtp.payload_name == "VP8") {
+ config.encoder_specific_settings->FillVideoCodecVp8(
+ &codec_specific_settings_.vp8);
+ if (!video_streams_.empty()) {
+ codec_specific_settings_.vp8.numberOfTemporalLayers =
+ num_temporal_layers;
+ }
+ } else if (config_.rtp.payload_name == "VP9") {
+ config.encoder_specific_settings->FillVideoCodecVp9(
+ &codec_specific_settings_.vp9);
+ if (!video_streams_.empty()) {
+ codec_specific_settings_.vp9.numberOfTemporalLayers =
+ num_temporal_layers;
+ }
+ } else if (config_.rtp.payload_name == "H264") {
+ codec_specific_settings_.h264.numberOfTemporalLayers =
+ num_temporal_layers;
+ } else if (config_.rtp.payload_name == "AV1") {
+ config.encoder_specific_settings->FillVideoCodecAv1(
+ &codec_specific_settings_.av1);
+ } else {
+ ADD_FAILURE() << "Unsupported encoder payload: "
+ << config_.rtp.payload_name;
+ }
+ }
+ codec_settings_set_ = config.encoder_specific_settings != nullptr;
+ encoder_config_ = std::move(config);
+ ++num_encoder_reconfigurations_;
+ webrtc::InvokeSetParametersCallback(callback, webrtc::RTCError::OK());
+}
+
+void FakeVideoSendStream::StartPerRtpStream(
+ const std::vector<bool> active_layers) {
+ sending_ = false;
+ for (const bool active_layer : active_layers) {
+ if (active_layer) {
+ sending_ = true;
+ break;
+ }
+ }
+}
+
+void FakeVideoSendStream::Start() {
+ sending_ = true;
+}
+
+void FakeVideoSendStream::Stop() {
+ sending_ = false;
+}
+
+void FakeVideoSendStream::AddAdaptationResource(
+ rtc::scoped_refptr<webrtc::Resource> resource) {}
+
+std::vector<rtc::scoped_refptr<webrtc::Resource>>
+FakeVideoSendStream::GetAdaptationResources() {
+ return {};
+}
+
+void FakeVideoSendStream::SetSource(
+ rtc::VideoSourceInterface<webrtc::VideoFrame>* source,
+ const webrtc::DegradationPreference& degradation_preference) {
+ if (source_)
+ source_->RemoveSink(this);
+ source_ = source;
+ switch (degradation_preference) {
+ case webrtc::DegradationPreference::MAINTAIN_FRAMERATE:
+ resolution_scaling_enabled_ = true;
+ framerate_scaling_enabled_ = false;
+ break;
+ case webrtc::DegradationPreference::MAINTAIN_RESOLUTION:
+ resolution_scaling_enabled_ = false;
+ framerate_scaling_enabled_ = true;
+ break;
+ case webrtc::DegradationPreference::BALANCED:
+ resolution_scaling_enabled_ = true;
+ framerate_scaling_enabled_ = true;
+ break;
+ case webrtc::DegradationPreference::DISABLED:
+ resolution_scaling_enabled_ = false;
+ framerate_scaling_enabled_ = false;
+ break;
+ }
+ if (source)
+ source->AddOrUpdateSink(this, resolution_scaling_enabled_
+ ? sink_wants_
+ : rtc::VideoSinkWants());
+}
+
+void FakeVideoSendStream::GenerateKeyFrame(
+ const std::vector<std::string>& rids) {
+ keyframes_requested_by_rid_ = rids;
+}
+
+void FakeVideoSendStream::InjectVideoSinkWants(
+ const rtc::VideoSinkWants& wants) {
+ sink_wants_ = wants;
+ source_->AddOrUpdateSink(this, wants);
+}
+
+FakeVideoReceiveStream::FakeVideoReceiveStream(
+ webrtc::VideoReceiveStreamInterface::Config config)
+ : config_(std::move(config)), receiving_(false) {}
+
+const webrtc::VideoReceiveStreamInterface::Config&
+FakeVideoReceiveStream::GetConfig() const {
+ return config_;
+}
+
+bool FakeVideoReceiveStream::IsReceiving() const {
+ return receiving_;
+}
+
+void FakeVideoReceiveStream::InjectFrame(const webrtc::VideoFrame& frame) {
+ config_.renderer->OnFrame(frame);
+}
+
+webrtc::VideoReceiveStreamInterface::Stats FakeVideoReceiveStream::GetStats()
+ const {
+ return stats_;
+}
+
+void FakeVideoReceiveStream::Start() {
+ receiving_ = true;
+}
+
+void FakeVideoReceiveStream::Stop() {
+ receiving_ = false;
+}
+
+void FakeVideoReceiveStream::SetStats(
+ const webrtc::VideoReceiveStreamInterface::Stats& stats) {
+ stats_ = stats;
+}
+
+FakeFlexfecReceiveStream::FakeFlexfecReceiveStream(
+ const webrtc::FlexfecReceiveStream::Config config)
+ : config_(std::move(config)) {}
+
+const webrtc::FlexfecReceiveStream::Config&
+FakeFlexfecReceiveStream::GetConfig() const {
+ return config_;
+}
+
+void FakeFlexfecReceiveStream::OnRtpPacket(const webrtc::RtpPacketReceived&) {
+ RTC_DCHECK_NOTREACHED() << "Not implemented.";
+}
+
+FakeCall::FakeCall(webrtc::test::ScopedKeyValueConfig* field_trials)
+ : FakeCall(rtc::Thread::Current(), rtc::Thread::Current(), field_trials) {}
+
+FakeCall::FakeCall(webrtc::TaskQueueBase* worker_thread,
+ webrtc::TaskQueueBase* network_thread,
+ webrtc::test::ScopedKeyValueConfig* field_trials)
+ : network_thread_(network_thread),
+ worker_thread_(worker_thread),
+ audio_network_state_(webrtc::kNetworkUp),
+ video_network_state_(webrtc::kNetworkUp),
+ num_created_send_streams_(0),
+ num_created_receive_streams_(0),
+ trials_(field_trials ? field_trials : &fallback_trials_) {}
+
+FakeCall::~FakeCall() {
+ EXPECT_EQ(0u, video_send_streams_.size());
+ EXPECT_EQ(0u, audio_send_streams_.size());
+ EXPECT_EQ(0u, video_receive_streams_.size());
+ EXPECT_EQ(0u, audio_receive_streams_.size());
+}
+
+// Accessors below expose the live stream lists (in creation order) and
+// by-SSRC lookups for tests. Returned pointers remain owned by FakeCall.
+const std::vector<FakeVideoSendStream*>& FakeCall::GetVideoSendStreams() {
+  return video_send_streams_;
+}
+
+const std::vector<FakeVideoReceiveStream*>& FakeCall::GetVideoReceiveStreams() {
+  return video_receive_streams_;
+}
+
+// Returns the video receive stream whose remote SSRC equals `ssrc`, or
+// nullptr if there is none.
+const FakeVideoReceiveStream* FakeCall::GetVideoReceiveStream(uint32_t ssrc) {
+  for (const auto* p : GetVideoReceiveStreams()) {
+    if (p->GetConfig().rtp.remote_ssrc == ssrc) {
+      return p;
+    }
+  }
+  return nullptr;
+}
+
+const std::vector<FakeAudioSendStream*>& FakeCall::GetAudioSendStreams() {
+  return audio_send_streams_;
+}
+
+// Returns the audio send stream whose (local) SSRC equals `ssrc`, or nullptr.
+const FakeAudioSendStream* FakeCall::GetAudioSendStream(uint32_t ssrc) {
+  for (const auto* p : GetAudioSendStreams()) {
+    if (p->GetConfig().rtp.ssrc == ssrc) {
+      return p;
+    }
+  }
+  return nullptr;
+}
+
+const std::vector<FakeAudioReceiveStream*>& FakeCall::GetAudioReceiveStreams() {
+  return audio_receive_streams_;
+}
+
+// Returns the audio receive stream whose remote SSRC equals `ssrc`, or
+// nullptr.
+const FakeAudioReceiveStream* FakeCall::GetAudioReceiveStream(uint32_t ssrc) {
+  for (const auto* p : GetAudioReceiveStreams()) {
+    if (p->GetConfig().rtp.remote_ssrc == ssrc) {
+      return p;
+    }
+  }
+  return nullptr;
+}
+
+const std::vector<FakeFlexfecReceiveStream*>&
+FakeCall::GetFlexfecReceiveStreams() {
+  return flexfec_receive_streams_;
+}
+
+// Returns the network state last set via SignalChannelNetworkState() for the
+// given media type (both default to kNetworkUp). DATA/ANY are invalid inputs
+// and fail the current test.
+webrtc::NetworkState FakeCall::GetNetworkState(webrtc::MediaType media) const {
+  switch (media) {
+    case webrtc::MediaType::AUDIO:
+      return audio_network_state_;
+    case webrtc::MediaType::VIDEO:
+      return video_network_state_;
+    case webrtc::MediaType::DATA:
+    case webrtc::MediaType::ANY:
+      ADD_FAILURE() << "GetNetworkState called with unknown parameter.";
+      return webrtc::kNetworkDown;
+  }
+  // Even though all the values for the enum class are listed above, the
+  // compiler will emit a warning as the method may be called with a value
+  // outside of the valid enum range, unless this case is also handled.
+  ADD_FAILURE() << "GetNetworkState called with unknown parameter.";
+  return webrtc::kNetworkDown;
+}
+
+// Creates a fake audio send stream with a unique id. The raw pointer is
+// owned by this FakeCall until DestroyAudioSendStream() is called (see the
+// `delete` there); the destructor EXPECTs all streams to have been destroyed.
+webrtc::AudioSendStream* FakeCall::CreateAudioSendStream(
+    const webrtc::AudioSendStream::Config& config) {
+  FakeAudioSendStream* fake_stream =
+      new FakeAudioSendStream(next_stream_id_++, config);
+  audio_send_streams_.push_back(fake_stream);
+  ++num_created_send_streams_;
+  return fake_stream;
+}
+
+// Deletes `send_stream` and removes it from the tracked list; fails the test
+// if the stream was not created by this FakeCall.
+void FakeCall::DestroyAudioSendStream(webrtc::AudioSendStream* send_stream) {
+  auto it = absl::c_find(audio_send_streams_,
+                         static_cast<FakeAudioSendStream*>(send_stream));
+  if (it == audio_send_streams_.end()) {
+    ADD_FAILURE() << "DestroyAudioSendStream called with unknown parameter.";
+  } else {
+    delete *it;
+    audio_send_streams_.erase(it);
+  }
+}
+
+// Creates a fake audio receive stream with a unique id; ownership mirrors
+// CreateAudioSendStream() above.
+webrtc::AudioReceiveStreamInterface* FakeCall::CreateAudioReceiveStream(
+    const webrtc::AudioReceiveStreamInterface::Config& config) {
+  audio_receive_streams_.push_back(
+      new FakeAudioReceiveStream(next_stream_id_++, config));
+  ++num_created_receive_streams_;
+  return audio_receive_streams_.back();
+}
+
+void FakeCall::DestroyAudioReceiveStream(
+    webrtc::AudioReceiveStreamInterface* receive_stream) {
+  auto it = absl::c_find(audio_receive_streams_,
+                         static_cast<FakeAudioReceiveStream*>(receive_stream));
+  if (it == audio_receive_streams_.end()) {
+    ADD_FAILURE() << "DestroyAudioReceiveStream called with unknown parameter.";
+  } else {
+    delete *it;
+    audio_receive_streams_.erase(it);
+  }
+}
+
+// Creates a fake video send stream; the raw pointer is owned by this
+// FakeCall until DestroyVideoSendStream() is called.
+webrtc::VideoSendStream* FakeCall::CreateVideoSendStream(
+    webrtc::VideoSendStream::Config config,
+    webrtc::VideoEncoderConfig encoder_config) {
+  FakeVideoSendStream* fake_stream =
+      new FakeVideoSendStream(std::move(config), std::move(encoder_config));
+  video_send_streams_.push_back(fake_stream);
+  ++num_created_send_streams_;
+  return fake_stream;
+}
+
+// Deletes `send_stream` and removes it from the tracked list; fails the test
+// if the stream was not created by this FakeCall.
+void FakeCall::DestroyVideoSendStream(webrtc::VideoSendStream* send_stream) {
+  auto it = absl::c_find(video_send_streams_,
+                         static_cast<FakeVideoSendStream*>(send_stream));
+  if (it == video_send_streams_.end()) {
+    ADD_FAILURE() << "DestroyVideoSendStream called with unknown parameter.";
+  } else {
+    delete *it;
+    video_send_streams_.erase(it);
+  }
+}
+
+// Creates a fake video receive stream; ownership mirrors the send side.
+webrtc::VideoReceiveStreamInterface* FakeCall::CreateVideoReceiveStream(
+    webrtc::VideoReceiveStreamInterface::Config config) {
+  video_receive_streams_.push_back(
+      new FakeVideoReceiveStream(std::move(config)));
+  ++num_created_receive_streams_;
+  return video_receive_streams_.back();
+}
+
+void FakeCall::DestroyVideoReceiveStream(
+    webrtc::VideoReceiveStreamInterface* receive_stream) {
+  auto it = absl::c_find(video_receive_streams_,
+                         static_cast<FakeVideoReceiveStream*>(receive_stream));
+  if (it == video_receive_streams_.end()) {
+    ADD_FAILURE() << "DestroyVideoReceiveStream called with unknown parameter.";
+  } else {
+    delete *it;
+    video_receive_streams_.erase(it);
+  }
+}
+
+// Creates a fake FlexFEC receive stream owned by this FakeCall until
+// DestroyFlexfecReceiveStream() is called.
+// The parameter is taken by (non-const) value so that the std::move below
+// performs a real move instead of silently copying; top-level const on a
+// by-value parameter is not part of the signature, so this definition still
+// matches the declaration and still overrides webrtc::Call.
+webrtc::FlexfecReceiveStream* FakeCall::CreateFlexfecReceiveStream(
+    webrtc::FlexfecReceiveStream::Config config) {
+  FakeFlexfecReceiveStream* fake_stream =
+      new FakeFlexfecReceiveStream(std::move(config));
+  flexfec_receive_streams_.push_back(fake_stream);
+  ++num_created_receive_streams_;
+  return fake_stream;
+}
+
+// Deletes `receive_stream` and removes it from the tracked list; fails the
+// test if the stream was not created by this FakeCall.
+void FakeCall::DestroyFlexfecReceiveStream(
+    webrtc::FlexfecReceiveStream* receive_stream) {
+  auto it =
+      absl::c_find(flexfec_receive_streams_,
+                   static_cast<FakeFlexfecReceiveStream*>(receive_stream));
+  if (it == flexfec_receive_streams_.end()) {
+    ADD_FAILURE()
+        << "DestroyFlexfecReceiveStream called with unknown parameter.";
+  } else {
+    delete *it;
+    flexfec_receive_streams_.erase(it);
+  }
+}
+
+// Adaptation resources are ignored by the fake.
+void FakeCall::AddAdaptationResource(
+    rtc::scoped_refptr<webrtc::Resource> resource) {}
+
+// FakeCall is its own packet receiver (it implements webrtc::PacketReceiver).
+webrtc::PacketReceiver* FakeCall::Receiver() {
+  return this;
+}
+
+// Attempts to demux `packet` to a receive stream by SSRC. If no stream
+// matches, the undemuxable-packet handler gets one chance to act (presumably
+// updating demuxing criteria — verify against callers) before a single
+// retry. The packet is recorded as last_received_rtp_packet_ either way.
+void FakeCall::DeliverRtpPacket(
+    webrtc::MediaType media_type,
+    webrtc::RtpPacketReceived packet,
+    OnUndemuxablePacketHandler undemuxable_packet_handler) {
+  if (!DeliverPacketInternal(media_type, packet.Ssrc(), packet.Buffer(),
+                             packet.arrival_time())) {
+    if (undemuxable_packet_handler(packet)) {
+      DeliverPacketInternal(media_type, packet.Ssrc(), packet.Buffer(),
+                            packet.arrival_time());
+    }
+  }
+  last_received_rtp_packet_ = packet;
+}
+
+// Returns true if some receive stream matched `ssrc`. Video streams match on
+// either media or RTX SSRC but only have the per-SSRC counter bumped; audio
+// streams additionally get the raw packet via DeliverRtp().
+bool FakeCall::DeliverPacketInternal(webrtc::MediaType media_type,
+                                     uint32_t ssrc,
+                                     const rtc::CopyOnWriteBuffer& packet,
+                                     webrtc::Timestamp arrival_time) {
+  // 12 bytes is the minimum RTP header size.
+  EXPECT_GE(packet.size(), 12u);
+  RTC_DCHECK(arrival_time.IsFinite());
+  RTC_DCHECK(media_type == webrtc::MediaType::AUDIO ||
+             media_type == webrtc::MediaType::VIDEO);
+
+  if (media_type == webrtc::MediaType::VIDEO) {
+    for (auto receiver : video_receive_streams_) {
+      if (receiver->GetConfig().rtp.remote_ssrc == ssrc ||
+          receiver->GetConfig().rtp.rtx_ssrc == ssrc) {
+        ++delivered_packets_by_ssrc_[ssrc];
+        return true;
+      }
+    }
+  }
+  if (media_type == webrtc::MediaType::AUDIO) {
+    for (auto receiver : audio_receive_streams_) {
+      if (receiver->GetConfig().rtp.remote_ssrc == ssrc) {
+        receiver->DeliverRtp(packet.cdata(), packet.size(), arrival_time.us());
+        ++delivered_packets_by_ssrc_[ssrc];
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
+// Test hook: sets the canned call stats returned by GetStats().
+void FakeCall::SetStats(const webrtc::Call::Stats& stats) {
+  stats_ = stats;
+}
+
+// Total number of send streams ever created (not decremented on destroy).
+int FakeCall::GetNumCreatedSendStreams() const {
+  return num_created_send_streams_;
+}
+
+// Total number of receive streams ever created (audio, video and FlexFEC).
+int FakeCall::GetNumCreatedReceiveStreams() const {
+  return num_created_receive_streams_;
+}
+
+webrtc::Call::Stats FakeCall::GetStats() const {
+  return stats_;
+}
+
+webrtc::TaskQueueBase* FakeCall::network_thread() const {
+  return network_thread_;
+}
+
+webrtc::TaskQueueBase* FakeCall::worker_thread() const {
+  return worker_thread_;
+}
+
+// Records the per-media network state later reported by GetNetworkState().
+// DATA/ANY are invalid inputs and fail the current test.
+void FakeCall::SignalChannelNetworkState(webrtc::MediaType media,
+                                         webrtc::NetworkState state) {
+  switch (media) {
+    case webrtc::MediaType::AUDIO:
+      audio_network_state_ = state;
+      break;
+    case webrtc::MediaType::VIDEO:
+      video_network_state_ = state;
+      break;
+    case webrtc::MediaType::DATA:
+    case webrtc::MediaType::ANY:
+      ADD_FAILURE()
+          << "SignalChannelNetworkState called with unknown parameter.";
+  }
+}
+
+// Transport overhead changes are ignored by the fake.
+void FakeCall::OnAudioTransportOverheadChanged(
+    int transport_overhead_per_packet) {}
+
+// The OnLocalSsrcUpdated() overloads forward the new local SSRC into the
+// corresponding fake stream's config. The casts are safe because all streams
+// handed out by this FakeCall are the fake types.
+void FakeCall::OnLocalSsrcUpdated(webrtc::AudioReceiveStreamInterface& stream,
+                                  uint32_t local_ssrc) {
+  auto& fake_stream = static_cast<FakeAudioReceiveStream&>(stream);
+  fake_stream.SetLocalSsrc(local_ssrc);
+}
+
+void FakeCall::OnLocalSsrcUpdated(webrtc::VideoReceiveStreamInterface& stream,
+                                  uint32_t local_ssrc) {
+  auto& fake_stream = static_cast<FakeVideoReceiveStream&>(stream);
+  fake_stream.SetLocalSsrc(local_ssrc);
+}
+
+void FakeCall::OnLocalSsrcUpdated(webrtc::FlexfecReceiveStream& stream,
+                                  uint32_t local_ssrc) {
+  auto& fake_stream = static_cast<FakeFlexfecReceiveStream&>(stream);
+  fake_stream.SetLocalSsrc(local_ssrc);
+}
+
+// Forwards the new sync group into the fake audio receive stream's config.
+void FakeCall::OnUpdateSyncGroup(webrtc::AudioReceiveStreamInterface& stream,
+                                 absl::string_view sync_group) {
+  auto& fake_stream = static_cast<FakeAudioReceiveStream&>(stream);
+  fake_stream.SetSyncGroup(sync_group);
+}
+
+// Remembers the last sent packet; additionally tracks the last packet id
+// that was >= 0 (media packets), since e.g. ICE packets carry id -1.
+void FakeCall::OnSentPacket(const rtc::SentPacket& sent_packet) {
+  last_sent_packet_ = sent_packet;
+  if (sent_packet.packet_id >= 0) {
+    last_sent_nonnegative_packet_id_ = sent_packet.packet_id;
+  }
+}
+
+} // namespace cricket
diff --git a/third_party/libwebrtc/media/engine/fake_webrtc_call.h b/third_party/libwebrtc/media/engine/fake_webrtc_call.h
new file mode 100644
index 0000000000..3dd6bdf397
--- /dev/null
+++ b/third_party/libwebrtc/media/engine/fake_webrtc_call.h
@@ -0,0 +1,519 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This file contains fake implementations, for use in unit tests, of the
+// following classes:
+//
+// webrtc::Call
+// webrtc::AudioSendStream
+// webrtc::AudioReceiveStreamInterface
+// webrtc::VideoSendStream
+// webrtc::VideoReceiveStreamInterface
+
+#ifndef MEDIA_ENGINE_FAKE_WEBRTC_CALL_H_
+#define MEDIA_ENGINE_FAKE_WEBRTC_CALL_H_
+
+#include <map>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "api/transport/field_trial_based_config.h"
+#include "api/video/video_frame.h"
+#include "call/audio_receive_stream.h"
+#include "call/audio_send_stream.h"
+#include "call/call.h"
+#include "call/flexfec_receive_stream.h"
+#include "call/test/mock_rtp_transport_controller_send.h"
+#include "call/video_receive_stream.h"
+#include "call/video_send_stream.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "rtc_base/buffer.h"
+#include "test/scoped_key_value_config.h"
+
+namespace cricket {
+// Fake audio send stream for tests: records the config, mute state and the
+// last telephone event, and exposes them through test accessors. No audio is
+// actually processed or sent.
+class FakeAudioSendStream final : public webrtc::AudioSendStream {
+ public:
+  // Snapshot of the most recent SendTelephoneEvent() call.
+  struct TelephoneEvent {
+    int payload_type = -1;
+    int payload_frequency = -1;
+    int event_code = 0;
+    int duration_ms = 0;
+  };
+
+  explicit FakeAudioSendStream(int id,
+                               const webrtc::AudioSendStream::Config& config);
+
+  // Test-only accessors.
+  int id() const { return id_; }
+  const webrtc::AudioSendStream::Config& GetConfig() const override;
+  void SetStats(const webrtc::AudioSendStream::Stats& stats);
+  TelephoneEvent GetLatestTelephoneEvent() const;
+  bool IsSending() const { return sending_; }
+  bool muted() const { return muted_; }
+
+ private:
+  // webrtc::AudioSendStream implementation.
+  void Reconfigure(const webrtc::AudioSendStream::Config& config,
+                   webrtc::SetParametersCallback callback) override;
+  void Start() override { sending_ = true; }
+  void Stop() override { sending_ = false; }
+  // Audio frames are dropped; the fake does not process media.
+  void SendAudioData(std::unique_ptr<webrtc::AudioFrame> audio_frame) override {
+  }
+  bool SendTelephoneEvent(int payload_type,
+                          int payload_frequency,
+                          int event,
+                          int duration_ms) override;
+  void SetMuted(bool muted) override;
+  webrtc::AudioSendStream::Stats GetStats() const override;
+  webrtc::AudioSendStream::Stats GetStats(
+      bool has_remote_tracks) const override;
+
+  int id_ = -1;
+  TelephoneEvent latest_telephone_event_;
+  webrtc::AudioSendStream::Config config_;
+  webrtc::AudioSendStream::Stats stats_;
+  bool sending_ = false;
+  bool muted_ = false;
+};
+
+// Fake audio receive stream for tests: records config mutations, delivered
+// packets, sink/gain/delay settings, and serves canned stats.
+class FakeAudioReceiveStream final
+    : public webrtc::AudioReceiveStreamInterface {
+ public:
+  explicit FakeAudioReceiveStream(
+      int id,
+      const webrtc::AudioReceiveStreamInterface::Config& config);
+
+  // Test-only accessors.
+  int id() const { return id_; }
+  const webrtc::AudioReceiveStreamInterface::Config& GetConfig() const;
+  void SetStats(const webrtc::AudioReceiveStreamInterface::Stats& stats);
+  int received_packets() const { return received_packets_; }
+  // Compares `data`/`length` against the most recently delivered packet.
+  bool VerifyLastPacket(const uint8_t* data, size_t length) const;
+  const webrtc::AudioSinkInterface* sink() const { return sink_; }
+  float gain() const { return gain_; }
+  // Records the packet (see received_packets()/VerifyLastPacket()).
+  bool DeliverRtp(const uint8_t* packet, size_t length, int64_t packet_time_us);
+  bool started() const { return started_; }
+  int base_mininum_playout_delay_ms() const {
+    return base_mininum_playout_delay_ms_;
+  }
+
+  void SetLocalSsrc(uint32_t local_ssrc) {
+    config_.rtp.local_ssrc = local_ssrc;
+  }
+
+  void SetSyncGroup(absl::string_view sync_group) {
+    config_.sync_group = std::string(sync_group);
+  }
+
+  // webrtc::AudioReceiveStreamInterface implementation; all setters only
+  // mutate the stored config/state.
+  uint32_t remote_ssrc() const override { return config_.rtp.remote_ssrc; }
+  void Start() override { started_ = true; }
+  void Stop() override { started_ = false; }
+  bool IsRunning() const override { return started_; }
+  void SetDepacketizerToDecoderFrameTransformer(
+      rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer)
+      override;
+  void SetDecoderMap(
+      std::map<int, webrtc::SdpAudioFormat> decoder_map) override;
+  void SetNackHistory(int history_ms) override;
+  void SetNonSenderRttMeasurement(bool enabled) override;
+  void SetFrameDecryptor(rtc::scoped_refptr<webrtc::FrameDecryptorInterface>
+                             frame_decryptor) override;
+
+  webrtc::AudioReceiveStreamInterface::Stats GetStats(
+      bool get_and_clear_legacy_stats) const override;
+  void SetSink(webrtc::AudioSinkInterface* sink) override;
+  void SetGain(float gain) override;
+  bool SetBaseMinimumPlayoutDelayMs(int delay_ms) override {
+    base_mininum_playout_delay_ms_ = delay_ms;
+    return true;
+  }
+  int GetBaseMinimumPlayoutDelayMs() const override {
+    return base_mininum_playout_delay_ms_;
+  }
+  // The fake tracks no RTP sources.
+  std::vector<webrtc::RtpSource> GetSources() const override {
+    return std::vector<webrtc::RtpSource>();
+  }
+
+ private:
+  int id_ = -1;
+  webrtc::AudioReceiveStreamInterface::Config config_;
+  webrtc::AudioReceiveStreamInterface::Stats stats_;
+  int received_packets_ = 0;
+  webrtc::AudioSinkInterface* sink_ = nullptr;
+  float gain_ = 1.0f;
+  rtc::Buffer last_packet_;
+  bool started_ = false;
+  int base_mininum_playout_delay_ms_ = 0;
+};
+
+// Fake video send stream for tests. Acts as a video sink so a source can be
+// attached; records frames, encoder (re)configurations, codec-specific
+// settings and sink wants instead of encoding anything.
+class FakeVideoSendStream final
+    : public webrtc::VideoSendStream,
+      public rtc::VideoSinkInterface<webrtc::VideoFrame> {
+ public:
+  FakeVideoSendStream(webrtc::VideoSendStream::Config config,
+                      webrtc::VideoEncoderConfig encoder_config);
+  ~FakeVideoSendStream() override;
+  // Test-only accessors.
+  const webrtc::VideoSendStream::Config& GetConfig() const;
+  const webrtc::VideoEncoderConfig& GetEncoderConfig() const;
+  const std::vector<webrtc::VideoStream>& GetVideoStreams() const;
+
+  bool IsSending() const;
+  // The Get*Settings() helpers return false when the encoder config did not
+  // carry settings for that codec.
+  bool GetVp8Settings(webrtc::VideoCodecVP8* settings) const;
+  bool GetVp9Settings(webrtc::VideoCodecVP9* settings) const;
+  bool GetH264Settings(webrtc::VideoCodecH264* settings) const;
+  bool GetAv1Settings(webrtc::VideoCodecAV1* settings) const;
+
+  int GetNumberOfSwappedFrames() const;
+  int GetLastWidth() const;
+  int GetLastHeight() const;
+  int64_t GetLastTimestamp() const;
+  void SetStats(const webrtc::VideoSendStream::Stats& stats);
+  int num_encoder_reconfigurations() const {
+    return num_encoder_reconfigurations_;
+  }
+
+  bool resolution_scaling_enabled() const {
+    return resolution_scaling_enabled_;
+  }
+  bool framerate_scaling_enabled() const { return framerate_scaling_enabled_; }
+  // Test hook: simulates the encoder pushing new sink wants to the source.
+  void InjectVideoSinkWants(const rtc::VideoSinkWants& wants);
+
+  rtc::VideoSourceInterface<webrtc::VideoFrame>* source() const {
+    return source_;
+  }
+  void GenerateKeyFrame(const std::vector<std::string>& rids);
+  const std::vector<std::string>& GetKeyFramesRequested() const {
+    return keyframes_requested_by_rid_;
+  }
+
+ private:
+  // rtc::VideoSinkInterface<VideoFrame> implementation.
+  void OnFrame(const webrtc::VideoFrame& frame) override;
+
+  // webrtc::VideoSendStream implementation.
+  void StartPerRtpStream(std::vector<bool> active_layers) override;
+  void Start() override;
+  void Stop() override;
+  bool started() override { return IsSending(); }
+  void AddAdaptationResource(
+      rtc::scoped_refptr<webrtc::Resource> resource) override;
+  std::vector<rtc::scoped_refptr<webrtc::Resource>> GetAdaptationResources()
+      override;
+  void SetSource(
+      rtc::VideoSourceInterface<webrtc::VideoFrame>* source,
+      const webrtc::DegradationPreference& degradation_preference) override;
+  webrtc::VideoSendStream::Stats GetStats() override;
+
+  void ReconfigureVideoEncoder(webrtc::VideoEncoderConfig config) override;
+  void ReconfigureVideoEncoder(webrtc::VideoEncoderConfig config,
+                               webrtc::SetParametersCallback callback) override;
+
+  bool sending_;
+  webrtc::VideoSendStream::Config config_;
+  webrtc::VideoEncoderConfig encoder_config_;
+  std::vector<webrtc::VideoStream> video_streams_;
+  rtc::VideoSinkWants sink_wants_;
+
+  // Whether codec_specific_settings_ holds valid data, and for which codec
+  // (determined by the encoder config).
+  bool codec_settings_set_;
+  union CodecSpecificSettings {
+    webrtc::VideoCodecVP8 vp8;
+    webrtc::VideoCodecVP9 vp9;
+    webrtc::VideoCodecH264 h264;
+    webrtc::VideoCodecAV1 av1;
+  } codec_specific_settings_;
+  bool resolution_scaling_enabled_;
+  bool framerate_scaling_enabled_;
+  rtc::VideoSourceInterface<webrtc::VideoFrame>* source_;
+  int num_swapped_frames_;
+  absl::optional<webrtc::VideoFrame> last_frame_;
+  webrtc::VideoSendStream::Stats stats_;
+  int num_encoder_reconfigurations_ = 0;
+  std::vector<std::string> keyframes_requested_by_rid_;
+};
+
+// Fake video receive stream for tests: stores config mutations made through
+// the setter overrides, serves canned stats, and can forward injected frames
+// to the configured renderer.
+class FakeVideoReceiveStream final
+    : public webrtc::VideoReceiveStreamInterface {
+ public:
+  explicit FakeVideoReceiveStream(
+      webrtc::VideoReceiveStreamInterface::Config config);
+
+  const webrtc::VideoReceiveStreamInterface::Config& GetConfig() const;
+
+  bool IsReceiving() const;
+
+  // Test hook: pushes `frame` to the configured renderer.
+  void InjectFrame(const webrtc::VideoFrame& frame);
+
+  // Test hook: sets the canned stats returned by GetStats().
+  void SetStats(const webrtc::VideoReceiveStreamInterface::Stats& stats);
+
+  // The fake tracks no RTP sources.
+  std::vector<webrtc::RtpSource> GetSources() const override {
+    return std::vector<webrtc::RtpSource>();
+  }
+
+  int base_mininum_playout_delay_ms() const {
+    return base_mininum_playout_delay_ms_;
+  }
+
+  void SetLocalSsrc(uint32_t local_ssrc) {
+    config_.rtp.local_ssrc = local_ssrc;
+  }
+
+  void UpdateRtxSsrc(uint32_t ssrc) { config_.rtp.rtx_ssrc = ssrc; }
+
+  // Frame decryption/transformation is not modeled.
+  void SetFrameDecryptor(rtc::scoped_refptr<webrtc::FrameDecryptorInterface>
+                             frame_decryptor) override {}
+
+  void SetDepacketizerToDecoderFrameTransformer(
+      rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer)
+      override {}
+
+  RecordingState SetAndGetRecordingState(RecordingState state,
+                                         bool generate_key_frame) override {
+    return RecordingState();
+  }
+  void GenerateKeyFrame() override {}
+
+  // The setter overrides below simply record the values into config_ so
+  // tests can assert on them via GetConfig().
+  void SetRtcpMode(webrtc::RtcpMode mode) override {
+    config_.rtp.rtcp_mode = mode;
+  }
+
+  void SetFlexFecProtection(webrtc::RtpPacketSinkInterface* sink) override {
+    config_.rtp.packet_sink_ = sink;
+    config_.rtp.protected_by_flexfec = (sink != nullptr);
+  }
+
+  void SetLossNotificationEnabled(bool enabled) override {
+    config_.rtp.lntf.enabled = enabled;
+  }
+
+  void SetNackHistory(webrtc::TimeDelta history) override {
+    config_.rtp.nack.rtp_history_ms = history.ms();
+  }
+
+  void SetProtectionPayloadTypes(int red_payload_type,
+                                 int ulpfec_payload_type) override {
+    config_.rtp.red_payload_type = red_payload_type;
+    config_.rtp.ulpfec_payload_type = ulpfec_payload_type;
+  }
+
+  void SetRtcpXr(Config::Rtp::RtcpXr rtcp_xr) override {
+    config_.rtp.rtcp_xr = rtcp_xr;
+  }
+
+  void SetAssociatedPayloadTypes(std::map<int, int> associated_payload_types) {
+    config_.rtp.rtx_associated_payload_types =
+        std::move(associated_payload_types);
+  }
+
+  void Start() override;
+  void Stop() override;
+
+  webrtc::VideoReceiveStreamInterface::Stats GetStats() const override;
+
+  bool SetBaseMinimumPlayoutDelayMs(int delay_ms) override {
+    base_mininum_playout_delay_ms_ = delay_ms;
+    return true;
+  }
+
+  int GetBaseMinimumPlayoutDelayMs() const override {
+    return base_mininum_playout_delay_ms_;
+  }
+
+ private:
+  webrtc::VideoReceiveStreamInterface::Config config_;
+  bool receiving_;
+  webrtc::VideoReceiveStreamInterface::Stats stats_;
+
+  int base_mininum_playout_delay_ms_ = 0;
+};
+
+// Fake FlexFEC receive stream for tests: stores the config and reflects
+// setter calls back through the getters. Packet handling is not implemented.
+class FakeFlexfecReceiveStream final : public webrtc::FlexfecReceiveStream {
+ public:
+  explicit FakeFlexfecReceiveStream(
+      const webrtc::FlexfecReceiveStream::Config config);
+
+  void SetLocalSsrc(uint32_t local_ssrc) {
+    config_.rtp.local_ssrc = local_ssrc;
+  }
+
+  void SetRtcpMode(webrtc::RtcpMode mode) override { config_.rtcp_mode = mode; }
+
+  int payload_type() const override { return config_.payload_type; }
+  void SetPayloadType(int payload_type) override {
+    config_.payload_type = payload_type;
+  }
+
+  const webrtc::FlexfecReceiveStream::Config& GetConfig() const;
+
+  uint32_t remote_ssrc() const { return config_.rtp.remote_ssrc; }
+
+  // No receive statistics are tracked by the fake.
+  const webrtc::ReceiveStatistics* GetStats() const override { return nullptr; }
+
+ private:
+  // DCHECKs; RTP delivery is not modeled (see the .cc file).
+  void OnRtpPacket(const webrtc::RtpPacketReceived& packet) override;
+
+  webrtc::FlexfecReceiveStream::Config config_;
+};
+
+// Fake webrtc::Call for tests. Owns the fake streams it creates (deleted in
+// the Destroy* methods; the destructor EXPECTs none are leaked), tracks sent
+// and received packets, and routes incoming RTP to its own fake receive
+// streams. Also acts as its own PacketReceiver.
+class FakeCall final : public webrtc::Call, public webrtc::PacketReceiver {
+ public:
+  // With no arguments both the worker and network "threads" are the current
+  // thread; field trials fall back to an internally owned config.
+  explicit FakeCall(webrtc::test::ScopedKeyValueConfig* field_trials = nullptr);
+  FakeCall(webrtc::TaskQueueBase* worker_thread,
+           webrtc::TaskQueueBase* network_thread,
+           webrtc::test::ScopedKeyValueConfig* field_trials = nullptr);
+  ~FakeCall() override;
+
+  webrtc::MockRtpTransportControllerSend* GetMockTransportControllerSend() {
+    return &transport_controller_send_;
+  }
+
+  // Test-only accessors for the live streams (FakeCall keeps ownership).
+  const std::vector<FakeVideoSendStream*>& GetVideoSendStreams();
+  const std::vector<FakeVideoReceiveStream*>& GetVideoReceiveStreams();
+
+  const std::vector<FakeAudioSendStream*>& GetAudioSendStreams();
+  const FakeAudioSendStream* GetAudioSendStream(uint32_t ssrc);
+  const std::vector<FakeAudioReceiveStream*>& GetAudioReceiveStreams();
+  const FakeAudioReceiveStream* GetAudioReceiveStream(uint32_t ssrc);
+  const FakeVideoReceiveStream* GetVideoReceiveStream(uint32_t ssrc);
+
+  const std::vector<FakeFlexfecReceiveStream*>& GetFlexfecReceiveStreams();
+
+  rtc::SentPacket last_sent_packet() const { return last_sent_packet_; }
+  const webrtc::RtpPacketReceived& last_received_rtp_packet() const {
+    return last_received_rtp_packet_;
+  }
+  // Number of RTP packets successfully demuxed to a stream with this SSRC.
+  size_t GetDeliveredPacketsForSsrc(uint32_t ssrc) const {
+    auto it = delivered_packets_by_ssrc_.find(ssrc);
+    return it != delivered_packets_by_ssrc_.end() ? it->second : 0u;
+  }
+
+  // This is useful if we care about the last media packet (with id populated)
+  // but not the last ICE packet (with -1 ID).
+  int last_sent_nonnegative_packet_id() const {
+    return last_sent_nonnegative_packet_id_;
+  }
+
+  webrtc::NetworkState GetNetworkState(webrtc::MediaType media) const;
+  // Lifetime totals; not decremented when streams are destroyed.
+  int GetNumCreatedSendStreams() const;
+  int GetNumCreatedReceiveStreams() const;
+  void SetStats(const webrtc::Call::Stats& stats);
+
+  void SetClientBitratePreferences(
+      const webrtc::BitrateSettings& preferences) override {}
+
+  // Layers an extra field-trial override on top of the active trials.
+  void SetFieldTrial(const std::string& field_trial_string) {
+    trials_overrides_ = std::make_unique<webrtc::test::ScopedKeyValueConfig>(
+        *trials_, field_trial_string);
+  }
+
+  const webrtc::FieldTrialsView& trials() const override { return *trials_; }
+
+ private:
+  // webrtc::Call implementation. Create* methods allocate fake streams owned
+  // by this FakeCall; Destroy* methods delete them (and fail the test on an
+  // unknown stream).
+  webrtc::AudioSendStream* CreateAudioSendStream(
+      const webrtc::AudioSendStream::Config& config) override;
+  void DestroyAudioSendStream(webrtc::AudioSendStream* send_stream) override;
+
+  webrtc::AudioReceiveStreamInterface* CreateAudioReceiveStream(
+      const webrtc::AudioReceiveStreamInterface::Config& config) override;
+  void DestroyAudioReceiveStream(
+      webrtc::AudioReceiveStreamInterface* receive_stream) override;
+
+  webrtc::VideoSendStream* CreateVideoSendStream(
+      webrtc::VideoSendStream::Config config,
+      webrtc::VideoEncoderConfig encoder_config) override;
+  void DestroyVideoSendStream(webrtc::VideoSendStream* send_stream) override;
+
+  webrtc::VideoReceiveStreamInterface* CreateVideoReceiveStream(
+      webrtc::VideoReceiveStreamInterface::Config config) override;
+  void DestroyVideoReceiveStream(
+      webrtc::VideoReceiveStreamInterface* receive_stream) override;
+
+  webrtc::FlexfecReceiveStream* CreateFlexfecReceiveStream(
+      const webrtc::FlexfecReceiveStream::Config config) override;
+  void DestroyFlexfecReceiveStream(
+      webrtc::FlexfecReceiveStream* receive_stream) override;
+
+  void AddAdaptationResource(
+      rtc::scoped_refptr<webrtc::Resource> resource) override;
+
+  // Returns `this` (FakeCall is its own PacketReceiver).
+  webrtc::PacketReceiver* Receiver() override;
+
+  // RTCP is ignored by the fake.
+  void DeliverRtcpPacket(rtc::CopyOnWriteBuffer packet) override {}
+
+  void DeliverRtpPacket(
+      webrtc::MediaType media_type,
+      webrtc::RtpPacketReceived packet,
+      OnUndemuxablePacketHandler un_demuxable_packet_handler) override;
+
+  // Demuxes by SSRC to a matching fake receive stream; returns false when no
+  // stream matched.
+  bool DeliverPacketInternal(webrtc::MediaType media_type,
+                             uint32_t ssrc,
+                             const rtc::CopyOnWriteBuffer& packet,
+                             webrtc::Timestamp arrival_time);
+
+  webrtc::RtpTransportControllerSendInterface* GetTransportControllerSend()
+      override {
+    return &transport_controller_send_;
+  }
+
+  webrtc::Call::Stats GetStats() const override;
+
+  webrtc::TaskQueueBase* network_thread() const override;
+  webrtc::TaskQueueBase* worker_thread() const override;
+
+  void SignalChannelNetworkState(webrtc::MediaType media,
+                                 webrtc::NetworkState state) override;
+  void OnAudioTransportOverheadChanged(
+      int transport_overhead_per_packet) override;
+  void OnLocalSsrcUpdated(webrtc::AudioReceiveStreamInterface& stream,
+                          uint32_t local_ssrc) override;
+  void OnLocalSsrcUpdated(webrtc::VideoReceiveStreamInterface& stream,
+                          uint32_t local_ssrc) override;
+  void OnLocalSsrcUpdated(webrtc::FlexfecReceiveStream& stream,
+                          uint32_t local_ssrc) override;
+  void OnUpdateSyncGroup(webrtc::AudioReceiveStreamInterface& stream,
+                         absl::string_view sync_group) override;
+  void OnSentPacket(const rtc::SentPacket& sent_packet) override;
+
+  webrtc::TaskQueueBase* const network_thread_;
+  webrtc::TaskQueueBase* const worker_thread_;
+
+  ::testing::NiceMock<webrtc::MockRtpTransportControllerSend>
+      transport_controller_send_;
+
+  webrtc::NetworkState audio_network_state_;
+  webrtc::NetworkState video_network_state_;
+  rtc::SentPacket last_sent_packet_;
+  webrtc::RtpPacketReceived last_received_rtp_packet_;
+  int last_sent_nonnegative_packet_id_ = -1;
+  // Arbitrary non-zero starting id so stream ids are recognizable in tests.
+  int next_stream_id_ = 665;
+  webrtc::Call::Stats stats_;
+  std::vector<FakeVideoSendStream*> video_send_streams_;
+  std::vector<FakeAudioSendStream*> audio_send_streams_;
+  std::vector<FakeVideoReceiveStream*> video_receive_streams_;
+  std::vector<FakeAudioReceiveStream*> audio_receive_streams_;
+  std::vector<FakeFlexfecReceiveStream*> flexfec_receive_streams_;
+  std::map<uint32_t, size_t> delivered_packets_by_ssrc_;
+
+  int num_created_send_streams_;
+  int num_created_receive_streams_;
+
+  // The field trials that are in use, either supplied by caller
+  // or pointer to &fallback_trials_.
+  webrtc::test::ScopedKeyValueConfig* trials_;
+
+  // fallback_trials_ is used if caller does not provide any field trials.
+  webrtc::test::ScopedKeyValueConfig fallback_trials_;
+
+  // An extra field trial that can be set using SetFieldTrial.
+  std::unique_ptr<webrtc::test::ScopedKeyValueConfig> trials_overrides_;
+};
+
+} // namespace cricket
+#endif // MEDIA_ENGINE_FAKE_WEBRTC_CALL_H_
diff --git a/third_party/libwebrtc/media/engine/fake_webrtc_video_engine.cc b/third_party/libwebrtc/media/engine/fake_webrtc_video_engine.cc
new file mode 100644
index 0000000000..cf402478a0
--- /dev/null
+++ b/third_party/libwebrtc/media/engine/fake_webrtc_video_engine.cc
@@ -0,0 +1,304 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/engine/fake_webrtc_video_engine.h"
+
+#include <algorithm>
+#include <memory>
+
+#include "absl/strings/match.h"
+#include "media/base/codec.h"
+#include "media/base/media_constants.h"
+#include "media/engine/simulcast_encoder_adapter.h"
+#include "modules/video_coding/include/video_error_codes.h"
+#include "rtc_base/time_utils.h"
+
+namespace cricket {
+
+namespace {
+
+// Upper bound used by the blocking Wait* helpers in this file
+// (WaitForInitEncode, WaitForCreatedVideoEncoders).
+static constexpr webrtc::TimeDelta kEventTimeout =
+ webrtc::TimeDelta::Seconds(10);
+
+// Returns true when no scalability mode was requested, or when any of
+// `formats` advertises a mode whose string form equals `scalability_mode`.
+bool IsScalabilityModeSupported(
+ const std::vector<webrtc::SdpVideoFormat>& formats,
+ absl::optional<std::string> scalability_mode) {
+ // An absent mode imposes no constraint.
+ if (!scalability_mode.has_value()) {
+ return true;
+ }
+ for (const auto& candidate_format : formats) {
+ for (const auto& advertised_mode : candidate_format.scalability_modes) {
+ if (ScalabilityModeToString(advertised_mode) == *scalability_mode) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+} // namespace
+
+// Decoder.
+FakeWebRtcVideoDecoder::FakeWebRtcVideoDecoder(
+ FakeWebRtcVideoDecoderFactory* factory)
+ : num_frames_received_(0), factory_(factory) {}
+
+// Unregisters this decoder from the owning factory; `factory_` may be null.
+FakeWebRtcVideoDecoder::~FakeWebRtcVideoDecoder() {
+ if (factory_) {
+ factory_->DecoderDestroyed(this);
+ }
+}
+
+// Accepts any configuration.
+bool FakeWebRtcVideoDecoder::Configure(const Settings& settings) {
+ return true;
+}
+
+// Counts the frame and reports success; no actual decoding is performed.
+int32_t FakeWebRtcVideoDecoder::Decode(const webrtc::EncodedImage&,
+ int64_t) {
+ num_frames_received_++;
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// The callback is ignored; this fake never produces decoded images.
+int32_t FakeWebRtcVideoDecoder::RegisterDecodeCompleteCallback(
+ webrtc::DecodedImageCallback*) {
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t FakeWebRtcVideoDecoder::Release() {
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// Number of frames passed to Decode() so far.
+int FakeWebRtcVideoDecoder::GetNumFramesReceived() const {
+ return num_frames_received_;
+}
+
+// Decoder factory.
+FakeWebRtcVideoDecoderFactory::FakeWebRtcVideoDecoderFactory()
+ : num_created_decoders_(0) {}
+
+// Returns the supported formats with per-codec duplicates removed,
+// preserving the order in which they were added.
+std::vector<webrtc::SdpVideoFormat>
+FakeWebRtcVideoDecoderFactory::GetSupportedFormats() const {
+ std::vector<webrtc::SdpVideoFormat> unique_formats;
+ for (const webrtc::SdpVideoFormat& candidate : supported_codec_formats_) {
+ // Skip entries whose codec is already represented in the result.
+ if (candidate.IsCodecInList(unique_formats))
+ continue;
+ unique_formats.push_back(candidate);
+ }
+ return unique_formats;
+}
+
+// Creates a FakeWebRtcVideoDecoder for supported formats, nullptr otherwise.
+// A raw pointer to each created decoder is retained in `decoders_`; it is
+// removed again via DecoderDestroyed() when the decoder is destructed.
+std::unique_ptr<webrtc::VideoDecoder>
+FakeWebRtcVideoDecoderFactory::CreateVideoDecoder(
+ const webrtc::SdpVideoFormat& format) {
+ if (format.IsCodecInList(supported_codec_formats_)) {
+ num_created_decoders_++;
+ std::unique_ptr<FakeWebRtcVideoDecoder> decoder =
+ std::make_unique<FakeWebRtcVideoDecoder>(this);
+ decoders_.push_back(decoder.get());
+ return decoder;
+ }
+
+ return nullptr;
+}
+
+// Called from ~FakeWebRtcVideoDecoder to drop the tracking pointer.
+void FakeWebRtcVideoDecoderFactory::DecoderDestroyed(
+ FakeWebRtcVideoDecoder* decoder) {
+ decoders_.erase(std::remove(decoders_.begin(), decoders_.end(), decoder),
+ decoders_.end());
+}
+
+// Registers `name` as supported, using CreateVideoCodec so the resulting
+// SdpVideoFormat carries the codec's default parameters.
+void FakeWebRtcVideoDecoderFactory::AddSupportedVideoCodecType(
+ const std::string& name) {
+ // This is to match the default H264 params of cricket::VideoCodec.
+ cricket::VideoCodec video_codec = cricket::CreateVideoCodec(name);
+ supported_codec_formats_.push_back(
+ webrtc::SdpVideoFormat(video_codec.name, video_codec.params));
+}
+
+int FakeWebRtcVideoDecoderFactory::GetNumCreatedDecoders() {
+ return num_created_decoders_;
+}
+
+// Live decoders created by this factory (raw, factory does not own them).
+const std::vector<FakeWebRtcVideoDecoder*>&
+FakeWebRtcVideoDecoderFactory::decoders() {
+ return decoders_;
+}
+
+// Encoder.
+FakeWebRtcVideoEncoder::FakeWebRtcVideoEncoder(
+ FakeWebRtcVideoEncoderFactory* factory)
+ : num_frames_encoded_(0), factory_(factory) {}
+
+// Unregisters this encoder from the owning factory; `factory_` may be null.
+FakeWebRtcVideoEncoder::~FakeWebRtcVideoEncoder() {
+ if (factory_) {
+ factory_->EncoderDestroyed(this);
+ }
+}
+
+void FakeWebRtcVideoEncoder::SetFecControllerOverride(
+ webrtc::FecControllerOverride* fec_controller_override) {
+ // Ignored.
+}
+
+// Records the codec settings and signals init_encode_event_ so that
+// WaitForInitEncode() unblocks.
+int32_t FakeWebRtcVideoEncoder::InitEncode(
+ const webrtc::VideoCodec* codecSettings,
+ const VideoEncoder::Settings& settings) {
+ webrtc::MutexLock lock(&mutex_);
+ codec_settings_ = *codecSettings;
+ init_encode_event_.Set();
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// Counts the frame and signals the event; no actual encoding is performed.
+int32_t FakeWebRtcVideoEncoder::Encode(
+ const webrtc::VideoFrame& inputImage,
+ const std::vector<webrtc::VideoFrameType>* frame_types) {
+ webrtc::MutexLock lock(&mutex_);
+ ++num_frames_encoded_;
+ init_encode_event_.Set();
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// The callback is ignored; this fake never emits encoded images.
+int32_t FakeWebRtcVideoEncoder::RegisterEncodeCompleteCallback(
+ webrtc::EncodedImageCallback* callback) {
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t FakeWebRtcVideoEncoder::Release() {
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// Rate updates are ignored.
+void FakeWebRtcVideoEncoder::SetRates(const RateControlParameters& parameters) {
+}
+
+// Reports itself as hardware accelerated; all other info fields keep their
+// EncoderInfo defaults.
+webrtc::VideoEncoder::EncoderInfo FakeWebRtcVideoEncoder::GetEncoderInfo()
+ const {
+ EncoderInfo info;
+ info.is_hardware_accelerated = true;
+ return info;
+}
+
+// Blocks up to kEventTimeout for InitEncode() (or Encode()) to be called.
+bool FakeWebRtcVideoEncoder::WaitForInitEncode() {
+ return init_encode_event_.Wait(kEventTimeout);
+}
+
+// Returns a copy of the settings captured by the last InitEncode() call.
+webrtc::VideoCodec FakeWebRtcVideoEncoder::GetCodecSettings() {
+ webrtc::MutexLock lock(&mutex_);
+ return codec_settings_;
+}
+
+int FakeWebRtcVideoEncoder::GetNumEncodedFrames() {
+ webrtc::MutexLock lock(&mutex_);
+ return num_frames_encoded_;
+}
+
+// Video encoder factory.
+FakeWebRtcVideoEncoderFactory::FakeWebRtcVideoEncoderFactory()
+ : num_created_encoders_(0), vp8_factory_mode_(false) {}
+
+// Returns the registered formats with per-codec duplicates removed,
+// preserving insertion order.
+std::vector<webrtc::SdpVideoFormat>
+FakeWebRtcVideoEncoderFactory::GetSupportedFormats() const {
+ std::vector<webrtc::SdpVideoFormat> formats;
+
+ for (const webrtc::SdpVideoFormat& format : formats_) {
+ // Don't add same codec twice.
+ if (!format.IsCodecInList(formats))
+ formats.push_back(format);
+ }
+
+ return formats;
+}
+
+// Supported iff the codec is registered and (when a scalability mode is
+// requested) at least one registered format for that codec advertises it.
+webrtc::VideoEncoderFactory::CodecSupport
+FakeWebRtcVideoEncoderFactory::QueryCodecSupport(
+ const webrtc::SdpVideoFormat& format,
+ absl::optional<std::string> scalability_mode) const {
+ // Collect every registered format for the same codec as `format`.
+ std::vector<webrtc::SdpVideoFormat> supported_formats;
+ for (const auto& f : formats_) {
+ if (format.IsSameCodec(f))
+ supported_formats.push_back(f);
+ }
+ if (format.IsCodecInList(formats_)) {
+ return {.is_supported = IsScalabilityModeSupported(supported_formats,
+ scalability_mode)};
+ }
+ return {.is_supported = false};
+}
+
+// Returns nullptr for unregistered formats. The first VP8 request yields a
+// SimulcastEncoderAdapter that re-enters this factory (with
+// vp8_factory_mode_ set) to create the per-stream fake encoders.
+std::unique_ptr<webrtc::VideoEncoder>
+FakeWebRtcVideoEncoderFactory::CreateVideoEncoder(
+ const webrtc::SdpVideoFormat& format) {
+ webrtc::MutexLock lock(&mutex_);
+ std::unique_ptr<webrtc::VideoEncoder> encoder;
+ if (format.IsCodecInList(formats_)) {
+ if (absl::EqualsIgnoreCase(format.name, kVp8CodecName) &&
+ !vp8_factory_mode_) {
+ // The simulcast adapter will ask this factory for multiple VP8
+ // encoders. Enter vp8_factory_mode so that we now create these encoders
+ // instead of more adapters.
+ vp8_factory_mode_ = true;
+ encoder = std::make_unique<webrtc::SimulcastEncoderAdapter>(this, format);
+ } else {
+ num_created_encoders_++;
+ created_video_encoder_event_.Set();
+ encoder = std::make_unique<FakeWebRtcVideoEncoder>(this);
+ encoders_.push_back(static_cast<FakeWebRtcVideoEncoder*>(encoder.get()));
+ }
+ }
+ return encoder;
+}
+
+// Blocks until at least `num_encoders` fake encoders have been created or
+// kEventTimeout has elapsed overall. Each event signal rechecks the count;
+// the remaining budget is recomputed so the total wait stays bounded.
+bool FakeWebRtcVideoEncoderFactory::WaitForCreatedVideoEncoders(
+ int num_encoders) {
+ int64_t start_offset_ms = rtc::TimeMillis();
+ int64_t wait_time = kEventTimeout.ms();
+ do {
+ if (GetNumCreatedEncoders() >= num_encoders)
+ return true;
+ wait_time = kEventTimeout.ms() - (rtc::TimeMillis() - start_offset_ms);
+ } while (wait_time > 0 && created_video_encoder_event_.Wait(
+ webrtc::TimeDelta::Millis(wait_time)));
+ return false;
+}
+
+// Called from ~FakeWebRtcVideoEncoder to drop the tracking pointer.
+void FakeWebRtcVideoEncoderFactory::EncoderDestroyed(
+ FakeWebRtcVideoEncoder* encoder) {
+ webrtc::MutexLock lock(&mutex_);
+ encoders_.erase(std::remove(encoders_.begin(), encoders_.end(), encoder),
+ encoders_.end());
+}
+
+// Registers an exact SdpVideoFormat as supported.
+void FakeWebRtcVideoEncoderFactory::AddSupportedVideoCodec(
+ const webrtc::SdpVideoFormat& format) {
+ formats_.push_back(format);
+}
+
+// Registers `name` with the codec's default parameters and the given
+// scalability modes.
+void FakeWebRtcVideoEncoderFactory::AddSupportedVideoCodecType(
+ const std::string& name,
+ const std::vector<webrtc::ScalabilityMode>& scalability_modes) {
+ // This is to match the default H264 params of cricket::VideoCodec.
+ cricket::VideoCodec video_codec = cricket::CreateVideoCodec(name);
+ formats_.push_back(webrtc::SdpVideoFormat(
+ video_codec.name, video_codec.params,
+ {scalability_modes.begin(), scalability_modes.end()}));
+}
+
+int FakeWebRtcVideoEncoderFactory::GetNumCreatedEncoders() {
+ webrtc::MutexLock lock(&mutex_);
+ return num_created_encoders_;
+}
+
+// Returns a snapshot (copy) of the live-encoder list, taken under the lock.
+const std::vector<FakeWebRtcVideoEncoder*>
+FakeWebRtcVideoEncoderFactory::encoders() {
+ webrtc::MutexLock lock(&mutex_);
+ return encoders_;
+}
+
+} // namespace cricket
diff --git a/third_party/libwebrtc/media/engine/fake_webrtc_video_engine.h b/third_party/libwebrtc/media/engine/fake_webrtc_video_engine.h
new file mode 100644
index 0000000000..3db4225ced
--- /dev/null
+++ b/third_party/libwebrtc/media/engine/fake_webrtc_video_engine.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2010 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MEDIA_ENGINE_FAKE_WEBRTC_VIDEO_ENGINE_H_
+#define MEDIA_ENGINE_FAKE_WEBRTC_VIDEO_ENGINE_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "api/fec_controller_override.h"
+#include "api/video/encoded_image.h"
+#include "api/video/video_bitrate_allocation.h"
+#include "api/video/video_frame.h"
+#include "api/video_codecs/sdp_video_format.h"
+#include "api/video_codecs/video_codec.h"
+#include "api/video_codecs/video_decoder.h"
+#include "api/video_codecs/video_decoder_factory.h"
+#include "api/video_codecs/video_encoder.h"
+#include "api/video_codecs/video_encoder_factory.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "rtc_base/event.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace cricket {
+
+class FakeWebRtcVideoDecoderFactory;
+class FakeWebRtcVideoEncoderFactory;
+
+// Fake class for mocking out webrtc::VideoDecoder
+class FakeWebRtcVideoDecoder : public webrtc::VideoDecoder {
+ public:
+ explicit FakeWebRtcVideoDecoder(FakeWebRtcVideoDecoderFactory* factory);
+ ~FakeWebRtcVideoDecoder();
+
+ bool Configure(const Settings& settings) override;
+ int32_t Decode(const webrtc::EncodedImage&, int64_t) override;
+ int32_t RegisterDecodeCompleteCallback(
+ webrtc::DecodedImageCallback*) override;
+ int32_t Release() override;
+
+ // Test accessor: number of frames passed to Decode().
+ int GetNumFramesReceived() const;
+
+ private:
+ // Frames passed to Decode() so far.
+ int num_frames_received_;
+ // Owning factory, notified on destruction; may be null.
+ FakeWebRtcVideoDecoderFactory* factory_;
+};
+
+// Fake class for mocking out webrtc::VideoDecoderFactory.
+class FakeWebRtcVideoDecoderFactory : public webrtc::VideoDecoderFactory {
+ public:
+ FakeWebRtcVideoDecoderFactory();
+
+ std::vector<webrtc::SdpVideoFormat> GetSupportedFormats() const override;
+ std::unique_ptr<webrtc::VideoDecoder> CreateVideoDecoder(
+ const webrtc::SdpVideoFormat& format) override;
+
+ // Invoked by ~FakeWebRtcVideoDecoder to unregister itself.
+ void DecoderDestroyed(FakeWebRtcVideoDecoder* decoder);
+ void AddSupportedVideoCodecType(const std::string& name);
+ // Test accessors.
+ int GetNumCreatedDecoders();
+ const std::vector<FakeWebRtcVideoDecoder*>& decoders();
+
+ private:
+ std::vector<webrtc::SdpVideoFormat> supported_codec_formats_;
+ // Raw pointers to live decoders; not owned by the factory.
+ std::vector<FakeWebRtcVideoDecoder*> decoders_;
+ int num_created_decoders_;
+};
+
+// Fake class for mocking out webrtc::VideoEncoder
+class FakeWebRtcVideoEncoder : public webrtc::VideoEncoder {
+ public:
+ explicit FakeWebRtcVideoEncoder(FakeWebRtcVideoEncoderFactory* factory);
+ ~FakeWebRtcVideoEncoder();
+
+ void SetFecControllerOverride(
+ webrtc::FecControllerOverride* fec_controller_override) override;
+ int32_t InitEncode(const webrtc::VideoCodec* codecSettings,
+ const VideoEncoder::Settings& settings) override;
+ int32_t Encode(
+ const webrtc::VideoFrame& inputImage,
+ const std::vector<webrtc::VideoFrameType>* frame_types) override;
+ int32_t RegisterEncodeCompleteCallback(
+ webrtc::EncodedImageCallback* callback) override;
+ int32_t Release() override;
+ void SetRates(const RateControlParameters& parameters) override;
+ webrtc::VideoEncoder::EncoderInfo GetEncoderInfo() const override;
+
+ // Test helpers: block for InitEncode, and read back captured state.
+ bool WaitForInitEncode();
+ webrtc::VideoCodec GetCodecSettings();
+ int GetNumEncodedFrames();
+
+ private:
+ webrtc::Mutex mutex_;
+ // Signaled by InitEncode() and Encode().
+ rtc::Event init_encode_event_;
+ int num_frames_encoded_ RTC_GUARDED_BY(mutex_);
+ // Copy of the settings from the last InitEncode() call.
+ webrtc::VideoCodec codec_settings_ RTC_GUARDED_BY(mutex_);
+ // Owning factory, notified on destruction; may be null.
+ FakeWebRtcVideoEncoderFactory* factory_;
+};
+
+// Fake class for mocking out webrtc::VideoEncoderFactory.
+class FakeWebRtcVideoEncoderFactory : public webrtc::VideoEncoderFactory {
+ public:
+ FakeWebRtcVideoEncoderFactory();
+
+ std::vector<webrtc::SdpVideoFormat> GetSupportedFormats() const override;
+ webrtc::VideoEncoderFactory::CodecSupport QueryCodecSupport(
+ const webrtc::SdpVideoFormat& format,
+ absl::optional<std::string> scalability_mode) const override;
+ std::unique_ptr<webrtc::VideoEncoder> CreateVideoEncoder(
+ const webrtc::SdpVideoFormat& format) override;
+
+ bool WaitForCreatedVideoEncoders(int num_encoders);
+ // Invoked by ~FakeWebRtcVideoEncoder to unregister itself.
+ void EncoderDestroyed(FakeWebRtcVideoEncoder* encoder);
+ // NOTE(review): declared but no definition appears in
+ // fake_webrtc_video_engine.cc in this patch; calling it would fail to
+ // link — confirm whether this is a stale declaration.
+ void set_encoders_have_internal_sources(bool internal_source);
+ void AddSupportedVideoCodec(const webrtc::SdpVideoFormat& format);
+ void AddSupportedVideoCodecType(
+ const std::string& name,
+ const std::vector<webrtc::ScalabilityMode>& scalability_modes = {});
+ // Test accessors; encoders() returns a snapshot copy taken under the lock.
+ int GetNumCreatedEncoders();
+ const std::vector<FakeWebRtcVideoEncoder*> encoders();
+
+ private:
+ webrtc::Mutex mutex_;
+ rtc::Event created_video_encoder_event_;
+ std::vector<webrtc::SdpVideoFormat> formats_;
+ std::vector<FakeWebRtcVideoEncoder*> encoders_ RTC_GUARDED_BY(mutex_);
+ int num_created_encoders_ RTC_GUARDED_BY(mutex_);
+ // Set once the first VP8 simulcast adapter has been handed out; further
+ // VP8 requests then get plain fake encoders.
+ bool vp8_factory_mode_;
+};
+
+} // namespace cricket
+
+#endif // MEDIA_ENGINE_FAKE_WEBRTC_VIDEO_ENGINE_H_
diff --git a/third_party/libwebrtc/media/engine/internal_decoder_factory.cc b/third_party/libwebrtc/media/engine/internal_decoder_factory.cc
new file mode 100644
index 0000000000..001c666313
--- /dev/null
+++ b/third_party/libwebrtc/media/engine/internal_decoder_factory.cc
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/engine/internal_decoder_factory.h"
+
+#include "absl/strings/match.h"
+#include "api/video_codecs/av1_profile.h"
+#include "api/video_codecs/sdp_video_format.h"
+#include "api/video_codecs/video_codec.h"
+#include "media/base/codec.h"
+#include "media/base/media_constants.h"
+#include "modules/video_coding/codecs/h264/include/h264.h"
+#include "modules/video_coding/codecs/vp8/include/vp8.h"
+#include "modules/video_coding/codecs/vp9/include/vp9.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "system_wrappers/include/field_trial.h"
+
+#if defined(RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY)
+#include "modules/video_coding/codecs/av1/dav1d_decoder.h" // nogncheck
+#endif
+
+namespace webrtc {
+namespace {
+#if defined(RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY)
+constexpr bool kDav1dIsIncluded = true;
+#else
+constexpr bool kDav1dIsIncluded = false;
+// Stub keeps the CreateDav1dDecoder() call site compiling when the dav1d
+// decoder is not linked in; it is unreachable because kDav1dIsIncluded is
+// false on this branch.
+std::unique_ptr<VideoDecoder> CreateDav1dDecoder() {
+ return nullptr;
+}
+#endif
+
+} // namespace
+
+// Lists the software decoders compiled into this build: VP8 always, VP9 and
+// H264 as reported by their helpers, and AV1 (profiles 0 and 1) when dav1d
+// is included and this is not a Mozilla build.
+std::vector<SdpVideoFormat> InternalDecoderFactory::GetSupportedFormats()
+ const {
+ std::vector<SdpVideoFormat> formats;
+ formats.push_back(SdpVideoFormat(cricket::kVp8CodecName));
+ for (const SdpVideoFormat& format : SupportedVP9DecoderCodecs())
+ formats.push_back(format);
+ for (const SdpVideoFormat& h264_format : SupportedH264DecoderCodecs())
+ formats.push_back(h264_format);
+
+#if !defined(WEBRTC_MOZILLA_BUILD)
+ if (kDav1dIsIncluded) {
+ formats.push_back(SdpVideoFormat(cricket::kAv1CodecName));
+ formats.push_back(SdpVideoFormat(
+ cricket::kAv1CodecName,
+ {{kAV1FmtpProfile, AV1ProfileToString(AV1Profile::kProfile1).data()}}));
+ }
+#endif
+
+ return formats;
+}
+
+VideoDecoderFactory::CodecSupport InternalDecoderFactory::QueryCodecSupport(
+ const SdpVideoFormat& format,
+ bool reference_scaling) const {
+ // Query for supported formats and check if the specified format is supported.
+ // Return unsupported if an invalid combination of format and
+ // reference_scaling is specified.
+ if (reference_scaling) {
+ // Only VP9 and AV1 can decode streams that use reference scaling
+ // (spatial layers); any other codec with this flag is rejected outright.
+ VideoCodecType codec = PayloadStringToCodecType(format.name);
+ if (codec != kVideoCodecVP9 && codec != kVideoCodecAV1) {
+ return {/*is_supported=*/false, /*is_power_efficient=*/false};
+ }
+ }
+
+ // is_power_efficient keeps its default (false) for these software decoders.
+ CodecSupport codec_support;
+ codec_support.is_supported = format.IsCodecInList(GetSupportedFormats());
+ return codec_support;
+}
+
+// Creates the matching software decoder, or nullptr (with a warning) for
+// formats not in GetSupportedFormats(). Codec name matching is
+// case-insensitive.
+std::unique_ptr<VideoDecoder> InternalDecoderFactory::CreateVideoDecoder(
+ const SdpVideoFormat& format) {
+ if (!format.IsCodecInList(GetSupportedFormats())) {
+ RTC_LOG(LS_WARNING) << "Trying to create decoder for unsupported format. "
+ << format.ToString();
+ return nullptr;
+ }
+
+ if (absl::EqualsIgnoreCase(format.name, cricket::kVp8CodecName))
+ return VP8Decoder::Create();
+ if (absl::EqualsIgnoreCase(format.name, cricket::kVp9CodecName))
+ return VP9Decoder::Create();
+ if (absl::EqualsIgnoreCase(format.name, cricket::kH264CodecName))
+ return H264Decoder::Create();
+
+ if (absl::EqualsIgnoreCase(format.name, cricket::kAv1CodecName) &&
+ kDav1dIsIncluded) {
+ return CreateDav1dDecoder();
+ }
+
+ // Unreachable: every supported format is handled above.
+ RTC_DCHECK_NOTREACHED();
+ return nullptr;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/media/engine/internal_decoder_factory.h b/third_party/libwebrtc/media/engine/internal_decoder_factory.h
new file mode 100644
index 0000000000..0129fb2173
--- /dev/null
+++ b/third_party/libwebrtc/media/engine/internal_decoder_factory.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MEDIA_ENGINE_INTERNAL_DECODER_FACTORY_H_
+#define MEDIA_ENGINE_INTERNAL_DECODER_FACTORY_H_
+
+#include <memory>
+#include <vector>
+
+#include "api/video_codecs/sdp_video_format.h"
+#include "api/video_codecs/video_decoder.h"
+#include "api/video_codecs/video_decoder_factory.h"
+#include "rtc_base/system/rtc_export.h"
+
+namespace webrtc {
+
+// Factory for the software video decoders bundled with WebRTC
+// (VP8/VP9/H264, and AV1 via dav1d when compiled in).
+class RTC_EXPORT InternalDecoderFactory : public VideoDecoderFactory {
+ public:
+ std::vector<SdpVideoFormat> GetSupportedFormats() const override;
+ CodecSupport QueryCodecSupport(const SdpVideoFormat& format,
+ bool reference_scaling) const override;
+ std::unique_ptr<VideoDecoder> CreateVideoDecoder(
+ const SdpVideoFormat& format) override;
+};
+
+} // namespace webrtc
+
+#endif // MEDIA_ENGINE_INTERNAL_DECODER_FACTORY_H_
diff --git a/third_party/libwebrtc/media/engine/internal_decoder_factory_unittest.cc b/third_party/libwebrtc/media/engine/internal_decoder_factory_unittest.cc
new file mode 100644
index 0000000000..bb2e24d5d8
--- /dev/null
+++ b/third_party/libwebrtc/media/engine/internal_decoder_factory_unittest.cc
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/engine/internal_decoder_factory.h"
+
+#include "api/video_codecs/av1_profile.h"
+#include "api/video_codecs/sdp_video_format.h"
+#include "api/video_codecs/video_decoder.h"
+#include "api/video_codecs/vp9_profile.h"
+#include "media/base/media_constants.h"
+#include "system_wrappers/include/field_trial.h"
+#include "test/field_trial.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+using ::testing::Contains;
+using ::testing::Field;
+using ::testing::Not;
+
+using ::webrtc::field_trial::InitFieldTrialsFromString;
+
+#ifdef RTC_ENABLE_VP9
+constexpr bool kVp9Enabled = true;
+#else
+constexpr bool kVp9Enabled = false;
+#endif
+#ifdef WEBRTC_USE_H264
+constexpr bool kH264Enabled = true;
+#else
+constexpr bool kH264Enabled = false;
+#endif
+#ifdef RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY
+constexpr bool kDav1dIsIncluded = true;
+#else
+constexpr bool kDav1dIsIncluded = false;
+#endif
+// Expected CodecSupport values used throughout these tests.
+constexpr VideoDecoderFactory::CodecSupport kSupported = {
+ /*is_supported=*/true, /*is_power_efficient=*/false};
+constexpr VideoDecoderFactory::CodecSupport kUnsupported = {
+ /*is_supported=*/false, /*is_power_efficient=*/false};
+
+// Matches a CodecSupport against `expected` on both fields.
+MATCHER_P(Support, expected, "") {
+ return arg.is_supported == expected.is_supported &&
+ arg.is_power_efficient == expected.is_power_efficient;
+}
+
+// VP8 decoding is always compiled in, so creation must succeed.
+TEST(InternalDecoderFactoryTest, Vp8) {
+ InternalDecoderFactory factory;
+ std::unique_ptr<VideoDecoder> decoder =
+ factory.CreateVideoDecoder(SdpVideoFormat(cricket::kVp8CodecName));
+ EXPECT_TRUE(decoder);
+}
+
+// VP9 creation succeeds exactly when the build has RTC_ENABLE_VP9.
+TEST(InternalDecoderFactoryTest, Vp9Profile0) {
+ InternalDecoderFactory factory;
+ std::unique_ptr<VideoDecoder> decoder =
+ factory.CreateVideoDecoder(SdpVideoFormat(
+ cricket::kVp9CodecName,
+ {{kVP9FmtpProfileId, VP9ProfileToString(VP9Profile::kProfile0)}}));
+ EXPECT_EQ(static_cast<bool>(decoder), kVp9Enabled);
+}
+
+TEST(InternalDecoderFactoryTest, Vp9Profile1) {
+ InternalDecoderFactory factory;
+ std::unique_ptr<VideoDecoder> decoder =
+ factory.CreateVideoDecoder(SdpVideoFormat(
+ cricket::kVp9CodecName,
+ {{kVP9FmtpProfileId, VP9ProfileToString(VP9Profile::kProfile1)}}));
+ EXPECT_EQ(static_cast<bool>(decoder), kVp9Enabled);
+}
+
+// H264 creation succeeds exactly when the build has WEBRTC_USE_H264.
+TEST(InternalDecoderFactoryTest, H264) {
+ InternalDecoderFactory factory;
+ std::unique_ptr<VideoDecoder> decoder =
+ factory.CreateVideoDecoder(SdpVideoFormat(cricket::kH264CodecName));
+ EXPECT_EQ(static_cast<bool>(decoder), kH264Enabled);
+}
+
+// AV1 is advertised and creatable only when dav1d is compiled in.
+TEST(InternalDecoderFactoryTest, Av1Profile0) {
+ InternalDecoderFactory factory;
+ if (kDav1dIsIncluded) {
+ EXPECT_THAT(factory.GetSupportedFormats(),
+ Contains(Field(&SdpVideoFormat::name, cricket::kAv1CodecName)));
+ EXPECT_TRUE(
+ factory.CreateVideoDecoder(SdpVideoFormat(cricket::kAv1CodecName)));
+ } else {
+ EXPECT_THAT(
+ factory.GetSupportedFormats(),
+ Not(Contains(Field(&SdpVideoFormat::name, cricket::kAv1CodecName))));
+ }
+}
+
+#if defined(RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY)
+TEST(InternalDecoderFactoryTest, Av1) {
+ InternalDecoderFactory factory;
+ EXPECT_THAT(factory.GetSupportedFormats(),
+ Contains(Field(&SdpVideoFormat::name, cricket::kAv1CodecName)));
+}
+#endif
+
+// AV1 profile 1 creation tracks dav1d availability.
+TEST(InternalDecoderFactoryTest, Av1Profile1_Dav1dDecoderTrialEnabled) {
+ InternalDecoderFactory factory;
+ std::unique_ptr<VideoDecoder> decoder = factory.CreateVideoDecoder(
+ SdpVideoFormat(cricket::kAv1CodecName,
+ {{kAV1FmtpProfile,
+ AV1ProfileToString(AV1Profile::kProfile1).data()}}));
+ EXPECT_EQ(static_cast<bool>(decoder), kDav1dIsIncluded);
+}
+
+// Without reference scaling, support only depends on the codec being built.
+TEST(InternalDecoderFactoryTest, QueryCodecSupportNoReferenceScaling) {
+ InternalDecoderFactory factory;
+ EXPECT_THAT(factory.QueryCodecSupport(SdpVideoFormat(cricket::kVp8CodecName),
+ /*reference_scaling=*/false),
+ Support(kSupported));
+ EXPECT_THAT(factory.QueryCodecSupport(SdpVideoFormat(cricket::kVp9CodecName),
+ /*reference_scaling=*/false),
+ Support(kVp9Enabled ? kSupported : kUnsupported));
+ EXPECT_THAT(factory.QueryCodecSupport(
+ SdpVideoFormat(cricket::kVp9CodecName,
+ {{kVP9FmtpProfileId,
+ VP9ProfileToString(VP9Profile::kProfile1)}}),
+ /*reference_scaling=*/false),
+ Support(kVp9Enabled ? kSupported : kUnsupported));
+
+#if defined(RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY)
+ EXPECT_THAT(factory.QueryCodecSupport(SdpVideoFormat(cricket::kAv1CodecName),
+ /*reference_scaling=*/false),
+ Support(kSupported));
+#endif
+}
+
+// With reference scaling, only VP9/AV1 may report support; VP8/H264 are an
+// invalid combination even when those codecs are built in.
+TEST(InternalDecoderFactoryTest, QueryCodecSupportReferenceScaling) {
+ InternalDecoderFactory factory;
+ // VP9 and AV1 support for spatial layers.
+ EXPECT_THAT(factory.QueryCodecSupport(SdpVideoFormat(cricket::kVp9CodecName),
+ /*reference_scaling=*/true),
+ Support(kVp9Enabled ? kSupported : kUnsupported));
+#if defined(RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY)
+ EXPECT_THAT(factory.QueryCodecSupport(SdpVideoFormat(cricket::kAv1CodecName),
+ /*reference_scaling=*/true),
+ Support(kSupported));
+#endif
+
+ // Invalid config even though VP8 and H264 are supported.
+ EXPECT_THAT(factory.QueryCodecSupport(SdpVideoFormat(cricket::kH264CodecName),
+ /*reference_scaling=*/true),
+ Support(kUnsupported));
+ EXPECT_THAT(factory.QueryCodecSupport(SdpVideoFormat(cricket::kVp8CodecName),
+ /*reference_scaling=*/true),
+ Support(kUnsupported));
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/media/engine/internal_encoder_factory.cc b/third_party/libwebrtc/media/engine/internal_encoder_factory.cc
new file mode 100644
index 0000000000..7b5fc24e0a
--- /dev/null
+++ b/third_party/libwebrtc/media/engine/internal_encoder_factory.cc
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/engine/internal_encoder_factory.h"
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "absl/strings/match.h"
+#include "api/video_codecs/video_encoder_factory.h"
+#include "api/video_codecs/video_encoder_factory_template.h"
+#if defined(RTC_USE_LIBAOM_AV1_ENCODER)
+#include "api/video_codecs/video_encoder_factory_template_libaom_av1_adapter.h" // nogncheck
+#endif
+#include "api/video_codecs/video_encoder_factory_template_libvpx_vp8_adapter.h"
+#include "api/video_codecs/video_encoder_factory_template_libvpx_vp9_adapter.h"
+#if defined(WEBRTC_USE_H264)
+#include "api/video_codecs/video_encoder_factory_template_open_h264_adapter.h" // nogncheck
+#endif
+
+namespace webrtc {
+namespace {
+
+// Compile-time list of the software encoders bundled with this build:
+// VP8 and VP9 always, H264 and AV1 only when their defines are set.
+using Factory =
+ VideoEncoderFactoryTemplate<webrtc::LibvpxVp8EncoderTemplateAdapter,
+#if defined(WEBRTC_USE_H264)
+ webrtc::OpenH264EncoderTemplateAdapter,
+#endif
+#if defined(RTC_USE_LIBAOM_AV1_ENCODER)
+ webrtc::LibaomAv1EncoderTemplateAdapter,
+#endif
+ webrtc::LibvpxVp9EncoderTemplateAdapter>;
+} // namespace
+
+// Delegates to the template factory built from the adapter list above.
+std::vector<SdpVideoFormat> InternalEncoderFactory::GetSupportedFormats()
+ const {
+ return Factory().GetSupportedFormats();
+}
+
+// Fuzzy-matches `format` against the supported formats (tolerating
+// parameter differences) and creates an encoder for the canonical match;
+// returns nullptr when nothing matches.
+std::unique_ptr<VideoEncoder> InternalEncoderFactory::CreateVideoEncoder(
+ const SdpVideoFormat& format) {
+ auto original_format =
+ FuzzyMatchSdpVideoFormat(Factory().GetSupportedFormats(), format);
+ return original_format ? Factory().CreateVideoEncoder(*original_format)
+ : nullptr;
+}
+
+// Fuzzy-matches `format` like CreateVideoEncoder(), then forwards the
+// scalability-mode query; unmatched formats are reported unsupported.
+VideoEncoderFactory::CodecSupport InternalEncoderFactory::QueryCodecSupport(
+ const SdpVideoFormat& format,
+ absl::optional<std::string> scalability_mode) const {
+ auto original_format =
+ FuzzyMatchSdpVideoFormat(Factory().GetSupportedFormats(), format);
+ return original_format
+ ? Factory().QueryCodecSupport(*original_format, scalability_mode)
+ : VideoEncoderFactory::CodecSupport{.is_supported = false};
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/media/engine/internal_encoder_factory.h b/third_party/libwebrtc/media/engine/internal_encoder_factory.h
new file mode 100644
index 0000000000..25480d088f
--- /dev/null
+++ b/third_party/libwebrtc/media/engine/internal_encoder_factory.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MEDIA_ENGINE_INTERNAL_ENCODER_FACTORY_H_
+#define MEDIA_ENGINE_INTERNAL_ENCODER_FACTORY_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "api/video_codecs/video_encoder_factory.h"
+#include "rtc_base/system/rtc_export.h"
+
+namespace webrtc {
+// Factory for the software video encoders bundled with WebRTC
+// (VP8/VP9, plus H264 and AV1 when compiled in).
+class RTC_EXPORT InternalEncoderFactory : public VideoEncoderFactory {
+ public:
+ std::vector<SdpVideoFormat> GetSupportedFormats() const override;
+ CodecSupport QueryCodecSupport(
+ const SdpVideoFormat& format,
+ absl::optional<std::string> scalability_mode) const override;
+ std::unique_ptr<VideoEncoder> CreateVideoEncoder(
+ const SdpVideoFormat& format) override;
+};
+
+} // namespace webrtc
+
+#endif // MEDIA_ENGINE_INTERNAL_ENCODER_FACTORY_H_
diff --git a/third_party/libwebrtc/media/engine/internal_encoder_factory_unittest.cc b/third_party/libwebrtc/media/engine/internal_encoder_factory_unittest.cc
new file mode 100644
index 0000000000..a1c90b8cf4
--- /dev/null
+++ b/third_party/libwebrtc/media/engine/internal_encoder_factory_unittest.cc
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/engine/internal_encoder_factory.h"
+
+#include "api/video_codecs/sdp_video_format.h"
+#include "api/video_codecs/video_encoder.h"
+#include "api/video_codecs/vp9_profile.h"
+#include "media/base/media_constants.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+using ::testing::Contains;
+using ::testing::Field;
+using ::testing::Not;
+
+#ifdef RTC_ENABLE_VP9
+constexpr bool kVp9Enabled = true;
+#else
+constexpr bool kVp9Enabled = false;
+#endif
+#ifdef WEBRTC_USE_H264
+constexpr bool kH264Enabled = true;
+#else
+constexpr bool kH264Enabled = false;
+#endif
+constexpr VideoEncoderFactory::CodecSupport kSupported = {
+ /*is_supported=*/true, /*is_power_efficient=*/false};
+constexpr VideoEncoderFactory::CodecSupport kUnsupported = {
+ /*is_supported=*/false, /*is_power_efficient=*/false};
+
+MATCHER_P(Support, expected, "") {
+ return arg.is_supported == expected.is_supported &&
+ arg.is_power_efficient == expected.is_power_efficient;
+}
+
+TEST(InternalEncoderFactoryTest, Vp8) {
+ InternalEncoderFactory factory;
+ std::unique_ptr<VideoEncoder> encoder =
+ factory.CreateVideoEncoder(SdpVideoFormat(cricket::kVp8CodecName));
+ EXPECT_TRUE(encoder);
+}
+
+TEST(InternalEncoderFactoryTest, Vp9Profile0) {
+ InternalEncoderFactory factory;
+ if (kVp9Enabled) {
+ std::unique_ptr<VideoEncoder> encoder =
+ factory.CreateVideoEncoder(SdpVideoFormat(
+ cricket::kVp9CodecName,
+ {{kVP9FmtpProfileId, VP9ProfileToString(VP9Profile::kProfile0)}}));
+ EXPECT_TRUE(encoder);
+ } else {
+ EXPECT_THAT(
+ factory.GetSupportedFormats(),
+ Not(Contains(Field(&SdpVideoFormat::name, cricket::kVp9CodecName))));
+ }
+}
+
+TEST(InternalEncoderFactoryTest, H264) {
+ InternalEncoderFactory factory;
+ if (kH264Enabled) {
+ std::unique_ptr<VideoEncoder> encoder =
+ factory.CreateVideoEncoder(SdpVideoFormat(cricket::kH264CodecName));
+ EXPECT_TRUE(encoder);
+ } else {
+ EXPECT_THAT(
+ factory.GetSupportedFormats(),
+ Not(Contains(Field(&SdpVideoFormat::name, cricket::kH264CodecName))));
+ }
+}
+
+TEST(InternalEncoderFactoryTest, QueryCodecSupportWithScalabilityMode) {
+ InternalEncoderFactory factory;
+  // VP8 and VP9 are supported for a single spatial layer.
+ EXPECT_THAT(
+ factory.QueryCodecSupport(SdpVideoFormat(cricket::kVp8CodecName), "L1T2"),
+ Support(kSupported));
+ EXPECT_THAT(
+ factory.QueryCodecSupport(SdpVideoFormat(cricket::kVp9CodecName), "L1T3"),
+ Support(kVp9Enabled ? kSupported : kUnsupported));
+
+ // VP9 support for spatial layers.
+ EXPECT_THAT(
+ factory.QueryCodecSupport(SdpVideoFormat(cricket::kVp9CodecName), "L3T3"),
+ Support(kVp9Enabled ? kSupported : kUnsupported));
+
+ // Invalid scalability modes even though VP8 and H264 are supported.
+ EXPECT_THAT(factory.QueryCodecSupport(SdpVideoFormat(cricket::kH264CodecName),
+ "L2T2"),
+ Support(kUnsupported));
+ EXPECT_THAT(
+ factory.QueryCodecSupport(SdpVideoFormat(cricket::kVp8CodecName), "L3T3"),
+ Support(kUnsupported));
+}
+
+#if defined(RTC_USE_LIBAOM_AV1_ENCODER)
+TEST(InternalEncoderFactoryTest, Av1) {
+ InternalEncoderFactory factory;
+ EXPECT_THAT(factory.GetSupportedFormats(),
+ Contains(Field(&SdpVideoFormat::name, cricket::kAv1CodecName)));
+ EXPECT_TRUE(
+ factory.CreateVideoEncoder(SdpVideoFormat(cricket::kAv1CodecName)));
+}
+
+TEST(InternalEncoderFactoryTest, QueryCodecSupportNoScalabilityModeAv1) {
+ InternalEncoderFactory factory;
+ EXPECT_THAT(factory.QueryCodecSupport(SdpVideoFormat(cricket::kAv1CodecName),
+ /*scalability_mode=*/absl::nullopt),
+ Support(kSupported));
+}
+
+TEST(InternalEncoderFactoryTest, QueryCodecSupportNoScalabilityMode) {
+ InternalEncoderFactory factory;
+ EXPECT_THAT(factory.QueryCodecSupport(SdpVideoFormat(cricket::kVp8CodecName),
+ /*scalability_mode=*/absl::nullopt),
+ Support(kSupported));
+ EXPECT_THAT(factory.QueryCodecSupport(SdpVideoFormat(cricket::kVp9CodecName),
+ /*scalability_mode=*/absl::nullopt),
+ Support(kVp9Enabled ? kSupported : kUnsupported));
+}
+
+TEST(InternalEncoderFactoryTest, QueryCodecSupportWithScalabilityModeAv1) {
+ InternalEncoderFactory factory;
+ EXPECT_THAT(
+ factory.QueryCodecSupport(SdpVideoFormat(cricket::kAv1CodecName), "L2T1"),
+ Support(kSupported));
+}
+#endif // defined(RTC_USE_LIBAOM_AV1_ENCODER)
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/media/engine/multiplex_codec_factory.cc b/third_party/libwebrtc/media/engine/multiplex_codec_factory.cc
new file mode 100644
index 0000000000..90df02a77e
--- /dev/null
+++ b/third_party/libwebrtc/media/engine/multiplex_codec_factory.cc
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/engine/multiplex_codec_factory.h"
+
+#include <map>
+#include <string>
+#include <utility>
+
+#include "absl/strings/match.h"
+#include "api/video_codecs/sdp_video_format.h"
+#include "media/base/codec.h"
+#include "media/base/media_constants.h"
+#include "modules/video_coding/codecs/multiplex/include/multiplex_decoder_adapter.h"
+#include "modules/video_coding/codecs/multiplex/include/multiplex_encoder_adapter.h"
+#include "rtc_base/logging.h"
+
+namespace {
+
+bool IsMultiplexCodec(const cricket::VideoCodec& codec) {
+ return absl::EqualsIgnoreCase(codec.name.c_str(),
+ cricket::kMultiplexCodecName);
+}
+
+} // anonymous namespace
+
+namespace webrtc {
+
+constexpr const char* kMultiplexAssociatedCodecName = cricket::kVp9CodecName;
+
+MultiplexEncoderFactory::MultiplexEncoderFactory(
+ std::unique_ptr<VideoEncoderFactory> factory,
+ bool supports_augmenting_data)
+ : factory_(std::move(factory)),
+ supports_augmenting_data_(supports_augmenting_data) {}
+
+std::vector<SdpVideoFormat> MultiplexEncoderFactory::GetSupportedFormats()
+ const {
+ std::vector<SdpVideoFormat> formats = factory_->GetSupportedFormats();
+ for (const auto& format : formats) {
+ if (absl::EqualsIgnoreCase(format.name, kMultiplexAssociatedCodecName)) {
+ SdpVideoFormat multiplex_format = format;
+ multiplex_format.parameters[cricket::kCodecParamAssociatedCodecName] =
+ format.name;
+ multiplex_format.name = cricket::kMultiplexCodecName;
+ formats.push_back(multiplex_format);
+ break;
+ }
+ }
+ return formats;
+}
+
+std::unique_ptr<VideoEncoder> MultiplexEncoderFactory::CreateVideoEncoder(
+ const SdpVideoFormat& format) {
+ if (!IsMultiplexCodec(cricket::CreateVideoCodec(format)))
+ return factory_->CreateVideoEncoder(format);
+ const auto& it =
+ format.parameters.find(cricket::kCodecParamAssociatedCodecName);
+ if (it == format.parameters.end()) {
+ RTC_LOG(LS_ERROR) << "No assicated codec for multiplex.";
+ return nullptr;
+ }
+ SdpVideoFormat associated_format = format;
+ associated_format.name = it->second;
+ return std::unique_ptr<VideoEncoder>(new MultiplexEncoderAdapter(
+ factory_.get(), associated_format, supports_augmenting_data_));
+}
+
+MultiplexDecoderFactory::MultiplexDecoderFactory(
+ std::unique_ptr<VideoDecoderFactory> factory,
+ bool supports_augmenting_data)
+ : factory_(std::move(factory)),
+ supports_augmenting_data_(supports_augmenting_data) {}
+
+std::vector<SdpVideoFormat> MultiplexDecoderFactory::GetSupportedFormats()
+ const {
+ std::vector<SdpVideoFormat> formats = factory_->GetSupportedFormats();
+ std::vector<SdpVideoFormat> augmented_formats = formats;
+ for (const auto& format : formats) {
+ if (absl::EqualsIgnoreCase(format.name, kMultiplexAssociatedCodecName)) {
+ SdpVideoFormat multiplex_format = format;
+ multiplex_format.parameters[cricket::kCodecParamAssociatedCodecName] =
+ format.name;
+ multiplex_format.name = cricket::kMultiplexCodecName;
+ augmented_formats.push_back(multiplex_format);
+ }
+ }
+ return augmented_formats;
+}
+
+std::unique_ptr<VideoDecoder> MultiplexDecoderFactory::CreateVideoDecoder(
+ const SdpVideoFormat& format) {
+ if (!IsMultiplexCodec(cricket::CreateVideoCodec(format)))
+ return factory_->CreateVideoDecoder(format);
+ const auto& it =
+ format.parameters.find(cricket::kCodecParamAssociatedCodecName);
+ if (it == format.parameters.end()) {
+ RTC_LOG(LS_ERROR) << "No assicated codec for multiplex.";
+ return nullptr;
+ }
+ SdpVideoFormat associated_format = format;
+ associated_format.name = it->second;
+ return std::unique_ptr<VideoDecoder>(new MultiplexDecoderAdapter(
+ factory_.get(), associated_format, supports_augmenting_data_));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/media/engine/multiplex_codec_factory.h b/third_party/libwebrtc/media/engine/multiplex_codec_factory.h
new file mode 100644
index 0000000000..a4272a2eb2
--- /dev/null
+++ b/third_party/libwebrtc/media/engine/multiplex_codec_factory.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MEDIA_ENGINE_MULTIPLEX_CODEC_FACTORY_H_
+#define MEDIA_ENGINE_MULTIPLEX_CODEC_FACTORY_H_
+
+#include <memory>
+#include <vector>
+
+#include "api/video_codecs/sdp_video_format.h"
+#include "api/video_codecs/video_decoder.h"
+#include "api/video_codecs/video_decoder_factory.h"
+#include "api/video_codecs/video_encoder.h"
+#include "api/video_codecs/video_encoder_factory.h"
+#include "rtc_base/system/rtc_export.h"
+
+namespace webrtc {
+// Multiplex codec is a completely modular/optional codec that allows users to
+// send more than a frame's opaque content(RGB/YUV) over video channels.
+// - Allows sending Alpha channel over the wire iff input is
+// I420ABufferInterface. Users can expect to receive I420ABufferInterface as the
+// decoded video frame buffer. I420A data is split into YUV/AXX portions,
+// encoded/decoded separately and bitstreams are concatenated.
+// - Allows sending augmenting data over the wire attached to the frame. This
+// attached data portion is not encoded in any way and sent as it is. Users can
+// input AugmentedVideoFrameBuffer and can expect the same interface as the
+// decoded video frame buffer.
+// - Showcases an example of how to add a custom codec in webrtc video channel.
+// How to use it end-to-end:
+// - Wrap your existing VideoEncoderFactory implementation with
+// MultiplexEncoderFactory and VideoDecoderFactory implementation with
+// MultiplexDecoderFactory below. For actual coding, multiplex creates encoder
+// and decoder instance(s) using these factories.
+// - Use Multiplex*coderFactory classes in CreatePeerConnectionFactory() calls.
+// - Select "multiplex" codec in SDP negotiation.
+class RTC_EXPORT MultiplexEncoderFactory : public VideoEncoderFactory {
+ public:
+ // `supports_augmenting_data` defines if the encoder would support augmenting
+ // data. If set, the encoder expects to receive video frame buffers of type
+ // AugmentedVideoFrameBuffer.
+ MultiplexEncoderFactory(std::unique_ptr<VideoEncoderFactory> factory,
+ bool supports_augmenting_data = false);
+
+ std::vector<SdpVideoFormat> GetSupportedFormats() const override;
+ std::unique_ptr<VideoEncoder> CreateVideoEncoder(
+ const SdpVideoFormat& format) override;
+
+ private:
+ std::unique_ptr<VideoEncoderFactory> factory_;
+ const bool supports_augmenting_data_;
+};
+
+class RTC_EXPORT MultiplexDecoderFactory : public VideoDecoderFactory {
+ public:
+ // `supports_augmenting_data` defines if the decoder would support augmenting
+ // data. If set, the decoder is expected to output video frame buffers of type
+ // AugmentedVideoFrameBuffer.
+ MultiplexDecoderFactory(std::unique_ptr<VideoDecoderFactory> factory,
+ bool supports_augmenting_data = false);
+
+ std::vector<SdpVideoFormat> GetSupportedFormats() const override;
+ std::unique_ptr<VideoDecoder> CreateVideoDecoder(
+ const SdpVideoFormat& format) override;
+
+ private:
+ std::unique_ptr<VideoDecoderFactory> factory_;
+ const bool supports_augmenting_data_;
+};
+
+} // namespace webrtc
+
+#endif // MEDIA_ENGINE_MULTIPLEX_CODEC_FACTORY_H_
diff --git a/third_party/libwebrtc/media/engine/multiplex_codec_factory_unittest.cc b/third_party/libwebrtc/media/engine/multiplex_codec_factory_unittest.cc
new file mode 100644
index 0000000000..1cde2f37d8
--- /dev/null
+++ b/third_party/libwebrtc/media/engine/multiplex_codec_factory_unittest.cc
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/engine/multiplex_codec_factory.h"
+
+#include <utility>
+
+#include "api/video_codecs/sdp_video_format.h"
+#include "api/video_codecs/video_decoder.h"
+#include "api/video_codecs/video_encoder.h"
+#include "media/base/media_constants.h"
+#include "media/engine/internal_decoder_factory.h"
+#include "media/engine/internal_encoder_factory.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(MultiplexDecoderFactory, CreateVideoDecoder) {
+ std::unique_ptr<VideoDecoderFactory> internal_factory(
+ new InternalDecoderFactory());
+ MultiplexDecoderFactory factory(std::move(internal_factory));
+ std::unique_ptr<VideoDecoder> decoder =
+ factory.CreateVideoDecoder(SdpVideoFormat(
+ cricket::kMultiplexCodecName,
+ {{cricket::kCodecParamAssociatedCodecName, cricket::kVp9CodecName}}));
+ EXPECT_TRUE(decoder);
+}
+
+TEST(MultiplexEncoderFactory, CreateVideoEncoder) {
+ std::unique_ptr<VideoEncoderFactory> internal_factory(
+ new InternalEncoderFactory());
+ MultiplexEncoderFactory factory(std::move(internal_factory));
+ std::unique_ptr<VideoEncoder> encoder =
+ factory.CreateVideoEncoder(SdpVideoFormat(
+ cricket::kMultiplexCodecName,
+ {{cricket::kCodecParamAssociatedCodecName, cricket::kVp9CodecName}}));
+ EXPECT_TRUE(encoder);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/media/engine/null_webrtc_video_engine.h b/third_party/libwebrtc/media/engine/null_webrtc_video_engine.h
new file mode 100644
index 0000000000..f94cb43e75
--- /dev/null
+++ b/third_party/libwebrtc/media/engine/null_webrtc_video_engine.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MEDIA_ENGINE_NULL_WEBRTC_VIDEO_ENGINE_H_
+#define MEDIA_ENGINE_NULL_WEBRTC_VIDEO_ENGINE_H_
+
+#include <vector>
+
+#include "media/base/media_channel.h"
+#include "media/base/media_engine.h"
+
+namespace webrtc {
+
+class Call;
+
+} // namespace webrtc
+
+namespace cricket {
+
+// Video engine implementation that does nothing and can be used in
+// CompositeMediaEngine.
+class NullWebRtcVideoEngine : public VideoEngineInterface {
+ public:
+ std::vector<VideoCodec> send_codecs(bool) const override {
+ return std::vector<VideoCodec>();
+ }
+
+ std::vector<VideoCodec> recv_codecs(bool) const override {
+ return std::vector<VideoCodec>();
+ }
+ std::vector<VideoCodec> send_codecs() const override {
+ return std::vector<VideoCodec>();
+ }
+
+ std::vector<VideoCodec> recv_codecs() const override {
+ return std::vector<VideoCodec>();
+ }
+
+ std::vector<webrtc::RtpHeaderExtensionCapability> GetRtpHeaderExtensions()
+ const override {
+ return {};
+ }
+};
+
+} // namespace cricket
+
+#endif // MEDIA_ENGINE_NULL_WEBRTC_VIDEO_ENGINE_H_
diff --git a/third_party/libwebrtc/media/engine/null_webrtc_video_engine_unittest.cc b/third_party/libwebrtc/media/engine/null_webrtc_video_engine_unittest.cc
new file mode 100644
index 0000000000..31c442d53d
--- /dev/null
+++ b/third_party/libwebrtc/media/engine/null_webrtc_video_engine_unittest.cc
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/engine/null_webrtc_video_engine.h"
+
+#include <memory>
+#include <utility>
+
+#include "api/task_queue/default_task_queue_factory.h"
+#include "api/task_queue/task_queue_factory.h"
+#include "api/transport/field_trial_based_config.h"
+#include "media/engine/webrtc_voice_engine.h"
+#include "modules/audio_device/include/mock_audio_device.h"
+#include "modules/audio_processing/include/audio_processing.h"
+#include "test/gtest.h"
+#include "test/mock_audio_decoder_factory.h"
+#include "test/mock_audio_encoder_factory.h"
+
+namespace cricket {
+
+// Simple test to check if NullWebRtcVideoEngine implements the methods
+// required by CompositeMediaEngine.
+TEST(NullWebRtcVideoEngineTest, CheckInterface) {
+ std::unique_ptr<webrtc::TaskQueueFactory> task_queue_factory =
+ webrtc::CreateDefaultTaskQueueFactory();
+ rtc::scoped_refptr<webrtc::test::MockAudioDeviceModule> adm =
+ webrtc::test::MockAudioDeviceModule::CreateNice();
+ webrtc::FieldTrialBasedConfig trials;
+ auto audio_engine = std::make_unique<WebRtcVoiceEngine>(
+ task_queue_factory.get(), adm.get(),
+ webrtc::MockAudioEncoderFactory::CreateUnusedFactory(),
+ webrtc::MockAudioDecoderFactory::CreateUnusedFactory(), nullptr,
+ webrtc::AudioProcessingBuilder().Create(), nullptr, nullptr, trials);
+
+ CompositeMediaEngine engine(std::move(audio_engine),
+ std::make_unique<NullWebRtcVideoEngine>());
+ engine.Init();
+}
+
+} // namespace cricket
diff --git a/third_party/libwebrtc/media/engine/payload_type_mapper.cc b/third_party/libwebrtc/media/engine/payload_type_mapper.cc
new file mode 100644
index 0000000000..bd86453b1c
--- /dev/null
+++ b/third_party/libwebrtc/media/engine/payload_type_mapper.cc
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/engine/payload_type_mapper.h"
+
+#include <utility>
+
+#include "absl/strings/ascii.h"
+#include "api/audio_codecs/audio_format.h"
+#include "media/base/codec.h"
+#include "media/base/media_constants.h"
+
+namespace cricket {
+
+webrtc::SdpAudioFormat AudioCodecToSdpAudioFormat(const AudioCodec& ac) {
+ return webrtc::SdpAudioFormat(ac.name, ac.clockrate, ac.channels, ac.params);
+}
+
+PayloadTypeMapper::PayloadTypeMapper()
+ // RFC 3551 reserves payload type numbers in the range 96-127 exclusively
+ // for dynamic assignment. Once those are used up, it is recommended that
+ // payload types unassigned by the RFC are used for dynamic payload type
+ // mapping, before any static payload ids. At this point, we only support
+ // mapping within the exclusive range.
+ : next_unused_payload_type_(96),
+ max_payload_type_(127),
+ mappings_(
+ {// Static payload type assignments according to RFC 3551.
+ {{kPcmuCodecName, 8000, 1}, 0},
+ {{"GSM", 8000, 1}, 3},
+ {{"G723", 8000, 1}, 4},
+ {{"DVI4", 8000, 1}, 5},
+ {{"DVI4", 16000, 1}, 6},
+ {{"LPC", 8000, 1}, 7},
+ {{kPcmaCodecName, 8000, 1}, 8},
+ {{kG722CodecName, 8000, 1}, 9},
+ {{kL16CodecName, 44100, 2}, 10},
+ {{kL16CodecName, 44100, 1}, 11},
+ {{"QCELP", 8000, 1}, 12},
+ {{kCnCodecName, 8000, 1}, 13},
+ // RFC 4566 is a bit ambiguous on the contents of the "encoding
+ // parameters" field, which, for audio, encodes the number of
+ // channels. It is "optional and may be omitted if the number of
+ // channels is one". Does that necessarily imply that an omitted
+ // encoding parameter means one channel? Since RFC 3551 doesn't
+ // specify a value for this parameter for MPA, I've included both 0
+ // and 1 here, to increase the chances it will be correctly used if
+ // someone implements an MPEG audio encoder/decoder.
+ {{"MPA", 90000, 0}, 14},
+ {{"MPA", 90000, 1}, 14},
+ {{"G728", 8000, 1}, 15},
+ {{"DVI4", 11025, 1}, 16},
+ {{"DVI4", 22050, 1}, 17},
+ {{"G729", 8000, 1}, 18},
+
+ // Payload type assignments currently used by WebRTC.
+ // Includes data to reduce collisions (and thus reassignments)
+ {{kIlbcCodecName, 8000, 1}, 102},
+ {{kCnCodecName, 16000, 1}, 105},
+ {{kCnCodecName, 32000, 1}, 106},
+ {{kOpusCodecName,
+ 48000,
+ 2,
+ {{kCodecParamMinPTime, "10"},
+ {kCodecParamUseInbandFec, kParamValueTrue}}},
+ 111},
+ // RED for opus is assigned in the lower range, starting at the top.
+ // Note that the FMTP refers to the opus payload type.
+ {{kRedCodecName,
+ 48000,
+ 2,
+ {{kCodecParamNotInNameValueFormat, "111/111"}}},
+ 63},
+ // TODO(solenberg): Remove the hard coded 16k,32k,48k DTMF once we
+ // assign payload types dynamically for send side as well.
+ {{kDtmfCodecName, 48000, 1}, 110},
+ {{kDtmfCodecName, 32000, 1}, 112},
+ {{kDtmfCodecName, 16000, 1}, 113},
+ {{kDtmfCodecName, 8000, 1}, 126}}) {
+ // TODO(ossu): Try to keep this as change-proof as possible until we're able
+ // to remove the payload type constants from everywhere in the code.
+ for (const auto& mapping : mappings_) {
+ used_payload_types_.insert(mapping.second);
+ }
+}
+
+PayloadTypeMapper::~PayloadTypeMapper() = default;
+
+absl::optional<int> PayloadTypeMapper::GetMappingFor(
+ const webrtc::SdpAudioFormat& format) {
+ auto iter = mappings_.find(format);
+ if (iter != mappings_.end())
+ return iter->second;
+
+ for (; next_unused_payload_type_ <= max_payload_type_;
+ ++next_unused_payload_type_) {
+ int payload_type = next_unused_payload_type_;
+ if (used_payload_types_.find(payload_type) == used_payload_types_.end()) {
+ used_payload_types_.insert(payload_type);
+ mappings_[format] = payload_type;
+ ++next_unused_payload_type_;
+ return payload_type;
+ }
+ }
+
+ return absl::nullopt;
+}
+
+absl::optional<int> PayloadTypeMapper::FindMappingFor(
+ const webrtc::SdpAudioFormat& format) const {
+ auto iter = mappings_.find(format);
+ if (iter != mappings_.end())
+ return iter->second;
+
+ return absl::nullopt;
+}
+
+absl::optional<AudioCodec> PayloadTypeMapper::ToAudioCodec(
+ const webrtc::SdpAudioFormat& format) {
+ // TODO(ossu): We can safely set bitrate to zero here, since that field is
+ // not presented in the SDP. It is used to ferry around some target bitrate
+ // values for certain codecs (ISAC and Opus) and in ways it really
+ // shouldn't. It should be removed once we no longer use CodecInsts in the
+ // ACM or NetEq.
+ auto opt_payload_type = GetMappingFor(format);
+ if (opt_payload_type) {
+ AudioCodec codec =
+ cricket::CreateAudioCodec(*opt_payload_type, format.name,
+ format.clockrate_hz, format.num_channels);
+ codec.params = format.parameters;
+ return std::move(codec);
+ }
+
+ return absl::nullopt;
+}
+
+bool PayloadTypeMapper::SdpAudioFormatOrdering::operator()(
+ const webrtc::SdpAudioFormat& a,
+ const webrtc::SdpAudioFormat& b) const {
+ if (a.clockrate_hz == b.clockrate_hz) {
+ if (a.num_channels == b.num_channels) {
+ int name_cmp =
+ absl::AsciiStrToLower(a.name).compare(absl::AsciiStrToLower(b.name));
+ if (name_cmp == 0)
+ return a.parameters < b.parameters;
+ return name_cmp < 0;
+ }
+ return a.num_channels < b.num_channels;
+ }
+ return a.clockrate_hz < b.clockrate_hz;
+}
+
+} // namespace cricket
diff --git a/third_party/libwebrtc/media/engine/payload_type_mapper.h b/third_party/libwebrtc/media/engine/payload_type_mapper.h
new file mode 100644
index 0000000000..1d5cd7198f
--- /dev/null
+++ b/third_party/libwebrtc/media/engine/payload_type_mapper.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MEDIA_ENGINE_PAYLOAD_TYPE_MAPPER_H_
+#define MEDIA_ENGINE_PAYLOAD_TYPE_MAPPER_H_
+
+#include <map>
+#include <set>
+
+#include "absl/types/optional.h"
+#include "api/audio_codecs/audio_format.h"
+#include "media/base/codec.h"
+
+namespace cricket {
+
+webrtc::SdpAudioFormat AudioCodecToSdpAudioFormat(const AudioCodec& ac);
+
+class PayloadTypeMapper {
+ public:
+ PayloadTypeMapper();
+ ~PayloadTypeMapper();
+
+ // Finds the current payload type for `format` or assigns a new one, if no
+ // current mapping exists. Will return an empty value if it was unable to
+ // create a mapping, i.e. if all dynamic payload type ids have been used up.
+ absl::optional<int> GetMappingFor(const webrtc::SdpAudioFormat& format);
+
+ // Finds the current payload type for `format`, if any. Returns an empty value
+ // if no payload type mapping exists for the format.
+ absl::optional<int> FindMappingFor(
+ const webrtc::SdpAudioFormat& format) const;
+
+ // Like GetMappingFor, but fills in an AudioCodec structure with the necessary
+ // information instead.
+ absl::optional<AudioCodec> ToAudioCodec(const webrtc::SdpAudioFormat& format);
+
+ private:
+ struct SdpAudioFormatOrdering {
+ bool operator()(const webrtc::SdpAudioFormat& a,
+ const webrtc::SdpAudioFormat& b) const;
+ };
+
+ int next_unused_payload_type_;
+ int max_payload_type_;
+ std::map<webrtc::SdpAudioFormat, int, SdpAudioFormatOrdering> mappings_;
+ std::set<int> used_payload_types_;
+};
+
+} // namespace cricket
+#endif // MEDIA_ENGINE_PAYLOAD_TYPE_MAPPER_H_
diff --git a/third_party/libwebrtc/media/engine/payload_type_mapper_unittest.cc b/third_party/libwebrtc/media/engine/payload_type_mapper_unittest.cc
new file mode 100644
index 0000000000..92253a0f5d
--- /dev/null
+++ b/third_party/libwebrtc/media/engine/payload_type_mapper_unittest.cc
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/engine/payload_type_mapper.h"
+
+#include <set>
+#include <string>
+
+#include "absl/strings/string_view.h"
+#include "media/base/media_constants.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace cricket {
+
+class PayloadTypeMapperTest : public ::testing::Test {
+ protected:
+ PayloadTypeMapper mapper_;
+};
+
+TEST_F(PayloadTypeMapperTest, StaticPayloadTypes) {
+ EXPECT_EQ(0, mapper_.FindMappingFor({"pcmu", 8000, 1}));
+ EXPECT_EQ(3, mapper_.FindMappingFor({"gsm", 8000, 1}));
+ EXPECT_EQ(4, mapper_.FindMappingFor({"g723", 8000, 1}));
+ EXPECT_EQ(5, mapper_.FindMappingFor({"dvi4", 8000, 1}));
+ EXPECT_EQ(6, mapper_.FindMappingFor({"dvi4", 16000, 1}));
+ EXPECT_EQ(7, mapper_.FindMappingFor({"lpc", 8000, 1}));
+ EXPECT_EQ(8, mapper_.FindMappingFor({"pcma", 8000, 1}));
+ EXPECT_EQ(9, mapper_.FindMappingFor({"g722", 8000, 1}));
+ EXPECT_EQ(10, mapper_.FindMappingFor({"l16", 44100, 2}));
+ EXPECT_EQ(11, mapper_.FindMappingFor({"l16", 44100, 1}));
+ EXPECT_EQ(12, mapper_.FindMappingFor({"qcelp", 8000, 1}));
+ EXPECT_EQ(13, mapper_.FindMappingFor({"cn", 8000, 1}));
+ EXPECT_EQ(14, mapper_.FindMappingFor({"mpa", 90000, 0}));
+ EXPECT_EQ(14, mapper_.FindMappingFor({"mpa", 90000, 1}));
+ EXPECT_EQ(15, mapper_.FindMappingFor({"g728", 8000, 1}));
+ EXPECT_EQ(16, mapper_.FindMappingFor({"dvi4", 11025, 1}));
+ EXPECT_EQ(17, mapper_.FindMappingFor({"dvi4", 22050, 1}));
+ EXPECT_EQ(18, mapper_.FindMappingFor({"g729", 8000, 1}));
+}
+
+TEST_F(PayloadTypeMapperTest, WebRTCPayloadTypes) {
+ // Tests that the payload mapper knows about the audio formats we've
+ // been using in WebRTC, with their hard coded values.
+ EXPECT_EQ(102, mapper_.FindMappingFor({kIlbcCodecName, 8000, 1}));
+ EXPECT_EQ(105, mapper_.FindMappingFor({kCnCodecName, 16000, 1}));
+ EXPECT_EQ(106, mapper_.FindMappingFor({kCnCodecName, 32000, 1}));
+ EXPECT_EQ(111, mapper_.FindMappingFor(
+ {kOpusCodecName,
+ 48000,
+ 2,
+ {{"minptime", "10"}, {"useinbandfec", "1"}}}));
+ EXPECT_EQ(
+ 63, mapper_.FindMappingFor({kRedCodecName, 48000, 2, {{"", "111/111"}}}));
+ // TODO(solenberg): Remove 16k, 32k, 48k DTMF checks once these payload types
+ // are dynamically assigned.
+ EXPECT_EQ(110, mapper_.FindMappingFor({kDtmfCodecName, 48000, 1}));
+ EXPECT_EQ(112, mapper_.FindMappingFor({kDtmfCodecName, 32000, 1}));
+ EXPECT_EQ(113, mapper_.FindMappingFor({kDtmfCodecName, 16000, 1}));
+ EXPECT_EQ(126, mapper_.FindMappingFor({kDtmfCodecName, 8000, 1}));
+}
+
+TEST_F(PayloadTypeMapperTest, ValidDynamicPayloadTypes) {
+ // RFC 3551 says:
+ // "This profile reserves payload type numbers in the range 96-127
+ // exclusively for dynamic assignment. Applications SHOULD first use
+ // values in this range for dynamic payload types. Those applications
+ // which need to define more than 32 dynamic payload types MAY bind
+ // codes below 96, in which case it is RECOMMENDED that unassigned
+ // payload type numbers be used first. However, the statically assigned
+ // payload types are default bindings and MAY be dynamically bound to
+ // new encodings if needed."
+
+ // Tests that the payload mapper uses values in the dynamic payload type range
+ // (96 - 127) before any others and that the values returned are all valid.
+ bool has_been_below_96 = false;
+ std::set<int> used_payload_types;
+ for (int i = 0; i != 256; ++i) {
+ std::string format_name = "unknown_format_" + std::to_string(i);
+ webrtc::SdpAudioFormat format(format_name.c_str(), i * 100, (i % 2) + 1);
+ auto opt_payload_type = mapper_.GetMappingFor(format);
+ bool mapper_is_full = false;
+
+ // There's a limited number of slots for payload types. We're fine with not
+ // being able to map them all.
+ if (opt_payload_type) {
+ int payload_type = *opt_payload_type;
+ EXPECT_FALSE(mapper_is_full) << "Mapping should not fail sporadically";
+ EXPECT_EQ(used_payload_types.find(payload_type), used_payload_types.end())
+ << "Payload types must not be reused";
+ used_payload_types.insert(payload_type);
+ EXPECT_GE(payload_type, 0) << "Negative payload types are invalid";
+ EXPECT_LE(payload_type, 127) << "Payload types above 127 are invalid";
+ EXPECT_FALSE(payload_type >= 96 && has_been_below_96);
+ if (payload_type < 96)
+ has_been_below_96 = true;
+
+ EXPECT_EQ(payload_type, mapper_.FindMappingFor(format))
+ << "Mapping must be permanent after successful call to "
+ "GetMappingFor";
+ EXPECT_EQ(payload_type, mapper_.GetMappingFor(format))
+ << "Subsequent calls to GetMappingFor must return the same value";
+ } else {
+ mapper_is_full = true;
+ }
+ }
+
+ // Also, we must've been able to map at least one dynamic payload type.
+ EXPECT_FALSE(used_payload_types.empty())
+ << "Mapper must support at least one user-defined payload type";
+}
+
+TEST_F(PayloadTypeMapperTest, ToAudioCodec) {
+ webrtc::SdpAudioFormat format("unknown_format", 4711, 17);
+ auto opt_payload_type = mapper_.GetMappingFor(format);
+ EXPECT_TRUE(opt_payload_type);
+ auto opt_audio_codec = mapper_.ToAudioCodec(format);
+ EXPECT_TRUE(opt_audio_codec);
+
+ if (opt_payload_type && opt_audio_codec) {
+ int payload_type = *opt_payload_type;
+ const AudioCodec& codec = *opt_audio_codec;
+
+ EXPECT_EQ(codec.id, payload_type);
+ EXPECT_EQ(codec.name, format.name);
+ EXPECT_EQ(codec.clockrate, format.clockrate_hz);
+ EXPECT_EQ(codec.channels, format.num_channels);
+ EXPECT_THAT(codec.params, ::testing::ContainerEq(format.parameters));
+ }
+}
+
+} // namespace cricket
diff --git a/third_party/libwebrtc/media/engine/simulcast_encoder_adapter.cc b/third_party/libwebrtc/media/engine/simulcast_encoder_adapter.cc
new file mode 100644
index 0000000000..4853e68996
--- /dev/null
+++ b/third_party/libwebrtc/media/engine/simulcast_encoder_adapter.cc
@@ -0,0 +1,981 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/engine/simulcast_encoder_adapter.h"
+
+#include <stdio.h>
+#include <string.h>
+
+#include <algorithm>
+#include <cstdint>
+#include <string>
+#include <utility>
+
+#include "absl/algorithm/container.h"
+#include "api/field_trials_view.h"
+#include "api/scoped_refptr.h"
+#include "api/transport/field_trial_based_config.h"
+#include "api/video/i420_buffer.h"
+#include "api/video/video_codec_constants.h"
+#include "api/video/video_frame_buffer.h"
+#include "api/video/video_rotation.h"
+#include "api/video_codecs/video_encoder.h"
+#include "api/video_codecs/video_encoder_factory.h"
+#include "api/video_codecs/video_encoder_software_fallback_wrapper.h"
+#include "media/base/media_constants.h"
+#include "media/base/video_common.h"
+#include "modules/video_coding/include/video_error_codes.h"
+#include "modules/video_coding/utility/simulcast_rate_allocator.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/experiments/rate_control_settings.h"
+#include "rtc_base/logging.h"
+
+namespace {
+
+// Max qp for lowest spatial resolution when doing simulcast.
+const unsigned int kLowestResMaxQp = 45;
+
+absl::optional<unsigned int> GetScreenshareBoostedQpValue(
+ const webrtc::FieldTrialsView& field_trials) {
+ std::string experiment_group =
+ field_trials.Lookup("WebRTC-BoostedScreenshareQp");
+ unsigned int qp;
+ if (sscanf(experiment_group.c_str(), "%u", &qp) != 1)
+ return absl::nullopt;
+ qp = std::min(qp, 63u);
+ qp = std::max(qp, 1u);
+ return qp;
+}
+
+uint32_t SumStreamMaxBitrate(int streams, const webrtc::VideoCodec& codec) {
+ uint32_t bitrate_sum = 0;
+ for (int i = 0; i < streams; ++i) {
+ bitrate_sum += codec.simulcastStream[i].maxBitrate;
+ }
+ return bitrate_sum;
+}
+
+int CountAllStreams(const webrtc::VideoCodec& codec) {
+ int total_streams_count =
+ codec.numberOfSimulcastStreams < 1 ? 1 : codec.numberOfSimulcastStreams;
+ uint32_t simulcast_max_bitrate =
+ SumStreamMaxBitrate(total_streams_count, codec);
+ if (simulcast_max_bitrate == 0) {
+ total_streams_count = 1;
+ }
+ return total_streams_count;
+}
+
+int CountActiveStreams(const webrtc::VideoCodec& codec) {
+ if (codec.numberOfSimulcastStreams < 1) {
+ return 1;
+ }
+ int total_streams_count = CountAllStreams(codec);
+ int active_streams_count = 0;
+ for (int i = 0; i < total_streams_count; ++i) {
+ if (codec.simulcastStream[i].active) {
+ ++active_streams_count;
+ }
+ }
+ return active_streams_count;
+}
+
+int VerifyCodec(const webrtc::VideoCodec* codec_settings) {
+ if (codec_settings == nullptr) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ if (codec_settings->maxFramerate < 1) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ // allow zero to represent an unspecified maxBitRate
+ if (codec_settings->maxBitrate > 0 &&
+ codec_settings->startBitrate > codec_settings->maxBitrate) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ if (codec_settings->width <= 1 || codec_settings->height <= 1) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ if (codec_settings->codecType == webrtc::kVideoCodecVP8 &&
+ codec_settings->VP8().automaticResizeOn &&
+ CountActiveStreams(*codec_settings) > 1) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+bool StreamQualityCompare(const webrtc::SimulcastStream& a,
+ const webrtc::SimulcastStream& b) {
+ return std::tie(a.height, a.width, a.maxBitrate, a.maxFramerate) <
+ std::tie(b.height, b.width, b.maxBitrate, b.maxFramerate);
+}
+
+void GetLowestAndHighestQualityStreamIndixes(
+ rtc::ArrayView<webrtc::SimulcastStream> streams,
+ int* lowest_quality_stream_idx,
+ int* highest_quality_stream_idx) {
+ const auto lowest_highest_quality_streams =
+ absl::c_minmax_element(streams, StreamQualityCompare);
+ *lowest_quality_stream_idx =
+ std::distance(streams.begin(), lowest_highest_quality_streams.first);
+ *highest_quality_stream_idx =
+ std::distance(streams.begin(), lowest_highest_quality_streams.second);
+}
+
+std::vector<uint32_t> GetStreamStartBitratesKbps(
+ const webrtc::VideoCodec& codec) {
+ std::vector<uint32_t> start_bitrates;
+ std::unique_ptr<webrtc::VideoBitrateAllocator> rate_allocator =
+ std::make_unique<webrtc::SimulcastRateAllocator>(codec);
+ webrtc::VideoBitrateAllocation allocation =
+ rate_allocator->Allocate(webrtc::VideoBitrateAllocationParameters(
+ codec.startBitrate * 1000, codec.maxFramerate));
+
+ int total_streams_count = CountAllStreams(codec);
+ for (int i = 0; i < total_streams_count; ++i) {
+ uint32_t stream_bitrate = allocation.GetSpatialLayerSum(i) / 1000;
+ start_bitrates.push_back(stream_bitrate);
+ }
+ return start_bitrates;
+}
+
+} // namespace
+
+namespace webrtc {
+
+SimulcastEncoderAdapter::EncoderContext::EncoderContext(
+ std::unique_ptr<VideoEncoder> encoder,
+ bool prefer_temporal_support,
+ VideoEncoder::EncoderInfo primary_info,
+ VideoEncoder::EncoderInfo fallback_info)
+ : encoder_(std::move(encoder)),
+ prefer_temporal_support_(prefer_temporal_support),
+ primary_info_(std::move(primary_info)),
+ fallback_info_(std::move(fallback_info)) {}
+
+void SimulcastEncoderAdapter::EncoderContext::Release() {
+ if (encoder_) {
+ encoder_->Release();
+ encoder_->RegisterEncodeCompleteCallback(nullptr);
+ }
+}
+
+SimulcastEncoderAdapter::StreamContext::StreamContext(
+ SimulcastEncoderAdapter* parent,
+ std::unique_ptr<EncoderContext> encoder_context,
+ std::unique_ptr<FramerateController> framerate_controller,
+ int stream_idx,
+ uint16_t width,
+ uint16_t height,
+ bool is_paused)
+ : parent_(parent),
+ encoder_context_(std::move(encoder_context)),
+ framerate_controller_(std::move(framerate_controller)),
+ stream_idx_(stream_idx),
+ width_(width),
+ height_(height),
+ is_keyframe_needed_(false),
+ is_paused_(is_paused) {
+ if (parent_) {
+ encoder_context_->encoder().RegisterEncodeCompleteCallback(this);
+ }
+}
+
+SimulcastEncoderAdapter::StreamContext::StreamContext(StreamContext&& rhs)
+ : parent_(rhs.parent_),
+ encoder_context_(std::move(rhs.encoder_context_)),
+ framerate_controller_(std::move(rhs.framerate_controller_)),
+ stream_idx_(rhs.stream_idx_),
+ width_(rhs.width_),
+ height_(rhs.height_),
+ is_keyframe_needed_(rhs.is_keyframe_needed_),
+ is_paused_(rhs.is_paused_) {
+ if (parent_) {
+ encoder_context_->encoder().RegisterEncodeCompleteCallback(this);
+ }
+}
+
+SimulcastEncoderAdapter::StreamContext::~StreamContext() {
+ if (encoder_context_) {
+ encoder_context_->Release();
+ }
+}
+
+std::unique_ptr<SimulcastEncoderAdapter::EncoderContext>
+SimulcastEncoderAdapter::StreamContext::ReleaseEncoderContext() && {
+ encoder_context_->Release();
+ return std::move(encoder_context_);
+}
+
+void SimulcastEncoderAdapter::StreamContext::OnKeyframe(Timestamp timestamp) {
+ is_keyframe_needed_ = false;
+ if (framerate_controller_) {
+ framerate_controller_->KeepFrame(timestamp.us() * 1000);
+ }
+}
+
+bool SimulcastEncoderAdapter::StreamContext::ShouldDropFrame(
+ Timestamp timestamp) {
+ if (!framerate_controller_) {
+ return false;
+ }
+ return framerate_controller_->ShouldDropFrame(timestamp.us() * 1000);
+}
+
+EncodedImageCallback::Result
+SimulcastEncoderAdapter::StreamContext::OnEncodedImage(
+ const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info) {
+ RTC_CHECK(parent_); // If null, this method should never be called.
+ return parent_->OnEncodedImage(stream_idx_, encoded_image,
+ codec_specific_info);
+}
+
+void SimulcastEncoderAdapter::StreamContext::OnDroppedFrame(
+ DropReason /*reason*/) {
+ RTC_CHECK(parent_); // If null, this method should never be called.
+ parent_->OnDroppedFrame(stream_idx_);
+}
+
+SimulcastEncoderAdapter::SimulcastEncoderAdapter(VideoEncoderFactory* factory,
+ const SdpVideoFormat& format)
+ : SimulcastEncoderAdapter(factory,
+ nullptr,
+ format,
+ FieldTrialBasedConfig()) {}
+
+SimulcastEncoderAdapter::SimulcastEncoderAdapter(
+ VideoEncoderFactory* primary_factory,
+ VideoEncoderFactory* fallback_factory,
+ const SdpVideoFormat& format,
+ const FieldTrialsView& field_trials)
+ : inited_(0),
+ primary_encoder_factory_(primary_factory),
+ fallback_encoder_factory_(fallback_factory),
+ video_format_(format),
+ total_streams_count_(0),
+ bypass_mode_(false),
+ encoded_complete_callback_(nullptr),
+ experimental_boosted_screenshare_qp_(
+ GetScreenshareBoostedQpValue(field_trials)),
+ boost_base_layer_quality_(
+ RateControlSettings::ParseFromKeyValueConfig(&field_trials)
+ .Vp8BoostBaseLayerQuality()),
+ prefer_temporal_support_on_base_layer_(field_trials.IsEnabled(
+ "WebRTC-Video-PreferTemporalSupportOnBaseLayer")) {
+ RTC_DCHECK(primary_factory);
+
+ // The adapter is typically created on the worker thread, but operated on
+ // the encoder task queue.
+ encoder_queue_.Detach();
+}
+
+SimulcastEncoderAdapter::~SimulcastEncoderAdapter() {
+ RTC_DCHECK(!Initialized());
+ DestroyStoredEncoders();
+}
+
+void SimulcastEncoderAdapter::SetFecControllerOverride(
+ FecControllerOverride* /*fec_controller_override*/) {
+ // Ignored.
+}
+
+int SimulcastEncoderAdapter::Release() {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+
+ while (!stream_contexts_.empty()) {
+    // Move the encoder instance and put it on the `cached_encoder_contexts_`
+    // list where it may possibly be reused from (ordering does not matter).
+ cached_encoder_contexts_.push_front(
+ std::move(stream_contexts_.back()).ReleaseEncoderContext());
+ stream_contexts_.pop_back();
+ }
+
+ bypass_mode_ = false;
+
+ // It's legal to move the encoder to another queue now.
+ encoder_queue_.Detach();
+
+ inited_.store(0);
+
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int SimulcastEncoderAdapter::InitEncode(
+ const VideoCodec* codec_settings,
+ const VideoEncoder::Settings& settings) {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+
+ if (settings.number_of_cores < 1) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+
+ int ret = VerifyCodec(codec_settings);
+ if (ret < 0) {
+ return ret;
+ }
+
+ Release();
+
+ codec_ = *codec_settings;
+ total_streams_count_ = CountAllStreams(*codec_settings);
+
+ bool is_legacy_singlecast = codec_.numberOfSimulcastStreams == 0;
+ int lowest_quality_stream_idx = 0;
+ int highest_quality_stream_idx = 0;
+ if (!is_legacy_singlecast) {
+ GetLowestAndHighestQualityStreamIndixes(
+ rtc::ArrayView<SimulcastStream>(codec_.simulcastStream,
+ total_streams_count_),
+ &lowest_quality_stream_idx, &highest_quality_stream_idx);
+ }
+
+ std::unique_ptr<EncoderContext> encoder_context = FetchOrCreateEncoderContext(
+ /*is_lowest_quality_stream=*/(
+ is_legacy_singlecast ||
+ codec_.simulcastStream[lowest_quality_stream_idx].active));
+ if (encoder_context == nullptr) {
+ return WEBRTC_VIDEO_CODEC_MEMORY;
+ }
+
+ // Two distinct scenarios:
+ // * Singlecast (total_streams_count == 1) or simulcast with simulcast-capable
+  //   underlying encoder implementation if active_streams_count > 1. SEA
+  //   operates in bypass mode: original settings are passed to the underlying
+ // encoder, frame encode complete callback is not intercepted.
+ // * Multi-encoder simulcast or singlecast if layers are deactivated
+ // (active_streams_count >= 1). SEA creates N=active_streams_count encoders
+ // and configures each to produce a single stream.
+
+ int active_streams_count = CountActiveStreams(*codec_settings);
+ // If we only have a single active layer it is better to create an encoder
+ // with only one configured layer than creating it with all-but-one disabled
+ // layers because that way we control scaling.
+ bool separate_encoders_needed =
+ !encoder_context->encoder().GetEncoderInfo().supports_simulcast ||
+ active_streams_count == 1;
+  // Singlecast or simulcast with simulcast-capable underlying encoder.
+ if (total_streams_count_ == 1 || !separate_encoders_needed) {
+ int ret = encoder_context->encoder().InitEncode(&codec_, settings);
+ if (ret >= 0) {
+ stream_contexts_.emplace_back(
+ /*parent=*/nullptr, std::move(encoder_context),
+ /*framerate_controller=*/nullptr, /*stream_idx=*/0, codec_.width,
+ codec_.height, /*is_paused=*/active_streams_count == 0);
+ bypass_mode_ = true;
+
+ DestroyStoredEncoders();
+ inited_.store(1);
+ return WEBRTC_VIDEO_CODEC_OK;
+ }
+
+ encoder_context->Release();
+ if (total_streams_count_ == 1) {
+ // Failed to initialize singlecast encoder.
+ return ret;
+ }
+ }
+
+ // Multi-encoder simulcast or singlecast (deactivated layers).
+ std::vector<uint32_t> stream_start_bitrate_kbps =
+ GetStreamStartBitratesKbps(codec_);
+
+ for (int stream_idx = 0; stream_idx < total_streams_count_; ++stream_idx) {
+ if (!is_legacy_singlecast && !codec_.simulcastStream[stream_idx].active) {
+ continue;
+ }
+
+ if (encoder_context == nullptr) {
+ encoder_context = FetchOrCreateEncoderContext(
+ /*is_lowest_quality_stream=*/stream_idx == lowest_quality_stream_idx);
+ }
+ if (encoder_context == nullptr) {
+ Release();
+ return WEBRTC_VIDEO_CODEC_MEMORY;
+ }
+
+ VideoCodec stream_codec = MakeStreamCodec(
+ codec_, stream_idx, stream_start_bitrate_kbps[stream_idx],
+ /*is_lowest_quality_stream=*/stream_idx == lowest_quality_stream_idx,
+ /*is_highest_quality_stream=*/stream_idx == highest_quality_stream_idx);
+
+ int ret = encoder_context->encoder().InitEncode(&stream_codec, settings);
+ if (ret < 0) {
+ encoder_context.reset();
+ Release();
+ return ret;
+ }
+
+ // Intercept frame encode complete callback only for upper streams, where
+ // we need to set a correct stream index. Set `parent` to nullptr for the
+ // lowest stream to bypass the callback.
+ SimulcastEncoderAdapter* parent = stream_idx > 0 ? this : nullptr;
+
+ bool is_paused = stream_start_bitrate_kbps[stream_idx] == 0;
+ stream_contexts_.emplace_back(
+ parent, std::move(encoder_context),
+ std::make_unique<FramerateController>(stream_codec.maxFramerate),
+ stream_idx, stream_codec.width, stream_codec.height, is_paused);
+ }
+
+ // To save memory, don't store encoders that we don't use.
+ DestroyStoredEncoders();
+
+ inited_.store(1);
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int SimulcastEncoderAdapter::Encode(
+ const VideoFrame& input_image,
+ const std::vector<VideoFrameType>* frame_types) {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+
+ if (!Initialized()) {
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ }
+ if (encoded_complete_callback_ == nullptr) {
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ }
+
+ if (encoder_info_override_.requested_resolution_alignment()) {
+ const int alignment =
+ *encoder_info_override_.requested_resolution_alignment();
+ if (input_image.width() % alignment != 0 ||
+ input_image.height() % alignment != 0) {
+ RTC_LOG(LS_WARNING) << "Frame " << input_image.width() << "x"
+ << input_image.height() << " not divisible by "
+ << alignment;
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ if (encoder_info_override_.apply_alignment_to_all_simulcast_layers()) {
+ for (const auto& layer : stream_contexts_) {
+ if (layer.width() % alignment != 0 || layer.height() % alignment != 0) {
+ RTC_LOG(LS_WARNING)
+ << "Codec " << layer.width() << "x" << layer.height()
+ << " not divisible by " << alignment;
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ }
+ }
+ }
+
+ bool is_keyframe_needed = false;
+ for (const auto& layer : stream_contexts_) {
+ if (layer.is_keyframe_needed()) {
+ // This is legacy behavior, generating a keyframe on all layers
+ // when generating one for a layer that became active for the first time
+ // or after being disabled.
+ is_keyframe_needed = true;
+ break;
+ }
+ }
+
+  // Temporary that may hold the result of texture to i420 buffer conversion.
+ rtc::scoped_refptr<VideoFrameBuffer> src_buffer;
+ int src_width = input_image.width();
+ int src_height = input_image.height();
+
+ for (auto& layer : stream_contexts_) {
+ // Don't encode frames in resolutions that we don't intend to send.
+ if (layer.is_paused()) {
+ continue;
+ }
+
+ // Convert timestamp from RTP 90kHz clock.
+ const Timestamp frame_timestamp =
+ Timestamp::Micros((1000 * input_image.timestamp()) / 90);
+
+ // If adapter is passed through and only one sw encoder does simulcast,
+ // frame types for all streams should be passed to the encoder unchanged.
+ // Otherwise a single per-encoder frame type is passed.
+ std::vector<VideoFrameType> stream_frame_types(
+ bypass_mode_
+ ? std::max<unsigned char>(codec_.numberOfSimulcastStreams, 1)
+ : 1,
+ VideoFrameType::kVideoFrameDelta);
+
+ bool keyframe_requested = false;
+ if (is_keyframe_needed) {
+ std::fill(stream_frame_types.begin(), stream_frame_types.end(),
+ VideoFrameType::kVideoFrameKey);
+ keyframe_requested = true;
+ } else if (frame_types) {
+ if (bypass_mode_) {
+ // In bypass mode, we effectively pass on frame_types.
+ RTC_DCHECK_EQ(frame_types->size(), stream_frame_types.size());
+ stream_frame_types = *frame_types;
+ keyframe_requested =
+ absl::c_any_of(*frame_types, [](const VideoFrameType frame_type) {
+ return frame_type == VideoFrameType::kVideoFrameKey;
+ });
+ } else {
+ size_t stream_idx = static_cast<size_t>(layer.stream_idx());
+ if (frame_types->size() >= stream_idx &&
+ (*frame_types)[stream_idx] == VideoFrameType::kVideoFrameKey) {
+ stream_frame_types[0] = VideoFrameType::kVideoFrameKey;
+ keyframe_requested = true;
+ }
+ }
+ }
+ if (keyframe_requested) {
+ layer.OnKeyframe(frame_timestamp);
+ } else if (layer.ShouldDropFrame(frame_timestamp)) {
+ continue;
+ }
+
+ // If scaling isn't required, because the input resolution
+ // matches the destination or the input image is empty (e.g.
+ // a keyframe request for encoders with internal camera
+ // sources) or the source image has a native handle, pass the image on
+ // directly. Otherwise, we'll scale it to match what the encoder expects
+ // (below).
+ // For texture frames, the underlying encoder is expected to be able to
+ // correctly sample/scale the source texture.
+ // TODO(perkj): ensure that works going forward, and figure out how this
+ // affects webrtc:5683.
+ if ((layer.width() == src_width && layer.height() == src_height) ||
+ (input_image.video_frame_buffer()->type() ==
+ VideoFrameBuffer::Type::kNative &&
+ layer.encoder().GetEncoderInfo().supports_native_handle)) {
+ int ret = layer.encoder().Encode(input_image, &stream_frame_types);
+ if (ret != WEBRTC_VIDEO_CODEC_OK) {
+ return ret;
+ }
+ } else {
+ if (src_buffer == nullptr) {
+ src_buffer = input_image.video_frame_buffer();
+ }
+ rtc::scoped_refptr<VideoFrameBuffer> dst_buffer =
+ src_buffer->Scale(layer.width(), layer.height());
+ if (!dst_buffer) {
+ RTC_LOG(LS_ERROR) << "Failed to scale video frame";
+ return WEBRTC_VIDEO_CODEC_ENCODER_FAILURE;
+ }
+
+ // UpdateRect is not propagated to lower simulcast layers currently.
+ // TODO(ilnik): Consider scaling UpdateRect together with the buffer.
+ VideoFrame frame(input_image);
+ frame.set_video_frame_buffer(dst_buffer);
+ frame.set_rotation(webrtc::kVideoRotation_0);
+ frame.set_update_rect(
+ VideoFrame::UpdateRect{0, 0, frame.width(), frame.height()});
+ int ret = layer.encoder().Encode(frame, &stream_frame_types);
+ if (ret != WEBRTC_VIDEO_CODEC_OK) {
+ return ret;
+ }
+ }
+ }
+
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int SimulcastEncoderAdapter::RegisterEncodeCompleteCallback(
+ EncodedImageCallback* callback) {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ encoded_complete_callback_ = callback;
+ if (!stream_contexts_.empty() && stream_contexts_.front().stream_idx() == 0) {
+ // Bypass frame encode complete callback for the lowest layer since there is
+ // no need to override frame's spatial index.
+ stream_contexts_.front().encoder().RegisterEncodeCompleteCallback(callback);
+ }
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+void SimulcastEncoderAdapter::SetRates(
+ const RateControlParameters& parameters) {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+
+ if (!Initialized()) {
+ RTC_LOG(LS_WARNING) << "SetRates while not initialized";
+ return;
+ }
+
+ if (parameters.framerate_fps < 1.0) {
+ RTC_LOG(LS_WARNING) << "Invalid framerate: " << parameters.framerate_fps;
+ return;
+ }
+
+ codec_.maxFramerate = static_cast<uint32_t>(parameters.framerate_fps + 0.5);
+
+ if (bypass_mode_) {
+ stream_contexts_.front().encoder().SetRates(parameters);
+ return;
+ }
+
+ for (StreamContext& layer_context : stream_contexts_) {
+ int stream_idx = layer_context.stream_idx();
+ uint32_t stream_bitrate_kbps =
+ parameters.bitrate.GetSpatialLayerSum(stream_idx) / 1000;
+
+ // Need a key frame if we have not sent this stream before.
+ if (stream_bitrate_kbps > 0 && layer_context.is_paused()) {
+ layer_context.set_is_keyframe_needed();
+ }
+ layer_context.set_is_paused(stream_bitrate_kbps == 0);
+
+ // Slice the temporal layers out of the full allocation and pass it on to
+ // the encoder handling the current simulcast stream.
+ RateControlParameters stream_parameters = parameters;
+ stream_parameters.bitrate = VideoBitrateAllocation();
+ for (int i = 0; i < kMaxTemporalStreams; ++i) {
+ if (parameters.bitrate.HasBitrate(stream_idx, i)) {
+ stream_parameters.bitrate.SetBitrate(
+ 0, i, parameters.bitrate.GetBitrate(stream_idx, i));
+ }
+ }
+
+ // Assign link allocation proportionally to spatial layer allocation.
+ if (!parameters.bandwidth_allocation.IsZero() &&
+ parameters.bitrate.get_sum_bps() > 0) {
+ stream_parameters.bandwidth_allocation =
+ DataRate::BitsPerSec((parameters.bandwidth_allocation.bps() *
+ stream_parameters.bitrate.get_sum_bps()) /
+ parameters.bitrate.get_sum_bps());
+ // Make sure we don't allocate bandwidth lower than target bitrate.
+ if (stream_parameters.bandwidth_allocation.bps() <
+ stream_parameters.bitrate.get_sum_bps()) {
+ stream_parameters.bandwidth_allocation =
+ DataRate::BitsPerSec(stream_parameters.bitrate.get_sum_bps());
+ }
+ }
+
+ stream_parameters.framerate_fps = std::min<double>(
+ parameters.framerate_fps,
+ layer_context.target_fps().value_or(parameters.framerate_fps));
+
+ layer_context.encoder().SetRates(stream_parameters);
+ }
+}
+
+void SimulcastEncoderAdapter::OnPacketLossRateUpdate(float packet_loss_rate) {
+ for (auto& c : stream_contexts_) {
+ c.encoder().OnPacketLossRateUpdate(packet_loss_rate);
+ }
+}
+
+void SimulcastEncoderAdapter::OnRttUpdate(int64_t rtt_ms) {
+ for (auto& c : stream_contexts_) {
+ c.encoder().OnRttUpdate(rtt_ms);
+ }
+}
+
+void SimulcastEncoderAdapter::OnLossNotification(
+ const LossNotification& loss_notification) {
+ for (auto& c : stream_contexts_) {
+ c.encoder().OnLossNotification(loss_notification);
+ }
+}
+
+// TODO(brandtr): Add task checker to this member function, when all encoder
+// callbacks are coming in on the encoder queue.
+EncodedImageCallback::Result SimulcastEncoderAdapter::OnEncodedImage(
+ size_t stream_idx,
+ const EncodedImage& encodedImage,
+ const CodecSpecificInfo* codecSpecificInfo) {
+ EncodedImage stream_image(encodedImage);
+ CodecSpecificInfo stream_codec_specific = *codecSpecificInfo;
+
+ stream_image.SetSimulcastIndex(stream_idx);
+
+ return encoded_complete_callback_->OnEncodedImage(stream_image,
+ &stream_codec_specific);
+}
+
+void SimulcastEncoderAdapter::OnDroppedFrame(size_t stream_idx) {
+ // Not yet implemented.
+}
+
+bool SimulcastEncoderAdapter::Initialized() const {
+ return inited_.load() == 1;
+}
+
+void SimulcastEncoderAdapter::DestroyStoredEncoders() {
+ while (!cached_encoder_contexts_.empty()) {
+ cached_encoder_contexts_.pop_back();
+ }
+}
+
+std::unique_ptr<SimulcastEncoderAdapter::EncoderContext>
+SimulcastEncoderAdapter::FetchOrCreateEncoderContext(
+ bool is_lowest_quality_stream) const {
+ bool prefer_temporal_support = fallback_encoder_factory_ != nullptr &&
+ is_lowest_quality_stream &&
+ prefer_temporal_support_on_base_layer_;
+
+ // Toggling of `prefer_temporal_support` requires encoder recreation. Find
+ // and reuse encoder with desired `prefer_temporal_support`. Otherwise, if
+ // there is no such encoder in the cache, create a new instance.
+ auto encoder_context_iter =
+ std::find_if(cached_encoder_contexts_.begin(),
+ cached_encoder_contexts_.end(), [&](auto& encoder_context) {
+ return encoder_context->prefer_temporal_support() ==
+ prefer_temporal_support;
+ });
+
+ std::unique_ptr<SimulcastEncoderAdapter::EncoderContext> encoder_context;
+ if (encoder_context_iter != cached_encoder_contexts_.end()) {
+ encoder_context = std::move(*encoder_context_iter);
+ cached_encoder_contexts_.erase(encoder_context_iter);
+ } else {
+ std::unique_ptr<VideoEncoder> primary_encoder =
+ primary_encoder_factory_->CreateVideoEncoder(video_format_);
+
+ std::unique_ptr<VideoEncoder> fallback_encoder;
+ if (fallback_encoder_factory_ != nullptr) {
+ fallback_encoder =
+ fallback_encoder_factory_->CreateVideoEncoder(video_format_);
+ }
+
+ std::unique_ptr<VideoEncoder> encoder;
+ VideoEncoder::EncoderInfo primary_info;
+ VideoEncoder::EncoderInfo fallback_info;
+
+ if (primary_encoder != nullptr) {
+ primary_info = primary_encoder->GetEncoderInfo();
+ fallback_info = primary_info;
+
+ if (fallback_encoder == nullptr) {
+ encoder = std::move(primary_encoder);
+ } else {
+ encoder = CreateVideoEncoderSoftwareFallbackWrapper(
+ std::move(fallback_encoder), std::move(primary_encoder),
+ prefer_temporal_support);
+ }
+ } else if (fallback_encoder != nullptr) {
+ RTC_LOG(LS_WARNING) << "Failed to create primary " << video_format_.name
+ << " encoder. Use fallback encoder.";
+ fallback_info = fallback_encoder->GetEncoderInfo();
+ primary_info = fallback_info;
+ encoder = std::move(fallback_encoder);
+ } else {
+ RTC_LOG(LS_ERROR) << "Failed to create primary and fallback "
+ << video_format_.name << " encoders.";
+ return nullptr;
+ }
+
+ encoder_context = std::make_unique<SimulcastEncoderAdapter::EncoderContext>(
+ std::move(encoder), prefer_temporal_support, primary_info,
+ fallback_info);
+ }
+
+ encoder_context->encoder().RegisterEncodeCompleteCallback(
+ encoded_complete_callback_);
+ return encoder_context;
+}
+
+webrtc::VideoCodec SimulcastEncoderAdapter::MakeStreamCodec(
+ const webrtc::VideoCodec& codec,
+ int stream_idx,
+ uint32_t start_bitrate_kbps,
+ bool is_lowest_quality_stream,
+ bool is_highest_quality_stream) {
+ webrtc::VideoCodec codec_params = codec;
+ const SimulcastStream& stream_params = codec.simulcastStream[stream_idx];
+
+ codec_params.numberOfSimulcastStreams = 0;
+ codec_params.width = stream_params.width;
+ codec_params.height = stream_params.height;
+ codec_params.maxBitrate = stream_params.maxBitrate;
+ codec_params.minBitrate = stream_params.minBitrate;
+ codec_params.maxFramerate = stream_params.maxFramerate;
+ codec_params.qpMax = stream_params.qpMax;
+ codec_params.active = stream_params.active;
+ // By default, `scalability_mode` comes from SimulcastStream when
+ // SimulcastEncoderAdapter is used. This allows multiple encodings of L1Tx,
+ // but SimulcastStream currently does not support multiple spatial layers.
+ ScalabilityMode scalability_mode = stream_params.GetScalabilityMode();
+ // To support the full set of scalability modes in the event that this is the
+ // only active encoding, prefer VideoCodec::GetScalabilityMode() if all other
+ // encodings are inactive.
+ if (codec.GetScalabilityMode().has_value()) {
+ bool only_active_stream = true;
+ for (int i = 0; i < codec.numberOfSimulcastStreams; ++i) {
+ if (i != stream_idx && codec.simulcastStream[i].active) {
+ only_active_stream = false;
+ break;
+ }
+ }
+ if (only_active_stream) {
+ scalability_mode = codec.GetScalabilityMode().value();
+ }
+ }
+ codec_params.SetScalabilityMode(scalability_mode);
+ // Settings that are based on stream/resolution.
+ if (is_lowest_quality_stream) {
+ // Settings for lowest spatial resolutions.
+ if (codec.mode == VideoCodecMode::kScreensharing) {
+ if (experimental_boosted_screenshare_qp_) {
+ codec_params.qpMax = *experimental_boosted_screenshare_qp_;
+ }
+ } else if (boost_base_layer_quality_) {
+ codec_params.qpMax = kLowestResMaxQp;
+ }
+ }
+ if (codec.codecType == webrtc::kVideoCodecVP8) {
+ codec_params.VP8()->numberOfTemporalLayers =
+ stream_params.numberOfTemporalLayers;
+ if (!is_highest_quality_stream) {
+ // For resolutions below CIF, set the codec `complexity` parameter to
+ // kComplexityHigher, which maps to cpu_used = -4.
+ int pixels_per_frame = codec_params.width * codec_params.height;
+ if (pixels_per_frame < 352 * 288) {
+ codec_params.SetVideoEncoderComplexity(
+ webrtc::VideoCodecComplexity::kComplexityHigher);
+ }
+ // Turn off denoising for all streams but the highest resolution.
+ codec_params.VP8()->denoisingOn = false;
+ }
+ } else if (codec.codecType == webrtc::kVideoCodecH264) {
+ codec_params.H264()->numberOfTemporalLayers =
+ stream_params.numberOfTemporalLayers;
+ }
+
+ // Cap start bitrate to the min bitrate in order to avoid strange codec
+ // behavior.
+ codec_params.startBitrate =
+ std::max(stream_params.minBitrate, start_bitrate_kbps);
+
+ // Legacy screenshare mode is only enabled for the first simulcast layer
+ codec_params.legacy_conference_mode =
+ codec.legacy_conference_mode && stream_idx == 0;
+
+ return codec_params;
+}
+
+void SimulcastEncoderAdapter::OverrideFromFieldTrial(
+ VideoEncoder::EncoderInfo* info) const {
+ if (encoder_info_override_.requested_resolution_alignment()) {
+ info->requested_resolution_alignment = cricket::LeastCommonMultiple(
+ info->requested_resolution_alignment,
+ *encoder_info_override_.requested_resolution_alignment());
+ info->apply_alignment_to_all_simulcast_layers =
+ info->apply_alignment_to_all_simulcast_layers ||
+ encoder_info_override_.apply_alignment_to_all_simulcast_layers();
+ }
+ // Override resolution bitrate limits unless they're set already.
+ if (info->resolution_bitrate_limits.empty() &&
+ !encoder_info_override_.resolution_bitrate_limits().empty()) {
+ info->resolution_bitrate_limits =
+ encoder_info_override_.resolution_bitrate_limits();
+ }
+}
+
+VideoEncoder::EncoderInfo SimulcastEncoderAdapter::GetEncoderInfo() const {
+ if (stream_contexts_.size() == 1) {
+ // Not using simulcast adapting functionality, just pass through.
+ VideoEncoder::EncoderInfo info =
+ stream_contexts_.front().encoder().GetEncoderInfo();
+ OverrideFromFieldTrial(&info);
+ return info;
+ }
+
+ VideoEncoder::EncoderInfo encoder_info;
+ encoder_info.implementation_name = "SimulcastEncoderAdapter";
+ encoder_info.requested_resolution_alignment = 1;
+ encoder_info.apply_alignment_to_all_simulcast_layers = false;
+ encoder_info.supports_native_handle = true;
+ encoder_info.scaling_settings.thresholds = absl::nullopt;
+
+ if (stream_contexts_.empty()) {
+ // GetEncoderInfo queried before InitEncode. Only alignment info is needed
+ // to be filled.
+ // Create one encoder and query it.
+
+ std::unique_ptr<SimulcastEncoderAdapter::EncoderContext> encoder_context =
+ FetchOrCreateEncoderContext(/*is_lowest_quality_stream=*/true);
+ if (encoder_context == nullptr) {
+ return encoder_info;
+ }
+
+ const VideoEncoder::EncoderInfo& primary_info =
+ encoder_context->PrimaryInfo();
+ const VideoEncoder::EncoderInfo& fallback_info =
+ encoder_context->FallbackInfo();
+
+ encoder_info.requested_resolution_alignment = cricket::LeastCommonMultiple(
+ primary_info.requested_resolution_alignment,
+ fallback_info.requested_resolution_alignment);
+
+ encoder_info.apply_alignment_to_all_simulcast_layers =
+ primary_info.apply_alignment_to_all_simulcast_layers ||
+ fallback_info.apply_alignment_to_all_simulcast_layers;
+
+ if (!primary_info.supports_simulcast || !fallback_info.supports_simulcast) {
+ encoder_info.apply_alignment_to_all_simulcast_layers = true;
+ }
+
+ cached_encoder_contexts_.emplace_back(std::move(encoder_context));
+
+ OverrideFromFieldTrial(&encoder_info);
+ return encoder_info;
+ }
+
+ encoder_info.scaling_settings = VideoEncoder::ScalingSettings::kOff;
+
+ for (size_t i = 0; i < stream_contexts_.size(); ++i) {
+ VideoEncoder::EncoderInfo encoder_impl_info =
+ stream_contexts_[i].encoder().GetEncoderInfo();
+ if (i == 0) {
+ // Encoder name indicates names of all sub-encoders.
+ encoder_info.implementation_name += " (";
+ encoder_info.implementation_name += encoder_impl_info.implementation_name;
+
+ encoder_info.supports_native_handle =
+ encoder_impl_info.supports_native_handle;
+ encoder_info.has_trusted_rate_controller =
+ encoder_impl_info.has_trusted_rate_controller;
+ encoder_info.is_hardware_accelerated =
+ encoder_impl_info.is_hardware_accelerated;
+ encoder_info.is_qp_trusted = encoder_impl_info.is_qp_trusted;
+ } else {
+ encoder_info.implementation_name += ", ";
+ encoder_info.implementation_name += encoder_impl_info.implementation_name;
+
+ // Native handle supported if any encoder supports it.
+ encoder_info.supports_native_handle |=
+ encoder_impl_info.supports_native_handle;
+
+ // Trusted rate controller only if all encoders have it.
+ encoder_info.has_trusted_rate_controller &=
+ encoder_impl_info.has_trusted_rate_controller;
+
+ // Uses hardware support if any of the encoders uses it.
+ // For example, if we are having issues with down-scaling due to
+ // pipelining delay in HW encoders we need higher encoder usage
+ // thresholds in CPU adaptation.
+ encoder_info.is_hardware_accelerated |=
+ encoder_impl_info.is_hardware_accelerated;
+
+ // Treat QP from frame/slice/tile header as average QP only if all
+ // encoders report it as average QP.
+ encoder_info.is_qp_trusted =
+ encoder_info.is_qp_trusted.value_or(true) &&
+ encoder_impl_info.is_qp_trusted.value_or(true);
+ }
+ encoder_info.fps_allocation[i] = encoder_impl_info.fps_allocation[0];
+ encoder_info.requested_resolution_alignment = cricket::LeastCommonMultiple(
+ encoder_info.requested_resolution_alignment,
+ encoder_impl_info.requested_resolution_alignment);
+ // request alignment on all layers if any of the encoders may need it, or
+ // if any non-top layer encoder requests a non-trivial alignment.
+ if (encoder_impl_info.apply_alignment_to_all_simulcast_layers ||
+ (encoder_impl_info.requested_resolution_alignment > 1 &&
+ (codec_.simulcastStream[i].height < codec_.height ||
+ codec_.simulcastStream[i].width < codec_.width))) {
+ encoder_info.apply_alignment_to_all_simulcast_layers = true;
+ }
+ }
+ encoder_info.implementation_name += ")";
+
+ OverrideFromFieldTrial(&encoder_info);
+
+ return encoder_info;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/media/engine/simulcast_encoder_adapter.h b/third_party/libwebrtc/media/engine/simulcast_encoder_adapter.h
new file mode 100644
index 0000000000..553a6a0819
--- /dev/null
+++ b/third_party/libwebrtc/media/engine/simulcast_encoder_adapter.h
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef MEDIA_ENGINE_SIMULCAST_ENCODER_ADAPTER_H_
+#define MEDIA_ENGINE_SIMULCAST_ENCODER_ADAPTER_H_
+
+#include <atomic>
+#include <list>
+#include <memory>
+#include <stack>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/fec_controller_override.h"
+#include "api/field_trials_view.h"
+#include "api/sequence_checker.h"
+#include "api/video_codecs/sdp_video_format.h"
+#include "api/video_codecs/video_encoder.h"
+#include "api/video_codecs/video_encoder_factory.h"
+#include "common_video/framerate_controller.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "rtc_base/experiments/encoder_info_settings.h"
+#include "rtc_base/system/no_unique_address.h"
+#include "rtc_base/system/rtc_export.h"
+
+namespace webrtc {
+
+// SimulcastEncoderAdapter implements simulcast support by creating multiple
+// webrtc::VideoEncoder instances with the given VideoEncoderFactory.
+// The object is created and destroyed on the worker thread, but all public
+// interfaces should be called from the encoder task queue.
+class RTC_EXPORT SimulcastEncoderAdapter : public VideoEncoder {
+ public:
+ // TODO(bugs.webrtc.org/11000): Remove when downstream usage is gone.
+ SimulcastEncoderAdapter(VideoEncoderFactory* primarty_factory,
+ const SdpVideoFormat& format);
+ // `primary_factory` produces the first-choice encoders to use.
+ // `fallback_factory`, if non-null, is used to create fallback encoder that
+ // will be used if InitEncode() fails for the primary encoder.
+ SimulcastEncoderAdapter(VideoEncoderFactory* primary_factory,
+ VideoEncoderFactory* fallback_factory,
+ const SdpVideoFormat& format,
+ const FieldTrialsView& field_trials);
+ ~SimulcastEncoderAdapter() override;
+
+ // Implements VideoEncoder.
+ void SetFecControllerOverride(
+ FecControllerOverride* fec_controller_override) override;
+ int Release() override;
+ int InitEncode(const VideoCodec* codec_settings,
+ const VideoEncoder::Settings& settings) override;
+ int Encode(const VideoFrame& input_image,
+ const std::vector<VideoFrameType>* frame_types) override;
+ int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override;
+ void SetRates(const RateControlParameters& parameters) override;
+ void OnPacketLossRateUpdate(float packet_loss_rate) override;
+ void OnRttUpdate(int64_t rtt_ms) override;
+ void OnLossNotification(const LossNotification& loss_notification) override;
+
+ EncoderInfo GetEncoderInfo() const override;
+
+ private:
+ class EncoderContext {
+ public:
+ EncoderContext(std::unique_ptr<VideoEncoder> encoder,
+ bool prefer_temporal_support,
+ VideoEncoder::EncoderInfo primary_info,
+ VideoEncoder::EncoderInfo fallback_info);
+ EncoderContext& operator=(EncoderContext&&) = delete;
+
+ VideoEncoder& encoder() { return *encoder_; }
+ bool prefer_temporal_support() { return prefer_temporal_support_; }
+ void Release();
+
+ const VideoEncoder::EncoderInfo& PrimaryInfo() { return primary_info_; }
+
+ const VideoEncoder::EncoderInfo& FallbackInfo() { return fallback_info_; }
+
+ private:
+ std::unique_ptr<VideoEncoder> encoder_;
+ bool prefer_temporal_support_;
+ const VideoEncoder::EncoderInfo primary_info_;
+ const VideoEncoder::EncoderInfo fallback_info_;
+ };
+
+ class StreamContext : public EncodedImageCallback {
+ public:
+ StreamContext(SimulcastEncoderAdapter* parent,
+ std::unique_ptr<EncoderContext> encoder_context,
+ std::unique_ptr<FramerateController> framerate_controller,
+ int stream_idx,
+ uint16_t width,
+ uint16_t height,
+ bool send_stream);
+ StreamContext(StreamContext&& rhs);
+ StreamContext& operator=(StreamContext&&) = delete;
+ ~StreamContext() override;
+
+ Result OnEncodedImage(
+ const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info) override;
+ void OnDroppedFrame(DropReason reason) override;
+
+ VideoEncoder& encoder() { return encoder_context_->encoder(); }
+ const VideoEncoder& encoder() const { return encoder_context_->encoder(); }
+ int stream_idx() const { return stream_idx_; }
+ uint16_t width() const { return width_; }
+ uint16_t height() const { return height_; }
+ bool is_keyframe_needed() const {
+ return !is_paused_ && is_keyframe_needed_;
+ }
+ void set_is_keyframe_needed() { is_keyframe_needed_ = true; }
+ bool is_paused() const { return is_paused_; }
+ void set_is_paused(bool is_paused) { is_paused_ = is_paused; }
+ absl::optional<double> target_fps() const {
+ return framerate_controller_ == nullptr
+ ? absl::nullopt
+ : absl::optional<double>(
+ framerate_controller_->GetMaxFramerate());
+ }
+
+ std::unique_ptr<EncoderContext> ReleaseEncoderContext() &&;
+ void OnKeyframe(Timestamp timestamp);
+ bool ShouldDropFrame(Timestamp timestamp);
+
+ private:
+ SimulcastEncoderAdapter* const parent_;
+ std::unique_ptr<EncoderContext> encoder_context_;
+ std::unique_ptr<FramerateController> framerate_controller_;
+ const int stream_idx_;
+ const uint16_t width_;
+ const uint16_t height_;
+ bool is_keyframe_needed_;
+ bool is_paused_;
+ };
+
+ bool Initialized() const;
+
+ void DestroyStoredEncoders();
+
+  // This method creates an encoder. It may reuse previously created encoders
+  // from `cached_encoder_contexts_`. It's const because it's used from the
+  // const GetEncoderInfo().
+ std::unique_ptr<EncoderContext> FetchOrCreateEncoderContext(
+ bool is_lowest_quality_stream) const;
+
+ webrtc::VideoCodec MakeStreamCodec(const webrtc::VideoCodec& codec,
+ int stream_idx,
+ uint32_t start_bitrate_kbps,
+ bool is_lowest_quality_stream,
+ bool is_highest_quality_stream);
+
+ EncodedImageCallback::Result OnEncodedImage(
+ size_t stream_idx,
+ const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info);
+
+ void OnDroppedFrame(size_t stream_idx);
+
+ void OverrideFromFieldTrial(VideoEncoder::EncoderInfo* info) const;
+
+ std::atomic<int> inited_;
+ VideoEncoderFactory* const primary_encoder_factory_;
+ VideoEncoderFactory* const fallback_encoder_factory_;
+ const SdpVideoFormat video_format_;
+ VideoCodec codec_;
+ int total_streams_count_;
+ bool bypass_mode_;
+ std::vector<StreamContext> stream_contexts_;
+ EncodedImageCallback* encoded_complete_callback_;
+
+ // Used for checking the single-threaded access of the encoder interface.
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker encoder_queue_;
+
+  // Store previously created and released encoders, so they don't have to be
+  // recreated. Remaining encoders are destroyed by the destructor.
+  // Marked as `mutable` because we may need to temporarily create an encoder
+  // in GetEncoderInfo(), which is const.
+ mutable std::list<std::unique_ptr<EncoderContext>> cached_encoder_contexts_;
+
+ const absl::optional<unsigned int> experimental_boosted_screenshare_qp_;
+ const bool boost_base_layer_quality_;
+ const bool prefer_temporal_support_on_base_layer_;
+
+ const SimulcastEncoderAdapterEncoderInfoSettings encoder_info_override_;
+};
+
+} // namespace webrtc
+
+#endif // MEDIA_ENGINE_SIMULCAST_ENCODER_ADAPTER_H_
diff --git a/third_party/libwebrtc/media/engine/simulcast_encoder_adapter_unittest.cc b/third_party/libwebrtc/media/engine/simulcast_encoder_adapter_unittest.cc
new file mode 100644
index 0000000000..e2ac5ea390
--- /dev/null
+++ b/third_party/libwebrtc/media/engine/simulcast_encoder_adapter_unittest.cc
@@ -0,0 +1,1902 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/engine/simulcast_encoder_adapter.h"
+
+#include <array>
+#include <memory>
+#include <vector>
+
+#include "api/field_trials_view.h"
+#include "api/test/create_simulcast_test_fixture.h"
+#include "api/test/simulcast_test_fixture.h"
+#include "api/test/video/function_video_decoder_factory.h"
+#include "api/test/video/function_video_encoder_factory.h"
+#include "api/video/video_codec_constants.h"
+#include "api/video_codecs/sdp_video_format.h"
+#include "api/video_codecs/video_encoder.h"
+#include "api/video_codecs/video_encoder_factory.h"
+#include "common_video/include/video_frame_buffer.h"
+#include "media/base/media_constants.h"
+#include "media/engine/internal_encoder_factory.h"
+#include "modules/video_coding/codecs/vp8/include/vp8.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "modules/video_coding/utility/simulcast_test_fixture_impl.h"
+#include "rtc_base/checks.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/scoped_key_value_config.h"
+
+using ::testing::_;
+using ::testing::Return;
+using EncoderInfo = webrtc::VideoEncoder::EncoderInfo;
+using FramerateFractions =
+ absl::InlinedVector<uint8_t, webrtc::kMaxTemporalStreams>;
+
+namespace webrtc {
+namespace test {
+
+namespace {
+
+constexpr int kDefaultWidth = 1280;
+constexpr int kDefaultHeight = 720;
+
+const VideoEncoder::Capabilities kCapabilities(false);
+const VideoEncoder::Settings kSettings(kCapabilities, 1, 1200);
+
+std::unique_ptr<SimulcastTestFixture> CreateSpecificSimulcastTestFixture(
+ VideoEncoderFactory* internal_encoder_factory) {
+ std::unique_ptr<VideoEncoderFactory> encoder_factory =
+ std::make_unique<FunctionVideoEncoderFactory>(
+ [internal_encoder_factory]() {
+ return std::make_unique<SimulcastEncoderAdapter>(
+ internal_encoder_factory,
+ SdpVideoFormat(cricket::kVp8CodecName));
+ });
+ std::unique_ptr<VideoDecoderFactory> decoder_factory =
+ std::make_unique<FunctionVideoDecoderFactory>(
+ []() { return VP8Decoder::Create(); });
+ return CreateSimulcastTestFixture(std::move(encoder_factory),
+ std::move(decoder_factory),
+ SdpVideoFormat(cricket::kVp8CodecName));
+}
+} // namespace
+
+TEST(SimulcastEncoderAdapterSimulcastTest, TestKeyFrameRequestsOnAllStreams) {
+ InternalEncoderFactory internal_encoder_factory;
+ auto fixture = CreateSpecificSimulcastTestFixture(&internal_encoder_factory);
+ fixture->TestKeyFrameRequestsOnAllStreams();
+}
+
+TEST(SimulcastEncoderAdapterSimulcastTest, TestPaddingAllStreams) {
+ InternalEncoderFactory internal_encoder_factory;
+ auto fixture = CreateSpecificSimulcastTestFixture(&internal_encoder_factory);
+ fixture->TestPaddingAllStreams();
+}
+
+TEST(SimulcastEncoderAdapterSimulcastTest, TestPaddingTwoStreams) {
+ InternalEncoderFactory internal_encoder_factory;
+ auto fixture = CreateSpecificSimulcastTestFixture(&internal_encoder_factory);
+ fixture->TestPaddingTwoStreams();
+}
+
+TEST(SimulcastEncoderAdapterSimulcastTest, TestPaddingTwoStreamsOneMaxedOut) {
+ InternalEncoderFactory internal_encoder_factory;
+ auto fixture = CreateSpecificSimulcastTestFixture(&internal_encoder_factory);
+ fixture->TestPaddingTwoStreamsOneMaxedOut();
+}
+
+TEST(SimulcastEncoderAdapterSimulcastTest, TestPaddingOneStream) {
+ InternalEncoderFactory internal_encoder_factory;
+ auto fixture = CreateSpecificSimulcastTestFixture(&internal_encoder_factory);
+ fixture->TestPaddingOneStream();
+}
+
+TEST(SimulcastEncoderAdapterSimulcastTest, TestPaddingOneStreamTwoMaxedOut) {
+ InternalEncoderFactory internal_encoder_factory;
+ auto fixture = CreateSpecificSimulcastTestFixture(&internal_encoder_factory);
+ fixture->TestPaddingOneStreamTwoMaxedOut();
+}
+
+TEST(SimulcastEncoderAdapterSimulcastTest, TestSendAllStreams) {
+ InternalEncoderFactory internal_encoder_factory;
+ auto fixture = CreateSpecificSimulcastTestFixture(&internal_encoder_factory);
+ fixture->TestSendAllStreams();
+}
+
+TEST(SimulcastEncoderAdapterSimulcastTest, TestDisablingStreams) {
+ InternalEncoderFactory internal_encoder_factory;
+ auto fixture = CreateSpecificSimulcastTestFixture(&internal_encoder_factory);
+ fixture->TestDisablingStreams();
+}
+
+TEST(SimulcastEncoderAdapterSimulcastTest, TestActiveStreams) {
+ InternalEncoderFactory internal_encoder_factory;
+ auto fixture = CreateSpecificSimulcastTestFixture(&internal_encoder_factory);
+ fixture->TestActiveStreams();
+}
+
+TEST(SimulcastEncoderAdapterSimulcastTest, TestSwitchingToOneStream) {
+ InternalEncoderFactory internal_encoder_factory;
+ auto fixture = CreateSpecificSimulcastTestFixture(&internal_encoder_factory);
+ fixture->TestSwitchingToOneStream();
+}
+
+TEST(SimulcastEncoderAdapterSimulcastTest, TestSwitchingToOneOddStream) {
+ InternalEncoderFactory internal_encoder_factory;
+ auto fixture = CreateSpecificSimulcastTestFixture(&internal_encoder_factory);
+ fixture->TestSwitchingToOneOddStream();
+}
+
+TEST(SimulcastEncoderAdapterSimulcastTest, TestStrideEncodeDecode) {
+ InternalEncoderFactory internal_encoder_factory;
+ auto fixture = CreateSpecificSimulcastTestFixture(&internal_encoder_factory);
+ fixture->TestStrideEncodeDecode();
+}
+
+TEST(SimulcastEncoderAdapterSimulcastTest,
+ TestSpatioTemporalLayers333PatternEncoder) {
+ InternalEncoderFactory internal_encoder_factory;
+ auto fixture = CreateSpecificSimulcastTestFixture(&internal_encoder_factory);
+ fixture->TestSpatioTemporalLayers333PatternEncoder();
+}
+
+TEST(SimulcastEncoderAdapterSimulcastTest,
+ TestSpatioTemporalLayers321PatternEncoder) {
+ InternalEncoderFactory internal_encoder_factory;
+ auto fixture = CreateSpecificSimulcastTestFixture(&internal_encoder_factory);
+ fixture->TestSpatioTemporalLayers321PatternEncoder();
+}
+
+TEST(SimulcastEncoderAdapterSimulcastTest, TestDecodeWidthHeightSet) {
+ InternalEncoderFactory internal_encoder_factory;
+ auto fixture = CreateSpecificSimulcastTestFixture(&internal_encoder_factory);
+ fixture->TestDecodeWidthHeightSet();
+}
+
+class MockVideoEncoder;
+
+class MockVideoEncoderFactory : public VideoEncoderFactory {
+ public:
+ std::vector<SdpVideoFormat> GetSupportedFormats() const override;
+
+ std::unique_ptr<VideoEncoder> CreateVideoEncoder(
+ const SdpVideoFormat& format) override;
+
+ const std::vector<MockVideoEncoder*>& encoders() const;
+ void SetEncoderNames(const std::vector<const char*>& encoder_names);
+ void set_create_video_encode_return_nullptr(bool return_nullptr) {
+ create_video_encoder_return_nullptr_ = return_nullptr;
+ }
+ void set_init_encode_return_value(int32_t value);
+ void set_requested_resolution_alignments(
+ std::vector<uint32_t> requested_resolution_alignments) {
+ requested_resolution_alignments_ = requested_resolution_alignments;
+ }
+ void set_supports_simulcast(bool supports_simulcast) {
+ supports_simulcast_ = supports_simulcast;
+ }
+ void set_resolution_bitrate_limits(
+ std::vector<VideoEncoder::ResolutionBitrateLimits> limits) {
+ resolution_bitrate_limits_ = limits;
+ }
+
+ void DestroyVideoEncoder(VideoEncoder* encoder);
+
+ private:
+ bool create_video_encoder_return_nullptr_ = false;
+ int32_t init_encode_return_value_ = 0;
+ std::vector<MockVideoEncoder*> encoders_;
+ std::vector<const char*> encoder_names_;
+ // Keep number of entries in sync with `kMaxSimulcastStreams`.
+ std::vector<uint32_t> requested_resolution_alignments_ = {1, 1, 1};
+ bool supports_simulcast_ = false;
+ std::vector<VideoEncoder::ResolutionBitrateLimits> resolution_bitrate_limits_;
+};
+
+class MockVideoEncoder : public VideoEncoder {
+ public:
+ explicit MockVideoEncoder(MockVideoEncoderFactory* factory)
+ : factory_(factory),
+ scaling_settings_(VideoEncoder::ScalingSettings::kOff),
+ video_format_("unknown"),
+ callback_(nullptr) {}
+
+ MOCK_METHOD(void,
+ SetFecControllerOverride,
+ (FecControllerOverride * fec_controller_override),
+ (override));
+
+ int32_t InitEncode(const VideoCodec* codecSettings,
+ const VideoEncoder::Settings& settings) override {
+ codec_ = *codecSettings;
+ return init_encode_return_value_;
+ }
+
+ MOCK_METHOD(int32_t,
+ Encode,
+ (const VideoFrame& inputImage,
+ const std::vector<VideoFrameType>* frame_types),
+ (override));
+
+ int32_t RegisterEncodeCompleteCallback(
+ EncodedImageCallback* callback) override {
+ callback_ = callback;
+ return 0;
+ }
+
+ MOCK_METHOD(int32_t, Release, (), (override));
+
+ void SetRates(const RateControlParameters& parameters) {
+ last_set_rates_ = parameters;
+ }
+
+ EncoderInfo GetEncoderInfo() const override {
+ EncoderInfo info;
+ info.supports_native_handle = supports_native_handle_;
+ info.implementation_name = implementation_name_;
+ info.scaling_settings = scaling_settings_;
+ info.requested_resolution_alignment = requested_resolution_alignment_;
+ info.apply_alignment_to_all_simulcast_layers =
+ apply_alignment_to_all_simulcast_layers_;
+ info.has_trusted_rate_controller = has_trusted_rate_controller_;
+ info.is_hardware_accelerated = is_hardware_accelerated_;
+ info.fps_allocation[0] = fps_allocation_;
+ info.supports_simulcast = supports_simulcast_;
+ info.is_qp_trusted = is_qp_trusted_;
+ info.resolution_bitrate_limits = resolution_bitrate_limits;
+ return info;
+ }
+
+ virtual ~MockVideoEncoder() { factory_->DestroyVideoEncoder(this); }
+
+ const VideoCodec& codec() const { return codec_; }
+
+ void SendEncodedImage(int width, int height) {
+ // Sends a fake image of the given width/height.
+ EncodedImage image;
+ image._encodedWidth = width;
+ image._encodedHeight = height;
+ CodecSpecificInfo codec_specific_info;
+ codec_specific_info.codecType = webrtc::kVideoCodecVP8;
+ callback_->OnEncodedImage(image, &codec_specific_info);
+ }
+
+ void set_supports_native_handle(bool enabled) {
+ supports_native_handle_ = enabled;
+ }
+
+ void set_implementation_name(const std::string& name) {
+ implementation_name_ = name;
+ }
+
+ void set_init_encode_return_value(int32_t value) {
+ init_encode_return_value_ = value;
+ }
+
+ void set_scaling_settings(const VideoEncoder::ScalingSettings& settings) {
+ scaling_settings_ = settings;
+ }
+
+ void set_requested_resolution_alignment(
+ uint32_t requested_resolution_alignment) {
+ requested_resolution_alignment_ = requested_resolution_alignment;
+ }
+
+ void set_apply_alignment_to_all_simulcast_layers(bool apply) {
+ apply_alignment_to_all_simulcast_layers_ = apply;
+ }
+
+ void set_has_trusted_rate_controller(bool trusted) {
+ has_trusted_rate_controller_ = trusted;
+ }
+
+ void set_is_hardware_accelerated(bool is_hardware_accelerated) {
+ is_hardware_accelerated_ = is_hardware_accelerated;
+ }
+
+ void set_fps_allocation(const FramerateFractions& fps_allocation) {
+ fps_allocation_ = fps_allocation;
+ }
+
+ RateControlParameters last_set_rates() const { return last_set_rates_; }
+
+ void set_supports_simulcast(bool supports_simulcast) {
+ supports_simulcast_ = supports_simulcast;
+ }
+
+ void set_video_format(const SdpVideoFormat& video_format) {
+ video_format_ = video_format;
+ }
+
+ void set_is_qp_trusted(absl::optional<bool> is_qp_trusted) {
+ is_qp_trusted_ = is_qp_trusted;
+ }
+
+ void set_resolution_bitrate_limits(
+ std::vector<VideoEncoder::ResolutionBitrateLimits> limits) {
+ resolution_bitrate_limits = limits;
+ }
+
+ bool supports_simulcast() const { return supports_simulcast_; }
+
+ SdpVideoFormat video_format() const { return video_format_; }
+
+ private:
+ MockVideoEncoderFactory* const factory_;
+ bool supports_native_handle_ = false;
+ std::string implementation_name_ = "unknown";
+ VideoEncoder::ScalingSettings scaling_settings_;
+ uint32_t requested_resolution_alignment_ = 1;
+ bool apply_alignment_to_all_simulcast_layers_ = false;
+ bool has_trusted_rate_controller_ = false;
+ bool is_hardware_accelerated_ = false;
+ int32_t init_encode_return_value_ = 0;
+ VideoEncoder::RateControlParameters last_set_rates_;
+ FramerateFractions fps_allocation_;
+ bool supports_simulcast_ = false;
+ absl::optional<bool> is_qp_trusted_;
+ SdpVideoFormat video_format_;
+ std::vector<VideoEncoder::ResolutionBitrateLimits> resolution_bitrate_limits;
+
+ VideoCodec codec_;
+ EncodedImageCallback* callback_;
+};
+
+std::vector<SdpVideoFormat> MockVideoEncoderFactory::GetSupportedFormats()
+ const {
+ std::vector<SdpVideoFormat> formats = {SdpVideoFormat("VP8")};
+ return formats;
+}
+
+std::unique_ptr<VideoEncoder> MockVideoEncoderFactory::CreateVideoEncoder(
+ const SdpVideoFormat& format) {
+ if (create_video_encoder_return_nullptr_) {
+ return nullptr;
+ }
+
+ auto encoder = std::make_unique<::testing::NiceMock<MockVideoEncoder>>(this);
+ encoder->set_init_encode_return_value(init_encode_return_value_);
+ const char* encoder_name = encoder_names_.empty()
+ ? "codec_implementation_name"
+ : encoder_names_[encoders_.size()];
+ encoder->set_implementation_name(encoder_name);
+ RTC_CHECK_LT(encoders_.size(), requested_resolution_alignments_.size());
+ encoder->set_requested_resolution_alignment(
+ requested_resolution_alignments_[encoders_.size()]);
+ encoder->set_supports_simulcast(supports_simulcast_);
+ encoder->set_video_format(format);
+ encoder->set_resolution_bitrate_limits(resolution_bitrate_limits_);
+ encoders_.push_back(encoder.get());
+ return encoder;
+}
+
+void MockVideoEncoderFactory::DestroyVideoEncoder(VideoEncoder* encoder) {
+ for (size_t i = 0; i < encoders_.size(); ++i) {
+ if (encoders_[i] == encoder) {
+ encoders_.erase(encoders_.begin() + i);
+ break;
+ }
+ }
+}
+
+const std::vector<MockVideoEncoder*>& MockVideoEncoderFactory::encoders()
+ const {
+ return encoders_;
+}
+void MockVideoEncoderFactory::SetEncoderNames(
+ const std::vector<const char*>& encoder_names) {
+ encoder_names_ = encoder_names;
+}
+void MockVideoEncoderFactory::set_init_encode_return_value(int32_t value) {
+ init_encode_return_value_ = value;
+}
+
+class TestSimulcastEncoderAdapterFakeHelper {
+ public:
+ explicit TestSimulcastEncoderAdapterFakeHelper(
+ bool use_fallback_factory,
+ const SdpVideoFormat& video_format,
+ const FieldTrialsView& field_trials)
+ : primary_factory_(new MockVideoEncoderFactory()),
+ fallback_factory_(use_fallback_factory ? new MockVideoEncoderFactory()
+ : nullptr),
+ video_format_(video_format),
+ field_trials_(field_trials) {}
+
+  // Can only be called once, as the SimulcastEncoderAdapter will take
+  // ownership of `factory_`.
+ VideoEncoder* CreateMockEncoderAdapter() {
+ return new SimulcastEncoderAdapter(primary_factory_.get(),
+ fallback_factory_.get(), video_format_,
+ field_trials_);
+ }
+
+ MockVideoEncoderFactory* factory() { return primary_factory_.get(); }
+ MockVideoEncoderFactory* fallback_factory() {
+ return fallback_factory_.get();
+ }
+
+ private:
+ std::unique_ptr<MockVideoEncoderFactory> primary_factory_;
+ std::unique_ptr<MockVideoEncoderFactory> fallback_factory_;
+ SdpVideoFormat video_format_;
+ const FieldTrialsView& field_trials_;
+};
+
+static const int kTestTemporalLayerProfile[3] = {3, 2, 1};
+
+class TestSimulcastEncoderAdapterFake : public ::testing::Test,
+ public EncodedImageCallback {
+ public:
+ TestSimulcastEncoderAdapterFake() : use_fallback_factory_(false) {}
+
+ virtual ~TestSimulcastEncoderAdapterFake() {
+ if (adapter_) {
+ adapter_->Release();
+ }
+ }
+
+ void SetUp() override {
+ helper_.reset(new TestSimulcastEncoderAdapterFakeHelper(
+ use_fallback_factory_, SdpVideoFormat("VP8", sdp_video_parameters_),
+ field_trials_));
+ adapter_.reset(helper_->CreateMockEncoderAdapter());
+ last_encoded_image_width_ = absl::nullopt;
+ last_encoded_image_height_ = absl::nullopt;
+ last_encoded_image_simulcast_index_ = absl::nullopt;
+ }
+
+ void ReSetUp() {
+ if (adapter_) {
+ adapter_->Release();
+ // `helper_` owns factories which `adapter_` needs to destroy encoders.
+ // Release `adapter_` before `helper_` (released in SetUp()).
+ adapter_.reset();
+ }
+ SetUp();
+ }
+
+ Result OnEncodedImage(const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info) override {
+ last_encoded_image_width_ = encoded_image._encodedWidth;
+ last_encoded_image_height_ = encoded_image._encodedHeight;
+ last_encoded_image_simulcast_index_ = encoded_image.SimulcastIndex();
+
+ return Result(Result::OK, encoded_image.RtpTimestamp());
+ }
+
+ bool GetLastEncodedImageInfo(absl::optional<int>* out_width,
+ absl::optional<int>* out_height,
+ absl::optional<int>* out_simulcast_index) {
+ if (!last_encoded_image_width_.has_value()) {
+ return false;
+ }
+ *out_width = last_encoded_image_width_;
+ *out_height = last_encoded_image_height_;
+ *out_simulcast_index = last_encoded_image_simulcast_index_;
+ return true;
+ }
+
+ void SetupCodec() { SetupCodec(/*active_streams=*/{true, true, true}); }
+
+ void SetupCodec(std::vector<bool> active_streams) {
+ SimulcastTestFixtureImpl::DefaultSettings(
+ &codec_, static_cast<const int*>(kTestTemporalLayerProfile),
+ kVideoCodecVP8);
+ ASSERT_LE(active_streams.size(), codec_.numberOfSimulcastStreams);
+ codec_.numberOfSimulcastStreams = active_streams.size();
+ for (size_t stream_idx = 0; stream_idx < kMaxSimulcastStreams;
+ ++stream_idx) {
+ if (stream_idx >= codec_.numberOfSimulcastStreams) {
+ // Reset parameters of unspecified stream.
+ codec_.simulcastStream[stream_idx] = {0};
+ } else {
+ codec_.simulcastStream[stream_idx].active = active_streams[stream_idx];
+ }
+ }
+ rate_allocator_.reset(new SimulcastRateAllocator(codec_));
+ EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+ adapter_->RegisterEncodeCompleteCallback(this);
+ }
+
+ void VerifyCodec(const VideoCodec& ref, int stream_index) {
+ const VideoCodec& target =
+ helper_->factory()->encoders()[stream_index]->codec();
+ EXPECT_EQ(ref.codecType, target.codecType);
+ EXPECT_EQ(ref.width, target.width);
+ EXPECT_EQ(ref.height, target.height);
+ EXPECT_EQ(ref.startBitrate, target.startBitrate);
+ EXPECT_EQ(ref.maxBitrate, target.maxBitrate);
+ EXPECT_EQ(ref.minBitrate, target.minBitrate);
+ EXPECT_EQ(ref.maxFramerate, target.maxFramerate);
+ EXPECT_EQ(ref.GetVideoEncoderComplexity(),
+ target.GetVideoEncoderComplexity());
+ EXPECT_EQ(ref.VP8().numberOfTemporalLayers,
+ target.VP8().numberOfTemporalLayers);
+ EXPECT_EQ(ref.VP8().denoisingOn, target.VP8().denoisingOn);
+ EXPECT_EQ(ref.VP8().automaticResizeOn, target.VP8().automaticResizeOn);
+ EXPECT_EQ(ref.GetFrameDropEnabled(), target.GetFrameDropEnabled());
+ EXPECT_EQ(ref.VP8().keyFrameInterval, target.VP8().keyFrameInterval);
+ EXPECT_EQ(ref.qpMax, target.qpMax);
+ EXPECT_EQ(0, target.numberOfSimulcastStreams);
+ EXPECT_EQ(ref.mode, target.mode);
+
+ // No need to compare simulcastStream as numberOfSimulcastStreams should
+ // always be 0.
+ }
+
+ void InitRefCodec(int stream_index,
+ VideoCodec* ref_codec,
+ bool reverse_layer_order = false) {
+ *ref_codec = codec_;
+ ref_codec->VP8()->numberOfTemporalLayers =
+ kTestTemporalLayerProfile[reverse_layer_order ? 2 - stream_index
+ : stream_index];
+ ref_codec->width = codec_.simulcastStream[stream_index].width;
+ ref_codec->height = codec_.simulcastStream[stream_index].height;
+ ref_codec->maxBitrate = codec_.simulcastStream[stream_index].maxBitrate;
+ ref_codec->minBitrate = codec_.simulcastStream[stream_index].minBitrate;
+ ref_codec->qpMax = codec_.simulcastStream[stream_index].qpMax;
+ }
+
+ void VerifyCodecSettings() {
+ EXPECT_EQ(3u, helper_->factory()->encoders().size());
+ VideoCodec ref_codec;
+
+ // stream 0, the lowest resolution stream.
+ InitRefCodec(0, &ref_codec);
+ ref_codec.qpMax = 45;
+ ref_codec.SetVideoEncoderComplexity(
+ webrtc::VideoCodecComplexity::kComplexityHigher);
+ ref_codec.VP8()->denoisingOn = false;
+ ref_codec.startBitrate = 100; // Should equal to the target bitrate.
+ VerifyCodec(ref_codec, 0);
+
+ // stream 1
+ InitRefCodec(1, &ref_codec);
+ ref_codec.VP8()->denoisingOn = false;
+ // The start bitrate (300kbit) minus what we have for the lower layers
+ // (100kbit).
+ ref_codec.startBitrate = 200;
+ VerifyCodec(ref_codec, 1);
+
+ // stream 2, the biggest resolution stream.
+ InitRefCodec(2, &ref_codec);
+ // We don't have enough bits to send this, so the adapter should have
+ // configured it to use the min bitrate for this layer (600kbit) but turn
+ // off sending.
+ ref_codec.startBitrate = 600;
+ VerifyCodec(ref_codec, 2);
+ }
+
+ protected:
+ std::unique_ptr<TestSimulcastEncoderAdapterFakeHelper> helper_;
+ std::unique_ptr<VideoEncoder> adapter_;
+ VideoCodec codec_;
+ absl::optional<int> last_encoded_image_width_;
+ absl::optional<int> last_encoded_image_height_;
+ absl::optional<int> last_encoded_image_simulcast_index_;
+ std::unique_ptr<SimulcastRateAllocator> rate_allocator_;
+ bool use_fallback_factory_;
+ SdpVideoFormat::Parameters sdp_video_parameters_;
+ test::ScopedKeyValueConfig field_trials_;
+};
+
+TEST_F(TestSimulcastEncoderAdapterFake, InitEncode) {
+ SetupCodec();
+ VerifyCodecSettings();
+}
+
+TEST_F(TestSimulcastEncoderAdapterFake, ReleaseWithoutInitEncode) {
+ EXPECT_EQ(0, adapter_->Release());
+}
+
+TEST_F(TestSimulcastEncoderAdapterFake, Reinit) {
+ SetupCodec();
+ EXPECT_EQ(0, adapter_->Release());
+
+ EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+}
+
+TEST_F(TestSimulcastEncoderAdapterFake, EncodedCallbackForDifferentEncoders) {
+ SetupCodec();
+
+ // Set bitrates so that we send all layers.
+ adapter_->SetRates(VideoEncoder::RateControlParameters(
+ rate_allocator_->Allocate(VideoBitrateAllocationParameters(1200, 30)),
+ 30.0));
+
+ // At this point, the simulcast encoder adapter should have 3 streams: HD,
+ // quarter HD, and quarter quarter HD. We're going to mostly ignore the exact
+ // resolutions, to test that the adapter forwards on the correct resolution
+ // and simulcast index values, going only off the encoder that generates the
+ // image.
+ std::vector<MockVideoEncoder*> encoders = helper_->factory()->encoders();
+ ASSERT_EQ(3u, encoders.size());
+ encoders[0]->SendEncodedImage(1152, 704);
+ absl::optional<int> width;
+ absl::optional<int> height;
+ absl::optional<int> simulcast_index;
+ EXPECT_TRUE(GetLastEncodedImageInfo(&width, &height, &simulcast_index));
+ ASSERT_TRUE(width.has_value());
+ EXPECT_EQ(1152, width.value());
+ ASSERT_TRUE(height.has_value());
+ EXPECT_EQ(704, height.value());
+ // SEA doesn't intercept frame encode complete callback for the lowest stream.
+ EXPECT_FALSE(simulcast_index.has_value());
+
+ encoders[1]->SendEncodedImage(300, 620);
+ EXPECT_TRUE(GetLastEncodedImageInfo(&width, &height, &simulcast_index));
+ ASSERT_TRUE(width.has_value());
+ EXPECT_EQ(300, width.value());
+ ASSERT_TRUE(height.has_value());
+ EXPECT_EQ(620, height.value());
+ ASSERT_TRUE(simulcast_index.has_value());
+ EXPECT_EQ(1, simulcast_index.value());
+
+ encoders[2]->SendEncodedImage(120, 240);
+ EXPECT_TRUE(GetLastEncodedImageInfo(&width, &height, &simulcast_index));
+ ASSERT_TRUE(width.has_value());
+ EXPECT_EQ(120, width.value());
+ ASSERT_TRUE(height.has_value());
+ EXPECT_EQ(240, height.value());
+ ASSERT_TRUE(simulcast_index.has_value());
+ EXPECT_EQ(2, simulcast_index.value());
+}
+
+ // This test verifies that the underlying encoders are reused, when the adapter
+ // is reinited with different number of simulcast streams. It further checks
+ // that the allocated encoders are reused in the same order as before, starting
+ // with the lowest stream.
+ TEST_F(TestSimulcastEncoderAdapterFake, ReusesEncodersInOrder) {
+ // Set up common settings for three streams.
+ SimulcastTestFixtureImpl::DefaultSettings(
+ &codec_, static_cast<const int*>(kTestTemporalLayerProfile),
+ kVideoCodecVP8);
+ rate_allocator_.reset(new SimulcastRateAllocator(codec_));
+ adapter_->RegisterEncodeCompleteCallback(this);
+ // NOTE(review): rate is the two lower streams' target plus the top stream's
+ // minimum — presumably just enough for the allocator to activate all three
+ // streams; confirm against SimulcastRateAllocator.
+ const uint32_t target_bitrate =
+ 1000 * (codec_.simulcastStream[0].targetBitrate +
+ codec_.simulcastStream[1].targetBitrate +
+ codec_.simulcastStream[2].minBitrate);
+
+ // Input data.
+ rtc::scoped_refptr<VideoFrameBuffer> buffer(I420Buffer::Create(1280, 720));
+ VideoFrame input_frame = VideoFrame::Builder()
+ .set_video_frame_buffer(buffer)
+ .set_timestamp_rtp(100)
+ .set_timestamp_ms(1000)
+ .set_rotation(kVideoRotation_180)
+ .build();
+ std::vector<VideoFrameType> frame_types;
+
+ // Encode with three streams.
+ EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+ VerifyCodecSettings();
+ adapter_->SetRates(VideoEncoder::RateControlParameters(
+ rate_allocator_->Allocate(
+ VideoBitrateAllocationParameters(target_bitrate, 30)),
+ 30.0));
+
+ // Capture the initially allocated encoders; later rounds compare pointers
+ // against these to prove reuse (not just reuse of equivalent objects).
+ std::vector<MockVideoEncoder*> original_encoders =
+ helper_->factory()->encoders();
+ ASSERT_EQ(3u, original_encoders.size());
+ EXPECT_CALL(*original_encoders[0], Encode(_, _))
+ .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
+ EXPECT_CALL(*original_encoders[1], Encode(_, _))
+ .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
+ EXPECT_CALL(*original_encoders[2], Encode(_, _))
+ .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
+ frame_types.resize(3, VideoFrameType::kVideoFrameKey);
+ EXPECT_EQ(0, adapter_->Encode(input_frame, &frame_types));
+ EXPECT_CALL(*original_encoders[0], Release())
+ .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
+ EXPECT_CALL(*original_encoders[1], Release())
+ .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
+ EXPECT_CALL(*original_encoders[2], Release())
+ .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
+ EXPECT_EQ(0, adapter_->Release());
+
+ // Encode with two streams.
+ codec_.width /= 2;
+ codec_.height /= 2;
+ codec_.numberOfSimulcastStreams = 2;
+ EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+ adapter_->SetRates(VideoEncoder::RateControlParameters(
+ rate_allocator_->Allocate(
+ VideoBitrateAllocationParameters(target_bitrate, 30)),
+ 30.0));
+ // The two remaining encoders must be the exact same objects as before,
+ // in the same (lowest-first) order.
+ std::vector<MockVideoEncoder*> new_encoders = helper_->factory()->encoders();
+ ASSERT_EQ(2u, new_encoders.size());
+ ASSERT_EQ(original_encoders[0], new_encoders[0]);
+ EXPECT_CALL(*original_encoders[0], Encode(_, _))
+ .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
+ ASSERT_EQ(original_encoders[1], new_encoders[1]);
+ EXPECT_CALL(*original_encoders[1], Encode(_, _))
+ .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
+ frame_types.resize(2, VideoFrameType::kVideoFrameKey);
+ EXPECT_EQ(0, adapter_->Encode(input_frame, &frame_types));
+ EXPECT_CALL(*original_encoders[0], Release())
+ .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
+ EXPECT_CALL(*original_encoders[1], Release())
+ .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
+ EXPECT_EQ(0, adapter_->Release());
+
+ // Encode with single stream.
+ codec_.width /= 2;
+ codec_.height /= 2;
+ codec_.numberOfSimulcastStreams = 1;
+ EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+ adapter_->SetRates(VideoEncoder::RateControlParameters(
+ rate_allocator_->Allocate(
+ VideoBitrateAllocationParameters(target_bitrate, 30)),
+ 30.0));
+ new_encoders = helper_->factory()->encoders();
+ ASSERT_EQ(1u, new_encoders.size());
+ ASSERT_EQ(original_encoders[0], new_encoders[0]);
+ EXPECT_CALL(*original_encoders[0], Encode(_, _))
+ .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
+ frame_types.resize(1, VideoFrameType::kVideoFrameKey);
+ EXPECT_EQ(0, adapter_->Encode(input_frame, &frame_types));
+ EXPECT_CALL(*original_encoders[0], Release())
+ .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
+ EXPECT_EQ(0, adapter_->Release());
+
+ // Encode with three streams, again.
+ codec_.width *= 4;
+ codec_.height *= 4;
+ codec_.numberOfSimulcastStreams = 3;
+ EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+ adapter_->SetRates(VideoEncoder::RateControlParameters(
+ rate_allocator_->Allocate(
+ VideoBitrateAllocationParameters(target_bitrate, 30)),
+ 30.0));
+ new_encoders = helper_->factory()->encoders();
+ ASSERT_EQ(3u, new_encoders.size());
+ // The first encoder is reused.
+ ASSERT_EQ(original_encoders[0], new_encoders[0]);
+ EXPECT_CALL(*original_encoders[0], Encode(_, _))
+ .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
+ // The second and third encoders are new.
+ EXPECT_CALL(*new_encoders[1], Encode(_, _))
+ .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
+ EXPECT_CALL(*new_encoders[2], Encode(_, _))
+ .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
+ frame_types.resize(3, VideoFrameType::kVideoFrameKey);
+ EXPECT_EQ(0, adapter_->Encode(input_frame, &frame_types));
+ EXPECT_CALL(*original_encoders[0], Release())
+ .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
+ EXPECT_CALL(*new_encoders[1], Release())
+ .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
+ EXPECT_CALL(*new_encoders[2], Release())
+ .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
+ EXPECT_EQ(0, adapter_->Release());
+ }
+
+ // Destroying the adapter must destroy every encoder it allocated. Actual
+ // leak detection is delegated to lsan, because `helper_->factory()` (which
+ // tracks the encoders) is itself owned by `adapter_`.
+ TEST_F(TestSimulcastEncoderAdapterFake, DoesNotLeakEncoders) {
+   SetupCodec();
+   VerifyCodecSettings();
+
+   auto* factory = helper_->factory();
+   EXPECT_EQ(3u, factory->encoders().size());
+
+   // Release, then drop the adapter; lsan flags anything left behind.
+   EXPECT_EQ(0, adapter_->Release());
+   adapter_.reset();
+ }
+
+ // Verifies that re-initializing the adapter with unchanged codec settings
+ // leaves each underlying encoder's codec configuration untouched.
+ TEST_F(TestSimulcastEncoderAdapterFake, ReinitDoesNotReorderEncoderSettings) {
+   SetupCodec();
+   VerifyCodecSettings();
+
+   // Snapshot the per-stream codec settings before the re-init.
+   std::vector<MockVideoEncoder*> encoders = helper_->factory()->encoders();
+   ASSERT_EQ(3u, encoders.size());
+   std::array<VideoCodec, 3> saved_codecs;
+   for (size_t i = 0; i < saved_codecs.size(); ++i) {
+     saved_codecs[i] = encoders[i]->codec();
+   }
+
+   // Release and re-init with identical settings...
+   EXPECT_EQ(0, adapter_->Release());
+   EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+
+   // ...then compare field by field, since webrtc::VideoCodec does not
+   // implement operator==.
+   for (size_t i = 0; i < saved_codecs.size(); ++i) {
+     const VideoCodec& before = saved_codecs[i];
+     const VideoCodec& after = encoders[i]->codec();
+     EXPECT_EQ(before.codecType, after.codecType);
+     EXPECT_EQ(before.width, after.width);
+     EXPECT_EQ(before.height, after.height);
+     EXPECT_EQ(before.startBitrate, after.startBitrate);
+     EXPECT_EQ(before.maxBitrate, after.maxBitrate);
+     EXPECT_EQ(before.minBitrate, after.minBitrate);
+     EXPECT_EQ(before.maxFramerate, after.maxFramerate);
+     EXPECT_EQ(before.qpMax, after.qpMax);
+     EXPECT_EQ(before.numberOfSimulcastStreams,
+               after.numberOfSimulcastStreams);
+     EXPECT_EQ(before.mode, after.mode);
+     EXPECT_EQ(before.expect_encode_from_texture,
+               after.expect_encode_from_texture);
+   }
+ }
+
+ // This test is similar to the one above, except that it tests the simulcastIdx
+ // from the CodecSpecificInfo that is connected to an encoded frame. The
+ // PayloadRouter demuxes the incoming encoded frames on different RTP modules
+ // using the simulcastIdx, so it's important that there is no corresponding
+ // encoder reordering in between adapter reinits as this would lead to PictureID
+ // discontinuities.
+ TEST_F(TestSimulcastEncoderAdapterFake, ReinitDoesNotReorderFrameSimulcastIdx) {
+ SetupCodec();
+ adapter_->SetRates(VideoEncoder::RateControlParameters(
+ rate_allocator_->Allocate(VideoBitrateAllocationParameters(1200, 30)),
+ 30.0));
+ VerifyCodecSettings();
+
+ // Send frames on all streams.
+ std::vector<MockVideoEncoder*> encoders = helper_->factory()->encoders();
+ ASSERT_EQ(3u, encoders.size());
+ encoders[0]->SendEncodedImage(1152, 704);
+ absl::optional<int> width;
+ absl::optional<int> height;
+ absl::optional<int> simulcast_index;
+ // width/height are required out-params of GetLastEncodedImageInfo() but are
+ // deliberately not asserted here; only the simulcast index matters.
+ EXPECT_TRUE(GetLastEncodedImageInfo(&width, &height, &simulcast_index));
+ // SEA doesn't intercept frame encode complete callback for the lowest stream.
+ EXPECT_FALSE(simulcast_index.has_value());
+
+ encoders[1]->SendEncodedImage(300, 620);
+ EXPECT_TRUE(GetLastEncodedImageInfo(&width, &height, &simulcast_index));
+ ASSERT_TRUE(simulcast_index.has_value());
+ EXPECT_EQ(1, simulcast_index.value());
+
+ encoders[2]->SendEncodedImage(120, 240);
+ EXPECT_TRUE(GetLastEncodedImageInfo(&width, &height, &simulcast_index));
+ ASSERT_TRUE(simulcast_index.has_value());
+ EXPECT_EQ(2, simulcast_index.value());
+
+ // Reinitialize.
+ EXPECT_EQ(0, adapter_->Release());
+ EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+ adapter_->SetRates(VideoEncoder::RateControlParameters(
+ rate_allocator_->Allocate(VideoBitrateAllocationParameters(1200, 30)),
+ 30.0));
+
+ // Verify that the same encoder sends out frames on the same simulcast index.
+ // `encoders` still holds the pre-reinit pointers, so matching indices here
+ // proves the encoders were not reordered by the reinit.
+ encoders[0]->SendEncodedImage(1152, 704);
+ EXPECT_TRUE(GetLastEncodedImageInfo(&width, &height, &simulcast_index));
+ EXPECT_FALSE(simulcast_index.has_value());
+
+ encoders[1]->SendEncodedImage(300, 620);
+ EXPECT_TRUE(GetLastEncodedImageInfo(&width, &height, &simulcast_index));
+ ASSERT_TRUE(simulcast_index.has_value());
+ EXPECT_EQ(1, simulcast_index.value());
+
+ encoders[2]->SendEncodedImage(120, 240);
+ EXPECT_TRUE(GetLastEncodedImageInfo(&width, &height, &simulcast_index));
+ ASSERT_TRUE(simulcast_index.has_value());
+ EXPECT_EQ(2, simulcast_index.value());
+ }
+
+ // With a single stream the adapter simply mirrors the wrapped encoder's
+ // native-handle capability after each re-init.
+ TEST_F(TestSimulcastEncoderAdapterFake, SupportsNativeHandleForSingleStreams) {
+   SimulcastTestFixtureImpl::DefaultSettings(
+       &codec_, static_cast<const int*>(kTestTemporalLayerProfile),
+       kVideoCodecVP8);
+   codec_.numberOfSimulcastStreams = 1;
+   EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+   adapter_->RegisterEncodeCompleteCallback(this);
+   ASSERT_EQ(1u, helper_->factory()->encoders().size());
+   // Toggle support on the underlying encoder and re-init; the adapter's
+   // EncoderInfo must follow in both directions.
+   for (bool supported : {true, false}) {
+     helper_->factory()->encoders()[0]->set_supports_native_handle(supported);
+     EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+     EXPECT_EQ(supported, adapter_->GetEncoderInfo().supports_native_handle);
+   }
+ }
+
+ // Verifies the adapter's bitrate clamping: allocations below the codec's
+ // minimum are raised to the minimum, while an all-zero allocation (pause)
+ // is forwarded untouched.
+ TEST_F(TestSimulcastEncoderAdapterFake, SetRatesUnderMinBitrate) {
+ SimulcastTestFixtureImpl::DefaultSettings(
+ &codec_, static_cast<const int*>(kTestTemporalLayerProfile),
+ kVideoCodecVP8);
+ codec_.minBitrate = 50;
+ codec_.numberOfSimulcastStreams = 1;
+ EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+ rate_allocator_.reset(new SimulcastRateAllocator(codec_));
+
+ // Above min should be respected.
+ VideoBitrateAllocation target_bitrate = rate_allocator_->Allocate(
+ VideoBitrateAllocationParameters(codec_.minBitrate * 1000, 30));
+ adapter_->SetRates(VideoEncoder::RateControlParameters(target_bitrate, 30.0));
+ EXPECT_EQ(target_bitrate,
+ helper_->factory()->encoders()[0]->last_set_rates().bitrate);
+
+ // Below min but non-zero should be replaced with the min bitrate.
+ VideoBitrateAllocation too_low_bitrate = rate_allocator_->Allocate(
+ VideoBitrateAllocationParameters((codec_.minBitrate - 1) * 1000, 30));
+ adapter_->SetRates(
+ VideoEncoder::RateControlParameters(too_low_bitrate, 30.0));
+ EXPECT_EQ(target_bitrate,
+ helper_->factory()->encoders()[0]->last_set_rates().bitrate);
+
+ // Zero should be passed on as is, since it means "pause".
+ adapter_->SetRates(
+ VideoEncoder::RateControlParameters(VideoBitrateAllocation(), 30.0));
+ EXPECT_EQ(VideoBitrateAllocation(),
+ helper_->factory()->encoders()[0]->last_set_rates().bitrate);
+ }
+
+ // The adapter's implementation name aggregates the wrapped encoders' names
+ // for simulcast, and degenerates to the single encoder's name otherwise.
+ TEST_F(TestSimulcastEncoderAdapterFake, SupportsImplementationName) {
+   SimulcastTestFixtureImpl::DefaultSettings(
+       &codec_, static_cast<const int*>(kTestTemporalLayerProfile),
+       kVideoCodecVP8);
+   helper_->factory()->SetEncoderNames({"codec1", "codec2", "codec3"});
+   // Before InitEncode() only the adapter's own name is available.
+   EXPECT_EQ("SimulcastEncoderAdapter",
+             adapter_->GetEncoderInfo().implementation_name);
+   EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+   EXPECT_EQ("SimulcastEncoderAdapter (codec1, codec2, codec3)",
+             adapter_->GetEncoderInfo().implementation_name);
+
+   // Single streams should not expose "SimulcastEncoderAdapter" in name.
+   EXPECT_EQ(0, adapter_->Release());
+   codec_.numberOfSimulcastStreams = 1;
+   EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+   adapter_->RegisterEncodeCompleteCallback(this);
+   ASSERT_EQ(1u, helper_->factory()->encoders().size());
+   EXPECT_EQ("codec1", adapter_->GetEncoderInfo().implementation_name);
+ }
+
+ // A change to an underlying encoder's implementation name at runtime (e.g.
+ // after an internal fallback) must show up in the adapter's EncoderInfo
+ // without a re-init.
+ TEST_F(TestSimulcastEncoderAdapterFake, RuntimeEncoderInfoUpdate) {
+   SimulcastTestFixtureImpl::DefaultSettings(
+       &codec_, static_cast<const int*>(kTestTemporalLayerProfile),
+       kVideoCodecVP8);
+   helper_->factory()->SetEncoderNames({"codec1", "codec2", "codec3"});
+   EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+   EXPECT_EQ("SimulcastEncoderAdapter (codec1, codec2, codec3)",
+             adapter_->GetEncoderInfo().implementation_name);
+
+   // Change name of first encoder to indicate it has done a fallback to
+   // another implementation.
+   helper_->factory()->encoders().front()->set_implementation_name("fallback1");
+   EXPECT_EQ("SimulcastEncoderAdapter (fallback1, codec2, codec3)",
+             adapter_->GetEncoderInfo().implementation_name);
+ }
+
+ // supports_native_handle is an OR-aggregate across the simulcast encoders.
+ TEST_F(TestSimulcastEncoderAdapterFake,
+        SupportsNativeHandleForMultipleStreams) {
+   SimulcastTestFixtureImpl::DefaultSettings(
+       &codec_, static_cast<const int*>(kTestTemporalLayerProfile),
+       kVideoCodecVP8);
+   codec_.numberOfSimulcastStreams = 3;
+   EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+   adapter_->RegisterEncodeCompleteCallback(this);
+   auto& encoders = helper_->factory()->encoders();
+   ASSERT_EQ(3u, encoders.size());
+   for (MockVideoEncoder* encoder : encoders) {
+     encoder->set_supports_native_handle(true);
+   }
+   // As long as one encoder supports native handle, it's enabled.
+   encoders[0]->set_supports_native_handle(false);
+   EXPECT_TRUE(adapter_->GetEncoderInfo().supports_native_handle);
+   // Once none do, then the adapter claims no support (after re-init).
+   encoders[1]->set_supports_native_handle(false);
+   encoders[2]->set_supports_native_handle(false);
+   EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+   EXPECT_FALSE(adapter_->GetEncoderInfo().supports_native_handle);
+ }
+
+ // Minimal kNative VideoFrameBuffer used to exercise native-handle
+ // forwarding. Depending on `allow_to_i420`, conversion to I420 either
+ // succeeds (with a fresh blank buffer) or trips a DCHECK — the latter lets
+ // a test assert that no conversion was ever attempted.
+ class FakeNativeBufferI420 : public VideoFrameBuffer {
+  public:
+   FakeNativeBufferI420(int width, int height, bool allow_to_i420)
+       : width_(width), height_(height), allow_to_i420_(allow_to_i420) {}
+
+   Type type() const override { return Type::kNative; }
+   int width() const override { return width_; }
+   int height() const override { return height_; }
+
+   rtc::scoped_refptr<I420BufferInterface> ToI420() override {
+     if (!allow_to_i420_) {
+       // The owning test expects this buffer to be forwarded untouched.
+       RTC_DCHECK_NOTREACHED();
+       return nullptr;
+     }
+     return I420Buffer::Create(width_, height_);
+   }
+
+  private:
+   const int width_;
+   const int height_;
+   const bool allow_to_i420_;
+ };
+
+ // When every underlying encoder supports native handles, a kNative input
+ // frame must be passed to each of them verbatim. The buffer is constructed
+ // with allow_to_i420=false, so any conversion attempt by the adapter would
+ // hit RTC_DCHECK_NOTREACHED and fail the test.
+ TEST_F(TestSimulcastEncoderAdapterFake,
+ NativeHandleForwardingForMultipleStreams) {
+ SimulcastTestFixtureImpl::DefaultSettings(
+ &codec_, static_cast<const int*>(kTestTemporalLayerProfile),
+ kVideoCodecVP8);
+ codec_.numberOfSimulcastStreams = 3;
+ // High start bitrate, so all streams are enabled.
+ codec_.startBitrate = 3000;
+ EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+ adapter_->RegisterEncodeCompleteCallback(this);
+ ASSERT_EQ(3u, helper_->factory()->encoders().size());
+ for (MockVideoEncoder* encoder : helper_->factory()->encoders())
+ encoder->set_supports_native_handle(true);
+ EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+ EXPECT_TRUE(adapter_->GetEncoderInfo().supports_native_handle);
+
+ rtc::scoped_refptr<VideoFrameBuffer> buffer(
+ rtc::make_ref_counted<FakeNativeBufferI420>(1280, 720,
+ /*allow_to_i420=*/false));
+ VideoFrame input_frame = VideoFrame::Builder()
+ .set_video_frame_buffer(buffer)
+ .set_timestamp_rtp(100)
+ .set_timestamp_ms(1000)
+ .set_rotation(kVideoRotation_180)
+ .build();
+ // Expect calls with the given video frame verbatim, since it's a texture
+ // frame and can't otherwise be modified/resized.
+ for (MockVideoEncoder* encoder : helper_->factory()->encoders())
+ EXPECT_CALL(*encoder, Encode(::testing::Ref(input_frame), _)).Times(1);
+ std::vector<VideoFrameType> frame_types(3, VideoFrameType::kVideoFrameKey);
+ EXPECT_EQ(0, adapter_->Encode(input_frame, &frame_types));
+ }
+
+ // Mixed support: encoders that accept native handles get the kNative frame
+ // verbatim, while the one that does not receives an I420-converted copy
+ // (hence allow_to_i420=true here, unlike the test above).
+ TEST_F(TestSimulcastEncoderAdapterFake, NativeHandleForwardingOnlyIfSupported) {
+ SimulcastTestFixtureImpl::DefaultSettings(
+ &codec_, static_cast<const int*>(kTestTemporalLayerProfile),
+ kVideoCodecVP8);
+ codec_.numberOfSimulcastStreams = 3;
+ // High start bitrate, so all streams are enabled.
+ codec_.startBitrate = 3000;
+ EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+ adapter_->RegisterEncodeCompleteCallback(this);
+ ASSERT_EQ(3u, helper_->factory()->encoders().size());
+
+ // QVGA encoders has fallen back to software.
+ auto& encoders = helper_->factory()->encoders();
+ encoders[0]->set_supports_native_handle(false);
+ encoders[1]->set_supports_native_handle(true);
+ encoders[2]->set_supports_native_handle(true);
+
+ EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+ EXPECT_TRUE(adapter_->GetEncoderInfo().supports_native_handle);
+
+ rtc::scoped_refptr<VideoFrameBuffer> buffer(
+ rtc::make_ref_counted<FakeNativeBufferI420>(1280, 720,
+ /*allow_to_i420=*/true));
+ VideoFrame input_frame = VideoFrame::Builder()
+ .set_video_frame_buffer(buffer)
+ .set_timestamp_rtp(100)
+ .set_timestamp_ms(1000)
+ .set_rotation(kVideoRotation_180)
+ .build();
+ // Expect calls with the given video frame verbatim, since it's a texture
+ // frame and can't otherwise be modified/resized, but only on the two
+ // streams supporting it...
+ EXPECT_CALL(*encoders[1], Encode(::testing::Ref(input_frame), _)).Times(1);
+ EXPECT_CALL(*encoders[2], Encode(::testing::Ref(input_frame), _)).Times(1);
+ // ...the lowest one gets a software buffer.
+ EXPECT_CALL(*encoders[0], Encode)
+ .WillOnce([&](const VideoFrame& frame,
+ const std::vector<VideoFrameType>* frame_types) {
+ EXPECT_EQ(frame.video_frame_buffer()->type(),
+ VideoFrameBuffer::Type::kI420);
+ return 0;
+ });
+ std::vector<VideoFrameType> frame_types(3, VideoFrameType::kVideoFrameKey);
+ EXPECT_EQ(0, adapter_->Encode(input_frame, &frame_types));
+ }
+
+ // Verifies that per-layer key-frame requests given to Encode() are routed to
+ // the matching underlying encoder: each stream sees a key frame exactly when
+ // its entry in `frame_types` requests one.
+ TEST_F(TestSimulcastEncoderAdapterFake, GeneratesKeyFramesOnRequestedLayers) {
+   // Set up common settings for three streams.
+   SimulcastTestFixtureImpl::DefaultSettings(
+       &codec_, static_cast<const int*>(kTestTemporalLayerProfile),
+       kVideoCodecVP8);
+   rate_allocator_.reset(new SimulcastRateAllocator(codec_));
+   adapter_->RegisterEncodeCompleteCallback(this);
+
+   // Input data.
+   rtc::scoped_refptr<VideoFrameBuffer> buffer(I420Buffer::Create(1280, 720));
+
+   // Encode with three streams; high start bitrate so all are active.
+   codec_.startBitrate = 3000;
+   EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+
+   std::vector<VideoFrameType> frame_types;
+   frame_types.resize(3, VideoFrameType::kVideoFrameKey);
+
+   // Each underlying encoder receives a single-element frame-type vector
+   // holding only its own stream's request.
+   std::vector<VideoFrameType> expected_keyframe(1,
+                                                 VideoFrameType::kVideoFrameKey);
+   std::vector<VideoFrameType> expected_deltaframe(
+       1, VideoFrameType::kVideoFrameDelta);
+
+   std::vector<MockVideoEncoder*> original_encoders =
+       helper_->factory()->encoders();
+   ASSERT_EQ(3u, original_encoders.size());
+   // First frame: all layers request a key frame.
+   EXPECT_CALL(*original_encoders[0],
+               Encode(_, ::testing::Pointee(::testing::Eq(expected_keyframe))))
+       .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
+   EXPECT_CALL(*original_encoders[1],
+               Encode(_, ::testing::Pointee(::testing::Eq(expected_keyframe))))
+       .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
+   EXPECT_CALL(*original_encoders[2],
+               Encode(_, ::testing::Pointee(::testing::Eq(expected_keyframe))))
+       .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
+   VideoFrame first_frame = VideoFrame::Builder()
+                                .set_video_frame_buffer(buffer)
+                                .set_timestamp_rtp(0)
+                                .set_timestamp_ms(0)
+                                .build();
+   EXPECT_EQ(0, adapter_->Encode(first_frame, &frame_types));
+
+   // Request [key, delta, delta].
+   EXPECT_CALL(*original_encoders[0],
+               Encode(_, ::testing::Pointee(::testing::Eq(expected_keyframe))))
+       .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
+   EXPECT_CALL(*original_encoders[1],
+               Encode(_, ::testing::Pointee(::testing::Eq(expected_deltaframe))))
+       .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
+   EXPECT_CALL(*original_encoders[2],
+               Encode(_, ::testing::Pointee(::testing::Eq(expected_deltaframe))))
+       .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
+   // Fix: the original assigned frame_types[1] twice (key, then delta) and
+   // never set frame_types[0]; it only passed because index 0 still held
+   // kVideoFrameKey from the previous round. Set index 0 explicitly.
+   frame_types[0] = VideoFrameType::kVideoFrameKey;
+   frame_types[1] = VideoFrameType::kVideoFrameDelta;
+   frame_types[2] = VideoFrameType::kVideoFrameDelta;
+   VideoFrame second_frame = VideoFrame::Builder()
+                                 .set_video_frame_buffer(buffer)
+                                 .set_timestamp_rtp(10000)
+                                 .set_timestamp_ms(100000)
+                                 .build();
+   EXPECT_EQ(0, adapter_->Encode(second_frame, &frame_types));
+
+   // Request [delta, key, delta].
+   EXPECT_CALL(*original_encoders[0],
+               Encode(_, ::testing::Pointee(::testing::Eq(expected_deltaframe))))
+       .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
+   EXPECT_CALL(*original_encoders[1],
+               Encode(_, ::testing::Pointee(::testing::Eq(expected_keyframe))))
+       .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
+   EXPECT_CALL(*original_encoders[2],
+               Encode(_, ::testing::Pointee(::testing::Eq(expected_deltaframe))))
+       .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
+   frame_types[0] = VideoFrameType::kVideoFrameDelta;
+   frame_types[1] = VideoFrameType::kVideoFrameKey;
+   frame_types[2] = VideoFrameType::kVideoFrameDelta;
+   VideoFrame third_frame = VideoFrame::Builder()
+                                .set_video_frame_buffer(buffer)
+                                .set_timestamp_rtp(20000)
+                                .set_timestamp_ms(200000)
+                                .build();
+   EXPECT_EQ(0, adapter_->Encode(third_frame, &frame_types));
+ }
+
+TEST_F(TestSimulcastEncoderAdapterFake, TestFailureReturnCodesFromEncodeCalls) {
+ SimulcastTestFixtureImpl::DefaultSettings(
+ &codec_, static_cast<const int*>(kTestTemporalLayerProfile),
+ kVideoCodecVP8);
+ codec_.numberOfSimulcastStreams = 3;
+ EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+ adapter_->RegisterEncodeCompleteCallback(this);
+ ASSERT_EQ(3u, helper_->factory()->encoders().size());
+ // Tell the 2nd encoder to request software fallback.
+ EXPECT_CALL(*helper_->factory()->encoders()[1], Encode(_, _))
+ .WillOnce(Return(WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE));
+
+ // Send a fake frame and assert the return is software fallback.
+ rtc::scoped_refptr<I420Buffer> input_buffer =
+ I420Buffer::Create(kDefaultWidth, kDefaultHeight);
+ input_buffer->InitializeData();
+ VideoFrame input_frame = VideoFrame::Builder()
+ .set_video_frame_buffer(input_buffer)
+ .set_timestamp_rtp(0)
+ .set_timestamp_us(0)
+ .set_rotation(kVideoRotation_0)
+ .build();
+ std::vector<VideoFrameType> frame_types(3, VideoFrameType::kVideoFrameKey);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE,
+ adapter_->Encode(input_frame, &frame_types));
+}
+
+ // If a stream encoder fails InitEncode(), the adapter must tear down every
+ // encoder created so far and surface the failure code.
+ TEST_F(TestSimulcastEncoderAdapterFake, TestInitFailureCleansUpEncoders) {
+   SimulcastTestFixtureImpl::DefaultSettings(
+       &codec_, static_cast<const int*>(kTestTemporalLayerProfile),
+       kVideoCodecVP8);
+   codec_.numberOfSimulcastStreams = 3;
+   // Make every factory-created encoder fail its InitEncode().
+   helper_->factory()->set_init_encode_return_value(
+       WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE);
+   EXPECT_EQ(WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE,
+             adapter_->InitEncode(&codec_, kSettings));
+   // No encoder instances may survive the failed init.
+   EXPECT_TRUE(helper_->factory()->encoders().empty());
+ }
+
+ // In screenshare mode the adapter must forward the lowest stream's qpMax
+ // unchanged to the underlying encoder, for both initial init and re-init.
+ TEST_F(TestSimulcastEncoderAdapterFake, DoesNotAlterMaxQpForScreenshare) {
+ const int kHighMaxQp = 56;
+ const int kLowMaxQp = 46;
+
+ SimulcastTestFixtureImpl::DefaultSettings(
+ &codec_, static_cast<const int*>(kTestTemporalLayerProfile),
+ kVideoCodecVP8);
+ codec_.numberOfSimulcastStreams = 3;
+ codec_.simulcastStream[0].qpMax = kHighMaxQp;
+ codec_.mode = VideoCodecMode::kScreensharing;
+
+ EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+ EXPECT_EQ(3u, helper_->factory()->encoders().size());
+
+ // Just check the lowest stream, which is the one that where the adapter
+ // might alter the max qp setting.
+ VideoCodec ref_codec;
+ InitRefCodec(0, &ref_codec);
+ ref_codec.qpMax = kHighMaxQp;
+ ref_codec.SetVideoEncoderComplexity(
+ webrtc::VideoCodecComplexity::kComplexityHigher);
+ ref_codec.VP8()->denoisingOn = false;
+ ref_codec.startBitrate = 100; // Should equal to the target bitrate.
+ VerifyCodec(ref_codec, 0);
+
+ // Change the max qp and try again.
+ codec_.simulcastStream[0].qpMax = kLowMaxQp;
+ EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+ EXPECT_EQ(3u, helper_->factory()->encoders().size());
+ ref_codec.qpMax = kLowMaxQp;
+ VerifyCodec(ref_codec, 0);
+ }
+
+ // Same as DoesNotAlterMaxQpForScreenshare, but with the simulcast layers in
+ // reversed order, so the lowest stream is at index 2 instead of 0.
+ TEST_F(TestSimulcastEncoderAdapterFake,
+ DoesNotAlterMaxQpForScreenshareReversedLayer) {
+ const int kHighMaxQp = 56;
+ const int kLowMaxQp = 46;
+
+ SimulcastTestFixtureImpl::DefaultSettings(
+ &codec_, static_cast<const int*>(kTestTemporalLayerProfile),
+ kVideoCodecVP8, true /* reverse_layer_order */);
+ codec_.numberOfSimulcastStreams = 3;
+ codec_.simulcastStream[2].qpMax = kHighMaxQp;
+ codec_.mode = VideoCodecMode::kScreensharing;
+
+ EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+ EXPECT_EQ(3u, helper_->factory()->encoders().size());
+
+ // Just check the lowest stream, which is the one that where the adapter
+ // might alter the max qp setting.
+ VideoCodec ref_codec;
+ InitRefCodec(2, &ref_codec, true /* reverse_layer_order */);
+ ref_codec.qpMax = kHighMaxQp;
+ ref_codec.SetVideoEncoderComplexity(
+ webrtc::VideoCodecComplexity::kComplexityHigher);
+ ref_codec.VP8()->denoisingOn = false;
+ ref_codec.startBitrate = 100; // Should equal to the target bitrate.
+ VerifyCodec(ref_codec, 2);
+
+ // Change the max qp and try again.
+ codec_.simulcastStream[2].qpMax = kLowMaxQp;
+ EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+ EXPECT_EQ(3u, helper_->factory()->encoders().size());
+ ref_codec.qpMax = kLowMaxQp;
+ VerifyCodec(ref_codec, 2);
+ }
+
+ TEST_F(TestSimulcastEncoderAdapterFake, ActivatesCorrectStreamsInInitEncode) {
+ // Set up common settings for three streams.
+ SimulcastTestFixtureImpl::DefaultSettings(
+ &codec_, static_cast<const int*>(kTestTemporalLayerProfile),
+ kVideoCodecVP8);
+ rate_allocator_.reset(new SimulcastRateAllocator(codec_));
+ adapter_->RegisterEncodeCompleteCallback(this);
+
+ // Only enough start bitrate for the lowest stream: one kbps short of the
+ // amount needed to also bring up the second stream.
+ ASSERT_EQ(3u, codec_.numberOfSimulcastStreams);
+ codec_.startBitrate = codec_.simulcastStream[0].targetBitrate +
+ codec_.simulcastStream[1].minBitrate - 1;
+
+ // Input data.
+ rtc::scoped_refptr<VideoFrameBuffer> buffer(I420Buffer::Create(1280, 720));
+ VideoFrame input_frame = VideoFrame::Builder()
+ .set_video_frame_buffer(buffer)
+ .set_timestamp_rtp(100)
+ .set_timestamp_ms(1000)
+ .set_rotation(kVideoRotation_180)
+ .build();
+
+ // Encode with three streams.
+ EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+ std::vector<MockVideoEncoder*> original_encoders =
+ helper_->factory()->encoders();
+ ASSERT_EQ(3u, original_encoders.size());
+ // Only first encoder will be active and called.
+ EXPECT_CALL(*original_encoders[0], Encode(_, _))
+ .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
+ EXPECT_CALL(*original_encoders[1], Encode(_, _)).Times(0);
+ EXPECT_CALL(*original_encoders[2], Encode(_, _)).Times(0);
+
+ std::vector<VideoFrameType> frame_types;
+ frame_types.resize(3, VideoFrameType::kVideoFrameKey);
+ EXPECT_EQ(0, adapter_->Encode(input_frame, &frame_types));
+ }
+
+ // has_trusted_rate_controller is an AND-aggregate: the adapter claims a
+ // trusted rate controller only when every underlying encoder does.
+ TEST_F(TestSimulcastEncoderAdapterFake, TrustedRateControl) {
+   // Set up common settings for three streams.
+   SimulcastTestFixtureImpl::DefaultSettings(
+       &codec_, static_cast<const int*>(kTestTemporalLayerProfile),
+       kVideoCodecVP8);
+   rate_allocator_.reset(new SimulcastRateAllocator(codec_));
+   adapter_->RegisterEncodeCompleteCallback(this);
+
+   // Only enough start bitrate for the lowest stream.
+   ASSERT_EQ(3u, codec_.numberOfSimulcastStreams);
+   codec_.startBitrate = codec_.simulcastStream[0].targetBitrate +
+                         codec_.simulcastStream[1].minBitrate - 1;
+
+   // Note: no frames are encoded in this test; the original built an unused
+   // input VideoFrame here, which has been removed.
+
+   // No encoder trusted, so simulcast adapter should not be either.
+   EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+   EXPECT_FALSE(adapter_->GetEncoderInfo().has_trusted_rate_controller);
+
+   std::vector<MockVideoEncoder*> original_encoders =
+       helper_->factory()->encoders();
+   // Guard the [0..2] accesses below (the original indexed unchecked).
+   ASSERT_EQ(3u, original_encoders.size());
+
+   // All encoders are trusted, so simulcast adapter should be too.
+   original_encoders[0]->set_has_trusted_rate_controller(true);
+   original_encoders[1]->set_has_trusted_rate_controller(true);
+   original_encoders[2]->set_has_trusted_rate_controller(true);
+   EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+   EXPECT_TRUE(adapter_->GetEncoderInfo().has_trusted_rate_controller);
+
+   // One encoder not trusted, so simulcast adapter should not be either.
+   original_encoders[2]->set_has_trusted_rate_controller(false);
+   EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+   EXPECT_FALSE(adapter_->GetEncoderInfo().has_trusted_rate_controller);
+
+   // No encoder trusted, so simulcast adapter should not be either.
+   original_encoders[0]->set_has_trusted_rate_controller(false);
+   original_encoders[1]->set_has_trusted_rate_controller(false);
+   EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+   EXPECT_FALSE(adapter_->GetEncoderInfo().has_trusted_rate_controller);
+ }
+
+ // is_hardware_accelerated is an OR-aggregate: true iff at least one of the
+ // underlying encoders reports hardware acceleration.
+ TEST_F(TestSimulcastEncoderAdapterFake, ReportsHardwareAccelerated) {
+   SimulcastTestFixtureImpl::DefaultSettings(
+       &codec_, static_cast<const int*>(kTestTemporalLayerProfile),
+       kVideoCodecVP8);
+   codec_.numberOfSimulcastStreams = 3;
+   adapter_->RegisterEncodeCompleteCallback(this);
+   EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+   auto& encoders = helper_->factory()->encoders();
+   ASSERT_EQ(3u, encoders.size());
+
+   // All software: the adapter reports no hardware acceleration.
+   for (MockVideoEncoder* encoder : encoders) {
+     encoder->set_is_hardware_accelerated(false);
+   }
+   EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+   EXPECT_FALSE(adapter_->GetEncoderInfo().is_hardware_accelerated);
+
+   // A single hardware encoder flips the aggregate to true.
+   encoders[2]->set_is_hardware_accelerated(true);
+   EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+   EXPECT_TRUE(adapter_->GetEncoderInfo().is_hardware_accelerated);
+ }
+
+ // The adapter's requested_resolution_alignment must be the least common
+ // multiple of the per-stream alignments: lcm(2, 4, 7) == 28.
+ TEST_F(TestSimulcastEncoderAdapterFake,
+        ReportsLeastCommonMultipleOfRequestedResolutionAlignments) {
+   SimulcastTestFixtureImpl::DefaultSettings(
+       &codec_, static_cast<const int*>(kTestTemporalLayerProfile),
+       kVideoCodecVP8);
+   codec_.numberOfSimulcastStreams = 3;
+   helper_->factory()->set_requested_resolution_alignments({2, 4, 7});
+   EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+
+   EXPECT_EQ(28u, adapter_->GetEncoderInfo().requested_resolution_alignment);
+ }
+
+ // apply_alignment_to_all_simulcast_layers is an OR-aggregate: one encoder
+ // requesting it is enough for the adapter to report it.
+ TEST_F(TestSimulcastEncoderAdapterFake,
+        ReportsApplyAlignmentToSimulcastLayers) {
+   SimulcastTestFixtureImpl::DefaultSettings(
+       &codec_, static_cast<const int*>(kTestTemporalLayerProfile),
+       kVideoCodecVP8);
+   codec_.numberOfSimulcastStreams = 3;
+
+   // With no encoder requesting it, the aggregate flag is false.
+   EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+   auto& encoders = helper_->factory()->encoders();
+   ASSERT_EQ(3u, encoders.size());
+   for (MockVideoEncoder* encoder : encoders) {
+     encoder->set_apply_alignment_to_all_simulcast_layers(false);
+   }
+   EXPECT_FALSE(
+       adapter_->GetEncoderInfo().apply_alignment_to_all_simulcast_layers);
+
+   // A single encoder requesting it (after re-init) sets the aggregate flag.
+   encoders[1]->set_apply_alignment_to_all_simulcast_layers(true);
+   EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+   EXPECT_TRUE(
+       adapter_->GetEncoderInfo().apply_alignment_to_all_simulcast_layers);
+ }
+
+ // Per-encoder resolution bitrate limits (frame sizes 111/444/777 below) must
+ // win over the limits injected via the field trial (123/456/789).
+ TEST_F(
+ TestSimulcastEncoderAdapterFake,
+ EncoderInfoFromFieldTrialDoesNotOverrideExistingBitrateLimitsInSinglecast) {
+ test::ScopedKeyValueConfig field_trials(
+ field_trials_,
+ "WebRTC-SimulcastEncoderAdapter-GetEncoderInfoOverride/"
+ "frame_size_pixels:123|456|789,"
+ "min_start_bitrate_bps:11000|22000|33000,"
+ "min_bitrate_bps:44000|55000|66000,"
+ "max_bitrate_bps:77000|88000|99000/");
+ // Re-create adapter/helper so the field trial takes effect.
+ // NOTE(review): SetUp() is called again below after this; the first call
+ // looks redundant since the second re-creates everything — confirm intended.
+ SetUp();
+
+ std::vector<VideoEncoder::ResolutionBitrateLimits> bitrate_limits;
+ bitrate_limits.push_back(
+ VideoEncoder::ResolutionBitrateLimits(111, 11100, 44400, 77700));
+ bitrate_limits.push_back(
+ VideoEncoder::ResolutionBitrateLimits(444, 22200, 55500, 88700));
+ bitrate_limits.push_back(
+ VideoEncoder::ResolutionBitrateLimits(777, 33300, 66600, 99900));
+ SetUp();
+ helper_->factory()->set_resolution_bitrate_limits(bitrate_limits);
+
+ SimulcastTestFixtureImpl::DefaultSettings(
+ &codec_, static_cast<const int*>(kTestTemporalLayerProfile),
+ kVideoCodecVP8);
+ codec_.numberOfSimulcastStreams = 1;
+ EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+ ASSERT_EQ(1u, helper_->factory()->encoders().size());
+ // The encoder-provided limits must be reported unchanged.
+ EXPECT_EQ(adapter_->GetEncoderInfo().resolution_bitrate_limits,
+ bitrate_limits);
+ }
+
+TEST_F(TestSimulcastEncoderAdapterFake, EncoderInfoFromFieldTrial) {
+ test::ScopedKeyValueConfig field_trials(
+ field_trials_,
+ "WebRTC-SimulcastEncoderAdapter-GetEncoderInfoOverride/"
+ "requested_resolution_alignment:8,"
+ "apply_alignment_to_all_simulcast_layers/");
+ SetUp();
+ SimulcastTestFixtureImpl::DefaultSettings(
+ &codec_, static_cast<const int*>(kTestTemporalLayerProfile),
+ kVideoCodecVP8);
+ codec_.numberOfSimulcastStreams = 3;
+ EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+ ASSERT_EQ(3u, helper_->factory()->encoders().size());
+
+ EXPECT_EQ(8u, adapter_->GetEncoderInfo().requested_resolution_alignment);
+ EXPECT_TRUE(
+ adapter_->GetEncoderInfo().apply_alignment_to_all_simulcast_layers);
+ EXPECT_TRUE(adapter_->GetEncoderInfo().resolution_bitrate_limits.empty());
+}
+
+TEST_F(TestSimulcastEncoderAdapterFake,
+ EncoderInfoFromFieldTrialForSingleStream) {
+ test::ScopedKeyValueConfig field_trials(
+ field_trials_,
+ "WebRTC-SimulcastEncoderAdapter-GetEncoderInfoOverride/"
+ "requested_resolution_alignment:9,"
+ "frame_size_pixels:123|456|789,"
+ "min_start_bitrate_bps:11000|22000|33000,"
+ "min_bitrate_bps:44000|55000|66000,"
+ "max_bitrate_bps:77000|88000|99000/");
+ SetUp();
+ SimulcastTestFixtureImpl::DefaultSettings(
+ &codec_, static_cast<const int*>(kTestTemporalLayerProfile),
+ kVideoCodecVP8);
+ codec_.numberOfSimulcastStreams = 1;
+ EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+ ASSERT_EQ(1u, helper_->factory()->encoders().size());
+
+ EXPECT_EQ(9u, adapter_->GetEncoderInfo().requested_resolution_alignment);
+ EXPECT_FALSE(
+ adapter_->GetEncoderInfo().apply_alignment_to_all_simulcast_layers);
+ EXPECT_THAT(
+ adapter_->GetEncoderInfo().resolution_bitrate_limits,
+ ::testing::ElementsAre(
+ VideoEncoder::ResolutionBitrateLimits{123, 11000, 44000, 77000},
+ VideoEncoder::ResolutionBitrateLimits{456, 22000, 55000, 88000},
+ VideoEncoder::ResolutionBitrateLimits{789, 33000, 66000, 99000}));
+}
+
+TEST_F(TestSimulcastEncoderAdapterFake, ReportsIsQpTrusted) {
+ SimulcastTestFixtureImpl::DefaultSettings(
+ &codec_, static_cast<const int*>(kTestTemporalLayerProfile),
+ kVideoCodecVP8);
+ codec_.numberOfSimulcastStreams = 3;
+ adapter_->RegisterEncodeCompleteCallback(this);
+ EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+ ASSERT_EQ(3u, helper_->factory()->encoders().size());
+
+  // All encoders report trusted QP, so the simulcast adapter reports true.
+ for (MockVideoEncoder* encoder : helper_->factory()->encoders()) {
+ encoder->set_is_qp_trusted(true);
+ }
+ EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+ EXPECT_TRUE(adapter_->GetEncoderInfo().is_qp_trusted.value_or(false));
+
+ // One encoder reports QP not trusted, simulcast adapter reports false.
+ helper_->factory()->encoders()[2]->set_is_qp_trusted(false);
+ EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+ EXPECT_FALSE(adapter_->GetEncoderInfo().is_qp_trusted.value_or(true));
+}
+
+TEST_F(TestSimulcastEncoderAdapterFake, ReportsFpsAllocation) {
+ SimulcastTestFixtureImpl::DefaultSettings(
+ &codec_, static_cast<const int*>(kTestTemporalLayerProfile),
+ kVideoCodecVP8);
+ codec_.numberOfSimulcastStreams = 3;
+ adapter_->RegisterEncodeCompleteCallback(this);
+ EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+ ASSERT_EQ(3u, helper_->factory()->encoders().size());
+
+  // Combination of three different supported modes:
+ // Simulcast stream 0 has undefined fps behavior.
+ // Simulcast stream 1 has three temporal layers.
+ // Simulcast stream 2 has 1 temporal layer.
+ FramerateFractions expected_fps_allocation[kMaxSpatialLayers];
+ expected_fps_allocation[1].push_back(EncoderInfo::kMaxFramerateFraction / 4);
+ expected_fps_allocation[1].push_back(EncoderInfo::kMaxFramerateFraction / 2);
+ expected_fps_allocation[1].push_back(EncoderInfo::kMaxFramerateFraction);
+ expected_fps_allocation[2].push_back(EncoderInfo::kMaxFramerateFraction);
+
+  // Assign the expected fps allocation to each underlying encoder.
+ for (size_t i = 0; i < codec_.numberOfSimulcastStreams; ++i) {
+ MockVideoEncoder* encoder = helper_->factory()->encoders()[i];
+ encoder->set_fps_allocation(expected_fps_allocation[i]);
+ }
+ EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+ EXPECT_THAT(adapter_->GetEncoderInfo().fps_allocation,
+ ::testing::ElementsAreArray(expected_fps_allocation));
+}
+
+TEST_F(TestSimulcastEncoderAdapterFake, SetRateDistributesBandwithAllocation) {
+ SimulcastTestFixtureImpl::DefaultSettings(
+ &codec_, static_cast<const int*>(kTestTemporalLayerProfile),
+ kVideoCodecVP8);
+ codec_.numberOfSimulcastStreams = 3;
+ const DataRate target_bitrate =
+ DataRate::KilobitsPerSec(codec_.simulcastStream[0].targetBitrate +
+ codec_.simulcastStream[1].targetBitrate +
+ codec_.simulcastStream[2].minBitrate);
+ const DataRate bandwidth_allocation =
+ target_bitrate + DataRate::KilobitsPerSec(600);
+
+ rate_allocator_.reset(new SimulcastRateAllocator(codec_));
+ EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+ adapter_->RegisterEncodeCompleteCallback(this);
+
+ // Set bitrates so that we send all layers.
+ adapter_->SetRates(VideoEncoder::RateControlParameters(
+ rate_allocator_->Allocate(
+ VideoBitrateAllocationParameters(target_bitrate.bps(), 30)),
+ 30.0, bandwidth_allocation));
+
+ std::vector<MockVideoEncoder*> encoders = helper_->factory()->encoders();
+
+ ASSERT_EQ(3u, encoders.size());
+
+ for (size_t i = 0; i < 3; ++i) {
+ const uint32_t layer_bitrate_bps =
+ (i < static_cast<size_t>(codec_.numberOfSimulcastStreams) - 1
+ ? codec_.simulcastStream[i].targetBitrate
+ : codec_.simulcastStream[i].minBitrate) *
+ 1000;
+ EXPECT_EQ(layer_bitrate_bps,
+ encoders[i]->last_set_rates().bitrate.get_sum_bps())
+ << i;
+ EXPECT_EQ(
+ (layer_bitrate_bps * bandwidth_allocation.bps()) / target_bitrate.bps(),
+ encoders[i]->last_set_rates().bandwidth_allocation.bps())
+ << i;
+ }
+}
+
+TEST_F(TestSimulcastEncoderAdapterFake, CanSetZeroBitrateWithHeadroom) {
+ SimulcastTestFixtureImpl::DefaultSettings(
+ &codec_, static_cast<const int*>(kTestTemporalLayerProfile),
+ kVideoCodecVP8);
+ codec_.numberOfSimulcastStreams = 3;
+
+ rate_allocator_.reset(new SimulcastRateAllocator(codec_));
+ EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+ adapter_->RegisterEncodeCompleteCallback(this);
+
+ // Set allocated bitrate to 0, but keep (network) bandwidth allocation.
+ VideoEncoder::RateControlParameters rate_params;
+ rate_params.framerate_fps = 30;
+ rate_params.bandwidth_allocation = DataRate::KilobitsPerSec(600);
+
+ adapter_->SetRates(rate_params);
+
+ std::vector<MockVideoEncoder*> encoders = helper_->factory()->encoders();
+
+ ASSERT_EQ(3u, encoders.size());
+ for (size_t i = 0; i < 3; ++i) {
+ EXPECT_EQ(0u, encoders[i]->last_set_rates().bitrate.get_sum_bps());
+ }
+}
+
+TEST_F(TestSimulcastEncoderAdapterFake, SupportsSimulcast) {
+ SimulcastTestFixtureImpl::DefaultSettings(
+ &codec_, static_cast<const int*>(kTestTemporalLayerProfile),
+ kVideoCodecVP8);
+ codec_.numberOfSimulcastStreams = 3;
+
+ // Indicate that mock encoders internally support simulcast.
+ helper_->factory()->set_supports_simulcast(true);
+ adapter_->RegisterEncodeCompleteCallback(this);
+ EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+
+ // Only one encoder should have been produced.
+ ASSERT_EQ(1u, helper_->factory()->encoders().size());
+
+ rtc::scoped_refptr<VideoFrameBuffer> buffer(I420Buffer::Create(1280, 720));
+ VideoFrame input_frame = VideoFrame::Builder()
+ .set_video_frame_buffer(buffer)
+ .set_timestamp_rtp(100)
+ .set_timestamp_ms(1000)
+ .set_rotation(kVideoRotation_180)
+ .build();
+ EXPECT_CALL(*helper_->factory()->encoders()[0], Encode)
+ .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
+ std::vector<VideoFrameType> frame_types(3, VideoFrameType::kVideoFrameKey);
+ EXPECT_EQ(0, adapter_->Encode(input_frame, &frame_types));
+}
+
+TEST_F(TestSimulcastEncoderAdapterFake, PassesSdpVideoFormatToEncoder) {
+ sdp_video_parameters_ = {{"test_param", "test_value"}};
+ SetUp();
+ SetupCodec();
+ std::vector<MockVideoEncoder*> encoders = helper_->factory()->encoders();
+ ASSERT_GT(encoders.size(), 0u);
+ EXPECT_EQ(encoders[0]->video_format(),
+ SdpVideoFormat("VP8", sdp_video_parameters_));
+}
+
+TEST_F(TestSimulcastEncoderAdapterFake, SupportsFallback) {
+ // Enable support for fallback encoder factory and re-setup.
+ use_fallback_factory_ = true;
+ SetUp();
+
+ SetupCodec();
+
+ // Make sure we have bitrate for all layers.
+ DataRate max_bitrate = DataRate::Zero();
+ for (int i = 0; i < 3; ++i) {
+ max_bitrate +=
+ DataRate::KilobitsPerSec(codec_.simulcastStream[i].maxBitrate);
+ }
+ const auto rate_settings = VideoEncoder::RateControlParameters(
+ rate_allocator_->Allocate(
+ VideoBitrateAllocationParameters(max_bitrate.bps(), 30)),
+ 30.0, max_bitrate);
+ adapter_->SetRates(rate_settings);
+
+ std::vector<MockVideoEncoder*> primary_encoders =
+ helper_->factory()->encoders();
+ std::vector<MockVideoEncoder*> fallback_encoders =
+ helper_->fallback_factory()->encoders();
+
+ ASSERT_EQ(3u, primary_encoders.size());
+ ASSERT_EQ(3u, fallback_encoders.size());
+
+ // Create frame to test with.
+ rtc::scoped_refptr<VideoFrameBuffer> buffer(I420Buffer::Create(1280, 720));
+ VideoFrame input_frame = VideoFrame::Builder()
+ .set_video_frame_buffer(buffer)
+ .set_timestamp_rtp(100)
+ .set_timestamp_ms(1000)
+ .set_rotation(kVideoRotation_180)
+ .build();
+ std::vector<VideoFrameType> frame_types(3, VideoFrameType::kVideoFrameKey);
+
+ // All primary encoders used.
+ for (auto codec : primary_encoders) {
+ EXPECT_CALL(*codec, Encode).WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
+ }
+ EXPECT_EQ(0, adapter_->Encode(input_frame, &frame_types));
+
+ // Trigger fallback on first encoder.
+ primary_encoders[0]->set_init_encode_return_value(
+ WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE);
+ EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+ adapter_->SetRates(rate_settings);
+ EXPECT_CALL(*fallback_encoders[0], Encode)
+ .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
+ EXPECT_CALL(*primary_encoders[1], Encode)
+ .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
+ EXPECT_CALL(*primary_encoders[2], Encode)
+ .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
+ EXPECT_EQ(0, adapter_->Encode(input_frame, &frame_types));
+
+  // Trigger fallback on all encoders.
+ primary_encoders[1]->set_init_encode_return_value(
+ WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE);
+ primary_encoders[2]->set_init_encode_return_value(
+ WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE);
+ EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+ adapter_->SetRates(rate_settings);
+ EXPECT_CALL(*fallback_encoders[0], Encode)
+ .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
+ EXPECT_CALL(*fallback_encoders[1], Encode)
+ .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
+ EXPECT_CALL(*fallback_encoders[2], Encode)
+ .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
+ EXPECT_EQ(0, adapter_->Encode(input_frame, &frame_types));
+
+ // Return to primary encoders on all streams.
+ for (int i = 0; i < 3; ++i) {
+ primary_encoders[i]->set_init_encode_return_value(WEBRTC_VIDEO_CODEC_OK);
+ }
+ EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+ adapter_->SetRates(rate_settings);
+ for (auto codec : primary_encoders) {
+ EXPECT_CALL(*codec, Encode).WillOnce(Return(WEBRTC_VIDEO_CODEC_OK));
+ }
+ EXPECT_EQ(0, adapter_->Encode(input_frame, &frame_types));
+}
+
+TEST_F(TestSimulcastEncoderAdapterFake, SupportsPerSimulcastLayerMaxFramerate) {
+ SimulcastTestFixtureImpl::DefaultSettings(
+ &codec_, static_cast<const int*>(kTestTemporalLayerProfile),
+ kVideoCodecVP8);
+ codec_.numberOfSimulcastStreams = 3;
+ codec_.simulcastStream[0].maxFramerate = 60;
+ codec_.simulcastStream[1].maxFramerate = 30;
+ codec_.simulcastStream[2].maxFramerate = 10;
+
+ EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+ ASSERT_EQ(3u, helper_->factory()->encoders().size());
+ EXPECT_EQ(60u, helper_->factory()->encoders()[0]->codec().maxFramerate);
+ EXPECT_EQ(30u, helper_->factory()->encoders()[1]->codec().maxFramerate);
+ EXPECT_EQ(10u, helper_->factory()->encoders()[2]->codec().maxFramerate);
+}
+
+TEST_F(TestSimulcastEncoderAdapterFake, CreatesEncoderOnlyIfStreamIsActive) {
+ // Legacy singlecast
+ SetupCodec(/*active_streams=*/{});
+ EXPECT_EQ(1u, helper_->factory()->encoders().size());
+
+  // Simulcast-capable underlying encoder
+ ReSetUp();
+ helper_->factory()->set_supports_simulcast(true);
+ SetupCodec(/*active_streams=*/{true, true});
+ EXPECT_EQ(1u, helper_->factory()->encoders().size());
+
+  // Multi-encoder simulcast
+ ReSetUp();
+ helper_->factory()->set_supports_simulcast(false);
+ SetupCodec(/*active_streams=*/{true, true});
+ EXPECT_EQ(2u, helper_->factory()->encoders().size());
+
+ // Singlecast via layers deactivation. Lowest layer is active.
+ ReSetUp();
+ helper_->factory()->set_supports_simulcast(false);
+ SetupCodec(/*active_streams=*/{true, false});
+ EXPECT_EQ(1u, helper_->factory()->encoders().size());
+
+ // Singlecast via layers deactivation. Highest layer is active.
+ ReSetUp();
+ helper_->factory()->set_supports_simulcast(false);
+ SetupCodec(/*active_streams=*/{false, true});
+ EXPECT_EQ(1u, helper_->factory()->encoders().size());
+}
+
+TEST_F(TestSimulcastEncoderAdapterFake,
+ RecreateEncoderIfPreferTemporalSupportIsEnabled) {
+ // Normally SEA reuses encoders. But, when TL-based SW fallback is enabled,
+ // the encoder which served the lowest stream should be recreated before it
+ // can be used to process an upper layer and vice-versa.
+ test::ScopedKeyValueConfig field_trials(
+ field_trials_, "WebRTC-Video-PreferTemporalSupportOnBaseLayer/Enabled/");
+ use_fallback_factory_ = true;
+ ReSetUp();
+
+ // Legacy singlecast
+ SetupCodec(/*active_streams=*/{});
+ ASSERT_EQ(1u, helper_->factory()->encoders().size());
+
+ // Singlecast, the lowest stream is active. Encoder should be reused.
+ MockVideoEncoder* prev_encoder = helper_->factory()->encoders()[0];
+ SetupCodec(/*active_streams=*/{true, false});
+ ASSERT_EQ(1u, helper_->factory()->encoders().size());
+ EXPECT_EQ(helper_->factory()->encoders()[0], prev_encoder);
+
+ // Singlecast, an upper stream is active. Encoder should be recreated.
+ EXPECT_CALL(*prev_encoder, Release()).Times(1);
+ SetupCodec(/*active_streams=*/{false, true});
+ ASSERT_EQ(1u, helper_->factory()->encoders().size());
+ EXPECT_NE(helper_->factory()->encoders()[0], prev_encoder);
+
+ // Singlecast, the lowest stream is active. Encoder should be recreated.
+ prev_encoder = helper_->factory()->encoders()[0];
+ EXPECT_CALL(*prev_encoder, Release()).Times(1);
+ SetupCodec(/*active_streams=*/{true, false});
+ ASSERT_EQ(1u, helper_->factory()->encoders().size());
+ EXPECT_NE(helper_->factory()->encoders()[0], prev_encoder);
+}
+
+TEST_F(TestSimulcastEncoderAdapterFake,
+ UseFallbackEncoderIfCreatePrimaryEncoderFailed) {
+ // Enable support for fallback encoder factory and re-setup.
+ use_fallback_factory_ = true;
+ SetUp();
+ SimulcastTestFixtureImpl::DefaultSettings(
+ &codec_, static_cast<const int*>(kTestTemporalLayerProfile),
+ kVideoCodecVP8);
+ codec_.numberOfSimulcastStreams = 1;
+ helper_->factory()->SetEncoderNames({"primary"});
+ helper_->fallback_factory()->SetEncoderNames({"fallback"});
+
+  // Emulate a failure when creating the primary encoder and verify that SEA
+  // switches to the fallback encoder.
+ helper_->factory()->set_create_video_encode_return_nullptr(true);
+ EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+ ASSERT_EQ(0u, helper_->factory()->encoders().size());
+ ASSERT_EQ(1u, helper_->fallback_factory()->encoders().size());
+ EXPECT_EQ("fallback", adapter_->GetEncoderInfo().implementation_name);
+}
+
+TEST_F(TestSimulcastEncoderAdapterFake,
+ InitEncodeReturnsErrorIfEncoderCannotBeCreated) {
+ // Enable support for fallback encoder factory and re-setup.
+ use_fallback_factory_ = true;
+ SetUp();
+ SimulcastTestFixtureImpl::DefaultSettings(
+ &codec_, static_cast<const int*>(kTestTemporalLayerProfile),
+ kVideoCodecVP8);
+ codec_.numberOfSimulcastStreams = 1;
+ helper_->factory()->SetEncoderNames({"primary"});
+ helper_->fallback_factory()->SetEncoderNames({"fallback"});
+
+  // Emulate failures when creating both the primary and fallback encoders and
+  // verify that `InitEncode` returns an error.
+ helper_->factory()->set_create_video_encode_return_nullptr(true);
+ helper_->fallback_factory()->set_create_video_encode_return_nullptr(true);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_MEMORY,
+ adapter_->InitEncode(&codec_, kSettings));
+}
+
+TEST_F(TestSimulcastEncoderAdapterFake, PopulatesScalabilityModeOfSubcodecs) {
+ SimulcastTestFixtureImpl::DefaultSettings(
+ &codec_, static_cast<const int*>(kTestTemporalLayerProfile),
+ kVideoCodecVP8);
+ codec_.numberOfSimulcastStreams = 3;
+ codec_.simulcastStream[0].numberOfTemporalLayers = 1;
+ codec_.simulcastStream[1].numberOfTemporalLayers = 2;
+ codec_.simulcastStream[2].numberOfTemporalLayers = 3;
+
+ EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+ ASSERT_EQ(3u, helper_->factory()->encoders().size());
+ EXPECT_EQ(helper_->factory()->encoders()[0]->codec().GetScalabilityMode(),
+ ScalabilityMode::kL1T1);
+ EXPECT_EQ(helper_->factory()->encoders()[1]->codec().GetScalabilityMode(),
+ ScalabilityMode::kL1T2);
+ EXPECT_EQ(helper_->factory()->encoders()[2]->codec().GetScalabilityMode(),
+ ScalabilityMode::kL1T3);
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/media/engine/webrtc_media_engine.cc b/third_party/libwebrtc/media/engine/webrtc_media_engine.cc
new file mode 100644
index 0000000000..99d7dd2704
--- /dev/null
+++ b/third_party/libwebrtc/media/engine/webrtc_media_engine.cc
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/engine/webrtc_media_engine.h"
+
+#include <algorithm>
+#include <map>
+#include <memory>
+#include <string>
+#include <utility>
+
+#include "absl/algorithm/container.h"
+#include "absl/strings/match.h"
+#include "api/transport/field_trial_based_config.h"
+#include "media/base/media_constants.h"
+#include "media/engine/webrtc_voice_engine.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+#ifdef HAVE_WEBRTC_VIDEO
+#include "media/engine/webrtc_video_engine.h"
+#else
+#include "media/engine/null_webrtc_video_engine.h"
+#endif
+
+namespace cricket {
+
+std::unique_ptr<MediaEngineInterface> CreateMediaEngine(
+ MediaEngineDependencies dependencies) {
+ // TODO(sprang): Make populating `dependencies.trials` mandatory and remove
+ // these fallbacks.
+ std::unique_ptr<webrtc::FieldTrialsView> fallback_trials(
+ dependencies.trials ? nullptr : new webrtc::FieldTrialBasedConfig());
+ const webrtc::FieldTrialsView& trials =
+ dependencies.trials ? *dependencies.trials : *fallback_trials;
+ auto audio_engine = std::make_unique<WebRtcVoiceEngine>(
+ dependencies.task_queue_factory, dependencies.adm.get(),
+ std::move(dependencies.audio_encoder_factory),
+ std::move(dependencies.audio_decoder_factory),
+ std::move(dependencies.audio_mixer),
+ std::move(dependencies.audio_processing),
+ dependencies.audio_frame_processor,
+ std::move(dependencies.owned_audio_frame_processor), trials);
+#ifdef HAVE_WEBRTC_VIDEO
+ auto video_engine = std::make_unique<WebRtcVideoEngine>(
+ std::move(dependencies.video_encoder_factory),
+ std::move(dependencies.video_decoder_factory), trials);
+#else
+ auto video_engine = std::make_unique<NullWebRtcVideoEngine>();
+#endif
+ return std::make_unique<CompositeMediaEngine>(std::move(fallback_trials),
+ std::move(audio_engine),
+ std::move(video_engine));
+}
+
+namespace {
+// Remove mutually exclusive extensions with lower priority.
+void DiscardRedundantExtensions(
+ std::vector<webrtc::RtpExtension>* extensions,
+ rtc::ArrayView<const char* const> extensions_decreasing_prio) {
+ RTC_DCHECK(extensions);
+ bool found = false;
+ for (const char* uri : extensions_decreasing_prio) {
+ auto it = absl::c_find_if(
+ *extensions,
+ [uri](const webrtc::RtpExtension& rhs) { return rhs.uri == uri; });
+ if (it != extensions->end()) {
+ if (found) {
+ extensions->erase(it);
+ }
+ found = true;
+ }
+ }
+}
+} // namespace
+
+bool ValidateRtpExtensions(
+ rtc::ArrayView<const webrtc::RtpExtension> extensions,
+ rtc::ArrayView<const webrtc::RtpExtension> old_extensions) {
+ bool id_used[1 + webrtc::RtpExtension::kMaxId] = {false};
+ for (const auto& extension : extensions) {
+ if (extension.id < webrtc::RtpExtension::kMinId ||
+ extension.id > webrtc::RtpExtension::kMaxId) {
+ RTC_LOG(LS_ERROR) << "Bad RTP extension ID: " << extension.ToString();
+ return false;
+ }
+ if (id_used[extension.id]) {
+ RTC_LOG(LS_ERROR) << "Duplicate RTP extension ID: "
+ << extension.ToString();
+ return false;
+ }
+ id_used[extension.id] = true;
+ }
+ // Validate the extension list against the already negotiated extensions.
+ // Re-registering is OK, re-mapping (either same URL at new ID or same
+ // ID used with new URL) is an illegal remap.
+
+ // This is required in order to avoid a crash when registering an
+ // extension. A better structure would use the registered extensions
+ // in the RTPSender. This requires spinning through:
+ //
+ // WebRtcVoiceMediaChannel::::WebRtcAudioSendStream::stream_ (pointer)
+ // AudioSendStream::rtp_rtcp_module_ (pointer)
+ // ModuleRtpRtcpImpl2::rtp_sender_ (pointer)
+ // RtpSenderContext::packet_generator (struct member)
+ // RTPSender::rtp_header_extension_map_ (class member)
+ //
+ // Getting at this seems like a hard slog.
+ if (!old_extensions.empty()) {
+ absl::string_view urimap[1 + webrtc::RtpExtension::kMaxId];
+ std::map<absl::string_view, int> idmap;
+ for (const auto& old_extension : old_extensions) {
+ urimap[old_extension.id] = old_extension.uri;
+ idmap[old_extension.uri] = old_extension.id;
+ }
+ for (const auto& extension : extensions) {
+ if (!urimap[extension.id].empty() &&
+ urimap[extension.id] != extension.uri) {
+ RTC_LOG(LS_ERROR) << "Extension negotiation failure: " << extension.id
+ << " was mapped to " << urimap[extension.id]
+ << " but is proposed changed to " << extension.uri;
+ return false;
+ }
+ const auto& it = idmap.find(extension.uri);
+ if (it != idmap.end() && it->second != extension.id) {
+ RTC_LOG(LS_ERROR) << "Extension negotation failure: " << extension.uri
+ << " was identified by " << it->second
+ << " but is proposed changed to " << extension.id;
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+std::vector<webrtc::RtpExtension> FilterRtpExtensions(
+ const std::vector<webrtc::RtpExtension>& extensions,
+ bool (*supported)(absl::string_view),
+ bool filter_redundant_extensions,
+ const webrtc::FieldTrialsView& trials) {
+ // Don't check against old parameters; this should have been done earlier.
+ RTC_DCHECK(ValidateRtpExtensions(extensions, {}));
+ RTC_DCHECK(supported);
+ std::vector<webrtc::RtpExtension> result;
+
+ // Ignore any extensions that we don't recognize.
+ for (const auto& extension : extensions) {
+ if (supported(extension.uri)) {
+ result.push_back(extension);
+ } else {
+ RTC_LOG(LS_WARNING) << "Unsupported RTP extension: "
+ << extension.ToString();
+ }
+ }
+
+ // Sort by name, ascending (prioritise encryption), so that we don't reset
+ // extensions if they were specified in a different order (also allows us
+ // to use std::unique below).
+ absl::c_sort(result, [](const webrtc::RtpExtension& rhs,
+ const webrtc::RtpExtension& lhs) {
+ return rhs.encrypt == lhs.encrypt ? rhs.uri < lhs.uri
+ : rhs.encrypt > lhs.encrypt;
+ });
+
+ // Remove unnecessary extensions (used on send side).
+ if (filter_redundant_extensions) {
+ auto it = std::unique(
+ result.begin(), result.end(),
+ [](const webrtc::RtpExtension& rhs, const webrtc::RtpExtension& lhs) {
+ return rhs.uri == lhs.uri && rhs.encrypt == lhs.encrypt;
+ });
+ result.erase(it, result.end());
+
+ // Keep just the highest priority extension of any in the following lists.
+ if (absl::StartsWith(trials.Lookup("WebRTC-FilterAbsSendTimeExtension"),
+ "Enabled")) {
+ static const char* const kBweExtensionPriorities[] = {
+ webrtc::RtpExtension::kTransportSequenceNumberUri,
+ webrtc::RtpExtension::kAbsSendTimeUri,
+ webrtc::RtpExtension::kTimestampOffsetUri};
+ DiscardRedundantExtensions(&result, kBweExtensionPriorities);
+ } else {
+ static const char* const kBweExtensionPriorities[] = {
+ webrtc::RtpExtension::kAbsSendTimeUri,
+ webrtc::RtpExtension::kTimestampOffsetUri};
+ DiscardRedundantExtensions(&result, kBweExtensionPriorities);
+ }
+ }
+ return result;
+}
+
+webrtc::BitrateConstraints GetBitrateConfigForCodec(const Codec& codec) {
+ webrtc::BitrateConstraints config;
+ int bitrate_kbps = 0;
+ if (codec.GetParam(kCodecParamMinBitrate, &bitrate_kbps) &&
+ bitrate_kbps > 0) {
+ config.min_bitrate_bps = bitrate_kbps * 1000;
+ } else {
+ config.min_bitrate_bps = 0;
+ }
+ if (codec.GetParam(kCodecParamStartBitrate, &bitrate_kbps) &&
+ bitrate_kbps > 0) {
+ config.start_bitrate_bps = bitrate_kbps * 1000;
+ } else {
+ // Do not reconfigure start bitrate unless it's specified and positive.
+ config.start_bitrate_bps = -1;
+ }
+ if (codec.GetParam(kCodecParamMaxBitrate, &bitrate_kbps) &&
+ bitrate_kbps > 0) {
+ config.max_bitrate_bps = bitrate_kbps * 1000;
+ } else {
+ config.max_bitrate_bps = -1;
+ }
+ return config;
+}
+} // namespace cricket
diff --git a/third_party/libwebrtc/media/engine/webrtc_media_engine.h b/third_party/libwebrtc/media/engine/webrtc_media_engine.h
new file mode 100644
index 0000000000..0f6dce35b5
--- /dev/null
+++ b/third_party/libwebrtc/media/engine/webrtc_media_engine.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MEDIA_ENGINE_WEBRTC_MEDIA_ENGINE_H_
+#define MEDIA_ENGINE_WEBRTC_MEDIA_ENGINE_H_
+
+#include <memory>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "api/array_view.h"
+#include "api/audio/audio_frame_processor.h"
+#include "api/audio/audio_mixer.h"
+#include "api/audio_codecs/audio_decoder_factory.h"
+#include "api/audio_codecs/audio_encoder_factory.h"
+#include "api/field_trials_view.h"
+#include "api/rtp_parameters.h"
+#include "api/scoped_refptr.h"
+#include "api/task_queue/task_queue_factory.h"
+#include "api/transport/bitrate_settings.h"
+#include "api/video_codecs/video_decoder_factory.h"
+#include "api/video_codecs/video_encoder_factory.h"
+#include "media/base/codec.h"
+#include "media/base/media_engine.h"
+#include "modules/audio_device/include/audio_device.h"
+#include "modules/audio_processing/include/audio_processing.h"
+#include "rtc_base/system/rtc_export.h"
+
+namespace cricket {
+
+struct MediaEngineDependencies {
+ MediaEngineDependencies() = default;
+ MediaEngineDependencies(const MediaEngineDependencies&) = delete;
+ MediaEngineDependencies(MediaEngineDependencies&&) = default;
+ MediaEngineDependencies& operator=(const MediaEngineDependencies&) = delete;
+ MediaEngineDependencies& operator=(MediaEngineDependencies&&) = default;
+ ~MediaEngineDependencies() = default;
+
+ webrtc::TaskQueueFactory* task_queue_factory = nullptr;
+ rtc::scoped_refptr<webrtc::AudioDeviceModule> adm;
+ rtc::scoped_refptr<webrtc::AudioEncoderFactory> audio_encoder_factory;
+ rtc::scoped_refptr<webrtc::AudioDecoderFactory> audio_decoder_factory;
+ rtc::scoped_refptr<webrtc::AudioMixer> audio_mixer;
+ rtc::scoped_refptr<webrtc::AudioProcessing> audio_processing;
+ // TODO(bugs.webrtc.org/15111):
+ // Remove the raw AudioFrameProcessor pointer in the follow-up.
+ webrtc::AudioFrameProcessor* audio_frame_processor = nullptr;
+ std::unique_ptr<webrtc::AudioFrameProcessor> owned_audio_frame_processor;
+
+ std::unique_ptr<webrtc::VideoEncoderFactory> video_encoder_factory;
+ std::unique_ptr<webrtc::VideoDecoderFactory> video_decoder_factory;
+
+ const webrtc::FieldTrialsView* trials = nullptr;
+};
+
+// CreateMediaEngine may be called on any thread, though the engine is
+// only expected to be used on one thread, internally called the "worker
+// thread". This is the thread Init must be called on.
+RTC_EXPORT std::unique_ptr<MediaEngineInterface> CreateMediaEngine(
+ MediaEngineDependencies dependencies);
+
+// Verify that extension IDs are within 1-byte extension range and are not
+// overlapping, and that they form a legal change from previously registered
+// extensions (if any).
+bool ValidateRtpExtensions(
+ rtc::ArrayView<const webrtc::RtpExtension> extennsions,
+ rtc::ArrayView<const webrtc::RtpExtension> old_extensions);
+
+// Discard any extensions not validated by the 'supported' predicate. Duplicate
+// extensions are removed if 'filter_redundant_extensions' is set, and also any
+// mutually exclusive extensions (see implementation for details) are removed.
+std::vector<webrtc::RtpExtension> FilterRtpExtensions(
+ const std::vector<webrtc::RtpExtension>& extensions,
+ bool (*supported)(absl::string_view),
+ bool filter_redundant_extensions,
+ const webrtc::FieldTrialsView& trials);
+
+webrtc::BitrateConstraints GetBitrateConfigForCodec(const Codec& codec);
+
+} // namespace cricket
+
+#endif // MEDIA_ENGINE_WEBRTC_MEDIA_ENGINE_H_
diff --git a/third_party/libwebrtc/media/engine/webrtc_media_engine_defaults.cc b/third_party/libwebrtc/media/engine/webrtc_media_engine_defaults.cc
new file mode 100644
index 0000000000..1660873e8b
--- /dev/null
+++ b/third_party/libwebrtc/media/engine/webrtc_media_engine_defaults.cc
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "media/engine/webrtc_media_engine_defaults.h"
+
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "api/audio_codecs/builtin_audio_encoder_factory.h"
+#include "api/task_queue/default_task_queue_factory.h"
+#include "api/video/builtin_video_bitrate_allocator_factory.h"
+#include "api/video_codecs/builtin_video_decoder_factory.h"
+#include "api/video_codecs/builtin_video_encoder_factory.h"
+#include "modules/audio_processing/include/audio_processing.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+void SetMediaEngineDefaults(cricket::MediaEngineDependencies* deps) {
+ RTC_DCHECK(deps);
+ if (deps->task_queue_factory == nullptr) {
+ static TaskQueueFactory* const task_queue_factory =
+ CreateDefaultTaskQueueFactory().release();
+ deps->task_queue_factory = task_queue_factory;
+ }
+ if (deps->audio_encoder_factory == nullptr)
+ deps->audio_encoder_factory = CreateBuiltinAudioEncoderFactory();
+ if (deps->audio_decoder_factory == nullptr)
+ deps->audio_decoder_factory = CreateBuiltinAudioDecoderFactory();
+ if (deps->audio_processing == nullptr)
+ deps->audio_processing = AudioProcessingBuilder().Create();
+
+ if (deps->video_encoder_factory == nullptr)
+ deps->video_encoder_factory = CreateBuiltinVideoEncoderFactory();
+ if (deps->video_decoder_factory == nullptr)
+ deps->video_decoder_factory = CreateBuiltinVideoDecoderFactory();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/media/engine/webrtc_media_engine_defaults.h b/third_party/libwebrtc/media/engine/webrtc_media_engine_defaults.h
new file mode 100644
index 0000000000..16b1d462e3
--- /dev/null
+++ b/third_party/libwebrtc/media/engine/webrtc_media_engine_defaults.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MEDIA_ENGINE_WEBRTC_MEDIA_ENGINE_DEFAULTS_H_
+#define MEDIA_ENGINE_WEBRTC_MEDIA_ENGINE_DEFAULTS_H_
+
+#include "media/engine/webrtc_media_engine.h"
+#include "rtc_base/system/rtc_export.h"
+
+namespace webrtc {
+
+// Sets required but null dependencies with default factories.
+RTC_EXPORT void SetMediaEngineDefaults(cricket::MediaEngineDependencies* deps);
+
+} // namespace webrtc
+
+#endif // MEDIA_ENGINE_WEBRTC_MEDIA_ENGINE_DEFAULTS_H_
diff --git a/third_party/libwebrtc/media/engine/webrtc_media_engine_unittest.cc b/third_party/libwebrtc/media/engine/webrtc_media_engine_unittest.cc
new file mode 100644
index 0000000000..4615f03deb
--- /dev/null
+++ b/third_party/libwebrtc/media/engine/webrtc_media_engine_unittest.cc
@@ -0,0 +1,337 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/engine/webrtc_media_engine.h"
+
+#include <memory>
+#include <string>
+#include <utility>
+
+#include "media/engine/webrtc_media_engine_defaults.h"
+#include "test/gtest.h"
+#include "test/scoped_key_value_config.h"
+
+using webrtc::RtpExtension;
+
+namespace cricket {
+namespace {
+
+std::vector<RtpExtension> MakeUniqueExtensions() {
+ std::vector<RtpExtension> result;
+ char name[] = "a";
+ for (int i = 0; i < 7; ++i) {
+ result.push_back(RtpExtension(name, 1 + i));
+ name[0]++;
+ result.push_back(RtpExtension(name, 255 - i));
+ name[0]++;
+ }
+ return result;
+}
+
+std::vector<RtpExtension> MakeRedundantExtensions() {
+ std::vector<RtpExtension> result;
+ char name[] = "a";
+ for (int i = 0; i < 7; ++i) {
+ result.push_back(RtpExtension(name, 1 + i));
+ result.push_back(RtpExtension(name, 255 - i));
+ name[0]++;
+ }
+ return result;
+}
+
+bool SupportedExtensions1(absl::string_view name) {
+ return name == "c" || name == "i";
+}
+
+bool SupportedExtensions2(absl::string_view name) {
+ return name != "a" && name != "n";
+}
+
+bool IsSorted(const std::vector<webrtc::RtpExtension>& extensions) {
+ const std::string* last = nullptr;
+ for (const auto& extension : extensions) {
+ if (last && *last > extension.uri) {
+ return false;
+ }
+ last = &extension.uri;
+ }
+ return true;
+}
+} // namespace
+
+TEST(WebRtcMediaEngineTest, ValidateRtpExtensionsEmptyList) {
+ std::vector<RtpExtension> extensions;
+ EXPECT_TRUE(ValidateRtpExtensions(extensions, {}));
+}
+
+TEST(WebRtcMediaEngineTest, ValidateRtpExtensionsAllGood) {
+ std::vector<RtpExtension> extensions = MakeUniqueExtensions();
+ EXPECT_TRUE(ValidateRtpExtensions(extensions, {}));
+}
+
+TEST(WebRtcMediaEngineTest, ValidateRtpExtensionsOutOfRangeId_Low) {
+ std::vector<RtpExtension> extensions = MakeUniqueExtensions();
+ extensions.push_back(RtpExtension("foo", 0));
+ EXPECT_FALSE(ValidateRtpExtensions(extensions, {}));
+}
+
+TEST(WebRtcMediaEngineTest, ValidateRtpExtensionsOutOfRangeIdHigh) {
+ std::vector<RtpExtension> extensions = MakeUniqueExtensions();
+ extensions.push_back(RtpExtension("foo", 256));
+ EXPECT_FALSE(ValidateRtpExtensions(extensions, {}));
+}
+
+TEST(WebRtcMediaEngineTest, ValidateRtpExtensionsOverlappingIdsStartOfSet) {
+ std::vector<RtpExtension> extensions = MakeUniqueExtensions();
+ extensions.push_back(RtpExtension("foo", 1));
+ EXPECT_FALSE(ValidateRtpExtensions(extensions, {}));
+}
+
+TEST(WebRtcMediaEngineTest, ValidateRtpExtensionsOverlappingIdsEndOfSet) {
+ std::vector<RtpExtension> extensions = MakeUniqueExtensions();
+ extensions.push_back(RtpExtension("foo", 255));
+ EXPECT_FALSE(ValidateRtpExtensions(extensions, {}));
+}
+
+TEST(WebRtcMediaEngineTest, ValidateRtpExtensionsEmptyToEmpty) {
+ std::vector<RtpExtension> extensions;
+ EXPECT_TRUE(ValidateRtpExtensions(extensions, extensions));
+}
+
+TEST(WebRtcMediaEngineTest, ValidateRtpExtensionsNoChange) {
+ std::vector<RtpExtension> extensions = MakeUniqueExtensions();
+ EXPECT_TRUE(ValidateRtpExtensions(extensions, extensions));
+}
+
+TEST(WebRtcMediaEngineTest, ValidateRtpExtensionsChangeIdNotUrl) {
+ std::vector<RtpExtension> old_extensions = MakeUniqueExtensions();
+ std::vector<RtpExtension> new_extensions = old_extensions;
+ std::swap(new_extensions[0].id, new_extensions[1].id);
+
+ EXPECT_FALSE(ValidateRtpExtensions(new_extensions, old_extensions));
+}
+
+TEST(WebRtcMediaEngineTest, ValidateRtpExtensionsChangeIdForUrl) {
+ std::vector<RtpExtension> old_extensions = MakeUniqueExtensions();
+ std::vector<RtpExtension> new_extensions = old_extensions;
+ // Change first extension to something not generated by MakeUniqueExtensions
+ new_extensions[0].id = 123;
+
+ EXPECT_FALSE(ValidateRtpExtensions(new_extensions, old_extensions));
+}
+
+TEST(WebRtcMediaEngineTest, FilterRtpExtensionsEmptyList) {
+ std::vector<RtpExtension> extensions;
+ webrtc::test::ScopedKeyValueConfig trials;
+ std::vector<webrtc::RtpExtension> filtered =
+ FilterRtpExtensions(extensions, SupportedExtensions1, true, trials);
+ EXPECT_EQ(0u, filtered.size());
+}
+
+TEST(WebRtcMediaEngineTest, FilterRtpExtensionsIncludeOnlySupported) {
+ std::vector<RtpExtension> extensions = MakeUniqueExtensions();
+ webrtc::test::ScopedKeyValueConfig trials;
+ std::vector<webrtc::RtpExtension> filtered =
+ FilterRtpExtensions(extensions, SupportedExtensions1, false, trials);
+ EXPECT_EQ(2u, filtered.size());
+ EXPECT_EQ("c", filtered[0].uri);
+ EXPECT_EQ("i", filtered[1].uri);
+}
+
+TEST(WebRtcMediaEngineTest, FilterRtpExtensionsSortedByName1) {
+ std::vector<RtpExtension> extensions = MakeUniqueExtensions();
+ webrtc::test::ScopedKeyValueConfig trials;
+ std::vector<webrtc::RtpExtension> filtered =
+ FilterRtpExtensions(extensions, SupportedExtensions2, false, trials);
+ EXPECT_EQ(12u, filtered.size());
+ EXPECT_TRUE(IsSorted(filtered));
+}
+
+TEST(WebRtcMediaEngineTest, FilterRtpExtensionsSortedByName2) {
+ std::vector<RtpExtension> extensions = MakeUniqueExtensions();
+ webrtc::test::ScopedKeyValueConfig trials;
+ std::vector<webrtc::RtpExtension> filtered =
+ FilterRtpExtensions(extensions, SupportedExtensions2, true, trials);
+ EXPECT_EQ(12u, filtered.size());
+ EXPECT_TRUE(IsSorted(filtered));
+}
+
+TEST(WebRtcMediaEngineTest, FilterRtpExtensionsDontRemoveRedundant) {
+ std::vector<RtpExtension> extensions = MakeRedundantExtensions();
+ webrtc::test::ScopedKeyValueConfig trials;
+ std::vector<webrtc::RtpExtension> filtered =
+ FilterRtpExtensions(extensions, SupportedExtensions2, false, trials);
+ EXPECT_EQ(12u, filtered.size());
+ EXPECT_TRUE(IsSorted(filtered));
+ EXPECT_EQ(filtered[0].uri, filtered[1].uri);
+}
+
+TEST(WebRtcMediaEngineTest, FilterRtpExtensionsRemoveRedundant) {
+ std::vector<RtpExtension> extensions = MakeRedundantExtensions();
+ webrtc::test::ScopedKeyValueConfig trials;
+ std::vector<webrtc::RtpExtension> filtered =
+ FilterRtpExtensions(extensions, SupportedExtensions2, true, trials);
+ EXPECT_EQ(6u, filtered.size());
+ EXPECT_TRUE(IsSorted(filtered));
+ EXPECT_NE(filtered[0].uri, filtered[1].uri);
+}
+
+TEST(WebRtcMediaEngineTest, FilterRtpExtensionsRemoveRedundantEncrypted1) {
+ std::vector<RtpExtension> extensions;
+ extensions.push_back(webrtc::RtpExtension("b", 1));
+ extensions.push_back(webrtc::RtpExtension("b", 2, true));
+ extensions.push_back(webrtc::RtpExtension("c", 3));
+ extensions.push_back(webrtc::RtpExtension("b", 4));
+ webrtc::test::ScopedKeyValueConfig trials;
+ std::vector<webrtc::RtpExtension> filtered =
+ FilterRtpExtensions(extensions, SupportedExtensions2, true, trials);
+ EXPECT_EQ(3u, filtered.size());
+ EXPECT_TRUE(IsSorted(filtered));
+ EXPECT_EQ(filtered[0].uri, filtered[1].uri);
+ EXPECT_NE(filtered[0].encrypt, filtered[1].encrypt);
+ EXPECT_NE(filtered[0].uri, filtered[2].uri);
+ EXPECT_NE(filtered[1].uri, filtered[2].uri);
+}
+
+TEST(WebRtcMediaEngineTest, FilterRtpExtensionsRemoveRedundantEncrypted2) {
+ std::vector<RtpExtension> extensions;
+ extensions.push_back(webrtc::RtpExtension("b", 1, true));
+ extensions.push_back(webrtc::RtpExtension("b", 2));
+ extensions.push_back(webrtc::RtpExtension("c", 3));
+ extensions.push_back(webrtc::RtpExtension("b", 4));
+ webrtc::test::ScopedKeyValueConfig trials;
+ std::vector<webrtc::RtpExtension> filtered =
+ FilterRtpExtensions(extensions, SupportedExtensions2, true, trials);
+ EXPECT_EQ(3u, filtered.size());
+ EXPECT_TRUE(IsSorted(filtered));
+ EXPECT_EQ(filtered[0].uri, filtered[1].uri);
+ EXPECT_NE(filtered[0].encrypt, filtered[1].encrypt);
+ EXPECT_NE(filtered[0].uri, filtered[2].uri);
+ EXPECT_NE(filtered[1].uri, filtered[2].uri);
+}
+
+TEST(WebRtcMediaEngineTest, FilterRtpExtensionsRemoveRedundantBwe1) {
+ webrtc::test::ScopedKeyValueConfig trials(
+ "WebRTC-FilterAbsSendTimeExtension/Enabled/");
+ std::vector<RtpExtension> extensions;
+ extensions.push_back(
+ RtpExtension(RtpExtension::kTransportSequenceNumberUri, 3));
+ extensions.push_back(RtpExtension(RtpExtension::kTimestampOffsetUri, 9));
+ extensions.push_back(RtpExtension(RtpExtension::kAbsSendTimeUri, 6));
+ extensions.push_back(
+ RtpExtension(RtpExtension::kTransportSequenceNumberUri, 1));
+ extensions.push_back(RtpExtension(RtpExtension::kTimestampOffsetUri, 14));
+ std::vector<webrtc::RtpExtension> filtered =
+ FilterRtpExtensions(extensions, SupportedExtensions2, true, trials);
+ EXPECT_EQ(1u, filtered.size());
+ EXPECT_EQ(RtpExtension::kTransportSequenceNumberUri, filtered[0].uri);
+}
+
+TEST(WebRtcMediaEngineTest,
+ FilterRtpExtensionsRemoveRedundantBwe1KeepAbsSendTime) {
+ std::vector<RtpExtension> extensions;
+ extensions.push_back(
+ RtpExtension(RtpExtension::kTransportSequenceNumberUri, 3));
+ extensions.push_back(RtpExtension(RtpExtension::kTimestampOffsetUri, 9));
+ extensions.push_back(RtpExtension(RtpExtension::kAbsSendTimeUri, 6));
+ extensions.push_back(
+ RtpExtension(RtpExtension::kTransportSequenceNumberUri, 1));
+ extensions.push_back(RtpExtension(RtpExtension::kTimestampOffsetUri, 14));
+ webrtc::test::ScopedKeyValueConfig trials;
+ std::vector<webrtc::RtpExtension> filtered =
+ FilterRtpExtensions(extensions, SupportedExtensions2, true, trials);
+ EXPECT_EQ(2u, filtered.size());
+ EXPECT_EQ(RtpExtension::kTransportSequenceNumberUri, filtered[0].uri);
+ EXPECT_EQ(RtpExtension::kAbsSendTimeUri, filtered[1].uri);
+}
+
+TEST(WebRtcMediaEngineTest, FilterRtpExtensionsRemoveRedundantBweEncrypted1) {
+ webrtc::test::ScopedKeyValueConfig trials(
+ "WebRTC-FilterAbsSendTimeExtension/Enabled/");
+ std::vector<RtpExtension> extensions;
+ extensions.push_back(
+ RtpExtension(RtpExtension::kTransportSequenceNumberUri, 3));
+ extensions.push_back(
+ RtpExtension(RtpExtension::kTransportSequenceNumberUri, 4, true));
+ extensions.push_back(RtpExtension(RtpExtension::kTimestampOffsetUri, 9));
+ extensions.push_back(RtpExtension(RtpExtension::kAbsSendTimeUri, 6));
+ extensions.push_back(
+ RtpExtension(RtpExtension::kTransportSequenceNumberUri, 1));
+ extensions.push_back(
+ RtpExtension(RtpExtension::kTransportSequenceNumberUri, 2, true));
+ extensions.push_back(RtpExtension(RtpExtension::kTimestampOffsetUri, 14));
+ std::vector<webrtc::RtpExtension> filtered =
+ FilterRtpExtensions(extensions, SupportedExtensions2, true, trials);
+ EXPECT_EQ(2u, filtered.size());
+ EXPECT_EQ(RtpExtension::kTransportSequenceNumberUri, filtered[0].uri);
+ EXPECT_EQ(RtpExtension::kTransportSequenceNumberUri, filtered[1].uri);
+ EXPECT_NE(filtered[0].encrypt, filtered[1].encrypt);
+}
+
+TEST(WebRtcMediaEngineTest,
+ FilterRtpExtensionsRemoveRedundantBweEncrypted1KeepAbsSendTime) {
+ std::vector<RtpExtension> extensions;
+ extensions.push_back(
+ RtpExtension(RtpExtension::kTransportSequenceNumberUri, 3));
+ extensions.push_back(
+ RtpExtension(RtpExtension::kTransportSequenceNumberUri, 4, true));
+ extensions.push_back(RtpExtension(RtpExtension::kTimestampOffsetUri, 9));
+ extensions.push_back(RtpExtension(RtpExtension::kAbsSendTimeUri, 6));
+ extensions.push_back(
+ RtpExtension(RtpExtension::kTransportSequenceNumberUri, 1));
+ extensions.push_back(
+ RtpExtension(RtpExtension::kTransportSequenceNumberUri, 2, true));
+ extensions.push_back(RtpExtension(RtpExtension::kTimestampOffsetUri, 14));
+ webrtc::test::ScopedKeyValueConfig trials;
+ std::vector<webrtc::RtpExtension> filtered =
+ FilterRtpExtensions(extensions, SupportedExtensions2, true, trials);
+ EXPECT_EQ(3u, filtered.size());
+ EXPECT_EQ(RtpExtension::kTransportSequenceNumberUri, filtered[0].uri);
+ EXPECT_EQ(RtpExtension::kTransportSequenceNumberUri, filtered[1].uri);
+ EXPECT_EQ(RtpExtension::kAbsSendTimeUri, filtered[2].uri);
+ EXPECT_NE(filtered[0].encrypt, filtered[1].encrypt);
+}
+
+TEST(WebRtcMediaEngineTest, FilterRtpExtensionsRemoveRedundantBwe2) {
+ std::vector<RtpExtension> extensions;
+ extensions.push_back(RtpExtension(RtpExtension::kTimestampOffsetUri, 1));
+ extensions.push_back(RtpExtension(RtpExtension::kAbsSendTimeUri, 14));
+ extensions.push_back(RtpExtension(RtpExtension::kTimestampOffsetUri, 7));
+ webrtc::test::ScopedKeyValueConfig trials;
+ std::vector<webrtc::RtpExtension> filtered =
+ FilterRtpExtensions(extensions, SupportedExtensions2, true, trials);
+ EXPECT_EQ(1u, filtered.size());
+ EXPECT_EQ(RtpExtension::kAbsSendTimeUri, filtered[0].uri);
+}
+
+TEST(WebRtcMediaEngineTest, FilterRtpExtensionsRemoveRedundantBwe3) {
+ std::vector<RtpExtension> extensions;
+ extensions.push_back(RtpExtension(RtpExtension::kTimestampOffsetUri, 2));
+ extensions.push_back(RtpExtension(RtpExtension::kTimestampOffsetUri, 14));
+ webrtc::test::ScopedKeyValueConfig trials;
+ std::vector<webrtc::RtpExtension> filtered =
+ FilterRtpExtensions(extensions, SupportedExtensions2, true, trials);
+ EXPECT_EQ(1u, filtered.size());
+ EXPECT_EQ(RtpExtension::kTimestampOffsetUri, filtered[0].uri);
+}
+
+TEST(WebRtcMediaEngineTest, Create) {
+ MediaEngineDependencies deps;
+ webrtc::SetMediaEngineDefaults(&deps);
+ webrtc::test::ScopedKeyValueConfig trials;
+ deps.trials = &trials;
+
+ std::unique_ptr<MediaEngineInterface> engine =
+ CreateMediaEngine(std::move(deps));
+
+ EXPECT_TRUE(engine);
+}
+
+} // namespace cricket
diff --git a/third_party/libwebrtc/media/engine/webrtc_video_engine.cc b/third_party/libwebrtc/media/engine/webrtc_video_engine.cc
new file mode 100644
index 0000000000..8a9d6ff95c
--- /dev/null
+++ b/third_party/libwebrtc/media/engine/webrtc_video_engine.cc
@@ -0,0 +1,3943 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/engine/webrtc_video_engine.h"
+
+#include <stdio.h>
+
+#include <algorithm>
+#include <cstdint>
+#include <initializer_list>
+#include <set>
+#include <string>
+#include <type_traits>
+#include <utility>
+
+#include "absl/algorithm/container.h"
+#include "absl/container/inlined_vector.h"
+#include "absl/functional/bind_front.h"
+#include "absl/strings/match.h"
+#include "absl/types/optional.h"
+#include "api/make_ref_counted.h"
+#include "api/media_stream_interface.h"
+#include "api/media_types.h"
+#include "api/priority.h"
+#include "api/rtc_error.h"
+#include "api/rtp_transceiver_direction.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "api/video/resolution.h"
+#include "api/video/video_codec_type.h"
+#include "api/video_codecs/scalability_mode.h"
+#include "api/video_codecs/sdp_video_format.h"
+#include "api/video_codecs/video_codec.h"
+#include "api/video_codecs/video_decoder_factory.h"
+#include "api/video_codecs/video_encoder.h"
+#include "api/video_codecs/video_encoder_factory.h"
+#include "call/call.h"
+#include "call/packet_receiver.h"
+#include "call/receive_stream.h"
+#include "call/rtp_transport_controller_send_interface.h"
+#include "common_video/frame_counts.h"
+#include "common_video/include/quality_limitation_reason.h"
+#include "media/base/codec.h"
+#include "media/base/media_channel.h"
+#include "media/base/media_constants.h"
+#include "media/base/rid_description.h"
+#include "media/base/rtp_utils.h"
+#include "media/engine/webrtc_media_engine.h"
+#include "modules/rtp_rtcp/include/receive_statistics.h"
+#include "modules/rtp_rtcp/include/report_block_data.h"
+#include "modules/rtp_rtcp/include/rtcp_statistics.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/rtp_util.h"
+#include "modules/video_coding/svc/scalability_mode_util.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/dscp.h"
+#include "rtc_base/experiments/field_trial_parser.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/socket.h"
+#include "rtc_base/strings/string_builder.h"
+#include "rtc_base/time_utils.h"
+#include "rtc_base/trace_event.h"
+
+namespace cricket {
+
+namespace {
+
+using ::webrtc::ParseRtpPayloadType;
+using ::webrtc::ParseRtpSsrc;
+
+constexpr int64_t kUnsignaledSsrcCooldownMs = rtc::kNumMillisecsPerSec / 2;
+
+// TODO(bugs.webrtc.org/13166): Remove AV1X when backwards compatibility is not
+// needed.
+constexpr char kAv1xCodecName[] = "AV1X";
+
+// This constant is really an on/off, lower-level configurable NACK history
+// duration hasn't been implemented.
+const int kNackHistoryMs = 1000;
+
+const int kDefaultRtcpReceiverReportSsrc = 1;
+
+// Minimum time interval for logging stats.
+const int64_t kStatsLogIntervalMs = 10000;
+
+const char* StreamTypeToString(
+ webrtc::VideoSendStream::StreamStats::StreamType type) {
+ switch (type) {
+ case webrtc::VideoSendStream::StreamStats::StreamType::kMedia:
+ return "kMedia";
+ case webrtc::VideoSendStream::StreamStats::StreamType::kRtx:
+ return "kRtx";
+ case webrtc::VideoSendStream::StreamStats::StreamType::kFlexfec:
+ return "kFlexfec";
+ }
+ return nullptr;
+}
+
+bool IsEnabled(const webrtc::FieldTrialsView& trials, absl::string_view name) {
+ return absl::StartsWith(trials.Lookup(name), "Enabled");
+}
+
+bool IsDisabled(const webrtc::FieldTrialsView& trials, absl::string_view name) {
+ return absl::StartsWith(trials.Lookup(name), "Disabled");
+}
+
+void AddDefaultFeedbackParams(VideoCodec* codec,
+ const webrtc::FieldTrialsView& trials) {
+ // Don't add any feedback params for RED and ULPFEC.
+ if (codec->name == kRedCodecName || codec->name == kUlpfecCodecName)
+ return;
+ codec->AddFeedbackParam(FeedbackParam(kRtcpFbParamRemb, kParamValueEmpty));
+ codec->AddFeedbackParam(
+ FeedbackParam(kRtcpFbParamTransportCc, kParamValueEmpty));
+ // Don't add any more feedback params for FLEXFEC.
+ if (codec->name == kFlexfecCodecName)
+ return;
+ codec->AddFeedbackParam(FeedbackParam(kRtcpFbParamCcm, kRtcpFbCcmParamFir));
+ codec->AddFeedbackParam(FeedbackParam(kRtcpFbParamNack, kParamValueEmpty));
+ codec->AddFeedbackParam(FeedbackParam(kRtcpFbParamNack, kRtcpFbNackParamPli));
+ if (codec->name == kVp8CodecName &&
+ IsEnabled(trials, "WebRTC-RtcpLossNotification")) {
+ codec->AddFeedbackParam(FeedbackParam(kRtcpFbParamLntf, kParamValueEmpty));
+ }
+}
+
+// Helper function to determine whether a codec should use the [35, 63] range.
+// Should be used when adding new codecs (or variants).
+bool IsCodecValidForLowerRange(const VideoCodec& codec) {
+ if (absl::EqualsIgnoreCase(codec.name, kFlexfecCodecName) ||
+ absl::EqualsIgnoreCase(codec.name, kAv1CodecName) ||
+ absl::EqualsIgnoreCase(codec.name, kAv1xCodecName)) {
+ return true;
+ } else if (absl::EqualsIgnoreCase(codec.name, kH264CodecName)) {
+ std::string profile_level_id;
+ std::string packetization_mode;
+
+ if (codec.GetParam(kH264FmtpProfileLevelId, &profile_level_id)) {
+ if (absl::StartsWithIgnoreCase(profile_level_id, "4d00")) {
+ if (codec.GetParam(kH264FmtpPacketizationMode, &packetization_mode)) {
+ return packetization_mode == "0";
+ }
+ }
+ // H264 with YUV444.
+ return absl::StartsWithIgnoreCase(profile_level_id, "f400");
+ }
+ } else if (absl::EqualsIgnoreCase(codec.name, kVp9CodecName)) {
+ std::string profile_id;
+
+ if (codec.GetParam(kVP9ProfileId, &profile_id)) {
+ if (profile_id.compare("1") == 0 || profile_id.compare("3") == 0) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+// This function will assign dynamic payload types (in the range [96, 127]
+// and then [35, 63]) to the input codecs, and also add ULPFEC, RED, FlexFEC,
+// and associated RTX codecs for recognized codecs (VP8, VP9, H264, and RED).
+// It will also add default feedback params to the codecs.
+// is_decoder_factory is needed to keep track of the implicit assumption that any
+// H264 decoder also supports constrained base line profile.
+// Also, is_decoder_factory is used to decide whether FlexFEC video format
+// should be advertised as supported.
+// TODO(kron): Perhaps it is better to move the implicit knowledge to the place
+// where codecs are negotiated.
+template <class T>
+std::vector<VideoCodec> GetPayloadTypesAndDefaultCodecs(
+ const T* factory,
+ bool is_decoder_factory,
+ bool include_rtx,
+ const webrtc::FieldTrialsView& trials) {
+ if (!factory) {
+ return {};
+ }
+
+ std::vector<webrtc::SdpVideoFormat> supported_formats =
+ factory->GetSupportedFormats();
+ if (is_decoder_factory) {
+ AddH264ConstrainedBaselineProfileToSupportedFormats(&supported_formats);
+ }
+
+ if (supported_formats.empty())
+ return std::vector<VideoCodec>();
+
+ supported_formats.push_back(webrtc::SdpVideoFormat(kRedCodecName));
+ supported_formats.push_back(webrtc::SdpVideoFormat(kUlpfecCodecName));
+
+ // flexfec-03 is always supported as receive codec and as send codec
+ // only if WebRTC-FlexFEC-03-Advertised is enabled
+ if (is_decoder_factory || IsEnabled(trials, "WebRTC-FlexFEC-03-Advertised")) {
+ webrtc::SdpVideoFormat flexfec_format(kFlexfecCodecName);
+ // This value is currently arbitrarily set to 10 seconds. (The unit
+ // is microseconds.) This parameter MUST be present in the SDP, but
+ // we never use the actual value anywhere in our code however.
+ // TODO(brandtr): Consider honouring this value in the sender and receiver.
+ flexfec_format.parameters = {{kFlexfecFmtpRepairWindow, "10000000"}};
+ supported_formats.push_back(flexfec_format);
+ }
+
+ // Due to interoperability issues with old Chrome/WebRTC versions that
+ // ignore the [35, 63] range prefer the lower range for new codecs.
+ static const int kFirstDynamicPayloadTypeLowerRange = 35;
+ static const int kLastDynamicPayloadTypeLowerRange = 63;
+
+ static const int kFirstDynamicPayloadTypeUpperRange = 96;
+ static const int kLastDynamicPayloadTypeUpperRange = 127;
+ int payload_type_upper = kFirstDynamicPayloadTypeUpperRange;
+ int payload_type_lower = kFirstDynamicPayloadTypeLowerRange;
+
+ std::vector<VideoCodec> output_codecs;
+ for (const webrtc::SdpVideoFormat& format : supported_formats) {
+ VideoCodec codec = cricket::CreateVideoCodec(format);
+ bool isFecCodec = absl::EqualsIgnoreCase(codec.name, kUlpfecCodecName) ||
+ absl::EqualsIgnoreCase(codec.name, kFlexfecCodecName);
+
+ // Check if we ran out of payload types.
+ if (payload_type_lower > kLastDynamicPayloadTypeLowerRange) {
+ // TODO(https://bugs.chromium.org/p/webrtc/issues/detail?id=12248):
+ // return an error.
+ RTC_LOG(LS_ERROR) << "Out of dynamic payload types [35,63] after "
+ "fallback from [96, 127], skipping the rest.";
+ RTC_DCHECK_EQ(payload_type_upper, kLastDynamicPayloadTypeUpperRange);
+ break;
+ }
+
+ // Lower range gets used for "new" codecs or when running out of payload
+ // types in the upper range.
+ if (IsCodecValidForLowerRange(codec) ||
+ payload_type_upper >= kLastDynamicPayloadTypeUpperRange) {
+ codec.id = payload_type_lower++;
+ } else {
+ codec.id = payload_type_upper++;
+ }
+ AddDefaultFeedbackParams(&codec, trials);
+ output_codecs.push_back(codec);
+
+ // Add associated RTX codec for non-FEC codecs.
+ if (include_rtx) {
+ if (!isFecCodec) {
+ // Check if we ran out of payload types.
+ if (payload_type_lower > kLastDynamicPayloadTypeLowerRange) {
+ // TODO(https://bugs.chromium.org/p/webrtc/issues/detail?id=12248):
+ // return an error.
+ RTC_LOG(LS_ERROR) << "Out of dynamic payload types [35,63] after "
+ "fallback from [96, 127], skipping the rest.";
+ RTC_DCHECK_EQ(payload_type_upper, kLastDynamicPayloadTypeUpperRange);
+ break;
+ }
+ if (IsCodecValidForLowerRange(codec) ||
+ payload_type_upper >= kLastDynamicPayloadTypeUpperRange) {
+ output_codecs.push_back(
+ cricket::CreateVideoRtxCodec(payload_type_lower++, codec.id));
+ } else {
+ output_codecs.push_back(
+ cricket::CreateVideoRtxCodec(payload_type_upper++, codec.id));
+ }
+ }
+ }
+ }
+ return output_codecs;
+}
+
+static std::string CodecVectorToString(const std::vector<VideoCodec>& codecs) {
+ rtc::StringBuilder out;
+ out << "{";
+ for (size_t i = 0; i < codecs.size(); ++i) {
+ out << codecs[i].ToString();
+ if (i != codecs.size() - 1) {
+ out << ", ";
+ }
+ }
+ out << "}";
+ return out.Release();
+}
+
+static bool ValidateCodecFormats(const std::vector<VideoCodec>& codecs) {
+ bool has_video = false;
+ for (size_t i = 0; i < codecs.size(); ++i) {
+ if (!codecs[i].ValidateCodecFormat()) {
+ return false;
+ }
+ if (codecs[i].IsMediaCodec()) {
+ has_video = true;
+ }
+ }
+ if (!has_video) {
+ RTC_LOG(LS_ERROR) << "Setting codecs without a video codec is invalid: "
+ << CodecVectorToString(codecs);
+ return false;
+ }
+ return true;
+}
+
+static bool ValidateStreamParams(const StreamParams& sp) {
+ if (sp.ssrcs.empty()) {
+ RTC_LOG(LS_ERROR) << "No SSRCs in stream parameters: " << sp.ToString();
+ return false;
+ }
+
+ // Validate that a primary SSRC can only have one ssrc-group per semantics.
+ std::map<uint32_t, std::set<std::string>> primary_ssrc_to_semantics;
+ for (const auto& group : sp.ssrc_groups) {
+ auto result = primary_ssrc_to_semantics.try_emplace(
+ group.ssrcs[0], std::set<std::string>({group.semantics}));
+ if (!result.second) {
+ // A duplicate SSRC was found, check for duplicate semantics.
+ auto semantics_it = result.first->second.insert(group.semantics);
+ if (!semantics_it.second) {
+ RTC_LOG(LS_ERROR) << "Duplicate ssrc-group '" << group.semantics
+ << " for primary SSRC " << group.ssrcs[0] << " "
+ << sp.ToString();
+ return false;
+ }
+ }
+ }
+
+ std::vector<uint32_t> primary_ssrcs;
+ sp.GetPrimarySsrcs(&primary_ssrcs);
+ for (const auto& semantic :
+ {kFidSsrcGroupSemantics, kFecFrSsrcGroupSemantics}) {
+ if (!sp.has_ssrc_group(semantic)) {
+ continue;
+ }
+ std::vector<uint32_t> secondary_ssrcs;
+ sp.GetSecondarySsrcs(semantic, primary_ssrcs, &secondary_ssrcs);
+ for (uint32_t secondary_ssrc : secondary_ssrcs) {
+ bool secondary_ssrc_present = false;
+ for (uint32_t sp_ssrc : sp.ssrcs) {
+ if (sp_ssrc == secondary_ssrc) {
+ secondary_ssrc_present = true;
+ break;
+ }
+ }
+ if (!secondary_ssrc_present) {
+ RTC_LOG(LS_ERROR) << "SSRC '" << secondary_ssrc
+ << "' missing from StreamParams ssrcs with semantics "
+ << semantic << ": " << sp.ToString();
+ return false;
+ }
+ }
+ if (!secondary_ssrcs.empty() &&
+ primary_ssrcs.size() != secondary_ssrcs.size()) {
+ RTC_LOG(LS_ERROR)
+ << semantic
+ << " secondary SSRCs exist, but don't cover all SSRCs (unsupported): "
+ << sp.ToString();
+ return false;
+ }
+ }
+ for (const auto& group : sp.ssrc_groups) {
+ if (!(group.semantics == kFidSsrcGroupSemantics ||
+ group.semantics == kSimSsrcGroupSemantics ||
+ group.semantics == kFecFrSsrcGroupSemantics)) {
+ continue;
+ }
+ for (uint32_t group_ssrc : group.ssrcs) {
+ auto it = absl::c_find_if(sp.ssrcs, [&group_ssrc](uint32_t ssrc) {
+ return ssrc == group_ssrc;
+ });
+ if (it == sp.ssrcs.end()) {
+ RTC_LOG(LS_ERROR) << "SSRC '" << group_ssrc
+ << "' missing from StreamParams ssrcs with semantics "
+ << group.semantics << ": " << sp.ToString();
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+// Returns true if the given codec is disallowed from doing simulcast.
+bool IsCodecDisabledForSimulcast(bool legacy_scalability_mode,
+ webrtc::VideoCodecType codec_type) {
+ if (legacy_scalability_mode && (codec_type == webrtc::kVideoCodecVP9 ||
+ codec_type == webrtc::kVideoCodecAV1)) {
+ return true;
+ }
+
+ return false;
+}
+
+bool IsLayerActive(const webrtc::RtpEncodingParameters& layer) {
+ return layer.active &&
+ (!layer.max_bitrate_bps || *layer.max_bitrate_bps > 0) &&
+ (!layer.max_framerate || *layer.max_framerate > 0);
+}
+
+int NumActiveStreams(const webrtc::RtpParameters& rtp_parameters) {
+ int res = 0;
+ for (size_t i = 0; i < rtp_parameters.encodings.size(); ++i) {
+ if (rtp_parameters.encodings[i].active) {
+ ++res;
+ }
+ }
+ return res;
+}
+
+absl::optional<int> NumSpatialLayersFromEncoding(
+ const webrtc::RtpParameters& rtp_parameters,
+ size_t idx) {
+ if (idx >= rtp_parameters.encodings.size())
+ return absl::nullopt;
+
+ absl::optional<webrtc::ScalabilityMode> scalability_mode =
+ webrtc::ScalabilityModeFromString(
+ rtp_parameters.encodings[idx].scalability_mode.value_or(""));
+ return scalability_mode
+ ? absl::optional<int>(
+ ScalabilityModeToNumSpatialLayers(*scalability_mode))
+ : absl::nullopt;
+}
+
// Collapses per-SSRC substream stats into one entry per outbound media
// stream: RTX and FlexFEC substream stats are folded into the media
// substream they protect (only `rtp_stats` is merged).
std::map<uint32_t, webrtc::VideoSendStream::StreamStats>
MergeInfoAboutOutboundRtpSubstreams(
    const std::map<uint32_t, webrtc::VideoSendStream::StreamStats>&
        substreams) {
  std::map<uint32_t, webrtc::VideoSendStream::StreamStats> rtp_substreams;
  // Add substreams for all RTP media streams.
  for (const auto& pair : substreams) {
    uint32_t ssrc = pair.first;
    const webrtc::VideoSendStream::StreamStats& substream = pair.second;
    switch (substream.type) {
      case webrtc::VideoSendStream::StreamStats::StreamType::kMedia:
        break;
      case webrtc::VideoSendStream::StreamStats::StreamType::kRtx:
      case webrtc::VideoSendStream::StreamStats::StreamType::kFlexfec:
        continue;
    }
    rtp_substreams.insert(std::make_pair(ssrc, substream));
  }
  // Complement the kMedia substream stats with the associated kRtx and kFlexfec
  // substream stats.
  for (const auto& pair : substreams) {
    switch (pair.second.type) {
      case webrtc::VideoSendStream::StreamStats::StreamType::kMedia:
        continue;
      case webrtc::VideoSendStream::StreamStats::StreamType::kRtx:
      case webrtc::VideoSendStream::StreamStats::StreamType::kFlexfec:
        break;
    }
    // The associated substream is an RTX or FlexFEC substream that is
    // referencing an RTP media substream.
    const webrtc::VideoSendStream::StreamStats& associated_substream =
        pair.second;
    RTC_DCHECK(associated_substream.referenced_media_ssrc.has_value());
    uint32_t media_ssrc = associated_substream.referenced_media_ssrc.value();
    if (substreams.find(media_ssrc) == substreams.end()) {
      RTC_LOG(LS_WARNING) << "Substream [ssrc: " << pair.first << ", type: "
                          << StreamTypeToString(associated_substream.type)
                          << "] is associated with a media ssrc (" << media_ssrc
                          << ") that does not have StreamStats. Ignoring its "
                          << "RTP stats.";
      continue;
    }
    // NOTE(review): operator[] default-constructs an entry if the referenced
    // ssrc exists in `substreams` but is not of kMedia type (the first pass
    // only inserted kMedia entries) — presumably referenced_media_ssrc always
    // points at a kMedia stream; confirm against VideoSendStream.
    webrtc::VideoSendStream::StreamStats& rtp_substream =
        rtp_substreams[media_ssrc];

    // We only merge `rtp_stats`. All other metrics are not applicable for RTX
    // and FlexFEC.
    // TODO(hbos): kRtx and kFlexfec stats should use a separate struct to make
    // it clear what is or is not applicable.
    rtp_substream.rtp_stats.Add(associated_substream.rtp_stats);
  }
  return rtp_substreams;
}
+
+bool IsActiveFromEncodings(
+ absl::optional<uint32_t> ssrc,
+ const std::vector<webrtc::RtpEncodingParameters>& encodings) {
+ if (ssrc.has_value()) {
+ // Report the `active` value of a specific ssrc, or false if an encoding
+ // with this ssrc does not exist.
+ auto encoding_it = std::find_if(
+ encodings.begin(), encodings.end(),
+ [ssrc = ssrc.value()](const webrtc::RtpEncodingParameters& encoding) {
+ return encoding.ssrc.has_value() && encoding.ssrc.value() == ssrc;
+ });
+ return encoding_it != encodings.end() ? encoding_it->active : false;
+ }
+ // If `ssrc` is not specified then any encoding being active counts as active.
+ for (const auto& encoding : encodings) {
+ if (encoding.active) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool IsScalabilityModeSupportedByCodec(
+ const VideoCodec& codec,
+ const std::string& scalability_mode,
+ const webrtc::VideoSendStream::Config& config) {
+ return config.encoder_settings.encoder_factory
+ ->QueryCodecSupport(webrtc::SdpVideoFormat(codec.name, codec.params),
+ scalability_mode)
+ .is_supported;
+}
+
+// Fallback to default value if the scalability mode is unset or unsupported by
+// the codec.
+void FallbackToDefaultScalabilityModeIfNotSupported(
+ const VideoCodec& codec,
+ const webrtc::VideoSendStream::Config& config,
+ std::vector<webrtc::RtpEncodingParameters>& encodings) {
+ if (!absl::c_any_of(encodings,
+ [](const webrtc::RtpEncodingParameters& encoding) {
+ return encoding.scalability_mode &&
+ !encoding.scalability_mode->empty();
+ })) {
+ // Fallback is only enabled if the scalability mode is configured for any of
+ // the encodings for now.
+ return;
+ }
+ if (config.encoder_settings.encoder_factory == nullptr) {
+ return;
+ }
+ for (auto& encoding : encodings) {
+ RTC_LOG(LS_INFO) << "Encoding scalability_mode: "
+ << encoding.scalability_mode.value_or("-");
+ if (!encoding.active && !encoding.scalability_mode.has_value()) {
+ // Inactive encodings should not fallback since apps may only specify the
+ // scalability mode of the first encoding when the others are inactive.
+ continue;
+ }
+ if (!encoding.scalability_mode.has_value() ||
+ !IsScalabilityModeSupportedByCodec(codec, *encoding.scalability_mode,
+ config)) {
+ encoding.scalability_mode = webrtc::kDefaultScalabilityModeStr;
+ RTC_LOG(LS_INFO) << " -> " << *encoding.scalability_mode;
+ }
+ }
+}
+
// Generate the list of codec parameters to pass down based on the negotiated
// "codecs". Note that VideoCodecSettings correspond to concrete codecs like
// VP8, VP9, H264 while VideoCodecs correspond also to "virtual" codecs like
// RTX, ULPFEC, FLEXFEC.
// Returns an empty vector on any hard negotiation error (duplicate payload
// type, malformed RTX association); duplicate FEC codecs are merely logged
// and the first one wins.
std::vector<VideoCodecSettings> MapCodecs(
    const std::vector<VideoCodec>& codecs) {
  if (codecs.empty()) {
    return {};
  }

  std::vector<VideoCodecSettings> video_codecs;
  // Payload type -> resiliency kind, used for duplicate detection and for
  // validating RTX associations below.
  std::map<int, Codec::ResiliencyType> payload_codec_type;
  // `rtx_mapping` maps video payload type to rtx payload type.
  std::map<int, int> rtx_mapping;
  // Video payload type -> negotiated rtx-time.
  std::map<int, int> rtx_time_mapping;

  webrtc::UlpfecConfig ulpfec_config;
  absl::optional<int> flexfec_payload_type;

  // First pass: bucket every negotiated codec. Real video codecs go into
  // `video_codecs`; FEC/RTX "virtual" codecs only record configuration.
  for (const VideoCodec& in_codec : codecs) {
    const int payload_type = in_codec.id;

    if (payload_codec_type.find(payload_type) != payload_codec_type.end()) {
      RTC_LOG(LS_ERROR) << "Payload type already registered: "
                        << in_codec.ToString();
      return {};
    }
    payload_codec_type[payload_type] = in_codec.GetResiliencyType();

    switch (in_codec.GetResiliencyType()) {
      case Codec::ResiliencyType::kRed: {
        if (ulpfec_config.red_payload_type != -1) {
          RTC_LOG(LS_ERROR)
              << "Duplicate RED codec: ignoring PT=" << payload_type
              << " in favor of PT=" << ulpfec_config.red_payload_type
              << " which was specified first.";
          break;
        }
        ulpfec_config.red_payload_type = payload_type;
        break;
      }

      case Codec::ResiliencyType::kUlpfec: {
        if (ulpfec_config.ulpfec_payload_type != -1) {
          RTC_LOG(LS_ERROR)
              << "Duplicate ULPFEC codec: ignoring PT=" << payload_type
              << " in favor of PT=" << ulpfec_config.ulpfec_payload_type
              << " which was specified first.";
          break;
        }
        ulpfec_config.ulpfec_payload_type = payload_type;
        break;
      }

      case Codec::ResiliencyType::kFlexfec: {
        if (flexfec_payload_type) {
          RTC_LOG(LS_ERROR)
              << "Duplicate FLEXFEC codec: ignoring PT=" << payload_type
              << " in favor of PT=" << *flexfec_payload_type
              << " which was specified first.";
          break;
        }
        flexfec_payload_type = payload_type;
        break;
      }

      case Codec::ResiliencyType::kRtx: {
        // RTX must carry a valid "apt" (associated payload type) parameter.
        int associated_payload_type;
        if (!in_codec.GetParam(kCodecParamAssociatedPayloadType,
                               &associated_payload_type) ||
            !IsValidRtpPayloadType(associated_payload_type)) {
          RTC_LOG(LS_ERROR)
              << "RTX codec with invalid or no associated payload type: "
              << in_codec.ToString();
          return {};
        }
        int rtx_time;
        if (in_codec.GetParam(kCodecParamRtxTime, &rtx_time) && rtx_time > 0) {
          rtx_time_mapping[associated_payload_type] = rtx_time;
        }
        rtx_mapping[associated_payload_type] = payload_type;
        break;
      }

      case Codec::ResiliencyType::kNone: {
        video_codecs.emplace_back(in_codec);
        break;
      }
    }
  }

  // One of these codecs should have been a video codec. Only having FEC
  // parameters into this code is a logic error.
  RTC_DCHECK(!video_codecs.empty());

  // Validate that every RTX codec points at a known video or RED codec.
  for (const auto& entry : rtx_mapping) {
    const int associated_payload_type = entry.first;
    const int rtx_payload_type = entry.second;
    auto it = payload_codec_type.find(associated_payload_type);
    if (it == payload_codec_type.end()) {
      RTC_LOG(LS_ERROR) << "RTX codec (PT=" << rtx_payload_type
                        << ") mapped to PT=" << associated_payload_type
                        << " which is not in the codec list.";
      return {};
    }
    const Codec::ResiliencyType associated_codec_type = it->second;
    if (associated_codec_type != Codec::ResiliencyType::kNone &&
        associated_codec_type != Codec::ResiliencyType::kRed) {
      RTC_LOG(LS_ERROR)
          << "RTX PT=" << rtx_payload_type
          << " not mapped to regular video codec or RED codec (PT="
          << associated_payload_type << ").";
      return {};
    }

    if (associated_payload_type == ulpfec_config.red_payload_type) {
      ulpfec_config.red_rtx_payload_type = rtx_payload_type;
    }
  }

  // Second pass: attach the shared FEC configuration and the per-codec RTX
  // settings to every real video codec.
  for (VideoCodecSettings& codec_settings : video_codecs) {
    const int payload_type = codec_settings.codec.id;
    codec_settings.ulpfec = ulpfec_config;
    codec_settings.flexfec_payload_type = flexfec_payload_type.value_or(-1);
    auto it = rtx_mapping.find(payload_type);
    if (it != rtx_mapping.end()) {
      const int rtx_payload_type = it->second;
      codec_settings.rtx_payload_type = rtx_payload_type;

      auto rtx_time_it = rtx_time_mapping.find(payload_type);
      if (rtx_time_it != rtx_time_mapping.end()) {
        const int rtx_time = rtx_time_it->second;
        // Cap the negotiated rtx-time at the NACK history window.
        if (rtx_time < kNackHistoryMs) {
          codec_settings.rtx_time = rtx_time;
        } else {
          codec_settings.rtx_time = kNackHistoryMs;
        }
      }
    }
  }

  return video_codecs;
}
+
+bool NonFlexfecReceiveCodecsHaveChanged(std::vector<VideoCodecSettings> before,
+ std::vector<VideoCodecSettings> after) {
+ // The receive codec order doesn't matter, so we sort the codecs before
+ // comparing. This is necessary because currently the
+ // only way to change the send codec is to munge SDP, which causes
+ // the receive codec list to change order, which causes the streams
+ // to be recreates which causes a "blink" of black video. In order
+ // to support munging the SDP in this way without recreating receive
+ // streams, we ignore the order of the received codecs so that
+ // changing the order doesn't cause this "blink".
+ auto comparison = [](const VideoCodecSettings& codec1,
+ const VideoCodecSettings& codec2) {
+ return codec1.codec.id > codec2.codec.id;
+ };
+ absl::c_sort(before, comparison);
+ absl::c_sort(after, comparison);
+
+ // Changes in FlexFEC payload type are handled separately in
+ // WebRtcVideoReceiveChannel::GetChangedReceiverParameters, so disregard
+ // FlexFEC in the comparison here.
+ return !absl::c_equal(before, after,
+ VideoCodecSettings::EqualsDisregardingFlexfec);
+}
+
+std::string CodecSettingsVectorToString(
+ const std::vector<VideoCodecSettings>& codecs) {
+ rtc::StringBuilder out;
+ out << "{";
+ for (size_t i = 0; i < codecs.size(); ++i) {
+ out << codecs[i].codec.ToString();
+ if (i != codecs.size() - 1) {
+ out << ", ";
+ }
+ }
+ out << "}";
+ return out.Release();
+}
+
// Expands `recv_codecs` into the three parallel structures a receive stream
// config needs: the decoder list, the RTX payload-type associations, and the
// set of payload types using raw packetization. All output parameters must
// be empty on entry.
void ExtractCodecInformation(
    rtc::ArrayView<const VideoCodecSettings> recv_codecs,
    std::map<int, int>& rtx_associated_payload_types,
    std::set<int>& raw_payload_types,
    std::vector<webrtc::VideoReceiveStreamInterface::Decoder>& decoders) {
  RTC_DCHECK(!recv_codecs.empty());
  RTC_DCHECK(rtx_associated_payload_types.empty());
  RTC_DCHECK(raw_payload_types.empty());
  RTC_DCHECK(decoders.empty());

  for (const VideoCodecSettings& recv_codec : recv_codecs) {
    decoders.emplace_back(
        webrtc::SdpVideoFormat(recv_codec.codec.name, recv_codec.codec.params),
        recv_codec.codec.id);
    // NOTE(review): codecs without RTX presumably carry a sentinel
    // rtx_payload_type (-1, per MapCodecs); emplace() keeps only the first
    // mapping for a given key, so later sentinels are silently dropped —
    // confirm this is the intended behavior.
    rtx_associated_payload_types.emplace(recv_codec.rtx_payload_type,
                                         recv_codec.codec.id);
    if (recv_codec.codec.packetization == kPacketizationParamRaw) {
      raw_payload_types.insert(recv_codec.codec.id);
    }
  }
}
+
+int ParseReceiveBufferSize(const webrtc::FieldTrialsView& trials) {
+ webrtc::FieldTrialParameter<int> size_bytes("size_bytes",
+ kVideoRtpRecvBufferSize);
+ webrtc::ParseFieldTrial({&size_bytes},
+ trials.Lookup("WebRTC-ReceiveBufferSize"));
+ if (size_bytes.Get() < 10'000 || size_bytes.Get() > 10'000'000) {
+ RTC_LOG(LS_WARNING) << "WebRTC-ReceiveBufferSize out of bounds: "
+ << size_bytes.Get();
+ return kVideoRtpRecvBufferSize;
+ }
+ return size_bytes.Get();
+}
+
+} // namespace
+// --------------- WebRtcVideoEngine ---------------------------
+
// Constructs the engine, taking ownership of both codec factories.
// NOTE(review): `trials` is presumably stored by reference and must outlive
// the engine — confirm against the trials_ member declaration.
WebRtcVideoEngine::WebRtcVideoEngine(
    std::unique_ptr<webrtc::VideoEncoderFactory> video_encoder_factory,
    std::unique_ptr<webrtc::VideoDecoderFactory> video_decoder_factory,
    const webrtc::FieldTrialsView& trials)
    : decoder_factory_(std::move(video_decoder_factory)),
      encoder_factory_(std::move(video_encoder_factory)),
      trials_(trials) {
  RTC_DLOG(LS_INFO) << "WebRtcVideoEngine::WebRtcVideoEngine()";
}
+
// Destroys the engine and, with it, the owned encoder/decoder factories.
// Channels created by this engine hold raw pointers to those factories and
// must be destroyed first.
WebRtcVideoEngine::~WebRtcVideoEngine() {
  RTC_DLOG(LS_INFO) << "WebRtcVideoEngine::~WebRtcVideoEngine";
}
+
// Creates a send channel bound to `call`. The channel receives raw pointers
// to the engine-owned codec factories, so it must not outlive this engine.
std::unique_ptr<VideoMediaSendChannelInterface>
WebRtcVideoEngine::CreateSendChannel(
    webrtc::Call* call,
    const MediaConfig& config,
    const VideoOptions& options,
    const webrtc::CryptoOptions& crypto_options,
    webrtc::VideoBitrateAllocatorFactory* video_bitrate_allocator_factory) {
  return std::make_unique<WebRtcVideoSendChannel>(
      call, config, options, crypto_options, encoder_factory_.get(),
      decoder_factory_.get(), video_bitrate_allocator_factory);
}
// Creates a receive channel bound to `call`. As with the send channel, the
// raw decoder-factory pointer ties the channel's lifetime to this engine.
std::unique_ptr<VideoMediaReceiveChannelInterface>
WebRtcVideoEngine::CreateReceiveChannel(
    webrtc::Call* call,
    const MediaConfig& config,
    const VideoOptions& options,
    const webrtc::CryptoOptions& crypto_options) {
  return std::make_unique<WebRtcVideoReceiveChannel>(
      call, config, options, crypto_options, decoder_factory_.get());
}
+
// Lists the codecs this engine can send, derived from the encoder factory;
// RTX entries are included only when `include_rtx` is true.
std::vector<VideoCodec> WebRtcVideoEngine::send_codecs(bool include_rtx) const {
  return GetPayloadTypesAndDefaultCodecs(encoder_factory_.get(),
                                         /*is_decoder_factory=*/false,
                                         include_rtx, trials_);
}
+
// Lists the codecs this engine can receive, derived from the decoder factory;
// RTX entries are included only when `include_rtx` is true.
std::vector<VideoCodec> WebRtcVideoEngine::recv_codecs(bool include_rtx) const {
  return GetPayloadTypesAndDefaultCodecs(decoder_factory_.get(),
                                         /*is_decoder_factory=*/true,
                                         include_rtx, trials_);
}
+
+std::vector<webrtc::RtpHeaderExtensionCapability>
+WebRtcVideoEngine::GetRtpHeaderExtensions() const {
+ std::vector<webrtc::RtpHeaderExtensionCapability> result;
+ int id = 1;
+ for (const auto& uri :
+ {webrtc::RtpExtension::kTimestampOffsetUri,
+ webrtc::RtpExtension::kAbsSendTimeUri,
+ webrtc::RtpExtension::kVideoRotationUri,
+ webrtc::RtpExtension::kTransportSequenceNumberUri,
+ webrtc::RtpExtension::kPlayoutDelayUri,
+ webrtc::RtpExtension::kVideoContentTypeUri,
+ webrtc::RtpExtension::kVideoTimingUri,
+ webrtc::RtpExtension::kColorSpaceUri, webrtc::RtpExtension::kMidUri,
+ webrtc::RtpExtension::kRidUri, webrtc::RtpExtension::kRepairedRidUri}) {
+ result.emplace_back(uri, id++, webrtc::RtpTransceiverDirection::kSendRecv);
+ }
+ for (const auto& uri : {webrtc::RtpExtension::kAbsoluteCaptureTimeUri}) {
+ result.emplace_back(uri, id++, webrtc::RtpTransceiverDirection::kStopped);
+ }
+ result.emplace_back(webrtc::RtpExtension::kGenericFrameDescriptorUri00, id++,
+ IsEnabled(trials_, "WebRTC-GenericDescriptorAdvertised")
+ ? webrtc::RtpTransceiverDirection::kSendRecv
+ : webrtc::RtpTransceiverDirection::kStopped);
+ result.emplace_back(
+ webrtc::RtpExtension::kDependencyDescriptorUri, id++,
+ IsEnabled(trials_, "WebRTC-DependencyDescriptorAdvertised")
+ ? webrtc::RtpTransceiverDirection::kSendRecv
+ : webrtc::RtpTransceiverDirection::kStopped);
+
+ result.emplace_back(
+ webrtc::RtpExtension::kVideoLayersAllocationUri, id++,
+ IsEnabled(trials_, "WebRTC-VideoLayersAllocationAdvertised")
+ ? webrtc::RtpTransceiverDirection::kSendRecv
+ : webrtc::RtpTransceiverDirection::kStopped);
+
+ // VideoFrameTrackingId is a test-only extension.
+ if (IsEnabled(trials_, "WebRTC-VideoFrameTrackingIdAdvertised")) {
+ result.emplace_back(webrtc::RtpExtension::kVideoFrameTrackingIdUri, id++,
+ webrtc::RtpTransceiverDirection::kSendRecv);
+ }
+ return result;
+}
+
// Free function, exported for testing.
// Thin test-only wrapper around the anonymous-namespace implementation of
// MergeInfoAboutOutboundRtpSubstreams above.
std::map<uint32_t, webrtc::VideoSendStream::StreamStats>
MergeInfoAboutOutboundRtpSubstreamsForTesting(
    const std::map<uint32_t, webrtc::VideoSendStream::StreamStats>&
        substreams) {
  return MergeInfoAboutOutboundRtpSubstreams(substreams);
}
+
+// --------------- WebRtcVideoSendChannel ----------------------
// Constructs the send channel. The factory and allocator pointers are not
// owned; they must outlive the channel. Must run on the worker thread
// (enforced by the thread checker).
WebRtcVideoSendChannel::WebRtcVideoSendChannel(
    webrtc::Call* call,
    const MediaConfig& config,
    const VideoOptions& options,
    const webrtc::CryptoOptions& crypto_options,
    webrtc::VideoEncoderFactory* encoder_factory,
    webrtc::VideoDecoderFactory* decoder_factory,
    webrtc::VideoBitrateAllocatorFactory* bitrate_allocator_factory)
    : MediaChannelUtil(call->network_thread(), config.enable_dscp),
      worker_thread_(call->worker_thread()),
      sending_(false),
      receiving_(false),
      call_(call),
      default_sink_(nullptr),
      video_config_(config.video),
      encoder_factory_(encoder_factory),
      decoder_factory_(decoder_factory),
      bitrate_allocator_factory_(bitrate_allocator_factory),
      default_send_options_(options),
      last_send_stats_log_ms_(-1),
      last_receive_stats_log_ms_(-1),
      discard_unknown_ssrc_packets_(
          IsEnabled(call_->trials(),
                    "WebRTC-Video-DiscardPacketsWithUnknownSsrc")),
      crypto_options_(crypto_options) {
  RTC_DCHECK_RUN_ON(&thread_checker_);
  rtcp_receiver_report_ssrc_ = kDefaultRtcpReceiverReportSsrc;
  // NOTE(review): receive-codec state is initialized here in the *send*
  // channel (from the decoder factory) — presumably needed for RTCP / FlexFEC
  // payload-type bookkeeping; confirm against the members' usage.
  recv_codecs_ = MapCodecs(GetPayloadTypesAndDefaultCodecs(
      decoder_factory_, /*is_decoder_factory=*/true,
      /*include_rtx=*/true, call_->trials()));
  recv_flexfec_payload_type_ =
      recv_codecs_.empty() ? 0 : recv_codecs_.front().flexfec_payload_type;
}
+
+WebRtcVideoSendChannel::~WebRtcVideoSendChannel() {
+ for (auto& kv : send_streams_)
+ delete kv.second;
+}
+
// Builds the codec-specific encoder settings (VP8/VP9/AV1) from the current
// send parameters and field trials. Returns nullptr for codecs that take no
// codec-specific settings (H264 and anything unrecognized).
rtc::scoped_refptr<webrtc::VideoEncoderConfig::EncoderSpecificSettings>
WebRtcVideoSendChannel::WebRtcVideoSendStream::ConfigureVideoEncoderSettings(
    const VideoCodec& codec) {
  RTC_DCHECK_RUN_ON(&thread_checker_);
  bool is_screencast = parameters_.options.is_screencast.value_or(false);
  // No automatic resizing when using simulcast or screencast, or when
  // disabled by field trial flag.
  bool automatic_resize = !disable_automatic_resize_ && !is_screencast &&
                          (parameters_.config.rtp.ssrcs.size() == 1 ||
                           NumActiveStreams(rtp_parameters_) == 1);

  bool denoising;
  bool codec_default_denoising = false;
  if (is_screencast) {
    // Denoising would smear text/static content; always off for screenshare.
    denoising = false;
  } else {
    // Use codec default if video_noise_reduction is unset.
    codec_default_denoising = !parameters_.options.video_noise_reduction;
    denoising = parameters_.options.video_noise_reduction.value_or(false);
  }

  if (absl::EqualsIgnoreCase(codec.name, kH264CodecName)) {
    // H264 carries no encoder-specific settings here.
    return nullptr;
  }
  if (absl::EqualsIgnoreCase(codec.name, kVp8CodecName)) {
    webrtc::VideoCodecVP8 vp8_settings =
        webrtc::VideoEncoder::GetDefaultVp8Settings();
    vp8_settings.automaticResizeOn = automatic_resize;
    // VP8 denoising is enabled by default.
    vp8_settings.denoisingOn = codec_default_denoising ? true : denoising;
    return rtc::make_ref_counted<
        webrtc::VideoEncoderConfig::Vp8EncoderSpecificSettings>(vp8_settings);
  }
  if (absl::EqualsIgnoreCase(codec.name, kVp9CodecName)) {
    webrtc::VideoCodecVP9 vp9_settings =
        webrtc::VideoEncoder::GetDefaultVp9Settings();

    // Layer counts are derived from the number of configured ssrcs, clamped
    // to the conference-mode maximums.
    vp9_settings.numberOfSpatialLayers = std::min<unsigned char>(
        parameters_.config.rtp.ssrcs.size(), kConferenceMaxNumSpatialLayers);
    vp9_settings.numberOfTemporalLayers =
        std::min<unsigned char>(parameters_.config.rtp.ssrcs.size() > 1
                                    ? kConferenceDefaultNumTemporalLayers
                                    : 1,
                                kConferenceMaxNumTemporalLayers);

    // VP9 denoising is disabled by default.
    vp9_settings.denoisingOn = codec_default_denoising ? true : denoising;
    // Disable automatic resize if more than one spatial layer is requested.
    bool vp9_automatic_resize = automatic_resize;
    absl::optional<int> num_spatial_layers =
        NumSpatialLayersFromEncoding(rtp_parameters_, /*idx=*/0);
    if (num_spatial_layers && *num_spatial_layers > 1) {
      vp9_automatic_resize = false;
    }
    vp9_settings.automaticResizeOn = vp9_automatic_resize;
    if (!is_screencast) {
      // Inter-layer prediction mode is configurable via field trial for
      // non-screenshare content.
      webrtc::FieldTrialFlag interlayer_pred_experiment_enabled("Enabled");
      webrtc::FieldTrialEnum<webrtc::InterLayerPredMode> inter_layer_pred_mode(
          "inter_layer_pred_mode", webrtc::InterLayerPredMode::kOnKeyPic,
          {{"off", webrtc::InterLayerPredMode::kOff},
           {"on", webrtc::InterLayerPredMode::kOn},
           {"onkeypic", webrtc::InterLayerPredMode::kOnKeyPic}});
      webrtc::FieldTrialFlag force_flexible_mode("FlexibleMode");
      webrtc::ParseFieldTrial(
          {&interlayer_pred_experiment_enabled, &inter_layer_pred_mode,
           &force_flexible_mode},
          call_->trials().Lookup("WebRTC-Vp9InterLayerPred"));
      if (interlayer_pred_experiment_enabled) {
        vp9_settings.interLayerPred = inter_layer_pred_mode;
      } else {
        // Limit inter-layer prediction to key pictures by default.
        vp9_settings.interLayerPred = webrtc::InterLayerPredMode::kOnKeyPic;
      }
      vp9_settings.flexibleMode = force_flexible_mode.Get();
    } else {
      // Multiple spatial layers vp9 screenshare needs flexible mode.
      vp9_settings.flexibleMode = vp9_settings.numberOfSpatialLayers > 1;
      vp9_settings.interLayerPred = webrtc::InterLayerPredMode::kOn;
    }
    return rtc::make_ref_counted<
        webrtc::VideoEncoderConfig::Vp9EncoderSpecificSettings>(vp9_settings);
  }
  if (absl::EqualsIgnoreCase(codec.name, kAv1CodecName)) {
    webrtc::VideoCodecAV1 av1_settings = {.automatic_resize_on =
                                              automatic_resize};
    // Note: absl::optional<int> > 1 is false when nullopt, so resize stays
    // enabled when no scalability mode is configured on the first encoding.
    if (NumSpatialLayersFromEncoding(rtp_parameters_, /*idx=*/0) > 1) {
      av1_settings.automatic_resize_on = false;
    }
    return rtc::make_ref_counted<
        webrtc::VideoEncoderConfig::Av1EncoderSpecificSettings>(av1_settings);
  }
  // Unknown codec: no specific settings.
  return nullptr;
}
// Intersects the remote's negotiated codecs with the local encoder
// implementations, producing the send codec preference list.
std::vector<VideoCodecSettings> WebRtcVideoSendChannel::SelectSendVideoCodecs(
    const std::vector<VideoCodecSettings>& remote_mapped_codecs) const {
  std::vector<webrtc::SdpVideoFormat> sdp_formats =
      encoder_factory_ ? encoder_factory_->GetImplementations()
                       : std::vector<webrtc::SdpVideoFormat>();

  // The returned vector holds the VideoCodecSettings in terms of preference.
  // They are ordered by receive codec preference first and local
  // implementation preference second.
  std::vector<VideoCodecSettings> encoders;
  for (const VideoCodecSettings& remote_codec : remote_mapped_codecs) {
    for (auto format_it = sdp_formats.begin();
         format_it != sdp_formats.end();) {
      // For H264, we will limit the encode level to the remote offered level
      // regardless if level asymmetry is allowed or not. This is strictly not
      // following the spec in https://tools.ietf.org/html/rfc6184#section-8.2.2
      // since we should limit the encode level to the lower of local and remote
      // level when level asymmetry is not allowed.
      if (format_it->IsSameCodec(
              {remote_codec.codec.name, remote_codec.codec.params})) {
        encoders.push_back(remote_codec);

        // To allow the VideoEncoderFactory to keep information about which
        // implementation to instantiate when CreateEncoder is called the two
        // parameter sets are merged.
        encoders.back().codec.params.insert(format_it->parameters.begin(),
                                            format_it->parameters.end());

        // Erase so each local implementation matches at most one remote codec;
        // erase() returns the next iterator, replacing the increment.
        format_it = sdp_formats.erase(format_it);
      } else {
        ++format_it;
      }
    }
  }

  return encoders;
}
+
// Diffs `params` against the channel's current send parameters and fills
// `changed_params` with only the fields that actually changed. Returns false
// on validation/negotiation failure.
// NOTE(review): despite being const, this may mutate a send stream via
// SetRtpParameters when a previously forced codec was negotiated away.
bool WebRtcVideoSendChannel::GetChangedSenderParameters(
    const VideoSenderParameters& params,
    ChangedSenderParameters* changed_params) const {
  if (!ValidateCodecFormats(params.codecs) ||
      !ValidateRtpExtensions(params.extensions, send_rtp_extensions_)) {
    return false;
  }

  std::vector<VideoCodecSettings> negotiated_codecs =
      SelectSendVideoCodecs(MapCodecs(params.codecs));

  // We should only fail here if send direction is enabled.
  if (params.is_stream_active && negotiated_codecs.empty()) {
    RTC_LOG(LS_ERROR) << "No video codecs supported.";
    return false;
  }

  // Never enable sending FlexFEC, unless we are in the experiment.
  if (!IsEnabled(call_->trials(), "WebRTC-FlexFEC-03")) {
    for (VideoCodecSettings& codec : negotiated_codecs)
      codec.flexfec_payload_type = -1;
  }

  absl::optional<VideoCodecSettings> force_codec;
  if (!send_streams_.empty()) {
    // Since we do not support mixed-codec simulcast yet,
    // all send streams must have the same codec value.
    auto rtp_parameters = send_streams_.begin()->second->GetRtpParameters();
    if (rtp_parameters.encodings[0].codec) {
      auto matched_codec =
          absl::c_find_if(negotiated_codecs, [&](auto negotiated_codec) {
            return negotiated_codec.codec.MatchesRtpCodec(
                *rtp_parameters.encodings[0].codec);
          });
      if (matched_codec != negotiated_codecs.end()) {
        force_codec = *matched_codec;
      } else {
        // The requested codec has been negotiated away, we clear it from the
        // parameters.
        for (auto& encoding : rtp_parameters.encodings) {
          encoding.codec.reset();
        }
        send_streams_.begin()->second->SetRtpParameters(rtp_parameters,
                                                        nullptr);
      }
    }
  }

  if (negotiated_codecs_ != negotiated_codecs) {
    if (negotiated_codecs.empty()) {
      changed_params->send_codec = absl::nullopt;
    } else if (force_codec) {
      // A codec forced via RtpParameters takes priority over negotiation
      // order.
      changed_params->send_codec = force_codec;
    } else if (send_codec() != negotiated_codecs.front()) {
      changed_params->send_codec = negotiated_codecs.front();
    }
    changed_params->negotiated_codecs = std::move(negotiated_codecs);
  }

  // Handle RTP header extensions.
  if (params.extmap_allow_mixed != ExtmapAllowMixed()) {
    changed_params->extmap_allow_mixed = params.extmap_allow_mixed;
  }
  std::vector<webrtc::RtpExtension> filtered_extensions = FilterRtpExtensions(
      params.extensions, webrtc::RtpExtension::IsSupportedForVideo, true,
      call_->trials());
  if (send_rtp_extensions_ != filtered_extensions) {
    changed_params->rtp_header_extensions =
        absl::optional<std::vector<webrtc::RtpExtension>>(filtered_extensions);
  }

  if (params.mid != send_params_.mid) {
    changed_params->mid = params.mid;
  }

  // Handle max bitrate.
  if (params.max_bandwidth_bps != send_params_.max_bandwidth_bps &&
      params.max_bandwidth_bps >= -1) {
    // 0 or -1 uncaps max bitrate.
    // TODO(pbos): Reconsider how 0 should be treated. It is not mentioned as a
    // special value and might very well be used for stopping sending.
    changed_params->max_bandwidth_bps =
        params.max_bandwidth_bps == 0 ? -1 : params.max_bandwidth_bps;
  }

  // Handle conference mode.
  if (params.conference_mode != send_params_.conference_mode) {
    changed_params->conference_mode = params.conference_mode;
  }

  // Handle RTCP mode.
  if (params.rtcp.reduced_size != send_params_.rtcp.reduced_size) {
    changed_params->rtcp_mode = params.rtcp.reduced_size
                                    ? webrtc::RtcpMode::kReducedSize
                                    : webrtc::RtcpMode::kCompound;
  }

  return true;
}
+
// Applies new sender parameters: computes the delta against the current
// state, stores the full parameter set, then applies only the changed parts.
// Returns false when the new parameters fail validation/negotiation.
bool WebRtcVideoSendChannel::SetSenderParameters(
    const VideoSenderParameters& params) {
  RTC_DCHECK_RUN_ON(&thread_checker_);
  TRACE_EVENT0("webrtc", "WebRtcVideoSendChannel::SetSenderParameters");
  RTC_LOG(LS_INFO) << "SetSenderParameters: " << params.ToString();
  ChangedSenderParameters changed_params;
  if (!GetChangedSenderParameters(params, &changed_params)) {
    return false;
  }

  if (changed_params.negotiated_codecs) {
    for (const auto& send_codec : *changed_params.negotiated_codecs)
      RTC_LOG(LS_INFO) << "Negotiated codec: " << send_codec.codec.ToString();
  }

  // `send_params_` must be updated before applying, since ApplyChangedParams
  // reads fields such as max_bandwidth_bps from it.
  send_params_ = params;
  return ApplyChangedParams(changed_params);
}
+
// Drops the current (front) negotiated codec and switches to the next one.
// Called when the encoder reports failure; no-op when there is no fallback
// codec left.
void WebRtcVideoSendChannel::RequestEncoderFallback() {
  // May be invoked from the encoder's thread; re-post to the worker thread
  // where channel state lives. SafeTask prevents use-after-free if the
  // channel is destroyed before the task runs.
  if (!worker_thread_->IsCurrent()) {
    worker_thread_->PostTask(
        SafeTask(task_safety_.flag(), [this] { RequestEncoderFallback(); }));
    return;
  }

  RTC_DCHECK_RUN_ON(&thread_checker_);
  if (negotiated_codecs_.size() <= 1) {
    RTC_LOG(LS_WARNING) << "Encoder failed but no fallback codec is available";
    return;
  }

  ChangedSenderParameters params;
  params.negotiated_codecs = negotiated_codecs_;
  params.negotiated_codecs->erase(params.negotiated_codecs->begin());
  params.send_codec = params.negotiated_codecs->front();
  ApplyChangedParams(params);
}
+
// Switches the send codec to `format` if it matches one of the negotiated
// codecs (merging `format`'s parameters into the negotiated settings).
// If no negotiated codec matches and `allow_default_fallback` is set, falls
// back to the next negotiated codec instead.
void WebRtcVideoSendChannel::RequestEncoderSwitch(
    const webrtc::SdpVideoFormat& format,
    bool allow_default_fallback) {
  // May be invoked off the worker thread; re-post with a SafeTask so the
  // callback is dropped if the channel goes away first.
  if (!worker_thread_->IsCurrent()) {
    worker_thread_->PostTask(
        SafeTask(task_safety_.flag(), [this, format, allow_default_fallback] {
          RequestEncoderSwitch(format, allow_default_fallback);
        }));
    return;
  }

  RTC_DCHECK_RUN_ON(&thread_checker_);

  for (const VideoCodecSettings& codec_setting : negotiated_codecs_) {
    if (format.IsSameCodec(
            {codec_setting.codec.name, codec_setting.codec.params})) {
      VideoCodecSettings new_codec_setting = codec_setting;
      // Overlay the requested format parameters on the negotiated ones.
      for (const auto& kv : format.parameters) {
        new_codec_setting.codec.params[kv.first] = kv.second;
      }

      if (send_codec() == new_codec_setting) {
        // Already using this codec, no switch required.
        return;
      }

      ChangedSenderParameters params;
      params.send_codec = new_codec_setting;
      ApplyChangedParams(params);
      return;
    }
  }

  RTC_LOG(LS_WARNING) << "Failed to switch encoder to: " << format.ToString()
                      << ". Is default fallback allowed: "
                      << allow_default_fallback;

  if (allow_default_fallback) {
    RequestEncoderFallback();
  }
}
+
// Commits a set of already-computed parameter changes to channel state, the
// call-level bitrate configuration, and every send stream. Always returns
// true.
bool WebRtcVideoSendChannel::ApplyChangedParams(
    const ChangedSenderParameters& changed_params) {
  RTC_DCHECK_RUN_ON(&thread_checker_);
  if (changed_params.negotiated_codecs)
    negotiated_codecs_ = *changed_params.negotiated_codecs;

  if (changed_params.send_codec)
    send_codec() = changed_params.send_codec;

  if (changed_params.extmap_allow_mixed) {
    SetExtmapAllowMixed(*changed_params.extmap_allow_mixed);
  }
  if (changed_params.rtp_header_extensions) {
    send_rtp_extensions_ = *changed_params.rtp_header_extensions;
  }

  if (changed_params.send_codec || changed_params.max_bandwidth_bps) {
    if (send_params_.max_bandwidth_bps == -1) {
      // Unset the global max bitrate (max_bitrate_bps) if max_bandwidth_bps is
      // -1, which corresponds to no "b=AS" attribute in SDP. Note that the
      // global max bitrate may be set below in GetBitrateConfigForCodec, from
      // the codec max bitrate.
      // TODO(pbos): This should be reconsidered (codec max bitrate should
      // probably not affect global call max bitrate).
      bitrate_config_.max_bitrate_bps = -1;
    }

    if (send_codec()) {
      // TODO(holmer): Changing the codec parameters shouldn't necessarily mean
      // that we change the min/max of bandwidth estimation. Reevaluate this.
      bitrate_config_ = GetBitrateConfigForCodec(send_codec()->codec);
      if (!changed_params.send_codec) {
        // If the codec isn't changing, set the start bitrate to -1 which means
        // "unchanged" so that BWE isn't affected.
        bitrate_config_.start_bitrate_bps = -1;
      }
    }

    if (send_params_.max_bandwidth_bps >= 0) {
      // Note that max_bandwidth_bps intentionally takes priority over the
      // bitrate config for the codec. This allows FEC to be applied above the
      // codec target bitrate.
      // TODO(pbos): Figure out whether b=AS means max bitrate for this
      // WebRtcVideoSendChannel (in which case we're good), or per sender
      // (SSRC), in which case this should not set a BitrateConstraints but
      // rather reconfigure all senders.
      bitrate_config_.max_bitrate_bps = send_params_.max_bandwidth_bps == 0
                                            ? -1
                                            : send_params_.max_bandwidth_bps;
    }

    call_->GetTransportControllerSend()->SetSdpBitrateParameters(
        bitrate_config_);
  }

  // Propagate the changes to every existing send stream.
  for (auto& kv : send_streams_) {
    kv.second->SetSenderParameters(changed_params);
  }
  // Codec or RTCP-mode changes are surfaced to the registered observer.
  if (changed_params.send_codec || changed_params.rtcp_mode) {
    if (send_codec_changed_callback_) {
      send_codec_changed_callback_();
    }
  }
  return true;
}
+
// Returns the RTP send parameters for the stream with `ssrc`, augmented with
// the channel-wide negotiated codec list (current send codec first). Returns
// a default-constructed RtpParameters when the stream does not exist.
webrtc::RtpParameters WebRtcVideoSendChannel::GetRtpSendParameters(
    uint32_t ssrc) const {
  RTC_DCHECK_RUN_ON(&thread_checker_);
  auto it = send_streams_.find(ssrc);
  if (it == send_streams_.end()) {
    RTC_LOG(LS_WARNING) << "Attempting to get RTP send parameters for stream "
                           "with ssrc "
                        << ssrc << " which doesn't exist.";
    return webrtc::RtpParameters();
  }

  webrtc::RtpParameters rtp_params = it->second->GetRtpParameters();
  // Need to add the common list of codecs to the send stream-specific
  // RTP parameters.
  for (const VideoCodec& codec : send_params_.codecs) {
    if (send_codec() && send_codec()->codec.id == codec.id) {
      // Put the current send codec to the front of the codecs list.
      RTC_DCHECK_EQ(codec.name, send_codec()->codec.name);
      rtp_params.codecs.insert(rtp_params.codecs.begin(),
                               codec.ToCodecParameters());
    } else {
      rtp_params.codecs.push_back(codec.ToCodecParameters());
    }
  }

  return rtp_params;
}
+
+// Applies `parameters` to the send stream identified by `ssrc`. Rejects
+// unknown SSRCs and attempts to change the codec list; maps the first
+// encoding's network priority onto a DSCP value and may switch the send
+// codec when encodings[0].codec names a different negotiated codec.
+// `callback` is always invoked exactly once (directly on error paths, or
+// delegated to the stream's SetRtpParameters on success).
+webrtc::RTCError WebRtcVideoSendChannel::SetRtpSendParameters(
+    uint32_t ssrc,
+    const webrtc::RtpParameters& parameters,
+    webrtc::SetParametersCallback callback) {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  TRACE_EVENT0("webrtc", "WebRtcVideoSendChannel::SetRtpSendParameters");
+  auto it = send_streams_.find(ssrc);
+  if (it == send_streams_.end()) {
+    RTC_LOG(LS_ERROR) << "Attempting to set RTP send parameters for stream "
+                         "with ssrc "
+                      << ssrc << " which doesn't exist.";
+    return webrtc::InvokeSetParametersCallback(
+        callback, webrtc::RTCError(webrtc::RTCErrorType::INTERNAL_ERROR));
+  }
+
+  // TODO(deadbeef): Handle setting parameters with a list of codecs in a
+  // different order (which should change the send codec).
+  webrtc::RtpParameters current_parameters = GetRtpSendParameters(ssrc);
+  if (current_parameters.codecs != parameters.codecs) {
+    RTC_DLOG(LS_ERROR) << "Using SetParameters to change the set of codecs "
+                          "is not currently supported.";
+    return webrtc::InvokeSetParametersCallback(
+        callback, webrtc::RTCError(webrtc::RTCErrorType::INTERNAL_ERROR));
+  }
+
+  if (!parameters.encodings.empty()) {
+    // Note that these values come from:
+    // https://tools.ietf.org/html/draft-ietf-tsvwg-rtcweb-qos-16#section-5
+    // TODO(deadbeef): Change values depending on whether we are sending a
+    // keyframe or non-keyframe.
+    rtc::DiffServCodePoint new_dscp = rtc::DSCP_DEFAULT;
+    switch (parameters.encodings[0].network_priority) {
+      case webrtc::Priority::kVeryLow:
+        new_dscp = rtc::DSCP_CS1;
+        break;
+      case webrtc::Priority::kLow:
+        new_dscp = rtc::DSCP_DEFAULT;
+        break;
+      case webrtc::Priority::kMedium:
+        new_dscp = rtc::DSCP_AF42;
+        break;
+      case webrtc::Priority::kHigh:
+        new_dscp = rtc::DSCP_AF41;
+        break;
+    }
+
+    // Since we validate that all layers have the same value, we can just check
+    // the first layer.
+    // TODO(orphis): Support mixed-codec simulcast
+    if (parameters.encodings[0].codec && send_codec_ &&
+        !send_codec_->codec.MatchesRtpCodec(*parameters.encodings[0].codec)) {
+      RTC_LOG(LS_VERBOSE) << "Trying to change codec to "
+                          << parameters.encodings[0].codec->name;
+      auto matched_codec =
+          absl::c_find_if(negotiated_codecs_, [&](auto negotiated_codec) {
+            return negotiated_codec.codec.MatchesRtpCodec(
+                *parameters.encodings[0].codec);
+          });
+      if (matched_codec == negotiated_codecs_.end()) {
+        return webrtc::InvokeSetParametersCallback(
+            callback, webrtc::RTCError(
+                          webrtc::RTCErrorType::INVALID_MODIFICATION,
+                          "Attempted to use an unsupported codec for layer 0"));
+      }
+
+      // Switch the channel-wide send codec to the requested one.
+      ChangedSenderParameters params;
+      params.send_codec = *matched_codec;
+      ApplyChangedParams(params);
+    }
+
+    SetPreferredDscp(new_dscp);
+  }
+
+  return it->second->SetRtpParameters(parameters, std::move(callback));
+}
+// Returns the currently configured send codec, or nullopt when no codec has
+// been set yet.
+absl::optional<Codec> WebRtcVideoSendChannel::GetSendCodec() const {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  if (!send_codec()) {
+    RTC_LOG(LS_VERBOSE) << "GetSendCodec: No send codec set.";
+    return absl::nullopt;
+  }
+  return send_codec()->codec;
+}
+
+// Starts or stops sending on all streams of this channel. Fails (returns
+// false) only when asked to start sending before a send codec is configured.
+bool WebRtcVideoSendChannel::SetSend(bool send) {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  TRACE_EVENT0("webrtc", "WebRtcVideoSendChannel::SetSend");
+  RTC_LOG(LS_VERBOSE) << "SetSend: " << (send ? "true" : "false");
+  if (send && !send_codec()) {
+    RTC_DLOG(LS_ERROR) << "SetSend(true) called before setting codec.";
+    return false;
+  }
+  for (const auto& kv : send_streams_) {
+    kv.second->SetSend(send);
+  }
+  // Remember the state so streams added later can be started immediately.
+  sending_ = send;
+  return true;
+}
+
+// Attaches a frame `source` and/or updates `options` for the stream with
+// `ssrc`. A null `source` detaches the current one; passing a non-null
+// source for an unknown SSRC is a programming error (RTC_CHECK).
+bool WebRtcVideoSendChannel::SetVideoSend(
+    uint32_t ssrc,
+    const VideoOptions* options,
+    rtc::VideoSourceInterface<webrtc::VideoFrame>* source) {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  TRACE_EVENT0("webrtc", "SetVideoSend");
+  RTC_DCHECK(ssrc != 0);
+  RTC_LOG(LS_INFO) << "SetVideoSend (ssrc= " << ssrc << ", options: "
+                   << (options ? options->ToString() : "nullptr")
+                   << ", source = " << (source ? "(source)" : "nullptr") << ")";
+
+  const auto& kv = send_streams_.find(ssrc);
+  if (kv == send_streams_.end()) {
+    // Allow unknown ssrc only if source is null.
+    RTC_CHECK(source == nullptr);
+    RTC_LOG(LS_ERROR) << "No sending stream on ssrc " << ssrc;
+    return false;
+  }
+
+  return kv->second->SetVideoSend(options, source);
+}
+
+// Returns true if none of the SSRCs in `sp` is already used by an existing
+// send stream on this channel.
+bool WebRtcVideoSendChannel::ValidateSendSsrcAvailability(
+    const StreamParams& sp) const {
+  for (uint32_t ssrc : sp.ssrcs) {
+    if (send_ssrcs_.find(ssrc) != send_ssrcs_.end()) {
+      RTC_LOG(LS_ERROR) << "Send stream with SSRC '" << ssrc
+                        << "' already exists.";
+      return false;
+    }
+  }
+  return true;
+}
+// Creates a WebRtcVideoSendStream for `sp`, registering all its SSRCs,
+// propagating the channel-level video config, and starting it immediately
+// when the channel is already sending. Returns false when `sp` is invalid
+// or any of its SSRCs is already in use.
+bool WebRtcVideoSendChannel::AddSendStream(const StreamParams& sp) {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+
+  RTC_LOG(LS_INFO) << "AddSendStream: " << sp.ToString();
+  if (!ValidateStreamParams(sp))
+    return false;
+
+  if (!ValidateSendSsrcAvailability(sp))
+    return false;
+
+  // Reserve all SSRCs (primary, RTX, FEC) for this stream.
+  for (uint32_t used_ssrc : sp.ssrcs)
+    send_ssrcs_.insert(used_ssrc);
+
+  webrtc::VideoSendStream::Config config(transport());
+
+  for (const RidDescription& rid : sp.rids()) {
+    config.rtp.rids.push_back(rid.rid);
+  }
+
+  // Copy the channel-wide configuration into the per-stream config.
+  config.suspend_below_min_bitrate = video_config_.suspend_below_min_bitrate;
+  config.periodic_alr_bandwidth_probing =
+      video_config_.periodic_alr_bandwidth_probing;
+  config.encoder_settings.experiment_cpu_load_estimator =
+      video_config_.experiment_cpu_load_estimator;
+  config.encoder_settings.encoder_factory = encoder_factory_;
+  config.encoder_settings.bitrate_allocator_factory =
+      bitrate_allocator_factory_;
+  config.encoder_settings.encoder_switch_request_callback = this;
+  config.crypto_options = crypto_options_;
+  config.rtp.extmap_allow_mixed = ExtmapAllowMixed();
+  config.rtcp_report_interval_ms = video_config_.rtcp_report_interval_ms;
+  config.rtp.enable_send_packet_batching =
+      video_config_.enable_send_packet_batching;
+
+  WebRtcVideoSendStream* stream = new WebRtcVideoSendStream(
+      call_, sp, std::move(config), default_send_options_,
+      video_config_.enable_cpu_adaptation, bitrate_config_.max_bitrate_bps,
+      send_codec(), send_rtp_extensions_, send_params_);
+
+  uint32_t ssrc = sp.first_ssrc();
+  RTC_DCHECK(ssrc != 0);
+  send_streams_[ssrc] = stream;
+
+  // Let observers (e.g. the receive side) know the set of send SSRCs changed.
+  if (ssrc_list_changed_callback_) {
+    ssrc_list_changed_callback_(send_ssrcs_);
+  }
+
+  if (sending_) {
+    stream->SetSend(true);
+  }
+
+  return true;
+}
+
+// Removes and destroys the send stream identified by `ssrc`, releasing all
+// SSRCs it owned. Returns false when no such stream exists.
+bool WebRtcVideoSendChannel::RemoveSendStream(uint32_t ssrc) {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  RTC_LOG(LS_INFO) << "RemoveSendStream: " << ssrc;
+
+  WebRtcVideoSendStream* removed_stream;
+  auto it = send_streams_.find(ssrc);
+  if (it == send_streams_.end()) {
+    return false;
+  }
+
+  // Release every SSRC the stream used (primary, RTX, FEC), not just `ssrc`.
+  for (uint32_t old_ssrc : it->second->GetSsrcs())
+    send_ssrcs_.erase(old_ssrc);
+
+  removed_stream = it->second;
+  send_streams_.erase(it);
+
+  // Switch receiver report SSRCs, in case the one in use is no longer valid.
+  if (ssrc_list_changed_callback_) {
+    ssrc_list_changed_callback_(send_ssrcs_);
+  }
+
+  // Destroy after unregistering so no callback can observe a dangling entry.
+  delete removed_stream;
+
+  return true;
+}
+
+// Fills `info` with per-sender and codec statistics for all send streams,
+// attaching the call-level RTT when available. Periodically (every
+// kStatsLogIntervalMs) also logs the call stats. Always returns true.
+bool WebRtcVideoSendChannel::GetStats(VideoMediaSendInfo* info) {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  TRACE_EVENT0("webrtc", "WebRtcVideoSendChannel::GetSendStats");
+
+  info->Clear();
+  if (send_streams_.empty()) {
+    return true;
+  }
+
+  // Log stats periodically.
+  bool log_stats = false;
+  int64_t now_ms = rtc::TimeMillis();
+  if (last_send_stats_log_ms_ == -1 ||
+      now_ms - last_send_stats_log_ms_ > kStatsLogIntervalMs) {
+    last_send_stats_log_ms_ = now_ms;
+    log_stats = true;
+  }
+
+  // Note: `info` was already cleared above; no second Clear() is needed.
+  FillSenderStats(info, log_stats);
+  FillSendCodecStats(info);
+  // TODO(holmer): We should either have rtt available as a metric on
+  // VideoSend/ReceiveStreams, or we should remove rtt from VideoSenderInfo.
+  webrtc::Call::Stats stats = call_->GetStats();
+  if (stats.rtt_ms != -1) {
+    for (size_t i = 0; i < info->senders.size(); ++i) {
+      info->senders[i].rtt_ms = stats.rtt_ms;
+    }
+    for (size_t i = 0; i < info->aggregated_senders.size(); ++i) {
+      info->aggregated_senders[i].rtt_ms = stats.rtt_ms;
+    }
+  }
+
+  if (log_stats)
+    RTC_LOG(LS_INFO) << stats.ToString(now_ms);
+
+  return true;
+}
+// Appends per-layer and aggregated sender infos from every send stream to
+// `video_media_info`. Streams that report no layer infos are skipped.
+void WebRtcVideoSendChannel::FillSenderStats(
+    VideoMediaSendInfo* video_media_info,
+    bool log_stats) {
+  for (const auto& it : send_streams_) {
+    auto infos = it.second->GetPerLayerVideoSenderInfos(log_stats);
+    if (infos.empty())
+      continue;
+    video_media_info->aggregated_senders.push_back(
+        it.second->GetAggregatedVideoSenderInfo(infos));
+    for (auto&& info : infos) {
+      video_media_info->senders.push_back(info);
+    }
+  }
+}
+
+// Accumulates bandwidth-estimation info from every send stream into
+// `bwe_info`.
+void WebRtcVideoSendChannel::FillBitrateInfo(
+    BandwidthEstimationInfo* bwe_info) {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  for (const auto& it : send_streams_) {
+    it.second->FillBitrateInfo(bwe_info);
+  }
+}
+
+// Records the primary send codec (id -> parameters) in `video_media_info`.
+// No-op when no send codec is configured.
+void WebRtcVideoSendChannel::FillSendCodecStats(
+    VideoMediaSendInfo* video_media_info) {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  if (!send_codec()) {
+    return;
+  }
+  // Note: since RTP stats don't account for RTX and FEC separately (see
+  // https://w3c.github.io/webrtc-stats/#dom-rtcstatstype-outbound-rtp)
+  // we can omit the codec information for those here and only insert the
+  // primary codec that is being used to send here.
+  video_media_info->send_codecs.insert(std::make_pair(
+      send_codec()->codec.id, send_codec()->codec.ToCodecParameters()));
+}
+
+// Forwards a sent-packet notification from the network layer to the Call.
+// Runs on the network thread.
+void WebRtcVideoSendChannel::OnPacketSent(const rtc::SentPacket& sent_packet) {
+  RTC_DCHECK_RUN_ON(&network_thread_checker_);
+  // TODO(tommi): We shouldn't need to go through call_ to deliver this
+  // notification. We should already have direct access to
+  // video_send_delay_stats_ and transport_send_ptr_ via `stream_`.
+  // So we should be able to remove OnSentPacket from Call and handle this per
+  // channel instead. At the moment Call::OnSentPacket calls OnSentPacket for
+  // the video stats, for all sent packets, including audio, which causes
+  // unnecessary lookups.
+  call_->OnSentPacket(sent_packet);
+}
+
+// Translates the transport's ready-to-send signal into a video network-state
+// change on the Call. Runs on the network thread.
+void WebRtcVideoSendChannel::OnReadyToSend(bool ready) {
+  RTC_DCHECK_RUN_ON(&network_thread_checker_);
+  RTC_LOG(LS_VERBOSE) << "OnReadyToSend: " << (ready ? "Ready." : "Not ready.");
+  call_->SignalChannelNetworkState(
+      webrtc::MediaType::VIDEO,
+      ready ? webrtc::kNetworkUp : webrtc::kNetworkDown);
+}
+
+// Called on the network thread when the transport's route changes; hops to
+// the worker thread (guarded by task_safety_ so the task is dropped if this
+// channel is destroyed first) to update the send-side transport controller.
+void WebRtcVideoSendChannel::OnNetworkRouteChanged(
+    absl::string_view transport_name,
+    const rtc::NetworkRoute& network_route) {
+  RTC_DCHECK_RUN_ON(&network_thread_checker_);
+  // Copy name/route by value: the originals may not outlive the posted task.
+  worker_thread_->PostTask(SafeTask(
+      task_safety_.flag(),
+      [this, name = std::string(transport_name), route = network_route] {
+        RTC_DCHECK_RUN_ON(&thread_checker_);
+        webrtc::RtpTransportControllerSendInterface* transport =
+            call_->GetTransportControllerSend();
+        transport->OnNetworkRouteChanged(name, route);
+        transport->OnTransportOverheadChanged(route.packet_overhead);
+      }));
+}
+
+// Installs the network interface and configures the RTP socket send-buffer
+// size, optionally overridden by the "WebRTC-SendBufferSizeBytes" field
+// trial (invalid trial values fall back to kVideoRtpSendBufferSize).
+void WebRtcVideoSendChannel::SetInterface(MediaChannelNetworkInterface* iface) {
+  RTC_DCHECK_RUN_ON(&network_thread_checker_);
+  MediaChannelUtil::SetInterface(iface);
+
+  // Speculative change to increase the outbound socket buffer size.
+  // In b/15152257, we are seeing a significant number of packets discarded
+  // due to lack of socket buffer space, although it's not yet clear what the
+  // ideal value should be.
+  const std::string group_name_send_buf_size =
+      call_->trials().Lookup("WebRTC-SendBufferSizeBytes");
+  int send_buffer_size = kVideoRtpSendBufferSize;
+  if (!group_name_send_buf_size.empty() &&
+      (sscanf(group_name_send_buf_size.c_str(), "%d", &send_buffer_size) != 1 ||
+       send_buffer_size <= 0)) {
+    RTC_LOG(LS_WARNING) << "Invalid send buffer size: "
+                        << group_name_send_buf_size;
+    send_buffer_size = kVideoRtpSendBufferSize;
+  }
+
+  MediaChannelUtil::SetOption(MediaChannelNetworkInterface::ST_RTP,
+                              rtc::Socket::OPT_SNDBUF, send_buffer_size);
+}
+
+// Attaches `frame_encryptor` to the send stream identified by `ssrc`;
+// logs an error when no such stream exists.
+void WebRtcVideoSendChannel::SetFrameEncryptor(
+    uint32_t ssrc,
+    rtc::scoped_refptr<webrtc::FrameEncryptorInterface> frame_encryptor) {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  auto matching_stream = send_streams_.find(ssrc);
+  if (matching_stream != send_streams_.end()) {
+    matching_stream->second->SetFrameEncryptor(frame_encryptor);
+  } else {
+    RTC_LOG(LS_ERROR) << "No stream found to attach frame encryptor";
+  }
+}
+
+// Attaches `encoder_selector` to the send stream identified by `ssrc`;
+// logs an error when no such stream exists.
+void WebRtcVideoSendChannel::SetEncoderSelector(
+    uint32_t ssrc,
+    webrtc::VideoEncoderFactory::EncoderSelectorInterface* encoder_selector) {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  auto matching_stream = send_streams_.find(ssrc);
+  if (matching_stream != send_streams_.end()) {
+    matching_stream->second->SetEncoderSelector(encoder_selector);
+  } else {
+    RTC_LOG(LS_ERROR) << "No stream found to attach encoder selector";
+  }
+}
+
+// Bundles the construction-time parameters of a WebRtcVideoSendStream.
+// `config` is moved in; `conference_mode` defaults to false and is set
+// later from the sender parameters.
+WebRtcVideoSendChannel::WebRtcVideoSendStream::VideoSendStreamParameters::
+    VideoSendStreamParameters(
+        webrtc::VideoSendStream::Config config,
+        const VideoOptions& options,
+        int max_bitrate_bps,
+        const absl::optional<VideoCodecSettings>& codec_settings)
+    : config(std::move(config)),
+      options(options),
+      max_bitrate_bps(max_bitrate_bps),
+      conference_mode(false),
+      codec_settings(codec_settings) {}
+
+// Builds the per-stream send configuration from the negotiated stream
+// params: primary/RTX/FlexFEC SSRCs, RTP extensions, RTCP mode, MID and
+// cname. If `codec_settings` is provided, immediately configures the codec
+// (which also creates the underlying webrtc::VideoSendStream).
+WebRtcVideoSendChannel::WebRtcVideoSendStream::WebRtcVideoSendStream(
+    webrtc::Call* call,
+    const StreamParams& sp,
+    webrtc::VideoSendStream::Config config,
+    const VideoOptions& options,
+    bool enable_cpu_overuse_detection,
+    int max_bitrate_bps,
+    const absl::optional<VideoCodecSettings>& codec_settings,
+    const absl::optional<std::vector<webrtc::RtpExtension>>& rtp_extensions,
+    // TODO(deadbeef): Don't duplicate information between send_params,
+    // rtp_extensions, options, etc.
+    const VideoSenderParameters& send_params)
+    : worker_thread_(call->worker_thread()),
+      ssrcs_(sp.ssrcs),
+      ssrc_groups_(sp.ssrc_groups),
+      call_(call),
+      enable_cpu_overuse_detection_(enable_cpu_overuse_detection),
+      source_(nullptr),
+      stream_(nullptr),
+      parameters_(std::move(config), options, max_bitrate_bps, codec_settings),
+      rtp_parameters_(CreateRtpParametersWithEncodings(sp)),
+      sending_(false),
+      disable_automatic_resize_(
+          IsEnabled(call->trials(), "WebRTC-Video-DisableAutomaticResize")) {
+  // Maximum packet size may come in RtpConfig from external transport, for
+  // example from QuicTransportInterface implementation, so do not exceed
+  // given max_packet_size.
+  parameters_.config.rtp.max_packet_size =
+      std::min<size_t>(parameters_.config.rtp.max_packet_size, kVideoMtu);
+  parameters_.conference_mode = send_params.conference_mode;
+
+  sp.GetPrimarySsrcs(&parameters_.config.rtp.ssrcs);
+
+  // ValidateStreamParams should prevent this from happening.
+  RTC_CHECK(!parameters_.config.rtp.ssrcs.empty());
+  rtp_parameters_.encodings[0].ssrc = parameters_.config.rtp.ssrcs[0];
+
+  // RTX.
+  sp.GetFidSsrcs(parameters_.config.rtp.ssrcs,
+                 &parameters_.config.rtp.rtx.ssrcs);
+
+  // FlexFEC SSRCs.
+  // TODO(brandtr): This code needs to be generalized when we add support for
+  // multistream protection.
+  if (IsEnabled(call_->trials(), "WebRTC-FlexFEC-03")) {
+    uint32_t flexfec_ssrc;
+    bool flexfec_enabled = false;
+    for (uint32_t primary_ssrc : parameters_.config.rtp.ssrcs) {
+      if (sp.GetFecFrSsrc(primary_ssrc, &flexfec_ssrc)) {
+        if (flexfec_enabled) {
+          RTC_LOG(LS_INFO)
+              << "Multiple FlexFEC streams in local SDP, but "
+                 "our implementation only supports a single FlexFEC "
+                 "stream. Will not enable FlexFEC for proposed "
+                 "stream with SSRC: "
+              << flexfec_ssrc << ".";
+          continue;
+        }
+
+        flexfec_enabled = true;
+        parameters_.config.rtp.flexfec.ssrc = flexfec_ssrc;
+        parameters_.config.rtp.flexfec.protected_media_ssrcs = {primary_ssrc};
+      }
+    }
+  }
+
+  parameters_.config.rtp.c_name = sp.cname;
+  // Keep config and exposed RtpParameters extensions in sync.
+  if (rtp_extensions) {
+    parameters_.config.rtp.extensions = *rtp_extensions;
+    rtp_parameters_.header_extensions = *rtp_extensions;
+  }
+  parameters_.config.rtp.rtcp_mode = send_params.rtcp.reduced_size
+                                         ? webrtc::RtcpMode::kReducedSize
+                                         : webrtc::RtcpMode::kCompound;
+  parameters_.config.rtp.mid = send_params.mid;
+  rtp_parameters_.rtcp.reduced_size = send_params.rtcp.reduced_size;
+
+  if (codec_settings) {
+    SetCodec(*codec_settings);
+  }
+}
+
+// Destroys the underlying webrtc::VideoSendStream (owned by the Call), if it
+// was ever created.
+WebRtcVideoSendChannel::WebRtcVideoSendStream::~WebRtcVideoSendStream() {
+  if (stream_ != NULL) {
+    call_->DestroyVideoSendStream(stream_);
+  }
+}
+
+// Updates the stream's video options and/or swaps the frame source. A
+// screencast-mode flip forces a codec re-set (possibly recreating the
+// encoder); any other option change triggers an encoder reconfigure.
+// Always returns true.
+bool WebRtcVideoSendChannel::WebRtcVideoSendStream::SetVideoSend(
+    const VideoOptions* options,
+    rtc::VideoSourceInterface<webrtc::VideoFrame>* source) {
+  TRACE_EVENT0("webrtc", "WebRtcVideoSendStream::SetVideoSend");
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+
+  if (options) {
+    VideoOptions old_options = parameters_.options;
+    parameters_.options.SetAll(*options);
+    if (parameters_.options.is_screencast.value_or(false) !=
+            old_options.is_screencast.value_or(false) &&
+        parameters_.codec_settings) {
+      // If screen content settings change, we may need to recreate the codec
+      // instance so that the correct type is used.
+
+      SetCodec(*parameters_.codec_settings);
+      // Mark screenshare parameter as being updated, then test for any other
+      // changes that may require codec reconfiguration.
+      old_options.is_screencast = options->is_screencast;
+    }
+    if (parameters_.options != old_options) {
+      ReconfigureEncoder(nullptr);
+    }
+  }
+
+  // Detach the old source before attaching the new one.
+  if (source_ && stream_) {
+    stream_->SetSource(nullptr, webrtc::DegradationPreference::DISABLED);
+  }
+  // Switch to the new source.
+  source_ = source;
+  if (source && stream_) {
+    stream_->SetSource(source_, GetDegradationPreference());
+  }
+  return true;
+}
+
+// Computes the degradation preference for this stream: DISABLED when CPU
+// overuse detection is off; otherwise the explicitly-set RtpParameters
+// preference, or one derived from content hints / screencast / field
+// trials.
+webrtc::DegradationPreference
+WebRtcVideoSendChannel::WebRtcVideoSendStream::GetDegradationPreference()
+    const {
+  // Do not adapt resolution for screen content as this will likely
+  // result in blurry and unreadable text.
+  // `this` acts like a VideoSource to make sure SinkWants are handled on the
+  // correct thread.
+  if (!enable_cpu_overuse_detection_) {
+    return webrtc::DegradationPreference::DISABLED;
+  }
+
+  webrtc::DegradationPreference degradation_preference;
+  if (rtp_parameters_.degradation_preference.has_value()) {
+    // An application-provided preference always wins.
+    degradation_preference = *rtp_parameters_.degradation_preference;
+  } else {
+    if (parameters_.options.content_hint ==
+        webrtc::VideoTrackInterface::ContentHint::kFluid) {
+      degradation_preference =
+          webrtc::DegradationPreference::MAINTAIN_FRAMERATE;
+    } else if (parameters_.options.is_screencast.value_or(false) ||
+               parameters_.options.content_hint ==
+                   webrtc::VideoTrackInterface::ContentHint::kDetailed ||
+               parameters_.options.content_hint ==
+                   webrtc::VideoTrackInterface::ContentHint::kText) {
+      degradation_preference =
+          webrtc::DegradationPreference::MAINTAIN_RESOLUTION;
+    } else if (IsEnabled(call_->trials(), "WebRTC-Video-BalancedDegradation")) {
+      // Standard wants balanced by default, but it needs to be tuned first.
+      degradation_preference = webrtc::DegradationPreference::BALANCED;
+    } else {
+      // Keep MAINTAIN_FRAMERATE by default until BALANCED has been tuned for
+      // all codecs and launched.
+      degradation_preference =
+          webrtc::DegradationPreference::MAINTAIN_FRAMERATE;
+    }
+  }
+
+  return degradation_preference;
+}
+
+// Returns all SSRCs owned by this stream (primary, RTX and FEC).
+const std::vector<uint32_t>&
+WebRtcVideoSendChannel::WebRtcVideoSendStream::GetSsrcs() const {
+  return ssrcs_;
+}
+
+// Applies `codec_settings` to this stream: rebuilds the encoder config,
+// updates payload types (primary, RTX, ULPFEC, FlexFEC), LNTF and NACK
+// settings, then recreates the underlying webrtc::VideoSendStream.
+void WebRtcVideoSendChannel::WebRtcVideoSendStream::SetCodec(
+    const VideoCodecSettings& codec_settings) {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  FallbackToDefaultScalabilityModeIfNotSupported(
+      codec_settings.codec, parameters_.config, rtp_parameters_.encodings);
+
+  parameters_.encoder_config = CreateVideoEncoderConfig(codec_settings.codec);
+  RTC_DCHECK_GT(parameters_.encoder_config.number_of_streams, 0);
+
+  parameters_.config.rtp.payload_name = codec_settings.codec.name;
+  parameters_.config.rtp.payload_type = codec_settings.codec.id;
+  parameters_.config.rtp.raw_payload =
+      codec_settings.codec.packetization == kPacketizationParamRaw;
+  parameters_.config.rtp.ulpfec = codec_settings.ulpfec;
+  parameters_.config.rtp.flexfec.payload_type =
+      codec_settings.flexfec_payload_type;
+
+  // Set RTX payload type if RTX is enabled.
+  if (!parameters_.config.rtp.rtx.ssrcs.empty()) {
+    if (codec_settings.rtx_payload_type == -1) {
+      RTC_LOG(LS_WARNING)
+          << "RTX SSRCs configured but there's no configured RTX "
+             "payload type. Ignoring.";
+      parameters_.config.rtp.rtx.ssrcs.clear();
+    } else {
+      parameters_.config.rtp.rtx.payload_type = codec_settings.rtx_payload_type;
+    }
+  }
+
+  // LNTF (loss notification) must be reflected both in RTP config and in the
+  // encoder capabilities.
+  const bool has_lntf = HasLntf(codec_settings.codec);
+  parameters_.config.rtp.lntf.enabled = has_lntf;
+  parameters_.config.encoder_settings.capabilities.loss_notification = has_lntf;
+
+  parameters_.config.rtp.nack.rtp_history_ms =
+      HasNack(codec_settings.codec) ? kNackHistoryMs : 0;
+
+  parameters_.codec_settings = codec_settings;
+
+  // TODO(bugs.webrtc.org/8830): Avoid recreation, it should be enough to call
+  // ReconfigureEncoder.
+  RTC_LOG(LS_INFO) << "RecreateWebRtcStream (send) because of SetCodec.";
+  RecreateWebRtcStream();
+}
+
+// Applies channel-level parameter changes to this stream. Construction-time
+// parameters (RTCP mode, extmap, header extensions, MID) require recreating
+// the webrtc::VideoSendStream; max bandwidth only reconfigures the encoder;
+// a codec or conference-mode change goes through SetCodec (which recreates
+// the stream itself, so the flag is reset).
+void WebRtcVideoSendChannel::WebRtcVideoSendStream::SetSenderParameters(
+    const ChangedSenderParameters& params) {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  // `recreate_stream` means construction-time parameters have changed and the
+  // sending stream needs to be reset with the new config.
+  bool recreate_stream = false;
+  if (params.rtcp_mode) {
+    parameters_.config.rtp.rtcp_mode = *params.rtcp_mode;
+    rtp_parameters_.rtcp.reduced_size =
+        parameters_.config.rtp.rtcp_mode == webrtc::RtcpMode::kReducedSize;
+    recreate_stream = true;
+  }
+  if (params.extmap_allow_mixed) {
+    parameters_.config.rtp.extmap_allow_mixed = *params.extmap_allow_mixed;
+    recreate_stream = true;
+  }
+  if (params.rtp_header_extensions) {
+    parameters_.config.rtp.extensions = *params.rtp_header_extensions;
+    rtp_parameters_.header_extensions = *params.rtp_header_extensions;
+    recreate_stream = true;
+  }
+  if (params.mid) {
+    parameters_.config.rtp.mid = *params.mid;
+    recreate_stream = true;
+  }
+  if (params.max_bandwidth_bps) {
+    parameters_.max_bitrate_bps = *params.max_bandwidth_bps;
+    ReconfigureEncoder(nullptr);
+  }
+  if (params.conference_mode) {
+    parameters_.conference_mode = *params.conference_mode;
+  }
+
+  // Set codecs and options.
+  if (params.send_codec) {
+    SetCodec(*params.send_codec);
+    recreate_stream = false;  // SetCodec has already recreated the stream.
+  } else if (params.conference_mode && parameters_.codec_settings) {
+    SetCodec(*parameters_.codec_settings);
+    recreate_stream = false;  // SetCodec has already recreated the stream.
+  }
+  if (recreate_stream) {
+    RTC_LOG(LS_INFO)
+        << "RecreateWebRtcStream (send) because of SetSenderParameters";
+    RecreateWebRtcStream();
+  }
+}
+
+// Validates and applies `new_parameters` to this stream. Detects which
+// categories changed (encoding constraints, degradation preference, layer
+// activation) and only performs the corresponding work: encoder
+// reconfiguration, send-state update, source re-attachment, and per-RID
+// keyframe generation. The `callback` is invoked exactly once, either here
+// or inside ReconfigureEncoder.
+webrtc::RTCError
+WebRtcVideoSendChannel::WebRtcVideoSendStream::SetRtpParameters(
+    const webrtc::RtpParameters& new_parameters,
+    webrtc::SetParametersCallback callback) {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  // This is checked higher in the stack (RtpSender), so this is only checking
+  // for users accessing the private APIs or tests, not specification
+  // conformance.
+  // TODO(orphis): Migrate tests to later make this a DCHECK only
+  webrtc::RTCError error = CheckRtpParametersInvalidModificationAndValues(
+      rtp_parameters_, new_parameters);
+  if (!error.ok()) {
+    // Error is propagated to the callback at a higher level
+    return error;
+  }
+
+  // Did any per-encoding constraint change that requires encoder work?
+  bool new_param = false;
+  for (size_t i = 0; i < rtp_parameters_.encodings.size(); ++i) {
+    if ((new_parameters.encodings[i].min_bitrate_bps !=
+         rtp_parameters_.encodings[i].min_bitrate_bps) ||
+        (new_parameters.encodings[i].max_bitrate_bps !=
+         rtp_parameters_.encodings[i].max_bitrate_bps) ||
+        (new_parameters.encodings[i].max_framerate !=
+         rtp_parameters_.encodings[i].max_framerate) ||
+        (new_parameters.encodings[i].scale_resolution_down_by !=
+         rtp_parameters_.encodings[i].scale_resolution_down_by) ||
+        (new_parameters.encodings[i].num_temporal_layers !=
+         rtp_parameters_.encodings[i].num_temporal_layers) ||
+        (new_parameters.encodings[i].requested_resolution !=
+         rtp_parameters_.encodings[i].requested_resolution) ||
+        (new_parameters.encodings[i].scalability_mode !=
+         rtp_parameters_.encodings[i].scalability_mode)) {
+      new_param = true;
+      break;
+    }
+  }
+
+  bool new_degradation_preference = false;
+  if (new_parameters.degradation_preference !=
+      rtp_parameters_.degradation_preference) {
+    new_degradation_preference = true;
+  }
+
+  // Some fields (e.g. bitrate priority) only need to update the bitrate
+  // allocator which is updated via ReconfigureEncoder (however, note that the
+  // actual encoder should only be reconfigured if needed).
+  bool reconfigure_encoder =
+      new_param || (new_parameters.encodings[0].bitrate_priority !=
+                    rtp_parameters_.encodings[0].bitrate_priority);
+
+  // Note that the simulcast encoder adapter relies on the fact that layers
+  // de/activation triggers encoder reinitialization.
+  bool new_send_state = false;
+  for (size_t i = 0; i < rtp_parameters_.encodings.size(); ++i) {
+    bool new_active = IsLayerActive(new_parameters.encodings[i]);
+    bool old_active = IsLayerActive(rtp_parameters_.encodings[i]);
+    if (new_active != old_active) {
+      new_send_state = true;
+    }
+  }
+  rtp_parameters_ = new_parameters;
+  // Codecs are currently handled at the WebRtcVideoSendChannel level.
+  rtp_parameters_.codecs.clear();
+  if (reconfigure_encoder || new_send_state) {
+    // Callback responsibility is delegated to ReconfigureEncoder()
+    ReconfigureEncoder(std::move(callback));
+    callback = nullptr;
+  }
+  if (new_send_state) {
+    UpdateSendState();
+  }
+  if (new_degradation_preference) {
+    if (source_ && stream_) {
+      stream_->SetSource(source_, GetDegradationPreference());
+    }
+  }
+  // Check if a key frame was requested via setParameters.
+  std::vector<std::string> key_frames_requested_by_rid;
+  for (const auto& encoding : rtp_parameters_.encodings) {
+    if (encoding.request_key_frame) {
+      key_frames_requested_by_rid.push_back(encoding.rid);
+    }
+  }
+  if (!key_frames_requested_by_rid.empty()) {
+    if (key_frames_requested_by_rid.size() == 1 &&
+        key_frames_requested_by_rid[0] == "") {
+      // For non-simulcast cases there is no rid,
+      // request a keyframe on all layers.
+      key_frames_requested_by_rid.clear();
+    }
+    GenerateKeyFrame(key_frames_requested_by_rid);
+  }
+  return webrtc::InvokeSetParametersCallback(callback, webrtc::RTCError::OK());
+}
+
+// Returns a copy of this stream's current RTP parameters (codecs are kept
+// empty here; they are filled in by the channel).
+webrtc::RtpParameters
+WebRtcVideoSendChannel::WebRtcVideoSendStream::GetRtpParameters() const {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  return rtp_parameters_;
+}
+
+// Stores `frame_encryptor` in the stream config; if the underlying stream
+// already exists it must be recreated for the encryptor to take effect.
+void WebRtcVideoSendChannel::WebRtcVideoSendStream::SetFrameEncryptor(
+    rtc::scoped_refptr<webrtc::FrameEncryptorInterface> frame_encryptor) {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  parameters_.config.frame_encryptor = frame_encryptor;
+  if (stream_) {
+    RTC_LOG(LS_INFO)
+        << "RecreateWebRtcStream (send) because of SetFrameEncryptor, ssrc="
+        << parameters_.config.rtp.ssrcs[0];
+    RecreateWebRtcStream();
+  }
+}
+
+// Stores `encoder_selector` in the stream config; if the underlying stream
+// already exists it must be recreated for the selector to take effect.
+void WebRtcVideoSendChannel::WebRtcVideoSendStream::SetEncoderSelector(
+    webrtc::VideoEncoderFactory::EncoderSelectorInterface* encoder_selector) {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  parameters_.config.encoder_selector = encoder_selector;
+  if (stream_) {
+    RTC_LOG(LS_INFO)
+        << "RecreateWebRtcStream (send) because of SetEncoderSelector, ssrc="
+        << parameters_.config.rtp.ssrcs[0];
+    RecreateWebRtcStream();
+  }
+}
+
+// Pushes the current sending_/per-layer activation state down to the
+// underlying webrtc::VideoSendStream. In the SVC case (one simulcast stream
+// but multiple encodings) the single layer is active iff any SVC layer is.
+void WebRtcVideoSendChannel::WebRtcVideoSendStream::UpdateSendState() {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  if (sending_) {
+    RTC_DCHECK(stream_ != nullptr);
+    size_t num_layers = rtp_parameters_.encodings.size();
+    if (parameters_.encoder_config.number_of_streams == 1) {
+      // SVC is used. Only one simulcast layer is present.
+      num_layers = 1;
+    }
+    std::vector<bool> active_layers(num_layers);
+    for (size_t i = 0; i < num_layers; ++i) {
+      active_layers[i] = IsLayerActive(rtp_parameters_.encodings[i]);
+    }
+    if (parameters_.encoder_config.number_of_streams == 1 &&
+        rtp_parameters_.encodings.size() > 1) {
+      // SVC is used.
+      // The only present simulcast layer should be active if any of the
+      // configured SVC layers is active.
+      active_layers[0] =
+          absl::c_any_of(rtp_parameters_.encodings,
+                         [](const auto& encoding) { return encoding.active; });
+    }
+    // This updates what simulcast layers are sending, and possibly starts
+    // or stops the VideoSendStream.
+    stream_->StartPerRtpStream(active_layers);
+  } else {
+    if (stream_ != nullptr) {
+      stream_->Stop();
+    }
+  }
+}
+
+webrtc::VideoEncoderConfig
+WebRtcVideoSendChannel::WebRtcVideoSendStream::CreateVideoEncoderConfig(
+ const VideoCodec& codec) const {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ webrtc::VideoEncoderConfig encoder_config;
+ encoder_config.codec_type = webrtc::PayloadStringToCodecType(codec.name);
+ encoder_config.video_format =
+ webrtc::SdpVideoFormat(codec.name, codec.params);
+
+ bool is_screencast = parameters_.options.is_screencast.value_or(false);
+ if (is_screencast) {
+ encoder_config.min_transmit_bitrate_bps =
+ 1000 * parameters_.options.screencast_min_bitrate_kbps.value_or(0);
+ encoder_config.content_type =
+ webrtc::VideoEncoderConfig::ContentType::kScreen;
+ } else {
+ encoder_config.min_transmit_bitrate_bps = 0;
+ encoder_config.content_type =
+ webrtc::VideoEncoderConfig::ContentType::kRealtimeVideo;
+ }
+
+ // By default, the stream count for the codec configuration should match the
+ // number of negotiated ssrcs but this may be capped below depending on the
+ // `legacy_scalability_mode` and codec used.
+ encoder_config.number_of_streams = parameters_.config.rtp.ssrcs.size();
+ bool legacy_scalability_mode = true;
+ for (const webrtc::RtpEncodingParameters& encoding :
+ rtp_parameters_.encodings) {
+ if (encoding.scalability_mode.has_value() &&
+ encoding.scale_resolution_down_by.has_value()) {
+ legacy_scalability_mode = false;
+ break;
+ }
+ }
+ // Maybe limit the number of simulcast layers depending on
+ // `legacy_scalability_mode`, codec types (VP9/AV1). This path only exists
+ // for backwards compatibility and will one day be deleted. If you want SVC,
+ // please specify with the `scalability_mode` API instead amd disabling all
+ // but one encoding.
+ if (IsCodecDisabledForSimulcast(legacy_scalability_mode,
+ encoder_config.codec_type)) {
+ encoder_config.number_of_streams = 1;
+ }
+
+ // parameters_.max_bitrate comes from the max bitrate set at the SDP
+ // (m-section) level with the attribute "b=AS." Note that stream max bitrate
+ // is the RtpSender's max bitrate, but each individual encoding may also have
+ // its own max bitrate specified by SetParameters.
+ int stream_max_bitrate = parameters_.max_bitrate_bps;
+ // The codec max bitrate comes from the "x-google-max-bitrate" parameter
+ // attribute set in the SDP for a specific codec. It only has an effect if
+ // max bitrate is not specified through other means.
+ bool encodings_has_max_bitrate = false;
+ for (const auto& encoding : rtp_parameters_.encodings) {
+ if (encoding.active && encoding.max_bitrate_bps.value_or(0) > 0) {
+ encodings_has_max_bitrate = true;
+ break;
+ }
+ }
+ int codec_max_bitrate_kbps;
+ if (codec.GetParam(kCodecParamMaxBitrate, &codec_max_bitrate_kbps) &&
+ stream_max_bitrate == -1 && !encodings_has_max_bitrate) {
+ stream_max_bitrate = codec_max_bitrate_kbps * 1000;
+ }
+ encoder_config.max_bitrate_bps = stream_max_bitrate;
+
+ // The encoder config's default bitrate priority is set to 1.0,
+ // unless it is set through the sender's encoding parameters.
+ // The bitrate priority, which is used in the bitrate allocation, is done
+ // on a per sender basis, so we use the first encoding's value.
+ encoder_config.bitrate_priority =
+ rtp_parameters_.encodings[0].bitrate_priority;
+
+ // Application-controlled state is held in the encoder_config's
+ // simulcast_layers. Currently this is used to control which simulcast layers
+ // are active and for configuring the min/max bitrate and max framerate.
+ // The encoder_config's simulcast_layers is also used for non-simulcast (when
+ // there is a single layer).
+ RTC_DCHECK_GE(rtp_parameters_.encodings.size(),
+ encoder_config.number_of_streams);
+ RTC_DCHECK_GT(encoder_config.number_of_streams, 0);
+
+ // Copy all provided constraints.
+ encoder_config.simulcast_layers.resize(rtp_parameters_.encodings.size());
+ for (size_t i = 0; i < encoder_config.simulcast_layers.size(); ++i) {
+ encoder_config.simulcast_layers[i].active =
+ rtp_parameters_.encodings[i].active;
+ encoder_config.simulcast_layers[i].scalability_mode =
+ webrtc::ScalabilityModeFromString(
+ rtp_parameters_.encodings[i].scalability_mode.value_or(""));
+ if (rtp_parameters_.encodings[i].min_bitrate_bps) {
+ encoder_config.simulcast_layers[i].min_bitrate_bps =
+ *rtp_parameters_.encodings[i].min_bitrate_bps;
+ }
+ if (rtp_parameters_.encodings[i].max_bitrate_bps) {
+ encoder_config.simulcast_layers[i].max_bitrate_bps =
+ *rtp_parameters_.encodings[i].max_bitrate_bps;
+ }
+ if (rtp_parameters_.encodings[i].max_framerate) {
+ encoder_config.simulcast_layers[i].max_framerate =
+ *rtp_parameters_.encodings[i].max_framerate;
+ }
+ if (rtp_parameters_.encodings[i].scale_resolution_down_by) {
+ encoder_config.simulcast_layers[i].scale_resolution_down_by =
+ *rtp_parameters_.encodings[i].scale_resolution_down_by;
+ }
+ if (rtp_parameters_.encodings[i].num_temporal_layers) {
+ encoder_config.simulcast_layers[i].num_temporal_layers =
+ *rtp_parameters_.encodings[i].num_temporal_layers;
+ }
+ encoder_config.simulcast_layers[i].requested_resolution =
+ rtp_parameters_.encodings[i].requested_resolution;
+ }
+
+ encoder_config.legacy_conference_mode = parameters_.conference_mode;
+
+ encoder_config.is_quality_scaling_allowed =
+ !disable_automatic_resize_ && !is_screencast &&
+ (parameters_.config.rtp.ssrcs.size() == 1 ||
+ NumActiveStreams(rtp_parameters_) == 1);
+
+ // Ensure frame dropping is always enabled.
+ encoder_config.frame_drop_enabled = true;
+
+ int max_qp;
+ switch (encoder_config.codec_type) {
+ case webrtc::kVideoCodecH264:
+ case webrtc::kVideoCodecH265:
+ max_qp = kDefaultVideoMaxQpH26x;
+ break;
+ case webrtc::kVideoCodecVP8:
+ case webrtc::kVideoCodecVP9:
+ case webrtc::kVideoCodecAV1:
+ case webrtc::kVideoCodecGeneric:
+ case webrtc::kVideoCodecMultiplex:
+ max_qp = kDefaultVideoMaxQpVpx;
+ break;
+ }
+ codec.GetParam(kCodecParamMaxQuantization, &max_qp);
+ encoder_config.max_qp = max_qp;
+
+ return encoder_config;
+}
+
+// Pushes the currently stored codec settings and RTP parameters down to the
+// underlying webrtc::VideoSendStream. If the number of simulcast streams has
+// changed (legacy SVC <-> standard mode switch) the stream is recreated
+// instead of reconfigured. `callback` is always resolved, either here or by
+// the lower layer.
+void WebRtcVideoSendChannel::WebRtcVideoSendStream::ReconfigureEncoder(
+    webrtc::SetParametersCallback callback) {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  if (!stream_) {
+    // The webrtc::VideoSendStream `stream_` has not yet been created but
+    // other parameters have changed.
+    webrtc::InvokeSetParametersCallback(callback, webrtc::RTCError::OK());
+    return;
+  }
+
+  RTC_DCHECK_GT(parameters_.encoder_config.number_of_streams, 0);
+
+  RTC_CHECK(parameters_.codec_settings);
+  VideoCodecSettings codec_settings = *parameters_.codec_settings;
+
+  FallbackToDefaultScalabilityModeIfNotSupported(
+      codec_settings.codec, parameters_.config, rtp_parameters_.encodings);
+
+  // Latest config, with and without encoder specific settings.
+  webrtc::VideoEncoderConfig encoder_config =
+      CreateVideoEncoderConfig(codec_settings.codec);
+  encoder_config.encoder_specific_settings =
+      ConfigureVideoEncoderSettings(codec_settings.codec);
+  // Keep a copy retaining the specifics for the actual reconfigure call; the
+  // version stored in `parameters_` below must not hold them.
+  webrtc::VideoEncoderConfig encoder_config_with_specifics =
+      encoder_config.Copy();
+  encoder_config.encoder_specific_settings = nullptr;
+
+  // When switching between legacy SVC (3 encodings interpreted as 1 stream with
+  // 3 spatial layers) and the standard API (3 encodings = 3 streams and spatial
+  // layers specified by `scalability_mode`), the number of streams can change.
+  bool num_streams_changed = parameters_.encoder_config.number_of_streams !=
+                             encoder_config.number_of_streams;
+  parameters_.encoder_config = std::move(encoder_config);
+
+  if (num_streams_changed) {
+    // The app is switching between legacy and standard modes, recreate instead
+    // of reconfiguring to avoid number of streams not matching in lower layers.
+    RecreateWebRtcStream();
+    webrtc::InvokeSetParametersCallback(callback, webrtc::RTCError::OK());
+    return;
+  }
+
+  stream_->ReconfigureVideoEncoder(std::move(encoder_config_with_specifics),
+                                   std::move(callback));
+}
+
+// Toggles the sending intent for this stream.
+void WebRtcVideoSendChannel::WebRtcVideoSendStream::SetSend(bool send) {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  // Record the new intent, then let UpdateSendState() reconcile it with the
+  // other preconditions for actually sending.
+  sending_ = send;
+  UpdateSendState();
+}
+
+// Builds one VideoSenderInfo per outbound RTP substream. RTX and FlexFEC
+// substreams are merged into their media SSRC first, so each returned entry
+// corresponds to one outbound-rtp stats object. When `stream_` does not
+// exist, or has no substream stats yet, a single entry representing all
+// configured SSRCs is returned instead.
+std::vector<VideoSenderInfo>
+WebRtcVideoSendChannel::WebRtcVideoSendStream::GetPerLayerVideoSenderInfos(
+    bool log_stats) {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  VideoSenderInfo common_info;
+  if (parameters_.codec_settings) {
+    common_info.codec_name = parameters_.codec_settings->codec.name;
+    common_info.codec_payload_type = parameters_.codec_settings->codec.id;
+  }
+  std::vector<VideoSenderInfo> infos;
+  webrtc::VideoSendStream::Stats stats;
+  if (stream_ == nullptr) {
+    // No underlying stream yet: report a single entry carrying only the
+    // configured SSRCs.
+    for (uint32_t ssrc : parameters_.config.rtp.ssrcs) {
+      common_info.add_ssrc(ssrc);
+    }
+    infos.push_back(common_info);
+    return infos;
+  } else {
+    stats = stream_->GetStats();
+    if (log_stats)
+      RTC_LOG(LS_INFO) << stats.ToString(rtc::TimeMillis());
+
+    // Metrics that are in common for all substreams.
+    common_info.adapt_changes = stats.number_of_cpu_adapt_changes;
+    common_info.adapt_reason =
+        stats.cpu_limited_resolution ? ADAPTREASON_CPU : ADAPTREASON_NONE;
+    common_info.has_entered_low_resolution = stats.has_entered_low_resolution;
+
+    // Get bandwidth limitation info from stream_->GetStats().
+    // Input resolution (output from video_adapter) can be further scaled down
+    // or higher video layer(s) can be dropped due to bitrate constraints.
+    // Note, adapt_changes only include changes from the video_adapter.
+    if (stats.bw_limited_resolution)
+      common_info.adapt_reason |= ADAPTREASON_BANDWIDTH;
+
+    common_info.quality_limitation_reason = stats.quality_limitation_reason;
+    common_info.quality_limitation_durations_ms =
+        stats.quality_limitation_durations_ms;
+    common_info.quality_limitation_resolution_changes =
+        stats.quality_limitation_resolution_changes;
+    common_info.encoder_implementation_name = stats.encoder_implementation_name;
+    common_info.target_bitrate = stats.target_media_bitrate_bps;
+    common_info.ssrc_groups = ssrc_groups_;
+    common_info.frames = stats.frames;
+    common_info.framerate_input = stats.input_frame_rate;
+    common_info.avg_encode_ms = stats.avg_encode_time_ms;
+    common_info.encode_usage_percent = stats.encode_usage_percent;
+    common_info.nominal_bitrate = stats.media_bitrate_bps;
+    common_info.content_type = stats.content_type;
+    common_info.aggregated_framerate_sent = stats.encode_frame_rate;
+    common_info.aggregated_huge_frames_sent = stats.huge_frames_sent;
+    common_info.power_efficient_encoder = stats.power_efficient_encoder;
+
+    // The normal case is that substreams are present, handled below. But if
+    // substreams are missing (can happen before negotiated/connected where we
+    // have no stats yet) a single outbound-rtp is created representing any and
+    // all layers.
+    if (stats.substreams.empty()) {
+      for (uint32_t ssrc : parameters_.config.rtp.ssrcs) {
+        common_info.add_ssrc(ssrc);
+      }
+      common_info.active =
+          IsActiveFromEncodings(absl::nullopt, rtp_parameters_.encodings);
+      common_info.framerate_sent = stats.encode_frame_rate;
+      common_info.frames_encoded = stats.frames_encoded;
+      common_info.total_encode_time_ms = stats.total_encode_time_ms;
+      common_info.total_encoded_bytes_target = stats.total_encoded_bytes_target;
+      common_info.frames_sent = stats.frames_encoded;
+      common_info.huge_frames_sent = stats.huge_frames_sent;
+      infos.push_back(common_info);
+      return infos;
+    }
+  }
+  // Merge `stats.substreams`, which may contain additional SSRCs for RTX or
+  // Flexfec, with media SSRCs. This results in a set of substreams that match
+  // with the outbound-rtp stats objects.
+  auto outbound_rtp_substreams =
+      MergeInfoAboutOutboundRtpSubstreams(stats.substreams);
+  // If SVC is used, one stream is configured but multiple encodings exist. This
+  // is not spec-compliant, but it is how we've implemented SVC so this affects
+  // how the RTP stream's "active" value is determined.
+  bool is_svc = (parameters_.encoder_config.number_of_streams == 1 &&
+                 rtp_parameters_.encodings.size() > 1);
+  for (const auto& pair : outbound_rtp_substreams) {
+    // Start from the shared metrics, then layer on per-substream values.
+    auto info = common_info;
+    uint32_t ssrc = pair.first;
+    info.add_ssrc(ssrc);
+    info.rid = parameters_.config.rtp.GetRidForSsrc(ssrc);
+    // In SVC mode "active" is a property of the whole stream, not of one SSRC.
+    info.active = IsActiveFromEncodings(
+        !is_svc ? absl::optional<uint32_t>(ssrc) : absl::nullopt,
+        rtp_parameters_.encodings);
+    auto stream_stats = pair.second;
+    RTC_DCHECK_EQ(stream_stats.type,
+                  webrtc::VideoSendStream::StreamStats::StreamType::kMedia);
+    info.payload_bytes_sent = stream_stats.rtp_stats.transmitted.payload_bytes;
+    info.header_and_padding_bytes_sent =
+        stream_stats.rtp_stats.transmitted.header_bytes +
+        stream_stats.rtp_stats.transmitted.padding_bytes;
+    info.packets_sent = stream_stats.rtp_stats.transmitted.packets;
+    info.total_packet_send_delay +=
+        stream_stats.rtp_stats.transmitted.total_packet_delay;
+    info.send_frame_width = stream_stats.width;
+    info.send_frame_height = stream_stats.height;
+    info.key_frames_encoded = stream_stats.frame_counts.key_frames;
+    info.framerate_sent = stream_stats.encode_frame_rate;
+    info.frames_encoded = stream_stats.frames_encoded;
+    info.frames_sent = stream_stats.frames_encoded;
+    info.retransmitted_bytes_sent =
+        stream_stats.rtp_stats.retransmitted.payload_bytes;
+    info.retransmitted_packets_sent =
+        stream_stats.rtp_stats.retransmitted.packets;
+    info.firs_received = stream_stats.rtcp_packet_type_counts.fir_packets;
+    info.nacks_received = stream_stats.rtcp_packet_type_counts.nack_packets;
+    info.plis_received = stream_stats.rtcp_packet_type_counts.pli_packets;
+    if (stream_stats.report_block_data.has_value()) {
+      info.packets_lost = stream_stats.report_block_data->cumulative_lost();
+      info.fraction_lost = stream_stats.report_block_data->fraction_lost();
+      info.report_block_datas.push_back(*stream_stats.report_block_data);
+    }
+    info.qp_sum = stream_stats.qp_sum;
+    info.total_encode_time_ms = stream_stats.total_encode_time_ms;
+    info.total_encoded_bytes_target = stream_stats.total_encoded_bytes_target;
+    info.huge_frames_sent = stream_stats.huge_frames_sent;
+    info.scalability_mode = stream_stats.scalability_mode;
+    infos.push_back(info);
+  }
+  return infos;
+}
+
+// Collapses per-layer sender infos into a single aggregate entry: counters
+// are summed across layers, frame dimensions take the per-layer maximum, and
+// the aggregated framerate / huge-frame values recorded in infos[0] are
+// promoted. Requires `infos` to be non-empty; a single-element vector is
+// returned as-is.
+VideoSenderInfo
+WebRtcVideoSendChannel::WebRtcVideoSendStream::GetAggregatedVideoSenderInfo(
+    const std::vector<VideoSenderInfo>& infos) const {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  RTC_CHECK(!infos.empty());
+  if (infos.size() == 1) {
+    return infos[0];
+  }
+  // Seed from the first layer, then replace its per-layer SSRC list with the
+  // full configured set.
+  VideoSenderInfo info = infos[0];
+  info.local_stats.clear();
+  for (uint32_t ssrc : parameters_.config.rtp.ssrcs) {
+    info.add_ssrc(ssrc);
+  }
+  info.framerate_sent = info.aggregated_framerate_sent;
+  info.huge_frames_sent = info.aggregated_huge_frames_sent;
+
+  for (size_t i = 1; i < infos.size(); i++) {
+    info.key_frames_encoded += infos[i].key_frames_encoded;
+    info.payload_bytes_sent += infos[i].payload_bytes_sent;
+    info.header_and_padding_bytes_sent +=
+        infos[i].header_and_padding_bytes_sent;
+    info.packets_sent += infos[i].packets_sent;
+    info.total_packet_send_delay += infos[i].total_packet_send_delay;
+    info.retransmitted_bytes_sent += infos[i].retransmitted_bytes_sent;
+    info.retransmitted_packets_sent += infos[i].retransmitted_packets_sent;
+    info.packets_lost += infos[i].packets_lost;
+    // Report the largest encoded resolution among the layers.
+    if (infos[i].send_frame_width > info.send_frame_width)
+      info.send_frame_width = infos[i].send_frame_width;
+    if (infos[i].send_frame_height > info.send_frame_height)
+      info.send_frame_height = infos[i].send_frame_height;
+    info.firs_received += infos[i].firs_received;
+    info.nacks_received += infos[i].nacks_received;
+    info.plis_received += infos[i].plis_received;
+    // Keep at most one report block per layer.
+    if (infos[i].report_block_datas.size())
+      info.report_block_datas.push_back(infos[i].report_block_datas[0]);
+    // qp_sum is optional; only materialize it when some layer has a value.
+    if (infos[i].qp_sum) {
+      if (!info.qp_sum) {
+        info.qp_sum = 0;
+      }
+      info.qp_sum = *info.qp_sum + *infos[i].qp_sum;
+    }
+    info.frames_encoded += infos[i].frames_encoded;
+    info.frames_sent += infos[i].frames_sent;
+    info.total_encode_time_ms += infos[i].total_encode_time_ms;
+    info.total_encoded_bytes_target += infos[i].total_encoded_bytes_target;
+  }
+  return info;
+}
+
+// Accumulates this stream's send-side bitrate figures into `bwe_info`.
+// No-op when the underlying send stream does not exist.
+void WebRtcVideoSendChannel::WebRtcVideoSendStream::FillBitrateInfo(
+    BandwidthEstimationInfo* bwe_info) {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  if (stream_ == NULL) {
+    return;
+  }
+  const webrtc::VideoSendStream::Stats stats = stream_->GetStats();
+  for (const auto& [ssrc, substream] : stats.substreams) {
+    bwe_info->transmit_bitrate += substream.total_bitrate_bps;
+    bwe_info->retransmit_bitrate += substream.retransmit_bitrate_bps;
+  }
+  bwe_info->target_enc_bitrate += stats.target_media_bitrate_bps;
+  bwe_info->actual_enc_bitrate += stats.media_bitrate_bps;
+}
+
+// Installs `frame_transformer` on the send-stream config; the stream is
+// recreated (when present) so the transformer takes effect.
+void WebRtcVideoSendChannel::WebRtcVideoSendStream::
+    SetEncoderToPacketizerFrameTransformer(
+        rtc::scoped_refptr<webrtc::FrameTransformerInterface>
+            frame_transformer) {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  parameters_.config.frame_transformer = std::move(frame_transformer);
+  if (stream_ != nullptr) {
+    RecreateWebRtcStream();
+  }
+}
+
+// Destroys the underlying webrtc::VideoSendStream (if present) and recreates
+// it from the currently stored `parameters_`, re-evaluating the send state
+// and re-attaching the frame source afterwards.
+void WebRtcVideoSendChannel::WebRtcVideoSendStream::RecreateWebRtcStream() {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  if (stream_ != NULL) {
+    call_->DestroyVideoSendStream(stream_);
+  }
+
+  RTC_CHECK(parameters_.codec_settings);
+  RTC_DCHECK_EQ((parameters_.encoder_config.content_type ==
+                 webrtc::VideoEncoderConfig::ContentType::kScreen),
+                parameters_.options.is_screencast.value_or(false))
+      << "encoder content type inconsistent with screencast option";
+  parameters_.encoder_config.encoder_specific_settings =
+      ConfigureVideoEncoderSettings(parameters_.codec_settings->codec);
+
+  webrtc::VideoSendStream::Config config = parameters_.config.Copy();
+  if (!config.rtp.rtx.ssrcs.empty() && config.rtp.rtx.payload_type == -1) {
+    RTC_LOG(LS_WARNING) << "RTX SSRCs configured but there's no configured RTX "
+                           "payload type for the set codec. Ignoring RTX.";
+    config.rtp.rtx.ssrcs.clear();
+  }
+  if (parameters_.encoder_config.number_of_streams == 1) {
+    // SVC is used instead of simulcast. Remove unnecessary SSRCs.
+    if (config.rtp.ssrcs.size() > 1) {
+      config.rtp.ssrcs.resize(1);
+      if (config.rtp.rtx.ssrcs.size() > 1) {
+        config.rtp.rtx.ssrcs.resize(1);
+      }
+    }
+  }
+  stream_ = call_->CreateVideoSendStream(std::move(config),
+                                         parameters_.encoder_config.Copy());
+
+  // The specifics were only needed for stream creation; don't retain them in
+  // the stored config.
+  parameters_.encoder_config.encoder_specific_settings = NULL;
+
+  // Calls stream_->StartPerRtpStream() to start the VideoSendStream
+  // if necessary conditions are met.
+  UpdateSendState();
+
+  // Attach the source after starting the send stream to prevent frames from
+  // being injected into a not-yet initialized video stream encoder.
+  if (source_) {
+    stream_->SetSource(source_, GetDegradationPreference());
+  }
+}
+
+// Asks the send stream to produce a key frame for the given `rids`; logs a
+// warning and does nothing when no stream exists.
+void WebRtcVideoSendChannel::WebRtcVideoSendStream::GenerateKeyFrame(
+    const std::vector<std::string>& rids) {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  if (stream_ == NULL) {
+    RTC_LOG(LS_WARNING)
+        << "Absent send stream; ignoring request to generate keyframe.";
+    return;
+  }
+  stream_->GenerateKeyFrame(rids);
+}
+
+// Forwards a key frame request to the send stream registered under `ssrc`;
+// logs an error when no such stream exists.
+void WebRtcVideoSendChannel::GenerateSendKeyFrame(
+    uint32_t ssrc,
+    const std::vector<std::string>& rids) {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  const auto stream_it = send_streams_.find(ssrc);
+  if (stream_it == send_streams_.end()) {
+    RTC_LOG(LS_ERROR)
+        << "Absent send stream; ignoring key frame generation for ssrc "
+        << ssrc;
+    return;
+  }
+  stream_it->second->GenerateKeyFrame(rids);
+}
+
+// Routes `frame_transformer` to the send stream for `ssrc`; silently ignored
+// when no matching stream exists.
+void WebRtcVideoSendChannel::SetEncoderToPacketizerFrameTransformer(
+    uint32_t ssrc,
+    rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer) {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  const auto stream_it = send_streams_.find(ssrc);
+  if (stream_it == send_streams_.end()) {
+    return;
+  }
+  stream_it->second->SetEncoderToPacketizerFrameTransformer(
+      std::move(frame_transformer));
+}
+
+// ------------------------ WebRtcVideoReceiveChannel ---------------------
+// Constructs the receive channel: caches configuration, derives the initial
+// receive codec list from the decoder factory, and picks the FlexFEC payload
+// type advertised by the first mapped codec.
+WebRtcVideoReceiveChannel::WebRtcVideoReceiveChannel(
+    webrtc::Call* call,
+    const MediaConfig& config,
+    const VideoOptions& options,
+    const webrtc::CryptoOptions& crypto_options,
+    webrtc::VideoDecoderFactory* decoder_factory)
+    : MediaChannelUtil(call->network_thread(), config.enable_dscp),
+      worker_thread_(call->worker_thread()),
+      receiving_(false),
+      call_(call),
+      default_sink_(nullptr),
+      video_config_(config.video),
+      decoder_factory_(decoder_factory),
+      default_send_options_(options),
+      last_receive_stats_log_ms_(-1),
+      discard_unknown_ssrc_packets_(
+          IsEnabled(call_->trials(),
+                    "WebRTC-Video-DiscardPacketsWithUnknownSsrc")),
+      crypto_options_(crypto_options),
+      receive_buffer_size_(ParseReceiveBufferSize(call_->trials())) {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  rtcp_receiver_report_ssrc_ = kDefaultRtcpReceiverReportSsrc;
+  // Seed the receive codec list with everything the decoder factory supports.
+  recv_codecs_ = MapCodecs(GetPayloadTypesAndDefaultCodecs(
+      decoder_factory_, /*is_decoder_factory=*/true,
+      /*include_rtx=*/true, call_->trials()));
+  // 0 means "no FlexFEC" when no codecs were mapped.
+  recv_flexfec_payload_type_ =
+      recv_codecs_.empty() ? 0 : recv_codecs_.front().flexfec_payload_type;
+}
+
+WebRtcVideoReceiveChannel::~WebRtcVideoReceiveChannel() {
+  // Receive streams are owned raw pointers; release them explicitly.
+  for (auto& [ssrc, receive_stream] : receive_streams_) {
+    delete receive_stream;
+  }
+}
+
+// Applies LNTF/NACK/RTCP-mode feedback settings to every existing receive
+// stream and records them for streams created later.
+void WebRtcVideoReceiveChannel::SetReceiverFeedbackParameters(
+    bool lntf_enabled,
+    bool nack_enabled,
+    webrtc::RtcpMode rtcp_mode,
+    absl::optional<int> rtx_time) {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+
+  // Update receive feedback parameters from new codec or RTCP mode.
+  for (auto& [ssrc, receive_stream] : receive_streams_) {
+    RTC_DCHECK(receive_stream != nullptr);
+    receive_stream->SetFeedbackParameters(lntf_enabled, nack_enabled,
+                                          rtcp_mode, rtx_time);
+  }
+  // Store for future creation of receive streams.
+  rtp_config_.lntf.enabled = lntf_enabled;
+  rtp_config_.nack.rtp_history_ms = nack_enabled ? kNackHistoryMs : 0;
+  rtp_config_.rtcp_mode = rtcp_mode;
+  // Note: There is no place in config to store rtx_time.
+}
+
+// Returns the RTP parameters of the receive stream for `ssrc`, augmented
+// with the channel-wide header extensions and receive codecs. Returns an
+// empty parameter set when no such stream exists.
+webrtc::RtpParameters WebRtcVideoReceiveChannel::GetRtpReceiverParameters(
+    uint32_t ssrc) const {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  const auto stream_it = receive_streams_.find(ssrc);
+  if (stream_it == receive_streams_.end()) {
+    RTC_LOG(LS_WARNING)
+        << "Attempting to get RTP receive parameters for stream "
+           "with SSRC "
+        << ssrc << " which doesn't exist.";
+    return webrtc::RtpParameters();
+  }
+  webrtc::RtpParameters rtp_params = stream_it->second->GetRtpParameters();
+  rtp_params.header_extensions = recv_rtp_extensions_;
+
+  // Add codecs, which any stream is prepared to receive.
+  for (const VideoCodec& codec : recv_params_.codecs) {
+    rtp_params.codecs.push_back(codec.ToCodecParameters());
+  }
+
+  return rtp_params;
+}
+
+// Returns the RTP parameters used for unsignaled (default) receive streams.
+// The result carries exactly one encoding only when a default sink has been
+// installed.
+webrtc::RtpParameters
+WebRtcVideoReceiveChannel::GetDefaultRtpReceiveParameters() const {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  webrtc::RtpParameters rtp_params;
+  if (default_sink_ == nullptr) {
+    // Getting parameters on a default, unsignaled video receive stream but
+    // because we've not configured to receive such a stream, `encodings` is
+    // empty.
+    return rtp_params;
+  }
+  rtp_params.encodings.emplace_back();
+
+  // Add codecs, which any stream is prepared to receive.
+  for (const VideoCodec& codec : recv_params_.codecs) {
+    rtp_params.codecs.push_back(codec.ToCodecParameters());
+  }
+  return rtp_params;
+}
+
+// Validates `params` and computes the delta against the current receiver
+// state into `changed_params` (codecs, header extensions, FlexFEC payload
+// type). Returns false, leaving `changed_params` possibly partially filled,
+// when validation fails; does not mutate channel state.
+bool WebRtcVideoReceiveChannel::GetChangedReceiverParameters(
+    const VideoReceiverParameters& params,
+    ChangedReceiverParameters* changed_params) const {
+  if (!ValidateCodecFormats(params.codecs) ||
+      !ValidateRtpExtensions(params.extensions, recv_rtp_extensions_)) {
+    return false;
+  }
+
+  // Handle receive codecs.
+  const std::vector<VideoCodecSettings> mapped_codecs =
+      MapCodecs(params.codecs);
+  if (mapped_codecs.empty()) {
+    RTC_LOG(LS_ERROR)
+        << "GetChangedReceiverParameters called without any video codecs.";
+    return false;
+  }
+
+  // Verify that every mapped codec is supported locally.
+  if (params.is_stream_active) {
+    const std::vector<VideoCodec> local_supported_codecs =
+        GetPayloadTypesAndDefaultCodecs(decoder_factory_,
+                                        /*is_decoder_factory=*/true,
+                                        /*include_rtx=*/true, call_->trials());
+    for (const VideoCodecSettings& mapped_codec : mapped_codecs) {
+      if (!FindMatchingVideoCodec(local_supported_codecs, mapped_codec.codec)) {
+        RTC_LOG(LS_ERROR) << "GetChangedReceiverParameters called with "
+                             "unsupported video codec: "
+                          << mapped_codec.codec.ToString();
+        return false;
+      }
+    }
+  }
+
+  // FlexFEC-only differences do not count as a codec change here.
+  if (NonFlexfecReceiveCodecsHaveChanged(recv_codecs_, mapped_codecs)) {
+    changed_params->codec_settings =
+        absl::optional<std::vector<VideoCodecSettings>>(mapped_codecs);
+  }
+
+  // Handle RTP header extensions.
+  std::vector<webrtc::RtpExtension> filtered_extensions = FilterRtpExtensions(
+      params.extensions, webrtc::RtpExtension::IsSupportedForVideo, false,
+      call_->trials());
+  if (filtered_extensions != recv_rtp_extensions_) {
+    changed_params->rtp_header_extensions =
+        absl::optional<std::vector<webrtc::RtpExtension>>(filtered_extensions);
+  }
+
+  // The FlexFEC payload type tracks the first mapped codec.
+  int flexfec_payload_type = mapped_codecs.front().flexfec_payload_type;
+  if (flexfec_payload_type != recv_flexfec_payload_type_) {
+    changed_params->flexfec_payload_type = flexfec_payload_type;
+  }
+
+  return true;
+}
+
+// Applies new receiver parameters: computes the delta, updates channel-level
+// state (FlexFEC payload type, header extensions, codecs) and then pushes the
+// delta to every receive stream. Returns false (without changes) when the
+// parameters fail validation.
+bool WebRtcVideoReceiveChannel::SetReceiverParameters(
+    const VideoReceiverParameters& params) {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  TRACE_EVENT0("webrtc", "WebRtcVideoReceiveChannel::SetReceiverParameters");
+  RTC_LOG(LS_INFO) << "SetReceiverParameters: " << params.ToString();
+  ChangedReceiverParameters changed_params;
+  if (!GetChangedReceiverParameters(params, &changed_params)) {
+    return false;
+  }
+  if (changed_params.flexfec_payload_type) {
+    RTC_DLOG(LS_INFO) << "Changing FlexFEC payload type (recv) from "
+                      << recv_flexfec_payload_type_ << " to "
+                      << *changed_params.flexfec_payload_type;
+    recv_flexfec_payload_type_ = *changed_params.flexfec_payload_type;
+  }
+  if (changed_params.rtp_header_extensions) {
+    recv_rtp_extensions_ = *changed_params.rtp_header_extensions;
+    // Keep the parsed extension map in sync with the raw extension list.
+    recv_rtp_extension_map_ =
+        webrtc::RtpHeaderExtensionMap(recv_rtp_extensions_);
+  }
+  if (changed_params.codec_settings) {
+    RTC_DLOG(LS_INFO) << "Changing recv codecs from "
+                      << CodecSettingsVectorToString(recv_codecs_) << " to "
+                      << CodecSettingsVectorToString(
+                             *changed_params.codec_settings);
+    recv_codecs_ = *changed_params.codec_settings;
+  }
+
+  // Channel state is updated above before streams see the delta.
+  for (auto& kv : receive_streams_) {
+    kv.second->SetReceiverParameters(changed_params);
+  }
+  recv_params_ = params;
+  return true;
+}
+
+// Updates the local SSRC used in RTCP receiver reports and propagates it to
+// every receive stream. No-op when the value is unchanged.
+void WebRtcVideoReceiveChannel::SetReceiverReportSsrc(uint32_t ssrc) {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  if (ssrc == rtcp_receiver_report_ssrc_) {
+    return;
+  }
+  rtcp_receiver_report_ssrc_ = ssrc;
+  for (auto& kv : receive_streams_) {
+    kv.second->SetLocalSsrc(ssrc);
+  }
+}
+
+// Picks an SSRC from `choices` to use for receiver reports. The current
+// SSRC is kept while still valid; the default is restored when the set is
+// empty; otherwise an arbitrary member is chosen.
+void WebRtcVideoReceiveChannel::ChooseReceiverReportSsrc(
+    const std::set<uint32_t>& choices) {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  // If we can continue using the current receiver report, do so.
+  if (choices.count(rtcp_receiver_report_ssrc_) > 0) {
+    return;
+  }
+  // Go back to the default if the list has been emptied; otherwise any
+  // remaining number is as good as any other.
+  SetReceiverReportSsrc(choices.empty() ? kDefaultRtcpReceiverReportSsrc
+                                        : *choices.begin());
+}
+
+// Starts or stops every receive stream and records the receiving state for
+// streams created later.
+void WebRtcVideoReceiveChannel::SetReceive(bool receive) {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  TRACE_EVENT0("webrtc", "WebRtcVideoReceiveChannel::SetReceive");
+  RTC_LOG(LS_VERBOSE) << "SetReceive: " << (receive ? "true" : "false");
+  for (const auto& [ssrc, receive_stream] : receive_streams_) {
+    if (receive) {
+      receive_stream->StartReceiveStream();
+    } else {
+      receive_stream->StopReceiveStream();
+    }
+  }
+  receiving_ = receive;
+}
+
+// Returns true when none of `sp`'s SSRCs collide with an SSRC already in use
+// by an existing receive stream; logs and returns false on the first clash.
+bool WebRtcVideoReceiveChannel::ValidateReceiveSsrcAvailability(
+    const StreamParams& sp) const {
+  for (uint32_t ssrc : sp.ssrcs) {
+    if (receive_ssrcs_.count(ssrc) > 0) {
+      RTC_LOG(LS_ERROR) << "Receive stream with SSRC '" << ssrc
+                        << "' already exists.";
+      return false;
+    }
+  }
+  return true;
+}
+
+// Releases `stream` and forgets every SSRC it was registered under.
+void WebRtcVideoReceiveChannel::DeleteReceiveStream(
+    WebRtcVideoReceiveStream* stream) {
+  for (uint32_t old_ssrc : stream->GetSsrcs()) {
+    receive_ssrcs_.erase(old_ssrc);
+  }
+  delete stream;
+}
+
+// Convenience overload: adds a signaled (non-default) receive stream.
+bool WebRtcVideoReceiveChannel::AddRecvStream(const StreamParams& sp) {
+  return AddRecvStream(sp, /*default_stream=*/false);
+}
+
+// Creates a receive stream for `sp`. StreamParams without SSRCs are stashed
+// for later unsignaled-stream creation. An existing default stream on any of
+// the SSRCs is replaced; a collision with a non-default stream (or adding a
+// default stream over an existing one) fails.
+bool WebRtcVideoReceiveChannel::AddRecvStream(const StreamParams& sp,
+                                              bool default_stream) {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+
+  RTC_LOG(LS_INFO) << "AddRecvStream"
+                   << (default_stream ? " (default stream)" : "") << ": "
+                   << sp.ToString();
+  if (!sp.has_ssrcs()) {
+    // This is a StreamParam with unsignaled SSRCs. Store it, so it can be used
+    // later when we know the SSRC on the first packet arrival.
+    unsignaled_stream_params_ = sp;
+    return true;
+  }
+
+  if (!ValidateStreamParams(sp))
+    return false;
+
+  for (uint32_t ssrc : sp.ssrcs) {
+    // Remove running stream if this was a default stream.
+    const auto& prev_stream = receive_streams_.find(ssrc);
+    if (prev_stream != receive_streams_.end()) {
+      if (default_stream || !prev_stream->second->IsDefaultStream()) {
+        RTC_LOG(LS_ERROR) << "Receive stream for SSRC '" << ssrc
+                          << "' already exists.";
+        return false;
+      }
+      DeleteReceiveStream(prev_stream->second);
+      receive_streams_.erase(prev_stream);
+    }
+  }
+
+  // Check against SSRCs owned by other streams (e.g. RTX/FEC SSRCs).
+  if (!ValidateReceiveSsrcAvailability(sp))
+    return false;
+
+  for (uint32_t used_ssrc : sp.ssrcs)
+    receive_ssrcs_.insert(used_ssrc);
+
+  webrtc::VideoReceiveStreamInterface::Config config(transport(),
+                                                     decoder_factory_);
+  webrtc::FlexfecReceiveStream::Config flexfec_config(transport());
+  ConfigureReceiverRtp(&config, &flexfec_config, sp);
+
+  config.crypto_options = crypto_options_;
+  config.enable_prerenderer_smoothing =
+      video_config_.enable_prerenderer_smoothing;
+  // The first stream id (if any) is used for audio/video sync.
+  if (!sp.stream_ids().empty()) {
+    config.sync_group = sp.stream_ids()[0];
+  }
+
+  // A transformer installed before the stream was signaled carries over.
+  if (unsignaled_frame_transformer_ && !config.frame_transformer)
+    config.frame_transformer = unsignaled_frame_transformer_;
+
+  auto receive_stream =
+      new WebRtcVideoReceiveStream(call_, sp, std::move(config), default_stream,
+                                   recv_codecs_, flexfec_config);
+  // New streams inherit the channel's current receiving state.
+  if (receiving_) {
+    receive_stream->StartReceiveStream();
+  }
+  receive_streams_[sp.first_ssrc()] = receive_stream;
+  return true;
+}
+
+// Fills the RTP portions of a receive-stream config (and its FlexFEC
+// companion) from `sp` and the channel's current RTP/RTCP state.
+void WebRtcVideoReceiveChannel::ConfigureReceiverRtp(
+    webrtc::VideoReceiveStreamInterface::Config* config,
+    webrtc::FlexfecReceiveStream::Config* flexfec_config,
+    const StreamParams& sp) const {
+  uint32_t ssrc = sp.first_ssrc();
+
+  config->rtp.remote_ssrc = ssrc;
+  config->rtp.local_ssrc = rtcp_receiver_report_ssrc_;
+
+  // TODO(pbos): This protection is against setting the same local ssrc as
+  // remote which is not permitted by the lower-level API. RTCP requires a
+  // corresponding sender SSRC. Figure out what to do when we don't have
+  // (receive-only) or know a good local SSRC.
+  if (config->rtp.remote_ssrc == config->rtp.local_ssrc) {
+    if (config->rtp.local_ssrc != kDefaultRtcpReceiverReportSsrc) {
+      config->rtp.local_ssrc = kDefaultRtcpReceiverReportSsrc;
+    } else {
+      config->rtp.local_ssrc = kDefaultRtcpReceiverReportSsrc + 1;
+    }
+  }
+
+  // The mode and rtx time is determined by a call to the configuration
+  // function.
+  config->rtp.rtcp_mode = rtp_config_.rtcp_mode;
+
+  // RTX SSRC, when `sp` signals one for this media SSRC.
+  sp.GetFidSsrc(ssrc, &config->rtp.rtx_ssrc);
+
+  // TODO(brandtr): Generalize when we add support for multistream protection.
+  flexfec_config->payload_type = recv_flexfec_payload_type_;
+  if (!IsDisabled(call_->trials(), "WebRTC-FlexFEC-03-Advertised") &&
+      sp.GetFecFrSsrc(ssrc, &flexfec_config->rtp.remote_ssrc)) {
+    flexfec_config->protected_media_ssrcs = {ssrc};
+    flexfec_config->rtp.local_ssrc = config->rtp.local_ssrc;
+    flexfec_config->rtcp_mode = config->rtp.rtcp_mode;
+  }
+}
+
+// Removes and destroys the receive stream registered under `ssrc`. Returns
+// false when no such stream exists.
+bool WebRtcVideoReceiveChannel::RemoveRecvStream(uint32_t ssrc) {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  RTC_LOG(LS_INFO) << "RemoveRecvStream: " << ssrc;
+
+  const auto stream_it = receive_streams_.find(ssrc);
+  if (stream_it == receive_streams_.end()) {
+    RTC_LOG(LS_ERROR) << "Stream not found for ssrc: " << ssrc;
+    return false;
+  }
+  DeleteReceiveStream(stream_it->second);
+  receive_streams_.erase(stream_it);
+  return true;
+}
+
+// Clears the stored unsignaled StreamParams and tears down every default
+// receive stream that was created from them.
+void WebRtcVideoReceiveChannel::ResetUnsignaledRecvStream() {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  RTC_LOG(LS_INFO) << "ResetUnsignaledRecvStream.";
+  unsignaled_stream_params_ = StreamParams();
+  last_unsignalled_ssrc_creation_time_ms_ = absl::nullopt;
+
+  // Delete any created default streams. This is needed to avoid SSRC collisions
+  // in Call's RtpDemuxer, in the case that `this` has created a default video
+  // receiver, and then some other WebRtcVideoReceiveChannel gets the SSRC
+  // signaled in the corresponding Unified Plan "m=" section.
+  auto it = receive_streams_.begin();
+  while (it != receive_streams_.end()) {
+    if (it->second->IsDefaultStream()) {
+      DeleteReceiveStream(it->second);
+      // Post-increment keeps `it` valid across the erase.
+      receive_streams_.erase(it++);
+    } else {
+      ++it;
+    }
+  }
+}
+
+// Returns the SSRC of the first default (unsignaled) receive stream, or
+// nullopt when no default stream exists.
+absl::optional<uint32_t> WebRtcVideoReceiveChannel::GetUnsignaledSsrc() const {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  for (const auto& [ssrc, receive_stream] : receive_streams_) {
+    if (receive_stream->IsDefaultStream()) {
+      return ssrc;
+    }
+  }
+  return absl::nullopt;
+}
+
+// Marks a demuxer criteria update as in flight; while the pending and
+// completed counters differ, unknown-SSRC packets are not promoted to
+// default streams.
+void WebRtcVideoReceiveChannel::OnDemuxerCriteriaUpdatePending() {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  demuxer_criteria_id_ += 1;
+}
+
+// Marks one in-flight demuxer criteria update as finished, rebalancing the
+// pending/completed counters.
+void WebRtcVideoReceiveChannel::OnDemuxerCriteriaUpdateComplete() {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  demuxer_criteria_completed_id_ += 1;
+}
+
+// Attaches `sink` (may be null to detach) as the renderer for the receive
+// stream with `ssrc`. Returns false when no such stream exists.
+bool WebRtcVideoReceiveChannel::SetSink(
+    uint32_t ssrc,
+    rtc::VideoSinkInterface<webrtc::VideoFrame>* sink) {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  RTC_LOG(LS_INFO) << "SetSink: ssrc:" << ssrc << " "
+                   << (sink ? "(ptr)" : "nullptr");
+
+  const auto stream_it = receive_streams_.find(ssrc);
+  if (stream_it == receive_streams_.end()) {
+    return false;
+  }
+  stream_it->second->SetSink(sink);
+  return true;
+}
+
+// Sets the renderer used for unsignaled (default) receive streams; null
+// detaches it.
+void WebRtcVideoReceiveChannel::SetDefaultSink(
+    rtc::VideoSinkInterface<webrtc::VideoFrame>* sink) {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  RTC_LOG(LS_INFO) << "SetDefaultSink: " << (sink ? "(ptr)" : "nullptr");
+  default_sink_ = sink;
+}
+
+// Fills `info` with per-receiver and per-codec receive stats. Verbose stats
+// are logged at most once per kStatsLogIntervalMs.
+bool WebRtcVideoReceiveChannel::GetStats(VideoMediaReceiveInfo* info) {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  TRACE_EVENT0("webrtc", "WebRtcVideoReceiveChannel::GetStats");
+
+  info->Clear();
+  if (receive_streams_.empty()) {
+    return true;
+  }
+
+  // Log stats periodically.
+  const int64_t now_ms = rtc::TimeMillis();
+  const bool log_stats =
+      last_receive_stats_log_ms_ == -1 ||
+      now_ms - last_receive_stats_log_ms_ > kStatsLogIntervalMs;
+  if (log_stats) {
+    last_receive_stats_log_ms_ = now_ms;
+  }
+
+  FillReceiverStats(info, log_stats);
+  FillReceiveCodecStats(info);
+  return true;
+}
+
+// Appends one VideoReceiverInfo per receive stream to `video_media_info`.
+void WebRtcVideoReceiveChannel::FillReceiverStats(
+    VideoMediaReceiveInfo* video_media_info,
+    bool log_stats) {
+  for (const auto& [ssrc, receive_stream] : receive_streams_) {
+    video_media_info->receivers.push_back(
+        receive_stream->GetVideoReceiverInfo(log_stats));
+  }
+}
+
+// For every receiver in `video_media_info`, records the negotiated codec
+// parameters matching its payload type into `receive_codecs`.
+void WebRtcVideoReceiveChannel::FillReceiveCodecStats(
+    VideoMediaReceiveInfo* video_media_info) {
+  for (const auto& receiver : video_media_info->receivers) {
+    const auto match =
+        absl::c_find_if(recv_params_.codecs, [&receiver](const VideoCodec& c) {
+          return receiver.codec_payload_type &&
+                 *receiver.codec_payload_type == c.id;
+        });
+    if (match != recv_params_.codecs.end()) {
+      video_media_info->receive_codecs.insert(
+          std::make_pair(match->id, match->ToCodecParameters()));
+    }
+  }
+}
+
+// Entry point for incoming RTP packets, invoked on the network thread.
+// Processing happens on the worker thread: when network and worker threads
+// are distinct the packet is copied and posted (guarded by `task_safety_` so
+// it is dropped if the channel is gone); otherwise it is handled inline.
+void WebRtcVideoReceiveChannel::OnPacketReceived(
+    const webrtc::RtpPacketReceived& packet) {
+  // Note: the network_thread_checker may refer to the worker thread if the two
+  // threads are combined, but this is either always true or always false
+  // depending on configuration set at object initialization.
+  RTC_DCHECK_RUN_ON(&network_thread_checker_);
+
+  // TODO(crbug.com/1373439): Stop posting to the worker thread when the
+  // combined network/worker project launches.
+  if (webrtc::TaskQueueBase::Current() != worker_thread_) {
+    worker_thread_->PostTask(
+        SafeTask(task_safety_.flag(), [this, packet = packet]() mutable {
+          RTC_DCHECK_RUN_ON(&thread_checker_);
+          ProcessReceivedPacket(std::move(packet));
+        }));
+  } else {
+    RTC_DCHECK_RUN_ON(&thread_checker_);
+    ProcessReceivedPacket(packet);
+  }
+}
+
+// Decides whether a packet with an unknown SSRC should cause creation (or
+// update) of a default, unsignaled receive stream. Returns true when a
+// default stream was created or its RTX SSRC was updated; false when the
+// packet should simply be dropped.
+bool WebRtcVideoReceiveChannel::MaybeCreateDefaultReceiveStream(
+    const webrtc::RtpPacketReceived& packet) {
+  if (discard_unknown_ssrc_packets_) {
+    return false;
+  }
+
+  if (packet.PayloadType() == recv_flexfec_payload_type_) {
+    return false;
+  }
+
+  // Ignore unknown ssrcs if there is a demuxer criteria update pending.
+  // During a demuxer update we may receive ssrcs that were recently
+  // removed or we may receive ssrcs that were recently configured for a
+  // different video channel.
+  if (demuxer_criteria_id_ != demuxer_criteria_completed_id_) {
+    return false;
+  }
+
+  // See if this payload_type is registered as one that usually gets its
+  // own SSRC (RTX) or at least is safe to drop either way (FEC). If it
+  // is, and it wasn't handled above by DeliverPacket, that means we don't
+  // know what stream it associates with, and we shouldn't ever create an
+  // implicit channel for these.
+  bool is_rtx_payload = false;
+  for (auto& codec : recv_codecs_) {
+    if (packet.PayloadType() == codec.ulpfec.red_rtx_payload_type ||
+        packet.PayloadType() == codec.ulpfec.ulpfec_payload_type) {
+      return false;
+    }
+
+    if (packet.PayloadType() == codec.rtx_payload_type) {
+      is_rtx_payload = true;
+      break;
+    }
+  }
+
+  if (is_rtx_payload) {
+    // As we don't support receiving simulcast there can only be one RTX
+    // stream, which will be associated with unsignaled media stream.
+    absl::optional<uint32_t> current_default_ssrc = GetUnsignaledSsrc();
+    if (current_default_ssrc) {
+      FindReceiveStream(*current_default_ssrc)->UpdateRtxSsrc(packet.Ssrc());
+    } else {
+      // Received unsignaled RTX packet before a media packet. Create a default
+      // stream with a "random" SSRC and the RTX SSRC from the packet. The
+      // stream will be recreated on the first media packet, unless we are
+      // extremely lucky and used the right media SSRC.
+      // (14795 is an arbitrary placeholder media SSRC.)
+      ReCreateDefaultReceiveStream(/*ssrc =*/14795, /*rtx_ssrc=*/packet.Ssrc());
+    }
+    return true;
+  } else {
+    // Ignore unknown ssrcs if we recently created an unsignalled receive
+    // stream since this shouldn't happen frequently. Getting into a state
+    // of creating decoders on every packet eats up processing time (e.g.
+    // https://crbug.com/1069603) and this cooldown prevents that.
+    if (last_unsignalled_ssrc_creation_time_ms_.has_value()) {
+      int64_t now_ms = rtc::TimeMillis();
+      if (now_ms - last_unsignalled_ssrc_creation_time_ms_.value() <
+          kUnsignaledSsrcCooldownMs) {
+        // We've already created an unsignalled ssrc stream within the last
+        // 0.5 s, ignore with a warning.
+        RTC_LOG(LS_WARNING)
+            << "Another unsignalled ssrc packet arrived shortly after the "
+            << "creation of an unsignalled ssrc stream. Dropping packet.";
+        return false;
+      }
+    }
+  }
+  // RTX SSRC not yet known.
+  ReCreateDefaultReceiveStream(packet.Ssrc(), absl::nullopt);
+  last_unsignalled_ssrc_creation_time_ms_ = rtc::TimeMillis();
+  return true;
+}
+
+// Tears down the current default (unsignaled) receive stream, if any, and
+// creates a new one for `ssrc` (with optional `rtx_ssrc`), re-applying the
+// default sink and any base minimum playout delay configured for SSRC 0.
+void WebRtcVideoReceiveChannel::ReCreateDefaultReceiveStream(
+    uint32_t ssrc,
+    absl::optional<uint32_t> rtx_ssrc) {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+
+  absl::optional<uint32_t> default_recv_ssrc = GetUnsignaledSsrc();
+  if (default_recv_ssrc) {
+    // Fix: log the SSRC of the stream actually being destroyed; previously
+    // this logged the incoming `ssrc`, which was misleading.
+    RTC_LOG(LS_INFO) << "Destroying old default receive stream for SSRC="
+                     << *default_recv_ssrc << ".";
+    RemoveRecvStream(*default_recv_ssrc);
+  }
+
+  StreamParams sp = unsignaled_stream_params();
+  sp.ssrcs.push_back(ssrc);
+  if (rtx_ssrc) {
+    sp.AddFidSsrc(ssrc, *rtx_ssrc);
+  }
+  RTC_LOG(LS_INFO) << "Creating default receive stream for SSRC=" << ssrc
+                   << ".";
+  if (!AddRecvStream(sp, /*default_stream=*/true)) {
+    RTC_LOG(LS_WARNING) << "Could not create default receive stream.";
+  }
+
+  // SSRC 0 returns default_recv_base_minimum_delay_ms.
+  const int unsignaled_ssrc = 0;
+  int default_recv_base_minimum_delay_ms =
+      GetBaseMinimumPlayoutDelayMs(unsignaled_ssrc).value_or(0);
+  // Set base minimum delay if it was set before for the default receive
+  // stream.
+  SetBaseMinimumPlayoutDelayMs(ssrc, default_recv_base_minimum_delay_ms);
+  SetSink(ssrc, default_sink_);
+}
+
+// Connects the channel to the packet transport and enlarges the RTP socket
+// receive buffer.
+void WebRtcVideoReceiveChannel::SetInterface(
+    MediaChannelNetworkInterface* iface) {
+  RTC_DCHECK_RUN_ON(&network_thread_checker_);
+  MediaChannelUtil::SetInterface(iface);
+  // Set the RTP receive buffer to a bigger size (only OPT_RCVBUF is
+  // configured here).
+  MediaChannelUtil::SetOption(MediaChannelNetworkInterface::ST_RTP,
+                              rtc::Socket::OPT_RCVBUF, receive_buffer_size_);
+}
+
+// Attaches `frame_decryptor` to the receive stream for `ssrc`; a no-op when
+// no such stream exists.
+void WebRtcVideoReceiveChannel::SetFrameDecryptor(
+    uint32_t ssrc,
+    rtc::scoped_refptr<webrtc::FrameDecryptorInterface> frame_decryptor) {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  auto stream_it = receive_streams_.find(ssrc);
+  if (stream_it == receive_streams_.end()) {
+    return;
+  }
+  stream_it->second->SetFrameDecryptor(frame_decryptor);
+}
+
+// Sets the base minimum playout delay for `ssrc`. SSRC 0 denotes the default
+// receive stream: the value is always remembered for future default streams,
+// and additionally applied to the current default stream if one exists.
+// Returns false when a concrete stream lookup fails.
+bool WebRtcVideoReceiveChannel::SetBaseMinimumPlayoutDelayMs(uint32_t ssrc,
+                                                             int delay_ms) {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  absl::optional<uint32_t> default_ssrc = GetUnsignaledSsrc();
+
+  // SSRC of 0 represents the default receive stream.
+  if (ssrc == 0) {
+    default_recv_base_minimum_delay_ms_ = delay_ms;
+  }
+
+  // No default stream exists yet; the stored value will be applied when one
+  // is created (see ReCreateDefaultReceiveStream).
+  if (ssrc == 0 && !default_ssrc) {
+    return true;
+  }
+
+  if (ssrc == 0 && default_ssrc) {
+    ssrc = default_ssrc.value();
+  }
+
+  auto stream = receive_streams_.find(ssrc);
+  if (stream != receive_streams_.end()) {
+    stream->second->SetBaseMinimumPlayoutDelayMs(delay_ms);
+    return true;
+  } else {
+    RTC_LOG(LS_ERROR) << "No stream found to set base minimum playout delay";
+    return false;
+  }
+}
+
+// Returns the base minimum playout delay for `ssrc`, where SSRC 0 denotes
+// the default receive stream; nullopt (with an error log) if no stream
+// matches.
+absl::optional<int> WebRtcVideoReceiveChannel::GetBaseMinimumPlayoutDelayMs(
+    uint32_t ssrc) const {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  if (ssrc == 0) {
+    // SSRC of 0 represents the default receive stream.
+    return default_recv_base_minimum_delay_ms_;
+  }
+
+  auto stream_it = receive_streams_.find(ssrc);
+  if (stream_it == receive_streams_.end()) {
+    RTC_LOG(LS_ERROR) << "No stream found to get base minimum playout delay";
+    return absl::nullopt;
+  }
+  return stream_it->second->GetBaseMinimumPlayoutDelayMs();
+}
+
+std::vector<webrtc::RtpSource> WebRtcVideoReceiveChannel::GetSources(
+ uint32_t ssrc) const {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ auto it = receive_streams_.find(ssrc);
+ if (it == receive_streams_.end()) {
+ // TODO(bugs.webrtc.org/9781): Investigate standard compliance
+ // with sources for streams that has been removed.
+ RTC_LOG(LS_ERROR) << "Attempting to get contributing sources for SSRC:"
+ << ssrc << " which doesn't exist.";
+ return {};
+ }
+ return it->second->GetSources();
+}
+
+// Builds the receive-stream wrapper: derives decoder, RTX and FEC settings
+// from `recv_codecs` (must be non-empty — front() is used below), registers
+// this object as the frame renderer, and creates the underlying receive
+// stream(s).
+WebRtcVideoReceiveChannel::WebRtcVideoReceiveStream::WebRtcVideoReceiveStream(
+    webrtc::Call* call,
+    const StreamParams& sp,
+    webrtc::VideoReceiveStreamInterface::Config config,
+    bool default_stream,
+    const std::vector<VideoCodecSettings>& recv_codecs,
+    const webrtc::FlexfecReceiveStream::Config& flexfec_config)
+    : call_(call),
+      stream_params_(sp),
+      stream_(NULL),
+      default_stream_(default_stream),
+      config_(std::move(config)),
+      flexfec_config_(flexfec_config),
+      flexfec_stream_(nullptr),
+      sink_(NULL),
+      first_frame_timestamp_(-1),
+      estimated_remote_start_ntp_time_ms_(0),
+      receiving_(false) {
+  RTC_DCHECK(config_.decoder_factory);
+  RTC_DCHECK(config_.decoders.empty())
+      << "Decoder info is supplied via `recv_codecs`";
+
+  ExtractCodecInformation(recv_codecs, config_.rtp.rtx_associated_payload_types,
+                          config_.rtp.raw_payload_types, config_.decoders);
+  // Feedback/FEC settings are taken from the first (preferred) codec.
+  const VideoCodecSettings& codec = recv_codecs.front();
+  config_.rtp.ulpfec_payload_type = codec.ulpfec.ulpfec_payload_type;
+  config_.rtp.red_payload_type = codec.ulpfec.red_payload_type;
+  config_.rtp.lntf.enabled = HasLntf(codec.codec);
+  config_.rtp.nack.rtp_history_ms = HasNack(codec.codec) ? kNackHistoryMs : 0;
+  // rtx-time, when negotiated, overrides the default NACK history length.
+  if (codec.rtx_time && config_.rtp.nack.rtp_history_ms != 0) {
+    config_.rtp.nack.rtp_history_ms = *codec.rtx_time;
+  }
+
+  config_.rtp.rtcp_xr.receiver_reference_time_report = HasRrtr(codec.codec);
+
+  if (codec.ulpfec.red_rtx_payload_type != -1) {
+    config_.rtp
+        .rtx_associated_payload_types[codec.ulpfec.red_rtx_payload_type] =
+        codec.ulpfec.red_payload_type;
+  }
+
+  // Decoded frames are delivered to OnFrame() below.
+  config_.renderer = this;
+  flexfec_config_.payload_type = flexfec_config.payload_type;
+
+  CreateReceiveStream();
+}
+
+// Destroys the underlying media receive stream and, if present, the FlexFEC
+// stream via the owning Call.
+WebRtcVideoReceiveChannel::WebRtcVideoReceiveStream::
+    ~WebRtcVideoReceiveStream() {
+  call_->DestroyVideoReceiveStream(stream_);
+  if (flexfec_stream_)
+    call_->DestroyFlexfecReceiveStream(flexfec_stream_);
+}
+
+// Returns the underlying receive stream; must only be called while one
+// exists (DCHECKed).
+webrtc::VideoReceiveStreamInterface&
+WebRtcVideoReceiveChannel::WebRtcVideoReceiveStream::stream() {
+  RTC_DCHECK(stream_);
+  return *stream_;
+}
+
+// Returns the FlexFEC receive stream, or nullptr when FlexFEC is not active.
+webrtc::FlexfecReceiveStream*
+WebRtcVideoReceiveChannel::WebRtcVideoReceiveStream::flexfec_stream() {
+  return flexfec_stream_;
+}
+
+// Returns the SSRCs from this stream's signaled StreamParams.
+const std::vector<uint32_t>&
+WebRtcVideoReceiveChannel::WebRtcVideoReceiveStream::GetSsrcs() const {
+  return stream_params_.ssrcs;
+}
+
+// Forwards to the underlying stream's source list; requires a live stream.
+std::vector<webrtc::RtpSource>
+WebRtcVideoReceiveChannel::WebRtcVideoReceiveStream::GetSources() {
+  RTC_DCHECK(stream_);
+  return stream_->GetSources();
+}
+
+// Builds read-side RtpParameters: one encoding per primary SSRC plus the
+// negotiated RTCP reduced-size flag.
+webrtc::RtpParameters
+WebRtcVideoReceiveChannel::WebRtcVideoReceiveStream::GetRtpParameters() const {
+  webrtc::RtpParameters params;
+
+  std::vector<uint32_t> primary_ssrcs;
+  stream_params_.GetPrimarySsrcs(&primary_ssrcs);
+  for (uint32_t primary_ssrc : primary_ssrcs) {
+    params.encodings.emplace_back();
+    params.encodings.back().ssrc = primary_ssrc;
+  }
+
+  params.rtcp.reduced_size =
+      config_.rtp.rtcp_mode == webrtc::RtcpMode::kReducedSize;
+
+  return params;
+}
+
+// Applies `recv_codecs` to the live stream in place where possible (FEC
+// payload types, LNTF, NACK history, RTCP XR, RTX associations) and returns
+// true when a change (raw payload types or decoder set) requires the caller
+// to recreate the receive stream.
+bool WebRtcVideoReceiveChannel::WebRtcVideoReceiveStream::ReconfigureCodecs(
+    const std::vector<VideoCodecSettings>& recv_codecs) {
+  RTC_DCHECK(stream_);
+  RTC_DCHECK(!recv_codecs.empty());
+
+  std::map<int, int> rtx_associated_payload_types;
+  std::set<int> raw_payload_types;
+  std::vector<webrtc::VideoReceiveStreamInterface::Decoder> decoders;
+  ExtractCodecInformation(recv_codecs, rtx_associated_payload_types,
+                          raw_payload_types, decoders);
+
+  // Feedback/FEC settings follow the first (preferred) codec.
+  const auto& codec = recv_codecs.front();
+
+  if (config_.rtp.red_payload_type != codec.ulpfec.red_payload_type ||
+      config_.rtp.ulpfec_payload_type != codec.ulpfec.ulpfec_payload_type) {
+    config_.rtp.ulpfec_payload_type = codec.ulpfec.ulpfec_payload_type;
+    config_.rtp.red_payload_type = codec.ulpfec.red_payload_type;
+    stream_->SetProtectionPayloadTypes(config_.rtp.red_payload_type,
+                                       config_.rtp.ulpfec_payload_type);
+  }
+
+  const bool has_lntf = HasLntf(codec.codec);
+  if (config_.rtp.lntf.enabled != has_lntf) {
+    config_.rtp.lntf.enabled = has_lntf;
+    stream_->SetLossNotificationEnabled(has_lntf);
+  }
+
+  int new_history_ms = config_.rtp.nack.rtp_history_ms;
+  const int rtp_history_ms = HasNack(codec.codec) ? kNackHistoryMs : 0;
+  if (rtp_history_ms != config_.rtp.nack.rtp_history_ms) {
+    new_history_ms = rtp_history_ms;
+  }
+
+  // The rtx-time parameter can be used to override the hardcoded default for
+  // the NACK buffer length.
+  if (codec.rtx_time && new_history_ms != 0) {
+    new_history_ms = *codec.rtx_time;
+  }
+
+  if (config_.rtp.nack.rtp_history_ms != new_history_ms) {
+    config_.rtp.nack.rtp_history_ms = new_history_ms;
+    stream_->SetNackHistory(webrtc::TimeDelta::Millis(new_history_ms));
+  }
+
+  const bool has_rtr = HasRrtr(codec.codec);
+  if (has_rtr != config_.rtp.rtcp_xr.receiver_reference_time_report) {
+    config_.rtp.rtcp_xr.receiver_reference_time_report = has_rtr;
+    stream_->SetRtcpXr(config_.rtp.rtcp_xr);
+  }
+
+  if (codec.ulpfec.red_rtx_payload_type != -1) {
+    rtx_associated_payload_types[codec.ulpfec.red_rtx_payload_type] =
+        codec.ulpfec.red_payload_type;
+  }
+
+  if (config_.rtp.rtx_associated_payload_types !=
+      rtx_associated_payload_types) {
+    stream_->SetAssociatedPayloadTypes(rtx_associated_payload_types);
+    rtx_associated_payload_types.swap(config_.rtp.rtx_associated_payload_types);
+  }
+
+  bool recreate_needed = false;
+
+  // Raw payload-type and decoder changes cannot be applied in place.
+  if (raw_payload_types != config_.rtp.raw_payload_types) {
+    raw_payload_types.swap(config_.rtp.raw_payload_types);
+    recreate_needed = true;
+  }
+
+  if (decoders != config_.decoders) {
+    decoders.swap(config_.decoders);
+    recreate_needed = true;
+  }
+
+  return recreate_needed;
+}
+
+// Updates RTCP mode, LNTF and NACK history on the live stream; the FlexFEC
+// stream's RTCP mode is kept in sync when present.
+void WebRtcVideoReceiveChannel::WebRtcVideoReceiveStream::SetFeedbackParameters(
+    bool lntf_enabled,
+    bool nack_enabled,
+    webrtc::RtcpMode rtcp_mode,
+    absl::optional<int> rtx_time) {
+  RTC_DCHECK(stream_);
+
+  if (config_.rtp.rtcp_mode != rtcp_mode) {
+    config_.rtp.rtcp_mode = rtcp_mode;
+    stream_->SetRtcpMode(rtcp_mode);
+
+    flexfec_config_.rtcp_mode = rtcp_mode;
+    if (flexfec_stream_) {
+      flexfec_stream_->SetRtcpMode(rtcp_mode);
+    }
+  }
+
+  config_.rtp.lntf.enabled = lntf_enabled;
+  stream_->SetLossNotificationEnabled(lntf_enabled);
+
+  // rtx-time, when provided, overrides the default NACK history length.
+  int nack_history_ms = nack_enabled ? rtx_time.value_or(kNackHistoryMs) : 0;
+  config_.rtp.nack.rtp_history_ms = nack_history_ms;
+  stream_->SetNackHistory(webrtc::TimeDelta::Millis(nack_history_ms));
+}
+
+// Reconfigures FlexFEC to use `payload_type` (-1 disables it), creating or
+// destroying the FlexFEC stream as needed.
+void WebRtcVideoReceiveChannel::WebRtcVideoReceiveStream::SetFlexFecPayload(
+    int payload_type) {
+  // TODO(bugs.webrtc.org/11993, tommi): See if it is better to always have a
+  // flexfec stream object around and instead of recreating the video stream,
+  // reconfigure the flexfec object from within the rtp callback (soon to be on
+  // the network thread).
+  if (flexfec_stream_) {
+    if (flexfec_stream_->payload_type() == payload_type) {
+      RTC_DCHECK_EQ(flexfec_config_.payload_type, payload_type);
+      return;
+    }
+
+    flexfec_config_.payload_type = payload_type;
+    flexfec_stream_->SetPayloadType(payload_type);
+
+    if (payload_type == -1) {
+      // FlexFEC disabled: detach protection before destroying the stream.
+      stream_->SetFlexFecProtection(nullptr);
+      call_->DestroyFlexfecReceiveStream(flexfec_stream_);
+      flexfec_stream_ = nullptr;
+    }
+  } else if (payload_type != -1) {
+    flexfec_config_.payload_type = payload_type;
+    if (flexfec_config_.IsCompleteAndEnabled()) {
+      flexfec_stream_ = call_->CreateFlexfecReceiveStream(flexfec_config_);
+      stream_->SetFlexFecProtection(flexfec_stream_);
+    }
+  } else {
+    // Noop. No flexfec stream exists and "new" payload_type == -1.
+    RTC_DCHECK(!flexfec_config_.IsCompleteAndEnabled());
+    flexfec_config_.payload_type = payload_type;
+  }
+}
+
+// Applies the changed receiver parameters, recreating the underlying
+// receive stream only when the codec changes require it.
+void WebRtcVideoReceiveChannel::WebRtcVideoReceiveStream::SetReceiverParameters(
+    const ChangedReceiverParameters& params) {
+  RTC_DCHECK(stream_);
+  const bool needs_recreation =
+      params.codec_settings ? ReconfigureCodecs(*params.codec_settings)
+                            : false;
+
+  if (params.flexfec_payload_type) {
+    SetFlexFecPayload(*params.flexfec_payload_type);
+  }
+
+  if (!needs_recreation) {
+    RTC_DLOG_F(LS_INFO) << "No receive stream recreate needed.";
+    return;
+  }
+  RecreateReceiveStream();
+}
+
+// Destroys and recreates the underlying receive stream, carrying over the
+// base minimum playout delay and recording state, then restarts it if the
+// stream was receiving.
+void WebRtcVideoReceiveChannel::WebRtcVideoReceiveStream::
+    RecreateReceiveStream() {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  RTC_DCHECK(stream_);
+  absl::optional<int> base_minimum_playout_delay_ms;
+  absl::optional<webrtc::VideoReceiveStreamInterface::RecordingState>
+      recording_state;
+  if (stream_) {
+    // Capture state to restore on the new stream before destroying the old
+    // one.
+    base_minimum_playout_delay_ms = stream_->GetBaseMinimumPlayoutDelayMs();
+    recording_state = stream_->SetAndGetRecordingState(
+        webrtc::VideoReceiveStreamInterface::RecordingState(),
+        /*generate_key_frame=*/false);
+    call_->DestroyVideoReceiveStream(stream_);
+    stream_ = nullptr;
+  }
+
+  if (flexfec_stream_) {
+    call_->DestroyFlexfecReceiveStream(flexfec_stream_);
+    flexfec_stream_ = nullptr;
+  }
+
+  CreateReceiveStream();
+
+  if (base_minimum_playout_delay_ms) {
+    stream_->SetBaseMinimumPlayoutDelayMs(
+        base_minimum_playout_delay_ms.value());
+  }
+  if (recording_state) {
+    stream_->SetAndGetRecordingState(std::move(*recording_state),
+                                     /*generate_key_frame=*/false);
+  }
+  if (receiving_) {
+    StartReceiveStream();
+  }
+}
+
+// Creates the underlying receive stream. The FlexFEC stream is created first
+// (when fully configured) so the video stream can be wired to it as packet
+// sink and protection.
+void WebRtcVideoReceiveChannel::WebRtcVideoReceiveStream::
+    CreateReceiveStream() {
+  RTC_DCHECK(!stream_);
+  RTC_DCHECK(!flexfec_stream_);
+  if (flexfec_config_.IsCompleteAndEnabled()) {
+    flexfec_stream_ = call_->CreateFlexfecReceiveStream(flexfec_config_);
+  }
+
+  webrtc::VideoReceiveStreamInterface::Config config = config_.Copy();
+  config.rtp.protected_by_flexfec = (flexfec_stream_ != nullptr);
+  config.rtp.packet_sink_ = flexfec_stream_;
+  stream_ = call_->CreateVideoReceiveStream(std::move(config));
+}
+
+// Marks the stream as receiving and starts the underlying stream.
+void WebRtcVideoReceiveChannel::WebRtcVideoReceiveStream::StartReceiveStream() {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  receiving_ = true;
+  stream_->Start();
+}
+
+// Stops the underlying stream and then recreates it.
+// NOTE(review): the recreate after Stop() appears intended to reset
+// stream/decoder state while stopped — confirm against upstream history.
+void WebRtcVideoReceiveChannel::WebRtcVideoReceiveStream::StopReceiveStream() {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  receiving_ = false;
+  stream_->Stop();
+  RecreateReceiveStream();
+}
+
+// Renderer callback: updates the estimated remote start NTP time and forwards
+// the decoded frame to the registered sink, if any. All state here is guarded
+// by sink_lock_.
+void WebRtcVideoReceiveChannel::WebRtcVideoReceiveStream::OnFrame(
+    const webrtc::VideoFrame& frame) {
+  webrtc::MutexLock lock(&sink_lock_);
+
+  int64_t time_now_ms = rtc::TimeMillis();
+  if (first_frame_timestamp_ < 0)
+    first_frame_timestamp_ = time_now_ms;
+  int64_t elapsed_time_ms = time_now_ms - first_frame_timestamp_;
+  // Estimate when the remote side started sending, in NTP time.
+  if (frame.ntp_time_ms() > 0)
+    estimated_remote_start_ntp_time_ms_ = frame.ntp_time_ms() - elapsed_time_ms;
+
+  if (sink_ == NULL) {
+    RTC_LOG(LS_WARNING)
+        << "VideoReceiveStreamInterface not connected to a VideoSink.";
+    return;
+  }
+
+  sink_->OnFrame(frame);
+}
+
+// True when this stream was created implicitly for an unsignaled SSRC rather
+// than via signaling.
+bool WebRtcVideoReceiveChannel::WebRtcVideoReceiveStream::IsDefaultStream()
+    const {
+  return default_stream_;
+}
+
+// Stores `frame_decryptor` in the config and applies it to the live stream
+// when one exists.
+void WebRtcVideoReceiveChannel::WebRtcVideoReceiveStream::SetFrameDecryptor(
+    rtc::scoped_refptr<webrtc::FrameDecryptorInterface> frame_decryptor) {
+  config_.frame_decryptor = frame_decryptor;
+  if (stream_) {
+    RTC_LOG(LS_INFO)
+        << "Setting FrameDecryptor (recv) because of SetFrameDecryptor, "
+           "remote_ssrc="
+        << config_.rtp.remote_ssrc;
+    stream_->SetFrameDecryptor(frame_decryptor);
+  }
+}
+
+// Forwards the delay to the underlying stream; false when no stream exists.
+bool WebRtcVideoReceiveChannel::WebRtcVideoReceiveStream::
+    SetBaseMinimumPlayoutDelayMs(int delay_ms) {
+  if (!stream_) {
+    return false;
+  }
+  return stream_->SetBaseMinimumPlayoutDelayMs(delay_ms);
+}
+
+// Reads the delay from the underlying stream; 0 when no stream exists.
+int WebRtcVideoReceiveChannel::WebRtcVideoReceiveStream::
+    GetBaseMinimumPlayoutDelayMs() const {
+  if (!stream_) {
+    return 0;
+  }
+  return stream_->GetBaseMinimumPlayoutDelayMs();
+}
+
+// Installs (or clears, with nullptr) the sink that receives decoded frames
+// from OnFrame(); guarded by sink_lock_.
+void WebRtcVideoReceiveChannel::WebRtcVideoReceiveStream::SetSink(
+    rtc::VideoSinkInterface<webrtc::VideoFrame>* sink) {
+  webrtc::MutexLock lock(&sink_lock_);
+  sink_ = sink;
+}
+
+// Snapshots decoder/RTP/RTCP statistics from the underlying stream into a
+// VideoReceiverInfo. RTX and FlexFEC counters are folded into the primary
+// packet/byte counters in addition to their dedicated fields. When
+// `log_stats` is set, the raw stats string is also logged.
+VideoReceiverInfo
+WebRtcVideoReceiveChannel::WebRtcVideoReceiveStream::GetVideoReceiverInfo(
+    bool log_stats) {
+  VideoReceiverInfo info;
+  info.ssrc_groups = stream_params_.ssrc_groups;
+  info.add_ssrc(config_.rtp.remote_ssrc);
+  webrtc::VideoReceiveStreamInterface::Stats stats = stream_->GetStats();
+  info.decoder_implementation_name = stats.decoder_implementation_name;
+  info.power_efficient_decoder = stats.power_efficient_decoder;
+  // Resolve the codec name from the configured decoders, if a payload type
+  // has been seen.
+  if (stats.current_payload_type != -1) {
+    info.codec_payload_type = stats.current_payload_type;
+    auto decoder_it = absl::c_find_if(config_.decoders, [&](const auto& d) {
+      return d.payload_type == stats.current_payload_type;
+    });
+    if (decoder_it != config_.decoders.end())
+      info.codec_name = decoder_it->video_format.name;
+  }
+  info.payload_bytes_received = stats.rtp_stats.packet_counter.payload_bytes;
+  info.header_and_padding_bytes_received =
+      stats.rtp_stats.packet_counter.header_bytes +
+      stats.rtp_stats.packet_counter.padding_bytes;
+  info.packets_received = stats.rtp_stats.packet_counter.packets;
+  info.packets_lost = stats.rtp_stats.packets_lost;
+  // Convert jitter from RTP clock ticks to milliseconds.
+  info.jitter_ms = stats.rtp_stats.jitter / (kVideoCodecClockrate / 1000);
+
+  info.framerate_received = stats.network_frame_rate;
+  info.framerate_decoded = stats.decode_frame_rate;
+  info.framerate_output = stats.render_frame_rate;
+  info.frame_width = stats.width;
+  info.frame_height = stats.height;
+
+  {
+    // estimated_remote_start_ntp_time_ms_ is written by OnFrame() under
+    // sink_lock_.
+    webrtc::MutexLock frame_cs(&sink_lock_);
+    info.capture_start_ntp_time_ms = estimated_remote_start_ntp_time_ms_;
+  }
+
+  info.decode_ms = stats.decode_ms;
+  info.max_decode_ms = stats.max_decode_ms;
+  info.current_delay_ms = stats.current_delay_ms;
+  info.target_delay_ms = stats.target_delay_ms;
+  info.jitter_buffer_ms = stats.jitter_buffer_ms;
+  info.jitter_buffer_delay_seconds =
+      stats.jitter_buffer_delay.seconds<double>();
+  info.jitter_buffer_target_delay_seconds =
+      stats.jitter_buffer_target_delay.seconds<double>();
+  info.jitter_buffer_emitted_count = stats.jitter_buffer_emitted_count;
+  info.jitter_buffer_minimum_delay_seconds =
+      stats.jitter_buffer_minimum_delay.seconds<double>();
+  info.min_playout_delay_ms = stats.min_playout_delay_ms;
+  info.render_delay_ms = stats.render_delay_ms;
+  info.frames_received =
+      stats.frame_counts.key_frames + stats.frame_counts.delta_frames;
+  info.frames_dropped = stats.frames_dropped;
+  info.frames_decoded = stats.frames_decoded;
+  info.key_frames_decoded = stats.frame_counts.key_frames;
+  info.frames_rendered = stats.frames_rendered;
+  info.qp_sum = stats.qp_sum;
+  info.total_decode_time = stats.total_decode_time;
+  info.total_processing_delay = stats.total_processing_delay;
+  info.total_assembly_time = stats.total_assembly_time;
+  info.frames_assembled_from_multiple_packets =
+      stats.frames_assembled_from_multiple_packets;
+  info.last_packet_received = stats.rtp_stats.last_packet_received;
+  info.estimated_playout_ntp_timestamp_ms =
+      stats.estimated_playout_ntp_timestamp_ms;
+  info.first_frame_received_to_decoded_ms =
+      stats.first_frame_received_to_decoded_ms;
+  info.total_inter_frame_delay = stats.total_inter_frame_delay;
+  info.total_squared_inter_frame_delay = stats.total_squared_inter_frame_delay;
+  info.interframe_delay_max_ms = stats.interframe_delay_max_ms;
+  info.freeze_count = stats.freeze_count;
+  info.pause_count = stats.pause_count;
+  info.total_freezes_duration_ms = stats.total_freezes_duration_ms;
+  info.total_pauses_duration_ms = stats.total_pauses_duration_ms;
+
+  info.content_type = stats.content_type;
+
+  info.firs_sent = stats.rtcp_packet_type_counts.fir_packets;
+  info.plis_sent = stats.rtcp_packet_type_counts.pli_packets;
+  info.nacks_sent = stats.rtcp_packet_type_counts.nack_packets;
+  // TODO(bugs.webrtc.org/10662): Add stats for LNTF.
+
+  info.timing_frame_info = stats.timing_frame_info;
+
+  if (stats.rtx_rtp_stats.has_value()) {
+    info.retransmitted_packets_received =
+        stats.rtx_rtp_stats->packet_counter.packets;
+    info.retransmitted_bytes_received =
+        stats.rtx_rtp_stats->packet_counter.payload_bytes;
+    // RTX information gets added to primary counters.
+    info.payload_bytes_received +=
+        stats.rtx_rtp_stats->packet_counter.payload_bytes;
+    info.header_and_padding_bytes_received +=
+        stats.rtx_rtp_stats->packet_counter.header_bytes +
+        stats.rtx_rtp_stats->packet_counter.padding_bytes;
+    info.packets_received += stats.rtx_rtp_stats->packet_counter.packets;
+  }
+
+  if (flexfec_stream_) {
+    const webrtc::ReceiveStatistics* fec_stats = flexfec_stream_->GetStats();
+    if (fec_stats) {
+      const webrtc::StreamStatistician* statistican =
+          fec_stats->GetStatistician(flexfec_config_.rtp.remote_ssrc);
+      if (statistican) {
+        const webrtc::RtpReceiveStats fec_rtp_stats = statistican->GetStats();
+        info.fec_packets_received = fec_rtp_stats.packet_counter.packets;
+        // TODO(bugs.webrtc.org/15250): implement fecPacketsDiscarded.
+        info.fec_bytes_received = fec_rtp_stats.packet_counter.payload_bytes;
+        // FEC information gets added to primary counters.
+        info.payload_bytes_received +=
+            fec_rtp_stats.packet_counter.payload_bytes;
+        info.header_and_padding_bytes_received +=
+            fec_rtp_stats.packet_counter.header_bytes +
+            fec_rtp_stats.packet_counter.padding_bytes;
+        info.packets_received += fec_rtp_stats.packet_counter.packets;
+      } else {
+        info.fec_packets_received = 0;
+      }
+    }
+  }
+
+  if (log_stats)
+    RTC_LOG(LS_INFO) << stats.ToString(rtc::TimeMillis());
+
+  return info;
+}
+
+// Installs a callback for recording encoded frames and requests a key frame
+// so the recording starts on a decodable frame.
+void WebRtcVideoReceiveChannel::WebRtcVideoReceiveStream::
+    SetRecordableEncodedFrameCallback(
+        std::function<void(const webrtc::RecordableEncodedFrame&)> callback) {
+  if (stream_) {
+    stream_->SetAndGetRecordingState(
+        webrtc::VideoReceiveStreamInterface::RecordingState(
+            std::move(callback)),
+        /*generate_key_frame=*/true);
+  } else {
+    RTC_LOG(LS_ERROR) << "Absent receive stream; ignoring setting encoded "
+                         "frame sink";
+  }
+}
+
+// Clears any encoded-frame recording callback (no key frame is requested).
+void WebRtcVideoReceiveChannel::WebRtcVideoReceiveStream::
+    ClearRecordableEncodedFrameCallback() {
+  if (stream_) {
+    stream_->SetAndGetRecordingState(
+        webrtc::VideoReceiveStreamInterface::RecordingState(),
+        /*generate_key_frame=*/false);
+  } else {
+    RTC_LOG(LS_ERROR) << "Absent receive stream; ignoring clearing encoded "
+                         "frame sink";
+  }
+}
+
+void WebRtcVideoReceiveChannel::WebRtcVideoReceiveStream::GenerateKeyFrame() {
+ if (stream_) {
+ stream_->GenerateKeyFrame();
+ } else {
+ RTC_LOG(LS_ERROR)
+ << "Absent receive stream; ignoring key frame generation request.";
+ }
+}
+
+// Stores `frame_transformer` in the config and applies it to the live stream
+// when one exists.
+void WebRtcVideoReceiveChannel::WebRtcVideoReceiveStream::
+    SetDepacketizerToDecoderFrameTransformer(
+        rtc::scoped_refptr<webrtc::FrameTransformerInterface>
+            frame_transformer) {
+  config_.frame_transformer = frame_transformer;
+  if (stream_)
+    stream_->SetDepacketizerToDecoderFrameTransformer(frame_transformer);
+}
+
+// Updates the local SSRC in the config and notifies the Call for both the
+// media stream and, when present, the FlexFEC stream.
+void WebRtcVideoReceiveChannel::WebRtcVideoReceiveStream::SetLocalSsrc(
+    uint32_t ssrc) {
+  config_.rtp.local_ssrc = ssrc;
+  call_->OnLocalSsrcUpdated(stream(), ssrc);
+  if (flexfec_stream_)
+    call_->OnLocalSsrcUpdated(*flexfec_stream_, ssrc);
+}
+
+// Forwards the new RTX SSRC to the underlying stream (which must exist).
+void WebRtcVideoReceiveChannel::WebRtcVideoReceiveStream::UpdateRtxSsrc(
+    uint32_t ssrc) {
+  stream_->UpdateRtxSsrc(ssrc);
+}
+// Resolves `ssrc` (0 selects the current unsignaled/default stream) to its
+// WebRtcVideoReceiveStream, or nullptr when no such stream exists.
+WebRtcVideoReceiveChannel::WebRtcVideoReceiveStream*
+WebRtcVideoReceiveChannel::FindReceiveStream(uint32_t ssrc) {
+  uint32_t lookup_ssrc = ssrc;
+  if (lookup_ssrc == 0) {
+    absl::optional<uint32_t> unsignaled_ssrc = GetUnsignaledSsrc();
+    if (!unsignaled_ssrc) {
+      return nullptr;
+    }
+    lookup_ssrc = *unsignaled_ssrc;
+  }
+  auto it = receive_streams_.find(lookup_ssrc);
+  return it == receive_streams_.end() ? nullptr : it->second;
+}
+
+// RTC_RUN_ON(worker_thread_)
+// Worker-thread half of packet reception: identifies header extensions,
+// stamps a missing arrival time, and hands the packet to the Call's
+// receiver, with MaybeCreateDefaultReceiveStream as the unknown-SSRC
+// fallback.
+void WebRtcVideoReceiveChannel::ProcessReceivedPacket(
+    webrtc::RtpPacketReceived packet) {
+  // TODO(bugs.webrtc.org/11993): This code is very similar to what
+  // WebRtcVoiceMediaChannel::OnPacketReceived does. For maintainability and
+  // consistency it would be good to move the interaction with call_->Receiver()
+  // to a common implementation and provide a callback on the worker thread
+  // for the exception case (DELIVERY_UNKNOWN_SSRC) and how retry is attempted.
+  // TODO(bugs.webrtc.org/7135): extensions in `packet` is currently set
+  // in RtpTransport and does not neccessarily include extensions specific
+  // to this channel/MID. Also see comment in
+  // BaseChannel::MaybeUpdateDemuxerAndRtpExtensions_w.
+  // It would likely be good if extensions where merged per BUNDLE and
+  // applied directly in RtpTransport::DemuxPacket;
+  packet.IdentifyExtensions(recv_rtp_extension_map_);
+  packet.set_payload_type_frequency(webrtc::kVideoPayloadTypeFrequency);
+  if (!packet.arrival_time().IsFinite()) {
+    packet.set_arrival_time(webrtc::Timestamp::Micros(rtc::TimeMicros()));
+  }
+
+  call_->Receiver()->DeliverRtpPacket(
+      webrtc::MediaType::VIDEO, std::move(packet),
+      absl::bind_front(
+          &WebRtcVideoReceiveChannel::MaybeCreateDefaultReceiveStream, this));
+}
+
+// Routes the encoded-frame recording callback to the stream for `ssrc`
+// (0 selects the default stream); logs an error when none exists.
+void WebRtcVideoReceiveChannel::SetRecordableEncodedFrameCallback(
+    uint32_t ssrc,
+    std::function<void(const webrtc::RecordableEncodedFrame&)> callback) {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  WebRtcVideoReceiveStream* stream = FindReceiveStream(ssrc);
+  if (!stream) {
+    RTC_LOG(LS_ERROR) << "Absent receive stream; ignoring setting encoded "
+                         "frame sink for ssrc "
+                      << ssrc;
+    return;
+  }
+  stream->SetRecordableEncodedFrameCallback(std::move(callback));
+}
+
+// Clears the encoded-frame recording callback on the stream for `ssrc`
+// (0 selects the default stream); logs an error when none exists.
+void WebRtcVideoReceiveChannel::ClearRecordableEncodedFrameCallback(
+    uint32_t ssrc) {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  WebRtcVideoReceiveStream* stream = FindReceiveStream(ssrc);
+  if (!stream) {
+    RTC_LOG(LS_ERROR) << "Absent receive stream; ignoring clearing encoded "
+                         "frame sink for ssrc "
+                      << ssrc;
+    return;
+  }
+  stream->ClearRecordableEncodedFrameCallback();
+}
+
+// Requests a key frame on the stream for `ssrc` (0 selects the default
+// stream); logs an error when none exists.
+void WebRtcVideoReceiveChannel::RequestRecvKeyFrame(uint32_t ssrc) {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  WebRtcVideoReceiveStream* stream = FindReceiveStream(ssrc);
+  if (!stream) {
+    RTC_LOG(LS_ERROR)
+        << "Absent receive stream; ignoring key frame generation for ssrc "
+        << ssrc;
+    return;
+  }
+  stream->GenerateKeyFrame();
+}
+
+// Attaches `frame_transformer` to the stream for `ssrc`. SSRC 0 stores the
+// transformer for a future unsignaled stream instead.
+void WebRtcVideoReceiveChannel::SetDepacketizerToDecoderFrameTransformer(
+    uint32_t ssrc,
+    rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer) {
+  RTC_DCHECK(frame_transformer);
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  if (ssrc == 0) {
+    // If the receiver is unsignaled, save the frame transformer and set it
+    // when the stream is associated with an ssrc.
+    unsignaled_frame_transformer_ = std::move(frame_transformer);
+    return;
+  }
+
+  auto matching_stream = receive_streams_.find(ssrc);
+  if (matching_stream != receive_streams_.end()) {
+    matching_stream->second->SetDepacketizerToDecoderFrameTransformer(
+        std::move(frame_transformer));
+  }
+}
+
+// ------------------------- VideoCodecSettings --------------------
+
+// Wraps `codec` with FlexFEC and RTX initially disabled (-1 payload types).
+VideoCodecSettings::VideoCodecSettings(const VideoCodec& codec)
+    : codec(codec), flexfec_payload_type(-1), rtx_payload_type(-1) {}
+
+// Full field-wise equality, including the FlexFEC payload type.
+bool VideoCodecSettings::operator==(const VideoCodecSettings& other) const {
+  return rtx_time == other.rtx_time &&
+         rtx_payload_type == other.rtx_payload_type &&
+         flexfec_payload_type == other.flexfec_payload_type &&
+         ulpfec == other.ulpfec && codec == other.codec;
+}
+
+// Equality that ignores the FlexFEC payload type — used to decide whether a
+// codec change is purely a FlexFEC toggle.
+bool VideoCodecSettings::EqualsDisregardingFlexfec(
+    const VideoCodecSettings& a,
+    const VideoCodecSettings& b) {
+  return a.rtx_time == b.rtx_time &&
+         a.rtx_payload_type == b.rtx_payload_type &&
+         a.ulpfec == b.ulpfec && a.codec == b.codec;
+}
+
+// Negation of operator==.
+bool VideoCodecSettings::operator!=(const VideoCodecSettings& other) const {
+  return !operator==(other);
+}
+
+} // namespace cricket
diff --git a/third_party/libwebrtc/media/engine/webrtc_video_engine.h b/third_party/libwebrtc/media/engine/webrtc_video_engine.h
new file mode 100644
index 0000000000..e4b1b2765b
--- /dev/null
+++ b/third_party/libwebrtc/media/engine/webrtc_video_engine.h
@@ -0,0 +1,906 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MEDIA_ENGINE_WEBRTC_VIDEO_ENGINE_H_
+#define MEDIA_ENGINE_WEBRTC_VIDEO_ENGINE_H_
+
+#include <stddef.h>
+
+#include <cstdint>
+#include <functional>
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "absl/functional/any_invocable.h"
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/call/transport.h"
+#include "api/crypto/crypto_options.h"
+#include "api/crypto/frame_decryptor_interface.h"
+#include "api/crypto/frame_encryptor_interface.h"
+#include "api/field_trials_view.h"
+#include "api/frame_transformer_interface.h"
+#include "api/rtc_error.h"
+#include "api/rtp_headers.h"
+#include "api/rtp_parameters.h"
+#include "api/rtp_sender_interface.h"
+#include "api/scoped_refptr.h"
+#include "api/sequence_checker.h"
+#include "api/task_queue/pending_task_safety_flag.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/transport/bitrate_settings.h"
+#include "api/transport/field_trial_based_config.h"
+#include "api/transport/rtp/rtp_source.h"
+#include "api/video/recordable_encoded_frame.h"
+#include "api/video/video_bitrate_allocator_factory.h"
+#include "api/video/video_frame.h"
+#include "api/video/video_sink_interface.h"
+#include "api/video/video_source_interface.h"
+#include "api/video/video_stream_encoder_settings.h"
+#include "api/video_codecs/sdp_video_format.h"
+#include "api/video_codecs/video_encoder_factory.h"
+#include "call/call.h"
+#include "call/flexfec_receive_stream.h"
+#include "call/rtp_config.h"
+#include "call/video_receive_stream.h"
+#include "call/video_send_stream.h"
+#include "media/base/codec.h"
+#include "media/base/media_channel.h"
+#include "media/base/media_channel_impl.h"
+#include "media/base/media_config.h"
+#include "media/base/media_engine.h"
+#include "media/base/stream_params.h"
+#include "modules/rtp_rtcp/include/rtp_header_extension_map.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "rtc_base/network/sent_packet.h"
+#include "rtc_base/network_route.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/system/no_unique_address.h"
+#include "rtc_base/thread_annotations.h"
+#include "video/config/video_encoder_config.h"
+
+namespace webrtc {
+class VideoDecoderFactory;
+class VideoEncoderFactory;
+} // namespace webrtc
+
+namespace cricket {
+
+// Public for testing.
+// Inputs StreamStats for all types of substreams (kMedia, kRtx, kFlexfec) and
+// merges any non-kMedia substream stats object into its referenced kMedia-type
+// substream. The resulting substreams are all kMedia. This means, for example,
+// that packet and byte counters of RTX and FlexFEC streams are accounted for in
+// the relevant RTP media stream's stats. This makes the resulting StreamStats
+// objects ready to be turned into "outbound-rtp" stats objects for GetStats()
+// which does not create separate stream stats objects for complementary
+// streams.
+std::map<uint32_t, webrtc::VideoSendStream::StreamStats>
+MergeInfoAboutOutboundRtpSubstreamsForTesting(
+ const std::map<uint32_t, webrtc::VideoSendStream::StreamStats>& substreams);
+
+// WebRtcVideoEngine is used for the new native WebRTC Video API (webrtc:1667).
+class WebRtcVideoEngine : public VideoEngineInterface {
+ public:
+ // These video codec factories represent all video codecs, i.e. both software
+ // and external hardware codecs.
+ WebRtcVideoEngine(
+ std::unique_ptr<webrtc::VideoEncoderFactory> video_encoder_factory,
+ std::unique_ptr<webrtc::VideoDecoderFactory> video_decoder_factory,
+ const webrtc::FieldTrialsView& trials);
+
+ ~WebRtcVideoEngine() override;
+
+ std::unique_ptr<VideoMediaSendChannelInterface> CreateSendChannel(
+ webrtc::Call* call,
+ const MediaConfig& config,
+ const VideoOptions& options,
+ const webrtc::CryptoOptions& crypto_options,
+ webrtc::VideoBitrateAllocatorFactory* video_bitrate_allocator_factory)
+ override;
+ std::unique_ptr<VideoMediaReceiveChannelInterface> CreateReceiveChannel(
+ webrtc::Call* call,
+ const MediaConfig& config,
+ const VideoOptions& options,
+ const webrtc::CryptoOptions& crypto_options) override;
+
+ std::vector<VideoCodec> send_codecs() const override {
+ return send_codecs(true);
+ }
+ std::vector<VideoCodec> recv_codecs() const override {
+ return recv_codecs(true);
+ }
+ std::vector<VideoCodec> send_codecs(bool include_rtx) const override;
+ std::vector<VideoCodec> recv_codecs(bool include_rtx) const override;
+ std::vector<webrtc::RtpHeaderExtensionCapability> GetRtpHeaderExtensions()
+ const override;
+
+ private:
+ const std::unique_ptr<webrtc::VideoDecoderFactory> decoder_factory_;
+ const std::unique_ptr<webrtc::VideoEncoderFactory> encoder_factory_;
+ const std::unique_ptr<webrtc::VideoBitrateAllocatorFactory>
+ bitrate_allocator_factory_;
+ const webrtc::FieldTrialsView& trials_;
+};
+
+struct VideoCodecSettings {
+ explicit VideoCodecSettings(const VideoCodec& codec);
+
+ // Checks if all members of `*this` are equal to the corresponding members
+ // of `other`.
+ bool operator==(const VideoCodecSettings& other) const;
+ bool operator!=(const VideoCodecSettings& other) const;
+
+ // Checks if all members of `a`, except `flexfec_payload_type`, are equal
+ // to the corresponding members of `b`.
+ static bool EqualsDisregardingFlexfec(const VideoCodecSettings& a,
+ const VideoCodecSettings& b);
+
+ VideoCodec codec;
+ webrtc::UlpfecConfig ulpfec;
+ int flexfec_payload_type; // -1 if absent.
+ int rtx_payload_type; // -1 if absent.
+ absl::optional<int> rtx_time;
+};
+
+class WebRtcVideoSendChannel : public MediaChannelUtil,
+ public VideoMediaSendChannelInterface,
+ public webrtc::EncoderSwitchRequestCallback {
+ public:
+ WebRtcVideoSendChannel(
+ webrtc::Call* call,
+ const MediaConfig& config,
+ const VideoOptions& options,
+ const webrtc::CryptoOptions& crypto_options,
+ webrtc::VideoEncoderFactory* encoder_factory,
+ webrtc::VideoDecoderFactory* decoder_factory,
+ webrtc::VideoBitrateAllocatorFactory* bitrate_allocator_factory);
+ ~WebRtcVideoSendChannel() override;
+
+ MediaType media_type() const override { return MEDIA_TYPE_VIDEO; }
+ // Type manipulations
+ VideoMediaSendChannelInterface* AsVideoSendChannel() override { return this; }
+ VoiceMediaSendChannelInterface* AsVoiceSendChannel() override {
+ RTC_CHECK_NOTREACHED();
+ return nullptr;
+ }
+ // Functions imported from MediaChannelUtil
+ bool HasNetworkInterface() const override {
+ return MediaChannelUtil::HasNetworkInterface();
+ }
+ void SetExtmapAllowMixed(bool extmap_allow_mixed) override {
+ MediaChannelUtil::SetExtmapAllowMixed(extmap_allow_mixed);
+ }
+ bool ExtmapAllowMixed() const override {
+ return MediaChannelUtil::ExtmapAllowMixed();
+ }
+
+ // Common functions between sender and receiver
+ void SetInterface(MediaChannelNetworkInterface* iface) override;
+ // VideoMediaSendChannelInterface implementation
+ bool SetSenderParameters(const VideoSenderParameters& params) override;
+ webrtc::RTCError SetRtpSendParameters(
+ uint32_t ssrc,
+ const webrtc::RtpParameters& parameters,
+ webrtc::SetParametersCallback callback) override;
+ webrtc::RtpParameters GetRtpSendParameters(uint32_t ssrc) const override;
+ absl::optional<Codec> GetSendCodec() const override;
+ bool SetSend(bool send) override;
+ bool SetVideoSend(
+ uint32_t ssrc,
+ const VideoOptions* options,
+ rtc::VideoSourceInterface<webrtc::VideoFrame>* source) override;
+ bool AddSendStream(const StreamParams& sp) override;
+ bool RemoveSendStream(uint32_t ssrc) override;
+ void FillBitrateInfo(BandwidthEstimationInfo* bwe_info) override;
+ bool GetStats(VideoMediaSendInfo* info) override;
+
+ void OnPacketSent(const rtc::SentPacket& sent_packet) override;
+ void OnReadyToSend(bool ready) override;
+ void OnNetworkRouteChanged(absl::string_view transport_name,
+ const rtc::NetworkRoute& network_route) override;
+
+ // Set a frame encryptor to a particular ssrc that will intercept all
+ // outgoing video frames and attempt to encrypt them and forward the result
+ // to the packetizer.
+ void SetFrameEncryptor(uint32_t ssrc,
+ rtc::scoped_refptr<webrtc::FrameEncryptorInterface>
+ frame_encryptor) override;
+
+ // note: The encoder_selector object must remain valid for the lifetime of the
+ // MediaChannel, unless replaced.
+ void SetEncoderSelector(uint32_t ssrc,
+ webrtc::VideoEncoderFactory::EncoderSelectorInterface*
+ encoder_selector) override;
+
+ void SetSendCodecChangedCallback(
+ absl::AnyInvocable<void()> callback) override {
+ send_codec_changed_callback_ = std::move(callback);
+ }
+
+ void SetSsrcListChangedCallback(
+ absl::AnyInvocable<void(const std::set<uint32_t>&)> callback) override {
+ ssrc_list_changed_callback_ = std::move(callback);
+ }
+
+ // Implemented for VideoMediaChannelTest.
+ bool sending() const {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return sending_;
+ }
+
+ // AdaptReason is used for expressing why a WebRtcVideoSendStream requests
+ // a lower input frame size than the currently configured camera input frame
+ // size. There can be more than one reason OR:ed together.
+ enum AdaptReason {
+ ADAPTREASON_NONE = 0,
+ ADAPTREASON_CPU = 1,
+ ADAPTREASON_BANDWIDTH = 2,
+ };
+
+ // TODO(webrtc:14852): Update downstream projects to use
+ // cricket::kDefaultVideoMaxQpVpx/H26x and remove.
+ static constexpr int kDefaultQpMax = 56;
+
+ // Implements webrtc::EncoderSwitchRequestCallback.
+ void RequestEncoderFallback() override;
+ void RequestEncoderSwitch(const webrtc::SdpVideoFormat& format,
+ bool allow_default_fallback) override;
+
+ void GenerateSendKeyFrame(uint32_t ssrc,
+ const std::vector<std::string>& rids) override;
+
+ void SetEncoderToPacketizerFrameTransformer(
+ uint32_t ssrc,
+ rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer)
+ override;
+ // Information queries to support SetReceiverFeedbackParameters
+ webrtc::RtcpMode SendCodecRtcpMode() const override {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return send_params_.rtcp.reduced_size ? webrtc::RtcpMode::kReducedSize
+ : webrtc::RtcpMode::kCompound;
+ }
+
+ bool SendCodecHasLntf() const override {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ if (!send_codec()) {
+ return false;
+ }
+ return HasLntf(send_codec()->codec);
+ }
+ bool SendCodecHasNack() const override {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ if (!send_codec()) {
+ return false;
+ }
+ return HasNack(send_codec()->codec);
+ }
+ absl::optional<int> SendCodecRtxTime() const override {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ if (!send_codec()) {
+ return absl::nullopt;
+ }
+ return send_codec()->rtx_time;
+ }
+
+ private:
+ struct ChangedSenderParameters {
+ // These optionals are unset if not changed.
+ absl::optional<VideoCodecSettings> send_codec;
+ absl::optional<std::vector<VideoCodecSettings>> negotiated_codecs;
+ absl::optional<std::vector<webrtc::RtpExtension>> rtp_header_extensions;
+ absl::optional<std::string> mid;
+ absl::optional<bool> extmap_allow_mixed;
+ absl::optional<int> max_bandwidth_bps;
+ absl::optional<bool> conference_mode;
+ absl::optional<webrtc::RtcpMode> rtcp_mode;
+ };
+
+ bool GetChangedSenderParameters(const VideoSenderParameters& params,
+ ChangedSenderParameters* changed_params) const
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(thread_checker_);
+ bool ApplyChangedParams(const ChangedSenderParameters& changed_params);
+ bool ValidateSendSsrcAvailability(const StreamParams& sp) const
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(thread_checker_);
+
+ // Populates `rtx_associated_payload_types`, `raw_payload_types` and
+ // `decoders` based on codec settings provided by `recv_codecs`.
+ // `recv_codecs` must be non-empty and all other parameters must be empty.
+ static void ExtractCodecInformation(
+ rtc::ArrayView<const VideoCodecSettings> recv_codecs,
+ std::map<int, int>& rtx_associated_payload_types,
+ std::set<int>& raw_payload_types,
+ std::vector<webrtc::VideoReceiveStreamInterface::Decoder>& decoders);
+
+ // Wrapper for the sender part.
+ class WebRtcVideoSendStream {
+ public:
+ WebRtcVideoSendStream(
+ webrtc::Call* call,
+ const StreamParams& sp,
+ webrtc::VideoSendStream::Config config,
+ const VideoOptions& options,
+ bool enable_cpu_overuse_detection,
+ int max_bitrate_bps,
+ const absl::optional<VideoCodecSettings>& codec_settings,
+ const absl::optional<std::vector<webrtc::RtpExtension>>& rtp_extensions,
+ const VideoSenderParameters& send_params);
+ ~WebRtcVideoSendStream();
+
+ void SetSenderParameters(const ChangedSenderParameters& send_params);
+ webrtc::RTCError SetRtpParameters(const webrtc::RtpParameters& parameters,
+ webrtc::SetParametersCallback callback);
+ webrtc::RtpParameters GetRtpParameters() const;
+
+ void SetFrameEncryptor(
+ rtc::scoped_refptr<webrtc::FrameEncryptorInterface> frame_encryptor);
+
+ bool SetVideoSend(const VideoOptions* options,
+ rtc::VideoSourceInterface<webrtc::VideoFrame>* source);
+
+ // note: The encoder_selector object must remain valid for the lifetime of
+ // the MediaChannel, unless replaced.
+ void SetEncoderSelector(
+ webrtc::VideoEncoderFactory::EncoderSelectorInterface*
+ encoder_selector);
+
+ void SetSend(bool send);
+
+ const std::vector<uint32_t>& GetSsrcs() const;
+ // Returns per ssrc VideoSenderInfos. Useful for simulcast scenario.
+ std::vector<VideoSenderInfo> GetPerLayerVideoSenderInfos(bool log_stats);
+ // Aggregates per ssrc VideoSenderInfos to single VideoSenderInfo for
+ // legacy reasons. Used in old GetStats API and track stats.
+ VideoSenderInfo GetAggregatedVideoSenderInfo(
+ const std::vector<VideoSenderInfo>& infos) const;
+ void FillBitrateInfo(BandwidthEstimationInfo* bwe_info);
+
+ void SetEncoderToPacketizerFrameTransformer(
+ rtc::scoped_refptr<webrtc::FrameTransformerInterface>
+ frame_transformer);
+ void GenerateKeyFrame(const std::vector<std::string>& rids);
+
+ private:
+ // Parameters needed to reconstruct the underlying stream.
+ // webrtc::VideoSendStream doesn't support setting a lot of options on the
+ // fly, so when those need to be changed we tear down and reconstruct with
+ // similar parameters depending on which options changed etc.
+ struct VideoSendStreamParameters {
+ VideoSendStreamParameters(
+ webrtc::VideoSendStream::Config config,
+ const VideoOptions& options,
+ int max_bitrate_bps,
+ const absl::optional<VideoCodecSettings>& codec_settings);
+ webrtc::VideoSendStream::Config config;
+ VideoOptions options;
+ int max_bitrate_bps;
+ bool conference_mode;
+ absl::optional<VideoCodecSettings> codec_settings;
+ // Sent resolutions + bitrates etc. by the underlying VideoSendStream,
+ // typically changes when setting a new resolution or reconfiguring
+ // bitrates.
+ webrtc::VideoEncoderConfig encoder_config;
+ };
+
+ rtc::scoped_refptr<webrtc::VideoEncoderConfig::EncoderSpecificSettings>
+ ConfigureVideoEncoderSettings(const VideoCodec& codec);
+ void SetCodec(const VideoCodecSettings& codec);
+ void RecreateWebRtcStream();
+ webrtc::VideoEncoderConfig CreateVideoEncoderConfig(
+ const VideoCodec& codec) const;
+ void ReconfigureEncoder(webrtc::SetParametersCallback callback);
+
+ // Calls Start or Stop according to whether or not `sending_` is true,
+ // and whether or not the encoding in `rtp_parameters_` is active.
+ void UpdateSendState();
+
+ webrtc::DegradationPreference GetDegradationPreference() const
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(&thread_checker_);
+
+ RTC_NO_UNIQUE_ADDRESS webrtc::SequenceChecker thread_checker_;
+ webrtc::TaskQueueBase* const worker_thread_;
+ const std::vector<uint32_t> ssrcs_ RTC_GUARDED_BY(&thread_checker_);
+ const std::vector<SsrcGroup> ssrc_groups_ RTC_GUARDED_BY(&thread_checker_);
+ webrtc::Call* const call_;
+ const bool enable_cpu_overuse_detection_;
+ rtc::VideoSourceInterface<webrtc::VideoFrame>* source_
+ RTC_GUARDED_BY(&thread_checker_);
+
+ webrtc::VideoSendStream* stream_ RTC_GUARDED_BY(&thread_checker_);
+
+ // Contains settings that are the same for all streams in the MediaChannel,
+ // such as codecs, header extensions, and the global bitrate limit for the
+ // entire channel.
+ VideoSendStreamParameters parameters_ RTC_GUARDED_BY(&thread_checker_);
+ // Contains settings that are unique for each stream, such as max_bitrate.
+ // Does *not* contain codecs, however.
+ // TODO(skvlad): Move ssrcs_ and ssrc_groups_ into rtp_parameters_.
+ // TODO(skvlad): Combine parameters_ and rtp_parameters_ once we have only
+ // one stream per MediaChannel.
+ webrtc::RtpParameters rtp_parameters_ RTC_GUARDED_BY(&thread_checker_);
+
+ bool sending_ RTC_GUARDED_BY(&thread_checker_);
+
+ // TODO(asapersson): investigate why setting
+ // DegradationPreference::MAINTAIN_RESOLUTION isn't sufficient to disable
+ // downscaling everywhere in the pipeline.
+ const bool disable_automatic_resize_;
+ };
+
+ void Construct(webrtc::Call* call, WebRtcVideoEngine* engine);
+
+ // Get all codecs that are compatible with the receiver.
+ std::vector<VideoCodecSettings> SelectSendVideoCodecs(
+ const std::vector<VideoCodecSettings>& remote_mapped_codecs) const
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(thread_checker_);
+
+ void FillSenderStats(VideoMediaSendInfo* info, bool log_stats)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(thread_checker_);
+ void FillBandwidthEstimationStats(const webrtc::Call::Stats& stats,
+ VideoMediaInfo* info)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(thread_checker_);
+ void FillSendCodecStats(VideoMediaSendInfo* video_media_info)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(thread_checker_);
+
+ // Accessor function for send_codec_. Introduced in order to ensure
+ // that a receive channel does not touch the send codec directly.
+ // Can go away once these are different classes.
+ // TODO(bugs.webrtc.org/13931): Remove this function
+ absl::optional<VideoCodecSettings>& send_codec() { return send_codec_; }
+ const absl::optional<VideoCodecSettings>& send_codec() const {
+ return send_codec_;
+ }
+ webrtc::TaskQueueBase* const worker_thread_;
+ webrtc::ScopedTaskSafety task_safety_;
+ RTC_NO_UNIQUE_ADDRESS webrtc::SequenceChecker network_thread_checker_{
+ webrtc::SequenceChecker::kDetached};
+ RTC_NO_UNIQUE_ADDRESS webrtc::SequenceChecker thread_checker_;
+
+ uint32_t rtcp_receiver_report_ssrc_ RTC_GUARDED_BY(thread_checker_);
+ bool sending_ RTC_GUARDED_BY(thread_checker_);
+ bool receiving_ RTC_GUARDED_BY(&thread_checker_);
+ webrtc::Call* const call_;
+
+ rtc::VideoSinkInterface<webrtc::VideoFrame>* default_sink_
+ RTC_GUARDED_BY(thread_checker_);
+
+ // Delay for unsignaled streams, which may be set before the stream exists.
+ int default_recv_base_minimum_delay_ms_ RTC_GUARDED_BY(thread_checker_) = 0;
+
+ const MediaConfig::Video video_config_ RTC_GUARDED_BY(thread_checker_);
+
+ // Using primary-ssrc (first ssrc) as key.
+ std::map<uint32_t, WebRtcVideoSendStream*> send_streams_
+ RTC_GUARDED_BY(thread_checker_);
+ // When the channel and demuxer get reconfigured, there is a window of time
+ // where we have to be prepared for packets arriving based on the old demuxer
+ // criteria because the streams live on the worker thread and the demuxer
+ // lives on the network thread. Because packets are posted from the network
+ // thread to the worker thread, they can still be in-flight when streams are
+ // reconfigured. This can happen when `demuxer_criteria_id_` and
+ // `demuxer_criteria_completed_id_` don't match. During this time, we do not
+ // want to create unsignaled receive streams and should instead drop the
+ // packets. E.g:
+ // * If RemoveRecvStream(old_ssrc) was recently called, there may be packets
+ // in-flight for that ssrc. This happens when a receiver becomes inactive.
+ // * If we go from one to many m= sections, the demuxer may change from
+ // forwarding all packets to only forwarding the configured ssrcs, so there
+ // is a risk of receiving ssrcs for other, recently added m= sections.
+ uint32_t demuxer_criteria_id_ RTC_GUARDED_BY(thread_checker_) = 0;
+ uint32_t demuxer_criteria_completed_id_ RTC_GUARDED_BY(thread_checker_) = 0;
+ absl::optional<int64_t> last_unsignalled_ssrc_creation_time_ms_
+ RTC_GUARDED_BY(thread_checker_);
+ std::set<uint32_t> send_ssrcs_ RTC_GUARDED_BY(thread_checker_);
+ std::set<uint32_t> receive_ssrcs_ RTC_GUARDED_BY(thread_checker_);
+
+ absl::optional<VideoCodecSettings> send_codec_
+ RTC_GUARDED_BY(thread_checker_);
+ std::vector<VideoCodecSettings> negotiated_codecs_
+ RTC_GUARDED_BY(thread_checker_);
+
+ std::vector<webrtc::RtpExtension> send_rtp_extensions_
+ RTC_GUARDED_BY(thread_checker_);
+
+ webrtc::VideoEncoderFactory* const encoder_factory_
+ RTC_GUARDED_BY(thread_checker_);
+ webrtc::VideoDecoderFactory* const decoder_factory_
+ RTC_GUARDED_BY(thread_checker_);
+ webrtc::VideoBitrateAllocatorFactory* const bitrate_allocator_factory_
+ RTC_GUARDED_BY(thread_checker_);
+ std::vector<VideoCodecSettings> recv_codecs_ RTC_GUARDED_BY(thread_checker_);
+ webrtc::RtpHeaderExtensionMap recv_rtp_extension_map_
+ RTC_GUARDED_BY(thread_checker_);
+ std::vector<webrtc::RtpExtension> recv_rtp_extensions_
+ RTC_GUARDED_BY(thread_checker_);
+ // See reason for keeping track of the FlexFEC payload type separately in
+ // comment in WebRtcVideoChannel::ChangedReceiverParameters.
+ int recv_flexfec_payload_type_ RTC_GUARDED_BY(thread_checker_);
+ webrtc::BitrateConstraints bitrate_config_ RTC_GUARDED_BY(thread_checker_);
+ // TODO(deadbeef): Don't duplicate information between
+ // send_params/recv_params, rtp_extensions, options, etc.
+ VideoSenderParameters send_params_ RTC_GUARDED_BY(thread_checker_);
+ VideoOptions default_send_options_ RTC_GUARDED_BY(thread_checker_);
+ VideoReceiverParameters recv_params_ RTC_GUARDED_BY(thread_checker_);
+ int64_t last_send_stats_log_ms_ RTC_GUARDED_BY(thread_checker_);
+ int64_t last_receive_stats_log_ms_ RTC_GUARDED_BY(thread_checker_);
+ const bool discard_unknown_ssrc_packets_ RTC_GUARDED_BY(thread_checker_);
+ // This is a stream param that comes from the remote description, but wasn't
+ // signaled with any a=ssrc lines. It holds information that was signaled
+ // before the unsignaled receive stream is created when the first packet is
+ // received.
+ StreamParams unsignaled_stream_params_ RTC_GUARDED_BY(thread_checker_);
+ // Per peer connection crypto options that last for the lifetime of the peer
+ // connection.
+ const webrtc::CryptoOptions crypto_options_ RTC_GUARDED_BY(thread_checker_);
+
+ // Optional frame transformer set on unsignaled streams.
+ rtc::scoped_refptr<webrtc::FrameTransformerInterface>
+ unsignaled_frame_transformer_ RTC_GUARDED_BY(thread_checker_);
+
+ // RTP parameters that need to be set when creating a video receive stream.
+ // Only used in Receiver mode - in Both mode, it reads those things from the
+ // codec.
+ webrtc::VideoReceiveStreamInterface::Config::Rtp rtp_config_;
+
+ // Callback invoked whenever the send codec changes.
+ // TODO(bugs.webrtc.org/13931): Remove again when coupling isn't needed.
+ absl::AnyInvocable<void()> send_codec_changed_callback_;
+ // Callback invoked whenever the list of SSRCs changes.
+ absl::AnyInvocable<void(const std::set<uint32_t>&)>
+ ssrc_list_changed_callback_;
+};
+
+class WebRtcVideoReceiveChannel : public MediaChannelUtil,
+ public VideoMediaReceiveChannelInterface {
+ public:
+ WebRtcVideoReceiveChannel(webrtc::Call* call,
+ const MediaConfig& config,
+ const VideoOptions& options,
+ const webrtc::CryptoOptions& crypto_options,
+ webrtc::VideoDecoderFactory* decoder_factory);
+ ~WebRtcVideoReceiveChannel() override;
+
+ public:
+ MediaType media_type() const override { return MEDIA_TYPE_VIDEO; }
+ VideoMediaReceiveChannelInterface* AsVideoReceiveChannel() override {
+ return this;
+ }
+ VoiceMediaReceiveChannelInterface* AsVoiceReceiveChannel() override {
+ RTC_CHECK_NOTREACHED();
+ return nullptr;
+ }
+
+ // Common functions between sender and receiver
+ void SetInterface(MediaChannelNetworkInterface* iface) override;
+ // VideoMediaReceiveChannelInterface implementation
+ bool SetReceiverParameters(const VideoReceiverParameters& params) override;
+ webrtc::RtpParameters GetRtpReceiverParameters(uint32_t ssrc) const override;
+ webrtc::RtpParameters GetDefaultRtpReceiveParameters() const override;
+ void SetReceive(bool receive) override;
+ bool AddRecvStream(const StreamParams& sp) override;
+ bool AddDefaultRecvStreamForTesting(const StreamParams& sp) override {
+ // Invokes private AddRecvStream variant function
+ return AddRecvStream(sp, true);
+ }
+ bool RemoveRecvStream(uint32_t ssrc) override;
+ void ResetUnsignaledRecvStream() override;
+ absl::optional<uint32_t> GetUnsignaledSsrc() const override;
+ void OnDemuxerCriteriaUpdatePending() override;
+ void OnDemuxerCriteriaUpdateComplete() override;
+ bool SetSink(uint32_t ssrc,
+ rtc::VideoSinkInterface<webrtc::VideoFrame>* sink) override;
+ void SetDefaultSink(
+ rtc::VideoSinkInterface<webrtc::VideoFrame>* sink) override;
+ bool GetStats(VideoMediaReceiveInfo* info) override;
+ void OnPacketReceived(const webrtc::RtpPacketReceived& packet) override;
+ bool SetBaseMinimumPlayoutDelayMs(uint32_t ssrc, int delay_ms) override;
+
+ absl::optional<int> GetBaseMinimumPlayoutDelayMs(
+ uint32_t ssrc) const override;
+
+ // Choose one of the available SSRCs (or default if none) as the current
+ // receiver report SSRC.
+ void ChooseReceiverReportSsrc(const std::set<uint32_t>& choices) override;
+
+ // E2E Encrypted Video Frame API
+ // Set a frame decryptor to a particular ssrc that will intercept all
+ // incoming video frames and attempt to decrypt them before forwarding the
+ // result.
+ void SetFrameDecryptor(uint32_t ssrc,
+ rtc::scoped_refptr<webrtc::FrameDecryptorInterface>
+ frame_decryptor) override;
+ void SetRecordableEncodedFrameCallback(
+ uint32_t ssrc,
+ std::function<void(const webrtc::RecordableEncodedFrame&)> callback)
+ override;
+ void ClearRecordableEncodedFrameCallback(uint32_t ssrc) override;
+ void RequestRecvKeyFrame(uint32_t ssrc) override;
+ void SetDepacketizerToDecoderFrameTransformer(
+ uint32_t ssrc,
+ rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer)
+ override;
+ std::vector<webrtc::RtpSource> GetSources(uint32_t ssrc) const override;
+
+ void SetReceiverFeedbackParameters(bool lntf_enabled,
+ bool nack_enabled,
+ webrtc::RtcpMode rtcp_mode,
+ absl::optional<int> rtx_time) override;
+
+ private:
+ class WebRtcVideoReceiveStream;
+ struct ChangedReceiverParameters {
+ // These optionals are unset if not changed.
+ absl::optional<std::vector<VideoCodecSettings>> codec_settings;
+ absl::optional<std::vector<webrtc::RtpExtension>> rtp_header_extensions;
+ // Keep track of the FlexFEC payload type separately from `codec_settings`.
+ // This allows us to recreate the FlexfecReceiveStream separately from the
+ // VideoReceiveStreamInterface when the FlexFEC payload type is changed.
+ absl::optional<int> flexfec_payload_type;
+ };
+
+ // Finds VideoReceiveStreamInterface corresponding to ssrc. Aware of
+ // unsignaled ssrc handling.
+ WebRtcVideoReceiveStream* FindReceiveStream(uint32_t ssrc)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(thread_checker_);
+
+ void ProcessReceivedPacket(webrtc::RtpPacketReceived packet)
+ RTC_RUN_ON(thread_checker_);
+
+ // Expected to be invoked once per packet that belongs to this channel that
+ // can not be demuxed.
+ // Returns true if a new default stream has been created.
+ bool MaybeCreateDefaultReceiveStream(
+ const webrtc::RtpPacketReceived& parsed_packet)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(thread_checker_);
+ void ReCreateDefaultReceiveStream(uint32_t ssrc,
+ absl::optional<uint32_t> rtx_ssrc);
+ // Add a receive stream. Used for testing.
+ bool AddRecvStream(const StreamParams& sp, bool default_stream);
+
+ void ConfigureReceiverRtp(
+ webrtc::VideoReceiveStreamInterface::Config* config,
+ webrtc::FlexfecReceiveStream::Config* flexfec_config,
+ const StreamParams& sp) const
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(thread_checker_);
+ bool ValidateReceiveSsrcAvailability(const StreamParams& sp) const
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(thread_checker_);
+ void DeleteReceiveStream(WebRtcVideoReceiveStream* stream)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(thread_checker_);
+
+ // Called when the local ssrc changes. Sets `rtcp_receiver_report_ssrc_` and
+ // updates the receive streams.
+ void SetReceiverReportSsrc(uint32_t ssrc) RTC_RUN_ON(&thread_checker_);
+
+ // Wrapper for the receiver part, contains configs etc. that are needed to
+ // reconstruct the underlying VideoReceiveStreamInterface.
+ class WebRtcVideoReceiveStream
+ : public rtc::VideoSinkInterface<webrtc::VideoFrame> {
+ public:
+ WebRtcVideoReceiveStream(
+ webrtc::Call* call,
+ const StreamParams& sp,
+ webrtc::VideoReceiveStreamInterface::Config config,
+ bool default_stream,
+ const std::vector<VideoCodecSettings>& recv_codecs,
+ const webrtc::FlexfecReceiveStream::Config& flexfec_config);
+ ~WebRtcVideoReceiveStream();
+
+ webrtc::VideoReceiveStreamInterface& stream();
+ // Return value may be nullptr.
+ webrtc::FlexfecReceiveStream* flexfec_stream();
+
+ const std::vector<uint32_t>& GetSsrcs() const;
+
+ std::vector<webrtc::RtpSource> GetSources();
+
+ // Does not return codecs, nor header extensions, they are filled by the
+ // owning WebRtcVideoChannel.
+ webrtc::RtpParameters GetRtpParameters() const;
+
+ // TODO(deadbeef): Move these feedback parameters into the recv parameters.
+ void SetFeedbackParameters(bool lntf_enabled,
+ bool nack_enabled,
+ webrtc::RtcpMode rtcp_mode,
+ absl::optional<int> rtx_time);
+ void SetReceiverParameters(const ChangedReceiverParameters& recv_params);
+
+ void OnFrame(const webrtc::VideoFrame& frame) override;
+ bool IsDefaultStream() const;
+
+ void SetFrameDecryptor(
+ rtc::scoped_refptr<webrtc::FrameDecryptorInterface> frame_decryptor);
+
+ bool SetBaseMinimumPlayoutDelayMs(int delay_ms);
+
+ int GetBaseMinimumPlayoutDelayMs() const;
+
+ void SetSink(rtc::VideoSinkInterface<webrtc::VideoFrame>* sink);
+
+ VideoReceiverInfo GetVideoReceiverInfo(bool log_stats);
+
+ void SetRecordableEncodedFrameCallback(
+ std::function<void(const webrtc::RecordableEncodedFrame&)> callback);
+ void ClearRecordableEncodedFrameCallback();
+ void GenerateKeyFrame();
+
+ void SetDepacketizerToDecoderFrameTransformer(
+ rtc::scoped_refptr<webrtc::FrameTransformerInterface>
+ frame_transformer);
+
+ void SetLocalSsrc(uint32_t local_ssrc);
+ void UpdateRtxSsrc(uint32_t ssrc);
+ void StartReceiveStream();
+ void StopReceiveStream();
+
+ private:
+ // Attempts to reconfigure an already existing `flexfec_stream_`, create
+ // one if the configuration is now complete or remove a flexfec stream
+ // when disabled.
+ void SetFlexFecPayload(int payload_type);
+
+ void RecreateReceiveStream();
+ void CreateReceiveStream();
+
+ // Applies a new receive codecs configuration to `config_`. Returns true
+ // if the internal stream needs to be reconstructed, or false if no changes
+ // were applied.
+ bool ReconfigureCodecs(const std::vector<VideoCodecSettings>& recv_codecs);
+
+ webrtc::Call* const call_;
+ const StreamParams stream_params_;
+
+ // Both `stream_` and `flexfec_stream_` are managed by `this`. They are
+ // destroyed by calling call_->DestroyVideoReceiveStream and
+ // call_->DestroyFlexfecReceiveStream, respectively.
+ webrtc::VideoReceiveStreamInterface* stream_;
+ const bool default_stream_;
+ webrtc::VideoReceiveStreamInterface::Config config_;
+ webrtc::FlexfecReceiveStream::Config flexfec_config_;
+ webrtc::FlexfecReceiveStream* flexfec_stream_;
+
+ webrtc::Mutex sink_lock_;
+ rtc::VideoSinkInterface<webrtc::VideoFrame>* sink_
+ RTC_GUARDED_BY(sink_lock_);
+ int64_t first_frame_timestamp_ RTC_GUARDED_BY(sink_lock_);
+ // Start NTP time is estimated as current remote NTP time (estimated from
+ // RTCP) minus the elapsed time, as soon as remote NTP time is available.
+ int64_t estimated_remote_start_ntp_time_ms_ RTC_GUARDED_BY(sink_lock_);
+
+ RTC_NO_UNIQUE_ADDRESS webrtc::SequenceChecker thread_checker_;
+ bool receiving_ RTC_GUARDED_BY(&thread_checker_);
+ };
+ bool GetChangedReceiverParameters(const VideoReceiverParameters& params,
+ ChangedReceiverParameters* changed_params)
+ const RTC_EXCLUSIVE_LOCKS_REQUIRED(thread_checker_);
+
+ std::map<uint32_t, WebRtcVideoReceiveStream*> receive_streams_
+ RTC_GUARDED_BY(thread_checker_);
+ void FillReceiverStats(VideoMediaReceiveInfo* info, bool log_stats)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(thread_checker_);
+ void FillReceiveCodecStats(VideoMediaReceiveInfo* video_media_info)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(thread_checker_);
+
+ StreamParams unsignaled_stream_params() {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return unsignaled_stream_params_;
+ }
+ // Variables.
+ webrtc::TaskQueueBase* const worker_thread_;
+ webrtc::ScopedTaskSafety task_safety_;
+ RTC_NO_UNIQUE_ADDRESS webrtc::SequenceChecker network_thread_checker_{
+ webrtc::SequenceChecker::kDetached};
+ RTC_NO_UNIQUE_ADDRESS webrtc::SequenceChecker thread_checker_;
+
+ uint32_t rtcp_receiver_report_ssrc_ RTC_GUARDED_BY(thread_checker_);
+ bool receiving_ RTC_GUARDED_BY(&thread_checker_);
+ webrtc::Call* const call_;
+
+ rtc::VideoSinkInterface<webrtc::VideoFrame>* default_sink_
+ RTC_GUARDED_BY(thread_checker_);
+
+ // Delay for unsignaled streams, which may be set before the stream exists.
+ int default_recv_base_minimum_delay_ms_ RTC_GUARDED_BY(thread_checker_) = 0;
+
+ const MediaConfig::Video video_config_ RTC_GUARDED_BY(thread_checker_);
+
+ // When the channel and demuxer get reconfigured, there is a window of time
+ // where we have to be prepared for packets arriving based on the old demuxer
+ // criteria because the streams live on the worker thread and the demuxer
+ // lives on the network thread. Because packets are posted from the network
+ // thread to the worker thread, they can still be in-flight when streams are
+  // reconfigured. This can happen when `demuxer_criteria_id_` and
+ // `demuxer_criteria_completed_id_` don't match. During this time, we do not
+ // want to create unsignalled receive streams and should instead drop the
+ // packets. E.g:
+ // * If RemoveRecvStream(old_ssrc) was recently called, there may be packets
+ // in-flight for that ssrc. This happens when a receiver becomes inactive.
+ // * If we go from one to many m= sections, the demuxer may change from
+ // forwarding all packets to only forwarding the configured ssrcs, so there
+ // is a risk of receiving ssrcs for other, recently added m= sections.
+ uint32_t demuxer_criteria_id_ RTC_GUARDED_BY(thread_checker_) = 0;
+ uint32_t demuxer_criteria_completed_id_ RTC_GUARDED_BY(thread_checker_) = 0;
+ absl::optional<int64_t> last_unsignalled_ssrc_creation_time_ms_
+ RTC_GUARDED_BY(thread_checker_);
+ std::set<uint32_t> send_ssrcs_ RTC_GUARDED_BY(thread_checker_);
+ std::set<uint32_t> receive_ssrcs_ RTC_GUARDED_BY(thread_checker_);
+
+ absl::optional<VideoCodecSettings> send_codec_
+ RTC_GUARDED_BY(thread_checker_);
+ std::vector<VideoCodecSettings> negotiated_codecs_
+ RTC_GUARDED_BY(thread_checker_);
+
+ std::vector<webrtc::RtpExtension> send_rtp_extensions_
+ RTC_GUARDED_BY(thread_checker_);
+
+ webrtc::VideoDecoderFactory* const decoder_factory_
+ RTC_GUARDED_BY(thread_checker_);
+ std::vector<VideoCodecSettings> recv_codecs_ RTC_GUARDED_BY(thread_checker_);
+ webrtc::RtpHeaderExtensionMap recv_rtp_extension_map_
+ RTC_GUARDED_BY(thread_checker_);
+ std::vector<webrtc::RtpExtension> recv_rtp_extensions_
+ RTC_GUARDED_BY(thread_checker_);
+ // See reason for keeping track of the FlexFEC payload type separately in
+ // comment in WebRtcVideoChannel::ChangedReceiverParameters.
+ int recv_flexfec_payload_type_ RTC_GUARDED_BY(thread_checker_);
+ webrtc::BitrateConstraints bitrate_config_ RTC_GUARDED_BY(thread_checker_);
+ // TODO(deadbeef): Don't duplicate information between
+ // send_params/recv_params, rtp_extensions, options, etc.
+ VideoSenderParameters send_params_ RTC_GUARDED_BY(thread_checker_);
+ VideoOptions default_send_options_ RTC_GUARDED_BY(thread_checker_);
+ VideoReceiverParameters recv_params_ RTC_GUARDED_BY(thread_checker_);
+ int64_t last_receive_stats_log_ms_ RTC_GUARDED_BY(thread_checker_);
+ const bool discard_unknown_ssrc_packets_ RTC_GUARDED_BY(thread_checker_);
+ // This is a stream param that comes from the remote description, but wasn't
+ // signaled with any a=ssrc lines. It holds information that was signaled
+ // before the unsignaled receive stream is created when the first packet is
+ // received.
+ StreamParams unsignaled_stream_params_ RTC_GUARDED_BY(thread_checker_);
+ // Per peer connection crypto options that last for the lifetime of the peer
+ // connection.
+ const webrtc::CryptoOptions crypto_options_ RTC_GUARDED_BY(thread_checker_);
+
+ // Optional frame transformer set on unsignaled streams.
+ rtc::scoped_refptr<webrtc::FrameTransformerInterface>
+ unsignaled_frame_transformer_ RTC_GUARDED_BY(thread_checker_);
+
+ // RTP parameters that need to be set when creating a video receive stream.
+ // Only used in Receiver mode - in Both mode, it reads those things from the
+ // codec.
+ webrtc::VideoReceiveStreamInterface::Config::Rtp rtp_config_;
+
+ // Callback invoked whenever the send codec changes.
+ // TODO(bugs.webrtc.org/13931): Remove again when coupling isn't needed.
+ absl::AnyInvocable<void()> send_codec_changed_callback_;
+ // Callback invoked whenever the list of SSRCs changes.
+ absl::AnyInvocable<void(const std::set<uint32_t>&)>
+ ssrc_list_changed_callback_;
+
+ const int receive_buffer_size_;
+};
+
+// Keeping the old name "WebRtcVideoChannel" around because some external
+// customers are using cricket::WebRtcVideoChannel::AdaptReason
+// TODO(bugs.webrtc.org/15216): Move this enum to an interface class and
+// delete this workaround.
+class WebRtcVideoChannel : public WebRtcVideoSendChannel {
+ public:
+ // Make all the values of AdaptReason available as
+ // WebRtcVideoChannel::ADAPT_xxx.
+ using WebRtcVideoSendChannel::AdaptReason;
+};
+
+} // namespace cricket
+
+#endif // MEDIA_ENGINE_WEBRTC_VIDEO_ENGINE_H_
diff --git a/third_party/libwebrtc/media/engine/webrtc_video_engine_unittest.cc b/third_party/libwebrtc/media/engine/webrtc_video_engine_unittest.cc
new file mode 100644
index 0000000000..e8b7ee4b2d
--- /dev/null
+++ b/third_party/libwebrtc/media/engine/webrtc_video_engine_unittest.cc
@@ -0,0 +1,10194 @@
+/*
+ * Copyright (c) 2004 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/engine/webrtc_video_engine.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <map>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "absl/algorithm/container.h"
+#include "absl/strings/match.h"
+#include "api/rtc_event_log/rtc_event_log.h"
+#include "api/rtp_parameters.h"
+#include "api/task_queue/default_task_queue_factory.h"
+#include "api/test/mock_encoder_selector.h"
+#include "api/test/mock_video_bitrate_allocator.h"
+#include "api/test/mock_video_bitrate_allocator_factory.h"
+#include "api/test/mock_video_decoder_factory.h"
+#include "api/test/mock_video_encoder_factory.h"
+#include "api/test/video/function_video_decoder_factory.h"
+#include "api/transport/field_trial_based_config.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "api/video/builtin_video_bitrate_allocator_factory.h"
+#include "api/video/i420_buffer.h"
+#include "api/video/video_bitrate_allocation.h"
+#include "api/video_codecs/h264_profile_level_id.h"
+#include "api/video_codecs/sdp_video_format.h"
+#include "api/video_codecs/video_codec.h"
+#include "api/video_codecs/video_decoder_factory.h"
+#include "api/video_codecs/video_decoder_factory_template.h"
+#include "api/video_codecs/video_decoder_factory_template_dav1d_adapter.h"
+#include "api/video_codecs/video_decoder_factory_template_libvpx_vp8_adapter.h"
+#include "api/video_codecs/video_decoder_factory_template_libvpx_vp9_adapter.h"
+#include "api/video_codecs/video_decoder_factory_template_open_h264_adapter.h"
+#include "api/video_codecs/video_encoder.h"
+#include "api/video_codecs/video_encoder_factory.h"
+#include "api/video_codecs/video_encoder_factory_template.h"
+#include "api/video_codecs/video_encoder_factory_template_libaom_av1_adapter.h"
+#include "api/video_codecs/video_encoder_factory_template_libvpx_vp8_adapter.h"
+#include "api/video_codecs/video_encoder_factory_template_libvpx_vp9_adapter.h"
+#include "api/video_codecs/video_encoder_factory_template_open_h264_adapter.h"
+#include "call/flexfec_receive_stream.h"
+#include "media/base/fake_frame_source.h"
+#include "media/base/fake_network_interface.h"
+#include "media/base/fake_video_renderer.h"
+#include "media/base/media_channel.h"
+#include "media/base/media_constants.h"
+#include "media/base/rtp_utils.h"
+#include "media/base/test_utils.h"
+#include "media/engine/fake_webrtc_call.h"
+#include "media/engine/fake_webrtc_video_engine.h"
+#include "media/engine/webrtc_voice_engine.h"
+#include "modules/rtp_rtcp/include/rtp_header_extension_map.h"
+#include "modules/rtp_rtcp/source/rtcp_packet.h"
+#include "modules/rtp_rtcp/source/rtp_packet.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "modules/video_coding/svc/scalability_mode_util.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/event.h"
+#include "rtc_base/experiments/min_video_bitrate_experiment.h"
+#include "rtc_base/fake_clock.h"
+#include "rtc_base/gunit.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/time_utils.h"
+#include "test/fake_decoder.h"
+#include "test/frame_forwarder.h"
+#include "test/gmock.h"
+#include "test/rtcp_packet_parser.h"
+#include "test/scoped_key_value_config.h"
+#include "test/time_controller/simulated_time_controller.h"
+#include "video/config/simulcast.h"
+
+using ::testing::_;
+using ::testing::Contains;
+using ::testing::Each;
+using ::testing::ElementsAre;
+using ::testing::ElementsAreArray;
+using ::testing::Eq;
+using ::testing::Field;
+using ::testing::Gt;
+using ::testing::IsEmpty;
+using ::testing::Lt;
+using ::testing::Pair;
+using ::testing::Return;
+using ::testing::SizeIs;
+using ::testing::StrNe;
+using ::testing::Values;
+using ::testing::WithArg;
+using ::webrtc::BitrateConstraints;
+using ::webrtc::Call;
+using ::webrtc::CallConfig;
+using ::webrtc::kDefaultScalabilityModeStr;
+using ::webrtc::RtpExtension;
+using ::webrtc::RtpPacket;
+using ::webrtc::RtpPacketReceived;
+using ::webrtc::ScalabilityMode;
+using ::webrtc::TimeDelta;
+using ::webrtc::Timestamp;
+using ::webrtc::test::RtcpPacketParser;
+
+namespace {
+
+static const uint8_t kRedRtxPayloadType = 125;
+
+static const uint32_t kSsrc = 1234u;
+static const uint32_t kSsrcs4[] = {1, 2, 3, 4};
+static const int kVideoWidth = 640;
+static const int kVideoHeight = 360;
+static const int kFramerate = 30;
+static constexpr TimeDelta kFrameDuration =
+ TimeDelta::Millis(1000 / kFramerate);
+
+static const uint32_t kSsrcs1[] = {1};
+static const uint32_t kSsrcs3[] = {1, 2, 3};
+static const uint32_t kRtxSsrcs1[] = {4};
+static const uint32_t kFlexfecSsrc = 5;
+static const uint32_t kIncomingUnsignalledSsrc = 0xC0FFEE;
+static const int64_t kUnsignalledReceiveStreamCooldownMs = 500;
+
+constexpr uint32_t kRtpHeaderSize = 12;
+constexpr size_t kNumSimulcastStreams = 3;
+
+static const char kUnsupportedExtensionName[] =
+ "urn:ietf:params:rtp-hdrext:unsupported";
+
+cricket::VideoCodec RemoveFeedbackParams(cricket::VideoCodec&& codec) {
+ codec.feedback_params = cricket::FeedbackParams();
+ return std::move(codec);
+}
+
+void VerifyCodecHasDefaultFeedbackParams(const cricket::VideoCodec& codec,
+ bool lntf_expected) {
+ EXPECT_EQ(lntf_expected,
+ codec.HasFeedbackParam(cricket::FeedbackParam(
+ cricket::kRtcpFbParamLntf, cricket::kParamValueEmpty)));
+ EXPECT_TRUE(codec.HasFeedbackParam(cricket::FeedbackParam(
+ cricket::kRtcpFbParamNack, cricket::kParamValueEmpty)));
+ EXPECT_TRUE(codec.HasFeedbackParam(cricket::FeedbackParam(
+ cricket::kRtcpFbParamNack, cricket::kRtcpFbNackParamPli)));
+ EXPECT_TRUE(codec.HasFeedbackParam(cricket::FeedbackParam(
+ cricket::kRtcpFbParamRemb, cricket::kParamValueEmpty)));
+ EXPECT_TRUE(codec.HasFeedbackParam(cricket::FeedbackParam(
+ cricket::kRtcpFbParamTransportCc, cricket::kParamValueEmpty)));
+ EXPECT_TRUE(codec.HasFeedbackParam(cricket::FeedbackParam(
+ cricket::kRtcpFbParamCcm, cricket::kRtcpFbCcmParamFir)));
+}
+
+// Return true if any codec in `codecs` is an RTX codec with associated
+// payload type `payload_type`.
+bool HasRtxCodec(const std::vector<cricket::VideoCodec>& codecs,
+ int payload_type) {
+ for (const cricket::VideoCodec& codec : codecs) {
+ int associated_payload_type;
+ if (absl::EqualsIgnoreCase(codec.name.c_str(), "rtx") &&
+ codec.GetParam(cricket::kCodecParamAssociatedPayloadType,
+ &associated_payload_type) &&
+ associated_payload_type == payload_type) {
+ return true;
+ }
+ }
+ return false;
+}
+
+// Return true if any codec in `codecs` is an RTX codec, independent of
+// payload type.
+bool HasAnyRtxCodec(const std::vector<cricket::VideoCodec>& codecs) {
+ for (const cricket::VideoCodec& codec : codecs) {
+ if (absl::EqualsIgnoreCase(codec.name.c_str(), "rtx")) {
+ return true;
+ }
+ }
+ return false;
+}
+
+const int* FindKeyByValue(const std::map<int, int>& m, int v) {
+ for (const auto& kv : m) {
+ if (kv.second == v)
+ return &kv.first;
+ }
+ return nullptr;
+}
+
+bool HasRtxReceiveAssociation(
+ const webrtc::VideoReceiveStreamInterface::Config& config,
+ int payload_type) {
+ return FindKeyByValue(config.rtp.rtx_associated_payload_types,
+ payload_type) != nullptr;
+}
+
+// Check that there's an Rtx payload type for each decoder.
+bool VerifyRtxReceiveAssociations(
+ const webrtc::VideoReceiveStreamInterface::Config& config) {
+ for (const auto& decoder : config.decoders) {
+ if (!HasRtxReceiveAssociation(config, decoder.payload_type))
+ return false;
+ }
+ return true;
+}
+
+rtc::scoped_refptr<webrtc::VideoFrameBuffer> CreateBlackFrameBuffer(
+ int width,
+ int height) {
+ rtc::scoped_refptr<webrtc::I420Buffer> buffer =
+ webrtc::I420Buffer::Create(width, height);
+ webrtc::I420Buffer::SetBlack(buffer.get());
+ return buffer;
+}
+
+void VerifySendStreamHasRtxTypes(const webrtc::VideoSendStream::Config& config,
+ const std::map<int, int>& rtx_types) {
+ std::map<int, int>::const_iterator it;
+ it = rtx_types.find(config.rtp.payload_type);
+ EXPECT_TRUE(it != rtx_types.end() &&
+ it->second == config.rtp.rtx.payload_type);
+
+ if (config.rtp.ulpfec.red_rtx_payload_type != -1) {
+ it = rtx_types.find(config.rtp.ulpfec.red_payload_type);
+ EXPECT_TRUE(it != rtx_types.end() &&
+ it->second == config.rtp.ulpfec.red_rtx_payload_type);
+ }
+}
+
+cricket::MediaConfig GetMediaConfig() {
+ cricket::MediaConfig media_config;
+ media_config.video.enable_cpu_adaptation = false;
+ return media_config;
+}
+
+// Values from GetMaxDefaultVideoBitrateKbps in webrtcvideoengine.cc.
+int GetMaxDefaultBitrateBps(size_t width, size_t height) {
+ if (width * height <= 320 * 240) {
+ return 600000;
+ } else if (width * height <= 640 * 480) {
+ return 1700000;
+ } else if (width * height <= 960 * 540) {
+ return 2000000;
+ } else {
+ return 2500000;
+ }
+}
+
+class MockVideoSource : public rtc::VideoSourceInterface<webrtc::VideoFrame> {
+ public:
+ MOCK_METHOD(void,
+ AddOrUpdateSink,
+ (rtc::VideoSinkInterface<webrtc::VideoFrame> * sink,
+ const rtc::VideoSinkWants& wants),
+ (override));
+ MOCK_METHOD(void,
+ RemoveSink,
+ (rtc::VideoSinkInterface<webrtc::VideoFrame> * sink),
+ (override));
+};
+
+class MockNetworkInterface : public cricket::MediaChannelNetworkInterface {
+ public:
+ MOCK_METHOD(bool,
+ SendPacket,
+ (rtc::CopyOnWriteBuffer * packet,
+ const rtc::PacketOptions& options),
+ (override));
+ MOCK_METHOD(bool,
+ SendRtcp,
+ (rtc::CopyOnWriteBuffer * packet,
+ const rtc::PacketOptions& options),
+ (override));
+ MOCK_METHOD(int,
+ SetOption,
+ (SocketType type, rtc::Socket::Option opt, int option),
+ (override));
+};
+
+std::vector<webrtc::Resolution> GetStreamResolutions(
+ const std::vector<webrtc::VideoStream>& streams) {
+ std::vector<webrtc::Resolution> res;
+ for (const auto& s : streams) {
+ if (s.active) {
+ res.push_back(
+ {rtc::checked_cast<int>(s.width), rtc::checked_cast<int>(s.height)});
+ }
+ }
+ return res;
+}
+
+RtpPacketReceived BuildVp8KeyFrame(uint32_t ssrc, uint8_t payload_type) {
+ RtpPacketReceived packet;
+ packet.SetMarker(true);
+ packet.SetPayloadType(payload_type);
+ packet.SetSsrc(ssrc);
+
+ // VP8 Keyframe + 1 byte payload
+ uint8_t* buf_ptr = packet.AllocatePayload(11);
+ memset(buf_ptr, 0, 11); // Pass MSAN (don't care about bytes 1-9)
+ buf_ptr[0] = 0x10; // Partition ID 0 + beginning of partition.
+ constexpr unsigned width = 1080;
+ constexpr unsigned height = 720;
+ buf_ptr[6] = width & 255;
+ buf_ptr[7] = width >> 8;
+ buf_ptr[8] = height & 255;
+ buf_ptr[9] = height >> 8;
+ return packet;
+}
+
+RtpPacketReceived BuildRtxPacket(uint32_t rtx_ssrc,
+ uint8_t rtx_payload_type,
+ const RtpPacketReceived& original_packet) {
+ constexpr size_t kRtxHeaderSize = 2;
+ RtpPacketReceived packet(original_packet);
+ packet.SetPayloadType(rtx_payload_type);
+ packet.SetSsrc(rtx_ssrc);
+
+ uint8_t* rtx_payload =
+ packet.AllocatePayload(original_packet.payload_size() + kRtxHeaderSize);
+ // Add OSN (original sequence number).
+ rtx_payload[0] = packet.SequenceNumber() >> 8;
+ rtx_payload[1] = packet.SequenceNumber();
+
+ // Add original payload data.
+ if (!original_packet.payload().empty()) {
+ memcpy(rtx_payload + kRtxHeaderSize, original_packet.payload().data(),
+ original_packet.payload().size());
+ }
+ return packet;
+}
+
+} // namespace
+
+// TODO(tommi): Consider replacing these macros with custom matchers.
+#define EXPECT_FRAME(c, w, h) \
+ EXPECT_EQ((c), renderer_.num_rendered_frames()); \
+ EXPECT_EQ((w), renderer_.width()); \
+ EXPECT_EQ((h), renderer_.height());
+
+#define EXPECT_FRAME_ON_RENDERER(r, c, w, h) \
+ EXPECT_EQ((c), (r).num_rendered_frames()); \
+ EXPECT_EQ((w), (r).width()); \
+ EXPECT_EQ((h), (r).height());
+
+namespace cricket {
+class WebRtcVideoEngineTest : public ::testing::Test {
+ public:
+ WebRtcVideoEngineTest() : WebRtcVideoEngineTest("") {}
+ explicit WebRtcVideoEngineTest(const std::string& field_trials)
+ : field_trials_(field_trials),
+ time_controller_(webrtc::Timestamp::Millis(4711)),
+ task_queue_factory_(time_controller_.CreateTaskQueueFactory()),
+ call_(Call::Create([&] {
+ CallConfig call_config(&event_log_);
+ call_config.task_queue_factory = task_queue_factory_.get();
+ call_config.trials = &field_trials_;
+ return call_config;
+ }())),
+ encoder_factory_(new cricket::FakeWebRtcVideoEncoderFactory),
+ decoder_factory_(new cricket::FakeWebRtcVideoDecoderFactory),
+ video_bitrate_allocator_factory_(
+ webrtc::CreateBuiltinVideoBitrateAllocatorFactory()),
+ engine_(std::unique_ptr<cricket::FakeWebRtcVideoEncoderFactory>(
+ encoder_factory_),
+ std::unique_ptr<cricket::FakeWebRtcVideoDecoderFactory>(
+ decoder_factory_),
+ field_trials_) {}
+
+ protected:
+ void AssignDefaultAptRtxTypes();
+ void AssignDefaultCodec();
+
+ // Find the index of the codec in the engine with the given name. The codec
+ // must be present.
+ size_t GetEngineCodecIndex(const std::string& name) const;
+
+ // Find the codec in the engine with the given name. The codec must be
+ // present.
+ cricket::VideoCodec GetEngineCodec(const std::string& name) const;
+ void AddSupportedVideoCodecType(
+ const std::string& name,
+ const std::vector<webrtc::ScalabilityMode>& scalability_modes = {});
+ std::unique_ptr<VideoMediaSendChannelInterface>
+ SetSendParamsWithAllSupportedCodecs();
+
+ std::unique_ptr<VideoMediaReceiveChannelInterface>
+ SetRecvParamsWithAllSupportedCodecs();
+ std::unique_ptr<VideoMediaReceiveChannelInterface>
+ SetRecvParamsWithSupportedCodecs(const std::vector<VideoCodec>& codecs);
+
+ void ExpectRtpCapabilitySupport(const char* uri, bool supported) const;
+
+ webrtc::test::ScopedKeyValueConfig field_trials_;
+ webrtc::GlobalSimulatedTimeController time_controller_;
+ webrtc::RtcEventLogNull event_log_;
+ std::unique_ptr<webrtc::TaskQueueFactory> task_queue_factory_;
+ // Used in WebRtcVideoEngineVoiceTest, but defined here so it's properly
+ // initialized when the constructor is called.
+ std::unique_ptr<Call> call_;
+ cricket::FakeWebRtcVideoEncoderFactory* encoder_factory_;
+ cricket::FakeWebRtcVideoDecoderFactory* decoder_factory_;
+ std::unique_ptr<webrtc::VideoBitrateAllocatorFactory>
+ video_bitrate_allocator_factory_;
+ WebRtcVideoEngine engine_;
+ absl::optional<VideoCodec> default_codec_;
+ std::map<int, int> default_apt_rtx_types_;
+};
+
+TEST_F(WebRtcVideoEngineTest, DefaultRtxCodecHasAssociatedPayloadTypeSet) {
+ encoder_factory_->AddSupportedVideoCodecType("VP8");
+ AssignDefaultCodec();
+
+ std::vector<VideoCodec> engine_codecs = engine_.send_codecs();
+ for (size_t i = 0; i < engine_codecs.size(); ++i) {
+ if (engine_codecs[i].name != kRtxCodecName)
+ continue;
+ int associated_payload_type;
+ EXPECT_TRUE(engine_codecs[i].GetParam(kCodecParamAssociatedPayloadType,
+ &associated_payload_type));
+ EXPECT_EQ(default_codec_->id, associated_payload_type);
+ return;
+ }
+ FAIL() << "No RTX codec found among default codecs.";
+}
+
+TEST_F(WebRtcVideoEngineTest, SupportsTimestampOffsetHeaderExtension) {
+ ExpectRtpCapabilitySupport(RtpExtension::kTimestampOffsetUri, true);
+}
+
+TEST_F(WebRtcVideoEngineTest, SupportsAbsoluteSenderTimeHeaderExtension) {
+ ExpectRtpCapabilitySupport(RtpExtension::kAbsSendTimeUri, true);
+}
+
+TEST_F(WebRtcVideoEngineTest, SupportsTransportSequenceNumberHeaderExtension) {
+ ExpectRtpCapabilitySupport(RtpExtension::kTransportSequenceNumberUri, true);
+}
+
+TEST_F(WebRtcVideoEngineTest, SupportsVideoRotationHeaderExtension) {
+ ExpectRtpCapabilitySupport(RtpExtension::kVideoRotationUri, true);
+}
+
+TEST_F(WebRtcVideoEngineTest, SupportsPlayoutDelayHeaderExtension) {
+ ExpectRtpCapabilitySupport(RtpExtension::kPlayoutDelayUri, true);
+}
+
+TEST_F(WebRtcVideoEngineTest, SupportsVideoContentTypeHeaderExtension) {
+ ExpectRtpCapabilitySupport(RtpExtension::kVideoContentTypeUri, true);
+}
+
+TEST_F(WebRtcVideoEngineTest, SupportsVideoTimingHeaderExtension) {
+ ExpectRtpCapabilitySupport(RtpExtension::kVideoTimingUri, true);
+}
+
+TEST_F(WebRtcVideoEngineTest, SupportsColorSpaceHeaderExtension) {
+ ExpectRtpCapabilitySupport(RtpExtension::kColorSpaceUri, true);
+}
+
+TEST_F(WebRtcVideoEngineTest, AdvertiseGenericDescriptor00) {
+ ExpectRtpCapabilitySupport(RtpExtension::kGenericFrameDescriptorUri00, false);
+}
+
+class WebRtcVideoEngineTestWithGenericDescriptor
+ : public WebRtcVideoEngineTest {
+ public:
+ WebRtcVideoEngineTestWithGenericDescriptor()
+ : WebRtcVideoEngineTest("WebRTC-GenericDescriptorAdvertised/Enabled/") {}
+};
+
+TEST_F(WebRtcVideoEngineTestWithGenericDescriptor,
+ AdvertiseGenericDescriptor00) {
+ ExpectRtpCapabilitySupport(RtpExtension::kGenericFrameDescriptorUri00, true);
+}
+
+class WebRtcVideoEngineTestWithDependencyDescriptor
+ : public WebRtcVideoEngineTest {
+ public:
+ WebRtcVideoEngineTestWithDependencyDescriptor()
+ : WebRtcVideoEngineTest(
+ "WebRTC-DependencyDescriptorAdvertised/Enabled/") {}
+};
+
+TEST_F(WebRtcVideoEngineTestWithDependencyDescriptor,
+ AdvertiseDependencyDescriptor) {
+ ExpectRtpCapabilitySupport(RtpExtension::kDependencyDescriptorUri, true);
+}
+
+TEST_F(WebRtcVideoEngineTest, AdvertiseVideoLayersAllocation) {
+ ExpectRtpCapabilitySupport(RtpExtension::kVideoLayersAllocationUri, false);
+}
+
+class WebRtcVideoEngineTestWithVideoLayersAllocation
+ : public WebRtcVideoEngineTest {
+ public:
+ WebRtcVideoEngineTestWithVideoLayersAllocation()
+ : WebRtcVideoEngineTest(
+ "WebRTC-VideoLayersAllocationAdvertised/Enabled/") {}
+};
+
+TEST_F(WebRtcVideoEngineTestWithVideoLayersAllocation,
+ AdvertiseVideoLayersAllocation) {
+ ExpectRtpCapabilitySupport(RtpExtension::kVideoLayersAllocationUri, true);
+}
+
+class WebRtcVideoFrameTrackingId : public WebRtcVideoEngineTest {
+ public:
+ WebRtcVideoFrameTrackingId()
+ : WebRtcVideoEngineTest(
+ "WebRTC-VideoFrameTrackingIdAdvertised/Enabled/") {}
+};
+
+TEST_F(WebRtcVideoFrameTrackingId, AdvertiseVideoFrameTrackingId) {
+ ExpectRtpCapabilitySupport(RtpExtension::kVideoFrameTrackingIdUri, true);
+}
+
+TEST_F(WebRtcVideoEngineTest, CVOSetHeaderExtensionBeforeCapturer) {
+ // Allocate the source first to prevent early destruction before channel's
+ // dtor is called.
+ ::testing::NiceMock<MockVideoSource> video_source;
+
+ AddSupportedVideoCodecType("VP8");
+
+ auto send_channel = SetSendParamsWithAllSupportedCodecs();
+ EXPECT_TRUE(send_channel->AddSendStream(StreamParams::CreateLegacy(kSsrc)));
+
+ // Add CVO extension.
+ const int id = 1;
+ cricket::VideoSenderParameters parameters;
+ parameters.codecs.push_back(GetEngineCodec("VP8"));
+ parameters.extensions.push_back(
+ RtpExtension(RtpExtension::kVideoRotationUri, id));
+ EXPECT_TRUE(send_channel->SetSenderParameters(parameters));
+
+ EXPECT_CALL(
+ video_source,
+ AddOrUpdateSink(_, Field(&rtc::VideoSinkWants::rotation_applied, false)));
+ // Set capturer.
+ EXPECT_TRUE(send_channel->SetVideoSend(kSsrc, nullptr, &video_source));
+
+ // Verify capturer has turned off applying rotation.
+ ::testing::Mock::VerifyAndClear(&video_source);
+
+ // Verify removing header extension turns on applying rotation.
+ parameters.extensions.clear();
+ EXPECT_CALL(
+ video_source,
+ AddOrUpdateSink(_, Field(&rtc::VideoSinkWants::rotation_applied, true)));
+
+ EXPECT_TRUE(send_channel->SetSenderParameters(parameters));
+}
+
+TEST_F(WebRtcVideoEngineTest, CVOSetHeaderExtensionBeforeAddSendStream) {
+ // Allocate the source first to prevent early destruction before channel's
+ // dtor is called.
+ ::testing::NiceMock<MockVideoSource> video_source;
+
+ AddSupportedVideoCodecType("VP8");
+
+ auto send_channel = SetSendParamsWithAllSupportedCodecs();
+ // Add CVO extension.
+ const int id = 1;
+ cricket::VideoSenderParameters parameters;
+ parameters.codecs.push_back(GetEngineCodec("VP8"));
+ parameters.extensions.push_back(
+ RtpExtension(RtpExtension::kVideoRotationUri, id));
+ EXPECT_TRUE(send_channel->SetSenderParameters(parameters));
+ EXPECT_TRUE(send_channel->AddSendStream(StreamParams::CreateLegacy(kSsrc)));
+
+ // Set source.
+ EXPECT_CALL(
+ video_source,
+ AddOrUpdateSink(_, Field(&rtc::VideoSinkWants::rotation_applied, false)));
+ EXPECT_TRUE(send_channel->SetVideoSend(kSsrc, nullptr, &video_source));
+}
+
+TEST_F(WebRtcVideoEngineTest, CVOSetHeaderExtensionAfterCapturer) {
+ ::testing::NiceMock<MockVideoSource> video_source;
+
+ AddSupportedVideoCodecType("VP8");
+ AddSupportedVideoCodecType("VP9");
+
+ auto send_channel = SetSendParamsWithAllSupportedCodecs();
+
+ EXPECT_TRUE(send_channel->AddSendStream(StreamParams::CreateLegacy(kSsrc)));
+
+ // Set capturer.
+ EXPECT_CALL(
+ video_source,
+ AddOrUpdateSink(_, Field(&rtc::VideoSinkWants::rotation_applied, true)));
+ EXPECT_TRUE(send_channel->SetVideoSend(kSsrc, nullptr, &video_source));
+
+ // Verify capturer has turned on applying rotation.
+ ::testing::Mock::VerifyAndClear(&video_source);
+
+ // Add CVO extension.
+ const int id = 1;
+ cricket::VideoSenderParameters parameters;
+ parameters.codecs.push_back(GetEngineCodec("VP8"));
+ parameters.codecs.push_back(GetEngineCodec("VP9"));
+ parameters.extensions.push_back(
+ RtpExtension(RtpExtension::kVideoRotationUri, id));
+ // Also remove the first codec to trigger a codec change as well.
+ parameters.codecs.erase(parameters.codecs.begin());
+ EXPECT_CALL(
+ video_source,
+ AddOrUpdateSink(_, Field(&rtc::VideoSinkWants::rotation_applied, false)));
+ EXPECT_TRUE(send_channel->SetSenderParameters(parameters));
+
+ // Verify capturer has turned off applying rotation.
+ ::testing::Mock::VerifyAndClear(&video_source);
+
+ // Verify removing header extension turns on applying rotation.
+ parameters.extensions.clear();
+ EXPECT_CALL(
+ video_source,
+ AddOrUpdateSink(_, Field(&rtc::VideoSinkWants::rotation_applied, true)));
+ EXPECT_TRUE(send_channel->SetSenderParameters(parameters));
+}
+
+TEST_F(WebRtcVideoEngineTest, SetSendFailsBeforeSettingCodecs) {
+ AddSupportedVideoCodecType("VP8");
+
+ std::unique_ptr<VideoMediaSendChannelInterface> send_channel =
+ engine_.CreateSendChannel(call_.get(), GetMediaConfig(), VideoOptions(),
+ webrtc::CryptoOptions(),
+ video_bitrate_allocator_factory_.get());
+
+ EXPECT_TRUE(send_channel->AddSendStream(StreamParams::CreateLegacy(123)));
+
+ EXPECT_FALSE(send_channel->SetSend(true))
+ << "Channel should not start without codecs.";
+ EXPECT_TRUE(send_channel->SetSend(false))
+ << "Channel should be stoppable even without set codecs.";
+}
+
+TEST_F(WebRtcVideoEngineTest, GetStatsWithoutCodecsSetDoesNotCrash) {
+ AddSupportedVideoCodecType("VP8");
+
+ std::unique_ptr<VideoMediaSendChannelInterface> send_channel =
+ engine_.CreateSendChannel(call_.get(), GetMediaConfig(), VideoOptions(),
+ webrtc::CryptoOptions(),
+ video_bitrate_allocator_factory_.get());
+ EXPECT_TRUE(send_channel->AddSendStream(StreamParams::CreateLegacy(123)));
+ VideoMediaSendInfo send_info;
+ send_channel->GetStats(&send_info);
+
+ std::unique_ptr<VideoMediaReceiveChannelInterface> receive_channel =
+ engine_.CreateReceiveChannel(call_.get(), GetMediaConfig(),
+ VideoOptions(), webrtc::CryptoOptions());
+ EXPECT_TRUE(receive_channel->AddRecvStream(StreamParams::CreateLegacy(123)));
+ VideoMediaReceiveInfo receive_info;
+ receive_channel->GetStats(&receive_info);
+}
+
+TEST_F(WebRtcVideoEngineTest, UseFactoryForVp8WhenSupported) {
+ AddSupportedVideoCodecType("VP8");
+
+ auto send_channel = SetSendParamsWithAllSupportedCodecs();
+
+ send_channel->OnReadyToSend(true);
+
+ EXPECT_TRUE(
+ send_channel->AddSendStream(cricket::StreamParams::CreateLegacy(kSsrc)));
+ EXPECT_EQ(0, encoder_factory_->GetNumCreatedEncoders());
+ EXPECT_TRUE(send_channel->SetSend(true));
+ webrtc::test::FrameForwarder frame_forwarder;
+ cricket::FakeFrameSource frame_source(1280, 720,
+ rtc::kNumMicrosecsPerSec / 30);
+ EXPECT_TRUE(send_channel->SetVideoSend(kSsrc, nullptr, &frame_forwarder));
+ frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame());
+ time_controller_.AdvanceTime(webrtc::TimeDelta::Zero());
+  // Sending one frame will have allocated the encoder.
+ ASSERT_TRUE(encoder_factory_->WaitForCreatedVideoEncoders(1));
+ EXPECT_GT(encoder_factory_->encoders()[0]->GetNumEncodedFrames(), 0);
+
+ int num_created_encoders = encoder_factory_->GetNumCreatedEncoders();
+ EXPECT_EQ(num_created_encoders, 1);
+
+ // Setting codecs of the same type should not reallocate any encoders
+ // (expecting a no-op).
+ cricket::VideoSenderParameters parameters;
+ parameters.codecs.push_back(GetEngineCodec("VP8"));
+ EXPECT_TRUE(send_channel->SetSenderParameters(parameters));
+ EXPECT_EQ(num_created_encoders, encoder_factory_->GetNumCreatedEncoders());
+
+ // Remove stream previously added to free the external encoder instance.
+ EXPECT_TRUE(send_channel->RemoveSendStream(kSsrc));
+ EXPECT_EQ(0u, encoder_factory_->encoders().size());
+}
+
+// Test that when an encoder factory supports H264, we add an RTX
+// codec for it.
+// TODO(deadbeef): This test should be updated if/when we start
+// adding RTX codecs for unrecognized codec names.
+TEST_F(WebRtcVideoEngineTest, RtxCodecAddedForH264Codec) {
+  using webrtc::H264Level;
+  using webrtc::H264Profile;
+  using webrtc::H264ProfileLevelId;
+  using webrtc::H264ProfileLevelIdToString;
+  // Three distinct H264 profiles; each is registered as its own SDP format
+  // and each should be paired with its own associated RTX codec.
+  webrtc::SdpVideoFormat h264_constrained_baseline("H264");
+  h264_constrained_baseline.parameters[kH264FmtpProfileLevelId] =
+      *H264ProfileLevelIdToString(H264ProfileLevelId(
+          H264Profile::kProfileConstrainedBaseline, H264Level::kLevel1));
+  webrtc::SdpVideoFormat h264_constrained_high("H264");
+  h264_constrained_high.parameters[kH264FmtpProfileLevelId] =
+      *H264ProfileLevelIdToString(H264ProfileLevelId(
+          H264Profile::kProfileConstrainedHigh, H264Level::kLevel1));
+  webrtc::SdpVideoFormat h264_high("H264");
+  h264_high.parameters[kH264FmtpProfileLevelId] = *H264ProfileLevelIdToString(
+      H264ProfileLevelId(H264Profile::kProfileHigh, H264Level::kLevel1));
+
+  encoder_factory_->AddSupportedVideoCodec(h264_constrained_baseline);
+  encoder_factory_->AddSupportedVideoCodec(h264_constrained_high);
+  encoder_factory_->AddSupportedVideoCodec(h264_high);
+
+  // First figure out what payload types the test codecs got assigned.
+  const std::vector<cricket::VideoCodec> codecs = engine_.send_codecs();
+  // Now search for RTX codecs for them. Expect that they all have associated
+  // RTX codecs.
+  EXPECT_TRUE(HasRtxCodec(
+      codecs, FindMatchingVideoCodec(
+                  codecs, cricket::CreateVideoCodec(h264_constrained_baseline))
+                  ->id));
+  EXPECT_TRUE(HasRtxCodec(
+      codecs, FindMatchingVideoCodec(
+                  codecs, cricket::CreateVideoCodec(h264_constrained_high))
+                  ->id));
+  EXPECT_TRUE(HasRtxCodec(
+      codecs,
+      FindMatchingVideoCodec(codecs, cricket::CreateVideoCodec(h264_high))
+          ->id));
+}
+
+#if defined(RTC_ENABLE_VP9)
+// Smoke test: adding a receive stream succeeds when VP9 is the only
+// supported codec (decoder construction itself is deferred until media
+// arrives elsewhere in this file).
+TEST_F(WebRtcVideoEngineTest, CanConstructDecoderForVp9EncoderFactory) {
+  AddSupportedVideoCodecType("VP9");
+
+  auto receive_channel = SetRecvParamsWithAllSupportedCodecs();
+
+  EXPECT_TRUE(receive_channel->AddRecvStream(
+      cricket::StreamParams::CreateLegacy(kSsrc)));
+}
+#endif  // defined(RTC_ENABLE_VP9)
+
+// Verifies that capture timestamps propagate to the send stream: feeding
+// frames at 60 fps, then at 30 fps, must produce inter-frame timestamp
+// deltas matching each rate (to within 1 ms of rounding error).
+TEST_F(WebRtcVideoEngineTest, PropagatesInputFrameTimestamp) {
+  AddSupportedVideoCodecType("VP8");
+  // Swap in a FakeCall so the test can inspect the created send streams.
+  FakeCall* fake_call = new FakeCall();
+  call_.reset(fake_call);
+  auto send_channel = SetSendParamsWithAllSupportedCodecs();
+
+  EXPECT_TRUE(
+      send_channel->AddSendStream(cricket::StreamParams::CreateLegacy(kSsrc)));
+
+  webrtc::test::FrameForwarder frame_forwarder;
+  cricket::FakeFrameSource frame_source(1280, 720,
+                                        rtc::kNumMicrosecsPerSec / 60);
+  EXPECT_TRUE(send_channel->SetVideoSend(kSsrc, nullptr, &frame_forwarder));
+  send_channel->SetSend(true);
+
+  FakeVideoSendStream* stream = fake_call->GetVideoSendStreams()[0];
+
+  frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame());
+  int64_t last_timestamp = stream->GetLastTimestamp();
+  for (int i = 0; i < 10; i++) {
+    frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame());
+    int64_t timestamp = stream->GetLastTimestamp();
+    int64_t interval = timestamp - last_timestamp;
+
+    // Precision changes from nanosecond to millisecond.
+    // Allow error to be no more than 1.
+    EXPECT_NEAR(cricket::VideoFormat::FpsToInterval(60) / 1E6, interval, 1);
+
+    last_timestamp = timestamp;
+  }
+
+  // Switch the source to 30 fps and repeat the check at the new rate.
+  frame_forwarder.IncomingCapturedFrame(
+      frame_source.GetFrame(1280, 720, webrtc::VideoRotation::kVideoRotation_0,
+                            rtc::kNumMicrosecsPerSec / 30));
+  last_timestamp = stream->GetLastTimestamp();
+  for (int i = 0; i < 10; i++) {
+    frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame(
+        1280, 720, webrtc::VideoRotation::kVideoRotation_0,
+        rtc::kNumMicrosecsPerSec / 30));
+    int64_t timestamp = stream->GetLastTimestamp();
+    int64_t interval = timestamp - last_timestamp;
+
+    // Precision changes from nanosecond to millisecond.
+    // Allow error to be no more than 1.
+    EXPECT_NEAR(cricket::VideoFormat::FpsToInterval(30) / 1E6, interval, 1);
+
+    last_timestamp = timestamp;
+  }
+
+  // Remove stream previously added to free the external encoder instance.
+  EXPECT_TRUE(send_channel->RemoveSendStream(kSsrc));
+}
+
+// Populates default_apt_rtx_types_ with the associated-payload-type -> RTX
+// payload-type mapping taken from the engine's current send codec list.
+void WebRtcVideoEngineTest::AssignDefaultAptRtxTypes() {
+  std::vector<VideoCodec> engine_codecs = engine_.send_codecs();
+  RTC_DCHECK(!engine_codecs.empty());
+  for (const cricket::VideoCodec& engine_codec : engine_codecs) {
+    // Only RTX entries carry an associated payload type parameter.
+    if (engine_codec.name != "rtx")
+      continue;
+    int apt;
+    if (engine_codec.GetParam(kCodecParamAssociatedPayloadType, &apt))
+      default_apt_rtx_types_[apt] = engine_codec.id;
+  }
+}
+
+// Stores the first "real" media codec (i.e. not RTX/RED/ULPFEC/FlexFEC)
+// from the engine's send codec list into default_codec_.
+void WebRtcVideoEngineTest::AssignDefaultCodec() {
+  std::vector<VideoCodec> engine_codecs = engine_.send_codecs();
+  RTC_DCHECK(!engine_codecs.empty());
+  bool codec_set = false;
+  for (const cricket::VideoCodec& engine_codec : engine_codecs) {
+    const bool is_auxiliary = engine_codec.name == "rtx" ||
+                              engine_codec.name == "red" ||
+                              engine_codec.name == "ulpfec" ||
+                              engine_codec.name == "flexfec-03";
+    if (!is_auxiliary) {
+      default_codec_ = engine_codec;
+      codec_set = true;
+      break;  // Keep the first match only.
+    }
+  }
+
+  RTC_DCHECK(codec_set);
+}
+
+// Returns the index of the engine send codec matching `name`
+// (case-insensitive). For H264, only the Constrained Baseline profile entry
+// is accepted. Records a test failure and returns size_t(-1) if not found.
+size_t WebRtcVideoEngineTest::GetEngineCodecIndex(
+    const std::string& name) const {
+  const std::vector<cricket::VideoCodec> codecs = engine_.send_codecs();
+  for (size_t i = 0; i < codecs.size(); ++i) {
+    const cricket::VideoCodec engine_codec = codecs[i];
+    if (!absl::EqualsIgnoreCase(name, engine_codec.name))
+      continue;
+    // The tests only use H264 Constrained Baseline. Make sure we don't return
+    // an internal H264 codec from the engine with a different H264 profile.
+    if (absl::EqualsIgnoreCase(name, kH264CodecName)) {
+      const absl::optional<webrtc::H264ProfileLevelId> profile_level_id =
+          webrtc::ParseSdpForH264ProfileLevelId(engine_codec.params);
+      // Guard the optional: ParseSdpForH264ProfileLevelId can return
+      // absl::nullopt for unparsable fmtp parameters, and dereferencing it
+      // unchecked would be undefined behavior.
+      if (!profile_level_id ||
+          profile_level_id->profile !=
+              webrtc::H264Profile::kProfileConstrainedBaseline) {
+        continue;
+      }
+    }
+    return i;
+  }
+  // This point should never be reached.
+  ADD_FAILURE() << "Unrecognized codec name: " << name;
+  return -1;
+}
+
+// Convenience wrapper: looks up `name` and returns the matching engine
+// send codec (with its assigned payload type).
+cricket::VideoCodec WebRtcVideoEngineTest::GetEngineCodec(
+    const std::string& name) const {
+  const size_t index = GetEngineCodecIndex(name);
+  return engine_.send_codecs()[index];
+}
+
+// Registers `name` (optionally with scalability modes) in both the fake
+// encoder and fake decoder factories so the engine advertises it on both
+// the send and receive side.
+void WebRtcVideoEngineTest::AddSupportedVideoCodecType(
+    const std::string& name,
+    const std::vector<webrtc::ScalabilityMode>& scalability_modes) {
+  encoder_factory_->AddSupportedVideoCodecType(name, scalability_modes);
+  decoder_factory_->AddSupportedVideoCodecType(name);
+}
+
+// Creates a send channel and applies sender parameters containing every
+// codec the fake encoder factory supports, deduplicated, with payload
+// types resolved through the engine.
+std::unique_ptr<VideoMediaSendChannelInterface>
+WebRtcVideoEngineTest::SetSendParamsWithAllSupportedCodecs() {
+  std::unique_ptr<VideoMediaSendChannelInterface> channel =
+      engine_.CreateSendChannel(call_.get(), GetMediaConfig(), VideoOptions(),
+                                webrtc::CryptoOptions(),
+                                video_bitrate_allocator_factory_.get());
+  cricket::VideoSenderParameters parameters;
+  // We need to look up the codec in the engine to get the correct payload type.
+  for (const webrtc::SdpVideoFormat& format :
+       encoder_factory_->GetSupportedFormats()) {
+    cricket::VideoCodec engine_codec = GetEngineCodec(format.name);
+    if (!absl::c_linear_search(parameters.codecs, engine_codec)) {
+      parameters.codecs.push_back(engine_codec);
+    }
+  }
+
+  EXPECT_TRUE(channel->SetSenderParameters(parameters));
+
+  return channel;
+}
+
+// Creates a receive channel and applies receiver parameters containing
+// exactly the given `codecs`.
+std::unique_ptr<VideoMediaReceiveChannelInterface>
+WebRtcVideoEngineTest::SetRecvParamsWithSupportedCodecs(
+    const std::vector<VideoCodec>& codecs) {
+  std::unique_ptr<VideoMediaReceiveChannelInterface> channel =
+      engine_.CreateReceiveChannel(call_.get(), GetMediaConfig(),
+                                   VideoOptions(), webrtc::CryptoOptions());
+  cricket::VideoReceiverParameters parameters;
+  parameters.codecs = codecs;
+  EXPECT_TRUE(channel->SetReceiverParameters(parameters));
+
+  return channel;
+}
+
+// Creates a receive channel configured with every codec the fake decoder
+// factory supports (deduplicated, payload types resolved via the engine).
+std::unique_ptr<VideoMediaReceiveChannelInterface>
+WebRtcVideoEngineTest::SetRecvParamsWithAllSupportedCodecs() {
+  std::vector<VideoCodec> codecs;
+  for (const webrtc::SdpVideoFormat& format :
+       decoder_factory_->GetSupportedFormats()) {
+    cricket::VideoCodec engine_codec = GetEngineCodec(format.name);
+    if (!absl::c_linear_search(codecs, engine_codec)) {
+      codecs.push_back(engine_codec);
+    }
+  }
+
+  return SetRecvParamsWithSupportedCodecs(codecs);
+}
+
+// Asserts that `uri` is present (supported == true) or absent
+// (supported == false) among the engine's default-enabled RTP header
+// extensions.
+void WebRtcVideoEngineTest::ExpectRtpCapabilitySupport(const char* uri,
+                                                       bool supported) const {
+  const std::vector<webrtc::RtpExtension> header_extensions =
+      GetDefaultEnabledRtpHeaderExtensions(engine_);
+  if (supported) {
+    EXPECT_THAT(header_extensions, Contains(Field(&RtpExtension::uri, uri)));
+  } else {
+    EXPECT_THAT(header_extensions, Each(Field(&RtpExtension::uri, StrNe(uri))));
+  }
+}
+
+// Verifies that an RTX packet on an unsignaled SSRC still feeds the
+// transport-feedback machinery: after injecting one such packet, transport
+// feedback RTCP must be sent within a second.
+TEST_F(WebRtcVideoEngineTest, SendsFeedbackAfterUnsignaledRtxPacket) {
+  // Setup a channel with VP8, RTX and transport sequence number header
+  // extension. Receive stream is not explicitly configured.
+  AddSupportedVideoCodecType("VP8");
+  std::vector<VideoCodec> supported_codecs =
+      engine_.recv_codecs(/*include_rtx=*/true);
+  ASSERT_EQ(supported_codecs[1].name, "rtx");
+  int rtx_payload_type = supported_codecs[1].id;
+  MockNetworkInterface network;
+  RtcpPacketParser rtcp_parser;
+  // Capture every outgoing RTCP packet into the parser.
+  ON_CALL(network, SendRtcp)
+      .WillByDefault(
+          testing::DoAll(WithArg<0>([&](rtc::CopyOnWriteBuffer* packet) {
+                           ASSERT_TRUE(rtcp_parser.Parse(*packet));
+                         }),
+                         Return(true)));
+  std::unique_ptr<VideoMediaSendChannelInterface> send_channel =
+      engine_.CreateSendChannel(call_.get(), GetMediaConfig(), VideoOptions(),
+                                webrtc::CryptoOptions(),
+                                video_bitrate_allocator_factory_.get());
+  std::unique_ptr<VideoMediaReceiveChannelInterface> receive_channel =
+      engine_.CreateReceiveChannel(call_.get(), GetMediaConfig(),
+                                   VideoOptions(), webrtc::CryptoOptions());
+  cricket::VideoReceiverParameters parameters;
+  parameters.codecs = supported_codecs;
+  const int kTransportSeqExtensionId = 1;
+  parameters.extensions.push_back(RtpExtension(
+      RtpExtension::kTransportSequenceNumberUri, kTransportSeqExtensionId));
+  ASSERT_TRUE(receive_channel->SetReceiverParameters(parameters));
+  send_channel->SetInterface(&network);
+  receive_channel->SetInterface(&network);
+  send_channel->OnReadyToSend(true);
+  receive_channel->SetReceive(true);
+
+  // Inject a RTX packet.
+  webrtc::RtpHeaderExtensionMap extension_map(parameters.extensions);
+  webrtc::RtpPacketReceived packet(&extension_map);
+  packet.SetMarker(true);
+  packet.SetPayloadType(rtx_payload_type);
+  packet.SetSsrc(999);
+  packet.SetExtension<webrtc::TransportSequenceNumber>(7);
+  uint8_t* buf_ptr = packet.AllocatePayload(11);
+  memset(buf_ptr, 0, 11);  // Pass MSAN (don't care about bytes 1-9)
+  receive_channel->OnPacketReceived(packet);
+
+  // Expect that feedback is sent after a while.
+  time_controller_.AdvanceTime(webrtc::TimeDelta::Seconds(1));
+  EXPECT_GT(rtcp_parser.transport_feedback()->num_packets(), 0);
+
+  // Detach the mock network before it goes out of scope.
+  send_channel->SetInterface(nullptr);
+  receive_channel->SetInterface(nullptr);
+}
+
+// A valid "WebRTC-ReceiveBufferSize" field-trial value is applied as the
+// socket receive buffer size on the network interface.
+TEST_F(WebRtcVideoEngineTest, ReceiveBufferSizeViaFieldTrial) {
+  webrtc::test::ScopedKeyValueConfig override_field_trials(
+      field_trials_, "WebRTC-ReceiveBufferSize/size_bytes:10000/");
+  std::unique_ptr<VideoMediaReceiveChannelInterface> receive_channel =
+      engine_.CreateReceiveChannel(call_.get(), GetMediaConfig(),
+                                   VideoOptions(), webrtc::CryptoOptions());
+  cricket::FakeNetworkInterface network;
+  receive_channel->SetInterface(&network);
+  EXPECT_EQ(10000, network.recvbuf_size());
+  receive_channel->SetInterface(nullptr);
+}
+
+// A "WebRTC-ReceiveBufferSize" value above the allowed maximum is rejected
+// and the default kVideoRtpRecvBufferSize is used instead.
+// NOTE(review): this test was previously named TooLow... although 10000001
+// is above the cap; renamed so the name matches the value it exercises.
+TEST_F(WebRtcVideoEngineTest, TooHighReceiveBufferSizeViaFieldTrial) {
+  // 10000001 is too high, it will revert to the default
+  // kVideoRtpRecvBufferSize.
+  webrtc::test::ScopedKeyValueConfig override_field_trials(
+      field_trials_, "WebRTC-ReceiveBufferSize/size_bytes:10000001/");
+  std::unique_ptr<VideoMediaReceiveChannelInterface> receive_channel =
+      engine_.CreateReceiveChannel(call_.get(), GetMediaConfig(),
+                                   VideoOptions(), webrtc::CryptoOptions());
+  cricket::FakeNetworkInterface network;
+  receive_channel->SetInterface(&network);
+  EXPECT_EQ(kVideoRtpRecvBufferSize, network.recvbuf_size());
+  receive_channel->SetInterface(nullptr);
+}
+
+// A "WebRTC-ReceiveBufferSize" value below the allowed minimum is rejected
+// and the default kVideoRtpRecvBufferSize is used instead.
+// NOTE(review): renamed from TooHigh... — 9999 is below the floor.
+TEST_F(WebRtcVideoEngineTest, TooLowReceiveBufferSizeViaFieldTrial) {
+  // 9999 is too low, it will revert to the default kVideoRtpRecvBufferSize.
+  webrtc::test::ScopedKeyValueConfig override_field_trials(
+      field_trials_, "WebRTC-ReceiveBufferSize/size_bytes:9999/");
+  std::unique_ptr<VideoMediaReceiveChannelInterface> receive_channel =
+      engine_.CreateReceiveChannel(call_.get(), GetMediaConfig(),
+                                   VideoOptions(), webrtc::CryptoOptions());
+  cricket::FakeNetworkInterface network;
+  receive_channel->SetInterface(&network);
+  EXPECT_EQ(kVideoRtpRecvBufferSize, network.recvbuf_size());
+  receive_channel->SetInterface(nullptr);
+}
+
+// Verifies that an RTX packet arriving on an unsignaled SSRC is associated
+// with the unsignaled media stream and its wrapped media packet is
+// recovered, completing the frame and triggering decoder creation.
+TEST_F(WebRtcVideoEngineTest, UpdatesUnsignaledRtxSsrcAndRecoversPayload) {
+  // Setup a channel with VP8, RTX and transport sequence number header
+  // extension. Receive stream is not explicitly configured.
+  AddSupportedVideoCodecType("VP8");
+  std::vector<VideoCodec> supported_codecs =
+      engine_.recv_codecs(/*include_rtx=*/true);
+  ASSERT_EQ(supported_codecs[1].name, "rtx");
+  int rtx_payload_type = supported_codecs[1].id;
+
+  std::unique_ptr<VideoMediaReceiveChannelInterface> receive_channel =
+      engine_.CreateReceiveChannel(call_.get(), GetMediaConfig(),
+                                   VideoOptions(), webrtc::CryptoOptions());
+  cricket::VideoReceiverParameters parameters;
+  parameters.codecs = supported_codecs;
+  ASSERT_TRUE(receive_channel->SetReceiverParameters(parameters));
+  receive_channel->SetReceive(true);
+
+  // Receive a normal payload packet. It is not a complete frame since the
+  // marker bit is not set.
+  RtpPacketReceived packet_1 =
+      BuildVp8KeyFrame(/*ssrc*/ 123, supported_codecs[0].id);
+  packet_1.SetMarker(false);
+  receive_channel->OnPacketReceived(packet_1);
+
+  time_controller_.AdvanceTime(webrtc::TimeDelta::Millis(100));
+  // No complete frame received. No decoder created yet.
+  EXPECT_THAT(decoder_factory_->decoders(), IsEmpty());
+
+  RtpPacketReceived packet_2;
+  packet_2.SetSsrc(123);
+  packet_2.SetPayloadType(supported_codecs[0].id);
+  packet_2.SetSequenceNumber(packet_1.SequenceNumber() + 1);
+  // NOTE(review): only the first byte of the 500-byte payload is zeroed —
+  // presumably just the VP8 payload descriptor matters here; confirm the
+  // remaining uninitialized bytes cannot trip MSAN.
+  memset(packet_2.AllocatePayload(500), 0, 1);
+  packet_2.SetMarker(true);  // Frame is complete.
+  // Wrap packet_2 in RTX on a different, also unsignaled, SSRC.
+  RtpPacketReceived rtx_packet =
+      BuildRtxPacket(345, rtx_payload_type, packet_2);
+
+  receive_channel->OnPacketReceived(rtx_packet);
+
+  time_controller_.AdvanceTime(webrtc::TimeDelta::Millis(0));
+  ASSERT_THAT(decoder_factory_->decoders(), Not(IsEmpty()));
+  EXPECT_EQ(decoder_factory_->decoders()[0]->GetNumFramesReceived(), 1);
+}
+
+// Verifies that a 3-SSRC VP8 send stream goes through the simulcast
+// adapter: multiple encoders are created, each configured for a single
+// stream with increasing resolution.
+TEST_F(WebRtcVideoEngineTest, UsesSimulcastAdapterForVp8Factories) {
+  AddSupportedVideoCodecType("VP8");
+
+  auto send_channel = SetSendParamsWithAllSupportedCodecs();
+
+  std::vector<uint32_t> ssrcs = MAKE_VECTOR(kSsrcs3);
+
+  EXPECT_TRUE(
+      send_channel->AddSendStream(CreateSimStreamParams("cname", ssrcs)));
+  EXPECT_TRUE(send_channel->SetSend(true));
+
+  webrtc::test::FrameForwarder frame_forwarder;
+  cricket::FakeFrameSource frame_source(1280, 720,
+                                        rtc::kNumMicrosecsPerSec / 60);
+  EXPECT_TRUE(
+      send_channel->SetVideoSend(ssrcs.front(), nullptr, &frame_forwarder));
+  frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame());
+  time_controller_.AdvanceTime(webrtc::TimeDelta::Zero());
+  ASSERT_TRUE(encoder_factory_->WaitForCreatedVideoEncoders(2));
+
+  // Verify that encoders are configured for simulcast through adapter
+  // (increasing resolution and only configured to send one stream each).
+  int prev_width = -1;
+  for (size_t i = 0; i < encoder_factory_->encoders().size(); ++i) {
+    ASSERT_TRUE(encoder_factory_->encoders()[i]->WaitForInitEncode());
+    webrtc::VideoCodec codec_settings =
+        encoder_factory_->encoders()[i]->GetCodecSettings();
+    EXPECT_EQ(0, codec_settings.numberOfSimulcastStreams);
+    EXPECT_GT(codec_settings.width, prev_width);
+    prev_width = codec_settings.width;
+  }
+
+  EXPECT_TRUE(send_channel->SetVideoSend(ssrcs.front(), nullptr, nullptr));
+
+  // Destroying the channel must free all encoders.
+  send_channel.reset();
+  ASSERT_EQ(0u, encoder_factory_->encoders().size());
+}
+
+// Verifies a send channel configured for H264 can switch to VP8 at runtime:
+// after the codec change, the next frame causes an encoder swap while the
+// total encoder count stays at one.
+TEST_F(WebRtcVideoEngineTest, ChannelWithH264CanChangeToVp8) {
+  AddSupportedVideoCodecType("VP8");
+  AddSupportedVideoCodecType("H264");
+
+  // Frame source.
+  webrtc::test::FrameForwarder frame_forwarder;
+  cricket::FakeFrameSource frame_source(1280, 720,
+                                        rtc::kNumMicrosecsPerSec / 30);
+
+  std::unique_ptr<VideoMediaSendChannelInterface> send_channel =
+      engine_.CreateSendChannel(call_.get(), GetMediaConfig(), VideoOptions(),
+                                webrtc::CryptoOptions(),
+                                video_bitrate_allocator_factory_.get());
+  cricket::VideoSenderParameters parameters;
+  parameters.codecs.push_back(GetEngineCodec("H264"));
+  EXPECT_TRUE(send_channel->SetSenderParameters(parameters));
+
+  EXPECT_TRUE(
+      send_channel->AddSendStream(cricket::StreamParams::CreateLegacy(kSsrc)));
+  EXPECT_TRUE(send_channel->SetVideoSend(kSsrc, nullptr, &frame_forwarder));
+  // Sending one frame will have allocated the encoder.
+  frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame());
+  time_controller_.AdvanceTime(webrtc::TimeDelta::Zero());
+
+  ASSERT_EQ(1u, encoder_factory_->encoders().size());
+
+  cricket::VideoSenderParameters new_parameters;
+  new_parameters.codecs.push_back(GetEngineCodec("VP8"));
+  EXPECT_TRUE(send_channel->SetSenderParameters(new_parameters));
+
+  // Sending one frame will switch encoder.
+  frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame());
+  time_controller_.AdvanceTime(webrtc::TimeDelta::Zero());
+
+  EXPECT_EQ(1u, encoder_factory_->encoders().size());
+}
+
+// With a factory that supports both VP8 and H264, selecting VP8 with three
+// SSRCs must still use the simulcast adapter (>= 2 VP8 encoders created).
+TEST_F(WebRtcVideoEngineTest,
+       UsesSimulcastAdapterForVp8WithCombinedVP8AndH264Factory) {
+  AddSupportedVideoCodecType("VP8");
+  AddSupportedVideoCodecType("H264");
+
+  std::unique_ptr<VideoMediaSendChannelInterface> send_channel =
+      engine_.CreateSendChannel(call_.get(), GetMediaConfig(), VideoOptions(),
+                                webrtc::CryptoOptions(),
+                                video_bitrate_allocator_factory_.get());
+  cricket::VideoSenderParameters parameters;
+  parameters.codecs.push_back(GetEngineCodec("VP8"));
+  EXPECT_TRUE(send_channel->SetSenderParameters(parameters));
+
+  std::vector<uint32_t> ssrcs = MAKE_VECTOR(kSsrcs3);
+
+  EXPECT_TRUE(
+      send_channel->AddSendStream(CreateSimStreamParams("cname", ssrcs)));
+  EXPECT_TRUE(send_channel->SetSend(true));
+
+  // Send a fake frame, or else the media engine will configure the simulcast
+  // encoder adapter at a low-enough size that it'll only create a single
+  // encoder layer.
+  webrtc::test::FrameForwarder frame_forwarder;
+  cricket::FakeFrameSource frame_source(1280, 720,
+                                        rtc::kNumMicrosecsPerSec / 30);
+  EXPECT_TRUE(
+      send_channel->SetVideoSend(ssrcs.front(), nullptr, &frame_forwarder));
+  frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame());
+  time_controller_.AdvanceTime(webrtc::TimeDelta::Zero());
+
+  ASSERT_TRUE(encoder_factory_->WaitForCreatedVideoEncoders(2));
+  ASSERT_TRUE(encoder_factory_->encoders()[0]->WaitForInitEncode());
+  EXPECT_EQ(webrtc::kVideoCodecVP8,
+            encoder_factory_->encoders()[0]->GetCodecSettings().codecType);
+
+  send_channel.reset();
+  // Make sure DestroyVideoEncoder was called on the factory.
+  EXPECT_EQ(0u, encoder_factory_->encoders().size());
+}
+
+// With a combined VP8/H264 factory and a single-SSRC H264 stream (no
+// simulcast adapter), exactly one H264 encoder is created and it is
+// destroyed when the channel goes away.
+TEST_F(WebRtcVideoEngineTest,
+       DestroysNonSimulcastEncoderFromCombinedVP8AndH264Factory) {
+  AddSupportedVideoCodecType("VP8");
+  AddSupportedVideoCodecType("H264");
+
+  std::unique_ptr<VideoMediaSendChannelInterface> send_channel =
+      engine_.CreateSendChannel(call_.get(), GetMediaConfig(), VideoOptions(),
+                                webrtc::CryptoOptions(),
+                                video_bitrate_allocator_factory_.get());
+  cricket::VideoSenderParameters parameters;
+  parameters.codecs.push_back(GetEngineCodec("H264"));
+  EXPECT_TRUE(send_channel->SetSenderParameters(parameters));
+
+  EXPECT_TRUE(
+      send_channel->AddSendStream(cricket::StreamParams::CreateLegacy(kSsrc)));
+
+  // Send a frame of 720p. This should trigger a "real" encoder initialization.
+  webrtc::test::FrameForwarder frame_forwarder;
+  cricket::FakeFrameSource frame_source(1280, 720,
+                                        rtc::kNumMicrosecsPerSec / 30);
+  EXPECT_TRUE(send_channel->SetVideoSend(kSsrc, nullptr, &frame_forwarder));
+  frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame());
+  time_controller_.AdvanceTime(webrtc::TimeDelta::Zero());
+  ASSERT_TRUE(encoder_factory_->WaitForCreatedVideoEncoders(1));
+  ASSERT_EQ(1u, encoder_factory_->encoders().size());
+  ASSERT_TRUE(encoder_factory_->encoders()[0]->WaitForInitEncode());
+  EXPECT_EQ(webrtc::kVideoCodecH264,
+            encoder_factory_->encoders()[0]->GetCodecSettings().codecType);
+
+  send_channel.reset();
+  // Make sure DestroyVideoEncoder was called on the factory.
+  ASSERT_EQ(0u, encoder_factory_->encoders().size());
+}
+
+// Verifies H264 simulcast: with three SSRCs, a single H264 encoder is
+// created and configured with more than one simulcast stream.
+TEST_F(WebRtcVideoEngineTest, SimulcastEnabledForH264) {
+  AddSupportedVideoCodecType("H264");
+
+  std::unique_ptr<VideoMediaSendChannelInterface> send_channel =
+      engine_.CreateSendChannel(call_.get(), GetMediaConfig(), VideoOptions(),
+                                webrtc::CryptoOptions(),
+                                video_bitrate_allocator_factory_.get());
+
+  cricket::VideoSenderParameters parameters;
+  parameters.codecs.push_back(GetEngineCodec("H264"));
+  EXPECT_TRUE(send_channel->SetSenderParameters(parameters));
+
+  const std::vector<uint32_t> ssrcs = MAKE_VECTOR(kSsrcs3);
+  EXPECT_TRUE(send_channel->AddSendStream(
+      cricket::CreateSimStreamParams("cname", ssrcs)));
+
+  // Send a frame of 720p. This should trigger a "real" encoder initialization.
+  webrtc::test::FrameForwarder frame_forwarder;
+  cricket::FakeFrameSource frame_source(1280, 720,
+                                        rtc::kNumMicrosecsPerSec / 30);
+  EXPECT_TRUE(send_channel->SetVideoSend(ssrcs[0], nullptr, &frame_forwarder));
+  frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame());
+  time_controller_.AdvanceTime(webrtc::TimeDelta::Zero());
+
+  ASSERT_TRUE(encoder_factory_->WaitForCreatedVideoEncoders(1));
+  ASSERT_EQ(1u, encoder_factory_->encoders().size());
+  FakeWebRtcVideoEncoder* encoder = encoder_factory_->encoders()[0];
+  ASSERT_TRUE(encoder_factory_->encoders()[0]->WaitForInitEncode());
+  EXPECT_EQ(webrtc::kVideoCodecH264, encoder->GetCodecSettings().codecType);
+  // Simulcast handled inside one encoder rather than via the adapter.
+  EXPECT_LT(1u, encoder->GetCodecSettings().numberOfSimulcastStreams);
+  EXPECT_TRUE(send_channel->SetVideoSend(ssrcs[0], nullptr, nullptr));
+}
+
+// Test that FlexFEC is not supported as a send video codec by default.
+// Only enabling field trial should allow advertising FlexFEC send codec.
+TEST_F(WebRtcVideoEngineTest, Flexfec03SendCodecEnablesWithFieldTrial) {
+  encoder_factory_->AddSupportedVideoCodecType("VP8");
+
+  // gMock matcher for any codec entry named "flexfec-03".
+  auto flexfec = Field("name", &VideoCodec::name, "flexfec-03");
+
+  EXPECT_THAT(engine_.send_codecs(), Not(Contains(flexfec)));
+
+  webrtc::test::ScopedKeyValueConfig override_field_trials(
+      field_trials_, "WebRTC-FlexFEC-03-Advertised/Enabled/");
+  EXPECT_THAT(engine_.send_codecs(), Contains(flexfec));
+}
+
+// Test that the FlexFEC "codec" gets assigned to the lower payload type range
+TEST_F(WebRtcVideoEngineTest, Flexfec03LowerPayloadTypeRange) {
+  encoder_factory_->AddSupportedVideoCodecType("VP8");
+
+  // FlexFEC is active with field trial.
+  webrtc::test::ScopedKeyValueConfig override_field_trials(
+      field_trials_, "WebRTC-FlexFEC-03-Advertised/Enabled/");
+  auto send_codecs = engine_.send_codecs();
+  // absl::c_find_if for consistency with the rest of the file; a previously
+  // declared gMock matcher local was unused and has been dropped.
+  auto it = absl::c_find_if(send_codecs, [](const cricket::VideoCodec& codec) {
+    return codec.name == "flexfec-03";
+  });
+  ASSERT_NE(it, send_codecs.end());
+  // Expect the payload type to land in the lower dynamic range [35, 65].
+  EXPECT_LE(35, it->id);
+  EXPECT_GE(65, it->id);
+}
+
+// Test that codecs are added in the order they are reported from the factory.
+TEST_F(WebRtcVideoEngineTest, ReportSupportedCodecs) {
+  encoder_factory_->AddSupportedVideoCodecType("VP8");
+  const char* kFakeCodecName = "FakeCodec";
+  encoder_factory_->AddSupportedVideoCodecType(kFakeCodecName);
+
+  // The last reported codec should appear after the first codec in the vector.
+  const size_t vp8_index = GetEngineCodecIndex("VP8");
+  const size_t fake_codec_index = GetEngineCodecIndex(kFakeCodecName);
+  EXPECT_LT(vp8_index, fake_codec_index);
+}
+
+// Test that a codec that was added after the engine was initialized
+// does show up in the codec list after it was added.
+TEST_F(WebRtcVideoEngineTest, ReportSupportedAddedCodec) {
+  const char* kFakeExternalCodecName1 = "FakeExternalCodec1";
+  const char* kFakeExternalCodecName2 = "FakeExternalCodec2";
+
+  // Set up external encoder factory with first codec, and initialize engine.
+  encoder_factory_->AddSupportedVideoCodecType(kFakeExternalCodecName1);
+
+  std::vector<cricket::VideoCodec> codecs_before(engine_.send_codecs());
+
+  // Add second codec.
+  encoder_factory_->AddSupportedVideoCodecType(kFakeExternalCodecName2);
+  std::vector<cricket::VideoCodec> codecs_after(engine_.send_codecs());
+  // The codec itself and RTX should have been added.
+  EXPECT_EQ(codecs_before.size() + 2, codecs_after.size());
+
+  // Check that both fake codecs are present and that the second fake codec
+  // appears after the first fake codec.
+  const size_t fake_codec_index1 = GetEngineCodecIndex(kFakeExternalCodecName1);
+  const size_t fake_codec_index2 = GetEngineCodecIndex(kFakeExternalCodecName2);
+  EXPECT_LT(fake_codec_index1, fake_codec_index2);
+}
+
+// An externally supported codec gets an RTX codec listed immediately after
+// it in the engine's send codec list.
+TEST_F(WebRtcVideoEngineTest, ReportRtxForExternalCodec) {
+  const char* kFakeCodecName = "FakeCodec";
+  encoder_factory_->AddSupportedVideoCodecType(kFakeCodecName);
+
+  const size_t fake_codec_index = GetEngineCodecIndex(kFakeCodecName);
+  EXPECT_EQ("rtx", engine_.send_codecs().at(fake_codec_index + 1).name);
+}
+
+// Decoders are created lazily: adding a receive stream and re-applying the
+// same receiver parameters must not instantiate any decoder.
+TEST_F(WebRtcVideoEngineTest, RegisterDecodersIfSupported) {
+  AddSupportedVideoCodecType("VP8");
+  cricket::VideoReceiverParameters parameters;
+  parameters.codecs.push_back(GetEngineCodec("VP8"));
+
+  auto receive_channel = SetRecvParamsWithSupportedCodecs(parameters.codecs);
+
+  EXPECT_TRUE(receive_channel->AddRecvStream(
+      cricket::StreamParams::CreateLegacy(kSsrc)));
+  // Decoders are not created until they are used.
+  time_controller_.AdvanceTime(webrtc::TimeDelta::Zero());
+  EXPECT_EQ(0u, decoder_factory_->decoders().size());
+
+  // Setting codecs of the same type should not reallocate the decoder.
+  EXPECT_TRUE(receive_channel->SetReceiverParameters(parameters));
+  EXPECT_EQ(0, decoder_factory_->GetNumCreatedDecoders());
+
+  // Remove stream previously added to free the external decoder instance.
+  EXPECT_TRUE(receive_channel->RemoveRecvStream(kSsrc));
+  EXPECT_EQ(0u, decoder_factory_->decoders().size());
+}
+
+// Verifies that we can set up decoders.
+TEST_F(WebRtcVideoEngineTest, RegisterH264DecoderIfSupported) {
+  // TODO(pbos): Do not assume that encoder/decoder support is symmetric. We
+  // can't even query the WebRtcVideoDecoderFactory for supported codecs.
+  // For now we add a FakeWebRtcVideoEncoderFactory to add H264 to supported
+  // codecs.
+  AddSupportedVideoCodecType("H264");
+  std::vector<cricket::VideoCodec> codecs;
+  codecs.push_back(GetEngineCodec("H264"));
+
+  auto receive_channel = SetRecvParamsWithSupportedCodecs(codecs);
+
+  EXPECT_TRUE(receive_channel->AddRecvStream(
+      cricket::StreamParams::CreateLegacy(kSsrc)));
+  // Decoders are not created until they are used.
+  time_controller_.AdvanceTime(webrtc::TimeDelta::Zero());
+  ASSERT_EQ(0u, decoder_factory_->decoders().size());
+}
+
+// Tests when GetSources is called with non-existing ssrc, it will return an
+// empty list of RtpSource without crashing.
+TEST_F(WebRtcVideoEngineTest, GetSourcesWithNonExistingSsrc) {
+  // Setup a recv stream with `kSsrc`.
+  AddSupportedVideoCodecType("VP8");
+  cricket::VideoReceiverParameters parameters;
+  parameters.codecs.push_back(GetEngineCodec("VP8"));
+  auto receive_channel = SetRecvParamsWithSupportedCodecs(parameters.codecs);
+
+  EXPECT_TRUE(receive_channel->AddRecvStream(
+      cricket::StreamParams::CreateLegacy(kSsrc)));
+
+  // Call GetSources with |kSsrc + 1| which doesn't exist.
+  std::vector<webrtc::RtpSource> sources =
+      receive_channel->GetSources(kSsrc + 1);
+  EXPECT_EQ(0u, sources.size());
+}
+
+// An engine constructed with null encoder/decoder factories must report no
+// codecs (and not crash).
+TEST(WebRtcVideoEngineNewVideoCodecFactoryTest, NullFactories) {
+  std::unique_ptr<webrtc::VideoEncoderFactory> encoder_factory;
+  std::unique_ptr<webrtc::VideoDecoderFactory> decoder_factory;
+  webrtc::FieldTrialBasedConfig trials;
+  WebRtcVideoEngine engine(std::move(encoder_factory),
+                           std::move(decoder_factory), trials);
+  EXPECT_EQ(0u, engine.send_codecs().size());
+  EXPECT_EQ(0u, engine.recv_codecs().size());
+}
+
+// Factories that report no supported formats must yield an engine with no
+// send or receive codecs; also checks the factories are queried and
+// destroyed.
+TEST(WebRtcVideoEngineNewVideoCodecFactoryTest, EmptyFactories) {
+  // `engine` take ownership of the factories.
+  webrtc::MockVideoEncoderFactory* encoder_factory =
+      new webrtc::MockVideoEncoderFactory();
+  webrtc::MockVideoDecoderFactory* decoder_factory =
+      new webrtc::MockVideoDecoderFactory();
+  webrtc::FieldTrialBasedConfig trials;
+  WebRtcVideoEngine engine(
+      (std::unique_ptr<webrtc::VideoEncoderFactory>(encoder_factory)),
+      (std::unique_ptr<webrtc::VideoDecoderFactory>(decoder_factory)), trials);
+  // TODO(kron): Change to Times(1) once send and receive codecs are changed
+  // to be treated independently.
+  EXPECT_CALL(*encoder_factory, GetSupportedFormats()).Times(1);
+  EXPECT_EQ(0u, engine.send_codecs().size());
+  EXPECT_EQ(0u, engine.recv_codecs().size());
+  // Destruction of `engine` at scope exit must delete both factories.
+  EXPECT_CALL(*encoder_factory, Die());
+  EXPECT_CALL(*decoder_factory, Die());
+}
+
+// Test full behavior in the video engine when video codec factories of the new
+// type are injected supporting the single codec Vp8. Check the returned codecs
+// from the engine and that we will create a Vp8 encoder and decoder using the
+// new factories.
+TEST(WebRtcVideoEngineNewVideoCodecFactoryTest, Vp8) {
+ // `engine` take ownership of the factories.
+ webrtc::MockVideoEncoderFactory* encoder_factory =
+ new webrtc::MockVideoEncoderFactory();
+ webrtc::MockVideoDecoderFactory* decoder_factory =
+ new webrtc::MockVideoDecoderFactory();
+ std::unique_ptr<webrtc::MockVideoBitrateAllocatorFactory>
+ rate_allocator_factory =
+ std::make_unique<webrtc::MockVideoBitrateAllocatorFactory>();
+ EXPECT_CALL(*rate_allocator_factory,
+ CreateVideoBitrateAllocator(Field(&webrtc::VideoCodec::codecType,
+ webrtc::kVideoCodecVP8)))
+ .WillOnce(
+ [] { return std::make_unique<webrtc::MockVideoBitrateAllocator>(); });
+ webrtc::FieldTrialBasedConfig trials;
+ WebRtcVideoEngine engine(
+ (std::unique_ptr<webrtc::VideoEncoderFactory>(encoder_factory)),
+ (std::unique_ptr<webrtc::VideoDecoderFactory>(decoder_factory)), trials);
+ const webrtc::SdpVideoFormat vp8_format("VP8");
+ const std::vector<webrtc::SdpVideoFormat> supported_formats = {vp8_format};
+ EXPECT_CALL(*encoder_factory, GetSupportedFormats())
+ .WillRepeatedly(Return(supported_formats));
+ EXPECT_CALL(*decoder_factory, GetSupportedFormats())
+ .WillRepeatedly(Return(supported_formats));
+
+ // Verify the codecs from the engine.
+ const std::vector<VideoCodec> engine_codecs = engine.send_codecs();
+ // Verify default codecs has been added correctly.
+ EXPECT_EQ(5u, engine_codecs.size());
+ EXPECT_EQ("VP8", engine_codecs.at(0).name);
+
+ // RTX codec for VP8.
+ EXPECT_EQ("rtx", engine_codecs.at(1).name);
+ int vp8_associated_payload;
+ EXPECT_TRUE(engine_codecs.at(1).GetParam(kCodecParamAssociatedPayloadType,
+ &vp8_associated_payload));
+ EXPECT_EQ(vp8_associated_payload, engine_codecs.at(0).id);
+
+ EXPECT_EQ(kRedCodecName, engine_codecs.at(2).name);
+
+ // RTX codec for RED.
+ EXPECT_EQ("rtx", engine_codecs.at(3).name);
+ int red_associated_payload;
+ EXPECT_TRUE(engine_codecs.at(3).GetParam(kCodecParamAssociatedPayloadType,
+ &red_associated_payload));
+ EXPECT_EQ(red_associated_payload, engine_codecs.at(2).id);
+
+ EXPECT_EQ(kUlpfecCodecName, engine_codecs.at(4).name);
+
+ int associated_payload_type;
+ EXPECT_TRUE(engine_codecs.at(1).GetParam(
+ cricket::kCodecParamAssociatedPayloadType, &associated_payload_type));
+ EXPECT_EQ(engine_codecs.at(0).id, associated_payload_type);
+ // Verify default parameters has been added to the VP8 codec.
+ VerifyCodecHasDefaultFeedbackParams(engine_codecs.at(0),
+ /*lntf_expected=*/false);
+
+ // Mock encoder creation. `engine` take ownership of the encoder.
+ const webrtc::SdpVideoFormat format("VP8");
+ EXPECT_CALL(*encoder_factory, CreateVideoEncoder(format)).WillOnce([&] {
+ return std::make_unique<FakeWebRtcVideoEncoder>(nullptr);
+ });
+
+ // Expect no decoder to be created at this point. The decoder will only be
+ // created if we receive payload data.
+ EXPECT_CALL(*decoder_factory, CreateVideoDecoder(format)).Times(0);
+
+ // Create a call.
+ webrtc::RtcEventLogNull event_log;
+ webrtc::GlobalSimulatedTimeController time_controller(
+ webrtc::Timestamp::Millis(4711));
+ auto task_queue_factory = time_controller.CreateTaskQueueFactory();
+ CallConfig call_config(&event_log);
+ webrtc::FieldTrialBasedConfig field_trials;
+ call_config.trials = &field_trials;
+ call_config.task_queue_factory = task_queue_factory.get();
+ const std::unique_ptr<Call> call = Call::Create(call_config);
+
+ // Create send channel.
+ const int send_ssrc = 123;
+ std::unique_ptr<VideoMediaSendChannelInterface> send_channel =
+ engine.CreateSendChannel(call.get(), GetMediaConfig(), VideoOptions(),
+ webrtc::CryptoOptions(),
+ rate_allocator_factory.get());
+
+ cricket::VideoSenderParameters send_parameters;
+ send_parameters.codecs.push_back(engine_codecs.at(0));
+ EXPECT_TRUE(send_channel->SetSenderParameters(send_parameters));
+ send_channel->OnReadyToSend(true);
+ EXPECT_TRUE(
+ send_channel->AddSendStream(StreamParams::CreateLegacy(send_ssrc)));
+ EXPECT_TRUE(send_channel->SetSend(true));
+
+ // Set capturer.
+ webrtc::test::FrameForwarder frame_forwarder;
+ cricket::FakeFrameSource frame_source(1280, 720,
+ rtc::kNumMicrosecsPerSec / 30);
+ EXPECT_TRUE(send_channel->SetVideoSend(send_ssrc, nullptr, &frame_forwarder));
+ // Sending one frame will allocate the encoder.
+ frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame());
+ time_controller.AdvanceTime(webrtc::TimeDelta::Zero());
+
+ // Create recv channel.
+ const int recv_ssrc = 321;
+ std::unique_ptr<VideoMediaReceiveChannelInterface> receive_channel =
+ engine.CreateReceiveChannel(call.get(), GetMediaConfig(), VideoOptions(),
+ webrtc::CryptoOptions());
+
+ cricket::VideoReceiverParameters recv_parameters;
+ recv_parameters.codecs.push_back(engine_codecs.at(0));
+ EXPECT_TRUE(receive_channel->SetReceiverParameters(recv_parameters));
+ EXPECT_TRUE(receive_channel->AddRecvStream(
+ cricket::StreamParams::CreateLegacy(recv_ssrc)));
+
+ // Remove streams previously added to free the encoder and decoder instance.
+ EXPECT_CALL(*encoder_factory, Die());
+ EXPECT_CALL(*decoder_factory, Die());
+ EXPECT_CALL(*rate_allocator_factory, Die());
+ EXPECT_TRUE(send_channel->RemoveSendStream(send_ssrc));
+ EXPECT_TRUE(receive_channel->RemoveRecvStream(recv_ssrc));
+}
+
+// Verifies that changing the content type (realtime video <-> screenshare)
+// forces the encoder to be recreated with the matching VideoCodecMode, and
+// that option changes unrelated to content type keep the existing encoder.
+TEST_F(WebRtcVideoEngineTest, DISABLED_RecreatesEncoderOnContentTypeChange) {
+  encoder_factory_->AddSupportedVideoCodecType("VP8");
+  auto send_channel = SetSendParamsWithAllSupportedCodecs();
+
+  ASSERT_TRUE(
+      send_channel->AddSendStream(cricket::StreamParams::CreateLegacy(kSsrc)));
+  cricket::VideoCodec codec = GetEngineCodec("VP8");
+  cricket::VideoSenderParameters parameters;
+  parameters.codecs.push_back(codec);
+  send_channel->OnReadyToSend(true);
+  send_channel->SetSend(true);
+  ASSERT_TRUE(send_channel->SetSenderParameters(parameters));
+
+  webrtc::test::FrameForwarder frame_forwarder;
+  cricket::FakeFrameSource frame_source(1280, 720,
+                                        rtc::kNumMicrosecsPerSec / 30);
+  VideoOptions options;
+  EXPECT_TRUE(send_channel->SetVideoSend(kSsrc, &options, &frame_forwarder));
+
+  // The first captured frame allocates an encoder in realtime mode.
+  frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame());
+  ASSERT_TRUE(encoder_factory_->WaitForCreatedVideoEncoders(1));
+  EXPECT_EQ(webrtc::VideoCodecMode::kRealtimeVideo,
+            encoder_factory_->encoders().back()->GetCodecSettings().mode);
+
+  EXPECT_TRUE(send_channel->SetVideoSend(kSsrc, &options, &frame_forwarder));
+  frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame());
+  // No change in content type, keep current encoder.
+  EXPECT_EQ(1, encoder_factory_->GetNumCreatedEncoders());
+
+  options.is_screencast.emplace(true);
+  EXPECT_TRUE(send_channel->SetVideoSend(kSsrc, &options, &frame_forwarder));
+  frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame());
+  // Change to screen content, recreate encoder. For the simulcast encoder
+  // adapter case, this will result in two calls since InitEncode triggers a
+  // a new instance.
+  ASSERT_TRUE(encoder_factory_->WaitForCreatedVideoEncoders(2));
+  EXPECT_EQ(webrtc::VideoCodecMode::kScreensharing,
+            encoder_factory_->encoders().back()->GetCodecSettings().mode);
+
+  EXPECT_TRUE(send_channel->SetVideoSend(kSsrc, &options, &frame_forwarder));
+  frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame());
+  // Still screen content, no need to update encoder.
+  EXPECT_EQ(2, encoder_factory_->GetNumCreatedEncoders());
+
+  options.is_screencast.emplace(false);
+  options.video_noise_reduction.emplace(false);
+  EXPECT_TRUE(send_channel->SetVideoSend(kSsrc, &options, &frame_forwarder));
+  // Change back to regular video content, update encoder. Also change
+  // a non `is_screencast` option just to verify it doesn't affect recreation.
+  frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame());
+  ASSERT_TRUE(encoder_factory_->WaitForCreatedVideoEncoders(3));
+  EXPECT_EQ(webrtc::VideoCodecMode::kRealtimeVideo,
+            encoder_factory_->encoders().back()->GetCodecSettings().mode);
+
+  // Remove stream previously added to free the external encoder instance.
+  EXPECT_TRUE(send_channel->RemoveSendStream(kSsrc));
+  EXPECT_EQ(0u, encoder_factory_->encoders().size());
+}
+
+// The `include_rtx` argument of send_codecs()/recv_codecs() controls whether
+// RTX codecs appear in either direction's codec list.
+TEST_F(WebRtcVideoEngineTest, SetVideoRtxEnabled) {
+  AddSupportedVideoCodecType("VP8");
+  webrtc::test::ScopedKeyValueConfig field_trials;
+
+  // RTX disabled: neither direction exposes an RTX codec.
+  EXPECT_FALSE(HasAnyRtxCodec(engine_.send_codecs(false)));
+  EXPECT_FALSE(HasAnyRtxCodec(engine_.recv_codecs(false)));
+
+  // RTX enabled: both directions expose an RTX codec.
+  EXPECT_TRUE(HasAnyRtxCodec(engine_.send_codecs(true)));
+  EXPECT_TRUE(HasAnyRtxCodec(engine_.recv_codecs(true)));
+}
+
+// Fixture that connects a WebRtcVideoEngine send channel and receive channel
+// through a FakeNetworkInterface, under simulated time, so tests can inject
+// VP8 key frames and observe the recordable-encoded-frame callback.
+class WebRtcVideoChannelEncodedFrameCallbackTest : public ::testing::Test {
+ protected:
+  // Builds a CallConfig wired to the fixture's field trials and the supplied
+  // event log / task queue factory.
+  CallConfig GetCallConfig(webrtc::RtcEventLogNull* event_log,
+                           webrtc::TaskQueueFactory* task_queue_factory) {
+    CallConfig call_config(event_log);
+    call_config.task_queue_factory = task_queue_factory;
+    call_config.trials = &field_trials_;
+    return call_config;
+  }
+
+  WebRtcVideoChannelEncodedFrameCallbackTest()
+      : task_queue_factory_(time_controller_.CreateTaskQueueFactory()),
+        call_(Call::Create(
+            GetCallConfig(&event_log_, task_queue_factory_.get()))),
+        video_bitrate_allocator_factory_(
+            webrtc::CreateBuiltinVideoBitrateAllocatorFactory()),
+        engine_(
+            std::make_unique<webrtc::VideoEncoderFactoryTemplate<
+                webrtc::LibvpxVp8EncoderTemplateAdapter,
+                webrtc::LibvpxVp9EncoderTemplateAdapter,
+                webrtc::OpenH264EncoderTemplateAdapter,
+                webrtc::LibaomAv1EncoderTemplateAdapter>>(),
+            std::make_unique<webrtc::test::FunctionVideoDecoderFactory>(
+                []() { return std::make_unique<webrtc::test::FakeDecoder>(); },
+                kSdpVideoFormats),
+            field_trials_) {
+    send_channel_ = engine_.CreateSendChannel(
+        call_.get(), cricket::MediaConfig(), cricket::VideoOptions(),
+        webrtc::CryptoOptions(), video_bitrate_allocator_factory_.get());
+    receive_channel_ = engine_.CreateReceiveChannel(
+        call_.get(), cricket::MediaConfig(), cricket::VideoOptions(),
+        webrtc::CryptoOptions());
+
+    // Loop outgoing packets from the send channel straight into the receive
+    // channel.
+    network_interface_.SetDestination(receive_channel_.get());
+    send_channel_->SetInterface(&network_interface_);
+    receive_channel_->SetInterface(&network_interface_);
+    cricket::VideoReceiverParameters parameters;
+    parameters.codecs = engine_.recv_codecs();
+    receive_channel_->SetReceiverParameters(parameters);
+    receive_channel_->SetReceive(true);
+  }
+
+  ~WebRtcVideoChannelEncodedFrameCallbackTest() override {
+    // Detach the network interface before destroying the channels.
+    send_channel_->SetInterface(nullptr);
+    receive_channel_->SetInterface(nullptr);
+    send_channel_.reset();
+    receive_channel_.reset();
+  }
+
+  // Injects a minimal VP8 key frame (payload type 96) on `ssrc`.
+  void DeliverKeyFrame(uint32_t ssrc) {
+    receive_channel_->OnPacketReceived(BuildVp8KeyFrame(ssrc, 96));
+  }
+
+  // Injects a key frame, advances simulated time, and expects exactly one
+  // rendered frame on `renderer_`.
+  void DeliverKeyFrameAndWait(uint32_t ssrc) {
+    DeliverKeyFrame(ssrc);
+    time_controller_.AdvanceTime(kFrameDuration);
+    EXPECT_EQ(renderer_.num_rendered_frames(), 1);
+  }
+
+  static const std::vector<webrtc::SdpVideoFormat> kSdpVideoFormats;
+  webrtc::GlobalSimulatedTimeController time_controller_{
+      Timestamp::Seconds(1000)};
+  webrtc::test::ScopedKeyValueConfig field_trials_;
+  webrtc::RtcEventLogNull event_log_;
+  std::unique_ptr<webrtc::TaskQueueFactory> task_queue_factory_;
+  std::unique_ptr<Call> call_;
+  std::unique_ptr<webrtc::VideoBitrateAllocatorFactory>
+      video_bitrate_allocator_factory_;
+  WebRtcVideoEngine engine_;
+  std::unique_ptr<VideoMediaSendChannelInterface> send_channel_;
+  std::unique_ptr<VideoMediaReceiveChannelInterface> receive_channel_;
+  cricket::FakeNetworkInterface network_interface_;
+  cricket::FakeVideoRenderer renderer_;
+};
+
+// VP8 is the only format the fixture's fake decoder factory advertises.
+const std::vector<webrtc::SdpVideoFormat>
+    WebRtcVideoChannelEncodedFrameCallbackTest::kSdpVideoFormats = {
+        webrtc::SdpVideoFormat("VP8")};
+
+// A callback registered for ssrc 0 fires when a frame arrives on a default
+// (unsignalled-style) receive stream.
+TEST_F(WebRtcVideoChannelEncodedFrameCallbackTest,
+       SetEncodedFrameBufferFunction_DefaultStream) {
+  testing::MockFunction<void(const webrtc::RecordableEncodedFrame&)> callback;
+  EXPECT_CALL(callback, Call);
+  EXPECT_TRUE(receive_channel_->AddDefaultRecvStreamForTesting(
+      cricket::StreamParams::CreateLegacy(kSsrc)));
+  receive_channel_->SetRecordableEncodedFrameCallback(/*ssrc=*/0,
+                                                      callback.AsStdFunction());
+  EXPECT_TRUE(receive_channel_->SetSink(kSsrc, &renderer_));
+  DeliverKeyFrame(kSsrc);
+  time_controller_.AdvanceTime(kFrameDuration);
+  EXPECT_EQ(renderer_.num_rendered_frames(), 1);
+  receive_channel_->RemoveRecvStream(kSsrc);
+}
+
+// A callback registered for the concrete SSRC fires when that SSRC is served
+// by a default receive stream.
+TEST_F(WebRtcVideoChannelEncodedFrameCallbackTest,
+       SetEncodedFrameBufferFunction_MatchSsrcWithDefaultStream) {
+  testing::MockFunction<void(const webrtc::RecordableEncodedFrame&)> callback;
+  EXPECT_CALL(callback, Call);
+  EXPECT_TRUE(receive_channel_->AddDefaultRecvStreamForTesting(
+      cricket::StreamParams::CreateLegacy(kSsrc)));
+  EXPECT_TRUE(receive_channel_->SetSink(kSsrc, &renderer_));
+  receive_channel_->SetRecordableEncodedFrameCallback(kSsrc,
+                                                      callback.AsStdFunction());
+  DeliverKeyFrame(kSsrc);
+  time_controller_.AdvanceTime(kFrameDuration);
+  EXPECT_EQ(renderer_.num_rendered_frames(), 1);
+  receive_channel_->RemoveRecvStream(kSsrc);
+}
+
+// A callback registered for the SSRC of a normally signalled receive stream
+// fires when a frame arrives on that stream.
+TEST_F(WebRtcVideoChannelEncodedFrameCallbackTest,
+       SetEncodedFrameBufferFunction_MatchSsrc) {
+  testing::MockFunction<void(const webrtc::RecordableEncodedFrame&)> callback;
+  EXPECT_CALL(callback, Call);
+  EXPECT_TRUE(receive_channel_->AddRecvStream(
+      cricket::StreamParams::CreateLegacy(kSsrc)));
+  EXPECT_TRUE(receive_channel_->SetSink(kSsrc, &renderer_));
+  receive_channel_->SetRecordableEncodedFrameCallback(kSsrc,
+                                                      callback.AsStdFunction());
+  DeliverKeyFrame(kSsrc);
+  time_controller_.AdvanceTime(kFrameDuration);
+  EXPECT_EQ(renderer_.num_rendered_frames(), 1);
+  receive_channel_->RemoveRecvStream(kSsrc);
+}
+
+// A callback registered for a different SSRC must not fire; the StrictMock
+// fails the test on any unexpected invocation.
+TEST_F(WebRtcVideoChannelEncodedFrameCallbackTest,
+       SetEncodedFrameBufferFunction_MismatchSsrc) {
+  testing::StrictMock<
+      testing::MockFunction<void(const webrtc::RecordableEncodedFrame&)>>
+      callback;
+  EXPECT_TRUE(receive_channel_->AddRecvStream(
+      cricket::StreamParams::CreateLegacy(kSsrc + 1)));
+  EXPECT_TRUE(receive_channel_->SetSink(kSsrc + 1, &renderer_));
+  receive_channel_->SetRecordableEncodedFrameCallback(kSsrc,
+                                                      callback.AsStdFunction());
+  DeliverKeyFrame(kSsrc);  // Expected to not cause function to fire.
+  DeliverKeyFrameAndWait(kSsrc + 1);
+  receive_channel_->RemoveRecvStream(kSsrc + 1);
+}
+
+// Same mismatch check as above, but with the receiving stream added as a
+// default stream and a default sink installed.
+TEST_F(WebRtcVideoChannelEncodedFrameCallbackTest,
+       SetEncodedFrameBufferFunction_MismatchSsrcWithDefaultStream) {
+  testing::StrictMock<
+      testing::MockFunction<void(const webrtc::RecordableEncodedFrame&)>>
+      callback;
+  EXPECT_TRUE(receive_channel_->AddDefaultRecvStreamForTesting(
+      cricket::StreamParams::CreateLegacy(kSsrc + 1)));
+  EXPECT_TRUE(receive_channel_->SetSink(kSsrc + 1, &renderer_));
+  receive_channel_->SetRecordableEncodedFrameCallback(kSsrc,
+                                                      callback.AsStdFunction());
+  receive_channel_->SetDefaultSink(&renderer_);
+  DeliverKeyFrame(kSsrc);  // Expected to not cause function to fire.
+  DeliverKeyFrameAndWait(kSsrc + 1);
+  receive_channel_->RemoveRecvStream(kSsrc + 1);
+}
+
+// Frames delivered while SetReceive(false) is in effect are not decoded or
+// rendered; re-enabling receive resumes decoding.
+TEST_F(WebRtcVideoChannelEncodedFrameCallbackTest, DoesNotDecodeWhenDisabled) {
+  testing::MockFunction<void(const webrtc::RecordableEncodedFrame&)> callback;
+  EXPECT_CALL(callback, Call);
+  EXPECT_TRUE(receive_channel_->AddDefaultRecvStreamForTesting(
+      cricket::StreamParams::CreateLegacy(kSsrc)));
+  receive_channel_->SetRecordableEncodedFrameCallback(/*ssrc=*/0,
+                                                      callback.AsStdFunction());
+  EXPECT_TRUE(receive_channel_->SetSink(kSsrc, &renderer_));
+  // Receive disabled: the key frame must not reach the renderer.
+  receive_channel_->SetReceive(false);
+  DeliverKeyFrame(kSsrc);
+  time_controller_.AdvanceTime(kFrameDuration);
+  EXPECT_EQ(renderer_.num_rendered_frames(), 0);
+
+  // Receive enabled: the frame is decoded and rendered.
+  receive_channel_->SetReceive(true);
+  DeliverKeyFrame(kSsrc);
+  time_controller_.AdvanceTime(kFrameDuration);
+  EXPECT_EQ(renderer_.num_rendered_frames(), 1);
+
+  // Disabled again: the rendered-frame count stays unchanged.
+  receive_channel_->SetReceive(false);
+  DeliverKeyFrame(kSsrc);
+  time_controller_.AdvanceTime(kFrameDuration);
+  EXPECT_EQ(renderer_.num_rendered_frames(), 1);
+  receive_channel_->RemoveRecvStream(kSsrc);
+}
+
+// Base fixture for end-to-end channel tests: a real engine with the builtin
+// software encoder/decoder factories, one send and one receive channel joined
+// by a FakeNetworkInterface, all driven by simulated time.
+class WebRtcVideoChannelBaseTest : public ::testing::Test {
+ protected:
+  WebRtcVideoChannelBaseTest()
+      : task_queue_factory_(time_controller_.CreateTaskQueueFactory()),
+        video_bitrate_allocator_factory_(
+            webrtc::CreateBuiltinVideoBitrateAllocatorFactory()),
+        engine_(std::make_unique<webrtc::VideoEncoderFactoryTemplate<
+                    webrtc::LibvpxVp8EncoderTemplateAdapter,
+                    webrtc::LibvpxVp9EncoderTemplateAdapter,
+                    webrtc::OpenH264EncoderTemplateAdapter,
+                    webrtc::LibaomAv1EncoderTemplateAdapter>>(),
+                std::make_unique<webrtc::VideoDecoderFactoryTemplate<
+                    webrtc::LibvpxVp8DecoderTemplateAdapter,
+                    webrtc::LibvpxVp9DecoderTemplateAdapter,
+                    webrtc::OpenH264DecoderTemplateAdapter,
+                    webrtc::Dav1dDecoderTemplateAdapter>>(),
+                field_trials_) {}
+
+  // Creates the channels, routes send -> receive through the fake network,
+  // and attaches a 640x480 frame source to the default send stream.
+  void SetUp() override {
+    // One testcase calls SetUp in a loop, only create call_ once.
+    if (!call_) {
+      CallConfig call_config(&event_log_);
+      call_config.task_queue_factory = task_queue_factory_.get();
+      call_config.trials = &field_trials_;
+      call_ = Call::Create(call_config);
+    }
+
+    cricket::MediaConfig media_config;
+    // Disabling cpu overuse detection actually disables quality scaling too;
+    // it implies DegradationPreference kMaintainResolution. Automatic scaling
+    // needs to be disabled, otherwise, tests which check the size of received
+    // frames become flaky.
+    media_config.video.enable_cpu_adaptation = false;
+    send_channel_ = engine_.CreateSendChannel(
+        call_.get(), media_config, cricket::VideoOptions(),
+        webrtc::CryptoOptions(), video_bitrate_allocator_factory_.get());
+    receive_channel_ = engine_.CreateReceiveChannel(call_.get(), media_config,
+                                                    cricket::VideoOptions(),
+                                                    webrtc::CryptoOptions());
+    send_channel_->OnReadyToSend(true);
+    receive_channel_->SetReceive(true);
+    // Loop outgoing packets from the send channel into the receive channel.
+    network_interface_.SetDestination(receive_channel_.get());
+    send_channel_->SetInterface(&network_interface_);
+    receive_channel_->SetInterface(&network_interface_);
+    cricket::VideoReceiverParameters parameters;
+    parameters.codecs = engine_.send_codecs();
+    receive_channel_->SetReceiverParameters(parameters);
+    EXPECT_TRUE(send_channel_->AddSendStream(DefaultSendStreamParams()));
+    frame_forwarder_ = std::make_unique<webrtc::test::FrameForwarder>();
+    frame_source_ = std::make_unique<cricket::FakeFrameSource>(
+        640, 480, rtc::kNumMicrosecsPerSec / kFramerate);
+    EXPECT_TRUE(
+        send_channel_->SetVideoSend(kSsrc, nullptr, frame_forwarder_.get()));
+  }
+
+  // Returns pointer to implementation of the send channel.
+  WebRtcVideoSendChannel* SendImpl() {
+    // Note that this function requires intimate knowledge of how the channel
+    // was created.
+    return static_cast<cricket::WebRtcVideoSendChannel*>(send_channel_.get());
+  }
+
+  // Utility method to setup an additional stream to send and receive video.
+  // Used to test send and recv between two streams.
+  void SetUpSecondStream() {
+    SetUpSecondStreamWithNoRecv();
+    // Setup recv for second stream.
+    EXPECT_TRUE(receive_channel_->AddRecvStream(
+        cricket::StreamParams::CreateLegacy(kSsrc + 2)));
+    // Make the second renderer available for use by a new stream.
+    EXPECT_TRUE(receive_channel_->SetSink(kSsrc + 2, &renderer2_));
+  }
+
+  // Setup an additional stream just to send video. Defer add recv stream.
+  // This is required if you want to test unsignalled recv of video rtp
+  // packets.
+  void SetUpSecondStreamWithNoRecv() {
+    // SetUp() already added kSsrc make sure duplicate SSRCs cant be added.
+    EXPECT_TRUE(receive_channel_->AddRecvStream(
+        cricket::StreamParams::CreateLegacy(kSsrc)));
+    EXPECT_TRUE(receive_channel_->SetSink(kSsrc, &renderer_));
+    EXPECT_FALSE(send_channel_->AddSendStream(
+        cricket::StreamParams::CreateLegacy(kSsrc)));
+    EXPECT_TRUE(send_channel_->AddSendStream(
+        cricket::StreamParams::CreateLegacy(kSsrc + 2)));
+    // We dont add recv for the second stream.
+
+    // Setup the receive and renderer for second stream after send.
+    frame_forwarder_2_ = std::make_unique<webrtc::test::FrameForwarder>();
+    EXPECT_TRUE(send_channel_->SetVideoSend(kSsrc + 2, nullptr,
+                                            frame_forwarder_2_.get()));
+  }
+
+  void TearDown() override {
+    // Detach the network interface before destroying the channels.
+    send_channel_->SetInterface(nullptr);
+    receive_channel_->SetInterface(nullptr);
+    send_channel_.reset();
+    receive_channel_.reset();
+  }
+
+  // Full teardown + setup cycle; call_ survives across the cycle.
+  void ResetTest() {
+    TearDown();
+    SetUp();
+  }
+
+  bool SetDefaultCodec() { return SetOneCodec(DefaultCodec()); }
+
+  // Replaces the negotiated codec list with exactly `codec`, preserving the
+  // channel's current sending state.
+  bool SetOneCodec(const cricket::VideoCodec& codec) {
+    frame_source_ = std::make_unique<cricket::FakeFrameSource>(
+        kVideoWidth, kVideoHeight, rtc::kNumMicrosecsPerSec / kFramerate);
+
+    bool sending = SendImpl()->sending();
+    bool success = SetSend(false);
+    if (success) {
+      cricket::VideoSenderParameters parameters;
+      parameters.codecs.push_back(codec);
+      success = send_channel_->SetSenderParameters(parameters);
+    }
+    if (success) {
+      success = SetSend(sending);
+    }
+    return success;
+  }
+  bool SetSend(bool send) { return send_channel_->SetSend(send); }
+  // Pushes one frame on every attached forwarder and advances simulated time
+  // by a frame interval so the frame is encoded, sent, and decoded.
+  void SendFrame() {
+    if (frame_forwarder_2_) {
+      frame_forwarder_2_->IncomingCapturedFrame(frame_source_->GetFrame());
+    }
+    frame_forwarder_->IncomingCapturedFrame(frame_source_->GetFrame());
+    time_controller_.AdvanceTime(kFrameDuration);
+  }
+  bool WaitAndSendFrame(int wait_ms) {
+    time_controller_.AdvanceTime(TimeDelta::Millis(wait_ms));
+    SendFrame();
+    return true;
+  }
+  // Accessors over the fake network's RTP traffic counters.
+  int NumRtpBytes() { return network_interface_.NumRtpBytes(); }
+  int NumRtpBytes(uint32_t ssrc) {
+    return network_interface_.NumRtpBytes(ssrc);
+  }
+  int NumRtpPackets() { return network_interface_.NumRtpPackets(); }
+  int NumRtpPackets(uint32_t ssrc) {
+    return network_interface_.NumRtpPackets(ssrc);
+  }
+  int NumSentSsrcs() { return network_interface_.NumSentSsrcs(); }
+  rtc::CopyOnWriteBuffer GetRtpPacket(int index) {
+    return network_interface_.GetRtpPacket(index);
+  }
+  // Parses the RTP header of `p` and returns its payload type.
+  static int GetPayloadType(rtc::CopyOnWriteBuffer p) {
+    RtpPacket header;
+    EXPECT_TRUE(header.Parse(std::move(p)));
+    return header.PayloadType();
+  }
+
+  // Tests that we can send and receive frames.
+  void SendAndReceive(const cricket::VideoCodec& codec) {
+    EXPECT_TRUE(SetOneCodec(codec));
+    EXPECT_TRUE(SetSend(true));
+    receive_channel_->SetDefaultSink(&renderer_);
+    EXPECT_EQ(0, renderer_.num_rendered_frames());
+    SendFrame();
+    EXPECT_FRAME(1, kVideoWidth, kVideoHeight);
+    EXPECT_EQ(codec.id, GetPayloadType(GetRtpPacket(0)));
+  }
+
+  // Sends `duration_sec` seconds of video at `fps` and verifies every frame
+  // is rendered with the negotiated payload type.
+  void SendReceiveManyAndGetStats(const cricket::VideoCodec& codec,
+                                  int duration_sec,
+                                  int fps) {
+    EXPECT_TRUE(SetOneCodec(codec));
+    EXPECT_TRUE(SetSend(true));
+    receive_channel_->SetDefaultSink(&renderer_);
+    EXPECT_EQ(0, renderer_.num_rendered_frames());
+    for (int i = 0; i < duration_sec; ++i) {
+      for (int frame = 1; frame <= fps; ++frame) {
+        EXPECT_TRUE(WaitAndSendFrame(1000 / fps));
+        EXPECT_FRAME(frame + i * fps, kVideoWidth, kVideoHeight);
+      }
+    }
+    EXPECT_EQ(codec.id, GetPayloadType(GetRtpPacket(0)));
+  }
+
+  cricket::VideoSenderInfo GetSenderStats(size_t i) {
+    VideoMediaSendInfo send_info;
+    EXPECT_TRUE(send_channel_->GetStats(&send_info));
+    return send_info.senders[i];
+  }
+
+  cricket::VideoReceiverInfo GetReceiverStats(size_t i) {
+    cricket::VideoMediaReceiveInfo info;
+    EXPECT_TRUE(receive_channel_->GetStats(&info));
+    return info.receivers[i];
+  }
+
+  // Two streams one channel tests.
+
+  // Tests that we can send and receive frames.
+  void TwoStreamsSendAndReceive(const cricket::VideoCodec& codec) {
+    SetUpSecondStream();
+    // Test sending and receiving on first stream.
+    SendAndReceive(codec);
+    // Test sending and receiving on second stream.
+    EXPECT_EQ(renderer2_.num_rendered_frames(), 1);
+    EXPECT_GT(NumRtpPackets(), 0);
+  }
+
+  // Looks up the engine's send codec matching `name` (case-insensitive).
+  cricket::VideoCodec GetEngineCodec(const std::string& name) {
+    for (const cricket::VideoCodec& engine_codec : engine_.send_codecs()) {
+      if (absl::EqualsIgnoreCase(name, engine_codec.name))
+        return engine_codec;
+    }
+    // This point should never be reached.
+    ADD_FAILURE() << "Unrecognized codec name: " << name;
+    return cricket::CreateVideoCodec(0, "");
+  }
+
+  cricket::VideoCodec DefaultCodec() { return GetEngineCodec("VP8"); }
+
+  cricket::StreamParams DefaultSendStreamParams() {
+    return cricket::StreamParams::CreateLegacy(kSsrc);
+  }
+
+  webrtc::GlobalSimulatedTimeController time_controller_{
+      Timestamp::Seconds(1000)};
+
+  webrtc::RtcEventLogNull event_log_;
+  webrtc::test::ScopedKeyValueConfig field_trials_;
+  std::unique_ptr<webrtc::test::ScopedKeyValueConfig> override_field_trials_;
+  std::unique_ptr<webrtc::TaskQueueFactory> task_queue_factory_;
+  std::unique_ptr<Call> call_;
+  std::unique_ptr<webrtc::VideoBitrateAllocatorFactory>
+      video_bitrate_allocator_factory_;
+  WebRtcVideoEngine engine_;
+
+  std::unique_ptr<cricket::FakeFrameSource> frame_source_;
+  std::unique_ptr<webrtc::test::FrameForwarder> frame_forwarder_;
+  std::unique_ptr<webrtc::test::FrameForwarder> frame_forwarder_2_;
+
+  std::unique_ptr<VideoMediaSendChannelInterface> send_channel_;
+  std::unique_ptr<VideoMediaReceiveChannelInterface> receive_channel_;
+  cricket::FakeNetworkInterface network_interface_;
+  cricket::FakeVideoRenderer renderer_;
+
+  // Used by test cases where 2 streams are run on the same channel.
+  cricket::FakeVideoRenderer renderer2_;
+};
+
+// Test that SetSend toggles the sending state and that packets flow only
+// while sending is enabled.
+TEST_F(WebRtcVideoChannelBaseTest, SetSend) {
+  // Not sending until codecs are set and SetSend(true) is called.
+  EXPECT_FALSE(SendImpl()->sending());
+  EXPECT_TRUE(
+      send_channel_->SetVideoSend(kSsrc, nullptr, frame_forwarder_.get()));
+  EXPECT_TRUE(SetOneCodec(DefaultCodec()));
+  EXPECT_FALSE(SendImpl()->sending());
+  EXPECT_TRUE(SetSend(true));
+  EXPECT_TRUE(SendImpl()->sending());
+  SendFrame();
+  EXPECT_GT(NumRtpPackets(), 0);
+  EXPECT_TRUE(SetSend(false));
+  EXPECT_FALSE(SendImpl()->sending());
+}
+
+// Test that SetSend fails without codecs being set, and that the sending
+// state stays false after the failed attempt.
+TEST_F(WebRtcVideoChannelBaseTest, SetSendWithoutCodecs) {
+  EXPECT_FALSE(SendImpl()->sending());
+  EXPECT_FALSE(SetSend(true));
+  EXPECT_FALSE(SendImpl()->sending());
+}
+
+// Test that we properly set the send and recv buffer sizes by the time
+// SetSend is called.
+TEST_F(WebRtcVideoChannelBaseTest, SetSendSetsTransportBufferSizes) {
+  EXPECT_TRUE(SetOneCodec(DefaultCodec()));
+  EXPECT_TRUE(SetSend(true));
+  // The channel is expected to configure these on its network interface.
+  EXPECT_EQ(kVideoRtpSendBufferSize, network_interface_.sendbuf_size());
+  EXPECT_EQ(kVideoRtpRecvBufferSize, network_interface_.recvbuf_size());
+}
+
+// Test that stats work properly for a 1-1 call.
+TEST_F(WebRtcVideoChannelBaseTest, GetStats) {
+  const int kDurationSec = 3;
+  const int kFps = 10;
+  SendReceiveManyAndGetStats(DefaultCodec(), kDurationSec, kFps);
+
+  cricket::VideoMediaSendInfo send_info;
+  cricket::VideoMediaReceiveInfo receive_info;
+  EXPECT_TRUE(send_channel_->GetStats(&send_info));
+  EXPECT_TRUE(receive_channel_->GetStats(&receive_info));
+
+  // Sender-side checks.
+  ASSERT_EQ(1U, send_info.senders.size());
+  // TODO(whyuan): bytes_sent and bytes_received are different. Are both
+  // payload? For webrtc, bytes_sent does not include the RTP header length.
+  EXPECT_EQ(send_info.senders[0].payload_bytes_sent,
+            NumRtpBytes() - kRtpHeaderSize * NumRtpPackets());
+  EXPECT_EQ(NumRtpPackets(), send_info.senders[0].packets_sent);
+  EXPECT_EQ(0.0, send_info.senders[0].fraction_lost);
+  ASSERT_TRUE(send_info.senders[0].codec_payload_type);
+  EXPECT_EQ(DefaultCodec().id, *send_info.senders[0].codec_payload_type);
+  EXPECT_EQ(0, send_info.senders[0].firs_received);
+  EXPECT_EQ(0, send_info.senders[0].plis_received);
+  EXPECT_EQ(0u, send_info.senders[0].nacks_received);
+  EXPECT_EQ(kVideoWidth, send_info.senders[0].send_frame_width);
+  EXPECT_EQ(kVideoHeight, send_info.senders[0].send_frame_height);
+  EXPECT_GT(send_info.senders[0].framerate_input, 0);
+  EXPECT_GT(send_info.senders[0].framerate_sent, 0);
+
+  EXPECT_EQ(1U, send_info.send_codecs.count(DefaultCodec().id));
+  EXPECT_EQ(DefaultCodec().ToCodecParameters(),
+            send_info.send_codecs[DefaultCodec().id]);
+
+  // Receiver-side checks; sender and receiver report the same SSRC.
+  ASSERT_EQ(1U, receive_info.receivers.size());
+  EXPECT_EQ(1U, send_info.senders[0].ssrcs().size());
+  EXPECT_EQ(1U, receive_info.receivers[0].ssrcs().size());
+  EXPECT_EQ(send_info.senders[0].ssrcs()[0],
+            receive_info.receivers[0].ssrcs()[0]);
+  ASSERT_TRUE(receive_info.receivers[0].codec_payload_type);
+  EXPECT_EQ(DefaultCodec().id, *receive_info.receivers[0].codec_payload_type);
+  EXPECT_EQ(NumRtpBytes() - kRtpHeaderSize * NumRtpPackets(),
+            receive_info.receivers[0].payload_bytes_received);
+  EXPECT_EQ(NumRtpPackets(), receive_info.receivers[0].packets_received);
+  EXPECT_EQ(0, receive_info.receivers[0].packets_lost);
+  // TODO(asapersson): Not set for webrtc. Handle missing stats.
+  // EXPECT_EQ(0, receive_info.receivers[0].packets_concealed);
+  EXPECT_EQ(0, receive_info.receivers[0].firs_sent);
+  EXPECT_EQ(0, receive_info.receivers[0].plis_sent);
+  EXPECT_EQ(0U, receive_info.receivers[0].nacks_sent);
+  EXPECT_EQ(kVideoWidth, receive_info.receivers[0].frame_width);
+  EXPECT_EQ(kVideoHeight, receive_info.receivers[0].frame_height);
+  EXPECT_GT(receive_info.receivers[0].framerate_received, 0);
+  EXPECT_GT(receive_info.receivers[0].framerate_decoded, 0);
+  EXPECT_GT(receive_info.receivers[0].framerate_output, 0);
+  EXPECT_GT(receive_info.receivers[0].jitter_buffer_delay_seconds, 0.0);
+  EXPECT_GT(receive_info.receivers[0].jitter_buffer_emitted_count, 0u);
+
+  EXPECT_EQ(1U, receive_info.receive_codecs.count(DefaultCodec().id));
+  EXPECT_EQ(DefaultCodec().ToCodecParameters(),
+            receive_info.receive_codecs[DefaultCodec().id]);
+}
+
+// Test that stats work properly for a conf call with multiple recv streams.
+TEST_F(WebRtcVideoChannelBaseTest, GetStatsMultipleRecvStreams) {
+  cricket::FakeVideoRenderer renderer1, renderer2;
+  EXPECT_TRUE(SetOneCodec(DefaultCodec()));
+  cricket::VideoSenderParameters parameters;
+  parameters.codecs.push_back(DefaultCodec());
+  parameters.conference_mode = true;
+  EXPECT_TRUE(send_channel_->SetSenderParameters(parameters));
+  EXPECT_TRUE(SetSend(true));
+  // Two receive streams, one renderer each.
+  EXPECT_TRUE(
+      receive_channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(1)));
+  EXPECT_TRUE(
+      receive_channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(2)));
+  EXPECT_TRUE(receive_channel_->SetSink(1, &renderer1));
+  EXPECT_TRUE(receive_channel_->SetSink(2, &renderer2));
+  EXPECT_EQ(0, renderer1.num_rendered_frames());
+  EXPECT_EQ(0, renderer2.num_rendered_frames());
+  // The fake network fans sent packets out to both SSRCs in conference mode.
+  std::vector<uint32_t> ssrcs;
+  ssrcs.push_back(1);
+  ssrcs.push_back(2);
+  network_interface_.SetConferenceMode(true, ssrcs);
+  SendFrame();
+  EXPECT_FRAME_ON_RENDERER(renderer1, 1, kVideoWidth, kVideoHeight);
+  EXPECT_FRAME_ON_RENDERER(renderer2, 1, kVideoWidth, kVideoHeight);
+
+  EXPECT_TRUE(send_channel_->SetSend(false));
+
+  cricket::VideoMediaSendInfo send_info;
+  cricket::VideoMediaReceiveInfo receive_info;
+  EXPECT_TRUE(send_channel_->GetStats(&send_info));
+  EXPECT_TRUE(receive_channel_->GetStats(&receive_info));
+
+  ASSERT_EQ(1U, send_info.senders.size());
+  // TODO(whyuan): bytes_sent and bytes_received are different. Are both
+  // payload? For webrtc, bytes_sent does not include the RTP header length.
+  EXPECT_EQ(NumRtpBytes() - kRtpHeaderSize * NumRtpPackets(),
+            GetSenderStats(0).payload_bytes_sent);
+  EXPECT_EQ(NumRtpPackets(), GetSenderStats(0).packets_sent);
+  EXPECT_EQ(kVideoWidth, GetSenderStats(0).send_frame_width);
+  EXPECT_EQ(kVideoHeight, GetSenderStats(0).send_frame_height);
+
+  // Both receivers saw the same traffic.
+  ASSERT_EQ(2U, receive_info.receivers.size());
+  for (size_t i = 0; i < receive_info.receivers.size(); ++i) {
+    EXPECT_EQ(1U, GetReceiverStats(i).ssrcs().size());
+    EXPECT_EQ(i + 1, GetReceiverStats(i).ssrcs()[0]);
+    EXPECT_EQ(NumRtpBytes() - kRtpHeaderSize * NumRtpPackets(),
+              GetReceiverStats(i).payload_bytes_received);
+    EXPECT_EQ(NumRtpPackets(), GetReceiverStats(i).packets_received);
+    EXPECT_EQ(kVideoWidth, GetReceiverStats(i).frame_width);
+    EXPECT_EQ(kVideoHeight, GetReceiverStats(i).frame_height);
+  }
+}
+
+// Test that stats work properly for a conf call with multiple send streams.
+TEST_F(WebRtcVideoChannelBaseTest, GetStatsMultipleSendStreams) {
+  // Normal setup; note that we set the SSRC explicitly to ensure that
+  // it will come first in the senders map.
+  EXPECT_TRUE(SetOneCodec(DefaultCodec()));
+  cricket::VideoSenderParameters parameters;
+  parameters.codecs.push_back(DefaultCodec());
+  parameters.conference_mode = true;
+  EXPECT_TRUE(send_channel_->SetSenderParameters(parameters));
+  EXPECT_TRUE(receive_channel_->AddRecvStream(
+      cricket::StreamParams::CreateLegacy(kSsrc)));
+  EXPECT_TRUE(receive_channel_->SetSink(kSsrc, &renderer_));
+  EXPECT_TRUE(SetSend(true));
+  SendFrame();
+  EXPECT_GT(NumRtpPackets(), 0);
+  EXPECT_FRAME(1, kVideoWidth, kVideoHeight);
+
+  // Add an additional capturer, and hook up a renderer to receive it.
+  cricket::FakeVideoRenderer renderer2;
+  webrtc::test::FrameForwarder frame_forwarder;
+  const int kTestWidth = 160;
+  const int kTestHeight = 120;
+  cricket::FakeFrameSource frame_source(kTestWidth, kTestHeight,
+                                        rtc::kNumMicrosecsPerSec / 5);
+  EXPECT_TRUE(
+      send_channel_->AddSendStream(cricket::StreamParams::CreateLegacy(5678)));
+  EXPECT_TRUE(send_channel_->SetVideoSend(5678, nullptr, &frame_forwarder));
+  EXPECT_TRUE(receive_channel_->AddRecvStream(
+      cricket::StreamParams::CreateLegacy(5678)));
+  EXPECT_TRUE(receive_channel_->SetSink(5678, &renderer2));
+  frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame());
+  time_controller_.AdvanceTime(kFrameDuration);
+  EXPECT_FRAME_ON_RENDERER(renderer2, 1, kTestWidth, kTestHeight);
+
+  // Get stats, and make sure they are correct for two senders
+  cricket::VideoMediaSendInfo send_info;
+  EXPECT_TRUE(send_channel_->GetStats(&send_info));
+
+  ASSERT_EQ(2U, send_info.senders.size());
+
+  // Total packet count is split between the two senders.
+  EXPECT_EQ(NumRtpPackets(), send_info.senders[0].packets_sent +
+                                 send_info.senders[1].packets_sent);
+  EXPECT_EQ(1U, send_info.senders[0].ssrcs().size());
+  EXPECT_EQ(1234U, send_info.senders[0].ssrcs()[0]);
+  EXPECT_EQ(kVideoWidth, send_info.senders[0].send_frame_width);
+  EXPECT_EQ(kVideoHeight, send_info.senders[0].send_frame_height);
+  EXPECT_EQ(1U, send_info.senders[1].ssrcs().size());
+  EXPECT_EQ(5678U, send_info.senders[1].ssrcs()[0]);
+  EXPECT_EQ(kTestWidth, send_info.senders[1].send_frame_width);
+  EXPECT_EQ(kTestHeight, send_info.senders[1].send_frame_height);
+  // The capturer must be unregistered here as it runs out of it's scope next.
+  send_channel_->SetVideoSend(5678, nullptr, nullptr);
+}
+
+// Test that we can set the bandwidth cap via the sender parameters.
+TEST_F(WebRtcVideoChannelBaseTest, SetSendBandwidth) {
+  cricket::VideoSenderParameters params;
+  params.codecs.push_back(DefaultCodec());
+  // A non-positive cap means "unlimited".
+  params.max_bandwidth_bps = -1;
+  EXPECT_TRUE(send_channel_->SetSenderParameters(params));
+  // Then apply a concrete 128 kbps cap.
+  params.max_bandwidth_bps = 128 * 1024;
+  EXPECT_TRUE(send_channel_->SetSenderParameters(params));
+}
+
+// Test that we can set the SSRC for the default send source.
+TEST_F(WebRtcVideoChannelBaseTest, SetSendSsrc) {
+  EXPECT_TRUE(SetDefaultCodec());
+  EXPECT_TRUE(SetSend(true));
+  SendFrame();
+  EXPECT_GT(NumRtpPackets(), 0);
+  RtpPacket header;
+  EXPECT_TRUE(header.Parse(GetRtpPacket(0)));
+  // All traffic goes out with the SSRC added in SetUp().
+  EXPECT_EQ(kSsrc, header.Ssrc());
+
+  // Packets are being paced out, so these can mismatch between the first and
+  // second call to NumRtpPackets until pending packets are paced out.
+  EXPECT_EQ(NumRtpPackets(), NumRtpPackets(header.Ssrc()));
+  EXPECT_EQ(NumRtpBytes(), NumRtpBytes(header.Ssrc()));
+  EXPECT_EQ(1, NumSentSsrcs());
+  // No traffic on any other SSRC.
+  EXPECT_EQ(0, NumRtpPackets(kSsrc - 1));
+  EXPECT_EQ(0, NumRtpBytes(kSsrc - 1));
+}
+
+// Test that we can set the SSRC even after codecs are set.
+TEST_F(WebRtcVideoChannelBaseTest, SetSendSsrcAfterSetCodecs) {
+  // Remove stream added in Setup.
+  EXPECT_TRUE(send_channel_->RemoveSendStream(kSsrc));
+  EXPECT_TRUE(SetDefaultCodec());
+  // Add a new stream (SSRC 999) after the codec was already negotiated.
+  EXPECT_TRUE(
+      send_channel_->AddSendStream(cricket::StreamParams::CreateLegacy(999)));
+  EXPECT_TRUE(
+      send_channel_->SetVideoSend(999u, nullptr, frame_forwarder_.get()));
+  EXPECT_TRUE(SetSend(true));
+  EXPECT_TRUE(WaitAndSendFrame(0));
+  EXPECT_GT(NumRtpPackets(), 0);
+  // All outgoing media must use the newly configured SSRC.
+  RtpPacket header;
+  EXPECT_TRUE(header.Parse(GetRtpPacket(0)));
+  EXPECT_EQ(999u, header.Ssrc());
+  // Packets are being paced out, so these can mismatch between the first and
+  // second call to NumRtpPackets until pending packets are paced out.
+  EXPECT_EQ(NumRtpPackets(), NumRtpPackets(header.Ssrc()));
+  EXPECT_EQ(NumRtpBytes(), NumRtpBytes(header.Ssrc()));
+  EXPECT_EQ(1, NumSentSsrcs());
+  // Nothing may still be sent on the removed default SSRC.
+  EXPECT_EQ(0, NumRtpPackets(kSsrc));
+  EXPECT_EQ(0, NumRtpBytes(kSsrc));
+}
+
+// Test that we can set the default video renderer before and after
+// media is received.
+TEST_F(WebRtcVideoChannelBaseTest, SetSink) {
+  RtpPacketReceived packet;
+  packet.SetSsrc(kSsrc);
+  // Clearing the default sink before any media arrives must be legal.
+  // (nullptr instead of NULL, for consistency with the rest of this file.)
+  receive_channel_->SetDefaultSink(nullptr);
+  EXPECT_TRUE(SetDefaultCodec());
+  EXPECT_TRUE(SetSend(true));
+  EXPECT_EQ(0, renderer_.num_rendered_frames());
+  // Install the renderer, then deliver a packet and send a frame; the frame
+  // must reach the late-installed default sink.
+  receive_channel_->SetDefaultSink(&renderer_);
+  receive_channel_->OnPacketReceived(packet);
+  SendFrame();
+  EXPECT_FRAME(1, kVideoWidth, kVideoHeight);
+}
+
+// Tests setting up and configuring a send stream.
+TEST_F(WebRtcVideoChannelBaseTest, AddRemoveSendStreams) {
+  EXPECT_TRUE(SetOneCodec(DefaultCodec()));
+  EXPECT_TRUE(SetSend(true));
+  receive_channel_->SetDefaultSink(&renderer_);
+  SendFrame();
+  EXPECT_FRAME(1, kVideoWidth, kVideoHeight);
+  EXPECT_GT(NumRtpPackets(), 0);
+  // The last packet sent so far must carry the default SSRC.
+  RtpPacket header;
+  size_t last_packet = NumRtpPackets() - 1;
+  EXPECT_TRUE(header.Parse(GetRtpPacket(static_cast<int>(last_packet))));
+  EXPECT_EQ(kSsrc, header.Ssrc());
+
+  // Remove the send stream that was added during Setup.
+  EXPECT_TRUE(send_channel_->RemoveSendStream(kSsrc));
+  int rtp_packets = NumRtpPackets();
+
+  EXPECT_TRUE(
+      send_channel_->AddSendStream(cricket::StreamParams::CreateLegacy(789u)));
+  EXPECT_TRUE(
+      send_channel_->SetVideoSend(789u, nullptr, frame_forwarder_.get()));
+  // Adding the stream alone must not emit any packets...
+  EXPECT_EQ(rtp_packets, NumRtpPackets());
+  // Wait 30ms to guarantee the engine does not drop the frame.
+  EXPECT_TRUE(WaitAndSendFrame(30));
+  // ...but sending a frame afterwards must.
+  EXPECT_GT(NumRtpPackets(), rtp_packets);
+
+  // Media is now sent on the newly added SSRC.
+  last_packet = NumRtpPackets() - 1;
+  EXPECT_TRUE(header.Parse(GetRtpPacket(static_cast<int>(last_packet))));
+  EXPECT_EQ(789u, header.Ssrc());
+}
+
+// Tests the behavior of incoming streams in a conference scenario.
+TEST_F(WebRtcVideoChannelBaseTest, SimulateConference) {
+  cricket::FakeVideoRenderer renderer1, renderer2;
+  EXPECT_TRUE(SetDefaultCodec());
+  cricket::VideoSenderParameters parameters;
+  parameters.codecs.push_back(DefaultCodec());
+  parameters.conference_mode = true;
+  EXPECT_TRUE(send_channel_->SetSenderParameters(parameters));
+  EXPECT_TRUE(SetSend(true));
+  // Two receive streams, each with its own renderer.
+  EXPECT_TRUE(
+      receive_channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(1)));
+  EXPECT_TRUE(
+      receive_channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(2)));
+  EXPECT_TRUE(receive_channel_->SetSink(1, &renderer1));
+  EXPECT_TRUE(receive_channel_->SetSink(2, &renderer2));
+  EXPECT_EQ(0, renderer1.num_rendered_frames());
+  EXPECT_EQ(0, renderer2.num_rendered_frames());
+  // Let the fake network fan the sent media out to both receive SSRCs.
+  std::vector<uint32_t> ssrcs;
+  ssrcs.push_back(1);
+  ssrcs.push_back(2);
+  network_interface_.SetConferenceMode(true, ssrcs);
+  SendFrame();
+  EXPECT_FRAME_ON_RENDERER(renderer1, 1, kVideoWidth, kVideoHeight);
+  EXPECT_FRAME_ON_RENDERER(renderer2, 1, kVideoWidth, kVideoHeight);
+
+  EXPECT_EQ(DefaultCodec().id, GetPayloadType(GetRtpPacket(0)));
+  // NOTE(review): these size checks look redundant with the
+  // EXPECT_FRAME_ON_RENDERER calls above -- confirm against the macro.
+  EXPECT_EQ(kVideoWidth, renderer1.width());
+  EXPECT_EQ(kVideoHeight, renderer1.height());
+  EXPECT_EQ(kVideoWidth, renderer2.width());
+  EXPECT_EQ(kVideoHeight, renderer2.height());
+  EXPECT_TRUE(receive_channel_->RemoveRecvStream(2));
+  EXPECT_TRUE(receive_channel_->RemoveRecvStream(1));
+}
+
+// Tests that we can add and remove capturers and frames are sent out properly
+TEST_F(WebRtcVideoChannelBaseTest, DISABLED_AddRemoveCapturer) {
+  using cricket::FOURCC_I420;
+  using cricket::VideoCodec;
+  using cricket::VideoFormat;
+  using cricket::VideoOptions;
+
+  VideoCodec codec = DefaultCodec();
+  const int time_between_send_ms = VideoFormat::FpsToInterval(kFramerate);
+  EXPECT_TRUE(SetOneCodec(codec));
+  EXPECT_TRUE(SetSend(true));
+  receive_channel_->SetDefaultSink(&renderer_);
+  EXPECT_EQ(0, renderer_.num_rendered_frames());
+  SendFrame();
+  EXPECT_FRAME(1, kVideoWidth, kVideoHeight);
+
+  webrtc::test::FrameForwarder frame_forwarder;
+  cricket::FakeFrameSource frame_source(480, 360, rtc::kNumMicrosecsPerSec / 30,
+                                        rtc::kNumMicrosecsPerSec / 30);
+
+  // TODO(nisse): This testcase fails if we don't configure
+  // screencast. It's unclear why, I see nothing obvious in this
+  // test which is related to screencast logic.
+  VideoOptions video_options;
+  video_options.is_screencast = true;
+  send_channel_->SetVideoSend(kSsrc, &video_options, nullptr);
+
+  int captured_frames = 1;
+  for (int iterations = 0; iterations < 2; ++iterations) {
+    EXPECT_TRUE(send_channel_->SetVideoSend(kSsrc, nullptr, &frame_forwarder));
+    time_controller_.AdvanceTime(TimeDelta::Millis(time_between_send_ms));
+    frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame());
+
+    ++captured_frames;
+    // Check if the right size was captured. Individual EXPECTs (instead of a
+    // single combined EXPECT_TRUE over all conditions, which duplicated the
+    // checks below with a useless failure message) pinpoint what failed.
+    EXPECT_GE(renderer_.num_rendered_frames(), captured_frames);
+    EXPECT_EQ(480, renderer_.width());
+    EXPECT_EQ(360, renderer_.height());
+    captured_frames = renderer_.num_rendered_frames() + 1;
+    EXPECT_FALSE(renderer_.black_frame());
+    EXPECT_TRUE(send_channel_->SetVideoSend(kSsrc, nullptr, nullptr));
+    // Make sure a black frame was generated.
+    // The black frame should have the resolution of the previous frame to
+    // prevent expensive encoder reconfigurations.
+    EXPECT_GE(renderer_.num_rendered_frames(), captured_frames);
+    EXPECT_EQ(480, renderer_.width());
+    EXPECT_EQ(360, renderer_.height());
+    EXPECT_TRUE(renderer_.black_frame());
+
+    // The black frame has the same timestamp as the next frame since its
+    // timestamp is set to the last frame's timestamp + interval. WebRTC will
+    // not render a frame with the same timestamp so capture another frame
+    // with the frame capturer to increment the next frame's timestamp.
+    frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame());
+  }
+}
+
+// Tests that if SetVideoSend is called with a NULL capturer after the
+// capturer was already removed, the application doesn't crash (and no black
+// frame is sent).
+TEST_F(WebRtcVideoChannelBaseTest, RemoveCapturerWithoutAdd) {
+  EXPECT_TRUE(SetOneCodec(DefaultCodec()));
+  EXPECT_TRUE(SetSend(true));
+  receive_channel_->SetDefaultSink(&renderer_);
+  EXPECT_EQ(0, renderer_.num_rendered_frames());
+  SendFrame();
+  EXPECT_FRAME(1, kVideoWidth, kVideoHeight);
+  // Allow one frame so they don't get dropped because we send frames too
+  // tightly.
+  time_controller_.AdvanceTime(kFrameDuration);
+  // Remove the capturer.
+  EXPECT_TRUE(send_channel_->SetVideoSend(kSsrc, nullptr, nullptr));
+
+  // No capturer was added, so this SetVideoSend shouldn't do anything.
+  EXPECT_TRUE(send_channel_->SetVideoSend(kSsrc, nullptr, nullptr));
+  time_controller_.AdvanceTime(TimeDelta::Millis(300));
+  // Verify no more frames were sent: the frame count stays at the single
+  // frame rendered above, i.e. no extra black frame was injected.
+  EXPECT_EQ(1, renderer_.num_rendered_frames());
+}
+
+// Tests that we can add and remove capturers as unique sources.
+TEST_F(WebRtcVideoChannelBaseTest, AddRemoveCapturerMultipleSources) {
+  // Set up the stream associated with the engine.
+  EXPECT_TRUE(receive_channel_->AddRecvStream(
+      cricket::StreamParams::CreateLegacy(kSsrc)));
+  EXPECT_TRUE(receive_channel_->SetSink(kSsrc, &renderer_));
+  // Set up additional stream 1. Setting a sink before the stream exists
+  // must fail. (Removed an unused local `capture_format` that was
+  // constructed here but never referenced.)
+  cricket::FakeVideoRenderer renderer1;
+  EXPECT_FALSE(receive_channel_->SetSink(1, &renderer1));
+  EXPECT_TRUE(
+      receive_channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(1)));
+  EXPECT_TRUE(receive_channel_->SetSink(1, &renderer1));
+  EXPECT_TRUE(
+      send_channel_->AddSendStream(cricket::StreamParams::CreateLegacy(1)));
+
+  webrtc::test::FrameForwarder frame_forwarder1;
+  cricket::FakeFrameSource frame_source(kVideoWidth, kVideoHeight,
+                                        rtc::kNumMicrosecsPerSec / kFramerate);
+
+  // Set up additional stream 2.
+  cricket::FakeVideoRenderer renderer2;
+  EXPECT_FALSE(receive_channel_->SetSink(2, &renderer2));
+  EXPECT_TRUE(
+      receive_channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(2)));
+  EXPECT_TRUE(receive_channel_->SetSink(2, &renderer2));
+  EXPECT_TRUE(
+      send_channel_->AddSendStream(cricket::StreamParams::CreateLegacy(2)));
+  webrtc::test::FrameForwarder frame_forwarder2;
+
+  // State for all the streams.
+  EXPECT_TRUE(SetOneCodec(DefaultCodec()));
+  // A limitation in the lmi implementation requires that SetVideoSend() is
+  // called after SetOneCodec().
+  // TODO(hellner): this seems like an unnecessary constraint, fix it.
+  EXPECT_TRUE(send_channel_->SetVideoSend(1, nullptr, &frame_forwarder1));
+  EXPECT_TRUE(send_channel_->SetVideoSend(2, nullptr, &frame_forwarder2));
+  EXPECT_TRUE(SetSend(true));
+  // Test capturer associated with engine.
+  const int kTestWidth = 160;
+  const int kTestHeight = 120;
+  frame_forwarder1.IncomingCapturedFrame(frame_source.GetFrame(
+      kTestWidth, kTestHeight, webrtc::VideoRotation::kVideoRotation_0,
+      rtc::kNumMicrosecsPerSec / kFramerate));
+  time_controller_.AdvanceTime(kFrameDuration);
+  EXPECT_FRAME_ON_RENDERER(renderer1, 1, kTestWidth, kTestHeight);
+  // Capture a frame with additional capturer2, frames should be received
+  frame_forwarder2.IncomingCapturedFrame(frame_source.GetFrame(
+      kTestWidth, kTestHeight, webrtc::VideoRotation::kVideoRotation_0,
+      rtc::kNumMicrosecsPerSec / kFramerate));
+  time_controller_.AdvanceTime(kFrameDuration);
+  EXPECT_FRAME_ON_RENDERER(renderer2, 1, kTestWidth, kTestHeight);
+  // Successfully remove the capturer.
+  EXPECT_TRUE(send_channel_->SetVideoSend(kSsrc, nullptr, nullptr));
+  // The capturers must be unregistered here as they run out of their scope
+  // next.
+  EXPECT_TRUE(send_channel_->SetVideoSend(1, nullptr, nullptr));
+  EXPECT_TRUE(send_channel_->SetVideoSend(2, nullptr, nullptr));
+}
+
+// Tests empty StreamParams is rejected.
+TEST_F(WebRtcVideoChannelBaseTest, RejectEmptyStreamParams) {
+  // Drop the stream installed by Setup so the channel starts empty.
+  EXPECT_TRUE(send_channel_->RemoveSendStream(kSsrc));
+
+  // An empty stream description must be refused...
+  cricket::StreamParams empty;
+  EXPECT_FALSE(send_channel_->AddSendStream(empty));
+  // ...while a well-formed one is still accepted afterwards.
+  EXPECT_TRUE(
+      send_channel_->AddSendStream(cricket::StreamParams::CreateLegacy(789u)));
+}
+
+// Test that multiple send streams can be created and deleted properly.
+TEST_F(WebRtcVideoChannelBaseTest, MultipleSendStreams) {
+  // Remove stream added in Setup. I.e. remove stream corresponding to default
+  // channel.
+  EXPECT_TRUE(send_channel_->RemoveSendStream(kSsrc));
+  // Add one send stream per SSRC in the test set.
+  for (uint32_t ssrc : kSsrcs4) {
+    EXPECT_TRUE(send_channel_->AddSendStream(
+        cricket::StreamParams::CreateLegacy(ssrc)));
+  }
+  // Delete one of the non default channel streams, let the destructor delete
+  // the remaining ones.
+  const uint32_t last_ssrc = kSsrcs4[sizeof(kSsrcs4) / sizeof(kSsrcs4[0]) - 1];
+  EXPECT_TRUE(send_channel_->RemoveSendStream(last_ssrc));
+  // Stream should already be deleted.
+  EXPECT_FALSE(send_channel_->RemoveSendStream(last_ssrc));
+}
+
+// NOTE(review): despite the Vga suffix, this runs the same
+// SendAndReceive(VP8) flow as the Qvga/SvcQqvga variants; the resolution
+// presumably comes from the fixture -- confirm distinct configs were intended.
+TEST_F(WebRtcVideoChannelBaseTest, SendAndReceiveVp8Vga) {
+  SendAndReceive(GetEngineCodec("VP8"));
+}
+
+// NOTE(review): body is identical to the Vga variant; no QVGA-specific
+// configuration is visible here -- confirm whether one was intended.
+TEST_F(WebRtcVideoChannelBaseTest, SendAndReceiveVp8Qvga) {
+  SendAndReceive(GetEngineCodec("VP8"));
+}
+
+// NOTE(review): body is identical to the Vga variant; no SVC/QQVGA-specific
+// configuration is visible here -- confirm whether one was intended.
+TEST_F(WebRtcVideoChannelBaseTest, SendAndReceiveVp8SvcQqvga) {
+  SendAndReceive(GetEngineCodec("VP8"));
+}
+
+TEST_F(WebRtcVideoChannelBaseTest, TwoStreamsSendAndReceive) {
+  // Use a high start bitrate so VP8 does not downscale the streams due to
+  // low initial start bitrates. That currently happens below ~250k, and two
+  // streams sharing 300k initially would use QVGA instead of VGA.
+  // TODO(pbos): Set up the quality scaler so that both senders reliably start
+  // at QVGA, then verify that instead.
+  cricket::VideoCodec vp8_codec = GetEngineCodec("VP8");
+  vp8_codec.params[kCodecParamStartBitrate] = "1000000";
+  TwoStreamsSendAndReceive(vp8_codec);
+}
+
+#if defined(RTC_ENABLE_VP9)
+
+// Falling back from VP9 moves to the next negotiated codec (VP8); a second
+// fallback with nothing left keeps the current codec.
+TEST_F(WebRtcVideoChannelBaseTest, RequestEncoderFallback) {
+  cricket::VideoSenderParameters parameters;
+  parameters.codecs.push_back(GetEngineCodec("VP9"));
+  parameters.codecs.push_back(GetEngineCodec("VP8"));
+  EXPECT_TRUE(send_channel_->SetSenderParameters(parameters));
+
+  absl::optional<VideoCodec> codec = send_channel_->GetSendCodec();
+  ASSERT_TRUE(codec);
+  EXPECT_EQ("VP9", codec->name);
+
+  // RequestEncoderFallback will post a task to the worker thread (which is
+  // also the current thread), hence the AdvanceTime call to run it.
+  SendImpl()->RequestEncoderFallback();
+  time_controller_.AdvanceTime(kFrameDuration);
+  codec = send_channel_->GetSendCodec();
+  ASSERT_TRUE(codec);
+  EXPECT_EQ("VP8", codec->name);
+
+  // No other codec to fall back to, keep using VP8.
+  SendImpl()->RequestEncoderFallback();
+  time_controller_.AdvanceTime(kFrameDuration);
+  codec = send_channel_->GetSendCodec();
+  ASSERT_TRUE(codec);
+  EXPECT_EQ("VP8", codec->name);
+}
+
+// Requesting a switch to an unavailable codec with default fallback allowed
+// moves to the next negotiated codec instead.
+TEST_F(WebRtcVideoChannelBaseTest, RequestEncoderSwitchDefaultFallback) {
+  cricket::VideoSenderParameters parameters;
+  parameters.codecs.push_back(GetEngineCodec("VP9"));
+  parameters.codecs.push_back(GetEngineCodec("VP8"));
+  EXPECT_TRUE(send_channel_->SetSenderParameters(parameters));
+
+  absl::optional<VideoCodec> codec = send_channel_->GetSendCodec();
+  ASSERT_TRUE(codec);
+  EXPECT_EQ("VP9", codec->name);
+
+  // RequestEncoderSwitch will post a task to the worker thread (which is also
+  // the current thread), hence the AdvanceTime call to run it.
+  SendImpl()->RequestEncoderSwitch(webrtc::SdpVideoFormat("UnavailableCodec"),
+                                   /*allow_default_fallback=*/true);
+  time_controller_.AdvanceTime(kFrameDuration);
+
+  // Requested encoder is not available. Default fallback is allowed. Switch to
+  // the next negotiated codec, VP8.
+  codec = send_channel_->GetSendCodec();
+  ASSERT_TRUE(codec);
+  EXPECT_EQ("VP8", codec->name);
+}
+
+// With default fallback disallowed, a switch request only takes effect when
+// the requested format (including its parameters) exactly matches a
+// negotiated codec.
+TEST_F(WebRtcVideoChannelBaseTest, RequestEncoderSwitchStrictPreference) {
+  VideoCodec vp9 = GetEngineCodec("VP9");
+  vp9.params["profile-id"] = "0";
+
+  cricket::VideoSenderParameters parameters;
+  parameters.codecs.push_back(GetEngineCodec("VP8"));
+  parameters.codecs.push_back(vp9);
+  EXPECT_TRUE(send_channel_->SetSenderParameters(parameters));
+
+  absl::optional<VideoCodec> codec = send_channel_->GetSendCodec();
+  ASSERT_TRUE(codec);
+  EXPECT_EQ("VP8", codec->name);
+
+  SendImpl()->RequestEncoderSwitch(
+      webrtc::SdpVideoFormat("VP9", {{"profile-id", "1"}}),
+      /*allow_default_fallback=*/false);
+  time_controller_.AdvanceTime(kFrameDuration);
+
+  // VP9 profile_id=1 is not available. Default fallback is not allowed. Switch
+  // is not performed.
+  codec = send_channel_->GetSendCodec();
+  ASSERT_TRUE(codec);
+  EXPECT_EQ("VP8", codec->name);
+
+  SendImpl()->RequestEncoderSwitch(
+      webrtc::SdpVideoFormat("VP9", {{"profile-id", "0"}}),
+      /*allow_default_fallback=*/false);
+  time_controller_.AdvanceTime(kFrameDuration);
+
+  // VP9 profile_id=0 is available. Switch encoder.
+  codec = send_channel_->GetSendCodec();
+  ASSERT_TRUE(codec);
+  EXPECT_EQ("VP9", codec->name);
+}
+
+// After a codec fallback, the codec actually in use must be listed first in
+// GetRtpSendParameters().codecs.
+TEST_F(WebRtcVideoChannelBaseTest, SendCodecIsMovedToFrontInRtpParameters) {
+  cricket::VideoSenderParameters parameters;
+  parameters.codecs.push_back(GetEngineCodec("VP9"));
+  parameters.codecs.push_back(GetEngineCodec("VP8"));
+  EXPECT_TRUE(send_channel_->SetSenderParameters(parameters));
+
+  auto send_codecs = send_channel_->GetRtpSendParameters(kSsrc).codecs;
+  ASSERT_EQ(send_codecs.size(), 2u);
+  // EXPECT_EQ instead of the previous EXPECT_THAT, whose (value, matcher)
+  // arguments were reversed.
+  EXPECT_EQ("VP9", send_codecs[0].name);
+
+  // RequestEncoderFallback will post a task to the worker thread (which is
+  // also the current thread), hence the AdvanceTime call to run it.
+  SendImpl()->RequestEncoderFallback();
+  time_controller_.AdvanceTime(kFrameDuration);
+
+  send_codecs = send_channel_->GetRtpSendParameters(kSsrc).codecs;
+  ASSERT_EQ(send_codecs.size(), 2u);
+  EXPECT_EQ("VP8", send_codecs[0].name);
+}
+
+#endif // defined(RTC_ENABLE_VP9)
+
+// Test fixture that exercises WebRtcVideoChannel send/receive channels on top
+// of a FakeCall, with the send and receive sides wired together the way the
+// production channel glue does it.
+class WebRtcVideoChannelTest : public WebRtcVideoEngineTest {
+ public:
+  WebRtcVideoChannelTest() : WebRtcVideoChannelTest("") {}
+  explicit WebRtcVideoChannelTest(const char* field_trials)
+      : WebRtcVideoEngineTest(field_trials),
+        frame_source_(1280, 720, rtc::kNumMicrosecsPerSec / 30),
+        last_ssrc_(0) {}
+  // Registers the supported codec types, creates the FakeCall plus the send
+  // and receive channels, and applies the engine's default parameters.
+  void SetUp() override {
+    AddSupportedVideoCodecType("VP8");
+    AddSupportedVideoCodecType("VP9");
+    AddSupportedVideoCodecType(
+        "AV1", {ScalabilityMode::kL1T3, ScalabilityMode::kL2T3});
+#if defined(WEBRTC_USE_H264)
+    AddSupportedVideoCodecType("H264");
+#endif
+
+    fake_call_.reset(new FakeCall(&field_trials_));
+    send_channel_ = engine_.CreateSendChannel(
+        fake_call_.get(), GetMediaConfig(), VideoOptions(),
+        webrtc::CryptoOptions(), video_bitrate_allocator_factory_.get());
+    receive_channel_ =
+        engine_.CreateReceiveChannel(fake_call_.get(), GetMediaConfig(),
+                                     VideoOptions(), webrtc::CryptoOptions());
+    // Wire the channels together: the receiver's RR SSRC choice and feedback
+    // parameters track the send side.
+    send_channel_->SetSsrcListChangedCallback(
+        [receive_channel =
+             receive_channel_.get()](const std::set<uint32_t>& choices) {
+          receive_channel->ChooseReceiverReportSsrc(choices);
+        });
+    send_channel_->SetSendCodecChangedCallback([this]() {
+      receive_channel_->SetReceiverFeedbackParameters(
+          send_channel_->SendCodecHasLntf(), send_channel_->SendCodecHasNack(),
+          send_channel_->SendCodecRtcpMode(),
+          send_channel_->SendCodecRtxTime());
+    });
+    send_channel_->OnReadyToSend(true);
+    receive_channel_->SetReceive(true);
+    last_ssrc_ = 123;
+    send_parameters_.codecs = engine_.send_codecs();
+    recv_parameters_.codecs = engine_.recv_codecs();
+    ASSERT_TRUE(send_channel_->SetSenderParameters(send_parameters_));
+  }
+
+  // Detaches the network interface before destroying the channels.
+  void TearDown() override {
+    send_channel_->SetInterface(nullptr);
+    receive_channel_->SetInterface(nullptr);
+    send_channel_.reset();
+    receive_channel_.reset();
+    fake_call_ = nullptr;
+  }
+
+  // Rebuilds the whole fixture from scratch mid-test.
+  void ResetTest() {
+    TearDown();
+    SetUp();
+  }
+
+  // Returns pointer to implementation of the send channel.
+  WebRtcVideoSendChannel* SendImpl() {
+    // Note that this function requires intimate knowledge of how the channel
+    // was created.
+    return static_cast<cricket::WebRtcVideoSendChannel*>(send_channel_.get());
+  }
+
+  // Casts a shim channel to a webrtc::Transport. Used once.
+  webrtc::Transport* ChannelImplAsTransport(
+      cricket::VideoMediaSendChannelInterface* channel) {
+    return static_cast<cricket::WebRtcVideoSendChannel*>(channel)->transport();
+  }
+
+  // Looks up `name` among the engine's send codecs; adds a test failure and
+  // returns a dummy codec if it is not found.
+  cricket::VideoCodec GetEngineCodec(const std::string& name) {
+    for (const cricket::VideoCodec& engine_codec : engine_.send_codecs()) {
+      if (absl::EqualsIgnoreCase(name, engine_codec.name))
+        return engine_codec;
+    }
+    // This point should never be reached.
+    ADD_FAILURE() << "Unrecognized codec name: " << name;
+    return cricket::CreateVideoCodec(0, "");
+  }
+
+  cricket::VideoCodec DefaultCodec() { return GetEngineCodec("VP8"); }
+
+  // After receiving and processing the packet, enough time is advanced that
+  // the unsignalled receive stream cooldown is no longer in effect.
+  void ReceivePacketAndAdvanceTime(const RtpPacketReceived& packet) {
+    receive_channel_->OnPacketReceived(packet);
+    time_controller_.AdvanceTime(
+        webrtc::TimeDelta::Millis(kUnsignalledReceiveStreamCooldownMs));
+  }
+
+ protected:
+  // Adds a send stream with a fresh SSRC and returns the fake stream the
+  // call created for it.
+  FakeVideoSendStream* AddSendStream() {
+    return AddSendStream(StreamParams::CreateLegacy(++last_ssrc_));
+  }
+
+  FakeVideoSendStream* AddSendStream(const StreamParams& sp) {
+    size_t num_streams = fake_call_->GetVideoSendStreams().size();
+    EXPECT_TRUE(send_channel_->AddSendStream(sp));
+    std::vector<FakeVideoSendStream*> streams =
+        fake_call_->GetVideoSendStreams();
+    EXPECT_EQ(num_streams + 1, streams.size());
+    return streams[streams.size() - 1];
+  }
+
+  std::vector<FakeVideoSendStream*> GetFakeSendStreams() {
+    return fake_call_->GetVideoSendStreams();
+  }
+
+  // Receive-side counterparts of the AddSendStream() helpers above.
+  FakeVideoReceiveStream* AddRecvStream() {
+    return AddRecvStream(StreamParams::CreateLegacy(++last_ssrc_));
+  }
+
+  FakeVideoReceiveStream* AddRecvStream(const StreamParams& sp) {
+    size_t num_streams = fake_call_->GetVideoReceiveStreams().size();
+    EXPECT_TRUE(receive_channel_->AddRecvStream(sp));
+    std::vector<FakeVideoReceiveStream*> streams =
+        fake_call_->GetVideoReceiveStreams();
+    EXPECT_EQ(num_streams + 1, streams.size());
+    return streams[streams.size() - 1];
+  }
+
+  // Applies min/start/max bitrate codec params (kbps strings) and expects the
+  // transport controller to receive the corresponding bps values.
+  void SetSendCodecsShouldWorkForBitrates(const char* min_bitrate_kbps,
+                                          int expected_min_bitrate_bps,
+                                          const char* start_bitrate_kbps,
+                                          int expected_start_bitrate_bps,
+                                          const char* max_bitrate_kbps,
+                                          int expected_max_bitrate_bps) {
+    ExpectSetBitrateParameters(expected_min_bitrate_bps,
+                               expected_start_bitrate_bps,
+                               expected_max_bitrate_bps);
+    auto& codecs = send_parameters_.codecs;
+    codecs.clear();
+    codecs.push_back(GetEngineCodec("VP8"));
+    codecs[0].params[kCodecParamMinBitrate] = min_bitrate_kbps;
+    codecs[0].params[kCodecParamStartBitrate] = start_bitrate_kbps;
+    codecs[0].params[kCodecParamMaxBitrate] = max_bitrate_kbps;
+    EXPECT_TRUE(send_channel_->SetSenderParameters(send_parameters_));
+  }
+
+  void ExpectSetBitrateParameters(int min_bitrate_bps,
+                                  int start_bitrate_bps,
+                                  int max_bitrate_bps) {
+    EXPECT_CALL(
+        *fake_call_->GetMockTransportControllerSend(),
+        SetSdpBitrateParameters(AllOf(
+            Field(&BitrateConstraints::min_bitrate_bps, min_bitrate_bps),
+            Field(&BitrateConstraints::start_bitrate_bps, start_bitrate_bps),
+            Field(&BitrateConstraints::max_bitrate_bps, max_bitrate_bps))));
+  }
+
+  void ExpectSetMaxBitrate(int max_bitrate_bps) {
+    EXPECT_CALL(*fake_call_->GetMockTransportControllerSend(),
+                SetSdpBitrateParameters(Field(
+                    &BitrateConstraints::max_bitrate_bps, max_bitrate_bps)));
+  }
+
+  void TestExtmapAllowMixedCaller(bool extmap_allow_mixed) {
+    // For a caller, the answer will be applied in set remote description
+    // where SetSenderParameters() is called.
+    EXPECT_TRUE(send_channel_->AddSendStream(
+        cricket::StreamParams::CreateLegacy(kSsrc)));
+    send_parameters_.extmap_allow_mixed = extmap_allow_mixed;
+    EXPECT_TRUE(send_channel_->SetSenderParameters(send_parameters_));
+    const webrtc::VideoSendStream::Config& config =
+        fake_call_->GetVideoSendStreams()[0]->GetConfig();
+    EXPECT_EQ(extmap_allow_mixed, config.rtp.extmap_allow_mixed);
+  }
+
+  void TestExtmapAllowMixedCallee(bool extmap_allow_mixed) {
+    // For a callee, the answer will be applied in set local description
+    // where SetExtmapAllowMixed() and AddSendStream() are called.
+    send_channel_->SetExtmapAllowMixed(extmap_allow_mixed);
+    EXPECT_TRUE(send_channel_->AddSendStream(
+        cricket::StreamParams::CreateLegacy(kSsrc)));
+    const webrtc::VideoSendStream::Config& config =
+        fake_call_->GetVideoSendStreams()[0]->GetConfig();
+    EXPECT_EQ(extmap_allow_mixed, config.rtp.extmap_allow_mixed);
+  }
+
+  // Verifies a send RTP header extension can be added, kept across a
+  // redundant update, removed, and re-added for existing streams.
+  void TestSetSendRtpHeaderExtensions(const std::string& ext_uri) {
+    // Enable extension.
+    const int id = 1;
+    cricket::VideoSenderParameters parameters = send_parameters_;
+    parameters.extensions.push_back(RtpExtension(ext_uri, id));
+    EXPECT_TRUE(send_channel_->SetSenderParameters(parameters));
+    FakeVideoSendStream* send_stream =
+        AddSendStream(cricket::StreamParams::CreateLegacy(123));
+
+    // Verify the send extension id.
+    ASSERT_EQ(1u, send_stream->GetConfig().rtp.extensions.size());
+    EXPECT_EQ(id, send_stream->GetConfig().rtp.extensions[0].id);
+    EXPECT_EQ(ext_uri, send_stream->GetConfig().rtp.extensions[0].uri);
+    // Verify call with same set of extensions returns true.
+    EXPECT_TRUE(send_channel_->SetSenderParameters(parameters));
+
+    // Verify that existing RTP header extensions can be removed.
+    EXPECT_TRUE(send_channel_->SetSenderParameters(send_parameters_));
+    ASSERT_EQ(1u, fake_call_->GetVideoSendStreams().size());
+    send_stream = fake_call_->GetVideoSendStreams()[0];
+    EXPECT_TRUE(send_stream->GetConfig().rtp.extensions.empty());
+
+    // Verify that adding receive RTP header extensions adds them for existing
+    // streams.
+    EXPECT_TRUE(send_channel_->SetSenderParameters(parameters));
+    send_stream = fake_call_->GetVideoSendStreams()[0];
+    ASSERT_EQ(1u, send_stream->GetConfig().rtp.extensions.size());
+    EXPECT_EQ(id, send_stream->GetConfig().rtp.extensions[0].id);
+    EXPECT_EQ(ext_uri, send_stream->GetConfig().rtp.extensions[0].uri);
+  }
+
+  // Receive-side counterpart of TestSetSendRtpHeaderExtensions().
+  void TestSetRecvRtpHeaderExtensions(const std::string& ext_uri) {
+    // Enable extension.
+    const int id = 1;
+    cricket::VideoReceiverParameters parameters = recv_parameters_;
+    parameters.extensions.push_back(RtpExtension(ext_uri, id));
+    EXPECT_TRUE(receive_channel_->SetReceiverParameters(parameters));
+
+    AddRecvStream(cricket::StreamParams::CreateLegacy(123));
+    EXPECT_THAT(
+        receive_channel_->GetRtpReceiverParameters(123).header_extensions,
+        ElementsAre(RtpExtension(ext_uri, id)));
+
+    // Verify call with same set of extensions returns true.
+    EXPECT_TRUE(receive_channel_->SetReceiverParameters(parameters));
+
+    // Verify that SetRecvRtpHeaderExtensions doesn't implicitly add them for
+    // senders.
+    EXPECT_TRUE(AddSendStream(cricket::StreamParams::CreateLegacy(123))
+                    ->GetConfig()
+                    .rtp.extensions.empty());
+
+    // Verify that existing RTP header extensions can be removed.
+    EXPECT_TRUE(receive_channel_->SetReceiverParameters(recv_parameters_));
+    EXPECT_THAT(
+        receive_channel_->GetRtpReceiverParameters(123).header_extensions,
+        IsEmpty());
+
+    // Verify that adding receive RTP header extensions adds them for existing
+    // streams.
+    EXPECT_TRUE(receive_channel_->SetReceiverParameters(parameters));
+    EXPECT_EQ(receive_channel_->GetRtpReceiverParameters(123).header_extensions,
+              parameters.extensions);
+  }
+
+  // Checks that LNTF (loss notification) is enabled/disabled consistently on
+  // the default codec, the send stream and the receive stream.
+  void TestLossNotificationState(bool expect_lntf_enabled) {
+    AssignDefaultCodec();
+    VerifyCodecHasDefaultFeedbackParams(*default_codec_, expect_lntf_enabled);
+
+    cricket::VideoSenderParameters parameters;
+    parameters.codecs = engine_.send_codecs();
+    EXPECT_TRUE(send_channel_->SetSenderParameters(parameters));
+    EXPECT_TRUE(send_channel_->SetSend(true));
+
+    // Send side.
+    FakeVideoSendStream* send_stream =
+        AddSendStream(cricket::StreamParams::CreateLegacy(1));
+    EXPECT_EQ(send_stream->GetConfig().rtp.lntf.enabled, expect_lntf_enabled);
+
+    // Receiver side.
+    FakeVideoReceiveStream* recv_stream =
+        AddRecvStream(cricket::StreamParams::CreateLegacy(1));
+    EXPECT_EQ(recv_stream->GetConfig().rtp.lntf.enabled, expect_lntf_enabled);
+  }
+
+  // Offers all `extensions` and verifies only `expected_extension` survives
+  // the filtering.
+  void TestExtensionFilter(const std::vector<std::string>& extensions,
+                           const std::string& expected_extension) {
+    cricket::VideoSenderParameters parameters = send_parameters_;
+    int expected_id = -1;
+    int id = 1;
+    for (const std::string& extension : extensions) {
+      if (extension == expected_extension)
+        expected_id = id;
+      parameters.extensions.push_back(RtpExtension(extension, id++));
+    }
+    EXPECT_TRUE(send_channel_->SetSenderParameters(parameters));
+    FakeVideoSendStream* send_stream =
+        AddSendStream(cricket::StreamParams::CreateLegacy(123));
+
+    // Verify that only one of them has been set, and that it is the one with
+    // highest priority (transport sequence number).
+    ASSERT_EQ(1u, send_stream->GetConfig().rtp.extensions.size());
+    EXPECT_EQ(expected_id, send_stream->GetConfig().rtp.extensions[0].id);
+    EXPECT_EQ(expected_extension,
+              send_stream->GetConfig().rtp.extensions[0].uri);
+  }
+
+  // The following helpers are defined elsewhere in this file.
+  void TestDegradationPreference(bool resolution_scaling_enabled,
+                                 bool fps_scaling_enabled);
+
+  void TestCpuAdaptation(bool enable_overuse, bool is_screenshare);
+  void TestReceiverLocalSsrcConfiguration(bool receiver_first);
+  void TestReceiveUnsignaledSsrcPacket(uint8_t payload_type,
+                                       bool expect_created_receive_stream);
+
+  FakeVideoSendStream* SetDenoisingOption(
+      uint32_t ssrc,
+      webrtc::test::FrameForwarder* frame_forwarder,
+      bool enabled) {
+    cricket::VideoOptions options;
+    options.video_noise_reduction = enabled;
+    EXPECT_TRUE(send_channel_->SetVideoSend(ssrc, &options, frame_forwarder));
+    // Options only take effect on the next frame.
+    frame_forwarder->IncomingCapturedFrame(frame_source_.GetFrame());
+
+    return fake_call_->GetVideoSendStreams().back();
+  }
+
+  // Adds a (simulcast) send stream with fresh SSRCs, optionally pairing an
+  // RTX SSRC with each media SSRC.
+  FakeVideoSendStream* SetUpSimulcast(bool enabled, bool with_rtx) {
+    const int kRtxSsrcOffset = 0xDEADBEEF;
+    last_ssrc_ += 3;
+    std::vector<uint32_t> ssrcs;
+    std::vector<uint32_t> rtx_ssrcs;
+    uint32_t num_streams = enabled ? kNumSimulcastStreams : 1;
+    for (uint32_t i = 0; i < num_streams; ++i) {
+      uint32_t ssrc = last_ssrc_ + i;
+      ssrcs.push_back(ssrc);
+      if (with_rtx) {
+        rtx_ssrcs.push_back(ssrc + kRtxSsrcOffset);
+      }
+    }
+    if (with_rtx) {
+      return AddSendStream(
+          cricket::CreateSimWithRtxStreamParams("cname", ssrcs, rtx_ssrcs));
+    }
+    return AddSendStream(CreateSimStreamParams("cname", ssrcs));
+  }
+
+  // Returns the encoder max bitrate of the single configured stream.
+  int GetMaxEncoderBitrate() {
+    std::vector<FakeVideoSendStream*> streams =
+        fake_call_->GetVideoSendStreams();
+    EXPECT_EQ(1u, streams.size());
+    FakeVideoSendStream* stream = streams[streams.size() - 1];
+    EXPECT_EQ(1u, stream->GetEncoderConfig().number_of_streams);
+    return stream->GetVideoStreams()[0].max_bitrate_bps;
+  }
+
+  // Applies a global and a per-encoding max bitrate and verifies both the
+  // read-back RtpParameters and the value that reached the encoder.
+  void SetAndExpectMaxBitrate(int global_max,
+                              int stream_max,
+                              int expected_encoder_bitrate) {
+    VideoSenderParameters limited_send_params = send_parameters_;
+    limited_send_params.max_bandwidth_bps = global_max;
+    EXPECT_TRUE(send_channel_->SetSenderParameters(limited_send_params));
+    webrtc::RtpParameters parameters =
+        send_channel_->GetRtpSendParameters(last_ssrc_);
+    EXPECT_EQ(1UL, parameters.encodings.size());
+    parameters.encodings[0].max_bitrate_bps = stream_max;
+    EXPECT_TRUE(
+        send_channel_->SetRtpSendParameters(last_ssrc_, parameters).ok());
+    // Read back the parameteres and verify they have the correct value
+    parameters = send_channel_->GetRtpSendParameters(last_ssrc_);
+    EXPECT_EQ(1UL, parameters.encodings.size());
+    EXPECT_EQ(stream_max, parameters.encodings[0].max_bitrate_bps);
+    // Verify that the new value propagated down to the encoder
+    EXPECT_EQ(expected_encoder_bitrate, GetMaxEncoderBitrate());
+  }
+
+  // Values from kSimulcastConfigs in simulcast.cc.
+  const std::vector<webrtc::VideoStream> GetSimulcastBitrates720p() const {
+    std::vector<webrtc::VideoStream> layers(3);
+    layers[0].min_bitrate_bps = 30000;
+    layers[0].target_bitrate_bps = 150000;
+    layers[0].max_bitrate_bps = 200000;
+    layers[1].min_bitrate_bps = 150000;
+    layers[1].target_bitrate_bps = 500000;
+    layers[1].max_bitrate_bps = 700000;
+    layers[2].min_bitrate_bps = 600000;
+    layers[2].target_bitrate_bps = 2500000;
+    layers[2].max_bitrate_bps = 2500000;
+    return layers;
+  }
+
+  cricket::FakeFrameSource frame_source_;
+  std::unique_ptr<FakeCall> fake_call_;
+  std::unique_ptr<VideoMediaSendChannelInterface> send_channel_;
+  std::unique_ptr<VideoMediaReceiveChannelInterface> receive_channel_;
+  cricket::VideoSenderParameters send_parameters_;
+  cricket::VideoReceiverParameters recv_parameters_;
+  // Last SSRC handed out by AddSendStream()/AddRecvStream().
+  uint32_t last_ssrc_;
+};
+
+// Verifies that the stream id (sync label) given in StreamParams ends up as
+// the sync_group of the created VideoReceiveStream config, which is what ties
+// audio/video streams together for A/V sync.
+TEST_F(WebRtcVideoChannelTest, SetsSyncGroupFromSyncLabel) {
+  const uint32_t kVideoSsrc = 123;
+  const std::string kSyncLabel = "AvSyncLabel";
+
+  cricket::StreamParams sp = cricket::StreamParams::CreateLegacy(kVideoSsrc);
+  sp.set_stream_ids({kSyncLabel});
+  EXPECT_TRUE(receive_channel_->AddRecvStream(sp));
+
+  EXPECT_EQ(1u, fake_call_->GetVideoReceiveStreams().size());
+  EXPECT_EQ(kSyncLabel,
+            fake_call_->GetVideoReceiveStreams()[0]->GetConfig().sync_group)
+      << "SyncGroup should be set based on sync_label";
+}
+
+// Verifies RTX wiring when simulcast + RTX StreamParams are used: on the send
+// side the RTX SSRCs propagate into the send stream config, and on the
+// receive side every decoder payload type (including RED) gets an associated
+// RTX payload type and the first RTX SSRC becomes the config's rtx_ssrc.
+TEST_F(WebRtcVideoChannelTest, RecvStreamWithSimAndRtx) {
+  cricket::VideoSenderParameters parameters;
+  parameters.codecs = engine_.send_codecs();
+  EXPECT_TRUE(send_channel_->SetSenderParameters(parameters));
+  EXPECT_TRUE(send_channel_->SetSend(true));
+  parameters.conference_mode = true;
+  EXPECT_TRUE(send_channel_->SetSenderParameters(parameters));
+
+  // Send side.
+  const std::vector<uint32_t> ssrcs = MAKE_VECTOR(kSsrcs1);
+  const std::vector<uint32_t> rtx_ssrcs = MAKE_VECTOR(kRtxSsrcs1);
+  FakeVideoSendStream* send_stream = AddSendStream(
+      cricket::CreateSimWithRtxStreamParams("cname", ssrcs, rtx_ssrcs));
+
+  ASSERT_EQ(rtx_ssrcs.size(), send_stream->GetConfig().rtp.rtx.ssrcs.size());
+  for (size_t i = 0; i < rtx_ssrcs.size(); ++i)
+    EXPECT_EQ(rtx_ssrcs[i], send_stream->GetConfig().rtp.rtx.ssrcs[i]);
+
+  // Receiver side.
+  FakeVideoReceiveStream* recv_stream = AddRecvStream(
+      cricket::CreateSimWithRtxStreamParams("cname", ssrcs, rtx_ssrcs));
+  EXPECT_FALSE(
+      recv_stream->GetConfig().rtp.rtx_associated_payload_types.empty());
+  EXPECT_TRUE(VerifyRtxReceiveAssociations(recv_stream->GetConfig()))
+      << "RTX should be mapped for all decoders/payload types.";
+  EXPECT_TRUE(HasRtxReceiveAssociation(recv_stream->GetConfig(),
+                                       GetEngineCodec("red").id))
+      << "RTX should be mapped for the RED payload type";
+
+  EXPECT_EQ(rtx_ssrcs[0], recv_stream->GetConfig().rtp.rtx_ssrc);
+}
+
+// Verifies that a single receive stream with an associated FID (RTX) SSRC
+// gets that SSRC in its config and that RTX payload-type associations are
+// created for all decoders, including RED.
+TEST_F(WebRtcVideoChannelTest, RecvStreamWithRtx) {
+  // Setup one channel with an associated RTX stream.
+  cricket::StreamParams params =
+      cricket::StreamParams::CreateLegacy(kSsrcs1[0]);
+  params.AddFidSsrc(kSsrcs1[0], kRtxSsrcs1[0]);
+  FakeVideoReceiveStream* recv_stream = AddRecvStream(params);
+  EXPECT_EQ(kRtxSsrcs1[0], recv_stream->GetConfig().rtp.rtx_ssrc);
+
+  EXPECT_TRUE(VerifyRtxReceiveAssociations(recv_stream->GetConfig()))
+      << "RTX should be mapped for all decoders/payload types.";
+  EXPECT_TRUE(HasRtxReceiveAssociation(recv_stream->GetConfig(),
+                                       GetEngineCodec("red").id))
+      << "RTX should be mapped for the RED payload type";
+}
+
+// Verifies that a receive stream created without an RTX SSRC leaves
+// rtp.rtx_ssrc at its zero default.
+TEST_F(WebRtcVideoChannelTest, RecvStreamNoRtx) {
+  // Setup one channel without an associated RTX stream.
+  cricket::StreamParams params =
+      cricket::StreamParams::CreateLegacy(kSsrcs1[0]);
+  FakeVideoReceiveStream* recv_stream = AddRecvStream(params);
+  ASSERT_EQ(0U, recv_stream->GetConfig().rtp.rtx_ssrc);
+}
+
+// Test propagation of extmap allow mixed setting.
+// Four combinations: caller/callee role x enabled/disabled, each delegating
+// to the corresponding fixture helper.
+TEST_F(WebRtcVideoChannelTest, SetExtmapAllowMixedAsCaller) {
+  TestExtmapAllowMixedCaller(/*extmap_allow_mixed=*/true);
+}
+TEST_F(WebRtcVideoChannelTest, SetExtmapAllowMixedDisabledAsCaller) {
+  TestExtmapAllowMixedCaller(/*extmap_allow_mixed=*/false);
+}
+TEST_F(WebRtcVideoChannelTest, SetExtmapAllowMixedAsCallee) {
+  TestExtmapAllowMixedCallee(/*extmap_allow_mixed=*/true);
+}
+TEST_F(WebRtcVideoChannelTest, SetExtmapAllowMixedDisabledAsCallee) {
+  TestExtmapAllowMixedCallee(/*extmap_allow_mixed=*/false);
+}
+
+// Verifies that neither newly added send streams nor receive streams get any
+// RTP header extensions unless extensions are explicitly configured.
+// NOTE(review): "Extesions" in the test name is a typo for "Extensions";
+// renaming would change the upstream test identifier, so it is left as is.
+TEST_F(WebRtcVideoChannelTest, NoHeaderExtesionsByDefault) {
+  FakeVideoSendStream* send_stream =
+      AddSendStream(cricket::StreamParams::CreateLegacy(kSsrcs1[0]));
+  ASSERT_TRUE(send_stream->GetConfig().rtp.extensions.empty());
+
+  AddRecvStream(cricket::StreamParams::CreateLegacy(kSsrcs1[0]));
+  ASSERT_TRUE(receive_channel_->GetRtpReceiverParameters(kSsrcs1[0])
+                  .header_extensions.empty());
+}
+
+// Test support for RTP timestamp offset header extension.
+// Each case delegates to the fixture helper that configures the extension
+// and checks it shows up on the created send/receive stream.
+TEST_F(WebRtcVideoChannelTest, SendRtpTimestampOffsetHeaderExtensions) {
+  TestSetSendRtpHeaderExtensions(RtpExtension::kTimestampOffsetUri);
+}
+
+TEST_F(WebRtcVideoChannelTest, RecvRtpTimestampOffsetHeaderExtensions) {
+  TestSetRecvRtpHeaderExtensions(RtpExtension::kTimestampOffsetUri);
+}
+
+// Test support for absolute send time header extension.
+TEST_F(WebRtcVideoChannelTest, SendAbsoluteSendTimeHeaderExtensions) {
+  TestSetSendRtpHeaderExtensions(RtpExtension::kAbsSendTimeUri);
+}
+
+TEST_F(WebRtcVideoChannelTest, RecvAbsoluteSendTimeHeaderExtensions) {
+  TestSetRecvRtpHeaderExtensions(RtpExtension::kAbsSendTimeUri);
+}
+
+// When several redundant bandwidth-estimation extensions are offered, the
+// channel should keep only the most capable one. With the
+// FilterAbsSendTimeExtension field trial enabled, transport sequence number
+// wins over abs-send-time and timestamp-offset.
+TEST_F(WebRtcVideoChannelTest, FiltersExtensionsPicksTransportSeqNum) {
+  webrtc::test::ScopedKeyValueConfig override_field_trials(
+      field_trials_, "WebRTC-FilterAbsSendTimeExtension/Enabled/");
+  // Enable three redundant extensions.
+  std::vector<std::string> extensions;
+  extensions.push_back(RtpExtension::kAbsSendTimeUri);
+  extensions.push_back(RtpExtension::kTimestampOffsetUri);
+  extensions.push_back(RtpExtension::kTransportSequenceNumberUri);
+  TestExtensionFilter(extensions, RtpExtension::kTransportSequenceNumberUri);
+}
+
+// Without transport sequence number in the offer, abs-send-time should be
+// preferred over timestamp-offset.
+TEST_F(WebRtcVideoChannelTest, FiltersExtensionsPicksAbsSendTime) {
+  // Enable two redundant extensions.
+  std::vector<std::string> extensions;
+  extensions.push_back(RtpExtension::kAbsSendTimeUri);
+  extensions.push_back(RtpExtension::kTimestampOffsetUri);
+  TestExtensionFilter(extensions, RtpExtension::kAbsSendTimeUri);
+}
+
+// Test support for transport sequence number header extension.
+// Delegates to the fixture helpers, as with the other extension tests above.
+TEST_F(WebRtcVideoChannelTest, SendTransportSequenceNumberHeaderExtensions) {
+  TestSetSendRtpHeaderExtensions(RtpExtension::kTransportSequenceNumberUri);
+}
+TEST_F(WebRtcVideoChannelTest, RecvTransportSequenceNumberHeaderExtensions) {
+  TestSetRecvRtpHeaderExtensions(RtpExtension::kTransportSequenceNumberUri);
+}
+
+// Test support for video rotation header extension.
+TEST_F(WebRtcVideoChannelTest, SendVideoRotationHeaderExtensions) {
+  TestSetSendRtpHeaderExtensions(RtpExtension::kVideoRotationUri);
+}
+TEST_F(WebRtcVideoChannelTest, RecvVideoRotationHeaderExtensions) {
+  TestSetRecvRtpHeaderExtensions(RtpExtension::kVideoRotationUri);
+}
+
+// Recreating a send stream is expensive, so setting an equal (even if
+// reordered) extension set must not recreate it; a genuinely different set
+// must. Stream recreation is observed via GetNumCreatedSendStreams().
+TEST_F(WebRtcVideoChannelTest, IdenticalSendExtensionsDoesntRecreateStream) {
+  const int kAbsSendTimeId = 1;
+  const int kVideoRotationId = 2;
+  send_parameters_.extensions.push_back(
+      RtpExtension(RtpExtension::kAbsSendTimeUri, kAbsSendTimeId));
+  send_parameters_.extensions.push_back(
+      RtpExtension(RtpExtension::kVideoRotationUri, kVideoRotationId));
+
+  EXPECT_TRUE(send_channel_->SetSenderParameters(send_parameters_));
+  FakeVideoSendStream* send_stream =
+      AddSendStream(cricket::StreamParams::CreateLegacy(123));
+
+  EXPECT_EQ(1, fake_call_->GetNumCreatedSendStreams());
+  ASSERT_EQ(2u, send_stream->GetConfig().rtp.extensions.size());
+
+  // Setting the same extensions (even if in different order) shouldn't
+  // reallocate the stream.
+  absl::c_reverse(send_parameters_.extensions);
+  EXPECT_TRUE(send_channel_->SetSenderParameters(send_parameters_));
+
+  EXPECT_EQ(1, fake_call_->GetNumCreatedSendStreams());
+
+  // Setting different extensions should recreate the stream.
+  send_parameters_.extensions.resize(1);
+  EXPECT_TRUE(send_channel_->SetSenderParameters(send_parameters_));
+
+  EXPECT_EQ(2, fake_call_->GetNumCreatedSendStreams());
+}
+
+// An unknown extension URI in the sender parameters must be silently dropped
+// rather than rejected: only the supported timestamp-offset extension should
+// reach the created send stream.
+TEST_F(WebRtcVideoChannelTest,
+       SetSendRtpHeaderExtensionsExcludeUnsupportedExtensions) {
+  const int kUnsupportedId = 1;
+  const int kTOffsetId = 2;
+
+  send_parameters_.extensions.push_back(
+      RtpExtension(kUnsupportedExtensionName, kUnsupportedId));
+  send_parameters_.extensions.push_back(
+      RtpExtension(RtpExtension::kTimestampOffsetUri, kTOffsetId));
+  EXPECT_TRUE(send_channel_->SetSenderParameters(send_parameters_));
+  FakeVideoSendStream* send_stream =
+      AddSendStream(cricket::StreamParams::CreateLegacy(123));
+
+  // Only timestamp offset extension is set to send stream,
+  // unsupported rtp extension is ignored.
+  ASSERT_EQ(1u, send_stream->GetConfig().rtp.extensions.size());
+  EXPECT_STREQ(RtpExtension::kTimestampOffsetUri,
+               send_stream->GetConfig().rtp.extensions[0].uri.c_str());
+}
+
+// Receive-side counterpart of the test above: an unknown extension URI in
+// the receiver parameters is dropped, leaving only the supported
+// timestamp-offset extension on the receive stream.
+TEST_F(WebRtcVideoChannelTest,
+       SetRecvRtpHeaderExtensionsExcludeUnsupportedExtensions) {
+  const int kUnsupportedId = 1;
+  const int kTOffsetId = 2;
+
+  recv_parameters_.extensions.push_back(
+      RtpExtension(kUnsupportedExtensionName, kUnsupportedId));
+  recv_parameters_.extensions.push_back(
+      RtpExtension(RtpExtension::kTimestampOffsetUri, kTOffsetId));
+  EXPECT_TRUE(receive_channel_->SetReceiverParameters(recv_parameters_));
+  AddRecvStream(cricket::StreamParams::CreateLegacy(123));
+
+  // Only timestamp offset extension is set to receive stream,
+  // unsupported rtp extension is ignored.
+  ASSERT_THAT(receive_channel_->GetRtpReceiverParameters(123).header_extensions,
+              SizeIs(1));
+  EXPECT_STREQ(receive_channel_->GetRtpReceiverParameters(123)
+                   .header_extensions[0]
+                   .uri.c_str(),
+               RtpExtension::kTimestampOffsetUri);
+}
+
+// Extension ids outside the valid one-byte-header range (1..14) must cause
+// SetSenderParameters to fail. Each bad id is appended and tried in turn.
+TEST_F(WebRtcVideoChannelTest, SetSendRtpHeaderExtensionsRejectsIncorrectIds) {
+  const int kIncorrectIds[] = {-2, -1, 0, 15, 16};
+  for (size_t i = 0; i < arraysize(kIncorrectIds); ++i) {
+    send_parameters_.extensions.push_back(
+        RtpExtension(RtpExtension::kTimestampOffsetUri, kIncorrectIds[i]));
+    EXPECT_FALSE(send_channel_->SetSenderParameters(send_parameters_))
+        << "Bad extension id '" << kIncorrectIds[i] << "' accepted.";
+  }
+}
+
+// Receive-side counterpart: out-of-range extension ids must cause
+// SetReceiverParameters to fail.
+TEST_F(WebRtcVideoChannelTest, SetRecvRtpHeaderExtensionsRejectsIncorrectIds) {
+  const int kIncorrectIds[] = {-2, -1, 0, 15, 16};
+  for (size_t i = 0; i < arraysize(kIncorrectIds); ++i) {
+    recv_parameters_.extensions.push_back(
+        RtpExtension(RtpExtension::kTimestampOffsetUri, kIncorrectIds[i]));
+    EXPECT_FALSE(receive_channel_->SetReceiverParameters(recv_parameters_))
+        << "Bad extension id '" << kIncorrectIds[i] << "' accepted.";
+  }
+}
+
+// Two different extensions sharing one id, and literal duplicate entries,
+// must both be rejected by SetSenderParameters.
+TEST_F(WebRtcVideoChannelTest, SetSendRtpHeaderExtensionsRejectsDuplicateIds) {
+  const int id = 1;
+  send_parameters_.extensions.push_back(
+      RtpExtension(RtpExtension::kTimestampOffsetUri, id));
+  send_parameters_.extensions.push_back(
+      RtpExtension(RtpExtension::kAbsSendTimeUri, id));
+  EXPECT_FALSE(send_channel_->SetSenderParameters(send_parameters_));
+
+  // Duplicate entries are also not supported.
+  send_parameters_.extensions.clear();
+  send_parameters_.extensions.push_back(
+      RtpExtension(RtpExtension::kTimestampOffsetUri, id));
+  send_parameters_.extensions.push_back(send_parameters_.extensions.back());
+  EXPECT_FALSE(send_channel_->SetSenderParameters(send_parameters_));
+}
+
+// Receive-side counterpart: conflicting ids and duplicate entries must both
+// be rejected by SetReceiverParameters.
+TEST_F(WebRtcVideoChannelTest, SetRecvRtpHeaderExtensionsRejectsDuplicateIds) {
+  const int id = 1;
+  recv_parameters_.extensions.push_back(
+      RtpExtension(RtpExtension::kTimestampOffsetUri, id));
+  recv_parameters_.extensions.push_back(
+      RtpExtension(RtpExtension::kAbsSendTimeUri, id));
+  EXPECT_FALSE(receive_channel_->SetReceiverParameters(recv_parameters_));
+
+  // Duplicate entries are also not supported.
+  recv_parameters_.extensions.clear();
+  recv_parameters_.extensions.push_back(
+      RtpExtension(RtpExtension::kTimestampOffsetUri, id));
+  recv_parameters_.extensions.push_back(recv_parameters_.extensions.back());
+  EXPECT_FALSE(receive_channel_->SetReceiverParameters(recv_parameters_));
+}
+
+// A packet parsed without any extension map should still have its extensions
+// identified by the channel, using the extension ids negotiated via
+// SetReceiverParameters, before being handed to the call.
+TEST_F(WebRtcVideoChannelTest, OnPacketReceivedIdentifiesExtensions) {
+  cricket::VideoReceiverParameters parameters = recv_parameters_;
+  parameters.extensions.push_back(
+      RtpExtension(RtpExtension::kVideoRotationUri, /*id=*/1));
+  ASSERT_TRUE(receive_channel_->SetReceiverParameters(parameters));
+  webrtc::RtpHeaderExtensionMap extension_map(parameters.extensions);
+  RtpPacketReceived reference_packet(&extension_map);
+  reference_packet.SetExtension<webrtc::VideoOrientation>(
+      webrtc::VideoRotation::kVideoRotation_270);
+  // Create a packet without the extension map but with the same content.
+  RtpPacketReceived received_packet;
+  ASSERT_TRUE(received_packet.Parse(reference_packet.Buffer()));
+
+  receive_channel_->OnPacketReceived(received_packet);
+  time_controller_.AdvanceTime(TimeDelta::Zero());
+
+  EXPECT_EQ(fake_call_->last_received_rtp_packet()
+                .GetExtension<webrtc::VideoOrientation>(),
+            webrtc::VideoRotation::kVideoRotation_270);
+}
+
+// Adding a single legacy recv stream must create exactly one underlying
+// VideoReceiveStream in the call.
+TEST_F(WebRtcVideoChannelTest, AddRecvStreamOnlyUsesOneReceiveStream) {
+  EXPECT_TRUE(
+      receive_channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(1)));
+  EXPECT_EQ(1u, fake_call_->GetVideoReceiveStreams().size());
+}
+
+// A freshly created receive stream must default to compound RTCP mode.
+TEST_F(WebRtcVideoChannelTest, RtcpIsCompoundByDefault) {
+  FakeVideoReceiveStream* stream = AddRecvStream();
+  EXPECT_EQ(webrtc::RtcpMode::kCompound, stream->GetConfig().rtp.rtcp_mode);
+}
+
+// RTCP loss notification (LNTF) is off by default and is switched on by the
+// WebRTC-RtcpLossNotification field trial; ResetTest() rebuilds the fixture
+// so the trial takes effect.
+TEST_F(WebRtcVideoChannelTest, LossNotificationIsDisabledByDefault) {
+  TestLossNotificationState(false);
+}
+
+TEST_F(WebRtcVideoChannelTest, LossNotificationIsEnabledByFieldTrial) {
+  webrtc::test::ScopedKeyValueConfig override_field_trials(
+      field_trials_, "WebRTC-RtcpLossNotification/Enabled/");
+  ResetTest();
+  TestLossNotificationState(true);
+}
+
+// With the LNTF field trial on, verifies that LNTF is initially enabled on
+// both send and receive streams, is turned off on both when send codecs
+// without LNTF feedback are set, and comes back when the default codecs
+// (which include LNTF) are restored.
+TEST_F(WebRtcVideoChannelTest, LossNotificationCanBeEnabledAndDisabled) {
+  webrtc::test::ScopedKeyValueConfig override_field_trials(
+      field_trials_, "WebRTC-RtcpLossNotification/Enabled/");
+  ResetTest();
+
+  AssignDefaultCodec();
+  VerifyCodecHasDefaultFeedbackParams(*default_codec_, true);
+
+  {
+    cricket::VideoSenderParameters parameters;
+    parameters.codecs = engine_.send_codecs();
+    EXPECT_TRUE(send_channel_->SetSenderParameters(parameters));
+    EXPECT_TRUE(send_channel_->SetSend(true));
+  }
+
+  // Start with LNTF enabled.
+  FakeVideoSendStream* send_stream =
+      AddSendStream(cricket::StreamParams::CreateLegacy(1));
+  ASSERT_TRUE(send_stream->GetConfig().rtp.lntf.enabled);
+  FakeVideoReceiveStream* recv_stream =
+      AddRecvStream(cricket::StreamParams::CreateLegacy(1));
+  ASSERT_TRUE(recv_stream->GetConfig().rtp.lntf.enabled);
+
+  // Verify that LNTF is turned off when send(!) codecs without LNTF are set.
+  cricket::VideoSenderParameters parameters;
+  parameters.codecs.push_back(RemoveFeedbackParams(GetEngineCodec("VP8")));
+  EXPECT_TRUE(parameters.codecs[0].feedback_params.params().empty());
+  EXPECT_TRUE(send_channel_->SetSenderParameters(parameters));
+  recv_stream = fake_call_->GetVideoReceiveStreams()[0];
+  EXPECT_FALSE(recv_stream->GetConfig().rtp.lntf.enabled);
+  send_stream = fake_call_->GetVideoSendStreams()[0];
+  EXPECT_FALSE(send_stream->GetConfig().rtp.lntf.enabled);
+
+  // Setting the default codecs again, including VP8, turns LNTF back on.
+  parameters.codecs = engine_.send_codecs();
+  EXPECT_TRUE(send_channel_->SetSenderParameters(parameters));
+  recv_stream = fake_call_->GetVideoReceiveStreams()[0];
+  EXPECT_TRUE(recv_stream->GetConfig().rtp.lntf.enabled);
+  send_stream = fake_call_->GetVideoSendStreams()[0];
+  EXPECT_TRUE(send_stream->GetConfig().rtp.lntf.enabled);
+}
+
+// NACK must be on by default (nack.rtp_history_ms > 0) for both send and
+// receive streams, with the same history length on both sides.
+TEST_F(WebRtcVideoChannelTest, NackIsEnabledByDefault) {
+  AssignDefaultCodec();
+  VerifyCodecHasDefaultFeedbackParams(*default_codec_, false);
+
+  cricket::VideoSenderParameters parameters;
+  parameters.codecs = engine_.send_codecs();
+  EXPECT_TRUE(send_channel_->SetSenderParameters(parameters));
+  EXPECT_TRUE(send_channel_->SetSend(true));
+
+  // Send side.
+  FakeVideoSendStream* send_stream =
+      AddSendStream(cricket::StreamParams::CreateLegacy(1));
+  EXPECT_GT(send_stream->GetConfig().rtp.nack.rtp_history_ms, 0);
+
+  // Receiver side.
+  FakeVideoReceiveStream* recv_stream =
+      AddRecvStream(cricket::StreamParams::CreateLegacy(1));
+  EXPECT_GT(recv_stream->GetConfig().rtp.nack.rtp_history_ms, 0);
+
+  // Nack history size should match between sender and receiver.
+  EXPECT_EQ(send_stream->GetConfig().rtp.nack.rtp_history_ms,
+            recv_stream->GetConfig().rtp.nack.rtp_history_ms);
+}
+
+// Mirrors the LNTF toggle test for NACK: setting send codecs without NACK
+// feedback zeroes the history on both directions' streams, and restoring the
+// default codecs re-enables it.
+TEST_F(WebRtcVideoChannelTest, NackCanBeEnabledAndDisabled) {
+  FakeVideoSendStream* send_stream = AddSendStream();
+  FakeVideoReceiveStream* recv_stream = AddRecvStream();
+
+  EXPECT_GT(recv_stream->GetConfig().rtp.nack.rtp_history_ms, 0);
+  EXPECT_GT(send_stream->GetConfig().rtp.nack.rtp_history_ms, 0);
+
+  // Verify that NACK is turned off when send(!) codecs without NACK are set.
+  cricket::VideoSenderParameters parameters;
+  parameters.codecs.push_back(RemoveFeedbackParams(GetEngineCodec("VP8")));
+  EXPECT_TRUE(parameters.codecs[0].feedback_params.params().empty());
+  EXPECT_TRUE(send_channel_->SetSenderParameters(parameters));
+  recv_stream = fake_call_->GetVideoReceiveStreams()[0];
+  EXPECT_EQ(0, recv_stream->GetConfig().rtp.nack.rtp_history_ms);
+  send_stream = fake_call_->GetVideoSendStreams()[0];
+  EXPECT_EQ(0, send_stream->GetConfig().rtp.nack.rtp_history_ms);
+
+  // Verify that NACK is turned on when setting default codecs since the
+  // default codecs have NACK enabled.
+  parameters.codecs = engine_.send_codecs();
+  EXPECT_TRUE(send_channel_->SetSenderParameters(parameters));
+  recv_stream = fake_call_->GetVideoReceiveStreams()[0];
+  EXPECT_GT(recv_stream->GetConfig().rtp.nack.rtp_history_ms, 0);
+  send_stream = fake_call_->GetVideoSendStreams()[0];
+  EXPECT_GT(send_stream->GetConfig().rtp.nack.rtp_history_ms, 0);
+}
+
+// This test verifies that new frame sizes reconfigures encoders even though not
+// (yet) sending. The purpose of this is to permit encoding as quickly as
+// possible once we start sending. Likely the frames being input are from the
+// same source that will be sent later, which just means that we're ready
+// earlier.
+TEST_F(WebRtcVideoChannelTest, ReconfiguresEncodersWhenNotSending) {
+  cricket::VideoSenderParameters parameters;
+  parameters.codecs.push_back(GetEngineCodec("VP8"));
+  ASSERT_TRUE(send_channel_->SetSenderParameters(parameters));
+  send_channel_->SetSend(false);
+
+  FakeVideoSendStream* stream = AddSendStream();
+
+  // No frames entered.
+  std::vector<webrtc::VideoStream> streams = stream->GetVideoStreams();
+  EXPECT_EQ(0u, streams[0].width);
+  EXPECT_EQ(0u, streams[0].height);
+
+  webrtc::test::FrameForwarder frame_forwarder;
+  cricket::FakeFrameSource frame_source(1280, 720,
+                                        rtc::kNumMicrosecsPerSec / 30);
+
+  EXPECT_TRUE(
+      send_channel_->SetVideoSend(last_ssrc_, nullptr, &frame_forwarder));
+  frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame());
+
+  // Frame entered, should be reconfigured to new dimensions.
+  streams = stream->GetVideoStreams();
+  EXPECT_EQ(rtc::checked_cast<size_t>(1280), streams[0].width);
+  EXPECT_EQ(rtc::checked_cast<size_t>(720), streams[0].height);
+
+  // Detach the source again to leave the channel in a clean state.
+  EXPECT_TRUE(send_channel_->SetVideoSend(last_ssrc_, nullptr, nullptr));
+}
+
+// Verifies the encoder-config differences between realtime video and
+// screencast: content type, the min-transmit bitrate (only used for
+// screenshare), and that flipping is_screencast recreates the send stream.
+TEST_F(WebRtcVideoChannelTest, UsesCorrectSettingsForScreencast) {
+  static const int kScreenshareMinBitrateKbps = 800;
+  cricket::VideoCodec codec = GetEngineCodec("VP8");
+  cricket::VideoSenderParameters parameters;
+  parameters.codecs.push_back(codec);
+  EXPECT_TRUE(send_channel_->SetSenderParameters(parameters));
+  AddSendStream();
+
+  webrtc::test::FrameForwarder frame_forwarder;
+  cricket::FakeFrameSource frame_source(1280, 720,
+                                        rtc::kNumMicrosecsPerSec / 30);
+  VideoOptions min_bitrate_options;
+  min_bitrate_options.screencast_min_bitrate_kbps = kScreenshareMinBitrateKbps;
+  EXPECT_TRUE(send_channel_->SetVideoSend(last_ssrc_, &min_bitrate_options,
+                                          &frame_forwarder));
+
+  EXPECT_TRUE(send_channel_->SetSend(true));
+
+  frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame());
+  ASSERT_EQ(1u, fake_call_->GetVideoSendStreams().size());
+  FakeVideoSendStream* send_stream = fake_call_->GetVideoSendStreams().front();
+
+  EXPECT_EQ(1, send_stream->GetNumberOfSwappedFrames());
+
+  // Verify non-screencast settings.
+  webrtc::VideoEncoderConfig encoder_config =
+      send_stream->GetEncoderConfig().Copy();
+  EXPECT_EQ(webrtc::VideoEncoderConfig::ContentType::kRealtimeVideo,
+            encoder_config.content_type);
+  std::vector<webrtc::VideoStream> streams = send_stream->GetVideoStreams();
+  EXPECT_EQ(rtc::checked_cast<size_t>(1280), streams.front().width);
+  EXPECT_EQ(rtc::checked_cast<size_t>(720), streams.front().height);
+  EXPECT_EQ(0, encoder_config.min_transmit_bitrate_bps)
+      << "Non-screenshare shouldn't use min-transmit bitrate.";
+
+  EXPECT_TRUE(send_channel_->SetVideoSend(last_ssrc_, nullptr, nullptr));
+  EXPECT_EQ(1, send_stream->GetNumberOfSwappedFrames());
+  VideoOptions screencast_options;
+  screencast_options.is_screencast = true;
+  EXPECT_TRUE(send_channel_->SetVideoSend(last_ssrc_, &screencast_options,
+                                          &frame_forwarder));
+  frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame());
+  // Send stream recreated after option change.
+  ASSERT_EQ(2, fake_call_->GetNumCreatedSendStreams());
+  send_stream = fake_call_->GetVideoSendStreams().front();
+  EXPECT_EQ(1, send_stream->GetNumberOfSwappedFrames());
+
+  // Verify screencast settings.
+  encoder_config = send_stream->GetEncoderConfig().Copy();
+  EXPECT_EQ(webrtc::VideoEncoderConfig::ContentType::kScreen,
+            encoder_config.content_type);
+  EXPECT_EQ(kScreenshareMinBitrateKbps * 1000,
+            encoder_config.min_transmit_bitrate_bps);
+
+  streams = send_stream->GetVideoStreams();
+  EXPECT_EQ(rtc::checked_cast<size_t>(1280), streams.front().width);
+  EXPECT_EQ(rtc::checked_cast<size_t>(720), streams.front().height);
+  EXPECT_FALSE(streams[0].num_temporal_layers.has_value());
+  EXPECT_TRUE(send_channel_->SetVideoSend(last_ssrc_, nullptr, nullptr));
+}
+
+// In conference mode, screencast should be configured with two temporal
+// layers and the conference screencast target bitrate on the single stream.
+TEST_F(WebRtcVideoChannelTest,
+       ConferenceModeScreencastConfiguresTemporalLayer) {
+  static const int kConferenceScreencastTemporalBitrateBps = 200 * 1000;
+  send_parameters_.conference_mode = true;
+  send_channel_->SetSenderParameters(send_parameters_);
+
+  AddSendStream();
+  VideoOptions options;
+  options.is_screencast = true;
+  webrtc::test::FrameForwarder frame_forwarder;
+  cricket::FakeFrameSource frame_source(1280, 720,
+                                        rtc::kNumMicrosecsPerSec / 30);
+  EXPECT_TRUE(
+      send_channel_->SetVideoSend(last_ssrc_, &options, &frame_forwarder));
+  EXPECT_TRUE(send_channel_->SetSend(true));
+
+  frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame());
+  ASSERT_EQ(1u, fake_call_->GetVideoSendStreams().size());
+  FakeVideoSendStream* send_stream = fake_call_->GetVideoSendStreams().front();
+
+  webrtc::VideoEncoderConfig encoder_config =
+      send_stream->GetEncoderConfig().Copy();
+
+  // Verify screencast settings.
+  encoder_config = send_stream->GetEncoderConfig().Copy();
+  EXPECT_EQ(webrtc::VideoEncoderConfig::ContentType::kScreen,
+            encoder_config.content_type);
+
+  std::vector<webrtc::VideoStream> streams = send_stream->GetVideoStreams();
+  ASSERT_EQ(1u, streams.size());
+  ASSERT_EQ(2u, streams[0].num_temporal_layers);
+  EXPECT_EQ(kConferenceScreencastTemporalBitrateBps,
+            streams[0].target_bitrate_bps);
+
+  EXPECT_TRUE(send_channel_->SetVideoSend(last_ssrc_, nullptr, nullptr));
+}
+
+// suspend_below_min_bitrate must be off on a default-configured send stream.
+TEST_F(WebRtcVideoChannelTest, SuspendBelowMinBitrateDisabledByDefault) {
+  FakeVideoSendStream* stream = AddSendStream();
+  EXPECT_FALSE(stream->GetConfig().suspend_below_min_bitrate);
+}
+
+// Verifies that MediaConfig's suspend_below_min_bitrate flag propagates into
+// the send stream config, by recreating the channels with the flag on and
+// then off.
+TEST_F(WebRtcVideoChannelTest, SetMediaConfigSuspendBelowMinBitrate) {
+  MediaConfig media_config = GetMediaConfig();
+  media_config.video.suspend_below_min_bitrate = true;
+
+  send_channel_ = engine_.CreateSendChannel(
+      fake_call_.get(), media_config, VideoOptions(), webrtc::CryptoOptions(),
+      video_bitrate_allocator_factory_.get());
+  receive_channel_ = engine_.CreateReceiveChannel(
+      fake_call_.get(), media_config, VideoOptions(), webrtc::CryptoOptions());
+  send_channel_->OnReadyToSend(true);
+
+  send_channel_->SetSenderParameters(send_parameters_);
+
+  FakeVideoSendStream* stream = AddSendStream();
+  EXPECT_TRUE(stream->GetConfig().suspend_below_min_bitrate);
+
+  // Recreate the channels with the flag off and confirm it is reflected.
+  media_config.video.suspend_below_min_bitrate = false;
+  send_channel_ = engine_.CreateSendChannel(
+      fake_call_.get(), media_config, VideoOptions(), webrtc::CryptoOptions(),
+      video_bitrate_allocator_factory_.get());
+  receive_channel_ = engine_.CreateReceiveChannel(
+      fake_call_.get(), media_config, VideoOptions(), webrtc::CryptoOptions());
+  send_channel_->OnReadyToSend(true);
+
+  send_channel_->SetSenderParameters(send_parameters_);
+
+  stream = AddSendStream();
+  EXPECT_FALSE(stream->GetConfig().suspend_below_min_bitrate);
+}
+
+// VP8 denoising must be enabled in the default encoder-specific settings.
+TEST_F(WebRtcVideoChannelTest, Vp8DenoisingEnabledByDefault) {
+  FakeVideoSendStream* stream = AddSendStream();
+  webrtc::VideoCodecVP8 vp8_settings;
+  ASSERT_TRUE(stream->GetVp8Settings(&vp8_settings)) << "No VP8 config set.";
+  EXPECT_TRUE(vp8_settings.denoisingOn);
+}
+
+// Walks VP8-specific encoder settings through single-stream, simulcast and
+// screenshare configurations: denoising toggles with the video_noise_reduction
+// option, automatic resize is only on for a single stream, is off for
+// simulcast and screenshare, and frame dropping stays enabled throughout.
+TEST_F(WebRtcVideoChannelTest, VerifyVp8SpecificSettings) {
+  cricket::VideoSenderParameters parameters;
+  parameters.codecs.push_back(GetEngineCodec("VP8"));
+  ASSERT_TRUE(send_channel_->SetSenderParameters(parameters));
+
+  // Single-stream settings should apply with RTX as well (verifies that we
+  // check number of regular SSRCs and not StreamParams::ssrcs which contains
+  // both RTX and regular SSRCs).
+  FakeVideoSendStream* stream = SetUpSimulcast(false, /*with_rtx=*/true);
+
+  webrtc::test::FrameForwarder frame_forwarder;
+  EXPECT_TRUE(
+      send_channel_->SetVideoSend(last_ssrc_, nullptr, &frame_forwarder));
+  send_channel_->SetSend(true);
+
+  frame_forwarder.IncomingCapturedFrame(frame_source_.GetFrame());
+
+  webrtc::VideoCodecVP8 vp8_settings;
+  ASSERT_TRUE(stream->GetVp8Settings(&vp8_settings)) << "No VP8 config set.";
+  EXPECT_TRUE(vp8_settings.denoisingOn)
+      << "VP8 denoising should be on by default.";
+
+  stream = SetDenoisingOption(last_ssrc_, &frame_forwarder, false);
+
+  ASSERT_TRUE(stream->GetVp8Settings(&vp8_settings)) << "No VP8 config set.";
+  EXPECT_FALSE(vp8_settings.denoisingOn);
+  EXPECT_TRUE(vp8_settings.automaticResizeOn);
+  EXPECT_TRUE(stream->GetEncoderConfig().frame_drop_enabled);
+
+  stream = SetDenoisingOption(last_ssrc_, &frame_forwarder, true);
+
+  ASSERT_TRUE(stream->GetVp8Settings(&vp8_settings)) << "No VP8 config set.";
+  EXPECT_TRUE(vp8_settings.denoisingOn);
+  EXPECT_TRUE(vp8_settings.automaticResizeOn);
+  EXPECT_TRUE(stream->GetEncoderConfig().frame_drop_enabled);
+
+  EXPECT_TRUE(send_channel_->SetVideoSend(last_ssrc_, nullptr, nullptr));
+  stream = SetUpSimulcast(true, /*with_rtx=*/false);
+  EXPECT_TRUE(
+      send_channel_->SetVideoSend(last_ssrc_, nullptr, &frame_forwarder));
+  send_channel_->SetSend(true);
+  frame_forwarder.IncomingCapturedFrame(frame_source_.GetFrame());
+
+  EXPECT_EQ(3u, stream->GetVideoStreams().size());
+  ASSERT_TRUE(stream->GetVp8Settings(&vp8_settings)) << "No VP8 config set.";
+  // Automatic resize off when using simulcast.
+  EXPECT_FALSE(vp8_settings.automaticResizeOn);
+  EXPECT_TRUE(stream->GetEncoderConfig().frame_drop_enabled);
+
+  // In screen-share mode, denoising is forced off.
+  VideoOptions options;
+  options.is_screencast = true;
+  EXPECT_TRUE(
+      send_channel_->SetVideoSend(last_ssrc_, &options, &frame_forwarder));
+
+  stream = SetDenoisingOption(last_ssrc_, &frame_forwarder, false);
+
+  EXPECT_EQ(3u, stream->GetVideoStreams().size());
+  ASSERT_TRUE(stream->GetVp8Settings(&vp8_settings)) << "No VP8 config set.";
+  EXPECT_FALSE(vp8_settings.denoisingOn);
+  // Resizing always off for screen sharing.
+  EXPECT_FALSE(vp8_settings.automaticResizeOn);
+  EXPECT_TRUE(stream->GetEncoderConfig().frame_drop_enabled);
+
+  stream = SetDenoisingOption(last_ssrc_, &frame_forwarder, true);
+
+  ASSERT_TRUE(stream->GetVp8Settings(&vp8_settings)) << "No VP8 config set.";
+  EXPECT_FALSE(vp8_settings.denoisingOn);
+  EXPECT_FALSE(vp8_settings.automaticResizeOn);
+  EXPECT_TRUE(stream->GetEncoderConfig().frame_drop_enabled);
+
+  EXPECT_TRUE(send_channel_->SetVideoSend(last_ssrc_, nullptr, nullptr));
+}
+
+// AV1 counterpart of the VP8 settings test: automatic resize is on for a
+// single-layer stream and is turned off once a multi-layer scalability mode
+// ("L2T3") is requested via RtpParameters.
+TEST_F(WebRtcVideoChannelTest, VerifyAv1SpecificSettings) {
+  cricket::VideoSenderParameters parameters;
+  parameters.codecs.push_back(GetEngineCodec("AV1"));
+  ASSERT_TRUE(send_channel_->SetSenderParameters(parameters));
+  webrtc::test::FrameForwarder frame_forwarder;
+  webrtc::VideoCodecAV1 settings;
+
+  // Single-stream settings should apply with RTX as well (verifies that we
+  // check number of regular SSRCs and not StreamParams::ssrcs which contains
+  // both RTX and regular SSRCs).
+  FakeVideoSendStream* stream = SetUpSimulcast(false, /*with_rtx=*/true);
+  EXPECT_TRUE(
+      send_channel_->SetVideoSend(last_ssrc_, nullptr, &frame_forwarder));
+  send_channel_->SetSend(true);
+  frame_forwarder.IncomingCapturedFrame(frame_source_.GetFrame());
+
+  ASSERT_TRUE(stream->GetAv1Settings(&settings)) << "No AV1 config set.";
+  EXPECT_TRUE(settings.automatic_resize_on);
+
+  webrtc::RtpParameters rtp_parameters =
+      send_channel_->GetRtpSendParameters(last_ssrc_);
+  EXPECT_THAT(
+      rtp_parameters.encodings,
+      ElementsAre(Field(&webrtc::RtpEncodingParameters::scalability_mode,
+                        absl::nullopt)));
+  rtp_parameters.encodings[0].scalability_mode = "L2T3";
+  EXPECT_TRUE(
+      send_channel_->SetRtpSendParameters(last_ssrc_, rtp_parameters).ok());
+  frame_forwarder.IncomingCapturedFrame(frame_source_.GetFrame());
+
+  ASSERT_TRUE(stream->GetAv1Settings(&settings)) << "No AV1 config set.";
+  EXPECT_FALSE(settings.automatic_resize_on);
+
+  EXPECT_TRUE(send_channel_->SetVideoSend(last_ssrc_, nullptr, nullptr));
+}
+
+// Test that setting the same options doesn't result in the encoder being
+// reconfigured. Reconfiguration count is observed through the fake send
+// stream's num_encoder_reconfigurations().
+TEST_F(WebRtcVideoChannelTest, SetIdenticalOptionsDoesntReconfigureEncoder) {
+  VideoOptions options;
+  webrtc::test::FrameForwarder frame_forwarder;
+
+  AddSendStream();
+  cricket::VideoSenderParameters parameters;
+  parameters.codecs.push_back(GetEngineCodec("VP8"));
+  ASSERT_TRUE(send_channel_->SetSenderParameters(parameters));
+  FakeVideoSendStream* send_stream = fake_call_->GetVideoSendStreams().front();
+
+  EXPECT_TRUE(
+      send_channel_->SetVideoSend(last_ssrc_, &options, &frame_forwarder));
+  EXPECT_TRUE(
+      send_channel_->SetVideoSend(last_ssrc_, &options, &frame_forwarder));
+  frame_forwarder.IncomingCapturedFrame(frame_source_.GetFrame());
+  // Expect 1 reconfigurations at this point from the initial configuration.
+  EXPECT_EQ(1, send_stream->num_encoder_reconfigurations());
+
+  // Set the options one more time and expect no additional reconfigurations.
+  EXPECT_TRUE(
+      send_channel_->SetVideoSend(last_ssrc_, &options, &frame_forwarder));
+  EXPECT_EQ(1, send_stream->num_encoder_reconfigurations());
+
+  // Change `options` and expect 2 reconfigurations.
+  options.video_noise_reduction = true;
+  EXPECT_TRUE(
+      send_channel_->SetVideoSend(last_ssrc_, &options, &frame_forwarder));
+  EXPECT_EQ(2, send_stream->num_encoder_reconfigurations());
+
+  EXPECT_TRUE(send_channel_->SetVideoSend(last_ssrc_, nullptr, nullptr));
+}
+
+// Fixture for VP9-specific encoder-settings tests: same as
+// WebRtcVideoChannelTest but with VP9 registered in the fake encoder factory,
+// optionally under extra field trials.
+class Vp9SettingsTest : public WebRtcVideoChannelTest {
+ public:
+  Vp9SettingsTest() : Vp9SettingsTest("") {}
+  explicit Vp9SettingsTest(const char* field_trials)
+      : WebRtcVideoChannelTest(field_trials) {
+    encoder_factory_->AddSupportedVideoCodecType("VP9");
+  }
+  virtual ~Vp9SettingsTest() {}
+
+ protected:
+  void TearDown() override {
+    // Remove references to encoder_factory_ since this will be destroyed
+    // before send_channel_ and engine_.
+    ASSERT_TRUE(send_channel_->SetSenderParameters(send_parameters_));
+  }
+};
+
+// Verifies VP9-specific encoder settings end to end: denoising defaults to
+// on, automatic resize is on only while a single spatial layer is active,
+// denoising is forced off in screencast mode, and frame dropping stays
+// enabled in every configuration exercised below.
+TEST_F(Vp9SettingsTest, VerifyVp9SpecificSettings) {
+ encoder_factory_->AddSupportedVideoCodec(
+ webrtc::SdpVideoFormat("VP9", webrtc::SdpVideoFormat::Parameters(),
+ {ScalabilityMode::kL1T1, ScalabilityMode::kL2T1}));
+
+ cricket::VideoSenderParameters parameters;
+ parameters.codecs.push_back(GetEngineCodec("VP9"));
+ ASSERT_TRUE(send_channel_->SetSenderParameters(parameters));
+
+ FakeVideoSendStream* stream = SetUpSimulcast(false, /*with_rtx=*/false);
+
+ webrtc::test::FrameForwarder frame_forwarder;
+ EXPECT_TRUE(
+ send_channel_->SetVideoSend(last_ssrc_, nullptr, &frame_forwarder));
+ send_channel_->SetSend(true);
+
+ // A frame must be delivered before the encoder is configured.
+ frame_forwarder.IncomingCapturedFrame(frame_source_.GetFrame());
+
+ webrtc::VideoCodecVP9 vp9_settings;
+ ASSERT_TRUE(stream->GetVp9Settings(&vp9_settings)) << "No VP9 config set.";
+ EXPECT_TRUE(vp9_settings.denoisingOn)
+ << "VP9 denoising should be on by default.";
+ EXPECT_TRUE(vp9_settings.automaticResizeOn)
+ << "Automatic resize on for one active stream.";
+
+ // Toggle the denoising option off and back on via SetVideoSend.
+ stream = SetDenoisingOption(last_ssrc_, &frame_forwarder, false);
+
+ ASSERT_TRUE(stream->GetVp9Settings(&vp9_settings)) << "No VP9 config set.";
+ EXPECT_FALSE(vp9_settings.denoisingOn);
+ EXPECT_TRUE(stream->GetEncoderConfig().frame_drop_enabled)
+ << "Frame dropping always on for real time video.";
+ EXPECT_TRUE(vp9_settings.automaticResizeOn);
+
+ stream = SetDenoisingOption(last_ssrc_, &frame_forwarder, true);
+
+ ASSERT_TRUE(stream->GetVp9Settings(&vp9_settings)) << "No VP9 config set.";
+ EXPECT_TRUE(vp9_settings.denoisingOn);
+ EXPECT_TRUE(stream->GetEncoderConfig().frame_drop_enabled);
+ EXPECT_TRUE(vp9_settings.automaticResizeOn);
+
+ // No scalability mode is set initially; switching to two spatial layers
+ // (L2T1) should disable automatic resize.
+ webrtc::RtpParameters rtp_parameters =
+ send_channel_->GetRtpSendParameters(last_ssrc_);
+ EXPECT_THAT(
+ rtp_parameters.encodings,
+ ElementsAre(Field(&webrtc::RtpEncodingParameters::scalability_mode,
+ absl::nullopt)));
+ rtp_parameters.encodings[0].scalability_mode = "L2T1";
+ EXPECT_TRUE(
+ send_channel_->SetRtpSendParameters(last_ssrc_, rtp_parameters).ok());
+
+ ASSERT_TRUE(stream->GetVp9Settings(&vp9_settings)) << "No VP9 config set.";
+ EXPECT_TRUE(vp9_settings.denoisingOn);
+ EXPECT_TRUE(stream->GetEncoderConfig().frame_drop_enabled);
+ EXPECT_FALSE(vp9_settings.automaticResizeOn)
+ << "Automatic resize off for multiple spatial layers.";
+
+ // Back to a single spatial layer (L1T1): automatic resize returns.
+ rtp_parameters = send_channel_->GetRtpSendParameters(last_ssrc_);
+ EXPECT_THAT(rtp_parameters.encodings,
+ ElementsAre(Field(
+ &webrtc::RtpEncodingParameters::scalability_mode, "L2T1")));
+ rtp_parameters.encodings[0].scalability_mode = "L1T1";
+ EXPECT_TRUE(
+ send_channel_->SetRtpSendParameters(last_ssrc_, rtp_parameters).ok());
+ rtp_parameters = send_channel_->GetRtpSendParameters(last_ssrc_);
+ EXPECT_THAT(rtp_parameters.encodings,
+ ElementsAre(Field(
+ &webrtc::RtpEncodingParameters::scalability_mode, "L1T1")));
+
+ ASSERT_TRUE(stream->GetVp9Settings(&vp9_settings)) << "No VP9 config set.";
+ EXPECT_TRUE(vp9_settings.denoisingOn);
+ EXPECT_TRUE(stream->GetEncoderConfig().frame_drop_enabled);
+ EXPECT_TRUE(vp9_settings.automaticResizeOn)
+ << "Automatic resize on for one spatial layer.";
+
+ // In screen-share mode, denoising is forced off.
+ VideoOptions options;
+ options.is_screencast = true;
+ EXPECT_TRUE(
+ send_channel_->SetVideoSend(last_ssrc_, &options, &frame_forwarder));
+
+ stream = SetDenoisingOption(last_ssrc_, &frame_forwarder, false);
+
+ ASSERT_TRUE(stream->GetVp9Settings(&vp9_settings)) << "No VP9 config set.";
+ EXPECT_FALSE(vp9_settings.denoisingOn);
+ EXPECT_TRUE(stream->GetEncoderConfig().frame_drop_enabled)
+ << "Frame dropping always on for screen sharing.";
+ EXPECT_FALSE(vp9_settings.automaticResizeOn)
+ << "Automatic resize off for screencast.";
+
+ stream = SetDenoisingOption(last_ssrc_, &frame_forwarder, false);
+
+ ASSERT_TRUE(stream->GetVp9Settings(&vp9_settings)) << "No VP9 config set.";
+ EXPECT_FALSE(vp9_settings.denoisingOn);
+ EXPECT_TRUE(stream->GetEncoderConfig().frame_drop_enabled);
+ EXPECT_FALSE(vp9_settings.automaticResizeOn);
+
+ // Detach source and options to clean up.
+ EXPECT_TRUE(send_channel_->SetVideoSend(last_ssrc_, nullptr, nullptr));
+}
+
+// Providing multiple SSRCs with a VP9 send codec enables SVC: the number of
+// spatial layers equals the number of SSRCs, with 3 temporal layers.
+TEST_F(Vp9SettingsTest, MultipleSsrcsEnablesSvc) {
+ cricket::VideoSenderParameters parameters;
+ parameters.codecs.push_back(GetEngineCodec("VP9"));
+ ASSERT_TRUE(send_channel_->SetSenderParameters(parameters));
+
+ std::vector<uint32_t> ssrcs = MAKE_VECTOR(kSsrcs3);
+
+ FakeVideoSendStream* stream =
+ AddSendStream(CreateSimStreamParams("cname", ssrcs));
+
+ webrtc::VideoSendStream::Config config = stream->GetConfig().Copy();
+
+ webrtc::test::FrameForwarder frame_forwarder;
+ EXPECT_TRUE(send_channel_->SetVideoSend(ssrcs[0], nullptr, &frame_forwarder));
+ send_channel_->SetSend(true);
+
+ // Deliver a frame so the encoder gets configured.
+ frame_forwarder.IncomingCapturedFrame(frame_source_.GetFrame());
+
+ webrtc::VideoCodecVP9 vp9_settings;
+ ASSERT_TRUE(stream->GetVp9Settings(&vp9_settings)) << "No VP9 config set.";
+
+ const size_t kNumSpatialLayers = ssrcs.size();
+ const size_t kNumTemporalLayers = 3;
+ EXPECT_EQ(vp9_settings.numberOfSpatialLayers, kNumSpatialLayers);
+ EXPECT_EQ(vp9_settings.numberOfTemporalLayers, kNumTemporalLayers);
+
+ EXPECT_TRUE(send_channel_->SetVideoSend(ssrcs[0], nullptr, nullptr));
+}
+
+// In VP9 SVC mode all spatial layers ride on one RTP stream: even with three
+// SSRCs configured, the send-stream config contains a single SSRC while the
+// encoder still gets one spatial layer per provided SSRC.
+TEST_F(Vp9SettingsTest, SvcModeCreatesSingleRtpStream) {
+ cricket::VideoSenderParameters parameters;
+ parameters.codecs.push_back(GetEngineCodec("VP9"));
+ ASSERT_TRUE(send_channel_->SetSenderParameters(parameters));
+
+ std::vector<uint32_t> ssrcs = MAKE_VECTOR(kSsrcs3);
+
+ FakeVideoSendStream* stream =
+ AddSendStream(CreateSimStreamParams("cname", ssrcs));
+
+ webrtc::VideoSendStream::Config config = stream->GetConfig().Copy();
+
+ // Despite 3 ssrcs provided, single layer is used.
+ EXPECT_EQ(1u, config.rtp.ssrcs.size());
+
+ webrtc::test::FrameForwarder frame_forwarder;
+ EXPECT_TRUE(send_channel_->SetVideoSend(ssrcs[0], nullptr, &frame_forwarder));
+ send_channel_->SetSend(true);
+
+ frame_forwarder.IncomingCapturedFrame(frame_source_.GetFrame());
+
+ webrtc::VideoCodecVP9 vp9_settings;
+ ASSERT_TRUE(stream->GetVp9Settings(&vp9_settings)) << "No VP9 config set.";
+
+ const size_t kNumSpatialLayers = ssrcs.size();
+ EXPECT_EQ(vp9_settings.numberOfSpatialLayers, kNumSpatialLayers);
+
+ EXPECT_TRUE(send_channel_->SetVideoSend(ssrcs[0], nullptr, nullptr));
+}
+
+// Per-encoding RTP parameters (here: the `active` flag of the middle layer)
+// must be copied into encoder_config.simulcast_layers[] even though SVC
+// sends everything on a single RTP stream (number_of_streams == 1).
+TEST_F(Vp9SettingsTest, AllEncodingParametersCopied) {
+ cricket::VideoSenderParameters send_parameters;
+ send_parameters.codecs.push_back(GetEngineCodec("VP9"));
+ ASSERT_TRUE(send_channel_->SetSenderParameters(send_parameters));
+
+ const size_t kNumSpatialLayers = 3;
+ std::vector<uint32_t> ssrcs = MAKE_VECTOR(kSsrcs3);
+
+ FakeVideoSendStream* stream =
+ AddSendStream(CreateSimStreamParams("cname", ssrcs));
+
+ webrtc::RtpParameters parameters =
+ send_channel_->GetRtpSendParameters(ssrcs[0]);
+ ASSERT_EQ(kNumSpatialLayers, parameters.encodings.size());
+ ASSERT_TRUE(parameters.encodings[0].active);
+ ASSERT_TRUE(parameters.encodings[1].active);
+ ASSERT_TRUE(parameters.encodings[2].active);
+ // Invert value to verify copying.
+ parameters.encodings[1].active = false;
+ EXPECT_TRUE(send_channel_->SetRtpSendParameters(ssrcs[0], parameters).ok());
+
+ webrtc::VideoEncoderConfig encoder_config = stream->GetEncoderConfig().Copy();
+
+ // number_of_streams should be 1 since all spatial layers are sent on the
+ // same SSRC. But encoding parameters of all layers is supposed to be copied
+ // and stored in simulcast_layers[].
+ EXPECT_EQ(1u, encoder_config.number_of_streams);
+ EXPECT_EQ(encoder_config.simulcast_layers.size(), kNumSpatialLayers);
+ EXPECT_TRUE(encoder_config.simulcast_layers[0].active);
+ EXPECT_FALSE(encoder_config.simulcast_layers[1].active);
+ EXPECT_TRUE(encoder_config.simulcast_layers[2].active);
+}
+
+// With a 1080p SVC configuration the stream's max bitrate should exceed the
+// legacy 2.5 Mbps single-stream cap, and an explicit max_bandwidth_bps in the
+// sender parameters should still cap it.
+TEST_F(Vp9SettingsTest, MaxBitrateDeterminedBySvcResolutions) {
+ cricket::VideoSenderParameters parameters;
+ parameters.codecs.push_back(GetEngineCodec("VP9"));
+ ASSERT_TRUE(send_channel_->SetSenderParameters(parameters));
+
+ std::vector<uint32_t> ssrcs = MAKE_VECTOR(kSsrcs3);
+
+ FakeVideoSendStream* stream =
+ AddSendStream(CreateSimStreamParams("cname", ssrcs));
+
+ webrtc::VideoSendStream::Config config = stream->GetConfig().Copy();
+
+ webrtc::test::FrameForwarder frame_forwarder;
+ EXPECT_TRUE(send_channel_->SetVideoSend(ssrcs[0], nullptr, &frame_forwarder));
+ send_channel_->SetSend(true);
+
+ // Send frame at 1080p@30fps.
+ frame_forwarder.IncomingCapturedFrame(frame_source_.GetFrame(
+ 1920, 1080, webrtc::VideoRotation::kVideoRotation_0,
+ /*duration_us=*/33000));
+
+ webrtc::VideoCodecVP9 vp9_settings;
+ ASSERT_TRUE(stream->GetVp9Settings(&vp9_settings)) << "No VP9 config set.";
+
+ const size_t kNumSpatialLayers = ssrcs.size();
+ const size_t kNumTemporalLayers = 3;
+ EXPECT_EQ(vp9_settings.numberOfSpatialLayers, kNumSpatialLayers);
+ EXPECT_EQ(vp9_settings.numberOfTemporalLayers, kNumTemporalLayers);
+
+ EXPECT_TRUE(send_channel_->SetVideoSend(ssrcs[0], nullptr, nullptr));
+
+ // VideoStream max bitrate should be more than legacy 2.5Mbps default stream
+ // cap.
+ EXPECT_THAT(
+ stream->GetVideoStreams(),
+ ElementsAre(Field(&webrtc::VideoStream::max_bitrate_bps, Gt(2500000))));
+
+ // Update send parameters to 2Mbps, this should cap the max bitrate of the
+ // stream.
+ parameters.max_bandwidth_bps = 2000000;
+ send_channel_->SetSenderParameters(parameters);
+ EXPECT_THAT(
+ stream->GetVideoStreams(),
+ ElementsAre(Field(&webrtc::VideoStream::max_bitrate_bps, Eq(2000000))));
+}
+
+// With a 720p SVC configuration both the target and max bitrates derived
+// from the layer resolutions stay below the legacy 2.5 Mbps default cap.
+TEST_F(Vp9SettingsTest, Vp9SvcTargetBitrateCappedByMax) {
+ cricket::VideoSenderParameters parameters;
+ parameters.codecs.push_back(GetEngineCodec("VP9"));
+ ASSERT_TRUE(send_channel_->SetSenderParameters(parameters));
+
+ std::vector<uint32_t> ssrcs = MAKE_VECTOR(kSsrcs3);
+
+ FakeVideoSendStream* stream =
+ AddSendStream(CreateSimStreamParams("cname", ssrcs));
+
+ webrtc::VideoSendStream::Config config = stream->GetConfig().Copy();
+
+ webrtc::test::FrameForwarder frame_forwarder;
+ EXPECT_TRUE(send_channel_->SetVideoSend(ssrcs[0], nullptr, &frame_forwarder));
+ send_channel_->SetSend(true);
+
+ // Set up 3 spatial layers with 720p, which should result in a max bitrate of
+ // 2084 kbps.
+ frame_forwarder.IncomingCapturedFrame(
+ frame_source_.GetFrame(1280, 720, webrtc::VideoRotation::kVideoRotation_0,
+ /*duration_us=*/33000));
+
+ webrtc::VideoCodecVP9 vp9_settings;
+ ASSERT_TRUE(stream->GetVp9Settings(&vp9_settings)) << "No VP9 config set.";
+
+ const size_t kNumSpatialLayers = ssrcs.size();
+ const size_t kNumTemporalLayers = 3;
+ EXPECT_EQ(vp9_settings.numberOfSpatialLayers, kNumSpatialLayers);
+ EXPECT_EQ(vp9_settings.numberOfTemporalLayers, kNumTemporalLayers);
+
+ EXPECT_TRUE(send_channel_->SetVideoSend(ssrcs[0], nullptr, nullptr));
+
+ // VideoStream both min and max bitrate should be lower than legacy 2.5Mbps
+ // default stream cap.
+ EXPECT_THAT(
+ stream->GetVideoStreams()[0],
+ AllOf(Field(&webrtc::VideoStream::max_bitrate_bps, Lt(2500000)),
+ Field(&webrtc::VideoStream::target_bitrate_bps, Lt(2500000))));
+}
+
+// Value-parameterized fixture: each parameter tuple is
+// (field-trial string, expected spatial layers, expected temporal layers,
+// expected inter-layer prediction mode).
+class Vp9SettingsTestWithFieldTrial
+ : public Vp9SettingsTest,
+ public ::testing::WithParamInterface<
+ ::testing::tuple<const char*, int, int, webrtc::InterLayerPredMode>> {
+ protected:
+ Vp9SettingsTestWithFieldTrial()
+ : Vp9SettingsTest(::testing::get<0>(GetParam())),
+ num_spatial_layers_(::testing::get<1>(GetParam())),
+ num_temporal_layers_(::testing::get<2>(GetParam())),
+ inter_layer_pred_mode_(::testing::get<3>(GetParam())) {}
+
+ // Configures a VP9 send stream, feeds one frame, and checks that the
+ // resulting VP9 settings match the expected layer counts and
+ // inter-layer-prediction mode for the active field trial.
+ void VerifySettings(int num_spatial_layers,
+ int num_temporal_layers,
+ webrtc::InterLayerPredMode interLayerPred) {
+ cricket::VideoSenderParameters parameters;
+ parameters.codecs.push_back(GetEngineCodec("VP9"));
+ ASSERT_TRUE(send_channel_->SetSenderParameters(parameters));
+
+ FakeVideoSendStream* stream = SetUpSimulcast(false, /*with_rtx=*/false);
+
+ webrtc::test::FrameForwarder frame_forwarder;
+ EXPECT_TRUE(
+ send_channel_->SetVideoSend(last_ssrc_, nullptr, &frame_forwarder));
+ send_channel_->SetSend(true);
+
+ frame_forwarder.IncomingCapturedFrame(frame_source_.GetFrame());
+
+ webrtc::VideoCodecVP9 vp9_settings;
+ ASSERT_TRUE(stream->GetVp9Settings(&vp9_settings)) << "No VP9 config set.";
+ EXPECT_EQ(num_spatial_layers, vp9_settings.numberOfSpatialLayers);
+ EXPECT_EQ(num_temporal_layers, vp9_settings.numberOfTemporalLayers);
+ EXPECT_EQ(inter_layer_pred_mode_, vp9_settings.interLayerPred);
+
+ EXPECT_TRUE(send_channel_->SetVideoSend(last_ssrc_, nullptr, nullptr));
+ }
+
+ // Expected values extracted from the test parameter tuple.
+ const uint8_t num_spatial_layers_;
+ const uint8_t num_temporal_layers_;
+ const webrtc::InterLayerPredMode inter_layer_pred_mode_;
+};
+
+// Runs VerifySettings() against the expectations from the current parameter.
+TEST_P(Vp9SettingsTestWithFieldTrial, VerifyCodecSettings) {
+ VerifySettings(num_spatial_layers_, num_temporal_layers_,
+ inter_layer_pred_mode_);
+}
+
+// Instantiations cover the WebRTC-Vp9InterLayerPred field trial states:
+// unset/Default/Disabled fall back to kOnKeyPic; Enabled with an explicit
+// inter_layer_pred_mode selects off/on/onkeypic respectively. All cases
+// expect a single spatial and temporal layer.
+INSTANTIATE_TEST_SUITE_P(
+ All,
+ Vp9SettingsTestWithFieldTrial,
+ Values(
+ std::make_tuple("", 1, 1, webrtc::InterLayerPredMode::kOnKeyPic),
+ std::make_tuple("WebRTC-Vp9InterLayerPred/Default/",
+ 1,
+ 1,
+ webrtc::InterLayerPredMode::kOnKeyPic),
+ std::make_tuple("WebRTC-Vp9InterLayerPred/Disabled/",
+ 1,
+ 1,
+ webrtc::InterLayerPredMode::kOnKeyPic),
+ std::make_tuple(
+ "WebRTC-Vp9InterLayerPred/Enabled,inter_layer_pred_mode:off/",
+ 1,
+ 1,
+ webrtc::InterLayerPredMode::kOff),
+ std::make_tuple(
+ "WebRTC-Vp9InterLayerPred/Enabled,inter_layer_pred_mode:on/",
+ 1,
+ 1,
+ webrtc::InterLayerPredMode::kOn),
+ std::make_tuple(
+ "WebRTC-Vp9InterLayerPred/Enabled,inter_layer_pred_mode:onkeypic/",
+ 1,
+ 1,
+ webrtc::InterLayerPredMode::kOnKeyPic)));
+
+// A default send stream uses the engine-wide default minimum video bitrate.
+TEST_F(WebRtcVideoChannelTest, VerifyMinBitrate) {
+ std::vector<webrtc::VideoStream> streams = AddSendStream()->GetVideoStreams();
+ ASSERT_EQ(1u, streams.size());
+ EXPECT_EQ(webrtc::kDefaultMinVideoBitrateBps, streams[0].min_bitrate_bps);
+}
+
+// The forced-fallback field trial's third parameter (34567) overrides the
+// stream's minimum bitrate.
+TEST_F(WebRtcVideoChannelTest, VerifyMinBitrateWithForcedFallbackFieldTrial) {
+ webrtc::test::ScopedKeyValueConfig override_field_trials(
+ field_trials_,
+ "WebRTC-VP8-Forced-Fallback-Encoder-v2/Enabled-1,2,34567/");
+ std::vector<webrtc::VideoStream> streams = AddSendStream()->GetVideoStreams();
+ ASSERT_EQ(1u, streams.size());
+ EXPECT_EQ(34567, streams[0].min_bitrate_bps);
+}
+
+// With the balanced-degradation trial disabled, only resolution scaling is
+// expected (no framerate scaling).
+TEST_F(WebRtcVideoChannelTest,
+ BalancedDegradationPreferenceNotSupportedWithoutFieldtrial) {
+ webrtc::test::ScopedKeyValueConfig override_field_trials(
+ field_trials_, "WebRTC-Video-BalancedDegradation/Disabled/");
+ const bool kResolutionScalingEnabled = true;
+ const bool kFpsScalingEnabled = false;
+ TestDegradationPreference(kResolutionScalingEnabled, kFpsScalingEnabled);
+}
+
+// With the balanced-degradation trial enabled, both resolution and framerate
+// scaling are expected.
+TEST_F(WebRtcVideoChannelTest,
+ BalancedDegradationPreferenceSupportedBehindFieldtrial) {
+ webrtc::test::ScopedKeyValueConfig override_field_trials(
+ field_trials_, "WebRTC-Video-BalancedDegradation/Enabled/");
+ const bool kResolutionScalingEnabled = true;
+ const bool kFpsScalingEnabled = true;
+ TestDegradationPreference(kResolutionScalingEnabled, kFpsScalingEnabled);
+}
+
+// CPU adaptation enabled, camera content: resolution scaling expected.
+TEST_F(WebRtcVideoChannelTest, AdaptsOnOveruse) {
+ TestCpuAdaptation(true, false);
+}
+
+// CPU adaptation disabled, camera content: no scaling of any kind.
+TEST_F(WebRtcVideoChannelTest, DoesNotAdaptOnOveruseWhenDisabled) {
+ TestCpuAdaptation(false, false);
+}
+
+// CPU adaptation disabled, screenshare content: no scaling of any kind.
+// NOTE(review): "Screeensharing" is a typo in the test name — presumably kept
+// to match upstream WebRTC; renaming would change test-filter strings.
+TEST_F(WebRtcVideoChannelTest, DoesNotAdaptWhenScreeensharing) {
+ TestCpuAdaptation(false, true);
+}
+
+// CPU adaptation enabled, screenshare content: framerate scaling only
+// (resolution is preserved for screen content).
+TEST_F(WebRtcVideoChannelTest, DoesNotAdaptOnOveruseWhenScreensharing) {
+ TestCpuAdaptation(true, true);
+}
+
+// Switching between camera and screenshare recreates the send stream, and
+// resolution scaling must track the current content type rather than any
+// previous adaptation state.
+TEST_F(WebRtcVideoChannelTest, PreviousAdaptationDoesNotApplyToScreenshare) {
+ cricket::VideoCodec codec = GetEngineCodec("VP8");
+ cricket::VideoSenderParameters parameters;
+ parameters.codecs.push_back(codec);
+
+ MediaConfig media_config = GetMediaConfig();
+ media_config.video.enable_cpu_adaptation = true;
+ send_channel_ = engine_.CreateSendChannel(
+ fake_call_.get(), media_config, VideoOptions(), webrtc::CryptoOptions(),
+ video_bitrate_allocator_factory_.get());
+ receive_channel_ = engine_.CreateReceiveChannel(
+ fake_call_.get(), media_config, VideoOptions(), webrtc::CryptoOptions());
+
+ send_channel_->OnReadyToSend(true);
+ ASSERT_TRUE(send_channel_->SetSenderParameters(parameters));
+
+ AddSendStream();
+ webrtc::test::FrameForwarder frame_forwarder;
+
+ ASSERT_TRUE(send_channel_->SetSend(true));
+ cricket::VideoOptions camera_options;
+ camera_options.is_screencast = false;
+ send_channel_->SetVideoSend(last_ssrc_, &camera_options, &frame_forwarder);
+
+ ASSERT_EQ(1u, fake_call_->GetVideoSendStreams().size());
+ FakeVideoSendStream* send_stream = fake_call_->GetVideoSendStreams().front();
+
+ EXPECT_TRUE(send_stream->resolution_scaling_enabled());
+ // Dont' expect anything on framerate_scaling_enabled, since the default is
+ // transitioning from MAINTAIN_FRAMERATE to BALANCED.
+
+ // Switch to screen share. Expect no resolution scaling.
+ cricket::VideoOptions screenshare_options;
+ screenshare_options.is_screencast = true;
+ send_channel_->SetVideoSend(last_ssrc_, &screenshare_options,
+ &frame_forwarder);
+ // The content-type switch recreates the stream (created count goes to 2).
+ ASSERT_EQ(2, fake_call_->GetNumCreatedSendStreams());
+ send_stream = fake_call_->GetVideoSendStreams().front();
+ EXPECT_FALSE(send_stream->resolution_scaling_enabled());
+
+ // Switch back to the normal capturer. Expect resolution scaling to be
+ // reenabled.
+ send_channel_->SetVideoSend(last_ssrc_, &camera_options, &frame_forwarder);
+ // NOTE(review): send_stream is assigned twice in a row below; the first
+ // assignment is redundant (matches upstream).
+ send_stream = fake_call_->GetVideoSendStreams().front();
+ ASSERT_EQ(3, fake_call_->GetNumCreatedSendStreams());
+ send_stream = fake_call_->GetVideoSendStreams().front();
+ EXPECT_TRUE(send_stream->resolution_scaling_enabled());
+
+ EXPECT_TRUE(send_channel_->SetVideoSend(last_ssrc_, nullptr, nullptr));
+}
+
+// TODO(asapersson): Remove this test when the balanced field trial is removed.
+// Helper: recreates the channels with CPU adaptation enabled, adds a send
+// stream, and checks that the stream's resolution/framerate scaling flags
+// match the expectations passed in by the caller.
+void WebRtcVideoChannelTest::TestDegradationPreference(
+ bool resolution_scaling_enabled,
+ bool fps_scaling_enabled) {
+ cricket::VideoCodec codec = GetEngineCodec("VP8");
+ cricket::VideoSenderParameters parameters;
+ parameters.codecs.push_back(codec);
+
+ MediaConfig media_config = GetMediaConfig();
+ media_config.video.enable_cpu_adaptation = true;
+ send_channel_ = engine_.CreateSendChannel(
+ fake_call_.get(), media_config, VideoOptions(), webrtc::CryptoOptions(),
+ video_bitrate_allocator_factory_.get());
+ receive_channel_ = engine_.CreateReceiveChannel(
+ fake_call_.get(), media_config, VideoOptions(), webrtc::CryptoOptions());
+ send_channel_->OnReadyToSend(true);
+
+ EXPECT_TRUE(send_channel_->SetSenderParameters(parameters));
+
+ AddSendStream();
+
+ webrtc::test::FrameForwarder frame_forwarder;
+ VideoOptions options;
+ EXPECT_TRUE(
+ send_channel_->SetVideoSend(last_ssrc_, &options, &frame_forwarder));
+
+ EXPECT_TRUE(send_channel_->SetSend(true));
+
+ FakeVideoSendStream* send_stream = fake_call_->GetVideoSendStreams().front();
+ EXPECT_EQ(resolution_scaling_enabled,
+ send_stream->resolution_scaling_enabled());
+ EXPECT_EQ(fps_scaling_enabled, send_stream->framerate_scaling_enabled());
+
+ EXPECT_TRUE(send_channel_->SetVideoSend(last_ssrc_, nullptr, nullptr));
+}
+
+// Helper: recreates the channels with CPU adaptation optionally enabled and
+// the source optionally marked as screenshare, then checks the resulting
+// scaling flags: disabled => no scaling; screenshare => framerate scaling
+// only; camera => resolution scaling only.
+void WebRtcVideoChannelTest::TestCpuAdaptation(bool enable_overuse,
+ bool is_screenshare) {
+ cricket::VideoCodec codec = GetEngineCodec("VP8");
+ cricket::VideoSenderParameters parameters;
+ parameters.codecs.push_back(codec);
+
+ MediaConfig media_config = GetMediaConfig();
+ if (enable_overuse) {
+ media_config.video.enable_cpu_adaptation = true;
+ }
+ send_channel_ = engine_.CreateSendChannel(
+ fake_call_.get(), media_config, VideoOptions(), webrtc::CryptoOptions(),
+ video_bitrate_allocator_factory_.get());
+ receive_channel_ = engine_.CreateReceiveChannel(
+ fake_call_.get(), media_config, VideoOptions(), webrtc::CryptoOptions());
+ send_channel_->OnReadyToSend(true);
+
+ EXPECT_TRUE(send_channel_->SetSenderParameters(parameters));
+
+ AddSendStream();
+
+ webrtc::test::FrameForwarder frame_forwarder;
+ VideoOptions options;
+ options.is_screencast = is_screenshare;
+ EXPECT_TRUE(
+ send_channel_->SetVideoSend(last_ssrc_, &options, &frame_forwarder));
+
+ EXPECT_TRUE(send_channel_->SetSend(true));
+
+ FakeVideoSendStream* send_stream = fake_call_->GetVideoSendStreams().front();
+
+ if (!enable_overuse) {
+ EXPECT_FALSE(send_stream->resolution_scaling_enabled());
+ EXPECT_FALSE(send_stream->framerate_scaling_enabled());
+ } else if (is_screenshare) {
+ EXPECT_FALSE(send_stream->resolution_scaling_enabled());
+ EXPECT_TRUE(send_stream->framerate_scaling_enabled());
+ } else {
+ EXPECT_TRUE(send_stream->resolution_scaling_enabled());
+ EXPECT_FALSE(send_stream->framerate_scaling_enabled());
+ }
+ EXPECT_TRUE(send_channel_->SetVideoSend(last_ssrc_, nullptr, nullptr));
+}
+
+// Injects two frames — the second carrying an NTP timestamp — starting at an
+// RTP timestamp about to wrap, and checks that the receiver deduces the
+// capture-start NTP time reported in stats.
+TEST_F(WebRtcVideoChannelTest, EstimatesNtpStartTimeCorrectly) {
+ // Start at last timestamp to verify that wraparounds are estimated correctly.
+ static const uint32_t kInitialTimestamp = 0xFFFFFFFFu;
+ static const int64_t kInitialNtpTimeMs = 1247891230;
+ static const int kFrameOffsetMs = 20;
+ EXPECT_TRUE(receive_channel_->SetReceiverParameters(recv_parameters_));
+
+ FakeVideoReceiveStream* stream = AddRecvStream();
+ cricket::FakeVideoRenderer renderer;
+ EXPECT_TRUE(receive_channel_->SetSink(last_ssrc_, &renderer));
+
+ webrtc::VideoFrame video_frame =
+ webrtc::VideoFrame::Builder()
+ .set_video_frame_buffer(CreateBlackFrameBuffer(4, 4))
+ .set_timestamp_rtp(kInitialTimestamp)
+ .set_timestamp_us(0)
+ .set_rotation(webrtc::kVideoRotation_0)
+ .build();
+ // Initial NTP time is not available on the first frame, but should still be
+ // able to be estimated.
+ stream->InjectFrame(video_frame);
+
+ EXPECT_EQ(1, renderer.num_rendered_frames());
+
+ // This timestamp is kInitialTimestamp (-1) + kFrameOffsetMs * 90, which
+ // triggers a constant-overflow warning, hence we're calculating it explicitly
+ // here.
+ time_controller_.AdvanceTime(webrtc::TimeDelta::Millis(kFrameOffsetMs));
+ video_frame.set_timestamp(kFrameOffsetMs * 90 - 1);
+ video_frame.set_ntp_time_ms(kInitialNtpTimeMs + kFrameOffsetMs);
+ stream->InjectFrame(video_frame);
+
+ EXPECT_EQ(2, renderer.num_rendered_frames());
+
+ // Verify that NTP time has been correctly deduced.
+ cricket::VideoMediaSendInfo send_info;
+ cricket::VideoMediaReceiveInfo receive_info;
+ EXPECT_TRUE(send_channel_->GetStats(&send_info));
+ EXPECT_TRUE(receive_channel_->GetStats(&receive_info));
+
+ ASSERT_EQ(1u, receive_info.receivers.size());
+ EXPECT_EQ(kInitialNtpTimeMs,
+ receive_info.receivers[0].capture_start_ntp_time_ms);
+}
+
+// The default send codec matches the first engine codec, and a sim+RTX stream
+// picks up NACK, ULPFEC/RED payload types and the default RTX payload types.
+TEST_F(WebRtcVideoChannelTest, SetDefaultSendCodecs) {
+ AssignDefaultAptRtxTypes();
+ ASSERT_TRUE(send_channel_->SetSenderParameters(send_parameters_));
+
+ absl::optional<VideoCodec> codec = send_channel_->GetSendCodec();
+ ASSERT_TRUE(codec);
+ EXPECT_TRUE(codec->Matches(engine_.send_codecs()[0], &field_trials_));
+
+ // Using a RTX setup to verify that the default RTX payload type is good.
+ const std::vector<uint32_t> ssrcs = MAKE_VECTOR(kSsrcs1);
+ const std::vector<uint32_t> rtx_ssrcs = MAKE_VECTOR(kRtxSsrcs1);
+ FakeVideoSendStream* stream = AddSendStream(
+ cricket::CreateSimWithRtxStreamParams("cname", ssrcs, rtx_ssrcs));
+ webrtc::VideoSendStream::Config config = stream->GetConfig().Copy();
+
+ // Make sure NACK and FEC are enabled on the correct payload types.
+ EXPECT_EQ(1000, config.rtp.nack.rtp_history_ms);
+ EXPECT_EQ(GetEngineCodec("ulpfec").id, config.rtp.ulpfec.ulpfec_payload_type);
+ EXPECT_EQ(GetEngineCodec("red").id, config.rtp.ulpfec.red_payload_type);
+
+ EXPECT_EQ(1u, config.rtp.rtx.ssrcs.size());
+ EXPECT_EQ(kRtxSsrcs1[0], config.rtp.rtx.ssrcs[0]);
+ VerifySendStreamHasRtxTypes(config, default_apt_rtx_types_);
+ // TODO(juberti): Check RTCP, PLI, TMMBR.
+}
+
+// Without a packetization parameter on the codec, raw payload mode stays off.
+TEST_F(WebRtcVideoChannelTest, SetSendCodecsWithoutPacketization) {
+ cricket::VideoSenderParameters parameters;
+ parameters.codecs.push_back(GetEngineCodec("VP8"));
+ EXPECT_TRUE(send_channel_->SetSenderParameters(parameters));
+
+ FakeVideoSendStream* stream = AddSendStream();
+ const webrtc::VideoSendStream::Config config = stream->GetConfig().Copy();
+ EXPECT_FALSE(config.rtp.raw_payload);
+}
+
+// Setting packetization = "raw" on the codec enables raw payload mode in the
+// send-stream RTP config.
+TEST_F(WebRtcVideoChannelTest, SetSendCodecsWithPacketization) {
+ cricket::VideoSenderParameters parameters;
+ parameters.codecs.push_back(GetEngineCodec("VP8"));
+ parameters.codecs.back().packetization = kPacketizationParamRaw;
+ EXPECT_TRUE(send_channel_->SetSenderParameters(parameters));
+
+ FakeVideoSendStream* stream = AddSendStream();
+ const webrtc::VideoSendStream::Config config = stream->GetConfig().Copy();
+ EXPECT_TRUE(config.rtp.raw_payload);
+}
+
+// The following four tests ensures that FlexFEC is not activated by default
+// when the field trials are not enabled.
+// TODO(brandtr): Remove or update these tests when FlexFEC _is_ enabled by
+// default.
+TEST_F(WebRtcVideoChannelTest, FlexfecSendCodecWithoutSsrcNotExposedByDefault) {
+ FakeVideoSendStream* stream = AddSendStream();
+ webrtc::VideoSendStream::Config config = stream->GetConfig().Copy();
+
+ // No FlexFEC payload type, SSRC or protected SSRCs without the field trial.
+ EXPECT_EQ(-1, config.rtp.flexfec.payload_type);
+ EXPECT_EQ(0U, config.rtp.flexfec.ssrc);
+ EXPECT_TRUE(config.rtp.flexfec.protected_media_ssrcs.empty());
+}
+
+// Even with a FlexFEC SSRC in the stream params, the send config stays
+// FlexFEC-free without the field trial.
+TEST_F(WebRtcVideoChannelTest, FlexfecSendCodecWithSsrcNotExposedByDefault) {
+ FakeVideoSendStream* stream = AddSendStream(
+ CreatePrimaryWithFecFrStreamParams("cname", kSsrcs1[0], kFlexfecSsrc));
+ webrtc::VideoSendStream::Config config = stream->GetConfig().Copy();
+
+ EXPECT_EQ(-1, config.rtp.flexfec.payload_type);
+ EXPECT_EQ(0U, config.rtp.flexfec.ssrc);
+ EXPECT_TRUE(config.rtp.flexfec.protected_media_ssrcs.empty());
+}
+
+// A plain receive stream creates no FlexFEC receive streams by default.
+TEST_F(WebRtcVideoChannelTest, FlexfecRecvCodecWithoutSsrcNotExposedByDefault) {
+ AddRecvStream();
+
+ const std::vector<FakeFlexfecReceiveStream*>& streams =
+ fake_call_->GetFlexfecReceiveStreams();
+ EXPECT_TRUE(streams.empty());
+}
+
+// Signaling a FlexFEC SSRC on the receive side creates a FlexFEC receive
+// stream even without the advertised field trial.
+TEST_F(WebRtcVideoChannelTest, FlexfecRecvCodecWithSsrcExposedByDefault) {
+ AddRecvStream(
+ CreatePrimaryWithFecFrStreamParams("cname", kSsrcs1[0], kFlexfecSsrc));
+
+ const std::vector<FakeFlexfecReceiveStream*>& streams =
+ fake_call_->GetFlexfecReceiveStreams();
+ EXPECT_EQ(1U, streams.size());
+}
+
+// TODO(brandtr): When FlexFEC is no longer behind a field trial, merge all
+// tests that use this test fixture into the corresponding "non-field trial"
+// tests.
+// Fixture that enables the WebRTC-FlexFEC-03-Advertised field trial so
+// flexfec-03 is offered as a receive codec.
+class WebRtcVideoChannelFlexfecRecvTest : public WebRtcVideoChannelTest {
+ public:
+ WebRtcVideoChannelFlexfecRecvTest()
+ : WebRtcVideoChannelTest("WebRTC-FlexFEC-03-Advertised/Enabled/") {}
+};
+
+// The engine's flexfec-03 codec advertises transport-cc and REMB feedback.
+TEST_F(WebRtcVideoChannelFlexfecRecvTest,
+ DefaultFlexfecCodecHasTransportCcAndRembFeedbackParam) {
+ EXPECT_TRUE(cricket::HasTransportCc(GetEngineCodec("flexfec-03")));
+ EXPECT_TRUE(cricket::HasRemb(GetEngineCodec("flexfec-03")));
+}
+
+// Without a signaled FlexFEC SSRC no FlexFEC stream is created, and the video
+// receive stream is not marked as FlexFEC-protected.
+TEST_F(WebRtcVideoChannelFlexfecRecvTest, SetDefaultRecvCodecsWithoutSsrc) {
+ AddRecvStream();
+
+ const std::vector<FakeFlexfecReceiveStream*>& streams =
+ fake_call_->GetFlexfecReceiveStreams();
+ EXPECT_TRUE(streams.empty());
+
+ const std::vector<FakeVideoReceiveStream*>& video_streams =
+ fake_call_->GetVideoReceiveStreams();
+ ASSERT_EQ(1U, video_streams.size());
+ const FakeVideoReceiveStream& video_stream = *video_streams.front();
+ const webrtc::VideoReceiveStreamInterface::Config& video_config =
+ video_stream.GetConfig();
+ EXPECT_FALSE(video_config.rtp.protected_by_flexfec);
+ EXPECT_EQ(video_config.rtp.packet_sink_, nullptr);
+}
+
+// With a signaled FlexFEC SSRC a FlexFEC stream is created with the engine's
+// flexfec-03 payload type protecting the media SSRC, and the video receive
+// stream is wired up to it (protected_by_flexfec, non-null packet sink).
+TEST_F(WebRtcVideoChannelFlexfecRecvTest, SetDefaultRecvCodecsWithSsrc) {
+ AddRecvStream(
+ CreatePrimaryWithFecFrStreamParams("cname", kSsrcs1[0], kFlexfecSsrc));
+
+ const std::vector<FakeFlexfecReceiveStream*>& streams =
+ fake_call_->GetFlexfecReceiveStreams();
+ ASSERT_EQ(1U, streams.size());
+ const auto* stream = streams.front();
+ const webrtc::FlexfecReceiveStream::Config& config = stream->GetConfig();
+ EXPECT_EQ(GetEngineCodec("flexfec-03").id, config.payload_type);
+ EXPECT_EQ(kFlexfecSsrc, config.rtp.remote_ssrc);
+ ASSERT_EQ(1U, config.protected_media_ssrcs.size());
+ EXPECT_EQ(kSsrcs1[0], config.protected_media_ssrcs[0]);
+
+ const std::vector<FakeVideoReceiveStream*>& video_streams =
+ fake_call_->GetVideoReceiveStreams();
+ ASSERT_EQ(1U, video_streams.size());
+ const FakeVideoReceiveStream& video_stream = *video_streams.front();
+ const webrtc::VideoReceiveStreamInterface::Config& video_config =
+ video_stream.GetConfig();
+ EXPECT_TRUE(video_config.rtp.protected_by_flexfec);
+ EXPECT_NE(video_config.rtp.packet_sink_, nullptr);
+}
+
+// Test changing the configuration after a video stream has been created and
+// turn on flexfec. This will result in video stream being reconfigured but not
+// recreated because the flexfec stream pointer will be given to the already
+// existing video stream instance.
+TEST_F(WebRtcVideoChannelFlexfecRecvTest,
+ EnablingFlexfecDoesNotRecreateVideoReceiveStream) {
+ cricket::VideoReceiverParameters recv_parameters;
+ recv_parameters.codecs.push_back(GetEngineCodec("VP8"));
+ ASSERT_TRUE(receive_channel_->SetReceiverParameters(recv_parameters));
+
+ AddRecvStream(
+ CreatePrimaryWithFecFrStreamParams("cname", kSsrcs1[0], kFlexfecSsrc));
+ EXPECT_EQ(1, fake_call_->GetNumCreatedReceiveStreams());
+ const std::vector<FakeVideoReceiveStream*>& video_streams =
+ fake_call_->GetVideoReceiveStreams();
+ ASSERT_EQ(1U, video_streams.size());
+ const FakeVideoReceiveStream* video_stream = video_streams.front();
+ const webrtc::VideoReceiveStreamInterface::Config* video_config =
+ &video_stream->GetConfig();
+ // FlexFEC off until the codec is added to the receiver parameters.
+ EXPECT_FALSE(video_config->rtp.protected_by_flexfec);
+ EXPECT_EQ(video_config->rtp.packet_sink_, nullptr);
+
+ // Enable FlexFEC.
+ recv_parameters.codecs.push_back(GetEngineCodec("flexfec-03"));
+ ASSERT_TRUE(receive_channel_->SetReceiverParameters(recv_parameters));
+
+ // The count of created streams will remain 2 despite the creation of a new
+ // flexfec stream. The existing receive stream will have been reconfigured
+ // to use the new flexfec instance.
+ EXPECT_EQ(2, fake_call_->GetNumCreatedReceiveStreams())
+ << "Enabling FlexFEC should not create VideoReceiveStreamInterface (1).";
+ EXPECT_EQ(1U, fake_call_->GetVideoReceiveStreams().size())
+ << "Enabling FlexFEC should not create VideoReceiveStreamInterface (2).";
+ EXPECT_EQ(1U, fake_call_->GetFlexfecReceiveStreams().size())
+ << "Enabling FlexFEC should create a single FlexfecReceiveStream.";
+ video_stream = video_streams.front();
+ video_config = &video_stream->GetConfig();
+ EXPECT_TRUE(video_config->rtp.protected_by_flexfec);
+ EXPECT_NE(video_config->rtp.packet_sink_, nullptr);
+}
+
+// Test changing the configuration after a video stream has been created with
+// flexfec enabled and then turn off flexfec. This will not result in the video
+// stream being recreated. The flexfec stream pointer that's held by the video
+// stream will be set/cleared as dictated by the configuration change.
+TEST_F(WebRtcVideoChannelFlexfecRecvTest,
+ DisablingFlexfecDoesNotRecreateVideoReceiveStream) {
+ cricket::VideoReceiverParameters recv_parameters;
+ recv_parameters.codecs.push_back(GetEngineCodec("VP8"));
+ recv_parameters.codecs.push_back(GetEngineCodec("flexfec-03"));
+ ASSERT_TRUE(receive_channel_->SetReceiverParameters(recv_parameters));
+
+ AddRecvStream(
+ CreatePrimaryWithFecFrStreamParams("cname", kSsrcs1[0], kFlexfecSsrc));
+ // Both the video and the flexfec stream are created up front.
+ EXPECT_EQ(2, fake_call_->GetNumCreatedReceiveStreams());
+ EXPECT_EQ(1U, fake_call_->GetFlexfecReceiveStreams().size());
+ const std::vector<FakeVideoReceiveStream*>& video_streams =
+ fake_call_->GetVideoReceiveStreams();
+ ASSERT_EQ(1U, video_streams.size());
+ const FakeVideoReceiveStream* video_stream = video_streams.front();
+ const webrtc::VideoReceiveStreamInterface::Config* video_config =
+ &video_stream->GetConfig();
+ EXPECT_TRUE(video_config->rtp.protected_by_flexfec);
+ EXPECT_NE(video_config->rtp.packet_sink_, nullptr);
+
+ // Disable FlexFEC.
+ recv_parameters.codecs.clear();
+ recv_parameters.codecs.push_back(GetEngineCodec("VP8"));
+ ASSERT_TRUE(receive_channel_->SetReceiverParameters(recv_parameters));
+ // The count of created streams should remain 2 since the video stream will
+ // have been reconfigured to not reference flexfec and not recreated on
+ // account of the flexfec stream being deleted.
+ EXPECT_EQ(2, fake_call_->GetNumCreatedReceiveStreams())
+ << "Disabling FlexFEC should not recreate VideoReceiveStreamInterface.";
+ EXPECT_EQ(1U, fake_call_->GetVideoReceiveStreams().size())
+ << "Disabling FlexFEC should not destroy VideoReceiveStreamInterface.";
+ EXPECT_TRUE(fake_call_->GetFlexfecReceiveStreams().empty())
+ << "Disabling FlexFEC should destroy FlexfecReceiveStream.";
+ video_stream = video_streams.front();
+ video_config = &video_stream->GetConfig();
+ EXPECT_FALSE(video_config->rtp.protected_by_flexfec);
+ EXPECT_EQ(video_config->rtp.packet_sink_, nullptr);
+}
+
+// When two flexfec-03 codecs are offered (with different payload types), only
+// the first one is used for the FlexFEC receive stream; the duplicate is
+// dropped.
+TEST_F(WebRtcVideoChannelFlexfecRecvTest, DuplicateFlexfecCodecIsDropped) {
+ constexpr int kUnusedPayloadType1 = 127;
+
+ cricket::VideoReceiverParameters recv_parameters;
+ recv_parameters.codecs.push_back(GetEngineCodec("VP8"));
+ recv_parameters.codecs.push_back(GetEngineCodec("flexfec-03"));
+ cricket::VideoCodec duplicate = GetEngineCodec("flexfec-03");
+ duplicate.id = kUnusedPayloadType1;
+ recv_parameters.codecs.push_back(duplicate);
+ ASSERT_TRUE(receive_channel_->SetReceiverParameters(recv_parameters));
+
+ AddRecvStream(
+ CreatePrimaryWithFecFrStreamParams("cname", kSsrcs1[0], kFlexfecSsrc));
+
+ const std::vector<FakeFlexfecReceiveStream*>& streams =
+ fake_call_->GetFlexfecReceiveStreams();
+ ASSERT_EQ(1U, streams.size());
+ const auto* stream = streams.front();
+ const webrtc::FlexfecReceiveStream::Config& config = stream->GetConfig();
+ EXPECT_EQ(GetEngineCodec("flexfec-03").id, config.payload_type);
+}
+
+// TODO(brandtr): When FlexFEC is no longer behind a field trial, merge all
+// tests that use this test fixture into the corresponding "non-field trial"
+// tests.
+// Fixture enabling FlexFEC for both send and receive via field trials:
+// "WebRTC-FlexFEC-03-Advertised" exposes the codec in the engine's codec
+// lists, and "WebRTC-FlexFEC-03" enables actually sending it.
+class WebRtcVideoChannelFlexfecSendRecvTest : public WebRtcVideoChannelTest {
+ public:
+ WebRtcVideoChannelFlexfecSendRecvTest()
+ : WebRtcVideoChannelTest(
+ "WebRTC-FlexFEC-03-Advertised/Enabled/WebRTC-FlexFEC-03/Enabled/") {
+ }
+};
+
+// With FlexFEC enabled but no FEC-FR ssrc-group on the send stream, the
+// default send config carries the flexfec payload type but no SSRC mapping.
+TEST_F(WebRtcVideoChannelFlexfecSendRecvTest, SetDefaultSendCodecsWithoutSsrc) {
+ FakeVideoSendStream* stream = AddSendStream();
+ webrtc::VideoSendStream::Config config = stream->GetConfig().Copy();
+
+ EXPECT_EQ(GetEngineCodec("flexfec-03").id, config.rtp.flexfec.payload_type);
+ EXPECT_EQ(0U, config.rtp.flexfec.ssrc);
+ EXPECT_TRUE(config.rtp.flexfec.protected_media_ssrcs.empty());
+}
+
+// With a FEC-FR ssrc-group, the send config maps the FlexFEC SSRC onto the
+// protected media SSRC.
+TEST_F(WebRtcVideoChannelFlexfecSendRecvTest, SetDefaultSendCodecsWithSsrc) {
+ FakeVideoSendStream* stream = AddSendStream(
+ CreatePrimaryWithFecFrStreamParams("cname", kSsrcs1[0], kFlexfecSsrc));
+ webrtc::VideoSendStream::Config config = stream->GetConfig().Copy();
+
+ EXPECT_EQ(GetEngineCodec("flexfec-03").id, config.rtp.flexfec.payload_type);
+ EXPECT_EQ(kFlexfecSsrc, config.rtp.flexfec.ssrc);
+ ASSERT_EQ(1U, config.rtp.flexfec.protected_media_ssrcs.size());
+ EXPECT_EQ(kSsrcs1[0], config.rtp.flexfec.protected_media_ssrcs[0]);
+}
+
+// Negotiating only VP8 leaves ULPFEC/RED disabled (payload types -1).
+TEST_F(WebRtcVideoChannelTest, SetSendCodecsWithoutFec) {
+ cricket::VideoSenderParameters parameters;
+ parameters.codecs.push_back(GetEngineCodec("VP8"));
+ ASSERT_TRUE(send_channel_->SetSenderParameters(parameters));
+
+ FakeVideoSendStream* stream = AddSendStream();
+ webrtc::VideoSendStream::Config config = stream->GetConfig().Copy();
+
+ EXPECT_EQ(-1, config.rtp.ulpfec.ulpfec_payload_type);
+ EXPECT_EQ(-1, config.rtp.ulpfec.red_payload_type);
+}
+
+// Negotiating only VP8 leaves FlexFEC disabled even in the
+// FlexFEC-enabled fixture.
+TEST_F(WebRtcVideoChannelFlexfecSendRecvTest, SetSendCodecsWithoutFec) {
+ cricket::VideoSenderParameters parameters;
+ parameters.codecs.push_back(GetEngineCodec("VP8"));
+ ASSERT_TRUE(send_channel_->SetSenderParameters(parameters));
+
+ FakeVideoSendStream* stream = AddSendStream();
+ webrtc::VideoSendStream::Config config = stream->GetConfig().Copy();
+
+ EXPECT_EQ(-1, config.rtp.flexfec.payload_type);
+}
+
+// Verifies that negotiating flexfec-03 on the receive side creates a
+// FlexfecReceiveStream whose config (payload type, remote/protected SSRCs,
+// RTCP settings) mirrors both the parameters and the companion video stream.
+TEST_F(WebRtcVideoChannelFlexfecRecvTest, SetRecvCodecsWithFec) {
+ AddRecvStream(
+ CreatePrimaryWithFecFrStreamParams("cname", kSsrcs1[0], kFlexfecSsrc));
+
+ cricket::VideoReceiverParameters recv_parameters;
+ recv_parameters.codecs.push_back(GetEngineCodec("VP8"));
+ recv_parameters.codecs.push_back(GetEngineCodec("flexfec-03"));
+ ASSERT_TRUE(receive_channel_->SetReceiverParameters(recv_parameters));
+
+ const std::vector<FakeFlexfecReceiveStream*>& flexfec_streams =
+ fake_call_->GetFlexfecReceiveStreams();
+ ASSERT_EQ(1U, flexfec_streams.size());
+ const FakeFlexfecReceiveStream* flexfec_stream = flexfec_streams.front();
+ const webrtc::FlexfecReceiveStream::Config& flexfec_stream_config =
+ flexfec_stream->GetConfig();
+ EXPECT_EQ(GetEngineCodec("flexfec-03").id,
+ flexfec_stream_config.payload_type);
+ EXPECT_EQ(kFlexfecSsrc, flexfec_stream_config.rtp.remote_ssrc);
+ ASSERT_EQ(1U, flexfec_stream_config.protected_media_ssrcs.size());
+ EXPECT_EQ(kSsrcs1[0], flexfec_stream_config.protected_media_ssrcs[0]);
+ // The FlexFEC stream must share local SSRC, RTCP mode and RTCP transport
+ // with the video receive stream it protects.
+ const std::vector<FakeVideoReceiveStream*>& video_streams =
+ fake_call_->GetVideoReceiveStreams();
+ const FakeVideoReceiveStream* video_stream = video_streams.front();
+ const webrtc::VideoReceiveStreamInterface::Config& video_stream_config =
+ video_stream->GetConfig();
+ EXPECT_EQ(video_stream_config.rtp.local_ssrc,
+ flexfec_stream_config.rtp.local_ssrc);
+ EXPECT_EQ(video_stream_config.rtp.rtcp_mode, flexfec_stream_config.rtcp_mode);
+ EXPECT_EQ(video_stream_config.rtcp_send_transport,
+ flexfec_stream_config.rtcp_send_transport);
+ // NOTE(review): this repeats the rtcp_mode check from three lines above
+ // and looks redundant -- confirm against upstream before removing.
+ EXPECT_EQ(video_stream_config.rtp.rtcp_mode, flexfec_stream_config.rtcp_mode);
+}
+
+// We should not send FlexFEC, even if we advertise it, unless the right
+// field trial is set.
+// TODO(brandtr): Remove when FlexFEC is enabled by default.
+// Case 1: no FEC-FR ssrc-group on the send stream.
+TEST_F(WebRtcVideoChannelFlexfecRecvTest,
+ SetSendCodecsWithoutSsrcWithFecDoesNotEnableFec) {
+ cricket::VideoSenderParameters parameters;
+ parameters.codecs.push_back(GetEngineCodec("VP8"));
+ parameters.codecs.push_back(GetEngineCodec("flexfec-03"));
+ ASSERT_TRUE(send_channel_->SetSenderParameters(parameters));
+
+ FakeVideoSendStream* stream = AddSendStream();
+ webrtc::VideoSendStream::Config config = stream->GetConfig().Copy();
+
+ // FlexFEC stays fully disabled in the send config.
+ EXPECT_EQ(-1, config.rtp.flexfec.payload_type);
+ EXPECT_EQ(0u, config.rtp.flexfec.ssrc);
+ EXPECT_TRUE(config.rtp.flexfec.protected_media_ssrcs.empty());
+}
+
+// Case 2: even with a FEC-FR ssrc-group, sending stays disabled because the
+// send-enabling field trial is not set in this fixture.
+TEST_F(WebRtcVideoChannelFlexfecRecvTest,
+ SetSendCodecsWithSsrcWithFecDoesNotEnableFec) {
+ cricket::VideoSenderParameters parameters;
+ parameters.codecs.push_back(GetEngineCodec("VP8"));
+ parameters.codecs.push_back(GetEngineCodec("flexfec-03"));
+ ASSERT_TRUE(send_channel_->SetSenderParameters(parameters));
+
+ FakeVideoSendStream* stream = AddSendStream(
+ CreatePrimaryWithFecFrStreamParams("cname", kSsrcs1[0], kFlexfecSsrc));
+ webrtc::VideoSendStream::Config config = stream->GetConfig().Copy();
+
+ EXPECT_EQ(-1, config.rtp.flexfec.payload_type);
+ EXPECT_EQ(0u, config.rtp.flexfec.ssrc);
+ EXPECT_TRUE(config.rtp.flexfec.protected_media_ssrcs.empty());
+}
+
+// An RTX codec negotiated without an "apt" (associated payload type)
+// parameter must be rejected by SetSenderParameters.
+TEST_F(WebRtcVideoChannelTest,
+ SetSendCodecRejectsRtxWithoutAssociatedPayloadType) {
+ const int kUnusedPayloadType = 127;
+ // Sanity: the chosen payload type is not taken by an engine codec.
+ EXPECT_FALSE(FindCodecById(engine_.send_codecs(), kUnusedPayloadType));
+
+ cricket::VideoSenderParameters parameters;
+ cricket::VideoCodec rtx_codec =
+ cricket::CreateVideoCodec(kUnusedPayloadType, "rtx");
+ parameters.codecs.push_back(rtx_codec);
+ EXPECT_FALSE(send_channel_->SetSenderParameters(parameters))
+ << "RTX codec without associated payload type should be rejected.";
+}
+
+// An RTX codec whose "apt" points at a payload type that is not one of the
+// negotiated video codecs must be rejected; a valid "apt" is accepted.
+TEST_F(WebRtcVideoChannelTest,
+ SetSendCodecRejectsRtxWithoutMatchingVideoCodec) {
+ const int kUnusedPayloadType1 = 126;
+ const int kUnusedPayloadType2 = 127;
+ EXPECT_FALSE(FindCodecById(engine_.send_codecs(), kUnusedPayloadType1));
+ EXPECT_FALSE(FindCodecById(engine_.send_codecs(), kUnusedPayloadType2));
+ {
+ // Valid: RTX associated with the negotiated VP8 payload type.
+ cricket::VideoCodec rtx_codec = cricket::CreateVideoRtxCodec(
+ kUnusedPayloadType1, GetEngineCodec("VP8").id);
+ cricket::VideoSenderParameters parameters;
+ parameters.codecs.push_back(GetEngineCodec("VP8"));
+ parameters.codecs.push_back(rtx_codec);
+ ASSERT_TRUE(send_channel_->SetSenderParameters(parameters));
+ }
+ {
+ // Invalid: "apt" references a payload type nobody negotiated.
+ cricket::VideoCodec rtx_codec =
+ cricket::CreateVideoRtxCodec(kUnusedPayloadType1, kUnusedPayloadType2);
+ cricket::VideoSenderParameters parameters;
+ parameters.codecs.push_back(GetEngineCodec("VP8"));
+ parameters.codecs.push_back(rtx_codec);
+ EXPECT_FALSE(send_channel_->SetSenderParameters(parameters))
+ << "RTX without matching video codec should be rejected.";
+ }
+}
+
+// Changing only the RTX payload type in a renegotiation must update the send
+// stream's rtx.payload_type while keeping the RTX SSRC mapping intact.
+TEST_F(WebRtcVideoChannelTest, SetSendCodecsWithChangedRtxPayloadType) {
+ const int kUnusedPayloadType1 = 126;
+ const int kUnusedPayloadType2 = 127;
+ EXPECT_FALSE(FindCodecById(engine_.send_codecs(), kUnusedPayloadType1));
+ EXPECT_FALSE(FindCodecById(engine_.send_codecs(), kUnusedPayloadType2));
+
+ // SSRCs for RTX.
+ cricket::StreamParams params =
+ cricket::StreamParams::CreateLegacy(kSsrcs1[0]);
+ params.AddFidSsrc(kSsrcs1[0], kRtxSsrcs1[0]);
+ AddSendStream(params);
+
+ // Original payload type for RTX.
+ cricket::VideoSenderParameters parameters;
+ parameters.codecs.push_back(GetEngineCodec("VP8"));
+ cricket::VideoCodec rtx_codec =
+ cricket::CreateVideoCodec(kUnusedPayloadType1, "rtx");
+ rtx_codec.SetParam("apt", GetEngineCodec("VP8").id);
+ parameters.codecs.push_back(rtx_codec);
+ EXPECT_TRUE(send_channel_->SetSenderParameters(parameters));
+ ASSERT_EQ(1U, fake_call_->GetVideoSendStreams().size());
+ const webrtc::VideoSendStream::Config& config_before =
+ fake_call_->GetVideoSendStreams()[0]->GetConfig();
+ EXPECT_EQ(kUnusedPayloadType1, config_before.rtp.rtx.payload_type);
+ ASSERT_EQ(1U, config_before.rtp.rtx.ssrcs.size());
+ EXPECT_EQ(kRtxSsrcs1[0], config_before.rtp.rtx.ssrcs[0]);
+
+ // Change payload type for RTX.
+ parameters.codecs[1].id = kUnusedPayloadType2;
+ EXPECT_TRUE(send_channel_->SetSenderParameters(parameters));
+ ASSERT_EQ(1U, fake_call_->GetVideoSendStreams().size());
+ const webrtc::VideoSendStream::Config& config_after =
+ fake_call_->GetVideoSendStreams()[0]->GetConfig();
+ EXPECT_EQ(kUnusedPayloadType2, config_after.rtp.rtx.payload_type);
+ ASSERT_EQ(1U, config_after.rtp.rtx.ssrcs.size());
+ // The SSRC mapping is untouched by the payload-type change.
+ EXPECT_EQ(kRtxSsrcs1[0], config_after.rtp.rtx.ssrcs[0]);
+}
+
+// Renegotiating without the ULPFEC codec must disable previously-enabled
+// ULPFEC on the existing send stream.
+TEST_F(WebRtcVideoChannelTest, SetSendCodecsWithoutFecDisablesFec) {
+ cricket::VideoSenderParameters parameters;
+ parameters.codecs.push_back(GetEngineCodec("VP8"));
+ parameters.codecs.push_back(GetEngineCodec("ulpfec"));
+ ASSERT_TRUE(send_channel_->SetSenderParameters(parameters));
+
+ FakeVideoSendStream* stream = AddSendStream();
+ webrtc::VideoSendStream::Config config = stream->GetConfig().Copy();
+
+ EXPECT_EQ(GetEngineCodec("ulpfec").id, config.rtp.ulpfec.ulpfec_payload_type);
+
+ // Drop the ulpfec codec and renegotiate.
+ parameters.codecs.pop_back();
+ ASSERT_TRUE(send_channel_->SetSenderParameters(parameters));
+ // Re-fetch the stream; it may have been recreated by the renegotiation.
+ stream = fake_call_->GetVideoSendStreams()[0];
+ ASSERT_TRUE(stream != nullptr);
+ config = stream->GetConfig().Copy();
+ EXPECT_EQ(-1, config.rtp.ulpfec.ulpfec_payload_type)
+ << "SetSendCodec without ULPFEC should disable current ULPFEC.";
+}
+
+// Same as above but for FlexFEC in the send+recv-enabled fixture.
+TEST_F(WebRtcVideoChannelFlexfecSendRecvTest,
+ SetSendCodecsWithoutFecDisablesFec) {
+ cricket::VideoSenderParameters parameters;
+ parameters.codecs.push_back(GetEngineCodec("VP8"));
+ parameters.codecs.push_back(GetEngineCodec("flexfec-03"));
+ ASSERT_TRUE(send_channel_->SetSenderParameters(parameters));
+
+ FakeVideoSendStream* stream = AddSendStream(
+ CreatePrimaryWithFecFrStreamParams("cname", kSsrcs1[0], kFlexfecSsrc));
+ webrtc::VideoSendStream::Config config = stream->GetConfig().Copy();
+
+ EXPECT_EQ(GetEngineCodec("flexfec-03").id, config.rtp.flexfec.payload_type);
+ EXPECT_EQ(kFlexfecSsrc, config.rtp.flexfec.ssrc);
+ ASSERT_EQ(1U, config.rtp.flexfec.protected_media_ssrcs.size());
+ EXPECT_EQ(kSsrcs1[0], config.rtp.flexfec.protected_media_ssrcs[0]);
+
+ // Drop the flexfec codec and renegotiate.
+ parameters.codecs.pop_back();
+ ASSERT_TRUE(send_channel_->SetSenderParameters(parameters));
+ stream = fake_call_->GetVideoSendStreams()[0];
+ ASSERT_TRUE(stream != nullptr);
+ config = stream->GetConfig().Copy();
+ EXPECT_EQ(-1, config.rtp.flexfec.payload_type)
+ << "SetSendCodec without FlexFEC should disable current FlexFEC.";
+}
+
+// Changing a codec parameter (max QP) in a renegotiation must propagate to
+// the existing send stream's VideoStreams without requiring a new stream.
+TEST_F(WebRtcVideoChannelTest, SetSendCodecsChangesExistingStreams) {
+ cricket::VideoSenderParameters parameters;
+ cricket::VideoCodec codec = cricket::CreateVideoCodec(100, "VP8");
+ codec.SetParam(kCodecParamMaxQuantization, kDefaultVideoMaxQpVpx);
+ parameters.codecs.push_back(codec);
+
+ ASSERT_TRUE(send_channel_->SetSenderParameters(parameters));
+ send_channel_->SetSend(true);
+
+ FakeVideoSendStream* stream = AddSendStream();
+ webrtc::test::FrameForwarder frame_forwarder;
+ EXPECT_TRUE(
+ send_channel_->SetVideoSend(last_ssrc_, nullptr, &frame_forwarder));
+
+ std::vector<webrtc::VideoStream> streams = stream->GetVideoStreams();
+ EXPECT_EQ(kDefaultVideoMaxQpVpx, streams[0].max_qp);
+
+ // Renegotiate with a different max QP.
+ parameters.codecs.clear();
+ codec.SetParam(kCodecParamMaxQuantization, kDefaultVideoMaxQpVpx + 1);
+ parameters.codecs.push_back(codec);
+ ASSERT_TRUE(send_channel_->SetSenderParameters(parameters));
+ streams = fake_call_->GetVideoSendStreams()[0]->GetVideoStreams();
+ EXPECT_EQ(kDefaultVideoMaxQpVpx + 1, streams[0].max_qp);
+ EXPECT_TRUE(send_channel_->SetVideoSend(last_ssrc_, nullptr, nullptr));
+}
+
+// The helper takes (min, start, max) codec-param strings paired with the
+// bitrate in bps it expects the call to be configured with; presumably the
+// strings are kbps values -- confirm against the helper's definition.
+TEST_F(WebRtcVideoChannelTest, SetSendCodecsWithBitrates) {
+ SetSendCodecsShouldWorkForBitrates("100", 100000, "150", 150000, "200",
+ 200000);
+}
+
+TEST_F(WebRtcVideoChannelTest, SetSendCodecsWithHighMaxBitrate) {
+ SetSendCodecsShouldWorkForBitrates("", 0, "", -1, "10000", 10000000);
+ std::vector<webrtc::VideoStream> streams = AddSendStream()->GetVideoStreams();
+ ASSERT_EQ(1u, streams.size());
+ EXPECT_EQ(10000000, streams[0].max_bitrate_bps);
+}
+
+TEST_F(WebRtcVideoChannelTest,
+ SetSendCodecsWithoutBitratesUsesCorrectDefaults) {
+ SetSendCodecsShouldWorkForBitrates("", 0, "", -1, "", -1);
+}
+
+// Negative min/start values in the codec params must be clamped.
+TEST_F(WebRtcVideoChannelTest, SetSendCodecsCapsMinAndStartBitrate) {
+ SetSendCodecsShouldWorkForBitrates("-1", 0, "-100", -1, "", -1);
+}
+
+// max < min bitrate is an invalid combination and must be rejected.
+TEST_F(WebRtcVideoChannelTest, SetSendCodecsRejectsMaxLessThanMinBitrate) {
+ send_parameters_.codecs[0].params[kCodecParamMinBitrate] = "300";
+ send_parameters_.codecs[0].params[kCodecParamMaxBitrate] = "200";
+ EXPECT_FALSE(send_channel_->SetSenderParameters(send_parameters_));
+}
+
+// If an encoding's codec was pinned via SetRtpSendParameters and that codec
+// is later removed from the negotiated set, GetRtpSendParameters must no
+// longer report the pinned codec.
+TEST_F(WebRtcVideoChannelTest,
+ SetSenderParametersRemovesSelectedCodecFromRtpParameters) {
+ EXPECT_TRUE(AddSendStream());
+ cricket::VideoSenderParameters parameters;
+ parameters.codecs.push_back(cricket::CreateVideoCodec(100, "VP8"));
+ // NOTE(review): VP9 here reuses payload type 100 already given to VP8
+ // above; this looks like it should be a distinct payload type (e.g. 101).
+ // Confirm against upstream before changing.
+ parameters.codecs.push_back(cricket::CreateVideoCodec(100, "VP9"));
+ send_channel_->SetSenderParameters(parameters);
+
+ webrtc::RtpParameters initial_params =
+ send_channel_->GetRtpSendParameters(last_ssrc_);
+
+ webrtc::RtpCodec vp9_rtp_codec;
+ vp9_rtp_codec.name = "VP9";
+ vp9_rtp_codec.kind = cricket::MEDIA_TYPE_VIDEO;
+ vp9_rtp_codec.clock_rate = 90000;
+ initial_params.encodings[0].codec = vp9_rtp_codec;
+
+ // We should be able to set the params with the VP9 codec that has been
+ // negotiated.
+ EXPECT_TRUE(
+ send_channel_->SetRtpSendParameters(last_ssrc_, initial_params).ok());
+
+ parameters.codecs.clear();
+ parameters.codecs.push_back(cricket::CreateVideoCodec(100, "VP8"));
+ send_channel_->SetSenderParameters(parameters);
+
+ // Since VP9 is no longer negotiated, the RTP parameters should not have a
+ // forced codec anymore.
+ webrtc::RtpParameters new_params =
+ send_channel_->GetRtpSendParameters(last_ssrc_);
+ EXPECT_EQ(new_params.encodings[0].codec, absl::nullopt);
+}
+
+// Test that when both the codec-specific bitrate params and max_bandwidth_bps
+// are present in the same send parameters, the settings are combined correctly.
+// Note: ExpectSetBitrateParameters/ExpectSetMaxBitrate arm a mock expectation
+// that is satisfied by the *following* SetSenderParameters call.
+TEST_F(WebRtcVideoChannelTest, SetSendCodecsWithBitratesAndMaxSendBandwidth) {
+ send_parameters_.codecs[0].params[kCodecParamMinBitrate] = "100";
+ send_parameters_.codecs[0].params[kCodecParamStartBitrate] = "200";
+ send_parameters_.codecs[0].params[kCodecParamMaxBitrate] = "300";
+ send_parameters_.max_bandwidth_bps = 400000;
+ // We expect max_bandwidth_bps to take priority, if set.
+ ExpectSetBitrateParameters(100000, 200000, 400000);
+ EXPECT_TRUE(send_channel_->SetSenderParameters(send_parameters_));
+ // Since the codec isn't changing, start_bitrate_bps should be -1.
+ ExpectSetBitrateParameters(100000, -1, 350000);
+
+ // Decrease max_bandwidth_bps.
+ send_parameters_.max_bandwidth_bps = 350000;
+ EXPECT_TRUE(send_channel_->SetSenderParameters(send_parameters_));
+
+ // Now try again with the values flipped around.
+ send_parameters_.codecs[0].params[kCodecParamMaxBitrate] = "400";
+ send_parameters_.max_bandwidth_bps = 300000;
+ ExpectSetBitrateParameters(100000, 200000, 300000);
+ EXPECT_TRUE(send_channel_->SetSenderParameters(send_parameters_));
+
+ // If we change the codec max, max_bandwidth_bps should still apply.
+ send_parameters_.codecs[0].params[kCodecParamMaxBitrate] = "350";
+ ExpectSetBitrateParameters(100000, 200000, 300000);
+ EXPECT_TRUE(send_channel_->SetSenderParameters(send_parameters_));
+}
+
+TEST_F(WebRtcVideoChannelTest, SetMaxSendBandwidthShouldPreserveOtherBitrates) {
+ SetSendCodecsShouldWorkForBitrates("100", 100000, "150", 150000, "200",
+ 200000);
+ send_parameters_.max_bandwidth_bps = 300000;
+ // Setting max bitrate should keep previous min bitrate.
+ // Setting max bitrate should not reset start bitrate.
+ ExpectSetBitrateParameters(100000, -1, 300000);
+ EXPECT_TRUE(send_channel_->SetSenderParameters(send_parameters_));
+}
+
+TEST_F(WebRtcVideoChannelTest, SetMaxSendBandwidthShouldBeRemovable) {
+ send_parameters_.max_bandwidth_bps = 300000;
+ ExpectSetMaxBitrate(300000);
+ EXPECT_TRUE(send_channel_->SetSenderParameters(send_parameters_));
+ // -1 means to disable max bitrate (set infinite).
+ send_parameters_.max_bandwidth_bps = -1;
+ ExpectSetMaxBitrate(-1);
+ EXPECT_TRUE(send_channel_->SetSenderParameters(send_parameters_));
+}
+
+// max_bandwidth_bps must also cap the per-stream max bitrate, both when the
+// stream is added and when the cap is later lowered.
+TEST_F(WebRtcVideoChannelTest, SetMaxSendBandwidthAndAddSendStream) {
+ send_parameters_.max_bandwidth_bps = 99999;
+ FakeVideoSendStream* stream = AddSendStream();
+ ExpectSetMaxBitrate(send_parameters_.max_bandwidth_bps);
+ ASSERT_TRUE(send_channel_->SetSenderParameters(send_parameters_));
+ ASSERT_EQ(1u, stream->GetVideoStreams().size());
+ EXPECT_EQ(send_parameters_.max_bandwidth_bps,
+ stream->GetVideoStreams()[0].max_bitrate_bps);
+
+ send_parameters_.max_bandwidth_bps = 77777;
+ ExpectSetMaxBitrate(send_parameters_.max_bandwidth_bps);
+ ASSERT_TRUE(send_channel_->SetSenderParameters(send_parameters_));
+ EXPECT_EQ(send_parameters_.max_bandwidth_bps,
+ stream->GetVideoStreams()[0].max_bitrate_bps);
+}
+
+// Tests that when the codec specific max bitrate and VideoSenderParameters
+// max_bandwidth_bps are used, that it sets the VideoStream's max bitrate
+// appropriately.
+TEST_F(WebRtcVideoChannelTest,
+ MaxBitratePrioritizesVideoSendParametersOverCodecMaxBitrate) {
+ send_parameters_.codecs[0].params[kCodecParamMinBitrate] = "100";
+ send_parameters_.codecs[0].params[kCodecParamStartBitrate] = "200";
+ send_parameters_.codecs[0].params[kCodecParamMaxBitrate] = "300";
+ send_parameters_.max_bandwidth_bps = -1;
+ AddSendStream();
+ ExpectSetMaxBitrate(300000);
+ ASSERT_TRUE(send_channel_->SetSenderParameters(send_parameters_));
+
+ std::vector<FakeVideoSendStream*> video_send_streams = GetFakeSendStreams();
+ ASSERT_EQ(1u, video_send_streams.size());
+ FakeVideoSendStream* video_send_stream = video_send_streams[0];
+ ASSERT_EQ(1u, video_send_streams[0]->GetVideoStreams().size());
+ // First the max bitrate is set based upon the codec param.
+ EXPECT_EQ(300000,
+ video_send_streams[0]->GetVideoStreams()[0].max_bitrate_bps);
+
+ // The VideoSenderParameters max bitrate overrides the codec's.
+ send_parameters_.max_bandwidth_bps = 500000;
+ ExpectSetMaxBitrate(send_parameters_.max_bandwidth_bps);
+ ASSERT_TRUE(send_channel_->SetSenderParameters(send_parameters_));
+ ASSERT_EQ(1u, video_send_stream->GetVideoStreams().size());
+ EXPECT_EQ(500000, video_send_stream->GetVideoStreams()[0].max_bitrate_bps);
+}
+
+// Tests that when the codec specific max bitrate and RtpParameters
+// max_bitrate_bps are used, that it sets the VideoStream's max bitrate
+// appropriately.
+TEST_F(WebRtcVideoChannelTest,
+ MaxBitratePrioritizesRtpParametersOverCodecMaxBitrate) {
+ send_parameters_.codecs[0].params[kCodecParamMinBitrate] = "100";
+ send_parameters_.codecs[0].params[kCodecParamStartBitrate] = "200";
+ send_parameters_.codecs[0].params[kCodecParamMaxBitrate] = "300";
+ send_parameters_.max_bandwidth_bps = -1;
+ AddSendStream();
+ ExpectSetMaxBitrate(300000);
+ ASSERT_TRUE(send_channel_->SetSenderParameters(send_parameters_));
+
+ std::vector<FakeVideoSendStream*> video_send_streams = GetFakeSendStreams();
+ ASSERT_EQ(1u, video_send_streams.size());
+ FakeVideoSendStream* video_send_stream = video_send_streams[0];
+ ASSERT_EQ(1u, video_send_stream->GetVideoStreams().size());
+ // First the max bitrate is set based upon the codec param.
+ EXPECT_EQ(300000, video_send_stream->GetVideoStreams()[0].max_bitrate_bps);
+
+ // The RtpParameter max bitrate overrides the codec's.
+ webrtc::RtpParameters parameters =
+ send_channel_->GetRtpSendParameters(last_ssrc_);
+ ASSERT_EQ(1u, parameters.encodings.size());
+ parameters.encodings[0].max_bitrate_bps = 500000;
+ EXPECT_TRUE(send_channel_->SetRtpSendParameters(last_ssrc_, parameters).ok());
+ ASSERT_EQ(1u, video_send_stream->GetVideoStreams().size());
+ EXPECT_EQ(parameters.encodings[0].max_bitrate_bps,
+ video_send_stream->GetVideoStreams()[0].max_bitrate_bps);
+}
+
+// The effective per-stream max bitrate is min(max_bandwidth_bps, per-encoding
+// max_bitrate_bps): whichever is lower wins.
+TEST_F(WebRtcVideoChannelTest,
+ MaxBitrateIsMinimumOfMaxSendBandwidthAndMaxEncodingBitrate) {
+ send_parameters_.max_bandwidth_bps = 99999;
+ FakeVideoSendStream* stream = AddSendStream();
+ ExpectSetMaxBitrate(send_parameters_.max_bandwidth_bps);
+ ASSERT_TRUE(send_channel_->SetSenderParameters(send_parameters_));
+ ASSERT_EQ(1u, stream->GetVideoStreams().size());
+ EXPECT_EQ(send_parameters_.max_bandwidth_bps,
+ stream->GetVideoStreams()[0].max_bitrate_bps);
+
+ // Get and set the rtp encoding parameters.
+ webrtc::RtpParameters parameters =
+ send_channel_->GetRtpSendParameters(last_ssrc_);
+ EXPECT_EQ(1u, parameters.encodings.size());
+
+ // Encoding max below the bandwidth cap: encoding max wins.
+ parameters.encodings[0].max_bitrate_bps = 99999 - 1;
+ EXPECT_TRUE(send_channel_->SetRtpSendParameters(last_ssrc_, parameters).ok());
+ EXPECT_EQ(parameters.encodings[0].max_bitrate_bps,
+ stream->GetVideoStreams()[0].max_bitrate_bps);
+
+ // Encoding max above the bandwidth cap: the cap wins.
+ parameters.encodings[0].max_bitrate_bps = 99999 + 1;
+ EXPECT_TRUE(send_channel_->SetRtpSendParameters(last_ssrc_, parameters).ok());
+ EXPECT_EQ(send_parameters_.max_bandwidth_bps,
+ stream->GetVideoStreams()[0].max_bitrate_bps);
+}
+
+// Raising max_bandwidth_bps after streams exist must raise the encoder's max
+// bitrate; a frame is pushed to force the encoder config to be re-applied.
+TEST_F(WebRtcVideoChannelTest, SetMaxSendBitrateCanIncreaseSenderBitrate) {
+ cricket::VideoSenderParameters parameters;
+ parameters.codecs.push_back(GetEngineCodec("VP8"));
+ ASSERT_TRUE(send_channel_->SetSenderParameters(parameters));
+ send_channel_->SetSend(true);
+
+ FakeVideoSendStream* stream = AddSendStream();
+
+ webrtc::test::FrameForwarder frame_forwarder;
+ EXPECT_TRUE(
+ send_channel_->SetVideoSend(last_ssrc_, nullptr, &frame_forwarder));
+
+ std::vector<webrtc::VideoStream> streams = stream->GetVideoStreams();
+ int initial_max_bitrate_bps = streams[0].max_bitrate_bps;
+ EXPECT_GT(initial_max_bitrate_bps, 0);
+
+ parameters.max_bandwidth_bps = initial_max_bitrate_bps * 2;
+ EXPECT_TRUE(send_channel_->SetSenderParameters(parameters));
+ // Insert a frame to update the encoder config.
+ frame_forwarder.IncomingCapturedFrame(frame_source_.GetFrame());
+ streams = stream->GetVideoStreams();
+ EXPECT_EQ(initial_max_bitrate_bps * 2, streams[0].max_bitrate_bps);
+ EXPECT_TRUE(send_channel_->SetVideoSend(last_ssrc_, nullptr, nullptr));
+}
+
+// Same as above but for a simulcast stream: the doubled cap applies to the
+// total max bitrate summed over all simulcast layers.
+TEST_F(WebRtcVideoChannelTest,
+ SetMaxSendBitrateCanIncreaseSimulcastSenderBitrate) {
+ cricket::VideoSenderParameters parameters;
+ parameters.codecs.push_back(GetEngineCodec("VP8"));
+ ASSERT_TRUE(send_channel_->SetSenderParameters(parameters));
+ send_channel_->SetSend(true);
+
+ FakeVideoSendStream* stream = AddSendStream(
+ cricket::CreateSimStreamParams("cname", MAKE_VECTOR(kSsrcs3)));
+
+ // Send a frame to make sure this scales up to >1 stream (simulcast).
+ webrtc::test::FrameForwarder frame_forwarder;
+ EXPECT_TRUE(
+ send_channel_->SetVideoSend(kSsrcs3[0], nullptr, &frame_forwarder));
+ frame_forwarder.IncomingCapturedFrame(frame_source_.GetFrame());
+
+ std::vector<webrtc::VideoStream> streams = stream->GetVideoStreams();
+ ASSERT_GT(streams.size(), 1u)
+ << "Without simulcast this test doesn't make sense.";
+ int initial_max_bitrate_bps = GetTotalMaxBitrate(streams).bps();
+ EXPECT_GT(initial_max_bitrate_bps, 0);
+
+ parameters.max_bandwidth_bps = initial_max_bitrate_bps * 2;
+ EXPECT_TRUE(send_channel_->SetSenderParameters(parameters));
+ // Insert a frame to update the encoder config.
+ frame_forwarder.IncomingCapturedFrame(frame_source_.GetFrame());
+ streams = stream->GetVideoStreams();
+ int increased_max_bitrate_bps = GetTotalMaxBitrate(streams).bps();
+ EXPECT_EQ(initial_max_bitrate_bps * 2, increased_max_bitrate_bps);
+
+ EXPECT_TRUE(send_channel_->SetVideoSend(kSsrcs3[0], nullptr, nullptr));
+}
+
+// The max-quantization codec param must reach the VideoStream config and be
+// reflected back by GetSendCodec().
+TEST_F(WebRtcVideoChannelTest, SetSendCodecsWithMaxQuantization) {
+ static const char* kMaxQuantization = "21";
+ cricket::VideoSenderParameters parameters;
+ parameters.codecs.push_back(GetEngineCodec("VP8"));
+ parameters.codecs[0].params[kCodecParamMaxQuantization] = kMaxQuantization;
+ EXPECT_TRUE(send_channel_->SetSenderParameters(parameters));
+ EXPECT_EQ(atoi(kMaxQuantization),
+ AddSendStream()->GetVideoStreams().back().max_qp);
+
+ absl::optional<VideoCodec> codec = send_channel_->GetSendCodec();
+ ASSERT_TRUE(codec);
+ EXPECT_EQ(kMaxQuantization, codec->params[kCodecParamMaxQuantization]);
+}
+
+// Out-of-range RTP payload types (valid range is 0-127) must be rejected.
+TEST_F(WebRtcVideoChannelTest, SetSendCodecsRejectBadPayloadTypes) {
+ // TODO(pbos): Should we only allow the dynamic range?
+ static const int kIncorrectPayloads[] = {-2, -1, 128, 129};
+ cricket::VideoSenderParameters parameters;
+ parameters.codecs.push_back(GetEngineCodec("VP8"));
+ for (size_t i = 0; i < arraysize(kIncorrectPayloads); ++i) {
+ parameters.codecs[0].id = kIncorrectPayloads[i];
+ EXPECT_FALSE(send_channel_->SetSenderParameters(parameters))
+ << "Bad payload type '" << kIncorrectPayloads[i] << "' accepted.";
+ }
+}
+
+// Every payload type in the dynamic range (96-127) must be accepted.
+TEST_F(WebRtcVideoChannelTest, SetSendCodecsAcceptAllValidPayloadTypes) {
+ cricket::VideoSenderParameters parameters;
+ parameters.codecs.push_back(GetEngineCodec("VP8"));
+ for (int payload_type = 96; payload_type <= 127; ++payload_type) {
+ parameters.codecs[0].id = payload_type;
+ EXPECT_TRUE(send_channel_->SetSenderParameters(parameters))
+ << "Payload type '" << payload_type << "' rejected.";
+ }
+}
+
+// Test that setting the a different set of codecs but with an identical front
+// codec doesn't result in the stream being recreated.
+// This may happen when a subsequent negotiation includes fewer codecs, as a
+// result of one of the codecs being rejected.
+TEST_F(WebRtcVideoChannelTest,
+ SetSendCodecsIdenticalFirstCodecDoesntRecreateStream) {
+ cricket::VideoSenderParameters parameters1;
+ parameters1.codecs.push_back(GetEngineCodec("VP8"));
+ parameters1.codecs.push_back(GetEngineCodec("VP9"));
+ EXPECT_TRUE(send_channel_->SetSenderParameters(parameters1));
+
+ AddSendStream();
+ EXPECT_EQ(1, fake_call_->GetNumCreatedSendStreams());
+
+ // Renegotiate with only VP8; the front codec is unchanged, so the creation
+ // count must stay at 1.
+ cricket::VideoSenderParameters parameters2;
+ parameters2.codecs.push_back(GetEngineCodec("VP8"));
+ EXPECT_TRUE(send_channel_->SetSenderParameters(parameters2));
+ EXPECT_EQ(1, fake_call_->GetNumCreatedSendStreams());
+}
+
+// Smoke test: a single-codec receiver parameter set is accepted.
+TEST_F(WebRtcVideoChannelTest, SetRecvCodecsWithOnlyVp8) {
+ cricket::VideoReceiverParameters parameters;
+ parameters.codecs.push_back(GetEngineCodec("VP8"));
+ EXPECT_TRUE(receive_channel_->SetReceiverParameters(parameters));
+}
+
+// Test that we set our inbound RTX codecs properly.
+// Covers three rejection/acceptance cases for the "apt" parameter.
+TEST_F(WebRtcVideoChannelTest, SetRecvCodecsWithRtx) {
+ const int kUnusedPayloadType1 = 126;
+ const int kUnusedPayloadType2 = 127;
+ EXPECT_FALSE(FindCodecById(engine_.recv_codecs(), kUnusedPayloadType1));
+ EXPECT_FALSE(FindCodecById(engine_.recv_codecs(), kUnusedPayloadType2));
+
+ cricket::VideoReceiverParameters parameters;
+ parameters.codecs.push_back(GetEngineCodec("VP8"));
+ cricket::VideoCodec rtx_codec =
+ cricket::CreateVideoCodec(kUnusedPayloadType1, "rtx");
+ parameters.codecs.push_back(rtx_codec);
+ // Case 1: RTX with no "apt" at all.
+ EXPECT_FALSE(receive_channel_->SetReceiverParameters(parameters))
+ << "RTX codec without associated payload should be rejected.";
+
+ // Case 2: "apt" pointing at a payload type nobody negotiated.
+ parameters.codecs[1].SetParam("apt", kUnusedPayloadType2);
+ EXPECT_FALSE(receive_channel_->SetReceiverParameters(parameters))
+ << "RTX codec with invalid associated payload type should be rejected.";
+
+ // Valid: "apt" references the negotiated VP8 payload type.
+ parameters.codecs[1].SetParam("apt", GetEngineCodec("VP8").id);
+ EXPECT_TRUE(receive_channel_->SetReceiverParameters(parameters));
+
+ // Case 3: an RTX codec whose "apt" points at another RTX codec.
+ cricket::VideoCodec rtx_codec2 =
+ cricket::CreateVideoCodec(kUnusedPayloadType2, "rtx");
+ rtx_codec2.SetParam("apt", rtx_codec.id);
+ parameters.codecs.push_back(rtx_codec2);
+
+ EXPECT_FALSE(receive_channel_->SetReceiverParameters(parameters))
+ << "RTX codec with another RTX as associated payload type should be "
+ "rejected.";
+}
+
+// A codec negotiated with raw packetization must appear in the receive
+// config's raw_payload_types set.
+TEST_F(WebRtcVideoChannelTest, SetRecvCodecsWithPacketization) {
+ cricket::VideoCodec vp8_codec = GetEngineCodec("VP8");
+ vp8_codec.packetization = kPacketizationParamRaw;
+
+ cricket::VideoReceiverParameters parameters;
+ parameters.codecs = {vp8_codec, GetEngineCodec("VP9")};
+ EXPECT_TRUE(receive_channel_->SetReceiverParameters(parameters));
+
+ const cricket::StreamParams params =
+ cricket::StreamParams::CreateLegacy(kSsrcs1[0]);
+ AddRecvStream(params);
+ ASSERT_THAT(fake_call_->GetVideoReceiveStreams(), testing::SizeIs(1));
+
+ const webrtc::VideoReceiveStreamInterface::Config& config =
+ fake_call_->GetVideoReceiveStreams()[0]->GetConfig();
+ ASSERT_THAT(config.rtp.raw_payload_types, testing::SizeIs(1));
+ EXPECT_EQ(config.rtp.raw_payload_types.count(vp8_codec.id), 1U);
+}
+
+// Removing a codec's packetization mode changes the receive config, which
+// forces the receive stream to be recreated (creation count goes 1 -> 2).
+TEST_F(WebRtcVideoChannelTest, SetRecvCodecsWithPacketizationRecreatesStream) {
+ cricket::VideoReceiverParameters parameters;
+ parameters.codecs = {GetEngineCodec("VP8"), GetEngineCodec("VP9")};
+ parameters.codecs.back().packetization = kPacketizationParamRaw;
+ EXPECT_TRUE(receive_channel_->SetReceiverParameters(parameters));
+
+ const cricket::StreamParams params =
+ cricket::StreamParams::CreateLegacy(kSsrcs1[0]);
+ AddRecvStream(params);
+ ASSERT_THAT(fake_call_->GetVideoReceiveStreams(), testing::SizeIs(1));
+ EXPECT_EQ(fake_call_->GetNumCreatedReceiveStreams(), 1);
+
+ parameters.codecs.back().packetization.reset();
+ EXPECT_TRUE(receive_channel_->SetReceiverParameters(parameters));
+ EXPECT_EQ(fake_call_->GetNumCreatedReceiveStreams(), 2);
+}
+
+// When two ULPFEC codecs with different payload types are negotiated, only
+// the first one is applied to the receive stream config.
+TEST_F(WebRtcVideoChannelTest, DuplicateUlpfecCodecIsDropped) {
+ constexpr int kFirstUlpfecPayloadType = 126;
+ constexpr int kSecondUlpfecPayloadType = 127;
+
+ cricket::VideoReceiverParameters parameters;
+ parameters.codecs.push_back(GetEngineCodec("VP8"));
+ parameters.codecs.push_back(cricket::CreateVideoCodec(
+ kFirstUlpfecPayloadType, cricket::kUlpfecCodecName));
+ parameters.codecs.push_back(cricket::CreateVideoCodec(
+ kSecondUlpfecPayloadType, cricket::kUlpfecCodecName));
+ ASSERT_TRUE(receive_channel_->SetReceiverParameters(parameters));
+
+ FakeVideoReceiveStream* recv_stream = AddRecvStream();
+ EXPECT_EQ(kFirstUlpfecPayloadType,
+ recv_stream->GetConfig().rtp.ulpfec_payload_type);
+}
+
+// Same as above, for duplicate RED codecs.
+TEST_F(WebRtcVideoChannelTest, DuplicateRedCodecIsDropped) {
+ constexpr int kFirstRedPayloadType = 126;
+ constexpr int kSecondRedPayloadType = 127;
+
+ cricket::VideoReceiverParameters parameters;
+ parameters.codecs.push_back(GetEngineCodec("VP8"));
+ parameters.codecs.push_back(
+ cricket::CreateVideoCodec(kFirstRedPayloadType, cricket::kRedCodecName));
+ parameters.codecs.push_back(
+ cricket::CreateVideoCodec(kSecondRedPayloadType, cricket::kRedCodecName));
+ ASSERT_TRUE(receive_channel_->SetReceiverParameters(parameters));
+
+ FakeVideoReceiveStream* recv_stream = AddRecvStream();
+ EXPECT_EQ(kFirstRedPayloadType,
+ recv_stream->GetConfig().rtp.red_payload_type);
+}
+
+// Receive-side counterpart of SetSendCodecsWithChangedRtxPayloadType: a new
+// RTX payload type must replace the old entry in rtx_associated_payload_types
+// while the RTX SSRC mapping stays intact.
+TEST_F(WebRtcVideoChannelTest, SetRecvCodecsWithChangedRtxPayloadType) {
+ const int kUnusedPayloadType1 = 126;
+ const int kUnusedPayloadType2 = 127;
+ EXPECT_FALSE(FindCodecById(engine_.recv_codecs(), kUnusedPayloadType1));
+ EXPECT_FALSE(FindCodecById(engine_.recv_codecs(), kUnusedPayloadType2));
+
+ // SSRCs for RTX.
+ cricket::StreamParams params =
+ cricket::StreamParams::CreateLegacy(kSsrcs1[0]);
+ params.AddFidSsrc(kSsrcs1[0], kRtxSsrcs1[0]);
+ AddRecvStream(params);
+
+ // Original payload type for RTX.
+ cricket::VideoReceiverParameters parameters;
+ parameters.codecs.push_back(GetEngineCodec("VP8"));
+ cricket::VideoCodec rtx_codec =
+ cricket::CreateVideoCodec(kUnusedPayloadType1, "rtx");
+ rtx_codec.SetParam("apt", GetEngineCodec("VP8").id);
+ parameters.codecs.push_back(rtx_codec);
+ EXPECT_TRUE(receive_channel_->SetReceiverParameters(parameters));
+ ASSERT_EQ(1U, fake_call_->GetVideoReceiveStreams().size());
+ const webrtc::VideoReceiveStreamInterface::Config& config_before =
+ fake_call_->GetVideoReceiveStreams()[0]->GetConfig();
+ EXPECT_EQ(1U, config_before.rtp.rtx_associated_payload_types.size());
+ // The map goes rtx payload type -> media payload type, hence FindKeyByValue.
+ const int* payload_type_before = FindKeyByValue(
+ config_before.rtp.rtx_associated_payload_types, GetEngineCodec("VP8").id);
+ ASSERT_NE(payload_type_before, nullptr);
+ EXPECT_EQ(kUnusedPayloadType1, *payload_type_before);
+ EXPECT_EQ(kRtxSsrcs1[0], config_before.rtp.rtx_ssrc);
+
+ // Change payload type for RTX.
+ parameters.codecs[1].id = kUnusedPayloadType2;
+ EXPECT_TRUE(receive_channel_->SetReceiverParameters(parameters));
+ ASSERT_EQ(1U, fake_call_->GetVideoReceiveStreams().size());
+ const webrtc::VideoReceiveStreamInterface::Config& config_after =
+ fake_call_->GetVideoReceiveStreams()[0]->GetConfig();
+ EXPECT_EQ(1U, config_after.rtp.rtx_associated_payload_types.size());
+ const int* payload_type_after = FindKeyByValue(
+ config_after.rtp.rtx_associated_payload_types, GetEngineCodec("VP8").id);
+ ASSERT_NE(payload_type_after, nullptr);
+ EXPECT_EQ(kUnusedPayloadType2, *payload_type_after);
+ EXPECT_EQ(kRtxSsrcs1[0], config_after.rtp.rtx_ssrc);
+}
+
+// Verifies how the RTX "rtx-time" codec parameter maps onto the NACK history
+// of the receive stream: positive values are applied, values <= 0 are ignored
+// (the default applies), and values above the default are clamped to it.
+TEST_F(WebRtcVideoChannelTest, SetRecvCodecsRtxWithRtxTime) {
+  const int kUnusedPayloadType1 = 126;
+  const int kUnusedPayloadType2 = 127;
+  EXPECT_FALSE(FindCodecById(engine_.recv_codecs(), kUnusedPayloadType1));
+  EXPECT_FALSE(FindCodecById(engine_.recv_codecs(), kUnusedPayloadType2));
+
+  // SSRCs for RTX.
+  cricket::StreamParams params =
+      cricket::StreamParams::CreateLegacy(kSsrcs1[0]);
+  params.AddFidSsrc(kSsrcs1[0], kRtxSsrcs1[0]);
+  AddRecvStream(params);
+
+  // Payload type for RTX.
+  cricket::VideoReceiverParameters parameters;
+  parameters.codecs.push_back(GetEngineCodec("VP8"));
+  cricket::VideoCodec rtx_codec =
+      cricket::CreateVideoCodec(kUnusedPayloadType1, "rtx");
+  rtx_codec.SetParam("apt", GetEngineCodec("VP8").id);
+  parameters.codecs.push_back(rtx_codec);
+  EXPECT_TRUE(receive_channel_->SetReceiverParameters(parameters));
+  ASSERT_EQ(1U, fake_call_->GetVideoReceiveStreams().size());
+  const webrtc::VideoReceiveStreamInterface::Config& config =
+      fake_call_->GetVideoReceiveStreams()[0]->GetConfig();
+
+  const int kRtxTime = 343;
+  // Assert that the default value is different from the ones we test
+  // and store the default value.
+  EXPECT_NE(config.rtp.nack.rtp_history_ms, kRtxTime);
+  int default_history_ms = config.rtp.nack.rtp_history_ms;
+
+  // Set rtx-time.
+  parameters.codecs[1].SetParam(kCodecParamRtxTime, kRtxTime);
+  EXPECT_TRUE(receive_channel_->SetReceiverParameters(parameters));
+  EXPECT_EQ(fake_call_->GetVideoReceiveStreams()[0]
+                ->GetConfig()
+                .rtp.nack.rtp_history_ms,
+            kRtxTime);
+
+  // Negative values are ignored so the default value applies.
+  parameters.codecs[1].SetParam(kCodecParamRtxTime, -1);
+  EXPECT_TRUE(receive_channel_->SetReceiverParameters(parameters));
+  EXPECT_NE(fake_call_->GetVideoReceiveStreams()[0]
+                ->GetConfig()
+                .rtp.nack.rtp_history_ms,
+            -1);
+  EXPECT_EQ(fake_call_->GetVideoReceiveStreams()[0]
+                ->GetConfig()
+                .rtp.nack.rtp_history_ms,
+            default_history_ms);
+
+  // 0 is ignored so the default applies.
+  parameters.codecs[1].SetParam(kCodecParamRtxTime, 0);
+  EXPECT_TRUE(receive_channel_->SetReceiverParameters(parameters));
+  EXPECT_NE(fake_call_->GetVideoReceiveStreams()[0]
+                ->GetConfig()
+                .rtp.nack.rtp_history_ms,
+            0);
+  EXPECT_EQ(fake_call_->GetVideoReceiveStreams()[0]
+                ->GetConfig()
+                .rtp.nack.rtp_history_ms,
+            default_history_ms);
+
+  // Values larger than the default are clamped to the default.
+  parameters.codecs[1].SetParam(kCodecParamRtxTime, default_history_ms + 100);
+  EXPECT_TRUE(receive_channel_->SetReceiverParameters(parameters));
+  EXPECT_EQ(fake_call_->GetVideoReceiveStreams()[0]
+                ->GetConfig()
+                .rtp.nack.rtp_history_ms,
+            default_history_ms);
+}
+
+// A supported codec offered under a non-default payload type must still be
+// accepted by the receive channel.
+TEST_F(WebRtcVideoChannelTest, SetRecvCodecsDifferentPayloadType) {
+  cricket::VideoCodec vp8_with_custom_pt = GetEngineCodec("VP8");
+  vp8_with_custom_pt.id = 99;
+
+  cricket::VideoReceiverParameters parameters;
+  parameters.codecs.push_back(vp8_with_custom_pt);
+  EXPECT_TRUE(receive_channel_->SetReceiverParameters(parameters));
+}
+
+// The engine's full default receive codec list must be accepted verbatim, and
+// the first codec must become the first configured decoder.
+TEST_F(WebRtcVideoChannelTest, SetRecvCodecsAcceptDefaultCodecs) {
+  cricket::VideoReceiverParameters parameters;
+  parameters.codecs = engine_.recv_codecs();
+  EXPECT_TRUE(receive_channel_->SetReceiverParameters(parameters));
+
+  FakeVideoReceiveStream* stream = AddRecvStream();
+  const webrtc::VideoReceiveStreamInterface::Config& config =
+      stream->GetConfig();
+  EXPECT_EQ(engine_.recv_codecs()[0].name,
+            config.decoders[0].video_format.name);
+  EXPECT_EQ(engine_.recv_codecs()[0].id, config.decoders[0].payload_type);
+}
+
+// A codec list containing an unknown codec name must be rejected as a whole,
+// even when it also contains a supported codec.
+TEST_F(WebRtcVideoChannelTest, SetRecvCodecsRejectUnsupportedCodec) {
+  const cricket::VideoCodec unsupported_codec =
+      cricket::CreateVideoCodec(101, "WTF3");
+
+  cricket::VideoReceiverParameters parameters;
+  parameters.codecs.push_back(GetEngineCodec("VP8"));
+  parameters.codecs.push_back(unsupported_codec);
+  EXPECT_FALSE(receive_channel_->SetReceiverParameters(parameters));
+}
+
+// Multiple distinct video codecs (VP8 + VP9) may be set at once.
+TEST_F(WebRtcVideoChannelTest, SetRecvCodecsAcceptsMultipleVideoCodecs) {
+  cricket::VideoReceiverParameters parameters;
+  parameters.codecs.push_back(GetEngineCodec("VP8"));
+  parameters.codecs.push_back(GetEngineCodec("VP9"));
+  EXPECT_TRUE(receive_channel_->SetReceiverParameters(parameters));
+}
+
+// Verifies that removing ULPFEC from the receiver parameters disables ULPFEC
+// on an existing receive stream that previously had it enabled.
+TEST_F(WebRtcVideoChannelTest, SetRecvCodecsWithoutFecDisablesFec) {
+  cricket::VideoSenderParameters send_parameters;
+  send_parameters.codecs.push_back(GetEngineCodec("VP8"));
+  send_parameters.codecs.push_back(GetEngineCodec("red"));
+  send_parameters.codecs.push_back(GetEngineCodec("ulpfec"));
+  ASSERT_TRUE(send_channel_->SetSenderParameters(send_parameters));
+
+  FakeVideoReceiveStream* stream = AddRecvStream();
+
+  // ULPFEC is initially enabled on the receive stream.
+  EXPECT_EQ(GetEngineCodec("ulpfec").id,
+            stream->GetConfig().rtp.ulpfec_payload_type);
+
+  cricket::VideoReceiverParameters recv_parameters;
+  recv_parameters.codecs.push_back(GetEngineCodec("VP8"));
+  ASSERT_TRUE(receive_channel_->SetReceiverParameters(recv_parameters));
+  // The stream may have been recreated; re-fetch it before checking.
+  stream = fake_call_->GetVideoReceiveStreams()[0];
+  ASSERT_TRUE(stream != nullptr);
+  EXPECT_EQ(-1, stream->GetConfig().rtp.ulpfec_payload_type)
+      << "SetSendCodec without ULPFEC should disable current ULPFEC.";
+}
+
+// Verifies that removing flexfec-03 from the receiver parameters tears down an
+// existing FlexFEC receive stream.
+TEST_F(WebRtcVideoChannelFlexfecRecvTest, SetRecvParamsWithoutFecDisablesFec) {
+  AddRecvStream(
+      CreatePrimaryWithFecFrStreamParams("cname", kSsrcs1[0], kFlexfecSsrc));
+  const std::vector<FakeFlexfecReceiveStream*>& streams =
+      fake_call_->GetFlexfecReceiveStreams();
+
+  // FlexFEC stream exists and protects the primary SSRC.
+  ASSERT_EQ(1U, streams.size());
+  const FakeFlexfecReceiveStream* stream = streams.front();
+  EXPECT_EQ(GetEngineCodec("flexfec-03").id, stream->GetConfig().payload_type);
+  EXPECT_EQ(kFlexfecSsrc, stream->remote_ssrc());
+  ASSERT_EQ(1U, stream->GetConfig().protected_media_ssrcs.size());
+  EXPECT_EQ(kSsrcs1[0], stream->GetConfig().protected_media_ssrcs[0]);
+
+  cricket::VideoReceiverParameters recv_parameters;
+  recv_parameters.codecs.push_back(GetEngineCodec("VP8"));
+  ASSERT_TRUE(receive_channel_->SetReceiverParameters(recv_parameters));
+  EXPECT_TRUE(streams.empty())
+      << "SetSendCodec without FlexFEC should disable current FlexFEC.";
+}
+
+// Verifies that ULPFEC stays enabled on the receive stream both after setting
+// receiver parameters with FEC and after setting sender parameters with FEC.
+TEST_F(WebRtcVideoChannelTest, SetSendParamsWithFecEnablesFec) {
+  FakeVideoReceiveStream* stream = AddRecvStream();
+  EXPECT_EQ(GetEngineCodec("ulpfec").id,
+            stream->GetConfig().rtp.ulpfec_payload_type);
+
+  cricket::VideoReceiverParameters recv_parameters;
+  recv_parameters.codecs.push_back(GetEngineCodec("VP8"));
+  recv_parameters.codecs.push_back(GetEngineCodec("red"));
+  recv_parameters.codecs.push_back(GetEngineCodec("ulpfec"));
+  ASSERT_TRUE(receive_channel_->SetReceiverParameters(recv_parameters));
+  // The stream may have been recreated; re-fetch it before checking.
+  stream = fake_call_->GetVideoReceiveStreams()[0];
+  ASSERT_TRUE(stream != nullptr);
+  EXPECT_EQ(GetEngineCodec("ulpfec").id,
+            stream->GetConfig().rtp.ulpfec_payload_type)
+      << "ULPFEC should be enabled on the receive stream.";
+
+  cricket::VideoSenderParameters send_parameters;
+  send_parameters.codecs.push_back(GetEngineCodec("VP8"));
+  send_parameters.codecs.push_back(GetEngineCodec("red"));
+  send_parameters.codecs.push_back(GetEngineCodec("ulpfec"));
+  ASSERT_TRUE(send_channel_->SetSenderParameters(send_parameters));
+  stream = fake_call_->GetVideoReceiveStreams()[0];
+  EXPECT_EQ(GetEngineCodec("ulpfec").id,
+            stream->GetConfig().rtp.ulpfec_payload_type)
+      << "ULPFEC should be enabled on the receive stream.";
+}
+
+// Verifies that the FlexFEC receive stream keeps its payload type, remote
+// SSRC, and protected SSRC across both receiver- and sender-parameter updates
+// that include flexfec-03.
+TEST_F(WebRtcVideoChannelFlexfecSendRecvTest,
+       SetSendRecvParamsWithFecEnablesFec) {
+  AddRecvStream(
+      CreatePrimaryWithFecFrStreamParams("cname", kSsrcs1[0], kFlexfecSsrc));
+  const std::vector<FakeFlexfecReceiveStream*>& streams =
+      fake_call_->GetFlexfecReceiveStreams();
+
+  cricket::VideoReceiverParameters recv_parameters;
+  recv_parameters.codecs.push_back(GetEngineCodec("VP8"));
+  recv_parameters.codecs.push_back(GetEngineCodec("flexfec-03"));
+  ASSERT_TRUE(receive_channel_->SetReceiverParameters(recv_parameters));
+  ASSERT_EQ(1U, streams.size());
+  const FakeFlexfecReceiveStream* stream_with_recv_params = streams.front();
+  EXPECT_EQ(GetEngineCodec("flexfec-03").id,
+            stream_with_recv_params->GetConfig().payload_type);
+  EXPECT_EQ(kFlexfecSsrc, stream_with_recv_params->GetConfig().rtp.remote_ssrc);
+  EXPECT_EQ(1U,
+            stream_with_recv_params->GetConfig().protected_media_ssrcs.size());
+  EXPECT_EQ(kSsrcs1[0],
+            stream_with_recv_params->GetConfig().protected_media_ssrcs[0]);
+
+  // Sender-side update with FlexFEC must not disturb the receive stream.
+  cricket::VideoSenderParameters send_parameters;
+  send_parameters.codecs.push_back(GetEngineCodec("VP8"));
+  send_parameters.codecs.push_back(GetEngineCodec("flexfec-03"));
+  ASSERT_TRUE(send_channel_->SetSenderParameters(send_parameters));
+  ASSERT_EQ(1U, streams.size());
+  const FakeFlexfecReceiveStream* stream_with_send_params = streams.front();
+  EXPECT_EQ(GetEngineCodec("flexfec-03").id,
+            stream_with_send_params->GetConfig().payload_type);
+  EXPECT_EQ(kFlexfecSsrc, stream_with_send_params->GetConfig().rtp.remote_ssrc);
+  EXPECT_EQ(1U,
+            stream_with_send_params->GetConfig().protected_media_ssrcs.size());
+  EXPECT_EQ(kSsrcs1[0],
+            stream_with_send_params->GetConfig().protected_media_ssrcs[0]);
+}
+
+// A RED codec sharing its payload type with a media codec must be rejected.
+TEST_F(WebRtcVideoChannelTest, SetRecvCodecsRejectDuplicateFecPayloads) {
+  cricket::VideoReceiverParameters parameters;
+  parameters.codecs.push_back(GetEngineCodec("VP8"));
+  parameters.codecs.push_back(GetEngineCodec("red"));
+  // Force a payload type collision with VP8.
+  parameters.codecs[1].id = parameters.codecs[0].id;
+  EXPECT_FALSE(receive_channel_->SetReceiverParameters(parameters));
+}
+
+// A flexfec-03 codec sharing its payload type with a media codec must be
+// rejected.
+TEST_F(WebRtcVideoChannelFlexfecRecvTest,
+       SetRecvCodecsRejectDuplicateFecPayloads) {
+  cricket::VideoReceiverParameters parameters;
+  parameters.codecs.push_back(GetEngineCodec("VP8"));
+  parameters.codecs.push_back(GetEngineCodec("flexfec-03"));
+  // Force a payload type collision with VP8.
+  parameters.codecs[1].id = parameters.codecs[0].id;
+  EXPECT_FALSE(receive_channel_->SetReceiverParameters(parameters));
+}
+
+// Two different media codecs sharing one payload type must be rejected.
+TEST_F(WebRtcVideoChannelTest, SetRecvCodecsRejectDuplicateCodecPayloads) {
+  cricket::VideoReceiverParameters parameters;
+  parameters.codecs.push_back(GetEngineCodec("VP8"));
+  parameters.codecs.push_back(GetEngineCodec("VP9"));
+  // Force a payload type collision between VP9 and VP8.
+  parameters.codecs[1].id = parameters.codecs[0].id;
+  EXPECT_FALSE(receive_channel_->SetReceiverParameters(parameters));
+}
+
+// The same codec may legitimately appear twice as long as the two entries use
+// distinct payload types.
+TEST_F(WebRtcVideoChannelTest,
+       SetRecvCodecsAcceptSameCodecOnMultiplePayloadTypes) {
+  cricket::VideoReceiverParameters parameters;
+  parameters.codecs.push_back(GetEngineCodec("VP8"));
+  parameters.codecs.push_back(GetEngineCodec("VP8"));
+  parameters.codecs[1].id += 1;
+  EXPECT_TRUE(receive_channel_->SetReceiverParameters(parameters));
+}
+
+// Test that setting the same codecs but with a different order
+// doesn't result in the stream being recreated.
+TEST_F(WebRtcVideoChannelTest,
+       SetRecvCodecsDifferentOrderDoesntRecreateStream) {
+  cricket::VideoReceiverParameters parameters1;
+  parameters1.codecs.push_back(GetEngineCodec("VP8"));
+  parameters1.codecs.push_back(GetEngineCodec("red"));
+  EXPECT_TRUE(receive_channel_->SetReceiverParameters(parameters1));
+
+  AddRecvStream(cricket::StreamParams::CreateLegacy(123));
+  EXPECT_EQ(1, fake_call_->GetNumCreatedReceiveStreams());
+
+  // Same two codecs, reversed order.
+  cricket::VideoReceiverParameters parameters2;
+  parameters2.codecs.push_back(GetEngineCodec("red"));
+  parameters2.codecs.push_back(GetEngineCodec("VP8"));
+  EXPECT_TRUE(receive_channel_->SetReceiverParameters(parameters2));
+  // Still only one stream ever created: no recreation happened.
+  EXPECT_EQ(1, fake_call_->GetNumCreatedReceiveStreams());
+}
+
+// A newly added send stream must start out in the non-sending state.
+TEST_F(WebRtcVideoChannelTest, SendStreamNotSendingByDefault) {
+  FakeVideoSendStream* send_stream = AddSendStream();
+  EXPECT_FALSE(send_stream->IsSending());
+}
+
+// A newly added receive stream must start out in the receiving state.
+TEST_F(WebRtcVideoChannelTest, ReceiveStreamReceivingByDefault) {
+  FakeVideoReceiveStream* recv_stream = AddRecvStream();
+  EXPECT_TRUE(recv_stream->IsReceiving());
+}
+
+// Exercises all four SetSend transitions (false->true, true->true,
+// true->false, false->false) and verifies that a stream created while sending
+// is active starts out sending.
+TEST_F(WebRtcVideoChannelTest, SetSend) {
+  FakeVideoSendStream* stream = AddSendStream();
+  EXPECT_FALSE(stream->IsSending());
+
+  // false->true
+  EXPECT_TRUE(send_channel_->SetSend(true));
+  EXPECT_TRUE(stream->IsSending());
+  // true->true
+  EXPECT_TRUE(send_channel_->SetSend(true));
+  EXPECT_TRUE(stream->IsSending());
+  // true->false
+  EXPECT_TRUE(send_channel_->SetSend(false));
+  EXPECT_FALSE(stream->IsSending());
+  // false->false
+  EXPECT_TRUE(send_channel_->SetSend(false));
+  EXPECT_FALSE(stream->IsSending());
+
+  EXPECT_TRUE(send_channel_->SetSend(true));
+  FakeVideoSendStream* new_stream = AddSendStream();
+  EXPECT_TRUE(new_stream->IsSending())
+      << "Send stream created after SetSend(true) not sending initially.";
+}
+
+// This test verifies DSCP settings are properly applied on video media channel.
+// The channel is recreated for each MediaConfig variant because enable_dscp is
+// fixed at channel construction time.
+TEST_F(WebRtcVideoChannelTest, TestSetDscpOptions) {
+  std::unique_ptr<cricket::FakeNetworkInterface> network_interface(
+      new cricket::FakeNetworkInterface);
+  MediaConfig config;
+  std::unique_ptr<cricket::VideoMediaSendChannelInterface> send_channel;
+  webrtc::RtpParameters parameters;
+
+  send_channel = engine_.CreateSendChannel(
+      call_.get(), config, VideoOptions(), webrtc::CryptoOptions(),
+      video_bitrate_allocator_factory_.get());
+
+  send_channel->SetInterface(network_interface.get());
+  // Default value when DSCP is disabled should be DSCP_DEFAULT.
+  EXPECT_EQ(rtc::DSCP_DEFAULT, network_interface->dscp());
+  // Detach the interface before the channel is replaced below.
+  send_channel->SetInterface(nullptr);
+
+  // Default value when DSCP is enabled is also DSCP_DEFAULT, until it is set
+  // through rtp parameters.
+  config.enable_dscp = true;
+  send_channel = engine_.CreateSendChannel(
+      call_.get(), config, VideoOptions(), webrtc::CryptoOptions(),
+      video_bitrate_allocator_factory_.get());
+  send_channel->SetInterface(network_interface.get());
+  EXPECT_EQ(rtc::DSCP_DEFAULT, network_interface->dscp());
+
+  // Create a send stream to configure
+  EXPECT_TRUE(send_channel->AddSendStream(StreamParams::CreateLegacy(kSsrc)));
+  parameters = send_channel->GetRtpSendParameters(kSsrc);
+  ASSERT_FALSE(parameters.encodings.empty());
+
+  // Various priorities map to various dscp values.
+  parameters.encodings[0].network_priority = webrtc::Priority::kHigh;
+  ASSERT_TRUE(
+      send_channel->SetRtpSendParameters(kSsrc, parameters, nullptr).ok());
+  EXPECT_EQ(rtc::DSCP_AF41, network_interface->dscp());
+  parameters.encodings[0].network_priority = webrtc::Priority::kVeryLow;
+  ASSERT_TRUE(
+      send_channel->SetRtpSendParameters(kSsrc, parameters, nullptr).ok());
+  EXPECT_EQ(rtc::DSCP_CS1, network_interface->dscp());
+
+  // Packets should also self-identify their dscp in PacketOptions.
+  const uint8_t kData[10] = {0};
+  EXPECT_TRUE(ChannelImplAsTransport(send_channel.get())->SendRtcp(kData));
+  EXPECT_EQ(rtc::DSCP_CS1, network_interface->options().dscp);
+  send_channel->SetInterface(nullptr);
+
+  // Verify that setting the option to false resets the
+  // DiffServCodePoint.
+  config.enable_dscp = false;
+  send_channel = engine_.CreateSendChannel(
+      call_.get(), config, VideoOptions(), webrtc::CryptoOptions(),
+      video_bitrate_allocator_factory_.get());
+  send_channel->SetInterface(network_interface.get());
+  EXPECT_EQ(rtc::DSCP_DEFAULT, network_interface->dscp());
+  send_channel->SetInterface(nullptr);
+}
+
+// This test verifies that the RTCP reduced size mode is properly applied to
+// send video streams, both existing and newly created ones.
+TEST_F(WebRtcVideoChannelTest, TestSetSendRtcpReducedSize) {
+  // Create stream, expecting that default mode is "compound".
+  FakeVideoSendStream* stream1 = AddSendStream();
+  EXPECT_EQ(webrtc::RtcpMode::kCompound, stream1->GetConfig().rtp.rtcp_mode);
+  webrtc::RtpParameters rtp_parameters =
+      send_channel_->GetRtpSendParameters(last_ssrc_);
+  EXPECT_FALSE(rtp_parameters.rtcp.reduced_size);
+
+  // Now enable reduced size mode.
+  send_parameters_.rtcp.reduced_size = true;
+  EXPECT_TRUE(send_channel_->SetSenderParameters(send_parameters_));
+  // The stream may have been recreated; re-fetch it before checking.
+  stream1 = fake_call_->GetVideoSendStreams()[0];
+  EXPECT_EQ(webrtc::RtcpMode::kReducedSize, stream1->GetConfig().rtp.rtcp_mode);
+  rtp_parameters = send_channel_->GetRtpSendParameters(last_ssrc_);
+  EXPECT_TRUE(rtp_parameters.rtcp.reduced_size);
+
+  // Create a new stream and ensure it picks up the reduced size mode.
+  FakeVideoSendStream* stream2 = AddSendStream();
+  EXPECT_EQ(webrtc::RtcpMode::kReducedSize, stream2->GetConfig().rtp.rtcp_mode);
+}
+
+// This test verifies that the RTCP reduced size mode is properly applied to
+// receive video streams, both existing and newly created ones.
+TEST_F(WebRtcVideoChannelTest, TestSetRecvRtcpReducedSize) {
+  // Create stream, expecting that default mode is "compound".
+  FakeVideoReceiveStream* stream1 = AddRecvStream();
+  EXPECT_EQ(webrtc::RtcpMode::kCompound, stream1->GetConfig().rtp.rtcp_mode);
+
+  // Now enable reduced size mode.
+  // TODO(deadbeef): Once "recv_parameters" becomes "receiver_parameters",
+  // the reduced_size flag should come from that.
+  send_parameters_.rtcp.reduced_size = true;
+  EXPECT_TRUE(send_channel_->SetSenderParameters(send_parameters_));
+  // The stream may have been recreated; re-fetch it before checking.
+  stream1 = fake_call_->GetVideoReceiveStreams()[0];
+  EXPECT_EQ(webrtc::RtcpMode::kReducedSize, stream1->GetConfig().rtp.rtcp_mode);
+
+  // Create a new stream and ensure it picks up the reduced size mode.
+  FakeVideoReceiveStream* stream2 = AddRecvStream();
+  EXPECT_EQ(webrtc::RtcpMode::kReducedSize, stream2->GetConfig().rtp.rtcp_mode);
+}
+
+// OnReadyToSend must toggle the VIDEO network state on the call while leaving
+// the AUDIO network state untouched.
+TEST_F(WebRtcVideoChannelTest, OnReadyToSendSignalsNetworkState) {
+  EXPECT_EQ(webrtc::kNetworkUp,
+            fake_call_->GetNetworkState(webrtc::MediaType::VIDEO));
+  EXPECT_EQ(webrtc::kNetworkUp,
+            fake_call_->GetNetworkState(webrtc::MediaType::AUDIO));
+
+  send_channel_->OnReadyToSend(false);
+  EXPECT_EQ(webrtc::kNetworkDown,
+            fake_call_->GetNetworkState(webrtc::MediaType::VIDEO));
+  EXPECT_EQ(webrtc::kNetworkUp,
+            fake_call_->GetNetworkState(webrtc::MediaType::AUDIO));
+
+  send_channel_->OnReadyToSend(true);
+  EXPECT_EQ(webrtc::kNetworkUp,
+            fake_call_->GetNetworkState(webrtc::MediaType::VIDEO));
+  EXPECT_EQ(webrtc::kNetworkUp,
+            fake_call_->GetNetworkState(webrtc::MediaType::AUDIO));
+}
+
+// GetStats must report the negotiated send codec's name for the sender.
+TEST_F(WebRtcVideoChannelTest, GetStatsReportsSentCodecName) {
+  cricket::VideoSenderParameters parameters;
+  parameters.codecs.push_back(GetEngineCodec("VP8"));
+  EXPECT_TRUE(send_channel_->SetSenderParameters(parameters));
+
+  AddSendStream();
+
+  cricket::VideoMediaSendInfo send_info;
+  cricket::VideoMediaReceiveInfo receive_info;
+  EXPECT_TRUE(send_channel_->GetStats(&send_info));
+  EXPECT_TRUE(receive_channel_->GetStats(&receive_info));
+
+  EXPECT_EQ("VP8", send_info.senders[0].codec_name);
+}
+
+// The encoder implementation name set on the stream stats must be forwarded
+// verbatim into the sender info.
+TEST_F(WebRtcVideoChannelTest, GetStatsReportsEncoderImplementationName) {
+  FakeVideoSendStream* stream = AddSendStream();
+  webrtc::VideoSendStream::Stats stats;
+  stats.encoder_implementation_name = "encoder_implementation_name";
+  stream->SetStats(stats);
+
+  cricket::VideoMediaSendInfo send_info;
+  cricket::VideoMediaReceiveInfo receive_info;
+  EXPECT_TRUE(send_channel_->GetStats(&send_info));
+  EXPECT_TRUE(receive_channel_->GetStats(&receive_info));
+
+  EXPECT_EQ(stats.encoder_implementation_name,
+            send_info.senders[0].encoder_implementation_name);
+}
+
+// The power_efficient_encoder flag set on the stream stats must be forwarded
+// into the sender info.
+TEST_F(WebRtcVideoChannelTest, GetStatsReportsPowerEfficientEncoder) {
+  FakeVideoSendStream* stream = AddSendStream();
+  webrtc::VideoSendStream::Stats stats;
+  stats.power_efficient_encoder = true;
+  stream->SetStats(stats);
+
+  cricket::VideoMediaSendInfo send_info;
+  cricket::VideoMediaReceiveInfo receive_info;
+  EXPECT_TRUE(send_channel_->GetStats(&send_info));
+  EXPECT_TRUE(receive_channel_->GetStats(&receive_info));
+
+  EXPECT_TRUE(send_info.senders[0].power_efficient_encoder);
+}
+
+// avg_encode_time_ms and encode_usage_percent from the stream stats must be
+// forwarded into the sender info's CPU overuse metrics.
+TEST_F(WebRtcVideoChannelTest, GetStatsReportsCpuOveruseMetrics) {
+  FakeVideoSendStream* stream = AddSendStream();
+  webrtc::VideoSendStream::Stats stats;
+  stats.avg_encode_time_ms = 13;
+  stats.encode_usage_percent = 42;
+  stream->SetStats(stats);
+
+  cricket::VideoMediaSendInfo send_info;
+  cricket::VideoMediaReceiveInfo receive_info;
+  EXPECT_TRUE(send_channel_->GetStats(&send_info));
+  EXPECT_TRUE(receive_channel_->GetStats(&receive_info));
+
+  EXPECT_EQ(stats.avg_encode_time_ms, send_info.senders[0].avg_encode_ms);
+  EXPECT_EQ(stats.encode_usage_percent,
+            send_info.senders[0].encode_usage_percent);
+}
+
+// frames_encoded from the stream stats must be forwarded into the sender info.
+TEST_F(WebRtcVideoChannelTest, GetStatsReportsFramesEncoded) {
+  FakeVideoSendStream* stream = AddSendStream();
+  webrtc::VideoSendStream::Stats stats;
+  stats.frames_encoded = 13;
+  stream->SetStats(stats);
+
+  cricket::VideoMediaSendInfo send_info;
+  cricket::VideoMediaReceiveInfo receive_info;
+  EXPECT_TRUE(send_channel_->GetStats(&send_info));
+  EXPECT_TRUE(receive_channel_->GetStats(&receive_info));
+
+  EXPECT_EQ(stats.frames_encoded, send_info.senders[0].frames_encoded);
+}
+
+// Per-substream key-frame counts must appear per sender, while the aggregated
+// sender reports their sum.
+TEST_F(WebRtcVideoChannelTest, GetStatsReportsKeyFramesEncoded) {
+  FakeVideoSendStream* stream = AddSendStream();
+  webrtc::VideoSendStream::Stats stats;
+  stats.substreams[123].frame_counts.key_frames = 10;
+  stats.substreams[456].frame_counts.key_frames = 87;
+  stream->SetStats(stats);
+
+  cricket::VideoMediaSendInfo send_info;
+  cricket::VideoMediaReceiveInfo receive_info;
+  EXPECT_TRUE(send_channel_->GetStats(&send_info));
+  EXPECT_TRUE(receive_channel_->GetStats(&receive_info));
+
+  EXPECT_EQ(send_info.senders.size(), 2u);
+  EXPECT_EQ(10u, send_info.senders[0].key_frames_encoded);
+  EXPECT_EQ(87u, send_info.senders[1].key_frames_encoded);
+  // Aggregated sender sums both substreams: 10 + 87.
+  EXPECT_EQ(97u, send_info.aggregated_senders[0].key_frames_encoded);
+}
+
+// Per-substream QP sums must appear per sender, while the aggregated sender
+// reports their sum.
+TEST_F(WebRtcVideoChannelTest, GetStatsReportsPerLayerQpSum) {
+  FakeVideoSendStream* stream = AddSendStream();
+  webrtc::VideoSendStream::Stats stats;
+  stats.substreams[123].qp_sum = 15;
+  stats.substreams[456].qp_sum = 11;
+  stream->SetStats(stats);
+
+  cricket::VideoMediaSendInfo send_info;
+  cricket::VideoMediaReceiveInfo receive_info;
+  EXPECT_TRUE(send_channel_->GetStats(&send_info));
+  EXPECT_TRUE(receive_channel_->GetStats(&receive_info));
+
+  EXPECT_EQ(send_info.senders.size(), 2u);
+  EXPECT_EQ(stats.substreams[123].qp_sum, send_info.senders[0].qp_sum);
+  EXPECT_EQ(stats.substreams[456].qp_sum, send_info.senders[1].qp_sum);
+  // Aggregated sender sums both substreams: 15 + 11.
+  EXPECT_EQ(*send_info.aggregated_senders[0].qp_sum, 26u);
+}
+
+// Returns a Stats object with every top-level (non-substream) field set to a
+// distinct, recognizable value, so aggregation tests can verify which fields
+// are propagated.
+webrtc::VideoSendStream::Stats GetInitialisedStats() {
+  webrtc::VideoSendStream::Stats stats;
+  stats.encoder_implementation_name = "vp";
+  stats.input_frame_rate = 1.0;
+  stats.encode_frame_rate = 2;
+  stats.avg_encode_time_ms = 3;
+  stats.encode_usage_percent = 4;
+  stats.frames_encoded = 5;
+  stats.total_encode_time_ms = 6;
+  stats.frames_dropped_by_capturer = 7;
+  stats.frames_dropped_by_encoder_queue = 8;
+  stats.frames_dropped_by_rate_limiter = 9;
+  stats.frames_dropped_by_congestion_window = 10;
+  stats.frames_dropped_by_encoder = 11;
+  stats.target_media_bitrate_bps = 13;
+  stats.media_bitrate_bps = 14;
+  stats.suspended = true;
+  stats.bw_limited_resolution = true;
+  stats.cpu_limited_resolution = true;
+  // Not wired.
+  stats.bw_limited_framerate = true;
+  // Not wired.
+  stats.cpu_limited_framerate = true;
+  stats.quality_limitation_reason = webrtc::QualityLimitationReason::kCpu;
+  stats.quality_limitation_durations_ms[webrtc::QualityLimitationReason::kCpu] =
+      15;
+  stats.quality_limitation_resolution_changes = 16;
+  stats.number_of_cpu_adapt_changes = 17;
+  stats.number_of_quality_adapt_changes = 18;
+  stats.has_entered_low_resolution = true;
+  stats.content_type = webrtc::VideoContentType::SCREENSHARE;
+  stats.frames_sent = 19;
+  stats.huge_frames_sent = 20;
+
+  return stats;
+}
+
+// With no substreams present, the aggregated sender must carry the top-level
+// stream stats directly, while substream-only fields stay at their defaults.
+TEST_F(WebRtcVideoChannelTest, GetAggregatedStatsReportWithoutSubStreams) {
+  FakeVideoSendStream* stream = AddSendStream();
+  auto stats = GetInitialisedStats();
+  stream->SetStats(stats);
+  cricket::VideoMediaSendInfo send_info;
+  cricket::VideoMediaReceiveInfo receive_info;
+  EXPECT_TRUE(send_channel_->GetStats(&send_info));
+  EXPECT_TRUE(receive_channel_->GetStats(&receive_info));
+
+  EXPECT_EQ(send_info.aggregated_senders.size(), 1u);
+  auto& sender = send_info.aggregated_senders[0];
+
+  // MediaSenderInfo
+
+  EXPECT_EQ(sender.payload_bytes_sent, 0);
+  EXPECT_EQ(sender.header_and_padding_bytes_sent, 0);
+  EXPECT_EQ(sender.retransmitted_bytes_sent, 0u);
+  EXPECT_EQ(sender.packets_sent, 0);
+  EXPECT_EQ(sender.retransmitted_packets_sent, 0u);
+  EXPECT_EQ(sender.packets_lost, 0);
+  EXPECT_EQ(sender.fraction_lost, 0.0f);
+  EXPECT_EQ(sender.rtt_ms, 0);
+  EXPECT_EQ(sender.codec_name, DefaultCodec().name);
+  EXPECT_EQ(sender.codec_payload_type, DefaultCodec().id);
+  EXPECT_EQ(sender.local_stats.size(), 1u);
+  EXPECT_EQ(sender.local_stats[0].ssrc, last_ssrc_);
+  EXPECT_EQ(sender.local_stats[0].timestamp, 0.0f);
+  EXPECT_EQ(sender.remote_stats.size(), 0u);
+  EXPECT_EQ(sender.report_block_datas.size(), 0u);
+
+  // VideoSenderInfo
+
+  EXPECT_EQ(sender.ssrc_groups.size(), 0u);
+  EXPECT_EQ(sender.encoder_implementation_name,
+            stats.encoder_implementation_name);
+  // Comes from substream only.
+  EXPECT_EQ(sender.firs_received, 0);
+  EXPECT_EQ(sender.plis_received, 0);
+  EXPECT_EQ(sender.nacks_received, 0u);
+  EXPECT_EQ(sender.send_frame_width, 0);
+  EXPECT_EQ(sender.send_frame_height, 0);
+
+  EXPECT_EQ(sender.framerate_input, stats.input_frame_rate);
+  EXPECT_EQ(sender.framerate_sent, stats.encode_frame_rate);
+  EXPECT_EQ(sender.nominal_bitrate, stats.media_bitrate_bps);
+  // Both adapt-reason bits set: GetInitialisedStats enables cpu_ and
+  // bw_limited_resolution.
+  EXPECT_NE(sender.adapt_reason & WebRtcVideoChannel::ADAPTREASON_CPU, 0);
+  EXPECT_NE(sender.adapt_reason & WebRtcVideoChannel::ADAPTREASON_BANDWIDTH, 0);
+  EXPECT_EQ(sender.adapt_changes, stats.number_of_cpu_adapt_changes);
+  EXPECT_EQ(sender.quality_limitation_reason, stats.quality_limitation_reason);
+  EXPECT_EQ(sender.quality_limitation_durations_ms,
+            stats.quality_limitation_durations_ms);
+  EXPECT_EQ(sender.quality_limitation_resolution_changes,
+            stats.quality_limitation_resolution_changes);
+  EXPECT_EQ(sender.avg_encode_ms, stats.avg_encode_time_ms);
+  EXPECT_EQ(sender.encode_usage_percent, stats.encode_usage_percent);
+  EXPECT_EQ(sender.frames_encoded, stats.frames_encoded);
+  // Comes from substream only.
+  EXPECT_EQ(sender.key_frames_encoded, 0u);
+
+  EXPECT_EQ(sender.total_encode_time_ms, stats.total_encode_time_ms);
+  EXPECT_EQ(sender.total_encoded_bytes_target,
+            stats.total_encoded_bytes_target);
+  // Comes from substream only.
+  EXPECT_EQ(sender.total_packet_send_delay, webrtc::TimeDelta::Zero());
+  EXPECT_EQ(sender.qp_sum, absl::nullopt);
+
+  EXPECT_EQ(sender.has_entered_low_resolution,
+            stats.has_entered_low_resolution);
+  EXPECT_EQ(sender.content_type, webrtc::VideoContentType::SCREENSHARE);
+  EXPECT_EQ(sender.frames_sent, stats.frames_encoded);
+  EXPECT_EQ(sender.huge_frames_sent, stats.huge_frames_sent);
+  EXPECT_EQ(sender.rid, absl::nullopt);
+}
+
+// With two identical substreams, additive fields in the aggregated sender must
+// be doubled, while non-additive fields (frame rates, limitation reasons,
+// fraction_lost, width/height) come through unsummed.
+TEST_F(WebRtcVideoChannelTest, GetAggregatedStatsReportForSubStreams) {
+  FakeVideoSendStream* stream = AddSendStream();
+  auto stats = GetInitialisedStats();
+
+  const uint32_t ssrc_1 = 123u;
+  const uint32_t ssrc_2 = 456u;
+
+  // Fill one substream with distinct values ...
+  auto& substream = stats.substreams[ssrc_1];
+  substream.frame_counts.key_frames = 1;
+  substream.frame_counts.delta_frames = 2;
+  substream.width = 3;
+  substream.height = 4;
+  substream.total_bitrate_bps = 5;
+  substream.retransmit_bitrate_bps = 6;
+  substream.avg_delay_ms = 7;
+  substream.max_delay_ms = 8;
+  substream.rtp_stats.transmitted.total_packet_delay =
+      webrtc::TimeDelta::Millis(9);
+  substream.rtp_stats.transmitted.header_bytes = 10;
+  substream.rtp_stats.transmitted.padding_bytes = 11;
+  substream.rtp_stats.retransmitted.payload_bytes = 12;
+  substream.rtp_stats.retransmitted.packets = 13;
+  substream.rtcp_packet_type_counts.fir_packets = 14;
+  substream.rtcp_packet_type_counts.nack_packets = 15;
+  substream.rtcp_packet_type_counts.pli_packets = 16;
+  webrtc::rtcp::ReportBlock report_block;
+  report_block.SetCumulativeLost(17);
+  report_block.SetFractionLost(18);
+  webrtc::ReportBlockData report_block_data;
+  report_block_data.SetReportBlock(0, report_block, webrtc::Timestamp::Zero());
+  report_block_data.AddRoundTripTimeSample(webrtc::TimeDelta::Millis(19));
+  substream.report_block_data = report_block_data;
+  substream.encode_frame_rate = 20.0;
+  substream.frames_encoded = 21;
+  substream.qp_sum = 22;
+  substream.total_encode_time_ms = 23;
+  substream.total_encoded_bytes_target = 24;
+  substream.huge_frames_sent = 25;
+
+  // ... and duplicate it under a second SSRC so additive fields double.
+  stats.substreams[ssrc_2] = substream;
+
+  stream->SetStats(stats);
+
+  cricket::VideoMediaSendInfo send_info;
+  cricket::VideoMediaReceiveInfo receive_info;
+  EXPECT_TRUE(send_channel_->GetStats(&send_info));
+  EXPECT_TRUE(receive_channel_->GetStats(&receive_info));
+
+  EXPECT_EQ(send_info.aggregated_senders.size(), 1u);
+  auto& sender = send_info.aggregated_senders[0];
+
+  // MediaSenderInfo
+
+  EXPECT_EQ(
+      sender.payload_bytes_sent,
+      static_cast<int64_t>(2u * substream.rtp_stats.transmitted.payload_bytes));
+  EXPECT_EQ(sender.header_and_padding_bytes_sent,
+            static_cast<int64_t>(
+                2u * (substream.rtp_stats.transmitted.header_bytes +
+                      substream.rtp_stats.transmitted.padding_bytes)));
+  EXPECT_EQ(sender.retransmitted_bytes_sent,
+            2u * substream.rtp_stats.retransmitted.payload_bytes);
+  EXPECT_EQ(sender.packets_sent,
+            static_cast<int>(2 * substream.rtp_stats.transmitted.packets));
+  EXPECT_EQ(sender.retransmitted_packets_sent,
+            2u * substream.rtp_stats.retransmitted.packets);
+  EXPECT_EQ(sender.total_packet_send_delay,
+            2 * substream.rtp_stats.transmitted.total_packet_delay);
+  EXPECT_EQ(sender.packets_lost,
+            2 * substream.report_block_data->cumulative_lost());
+  // fraction_lost is a ratio, so it is not summed across substreams.
+  EXPECT_FLOAT_EQ(sender.fraction_lost,
+                  substream.report_block_data->fraction_lost());
+  EXPECT_EQ(sender.rtt_ms, 0);
+  EXPECT_EQ(sender.codec_name, DefaultCodec().name);
+  EXPECT_EQ(sender.codec_payload_type, DefaultCodec().id);
+  EXPECT_EQ(sender.local_stats.size(), 1u);
+  EXPECT_EQ(sender.local_stats[0].ssrc, last_ssrc_);
+  EXPECT_EQ(sender.local_stats[0].timestamp, 0.0f);
+  EXPECT_EQ(sender.remote_stats.size(), 0u);
+  EXPECT_EQ(sender.report_block_datas.size(), 2u * 1);
+
+  // VideoSenderInfo
+
+  EXPECT_EQ(sender.ssrc_groups.size(), 0u);
+  EXPECT_EQ(sender.encoder_implementation_name,
+            stats.encoder_implementation_name);
+  EXPECT_EQ(
+      sender.firs_received,
+      static_cast<int>(2 * substream.rtcp_packet_type_counts.fir_packets));
+  EXPECT_EQ(
+      sender.plis_received,
+      static_cast<int>(2 * substream.rtcp_packet_type_counts.pli_packets));
+  EXPECT_EQ(sender.nacks_received,
+            2 * substream.rtcp_packet_type_counts.nack_packets);
+  EXPECT_EQ(sender.send_frame_width, substream.width);
+  EXPECT_EQ(sender.send_frame_height, substream.height);
+
+  EXPECT_EQ(sender.framerate_input, stats.input_frame_rate);
+  EXPECT_EQ(sender.framerate_sent, stats.encode_frame_rate);
+  EXPECT_EQ(sender.nominal_bitrate, stats.media_bitrate_bps);
+  EXPECT_NE(sender.adapt_reason & WebRtcVideoChannel::ADAPTREASON_CPU, 0);
+  EXPECT_NE(sender.adapt_reason & WebRtcVideoChannel::ADAPTREASON_BANDWIDTH, 0);
+  EXPECT_EQ(sender.adapt_changes, stats.number_of_cpu_adapt_changes);
+  EXPECT_EQ(sender.quality_limitation_reason, stats.quality_limitation_reason);
+  EXPECT_EQ(sender.quality_limitation_durations_ms,
+            stats.quality_limitation_durations_ms);
+  EXPECT_EQ(sender.quality_limitation_resolution_changes,
+            stats.quality_limitation_resolution_changes);
+  EXPECT_EQ(sender.avg_encode_ms, stats.avg_encode_time_ms);
+  EXPECT_EQ(sender.encode_usage_percent, stats.encode_usage_percent);
+  EXPECT_EQ(sender.frames_encoded, 2u * substream.frames_encoded);
+  EXPECT_EQ(sender.key_frames_encoded, 2u * substream.frame_counts.key_frames);
+  EXPECT_EQ(sender.total_encode_time_ms, 2u * substream.total_encode_time_ms);
+  EXPECT_EQ(sender.total_encoded_bytes_target,
+            2u * substream.total_encoded_bytes_target);
+  EXPECT_EQ(sender.has_entered_low_resolution,
+            stats.has_entered_low_resolution);
+  EXPECT_EQ(sender.qp_sum, 2u * *substream.qp_sum);
+  EXPECT_EQ(sender.content_type, webrtc::VideoContentType::SCREENSHARE);
+  EXPECT_EQ(sender.frames_sent, 2u * substream.frames_encoded);
+  EXPECT_EQ(sender.huge_frames_sent, stats.huge_frames_sent);
+  EXPECT_EQ(sender.rid, absl::nullopt);
+}
+
+ // Verifies the per-layer (non-aggregated) senders report: with two identical
+ // substreams, GetStats() must return two senders, and each StreamStats field
+ // must be translated 1:1 into the first sender's MediaSenderInfo /
+ // VideoSenderInfo (no doubling, unlike the aggregated-senders report).
+ TEST_F(WebRtcVideoChannelTest, GetPerLayerStatsReportForSubStreams) {
+   FakeVideoSendStream* stream = AddSendStream();
+   auto stats = GetInitialisedStats();
+
+   const uint32_t ssrc_1 = 123u;
+   const uint32_t ssrc_2 = 456u;
+
+   // Give every field of the first substream a distinct value so that any
+   // mix-up between fields in the translation shows up as a failure.
+   auto& substream = stats.substreams[ssrc_1];
+   substream.frame_counts.key_frames = 1;
+   substream.frame_counts.delta_frames = 2;
+   substream.width = 3;
+   substream.height = 4;
+   substream.total_bitrate_bps = 5;
+   substream.retransmit_bitrate_bps = 6;
+   substream.avg_delay_ms = 7;
+   substream.max_delay_ms = 8;
+   substream.rtp_stats.transmitted.total_packet_delay =
+       webrtc::TimeDelta::Millis(9);
+   substream.rtp_stats.transmitted.header_bytes = 10;
+   substream.rtp_stats.transmitted.padding_bytes = 11;
+   substream.rtp_stats.retransmitted.payload_bytes = 12;
+   substream.rtp_stats.retransmitted.packets = 13;
+   substream.rtcp_packet_type_counts.fir_packets = 14;
+   substream.rtcp_packet_type_counts.nack_packets = 15;
+   substream.rtcp_packet_type_counts.pli_packets = 16;
+   webrtc::rtcp::ReportBlock report_block;
+   report_block.SetCumulativeLost(17);
+   report_block.SetFractionLost(18);
+   webrtc::ReportBlockData report_block_data;
+   report_block_data.SetReportBlock(0, report_block, webrtc::Timestamp::Zero());
+   report_block_data.AddRoundTripTimeSample(webrtc::TimeDelta::Millis(19));
+   substream.report_block_data = report_block_data;
+   substream.encode_frame_rate = 20.0;
+   substream.frames_encoded = 21;
+   substream.qp_sum = 22;
+   substream.total_encode_time_ms = 23;
+   substream.total_encoded_bytes_target = 24;
+   substream.huge_frames_sent = 25;
+
+   // The second layer is an exact copy of the first.
+   stats.substreams[ssrc_2] = substream;
+
+   stream->SetStats(stats);
+
+   cricket::VideoMediaSendInfo send_info;
+   cricket::VideoMediaReceiveInfo receive_info;
+   EXPECT_TRUE(send_channel_->GetStats(&send_info));
+   EXPECT_TRUE(receive_channel_->GetStats(&receive_info));
+
+   // One sender entry per substream; the checks below inspect the first
+   // (ssrc_1) entry only.
+   EXPECT_EQ(send_info.senders.size(), 2u);
+   auto& sender = send_info.senders[0];
+
+   // MediaSenderInfo
+
+   EXPECT_EQ(
+       sender.payload_bytes_sent,
+       static_cast<int64_t>(substream.rtp_stats.transmitted.payload_bytes));
+   EXPECT_EQ(
+       sender.header_and_padding_bytes_sent,
+       static_cast<int64_t>(substream.rtp_stats.transmitted.header_bytes +
+                            substream.rtp_stats.transmitted.padding_bytes));
+   EXPECT_EQ(sender.retransmitted_bytes_sent,
+             substream.rtp_stats.retransmitted.payload_bytes);
+   EXPECT_EQ(sender.packets_sent,
+             static_cast<int>(substream.rtp_stats.transmitted.packets));
+   EXPECT_EQ(sender.total_packet_send_delay,
+             substream.rtp_stats.transmitted.total_packet_delay);
+   EXPECT_EQ(sender.retransmitted_packets_sent,
+             substream.rtp_stats.retransmitted.packets);
+   EXPECT_EQ(sender.packets_lost,
+             substream.report_block_data->cumulative_lost());
+   EXPECT_FLOAT_EQ(sender.fraction_lost,
+                   substream.report_block_data->fraction_lost());
+   EXPECT_EQ(sender.rtt_ms, 0);
+   EXPECT_EQ(sender.codec_name, DefaultCodec().name);
+   EXPECT_EQ(sender.codec_payload_type, DefaultCodec().id);
+   EXPECT_EQ(sender.local_stats.size(), 1u);
+   EXPECT_EQ(sender.local_stats[0].ssrc, ssrc_1);
+   EXPECT_EQ(sender.local_stats[0].timestamp, 0.0f);
+   EXPECT_EQ(sender.remote_stats.size(), 0u);
+   EXPECT_EQ(sender.report_block_datas.size(), 1u);
+
+   // VideoSenderInfo
+
+   EXPECT_EQ(sender.ssrc_groups.size(), 0u);
+   EXPECT_EQ(sender.encoder_implementation_name,
+             stats.encoder_implementation_name);
+   EXPECT_EQ(sender.firs_received,
+             static_cast<int>(substream.rtcp_packet_type_counts.fir_packets));
+   EXPECT_EQ(sender.plis_received,
+             static_cast<int>(substream.rtcp_packet_type_counts.pli_packets));
+   EXPECT_EQ(sender.nacks_received,
+             substream.rtcp_packet_type_counts.nack_packets);
+   EXPECT_EQ(sender.send_frame_width, substream.width);
+   EXPECT_EQ(sender.send_frame_height, substream.height);
+
+   EXPECT_EQ(sender.framerate_input, stats.input_frame_rate);
+   EXPECT_EQ(sender.framerate_sent, substream.encode_frame_rate);
+   EXPECT_EQ(sender.nominal_bitrate, stats.media_bitrate_bps);
+   EXPECT_NE(sender.adapt_reason & WebRtcVideoChannel::ADAPTREASON_CPU, 0);
+   EXPECT_NE(sender.adapt_reason & WebRtcVideoChannel::ADAPTREASON_BANDWIDTH, 0);
+   EXPECT_EQ(sender.adapt_changes, stats.number_of_cpu_adapt_changes);
+   EXPECT_EQ(sender.quality_limitation_reason, stats.quality_limitation_reason);
+   EXPECT_EQ(sender.quality_limitation_durations_ms,
+             stats.quality_limitation_durations_ms);
+   EXPECT_EQ(sender.quality_limitation_resolution_changes,
+             stats.quality_limitation_resolution_changes);
+   EXPECT_EQ(sender.avg_encode_ms, stats.avg_encode_time_ms);
+   EXPECT_EQ(sender.encode_usage_percent, stats.encode_usage_percent);
+   EXPECT_EQ(sender.frames_encoded,
+             static_cast<uint32_t>(substream.frames_encoded));
+   EXPECT_EQ(sender.key_frames_encoded,
+             static_cast<uint32_t>(substream.frame_counts.key_frames));
+   EXPECT_EQ(sender.total_encode_time_ms, substream.total_encode_time_ms);
+   EXPECT_EQ(sender.total_encoded_bytes_target,
+             substream.total_encoded_bytes_target);
+   EXPECT_EQ(sender.has_entered_low_resolution,
+             stats.has_entered_low_resolution);
+   EXPECT_EQ(sender.qp_sum, *substream.qp_sum);
+   EXPECT_EQ(sender.content_type, webrtc::VideoContentType::SCREENSHARE);
+   EXPECT_EQ(sender.frames_sent,
+             static_cast<uint32_t>(substream.frames_encoded));
+   EXPECT_EQ(sender.huge_frames_sent, substream.huge_frames_sent);
+   EXPECT_EQ(sender.rid, absl::nullopt);
+ }
+
+ // In simulcast, each per-layer sender's `active` flag must come from the
+ // RtpParameters encoding that matches that layer's SSRC (here: layer 0
+ // inactive, layer 1 active) — not from any aggregate of the encodings.
+ TEST_F(WebRtcVideoChannelTest,
+        OutboundRtpIsActiveComesFromMatchingEncodingInSimulcast) {
+   constexpr uint32_t kSsrc1 = 123u;
+   constexpr uint32_t kSsrc2 = 456u;
+
+   // Create simulcast stream from both SSRCs.
+   // `kSsrc1` is the "main" ssrc used for getting parameters.
+   FakeVideoSendStream* stream =
+       AddSendStream(cricket::CreateSimStreamParams("cname", {kSsrc1, kSsrc2}));
+
+   // Deactivate the first encoding, keep the second active.
+   webrtc::RtpParameters parameters =
+       send_channel_->GetRtpSendParameters(kSsrc1);
+   ASSERT_EQ(2u, parameters.encodings.size());
+   parameters.encodings[0].active = false;
+   parameters.encodings[1].active = true;
+   send_channel_->SetRtpSendParameters(kSsrc1, parameters);
+
+   // Fill in dummy stats; default-constructing the map entries is enough to
+   // make both substreams present in the report.
+   auto stats = GetInitialisedStats();
+   stats.substreams[kSsrc1];
+   stats.substreams[kSsrc2];
+   stream->SetStats(stats);
+
+   // GetStats() and ensure `active` matches `encodings` for each SSRC.
+   cricket::VideoMediaSendInfo send_info;
+   cricket::VideoMediaReceiveInfo receive_info;
+   EXPECT_TRUE(send_channel_->GetStats(&send_info));
+   EXPECT_TRUE(receive_channel_->GetStats(&receive_info));
+
+   ASSERT_EQ(send_info.senders.size(), 2u);
+   ASSERT_TRUE(send_info.senders[0].active.has_value());
+   EXPECT_FALSE(send_info.senders[0].active.value());
+   ASSERT_TRUE(send_info.senders[1].active.has_value());
+   EXPECT_TRUE(send_info.senders[1].active.value());
+ }
+
+ // In SVC (single RTP stream, multiple spatial layers), the lone sender's
+ // `active` flag must be true if ANY encoding is active, and false only when
+ // every encoding is inactive.
+ TEST_F(WebRtcVideoChannelTest, OutboundRtpIsActiveComesFromAnyEncodingInSvc) {
+   cricket::VideoSenderParameters send_parameters;
+   send_parameters.codecs.push_back(GetEngineCodec("VP9"));
+   ASSERT_TRUE(send_channel_->SetSenderParameters(send_parameters));
+
+   constexpr uint32_t kSsrc1 = 123u;
+   constexpr uint32_t kSsrc2 = 456u;
+   constexpr uint32_t kSsrc3 = 789u;
+
+   // Configuring SVC is done the same way that simulcast is configured, the only
+   // difference is that the VP9 codec is used. This triggers special hacks that
+   // we depend on because we don't have a proper SVC API yet.
+   FakeVideoSendStream* stream = AddSendStream(
+       cricket::CreateSimStreamParams("cname", {kSsrc1, kSsrc2, kSsrc3}));
+   // Expect that we got SVC: one RTP stream carrying three spatial layers.
+   EXPECT_EQ(stream->GetEncoderConfig().number_of_streams, 1u);
+   webrtc::VideoCodecVP9 vp9_settings;
+   ASSERT_TRUE(stream->GetVp9Settings(&vp9_settings));
+   EXPECT_EQ(vp9_settings.numberOfSpatialLayers, 3u);
+
+   // Only the middle encoding is active.
+   webrtc::RtpParameters parameters =
+       send_channel_->GetRtpSendParameters(kSsrc1);
+   ASSERT_EQ(3u, parameters.encodings.size());
+   parameters.encodings[0].active = false;
+   parameters.encodings[1].active = true;
+   parameters.encodings[2].active = false;
+   send_channel_->SetRtpSendParameters(kSsrc1, parameters);
+
+   // Fill in dummy stats.
+   auto stats = GetInitialisedStats();
+   stats.substreams[kSsrc1];
+   stream->SetStats(stats);
+
+   // GetStats() and ensure `active` is true if ANY encoding is active.
+   cricket::VideoMediaSendInfo send_info;
+   cricket::VideoMediaReceiveInfo receive_info;
+   EXPECT_TRUE(send_channel_->GetStats(&send_info));
+   EXPECT_TRUE(receive_channel_->GetStats(&receive_info));
+
+   ASSERT_EQ(send_info.senders.size(), 1u);
+   // Middle layer is active.
+   ASSERT_TRUE(send_info.senders[0].active.has_value());
+   EXPECT_TRUE(send_info.senders[0].active.value());
+
+   // Now deactivate all three encodings and re-query.
+   parameters = send_channel_->GetRtpSendParameters(kSsrc1);
+   ASSERT_EQ(3u, parameters.encodings.size());
+   parameters.encodings[0].active = false;
+   parameters.encodings[1].active = false;
+   parameters.encodings[2].active = false;
+   send_channel_->SetRtpSendParameters(kSsrc1, parameters);
+   EXPECT_TRUE(send_channel_->GetStats(&send_info));
+   EXPECT_TRUE(receive_channel_->GetStats(&receive_info));
+
+   ASSERT_EQ(send_info.senders.size(), 1u);
+   // No layer is active.
+   ASSERT_TRUE(send_info.senders[0].active.has_value());
+   EXPECT_FALSE(send_info.senders[0].active.value());
+ }
+
+ // Covers https://crbug.com/1090712: a non-kMedia substream (here kRtx) whose
+ // referenced kMedia stream does not exist must not produce any per-layer
+ // sender info — GetStats() succeeds and the senders list is empty.
+ // NOTE: test name fixed from "...ProducesEmpyStats" (typo) to
+ // "...ProducesEmptyStats".
+ TEST_F(WebRtcVideoChannelTest, MediaSubstreamMissingProducesEmptyStats) {
+   FakeVideoSendStream* stream = AddSendStream();
+
+   const uint32_t kRtxSsrc = 123u;
+   const uint32_t kMissingMediaSsrc = 124u;
+
+   // Set up a scenario where we have a substream that is not kMedia (in this
+   // case: kRtx) but its associated kMedia stream does not exist yet. This
+   // results in zero GetPerLayerVideoSenderInfos despite non-empty substreams.
+   auto stats = GetInitialisedStats();
+   auto& substream = stats.substreams[kRtxSsrc];
+   substream.type = webrtc::VideoSendStream::StreamStats::StreamType::kRtx;
+   substream.referenced_media_ssrc = kMissingMediaSsrc;
+   stream->SetStats(stats);
+
+   cricket::VideoMediaSendInfo send_info;
+   cricket::VideoMediaReceiveInfo receive_info;
+   EXPECT_TRUE(send_channel_->GetStats(&send_info));
+   EXPECT_TRUE(receive_channel_->GetStats(&receive_info));
+
+   EXPECT_TRUE(send_info.senders.empty());
+ }
+
+ // The per-layer senders list is ordered by SSRC (11, 17, 42), while the
+ // single aggregated sender reports the maximum width and maximum height
+ // across all layers independently (123 from ssrc 17, 90 from ssrc 11).
+ TEST_F(WebRtcVideoChannelTest, GetStatsReportsUpperResolution) {
+   FakeVideoSendStream* stream = AddSendStream();
+   webrtc::VideoSendStream::Stats stats;
+   stats.substreams[17].width = 123;
+   stats.substreams[17].height = 40;
+   stats.substreams[42].width = 80;
+   stats.substreams[42].height = 31;
+   stats.substreams[11].width = 20;
+   stats.substreams[11].height = 90;
+   stream->SetStats(stats);
+
+   cricket::VideoMediaSendInfo send_info;
+   cricket::VideoMediaReceiveInfo receive_info;
+   EXPECT_TRUE(send_channel_->GetStats(&send_info));
+   EXPECT_TRUE(receive_channel_->GetStats(&receive_info));
+
+   ASSERT_EQ(1u, send_info.aggregated_senders.size());
+   ASSERT_EQ(3u, send_info.senders.size());
+   EXPECT_EQ(123, send_info.senders[1].send_frame_width);
+   EXPECT_EQ(40, send_info.senders[1].send_frame_height);
+   EXPECT_EQ(80, send_info.senders[2].send_frame_width);
+   EXPECT_EQ(31, send_info.senders[2].send_frame_height);
+   EXPECT_EQ(20, send_info.senders[0].send_frame_width);
+   EXPECT_EQ(90, send_info.senders[0].send_frame_height);
+   // Max of each dimension taken independently across layers.
+   EXPECT_EQ(123, send_info.aggregated_senders[0].send_frame_width);
+   EXPECT_EQ(90, send_info.aggregated_senders[0].send_frame_height);
+ }
+
+ // cpu_limited_resolution in the stream stats must translate into the
+ // ADAPTREASON_CPU flag (alone), with the adapt-change counter passed through.
+ TEST_F(WebRtcVideoChannelTest, GetStatsReportsCpuAdaptationStats) {
+   FakeVideoSendStream* stream = AddSendStream();
+   webrtc::VideoSendStream::Stats stats;
+   stats.number_of_cpu_adapt_changes = 2;
+   stats.cpu_limited_resolution = true;
+   stream->SetStats(stats);
+
+   cricket::VideoMediaSendInfo send_info;
+   cricket::VideoMediaReceiveInfo receive_info;
+   EXPECT_TRUE(send_channel_->GetStats(&send_info));
+   EXPECT_TRUE(receive_channel_->GetStats(&receive_info));
+
+   ASSERT_EQ(1U, send_info.senders.size());
+   EXPECT_EQ(WebRtcVideoChannel::ADAPTREASON_CPU,
+             send_info.senders[0].adapt_reason);
+   EXPECT_EQ(stats.number_of_cpu_adapt_changes,
+             send_info.senders[0].adapt_changes);
+ }
+
+ // When both CPU and bandwidth limit the resolution, adapt_reason must carry
+ // both flags OR'ed together.
+ TEST_F(WebRtcVideoChannelTest, GetStatsReportsAdaptationAndBandwidthStats) {
+   FakeVideoSendStream* stream = AddSendStream();
+   webrtc::VideoSendStream::Stats stats;
+   stats.number_of_cpu_adapt_changes = 2;
+   stats.cpu_limited_resolution = true;
+   stats.bw_limited_resolution = true;
+   stream->SetStats(stats);
+
+   cricket::VideoMediaSendInfo send_info;
+   cricket::VideoMediaReceiveInfo receive_info;
+   EXPECT_TRUE(send_channel_->GetStats(&send_info));
+   EXPECT_TRUE(receive_channel_->GetStats(&receive_info));
+
+   ASSERT_EQ(1U, send_info.senders.size());
+   EXPECT_EQ(WebRtcVideoChannel::ADAPTREASON_CPU |
+                 WebRtcVideoChannel::ADAPTREASON_BANDWIDTH,
+             send_info.senders[0].adapt_reason);
+   EXPECT_EQ(stats.number_of_cpu_adapt_changes,
+             send_info.senders[0].adapt_changes);
+ }
+
+ // Tests the MergeInfoAboutOutboundRtpSubstreams helper directly: kRtx and
+ // kFlexfec substreams must be folded into the kMedia substream they
+ // reference (rtp_stats added together), then dropped from the result; all
+ // non-rtp_stats fields (e.g. width/height) must come from the kMedia entry
+ // untouched.
+ TEST(WebRtcVideoChannelHelperTest, MergeInfoAboutOutboundRtpSubstreams) {
+   const uint32_t kFirstMediaStreamSsrc = 10;
+   const uint32_t kSecondMediaStreamSsrc = 20;
+   const uint32_t kRtxSsrc = 30;
+   const uint32_t kFlexfecSsrc = 40;
+   std::map<uint32_t, webrtc::VideoSendStream::StreamStats> substreams;
+   // First kMedia stream.
+   substreams[kFirstMediaStreamSsrc].type =
+       webrtc::VideoSendStream::StreamStats::StreamType::kMedia;
+   substreams[kFirstMediaStreamSsrc].rtp_stats.transmitted.header_bytes = 1;
+   substreams[kFirstMediaStreamSsrc].rtp_stats.transmitted.padding_bytes = 2;
+   substreams[kFirstMediaStreamSsrc].rtp_stats.transmitted.payload_bytes = 3;
+   substreams[kFirstMediaStreamSsrc].rtp_stats.transmitted.packets = 4;
+   substreams[kFirstMediaStreamSsrc].rtp_stats.retransmitted.header_bytes = 5;
+   substreams[kFirstMediaStreamSsrc].rtp_stats.retransmitted.padding_bytes = 6;
+   substreams[kFirstMediaStreamSsrc].rtp_stats.retransmitted.payload_bytes = 7;
+   substreams[kFirstMediaStreamSsrc].rtp_stats.retransmitted.packets = 8;
+   substreams[kFirstMediaStreamSsrc].referenced_media_ssrc = absl::nullopt;
+   substreams[kFirstMediaStreamSsrc].width = 1280;
+   substreams[kFirstMediaStreamSsrc].height = 720;
+   // Second kMedia stream.
+   substreams[kSecondMediaStreamSsrc].type =
+       webrtc::VideoSendStream::StreamStats::StreamType::kMedia;
+   substreams[kSecondMediaStreamSsrc].rtp_stats.transmitted.header_bytes = 10;
+   substreams[kSecondMediaStreamSsrc].rtp_stats.transmitted.padding_bytes = 11;
+   substreams[kSecondMediaStreamSsrc].rtp_stats.transmitted.payload_bytes = 12;
+   substreams[kSecondMediaStreamSsrc].rtp_stats.transmitted.packets = 13;
+   substreams[kSecondMediaStreamSsrc].rtp_stats.retransmitted.header_bytes = 14;
+   substreams[kSecondMediaStreamSsrc].rtp_stats.retransmitted.padding_bytes = 15;
+   substreams[kSecondMediaStreamSsrc].rtp_stats.retransmitted.payload_bytes = 16;
+   substreams[kSecondMediaStreamSsrc].rtp_stats.retransmitted.packets = 17;
+   substreams[kSecondMediaStreamSsrc].referenced_media_ssrc = absl::nullopt;
+   substreams[kSecondMediaStreamSsrc].width = 640;
+   substreams[kSecondMediaStreamSsrc].height = 480;
+   // kRtx stream referencing the first kMedia stream.
+   substreams[kRtxSsrc].type =
+       webrtc::VideoSendStream::StreamStats::StreamType::kRtx;
+   substreams[kRtxSsrc].rtp_stats.transmitted.header_bytes = 19;
+   substreams[kRtxSsrc].rtp_stats.transmitted.padding_bytes = 20;
+   substreams[kRtxSsrc].rtp_stats.transmitted.payload_bytes = 21;
+   substreams[kRtxSsrc].rtp_stats.transmitted.packets = 22;
+   substreams[kRtxSsrc].rtp_stats.retransmitted.header_bytes = 23;
+   substreams[kRtxSsrc].rtp_stats.retransmitted.padding_bytes = 24;
+   substreams[kRtxSsrc].rtp_stats.retransmitted.payload_bytes = 25;
+   substreams[kRtxSsrc].rtp_stats.retransmitted.packets = 26;
+   substreams[kRtxSsrc].referenced_media_ssrc = kFirstMediaStreamSsrc;
+   // kFlexfec stream referencing the second kMedia stream.
+   substreams[kFlexfecSsrc].type =
+       webrtc::VideoSendStream::StreamStats::StreamType::kFlexfec;
+   substreams[kFlexfecSsrc].rtp_stats.transmitted.header_bytes = 19;
+   substreams[kFlexfecSsrc].rtp_stats.transmitted.padding_bytes = 20;
+   substreams[kFlexfecSsrc].rtp_stats.transmitted.payload_bytes = 21;
+   substreams[kFlexfecSsrc].rtp_stats.transmitted.packets = 22;
+   substreams[kFlexfecSsrc].rtp_stats.retransmitted.header_bytes = 23;
+   substreams[kFlexfecSsrc].rtp_stats.retransmitted.padding_bytes = 24;
+   substreams[kFlexfecSsrc].rtp_stats.retransmitted.payload_bytes = 25;
+   substreams[kFlexfecSsrc].rtp_stats.retransmitted.packets = 26;
+   substreams[kFlexfecSsrc].referenced_media_ssrc = kSecondMediaStreamSsrc;
+
+   auto merged_substreams =
+       MergeInfoAboutOutboundRtpSubstreamsForTesting(substreams);
+   // Only kMedia substreams remain.
+   EXPECT_TRUE(merged_substreams.find(kFirstMediaStreamSsrc) !=
+               merged_substreams.end());
+   EXPECT_EQ(merged_substreams[kFirstMediaStreamSsrc].type,
+             webrtc::VideoSendStream::StreamStats::StreamType::kMedia);
+   EXPECT_TRUE(merged_substreams.find(kSecondMediaStreamSsrc) !=
+               merged_substreams.end());
+   EXPECT_EQ(merged_substreams[kSecondMediaStreamSsrc].type,
+             webrtc::VideoSendStream::StreamStats::StreamType::kMedia);
+   EXPECT_FALSE(merged_substreams.find(kRtxSsrc) != merged_substreams.end());
+   EXPECT_FALSE(merged_substreams.find(kFlexfecSsrc) != merged_substreams.end());
+   // Expect kFirstMediaStreamSsrc's rtp_stats to be merged with kRtxSsrc.
+   webrtc::StreamDataCounters first_media_expected_rtp_stats =
+       substreams[kFirstMediaStreamSsrc].rtp_stats;
+   first_media_expected_rtp_stats.Add(substreams[kRtxSsrc].rtp_stats);
+   EXPECT_EQ(merged_substreams[kFirstMediaStreamSsrc].rtp_stats.transmitted,
+             first_media_expected_rtp_stats.transmitted);
+   EXPECT_EQ(merged_substreams[kFirstMediaStreamSsrc].rtp_stats.retransmitted,
+             first_media_expected_rtp_stats.retransmitted);
+   // Expect kSecondMediaStreamSsrc' rtp_stats to be merged with kFlexfecSsrc.
+   webrtc::StreamDataCounters second_media_expected_rtp_stats =
+       substreams[kSecondMediaStreamSsrc].rtp_stats;
+   second_media_expected_rtp_stats.Add(substreams[kFlexfecSsrc].rtp_stats);
+   EXPECT_EQ(merged_substreams[kSecondMediaStreamSsrc].rtp_stats.transmitted,
+             second_media_expected_rtp_stats.transmitted);
+   EXPECT_EQ(merged_substreams[kSecondMediaStreamSsrc].rtp_stats.retransmitted,
+             second_media_expected_rtp_stats.retransmitted);
+   // Expect other metrics to come from the original kMedia stats.
+   EXPECT_EQ(merged_substreams[kFirstMediaStreamSsrc].width,
+             substreams[kFirstMediaStreamSsrc].width);
+   EXPECT_EQ(merged_substreams[kFirstMediaStreamSsrc].height,
+             substreams[kFirstMediaStreamSsrc].height);
+   EXPECT_EQ(merged_substreams[kSecondMediaStreamSsrc].width,
+             substreams[kSecondMediaStreamSsrc].width);
+   EXPECT_EQ(merged_substreams[kSecondMediaStreamSsrc].height,
+             substreams[kSecondMediaStreamSsrc].height);
+ }
+
+ // End-to-end check that GetStats() merges RTX (and FlexFEC) byte/packet
+ // counters into the media layer they reference: sender[0] = layer 1 media +
+ // its RTX; sender[1] = layer 2 media + its RTX + FlexFEC.
+ TEST_F(WebRtcVideoChannelTest,
+        GetStatsReportsTransmittedAndRetransmittedBytesAndPacketsCorrectly) {
+   FakeVideoSendStream* stream = AddSendStream();
+   webrtc::VideoSendStream::Stats stats;
+   // Simulcast layer 1, RTP stream. header+padding=10, payload=20, packets=3.
+   stats.substreams[101].type =
+       webrtc::VideoSendStream::StreamStats::StreamType::kMedia;
+   stats.substreams[101].rtp_stats.transmitted.header_bytes = 5;
+   stats.substreams[101].rtp_stats.transmitted.padding_bytes = 5;
+   stats.substreams[101].rtp_stats.transmitted.payload_bytes = 20;
+   stats.substreams[101].rtp_stats.transmitted.packets = 3;
+   stats.substreams[101].rtp_stats.retransmitted.header_bytes = 0;
+   stats.substreams[101].rtp_stats.retransmitted.padding_bytes = 0;
+   stats.substreams[101].rtp_stats.retransmitted.payload_bytes = 0;
+   stats.substreams[101].rtp_stats.retransmitted.packets = 0;
+   stats.substreams[101].referenced_media_ssrc = absl::nullopt;
+   // Simulcast layer 1, RTX stream. header+padding=5, payload=10, packets=1.
+   // On RTX everything transmitted is a retransmission, so transmitted is a
+   // copy of retransmitted.
+   stats.substreams[102].type =
+       webrtc::VideoSendStream::StreamStats::StreamType::kRtx;
+   stats.substreams[102].rtp_stats.retransmitted.header_bytes = 3;
+   stats.substreams[102].rtp_stats.retransmitted.padding_bytes = 2;
+   stats.substreams[102].rtp_stats.retransmitted.payload_bytes = 10;
+   stats.substreams[102].rtp_stats.retransmitted.packets = 1;
+   stats.substreams[102].rtp_stats.transmitted =
+       stats.substreams[102].rtp_stats.retransmitted;
+   stats.substreams[102].referenced_media_ssrc = 101;
+   // Simulcast layer 2, RTP stream. header+padding=20, payload=40, packets=7.
+   stats.substreams[201].type =
+       webrtc::VideoSendStream::StreamStats::StreamType::kMedia;
+   stats.substreams[201].rtp_stats.transmitted.header_bytes = 10;
+   stats.substreams[201].rtp_stats.transmitted.padding_bytes = 10;
+   stats.substreams[201].rtp_stats.transmitted.payload_bytes = 40;
+   stats.substreams[201].rtp_stats.transmitted.packets = 7;
+   stats.substreams[201].rtp_stats.retransmitted.header_bytes = 0;
+   stats.substreams[201].rtp_stats.retransmitted.padding_bytes = 0;
+   stats.substreams[201].rtp_stats.retransmitted.payload_bytes = 0;
+   stats.substreams[201].rtp_stats.retransmitted.packets = 0;
+   stats.substreams[201].referenced_media_ssrc = absl::nullopt;
+   // Simulcast layer 2, RTX stream. header+padding=10, payload=20, packets=4.
+   stats.substreams[202].type =
+       webrtc::VideoSendStream::StreamStats::StreamType::kRtx;
+   stats.substreams[202].rtp_stats.retransmitted.header_bytes = 6;
+   stats.substreams[202].rtp_stats.retransmitted.padding_bytes = 4;
+   stats.substreams[202].rtp_stats.retransmitted.payload_bytes = 20;
+   stats.substreams[202].rtp_stats.retransmitted.packets = 4;
+   stats.substreams[202].rtp_stats.transmitted =
+       stats.substreams[202].rtp_stats.retransmitted;
+   stats.substreams[202].referenced_media_ssrc = 201;
+   // FlexFEC stream associated with the Simulcast layer 2.
+   // header+padding=15, payload=17, packets=5.
+   stats.substreams[301].type =
+       webrtc::VideoSendStream::StreamStats::StreamType::kFlexfec;
+   stats.substreams[301].rtp_stats.transmitted.header_bytes = 13;
+   stats.substreams[301].rtp_stats.transmitted.padding_bytes = 2;
+   stats.substreams[301].rtp_stats.transmitted.payload_bytes = 17;
+   stats.substreams[301].rtp_stats.transmitted.packets = 5;
+   stats.substreams[301].rtp_stats.retransmitted.header_bytes = 0;
+   stats.substreams[301].rtp_stats.retransmitted.padding_bytes = 0;
+   stats.substreams[301].rtp_stats.retransmitted.payload_bytes = 0;
+   stats.substreams[301].rtp_stats.retransmitted.packets = 0;
+   stats.substreams[301].referenced_media_ssrc = 201;
+   stream->SetStats(stats);
+
+   cricket::VideoMediaSendInfo send_info;
+   cricket::VideoMediaReceiveInfo receive_info;
+   EXPECT_TRUE(send_channel_->GetStats(&send_info));
+   EXPECT_TRUE(receive_channel_->GetStats(&receive_info));
+
+   // Sender 0 = media 101 + rtx 102: 10+5 header/padding, 20+10 payload,
+   // 3+1 packets; retransmissions come from the RTX stream alone.
+   EXPECT_EQ(send_info.senders.size(), 2u);
+   EXPECT_EQ(15u, send_info.senders[0].header_and_padding_bytes_sent);
+   EXPECT_EQ(30u, send_info.senders[0].payload_bytes_sent);
+   EXPECT_EQ(4, send_info.senders[0].packets_sent);
+   EXPECT_EQ(10u, send_info.senders[0].retransmitted_bytes_sent);
+   EXPECT_EQ(1u, send_info.senders[0].retransmitted_packets_sent);
+
+   // Sender 1 = media 201 + rtx 202 + flexfec 301: 20+10+15 header/padding,
+   // 40+20+17 payload, 7+4+5 packets.
+   EXPECT_EQ(45u, send_info.senders[1].header_and_padding_bytes_sent);
+   EXPECT_EQ(77u, send_info.senders[1].payload_bytes_sent);
+   EXPECT_EQ(16, send_info.senders[1].packets_sent);
+   EXPECT_EQ(20u, send_info.senders[1].retransmitted_bytes_sent);
+   EXPECT_EQ(4u, send_info.senders[1].retransmitted_packets_sent);
+ }
+
+ // bw_limited_resolution alone must translate into ADAPTREASON_BANDWIDTH.
+ TEST_F(WebRtcVideoChannelTest,
+        GetStatsTranslatesBandwidthLimitedResolutionCorrectly) {
+   FakeVideoSendStream* stream = AddSendStream();
+   webrtc::VideoSendStream::Stats stats;
+   stats.bw_limited_resolution = true;
+   stream->SetStats(stats);
+
+   cricket::VideoMediaSendInfo send_info;
+   cricket::VideoMediaReceiveInfo receive_info;
+   EXPECT_TRUE(send_channel_->GetStats(&send_info));
+   EXPECT_TRUE(receive_channel_->GetStats(&receive_info));
+
+   ASSERT_EQ(1U, send_info.senders.size());
+   EXPECT_EQ(WebRtcVideoChannel::ADAPTREASON_BANDWIDTH,
+             send_info.senders[0].adapt_reason);
+ }
+
+ // Received RTCP feedback counters (FIR/NACK/PLI) must be reported per-layer
+ // in `senders` and summed across layers in `aggregated_senders`.
+ TEST_F(WebRtcVideoChannelTest, GetStatsTranslatesSendRtcpPacketTypesCorrectly) {
+   FakeVideoSendStream* stream = AddSendStream();
+   webrtc::VideoSendStream::Stats stats;
+   stats.substreams[17].rtcp_packet_type_counts.fir_packets = 2;
+   stats.substreams[17].rtcp_packet_type_counts.nack_packets = 3;
+   stats.substreams[17].rtcp_packet_type_counts.pli_packets = 4;
+
+   stats.substreams[42].rtcp_packet_type_counts.fir_packets = 5;
+   stats.substreams[42].rtcp_packet_type_counts.nack_packets = 7;
+   stats.substreams[42].rtcp_packet_type_counts.pli_packets = 9;
+
+   stream->SetStats(stats);
+
+   cricket::VideoMediaSendInfo send_info;
+   cricket::VideoMediaReceiveInfo receive_info;
+   EXPECT_TRUE(send_channel_->GetStats(&send_info));
+   EXPECT_TRUE(receive_channel_->GetStats(&receive_info));
+
+   EXPECT_EQ(2, send_info.senders[0].firs_received);
+   EXPECT_EQ(3u, send_info.senders[0].nacks_received);
+   EXPECT_EQ(4, send_info.senders[0].plis_received);
+
+   EXPECT_EQ(5, send_info.senders[1].firs_received);
+   EXPECT_EQ(7u, send_info.senders[1].nacks_received);
+   EXPECT_EQ(9, send_info.senders[1].plis_received);
+
+   // Aggregated sender carries the sums (2+5, 3+7, 4+9).
+   EXPECT_EQ(7, send_info.aggregated_senders[0].firs_received);
+   EXPECT_EQ(10u, send_info.aggregated_senders[0].nacks_received);
+   EXPECT_EQ(13, send_info.aggregated_senders[0].plis_received);
+ }
+
+ // On the receive side, RTCP counters are feedback we SENT (firs_sent,
+ // nacks_sent, plis_sent) and must pass through unchanged.
+ TEST_F(WebRtcVideoChannelTest,
+        GetStatsTranslatesReceiveRtcpPacketTypesCorrectly) {
+   FakeVideoReceiveStream* stream = AddRecvStream();
+   webrtc::VideoReceiveStreamInterface::Stats stats;
+   stats.rtcp_packet_type_counts.fir_packets = 2;
+   stats.rtcp_packet_type_counts.nack_packets = 3;
+   stats.rtcp_packet_type_counts.pli_packets = 4;
+   stream->SetStats(stats);
+
+   cricket::VideoMediaSendInfo send_info;
+   cricket::VideoMediaReceiveInfo receive_info;
+   EXPECT_TRUE(send_channel_->GetStats(&send_info));
+   EXPECT_TRUE(receive_channel_->GetStats(&receive_info));
+
+   EXPECT_EQ(
+       stats.rtcp_packet_type_counts.fir_packets,
+       rtc::checked_cast<unsigned int>(receive_info.receivers[0].firs_sent));
+   EXPECT_EQ(stats.rtcp_packet_type_counts.nack_packets,
+             receive_info.receivers[0].nacks_sent);
+   EXPECT_EQ(
+       stats.rtcp_packet_type_counts.pli_packets,
+       rtc::checked_cast<unsigned int>(receive_info.receivers[0].plis_sent));
+ }
+
+ // Field-by-field translation of VideoReceiveStreamInterface::Stats decode
+ // metrics into VideoReceiverInfo, including jitter-buffer TimeDelta values
+ // converted to seconds and RTX packet/byte counters mapped to the
+ // retransmitted_* fields.
+ TEST_F(WebRtcVideoChannelTest, GetStatsTranslatesDecodeStatsCorrectly) {
+   FakeVideoReceiveStream* stream = AddRecvStream();
+   webrtc::VideoReceiveStreamInterface::Stats stats;
+   stats.decoder_implementation_name = "decoder_implementation_name";
+   stats.decode_ms = 2;
+   stats.max_decode_ms = 3;
+   stats.current_delay_ms = 4;
+   stats.target_delay_ms = 5;
+   stats.jitter_buffer_ms = 6;
+   stats.jitter_buffer_delay = TimeDelta::Seconds(60);
+   stats.jitter_buffer_target_delay = TimeDelta::Seconds(55);
+   stats.jitter_buffer_emitted_count = 6;
+   stats.jitter_buffer_minimum_delay = TimeDelta::Seconds(50);
+   stats.min_playout_delay_ms = 7;
+   stats.render_delay_ms = 8;
+   stats.width = 9;
+   stats.height = 10;
+   stats.frame_counts.key_frames = 11;
+   stats.frame_counts.delta_frames = 12;
+   stats.frames_rendered = 13;
+   stats.frames_decoded = 14;
+   stats.qp_sum = 15;
+   stats.total_decode_time = webrtc::TimeDelta::Millis(16);
+   stats.total_assembly_time = webrtc::TimeDelta::Millis(4);
+   stats.frames_assembled_from_multiple_packets = 2;
+   stats.power_efficient_decoder = true;
+   webrtc::RtpReceiveStats rtx_stats;
+   rtx_stats.packet_counter.packets = 5;
+   rtx_stats.packet_counter.payload_bytes = 23;
+   stats.rtx_rtp_stats = rtx_stats;
+   stream->SetStats(stats);
+
+   cricket::VideoMediaSendInfo send_info;
+   cricket::VideoMediaReceiveInfo receive_info;
+   EXPECT_TRUE(send_channel_->GetStats(&send_info));
+   EXPECT_TRUE(receive_channel_->GetStats(&receive_info));
+
+   EXPECT_EQ(stats.decoder_implementation_name,
+             receive_info.receivers[0].decoder_implementation_name);
+   EXPECT_EQ(stats.decode_ms, receive_info.receivers[0].decode_ms);
+   EXPECT_EQ(stats.max_decode_ms, receive_info.receivers[0].max_decode_ms);
+   EXPECT_EQ(stats.current_delay_ms, receive_info.receivers[0].current_delay_ms);
+   EXPECT_EQ(stats.target_delay_ms, receive_info.receivers[0].target_delay_ms);
+   EXPECT_EQ(stats.jitter_buffer_ms, receive_info.receivers[0].jitter_buffer_ms);
+   EXPECT_EQ(stats.jitter_buffer_delay.seconds<double>(),
+             receive_info.receivers[0].jitter_buffer_delay_seconds);
+   EXPECT_EQ(stats.jitter_buffer_target_delay.seconds<double>(),
+             receive_info.receivers[0].jitter_buffer_target_delay_seconds);
+   EXPECT_EQ(stats.jitter_buffer_emitted_count,
+             receive_info.receivers[0].jitter_buffer_emitted_count);
+   EXPECT_EQ(stats.jitter_buffer_minimum_delay.seconds<double>(),
+             receive_info.receivers[0].jitter_buffer_minimum_delay_seconds);
+   EXPECT_EQ(stats.min_playout_delay_ms,
+             receive_info.receivers[0].min_playout_delay_ms);
+   EXPECT_EQ(stats.render_delay_ms, receive_info.receivers[0].render_delay_ms);
+   EXPECT_EQ(stats.width, receive_info.receivers[0].frame_width);
+   EXPECT_EQ(stats.height, receive_info.receivers[0].frame_height);
+   // frames_received is derived: key frames + delta frames.
+   EXPECT_EQ(rtc::checked_cast<unsigned int>(stats.frame_counts.key_frames +
+                                             stats.frame_counts.delta_frames),
+             receive_info.receivers[0].frames_received);
+   EXPECT_EQ(stats.frames_rendered, receive_info.receivers[0].frames_rendered);
+   EXPECT_EQ(stats.frames_decoded, receive_info.receivers[0].frames_decoded);
+   EXPECT_EQ(rtc::checked_cast<unsigned int>(stats.frame_counts.key_frames),
+             receive_info.receivers[0].key_frames_decoded);
+   EXPECT_EQ(stats.qp_sum, receive_info.receivers[0].qp_sum);
+   EXPECT_EQ(stats.total_decode_time,
+             receive_info.receivers[0].total_decode_time);
+   EXPECT_EQ(stats.total_assembly_time,
+             receive_info.receivers[0].total_assembly_time);
+   EXPECT_EQ(stats.frames_assembled_from_multiple_packets,
+             receive_info.receivers[0].frames_assembled_from_multiple_packets);
+   EXPECT_TRUE(receive_info.receivers[0].power_efficient_decoder);
+   EXPECT_EQ(stats.rtx_rtp_stats->packet_counter.packets,
+             receive_info.receivers[0].retransmitted_packets_received);
+   EXPECT_EQ(stats.rtx_rtp_stats->packet_counter.payload_bytes,
+             receive_info.receivers[0].retransmitted_bytes_received);
+ }
+
+ // Inter-frame delay totals (plain and squared) must pass through unchanged.
+ TEST_F(WebRtcVideoChannelTest,
+        GetStatsTranslatesInterFrameDelayStatsCorrectly) {
+   FakeVideoReceiveStream* stream = AddRecvStream();
+   webrtc::VideoReceiveStreamInterface::Stats stats;
+   stats.total_inter_frame_delay = 0.123;
+   stats.total_squared_inter_frame_delay = 0.00456;
+   stream->SetStats(stats);
+
+   cricket::VideoMediaSendInfo send_info;
+   cricket::VideoMediaReceiveInfo receive_info;
+   EXPECT_TRUE(send_channel_->GetStats(&send_info));
+   EXPECT_TRUE(receive_channel_->GetStats(&receive_info));
+
+   EXPECT_EQ(stats.total_inter_frame_delay,
+             receive_info.receivers[0].total_inter_frame_delay);
+   EXPECT_EQ(stats.total_squared_inter_frame_delay,
+             receive_info.receivers[0].total_squared_inter_frame_delay);
+ }
+
+ // Receive-side RTP packet counters (payload bytes, packets, losses) must be
+ // translated into the receiver info unchanged.
+ TEST_F(WebRtcVideoChannelTest, GetStatsTranslatesReceivePacketStatsCorrectly) {
+   FakeVideoReceiveStream* stream = AddRecvStream();
+   webrtc::VideoReceiveStreamInterface::Stats stats;
+   stats.rtp_stats.packet_counter.payload_bytes = 2;
+   stats.rtp_stats.packet_counter.header_bytes = 3;
+   stats.rtp_stats.packet_counter.padding_bytes = 4;
+   stats.rtp_stats.packet_counter.packets = 5;
+   stats.rtp_stats.packets_lost = 6;
+   stream->SetStats(stats);
+
+   cricket::VideoMediaSendInfo send_info;
+   cricket::VideoMediaReceiveInfo receive_info;
+   EXPECT_TRUE(send_channel_->GetStats(&send_info));
+   EXPECT_TRUE(receive_channel_->GetStats(&receive_info));
+
+   EXPECT_EQ(stats.rtp_stats.packet_counter.payload_bytes,
+             rtc::checked_cast<size_t>(
+                 receive_info.receivers[0].payload_bytes_received));
+   EXPECT_EQ(stats.rtp_stats.packet_counter.packets,
+             rtc::checked_cast<unsigned int>(
+                 receive_info.receivers[0].packets_received));
+   EXPECT_EQ(stats.rtp_stats.packets_lost,
+             receive_info.receivers[0].packets_lost);
+ }
+
+// Verifies that call-level stats (here: RTT) are propagated to every sender's
+// info, not just the first one.
+TEST_F(WebRtcVideoChannelTest, TranslatesCallStatsCorrectly) {
+ AddSendStream();
+ AddSendStream();
+ Call::Stats stats;
+ stats.rtt_ms = 123;
+ fake_call_->SetStats(stats);
+
+ cricket::VideoMediaSendInfo send_info;
+ cricket::VideoMediaReceiveInfo receive_info;
+ EXPECT_TRUE(send_channel_->GetStats(&send_info));
+ EXPECT_TRUE(receive_channel_->GetStats(&receive_info));
+
+ ASSERT_EQ(2u, send_info.senders.size());
+ // The same call-wide RTT should appear on both senders.
+ EXPECT_EQ(stats.rtt_ms, send_info.senders[0].rtt_ms);
+ EXPECT_EQ(stats.rtt_ms, send_info.senders[1].rtt_ms);
+}
+
+// Verifies bitrate stat translation across two send streams with two
+// substreams each: per-sender nominal bitrates come from each stream's media
+// bitrate, while the bandwidth-estimation info aggregates target/actual
+// encoder bitrates and the per-substream transmit/retransmit bitrates across
+// all streams.
+TEST_F(WebRtcVideoChannelTest, TranslatesSenderBitrateStatsCorrectly) {
+ FakeVideoSendStream* stream = AddSendStream();
+ webrtc::VideoSendStream::Stats stats;
+ stats.target_media_bitrate_bps = 156;
+ stats.media_bitrate_bps = 123;
+ stats.substreams[17].total_bitrate_bps = 1;
+ stats.substreams[17].retransmit_bitrate_bps = 2;
+ stats.substreams[42].total_bitrate_bps = 3;
+ stats.substreams[42].retransmit_bitrate_bps = 4;
+ stream->SetStats(stats);
+
+ FakeVideoSendStream* stream2 = AddSendStream();
+ webrtc::VideoSendStream::Stats stats2;
+ stats2.target_media_bitrate_bps = 200;
+ stats2.media_bitrate_bps = 321;
+ stats2.substreams[13].total_bitrate_bps = 5;
+ stats2.substreams[13].retransmit_bitrate_bps = 6;
+ stats2.substreams[21].total_bitrate_bps = 7;
+ stats2.substreams[21].retransmit_bitrate_bps = 8;
+ stream2->SetStats(stats2);
+
+ cricket::VideoMediaSendInfo send_info;
+ cricket::VideoMediaReceiveInfo receive_info;
+ EXPECT_TRUE(send_channel_->GetStats(&send_info));
+ EXPECT_TRUE(receive_channel_->GetStats(&receive_info));
+
+ // Two aggregated senders (one per stream); four per-substream senders.
+ ASSERT_EQ(2u, send_info.aggregated_senders.size());
+ ASSERT_EQ(4u, send_info.senders.size());
+ BandwidthEstimationInfo bwe_info;
+ send_channel_->FillBitrateInfo(&bwe_info);
+ // Assuming stream and stream2 corresponds to senders[0] and [1] respectively
+ // is OK as std::maps are sorted and AddSendStream() gives increasing SSRCs.
+ EXPECT_EQ(stats.media_bitrate_bps,
+ send_info.aggregated_senders[0].nominal_bitrate);
+ EXPECT_EQ(stats2.media_bitrate_bps,
+ send_info.aggregated_senders[1].nominal_bitrate);
+ EXPECT_EQ(stats.target_media_bitrate_bps + stats2.target_media_bitrate_bps,
+ bwe_info.target_enc_bitrate);
+ EXPECT_EQ(stats.media_bitrate_bps + stats2.media_bitrate_bps,
+ bwe_info.actual_enc_bitrate);
+ EXPECT_EQ(1 + 3 + 5 + 7, bwe_info.transmit_bitrate)
+ << "Bandwidth stats should take all streams into account.";
+ EXPECT_EQ(2 + 4 + 6 + 8, bwe_info.retransmit_bitrate)
+ << "Bandwidth stats should take all streams into account.";
+}
+
+// Verifies that a default (unsignaled) receive stream, created on first
+// packet arrival without RTX configuration, is reconfigured in place — not
+// replaced by a second stream — once AddRecvStream() signals the RTX SSRCs
+// and payload-type associations.
+TEST_F(WebRtcVideoChannelTest, DefaultReceiveStreamReconfiguresToUseRtx) {
+ EXPECT_TRUE(send_channel_->SetSenderParameters(send_parameters_));
+
+ const std::vector<uint32_t> ssrcs = MAKE_VECTOR(kSsrcs1);
+ const std::vector<uint32_t> rtx_ssrcs = MAKE_VECTOR(kRtxSsrcs1);
+
+ ASSERT_EQ(0u, fake_call_->GetVideoReceiveStreams().size());
+ RtpPacketReceived packet;
+ packet.SetSsrc(ssrcs[0]);
+ ReceivePacketAndAdvanceTime(packet);
+
+ ASSERT_EQ(1u, fake_call_->GetVideoReceiveStreams().size())
+ << "No default receive stream created.";
+ FakeVideoReceiveStream* recv_stream = fake_call_->GetVideoReceiveStreams()[0];
+ EXPECT_EQ(0u, recv_stream->GetConfig().rtp.rtx_ssrc)
+ << "Default receive stream should not have configured RTX";
+
+ EXPECT_TRUE(receive_channel_->AddRecvStream(
+ cricket::CreateSimWithRtxStreamParams("cname", ssrcs, rtx_ssrcs)));
+ ASSERT_EQ(1u, fake_call_->GetVideoReceiveStreams().size())
+ << "AddRecvStream should have reconfigured, not added a new receiver.";
+ recv_stream = fake_call_->GetVideoReceiveStreams()[0];
+ EXPECT_FALSE(
+ recv_stream->GetConfig().rtp.rtx_associated_payload_types.empty());
+ EXPECT_TRUE(VerifyRtxReceiveAssociations(recv_stream->GetConfig()))
+ << "RTX should be mapped for all decoders/payload types.";
+ EXPECT_TRUE(HasRtxReceiveAssociation(recv_stream->GetConfig(),
+ GetEngineCodec("red").id))
+ << "RTX should be mapped also for the RED payload type";
+ EXPECT_EQ(rtx_ssrcs[0], recv_stream->GetConfig().rtp.rtx_ssrc);
+}
+
+// Verifies that stream params declaring RTX groups but missing the matching
+// RTX SSRCs are rejected by both the send and receive channels.
+TEST_F(WebRtcVideoChannelTest, RejectsAddingStreamsWithMissingSsrcsForRtx) {
+ EXPECT_TRUE(send_channel_->SetSenderParameters(send_parameters_));
+
+ const std::vector<uint32_t> ssrcs = MAKE_VECTOR(kSsrcs1);
+ const std::vector<uint32_t> rtx_ssrcs = MAKE_VECTOR(kRtxSsrcs1);
+
+ StreamParams sp =
+ cricket::CreateSimWithRtxStreamParams("cname", ssrcs, rtx_ssrcs);
+ sp.ssrcs = ssrcs; // Without RTXs, this is the important part.
+
+ EXPECT_FALSE(send_channel_->AddSendStream(sp));
+ EXPECT_FALSE(receive_channel_->AddRecvStream(sp));
+}
+
+// Verifies that an SSRC already claimed as an RTX SSRC cannot be reused by a
+// new stream, and that removing the original stream releases it again.
+TEST_F(WebRtcVideoChannelTest, RejectsAddingStreamsWithOverlappingRtxSsrcs) {
+ EXPECT_TRUE(send_channel_->SetSenderParameters(send_parameters_));
+
+ const std::vector<uint32_t> ssrcs = MAKE_VECTOR(kSsrcs1);
+ const std::vector<uint32_t> rtx_ssrcs = MAKE_VECTOR(kRtxSsrcs1);
+
+ StreamParams sp =
+ cricket::CreateSimWithRtxStreamParams("cname", ssrcs, rtx_ssrcs);
+
+ EXPECT_TRUE(send_channel_->AddSendStream(sp));
+ EXPECT_TRUE(receive_channel_->AddRecvStream(sp));
+
+ // The RTX SSRC is already used in previous streams, using it should fail.
+ sp = cricket::StreamParams::CreateLegacy(rtx_ssrcs[0]);
+ EXPECT_FALSE(send_channel_->AddSendStream(sp));
+ EXPECT_FALSE(receive_channel_->AddRecvStream(sp));
+
+ // After removing the original stream this should be fine to add (makes sure
+ // that RTX ssrcs are not forever taken).
+ EXPECT_TRUE(send_channel_->RemoveSendStream(ssrcs[0]));
+ EXPECT_TRUE(receive_channel_->RemoveRecvStream(ssrcs[0]));
+ EXPECT_TRUE(send_channel_->AddSendStream(sp));
+ EXPECT_TRUE(receive_channel_->AddRecvStream(sp));
+}
+
+// Verifies that an SSRC already used by a simulcast stream cannot be reused
+// by a new stream, and that removing the original stream releases it again.
+TEST_F(WebRtcVideoChannelTest,
+ RejectsAddingStreamsWithOverlappingSimulcastSsrcs) {
+ static const uint32_t kFirstStreamSsrcs[] = {1, 2, 3};
+ static const uint32_t kOverlappingStreamSsrcs[] = {4, 3, 5};
+ EXPECT_TRUE(send_channel_->SetSenderParameters(send_parameters_));
+
+ StreamParams sp =
+ cricket::CreateSimStreamParams("cname", MAKE_VECTOR(kFirstStreamSsrcs));
+
+ EXPECT_TRUE(send_channel_->AddSendStream(sp));
+ EXPECT_TRUE(receive_channel_->AddRecvStream(sp));
+
+ // One of the SSRCs is already used in previous streams, using it should fail.
+ sp = cricket::CreateSimStreamParams("cname",
+ MAKE_VECTOR(kOverlappingStreamSsrcs));
+ EXPECT_FALSE(send_channel_->AddSendStream(sp));
+ EXPECT_FALSE(receive_channel_->AddRecvStream(sp));
+
+ // After removing the original stream this should be fine to add (makes sure
+ // that simulcast ssrcs are not forever taken).
+ EXPECT_TRUE(send_channel_->RemoveSendStream(kFirstStreamSsrcs[0]));
+ EXPECT_TRUE(receive_channel_->RemoveRecvStream(kFirstStreamSsrcs[0]));
+ EXPECT_TRUE(send_channel_->AddSendStream(sp));
+ EXPECT_TRUE(receive_channel_->AddRecvStream(sp));
+}
+
+// Verifies that the ssrc_groups of the signaled stream params are reported
+// back unchanged in both sender and receiver stats.
+TEST_F(WebRtcVideoChannelTest, ReportsSsrcGroupsInStats) {
+ EXPECT_TRUE(send_channel_->SetSenderParameters(send_parameters_));
+
+ static const uint32_t kSenderSsrcs[] = {4, 7, 10};
+ static const uint32_t kSenderRtxSsrcs[] = {5, 8, 11};
+
+ StreamParams sender_sp = cricket::CreateSimWithRtxStreamParams(
+ "cname", MAKE_VECTOR(kSenderSsrcs), MAKE_VECTOR(kSenderRtxSsrcs));
+
+ EXPECT_TRUE(send_channel_->AddSendStream(sender_sp));
+
+ static const uint32_t kReceiverSsrcs[] = {3};
+ static const uint32_t kReceiverRtxSsrcs[] = {2};
+
+ StreamParams receiver_sp = cricket::CreateSimWithRtxStreamParams(
+ "cname", MAKE_VECTOR(kReceiverSsrcs), MAKE_VECTOR(kReceiverRtxSsrcs));
+ EXPECT_TRUE(receive_channel_->AddRecvStream(receiver_sp));
+
+ cricket::VideoMediaSendInfo send_info;
+ cricket::VideoMediaReceiveInfo receive_info;
+ EXPECT_TRUE(send_channel_->GetStats(&send_info));
+ EXPECT_TRUE(receive_channel_->GetStats(&receive_info));
+
+ ASSERT_EQ(1u, send_info.senders.size());
+ ASSERT_EQ(1u, receive_info.receivers.size());
+
+ // Sanity check that the two directions use distinct groups, then verify
+ // each side's stats echo its own groups.
+ EXPECT_NE(sender_sp.ssrc_groups, receiver_sp.ssrc_groups);
+ EXPECT_EQ(sender_sp.ssrc_groups, send_info.senders[0].ssrc_groups);
+ EXPECT_EQ(receiver_sp.ssrc_groups, receive_info.receivers[0].ssrc_groups);
+}
+
+// Verifies that the receiver's codec_name stat is derived from the currently
+// received payload type: empty before receiving, the codec's name for a known
+// payload type, and empty again for an unknown one.
+TEST_F(WebRtcVideoChannelTest, MapsReceivedPayloadTypeToCodecName) {
+ FakeVideoReceiveStream* stream = AddRecvStream();
+ webrtc::VideoReceiveStreamInterface::Stats stats;
+
+ // Report no codec name before receiving.
+ stream->SetStats(stats);
+ cricket::VideoMediaSendInfo send_info;
+ cricket::VideoMediaReceiveInfo receive_info;
+ EXPECT_TRUE(send_channel_->GetStats(&send_info));
+ EXPECT_TRUE(receive_channel_->GetStats(&receive_info));
+
+ EXPECT_STREQ("", receive_info.receivers[0].codec_name.c_str());
+
+ // Report VP8 if we're receiving it.
+ stats.current_payload_type = GetEngineCodec("VP8").id;
+ stream->SetStats(stats);
+ EXPECT_TRUE(send_channel_->GetStats(&send_info));
+ EXPECT_TRUE(receive_channel_->GetStats(&receive_info));
+
+ EXPECT_STREQ(kVp8CodecName, receive_info.receivers[0].codec_name.c_str());
+
+ // Report no codec name for unknown payload types.
+ stats.current_payload_type = 3;
+ stream->SetStats(stats);
+ EXPECT_TRUE(send_channel_->GetStats(&send_info));
+ EXPECT_TRUE(receive_channel_->GetStats(&receive_info));
+
+ EXPECT_STREQ("", receive_info.receivers[0].codec_name.c_str());
+}
+
+// Tests that when we add a stream without SSRCs but with a stream_id, the
+// stream id is stored and used later, when the first packet arrives, to
+// create the receive stream with the proper sync label. Also tests that
+// resetting the unsignaled stream clears that cached stream id.
+TEST_F(WebRtcVideoChannelTest, RecvUnsignaledSsrcWithSignaledStreamId) {
+ const char kSyncLabel[] = "sync_label";
+ cricket::StreamParams unsignaled_stream;
+ unsignaled_stream.set_stream_ids({kSyncLabel});
+ ASSERT_TRUE(receive_channel_->AddRecvStream(unsignaled_stream));
+ receive_channel_->OnDemuxerCriteriaUpdatePending();
+ receive_channel_->OnDemuxerCriteriaUpdateComplete();
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ // The stream shouldn't have been created at this point because it doesn't
+ // have any SSRCs.
+ EXPECT_EQ(0u, fake_call_->GetVideoReceiveStreams().size());
+
+ // Create and deliver packet.
+ RtpPacketReceived packet;
+ packet.SetSsrc(kIncomingUnsignalledSsrc);
+ ReceivePacketAndAdvanceTime(packet);
+
+ // The stream should now be created with the appropriate sync label.
+ EXPECT_EQ(1u, fake_call_->GetVideoReceiveStreams().size());
+ EXPECT_EQ(kSyncLabel,
+ fake_call_->GetVideoReceiveStreams()[0]->GetConfig().sync_group);
+
+ // Reset the unsignaled stream to clear the cache. This deletes the receive
+ // stream.
+ receive_channel_->ResetUnsignaledRecvStream();
+ receive_channel_->OnDemuxerCriteriaUpdatePending();
+ EXPECT_EQ(0u, fake_call_->GetVideoReceiveStreams().size());
+
+ // Until the demuxer criteria has been updated, we ignore in-flight ssrcs of
+ // the recently removed unsignaled receive stream.
+ ReceivePacketAndAdvanceTime(packet);
+ EXPECT_EQ(0u, fake_call_->GetVideoReceiveStreams().size());
+
+ // After the demuxer criteria has been updated, we should proceed to create
+ // unsignalled receive streams. This time when a default video receive stream
+ // is created it won't have a sync_group.
+ receive_channel_->OnDemuxerCriteriaUpdateComplete();
+ ReceivePacketAndAdvanceTime(packet);
+ EXPECT_EQ(1u, fake_call_->GetVideoReceiveStreams().size());
+ EXPECT_TRUE(
+ fake_call_->GetVideoReceiveStreams()[0]->GetConfig().sync_group.empty());
+}
+
+// Verifies that ResetUnsignaledRecvStream() removes the default (unsignaled)
+// receive stream, so a subsequently signaled stream is the only receiver.
+TEST_F(WebRtcVideoChannelTest,
+ ResetUnsignaledRecvStreamDeletesAllDefaultStreams) {
+ // No receive streams to start with.
+ EXPECT_TRUE(fake_call_->GetVideoReceiveStreams().empty());
+
+ // Packet with unsignaled SSRC is received.
+ RtpPacketReceived packet;
+ packet.SetSsrc(kIncomingUnsignalledSsrc);
+ ReceivePacketAndAdvanceTime(packet);
+
+ // Default receive stream created.
+ const auto& receivers1 = fake_call_->GetVideoReceiveStreams();
+ ASSERT_EQ(receivers1.size(), 1u);
+ EXPECT_EQ(receivers1[0]->GetConfig().rtp.remote_ssrc,
+ kIncomingUnsignalledSsrc);
+
+ // Stream with another SSRC gets signaled.
+ receive_channel_->ResetUnsignaledRecvStream();
+ constexpr uint32_t kIncomingSignalledSsrc = kIncomingUnsignalledSsrc + 1;
+ ASSERT_TRUE(receive_channel_->AddRecvStream(
+ cricket::StreamParams::CreateLegacy(kIncomingSignalledSsrc)));
+
+ // New receiver is for the signaled stream.
+ const auto& receivers2 = fake_call_->GetVideoReceiveStreams();
+ ASSERT_EQ(receivers2.size(), 1u);
+ EXPECT_EQ(receivers2[0]->GetConfig().rtp.remote_ssrc, kIncomingSignalledSsrc);
+}
+
+// Verifies that while a demuxer criteria update is pending, packets with
+// unknown SSRCs are dropped instead of spawning unsignalled receive streams;
+// once the update completes, unknown SSRCs create streams again.
+TEST_F(WebRtcVideoChannelTest,
+ RecentlyAddedSsrcsDoNotCreateUnsignalledRecvStreams) {
+ const uint32_t kSsrc1 = 1;
+ const uint32_t kSsrc2 = 2;
+
+ // Starting point: receiving kSsrc1.
+ EXPECT_TRUE(
+ receive_channel_->AddRecvStream(StreamParams::CreateLegacy(kSsrc1)));
+ receive_channel_->OnDemuxerCriteriaUpdatePending();
+ receive_channel_->OnDemuxerCriteriaUpdateComplete();
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ EXPECT_EQ(fake_call_->GetVideoReceiveStreams().size(), 1u);
+
+ // If this is the only m= section the demuxer might be configured to forward
+ // all packets, regardless of ssrc, to this channel. When we go to multiple m=
+ // sections, there can thus be a window of time where packets that should
+ // never have belonged to this channel arrive anyway.
+
+ // Emulate a second m= section being created by updating the demuxer criteria
+ // without adding any streams.
+ receive_channel_->OnDemuxerCriteriaUpdatePending();
+
+ // Emulate there being in-flight packets for kSsrc1 and kSsrc2 arriving before
+ // the demuxer is updated.
+ {
+ // Receive a packet for kSsrc1.
+ RtpPacketReceived packet;
+ packet.SetSsrc(kSsrc1);
+ ReceivePacketAndAdvanceTime(packet);
+ }
+ {
+ // Receive a packet for kSsrc2.
+ RtpPacketReceived packet;
+ packet.SetSsrc(kSsrc2);
+ ReceivePacketAndAdvanceTime(packet);
+ }
+
+ // No unsignaled ssrc for kSsrc2 should have been created, but kSsrc1 should
+ // arrive since it already has a stream.
+ EXPECT_EQ(fake_call_->GetVideoReceiveStreams().size(), 1u);
+ EXPECT_EQ(fake_call_->GetDeliveredPacketsForSsrc(kSsrc1), 1u);
+ EXPECT_EQ(fake_call_->GetDeliveredPacketsForSsrc(kSsrc2), 0u);
+
+ // Signal that the demuxer update is complete. Because there are no more
+ // pending demuxer updates, receiving unknown ssrcs (kSsrc2) should again
+ // result in unsignalled receive streams being created.
+ receive_channel_->OnDemuxerCriteriaUpdateComplete();
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+
+ // Receive packets for kSsrc1 and kSsrc2 again.
+ {
+ // Receive a packet for kSsrc1.
+ RtpPacketReceived packet;
+ packet.SetSsrc(kSsrc1);
+ ReceivePacketAndAdvanceTime(packet);
+ }
+ {
+ // Receive a packet for kSsrc2.
+ RtpPacketReceived packet;
+ packet.SetSsrc(kSsrc2);
+ ReceivePacketAndAdvanceTime(packet);
+ }
+
+ // An unsignalled ssrc for kSsrc2 should be created and the packet counter
+ // should increase for both ssrcs.
+ EXPECT_EQ(fake_call_->GetVideoReceiveStreams().size(), 2u);
+ EXPECT_EQ(fake_call_->GetDeliveredPacketsForSsrc(kSsrc1), 2u);
+ EXPECT_EQ(fake_call_->GetDeliveredPacketsForSsrc(kSsrc2), 1u);
+}
+
+// Verifies that the SSRC of a recently removed receive stream is ignored
+// while a demuxer criteria update is pending (dropping in-flight packets),
+// and becomes eligible for unsignalled stream creation once the update
+// completes.
+TEST_F(WebRtcVideoChannelTest,
+ RecentlyRemovedSsrcsDoNotCreateUnsignalledRecvStreams) {
+ const uint32_t kSsrc1 = 1;
+ const uint32_t kSsrc2 = 2;
+
+ // Starting point: receiving kSsrc1 and kSsrc2.
+ EXPECT_TRUE(
+ receive_channel_->AddRecvStream(StreamParams::CreateLegacy(kSsrc1)));
+ EXPECT_TRUE(
+ receive_channel_->AddRecvStream(StreamParams::CreateLegacy(kSsrc2)));
+ receive_channel_->OnDemuxerCriteriaUpdatePending();
+ receive_channel_->OnDemuxerCriteriaUpdateComplete();
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ EXPECT_EQ(fake_call_->GetVideoReceiveStreams().size(), 2u);
+ EXPECT_EQ(fake_call_->GetDeliveredPacketsForSsrc(kSsrc1), 0u);
+ EXPECT_EQ(fake_call_->GetDeliveredPacketsForSsrc(kSsrc2), 0u);
+
+ // Remove kSsrc1, signal that a demuxer criteria update is pending, but not
+ // completed yet.
+ EXPECT_TRUE(receive_channel_->RemoveRecvStream(kSsrc1));
+ receive_channel_->OnDemuxerCriteriaUpdatePending();
+
+ // We only have a receiver for kSsrc2 now.
+ EXPECT_EQ(fake_call_->GetVideoReceiveStreams().size(), 1u);
+
+ // Emulate there being in-flight packets for kSsrc1 and kSsrc2 arriving before
+ // the demuxer is updated.
+ {
+ // Receive a packet for kSsrc1.
+ RtpPacketReceived packet;
+ packet.SetSsrc(kSsrc1);
+ ReceivePacketAndAdvanceTime(packet);
+ }
+ {
+ // Receive a packet for kSsrc2.
+ RtpPacketReceived packet;
+ packet.SetSsrc(kSsrc2);
+ ReceivePacketAndAdvanceTime(packet);
+ }
+
+ // No unsignaled ssrc for kSsrc1 should have been created, but the packet
+ // count for kSsrc2 should increase.
+ EXPECT_EQ(fake_call_->GetVideoReceiveStreams().size(), 1u);
+ EXPECT_EQ(fake_call_->GetDeliveredPacketsForSsrc(kSsrc1), 0u);
+ EXPECT_EQ(fake_call_->GetDeliveredPacketsForSsrc(kSsrc2), 1u);
+
+ // Signal that the demuxer update is complete. This means we should stop
+ // ignoring kSsrc1.
+ receive_channel_->OnDemuxerCriteriaUpdateComplete();
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+
+ // Receive packets for kSsrc1 and kSsrc2 again.
+ {
+ // Receive a packet for kSsrc1.
+ RtpPacketReceived packet;
+ packet.SetSsrc(kSsrc1);
+ ReceivePacketAndAdvanceTime(packet);
+ }
+ {
+ // Receive a packet for kSsrc2.
+ RtpPacketReceived packet;
+ packet.SetSsrc(kSsrc2);
+ ReceivePacketAndAdvanceTime(packet);
+ }
+
+ // An unsignalled ssrc for kSsrc1 should be created and the packet counter
+ // should increase for both ssrcs.
+ EXPECT_EQ(fake_call_->GetVideoReceiveStreams().size(), 2u);
+ EXPECT_EQ(fake_call_->GetDeliveredPacketsForSsrc(kSsrc1), 1u);
+ EXPECT_EQ(fake_call_->GetDeliveredPacketsForSsrc(kSsrc2), 2u);
+}
+
+// Exercises overlapping demuxer criteria updates: packets for an SSRC are
+// only dropped when its stream was removed after the most recently completed
+// update, and unsignalled stream creation resumes only after every pending
+// update has completed.
+TEST_F(WebRtcVideoChannelTest, MultiplePendingDemuxerCriteriaUpdates) {
+ const uint32_t kSsrc = 1;
+
+ // Starting point: receiving kSsrc.
+ EXPECT_TRUE(
+ receive_channel_->AddRecvStream(StreamParams::CreateLegacy(kSsrc)));
+ receive_channel_->OnDemuxerCriteriaUpdatePending();
+ receive_channel_->OnDemuxerCriteriaUpdateComplete();
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ ASSERT_EQ(fake_call_->GetVideoReceiveStreams().size(), 1u);
+
+ // Remove kSsrc...
+ EXPECT_TRUE(receive_channel_->RemoveRecvStream(kSsrc));
+ receive_channel_->OnDemuxerCriteriaUpdatePending();
+ EXPECT_EQ(fake_call_->GetVideoReceiveStreams().size(), 0u);
+ // And then add it back again, before the demuxer knows about the new
+ // criteria!
+ EXPECT_TRUE(
+ receive_channel_->AddRecvStream(StreamParams::CreateLegacy(kSsrc)));
+ receive_channel_->OnDemuxerCriteriaUpdatePending();
+ EXPECT_EQ(fake_call_->GetVideoReceiveStreams().size(), 1u);
+
+ // In-flight packets should arrive because the stream was recreated, even
+ // though demuxer criteria updates are pending...
+ {
+ RtpPacketReceived packet;
+ packet.SetSsrc(kSsrc);
+ ReceivePacketAndAdvanceTime(packet);
+ }
+ EXPECT_EQ(fake_call_->GetDeliveredPacketsForSsrc(kSsrc), 1u);
+
+ // Signal that the demuxer knows about the first update: the removal.
+ receive_channel_->OnDemuxerCriteriaUpdateComplete();
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+
+ // This still should not prevent in-flight packets from arriving because we
+ // have a receive stream for it.
+ {
+ RtpPacketReceived packet;
+ packet.SetSsrc(kSsrc);
+ ReceivePacketAndAdvanceTime(packet);
+ }
+ EXPECT_EQ(fake_call_->GetDeliveredPacketsForSsrc(kSsrc), 2u);
+
+ // Remove the kSsrc again while previous demuxer updates are still pending.
+ EXPECT_TRUE(receive_channel_->RemoveRecvStream(kSsrc));
+ receive_channel_->OnDemuxerCriteriaUpdatePending();
+ EXPECT_EQ(fake_call_->GetVideoReceiveStreams().size(), 0u);
+
+ // Now the packet should be dropped and not create an unsignalled receive
+ // stream.
+ {
+ RtpPacketReceived packet;
+ packet.SetSsrc(kSsrc);
+ ReceivePacketAndAdvanceTime(packet);
+ }
+ EXPECT_EQ(fake_call_->GetVideoReceiveStreams().size(), 0u);
+ EXPECT_EQ(fake_call_->GetDeliveredPacketsForSsrc(kSsrc), 2u);
+
+ // Signal that the demuxer knows about the second update: adding it back.
+ receive_channel_->OnDemuxerCriteriaUpdateComplete();
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+
+ // The packets should continue to be dropped because removal happened after
+ // the most recently completed demuxer update.
+ {
+ RtpPacketReceived packet;
+ packet.SetSsrc(kSsrc);
+ ReceivePacketAndAdvanceTime(packet);
+ }
+ EXPECT_EQ(fake_call_->GetVideoReceiveStreams().size(), 0u);
+ EXPECT_EQ(fake_call_->GetDeliveredPacketsForSsrc(kSsrc), 2u);
+
+ // Signal that the demuxer knows about the last update: the second removal.
+ receive_channel_->OnDemuxerCriteriaUpdateComplete();
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+
+ // If packets still arrive after the demuxer knows about the latest removal we
+ // should finally create an unsignalled receive stream.
+ {
+ RtpPacketReceived packet;
+ packet.SetSsrc(kSsrc);
+ ReceivePacketAndAdvanceTime(packet);
+ }
+ EXPECT_EQ(fake_call_->GetVideoReceiveStreams().size(), 1u);
+ EXPECT_EQ(fake_call_->GetDeliveredPacketsForSsrc(kSsrc), 3u);
+}
+
+// Verifies the cooldown on unsignalled stream replacement: a second unknown
+// SSRC is ignored until kUnsignalledReceiveStreamCooldownMs has elapsed since
+// the current unsignalled stream was created, after which it replaces it.
+TEST_F(WebRtcVideoChannelTest, UnsignalledSsrcHasACooldown) {
+ const uint32_t kSsrc1 = 1;
+ const uint32_t kSsrc2 = 2;
+
+ // Send packets for kSsrc1, creating an unsignalled receive stream.
+ {
+ // Receive a packet for kSsrc1.
+ RtpPacketReceived packet;
+ packet.SetSsrc(kSsrc1);
+ receive_channel_->OnPacketReceived(packet);
+ }
+
+ // Advance to just before the cooldown expires.
+ time_controller_.AdvanceTime(
+ webrtc::TimeDelta::Millis(kUnsignalledReceiveStreamCooldownMs - 1));
+
+ // We now have an unsignalled receive stream for kSsrc1.
+ EXPECT_EQ(fake_call_->GetVideoReceiveStreams().size(), 1u);
+ EXPECT_EQ(fake_call_->GetDeliveredPacketsForSsrc(kSsrc1), 1u);
+ EXPECT_EQ(fake_call_->GetDeliveredPacketsForSsrc(kSsrc2), 0u);
+
+ {
+ // Receive a packet for kSsrc2.
+ RtpPacketReceived packet;
+ packet.SetSsrc(kSsrc2);
+ receive_channel_->OnPacketReceived(packet);
+ }
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+
+ // Not enough time has passed to replace the unsignalled receive stream, so
+ // the kSsrc2 should be ignored.
+ EXPECT_EQ(fake_call_->GetVideoReceiveStreams().size(), 1u);
+ EXPECT_EQ(fake_call_->GetDeliveredPacketsForSsrc(kSsrc1), 1u);
+ EXPECT_EQ(fake_call_->GetDeliveredPacketsForSsrc(kSsrc2), 0u);
+
+ // Once the full cooldown has elapsed, kSsrc2 should trigger a new
+ // unsignalled receive stream that replaces the old one.
+ time_controller_.AdvanceTime(webrtc::TimeDelta::Millis(1));
+ {
+ // Receive a packet for kSsrc2.
+ RtpPacketReceived packet;
+ packet.SetSsrc(kSsrc2);
+ receive_channel_->OnPacketReceived(packet);
+ }
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+
+ // The old unsignalled receive stream was destroyed and replaced, so we still
+ // only have one unsignalled receive stream. But the packet counter for kSsrc2
+ // has now increased.
+ EXPECT_EQ(fake_call_->GetVideoReceiveStreams().size(), 1u);
+ EXPECT_EQ(fake_call_->GetDeliveredPacketsForSsrc(kSsrc1), 1u);
+ EXPECT_EQ(fake_call_->GetDeliveredPacketsForSsrc(kSsrc2), 1u);
+}
+
+// Test BaseMinimumPlayoutDelayMs on receive streams: both set and get fail
+// for unknown SSRCs, and a set value is visible both through the channel API
+// and on the underlying receive stream.
+TEST_F(WebRtcVideoChannelTest, BaseMinimumPlayoutDelayMs) {
+ // Test that set won't work for non-existing receive streams.
+ EXPECT_FALSE(receive_channel_->SetBaseMinimumPlayoutDelayMs(kSsrc + 2, 200));
+ // Test that get won't work for non-existing receive streams.
+ EXPECT_FALSE(receive_channel_->GetBaseMinimumPlayoutDelayMs(kSsrc + 2));
+
+ EXPECT_TRUE(AddRecvStream());
+ // Test that set works for the existing receive stream.
+ EXPECT_TRUE(receive_channel_->SetBaseMinimumPlayoutDelayMs(last_ssrc_, 200));
+ auto* recv_stream = fake_call_->GetVideoReceiveStream(last_ssrc_);
+ EXPECT_TRUE(recv_stream);
+ EXPECT_EQ(recv_stream->base_mininum_playout_delay_ms(), 200);
+ EXPECT_EQ(
+ receive_channel_->GetBaseMinimumPlayoutDelayMs(last_ssrc_).value_or(0),
+ 200);
+}
+
+// Test BaseMinimumPlayoutDelayMs on unsignaled receive streams: SSRC 0 acts
+// as the default, is inherited by a newly spawned unsignaled stream, and
+// later changes to SSRC 0 propagate to that stream.
+TEST_F(WebRtcVideoChannelTest, BaseMinimumPlayoutDelayMsUnsignaledRecvStream) {
+ absl::optional<int> delay_ms;
+ const FakeVideoReceiveStream* recv_stream;
+
+ // Set default stream with SSRC 0
+ EXPECT_TRUE(receive_channel_->SetBaseMinimumPlayoutDelayMs(0, 200));
+ EXPECT_EQ(200, receive_channel_->GetBaseMinimumPlayoutDelayMs(0).value_or(0));
+
+ // Spawn an unsignaled stream by sending a packet, it should inherit
+ // default delay 200.
+ RtpPacketReceived packet;
+ packet.SetSsrc(kIncomingUnsignalledSsrc);
+ ReceivePacketAndAdvanceTime(packet);
+
+ recv_stream = fake_call_->GetVideoReceiveStream(kIncomingUnsignalledSsrc);
+ EXPECT_EQ(recv_stream->base_mininum_playout_delay_ms(), 200);
+ delay_ms =
+ receive_channel_->GetBaseMinimumPlayoutDelayMs(kIncomingUnsignalledSsrc);
+ EXPECT_EQ(200, delay_ms.value_or(0));
+
+ // Check that now if we change delay for SSRC 0 it will change delay for the
+ // default receiving stream as well.
+ EXPECT_TRUE(receive_channel_->SetBaseMinimumPlayoutDelayMs(0, 300));
+ EXPECT_EQ(300, receive_channel_->GetBaseMinimumPlayoutDelayMs(0).value_or(0));
+ delay_ms =
+ receive_channel_->GetBaseMinimumPlayoutDelayMs(kIncomingUnsignalledSsrc);
+ EXPECT_EQ(300, delay_ms.value_or(0));
+ recv_stream = fake_call_->GetVideoReceiveStream(kIncomingUnsignalledSsrc);
+ EXPECT_EQ(recv_stream->base_mininum_playout_delay_ms(), 300);
+}
+
+// Shared helper: delivers a single packet with `payload_type` on an
+// unsignaled SSRC and asserts whether an unsignalled receive stream is
+// created, per `expect_created_receive_stream`. A RED RTX codec is added
+// first so that kRedRtxPayloadType is a known-but-non-media payload type.
+void WebRtcVideoChannelTest::TestReceiveUnsignaledSsrcPacket(
+ uint8_t payload_type,
+ bool expect_created_receive_stream) {
+ // kRedRtxPayloadType must currently be unused.
+ EXPECT_FALSE(FindCodecById(engine_.recv_codecs(), kRedRtxPayloadType));
+
+ // Add a RED RTX codec.
+ VideoCodec red_rtx_codec = cricket::CreateVideoRtxCodec(
+ kRedRtxPayloadType, GetEngineCodec("red").id);
+ recv_parameters_.codecs.push_back(red_rtx_codec);
+ EXPECT_TRUE(receive_channel_->SetReceiverParameters(recv_parameters_));
+
+ ASSERT_EQ(0u, fake_call_->GetVideoReceiveStreams().size());
+ RtpPacketReceived packet;
+ packet.SetPayloadType(payload_type);
+ packet.SetSsrc(kIncomingUnsignalledSsrc);
+ ReceivePacketAndAdvanceTime(packet);
+
+ if (expect_created_receive_stream) {
+ EXPECT_EQ(1u, fake_call_->GetVideoReceiveStreams().size())
+ << "Should have created a receive stream for payload type: "
+ << payload_type;
+ } else {
+ EXPECT_EQ(0u, fake_call_->GetVideoReceiveStreams().size())
+ << "Shouldn't have created a receive stream for payload type: "
+ << payload_type;
+ }
+}
+
+// Fixture variant that enables the field trial which discards packets with
+// unknown SSRCs instead of creating unsignalled receive streams.
+class WebRtcVideoChannelDiscardUnknownSsrcTest : public WebRtcVideoChannelTest {
+ public:
+ WebRtcVideoChannelDiscardUnknownSsrcTest()
+ : WebRtcVideoChannelTest(
+ "WebRTC-Video-DiscardPacketsWithUnknownSsrc/Enabled/") {}
+};
+
+// With the discard-unknown-SSRC field trial on, even a media (VP8) packet
+// must not create an unsignalled receive stream.
+TEST_F(WebRtcVideoChannelDiscardUnknownSsrcTest, NoUnsignalledStreamCreated) {
+ TestReceiveUnsignaledSsrcPacket(GetEngineCodec("VP8").id,
+ false /* expect_created_receive_stream */);
+}
+
+// A VP8 media packet on an unknown SSRC creates an unsignalled stream.
+TEST_F(WebRtcVideoChannelTest, Vp8PacketCreatesUnsignalledStream) {
+ TestReceiveUnsignaledSsrcPacket(GetEngineCodec("VP8").id,
+ true /* expect_created_receive_stream */);
+}
+
+// A VP9 media packet on an unknown SSRC creates an unsignalled stream.
+TEST_F(WebRtcVideoChannelTest, Vp9PacketCreatesUnsignalledStream) {
+ TestReceiveUnsignaledSsrcPacket(GetEngineCodec("VP9").id,
+ true /* expect_created_receive_stream */);
+}
+
+// An RTX packet (for VP8's default apt mapping) on an unknown SSRC also
+// creates an unsignalled stream.
+TEST_F(WebRtcVideoChannelTest, RtxPacketCreateUnsignalledStream) {
+ AssignDefaultAptRtxTypes();
+ const cricket::VideoCodec vp8 = GetEngineCodec("VP8");
+ const int rtx_vp8_payload_type = default_apt_rtx_types_[vp8.id];
+ TestReceiveUnsignaledSsrcPacket(rtx_vp8_payload_type,
+ true /* expect_created_receive_stream */);
+}
+
+// A ULPFEC packet on an unknown SSRC must not create an unsignalled stream.
+TEST_F(WebRtcVideoChannelTest, UlpfecPacketDoesntCreateUnsignalledStream) {
+ TestReceiveUnsignaledSsrcPacket(GetEngineCodec("ulpfec").id,
+ false /* expect_created_receive_stream */);
+}
+
+// A FlexFEC packet on an unknown SSRC must not create an unsignalled stream.
+TEST_F(WebRtcVideoChannelFlexfecRecvTest,
+ FlexfecPacketDoesntCreateUnsignalledStream) {
+ TestReceiveUnsignaledSsrcPacket(GetEngineCodec("flexfec-03").id,
+ false /* expect_created_receive_stream */);
+}
+
+// A RED RTX packet on an unknown SSRC must not create an unsignalled stream.
+TEST_F(WebRtcVideoChannelTest, RedRtxPacketDoesntCreateUnsignalledStream) {
+ TestReceiveUnsignaledSsrcPacket(kRedRtxPayloadType,
+ false /* expect_created_receive_stream */);
+}
+
+// Verifies that an RTX packet arriving after the media packet for an
+// unsignalled stream associates its SSRC as the stream's RTX SSRC without
+// creating or destroying any receive stream.
+TEST_F(WebRtcVideoChannelTest, RtxAfterMediaPacketUpdatesUnsignalledRtxSsrc) {
+ AssignDefaultAptRtxTypes();
+ const cricket::VideoCodec vp8 = GetEngineCodec("VP8");
+ const int payload_type = vp8.id;
+ const int rtx_vp8_payload_type = default_apt_rtx_types_[vp8.id];
+ const uint32_t ssrc = kIncomingUnsignalledSsrc;
+ const uint32_t rtx_ssrc = ssrc + 1;
+
+ // Send media packet.
+ RtpPacketReceived packet;
+ packet.SetPayloadType(payload_type);
+ packet.SetSsrc(ssrc);
+ ReceivePacketAndAdvanceTime(packet);
+ EXPECT_EQ(1u, fake_call_->GetVideoReceiveStreams().size())
+ << "Should have created a receive stream for payload type: "
+ << payload_type;
+
+ // Send rtx packet.
+ RtpPacketReceived rtx_packet;
+ rtx_packet.SetPayloadType(rtx_vp8_payload_type);
+ rtx_packet.SetSsrc(rtx_ssrc);
+ ReceivePacketAndAdvanceTime(rtx_packet);
+ EXPECT_EQ(1u, fake_call_->GetVideoReceiveStreams().size())
+ << "RTX packet should not have added or removed a receive stream";
+
+ auto recv_stream = fake_call_->GetVideoReceiveStreams().front();
+ auto& config = recv_stream->GetConfig();
+ EXPECT_EQ(config.rtp.remote_ssrc, ssrc)
+ << "Receive stream should have correct media ssrc";
+ EXPECT_EQ(config.rtp.rtx_ssrc, rtx_ssrc)
+ << "Receive stream should have correct rtx ssrc";
+ EXPECT_EQ(fake_call_->GetDeliveredPacketsForSsrc(ssrc), 1u);
+ EXPECT_EQ(fake_call_->GetDeliveredPacketsForSsrc(rtx_ssrc), 1u);
+}
+
+// Verifies that when an RTX packet arrives first (spawning an unsignalled
+// stream keyed on the RTX SSRC), a subsequent media packet immediately
+// recreates the stream keyed on the media SSRC — with no cooldown.
+TEST_F(WebRtcVideoChannelTest,
+ MediaPacketAfterRtxImmediatelyRecreatesUnsignalledStream) {
+ AssignDefaultAptRtxTypes();
+ const cricket::VideoCodec vp8 = GetEngineCodec("VP8");
+ const int payload_type = vp8.id;
+ const int rtx_vp8_payload_type = default_apt_rtx_types_[vp8.id];
+ const uint32_t ssrc = kIncomingUnsignalledSsrc;
+ const uint32_t rtx_ssrc = ssrc + 1;
+
+ // Send rtx packet.
+ RtpPacketReceived rtx_packet;
+ rtx_packet.SetPayloadType(rtx_vp8_payload_type);
+ rtx_packet.SetSsrc(rtx_ssrc);
+ receive_channel_->OnPacketReceived(rtx_packet);
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+ EXPECT_EQ(1u, fake_call_->GetVideoReceiveStreams().size());
+
+ // Send media packet.
+ RtpPacketReceived packet;
+ packet.SetPayloadType(payload_type);
+ packet.SetSsrc(ssrc);
+ ReceivePacketAndAdvanceTime(packet);
+ EXPECT_EQ(1u, fake_call_->GetVideoReceiveStreams().size());
+
+ // Check receive stream has been recreated with correct ssrcs.
+ auto recv_stream = fake_call_->GetVideoReceiveStreams().front();
+ auto& config = recv_stream->GetConfig();
+ EXPECT_EQ(config.rtp.remote_ssrc, ssrc)
+ << "Receive stream should have correct media ssrc";
+}
+
+// Test that receiving any unsignalled SSRC works even if it changes.
+// The first unsignalled SSRC received will create a default receive stream.
+// Any different unsignalled SSRC received will replace the default.
+TEST_F(WebRtcVideoChannelTest, ReceiveDifferentUnsignaledSsrc) {
+ // Allow receiving VP8, VP9, H264 (if enabled).
+ cricket::VideoReceiverParameters parameters;
+ parameters.codecs.push_back(GetEngineCodec("VP8"));
+ parameters.codecs.push_back(GetEngineCodec("VP9"));
+
+#if defined(WEBRTC_USE_H264)
+ cricket::VideoCodec H264codec = cricket::CreateVideoCodec(126, "H264");
+ parameters.codecs.push_back(H264codec);
+#endif
+
+ EXPECT_TRUE(receive_channel_->SetReceiverParameters(parameters));
+ // No receive streams yet.
+ ASSERT_EQ(0u, fake_call_->GetVideoReceiveStreams().size());
+ // Frames delivered on the default (unsignaled) stream should reach this
+ // renderer once it is installed as the default sink.
+ cricket::FakeVideoRenderer renderer;
+ receive_channel_->SetDefaultSink(&renderer);
+
+ // Receive VP8 packet on first SSRC.
+ RtpPacketReceived rtp_packet;
+ rtp_packet.SetPayloadType(GetEngineCodec("VP8").id);
+ rtp_packet.SetSsrc(kIncomingUnsignalledSsrc + 1);
+ ReceivePacketAndAdvanceTime(rtp_packet);
+ // VP8 packet should create default receive stream.
+ ASSERT_EQ(1u, fake_call_->GetVideoReceiveStreams().size());
+ FakeVideoReceiveStream* recv_stream = fake_call_->GetVideoReceiveStreams()[0];
+ EXPECT_EQ(rtp_packet.Ssrc(), recv_stream->GetConfig().rtp.remote_ssrc);
+ // Verify that the receive stream sinks to a renderer.
+ webrtc::VideoFrame video_frame =
+ webrtc::VideoFrame::Builder()
+ .set_video_frame_buffer(CreateBlackFrameBuffer(4, 4))
+ .set_timestamp_rtp(100)
+ .set_timestamp_us(0)
+ .set_rotation(webrtc::kVideoRotation_0)
+ .build();
+ recv_stream->InjectFrame(video_frame);
+ EXPECT_EQ(1, renderer.num_rendered_frames());
+
+ // Receive VP9 packet on second SSRC.
+ rtp_packet.SetPayloadType(GetEngineCodec("VP9").id);
+ rtp_packet.SetSsrc(kIncomingUnsignalledSsrc + 2);
+ ReceivePacketAndAdvanceTime(rtp_packet);
+ // VP9 packet should replace the default receive SSRC.
+ ASSERT_EQ(1u, fake_call_->GetVideoReceiveStreams().size());
+ recv_stream = fake_call_->GetVideoReceiveStreams()[0];
+ EXPECT_EQ(rtp_packet.Ssrc(), recv_stream->GetConfig().rtp.remote_ssrc);
+ // Verify that the receive stream sinks to a renderer.
+ webrtc::VideoFrame video_frame2 =
+ webrtc::VideoFrame::Builder()
+ .set_video_frame_buffer(CreateBlackFrameBuffer(4, 4))
+ .set_timestamp_rtp(200)
+ .set_timestamp_us(0)
+ .set_rotation(webrtc::kVideoRotation_0)
+ .build();
+ recv_stream->InjectFrame(video_frame2);
+ // Renderer count is cumulative across the recreated default streams.
+ EXPECT_EQ(2, renderer.num_rendered_frames());
+
+#if defined(WEBRTC_USE_H264)
+ // Receive H264 packet on third SSRC.
+ rtp_packet.SetPayloadType(126);
+ rtp_packet.SetSsrc(kIncomingUnsignalledSsrc + 3);
+ ReceivePacketAndAdvanceTime(rtp_packet);
+ // H264 packet should replace the default receive SSRC.
+ ASSERT_EQ(1u, fake_call_->GetVideoReceiveStreams().size());
+ recv_stream = fake_call_->GetVideoReceiveStreams()[0];
+ EXPECT_EQ(rtp_packet.Ssrc(), recv_stream->GetConfig().rtp.remote_ssrc);
+ // Verify that the receive stream sinks to a renderer.
+ webrtc::VideoFrame video_frame3 =
+ webrtc::VideoFrame::Builder()
+ .set_video_frame_buffer(CreateBlackFrameBuffer(4, 4))
+ .set_timestamp_rtp(300)
+ .set_timestamp_us(0)
+ .set_rotation(webrtc::kVideoRotation_0)
+ .build();
+ recv_stream->InjectFrame(video_frame3);
+ EXPECT_EQ(3, renderer.num_rendered_frames());
+#endif
+}
+
+// This test verifies that when a new default stream is created for a new
+// unsignaled SSRC, the new stream does not overwrite any old stream that had
+// been the default receive stream before being properly signaled.
+TEST_F(WebRtcVideoChannelTest,
+ NewUnsignaledStreamDoesNotDestroyPreviouslyUnsignaledStream) {
+ cricket::VideoReceiverParameters parameters;
+ parameters.codecs.push_back(GetEngineCodec("VP8"));
+ ASSERT_TRUE(receive_channel_->SetReceiverParameters(parameters));
+
+ // No streams signaled and no packets received, so we should not have any
+ // stream objects created yet.
+ EXPECT_EQ(0u, fake_call_->GetVideoReceiveStreams().size());
+
+ // Receive packet on an unsignaled SSRC.
+ RtpPacketReceived rtp_packet;
+ rtp_packet.SetPayloadType(GetEngineCodec("VP8").id);
+ rtp_packet.SetSsrc(kSsrcs3[0]);
+ ReceivePacketAndAdvanceTime(rtp_packet);
+ // Default receive stream should be created.
+ ASSERT_EQ(1u, fake_call_->GetVideoReceiveStreams().size());
+ FakeVideoReceiveStream* recv_stream0 =
+ fake_call_->GetVideoReceiveStreams()[0];
+ EXPECT_EQ(kSsrcs3[0], recv_stream0->GetConfig().rtp.remote_ssrc);
+
+ // Signal the SSRC.
+ EXPECT_TRUE(receive_channel_->AddRecvStream(
+ cricket::StreamParams::CreateLegacy(kSsrcs3[0])));
+ // Signaling the already-received SSRC must not create an extra stream.
+ ASSERT_EQ(1u, fake_call_->GetVideoReceiveStreams().size());
+ recv_stream0 = fake_call_->GetVideoReceiveStreams()[0];
+ EXPECT_EQ(kSsrcs3[0], recv_stream0->GetConfig().rtp.remote_ssrc);
+
+ // Receive packet on a different unsignaled SSRC.
+ rtp_packet.SetSsrc(kSsrcs3[1]);
+ ReceivePacketAndAdvanceTime(rtp_packet);
+ // New default receive stream should be created, but old stream should remain.
+ ASSERT_EQ(2u, fake_call_->GetVideoReceiveStreams().size());
+ EXPECT_EQ(recv_stream0, fake_call_->GetVideoReceiveStreams()[0]);
+ FakeVideoReceiveStream* recv_stream1 =
+ fake_call_->GetVideoReceiveStreams()[1];
+ EXPECT_EQ(kSsrcs3[1], recv_stream1->GetConfig().rtp.remote_ssrc);
+}
+
+// Verifies interaction between the per-stream max bitrate and the global
+// (sender-parameters) max bandwidth for an existing send stream.
+// NOTE(review): SetAndExpectMaxBitrate(stream_max, global_max, expected)
+// presumably applies both caps and checks the resulting encoder max bitrate —
+// confirm against the fixture helper's definition.
+TEST_F(WebRtcVideoChannelTest, CanSetMaxBitrateForExistingStream) {
+ AddSendStream();
+
+ webrtc::test::FrameForwarder frame_forwarder;
+ EXPECT_TRUE(
+ send_channel_->SetVideoSend(last_ssrc_, nullptr, &frame_forwarder));
+ EXPECT_TRUE(send_channel_->SetSend(true));
+ frame_forwarder.IncomingCapturedFrame(frame_source_.GetFrame());
+
+ int default_encoder_bitrate = GetMaxEncoderBitrate();
+ EXPECT_GT(default_encoder_bitrate, 1000);
+
+ // TODO(skvlad): Resolve the inconsistency between the interpretation
+ // of the global bitrate limit for audio and video:
+ // - Audio: max_bandwidth_bps = 0 - fail the operation,
+ // max_bandwidth_bps = -1 - remove the bandwidth limit
+ // - Video: max_bandwidth_bps = 0 - remove the bandwidth limit,
+ // max_bandwidth_bps = -1 - remove the bandwidth limit
+
+ SetAndExpectMaxBitrate(1000, 0, 1000);
+ SetAndExpectMaxBitrate(1000, 800, 800);
+ SetAndExpectMaxBitrate(600, 800, 600);
+ SetAndExpectMaxBitrate(0, 800, 800);
+ SetAndExpectMaxBitrate(0, 0, default_encoder_bitrate);
+
+ // Detach the source to tear down cleanly.
+ EXPECT_TRUE(send_channel_->SetVideoSend(last_ssrc_, nullptr, nullptr));
+}
+
+// Setting RTP parameters on an SSRC that has no send stream must fail.
+TEST_F(WebRtcVideoChannelTest, CannotSetMaxBitrateForNonexistentStream) {
+ // Without a stream there are no encodings to configure.
+ webrtc::RtpParameters params =
+ send_channel_->GetRtpSendParameters(last_ssrc_);
+ EXPECT_EQ(0u, params.encodings.size());
+
+ // Even with an encoding supplied, the set must be rejected.
+ params.encodings.emplace_back();
+ EXPECT_FALSE(
+ send_channel_->SetRtpSendParameters(last_ssrc_, params).ok());
+}
+
+// Setting a per-encoding max bitrate below the default minimum must clamp
+// the resulting VideoStream's min bitrate down to that max.
+TEST_F(WebRtcVideoChannelTest,
+ SetLowMaxBitrateOverwritesVideoStreamMinBitrate) {
+ FakeVideoSendStream* stream = AddSendStream();
+
+ webrtc::RtpParameters parameters =
+ send_channel_->GetRtpSendParameters(last_ssrc_);
+ EXPECT_EQ(1UL, parameters.encodings.size());
+ EXPECT_FALSE(parameters.encodings[0].max_bitrate_bps.has_value());
+ EXPECT_TRUE(send_channel_->SetRtpSendParameters(last_ssrc_, parameters).ok());
+
+ // Note that this is testing the behavior of the FakeVideoSendStream, which
+ // also calls to CreateEncoderStreams to get the VideoStreams, so essentially
+ // we are just testing the behavior of
+ // EncoderStreamFactory::CreateEncoderStreams.
+ ASSERT_EQ(1UL, stream->GetVideoStreams().size());
+ EXPECT_EQ(webrtc::kDefaultMinVideoBitrateBps,
+ stream->GetVideoStreams()[0].min_bitrate_bps);
+
+ // Set a low max bitrate & check that VideoStream.min_bitrate_bps is limited
+ // by this amount.
+ parameters = send_channel_->GetRtpSendParameters(last_ssrc_);
+ int low_max_bitrate_bps = webrtc::kDefaultMinVideoBitrateBps - 1000;
+ parameters.encodings[0].max_bitrate_bps = low_max_bitrate_bps;
+ EXPECT_TRUE(send_channel_->SetRtpSendParameters(last_ssrc_, parameters).ok());
+
+ // Both min and max collapse to the requested (low) max.
+ ASSERT_EQ(1UL, stream->GetVideoStreams().size());
+ EXPECT_EQ(low_max_bitrate_bps, stream->GetVideoStreams()[0].min_bitrate_bps);
+ EXPECT_EQ(low_max_bitrate_bps, stream->GetVideoStreams()[0].max_bitrate_bps);
+}
+
+// Setting a per-encoding min bitrate above the current stream max must raise
+// the VideoStream's max bitrate to match.
+TEST_F(WebRtcVideoChannelTest,
+ SetHighMinBitrateOverwritesVideoStreamMaxBitrate) {
+ FakeVideoSendStream* stream = AddSendStream();
+
+ // Note that this is testing the behavior of the FakeVideoSendStream, which
+ // also calls to CreateEncoderStreams to get the VideoStreams, so essentially
+ // we are just testing the behavior of
+ // EncoderStreamFactory::CreateEncoderStreams.
+ ASSERT_EQ(1UL, stream->GetVideoStreams().size());
+ // One above the current max, so it is guaranteed to conflict.
+ int high_min_bitrate_bps = stream->GetVideoStreams()[0].max_bitrate_bps + 1;
+
+ // Set a high min bitrate and check that max_bitrate_bps is adjusted up.
+ webrtc::RtpParameters parameters =
+ send_channel_->GetRtpSendParameters(last_ssrc_);
+ EXPECT_EQ(1UL, parameters.encodings.size());
+ parameters.encodings[0].min_bitrate_bps = high_min_bitrate_bps;
+ EXPECT_TRUE(send_channel_->SetRtpSendParameters(last_ssrc_, parameters).ok());
+
+ // Both min and max now equal the requested (high) min.
+ ASSERT_EQ(1UL, stream->GetVideoStreams().size());
+ EXPECT_EQ(high_min_bitrate_bps, stream->GetVideoStreams()[0].min_bitrate_bps);
+ EXPECT_EQ(high_min_bitrate_bps, stream->GetVideoStreams()[0].max_bitrate_bps);
+}
+
+// A per-encoding min bitrate above the global max bandwidth must be clamped
+// down to the global limit.
+TEST_F(WebRtcVideoChannelTest,
+ SetMinBitrateAboveMaxBitrateLimitAdjustsMinBitrateDown) {
+ send_parameters_.max_bandwidth_bps = 99999;
+ FakeVideoSendStream* stream = AddSendStream();
+ ExpectSetMaxBitrate(send_parameters_.max_bandwidth_bps);
+ ASSERT_TRUE(send_channel_->SetSenderParameters(send_parameters_));
+ ASSERT_EQ(1UL, stream->GetVideoStreams().size());
+ EXPECT_EQ(webrtc::kDefaultMinVideoBitrateBps,
+ stream->GetVideoStreams()[0].min_bitrate_bps);
+ EXPECT_EQ(send_parameters_.max_bandwidth_bps,
+ stream->GetVideoStreams()[0].max_bitrate_bps);
+
+ // Set min bitrate above global max bitrate and check that min_bitrate_bps is
+ // adjusted down.
+ webrtc::RtpParameters parameters =
+ send_channel_->GetRtpSendParameters(last_ssrc_);
+ EXPECT_EQ(1UL, parameters.encodings.size());
+ // One bps above the global cap of 99999 set at the top of the test.
+ parameters.encodings[0].min_bitrate_bps = 99999 + 1;
+ EXPECT_TRUE(send_channel_->SetRtpSendParameters(last_ssrc_, parameters).ok());
+ ASSERT_EQ(1UL, stream->GetVideoStreams().size());
+ EXPECT_EQ(send_parameters_.max_bandwidth_bps,
+ stream->GetVideoStreams()[0].min_bitrate_bps);
+ EXPECT_EQ(send_parameters_.max_bandwidth_bps,
+ stream->GetVideoStreams()[0].max_bitrate_bps);
+}
+
+// A per-encoding max_framerate must propagate to the single VideoStream.
+TEST_F(WebRtcVideoChannelTest, SetMaxFramerateOneStream) {
+ FakeVideoSendStream* stream = AddSendStream();
+
+ webrtc::RtpParameters parameters =
+ send_channel_->GetRtpSendParameters(last_ssrc_);
+ EXPECT_EQ(1UL, parameters.encodings.size());
+ EXPECT_FALSE(parameters.encodings[0].max_framerate.has_value());
+ EXPECT_TRUE(send_channel_->SetRtpSendParameters(last_ssrc_, parameters).ok());
+
+ // Note that this is testing the behavior of the FakeVideoSendStream, which
+ // also calls to CreateEncoderStreams to get the VideoStreams, so essentially
+ // we are just testing the behavior of
+ // EncoderStreamFactory::CreateEncoderStreams.
+ ASSERT_EQ(1UL, stream->GetVideoStreams().size());
+ EXPECT_EQ(kDefaultVideoMaxFramerate,
+ stream->GetVideoStreams()[0].max_framerate);
+
+ // Set max framerate and check that VideoStream.max_framerate is set.
+ const int kNewMaxFramerate = kDefaultVideoMaxFramerate - 1;
+ parameters = send_channel_->GetRtpSendParameters(last_ssrc_);
+ parameters.encodings[0].max_framerate = kNewMaxFramerate;
+ EXPECT_TRUE(send_channel_->SetRtpSendParameters(last_ssrc_, parameters).ok());
+
+ ASSERT_EQ(1UL, stream->GetVideoStreams().size());
+ EXPECT_EQ(kNewMaxFramerate, stream->GetVideoStreams()[0].max_framerate);
+}
+
+// A per-encoding num_temporal_layers must propagate to the single VideoStream.
+TEST_F(WebRtcVideoChannelTest, SetNumTemporalLayersForSingleStream) {
+ FakeVideoSendStream* stream = AddSendStream();
+
+ webrtc::RtpParameters parameters =
+ send_channel_->GetRtpSendParameters(last_ssrc_);
+ EXPECT_EQ(1UL, parameters.encodings.size());
+ EXPECT_FALSE(parameters.encodings[0].num_temporal_layers.has_value());
+ EXPECT_TRUE(send_channel_->SetRtpSendParameters(last_ssrc_, parameters).ok());
+
+ // Note that this is testing the behavior of the FakeVideoSendStream, which
+ // also calls to CreateEncoderStreams to get the VideoStreams, so essentially
+ // we are just testing the behavior of
+ // EncoderStreamFactory::CreateEncoderStreams.
+ ASSERT_EQ(1UL, stream->GetVideoStreams().size());
+ EXPECT_FALSE(stream->GetVideoStreams()[0].num_temporal_layers.has_value());
+
+ // Set temporal layers and check that VideoStream.num_temporal_layers is set.
+ parameters = send_channel_->GetRtpSendParameters(last_ssrc_);
+ parameters.encodings[0].num_temporal_layers = 2;
+ EXPECT_TRUE(send_channel_->SetRtpSendParameters(last_ssrc_, parameters).ok());
+
+ ASSERT_EQ(1UL, stream->GetVideoStreams().size());
+ EXPECT_EQ(2UL, stream->GetVideoStreams()[0].num_temporal_layers);
+}
+
+// For a single (non-simulcast) stream, SetRtpSendParameters must reject any
+// encoding count other than exactly one.
+TEST_F(WebRtcVideoChannelTest,
+ CannotSetRtpSendParametersWithIncorrectNumberOfEncodings) {
+ AddSendStream();
+ webrtc::RtpParameters rtp_params =
+ send_channel_->GetRtpSendParameters(last_ssrc_);
+ // Two or more encodings should result in failure.
+ rtp_params.encodings.emplace_back();
+ EXPECT_FALSE(
+ send_channel_->SetRtpSendParameters(last_ssrc_, rtp_params).ok());
+ // Zero encodings should also fail.
+ rtp_params.encodings.clear();
+ EXPECT_FALSE(
+ send_channel_->SetRtpSendParameters(last_ssrc_, rtp_params).ok());
+}
+
+// For a simulcast stream, SetRtpSendParameters must reject an encoding count
+// that does not match the number of simulcast layers.
+TEST_F(WebRtcVideoChannelTest,
+ CannotSetSimulcastRtpSendParametersWithIncorrectNumberOfEncodings) {
+ std::vector<uint32_t> simulcast_ssrcs = MAKE_VECTOR(kSsrcs3);
+ AddSendStream(CreateSimStreamParams("cname", simulcast_ssrcs));
+
+ webrtc::RtpParameters rtp_params =
+ send_channel_->GetRtpSendParameters(last_ssrc_);
+
+ // Additional encodings should result in failure.
+ rtp_params.encodings.emplace_back();
+ EXPECT_FALSE(
+ send_channel_->SetRtpSendParameters(last_ssrc_, rtp_params).ok());
+ // Zero encodings should also fail.
+ rtp_params.encodings.clear();
+ EXPECT_FALSE(
+ send_channel_->SetRtpSendParameters(last_ssrc_, rtp_params).ok());
+}
+
+// The SSRC is read-only through RtpParameters: attempting to overwrite it
+// must make SetRtpSendParameters fail.
+TEST_F(WebRtcVideoChannelTest, CannotSetSsrcInRtpSendParameters) {
+ AddSendStream();
+ webrtc::RtpParameters send_params =
+ send_channel_->GetRtpSendParameters(last_ssrc_);
+ send_params.encodings[0].ssrc = 0xdeadbeef;
+ auto result = send_channel_->SetRtpSendParameters(last_ssrc_, send_params);
+ EXPECT_FALSE(result.ok());
+}
+
+// Tests that when RTCRtpEncodingParameters.bitrate_priority gets set to
+// a value <= 0, setting the parameters returns false.
+TEST_F(WebRtcVideoChannelTest, SetRtpSendParametersInvalidBitratePriority) {
+ AddSendStream();
+ webrtc::RtpParameters params =
+ send_channel_->GetRtpSendParameters(last_ssrc_);
+ EXPECT_EQ(1UL, params.encodings.size());
+ // The freshly-created encoding carries the default priority.
+ EXPECT_EQ(webrtc::kDefaultBitratePriority,
+ params.encodings[0].bitrate_priority);
+
+ // Zero and negative priorities are both invalid and must be rejected.
+ for (double bad_priority : {0.0, -2.0}) {
+ params.encodings[0].bitrate_priority = bad_priority;
+ EXPECT_FALSE(
+ send_channel_->SetRtpSendParameters(last_ssrc_, params).ok());
+ }
+}
+
+// Tests that the RTCRtpEncodingParameters.bitrate_priority gets set
+// properly on the VideoChannel and propagates down to the video encoder.
+TEST_F(WebRtcVideoChannelTest, SetRtpSendParametersPriorityOneStream) {
+ AddSendStream();
+ webrtc::RtpParameters parameters =
+ send_channel_->GetRtpSendParameters(last_ssrc_);
+ EXPECT_EQ(1UL, parameters.encodings.size());
+ EXPECT_EQ(webrtc::kDefaultBitratePriority,
+ parameters.encodings[0].bitrate_priority);
+
+ // Change the value and set it on the VideoChannel.
+ double new_bitrate_priority = 2.0;
+ parameters.encodings[0].bitrate_priority = new_bitrate_priority;
+ EXPECT_TRUE(send_channel_->SetRtpSendParameters(last_ssrc_, parameters).ok());
+
+ // Verify that the encoding parameters bitrate_priority is set for the
+ // VideoChannel.
+ parameters = send_channel_->GetRtpSendParameters(last_ssrc_);
+ EXPECT_EQ(1UL, parameters.encodings.size());
+ EXPECT_EQ(new_bitrate_priority, parameters.encodings[0].bitrate_priority);
+
+ // Verify that the new value propagated down to the encoder.
+ std::vector<FakeVideoSendStream*> video_send_streams =
+ fake_call_->GetVideoSendStreams();
+ EXPECT_EQ(1UL, video_send_streams.size());
+ FakeVideoSendStream* video_send_stream = video_send_streams.front();
+ // Check that the WebRtcVideoSendStream updated the VideoEncoderConfig
+ // appropriately.
+ EXPECT_EQ(new_bitrate_priority,
+ video_send_stream->GetEncoderConfig().bitrate_priority);
+ // Check that the vector of VideoStreams also was propagated correctly. Note
+ // that this is testing the behavior of the FakeVideoSendStream, which mimics
+ // the calls to CreateEncoderStreams to get the VideoStreams.
+ EXPECT_EQ(absl::optional<double>(new_bitrate_priority),
+ video_send_stream->GetVideoStreams()[0].bitrate_priority);
+}
+
+// Tests that the RTCRtpEncodingParameters.bitrate_priority is set for the
+// VideoChannel and the value propagates to the video encoder with all
+// simulcast streams.
+TEST_F(WebRtcVideoChannelTest, SetRtpSendParametersPrioritySimulcastStreams) {
+ // Create the stream params with multiple ssrcs for simulcast.
+ const size_t kNumSimulcastStreams = 3;
+ std::vector<uint32_t> ssrcs = MAKE_VECTOR(kSsrcs3);
+ StreamParams stream_params = CreateSimStreamParams("cname", ssrcs);
+ AddSendStream(stream_params);
+ uint32_t primary_ssrc = stream_params.first_ssrc();
+
+ // Using the FrameForwarder, we manually send a full size
+ // frame. This creates multiple VideoStreams for all simulcast layers when
+ // reconfiguring, and allows us to test this behavior.
+ webrtc::test::FrameForwarder frame_forwarder;
+ VideoOptions options;
+ EXPECT_TRUE(
+ send_channel_->SetVideoSend(primary_ssrc, &options, &frame_forwarder));
+ send_channel_->SetSend(true);
+ frame_forwarder.IncomingCapturedFrame(frame_source_.GetFrame(
+ 1920, 1080, webrtc::VideoRotation::kVideoRotation_0,
+ rtc::kNumMicrosecsPerSec / 30));
+
+ // Get and set the rtp encoding parameters.
+ webrtc::RtpParameters parameters =
+ send_channel_->GetRtpSendParameters(primary_ssrc);
+ EXPECT_EQ(kNumSimulcastStreams, parameters.encodings.size());
+ EXPECT_EQ(webrtc::kDefaultBitratePriority,
+ parameters.encodings[0].bitrate_priority);
+ // Change the value and set it on the VideoChannel.
+ double new_bitrate_priority = 2.0;
+ parameters.encodings[0].bitrate_priority = new_bitrate_priority;
+ EXPECT_TRUE(
+ send_channel_->SetRtpSendParameters(primary_ssrc, parameters).ok());
+
+ // Verify that the encoding parameters priority is set on the VideoChannel.
+ parameters = send_channel_->GetRtpSendParameters(primary_ssrc);
+ EXPECT_EQ(kNumSimulcastStreams, parameters.encodings.size());
+ EXPECT_EQ(new_bitrate_priority, parameters.encodings[0].bitrate_priority);
+
+ // Verify that the new value propagated down to the encoder.
+ std::vector<FakeVideoSendStream*> video_send_streams =
+ fake_call_->GetVideoSendStreams();
+ EXPECT_EQ(1UL, video_send_streams.size());
+ FakeVideoSendStream* video_send_stream = video_send_streams.front();
+ // Check that the WebRtcVideoSendStream updated the VideoEncoderConfig
+ // appropriately.
+ EXPECT_EQ(kNumSimulcastStreams,
+ video_send_stream->GetEncoderConfig().number_of_streams);
+ EXPECT_EQ(new_bitrate_priority,
+ video_send_stream->GetEncoderConfig().bitrate_priority);
+ // Check that the vector of VideoStreams also propagated correctly. The
+ // FakeVideoSendStream calls CreateEncoderStreams, and we are testing that
+ // these are created appropriately for the simulcast case.
+ EXPECT_EQ(kNumSimulcastStreams, video_send_stream->GetVideoStreams().size());
+ EXPECT_EQ(absl::optional<double>(new_bitrate_priority),
+ video_send_stream->GetVideoStreams()[0].bitrate_priority);
+ // Since we are only setting bitrate priority on the first encoding, the
+ // other VideoStreams are left without a priority (absl::nullopt).
+ EXPECT_EQ(absl::nullopt,
+ video_send_stream->GetVideoStreams()[1].bitrate_priority);
+ EXPECT_EQ(absl::nullopt,
+ video_send_stream->GetVideoStreams()[2].bitrate_priority);
+ EXPECT_TRUE(send_channel_->SetVideoSend(primary_ssrc, nullptr, nullptr));
+}
+
+// Verifies that scale_resolution_down_by is applied per simulcast layer for
+// VP8, regardless of the order the scale factors appear across encodings.
+TEST_F(WebRtcVideoChannelTest,
+ GetAndSetRtpSendParametersScaleResolutionDownByVP8) {
+ VideoSenderParameters parameters;
+ parameters.codecs.push_back(cricket::CreateVideoCodec(kVp8CodecName));
+ ASSERT_TRUE(send_channel_->SetSenderParameters(parameters));
+ FakeVideoSendStream* stream = SetUpSimulcast(true, /*with_rtx=*/false);
+
+ webrtc::test::FrameForwarder frame_forwarder;
+ // 720p capture source; all expected layer sizes below derive from 1280x720.
+ FakeFrameSource frame_source(1280, 720, rtc::kNumMicrosecsPerSec / 30);
+
+ VideoOptions options;
+ EXPECT_TRUE(
+ send_channel_->SetVideoSend(last_ssrc_, &options, &frame_forwarder));
+ send_channel_->SetSend(true);
+
+ // Try layers in natural order (smallest to largest).
+ {
+ auto rtp_parameters = send_channel_->GetRtpSendParameters(last_ssrc_);
+ ASSERT_EQ(3u, rtp_parameters.encodings.size());
+ rtp_parameters.encodings[0].scale_resolution_down_by = 4.0;
+ rtp_parameters.encodings[1].scale_resolution_down_by = 2.0;
+ rtp_parameters.encodings[2].scale_resolution_down_by = 1.0;
+ auto result =
+ send_channel_->SetRtpSendParameters(last_ssrc_, rtp_parameters);
+ ASSERT_TRUE(result.ok());
+
+ frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame());
+
+ std::vector<webrtc::VideoStream> video_streams = stream->GetVideoStreams();
+ ASSERT_EQ(3u, video_streams.size());
+ EXPECT_EQ(320u, video_streams[0].width);
+ EXPECT_EQ(180u, video_streams[0].height);
+ EXPECT_EQ(640u, video_streams[1].width);
+ EXPECT_EQ(360u, video_streams[1].height);
+ EXPECT_EQ(1280u, video_streams[2].width);
+ EXPECT_EQ(720u, video_streams[2].height);
+ }
+
+ // Try layers in reverse natural order (largest to smallest).
+ {
+ auto rtp_parameters = send_channel_->GetRtpSendParameters(last_ssrc_);
+ ASSERT_EQ(3u, rtp_parameters.encodings.size());
+ rtp_parameters.encodings[0].scale_resolution_down_by = 1.0;
+ rtp_parameters.encodings[1].scale_resolution_down_by = 2.0;
+ rtp_parameters.encodings[2].scale_resolution_down_by = 4.0;
+ auto result =
+ send_channel_->SetRtpSendParameters(last_ssrc_, rtp_parameters);
+ ASSERT_TRUE(result.ok());
+
+ frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame());
+
+ std::vector<webrtc::VideoStream> video_streams = stream->GetVideoStreams();
+ ASSERT_EQ(3u, video_streams.size());
+ EXPECT_EQ(1280u, video_streams[0].width);
+ EXPECT_EQ(720u, video_streams[0].height);
+ EXPECT_EQ(640u, video_streams[1].width);
+ EXPECT_EQ(360u, video_streams[1].height);
+ EXPECT_EQ(320u, video_streams[2].width);
+ EXPECT_EQ(180u, video_streams[2].height);
+ }
+
+ // Try layers in mixed order.
+ {
+ auto rtp_parameters = send_channel_->GetRtpSendParameters(last_ssrc_);
+ ASSERT_EQ(3u, rtp_parameters.encodings.size());
+ rtp_parameters.encodings[0].scale_resolution_down_by = 10.0;
+ rtp_parameters.encodings[1].scale_resolution_down_by = 2.0;
+ rtp_parameters.encodings[2].scale_resolution_down_by = 4.0;
+ auto result =
+ send_channel_->SetRtpSendParameters(last_ssrc_, rtp_parameters);
+ ASSERT_TRUE(result.ok());
+
+ frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame());
+
+ std::vector<webrtc::VideoStream> video_streams = stream->GetVideoStreams();
+ ASSERT_EQ(3u, video_streams.size());
+ EXPECT_EQ(128u, video_streams[0].width);
+ EXPECT_EQ(72u, video_streams[0].height);
+ EXPECT_EQ(640u, video_streams[1].width);
+ EXPECT_EQ(360u, video_streams[1].height);
+ EXPECT_EQ(320u, video_streams[2].width);
+ EXPECT_EQ(180u, video_streams[2].height);
+ }
+
+ // Try with a missing scale setting, defaults to 1.0 if any other is set.
+ {
+ auto rtp_parameters = send_channel_->GetRtpSendParameters(last_ssrc_);
+ ASSERT_EQ(3u, rtp_parameters.encodings.size());
+ rtp_parameters.encodings[0].scale_resolution_down_by = 1.0;
+ rtp_parameters.encodings[1].scale_resolution_down_by.reset();
+ rtp_parameters.encodings[2].scale_resolution_down_by = 4.0;
+ auto result =
+ send_channel_->SetRtpSendParameters(last_ssrc_, rtp_parameters);
+ ASSERT_TRUE(result.ok());
+
+ frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame());
+
+ std::vector<webrtc::VideoStream> video_streams = stream->GetVideoStreams();
+ ASSERT_EQ(3u, video_streams.size());
+ EXPECT_EQ(1280u, video_streams[0].width);
+ EXPECT_EQ(720u, video_streams[0].height);
+ EXPECT_EQ(1280u, video_streams[1].width);
+ EXPECT_EQ(720u, video_streams[1].height);
+ EXPECT_EQ(320u, video_streams[2].width);
+ EXPECT_EQ(180u, video_streams[2].height);
+ }
+
+ EXPECT_TRUE(send_channel_->SetVideoSend(last_ssrc_, nullptr, nullptr));
+}
+
+// Verifies layer resolutions stay cleanly divisible when the capture size is
+// odd (VP8), via the simulcast-resolution normalization field trial.
+TEST_F(WebRtcVideoChannelTest,
+ GetAndSetRtpSendParametersScaleResolutionDownByVP8WithOddResolution) {
+ // Ensure that the top layer has width and height divisible by 2^3,
+ // so that the bottom layer has width and height divisible by 2.
+ // TODO(bugs.webrtc.org/8785): Remove this field trial when we fully trust
+ // the number of simulcast layers set by the app.
+ webrtc::test::ScopedKeyValueConfig field_trial(
+ field_trials_, "WebRTC-NormalizeSimulcastResolution/Enabled-3/");
+
+ // Set up WebRtcVideoChannel for 3-layer VP8 simulcast.
+ VideoSenderParameters parameters;
+ parameters.codecs.push_back(cricket::CreateVideoCodec(kVp8CodecName));
+ ASSERT_TRUE(send_channel_->SetSenderParameters(parameters));
+ FakeVideoSendStream* stream = SetUpSimulcast(true, /*with_rtx=*/false);
+ webrtc::test::FrameForwarder frame_forwarder;
+ EXPECT_TRUE(send_channel_->SetVideoSend(last_ssrc_, /*options=*/nullptr,
+ &frame_forwarder));
+ send_channel_->SetSend(true);
+
+ // Set `scale_resolution_down_by`'s.
+ auto rtp_parameters = send_channel_->GetRtpSendParameters(last_ssrc_);
+ ASSERT_EQ(rtp_parameters.encodings.size(), 3u);
+ rtp_parameters.encodings[0].scale_resolution_down_by = 1.0;
+ rtp_parameters.encodings[1].scale_resolution_down_by = 2.0;
+ rtp_parameters.encodings[2].scale_resolution_down_by = 4.0;
+ const auto result =
+ send_channel_->SetRtpSendParameters(last_ssrc_, rtp_parameters);
+ ASSERT_TRUE(result.ok());
+
+ // Use a capture resolution whose width and height are not divisible by 2^3.
+ // (See field trial set at the top of the test.)
+ FakeFrameSource frame_source(2007, 1207, rtc::kNumMicrosecsPerSec / 30);
+ frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame());
+
+ // Ensure the scaling is correct.
+ const auto video_streams = stream->GetVideoStreams();
+ ASSERT_EQ(video_streams.size(), 3u);
+ // Ensure that we round the capture resolution down for the top layer...
+ EXPECT_EQ(video_streams[0].width, 2000u);
+ EXPECT_EQ(video_streams[0].height, 1200u);
+ EXPECT_EQ(video_streams[1].width, 1000u);
+ EXPECT_EQ(video_streams[1].height, 600u);
+ // ...and that the bottom layer has a width/height divisible by 2.
+ EXPECT_EQ(video_streams[2].width, 500u);
+ EXPECT_EQ(video_streams[2].height, 300u);
+
+ // Tear down.
+ EXPECT_TRUE(send_channel_->SetVideoSend(last_ssrc_, nullptr, nullptr));
+}
+
+// Verifies that scale_resolution_down_by is applied per simulcast layer for
+// H264, regardless of the order the scale factors appear across encodings.
+// (H264 counterpart of the VP8 test above.)
+TEST_F(WebRtcVideoChannelTest,
+ GetAndSetRtpSendParametersScaleResolutionDownByH264) {
+ encoder_factory_->AddSupportedVideoCodecType(kH264CodecName);
+ VideoSenderParameters parameters;
+ parameters.codecs.push_back(cricket::CreateVideoCodec(kH264CodecName));
+ ASSERT_TRUE(send_channel_->SetSenderParameters(parameters));
+ FakeVideoSendStream* stream = SetUpSimulcast(true, /*with_rtx=*/false);
+
+ webrtc::test::FrameForwarder frame_forwarder;
+ // 720p capture source; all expected layer sizes below derive from 1280x720.
+ FakeFrameSource frame_source(1280, 720, rtc::kNumMicrosecsPerSec / 30);
+
+ VideoOptions options;
+ EXPECT_TRUE(
+ send_channel_->SetVideoSend(last_ssrc_, &options, &frame_forwarder));
+ send_channel_->SetSend(true);
+
+ // Try layers in natural order (smallest to largest).
+ {
+ auto rtp_parameters = send_channel_->GetRtpSendParameters(last_ssrc_);
+ ASSERT_EQ(3u, rtp_parameters.encodings.size());
+ rtp_parameters.encodings[0].scale_resolution_down_by = 4.0;
+ rtp_parameters.encodings[1].scale_resolution_down_by = 2.0;
+ rtp_parameters.encodings[2].scale_resolution_down_by = 1.0;
+ auto result =
+ send_channel_->SetRtpSendParameters(last_ssrc_, rtp_parameters);
+ ASSERT_TRUE(result.ok());
+
+ frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame());
+
+ std::vector<webrtc::VideoStream> video_streams = stream->GetVideoStreams();
+ ASSERT_EQ(3u, video_streams.size());
+ EXPECT_EQ(320u, video_streams[0].width);
+ EXPECT_EQ(180u, video_streams[0].height);
+ EXPECT_EQ(640u, video_streams[1].width);
+ EXPECT_EQ(360u, video_streams[1].height);
+ EXPECT_EQ(1280u, video_streams[2].width);
+ EXPECT_EQ(720u, video_streams[2].height);
+ }
+
+ // Try layers in reverse natural order (largest to smallest).
+ {
+ auto rtp_parameters = send_channel_->GetRtpSendParameters(last_ssrc_);
+ ASSERT_EQ(3u, rtp_parameters.encodings.size());
+ rtp_parameters.encodings[0].scale_resolution_down_by = 1.0;
+ rtp_parameters.encodings[1].scale_resolution_down_by = 2.0;
+ rtp_parameters.encodings[2].scale_resolution_down_by = 4.0;
+ auto result =
+ send_channel_->SetRtpSendParameters(last_ssrc_, rtp_parameters);
+ ASSERT_TRUE(result.ok());
+
+ frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame());
+
+ std::vector<webrtc::VideoStream> video_streams = stream->GetVideoStreams();
+ ASSERT_EQ(3u, video_streams.size());
+ EXPECT_EQ(1280u, video_streams[0].width);
+ EXPECT_EQ(720u, video_streams[0].height);
+ EXPECT_EQ(640u, video_streams[1].width);
+ EXPECT_EQ(360u, video_streams[1].height);
+ EXPECT_EQ(320u, video_streams[2].width);
+ EXPECT_EQ(180u, video_streams[2].height);
+ }
+
+ // Try layers in mixed order.
+ {
+ auto rtp_parameters = send_channel_->GetRtpSendParameters(last_ssrc_);
+ ASSERT_EQ(3u, rtp_parameters.encodings.size());
+ rtp_parameters.encodings[0].scale_resolution_down_by = 10.0;
+ rtp_parameters.encodings[1].scale_resolution_down_by = 2.0;
+ rtp_parameters.encodings[2].scale_resolution_down_by = 4.0;
+ auto result =
+ send_channel_->SetRtpSendParameters(last_ssrc_, rtp_parameters);
+ ASSERT_TRUE(result.ok());
+
+ frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame());
+
+ std::vector<webrtc::VideoStream> video_streams = stream->GetVideoStreams();
+ ASSERT_EQ(3u, video_streams.size());
+ EXPECT_EQ(128u, video_streams[0].width);
+ EXPECT_EQ(72u, video_streams[0].height);
+ EXPECT_EQ(640u, video_streams[1].width);
+ EXPECT_EQ(360u, video_streams[1].height);
+ EXPECT_EQ(320u, video_streams[2].width);
+ EXPECT_EQ(180u, video_streams[2].height);
+ }
+
+ // Try with a missing scale setting, defaults to 1.0 if any other is set.
+ {
+ auto rtp_parameters = send_channel_->GetRtpSendParameters(last_ssrc_);
+ ASSERT_EQ(3u, rtp_parameters.encodings.size());
+ rtp_parameters.encodings[0].scale_resolution_down_by = 1.0;
+ rtp_parameters.encodings[1].scale_resolution_down_by.reset();
+ rtp_parameters.encodings[2].scale_resolution_down_by = 4.0;
+ auto result =
+ send_channel_->SetRtpSendParameters(last_ssrc_, rtp_parameters);
+ ASSERT_TRUE(result.ok());
+
+ frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame());
+
+ std::vector<webrtc::VideoStream> video_streams = stream->GetVideoStreams();
+ ASSERT_EQ(3u, video_streams.size());
+ EXPECT_EQ(1280u, video_streams[0].width);
+ EXPECT_EQ(720u, video_streams[0].height);
+ EXPECT_EQ(1280u, video_streams[1].width);
+ EXPECT_EQ(720u, video_streams[1].height);
+ EXPECT_EQ(320u, video_streams[2].width);
+ EXPECT_EQ(180u, video_streams[2].height);
+ }
+ EXPECT_TRUE(send_channel_->SetVideoSend(last_ssrc_, nullptr, nullptr));
+}
+
+// Verifies layer resolutions stay cleanly divisible when the capture size is
+// odd (H264), via the simulcast-resolution normalization field trial.
+TEST_F(WebRtcVideoChannelTest,
+ GetAndSetRtpSendParametersScaleResolutionDownByH264WithOddResolution) {
+ // Ensure that the top layer has width and height divisible by 2^3,
+ // so that the bottom layer has width and height divisible by 2.
+ // TODO(bugs.webrtc.org/8785): Remove this field trial when we fully trust
+ // the number of simulcast layers set by the app.
+ webrtc::test::ScopedKeyValueConfig field_trial(
+ field_trials_, "WebRTC-NormalizeSimulcastResolution/Enabled-3/");
+
+ // Set up WebRtcVideoChannel for 3-layer H264 simulcast.
+ encoder_factory_->AddSupportedVideoCodecType(kH264CodecName);
+ VideoSenderParameters parameters;
+ parameters.codecs.push_back(cricket::CreateVideoCodec(kH264CodecName));
+ ASSERT_TRUE(send_channel_->SetSenderParameters(parameters));
+ FakeVideoSendStream* stream = SetUpSimulcast(true, /*with_rtx=*/false);
+ webrtc::test::FrameForwarder frame_forwarder;
+ EXPECT_TRUE(send_channel_->SetVideoSend(last_ssrc_, /*options=*/nullptr,
+ &frame_forwarder));
+ send_channel_->SetSend(true);
+
+ // Set `scale_resolution_down_by`'s.
+ auto rtp_parameters = send_channel_->GetRtpSendParameters(last_ssrc_);
+ ASSERT_EQ(rtp_parameters.encodings.size(), 3u);
+ rtp_parameters.encodings[0].scale_resolution_down_by = 1.0;
+ rtp_parameters.encodings[1].scale_resolution_down_by = 2.0;
+ rtp_parameters.encodings[2].scale_resolution_down_by = 4.0;
+ const auto result =
+ send_channel_->SetRtpSendParameters(last_ssrc_, rtp_parameters);
+ ASSERT_TRUE(result.ok());
+
+ // Use a capture resolution whose width and height are not divisible by 2^3.
+ // (See field trial set at the top of the test.)
+ FakeFrameSource frame_source(2007, 1207, rtc::kNumMicrosecsPerSec / 30);
+ frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame());
+
+ // Ensure the scaling is correct.
+ const auto video_streams = stream->GetVideoStreams();
+ ASSERT_EQ(video_streams.size(), 3u);
+ // Ensure that we round the capture resolution down for the top layer...
+ EXPECT_EQ(video_streams[0].width, 2000u);
+ EXPECT_EQ(video_streams[0].height, 1200u);
+ EXPECT_EQ(video_streams[1].width, 1000u);
+ EXPECT_EQ(video_streams[1].height, 600u);
+ // ...and that the bottom layer has a width/height divisible by 2.
+ EXPECT_EQ(video_streams[2].width, 500u);
+ EXPECT_EQ(video_streams[2].height, 300u);
+
+ // Tear down.
+ EXPECT_TRUE(send_channel_->SetVideoSend(last_ssrc_, nullptr, nullptr));
+}
+
+// Checks that per-encoding max_framerate is unset by default and that values
+// set via SetRtpSendParameters round-trip through GetRtpSendParameters.
+TEST_F(WebRtcVideoChannelTest, GetAndSetRtpSendParametersMaxFramerate) {
+  SetUpSimulcast(true, /*with_rtx=*/false);
+
+  // Get and set the rtp encoding parameters.
+  webrtc::RtpParameters parameters =
+      send_channel_->GetRtpSendParameters(last_ssrc_);
+  EXPECT_EQ(kNumSimulcastStreams, parameters.encodings.size());
+  for (const auto& encoding : parameters.encodings) {
+    EXPECT_FALSE(encoding.max_framerate);
+  }
+
+  // Change the value and set it on the VideoChannel.
+  parameters.encodings[0].max_framerate = 10;
+  parameters.encodings[1].max_framerate = 20;
+  parameters.encodings[2].max_framerate = 25;
+  EXPECT_TRUE(send_channel_->SetRtpSendParameters(last_ssrc_, parameters).ok());
+
+  // Verify that the framerates are set on the VideoChannel.
+  parameters = send_channel_->GetRtpSendParameters(last_ssrc_);
+  EXPECT_EQ(kNumSimulcastStreams, parameters.encodings.size());
+  EXPECT_EQ(10, parameters.encodings[0].max_framerate);
+  EXPECT_EQ(20, parameters.encodings[1].max_framerate);
+  EXPECT_EQ(25, parameters.encodings[2].max_framerate);
+}
+
+// Checks that SetRtpSendParameters rejects num_temporal_layers values outside
+// the valid range with RTCErrorType::INVALID_RANGE.
+TEST_F(WebRtcVideoChannelTest,
+       SetRtpSendParametersNumTemporalLayersFailsForInvalidRange) {
+  SetUpSimulcast(true, /*with_rtx=*/false);
+
+  // Get and set the rtp encoding parameters.
+  webrtc::RtpParameters parameters =
+      send_channel_->GetRtpSendParameters(last_ssrc_);
+  EXPECT_EQ(kNumSimulcastStreams, parameters.encodings.size());
+
+  // Num temporal layers should be in the range [1, kMaxTemporalStreams].
+  parameters.encodings[0].num_temporal_layers = 0;
+  EXPECT_EQ(webrtc::RTCErrorType::INVALID_RANGE,
+            send_channel_->SetRtpSendParameters(last_ssrc_, parameters).type());
+  parameters.encodings[0].num_temporal_layers = webrtc::kMaxTemporalStreams + 1;
+  EXPECT_EQ(webrtc::RTCErrorType::INVALID_RANGE,
+            send_channel_->SetRtpSendParameters(last_ssrc_, parameters).type());
+}
+
+// Checks that num_temporal_layers is unset by default and that values set via
+// SetRtpSendParameters round-trip through GetRtpSendParameters.
+TEST_F(WebRtcVideoChannelTest, GetAndSetRtpSendParametersNumTemporalLayers) {
+  SetUpSimulcast(true, /*with_rtx=*/false);
+
+  // Get and set the rtp encoding parameters.
+  webrtc::RtpParameters parameters =
+      send_channel_->GetRtpSendParameters(last_ssrc_);
+  EXPECT_EQ(kNumSimulcastStreams, parameters.encodings.size());
+  for (const auto& encoding : parameters.encodings)
+    EXPECT_FALSE(encoding.num_temporal_layers);
+
+  // Change the value and set it on the VideoChannel.
+  parameters.encodings[0].num_temporal_layers = 3;
+  parameters.encodings[1].num_temporal_layers = 3;
+  parameters.encodings[2].num_temporal_layers = 3;
+  EXPECT_TRUE(send_channel_->SetRtpSendParameters(last_ssrc_, parameters).ok());
+
+  // Verify that the number of temporal layers are set on the VideoChannel.
+  parameters = send_channel_->GetRtpSendParameters(last_ssrc_);
+  EXPECT_EQ(kNumSimulcastStreams, parameters.encodings.size());
+  EXPECT_EQ(3, parameters.encodings[0].num_temporal_layers);
+  EXPECT_EQ(3, parameters.encodings[1].num_temporal_layers);
+  EXPECT_EQ(3, parameters.encodings[2].num_temporal_layers);
+}
+
+// Verifies that per-encoding num_temporal_layers values are propagated into
+// the VideoEncoderConfig simulcast layers and the generated VideoStreams, and
+// that re-applying unchanged parameters does not reconfigure the encoder.
+TEST_F(WebRtcVideoChannelTest, NumTemporalLayersPropagatedToEncoder) {
+  FakeVideoSendStream* stream = SetUpSimulcast(true, /*with_rtx=*/false);
+
+  // Send a full size frame so all simulcast layers are used when reconfiguring.
+  webrtc::test::FrameForwarder frame_forwarder;
+  VideoOptions options;
+  EXPECT_TRUE(
+      send_channel_->SetVideoSend(last_ssrc_, &options, &frame_forwarder));
+  send_channel_->SetSend(true);
+  frame_forwarder.IncomingCapturedFrame(frame_source_.GetFrame());
+
+  // Get and set the rtp encoding parameters.
+  // Change the value and set it on the VideoChannel.
+  webrtc::RtpParameters parameters =
+      send_channel_->GetRtpSendParameters(last_ssrc_);
+  EXPECT_EQ(kNumSimulcastStreams, parameters.encodings.size());
+  parameters.encodings[0].num_temporal_layers = 3;
+  parameters.encodings[1].num_temporal_layers = 2;
+  parameters.encodings[2].num_temporal_layers = 1;
+  EXPECT_TRUE(send_channel_->SetRtpSendParameters(last_ssrc_, parameters).ok());
+
+  // Verify that the new value is propagated down to the encoder.
+  // Check that WebRtcVideoSendStream updates VideoEncoderConfig correctly.
+  EXPECT_EQ(2, stream->num_encoder_reconfigurations());
+  webrtc::VideoEncoderConfig encoder_config = stream->GetEncoderConfig().Copy();
+  EXPECT_EQ(kNumSimulcastStreams, encoder_config.number_of_streams);
+  EXPECT_EQ(kNumSimulcastStreams, encoder_config.simulcast_layers.size());
+  EXPECT_EQ(3UL, encoder_config.simulcast_layers[0].num_temporal_layers);
+  EXPECT_EQ(2UL, encoder_config.simulcast_layers[1].num_temporal_layers);
+  EXPECT_EQ(1UL, encoder_config.simulcast_layers[2].num_temporal_layers);
+
+  // FakeVideoSendStream calls CreateEncoderStreams, test that the vector of
+  // VideoStreams are created appropriately for the simulcast case.
+  EXPECT_EQ(kNumSimulcastStreams, stream->GetVideoStreams().size());
+  EXPECT_EQ(3UL, stream->GetVideoStreams()[0].num_temporal_layers);
+  EXPECT_EQ(2UL, stream->GetVideoStreams()[1].num_temporal_layers);
+  EXPECT_EQ(1UL, stream->GetVideoStreams()[2].num_temporal_layers);
+
+  // No parameter changed, encoder should not be reconfigured.
+  EXPECT_TRUE(send_channel_->SetRtpSendParameters(last_ssrc_, parameters).ok());
+  EXPECT_EQ(2, stream->num_encoder_reconfigurations());
+
+  EXPECT_TRUE(send_channel_->SetVideoSend(last_ssrc_, nullptr, nullptr));
+}
+
+// Verifies that a simulcast layer whose num_temporal_layers is left unset
+// stays unset in the VideoEncoderConfig and receives the default value
+// (3 temporal layers) in the generated VideoStream, while explicitly set
+// layers keep their configured values.
+TEST_F(WebRtcVideoChannelTest,
+       DefaultValuePropagatedToEncoderForUnsetNumTemporalLayers) {
+  const size_t kDefaultNumTemporalLayers = 3;
+  FakeVideoSendStream* stream = SetUpSimulcast(true, /*with_rtx=*/false);
+
+  // Send a full size frame so all simulcast layers are used when reconfiguring.
+  webrtc::test::FrameForwarder frame_forwarder;
+  VideoOptions options;
+  EXPECT_TRUE(
+      send_channel_->SetVideoSend(last_ssrc_, &options, &frame_forwarder));
+  send_channel_->SetSend(true);
+  frame_forwarder.IncomingCapturedFrame(frame_source_.GetFrame());
+
+  // Change rtp encoding parameters, leaving the middle layer unset.
+  webrtc::RtpParameters parameters =
+      send_channel_->GetRtpSendParameters(last_ssrc_);
+  EXPECT_EQ(kNumSimulcastStreams, parameters.encodings.size());
+  parameters.encodings[0].num_temporal_layers = 2;
+  parameters.encodings[2].num_temporal_layers = 1;
+  EXPECT_TRUE(send_channel_->SetRtpSendParameters(last_ssrc_, parameters).ok());
+
+  // Verify that only the explicitly set values reach the encoder config; the
+  // unset middle layer carries no value at this level.
+  webrtc::VideoEncoderConfig encoder_config = stream->GetEncoderConfig().Copy();
+  EXPECT_EQ(kNumSimulcastStreams, encoder_config.number_of_streams);
+  EXPECT_EQ(kNumSimulcastStreams, encoder_config.simulcast_layers.size());
+  EXPECT_EQ(2UL, encoder_config.simulcast_layers[0].num_temporal_layers);
+  EXPECT_FALSE(encoder_config.simulcast_layers[1].num_temporal_layers);
+  EXPECT_EQ(1UL, encoder_config.simulcast_layers[2].num_temporal_layers);
+
+  // FakeVideoSendStream calls CreateEncoderStreams, test that the vector of
+  // VideoStreams are created appropriately for the simulcast case.
+  EXPECT_EQ(kNumSimulcastStreams, stream->GetVideoStreams().size());
+  EXPECT_EQ(2UL, stream->GetVideoStreams()[0].num_temporal_layers);
+  EXPECT_EQ(kDefaultNumTemporalLayers,
+            stream->GetVideoStreams()[1].num_temporal_layers);
+  EXPECT_EQ(1UL, stream->GetVideoStreams()[2].num_temporal_layers);
+
+  EXPECT_TRUE(send_channel_->SetVideoSend(last_ssrc_, nullptr, nullptr));
+}
+
+// Verifies that a simulcast layer whose max_framerate is left unset carries
+// -1 in the VideoEncoderConfig and receives kDefaultVideoMaxFramerate in the
+// generated VideoStream, while explicitly set layers keep their values.
+TEST_F(WebRtcVideoChannelTest,
+       DefaultValuePropagatedToEncoderForUnsetFramerate) {
+  const std::vector<webrtc::VideoStream> kDefault = GetSimulcastBitrates720p();
+  FakeVideoSendStream* stream = SetUpSimulcast(true, /*with_rtx=*/false);
+
+  // Send a full size frame so all simulcast layers are used when reconfiguring.
+  webrtc::test::FrameForwarder frame_forwarder;
+  VideoOptions options;
+  EXPECT_TRUE(
+      send_channel_->SetVideoSend(last_ssrc_, &options, &frame_forwarder));
+  send_channel_->SetSend(true);
+  frame_forwarder.IncomingCapturedFrame(frame_source_.GetFrame());
+
+  // Get and set the rtp encoding parameters.
+  // Change the value and set it on the VideoChannel.
+  webrtc::RtpParameters parameters =
+      send_channel_->GetRtpSendParameters(last_ssrc_);
+  EXPECT_EQ(kNumSimulcastStreams, parameters.encodings.size());
+  parameters.encodings[0].max_framerate = 15;
+  parameters.encodings[2].max_framerate = 20;
+  EXPECT_TRUE(send_channel_->SetRtpSendParameters(last_ssrc_, parameters).ok());
+
+  // Verify that the new value propagated down to the encoder.
+  // Check that WebRtcVideoSendStream updates VideoEncoderConfig correctly.
+  webrtc::VideoEncoderConfig encoder_config = stream->GetEncoderConfig().Copy();
+  EXPECT_EQ(kNumSimulcastStreams, encoder_config.number_of_streams);
+  EXPECT_EQ(kNumSimulcastStreams, encoder_config.simulcast_layers.size());
+  EXPECT_EQ(15, encoder_config.simulcast_layers[0].max_framerate);
+  EXPECT_EQ(-1, encoder_config.simulcast_layers[1].max_framerate);
+  EXPECT_EQ(20, encoder_config.simulcast_layers[2].max_framerate);
+
+  // FakeVideoSendStream calls CreateEncoderStreams, test that the vector of
+  // VideoStreams are created appropriately for the simulcast case.
+  // The maximum `max_framerate` is used, kDefaultVideoMaxFramerate: 60.
+  EXPECT_EQ(kNumSimulcastStreams, stream->GetVideoStreams().size());
+  EXPECT_EQ(15, stream->GetVideoStreams()[0].max_framerate);
+  EXPECT_EQ(kDefaultVideoMaxFramerate,
+            stream->GetVideoStreams()[1].max_framerate);
+  EXPECT_EQ(20, stream->GetVideoStreams()[2].max_framerate);
+
+  EXPECT_TRUE(send_channel_->SetVideoSend(last_ssrc_, nullptr, nullptr));
+}
+
+// Verifies that encodings with an unset scalability_mode, or one the encoder
+// does not support (L3T3 here, since only L1T1-L1T3 are advertised), fall
+// back to the default scalability mode, while a supported mode (L1T3) is
+// applied as requested. Also checks that a no-op SetRtpSendParameters does
+// not reconfigure the encoder.
+TEST_F(WebRtcVideoChannelTest, FallbackForUnsetOrUnsupportedScalabilityMode) {
+  const absl::InlinedVector<ScalabilityMode, webrtc::kScalabilityModeCount>
+      kSupportedModes = {ScalabilityMode::kL1T1, ScalabilityMode::kL1T2,
+                         ScalabilityMode::kL1T3};
+
+  encoder_factory_->AddSupportedVideoCodec(webrtc::SdpVideoFormat(
+      "VP8", webrtc::SdpVideoFormat::Parameters(), kSupportedModes));
+
+  FakeVideoSendStream* stream = SetUpSimulcast(true, /*with_rtx=*/false);
+
+  // Send a full size frame so all simulcast layers are used when reconfiguring.
+  webrtc::test::FrameForwarder frame_forwarder;
+  VideoOptions options;
+  EXPECT_TRUE(
+      send_channel_->SetVideoSend(last_ssrc_, &options, &frame_forwarder));
+  send_channel_->SetSend(true);
+  frame_forwarder.IncomingCapturedFrame(frame_source_.GetFrame());
+
+  // Set scalability mode.
+  webrtc::RtpParameters parameters =
+      send_channel_->GetRtpSendParameters(last_ssrc_);
+  EXPECT_EQ(kNumSimulcastStreams, parameters.encodings.size());
+  parameters.encodings[0].scalability_mode = absl::nullopt;
+  parameters.encodings[1].scalability_mode = "L1T3";  // Supported.
+  parameters.encodings[2].scalability_mode = "L3T3";  // Unsupported.
+  EXPECT_TRUE(send_channel_->SetRtpSendParameters(last_ssrc_, parameters).ok());
+
+  // Verify that the new value is propagated down to the encoder.
+  // Check that WebRtcVideoSendStream updates VideoEncoderConfig correctly.
+  const absl::optional<ScalabilityMode> kDefaultScalabilityMode =
+      webrtc::ScalabilityModeFromString(kDefaultScalabilityModeStr);
+  EXPECT_EQ(2, stream->num_encoder_reconfigurations());
+  webrtc::VideoEncoderConfig encoder_config = stream->GetEncoderConfig().Copy();
+  EXPECT_EQ(kNumSimulcastStreams, encoder_config.number_of_streams);
+  EXPECT_THAT(encoder_config.simulcast_layers,
+              ElementsAre(Field(&webrtc::VideoStream::scalability_mode,
+                                kDefaultScalabilityMode),
+                          Field(&webrtc::VideoStream::scalability_mode,
+                                ScalabilityMode::kL1T3),
+                          Field(&webrtc::VideoStream::scalability_mode,
+                                kDefaultScalabilityMode)));
+
+  // FakeVideoSendStream calls CreateEncoderStreams, test that the vector of
+  // VideoStreams are created appropriately for the simulcast case.
+  EXPECT_THAT(stream->GetVideoStreams(),
+              ElementsAre(Field(&webrtc::VideoStream::scalability_mode,
+                                kDefaultScalabilityMode),
+                          Field(&webrtc::VideoStream::scalability_mode,
+                                ScalabilityMode::kL1T3),
+                          Field(&webrtc::VideoStream::scalability_mode,
+                                kDefaultScalabilityMode)));
+
+  // GetParameters.
+  parameters = send_channel_->GetRtpSendParameters(last_ssrc_);
+  EXPECT_THAT(
+      parameters.encodings,
+      ElementsAre(
+          Field(&webrtc::RtpEncodingParameters::scalability_mode,
+                kDefaultScalabilityModeStr),
+          Field(&webrtc::RtpEncodingParameters::scalability_mode, "L1T3"),
+          Field(&webrtc::RtpEncodingParameters::scalability_mode,
+                kDefaultScalabilityModeStr)));
+
+  // No parameters changed, encoder should not be reconfigured.
+  EXPECT_TRUE(send_channel_->SetRtpSendParameters(last_ssrc_, parameters).ok());
+  EXPECT_EQ(2, stream->num_encoder_reconfigurations());
+
+  EXPECT_TRUE(send_channel_->SetVideoSend(last_ssrc_, nullptr, nullptr));
+}
+
+// Verifies that a scalability mode supported by the current codec (VP9 with
+// L3T3) is applied, and that switching to a codec without that support (VP8,
+// only L1T1 here) recreates the stream and falls back to the default
+// scalability mode on every layer.
+TEST_F(WebRtcVideoChannelTest,
+       DefaultValueUsedIfScalabilityModeIsUnsupportedByCodec) {
+  const absl::InlinedVector<ScalabilityMode, webrtc::kScalabilityModeCount>
+      kVp9SupportedModes = {ScalabilityMode::kL3T3};
+
+  encoder_factory_->AddSupportedVideoCodec(webrtc::SdpVideoFormat(
+      "VP8", webrtc::SdpVideoFormat::Parameters(), {ScalabilityMode::kL1T1}));
+  encoder_factory_->AddSupportedVideoCodec(webrtc::SdpVideoFormat(
+      "VP9", webrtc::SdpVideoFormat::Parameters(), {ScalabilityMode::kL3T3}));
+
+  cricket::VideoSenderParameters send_parameters;
+  send_parameters.codecs.push_back(GetEngineCodec("VP9"));
+  EXPECT_TRUE(send_channel_->SetSenderParameters(send_parameters));
+
+  FakeVideoSendStream* stream = SetUpSimulcast(true, /*with_rtx=*/false);
+
+  // Send a full size frame so all simulcast layers are used when reconfiguring.
+  webrtc::test::FrameForwarder frame_forwarder;
+  VideoOptions options;
+  EXPECT_TRUE(
+      send_channel_->SetVideoSend(last_ssrc_, &options, &frame_forwarder));
+  send_channel_->SetSend(true);
+  frame_forwarder.IncomingCapturedFrame(frame_source_.GetFrame());
+
+  // Set scalability mode.
+  webrtc::RtpParameters parameters =
+      send_channel_->GetRtpSendParameters(last_ssrc_);
+  EXPECT_EQ(kNumSimulcastStreams, parameters.encodings.size());
+  parameters.encodings[0].scalability_mode = "L3T3";
+  EXPECT_TRUE(send_channel_->SetRtpSendParameters(last_ssrc_, parameters).ok());
+
+  // Verify that the new value is propagated down to the encoder.
+  // Check that WebRtcVideoSendStream updates VideoEncoderConfig correctly.
+  const absl::optional<ScalabilityMode> kDefaultScalabilityMode =
+      webrtc::ScalabilityModeFromString(kDefaultScalabilityModeStr);
+  EXPECT_EQ(2, stream->num_encoder_reconfigurations());
+  webrtc::VideoEncoderConfig encoder_config = stream->GetEncoderConfig().Copy();
+  EXPECT_EQ(1u, encoder_config.number_of_streams);
+  EXPECT_THAT(encoder_config.simulcast_layers,
+              ElementsAre(Field(&webrtc::VideoStream::scalability_mode,
+                                ScalabilityMode::kL3T3),
+                          Field(&webrtc::VideoStream::scalability_mode,
+                                kDefaultScalabilityMode),
+                          Field(&webrtc::VideoStream::scalability_mode,
+                                kDefaultScalabilityMode)));
+
+  // FakeVideoSendStream calls CreateEncoderStreams, test that the vector of
+  // VideoStreams are created appropriately for the simulcast case.
+  EXPECT_THAT(stream->GetVideoStreams(),
+              ElementsAre(Field(&webrtc::VideoStream::scalability_mode,
+                                ScalabilityMode::kL3T3)));
+
+  // GetParameters.
+  parameters = send_channel_->GetRtpSendParameters(last_ssrc_);
+  EXPECT_THAT(
+      parameters.encodings,
+      ElementsAre(
+          Field(&webrtc::RtpEncodingParameters::scalability_mode, "L3T3"),
+          Field(&webrtc::RtpEncodingParameters::scalability_mode,
+                kDefaultScalabilityModeStr),
+          Field(&webrtc::RtpEncodingParameters::scalability_mode,
+                kDefaultScalabilityModeStr)));
+
+  // Change codec to VP8.
+  cricket::VideoSenderParameters vp8_parameters;
+  vp8_parameters.codecs.push_back(GetEngineCodec("VP8"));
+  EXPECT_TRUE(send_channel_->SetSenderParameters(vp8_parameters));
+  frame_forwarder.IncomingCapturedFrame(frame_source_.GetFrame());
+
+  // The stream should be recreated due to codec change.
+  std::vector<FakeVideoSendStream*> new_streams = GetFakeSendStreams();
+  EXPECT_EQ(1u, new_streams.size());
+  EXPECT_EQ(2, fake_call_->GetNumCreatedSendStreams());
+
+  // Verify fallback to default value triggered (L3T3 is not supported).
+  EXPECT_THAT(new_streams[0]->GetVideoStreams(),
+              ElementsAre(Field(&webrtc::VideoStream::scalability_mode,
+                                kDefaultScalabilityMode),
+                          Field(&webrtc::VideoStream::scalability_mode,
+                                kDefaultScalabilityMode),
+                          Field(&webrtc::VideoStream::scalability_mode,
+                                kDefaultScalabilityMode)));
+
+  parameters = send_channel_->GetRtpSendParameters(last_ssrc_);
+  EXPECT_THAT(
+      parameters.encodings,
+      ElementsAre(Field(&webrtc::RtpEncodingParameters::scalability_mode,
+                        kDefaultScalabilityModeStr),
+                  Field(&webrtc::RtpEncodingParameters::scalability_mode,
+                        kDefaultScalabilityModeStr),
+                  Field(&webrtc::RtpEncodingParameters::scalability_mode,
+                        kDefaultScalabilityModeStr)));
+
+  EXPECT_TRUE(send_channel_->SetVideoSend(last_ssrc_, nullptr, nullptr));
+}
+
+// Checks that per-encoding min/max bitrates are unset by default and that
+// values set via SetRtpSendParameters round-trip through
+// GetRtpSendParameters.
+TEST_F(WebRtcVideoChannelTest, GetAndSetRtpSendParametersMinAndMaxBitrate) {
+  SetUpSimulcast(true, /*with_rtx=*/false);
+
+  // Get and set the rtp encoding parameters.
+  webrtc::RtpParameters parameters =
+      send_channel_->GetRtpSendParameters(last_ssrc_);
+  EXPECT_EQ(kNumSimulcastStreams, parameters.encodings.size());
+  for (const auto& encoding : parameters.encodings) {
+    EXPECT_FALSE(encoding.min_bitrate_bps);
+    EXPECT_FALSE(encoding.max_bitrate_bps);
+  }
+
+  // Change the value and set it on the VideoChannel.
+  parameters.encodings[0].min_bitrate_bps = 100000;
+  parameters.encodings[0].max_bitrate_bps = 200000;
+  parameters.encodings[1].min_bitrate_bps = 300000;
+  parameters.encodings[1].max_bitrate_bps = 400000;
+  parameters.encodings[2].min_bitrate_bps = 500000;
+  parameters.encodings[2].max_bitrate_bps = 600000;
+  EXPECT_TRUE(send_channel_->SetRtpSendParameters(last_ssrc_, parameters).ok());
+
+  // Verify that the bitrates are set on the VideoChannel.
+  parameters = send_channel_->GetRtpSendParameters(last_ssrc_);
+  EXPECT_EQ(kNumSimulcastStreams, parameters.encodings.size());
+  EXPECT_EQ(100000, parameters.encodings[0].min_bitrate_bps);
+  EXPECT_EQ(200000, parameters.encodings[0].max_bitrate_bps);
+  EXPECT_EQ(300000, parameters.encodings[1].min_bitrate_bps);
+  EXPECT_EQ(400000, parameters.encodings[1].max_bitrate_bps);
+  EXPECT_EQ(500000, parameters.encodings[2].min_bitrate_bps);
+  EXPECT_EQ(600000, parameters.encodings[2].max_bitrate_bps);
+}
+
+// Checks that SetRtpSendParameters rejects an encoding whose max bitrate is
+// below its min bitrate with RTCErrorType::INVALID_RANGE.
+TEST_F(WebRtcVideoChannelTest, SetRtpSendParametersFailsWithIncorrectBitrate) {
+  SetUpSimulcast(true, /*with_rtx=*/false);
+
+  // Get and set the rtp encoding parameters.
+  webrtc::RtpParameters parameters =
+      send_channel_->GetRtpSendParameters(last_ssrc_);
+  EXPECT_EQ(kNumSimulcastStreams, parameters.encodings.size());
+
+  // Max bitrate lower than min bitrate should fail.
+  parameters.encodings[2].min_bitrate_bps = 100000;
+  parameters.encodings[2].max_bitrate_bps = 100000 - 1;
+  EXPECT_EQ(webrtc::RTCErrorType::INVALID_RANGE,
+            send_channel_->SetRtpSendParameters(last_ssrc_, parameters).type());
+}
+
+// Test that min and max bitrate values set via RtpParameters are correctly
+// propagated to the underlying encoder, and that the target is set to 3/4 of
+// the maximum (3/4 was chosen because it's similar to the simulcast defaults
+// that are used if no min/max are specified).
+TEST_F(WebRtcVideoChannelTest, MinAndMaxSimulcastBitratePropagatedToEncoder) {
+  FakeVideoSendStream* stream = SetUpSimulcast(true, /*with_rtx=*/false);
+
+  // Send a full size frame so all simulcast layers are used when reconfiguring.
+  webrtc::test::FrameForwarder frame_forwarder;
+  VideoOptions options;
+  EXPECT_TRUE(
+      send_channel_->SetVideoSend(last_ssrc_, &options, &frame_forwarder));
+  send_channel_->SetSend(true);
+  frame_forwarder.IncomingCapturedFrame(frame_source_.GetFrame());
+
+  // Get and set the rtp encoding parameters.
+  // Change the value and set it on the VideoChannel.
+  webrtc::RtpParameters parameters =
+      send_channel_->GetRtpSendParameters(last_ssrc_);
+  EXPECT_EQ(kNumSimulcastStreams, parameters.encodings.size());
+  parameters.encodings[0].min_bitrate_bps = 100000;
+  parameters.encodings[0].max_bitrate_bps = 200000;
+  parameters.encodings[1].min_bitrate_bps = 300000;
+  parameters.encodings[1].max_bitrate_bps = 400000;
+  parameters.encodings[2].min_bitrate_bps = 500000;
+  parameters.encodings[2].max_bitrate_bps = 600000;
+  EXPECT_TRUE(send_channel_->SetRtpSendParameters(last_ssrc_, parameters).ok());
+
+  // Verify that the new value propagated down to the encoder.
+  // Check that WebRtcVideoSendStream updates VideoEncoderConfig correctly.
+  EXPECT_EQ(2, stream->num_encoder_reconfigurations());
+  webrtc::VideoEncoderConfig encoder_config = stream->GetEncoderConfig().Copy();
+  EXPECT_EQ(kNumSimulcastStreams, encoder_config.number_of_streams);
+  EXPECT_EQ(kNumSimulcastStreams, encoder_config.simulcast_layers.size());
+  EXPECT_EQ(100000, encoder_config.simulcast_layers[0].min_bitrate_bps);
+  EXPECT_EQ(200000, encoder_config.simulcast_layers[0].max_bitrate_bps);
+  EXPECT_EQ(300000, encoder_config.simulcast_layers[1].min_bitrate_bps);
+  EXPECT_EQ(400000, encoder_config.simulcast_layers[1].max_bitrate_bps);
+  EXPECT_EQ(500000, encoder_config.simulcast_layers[2].min_bitrate_bps);
+  EXPECT_EQ(600000, encoder_config.simulcast_layers[2].max_bitrate_bps);
+
+  // FakeVideoSendStream calls CreateEncoderStreams, test that the vector of
+  // VideoStreams are created appropriately for the simulcast case.
+  EXPECT_EQ(kNumSimulcastStreams, stream->GetVideoStreams().size());
+  // Target bitrate: 200000 * 3 / 4 = 150000.
+  EXPECT_EQ(100000, stream->GetVideoStreams()[0].min_bitrate_bps);
+  EXPECT_EQ(150000, stream->GetVideoStreams()[0].target_bitrate_bps);
+  EXPECT_EQ(200000, stream->GetVideoStreams()[0].max_bitrate_bps);
+  // Target bitrate: 400000 * 3 / 4 = 300000.
+  EXPECT_EQ(300000, stream->GetVideoStreams()[1].min_bitrate_bps);
+  EXPECT_EQ(300000, stream->GetVideoStreams()[1].target_bitrate_bps);
+  EXPECT_EQ(400000, stream->GetVideoStreams()[1].max_bitrate_bps);
+  // Target bitrate: 600000 * 3 / 4 = 450000 is below the configured min
+  // (500000), so the target is raised to the max (600000) instead.
+  EXPECT_EQ(500000, stream->GetVideoStreams()[2].min_bitrate_bps);
+  EXPECT_EQ(600000, stream->GetVideoStreams()[2].target_bitrate_bps);
+  EXPECT_EQ(600000, stream->GetVideoStreams()[2].max_bitrate_bps);
+
+  // No parameter changed, encoder should not be reconfigured.
+  EXPECT_TRUE(send_channel_->SetRtpSendParameters(last_ssrc_, parameters).ok());
+  EXPECT_EQ(2, stream->num_encoder_reconfigurations());
+
+  EXPECT_TRUE(send_channel_->SetVideoSend(last_ssrc_, nullptr, nullptr));
+}
+
+// Test to only specify the min or max bitrate value for a layer via
+// RtpParameters. The unspecified min/max and target value should be set to the
+// simulcast default that is used if no min/max are specified.
+TEST_F(WebRtcVideoChannelTest, MinOrMaxSimulcastBitratePropagatedToEncoder) {
+  // Simulcast defaults for 720p; used as the expected fallback values below.
+  const std::vector<webrtc::VideoStream> kDefault = GetSimulcastBitrates720p();
+  FakeVideoSendStream* stream = SetUpSimulcast(true, /*with_rtx=*/false);
+
+  // Send a full size frame so all simulcast layers are used when reconfiguring.
+  webrtc::test::FrameForwarder frame_forwarder;
+  VideoOptions options;
+  EXPECT_TRUE(
+      send_channel_->SetVideoSend(last_ssrc_, &options, &frame_forwarder));
+  send_channel_->SetSend(true);
+  frame_forwarder.IncomingCapturedFrame(frame_source_.GetFrame());
+
+  // Get and set the rtp encoding parameters.
+  webrtc::RtpParameters parameters =
+      send_channel_->GetRtpSendParameters(last_ssrc_);
+  EXPECT_EQ(kNumSimulcastStreams, parameters.encodings.size());
+
+  // Change the value and set it on the VideoChannel.
+  // Layer 0: only configure min bitrate.
+  const int kMinBpsLayer0 = kDefault[0].min_bitrate_bps + 1;
+  parameters.encodings[0].min_bitrate_bps = kMinBpsLayer0;
+  // Layer 1: only configure max bitrate.
+  const int kMaxBpsLayer1 = kDefault[1].max_bitrate_bps - 1;
+  parameters.encodings[1].max_bitrate_bps = kMaxBpsLayer1;
+  EXPECT_TRUE(send_channel_->SetRtpSendParameters(last_ssrc_, parameters).ok());
+
+  // Verify that the new value propagated down to the encoder.
+  // Check that WebRtcVideoSendStream updates VideoEncoderConfig correctly
+  // (-1 denotes "not configured" at this level).
+  webrtc::VideoEncoderConfig encoder_config = stream->GetEncoderConfig().Copy();
+  EXPECT_EQ(kNumSimulcastStreams, encoder_config.number_of_streams);
+  EXPECT_EQ(kNumSimulcastStreams, encoder_config.simulcast_layers.size());
+  EXPECT_EQ(kMinBpsLayer0, encoder_config.simulcast_layers[0].min_bitrate_bps);
+  EXPECT_EQ(-1, encoder_config.simulcast_layers[0].max_bitrate_bps);
+  EXPECT_EQ(-1, encoder_config.simulcast_layers[1].min_bitrate_bps);
+  EXPECT_EQ(kMaxBpsLayer1, encoder_config.simulcast_layers[1].max_bitrate_bps);
+  EXPECT_EQ(-1, encoder_config.simulcast_layers[2].min_bitrate_bps);
+  EXPECT_EQ(-1, encoder_config.simulcast_layers[2].max_bitrate_bps);
+
+  // FakeVideoSendStream calls CreateEncoderStreams, test that the vector of
+  // VideoStreams are created appropriately for the simulcast case.
+  EXPECT_EQ(kNumSimulcastStreams, stream->GetVideoStreams().size());
+  // Layer 0: min configured bitrate should overwrite min default.
+  EXPECT_EQ(kMinBpsLayer0, stream->GetVideoStreams()[0].min_bitrate_bps);
+  EXPECT_EQ(kDefault[0].target_bitrate_bps,
+            stream->GetVideoStreams()[0].target_bitrate_bps);
+  EXPECT_EQ(kDefault[0].max_bitrate_bps,
+            stream->GetVideoStreams()[0].max_bitrate_bps);
+  // Layer 1: max configured bitrate should overwrite max default.
+  // And target bitrate should be 3/4 * max bitrate or default target
+  // which is larger.
+  EXPECT_EQ(kDefault[1].min_bitrate_bps,
+            stream->GetVideoStreams()[1].min_bitrate_bps);
+  const int kTargetBpsLayer1 =
+      std::max(kDefault[1].target_bitrate_bps, kMaxBpsLayer1 * 3 / 4);
+  EXPECT_EQ(kTargetBpsLayer1, stream->GetVideoStreams()[1].target_bitrate_bps);
+  EXPECT_EQ(kMaxBpsLayer1, stream->GetVideoStreams()[1].max_bitrate_bps);
+  // Layer 2: min and max bitrate not configured, default expected.
+  EXPECT_EQ(kDefault[2].min_bitrate_bps,
+            stream->GetVideoStreams()[2].min_bitrate_bps);
+  EXPECT_EQ(kDefault[2].target_bitrate_bps,
+            stream->GetVideoStreams()[2].target_bitrate_bps);
+  EXPECT_EQ(kDefault[2].max_bitrate_bps,
+            stream->GetVideoStreams()[2].max_bitrate_bps);
+
+  EXPECT_TRUE(send_channel_->SetVideoSend(last_ssrc_, nullptr, nullptr));
+}
+
+// Test that specifying the min (or max) bitrate value for a layer via
+// RtpParameters above (or below) the simulcast default max (or min) adjusts
+// the unspecified values accordingly.
+TEST_F(WebRtcVideoChannelTest, SetMinAndMaxSimulcastBitrateAboveBelowDefault) {
+  // Simulcast defaults for 720p; used as the expected fallback values below.
+  const std::vector<webrtc::VideoStream> kDefault = GetSimulcastBitrates720p();
+  FakeVideoSendStream* stream = SetUpSimulcast(true, /*with_rtx=*/false);
+
+  // Send a full size frame so all simulcast layers are used when reconfiguring.
+  webrtc::test::FrameForwarder frame_forwarder;
+  VideoOptions options;
+  EXPECT_TRUE(
+      send_channel_->SetVideoSend(last_ssrc_, &options, &frame_forwarder));
+  send_channel_->SetSend(true);
+  frame_forwarder.IncomingCapturedFrame(frame_source_.GetFrame());
+
+  // Get and set the rtp encoding parameters.
+  webrtc::RtpParameters parameters =
+      send_channel_->GetRtpSendParameters(last_ssrc_);
+  EXPECT_EQ(kNumSimulcastStreams, parameters.encodings.size());
+
+  // Change the value and set it on the VideoChannel.
+  // For layer 0, set the min bitrate above the default max.
+  const int kMinBpsLayer0 = kDefault[0].max_bitrate_bps + 1;
+  parameters.encodings[0].min_bitrate_bps = kMinBpsLayer0;
+  // For layer 1, set the max bitrate below the default min.
+  const int kMaxBpsLayer1 = kDefault[1].min_bitrate_bps - 1;
+  parameters.encodings[1].max_bitrate_bps = kMaxBpsLayer1;
+  EXPECT_TRUE(send_channel_->SetRtpSendParameters(last_ssrc_, parameters).ok());
+
+  // Verify that the new value propagated down to the encoder.
+  // FakeVideoSendStream calls CreateEncoderStreams, test that the vector of
+  // VideoStreams are created appropriately for the simulcast case.
+  EXPECT_EQ(kNumSimulcastStreams, stream->GetVideoStreams().size());
+  // Layer 0: Min bitrate above default max (target/max should be adjusted).
+  EXPECT_EQ(kMinBpsLayer0, stream->GetVideoStreams()[0].min_bitrate_bps);
+  EXPECT_EQ(kMinBpsLayer0, stream->GetVideoStreams()[0].target_bitrate_bps);
+  EXPECT_EQ(kMinBpsLayer0, stream->GetVideoStreams()[0].max_bitrate_bps);
+  // Layer 1: Max bitrate below default min (min/target should be adjusted).
+  EXPECT_EQ(kMaxBpsLayer1, stream->GetVideoStreams()[1].min_bitrate_bps);
+  EXPECT_EQ(kMaxBpsLayer1, stream->GetVideoStreams()[1].target_bitrate_bps);
+  EXPECT_EQ(kMaxBpsLayer1, stream->GetVideoStreams()[1].max_bitrate_bps);
+  // Layer 2: min and max bitrate not configured, default expected.
+  EXPECT_EQ(kDefault[2].min_bitrate_bps,
+            stream->GetVideoStreams()[2].min_bitrate_bps);
+  EXPECT_EQ(kDefault[2].target_bitrate_bps,
+            stream->GetVideoStreams()[2].target_bitrate_bps);
+  EXPECT_EQ(kDefault[2].max_bitrate_bps,
+            stream->GetVideoStreams()[2].max_bitrate_bps);
+
+  EXPECT_TRUE(send_channel_->SetVideoSend(last_ssrc_, nullptr, nullptr));
+}
+
+// Tests that max bandwidth in excess of the sum of per-layer max bitrates is
+// allocated to the highest simulcast layer, provided that layer has no
+// explicit max bitrate configured.
+TEST_F(WebRtcVideoChannelTest, BandwidthAboveTotalMaxBitrateGivenToMaxLayer) {
+  const std::vector<webrtc::VideoStream> kDefault = GetSimulcastBitrates720p();
+  FakeVideoSendStream* stream = SetUpSimulcast(true, /*with_rtx=*/false);
+
+  // Send a full size frame so all simulcast layers are used when reconfiguring.
+  webrtc::test::FrameForwarder frame_forwarder;
+  VideoOptions options;
+  EXPECT_TRUE(
+      send_channel_->SetVideoSend(last_ssrc_, &options, &frame_forwarder));
+  send_channel_->SetSend(true);
+  frame_forwarder.IncomingCapturedFrame(frame_source_.GetFrame());
+
+  // Set max bitrate for all but the highest layer.
+  webrtc::RtpParameters parameters =
+      send_channel_->GetRtpSendParameters(last_ssrc_);
+  EXPECT_EQ(kNumSimulcastStreams, parameters.encodings.size());
+  parameters.encodings[0].max_bitrate_bps = kDefault[0].max_bitrate_bps;
+  parameters.encodings[1].max_bitrate_bps = kDefault[1].max_bitrate_bps;
+  EXPECT_TRUE(send_channel_->SetRtpSendParameters(last_ssrc_, parameters).ok());
+
+  // Set max bandwidth equal to total max bitrate.
+  send_parameters_.max_bandwidth_bps =
+      GetTotalMaxBitrate(stream->GetVideoStreams()).bps();
+  ExpectSetMaxBitrate(send_parameters_.max_bandwidth_bps);
+  ASSERT_TRUE(send_channel_->SetSenderParameters(send_parameters_));
+
+  // No bitrate above the total max to give to the highest layer.
+  EXPECT_EQ(kNumSimulcastStreams, stream->GetVideoStreams().size());
+  EXPECT_EQ(kDefault[2].max_bitrate_bps,
+            stream->GetVideoStreams()[2].max_bitrate_bps);
+
+  // Set max bandwidth above the total max bitrate.
+  send_parameters_.max_bandwidth_bps =
+      GetTotalMaxBitrate(stream->GetVideoStreams()).bps() + 1;
+  ExpectSetMaxBitrate(send_parameters_.max_bandwidth_bps);
+  ASSERT_TRUE(send_channel_->SetSenderParameters(send_parameters_));
+
+  // The highest layer has no max bitrate set -> the bitrate above the total
+  // max should be given to the highest layer.
+  EXPECT_EQ(kNumSimulcastStreams, stream->GetVideoStreams().size());
+  EXPECT_EQ(send_parameters_.max_bandwidth_bps,
+            GetTotalMaxBitrate(stream->GetVideoStreams()).bps());
+  EXPECT_EQ(kDefault[2].max_bitrate_bps + 1,
+            stream->GetVideoStreams()[2].max_bitrate_bps);
+
+  EXPECT_TRUE(send_channel_->SetVideoSend(last_ssrc_, nullptr, nullptr));
+}
+
+TEST_F(WebRtcVideoChannelTest,
+ BandwidthAboveTotalMaxBitrateNotGivenToMaxLayerIfMaxBitrateSet) {
+ const std::vector<webrtc::VideoStream> kDefault = GetSimulcastBitrates720p();
+ EXPECT_EQ(kNumSimulcastStreams, kDefault.size());
+ FakeVideoSendStream* stream = SetUpSimulcast(true, /*with_rtx=*/false);
+
+ // Send a full size frame so all simulcast layers are used when reconfiguring.
+ webrtc::test::FrameForwarder frame_forwarder;
+ VideoOptions options;
+ EXPECT_TRUE(
+ send_channel_->SetVideoSend(last_ssrc_, &options, &frame_forwarder));
+ send_channel_->SetSend(true);
+ frame_forwarder.IncomingCapturedFrame(frame_source_.GetFrame());
+
+ // Set max bitrate for the highest layer.
+ webrtc::RtpParameters parameters =
+ send_channel_->GetRtpSendParameters(last_ssrc_);
+ EXPECT_EQ(kNumSimulcastStreams, parameters.encodings.size());
+ parameters.encodings[2].max_bitrate_bps = kDefault[2].max_bitrate_bps;
+ EXPECT_TRUE(send_channel_->SetRtpSendParameters(last_ssrc_, parameters).ok());
+
+ // Set max bandwidth above the total max bitrate.
+ send_parameters_.max_bandwidth_bps =
+ GetTotalMaxBitrate(stream->GetVideoStreams()).bps() + 1;
+ ExpectSetMaxBitrate(send_parameters_.max_bandwidth_bps);
+ ASSERT_TRUE(send_channel_->SetSenderParameters(send_parameters_));
+
+ // The highest layer has the max bitrate set -> the bitrate above the total
+ // max should not be given to the highest layer.
+ EXPECT_EQ(kNumSimulcastStreams, stream->GetVideoStreams().size());
+ EXPECT_EQ(*parameters.encodings[2].max_bitrate_bps,
+ stream->GetVideoStreams()[2].max_bitrate_bps);
+
+ EXPECT_TRUE(send_channel_->SetVideoSend(last_ssrc_, nullptr, nullptr));
+}
+
+// Test that min and max bitrate values set via RtpParameters are correctly
+// propagated to the underlying encoder for a single stream.
+TEST_F(WebRtcVideoChannelTest, MinAndMaxBitratePropagatedToEncoder) {
+ FakeVideoSendStream* stream = AddSendStream();
+ EXPECT_TRUE(send_channel_->SetSend(true));
+ EXPECT_TRUE(stream->IsSending());
+
+ // Set min and max bitrate.
+ webrtc::RtpParameters parameters =
+ send_channel_->GetRtpSendParameters(last_ssrc_);
+ EXPECT_EQ(1u, parameters.encodings.size());
+ parameters.encodings[0].min_bitrate_bps = 80000;
+ parameters.encodings[0].max_bitrate_bps = 150000;
+ EXPECT_TRUE(send_channel_->SetRtpSendParameters(last_ssrc_, parameters).ok());
+
+ // Check that WebRtcVideoSendStream updates VideoEncoderConfig correctly.
+ webrtc::VideoEncoderConfig encoder_config = stream->GetEncoderConfig().Copy();
+ EXPECT_EQ(1u, encoder_config.number_of_streams);
+ EXPECT_EQ(1u, encoder_config.simulcast_layers.size());
+ EXPECT_EQ(80000, encoder_config.simulcast_layers[0].min_bitrate_bps);
+ EXPECT_EQ(150000, encoder_config.simulcast_layers[0].max_bitrate_bps);
+
+ // FakeVideoSendStream calls CreateEncoderStreams, test that the vector of
+ // VideoStreams are created appropriately.
+ EXPECT_EQ(1u, stream->GetVideoStreams().size());
+ EXPECT_EQ(80000, stream->GetVideoStreams()[0].min_bitrate_bps);
+ EXPECT_EQ(150000, stream->GetVideoStreams()[0].target_bitrate_bps);
+ EXPECT_EQ(150000, stream->GetVideoStreams()[0].max_bitrate_bps);
+}
+
+// Test the default min and max bitrate value are correctly propagated to the
+// underlying encoder for a single stream (when the values are not set via
+// RtpParameters).
+TEST_F(WebRtcVideoChannelTest, DefaultMinAndMaxBitratePropagatedToEncoder) {
+ FakeVideoSendStream* stream = AddSendStream();
+ EXPECT_TRUE(send_channel_->SetSend(true));
+ EXPECT_TRUE(stream->IsSending());
+
+ // Check that WebRtcVideoSendStream updates VideoEncoderConfig correctly.
+ webrtc::VideoEncoderConfig encoder_config = stream->GetEncoderConfig().Copy();
+ EXPECT_EQ(1u, encoder_config.number_of_streams);
+ EXPECT_EQ(1u, encoder_config.simulcast_layers.size());
+ EXPECT_EQ(-1, encoder_config.simulcast_layers[0].min_bitrate_bps);
+ EXPECT_EQ(-1, encoder_config.simulcast_layers[0].max_bitrate_bps);
+
+ // FakeVideoSendStream calls CreateEncoderStreams, test that the vector of
+ // VideoStreams are created appropriately.
+ EXPECT_EQ(1u, stream->GetVideoStreams().size());
+ EXPECT_EQ(webrtc::kDefaultMinVideoBitrateBps,
+ stream->GetVideoStreams()[0].min_bitrate_bps);
+ EXPECT_GT(stream->GetVideoStreams()[0].max_bitrate_bps,
+ stream->GetVideoStreams()[0].min_bitrate_bps);
+ EXPECT_EQ(stream->GetVideoStreams()[0].max_bitrate_bps,
+ stream->GetVideoStreams()[0].target_bitrate_bps);
+}
+
+// Test that a stream will not be sending if its encoding is made inactive
+// through SetRtpSendParameters.
+TEST_F(WebRtcVideoChannelTest, SetRtpSendParametersOneEncodingActive) {
+ FakeVideoSendStream* stream = AddSendStream();
+ EXPECT_TRUE(send_channel_->SetSend(true));
+ EXPECT_TRUE(stream->IsSending());
+
+ // Get current parameters and change "active" to false.
+ webrtc::RtpParameters parameters =
+ send_channel_->GetRtpSendParameters(last_ssrc_);
+ ASSERT_EQ(1u, parameters.encodings.size());
+ ASSERT_TRUE(parameters.encodings[0].active);
+ parameters.encodings[0].active = false;
+ EXPECT_TRUE(send_channel_->SetRtpSendParameters(last_ssrc_, parameters).ok());
+ EXPECT_FALSE(stream->IsSending());
+
+ // Now change it back to active and verify we resume sending.
+ parameters.encodings[0].active = true;
+ EXPECT_TRUE(send_channel_->SetRtpSendParameters(last_ssrc_, parameters).ok());
+ EXPECT_TRUE(stream->IsSending());
+}
+
+// Tests that when active is updated for any simulcast layer then the send
+// stream's sending state will be updated and it will be reconfigured with the
+// new appropriate active simulcast streams.
+TEST_F(WebRtcVideoChannelTest, SetRtpSendParametersMultipleEncodingsActive) {
+ // Create the stream params with multiple ssrcs for simulcast.
+ const size_t kNumSimulcastStreams = 3;
+ std::vector<uint32_t> ssrcs = MAKE_VECTOR(kSsrcs3);
+ StreamParams stream_params = CreateSimStreamParams("cname", ssrcs);
+ FakeVideoSendStream* fake_video_send_stream = AddSendStream(stream_params);
+ uint32_t primary_ssrc = stream_params.first_ssrc();
+
+ // Using the FrameForwarder, we manually send a full size
+ // frame. This allows us to test that ReconfigureEncoder is called
+ // appropriately.
+ webrtc::test::FrameForwarder frame_forwarder;
+ VideoOptions options;
+ EXPECT_TRUE(
+ send_channel_->SetVideoSend(primary_ssrc, &options, &frame_forwarder));
+ send_channel_->SetSend(true);
+ frame_forwarder.IncomingCapturedFrame(frame_source_.GetFrame(
+ 1920, 1080, webrtc::VideoRotation::kVideoRotation_0,
+ rtc::kNumMicrosecsPerSec / 30));
+
+ // Check that all encodings are initially active.
+ webrtc::RtpParameters parameters =
+ send_channel_->GetRtpSendParameters(primary_ssrc);
+ EXPECT_EQ(kNumSimulcastStreams, parameters.encodings.size());
+ EXPECT_TRUE(parameters.encodings[0].active);
+ EXPECT_TRUE(parameters.encodings[1].active);
+ EXPECT_TRUE(parameters.encodings[2].active);
+ EXPECT_TRUE(fake_video_send_stream->IsSending());
+
+ // Only turn on only the middle stream.
+ parameters.encodings[0].active = false;
+ parameters.encodings[1].active = true;
+ parameters.encodings[2].active = false;
+ EXPECT_TRUE(
+ send_channel_->SetRtpSendParameters(primary_ssrc, parameters).ok());
+ // Verify that the active fields are set on the VideoChannel.
+ parameters = send_channel_->GetRtpSendParameters(primary_ssrc);
+ EXPECT_EQ(kNumSimulcastStreams, parameters.encodings.size());
+ EXPECT_FALSE(parameters.encodings[0].active);
+ EXPECT_TRUE(parameters.encodings[1].active);
+ EXPECT_FALSE(parameters.encodings[2].active);
+ // Check that the VideoSendStream is updated appropriately. This means its
+ // send state was updated and it was reconfigured.
+ EXPECT_TRUE(fake_video_send_stream->IsSending());
+ std::vector<webrtc::VideoStream> simulcast_streams =
+ fake_video_send_stream->GetVideoStreams();
+ EXPECT_EQ(kNumSimulcastStreams, simulcast_streams.size());
+ EXPECT_FALSE(simulcast_streams[0].active);
+ EXPECT_TRUE(simulcast_streams[1].active);
+ EXPECT_FALSE(simulcast_streams[2].active);
+
+ // Turn off all streams.
+ parameters.encodings[0].active = false;
+ parameters.encodings[1].active = false;
+ parameters.encodings[2].active = false;
+ EXPECT_TRUE(
+ send_channel_->SetRtpSendParameters(primary_ssrc, parameters).ok());
+ // Verify that the active fields are set on the VideoChannel.
+ parameters = send_channel_->GetRtpSendParameters(primary_ssrc);
+ EXPECT_EQ(kNumSimulcastStreams, parameters.encodings.size());
+ EXPECT_FALSE(parameters.encodings[0].active);
+ EXPECT_FALSE(parameters.encodings[1].active);
+ EXPECT_FALSE(parameters.encodings[2].active);
+ // Check that the VideoSendStream is off.
+ EXPECT_FALSE(fake_video_send_stream->IsSending());
+ simulcast_streams = fake_video_send_stream->GetVideoStreams();
+ EXPECT_EQ(kNumSimulcastStreams, simulcast_streams.size());
+ EXPECT_FALSE(simulcast_streams[0].active);
+ EXPECT_FALSE(simulcast_streams[1].active);
+ EXPECT_FALSE(simulcast_streams[2].active);
+
+ EXPECT_TRUE(send_channel_->SetVideoSend(primary_ssrc, nullptr, nullptr));
+}
+
+// Tests that when some streams are disactivated then the lowest
+// stream min_bitrate would be reused for the first active stream.
+TEST_F(WebRtcVideoChannelTest,
+ SetRtpSendParametersSetsMinBitrateForFirstActiveStream) {
+ // Create the stream params with multiple ssrcs for simulcast.
+ const size_t kNumSimulcastStreams = 3;
+ std::vector<uint32_t> ssrcs = MAKE_VECTOR(kSsrcs3);
+ StreamParams stream_params = CreateSimStreamParams("cname", ssrcs);
+ FakeVideoSendStream* fake_video_send_stream = AddSendStream(stream_params);
+ uint32_t primary_ssrc = stream_params.first_ssrc();
+
+ // Using the FrameForwarder, we manually send a full size
+ // frame. This allows us to test that ReconfigureEncoder is called
+ // appropriately.
+ webrtc::test::FrameForwarder frame_forwarder;
+ VideoOptions options;
+ EXPECT_TRUE(
+ send_channel_->SetVideoSend(primary_ssrc, &options, &frame_forwarder));
+ send_channel_->SetSend(true);
+ frame_forwarder.IncomingCapturedFrame(frame_source_.GetFrame(
+ 1920, 1080, webrtc::VideoRotation::kVideoRotation_0,
+ rtc::kNumMicrosecsPerSec / 30));
+
+ // Check that all encodings are initially active.
+ webrtc::RtpParameters parameters =
+ send_channel_->GetRtpSendParameters(primary_ssrc);
+ EXPECT_EQ(kNumSimulcastStreams, parameters.encodings.size());
+ EXPECT_TRUE(parameters.encodings[0].active);
+ EXPECT_TRUE(parameters.encodings[1].active);
+ EXPECT_TRUE(parameters.encodings[2].active);
+ EXPECT_TRUE(fake_video_send_stream->IsSending());
+
+ // Only turn on the highest stream.
+ parameters.encodings[0].active = false;
+ parameters.encodings[1].active = false;
+ parameters.encodings[2].active = true;
+ EXPECT_TRUE(
+ send_channel_->SetRtpSendParameters(primary_ssrc, parameters).ok());
+
+ // Check that the VideoSendStream is updated appropriately. This means its
+ // send state was updated and it was reconfigured.
+ EXPECT_TRUE(fake_video_send_stream->IsSending());
+ std::vector<webrtc::VideoStream> simulcast_streams =
+ fake_video_send_stream->GetVideoStreams();
+ EXPECT_EQ(kNumSimulcastStreams, simulcast_streams.size());
+ EXPECT_FALSE(simulcast_streams[0].active);
+ EXPECT_FALSE(simulcast_streams[1].active);
+ EXPECT_TRUE(simulcast_streams[2].active);
+
+ EXPECT_EQ(simulcast_streams[2].min_bitrate_bps,
+ simulcast_streams[0].min_bitrate_bps);
+
+ EXPECT_TRUE(send_channel_->SetVideoSend(primary_ssrc, nullptr, nullptr));
+}
+
+// Test that if a stream is reconfigured (due to a codec change or other
+// change) while its encoding is still inactive, it doesn't start sending.
+TEST_F(WebRtcVideoChannelTest,
+ InactiveStreamDoesntStartSendingWhenReconfigured) {
+ // Set an initial codec list, which will be modified later.
+ cricket::VideoSenderParameters parameters1;
+ parameters1.codecs.push_back(GetEngineCodec("VP8"));
+ parameters1.codecs.push_back(GetEngineCodec("VP9"));
+ EXPECT_TRUE(send_channel_->SetSenderParameters(parameters1));
+
+ FakeVideoSendStream* stream = AddSendStream();
+ EXPECT_TRUE(send_channel_->SetSend(true));
+ EXPECT_TRUE(stream->IsSending());
+
+ // Get current parameters and change "active" to false.
+ webrtc::RtpParameters parameters =
+ send_channel_->GetRtpSendParameters(last_ssrc_);
+ ASSERT_EQ(1u, parameters.encodings.size());
+ ASSERT_TRUE(parameters.encodings[0].active);
+ parameters.encodings[0].active = false;
+ EXPECT_EQ(1u, GetFakeSendStreams().size());
+ EXPECT_EQ(1, fake_call_->GetNumCreatedSendStreams());
+ EXPECT_TRUE(send_channel_->SetRtpSendParameters(last_ssrc_, parameters).ok());
+ EXPECT_FALSE(stream->IsSending());
+
+ // Reorder the codec list, causing the stream to be reconfigured.
+ cricket::VideoSenderParameters parameters2;
+ parameters2.codecs.push_back(GetEngineCodec("VP9"));
+ parameters2.codecs.push_back(GetEngineCodec("VP8"));
+ EXPECT_TRUE(send_channel_->SetSenderParameters(parameters2));
+ auto new_streams = GetFakeSendStreams();
+ // Assert that a new underlying stream was created due to the codec change.
+ // Otherwise, this test isn't testing what it set out to test.
+ EXPECT_EQ(1u, GetFakeSendStreams().size());
+ EXPECT_EQ(2, fake_call_->GetNumCreatedSendStreams());
+
+ // Verify that we still are not sending anything, due to the inactive
+ // encoding.
+ EXPECT_FALSE(new_streams[0]->IsSending());
+}
+
+// Test that GetRtpSendParameters returns the currently configured codecs.
+TEST_F(WebRtcVideoChannelTest, GetRtpSendParametersCodecs) {
+ AddSendStream();
+ cricket::VideoSenderParameters parameters;
+ parameters.codecs.push_back(GetEngineCodec("VP8"));
+ parameters.codecs.push_back(GetEngineCodec("VP9"));
+ EXPECT_TRUE(send_channel_->SetSenderParameters(parameters));
+
+ webrtc::RtpParameters rtp_parameters =
+ send_channel_->GetRtpSendParameters(last_ssrc_);
+ ASSERT_EQ(2u, rtp_parameters.codecs.size());
+ EXPECT_EQ(GetEngineCodec("VP8").ToCodecParameters(),
+ rtp_parameters.codecs[0]);
+ EXPECT_EQ(GetEngineCodec("VP9").ToCodecParameters(),
+ rtp_parameters.codecs[1]);
+}
+
// Test that GetRtpSendParameters returns the currently configured RTCP CNAME.
TEST_F(WebRtcVideoChannelTest, GetRtpSendParametersRtcpCname) {
  StreamParams params = StreamParams::CreateLegacy(kSsrc);
  params.cname = "rtcpcname";
  AddSendStream(params);

  webrtc::RtpParameters rtp_parameters =
      send_channel_->GetRtpSendParameters(kSsrc);
  // The CNAME set on the stream params must be reported back verbatim.
  EXPECT_STREQ("rtcpcname", rtp_parameters.rtcp.cname.c_str());
}
+
// Test that RtpParameters for send stream has one encoding and it has
// the correct SSRC.
TEST_F(WebRtcVideoChannelTest, GetRtpSendParametersSsrc) {
  AddSendStream();

  webrtc::RtpParameters rtp_parameters =
      send_channel_->GetRtpSendParameters(last_ssrc_);
  // A freshly added singlecast stream yields exactly one encoding, tagged
  // with that stream's SSRC.
  ASSERT_EQ(1u, rtp_parameters.encodings.size());
  EXPECT_EQ(last_ssrc_, rtp_parameters.encodings[0].ssrc);
}
+
+TEST_F(WebRtcVideoChannelTest, DetectRtpSendParameterHeaderExtensionsChange) {
+ AddSendStream();
+
+ webrtc::RtpParameters rtp_parameters =
+ send_channel_->GetRtpSendParameters(last_ssrc_);
+ rtp_parameters.header_extensions.emplace_back();
+
+ EXPECT_NE(0u, rtp_parameters.header_extensions.size());
+
+ webrtc::RTCError result =
+ send_channel_->SetRtpSendParameters(last_ssrc_, rtp_parameters);
+ EXPECT_EQ(webrtc::RTCErrorType::INVALID_MODIFICATION, result.type());
+}
+
+TEST_F(WebRtcVideoChannelTest, GetRtpSendParametersDegradationPreference) {
+ AddSendStream();
+
+ webrtc::test::FrameForwarder frame_forwarder;
+ EXPECT_TRUE(
+ send_channel_->SetVideoSend(last_ssrc_, nullptr, &frame_forwarder));
+
+ webrtc::RtpParameters rtp_parameters =
+ send_channel_->GetRtpSendParameters(last_ssrc_);
+ EXPECT_FALSE(rtp_parameters.degradation_preference.has_value());
+ rtp_parameters.degradation_preference =
+ webrtc::DegradationPreference::MAINTAIN_FRAMERATE;
+
+ EXPECT_TRUE(
+ send_channel_->SetRtpSendParameters(last_ssrc_, rtp_parameters).ok());
+
+ webrtc::RtpParameters updated_rtp_parameters =
+ send_channel_->GetRtpSendParameters(last_ssrc_);
+ EXPECT_EQ(updated_rtp_parameters.degradation_preference,
+ webrtc::DegradationPreference::MAINTAIN_FRAMERATE);
+
+ // Remove the source since it will be destroyed before the channel
+ EXPECT_TRUE(send_channel_->SetVideoSend(last_ssrc_, nullptr, nullptr));
+}
+
+// Test that if we set/get parameters multiple times, we get the same results.
+TEST_F(WebRtcVideoChannelTest, SetAndGetRtpSendParameters) {
+ AddSendStream();
+ cricket::VideoSenderParameters parameters;
+ parameters.codecs.push_back(GetEngineCodec("VP8"));
+ parameters.codecs.push_back(GetEngineCodec("VP9"));
+ EXPECT_TRUE(send_channel_->SetSenderParameters(parameters));
+
+ webrtc::RtpParameters initial_params =
+ send_channel_->GetRtpSendParameters(last_ssrc_);
+
+ // We should be able to set the params we just got.
+ EXPECT_TRUE(
+ send_channel_->SetRtpSendParameters(last_ssrc_, initial_params).ok());
+
+ // ... And this shouldn't change the params returned by GetRtpSendParameters.
+ EXPECT_EQ(initial_params, send_channel_->GetRtpSendParameters(last_ssrc_));
+}
+
+// Test that GetRtpReceiverParameters returns the currently configured codecs.
+TEST_F(WebRtcVideoChannelTest, GetRtpReceiveParametersCodecs) {
+ AddRecvStream();
+ cricket::VideoReceiverParameters parameters;
+ parameters.codecs.push_back(GetEngineCodec("VP8"));
+ parameters.codecs.push_back(GetEngineCodec("VP9"));
+ EXPECT_TRUE(receive_channel_->SetReceiverParameters(parameters));
+
+ webrtc::RtpParameters rtp_parameters =
+ receive_channel_->GetRtpReceiverParameters(last_ssrc_);
+ ASSERT_EQ(2u, rtp_parameters.codecs.size());
+ EXPECT_EQ(GetEngineCodec("VP8").ToCodecParameters(),
+ rtp_parameters.codecs[0]);
+ EXPECT_EQ(GetEngineCodec("VP9").ToCodecParameters(),
+ rtp_parameters.codecs[1]);
+}
+
// Verifies that per-codec fmtp parameters (H264 sprop-parameter-sets) set on
// the receiver codecs are carried through to both GetRtpReceiverParameters
// and the decoder entries of the receive-stream config. The test is only
// enabled when H264 support is compiled in.
#if defined(WEBRTC_USE_H264)
TEST_F(WebRtcVideoChannelTest, GetRtpReceiveFmtpSprop) {
#else
TEST_F(WebRtcVideoChannelTest, DISABLED_GetRtpReceiveFmtpSprop) {
#endif
  cricket::VideoReceiverParameters parameters;
  // Two H264 payload types with distinct sprop-parameter-sets values.
  cricket::VideoCodec kH264sprop1 = cricket::CreateVideoCodec(101, "H264");
  kH264sprop1.SetParam(kH264FmtpSpropParameterSets, "uvw");
  parameters.codecs.push_back(kH264sprop1);
  cricket::VideoCodec kH264sprop2 = cricket::CreateVideoCodec(102, "H264");
  kH264sprop2.SetParam(kH264FmtpSpropParameterSets, "xyz");
  parameters.codecs.push_back(kH264sprop2);
  EXPECT_TRUE(receive_channel_->SetReceiverParameters(parameters));

  FakeVideoReceiveStream* recv_stream = AddRecvStream();
  const webrtc::VideoReceiveStreamInterface::Config& cfg =
      recv_stream->GetConfig();
  webrtc::RtpParameters rtp_parameters =
      receive_channel_->GetRtpReceiverParameters(last_ssrc_);
  ASSERT_EQ(2u, rtp_parameters.codecs.size());
  EXPECT_EQ(kH264sprop1.ToCodecParameters(), rtp_parameters.codecs[0]);
  ASSERT_EQ(2u, cfg.decoders.size());
  // Each decoder entry must carry the sprop value of its own payload type.
  EXPECT_EQ(101, cfg.decoders[0].payload_type);
  EXPECT_EQ("H264", cfg.decoders[0].video_format.name);
  const auto it0 =
      cfg.decoders[0].video_format.parameters.find(kH264FmtpSpropParameterSets);
  ASSERT_TRUE(it0 != cfg.decoders[0].video_format.parameters.end());
  EXPECT_EQ("uvw", it0->second);

  EXPECT_EQ(102, cfg.decoders[1].payload_type);
  EXPECT_EQ("H264", cfg.decoders[1].video_format.name);
  const auto it1 =
      cfg.decoders[1].video_format.parameters.find(kH264FmtpSpropParameterSets);
  ASSERT_TRUE(it1 != cfg.decoders[1].video_format.parameters.end());
  EXPECT_EQ("xyz", it1->second);
}
+
// Test that RtpParameters for receive stream has one encoding and it has
// the correct SSRC.
TEST_F(WebRtcVideoChannelTest, GetRtpReceiveParametersSsrc) {
  AddRecvStream();

  webrtc::RtpParameters rtp_parameters =
      receive_channel_->GetRtpReceiverParameters(last_ssrc_);
  // A freshly added singlecast receive stream yields exactly one encoding,
  // tagged with that stream's SSRC.
  ASSERT_EQ(1u, rtp_parameters.encodings.size());
  EXPECT_EQ(last_ssrc_, rtp_parameters.encodings[0].ssrc);
}
+
// Test that if we set/get parameters multiple times, we get the same results.
TEST_F(WebRtcVideoChannelTest, SetAndGetRtpReceiveParameters) {
  AddRecvStream();
  cricket::VideoReceiverParameters parameters;
  parameters.codecs.push_back(GetEngineCodec("VP8"));
  parameters.codecs.push_back(GetEngineCodec("VP9"));
  EXPECT_TRUE(receive_channel_->SetReceiverParameters(parameters));

  webrtc::RtpParameters initial_params =
      receive_channel_->GetRtpReceiverParameters(last_ssrc_);

  // Reading the parameters a second time must return a result identical to
  // the first snapshot (GetRtpReceiverParameters is stable between calls).
  EXPECT_EQ(initial_params,
            receive_channel_->GetRtpReceiverParameters(last_ssrc_));
}
+
+// Test that GetDefaultRtpReceiveParameters returns parameters correctly when
+// SSRCs aren't signaled. It should always return an empty
+// "RtpEncodingParameters", even after a packet is received and the unsignaled
+// SSRC is known.
+TEST_F(WebRtcVideoChannelTest,
+ GetDefaultRtpReceiveParametersWithUnsignaledSsrc) {
+ // Call necessary methods to configure receiving a default stream as
+ // soon as it arrives.
+ cricket::VideoReceiverParameters parameters;
+ parameters.codecs.push_back(GetEngineCodec("VP8"));
+ parameters.codecs.push_back(GetEngineCodec("VP9"));
+ EXPECT_TRUE(receive_channel_->SetReceiverParameters(parameters));
+
+ // Call GetRtpReceiverParameters before configured to receive an unsignaled
+ // stream. Should return nothing.
+ EXPECT_EQ(webrtc::RtpParameters(),
+ receive_channel_->GetDefaultRtpReceiveParameters());
+
+ // Set a sink for an unsignaled stream.
+ cricket::FakeVideoRenderer renderer;
+ receive_channel_->SetDefaultSink(&renderer);
+
+ // Call GetDefaultRtpReceiveParameters before the SSRC is known.
+ webrtc::RtpParameters rtp_parameters =
+ receive_channel_->GetDefaultRtpReceiveParameters();
+ ASSERT_EQ(1u, rtp_parameters.encodings.size());
+ EXPECT_FALSE(rtp_parameters.encodings[0].ssrc);
+
+ // Receive VP8 packet.
+ RtpPacketReceived rtp_packet;
+ rtp_packet.SetPayloadType(GetEngineCodec("VP8").id);
+ rtp_packet.SetSsrc(kIncomingUnsignalledSsrc);
+ ReceivePacketAndAdvanceTime(rtp_packet);
+
+ // The `ssrc` member should still be unset.
+ rtp_parameters = receive_channel_->GetDefaultRtpReceiveParameters();
+ ASSERT_EQ(1u, rtp_parameters.encodings.size());
+ EXPECT_FALSE(rtp_parameters.encodings[0].ssrc);
+}
+
+// Test that if a default stream is created for a non-primary stream (for
+// example, RTX before we know it's RTX), we are still able to explicitly add
+// the stream later.
+TEST_F(WebRtcVideoChannelTest,
+ AddReceiveStreamAfterReceivingNonPrimaryUnsignaledSsrc) {
+ // Receive VP8 RTX packet.
+ RtpPacketReceived rtp_packet;
+ const cricket::VideoCodec vp8 = GetEngineCodec("VP8");
+ rtp_packet.SetPayloadType(default_apt_rtx_types_[vp8.id]);
+ rtp_packet.SetSsrc(2);
+ ReceivePacketAndAdvanceTime(rtp_packet);
+ EXPECT_EQ(1u, fake_call_->GetVideoReceiveStreams().size());
+
+ cricket::StreamParams params = cricket::StreamParams::CreateLegacy(1);
+ params.AddFidSsrc(1, 2);
+ EXPECT_TRUE(receive_channel_->AddRecvStream(params));
+}
+
+void WebRtcVideoChannelTest::TestReceiverLocalSsrcConfiguration(
+ bool receiver_first) {
+ EXPECT_TRUE(send_channel_->SetSenderParameters(send_parameters_));
+
+ const uint32_t kSenderSsrc = 0xC0FFEE;
+ const uint32_t kSecondSenderSsrc = 0xBADCAFE;
+ const uint32_t kReceiverSsrc = 0x4711;
+ const uint32_t kExpectedDefaultReceiverSsrc = 1;
+
+ if (receiver_first) {
+ AddRecvStream(StreamParams::CreateLegacy(kReceiverSsrc));
+ std::vector<FakeVideoReceiveStream*> receive_streams =
+ fake_call_->GetVideoReceiveStreams();
+ ASSERT_EQ(1u, receive_streams.size());
+ // Default local SSRC when we have no sender.
+ EXPECT_EQ(kExpectedDefaultReceiverSsrc,
+ receive_streams[0]->GetConfig().rtp.local_ssrc);
+ }
+ AddSendStream(StreamParams::CreateLegacy(kSenderSsrc));
+ if (!receiver_first)
+ AddRecvStream(StreamParams::CreateLegacy(kReceiverSsrc));
+ std::vector<FakeVideoReceiveStream*> receive_streams =
+ fake_call_->GetVideoReceiveStreams();
+ ASSERT_EQ(1u, receive_streams.size());
+ EXPECT_EQ(kSenderSsrc, receive_streams[0]->GetConfig().rtp.local_ssrc);
+
+ // Removing first sender should fall back to another (in this case the second)
+ // local send stream's SSRC.
+ AddSendStream(StreamParams::CreateLegacy(kSecondSenderSsrc));
+ ASSERT_TRUE(send_channel_->RemoveSendStream(kSenderSsrc));
+ receive_streams = fake_call_->GetVideoReceiveStreams();
+ ASSERT_EQ(1u, receive_streams.size());
+ EXPECT_EQ(kSecondSenderSsrc, receive_streams[0]->GetConfig().rtp.local_ssrc);
+
+ // Removing the last sender should fall back to default local SSRC.
+ ASSERT_TRUE(send_channel_->RemoveSendStream(kSecondSenderSsrc));
+ receive_streams = fake_call_->GetVideoReceiveStreams();
+ ASSERT_EQ(1u, receive_streams.size());
+ EXPECT_EQ(kExpectedDefaultReceiverSsrc,
+ receive_streams[0]->GetConfig().rtp.local_ssrc);
+}
+
// Exercises the local-SSRC fallback logic with the receive stream created
// after the first send stream.
TEST_F(WebRtcVideoChannelTest, ConfiguresLocalSsrc) {
  TestReceiverLocalSsrcConfiguration(false);
}
+
// Exercises the local-SSRC fallback logic with the receive stream created
// before any send stream exists.
TEST_F(WebRtcVideoChannelTest, ConfiguresLocalSsrcOnExistingReceivers) {
  TestReceiverLocalSsrcConfiguration(true);
}
+
// With multiple simulcast layers configured, the encoder config must mark
// quality scaling as not allowed.
TEST_F(WebRtcVideoChannelTest, Simulcast_QualityScalingNotAllowed) {
  FakeVideoSendStream* stream = SetUpSimulcast(true, /*with_rtx=*/true);
  EXPECT_FALSE(stream->GetEncoderConfig().is_quality_scaling_allowed);
}
+
// With a single (non-simulcast) stream, the encoder config must mark quality
// scaling as allowed.
TEST_F(WebRtcVideoChannelTest, Singlecast_QualityScalingAllowed) {
  FakeVideoSendStream* stream = SetUpSimulcast(false, /*with_rtx=*/true);
  EXPECT_TRUE(stream->GetEncoderConfig().is_quality_scaling_allowed);
}
+
+TEST_F(WebRtcVideoChannelTest,
+ SinglecastScreenSharing_QualityScalingNotAllowed) {
+ SetUpSimulcast(false, /*with_rtx=*/true);
+
+ webrtc::test::FrameForwarder frame_forwarder;
+ VideoOptions options;
+ options.is_screencast = true;
+ EXPECT_TRUE(
+ send_channel_->SetVideoSend(last_ssrc_, &options, &frame_forwarder));
+ // Fetch the latest stream since SetVideoSend() may recreate it if the
+ // screen content setting is changed.
+ FakeVideoSendStream* stream = fake_call_->GetVideoSendStreams().front();
+
+ EXPECT_FALSE(stream->GetEncoderConfig().is_quality_scaling_allowed);
+ EXPECT_TRUE(send_channel_->SetVideoSend(last_ssrc_, nullptr, nullptr));
+}
+
+TEST_F(WebRtcVideoChannelTest,
+ SimulcastSingleActiveStream_QualityScalingAllowed) {
+ FakeVideoSendStream* stream = SetUpSimulcast(true, /*with_rtx=*/false);
+
+ webrtc::RtpParameters rtp_parameters =
+ send_channel_->GetRtpSendParameters(last_ssrc_);
+ ASSERT_EQ(3u, rtp_parameters.encodings.size());
+ ASSERT_TRUE(rtp_parameters.encodings[0].active);
+ ASSERT_TRUE(rtp_parameters.encodings[1].active);
+ ASSERT_TRUE(rtp_parameters.encodings[2].active);
+ rtp_parameters.encodings[0].active = false;
+ rtp_parameters.encodings[1].active = false;
+ EXPECT_TRUE(
+ send_channel_->SetRtpSendParameters(last_ssrc_, rtp_parameters).ok());
+ EXPECT_TRUE(stream->GetEncoderConfig().is_quality_scaling_allowed);
+}
+
+TEST_F(WebRtcVideoChannelTest, GenerateKeyFrameSinglecast) {
+ FakeVideoSendStream* stream = AddSendStream();
+
+ webrtc::RtpParameters rtp_parameters =
+ send_channel_->GetRtpSendParameters(last_ssrc_);
+ ASSERT_EQ(1u, rtp_parameters.encodings.size());
+ EXPECT_EQ(rtp_parameters.encodings[0].rid, "");
+ EXPECT_TRUE(
+ send_channel_->SetRtpSendParameters(last_ssrc_, rtp_parameters).ok());
+ EXPECT_THAT(stream->GetKeyFramesRequested(), std::vector<std::string>({}));
+
+ // Manually set the key frames requested to check they are cleared by the next
+ // call.
+ stream->GenerateKeyFrame({"bogus"});
+ rtp_parameters.encodings[0].request_key_frame = true;
+ EXPECT_TRUE(
+ send_channel_->SetRtpSendParameters(last_ssrc_, rtp_parameters).ok());
+ EXPECT_THAT(stream->GetKeyFramesRequested(),
+ ElementsAreArray(std::vector<std::string>({})));
+}
+
+TEST_F(WebRtcVideoChannelTest, GenerateKeyFrameSimulcast) {
+ StreamParams stream_params = CreateSimStreamParams("cname", {123, 456, 789});
+
+ std::vector<std::string> rids = {"f", "h", "q"};
+ std::vector<cricket::RidDescription> rid_descriptions;
+ for (const auto& rid : rids) {
+ rid_descriptions.emplace_back(rid, cricket::RidDirection::kSend);
+ }
+ stream_params.set_rids(rid_descriptions);
+ FakeVideoSendStream* stream = AddSendStream(stream_params);
+
+ webrtc::RtpParameters rtp_parameters =
+ send_channel_->GetRtpSendParameters(last_ssrc_);
+ ASSERT_EQ(3u, rtp_parameters.encodings.size());
+ EXPECT_EQ(rtp_parameters.encodings[0].rid, "f");
+ EXPECT_EQ(rtp_parameters.encodings[1].rid, "h");
+ EXPECT_EQ(rtp_parameters.encodings[2].rid, "q");
+
+ EXPECT_TRUE(
+ send_channel_->SetRtpSendParameters(last_ssrc_, rtp_parameters).ok());
+ EXPECT_THAT(stream->GetKeyFramesRequested(),
+ ElementsAreArray(std::vector<std::string>({})));
+
+ rtp_parameters.encodings[0].request_key_frame = true;
+ EXPECT_TRUE(
+ send_channel_->SetRtpSendParameters(last_ssrc_, rtp_parameters).ok());
+ EXPECT_THAT(stream->GetKeyFramesRequested(), ElementsAreArray({"f"}));
+
+ rtp_parameters.encodings[0].request_key_frame = true;
+ rtp_parameters.encodings[1].request_key_frame = true;
+ EXPECT_TRUE(
+ send_channel_->SetRtpSendParameters(last_ssrc_, rtp_parameters).ok());
+ EXPECT_THAT(stream->GetKeyFramesRequested(), ElementsAreArray({"f", "h"}));
+
+ rtp_parameters.encodings[0].request_key_frame = true;
+ rtp_parameters.encodings[1].request_key_frame = true;
+ rtp_parameters.encodings[2].request_key_frame = true;
+ EXPECT_TRUE(
+ send_channel_->SetRtpSendParameters(last_ssrc_, rtp_parameters).ok());
+ EXPECT_THAT(stream->GetKeyFramesRequested(),
+ ElementsAreArray({"f", "h", "q"}));
+
+ rtp_parameters.encodings[0].request_key_frame = true;
+ rtp_parameters.encodings[1].request_key_frame = false;
+ rtp_parameters.encodings[2].request_key_frame = true;
+ EXPECT_TRUE(
+ send_channel_->SetRtpSendParameters(last_ssrc_, rtp_parameters).ok());
+ EXPECT_THAT(stream->GetKeyFramesRequested(), ElementsAreArray({"f", "q"}));
+
+ rtp_parameters.encodings[0].request_key_frame = false;
+ rtp_parameters.encodings[1].request_key_frame = false;
+ rtp_parameters.encodings[2].request_key_frame = true;
+ EXPECT_TRUE(
+ send_channel_->SetRtpSendParameters(last_ssrc_, rtp_parameters).ok());
+ EXPECT_THAT(stream->GetKeyFramesRequested(), ElementsAreArray({"q"}));
+}
+
+// Fixture for verifying how WebRtcVideoEngine configures simulcast layers on
+// its send streams.  Uses fake VP8 encoder/decoder factories and a FakeCall,
+// so the resulting webrtc::VideoStream configuration can be inspected without
+// running a real media pipeline.
+class WebRtcVideoChannelSimulcastTest : public ::testing::Test {
+ public:
+  WebRtcVideoChannelSimulcastTest()
+      : fake_call_(),
+        encoder_factory_(new cricket::FakeWebRtcVideoEncoderFactory),
+        decoder_factory_(new cricket::FakeWebRtcVideoDecoderFactory),
+        mock_rate_allocator_factory_(
+            std::make_unique<webrtc::MockVideoBitrateAllocatorFactory>()),
+        engine_(std::unique_ptr<cricket::FakeWebRtcVideoEncoderFactory>(
+                    encoder_factory_),
+                std::unique_ptr<cricket::FakeWebRtcVideoDecoderFactory>(
+                    decoder_factory_),
+                field_trials_),
+        last_ssrc_(0) {}
+
+  void SetUp() override {
+    encoder_factory_->AddSupportedVideoCodecType("VP8");
+    decoder_factory_->AddSupportedVideoCodecType("VP8");
+    send_channel_ = engine_.CreateSendChannel(
+        &fake_call_, GetMediaConfig(), VideoOptions(), webrtc::CryptoOptions(),
+        mock_rate_allocator_factory_.get());
+    receive_channel_ = engine_.CreateReceiveChannel(
+        &fake_call_, GetMediaConfig(), VideoOptions(), webrtc::CryptoOptions());
+    send_channel_->OnReadyToSend(true);
+    receive_channel_->SetReceive(true);
+    last_ssrc_ = 123;
+  }
+
+ protected:
+  // Applies `codec` with `num_configured_streams` simulcast SSRCs, feeds one
+  // `capture_width` x `capture_height` frame, and checks that the send stream
+  // is reconfigured into `expected_num_streams` layers whose resolution,
+  // bitrates, framerate and max QP match the expected simulcast (or
+  // single-stream) configuration.
+  void VerifySimulcastSettings(const VideoCodec& codec,
+                               int capture_width,
+                               int capture_height,
+                               size_t num_configured_streams,
+                               size_t expected_num_streams,
+                               bool screenshare,
+                               bool conference_mode) {
+    cricket::VideoSenderParameters parameters;
+    parameters.codecs.push_back(codec);
+    parameters.conference_mode = conference_mode;
+    ASSERT_TRUE(send_channel_->SetSenderParameters(parameters));
+
+    std::vector<uint32_t> ssrcs = MAKE_VECTOR(kSsrcs3);
+    RTC_DCHECK(num_configured_streams <= ssrcs.size());
+    ssrcs.resize(num_configured_streams);
+
+    AddSendStream(CreateSimStreamParams("cname", ssrcs));
+    // Send a full-size frame to trigger a stream reconfiguration to use all
+    // expected simulcast layers.
+    webrtc::test::FrameForwarder frame_forwarder;
+    cricket::FakeFrameSource frame_source(capture_width, capture_height,
+                                          rtc::kNumMicrosecsPerSec / 30);
+
+    VideoOptions options;
+    if (screenshare)
+      options.is_screencast = screenshare;
+    EXPECT_TRUE(
+        send_channel_->SetVideoSend(ssrcs.front(), &options, &frame_forwarder));
+    // Fetch the latest stream since SetVideoSend() may recreate it if the
+    // screen content setting is changed.
+    FakeVideoSendStream* stream = fake_call_.GetVideoSendStreams().front();
+    send_channel_->SetSend(true);
+    frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame());
+
+    auto rtp_parameters = send_channel_->GetRtpSendParameters(kSsrcs3[0]);
+    EXPECT_EQ(num_configured_streams, rtp_parameters.encodings.size());
+
+    std::vector<webrtc::VideoStream> video_streams = stream->GetVideoStreams();
+    ASSERT_EQ(expected_num_streams, video_streams.size());
+    EXPECT_LE(expected_num_streams, stream->GetConfig().rtp.ssrcs.size());
+
+    // Build the expected layer list: multi-stream (or conference-mode) cases
+    // use the shared simulcast config helper; otherwise a single full-size
+    // stream with default bitrates/QP is expected.
+    std::vector<webrtc::VideoStream> expected_streams;
+    if (num_configured_streams > 1 || conference_mode) {
+      expected_streams = GetSimulcastConfig(
+          /*min_layers=*/1, num_configured_streams, capture_width,
+          capture_height, webrtc::kDefaultBitratePriority,
+          kDefaultVideoMaxQpVpx, screenshare && conference_mode, true,
+          field_trials_);
+      if (screenshare && conference_mode) {
+        for (const webrtc::VideoStream& stream : expected_streams) {
+          // Never scale screen content.
+          EXPECT_EQ(stream.width, rtc::checked_cast<size_t>(capture_width));
+          EXPECT_EQ(stream.height, rtc::checked_cast<size_t>(capture_height));
+        }
+      }
+    } else {
+      webrtc::VideoStream stream;
+      stream.width = capture_width;
+      stream.height = capture_height;
+      stream.max_framerate = kDefaultVideoMaxFramerate;
+      stream.min_bitrate_bps = webrtc::kDefaultMinVideoBitrateBps;
+      stream.target_bitrate_bps = stream.max_bitrate_bps =
+          GetMaxDefaultBitrateBps(capture_width, capture_height);
+      stream.max_qp = kDefaultVideoMaxQpVpx;
+      expected_streams.push_back(stream);
+    }
+
+    ASSERT_EQ(expected_streams.size(), video_streams.size());
+
+    // Compare each produced layer field-by-field against the expectation.
+    size_t num_streams = video_streams.size();
+    for (size_t i = 0; i < num_streams; ++i) {
+      EXPECT_EQ(expected_streams[i].width, video_streams[i].width);
+      EXPECT_EQ(expected_streams[i].height, video_streams[i].height);
+
+      EXPECT_GT(video_streams[i].max_framerate, 0);
+      EXPECT_EQ(expected_streams[i].max_framerate,
+                video_streams[i].max_framerate);
+
+      EXPECT_GT(video_streams[i].min_bitrate_bps, 0);
+      EXPECT_EQ(expected_streams[i].min_bitrate_bps,
+                video_streams[i].min_bitrate_bps);
+
+      EXPECT_GT(video_streams[i].target_bitrate_bps, 0);
+      EXPECT_EQ(expected_streams[i].target_bitrate_bps,
+                video_streams[i].target_bitrate_bps);
+
+      EXPECT_GT(video_streams[i].max_bitrate_bps, 0);
+      EXPECT_EQ(expected_streams[i].max_bitrate_bps,
+                video_streams[i].max_bitrate_bps);
+
+      EXPECT_GT(video_streams[i].max_qp, 0);
+      EXPECT_EQ(expected_streams[i].max_qp, video_streams[i].max_qp);
+
+      EXPECT_EQ(num_configured_streams > 1 || conference_mode,
+                expected_streams[i].num_temporal_layers.has_value());
+
+      if (conference_mode) {
+        EXPECT_EQ(expected_streams[i].num_temporal_layers,
+                  video_streams[i].num_temporal_layers);
+      }
+    }
+
+    EXPECT_TRUE(send_channel_->SetVideoSend(ssrcs.front(), nullptr, nullptr));
+  }
+
+  // Adds a legacy single-SSRC send stream using the next free SSRC.
+  FakeVideoSendStream* AddSendStream() {
+    return AddSendStream(StreamParams::CreateLegacy(last_ssrc_++));
+  }
+
+  // Adds a send stream for `sp` and returns the newly created fake stream.
+  FakeVideoSendStream* AddSendStream(const StreamParams& sp) {
+    size_t num_streams = fake_call_.GetVideoSendStreams().size();
+    EXPECT_TRUE(send_channel_->AddSendStream(sp));
+    std::vector<FakeVideoSendStream*> streams =
+        fake_call_.GetVideoSendStreams();
+    EXPECT_EQ(num_streams + 1, streams.size());
+    return streams[streams.size() - 1];
+  }
+
+  std::vector<FakeVideoSendStream*> GetFakeSendStreams() {
+    return fake_call_.GetVideoSendStreams();
+  }
+
+  // Adds a legacy single-SSRC receive stream using the next free SSRC.
+  FakeVideoReceiveStream* AddRecvStream() {
+    return AddRecvStream(StreamParams::CreateLegacy(last_ssrc_++));
+  }
+
+  // Adds a receive stream for `sp` and returns the newly created fake stream.
+  FakeVideoReceiveStream* AddRecvStream(const StreamParams& sp) {
+    size_t num_streams = fake_call_.GetVideoReceiveStreams().size();
+    EXPECT_TRUE(receive_channel_->AddRecvStream(sp));
+    std::vector<FakeVideoReceiveStream*> streams =
+        fake_call_.GetVideoReceiveStreams();
+    EXPECT_EQ(num_streams + 1, streams.size());
+    return streams[streams.size() - 1];
+  }
+
+  webrtc::test::ScopedKeyValueConfig field_trials_;
+  webrtc::RtcEventLogNull event_log_;
+  FakeCall fake_call_;
+  // Raw pointers; ownership is transferred to `engine_` in the constructor.
+  cricket::FakeWebRtcVideoEncoderFactory* encoder_factory_;
+  cricket::FakeWebRtcVideoDecoderFactory* decoder_factory_;
+  std::unique_ptr<webrtc::MockVideoBitrateAllocatorFactory>
+      mock_rate_allocator_factory_;
+  WebRtcVideoEngine engine_;
+  std::unique_ptr<VideoMediaSendChannelInterface> send_channel_;
+  std::unique_ptr<VideoMediaReceiveChannelInterface> receive_channel_;
+  uint32_t last_ssrc_;
+};
+
+// Two configured simulcast SSRCs in conference mode yield two layers.
+TEST_F(WebRtcVideoChannelSimulcastTest, SetSendCodecsWith2SimulcastStreams) {
+  VerifySimulcastSettings(cricket::CreateVideoCodec("VP8"), 640, 360, 2, 2,
+                          false, true);
+}
+
+// Three configured simulcast SSRCs in conference mode yield three layers.
+TEST_F(WebRtcVideoChannelSimulcastTest, SetSendCodecsWith3SimulcastStreams) {
+  VerifySimulcastSettings(cricket::CreateVideoCodec("VP8"), 1280, 720, 3, 3,
+                          false, true);
+}
+
+// Test that we normalize send codec format size in simulcast: odd capture
+// dimensions (541x271) must still produce a valid two-layer configuration.
+TEST_F(WebRtcVideoChannelSimulcastTest, SetSendCodecsWithOddSizeInSimulcast) {
+  VerifySimulcastSettings(cricket::CreateVideoCodec("VP8"), 541, 271, 2, 2,
+                          false, true);
+}
+
+// Screenshare without conference mode keeps all three configured layers.
+TEST_F(WebRtcVideoChannelSimulcastTest, SetSendCodecsForScreenshare) {
+  VerifySimulcastSettings(cricket::CreateVideoCodec("VP8"), 1280, 720, 3, 3,
+                          true, false);
+}
+
+// Conference-mode screenshare uses the screenshare simulcast config, which
+// reduces the three configured layers to two.
+TEST_F(WebRtcVideoChannelSimulcastTest, SetSendCodecsForSimulcastScreenshare) {
+  VerifySimulcastSettings(cricket::CreateVideoCodec("VP8"), 1280, 720, 3, 2,
+                          true, true);
+}
+
+// Without conference mode, screenshare does not switch to the two-layer
+// screenshare simulcast config; all three configured layers remain.
+TEST_F(WebRtcVideoChannelSimulcastTest, SimulcastScreenshareWithoutConference) {
+  VerifySimulcastSettings(cricket::CreateVideoCodec("VP8"), 1280, 720, 3, 3,
+                          true, false);
+}
+
+// Verifies GetSources(): only the SSRC actually receiving media reports a
+// source entry, and its RTP timestamp and local timestamp advance between
+// received frames.
+TEST_F(WebRtcVideoChannelBaseTest, GetSources) {
+  EXPECT_THAT(receive_channel_->GetSources(kSsrc), IsEmpty());
+
+  receive_channel_->SetDefaultSink(&renderer_);
+  EXPECT_TRUE(SetDefaultCodec());
+  EXPECT_TRUE(SetSend(true));
+  EXPECT_EQ(renderer_.num_rendered_frames(), 0);
+
+  // Send and receive one frame.
+  SendFrame();
+  EXPECT_FRAME(1, kVideoWidth, kVideoHeight);
+
+  // Neighboring SSRCs must not report any source.
+  EXPECT_THAT(receive_channel_->GetSources(kSsrc - 1), IsEmpty());
+  EXPECT_THAT(receive_channel_->GetSources(kSsrc), SizeIs(1));
+  EXPECT_THAT(receive_channel_->GetSources(kSsrc + 1), IsEmpty());
+
+  webrtc::RtpSource source = receive_channel_->GetSources(kSsrc)[0];
+  EXPECT_EQ(source.source_id(), kSsrc);
+  EXPECT_EQ(source.source_type(), webrtc::RtpSourceType::SSRC);
+  int64_t rtp_timestamp_1 = source.rtp_timestamp();
+  Timestamp timestamp_1 = source.timestamp();
+
+  // Send and receive another frame.
+  SendFrame();
+  EXPECT_FRAME(2, kVideoWidth, kVideoHeight);
+
+  EXPECT_THAT(receive_channel_->GetSources(kSsrc - 1), IsEmpty());
+  EXPECT_THAT(receive_channel_->GetSources(kSsrc), SizeIs(1));
+  EXPECT_THAT(receive_channel_->GetSources(kSsrc + 1), IsEmpty());
+
+  source = receive_channel_->GetSources(kSsrc)[0];
+  EXPECT_EQ(source.source_id(), kSsrc);
+  EXPECT_EQ(source.source_type(), webrtc::RtpSourceType::SSRC);
+  int64_t rtp_timestamp_2 = source.rtp_timestamp();
+  Timestamp timestamp_2 = source.timestamp();
+
+  // Both timestamps must advance monotonically between the two frames.
+  EXPECT_GT(rtp_timestamp_2, rtp_timestamp_1);
+  EXPECT_GT(timestamp_2, timestamp_1);
+}
+
+// Verifies that RIDs set on StreamParams are propagated, in order, into the
+// created send stream's RTP configuration.
+TEST_F(WebRtcVideoChannelTest, SetsRidsOnSendStream) {
+  StreamParams sp = CreateSimStreamParams("cname", {123, 456, 789});
+
+  std::vector<std::string> rids = {"f", "h", "q"};
+  std::vector<cricket::RidDescription> rid_descriptions;
+  for (const auto& rid : rids) {
+    rid_descriptions.emplace_back(rid, cricket::RidDirection::kSend);
+  }
+  sp.set_rids(rid_descriptions);
+
+  ASSERT_TRUE(send_channel_->AddSendStream(sp));
+  const auto& streams = fake_call_->GetVideoSendStreams();
+  ASSERT_EQ(1u, streams.size());
+  auto stream = streams[0];
+  ASSERT_NE(stream, nullptr);
+  const auto& config = stream->GetConfig();
+  EXPECT_THAT(config.rtp.rids, ElementsAreArray(rids));
+}
+
+// Verifies that an encoder selector whose OnAvailableBitrate returns VP9
+// causes the send codec to switch away from the initially negotiated VP8.
+TEST_F(WebRtcVideoChannelBaseTest, EncoderSelectorSwitchCodec) {
+  VideoCodec vp9 = GetEngineCodec("VP9");
+
+  cricket::VideoSenderParameters parameters;
+  parameters.codecs.push_back(GetEngineCodec("VP8"));
+  parameters.codecs.push_back(vp9);
+  EXPECT_TRUE(send_channel_->SetSenderParameters(parameters));
+  send_channel_->SetSend(true);
+
+  // VP8 is listed first, so it is the initial send codec.
+  absl::optional<VideoCodec> codec = send_channel_->GetSendCodec();
+  ASSERT_TRUE(codec);
+  EXPECT_EQ("VP8", codec->name);
+
+  webrtc::MockEncoderSelector encoder_selector;
+  EXPECT_CALL(encoder_selector, OnAvailableBitrate)
+      .WillRepeatedly(Return(webrtc::SdpVideoFormat("VP9")));
+
+  send_channel_->SetEncoderSelector(kSsrc, &encoder_selector);
+  time_controller_.AdvanceTime(kFrameDuration);
+
+  codec = send_channel_->GetSendCodec();
+  ASSERT_TRUE(codec);
+  EXPECT_EQ("VP9", codec->name);
+
+  // Deregister the encoder selector in case it's called during test tear-down.
+  send_channel_->SetEncoderSelector(kSsrc, nullptr);
+}
+
+// Verifies requested_resolution on a single encoding: the produced stream is
+// scaled down to the requested size, matches it exactly when equal to the
+// capture size, and is never upscaled beyond the captured 1280x720 frame.
+TEST_F(WebRtcVideoChannelTest, RequestedResolutionSinglecast) {
+  cricket::VideoSenderParameters parameters;
+  parameters.codecs.push_back(GetEngineCodec("VP8"));
+  ASSERT_TRUE(send_channel_->SetSenderParameters(parameters));
+
+  FakeVideoSendStream* stream = AddSendStream();
+  webrtc::test::FrameForwarder frame_forwarder;
+  cricket::FakeFrameSource frame_source(1280, 720,
+                                        rtc::kNumMicrosecsPerSec / 30);
+  EXPECT_TRUE(
+      send_channel_->SetVideoSend(last_ssrc_, nullptr, &frame_forwarder));
+
+  {  // TEST requested_resolution < frame size
+    webrtc::RtpParameters rtp_parameters =
+        send_channel_->GetRtpSendParameters(last_ssrc_);
+    EXPECT_EQ(1UL, rtp_parameters.encodings.size());
+    rtp_parameters.encodings[0].requested_resolution = {.width = 640,
+                                                        .height = 360};
+    send_channel_->SetRtpSendParameters(last_ssrc_, rtp_parameters);
+
+    frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame());
+
+    auto streams = stream->GetVideoStreams();
+    ASSERT_EQ(streams.size(), 1u);
+    EXPECT_EQ(rtc::checked_cast<size_t>(640), streams[0].width);
+    EXPECT_EQ(rtc::checked_cast<size_t>(360), streams[0].height);
+  }
+
+  {  // TEST requested_resolution == frame size
+    auto rtp_parameters = send_channel_->GetRtpSendParameters(last_ssrc_);
+    EXPECT_EQ(1UL, rtp_parameters.encodings.size());
+    rtp_parameters.encodings[0].requested_resolution = {.width = 1280,
+                                                        .height = 720};
+    send_channel_->SetRtpSendParameters(last_ssrc_, rtp_parameters);
+
+    frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame());
+    auto streams = stream->GetVideoStreams();
+    ASSERT_EQ(streams.size(), 1u);
+    EXPECT_EQ(rtc::checked_cast<size_t>(1280), streams[0].width);
+    EXPECT_EQ(rtc::checked_cast<size_t>(720), streams[0].height);
+  }
+
+  {  // TEST requested_resolution > frame size
+    auto rtp_parameters = send_channel_->GetRtpSendParameters(last_ssrc_);
+    EXPECT_EQ(1UL, rtp_parameters.encodings.size());
+    rtp_parameters.encodings[0].requested_resolution = {.width = 2 * 1280,
+                                                        .height = 2 * 720};
+    send_channel_->SetRtpSendParameters(last_ssrc_, rtp_parameters);
+
+    // The stream must stay capped at the captured frame size (no upscaling).
+    frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame());
+    auto streams = stream->GetVideoStreams();
+    ASSERT_EQ(streams.size(), 1u);
+    EXPECT_EQ(rtc::checked_cast<size_t>(1280), streams[0].width);
+    EXPECT_EQ(rtc::checked_cast<size_t>(720), streams[0].height);
+  }
+
+  EXPECT_TRUE(send_channel_->SetVideoSend(last_ssrc_, nullptr, nullptr));
+}
+
+// Verifies requested_resolution with an aspect ratio different from the
+// 1280x720 capture (square requests): the produced stream follows the
+// requested square shape without exceeding the captured dimensions.
+TEST_F(WebRtcVideoChannelTest, RequestedResolutionSinglecastCropping) {
+  cricket::VideoSenderParameters parameters;
+  parameters.codecs.push_back(GetEngineCodec("VP8"));
+  ASSERT_TRUE(send_channel_->SetSenderParameters(parameters));
+
+  FakeVideoSendStream* stream = AddSendStream();
+  webrtc::test::FrameForwarder frame_forwarder;
+  cricket::FakeFrameSource frame_source(1280, 720,
+                                        rtc::kNumMicrosecsPerSec / 30);
+  EXPECT_TRUE(
+      send_channel_->SetVideoSend(last_ssrc_, nullptr, &frame_forwarder));
+
+  {
+    auto rtp_parameters = send_channel_->GetRtpSendParameters(last_ssrc_);
+    EXPECT_EQ(1UL, rtp_parameters.encodings.size());
+    rtp_parameters.encodings[0].requested_resolution = {.width = 720,
+                                                        .height = 720};
+    send_channel_->SetRtpSendParameters(last_ssrc_, rtp_parameters);
+
+    frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame());
+
+    auto streams = stream->GetVideoStreams();
+    ASSERT_EQ(streams.size(), 1u);
+    EXPECT_EQ(rtc::checked_cast<size_t>(720), streams[0].width);
+    EXPECT_EQ(rtc::checked_cast<size_t>(720), streams[0].height);
+  }
+
+  {
+    // 1280x1280 exceeds the capture height, so the result stays at 720x720.
+    auto rtp_parameters = send_channel_->GetRtpSendParameters(last_ssrc_);
+    EXPECT_EQ(1UL, rtp_parameters.encodings.size());
+    rtp_parameters.encodings[0].requested_resolution = {.width = 1280,
+                                                        .height = 1280};
+    send_channel_->SetRtpSendParameters(last_ssrc_, rtp_parameters);
+
+    frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame());
+
+    auto streams = stream->GetVideoStreams();
+    ASSERT_EQ(streams.size(), 1u);
+    EXPECT_EQ(rtc::checked_cast<size_t>(720), streams[0].width);
+    EXPECT_EQ(rtc::checked_cast<size_t>(720), streams[0].height);
+  }
+
+  {
+    // NOTE(review): unlike the cases above, no new frame is fed here, so this
+    // exercises reconfiguration of the existing stream alone; the 650x650
+    // request presumably snaps to 480x480 via scale-factor alignment —
+    // confirm against the resolution-alignment logic.
+    auto rtp_parameters = send_channel_->GetRtpSendParameters(last_ssrc_);
+    EXPECT_EQ(1UL, rtp_parameters.encodings.size());
+    rtp_parameters.encodings[0].requested_resolution = {.width = 650,
+                                                        .height = 650};
+    send_channel_->SetRtpSendParameters(last_ssrc_, rtp_parameters);
+
+    auto streams = stream->GetVideoStreams();
+    ASSERT_EQ(streams.size(), 1u);
+    EXPECT_EQ(rtc::checked_cast<size_t>(480), streams[0].width);
+    EXPECT_EQ(rtc::checked_cast<size_t>(480), streams[0].height);
+  }
+
+  EXPECT_TRUE(send_channel_->SetVideoSend(last_ssrc_, nullptr, nullptr));
+}
+
+// Verifies per-layer requested_resolution across three simulcast encodings,
+// including deactivating the middle layer (its resolution disappears from the
+// produced streams) and reactivating it with a new size.
+TEST_F(WebRtcVideoChannelTest, RequestedResolutionSimulcast) {
+  cricket::VideoSenderParameters parameters;
+  parameters.codecs.push_back(GetEngineCodec("VP8"));
+  ASSERT_TRUE(send_channel_->SetSenderParameters(parameters));
+
+  FakeVideoSendStream* stream = SetUpSimulcast(true, /*with_rtx=*/false);
+  webrtc::test::FrameForwarder frame_forwarder;
+  cricket::FakeFrameSource frame_source(1280, 720,
+                                        rtc::kNumMicrosecsPerSec / 30);
+  EXPECT_TRUE(
+      send_channel_->SetVideoSend(last_ssrc_, nullptr, &frame_forwarder));
+
+  {
+    // All three layers active, classic 4:2:1 resolution ladder.
+    webrtc::RtpParameters rtp_parameters =
+        send_channel_->GetRtpSendParameters(last_ssrc_);
+    EXPECT_EQ(3UL, rtp_parameters.encodings.size());
+    rtp_parameters.encodings[0].requested_resolution = {.width = 320,
+                                                        .height = 180};
+    rtp_parameters.encodings[1].requested_resolution = {.width = 640,
+                                                        .height = 360};
+    rtp_parameters.encodings[2].requested_resolution = {.width = 1280,
+                                                        .height = 720};
+    send_channel_->SetRtpSendParameters(last_ssrc_, rtp_parameters);
+
+    frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame());
+
+    EXPECT_EQ(GetStreamResolutions(stream->GetVideoStreams()),
+              (std::vector<webrtc::Resolution>{
+                  {.width = 320, .height = 180},
+                  {.width = 640, .height = 360},
+                  {.width = 1280, .height = 720},
+              }));
+  }
+
+  {
+    // Deactivating the middle layer removes it from the produced streams.
+    webrtc::RtpParameters rtp_parameters =
+        send_channel_->GetRtpSendParameters(last_ssrc_);
+    EXPECT_EQ(3UL, rtp_parameters.encodings.size());
+    rtp_parameters.encodings[0].requested_resolution = {.width = 320,
+                                                        .height = 180};
+    rtp_parameters.encodings[1].active = false;
+
+    rtp_parameters.encodings[2].requested_resolution = {.width = 1280,
+                                                        .height = 720};
+    send_channel_->SetRtpSendParameters(last_ssrc_, rtp_parameters);
+
+    frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame());
+
+    EXPECT_EQ(GetStreamResolutions(stream->GetVideoStreams()),
+              (std::vector<webrtc::Resolution>{
+                  {.width = 320, .height = 180},
+                  {.width = 1280, .height = 720},
+              }));
+  }
+
+  {
+    // Reactivating the middle layer and shrinking the top layer takes effect.
+    webrtc::RtpParameters rtp_parameters =
+        send_channel_->GetRtpSendParameters(last_ssrc_);
+    EXPECT_EQ(3UL, rtp_parameters.encodings.size());
+    rtp_parameters.encodings[0].requested_resolution = {.width = 320,
+                                                        .height = 180};
+    rtp_parameters.encodings[1].active = true;
+    rtp_parameters.encodings[1].requested_resolution = {.width = 640,
+                                                        .height = 360};
+    rtp_parameters.encodings[2].requested_resolution = {.width = 960,
+                                                        .height = 540};
+    send_channel_->SetRtpSendParameters(last_ssrc_, rtp_parameters);
+
+    frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame());
+
+    EXPECT_EQ(GetStreamResolutions(stream->GetVideoStreams()),
+              (std::vector<webrtc::Resolution>{
+                  {.width = 320, .height = 180},
+                  {.width = 640, .height = 360},
+                  {.width = 960, .height = 540},
+              }));
+  }
+
+  EXPECT_TRUE(send_channel_->SetVideoSend(last_ssrc_, nullptr, nullptr));
+}
+
+} // namespace cricket
diff --git a/third_party/libwebrtc/media/engine/webrtc_voice_engine.cc b/third_party/libwebrtc/media/engine/webrtc_voice_engine.cc
new file mode 100644
index 0000000000..adf8b5c51d
--- /dev/null
+++ b/third_party/libwebrtc/media/engine/webrtc_voice_engine.cc
@@ -0,0 +1,2725 @@
+/*
+ * Copyright (c) 2004 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/engine/webrtc_voice_engine.h"
+
+#include <algorithm>
+#include <atomic>
+#include <cstdint>
+#include <functional>
+#include <initializer_list>
+#include <iterator>
+#include <memory>
+#include <string>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include "absl/algorithm/algorithm.h"
+#include "absl/algorithm/container.h"
+#include "absl/functional/bind_front.h"
+#include "absl/strings/match.h"
+#include "absl/types/optional.h"
+#include "api/audio/audio_frame.h"
+#include "api/audio/audio_frame_processor.h"
+#include "api/audio_codecs/audio_codec_pair_id.h"
+#include "api/audio_codecs/audio_encoder.h"
+#include "api/call/audio_sink.h"
+#include "api/field_trials_view.h"
+#include "api/make_ref_counted.h"
+#include "api/media_types.h"
+#include "api/priority.h"
+#include "api/rtp_headers.h"
+#include "api/rtp_parameters.h"
+#include "api/rtp_transceiver_direction.h"
+#include "api/task_queue/pending_task_safety_flag.h"
+#include "api/transport/bitrate_settings.h"
+#include "api/units/data_rate.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "call/audio_receive_stream.h"
+#include "call/packet_receiver.h"
+#include "call/rtp_config.h"
+#include "call/rtp_transport_controller_send_interface.h"
+#include "media/base/audio_source.h"
+#include "media/base/codec.h"
+#include "media/base/media_constants.h"
+#include "media/base/stream_params.h"
+#include "media/engine/adm_helpers.h"
+#include "media/engine/payload_type_mapper.h"
+#include "media/engine/webrtc_media_engine.h"
+#include "modules/async_audio_processing/async_audio_processing.h"
+#include "modules/audio_mixer/audio_mixer_impl.h"
+#include "modules/audio_processing/include/audio_processing.h"
+#include "modules/audio_processing/include/audio_processing_statistics.h"
+#include "modules/rtp_rtcp/include/report_block_data.h"
+#include "modules/rtp_rtcp/include/rtp_header_extension_map.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "modules/rtp_rtcp/source/rtp_util.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/dscp.h"
+#include "rtc_base/experiments/struct_parameters_parser.h"
+#include "rtc_base/ignore_wundef.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/race_checker.h"
+#include "rtc_base/string_encode.h"
+#include "rtc_base/strings/audio_format_to_string.h"
+#include "rtc_base/strings/string_builder.h"
+#include "rtc_base/strings/string_format.h"
+#include "rtc_base/thread_annotations.h"
+#include "rtc_base/time_utils.h"
+#include "rtc_base/trace_event.h"
+#include "system_wrappers/include/metrics.h"
+
+#if WEBRTC_ENABLE_PROTOBUF
+RTC_PUSH_IGNORING_WUNDEF()
+#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
+#include "external/webrtc/webrtc/modules/audio_coding/audio_network_adaptor/config.pb.h"
+#else
+#include "modules/audio_coding/audio_network_adaptor/config.pb.h"
+#endif
+RTC_POP_IGNORING_WUNDEF()
+#endif
+
+namespace cricket {
+namespace {
+
+using ::webrtc::ParseRtpSsrc;
+
+// Upper bound on implicitly created receive streams for unsignaled SSRCs.
+constexpr size_t kMaxUnsignaledRecvStreams = 4;
+
+// RTP history kept for NACK-based retransmission, in milliseconds.
+constexpr int kNackRtpHistoryMs = 5000;
+
+const int kMinTelephoneEventCode = 0;  // RFC4733 (Section 2.3.1)
+const int kMaxTelephoneEventCode = 255;
+
+// Valid RTP payload-type range (7-bit field, RFC 3550).
+const int kMinPayloadType = 0;
+const int kMaxPayloadType = 127;
+
+// Forwards audio sink callbacks to a wrapped, externally owned sink.
+class ProxySink : public webrtc::AudioSinkInterface {
+ public:
+  // `sink` must be non-null and must outlive this proxy (not owned here).
+  explicit ProxySink(AudioSinkInterface* sink) : sink_(sink) {
+    RTC_DCHECK(sink);
+  }
+
+  void OnData(const Data& audio) override { sink_->OnData(audio); }
+
+ private:
+  webrtc::AudioSinkInterface* sink_;  // Not owned.
+};
+
+// Voice streams carry exactly one SSRC; rejects empty or multi-SSRC
+// (simulcast-style) stream parameters.
+bool ValidateStreamParams(const StreamParams& sp) {
+  if (sp.ssrcs.empty()) {
+    RTC_DLOG(LS_ERROR) << "No SSRCs in stream parameters: " << sp.ToString();
+    return false;
+  }
+  if (sp.ssrcs.size() > 1) {
+    RTC_DLOG(LS_ERROR) << "Multiple SSRCs in stream parameters: "
+                       << sp.ToString();
+    return false;
+  }
+  return true;
+}
+
+// Dumps an AudioCodec in RFC 2327-ish format:
+// "<name>/<clockrate>/<channels> { key=value ... } (<payload id>)".
+std::string ToString(const AudioCodec& codec) {
+  rtc::StringBuilder ss;
+  ss << codec.name << "/" << codec.clockrate << "/" << codec.channels;
+  if (!codec.params.empty()) {
+    ss << " {";
+    for (const auto& param : codec.params) {
+      ss << " " << param.first << "=" << param.second;
+    }
+    ss << " }";
+  }
+  ss << " (" << codec.id << ")";
+  return ss.Release();
+}
+
+// Case-insensitive comparison of a codec's name against `ref_name`.
+bool IsCodec(const AudioCodec& codec, const char* ref_name) {
+  return absl::EqualsIgnoreCase(codec.name, ref_name);
+}
+
+// Returns the first entry of `codecs` that matches `codec` according to
+// AudioCodec::Matches (optionally consulting `field_trials`), or nullopt.
+absl::optional<AudioCodec> FindCodec(
+    const std::vector<AudioCodec>& codecs,
+    const AudioCodec& codec,
+    const webrtc::FieldTrialsView* field_trials) {
+  for (const AudioCodec& c : codecs) {
+    if (c.Matches(codec, field_trials)) {
+      return c;
+    }
+  }
+  return absl::nullopt;
+}
+
+// Returns true iff no two codecs in `codecs` share an RTP payload type
+// (checked by sorting the ids and looking for adjacent duplicates).
+bool VerifyUniquePayloadTypes(const std::vector<AudioCodec>& codecs) {
+  if (codecs.empty()) {
+    return true;
+  }
+  std::vector<int> payload_types;
+  absl::c_transform(codecs, std::back_inserter(payload_types),
+                    [](const AudioCodec& codec) { return codec.id; });
+  absl::c_sort(payload_types);
+  return absl::c_adjacent_find(payload_types) == payload_types.end();
+}
+
+// Returns the audio network adaptor config only when the feature is both
+// enabled and configured in `options`; nullopt otherwise.
+absl::optional<std::string> GetAudioNetworkAdaptorConfig(
+    const AudioOptions& options) {
+  if (options.audio_network_adaptor && *options.audio_network_adaptor &&
+      options.audio_network_adaptor_config) {
+    // Turn on audio network adaptor only when `options_.audio_network_adaptor`
+    // equals true and `options_.audio_network_adaptor_config` has a value.
+    return options.audio_network_adaptor_config;
+  }
+  return absl::nullopt;
+}
+
+// Returns its smallest positive argument. If neither argument is positive,
+// returns an arbitrary nonpositive value.
+// (Used by ComputeSendBitrate() to combine the SDP "b=" cap with the
+// RtpParameters max bitrate, where nonpositive means "unset".)
+int MinPositive(int a, int b) {
+  if (a <= 0) {
+    return b;
+  }
+  if (b <= 0) {
+    return a;
+  }
+  return std::min(a, b);
+}
+
+// `max_send_bitrate_bps` is the bitrate from "b=" in SDP.
+// `rtp_max_bitrate_bps` is the bitrate from RtpSender::SetParameters.
+// Returns the bitrate to configure on the send stream, or nullopt when the
+// requested cap is below the codec's minimum.
+absl::optional<int> ComputeSendBitrate(int max_send_bitrate_bps,
+                                       absl::optional<int> rtp_max_bitrate_bps,
+                                       const webrtc::AudioCodecSpec& spec) {
+  // If application-configured bitrate is set, take minimum of that and SDP
+  // bitrate.
+  const int bps = rtp_max_bitrate_bps
+                      ? MinPositive(max_send_bitrate_bps, *rtp_max_bitrate_bps)
+                      : max_send_bitrate_bps;
+  // Nonpositive means no cap was set; fall back to the codec default.
+  if (bps <= 0) {
+    return spec.info.default_bitrate_bps;
+  }
+
+  if (bps < spec.info.min_bitrate_bps) {
+    // If codec is not multi-rate and `bps` is less than the fixed bitrate then
+    // fail. If codec is not multi-rate and `bps` exceeds or equals the fixed
+    // bitrate then ignore.
+    RTC_LOG(LS_ERROR) << "Failed to set codec " << spec.format.name
+                      << " to bitrate " << bps
+                      << " bps"
+                         ", requires at least "
+                      << spec.info.min_bitrate_bps << " bps.";
+    return absl::nullopt;
+  }
+
+  if (spec.info.HasFixedBitrate()) {
+    return spec.info.default_bitrate_bps;
+  } else {
+    // If codec is multi-rate then just set the bitrate.
+    return std::min(bps, spec.info.max_bitrate_bps);
+  }
+}
+
+// Returns true when the field-trial string for `trial` starts with "Enabled".
+bool IsEnabled(const webrtc::FieldTrialsView& config, absl::string_view trial) {
+  return absl::StartsWith(config.Lookup(trial), "Enabled");
+}
+
+// Parsed settings of the "WebRTC-Audio-AdaptivePtime" field trial.  When
+// protobuf is available, the constructor also serializes a matching audio
+// network adaptor config (frame-length + bitrate controllers).
+struct AdaptivePtimeConfig {
+  bool enabled = false;
+  webrtc::DataRate min_payload_bitrate = webrtc::DataRate::KilobitsPerSec(16);
+  // Value is chosen to ensure FEC can be encoded, see LBRR_WB_MIN_RATE_BPS in
+  // libopus.
+  webrtc::DataRate min_encoder_bitrate = webrtc::DataRate::KilobitsPerSec(16);
+  bool use_slow_adaptation = true;
+
+  // Serialized ControllerManager proto; unset when protobuf is disabled.
+  absl::optional<std::string> audio_network_adaptor_config;
+
+  std::unique_ptr<webrtc::StructParametersParser> Parser() {
+    return webrtc::StructParametersParser::Create(  //
+        "enabled", &enabled,                        //
+        "min_payload_bitrate", &min_payload_bitrate,  //
+        "min_encoder_bitrate", &min_encoder_bitrate,  //
+        "use_slow_adaptation", &use_slow_adaptation);
+  }
+
+  explicit AdaptivePtimeConfig(const webrtc::FieldTrialsView& trials) {
+    Parser()->Parse(trials.Lookup("WebRTC-Audio-AdaptivePtime"));
+#if WEBRTC_ENABLE_PROTOBUF
+    webrtc::audio_network_adaptor::config::ControllerManager config;
+    auto* frame_length_controller =
+        config.add_controllers()->mutable_frame_length_controller_v2();
+    frame_length_controller->set_min_payload_bitrate_bps(
+        min_payload_bitrate.bps());
+    frame_length_controller->set_use_slow_adaptation(use_slow_adaptation);
+    config.add_controllers()->mutable_bitrate_controller();
+    audio_network_adaptor_config = config.SerializeAsString();
+#endif
+  }
+};
+
+// TODO(tommi): Constructing a receive stream could be made simpler.
+// Move some of this boiler plate code into the config structs themselves.
+// Assembles an AudioReceiveStreamInterface::Config from the given pieces.
+// NACK is enabled by giving the RTP history a nonzero duration; the first
+// stream id (if any) becomes the a/v sync group.
+webrtc::AudioReceiveStreamInterface::Config BuildReceiveStreamConfig(
+    uint32_t remote_ssrc,
+    uint32_t local_ssrc,
+    bool use_nack,
+    bool enable_non_sender_rtt,
+    const std::vector<std::string>& stream_ids,
+    const std::vector<webrtc::RtpExtension>& extensions,
+    webrtc::Transport* rtcp_send_transport,
+    const rtc::scoped_refptr<webrtc::AudioDecoderFactory>& decoder_factory,
+    const std::map<int, webrtc::SdpAudioFormat>& decoder_map,
+    absl::optional<webrtc::AudioCodecPairId> codec_pair_id,
+    size_t jitter_buffer_max_packets,
+    bool jitter_buffer_fast_accelerate,
+    int jitter_buffer_min_delay_ms,
+    rtc::scoped_refptr<webrtc::FrameDecryptorInterface> frame_decryptor,
+    const webrtc::CryptoOptions& crypto_options,
+    rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer) {
+  webrtc::AudioReceiveStreamInterface::Config config;
+  config.rtp.remote_ssrc = remote_ssrc;
+  config.rtp.local_ssrc = local_ssrc;
+  config.rtp.nack.rtp_history_ms = use_nack ? kNackRtpHistoryMs : 0;
+  if (!stream_ids.empty()) {
+    config.sync_group = stream_ids[0];
+  }
+  config.rtcp_send_transport = rtcp_send_transport;
+  config.enable_non_sender_rtt = enable_non_sender_rtt;
+  config.decoder_factory = decoder_factory;
+  config.decoder_map = decoder_map;
+  config.codec_pair_id = codec_pair_id;
+  config.jitter_buffer_max_packets = jitter_buffer_max_packets;
+  config.jitter_buffer_fast_accelerate = jitter_buffer_fast_accelerate;
+  config.jitter_buffer_min_delay_ms = jitter_buffer_min_delay_ms;
+  config.frame_decryptor = std::move(frame_decryptor);
+  config.crypto_options = crypto_options;
+  config.frame_transformer = std::move(frame_transformer);
+  return config;
+}
+
+// Utility function to check if RED codec and its parameters match a codec
+// spec: clockrate and channel count must agree, and the RED fmtp line must
+// list 2..32 redundant payloads that all reference the primary payload type.
+bool CheckRedParameters(
+    const AudioCodec& red_codec,
+    const webrtc::AudioSendStream::Config::SendCodecSpec& send_codec_spec) {
+  if (red_codec.clockrate != send_codec_spec.format.clockrate_hz ||
+      red_codec.channels != send_codec_spec.format.num_channels) {
+    return false;
+  }
+
+  // Check the FMTP line for the empty parameter which should match
+  // <primary codec>/<primary codec>[/...]
+  auto red_parameters = red_codec.params.find("");
+  if (red_parameters == red_codec.params.end()) {
+    RTC_LOG(LS_WARNING) << "audio/RED missing fmtp parameters.";
+    return false;
+  }
+  std::vector<absl::string_view> redundant_payloads =
+      rtc::split(red_parameters->second, '/');
+  // 32 is chosen as a maximum upper bound for consistency with the
+  // red payload splitter.
+  if (redundant_payloads.size() < 2 || redundant_payloads.size() > 32) {
+    return false;
+  }
+  // Every listed redundant payload type must equal the primary payload type.
+  for (auto pt : redundant_payloads) {
+    if (pt != rtc::ToString(send_codec_spec.payload_type)) {
+      return false;
+    }
+  }
+  return true;
+}
+
+} // namespace
+
+// Stores the injected factories and audio components; heavy initialization
+// (ADM setup, AudioState creation, codec collection) is deferred to Init().
+WebRtcVoiceEngine::WebRtcVoiceEngine(
+    webrtc::TaskQueueFactory* task_queue_factory,
+    webrtc::AudioDeviceModule* adm,
+    const rtc::scoped_refptr<webrtc::AudioEncoderFactory>& encoder_factory,
+    const rtc::scoped_refptr<webrtc::AudioDecoderFactory>& decoder_factory,
+    rtc::scoped_refptr<webrtc::AudioMixer> audio_mixer,
+    rtc::scoped_refptr<webrtc::AudioProcessing> audio_processing,
+    // TODO(bugs.webrtc.org/15111):
+    // Remove the raw AudioFrameProcessor pointer in the follow-up.
+    webrtc::AudioFrameProcessor* audio_frame_processor,
+    std::unique_ptr<webrtc::AudioFrameProcessor> owned_audio_frame_processor,
+    const webrtc::FieldTrialsView& trials)
+    : task_queue_factory_(task_queue_factory),
+      adm_(adm),
+      encoder_factory_(encoder_factory),
+      decoder_factory_(decoder_factory),
+      audio_mixer_(audio_mixer),
+      apm_(audio_processing),
+      audio_frame_processor_(audio_frame_processor),
+      owned_audio_frame_processor_(std::move(owned_audio_frame_processor)),
+      minimized_remsampling_on_mobile_trial_enabled_(
+          IsEnabled(trials, "WebRTC-Audio-MinimizeResamplingOnMobile")) {
+  RTC_LOG(LS_INFO) << "WebRtcVoiceEngine::WebRtcVoiceEngine";
+  RTC_DCHECK(decoder_factory);
+  RTC_DCHECK(encoder_factory);
+  // The rest of our initialization will happen in Init.
+}
+
+// Tears down audio capture/playout and the AEC dump, but only if Init()
+// completed; must run on the worker thread.
+WebRtcVoiceEngine::~WebRtcVoiceEngine() {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+  RTC_LOG(LS_INFO) << "WebRtcVoiceEngine::~WebRtcVoiceEngine";
+  if (initialized_) {
+    StopAecDump();
+
+    // Stop AudioDevice.
+    adm()->StopPlayout();
+    adm()->StopRecording();
+    adm()->RegisterAudioCallback(nullptr);
+    adm()->Terminate();
+  }
+}
+
+// One-time engine initialization on the worker thread: creates the
+// low-priority task queue, collects send/receive codec lists, ensures an ADM
+// exists, builds the shared AudioState, wires the ADM to it, and applies the
+// default engine options.
+void WebRtcVoiceEngine::Init() {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+  RTC_LOG(LS_INFO) << "WebRtcVoiceEngine::Init";
+
+  // TaskQueue expects to be created/destroyed on the same thread.
+  RTC_DCHECK(!low_priority_worker_queue_);
+  low_priority_worker_queue_.reset(
+      new rtc::TaskQueue(task_queue_factory_->CreateTaskQueue(
+          "rtc-low-prio", webrtc::TaskQueueFactory::Priority::LOW)));
+
+  // Load our audio codec lists.
+  RTC_LOG(LS_VERBOSE) << "Supported send codecs in order of preference:";
+  send_codecs_ = CollectCodecs(encoder_factory_->GetSupportedEncoders());
+  for (const AudioCodec& codec : send_codecs_) {
+    RTC_LOG(LS_VERBOSE) << ToString(codec);
+  }
+
+  RTC_LOG(LS_VERBOSE) << "Supported recv codecs in order of preference:";
+  recv_codecs_ = CollectCodecs(decoder_factory_->GetSupportedDecoders());
+  for (const AudioCodec& codec : recv_codecs_) {
+    RTC_LOG(LS_VERBOSE) << ToString(codec);
+  }
+
+#if defined(WEBRTC_INCLUDE_INTERNAL_AUDIO_DEVICE)
+  // No ADM supplied? Create a default one.
+  if (!adm_) {
+    adm_ = webrtc::AudioDeviceModule::Create(
+        webrtc::AudioDeviceModule::kPlatformDefaultAudio, task_queue_factory_);
+  }
+#endif  // WEBRTC_INCLUDE_INTERNAL_AUDIO_DEVICE
+  RTC_CHECK(adm());
+  webrtc::adm_helpers::Init(adm());
+
+  // Set up AudioState.
+  {
+    webrtc::AudioState::Config config;
+    if (audio_mixer_) {
+      config.audio_mixer = audio_mixer_;
+    } else {
+      config.audio_mixer = webrtc::AudioMixerImpl::Create();
+    }
+    config.audio_processing = apm_;
+    config.audio_device_module = adm_;
+    // Prefer the raw (legacy) frame processor pointer when both are supplied;
+    // otherwise take ownership of the owned one.
+    if (audio_frame_processor_) {
+      config.async_audio_processing_factory =
+          rtc::make_ref_counted<webrtc::AsyncAudioProcessing::Factory>(
+              *audio_frame_processor_, *task_queue_factory_);
+    } else if (owned_audio_frame_processor_) {
+      config.async_audio_processing_factory =
+          rtc::make_ref_counted<webrtc::AsyncAudioProcessing::Factory>(
+              std::move(owned_audio_frame_processor_), *task_queue_factory_);
+    }
+    audio_state_ = webrtc::AudioState::Create(config);
+  }
+
+  // Connect the ADM to our audio path.
+  adm()->RegisterAudioCallback(audio_state()->audio_transport());
+
+  // Set default engine options.
+  {
+    AudioOptions options;
+    options.echo_cancellation = true;
+    options.auto_gain_control = true;
+#if defined(WEBRTC_IOS)
+    // On iOS, VPIO provides built-in NS.
+    options.noise_suppression = false;
+#else
+    options.noise_suppression = true;
+#endif
+    options.highpass_filter = true;
+    options.stereo_swapping = false;
+    options.audio_jitter_buffer_max_packets = 200;
+    options.audio_jitter_buffer_fast_accelerate = false;
+    options.audio_jitter_buffer_min_delay_ms = 0;
+    ApplyOptions(options);
+  }
+  initialized_ = true;
+}
+
+// Returns the engine-wide AudioState created in Init(). Worker thread only.
+rtc::scoped_refptr<webrtc::AudioState> WebRtcVoiceEngine::GetAudioState()
+    const {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+  return audio_state_;
+}
+
+// Factory for voice send channels. The channel keeps a raw pointer back to
+// this engine, so the engine must outlive every channel it creates.
+std::unique_ptr<VoiceMediaSendChannelInterface>
+WebRtcVoiceEngine::CreateSendChannel(
+    webrtc::Call* call,
+    const MediaConfig& config,
+    const AudioOptions& options,
+    const webrtc::CryptoOptions& crypto_options,
+    webrtc::AudioCodecPairId codec_pair_id) {
+  auto send_channel = std::make_unique<WebRtcVoiceSendChannel>(
+      this, config, options, crypto_options, call, codec_pair_id);
+  return send_channel;
+}
+
+// Factory for voice receive channels; mirrors CreateSendChannel().
+std::unique_ptr<VoiceMediaReceiveChannelInterface>
+WebRtcVoiceEngine::CreateReceiveChannel(
+    webrtc::Call* call,
+    const MediaConfig& config,
+    const AudioOptions& options,
+    const webrtc::CryptoOptions& crypto_options,
+    webrtc::AudioCodecPairId codec_pair_id) {
+  auto receive_channel = std::make_unique<WebRtcVoiceReceiveChannel>(
+      this, config, options, crypto_options, call, codec_pair_id);
+  return receive_channel;
+}
+
+// Applies `options_in` on top of the current state. Platform built-in
+// (hardware) AEC/AGC/NS are preferred over the software implementations
+// when available: if a built-in effect is successfully enabled, the
+// corresponding software option is cleared so the APM does not also run
+// it. Jitter-buffer options are cached on the engine for later streams,
+// and any remaining options are folded into the APM config at the end.
+void WebRtcVoiceEngine::ApplyOptions(const AudioOptions& options_in) {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+  RTC_LOG(LS_INFO) << "WebRtcVoiceEngine::ApplyOptions: "
+                   << options_in.ToString();
+  AudioOptions options = options_in;  // The options are modified below.
+
+  // Set and adjust echo canceller options.
+  // Use desktop AEC by default, when not using hardware AEC.
+  bool use_mobile_software_aec = false;
+
+#if defined(WEBRTC_IOS)
+  if (options.ios_force_software_aec_HACK &&
+      *options.ios_force_software_aec_HACK) {
+    // EC may be forced on for a device known to have non-functioning platform
+    // AEC.
+    options.echo_cancellation = true;
+    RTC_LOG(LS_WARNING)
+        << "Force software AEC on iOS. May conflict with platform AEC.";
+  } else {
+    // On iOS, VPIO provides built-in EC.
+    options.echo_cancellation = false;
+    RTC_LOG(LS_INFO) << "Always disable AEC on iOS. Use built-in instead.";
+  }
+#elif defined(WEBRTC_ANDROID)
+  use_mobile_software_aec = true;
+#endif
+
+// Set and adjust gain control options.
+#if defined(WEBRTC_IOS)
+  // On iOS, VPIO provides built-in AGC.
+  options.auto_gain_control = false;
+  RTC_LOG(LS_INFO) << "Always disable AGC on iOS. Use built-in instead.";
+#endif
+
+#if defined(WEBRTC_IOS) || defined(WEBRTC_ANDROID)
+  // Turn off the gain control if specified by the field trial.
+  // The purpose of the field trial is to reduce the amount of resampling
+  // performed inside the audio processing module on mobile platforms by
+  // turning off the fixed AGC mode and the high-pass filter whenever
+  // possible.
+  // (https://bugs.chromium.org/p/webrtc/issues/detail?id=6181).
+  if (minimized_remsampling_on_mobile_trial_enabled_) {
+    options.auto_gain_control = false;
+    RTC_LOG(LS_INFO) << "Disable AGC according to field trial.";
+    if (!(options.noise_suppression.value_or(false) ||
+          options.echo_cancellation.value_or(false))) {
+      // If possible, turn off the high-pass filter.
+      RTC_LOG(LS_INFO)
+          << "Disable high-pass filter in response to field trial.";
+      options.highpass_filter = false;
+    }
+  }
+#endif
+
+  if (options.echo_cancellation) {
+    // Check if platform supports built-in EC. Currently only supported on
+    // Android and in combination with Java based audio layer.
+    // TODO(henrika): investigate possibility to support built-in EC also
+    // in combination with Open SL ES audio.
+    const bool built_in_aec = adm()->BuiltInAECIsAvailable();
+    if (built_in_aec) {
+      // Built-in EC exists on this device. Enable/Disable it according to the
+      // echo_cancellation audio option.
+      const bool enable_built_in_aec = *options.echo_cancellation;
+      if (adm()->EnableBuiltInAEC(enable_built_in_aec) == 0 &&
+          enable_built_in_aec) {
+        // Disable internal software EC if built-in EC is enabled,
+        // i.e., replace the software EC with the built-in EC.
+        options.echo_cancellation = false;
+        RTC_LOG(LS_INFO)
+            << "Disabling EC since built-in EC will be used instead";
+      }
+    }
+  }
+
+  if (options.auto_gain_control) {
+    bool built_in_agc_avaliable = adm()->BuiltInAGCIsAvailable();
+    if (built_in_agc_avaliable) {
+      if (adm()->EnableBuiltInAGC(*options.auto_gain_control) == 0 &&
+          *options.auto_gain_control) {
+        // Disable internal software AGC if built-in AGC is enabled,
+        // i.e., replace the software AGC with the built-in AGC.
+        options.auto_gain_control = false;
+        RTC_LOG(LS_INFO)
+            << "Disabling AGC since built-in AGC will be used instead";
+      }
+    }
+  }
+
+  if (options.noise_suppression) {
+    if (adm()->BuiltInNSIsAvailable()) {
+      bool builtin_ns = *options.noise_suppression;
+      if (adm()->EnableBuiltInNS(builtin_ns) == 0 && builtin_ns) {
+        // Disable internal software NS if built-in NS is enabled,
+        // i.e., replace the software NS with the built-in NS.
+        options.noise_suppression = false;
+        RTC_LOG(LS_INFO)
+            << "Disabling NS since built-in NS will be used instead";
+      }
+    }
+  }
+
+  if (options.stereo_swapping) {
+    audio_state()->SetStereoChannelSwapping(*options.stereo_swapping);
+  }
+
+  // Jitter-buffer settings are cached on the engine; new receive streams
+  // pick them up when created. The max-packets value is clamped to >= 20.
+  if (options.audio_jitter_buffer_max_packets) {
+    audio_jitter_buffer_max_packets_ =
+        std::max(20, *options.audio_jitter_buffer_max_packets);
+  }
+  if (options.audio_jitter_buffer_fast_accelerate) {
+    audio_jitter_buffer_fast_accelerate_ =
+        *options.audio_jitter_buffer_fast_accelerate;
+  }
+  if (options.audio_jitter_buffer_min_delay_ms) {
+    audio_jitter_buffer_min_delay_ms_ =
+        *options.audio_jitter_buffer_min_delay_ms;
+  }
+
+  // Without an APM there is nothing further to configure.
+  webrtc::AudioProcessing* ap = apm();
+  if (!ap) {
+    return;
+  }
+
+  webrtc::AudioProcessing::Config apm_config = ap->GetConfig();
+
+  if (options.echo_cancellation) {
+    apm_config.echo_canceller.enabled = *options.echo_cancellation;
+    apm_config.echo_canceller.mobile_mode = use_mobile_software_aec;
+  }
+
+  if (options.auto_gain_control) {
+    const bool enabled = *options.auto_gain_control;
+    apm_config.gain_controller1.enabled = enabled;
+    // Mobile platforms use the cheaper fixed-digital AGC mode.
+#if defined(WEBRTC_IOS) || defined(WEBRTC_ANDROID)
+    apm_config.gain_controller1.mode =
+        apm_config.gain_controller1.kFixedDigital;
+#else
+    apm_config.gain_controller1.mode =
+        apm_config.gain_controller1.kAdaptiveAnalog;
+#endif
+  }
+
+  if (options.highpass_filter) {
+    apm_config.high_pass_filter.enabled = *options.highpass_filter;
+  }
+
+  if (options.noise_suppression) {
+    const bool enabled = *options.noise_suppression;
+    apm_config.noise_suppression.enabled = enabled;
+    apm_config.noise_suppression.level =
+        webrtc::AudioProcessing::Config::NoiseSuppression::Level::kHigh;
+  }
+
+  ap->ApplyConfig(apm_config);
+}
+
+// Send codec list built by Init(); signaling thread only.
+const std::vector<AudioCodec>& WebRtcVoiceEngine::send_codecs() const {
+  RTC_DCHECK(signal_thread_checker_.IsCurrent());
+  return send_codecs_;
+}
+
+// Receive codec list built by Init(); signaling thread only.
+const std::vector<AudioCodec>& WebRtcVoiceEngine::recv_codecs() const {
+  RTC_DCHECK(signal_thread_checker_.IsCurrent());
+  return recv_codecs_;
+}
+
+// Lists the audio RTP header extensions this engine supports. IDs are
+// assigned sequentially starting at 1: four send/recv extensions followed
+// by abs-capture-time, which is advertised but stopped by default.
+std::vector<webrtc::RtpHeaderExtensionCapability>
+WebRtcVoiceEngine::GetRtpHeaderExtensions() const {
+  RTC_DCHECK(signal_thread_checker_.IsCurrent());
+  std::vector<webrtc::RtpHeaderExtensionCapability> result;
+  result.emplace_back(webrtc::RtpExtension::kAudioLevelUri, 1,
+                      webrtc::RtpTransceiverDirection::kSendRecv);
+  result.emplace_back(webrtc::RtpExtension::kAbsSendTimeUri, 2,
+                      webrtc::RtpTransceiverDirection::kSendRecv);
+  result.emplace_back(webrtc::RtpExtension::kTransportSequenceNumberUri, 3,
+                      webrtc::RtpTransceiverDirection::kSendRecv);
+  result.emplace_back(webrtc::RtpExtension::kMidUri, 4,
+                      webrtc::RtpTransceiverDirection::kSendRecv);
+  // Not offered unless explicitly negotiated.
+  result.emplace_back(webrtc::RtpExtension::kAbsoluteCaptureTimeUri, 5,
+                      webrtc::RtpTransceiverDirection::kStopped);
+  return result;
+}
+
+// Starts an AEC dump into `file` on the low-priority task queue, capped at
+// `max_size_bytes`. Returns false (without taking ownership side effects)
+// when no APM is present. Worker thread only.
+bool WebRtcVoiceEngine::StartAecDump(webrtc::FileWrapper file,
+                                     int64_t max_size_bytes) {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+
+  webrtc::AudioProcessing* ap = apm();
+  if (!ap) {
+    RTC_LOG(LS_WARNING)
+        << "Attempting to start aecdump when no audio processing module is "
+           "present, hence no aecdump is started.";
+    return false;
+  }
+
+  // Release() hands the underlying FILE* over to the APM-owned dump.
+  return ap->CreateAndAttachAecDump(file.Release(), max_size_bytes,
+                                    low_priority_worker_queue_.get());
+}
+
+// Detaches any active AEC dump from the APM; logs a warning when no APM
+// exists. Worker thread only.
+void WebRtcVoiceEngine::StopAecDump() {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+  webrtc::AudioProcessing* ap = apm();
+  if (!ap) {
+    RTC_LOG(LS_WARNING) << "Attempting to stop aecdump when no audio "
+                           "processing module is present";
+    return;
+  }
+  ap->DetachAecDump();
+}
+
+// Forwards to the ADM's stats. The worker-thread requirement is enforced
+// inside adm().
+absl::optional<webrtc::AudioDeviceModule::Stats>
+WebRtcVoiceEngine::GetAudioDeviceStats() {
+  return adm()->GetStats();
+}
+
+// Checked accessor for the ADM; must not be called before one exists
+// (injected or created in Init()). Worker thread only.
+webrtc::AudioDeviceModule* WebRtcVoiceEngine::adm() {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+  RTC_DCHECK(adm_);
+  return adm_.get();
+}
+
+// Accessor for the (optional) audio processing module; may return nullptr.
+// Worker thread only.
+webrtc::AudioProcessing* WebRtcVoiceEngine::apm() const {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+  return apm_.get();
+}
+
+// Checked accessor for the AudioState created in Init(). Worker thread only.
+webrtc::AudioState* WebRtcVoiceEngine::audio_state() {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+  RTC_DCHECK(audio_state_);
+  return audio_state_.get();
+}
+
+// Converts factory-reported codec specs into an AudioCodec list with payload
+// types assigned by PayloadTypeMapper. Ordering of the result: the "proper"
+// codecs first (with RED appended right after Opus), then CN entries, then
+// telephone-event entries. CN/DTMF entries are generated only for clockrates
+// actually seen in the specs, limited to the rates in the maps below.
+std::vector<AudioCodec> WebRtcVoiceEngine::CollectCodecs(
+    const std::vector<webrtc::AudioCodecSpec>& specs) const {
+  PayloadTypeMapper mapper;
+  std::vector<AudioCodec> out;
+
+  // Only generate CN payload types for these clockrates:
+  std::map<int, bool, std::greater<int>> generate_cn = {
+      {8000, false}, {16000, false}, {32000, false}};
+  // Only generate telephone-event payload types for these clockrates:
+  std::map<int, bool, std::greater<int>> generate_dtmf = {
+      {8000, false}, {16000, false}, {32000, false}, {48000, false}};
+
+  // Maps `format` to an AudioCodec with an assigned payload type. Appends to
+  // `out` only when `out` is non-null; always returns the mapped codec (or
+  // nullopt when no payload type could be assigned).
+  auto map_format = [&mapper](const webrtc::SdpAudioFormat& format,
+                              std::vector<AudioCodec>* out) {
+    absl::optional<AudioCodec> opt_codec = mapper.ToAudioCodec(format);
+    if (opt_codec) {
+      if (out) {
+        out->push_back(*opt_codec);
+      }
+    } else {
+      RTC_LOG(LS_ERROR) << "Unable to assign payload type to format: "
+                        << rtc::ToString(format);
+    }
+
+    return opt_codec;
+  };
+
+  for (const auto& spec : specs) {
+    // We need to do some extra stuff before adding the main codecs to out.
+    absl::optional<AudioCodec> opt_codec = map_format(spec.format, nullptr);
+    if (opt_codec) {
+      AudioCodec& codec = *opt_codec;
+      if (spec.info.supports_network_adaption) {
+        codec.AddFeedbackParam(
+            FeedbackParam(kRtcpFbParamTransportCc, kParamValueEmpty));
+      }
+
+      if (spec.info.allow_comfort_noise) {
+        // Generate a CN entry if the decoder allows it and we support the
+        // clockrate.
+        auto cn = generate_cn.find(spec.format.clockrate_hz);
+        if (cn != generate_cn.end()) {
+          cn->second = true;
+        }
+      }
+
+      // Generate a telephone-event entry if we support the clockrate.
+      auto dtmf = generate_dtmf.find(spec.format.clockrate_hz);
+      if (dtmf != generate_dtmf.end()) {
+        dtmf->second = true;
+      }
+
+      out.push_back(codec);
+
+      // RED is offered alongside Opus; its fmtp lists the Opus payload type
+      // twice ("<pt>/<pt>"), i.e. two redundant encodings of the same codec.
+      if (codec.name == kOpusCodecName) {
+        std::string redFmtp =
+            rtc::ToString(codec.id) + "/" + rtc::ToString(codec.id);
+        map_format({kRedCodecName, 48000, 2, {{"", redFmtp}}}, &out);
+      }
+    }
+  }
+
+  // Add CN codecs after "proper" audio codecs.
+  for (const auto& cn : generate_cn) {
+    if (cn.second) {
+      map_format({kCnCodecName, cn.first, 1}, &out);
+    }
+  }
+
+  // Add telephone-event codecs last.
+  for (const auto& dtmf : generate_dtmf) {
+    if (dtmf.second) {
+      map_format({kDtmfCodecName, dtmf.first, 1}, &out);
+    }
+  }
+
+  return out;
+}
+
+// --------------------------------- WebRtcVoiceSendChannel ------------------
+
+// Owns one webrtc::AudioSendStream and acts as the sink for its AudioSource.
+// All configuration methods run on the worker thread; OnData() is the only
+// entry point called from the audio capture thread (guarded by a race
+// checker, not a lock). Configuration changes go through `config_` and are
+// pushed to the stream via ReconfigureAudioSendStream().
+class WebRtcVoiceSendChannel::WebRtcAudioSendStream : public AudioSource::Sink {
+ public:
+  WebRtcAudioSendStream(
+      uint32_t ssrc,
+      const std::string& mid,
+      const std::string& c_name,
+      // NOTE(review): passed by value; `const std::string&` would avoid a
+      // copy — confirm intended.
+      const std::string track_id,
+      const absl::optional<webrtc::AudioSendStream::Config::SendCodecSpec>&
+          send_codec_spec,
+      bool extmap_allow_mixed,
+      const std::vector<webrtc::RtpExtension>& extensions,
+      int max_send_bitrate_bps,
+      int rtcp_report_interval_ms,
+      const absl::optional<std::string>& audio_network_adaptor_config,
+      webrtc::Call* call,
+      webrtc::Transport* send_transport,
+      const rtc::scoped_refptr<webrtc::AudioEncoderFactory>& encoder_factory,
+      const absl::optional<webrtc::AudioCodecPairId> codec_pair_id,
+      rtc::scoped_refptr<webrtc::FrameEncryptorInterface> frame_encryptor,
+      const webrtc::CryptoOptions& crypto_options)
+      : adaptive_ptime_config_(call->trials()),
+        call_(call),
+        config_(send_transport),
+        max_send_bitrate_bps_(max_send_bitrate_bps),
+        rtp_parameters_(CreateRtpParametersWithOneEncoding()) {
+    RTC_DCHECK(call);
+    RTC_DCHECK(encoder_factory);
+    config_.rtp.ssrc = ssrc;
+    config_.rtp.mid = mid;
+    config_.rtp.c_name = c_name;
+    config_.rtp.extmap_allow_mixed = extmap_allow_mixed;
+    config_.rtp.extensions = extensions;
+    // DSCP marking is used for any priority other than kLow.
+    config_.has_dscp =
+        rtp_parameters_.encodings[0].network_priority != webrtc::Priority::kLow;
+    config_.encoder_factory = encoder_factory;
+    config_.codec_pair_id = codec_pair_id;
+    config_.track_id = track_id;
+    config_.frame_encryptor = frame_encryptor;
+    config_.crypto_options = crypto_options;
+    config_.rtcp_report_interval_ms = rtcp_report_interval_ms;
+    rtp_parameters_.encodings[0].ssrc = ssrc;
+    rtp_parameters_.rtcp.cname = c_name;
+    rtp_parameters_.header_extensions = extensions;
+
+    audio_network_adaptor_config_from_options_ = audio_network_adaptor_config;
+    UpdateAudioNetworkAdaptorConfig();
+
+    if (send_codec_spec) {
+      UpdateSendCodecSpec(*send_codec_spec);
+    }
+
+    stream_ = call_->CreateAudioSendStream(config_);
+  }
+
+  WebRtcAudioSendStream() = delete;
+  WebRtcAudioSendStream(const WebRtcAudioSendStream&) = delete;
+  WebRtcAudioSendStream& operator=(const WebRtcAudioSendStream&) = delete;
+
+  ~WebRtcAudioSendStream() override {
+    RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+    // Detach from the source first so no audio callback can reach a
+    // destroyed stream.
+    ClearSource();
+    call_->DestroyAudioSendStream(stream_);
+  }
+
+  // Thread check happens inside UpdateSendCodecSpec().
+  void SetSendCodecSpec(
+      const webrtc::AudioSendStream::Config::SendCodecSpec& send_codec_spec) {
+    UpdateSendCodecSpec(send_codec_spec);
+    ReconfigureAudioSendStream(nullptr);
+  }
+
+  void SetRtpExtensions(const std::vector<webrtc::RtpExtension>& extensions) {
+    RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+    config_.rtp.extensions = extensions;
+    rtp_parameters_.header_extensions = extensions;
+    ReconfigureAudioSendStream(nullptr);
+  }
+
+  // NOTE(review): unlike the sibling setters this has no thread-checker
+  // DCHECK of its own; ReconfigureAudioSendStream() checks it — confirm
+  // intended.
+  void SetExtmapAllowMixed(bool extmap_allow_mixed) {
+    config_.rtp.extmap_allow_mixed = extmap_allow_mixed;
+    ReconfigureAudioSendStream(nullptr);
+  }
+
+  void SetMid(const std::string& mid) {
+    RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+    if (config_.rtp.mid == mid) {
+      return;
+    }
+    config_.rtp.mid = mid;
+    ReconfigureAudioSendStream(nullptr);
+  }
+
+  void SetFrameEncryptor(
+      rtc::scoped_refptr<webrtc::FrameEncryptorInterface> frame_encryptor) {
+    RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+    config_.frame_encryptor = frame_encryptor;
+    ReconfigureAudioSendStream(nullptr);
+  }
+
+  void SetAudioNetworkAdaptorConfig(
+      const absl::optional<std::string>& audio_network_adaptor_config) {
+    RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+    if (audio_network_adaptor_config_from_options_ ==
+        audio_network_adaptor_config) {
+      return;
+    }
+    audio_network_adaptor_config_from_options_ = audio_network_adaptor_config;
+    UpdateAudioNetworkAdaptorConfig();
+    UpdateAllowedBitrateRange();
+    ReconfigureAudioSendStream(nullptr);
+  }
+
+  // Returns false when `bps` cannot be satisfied by the current codec;
+  // otherwise records it and reconfigures the stream if the effective send
+  // rate changed.
+  bool SetMaxSendBitrate(int bps) {
+    RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+    RTC_DCHECK(config_.send_codec_spec);
+    RTC_DCHECK(audio_codec_spec_);
+    auto send_rate = ComputeSendBitrate(
+        bps, rtp_parameters_.encodings[0].max_bitrate_bps, *audio_codec_spec_);
+
+    if (!send_rate) {
+      return false;
+    }
+
+    max_send_bitrate_bps_ = bps;
+
+    if (send_rate != config_.send_codec_spec->target_bitrate_bps) {
+      config_.send_codec_spec->target_bitrate_bps = send_rate;
+      ReconfigureAudioSendStream(nullptr);
+    }
+    return true;
+  }
+
+  bool SendTelephoneEvent(int payload_type,
+                          int payload_freq,
+                          int event,
+                          int duration_ms) {
+    RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+    RTC_DCHECK(stream_);
+    return stream_->SendTelephoneEvent(payload_type, payload_freq, event,
+                                       duration_ms);
+  }
+
+  void SetSend(bool send) {
+    RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+    send_ = send;
+    UpdateSendState();
+  }
+
+  void SetMuted(bool muted) {
+    RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+    RTC_DCHECK(stream_);
+    stream_->SetMuted(muted);
+    muted_ = muted;
+  }
+
+  bool muted() const {
+    RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+    return muted_;
+  }
+
+  webrtc::AudioSendStream::Stats GetStats(bool has_remote_tracks) const {
+    RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+    RTC_DCHECK(stream_);
+    return stream_->GetStats(has_remote_tracks);
+  }
+
+  // Starts the sending by setting ourselves as a sink to the AudioSource to
+  // get data callbacks.
+  // This method is called on the libjingle worker thread.
+  // TODO(xians): Make sure Start() is called only once.
+  void SetSource(AudioSource* source) {
+    RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+    RTC_DCHECK(source);
+    // Re-setting the same source is a no-op; switching sources directly is
+    // not supported (DCHECK).
+    if (source_) {
+      RTC_DCHECK(source_ == source);
+      return;
+    }
+    source->SetSink(this);
+    source_ = source;
+    UpdateSendState();
+  }
+
+  // Stops sending by setting the sink of the AudioSource to nullptr. No data
+  // callback will be received after this method.
+  // This method is called on the libjingle worker thread.
+  void ClearSource() {
+    RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+    if (source_) {
+      source_->SetSink(nullptr);
+      source_ = nullptr;
+    }
+    UpdateSendState();
+  }
+
+  // AudioSource::Sink implementation.
+  // This method is called on the audio thread.
+  void OnData(const void* audio_data,
+              int bits_per_sample,
+              int sample_rate,
+              size_t number_of_channels,
+              size_t number_of_frames,
+              absl::optional<int64_t> absolute_capture_timestamp_ms) override {
+    TRACE_EVENT_BEGIN2("webrtc", "WebRtcAudioSendStream::OnData", "sample_rate",
+                       sample_rate, "number_of_frames", number_of_frames);
+    RTC_DCHECK_EQ(16, bits_per_sample);
+    RTC_CHECK_RUNS_SERIALIZED(&audio_capture_race_checker_);
+    RTC_DCHECK(stream_);
+    std::unique_ptr<webrtc::AudioFrame> audio_frame(new webrtc::AudioFrame());
+    audio_frame->UpdateFrame(
+        audio_frame->timestamp_, static_cast<const int16_t*>(audio_data),
+        number_of_frames, sample_rate, audio_frame->speech_type_,
+        audio_frame->vad_activity_, number_of_channels);
+    // TODO(bugs.webrtc.org/10739): add dcheck that
+    // `absolute_capture_timestamp_ms` always receives a value.
+    if (absolute_capture_timestamp_ms) {
+      audio_frame->set_absolute_capture_timestamp_ms(
+          *absolute_capture_timestamp_ms);
+    }
+    stream_->SendAudioData(std::move(audio_frame));
+    TRACE_EVENT_END1("webrtc", "WebRtcAudioSendStream::OnData",
+                     "number_of_channels", number_of_channels);
+  }
+
+  // Callback from the `source_` when it is going away. In case Start() has
+  // never been called, this callback won't be triggered.
+  void OnClose() override {
+    RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+    // Set `source_` to nullptr to make sure no more callback will get into
+    // the source.
+    source_ = nullptr;
+    UpdateSendState();
+  }
+
+  const webrtc::RtpParameters& rtp_parameters() const {
+    return rtp_parameters_;
+  }
+
+  // Validates and applies new RTP parameters. `callback` is invoked exactly
+  // once (synchronously on error paths, or forwarded to the stream
+  // reconfiguration otherwise).
+  webrtc::RTCError SetRtpParameters(const webrtc::RtpParameters& parameters,
+                                    webrtc::SetParametersCallback callback) {
+    webrtc::RTCError error = CheckRtpParametersInvalidModificationAndValues(
+        rtp_parameters_, parameters);
+    if (!error.ok()) {
+      return webrtc::InvokeSetParametersCallback(callback, error);
+    }
+
+    absl::optional<int> send_rate;
+    if (audio_codec_spec_) {
+      send_rate = ComputeSendBitrate(max_send_bitrate_bps_,
+                                     parameters.encodings[0].max_bitrate_bps,
+                                     *audio_codec_spec_);
+      if (!send_rate) {
+        return webrtc::InvokeSetParametersCallback(
+            callback, webrtc::RTCError(webrtc::RTCErrorType::INTERNAL_ERROR));
+      }
+    }
+
+    const absl::optional<int> old_rtp_max_bitrate =
+        rtp_parameters_.encodings[0].max_bitrate_bps;
+    double old_priority = rtp_parameters_.encodings[0].bitrate_priority;
+    webrtc::Priority old_dscp = rtp_parameters_.encodings[0].network_priority;
+    bool old_adaptive_ptime = rtp_parameters_.encodings[0].adaptive_ptime;
+    rtp_parameters_ = parameters;
+    config_.bitrate_priority = rtp_parameters_.encodings[0].bitrate_priority;
+    config_.has_dscp = (rtp_parameters_.encodings[0].network_priority !=
+                        webrtc::Priority::kLow);
+
+    // Only reconfigure the stream when a field it actually consumes changed.
+    bool reconfigure_send_stream =
+        (rtp_parameters_.encodings[0].max_bitrate_bps != old_rtp_max_bitrate) ||
+        (rtp_parameters_.encodings[0].bitrate_priority != old_priority) ||
+        (rtp_parameters_.encodings[0].network_priority != old_dscp) ||
+        (rtp_parameters_.encodings[0].adaptive_ptime != old_adaptive_ptime);
+    if (rtp_parameters_.encodings[0].max_bitrate_bps != old_rtp_max_bitrate) {
+      // Update the bitrate range.
+      if (send_rate) {
+        config_.send_codec_spec->target_bitrate_bps = send_rate;
+      }
+    }
+    if (reconfigure_send_stream) {
+      // Changing adaptive_ptime may update the audio network adaptor config
+      // used.
+      UpdateAudioNetworkAdaptorConfig();
+      UpdateAllowedBitrateRange();
+      ReconfigureAudioSendStream(std::move(callback));
+    } else {
+      webrtc::InvokeSetParametersCallback(callback, webrtc::RTCError::OK());
+    }
+
+    rtp_parameters_.rtcp.cname = config_.rtp.c_name;
+    rtp_parameters_.rtcp.reduced_size = false;
+
+    // parameters.encodings[0].active could have changed.
+    UpdateSendState();
+    return webrtc::RTCError::OK();
+  }
+
+  void SetEncoderToPacketizerFrameTransformer(
+      rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer) {
+    RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+    config_.frame_transformer = std::move(frame_transformer);
+    ReconfigureAudioSendStream(nullptr);
+  }
+
+ private:
+  // Starts or stops the underlying stream based on `send_` and the active
+  // flag of the single encoding.
+  void UpdateSendState() {
+    RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+    RTC_DCHECK(stream_);
+    RTC_DCHECK_EQ(1UL, rtp_parameters_.encodings.size());
+    // Stream can be started without `source_` being set.
+    if (send_ && rtp_parameters_.encodings[0].active) {
+      stream_->Start();
+    } else {
+      stream_->Stop();
+    }
+  }
+
+  void UpdateAllowedBitrateRange() {
+    RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+    // The order of precedence, from lowest to highest is:
+    // - a reasonable default of 32kbps min/max
+    // - fixed target bitrate from codec spec
+    // - lower min bitrate if adaptive ptime is enabled
+    const int kDefaultBitrateBps = 32000;
+    config_.min_bitrate_bps = kDefaultBitrateBps;
+    config_.max_bitrate_bps = kDefaultBitrateBps;
+
+    if (config_.send_codec_spec &&
+        config_.send_codec_spec->target_bitrate_bps) {
+      config_.min_bitrate_bps = *config_.send_codec_spec->target_bitrate_bps;
+      config_.max_bitrate_bps = *config_.send_codec_spec->target_bitrate_bps;
+    }
+
+    if (rtp_parameters_.encodings[0].adaptive_ptime) {
+      config_.min_bitrate_bps = std::min(
+          config_.min_bitrate_bps,
+          static_cast<int>(adaptive_ptime_config_.min_encoder_bitrate.bps()));
+    }
+  }
+
+  // Recomputes `config_` and `audio_codec_spec_` from a new codec spec and
+  // derives the encoded channel count from the "stereo" fmtp parameter.
+  void UpdateSendCodecSpec(
+      const webrtc::AudioSendStream::Config::SendCodecSpec& send_codec_spec) {
+    RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+    config_.send_codec_spec = send_codec_spec;
+    auto info =
+        config_.encoder_factory->QueryAudioEncoder(send_codec_spec.format);
+    RTC_DCHECK(info);
+    // If a specific target bitrate has been set for the stream, use that as
+    // the new default bitrate when computing send bitrate.
+    if (send_codec_spec.target_bitrate_bps) {
+      info->default_bitrate_bps = std::max(
+          info->min_bitrate_bps,
+          std::min(info->max_bitrate_bps, *send_codec_spec.target_bitrate_bps));
+    }
+
+    audio_codec_spec_.emplace(
+        webrtc::AudioCodecSpec{send_codec_spec.format, *info});
+
+    config_.send_codec_spec->target_bitrate_bps = ComputeSendBitrate(
+        max_send_bitrate_bps_, rtp_parameters_.encodings[0].max_bitrate_bps,
+        *audio_codec_spec_);
+
+    UpdateAllowedBitrateRange();
+
+    // Encoder will only use two channels if the stereo parameter is set.
+    const auto& it = send_codec_spec.format.parameters.find("stereo");
+    if (it != send_codec_spec.format.parameters.end() && it->second == "1") {
+      num_encoded_channels_ = 2;
+    } else {
+      num_encoded_channels_ = 1;
+    }
+  }
+
+  // Adaptive-ptime config wins over the option-supplied ANA config.
+  void UpdateAudioNetworkAdaptorConfig() {
+    if (adaptive_ptime_config_.enabled ||
+        rtp_parameters_.encodings[0].adaptive_ptime) {
+      config_.audio_network_adaptor_config =
+          adaptive_ptime_config_.audio_network_adaptor_config;
+      return;
+    }
+    config_.audio_network_adaptor_config =
+        audio_network_adaptor_config_from_options_;
+  }
+
+  void ReconfigureAudioSendStream(webrtc::SetParametersCallback callback) {
+    RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+    RTC_DCHECK(stream_);
+    stream_->Reconfigure(config_, std::move(callback));
+  }
+
+  // Read on the encoder side; written on the worker thread, hence atomic.
+  int NumPreferredChannels() const override { return num_encoded_channels_; }
+
+  const AdaptivePtimeConfig adaptive_ptime_config_;
+  webrtc::SequenceChecker worker_thread_checker_;
+  rtc::RaceChecker audio_capture_race_checker_;
+  webrtc::Call* call_ = nullptr;
+  webrtc::AudioSendStream::Config config_;
+  // The stream is owned by WebRtcAudioSendStream and may be reallocated if
+  // configuration changes.
+  webrtc::AudioSendStream* stream_ = nullptr;
+
+  // Raw pointer to AudioSource owned by LocalAudioTrackHandler.
+  // PeerConnection will make sure invalidating the pointer before the object
+  // goes away.
+  AudioSource* source_ = nullptr;
+  bool send_ = false;
+  bool muted_ = false;
+  int max_send_bitrate_bps_;
+  webrtc::RtpParameters rtp_parameters_;
+  absl::optional<webrtc::AudioCodecSpec> audio_codec_spec_;
+  // TODO(webrtc:11717): Remove this once audio_network_adaptor in AudioOptions
+  // has been removed.
+  absl::optional<std::string> audio_network_adaptor_config_from_options_;
+  std::atomic<int> num_encoded_channels_{-1};
+};
+
+// Constructs the channel and immediately applies the initial options via
+// SetOptions(), which also pushes them to the engine.
+WebRtcVoiceSendChannel::WebRtcVoiceSendChannel(
+    WebRtcVoiceEngine* engine,
+    const MediaConfig& config,
+    const AudioOptions& options,
+    const webrtc::CryptoOptions& crypto_options,
+    webrtc::Call* call,
+    webrtc::AudioCodecPairId codec_pair_id)
+    : MediaChannelUtil(call->network_thread(), config.enable_dscp),
+      worker_thread_(call->worker_thread()),
+      engine_(engine),
+      call_(call),
+      audio_config_(config.audio),
+      codec_pair_id_(codec_pair_id),
+      crypto_options_(crypto_options) {
+  RTC_LOG(LS_VERBOSE) << "WebRtcVoiceSendChannel::WebRtcVoiceSendChannel";
+  RTC_DCHECK(call);
+  SetOptions(options);
+}
+
+// Tears down all send streams through RemoveSendStream() so each stream is
+// fully (de)configured before destruction.
+WebRtcVoiceSendChannel::~WebRtcVoiceSendChannel() {
+  RTC_DCHECK_RUN_ON(worker_thread_);
+  RTC_DLOG(LS_VERBOSE) << "WebRtcVoiceSendChannel::~WebRtcVoiceSendChannel";
+  // TODO(solenberg): Should be able to delete the streams directly, without
+  // going through RemoveNnStream(), once stream objects handle
+  // all (de)configuration.
+  while (!send_streams_.empty()) {
+    RemoveSendStream(send_streams_.begin()->first);
+  }
+}
+
+// Merges `options` into the accumulated channel options, applies the result
+// at the engine level, and propagates the derived ANA config to every send
+// stream. Always returns true.
+bool WebRtcVoiceSendChannel::SetOptions(const AudioOptions& options) {
+  RTC_DCHECK_RUN_ON(worker_thread_);
+  RTC_LOG(LS_INFO) << "Setting voice channel options: " << options.ToString();
+
+  // We retain all of the existing options, and apply the given ones
+  // on top. This means there is no way to "clear" options such that
+  // they go back to the engine default.
+  options_.SetAll(options);
+  engine()->ApplyOptions(options_);
+
+  absl::optional<std::string> audio_network_adaptor_config =
+      GetAudioNetworkAdaptorConfig(options_);
+  for (auto& it : send_streams_) {
+    it.second->SetAudioNetworkAdaptorConfig(audio_network_adaptor_config);
+  }
+
+  RTC_LOG(LS_INFO) << "Set voice send channel options. Current options: "
+                   << options_.ToString();
+  return true;
+}
+
+// Applies negotiated sender parameters: resolves a possibly forced codec
+// from the single stream's RtpParameters, sets send codecs, validates and
+// filters RTP header extensions, updates extmap-allow-mixed / mid / max
+// bitrate on all streams, and finally applies the options. Returns false on
+// the first step that fails.
+bool WebRtcVoiceSendChannel::SetSenderParameters(
+    const AudioSenderParameter& params) {
+  TRACE_EVENT0("webrtc", "WebRtcVoiceMediaChannel::SetSenderParameters");
+  RTC_DCHECK_RUN_ON(worker_thread_);
+  RTC_LOG(LS_INFO) << "WebRtcVoiceMediaChannel::SetSenderParameters: "
+                   << params.ToString();
+  // TODO(pthatcher): Refactor this to be more clean now that we have
+  // all the information at once.
+
+  // Finding if the RtpParameters force a specific codec
+  absl::optional<Codec> force_codec;
+  if (send_streams_.size() == 1) {
+    // Since audio simulcast is not supported, currently, only PlanB
+    // has multiple tracks and we don't care about getting the
+    // functionality working there properly.
+    auto rtp_parameters = send_streams_.begin()->second->rtp_parameters();
+    if (rtp_parameters.encodings[0].codec) {
+      auto matched_codec =
+          absl::c_find_if(params.codecs, [&](auto negotiated_codec) {
+            return negotiated_codec.MatchesRtpCodec(
+                *rtp_parameters.encodings[0].codec);
+          });
+      if (matched_codec != params.codecs.end()) {
+        force_codec = *matched_codec;
+      } else {
+        // The requested codec has been negotiated away, we clear it from the
+        // parameters.
+        for (auto& encoding : rtp_parameters.encodings) {
+          encoding.codec.reset();
+        }
+        send_streams_.begin()->second->SetRtpParameters(rtp_parameters,
+                                                        nullptr);
+      }
+    }
+  }
+
+  if (!SetSendCodecs(params.codecs, force_codec)) {
+    return false;
+  }
+
+  if (!ValidateRtpExtensions(params.extensions, send_rtp_extensions_)) {
+    return false;
+  }
+
+  if (ExtmapAllowMixed() != params.extmap_allow_mixed) {
+    SetExtmapAllowMixed(params.extmap_allow_mixed);
+    for (auto& it : send_streams_) {
+      it.second->SetExtmapAllowMixed(params.extmap_allow_mixed);
+    }
+  }
+
+  // Only push extensions to the streams when the filtered set changed.
+  std::vector<webrtc::RtpExtension> filtered_extensions = FilterRtpExtensions(
+      params.extensions, webrtc::RtpExtension::IsSupportedForAudio, true,
+      call_->trials());
+  if (send_rtp_extensions_ != filtered_extensions) {
+    send_rtp_extensions_.swap(filtered_extensions);
+    for (auto& it : send_streams_) {
+      it.second->SetRtpExtensions(send_rtp_extensions_);
+    }
+  }
+  if (!params.mid.empty()) {
+    mid_ = params.mid;
+    for (auto& it : send_streams_) {
+      it.second->SetMid(params.mid);
+    }
+  }
+
+  if (!SetMaxSendBitrate(params.max_bandwidth_bps)) {
+    return false;
+  }
+  return SetOptions(params.options);
+}
+
+// Returns the currently configured send codec, or nullopt when none has
+// been selected yet.
+absl::optional<Codec> WebRtcVoiceSendChannel::GetSendCodec() const {
+  if (!send_codec_spec_) {
+    return absl::nullopt;
+  }
+  return CreateAudioCodec(send_codec_spec_->format);
+}
+
+ // Utility function called from SetSenderParameters() to extract current send
+ // codec settings from the given list of codecs (originally from SDP). Both send
+ // and receive streams may be reconfigured based on the new settings.
+ // Returns false if the codec list is invalid or no usable send codec is found;
+ // in that case some DTMF state may already have been reset.
+ bool WebRtcVoiceSendChannel::SetSendCodecs(
+ const std::vector<Codec>& codecs,
+ absl::optional<Codec> preferred_codec) {
+ RTC_DCHECK_RUN_ON(worker_thread_);
+ // Reset DTMF state up front; it is re-derived from `codecs` below.
+ dtmf_payload_type_ = absl::nullopt;
+ dtmf_payload_freq_ = -1;
+
+ // Validate supplied codecs list.
+ for (const Codec& codec : codecs) {
+ // TODO(solenberg): Validate more aspects of input - that payload types
+ // don't overlap, remove redundant/unsupported codecs etc -
+ // the same way it is done for RtpHeaderExtensions.
+ if (codec.id < kMinPayloadType || codec.id > kMaxPayloadType) {
+ RTC_LOG(LS_WARNING) << "Codec payload type out of range: "
+ << ToString(codec);
+ return false;
+ }
+ }
+
+ // Find PT of telephone-event codec with lowest clockrate, as a fallback, in
+ // case we don't have a DTMF codec with a rate matching the send codec's, or
+ // if this function returns early.
+ std::vector<Codec> dtmf_codecs;
+ for (const Codec& codec : codecs) {
+ if (IsCodec(codec, kDtmfCodecName)) {
+ dtmf_codecs.push_back(codec);
+ if (!dtmf_payload_type_ || codec.clockrate < dtmf_payload_freq_) {
+ dtmf_payload_type_ = codec.id;
+ dtmf_payload_freq_ = codec.clockrate;
+ }
+ }
+ }
+
+ // Scan through the list to figure out the codec to use for sending.
+ // The first non-CN/DTMF/RED codec (matching `preferred_codec` when given)
+ // that the encoder factory recognizes wins.
+ absl::optional<webrtc::AudioSendStream::Config::SendCodecSpec>
+ send_codec_spec;
+ webrtc::BitrateConstraints bitrate_config;
+ absl::optional<webrtc::AudioCodecInfo> voice_codec_info;
+ // Index of the chosen send codec; codecs before it are later searched for a
+ // matching RED codec (RED must be negotiated before the codec it wraps).
+ size_t send_codec_position = 0;
+ for (const Codec& voice_codec : codecs) {
+ if (!(IsCodec(voice_codec, kCnCodecName) ||
+ IsCodec(voice_codec, kDtmfCodecName) ||
+ IsCodec(voice_codec, kRedCodecName)) &&
+ (!preferred_codec || preferred_codec->Matches(voice_codec))) {
+ webrtc::SdpAudioFormat format(voice_codec.name, voice_codec.clockrate,
+ voice_codec.channels, voice_codec.params);
+
+ voice_codec_info = engine()->encoder_factory_->QueryAudioEncoder(format);
+ if (!voice_codec_info) {
+ RTC_LOG(LS_WARNING) << "Unknown codec " << ToString(voice_codec);
+ continue;
+ }
+
+ send_codec_spec = webrtc::AudioSendStream::Config::SendCodecSpec(
+ voice_codec.id, format);
+ if (voice_codec.bitrate > 0) {
+ send_codec_spec->target_bitrate_bps = voice_codec.bitrate;
+ }
+ send_codec_spec->transport_cc_enabled = HasTransportCc(voice_codec);
+ send_codec_spec->nack_enabled = HasNack(voice_codec);
+ send_codec_spec->enable_non_sender_rtt = HasRrtr(voice_codec);
+ bitrate_config = GetBitrateConfigForCodec(voice_codec);
+ break;
+ }
+ send_codec_position++;
+ }
+
+ if (!send_codec_spec) {
+ return false;
+ }
+
+ RTC_DCHECK(voice_codec_info);
+ if (voice_codec_info->allow_comfort_noise) {
+ // Loop through the codecs list again to find the CN codec.
+ // TODO(solenberg): Break out into a separate function?
+ for (const Codec& cn_codec : codecs) {
+ if (IsCodec(cn_codec, kCnCodecName) &&
+ cn_codec.clockrate == send_codec_spec->format.clockrate_hz &&
+ cn_codec.channels == voice_codec_info->num_channels) {
+ if (cn_codec.channels != 1) {
+ RTC_LOG(LS_WARNING)
+ << "CN #channels " << cn_codec.channels << " not supported.";
+ } else if (cn_codec.clockrate != 8000 && cn_codec.clockrate != 16000 &&
+ cn_codec.clockrate != 32000) {
+ RTC_LOG(LS_WARNING)
+ << "CN frequency " << cn_codec.clockrate << " not supported.";
+ } else {
+ send_codec_spec->cng_payload_type = cn_codec.id;
+ }
+ break;
+ }
+ }
+
+ // Find the telephone-event PT exactly matching the preferred send codec.
+ // Overrides the lowest-clockrate fallback chosen above when a match exists.
+ for (const Codec& dtmf_codec : dtmf_codecs) {
+ if (dtmf_codec.clockrate == send_codec_spec->format.clockrate_hz) {
+ dtmf_payload_type_ = dtmf_codec.id;
+ dtmf_payload_freq_ = dtmf_codec.clockrate;
+ break;
+ }
+ }
+ }
+
+ // Loop through the codecs to find the RED codec that matches opus
+ // with respect to clockrate and number of channels.
+ // RED codec needs to be negotiated before the actual codec they
+ // reference.
+ for (size_t i = 0; i < send_codec_position; ++i) {
+ const Codec& red_codec = codecs[i];
+ if (IsCodec(red_codec, kRedCodecName) &&
+ CheckRedParameters(red_codec, *send_codec_spec)) {
+ send_codec_spec->red_payload_type = red_codec.id;
+ break;
+ }
+ }
+
+ if (send_codec_spec_ != send_codec_spec) {
+ send_codec_spec_ = std::move(send_codec_spec);
+ // Apply new settings to all streams.
+ for (const auto& kv : send_streams_) {
+ kv.second->SetSendCodecSpec(*send_codec_spec_);
+ }
+ } else {
+ // If the codec isn't changing, set the start bitrate to -1 which means
+ // "unchanged" so that BWE isn't affected.
+ bitrate_config.start_bitrate_bps = -1;
+ }
+ call_->GetTransportControllerSend()->SetSdpBitrateParameters(bitrate_config);
+
+ send_codecs_ = codecs;
+
+ // Notify observers (if registered) that the send codec set changed.
+ if (send_codec_changed_callback_) {
+ send_codec_changed_callback_();
+ }
+
+ return true;
+ }
+
+ // Starts or stops sending on every send stream. Idempotent: returns early if
+ // the send state is unchanged. When enabling, also (re)applies channel options
+ // and optionally pre-initializes the audio device for recording.
+ void WebRtcVoiceSendChannel::SetSend(bool send) {
+ TRACE_EVENT0("webrtc", "WebRtcVoiceMediaChannel::SetSend");
+ if (send_ == send) {
+ return;
+ }
+
+ // Apply channel specific options.
+ if (send) {
+ engine()->ApplyOptions(options_);
+
+ // Initialize the ADM for recording (this may take time on some platforms,
+ // e.g. Android).
+ if (options_.init_recording_on_send.value_or(true) &&
+ // InitRecording() may return an error if the ADM is already recording.
+ !engine()->adm()->RecordingIsInitialized() &&
+ !engine()->adm()->Recording()) {
+ if (engine()->adm()->InitRecording() != 0) {
+ // Best effort only: failure is logged but does not prevent sending.
+ RTC_LOG(LS_WARNING) << "Failed to initialize recording";
+ }
+ }
+ }
+
+ // Change the settings on each send channel.
+ for (auto& kv : send_streams_) {
+ kv.second->SetSend(send);
+ }
+
+ send_ = send;
+ }
+
+ // Configures the audio send state for one stream: attaches/detaches `source`,
+ // mutes/unmutes according to `enable`, and applies `options` when enabling.
+ // Returns false on the first failing step (earlier steps are not rolled back).
+ bool WebRtcVoiceSendChannel::SetAudioSend(uint32_t ssrc,
+ bool enable,
+ const AudioOptions* options,
+ AudioSource* source) {
+ RTC_DCHECK_RUN_ON(worker_thread_);
+ // TODO(solenberg): The state change should be fully rolled back if any one of
+ // these calls fail.
+ if (!SetLocalSource(ssrc, source)) {
+ return false;
+ }
+ if (!MuteStream(ssrc, !enable)) {
+ return false;
+ }
+ if (enable && options) {
+ return SetOptions(*options);
+ }
+ return true;
+ }
+
+ // Creates a new WebRtcAudioSendStream for `sp`'s first SSRC, wires it with the
+ // current codec/extension/bitrate configuration, notifies the SSRC-list
+ // callback, and applies the channel's current send state to the new stream.
+ // Fails if a stream with the same SSRC already exists.
+ bool WebRtcVoiceSendChannel::AddSendStream(const StreamParams& sp) {
+ TRACE_EVENT0("webrtc", "WebRtcVoiceMediaChannel::AddSendStream");
+ RTC_DCHECK_RUN_ON(worker_thread_);
+ RTC_LOG(LS_INFO) << "AddSendStream: " << sp.ToString();
+
+ uint32_t ssrc = sp.first_ssrc();
+ RTC_DCHECK(0 != ssrc);
+
+ if (send_streams_.find(ssrc) != send_streams_.end()) {
+ RTC_LOG(LS_ERROR) << "Stream already exists with ssrc " << ssrc;
+ return false;
+ }
+
+ absl::optional<std::string> audio_network_adaptor_config =
+ GetAudioNetworkAdaptorConfig(options_);
+ // Raw pointer ownership: the map owns the stream; it is deleted in
+ // RemoveSendStream().
+ WebRtcAudioSendStream* stream = new WebRtcAudioSendStream(
+ ssrc, mid_, sp.cname, sp.id, send_codec_spec_, ExtmapAllowMixed(),
+ send_rtp_extensions_, max_send_bitrate_bps_,
+ audio_config_.rtcp_report_interval_ms, audio_network_adaptor_config,
+ call_, transport(), engine()->encoder_factory_, codec_pair_id_, nullptr,
+ crypto_options_);
+ send_streams_.insert(std::make_pair(ssrc, stream));
+ if (ssrc_list_changed_callback_) {
+ std::set<uint32_t> ssrcs_in_use;
+ for (auto it : send_streams_) {
+ ssrcs_in_use.insert(it.first);
+ }
+ ssrc_list_changed_callback_(ssrcs_in_use);
+ }
+
+ // New streams inherit the channel-wide send state.
+ send_streams_[ssrc]->SetSend(send_);
+ return true;
+ }
+
+ // Stops and destroys the send stream identified by `ssrc`. When the last
+ // stream is removed, the channel-wide send state is turned off as well.
+ // Returns false if no stream exists for `ssrc`.
+ bool WebRtcVoiceSendChannel::RemoveSendStream(uint32_t ssrc) {
+ TRACE_EVENT0("webrtc", "WebRtcVoiceMediaChannel::RemoveSendStream");
+ RTC_DCHECK_RUN_ON(worker_thread_);
+ RTC_LOG(LS_INFO) << "RemoveSendStream: " << ssrc;
+
+ auto it = send_streams_.find(ssrc);
+ if (it == send_streams_.end()) {
+ RTC_LOG(LS_WARNING) << "Try to remove stream with ssrc " << ssrc
+ << " which doesn't exist.";
+ return false;
+ }
+
+ it->second->SetSend(false);
+
+ // TODO(solenberg): If we're removing the receiver_reports_ssrc_ stream, find
+ // the first active send stream and use that instead, reassociating receive
+ // streams.
+
+ // Map values are owned raw pointers (see AddSendStream()).
+ delete it->second;
+ send_streams_.erase(it);
+ if (send_streams_.empty()) {
+ SetSend(false);
+ }
+ return true;
+ }
+
+ // Registers the callback invoked (from AddSendStream()) with the full set of
+ // SSRCs currently in use whenever the set of send streams changes.
+ void WebRtcVoiceSendChannel::SetSsrcListChangedCallback(
+ absl::AnyInvocable<void(const std::set<uint32_t>&)> callback) {
+ ssrc_list_changed_callback_ = std::move(callback);
+ }
+
+ // Attaches (or, when `source` is null, detaches) the audio source feeding the
+ // send stream identified by `ssrc`. Setting a non-null source on an unknown
+ // ssrc is an error; clearing a source on an unknown ssrc is a benign no-op.
+ bool WebRtcVoiceSendChannel::SetLocalSource(uint32_t ssrc,
+ AudioSource* source) {
+ const auto stream_it = send_streams_.find(ssrc);
+ if (stream_it != send_streams_.end()) {
+ if (source != nullptr) {
+ stream_it->second->SetSource(source);
+ } else {
+ stream_it->second->ClearSource();
+ }
+ return true;
+ }
+
+ if (source == nullptr) {
+ // The channel likely has gone away, do nothing.
+ return true;
+ }
+
+ // Return an error if trying to set a valid source with an invalid ssrc.
+ RTC_LOG(LS_ERROR) << "SetLocalSource failed with ssrc " << ssrc;
+ return false;
+ }
+
+ // DTMF can be inserted only while the channel is sending and a
+ // telephone-event payload type has been negotiated (see SetSendCodecs()).
+ bool WebRtcVoiceSendChannel::CanInsertDtmf() {
+ if (!send_) {
+ return false;
+ }
+ return dtmf_payload_type_.has_value();
+ }
+
+ // Installs a frame encryptor on the send stream for `ssrc`. Silently ignored
+ // when no such stream exists.
+ void WebRtcVoiceSendChannel::SetFrameEncryptor(
+ uint32_t ssrc,
+ rtc::scoped_refptr<webrtc::FrameEncryptorInterface> frame_encryptor) {
+ RTC_DCHECK_RUN_ON(worker_thread_);
+ auto matching_stream = send_streams_.find(ssrc);
+ if (matching_stream != send_streams_.end()) {
+ matching_stream->second->SetFrameEncryptor(frame_encryptor);
+ }
+ }
+
+ // Sends an RFC 4733 telephone-event on the stream for `ssrc` (ssrc == 0 means
+ // "the first send stream"). Requires CanInsertDtmf() and an event code within
+ // the valid range; `duration` is in milliseconds.
+ bool WebRtcVoiceSendChannel::InsertDtmf(uint32_t ssrc,
+ int event,
+ int duration) {
+ RTC_DCHECK_RUN_ON(worker_thread_);
+ RTC_LOG(LS_INFO) << "WebRtcVoiceMediaChannel::InsertDtmf";
+ if (!CanInsertDtmf()) {
+ return false;
+ }
+
+ // Figure out which WebRtcAudioSendStream to send the event on.
+ auto it = ssrc != 0 ? send_streams_.find(ssrc) : send_streams_.begin();
+ if (it == send_streams_.end()) {
+ RTC_LOG(LS_WARNING) << "The specified ssrc " << ssrc << " is not in use.";
+ return false;
+ }
+ if (event < kMinTelephoneEventCode || event > kMaxTelephoneEventCode) {
+ RTC_LOG(LS_WARNING) << "DTMF event code " << event << " out of range.";
+ return false;
+ }
+ // dtmf_payload_freq_ is set together with dtmf_payload_type_, so it must be
+ // valid whenever CanInsertDtmf() returned true.
+ RTC_DCHECK_NE(-1, dtmf_payload_freq_);
+ return it->second->SendTelephoneEvent(*dtmf_payload_type_, dtmf_payload_freq_,
+ event, duration);
+ }
+
+ // Forwards sent-packet notifications (network thread) to the Call object.
+ void WebRtcVoiceSendChannel::OnPacketSent(const rtc::SentPacket& sent_packet) {
+ RTC_DCHECK_RUN_ON(&network_thread_checker_);
+ // TODO(tommi): We shouldn't need to go through call_ to deliver this
+ // notification. We should already have direct access to
+ // video_send_delay_stats_ and transport_send_ptr_ via `stream_`.
+ // So we should be able to remove OnSentPacket from Call and handle this per
+ // channel instead. At the moment Call::OnSentPacket calls OnSentPacket for
+ // the video stats, which we should be able to skip.
+ call_->OnSentPacket(sent_packet);
+ }
+
+ // Handles a network route change (network thread): applies the new per-packet
+ // overhead synchronously, then hops to the worker thread to notify the
+ // transport controller. The task is guarded by `task_safety_` so it is
+ // dropped if the channel is destroyed before it runs.
+ void WebRtcVoiceSendChannel::OnNetworkRouteChanged(
+ absl::string_view transport_name,
+ const rtc::NetworkRoute& network_route) {
+ RTC_DCHECK_RUN_ON(&network_thread_checker_);
+
+ call_->OnAudioTransportOverheadChanged(network_route.packet_overhead);
+
+ // Copy name and route by value: the task may outlive the caller's views.
+ worker_thread_->PostTask(SafeTask(
+ task_safety_.flag(),
+ [this, name = std::string(transport_name), route = network_route] {
+ RTC_DCHECK_RUN_ON(worker_thread_);
+ call_->GetTransportControllerSend()->OnNetworkRouteChanged(name, route);
+ }));
+ }
+
+ // Mutes or unmutes the send stream for `ssrc`, then tells the audio
+ // processing module whether every stream is now muted. Returns false if no
+ // stream exists for `ssrc`.
+ bool WebRtcVoiceSendChannel::MuteStream(uint32_t ssrc, bool muted) {
+ RTC_DCHECK_RUN_ON(worker_thread_);
+ const auto it = send_streams_.find(ssrc);
+ if (it == send_streams_.end()) {
+ RTC_LOG(LS_WARNING) << "The specified ssrc " << ssrc << " is not in use.";
+ return false;
+ }
+ it->second->SetMuted(muted);
+
+ // TODO(solenberg):
+ // We set the AGC to mute state only when all the channels are muted.
+ // This implementation is not ideal, instead we should signal the AGC when
+ // the mic channel is muted/unmuted. We can't do it today because there
+ // is no good way to know which stream is mapping to the mic channel.
+ bool all_muted = muted;
+ for (const auto& kv : send_streams_) {
+ all_muted = all_muted && kv.second->muted();
+ }
+ webrtc::AudioProcessing* ap = engine()->apm();
+ if (ap) {
+ ap->set_output_will_be_muted(all_muted);
+ }
+
+ return true;
+ }
+
+ // Stores `bps` as the channel-wide max send bitrate and applies it to every
+ // send stream. Returns false if any stream rejects the new cap; all streams
+ // are still attempted.
+ bool WebRtcVoiceSendChannel::SetMaxSendBitrate(int bps) {
+ RTC_LOG(LS_INFO) << "WebRtcVoiceMediaChannel::SetMaxSendBitrate.";
+ max_send_bitrate_bps_ = bps;
+ bool all_ok = true;
+ for (const auto& entry : send_streams_) {
+ // Evaluate the call first so every stream is updated even after a failure.
+ all_ok = entry.second->SetMaxSendBitrate(max_send_bitrate_bps_) && all_ok;
+ }
+ return all_ok;
+ }
+
+ // Propagates transport readiness (network thread) to the Call as an
+ // audio-channel network up/down signal.
+ void WebRtcVoiceSendChannel::OnReadyToSend(bool ready) {
+ RTC_DCHECK_RUN_ON(&network_thread_checker_);
+ RTC_LOG(LS_VERBOSE) << "OnReadyToSend: " << (ready ? "Ready." : "Not ready.");
+ call_->SignalChannelNetworkState(
+ webrtc::MediaType::AUDIO,
+ ready ? webrtc::kNetworkUp : webrtc::kNetworkDown);
+ }
+
+ // Appends one VoiceSenderInfo per send stream to `info->senders` and fills in
+ // the send-codec map. Always returns true; `info` must be non-null.
+ bool WebRtcVoiceSendChannel::GetStats(VoiceMediaSendInfo* info) {
+ TRACE_EVENT0("webrtc", "WebRtcVoiceMediaChannel::GetSendStats");
+ RTC_DCHECK_RUN_ON(worker_thread_);
+ RTC_DCHECK(info);
+
+ // Get SSRC and stats for each sender.
+ // With separate send and receive channels, we expect GetStats to be called on
+ // both, and accumulate info, but only one channel (the send one) should have
+ // senders.
+ RTC_DCHECK(info->senders.size() == 0U || send_streams_.size() == 0);
+ for (const auto& stream : send_streams_) {
+ // `false`: do not clear legacy stats on read.
+ webrtc::AudioSendStream::Stats stats = stream.second->GetStats(false);
+ VoiceSenderInfo sinfo;
+ sinfo.add_ssrc(stats.local_ssrc);
+ sinfo.payload_bytes_sent = stats.payload_bytes_sent;
+ sinfo.header_and_padding_bytes_sent = stats.header_and_padding_bytes_sent;
+ sinfo.retransmitted_bytes_sent = stats.retransmitted_bytes_sent;
+ sinfo.packets_sent = stats.packets_sent;
+ sinfo.total_packet_send_delay = stats.total_packet_send_delay;
+ sinfo.retransmitted_packets_sent = stats.retransmitted_packets_sent;
+ sinfo.packets_lost = stats.packets_lost;
+ sinfo.fraction_lost = stats.fraction_lost;
+ sinfo.nacks_received = stats.nacks_received;
+ sinfo.target_bitrate = stats.target_bitrate_bps;
+ sinfo.codec_name = stats.codec_name;
+ sinfo.codec_payload_type = stats.codec_payload_type;
+ sinfo.jitter_ms = stats.jitter_ms;
+ sinfo.rtt_ms = stats.rtt_ms;
+ sinfo.audio_level = stats.audio_level;
+ sinfo.total_input_energy = stats.total_input_energy;
+ sinfo.total_input_duration = stats.total_input_duration;
+ sinfo.ana_statistics = stats.ana_statistics;
+ sinfo.apm_statistics = stats.apm_statistics;
+ sinfo.report_block_datas = std::move(stats.report_block_datas);
+
+ // The sender's active flag is taken from the first encoding layer.
+ auto encodings = stream.second->rtp_parameters().encodings;
+ if (!encodings.empty()) {
+ sinfo.active = encodings[0].active;
+ }
+
+ info->senders.push_back(sinfo);
+ }
+
+ FillSendCodecStats(info);
+
+ return true;
+ }
+
+ // For each sender in `voice_media_info`, looks up its payload type in the
+ // negotiated send codec list and records the codec parameters keyed by
+ // payload type. Senders without a known payload type are skipped.
+ void WebRtcVoiceSendChannel::FillSendCodecStats(
+ VoiceMediaSendInfo* voice_media_info) {
+ for (const auto& sender : voice_media_info->senders) {
+ auto codec = absl::c_find_if(send_codecs_, [&sender](const AudioCodec& c) {
+ return sender.codec_payload_type && *sender.codec_payload_type == c.id;
+ });
+ if (codec != send_codecs_.end()) {
+ voice_media_info->send_codecs.insert(
+ std::make_pair(codec->id, codec->ToCodecParameters()));
+ }
+ }
+ }
+
+ // Installs a frame transformer between encoder and packetizer on the send
+ // stream for `ssrc`. Logged and ignored when the stream does not exist.
+ void WebRtcVoiceSendChannel::SetEncoderToPacketizerFrameTransformer(
+ uint32_t ssrc,
+ rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer) {
+ RTC_DCHECK_RUN_ON(worker_thread_);
+ auto matching_stream = send_streams_.find(ssrc);
+ if (matching_stream == send_streams_.end()) {
+ RTC_LOG(LS_INFO) << "Attempting to set frame transformer for SSRC:" << ssrc
+ << " which doesn't exist.";
+ return;
+ }
+ matching_stream->second->SetEncoderToPacketizerFrameTransformer(
+ std::move(frame_transformer));
+ }
+
+ // Returns the stream-specific RTP parameters for `ssrc`, augmented with the
+ // channel-wide send codec list. Returns empty parameters when the stream
+ // does not exist.
+ webrtc::RtpParameters WebRtcVoiceSendChannel::GetRtpSendParameters(
+ uint32_t ssrc) const {
+ RTC_DCHECK_RUN_ON(worker_thread_);
+ auto it = send_streams_.find(ssrc);
+ if (it == send_streams_.end()) {
+ RTC_LOG(LS_WARNING) << "Attempting to get RTP send parameters for stream "
+ "with ssrc "
+ << ssrc << " which doesn't exist.";
+ return webrtc::RtpParameters();
+ }
+
+ webrtc::RtpParameters rtp_params = it->second->rtp_parameters();
+ // Need to add the common list of codecs to the send stream-specific
+ // RTP parameters.
+ for (const AudioCodec& codec : send_codecs_) {
+ rtp_params.codecs.push_back(codec.ToCodecParameters());
+ }
+ return rtp_params;
+ }
+
+ // Applies new RTP send parameters to the stream for `ssrc`: validates that
+ // the codec list is unchanged, maps the first encoding's network priority to
+ // a DSCP value, optionally switches the send codec when the encoding names
+ // one, and finally forwards the (codec-stripped) parameters to the stream.
+ // The result is reported through `callback`.
+ webrtc::RTCError WebRtcVoiceSendChannel::SetRtpSendParameters(
+ uint32_t ssrc,
+ const webrtc::RtpParameters& parameters,
+ webrtc::SetParametersCallback callback) {
+ RTC_DCHECK_RUN_ON(worker_thread_);
+ auto it = send_streams_.find(ssrc);
+ if (it == send_streams_.end()) {
+ RTC_LOG(LS_WARNING) << "Attempting to set RTP send parameters for stream "
+ "with ssrc "
+ << ssrc << " which doesn't exist.";
+ return webrtc::InvokeSetParametersCallback(
+ callback, webrtc::RTCError(webrtc::RTCErrorType::INTERNAL_ERROR));
+ }
+
+ // TODO(deadbeef): Handle setting parameters with a list of codecs in a
+ // different order (which should change the send codec).
+ webrtc::RtpParameters current_parameters = GetRtpSendParameters(ssrc);
+ if (current_parameters.codecs != parameters.codecs) {
+ RTC_DLOG(LS_ERROR) << "Using SetParameters to change the set of codecs "
+ "is not currently supported.";
+ return webrtc::InvokeSetParametersCallback(
+ callback, webrtc::RTCError(webrtc::RTCErrorType::INTERNAL_ERROR));
+ }
+
+ if (!parameters.encodings.empty()) {
+ // Note that these values come from:
+ // https://tools.ietf.org/html/draft-ietf-tsvwg-rtcweb-qos-16#section-5
+ rtc::DiffServCodePoint new_dscp = rtc::DSCP_DEFAULT;
+ switch (parameters.encodings[0].network_priority) {
+ case webrtc::Priority::kVeryLow:
+ new_dscp = rtc::DSCP_CS1;
+ break;
+ case webrtc::Priority::kLow:
+ new_dscp = rtc::DSCP_DEFAULT;
+ break;
+ case webrtc::Priority::kMedium:
+ new_dscp = rtc::DSCP_EF;
+ break;
+ case webrtc::Priority::kHigh:
+ new_dscp = rtc::DSCP_EF;
+ break;
+ }
+ SetPreferredDscp(new_dscp);
+
+ absl::optional<cricket::Codec> send_codec = GetSendCodec();
+ // Since we validate that all layers have the same value, we can just check
+ // the first layer.
+ // TODO(orphis): Support mixed-codec simulcast
+ if (parameters.encodings[0].codec && send_codec &&
+ !send_codec->MatchesRtpCodec(*parameters.encodings[0].codec)) {
+ RTC_LOG(LS_VERBOSE) << "Trying to change codec to "
+ << parameters.encodings[0].codec->name;
+ auto matched_codec =
+ absl::c_find_if(send_codecs_, [&](auto negotiated_codec) {
+ return negotiated_codec.MatchesRtpCodec(
+ *parameters.encodings[0].codec);
+ });
+
+ if (matched_codec == send_codecs_.end()) {
+ return webrtc::InvokeSetParametersCallback(
+ callback, webrtc::RTCError(
+ webrtc::RTCErrorType::INVALID_MODIFICATION,
+ "Attempted to use an unsupported codec for layer 0"));
+ }
+
+ // Re-run codec selection with the requested codec as preferred.
+ SetSendCodecs(send_codecs_, *matched_codec);
+ }
+ }
+
+ // TODO(minyue): The following legacy actions go into
+ // `WebRtcAudioSendStream::SetRtpParameters()` which is called at the end,
+ // though there are two difference:
+ // 1. `WebRtcVoiceMediaChannel::SetChannelSendParameters()` only calls
+ // `SetSendCodec` while `WebRtcAudioSendStream::SetRtpParameters()` calls
+ // `SetSendCodecs`. The outcome should be the same.
+ // 2. AudioSendStream can be recreated.
+
+ // Codecs are handled at the WebRtcVoiceMediaChannel level.
+ webrtc::RtpParameters reduced_params = parameters;
+ reduced_params.codecs.clear();
+ return it->second->SetRtpParameters(reduced_params, std::move(callback));
+ }
+
+ // -------------------------- WebRtcVoiceReceiveChannel ----------------------
+
+ // Thin RAII wrapper around a webrtc::AudioReceiveStreamInterface owned by the
+ // Call. Creates the stream in the constructor, destroys it in the destructor,
+ // and forwards configuration calls. All methods must run on the worker thread
+ // (enforced by `worker_thread_checker_`).
+ class WebRtcVoiceReceiveChannel::WebRtcAudioReceiveStream {
+ public:
+ WebRtcAudioReceiveStream(webrtc::AudioReceiveStreamInterface::Config config,
+ webrtc::Call* call)
+ : call_(call), stream_(call_->CreateAudioReceiveStream(config)) {
+ RTC_DCHECK(call);
+ RTC_DCHECK(stream_);
+ }
+
+ WebRtcAudioReceiveStream() = delete;
+ WebRtcAudioReceiveStream(const WebRtcAudioReceiveStream&) = delete;
+ WebRtcAudioReceiveStream& operator=(const WebRtcAudioReceiveStream&) = delete;
+
+ ~WebRtcAudioReceiveStream() {
+ RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+ call_->DestroyAudioReceiveStream(stream_);
+ }
+
+ webrtc::AudioReceiveStreamInterface& stream() {
+ RTC_DCHECK(stream_);
+ return *stream_;
+ }
+
+ void SetFrameDecryptor(
+ rtc::scoped_refptr<webrtc::FrameDecryptorInterface> frame_decryptor) {
+ RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+ stream_->SetFrameDecryptor(std::move(frame_decryptor));
+ }
+
+ // Toggles NACK by setting the RTP history either to the channel-wide
+ // constant or to zero (disabled).
+ void SetUseNack(bool use_nack) {
+ RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+ stream_->SetNackHistory(use_nack ? kNackRtpHistoryMs : 0);
+ }
+
+ void SetNonSenderRttMeasurement(bool enabled) {
+ RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+ stream_->SetNonSenderRttMeasurement(enabled);
+ }
+
+ // Set a new payload type -> decoder map.
+ void SetDecoderMap(const std::map<int, webrtc::SdpAudioFormat>& decoder_map) {
+ RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+ stream_->SetDecoderMap(decoder_map);
+ }
+
+ webrtc::AudioReceiveStreamInterface::Stats GetStats(
+ bool get_and_clear_legacy_stats) const {
+ RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+ return stream_->GetStats(get_and_clear_legacy_stats);
+ }
+
+ // Takes ownership of `sink`; a null sink detaches the previous one.
+ void SetRawAudioSink(std::unique_ptr<webrtc::AudioSinkInterface> sink) {
+ RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+ // Need to update the stream's sink first; once raw_audio_sink_ is
+ // reassigned, whatever was in there before is destroyed.
+ stream_->SetSink(sink.get());
+ raw_audio_sink_ = std::move(sink);
+ }
+
+ void SetOutputVolume(double volume) {
+ RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+ stream_->SetGain(volume);
+ }
+
+ void SetPlayout(bool playout) {
+ RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+ if (playout) {
+ stream_->Start();
+ } else {
+ stream_->Stop();
+ }
+ }
+
+ bool SetBaseMinimumPlayoutDelayMs(int delay_ms) {
+ RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+ if (stream_->SetBaseMinimumPlayoutDelayMs(delay_ms))
+ return true;
+
+ RTC_LOG(LS_ERROR) << "Failed to SetBaseMinimumPlayoutDelayMs"
+ " on AudioReceiveStreamInterface on SSRC="
+ << stream_->remote_ssrc()
+ << " with delay_ms=" << delay_ms;
+ return false;
+ }
+
+ int GetBaseMinimumPlayoutDelayMs() const {
+ RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+ return stream_->GetBaseMinimumPlayoutDelayMs();
+ }
+
+ std::vector<webrtc::RtpSource> GetSources() {
+ RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+ return stream_->GetSources();
+ }
+
+ void SetDepacketizerToDecoderFrameTransformer(
+ rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer) {
+ RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+ stream_->SetDepacketizerToDecoderFrameTransformer(frame_transformer);
+ }
+
+ private:
+ webrtc::SequenceChecker worker_thread_checker_;
+ // Not owned; outlives this wrapper.
+ webrtc::Call* call_ = nullptr;
+ // Created/destroyed via `call_`; never null after construction.
+ webrtc::AudioReceiveStreamInterface* const stream_ = nullptr;
+ std::unique_ptr<webrtc::AudioSinkInterface> raw_audio_sink_
+ RTC_GUARDED_BY(worker_thread_checker_);
+ };
+
+ // Constructs the receive channel and applies the initial audio options.
+ // `engine` and `call` must outlive the channel.
+ WebRtcVoiceReceiveChannel::WebRtcVoiceReceiveChannel(
+ WebRtcVoiceEngine* engine,
+ const MediaConfig& config,
+ const AudioOptions& options,
+ const webrtc::CryptoOptions& crypto_options,
+ webrtc::Call* call,
+ webrtc::AudioCodecPairId codec_pair_id)
+ : MediaChannelUtil(call->network_thread(), config.enable_dscp),
+ worker_thread_(call->worker_thread()),
+ engine_(engine),
+ call_(call),
+ audio_config_(config.audio),
+ codec_pair_id_(codec_pair_id),
+ crypto_options_(crypto_options) {
+ RTC_LOG(LS_VERBOSE) << "WebRtcVoiceReceiveChannel::WebRtcVoiceReceiveChannel";
+ RTC_DCHECK(call);
+ SetOptions(options);
+ }
+
+ // Tears down every receive stream via RemoveRecvStream() so each one is
+ // deregistered and deleted consistently.
+ WebRtcVoiceReceiveChannel::~WebRtcVoiceReceiveChannel() {
+ RTC_DCHECK_RUN_ON(worker_thread_);
+ RTC_DLOG(LS_VERBOSE)
+ << "WebRtcVoiceReceiveChannel::~WebRtcVoiceReceiveChannel";
+ // TODO(solenberg): Should be able to delete the streams directly, without
+ // going through RemoveNnStream(), once stream objects handle
+ // all (de)configuration.
+ while (!recv_streams_.empty()) {
+ RemoveRecvStream(recv_streams_.begin()->first);
+ }
+ }
+
+ // Applies receiver parameters from SDP: the receive codec list and the
+ // filtered set of audio RTP header extensions. Returns false if either the
+ // codecs or the extensions fail validation.
+ bool WebRtcVoiceReceiveChannel::SetReceiverParameters(
+ const AudioReceiverParameters& params) {
+ TRACE_EVENT0("webrtc", "WebRtcVoiceMediaChannel::SetReceiverParameters");
+ RTC_DCHECK_RUN_ON(worker_thread_);
+ RTC_LOG(LS_INFO) << "WebRtcVoiceMediaChannel::SetReceiverParameters: "
+ << params.ToString();
+ // TODO(pthatcher): Refactor this to be more clean now that we have
+ // all the information at once.
+
+ if (!SetRecvCodecs(params.codecs)) {
+ return false;
+ }
+
+ if (!ValidateRtpExtensions(params.extensions, recv_rtp_extensions_)) {
+ return false;
+ }
+ std::vector<webrtc::RtpExtension> filtered_extensions = FilterRtpExtensions(
+ params.extensions, webrtc::RtpExtension::IsSupportedForAudio, false,
+ call_->trials());
+ if (recv_rtp_extensions_ != filtered_extensions) {
+ recv_rtp_extensions_.swap(filtered_extensions);
+ // Keep the id->extension map in sync with the extension list.
+ recv_rtp_extension_map_ =
+ webrtc::RtpHeaderExtensionMap(recv_rtp_extensions_);
+ }
+ return true;
+ }
+
+ // Returns RTP receive parameters for the stream with `ssrc`: one encoding
+ // carrying the remote SSRC, the current header extensions, and the receive
+ // codec list. Returns empty parameters when the stream does not exist.
+ webrtc::RtpParameters WebRtcVoiceReceiveChannel::GetRtpReceiverParameters(
+ uint32_t ssrc) const {
+ RTC_DCHECK_RUN_ON(worker_thread_);
+ webrtc::RtpParameters rtp_params;
+ auto it = recv_streams_.find(ssrc);
+ if (it == recv_streams_.end()) {
+ RTC_LOG(LS_WARNING)
+ << "Attempting to get RTP receive parameters for stream "
+ "with ssrc "
+ << ssrc << " which doesn't exist.";
+ return webrtc::RtpParameters();
+ }
+ rtp_params.encodings.emplace_back();
+ rtp_params.encodings.back().ssrc = it->second->stream().remote_ssrc();
+ rtp_params.header_extensions = recv_rtp_extensions_;
+
+ for (const AudioCodec& codec : recv_codecs_) {
+ rtp_params.codecs.push_back(codec.ToCodecParameters());
+ }
+ return rtp_params;
+ }
+
+ // Returns receive parameters for the default (unsignaled) stream. When no
+ // default sink has been configured, the returned `encodings` stays empty.
+ webrtc::RtpParameters
+ WebRtcVoiceReceiveChannel::GetDefaultRtpReceiveParameters() const {
+ RTC_DCHECK_RUN_ON(worker_thread_);
+ webrtc::RtpParameters rtp_params;
+ if (!default_sink_) {
+ // Getting parameters on a default, unsignaled audio receive stream but
+ // because we've not configured to receive such a stream, `encodings` is
+ // empty.
+ return rtp_params;
+ }
+ rtp_params.encodings.emplace_back();
+
+ for (const AudioCodec& codec : recv_codecs_) {
+ rtp_params.codecs.push_back(codec.ToCodecParameters());
+ }
+ return rtp_params;
+ }
+
+ // Merges `options` into the channel's current options and re-applies the
+ // combined set to the engine. Always returns true.
+ bool WebRtcVoiceReceiveChannel::SetOptions(const AudioOptions& options) {
+ RTC_DCHECK_RUN_ON(worker_thread_);
+ RTC_LOG(LS_INFO) << "Setting voice channel options: " << options.ToString();
+
+ // We retain all of the existing options, and apply the given ones
+ // on top. This means there is no way to "clear" options such that
+ // they go back to the engine default.
+ options_.SetAll(options);
+ engine()->ApplyOptions(options_);
+
+ RTC_LOG(LS_INFO) << "Set voice receive channel options. Current options: "
+ << options_.ToString();
+ return true;
+ }
+
+ // Validates and installs the payload-type -> decoder map for incoming audio.
+ // Rejects duplicate payload types, decoders the factory does not support,
+ // and attempts to remap an already-configured payload type to a different
+ // format. On change, playout is paused while the decoder map is swapped in.
+ bool WebRtcVoiceReceiveChannel::SetRecvCodecs(
+ const std::vector<AudioCodec>& codecs) {
+ RTC_DCHECK_RUN_ON(worker_thread_);
+
+ // Set the payload types to be used for incoming media.
+ RTC_LOG(LS_INFO) << "Setting receive voice codecs.";
+
+ if (!VerifyUniquePayloadTypes(codecs)) {
+ RTC_LOG(LS_ERROR) << "Codec payload types overlap.";
+ return false;
+ }
+
+ // Create a payload type -> SdpAudioFormat map with all the decoders. Fail
+ // unless the factory claims to support all decoders.
+ std::map<int, webrtc::SdpAudioFormat> decoder_map;
+ for (const AudioCodec& codec : codecs) {
+ // Log a warning if a codec's payload type is changing. This used to be
+ // treated as an error. It's abnormal, but not really illegal.
+ absl::optional<AudioCodec> old_codec =
+ FindCodec(recv_codecs_, codec, &call_->trials());
+ if (old_codec && old_codec->id != codec.id) {
+ RTC_LOG(LS_WARNING) << codec.name << " mapped to a second payload type ("
+ << codec.id << ", was already mapped to "
+ << old_codec->id << ")";
+ }
+ auto format = AudioCodecToSdpAudioFormat(codec);
+ // CN/DTMF/RED are handled internally and need no decoder-factory support.
+ if (!IsCodec(codec, kCnCodecName) && !IsCodec(codec, kDtmfCodecName) &&
+ !IsCodec(codec, kRedCodecName) &&
+ !engine()->decoder_factory_->IsSupportedDecoder(format)) {
+ RTC_LOG(LS_ERROR) << "Unsupported codec: " << rtc::ToString(format);
+ return false;
+ }
+ // We allow adding new codecs but don't allow changing the payload type of
+ // codecs that are already configured since we might already be receiving
+ // packets with that payload type. See RFC3264, Section 8.3.2.
+ // TODO(deadbeef): Also need to check for clashes with previously mapped
+ // payload types, and not just currently mapped ones. For example, this
+ // should be illegal:
+ // 1. {100: opus/48000/2, 101: ISAC/16000}
+ // 2. {100: opus/48000/2}
+ // 3. {100: opus/48000/2, 101: ISAC/32000}
+ // Though this check really should happen at a higher level, since this
+ // conflict could happen between audio and video codecs.
+ auto existing = decoder_map_.find(codec.id);
+ if (existing != decoder_map_.end() && !existing->second.Matches(format)) {
+ RTC_LOG(LS_ERROR) << "Attempting to use payload type " << codec.id
+ << " for " << codec.name
+ << ", but it is already used for "
+ << existing->second.name;
+ return false;
+ }
+ decoder_map.insert({codec.id, std::move(format)});
+ }
+
+ if (decoder_map == decoder_map_) {
+ // There's nothing new to configure.
+ return true;
+ }
+
+ bool playout_enabled = playout_;
+ // Receive codecs can not be changed while playing. So we temporarily
+ // pause playout.
+ SetPlayout(false);
+ RTC_DCHECK(!playout_);
+
+ decoder_map_ = std::move(decoder_map);
+ for (auto& kv : recv_streams_) {
+ kv.second->SetDecoderMap(decoder_map_);
+ }
+
+ recv_codecs_ = codecs;
+
+ // Restore the playout state we paused above.
+ SetPlayout(playout_enabled);
+ RTC_DCHECK_EQ(playout_, playout_enabled);
+
+ return true;
+ }
+
+ // Check if the NACK status has changed on the preferred send codec, and in
+ // that case reconfigure all receive streams.
+ void WebRtcVoiceReceiveChannel::SetReceiveNackEnabled(bool enabled) {
+ if (recv_nack_enabled_ == enabled) {
+ return;
+ }
+ RTC_LOG(LS_INFO) << "Changing NACK status on receive streams.";
+ recv_nack_enabled_ = enabled;
+ for (auto& entry : recv_streams_) {
+ entry.second->SetUseNack(recv_nack_enabled_);
+ }
+ }
+
+ // Check if the receive-side RTT status has changed on the preferred send
+ // codec, in that case reconfigure all receive streams.
+ void WebRtcVoiceReceiveChannel::SetReceiveNonSenderRttEnabled(bool enabled) {
+ if (enable_non_sender_rtt_ == enabled) {
+ return;
+ }
+ RTC_LOG(LS_INFO) << "Changing receive-side RTT status on receive streams.";
+ enable_non_sender_rtt_ = enabled;
+ for (auto& entry : recv_streams_) {
+ entry.second->SetNonSenderRttMeasurement(enable_non_sender_rtt_);
+ }
+ }
+
+ // Starts or stops playout on every receive stream. Idempotent.
+ void WebRtcVoiceReceiveChannel::SetPlayout(bool playout) {
+ TRACE_EVENT0("webrtc", "WebRtcVoiceMediaChannel::SetPlayout");
+ RTC_DCHECK_RUN_ON(worker_thread_);
+ if (playout_ == playout) {
+ return;
+ }
+
+ for (const auto& kv : recv_streams_) {
+ kv.second->SetPlayout(playout);
+ }
+ playout_ = playout;
+ }
+
+ // Adds a receive stream. Handles three cases: (1) `sp` has no SSRCs — store
+ // it for later unsignaled-SSRC promotion; (2) the SSRC was previously
+ // received unsignaled — promote it and update its sync group; (3) otherwise
+ // create a brand-new AudioReceiveStream with the current channel config.
+ bool WebRtcVoiceReceiveChannel::AddRecvStream(const StreamParams& sp) {
+ TRACE_EVENT0("webrtc", "WebRtcVoiceMediaChannel::AddRecvStream");
+ RTC_DCHECK_RUN_ON(worker_thread_);
+ RTC_LOG(LS_INFO) << "AddRecvStream: " << sp.ToString();
+
+ if (!sp.has_ssrcs()) {
+ // This is a StreamParam with unsignaled SSRCs. Store it, so it can be used
+ // later when we know the SSRCs on the first packet arrival.
+ unsignaled_stream_params_ = sp;
+ return true;
+ }
+
+ if (!ValidateStreamParams(sp)) {
+ return false;
+ }
+
+ const uint32_t ssrc = sp.first_ssrc();
+
+ // If this stream was previously received unsignaled, we promote it, possibly
+ // updating the sync group if stream ids have changed.
+ if (MaybeDeregisterUnsignaledRecvStream(ssrc)) {
+ auto stream_ids = sp.stream_ids();
+ std::string sync_group = stream_ids.empty() ? std::string() : stream_ids[0];
+ call_->OnUpdateSyncGroup(recv_streams_[ssrc]->stream(),
+ std::move(sync_group));
+ return true;
+ }
+
+ if (recv_streams_.find(ssrc) != recv_streams_.end()) {
+ RTC_LOG(LS_ERROR) << "Stream already exists with ssrc " << ssrc;
+ return false;
+ }
+
+ // Create a new channel for receiving audio data.
+ auto config = BuildReceiveStreamConfig(
+ ssrc, receiver_reports_ssrc_, recv_nack_enabled_, enable_non_sender_rtt_,
+ sp.stream_ids(), recv_rtp_extensions_, transport(),
+ engine()->decoder_factory_, decoder_map_, codec_pair_id_,
+ engine()->audio_jitter_buffer_max_packets_,
+ engine()->audio_jitter_buffer_fast_accelerate_,
+ engine()->audio_jitter_buffer_min_delay_ms_, unsignaled_frame_decryptor_,
+ crypto_options_, unsignaled_frame_transformer_);
+
+ // Map values are owned raw pointers; deleted in RemoveRecvStream().
+ recv_streams_.insert(std::make_pair(
+ ssrc, new WebRtcAudioReceiveStream(std::move(config), call_)));
+ // New streams inherit the channel-wide playout state.
+ recv_streams_[ssrc]->SetPlayout(playout_);
+
+ return true;
+ }
+
+ // Removes and destroys the receive stream for `ssrc`, first deregistering it
+ // from the unsignaled set and detaching its raw audio sink. Returns false if
+ // no such stream exists.
+ bool WebRtcVoiceReceiveChannel::RemoveRecvStream(uint32_t ssrc) {
+ TRACE_EVENT0("webrtc", "WebRtcVoiceMediaChannel::RemoveRecvStream");
+ RTC_DCHECK_RUN_ON(worker_thread_);
+ RTC_LOG(LS_INFO) << "RemoveRecvStream: " << ssrc;
+
+ const auto it = recv_streams_.find(ssrc);
+ if (it == recv_streams_.end()) {
+ RTC_LOG(LS_WARNING) << "Try to remove stream with ssrc " << ssrc
+ << " which doesn't exist.";
+ return false;
+ }
+
+ MaybeDeregisterUnsignaledRecvStream(ssrc);
+
+ // Detach the sink before destruction so no audio is delivered during
+ // teardown.
+ it->second->SetRawAudioSink(nullptr);
+ delete it->second;
+ recv_streams_.erase(it);
+ return true;
+ }
+
+ // Clears the stored unsignaled stream parameters and removes every receive
+ // stream that was created for an unsignaled SSRC.
+ void WebRtcVoiceReceiveChannel::ResetUnsignaledRecvStream() {
+ RTC_DCHECK_RUN_ON(worker_thread_);
+ RTC_LOG(LS_INFO) << "ResetUnsignaledRecvStream.";
+ unsignaled_stream_params_ = StreamParams();
+ // Create a copy since RemoveRecvStream will modify `unsignaled_recv_ssrcs_`.
+ std::vector<uint32_t> to_remove = unsignaled_recv_ssrcs_;
+ for (uint32_t ssrc : to_remove) {
+ RemoveRecvStream(ssrc);
+ }
+ }
+
+absl::optional<uint32_t> WebRtcVoiceReceiveChannel::GetUnsignaledSsrc() const {
+ if (unsignaled_recv_ssrcs_.empty()) {
+ return absl::nullopt;
+ }
+ // In the event of multiple unsignaled ssrcs, the last in the vector will be
+ // the most recent one (the one forwarded to the MediaStreamTrack).
+ return unsignaled_recv_ssrcs_.back();
+}
+
+void WebRtcVoiceReceiveChannel::ChooseReceiverReportSsrc(
+ const std::set<uint32_t>& choices) {
+ // Don't change SSRC if set is empty. Note that this differs from
+ // the behavior of video.
+ if (choices.empty()) {
+ return;
+ }
+ if (choices.find(receiver_reports_ssrc_) != choices.end()) {
+ return;
+ }
+ uint32_t ssrc = *(choices.begin());
+ receiver_reports_ssrc_ = ssrc;
+ for (auto& kv : recv_streams_) {
+ call_->OnLocalSsrcUpdated(kv.second->stream(), ssrc);
+ }
+}
+
+// Not implemented.
+// TODO(https://crbug.com/webrtc/12676): Implement a fix for the unsignalled
+// SSRC race that can happen when an m= section goes from receiving to not
+// receiving.
+void WebRtcVoiceReceiveChannel::OnDemuxerCriteriaUpdatePending() {}
+void WebRtcVoiceReceiveChannel::OnDemuxerCriteriaUpdateComplete() {}
+
+bool WebRtcVoiceReceiveChannel::SetOutputVolume(uint32_t ssrc, double volume) {
+ RTC_DCHECK_RUN_ON(worker_thread_);
+ RTC_LOG(LS_INFO) << rtc::StringFormat("WRVMC::%s({ssrc=%u}, {volume=%.2f})",
+ __func__, ssrc, volume);
+ const auto it = recv_streams_.find(ssrc);
+ if (it == recv_streams_.end()) {
+ RTC_LOG(LS_WARNING) << rtc::StringFormat(
+ "WRVMC::%s => (WARNING: no receive stream for SSRC %u)", __func__,
+ ssrc);
+ return false;
+ }
+ it->second->SetOutputVolume(volume);
+ RTC_LOG(LS_INFO) << rtc::StringFormat(
+ "WRVMC::%s => (stream with SSRC %u now uses volume %.2f)", __func__, ssrc,
+ volume);
+ return true;
+}
+
+bool WebRtcVoiceReceiveChannel::SetDefaultOutputVolume(double volume) {
+ RTC_DCHECK_RUN_ON(worker_thread_);
+ default_recv_volume_ = volume;
+ for (uint32_t ssrc : unsignaled_recv_ssrcs_) {
+ const auto it = recv_streams_.find(ssrc);
+ if (it == recv_streams_.end()) {
+ RTC_LOG(LS_WARNING) << "SetDefaultOutputVolume: no recv stream " << ssrc;
+ return false;
+ }
+ it->second->SetOutputVolume(volume);
+ RTC_LOG(LS_INFO) << "SetDefaultOutputVolume() to " << volume
+ << " for recv stream with ssrc " << ssrc;
+ }
+ return true;
+}
+
+bool WebRtcVoiceReceiveChannel::SetBaseMinimumPlayoutDelayMs(uint32_t ssrc,
+ int delay_ms) {
+ RTC_DCHECK_RUN_ON(worker_thread_);
+ std::vector<uint32_t> ssrcs(1, ssrc);
+ // SSRC of 0 represents the default receive stream.
+ if (ssrc == 0) {
+ default_recv_base_minimum_delay_ms_ = delay_ms;
+ ssrcs = unsignaled_recv_ssrcs_;
+ }
+ for (uint32_t ssrc : ssrcs) {
+ const auto it = recv_streams_.find(ssrc);
+ if (it == recv_streams_.end()) {
+ RTC_LOG(LS_WARNING) << "SetBaseMinimumPlayoutDelayMs: no recv stream "
+ << ssrc;
+ return false;
+ }
+ it->second->SetBaseMinimumPlayoutDelayMs(delay_ms);
+ RTC_LOG(LS_INFO) << "SetBaseMinimumPlayoutDelayMs() to " << delay_ms
+ << " for recv stream with ssrc " << ssrc;
+ }
+ return true;
+}
+
+absl::optional<int> WebRtcVoiceReceiveChannel::GetBaseMinimumPlayoutDelayMs(
+ uint32_t ssrc) const {
+ // SSRC of 0 represents the default receive stream.
+ if (ssrc == 0) {
+ return default_recv_base_minimum_delay_ms_;
+ }
+
+ const auto it = recv_streams_.find(ssrc);
+
+ if (it != recv_streams_.end()) {
+ return it->second->GetBaseMinimumPlayoutDelayMs();
+ }
+ return absl::nullopt;
+}
+
+void WebRtcVoiceReceiveChannel::SetFrameDecryptor(
+ uint32_t ssrc,
+ rtc::scoped_refptr<webrtc::FrameDecryptorInterface> frame_decryptor) {
+ RTC_DCHECK_RUN_ON(worker_thread_);
+ auto matching_stream = recv_streams_.find(ssrc);
+ if (matching_stream != recv_streams_.end()) {
+ matching_stream->second->SetFrameDecryptor(frame_decryptor);
+ }
+ // Handle unsignaled frame decryptors.
+ if (ssrc == 0) {
+ unsignaled_frame_decryptor_ = frame_decryptor;
+ }
+}
+
+void WebRtcVoiceReceiveChannel::OnPacketReceived(
+ const webrtc::RtpPacketReceived& packet) {
+ RTC_DCHECK_RUN_ON(&network_thread_checker_);
+
+ // TODO(bugs.webrtc.org/11993): This code is very similar to what
+ // WebRtcVideoChannel::OnPacketReceived does. For maintainability and
+ // consistency it would be good to move the interaction with
+ // call_->Receiver() to a common implementation and provide a callback on
+ // the worker thread for the exception case (DELIVERY_UNKNOWN_SSRC) and
+ // how retry is attempted.
+ worker_thread_->PostTask(
+ SafeTask(task_safety_.flag(), [this, packet = packet]() mutable {
+ RTC_DCHECK_RUN_ON(worker_thread_);
+
+        // TODO(bugs.webrtc.org/7135): extensions in `packet` are currently set
+        // in RtpTransport and do not necessarily include extensions specific
+        // to this channel/MID. Also see comment in
+        // BaseChannel::MaybeUpdateDemuxerAndRtpExtensions_w.
+        // It would likely be good if extensions were merged per BUNDLE and
+        // applied directly in RtpTransport::DemuxPacket.
+ packet.IdentifyExtensions(recv_rtp_extension_map_);
+ if (!packet.arrival_time().IsFinite()) {
+ packet.set_arrival_time(webrtc::Timestamp::Micros(rtc::TimeMicros()));
+ }
+
+ call_->Receiver()->DeliverRtpPacket(
+ webrtc::MediaType::AUDIO, std::move(packet),
+ absl::bind_front(
+ &WebRtcVoiceReceiveChannel::MaybeCreateDefaultReceiveStream,
+ this));
+ }));
+}
+
+bool WebRtcVoiceReceiveChannel::MaybeCreateDefaultReceiveStream(
+ const webrtc::RtpPacketReceived& packet) {
+  // Create an unsignaled receive stream for this previously not received
+  // ssrc. If there are already N unsignaled receive streams, delete the
+  // oldest. See: https://bugs.chromium.org/p/webrtc/issues/detail?id=5208
+ uint32_t ssrc = packet.Ssrc();
+ RTC_DCHECK(!absl::c_linear_search(unsignaled_recv_ssrcs_, ssrc));
+
+ // Add new stream.
+ StreamParams sp = unsignaled_stream_params_;
+ sp.ssrcs.push_back(ssrc);
+ RTC_LOG(LS_INFO) << "Creating unsignaled receive stream for SSRC=" << ssrc;
+ if (!AddRecvStream(sp)) {
+ RTC_LOG(LS_WARNING) << "Could not create unsignaled receive stream.";
+ return false;
+ }
+ unsignaled_recv_ssrcs_.push_back(ssrc);
+ RTC_HISTOGRAM_COUNTS_LINEAR("WebRTC.Audio.NumOfUnsignaledStreams",
+ unsignaled_recv_ssrcs_.size(), 1, 100, 101);
+
+ // Remove oldest unsignaled stream, if we have too many.
+ if (unsignaled_recv_ssrcs_.size() > kMaxUnsignaledRecvStreams) {
+ uint32_t remove_ssrc = unsignaled_recv_ssrcs_.front();
+ RTC_DLOG(LS_INFO) << "Removing unsignaled receive stream with SSRC="
+ << remove_ssrc;
+ RemoveRecvStream(remove_ssrc);
+ }
+ RTC_DCHECK_GE(kMaxUnsignaledRecvStreams, unsignaled_recv_ssrcs_.size());
+
+ SetOutputVolume(ssrc, default_recv_volume_);
+ SetBaseMinimumPlayoutDelayMs(ssrc, default_recv_base_minimum_delay_ms_);
+
+ // The default sink can only be attached to one stream at a time, so we hook
+ // it up to the *latest* unsignaled stream we've seen, in order to support
+ // the case where the SSRC of one unsignaled stream changes.
+ if (default_sink_) {
+ for (uint32_t drop_ssrc : unsignaled_recv_ssrcs_) {
+ auto it = recv_streams_.find(drop_ssrc);
+ it->second->SetRawAudioSink(nullptr);
+ }
+ std::unique_ptr<webrtc::AudioSinkInterface> proxy_sink(
+ new ProxySink(default_sink_.get()));
+ SetRawAudioSink(ssrc, std::move(proxy_sink));
+ }
+ return true;
+}
+
+bool WebRtcVoiceReceiveChannel::GetStats(VoiceMediaReceiveInfo* info,
+ bool get_and_clear_legacy_stats) {
+ TRACE_EVENT0("webrtc", "WebRtcVoiceMediaChannel::GetReceiveStats");
+ RTC_DCHECK_RUN_ON(worker_thread_);
+ RTC_DCHECK(info);
+
+ // Get SSRC and stats for each receiver.
+ RTC_DCHECK_EQ(info->receivers.size(), 0U);
+ for (const auto& stream : recv_streams_) {
+ uint32_t ssrc = stream.first;
+ // When SSRCs are unsignaled, there's only one audio MediaStreamTrack, but
+ // multiple RTP streams can be received over time (if the SSRC changes for
+ // whatever reason). We only want the RTCMediaStreamTrackStats to represent
+ // the stats for the most recent stream (the one whose audio is actually
+ // routed to the MediaStreamTrack), so here we ignore any unsignaled SSRCs
+ // except for the most recent one (last in the vector). This is somewhat of
+ // a hack, and means you don't get *any* stats for these inactive streams,
+ // but it's slightly better than the previous behavior, which was "highest
+ // SSRC wins".
+ // See: https://bugs.chromium.org/p/webrtc/issues/detail?id=8158
+ if (!unsignaled_recv_ssrcs_.empty()) {
+ auto end_it = --unsignaled_recv_ssrcs_.end();
+ if (absl::linear_search(unsignaled_recv_ssrcs_.begin(), end_it, ssrc)) {
+ continue;
+ }
+ }
+ webrtc::AudioReceiveStreamInterface::Stats stats =
+ stream.second->GetStats(get_and_clear_legacy_stats);
+ VoiceReceiverInfo rinfo;
+ rinfo.add_ssrc(stats.remote_ssrc);
+ rinfo.payload_bytes_received = stats.payload_bytes_received;
+ rinfo.header_and_padding_bytes_received =
+ stats.header_and_padding_bytes_received;
+ rinfo.packets_received = stats.packets_received;
+ rinfo.fec_packets_received = stats.fec_packets_received;
+ rinfo.fec_packets_discarded = stats.fec_packets_discarded;
+ rinfo.packets_lost = stats.packets_lost;
+ rinfo.packets_discarded = stats.packets_discarded;
+ rinfo.codec_name = stats.codec_name;
+ rinfo.codec_payload_type = stats.codec_payload_type;
+ rinfo.jitter_ms = stats.jitter_ms;
+ rinfo.jitter_buffer_ms = stats.jitter_buffer_ms;
+ rinfo.jitter_buffer_preferred_ms = stats.jitter_buffer_preferred_ms;
+ rinfo.delay_estimate_ms = stats.delay_estimate_ms;
+ rinfo.audio_level = stats.audio_level;
+ rinfo.total_output_energy = stats.total_output_energy;
+ rinfo.total_samples_received = stats.total_samples_received;
+ rinfo.total_output_duration = stats.total_output_duration;
+ rinfo.concealed_samples = stats.concealed_samples;
+ rinfo.silent_concealed_samples = stats.silent_concealed_samples;
+ rinfo.concealment_events = stats.concealment_events;
+ rinfo.jitter_buffer_delay_seconds = stats.jitter_buffer_delay_seconds;
+ rinfo.jitter_buffer_emitted_count = stats.jitter_buffer_emitted_count;
+ rinfo.jitter_buffer_target_delay_seconds =
+ stats.jitter_buffer_target_delay_seconds;
+ rinfo.jitter_buffer_minimum_delay_seconds =
+ stats.jitter_buffer_minimum_delay_seconds;
+ rinfo.inserted_samples_for_deceleration =
+ stats.inserted_samples_for_deceleration;
+ rinfo.removed_samples_for_acceleration =
+ stats.removed_samples_for_acceleration;
+ rinfo.expand_rate = stats.expand_rate;
+ rinfo.speech_expand_rate = stats.speech_expand_rate;
+ rinfo.secondary_decoded_rate = stats.secondary_decoded_rate;
+ rinfo.secondary_discarded_rate = stats.secondary_discarded_rate;
+ rinfo.accelerate_rate = stats.accelerate_rate;
+ rinfo.preemptive_expand_rate = stats.preemptive_expand_rate;
+ rinfo.delayed_packet_outage_samples = stats.delayed_packet_outage_samples;
+ rinfo.decoding_calls_to_silence_generator =
+ stats.decoding_calls_to_silence_generator;
+ rinfo.decoding_calls_to_neteq = stats.decoding_calls_to_neteq;
+ rinfo.decoding_normal = stats.decoding_normal;
+ rinfo.decoding_plc = stats.decoding_plc;
+ rinfo.decoding_codec_plc = stats.decoding_codec_plc;
+ rinfo.decoding_cng = stats.decoding_cng;
+ rinfo.decoding_plc_cng = stats.decoding_plc_cng;
+ rinfo.decoding_muted_output = stats.decoding_muted_output;
+ rinfo.capture_start_ntp_time_ms = stats.capture_start_ntp_time_ms;
+ rinfo.last_packet_received = stats.last_packet_received;
+ rinfo.estimated_playout_ntp_timestamp_ms =
+ stats.estimated_playout_ntp_timestamp_ms;
+ rinfo.jitter_buffer_flushes = stats.jitter_buffer_flushes;
+ rinfo.relative_packet_arrival_delay_seconds =
+ stats.relative_packet_arrival_delay_seconds;
+ rinfo.interruption_count = stats.interruption_count;
+ rinfo.total_interruption_duration_ms = stats.total_interruption_duration_ms;
+ rinfo.last_sender_report_timestamp_ms =
+ stats.last_sender_report_timestamp_ms;
+ rinfo.last_sender_report_remote_timestamp_ms =
+ stats.last_sender_report_remote_timestamp_ms;
+ rinfo.sender_reports_packets_sent = stats.sender_reports_packets_sent;
+ rinfo.sender_reports_bytes_sent = stats.sender_reports_bytes_sent;
+ rinfo.sender_reports_reports_count = stats.sender_reports_reports_count;
+ rinfo.round_trip_time = stats.round_trip_time;
+ rinfo.round_trip_time_measurements = stats.round_trip_time_measurements;
+ rinfo.total_round_trip_time = stats.total_round_trip_time;
+
+ if (recv_nack_enabled_) {
+ rinfo.nacks_sent = stats.nacks_sent;
+ }
+
+ info->receivers.push_back(rinfo);
+ }
+
+ FillReceiveCodecStats(info);
+
+ info->device_underrun_count = engine_->adm()->GetPlayoutUnderrunCount();
+
+ return true;
+}
+
+void WebRtcVoiceReceiveChannel::FillReceiveCodecStats(
+ VoiceMediaReceiveInfo* voice_media_info) {
+ for (const auto& receiver : voice_media_info->receivers) {
+ auto codec =
+ absl::c_find_if(recv_codecs_, [&receiver](const AudioCodec& c) {
+ return receiver.codec_payload_type &&
+ *receiver.codec_payload_type == c.id;
+ });
+ if (codec != recv_codecs_.end()) {
+ voice_media_info->receive_codecs.insert(
+ std::make_pair(codec->id, codec->ToCodecParameters()));
+ }
+ }
+}
+
+void WebRtcVoiceReceiveChannel::SetRawAudioSink(
+ uint32_t ssrc,
+ std::unique_ptr<webrtc::AudioSinkInterface> sink) {
+ RTC_DCHECK_RUN_ON(worker_thread_);
+ RTC_LOG(LS_VERBOSE) << "WebRtcVoiceMediaChannel::SetRawAudioSink: ssrc:"
+ << ssrc << " " << (sink ? "(ptr)" : "NULL");
+ const auto it = recv_streams_.find(ssrc);
+ if (it == recv_streams_.end()) {
+ RTC_LOG(LS_WARNING) << "SetRawAudioSink: no recv stream " << ssrc;
+ return;
+ }
+ it->second->SetRawAudioSink(std::move(sink));
+}
+
+void WebRtcVoiceReceiveChannel::SetDefaultRawAudioSink(
+ std::unique_ptr<webrtc::AudioSinkInterface> sink) {
+ RTC_DCHECK_RUN_ON(worker_thread_);
+ RTC_LOG(LS_VERBOSE) << "WebRtcVoiceMediaChannel::SetDefaultRawAudioSink:";
+ if (!unsignaled_recv_ssrcs_.empty()) {
+ std::unique_ptr<webrtc::AudioSinkInterface> proxy_sink(
+ sink ? new ProxySink(sink.get()) : nullptr);
+ SetRawAudioSink(unsignaled_recv_ssrcs_.back(), std::move(proxy_sink));
+ }
+ default_sink_ = std::move(sink);
+}
+
+std::vector<webrtc::RtpSource> WebRtcVoiceReceiveChannel::GetSources(
+ uint32_t ssrc) const {
+ auto it = recv_streams_.find(ssrc);
+ if (it == recv_streams_.end()) {
+ RTC_LOG(LS_ERROR) << "Attempting to get contributing sources for SSRC:"
+ << ssrc << " which doesn't exist.";
+ return std::vector<webrtc::RtpSource>();
+ }
+ return it->second->GetSources();
+}
+
+void WebRtcVoiceReceiveChannel::SetDepacketizerToDecoderFrameTransformer(
+ uint32_t ssrc,
+ rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer) {
+ RTC_DCHECK_RUN_ON(worker_thread_);
+ if (ssrc == 0) {
+ // If the receiver is unsignaled, save the frame transformer and set it when
+ // the stream is associated with an ssrc.
+ unsignaled_frame_transformer_ = std::move(frame_transformer);
+ return;
+ }
+
+ auto matching_stream = recv_streams_.find(ssrc);
+ if (matching_stream == recv_streams_.end()) {
+ RTC_LOG(LS_INFO) << "Attempting to set frame transformer for SSRC:" << ssrc
+ << " which doesn't exist.";
+ return;
+ }
+ matching_stream->second->SetDepacketizerToDecoderFrameTransformer(
+ std::move(frame_transformer));
+}
+
+bool WebRtcVoiceReceiveChannel::MaybeDeregisterUnsignaledRecvStream(
+ uint32_t ssrc) {
+ RTC_DCHECK_RUN_ON(worker_thread_);
+ auto it = absl::c_find(unsignaled_recv_ssrcs_, ssrc);
+ if (it != unsignaled_recv_ssrcs_.end()) {
+ unsignaled_recv_ssrcs_.erase(it);
+ return true;
+ }
+ return false;
+}
+} // namespace cricket
diff --git a/third_party/libwebrtc/media/engine/webrtc_voice_engine.h b/third_party/libwebrtc/media/engine/webrtc_voice_engine.h
new file mode 100644
index 0000000000..a3e6d3acab
--- /dev/null
+++ b/third_party/libwebrtc/media/engine/webrtc_voice_engine.h
@@ -0,0 +1,522 @@
+/*
+ * Copyright (c) 2004 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MEDIA_ENGINE_WEBRTC_VOICE_ENGINE_H_
+#define MEDIA_ENGINE_WEBRTC_VOICE_ENGINE_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "absl/functional/any_invocable.h"
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "api/audio/audio_frame_processor.h"
+#include "api/audio/audio_mixer.h"
+#include "api/audio_codecs/audio_codec_pair_id.h"
+#include "api/audio_codecs/audio_decoder_factory.h"
+#include "api/audio_codecs/audio_encoder_factory.h"
+#include "api/audio_codecs/audio_format.h"
+#include "api/audio_options.h"
+#include "api/call/audio_sink.h"
+#include "api/call/transport.h"
+#include "api/crypto/crypto_options.h"
+#include "api/crypto/frame_decryptor_interface.h"
+#include "api/crypto/frame_encryptor_interface.h"
+#include "api/field_trials_view.h"
+#include "api/frame_transformer_interface.h"
+#include "api/rtc_error.h"
+#include "api/rtp_parameters.h"
+#include "api/rtp_sender_interface.h"
+#include "api/scoped_refptr.h"
+#include "api/sequence_checker.h"
+#include "api/task_queue/pending_task_safety_flag.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/task_queue/task_queue_factory.h"
+#include "api/transport/rtp/rtp_source.h"
+#include "call/audio_send_stream.h"
+#include "call/audio_state.h"
+#include "call/call.h"
+#include "media/base/codec.h"
+#include "media/base/media_channel.h"
+#include "media/base/media_channel_impl.h"
+#include "media/base/media_config.h"
+#include "media/base/media_engine.h"
+#include "media/base/rtp_utils.h"
+#include "media/base/stream_params.h"
+#include "modules/async_audio_processing/async_audio_processing.h"
+#include "modules/audio_device/include/audio_device.h"
+#include "modules/audio_processing/include/audio_processing.h"
+#include "modules/rtp_rtcp/include/rtp_header_extension_map.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "rtc_base/buffer.h"
+#include "rtc_base/network/sent_packet.h"
+#include "rtc_base/network_route.h"
+#include "rtc_base/system/file_wrapper.h"
+#include "rtc_base/task_queue.h"
+
+namespace webrtc {
+class AudioFrameProcessor;
+}
+
+namespace cricket {
+
+class AudioSource;
+
+// WebRtcVoiceEngine is a class to be used with CompositeMediaEngine.
+// It uses the WebRtc VoiceEngine library for audio handling.
+class WebRtcVoiceEngine final : public VoiceEngineInterface {
+ friend class WebRtcVoiceSendChannel;
+ friend class WebRtcVoiceReceiveChannel;
+
+ public:
+ WebRtcVoiceEngine(
+ webrtc::TaskQueueFactory* task_queue_factory,
+ webrtc::AudioDeviceModule* adm,
+ const rtc::scoped_refptr<webrtc::AudioEncoderFactory>& encoder_factory,
+ const rtc::scoped_refptr<webrtc::AudioDecoderFactory>& decoder_factory,
+ rtc::scoped_refptr<webrtc::AudioMixer> audio_mixer,
+ rtc::scoped_refptr<webrtc::AudioProcessing> audio_processing,
+ // TODO(bugs.webrtc.org/15111):
+ // Remove the raw AudioFrameProcessor pointer in the follow-up.
+ webrtc::AudioFrameProcessor* audio_frame_processor,
+ std::unique_ptr<webrtc::AudioFrameProcessor> owned_audio_frame_processor,
+ const webrtc::FieldTrialsView& trials);
+
+ WebRtcVoiceEngine() = delete;
+ WebRtcVoiceEngine(const WebRtcVoiceEngine&) = delete;
+ WebRtcVoiceEngine& operator=(const WebRtcVoiceEngine&) = delete;
+
+ ~WebRtcVoiceEngine() override;
+
+ // Does initialization that needs to occur on the worker thread.
+ void Init() override;
+ rtc::scoped_refptr<webrtc::AudioState> GetAudioState() const override;
+
+ std::unique_ptr<VoiceMediaSendChannelInterface> CreateSendChannel(
+ webrtc::Call* call,
+ const MediaConfig& config,
+ const AudioOptions& options,
+ const webrtc::CryptoOptions& crypto_options,
+ webrtc::AudioCodecPairId codec_pair_id) override;
+
+ std::unique_ptr<VoiceMediaReceiveChannelInterface> CreateReceiveChannel(
+ webrtc::Call* call,
+ const MediaConfig& config,
+ const AudioOptions& options,
+ const webrtc::CryptoOptions& crypto_options,
+ webrtc::AudioCodecPairId codec_pair_id) override;
+
+ const std::vector<AudioCodec>& send_codecs() const override;
+ const std::vector<AudioCodec>& recv_codecs() const override;
+ std::vector<webrtc::RtpHeaderExtensionCapability> GetRtpHeaderExtensions()
+ const override;
+
+ // Starts AEC dump using an existing file. A maximum file size in bytes can be
+ // specified. When the maximum file size is reached, logging is stopped and
+ // the file is closed. If max_size_bytes is set to <= 0, no limit will be
+ // used.
+ bool StartAecDump(webrtc::FileWrapper file, int64_t max_size_bytes) override;
+
+ // Stops AEC dump.
+ void StopAecDump() override;
+
+ absl::optional<webrtc::AudioDeviceModule::Stats> GetAudioDeviceStats()
+ override;
+
+ private:
+ // Every option that is "set" will be applied. Every option not "set" will be
+ // ignored. This allows us to selectively turn on and off different options
+ // easily at any time.
+ void ApplyOptions(const AudioOptions& options);
+
+ webrtc::TaskQueueFactory* const task_queue_factory_;
+ std::unique_ptr<rtc::TaskQueue> low_priority_worker_queue_;
+
+ webrtc::AudioDeviceModule* adm();
+ webrtc::AudioProcessing* apm() const;
+ webrtc::AudioState* audio_state();
+
+ std::vector<AudioCodec> CollectCodecs(
+ const std::vector<webrtc::AudioCodecSpec>& specs) const;
+
+ webrtc::SequenceChecker signal_thread_checker_{
+ webrtc::SequenceChecker::kDetached};
+ webrtc::SequenceChecker worker_thread_checker_{
+ webrtc::SequenceChecker::kDetached};
+
+ // The audio device module.
+ rtc::scoped_refptr<webrtc::AudioDeviceModule> adm_;
+ rtc::scoped_refptr<webrtc::AudioEncoderFactory> encoder_factory_;
+ rtc::scoped_refptr<webrtc::AudioDecoderFactory> decoder_factory_;
+ rtc::scoped_refptr<webrtc::AudioMixer> audio_mixer_;
+ // The audio processing module.
+ rtc::scoped_refptr<webrtc::AudioProcessing> apm_;
+ // Asynchronous audio processing.
+ // TODO(bugs.webrtc.org/15111):
+ // Remove the raw AudioFrameProcessor pointer in the follow-up.
+ webrtc::AudioFrameProcessor* const audio_frame_processor_;
+ std::unique_ptr<webrtc::AudioFrameProcessor> owned_audio_frame_processor_;
+ // The primary instance of WebRtc VoiceEngine.
+ rtc::scoped_refptr<webrtc::AudioState> audio_state_;
+ std::vector<AudioCodec> send_codecs_;
+ std::vector<AudioCodec> recv_codecs_;
+ bool is_dumping_aec_ = false;
+ bool initialized_ = false;
+
+ // Jitter buffer settings for new streams.
+ size_t audio_jitter_buffer_max_packets_ = 200;
+ bool audio_jitter_buffer_fast_accelerate_ = false;
+ int audio_jitter_buffer_min_delay_ms_ = 0;
+
+ const bool minimized_remsampling_on_mobile_trial_enabled_;
+};
+
+class WebRtcVoiceSendChannel final : public MediaChannelUtil,
+ public VoiceMediaSendChannelInterface {
+ public:
+ WebRtcVoiceSendChannel(WebRtcVoiceEngine* engine,
+ const MediaConfig& config,
+ const AudioOptions& options,
+ const webrtc::CryptoOptions& crypto_options,
+ webrtc::Call* call,
+ webrtc::AudioCodecPairId codec_pair_id);
+
+ WebRtcVoiceSendChannel() = delete;
+ WebRtcVoiceSendChannel(const WebRtcVoiceSendChannel&) = delete;
+ WebRtcVoiceSendChannel& operator=(const WebRtcVoiceSendChannel&) = delete;
+
+ ~WebRtcVoiceSendChannel() override;
+
+ MediaType media_type() const override { return MEDIA_TYPE_AUDIO; }
+ VideoMediaSendChannelInterface* AsVideoSendChannel() override {
+ RTC_CHECK_NOTREACHED();
+ return nullptr;
+ }
+ VoiceMediaSendChannelInterface* AsVoiceSendChannel() override { return this; }
+
+ absl::optional<Codec> GetSendCodec() const override;
+
+ // Functions imported from MediaChannelUtil
+ void SetInterface(MediaChannelNetworkInterface* iface) override {
+ MediaChannelUtil::SetInterface(iface);
+ }
+
+ bool HasNetworkInterface() const override {
+ return MediaChannelUtil::HasNetworkInterface();
+ }
+ void SetExtmapAllowMixed(bool extmap_allow_mixed) override {
+ MediaChannelUtil::SetExtmapAllowMixed(extmap_allow_mixed);
+ }
+ bool ExtmapAllowMixed() const override {
+ return MediaChannelUtil::ExtmapAllowMixed();
+ }
+
+ const AudioOptions& options() const { return options_; }
+
+ bool SetSenderParameters(const AudioSenderParameter& params) override;
+ webrtc::RtpParameters GetRtpSendParameters(uint32_t ssrc) const override;
+ webrtc::RTCError SetRtpSendParameters(
+ uint32_t ssrc,
+ const webrtc::RtpParameters& parameters,
+ webrtc::SetParametersCallback callback) override;
+
+ void SetSend(bool send) override;
+ bool SetAudioSend(uint32_t ssrc,
+ bool enable,
+ const AudioOptions* options,
+ AudioSource* source) override;
+ bool AddSendStream(const StreamParams& sp) override;
+ bool RemoveSendStream(uint32_t ssrc) override;
+
+ void SetSsrcListChangedCallback(
+ absl::AnyInvocable<void(const std::set<uint32_t>&)> callback) override;
+
+ // E2EE Frame API
+ // Set a frame encryptor to a particular ssrc that will intercept all
+ // outgoing audio payloads frames and attempt to encrypt them and forward the
+ // result to the packetizer.
+ void SetFrameEncryptor(uint32_t ssrc,
+ rtc::scoped_refptr<webrtc::FrameEncryptorInterface>
+ frame_encryptor) override;
+
+ bool CanInsertDtmf() override;
+ bool InsertDtmf(uint32_t ssrc, int event, int duration) override;
+
+ void OnPacketSent(const rtc::SentPacket& sent_packet) override;
+ void OnNetworkRouteChanged(absl::string_view transport_name,
+ const rtc::NetworkRoute& network_route) override;
+ void OnReadyToSend(bool ready) override;
+ bool GetStats(VoiceMediaSendInfo* info) override;
+
+ // Sets a frame transformer between encoder and packetizer, to transform
+ // encoded frames before sending them out the network.
+ void SetEncoderToPacketizerFrameTransformer(
+ uint32_t ssrc,
+ rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer)
+ override;
+
+ bool SenderNackEnabled() const override {
+ if (!send_codec_spec_) {
+ return false;
+ }
+ return send_codec_spec_->nack_enabled;
+ }
+ bool SenderNonSenderRttEnabled() const override {
+ if (!send_codec_spec_) {
+ return false;
+ }
+ return send_codec_spec_->enable_non_sender_rtt;
+ }
+ bool SendCodecHasNack() const override { return SenderNackEnabled(); }
+
+ void SetSendCodecChangedCallback(
+ absl::AnyInvocable<void()> callback) override {
+ send_codec_changed_callback_ = std::move(callback);
+ }
+
+ private:
+ bool SetOptions(const AudioOptions& options);
+ bool SetSendCodecs(const std::vector<Codec>& codecs,
+ absl::optional<Codec> preferred_codec);
+ bool SetLocalSource(uint32_t ssrc, AudioSource* source);
+ bool MuteStream(uint32_t ssrc, bool mute);
+
+ WebRtcVoiceEngine* engine() { return engine_; }
+ bool SetMaxSendBitrate(int bps);
+ void SetupRecording();
+
+ webrtc::TaskQueueBase* const worker_thread_;
+ webrtc::ScopedTaskSafety task_safety_;
+ webrtc::SequenceChecker network_thread_checker_{
+ webrtc::SequenceChecker::kDetached};
+
+ WebRtcVoiceEngine* const engine_ = nullptr;
+ std::vector<AudioCodec> send_codecs_;
+
+ int max_send_bitrate_bps_ = 0;
+ AudioOptions options_;
+ absl::optional<int> dtmf_payload_type_;
+ int dtmf_payload_freq_ = -1;
+ bool enable_non_sender_rtt_ = false;
+ bool send_ = false;
+ webrtc::Call* const call_ = nullptr;
+
+ const MediaConfig::Audio audio_config_;
+
+ class WebRtcAudioSendStream;
+
+ std::map<uint32_t, WebRtcAudioSendStream*> send_streams_;
+ std::vector<webrtc::RtpExtension> send_rtp_extensions_;
+ std::string mid_;
+
+ absl::optional<webrtc::AudioSendStream::Config::SendCodecSpec>
+ send_codec_spec_;
+
+ // TODO(kwiberg): Per-SSRC codec pair IDs?
+ const webrtc::AudioCodecPairId codec_pair_id_;
+
+ // Per peer connection crypto options that last for the lifetime of the peer
+ // connection.
+ const webrtc::CryptoOptions crypto_options_;
+ rtc::scoped_refptr<webrtc::FrameTransformerInterface>
+ unsignaled_frame_transformer_;
+
+ void FillSendCodecStats(VoiceMediaSendInfo* voice_media_info);
+
+ // Callback invoked whenever the send codec changes.
+ // TODO(bugs.webrtc.org/13931): Remove again when coupling isn't needed.
+ absl::AnyInvocable<void()> send_codec_changed_callback_;
+ // Callback invoked whenever the list of SSRCs changes.
+ absl::AnyInvocable<void(const std::set<uint32_t>&)>
+ ssrc_list_changed_callback_;
+};
+
+class WebRtcVoiceReceiveChannel final
+ : public MediaChannelUtil,
+ public VoiceMediaReceiveChannelInterface {
+ public:
+ WebRtcVoiceReceiveChannel(WebRtcVoiceEngine* engine,
+ const MediaConfig& config,
+ const AudioOptions& options,
+ const webrtc::CryptoOptions& crypto_options,
+ webrtc::Call* call,
+ webrtc::AudioCodecPairId codec_pair_id);
+
+ WebRtcVoiceReceiveChannel() = delete;
+ WebRtcVoiceReceiveChannel(const WebRtcVoiceReceiveChannel&) = delete;
+ WebRtcVoiceReceiveChannel& operator=(const WebRtcVoiceReceiveChannel&) =
+ delete;
+
+ ~WebRtcVoiceReceiveChannel() override;
+
+ MediaType media_type() const override { return MEDIA_TYPE_AUDIO; }
+
+ VideoMediaReceiveChannelInterface* AsVideoReceiveChannel() override {
+ RTC_CHECK_NOTREACHED();
+ return nullptr;
+ }
+ VoiceMediaReceiveChannelInterface* AsVoiceReceiveChannel() override {
+ return this;
+ }
+
+ const AudioOptions& options() const { return options_; }
+
+ void SetInterface(MediaChannelNetworkInterface* iface) override {
+ MediaChannelUtil::SetInterface(iface);
+ }
+ bool SetReceiverParameters(const AudioReceiverParameters& params) override;
+ webrtc::RtpParameters GetRtpReceiverParameters(uint32_t ssrc) const override;
+ webrtc::RtpParameters GetDefaultRtpReceiveParameters() const override;
+
+ void SetPlayout(bool playout) override;
+ bool AddRecvStream(const StreamParams& sp) override;
+ bool RemoveRecvStream(uint32_t ssrc) override;
+ void ResetUnsignaledRecvStream() override;
+ absl::optional<uint32_t> GetUnsignaledSsrc() const override;
+
+ void ChooseReceiverReportSsrc(const std::set<uint32_t>& choices) override;
+
+ void OnDemuxerCriteriaUpdatePending() override;
+ void OnDemuxerCriteriaUpdateComplete() override;
+
+ // E2EE Frame API
+ // Set a frame decryptor to a particular ssrc that will intercept all
+ // incoming audio payloads and attempt to decrypt them before forwarding the
+ // result.
+ void SetFrameDecryptor(uint32_t ssrc,
+ rtc::scoped_refptr<webrtc::FrameDecryptorInterface>
+ frame_decryptor) override;
+
+ bool SetOutputVolume(uint32_t ssrc, double volume) override;
+ // Applies the new volume to current and future unsignaled streams.
+ bool SetDefaultOutputVolume(double volume) override;
+
+ bool SetBaseMinimumPlayoutDelayMs(uint32_t ssrc, int delay_ms) override;
+ absl::optional<int> GetBaseMinimumPlayoutDelayMs(
+ uint32_t ssrc) const override;
+
+ void OnPacketReceived(const webrtc::RtpPacketReceived& packet) override;
+ bool GetStats(VoiceMediaReceiveInfo* info,
+ bool get_and_clear_legacy_stats) override;
+
+ // Set the audio sink for an existing stream.
+ void SetRawAudioSink(
+ uint32_t ssrc,
+ std::unique_ptr<webrtc::AudioSinkInterface> sink) override;
+ // Will set the audio sink on the latest unsignaled stream, future or
+ // current. Only one stream at a time will use the sink.
+ void SetDefaultRawAudioSink(
+ std::unique_ptr<webrtc::AudioSinkInterface> sink) override;
+
+ std::vector<webrtc::RtpSource> GetSources(uint32_t ssrc) const override;
+
+ void SetDepacketizerToDecoderFrameTransformer(
+ uint32_t ssrc,
+ rtc::scoped_refptr<webrtc::FrameTransformerInterface> frame_transformer)
+ override;
+
+ void SetReceiveNackEnabled(bool enabled) override;
+ void SetReceiveNonSenderRttEnabled(bool enabled) override;
+
+ private:
+ bool SetOptions(const AudioOptions& options);
+ bool SetRecvCodecs(const std::vector<AudioCodec>& codecs);
+ bool SetLocalSource(uint32_t ssrc, AudioSource* source);
+ bool MuteStream(uint32_t ssrc, bool mute);
+
+ WebRtcVoiceEngine* engine() { return engine_; }
+ void SetupRecording();
+
+ // Expected to be invoked once per packet that belongs to this channel that
+ // can not be demuxed. Returns true if a default receive stream has been
+ // created.
+ bool MaybeCreateDefaultReceiveStream(const webrtc::RtpPacketReceived& packet);
+ // Check if 'ssrc' is an unsignaled stream, and if so mark it as not being
+ // unsignaled anymore (i.e. it is now removed, or signaled), and return true.
+ bool MaybeDeregisterUnsignaledRecvStream(uint32_t ssrc);
+
+ webrtc::TaskQueueBase* const worker_thread_;
+ webrtc::ScopedTaskSafety task_safety_;
+ webrtc::SequenceChecker network_thread_checker_{
+ webrtc::SequenceChecker::kDetached};
+
+ WebRtcVoiceEngine* const engine_ = nullptr;
+
+ // TODO(kwiberg): decoder_map_ and recv_codecs_ store the exact same
+ // information, in slightly different formats. Eliminate recv_codecs_.
+ std::map<int, webrtc::SdpAudioFormat> decoder_map_;
+ std::vector<AudioCodec> recv_codecs_;
+
+ AudioOptions options_;
+ bool recv_nack_enabled_ = false;
+ bool enable_non_sender_rtt_ = false;
+ bool playout_ = false;
+ webrtc::Call* const call_ = nullptr;
+
+ const MediaConfig::Audio audio_config_;
+
+ // Queue of unsignaled SSRCs; oldest at the beginning.
+ std::vector<uint32_t> unsignaled_recv_ssrcs_;
+
+ // This is a stream param that comes from the remote description, but wasn't
+ // signaled with any a=ssrc lines. It holds the information that was signaled
+ // before the unsignaled receive stream is created when the first packet is
+ // received.
+ StreamParams unsignaled_stream_params_;
+
+ // Volume for unsignaled streams, which may be set before the stream exists.
+ double default_recv_volume_ = 1.0;
+
+ // Delay for unsignaled streams, which may be set before the stream exists.
+ int default_recv_base_minimum_delay_ms_ = 0;
+
+ // Sink for latest unsignaled stream - may be set before the stream exists.
+ std::unique_ptr<webrtc::AudioSinkInterface> default_sink_;
+ // Default SSRC to use for RTCP receiver reports in case of no signaled
+ // send streams. See: https://code.google.com/p/webrtc/issues/detail?id=4740
+ // and https://code.google.com/p/chromium/issues/detail?id=547661
+ uint32_t receiver_reports_ssrc_ = 0xFA17FA17u;
+
+ std::string mid_;
+
+ class WebRtcAudioReceiveStream;
+
+ std::map<uint32_t, WebRtcAudioReceiveStream*> recv_streams_;
+ std::vector<webrtc::RtpExtension> recv_rtp_extensions_;
+ webrtc::RtpHeaderExtensionMap recv_rtp_extension_map_;
+
+ absl::optional<webrtc::AudioSendStream::Config::SendCodecSpec>
+ send_codec_spec_;
+
+ // TODO(kwiberg): Per-SSRC codec pair IDs?
+ const webrtc::AudioCodecPairId codec_pair_id_;
+
+ // Per peer connection crypto options that last for the lifetime of the peer
+ // connection.
+ const webrtc::CryptoOptions crypto_options_;
+ // Unsignaled streams have an option to have a frame decryptor set on them.
+ rtc::scoped_refptr<webrtc::FrameDecryptorInterface>
+ unsignaled_frame_decryptor_;
+ rtc::scoped_refptr<webrtc::FrameTransformerInterface>
+ unsignaled_frame_transformer_;
+
+ void FillReceiveCodecStats(VoiceMediaReceiveInfo* voice_media_info);
+};
+
+} // namespace cricket
+
+#endif // MEDIA_ENGINE_WEBRTC_VOICE_ENGINE_H_
diff --git a/third_party/libwebrtc/media/engine/webrtc_voice_engine_unittest.cc b/third_party/libwebrtc/media/engine/webrtc_voice_engine_unittest.cc
new file mode 100644
index 0000000000..b1393eec74
--- /dev/null
+++ b/third_party/libwebrtc/media/engine/webrtc_voice_engine_unittest.cc
@@ -0,0 +1,4017 @@
+/*
+ * Copyright (c) 2008 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/engine/webrtc_voice_engine.h"
+
+#include <memory>
+#include <utility>
+
+#include "absl/memory/memory.h"
+#include "absl/strings/match.h"
+#include "absl/types/optional.h"
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "api/audio_codecs/builtin_audio_encoder_factory.h"
+#include "api/media_types.h"
+#include "api/rtc_event_log/rtc_event_log.h"
+#include "api/rtp_parameters.h"
+#include "api/scoped_refptr.h"
+#include "api/task_queue/default_task_queue_factory.h"
+#include "api/transport/field_trial_based_config.h"
+#include "call/call.h"
+#include "media/base/codec.h"
+#include "media/base/fake_media_engine.h"
+#include "media/base/fake_network_interface.h"
+#include "media/base/fake_rtp.h"
+#include "media/base/media_channel.h"
+#include "media/base/media_constants.h"
+#include "media/engine/fake_webrtc_call.h"
+#include "modules/audio_device/include/mock_audio_device.h"
+#include "modules/audio_mixer/audio_mixer_impl.h"
+#include "modules/audio_processing/include/mock_audio_processing.h"
+#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/byte_order.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "test/gtest.h"
+#include "test/mock_audio_decoder_factory.h"
+#include "test/mock_audio_encoder_factory.h"
+#include "test/scoped_key_value_config.h"
+
+using ::testing::_;
+using ::testing::ContainerEq;
+using ::testing::Contains;
+using ::testing::Field;
+using ::testing::IsEmpty;
+using ::testing::Return;
+using ::testing::ReturnPointee;
+using ::testing::SaveArg;
+using ::testing::StrictMock;
+using ::testing::UnorderedElementsAreArray;
+using ::webrtc::Call;
+using ::webrtc::CallConfig;
+
+namespace {
+using webrtc::BitrateConstraints;
+
+constexpr uint32_t kMaxUnsignaledRecvStreams = 4;
+
+const cricket::AudioCodec kPcmuCodec =
+ cricket::CreateAudioCodec(0, "PCMU", 8000, 1);
+const cricket::AudioCodec kOpusCodec =
+ cricket::CreateAudioCodec(111, "opus", 48000, 2);
+const cricket::AudioCodec kG722CodecVoE =
+ cricket::CreateAudioCodec(9, "G722", 16000, 1);
+const cricket::AudioCodec kG722CodecSdp =
+ cricket::CreateAudioCodec(9, "G722", 8000, 1);
+const cricket::AudioCodec kCn8000Codec =
+ cricket::CreateAudioCodec(13, "CN", 8000, 1);
+const cricket::AudioCodec kCn16000Codec =
+ cricket::CreateAudioCodec(105, "CN", 16000, 1);
+const cricket::AudioCodec kRed48000Codec =
+ cricket::CreateAudioCodec(112, "RED", 48000, 2);
+const cricket::AudioCodec kTelephoneEventCodec1 =
+ cricket::CreateAudioCodec(106, "telephone-event", 8000, 1);
+const cricket::AudioCodec kTelephoneEventCodec2 =
+ cricket::CreateAudioCodec(107, "telephone-event", 32000, 1);
+
+const uint32_t kSsrc0 = 0;
+const uint32_t kSsrc1 = 1;
+const uint32_t kSsrcX = 0x99;
+const uint32_t kSsrcY = 0x17;
+const uint32_t kSsrcZ = 0x42;
+const uint32_t kSsrcW = 0x02;
+const uint32_t kSsrcs4[] = {11, 200, 30, 44};
+
+constexpr int kRtpHistoryMs = 5000;
+
+constexpr webrtc::AudioProcessing::Config::GainController1::Mode
+ kDefaultAgcMode =
+#if defined(WEBRTC_IOS) || defined(WEBRTC_ANDROID)
+ webrtc::AudioProcessing::Config::GainController1::kFixedDigital;
+#else
+ webrtc::AudioProcessing::Config::GainController1::kAdaptiveAnalog;
+#endif
+
+constexpr webrtc::AudioProcessing::Config::NoiseSuppression::Level
+ kDefaultNsLevel =
+ webrtc::AudioProcessing::Config::NoiseSuppression::Level::kHigh;
+
+void AdmSetupExpectations(webrtc::test::MockAudioDeviceModule* adm) {
+ RTC_DCHECK(adm);
+
+ // Setup.
+ EXPECT_CALL(*adm, Init()).WillOnce(Return(0));
+ EXPECT_CALL(*adm, RegisterAudioCallback(_)).WillOnce(Return(0));
+#if defined(WEBRTC_WIN)
+ EXPECT_CALL(
+ *adm,
+ SetPlayoutDevice(
+ ::testing::Matcher<webrtc::AudioDeviceModule::WindowsDeviceType>(
+ webrtc::AudioDeviceModule::kDefaultCommunicationDevice)))
+ .WillOnce(Return(0));
+#else
+ EXPECT_CALL(*adm, SetPlayoutDevice(0)).WillOnce(Return(0));
+#endif // #if defined(WEBRTC_WIN)
+ EXPECT_CALL(*adm, InitSpeaker()).WillOnce(Return(0));
+ EXPECT_CALL(*adm, StereoPlayoutIsAvailable(::testing::_)).WillOnce(Return(0));
+ EXPECT_CALL(*adm, SetStereoPlayout(false)).WillOnce(Return(0));
+#if defined(WEBRTC_WIN)
+ EXPECT_CALL(
+ *adm,
+ SetRecordingDevice(
+ ::testing::Matcher<webrtc::AudioDeviceModule::WindowsDeviceType>(
+ webrtc::AudioDeviceModule::kDefaultCommunicationDevice)))
+ .WillOnce(Return(0));
+#else
+ EXPECT_CALL(*adm, SetRecordingDevice(0)).WillOnce(Return(0));
+#endif // #if defined(WEBRTC_WIN)
+ EXPECT_CALL(*adm, InitMicrophone()).WillOnce(Return(0));
+ EXPECT_CALL(*adm, StereoRecordingIsAvailable(::testing::_))
+ .WillOnce(Return(0));
+ EXPECT_CALL(*adm, SetStereoRecording(false)).WillOnce(Return(0));
+ EXPECT_CALL(*adm, BuiltInAECIsAvailable()).WillOnce(Return(false));
+ EXPECT_CALL(*adm, BuiltInAGCIsAvailable()).WillOnce(Return(false));
+ EXPECT_CALL(*adm, BuiltInNSIsAvailable()).WillOnce(Return(false));
+
+ // Teardown.
+ EXPECT_CALL(*adm, StopPlayout()).WillOnce(Return(0));
+ EXPECT_CALL(*adm, StopRecording()).WillOnce(Return(0));
+ EXPECT_CALL(*adm, RegisterAudioCallback(nullptr)).WillOnce(Return(0));
+ EXPECT_CALL(*adm, Terminate()).WillOnce(Return(0));
+}
+} // namespace
+
+// Tests that our stub library "works".
+TEST(WebRtcVoiceEngineTestStubLibrary, StartupShutdown) {
+ for (bool use_null_apm : {false, true}) {
+ std::unique_ptr<webrtc::TaskQueueFactory> task_queue_factory =
+ webrtc::CreateDefaultTaskQueueFactory();
+ rtc::scoped_refptr<webrtc::test::MockAudioDeviceModule> adm =
+ webrtc::test::MockAudioDeviceModule::CreateStrict();
+ AdmSetupExpectations(adm.get());
+ rtc::scoped_refptr<StrictMock<webrtc::test::MockAudioProcessing>> apm =
+ use_null_apm ? nullptr
+ : rtc::make_ref_counted<
+ StrictMock<webrtc::test::MockAudioProcessing>>();
+
+ webrtc::AudioProcessing::Config apm_config;
+ if (!use_null_apm) {
+ EXPECT_CALL(*apm, GetConfig()).WillRepeatedly(ReturnPointee(&apm_config));
+ EXPECT_CALL(*apm, ApplyConfig(_)).WillRepeatedly(SaveArg<0>(&apm_config));
+ EXPECT_CALL(*apm, DetachAecDump());
+ }
+ {
+ webrtc::FieldTrialBasedConfig trials;
+ cricket::WebRtcVoiceEngine engine(
+ task_queue_factory.get(), adm.get(),
+ webrtc::MockAudioEncoderFactory::CreateUnusedFactory(),
+ webrtc::MockAudioDecoderFactory::CreateUnusedFactory(), nullptr, apm,
+ nullptr, nullptr, trials);
+ engine.Init();
+ }
+ }
+}
+
+class FakeAudioSink : public webrtc::AudioSinkInterface {
+ public:
+ void OnData(const Data& audio) override {}
+};
+
+class FakeAudioSource : public cricket::AudioSource {
+ void SetSink(Sink* sink) override {}
+};
+
+class WebRtcVoiceEngineTestFake : public ::testing::TestWithParam<bool> {
+ public:
+ WebRtcVoiceEngineTestFake()
+ : use_null_apm_(GetParam()),
+ task_queue_factory_(webrtc::CreateDefaultTaskQueueFactory()),
+ adm_(webrtc::test::MockAudioDeviceModule::CreateStrict()),
+ apm_(use_null_apm_
+ ? nullptr
+ : rtc::make_ref_counted<
+ StrictMock<webrtc::test::MockAudioProcessing>>()),
+ call_(&field_trials_) {
+ // AudioDeviceModule.
+ AdmSetupExpectations(adm_.get());
+
+ if (!use_null_apm_) {
+ // AudioProcessing.
+ EXPECT_CALL(*apm_, GetConfig())
+ .WillRepeatedly(ReturnPointee(&apm_config_));
+ EXPECT_CALL(*apm_, ApplyConfig(_))
+ .WillRepeatedly(SaveArg<0>(&apm_config_));
+ EXPECT_CALL(*apm_, DetachAecDump());
+ }
+
+ // Default Options.
+ // TODO(kwiberg): We should use mock factories here, but a bunch of
+ // the tests here probe the specific set of codecs provided by the builtin
+ // factories. Those tests should probably be moved elsewhere.
+ auto encoder_factory = webrtc::CreateBuiltinAudioEncoderFactory();
+ auto decoder_factory = webrtc::CreateBuiltinAudioDecoderFactory();
+ engine_.reset(new cricket::WebRtcVoiceEngine(
+ task_queue_factory_.get(), adm_.get(), encoder_factory, decoder_factory,
+ nullptr, apm_, nullptr, nullptr, field_trials_));
+ engine_->Init();
+ send_parameters_.codecs.push_back(kPcmuCodec);
+ recv_parameters_.codecs.push_back(kPcmuCodec);
+
+ if (!use_null_apm_) {
+ // Default Options.
+ VerifyEchoCancellationSettings(/*enabled=*/true);
+ EXPECT_TRUE(IsHighPassFilterEnabled());
+ EXPECT_TRUE(apm_config_.noise_suppression.enabled);
+ EXPECT_EQ(apm_config_.noise_suppression.level, kDefaultNsLevel);
+ VerifyGainControlEnabledCorrectly();
+ VerifyGainControlDefaultSettings();
+ }
+ }
+
+ bool SetupChannel() {
+ send_channel_ = engine_->CreateSendChannel(
+ &call_, cricket::MediaConfig(), cricket::AudioOptions(),
+ webrtc::CryptoOptions(), webrtc::AudioCodecPairId::Create());
+ receive_channel_ = engine_->CreateReceiveChannel(
+ &call_, cricket::MediaConfig(), cricket::AudioOptions(),
+ webrtc::CryptoOptions(), webrtc::AudioCodecPairId::Create());
+ send_channel_->SetSsrcListChangedCallback(
+ [receive_channel =
+ receive_channel_.get()](const std::set<uint32_t>& choices) {
+ receive_channel->ChooseReceiverReportSsrc(choices);
+ });
+ send_channel_->SetSendCodecChangedCallback(
+ [receive_channel = receive_channel_.get(),
+ send_channel = send_channel_.get()]() {
+ receive_channel->SetReceiveNackEnabled(
+ send_channel->SendCodecHasNack());
+ receive_channel->SetReceiveNonSenderRttEnabled(
+ send_channel->SenderNonSenderRttEnabled());
+ });
+ return true;
+ }
+
+ bool SetupRecvStream() {
+ if (!SetupChannel()) {
+ return false;
+ }
+ return AddRecvStream(kSsrcX);
+ }
+
+ bool SetupSendStream() {
+ return SetupSendStream(cricket::StreamParams::CreateLegacy(kSsrcX));
+ }
+
+ bool SetupSendStream(const cricket::StreamParams& sp) {
+ if (!SetupChannel()) {
+ return false;
+ }
+ if (!send_channel_->AddSendStream(sp)) {
+ return false;
+ }
+ if (!use_null_apm_) {
+ EXPECT_CALL(*apm_, set_output_will_be_muted(false));
+ }
+ return send_channel_->SetAudioSend(kSsrcX, true, nullptr, &fake_source_);
+ }
+
+ bool AddRecvStream(uint32_t ssrc) {
+ EXPECT_TRUE(receive_channel_);
+ return receive_channel_->AddRecvStream(
+ cricket::StreamParams::CreateLegacy(ssrc));
+ }
+
+ void SetupForMultiSendStream() {
+ EXPECT_TRUE(SetupSendStream());
+ // Remove stream added in Setup.
+ EXPECT_TRUE(call_.GetAudioSendStream(kSsrcX));
+ EXPECT_TRUE(send_channel_->RemoveSendStream(kSsrcX));
+ // Verify the channel does not exist.
+ EXPECT_FALSE(call_.GetAudioSendStream(kSsrcX));
+ }
+
+ void DeliverPacket(const void* data, int len) {
+ webrtc::RtpPacketReceived packet;
+ packet.Parse(reinterpret_cast<const uint8_t*>(data), len);
+ receive_channel_->OnPacketReceived(packet);
+ rtc::Thread::Current()->ProcessMessages(0);
+ }
+
+ const cricket::FakeAudioSendStream& GetSendStream(uint32_t ssrc) {
+ const auto* send_stream = call_.GetAudioSendStream(ssrc);
+ EXPECT_TRUE(send_stream);
+ return *send_stream;
+ }
+
+ const cricket::FakeAudioReceiveStream& GetRecvStream(uint32_t ssrc) {
+ const auto* recv_stream = call_.GetAudioReceiveStream(ssrc);
+ EXPECT_TRUE(recv_stream);
+ return *recv_stream;
+ }
+
+ const webrtc::AudioSendStream::Config& GetSendStreamConfig(uint32_t ssrc) {
+ return GetSendStream(ssrc).GetConfig();
+ }
+
+ const webrtc::AudioReceiveStreamInterface::Config& GetRecvStreamConfig(
+ uint32_t ssrc) {
+ return GetRecvStream(ssrc).GetConfig();
+ }
+
+ void SetSend(bool enable) {
+ ASSERT_TRUE(send_channel_);
+ if (enable) {
+ EXPECT_CALL(*adm_, RecordingIsInitialized())
+ .Times(::testing::AtMost(1))
+ .WillOnce(Return(false));
+ EXPECT_CALL(*adm_, Recording())
+ .Times(::testing::AtMost(1))
+ .WillOnce(Return(false));
+ EXPECT_CALL(*adm_, InitRecording())
+ .Times(::testing::AtMost(1))
+ .WillOnce(Return(0));
+ }
+ send_channel_->SetSend(enable);
+ }
+
+ void SetSenderParameters(const cricket::AudioSenderParameter& params) {
+ ASSERT_TRUE(send_channel_);
+ EXPECT_TRUE(send_channel_->SetSenderParameters(params));
+ }
+
+ void SetAudioSend(uint32_t ssrc,
+ bool enable,
+ cricket::AudioSource* source,
+ const cricket::AudioOptions* options = nullptr) {
+ ASSERT_TRUE(send_channel_);
+ if (!use_null_apm_) {
+ EXPECT_CALL(*apm_, set_output_will_be_muted(!enable));
+ }
+ EXPECT_TRUE(send_channel_->SetAudioSend(ssrc, enable, options, source));
+ }
+
+ void TestInsertDtmf(uint32_t ssrc,
+ bool caller,
+ const cricket::AudioCodec& codec) {
+ EXPECT_TRUE(SetupChannel());
+ if (caller) {
+      // If this is a caller, the local description will be applied,
+      // adding the send stream.
+ EXPECT_TRUE(send_channel_->AddSendStream(
+ cricket::StreamParams::CreateLegacy(kSsrcX)));
+ }
+
+ // Test we can only InsertDtmf when the other side supports telephone-event.
+ SetSenderParameters(send_parameters_);
+ SetSend(true);
+ EXPECT_FALSE(send_channel_->CanInsertDtmf());
+ EXPECT_FALSE(send_channel_->InsertDtmf(ssrc, 1, 111));
+ send_parameters_.codecs.push_back(codec);
+ SetSenderParameters(send_parameters_);
+ EXPECT_TRUE(send_channel_->CanInsertDtmf());
+
+ if (!caller) {
+      // If this is the callee, there's no active send channel yet.
+ EXPECT_FALSE(send_channel_->InsertDtmf(ssrc, 2, 123));
+ EXPECT_TRUE(send_channel_->AddSendStream(
+ cricket::StreamParams::CreateLegacy(kSsrcX)));
+ }
+
+ // Check we fail if the ssrc is invalid.
+ EXPECT_FALSE(send_channel_->InsertDtmf(-1, 1, 111));
+
+ // Test send.
+ cricket::FakeAudioSendStream::TelephoneEvent telephone_event =
+ GetSendStream(kSsrcX).GetLatestTelephoneEvent();
+ EXPECT_EQ(-1, telephone_event.payload_type);
+ EXPECT_TRUE(send_channel_->InsertDtmf(ssrc, 2, 123));
+ telephone_event = GetSendStream(kSsrcX).GetLatestTelephoneEvent();
+ EXPECT_EQ(codec.id, telephone_event.payload_type);
+ EXPECT_EQ(codec.clockrate, telephone_event.payload_frequency);
+ EXPECT_EQ(2, telephone_event.event_code);
+ EXPECT_EQ(123, telephone_event.duration_ms);
+ }
+
+ void TestExtmapAllowMixedCaller(bool extmap_allow_mixed) {
+ // For a caller, the answer will be applied in set remote description
+ // where SetSenderParameters() is called.
+ EXPECT_TRUE(SetupChannel());
+ EXPECT_TRUE(send_channel_->AddSendStream(
+ cricket::StreamParams::CreateLegacy(kSsrcX)));
+ send_parameters_.extmap_allow_mixed = extmap_allow_mixed;
+ SetSenderParameters(send_parameters_);
+ const webrtc::AudioSendStream::Config& config = GetSendStreamConfig(kSsrcX);
+ EXPECT_EQ(extmap_allow_mixed, config.rtp.extmap_allow_mixed);
+ }
+
+ void TestExtmapAllowMixedCallee(bool extmap_allow_mixed) {
+ // For a callee, the answer will be applied in set local description
+ // where SetExtmapAllowMixed() and AddSendStream() are called.
+ EXPECT_TRUE(SetupChannel());
+ send_channel_->SetExtmapAllowMixed(extmap_allow_mixed);
+ EXPECT_TRUE(send_channel_->AddSendStream(
+ cricket::StreamParams::CreateLegacy(kSsrcX)));
+
+ const webrtc::AudioSendStream::Config& config = GetSendStreamConfig(kSsrcX);
+ EXPECT_EQ(extmap_allow_mixed, config.rtp.extmap_allow_mixed);
+ }
+
+ // Test that send bandwidth is set correctly.
+ // `codec` is the codec under test.
+ // `max_bitrate` is a parameter to set to SetMaxSendBandwidth().
+ // `expected_result` is the expected result from SetMaxSendBandwidth().
+ // `expected_bitrate` is the expected audio bitrate afterward.
+ void TestMaxSendBandwidth(const cricket::AudioCodec& codec,
+ int max_bitrate,
+ bool expected_result,
+ int expected_bitrate) {
+ cricket::AudioSenderParameter parameters;
+ parameters.codecs.push_back(codec);
+ parameters.max_bandwidth_bps = max_bitrate;
+ if (expected_result) {
+ SetSenderParameters(parameters);
+ } else {
+ EXPECT_FALSE(send_channel_->SetSenderParameters(parameters));
+ }
+ EXPECT_EQ(expected_bitrate, GetCodecBitrate(kSsrcX));
+ }
+
+ // Sets the per-stream maximum bitrate limit for the specified SSRC.
+ bool SetMaxBitrateForStream(int32_t ssrc, int bitrate) {
+ webrtc::RtpParameters parameters =
+ send_channel_->GetRtpSendParameters(ssrc);
+ EXPECT_EQ(1UL, parameters.encodings.size());
+
+ parameters.encodings[0].max_bitrate_bps = bitrate;
+ return send_channel_->SetRtpSendParameters(ssrc, parameters).ok();
+ }
+
+ void SetGlobalMaxBitrate(const cricket::AudioCodec& codec, int bitrate) {
+ cricket::AudioSenderParameter send_parameters;
+ send_parameters.codecs.push_back(codec);
+ send_parameters.max_bandwidth_bps = bitrate;
+ SetSenderParameters(send_parameters);
+ }
+
+ void CheckSendCodecBitrate(int32_t ssrc,
+ const char expected_name[],
+ int expected_bitrate) {
+ const auto& spec = GetSendStreamConfig(ssrc).send_codec_spec;
+ EXPECT_EQ(expected_name, spec->format.name);
+ EXPECT_EQ(expected_bitrate, spec->target_bitrate_bps);
+ }
+
+ absl::optional<int> GetCodecBitrate(int32_t ssrc) {
+ return GetSendStreamConfig(ssrc).send_codec_spec->target_bitrate_bps;
+ }
+
+ int GetMaxBitrate(int32_t ssrc) {
+ return GetSendStreamConfig(ssrc).max_bitrate_bps;
+ }
+
+ const absl::optional<std::string>& GetAudioNetworkAdaptorConfig(
+ int32_t ssrc) {
+ return GetSendStreamConfig(ssrc).audio_network_adaptor_config;
+ }
+
+ void SetAndExpectMaxBitrate(const cricket::AudioCodec& codec,
+ int global_max,
+ int stream_max,
+ bool expected_result,
+ int expected_codec_bitrate) {
+ // Clear the bitrate limit from the previous test case.
+ EXPECT_TRUE(SetMaxBitrateForStream(kSsrcX, -1));
+
+ // Attempt to set the requested bitrate limits.
+ SetGlobalMaxBitrate(codec, global_max);
+ EXPECT_EQ(expected_result, SetMaxBitrateForStream(kSsrcX, stream_max));
+
+ // Verify that reading back the parameters gives results
+ // consistent with the Set() result.
+ webrtc::RtpParameters resulting_parameters =
+ send_channel_->GetRtpSendParameters(kSsrcX);
+ EXPECT_EQ(1UL, resulting_parameters.encodings.size());
+ EXPECT_EQ(expected_result ? stream_max : -1,
+ resulting_parameters.encodings[0].max_bitrate_bps);
+
+ // Verify that the codec settings have the expected bitrate.
+ EXPECT_EQ(expected_codec_bitrate, GetCodecBitrate(kSsrcX));
+ EXPECT_EQ(expected_codec_bitrate, GetMaxBitrate(kSsrcX));
+ }
+
+ void SetSendCodecsShouldWorkForBitrates(const char* min_bitrate_kbps,
+ int expected_min_bitrate_bps,
+ const char* start_bitrate_kbps,
+ int expected_start_bitrate_bps,
+ const char* max_bitrate_kbps,
+ int expected_max_bitrate_bps) {
+ EXPECT_TRUE(SetupSendStream());
+ auto& codecs = send_parameters_.codecs;
+ codecs.clear();
+ codecs.push_back(kOpusCodec);
+ codecs[0].params[cricket::kCodecParamMinBitrate] = min_bitrate_kbps;
+ codecs[0].params[cricket::kCodecParamStartBitrate] = start_bitrate_kbps;
+ codecs[0].params[cricket::kCodecParamMaxBitrate] = max_bitrate_kbps;
+ EXPECT_CALL(*call_.GetMockTransportControllerSend(),
+ SetSdpBitrateParameters(
+ AllOf(Field(&BitrateConstraints::min_bitrate_bps,
+ expected_min_bitrate_bps),
+ Field(&BitrateConstraints::start_bitrate_bps,
+ expected_start_bitrate_bps),
+ Field(&BitrateConstraints::max_bitrate_bps,
+ expected_max_bitrate_bps))));
+
+ SetSenderParameters(send_parameters_);
+ }
+
+ void TestSetSendRtpHeaderExtensions(const std::string& ext) {
+ EXPECT_TRUE(SetupSendStream());
+
+ // Ensure extensions are off by default.
+ EXPECT_EQ(0u, GetSendStreamConfig(kSsrcX).rtp.extensions.size());
+
+ // Ensure unknown extensions won't cause an error.
+ send_parameters_.extensions.push_back(
+ webrtc::RtpExtension("urn:ietf:params:unknownextention", 1));
+ SetSenderParameters(send_parameters_);
+ EXPECT_EQ(0u, GetSendStreamConfig(kSsrcX).rtp.extensions.size());
+
+ // Ensure extensions stay off with an empty list of headers.
+ send_parameters_.extensions.clear();
+ SetSenderParameters(send_parameters_);
+ EXPECT_EQ(0u, GetSendStreamConfig(kSsrcX).rtp.extensions.size());
+
+ // Ensure extension is set properly.
+ const int id = 1;
+ send_parameters_.extensions.push_back(webrtc::RtpExtension(ext, id));
+ SetSenderParameters(send_parameters_);
+ EXPECT_EQ(1u, GetSendStreamConfig(kSsrcX).rtp.extensions.size());
+ EXPECT_EQ(ext, GetSendStreamConfig(kSsrcX).rtp.extensions[0].uri);
+ EXPECT_EQ(id, GetSendStreamConfig(kSsrcX).rtp.extensions[0].id);
+
+ // Ensure extension is set properly on new stream.
+ EXPECT_TRUE(send_channel_->AddSendStream(
+ cricket::StreamParams::CreateLegacy(kSsrcY)));
+ EXPECT_NE(call_.GetAudioSendStream(kSsrcX),
+ call_.GetAudioSendStream(kSsrcY));
+ EXPECT_EQ(1u, GetSendStreamConfig(kSsrcY).rtp.extensions.size());
+ EXPECT_EQ(ext, GetSendStreamConfig(kSsrcY).rtp.extensions[0].uri);
+ EXPECT_EQ(id, GetSendStreamConfig(kSsrcY).rtp.extensions[0].id);
+
+ // Ensure all extensions go back off with an empty list.
+ send_parameters_.codecs.push_back(kPcmuCodec);
+ send_parameters_.extensions.clear();
+ SetSenderParameters(send_parameters_);
+ EXPECT_EQ(0u, GetSendStreamConfig(kSsrcX).rtp.extensions.size());
+ EXPECT_EQ(0u, GetSendStreamConfig(kSsrcY).rtp.extensions.size());
+ }
+
+ void TestSetRecvRtpHeaderExtensions(const std::string& ext) {
+ EXPECT_TRUE(SetupRecvStream());
+
+ // Ensure extensions are off by default.
+ EXPECT_THAT(
+ receive_channel_->GetRtpReceiverParameters(kSsrcX).header_extensions,
+ IsEmpty());
+
+ // Ensure unknown extensions won't cause an error.
+ recv_parameters_.extensions.push_back(
+ webrtc::RtpExtension("urn:ietf:params:unknownextention", 1));
+ EXPECT_TRUE(receive_channel_->SetReceiverParameters(recv_parameters_));
+ EXPECT_THAT(
+ receive_channel_->GetRtpReceiverParameters(kSsrcX).header_extensions,
+ IsEmpty());
+
+ // Ensure extensions stay off with an empty list of headers.
+ recv_parameters_.extensions.clear();
+ EXPECT_TRUE(receive_channel_->SetReceiverParameters(recv_parameters_));
+ EXPECT_THAT(
+ receive_channel_->GetRtpReceiverParameters(kSsrcX).header_extensions,
+ IsEmpty());
+
+ // Ensure extension is set properly.
+ const int id = 2;
+ recv_parameters_.extensions.push_back(webrtc::RtpExtension(ext, id));
+ EXPECT_TRUE(receive_channel_->SetReceiverParameters(recv_parameters_));
+ EXPECT_EQ(
+ receive_channel_->GetRtpReceiverParameters(kSsrcX).header_extensions,
+ recv_parameters_.extensions);
+
+ // Ensure extension is set properly on new stream.
+ EXPECT_TRUE(AddRecvStream(kSsrcY));
+ EXPECT_EQ(
+ receive_channel_->GetRtpReceiverParameters(kSsrcY).header_extensions,
+ recv_parameters_.extensions);
+
+ // Ensure all extensions go back off with an empty list.
+ recv_parameters_.extensions.clear();
+ EXPECT_TRUE(receive_channel_->SetReceiverParameters(recv_parameters_));
+ EXPECT_THAT(
+ receive_channel_->GetRtpReceiverParameters(kSsrcX).header_extensions,
+ IsEmpty());
+ EXPECT_THAT(
+ receive_channel_->GetRtpReceiverParameters(kSsrcY).header_extensions,
+ IsEmpty());
+ }
+
+ webrtc::AudioSendStream::Stats GetAudioSendStreamStats() const {
+ webrtc::AudioSendStream::Stats stats;
+ stats.local_ssrc = 12;
+ stats.payload_bytes_sent = 345;
+ stats.header_and_padding_bytes_sent = 56;
+ stats.packets_sent = 678;
+ stats.packets_lost = 9012;
+ stats.fraction_lost = 34.56f;
+ stats.codec_name = "codec_name_send";
+ stats.codec_payload_type = 0;
+ stats.jitter_ms = 12;
+ stats.rtt_ms = 345;
+ stats.audio_level = 678;
+ stats.apm_statistics.delay_median_ms = 234;
+ stats.apm_statistics.delay_standard_deviation_ms = 567;
+ stats.apm_statistics.echo_return_loss = 890;
+ stats.apm_statistics.echo_return_loss_enhancement = 1234;
+ stats.apm_statistics.residual_echo_likelihood = 0.432f;
+ stats.apm_statistics.residual_echo_likelihood_recent_max = 0.6f;
+ stats.ana_statistics.bitrate_action_counter = 321;
+ stats.ana_statistics.channel_action_counter = 432;
+ stats.ana_statistics.dtx_action_counter = 543;
+ stats.ana_statistics.fec_action_counter = 654;
+ stats.ana_statistics.frame_length_increase_counter = 765;
+ stats.ana_statistics.frame_length_decrease_counter = 876;
+ stats.ana_statistics.uplink_packet_loss_fraction = 987.0;
+ return stats;
+ }
+ void SetAudioSendStreamStats() {
+ for (auto* s : call_.GetAudioSendStreams()) {
+ s->SetStats(GetAudioSendStreamStats());
+ }
+ }
+ void VerifyVoiceSenderInfo(const cricket::VoiceSenderInfo& info,
+ bool is_sending) {
+ const auto stats = GetAudioSendStreamStats();
+ EXPECT_EQ(info.ssrc(), stats.local_ssrc);
+ EXPECT_EQ(info.payload_bytes_sent, stats.payload_bytes_sent);
+ EXPECT_EQ(info.header_and_padding_bytes_sent,
+ stats.header_and_padding_bytes_sent);
+ EXPECT_EQ(info.packets_sent, stats.packets_sent);
+ EXPECT_EQ(info.packets_lost, stats.packets_lost);
+ EXPECT_EQ(info.fraction_lost, stats.fraction_lost);
+ EXPECT_EQ(info.codec_name, stats.codec_name);
+ EXPECT_EQ(info.codec_payload_type, stats.codec_payload_type);
+ EXPECT_EQ(info.jitter_ms, stats.jitter_ms);
+ EXPECT_EQ(info.rtt_ms, stats.rtt_ms);
+ EXPECT_EQ(info.audio_level, stats.audio_level);
+ EXPECT_EQ(info.apm_statistics.delay_median_ms,
+ stats.apm_statistics.delay_median_ms);
+ EXPECT_EQ(info.apm_statistics.delay_standard_deviation_ms,
+ stats.apm_statistics.delay_standard_deviation_ms);
+ EXPECT_EQ(info.apm_statistics.echo_return_loss,
+ stats.apm_statistics.echo_return_loss);
+ EXPECT_EQ(info.apm_statistics.echo_return_loss_enhancement,
+ stats.apm_statistics.echo_return_loss_enhancement);
+ EXPECT_EQ(info.apm_statistics.residual_echo_likelihood,
+ stats.apm_statistics.residual_echo_likelihood);
+ EXPECT_EQ(info.apm_statistics.residual_echo_likelihood_recent_max,
+ stats.apm_statistics.residual_echo_likelihood_recent_max);
+ EXPECT_EQ(info.ana_statistics.bitrate_action_counter,
+ stats.ana_statistics.bitrate_action_counter);
+ EXPECT_EQ(info.ana_statistics.channel_action_counter,
+ stats.ana_statistics.channel_action_counter);
+ EXPECT_EQ(info.ana_statistics.dtx_action_counter,
+ stats.ana_statistics.dtx_action_counter);
+ EXPECT_EQ(info.ana_statistics.fec_action_counter,
+ stats.ana_statistics.fec_action_counter);
+ EXPECT_EQ(info.ana_statistics.frame_length_increase_counter,
+ stats.ana_statistics.frame_length_increase_counter);
+ EXPECT_EQ(info.ana_statistics.frame_length_decrease_counter,
+ stats.ana_statistics.frame_length_decrease_counter);
+ EXPECT_EQ(info.ana_statistics.uplink_packet_loss_fraction,
+ stats.ana_statistics.uplink_packet_loss_fraction);
+ }
+
+ webrtc::AudioReceiveStreamInterface::Stats GetAudioReceiveStreamStats()
+ const {
+ webrtc::AudioReceiveStreamInterface::Stats stats;
+ stats.remote_ssrc = 123;
+ stats.payload_bytes_received = 456;
+ stats.header_and_padding_bytes_received = 67;
+ stats.packets_received = 768;
+ stats.packets_lost = 101;
+ stats.codec_name = "codec_name_recv";
+ stats.codec_payload_type = 0;
+ stats.jitter_ms = 901;
+ stats.jitter_buffer_ms = 234;
+ stats.jitter_buffer_preferred_ms = 567;
+ stats.delay_estimate_ms = 890;
+ stats.audio_level = 1234;
+ stats.total_samples_received = 5678901;
+ stats.concealed_samples = 234;
+ stats.concealment_events = 12;
+ stats.jitter_buffer_delay_seconds = 34;
+ stats.jitter_buffer_emitted_count = 77;
+ stats.expand_rate = 5.67f;
+ stats.speech_expand_rate = 8.90f;
+ stats.secondary_decoded_rate = 1.23f;
+ stats.secondary_discarded_rate = 0.12f;
+ stats.accelerate_rate = 4.56f;
+ stats.preemptive_expand_rate = 7.89f;
+ stats.decoding_calls_to_silence_generator = 12;
+ stats.decoding_calls_to_neteq = 345;
+ stats.decoding_normal = 67890;
+ stats.decoding_plc = 1234;
+ stats.decoding_codec_plc = 1236;
+ stats.decoding_cng = 5678;
+ stats.decoding_plc_cng = 9012;
+ stats.decoding_muted_output = 3456;
+ stats.capture_start_ntp_time_ms = 7890;
+ return stats;
+ }
+ void SetAudioReceiveStreamStats() {
+ for (auto* s : call_.GetAudioReceiveStreams()) {
+ s->SetStats(GetAudioReceiveStreamStats());
+ }
+ }
+ void VerifyVoiceReceiverInfo(const cricket::VoiceReceiverInfo& info) {
+ const auto stats = GetAudioReceiveStreamStats();
+ EXPECT_EQ(info.ssrc(), stats.remote_ssrc);
+ EXPECT_EQ(info.payload_bytes_received, stats.payload_bytes_received);
+ EXPECT_EQ(info.header_and_padding_bytes_received,
+ stats.header_and_padding_bytes_received);
+ EXPECT_EQ(rtc::checked_cast<unsigned int>(info.packets_received),
+ stats.packets_received);
+ EXPECT_EQ(info.packets_lost, stats.packets_lost);
+ EXPECT_EQ(info.codec_name, stats.codec_name);
+ EXPECT_EQ(info.codec_payload_type, stats.codec_payload_type);
+ EXPECT_EQ(rtc::checked_cast<unsigned int>(info.jitter_ms), stats.jitter_ms);
+ EXPECT_EQ(rtc::checked_cast<unsigned int>(info.jitter_buffer_ms),
+ stats.jitter_buffer_ms);
+ EXPECT_EQ(rtc::checked_cast<unsigned int>(info.jitter_buffer_preferred_ms),
+ stats.jitter_buffer_preferred_ms);
+ EXPECT_EQ(rtc::checked_cast<unsigned int>(info.delay_estimate_ms),
+ stats.delay_estimate_ms);
+ EXPECT_EQ(info.audio_level, stats.audio_level);
+ EXPECT_EQ(info.total_samples_received, stats.total_samples_received);
+ EXPECT_EQ(info.concealed_samples, stats.concealed_samples);
+ EXPECT_EQ(info.concealment_events, stats.concealment_events);
+ EXPECT_EQ(info.jitter_buffer_delay_seconds,
+ stats.jitter_buffer_delay_seconds);
+ EXPECT_EQ(info.jitter_buffer_emitted_count,
+ stats.jitter_buffer_emitted_count);
+ EXPECT_EQ(info.expand_rate, stats.expand_rate);
+ EXPECT_EQ(info.speech_expand_rate, stats.speech_expand_rate);
+ EXPECT_EQ(info.secondary_decoded_rate, stats.secondary_decoded_rate);
+ EXPECT_EQ(info.secondary_discarded_rate, stats.secondary_discarded_rate);
+ EXPECT_EQ(info.accelerate_rate, stats.accelerate_rate);
+ EXPECT_EQ(info.preemptive_expand_rate, stats.preemptive_expand_rate);
+ EXPECT_EQ(info.decoding_calls_to_silence_generator,
+ stats.decoding_calls_to_silence_generator);
+ EXPECT_EQ(info.decoding_calls_to_neteq, stats.decoding_calls_to_neteq);
+ EXPECT_EQ(info.decoding_normal, stats.decoding_normal);
+ EXPECT_EQ(info.decoding_plc, stats.decoding_plc);
+ EXPECT_EQ(info.decoding_codec_plc, stats.decoding_codec_plc);
+ EXPECT_EQ(info.decoding_cng, stats.decoding_cng);
+ EXPECT_EQ(info.decoding_plc_cng, stats.decoding_plc_cng);
+ EXPECT_EQ(info.decoding_muted_output, stats.decoding_muted_output);
+ EXPECT_EQ(info.capture_start_ntp_time_ms, stats.capture_start_ntp_time_ms);
+ }
+ void VerifyVoiceSendRecvCodecs(
+ const cricket::VoiceMediaSendInfo& send_info,
+ const cricket::VoiceMediaReceiveInfo& receive_info) const {
+ EXPECT_EQ(send_parameters_.codecs.size(), send_info.send_codecs.size());
+ for (const cricket::AudioCodec& codec : send_parameters_.codecs) {
+ ASSERT_EQ(send_info.send_codecs.count(codec.id), 1U);
+ EXPECT_EQ(send_info.send_codecs.find(codec.id)->second,
+ codec.ToCodecParameters());
+ }
+ EXPECT_EQ(recv_parameters_.codecs.size(),
+ receive_info.receive_codecs.size());
+ for (const cricket::AudioCodec& codec : recv_parameters_.codecs) {
+ ASSERT_EQ(receive_info.receive_codecs.count(codec.id), 1U);
+ EXPECT_EQ(receive_info.receive_codecs.find(codec.id)->second,
+ codec.ToCodecParameters());
+ }
+ }
+
+ void VerifyGainControlEnabledCorrectly() {
+ EXPECT_TRUE(apm_config_.gain_controller1.enabled);
+ EXPECT_EQ(kDefaultAgcMode, apm_config_.gain_controller1.mode);
+ }
+
+ void VerifyGainControlDefaultSettings() {
+ EXPECT_EQ(3, apm_config_.gain_controller1.target_level_dbfs);
+ EXPECT_EQ(9, apm_config_.gain_controller1.compression_gain_db);
+ EXPECT_TRUE(apm_config_.gain_controller1.enable_limiter);
+ }
+
+ void VerifyEchoCancellationSettings(bool enabled) {
+ constexpr bool kDefaultUseAecm =
+#if defined(WEBRTC_ANDROID)
+ true;
+#else
+ false;
+#endif
+ EXPECT_EQ(apm_config_.echo_canceller.enabled, enabled);
+ EXPECT_EQ(apm_config_.echo_canceller.mobile_mode, kDefaultUseAecm);
+ }
+
+ // Returns whether the captured APM config has the high-pass filter enabled.
+ bool IsHighPassFilterEnabled() {
+ return apm_config_.high_pass_filter.enabled;
+ }
+
+ // Downcasts a send-channel interface pointer to the concrete
+ // WebRtcVoiceSendChannel implementation these tests exercise.
+ cricket::WebRtcVoiceSendChannel* SendImplFromPointer(
+ cricket::VoiceMediaSendChannelInterface* channel) {
+ return static_cast<cricket::WebRtcVoiceSendChannel*>(channel);
+ }
+
+ // Convenience accessors for the concrete send/receive channel impls.
+ cricket::WebRtcVoiceSendChannel* SendImpl() {
+ return SendImplFromPointer(send_channel_.get());
+ }
+ cricket::WebRtcVoiceReceiveChannel* ReceiveImpl() {
+ return static_cast<cricket::WebRtcVoiceReceiveChannel*>(
+ receive_channel_.get());
+ }
+
+ protected:
+ rtc::AutoThread main_thread_;
+ // Presumably bound to the test-suite bool parameter (runs with/without an
+ // APM) — see the INSTANTIATE_TEST_SUITE_P below; confirm in the ctor.
+ const bool use_null_apm_;
+ webrtc::test::ScopedKeyValueConfig field_trials_;
+ std::unique_ptr<webrtc::TaskQueueFactory> task_queue_factory_;
+ rtc::scoped_refptr<webrtc::test::MockAudioDeviceModule> adm_;
+ rtc::scoped_refptr<StrictMock<webrtc::test::MockAudioProcessing>> apm_;
+ cricket::FakeCall call_;
+ std::unique_ptr<cricket::WebRtcVoiceEngine> engine_;
+ std::unique_ptr<cricket::VoiceMediaSendChannelInterface> send_channel_;
+ std::unique_ptr<cricket::VoiceMediaReceiveChannelInterface> receive_channel_;
+ cricket::AudioSenderParameter send_parameters_;
+ cricket::AudioReceiverParameters recv_parameters_;
+ FakeAudioSource fake_source_;
+ // AudioProcessing config inspected by the Verify* helpers above; populated
+ // elsewhere in the fixture.
+ webrtc::AudioProcessing::Config apm_config_;
+};
+
+// Runs the whole suite twice; the bool parameter selects whether the engine
+// is created without an AudioProcessing module (see use_null_apm_).
+INSTANTIATE_TEST_SUITE_P(TestBothWithAndWithoutNullApm,
+ WebRtcVoiceEngineTestFake,
+ ::testing::Values(false, true));
+
+// Tests that we can create and destroy a channel.
+TEST_P(WebRtcVoiceEngineTestFake, CreateMediaChannel) {
+ EXPECT_TRUE(SetupChannel());
+}
+
+// Test that we can add a send stream and that it has the correct defaults.
+TEST_P(WebRtcVoiceEngineTestFake, CreateSendStream) {
+ EXPECT_TRUE(SetupChannel());
+ EXPECT_TRUE(send_channel_->AddSendStream(
+ cricket::StreamParams::CreateLegacy(kSsrcX)));
+ const webrtc::AudioSendStream::Config& config = GetSendStreamConfig(kSsrcX);
+ EXPECT_EQ(kSsrcX, config.rtp.ssrc);
+ // Defaults: empty CNAME, no header extensions, and the stream's transport
+ // wired to the channel's own transport.
+ EXPECT_EQ("", config.rtp.c_name);
+ EXPECT_EQ(0u, config.rtp.extensions.size());
+ EXPECT_EQ(SendImpl()->transport(), config.send_transport)
+}
+
+// Test that we can add a receive stream and that it has the correct defaults.
+TEST_P(WebRtcVoiceEngineTestFake, CreateRecvStream) {
+ EXPECT_TRUE(SetupChannel());
+ EXPECT_TRUE(AddRecvStream(kSsrcX));
+ const webrtc::AudioReceiveStreamInterface::Config& config =
+ GetRecvStreamConfig(kSsrcX);
+ EXPECT_EQ(kSsrcX, config.rtp.remote_ssrc);
+ // 0xFA17FA17 is expected as the default local SSRC — presumably a fixture/
+ // engine default ("FA17" ~ "fake"); confirm against the engine if changed.
+ EXPECT_EQ(0xFA17FA17, config.rtp.local_ssrc);
+ EXPECT_EQ(ReceiveImpl()->transport(), config.rtcp_send_transport);
+ EXPECT_EQ("", config.sync_group);
+}
+
+// Checks that the engine advertises transport-cc feedback on Opus.
+TEST_P(WebRtcVoiceEngineTestFake, OpusSupportsTransportCc) {
+ const std::vector<cricket::AudioCodec>& codecs = engine_->send_codecs();
+ bool opus_found = false;
+ for (const cricket::AudioCodec& codec : codecs) {
+ if (codec.name == "opus") {
+ EXPECT_TRUE(HasTransportCc(codec));
+ opus_found = true;
+ }
+ }
+ EXPECT_TRUE(opus_found);
+}
+
+// Test that we set our inbound codecs properly, including changing PT.
+TEST_P(WebRtcVoiceEngineTestFake, SetRecvCodecs) {
+ EXPECT_TRUE(SetupChannel());
+ cricket::AudioReceiverParameters parameters;
+ parameters.codecs.push_back(kOpusCodec);
+ parameters.codecs.push_back(kPcmuCodec);
+ parameters.codecs.push_back(kTelephoneEventCodec1);
+ parameters.codecs.push_back(kTelephoneEventCodec2);
+ parameters.codecs[0].id = 106; // collide with existing CN 32k
+ parameters.codecs[2].id = 126;
+ EXPECT_TRUE(receive_channel_->SetReceiverParameters(parameters));
+ EXPECT_TRUE(AddRecvStream(kSsrcX));
+ // The decoder map must reflect the remapped payload types set above.
+ EXPECT_THAT(GetRecvStreamConfig(kSsrcX).decoder_map,
+ (ContainerEq<std::map<int, webrtc::SdpAudioFormat>>(
+ {{0, {"PCMU", 8000, 1}},
+ {106, {"OPUS", 48000, 2}},
+ {126, {"telephone-event", 8000, 1}},
+ {107, {"telephone-event", 32000, 1}}})));
+}
+
+// Test that we fail to set an unknown inbound codec.
+TEST_P(WebRtcVoiceEngineTestFake, SetRecvCodecsUnsupportedCodec) {
+ EXPECT_TRUE(SetupChannel());
+ cricket::AudioReceiverParameters parameters;
+ parameters.codecs.push_back(kOpusCodec);
+ parameters.codecs.push_back(cricket::CreateAudioCodec(127, "XYZ", 32000, 1));
+ EXPECT_FALSE(receive_channel_->SetReceiverParameters(parameters));
+}
+
+// Test that we fail if we have duplicate types in the inbound list.
+TEST_P(WebRtcVoiceEngineTestFake, SetRecvCodecsDuplicatePayloadType) {
+ EXPECT_TRUE(SetupChannel());
+ cricket::AudioReceiverParameters parameters;
+ parameters.codecs.push_back(kOpusCodec);
+ parameters.codecs.push_back(kCn16000Codec);
+ parameters.codecs[1].id = kOpusCodec.id;
+ EXPECT_FALSE(receive_channel_->SetReceiverParameters(parameters));
+}
+
+// Test that we can decode OPUS without stereo parameters.
+TEST_P(WebRtcVoiceEngineTestFake, SetRecvCodecsWithOpusNoStereo) {
+ EXPECT_TRUE(SetupChannel());
+ cricket::AudioReceiverParameters parameters;
+ parameters.codecs.push_back(kPcmuCodec);
+ parameters.codecs.push_back(kOpusCodec);
+ EXPECT_TRUE(receive_channel_->SetReceiverParameters(parameters));
+ EXPECT_TRUE(AddRecvStream(kSsrcX));
+ EXPECT_THAT(GetRecvStreamConfig(kSsrcX).decoder_map,
+ (ContainerEq<std::map<int, webrtc::SdpAudioFormat>>(
+ {{0, {"PCMU", 8000, 1}}, {111, {"opus", 48000, 2}}})));
+}
+
+// Test that we can decode OPUS with stereo = 0.
+// The "stereo" fmtp parameter must be forwarded into the SdpAudioFormat.
+TEST_P(WebRtcVoiceEngineTestFake, SetRecvCodecsWithOpus0Stereo) {
+ EXPECT_TRUE(SetupChannel());
+ cricket::AudioReceiverParameters parameters;
+ parameters.codecs.push_back(kPcmuCodec);
+ parameters.codecs.push_back(kOpusCodec);
+ parameters.codecs[1].params["stereo"] = "0";
+ EXPECT_TRUE(receive_channel_->SetReceiverParameters(parameters));
+ EXPECT_TRUE(AddRecvStream(kSsrcX));
+ EXPECT_THAT(GetRecvStreamConfig(kSsrcX).decoder_map,
+ (ContainerEq<std::map<int, webrtc::SdpAudioFormat>>(
+ {{0, {"PCMU", 8000, 1}},
+ {111, {"opus", 48000, 2, {{"stereo", "0"}}}}})));
+}
+
+// Test that we can decode OPUS with stereo = 1.
+TEST_P(WebRtcVoiceEngineTestFake, SetRecvCodecsWithOpus1Stereo) {
+ EXPECT_TRUE(SetupChannel());
+ cricket::AudioReceiverParameters parameters;
+ parameters.codecs.push_back(kPcmuCodec);
+ parameters.codecs.push_back(kOpusCodec);
+ parameters.codecs[1].params["stereo"] = "1";
+ EXPECT_TRUE(receive_channel_->SetReceiverParameters(parameters));
+ EXPECT_TRUE(AddRecvStream(kSsrcX));
+ EXPECT_THAT(GetRecvStreamConfig(kSsrcX).decoder_map,
+ (ContainerEq<std::map<int, webrtc::SdpAudioFormat>>(
+ {{0, {"PCMU", 8000, 1}},
+ {111, {"opus", 48000, 2, {{"stereo", "1"}}}}})));
+}
+
+// Test that changes to recv codecs are applied to all streams.
+TEST_P(WebRtcVoiceEngineTestFake, SetRecvCodecsWithMultipleStreams) {
+ EXPECT_TRUE(SetupChannel());
+ cricket::AudioReceiverParameters parameters;
+ parameters.codecs.push_back(kOpusCodec);
+ parameters.codecs.push_back(kPcmuCodec);
+ parameters.codecs.push_back(kTelephoneEventCodec1);
+ parameters.codecs.push_back(kTelephoneEventCodec2);
+ parameters.codecs[0].id = 106; // collide with existing CN 32k
+ parameters.codecs[2].id = 126;
+ EXPECT_TRUE(receive_channel_->SetReceiverParameters(parameters));
+ // Streams added after the parameters were set must pick up the same map.
+ for (const auto& ssrc : {kSsrcX, kSsrcY}) {
+ EXPECT_TRUE(AddRecvStream(ssrc));
+ EXPECT_THAT(GetRecvStreamConfig(ssrc).decoder_map,
+ (ContainerEq<std::map<int, webrtc::SdpAudioFormat>>(
+ {{0, {"PCMU", 8000, 1}},
+ {106, {"OPUS", 48000, 2}},
+ {126, {"telephone-event", 8000, 1}},
+ {107, {"telephone-event", 32000, 1}}})));
+ }
+}
+
+// Test that codecs set after a stream already exists are pushed into that
+// stream's decoder map.
+TEST_P(WebRtcVoiceEngineTestFake, SetRecvCodecsAfterAddingStreams) {
+ EXPECT_TRUE(SetupRecvStream());
+ cricket::AudioReceiverParameters parameters;
+ parameters.codecs.push_back(kOpusCodec);
+ parameters.codecs[0].id = 106; // collide with existing CN 32k
+ EXPECT_TRUE(receive_channel_->SetReceiverParameters(parameters));
+
+ const auto& dm = GetRecvStreamConfig(kSsrcX).decoder_map;
+ ASSERT_EQ(1u, dm.count(106));
+ EXPECT_EQ(webrtc::SdpAudioFormat("opus", 48000, 2), dm.at(106));
+}
+
+// Test that we can apply the same set of codecs again while playing.
+TEST_P(WebRtcVoiceEngineTestFake, SetRecvCodecsWhilePlaying) {
+ EXPECT_TRUE(SetupRecvStream());
+ cricket::AudioReceiverParameters parameters;
+ parameters.codecs.push_back(kPcmuCodec);
+ parameters.codecs.push_back(kCn16000Codec);
+ EXPECT_TRUE(receive_channel_->SetReceiverParameters(parameters));
+ receive_channel_->SetPlayout(true);
+ EXPECT_TRUE(receive_channel_->SetReceiverParameters(parameters));
+
+ // Remapping a payload type to a different codec should fail.
+ parameters.codecs[0] = kOpusCodec;
+ parameters.codecs[0].id = kPcmuCodec.id;
+ EXPECT_FALSE(receive_channel_->SetReceiverParameters(parameters));
+ // Playout must not have been interrupted by the failed update.
+ EXPECT_TRUE(GetRecvStream(kSsrcX).started());
+}
+
+// Test that we can add a codec while playing.
+TEST_P(WebRtcVoiceEngineTestFake, AddRecvCodecsWhilePlaying) {
+ EXPECT_TRUE(SetupRecvStream());
+ cricket::AudioReceiverParameters parameters;
+ parameters.codecs.push_back(kPcmuCodec);
+ parameters.codecs.push_back(kCn16000Codec);
+ EXPECT_TRUE(receive_channel_->SetReceiverParameters(parameters));
+ receive_channel_->SetPlayout(true);
+
+ parameters.codecs.push_back(kOpusCodec);
+ EXPECT_TRUE(receive_channel_->SetReceiverParameters(parameters));
+ EXPECT_TRUE(GetRecvStream(kSsrcX).started());
+}
+
+// Test that we accept adding the same codec with a different payload type.
+// See: https://bugs.chromium.org/p/webrtc/issues/detail?id=5847
+TEST_P(WebRtcVoiceEngineTestFake, ChangeRecvCodecPayloadType) {
+ EXPECT_TRUE(SetupRecvStream());
+ cricket::AudioReceiverParameters parameters;
+ parameters.codecs.push_back(kOpusCodec);
+ EXPECT_TRUE(receive_channel_->SetReceiverParameters(parameters));
+
+ ++parameters.codecs[0].id;
+ EXPECT_TRUE(receive_channel_->SetReceiverParameters(parameters));
+}
+
+// Test that we do allow setting Opus/Red by default.
+TEST_P(WebRtcVoiceEngineTestFake, RecvRedDefault) {
+ EXPECT_TRUE(SetupRecvStream());
+ cricket::AudioReceiverParameters parameters;
+ parameters.codecs.push_back(kOpusCodec);
+ parameters.codecs.push_back(kRed48000Codec);
+ // RED's fmtp is the empty-key param: "<primary pt>/<primary pt>".
+ parameters.codecs[1].params[""] = "111/111";
+ EXPECT_TRUE(receive_channel_->SetReceiverParameters(parameters));
+ EXPECT_THAT(GetRecvStreamConfig(kSsrcX).decoder_map,
+ (ContainerEq<std::map<int, webrtc::SdpAudioFormat>>(
+ {{111, {"opus", 48000, 2}},
+ {112, {"red", 48000, 2, {{"", "111/111"}}}}})));
+}
+
+TEST_P(WebRtcVoiceEngineTestFake, SetSendBandwidthAuto) {
+ EXPECT_TRUE(SetupSendStream());
+
+ // Test that when autobw is enabled, bitrate is kept as the default
+ // value. autobw is enabled for the following tests because the target
+ // bitrate is <= 0.
+
+ // PCMU, default bitrate == 64000.
+ TestMaxSendBandwidth(kPcmuCodec, -1, true, 64000);
+
+ // opus, default bitrate == 32000 in mono.
+ TestMaxSendBandwidth(kOpusCodec, -1, true, 32000);
+}
+
+TEST_P(WebRtcVoiceEngineTestFake, SetMaxSendBandwidthMultiRateAsCaller) {
+
+ EXPECT_TRUE(SetupSendStream());
+
+ // opus is multirate, so requested caps below the codec maximum are
+ // applied as-is.
+ TestMaxSendBandwidth(kOpusCodec, 96000, true, 96000);
+ TestMaxSendBandwidth(kOpusCodec, 48000, true, 48000);
+ // Rates above the max (510000) should be capped.
+ TestMaxSendBandwidth(kOpusCodec, 600000, true, 510000);
+}
+
+TEST_P(WebRtcVoiceEngineTestFake, SetMaxSendBandwidthFixedRateAsCaller) {
+ EXPECT_TRUE(SetupSendStream());
+
+ // Test that we can only set a maximum bitrate for a fixed-rate codec
+ // if it's bigger than the fixed rate.
+
+ // PCMU, fixed bitrate == 64000.
+ TestMaxSendBandwidth(kPcmuCodec, 0, true, 64000);
+ TestMaxSendBandwidth(kPcmuCodec, 1, false, 64000);
+ TestMaxSendBandwidth(kPcmuCodec, 128000, true, 64000);
+ TestMaxSendBandwidth(kPcmuCodec, 32000, false, 64000);
+ TestMaxSendBandwidth(kPcmuCodec, 64000, true, 64000);
+ TestMaxSendBandwidth(kPcmuCodec, 63999, false, 64000);
+ TestMaxSendBandwidth(kPcmuCodec, 64001, true, 64000);
+}
+
+TEST_P(WebRtcVoiceEngineTestFake, SetMaxSendBandwidthMultiRateAsCallee) {
+ EXPECT_TRUE(SetupChannel());
+ const int kDesiredBitrate = 128000;
+ cricket::AudioSenderParameter parameters;
+ parameters.codecs = engine_->send_codecs();
+ parameters.max_bandwidth_bps = kDesiredBitrate;
+ SetSenderParameters(parameters);
+
+ // Callee case: the stream is added after the bandwidth cap was set, and
+ // must still honor it.
+ EXPECT_TRUE(send_channel_->AddSendStream(
+ cricket::StreamParams::CreateLegacy(kSsrcX)));
+
+ EXPECT_EQ(kDesiredBitrate, GetCodecBitrate(kSsrcX));
+}
+
+// Test that bitrate cannot be set for CBR codecs.
+// Bitrate is ignored if it is higher than the fixed bitrate.
+// Bitrate less then the fixed bitrate is an error.
+TEST_P(WebRtcVoiceEngineTestFake, SetMaxSendBandwidthCbr) {
+ EXPECT_TRUE(SetupSendStream());
+
+ // PCMU, default bitrate == 64000.
+ SetSenderParameters(send_parameters_);
+ EXPECT_EQ(64000, GetCodecBitrate(kSsrcX));
+
+ send_parameters_.max_bandwidth_bps = 128000;
+ SetSenderParameters(send_parameters_);
+ EXPECT_EQ(64000, GetCodecBitrate(kSsrcX));
+
+ send_parameters_.max_bandwidth_bps = 128;
+ EXPECT_FALSE(send_channel_->SetSenderParameters(send_parameters_));
+ EXPECT_EQ(64000, GetCodecBitrate(kSsrcX));
+}
+
+// Test that the per-stream bitrate limit and the global
+// bitrate limit both apply.
+TEST_P(WebRtcVoiceEngineTestFake, SetMaxBitratePerStream) {
+ EXPECT_TRUE(SetupSendStream());
+
+ // opus, default bitrate == 32000.
+ SetAndExpectMaxBitrate(kOpusCodec, 0, 0, true, 32000);
+ SetAndExpectMaxBitrate(kOpusCodec, 48000, 0, true, 48000);
+ SetAndExpectMaxBitrate(kOpusCodec, 48000, 64000, true, 48000);
+ SetAndExpectMaxBitrate(kOpusCodec, 64000, 48000, true, 48000);
+
+ // CBR codecs allow both maximums to exceed the bitrate.
+ SetAndExpectMaxBitrate(kPcmuCodec, 0, 0, true, 64000);
+ SetAndExpectMaxBitrate(kPcmuCodec, 64001, 0, true, 64000);
+ SetAndExpectMaxBitrate(kPcmuCodec, 0, 64001, true, 64000);
+ SetAndExpectMaxBitrate(kPcmuCodec, 64001, 64001, true, 64000);
+
+ // CBR codecs don't allow per stream maximums to be too low.
+ SetAndExpectMaxBitrate(kPcmuCodec, 0, 63999, false, 64000);
+ SetAndExpectMaxBitrate(kPcmuCodec, 64001, 63999, false, 64000);
+}
+
+// Test that an attempt to set RtpParameters for a stream that does not exist
+// fails.
+TEST_P(WebRtcVoiceEngineTestFake, CannotSetMaxBitrateForNonexistentStream) {
+ EXPECT_TRUE(SetupChannel());
+ webrtc::RtpParameters nonexistent_parameters =
+ send_channel_->GetRtpSendParameters(kSsrcX);
+ // Unknown SSRC yields empty parameters.
+ EXPECT_EQ(0u, nonexistent_parameters.encodings.size());
+
+ nonexistent_parameters.encodings.push_back(webrtc::RtpEncodingParameters());
+ EXPECT_FALSE(
+ send_channel_->SetRtpSendParameters(kSsrcX, nonexistent_parameters).ok());
+}
+
+TEST_P(WebRtcVoiceEngineTestFake,
+ CannotSetRtpSendParametersWithIncorrectNumberOfEncodings) {
+ // This test verifies that setting RtpParameters succeeds only if
+ // the structure contains exactly one encoding.
+ // TODO(skvlad): Update this test when we start supporting setting parameters
+ // for each encoding individually.
+
+ EXPECT_TRUE(SetupSendStream());
+ webrtc::RtpParameters parameters =
+ send_channel_->GetRtpSendParameters(kSsrcX);
+ // Two or more encodings should result in failure.
+ parameters.encodings.push_back(webrtc::RtpEncodingParameters());
+ EXPECT_FALSE(send_channel_->SetRtpSendParameters(kSsrcX, parameters).ok());
+ // Zero encodings should also fail.
+ parameters.encodings.clear();
+ EXPECT_FALSE(send_channel_->SetRtpSendParameters(kSsrcX, parameters).ok());
+}
+
+// Changing the SSRC through RtpParameters is not allowed.
+TEST_P(WebRtcVoiceEngineTestFake, CannotSetSsrcInRtpSendParameters) {
+ EXPECT_TRUE(SetupSendStream());
+ webrtc::RtpParameters parameters =
+ send_channel_->GetRtpSendParameters(kSsrcX);
+ parameters.encodings[0].ssrc = 0xdeadbeef;
+ EXPECT_FALSE(send_channel_->SetRtpSendParameters(kSsrcX, parameters).ok());
+}
+
+// Test that a stream will not be sending if its encoding is made
+// inactive through SetRtpSendParameters.
+TEST_P(WebRtcVoiceEngineTestFake, SetRtpParametersEncodingsActive) {
+ EXPECT_TRUE(SetupSendStream());
+ SetSend(true);
+ EXPECT_TRUE(GetSendStream(kSsrcX).IsSending());
+ // Get current parameters and change "active" to false.
+ webrtc::RtpParameters parameters =
+ send_channel_->GetRtpSendParameters(kSsrcX);
+ ASSERT_EQ(1u, parameters.encodings.size());
+ ASSERT_TRUE(parameters.encodings[0].active);
+ parameters.encodings[0].active = false;
+ EXPECT_TRUE(send_channel_->SetRtpSendParameters(kSsrcX, parameters).ok());
+ EXPECT_FALSE(GetSendStream(kSsrcX).IsSending());
+
+ // Now change it back to active and verify we resume sending.
+ // This should occur even when other parameters are updated.
+ parameters.encodings[0].active = true;
+ parameters.encodings[0].max_bitrate_bps = absl::optional<int>(6000);
+ EXPECT_TRUE(send_channel_->SetRtpSendParameters(kSsrcX, parameters).ok());
+ EXPECT_TRUE(GetSendStream(kSsrcX).IsSending());
+}
+
+// Test that toggling adaptive_ptime switches the audio network adaptor on and
+// off and adjusts the stream's min bitrate (16 kbps with adaptive ptime,
+// 32 kbps without).
+TEST_P(WebRtcVoiceEngineTestFake, SetRtpParametersAdaptivePtime) {
+ EXPECT_TRUE(SetupSendStream());
+ // Get current parameters and change "adaptive_ptime" to true.
+ webrtc::RtpParameters parameters =
+ send_channel_->GetRtpSendParameters(kSsrcX);
+ ASSERT_EQ(1u, parameters.encodings.size());
+ ASSERT_FALSE(parameters.encodings[0].adaptive_ptime);
+ parameters.encodings[0].adaptive_ptime = true;
+ EXPECT_TRUE(send_channel_->SetRtpSendParameters(kSsrcX, parameters).ok());
+ EXPECT_TRUE(GetAudioNetworkAdaptorConfig(kSsrcX));
+ EXPECT_EQ(16000, GetSendStreamConfig(kSsrcX).min_bitrate_bps);
+
+ parameters.encodings[0].adaptive_ptime = false;
+ EXPECT_TRUE(send_channel_->SetRtpSendParameters(kSsrcX, parameters).ok());
+ EXPECT_FALSE(GetAudioNetworkAdaptorConfig(kSsrcX));
+ EXPECT_EQ(32000, GetSendStreamConfig(kSsrcX).min_bitrate_bps);
+}
+
+TEST_P(WebRtcVoiceEngineTestFake,
+ DisablingAdaptivePtimeDoesNotRemoveAudioNetworkAdaptorFromOptions) {
+ EXPECT_TRUE(SetupSendStream());
+ send_parameters_.options.audio_network_adaptor = true;
+ send_parameters_.options.audio_network_adaptor_config = {"1234"};
+ SetSenderParameters(send_parameters_);
+ EXPECT_EQ(send_parameters_.options.audio_network_adaptor_config,
+ GetAudioNetworkAdaptorConfig(kSsrcX));
+
+ // Turning adaptive_ptime off must not clobber the adaptor config that was
+ // set explicitly through the options above.
+ webrtc::RtpParameters parameters =
+ send_channel_->GetRtpSendParameters(kSsrcX);
+ parameters.encodings[0].adaptive_ptime = false;
+ EXPECT_TRUE(send_channel_->SetRtpSendParameters(kSsrcX, parameters).ok());
+ EXPECT_EQ(send_parameters_.options.audio_network_adaptor_config,
+ GetAudioNetworkAdaptorConfig(kSsrcX));
+}
+
+// Test that the WebRTC-Audio-AdaptivePtime field trial enables the audio
+// network adaptor by default.
+TEST_P(WebRtcVoiceEngineTestFake, AdaptivePtimeFieldTrial) {
+ webrtc::test::ScopedKeyValueConfig override_field_trials(
+ field_trials_, "WebRTC-Audio-AdaptivePtime/enabled:true/");
+ EXPECT_TRUE(SetupSendStream());
+ EXPECT_TRUE(GetAudioNetworkAdaptorConfig(kSsrcX));
+}
+
+// Test that SetRtpSendParameters configures the correct encoding channel for
+// each SSRC.
+TEST_P(WebRtcVoiceEngineTestFake, RtpParametersArePerStream) {
+ SetupForMultiSendStream();
+ // Create send streams.
+ for (uint32_t ssrc : kSsrcs4) {
+ EXPECT_TRUE(send_channel_->AddSendStream(
+ cricket::StreamParams::CreateLegacy(ssrc)));
+ }
+ // Configure one stream to be limited by the stream config, another to be
+ // limited by the global max, and the third one with no per-stream limit
+ // (still subject to the global limit).
+ SetGlobalMaxBitrate(kOpusCodec, 32000);
+ EXPECT_TRUE(SetMaxBitrateForStream(kSsrcs4[0], 24000));
+ EXPECT_TRUE(SetMaxBitrateForStream(kSsrcs4[1], 48000));
+ EXPECT_TRUE(SetMaxBitrateForStream(kSsrcs4[2], -1));
+
+ EXPECT_EQ(24000, GetCodecBitrate(kSsrcs4[0]));
+ EXPECT_EQ(32000, GetCodecBitrate(kSsrcs4[1]));
+ EXPECT_EQ(32000, GetCodecBitrate(kSsrcs4[2]));
+
+ // Remove the global cap; the streams should switch to their respective
+ // maximums (or remain unchanged if there was no other limit on them.)
+ SetGlobalMaxBitrate(kOpusCodec, -1);
+ EXPECT_EQ(24000, GetCodecBitrate(kSsrcs4[0]));
+ EXPECT_EQ(48000, GetCodecBitrate(kSsrcs4[1]));
+ EXPECT_EQ(32000, GetCodecBitrate(kSsrcs4[2]));
+}
+
+// Test that GetRtpSendParameters returns the currently configured codecs.
+TEST_P(WebRtcVoiceEngineTestFake, GetRtpSendParametersCodecs) {
+ EXPECT_TRUE(SetupSendStream());
+ cricket::AudioSenderParameter parameters;
+ parameters.codecs.push_back(kOpusCodec);
+ parameters.codecs.push_back(kPcmuCodec);
+ SetSenderParameters(parameters);
+
+ webrtc::RtpParameters rtp_parameters =
+ send_channel_->GetRtpSendParameters(kSsrcX);
+ ASSERT_EQ(2u, rtp_parameters.codecs.size());
+ EXPECT_EQ(kOpusCodec.ToCodecParameters(), rtp_parameters.codecs[0]);
+ EXPECT_EQ(kPcmuCodec.ToCodecParameters(), rtp_parameters.codecs[1]);
+}
+
+// Test that GetRtpSendParameters returns the currently configured RTCP CNAME.
+TEST_P(WebRtcVoiceEngineTestFake, GetRtpSendParametersRtcpCname) {
+ cricket::StreamParams params = cricket::StreamParams::CreateLegacy(kSsrcX);
+ params.cname = "rtcpcname";
+ EXPECT_TRUE(SetupSendStream(params));
+
+ webrtc::RtpParameters rtp_parameters =
+ send_channel_->GetRtpSendParameters(kSsrcX);
+ EXPECT_STREQ("rtcpcname", rtp_parameters.rtcp.cname.c_str());
+}
+
+// Test that adding a header extension through SetRtpSendParameters is
+// rejected as INVALID_MODIFICATION (extensions are negotiated elsewhere).
+TEST_P(WebRtcVoiceEngineTestFake,
+ DetectRtpSendParameterHeaderExtensionsChange) {
+ EXPECT_TRUE(SetupSendStream());
+
+ webrtc::RtpParameters rtp_parameters =
+ send_channel_->GetRtpSendParameters(kSsrcX);
+ rtp_parameters.header_extensions.emplace_back();
+
+ EXPECT_NE(0u, rtp_parameters.header_extensions.size());
+
+ webrtc::RTCError result =
+ send_channel_->SetRtpSendParameters(kSsrcX, rtp_parameters);
+ EXPECT_EQ(webrtc::RTCErrorType::INVALID_MODIFICATION, result.type());
+}
+
+// Test that GetRtpSendParameters returns an SSRC.
+TEST_P(WebRtcVoiceEngineTestFake, GetRtpSendParametersSsrc) {
+ EXPECT_TRUE(SetupSendStream());
+ webrtc::RtpParameters rtp_parameters =
+ send_channel_->GetRtpSendParameters(kSsrcX);
+ ASSERT_EQ(1u, rtp_parameters.encodings.size());
+ EXPECT_EQ(kSsrcX, rtp_parameters.encodings[0].ssrc);
+}
+
+// Test that if we set/get parameters multiple times, we get the same results.
+TEST_P(WebRtcVoiceEngineTestFake, SetAndGetRtpSendParameters) {
+ EXPECT_TRUE(SetupSendStream());
+ cricket::AudioSenderParameter parameters;
+ parameters.codecs.push_back(kOpusCodec);
+ parameters.codecs.push_back(kPcmuCodec);
+ SetSenderParameters(parameters);
+
+ webrtc::RtpParameters initial_params =
+ send_channel_->GetRtpSendParameters(kSsrcX);
+
+ // We should be able to set the params we just got.
+ EXPECT_TRUE(send_channel_->SetRtpSendParameters(kSsrcX, initial_params).ok());
+
+ // ... And this shouldn't change the params returned by GetRtpSendParameters.
+ webrtc::RtpParameters new_params =
+ send_channel_->GetRtpSendParameters(kSsrcX);
+ EXPECT_EQ(initial_params, send_channel_->GetRtpSendParameters(kSsrcX));
+}
+
+// Test that we remove the codec from RTP parameters if it's not negotiated
+// anymore.
+TEST_P(WebRtcVoiceEngineTestFake,
+ SetSendParametersRemovesSelectedCodecFromRtpParameters) {
+ EXPECT_TRUE(SetupSendStream());
+ cricket::AudioSenderParameter parameters;
+ parameters.codecs.push_back(kOpusCodec);
+ parameters.codecs.push_back(kPcmuCodec);
+ SetSenderParameters(parameters);
+
+ webrtc::RtpParameters initial_params =
+ send_channel_->GetRtpSendParameters(kSsrcX);
+
+ // Force the encoding to use Opus explicitly.
+ webrtc::RtpCodec opus_rtp_codec;
+ opus_rtp_codec.name = "opus";
+ opus_rtp_codec.kind = cricket::MEDIA_TYPE_AUDIO;
+ opus_rtp_codec.num_channels = 2;
+ opus_rtp_codec.clock_rate = 48000;
+ initial_params.encodings[0].codec = opus_rtp_codec;
+
+ // We should be able to set the params with the opus codec that has been
+ // negotiated.
+ EXPECT_TRUE(send_channel_->SetRtpSendParameters(kSsrcX, initial_params).ok());
+
+ parameters.codecs.clear();
+ parameters.codecs.push_back(kPcmuCodec);
+ SetSenderParameters(parameters);
+
+ // Since Opus is no longer negotiated, the RTP parameters should not have a
+ // forced codec anymore.
+ webrtc::RtpParameters new_params =
+ send_channel_->GetRtpSendParameters(kSsrcX);
+ EXPECT_EQ(new_params.encodings[0].codec, absl::nullopt);
+}
+
+// Test that max_bitrate_bps in send stream config gets updated correctly when
+// SetRtpSendParameters is called.
+TEST_P(WebRtcVoiceEngineTestFake, SetRtpSendParameterUpdatesMaxBitrate) {
+ EXPECT_TRUE(SetupSendStream());
+ cricket::AudioSenderParameter send_parameters;
+ send_parameters.codecs.push_back(kOpusCodec);
+ SetSenderParameters(send_parameters);
+
+ webrtc::RtpParameters rtp_parameters =
+ send_channel_->GetRtpSendParameters(kSsrcX);
+ // Expect empty on parameters.encodings[0].max_bitrate_bps;
+ EXPECT_FALSE(rtp_parameters.encodings[0].max_bitrate_bps);
+
+ constexpr int kMaxBitrateBps = 6000;
+ rtp_parameters.encodings[0].max_bitrate_bps = kMaxBitrateBps;
+ EXPECT_TRUE(send_channel_->SetRtpSendParameters(kSsrcX, rtp_parameters).ok());
+
+ const int max_bitrate = GetSendStreamConfig(kSsrcX).max_bitrate_bps;
+ EXPECT_EQ(max_bitrate, kMaxBitrateBps);
+}
+
+// Tests that when RTCRtpEncodingParameters.bitrate_priority gets set to
+// a value <= 0, setting the parameters returns false.
+TEST_P(WebRtcVoiceEngineTestFake, SetRtpSendParameterInvalidBitratePriority) {
+ EXPECT_TRUE(SetupSendStream());
+ webrtc::RtpParameters rtp_parameters =
+ send_channel_->GetRtpSendParameters(kSsrcX);
+ EXPECT_EQ(1UL, rtp_parameters.encodings.size());
+ EXPECT_EQ(webrtc::kDefaultBitratePriority,
+ rtp_parameters.encodings[0].bitrate_priority);
+
+ rtp_parameters.encodings[0].bitrate_priority = 0;
+ EXPECT_FALSE(
+ send_channel_->SetRtpSendParameters(kSsrcX, rtp_parameters).ok());
+ rtp_parameters.encodings[0].bitrate_priority = -1.0;
+ EXPECT_FALSE(
+ send_channel_->SetRtpSendParameters(kSsrcX, rtp_parameters).ok());
+}
+
+// Test that the bitrate_priority in the send stream config gets updated when
+// SetRtpSendParameters is set for the VoiceMediaChannel.
+TEST_P(WebRtcVoiceEngineTestFake, SetRtpSendParameterUpdatesBitratePriority) {
+ EXPECT_TRUE(SetupSendStream());
+ webrtc::RtpParameters rtp_parameters =
+ send_channel_->GetRtpSendParameters(kSsrcX);
+
+ EXPECT_EQ(1UL, rtp_parameters.encodings.size());
+ EXPECT_EQ(webrtc::kDefaultBitratePriority,
+ rtp_parameters.encodings[0].bitrate_priority);
+ double new_bitrate_priority = 2.0;
+ rtp_parameters.encodings[0].bitrate_priority = new_bitrate_priority;
+ EXPECT_TRUE(send_channel_->SetRtpSendParameters(kSsrcX, rtp_parameters).ok());
+
+ // The priority should get set for both the audio channel's rtp parameters
+ // and the audio send stream's audio config.
+ EXPECT_EQ(new_bitrate_priority, send_channel_->GetRtpSendParameters(kSsrcX)
+ .encodings[0]
+ .bitrate_priority);
+ EXPECT_EQ(new_bitrate_priority, GetSendStreamConfig(kSsrcX).bitrate_priority);
+}
+
+// Test that GetRtpReceiverParameters returns the currently configured codecs.
+TEST_P(WebRtcVoiceEngineTestFake, GetRtpReceiveParametersCodecs) {
+ EXPECT_TRUE(SetupRecvStream());
+ cricket::AudioReceiverParameters parameters;
+ parameters.codecs.push_back(kOpusCodec);
+ parameters.codecs.push_back(kPcmuCodec);
+ EXPECT_TRUE(receive_channel_->SetReceiverParameters(parameters));
+
+ webrtc::RtpParameters rtp_parameters =
+ receive_channel_->GetRtpReceiverParameters(kSsrcX);
+ ASSERT_EQ(2u, rtp_parameters.codecs.size());
+ EXPECT_EQ(kOpusCodec.ToCodecParameters(), rtp_parameters.codecs[0]);
+ EXPECT_EQ(kPcmuCodec.ToCodecParameters(), rtp_parameters.codecs[1]);
+}
+
+// Test that GetRtpReceiverParameters returns an SSRC.
+TEST_P(WebRtcVoiceEngineTestFake, GetRtpReceiveParametersSsrc) {
+ EXPECT_TRUE(SetupRecvStream());
+ webrtc::RtpParameters rtp_parameters =
+ receive_channel_->GetRtpReceiverParameters(kSsrcX);
+ ASSERT_EQ(1u, rtp_parameters.encodings.size());
+ EXPECT_EQ(kSsrcX, rtp_parameters.encodings[0].ssrc);
+}
+
+// Test that if we set/get parameters multiple times, we get the same results.
+TEST_P(WebRtcVoiceEngineTestFake, SetAndGetRtpReceiveParameters) {
+ EXPECT_TRUE(SetupRecvStream());
+ cricket::AudioReceiverParameters parameters;
+ parameters.codecs.push_back(kOpusCodec);
+ parameters.codecs.push_back(kPcmuCodec);
+ EXPECT_TRUE(receive_channel_->SetReceiverParameters(parameters));
+
+ webrtc::RtpParameters initial_params =
+ receive_channel_->GetRtpReceiverParameters(kSsrcX);
+
+ // ... And this shouldn't change the params returned by
+ // GetRtpReceiverParameters.
+ webrtc::RtpParameters new_params =
+ receive_channel_->GetRtpReceiverParameters(kSsrcX);
+ EXPECT_EQ(initial_params, receive_channel_->GetRtpReceiverParameters(kSsrcX));
+}
+
+// Test that GetRtpReceiverParameters returns parameters correctly when SSRCs
+// aren't signaled. It should return an empty "RtpEncodingParameters" when
+// configured to receive an unsignaled stream and no packets have been received
+// yet, and start returning the SSRC once a packet has been received.
+TEST_P(WebRtcVoiceEngineTestFake, GetRtpReceiveParametersWithUnsignaledSsrc) {
+ ASSERT_TRUE(SetupChannel());
+ // Call necessary methods to configure receiving a default stream as
+ // soon as it arrives.
+ cricket::AudioReceiverParameters parameters;
+ parameters.codecs.push_back(kOpusCodec);
+ parameters.codecs.push_back(kPcmuCodec);
+ EXPECT_TRUE(receive_channel_->SetReceiverParameters(parameters));
+
+ // Call GetDefaultRtpReceiveParameters before configured to receive an
+ // unsignaled stream. Should return nothing.
+ EXPECT_EQ(webrtc::RtpParameters(),
+ receive_channel_->GetDefaultRtpReceiveParameters());
+
+ // Set a sink for an unsignaled stream.
+ std::unique_ptr<FakeAudioSink> fake_sink(new FakeAudioSink());
+ receive_channel_->SetDefaultRawAudioSink(std::move(fake_sink));
+
+ // Call GetDefaultRtpReceiveParameters before the SSRC is known.
+ webrtc::RtpParameters rtp_parameters =
+ receive_channel_->GetDefaultRtpReceiveParameters();
+ ASSERT_EQ(1u, rtp_parameters.encodings.size());
+ EXPECT_FALSE(rtp_parameters.encodings[0].ssrc);
+
+ // Receive PCMU packet (SSRC=1).
+ DeliverPacket(kPcmuFrame, sizeof(kPcmuFrame));
+
+ // The `ssrc` member should still be unset.
+ rtp_parameters = receive_channel_->GetDefaultRtpReceiveParameters();
+ ASSERT_EQ(1u, rtp_parameters.encodings.size());
+ EXPECT_FALSE(rtp_parameters.encodings[0].ssrc);
+}
+
+// Test that a received packet parsed without an extension map still has its
+// negotiated header extensions (audio level here) identified by the channel
+// before it reaches the call.
+TEST_P(WebRtcVoiceEngineTestFake, OnPacketReceivedIdentifiesExtensions) {
+ ASSERT_TRUE(SetupChannel());
+ cricket::AudioReceiverParameters parameters = recv_parameters_;
+ parameters.extensions.push_back(
+ RtpExtension(RtpExtension::kAudioLevelUri, /*id=*/1));
+ ASSERT_TRUE(receive_channel_->SetReceiverParameters(parameters));
+ webrtc::RtpHeaderExtensionMap extension_map(parameters.extensions);
+ webrtc::RtpPacketReceived reference_packet(&extension_map);
+ constexpr uint8_t kAudioLevel = 123;
+ reference_packet.SetExtension<webrtc::AudioLevel>(/*voice_activity=*/true,
+ kAudioLevel);
+ // Create a packet without the extension map but with the same content.
+ webrtc::RtpPacketReceived received_packet;
+ ASSERT_TRUE(received_packet.Parse(reference_packet.Buffer()));
+
+ receive_channel_->OnPacketReceived(received_packet);
+ // Packet delivery is asynchronous; pump the current thread's queue.
+ rtc::Thread::Current()->ProcessMessages(0);
+
+ bool voice_activity;
+ uint8_t audio_level;
+ EXPECT_TRUE(call_.last_received_rtp_packet().GetExtension<webrtc::AudioLevel>(
+ &voice_activity, &audio_level));
+ EXPECT_EQ(audio_level, kAudioLevel);
+}
+
+// Test that we apply codecs properly.
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecs) {
+ EXPECT_TRUE(SetupSendStream());
+ cricket::AudioSenderParameter parameters;
+ parameters.codecs.push_back(kOpusCodec);
+ parameters.codecs.push_back(kPcmuCodec);
+ parameters.codecs.push_back(kCn8000Codec);
+ parameters.codecs[0].id = 96;
+ parameters.codecs[0].bitrate = 22000;
+ SetSenderParameters(parameters);
+ // The first codec (Opus, remapped to PT 96) should be selected for sending.
+ const auto& send_codec_spec = *GetSendStreamConfig(kSsrcX).send_codec_spec;
+ EXPECT_EQ(96, send_codec_spec.payload_type);
+ EXPECT_EQ(22000, send_codec_spec.target_bitrate_bps);
+ EXPECT_STRCASEEQ("OPUS", send_codec_spec.format.name.c_str());
+ EXPECT_NE(send_codec_spec.format.clockrate_hz, 8000);
+ // CN 8000 runs at a different clock rate than Opus, so no CN payload type
+ // is picked, and without a telephone-event codec DTMF is unavailable.
+ EXPECT_EQ(absl::nullopt, send_codec_spec.cng_payload_type);
+ EXPECT_FALSE(send_channel_->CanInsertDtmf());
+}
+
+// Test that we use Opus/Red by default when it is
+// listed as the first codec and there is an fmtp line.
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsRed) {
+ EXPECT_TRUE(SetupSendStream());
+ cricket::AudioSenderParameter parameters;
+ parameters.codecs.push_back(kRed48000Codec);
+ parameters.codecs[0].params[""] = "111/111";
+ parameters.codecs.push_back(kOpusCodec);
+ SetSenderParameters(parameters);
+ const auto& send_codec_spec = *GetSendStreamConfig(kSsrcX).send_codec_spec;
+ EXPECT_EQ(111, send_codec_spec.payload_type);
+ EXPECT_STRCASEEQ("opus", send_codec_spec.format.name.c_str());
+ EXPECT_EQ(112, send_codec_spec.red_payload_type);
+}
+
+// Test that we do not use Opus/Red by default when it is
+// listed as the first codec but there is no fmtp line.
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsRedNoFmtp) {
+ EXPECT_TRUE(SetupSendStream());
+ cricket::AudioSenderParameter parameters;
+ parameters.codecs.push_back(kRed48000Codec);
+ parameters.codecs.push_back(kOpusCodec);
+ SetSenderParameters(parameters);
+ const auto& send_codec_spec = *GetSendStreamConfig(kSsrcX).send_codec_spec;
+ EXPECT_EQ(111, send_codec_spec.payload_type);
+ EXPECT_STRCASEEQ("opus", send_codec_spec.format.name.c_str());
+ EXPECT_EQ(absl::nullopt, send_codec_spec.red_payload_type);
+}
+
+// Test that we do not use Opus/Red by default.
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsRedDefault) {
+ EXPECT_TRUE(SetupSendStream());
+ cricket::AudioSenderParameter parameters;
+ parameters.codecs.push_back(kOpusCodec);
+ parameters.codecs.push_back(kRed48000Codec);
+ parameters.codecs[1].params[""] = "111/111";
+ SetSenderParameters(parameters);
+ const auto& send_codec_spec = *GetSendStreamConfig(kSsrcX).send_codec_spec;
+ EXPECT_EQ(111, send_codec_spec.payload_type);
+ EXPECT_STRCASEEQ("opus", send_codec_spec.format.name.c_str());
+ EXPECT_EQ(absl::nullopt, send_codec_spec.red_payload_type);
+}
+
+// Test that the RED fmtp line must match the payload type.
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsRedFmtpMismatch) {
+ EXPECT_TRUE(SetupSendStream());
+ cricket::AudioSenderParameter parameters;
+ parameters.codecs.push_back(kRed48000Codec);
+ // fmtp points at PT 8, not the negotiated primary (111) — RED is rejected.
+ parameters.codecs[0].params[""] = "8/8";
+ parameters.codecs.push_back(kOpusCodec);
+ SetSenderParameters(parameters);
+ const auto& send_codec_spec = *GetSendStreamConfig(kSsrcX).send_codec_spec;
+ EXPECT_EQ(111, send_codec_spec.payload_type);
+ EXPECT_STRCASEEQ("opus", send_codec_spec.format.name.c_str());
+ EXPECT_EQ(absl::nullopt, send_codec_spec.red_payload_type);
+}
+
+// Test that the RED fmtp line must show 2..32 payloads.
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsRedFmtpAmountOfRedundancy) {
+ EXPECT_TRUE(SetupSendStream());
+ cricket::AudioSenderParameter parameters;
+ parameters.codecs.push_back(kRed48000Codec);
+ parameters.codecs[0].params[""] = "111";
+ parameters.codecs.push_back(kOpusCodec);
+ SetSenderParameters(parameters);
+ const auto& send_codec_spec = *GetSendStreamConfig(kSsrcX).send_codec_spec;
+ EXPECT_EQ(111, send_codec_spec.payload_type);
+ EXPECT_STRCASEEQ("opus", send_codec_spec.format.name.c_str());
+ EXPECT_EQ(absl::nullopt, send_codec_spec.red_payload_type);
+ for (int i = 1; i < 32; i++) {
+ parameters.codecs[0].params[""] += "/111";
+ SetSenderParameters(parameters);
+ const auto& send_codec_spec2 = *GetSendStreamConfig(kSsrcX).send_codec_spec;
+ EXPECT_EQ(111, send_codec_spec2.payload_type);
+ EXPECT_STRCASEEQ("opus", send_codec_spec2.format.name.c_str());
+ EXPECT_EQ(112, send_codec_spec2.red_payload_type);
+ }
+ parameters.codecs[0].params[""] += "/111";
+ SetSenderParameters(parameters);
+ const auto& send_codec_spec3 = *GetSendStreamConfig(kSsrcX).send_codec_spec;
+ EXPECT_EQ(111, send_codec_spec3.payload_type);
+ EXPECT_STRCASEEQ("opus", send_codec_spec3.format.name.c_str());
+ EXPECT_EQ(absl::nullopt, send_codec_spec3.red_payload_type);
+}
+
+// Test that WebRtcVoiceEngine reconfigures, rather than recreates its
+// AudioSendStream.
+// The created-stream count must stay constant across repeated
+// SetSenderParameters() calls, including a no-op call with identical codecs.
+TEST_P(WebRtcVoiceEngineTestFake, DontRecreateSendStream) {
+ EXPECT_TRUE(SetupSendStream());
+ cricket::AudioSenderParameter parameters;
+ parameters.codecs.push_back(kOpusCodec);
+ parameters.codecs.push_back(kPcmuCodec);
+ parameters.codecs.push_back(kCn8000Codec);
+ parameters.codecs[0].id = 96;
+ parameters.codecs[0].bitrate = 48000;
+ const int initial_num = call_.GetNumCreatedSendStreams();
+ SetSenderParameters(parameters);
+ EXPECT_EQ(initial_num, call_.GetNumCreatedSendStreams());
+ // Calling SetSendCodec again with same codec which is already set.
+ // In this case media channel shouldn't send codec to VoE.
+ SetSenderParameters(parameters);
+ EXPECT_EQ(initial_num, call_.GetNumCreatedSendStreams());
+}
+
+// TODO(ossu): Revisit if these tests need to be here, now that these kinds of
+// tests should be available in AudioEncoderOpusTest.
+
+// Invalid Opus configurations below must make SetSenderParameters() fail:
+// wrong clockrate, channels=0, or channels=1 (regardless of the stereo fmtp).
+
+// Test that if clockrate is not 48000 for opus, we fail.
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecOpusBadClockrate) {
+ EXPECT_TRUE(SetupSendStream());
+ cricket::AudioSenderParameter parameters;
+ parameters.codecs.push_back(kOpusCodec);
+ parameters.codecs[0].bitrate = 0;
+ parameters.codecs[0].clockrate = 50000;
+ EXPECT_FALSE(send_channel_->SetSenderParameters(parameters));
+}
+
+// Test that if channels=0 for opus, we fail.
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecOpusBad0ChannelsNoStereo) {
+ EXPECT_TRUE(SetupSendStream());
+ cricket::AudioSenderParameter parameters;
+ parameters.codecs.push_back(kOpusCodec);
+ parameters.codecs[0].bitrate = 0;
+ parameters.codecs[0].channels = 0;
+ EXPECT_FALSE(send_channel_->SetSenderParameters(parameters));
+}
+
+// Test that if channels=0 for opus, we fail.
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecOpusBad0Channels1Stereo) {
+ EXPECT_TRUE(SetupSendStream());
+ cricket::AudioSenderParameter parameters;
+ parameters.codecs.push_back(kOpusCodec);
+ parameters.codecs[0].bitrate = 0;
+ parameters.codecs[0].channels = 0;
+ parameters.codecs[0].params["stereo"] = "1";
+ EXPECT_FALSE(send_channel_->SetSenderParameters(parameters));
+}
+
+// Test that if channel is 1 for opus and there's no stereo, we fail.
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecOpus1ChannelNoStereo) {
+ EXPECT_TRUE(SetupSendStream());
+ cricket::AudioSenderParameter parameters;
+ parameters.codecs.push_back(kOpusCodec);
+ parameters.codecs[0].bitrate = 0;
+ parameters.codecs[0].channels = 1;
+ EXPECT_FALSE(send_channel_->SetSenderParameters(parameters));
+}
+
+// Test that if channel is 1 for opus and stereo=0, we fail.
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecOpusBad1Channel0Stereo) {
+ EXPECT_TRUE(SetupSendStream());
+ cricket::AudioSenderParameter parameters;
+ parameters.codecs.push_back(kOpusCodec);
+ parameters.codecs[0].bitrate = 0;
+ parameters.codecs[0].channels = 1;
+ parameters.codecs[0].params["stereo"] = "0";
+ EXPECT_FALSE(send_channel_->SetSenderParameters(parameters));
+}
+
+// Test that if channel is 1 for opus and stereo=1, we fail.
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecOpusBad1Channel1Stereo) {
+ EXPECT_TRUE(SetupSendStream());
+ cricket::AudioSenderParameter parameters;
+ parameters.codecs.push_back(kOpusCodec);
+ parameters.codecs[0].bitrate = 0;
+ parameters.codecs[0].channels = 1;
+ parameters.codecs[0].params["stereo"] = "1";
+ EXPECT_FALSE(send_channel_->SetSenderParameters(parameters));
+}
+
+// Bitrate resolution for Opus: bitrate=0 selects a default (32 kbps mono,
+// 64 kbps stereo); out-of-range values are clamped to [6000, 510000]; any
+// in-range value N is used as-is.
+
+// Test that with bitrate=0 and no stereo, bitrate is 32000.
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecOpusGood0BitrateNoStereo) {
+ EXPECT_TRUE(SetupSendStream());
+ cricket::AudioSenderParameter parameters;
+ parameters.codecs.push_back(kOpusCodec);
+ parameters.codecs[0].bitrate = 0;
+ SetSenderParameters(parameters);
+ CheckSendCodecBitrate(kSsrcX, "opus", 32000);
+}
+
+// Test that with bitrate=0 and stereo=0, bitrate is 32000.
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecOpusGood0Bitrate0Stereo) {
+ EXPECT_TRUE(SetupSendStream());
+ cricket::AudioSenderParameter parameters;
+ parameters.codecs.push_back(kOpusCodec);
+ parameters.codecs[0].bitrate = 0;
+ parameters.codecs[0].params["stereo"] = "0";
+ SetSenderParameters(parameters);
+ CheckSendCodecBitrate(kSsrcX, "opus", 32000);
+}
+
+// Test that with bitrate=invalid and stereo=0, bitrate is 32000.
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecOpusGoodXBitrate0Stereo) {
+ EXPECT_TRUE(SetupSendStream());
+ cricket::AudioSenderParameter parameters;
+ parameters.codecs.push_back(kOpusCodec);
+ parameters.codecs[0].params["stereo"] = "0";
+ // bitrate that's out of the range between 6000 and 510000 will be clamped.
+ parameters.codecs[0].bitrate = 5999;
+ SetSenderParameters(parameters);
+ CheckSendCodecBitrate(kSsrcX, "opus", 6000);
+
+ parameters.codecs[0].bitrate = 510001;
+ SetSenderParameters(parameters);
+ CheckSendCodecBitrate(kSsrcX, "opus", 510000);
+}
+
+// Test that with bitrate=0 and stereo=1, bitrate is 64000.
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecOpusGood0Bitrate1Stereo) {
+ EXPECT_TRUE(SetupSendStream());
+ cricket::AudioSenderParameter parameters;
+ parameters.codecs.push_back(kOpusCodec);
+ parameters.codecs[0].bitrate = 0;
+ parameters.codecs[0].params["stereo"] = "1";
+ SetSenderParameters(parameters);
+ CheckSendCodecBitrate(kSsrcX, "opus", 64000);
+}
+
+// Test that with bitrate=invalid and stereo=1, bitrate is 64000.
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecOpusGoodXBitrate1Stereo) {
+ EXPECT_TRUE(SetupSendStream());
+ cricket::AudioSenderParameter parameters;
+ parameters.codecs.push_back(kOpusCodec);
+ parameters.codecs[0].params["stereo"] = "1";
+ // bitrate that's out of the range between 6000 and 510000 will be clamped.
+ parameters.codecs[0].bitrate = 5999;
+ SetSenderParameters(parameters);
+ CheckSendCodecBitrate(kSsrcX, "opus", 6000);
+
+ parameters.codecs[0].bitrate = 510001;
+ SetSenderParameters(parameters);
+ CheckSendCodecBitrate(kSsrcX, "opus", 510000);
+}
+
+// Test that with bitrate=N and stereo unset, bitrate is N.
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecOpusGoodNBitrateNoStereo) {
+ EXPECT_TRUE(SetupSendStream());
+ cricket::AudioSenderParameter parameters;
+ parameters.codecs.push_back(kOpusCodec);
+ parameters.codecs[0].bitrate = 96000;
+ SetSenderParameters(parameters);
+ const auto& spec = *GetSendStreamConfig(kSsrcX).send_codec_spec;
+ EXPECT_EQ(111, spec.payload_type);
+ EXPECT_EQ(96000, spec.target_bitrate_bps);
+ EXPECT_EQ("opus", spec.format.name);
+ // Opus is always negotiated as 2-channel / 48 kHz on the wire.
+ EXPECT_EQ(2u, spec.format.num_channels);
+ EXPECT_EQ(48000, spec.format.clockrate_hz);
+}
+
+// Test that with bitrate=N and stereo=0, bitrate is N.
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecOpusGoodNBitrate0Stereo) {
+ EXPECT_TRUE(SetupSendStream());
+ cricket::AudioSenderParameter parameters;
+ parameters.codecs.push_back(kOpusCodec);
+ parameters.codecs[0].bitrate = 30000;
+ parameters.codecs[0].params["stereo"] = "0";
+ SetSenderParameters(parameters);
+ CheckSendCodecBitrate(kSsrcX, "opus", 30000);
+}
+
+// Test that with bitrate=N and without any parameters, bitrate is N.
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecOpusGoodNBitrateNoParameters) {
+ EXPECT_TRUE(SetupSendStream());
+ cricket::AudioSenderParameter parameters;
+ parameters.codecs.push_back(kOpusCodec);
+ parameters.codecs[0].bitrate = 30000;
+ SetSenderParameters(parameters);
+ CheckSendCodecBitrate(kSsrcX, "opus", 30000);
+}
+
+// Test that with bitrate=N and stereo=1, bitrate is N.
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecOpusGoodNBitrate1Stereo) {
+ EXPECT_TRUE(SetupSendStream());
+ cricket::AudioSenderParameter parameters;
+ parameters.codecs.push_back(kOpusCodec);
+ parameters.codecs[0].bitrate = 30000;
+ parameters.codecs[0].params["stereo"] = "1";
+ SetSenderParameters(parameters);
+ CheckSendCodecBitrate(kSsrcX, "opus", 30000);
+}
+
+// The helper's arguments are (min_str, expected_min, start_str, expected_start,
+// max_str, expected_max): SDP bitrate attribute strings paired with the
+// BitrateConstraints values they should produce (-1 = unset).
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsWithBitrates) {
+ SetSendCodecsShouldWorkForBitrates("100", 100000, "150", 150000, "200",
+ 200000);
+}
+
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsWithHighMaxBitrate) {
+ SetSendCodecsShouldWorkForBitrates("", 0, "", -1, "10000", 10000000);
+}
+
+TEST_P(WebRtcVoiceEngineTestFake,
+ SetSendCodecsWithoutBitratesUsesCorrectDefaults) {
+ SetSendCodecsShouldWorkForBitrates("", 0, "", -1, "", -1);
+}
+
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsCapsMinAndStartBitrate) {
+ // Negative SDP values are capped to unset/zero rather than passed through.
+ SetSendCodecsShouldWorkForBitrates("-1", 0, "-100", -1, "", -1);
+}
+
+TEST_P(WebRtcVoiceEngineTestFake, SetMaxSendBandwidthForAudioDoesntAffectBwe) {
+ SetSendCodecsShouldWorkForBitrates("100", 100000, "150", 150000, "200",
+ 200000);
+ send_parameters_.max_bandwidth_bps = 100000;
+ // Setting max bitrate should keep previous min bitrate
+ // Setting max bitrate should not reset start bitrate.
+ EXPECT_CALL(*call_.GetMockTransportControllerSend(),
+ SetSdpBitrateParameters(
+ AllOf(Field(&BitrateConstraints::min_bitrate_bps, 100000),
+ Field(&BitrateConstraints::start_bitrate_bps, -1),
+ Field(&BitrateConstraints::max_bitrate_bps, 200000))));
+ SetSenderParameters(send_parameters_);
+}
+
+// NACK negotiation: the "nack" feedback param on the send codec controls
+// rtp_history_ms on RECEIVE streams (0 = disabled, kRtpHistoryMs = enabled).
+
+// Test that we can enable NACK with opus as callee.
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecEnableNackAsCallee) {
+ EXPECT_TRUE(SetupRecvStream());
+ cricket::AudioSenderParameter parameters;
+ parameters.codecs.push_back(kOpusCodec);
+ parameters.codecs[0].AddFeedbackParam(cricket::FeedbackParam(
+ cricket::kRtcpFbParamNack, cricket::kParamValueEmpty));
+ EXPECT_EQ(0, GetRecvStreamConfig(kSsrcX).rtp.nack.rtp_history_ms);
+ SetSenderParameters(parameters);
+ // NACK should be enabled even with no send stream.
+ EXPECT_EQ(kRtpHistoryMs, GetRecvStreamConfig(kSsrcX).rtp.nack.rtp_history_ms);
+
+ EXPECT_TRUE(send_channel_->AddSendStream(
+ cricket::StreamParams::CreateLegacy(kSsrcX)));
+}
+
+// Test that we can enable NACK on receive streams.
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecEnableNackRecvStreams) {
+ EXPECT_TRUE(SetupSendStream());
+ EXPECT_TRUE(AddRecvStream(kSsrcY));
+ cricket::AudioSenderParameter parameters;
+ parameters.codecs.push_back(kOpusCodec);
+ parameters.codecs[0].AddFeedbackParam(cricket::FeedbackParam(
+ cricket::kRtcpFbParamNack, cricket::kParamValueEmpty));
+ EXPECT_EQ(0, GetRecvStreamConfig(kSsrcY).rtp.nack.rtp_history_ms);
+ SetSenderParameters(parameters);
+ EXPECT_EQ(kRtpHistoryMs, GetRecvStreamConfig(kSsrcY).rtp.nack.rtp_history_ms);
+}
+
+// Test that we can disable NACK on receive streams.
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecDisableNackRecvStreams) {
+ EXPECT_TRUE(SetupSendStream());
+ EXPECT_TRUE(AddRecvStream(kSsrcY));
+ cricket::AudioSenderParameter parameters;
+ parameters.codecs.push_back(kOpusCodec);
+ parameters.codecs[0].AddFeedbackParam(cricket::FeedbackParam(
+ cricket::kRtcpFbParamNack, cricket::kParamValueEmpty));
+ SetSenderParameters(parameters);
+ EXPECT_EQ(kRtpHistoryMs, GetRecvStreamConfig(kSsrcY).rtp.nack.rtp_history_ms);
+
+ // Renegotiate without the nack feedback param: history must drop to 0.
+ parameters.codecs.clear();
+ parameters.codecs.push_back(kOpusCodec);
+ SetSenderParameters(parameters);
+ EXPECT_EQ(0, GetRecvStreamConfig(kSsrcY).rtp.nack.rtp_history_ms);
+}
+
+// Test that NACK is enabled on a new receive stream.
+TEST_P(WebRtcVoiceEngineTestFake, AddRecvStreamEnableNack) {
+ EXPECT_TRUE(SetupSendStream());
+ cricket::AudioSenderParameter parameters;
+ parameters.codecs.push_back(kOpusCodec);
+ parameters.codecs.push_back(kCn16000Codec);
+ parameters.codecs[0].AddFeedbackParam(cricket::FeedbackParam(
+ cricket::kRtcpFbParamNack, cricket::kParamValueEmpty));
+ SetSenderParameters(parameters);
+
+ // Streams added after negotiation inherit the NACK setting.
+ EXPECT_TRUE(AddRecvStream(kSsrcY));
+ EXPECT_EQ(kRtpHistoryMs, GetRecvStreamConfig(kSsrcY).rtp.nack.rtp_history_ms);
+ EXPECT_TRUE(AddRecvStream(kSsrcZ));
+ EXPECT_EQ(kRtpHistoryMs, GetRecvStreamConfig(kSsrcZ).rtp.nack.rtp_history_ms);
+}
+
+// Test that we can switch back and forth between Opus and PCMU with CN.
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsOpusPcmuSwitching) {
+ EXPECT_TRUE(SetupSendStream());
+
+ cricket::AudioSenderParameter opus_parameters;
+ opus_parameters.codecs.push_back(kOpusCodec);
+ SetSenderParameters(opus_parameters);
+ {
+ const auto& spec = *GetSendStreamConfig(kSsrcX).send_codec_spec;
+ EXPECT_EQ(111, spec.payload_type);
+ EXPECT_STRCASEEQ("opus", spec.format.name.c_str());
+ }
+
+ // Switch to PCMU (with CN and Opus also offered; first codec wins).
+ cricket::AudioSenderParameter pcmu_parameters;
+ pcmu_parameters.codecs.push_back(kPcmuCodec);
+ pcmu_parameters.codecs.push_back(kCn16000Codec);
+ pcmu_parameters.codecs.push_back(kOpusCodec);
+ SetSenderParameters(pcmu_parameters);
+ {
+ const auto& spec = *GetSendStreamConfig(kSsrcX).send_codec_spec;
+ EXPECT_EQ(0, spec.payload_type);
+ EXPECT_STRCASEEQ("PCMU", spec.format.name.c_str());
+ }
+
+ // And back to Opus again.
+ SetSenderParameters(opus_parameters);
+ {
+ const auto& spec = *GetSendStreamConfig(kSsrcX).send_codec_spec;
+ EXPECT_EQ(111, spec.payload_type);
+ EXPECT_STRCASEEQ("opus", spec.format.name.c_str());
+ }
+}
+
+// Test that we handle various ways of specifying bitrate.
+// PCMU defaults to 64 kbps (fixed-rate); Opus defaults to 32 kbps when
+// bitrate is left at 0.
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsBitrate) {
+ EXPECT_TRUE(SetupSendStream());
+ cricket::AudioSenderParameter parameters;
+ parameters.codecs.push_back(kPcmuCodec);
+ SetSenderParameters(parameters);
+ {
+ const auto& spec = *GetSendStreamConfig(kSsrcX).send_codec_spec;
+ EXPECT_EQ(0, spec.payload_type);
+ EXPECT_STRCASEEQ("PCMU", spec.format.name.c_str());
+ EXPECT_EQ(64000, spec.target_bitrate_bps);
+ }
+
+ parameters.codecs[0].bitrate = 0; // bitrate == default
+ SetSenderParameters(parameters);
+ {
+ const auto& spec = *GetSendStreamConfig(kSsrcX).send_codec_spec;
+ EXPECT_EQ(0, spec.payload_type);
+ EXPECT_STREQ("PCMU", spec.format.name.c_str());
+ EXPECT_EQ(64000, spec.target_bitrate_bps);
+ }
+
+ parameters.codecs[0] = kOpusCodec;
+ parameters.codecs[0].bitrate = 0; // bitrate == default
+ SetSenderParameters(parameters);
+ {
+ const auto& spec = *GetSendStreamConfig(kSsrcX).send_codec_spec;
+ EXPECT_EQ(111, spec.payload_type);
+ EXPECT_STREQ("opus", spec.format.name.c_str());
+ EXPECT_EQ(32000, spec.target_bitrate_bps);
+ }
+}
+
+// Test that we fail if no codecs are specified.
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsNoCodecs) {
+ EXPECT_TRUE(SetupSendStream());
+ cricket::AudioSenderParameter parameters;
+ EXPECT_FALSE(send_channel_->SetSenderParameters(parameters));
+}
+
+// Test that we can set send codecs even with telephone-event codec as the first
+// one on the list.
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsDTMFOnTop) {
+ EXPECT_TRUE(SetupSendStream());
+ cricket::AudioSenderParameter parameters;
+ parameters.codecs.push_back(kTelephoneEventCodec1);
+ parameters.codecs.push_back(kOpusCodec);
+ parameters.codecs.push_back(kPcmuCodec);
+ parameters.codecs[0].id = 98; // DTMF
+ parameters.codecs[1].id = 96;
+ SetSenderParameters(parameters);
+ // The first non-telephone-event codec (Opus) must be selected for sending.
+ const auto& spec = *GetSendStreamConfig(kSsrcX).send_codec_spec;
+ EXPECT_EQ(96, spec.payload_type);
+ EXPECT_STRCASEEQ("OPUS", spec.format.name.c_str());
+ SetSend(true);
+ EXPECT_TRUE(send_channel_->CanInsertDtmf());
+}
+
+// Test that CanInsertDtmf() is governed by the send flag
+TEST_P(WebRtcVoiceEngineTestFake, DTMFControlledBySendFlag) {
+ EXPECT_TRUE(SetupSendStream());
+ cricket::AudioSenderParameter parameters;
+ parameters.codecs.push_back(kTelephoneEventCodec1);
+ parameters.codecs.push_back(kPcmuCodec);
+ parameters.codecs[0].id = 98; // DTMF
+ parameters.codecs[1].id = 96;
+ SetSenderParameters(parameters);
+ // DTMF was negotiated, but insertion is only allowed while sending.
+ EXPECT_FALSE(send_channel_->CanInsertDtmf());
+ SetSend(true);
+ EXPECT_TRUE(send_channel_->CanInsertDtmf());
+ SetSend(false);
+ EXPECT_FALSE(send_channel_->CanInsertDtmf());
+}
+
+// Test that payload type range is limited for telephone-event codec.
+// Valid payload types are 0..127; 128 and -1 must be rejected and leave DTMF
+// disabled.
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsDTMFPayloadTypeOutOfRange) {
+ EXPECT_TRUE(SetupSendStream());
+ cricket::AudioSenderParameter parameters;
+ parameters.codecs.push_back(kTelephoneEventCodec2);
+ parameters.codecs.push_back(kOpusCodec);
+ parameters.codecs[0].id = 0; // DTMF
+ parameters.codecs[1].id = 96;
+ SetSenderParameters(parameters);
+ SetSend(true);
+ EXPECT_TRUE(send_channel_->CanInsertDtmf());
+ parameters.codecs[0].id = 128; // DTMF
+ EXPECT_FALSE(send_channel_->SetSenderParameters(parameters));
+ EXPECT_FALSE(send_channel_->CanInsertDtmf());
+ parameters.codecs[0].id = 127;
+ SetSenderParameters(parameters);
+ EXPECT_TRUE(send_channel_->CanInsertDtmf());
+ parameters.codecs[0].id = -1; // DTMF
+ EXPECT_FALSE(send_channel_->SetSenderParameters(parameters));
+ EXPECT_FALSE(send_channel_->CanInsertDtmf());
+}
+
+// Test that we can set send codecs even with CN codec as the first
+// one on the list.
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsCNOnTop) {
+ EXPECT_TRUE(SetupSendStream());
+ cricket::AudioSenderParameter parameters;
+ parameters.codecs.push_back(kCn8000Codec);
+ parameters.codecs.push_back(kPcmuCodec);
+ parameters.codecs[0].id = 98; // narrowband CN
+ SetSenderParameters(parameters);
+ // The first non-CN codec (PCMU) becomes the send codec; CN(8000) matches
+ // its clockrate, so the CNG payload type is attached.
+ const auto& send_codec_spec = *GetSendStreamConfig(kSsrcX).send_codec_spec;
+ EXPECT_EQ(0, send_codec_spec.payload_type);
+ EXPECT_STRCASEEQ("PCMU", send_codec_spec.format.name.c_str());
+ EXPECT_EQ(98, send_codec_spec.cng_payload_type);
+}
+
+// Test that we set VAD and DTMF types correctly as caller.
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsCNandDTMFAsCaller) {
+ EXPECT_TRUE(SetupSendStream());
+ cricket::AudioSenderParameter parameters;
+ parameters.codecs.push_back(kPcmuCodec);
+ parameters.codecs.push_back(kCn16000Codec);
+ parameters.codecs.push_back(kCn8000Codec);
+ parameters.codecs.push_back(kTelephoneEventCodec1);
+ parameters.codecs[0].id = 96;
+ parameters.codecs[2].id = 97; // narrowband CN
+ parameters.codecs[3].id = 98; // DTMF
+ SetSenderParameters(parameters);
+ const auto& send_codec_spec = *GetSendStreamConfig(kSsrcX).send_codec_spec;
+ EXPECT_EQ(96, send_codec_spec.payload_type);
+ EXPECT_STRCASEEQ("PCMU", send_codec_spec.format.name.c_str());
+ EXPECT_EQ(1u, send_codec_spec.format.num_channels);
+ // CN(8000) matches PCMU's clockrate; CN(16000) is ignored.
+ EXPECT_EQ(97, send_codec_spec.cng_payload_type);
+ SetSend(true);
+ EXPECT_TRUE(send_channel_->CanInsertDtmf());
+}
+
+// Test that we set VAD and DTMF types correctly as callee.
+// Same as the caller variant, but codecs are negotiated before the send
+// stream exists.
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsCNandDTMFAsCallee) {
+ EXPECT_TRUE(SetupChannel());
+ cricket::AudioSenderParameter parameters;
+ parameters.codecs.push_back(kPcmuCodec);
+ parameters.codecs.push_back(kCn16000Codec);
+ parameters.codecs.push_back(kCn8000Codec);
+ parameters.codecs.push_back(kTelephoneEventCodec2);
+ parameters.codecs[0].id = 96;
+ parameters.codecs[2].id = 97; // narrowband CN
+ parameters.codecs[3].id = 98; // DTMF
+ SetSenderParameters(parameters);
+ EXPECT_TRUE(send_channel_->AddSendStream(
+ cricket::StreamParams::CreateLegacy(kSsrcX)));
+
+ const auto& send_codec_spec = *GetSendStreamConfig(kSsrcX).send_codec_spec;
+ EXPECT_EQ(96, send_codec_spec.payload_type);
+ EXPECT_STRCASEEQ("PCMU", send_codec_spec.format.name.c_str());
+ EXPECT_EQ(1u, send_codec_spec.format.num_channels);
+ EXPECT_EQ(97, send_codec_spec.cng_payload_type);
+ SetSend(true);
+ EXPECT_TRUE(send_channel_->CanInsertDtmf());
+}
+
+// Test that we only apply VAD if we have a CN codec that matches the
+// send codec clockrate.
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsCNNoMatch) {
+ EXPECT_TRUE(SetupSendStream());
+ cricket::AudioSenderParameter parameters;
+ // Set PCMU(8K) and CN(16K). VAD should not be activated.
+ parameters.codecs.push_back(kPcmuCodec);
+ parameters.codecs.push_back(kCn16000Codec);
+ parameters.codecs[1].id = 97;
+ SetSenderParameters(parameters);
+ {
+ const auto& send_codec_spec = *GetSendStreamConfig(kSsrcX).send_codec_spec;
+ EXPECT_STRCASEEQ("PCMU", send_codec_spec.format.name.c_str());
+ EXPECT_EQ(absl::nullopt, send_codec_spec.cng_payload_type);
+ }
+ // Set PCMU(8K) and CN(8K). VAD should be activated.
+ parameters.codecs[1] = kCn8000Codec;
+ SetSenderParameters(parameters);
+ {
+ const auto& send_codec_spec = *GetSendStreamConfig(kSsrcX).send_codec_spec;
+ EXPECT_STRCASEEQ("PCMU", send_codec_spec.format.name.c_str());
+ EXPECT_EQ(1u, send_codec_spec.format.num_channels);
+ EXPECT_EQ(13, send_codec_spec.cng_payload_type);
+ }
+ // Set OPUS(48K) and CN(8K). VAD should not be activated.
+ parameters.codecs[0] = kOpusCodec;
+ SetSenderParameters(parameters);
+ {
+ const auto& send_codec_spec = *GetSendStreamConfig(kSsrcX).send_codec_spec;
+ EXPECT_STRCASEEQ("OPUS", send_codec_spec.format.name.c_str());
+ EXPECT_EQ(absl::nullopt, send_codec_spec.cng_payload_type);
+ }
+}
+
+// Test that we perform case-insensitive matching of codec names.
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsCaseInsensitive) {
+ EXPECT_TRUE(SetupSendStream());
+ cricket::AudioSenderParameter parameters;
+ parameters.codecs.push_back(kPcmuCodec);
+ parameters.codecs.push_back(kCn16000Codec);
+ parameters.codecs.push_back(kCn8000Codec);
+ parameters.codecs.push_back(kTelephoneEventCodec1);
+ // Mixed-case name must still match "PCMU".
+ parameters.codecs[0].name = "PcMu";
+ parameters.codecs[0].id = 96;
+ parameters.codecs[2].id = 97; // narrowband CN
+ parameters.codecs[3].id = 98; // DTMF
+ SetSenderParameters(parameters);
+ const auto& send_codec_spec = *GetSendStreamConfig(kSsrcX).send_codec_spec;
+ EXPECT_EQ(96, send_codec_spec.payload_type);
+ EXPECT_STRCASEEQ("PCMU", send_codec_spec.format.name.c_str());
+ EXPECT_EQ(1u, send_codec_spec.format.num_channels);
+ EXPECT_EQ(97, send_codec_spec.cng_payload_type);
+ SetSend(true);
+ EXPECT_TRUE(send_channel_->CanInsertDtmf());
+}
+
+// Verify the transport-sequence-number extension is among the engine's
+// default-enabled RTP header extensions.
+TEST_P(WebRtcVoiceEngineTestFake,
+ SupportsTransportSequenceNumberHeaderExtension) {
+ const std::vector<webrtc::RtpExtension> header_extensions =
+ GetDefaultEnabledRtpHeaderExtensions(*engine_);
+ EXPECT_THAT(header_extensions,
+ Contains(::testing::Field(
+ "uri", &RtpExtension::uri,
+ webrtc::RtpExtension::kTransportSequenceNumberUri)));
+}
+
+// Test support for audio level header extension.
+TEST_P(WebRtcVoiceEngineTestFake, SendAudioLevelHeaderExtensions) {
+ TestSetSendRtpHeaderExtensions(webrtc::RtpExtension::kAudioLevelUri);
+}
+TEST_P(WebRtcVoiceEngineTestFake, RecvAudioLevelHeaderExtensions) {
+ TestSetRecvRtpHeaderExtensions(webrtc::RtpExtension::kAudioLevelUri);
+}
+
+// Test support for transport sequence number header extension.
+TEST_P(WebRtcVoiceEngineTestFake, SendTransportSequenceNumberHeaderExtensions) {
+ TestSetSendRtpHeaderExtensions(
+ webrtc::RtpExtension::kTransportSequenceNumberUri);
+}
+TEST_P(WebRtcVoiceEngineTestFake, RecvTransportSequenceNumberHeaderExtensions) {
+ TestSetRecvRtpHeaderExtensions(
+ webrtc::RtpExtension::kTransportSequenceNumberUri);
+}
+
+// Test that we can create a channel and start sending on it.
+TEST_P(WebRtcVoiceEngineTestFake, Send) {
+ EXPECT_TRUE(SetupSendStream());
+ SetSenderParameters(send_parameters_);
+ SetSend(true);
+ EXPECT_TRUE(GetSendStream(kSsrcX).IsSending());
+ SetSend(false);
+ EXPECT_FALSE(GetSendStream(kSsrcX).IsSending());
+}
+
+// Test that a channel is muted/unmuted.
+// SetAudioSend's second argument is the enable/unmute flag.
+TEST_P(WebRtcVoiceEngineTestFake, SendStateMuteUnmute) {
+ EXPECT_TRUE(SetupSendStream());
+ SetSenderParameters(send_parameters_);
+ EXPECT_FALSE(GetSendStream(kSsrcX).muted());
+ SetAudioSend(kSsrcX, true, nullptr);
+ EXPECT_FALSE(GetSendStream(kSsrcX).muted());
+ SetAudioSend(kSsrcX, false, nullptr);
+ EXPECT_TRUE(GetSendStream(kSsrcX).muted());
+}
+
+// Test that SetSenderParameters() does not alter a stream's send state.
+TEST_P(WebRtcVoiceEngineTestFake, SendStateWhenStreamsAreRecreated) {
+ EXPECT_TRUE(SetupSendStream());
+ EXPECT_FALSE(GetSendStream(kSsrcX).IsSending());
+
+ // Turn on sending.
+ SetSend(true);
+ EXPECT_TRUE(GetSendStream(kSsrcX).IsSending());
+
+ // Changing RTP header extensions will recreate the AudioSendStream.
+ send_parameters_.extensions.push_back(
+ webrtc::RtpExtension(webrtc::RtpExtension::kAudioLevelUri, 12));
+ SetSenderParameters(send_parameters_);
+ EXPECT_TRUE(GetSendStream(kSsrcX).IsSending());
+
+ // Turn off sending.
+ SetSend(false);
+ EXPECT_FALSE(GetSendStream(kSsrcX).IsSending());
+
+ // Changing RTP header extensions will recreate the AudioSendStream.
+ send_parameters_.extensions.clear();
+ SetSenderParameters(send_parameters_);
+ EXPECT_FALSE(GetSendStream(kSsrcX).IsSending());
+}
+
+// Test that we can create a channel and start playing out on it.
+TEST_P(WebRtcVoiceEngineTestFake, Playout) {
+ EXPECT_TRUE(SetupRecvStream());
+ EXPECT_TRUE(receive_channel_->SetReceiverParameters(recv_parameters_));
+ receive_channel_->SetPlayout(true);
+ EXPECT_TRUE(GetRecvStream(kSsrcX).started());
+ receive_channel_->SetPlayout(false);
+ EXPECT_FALSE(GetRecvStream(kSsrcX).started());
+}
+
+// Test that we can add and remove send streams.
+TEST_P(WebRtcVoiceEngineTestFake, CreateAndDeleteMultipleSendStreams) {
+ SetupForMultiSendStream();
+
+ // Set the global state for sending.
+ SetSend(true);
+
+ for (uint32_t ssrc : kSsrcs4) {
+ EXPECT_TRUE(send_channel_->AddSendStream(
+ cricket::StreamParams::CreateLegacy(ssrc)));
+ SetAudioSend(ssrc, true, &fake_source_);
+ // Verify that we are in a sending state for all the created streams.
+ EXPECT_TRUE(GetSendStream(ssrc).IsSending());
+ }
+ EXPECT_EQ(arraysize(kSsrcs4), call_.GetAudioSendStreams().size());
+
+ // Delete the send streams.
+ for (uint32_t ssrc : kSsrcs4) {
+ EXPECT_TRUE(send_channel_->RemoveSendStream(ssrc));
+ EXPECT_FALSE(call_.GetAudioSendStream(ssrc));
+ // Removing an already-removed stream must fail.
+ EXPECT_FALSE(send_channel_->RemoveSendStream(ssrc));
+ }
+ EXPECT_EQ(0u, call_.GetAudioSendStreams().size());
+}
+
+// Test SetSendCodecs correctly configure the codecs in all send streams.
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsWithMultipleSendStreams) {
+ SetupForMultiSendStream();
+
+ // Create send streams.
+ for (uint32_t ssrc : kSsrcs4) {
+ EXPECT_TRUE(send_channel_->AddSendStream(
+ cricket::StreamParams::CreateLegacy(ssrc)));
+ }
+
+ cricket::AudioSenderParameter parameters;
+ // Set PCMU and CN(8K). VAD should be activated.
+ parameters.codecs.push_back(kPcmuCodec);
+ parameters.codecs.push_back(kCn8000Codec);
+ parameters.codecs[1].id = 97;
+ SetSenderParameters(parameters);
+
+ // Verify PCMU and VAD are correctly configured on all send channels.
+ for (uint32_t ssrc : kSsrcs4) {
+ ASSERT_TRUE(call_.GetAudioSendStream(ssrc) != nullptr);
+ const auto& send_codec_spec =
+ *call_.GetAudioSendStream(ssrc)->GetConfig().send_codec_spec;
+ EXPECT_STRCASEEQ("PCMU", send_codec_spec.format.name.c_str());
+ EXPECT_EQ(1u, send_codec_spec.format.num_channels);
+ EXPECT_EQ(97, send_codec_spec.cng_payload_type);
+ }
+
+ // Change to PCMU(8K) and CN(16K).
+ // The clockrates no longer match, so VAD/CNG must be off everywhere.
+ parameters.codecs[0] = kPcmuCodec;
+ parameters.codecs[1] = kCn16000Codec;
+ SetSenderParameters(parameters);
+ for (uint32_t ssrc : kSsrcs4) {
+ ASSERT_TRUE(call_.GetAudioSendStream(ssrc) != nullptr);
+ const auto& send_codec_spec =
+ *call_.GetAudioSendStream(ssrc)->GetConfig().send_codec_spec;
+ EXPECT_STRCASEEQ("PCMU", send_codec_spec.format.name.c_str());
+ EXPECT_EQ(absl::nullopt, send_codec_spec.cng_payload_type);
+ }
+}
+
+// Test we can SetSend on all send streams correctly.
+TEST_P(WebRtcVoiceEngineTestFake, SetSendWithMultipleSendStreams) {
+ SetupForMultiSendStream();
+
+ // Create the send channels and they should be in a "not sending" state.
+ for (uint32_t ssrc : kSsrcs4) {
+ EXPECT_TRUE(send_channel_->AddSendStream(
+ cricket::StreamParams::CreateLegacy(ssrc)));
+ SetAudioSend(ssrc, true, &fake_source_);
+ EXPECT_FALSE(GetSendStream(ssrc).IsSending());
+ }
+
+ // Set the global state for starting sending.
+ SetSend(true);
+ for (uint32_t ssrc : kSsrcs4) {
+ // Verify that we are in a sending state for all the send streams.
+ EXPECT_TRUE(GetSendStream(ssrc).IsSending());
+ }
+
+ // Set the global state for stopping sending.
+ SetSend(false);
+ for (uint32_t ssrc : kSsrcs4) {
+ // Verify that we are in a stop state for all the send streams.
+ EXPECT_FALSE(GetSendStream(ssrc).IsSending());
+ }
+}
+
+// Test we can set the correct statistics on all send streams.
+// Covers three phases: stats with 4 senders + 1 receiver, stats after the
+// receiver is removed, and stats after an unsignaled packet recreates a
+// default receive stream.
+TEST_P(WebRtcVoiceEngineTestFake, GetStatsWithMultipleSendStreams) {
+ SetupForMultiSendStream();
+
+ // Create send streams.
+ for (uint32_t ssrc : kSsrcs4) {
+ EXPECT_TRUE(send_channel_->AddSendStream(
+ cricket::StreamParams::CreateLegacy(ssrc)));
+ }
+
+ // Create a receive stream to check that none of the send streams end up in
+ // the receive stream stats.
+ EXPECT_TRUE(AddRecvStream(kSsrcY));
+
+ // We need send codec to be set to get all stats.
+ SetSenderParameters(send_parameters_);
+ EXPECT_TRUE(receive_channel_->SetReceiverParameters(recv_parameters_));
+ SetAudioSendStreamStats();
+ SetAudioReceiveStreamStats();
+
+ // Check stats for the added streams.
+ {
+ EXPECT_CALL(*adm_, GetPlayoutUnderrunCount()).WillOnce(Return(0));
+ cricket::VoiceMediaSendInfo send_info;
+ cricket::VoiceMediaReceiveInfo receive_info;
+ EXPECT_EQ(true, send_channel_->GetStats(&send_info));
+ EXPECT_EQ(true, receive_channel_->GetStats(
+ &receive_info, /*get_and_clear_legacy_stats=*/true));
+
+ // We have added 4 send streams. We should see empty stats for all.
+ EXPECT_EQ(static_cast<size_t>(arraysize(kSsrcs4)),
+ send_info.senders.size());
+ for (const auto& sender : send_info.senders) {
+ VerifyVoiceSenderInfo(sender, false);
+ }
+ VerifyVoiceSendRecvCodecs(send_info, receive_info);
+
+ // We have added one receive stream. We should see empty stats.
+ // NOTE(review): 123u is presumably the literal value of kSsrcY — confirm
+ // against the fixture constants rather than relying on the magic number.
+ EXPECT_EQ(receive_info.receivers.size(), 1u);
+ EXPECT_EQ(receive_info.receivers[0].ssrc(), 123u);
+ }
+
+ // Remove the kSsrcY stream. No receiver stats.
+ {
+ cricket::VoiceMediaReceiveInfo receive_info;
+ cricket::VoiceMediaSendInfo send_info;
+ EXPECT_TRUE(receive_channel_->RemoveRecvStream(kSsrcY));
+ EXPECT_CALL(*adm_, GetPlayoutUnderrunCount()).WillOnce(Return(0));
+ EXPECT_EQ(true, send_channel_->GetStats(&send_info));
+ EXPECT_EQ(true, receive_channel_->GetStats(
+ &receive_info, /*get_and_clear_legacy_stats=*/true));
+ EXPECT_EQ(static_cast<size_t>(arraysize(kSsrcs4)),
+ send_info.senders.size());
+ EXPECT_EQ(0u, receive_info.receivers.size());
+ }
+
+ // Deliver a new packet - a default receive stream should be created and we
+ // should see stats again.
+ {
+ cricket::VoiceMediaSendInfo send_info;
+ cricket::VoiceMediaReceiveInfo receive_info;
+ DeliverPacket(kPcmuFrame, sizeof(kPcmuFrame));
+ SetAudioReceiveStreamStats();
+ EXPECT_CALL(*adm_, GetPlayoutUnderrunCount()).WillOnce(Return(0));
+ EXPECT_EQ(true, send_channel_->GetStats(&send_info));
+ EXPECT_EQ(true, receive_channel_->GetStats(
+ &receive_info, /*get_and_clear_legacy_stats=*/true));
+ EXPECT_EQ(static_cast<size_t>(arraysize(kSsrcs4)),
+ send_info.senders.size());
+ EXPECT_EQ(1u, receive_info.receivers.size());
+ VerifyVoiceReceiverInfo(receive_info.receivers[0]);
+ VerifyVoiceSendRecvCodecs(send_info, receive_info);
+ }
+}
+
// Test that we can add and remove receive streams, and do proper send/playout.
// We can receive on multiple streams while sending one stream.
TEST_P(WebRtcVoiceEngineTestFake, PlayoutWithMultipleStreams) {
  EXPECT_TRUE(SetupSendStream());

  // Start playout without a receive stream; there is nothing to play out yet.
  SetSenderParameters(send_parameters_);
  receive_channel_->SetPlayout(true);

  // Adding another stream should enable playout on the new stream only.
  EXPECT_TRUE(AddRecvStream(kSsrcY));
  SetSend(true);
  EXPECT_TRUE(GetSendStream(kSsrcX).IsSending());

  // Make sure only the new stream is played out.
  EXPECT_TRUE(GetRecvStream(kSsrcY).started());

  // Adding yet another stream should have stream 2 and 3 enabled for playout.
  EXPECT_TRUE(AddRecvStream(kSsrcZ));
  EXPECT_TRUE(GetRecvStream(kSsrcY).started());
  EXPECT_TRUE(GetRecvStream(kSsrcZ).started());

  // Stop sending.
  SetSend(false);
  EXPECT_FALSE(GetSendStream(kSsrcX).IsSending());

  // Stop playout.
  receive_channel_->SetPlayout(false);
  EXPECT_FALSE(GetRecvStream(kSsrcY).started());
  EXPECT_FALSE(GetRecvStream(kSsrcZ).started());

  // Restart playout and make sure recv streams are played out.
  receive_channel_->SetPlayout(true);
  EXPECT_TRUE(GetRecvStream(kSsrcY).started());
  EXPECT_TRUE(GetRecvStream(kSsrcZ).started());

  // Now remove the recv streams.
  EXPECT_TRUE(receive_channel_->RemoveRecvStream(kSsrcZ));
  EXPECT_TRUE(receive_channel_->RemoveRecvStream(kSsrcY));
}
+
// Test that the audio network adaptor config set via AudioOptions is passed
// on to the send stream.
TEST_P(WebRtcVoiceEngineTestFake, SetAudioNetworkAdaptorViaOptions) {
  EXPECT_TRUE(SetupSendStream());
  send_parameters_.options.audio_network_adaptor = true;
  send_parameters_.options.audio_network_adaptor_config = {"1234"};
  SetSenderParameters(send_parameters_);
  EXPECT_EQ(send_parameters_.options.audio_network_adaptor_config,
            GetAudioNetworkAdaptorConfig(kSsrcX));
}
+
// Test that disabling the audio network adaptor via per-stream AudioOptions
// clears the adaptor config on the send stream.
TEST_P(WebRtcVoiceEngineTestFake, AudioSendResetAudioNetworkAdaptor) {
  EXPECT_TRUE(SetupSendStream());
  send_parameters_.options.audio_network_adaptor = true;
  send_parameters_.options.audio_network_adaptor_config = {"1234"};
  SetSenderParameters(send_parameters_);
  EXPECT_EQ(send_parameters_.options.audio_network_adaptor_config,
            GetAudioNetworkAdaptorConfig(kSsrcX));
  cricket::AudioOptions options;
  options.audio_network_adaptor = false;
  SetAudioSend(kSsrcX, true, nullptr, &options);
  EXPECT_EQ(absl::nullopt, GetAudioNetworkAdaptorConfig(kSsrcX));
}
+
// Test that an unset audio_network_adaptor option neither clears the existing
// adaptor config nor forces the send stream to be recreated.
TEST_P(WebRtcVoiceEngineTestFake, AudioNetworkAdaptorNotGetOverridden) {
  EXPECT_TRUE(SetupSendStream());
  send_parameters_.options.audio_network_adaptor = true;
  send_parameters_.options.audio_network_adaptor_config = {"1234"};
  SetSenderParameters(send_parameters_);
  EXPECT_EQ(send_parameters_.options.audio_network_adaptor_config,
            GetAudioNetworkAdaptorConfig(kSsrcX));
  const int initial_num = call_.GetNumCreatedSendStreams();
  cricket::AudioOptions options;
  options.audio_network_adaptor = absl::nullopt;
  // Unvalued `options.audio_network_adaptor` should not reset audio network
  // adaptor.
  SetAudioSend(kSsrcX, true, nullptr, &options);
  // AudioSendStream not expected to be recreated.
  EXPECT_EQ(initial_num, call_.GetNumCreatedSendStreams());
  EXPECT_EQ(send_parameters_.options.audio_network_adaptor_config,
            GetAudioNetworkAdaptorConfig(kSsrcX));
}
+
// Test that we can set the outgoing SSRC properly.
// The SSRC is set in SetupSendStream() by calling AddSendStream.
TEST_P(WebRtcVoiceEngineTestFake, SetSendSsrc) {
  EXPECT_TRUE(SetupSendStream());
  EXPECT_TRUE(call_.GetAudioSendStream(kSsrcX));
}
+
// Test that send and receive stats are reported through a full
// setup / send / remove / unsignaled-recreate cycle.
TEST_P(WebRtcVoiceEngineTestFake, GetStats) {
  // Setup. We need send codec to be set to get all stats.
  EXPECT_TRUE(SetupSendStream());
  // SetupSendStream adds a send stream with kSsrcX, so the receive
  // stream has to use a different SSRC.
  EXPECT_TRUE(AddRecvStream(kSsrcY));
  SetSenderParameters(send_parameters_);
  EXPECT_TRUE(receive_channel_->SetReceiverParameters(recv_parameters_));
  SetAudioSendStreamStats();

  // Check stats for the added streams.
  {
    EXPECT_CALL(*adm_, GetPlayoutUnderrunCount()).WillOnce(Return(0));
    cricket::VoiceMediaSendInfo send_info;
    cricket::VoiceMediaReceiveInfo receive_info;
    EXPECT_EQ(true, send_channel_->GetStats(&send_info));
    EXPECT_EQ(true, receive_channel_->GetStats(
                        &receive_info, /*get_and_clear_legacy_stats=*/true));

    // We have added one send stream. We should see the stats we've set.
    EXPECT_EQ(1u, send_info.senders.size());
    VerifyVoiceSenderInfo(send_info.senders[0], false);
    // We have added one receive stream. We should see empty stats.
    // NOTE(review): no receive stream stats have been injected yet
    // (SetAudioReceiveStreamStats() is only called below), so the reported
    // SSRC is presumably the default-constructed 0 — confirm.
    EXPECT_EQ(receive_info.receivers.size(), 1u);
    EXPECT_EQ(receive_info.receivers[0].ssrc(), 0u);
  }

  // Start sending - this affects some reported stats.
  {
    SetSend(true);
    EXPECT_CALL(*adm_, GetPlayoutUnderrunCount()).WillOnce(Return(0));
    cricket::VoiceMediaSendInfo send_info;
    cricket::VoiceMediaReceiveInfo receive_info;
    SetAudioReceiveStreamStats();
    EXPECT_EQ(true, send_channel_->GetStats(&send_info));
    EXPECT_EQ(true, receive_channel_->GetStats(
                        &receive_info, /*get_and_clear_legacy_stats=*/true));
    VerifyVoiceSenderInfo(send_info.senders[0], true);
    VerifyVoiceSendRecvCodecs(send_info, receive_info);
  }

  // Remove the kSsrcY stream. No receiver stats.
  {
    EXPECT_TRUE(receive_channel_->RemoveRecvStream(kSsrcY));
    EXPECT_CALL(*adm_, GetPlayoutUnderrunCount()).WillOnce(Return(0));
    cricket::VoiceMediaSendInfo send_info;
    cricket::VoiceMediaReceiveInfo receive_info;
    EXPECT_EQ(true, send_channel_->GetStats(&send_info));
    EXPECT_EQ(true, receive_channel_->GetStats(
                        &receive_info, /*get_and_clear_legacy_stats=*/true));
    EXPECT_EQ(1u, send_info.senders.size());
    EXPECT_EQ(0u, receive_info.receivers.size());
  }

  // Deliver a new packet - a default receive stream should be created and we
  // should see stats again.
  {
    DeliverPacket(kPcmuFrame, sizeof(kPcmuFrame));
    SetAudioReceiveStreamStats();
    EXPECT_CALL(*adm_, GetPlayoutUnderrunCount()).WillOnce(Return(0));
    cricket::VoiceMediaSendInfo send_info;
    cricket::VoiceMediaReceiveInfo receive_info;
    EXPECT_EQ(true, send_channel_->GetStats(&send_info));
    EXPECT_EQ(true, receive_channel_->GetStats(
                        &receive_info, /*get_and_clear_legacy_stats=*/true));
    EXPECT_EQ(1u, send_info.senders.size());
    EXPECT_EQ(1u, receive_info.receivers.size());
    VerifyVoiceReceiverInfo(receive_info.receivers[0]);
    VerifyVoiceSendRecvCodecs(send_info, receive_info);
  }
}
+
// Test that we can set the outgoing SSRC properly with multiple streams.
// The SSRC is set in SetupSendStream() by calling AddSendStream.
TEST_P(WebRtcVoiceEngineTestFake, SetSendSsrcWithMultipleStreams) {
  EXPECT_TRUE(SetupSendStream());
  EXPECT_TRUE(call_.GetAudioSendStream(kSsrcX));
  EXPECT_TRUE(AddRecvStream(kSsrcY));
  // The receive stream should mirror the send SSRC as its local SSRC.
  EXPECT_EQ(kSsrcX, GetRecvStreamConfig(kSsrcY).rtp.local_ssrc);
}
+
// Test that the local SSRC is the same on sending and receiving channels if
// the receive channel is created before the send channel.
TEST_P(WebRtcVoiceEngineTestFake, SetSendSsrcAfterCreatingReceiveChannel) {
  EXPECT_TRUE(SetupChannel());
  EXPECT_TRUE(AddRecvStream(kSsrcY));
  EXPECT_TRUE(send_channel_->AddSendStream(
      cricket::StreamParams::CreateLegacy(kSsrcX)));
  EXPECT_TRUE(call_.GetAudioSendStream(kSsrcX));
  // The already-existing receive stream picks up the new local send SSRC.
  EXPECT_EQ(kSsrcX, GetRecvStreamConfig(kSsrcY).rtp.local_ssrc);
}
+
// Test that we can properly receive packets.
TEST_P(WebRtcVoiceEngineTestFake, Recv) {
  EXPECT_TRUE(SetupChannel());
  EXPECT_TRUE(AddRecvStream(1));
  DeliverPacket(kPcmuFrame, sizeof(kPcmuFrame));

  // The packet should land on the only receive stream.
  EXPECT_TRUE(
      GetRecvStream(1).VerifyLastPacket(kPcmuFrame, sizeof(kPcmuFrame)));
}
+
// Test that we can properly receive packets on multiple streams.
TEST_P(WebRtcVoiceEngineTestFake, RecvWithMultipleStreams) {
  EXPECT_TRUE(SetupChannel());
  const uint32_t ssrc1 = 1;
  const uint32_t ssrc2 = 2;
  const uint32_t ssrc3 = 3;
  EXPECT_TRUE(AddRecvStream(ssrc1));
  EXPECT_TRUE(AddRecvStream(ssrc2));
  EXPECT_TRUE(AddRecvStream(ssrc3));
  // Create packets with the right SSRCs. Packet i carries SSRC i, so
  // packets[0] has SSRC 0, which matches none of the streams above.
  unsigned char packets[4][sizeof(kPcmuFrame)];
  for (size_t i = 0; i < arraysize(packets); ++i) {
    memcpy(packets[i], kPcmuFrame, sizeof(kPcmuFrame));
    // The SSRC field lives at byte offset 8 of the RTP header (RFC 3550).
    rtc::SetBE32(packets[i] + 8, static_cast<uint32_t>(i));
  }

  const cricket::FakeAudioReceiveStream& s1 = GetRecvStream(ssrc1);
  const cricket::FakeAudioReceiveStream& s2 = GetRecvStream(ssrc2);
  const cricket::FakeAudioReceiveStream& s3 = GetRecvStream(ssrc3);

  EXPECT_EQ(s1.received_packets(), 0);
  EXPECT_EQ(s2.received_packets(), 0);
  EXPECT_EQ(s3.received_packets(), 0);

  // SSRC 0 matches no stream, so no counter should move.
  DeliverPacket(packets[0], sizeof(packets[0]));
  EXPECT_EQ(s1.received_packets(), 0);
  EXPECT_EQ(s2.received_packets(), 0);
  EXPECT_EQ(s3.received_packets(), 0);

  DeliverPacket(packets[1], sizeof(packets[1]));
  EXPECT_EQ(s1.received_packets(), 1);
  EXPECT_TRUE(s1.VerifyLastPacket(packets[1], sizeof(packets[1])));
  EXPECT_EQ(s2.received_packets(), 0);
  EXPECT_EQ(s3.received_packets(), 0);

  DeliverPacket(packets[2], sizeof(packets[2]));
  EXPECT_EQ(s1.received_packets(), 1);
  EXPECT_EQ(s2.received_packets(), 1);
  EXPECT_TRUE(s2.VerifyLastPacket(packets[2], sizeof(packets[2])));
  EXPECT_EQ(s3.received_packets(), 0);

  DeliverPacket(packets[3], sizeof(packets[3]));
  EXPECT_EQ(s1.received_packets(), 1);
  EXPECT_EQ(s2.received_packets(), 1);
  EXPECT_EQ(s3.received_packets(), 1);
  EXPECT_TRUE(s3.VerifyLastPacket(packets[3], sizeof(packets[3])));

  EXPECT_TRUE(receive_channel_->RemoveRecvStream(ssrc3));
  EXPECT_TRUE(receive_channel_->RemoveRecvStream(ssrc2));
  EXPECT_TRUE(receive_channel_->RemoveRecvStream(ssrc1));
}
+
// Test that receiving on an unsignaled stream works (a stream is created).
TEST_P(WebRtcVoiceEngineTestFake, RecvUnsignaled) {
  EXPECT_TRUE(SetupChannel());
  EXPECT_EQ(0u, call_.GetAudioReceiveStreams().size());

  DeliverPacket(kPcmuFrame, sizeof(kPcmuFrame));

  // A default receive stream should have been created for the packet's SSRC.
  EXPECT_EQ(1u, call_.GetAudioReceiveStreams().size());
  EXPECT_TRUE(
      GetRecvStream(kSsrc1).VerifyLastPacket(kPcmuFrame, sizeof(kPcmuFrame)));
}
+
// Tests that when we add a stream without SSRCs, but containing a stream_id,
// that it is stored and its stream id is later used when the first packet
// arrives to properly create a receive stream with a sync label.
TEST_P(WebRtcVoiceEngineTestFake, RecvUnsignaledSsrcWithSignaledStreamId) {
  const char kSyncLabel[] = "sync_label";
  EXPECT_TRUE(SetupChannel());
  cricket::StreamParams unsignaled_stream;
  unsignaled_stream.set_stream_ids({kSyncLabel});
  ASSERT_TRUE(receive_channel_->AddRecvStream(unsignaled_stream));
  // The stream shouldn't have been created at this point because it doesn't
  // have any SSRCs.
  EXPECT_EQ(0u, call_.GetAudioReceiveStreams().size());

  DeliverPacket(kPcmuFrame, sizeof(kPcmuFrame));

  EXPECT_EQ(1u, call_.GetAudioReceiveStreams().size());
  EXPECT_TRUE(
      GetRecvStream(kSsrc1).VerifyLastPacket(kPcmuFrame, sizeof(kPcmuFrame)));
  EXPECT_EQ(kSyncLabel, GetRecvStream(kSsrc1).GetConfig().sync_group);

  // Reset the unsignaled stream to clear the cached parameters. If a new
  // default unsignaled receive stream is created it will not have a sync group.
  receive_channel_->ResetUnsignaledRecvStream();
  receive_channel_->RemoveRecvStream(kSsrc1);

  DeliverPacket(kPcmuFrame, sizeof(kPcmuFrame));

  EXPECT_EQ(1u, call_.GetAudioReceiveStreams().size());
  EXPECT_TRUE(
      GetRecvStream(kSsrc1).VerifyLastPacket(kPcmuFrame, sizeof(kPcmuFrame)));
  EXPECT_TRUE(GetRecvStream(kSsrc1).GetConfig().sync_group.empty());
}
+
// Test that ResetUnsignaledRecvStream() deletes every default receive stream
// that was created on the fly for unsignaled SSRCs.
TEST_P(WebRtcVoiceEngineTestFake,
       ResetUnsignaledRecvStreamDeletesAllDefaultStreams) {
  ASSERT_TRUE(SetupChannel());
  // No receive streams to start with.
  ASSERT_TRUE(call_.GetAudioReceiveStreams().empty());

  // Deliver a couple packets with unsignaled SSRCs.
  unsigned char packet[sizeof(kPcmuFrame)];
  memcpy(packet, kPcmuFrame, sizeof(kPcmuFrame));
  rtc::SetBE32(&packet[8], 0x1234);
  DeliverPacket(packet, sizeof(packet));
  rtc::SetBE32(&packet[8], 0x5678);
  DeliverPacket(packet, sizeof(packet));

  // Verify that the receive streams were created.
  const auto& receivers1 = call_.GetAudioReceiveStreams();
  ASSERT_EQ(receivers1.size(), 2u);

  // Should remove all default streams.
  receive_channel_->ResetUnsignaledRecvStream();
  const auto& receivers2 = call_.GetAudioReceiveStreams();
  EXPECT_EQ(0u, receivers2.size());
}
+
// Test that receiving N unsignaled streams works (streams will be created),
// and that packets are forwarded to them all.
TEST_P(WebRtcVoiceEngineTestFake, RecvMultipleUnsignaled) {
  EXPECT_TRUE(SetupChannel());
  unsigned char packet[sizeof(kPcmuFrame)];
  memcpy(packet, kPcmuFrame, sizeof(kPcmuFrame));

  // Note that SSRC = 0 is not supported.
  for (uint32_t ssrc = 1; ssrc < (1 + kMaxUnsignaledRecvStreams); ++ssrc) {
    rtc::SetBE32(&packet[8], ssrc);
    DeliverPacket(packet, sizeof(packet));

    // Verify we have one new stream for each loop iteration.
    EXPECT_EQ(ssrc, call_.GetAudioReceiveStreams().size());
    EXPECT_EQ(1, GetRecvStream(ssrc).received_packets());
    EXPECT_TRUE(GetRecvStream(ssrc).VerifyLastPacket(packet, sizeof(packet)));
  }

  // Sending on the same SSRCs again should not create new streams.
  for (uint32_t ssrc = 1; ssrc < (1 + kMaxUnsignaledRecvStreams); ++ssrc) {
    rtc::SetBE32(&packet[8], ssrc);
    DeliverPacket(packet, sizeof(packet));

    EXPECT_EQ(kMaxUnsignaledRecvStreams, call_.GetAudioReceiveStreams().size());
    EXPECT_EQ(2, GetRecvStream(ssrc).received_packets());
    EXPECT_TRUE(GetRecvStream(ssrc).VerifyLastPacket(packet, sizeof(packet)));
  }

  // Send on another SSRC, the oldest unsignaled stream (SSRC=1) is replaced.
  constexpr uint32_t kAnotherSsrc = 667;
  rtc::SetBE32(&packet[8], kAnotherSsrc);
  DeliverPacket(packet, sizeof(packet));

  const auto& streams = call_.GetAudioReceiveStreams();
  EXPECT_EQ(kMaxUnsignaledRecvStreams, streams.size());
  size_t i = 0;
  // Streams 2..N survive with their packet counts intact...
  for (uint32_t ssrc = 2; ssrc < (1 + kMaxUnsignaledRecvStreams); ++ssrc, ++i) {
    EXPECT_EQ(ssrc, streams[i]->GetConfig().rtp.remote_ssrc);
    EXPECT_EQ(2, streams[i]->received_packets());
  }
  // ...and the newest stream occupies the freed slot.
  EXPECT_EQ(kAnotherSsrc, streams[i]->GetConfig().rtp.remote_ssrc);
  EXPECT_EQ(1, streams[i]->received_packets());
  // Sanity check that we've checked all streams.
  EXPECT_EQ(kMaxUnsignaledRecvStreams, (i + 1));
}
+
// Test that a default channel is created even after a signaled stream has been
// added, and that this stream will get any packets for unknown SSRCs.
TEST_P(WebRtcVoiceEngineTestFake, RecvUnsignaledAfterSignaled) {
  EXPECT_TRUE(SetupChannel());
  unsigned char packet[sizeof(kPcmuFrame)];
  memcpy(packet, kPcmuFrame, sizeof(kPcmuFrame));

  // Add a known stream, send packet and verify we got it.
  const uint32_t signaled_ssrc = 1;
  rtc::SetBE32(&packet[8], signaled_ssrc);
  EXPECT_TRUE(AddRecvStream(signaled_ssrc));
  DeliverPacket(packet, sizeof(packet));
  EXPECT_TRUE(
      GetRecvStream(signaled_ssrc).VerifyLastPacket(packet, sizeof(packet)));
  EXPECT_EQ(1u, call_.GetAudioReceiveStreams().size());

  // Note that the first unknown SSRC cannot be 0, because we only support
  // creating receive streams for SSRC!=0.
  const uint32_t unsignaled_ssrc = 7011;
  rtc::SetBE32(&packet[8], unsignaled_ssrc);
  DeliverPacket(packet, sizeof(packet));
  EXPECT_TRUE(
      GetRecvStream(unsignaled_ssrc).VerifyLastPacket(packet, sizeof(packet)));
  EXPECT_EQ(2u, call_.GetAudioReceiveStreams().size());

  DeliverPacket(packet, sizeof(packet));
  EXPECT_EQ(2, GetRecvStream(unsignaled_ssrc).received_packets());

  // Delivering on the signaled SSRC again must not spawn more streams.
  rtc::SetBE32(&packet[8], signaled_ssrc);
  DeliverPacket(packet, sizeof(packet));
  EXPECT_EQ(2, GetRecvStream(signaled_ssrc).received_packets());
  EXPECT_EQ(2u, call_.GetAudioReceiveStreams().size());
}
+
// Two tests to verify that adding a receive stream with the same SSRC as a
// previously added unsignaled stream will only recreate underlying stream
// objects if the stream parameters have changed.
TEST_P(WebRtcVoiceEngineTestFake, AddRecvStreamAfterUnsignaled_NoRecreate) {
  EXPECT_TRUE(SetupChannel());

  // Spawn unsignaled stream with SSRC=1.
  DeliverPacket(kPcmuFrame, sizeof(kPcmuFrame));
  EXPECT_EQ(1u, call_.GetAudioReceiveStreams().size());
  EXPECT_TRUE(
      GetRecvStream(1).VerifyLastPacket(kPcmuFrame, sizeof(kPcmuFrame)));

  // Verify that the underlying stream object in Call is not recreated when a
  // stream with SSRC=1 is added.
  const auto& streams = call_.GetAudioReceiveStreams();
  EXPECT_EQ(1u, streams.size());
  // The stream id serves as a proxy for object identity across the call.
  int audio_receive_stream_id = streams.front()->id();
  EXPECT_TRUE(AddRecvStream(1));
  EXPECT_EQ(1u, streams.size());
  EXPECT_EQ(audio_receive_stream_id, streams.front()->id());
}
+
// Companion test: signaling new stream parameters for an existing unsignaled
// stream must update the stream config in place, without recreating it.
TEST_P(WebRtcVoiceEngineTestFake, AddRecvStreamAfterUnsignaled_Updates) {
  EXPECT_TRUE(SetupChannel());

  // Spawn unsignaled stream with SSRC=1.
  DeliverPacket(kPcmuFrame, sizeof(kPcmuFrame));
  EXPECT_EQ(1u, call_.GetAudioReceiveStreams().size());
  EXPECT_TRUE(
      GetRecvStream(1).VerifyLastPacket(kPcmuFrame, sizeof(kPcmuFrame)));

  // Verify that the underlying stream object in Call gets updated when a
  // stream with SSRC=1 is added, and which has changed stream parameters.
  const auto& streams = call_.GetAudioReceiveStreams();
  EXPECT_EQ(1u, streams.size());
  // The sync_group id should be empty.
  EXPECT_TRUE(streams.front()->GetConfig().sync_group.empty());

  const std::string new_stream_id("stream_id");
  int audio_receive_stream_id = streams.front()->id();
  cricket::StreamParams stream_params;
  stream_params.ssrcs.push_back(1);
  stream_params.set_stream_ids({new_stream_id});

  EXPECT_TRUE(receive_channel_->AddRecvStream(stream_params));
  EXPECT_EQ(1u, streams.size());
  // The audio receive stream should not have been recreated.
  EXPECT_EQ(audio_receive_stream_id, streams.front()->id());

  // The sync_group id should now match with the new stream params.
  EXPECT_EQ(new_stream_id, streams.front()->GetConfig().sync_group);
}
+
// Test that AddRecvStream creates a new stream.
TEST_P(WebRtcVoiceEngineTestFake, AddRecvStream) {
  EXPECT_TRUE(SetupRecvStream());
  EXPECT_TRUE(AddRecvStream(1));
}
+
// Test that after adding a recv stream, we do not decode more codecs than
// those previously passed into SetRecvCodecs.
TEST_P(WebRtcVoiceEngineTestFake, AddRecvStreamUnsupportedCodec) {
  EXPECT_TRUE(SetupSendStream());
  cricket::AudioReceiverParameters parameters;
  parameters.codecs.push_back(kOpusCodec);
  parameters.codecs.push_back(kPcmuCodec);
  EXPECT_TRUE(receive_channel_->SetReceiverParameters(parameters));
  EXPECT_TRUE(AddRecvStream(kSsrcX));
  // The decoder map must contain exactly the two negotiated codecs.
  EXPECT_THAT(GetRecvStreamConfig(kSsrcX).decoder_map,
              (ContainerEq<std::map<int, webrtc::SdpAudioFormat>>(
                  {{0, {"PCMU", 8000, 1}}, {111, {"OPUS", 48000, 2}}})));
}
+
// Test that we properly clean up any streams that were added, even if
// not explicitly removed.
TEST_P(WebRtcVoiceEngineTestFake, StreamCleanup) {
  EXPECT_TRUE(SetupSendStream());
  SetSenderParameters(send_parameters_);
  EXPECT_TRUE(AddRecvStream(1));
  EXPECT_TRUE(AddRecvStream(2));

  EXPECT_EQ(1u, call_.GetAudioSendStreams().size());
  EXPECT_EQ(2u, call_.GetAudioReceiveStreams().size());
  // Destroying the channels must tear down all of their streams in the call.
  send_channel_.reset();
  receive_channel_.reset();
  EXPECT_EQ(0u, call_.GetAudioSendStreams().size());
  EXPECT_EQ(0u, call_.GetAudioReceiveStreams().size());
}
+
// Test that adding a receive stream with SSRC 0 succeeds.
TEST_P(WebRtcVoiceEngineTestFake, TestAddRecvStreamSuccessWithZeroSsrc) {
  EXPECT_TRUE(SetupSendStream());
  EXPECT_TRUE(AddRecvStream(0));
}
+
// Test that adding a second receive stream with an already-used SSRC fails.
TEST_P(WebRtcVoiceEngineTestFake, TestAddRecvStreamFailWithSameSsrc) {
  EXPECT_TRUE(SetupChannel());
  EXPECT_TRUE(AddRecvStream(1));
  EXPECT_FALSE(AddRecvStream(1));
}
+
// Test the InsertDtmf on the default send stream as caller.
TEST_P(WebRtcVoiceEngineTestFake, InsertDtmfOnDefaultSendStreamAsCaller) {
  TestInsertDtmf(0, true, kTelephoneEventCodec1);
}
+
// Test the InsertDtmf on the default send stream as callee.
TEST_P(WebRtcVoiceEngineTestFake, InsertDtmfOnDefaultSendStreamAsCallee) {
  TestInsertDtmf(0, false, kTelephoneEventCodec2);
}
+
// Test the InsertDtmf on the specified send stream as caller.
TEST_P(WebRtcVoiceEngineTestFake, InsertDtmfOnSendStreamAsCaller) {
  TestInsertDtmf(kSsrcX, true, kTelephoneEventCodec2);
}
+
// Test the InsertDtmf on the specified send stream as callee.
TEST_P(WebRtcVoiceEngineTestFake, InsertDtmfOnSendStreamAsCallee) {
  TestInsertDtmf(kSsrcX, false, kTelephoneEventCodec1);
}
+
// Test propagation of the extmap-allow-mixed setting when offering as caller.
TEST_P(WebRtcVoiceEngineTestFake, SetExtmapAllowMixedAsCaller) {
  TestExtmapAllowMixedCaller(/*extmap_allow_mixed=*/true);
}
// Test propagation of a disabled extmap-allow-mixed setting as caller.
TEST_P(WebRtcVoiceEngineTestFake, SetExtmapAllowMixedDisabledAsCaller) {
  TestExtmapAllowMixedCaller(/*extmap_allow_mixed=*/false);
}
// Test propagation of the extmap-allow-mixed setting when answering as callee.
TEST_P(WebRtcVoiceEngineTestFake, SetExtmapAllowMixedAsCallee) {
  TestExtmapAllowMixedCallee(/*extmap_allow_mixed=*/true);
}
// Test propagation of a disabled extmap-allow-mixed setting as callee.
TEST_P(WebRtcVoiceEngineTestFake, SetExtmapAllowMixedDisabledAsCallee) {
  TestExtmapAllowMixedCallee(/*extmap_allow_mixed=*/false);
}
+
// Test that the audio processing settings in AudioOptions (AEC/AGC/NS/HPF)
// are applied, and that fields left unset keep their previous values.
TEST_P(WebRtcVoiceEngineTestFake, SetAudioOptions) {
  EXPECT_TRUE(SetupSendStream());
  EXPECT_TRUE(AddRecvStream(kSsrcY));
  // NOTE(review): the Times() counts below are tied to how often the options
  // are (re)applied in this test — confirm before adding or removing
  // SetSenderParameters() calls.
  EXPECT_CALL(*adm_, BuiltInAECIsAvailable())
      .Times(8)
      .WillRepeatedly(Return(false));
  EXPECT_CALL(*adm_, BuiltInAGCIsAvailable())
      .Times(4)
      .WillRepeatedly(Return(false));
  EXPECT_CALL(*adm_, BuiltInNSIsAvailable())
      .Times(2)
      .WillRepeatedly(Return(false));

  EXPECT_EQ(200u, GetRecvStreamConfig(kSsrcY).jitter_buffer_max_packets);
  EXPECT_FALSE(GetRecvStreamConfig(kSsrcY).jitter_buffer_fast_accelerate);

  // Nothing set in AudioOptions, so everything should be as default.
  send_parameters_.options = cricket::AudioOptions();
  SetSenderParameters(send_parameters_);
  if (!use_null_apm_) {
    VerifyEchoCancellationSettings(/*enabled=*/true);
    EXPECT_TRUE(IsHighPassFilterEnabled());
  }
  EXPECT_EQ(200u, GetRecvStreamConfig(kSsrcY).jitter_buffer_max_packets);
  EXPECT_FALSE(GetRecvStreamConfig(kSsrcY).jitter_buffer_fast_accelerate);

  // Turn echo cancellation off.
  send_parameters_.options.echo_cancellation = false;
  SetSenderParameters(send_parameters_);
  if (!use_null_apm_) {
    VerifyEchoCancellationSettings(/*enabled=*/false);
  }

  // Turn echo cancellation back on, with settings, and make sure
  // nothing else changed.
  send_parameters_.options.echo_cancellation = true;
  SetSenderParameters(send_parameters_);
  if (!use_null_apm_) {
    VerifyEchoCancellationSettings(/*enabled=*/true);
  }

  // Turn off echo cancellation and delay agnostic aec.
  send_parameters_.options.echo_cancellation = false;
  SetSenderParameters(send_parameters_);
  if (!use_null_apm_) {
    VerifyEchoCancellationSettings(/*enabled=*/false);
  }

  // Restore AEC to be on to work with the following tests.
  send_parameters_.options.echo_cancellation = true;
  SetSenderParameters(send_parameters_);

  // Turn off AGC.
  send_parameters_.options.auto_gain_control = false;
  SetSenderParameters(send_parameters_);
  if (!use_null_apm_) {
    VerifyEchoCancellationSettings(/*enabled=*/true);
    EXPECT_FALSE(apm_config_.gain_controller1.enabled);
  }

  // Turn AGC back on.
  send_parameters_.options.auto_gain_control = true;
  SetSenderParameters(send_parameters_);
  if (!use_null_apm_) {
    VerifyEchoCancellationSettings(/*enabled=*/true);
    EXPECT_TRUE(apm_config_.gain_controller1.enabled);
  }

  // Turn off other options.
  send_parameters_.options.noise_suppression = false;
  send_parameters_.options.highpass_filter = false;
  send_parameters_.options.stereo_swapping = true;
  SetSenderParameters(send_parameters_);
  if (!use_null_apm_) {
    VerifyEchoCancellationSettings(/*enabled=*/true);
    EXPECT_FALSE(IsHighPassFilterEnabled());
    EXPECT_TRUE(apm_config_.gain_controller1.enabled);
    EXPECT_FALSE(apm_config_.noise_suppression.enabled);
    EXPECT_EQ(apm_config_.noise_suppression.level, kDefaultNsLevel);
  }

  // Set options again to ensure it has no impact.
  SetSenderParameters(send_parameters_);
  if (!use_null_apm_) {
    VerifyEchoCancellationSettings(/*enabled=*/true);
    EXPECT_TRUE(apm_config_.gain_controller1.enabled);
    EXPECT_FALSE(apm_config_.noise_suppression.enabled);
    EXPECT_EQ(apm_config_.noise_suppression.level, kDefaultNsLevel);
  }
}
+
// Test that starting to send on a freshly created send channel initializes
// ADM recording (the default behavior).
TEST_P(WebRtcVoiceEngineTestFake, InitRecordingOnSend) {
  EXPECT_CALL(*adm_, RecordingIsInitialized()).WillOnce(Return(false));
  EXPECT_CALL(*adm_, Recording()).WillOnce(Return(false));
  EXPECT_CALL(*adm_, InitRecording()).Times(1);

  std::unique_ptr<cricket::VoiceMediaSendChannelInterface> send_channel(
      engine_->CreateSendChannel(
          &call_, cricket::MediaConfig(), cricket::AudioOptions(),
          webrtc::CryptoOptions(), webrtc::AudioCodecPairId::Create()));

  send_channel->SetSend(true);
}
+
// Test that init_recording_on_send = false suppresses ADM recording
// initialization when sending starts.
TEST_P(WebRtcVoiceEngineTestFake, SkipInitRecordingOnSend) {
  EXPECT_CALL(*adm_, RecordingIsInitialized()).Times(0);
  EXPECT_CALL(*adm_, Recording()).Times(0);
  EXPECT_CALL(*adm_, InitRecording()).Times(0);

  cricket::AudioOptions options;
  options.init_recording_on_send = false;

  std::unique_ptr<cricket::VoiceMediaSendChannelInterface> send_channel(
      engine_->CreateSendChannel(&call_, cricket::MediaConfig(), options,
                                 webrtc::CryptoOptions(),
                                 webrtc::AudioCodecPairId::Create()));

  send_channel->SetSend(true);
}
+
// Test that audio options applied on individual send channels override each
// other: the channel that most recently had its parameters set (or started
// sending) determines the shared audio processing configuration.
TEST_P(WebRtcVoiceEngineTestFake, SetOptionOverridesViaChannels) {
  EXPECT_TRUE(SetupSendStream());
  // NOTE(review): the Times() cardinalities differ between the null-APM and
  // real-APM parameterizations — keep them in sync with the calls below.
  EXPECT_CALL(*adm_, BuiltInAECIsAvailable())
      .Times(use_null_apm_ ? 4 : 8)
      .WillRepeatedly(Return(false));
  EXPECT_CALL(*adm_, BuiltInAGCIsAvailable())
      .Times(use_null_apm_ ? 7 : 8)
      .WillRepeatedly(Return(false));
  EXPECT_CALL(*adm_, BuiltInNSIsAvailable())
      .Times(use_null_apm_ ? 5 : 8)
      .WillRepeatedly(Return(false));
  EXPECT_CALL(*adm_, RecordingIsInitialized())
      .Times(2)
      .WillRepeatedly(Return(false));

  EXPECT_CALL(*adm_, Recording()).Times(2).WillRepeatedly(Return(false));
  EXPECT_CALL(*adm_, InitRecording()).Times(2).WillRepeatedly(Return(0));

  std::unique_ptr<cricket::VoiceMediaSendChannelInterface> send_channel1(
      engine_->CreateSendChannel(
          &call_, cricket::MediaConfig(), cricket::AudioOptions(),
          webrtc::CryptoOptions(), webrtc::AudioCodecPairId::Create()));
  std::unique_ptr<cricket::VoiceMediaSendChannelInterface> send_channel2(
      engine_->CreateSendChannel(
          &call_, cricket::MediaConfig(), cricket::AudioOptions(),
          webrtc::CryptoOptions(), webrtc::AudioCodecPairId::Create()));

  // Have to add a stream to make SetSend work.
  cricket::StreamParams stream1;
  stream1.ssrcs.push_back(1);
  send_channel1->AddSendStream(stream1);
  cricket::StreamParams stream2;
  stream2.ssrcs.push_back(2);
  send_channel2->AddSendStream(stream2);

  // AEC and AGC and NS all enabled.
  cricket::AudioSenderParameter parameters_options_all = send_parameters_;
  parameters_options_all.options.echo_cancellation = true;
  parameters_options_all.options.auto_gain_control = true;
  parameters_options_all.options.noise_suppression = true;
  EXPECT_TRUE(send_channel1->SetSenderParameters(parameters_options_all));
  if (!use_null_apm_) {
    VerifyEchoCancellationSettings(/*enabled=*/true);
    VerifyGainControlEnabledCorrectly();
    EXPECT_TRUE(apm_config_.noise_suppression.enabled);
    EXPECT_EQ(apm_config_.noise_suppression.level, kDefaultNsLevel);
    EXPECT_EQ(parameters_options_all.options,
              SendImplFromPointer(send_channel1.get())->options());
    EXPECT_TRUE(send_channel2->SetSenderParameters(parameters_options_all));
    VerifyEchoCancellationSettings(/*enabled=*/true);
    VerifyGainControlEnabledCorrectly();
    EXPECT_EQ(parameters_options_all.options,
              SendImplFromPointer(send_channel2.get())->options());
  }

  // unset NS on channel 1.
  cricket::AudioSenderParameter parameters_options_no_ns = send_parameters_;
  parameters_options_no_ns.options.noise_suppression = false;
  EXPECT_TRUE(send_channel1->SetSenderParameters(parameters_options_no_ns));
  cricket::AudioOptions expected_options = parameters_options_all.options;
  if (!use_null_apm_) {
    VerifyEchoCancellationSettings(/*enabled=*/true);
    EXPECT_FALSE(apm_config_.noise_suppression.enabled);
    EXPECT_EQ(apm_config_.noise_suppression.level, kDefaultNsLevel);
    VerifyGainControlEnabledCorrectly();
    expected_options.echo_cancellation = true;
    expected_options.auto_gain_control = true;
    expected_options.noise_suppression = false;
    EXPECT_EQ(expected_options,
              SendImplFromPointer(send_channel1.get())->options());
  }

  // unset AGC on channel 2.
  cricket::AudioSenderParameter parameters_options_no_agc = send_parameters_;
  parameters_options_no_agc.options.auto_gain_control = false;
  EXPECT_TRUE(send_channel2->SetSenderParameters(parameters_options_no_agc));
  if (!use_null_apm_) {
    VerifyEchoCancellationSettings(/*enabled=*/true);
    EXPECT_FALSE(apm_config_.gain_controller1.enabled);
    EXPECT_TRUE(apm_config_.noise_suppression.enabled);
    EXPECT_EQ(apm_config_.noise_suppression.level, kDefaultNsLevel);
    expected_options.echo_cancellation = true;
    expected_options.auto_gain_control = false;
    expected_options.noise_suppression = true;
    EXPECT_EQ(expected_options,
              SendImplFromPointer(send_channel2.get())->options());
  }

  EXPECT_TRUE(send_channel_->SetSenderParameters(parameters_options_all));
  if (!use_null_apm_) {
    VerifyEchoCancellationSettings(/*enabled=*/true);
    VerifyGainControlEnabledCorrectly();
    EXPECT_TRUE(apm_config_.noise_suppression.enabled);
    EXPECT_EQ(apm_config_.noise_suppression.level, kDefaultNsLevel);
  }

  // Starting to send on a channel re-applies that channel's options: channel 1
  // carries the no-NS options set above.
  send_channel1->SetSend(true);
  if (!use_null_apm_) {
    VerifyEchoCancellationSettings(/*enabled=*/true);
    VerifyGainControlEnabledCorrectly();
    EXPECT_FALSE(apm_config_.noise_suppression.enabled);
    EXPECT_EQ(apm_config_.noise_suppression.level, kDefaultNsLevel);
  }

  // Channel 2 carries the no-AGC options.
  send_channel2->SetSend(true);
  if (!use_null_apm_) {
    VerifyEchoCancellationSettings(/*enabled=*/true);
    EXPECT_FALSE(apm_config_.gain_controller1.enabled);
    EXPECT_TRUE(apm_config_.noise_suppression.enabled);
    EXPECT_EQ(apm_config_.noise_suppression.level, kDefaultNsLevel);
  }

  // Make sure settings take effect while we are sending.
  cricket::AudioSenderParameter parameters_options_no_agc_nor_ns =
      send_parameters_;
  parameters_options_no_agc_nor_ns.options.auto_gain_control = false;
  parameters_options_no_agc_nor_ns.options.noise_suppression = false;
  EXPECT_TRUE(
      send_channel2->SetSenderParameters(parameters_options_no_agc_nor_ns));
  if (!use_null_apm_) {
    VerifyEchoCancellationSettings(/*enabled=*/true);
    EXPECT_FALSE(apm_config_.gain_controller1.enabled);
    EXPECT_FALSE(apm_config_.noise_suppression.enabled);
    EXPECT_EQ(apm_config_.noise_suppression.level, kDefaultNsLevel);
    expected_options.echo_cancellation = true;
    expected_options.auto_gain_control = false;
    expected_options.noise_suppression = false;
    EXPECT_EQ(expected_options,
              SendImplFromPointer(send_channel2.get())->options());
  }
}
+
+// This test verifies DSCP settings are properly applied on voice media channel.
+TEST_P(WebRtcVoiceEngineTestFake, TestSetDscpOptions) {
+ EXPECT_TRUE(SetupSendStream());
+ cricket::FakeNetworkInterface network_interface;
+ cricket::MediaConfig config;
+ std::unique_ptr<cricket::VoiceMediaSendChannelInterface> channel;
+ webrtc::RtpParameters parameters;
+
+ channel = engine_->CreateSendChannel(&call_, config, cricket::AudioOptions(),
+ webrtc::CryptoOptions(),
+ webrtc::AudioCodecPairId::Create());
+ channel->SetInterface(&network_interface);
+ // Default value when DSCP is disabled should be DSCP_DEFAULT.
+ EXPECT_EQ(rtc::DSCP_DEFAULT, network_interface.dscp());
+ channel->SetInterface(nullptr);
+
+ config.enable_dscp = true;
+ channel = engine_->CreateSendChannel(&call_, config, cricket::AudioOptions(),
+ webrtc::CryptoOptions(),
+ webrtc::AudioCodecPairId::Create());
+ channel->SetInterface(&network_interface);
+ EXPECT_EQ(rtc::DSCP_DEFAULT, network_interface.dscp());
+
+ // Create a send stream to configure
+ EXPECT_TRUE(
+ channel->AddSendStream(cricket::StreamParams::CreateLegacy(kSsrcZ)));
+ parameters = channel->GetRtpSendParameters(kSsrcZ);
+ ASSERT_FALSE(parameters.encodings.empty());
+
+ // Various priorities map to various dscp values.
+ parameters.encodings[0].network_priority = webrtc::Priority::kHigh;
+ ASSERT_TRUE(channel->SetRtpSendParameters(kSsrcZ, parameters, nullptr).ok());
+ EXPECT_EQ(rtc::DSCP_EF, network_interface.dscp());
+ parameters.encodings[0].network_priority = webrtc::Priority::kVeryLow;
+ ASSERT_TRUE(channel->SetRtpSendParameters(kSsrcZ, parameters, nullptr).ok());
+ EXPECT_EQ(rtc::DSCP_CS1, network_interface.dscp());
+
+ // Packets should also self-identify their dscp in PacketOptions.
+ const uint8_t kData[10] = {0};
+ EXPECT_TRUE(SendImplFromPointer(channel.get())->transport()->SendRtcp(kData));
+ EXPECT_EQ(rtc::DSCP_CS1, network_interface.options().dscp);
+ channel->SetInterface(nullptr);
+
+ // Verify that setting the option to false resets the
+ // DiffServCodePoint.
+ config.enable_dscp = false;
+ channel = engine_->CreateSendChannel(&call_, config, cricket::AudioOptions(),
+ webrtc::CryptoOptions(),
+ webrtc::AudioCodecPairId::Create());
+ channel->SetInterface(&network_interface);
+ // Default value when DSCP is disabled should be DSCP_DEFAULT.
+ EXPECT_EQ(rtc::DSCP_DEFAULT, network_interface.dscp());
+
+ channel->SetInterface(nullptr);
+}
+
+TEST_P(WebRtcVoiceEngineTestFake, SetOutputVolume) {
+ EXPECT_TRUE(SetupChannel());
+ EXPECT_FALSE(receive_channel_->SetOutputVolume(kSsrcY, 0.5));
+ cricket::StreamParams stream;
+ stream.ssrcs.push_back(kSsrcY);
+ EXPECT_TRUE(receive_channel_->AddRecvStream(stream));
+ EXPECT_DOUBLE_EQ(1, GetRecvStream(kSsrcY).gain());
+ EXPECT_TRUE(receive_channel_->SetOutputVolume(kSsrcY, 3));
+ EXPECT_DOUBLE_EQ(3, GetRecvStream(kSsrcY).gain());
+}
+
+TEST_P(WebRtcVoiceEngineTestFake, SetOutputVolumeUnsignaledRecvStream) {
+ EXPECT_TRUE(SetupChannel());
+
+ // Spawn an unsignaled stream by sending a packet - gain should be 1.
+ DeliverPacket(kPcmuFrame, sizeof(kPcmuFrame));
+ EXPECT_DOUBLE_EQ(1, GetRecvStream(kSsrc1).gain());
+
+ // Should remember the volume "2" which will be set on new unsignaled streams,
+ // and also set the gain to 2 on existing unsignaled streams.
+ EXPECT_TRUE(receive_channel_->SetDefaultOutputVolume(2));
+ EXPECT_DOUBLE_EQ(2, GetRecvStream(kSsrc1).gain());
+
+ // Spawn an unsignaled stream by sending a packet - gain should be 2.
+ unsigned char pcmuFrame2[sizeof(kPcmuFrame)];
+ memcpy(pcmuFrame2, kPcmuFrame, sizeof(kPcmuFrame));
+ rtc::SetBE32(&pcmuFrame2[8], kSsrcX);
+ DeliverPacket(pcmuFrame2, sizeof(pcmuFrame2));
+ EXPECT_DOUBLE_EQ(2, GetRecvStream(kSsrcX).gain());
+
+ // Setting gain for all unsignaled streams.
+ EXPECT_TRUE(receive_channel_->SetDefaultOutputVolume(3));
+ if (kMaxUnsignaledRecvStreams > 1) {
+ EXPECT_DOUBLE_EQ(3, GetRecvStream(kSsrc1).gain());
+ }
+ EXPECT_DOUBLE_EQ(3, GetRecvStream(kSsrcX).gain());
+
+ // Setting gain on an individual stream affects only that.
+ EXPECT_TRUE(receive_channel_->SetOutputVolume(kSsrcX, 4));
+ if (kMaxUnsignaledRecvStreams > 1) {
+ EXPECT_DOUBLE_EQ(3, GetRecvStream(kSsrc1).gain());
+ }
+ EXPECT_DOUBLE_EQ(4, GetRecvStream(kSsrcX).gain());
+}
+
+TEST_P(WebRtcVoiceEngineTestFake, BaseMinimumPlayoutDelayMs) {
+ EXPECT_TRUE(SetupChannel());
+ EXPECT_FALSE(receive_channel_->SetBaseMinimumPlayoutDelayMs(kSsrcY, 200));
+ EXPECT_FALSE(
+ receive_channel_->GetBaseMinimumPlayoutDelayMs(kSsrcY).has_value());
+
+ cricket::StreamParams stream;
+ stream.ssrcs.push_back(kSsrcY);
+ EXPECT_TRUE(receive_channel_->AddRecvStream(stream));
+ EXPECT_EQ(0, GetRecvStream(kSsrcY).base_mininum_playout_delay_ms());
+ EXPECT_TRUE(receive_channel_->SetBaseMinimumPlayoutDelayMs(kSsrcY, 300));
+ EXPECT_EQ(300, GetRecvStream(kSsrcY).base_mininum_playout_delay_ms());
+}
+
+TEST_P(WebRtcVoiceEngineTestFake,
+ BaseMinimumPlayoutDelayMsUnsignaledRecvStream) {
+  // Here base minimum delay is abbreviated to delay in comments for brevity.
+ EXPECT_TRUE(SetupChannel());
+
+ // Spawn an unsignaled stream by sending a packet - delay should be 0.
+ DeliverPacket(kPcmuFrame, sizeof(kPcmuFrame));
+ EXPECT_EQ(
+ 0, receive_channel_->GetBaseMinimumPlayoutDelayMs(kSsrc1).value_or(-1));
+ // Check that it doesn't provide default values for unknown ssrc.
+ EXPECT_FALSE(
+ receive_channel_->GetBaseMinimumPlayoutDelayMs(kSsrcY).has_value());
+
+ // Check that default value for unsignaled streams is 0.
+ EXPECT_EQ(
+ 0, receive_channel_->GetBaseMinimumPlayoutDelayMs(kSsrc0).value_or(-1));
+
+ // Should remember the delay 100 which will be set on new unsignaled streams,
+ // and also set the delay to 100 on existing unsignaled streams.
+ EXPECT_TRUE(receive_channel_->SetBaseMinimumPlayoutDelayMs(kSsrc0, 100));
+ EXPECT_EQ(
+ 100, receive_channel_->GetBaseMinimumPlayoutDelayMs(kSsrc0).value_or(-1));
+ // Check that it doesn't provide default values for unknown ssrc.
+ EXPECT_FALSE(
+ receive_channel_->GetBaseMinimumPlayoutDelayMs(kSsrcY).has_value());
+
+ // Spawn an unsignaled stream by sending a packet - delay should be 100.
+ unsigned char pcmuFrame2[sizeof(kPcmuFrame)];
+ memcpy(pcmuFrame2, kPcmuFrame, sizeof(kPcmuFrame));
+ rtc::SetBE32(&pcmuFrame2[8], kSsrcX);
+ DeliverPacket(pcmuFrame2, sizeof(pcmuFrame2));
+ EXPECT_EQ(
+ 100, receive_channel_->GetBaseMinimumPlayoutDelayMs(kSsrcX).value_or(-1));
+
+ // Setting delay with SSRC=0 should affect all unsignaled streams.
+ EXPECT_TRUE(receive_channel_->SetBaseMinimumPlayoutDelayMs(kSsrc0, 300));
+ if (kMaxUnsignaledRecvStreams > 1) {
+ EXPECT_EQ(
+ 300,
+ receive_channel_->GetBaseMinimumPlayoutDelayMs(kSsrc1).value_or(-1));
+ }
+ EXPECT_EQ(
+ 300, receive_channel_->GetBaseMinimumPlayoutDelayMs(kSsrcX).value_or(-1));
+
+ // Setting delay on an individual stream affects only that.
+ EXPECT_TRUE(receive_channel_->SetBaseMinimumPlayoutDelayMs(kSsrcX, 400));
+ if (kMaxUnsignaledRecvStreams > 1) {
+ EXPECT_EQ(
+ 300,
+ receive_channel_->GetBaseMinimumPlayoutDelayMs(kSsrc1).value_or(-1));
+ }
+ EXPECT_EQ(
+ 400, receive_channel_->GetBaseMinimumPlayoutDelayMs(kSsrcX).value_or(-1));
+ EXPECT_EQ(
+ 300, receive_channel_->GetBaseMinimumPlayoutDelayMs(kSsrc0).value_or(-1));
+ // Check that it doesn't provide default values for unknown ssrc.
+ EXPECT_FALSE(
+ receive_channel_->GetBaseMinimumPlayoutDelayMs(kSsrcY).has_value());
+}
+
+TEST_P(WebRtcVoiceEngineTestFake, SetsSyncGroupFromStreamId) {
+ const uint32_t kAudioSsrc = 123;
+ const std::string kStreamId = "AvSyncLabel";
+
+ EXPECT_TRUE(SetupSendStream());
+ cricket::StreamParams sp = cricket::StreamParams::CreateLegacy(kAudioSsrc);
+ sp.set_stream_ids({kStreamId});
+ // Creating two channels to make sure that sync label is set properly for both
+ // the default voice channel and following ones.
+ EXPECT_TRUE(receive_channel_->AddRecvStream(sp));
+ sp.ssrcs[0] += 1;
+ EXPECT_TRUE(receive_channel_->AddRecvStream(sp));
+
+ ASSERT_EQ(2u, call_.GetAudioReceiveStreams().size());
+ EXPECT_EQ(kStreamId,
+ call_.GetAudioReceiveStream(kAudioSsrc)->GetConfig().sync_group)
+ << "SyncGroup should be set based on stream id";
+ EXPECT_EQ(kStreamId,
+ call_.GetAudioReceiveStream(kAudioSsrc + 1)->GetConfig().sync_group)
+ << "SyncGroup should be set based on stream id";
+}
+
+// TODO(solenberg): Remove, once recv streams are configured through Call.
+// (This is then covered by TestSetRecvRtpHeaderExtensions.)
+TEST_P(WebRtcVoiceEngineTestFake, ConfiguresAudioReceiveStreamRtpExtensions) {
+ // Test that setting the header extensions results in the expected state
+ // changes on an associated Call.
+ std::vector<uint32_t> ssrcs;
+ ssrcs.push_back(223);
+ ssrcs.push_back(224);
+
+ EXPECT_TRUE(SetupSendStream());
+ SetSenderParameters(send_parameters_);
+ for (uint32_t ssrc : ssrcs) {
+ EXPECT_TRUE(receive_channel_->AddRecvStream(
+ cricket::StreamParams::CreateLegacy(ssrc)));
+ }
+
+ EXPECT_EQ(2u, call_.GetAudioReceiveStreams().size());
+ for (uint32_t ssrc : ssrcs) {
+ EXPECT_THAT(
+ receive_channel_->GetRtpReceiverParameters(ssrc).header_extensions,
+ IsEmpty());
+ }
+
+ // Set up receive extensions.
+ const std::vector<webrtc::RtpExtension> header_extensions =
+ GetDefaultEnabledRtpHeaderExtensions(*engine_);
+ cricket::AudioReceiverParameters recv_parameters;
+ recv_parameters.extensions = header_extensions;
+ receive_channel_->SetReceiverParameters(recv_parameters);
+ EXPECT_EQ(2u, call_.GetAudioReceiveStreams().size());
+ for (uint32_t ssrc : ssrcs) {
+ EXPECT_THAT(
+ receive_channel_->GetRtpReceiverParameters(ssrc).header_extensions,
+ testing::UnorderedElementsAreArray(header_extensions));
+ }
+
+ // Disable receive extensions.
+ receive_channel_->SetReceiverParameters(cricket::AudioReceiverParameters());
+ for (uint32_t ssrc : ssrcs) {
+ EXPECT_THAT(
+ receive_channel_->GetRtpReceiverParameters(ssrc).header_extensions,
+ IsEmpty());
+ }
+}
+
+TEST_P(WebRtcVoiceEngineTestFake, DeliverAudioPacket_Call) {
+ // Test that packets are forwarded to the Call when configured accordingly.
+ const uint32_t kAudioSsrc = 1;
+ rtc::CopyOnWriteBuffer kPcmuPacket(kPcmuFrame, sizeof(kPcmuFrame));
+ static const unsigned char kRtcp[] = {
+ 0x80, 0xc9, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ rtc::CopyOnWriteBuffer kRtcpPacket(kRtcp, sizeof(kRtcp));
+
+ EXPECT_TRUE(SetupSendStream());
+ cricket::VoiceMediaReceiveChannelInterface* media_channel = ReceiveImpl();
+ SetSenderParameters(send_parameters_);
+ EXPECT_TRUE(media_channel->AddRecvStream(
+ cricket::StreamParams::CreateLegacy(kAudioSsrc)));
+
+ EXPECT_EQ(1u, call_.GetAudioReceiveStreams().size());
+ const cricket::FakeAudioReceiveStream* s =
+ call_.GetAudioReceiveStream(kAudioSsrc);
+ EXPECT_EQ(0, s->received_packets());
+ webrtc::RtpPacketReceived parsed_packet;
+ RTC_CHECK(parsed_packet.Parse(kPcmuPacket));
+ receive_channel_->OnPacketReceived(parsed_packet);
+ rtc::Thread::Current()->ProcessMessages(0);
+
+ EXPECT_EQ(1, s->received_packets());
+}
+
+// All receive channels should be associated with the first send channel,
+// since they do not send RTCP SR.
+TEST_P(WebRtcVoiceEngineTestFake, AssociateFirstSendChannel_SendCreatedFirst) {
+ EXPECT_TRUE(SetupSendStream());
+ EXPECT_TRUE(AddRecvStream(kSsrcY));
+ EXPECT_EQ(kSsrcX, GetRecvStreamConfig(kSsrcY).rtp.local_ssrc);
+ EXPECT_TRUE(send_channel_->AddSendStream(
+ cricket::StreamParams::CreateLegacy(kSsrcZ)));
+ EXPECT_EQ(kSsrcX, GetRecvStreamConfig(kSsrcY).rtp.local_ssrc);
+ EXPECT_TRUE(AddRecvStream(kSsrcW));
+ EXPECT_EQ(kSsrcX, GetRecvStreamConfig(kSsrcW).rtp.local_ssrc);
+}
+
+TEST_P(WebRtcVoiceEngineTestFake, AssociateFirstSendChannel_RecvCreatedFirst) {
+ EXPECT_TRUE(SetupRecvStream());
+ EXPECT_EQ(0xFA17FA17u, GetRecvStreamConfig(kSsrcX).rtp.local_ssrc);
+ EXPECT_TRUE(send_channel_->AddSendStream(
+ cricket::StreamParams::CreateLegacy(kSsrcY)));
+ EXPECT_EQ(kSsrcY, GetRecvStreamConfig(kSsrcX).rtp.local_ssrc);
+ EXPECT_TRUE(AddRecvStream(kSsrcZ));
+ EXPECT_EQ(kSsrcY, GetRecvStreamConfig(kSsrcZ).rtp.local_ssrc);
+ EXPECT_TRUE(send_channel_->AddSendStream(
+ cricket::StreamParams::CreateLegacy(kSsrcW)));
+
+ EXPECT_EQ(kSsrcY, GetRecvStreamConfig(kSsrcX).rtp.local_ssrc);
+ EXPECT_EQ(kSsrcY, GetRecvStreamConfig(kSsrcZ).rtp.local_ssrc);
+}
+
+TEST_P(WebRtcVoiceEngineTestFake, SetRawAudioSink) {
+ EXPECT_TRUE(SetupChannel());
+ std::unique_ptr<FakeAudioSink> fake_sink_1(new FakeAudioSink());
+ std::unique_ptr<FakeAudioSink> fake_sink_2(new FakeAudioSink());
+
+ // Setting the sink before a recv stream exists should do nothing.
+ receive_channel_->SetRawAudioSink(kSsrcX, std::move(fake_sink_1));
+ EXPECT_TRUE(AddRecvStream(kSsrcX));
+ EXPECT_EQ(nullptr, GetRecvStream(kSsrcX).sink());
+
+ // Now try actually setting the sink.
+ receive_channel_->SetRawAudioSink(kSsrcX, std::move(fake_sink_2));
+ EXPECT_NE(nullptr, GetRecvStream(kSsrcX).sink());
+
+ // Now try resetting it.
+ receive_channel_->SetRawAudioSink(kSsrcX, nullptr);
+ EXPECT_EQ(nullptr, GetRecvStream(kSsrcX).sink());
+}
+
+TEST_P(WebRtcVoiceEngineTestFake, SetRawAudioSinkUnsignaledRecvStream) {
+ EXPECT_TRUE(SetupChannel());
+ std::unique_ptr<FakeAudioSink> fake_sink_1(new FakeAudioSink());
+ std::unique_ptr<FakeAudioSink> fake_sink_2(new FakeAudioSink());
+ std::unique_ptr<FakeAudioSink> fake_sink_3(new FakeAudioSink());
+ std::unique_ptr<FakeAudioSink> fake_sink_4(new FakeAudioSink());
+
+ // Should be able to set a default sink even when no stream exists.
+ receive_channel_->SetDefaultRawAudioSink(std::move(fake_sink_1));
+
+ // Spawn an unsignaled stream by sending a packet - it should be assigned the
+ // default sink.
+ DeliverPacket(kPcmuFrame, sizeof(kPcmuFrame));
+ EXPECT_NE(nullptr, GetRecvStream(kSsrc1).sink());
+
+ // Try resetting the default sink.
+ receive_channel_->SetDefaultRawAudioSink(nullptr);
+ EXPECT_EQ(nullptr, GetRecvStream(kSsrc1).sink());
+
+ // Try setting the default sink while the default stream exists.
+ receive_channel_->SetDefaultRawAudioSink(std::move(fake_sink_2));
+ EXPECT_NE(nullptr, GetRecvStream(kSsrc1).sink());
+
+ // If we remove and add a default stream, it should get the same sink.
+ EXPECT_TRUE(receive_channel_->RemoveRecvStream(kSsrc1));
+ DeliverPacket(kPcmuFrame, sizeof(kPcmuFrame));
+ EXPECT_NE(nullptr, GetRecvStream(kSsrc1).sink());
+
+ // Spawn another unsignaled stream - it should be assigned the default sink
+ // and the previous unsignaled stream should lose it.
+ unsigned char pcmuFrame2[sizeof(kPcmuFrame)];
+ memcpy(pcmuFrame2, kPcmuFrame, sizeof(kPcmuFrame));
+ rtc::SetBE32(&pcmuFrame2[8], kSsrcX);
+ DeliverPacket(pcmuFrame2, sizeof(pcmuFrame2));
+ if (kMaxUnsignaledRecvStreams > 1) {
+ EXPECT_EQ(nullptr, GetRecvStream(kSsrc1).sink());
+ }
+ EXPECT_NE(nullptr, GetRecvStream(kSsrcX).sink());
+
+ // Reset the default sink - the second unsignaled stream should lose it.
+ receive_channel_->SetDefaultRawAudioSink(nullptr);
+ if (kMaxUnsignaledRecvStreams > 1) {
+ EXPECT_EQ(nullptr, GetRecvStream(kSsrc1).sink());
+ }
+ EXPECT_EQ(nullptr, GetRecvStream(kSsrcX).sink());
+
+ // Try setting the default sink while two streams exists.
+ receive_channel_->SetDefaultRawAudioSink(std::move(fake_sink_3));
+ if (kMaxUnsignaledRecvStreams > 1) {
+ EXPECT_EQ(nullptr, GetRecvStream(kSsrc1).sink());
+ }
+ EXPECT_NE(nullptr, GetRecvStream(kSsrcX).sink());
+
+ // Try setting the sink for the first unsignaled stream using its known SSRC.
+ receive_channel_->SetRawAudioSink(kSsrc1, std::move(fake_sink_4));
+ if (kMaxUnsignaledRecvStreams > 1) {
+ EXPECT_NE(nullptr, GetRecvStream(kSsrc1).sink());
+ }
+ EXPECT_NE(nullptr, GetRecvStream(kSsrcX).sink());
+ if (kMaxUnsignaledRecvStreams > 1) {
+ EXPECT_NE(GetRecvStream(kSsrc1).sink(), GetRecvStream(kSsrcX).sink());
+ }
+}
+
+// Test that, just like the video channel, the voice channel communicates the
+// network state to the call.
+TEST_P(WebRtcVoiceEngineTestFake, OnReadyToSendSignalsNetworkState) {
+ EXPECT_TRUE(SetupChannel());
+
+ EXPECT_EQ(webrtc::kNetworkUp,
+ call_.GetNetworkState(webrtc::MediaType::AUDIO));
+ EXPECT_EQ(webrtc::kNetworkUp,
+ call_.GetNetworkState(webrtc::MediaType::VIDEO));
+
+ send_channel_->OnReadyToSend(false);
+ EXPECT_EQ(webrtc::kNetworkDown,
+ call_.GetNetworkState(webrtc::MediaType::AUDIO));
+ EXPECT_EQ(webrtc::kNetworkUp,
+ call_.GetNetworkState(webrtc::MediaType::VIDEO));
+
+ send_channel_->OnReadyToSend(true);
+ EXPECT_EQ(webrtc::kNetworkUp,
+ call_.GetNetworkState(webrtc::MediaType::AUDIO));
+ EXPECT_EQ(webrtc::kNetworkUp,
+ call_.GetNetworkState(webrtc::MediaType::VIDEO));
+}
+
+// Test that playout is still started after changing parameters
+TEST_P(WebRtcVoiceEngineTestFake, PreservePlayoutWhenRecreateRecvStream) {
+ SetupRecvStream();
+ receive_channel_->SetPlayout(true);
+ EXPECT_TRUE(GetRecvStream(kSsrcX).started());
+
+ // Changing RTP header extensions will recreate the
+ // AudioReceiveStreamInterface.
+ cricket::AudioReceiverParameters parameters;
+ parameters.extensions.push_back(
+ webrtc::RtpExtension(webrtc::RtpExtension::kAudioLevelUri, 12));
+ receive_channel_->SetReceiverParameters(parameters);
+
+ EXPECT_TRUE(GetRecvStream(kSsrcX).started());
+}
+
+// Tests when GetSources is called with non-existing ssrc, it will return an
+// empty list of RtpSource without crashing.
+TEST_P(WebRtcVoiceEngineTestFake, GetSourcesWithNonExistingSsrc) {
+  // Set up a recv stream with `kSsrcX`.
+ SetupRecvStream();
+ cricket::WebRtcVoiceReceiveChannel* media_channel = ReceiveImpl();
+ // Call GetSources with `kSsrcY` which doesn't exist.
+ std::vector<webrtc::RtpSource> sources = media_channel->GetSources(kSsrcY);
+ EXPECT_EQ(0u, sources.size());
+}
+
+// Tests that the library initializes and shuts down properly.
+TEST(WebRtcVoiceEngineTest, StartupShutdown) {
+ rtc::AutoThread main_thread;
+ for (bool use_null_apm : {false, true}) {
+ // If the VoiceEngine wants to gather available codecs early, that's fine
+ // but we never want it to create a decoder at this stage.
+ std::unique_ptr<webrtc::TaskQueueFactory> task_queue_factory =
+ webrtc::CreateDefaultTaskQueueFactory();
+ rtc::scoped_refptr<webrtc::test::MockAudioDeviceModule> adm =
+ webrtc::test::MockAudioDeviceModule::CreateNice();
+ rtc::scoped_refptr<webrtc::AudioProcessing> apm =
+ use_null_apm ? nullptr : webrtc::AudioProcessingBuilder().Create();
+ webrtc::FieldTrialBasedConfig field_trials;
+ cricket::WebRtcVoiceEngine engine(
+ task_queue_factory.get(), adm.get(),
+ webrtc::MockAudioEncoderFactory::CreateUnusedFactory(),
+ webrtc::MockAudioDecoderFactory::CreateUnusedFactory(), nullptr, apm,
+ nullptr, nullptr, field_trials);
+ engine.Init();
+ webrtc::RtcEventLogNull event_log;
+ CallConfig call_config(&event_log);
+ call_config.trials = &field_trials;
+ call_config.task_queue_factory = task_queue_factory.get();
+ std::unique_ptr<Call> call = Call::Create(call_config);
+ std::unique_ptr<cricket::VoiceMediaSendChannelInterface> send_channel =
+ engine.CreateSendChannel(
+ call.get(), cricket::MediaConfig(), cricket::AudioOptions(),
+ webrtc::CryptoOptions(), webrtc::AudioCodecPairId::Create());
+ EXPECT_TRUE(send_channel);
+ std::unique_ptr<cricket::VoiceMediaReceiveChannelInterface>
+ receive_channel = engine.CreateReceiveChannel(
+ call.get(), cricket::MediaConfig(), cricket::AudioOptions(),
+ webrtc::CryptoOptions(), webrtc::AudioCodecPairId::Create());
+ EXPECT_TRUE(receive_channel);
+ }
+}
+
+// Tests that reference counting on the external ADM is correct.
+TEST(WebRtcVoiceEngineTest, StartupShutdownWithExternalADM) {
+ rtc::AutoThread main_thread;
+ for (bool use_null_apm : {false, true}) {
+ std::unique_ptr<webrtc::TaskQueueFactory> task_queue_factory =
+ webrtc::CreateDefaultTaskQueueFactory();
+ auto adm = rtc::make_ref_counted<
+ ::testing::NiceMock<webrtc::test::MockAudioDeviceModule>>();
+ {
+ rtc::scoped_refptr<webrtc::AudioProcessing> apm =
+ use_null_apm ? nullptr : webrtc::AudioProcessingBuilder().Create();
+ webrtc::FieldTrialBasedConfig field_trials;
+ cricket::WebRtcVoiceEngine engine(
+ task_queue_factory.get(), adm.get(),
+ webrtc::MockAudioEncoderFactory::CreateUnusedFactory(),
+ webrtc::MockAudioDecoderFactory::CreateUnusedFactory(), nullptr, apm,
+ nullptr, nullptr, field_trials);
+ engine.Init();
+ webrtc::RtcEventLogNull event_log;
+ CallConfig call_config(&event_log);
+ call_config.trials = &field_trials;
+ call_config.task_queue_factory = task_queue_factory.get();
+ std::unique_ptr<Call> call = Call::Create(call_config);
+ std::unique_ptr<cricket::VoiceMediaSendChannelInterface> send_channel =
+ engine.CreateSendChannel(
+ call.get(), cricket::MediaConfig(), cricket::AudioOptions(),
+ webrtc::CryptoOptions(), webrtc::AudioCodecPairId::Create());
+ EXPECT_TRUE(send_channel);
+ std::unique_ptr<cricket::VoiceMediaReceiveChannelInterface>
+ receive_channel = engine.CreateReceiveChannel(
+ call.get(), cricket::MediaConfig(), cricket::AudioOptions(),
+ webrtc::CryptoOptions(), webrtc::AudioCodecPairId::Create());
+ EXPECT_TRUE(receive_channel);
+ }
+ // The engine/channel should have dropped their references.
+ EXPECT_EQ(adm.release()->Release(),
+ rtc::RefCountReleaseStatus::kDroppedLastRef);
+ }
+}
+
+// Verify the payload id of common audio codecs, including CN and G722.
+TEST(WebRtcVoiceEngineTest, HasCorrectPayloadTypeMapping) {
+ for (bool use_null_apm : {false, true}) {
+ std::unique_ptr<webrtc::TaskQueueFactory> task_queue_factory =
+ webrtc::CreateDefaultTaskQueueFactory();
+ // TODO(ossu): Why are the payload types of codecs with non-static payload
+ // type assignments checked here? It shouldn't really matter.
+ rtc::scoped_refptr<webrtc::test::MockAudioDeviceModule> adm =
+ webrtc::test::MockAudioDeviceModule::CreateNice();
+ rtc::scoped_refptr<webrtc::AudioProcessing> apm =
+ use_null_apm ? nullptr : webrtc::AudioProcessingBuilder().Create();
+ webrtc::FieldTrialBasedConfig field_trials;
+ cricket::WebRtcVoiceEngine engine(
+ task_queue_factory.get(), adm.get(),
+ webrtc::MockAudioEncoderFactory::CreateUnusedFactory(),
+ webrtc::MockAudioDecoderFactory::CreateUnusedFactory(), nullptr, apm,
+ nullptr, nullptr, field_trials);
+ engine.Init();
+ for (const cricket::AudioCodec& codec : engine.send_codecs()) {
+ auto is_codec = [&codec](const char* name, int clockrate = 0) {
+ return absl::EqualsIgnoreCase(codec.name, name) &&
+ (clockrate == 0 || codec.clockrate == clockrate);
+ };
+ if (is_codec("CN", 16000)) {
+ EXPECT_EQ(105, codec.id);
+ } else if (is_codec("CN", 32000)) {
+ EXPECT_EQ(106, codec.id);
+ } else if (is_codec("G722", 8000)) {
+ EXPECT_EQ(9, codec.id);
+ } else if (is_codec("telephone-event", 8000)) {
+ EXPECT_EQ(126, codec.id);
+ // TODO(solenberg): 16k, 32k, 48k DTMF should be dynamically assigned.
+ // Remove these checks once both send and receive side assigns payload
+ // types dynamically.
+ } else if (is_codec("telephone-event", 16000)) {
+ EXPECT_EQ(113, codec.id);
+ } else if (is_codec("telephone-event", 32000)) {
+ EXPECT_EQ(112, codec.id);
+ } else if (is_codec("telephone-event", 48000)) {
+ EXPECT_EQ(110, codec.id);
+ } else if (is_codec("opus")) {
+ EXPECT_EQ(111, codec.id);
+ ASSERT_TRUE(codec.params.find("minptime") != codec.params.end());
+ EXPECT_EQ("10", codec.params.find("minptime")->second);
+ ASSERT_TRUE(codec.params.find("useinbandfec") != codec.params.end());
+ EXPECT_EQ("1", codec.params.find("useinbandfec")->second);
+ }
+ }
+ }
+}
+
+// Tests that VoE supports at least 32 channels
+TEST(WebRtcVoiceEngineTest, Has32Channels) {
+ rtc::AutoThread main_thread;
+ for (bool use_null_apm : {false, true}) {
+ std::unique_ptr<webrtc::TaskQueueFactory> task_queue_factory =
+ webrtc::CreateDefaultTaskQueueFactory();
+ rtc::scoped_refptr<webrtc::test::MockAudioDeviceModule> adm =
+ webrtc::test::MockAudioDeviceModule::CreateNice();
+ rtc::scoped_refptr<webrtc::AudioProcessing> apm =
+ use_null_apm ? nullptr : webrtc::AudioProcessingBuilder().Create();
+ webrtc::FieldTrialBasedConfig field_trials;
+ cricket::WebRtcVoiceEngine engine(
+ task_queue_factory.get(), adm.get(),
+ webrtc::MockAudioEncoderFactory::CreateUnusedFactory(),
+ webrtc::MockAudioDecoderFactory::CreateUnusedFactory(), nullptr, apm,
+ nullptr, nullptr, field_trials);
+ engine.Init();
+ webrtc::RtcEventLogNull event_log;
+ CallConfig call_config(&event_log);
+ call_config.trials = &field_trials;
+ call_config.task_queue_factory = task_queue_factory.get();
+ std::unique_ptr<Call> call = Call::Create(call_config);
+
+ std::vector<std::unique_ptr<cricket::VoiceMediaSendChannelInterface>>
+ channels;
+ while (channels.size() < 32) {
+ std::unique_ptr<cricket::VoiceMediaSendChannelInterface> channel =
+ engine.CreateSendChannel(
+ call.get(), cricket::MediaConfig(), cricket::AudioOptions(),
+ webrtc::CryptoOptions(), webrtc::AudioCodecPairId::Create());
+ if (!channel)
+ break;
+ channels.emplace_back(std::move(channel));
+ }
+
+ EXPECT_EQ(channels.size(), 32u);
+ }
+}
+
+// Test that we set our preferred codecs properly.
+TEST(WebRtcVoiceEngineTest, SetRecvCodecs) {
+ rtc::AutoThread main_thread;
+ for (bool use_null_apm : {false, true}) {
+ std::unique_ptr<webrtc::TaskQueueFactory> task_queue_factory =
+ webrtc::CreateDefaultTaskQueueFactory();
+ // TODO(ossu): I'm not sure of the intent of this test. It's either:
+ // - Check that our builtin codecs are usable by Channel.
+ // - The codecs provided by the engine is usable by Channel.
+ // It does not check that the codecs in the RecvParameters are actually
+ // what we sent in - though it's probably reasonable to expect so, if
+ // SetReceiverParameters returns true.
+ // I think it will become clear once audio decoder injection is completed.
+ rtc::scoped_refptr<webrtc::test::MockAudioDeviceModule> adm =
+ webrtc::test::MockAudioDeviceModule::CreateNice();
+ rtc::scoped_refptr<webrtc::AudioProcessing> apm =
+ use_null_apm ? nullptr : webrtc::AudioProcessingBuilder().Create();
+ webrtc::FieldTrialBasedConfig field_trials;
+ cricket::WebRtcVoiceEngine engine(
+ task_queue_factory.get(), adm.get(),
+ webrtc::MockAudioEncoderFactory::CreateUnusedFactory(),
+ webrtc::CreateBuiltinAudioDecoderFactory(), nullptr, apm, nullptr,
+ nullptr, field_trials);
+ engine.Init();
+ webrtc::RtcEventLogNull event_log;
+ CallConfig call_config(&event_log);
+ call_config.trials = &field_trials;
+ call_config.task_queue_factory = task_queue_factory.get();
+ std::unique_ptr<Call> call = Call::Create(call_config);
+ cricket::WebRtcVoiceReceiveChannel channel(
+ &engine, cricket::MediaConfig(), cricket::AudioOptions(),
+ webrtc::CryptoOptions(), call.get(),
+ webrtc::AudioCodecPairId::Create());
+ cricket::AudioReceiverParameters parameters;
+ parameters.codecs = engine.recv_codecs();
+ EXPECT_TRUE(channel.SetReceiverParameters(parameters));
+ }
+}
+
+TEST(WebRtcVoiceEngineTest, SetRtpSendParametersMaxBitrate) {
+ rtc::AutoThread main_thread;
+ std::unique_ptr<webrtc::TaskQueueFactory> task_queue_factory =
+ webrtc::CreateDefaultTaskQueueFactory();
+ rtc::scoped_refptr<webrtc::test::MockAudioDeviceModule> adm =
+ webrtc::test::MockAudioDeviceModule::CreateNice();
+ webrtc::FieldTrialBasedConfig field_trials;
+ FakeAudioSource source;
+ cricket::WebRtcVoiceEngine engine(task_queue_factory.get(), adm.get(),
+ webrtc::CreateBuiltinAudioEncoderFactory(),
+ webrtc::CreateBuiltinAudioDecoderFactory(),
+ nullptr, nullptr, nullptr, nullptr,
+ field_trials);
+ engine.Init();
+ webrtc::RtcEventLogNull event_log;
+ CallConfig call_config(&event_log);
+ call_config.trials = &field_trials;
+ call_config.task_queue_factory = task_queue_factory.get();
+ {
+ webrtc::AudioState::Config config;
+ config.audio_mixer = webrtc::AudioMixerImpl::Create();
+ config.audio_device_module =
+ webrtc::test::MockAudioDeviceModule::CreateNice();
+ call_config.audio_state = webrtc::AudioState::Create(config);
+ }
+ std::unique_ptr<Call> call = Call::Create(call_config);
+ cricket::WebRtcVoiceSendChannel channel(
+ &engine, cricket::MediaConfig(), cricket::AudioOptions(),
+ webrtc::CryptoOptions(), call.get(), webrtc::AudioCodecPairId::Create());
+ {
+ cricket::AudioSenderParameter params;
+ params.codecs.push_back(cricket::CreateAudioCodec(1, "opus", 48000, 2));
+ params.extensions.push_back(webrtc::RtpExtension(
+ webrtc::RtpExtension::kTransportSequenceNumberUri, 1));
+ EXPECT_TRUE(channel.SetSenderParameters(params));
+ }
+ constexpr int kSsrc = 1234;
+ {
+ cricket::StreamParams params;
+ params.add_ssrc(kSsrc);
+ channel.AddSendStream(params);
+ }
+ channel.SetAudioSend(kSsrc, true, nullptr, &source);
+ channel.SetSend(true);
+ webrtc::RtpParameters params = channel.GetRtpSendParameters(kSsrc);
+ for (int max_bitrate : {-10, -1, 0, 10000}) {
+ params.encodings[0].max_bitrate_bps = max_bitrate;
+ channel.SetRtpSendParameters(
+ kSsrc, params, [](webrtc::RTCError error) { EXPECT_TRUE(error.ok()); });
+ }
+}
+
+TEST(WebRtcVoiceEngineTest, CollectRecvCodecs) {
+ for (bool use_null_apm : {false, true}) {
+ std::vector<webrtc::AudioCodecSpec> specs;
+ webrtc::AudioCodecSpec spec1{{"codec1", 48000, 2, {{"param1", "value1"}}},
+ {48000, 2, 16000, 10000, 20000}};
+ spec1.info.allow_comfort_noise = false;
+ spec1.info.supports_network_adaption = true;
+ specs.push_back(spec1);
+ webrtc::AudioCodecSpec spec2{{"codec2", 32000, 1}, {32000, 1, 32000}};
+ spec2.info.allow_comfort_noise = false;
+ specs.push_back(spec2);
+ specs.push_back(webrtc::AudioCodecSpec{
+ {"codec3", 16000, 1, {{"param1", "value1b"}, {"param2", "value2"}}},
+ {16000, 1, 13300}});
+ specs.push_back(
+ webrtc::AudioCodecSpec{{"codec4", 8000, 1}, {8000, 1, 64000}});
+ specs.push_back(
+ webrtc::AudioCodecSpec{{"codec5", 8000, 2}, {8000, 1, 64000}});
+
+ std::unique_ptr<webrtc::TaskQueueFactory> task_queue_factory =
+ webrtc::CreateDefaultTaskQueueFactory();
+ rtc::scoped_refptr<webrtc::MockAudioEncoderFactory> unused_encoder_factory =
+ webrtc::MockAudioEncoderFactory::CreateUnusedFactory();
+ rtc::scoped_refptr<webrtc::MockAudioDecoderFactory> mock_decoder_factory =
+ rtc::make_ref_counted<webrtc::MockAudioDecoderFactory>();
+ EXPECT_CALL(*mock_decoder_factory.get(), GetSupportedDecoders())
+ .WillOnce(Return(specs));
+ rtc::scoped_refptr<webrtc::test::MockAudioDeviceModule> adm =
+ webrtc::test::MockAudioDeviceModule::CreateNice();
+
+ rtc::scoped_refptr<webrtc::AudioProcessing> apm =
+ use_null_apm ? nullptr : webrtc::AudioProcessingBuilder().Create();
+ webrtc::FieldTrialBasedConfig field_trials;
+ cricket::WebRtcVoiceEngine engine(
+ task_queue_factory.get(), adm.get(), unused_encoder_factory,
+ mock_decoder_factory, nullptr, apm, nullptr, nullptr, field_trials);
+ engine.Init();
+ auto codecs = engine.recv_codecs();
+ EXPECT_EQ(11u, codecs.size());
+
+ // Rather than just ASSERTing that there are enough codecs, ensure that we
+ // can check the actual values safely, to provide better test results.
+ auto get_codec = [&codecs](size_t index) -> const cricket::AudioCodec& {
+ static const cricket::AudioCodec missing_codec =
+ cricket::CreateAudioCodec(0, "<missing>", 0, 0);
+ if (codecs.size() > index)
+ return codecs[index];
+ return missing_codec;
+ };
+
+ // Ensure the general codecs are generated first and in order.
+ for (size_t i = 0; i != specs.size(); ++i) {
+ EXPECT_EQ(specs[i].format.name, get_codec(i).name);
+ EXPECT_EQ(specs[i].format.clockrate_hz, get_codec(i).clockrate);
+ EXPECT_EQ(specs[i].format.num_channels, get_codec(i).channels);
+ EXPECT_EQ(specs[i].format.parameters, get_codec(i).params);
+ }
+
+ // Find the index of a codec, or -1 if not found, so that we can easily
+ // check supplementary codecs are ordered after the general codecs.
+ auto find_codec = [&codecs](const webrtc::SdpAudioFormat& format) -> int {
+ for (size_t i = 0; i != codecs.size(); ++i) {
+ const cricket::AudioCodec& codec = codecs[i];
+ if (absl::EqualsIgnoreCase(codec.name, format.name) &&
+ codec.clockrate == format.clockrate_hz &&
+ codec.channels == format.num_channels) {
+ return rtc::checked_cast<int>(i);
+ }
+ }
+ return -1;
+ };
+
+ // Ensure all supplementary codecs are generated last. Their internal
+ // ordering is not important. Without this cast, the comparison turned
+ // unsigned and, thus, failed for -1.
+ const int num_specs = static_cast<int>(specs.size());
+ EXPECT_GE(find_codec({"cn", 8000, 1}), num_specs);
+ EXPECT_GE(find_codec({"cn", 16000, 1}), num_specs);
+ EXPECT_EQ(find_codec({"cn", 32000, 1}), -1);
+ EXPECT_GE(find_codec({"telephone-event", 8000, 1}), num_specs);
+ EXPECT_GE(find_codec({"telephone-event", 16000, 1}), num_specs);
+ EXPECT_GE(find_codec({"telephone-event", 32000, 1}), num_specs);
+ EXPECT_GE(find_codec({"telephone-event", 48000, 1}), num_specs);
+ }
+}
diff --git a/third_party/libwebrtc/media/media_channel_gn/moz.build b/third_party/libwebrtc/media/media_channel_gn/moz.build
new file mode 100644
index 0000000000..1bedb41bf2
--- /dev/null
+++ b/third_party/libwebrtc/media/media_channel_gn/moz.build
@@ -0,0 +1,221 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["RTC_ENABLE_WIN_WGC"] = True
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["TARGET_CPU"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["TARGET_CPU"] == "arm":
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["TARGET_CPU"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["TARGET_CPU"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["TARGET_CPU"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["TARGET_CPU"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android" and CONFIG["TARGET_CPU"] == "arm":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["OS_TARGET"] == "Android" and CONFIG["TARGET_CPU"] == "x86":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "aarch64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "arm":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "x86":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "x86_64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("media_channel_gn")
diff --git a/third_party/libwebrtc/media/media_channel_impl_gn/moz.build b/third_party/libwebrtc/media/media_channel_impl_gn/moz.build
new file mode 100644
index 0000000000..7d0a4bd650
--- /dev/null
+++ b/third_party/libwebrtc/media/media_channel_impl_gn/moz.build
@@ -0,0 +1,205 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["RTC_ENABLE_WIN_WGC"] = True
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["TARGET_CPU"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["TARGET_CPU"] == "arm":
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["TARGET_CPU"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["TARGET_CPU"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["TARGET_CPU"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["TARGET_CPU"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android" and CONFIG["TARGET_CPU"] == "arm":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["OS_TARGET"] == "Android" and CONFIG["TARGET_CPU"] == "x86":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "aarch64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "arm":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "x86":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "x86_64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("media_channel_impl_gn")
diff --git a/third_party/libwebrtc/media/media_constants_gn/moz.build b/third_party/libwebrtc/media/media_constants_gn/moz.build
new file mode 100644
index 0000000000..af4cd6b257
--- /dev/null
+++ b/third_party/libwebrtc/media/media_constants_gn/moz.build
@@ -0,0 +1,221 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/media/base/media_constants.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["RTC_ENABLE_WIN_WGC"] = True
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["TARGET_CPU"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["TARGET_CPU"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["TARGET_CPU"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["TARGET_CPU"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["TARGET_CPU"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["TARGET_CPU"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android" and CONFIG["TARGET_CPU"] == "arm":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["OS_TARGET"] == "Android" and CONFIG["TARGET_CPU"] == "x86":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "aarch64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "arm":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "x86":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "x86_64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("media_constants_gn")
diff --git a/third_party/libwebrtc/media/rid_description_gn/moz.build b/third_party/libwebrtc/media/rid_description_gn/moz.build
new file mode 100644
index 0000000000..61afeec945
--- /dev/null
+++ b/third_party/libwebrtc/media/rid_description_gn/moz.build
@@ -0,0 +1,205 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["RTC_ENABLE_WIN_WGC"] = True
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["TARGET_CPU"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["TARGET_CPU"] == "arm":
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["TARGET_CPU"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["TARGET_CPU"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["TARGET_CPU"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["TARGET_CPU"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android" and CONFIG["TARGET_CPU"] == "arm":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["OS_TARGET"] == "Android" and CONFIG["TARGET_CPU"] == "x86":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "aarch64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "arm":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "x86":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "x86_64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("rid_description_gn")
diff --git a/third_party/libwebrtc/media/rtc_media_base_gn/moz.build b/third_party/libwebrtc/media/rtc_media_base_gn/moz.build
new file mode 100644
index 0000000000..cfff6f3411
--- /dev/null
+++ b/third_party/libwebrtc/media/rtc_media_base_gn/moz.build
@@ -0,0 +1,240 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/media/base/video_adapter.cc",
+ "/third_party/libwebrtc/media/base/video_broadcaster.cc",
+ "/third_party/libwebrtc/media/base/video_common.cc",
+ "/third_party/libwebrtc/media/base/video_source_base.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["RTC_ENABLE_WIN_WGC"] = True
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["TARGET_CPU"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["TARGET_CPU"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["TARGET_CPU"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["TARGET_CPU"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["TARGET_CPU"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["TARGET_CPU"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android" and CONFIG["TARGET_CPU"] == "arm":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["OS_TARGET"] == "Android" and CONFIG["TARGET_CPU"] == "x86":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "aarch64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "arm":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "x86":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "x86_64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("rtc_media_base_gn")
diff --git a/third_party/libwebrtc/media/rtc_media_config_gn/moz.build b/third_party/libwebrtc/media/rtc_media_config_gn/moz.build
new file mode 100644
index 0000000000..17afebe8da
--- /dev/null
+++ b/third_party/libwebrtc/media/rtc_media_config_gn/moz.build
@@ -0,0 +1,205 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["RTC_ENABLE_WIN_WGC"] = True
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["TARGET_CPU"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["TARGET_CPU"] == "arm":
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["TARGET_CPU"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["TARGET_CPU"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["TARGET_CPU"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["TARGET_CPU"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android" and CONFIG["TARGET_CPU"] == "arm":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["OS_TARGET"] == "Android" and CONFIG["TARGET_CPU"] == "x86":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "aarch64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "arm":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "x86":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "x86_64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("rtc_media_config_gn")
diff --git a/third_party/libwebrtc/media/rtc_simulcast_encoder_adapter_gn/moz.build b/third_party/libwebrtc/media/rtc_simulcast_encoder_adapter_gn/moz.build
new file mode 100644
index 0000000000..c09703ddd6
--- /dev/null
+++ b/third_party/libwebrtc/media/rtc_simulcast_encoder_adapter_gn/moz.build
@@ -0,0 +1,237 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/media/engine/simulcast_encoder_adapter.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "GLESv2",
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["RTC_ENABLE_WIN_WGC"] = True
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["TARGET_CPU"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["TARGET_CPU"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["TARGET_CPU"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["TARGET_CPU"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["TARGET_CPU"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["TARGET_CPU"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android" and CONFIG["TARGET_CPU"] == "arm":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["OS_TARGET"] == "Android" and CONFIG["TARGET_CPU"] == "x86":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "aarch64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "arm":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "x86":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "x86_64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("rtc_simulcast_encoder_adapter_gn")
diff --git a/third_party/libwebrtc/media/rtp_utils_gn/moz.build b/third_party/libwebrtc/media/rtp_utils_gn/moz.build
new file mode 100644
index 0000000000..1aaa347151
--- /dev/null
+++ b/third_party/libwebrtc/media/rtp_utils_gn/moz.build
@@ -0,0 +1,205 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["RTC_ENABLE_WIN_WGC"] = True
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["TARGET_CPU"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["TARGET_CPU"] == "arm":
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["TARGET_CPU"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["TARGET_CPU"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["TARGET_CPU"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["TARGET_CPU"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android" and CONFIG["TARGET_CPU"] == "arm":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["OS_TARGET"] == "Android" and CONFIG["TARGET_CPU"] == "x86":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "aarch64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "arm":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "x86":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "x86_64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("rtp_utils_gn")
diff --git a/third_party/libwebrtc/media/sctp/OWNERS b/third_party/libwebrtc/media/sctp/OWNERS
new file mode 100644
index 0000000000..da2f0178a8
--- /dev/null
+++ b/third_party/libwebrtc/media/sctp/OWNERS
@@ -0,0 +1,3 @@
+boivie@webrtc.org
+deadbeef@webrtc.org
+orphis@webrtc.org
diff --git a/third_party/libwebrtc/media/sctp/dcsctp_transport.cc b/third_party/libwebrtc/media/sctp/dcsctp_transport.cc
new file mode 100644
index 0000000000..525075468c
--- /dev/null
+++ b/third_party/libwebrtc/media/sctp/dcsctp_transport.cc
@@ -0,0 +1,668 @@
+/*
+ * Copyright 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/sctp/dcsctp_transport.h"
+
+#include <atomic>
+#include <cstdint>
+#include <limits>
+#include <utility>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "media/base/media_channel.h"
+#include "net/dcsctp/public/dcsctp_socket_factory.h"
+#include "net/dcsctp/public/packet_observer.h"
+#include "net/dcsctp/public/text_pcap_packet_observer.h"
+#include "net/dcsctp/public/types.h"
+#include "p2p/base/packet_transport_internal.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/socket.h"
+#include "rtc_base/strings/string_builder.h"
+#include "rtc_base/thread.h"
+#include "rtc_base/trace_event.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+namespace {
+using ::dcsctp::SendPacketStatus;
+
+// When there is packet loss for a long time, the SCTP retry timers will use
+// exponential backoff, which can grow to very long durations and when the
+// connection recovers, it may take a long time to reach the new backoff
+// duration. By limiting it to a reasonable limit, the time to recover reduces.
+constexpr dcsctp::DurationMs kMaxTimerBackoffDuration =
+ dcsctp::DurationMs(3000);
+
+enum class WebrtcPPID : dcsctp::PPID::UnderlyingType {
+ // https://www.rfc-editor.org/rfc/rfc8832.html#section-8.1
+ kDCEP = 50,
+ // https://www.rfc-editor.org/rfc/rfc8831.html#section-8
+ kString = 51,
+ kBinaryPartial = 52, // Deprecated
+ kBinary = 53,
+ kStringPartial = 54, // Deprecated
+ kStringEmpty = 56,
+ kBinaryEmpty = 57,
+};
+
+WebrtcPPID ToPPID(DataMessageType message_type, size_t size) {
+ switch (message_type) {
+ case DataMessageType::kControl:
+ return WebrtcPPID::kDCEP;
+ case DataMessageType::kText:
+ return size > 0 ? WebrtcPPID::kString : WebrtcPPID::kStringEmpty;
+ case DataMessageType::kBinary:
+ return size > 0 ? WebrtcPPID::kBinary : WebrtcPPID::kBinaryEmpty;
+ }
+}
+
+absl::optional<DataMessageType> ToDataMessageType(dcsctp::PPID ppid) {
+ switch (static_cast<WebrtcPPID>(ppid.value())) {
+ case WebrtcPPID::kDCEP:
+ return DataMessageType::kControl;
+ case WebrtcPPID::kString:
+ case WebrtcPPID::kStringPartial:
+ case WebrtcPPID::kStringEmpty:
+ return DataMessageType::kText;
+ case WebrtcPPID::kBinary:
+ case WebrtcPPID::kBinaryPartial:
+ case WebrtcPPID::kBinaryEmpty:
+ return DataMessageType::kBinary;
+ }
+ return absl::nullopt;
+}
+
+absl::optional<cricket::SctpErrorCauseCode> ToErrorCauseCode(
+ dcsctp::ErrorKind error) {
+ switch (error) {
+ case dcsctp::ErrorKind::kParseFailed:
+ return cricket::SctpErrorCauseCode::kUnrecognizedParameters;
+ case dcsctp::ErrorKind::kPeerReported:
+ return cricket::SctpErrorCauseCode::kUserInitiatedAbort;
+ case dcsctp::ErrorKind::kWrongSequence:
+ case dcsctp::ErrorKind::kProtocolViolation:
+ return cricket::SctpErrorCauseCode::kProtocolViolation;
+ case dcsctp::ErrorKind::kResourceExhaustion:
+ return cricket::SctpErrorCauseCode::kOutOfResource;
+ case dcsctp::ErrorKind::kTooManyRetries:
+ case dcsctp::ErrorKind::kUnsupportedOperation:
+ case dcsctp::ErrorKind::kNoError:
+ case dcsctp::ErrorKind::kNotConnected:
+ // No SCTP error cause code matches those
+ break;
+ }
+ return absl::nullopt;
+}
+
+bool IsEmptyPPID(dcsctp::PPID ppid) {
+ WebrtcPPID webrtc_ppid = static_cast<WebrtcPPID>(ppid.value());
+ return webrtc_ppid == WebrtcPPID::kStringEmpty ||
+ webrtc_ppid == WebrtcPPID::kBinaryEmpty;
+}
+} // namespace
+
+DcSctpTransport::DcSctpTransport(rtc::Thread* network_thread,
+ rtc::PacketTransportInternal* transport,
+ Clock* clock)
+ : DcSctpTransport(network_thread,
+ transport,
+ clock,
+ std::make_unique<dcsctp::DcSctpSocketFactory>()) {}
+
+DcSctpTransport::DcSctpTransport(
+ rtc::Thread* network_thread,
+ rtc::PacketTransportInternal* transport,
+ Clock* clock,
+ std::unique_ptr<dcsctp::DcSctpSocketFactory> socket_factory)
+ : network_thread_(network_thread),
+ transport_(transport),
+ clock_(clock),
+ random_(clock_->TimeInMicroseconds()),
+ socket_factory_(std::move(socket_factory)),
+ task_queue_timeout_factory_(
+ *network_thread,
+ [this]() { return TimeMillis(); },
+ [this](dcsctp::TimeoutID timeout_id) {
+ socket_->HandleTimeout(timeout_id);
+ }) {
+ RTC_DCHECK_RUN_ON(network_thread_);
+ static std::atomic<int> instance_count = 0;
+ rtc::StringBuilder sb;
+ sb << debug_name_ << instance_count++;
+ debug_name_ = sb.Release();
+ ConnectTransportSignals();
+}
+
+DcSctpTransport::~DcSctpTransport() {
+ if (socket_) {
+ socket_->Close();
+ }
+}
+
+void DcSctpTransport::SetOnConnectedCallback(std::function<void()> callback) {
+ RTC_DCHECK_RUN_ON(network_thread_);
+ on_connected_callback_ = std::move(callback);
+}
+
+void DcSctpTransport::SetDataChannelSink(DataChannelSink* sink) {
+ RTC_DCHECK_RUN_ON(network_thread_);
+ data_channel_sink_ = sink;
+ if (data_channel_sink_ && ready_to_send_data_) {
+ data_channel_sink_->OnReadyToSend();
+ }
+}
+
+void DcSctpTransport::SetDtlsTransport(
+ rtc::PacketTransportInternal* transport) {
+ RTC_DCHECK_RUN_ON(network_thread_);
+ DisconnectTransportSignals();
+ transport_ = transport;
+ ConnectTransportSignals();
+ MaybeConnectSocket();
+}
+
+bool DcSctpTransport::Start(int local_sctp_port,
+ int remote_sctp_port,
+ int max_message_size) {
+ RTC_DCHECK_RUN_ON(network_thread_);
+ RTC_DCHECK(max_message_size > 0);
+ RTC_DLOG(LS_INFO) << debug_name_ << "->Start(local=" << local_sctp_port
+ << ", remote=" << remote_sctp_port
+ << ", max_message_size=" << max_message_size << ")";
+
+ if (!socket_) {
+ dcsctp::DcSctpOptions options;
+ options.local_port = local_sctp_port;
+ options.remote_port = remote_sctp_port;
+ options.max_message_size = max_message_size;
+ options.max_timer_backoff_duration = kMaxTimerBackoffDuration;
+ // Don't close the connection automatically on too many retransmissions.
+ options.max_retransmissions = absl::nullopt;
+ options.max_init_retransmits = absl::nullopt;
+
+ std::unique_ptr<dcsctp::PacketObserver> packet_observer;
+ if (RTC_LOG_CHECK_LEVEL(LS_VERBOSE)) {
+ packet_observer =
+ std::make_unique<dcsctp::TextPcapPacketObserver>(debug_name_);
+ }
+
+ socket_ = socket_factory_->Create(debug_name_, *this,
+ std::move(packet_observer), options);
+ } else {
+ if (local_sctp_port != socket_->options().local_port ||
+ remote_sctp_port != socket_->options().remote_port) {
+ RTC_LOG(LS_ERROR)
+ << debug_name_ << "->Start(local=" << local_sctp_port
+ << ", remote=" << remote_sctp_port
+ << "): Can't change ports on already started transport.";
+ return false;
+ }
+ socket_->SetMaxMessageSize(max_message_size);
+ }
+
+ MaybeConnectSocket();
+
+ return true;
+}
+
+bool DcSctpTransport::OpenStream(int sid) {
+ RTC_DCHECK_RUN_ON(network_thread_);
+ RTC_DLOG(LS_INFO) << debug_name_ << "->OpenStream(" << sid << ").";
+
+ StreamState stream_state;
+ stream_states_.insert_or_assign(dcsctp::StreamID(static_cast<uint16_t>(sid)),
+ stream_state);
+ return true;
+}
+
+bool DcSctpTransport::ResetStream(int sid) {
+ RTC_DCHECK_RUN_ON(network_thread_);
+ RTC_DLOG(LS_INFO) << debug_name_ << "->ResetStream(" << sid << ").";
+ if (!socket_) {
+ RTC_LOG(LS_ERROR) << debug_name_ << "->ResetStream(sid=" << sid
+ << "): Transport is not started.";
+ return false;
+ }
+
+ dcsctp::StreamID streams[1] = {dcsctp::StreamID(static_cast<uint16_t>(sid))};
+
+ auto it = stream_states_.find(streams[0]);
+ if (it == stream_states_.end()) {
+ RTC_LOG(LS_ERROR) << debug_name_ << "->ResetStream(sid=" << sid
+ << "): Stream is not open.";
+ return false;
+ }
+
+ StreamState& stream_state = it->second;
+ if (stream_state.closure_initiated || stream_state.incoming_reset_done ||
+ stream_state.outgoing_reset_done) {
+ // The closing procedure was already initiated by the remote, don't do
+ // anything.
+ return false;
+ }
+ stream_state.closure_initiated = true;
+ socket_->ResetStreams(streams);
+ return true;
+}
+
+RTCError DcSctpTransport::SendData(int sid,
+ const SendDataParams& params,
+ const rtc::CopyOnWriteBuffer& payload) {
+ RTC_DCHECK_RUN_ON(network_thread_);
+ RTC_DLOG(LS_VERBOSE) << debug_name_ << "->SendData(sid=" << sid
+ << ", type=" << static_cast<int>(params.type)
+ << ", length=" << payload.size() << ").";
+
+ if (!socket_) {
+ RTC_LOG(LS_ERROR) << debug_name_
+ << "->SendData(...): Transport is not started.";
+ return RTCError(RTCErrorType::INVALID_STATE);
+ }
+
+ // It is possible for a message to be sent from the signaling thread at the
+ // same time a data-channel is closing, but before the signaling thread is
+ // aware of it. So we need to keep track of currently active data channels and
+ // skip sending messages for the ones that are not open or closing.
+ // The sending errors are not impacting the data channel API contract as
+ // it is allowed to discard queued messages when the channel is closing.
+ auto stream_state =
+ stream_states_.find(dcsctp::StreamID(static_cast<uint16_t>(sid)));
+ if (stream_state == stream_states_.end()) {
+ RTC_LOG(LS_VERBOSE) << "Skipping message on non-open stream with sid: "
+ << sid;
+ return RTCError(RTCErrorType::INVALID_STATE);
+ }
+
+ if (stream_state->second.closure_initiated ||
+ stream_state->second.incoming_reset_done ||
+ stream_state->second.outgoing_reset_done) {
+ RTC_LOG(LS_VERBOSE) << "Skipping message on closing stream with sid: "
+ << sid;
+ return RTCError(RTCErrorType::INVALID_STATE);
+ }
+
+ auto max_message_size = socket_->options().max_message_size;
+ if (max_message_size > 0 && payload.size() > max_message_size) {
+ RTC_LOG(LS_WARNING) << debug_name_
+ << "->SendData(...): "
+ "Trying to send packet bigger "
+ "than the max message size: "
+ << payload.size() << " vs max of " << max_message_size;
+ return RTCError(RTCErrorType::INVALID_RANGE);
+ }
+
+ std::vector<uint8_t> message_payload(payload.cdata(),
+ payload.cdata() + payload.size());
+ if (message_payload.empty()) {
+ // https://www.rfc-editor.org/rfc/rfc8831.html#section-6.6
+ // SCTP does not support the sending of empty user messages. Therefore, if
+ // an empty message has to be sent, the appropriate PPID (WebRTC String
+ // Empty or WebRTC Binary Empty) is used, and the SCTP user message of one
+ // zero byte is sent.
+ message_payload.push_back('\0');
+ }
+
+ dcsctp::DcSctpMessage message(
+ dcsctp::StreamID(static_cast<uint16_t>(sid)),
+ dcsctp::PPID(static_cast<uint16_t>(ToPPID(params.type, payload.size()))),
+ std::move(message_payload));
+
+ dcsctp::SendOptions send_options;
+ send_options.unordered = dcsctp::IsUnordered(!params.ordered);
+ if (params.max_rtx_ms.has_value()) {
+ RTC_DCHECK(*params.max_rtx_ms >= 0 &&
+ *params.max_rtx_ms <= std::numeric_limits<uint16_t>::max());
+ send_options.lifetime = dcsctp::DurationMs(*params.max_rtx_ms);
+ }
+ if (params.max_rtx_count.has_value()) {
+ RTC_DCHECK(*params.max_rtx_count >= 0 &&
+ *params.max_rtx_count <= std::numeric_limits<uint16_t>::max());
+ send_options.max_retransmissions = *params.max_rtx_count;
+ }
+
+ dcsctp::SendStatus error = socket_->Send(std::move(message), send_options);
+ switch (error) {
+ case dcsctp::SendStatus::kSuccess:
+ return RTCError::OK();
+ case dcsctp::SendStatus::kErrorResourceExhaustion:
+ ready_to_send_data_ = false;
+ return RTCError(RTCErrorType::RESOURCE_EXHAUSTED);
+ default:
+ absl::string_view message = dcsctp::ToString(error);
+ RTC_LOG(LS_ERROR) << debug_name_
+ << "->SendData(...): send() failed with error "
+ << message << ".";
+ return RTCError(RTCErrorType::NETWORK_ERROR, message);
+ }
+}
+
+bool DcSctpTransport::ReadyToSendData() {
+ RTC_DCHECK_RUN_ON(network_thread_);
+ return ready_to_send_data_;
+}
+
+int DcSctpTransport::max_message_size() const {
+ if (!socket_) {
+ RTC_LOG(LS_ERROR) << debug_name_
+ << "->max_message_size(...): Transport is not started.";
+ return 0;
+ }
+ return socket_->options().max_message_size;
+}
+
+absl::optional<int> DcSctpTransport::max_outbound_streams() const {
+ if (!socket_)
+ return absl::nullopt;
+ return socket_->options().announced_maximum_outgoing_streams;
+}
+
+absl::optional<int> DcSctpTransport::max_inbound_streams() const {
+ if (!socket_)
+ return absl::nullopt;
+ return socket_->options().announced_maximum_incoming_streams;
+}
+
+void DcSctpTransport::set_debug_name_for_testing(const char* debug_name) {
+ debug_name_ = debug_name;
+}
+
+SendPacketStatus DcSctpTransport::SendPacketWithStatus(
+ rtc::ArrayView<const uint8_t> data) {
+ RTC_DCHECK_RUN_ON(network_thread_);
+ RTC_DCHECK(socket_);
+
+ if (data.size() > (socket_->options().mtu)) {
+ RTC_LOG(LS_ERROR) << debug_name_
+ << "->SendPacket(...): "
+ "SCTP seems to have made a packet that is bigger "
+ "than its official MTU: "
+ << data.size() << " vs max of " << socket_->options().mtu;
+ return SendPacketStatus::kError;
+ }
+ TRACE_EVENT0("webrtc", "DcSctpTransport::SendPacket");
+
+ if (!transport_ || !transport_->writable())
+ return SendPacketStatus::kError;
+
+ RTC_DLOG(LS_VERBOSE) << debug_name_ << "->SendPacket(length=" << data.size()
+ << ")";
+
+ auto result =
+ transport_->SendPacket(reinterpret_cast<const char*>(data.data()),
+ data.size(), rtc::PacketOptions(), 0);
+
+ if (result < 0) {
+ RTC_LOG(LS_WARNING) << debug_name_ << "->SendPacket(length=" << data.size()
+ << ") failed with error: " << transport_->GetError()
+ << ".";
+
+ if (rtc::IsBlockingError(transport_->GetError())) {
+ return SendPacketStatus::kTemporaryFailure;
+ }
+ return SendPacketStatus::kError;
+ }
+ return SendPacketStatus::kSuccess;
+}
+
+std::unique_ptr<dcsctp::Timeout> DcSctpTransport::CreateTimeout(
+ TaskQueueBase::DelayPrecision precision) {
+ return task_queue_timeout_factory_.CreateTimeout(precision);
+}
+
+dcsctp::TimeMs DcSctpTransport::TimeMillis() {
+ return dcsctp::TimeMs(clock_->TimeInMilliseconds());
+}
+
+uint32_t DcSctpTransport::GetRandomInt(uint32_t low, uint32_t high) {
+ return random_.Rand(low, high);
+}
+
+void DcSctpTransport::OnTotalBufferedAmountLow() {
+ RTC_DCHECK_RUN_ON(network_thread_);
+ if (!ready_to_send_data_) {
+ ready_to_send_data_ = true;
+ if (data_channel_sink_) {
+ data_channel_sink_->OnReadyToSend();
+ }
+ }
+}
+
+void DcSctpTransport::OnMessageReceived(dcsctp::DcSctpMessage message) {
+ RTC_DCHECK_RUN_ON(network_thread_);
+ RTC_DLOG(LS_VERBOSE) << debug_name_ << "->OnMessageReceived(sid="
+ << message.stream_id().value()
+ << ", ppid=" << message.ppid().value()
+ << ", length=" << message.payload().size() << ").";
+ auto type = ToDataMessageType(message.ppid());
+ if (!type.has_value()) {
+ RTC_LOG(LS_VERBOSE) << debug_name_
+ << "->OnMessageReceived(): Received an unknown PPID "
+ << message.ppid().value()
+ << " on an SCTP packet. Dropping.";
+ return;
+ }
+ receive_buffer_.Clear();
+ if (!IsEmptyPPID(message.ppid()))
+ receive_buffer_.AppendData(message.payload().data(),
+ message.payload().size());
+
+ if (data_channel_sink_) {
+ data_channel_sink_->OnDataReceived(message.stream_id().value(), *type,
+ receive_buffer_);
+ }
+}
+
+void DcSctpTransport::OnError(dcsctp::ErrorKind error,
+ absl::string_view message) {
+ if (error == dcsctp::ErrorKind::kResourceExhaustion) {
+ // Indicates that a message failed to be enqueued, because the send buffer
+ // is full, which is a very common (and wanted) state for high throughput
+ // sending/benchmarks.
+ RTC_LOG(LS_VERBOSE) << debug_name_
+ << "->OnError(error=" << dcsctp::ToString(error)
+ << ", message=" << message << ").";
+ } else {
+ RTC_LOG(LS_ERROR) << debug_name_
+ << "->OnError(error=" << dcsctp::ToString(error)
+ << ", message=" << message << ").";
+ }
+}
+
+void DcSctpTransport::OnAborted(dcsctp::ErrorKind error,
+ absl::string_view message) {
+ RTC_DCHECK_RUN_ON(network_thread_);
+ RTC_LOG(LS_ERROR) << debug_name_
+ << "->OnAborted(error=" << dcsctp::ToString(error)
+ << ", message=" << message << ").";
+ ready_to_send_data_ = false;
+ RTCError rtc_error(RTCErrorType::OPERATION_ERROR_WITH_DATA,
+ std::string(message));
+ rtc_error.set_error_detail(RTCErrorDetailType::SCTP_FAILURE);
+ auto code = ToErrorCauseCode(error);
+ if (code.has_value()) {
+ rtc_error.set_sctp_cause_code(static_cast<uint16_t>(*code));
+ }
+ if (data_channel_sink_) {
+ data_channel_sink_->OnTransportClosed(rtc_error);
+ }
+}
+
+void DcSctpTransport::OnConnected() {
+ RTC_DCHECK_RUN_ON(network_thread_);
+ RTC_DLOG(LS_INFO) << debug_name_ << "->OnConnected().";
+ ready_to_send_data_ = true;
+ if (data_channel_sink_) {
+ data_channel_sink_->OnReadyToSend();
+ }
+ if (on_connected_callback_) {
+ on_connected_callback_();
+ }
+}
+
+void DcSctpTransport::OnClosed() {
+ RTC_DCHECK_RUN_ON(network_thread_);
+ RTC_DLOG(LS_INFO) << debug_name_ << "->OnClosed().";
+ ready_to_send_data_ = false;
+}
+
+void DcSctpTransport::OnConnectionRestarted() {
+ RTC_DLOG(LS_INFO) << debug_name_ << "->OnConnectionRestarted().";
+}
+
+void DcSctpTransport::OnStreamsResetFailed(
+ rtc::ArrayView<const dcsctp::StreamID> outgoing_streams,
+ absl::string_view reason) {
+ // TODO(orphis): Need a test to check for correct behavior
+ for (auto& stream_id : outgoing_streams) {
+ RTC_LOG(LS_WARNING)
+ << debug_name_
+ << "->OnStreamsResetFailed(...): Outgoing stream reset failed"
+ << ", sid=" << stream_id.value() << ", reason: " << reason << ".";
+ }
+}
+
+void DcSctpTransport::OnStreamsResetPerformed(
+ rtc::ArrayView<const dcsctp::StreamID> outgoing_streams) {
+ RTC_DCHECK_RUN_ON(network_thread_);
+ for (auto& stream_id : outgoing_streams) {
+ RTC_LOG(LS_INFO) << debug_name_
+ << "->OnStreamsResetPerformed(...): Outgoing stream reset"
+ << ", sid=" << stream_id.value();
+
+ auto it = stream_states_.find(stream_id);
+ if (it == stream_states_.end()) {
+ // Ignoring an outgoing stream reset for a closed stream
+ return;
+ }
+
+ StreamState& stream_state = it->second;
+ stream_state.outgoing_reset_done = true;
+
+ if (stream_state.incoming_reset_done) {
+ // When the close was not initiated locally, we can signal the end of the
+ // data channel close procedure when the remote ACKs the reset.
+ if (data_channel_sink_) {
+ data_channel_sink_->OnChannelClosed(stream_id.value());
+ }
+ stream_states_.erase(stream_id);
+ }
+ }
+}
+
+void DcSctpTransport::OnIncomingStreamsReset(
+ rtc::ArrayView<const dcsctp::StreamID> incoming_streams) {
+ RTC_DCHECK_RUN_ON(network_thread_);
+ for (auto& stream_id : incoming_streams) {
+ RTC_LOG(LS_INFO) << debug_name_
+ << "->OnIncomingStreamsReset(...): Incoming stream reset"
+ << ", sid=" << stream_id.value();
+
+ auto it = stream_states_.find(stream_id);
+ if (it == stream_states_.end())
+ return;
+
+ StreamState& stream_state = it->second;
+ stream_state.incoming_reset_done = true;
+
+ if (!stream_state.closure_initiated) {
+ // When receiving an incoming stream reset event for a non local close
+ // procedure, the transport needs to reset the stream in the other
+ // direction too.
+ dcsctp::StreamID streams[1] = {stream_id};
+ socket_->ResetStreams(streams);
+ if (data_channel_sink_) {
+ data_channel_sink_->OnChannelClosing(stream_id.value());
+ }
+ }
+
+ if (stream_state.outgoing_reset_done) {
+ // The close procedure that was initiated locally is complete when we
+      // receive an incoming reset event.
+ if (data_channel_sink_) {
+ data_channel_sink_->OnChannelClosed(stream_id.value());
+ }
+ stream_states_.erase(stream_id);
+ }
+ }
+}
+
+void DcSctpTransport::ConnectTransportSignals() {
+ RTC_DCHECK_RUN_ON(network_thread_);
+ if (!transport_) {
+ return;
+ }
+ transport_->SignalWritableState.connect(
+ this, &DcSctpTransport::OnTransportWritableState);
+ transport_->SignalReadPacket.connect(this,
+ &DcSctpTransport::OnTransportReadPacket);
+ transport_->SignalClosed.connect(this, &DcSctpTransport::OnTransportClosed);
+}
+
+void DcSctpTransport::DisconnectTransportSignals() {
+ RTC_DCHECK_RUN_ON(network_thread_);
+ if (!transport_) {
+ return;
+ }
+ transport_->SignalWritableState.disconnect(this);
+ transport_->SignalReadPacket.disconnect(this);
+ transport_->SignalClosed.disconnect(this);
+}
+
+void DcSctpTransport::OnTransportWritableState(
+ rtc::PacketTransportInternal* transport) {
+ RTC_DCHECK_RUN_ON(network_thread_);
+ RTC_DCHECK_EQ(transport_, transport);
+ RTC_DLOG(LS_VERBOSE) << debug_name_
+ << "->OnTransportWritableState(), writable="
+ << transport->writable();
+ MaybeConnectSocket();
+}
+
+void DcSctpTransport::OnTransportReadPacket(
+ rtc::PacketTransportInternal* transport,
+ const char* data,
+ size_t length,
+ const int64_t& /* packet_time_us */,
+ int flags) {
+ RTC_DCHECK_RUN_ON(network_thread_);
+ if (flags) {
+ // We are only interested in SCTP packets.
+ return;
+ }
+
+ RTC_DLOG(LS_VERBOSE) << debug_name_
+ << "->OnTransportReadPacket(), length=" << length;
+ if (socket_) {
+ socket_->ReceivePacket(rtc::ArrayView<const uint8_t>(
+ reinterpret_cast<const uint8_t*>(data), length));
+ }
+}
+
+void DcSctpTransport::OnTransportClosed(
+ rtc::PacketTransportInternal* transport) {
+ RTC_DCHECK_RUN_ON(network_thread_);
+ RTC_DLOG(LS_VERBOSE) << debug_name_ << "->OnTransportClosed().";
+ if (data_channel_sink_) {
+ data_channel_sink_->OnTransportClosed({});
+ }
+}
+
+void DcSctpTransport::MaybeConnectSocket() {
+ if (transport_ && transport_->writable() && socket_ &&
+ socket_->state() == dcsctp::SocketState::kClosed) {
+ socket_->Connect();
+ }
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/media/sctp/dcsctp_transport.h b/third_party/libwebrtc/media/sctp/dcsctp_transport.h
new file mode 100644
index 0000000000..7ae0d64134
--- /dev/null
+++ b/third_party/libwebrtc/media/sctp/dcsctp_transport.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MEDIA_SCTP_DCSCTP_TRANSPORT_H_
+#define MEDIA_SCTP_DCSCTP_TRANSPORT_H_
+
+#include <memory>
+#include <string>
+
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/task_queue/task_queue_base.h"
+#include "media/sctp/sctp_transport_internal.h"
+#include "net/dcsctp/public/dcsctp_options.h"
+#include "net/dcsctp/public/dcsctp_socket.h"
+#include "net/dcsctp/public/dcsctp_socket_factory.h"
+#include "net/dcsctp/public/types.h"
+#include "net/dcsctp/timer/task_queue_timeout.h"
+#include "p2p/base/packet_transport_internal.h"
+#include "rtc_base/containers/flat_map.h"
+#include "rtc_base/copy_on_write_buffer.h"
+#include "rtc_base/random.h"
+#include "rtc_base/third_party/sigslot/sigslot.h"
+#include "rtc_base/thread.h"
+#include "rtc_base/thread_annotations.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+class DcSctpTransport : public cricket::SctpTransportInternal,
+ public dcsctp::DcSctpSocketCallbacks,
+ public sigslot::has_slots<> {
+ public:
+ DcSctpTransport(rtc::Thread* network_thread,
+ rtc::PacketTransportInternal* transport,
+ Clock* clock);
+ DcSctpTransport(rtc::Thread* network_thread,
+ rtc::PacketTransportInternal* transport,
+ Clock* clock,
+ std::unique_ptr<dcsctp::DcSctpSocketFactory> socket_factory);
+ ~DcSctpTransport() override;
+
+ // cricket::SctpTransportInternal
+ void SetOnConnectedCallback(std::function<void()> callback) override;
+ void SetDataChannelSink(DataChannelSink* sink) override;
+ void SetDtlsTransport(rtc::PacketTransportInternal* transport) override;
+ bool Start(int local_sctp_port,
+ int remote_sctp_port,
+ int max_message_size) override;
+ bool OpenStream(int sid) override;
+ bool ResetStream(int sid) override;
+ RTCError SendData(int sid,
+ const SendDataParams& params,
+ const rtc::CopyOnWriteBuffer& payload) override;
+ bool ReadyToSendData() override;
+ int max_message_size() const override;
+ absl::optional<int> max_outbound_streams() const override;
+ absl::optional<int> max_inbound_streams() const override;
+ void set_debug_name_for_testing(const char* debug_name) override;
+
+ private:
+ // dcsctp::DcSctpSocketCallbacks
+ dcsctp::SendPacketStatus SendPacketWithStatus(
+ rtc::ArrayView<const uint8_t> data) override;
+ std::unique_ptr<dcsctp::Timeout> CreateTimeout(
+ TaskQueueBase::DelayPrecision precision) override;
+ dcsctp::TimeMs TimeMillis() override;
+ uint32_t GetRandomInt(uint32_t low, uint32_t high) override;
+ void OnTotalBufferedAmountLow() override;
+ void OnMessageReceived(dcsctp::DcSctpMessage message) override;
+ void OnError(dcsctp::ErrorKind error, absl::string_view message) override;
+ void OnAborted(dcsctp::ErrorKind error, absl::string_view message) override;
+ void OnConnected() override;
+ void OnClosed() override;
+ void OnConnectionRestarted() override;
+ void OnStreamsResetFailed(
+ rtc::ArrayView<const dcsctp::StreamID> outgoing_streams,
+ absl::string_view reason) override;
+ void OnStreamsResetPerformed(
+ rtc::ArrayView<const dcsctp::StreamID> outgoing_streams) override;
+ void OnIncomingStreamsReset(
+ rtc::ArrayView<const dcsctp::StreamID> incoming_streams) override;
+
+ // Transport callbacks
+ void ConnectTransportSignals();
+ void DisconnectTransportSignals();
+ void OnTransportWritableState(rtc::PacketTransportInternal* transport);
+ void OnTransportReadPacket(rtc::PacketTransportInternal* transport,
+ const char* data,
+ size_t length,
+ const int64_t& /* packet_time_us */,
+ int flags);
+ void OnTransportClosed(rtc::PacketTransportInternal* transport);
+
+ void MaybeConnectSocket();
+
+ rtc::Thread* network_thread_;
+ rtc::PacketTransportInternal* transport_;
+ Clock* clock_;
+ Random random_;
+
+ std::unique_ptr<dcsctp::DcSctpSocketFactory> socket_factory_;
+ dcsctp::TaskQueueTimeoutFactory task_queue_timeout_factory_;
+ std::unique_ptr<dcsctp::DcSctpSocketInterface> socket_;
+ std::string debug_name_ = "DcSctpTransport";
+ rtc::CopyOnWriteBuffer receive_buffer_;
+
+ // Used to keep track of the state of data channels.
+ // Reset needs to happen both ways before signaling the transport
+ // is closed.
+ struct StreamState {
+ // True when the local connection has initiated the reset.
+ // If a connection receives a reset for a stream that isn't
+ // already being reset locally, it needs to fire the signal
+ // SignalClosingProcedureStartedRemotely.
+ bool closure_initiated = false;
+ // True when the local connection received OnIncomingStreamsReset
+ bool incoming_reset_done = false;
+ // True when the local connection received OnStreamsResetPerformed
+ bool outgoing_reset_done = false;
+ };
+
+ // Map of all currently open or closing data channels
+ flat_map<dcsctp::StreamID, StreamState> stream_states_
+ RTC_GUARDED_BY(network_thread_);
+ bool ready_to_send_data_ RTC_GUARDED_BY(network_thread_) = false;
+ std::function<void()> on_connected_callback_ RTC_GUARDED_BY(network_thread_);
+ DataChannelSink* data_channel_sink_ RTC_GUARDED_BY(network_thread_) = nullptr;
+};
+
+} // namespace webrtc
+
+#endif // MEDIA_SCTP_DCSCTP_TRANSPORT_H_
diff --git a/third_party/libwebrtc/media/sctp/dcsctp_transport_unittest.cc b/third_party/libwebrtc/media/sctp/dcsctp_transport_unittest.cc
new file mode 100644
index 0000000000..65fc3a1690
--- /dev/null
+++ b/third_party/libwebrtc/media/sctp/dcsctp_transport_unittest.cc
@@ -0,0 +1,251 @@
+/*
+ * Copyright 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/sctp/dcsctp_transport.h"
+
+#include <memory>
+#include <utility>
+
+#include "net/dcsctp/public/mock_dcsctp_socket.h"
+#include "net/dcsctp/public/mock_dcsctp_socket_factory.h"
+#include "p2p/base/fake_packet_transport.h"
+#include "test/gtest.h"
+
+using ::testing::_;
+using ::testing::ByMove;
+using ::testing::DoAll;
+using ::testing::ElementsAre;
+using ::testing::InSequence;
+using ::testing::Invoke;
+using ::testing::NiceMock;
+using ::testing::Return;
+using ::testing::ReturnPointee;
+
+namespace webrtc {
+
+namespace {
+class MockDataChannelSink : public DataChannelSink {
+ public:
+ MOCK_METHOD(void, OnConnected, ());
+
+ // DataChannelSink
+ MOCK_METHOD(void,
+ OnDataReceived,
+ (int, DataMessageType, const rtc::CopyOnWriteBuffer&));
+ MOCK_METHOD(void, OnChannelClosing, (int));
+ MOCK_METHOD(void, OnChannelClosed, (int));
+ MOCK_METHOD(void, OnReadyToSend, ());
+ MOCK_METHOD(void, OnTransportClosed, (RTCError));
+};
+
+static_assert(!std::is_abstract_v<MockDataChannelSink>);
+
+class Peer {
+ public:
+ Peer() : fake_packet_transport_("transport"), simulated_clock_(1000) {
+ auto socket_ptr = std::make_unique<dcsctp::MockDcSctpSocket>();
+ socket_ = socket_ptr.get();
+
+ auto mock_dcsctp_socket_factory =
+ std::make_unique<dcsctp::MockDcSctpSocketFactory>();
+ EXPECT_CALL(*mock_dcsctp_socket_factory, Create)
+ .Times(1)
+ .WillOnce(Return(ByMove(std::move(socket_ptr))));
+
+ sctp_transport_ = std::make_unique<webrtc::DcSctpTransport>(
+ rtc::Thread::Current(), &fake_packet_transport_, &simulated_clock_,
+ std::move(mock_dcsctp_socket_factory));
+ sctp_transport_->SetDataChannelSink(&sink_);
+ sctp_transport_->SetOnConnectedCallback([this]() { sink_.OnConnected(); });
+ }
+
+ rtc::FakePacketTransport fake_packet_transport_;
+ webrtc::SimulatedClock simulated_clock_;
+ dcsctp::MockDcSctpSocket* socket_;
+ std::unique_ptr<webrtc::DcSctpTransport> sctp_transport_;
+ NiceMock<MockDataChannelSink> sink_;
+};
+} // namespace
+
+TEST(DcSctpTransportTest, OpenSequence) {
+ rtc::AutoThread main_thread;
+ Peer peer_a;
+ peer_a.fake_packet_transport_.SetWritable(true);
+
+ EXPECT_CALL(*peer_a.socket_, Connect)
+ .Times(1)
+ .WillOnce(Invoke(peer_a.sctp_transport_.get(),
+ &dcsctp::DcSctpSocketCallbacks::OnConnected));
+ EXPECT_CALL(peer_a.sink_, OnReadyToSend);
+ EXPECT_CALL(peer_a.sink_, OnConnected);
+
+ peer_a.sctp_transport_->Start(5000, 5000, 256 * 1024);
+}
+
+// Tests that the close sequence invoked from one end results in the stream to
+// be reset from both ends and all the proper signals are sent.
+TEST(DcSctpTransportTest, CloseSequence) {
+ rtc::AutoThread main_thread;
+ Peer peer_a;
+ Peer peer_b;
+ peer_a.fake_packet_transport_.SetDestination(&peer_b.fake_packet_transport_,
+ false);
+ {
+ InSequence sequence;
+
+ EXPECT_CALL(*peer_a.socket_, ResetStreams(ElementsAre(dcsctp::StreamID(1))))
+ .WillOnce(Return(dcsctp::ResetStreamsStatus::kPerformed));
+
+ EXPECT_CALL(*peer_b.socket_, ResetStreams(ElementsAre(dcsctp::StreamID(1))))
+ .WillOnce(Return(dcsctp::ResetStreamsStatus::kPerformed));
+
+ EXPECT_CALL(peer_a.sink_, OnChannelClosing(1)).Times(0);
+ EXPECT_CALL(peer_b.sink_, OnChannelClosing(1));
+ EXPECT_CALL(peer_a.sink_, OnChannelClosed(1));
+ EXPECT_CALL(peer_b.sink_, OnChannelClosed(1));
+ }
+
+ peer_a.sctp_transport_->Start(5000, 5000, 256 * 1024);
+ peer_b.sctp_transport_->Start(5000, 5000, 256 * 1024);
+ peer_a.sctp_transport_->OpenStream(1);
+ peer_b.sctp_transport_->OpenStream(1);
+ peer_a.sctp_transport_->ResetStream(1);
+
+ // Simulate the callbacks from the stream resets
+ dcsctp::StreamID streams[1] = {dcsctp::StreamID(1)};
+ static_cast<dcsctp::DcSctpSocketCallbacks*>(peer_a.sctp_transport_.get())
+ ->OnStreamsResetPerformed(streams);
+ static_cast<dcsctp::DcSctpSocketCallbacks*>(peer_b.sctp_transport_.get())
+ ->OnIncomingStreamsReset(streams);
+ static_cast<dcsctp::DcSctpSocketCallbacks*>(peer_a.sctp_transport_.get())
+ ->OnIncomingStreamsReset(streams);
+ static_cast<dcsctp::DcSctpSocketCallbacks*>(peer_b.sctp_transport_.get())
+ ->OnStreamsResetPerformed(streams);
+}
+
+// Tests that the close sequence initiated from both peers at the same time
+// terminates properly. Both peers will think they initiated it, so no
+// OnClosingProcedureStartedRemotely should be called.
+TEST(DcSctpTransportTest, CloseSequenceSimultaneous) {
+ rtc::AutoThread main_thread;
+ Peer peer_a;
+ Peer peer_b;
+ peer_a.fake_packet_transport_.SetDestination(&peer_b.fake_packet_transport_,
+ false);
+ {
+ InSequence sequence;
+
+ EXPECT_CALL(*peer_a.socket_, ResetStreams(ElementsAre(dcsctp::StreamID(1))))
+ .WillOnce(Return(dcsctp::ResetStreamsStatus::kPerformed));
+
+ EXPECT_CALL(*peer_b.socket_, ResetStreams(ElementsAre(dcsctp::StreamID(1))))
+ .WillOnce(Return(dcsctp::ResetStreamsStatus::kPerformed));
+
+ EXPECT_CALL(peer_a.sink_, OnChannelClosing(1)).Times(0);
+ EXPECT_CALL(peer_b.sink_, OnChannelClosing(1)).Times(0);
+ EXPECT_CALL(peer_a.sink_, OnChannelClosed(1));
+ EXPECT_CALL(peer_b.sink_, OnChannelClosed(1));
+ }
+
+ peer_a.sctp_transport_->Start(5000, 5000, 256 * 1024);
+ peer_b.sctp_transport_->Start(5000, 5000, 256 * 1024);
+ peer_a.sctp_transport_->OpenStream(1);
+ peer_b.sctp_transport_->OpenStream(1);
+ peer_a.sctp_transport_->ResetStream(1);
+ peer_b.sctp_transport_->ResetStream(1);
+
+ // Simulate the callbacks from the stream resets
+ dcsctp::StreamID streams[1] = {dcsctp::StreamID(1)};
+ static_cast<dcsctp::DcSctpSocketCallbacks*>(peer_a.sctp_transport_.get())
+ ->OnStreamsResetPerformed(streams);
+ static_cast<dcsctp::DcSctpSocketCallbacks*>(peer_b.sctp_transport_.get())
+ ->OnStreamsResetPerformed(streams);
+ static_cast<dcsctp::DcSctpSocketCallbacks*>(peer_a.sctp_transport_.get())
+ ->OnIncomingStreamsReset(streams);
+ static_cast<dcsctp::DcSctpSocketCallbacks*>(peer_b.sctp_transport_.get())
+ ->OnIncomingStreamsReset(streams);
+}
+
+TEST(DcSctpTransportTest, DiscardMessageClosedChannel) {
+ rtc::AutoThread main_thread;
+ Peer peer_a;
+
+ EXPECT_CALL(*peer_a.socket_, Send(_, _)).Times(0);
+
+ peer_a.sctp_transport_->Start(5000, 5000, 256 * 1024);
+
+ SendDataParams params;
+ rtc::CopyOnWriteBuffer payload;
+ EXPECT_EQ(peer_a.sctp_transport_->SendData(1, params, payload).type(),
+ RTCErrorType::INVALID_STATE);
+}
+
+TEST(DcSctpTransportTest, DiscardMessageClosingChannel) {
+ rtc::AutoThread main_thread;
+ Peer peer_a;
+
+ EXPECT_CALL(*peer_a.socket_, Send(_, _)).Times(0);
+
+ peer_a.sctp_transport_->OpenStream(1);
+ peer_a.sctp_transport_->Start(5000, 5000, 256 * 1024);
+ peer_a.sctp_transport_->ResetStream(1);
+
+ SendDataParams params;
+ rtc::CopyOnWriteBuffer payload;
+ EXPECT_EQ(peer_a.sctp_transport_->SendData(1, params, payload).type(),
+ RTCErrorType::INVALID_STATE);
+}
+
+TEST(DcSctpTransportTest, SendDataOpenChannel) {
+ rtc::AutoThread main_thread;
+ Peer peer_a;
+ dcsctp::DcSctpOptions options;
+
+ EXPECT_CALL(*peer_a.socket_, Send(_, _)).Times(1);
+ EXPECT_CALL(*peer_a.socket_, options()).WillOnce(ReturnPointee(&options));
+
+ peer_a.sctp_transport_->OpenStream(1);
+ peer_a.sctp_transport_->Start(5000, 5000, 256 * 1024);
+
+ SendDataParams params;
+ rtc::CopyOnWriteBuffer payload;
+ EXPECT_TRUE(peer_a.sctp_transport_->SendData(1, params, payload).ok());
+}
+
+TEST(DcSctpTransportTest, DeliversMessage) {
+ rtc::AutoThread main_thread;
+ Peer peer_a;
+
+ EXPECT_CALL(peer_a.sink_,
+ OnDataReceived(1, webrtc::DataMessageType::kBinary, _))
+ .Times(1);
+
+ peer_a.sctp_transport_->OpenStream(1);
+ peer_a.sctp_transport_->Start(5000, 5000, 256 * 1024);
+
+ static_cast<dcsctp::DcSctpSocketCallbacks*>(peer_a.sctp_transport_.get())
+ ->OnMessageReceived(
+ dcsctp::DcSctpMessage(dcsctp::StreamID(1), dcsctp::PPID(53), {0}));
+}
+
+TEST(DcSctpTransportTest, DropMessageWithUnknownPpid) {
+ rtc::AutoThread main_thread;
+ Peer peer_a;
+
+ EXPECT_CALL(peer_a.sink_, OnDataReceived(_, _, _)).Times(0);
+
+ peer_a.sctp_transport_->OpenStream(1);
+ peer_a.sctp_transport_->Start(5000, 5000, 256 * 1024);
+
+ static_cast<dcsctp::DcSctpSocketCallbacks*>(peer_a.sctp_transport_.get())
+ ->OnMessageReceived(
+ dcsctp::DcSctpMessage(dcsctp::StreamID(1), dcsctp::PPID(1337), {0}));
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/media/sctp/sctp_transport_factory.cc b/third_party/libwebrtc/media/sctp/sctp_transport_factory.cc
new file mode 100644
index 0000000000..457bc5f889
--- /dev/null
+++ b/third_party/libwebrtc/media/sctp/sctp_transport_factory.cc
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "media/sctp/sctp_transport_factory.h"
+
+#include "rtc_base/system/unused.h"
+
+#ifdef WEBRTC_HAVE_DCSCTP
+#include "media/sctp/dcsctp_transport.h" // nogncheck
+#include "system_wrappers/include/clock.h" // nogncheck
+#endif
+
+namespace cricket {
+
+SctpTransportFactory::SctpTransportFactory(rtc::Thread* network_thread)
+ : network_thread_(network_thread) {
+ RTC_UNUSED(network_thread_);
+}
+
+std::unique_ptr<SctpTransportInternal>
+SctpTransportFactory::CreateSctpTransport(
+ rtc::PacketTransportInternal* transport) {
+ std::unique_ptr<SctpTransportInternal> result;
+#ifdef WEBRTC_HAVE_DCSCTP
+ result = std::unique_ptr<SctpTransportInternal>(new webrtc::DcSctpTransport(
+ network_thread_, transport, webrtc::Clock::GetRealTimeClock()));
+#endif
+ return result;
+}
+
+} // namespace cricket
diff --git a/third_party/libwebrtc/media/sctp/sctp_transport_factory.h b/third_party/libwebrtc/media/sctp/sctp_transport_factory.h
new file mode 100644
index 0000000000..4fff214129
--- /dev/null
+++ b/third_party/libwebrtc/media/sctp/sctp_transport_factory.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MEDIA_SCTP_SCTP_TRANSPORT_FACTORY_H_
+#define MEDIA_SCTP_SCTP_TRANSPORT_FACTORY_H_
+
+#include <memory>
+
+#include "api/transport/sctp_transport_factory_interface.h"
+#include "media/sctp/sctp_transport_internal.h"
+#include "rtc_base/thread.h"
+
+namespace cricket {
+
+class SctpTransportFactory : public webrtc::SctpTransportFactoryInterface {
+ public:
+ explicit SctpTransportFactory(rtc::Thread* network_thread);
+
+ std::unique_ptr<SctpTransportInternal> CreateSctpTransport(
+ rtc::PacketTransportInternal* transport) override;
+
+ private:
+ rtc::Thread* network_thread_;
+};
+
+} // namespace cricket
+
+#endif // MEDIA_SCTP_SCTP_TRANSPORT_FACTORY_H_
diff --git a/third_party/libwebrtc/media/sctp/sctp_transport_internal.h b/third_party/libwebrtc/media/sctp/sctp_transport_internal.h
new file mode 100644
index 0000000000..8a7450f405
--- /dev/null
+++ b/third_party/libwebrtc/media/sctp/sctp_transport_internal.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MEDIA_SCTP_SCTP_TRANSPORT_INTERNAL_H_
+#define MEDIA_SCTP_SCTP_TRANSPORT_INTERNAL_H_
+
+// TODO(deadbeef): Move SCTP code out of media/, and make it not depend on
+// anything in media/.
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "api/rtc_error.h"
+#include "api/transport/data_channel_transport_interface.h"
+#include "media/base/media_channel.h"
+#include "p2p/base/packet_transport_internal.h"
+#include "rtc_base/copy_on_write_buffer.h"
+#include "rtc_base/thread.h"
+
+namespace cricket {
+
+// Constants that are important to API users
+// The size of the SCTP association send buffer. 256kB, the usrsctp default.
+constexpr int kSctpSendBufferSize = 256 * 1024;
+
+// The number of outgoing streams that we'll negotiate. Since stream IDs (SIDs)
+// are 0-based, the highest usable SID is 1023.
+//
+// It's recommended to use the maximum of 65535 in:
+// https://tools.ietf.org/html/draft-ietf-rtcweb-data-channel-13#section-6.2
+// However, we use 1024 in order to save memory. usrsctp allocates 104 bytes
+// for each pair of incoming/outgoing streams (on a 64-bit system), so 65535
+// streams would waste ~6MB.
+//
+// Note: "max" and "min" here are inclusive.
+constexpr uint16_t kMaxSctpStreams = 1024;
+constexpr uint16_t kMaxSctpSid = kMaxSctpStreams - 1;
+constexpr uint16_t kMinSctpSid = 0;
+// The maximum number of streams that can be negotiated according to spec.
+constexpr uint16_t kSpecMaxSctpSid = 65535;
+
+// This is the default SCTP port to use. It is passed along the wire and the
+// connectee and connector must be using the same port. It is not related to the
+// ports at the IP level. (Corresponds to: sockaddr_conn.sconn_port in
+// usrsctp.h)
+const int kSctpDefaultPort = 5000;
+
+// Error cause codes defined at
+// https://www.iana.org/assignments/sctp-parameters/sctp-parameters.xhtml#sctp-parameters-24
+enum class SctpErrorCauseCode : uint16_t {
+ kInvalidStreamIdentifier = 1,
+ kMissingMandatoryParameter = 2,
+ kStaleCookieError = 3,
+ kOutOfResource = 4,
+ kUnresolvableAddress = 5,
+ kUnrecognizedChunkType = 6,
+ kInvalidMandatoryParameter = 7,
+ kUnrecognizedParameters = 8,
+ kNoUserData = 9,
+ kCookieReceivedWhileShuttingDown = 10,
+ kRestartWithNewAddresses = 11,
+ kUserInitiatedAbort = 12,
+ kProtocolViolation = 13,
+};
+
+// Abstract SctpTransport interface for use internally (by PeerConnection etc.).
+// Exists to allow mock/fake SctpTransports to be created.
+class SctpTransportInternal {
+ public:
+ virtual ~SctpTransportInternal() {}
+
+ virtual void SetOnConnectedCallback(std::function<void()> callback) = 0;
+ virtual void SetDataChannelSink(webrtc::DataChannelSink* sink) = 0;
+
+  // Changes which underlying DTLS transport is used. Used when switching which
+ // bundled transport the SctpTransport uses.
+ virtual void SetDtlsTransport(rtc::PacketTransportInternal* transport) = 0;
+
+ // When Start is called, connects as soon as possible; this can be called
+ // before DTLS completes, in which case the connection will begin when DTLS
+ // completes. This method can be called multiple times, though not if either
+ // of the ports are changed.
+ //
+ // `local_sctp_port` and `remote_sctp_port` are passed along the wire and the
+ // listener and connector must be using the same port. They are not related
+ // to the ports at the IP level. If set to -1, we default to
+ // kSctpDefaultPort.
+ // `max_message_size_` sets the max message size on the connection.
+ // It must be smaller than or equal to kSctpSendBufferSize.
+  // It can be changed by a second Start() call.
+ //
+ // TODO(deadbeef): Support calling Start with different local/remote ports
+ // and create a new association? Not clear if this is something we need to
+ // support though. See: https://github.com/w3c/webrtc-pc/issues/979
+ virtual bool Start(int local_sctp_port,
+ int remote_sctp_port,
+ int max_message_size) = 0;
+
+ // NOTE: Initially there was a "Stop" method here, but it was never used, so
+ // it was removed.
+
+ // Informs SctpTransport that `sid` will start being used. Returns false if
+ // it is impossible to use `sid`, or if it's already in use.
+ // Until calling this, can't send data using `sid`.
+ // TODO(deadbeef): Actually implement the "returns false if `sid` can't be
+ // used" part. See:
+ // https://bugs.chromium.org/p/chromium/issues/detail?id=619849
+ virtual bool OpenStream(int sid) = 0;
+ // The inverse of OpenStream. Begins the closing procedure, which will
+ // eventually result in SignalClosingProcedureComplete on the side that
+ // initiates it, and both SignalClosingProcedureStartedRemotely and
+ // SignalClosingProcedureComplete on the other side.
+ virtual bool ResetStream(int sid) = 0;
+ // Send data down this channel.
+  // Returns RTCError::OK() if successful, an error otherwise. Notably
+ // RTCErrorType::RESOURCE_EXHAUSTED for blocked operations.
+ virtual webrtc::RTCError SendData(int sid,
+ const webrtc::SendDataParams& params,
+ const rtc::CopyOnWriteBuffer& payload) = 0;
+
+  // Indicates when the SCTP socket is created and not blocked by congestion
+  // control. This changes to false when SDR_BLOCK is returned from SendData,
+  // and changes to true when SignalReadyToSendData is fired.
+  // The underlying DTLS/ICE channels may be unwritable while
+  // ReadyToSendData is true, because data
+  // can still be queued in usrsctp.
+ virtual bool ReadyToSendData() = 0;
+ // Returns the current max message size, set with Start().
+ virtual int max_message_size() const = 0;
+ // Returns the current negotiated max # of outbound streams.
+ // Will return absl::nullopt if negotiation is incomplete.
+ virtual absl::optional<int> max_outbound_streams() const = 0;
+ // Returns the current negotiated max # of inbound streams.
+ virtual absl::optional<int> max_inbound_streams() const = 0;
+
+ // Helper for debugging.
+ virtual void set_debug_name_for_testing(const char* debug_name) = 0;
+};
+
+} // namespace cricket
+
+#endif // MEDIA_SCTP_SCTP_TRANSPORT_INTERNAL_H_
diff --git a/third_party/libwebrtc/media/stream_params_gn/moz.build b/third_party/libwebrtc/media/stream_params_gn/moz.build
new file mode 100644
index 0000000000..71875c4e01
--- /dev/null
+++ b/third_party/libwebrtc/media/stream_params_gn/moz.build
@@ -0,0 +1,205 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "!/third_party/libwebrtc/gen",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["RTC_ENABLE_WIN_WGC"] = True
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+if CONFIG["TARGET_CPU"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["TARGET_CPU"] == "arm":
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["TARGET_CPU"] == "mips32":
+
+ DEFINES["MIPS32_LE"] = True
+ DEFINES["MIPS_FPU_LE"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["TARGET_CPU"] == "mips64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["TARGET_CPU"] == "x86":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["TARGET_CPU"] == "x86_64":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android" and CONFIG["TARGET_CPU"] == "arm":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["OS_TARGET"] == "Android" and CONFIG["TARGET_CPU"] == "x86":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "aarch64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "arm":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "x86":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "x86_64":
+
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("stream_params_gn")