summaryrefslogtreecommitdiffstats
path: root/third_party/libwebrtc/webrtc/audio
diff options
context:
space:
mode:
Diffstat (limited to 'third_party/libwebrtc/webrtc/audio')
-rw-r--r--third_party/libwebrtc/webrtc/audio/BUILD.gn220
-rw-r--r--third_party/libwebrtc/webrtc/audio/DEPS28
-rw-r--r--third_party/libwebrtc/webrtc/audio/OWNERS7
-rw-r--r--third_party/libwebrtc/webrtc/audio/audio_gn/moz.build239
-rw-r--r--third_party/libwebrtc/webrtc/audio/audio_receive_stream.cc361
-rw-r--r--third_party/libwebrtc/webrtc/audio/audio_receive_stream.h102
-rw-r--r--third_party/libwebrtc/webrtc/audio/audio_receive_stream_unittest.cc384
-rw-r--r--third_party/libwebrtc/webrtc/audio/audio_send_stream.cc658
-rw-r--r--third_party/libwebrtc/webrtc/audio/audio_send_stream.h132
-rw-r--r--third_party/libwebrtc/webrtc/audio/audio_send_stream_tests.cc238
-rw-r--r--third_party/libwebrtc/webrtc/audio/audio_send_stream_unittest.cc600
-rw-r--r--third_party/libwebrtc/webrtc/audio/audio_state.cc112
-rw-r--r--third_party/libwebrtc/webrtc/audio/audio_state.h79
-rw-r--r--third_party/libwebrtc/webrtc/audio/audio_state_unittest.cc134
-rw-r--r--third_party/libwebrtc/webrtc/audio/audio_transport_proxy.cc137
-rw-r--r--third_party/libwebrtc/webrtc/audio/audio_transport_proxy.h78
-rw-r--r--third_party/libwebrtc/webrtc/audio/conversion.h27
-rw-r--r--third_party/libwebrtc/webrtc/audio/null_audio_poller.cc66
-rw-r--r--third_party/libwebrtc/webrtc/audio/null_audio_poller.h38
-rw-r--r--third_party/libwebrtc/webrtc/audio/scoped_voe_interface.h45
-rw-r--r--third_party/libwebrtc/webrtc/audio/test/audio_bwe_integration_test.cc155
-rw-r--r--third_party/libwebrtc/webrtc/audio/test/audio_bwe_integration_test.h57
-rw-r--r--third_party/libwebrtc/webrtc/audio/test/audio_end_to_end_test.cc104
-rw-r--r--third_party/libwebrtc/webrtc/audio/test/audio_end_to_end_test.h68
-rw-r--r--third_party/libwebrtc/webrtc/audio/test/audio_stats_test.cc117
-rw-r--r--third_party/libwebrtc/webrtc/audio/test/low_bandwidth_audio_test.cc112
-rwxr-xr-xthird_party/libwebrtc/webrtc/audio/test/low_bandwidth_audio_test.py259
-rwxr-xr-xthird_party/libwebrtc/webrtc/audio/test/unittests/low_bandwidth_audio_test_test.py184
-rw-r--r--third_party/libwebrtc/webrtc/audio/time_interval.cc56
-rw-r--r--third_party/libwebrtc/webrtc/audio/time_interval.h65
-rw-r--r--third_party/libwebrtc/webrtc/audio/time_interval_unittest.cc48
-rw-r--r--third_party/libwebrtc/webrtc/audio/utility/BUILD.gn48
-rw-r--r--third_party/libwebrtc/webrtc/audio/utility/audio_frame_operations.cc330
-rw-r--r--third_party/libwebrtc/webrtc/audio/utility/audio_frame_operations.h123
-rw-r--r--third_party/libwebrtc/webrtc/audio/utility/audio_frame_operations_gn/moz.build225
-rw-r--r--third_party/libwebrtc/webrtc/audio/utility/audio_frame_operations_unittest.cc629
36 files changed, 6265 insertions, 0 deletions
diff --git a/third_party/libwebrtc/webrtc/audio/BUILD.gn b/third_party/libwebrtc/webrtc/audio/BUILD.gn
new file mode 100644
index 0000000000..2a29ceb22b
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/audio/BUILD.gn
@@ -0,0 +1,220 @@
+# Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../webrtc.gni")
+if (is_android) {
+ import("//build/config/android/config.gni")
+ import("//build/config/android/rules.gni")
+}
+
+rtc_static_library("audio") {
+ sources = [
+ "audio_receive_stream.cc",
+ "audio_receive_stream.h",
+ "audio_send_stream.cc",
+ "audio_send_stream.h",
+ "audio_state.cc",
+ "audio_state.h",
+ "audio_transport_proxy.cc",
+ "audio_transport_proxy.h",
+ "conversion.h",
+ "null_audio_poller.cc",
+ "null_audio_poller.h",
+ "scoped_voe_interface.h",
+ "time_interval.cc",
+ "time_interval.h",
+ ]
+
+ if (!build_with_chromium && is_clang) {
+ # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
+ suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+ }
+
+ deps = [
+ "..:webrtc_common",
+ "../api:audio_mixer_api",
+ "../api:call_api",
+ "../api:optional",
+ "../api/audio_codecs:audio_codecs_api",
+ "../api/audio_codecs:builtin_audio_encoder_factory",
+ "../call:bitrate_allocator",
+ "../call:call_interfaces",
+ "../call:rtp_interfaces",
+ "../common_audio",
+ "../modules/audio_coding:cng",
+ "../modules/audio_device",
+ "../modules/audio_processing",
+ "../modules/bitrate_controller:bitrate_controller",
+ "../modules/congestion_controller:congestion_controller",
+ "../modules/pacing:pacing",
+ "../modules/remote_bitrate_estimator:remote_bitrate_estimator",
+ "../modules/rtp_rtcp:rtp_rtcp",
+ "../rtc_base:rtc_base",
+ "../rtc_base:rtc_base_approved",
+ "../rtc_base:rtc_task_queue",
+ "../system_wrappers",
+ "../voice_engine",
+ ]
+}
+if (rtc_include_tests) {
+ rtc_source_set("audio_end_to_end_test") {
+ testonly = true
+
+ sources = [
+ "test/audio_end_to_end_test.cc",
+ "test/audio_end_to_end_test.h",
+ ]
+ deps = [
+ ":audio",
+ "../system_wrappers:system_wrappers",
+ "../test:fake_audio_device",
+ "../test:test_common",
+ "../test:test_support",
+ ]
+
+ if (!build_with_chromium && is_clang) {
+ # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
+ suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+ }
+ }
+
+ rtc_source_set("audio_tests") {
+ testonly = true
+
+ sources = [
+ "audio_receive_stream_unittest.cc",
+ "audio_send_stream_tests.cc",
+ "audio_send_stream_unittest.cc",
+ "audio_state_unittest.cc",
+ "time_interval_unittest.cc",
+ ]
+ deps = [
+ ":audio",
+ ":audio_end_to_end_test",
+ "../api:mock_audio_mixer",
+ "../call:mock_rtp_interfaces",
+ "../call:rtp_interfaces",
+ "../call:rtp_receiver",
+ "../modules/audio_device:mock_audio_device",
+ "../modules/audio_mixer:audio_mixer_impl",
+ "../modules/audio_processing:audio_processing_statistics",
+ "../modules/congestion_controller:congestion_controller",
+ "../modules/congestion_controller:mock_congestion_controller",
+ "../modules/pacing:mock_paced_sender",
+ "../modules/pacing:pacing",
+ "../modules/rtp_rtcp:mock_rtp_rtcp",
+ "../modules/rtp_rtcp:rtp_rtcp_format",
+ "../rtc_base:rtc_base_approved",
+ "../rtc_base:rtc_base_tests_utils",
+ "../rtc_base:rtc_task_queue",
+ "../system_wrappers:system_wrappers",
+ "../test:audio_codec_mocks",
+ "../test:rtp_test_utils",
+ "../test:test_common",
+ "../test:test_support",
+ "../voice_engine",
+ "utility:utility_tests",
+ "//testing/gmock",
+ "//testing/gtest",
+ ]
+
+ if (!rtc_use_memcheck) {
+ # This test is timing dependent, which rules out running on memcheck bots.
+ sources += [ "test/audio_stats_test.cc" ]
+ }
+
+ if (!build_with_chromium && is_clang) {
+ # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
+ suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+ }
+ }
+
+ if (rtc_enable_protobuf) {
+ rtc_test("low_bandwidth_audio_test") {
+ testonly = true
+
+ sources = [
+ "test/low_bandwidth_audio_test.cc",
+ ]
+
+ deps = [
+ ":audio_end_to_end_test",
+ "../common_audio",
+ "../rtc_base:rtc_base_approved",
+ "../system_wrappers",
+ "../test:fake_audio_device",
+ "../test:test_common",
+ "../test:test_main",
+ "//testing/gmock",
+ "//testing/gtest",
+ ]
+ if (is_android) {
+ deps += [ "//testing/android/native_test:native_test_native_code" ]
+ }
+
+ data = [
+ "../resources/voice_engine/audio_dtx16.wav",
+ "../resources/voice_engine/audio_tiny16.wav",
+ "../resources/voice_engine/audio_tiny48.wav",
+ "test/low_bandwidth_audio_test.py",
+ ]
+ if (is_linux) {
+ data += [
+ "../tools_webrtc/audio_quality/linux/PolqaOem64",
+ "../tools_webrtc/audio_quality/linux/pesq",
+ ]
+ }
+ if (is_win) {
+ data += [
+ "../tools_webrtc/audio_quality/win/PolqaOem64.dll",
+ "../tools_webrtc/audio_quality/win/PolqaOem64.exe",
+ "../tools_webrtc/audio_quality/win/pesq.exe",
+ "../tools_webrtc/audio_quality/win/vcomp120.dll",
+ ]
+ }
+ if (is_mac) {
+ data += [ "../tools_webrtc/audio_quality/mac/pesq" ]
+ }
+
+ if (!build_with_chromium && is_clang) {
+ # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163)
+ suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+ }
+ }
+ }
+
+ rtc_source_set("audio_perf_tests") {
+ testonly = true
+
+ sources = [
+ "test/audio_bwe_integration_test.cc",
+ "test/audio_bwe_integration_test.h",
+ ]
+ deps = [
+ "../common_audio",
+ "../rtc_base:rtc_base_approved",
+ "../system_wrappers",
+ "../test:fake_audio_device",
+ "../test:field_trial",
+ "../test:single_threaded_task_queue",
+ "../test:test_common",
+ "../test:test_main",
+ "//testing/gmock",
+ "//testing/gtest",
+ ]
+
+ data = [
+ "//resources/voice_engine/audio_dtx16.wav",
+ ]
+
+ if (!build_with_chromium && is_clang) {
+ # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
+ suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+ }
+ }
+}
diff --git a/third_party/libwebrtc/webrtc/audio/DEPS b/third_party/libwebrtc/webrtc/audio/DEPS
new file mode 100644
index 0000000000..0f952a3e7a
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/audio/DEPS
@@ -0,0 +1,28 @@
+include_rules = [
+ "+call",
+ "+common_audio",
+ "+logging/rtc_event_log",
+ "+modules/audio_coding",
+ "+modules/audio_device",
+ "+modules/audio_mixer",
+ "+modules/audio_processing/include",
+ "+modules/bitrate_controller",
+ "+modules/congestion_controller",
+ "+modules/pacing",
+ "+modules/remote_bitrate_estimator",
+ "+modules/rtp_rtcp",
+ "+system_wrappers",
+ "+voice_engine",
+]
+
+specific_include_rules = {
+ "audio_send_stream.cc": [
+ "+modules/audio_coding/codecs/cng/audio_encoder_cng.h",
+ ],
+ # TODO(ossu): Remove this exception when builtin_audio_encoder_factory.h
+ # has moved to api/, or when the proper mocks have been made.
+ "audio_send_stream_unittest.cc": [
+ "+modules/audio_coding/codecs/builtin_audio_encoder_factory.h",
+ ],
+}
+
diff --git a/third_party/libwebrtc/webrtc/audio/OWNERS b/third_party/libwebrtc/webrtc/audio/OWNERS
new file mode 100644
index 0000000000..d53e4fabf6
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/audio/OWNERS
@@ -0,0 +1,7 @@
+solenberg@webrtc.org
+ossu@webrtc.org
+
+# These are for the common case of adding or renaming files. If you're doing
+# structural changes, please get a review from a reviewer in this file.
+per-file *.gn=*
+per-file *.gni=*
diff --git a/third_party/libwebrtc/webrtc/audio/audio_gn/moz.build b/third_party/libwebrtc/webrtc/audio/audio_gn/moz.build
new file mode 100644
index 0000000000..05fbc3ba00
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/audio/audio_gn/moz.build
@@ -0,0 +1,239 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["CHROMIUM_BUILD"] = True
+DEFINES["V8_DEPRECATION_WARNINGS"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_RESTRICT_LOGGING"] = True
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/ipc/glue",
+ "/third_party/libwebrtc/webrtc/",
+ "/third_party/libwebrtc/webrtc/common_audio/resampler/include/",
+ "/third_party/libwebrtc/webrtc/common_audio/signal_processing/include/",
+ "/third_party/libwebrtc/webrtc/common_audio/vad/include/",
+ "/third_party/libwebrtc/webrtc/modules/audio_coding/include/",
+ "/third_party/libwebrtc/webrtc/modules/audio_device/dummy/",
+ "/third_party/libwebrtc/webrtc/modules/audio_device/include/",
+ "/third_party/libwebrtc/webrtc/modules/include/",
+ "/third_party/libwebrtc/webrtc/modules/include/"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/webrtc/audio/audio_receive_stream.cc",
+ "/third_party/libwebrtc/webrtc/audio/audio_send_stream.cc",
+ "/third_party/libwebrtc/webrtc/audio/audio_state.cc",
+ "/third_party/libwebrtc/webrtc/audio/audio_transport_proxy.cc",
+ "/third_party/libwebrtc/webrtc/audio/null_audio_poller.cc",
+ "/third_party/libwebrtc/webrtc/audio/time_interval.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+ DEFINES["WTF_USE_DYNAMIC_ANNOTATIONS"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION"] = "r12b"
+ DEFINES["DISABLE_NACL"] = True
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["NO_TCMALLOC"] = True
+ DEFINES["USE_OPENSSL_CERTS"] = "1"
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["__GNU_SOURCE"] = "1"
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["NO_TCMALLOC"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORE"] = "0"
+
+ OS_LIBS += [
+ "-framework Foundation"
+ ]
+
+if CONFIG["OS_TARGET"] == "DragonFly":
+
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+
+if CONFIG["OS_TARGET"] == "FreeBSD":
+
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+
+ OS_LIBS += [
+ "m",
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "NetBSD":
+
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NO_TCMALLOC"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "1"
+ DEFINES["UNICODE"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_CRT_SECURE_NO_WARNINGS"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_USING_V110_SDK71_"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if not CONFIG["MOZ_DEBUG"] and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_FORTIFY_SOURCE"] = "2"
+
+if not CONFIG["MOZ_DEBUG"] and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_FORTIFY_SOURCE"] = "2"
+
+if not CONFIG["MOZ_DEBUG"] and CONFIG["OS_TARGET"] == "DragonFly":
+
+ DEFINES["_FORTIFY_SOURCE"] = "2"
+
+if not CONFIG["MOZ_DEBUG"] and CONFIG["OS_TARGET"] == "FreeBSD":
+
+ DEFINES["_FORTIFY_SOURCE"] = "2"
+
+if not CONFIG["MOZ_DEBUG"] and CONFIG["OS_TARGET"] == "NetBSD":
+
+ DEFINES["_FORTIFY_SOURCE"] = "2"
+
+if not CONFIG["MOZ_DEBUG"] and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_FORTIFY_SOURCE"] = "2"
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["CR_XCODE_VERSION"] = "0120"
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["CR_XCODE_VERSION"] = "0920"
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "FreeBSD":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["DISABLE_NACL"] = True
+ DEFINES["NO_TCMALLOC"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "NetBSD":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+Library("audio_gn")
diff --git a/third_party/libwebrtc/webrtc/audio/audio_receive_stream.cc b/third_party/libwebrtc/webrtc/audio/audio_receive_stream.cc
new file mode 100644
index 0000000000..f537ff0804
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/audio/audio_receive_stream.cc
@@ -0,0 +1,361 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio/audio_receive_stream.h"
+
+#include <string>
+#include <utility>
+
+#include "api/call/audio_sink.h"
+#include "audio/audio_send_stream.h"
+#include "audio/audio_state.h"
+#include "audio/conversion.h"
+#include "call/rtp_stream_receiver_controller_interface.h"
+#include "modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h"
+#include "modules/rtp_rtcp/include/rtp_receiver.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/timeutils.h"
+#include "voice_engine/channel_proxy.h"
+#include "voice_engine/include/voe_base.h"
+#include "voice_engine/voice_engine_impl.h"
+
+namespace webrtc {
+
+std::string AudioReceiveStream::Config::Rtp::ToString() const {
+ std::stringstream ss;
+ ss << "{remote_ssrc: " << remote_ssrc;
+ ss << ", local_ssrc: " << local_ssrc;
+ ss << ", transport_cc: " << (transport_cc ? "on" : "off");
+ ss << ", nack: " << nack.ToString();
+ ss << ", extensions: [";
+ for (size_t i = 0; i < extensions.size(); ++i) {
+ ss << extensions[i].ToString();
+ if (i != extensions.size() - 1) {
+ ss << ", ";
+ }
+ }
+ ss << ']';
+ ss << '}';
+ return ss.str();
+}
+
+std::string AudioReceiveStream::Config::ToString() const {
+ std::stringstream ss;
+ ss << "{rtp: " << rtp.ToString();
+ ss << ", rtcp_send_transport: "
+ << (rtcp_send_transport ? "(Transport)" : "null");
+ ss << ", voe_channel_id: " << voe_channel_id;
+ if (!sync_group.empty()) {
+ ss << ", sync_group: " << sync_group;
+ }
+ ss << '}';
+ return ss.str();
+}
+
+namespace internal {
+AudioReceiveStream::AudioReceiveStream(
+ RtpStreamReceiverControllerInterface* receiver_controller,
+ PacketRouter* packet_router,
+ const webrtc::AudioReceiveStream::Config& config,
+ const rtc::scoped_refptr<webrtc::AudioState>& audio_state,
+ webrtc::RtcEventLog* event_log)
+ : config_(config), audio_state_(audio_state) {
+ RTC_LOG(LS_INFO) << "AudioReceiveStream: " << config_.ToString();
+ RTC_DCHECK_NE(config_.voe_channel_id, -1);
+ RTC_DCHECK(audio_state_.get());
+ RTC_DCHECK(packet_router);
+
+ module_process_thread_checker_.DetachFromThread();
+
+ VoiceEngineImpl* voe_impl = static_cast<VoiceEngineImpl*>(voice_engine());
+ channel_proxy_ = voe_impl->GetChannelProxy(config_.voe_channel_id);
+ channel_proxy_->SetRtcEventLog(event_log);
+ channel_proxy_->SetLocalSSRC(config.rtp.local_ssrc);
+ // TODO(solenberg): Config NACK history window (which is a packet count),
+ // using the actual packet size for the configured codec.
+ channel_proxy_->SetNACKStatus(config_.rtp.nack.rtp_history_ms != 0,
+ config_.rtp.nack.rtp_history_ms / 20);
+
+ // TODO(ossu): This is where we'd like to set the decoder factory to
+ // use. However, since it needs to be included when constructing Channel, we
+ // cannot do that until we're able to move Channel ownership into the
+ // Audio{Send,Receive}Streams. The best we can do is check that we're not
+ // trying to use two different factories using the different interfaces.
+ RTC_CHECK(config.decoder_factory);
+ RTC_CHECK_EQ(config.decoder_factory,
+ channel_proxy_->GetAudioDecoderFactory());
+
+ channel_proxy_->RegisterTransport(config.rtcp_send_transport);
+ channel_proxy_->SetReceiveCodecs(config.decoder_map);
+
+ for (const auto& extension : config.rtp.extensions) {
+ if (extension.uri == RtpExtension::kAudioLevelUri) {
+ channel_proxy_->SetReceiveAudioLevelIndicationStatus(true, extension.id);
+ } else if (extension.uri == RtpExtension::kTransportSequenceNumberUri) {
+ channel_proxy_->EnableReceiveTransportSequenceNumber(extension.id);
+ } else if (extension.uri == RtpExtension::kCsrcAudioLevelUri) {
+ channel_proxy_->SetReceiveCsrcAudioLevelIndicationStatus(true,
+ extension.id);
+ } else {
+ RTC_NOTREACHED() << "Unsupported RTP extension.";
+ }
+ }
+ // Configure bandwidth estimation.
+ channel_proxy_->RegisterReceiverCongestionControlObjects(packet_router);
+
+ // Register with transport.
+ rtp_stream_receiver_ =
+ receiver_controller->CreateReceiver(config_.rtp.remote_ssrc,
+ channel_proxy_.get());
+}
+
+AudioReceiveStream::~AudioReceiveStream() {
+ RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+ RTC_LOG(LS_INFO) << "~AudioReceiveStream: " << config_.ToString();
+ if (playing_) {
+ Stop();
+ }
+ channel_proxy_->DisassociateSendChannel();
+ channel_proxy_->RegisterTransport(nullptr);
+ channel_proxy_->ResetReceiverCongestionControlObjects();
+ channel_proxy_->SetRtcEventLog(nullptr);
+}
+
+void AudioReceiveStream::Start() {
+ RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+ if (playing_) {
+ return;
+ }
+
+ int error = SetVoiceEnginePlayout(true);
+ if (error != 0) {
+ RTC_LOG(LS_ERROR) << "AudioReceiveStream::Start failed with error: "
+ << error;
+ return;
+ }
+
+ if (!audio_state()->mixer()->AddSource(this)) {
+ RTC_LOG(LS_ERROR) << "Failed to add source to mixer.";
+ SetVoiceEnginePlayout(false);
+ return;
+ }
+
+ playing_ = true;
+}
+
+void AudioReceiveStream::Stop() {
+ RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+ if (!playing_) {
+ return;
+ }
+ playing_ = false;
+
+ audio_state()->mixer()->RemoveSource(this);
+ SetVoiceEnginePlayout(false);
+}
+
+webrtc::AudioReceiveStream::Stats AudioReceiveStream::GetStats() const {
+ // TODO: Mozilla - currently we run stats on the STS thread
+ //RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+ webrtc::AudioReceiveStream::Stats stats;
+ stats.remote_ssrc = config_.rtp.remote_ssrc;
+
+ webrtc::CallStatistics call_stats = channel_proxy_->GetRTCPStatistics();
+ // TODO(solenberg): Don't return here if we can't get the codec - return the
+ // stats we *can* get.
+ webrtc::CodecInst codec_inst = {0};
+ if (!channel_proxy_->GetRecCodec(&codec_inst)) {
+ return stats;
+ }
+
+ stats.bytes_rcvd = call_stats.bytesReceived;
+ stats.packets_rcvd = call_stats.packetsReceived;
+ stats.packets_lost = call_stats.cumulativeLost;
+ stats.fraction_lost = Q8ToFloat(call_stats.fractionLost);
+ stats.capture_start_ntp_time_ms = call_stats.capture_start_ntp_time_ms_;
+ if (codec_inst.pltype != -1) {
+ stats.codec_name = codec_inst.plname;
+ stats.codec_payload_type = codec_inst.pltype;
+ }
+ stats.ext_seqnum = call_stats.extendedMax;
+ if (codec_inst.plfreq / 1000 > 0) {
+ stats.jitter_ms = call_stats.jitterSamples / (codec_inst.plfreq / 1000);
+ }
+ stats.delay_estimate_ms = channel_proxy_->GetDelayEstimate();
+ stats.audio_level = channel_proxy_->GetSpeechOutputLevelFullRange();
+ stats.total_output_energy = channel_proxy_->GetTotalOutputEnergy();
+ stats.total_output_duration = channel_proxy_->GetTotalOutputDuration();
+
+ // Get jitter buffer and total delay (alg + jitter + playout) stats.
+ auto ns = channel_proxy_->GetNetworkStatistics();
+ stats.jitter_buffer_ms = ns.currentBufferSize;
+ stats.jitter_buffer_preferred_ms = ns.preferredBufferSize;
+ stats.total_samples_received = ns.totalSamplesReceived;
+ stats.concealed_samples = ns.concealedSamples;
+ stats.concealment_events = ns.concealmentEvents;
+ stats.jitter_buffer_delay_seconds =
+ static_cast<double>(ns.jitterBufferDelayMs) /
+ static_cast<double>(rtc::kNumMillisecsPerSec);
+ stats.expand_rate = Q14ToFloat(ns.currentExpandRate);
+ stats.speech_expand_rate = Q14ToFloat(ns.currentSpeechExpandRate);
+ stats.secondary_decoded_rate = Q14ToFloat(ns.currentSecondaryDecodedRate);
+ stats.secondary_discarded_rate = Q14ToFloat(ns.currentSecondaryDiscardedRate);
+ stats.accelerate_rate = Q14ToFloat(ns.currentAccelerateRate);
+ stats.preemptive_expand_rate = Q14ToFloat(ns.currentPreemptiveRate);
+
+ auto ds(channel_proxy_->GetDecodingCallStatistics());
+ stats.decoding_calls_to_silence_generator = ds.calls_to_silence_generator;
+ stats.decoding_calls_to_neteq = ds.calls_to_neteq;
+ stats.decoding_normal = ds.decoded_normal;
+ stats.decoding_plc = ds.decoded_plc;
+ stats.decoding_cng = ds.decoded_cng;
+ stats.decoding_plc_cng = ds.decoded_plc_cng;
+ stats.decoding_muted_output = ds.decoded_muted_output;
+
+ return stats;
+}
+
+int AudioReceiveStream::GetOutputLevel() const {
+ RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+ return channel_proxy_->GetSpeechOutputLevel();
+}
+
+void AudioReceiveStream::SetSink(std::unique_ptr<AudioSinkInterface> sink) {
+ RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+ channel_proxy_->SetSink(std::move(sink));
+}
+
+void AudioReceiveStream::SetGain(float gain) {
+ RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+ channel_proxy_->SetChannelOutputVolumeScaling(gain);
+}
+
+std::vector<RtpSource> AudioReceiveStream::GetSources() const {
+ RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+ return channel_proxy_->GetSources();
+}
+
+AudioMixer::Source::AudioFrameInfo AudioReceiveStream::GetAudioFrameWithInfo(
+ int sample_rate_hz,
+ AudioFrame* audio_frame) {
+ return channel_proxy_->GetAudioFrameWithInfo(sample_rate_hz, audio_frame);
+}
+
+int AudioReceiveStream::Ssrc() const {
+ return config_.rtp.remote_ssrc;
+}
+
+int AudioReceiveStream::PreferredSampleRate() const {
+ return channel_proxy_->PreferredSampleRate();
+}
+
+int AudioReceiveStream::id() const {
+ RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+ return config_.rtp.remote_ssrc;
+}
+
+rtc::Optional<Syncable::Info> AudioReceiveStream::GetInfo() const {
+ RTC_DCHECK_RUN_ON(&module_process_thread_checker_);
+ Syncable::Info info;
+
+ RtpRtcp* rtp_rtcp = nullptr;
+ RtpReceiver* rtp_receiver = nullptr;
+ channel_proxy_->GetRtpRtcp(&rtp_rtcp, &rtp_receiver);
+ RTC_DCHECK(rtp_rtcp);
+ RTC_DCHECK(rtp_receiver);
+
+ if (!rtp_receiver->GetLatestTimestamps(
+ &info.latest_received_capture_timestamp,
+ &info.latest_receive_time_ms)) {
+ return rtc::nullopt;
+ }
+ if (rtp_rtcp->RemoteNTP(&info.capture_time_ntp_secs,
+ &info.capture_time_ntp_frac,
+ nullptr,
+ nullptr,
+ &info.capture_time_source_clock) != 0) {
+ return rtc::nullopt;
+ }
+
+ info.current_delay_ms = channel_proxy_->GetDelayEstimate();
+ return info;
+}
+
+uint32_t AudioReceiveStream::GetPlayoutTimestamp() const {
+ // Called on video capture thread.
+ return channel_proxy_->GetPlayoutTimestamp();
+}
+
+void AudioReceiveStream::SetMinimumPlayoutDelay(int delay_ms) {
+ RTC_DCHECK_RUN_ON(&module_process_thread_checker_);
+ return channel_proxy_->SetMinimumPlayoutDelay(delay_ms);
+}
+
+void AudioReceiveStream::AssociateSendStream(AudioSendStream* send_stream) {
+ RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+ if (send_stream) {
+ VoiceEngineImpl* voe_impl = static_cast<VoiceEngineImpl*>(voice_engine());
+ std::unique_ptr<voe::ChannelProxy> send_channel_proxy =
+ voe_impl->GetChannelProxy(send_stream->GetConfig().voe_channel_id);
+ channel_proxy_->AssociateSendChannel(*send_channel_proxy.get());
+ } else {
+ channel_proxy_->DisassociateSendChannel();
+ }
+}
+
+void AudioReceiveStream::SignalNetworkState(NetworkState state) {
+ RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+}
+
+bool AudioReceiveStream::DeliverRtcp(const uint8_t* packet, size_t length) {
+ // TODO(solenberg): Tests call this function on a network thread, libjingle
+ // calls on the worker thread. We should move towards always using a network
+ // thread. Then this check can be enabled.
+ // RTC_DCHECK(!thread_checker_.CalledOnValidThread());
+ return channel_proxy_->ReceivedRTCPPacket(packet, length);
+}
+
+void AudioReceiveStream::OnRtpPacket(const RtpPacketReceived& packet) {
+ // TODO(solenberg): Tests call this function on a network thread, libjingle
+ // calls on the worker thread. We should move towards always using a network
+ // thread. Then this check can be enabled.
+ // RTC_DCHECK(!thread_checker_.CalledOnValidThread());
+ channel_proxy_->OnRtpPacket(packet);
+}
+
+const webrtc::AudioReceiveStream::Config& AudioReceiveStream::config() const {
+ RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+ return config_;
+}
+
+VoiceEngine* AudioReceiveStream::voice_engine() const {
+ auto* voice_engine = audio_state()->voice_engine();
+ RTC_DCHECK(voice_engine);
+ return voice_engine;
+}
+
+internal::AudioState* AudioReceiveStream::audio_state() const {
+ auto* audio_state = static_cast<internal::AudioState*>(audio_state_.get());
+ RTC_DCHECK(audio_state);
+ return audio_state;
+}
+
+int AudioReceiveStream::SetVoiceEnginePlayout(bool playout) {
+ ScopedVoEInterface<VoEBase> base(voice_engine());
+ if (playout) {
+ return base->StartPlayout(config_.voe_channel_id);
+ } else {
+ return base->StopPlayout(config_.voe_channel_id);
+ }
+}
+} // namespace internal
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/audio/audio_receive_stream.h b/third_party/libwebrtc/webrtc/audio/audio_receive_stream.h
new file mode 100644
index 0000000000..a61c8963d2
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/audio/audio_receive_stream.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_AUDIO_RECEIVE_STREAM_H_
+#define AUDIO_AUDIO_RECEIVE_STREAM_H_
+
+#include <memory>
+#include <vector>
+
+#include "api/audio/audio_mixer.h"
+#include "audio/audio_state.h"
+#include "call/audio_receive_stream.h"
+#include "call/rtp_packet_sink_interface.h"
+#include "call/syncable.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/thread_checker.h"
+
+namespace webrtc {
+class PacketRouter;
+class RtcEventLog;
+class RtpPacketReceived;
+class RtpStreamReceiverControllerInterface;
+class RtpStreamReceiverInterface;
+
+namespace voe {
+class ChannelProxy;
+} // namespace voe
+
+namespace internal {
+class AudioSendStream;
+
+class AudioReceiveStream final : public webrtc::AudioReceiveStream,
+ public AudioMixer::Source,
+ public Syncable {
+ public:
+ AudioReceiveStream(RtpStreamReceiverControllerInterface* receiver_controller,
+ PacketRouter* packet_router,
+ const webrtc::AudioReceiveStream::Config& config,
+ const rtc::scoped_refptr<webrtc::AudioState>& audio_state,
+ webrtc::RtcEventLog* event_log);
+ ~AudioReceiveStream() override;
+
+ // webrtc::AudioReceiveStream implementation.
+ void Start() override;
+ void Stop() override;
+ webrtc::AudioReceiveStream::Stats GetStats() const override;
+ int GetOutputLevel() const override;
+ void SetSink(std::unique_ptr<AudioSinkInterface> sink) override;
+ void SetGain(float gain) override;
+ std::vector<webrtc::RtpSource> GetSources() const override;
+
+ // TODO(nisse): We don't formally implement RtpPacketSinkInterface, and this
+ // method shouldn't be needed. But it's currently used by the
+ // AudioReceiveStreamTest.ReceiveRtpPacket unittest. Figure out if that test
+ // should be refactored or deleted, and then delete this method.
+ void OnRtpPacket(const RtpPacketReceived& packet);
+
+ // AudioMixer::Source
+ AudioFrameInfo GetAudioFrameWithInfo(int sample_rate_hz,
+ AudioFrame* audio_frame) override;
+ int Ssrc() const override;
+ int PreferredSampleRate() const override;
+
+ // Syncable
+ int id() const override;
+ rtc::Optional<Syncable::Info> GetInfo() const override;
+ uint32_t GetPlayoutTimestamp() const override;
+ void SetMinimumPlayoutDelay(int delay_ms) override;
+
+ void AssociateSendStream(AudioSendStream* send_stream);
+ void SignalNetworkState(NetworkState state);
+ bool DeliverRtcp(const uint8_t* packet, size_t length);
+ const webrtc::AudioReceiveStream::Config& config() const;
+
+ private:
+ VoiceEngine* voice_engine() const;
+ AudioState* audio_state() const;
+ int SetVoiceEnginePlayout(bool playout);
+
+ rtc::ThreadChecker worker_thread_checker_;
+ rtc::ThreadChecker module_process_thread_checker_;
+ const webrtc::AudioReceiveStream::Config config_;
+ rtc::scoped_refptr<webrtc::AudioState> audio_state_;
+ std::unique_ptr<voe::ChannelProxy> channel_proxy_;
+
+ bool playing_ RTC_ACCESS_ON(worker_thread_checker_) = false;
+
+ std::unique_ptr<RtpStreamReceiverInterface> rtp_stream_receiver_;
+
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(AudioReceiveStream);
+};
+} // namespace internal
+} // namespace webrtc
+
+#endif // AUDIO_AUDIO_RECEIVE_STREAM_H_
diff --git a/third_party/libwebrtc/webrtc/audio/audio_receive_stream_unittest.cc b/third_party/libwebrtc/webrtc/audio/audio_receive_stream_unittest.cc
new file mode 100644
index 0000000000..d6c2dbe69b
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/audio/audio_receive_stream_unittest.cc
@@ -0,0 +1,384 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <map>
+#include <string>
+#include <vector>
+
+#include "api/test/mock_audio_mixer.h"
+#include "audio/audio_receive_stream.h"
+#include "audio/conversion.h"
+#include "call/rtp_stream_receiver_controller.h"
+#include "logging/rtc_event_log/mock/mock_rtc_event_log.h"
+#include "modules/audio_processing/include/mock_audio_processing.h"
+#include "modules/bitrate_controller/include/mock/mock_bitrate_controller.h"
+#include "modules/pacing/packet_router.h"
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "test/gtest.h"
+#include "test/mock_audio_decoder_factory.h"
+#include "test/mock_voe_channel_proxy.h"
+#include "test/mock_voice_engine.h"
+
+namespace webrtc {
+namespace test {
+namespace {
+
+using testing::_;
+using testing::FloatEq;
+using testing::Return;
+using testing::ReturnRef;
+
+AudioDecodingCallStats MakeAudioDecodeStatsForTest() {
+ AudioDecodingCallStats audio_decode_stats;
+ audio_decode_stats.calls_to_silence_generator = 234;
+ audio_decode_stats.calls_to_neteq = 567;
+ audio_decode_stats.decoded_normal = 890;
+ audio_decode_stats.decoded_plc = 123;
+ audio_decode_stats.decoded_cng = 456;
+ audio_decode_stats.decoded_plc_cng = 789;
+ audio_decode_stats.decoded_muted_output = 987;
+ return audio_decode_stats;
+}
+
+const int kChannelId = 2;
+const uint32_t kRemoteSsrc = 1234;
+const uint32_t kLocalSsrc = 5678;
+const size_t kOneByteExtensionHeaderLength = 4;
+const size_t kOneByteExtensionLength = 4;
+const int kAudioLevelId = 3;
+const int kTransportSequenceNumberId = 4;
+const int kJitterBufferDelay = -7;
+const int kPlayoutBufferDelay = 302;
+const unsigned int kSpeechOutputLevel = 99;
+const double kTotalOutputEnergy = 0.25;
+const double kTotalOutputDuration = 0.5;
+
+const CallStatistics kCallStats = {
+ 345, 678, 901, 234, -12, 3456, 7890, 567, 890, 123};
+const CodecInst kCodecInst = {
+ 123, "codec_name_recv", 96000, -187, 0, -103};
+const NetworkStatistics kNetworkStats = {
+ 123, 456, false, 789012, 3456, 123, 456, 0, {}, 789, 12,
+ 345, 678, 901, 0, -1, -1, -1, -1, -1, 0};
+const AudioDecodingCallStats kAudioDecodeStats = MakeAudioDecodeStatsForTest();
+
+struct ConfigHelper {
+ ConfigHelper()
+ : decoder_factory_(new rtc::RefCountedObject<MockAudioDecoderFactory>),
+ audio_mixer_(new rtc::RefCountedObject<MockAudioMixer>()) {
+ using testing::Invoke;
+
+ EXPECT_CALL(voice_engine_, audio_transport());
+
+ AudioState::Config config;
+ config.voice_engine = &voice_engine_;
+ config.audio_mixer = audio_mixer_;
+ config.audio_processing = new rtc::RefCountedObject<MockAudioProcessing>();
+ audio_state_ = AudioState::Create(config);
+
+ EXPECT_CALL(voice_engine_, ChannelProxyFactory(kChannelId))
+ .WillOnce(Invoke([this](int channel_id) {
+ EXPECT_FALSE(channel_proxy_);
+ channel_proxy_ = new testing::StrictMock<MockVoEChannelProxy>();
+ EXPECT_CALL(*channel_proxy_, SetLocalSSRC(kLocalSsrc)).Times(1);
+ EXPECT_CALL(*channel_proxy_, SetNACKStatus(true, 15)).Times(1);
+ EXPECT_CALL(*channel_proxy_,
+ SetReceiveAudioLevelIndicationStatus(true, kAudioLevelId))
+ .Times(1);
+ EXPECT_CALL(*channel_proxy_,
+ EnableReceiveTransportSequenceNumber(kTransportSequenceNumberId))
+ .Times(1);
+ EXPECT_CALL(*channel_proxy_,
+ RegisterReceiverCongestionControlObjects(&packet_router_))
+ .Times(1);
+ EXPECT_CALL(*channel_proxy_, ResetReceiverCongestionControlObjects())
+ .Times(1);
+ EXPECT_CALL(*channel_proxy_, RegisterTransport(nullptr)).Times(2);
+ EXPECT_CALL(*channel_proxy_, GetAudioDecoderFactory())
+ .WillOnce(ReturnRef(decoder_factory_));
+ testing::Expectation expect_set =
+ EXPECT_CALL(*channel_proxy_, SetRtcEventLog(&event_log_))
+ .Times(1);
+ EXPECT_CALL(*channel_proxy_, SetRtcEventLog(testing::IsNull()))
+ .Times(1)
+ .After(expect_set);
+ EXPECT_CALL(*channel_proxy_, DisassociateSendChannel()).Times(1);
+ EXPECT_CALL(*channel_proxy_, SetReceiveCodecs(_))
+ .WillRepeatedly(
+ Invoke([](const std::map<int, SdpAudioFormat>& codecs) {
+ EXPECT_THAT(codecs, testing::IsEmpty());
+ }));
+ return channel_proxy_;
+ }));
+ stream_config_.voe_channel_id = kChannelId;
+ stream_config_.rtp.local_ssrc = kLocalSsrc;
+ stream_config_.rtp.remote_ssrc = kRemoteSsrc;
+ stream_config_.rtp.nack.rtp_history_ms = 300;
+ stream_config_.rtp.extensions.push_back(
+ RtpExtension(RtpExtension::kAudioLevelUri, kAudioLevelId));
+ stream_config_.rtp.extensions.push_back(RtpExtension(
+ RtpExtension::kTransportSequenceNumberUri, kTransportSequenceNumberId));
+ stream_config_.decoder_factory = decoder_factory_;
+ }
+
+ PacketRouter* packet_router() { return &packet_router_; }
+ MockRtcEventLog* event_log() { return &event_log_; }
+ AudioReceiveStream::Config& config() { return stream_config_; }
+ rtc::scoped_refptr<AudioState> audio_state() { return audio_state_; }
+ rtc::scoped_refptr<MockAudioMixer> audio_mixer() { return audio_mixer_; }
+ MockVoiceEngine& voice_engine() { return voice_engine_; }
+ MockVoEChannelProxy* channel_proxy() { return channel_proxy_; }
+ RtpStreamReceiverControllerInterface* rtp_stream_receiver_controller() {
+ return &rtp_stream_receiver_controller_;
+ }
+
+ void SetupMockForGetStats() {
+ using testing::DoAll;
+ using testing::SetArgPointee;
+
+ ASSERT_TRUE(channel_proxy_);
+ EXPECT_CALL(*channel_proxy_, GetRTCPStatistics())
+ .WillOnce(Return(kCallStats));
+ EXPECT_CALL(*channel_proxy_, GetDelayEstimate())
+ .WillOnce(Return(kJitterBufferDelay + kPlayoutBufferDelay));
+ EXPECT_CALL(*channel_proxy_, GetSpeechOutputLevelFullRange())
+ .WillOnce(Return(kSpeechOutputLevel));
+ EXPECT_CALL(*channel_proxy_, GetTotalOutputEnergy())
+ .WillOnce(Return(kTotalOutputEnergy));
+ EXPECT_CALL(*channel_proxy_, GetTotalOutputDuration())
+ .WillOnce(Return(kTotalOutputDuration));
+ EXPECT_CALL(*channel_proxy_, GetNetworkStatistics())
+ .WillOnce(Return(kNetworkStats));
+ EXPECT_CALL(*channel_proxy_, GetDecodingCallStatistics())
+ .WillOnce(Return(kAudioDecodeStats));
+ EXPECT_CALL(*channel_proxy_, GetRecCodec(_))
+ .WillOnce(DoAll(SetArgPointee<0>(kCodecInst), Return(true)));
+ }
+
+ private:
+ PacketRouter packet_router_;
+ rtc::scoped_refptr<AudioDecoderFactory> decoder_factory_;
+ MockRtcEventLog event_log_;
+ testing::StrictMock<MockVoiceEngine> voice_engine_;
+ rtc::scoped_refptr<AudioState> audio_state_;
+ rtc::scoped_refptr<MockAudioMixer> audio_mixer_;
+ AudioReceiveStream::Config stream_config_;
+ testing::StrictMock<MockVoEChannelProxy>* channel_proxy_ = nullptr;
+ RtpStreamReceiverController rtp_stream_receiver_controller_;
+};
+
+void BuildOneByteExtension(std::vector<uint8_t>::iterator it,
+ int id,
+ uint32_t extension_value,
+ size_t value_length) {
+ const uint16_t kRtpOneByteHeaderExtensionId = 0xBEDE;
+ ByteWriter<uint16_t>::WriteBigEndian(&(*it), kRtpOneByteHeaderExtensionId);
+ it += 2;
+
+ ByteWriter<uint16_t>::WriteBigEndian(&(*it), kOneByteExtensionLength / 4);
+ it += 2;
+ const size_t kExtensionDataLength = kOneByteExtensionLength - 1;
+ uint32_t shifted_value = extension_value
+ << (8 * (kExtensionDataLength - value_length));
+ *it = (id << 4) + (static_cast<uint8_t>(value_length) - 1);
+ ++it;
+ ByteWriter<uint32_t, kExtensionDataLength>::WriteBigEndian(&(*it),
+ shifted_value);
+}
+
+const std::vector<uint8_t> CreateRtpHeaderWithOneByteExtension(
+ int extension_id,
+ uint32_t extension_value,
+ size_t value_length) {
+ std::vector<uint8_t> header;
+ header.resize(webrtc::kRtpHeaderSize + kOneByteExtensionHeaderLength +
+ kOneByteExtensionLength);
+ header[0] = 0x80; // Version 2.
+ header[0] |= 0x10; // Set extension bit.
+ header[1] = 100; // Payload type.
+ header[1] |= 0x80; // Marker bit is set.
+ ByteWriter<uint16_t>::WriteBigEndian(&header[2], 0x1234); // Sequence number.
+ ByteWriter<uint32_t>::WriteBigEndian(&header[4], 0x5678); // Timestamp.
+ ByteWriter<uint32_t>::WriteBigEndian(&header[8], 0x4321); // SSRC.
+
+ BuildOneByteExtension(header.begin() + webrtc::kRtpHeaderSize, extension_id,
+ extension_value, value_length);
+ return header;
+}
+
+const std::vector<uint8_t> CreateRtcpSenderReport() {
+ std::vector<uint8_t> packet;
+ const size_t kRtcpSrLength = 28; // In bytes.
+ packet.resize(kRtcpSrLength);
+ packet[0] = 0x80; // Version 2.
+ packet[1] = 0xc8; // PT = 200, SR.
+ // Length in number of 32-bit words - 1.
+ ByteWriter<uint16_t>::WriteBigEndian(&packet[2], 6);
+ ByteWriter<uint32_t>::WriteBigEndian(&packet[4], kLocalSsrc);
+ return packet;
+}
+} // namespace
+
+TEST(AudioReceiveStreamTest, ConfigToString) {
+ AudioReceiveStream::Config config;
+ config.rtp.remote_ssrc = kRemoteSsrc;
+ config.rtp.local_ssrc = kLocalSsrc;
+ config.voe_channel_id = kChannelId;
+ config.rtp.extensions.push_back(
+ RtpExtension(RtpExtension::kAudioLevelUri, kAudioLevelId));
+ EXPECT_EQ(
+ "{rtp: {remote_ssrc: 1234, local_ssrc: 5678, transport_cc: off, nack: "
+ "{rtp_history_ms: 0}, extensions: [{uri: "
+ "urn:ietf:params:rtp-hdrext:ssrc-audio-level, id: 3}]}, "
+ "rtcp_send_transport: null, voe_channel_id: 2}",
+ config.ToString());
+}
+
+TEST(AudioReceiveStreamTest, ConstructDestruct) {
+ ConfigHelper helper;
+ internal::AudioReceiveStream recv_stream(
+ helper.rtp_stream_receiver_controller(),
+ helper.packet_router(),
+ helper.config(), helper.audio_state(), helper.event_log());
+}
+
+TEST(AudioReceiveStreamTest, ReceiveRtpPacket) {
+ ConfigHelper helper;
+ helper.config().rtp.transport_cc = true;
+ internal::AudioReceiveStream recv_stream(
+ helper.rtp_stream_receiver_controller(),
+ helper.packet_router(),
+ helper.config(), helper.audio_state(), helper.event_log());
+ const int kTransportSequenceNumberValue = 1234;
+ std::vector<uint8_t> rtp_packet = CreateRtpHeaderWithOneByteExtension(
+ kTransportSequenceNumberId, kTransportSequenceNumberValue, 2);
+ PacketTime packet_time(5678000, 0);
+
+ RtpPacketReceived parsed_packet;
+ ASSERT_TRUE(parsed_packet.Parse(&rtp_packet[0], rtp_packet.size()));
+ parsed_packet.set_arrival_time_ms((packet_time.timestamp + 500) / 1000);
+
+ EXPECT_CALL(*helper.channel_proxy(),
+ OnRtpPacket(testing::Ref(parsed_packet)));
+
+ recv_stream.OnRtpPacket(parsed_packet);
+}
+
+TEST(AudioReceiveStreamTest, ReceiveRtcpPacket) {
+ ConfigHelper helper;
+ helper.config().rtp.transport_cc = true;
+ internal::AudioReceiveStream recv_stream(
+ helper.rtp_stream_receiver_controller(),
+ helper.packet_router(),
+ helper.config(), helper.audio_state(), helper.event_log());
+
+ std::vector<uint8_t> rtcp_packet = CreateRtcpSenderReport();
+ EXPECT_CALL(*helper.channel_proxy(),
+ ReceivedRTCPPacket(&rtcp_packet[0], rtcp_packet.size()))
+ .WillOnce(Return(true));
+ EXPECT_TRUE(recv_stream.DeliverRtcp(&rtcp_packet[0], rtcp_packet.size()));
+}
+
+TEST(AudioReceiveStreamTest, GetStats) {
+ ConfigHelper helper;
+ internal::AudioReceiveStream recv_stream(
+ helper.rtp_stream_receiver_controller(),
+ helper.packet_router(),
+ helper.config(), helper.audio_state(), helper.event_log());
+ helper.SetupMockForGetStats();
+ AudioReceiveStream::Stats stats = recv_stream.GetStats();
+ EXPECT_EQ(kRemoteSsrc, stats.remote_ssrc);
+ EXPECT_EQ(static_cast<int64_t>(kCallStats.bytesReceived), stats.bytes_rcvd);
+ EXPECT_EQ(static_cast<uint32_t>(kCallStats.packetsReceived),
+ stats.packets_rcvd);
+ EXPECT_EQ(kCallStats.cumulativeLost, stats.packets_lost);
+ EXPECT_EQ(Q8ToFloat(kCallStats.fractionLost), stats.fraction_lost);
+ EXPECT_EQ(std::string(kCodecInst.plname), stats.codec_name);
+ EXPECT_EQ(kCallStats.extendedMax, stats.ext_seqnum);
+ EXPECT_EQ(kCallStats.jitterSamples / (kCodecInst.plfreq / 1000),
+ stats.jitter_ms);
+ EXPECT_EQ(kNetworkStats.currentBufferSize, stats.jitter_buffer_ms);
+ EXPECT_EQ(kNetworkStats.preferredBufferSize,
+ stats.jitter_buffer_preferred_ms);
+ EXPECT_EQ(static_cast<uint32_t>(kJitterBufferDelay + kPlayoutBufferDelay),
+ stats.delay_estimate_ms);
+ EXPECT_EQ(static_cast<int32_t>(kSpeechOutputLevel), stats.audio_level);
+ EXPECT_EQ(kTotalOutputEnergy, stats.total_output_energy);
+ EXPECT_EQ(kNetworkStats.totalSamplesReceived, stats.total_samples_received);
+ EXPECT_EQ(kTotalOutputDuration, stats.total_output_duration);
+ EXPECT_EQ(kNetworkStats.concealedSamples, stats.concealed_samples);
+ EXPECT_EQ(kNetworkStats.concealmentEvents, stats.concealment_events);
+ EXPECT_EQ(static_cast<double>(kNetworkStats.jitterBufferDelayMs) /
+ static_cast<double>(rtc::kNumMillisecsPerSec),
+ stats.jitter_buffer_delay_seconds);
+ EXPECT_EQ(Q14ToFloat(kNetworkStats.currentExpandRate), stats.expand_rate);
+ EXPECT_EQ(Q14ToFloat(kNetworkStats.currentSpeechExpandRate),
+ stats.speech_expand_rate);
+ EXPECT_EQ(Q14ToFloat(kNetworkStats.currentSecondaryDecodedRate),
+ stats.secondary_decoded_rate);
+ EXPECT_EQ(Q14ToFloat(kNetworkStats.currentSecondaryDiscardedRate),
+ stats.secondary_discarded_rate);
+ EXPECT_EQ(Q14ToFloat(kNetworkStats.currentAccelerateRate),
+ stats.accelerate_rate);
+ EXPECT_EQ(Q14ToFloat(kNetworkStats.currentPreemptiveRate),
+ stats.preemptive_expand_rate);
+ EXPECT_EQ(kAudioDecodeStats.calls_to_silence_generator,
+ stats.decoding_calls_to_silence_generator);
+ EXPECT_EQ(kAudioDecodeStats.calls_to_neteq, stats.decoding_calls_to_neteq);
+ EXPECT_EQ(kAudioDecodeStats.decoded_normal, stats.decoding_normal);
+ EXPECT_EQ(kAudioDecodeStats.decoded_plc, stats.decoding_plc);
+ EXPECT_EQ(kAudioDecodeStats.decoded_cng, stats.decoding_cng);
+ EXPECT_EQ(kAudioDecodeStats.decoded_plc_cng, stats.decoding_plc_cng);
+ EXPECT_EQ(kAudioDecodeStats.decoded_muted_output,
+ stats.decoding_muted_output);
+ EXPECT_EQ(kCallStats.capture_start_ntp_time_ms_,
+ stats.capture_start_ntp_time_ms);
+}
+
+TEST(AudioReceiveStreamTest, SetGain) {
+ ConfigHelper helper;
+ internal::AudioReceiveStream recv_stream(
+ helper.rtp_stream_receiver_controller(),
+ helper.packet_router(),
+ helper.config(), helper.audio_state(), helper.event_log());
+ EXPECT_CALL(*helper.channel_proxy(),
+ SetChannelOutputVolumeScaling(FloatEq(0.765f)));
+ recv_stream.SetGain(0.765f);
+}
+
+TEST(AudioReceiveStreamTest, StreamShouldNotBeAddedToMixerWhenVoEReturnsError) {
+ ConfigHelper helper;
+ internal::AudioReceiveStream recv_stream(
+ helper.rtp_stream_receiver_controller(),
+ helper.packet_router(),
+ helper.config(), helper.audio_state(), helper.event_log());
+
+ EXPECT_CALL(helper.voice_engine(), StartPlayout(_)).WillOnce(Return(-1));
+ EXPECT_CALL(*helper.audio_mixer(), AddSource(_)).Times(0);
+
+ recv_stream.Start();
+}
+
+TEST(AudioReceiveStreamTest, StreamShouldBeAddedToMixerOnStart) {
+ ConfigHelper helper;
+ internal::AudioReceiveStream recv_stream(
+ helper.rtp_stream_receiver_controller(),
+ helper.packet_router(),
+ helper.config(), helper.audio_state(), helper.event_log());
+
+ EXPECT_CALL(helper.voice_engine(), StartPlayout(_)).WillOnce(Return(0));
+ EXPECT_CALL(helper.voice_engine(), StopPlayout(_));
+ EXPECT_CALL(*helper.audio_mixer(), AddSource(&recv_stream))
+ .WillOnce(Return(true));
+
+ recv_stream.Start();
+}
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/audio/audio_send_stream.cc b/third_party/libwebrtc/webrtc/audio/audio_send_stream.cc
new file mode 100644
index 0000000000..df8614c0f6
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/audio/audio_send_stream.cc
@@ -0,0 +1,658 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio/audio_send_stream.h"
+
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "audio/audio_state.h"
+#include "audio/conversion.h"
+#include "audio/scoped_voe_interface.h"
+#include "call/rtp_transport_controller_send_interface.h"
+#include "modules/audio_coding/codecs/cng/audio_encoder_cng.h"
+#include "modules/bitrate_controller/include/bitrate_controller.h"
+#include "modules/congestion_controller/include/send_side_congestion_controller.h"
+#include "modules/pacing/paced_sender.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/event.h"
+#include "rtc_base/function_view.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/task_queue.h"
+#include "rtc_base/timeutils.h"
+#include "voice_engine/channel_proxy.h"
+#include "voice_engine/include/voe_base.h"
+#include "voice_engine/transmit_mixer.h"
+#include "voice_engine/voice_engine_impl.h"
+
+namespace webrtc {
+
+namespace internal {
+// TODO(eladalon): Subsequent CL will make these values experiment-dependent.
+constexpr size_t kPacketLossTrackerMaxWindowSizeMs = 15000;
+constexpr size_t kPacketLossRateMinNumAckedPackets = 50;
+constexpr size_t kRecoverablePacketLossRateMinNumAckedPairs = 40;
+
+namespace {
+void CallEncoder(const std::unique_ptr<voe::ChannelProxy>& channel_proxy,
+ rtc::FunctionView<void(AudioEncoder*)> lambda) {
+ channel_proxy->ModifyEncoder([&](std::unique_ptr<AudioEncoder>* encoder_ptr) {
+ RTC_DCHECK(encoder_ptr);
+ lambda(encoder_ptr->get());
+ });
+}
+} // namespace
+
+// TODO(saza): Move this declaration further down when we can use
+// std::make_unique.
+class AudioSendStream::TimedTransport : public Transport {
+ public:
+ TimedTransport(Transport* transport, TimeInterval* time_interval)
+ : transport_(transport), lifetime_(time_interval) {}
+ bool SendRtp(const uint8_t* packet,
+ size_t length,
+ const PacketOptions& options) {
+ if (lifetime_) {
+ lifetime_->Extend();
+ }
+ return transport_->SendRtp(packet, length, options);
+ }
+ bool SendRtcp(const uint8_t* packet, size_t length) {
+ return transport_->SendRtcp(packet, length);
+ }
+ ~TimedTransport() {}
+
+ private:
+ Transport* transport_;
+ TimeInterval* lifetime_;
+};
+
+AudioSendStream::AudioSendStream(
+ const webrtc::AudioSendStream::Config& config,
+ const rtc::scoped_refptr<webrtc::AudioState>& audio_state,
+ rtc::TaskQueue* worker_queue,
+ RtpTransportControllerSendInterface* transport,
+ BitrateAllocator* bitrate_allocator,
+ RtcEventLog* event_log,
+ RtcpRttStats* rtcp_rtt_stats,
+ const rtc::Optional<RtpState>& suspended_rtp_state)
+ : worker_queue_(worker_queue),
+ config_(Config(nullptr)),
+ audio_state_(audio_state),
+ event_log_(event_log),
+ bitrate_allocator_(bitrate_allocator),
+ transport_(transport),
+ packet_loss_tracker_(kPacketLossTrackerMaxWindowSizeMs,
+ kPacketLossRateMinNumAckedPackets,
+ kRecoverablePacketLossRateMinNumAckedPairs),
+ rtp_rtcp_module_(nullptr),
+ suspended_rtp_state_(suspended_rtp_state) {
+ RTC_LOG(LS_INFO) << "AudioSendStream: " << config.ToString();
+ RTC_DCHECK_NE(config.voe_channel_id, -1);
+ RTC_DCHECK(audio_state_.get());
+ RTC_DCHECK(transport);
+ RTC_DCHECK(transport->send_side_cc());
+
+ VoiceEngineImpl* voe_impl = static_cast<VoiceEngineImpl*>(voice_engine());
+ channel_proxy_ = voe_impl->GetChannelProxy(config.voe_channel_id);
+ channel_proxy_->SetRtcEventLog(event_log_);
+ channel_proxy_->SetRtcpRttStats(rtcp_rtt_stats);
+ channel_proxy_->SetRTCPStatus(true);
+ RtpReceiver* rtpReceiver = nullptr; // Unused, but required for call.
+ channel_proxy_->GetRtpRtcp(&rtp_rtcp_module_, &rtpReceiver);
+ RTC_DCHECK(rtp_rtcp_module_);
+
+ ConfigureStream(this, config, true);
+
+ pacer_thread_checker_.DetachFromThread();
+ // Signal congestion controller this object is ready for OnPacket* callbacks.
+ transport_->send_side_cc()->RegisterPacketFeedbackObserver(this);
+}
+
+AudioSendStream::~AudioSendStream() {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ RTC_LOG(LS_INFO) << "~AudioSendStream: " << config_.ToString();
+ transport_->send_side_cc()->DeRegisterPacketFeedbackObserver(this);
+ channel_proxy_->RegisterTransport(nullptr);
+ channel_proxy_->ResetSenderCongestionControlObjects();
+ channel_proxy_->SetRtcEventLog(nullptr);
+ channel_proxy_->SetRtcpRttStats(nullptr);
+}
+
+const webrtc::AudioSendStream::Config& AudioSendStream::GetConfig() const {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ return config_;
+}
+
+void AudioSendStream::Reconfigure(
+ const webrtc::AudioSendStream::Config& new_config) {
+ ConfigureStream(this, new_config, false);
+}
+
+void AudioSendStream::ConfigureStream(
+ webrtc::internal::AudioSendStream* stream,
+ const webrtc::AudioSendStream::Config& new_config,
+ bool first_time) {
+ RTC_LOG(LS_INFO) << "AudioSendStream::Configuring: " << new_config.ToString();
+ const auto& channel_proxy = stream->channel_proxy_;
+ const auto& old_config = stream->config_;
+
+ if (first_time || old_config.rtp.ssrc != new_config.rtp.ssrc) {
+ channel_proxy->SetLocalSSRC(new_config.rtp.ssrc);
+ if (stream->suspended_rtp_state_) {
+ stream->rtp_rtcp_module_->SetRtpState(*stream->suspended_rtp_state_);
+ }
+ }
+ if (first_time || old_config.rtp.c_name != new_config.rtp.c_name) {
+ channel_proxy->SetRTCP_CNAME(new_config.rtp.c_name);
+ }
+ // TODO(solenberg): Config NACK history window (which is a packet count),
+ // using the actual packet size for the configured codec.
+ if (first_time || old_config.rtp.nack.rtp_history_ms !=
+ new_config.rtp.nack.rtp_history_ms) {
+ channel_proxy->SetNACKStatus(new_config.rtp.nack.rtp_history_ms != 0,
+ new_config.rtp.nack.rtp_history_ms / 20);
+ }
+
+ if (first_time ||
+ new_config.send_transport != old_config.send_transport) {
+ if (old_config.send_transport) {
+ channel_proxy->RegisterTransport(nullptr);
+ }
+ if (new_config.send_transport) {
+ stream->timed_send_transport_adapter_.reset(new TimedTransport(
+ new_config.send_transport, &stream->active_lifetime_));
+ } else {
+ stream->timed_send_transport_adapter_.reset(nullptr);
+ }
+ channel_proxy->RegisterTransport(
+ stream->timed_send_transport_adapter_.get());
+ }
+
+ // RFC 5285: Each distinct extension MUST have a unique ID. The value 0 is
+ // reserved for padding and MUST NOT be used as a local identifier.
+ // So it should be safe to use 0 here to indicate "not configured".
+ struct ExtensionIds {
+ int audio_level = 0;
+ int transport_sequence_number = 0;
+ };
+
+ auto find_extension_ids = [](const std::vector<RtpExtension>& extensions) {
+ ExtensionIds ids;
+ for (const auto& extension : extensions) {
+ if (extension.uri == RtpExtension::kAudioLevelUri) {
+ ids.audio_level = extension.id;
+ } else if (extension.uri == RtpExtension::kTransportSequenceNumberUri) {
+ ids.transport_sequence_number = extension.id;
+ }
+ }
+ return ids;
+ };
+
+ const ExtensionIds old_ids = find_extension_ids(old_config.rtp.extensions);
+ const ExtensionIds new_ids = find_extension_ids(new_config.rtp.extensions);
+ // Audio level indication
+ if (first_time || new_ids.audio_level != old_ids.audio_level) {
+ channel_proxy->SetSendAudioLevelIndicationStatus(new_ids.audio_level != 0,
+ new_ids.audio_level);
+ }
+ bool transport_seq_num_id_changed =
+ new_ids.transport_sequence_number != old_ids.transport_sequence_number;
+ if (first_time || transport_seq_num_id_changed) {
+ if (!first_time) {
+ channel_proxy->ResetSenderCongestionControlObjects();
+ }
+
+ RtcpBandwidthObserver* bandwidth_observer = nullptr;
+ bool has_transport_sequence_number = new_ids.transport_sequence_number != 0;
+ if (has_transport_sequence_number) {
+ channel_proxy->EnableSendTransportSequenceNumber(
+ new_ids.transport_sequence_number);
+ // Probing in application limited region is only used in combination with
+ // send side congestion control, which depends on feedback packets which
+ // requires transport sequence numbers to be enabled.
+ stream->transport_->send_side_cc()->EnablePeriodicAlrProbing(true);
+ bandwidth_observer =
+ stream->transport_->send_side_cc()->GetBandwidthObserver();
+ }
+
+ channel_proxy->RegisterSenderCongestionControlObjects(stream->transport_,
+ bandwidth_observer);
+ }
+
+ if (!ReconfigureSendCodec(stream, new_config)) {
+ RTC_LOG(LS_ERROR) << "Failed to set up send codec state.";
+ }
+
+ ReconfigureBitrateObserver(stream, new_config);
+ stream->config_ = new_config;
+}
+
+void AudioSendStream::Start() {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ if (config_.min_bitrate_bps != -1 && config_.max_bitrate_bps != -1) {
+ // Audio BWE is enabled.
+ transport_->packet_sender()->SetAccountForAudioPackets(true);
+ ConfigureBitrateObserver(config_.min_bitrate_bps, config_.max_bitrate_bps);
+ }
+
+ ScopedVoEInterface<VoEBase> base(voice_engine());
+ int error = base->StartSend(config_.voe_channel_id);
+ if (error != 0) {
+ RTC_LOG(LS_ERROR) << "AudioSendStream::Start failed with error: " << error;
+ }
+}
+
+void AudioSendStream::Stop() {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ RemoveBitrateObserver();
+
+ ScopedVoEInterface<VoEBase> base(voice_engine());
+ int error = base->StopSend(config_.voe_channel_id);
+ if (error != 0) {
+ RTC_LOG(LS_ERROR) << "AudioSendStream::Stop failed with error: " << error;
+ }
+}
+
+bool AudioSendStream::SendTelephoneEvent(int payload_type,
+ int payload_frequency, int event,
+ int duration_ms) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ return channel_proxy_->SetSendTelephoneEventPayloadType(payload_type,
+ payload_frequency) &&
+ channel_proxy_->SendTelephoneEventOutband(event, duration_ms);
+}
+
+void AudioSendStream::SetMuted(bool muted) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ channel_proxy_->SetInputMute(muted);
+}
+
+webrtc::AudioSendStream::Stats AudioSendStream::GetStats() const {
+ return GetStats(true);
+}
+
+webrtc::AudioSendStream::Stats AudioSendStream::GetStats(
+ bool has_remote_tracks) const {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ webrtc::AudioSendStream::Stats stats;
+ stats.local_ssrc = config_.rtp.ssrc;
+
+ webrtc::CallStatistics call_stats = channel_proxy_->GetRTCPStatistics();
+ stats.bytes_sent = call_stats.bytesSent;
+ stats.packets_sent = call_stats.packetsSent;
+ // RTT isn't known until a RTCP report is received. Until then, VoiceEngine
+ // returns 0 to indicate an error value.
+ if (call_stats.rttMs > 0) {
+ stats.rtt_ms = call_stats.rttMs;
+ }
+ if (config_.send_codec_spec) {
+ const auto& spec = *config_.send_codec_spec;
+ stats.codec_name = spec.format.name;
+ stats.codec_payload_type = spec.payload_type;
+
+ // Get data from the last remote RTCP report.
+ for (const auto& block : channel_proxy_->GetRemoteRTCPReportBlocks()) {
+ // Lookup report for send ssrc only.
+ if (block.source_SSRC == stats.local_ssrc) {
+ stats.packets_lost = block.cumulative_num_packets_lost;
+ stats.fraction_lost = Q8ToFloat(block.fraction_lost);
+ stats.ext_seqnum = block.extended_highest_sequence_number;
+ // Convert timestamps to milliseconds.
+ if (spec.format.clockrate_hz / 1000 > 0) {
+ stats.jitter_ms =
+ block.interarrival_jitter / (spec.format.clockrate_hz / 1000);
+ }
+ break;
+ }
+ }
+ }
+
+ ScopedVoEInterface<VoEBase> base(voice_engine());
+ RTC_DCHECK(base->transmit_mixer());
+ stats.audio_level = base->transmit_mixer()->AudioLevelFullRange();
+ RTC_DCHECK_LE(0, stats.audio_level);
+
+ stats.total_input_energy = base->transmit_mixer()->GetTotalInputEnergy();
+ stats.total_input_duration = base->transmit_mixer()->GetTotalInputDuration();
+
+ internal::AudioState* audio_state =
+ static_cast<internal::AudioState*>(audio_state_.get());
+ stats.typing_noise_detected = audio_state->typing_noise_detected();
+ stats.ana_statistics = channel_proxy_->GetANAStatistics();
+ RTC_DCHECK(audio_state_->audio_processing());
+ stats.apm_statistics =
+ audio_state_->audio_processing()->GetStatistics(has_remote_tracks);
+
+ return stats;
+}
+
+void AudioSendStream::SignalNetworkState(NetworkState state) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+}
+
// Hands an incoming RTCP packet to the channel; returns whatever the channel's
// handler reports.
bool AudioSendStream::DeliverRtcp(const uint8_t* packet, size_t length) {
  // TODO(solenberg): Tests call this function on a network thread, libjingle
  // calls on the worker thread. We should move towards always using a network
  // thread. Then this check can be enabled.
  // RTC_DCHECK(!worker_thread_checker_.CalledOnValidThread());
  return channel_proxy_->ReceivedRTCPPacket(packet, length);
}
+
+uint32_t AudioSendStream::OnBitrateUpdated(uint32_t bitrate_bps,
+ uint8_t fraction_loss,
+ int64_t rtt,
+ int64_t bwe_period_ms) {
+ // A send stream may be allocated a bitrate of zero if the allocator decides
+ // to disable it. For now we ignore this decision and keep sending on min
+ // bitrate.
+ if (bitrate_bps == 0) {
+ bitrate_bps = config_.min_bitrate_bps;
+ }
+ RTC_DCHECK_GE(bitrate_bps,
+ static_cast<uint32_t>(config_.min_bitrate_bps));
+ // The bitrate allocator might allocate an higher than max configured bitrate
+ // if there is room, to allow for, as example, extra FEC. Ignore that for now.
+ const uint32_t max_bitrate_bps = config_.max_bitrate_bps;
+ if (bitrate_bps > max_bitrate_bps)
+ bitrate_bps = max_bitrate_bps;
+
+ channel_proxy_->SetBitrate(bitrate_bps, bwe_period_ms);
+
+ // The amount of audio protection is not exposed by the encoder, hence
+ // always returning 0.
+ return 0;
+}
+
+void AudioSendStream::OnPacketAdded(uint32_t ssrc, uint16_t seq_num) {
+ RTC_DCHECK(pacer_thread_checker_.CalledOnValidThread());
+ // Only packets that belong to this stream are of interest.
+ if (ssrc == config_.rtp.ssrc) {
+ rtc::CritScope lock(&packet_loss_tracker_cs_);
+ // TODO(eladalon): This function call could potentially reset the window,
+ // setting both PLR and RPLR to unknown. Consider (during upcoming
+ // refactoring) passing an indication of such an event.
+ packet_loss_tracker_.OnPacketAdded(seq_num, rtc::TimeMillis());
+ }
+}
+
+void AudioSendStream::OnPacketFeedbackVector(
+ const std::vector<PacketFeedback>& packet_feedback_vector) {
+ //Called on STS Thread as a result of delivering a packet.
+ //The functions below are protected by locks, so this should be safe.
+ //RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ rtc::Optional<float> plr;
+ rtc::Optional<float> rplr;
+ {
+ rtc::CritScope lock(&packet_loss_tracker_cs_);
+ packet_loss_tracker_.OnPacketFeedbackVector(packet_feedback_vector);
+ plr = packet_loss_tracker_.GetPacketLossRate();
+ rplr = packet_loss_tracker_.GetRecoverablePacketLossRate();
+ }
+ // TODO(eladalon): If R/PLR go back to unknown, no indication is given that
+ // the previously sent value is no longer relevant. This will be taken care
+ // of with some refactoring which is now being done.
+ if (plr) {
+ channel_proxy_->OnTwccBasedUplinkPacketLossRate(*plr);
+ }
+ if (rplr) {
+ channel_proxy_->OnRecoverableUplinkPacketLossRate(*rplr);
+ }
+}
+
// Forwards the per-packet transport overhead (in bytes) to both the send-side
// congestion controller and the channel.
void AudioSendStream::SetTransportOverhead(int transport_overhead_per_packet) {
  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
  transport_->send_side_cc()->SetTransportOverhead(
      transport_overhead_per_packet);
  channel_proxy_->SetTransportOverhead(transport_overhead_per_packet);
}
+
// Returns the RTP/RTCP module's current RTP state (e.g. so it can be restored
// when the stream is recreated).
RtpState AudioSendStream::GetRtpState() const {
  return rtp_rtcp_module_->GetRtpState();
}
+
// Returns the time interval tracked in |active_lifetime_| for this stream.
const TimeInterval& AudioSendStream::GetActiveLifetime() const {
  return active_lifetime_;
}
+
+VoiceEngine* AudioSendStream::voice_engine() const {
+ internal::AudioState* audio_state =
+ static_cast<internal::AudioState*>(audio_state_.get());
+ VoiceEngine* voice_engine = audio_state->voice_engine();
+ RTC_DCHECK(voice_engine);
+ return voice_engine;
+}
+
+// Apply current codec settings to a single voe::Channel used for sending.
+bool AudioSendStream::SetupSendCodec(AudioSendStream* stream,
+ const Config& new_config) {
+ RTC_DCHECK(new_config.send_codec_spec);
+ const auto& spec = *new_config.send_codec_spec;
+
+ RTC_DCHECK(new_config.encoder_factory);
+ std::unique_ptr<AudioEncoder> encoder =
+ new_config.encoder_factory->MakeAudioEncoder(spec.payload_type,
+ spec.format);
+
+ if (!encoder) {
+ RTC_LOG(LS_ERROR) << "Unable to create encoder for " << spec.format;
+ return false;
+ }
+ // If a bitrate has been specified for the codec, use it over the
+ // codec's default.
+ if (spec.target_bitrate_bps) {
+ encoder->OnReceivedTargetAudioBitrate(*spec.target_bitrate_bps);
+ }
+
+ // Enable ANA if configured (currently only used by Opus).
+ if (new_config.audio_network_adaptor_config) {
+ if (encoder->EnableAudioNetworkAdaptor(
+ *new_config.audio_network_adaptor_config, stream->event_log_)) {
+ RTC_LOG(LS_INFO) << "Audio network adaptor enabled on SSRC "
+ << new_config.rtp.ssrc;
+ } else {
+ RTC_NOTREACHED();
+ }
+ }
+
+ // Wrap the encoder in a an AudioEncoderCNG, if VAD is enabled.
+ if (spec.cng_payload_type) {
+ AudioEncoderCng::Config cng_config;
+ cng_config.num_channels = encoder->NumChannels();
+ cng_config.payload_type = *spec.cng_payload_type;
+ cng_config.speech_encoder = std::move(encoder);
+ cng_config.vad_mode = Vad::kVadNormal;
+ encoder.reset(new AudioEncoderCng(std::move(cng_config)));
+
+ stream->RegisterCngPayloadType(
+ *spec.cng_payload_type,
+ new_config.send_codec_spec->format.clockrate_hz);
+ }
+
+ stream->channel_proxy_->SetEncoder(new_config.send_codec_spec->payload_type,
+ std::move(encoder));
+ return true;
+}
+
// Applies |new_config|'s codec settings to |stream| doing as little work as
// possible: a brand-new encoder is only built when the format or payload type
// changed; otherwise the existing encoder is adjusted in place (target
// bitrate, ANA, CNG). Returns false only if a required new encoder could not
// be created.
bool AudioSendStream::ReconfigureSendCodec(AudioSendStream* stream,
                                           const Config& new_config) {
  const auto& old_config = stream->config_;

  if (!new_config.send_codec_spec) {
    // We cannot de-configure a send codec. So we will do nothing.
    // By design, the send codec should have not been configured.
    RTC_DCHECK(!old_config.send_codec_spec);
    return true;
  }

  // Nothing to do when neither the codec spec nor the ANA config changed.
  if (new_config.send_codec_spec == old_config.send_codec_spec &&
      new_config.audio_network_adaptor_config ==
          old_config.audio_network_adaptor_config) {
    return true;
  }

  // If we have no encoder, or the format or payload type's changed, create a
  // new encoder.
  if (!old_config.send_codec_spec ||
      new_config.send_codec_spec->format !=
          old_config.send_codec_spec->format ||
      new_config.send_codec_spec->payload_type !=
          old_config.send_codec_spec->payload_type) {
    return SetupSendCodec(stream, new_config);
  }

  const rtc::Optional<int>& new_target_bitrate_bps =
      new_config.send_codec_spec->target_bitrate_bps;
  // If a bitrate has been specified for the codec, use it over the
  // codec's default.
  if (new_target_bitrate_bps &&
      new_target_bitrate_bps !=
          old_config.send_codec_spec->target_bitrate_bps) {
    CallEncoder(stream->channel_proxy_, [&](AudioEncoder* encoder) {
      encoder->OnReceivedTargetAudioBitrate(*new_target_bitrate_bps);
    });
  }

  // Remaining deltas (ANA on/off, CNG payload type) are applied to the
  // existing encoder.
  ReconfigureANA(stream, new_config);
  ReconfigureCNG(stream, new_config);

  return true;
}
+
+void AudioSendStream::ReconfigureANA(AudioSendStream* stream,
+ const Config& new_config) {
+ if (new_config.audio_network_adaptor_config ==
+ stream->config_.audio_network_adaptor_config) {
+ return;
+ }
+ if (new_config.audio_network_adaptor_config) {
+ CallEncoder(stream->channel_proxy_, [&](AudioEncoder* encoder) {
+ if (encoder->EnableAudioNetworkAdaptor(
+ *new_config.audio_network_adaptor_config, stream->event_log_)) {
+ RTC_LOG(LS_INFO) << "Audio network adaptor enabled on SSRC "
+ << new_config.rtp.ssrc;
+ } else {
+ RTC_NOTREACHED();
+ }
+ });
+ } else {
+ CallEncoder(stream->channel_proxy_, [&](AudioEncoder* encoder) {
+ encoder->DisableAudioNetworkAdaptor();
+ });
+ RTC_LOG(LS_INFO) << "Audio network adaptor disabled on SSRC "
+ << new_config.rtp.ssrc;
+ }
+}
+
// Adds or removes the AudioEncoderCng wrapper around the current encoder when
// the CNG payload type changed between the stream's config and |new_config|.
void AudioSendStream::ReconfigureCNG(AudioSendStream* stream,
                                     const Config& new_config) {
  if (new_config.send_codec_spec->cng_payload_type ==
      stream->config_.send_codec_spec->cng_payload_type) {
    return;
  }

  // Register the CNG payload type if it's been added, don't do anything if CNG
  // is removed. Payload types must not be redefined.
  if (new_config.send_codec_spec->cng_payload_type) {
    stream->RegisterCngPayloadType(
        *new_config.send_codec_spec->cng_payload_type,
        new_config.send_codec_spec->format.clockrate_hz);
  }

  // Wrap or unwrap the encoder in an AudioEncoderCNG.
  stream->channel_proxy_->ModifyEncoder(
      [&](std::unique_ptr<AudioEncoder>* encoder_ptr) {
        // Take ownership of the installed encoder and, if it is already a
        // wrapper, unwrap it down to the raw speech encoder.
        std::unique_ptr<AudioEncoder> old_encoder(std::move(*encoder_ptr));
        auto sub_encoders = old_encoder->ReclaimContainedEncoders();
        if (!sub_encoders.empty()) {
          // Replace enc with its sub encoder. We need to put the sub
          // encoder in a temporary first, since otherwise the old value
          // of enc would be destroyed before the new value got assigned,
          // which would be bad since the new value is a part of the old
          // value.
          auto tmp = std::move(sub_encoders[0]);
          old_encoder = std::move(tmp);
        }
        if (new_config.send_codec_spec->cng_payload_type) {
          // Re-wrap the speech encoder with the new CNG payload type.
          AudioEncoderCng::Config config;
          config.speech_encoder = std::move(old_encoder);
          config.num_channels = config.speech_encoder->NumChannels();
          config.payload_type = *new_config.send_codec_spec->cng_payload_type;
          config.vad_mode = Vad::kVadNormal;
          encoder_ptr->reset(new AudioEncoderCng(std::move(config)));
        } else {
          // CNG was removed: install the bare speech encoder.
          *encoder_ptr = std::move(old_encoder);
        }
      });
}
+
+void AudioSendStream::ReconfigureBitrateObserver(
+ AudioSendStream* stream,
+ const webrtc::AudioSendStream::Config& new_config) {
+ // Since the Config's default is for both of these to be -1, this test will
+ // allow us to configure the bitrate observer if the new config has bitrate
+ // limits set, but would only have us call RemoveBitrateObserver if we were
+ // previously configured with bitrate limits.
+ if (stream->config_.min_bitrate_bps == new_config.min_bitrate_bps &&
+ stream->config_.max_bitrate_bps == new_config.max_bitrate_bps) {
+ return;
+ }
+
+ if (new_config.min_bitrate_bps != -1 && new_config.max_bitrate_bps != -1) {
+ stream->ConfigureBitrateObserver(new_config.min_bitrate_bps,
+ new_config.max_bitrate_bps);
+ } else {
+ stream->RemoveBitrateObserver();
+ }
+}
+
// Registers this stream with the bitrate allocator under the given limits.
// Blocks the calling (worker) thread until the registration has executed on
// |worker_queue_|, which is what makes the by-reference lambda captures safe.
void AudioSendStream::ConfigureBitrateObserver(int min_bitrate_bps,
                                               int max_bitrate_bps) {
  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
  RTC_DCHECK_GE(max_bitrate_bps, min_bitrate_bps);
  rtc::Event thread_sync_event(false /* manual_reset */, false);
  worker_queue_->PostTask([&] {
    // We may get a callback immediately as the observer is registered, so make
    // sure the bitrate limits in config_ are up-to-date.
    config_.min_bitrate_bps = min_bitrate_bps;
    config_.max_bitrate_bps = max_bitrate_bps;
    bitrate_allocator_->AddObserver(this, min_bitrate_bps, max_bitrate_bps, 0,
                                    true, config_.track_id);
    thread_sync_event.Set();
  });
  thread_sync_event.Wait(rtc::Event::kForever);
}
+
+void AudioSendStream::RemoveBitrateObserver() {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ rtc::Event thread_sync_event(false /* manual_reset */, false);
+ worker_queue_->PostTask([this, &thread_sync_event] {
+ bitrate_allocator_->RemoveObserver(this);
+ thread_sync_event.Set();
+ });
+ thread_sync_event.Wait(rtc::Event::kForever);
+}
+
+void AudioSendStream::RegisterCngPayloadType(int payload_type,
+ int clockrate_hz) {
+ const CodecInst codec = {payload_type, "CN", clockrate_hz, 0, 1, 0};
+ if (rtp_rtcp_module_->RegisterSendPayload(codec) != 0) {
+ rtp_rtcp_module_->DeRegisterSendPayload(codec.pltype);
+ if (rtp_rtcp_module_->RegisterSendPayload(codec) != 0) {
+ RTC_LOG(LS_ERROR) << "RegisterCngPayloadType() failed to register CN to "
+ "RTP/RTCP module";
+ }
+ }
+}
+
+
+} // namespace internal
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/audio/audio_send_stream.h b/third_party/libwebrtc/webrtc/audio/audio_send_stream.h
new file mode 100644
index 0000000000..08bdddb203
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/audio/audio_send_stream.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_AUDIO_SEND_STREAM_H_
+#define AUDIO_AUDIO_SEND_STREAM_H_
+
+#include <memory>
+#include <vector>
+
+#include "audio/time_interval.h"
+#include "call/audio_send_stream.h"
+#include "call/audio_state.h"
+#include "call/bitrate_allocator.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/thread_checker.h"
+#include "voice_engine/transport_feedback_packet_loss_tracker.h"
+
+namespace webrtc {
+class VoiceEngine;
+class RtcEventLog;
+class RtcpBandwidthObserver;
+class RtcpRttStats;
+class RtpTransportControllerSendInterface;
+
+namespace voe {
+class ChannelProxy;
+} // namespace voe
+
+namespace internal {
// Internal implementation of webrtc::AudioSendStream. Also acts as an
// observer of bitrate allocation updates and of per-packet transport
// feedback.
class AudioSendStream final : public webrtc::AudioSendStream,
                              public webrtc::BitrateAllocatorObserver,
                              public webrtc::PacketFeedbackObserver {
 public:
  AudioSendStream(const webrtc::AudioSendStream::Config& config,
                  const rtc::scoped_refptr<webrtc::AudioState>& audio_state,
                  rtc::TaskQueue* worker_queue,
                  RtpTransportControllerSendInterface* transport,
                  BitrateAllocator* bitrate_allocator,
                  RtcEventLog* event_log,
                  RtcpRttStats* rtcp_rtt_stats,
                  const rtc::Optional<RtpState>& suspended_rtp_state);
  ~AudioSendStream() override;

  // webrtc::AudioSendStream implementation.
  const webrtc::AudioSendStream::Config& GetConfig() const override;
  void Reconfigure(const webrtc::AudioSendStream::Config& config) override;
  void Start() override;
  void Stop() override;
  bool SendTelephoneEvent(int payload_type, int payload_frequency, int event,
                          int duration_ms) override;
  void SetMuted(bool muted) override;
  webrtc::AudioSendStream::Stats GetStats() const override;
  webrtc::AudioSendStream::Stats GetStats(
      bool has_remote_tracks) const override;

  // Notifies the stream of network up/down; see the .cc for current behavior.
  void SignalNetworkState(NetworkState state);
  // Delivers an incoming RTCP packet to the channel.
  bool DeliverRtcp(const uint8_t* packet, size_t length);

  // Implements BitrateAllocatorObserver.
  uint32_t OnBitrateUpdated(uint32_t bitrate_bps,
                            uint8_t fraction_loss,
                            int64_t rtt,
                            int64_t bwe_period_ms) override;

  // From PacketFeedbackObserver.
  void OnPacketAdded(uint32_t ssrc, uint16_t seq_num) override;
  void OnPacketFeedbackVector(
      const std::vector<PacketFeedback>& packet_feedback_vector) override;

  // Forwards per-packet transport overhead to the congestion controller and
  // the channel.
  void SetTransportOverhead(int transport_overhead_per_packet);

  RtpState GetRtpState() const;
  const TimeInterval& GetActiveLifetime() const;

 private:
  class TimedTransport;

  VoiceEngine* voice_engine() const;

  // These are all static to make it less likely that (the old) config_ is
  // accessed unintentionally.
  static void ConfigureStream(AudioSendStream* stream,
                              const Config& new_config,
                              bool first_time);
  static bool SetupSendCodec(AudioSendStream* stream, const Config& new_config);
  static bool ReconfigureSendCodec(AudioSendStream* stream,
                                   const Config& new_config);
  static void ReconfigureANA(AudioSendStream* stream, const Config& new_config);
  static void ReconfigureCNG(AudioSendStream* stream, const Config& new_config);
  static void ReconfigureBitrateObserver(AudioSendStream* stream,
                                         const Config& new_config);

  void ConfigureBitrateObserver(int min_bitrate_bps, int max_bitrate_bps);
  void RemoveBitrateObserver();

  void RegisterCngPayloadType(int payload_type, int clockrate_hz);

  rtc::ThreadChecker worker_thread_checker_;
  rtc::ThreadChecker pacer_thread_checker_;
  rtc::TaskQueue* worker_queue_;
  webrtc::AudioSendStream::Config config_;
  rtc::scoped_refptr<webrtc::AudioState> audio_state_;
  std::unique_ptr<voe::ChannelProxy> channel_proxy_;
  RtcEventLog* const event_log_;

  BitrateAllocator* const bitrate_allocator_;
  RtpTransportControllerSendInterface* const transport_;

  // Guards the loss tracker, which is touched from the pacer thread
  // (OnPacketAdded) and the feedback path (OnPacketFeedbackVector).
  rtc::CriticalSection packet_loss_tracker_cs_;
  TransportFeedbackPacketLossTracker packet_loss_tracker_
      RTC_GUARDED_BY(&packet_loss_tracker_cs_);

  RtpRtcp* rtp_rtcp_module_;
  rtc::Optional<RtpState> const suspended_rtp_state_;

  std::unique_ptr<TimedTransport> timed_send_transport_adapter_;
  TimeInterval active_lifetime_;

  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(AudioSendStream);
};
+} // namespace internal
+} // namespace webrtc
+
+#endif // AUDIO_AUDIO_SEND_STREAM_H_
diff --git a/third_party/libwebrtc/webrtc/audio/audio_send_stream_tests.cc b/third_party/libwebrtc/webrtc/audio/audio_send_stream_tests.cc
new file mode 100644
index 0000000000..3f96c3350f
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/audio/audio_send_stream_tests.cc
@@ -0,0 +1,238 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "test/call_test.h"
+#include "test/gtest.h"
+#include "test/rtcp_packet_parser.h"
+
+namespace webrtc {
+namespace test {
+namespace {
+
// Base class for audio-only send tests: exactly one audio send stream, no
// video and no FlexFEC streams.
class AudioSendTest : public SendTest {
 public:
  AudioSendTest() : SendTest(CallTest::kDefaultTimeoutMs) {}

  size_t GetNumVideoStreams() const override {
    return 0;
  }
  size_t GetNumAudioStreams() const override {
    return 1;
  }
  size_t GetNumFlexfecStreams() const override {
    return 0;
  }
};
+} // namespace
+
+using AudioSendStreamCallTest = CallTest;
+
// Verifies that the CNAME set in the send config shows up in outgoing RTCP
// SDES packets.
TEST_F(AudioSendStreamCallTest, SupportsCName) {
  static std::string kCName = "PjqatC14dGfbVwGPUOA9IH7RlsFDbWl4AhXEiDsBizo=";
  class CNameObserver : public AudioSendTest {
   public:
    CNameObserver() = default;

   private:
    Action OnSendRtcp(const uint8_t* packet, size_t length) override {
      RtcpPacketParser parser;
      EXPECT_TRUE(parser.Parse(packet, length));
      // Wait for an RTCP packet carrying an SDES chunk, then check its CNAME.
      if (parser.sdes()->num_packets() > 0) {
        EXPECT_EQ(1u, parser.sdes()->chunks().size());
        EXPECT_EQ(kCName, parser.sdes()->chunks()[0].cname);

        observation_complete_.Set();
      }

      return SEND_PACKET;
    }

    void ModifyAudioConfigs(
        AudioSendStream::Config* send_config,
        std::vector<AudioReceiveStream::Config>* receive_configs) override {
      send_config->rtp.c_name = kCName;
    }

    void PerformTest() override {
      EXPECT_TRUE(Wait()) << "Timed out while waiting for RTCP with CNAME.";
    }
  } test;

  RunBaseTest(&test);
}
+
// Verifies that, with an empty extension list, outgoing RTP packets carry no
// header extensions.
TEST_F(AudioSendStreamCallTest, NoExtensionsByDefault) {
  class NoExtensionsObserver : public AudioSendTest {
   public:
    NoExtensionsObserver() = default;

   private:
    Action OnSendRtp(const uint8_t* packet, size_t length) override {
      RTPHeader header;
      EXPECT_TRUE(parser_->Parse(packet, length, &header));

      EXPECT_FALSE(header.extension.hasTransmissionTimeOffset);
      EXPECT_FALSE(header.extension.hasAbsoluteSendTime);
      EXPECT_FALSE(header.extension.hasTransportSequenceNumber);
      EXPECT_FALSE(header.extension.hasAudioLevel);
      EXPECT_FALSE(header.extension.hasVideoRotation);
      EXPECT_FALSE(header.extension.hasVideoContentType);
      observation_complete_.Set();

      return SEND_PACKET;
    }

    void ModifyAudioConfigs(
        AudioSendStream::Config* send_config,
        std::vector<AudioReceiveStream::Config>* receive_configs) override {
      send_config->rtp.extensions.clear();
    }

    void PerformTest() override {
      EXPECT_TRUE(Wait()) << "Timed out while waiting for a single RTP packet.";
    }
  } test;

  RunBaseTest(&test);
}
+
// Verifies that the audio-level RTP header extension is present on outgoing
// packets, and waits until a packet with a non-zero level is observed.
TEST_F(AudioSendStreamCallTest, SupportsAudioLevel) {
  class AudioLevelObserver : public AudioSendTest {
   public:
    AudioLevelObserver() : AudioSendTest() {
      EXPECT_TRUE(parser_->RegisterRtpHeaderExtension(
          kRtpExtensionAudioLevel, test::kAudioLevelExtensionId));
    }

    Action OnSendRtp(const uint8_t* packet, size_t length) override {
      RTPHeader header;
      EXPECT_TRUE(parser_->Parse(packet, length, &header));

      EXPECT_TRUE(header.extension.hasAudioLevel);
      if (header.extension.audioLevel != 0) {
        // Wait for at least one packet with a non-zero level.
        observation_complete_.Set();
      } else {
        RTC_LOG(LS_WARNING) << "Got a packet with zero audioLevel - waiting"
                               " for another packet...";
      }

      return SEND_PACKET;
    }

    void ModifyAudioConfigs(
        AudioSendStream::Config* send_config,
        std::vector<AudioReceiveStream::Config>* receive_configs) override {
      send_config->rtp.extensions.clear();
      send_config->rtp.extensions.push_back(RtpExtension(
          RtpExtension::kAudioLevelUri, test::kAudioLevelExtensionId));
    }

    void PerformTest() override {
      EXPECT_TRUE(Wait()) << "Timed out while waiting for single RTP packet.";
    }
  } test;

  RunBaseTest(&test);
}
+
// Verifies that the transport-wide sequence number extension is present on
// outgoing RTP packets, and that the legacy timing extensions are not.
TEST_F(AudioSendStreamCallTest, SupportsTransportWideSequenceNumbers) {
  static const uint8_t kExtensionId = test::kTransportSequenceNumberExtensionId;
  class TransportWideSequenceNumberObserver : public AudioSendTest {
   public:
    TransportWideSequenceNumberObserver() : AudioSendTest() {
      EXPECT_TRUE(parser_->RegisterRtpHeaderExtension(
          kRtpExtensionTransportSequenceNumber, kExtensionId));
    }

   private:
    Action OnSendRtp(const uint8_t* packet, size_t length) override {
      RTPHeader header;
      EXPECT_TRUE(parser_->Parse(packet, length, &header));

      EXPECT_TRUE(header.extension.hasTransportSequenceNumber);
      EXPECT_FALSE(header.extension.hasTransmissionTimeOffset);
      EXPECT_FALSE(header.extension.hasAbsoluteSendTime);

      observation_complete_.Set();

      return SEND_PACKET;
    }

    void ModifyAudioConfigs(
        AudioSendStream::Config* send_config,
        std::vector<AudioReceiveStream::Config>* receive_configs) override {
      send_config->rtp.extensions.clear();
      send_config->rtp.extensions.push_back(RtpExtension(
          RtpExtension::kTransportSequenceNumberUri, kExtensionId));
    }

    void PerformTest() override {
      EXPECT_TRUE(Wait()) << "Timed out while waiting for a single RTP packet.";
    }
  } test;

  RunBaseTest(&test);
}
+
// Sends a range of DTMF events and verifies that each distinct event appears
// on the wire, in order, in telephone-event RTP packets.
TEST_F(AudioSendStreamCallTest, SendDtmf) {
  static const uint8_t kDtmfPayloadType = 120;
  static const int kDtmfPayloadFrequency = 8000;
  static const int kDtmfEventFirst = 12;
  static const int kDtmfEventLast = 31;
  static const int kDtmfDuration = 50;
  class DtmfObserver : public AudioSendTest {
   public:
    DtmfObserver() = default;

   private:
    Action OnSendRtp(const uint8_t* packet, size_t length) override {
      RTPHeader header;
      EXPECT_TRUE(parser_->Parse(packet, length, &header));

      if (header.payloadType == kDtmfPayloadType) {
        EXPECT_EQ(12u, header.headerLength);
        EXPECT_EQ(16u, length);
        // First payload byte of a telephone-event packet is the event code.
        const int event = packet[12];
        // Each event is sent in multiple packets; count only transitions to a
        // new event code and expect them to arrive in ascending order.
        if (event != expected_dtmf_event_) {
          ++expected_dtmf_event_;
          EXPECT_EQ(event, expected_dtmf_event_);
          if (expected_dtmf_event_ == kDtmfEventLast) {
            observation_complete_.Set();
          }
        }
      }

      return SEND_PACKET;
    }

    void OnAudioStreamsCreated(
        AudioSendStream* send_stream,
        const std::vector<AudioReceiveStream*>& receive_streams) override {
      // Need to start stream here, else DTMF events are dropped.
      send_stream->Start();
      for (int event = kDtmfEventFirst; event <= kDtmfEventLast; ++event) {
        send_stream->SendTelephoneEvent(kDtmfPayloadType, kDtmfPayloadFrequency,
                                        event, kDtmfDuration);
      }
    }

    void PerformTest() override {
      EXPECT_TRUE(Wait()) << "Timed out while waiting for DTMF stream.";
    }

    // Most recently observed DTMF event code.
    int expected_dtmf_event_ = kDtmfEventFirst;
  } test;

  RunBaseTest(&test);
}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/audio/audio_send_stream_unittest.cc b/third_party/libwebrtc/webrtc/audio/audio_send_stream_unittest.cc
new file mode 100644
index 0000000000..145a8e2419
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/audio/audio_send_stream_unittest.cc
@@ -0,0 +1,600 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "audio/audio_send_stream.h"
+#include "audio/audio_state.h"
+#include "audio/conversion.h"
+#include "call/fake_rtp_transport_controller_send.h"
+#include "call/rtp_transport_controller_send_interface.h"
+#include "logging/rtc_event_log/mock/mock_rtc_event_log.h"
+#include "modules/audio_mixer/audio_mixer_impl.h"
+#include "modules/audio_processing/include/audio_processing_statistics.h"
+#include "modules/audio_processing/include/mock_audio_processing.h"
+#include "modules/congestion_controller/include/mock/mock_congestion_observer.h"
+#include "modules/congestion_controller/include/send_side_congestion_controller.h"
+#include "modules/pacing/mock/mock_paced_sender.h"
+#include "modules/rtp_rtcp/mocks/mock_rtcp_rtt_stats.h"
+#include "modules/rtp_rtcp/mocks/mock_rtp_rtcp.h"
+#include "rtc_base/ptr_util.h"
+#include "rtc_base/task_queue.h"
+#include "test/gtest.h"
+#include "test/mock_audio_encoder.h"
+#include "test/mock_audio_encoder_factory.h"
+#include "test/mock_voe_channel_proxy.h"
+#include "test/mock_voice_engine.h"
+#include "voice_engine/transmit_mixer.h"
+
+namespace webrtc {
+namespace test {
+namespace {
+
+using testing::_;
+using testing::Eq;
+using testing::Ne;
+using testing::Invoke;
+using testing::Return;
+using testing::StrEq;
+
// Fixture constants shared by the tests below.
const int kChannelId = 1;
const uint32_t kSsrc = 1234;
const char* kCName = "foo_name";
const int kAudioLevelId = 2;
const int kTransportSequenceNumberId = 4;
// Canned echo/APM metrics fed through the mocked audio processing.
const int32_t kEchoDelayMedian = 254;
const int32_t kEchoDelayStdDev = -3;
const double kDivergentFilterFraction = 0.2f;
const double kEchoReturnLoss = -65;
const double kEchoReturnLossEnhancement = 101;
const double kResidualEchoLikelihood = -1.0f;
const double kResidualEchoLikelihoodMax = 23.0f;
// Canned input-level/energy values for the mocked transmit mixer.
const int32_t kSpeechInputLevel = 96;
const double kTotalInputEnergy = 0.25;
const double kTotalInputDuration = 0.5;
// Canned call statistics and RTCP report block returned by the mocks.
const CallStatistics kCallStats = {
    1345, 1678, 1901, 1234, 112, 13456, 17890, 1567, -1890, -1123};
const ReportBlock kReportBlock = {456, 780, 123, 567, 890, 132, 143, 13354};
// Telephone-event (DTMF) parameters.
const int kTelephoneEventPayloadType = 123;
const int kTelephoneEventPayloadFrequency = 65432;
const int kTelephoneEventCode = 45;
const int kTelephoneEventDuration = 6789;
// Codec formats and specs offered by the mocked encoder factory.
const CodecInst kIsacCodec = {103, "isac", 16000, 320, 1, 32000};
constexpr int kIsacPayloadType = 103;
const SdpAudioFormat kIsacFormat = {"isac", 16000, 1};
const SdpAudioFormat kOpusFormat = {"opus", 48000, 2};
const SdpAudioFormat kG722Format = {"g722", 8000, 1};
const AudioCodecSpec kCodecSpecs[] = {
    {kIsacFormat, {16000, 1, 32000, 10000, 32000}},
    {kOpusFormat, {48000, 1, 32000, 6000, 510000}},
    {kG722Format, {16000, 1, 64000}}};
+
// gmock stub for the bitrate allocator's limit observer.
class MockLimitObserver : public BitrateAllocator::LimitObserver {
 public:
  MOCK_METHOD2(OnAllocationLimitsChanged,
               void(uint32_t min_send_bitrate_bps,
                    uint32_t max_padding_bitrate_bps));
};
+
// gmock stub for the voice-engine transmit mixer, used to feed canned input
// level/energy/duration values into GetStats().
class MockTransmitMixer : public voe::TransmitMixer {
 public:
  MOCK_CONST_METHOD0(AudioLevelFullRange, int16_t());
  MOCK_CONST_METHOD0(GetTotalInputEnergy, double());
  MOCK_CONST_METHOD0(GetTotalInputDuration, double());
  MOCK_CONST_METHOD0(typing_noise_detected, bool());
};
+
+std::unique_ptr<MockAudioEncoder> SetupAudioEncoderMock(
+ int payload_type,
+ const SdpAudioFormat& format) {
+ for (const auto& spec : kCodecSpecs) {
+ if (format == spec.format) {
+ std::unique_ptr<MockAudioEncoder> encoder(new MockAudioEncoder);
+ ON_CALL(*encoder.get(), SampleRateHz())
+ .WillByDefault(Return(spec.info.sample_rate_hz));
+ ON_CALL(*encoder.get(), NumChannels())
+ .WillByDefault(Return(spec.info.num_channels));
+ ON_CALL(*encoder.get(), RtpTimestampRateHz())
+ .WillByDefault(Return(spec.format.clockrate_hz));
+ return encoder;
+ }
+ }
+ return nullptr;
+}
+
// Builds a MockAudioEncoderFactory whose default actions advertise
// kCodecSpecs and create encoders via SetupAudioEncoderMock().
rtc::scoped_refptr<MockAudioEncoderFactory> SetupEncoderFactoryMock() {
  rtc::scoped_refptr<MockAudioEncoderFactory> factory =
      new rtc::RefCountedObject<MockAudioEncoderFactory>();
  ON_CALL(*factory.get(), GetSupportedEncoders())
      .WillByDefault(Return(std::vector<AudioCodecSpec>(
          std::begin(kCodecSpecs), std::end(kCodecSpecs))));
  // Look up codec info by format; empty optional for unknown formats.
  ON_CALL(*factory.get(), QueryAudioEncoder(_))
      .WillByDefault(Invoke(
          [](const SdpAudioFormat& format) -> rtc::Optional<AudioCodecInfo> {
            for (const auto& spec : kCodecSpecs) {
              if (format == spec.format) {
                return spec.info;
              }
            }
            return rtc::nullopt;
          }));
  ON_CALL(*factory.get(), MakeAudioEncoderMock(_, _, _))
      .WillByDefault(Invoke([](int payload_type, const SdpAudioFormat& format,
                               std::unique_ptr<AudioEncoder>* return_value) {
        *return_value = SetupAudioEncoderMock(payload_type, format);
      }));
  return factory;
}
+
+struct ConfigHelper {
+ ConfigHelper(bool audio_bwe_enabled, bool expect_set_encoder_call)
+ : stream_config_(nullptr),
+ audio_processing_(new rtc::RefCountedObject<MockAudioProcessing>()),
+ simulated_clock_(123456),
+ send_side_cc_(rtc::MakeUnique<SendSideCongestionController>(
+ &simulated_clock_,
+ nullptr /* observer */,
+ &event_log_,
+ &pacer_)),
+ fake_transport_(&packet_router_, &pacer_, send_side_cc_.get()),
+ bitrate_allocator_(&limit_observer_),
+ worker_queue_("ConfigHelper_worker_queue"),
+ audio_encoder_(nullptr) {
+ using testing::Invoke;
+
+ EXPECT_CALL(voice_engine_, audio_transport());
+
+ AudioState::Config config;
+ config.voice_engine = &voice_engine_;
+ config.audio_mixer = AudioMixerImpl::Create();
+ config.audio_processing = audio_processing_;
+ audio_state_ = AudioState::Create(config);
+
+ SetupDefaultChannelProxy(audio_bwe_enabled);
+
+ EXPECT_CALL(voice_engine_, ChannelProxyFactory(kChannelId))
+ .WillOnce(Invoke([this](int channel_id) {
+ return channel_proxy_;
+ }));
+
+ SetupMockForSetupSendCodec(expect_set_encoder_call);
+
+ // Use ISAC as default codec so as to prevent unnecessary |voice_engine_|
+ // calls from the default ctor behavior.
+ stream_config_.send_codec_spec =
+ AudioSendStream::Config::SendCodecSpec(kIsacPayloadType, kIsacFormat);
+ stream_config_.voe_channel_id = kChannelId;
+ stream_config_.rtp.ssrc = kSsrc;
+ stream_config_.rtp.nack.rtp_history_ms = 200;
+ stream_config_.rtp.c_name = kCName;
+ stream_config_.rtp.extensions.push_back(
+ RtpExtension(RtpExtension::kAudioLevelUri, kAudioLevelId));
+ if (audio_bwe_enabled) {
+ AddBweToConfig(&stream_config_);
+ }
+ stream_config_.encoder_factory = SetupEncoderFactoryMock();
+ stream_config_.min_bitrate_bps = 10000;
+ stream_config_.max_bitrate_bps = 65000;
+ }
+
+ AudioSendStream::Config& config() { return stream_config_; }
+ MockAudioEncoderFactory& mock_encoder_factory() {
+ return *static_cast<MockAudioEncoderFactory*>(
+ stream_config_.encoder_factory.get());
+ }
+ rtc::scoped_refptr<AudioState> audio_state() { return audio_state_; }
+ MockVoEChannelProxy* channel_proxy() { return channel_proxy_; }
+ RtpTransportControllerSendInterface* transport() { return &fake_transport_; }
+ BitrateAllocator* bitrate_allocator() { return &bitrate_allocator_; }
+ rtc::TaskQueue* worker_queue() { return &worker_queue_; }
+ RtcEventLog* event_log() { return &event_log_; }
+ MockVoiceEngine* voice_engine() { return &voice_engine_; }
+
+ static void AddBweToConfig(AudioSendStream::Config* config) {
+ config->rtp.extensions.push_back(
+ RtpExtension(RtpExtension::kTransportSequenceNumberUri,
+ kTransportSequenceNumberId));
+ config->send_codec_spec->transport_cc_enabled = true;
+ }
+
+ void SetupDefaultChannelProxy(bool audio_bwe_enabled) {
+ channel_proxy_ = new testing::StrictMock<MockVoEChannelProxy>();
+ EXPECT_CALL(*channel_proxy_, GetRtpRtcp(_, _))
+ .WillRepeatedly(Invoke(
+ [this](RtpRtcp** rtp_rtcp_module, RtpReceiver** rtp_receiver) {
+ *rtp_rtcp_module = &this->rtp_rtcp_;
+ *rtp_receiver = nullptr; // Not deemed necessary for tests yet.
+ }));
+ EXPECT_CALL(*channel_proxy_, SetRTCPStatus(true)).Times(1);
+ EXPECT_CALL(*channel_proxy_, SetLocalSSRC(kSsrc)).Times(1);
+ EXPECT_CALL(*channel_proxy_, SetRTCP_CNAME(StrEq(kCName))).Times(1);
+ EXPECT_CALL(*channel_proxy_, SetNACKStatus(true, 10)).Times(1);
+ EXPECT_CALL(*channel_proxy_,
+ SetSendAudioLevelIndicationStatus(true, kAudioLevelId))
+ .Times(1);
+ if (audio_bwe_enabled) {
+ EXPECT_CALL(*channel_proxy_,
+ EnableSendTransportSequenceNumber(kTransportSequenceNumberId))
+ .Times(1);
+ EXPECT_CALL(*channel_proxy_, RegisterSenderCongestionControlObjects(
+ &fake_transport_, Ne(nullptr)))
+ .Times(1);
+ } else {
+ EXPECT_CALL(*channel_proxy_, RegisterSenderCongestionControlObjects(
+ &fake_transport_, Eq(nullptr)))
+ .Times(1);
+ }
+ EXPECT_CALL(*channel_proxy_, SetBitrate(_, _))
+ .Times(1);
+ EXPECT_CALL(*channel_proxy_, ResetSenderCongestionControlObjects())
+ .Times(1);
+ EXPECT_CALL(*channel_proxy_, RegisterTransport(nullptr)).Times(2);
+ EXPECT_CALL(*channel_proxy_, SetRtcEventLog(testing::NotNull())).Times(1);
+ EXPECT_CALL(*channel_proxy_, SetRtcEventLog(testing::IsNull()))
+ .Times(1); // Destructor resets the event log
+ EXPECT_CALL(*channel_proxy_, SetRtcpRttStats(&rtcp_rtt_stats_)).Times(1);
+ EXPECT_CALL(*channel_proxy_, SetRtcpRttStats(testing::IsNull()))
+ .Times(1); // Destructor resets the rtt stats.
+ }
+
+ void SetupMockForSetupSendCodec(bool expect_set_encoder_call) {
+ if (expect_set_encoder_call) {
+ EXPECT_CALL(*channel_proxy_, SetEncoderForMock(_, _))
+ .WillOnce(Invoke(
+ [this](int payload_type, std::unique_ptr<AudioEncoder>* encoder) {
+ this->audio_encoder_ = std::move(*encoder);
+ return true;
+ }));
+ }
+ }
+
+ void SetupMockForModifyEncoder() {
+ // Let ModifyEncoder to invoke mock audio encoder.
+ EXPECT_CALL(*channel_proxy_, ModifyEncoder(_))
+ .WillRepeatedly(Invoke(
+ [this](rtc::FunctionView<void(std::unique_ptr<AudioEncoder>*)>
+ modifier) {
+ if (this->audio_encoder_)
+ modifier(&this->audio_encoder_);
+ }));
+ }
+
+ RtcpRttStats* rtcp_rtt_stats() { return &rtcp_rtt_stats_; }
+
+ void SetupMockForSendTelephoneEvent() {
+ EXPECT_TRUE(channel_proxy_);
+ EXPECT_CALL(*channel_proxy_,
+ SetSendTelephoneEventPayloadType(kTelephoneEventPayloadType,
+ kTelephoneEventPayloadFrequency))
+ .WillOnce(Return(true));
+ EXPECT_CALL(*channel_proxy_,
+ SendTelephoneEventOutband(kTelephoneEventCode, kTelephoneEventDuration))
+ .WillOnce(Return(true));
+ }
+
+ void SetupMockForGetStats() {
+ using testing::DoAll;
+ using testing::SetArgPointee;
+ using testing::SetArgReferee;
+
+ std::vector<ReportBlock> report_blocks;
+ webrtc::ReportBlock block = kReportBlock;
+ report_blocks.push_back(block); // Has wrong SSRC.
+ block.source_SSRC = kSsrc;
+ report_blocks.push_back(block); // Correct block.
+ block.fraction_lost = 0;
+ report_blocks.push_back(block); // Duplicate SSRC, bad fraction_lost.
+
+ EXPECT_TRUE(channel_proxy_);
+ EXPECT_CALL(*channel_proxy_, GetRTCPStatistics())
+ .WillRepeatedly(Return(kCallStats));
+ EXPECT_CALL(*channel_proxy_, GetRemoteRTCPReportBlocks())
+ .WillRepeatedly(Return(report_blocks));
+ EXPECT_CALL(*channel_proxy_, GetANAStatistics())
+ .WillRepeatedly(Return(ANAStats()));
+ EXPECT_CALL(voice_engine_, transmit_mixer())
+ .WillRepeatedly(Return(&transmit_mixer_));
+
+ EXPECT_CALL(transmit_mixer_, AudioLevelFullRange())
+ .WillRepeatedly(Return(kSpeechInputLevel));
+ EXPECT_CALL(transmit_mixer_, GetTotalInputEnergy())
+ .WillRepeatedly(Return(kTotalInputEnergy));
+ EXPECT_CALL(transmit_mixer_, GetTotalInputDuration())
+ .WillRepeatedly(Return(kTotalInputDuration));
+ EXPECT_CALL(transmit_mixer_, typing_noise_detected())
+ .WillRepeatedly(Return(true));
+
+ audio_processing_stats_.echo_return_loss = kEchoReturnLoss;
+ audio_processing_stats_.echo_return_loss_enhancement =
+ kEchoReturnLossEnhancement;
+ audio_processing_stats_.delay_median_ms = kEchoDelayMedian;
+ audio_processing_stats_.delay_standard_deviation_ms = kEchoDelayStdDev;
+ audio_processing_stats_.divergent_filter_fraction =
+ kDivergentFilterFraction;
+ audio_processing_stats_.residual_echo_likelihood = kResidualEchoLikelihood;
+ audio_processing_stats_.residual_echo_likelihood_recent_max =
+ kResidualEchoLikelihoodMax;
+
+ EXPECT_CALL(*audio_processing_, GetStatistics(true))
+ .WillRepeatedly(Return(audio_processing_stats_));
+ }
+
+ private:
+ testing::StrictMock<MockVoiceEngine> voice_engine_;
+ rtc::scoped_refptr<AudioState> audio_state_;
+ AudioSendStream::Config stream_config_;
+ testing::StrictMock<MockVoEChannelProxy>* channel_proxy_ = nullptr;
+ rtc::scoped_refptr<MockAudioProcessing> audio_processing_;
+ MockTransmitMixer transmit_mixer_;
+ AudioProcessingStats audio_processing_stats_;
+ SimulatedClock simulated_clock_;
+ PacketRouter packet_router_;
+ testing::NiceMock<MockPacedSender> pacer_;
+ std::unique_ptr<SendSideCongestionController> send_side_cc_;
+ FakeRtpTransportControllerSend fake_transport_;
+ MockRtcEventLog event_log_;
+ MockRtpRtcp rtp_rtcp_;
+ MockRtcpRttStats rtcp_rtt_stats_;
+ testing::NiceMock<MockLimitObserver> limit_observer_;
+ BitrateAllocator bitrate_allocator_;
+ // |worker_queue| is defined last to ensure all pending tasks are cancelled
+ // and deleted before any other members.
+ rtc::TaskQueue worker_queue_;
+ std::unique_ptr<AudioEncoder> audio_encoder_;
+};
+} // namespace
+
+TEST(AudioSendStreamTest, ConfigToString) {
+ AudioSendStream::Config config(nullptr);
+ config.rtp.ssrc = kSsrc;
+ config.rtp.c_name = kCName;
+ config.voe_channel_id = kChannelId;
+ config.min_bitrate_bps = 12000;
+ config.max_bitrate_bps = 34000;
+ config.send_codec_spec =
+ AudioSendStream::Config::SendCodecSpec(kIsacPayloadType, kIsacFormat);
+ config.send_codec_spec->nack_enabled = true;
+ config.send_codec_spec->transport_cc_enabled = false;
+ config.send_codec_spec->cng_payload_type = 42;
+ config.encoder_factory = MockAudioEncoderFactory::CreateUnusedFactory();
+ config.rtp.extensions.push_back(
+ RtpExtension(RtpExtension::kAudioLevelUri, kAudioLevelId));
+ EXPECT_EQ(
+ "{rtp: {ssrc: 1234, extensions: [{uri: "
+ "urn:ietf:params:rtp-hdrext:ssrc-audio-level, id: 2}], nack: "
+ "{rtp_history_ms: 0}, c_name: foo_name}, send_transport: null, "
+ "voe_channel_id: 1, min_bitrate_bps: 12000, max_bitrate_bps: 34000, "
+ "send_codec_spec: {nack_enabled: true, transport_cc_enabled: false, "
+ "cng_payload_type: 42, payload_type: 103, "
+ "format: {name: isac, clockrate_hz: 16000, num_channels: 1, "
+ "parameters: {}}}}",
+ config.ToString());
+}
+
+TEST(AudioSendStreamTest, ConstructDestruct) {
+ ConfigHelper helper(false, true);
+ internal::AudioSendStream send_stream(
+ helper.config(), helper.audio_state(), helper.worker_queue(),
+ helper.transport(), helper.bitrate_allocator(), helper.event_log(),
+ helper.rtcp_rtt_stats(), rtc::nullopt);
+}
+
+TEST(AudioSendStreamTest, SendTelephoneEvent) {
+ ConfigHelper helper(false, true);
+ internal::AudioSendStream send_stream(
+ helper.config(), helper.audio_state(), helper.worker_queue(),
+ helper.transport(), helper.bitrate_allocator(), helper.event_log(),
+ helper.rtcp_rtt_stats(), rtc::nullopt);
+ helper.SetupMockForSendTelephoneEvent();
+ EXPECT_TRUE(send_stream.SendTelephoneEvent(kTelephoneEventPayloadType,
+ kTelephoneEventPayloadFrequency, kTelephoneEventCode,
+ kTelephoneEventDuration));
+}
+
+TEST(AudioSendStreamTest, SetMuted) {
+ ConfigHelper helper(false, true);
+ internal::AudioSendStream send_stream(
+ helper.config(), helper.audio_state(), helper.worker_queue(),
+ helper.transport(), helper.bitrate_allocator(), helper.event_log(),
+ helper.rtcp_rtt_stats(), rtc::nullopt);
+ EXPECT_CALL(*helper.channel_proxy(), SetInputMute(true));
+ send_stream.SetMuted(true);
+}
+
+TEST(AudioSendStreamTest, AudioBweCorrectObjectsOnChannelProxy) {
+ ConfigHelper helper(true, true);
+ internal::AudioSendStream send_stream(
+ helper.config(), helper.audio_state(), helper.worker_queue(),
+ helper.transport(), helper.bitrate_allocator(), helper.event_log(),
+ helper.rtcp_rtt_stats(), rtc::nullopt);
+}
+
+TEST(AudioSendStreamTest, NoAudioBweCorrectObjectsOnChannelProxy) {
+ ConfigHelper helper(false, true);
+ internal::AudioSendStream send_stream(
+ helper.config(), helper.audio_state(), helper.worker_queue(),
+ helper.transport(), helper.bitrate_allocator(), helper.event_log(),
+ helper.rtcp_rtt_stats(), rtc::nullopt);
+}
+
+TEST(AudioSendStreamTest, GetStats) {
+ ConfigHelper helper(false, true);
+ internal::AudioSendStream send_stream(
+ helper.config(), helper.audio_state(), helper.worker_queue(),
+ helper.transport(), helper.bitrate_allocator(), helper.event_log(),
+ helper.rtcp_rtt_stats(), rtc::nullopt);
+ helper.SetupMockForGetStats();
+ AudioSendStream::Stats stats = send_stream.GetStats(true);
+ EXPECT_EQ(kSsrc, stats.local_ssrc);
+ EXPECT_EQ(static_cast<int64_t>(kCallStats.bytesSent), stats.bytes_sent);
+ EXPECT_EQ(kCallStats.packetsSent, stats.packets_sent);
+ EXPECT_EQ(static_cast<int32_t>(kReportBlock.cumulative_num_packets_lost),
+ stats.packets_lost);
+ EXPECT_EQ(Q8ToFloat(kReportBlock.fraction_lost), stats.fraction_lost);
+ EXPECT_EQ(std::string(kIsacCodec.plname), stats.codec_name);
+ EXPECT_EQ(static_cast<int32_t>(kReportBlock.extended_highest_sequence_number),
+ stats.ext_seqnum);
+ EXPECT_EQ(static_cast<int32_t>(kReportBlock.interarrival_jitter /
+ (kIsacCodec.plfreq / 1000)),
+ stats.jitter_ms);
+ EXPECT_EQ(kCallStats.rttMs, stats.rtt_ms);
+ EXPECT_EQ(static_cast<int32_t>(kSpeechInputLevel), stats.audio_level);
+ EXPECT_EQ(kTotalInputEnergy, stats.total_input_energy);
+ EXPECT_EQ(kTotalInputDuration, stats.total_input_duration);
+ EXPECT_EQ(kEchoDelayMedian, stats.apm_statistics.delay_median_ms);
+ EXPECT_EQ(kEchoDelayStdDev, stats.apm_statistics.delay_standard_deviation_ms);
+ EXPECT_EQ(kEchoReturnLoss, stats.apm_statistics.echo_return_loss);
+ EXPECT_EQ(kEchoReturnLossEnhancement,
+ stats.apm_statistics.echo_return_loss_enhancement);
+ EXPECT_EQ(kDivergentFilterFraction,
+ stats.apm_statistics.divergent_filter_fraction);
+ EXPECT_EQ(kResidualEchoLikelihood,
+ stats.apm_statistics.residual_echo_likelihood);
+ EXPECT_EQ(kResidualEchoLikelihoodMax,
+ stats.apm_statistics.residual_echo_likelihood_recent_max);
+ EXPECT_TRUE(stats.typing_noise_detected);
+}
+
+TEST(AudioSendStreamTest, SendCodecAppliesAudioNetworkAdaptor) {
+ ConfigHelper helper(false, true);
+ auto stream_config = helper.config();
+ stream_config.send_codec_spec =
+ AudioSendStream::Config::SendCodecSpec(0, kOpusFormat);
+ const std::string kAnaConfigString = "abcde";
+ const std::string kAnaReconfigString = "12345";
+
+ stream_config.audio_network_adaptor_config = kAnaConfigString;
+
+ EXPECT_CALL(helper.mock_encoder_factory(), MakeAudioEncoderMock(_, _, _))
+ .WillOnce(Invoke([&kAnaConfigString, &kAnaReconfigString](
+ int payload_type, const SdpAudioFormat& format,
+ std::unique_ptr<AudioEncoder>* return_value) {
+ auto mock_encoder = SetupAudioEncoderMock(payload_type, format);
+ EXPECT_CALL(*mock_encoder,
+ EnableAudioNetworkAdaptor(StrEq(kAnaConfigString), _))
+ .WillOnce(Return(true));
+ EXPECT_CALL(*mock_encoder,
+ EnableAudioNetworkAdaptor(StrEq(kAnaReconfigString), _))
+ .WillOnce(Return(true));
+ *return_value = std::move(mock_encoder);
+ }));
+
+ internal::AudioSendStream send_stream(
+ stream_config, helper.audio_state(), helper.worker_queue(),
+ helper.transport(), helper.bitrate_allocator(), helper.event_log(),
+ helper.rtcp_rtt_stats(), rtc::nullopt);
+
+ stream_config.audio_network_adaptor_config = kAnaReconfigString;
+
+ helper.SetupMockForModifyEncoder();
+ send_stream.Reconfigure(stream_config);
+}
+
+// VAD is applied when codec is mono and the CNG frequency matches the codec
+// clock rate.
+TEST(AudioSendStreamTest, SendCodecCanApplyVad) {
+ ConfigHelper helper(false, false);
+ auto stream_config = helper.config();
+ stream_config.send_codec_spec =
+ AudioSendStream::Config::SendCodecSpec(9, kG722Format);
+ stream_config.send_codec_spec->cng_payload_type = 105;
+ using ::testing::Invoke;
+ std::unique_ptr<AudioEncoder> stolen_encoder;
+ EXPECT_CALL(*helper.channel_proxy(), SetEncoderForMock(_, _))
+ .WillOnce(
+ Invoke([&stolen_encoder](int payload_type,
+ std::unique_ptr<AudioEncoder>* encoder) {
+ stolen_encoder = std::move(*encoder);
+ return true;
+ }));
+
+ internal::AudioSendStream send_stream(
+ stream_config, helper.audio_state(), helper.worker_queue(),
+ helper.transport(), helper.bitrate_allocator(), helper.event_log(),
+ helper.rtcp_rtt_stats(), rtc::nullopt);
+
+ // We cannot truly determine if the encoder created is an AudioEncoderCng. It
+ // is the only reasonable implementation that will return something from
+ // ReclaimContainedEncoders, though.
+ ASSERT_TRUE(stolen_encoder);
+ EXPECT_FALSE(stolen_encoder->ReclaimContainedEncoders().empty());
+}
+
+TEST(AudioSendStreamTest, DoesNotPassHigherBitrateThanMaxBitrate) {
+ ConfigHelper helper(false, true);
+ internal::AudioSendStream send_stream(
+ helper.config(), helper.audio_state(), helper.worker_queue(),
+ helper.transport(), helper.bitrate_allocator(), helper.event_log(),
+ helper.rtcp_rtt_stats(), rtc::nullopt);
+ EXPECT_CALL(*helper.channel_proxy(),
+ SetBitrate(helper.config().max_bitrate_bps, _));
+ send_stream.OnBitrateUpdated(helper.config().max_bitrate_bps + 5000, 0.0, 50,
+ 6000);
+}
+
+TEST(AudioSendStreamTest, ProbingIntervalOnBitrateUpdated) {
+ ConfigHelper helper(false, true);
+ internal::AudioSendStream send_stream(
+ helper.config(), helper.audio_state(), helper.worker_queue(),
+ helper.transport(), helper.bitrate_allocator(), helper.event_log(),
+ helper.rtcp_rtt_stats(), rtc::nullopt);
+ EXPECT_CALL(*helper.channel_proxy(), SetBitrate(_, 5000));
+ send_stream.OnBitrateUpdated(50000, 0.0, 50, 5000);
+}
+
+// Test that AudioSendStream doesn't recreate the encoder unnecessarily.
+TEST(AudioSendStreamTest, DontRecreateEncoder) {
+ ConfigHelper helper(false, false);
+ // WillOnce is (currently) the default used by ConfigHelper if asked to set an
+ // expectation for SetEncoder. Since this behavior is essential for this test
+ // to be correct, it's instead set-up manually here. Otherwise a simple change
+ // to ConfigHelper (say to WillRepeatedly) would silently make this test
+ // useless.
+ EXPECT_CALL(*helper.channel_proxy(), SetEncoderForMock(_, _))
+ .WillOnce(Return(true));
+
+ auto stream_config = helper.config();
+ stream_config.send_codec_spec =
+ AudioSendStream::Config::SendCodecSpec(9, kG722Format);
+ stream_config.send_codec_spec->cng_payload_type = 105;
+ internal::AudioSendStream send_stream(
+ stream_config, helper.audio_state(), helper.worker_queue(),
+ helper.transport(), helper.bitrate_allocator(), helper.event_log(),
+ helper.rtcp_rtt_stats(), rtc::nullopt);
+ send_stream.Reconfigure(stream_config);
+}
+
+TEST(AudioSendStreamTest, ReconfigureTransportCcResetsFirst) {
+ ConfigHelper helper(false, true);
+ internal::AudioSendStream send_stream(
+ helper.config(), helper.audio_state(), helper.worker_queue(),
+ helper.transport(), helper.bitrate_allocator(), helper.event_log(),
+ helper.rtcp_rtt_stats(), rtc::nullopt);
+ auto new_config = helper.config();
+ ConfigHelper::AddBweToConfig(&new_config);
+ EXPECT_CALL(*helper.channel_proxy(),
+ EnableSendTransportSequenceNumber(kTransportSequenceNumberId))
+ .Times(1);
+ {
+ ::testing::InSequence seq;
+ EXPECT_CALL(*helper.channel_proxy(), ResetSenderCongestionControlObjects())
+ .Times(1);
+ EXPECT_CALL(*helper.channel_proxy(), RegisterSenderCongestionControlObjects(
+ helper.transport(), Ne(nullptr)))
+ .Times(1);
+ }
+ send_stream.Reconfigure(new_config);
+}
+
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/audio/audio_state.cc b/third_party/libwebrtc/webrtc/audio/audio_state.cc
new file mode 100644
index 0000000000..5a30c53b3d
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/audio/audio_state.cc
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio/audio_state.h"
+
+#include "modules/audio_device/include/audio_device.h"
+#include "rtc_base/atomicops.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/ptr_util.h"
+#include "rtc_base/thread.h"
+#include "voice_engine/transmit_mixer.h"
+
+namespace webrtc {
+namespace internal {
+
+AudioState::AudioState(const AudioState::Config& config)
+ : config_(config),
+ voe_base_(config.voice_engine),
+ audio_transport_proxy_(voe_base_->audio_transport(),
+ config_.audio_processing.get(),
+ config_.audio_mixer) {
+ process_thread_checker_.DetachFromThread();
+ RTC_DCHECK(config_.audio_mixer);
+}
+
+AudioState::~AudioState() {
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+}
+
+VoiceEngine* AudioState::voice_engine() {
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ return config_.voice_engine;
+}
+
+rtc::scoped_refptr<AudioMixer> AudioState::mixer() {
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ return config_.audio_mixer;
+}
+
+bool AudioState::typing_noise_detected() const {
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ // TODO(solenberg): Remove const_cast once AudioState owns transmit mixer
+ // functionality.
+ voe::TransmitMixer* transmit_mixer =
+ const_cast<AudioState*>(this)->voe_base_->transmit_mixer();
+ return transmit_mixer->typing_noise_detected();
+}
+
+void AudioState::SetPlayout(bool enabled) {
+ RTC_LOG(INFO) << "SetPlayout(" << enabled << ")";
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ const bool currently_enabled = (null_audio_poller_ == nullptr);
+ if (enabled == currently_enabled) {
+ return;
+ }
+ VoEBase* const voe = VoEBase::GetInterface(voice_engine());
+ RTC_DCHECK(voe);
+ if (enabled) {
+ null_audio_poller_.reset();
+ }
+ // Will stop/start playout of the underlying device, if necessary, and
+ // remember the setting for when it receives subsequent calls of
+ // StartPlayout.
+ voe->SetPlayout(enabled);
+ if (!enabled) {
+ null_audio_poller_ =
+ rtc::MakeUnique<NullAudioPoller>(&audio_transport_proxy_);
+ }
+ voe->Release();
+}
+
+void AudioState::SetRecording(bool enabled) {
+ RTC_LOG(INFO) << "SetRecording(" << enabled << ")";
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ // TODO(henrika): keep track of state as in SetPlayout().
+ VoEBase* const voe = VoEBase::GetInterface(voice_engine());
+ RTC_DCHECK(voe);
+ // Will stop/start recording of the underlying device, if necessary, and
+ // remember the setting for when it receives subsequent calls of
+ // StartPlayout.
+ voe->SetRecording(enabled);
+ voe->Release();
+}
+
+// Reference count; implementation copied from rtc::RefCountedObject.
+void AudioState::AddRef() const {
+ rtc::AtomicOps::Increment(&ref_count_);
+}
+
+// Reference count; implementation copied from rtc::RefCountedObject.
+rtc::RefCountReleaseStatus AudioState::Release() const {
+ if (rtc::AtomicOps::Decrement(&ref_count_) == 0) {
+ delete this;
+ return rtc::RefCountReleaseStatus::kDroppedLastRef;
+ }
+ return rtc::RefCountReleaseStatus::kOtherRefsRemained;
+}
+} // namespace internal
+
+rtc::scoped_refptr<AudioState> AudioState::Create(
+ const AudioState::Config& config) {
+ return rtc::scoped_refptr<AudioState>(new internal::AudioState(config));
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/audio/audio_state.h b/third_party/libwebrtc/webrtc/audio/audio_state.h
new file mode 100644
index 0000000000..f4bddbfa85
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/audio/audio_state.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_AUDIO_STATE_H_
+#define AUDIO_AUDIO_STATE_H_
+
+#include <memory>
+
+#include "audio/audio_transport_proxy.h"
+#include "audio/null_audio_poller.h"
+#include "audio/scoped_voe_interface.h"
+#include "call/audio_state.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/refcount.h"
+#include "rtc_base/thread_checker.h"
+#include "voice_engine/include/voe_base.h"
+
+namespace webrtc {
+namespace internal {
+
+class AudioState final : public webrtc::AudioState {
+ public:
+ explicit AudioState(const AudioState::Config& config);
+ ~AudioState() override;
+
+ AudioProcessing* audio_processing() override {
+ RTC_DCHECK(config_.audio_processing);
+ return config_.audio_processing.get();
+ }
+ AudioTransport* audio_transport() override {
+ return &audio_transport_proxy_;
+ }
+
+ void SetPlayout(bool enabled) override;
+ void SetRecording(bool enabled) override;
+
+ VoiceEngine* voice_engine();
+ rtc::scoped_refptr<AudioMixer> mixer();
+ bool typing_noise_detected() const;
+
+ private:
+ // rtc::RefCountInterface implementation.
+ void AddRef() const override;
+ rtc::RefCountReleaseStatus Release() const override;
+
+ rtc::ThreadChecker thread_checker_;
+ rtc::ThreadChecker process_thread_checker_;
+ const webrtc::AudioState::Config config_;
+
+ // We hold one interface pointer to the VoE to make sure it is kept alive.
+ ScopedVoEInterface<VoEBase> voe_base_;
+
+ // Reference count; implementation copied from rtc::RefCountedObject.
+ // TODO(nisse): Use RefCountedObject or RefCountedBase instead.
+ mutable volatile int ref_count_ = 0;
+
+ // Transports mixed audio from the mixer to the audio device and
+ // recorded audio to the VoE AudioTransport.
+ AudioTransportProxy audio_transport_proxy_;
+
+ // Null audio poller is used to continue polling the audio streams if audio
+ // playout is disabled so that audio processing still happens and the audio
+ // stats are still updated.
+ std::unique_ptr<NullAudioPoller> null_audio_poller_;
+
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(AudioState);
+};
+} // namespace internal
+} // namespace webrtc
+
+#endif // AUDIO_AUDIO_STATE_H_
diff --git a/third_party/libwebrtc/webrtc/audio/audio_state_unittest.cc b/third_party/libwebrtc/webrtc/audio/audio_state_unittest.cc
new file mode 100644
index 0000000000..28b0a715f6
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/audio/audio_state_unittest.cc
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "audio/audio_state.h"
+#include "modules/audio_mixer/audio_mixer_impl.h"
+#include "modules/audio_processing/include/mock_audio_processing.h"
+#include "test/gtest.h"
+#include "test/mock_voice_engine.h"
+
+namespace webrtc {
+namespace test {
+namespace {
+
+const int kSampleRate = 8000;
+const int kNumberOfChannels = 1;
+const int kBytesPerSample = 2;
+
+struct ConfigHelper {
+ ConfigHelper() : audio_mixer(AudioMixerImpl::Create()) {
+ EXPECT_CALL(mock_voice_engine, audio_transport())
+ .WillRepeatedly(testing::Return(&audio_transport));
+
+ audio_state_config.voice_engine = &mock_voice_engine;
+ audio_state_config.audio_mixer = audio_mixer;
+ audio_state_config.audio_processing =
+ new rtc::RefCountedObject<MockAudioProcessing>();
+ }
+ AudioState::Config& config() { return audio_state_config; }
+ MockVoiceEngine& voice_engine() { return mock_voice_engine; }
+ rtc::scoped_refptr<AudioMixer> mixer() { return audio_mixer; }
+ MockAudioTransport& original_audio_transport() { return audio_transport; }
+
+ private:
+ testing::StrictMock<MockVoiceEngine> mock_voice_engine;
+ AudioState::Config audio_state_config;
+ rtc::scoped_refptr<AudioMixer> audio_mixer;
+ MockAudioTransport audio_transport;
+};
+
+class FakeAudioSource : public AudioMixer::Source {
+ public:
+ // TODO(aleloi): Valid overrides commented out, because the gmock
+ // methods don't use any override declarations, and we want to avoid
+ // warnings from -Winconsistent-missing-override. See
+ // http://crbug.com/428099.
+ int Ssrc() const /*override*/ { return 0; }
+
+ int PreferredSampleRate() const /*override*/ { return kSampleRate; }
+
+ MOCK_METHOD2(GetAudioFrameWithInfo,
+ AudioFrameInfo(int sample_rate_hz, AudioFrame* audio_frame));
+};
+
+} // namespace
+
+TEST(AudioStateTest, Create) {
+ ConfigHelper helper;
+ rtc::scoped_refptr<AudioState> audio_state =
+ AudioState::Create(helper.config());
+ EXPECT_TRUE(audio_state.get());
+}
+
+TEST(AudioStateTest, ConstructDestruct) {
+ ConfigHelper helper;
+ std::unique_ptr<internal::AudioState> audio_state(
+ new internal::AudioState(helper.config()));
+}
+
+TEST(AudioStateTest, GetVoiceEngine) {
+ ConfigHelper helper;
+ std::unique_ptr<internal::AudioState> audio_state(
+ new internal::AudioState(helper.config()));
+ EXPECT_EQ(audio_state->voice_engine(), &helper.voice_engine());
+}
+
+// Test that RecordedDataIsAvailable calls get to the original transport.
+TEST(AudioStateAudioPathTest, RecordedAudioArrivesAtOriginalTransport) {
+ ConfigHelper helper;
+
+ rtc::scoped_refptr<AudioState> audio_state =
+ AudioState::Create(helper.config());
+
+ // Setup completed. Ensure call of original transport is forwarded to new.
+ uint32_t new_mic_level;
+ EXPECT_CALL(
+ helper.original_audio_transport(),
+ RecordedDataIsAvailable(nullptr, kSampleRate / 100, kBytesPerSample,
+ kNumberOfChannels, kSampleRate, 0, 0, 0, false,
+ testing::Ref(new_mic_level)));
+
+ audio_state->audio_transport()->RecordedDataIsAvailable(
+ nullptr, kSampleRate / 100, kBytesPerSample, kNumberOfChannels,
+ kSampleRate, 0, 0, 0, false, new_mic_level);
+}
+
+TEST(AudioStateAudioPathTest,
+ QueryingProxyForAudioShouldResultInGetAudioCallOnMixerSource) {
+ ConfigHelper helper;
+
+ rtc::scoped_refptr<AudioState> audio_state =
+ AudioState::Create(helper.config());
+
+ FakeAudioSource fake_source;
+
+ helper.mixer()->AddSource(&fake_source);
+
+ EXPECT_CALL(fake_source, GetAudioFrameWithInfo(testing::_, testing::_))
+ .WillOnce(
+ testing::Invoke([](int sample_rate_hz, AudioFrame* audio_frame) {
+ audio_frame->sample_rate_hz_ = sample_rate_hz;
+ audio_frame->samples_per_channel_ = sample_rate_hz / 100;
+ audio_frame->num_channels_ = kNumberOfChannels;
+ return AudioMixer::Source::AudioFrameInfo::kNormal;
+ }));
+
+ int16_t audio_buffer[kSampleRate / 100 * kNumberOfChannels];
+ size_t n_samples_out;
+ int64_t elapsed_time_ms;
+ int64_t ntp_time_ms;
+ audio_state->audio_transport()->NeedMorePlayData(
+ kSampleRate / 100, kBytesPerSample, kNumberOfChannels, kSampleRate,
+ audio_buffer, n_samples_out, &elapsed_time_ms, &ntp_time_ms);
+}
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/audio/audio_transport_proxy.cc b/third_party/libwebrtc/webrtc/audio/audio_transport_proxy.cc
new file mode 100644
index 0000000000..e3201ea79b
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/audio/audio_transport_proxy.cc
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio/audio_transport_proxy.h"
+
+namespace webrtc {
+
+namespace {
+// Resample audio in |frame| to given sample rate preserving the
+// channel count and place the result in |destination|.
+int Resample(const AudioFrame& frame,
+ const int destination_sample_rate,
+ PushResampler<int16_t>* resampler,
+ int16_t* destination) {
+ const int number_of_channels = static_cast<int>(frame.num_channels_);
+ const int target_number_of_samples_per_channel =
+ destination_sample_rate / 100;
+ resampler->InitializeIfNeeded(frame.sample_rate_hz_, destination_sample_rate,
+ number_of_channels);
+
+ // TODO(yujo): make resampler take an AudioFrame, and add special case
+ // handling of muted frames.
+ return resampler->Resample(
+ frame.data(), frame.samples_per_channel_ * number_of_channels,
+ destination, number_of_channels * target_number_of_samples_per_channel);
+}
+} // namespace
+
+AudioTransportProxy::AudioTransportProxy(AudioTransport* voe_audio_transport,
+ AudioProcessing* audio_processing,
+ AudioMixer* mixer)
+ : voe_audio_transport_(voe_audio_transport),
+ audio_processing_(audio_processing),
+ mixer_(mixer) {
+ RTC_DCHECK(voe_audio_transport);
+ RTC_DCHECK(audio_processing);
+ RTC_DCHECK(mixer);
+}
+
+AudioTransportProxy::~AudioTransportProxy() {}
+
+int32_t AudioTransportProxy::RecordedDataIsAvailable(
+ const void* audioSamples,
+ const size_t nSamples,
+ const size_t nBytesPerSample,
+ const size_t nChannels,
+ const uint32_t samplesPerSec,
+ const uint32_t totalDelayMS,
+ const int32_t clockDrift,
+ const uint32_t currentMicLevel,
+ const bool keyPressed,
+ uint32_t& newMicLevel) { // NOLINT: to avoid changing APIs
+ // Pass call through to original audio transport instance.
+ return voe_audio_transport_->RecordedDataIsAvailable(
+ audioSamples, nSamples, nBytesPerSample, nChannels, samplesPerSec,
+ totalDelayMS, clockDrift, currentMicLevel, keyPressed, newMicLevel);
+}
+
+int32_t AudioTransportProxy::NeedMorePlayData(const size_t nSamples,
+ const size_t nBytesPerSample,
+ const size_t nChannels,
+ const uint32_t samplesPerSec,
+ void* audioSamples,
+ size_t& nSamplesOut,
+ int64_t* elapsed_time_ms,
+ int64_t* ntp_time_ms) {
+ RTC_DCHECK_EQ(sizeof(int16_t) * nChannels, nBytesPerSample);
+ RTC_DCHECK_GE(nChannels, 1);
+ RTC_DCHECK_LE(nChannels, 2);
+ RTC_DCHECK_GE(
+ samplesPerSec,
+ static_cast<uint32_t>(AudioProcessing::NativeRate::kSampleRate8kHz));
+
+ // 100 = 1 second / data duration (10 ms).
+ RTC_DCHECK_EQ(nSamples * 100, samplesPerSec);
+ RTC_DCHECK_LE(nBytesPerSample * nSamples * nChannels,
+ AudioFrame::kMaxDataSizeBytes);
+
+ mixer_->Mix(nChannels, &mixed_frame_);
+ *elapsed_time_ms = mixed_frame_.elapsed_time_ms_;
+ *ntp_time_ms = mixed_frame_.ntp_time_ms_;
+
+ const auto error = audio_processing_->ProcessReverseStream(&mixed_frame_);
+ RTC_DCHECK_EQ(error, AudioProcessing::kNoError);
+
+ nSamplesOut = Resample(mixed_frame_, samplesPerSec, &resampler_,
+ static_cast<int16_t*>(audioSamples));
+ RTC_DCHECK_EQ(nSamplesOut, nChannels * nSamples);
+ return 0;
+}
+
+void AudioTransportProxy::PushCaptureData(int voe_channel,
+ const void* audio_data,
+ int bits_per_sample,
+ int sample_rate,
+ size_t number_of_channels,
+ size_t number_of_frames) {
+ // This is part of deprecated VoE interface operating on specific
+ // VoE channels. It should not be used.
+ RTC_NOTREACHED();
+}
+
+void AudioTransportProxy::PullRenderData(int bits_per_sample,
+ int sample_rate,
+ size_t number_of_channels,
+ size_t number_of_frames,
+ void* audio_data,
+ int64_t* elapsed_time_ms,
+ int64_t* ntp_time_ms) {
+ RTC_DCHECK_EQ(bits_per_sample, 16);
+ RTC_DCHECK_GE(number_of_channels, 1);
+ RTC_DCHECK_LE(number_of_channels, 2);
+ RTC_DCHECK_GE(sample_rate, AudioProcessing::NativeRate::kSampleRate8kHz);
+
+ // 100 = 1 second / data duration (10 ms).
+ RTC_DCHECK_EQ(number_of_frames * 100, sample_rate);
+
+ // 8 = bits per byte.
+ RTC_DCHECK_LE(bits_per_sample / 8 * number_of_frames * number_of_channels,
+ AudioFrame::kMaxDataSizeBytes);
+ mixer_->Mix(number_of_channels, &mixed_frame_);
+ *elapsed_time_ms = mixed_frame_.elapsed_time_ms_;
+ *ntp_time_ms = mixed_frame_.ntp_time_ms_;
+
+ const auto output_samples = Resample(mixed_frame_, sample_rate, &resampler_,
+ static_cast<int16_t*>(audio_data));
+ RTC_DCHECK_EQ(output_samples, number_of_channels * number_of_frames);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/audio/audio_transport_proxy.h b/third_party/libwebrtc/webrtc/audio/audio_transport_proxy.h
new file mode 100644
index 0000000000..a51a7dba31
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/audio/audio_transport_proxy.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_AUDIO_TRANSPORT_PROXY_H_
+#define AUDIO_AUDIO_TRANSPORT_PROXY_H_
+
+#include "api/audio/audio_mixer.h"
+#include "common_audio/resampler/include/push_resampler.h"
+#include "modules/audio_device/include/audio_device_defines.h"
+#include "modules/audio_processing/include/audio_processing.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/scoped_ref_ptr.h"
+
+namespace webrtc {
+
+// AudioTransport implementation that produces render-side audio
+// (NeedMorePlayData()/PullRenderData()) by mixing all sources via the injected
+// AudioMixer and resampling to the requested rate. Capture-side callbacks are
+// presumably delegated to |voe_audio_transport_| -- confirm in
+// audio_transport_proxy.cc.
+class AudioTransportProxy : public AudioTransport {
+ public:
+  // None of the pointers are owned; callers must keep them alive for the
+  // lifetime of this proxy (the mixer is additionally ref-counted below).
+  AudioTransportProxy(AudioTransport* voe_audio_transport,
+                      AudioProcessing* audio_processing,
+                      AudioMixer* mixer);
+
+  ~AudioTransportProxy() override;
+
+  // Called by the audio device with recorded (capture) audio.
+  int32_t RecordedDataIsAvailable(const void* audioSamples,
+                                  const size_t nSamples,
+                                  const size_t nBytesPerSample,
+                                  const size_t nChannels,
+                                  const uint32_t samplesPerSec,
+                                  const uint32_t totalDelayMS,
+                                  const int32_t clockDrift,
+                                  const uint32_t currentMicLevel,
+                                  const bool keyPressed,
+                                  uint32_t& newMicLevel) override;
+
+  // Called by the audio device to fetch audio for playout.
+  int32_t NeedMorePlayData(const size_t nSamples,
+                           const size_t nBytesPerSample,
+                           const size_t nChannels,
+                           const uint32_t samplesPerSec,
+                           void* audioSamples,
+                           size_t& nSamplesOut,
+                           int64_t* elapsed_time_ms,
+                           int64_t* ntp_time_ms) override;
+
+  // Deprecated per-channel VoE entry point; not supported (see .cc).
+  void PushCaptureData(int voe_channel,
+                       const void* audio_data,
+                       int bits_per_sample,
+                       int sample_rate,
+                       size_t number_of_channels,
+                       size_t number_of_frames) override;
+
+  // Pulls mixed, resampled render audio without reverse-stream processing.
+  void PullRenderData(int bits_per_sample,
+                      int sample_rate,
+                      size_t number_of_channels,
+                      size_t number_of_frames,
+                      void* audio_data,
+                      int64_t* elapsed_time_ms,
+                      int64_t* ntp_time_ms) override;
+
+ private:
+  AudioTransport* voe_audio_transport_;  // Not owned.
+  AudioProcessing* audio_processing_;    // Not owned.
+  // Ref-counted handle keeps the mixer alive while this proxy exists.
+  rtc::scoped_refptr<AudioMixer> mixer_;
+  // Scratch frame reused on every mix to avoid per-call allocation.
+  AudioFrame mixed_frame_;
+  // Converts mixed audio to the audio device output rate.
+  PushResampler<int16_t> resampler_;
+
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(AudioTransportProxy);
+};
+} // namespace webrtc
+
+#endif // AUDIO_AUDIO_TRANSPORT_PROXY_H_
diff --git a/third_party/libwebrtc/webrtc/audio/conversion.h b/third_party/libwebrtc/webrtc/audio/conversion.h
new file mode 100644
index 0000000000..920aa3a434
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/audio/conversion.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_CONVERSION_H_
+#define AUDIO_CONVERSION_H_
+
+namespace webrtc {
+
+// Converts a fixed-point number with an 8-bit fractional part (Q8) to
+// floating point. No clamping or rounding is applied.
+// NOTE(review): uses uint32_t without a visible <cstdint> include in this
+// header -- presumably provided transitively; confirm.
+inline float Q8ToFloat(uint32_t v) {
+  return static_cast<float>(v) / (1 << 8);
+}
+
+// Converts a fixed-point number with a 14-bit fractional part (Q14) to
+// floating point. No clamping or rounding is applied.
+inline float Q14ToFloat(uint32_t v) {
+  return static_cast<float>(v) / (1 << 14);
+}
+} // namespace webrtc
+
+#endif // AUDIO_CONVERSION_H_
diff --git a/third_party/libwebrtc/webrtc/audio/null_audio_poller.cc b/third_party/libwebrtc/webrtc/audio/null_audio_poller.cc
new file mode 100644
index 0000000000..c22b3d8791
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/audio/null_audio_poller.cc
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio/null_audio_poller.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/thread.h"
+
+namespace webrtc {
+namespace internal {
+
+namespace {
+
+constexpr int64_t kPollDelayMs = 10; // WebRTC uses 10ms by default
+
+constexpr size_t kNumChannels = 1;
+constexpr uint32_t kSamplesPerSecond = 48000; // 48kHz
+constexpr size_t kNumSamples = kSamplesPerSecond / 100; // 10ms of samples
+
+} // namespace
+
+// Performs the first poll synchronously on the constructing thread; all later
+// polls run as posted messages on that same thread (see OnMessage()).
+NullAudioPoller::NullAudioPoller(AudioTransport* audio_transport)
+    : audio_transport_(audio_transport),
+      reschedule_at_(rtc::TimeMillis() + kPollDelayMs) {
+  RTC_DCHECK(audio_transport);
+  OnMessage(nullptr);  // Start the poll loop.
+}
+
+NullAudioPoller::~NullAudioPoller() {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  // Remove any poll message still queued for this handler so it is not
+  // delivered after destruction.
+  rtc::Thread::Current()->Clear(this);
+}
+
+// One poll iteration: pulls 10 ms of mono 48 kHz audio from the transport
+// (the data itself is discarded) and reposts itself for the next tick.
+void NullAudioPoller::OnMessage(rtc::Message* msg) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+
+  // Stack buffer to hold the pulled audio samples; never read afterwards.
+  int16_t buffer[kNumSamples * kNumChannels];
+  // Output variables from |NeedMorePlayData|; values are ignored.
+  size_t n_samples;
+  int64_t elapsed_time_ms;
+  int64_t ntp_time_ms;
+  audio_transport_->NeedMorePlayData(kNumSamples, sizeof(int16_t), kNumChannels,
+                                     kSamplesPerSecond, buffer, n_samples,
+                                     &elapsed_time_ms, &ntp_time_ms);
+
+  // Reschedule the next poll iteration. If, for some reason, the given
+  // reschedule time has already passed, reschedule as soon as possible.
+  // Scheduling off |reschedule_at_| (not |now|) keeps the long-run poll rate
+  // at kPollDelayMs without accumulating drift.
+  int64_t now = rtc::TimeMillis();
+  if (reschedule_at_ < now) {
+    reschedule_at_ = now;
+  }
+  rtc::Thread::Current()->PostAt(RTC_FROM_HERE, reschedule_at_, this, 0);
+
+  // Loop after next will be kPollDelayMs later.
+  reschedule_at_ += kPollDelayMs;
+}
+
+} // namespace internal
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/audio/null_audio_poller.h b/third_party/libwebrtc/webrtc/audio/null_audio_poller.h
new file mode 100644
index 0000000000..b6ddf17150
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/audio/null_audio_poller.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_NULL_AUDIO_POLLER_H_
+#define AUDIO_NULL_AUDIO_POLLER_H_
+
+#include "modules/audio_device/include/audio_device_defines.h"
+#include "rtc_base/messagehandler.h"
+#include "rtc_base/thread_checker.h"
+
+namespace webrtc {
+namespace internal {
+
+// MessageHandler that polls an AudioTransport for 10 ms chunks of render
+// audio on a fixed schedule and discards the data (see null_audio_poller.cc).
+// Presumably keeps audio pipelines ticking when no real playout device is
+// active -- confirm against callers.
+class NullAudioPoller final : public rtc::MessageHandler {
+ public:
+  explicit NullAudioPoller(AudioTransport* audio_transport);
+  ~NullAudioPoller();
+
+ protected:
+  // Runs one poll and reschedules itself; must stay on the creating thread.
+  void OnMessage(rtc::Message* msg) override;
+
+ private:
+  rtc::ThreadChecker thread_checker_;
+  AudioTransport* const audio_transport_;  // Not owned.
+  // Absolute wall-clock time (ms) at which the next poll is scheduled.
+  int64_t reschedule_at_;
+};
+
+} // namespace internal
+} // namespace webrtc
+
+#endif // AUDIO_NULL_AUDIO_POLLER_H_
diff --git a/third_party/libwebrtc/webrtc/audio/scoped_voe_interface.h b/third_party/libwebrtc/webrtc/audio/scoped_voe_interface.h
new file mode 100644
index 0000000000..7aa2d1dacf
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/audio/scoped_voe_interface.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_SCOPED_VOE_INTERFACE_H_
+#define AUDIO_SCOPED_VOE_INTERFACE_H_
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+class VoiceEngine;
+
+namespace internal {
+
+// Utility template for obtaining and holding a reference to a VoiceEngine
+// interface and making sure it is released when this object goes out of scope.
+// Utility template for obtaining and holding a reference to a VoiceEngine
+// interface and making sure it is released when this object goes out of scope.
+template<class T> class ScopedVoEInterface {
+ public:
+  // T::GetInterface() is expected to return a referenced interface pointer;
+  // a null result is a programming error.
+  explicit ScopedVoEInterface(webrtc::VoiceEngine* e)
+      : ptr_(T::GetInterface(e)) {
+    RTC_DCHECK(ptr_);
+  }
+  // Drops the reference acquired in the constructor, if still held.
+  ~ScopedVoEInterface() {
+    if (ptr_) {
+      ptr_->Release();
+    }
+  }
+  // Forwards calls to the held interface.
+  T* operator->() {
+    RTC_DCHECK(ptr_);
+    return ptr_;
+  }
+ private:
+  T* ptr_;  // Reference released in the destructor.
+};
+} // namespace internal
+} // namespace webrtc
+
+#endif // AUDIO_SCOPED_VOE_INTERFACE_H_
diff --git a/third_party/libwebrtc/webrtc/audio/test/audio_bwe_integration_test.cc b/third_party/libwebrtc/webrtc/audio/test/audio_bwe_integration_test.cc
new file mode 100644
index 0000000000..f89ced97f4
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/audio/test/audio_bwe_integration_test.cc
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio/test/audio_bwe_integration_test.h"
+
+#include "common_audio/wav_file.h"
+#include "rtc_base/ptr_util.h"
+#include "system_wrappers/include/sleep.h"
+#include "test/field_trial.h"
+#include "test/gtest.h"
+#include "test/testsupport/fileutils.h"
+
+namespace webrtc {
+namespace test {
+
+namespace {
+// Wait a second between stopping sending and stopping receiving audio.
+constexpr int kExtraProcessTimeMs = 1000;
+} // namespace
+
+AudioBweTest::AudioBweTest() : EndToEndTest(CallTest::kDefaultTimeoutMs) {}
+
+// Audio-only BWE test: one audio stream, no video and no FlexFEC.
+size_t AudioBweTest::GetNumVideoStreams() const {
+  return 0;
+}
+size_t AudioBweTest::GetNumAudioStreams() const {
+  return 1;
+}
+size_t AudioBweTest::GetNumFlexfecStreams() const {
+  return 0;
+}
+
+// Feeds the WAV file chosen by the subclass (AudioInputFile()) as capture.
+std::unique_ptr<test::FakeAudioDevice::Capturer>
+AudioBweTest::CreateCapturer() {
+  return test::FakeAudioDevice::CreateWavFileReader(AudioInputFile());
+}
+
+// Only the send-side device is kept; it is used to detect end of recording.
+void AudioBweTest::OnFakeAudioDevicesCreated(
+    test::FakeAudioDevice* send_audio_device,
+    test::FakeAudioDevice* recv_audio_device) {
+  send_audio_device_ = send_audio_device;
+}
+
+// Both transports run over the fake network configured by the subclass.
+test::PacketTransport* AudioBweTest::CreateSendTransport(
+    SingleThreadedTaskQueueForTesting* task_queue,
+    Call* sender_call) {
+  return new test::PacketTransport(
+      task_queue, sender_call, this, test::PacketTransport::kSender,
+      test::CallTest::payload_type_map_, GetNetworkPipeConfig());
+}
+
+test::PacketTransport* AudioBweTest::CreateReceiveTransport(
+    SingleThreadedTaskQueueForTesting* task_queue) {
+  return new test::PacketTransport(
+      task_queue, nullptr, this, test::PacketTransport::kReceiver,
+      test::CallTest::payload_type_map_, GetNetworkPipeConfig());
+}
+
+// Runs until the input WAV is fully sent, then waits out the network delay
+// plus kExtraProcessTimeMs so in-flight audio can drain.
+void AudioBweTest::PerformTest() {
+  send_audio_device_->WaitForRecordingEnd();
+  SleepMs(GetNetworkPipeConfig().queue_delay_ms + kExtraProcessTimeMs);
+}
+
+// Task that repeatedly polls the sender call's stats and asserts the send
+// bandwidth estimate stays above 25 kbps. Run() returns false so the task
+// queue does NOT delete the task after execution; instead the task hands
+// ownership of itself back to the queue via PostDelayedTask(), re-running
+// every 100 ms until the queue is destroyed.
+class StatsPollTask : public rtc::QueuedTask {
+ public:
+  explicit StatsPollTask(Call* sender_call) : sender_call_(sender_call) {}
+
+ private:
+  bool Run() override {
+    RTC_CHECK(sender_call_);
+    Call::Stats call_stats = sender_call_->GetStats();
+    // The BWE must not drop below 25 kbps (e.g. during DTX silence periods).
+    EXPECT_GT(call_stats.send_bandwidth_bps, 25000);
+    rtc::TaskQueue::Current()->PostDelayedTask(
+        std::unique_ptr<QueuedTask>(this), 100);
+    return false;  // Keep ownership with the posted task, not the queue.
+  }
+  Call* sender_call_;  // Not owned.
+};
+
+// Verifies that the send-side bandwidth estimate does not collapse when Opus
+// DTX suppresses packets during silence: plays a file with silence periods
+// over a 50 kbps link while StatsPollTask continuously checks the estimate.
+class NoBandwidthDropAfterDtx : public AudioBweTest {
+ public:
+  NoBandwidthDropAfterDtx()
+      : sender_call_(nullptr), stats_poller_("stats poller task queue") {}
+
+  // Opus stereo, 60 ms ptime, DTX on; transport-cc feedback enabled on the
+  // receiver so send-side BWE gets per-packet feedback.
+  void ModifyAudioConfigs(
+      AudioSendStream::Config* send_config,
+      std::vector<AudioReceiveStream::Config>* receive_configs) override {
+    send_config->send_codec_spec = AudioSendStream::Config::SendCodecSpec(
+        test::CallTest::kAudioSendPayloadType,
+        {"OPUS",
+         48000,
+         2,
+         {{"ptime", "60"}, {"usedtx", "1"}, {"stereo", "1"}}});
+
+    send_config->min_bitrate_bps = 6000;
+    send_config->max_bitrate_bps = 100000;
+    send_config->rtp.extensions.push_back(
+        RtpExtension(RtpExtension::kTransportSequenceNumberUri,
+                     kTransportSequenceNumberExtensionId));
+    for (AudioReceiveStream::Config& recv_config : *receive_configs) {
+      recv_config.rtp.transport_cc = true;
+      recv_config.rtp.extensions = send_config->rtp.extensions;
+      recv_config.rtp.remote_ssrc = send_config->rtp.ssrc;
+    }
+  }
+
+  // Input file containing speech with silence periods, exercising DTX.
+  std::string AudioInputFile() override {
+    return test::ResourcePath("voice_engine/audio_dtx16", "wav");
+  }
+
+  // Constrained link: 50 kbps capacity, deep queue, 300 ms queue delay.
+  FakeNetworkPipe::Config GetNetworkPipeConfig() override {
+    FakeNetworkPipe::Config pipe_config;
+    pipe_config.link_capacity_kbps = 50;
+    pipe_config.queue_length_packets = 1500;
+    pipe_config.queue_delay_ms = 300;
+    return pipe_config;
+  }
+
+  void OnCallsCreated(Call* sender_call, Call* receiver_call) override {
+    sender_call_ = sender_call;
+  }
+
+  void PerformTest() override {
+    // Kick off the self-reposting stats checker (see StatsPollTask above).
+    stats_poller_.PostDelayedTask(
+        std::unique_ptr<rtc::QueuedTask>(new StatsPollTask(sender_call_)), 100);
+    sender_call_->OnTransportOverheadChanged(webrtc::MediaType::AUDIO, 0);
+    AudioBweTest::PerformTest();
+  }
+
+ private:
+  Call* sender_call_;  // Not owned; set in OnCallsCreated().
+  rtc::TaskQueue stats_poller_;
+};
+
+using AudioBweIntegrationTest = CallTest;
+
+// TODO(tschumim): This test is flaky when run on android and mac. Re-enable
+// the test when the issue is fixed.
+TEST_F(AudioBweIntegrationTest, DISABLED_NoBandwidthDropAfterDtx) {
+  // Enable audio send-side BWE (with overhead accounting) via field trials.
+  webrtc::test::ScopedFieldTrials override_field_trials(
+      "WebRTC-Audio-SendSideBwe/Enabled/"
+      "WebRTC-SendSideBwe-WithOverhead/Enabled/");
+  NoBandwidthDropAfterDtx test;
+  RunBaseTest(&test);
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/audio/test/audio_bwe_integration_test.h b/third_party/libwebrtc/webrtc/audio/test/audio_bwe_integration_test.h
new file mode 100644
index 0000000000..0b0cb6c73b
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/audio/test/audio_bwe_integration_test.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef AUDIO_TEST_AUDIO_BWE_INTEGRATION_TEST_H_
+#define AUDIO_TEST_AUDIO_BWE_INTEGRATION_TEST_H_
+
+#include <memory>
+#include <string>
+
+#include "test/call_test.h"
+#include "test/fake_audio_device.h"
+#include "test/single_threaded_task_queue.h"
+
+namespace webrtc {
+namespace test {
+
+class AudioBweTest : public test::EndToEndTest {
+ public:
+ AudioBweTest();
+
+ protected:
+ virtual std::string AudioInputFile() = 0;
+
+ virtual FakeNetworkPipe::Config GetNetworkPipeConfig() = 0;
+
+ size_t GetNumVideoStreams() const override;
+ size_t GetNumAudioStreams() const override;
+ size_t GetNumFlexfecStreams() const override;
+
+ std::unique_ptr<test::FakeAudioDevice::Capturer> CreateCapturer() override;
+
+ void OnFakeAudioDevicesCreated(
+ test::FakeAudioDevice* send_audio_device,
+ test::FakeAudioDevice* recv_audio_device) override;
+
+ test::PacketTransport* CreateSendTransport(
+ SingleThreadedTaskQueueForTesting* task_queue,
+ Call* sender_call) override;
+ test::PacketTransport* CreateReceiveTransport(
+ SingleThreadedTaskQueueForTesting* task_queue) override;
+
+ void PerformTest() override;
+
+ private:
+ test::FakeAudioDevice* send_audio_device_;
+};
+
+} // namespace test
+} // namespace webrtc
+
+#endif // AUDIO_TEST_AUDIO_BWE_INTEGRATION_TEST_H_
diff --git a/third_party/libwebrtc/webrtc/audio/test/audio_end_to_end_test.cc b/third_party/libwebrtc/webrtc/audio/test/audio_end_to_end_test.cc
new file mode 100644
index 0000000000..44bf3f775a
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/audio/test/audio_end_to_end_test.cc
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <algorithm>
+
+#include "audio/test/audio_end_to_end_test.h"
+#include "system_wrappers/include/sleep.h"
+#include "test/fake_audio_device.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace test {
+namespace {
+// Wait half a second between stopping sending and stopping receiving audio.
+constexpr int kExtraRecordTimeMs = 500;
+
+constexpr int kSampleRate = 48000;
+} // namespace
+
+AudioEndToEndTest::AudioEndToEndTest()
+    : EndToEndTest(CallTest::kDefaultTimeoutMs) {}
+
+// Default: an unmodified FakeNetworkPipe::Config, i.e. no impairment.
+FakeNetworkPipe::Config AudioEndToEndTest::GetNetworkPipeConfig() const {
+  return FakeNetworkPipe::Config();
+}
+
+// Audio-only test: one audio stream, no video and no FlexFEC.
+size_t AudioEndToEndTest::GetNumVideoStreams() const {
+  return 0;
+}
+
+size_t AudioEndToEndTest::GetNumAudioStreams() const {
+  return 1;
+}
+
+size_t AudioEndToEndTest::GetNumFlexfecStreams() const {
+  return 0;
+}
+
+// Captures synthetic pulsed noise at kSampleRate (32000 is presumably the
+// pulse amplitude -- see FakeAudioDevice::CreatePulsedNoiseCapturer).
+std::unique_ptr<test::FakeAudioDevice::Capturer>
+    AudioEndToEndTest::CreateCapturer() {
+  return test::FakeAudioDevice::CreatePulsedNoiseCapturer(32000, kSampleRate);
+}
+
+// Played-out audio is discarded by default; subclasses may record instead.
+std::unique_ptr<test::FakeAudioDevice::Renderer>
+    AudioEndToEndTest::CreateRenderer() {
+  return test::FakeAudioDevice::CreateDiscardRenderer(kSampleRate);
+}
+
+// Only the send-side device is kept, to detect end of recording.
+void AudioEndToEndTest::OnFakeAudioDevicesCreated(
+    test::FakeAudioDevice* send_audio_device,
+    test::FakeAudioDevice* recv_audio_device) {
+  send_audio_device_ = send_audio_device;
+}
+
+test::PacketTransport* AudioEndToEndTest::CreateSendTransport(
+    SingleThreadedTaskQueueForTesting* task_queue,
+    Call* sender_call) {
+  return new test::PacketTransport(
+      task_queue, sender_call, this, test::PacketTransport::kSender,
+      test::CallTest::payload_type_map_, GetNetworkPipeConfig());
+}
+
+test::PacketTransport* AudioEndToEndTest::CreateReceiveTransport(
+    SingleThreadedTaskQueueForTesting* task_queue) {
+  return new test::PacketTransport(
+      task_queue, nullptr, this, test::PacketTransport::kReceiver,
+      test::CallTest::payload_type_map_, GetNetworkPipeConfig());
+}
+
+// Default codec: Opus 48 kHz stereo with no bitrate cap.
+void AudioEndToEndTest::ModifyAudioConfigs(
+    AudioSendStream::Config* send_config,
+    std::vector<AudioReceiveStream::Config>* receive_configs) {
+  // Large bitrate by default.
+  const webrtc::SdpAudioFormat kDefaultFormat("opus", 48000, 2,
+                                              {{"stereo", "1"}});
+  send_config->send_codec_spec = AudioSendStream::Config::SendCodecSpec(
+      test::CallTest::kAudioSendPayloadType, kDefaultFormat);
+}
+
+// Stashes the stream pointers so subclasses can query stats after the call.
+void AudioEndToEndTest::OnAudioStreamsCreated(
+    AudioSendStream* send_stream,
+    const std::vector<AudioReceiveStream*>& receive_streams) {
+  ASSERT_NE(nullptr, send_stream);
+  ASSERT_EQ(1u, receive_streams.size());
+  ASSERT_NE(nullptr, receive_streams[0]);
+  send_stream_ = send_stream;
+  receive_stream_ = receive_streams[0];
+}
+
+void AudioEndToEndTest::PerformTest() {
+  // Wait until the input audio file is done...
+  send_audio_device_->WaitForRecordingEnd();
+  // and some extra time to account for network delay.
+  SleepMs(GetNetworkPipeConfig().queue_delay_ms + kExtraRecordTimeMs);
+}
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/audio/test/audio_end_to_end_test.h b/third_party/libwebrtc/webrtc/audio/test/audio_end_to_end_test.h
new file mode 100644
index 0000000000..921de84483
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/audio/test/audio_end_to_end_test.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef AUDIO_TEST_AUDIO_END_TO_END_TEST_H_
+#define AUDIO_TEST_AUDIO_END_TO_END_TEST_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "test/call_test.h"
+
+namespace webrtc {
+namespace test {
+
+// Base fixture for audio-only end-to-end tests: one audio send stream, one
+// audio receive stream, fake audio devices and a configurable fake network.
+// Subclasses typically override GetNetworkPipeConfig(), ModifyAudioConfigs()
+// and PerformTest().
+class AudioEndToEndTest : public test::EndToEndTest {
+ public:
+  AudioEndToEndTest();
+
+ protected:
+  // Accessors for objects captured in the On*Created callbacks below.
+  test::FakeAudioDevice* send_audio_device() { return send_audio_device_; }
+  const AudioSendStream* send_stream() const { return send_stream_; }
+  const AudioReceiveStream* receive_stream() const { return receive_stream_; }
+
+  // Network impairment for both directions; default is no impairment.
+  virtual FakeNetworkPipe::Config GetNetworkPipeConfig() const;
+
+  size_t GetNumVideoStreams() const override;
+  size_t GetNumAudioStreams() const override;
+  size_t GetNumFlexfecStreams() const override;
+
+  std::unique_ptr<test::FakeAudioDevice::Capturer> CreateCapturer() override;
+  std::unique_ptr<test::FakeAudioDevice::Renderer> CreateRenderer() override;
+
+  void OnFakeAudioDevicesCreated(
+      test::FakeAudioDevice* send_audio_device,
+      test::FakeAudioDevice* recv_audio_device) override;
+
+  test::PacketTransport* CreateSendTransport(
+      SingleThreadedTaskQueueForTesting* task_queue,
+      Call* sender_call) override;
+  test::PacketTransport* CreateReceiveTransport(
+      SingleThreadedTaskQueueForTesting* task_queue) override;
+
+  void ModifyAudioConfigs(
+      AudioSendStream::Config* send_config,
+      std::vector<AudioReceiveStream::Config>* receive_configs) override;
+  void OnAudioStreamsCreated(
+      AudioSendStream* send_stream,
+      const std::vector<AudioReceiveStream*>& receive_streams) override;
+
+  void PerformTest() override;
+
+ private:
+  // None of these are owned; they are set via the On*Created callbacks.
+  test::FakeAudioDevice* send_audio_device_ = nullptr;
+  AudioSendStream* send_stream_ = nullptr;
+  AudioReceiveStream* receive_stream_ = nullptr;
+};
+
+} // namespace test
+} // namespace webrtc
+
+#endif // AUDIO_TEST_AUDIO_END_TO_END_TEST_H_
diff --git a/third_party/libwebrtc/webrtc/audio/test/audio_stats_test.cc b/third_party/libwebrtc/webrtc/audio/test/audio_stats_test.cc
new file mode 100644
index 0000000000..ee225c0b50
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/audio/test/audio_stats_test.cc
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio/test/audio_end_to_end_test.h"
+#include "rtc_base/numerics/safe_compare.h"
+#include "system_wrappers/include/sleep.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace test {
+namespace {
+
+// Returns true if |v| is within 10% (plus one, so a zero reference still has
+// a non-empty tolerance) of |reference|.
+bool IsNear(int reference, int v) {
+  // Margin is 10%.
+  const int error = reference / 10 + 1;
+  return std::abs(reference - v) <= error;
+}
+
+// Runs an 8-second audio call over a loss-free (delay-only) network and then
+// compares sender/receiver stats against hard-coded expectations (10%
+// tolerance via IsNear for the volume-dependent counters).
+class NoLossTest : public AudioEndToEndTest {
+ public:
+  const int kTestDurationMs = 8000;
+  // Expected traffic totals for kTestDurationMs of the default Opus stream.
+  const int kBytesSent = 69351;
+  const int32_t kPacketsSent = 400;
+  // Round-trip time induced by the network pipe (kRttMs / 2 per direction).
+  const int64_t kRttMs = 100;
+
+  NoLossTest() = default;
+
+  FakeNetworkPipe::Config GetNetworkPipeConfig() const override {
+    FakeNetworkPipe::Config pipe_config;
+    pipe_config.queue_delay_ms = kRttMs / 2;
+    return pipe_config;
+  }
+
+  // Let the call run for a fixed duration, then stop recording before the
+  // base class waits for the recording to end.
+  void PerformTest() override {
+    SleepMs(kTestDurationMs);
+    send_audio_device()->StopRecording();
+    AudioEndToEndTest::PerformTest();
+  }
+
+  // All stats checks happen after the streams are stopped; commented-out
+  // field names mark stats that are intentionally not asserted on.
+  void OnStreamsStopped() override {
+    AudioSendStream::Stats send_stats = send_stream()->GetStats();
+    EXPECT_PRED2(IsNear, kBytesSent, send_stats.bytes_sent);
+    EXPECT_PRED2(IsNear, kPacketsSent, send_stats.packets_sent);
+    EXPECT_EQ(0, send_stats.packets_lost);
+    EXPECT_EQ(0.0f, send_stats.fraction_lost);
+    EXPECT_EQ("opus", send_stats.codec_name);
+    // send_stats.jitter_ms
+    EXPECT_PRED2(IsNear, kRttMs, send_stats.rtt_ms);
+    // Send level is 0 because it is cleared in TransmitMixer::StopSend().
+    EXPECT_EQ(0, send_stats.audio_level);
+    // send_stats.total_input_energy
+    // send_stats.total_input_duration
+    EXPECT_FALSE(send_stats.apm_statistics.delay_median_ms);
+    EXPECT_FALSE(send_stats.apm_statistics.delay_standard_deviation_ms);
+    EXPECT_FALSE(send_stats.apm_statistics.echo_return_loss);
+    EXPECT_FALSE(send_stats.apm_statistics.echo_return_loss_enhancement);
+    EXPECT_FALSE(send_stats.apm_statistics.residual_echo_likelihood);
+    EXPECT_FALSE(send_stats.apm_statistics.residual_echo_likelihood_recent_max);
+    EXPECT_EQ(false, send_stats.typing_noise_detected);
+
+    AudioReceiveStream::Stats recv_stats = receive_stream()->GetStats();
+    EXPECT_PRED2(IsNear, kBytesSent, recv_stats.bytes_rcvd);
+    EXPECT_PRED2(IsNear, kPacketsSent, recv_stats.packets_rcvd);
+    EXPECT_EQ(0u, recv_stats.packets_lost);
+    EXPECT_EQ(0.0f, recv_stats.fraction_lost);
+    // NOTE(review): re-checks send_stats.codec_name here; presumably
+    // recv_stats.codec_name was intended.
+    EXPECT_EQ("opus", send_stats.codec_name);
+    // recv_stats.jitter_ms
+    // recv_stats.jitter_buffer_ms
+    EXPECT_EQ(20u, recv_stats.jitter_buffer_preferred_ms);
+    // recv_stats.delay_estimate_ms
+    // Receive level is 0 because it is cleared in Channel::StopPlayout().
+    EXPECT_EQ(0, recv_stats.audio_level);
+    // recv_stats.total_output_energy
+    // recv_stats.total_samples_received
+    // recv_stats.total_output_duration
+    // recv_stats.concealed_samples
+    // recv_stats.expand_rate
+    // recv_stats.speech_expand_rate
+    EXPECT_EQ(0.0, recv_stats.secondary_decoded_rate);
+    EXPECT_EQ(0.0, recv_stats.secondary_discarded_rate);
+    EXPECT_EQ(0.0, recv_stats.accelerate_rate);
+    EXPECT_EQ(0.0, recv_stats.preemptive_expand_rate);
+    EXPECT_EQ(0, recv_stats.decoding_calls_to_silence_generator);
+    // recv_stats.decoding_calls_to_neteq
+    // recv_stats.decoding_normal
+    // recv_stats.decoding_plc
+    EXPECT_EQ(0, recv_stats.decoding_cng);
+    // recv_stats.decoding_plc_cng
+    // recv_stats.decoding_muted_output
+    // Capture start time is -1 because we do not have an associated send stream
+    // on the receiver side.
+    EXPECT_EQ(-1, recv_stats.capture_start_ntp_time_ms);
+
+    // Match these stats between caller and receiver.
+    EXPECT_EQ(send_stats.local_ssrc, recv_stats.remote_ssrc);
+    EXPECT_EQ(*send_stats.codec_payload_type, *recv_stats.codec_payload_type);
+    EXPECT_TRUE(rtc::SafeEq(send_stats.ext_seqnum, recv_stats.ext_seqnum));
+  }
+};
+} // namespace
+
+using AudioStatsTest = CallTest;
+
+// NOTE(review): disabled (DISABLED_ prefix); the reason is not documented in
+// this file -- confirm before re-enabling.
+TEST_F(AudioStatsTest, DISABLED_NoLoss) {
+  NoLossTest test;
+  RunBaseTest(&test);
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/audio/test/low_bandwidth_audio_test.cc b/third_party/libwebrtc/webrtc/audio/test/low_bandwidth_audio_test.cc
new file mode 100644
index 0000000000..cf84e19924
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/audio/test/low_bandwidth_audio_test.cc
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio/test/audio_end_to_end_test.h"
+#include "rtc_base/flags.h"
+#include "system_wrappers/include/sleep.h"
+#include "test/testsupport/fileutils.h"
+
+DEFINE_int(sample_rate_hz, 16000,
+ "Sample rate (Hz) of the produced audio files.");
+
+DEFINE_bool(quick, false,
+ "Don't do the full audio recording. "
+ "Used to quickly check that the test runs without crashing.");
+
+namespace webrtc {
+namespace test {
+namespace {
+
+// Returns the sample rate in kHz as a string (e.g. "16" for 16000 Hz), used
+// to build input/output file names.
+std::string FileSampleRateSuffix() {
+  return std::to_string(FLAG_sample_rate_hz / 1000);
+}
+
+// Plays a known WAV file through the call, records the played-out audio to a
+// WAV file, and prints both paths so an external process can score quality.
+class AudioQualityTest : public AudioEndToEndTest {
+ public:
+  AudioQualityTest() = default;
+
+ private:
+  // Input resource selected by the --sample_rate_hz flag.
+  std::string AudioInputFile() const {
+    return test::ResourcePath(
+        "voice_engine/audio_tiny" + FileSampleRateSuffix(), "wav");
+  }
+
+  // Output path includes the test name and sample rate, so parallel tests do
+  // not clobber each other's recordings.
+  std::string AudioOutputFile() const {
+    const ::testing::TestInfo* const test_info =
+        ::testing::UnitTest::GetInstance()->current_test_info();
+    return webrtc::test::OutputPath() + "LowBandwidth_" + test_info->name() +
+           "_" + FileSampleRateSuffix() + ".wav";
+  }
+
+  std::unique_ptr<test::FakeAudioDevice::Capturer> CreateCapturer() override {
+    return test::FakeAudioDevice::CreateWavFileReader(AudioInputFile());
+  }
+
+  // Unlike the base class, actually record playout to a file for scoring.
+  std::unique_ptr<test::FakeAudioDevice::Renderer> CreateRenderer() override {
+    return test::FakeAudioDevice::CreateBoundedWavFileWriter(
+        AudioOutputFile(), FLAG_sample_rate_hz);
+  }
+
+  void PerformTest() override {
+    if (FLAG_quick) {
+      // Let the recording run for a small amount of time to check if it works.
+      SleepMs(1000);
+    } else {
+      AudioEndToEndTest::PerformTest();
+    }
+  }
+
+  void OnStreamsStopped() override {
+    const ::testing::TestInfo* const test_info =
+        ::testing::UnitTest::GetInstance()->current_test_info();
+
+    // Output information about the input and output audio files so that
+    // further processing can be done by an external process (the Python
+    // wrapper parses these "TEST" lines).
+    printf("TEST %s %s %s\n", test_info->name(),
+           AudioInputFile().c_str(), AudioOutputFile().c_str());
+  }
+};
+
+// Quality test variant simulating a 2G mobile link: 12 kbps capacity with a
+// 400 ms queue delay, and Opus capped at 6 kbps average with 60 ms frames.
+class Mobile2GNetworkTest : public AudioQualityTest {
+  void ModifyAudioConfigs(AudioSendStream::Config* send_config,
+      std::vector<AudioReceiveStream::Config>* receive_configs) override {
+    send_config->send_codec_spec = AudioSendStream::Config::SendCodecSpec(
+        test::CallTest::kAudioSendPayloadType,
+        {"OPUS",
+         48000,
+         2,
+         {{"maxaveragebitrate", "6000"},
+          {"ptime", "60"},
+          {"stereo", "1"}}});
+  }
+
+  FakeNetworkPipe::Config GetNetworkPipeConfig() const override {
+    FakeNetworkPipe::Config pipe_config;
+    pipe_config.link_capacity_kbps = 12;
+    pipe_config.queue_length_packets = 1500;
+    pipe_config.queue_delay_ms = 400;
+    return pipe_config;
+  }
+};
+} // namespace
+
+using LowBandwidthAudioTest = CallTest;
+
+// Baseline: unimpaired network with the default high-bitrate Opus config.
+TEST_F(LowBandwidthAudioTest, GoodNetworkHighBitrate) {
+  AudioQualityTest test;
+  RunBaseTest(&test);
+}
+
+// Constrained: simulated 2G link with low-bitrate Opus (see class above).
+TEST_F(LowBandwidthAudioTest, Mobile2GNetwork) {
+  Mobile2GNetworkTest test;
+  RunBaseTest(&test);
+}
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/audio/test/low_bandwidth_audio_test.py b/third_party/libwebrtc/webrtc/audio/test/low_bandwidth_audio_test.py
new file mode 100755
index 0000000000..05dcf47f99
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/audio/test/low_bandwidth_audio_test.py
@@ -0,0 +1,259 @@
+#!/usr/bin/env python
+# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+"""
+This script is the wrapper that runs the low-bandwidth audio test.
+
+After running the test, post-process steps for calculating audio quality of the
+output files will be performed.
+"""
+
+import argparse
+import collections
+import logging
+import os
+import re
+import shutil
+import subprocess
+import sys
+
+
+SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
+SRC_DIR = os.path.normpath(os.path.join(SCRIPT_DIR, os.pardir, os.pardir))
+
+NO_TOOLS_ERROR_MESSAGE = (
+ 'Could not find PESQ or POLQA at %s.\n'
+ '\n'
+ 'To fix this run:\n'
+ ' python %s %s\n'
+ '\n'
+ 'Note that these tools are Google-internal due to licensing, so in order to '
+ 'use them you will have to get your own license and manually put them in the '
+ 'right location.\n'
+ 'See https://cs.chromium.org/chromium/src/third_party/webrtc/tools_webrtc/'
+ 'download_tools.py?rcl=bbceb76f540159e2dba0701ac03c514f01624130&l=13')
+
+
+def _LogCommand(command):
+ logging.info('Running %r', command)
+ return command
+
+
+def _ParseArgs():
+ parser = argparse.ArgumentParser(description='Run low-bandwidth audio tests.')
+ parser.add_argument('build_dir',
+ help='Path to the build directory (e.g. out/Release).')
+ parser.add_argument('--remove', action='store_true',
+ help='Remove output audio files after testing.')
+ parser.add_argument('--android', action='store_true',
+ help='Perform the test on a connected Android device instead.')
+ parser.add_argument('--adb-path', help='Path to adb binary.', default='adb')
+
+ # Ignore Chromium-specific flags
+ parser.add_argument('--isolated-script-test-output',
+ type=str, default=None)
+ parser.add_argument('--isolated-script-test-perf-output',
+ type=str, default=None)
+ args = parser.parse_args()
+
+ return args
+
+
+def _GetPlatform():
+ if sys.platform == 'win32':
+ return 'win'
+ elif sys.platform == 'darwin':
+ return 'mac'
+ elif sys.platform.startswith('linux'):
+ return 'linux'
+
+
+def _GetExtension():
+ return '.exe' if sys.platform == 'win32' else ''
+
+
+def _GetPathToTools():
+ tools_dir = os.path.join(SRC_DIR, 'tools_webrtc')
+ toolchain_dir = os.path.join(tools_dir, 'audio_quality')
+
+ platform = _GetPlatform()
+ ext = _GetExtension()
+
+ pesq_path = os.path.join(toolchain_dir, platform, 'pesq' + ext)
+ if not os.path.isfile(pesq_path):
+ pesq_path = None
+
+ polqa_path = os.path.join(toolchain_dir, platform, 'PolqaOem64' + ext)
+ if not os.path.isfile(polqa_path):
+ polqa_path = None
+
+ if (platform != 'mac' and not polqa_path) or not pesq_path:
+ logging.error(NO_TOOLS_ERROR_MESSAGE,
+ toolchain_dir,
+ os.path.join(tools_dir, 'download_tools.py'),
+ toolchain_dir)
+
+ return pesq_path, polqa_path
+
+
+def ExtractTestRuns(lines, echo=False):
+ """Extracts information about tests from the output of a test runner.
+
+ Produces tuples (android_device, test_name, reference_file, degraded_file).
+ """
+ for line in lines:
+ if echo:
+ sys.stdout.write(line)
+
+ # Output from Android has a prefix with the device name.
+ android_prefix_re = r'(?:I\b.+\brun_tests_on_device\((.+?)\)\s*)?'
+ test_re = r'^' + android_prefix_re + r'TEST (\w+) ([^ ]+?) ([^ ]+?)\s*$'
+
+ match = re.search(test_re, line)
+ if match:
+ yield match.groups()
+
+
+def _GetFile(file_path, out_dir, move=False,
+ android=False, adb_prefix=('adb',)):
+ out_file_name = os.path.basename(file_path)
+ out_file_path = os.path.join(out_dir, out_file_name)
+
+ if android:
+ # Pull the file from the connected Android device.
+ adb_command = adb_prefix + ('pull', file_path, out_dir)
+ subprocess.check_call(_LogCommand(adb_command))
+ if move:
+ # Remove that file.
+ adb_command = adb_prefix + ('shell', 'rm', file_path)
+ subprocess.check_call(_LogCommand(adb_command))
+ elif os.path.abspath(file_path) != os.path.abspath(out_file_path):
+ if move:
+ shutil.move(file_path, out_file_path)
+ else:
+ shutil.copy(file_path, out_file_path)
+
+ return out_file_path
+
+
+def _RunPesq(executable_path, reference_file, degraded_file,
+ sample_rate_hz=16000):
+ directory = os.path.dirname(reference_file)
+ assert os.path.dirname(degraded_file) == directory
+
+ # Analyze audio.
+ command = [executable_path, '+%d' % sample_rate_hz,
+ os.path.basename(reference_file),
+ os.path.basename(degraded_file)]
+ # Need to provide paths in the current directory due to a bug in PESQ:
+ # On Mac, for some 'path/to/file.wav', if 'file.wav' is longer than
+ # 'path/to', PESQ crashes.
+ out = subprocess.check_output(_LogCommand(command),
+ cwd=directory, stderr=subprocess.STDOUT)
+
+ # Find the scores in stdout of PESQ.
+ match = re.search(
+ r'Prediction \(Raw MOS, MOS-LQO\):\s+=\s+([\d.]+)\s+([\d.]+)', out)
+ if match:
+ raw_mos, _ = match.groups()
+
+ return {'pesq_mos': (raw_mos, 'score')}
+ else:
+ logging.error('PESQ: %s', out.splitlines()[-1])
+ return {}
+
+
+def _RunPolqa(executable_path, reference_file, degraded_file):
+ # Analyze audio.
+ command = [executable_path, '-q', '-LC', 'NB',
+ '-Ref', reference_file, '-Test', degraded_file]
+ process = subprocess.Popen(_LogCommand(command),
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ out, err = process.communicate()
+
+ # Find the scores in stdout of POLQA.
+ match = re.search(r'\bMOS-LQO:\s+([\d.]+)', out)
+
+ if process.returncode != 0 or not match:
+ if process.returncode == 2:
+ logging.warning('%s (2)', err.strip())
+ logging.warning('POLQA license error, skipping test.')
+ else:
+ logging.error('%s (%d)', err.strip(), process.returncode)
+ return {}
+
+ mos_lqo, = match.groups()
+ return {'polqa_mos_lqo': (mos_lqo, 'score')}
+
+
+Analyzer = collections.namedtuple('Analyzer', ['func', 'executable',
+ 'sample_rate_hz'])
+
+
+def main():
+ # pylint: disable=W0101
+ logging.basicConfig(level=logging.INFO)
+
+ args = _ParseArgs()
+
+ pesq_path, polqa_path = _GetPathToTools()
+ if pesq_path is None:
+ return 1
+
+ out_dir = os.path.join(args.build_dir, '..')
+ if args.android:
+ test_command = [os.path.join(args.build_dir, 'bin',
+ 'run_low_bandwidth_audio_test'), '-v']
+ else:
+ test_command = [os.path.join(args.build_dir, 'low_bandwidth_audio_test')]
+
+ analyzers = [Analyzer(_RunPesq, pesq_path, 16000)]
+ # Check if POLQA can run at all, or skip the 48 kHz tests entirely.
+ example_path = os.path.join(SRC_DIR, 'resources',
+ 'voice_engine', 'audio_tiny48.wav')
+ if polqa_path and _RunPolqa(polqa_path, example_path, example_path):
+ analyzers.append(Analyzer(_RunPolqa, polqa_path, 48000))
+
+ for analyzer in analyzers:
+ # Start the test executable that produces audio files.
+ test_process = subprocess.Popen(
+ _LogCommand(test_command + ['--sample_rate_hz=%d' %
+ analyzer.sample_rate_hz]),
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ try:
+ lines = iter(test_process.stdout.readline, '')
+ for result in ExtractTestRuns(lines, echo=True):
+ (android_device, test_name, reference_file, degraded_file) = result
+
+ adb_prefix = (args.adb_path,)
+ if android_device:
+ adb_prefix += ('-s', android_device)
+
+ reference_file = _GetFile(reference_file, out_dir,
+ android=args.android, adb_prefix=adb_prefix)
+ degraded_file = _GetFile(degraded_file, out_dir, move=True,
+ android=args.android, adb_prefix=adb_prefix)
+
+ analyzer_results = analyzer.func(analyzer.executable,
+ reference_file, degraded_file)
+ for metric, (value, units) in analyzer_results.items():
+ # Output a result for the perf dashboard.
+ print 'RESULT %s: %s= %s %s' % (metric, test_name, value, units)
+
+ if args.remove:
+ os.remove(reference_file)
+ os.remove(degraded_file)
+ finally:
+ test_process.terminate()
+
+ return test_process.wait()
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/third_party/libwebrtc/webrtc/audio/test/unittests/low_bandwidth_audio_test_test.py b/third_party/libwebrtc/webrtc/audio/test/unittests/low_bandwidth_audio_test_test.py
new file mode 100755
index 0000000000..18c1eb1887
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/audio/test/unittests/low_bandwidth_audio_test_test.py
@@ -0,0 +1,184 @@
+#!/usr/bin/env python
+# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import os
+import unittest
+import sys
+
+
+SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
+PARENT_DIR = os.path.join(SCRIPT_DIR, os.pardir)
+sys.path.append(PARENT_DIR)
+import low_bandwidth_audio_test
+
+
+class TestExtractTestRuns(unittest.TestCase):
+ def _TestLog(self, log, *expected):
+ self.assertEqual(
+ tuple(low_bandwidth_audio_test.ExtractTestRuns(log.splitlines(True))),
+ expected)
+
+ def testLinux(self):
+ self._TestLog(LINUX_LOG,
+ (None, 'GoodNetworkHighBitrate',
+ '/webrtc/src/resources/voice_engine/audio_tiny16.wav',
+ '/webrtc/src/out/LowBandwidth_GoodNetworkHighBitrate.wav'),
+ (None, 'Mobile2GNetwork',
+ '/webrtc/src/resources/voice_engine/audio_tiny16.wav',
+ '/webrtc/src/out/LowBandwidth_Mobile2GNetwork.wav'))
+
+ def testAndroid(self):
+ self._TestLog(ANDROID_LOG,
+ ('ddfa6149', 'Mobile2GNetwork',
+ '/sdcard/chromium_tests_root/resources/voice_engine/audio_tiny16.wav',
+ '/sdcard/chromium_tests_root/LowBandwidth_Mobile2GNetwork.wav'),
+ ('TA99205CNO', 'GoodNetworkHighBitrate',
+ '/sdcard/chromium_tests_root/resources/voice_engine/audio_tiny16.wav',
+ '/sdcard/chromium_tests_root/LowBandwidth_GoodNetworkHighBitrate.wav'))
+
+
+LINUX_LOG = r'''\
+[==========] Running 2 tests from 1 test case.
+[----------] Global test environment set-up.
+[----------] 2 tests from LowBandwidthAudioTest
+[ RUN ] LowBandwidthAudioTest.GoodNetworkHighBitrate
+TEST GoodNetworkHighBitrate /webrtc/src/resources/voice_engine/audio_tiny16.wav /webrtc/src/out/LowBandwidth_GoodNetworkHighBitrate.wav
+[ OK ] LowBandwidthAudioTest.GoodNetworkHighBitrate (5932 ms)
+[ RUN ] LowBandwidthAudioTest.Mobile2GNetwork
+TEST Mobile2GNetwork /webrtc/src/resources/voice_engine/audio_tiny16.wav /webrtc/src/out/LowBandwidth_Mobile2GNetwork.wav
+[ OK ] LowBandwidthAudioTest.Mobile2GNetwork (6333 ms)
+[----------] 2 tests from LowBandwidthAudioTest (12265 ms total)
+
+[----------] Global test environment tear-down
+[==========] 2 tests from 1 test case ran. (12266 ms total)
+[ PASSED ] 2 tests.
+'''
+
+ANDROID_LOG = r'''\
+I 0.000s Main command: /webrtc/src/build/android/test_runner.py gtest --suite low_bandwidth_audio_test --output-directory /webrtc/src/out/debug-android --runtime-deps-path /webrtc/src/out/debug-android/gen.runtime/webrtc/audio/low_bandwidth_audio_test__test_runner_script.runtime_deps -v
+I 0.007s Main [host]> /webrtc/src/third_party/android_tools/sdk/build-tools/24.0.2/aapt dump xmltree /webrtc/src/out/debug-android/low_bandwidth_audio_test_apk/low_bandwidth_audio_test-debug.apk AndroidManifest.xml
+I 0.028s TimeoutThread-1-for-MainThread [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb devices
+I 0.062s TimeoutThread-1-for-prepare_device(TA99205CNO) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s TA99205CNO wait-for-device
+I 0.063s TimeoutThread-1-for-prepare_device(ddfa6149) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s ddfa6149 wait-for-device
+I 0.102s TimeoutThread-1-for-prepare_device(TA99205CNO) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s TA99205CNO shell '( ( c=/data/local/tmp/cache_token;echo $EXTERNAL_STORAGE;cat $c 2>/dev/null||echo;echo "77611072-160c-11d7-9362-705b0f464195">$c &&getprop )>/data/local/tmp/temp_file-5ea34389e3f92 );echo %$?'
+I 0.105s TimeoutThread-1-for-prepare_device(ddfa6149) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s ddfa6149 shell '( ( c=/data/local/tmp/cache_token;echo $EXTERNAL_STORAGE;cat $c 2>/dev/null||echo;echo "77618afc-160c-11d7-bda4-705b0f464195">$c &&getprop )>/data/local/tmp/temp_file-b995cef6e0e3d );echo %$?'
+I 0.204s TimeoutThread-1-for-prepare_device(ddfa6149) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s ddfa6149 pull /data/local/tmp/temp_file-b995cef6e0e3d /tmp/tmpieAgDj/tmp_ReadFileWithPull
+I 0.285s TimeoutThread-1-for-prepare_device(ddfa6149) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s ddfa6149 shell '( test -d /storage/emulated/legacy );echo %$?'
+I 0.285s TimeoutThread-1-for-delete_temporary_file(ddfa6149) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s ddfa6149 shell 'rm -f /data/local/tmp/temp_file-b995cef6e0e3d'
+I 0.302s TimeoutThread-1-for-prepare_device(TA99205CNO) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s TA99205CNO pull /data/local/tmp/temp_file-5ea34389e3f92 /tmp/tmpvlyG3I/tmp_ReadFileWithPull
+I 0.352s TimeoutThread-1-for-prepare_device(ddfa6149) condition 'sd_card_ready' met (0.3s)
+I 0.353s TimeoutThread-1-for-prepare_device(ddfa6149) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s ddfa6149 shell '( pm path android );echo %$?'
+I 0.369s TimeoutThread-1-for-prepare_device(TA99205CNO) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s TA99205CNO shell '( test -d /sdcard );echo %$?'
+I 0.370s TimeoutThread-1-for-delete_temporary_file(TA99205CNO) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s TA99205CNO shell 'rm -f /data/local/tmp/temp_file-5ea34389e3f92'
+I 0.434s TimeoutThread-1-for-prepare_device(TA99205CNO) condition 'sd_card_ready' met (0.4s)
+I 0.434s TimeoutThread-1-for-prepare_device(TA99205CNO) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s TA99205CNO shell '( pm path android );echo %$?'
+I 1.067s TimeoutThread-1-for-prepare_device(ddfa6149) condition 'pm_ready' met (1.0s)
+I 1.067s TimeoutThread-1-for-prepare_device(ddfa6149) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s ddfa6149 shell '( getprop sys.boot_completed );echo %$?'
+I 1.115s TimeoutThread-1-for-prepare_device(ddfa6149) condition 'boot_completed' met (1.1s)
+I 1.181s TimeoutThread-1-for-prepare_device(TA99205CNO) condition 'pm_ready' met (1.1s)
+I 1.181s TimeoutThread-1-for-prepare_device(TA99205CNO) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s TA99205CNO shell '( getprop sys.boot_completed );echo %$?'
+I 1.242s TimeoutThread-1-for-prepare_device(TA99205CNO) condition 'boot_completed' met (1.2s)
+I 1.268s TimeoutThread-1-for-individual_device_set_up(TA99205CNO) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s TA99205CNO shell '( pm path org.chromium.native_test );echo %$?'
+I 1.269s TimeoutThread-1-for-individual_device_set_up(ddfa6149) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s ddfa6149 shell '( pm path org.chromium.native_test );echo %$?'
+I 2.008s calculate_device_checksums [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s ddfa6149 shell '( a=/data/local/tmp/md5sum/md5sum_bin;! [[ $(ls -l $a) = *1225256* ]]&&exit 2;export LD_LIBRARY_PATH=/data/local/tmp/md5sum;$a /data/app/org.chromium.native_test-2/base.apk;: );echo %$?'
+I 2.008s calculate_host_checksums [host]> /webrtc/src/out/debug-android/md5sum_bin_host /webrtc/src/out/debug-android/low_bandwidth_audio_test_apk/low_bandwidth_audio_test-debug.apk
+I 2.019s calculate_device_checksums [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s TA99205CNO shell '( a=/data/local/tmp/md5sum/md5sum_bin;! [[ $(ls -l $a) = *1225256* ]]&&exit 2;export LD_LIBRARY_PATH=/data/local/tmp/md5sum;$a /data/app/org.chromium.native_test-1/base.apk;: );echo %$?'
+I 2.020s calculate_host_checksums [host]> /webrtc/src/out/debug-android/md5sum_bin_host /webrtc/src/out/debug-android/low_bandwidth_audio_test_apk/low_bandwidth_audio_test-debug.apk
+I 2.172s TimeoutThread-1-for-individual_device_set_up(ddfa6149) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s ddfa6149 shell '( p=org.chromium.native_test;if [[ "$(ps)" = *$p* ]]; then am force-stop $p; fi );echo %$?'
+I 2.183s TimeoutThread-1-for-individual_device_set_up(TA99205CNO) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s TA99205CNO shell '( p=org.chromium.native_test;if [[ "$(ps)" = *$p* ]]; then am force-stop $p; fi );echo %$?'
+I 2.290s calculate_device_checksums [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s TA99205CNO shell '( a=/data/local/tmp/md5sum/md5sum_bin;! [[ $(ls -l $a) = *1225256* ]]&&exit 2;export LD_LIBRARY_PATH=/data/local/tmp/md5sum;$a /sdcard/chromium_tests_root/resources/voice_engine/audio_tiny16.wav;: );echo %$?'
+I 2.291s calculate_host_checksums [host]> /webrtc/src/out/debug-android/md5sum_bin_host /webrtc/src/resources/voice_engine/audio_tiny16.wav
+I 2.373s calculate_device_checksums [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s ddfa6149 shell '( a=/data/local/tmp/md5sum/md5sum_bin;! [[ $(ls -l $a) = *1225256* ]]&&exit 2;export LD_LIBRARY_PATH=/data/local/tmp/md5sum;$a /storage/emulated/legacy/chromium_tests_root/resources/voice_engine/audio_tiny16.wav;: );echo %$?'
+I 2.374s calculate_host_checksums [host]> /webrtc/src/out/debug-android/md5sum_bin_host /webrtc/src/resources/voice_engine/audio_tiny16.wav
+I 2.390s calculate_device_checksums [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s TA99205CNO shell '( a=/data/local/tmp/md5sum/md5sum_bin;! [[ $(ls -l $a) = *1225256* ]]&&exit 2;export LD_LIBRARY_PATH=/data/local/tmp/md5sum;$a /sdcard/chromium_tests_root/icudtl.dat;: );echo %$?'
+I 2.390s calculate_host_checksums [host]> /webrtc/src/out/debug-android/md5sum_bin_host /webrtc/src/out/debug-android/icudtl.dat
+I 2.472s calculate_device_checksums [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s ddfa6149 shell '( a=/data/local/tmp/md5sum/md5sum_bin;! [[ $(ls -l $a) = *1225256* ]]&&exit 2;export LD_LIBRARY_PATH=/data/local/tmp/md5sum;$a /storage/emulated/legacy/chromium_tests_root/icudtl.dat;: );echo %$?'
+I 2.472s calculate_host_checksums [host]> /webrtc/src/out/debug-android/md5sum_bin_host /webrtc/src/out/debug-android/icudtl.dat
+I 2.675s TimeoutThread-1-for-list_tests(TA99205CNO) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s TA99205CNO shell '( ( p=org.chromium.native_test;am instrument -w -e "$p".NativeTestInstrumentationTestRunner.ShardNanoTimeout 30000000000 -e "$p".NativeTestInstrumentationTestRunner.NativeTestActivity "$p".NativeUnitTestActivity -e "$p".NativeTestInstrumentationTestRunner.StdoutFile /sdcard/temp_file-6407c967884af.gtest_out -e "$p".NativeTest.CommandLineFlags --gtest_list_tests "$p"/"$p".NativeTestInstrumentationTestRunner )>/data/local/tmp/temp_file-d21ebcd0977d9 );echo %$?'
+I 2.675s TimeoutThread-1-for-list_tests(ddfa6149) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s ddfa6149 shell '( ( p=org.chromium.native_test;am instrument -w -e "$p".NativeTestInstrumentationTestRunner.ShardNanoTimeout 30000000000 -e "$p".NativeTestInstrumentationTestRunner.NativeTestActivity "$p".NativeUnitTestActivity -e "$p".NativeTestInstrumentationTestRunner.StdoutFile /storage/emulated/legacy/temp_file-fa09560c3259.gtest_out -e "$p".NativeTest.CommandLineFlags --gtest_list_tests "$p"/"$p".NativeTestInstrumentationTestRunner )>/data/local/tmp/temp_file-95ad995999939 );echo %$?'
+I 3.739s TimeoutThread-1-for-list_tests(ddfa6149) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s ddfa6149 pull /data/local/tmp/temp_file-95ad995999939 /tmp/tmpSnnF6Y/tmp_ReadFileWithPull
+I 3.807s TimeoutThread-1-for-delete_temporary_file(ddfa6149) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s ddfa6149 shell 'rm -f /data/local/tmp/temp_file-95ad995999939'
+I 3.812s TimeoutThread-1-for-list_tests(ddfa6149) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s ddfa6149 shell '( TZ=utc ls -a -l /storage/emulated/legacy/ );echo %$?'
+I 3.866s TimeoutThread-1-for-list_tests(ddfa6149) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s ddfa6149 shell '( cat /storage/emulated/legacy/temp_file-fa09560c3259.gtest_out );echo %$?'
+I 3.912s TimeoutThread-1-for-delete_temporary_file(ddfa6149) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s ddfa6149 shell 'rm -f /storage/emulated/legacy/temp_file-fa09560c3259.gtest_out'
+I 4.256s TimeoutThread-1-for-list_tests(TA99205CNO) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s TA99205CNO pull /data/local/tmp/temp_file-d21ebcd0977d9 /tmp/tmpokPF5b/tmp_ReadFileWithPull
+I 4.324s TimeoutThread-1-for-delete_temporary_file(TA99205CNO) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s TA99205CNO shell 'rm -f /data/local/tmp/temp_file-d21ebcd0977d9'
+I 4.342s TimeoutThread-1-for-list_tests(TA99205CNO) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s TA99205CNO shell '( TZ=utc ls -a -l /sdcard/ );echo %$?'
+I 4.432s TimeoutThread-1-for-list_tests(TA99205CNO) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s TA99205CNO shell '( cat /sdcard/temp_file-6407c967884af.gtest_out );echo %$?'
+I 4.476s TimeoutThread-1-for-delete_temporary_file(TA99205CNO) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s TA99205CNO shell 'rm -f /sdcard/temp_file-6407c967884af.gtest_out'
+I 4.483s Main Using external sharding settings. This is shard 0/1
+I 4.483s Main STARTING TRY #1/3
+I 4.484s Main Will run 2 tests on 2 devices: TA99205CNO, ddfa6149
+I 4.486s TimeoutThread-1-for-run_tests_on_device(TA99205CNO) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s TA99205CNO shell '( pm dump org.chromium.native_test | grep dataDir=; echo "PIPESTATUS: ${PIPESTATUS[@]}" );echo %$?'
+I 4.486s TimeoutThread-1-for-run_tests_on_device(ddfa6149) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s ddfa6149 shell '( pm dump org.chromium.native_test | grep dataDir=; echo "PIPESTATUS: ${PIPESTATUS[@]}" );echo %$?'
+I 5.551s run_tests_on_device(TA99205CNO) flags:
+I 5.552s run_tests_on_device(ddfa6149) flags:
+I 5.554s TimeoutThread-1-for-run_tests_on_device(TA99205CNO) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s TA99205CNO shell '( ( p=org.chromium.native_test;am instrument -w -e "$p".NativeTestInstrumentationTestRunner.ShardNanoTimeout 120000000000 -e "$p".NativeTestInstrumentationTestRunner.NativeTestActivity "$p".NativeUnitTestActivity -e "$p".NativeTestInstrumentationTestRunner.Test LowBandwidthAudioTest.GoodNetworkHighBitrate -e "$p".NativeTestInstrumentationTestRunner.StdoutFile /sdcard/temp_file-ffe7b76691cb7.gtest_out "$p"/"$p".NativeTestInstrumentationTestRunner )>/data/local/tmp/temp_file-c9d83b3078ab1 );echo %$?'
+I 5.556s TimeoutThread-1-for-run_tests_on_device(ddfa6149) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s ddfa6149 shell '( ( p=org.chromium.native_test;am instrument -w -e "$p".NativeTestInstrumentationTestRunner.ShardNanoTimeout 120000000000 -e "$p".NativeTestInstrumentationTestRunner.NativeTestActivity "$p".NativeUnitTestActivity -e "$p".NativeTestInstrumentationTestRunner.Test LowBandwidthAudioTest.Mobile2GNetwork -e "$p".NativeTestInstrumentationTestRunner.StdoutFile /storage/emulated/legacy/temp_file-f0ceb1a05ea8.gtest_out "$p"/"$p".NativeTestInstrumentationTestRunner )>/data/local/tmp/temp_file-245ef307a5b32 );echo %$?'
+I 12.956s TimeoutThread-1-for-run_tests_on_device(TA99205CNO) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s TA99205CNO pull /data/local/tmp/temp_file-c9d83b3078ab1 /tmp/tmpRQhTcM/tmp_ReadFileWithPull
+I 13.024s TimeoutThread-1-for-delete_temporary_file(TA99205CNO) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s TA99205CNO shell 'rm -f /data/local/tmp/temp_file-c9d83b3078ab1'
+I 13.032s TimeoutThread-1-for-run_tests_on_device(TA99205CNO) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s TA99205CNO shell '( TZ=utc ls -a -l /sdcard/ );echo %$?'
+I 13.114s TimeoutThread-1-for-run_tests_on_device(TA99205CNO) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s TA99205CNO shell '( cat /sdcard/temp_file-ffe7b76691cb7.gtest_out );echo %$?'
+I 13.154s TimeoutThread-1-for-run_tests_on_device(ddfa6149) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s ddfa6149 pull /data/local/tmp/temp_file-245ef307a5b32 /tmp/tmpfQ4J96/tmp_ReadFileWithPull
+I 13.167s TimeoutThread-1-for-delete_temporary_file(TA99205CNO) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s TA99205CNO shell 'rm -f /sdcard/temp_file-ffe7b76691cb7.gtest_out'
+I 13.169s TimeoutThread-1-for-delete_temporary_file(TA99205CNO) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s TA99205CNO shell 'rm -f /data/user/0/org.chromium.native_test/temp_file-f07c4808dbf8f.xml'
+I 13.170s TimeoutThread-1-for-run_tests_on_device(TA99205CNO) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s TA99205CNO shell '( pm clear org.chromium.native_test );echo %$?'
+I 13.234s TimeoutThread-1-for-delete_temporary_file(ddfa6149) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s ddfa6149 shell 'rm -f /data/local/tmp/temp_file-245ef307a5b32'
+I 13.239s TimeoutThread-1-for-run_tests_on_device(ddfa6149) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s ddfa6149 shell '( TZ=utc ls -a -l /storage/emulated/legacy/ );echo %$?'
+I 13.291s TimeoutThread-1-for-run_tests_on_device(ddfa6149) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s ddfa6149 shell '( cat /storage/emulated/legacy/temp_file-f0ceb1a05ea8.gtest_out );echo %$?'
+I 13.341s TimeoutThread-1-for-delete_temporary_file(ddfa6149) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s ddfa6149 shell 'rm -f /storage/emulated/legacy/temp_file-f0ceb1a05ea8.gtest_out'
+I 13.343s TimeoutThread-1-for-delete_temporary_file(ddfa6149) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s ddfa6149 shell 'rm -f /data/data/org.chromium.native_test/temp_file-5649bb01682da.xml'
+I 13.346s TimeoutThread-1-for-run_tests_on_device(ddfa6149) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s ddfa6149 shell '( pm clear org.chromium.native_test );echo %$?'
+I 13.971s TimeoutThread-1-for-run_tests_on_device(TA99205CNO) Setting permissions for org.chromium.native_test.
+I 13.971s TimeoutThread-1-for-run_tests_on_device(TA99205CNO) [host]> /webrtc/src/third_party/android_tools/sdk/platform-tools/adb -s TA99205CNO shell '( pm grant org.chromium.native_test android.permission.CAMERA&&pm grant org.chromium.native_test android.permission.RECORD_AUDIO&&pm grant org.chromium.native_test android.permission.WRITE_EXTERNAL_STORAGE&&pm grant org.chromium.native_test android.permission.READ_EXTERNAL_STORAGE );echo %$?'
+I 14.078s run_tests_on_device(ddfa6149) >>ScopedMainEntryLogger
+I 14.078s run_tests_on_device(ddfa6149) Note: Google Test filter = LowBandwidthAudioTest.Mobile2GNetwork
+I 14.078s run_tests_on_device(ddfa6149) [==========] Running 1 test from 1 test case.
+I 14.078s run_tests_on_device(ddfa6149) [----------] Global test environment set-up.
+I 14.078s run_tests_on_device(ddfa6149) [----------] 1 test from LowBandwidthAudioTest
+I 14.078s run_tests_on_device(ddfa6149) [ RUN ] LowBandwidthAudioTest.Mobile2GNetwork
+I 14.078s run_tests_on_device(ddfa6149) TEST Mobile2GNetwork /sdcard/chromium_tests_root/resources/voice_engine/audio_tiny16.wav /sdcard/chromium_tests_root/LowBandwidth_Mobile2GNetwork.wav
+I 14.078s run_tests_on_device(ddfa6149) [ OK ] LowBandwidthAudioTest.Mobile2GNetwork (6438 ms)
+I 14.078s run_tests_on_device(ddfa6149) [----------] 1 test from LowBandwidthAudioTest (6438 ms total)
+I 14.078s run_tests_on_device(ddfa6149)
+I 14.078s run_tests_on_device(ddfa6149) [----------] Global test environment tear-down
+I 14.079s run_tests_on_device(ddfa6149) [==========] 1 test from 1 test case ran. (6438 ms total)
+I 14.079s run_tests_on_device(ddfa6149) [ PASSED ] 1 test.
+I 14.079s run_tests_on_device(ddfa6149) <<ScopedMainEntryLogger
+I 16.576s run_tests_on_device(TA99205CNO) >>ScopedMainEntryLogger
+I 16.576s run_tests_on_device(TA99205CNO) Note: Google Test filter = LowBandwidthAudioTest.GoodNetworkHighBitrate
+I 16.576s run_tests_on_device(TA99205CNO) [==========] Running 1 test from 1 test case.
+I 16.576s run_tests_on_device(TA99205CNO) [----------] Global test environment set-up.
+I 16.576s run_tests_on_device(TA99205CNO) [----------] 1 test from LowBandwidthAudioTest
+I 16.576s run_tests_on_device(TA99205CNO) [ RUN ] LowBandwidthAudioTest.GoodNetworkHighBitrate
+I 16.576s run_tests_on_device(TA99205CNO) TEST GoodNetworkHighBitrate /sdcard/chromium_tests_root/resources/voice_engine/audio_tiny16.wav /sdcard/chromium_tests_root/LowBandwidth_GoodNetworkHighBitrate.wav
+I 16.576s run_tests_on_device(TA99205CNO) [ OK ] LowBandwidthAudioTest.GoodNetworkHighBitrate (5968 ms)
+I 16.576s run_tests_on_device(TA99205CNO) [----------] 1 test from LowBandwidthAudioTest (5968 ms total)
+I 16.576s run_tests_on_device(TA99205CNO)
+I 16.576s run_tests_on_device(TA99205CNO) [----------] Global test environment tear-down
+I 16.576s run_tests_on_device(TA99205CNO) [==========] 1 test from 1 test case ran. (5968 ms total)
+I 16.577s run_tests_on_device(TA99205CNO) [ PASSED ] 1 test.
+I 16.577s run_tests_on_device(TA99205CNO) <<ScopedMainEntryLogger
+I 16.577s run_tests_on_device(TA99205CNO) Finished running tests on this device.
+I 16.577s run_tests_on_device(ddfa6149) Finished running tests on this device.
+I 16.604s Main FINISHED TRY #1/3
+I 16.604s Main All tests completed.
+C 16.604s Main ********************************************************************************
+C 16.604s Main Summary
+C 16.604s Main ********************************************************************************
+C 16.605s Main [==========] 2 tests ran.
+C 16.605s Main [ PASSED ] 2 tests.
+C 16.605s Main ********************************************************************************
+I 16.608s tear_down_device(ddfa6149) Wrote device cache: /webrtc/src/out/debug-android/device_cache_ddea6549.json
+I 16.608s tear_down_device(TA99205CNO) Wrote device cache: /webrtc/src/out/debug-android/device_cache_TA99305CMO.json
+'''
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/third_party/libwebrtc/webrtc/audio/time_interval.cc b/third_party/libwebrtc/webrtc/audio/time_interval.cc
new file mode 100644
index 0000000000..cc103408a3
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/audio/time_interval.cc
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio/time_interval.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/timeutils.h"
+
+namespace webrtc {
+
+TimeInterval::TimeInterval() = default;
+TimeInterval::~TimeInterval() = default;
+
+void TimeInterval::Extend() {
+ Extend(rtc::TimeMillis());
+}
+
+void TimeInterval::Extend(int64_t time) {
+ if (!interval_) {
+ interval_.emplace(time, time);
+ } else {
+ if (time < interval_->first) {
+ interval_->first = time;
+ }
+ if (time > interval_->last) {
+ interval_->last = time;
+ }
+ }
+}
+
+void TimeInterval::Extend(const TimeInterval& other_interval) {
+ if (!other_interval.Empty()) {
+ Extend(other_interval.interval_->first);
+ Extend(other_interval.interval_->last);
+ }
+}
+
+bool TimeInterval::Empty() const {
+ return !interval_;
+}
+
+int64_t TimeInterval::Length() const {
+ RTC_DCHECK(interval_);
+ return interval_->last - interval_->first;
+}
+
+TimeInterval::Interval::Interval(int64_t first, int64_t last)
+ : first(first), last(last) {}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/audio/time_interval.h b/third_party/libwebrtc/webrtc/audio/time_interval.h
new file mode 100644
index 0000000000..88b2f7dd4f
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/audio/time_interval.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_TIME_INTERVAL_H_
+#define AUDIO_TIME_INTERVAL_H_
+
+#include <stdint.h>
+
+#include "api/optional.h"
+
+namespace webrtc {
+
+// This class logs the first and last time its Extend() function is called.
+//
+// This class is not thread-safe; Extend() calls should only be made by a
+// single thread at a time, such as within a lock or destructor.
+//
+// Example usage:
+// // let x < y < z < u < v
+//   webrtc::TimeInterval interval;
+//   interval.Extend(); // at time x
+// ...
+// interval.Extend(); // at time y
+// ...
+// interval.Extend(); // at time u
+// ...
+// interval.Extend(z); // at time v
+// ...
+// if (!interval.Empty()) {
+// int64_t active_time = interval.Length(); // returns (u - x)
+// }
+class TimeInterval {
+ public:
+ TimeInterval();
+ ~TimeInterval();
+ // Extend the interval with the current time.
+ void Extend();
+ // Extend the interval with a given time.
+ void Extend(int64_t time);
+ // Take the convex hull with another interval.
+ void Extend(const TimeInterval& other_interval);
+ // True iff Extend has never been called.
+ bool Empty() const;
+ // Returns the time between the first and the last tick, in milliseconds.
+ int64_t Length() const;
+
+ private:
+ struct Interval {
+ Interval(int64_t first, int64_t last);
+
+ int64_t first, last;
+ };
+ rtc::Optional<Interval> interval_;
+};
+
+} // namespace webrtc
+
+#endif // AUDIO_TIME_INTERVAL_H_
diff --git a/third_party/libwebrtc/webrtc/audio/time_interval_unittest.cc b/third_party/libwebrtc/webrtc/audio/time_interval_unittest.cc
new file mode 100644
index 0000000000..7f8b44ecec
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/audio/time_interval_unittest.cc
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2017 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio/time_interval.h"
+#include "rtc_base/fakeclock.h"
+#include "rtc_base/timedelta.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(TimeIntervalTest, TimeInMs) {
+ rtc::ScopedFakeClock fake_clock;
+ TimeInterval interval;
+ interval.Extend();
+ fake_clock.AdvanceTime(rtc::TimeDelta::FromMilliseconds(100));
+ interval.Extend();
+ EXPECT_EQ(interval.Length(), 100);
+}
+
+TEST(TimeIntervalTest, Empty) {
+ TimeInterval interval;
+ EXPECT_TRUE(interval.Empty());
+ interval.Extend();
+ EXPECT_FALSE(interval.Empty());
+ interval.Extend(200);
+ EXPECT_FALSE(interval.Empty());
+}
+
+TEST(TimeIntervalTest, MonotoneIncreasing) {
+ const size_t point_count = 7;
+ const int64_t interval_points[] = {3, 2, 5, 0, 4, 1, 6};
+ const int64_t interval_differences[] = {0, 1, 3, 5, 5, 5, 6};
+ TimeInterval interval;
+ EXPECT_TRUE(interval.Empty());
+ for (size_t i = 0; i < point_count; ++i) {
+ interval.Extend(interval_points[i]);
+ EXPECT_EQ(interval_differences[i], interval.Length());
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/audio/utility/BUILD.gn b/third_party/libwebrtc/webrtc/audio/utility/BUILD.gn
new file mode 100644
index 0000000000..65f9cb0da6
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/audio/utility/BUILD.gn
@@ -0,0 +1,48 @@
+# Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+import("../../webrtc.gni")
+
+group("utility") {
+ public_deps = [
+ ":audio_frame_operations",
+ ]
+}
+
+rtc_static_library("audio_frame_operations") {
+ sources = [
+ "audio_frame_operations.cc",
+ "audio_frame_operations.h",
+ ]
+
+ deps = [
+ "../..:webrtc_common",
+ "../../modules:module_api",
+ "../../modules/audio_coding:audio_format_conversion",
+ "../../rtc_base:rtc_base_approved",
+ ]
+}
+
+if (rtc_include_tests) {
+ rtc_source_set("utility_tests") {
+ testonly = true
+ sources = [
+ "audio_frame_operations_unittest.cc",
+ ]
+ deps = [
+ ":audio_frame_operations",
+ "../../modules:module_api",
+ "../../rtc_base:rtc_base_approved",
+ "../../test:test_support",
+ "//testing/gtest",
+ ]
+ if (!build_with_chromium && is_clang) {
+ # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
+ suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+ }
+ }
+}
diff --git a/third_party/libwebrtc/webrtc/audio/utility/audio_frame_operations.cc b/third_party/libwebrtc/webrtc/audio/utility/audio_frame_operations.cc
new file mode 100644
index 0000000000..a7c77821f6
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/audio/utility/audio_frame_operations.cc
@@ -0,0 +1,330 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio/utility/audio_frame_operations.h"
+
+#include <algorithm>
+
+#include "modules/include/module_common_types.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+namespace {
+
+// 2.7ms @ 48kHz, 4ms @ 32kHz, 8ms @ 16kHz.
+const size_t kMuteFadeFrames = 128;
+const float kMuteFadeInc = 1.0f / kMuteFadeFrames;
+
+} // namespace
+
+void AudioFrameOperations::Add(const AudioFrame& frame_to_add,
+ AudioFrame* result_frame) {
+ // Sanity check.
+ RTC_DCHECK(result_frame);
+ RTC_DCHECK_GT(result_frame->num_channels_, 0);
+ RTC_DCHECK_EQ(result_frame->num_channels_, frame_to_add.num_channels_);
+
+ bool no_previous_data = result_frame->muted();
+ if (result_frame->samples_per_channel_ != frame_to_add.samples_per_channel_) {
+ // Special case we have no data to start with.
+ RTC_DCHECK_EQ(result_frame->samples_per_channel_, 0);
+ result_frame->samples_per_channel_ = frame_to_add.samples_per_channel_;
+ no_previous_data = true;
+ }
+
+ if (result_frame->vad_activity_ == AudioFrame::kVadActive ||
+ frame_to_add.vad_activity_ == AudioFrame::kVadActive) {
+ result_frame->vad_activity_ = AudioFrame::kVadActive;
+ } else if (result_frame->vad_activity_ == AudioFrame::kVadUnknown ||
+ frame_to_add.vad_activity_ == AudioFrame::kVadUnknown) {
+ result_frame->vad_activity_ = AudioFrame::kVadUnknown;
+ }
+
+ if (result_frame->speech_type_ != frame_to_add.speech_type_)
+ result_frame->speech_type_ = AudioFrame::kUndefined;
+
+ if (!frame_to_add.muted()) {
+ const int16_t* in_data = frame_to_add.data();
+ int16_t* out_data = result_frame->mutable_data();
+ size_t length =
+ frame_to_add.samples_per_channel_ * frame_to_add.num_channels_;
+ if (no_previous_data) {
+ std::copy(in_data, in_data + length, out_data);
+ } else {
+ for (size_t i = 0; i < length; i++) {
+ const int32_t wrap_guard = static_cast<int32_t>(out_data[i]) +
+ static_cast<int32_t>(in_data[i]);
+ out_data[i] = rtc::saturated_cast<int16_t>(wrap_guard);
+ }
+ }
+ }
+}
+
+void AudioFrameOperations::MonoToStereo(const int16_t* src_audio,
+ size_t samples_per_channel,
+ int16_t* dst_audio) {
+ for (size_t i = 0; i < samples_per_channel; i++) {
+ dst_audio[2 * i] = src_audio[i];
+ dst_audio[2 * i + 1] = src_audio[i];
+ }
+}
+
+int AudioFrameOperations::MonoToStereo(AudioFrame* frame) {
+ if (frame->num_channels_ != 1) {
+ return -1;
+ }
+ if ((frame->samples_per_channel_ * 2) >= AudioFrame::kMaxDataSizeSamples) {
+ // Not enough memory to expand from mono to stereo.
+ return -1;
+ }
+
+ if (!frame->muted()) {
+ // TODO(yujo): this operation can be done in place.
+ int16_t data_copy[AudioFrame::kMaxDataSizeSamples];
+ memcpy(data_copy, frame->data(),
+ sizeof(int16_t) * frame->samples_per_channel_);
+ MonoToStereo(data_copy, frame->samples_per_channel_, frame->mutable_data());
+ }
+ frame->num_channels_ = 2;
+
+ return 0;
+}
+
+void AudioFrameOperations::StereoToMono(const int16_t* src_audio,
+ size_t samples_per_channel,
+ int16_t* dst_audio) {
+ for (size_t i = 0; i < samples_per_channel; i++) {
+ dst_audio[i] =
+ (static_cast<int32_t>(src_audio[2 * i]) + src_audio[2 * i + 1]) >> 1;
+ }
+}
+
+int AudioFrameOperations::StereoToMono(AudioFrame* frame) {
+ if (frame->num_channels_ != 2) {
+ return -1;
+ }
+
+ RTC_DCHECK_LE(frame->samples_per_channel_ * 2,
+ AudioFrame::kMaxDataSizeSamples);
+
+ if (!frame->muted()) {
+ StereoToMono(frame->data(), frame->samples_per_channel_,
+ frame->mutable_data());
+ }
+ frame->num_channels_ = 1;
+
+ return 0;
+}
+
+void AudioFrameOperations::QuadToStereo(const int16_t* src_audio,
+ size_t samples_per_channel,
+ int16_t* dst_audio) {
+ for (size_t i = 0; i < samples_per_channel; i++) {
+ dst_audio[i * 2] =
+ (static_cast<int32_t>(src_audio[4 * i]) + src_audio[4 * i + 1]) >> 1;
+ dst_audio[i * 2 + 1] =
+ (static_cast<int32_t>(src_audio[4 * i + 2]) + src_audio[4 * i + 3]) >>
+ 1;
+ }
+}
+
+int AudioFrameOperations::QuadToStereo(AudioFrame* frame) {
+ if (frame->num_channels_ != 4) {
+ return -1;
+ }
+
+ RTC_DCHECK_LE(frame->samples_per_channel_ * 4,
+ AudioFrame::kMaxDataSizeSamples);
+
+ if (!frame->muted()) {
+ QuadToStereo(frame->data(), frame->samples_per_channel_,
+ frame->mutable_data());
+ }
+ frame->num_channels_ = 2;
+
+ return 0;
+}
+
+void AudioFrameOperations::QuadToMono(const int16_t* src_audio,
+ size_t samples_per_channel,
+ int16_t* dst_audio) {
+ for (size_t i = 0; i < samples_per_channel; i++) {
+ dst_audio[i] =
+ (static_cast<int32_t>(src_audio[4 * i]) + src_audio[4 * i + 1] +
+ src_audio[4 * i + 2] + src_audio[4 * i + 3]) >> 2;
+ }
+}
+
+int AudioFrameOperations::QuadToMono(AudioFrame* frame) {
+ if (frame->num_channels_ != 4) {
+ return -1;
+ }
+
+ RTC_DCHECK_LE(frame->samples_per_channel_ * 4,
+ AudioFrame::kMaxDataSizeSamples);
+
+ if (!frame->muted()) {
+ QuadToMono(frame->data(), frame->samples_per_channel_,
+ frame->mutable_data());
+ }
+ frame->num_channels_ = 1;
+
+ return 0;
+}
+
+void AudioFrameOperations::DownmixChannels(const int16_t* src_audio,
+ size_t src_channels,
+ size_t samples_per_channel,
+ size_t dst_channels,
+ int16_t* dst_audio) {
+ if (src_channels == 2 && dst_channels == 1) {
+ StereoToMono(src_audio, samples_per_channel, dst_audio);
+ return;
+ } else if (src_channels == 4 && dst_channels == 2) {
+ QuadToStereo(src_audio, samples_per_channel, dst_audio);
+ return;
+ } else if (src_channels == 4 && dst_channels == 1) {
+ QuadToMono(src_audio, samples_per_channel, dst_audio);
+ return;
+ }
+
+ RTC_NOTREACHED() << "src_channels: " << src_channels
+ << ", dst_channels: " << dst_channels;
+}
+
+int AudioFrameOperations::DownmixChannels(size_t dst_channels,
+ AudioFrame* frame) {
+ if (frame->num_channels_ == 2 && dst_channels == 1) {
+ return StereoToMono(frame);
+ } else if (frame->num_channels_ == 4 && dst_channels == 2) {
+ return QuadToStereo(frame);
+ } else if (frame->num_channels_ == 4 && dst_channels == 1) {
+ return QuadToMono(frame);
+ }
+
+ return -1;
+}
+
+void AudioFrameOperations::SwapStereoChannels(AudioFrame* frame) {
+ RTC_DCHECK(frame);
+ if (frame->num_channels_ != 2 || frame->muted()) {
+ return;
+ }
+
+ int16_t* frame_data = frame->mutable_data();
+ for (size_t i = 0; i < frame->samples_per_channel_ * 2; i += 2) {
+ int16_t temp_data = frame_data[i];
+ frame_data[i] = frame_data[i + 1];
+ frame_data[i + 1] = temp_data;
+ }
+}
+
+void AudioFrameOperations::Mute(AudioFrame* frame,
+ bool previous_frame_muted,
+ bool current_frame_muted) {
+ RTC_DCHECK(frame);
+ if (!previous_frame_muted && !current_frame_muted) {
+ // Not muted, don't touch.
+ } else if (previous_frame_muted && current_frame_muted) {
+ // Frame fully muted.
+ size_t total_samples = frame->samples_per_channel_ * frame->num_channels_;
+ RTC_DCHECK_GE(AudioFrame::kMaxDataSizeSamples, total_samples);
+ frame->Mute();
+ } else {
+ // Fade is a no-op on a muted frame.
+ if (frame->muted()) {
+ return;
+ }
+
+ // Limit number of samples to fade, if frame isn't long enough.
+ size_t count = kMuteFadeFrames;
+ float inc = kMuteFadeInc;
+ if (frame->samples_per_channel_ < kMuteFadeFrames) {
+ count = frame->samples_per_channel_;
+ if (count > 0) {
+ inc = 1.0f / count;
+ }
+ }
+
+ size_t start = 0;
+ size_t end = count;
+ float start_g = 0.0f;
+ if (current_frame_muted) {
+ // Fade out the last |count| samples of frame.
+ RTC_DCHECK(!previous_frame_muted);
+ start = frame->samples_per_channel_ - count;
+ end = frame->samples_per_channel_;
+ start_g = 1.0f;
+ inc = -inc;
+ } else {
+ // Fade in the first |count| samples of frame.
+ RTC_DCHECK(previous_frame_muted);
+ }
+
+ // Perform fade.
+ int16_t* frame_data = frame->mutable_data();
+ size_t channels = frame->num_channels_;
+ for (size_t j = 0; j < channels; ++j) {
+ float g = start_g;
+ for (size_t i = start * channels; i < end * channels; i += channels) {
+ g += inc;
+ frame_data[i + j] *= g;
+ }
+ }
+ }
+}
+
+void AudioFrameOperations::Mute(AudioFrame* frame) {
+ Mute(frame, true, true);
+}
+
+void AudioFrameOperations::ApplyHalfGain(AudioFrame* frame) {
+ RTC_DCHECK(frame);
+ RTC_DCHECK_GT(frame->num_channels_, 0);
+ if (frame->num_channels_ < 1 || frame->muted()) {
+ return;
+ }
+
+ int16_t* frame_data = frame->mutable_data();
+ for (size_t i = 0; i < frame->samples_per_channel_ * frame->num_channels_;
+ i++) {
+ frame_data[i] = frame_data[i] >> 1;
+ }
+}
+
+int AudioFrameOperations::Scale(float left, float right, AudioFrame* frame) {
+ if (frame->num_channels_ != 2) {
+ return -1;
+ } else if (frame->muted()) {
+ return 0;
+ }
+
+ int16_t* frame_data = frame->mutable_data();
+ for (size_t i = 0; i < frame->samples_per_channel_; i++) {
+ frame_data[2 * i] = static_cast<int16_t>(left * frame_data[2 * i]);
+ frame_data[2 * i + 1] = static_cast<int16_t>(right * frame_data[2 * i + 1]);
+ }
+ return 0;
+}
+
+int AudioFrameOperations::ScaleWithSat(float scale, AudioFrame* frame) {
+ if (frame->muted()) {
+ return 0;
+ }
+
+ int16_t* frame_data = frame->mutable_data();
+ for (size_t i = 0; i < frame->samples_per_channel_ * frame->num_channels_;
+ i++) {
+ frame_data[i] = rtc::saturated_cast<int16_t>(scale * frame_data[i]);
+ }
+ return 0;
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/audio/utility/audio_frame_operations.h b/third_party/libwebrtc/webrtc/audio/utility/audio_frame_operations.h
new file mode 100644
index 0000000000..cd55f19fc1
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/audio/utility/audio_frame_operations.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_UTILITY_AUDIO_FRAME_OPERATIONS_H_
+#define AUDIO_UTILITY_AUDIO_FRAME_OPERATIONS_H_
+
+#include <stddef.h>
+
+#include "typedefs.h" // NOLINT(build/include)
+
+namespace webrtc {
+
+class AudioFrame;
+
+// TODO(andrew): consolidate this with utility.h and audio_frame_manipulator.h.
+// Change reference parameters to pointers. Consider using a namespace rather
+// than a class.
+class AudioFrameOperations {
+ public:
+ // Add samples in |frame_to_add| with samples in |result_frame|
+  // putting the results in |result_frame|. The fields
+ // |vad_activity_| and |speech_type_| of the result frame are
+ // updated. If |result_frame| is empty (|samples_per_channel_|==0),
+ // the samples in |frame_to_add| are added to it. The number of
+ // channels and number of samples per channel must match except when
+ // |result_frame| is empty.
+ static void Add(const AudioFrame& frame_to_add, AudioFrame* result_frame);
+
+ // Upmixes mono |src_audio| to stereo |dst_audio|. This is an out-of-place
+ // operation, meaning src_audio and dst_audio must point to different
+ // buffers. It is the caller's responsibility to ensure that |dst_audio| is
+ // sufficiently large.
+ static void MonoToStereo(const int16_t* src_audio,
+ size_t samples_per_channel,
+ int16_t* dst_audio);
+
+ // |frame.num_channels_| will be updated. This version checks for sufficient
+ // buffer size and that |num_channels_| is mono.
+ static int MonoToStereo(AudioFrame* frame);
+
+ // Downmixes stereo |src_audio| to mono |dst_audio|. This is an in-place
+ // operation, meaning |src_audio| and |dst_audio| may point to the same
+ // buffer.
+ static void StereoToMono(const int16_t* src_audio,
+ size_t samples_per_channel,
+ int16_t* dst_audio);
+
+ // |frame.num_channels_| will be updated. This version checks that
+ // |num_channels_| is stereo.
+ static int StereoToMono(AudioFrame* frame);
+
+ // Downmixes 4 channels |src_audio| to stereo |dst_audio|. This is an in-place
+ // operation, meaning |src_audio| and |dst_audio| may point to the same
+ // buffer.
+ static void QuadToStereo(const int16_t* src_audio,
+ size_t samples_per_channel,
+ int16_t* dst_audio);
+
+ // |frame.num_channels_| will be updated. This version checks that
+ // |num_channels_| is 4 channels.
+ static int QuadToStereo(AudioFrame* frame);
+
+ // Downmixes 4 channels |src_audio| to mono |dst_audio|. This is an in-place
+ // operation, meaning |src_audio| and |dst_audio| may point to the same
+ // buffer.
+ static void QuadToMono(const int16_t* src_audio,
+ size_t samples_per_channel,
+ int16_t* dst_audio);
+
+ // |frame.num_channels_| will be updated. This version checks that
+ // |num_channels_| is 4 channels.
+ static int QuadToMono(AudioFrame* frame);
+
+ // Downmixes |src_channels| |src_audio| to |dst_channels| |dst_audio|.
+ // This is an in-place operation, meaning |src_audio| and |dst_audio|
+ // may point to the same buffer. Supported channel combinations are
+ // Stereo to Mono, Quad to Mono, and Quad to Stereo.
+ static void DownmixChannels(const int16_t* src_audio,
+ size_t src_channels,
+ size_t samples_per_channel,
+ size_t dst_channels,
+ int16_t* dst_audio);
+
+ // |frame.num_channels_| will be updated. This version checks that
+ // |num_channels_| and |dst_channels| are valid and performs relevant
+ // downmix. Supported channel combinations are Stereo to Mono, Quad to Mono,
+ // and Quad to Stereo.
+ static int DownmixChannels(size_t dst_channels, AudioFrame* frame);
+
+ // Swap the left and right channels of |frame|. Fails silently if |frame| is
+ // not stereo.
+ static void SwapStereoChannels(AudioFrame* frame);
+
+ // Conditionally zero out contents of |frame| for implementing audio mute:
+ // |previous_frame_muted| && |current_frame_muted| - Zero out whole frame.
+ // |previous_frame_muted| && !|current_frame_muted| - Fade-in at frame start.
+ // !|previous_frame_muted| && |current_frame_muted| - Fade-out at frame end.
+ // !|previous_frame_muted| && !|current_frame_muted| - Leave frame untouched.
+ static void Mute(AudioFrame* frame,
+ bool previous_frame_muted,
+ bool current_frame_muted);
+
+ // Zero out contents of frame.
+ static void Mute(AudioFrame* frame);
+
+ // Halve samples in |frame|.
+ static void ApplyHalfGain(AudioFrame* frame);
+
+ static int Scale(float left, float right, AudioFrame* frame);
+
+ static int ScaleWithSat(float scale, AudioFrame* frame);
+};
+
+} // namespace webrtc
+
+#endif // AUDIO_UTILITY_AUDIO_FRAME_OPERATIONS_H_
diff --git a/third_party/libwebrtc/webrtc/audio/utility/audio_frame_operations_gn/moz.build b/third_party/libwebrtc/webrtc/audio/utility/audio_frame_operations_gn/moz.build
new file mode 100644
index 0000000000..6423661baa
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/audio/utility/audio_frame_operations_gn/moz.build
@@ -0,0 +1,225 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["CHROMIUM_BUILD"] = True
+DEFINES["V8_DEPRECATION_WARNINGS"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_RESTRICT_LOGGING"] = True
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/ipc/glue",
+ "/third_party/libwebrtc/webrtc/"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/webrtc/audio/utility/audio_frame_operations.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+ DEFINES["WTF_USE_DYNAMIC_ANNOTATIONS"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION"] = "r12b"
+ DEFINES["DISABLE_NACL"] = True
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["NO_TCMALLOC"] = True
+ DEFINES["USE_OPENSSL_CERTS"] = "1"
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["__GNU_SOURCE"] = "1"
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["NO_TCMALLOC"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORE"] = "0"
+
+ OS_LIBS += [
+ "-framework Foundation"
+ ]
+
+if CONFIG["OS_TARGET"] == "DragonFly":
+
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+
+if CONFIG["OS_TARGET"] == "FreeBSD":
+
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "NetBSD":
+
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NO_TCMALLOC"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "1"
+ DEFINES["UNICODE"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_CRT_SECURE_NO_WARNINGS"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_USING_V110_SDK71_"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if not CONFIG["MOZ_DEBUG"] and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_FORTIFY_SOURCE"] = "2"
+
+if not CONFIG["MOZ_DEBUG"] and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_FORTIFY_SOURCE"] = "2"
+
+if not CONFIG["MOZ_DEBUG"] and CONFIG["OS_TARGET"] == "DragonFly":
+
+ DEFINES["_FORTIFY_SOURCE"] = "2"
+
+if not CONFIG["MOZ_DEBUG"] and CONFIG["OS_TARGET"] == "FreeBSD":
+
+ DEFINES["_FORTIFY_SOURCE"] = "2"
+
+if not CONFIG["MOZ_DEBUG"] and CONFIG["OS_TARGET"] == "NetBSD":
+
+ DEFINES["_FORTIFY_SOURCE"] = "2"
+
+if not CONFIG["MOZ_DEBUG"] and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_FORTIFY_SOURCE"] = "2"
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["CR_XCODE_VERSION"] = "0120"
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["CR_XCODE_VERSION"] = "0920"
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "FreeBSD":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["DISABLE_NACL"] = True
+ DEFINES["NO_TCMALLOC"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "NetBSD":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+Library("audio_frame_operations_gn")
diff --git a/third_party/libwebrtc/webrtc/audio/utility/audio_frame_operations_unittest.cc b/third_party/libwebrtc/webrtc/audio/utility/audio_frame_operations_unittest.cc
new file mode 100644
index 0000000000..6d23731a05
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/audio/utility/audio_frame_operations_unittest.cc
@@ -0,0 +1,629 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio/utility/audio_frame_operations.h"
+#include "modules/include/module_common_types.h"
+#include "rtc_base/checks.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+class AudioFrameOperationsTest : public ::testing::Test {
+ protected:
+ AudioFrameOperationsTest() {
+ // Set typical values.
+ frame_.samples_per_channel_ = 320;
+ frame_.num_channels_ = 2;
+ }
+
+ AudioFrame frame_;
+};
+
+void SetFrameData(int16_t ch1,
+ int16_t ch2,
+ int16_t ch3,
+ int16_t ch4,
+ AudioFrame* frame) {
+ int16_t* frame_data = frame->mutable_data();
+ for (size_t i = 0; i < frame->samples_per_channel_ * 4; i += 4) {
+ frame_data[i] = ch1;
+ frame_data[i + 1] = ch2;
+ frame_data[i + 2] = ch3;
+ frame_data[i + 3] = ch4;
+ }
+}
+
+void SetFrameData(int16_t left, int16_t right, AudioFrame* frame) {
+ int16_t* frame_data = frame->mutable_data();
+ for (size_t i = 0; i < frame->samples_per_channel_ * 2; i += 2) {
+ frame_data[i] = left;
+ frame_data[i + 1] = right;
+ }
+}
+
+void SetFrameData(int16_t data, AudioFrame* frame) {
+ int16_t* frame_data = frame->mutable_data();
+ for (size_t i = 0;
+ i < frame->samples_per_channel_ * frame->num_channels_; i++) {
+ frame_data[i] = data;
+ }
+}
+
+void VerifyFramesAreEqual(const AudioFrame& frame1, const AudioFrame& frame2) {
+ EXPECT_EQ(frame1.num_channels_, frame2.num_channels_);
+ EXPECT_EQ(frame1.samples_per_channel_,
+ frame2.samples_per_channel_);
+ const int16_t* frame1_data = frame1.data();
+ const int16_t* frame2_data = frame2.data();
+ for (size_t i = 0; i < frame1.samples_per_channel_ * frame1.num_channels_;
+ i++) {
+ EXPECT_EQ(frame1_data[i], frame2_data[i]);
+ }
+ EXPECT_EQ(frame1.muted(), frame2.muted());
+}
+
+void InitFrame(AudioFrame* frame, size_t channels, size_t samples_per_channel,
+ int16_t left_data, int16_t right_data) {
+ RTC_DCHECK(frame);
+ RTC_DCHECK_GE(2, channels);
+ RTC_DCHECK_GE(AudioFrame::kMaxDataSizeSamples,
+ samples_per_channel * channels);
+ frame->samples_per_channel_ = samples_per_channel;
+ frame->num_channels_ = channels;
+ if (channels == 2) {
+ SetFrameData(left_data, right_data, frame);
+ } else if (channels == 1) {
+ SetFrameData(left_data, frame);
+ }
+}
+
+int16_t GetChannelData(const AudioFrame& frame, size_t channel, size_t index) {
+ RTC_DCHECK_LT(channel, frame.num_channels_);
+ RTC_DCHECK_LT(index, frame.samples_per_channel_);
+ return frame.data()[index * frame.num_channels_ + channel];
+}
+
+void VerifyFrameDataBounds(const AudioFrame& frame, size_t channel, int16_t max,
+ int16_t min) {
+ for (size_t i = 0; i < frame.samples_per_channel_; ++i) {
+ int16_t s = GetChannelData(frame, channel, i);
+ EXPECT_LE(min, s);
+ EXPECT_GE(max, s);
+ }
+}
+
+TEST_F(AudioFrameOperationsTest, MonoToStereoFailsWithBadParameters) {
+ EXPECT_EQ(-1, AudioFrameOperations::MonoToStereo(&frame_));
+
+ frame_.samples_per_channel_ = AudioFrame::kMaxDataSizeSamples;
+ frame_.num_channels_ = 1;
+ EXPECT_EQ(-1, AudioFrameOperations::MonoToStereo(&frame_));
+}
+
+TEST_F(AudioFrameOperationsTest, MonoToStereoSucceeds) {
+ frame_.num_channels_ = 1;
+ SetFrameData(1, &frame_);
+
+ EXPECT_EQ(0, AudioFrameOperations::MonoToStereo(&frame_));
+
+ AudioFrame stereo_frame;
+ stereo_frame.samples_per_channel_ = 320;
+ stereo_frame.num_channels_ = 2;
+ SetFrameData(1, 1, &stereo_frame);
+ VerifyFramesAreEqual(stereo_frame, frame_);
+}
+
+TEST_F(AudioFrameOperationsTest, MonoToStereoMuted) {
+ frame_.num_channels_ = 1;
+ ASSERT_TRUE(frame_.muted());
+ EXPECT_EQ(0, AudioFrameOperations::MonoToStereo(&frame_));
+ EXPECT_TRUE(frame_.muted());
+}
+
+TEST_F(AudioFrameOperationsTest, MonoToStereoBufferSucceeds) {
+ AudioFrame target_frame;
+ frame_.num_channels_ = 1;
+ SetFrameData(4, &frame_);
+
+ target_frame.num_channels_ = 2;
+ target_frame.samples_per_channel_ = frame_.samples_per_channel_;
+
+ AudioFrameOperations::MonoToStereo(frame_.data(), frame_.samples_per_channel_,
+ target_frame.mutable_data());
+
+ AudioFrame stereo_frame;
+ stereo_frame.samples_per_channel_ = 320;
+ stereo_frame.num_channels_ = 2;
+ SetFrameData(4, 4, &stereo_frame);
+ VerifyFramesAreEqual(stereo_frame, target_frame);
+}
+
+TEST_F(AudioFrameOperationsTest, StereoToMonoFailsWithBadParameters) {
+ frame_.num_channels_ = 1;
+ EXPECT_EQ(-1, AudioFrameOperations::StereoToMono(&frame_));
+}
+
+TEST_F(AudioFrameOperationsTest, StereoToMonoSucceeds) {
+ SetFrameData(4, 2, &frame_);
+ EXPECT_EQ(0, AudioFrameOperations::StereoToMono(&frame_));
+
+ AudioFrame mono_frame;
+ mono_frame.samples_per_channel_ = 320;
+ mono_frame.num_channels_ = 1;
+ SetFrameData(3, &mono_frame);
+ VerifyFramesAreEqual(mono_frame, frame_);
+}
+
+TEST_F(AudioFrameOperationsTest, StereoToMonoMuted) {
+ ASSERT_TRUE(frame_.muted());
+ EXPECT_EQ(0, AudioFrameOperations::StereoToMono(&frame_));
+ EXPECT_TRUE(frame_.muted());
+}
+
+TEST_F(AudioFrameOperationsTest, StereoToMonoBufferSucceeds) {
+ AudioFrame target_frame;
+ SetFrameData(4, 2, &frame_);
+
+ target_frame.num_channels_ = 1;
+ target_frame.samples_per_channel_ = frame_.samples_per_channel_;
+
+ AudioFrameOperations::StereoToMono(frame_.data(), frame_.samples_per_channel_,
+ target_frame.mutable_data());
+
+ AudioFrame mono_frame;
+ mono_frame.samples_per_channel_ = 320;
+ mono_frame.num_channels_ = 1;
+ SetFrameData(3, &mono_frame);
+ VerifyFramesAreEqual(mono_frame, target_frame);
+}
+
+TEST_F(AudioFrameOperationsTest, StereoToMonoDoesNotWrapAround) {
+ SetFrameData(-32768, -32768, &frame_);
+ EXPECT_EQ(0, AudioFrameOperations::StereoToMono(&frame_));
+
+ AudioFrame mono_frame;
+ mono_frame.samples_per_channel_ = 320;
+ mono_frame.num_channels_ = 1;
+ SetFrameData(-32768, &mono_frame);
+ VerifyFramesAreEqual(mono_frame, frame_);
+}
+
+TEST_F(AudioFrameOperationsTest, QuadToMonoFailsWithBadParameters) {
+ frame_.num_channels_ = 1;
+ EXPECT_EQ(-1, AudioFrameOperations::QuadToMono(&frame_));
+ frame_.num_channels_ = 2;
+ EXPECT_EQ(-1, AudioFrameOperations::QuadToMono(&frame_));
+}
+
+TEST_F(AudioFrameOperationsTest, QuadToMonoSucceeds) {
+ frame_.num_channels_ = 4;
+ SetFrameData(4, 2, 6, 8, &frame_);
+
+ EXPECT_EQ(0, AudioFrameOperations::QuadToMono(&frame_));
+
+ AudioFrame mono_frame;
+ mono_frame.samples_per_channel_ = 320;
+ mono_frame.num_channels_ = 1;
+ SetFrameData(5, &mono_frame);
+ VerifyFramesAreEqual(mono_frame, frame_);
+}
+
+TEST_F(AudioFrameOperationsTest, QuadToMonoMuted) {
+ frame_.num_channels_ = 4;
+ ASSERT_TRUE(frame_.muted());
+ EXPECT_EQ(0, AudioFrameOperations::QuadToMono(&frame_));
+ EXPECT_TRUE(frame_.muted());
+}
+
+TEST_F(AudioFrameOperationsTest, QuadToMonoBufferSucceeds) {
+ AudioFrame target_frame;
+ frame_.num_channels_ = 4;
+ SetFrameData(4, 2, 6, 8, &frame_);
+
+ target_frame.num_channels_ = 1;
+ target_frame.samples_per_channel_ = frame_.samples_per_channel_;
+
+ AudioFrameOperations::QuadToMono(frame_.data(), frame_.samples_per_channel_,
+ target_frame.mutable_data());
+ AudioFrame mono_frame;
+ mono_frame.samples_per_channel_ = 320;
+ mono_frame.num_channels_ = 1;
+ SetFrameData(5, &mono_frame);
+ VerifyFramesAreEqual(mono_frame, target_frame);
+}
+
+TEST_F(AudioFrameOperationsTest, QuadToMonoDoesNotWrapAround) {
+ frame_.num_channels_ = 4;
+ SetFrameData(-32768, -32768, -32768, -32768, &frame_);
+ EXPECT_EQ(0, AudioFrameOperations::QuadToMono(&frame_));
+
+ AudioFrame mono_frame;
+ mono_frame.samples_per_channel_ = 320;
+ mono_frame.num_channels_ = 1;
+ SetFrameData(-32768, &mono_frame);
+ VerifyFramesAreEqual(mono_frame, frame_);
+}
+
+TEST_F(AudioFrameOperationsTest, QuadToStereoFailsWithBadParameters) {
+ frame_.num_channels_ = 1;
+ EXPECT_EQ(-1, AudioFrameOperations::QuadToStereo(&frame_));
+ frame_.num_channels_ = 2;
+ EXPECT_EQ(-1, AudioFrameOperations::QuadToStereo(&frame_));
+}
+
+TEST_F(AudioFrameOperationsTest, QuadToStereoSucceeds) {
+ frame_.num_channels_ = 4;
+ SetFrameData(4, 2, 6, 8, &frame_);
+ EXPECT_EQ(0, AudioFrameOperations::QuadToStereo(&frame_));
+
+ AudioFrame stereo_frame;
+ stereo_frame.samples_per_channel_ = 320;
+ stereo_frame.num_channels_ = 2;
+ SetFrameData(3, 7, &stereo_frame);
+ VerifyFramesAreEqual(stereo_frame, frame_);
+}
+
+TEST_F(AudioFrameOperationsTest, QuadToStereoMuted) {
+ frame_.num_channels_ = 4;
+ ASSERT_TRUE(frame_.muted());
+ EXPECT_EQ(0, AudioFrameOperations::QuadToStereo(&frame_));
+ EXPECT_TRUE(frame_.muted());
+}
+
+TEST_F(AudioFrameOperationsTest, QuadToStereoBufferSucceeds) {
+ AudioFrame target_frame;
+ frame_.num_channels_ = 4;
+ SetFrameData(4, 2, 6, 8, &frame_);
+
+ target_frame.num_channels_ = 2;
+ target_frame.samples_per_channel_ = frame_.samples_per_channel_;
+
+ AudioFrameOperations::QuadToStereo(frame_.data(), frame_.samples_per_channel_,
+ target_frame.mutable_data());
+ AudioFrame stereo_frame;
+ stereo_frame.samples_per_channel_ = 320;
+ stereo_frame.num_channels_ = 2;
+ SetFrameData(3, 7, &stereo_frame);
+ VerifyFramesAreEqual(stereo_frame, target_frame);
+}
+
+TEST_F(AudioFrameOperationsTest, QuadToStereoDoesNotWrapAround) {
+ frame_.num_channels_ = 4;
+ SetFrameData(-32768, -32768, -32768, -32768, &frame_);
+ EXPECT_EQ(0, AudioFrameOperations::QuadToStereo(&frame_));
+
+ AudioFrame stereo_frame;
+ stereo_frame.samples_per_channel_ = 320;
+ stereo_frame.num_channels_ = 2;
+ SetFrameData(-32768, -32768, &stereo_frame);
+ VerifyFramesAreEqual(stereo_frame, frame_);
+}
+
+TEST_F(AudioFrameOperationsTest, SwapStereoChannelsSucceedsOnStereo) {
+ SetFrameData(0, 1, &frame_);
+
+ AudioFrame swapped_frame;
+ swapped_frame.samples_per_channel_ = 320;
+ swapped_frame.num_channels_ = 2;
+ SetFrameData(1, 0, &swapped_frame);
+
+ AudioFrameOperations::SwapStereoChannels(&frame_);
+ VerifyFramesAreEqual(swapped_frame, frame_);
+}
+
+TEST_F(AudioFrameOperationsTest, SwapStereoChannelsMuted) {
+ ASSERT_TRUE(frame_.muted());
+ AudioFrameOperations::SwapStereoChannels(&frame_);
+ EXPECT_TRUE(frame_.muted());
+}
+
+TEST_F(AudioFrameOperationsTest, SwapStereoChannelsFailsOnMono) {
+ frame_.num_channels_ = 1;
+ // Set data to "stereo", despite it being a mono frame.
+ SetFrameData(0, 1, &frame_);
+
+ AudioFrame orig_frame;
+ orig_frame.CopyFrom(frame_);
+ AudioFrameOperations::SwapStereoChannels(&frame_);
+ // Verify that no swap occurred.
+ VerifyFramesAreEqual(orig_frame, frame_);
+}
+
+TEST_F(AudioFrameOperationsTest, MuteDisabled) {
+ SetFrameData(1000, -1000, &frame_);
+ AudioFrameOperations::Mute(&frame_, false, false);
+
+ AudioFrame muted_frame;
+ muted_frame.samples_per_channel_ = 320;
+ muted_frame.num_channels_ = 2;
+ SetFrameData(1000, -1000, &muted_frame);
+ VerifyFramesAreEqual(muted_frame, frame_);
+}
+
+TEST_F(AudioFrameOperationsTest, MuteEnabled) {
+ SetFrameData(1000, -1000, &frame_);
+ AudioFrameOperations::Mute(&frame_, true, true);
+
+ AudioFrame muted_frame;
+ muted_frame.samples_per_channel_ = frame_.samples_per_channel_;
+ muted_frame.num_channels_ = frame_.num_channels_;
+ ASSERT_TRUE(muted_frame.muted());
+ VerifyFramesAreEqual(muted_frame, frame_);
+}
+
+// Verify that *beginning* to mute works for short and long (>128) frames, mono
+// and stereo. Beginning mute should yield a ramp down to zero.
+TEST_F(AudioFrameOperationsTest, MuteBeginMonoLong) {
+ InitFrame(&frame_, 1, 228, 1000, -1000);
+ AudioFrameOperations::Mute(&frame_, false, true);
+ VerifyFrameDataBounds(frame_, 0, 1000, 0);
+ EXPECT_EQ(1000, GetChannelData(frame_, 0, 99));
+ EXPECT_EQ(992, GetChannelData(frame_, 0, 100));
+ EXPECT_EQ(7, GetChannelData(frame_, 0, 226));
+ EXPECT_EQ(0, GetChannelData(frame_, 0, 227));
+}
+
+TEST_F(AudioFrameOperationsTest, MuteBeginMonoShort) {
+ InitFrame(&frame_, 1, 93, 1000, -1000);
+ AudioFrameOperations::Mute(&frame_, false, true);
+ VerifyFrameDataBounds(frame_, 0, 1000, 0);
+ EXPECT_EQ(989, GetChannelData(frame_, 0, 0));
+ EXPECT_EQ(978, GetChannelData(frame_, 0, 1));
+ EXPECT_EQ(10, GetChannelData(frame_, 0, 91));
+ EXPECT_EQ(0, GetChannelData(frame_, 0, 92));
+}
+
+TEST_F(AudioFrameOperationsTest, MuteBeginStereoLong) {
+ InitFrame(&frame_, 2, 228, 1000, -1000);
+ AudioFrameOperations::Mute(&frame_, false, true);
+ VerifyFrameDataBounds(frame_, 0, 1000, 0);
+ VerifyFrameDataBounds(frame_, 1, 0, -1000);
+ EXPECT_EQ(1000, GetChannelData(frame_, 0, 99));
+ EXPECT_EQ(-1000, GetChannelData(frame_, 1, 99));
+ EXPECT_EQ(992, GetChannelData(frame_, 0, 100));
+ EXPECT_EQ(-992, GetChannelData(frame_, 1, 100));
+ EXPECT_EQ(7, GetChannelData(frame_, 0, 226));
+ EXPECT_EQ(-7, GetChannelData(frame_, 1, 226));
+ EXPECT_EQ(0, GetChannelData(frame_, 0, 227));
+ EXPECT_EQ(0, GetChannelData(frame_, 1, 227));
+}
+
+TEST_F(AudioFrameOperationsTest, MuteBeginStereoShort) {
+ InitFrame(&frame_, 2, 93, 1000, -1000);
+ AudioFrameOperations::Mute(&frame_, false, true);
+ VerifyFrameDataBounds(frame_, 0, 1000, 0);
+ VerifyFrameDataBounds(frame_, 1, 0, -1000);
+ EXPECT_EQ(989, GetChannelData(frame_, 0, 0));
+ EXPECT_EQ(-989, GetChannelData(frame_, 1, 0));
+ EXPECT_EQ(978, GetChannelData(frame_, 0, 1));
+ EXPECT_EQ(-978, GetChannelData(frame_, 1, 1));
+ EXPECT_EQ(10, GetChannelData(frame_, 0, 91));
+ EXPECT_EQ(-10, GetChannelData(frame_, 1, 91));
+ EXPECT_EQ(0, GetChannelData(frame_, 0, 92));
+ EXPECT_EQ(0, GetChannelData(frame_, 1, 92));
+}
+
+// Verify that *ending* to mute works for short and long (>128) frames, mono
+// and stereo. Ending mute should yield a ramp up from zero.
+TEST_F(AudioFrameOperationsTest, MuteEndMonoLong) {
+ InitFrame(&frame_, 1, 228, 1000, -1000);
+ AudioFrameOperations::Mute(&frame_, true, false);
+ VerifyFrameDataBounds(frame_, 0, 1000, 0);
+ EXPECT_EQ(7, GetChannelData(frame_, 0, 0));
+ EXPECT_EQ(15, GetChannelData(frame_, 0, 1));
+ EXPECT_EQ(1000, GetChannelData(frame_, 0, 127));
+ EXPECT_EQ(1000, GetChannelData(frame_, 0, 128));
+}
+
+TEST_F(AudioFrameOperationsTest, MuteEndMonoShort) {
+ InitFrame(&frame_, 1, 93, 1000, -1000);
+ AudioFrameOperations::Mute(&frame_, true, false);
+ VerifyFrameDataBounds(frame_, 0, 1000, 0);
+ EXPECT_EQ(10, GetChannelData(frame_, 0, 0));
+ EXPECT_EQ(21, GetChannelData(frame_, 0, 1));
+ EXPECT_EQ(989, GetChannelData(frame_, 0, 91));
+ EXPECT_EQ(999, GetChannelData(frame_, 0, 92));
+}
+
+TEST_F(AudioFrameOperationsTest, MuteEndStereoLong) {
+ InitFrame(&frame_, 2, 228, 1000, -1000);
+ AudioFrameOperations::Mute(&frame_, true, false);
+ VerifyFrameDataBounds(frame_, 0, 1000, 0);
+ VerifyFrameDataBounds(frame_, 1, 0, -1000);
+ EXPECT_EQ(7, GetChannelData(frame_, 0, 0));
+ EXPECT_EQ(-7, GetChannelData(frame_, 1, 0));
+ EXPECT_EQ(15, GetChannelData(frame_, 0, 1));
+ EXPECT_EQ(-15, GetChannelData(frame_, 1, 1));
+ EXPECT_EQ(1000, GetChannelData(frame_, 0, 127));
+ EXPECT_EQ(-1000, GetChannelData(frame_, 1, 127));
+ EXPECT_EQ(1000, GetChannelData(frame_, 0, 128));
+ EXPECT_EQ(-1000, GetChannelData(frame_, 1, 128));
+}
+
+TEST_F(AudioFrameOperationsTest, MuteEndStereoShort) {
+ InitFrame(&frame_, 2, 93, 1000, -1000);
+ AudioFrameOperations::Mute(&frame_, true, false);
+ VerifyFrameDataBounds(frame_, 0, 1000, 0);
+ VerifyFrameDataBounds(frame_, 1, 0, -1000);
+ EXPECT_EQ(10, GetChannelData(frame_, 0, 0));
+ EXPECT_EQ(-10, GetChannelData(frame_, 1, 0));
+ EXPECT_EQ(21, GetChannelData(frame_, 0, 1));
+ EXPECT_EQ(-21, GetChannelData(frame_, 1, 1));
+ EXPECT_EQ(989, GetChannelData(frame_, 0, 91));
+ EXPECT_EQ(-989, GetChannelData(frame_, 1, 91));
+ EXPECT_EQ(999, GetChannelData(frame_, 0, 92));
+ EXPECT_EQ(-999, GetChannelData(frame_, 1, 92));
+}
+
+TEST_F(AudioFrameOperationsTest, MuteBeginAlreadyMuted) {
+ ASSERT_TRUE(frame_.muted());
+ AudioFrameOperations::Mute(&frame_, false, true);
+ EXPECT_TRUE(frame_.muted());
+}
+
+TEST_F(AudioFrameOperationsTest, MuteEndAlreadyMuted) {
+ ASSERT_TRUE(frame_.muted());
+ AudioFrameOperations::Mute(&frame_, true, false);
+ EXPECT_TRUE(frame_.muted());
+}
+
+TEST_F(AudioFrameOperationsTest, ApplyHalfGainSucceeds) {
+ SetFrameData(2, &frame_);
+
+ AudioFrame half_gain_frame;
+ half_gain_frame.num_channels_ = frame_.num_channels_;
+ half_gain_frame.samples_per_channel_ = frame_.samples_per_channel_;
+ SetFrameData(1, &half_gain_frame);
+
+ AudioFrameOperations::ApplyHalfGain(&frame_);
+ VerifyFramesAreEqual(half_gain_frame, frame_);
+}
+
+TEST_F(AudioFrameOperationsTest, ApplyHalfGainMuted) {
+ ASSERT_TRUE(frame_.muted());
+ AudioFrameOperations::ApplyHalfGain(&frame_);
+ EXPECT_TRUE(frame_.muted());
+}
+
+// TODO(andrew): should not allow negative scales.
+TEST_F(AudioFrameOperationsTest, DISABLED_ScaleFailsWithBadParameters) {
+ frame_.num_channels_ = 1;
+ EXPECT_EQ(-1, AudioFrameOperations::Scale(1.0, 1.0, &frame_));
+
+ frame_.num_channels_ = 3;
+ EXPECT_EQ(-1, AudioFrameOperations::Scale(1.0, 1.0, &frame_));
+
+ frame_.num_channels_ = 2;
+ EXPECT_EQ(-1, AudioFrameOperations::Scale(-1.0, 1.0, &frame_));
+ EXPECT_EQ(-1, AudioFrameOperations::Scale(1.0, -1.0, &frame_));
+}
+
+// TODO(andrew): fix the wraparound bug. We should always saturate.
+TEST_F(AudioFrameOperationsTest, DISABLED_ScaleDoesNotWrapAround) {
+ SetFrameData(4000, -4000, &frame_);
+ EXPECT_EQ(0, AudioFrameOperations::Scale(10.0, 10.0, &frame_));
+
+ AudioFrame clipped_frame;
+ clipped_frame.samples_per_channel_ = 320;
+ clipped_frame.num_channels_ = 2;
+ SetFrameData(32767, -32768, &clipped_frame);
+ VerifyFramesAreEqual(clipped_frame, frame_);
+}
+
+TEST_F(AudioFrameOperationsTest, ScaleSucceeds) {
+ SetFrameData(1, -1, &frame_);
+ EXPECT_EQ(0, AudioFrameOperations::Scale(2.0, 3.0, &frame_));
+
+ AudioFrame scaled_frame;
+ scaled_frame.samples_per_channel_ = 320;
+ scaled_frame.num_channels_ = 2;
+ SetFrameData(2, -3, &scaled_frame);
+ VerifyFramesAreEqual(scaled_frame, frame_);
+}
+
+TEST_F(AudioFrameOperationsTest, ScaleMuted) {
+ ASSERT_TRUE(frame_.muted());
+ EXPECT_EQ(0, AudioFrameOperations::Scale(2.0, 3.0, &frame_));
+ EXPECT_TRUE(frame_.muted());
+}
+
+// TODO(andrew): should fail with a negative scale.
+TEST_F(AudioFrameOperationsTest, DISABLED_ScaleWithSatFailsWithBadParameters) {
+ EXPECT_EQ(-1, AudioFrameOperations::ScaleWithSat(-1.0, &frame_));
+}
+
+TEST_F(AudioFrameOperationsTest, ScaleWithSatDoesNotWrapAround) {
+ frame_.num_channels_ = 1;
+ SetFrameData(4000, &frame_);
+ EXPECT_EQ(0, AudioFrameOperations::ScaleWithSat(10.0, &frame_));
+
+ AudioFrame clipped_frame;
+ clipped_frame.samples_per_channel_ = 320;
+ clipped_frame.num_channels_ = 1;
+ SetFrameData(32767, &clipped_frame);
+ VerifyFramesAreEqual(clipped_frame, frame_);
+
+ SetFrameData(-4000, &frame_);
+ EXPECT_EQ(0, AudioFrameOperations::ScaleWithSat(10.0, &frame_));
+ SetFrameData(-32768, &clipped_frame);
+ VerifyFramesAreEqual(clipped_frame, frame_);
+}
+
+TEST_F(AudioFrameOperationsTest, ScaleWithSatSucceeds) {
+ frame_.num_channels_ = 1;
+ SetFrameData(1, &frame_);
+ EXPECT_EQ(0, AudioFrameOperations::ScaleWithSat(2.0, &frame_));
+
+ AudioFrame scaled_frame;
+ scaled_frame.samples_per_channel_ = 320;
+ scaled_frame.num_channels_ = 1;
+ SetFrameData(2, &scaled_frame);
+ VerifyFramesAreEqual(scaled_frame, frame_);
+}
+
+TEST_F(AudioFrameOperationsTest, ScaleWithSatMuted) {
+ ASSERT_TRUE(frame_.muted());
+ EXPECT_EQ(0, AudioFrameOperations::ScaleWithSat(2.0, &frame_));
+ EXPECT_TRUE(frame_.muted());
+}
+
+TEST_F(AudioFrameOperationsTest, AddingXToEmptyGivesX) {
+ // When samples_per_channel_ is 0, the frame counts as empty and zero.
+ AudioFrame frame_to_add_to;
+ frame_to_add_to.mutable_data(); // Unmute the frame.
+ ASSERT_FALSE(frame_to_add_to.muted());
+ frame_to_add_to.samples_per_channel_ = 0;
+ frame_to_add_to.num_channels_ = frame_.num_channels_;
+
+ SetFrameData(1000, &frame_);
+ AudioFrameOperations::Add(frame_, &frame_to_add_to);
+ VerifyFramesAreEqual(frame_, frame_to_add_to);
+}
+
+TEST_F(AudioFrameOperationsTest, AddingXToMutedGivesX) {
+ AudioFrame frame_to_add_to;
+ ASSERT_TRUE(frame_to_add_to.muted());
+ frame_to_add_to.samples_per_channel_ = frame_.samples_per_channel_;
+ frame_to_add_to.num_channels_ = frame_.num_channels_;
+
+ SetFrameData(1000, &frame_);
+ AudioFrameOperations::Add(frame_, &frame_to_add_to);
+ VerifyFramesAreEqual(frame_, frame_to_add_to);
+}
+
+TEST_F(AudioFrameOperationsTest, AddingMutedToXGivesX) {
+ AudioFrame frame_to_add_to;
+ frame_to_add_to.samples_per_channel_ = frame_.samples_per_channel_;
+ frame_to_add_to.num_channels_ = frame_.num_channels_;
+ SetFrameData(1000, &frame_to_add_to);
+
+ AudioFrame frame_copy;
+ frame_copy.CopyFrom(frame_to_add_to);
+
+ ASSERT_TRUE(frame_.muted());
+ AudioFrameOperations::Add(frame_, &frame_to_add_to);
+ VerifyFramesAreEqual(frame_copy, frame_to_add_to);
+}
+
+TEST_F(AudioFrameOperationsTest, AddingTwoFramesProducesTheirSum) {
+ AudioFrame frame_to_add_to;
+ frame_to_add_to.samples_per_channel_ = frame_.samples_per_channel_;
+ frame_to_add_to.num_channels_ = frame_.num_channels_;
+ SetFrameData(1000, &frame_to_add_to);
+ SetFrameData(2000, &frame_);
+
+ AudioFrameOperations::Add(frame_, &frame_to_add_to);
+ SetFrameData(frame_.data()[0] + 1000, &frame_);
+ VerifyFramesAreEqual(frame_, frame_to_add_to);
+}
+
+} // namespace
+} // namespace webrtc