summaryrefslogtreecommitdiffstats
path: root/third_party/libwebrtc/modules/audio_mixer
diff options
context:
space:
mode:
Diffstat (limited to 'third_party/libwebrtc/modules/audio_mixer')
-rw-r--r--third_party/libwebrtc/modules/audio_mixer/BUILD.gn143
-rw-r--r--third_party/libwebrtc/modules/audio_mixer/DEPS13
-rw-r--r--third_party/libwebrtc/modules/audio_mixer/OWNERS2
-rw-r--r--third_party/libwebrtc/modules/audio_mixer/audio_frame_manipulator.cc92
-rw-r--r--third_party/libwebrtc/modules/audio_mixer/audio_frame_manipulator.h33
-rw-r--r--third_party/libwebrtc/modules/audio_mixer/audio_frame_manipulator_gn/moz.build212
-rw-r--r--third_party/libwebrtc/modules/audio_mixer/audio_frame_manipulator_unittest.cc66
-rw-r--r--third_party/libwebrtc/modules/audio_mixer/audio_mixer_impl.cc264
-rw-r--r--third_party/libwebrtc/modules/audio_mixer/audio_mixer_impl.h100
-rw-r--r--third_party/libwebrtc/modules/audio_mixer/audio_mixer_impl_gn/moz.build215
-rw-r--r--third_party/libwebrtc/modules/audio_mixer/audio_mixer_impl_unittest.cc794
-rw-r--r--third_party/libwebrtc/modules/audio_mixer/audio_mixer_test.cc182
-rw-r--r--third_party/libwebrtc/modules/audio_mixer/default_output_rate_calculator.cc41
-rw-r--r--third_party/libwebrtc/modules/audio_mixer/default_output_rate_calculator.h36
-rw-r--r--third_party/libwebrtc/modules/audio_mixer/frame_combiner.cc245
-rw-r--r--third_party/libwebrtc/modules/audio_mixer/frame_combiner.h62
-rw-r--r--third_party/libwebrtc/modules/audio_mixer/frame_combiner_unittest.cc341
-rw-r--r--third_party/libwebrtc/modules/audio_mixer/g3doc/index.md54
-rw-r--r--third_party/libwebrtc/modules/audio_mixer/gain_change_calculator.cc63
-rw-r--r--third_party/libwebrtc/modules/audio_mixer/gain_change_calculator.h42
-rw-r--r--third_party/libwebrtc/modules/audio_mixer/output_rate_calculator.h32
-rw-r--r--third_party/libwebrtc/modules/audio_mixer/sine_wave_generator.cc35
-rw-r--r--third_party/libwebrtc/modules/audio_mixer/sine_wave_generator.h40
23 files changed, 3107 insertions, 0 deletions
diff --git a/third_party/libwebrtc/modules/audio_mixer/BUILD.gn b/third_party/libwebrtc/modules/audio_mixer/BUILD.gn
new file mode 100644
index 0000000000..1196835fec
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_mixer/BUILD.gn
@@ -0,0 +1,143 @@
+# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../webrtc.gni")
+
+group("audio_mixer") {
+ deps = [
+ ":audio_frame_manipulator",
+ ":audio_mixer_impl",
+ ]
+}
+
+rtc_library("audio_mixer_impl") {
+ visibility = [ "*" ]
+ sources = [
+ "audio_mixer_impl.cc",
+ "audio_mixer_impl.h",
+ "default_output_rate_calculator.cc",
+ "default_output_rate_calculator.h",
+ "frame_combiner.cc",
+ "frame_combiner.h",
+ "output_rate_calculator.h",
+ ]
+
+ public = [
+ "audio_mixer_impl.h",
+ "default_output_rate_calculator.h", # For creating a mixer with limiter
+ # disabled.
+ "frame_combiner.h",
+ ]
+
+ configs += [ "../audio_processing:apm_debug_dump" ]
+
+ deps = [
+ ":audio_frame_manipulator",
+ "../../api:array_view",
+ "../../api:rtp_packet_info",
+ "../../api:scoped_refptr",
+ "../../api/audio:audio_frame_api",
+ "../../api/audio:audio_mixer_api",
+ "../../audio/utility:audio_frame_operations",
+ "../../common_audio",
+ "../../rtc_base:checks",
+ "../../rtc_base:logging",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:race_checker",
+ "../../rtc_base:refcount",
+ "../../rtc_base:safe_conversions",
+ "../../rtc_base/synchronization:mutex",
+ "../../system_wrappers",
+ "../../system_wrappers:metrics",
+ "../audio_processing:api",
+ "../audio_processing:apm_logging",
+ "../audio_processing:audio_frame_view",
+ "../audio_processing/agc2:fixed_digital",
+ ]
+}
+
+rtc_library("audio_frame_manipulator") {
+ visibility = [
+ ":*",
+ "../../modules:*",
+ ]
+
+ sources = [
+ "audio_frame_manipulator.cc",
+ "audio_frame_manipulator.h",
+ ]
+
+ deps = [
+ "../../api/audio:audio_frame_api",
+ "../../audio/utility:audio_frame_operations",
+ "../../rtc_base:checks",
+ ]
+}
+
+if (rtc_include_tests) {
+ rtc_library("audio_mixer_test_utils") {
+ testonly = true
+
+ sources = [
+ "gain_change_calculator.cc",
+ "gain_change_calculator.h",
+ "sine_wave_generator.cc",
+ "sine_wave_generator.h",
+ ]
+
+ deps = [
+ ":audio_frame_manipulator",
+ ":audio_mixer_impl",
+ "../../api:array_view",
+ "../../api/audio:audio_frame_api",
+ "../../rtc_base:checks",
+ "../../rtc_base:safe_conversions",
+ ]
+ }
+
+ rtc_library("audio_mixer_unittests") {
+ testonly = true
+
+ sources = [
+ "audio_frame_manipulator_unittest.cc",
+ "audio_mixer_impl_unittest.cc",
+ "frame_combiner_unittest.cc",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+ deps = [
+ ":audio_frame_manipulator",
+ ":audio_mixer_impl",
+ ":audio_mixer_test_utils",
+ "../../api:array_view",
+ "../../api:rtp_packet_info",
+ "../../api/audio:audio_mixer_api",
+ "../../api/units:timestamp",
+ "../../audio/utility:audio_frame_operations",
+ "../../rtc_base:checks",
+ "../../rtc_base:stringutils",
+ "../../rtc_base:task_queue_for_test",
+ "../../test:test_support",
+ ]
+ }
+
+ if (!build_with_chromium) {
+ rtc_executable("audio_mixer_test") {
+ testonly = true
+ sources = [ "audio_mixer_test.cc" ]
+
+ deps = [
+ ":audio_mixer_impl",
+ "../../api/audio:audio_mixer_api",
+ "../../common_audio",
+ "../../rtc_base:stringutils",
+ "//third_party/abseil-cpp/absl/flags:flag",
+ "//third_party/abseil-cpp/absl/flags:parse",
+ ]
+ }
+ }
+}
diff --git a/third_party/libwebrtc/modules/audio_mixer/DEPS b/third_party/libwebrtc/modules/audio_mixer/DEPS
new file mode 100644
index 0000000000..46f29bccf8
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_mixer/DEPS
@@ -0,0 +1,13 @@
+include_rules = [
+ "+audio/utility/audio_frame_operations.h",
+ "+audio/utility/channel_mixer.h",
+ "+call",
+ "+common_audio",
+ "+modules/audio_coding",
+ "+modules/audio_device",
+ "+modules/audio_processing",
+ "+modules/pacing",
+ "+modules/rtp_rtcp",
+ "+modules/utility",
+ "+system_wrappers",
+]
diff --git a/third_party/libwebrtc/modules/audio_mixer/OWNERS b/third_party/libwebrtc/modules/audio_mixer/OWNERS
new file mode 100644
index 0000000000..5edc304ab3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_mixer/OWNERS
@@ -0,0 +1,2 @@
+alessiob@webrtc.org
+henrik.lundin@webrtc.org
diff --git a/third_party/libwebrtc/modules/audio_mixer/audio_frame_manipulator.cc b/third_party/libwebrtc/modules/audio_mixer/audio_frame_manipulator.cc
new file mode 100644
index 0000000000..3100271cfb
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_mixer/audio_frame_manipulator.cc
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_mixer/audio_frame_manipulator.h"
+
+#include "audio/utility/audio_frame_operations.h"
+#include "audio/utility/channel_mixer.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+uint32_t AudioMixerCalculateEnergy(const AudioFrame& audio_frame) {
+ if (audio_frame.muted()) {
+ return 0;
+ }
+
+ uint32_t energy = 0;
+ const int16_t* frame_data = audio_frame.data();
+ for (size_t position = 0;
+ position < audio_frame.samples_per_channel_ * audio_frame.num_channels_;
+ position++) {
+ // TODO(aleloi): This can overflow. Convert to floats.
+ energy += frame_data[position] * frame_data[position];
+ }
+ return energy;
+}
+
+void Ramp(float start_gain, float target_gain, AudioFrame* audio_frame) {
+ RTC_DCHECK(audio_frame);
+ RTC_DCHECK_GE(start_gain, 0.0f);
+ RTC_DCHECK_GE(target_gain, 0.0f);
+ if (start_gain == target_gain || audio_frame->muted()) {
+ return;
+ }
+
+ size_t samples = audio_frame->samples_per_channel_;
+ RTC_DCHECK_LT(0, samples);
+ float increment = (target_gain - start_gain) / samples;
+ float gain = start_gain;
+ int16_t* frame_data = audio_frame->mutable_data();
+ for (size_t i = 0; i < samples; ++i) {
+ // If the audio is interleaved of several channels, we want to
+ // apply the same gain change to the ith sample of every channel.
+ for (size_t ch = 0; ch < audio_frame->num_channels_; ++ch) {
+ frame_data[audio_frame->num_channels_ * i + ch] *= gain;
+ }
+ gain += increment;
+ }
+}
+
+void RemixFrame(size_t target_number_of_channels, AudioFrame* frame) {
+ RTC_DCHECK_GE(target_number_of_channels, 1);
+ // TODO(bugs.webrtc.org/10783): take channel layout into account as well.
+ if (frame->num_channels() == target_number_of_channels) {
+ return;
+ }
+
+ // Use legacy components for the most simple cases (mono <-> stereo) to ensure
+ // that native WebRTC clients are not affected when support for multi-channel
+ // audio is added to Chrome.
+ // TODO(bugs.webrtc.org/10783): utilize channel mixer for mono/stereo as well.
+ if (target_number_of_channels < 3 && frame->num_channels() < 3) {
+ if (frame->num_channels() > target_number_of_channels) {
+ AudioFrameOperations::DownmixChannels(target_number_of_channels, frame);
+ } else {
+ AudioFrameOperations::UpmixChannels(target_number_of_channels, frame);
+ }
+ } else {
+ // Use generic channel mixer when the number of channels for input our
+ // output is larger than two. E.g. stereo -> 5.1 channel up-mixing.
+ // TODO(bugs.webrtc.org/10783): ensure that actual channel layouts are used
+ // instead of guessing based on number of channels.
+ const ChannelLayout output_layout(
+ GuessChannelLayout(target_number_of_channels));
+ ChannelMixer mixer(GuessChannelLayout(frame->num_channels()),
+ output_layout);
+ mixer.Transform(frame);
+ RTC_DCHECK_EQ(frame->channel_layout(), output_layout);
+ }
+ RTC_DCHECK_EQ(frame->num_channels(), target_number_of_channels)
+ << "Wrong number of channels, " << frame->num_channels() << " vs "
+ << target_number_of_channels;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_mixer/audio_frame_manipulator.h b/third_party/libwebrtc/modules/audio_mixer/audio_frame_manipulator.h
new file mode 100644
index 0000000000..ab3633d266
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_mixer/audio_frame_manipulator.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_MIXER_AUDIO_FRAME_MANIPULATOR_H_
+#define MODULES_AUDIO_MIXER_AUDIO_FRAME_MANIPULATOR_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "api/audio/audio_frame.h"
+
+namespace webrtc {
+
+// Updates the audioFrame's energy (based on its samples).
+uint32_t AudioMixerCalculateEnergy(const AudioFrame& audio_frame);
+
+// Ramps up or down the provided audio frame. Ramp(0, 1, frame) will
+// linearly increase the samples in the frame from 0 to full volume.
+void Ramp(float start_gain, float target_gain, AudioFrame* audio_frame);
+
+// Downmixes or upmixes a frame between stereo and mono.
+void RemixFrame(size_t target_number_of_channels, AudioFrame* frame);
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_MIXER_AUDIO_FRAME_MANIPULATOR_H_
diff --git a/third_party/libwebrtc/modules/audio_mixer/audio_frame_manipulator_gn/moz.build b/third_party/libwebrtc/modules/audio_mixer/audio_frame_manipulator_gn/moz.build
new file mode 100644
index 0000000000..79dbb7b153
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_mixer/audio_frame_manipulator_gn/moz.build
@@ -0,0 +1,212 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_mixer/audio_frame_manipulator.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("audio_frame_manipulator_gn")
diff --git a/third_party/libwebrtc/modules/audio_mixer/audio_frame_manipulator_unittest.cc b/third_party/libwebrtc/modules/audio_mixer/audio_frame_manipulator_unittest.cc
new file mode 100644
index 0000000000..cfb3f2c230
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_mixer/audio_frame_manipulator_unittest.cc
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_mixer/audio_frame_manipulator.h"
+
+#include <algorithm>
+
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+void FillFrameWithConstants(size_t samples_per_channel,
+ size_t number_of_channels,
+ int16_t value,
+ AudioFrame* frame) {
+ frame->num_channels_ = number_of_channels;
+ frame->samples_per_channel_ = samples_per_channel;
+ int16_t* frame_data = frame->mutable_data();
+ std::fill(frame_data, frame_data + samples_per_channel * number_of_channels,
+ value);
+}
+} // namespace
+
+TEST(AudioFrameManipulator, CompareForwardRampWithExpectedResultStereo) {
+ constexpr int kSamplesPerChannel = 5;
+ constexpr int kNumberOfChannels = 2;
+
+ // Create a frame with values 5, 5, 5, ... and channels & samples as above.
+ AudioFrame frame;
+ FillFrameWithConstants(kSamplesPerChannel, kNumberOfChannels, 5, &frame);
+
+ Ramp(0.0f, 1.0f, &frame);
+
+ const int total_samples = kSamplesPerChannel * kNumberOfChannels;
+ const int16_t expected_result[total_samples] = {0, 0, 1, 1, 2, 2, 3, 3, 4, 4};
+ const int16_t* frame_data = frame.data();
+ EXPECT_TRUE(
+ std::equal(frame_data, frame_data + total_samples, expected_result));
+}
+
+TEST(AudioFrameManipulator, CompareBackwardRampWithExpectedResultMono) {
+ constexpr int kSamplesPerChannel = 5;
+ constexpr int kNumberOfChannels = 1;
+
+ // Create a frame with values 5, 5, 5, ... and channels & samples as above.
+ AudioFrame frame;
+ FillFrameWithConstants(kSamplesPerChannel, kNumberOfChannels, 5, &frame);
+
+ Ramp(1.0f, 0.0f, &frame);
+
+ const int total_samples = kSamplesPerChannel * kNumberOfChannels;
+ const int16_t expected_result[total_samples] = {5, 4, 3, 2, 1};
+ const int16_t* frame_data = frame.data();
+ EXPECT_TRUE(
+ std::equal(frame_data, frame_data + total_samples, expected_result));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_mixer/audio_mixer_impl.cc b/third_party/libwebrtc/modules/audio_mixer/audio_mixer_impl.cc
new file mode 100644
index 0000000000..73a6e3a8a4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_mixer/audio_mixer_impl.cc
@@ -0,0 +1,264 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_mixer/audio_mixer_impl.h"
+
+#include <stdint.h>
+
+#include <algorithm>
+#include <iterator>
+#include <type_traits>
+#include <utility>
+
+#include "modules/audio_mixer/audio_frame_manipulator.h"
+#include "modules/audio_mixer/default_output_rate_calculator.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+struct AudioMixerImpl::SourceStatus {
+ SourceStatus(Source* audio_source, bool is_mixed, float gain)
+ : audio_source(audio_source), is_mixed(is_mixed), gain(gain) {}
+ Source* audio_source = nullptr;
+ bool is_mixed = false;
+ float gain = 0.0f;
+
+ // A frame that will be passed to audio_source->GetAudioFrameWithInfo.
+ AudioFrame audio_frame;
+};
+
+namespace {
+
+struct SourceFrame {
+ SourceFrame() = default;
+
+ SourceFrame(AudioMixerImpl::SourceStatus* source_status,
+ AudioFrame* audio_frame,
+ bool muted)
+ : source_status(source_status), audio_frame(audio_frame), muted(muted) {
+ RTC_DCHECK(source_status);
+ RTC_DCHECK(audio_frame);
+ if (!muted) {
+ energy = AudioMixerCalculateEnergy(*audio_frame);
+ }
+ }
+
+ SourceFrame(AudioMixerImpl::SourceStatus* source_status,
+ AudioFrame* audio_frame,
+ bool muted,
+ uint32_t energy)
+ : source_status(source_status),
+ audio_frame(audio_frame),
+ muted(muted),
+ energy(energy) {
+ RTC_DCHECK(source_status);
+ RTC_DCHECK(audio_frame);
+ }
+
+ AudioMixerImpl::SourceStatus* source_status = nullptr;
+ AudioFrame* audio_frame = nullptr;
+ bool muted = true;
+ uint32_t energy = 0;
+};
+
+// ShouldMixBefore(a, b) is used to select mixer sources.
+// Returns true if `a` is preferred over `b` as a source to be mixed.
+bool ShouldMixBefore(const SourceFrame& a, const SourceFrame& b) {
+ if (a.muted != b.muted) {
+ return b.muted;
+ }
+
+ const auto a_activity = a.audio_frame->vad_activity_;
+ const auto b_activity = b.audio_frame->vad_activity_;
+
+ if (a_activity != b_activity) {
+ return a_activity == AudioFrame::kVadActive;
+ }
+
+ return a.energy > b.energy;
+}
+
+void RampAndUpdateGain(
+ rtc::ArrayView<const SourceFrame> mixed_sources_and_frames) {
+ for (const auto& source_frame : mixed_sources_and_frames) {
+ float target_gain = source_frame.source_status->is_mixed ? 1.0f : 0.0f;
+ Ramp(source_frame.source_status->gain, target_gain,
+ source_frame.audio_frame);
+ source_frame.source_status->gain = target_gain;
+ }
+}
+
+std::vector<std::unique_ptr<AudioMixerImpl::SourceStatus>>::const_iterator
+FindSourceInList(
+ AudioMixerImpl::Source const* audio_source,
+ std::vector<std::unique_ptr<AudioMixerImpl::SourceStatus>> const*
+ audio_source_list) {
+ return std::find_if(
+ audio_source_list->begin(), audio_source_list->end(),
+ [audio_source](const std::unique_ptr<AudioMixerImpl::SourceStatus>& p) {
+ return p->audio_source == audio_source;
+ });
+}
+} // namespace
+
+struct AudioMixerImpl::HelperContainers {
+ void resize(size_t size) {
+ audio_to_mix.resize(size);
+ audio_source_mixing_data_list.resize(size);
+ ramp_list.resize(size);
+ preferred_rates.resize(size);
+ }
+
+ std::vector<AudioFrame*> audio_to_mix;
+ std::vector<SourceFrame> audio_source_mixing_data_list;
+ std::vector<SourceFrame> ramp_list;
+ std::vector<int> preferred_rates;
+};
+
+AudioMixerImpl::AudioMixerImpl(
+ std::unique_ptr<OutputRateCalculator> output_rate_calculator,
+ bool use_limiter,
+ int max_sources_to_mix)
+ : max_sources_to_mix_(max_sources_to_mix),
+ output_rate_calculator_(std::move(output_rate_calculator)),
+ audio_source_list_(),
+ helper_containers_(std::make_unique<HelperContainers>()),
+ frame_combiner_(use_limiter) {
+ RTC_CHECK_GE(max_sources_to_mix, 1) << "At least one source must be mixed";
+ audio_source_list_.reserve(max_sources_to_mix);
+ helper_containers_->resize(max_sources_to_mix);
+}
+
+AudioMixerImpl::~AudioMixerImpl() {}
+
+rtc::scoped_refptr<AudioMixerImpl> AudioMixerImpl::Create(
+ int max_sources_to_mix) {
+ return Create(std::unique_ptr<DefaultOutputRateCalculator>(
+ new DefaultOutputRateCalculator()),
+ /*use_limiter=*/true, max_sources_to_mix);
+}
+
+rtc::scoped_refptr<AudioMixerImpl> AudioMixerImpl::Create(
+ std::unique_ptr<OutputRateCalculator> output_rate_calculator,
+ bool use_limiter,
+ int max_sources_to_mix) {
+ return rtc::make_ref_counted<AudioMixerImpl>(
+ std::move(output_rate_calculator), use_limiter, max_sources_to_mix);
+}
+
+void AudioMixerImpl::Mix(size_t number_of_channels,
+ AudioFrame* audio_frame_for_mixing) {
+ RTC_DCHECK(number_of_channels >= 1);
+ MutexLock lock(&mutex_);
+
+ size_t number_of_streams = audio_source_list_.size();
+
+ std::transform(audio_source_list_.begin(), audio_source_list_.end(),
+ helper_containers_->preferred_rates.begin(),
+ [&](std::unique_ptr<SourceStatus>& a) {
+ return a->audio_source->PreferredSampleRate();
+ });
+
+ int output_frequency = output_rate_calculator_->CalculateOutputRateFromRange(
+ rtc::ArrayView<const int>(helper_containers_->preferred_rates.data(),
+ number_of_streams));
+
+ frame_combiner_.Combine(GetAudioFromSources(output_frequency),
+ number_of_channels, output_frequency,
+ number_of_streams, audio_frame_for_mixing);
+}
+
+bool AudioMixerImpl::AddSource(Source* audio_source) {
+ RTC_DCHECK(audio_source);
+ MutexLock lock(&mutex_);
+ RTC_DCHECK(FindSourceInList(audio_source, &audio_source_list_) ==
+ audio_source_list_.end())
+ << "Source already added to mixer";
+ audio_source_list_.emplace_back(new SourceStatus(audio_source, false, 0));
+ helper_containers_->resize(audio_source_list_.size());
+ return true;
+}
+
+void AudioMixerImpl::RemoveSource(Source* audio_source) {
+ RTC_DCHECK(audio_source);
+ MutexLock lock(&mutex_);
+ const auto iter = FindSourceInList(audio_source, &audio_source_list_);
+ RTC_DCHECK(iter != audio_source_list_.end()) << "Source not present in mixer";
+ audio_source_list_.erase(iter);
+}
+
+rtc::ArrayView<AudioFrame* const> AudioMixerImpl::GetAudioFromSources(
+ int output_frequency) {
+ // Get audio from the audio sources and put it in the SourceFrame vector.
+ int audio_source_mixing_data_count = 0;
+ for (auto& source_and_status : audio_source_list_) {
+ const auto audio_frame_info =
+ source_and_status->audio_source->GetAudioFrameWithInfo(
+ output_frequency, &source_and_status->audio_frame);
+
+ if (audio_frame_info == Source::AudioFrameInfo::kError) {
+ RTC_LOG_F(LS_WARNING) << "failed to GetAudioFrameWithInfo() from source";
+ continue;
+ }
+ helper_containers_
+ ->audio_source_mixing_data_list[audio_source_mixing_data_count++] =
+ SourceFrame(source_and_status.get(), &source_and_status->audio_frame,
+ audio_frame_info == Source::AudioFrameInfo::kMuted);
+ }
+ rtc::ArrayView<SourceFrame> audio_source_mixing_data_view(
+ helper_containers_->audio_source_mixing_data_list.data(),
+ audio_source_mixing_data_count);
+
+ // Sort frames by sorting function.
+ std::sort(audio_source_mixing_data_view.begin(),
+ audio_source_mixing_data_view.end(), ShouldMixBefore);
+
+ int max_audio_frame_counter = max_sources_to_mix_;
+ int ramp_list_lengh = 0;
+ int audio_to_mix_count = 0;
+ // Go through list in order and put unmuted frames in result list.
+ for (const auto& p : audio_source_mixing_data_view) {
+ // Filter muted.
+ if (p.muted) {
+ p.source_status->is_mixed = false;
+ continue;
+ }
+
+ // Add frame to result vector for mixing.
+ bool is_mixed = false;
+ if (max_audio_frame_counter > 0) {
+ --max_audio_frame_counter;
+ helper_containers_->audio_to_mix[audio_to_mix_count++] = p.audio_frame;
+ helper_containers_->ramp_list[ramp_list_lengh++] =
+ SourceFrame(p.source_status, p.audio_frame, false, -1);
+ is_mixed = true;
+ }
+ p.source_status->is_mixed = is_mixed;
+ }
+ RampAndUpdateGain(rtc::ArrayView<SourceFrame>(
+ helper_containers_->ramp_list.data(), ramp_list_lengh));
+ return rtc::ArrayView<AudioFrame* const>(
+ helper_containers_->audio_to_mix.data(), audio_to_mix_count);
+}
+
+bool AudioMixerImpl::GetAudioSourceMixabilityStatusForTest(
+ AudioMixerImpl::Source* audio_source) const {
+ MutexLock lock(&mutex_);
+
+ const auto iter = FindSourceInList(audio_source, &audio_source_list_);
+ if (iter != audio_source_list_.end()) {
+ return (*iter)->is_mixed;
+ }
+
+ RTC_LOG(LS_ERROR) << "Audio source unknown";
+ return false;
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_mixer/audio_mixer_impl.h b/third_party/libwebrtc/modules/audio_mixer/audio_mixer_impl.h
new file mode 100644
index 0000000000..76b1131777
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_mixer/audio_mixer_impl.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_MIXER_AUDIO_MIXER_IMPL_H_
+#define MODULES_AUDIO_MIXER_AUDIO_MIXER_IMPL_H_
+
+#include <stddef.h>
+
+#include <memory>
+#include <vector>
+
+#include "api/array_view.h"
+#include "api/audio/audio_frame.h"
+#include "api/audio/audio_mixer.h"
+#include "api/scoped_refptr.h"
+#include "modules/audio_mixer/frame_combiner.h"
+#include "modules/audio_mixer/output_rate_calculator.h"
+#include "rtc_base/race_checker.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace webrtc {
+
+class AudioMixerImpl : public AudioMixer {
+ public:
+ struct SourceStatus;
+
+ // AudioProcessing only accepts 10 ms frames.
+ static const int kFrameDurationInMs = 10;
+
+ static const int kDefaultNumberOfMixedAudioSources = 3;
+
+ static rtc::scoped_refptr<AudioMixerImpl> Create(
+ int max_sources_to_mix = kDefaultNumberOfMixedAudioSources);
+
+ static rtc::scoped_refptr<AudioMixerImpl> Create(
+ std::unique_ptr<OutputRateCalculator> output_rate_calculator,
+ bool use_limiter,
+ int max_sources_to_mix = kDefaultNumberOfMixedAudioSources);
+
+ ~AudioMixerImpl() override;
+
+ AudioMixerImpl(const AudioMixerImpl&) = delete;
+ AudioMixerImpl& operator=(const AudioMixerImpl&) = delete;
+
+ // AudioMixer functions
+ bool AddSource(Source* audio_source) override;
+ void RemoveSource(Source* audio_source) override;
+
+ void Mix(size_t number_of_channels,
+ AudioFrame* audio_frame_for_mixing) override
+ RTC_LOCKS_EXCLUDED(mutex_);
+
+ // Returns true if the source was mixed last round. Returns
+ // false and logs an error if the source was never added to the
+ // mixer.
+ bool GetAudioSourceMixabilityStatusForTest(Source* audio_source) const;
+
+ protected:
+ AudioMixerImpl(std::unique_ptr<OutputRateCalculator> output_rate_calculator,
+ bool use_limiter,
+ int max_sources_to_mix);
+
+ private:
+ struct HelperContainers;
+
+ // Compute what audio sources to mix from audio_source_list_. Ramp
+ // in and out. Update mixed status. Mixes up to
+ // kMaximumAmountOfMixedAudioSources audio sources.
+ rtc::ArrayView<AudioFrame* const> GetAudioFromSources(int output_frequency)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ // The critical section lock guards audio source insertion and
+ // removal, which can be done from any thread. The race checker
+ // checks that mixing is done sequentially.
+ mutable Mutex mutex_;
+
+ const int max_sources_to_mix_;
+
+ std::unique_ptr<OutputRateCalculator> output_rate_calculator_;
+
+ // List of all audio sources.
+ std::vector<std::unique_ptr<SourceStatus>> audio_source_list_
+ RTC_GUARDED_BY(mutex_);
+ const std::unique_ptr<HelperContainers> helper_containers_
+ RTC_GUARDED_BY(mutex_);
+
+ // Component that handles actual adding of audio frames.
+ FrameCombiner frame_combiner_;
+};
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_MIXER_AUDIO_MIXER_IMPL_H_
diff --git a/third_party/libwebrtc/modules/audio_mixer/audio_mixer_impl_gn/moz.build b/third_party/libwebrtc/modules/audio_mixer/audio_mixer_impl_gn/moz.build
new file mode 100644
index 0000000000..1e88321d53
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_mixer/audio_mixer_impl_gn/moz.build
@@ -0,0 +1,215 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1"
+DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True
+DEFINES["RTC_ENABLE_VP9"] = True
+DEFINES["WEBRTC_APM_DEBUG_DUMP"] = "0"
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_LIBRARY_IMPL"] = True
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/third_party/libwebrtc/",
+ "/third_party/libwebrtc/third_party/abseil-cpp/",
+ "/tools/profiler/public"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/modules/audio_mixer/audio_mixer_impl.cc",
+ "/third_party/libwebrtc/modules/audio_mixer/default_output_rate_calculator.cc",
+ "/third_party/libwebrtc/modules/audio_mixer/frame_combiner.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1"
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_GNU_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0"
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_UDEV"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_GLIB"] = "1"
+ DEFINES["USE_OZONE"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["_LARGEFILE64_SOURCE"] = True
+ DEFINES["_LARGEFILE_SOURCE"] = True
+ DEFINES["__STDC_CONSTANT_MACROS"] = True
+ DEFINES["__STDC_FORMAT_MACROS"] = True
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "2"
+ DEFINES["UNICODE"] = True
+ DEFINES["USE_AURA"] = "1"
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP"
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_HAS_NODISCARD"] = True
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "crypt32",
+ "iphlpapi",
+ "secur32",
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_DEBUG"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0"
+
+if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_X11"] = "1"
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support",
+ "unwind"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ OS_LIBS += [
+ "android_support"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "arm" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["WEBRTC_ENABLE_AVX2"] = True
+ DEFINES["_GNU_SOURCE"] = True
+
+Library("audio_mixer_impl_gn")
diff --git a/third_party/libwebrtc/modules/audio_mixer/audio_mixer_impl_unittest.cc b/third_party/libwebrtc/modules/audio_mixer/audio_mixer_impl_unittest.cc
new file mode 100644
index 0000000000..20b7d299f2
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_mixer/audio_mixer_impl_unittest.cc
@@ -0,0 +1,794 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_mixer/audio_mixer_impl.h"
+
+#include <string.h>
+
+#include <cstdint>
+#include <limits>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/audio/audio_mixer.h"
+#include "api/rtp_packet_info.h"
+#include "api/rtp_packet_infos.h"
+#include "api/units/timestamp.h"
+#include "modules/audio_mixer/default_output_rate_calculator.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/strings/string_builder.h"
+#include "rtc_base/task_queue_for_test.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+using ::testing::_;
+using ::testing::Exactly;
+using ::testing::Invoke;
+using ::testing::Return;
+using ::testing::UnorderedElementsAre;
+
+namespace webrtc {
+
+namespace {
+
+constexpr int kDefaultSampleRateHz = 48000;
+
+// Gives `frame` sensible defaults for a 10 ms mono frame at the default
+// sample rate: active VAD and normal speech.
+void ResetFrame(AudioFrame* frame) {
+  constexpr size_t kSamplesPerChannel10Ms = kDefaultSampleRateHz / 100;
+  frame->num_channels_ = 1;
+  frame->sample_rate_hz_ = kDefaultSampleRateHz;
+  frame->samples_per_channel_ = kSamplesPerChannel10Ms;  // 10 ms of audio.
+  frame->speech_type_ = AudioFrame::kNormalSpeech;
+  frame->vad_activity_ = AudioFrame::kVadActive;
+}
+
+// Builds a one-line description of a test configuration, for use in
+// SCOPED_TRACE output.
+std::string ProduceDebugText(int sample_rate_hz,
+                             int number_of_channels,
+                             int number_of_sources) {
+  rtc::StringBuilder ss;
+  ss << "Sample rate: " << sample_rate_hz << " "
+     << "Number of channels: " << number_of_channels << " "
+     << "Number of sources: " << number_of_sources;
+  return ss.Release();
+}
+
+AudioFrame frame_for_mixing;
+
+} // namespace
+
+// gmock AudioMixer::Source whose GetAudioFrameWithInfo() serves a locally
+// stored fake frame (rewriting only the rate fields) and reports a
+// configurable AudioFrameInfo.
+class MockMixerAudioSource : public ::testing::NiceMock<AudioMixer::Source> {
+ public:
+  MockMixerAudioSource()
+      : fake_audio_frame_info_(AudioMixer::Source::AudioFrameInfo::kNormal) {
+    ON_CALL(*this, GetAudioFrameWithInfo(_, _))
+        .WillByDefault(
+            Invoke(this, &MockMixerAudioSource::FakeAudioFrameWithInfo));
+    ON_CALL(*this, PreferredSampleRate())
+        .WillByDefault(Return(kDefaultSampleRateHz));
+  }
+
+  MOCK_METHOD(AudioFrameInfo,
+              GetAudioFrameWithInfo,
+              (int sample_rate_hz, AudioFrame* audio_frame),
+              (override));
+
+  MOCK_METHOD(int, PreferredSampleRate, (), (const, override));
+  MOCK_METHOD(int, Ssrc, (), (const, override));
+
+  // Frame handed out by GetAudioFrameWithInfo(); tests mutate it to shape
+  // this source's audio.
+  AudioFrame* fake_frame() { return &fake_frame_; }
+  AudioFrameInfo fake_info() { return fake_audio_frame_info_; }
+  void set_fake_info(const AudioFrameInfo audio_frame_info) {
+    fake_audio_frame_info_ = audio_frame_info;
+  }
+
+  // Packet infos copied into every frame this source produces.
+  void set_packet_infos(const RtpPacketInfos& packet_infos) {
+    packet_infos_ = packet_infos;
+  }
+
+ private:
+  // Default GetAudioFrameWithInfo() action: copy the fake frame and stamp
+  // it with the requested rate (10 ms worth of samples).
+  AudioFrameInfo FakeAudioFrameWithInfo(int sample_rate_hz,
+                                        AudioFrame* audio_frame) {
+    audio_frame->CopyFrom(fake_frame_);
+    audio_frame->sample_rate_hz_ = sample_rate_hz;
+    audio_frame->samples_per_channel_ =
+        rtc::CheckedDivExact(sample_rate_hz, 100);
+    audio_frame->packet_infos_ = packet_infos_;
+    return fake_info();
+  }
+
+  AudioFrame fake_frame_;
+  AudioFrameInfo fake_audio_frame_info_;
+  RtpPacketInfos packet_infos_;
+};
+
+// OutputRateCalculator stub that ignores the preferred rates and always
+// returns the rate given at construction.
+class CustomRateCalculator : public OutputRateCalculator {
+ public:
+  explicit CustomRateCalculator(int rate) : fixed_rate_(rate) {}
+  int CalculateOutputRateFromRange(
+      rtc::ArrayView<const int> preferred_rates) override {
+    return fixed_rate_;
+  }
+
+ private:
+  const int fixed_rate_;
+};
+
+// Creates participants from `frames` and `frame_info` and adds them
+// to the mixer. Compares mixed status with `expected_status`
+// All three vectors must have the same length.
+void MixAndCompare(
+    const std::vector<AudioFrame>& frames,
+    const std::vector<AudioMixer::Source::AudioFrameInfo>& frame_info,
+    const std::vector<bool>& expected_status) {
+  const size_t num_audio_sources = frames.size();
+  RTC_DCHECK(frames.size() == frame_info.size());
+  RTC_DCHECK(frame_info.size() == expected_status.size());
+
+  const auto mixer = AudioMixerImpl::Create();
+  std::vector<MockMixerAudioSource> participants(num_audio_sources);
+
+  for (size_t i = 0; i < num_audio_sources; ++i) {
+    participants[i].fake_frame()->CopyFrom(frames[i]);
+    participants[i].set_fake_info(frame_info[i]);
+  }
+
+  // Expectations must be in place before Mix() triggers the calls.
+  for (size_t i = 0; i < num_audio_sources; ++i) {
+    EXPECT_TRUE(mixer->AddSource(&participants[i]));
+    EXPECT_CALL(participants[i], GetAudioFrameWithInfo(kDefaultSampleRateHz, _))
+        .Times(Exactly(1));
+  }
+
+  mixer->Mix(1, &frame_for_mixing);
+
+  for (size_t i = 0; i < num_audio_sources; ++i) {
+    EXPECT_EQ(expected_status[i],
+              mixer->GetAudioSourceMixabilityStatusForTest(&participants[i]))
+        << "Mixed status of AudioSource #" << i << " wrong.";
+  }
+}
+
+// Makes `audio_source` report `native_sample_rate` as its preferred rate,
+// sizes its fake frame for 10 ms at that rate, and runs one mono mix
+// iteration into `mix_frame`.
+void MixMonoAtGivenNativeRate(int native_sample_rate,
+                              AudioFrame* mix_frame,
+                              rtc::scoped_refptr<AudioMixer> mixer,
+                              MockMixerAudioSource* audio_source) {
+  ON_CALL(*audio_source, PreferredSampleRate())
+      .WillByDefault(Return(native_sample_rate));
+  AudioFrame* const fake = audio_source->fake_frame();
+  fake->sample_rate_hz_ = native_sample_rate;
+  fake->samples_per_channel_ = native_sample_rate / 100;
+
+  mixer->Mix(/*number_of_channels=*/1, mix_frame);
+}
+
+// With more sources than mixing slots, the mixer should pick the loudest
+// VAD-active sources; a VAD-passive source loses even with the largest
+// energy.
+TEST(AudioMixer, LargestEnergyVadActiveMixed) {
+  constexpr int kAudioSources =
+      AudioMixerImpl::kDefaultNumberOfMixedAudioSources + 3;
+
+  const auto mixer = AudioMixerImpl::Create();
+
+  MockMixerAudioSource participants[kAudioSources];
+
+  for (int i = 0; i < kAudioSources; ++i) {
+    ResetFrame(participants[i].fake_frame());
+
+    // We set the 80-th sample value since the first 80 samples may be
+    // modified by a ramped-in window.
+    participants[i].fake_frame()->mutable_data()[80] = i;
+
+    EXPECT_TRUE(mixer->AddSource(&participants[i]));
+    EXPECT_CALL(participants[i], GetAudioFrameWithInfo(_, _)).Times(Exactly(1));
+  }
+
+  // Last participant gives audio frame with passive VAD, although it has the
+  // largest energy.
+  participants[kAudioSources - 1].fake_frame()->vad_activity_ =
+      AudioFrame::kVadPassive;
+
+  AudioFrame audio_frame;
+  mixer->Mix(1,  // number of channels
+             &audio_frame);
+
+  // Expect the passive source and the quietest sources to be left out.
+  for (int i = 0; i < kAudioSources; ++i) {
+    bool is_mixed =
+        mixer->GetAudioSourceMixabilityStatusForTest(&participants[i]);
+    if (i == kAudioSources - 1 ||
+        i < kAudioSources - 1 -
+                AudioMixerImpl::kDefaultNumberOfMixedAudioSources) {
+      EXPECT_FALSE(is_mixed)
+          << "Mixing status of AudioSource #" << i << " wrong.";
+    } else {
+      EXPECT_TRUE(is_mixed)
+          << "Mixing status of AudioSource #" << i << " wrong.";
+    }
+  }
+}
+
+// A lone participant's audio should pass through the mixer unmodified
+// (after the initial ramp-in iteration).
+TEST(AudioMixer, FrameNotModifiedForSingleParticipant) {
+  const auto mixer = AudioMixerImpl::Create();
+
+  MockMixerAudioSource participant;
+
+  ResetFrame(participant.fake_frame());
+  const size_t n_samples = participant.fake_frame()->samples_per_channel_;
+
+  // Modify the frame so that it's not zero.
+  int16_t* fake_frame_data = participant.fake_frame()->mutable_data();
+  for (size_t j = 0; j < n_samples; ++j) {
+    fake_frame_data[j] = static_cast<int16_t>(j);
+  }
+
+  EXPECT_TRUE(mixer->AddSource(&participant));
+  EXPECT_CALL(participant, GetAudioFrameWithInfo(_, _)).Times(Exactly(2));
+
+  AudioFrame audio_frame;
+  // Two mix iteration to compare after the ramp-up step.
+  for (int i = 0; i < 2; ++i) {
+    mixer->Mix(1,  // number of channels
+               &audio_frame);
+  }
+
+  // memcmp counts bytes, so the sample count must be scaled by the sample
+  // size; otherwise only the first half of the int16_t data is compared.
+  EXPECT_EQ(0, memcmp(participant.fake_frame()->data(), audio_frame.data(),
+                      n_samples * sizeof(int16_t)));
+}
+
+// The mixer should request audio from a lone source at that source's own
+// native rate, so no resampling is ever needed.
+TEST(AudioMixer, SourceAtNativeRateShouldNeverResample) {
+  const auto mixer = AudioMixerImpl::Create();
+
+  MockMixerAudioSource audio_source;
+  ResetFrame(audio_source.fake_frame());
+
+  mixer->AddSource(&audio_source);
+
+  for (auto frequency : {8000, 16000, 32000, 48000}) {
+    EXPECT_CALL(audio_source, GetAudioFrameWithInfo(frequency, _))
+        .Times(Exactly(1));
+
+    MixMonoAtGivenNativeRate(frequency, &frame_for_mixing, mixer,
+                             &audio_source);
+  }
+}
+
+// The mixed output should carry the lone source's native sample rate.
+TEST(AudioMixer, MixerShouldMixAtNativeSourceRate) {
+  const auto mixer = AudioMixerImpl::Create();
+
+  MockMixerAudioSource audio_source;
+  ResetFrame(audio_source.fake_frame());
+
+  mixer->AddSource(&audio_source);
+
+  for (auto frequency : {8000, 16000, 32000, 48000}) {
+    MixMonoAtGivenNativeRate(frequency, &frame_for_mixing, mixer,
+                             &audio_source);
+
+    EXPECT_EQ(frequency, frame_for_mixing.sample_rate_hz_);
+  }
+}
+
+// A non-native preferred rate (44100) should be rounded up to the nearest
+// native rate (48000) for mixing.
+TEST(AudioMixer, MixerShouldAlwaysMixAtNativeRate) {
+  const auto mixer = AudioMixerImpl::Create();
+
+  MockMixerAudioSource participant;
+  ResetFrame(participant.fake_frame());
+  mixer->AddSource(&participant);
+
+  const int needed_frequency = 44100;
+  ON_CALL(participant, PreferredSampleRate())
+      .WillByDefault(Return(needed_frequency));
+
+  // We expect mixing frequency to be native and >= needed_frequency.
+  const int expected_mix_frequency = 48000;
+  EXPECT_CALL(participant, GetAudioFrameWithInfo(expected_mix_frequency, _))
+      .Times(Exactly(1));
+  participant.fake_frame()->sample_rate_hz_ = expected_mix_frequency;
+  participant.fake_frame()->samples_per_channel_ = expected_mix_frequency / 100;
+
+  mixer->Mix(1, &frame_for_mixing);
+
+  EXPECT_EQ(48000, frame_for_mixing.sample_rate_hz_);
+}
+
+// Check that the mixing rate is always >= participants preferred rate.
+TEST(AudioMixer, ShouldNotCauseQualityLossForMultipleSources) {
+  const auto mixer = AudioMixerImpl::Create();
+
+  std::vector<MockMixerAudioSource> audio_sources(2);
+  const std::vector<int> source_sample_rates = {8000, 16000};
+  for (int i = 0; i < 2; ++i) {
+    auto& source = audio_sources[i];
+    ResetFrame(source.fake_frame());
+    mixer->AddSource(&source);
+    const auto sample_rate = source_sample_rates[i];
+    EXPECT_CALL(source, PreferredSampleRate()).WillOnce(Return(sample_rate));
+
+    // Each source must be asked for audio at no less than its own rate.
+    EXPECT_CALL(source, GetAudioFrameWithInfo(::testing::Ge(sample_rate), _));
+  }
+  mixer->Mix(1, &frame_for_mixing);
+}
+
+// The output frame's channel count should follow the count requested in
+// Mix(), independent of the source's own channel count.
+TEST(AudioMixer, ParticipantNumberOfChannels) {
+  const auto mixer = AudioMixerImpl::Create();
+
+  MockMixerAudioSource participant;
+  ResetFrame(participant.fake_frame());
+
+  EXPECT_TRUE(mixer->AddSource(&participant));
+  for (size_t number_of_channels : {1, 2}) {
+    EXPECT_CALL(participant, GetAudioFrameWithInfo(kDefaultSampleRateHz, _))
+        .Times(Exactly(1));
+    mixer->Mix(number_of_channels, &frame_for_mixing);
+    EXPECT_EQ(number_of_channels, frame_for_mixing.num_channels_);
+  }
+}
+
+// Maximal amount of participants are mixed one iteration, then
+// another participant with higher energy is added.
+TEST(AudioMixer, RampedOutSourcesShouldNotBeMarkedMixed) {
+  constexpr int kAudioSources =
+      AudioMixerImpl::kDefaultNumberOfMixedAudioSources + 1;
+
+  const auto mixer = AudioMixerImpl::Create();
+  MockMixerAudioSource participants[kAudioSources];
+
+  for (int i = 0; i < kAudioSources; ++i) {
+    ResetFrame(participants[i].fake_frame());
+    // Set the participant audio energy to increase with the index
+    // `i`.
+    participants[i].fake_frame()->mutable_data()[0] = 100 * i;
+  }
+
+  // Add all participants but the loudest for mixing.
+  for (int i = 0; i < kAudioSources - 1; ++i) {
+    EXPECT_TRUE(mixer->AddSource(&participants[i]));
+    EXPECT_CALL(participants[i], GetAudioFrameWithInfo(kDefaultSampleRateHz, _))
+        .Times(Exactly(1));
+  }
+
+  // First mixer iteration
+  mixer->Mix(1, &frame_for_mixing);
+
+  // All participants but the loudest should have been mixed.
+  for (int i = 0; i < kAudioSources - 1; ++i) {
+    EXPECT_TRUE(mixer->GetAudioSourceMixabilityStatusForTest(&participants[i]))
+        << "Mixed status of AudioSource #" << i << " wrong.";
+  }
+
+  // Add new participant with higher energy.
+  EXPECT_TRUE(mixer->AddSource(&participants[kAudioSources - 1]));
+  for (int i = 0; i < kAudioSources; ++i) {
+    EXPECT_CALL(participants[i], GetAudioFrameWithInfo(kDefaultSampleRateHz, _))
+        .Times(Exactly(1));
+  }
+
+  mixer->Mix(1, &frame_for_mixing);
+
+  // The most quiet participant should not have been mixed.
+  EXPECT_FALSE(mixer->GetAudioSourceMixabilityStatusForTest(&participants[0]))
+      << "Mixed status of AudioSource #0 wrong.";
+
+  // The loudest participants should have been mixed.
+  for (int i = 1; i < kAudioSources; ++i) {
+    EXPECT_EQ(true,
+              mixer->GetAudioSourceMixabilityStatusForTest(&participants[i]))
+        << "Mixed status of AudioSource #" << i << " wrong.";
+  }
+}
+
+// This test checks that the initialization and participant addition
+// can be done on a different thread.
+TEST(AudioMixer, ConstructFromOtherThread) {
+  TaskQueueForTest init_queue("init");
+  rtc::scoped_refptr<AudioMixer> mixer;
+  // Create the mixer on a dedicated task queue ("thread").
+  init_queue.SendTask([&mixer]() { mixer = AudioMixerImpl::Create(); });
+
+  MockMixerAudioSource participant;
+  EXPECT_CALL(participant, PreferredSampleRate())
+      .WillRepeatedly(Return(kDefaultSampleRateHz));
+
+  ResetFrame(participant.fake_frame());
+
+  // Add the source from yet another task queue.
+  TaskQueueForTest participant_queue("participant");
+  participant_queue.SendTask(
+      [&mixer, &participant]() { mixer->AddSource(&participant); });
+
+  EXPECT_CALL(participant, GetAudioFrameWithInfo(kDefaultSampleRateHz, _))
+      .Times(Exactly(1));
+
+  // Do one mixer iteration
+  mixer->Mix(1, &frame_for_mixing);
+}
+
+// A muted source should lose its mixing slot to unmuted sources.
+TEST(AudioMixer, MutedShouldMixAfterUnmuted) {
+  constexpr int kAudioSources =
+      AudioMixerImpl::kDefaultNumberOfMixedAudioSources + 1;
+
+  std::vector<AudioFrame> frames(kAudioSources);
+  for (auto& frame : frames) {
+    ResetFrame(&frame);
+  }
+
+  std::vector<AudioMixer::Source::AudioFrameInfo> frame_info(
+      kAudioSources, AudioMixer::Source::AudioFrameInfo::kNormal);
+  frame_info[0] = AudioMixer::Source::AudioFrameInfo::kMuted;
+  std::vector<bool> expected_status(kAudioSources, true);
+  expected_status[0] = false;
+
+  MixAndCompare(frames, frame_info, expected_status);
+}
+
+// A VAD-passive source should lose its mixing slot to VAD-active sources.
+TEST(AudioMixer, PassiveShouldMixAfterNormal) {
+  constexpr int kAudioSources =
+      AudioMixerImpl::kDefaultNumberOfMixedAudioSources + 1;
+
+  std::vector<AudioFrame> frames(kAudioSources);
+  for (auto& frame : frames) {
+    ResetFrame(&frame);
+  }
+
+  std::vector<AudioMixer::Source::AudioFrameInfo> frame_info(
+      kAudioSources, AudioMixer::Source::AudioFrameInfo::kNormal);
+  frames[0].vad_activity_ = AudioFrame::kVadPassive;
+  std::vector<bool> expected_status(kAudioSources, true);
+  expected_status[0] = false;
+
+  MixAndCompare(frames, frame_info, expected_status);
+}
+
+// VAD activity should outrank raw energy: a maximally loud but passive
+// source still loses to quieter active sources.
+TEST(AudioMixer, ActiveShouldMixBeforeLoud) {
+  constexpr int kAudioSources =
+      AudioMixerImpl::kDefaultNumberOfMixedAudioSources + 1;
+
+  std::vector<AudioFrame> frames(kAudioSources);
+  for (auto& frame : frames) {
+    ResetFrame(&frame);
+  }
+
+  std::vector<AudioMixer::Source::AudioFrameInfo> frame_info(
+      kAudioSources, AudioMixer::Source::AudioFrameInfo::kNormal);
+  frames[0].vad_activity_ = AudioFrame::kVadPassive;
+  // Make source 0 as loud as int16_t allows.
+  int16_t* frame_data = frames[0].mutable_data();
+  std::fill(frame_data, frame_data + kDefaultSampleRateHz / 100,
+            std::numeric_limits<int16_t>::max());
+  std::vector<bool> expected_status(kAudioSources, true);
+  expected_status[0] = false;
+
+  MixAndCompare(frames, frame_info, expected_status);
+}
+
+// With max_sources_to_mix = 2, only the two VAD-active sources out of five
+// should end up mixed.
+TEST(AudioMixer, ShouldMixUpToSpecifiedNumberOfSourcesToMix) {
+  constexpr int kAudioSources = 5;
+  constexpr int kSourcesToMix = 2;
+
+  std::vector<AudioFrame> frames(kAudioSources);
+  for (auto& frame : frames) {
+    ResetFrame(&frame);
+  }
+
+  std::vector<AudioMixer::Source::AudioFrameInfo> frame_info(
+      kAudioSources, AudioMixer::Source::AudioFrameInfo::kNormal);
+  // Set up to kSourceToMix sources with kVadActive so that they're mixed.
+  const std::vector<AudioFrame::VADActivity> kVadActivities = {
+      AudioFrame::kVadUnknown, AudioFrame::kVadPassive, AudioFrame::kVadPassive,
+      AudioFrame::kVadActive, AudioFrame::kVadActive};
+  // Populate VAD and frame for all sources.
+  for (int i = 0; i < kAudioSources; i++) {
+    frames[i].vad_activity_ = kVadActivities[i];
+  }
+
+  std::vector<MockMixerAudioSource> participants(kAudioSources);
+  for (int i = 0; i < kAudioSources; ++i) {
+    participants[i].fake_frame()->CopyFrom(frames[i]);
+    participants[i].set_fake_info(frame_info[i]);
+  }
+
+  const auto mixer = AudioMixerImpl::Create(kSourcesToMix);
+  for (int i = 0; i < kAudioSources; ++i) {
+    EXPECT_TRUE(mixer->AddSource(&participants[i]));
+    EXPECT_CALL(participants[i], GetAudioFrameWithInfo(kDefaultSampleRateHz, _))
+        .Times(Exactly(1));
+  }
+
+  mixer->Mix(1, &frame_for_mixing);
+
+  std::vector<bool> expected_status = {false, false, false, true, true};
+  for (int i = 0; i < kAudioSources; ++i) {
+    EXPECT_EQ(expected_status[i],
+              mixer->GetAudioSourceMixabilityStatusForTest(&participants[i]))
+        << "Wrong mix status for source #" << i;
+  }
+}
+
+// Mute status should outrank raw energy: a maximally loud but muted source
+// still loses to quieter unmuted sources.
+TEST(AudioMixer, UnmutedShouldMixBeforeLoud) {
+  constexpr int kAudioSources =
+      AudioMixerImpl::kDefaultNumberOfMixedAudioSources + 1;
+
+  std::vector<AudioFrame> frames(kAudioSources);
+  for (auto& frame : frames) {
+    ResetFrame(&frame);
+  }
+
+  std::vector<AudioMixer::Source::AudioFrameInfo> frame_info(
+      kAudioSources, AudioMixer::Source::AudioFrameInfo::kNormal);
+  frame_info[0] = AudioMixer::Source::AudioFrameInfo::kMuted;
+  // Make source 0 as loud as int16_t allows.
+  int16_t* frame_data = frames[0].mutable_data();
+  std::fill(frame_data, frame_data + kDefaultSampleRateHz / 100,
+            std::numeric_limits<int16_t>::max());
+  std::vector<bool> expected_status(kAudioSources, true);
+  expected_status[0] = false;
+
+  MixAndCompare(frames, frame_info, expected_status);
+}
+
+// An injected OutputRateCalculator should fully control the rate sources
+// are polled at.
+TEST(AudioMixer, MixingRateShouldBeDecidedByRateCalculator) {
+  constexpr int kOutputRate = 22000;
+  // std::make_unique for consistency with the rest of this file and to
+  // avoid a naked `new`.
+  const auto mixer = AudioMixerImpl::Create(
+      std::make_unique<CustomRateCalculator>(kOutputRate), true);
+  MockMixerAudioSource audio_source;
+  mixer->AddSource(&audio_source);
+  ResetFrame(audio_source.fake_frame());
+
+  EXPECT_CALL(audio_source, GetAudioFrameWithInfo(kOutputRate, _))
+      .Times(Exactly(1));
+
+  mixer->Mix(1, &frame_for_mixing);
+}
+
+// Even with no sources, the output rate should come from the injected
+// rate calculator.
+TEST(AudioMixer, ZeroSourceRateShouldBeDecidedByRateCalculator) {
+  constexpr int kOutputRate = 8000;
+  // std::make_unique for consistency with the rest of this file and to
+  // avoid a naked `new`.
+  const auto mixer = AudioMixerImpl::Create(
+      std::make_unique<CustomRateCalculator>(kOutputRate), true);
+
+  mixer->Mix(1, &frame_for_mixing);
+
+  EXPECT_EQ(kOutputRate, frame_for_mixing.sample_rate_hz_);
+}
+
+// Smoke test: mixing with the limiter disabled should not crash.
+TEST(AudioMixer, NoLimiterBasicApiCalls) {
+  // std::make_unique for consistency with the rest of this file and to
+  // avoid a naked `new`.
+  const auto mixer = AudioMixerImpl::Create(
+      std::make_unique<DefaultOutputRateCalculator>(), false);
+  mixer->Mix(1, &frame_for_mixing);
+}
+
+TEST(AudioMixer, AnyRateIsPossibleWithNoLimiter) {
+  // No APM limiter means no AudioProcessing::NativeRate restriction
+  // on mixing rate. The rate has to be divisible by 100 since we use
+  // 10 ms frames, though.
+  for (const auto rate : {8000, 20000, 24000, 32000, 44100}) {
+    for (const size_t number_of_channels : {1, 2}) {
+      for (const auto number_of_sources : {0, 1, 2, 3, 4}) {
+        // Bug fix: the second argument is the channel count; the original
+        // passed `number_of_sources` twice.
+        SCOPED_TRACE(ProduceDebugText(rate,
+                                      static_cast<int>(number_of_channels),
+                                      number_of_sources));
+        // std::make_unique for consistency with the rest of this file and
+        // to avoid a naked `new`.
+        const auto mixer = AudioMixerImpl::Create(
+            std::make_unique<CustomRateCalculator>(rate), false);
+
+        std::vector<MockMixerAudioSource> sources(number_of_sources);
+        for (auto& source : sources) {
+          ResetFrame(source.fake_frame());
+          mixer->AddSource(&source);
+        }
+
+        mixer->Mix(number_of_channels, &frame_for_mixing);
+        EXPECT_EQ(rate, frame_for_mixing.sample_rate_hz_);
+        EXPECT_EQ(number_of_channels, frame_for_mixing.num_channels_);
+      }
+    }
+  }
+}
+
+TEST(AudioMixer, MultipleChannelsOneParticipant) {
+  // Set up a participant with a 6-channel frame, and make sure a 6-channel
+  // frame with the right sample values comes out from the mixer. There are 2
+  // Mix calls because of ramp-up.
+  constexpr size_t kNumberOfChannels = 6;
+  MockMixerAudioSource source;
+  ResetFrame(source.fake_frame());
+  const auto mixer = AudioMixerImpl::Create();
+  mixer->AddSource(&source);
+  // First Mix() only ramps the source in.
+  mixer->Mix(1, &frame_for_mixing);
+  auto* frame = source.fake_frame();
+  frame->num_channels_ = kNumberOfChannels;
+  std::fill(frame->mutable_data(),
+            frame->mutable_data() + AudioFrame::kMaxDataSizeSamples, 0);
+  // Write a distinct marker value into sample index 100 of each channel.
+  for (size_t i = 0; i < kNumberOfChannels; ++i) {
+    frame->mutable_data()[100 * frame->num_channels_ + i] = 1000 * i;
+  }
+
+  mixer->Mix(kNumberOfChannels, &frame_for_mixing);
+
+  EXPECT_EQ(frame_for_mixing.num_channels_, kNumberOfChannels);
+  for (size_t i = 0; i < kNumberOfChannels; ++i) {
+    EXPECT_EQ(frame_for_mixing.data()[100 * frame_for_mixing.num_channels_ + i],
+              static_cast<int16_t>(1000 * i));
+  }
+}
+
+TEST(AudioMixer, MultipleChannelsManyParticipants) {
+  // Sets up 2 participants. One has a 6-channel frame. Make sure a 6-channel
+  // frame with the right sample values comes out from the mixer. There are 2
+  // Mix calls because of ramp-up.
+  constexpr size_t kNumberOfChannels = 6;
+  MockMixerAudioSource source;
+  const auto mixer = AudioMixerImpl::Create();
+  mixer->AddSource(&source);
+  ResetFrame(source.fake_frame());
+  // First Mix() only ramps the first source in.
+  mixer->Mix(1, &frame_for_mixing);
+  auto* frame = source.fake_frame();
+  frame->num_channels_ = kNumberOfChannels;
+  std::fill(frame->mutable_data(),
+            frame->mutable_data() + AudioFrame::kMaxDataSizeSamples, 0);
+  // Write a distinct marker value into sample index 100 of each channel.
+  for (size_t i = 0; i < kNumberOfChannels; ++i) {
+    frame->mutable_data()[100 * frame->num_channels_ + i] = 1000 * i;
+  }
+  // The second source is silent (default frame data is zero).
+  MockMixerAudioSource other_source;
+  ResetFrame(other_source.fake_frame());
+  mixer->AddSource(&other_source);
+
+  mixer->Mix(kNumberOfChannels, &frame_for_mixing);
+
+  EXPECT_EQ(frame_for_mixing.num_channels_, kNumberOfChannels);
+  for (size_t i = 0; i < kNumberOfChannels; ++i) {
+    EXPECT_EQ(frame_for_mixing.data()[100 * frame_for_mixing.num_channels_ + i],
+              static_cast<int16_t>(1000 * i));
+  }
+}
+
+// The mixed frame's packet_infos_ should be the union of the packet infos
+// of all mixed sources.
+TEST(AudioMixer, ShouldIncludeRtpPacketInfoFromAllMixedSources) {
+  const uint32_t kSsrc0 = 10;
+  const uint32_t kSsrc1 = 11;
+  const uint32_t kSsrc2 = 12;
+  const uint32_t kCsrc0 = 20;
+  const uint32_t kCsrc1 = 21;
+  const uint32_t kCsrc2 = 22;
+  const uint32_t kCsrc3 = 23;
+  const int kAudioLevel0 = 10;
+  const int kAudioLevel1 = 40;
+  const absl::optional<uint32_t> kAudioLevel2 = absl::nullopt;
+  const uint32_t kRtpTimestamp0 = 300;
+  const uint32_t kRtpTimestamp1 = 400;
+  const Timestamp kReceiveTime0 = Timestamp::Millis(10);
+  const Timestamp kReceiveTime1 = Timestamp::Millis(20);
+
+  const RtpPacketInfo kPacketInfo0(kSsrc0, {kCsrc0, kCsrc1}, kRtpTimestamp0,
+                                   kAudioLevel0, absl::nullopt, kReceiveTime0);
+  const RtpPacketInfo kPacketInfo1(kSsrc1, {kCsrc2}, kRtpTimestamp1,
+                                   kAudioLevel1, absl::nullopt, kReceiveTime1);
+  const RtpPacketInfo kPacketInfo2(kSsrc2, {kCsrc3}, kRtpTimestamp1,
+                                   kAudioLevel2, absl::nullopt, kReceiveTime1);
+
+  const auto mixer = AudioMixerImpl::Create();
+
+  MockMixerAudioSource source;
+  source.set_packet_infos(RtpPacketInfos({kPacketInfo0}));
+  mixer->AddSource(&source);
+  ResetFrame(source.fake_frame());
+  mixer->Mix(1, &frame_for_mixing);
+
+  MockMixerAudioSource other_source;
+  other_source.set_packet_infos(RtpPacketInfos({kPacketInfo1, kPacketInfo2}));
+  ResetFrame(other_source.fake_frame());
+  mixer->AddSource(&other_source);
+
+  mixer->Mix(/*number_of_channels=*/1, &frame_for_mixing);
+
+  EXPECT_THAT(frame_for_mixing.packet_infos_,
+              UnorderedElementsAre(kPacketInfo0, kPacketInfo1, kPacketInfo2));
+}
+
+// Packet infos of sources that did NOT make it into the mix must not show
+// up in the output frame.
+TEST(AudioMixer, MixerShouldIncludeRtpPacketInfoFromMixedSourcesOnly) {
+  const uint32_t kSsrc0 = 10;
+  const uint32_t kSsrc1 = 11;
+  const uint32_t kSsrc2 = 21;
+  const uint32_t kCsrc0 = 30;
+  const uint32_t kCsrc1 = 31;
+  const uint32_t kCsrc2 = 32;
+  const uint32_t kCsrc3 = 33;
+  const int kAudioLevel0 = 10;
+  const absl::optional<uint32_t> kAudioLevelMissing = absl::nullopt;
+  const uint32_t kRtpTimestamp0 = 300;
+  const uint32_t kRtpTimestamp1 = 400;
+  const Timestamp kReceiveTime0 = Timestamp::Millis(10);
+  const Timestamp kReceiveTime1 = Timestamp::Millis(20);
+
+  const RtpPacketInfo kPacketInfo0(kSsrc0, {kCsrc0, kCsrc1}, kRtpTimestamp0,
+                                   kAudioLevel0, absl::nullopt, kReceiveTime0);
+  const RtpPacketInfo kPacketInfo1(kSsrc1, {kCsrc2}, kRtpTimestamp1,
+                                   kAudioLevelMissing, absl::nullopt,
+                                   kReceiveTime1);
+  const RtpPacketInfo kPacketInfo2(kSsrc2, {kCsrc3}, kRtpTimestamp1,
+                                   kAudioLevelMissing, absl::nullopt,
+                                   kReceiveTime1);
+
+  const auto mixer = AudioMixerImpl::Create(/*max_sources_to_mix=*/2);
+
+  MockMixerAudioSource source1;
+  source1.set_packet_infos(RtpPacketInfos({kPacketInfo0}));
+  mixer->AddSource(&source1);
+  ResetFrame(source1.fake_frame());
+  mixer->Mix(1, &frame_for_mixing);
+
+  MockMixerAudioSource source2;
+  source2.set_packet_infos(RtpPacketInfos({kPacketInfo1}));
+  ResetFrame(source2.fake_frame());
+  mixer->AddSource(&source2);
+
+  // The mixer prioritizes kVadActive over kVadPassive.
+  // We limit the number of sources to mix to 2 and set the third source's VAD
+  // activity to kVadPassive so that it will not be added to the mix.
+  MockMixerAudioSource source3;
+  source3.set_packet_infos(RtpPacketInfos({kPacketInfo2}));
+  ResetFrame(source3.fake_frame());
+  source3.fake_frame()->vad_activity_ = AudioFrame::kVadPassive;
+  mixer->AddSource(&source3);
+
+  mixer->Mix(/*number_of_channels=*/1, &frame_for_mixing);
+
+  EXPECT_THAT(frame_for_mixing.packet_infos_,
+              UnorderedElementsAre(kPacketInfo0, kPacketInfo1));
+}
+
+// Rate calculator that forces an output rate too high to fit
+// kMaxDataSizeSamples with many channels, used by the death test below.
+class HighOutputRateCalculator : public OutputRateCalculator {
+ public:
+  static const int kDefaultFrequency = 76000;
+  int CalculateOutputRateFromRange(
+      rtc::ArrayView<const int> preferred_sample_rates) override {
+    return kDefaultFrequency;
+  }
+  ~HighOutputRateCalculator() override {}
+};
+// Out-of-line definition: kDefaultFrequency is odr-used (EXPECT_EQ binds a
+// reference), so a definition is required for this static const member.
+const int HighOutputRateCalculator::kDefaultFrequency;
+
+// A rate/channel combination that would overflow AudioFrame's fixed buffer
+// must trip a DCHECK (debug) or at least produce the requested format
+// (release).
+TEST(AudioMixerDeathTest, MultipleChannelsAndHighRate) {
+  constexpr size_t kSamplesPerChannel =
+      HighOutputRateCalculator::kDefaultFrequency / 100;
+  // As many channels as an AudioFrame can fit:
+  constexpr size_t kNumberOfChannels =
+      AudioFrame::kMaxDataSizeSamples / kSamplesPerChannel;
+  MockMixerAudioSource source;
+  const auto mixer = AudioMixerImpl::Create(
+      std::make_unique<HighOutputRateCalculator>(), true);
+  mixer->AddSource(&source);
+  ResetFrame(source.fake_frame());
+  mixer->Mix(1, &frame_for_mixing);
+  auto* frame = source.fake_frame();
+  frame->num_channels_ = kNumberOfChannels;
+  frame->sample_rate_hz_ = HighOutputRateCalculator::kDefaultFrequency;
+  frame->samples_per_channel_ = kSamplesPerChannel;
+
+  std::fill(frame->mutable_data(),
+            frame->mutable_data() + AudioFrame::kMaxDataSizeSamples, 0);
+  MockMixerAudioSource other_source;
+  ResetFrame(other_source.fake_frame());
+  auto* other_frame = other_source.fake_frame();
+  other_frame->num_channels_ = kNumberOfChannels;
+  other_frame->sample_rate_hz_ = HighOutputRateCalculator::kDefaultFrequency;
+  other_frame->samples_per_channel_ = kSamplesPerChannel;
+  mixer->AddSource(&other_source);
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+  EXPECT_DEATH(mixer->Mix(kNumberOfChannels, &frame_for_mixing), "");
+#elif !RTC_DCHECK_IS_ON
+  mixer->Mix(kNumberOfChannels, &frame_for_mixing);
+  EXPECT_EQ(frame_for_mixing.num_channels_, kNumberOfChannels);
+  EXPECT_EQ(frame_for_mixing.sample_rate_hz_,
+            HighOutputRateCalculator::kDefaultFrequency);
+#endif
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_mixer/audio_mixer_test.cc b/third_party/libwebrtc/modules/audio_mixer/audio_mixer_test.cc
new file mode 100644
index 0000000000..3ee28a7937
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_mixer/audio_mixer_test.cc
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "api/audio/audio_mixer.h"
+
+#include <cstring>
+#include <iostream>
+#include <vector>
+
+#include "absl/flags/flag.h"
+#include "absl/flags/parse.h"
+#include "common_audio/wav_file.h"
+#include "modules/audio_mixer/audio_mixer_impl.h"
+#include "modules/audio_mixer/default_output_rate_calculator.h"
+#include "rtc_base/strings/string_builder.h"
+
+// Command-line flags for the offline mixing tool implemented in main().
+// NOTE(review): `sampling_rate` appears unused below — main() derives the
+// rate from the first input file instead; confirm before relying on it.
+ABSL_FLAG(int,
+          sampling_rate,
+          16000,
+          "Rate at which to mix (all input streams must have this rate)");
+
+ABSL_FLAG(bool,
+          stereo,
+          false,
+          "Enable stereo (interleaved). Inputs need not be as this parameter.");
+
+ABSL_FLAG(bool, limiter, true, "Enable limiter.");
+ABSL_FLAG(std::string,
+          output_file,
+          "mixed_file.wav",
+          "File in which to store the mixed result.");
+ABSL_FLAG(std::string, input_file_1, "", "First input. Default none.");
+ABSL_FLAG(std::string, input_file_2, "", "Second input. Default none.");
+ABSL_FLAG(std::string, input_file_3, "", "Third input. Default none.");
+ABSL_FLAG(std::string, input_file_4, "", "Fourth input. Default none.");
+
+namespace webrtc {
+namespace test {
+
+// AudioMixer::Source that plays back a WAV file in 10 ms chunks. The file's
+// own sample rate must equal the mixer's target rate (checked on every read;
+// no resampling is performed). Once the file is exhausted, the source mutes
+// its output and reports kMuted.
+class FilePlayingSource : public AudioMixer::Source {
+ public:
+  explicit FilePlayingSource(absl::string_view filename)
+      : wav_reader_(new WavReader(filename)),
+        sample_rate_hz_(wav_reader_->sample_rate()),
+        samples_per_channel_(sample_rate_hz_ / 100),
+        number_of_channels_(wav_reader_->num_channels()) {}
+
+  // Fills `frame` with the next 10 ms of interleaved samples from the file.
+  AudioFrameInfo GetAudioFrameWithInfo(int target_rate_hz,
+                                       AudioFrame* frame) override {
+    frame->samples_per_channel_ = samples_per_channel_;
+    frame->num_channels_ = number_of_channels_;
+    frame->sample_rate_hz_ = target_rate_hz;
+
+    // This source cannot resample: the requested rate must match the file.
+    RTC_CHECK_EQ(target_rate_hz, sample_rate_hz_);
+
+    const size_t num_to_read = number_of_channels_ * samples_per_channel_;
+    const size_t num_read =
+        wav_reader_->ReadSamples(num_to_read, frame->mutable_data());
+
+    // A short read means end-of-file; emit silence from now on.
+    file_has_ended_ = num_to_read != num_read;
+    if (file_has_ended_) {
+      frame->Mute();
+    }
+    return file_has_ended_ ? AudioFrameInfo::kMuted : AudioFrameInfo::kNormal;
+  }
+
+  // Fixed dummy SSRC; every file source returns 0.
+  int Ssrc() const override { return 0; }
+
+  int PreferredSampleRate() const override { return sample_rate_hz_; }
+
+  bool FileHasEnded() const { return file_has_ended_; }
+
+  // Human-readable summary of the source, used for stats printing.
+  std::string ToString() const {
+    rtc::StringBuilder ss;
+    ss << "{rate: " << sample_rate_hz_ << ", channels: " << number_of_channels_
+       << ", samples_tot: " << wav_reader_->num_samples() << "}";
+    return ss.Release();
+  }
+
+ private:
+  std::unique_ptr<WavReader> wav_reader_;
+  int sample_rate_hz_;
+  int samples_per_channel_;   // Always sample_rate_hz_ / 100 (10 ms frames).
+  int number_of_channels_;
+  bool file_has_ended_ = false;
+};
+} // namespace test
+} // namespace webrtc
+
+namespace {
+
+// Collects the non-empty --input_file_N flag values, preserving their order.
+const std::vector<std::string> parse_input_files() {
+  std::vector<std::string> result;
+  for (auto& x :
+       {absl::GetFlag(FLAGS_input_file_1), absl::GetFlag(FLAGS_input_file_2),
+        absl::GetFlag(FLAGS_input_file_3), absl::GetFlag(FLAGS_input_file_4)}) {
+    if (!x.empty()) {
+      result.push_back(x);
+    }
+  }
+  return result;
+}
+} // namespace
+
+// Offline tool: mixes up to four WAV files into one output WAV using
+// AudioMixerImpl, 10 ms at a time, until every input file has ended.
+// All inputs must share a single sample rate.
+int main(int argc, char* argv[]) {
+  absl::ParseCommandLine(argc, argv);
+
+  rtc::scoped_refptr<webrtc::AudioMixerImpl> mixer(
+      webrtc::AudioMixerImpl::Create(
+          std::unique_ptr<webrtc::OutputRateCalculator>(
+              new webrtc::DefaultOutputRateCalculator()),
+          absl::GetFlag(FLAGS_limiter)));
+
+  const std::vector<std::string> input_files = parse_input_files();
+  std::vector<webrtc::test::FilePlayingSource> sources;
+  const int num_channels = absl::GetFlag(FLAGS_stereo) ? 2 : 1;
+  sources.reserve(input_files.size());
+  for (const auto& input_file : input_files) {
+    sources.emplace_back(input_file);
+  }
+
+  // The mixer stores raw pointers into `sources`; the vector is fully built
+  // before any pointer is handed out, so no reallocation can invalidate them.
+  // NOTE(review): AddSource presumably returns success, so the name `error`
+  // is misleading.
+  for (auto& source : sources) {
+    auto error = mixer->AddSource(&source);
+    RTC_CHECK(error);
+  }
+
+  if (sources.empty()) {
+    std::cout << "Need at least one source!\n";
+    return 1;
+  }
+
+  // All inputs must agree on one rate; the mix output is checked against it
+  // every iteration below.
+  const size_t sample_rate = sources[0].PreferredSampleRate();
+  for (const auto& source : sources) {
+    RTC_CHECK_EQ(sample_rate, source.PreferredSampleRate());
+  }
+
+  // Print stats.
+  std::cout << "Limiting is: " << (absl::GetFlag(FLAGS_limiter) ? "on" : "off")
+            << "\n"
+               "Channels: "
+            << num_channels
+            << "\n"
+               "Rate: "
+            << sample_rate
+            << "\n"
+               "Number of input streams: "
+            << input_files.size() << "\n";
+  for (const auto& source : sources) {
+    std::cout << "\t" << source.ToString() << "\n";
+  }
+  std::cout << "Now mixing\n...\n";
+
+  webrtc::WavWriter wav_writer(absl::GetFlag(FLAGS_output_file), sample_rate,
+                               num_channels);
+
+  webrtc::AudioFrame frame;
+
+  // Mix 10 ms at a time until every input has reached end-of-file.
+  bool all_streams_finished = false;
+  while (!all_streams_finished) {
+    mixer->Mix(num_channels, &frame);
+    RTC_CHECK_EQ(sample_rate / 100, frame.samples_per_channel_);
+    RTC_CHECK_EQ(sample_rate, frame.sample_rate_hz_);
+    RTC_CHECK_EQ(num_channels, frame.num_channels_);
+    wav_writer.WriteSamples(frame.data(),
+                            num_channels * frame.samples_per_channel_);
+
+    all_streams_finished =
+        std::all_of(sources.begin(), sources.end(),
+                    [](const webrtc::test::FilePlayingSource& source) {
+                      return source.FileHasEnded();
+                    });
+  }
+
+  std::cout << "Done!\n" << std::endl;
+}
diff --git a/third_party/libwebrtc/modules/audio_mixer/default_output_rate_calculator.cc b/third_party/libwebrtc/modules/audio_mixer/default_output_rate_calculator.cc
new file mode 100644
index 0000000000..5f24b653a3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_mixer/default_output_rate_calculator.cc
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_mixer/default_output_rate_calculator.h"
+
+#include <algorithm>
+#include <iterator>
+
+#include "modules/audio_processing/include/audio_processing.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+// Returns the smallest APM native rate (8/16/32/48 kHz) that is greater than
+// or equal to the largest preferred rate, or kDefaultFrequency when there are
+// no preferences. DCHECKs that the maximum lies within the native range.
+int DefaultOutputRateCalculator::CalculateOutputRateFromRange(
+    rtc::ArrayView<const int> preferred_sample_rates) {
+  if (preferred_sample_rates.empty()) {
+    return DefaultOutputRateCalculator::kDefaultFrequency;
+  }
+  using NativeRate = AudioProcessing::NativeRate;
+  const int maximal_frequency = *std::max_element(
+      preferred_sample_rates.cbegin(), preferred_sample_rates.cend());
+
+  RTC_DCHECK_LE(NativeRate::kSampleRate8kHz, maximal_frequency);
+  RTC_DCHECK_GE(NativeRate::kSampleRate48kHz, maximal_frequency);
+
+  // native_rates is sorted ascending, so lower_bound rounds the maximal
+  // preferred rate up to the next native rate.
+  static constexpr NativeRate native_rates[] = {
+      NativeRate::kSampleRate8kHz, NativeRate::kSampleRate16kHz,
+      NativeRate::kSampleRate32kHz, NativeRate::kSampleRate48kHz};
+  const auto* rounded_up_index = std::lower_bound(
+      std::begin(native_rates), std::end(native_rates), maximal_frequency);
+  RTC_DCHECK(rounded_up_index != std::end(native_rates));
+  return *rounded_up_index;
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_mixer/default_output_rate_calculator.h b/third_party/libwebrtc/modules/audio_mixer/default_output_rate_calculator.h
new file mode 100644
index 0000000000..02a3b5c37b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_mixer/default_output_rate_calculator.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_MIXER_DEFAULT_OUTPUT_RATE_CALCULATOR_H_
+#define MODULES_AUDIO_MIXER_DEFAULT_OUTPUT_RATE_CALCULATOR_H_
+
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_mixer/output_rate_calculator.h"
+
+namespace webrtc {
+
+// OutputRateCalculator that picks the lowest APM native rate able to
+// represent every source's preferred rate without downsampling.
+class DefaultOutputRateCalculator : public OutputRateCalculator {
+ public:
+  static const int kDefaultFrequency = 48000;
+
+  // Produces the least native rate greater or equal to the preferred
+  // sample rates. A native rate is one in
+  // AudioProcessing::NativeRate. If `preferred_sample_rates` is
+  // empty, returns `kDefaultFrequency`.
+  int CalculateOutputRateFromRange(
+      rtc::ArrayView<const int> preferred_sample_rates) override;
+  ~DefaultOutputRateCalculator() override {}
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_MIXER_DEFAULT_OUTPUT_RATE_CALCULATOR_H_
diff --git a/third_party/libwebrtc/modules/audio_mixer/frame_combiner.cc b/third_party/libwebrtc/modules/audio_mixer/frame_combiner.cc
new file mode 100644
index 0000000000..e31eea595f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_mixer/frame_combiner.cc
@@ -0,0 +1,245 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_mixer/frame_combiner.h"
+
+#include <algorithm>
+#include <array>
+#include <cstdint>
+#include <iterator>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "api/array_view.h"
+#include "api/rtp_packet_info.h"
+#include "api/rtp_packet_infos.h"
+#include "common_audio/include/audio_util.h"
+#include "modules/audio_mixer/audio_frame_manipulator.h"
+#include "modules/audio_mixer/audio_mixer_impl.h"
+#include "modules/audio_processing/include/audio_frame_view.h"
+#include "modules/audio_processing/include/audio_processing.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+namespace {
+
+// Deinterleaved float mixing buffer: one fixed-size array per channel, sized
+// for the worst case the combiner supports.
+using MixingBuffer =
+    std::array<std::array<float, FrameCombiner::kMaximumChannelSize>,
+               FrameCombiner::kMaximumNumberOfChannels>;
+
+// Initializes the output frame's metadata: rate, channel count, 10 ms sample
+// count, timestamps (min over inputs), elapsed time (max over inputs) and the
+// union of the inputs' RTP packet infos. Sample data is not written here.
+void SetAudioFrameFields(rtc::ArrayView<const AudioFrame* const> mix_list,
+                         size_t number_of_channels,
+                         int sample_rate,
+                         size_t number_of_streams,
+                         AudioFrame* audio_frame_for_mixing) {
+  const size_t samples_per_channel = static_cast<size_t>(
+      (sample_rate * webrtc::AudioMixerImpl::kFrameDurationInMs) / 1000);
+
+  // TODO(minyue): Issue bugs.webrtc.org/3390.
+  // Audio frame timestamp. The 'timestamp_' field is set to dummy
+  // value '0', because it is only supported in the one channel case and
+  // is then updated in the helper functions.
+  audio_frame_for_mixing->UpdateFrame(
+      0, nullptr, samples_per_channel, sample_rate, AudioFrame::kUndefined,
+      AudioFrame::kVadUnknown, number_of_channels);
+
+  if (mix_list.empty()) {
+    audio_frame_for_mixing->elapsed_time_ms_ = -1;
+  } else {
+    audio_frame_for_mixing->timestamp_ = mix_list[0]->timestamp_;
+    audio_frame_for_mixing->elapsed_time_ms_ = mix_list[0]->elapsed_time_ms_;
+    audio_frame_for_mixing->ntp_time_ms_ = mix_list[0]->ntp_time_ms_;
+    std::vector<RtpPacketInfo> packet_infos;
+    for (const auto& frame : mix_list) {
+      // Earliest timestamps and latest elapsed time across all inputs.
+      audio_frame_for_mixing->timestamp_ =
+          std::min(audio_frame_for_mixing->timestamp_, frame->timestamp_);
+      audio_frame_for_mixing->ntp_time_ms_ =
+          std::min(audio_frame_for_mixing->ntp_time_ms_, frame->ntp_time_ms_);
+      audio_frame_for_mixing->elapsed_time_ms_ = std::max(
+          audio_frame_for_mixing->elapsed_time_ms_, frame->elapsed_time_ms_);
+      packet_infos.insert(packet_infos.end(), frame->packet_infos_.begin(),
+                          frame->packet_infos_.end());
+    }
+    audio_frame_for_mixing->packet_infos_ =
+        RtpPacketInfos(std::move(packet_infos));
+  }
+}
+
+// Fast path for zero or one input stream: produce silence, or copy the single
+// frame's samples verbatim. No summation happens here, so the limiter is
+// bypassed by the caller.
+void MixFewFramesWithNoLimiter(rtc::ArrayView<const AudioFrame* const> mix_list,
+                               AudioFrame* audio_frame_for_mixing) {
+  if (mix_list.empty()) {
+    audio_frame_for_mixing->Mute();
+    return;
+  }
+  RTC_DCHECK_LE(mix_list.size(), 1);
+  std::copy(mix_list[0]->data(),
+            mix_list[0]->data() +
+                mix_list[0]->num_channels_ * mix_list[0]->samples_per_channel_,
+            audio_frame_for_mixing->mutable_data());
+}
+
+// Clears `mixing_buffer`, then sums every input frame into it. Inputs are
+// interleaved int16; the buffer is deinterleaved float (FloatS16 range).
+// Channel count and channel size are clamped to the combiner's maxima.
+void MixToFloatFrame(rtc::ArrayView<const AudioFrame* const> mix_list,
+                     size_t samples_per_channel,
+                     size_t number_of_channels,
+                     MixingBuffer* mixing_buffer) {
+  RTC_DCHECK_LE(samples_per_channel, FrameCombiner::kMaximumChannelSize);
+  RTC_DCHECK_LE(number_of_channels, FrameCombiner::kMaximumNumberOfChannels);
+  // Clear the mixing buffer.
+  for (auto& one_channel_buffer : *mixing_buffer) {
+    std::fill(one_channel_buffer.begin(), one_channel_buffer.end(), 0.f);
+  }
+
+  // Convert to FloatS16 and mix.
+  for (size_t i = 0; i < mix_list.size(); ++i) {
+    const AudioFrame* const frame = mix_list[i];
+    const int16_t* const frame_data = frame->data();
+    for (size_t j = 0; j < std::min(number_of_channels,
+                                    FrameCombiner::kMaximumNumberOfChannels);
+         ++j) {
+      for (size_t k = 0; k < std::min(samples_per_channel,
+                                      FrameCombiner::kMaximumChannelSize);
+           ++k) {
+        // Deinterleave: sample k of channel j lives at index
+        // number_of_channels * k + j in the interleaved input.
+        (*mixing_buffer)[j][k] += frame_data[number_of_channels * k + j];
+      }
+    }
+  }
+}
+
+// Applies the AGC2 limiter in place on the float mix. The sample rate is
+// derived from the 10 ms frame length.
+void RunLimiter(AudioFrameView<float> mixing_buffer_view, Limiter* limiter) {
+  const size_t sample_rate = mixing_buffer_view.samples_per_channel() * 1000 /
+                             AudioMixerImpl::kFrameDurationInMs;
+  // TODO(alessiob): Avoid calling SetSampleRate every time.
+  limiter->SetSampleRate(sample_rate);
+  limiter->Process(mixing_buffer_view);
+}
+
+// Interleaves the deinterleaved float mixing buffer back into the int16
+// output frame, converting each sample from FloatS16 via FloatS16ToS16.
+void InterleaveToAudioFrame(AudioFrameView<const float> mixing_buffer_view,
+                            AudioFrame* audio_frame_for_mixing) {
+  const size_t number_of_channels = mixing_buffer_view.num_channels();
+  const size_t samples_per_channel = mixing_buffer_view.samples_per_channel();
+  int16_t* const mixing_data = audio_frame_for_mixing->mutable_data();
+  // Put data in the result frame.
+  for (size_t i = 0; i < number_of_channels; ++i) {
+    for (size_t j = 0; j < samples_per_channel; ++j) {
+      mixing_data[number_of_channels * j + i] =
+          FloatS16ToS16(mixing_buffer_view.channel(i)[j]);
+    }
+  }
+}
+} // namespace
+
+// Out-of-line definitions so the class constants can be ODR-used (pre-C++17).
+constexpr size_t FrameCombiner::kMaximumNumberOfChannels;
+constexpr size_t FrameCombiner::kMaximumChannelSize;
+
+// `use_limiter` selects whether multi-stream mixes pass through the AGC2
+// limiter. The mixing buffer is large, hence heap-allocated. The limiter is
+// constructed at 48 kHz and re-tuned per mix in RunLimiter.
+FrameCombiner::FrameCombiner(bool use_limiter)
+    : data_dumper_(new ApmDataDumper(0)),
+      mixing_buffer_(
+          std::make_unique<std::array<std::array<float, kMaximumChannelSize>,
+                                      kMaximumNumberOfChannels>>()),
+      limiter_(static_cast<size_t>(48000), data_dumper_.get(), "AudioMixer"),
+      use_limiter_(use_limiter) {
+  // The mixing buffer must never exceed what one AudioFrame can hold.
+  static_assert(kMaximumChannelSize * kMaximumNumberOfChannels <=
+                    AudioFrame::kMaxDataSizeSamples,
+                "");
+}
+
+FrameCombiner::~FrameCombiner() = default;
+
+// Mixes `mix_list` into `audio_frame_for_mixing`. Inputs must already share
+// `sample_rate` and a 10 ms length (DCHECKed); their channel layouts are
+// remixed to `number_of_channels` first. With at most one stream the input is
+// copied (or muted); otherwise frames are summed in float and, if enabled,
+// limited before being interleaved back to int16.
+void FrameCombiner::Combine(rtc::ArrayView<AudioFrame* const> mix_list,
+                            size_t number_of_channels,
+                            int sample_rate,
+                            size_t number_of_streams,
+                            AudioFrame* audio_frame_for_mixing) {
+  RTC_DCHECK(audio_frame_for_mixing);
+
+  LogMixingStats(mix_list, sample_rate, number_of_streams);
+
+  SetAudioFrameFields(mix_list, number_of_channels, sample_rate,
+                      number_of_streams, audio_frame_for_mixing);
+
+  const size_t samples_per_channel = static_cast<size_t>(
+      (sample_rate * webrtc::AudioMixerImpl::kFrameDurationInMs) / 1000);
+
+  for (const auto* frame : mix_list) {
+    RTC_DCHECK_EQ(samples_per_channel, frame->samples_per_channel_);
+    RTC_DCHECK_EQ(sample_rate, frame->sample_rate_hz_);
+  }
+
+  // The 'num_channels_' field of frames in 'mix_list' could be
+  // different from 'number_of_channels'.
+  for (auto* frame : mix_list) {
+    RemixFrame(number_of_channels, frame);
+  }
+
+  // Fast path: no summation needed, limiter bypassed.
+  if (number_of_streams <= 1) {
+    MixFewFramesWithNoLimiter(mix_list, audio_frame_for_mixing);
+    return;
+  }
+
+  MixToFloatFrame(mix_list, samples_per_channel, number_of_channels,
+                  mixing_buffer_.get());
+
+  const size_t output_number_of_channels =
+      std::min(number_of_channels, kMaximumNumberOfChannels);
+  const size_t output_samples_per_channel =
+      std::min(samples_per_channel, kMaximumChannelSize);
+
+  // Put float data in an AudioFrameView.
+  std::array<float*, kMaximumNumberOfChannels> channel_pointers{};
+  for (size_t i = 0; i < output_number_of_channels; ++i) {
+    channel_pointers[i] = &(*mixing_buffer_.get())[i][0];
+  }
+  AudioFrameView<float> mixing_buffer_view(&channel_pointers[0],
+                                           output_number_of_channels,
+                                           output_samples_per_channel);
+
+  if (use_limiter_) {
+    RunLimiter(mixing_buffer_view, &limiter_);
+  }
+
+  InterleaveToAudioFrame(mixing_buffer_view, audio_frame_for_mixing);
+}
+
+// Emits UMA histograms (incoming/active stream counts and the mixing rate
+// bucketed to APM native rates) roughly once per second of mixed audio.
+void FrameCombiner::LogMixingStats(
+    rtc::ArrayView<const AudioFrame* const> mix_list,
+    int sample_rate,
+    size_t number_of_streams) const {
+  // Log every second.
+  uma_logging_counter_++;
+  if (uma_logging_counter_ > 1000 / AudioMixerImpl::kFrameDurationInMs) {
+    uma_logging_counter_ = 0;
+    RTC_HISTOGRAM_COUNTS_100("WebRTC.Audio.AudioMixer.NumIncomingStreams",
+                             static_cast<int>(number_of_streams));
+    RTC_HISTOGRAM_COUNTS_LINEAR(
+        "WebRTC.Audio.AudioMixer.NumIncomingActiveStreams2",
+        rtc::dchecked_cast<int>(mix_list.size()), /*min=*/1, /*max=*/16,
+        /*bucket_count=*/16);
+
+    // Bucket the rate by the nearest native rate at or above it.
+    using NativeRate = AudioProcessing::NativeRate;
+    static constexpr NativeRate native_rates[] = {
+        NativeRate::kSampleRate8kHz, NativeRate::kSampleRate16kHz,
+        NativeRate::kSampleRate32kHz, NativeRate::kSampleRate48kHz};
+    const auto* rate_position = std::lower_bound(
+        std::begin(native_rates), std::end(native_rates), sample_rate);
+    RTC_HISTOGRAM_ENUMERATION(
+        "WebRTC.Audio.AudioMixer.MixingRate",
+        std::distance(std::begin(native_rates), rate_position),
+        arraysize(native_rates));
+  }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_mixer/frame_combiner.h b/third_party/libwebrtc/modules/audio_mixer/frame_combiner.h
new file mode 100644
index 0000000000..9ddf81e41e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_mixer/frame_combiner.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_MIXER_FRAME_COMBINER_H_
+#define MODULES_AUDIO_MIXER_FRAME_COMBINER_H_
+
+#include <memory>
+#include <vector>
+
+#include "api/array_view.h"
+#include "api/audio/audio_frame.h"
+#include "modules/audio_processing/agc2/limiter.h"
+
+namespace webrtc {
+class ApmDataDumper;
+
+// Combines several 10 ms audio frames into one output frame, optionally
+// running the result through the AGC2 limiter when more than one stream is
+// mixed.
+// NOTE(review): this header uses std::array without including <array>
+// directly; it relies on a transitive include — confirm before relying on it.
+class FrameCombiner {
+ public:
+  enum class LimiterType { kNoLimiter, kApmAgcLimiter, kApmAgc2Limiter };
+  explicit FrameCombiner(bool use_limiter);
+  ~FrameCombiner();
+
+  // Combine several frames into one. Assumes sample_rate,
+  // samples_per_channel of the input frames match the parameters. The
+  // parameters 'number_of_channels' and 'sample_rate' are needed
+  // because 'mix_list' can be empty. The parameter
+  // 'number_of_streams' is used for determining whether to pass the
+  // data through a limiter.
+  void Combine(rtc::ArrayView<AudioFrame* const> mix_list,
+               size_t number_of_channels,
+               int sample_rate,
+               size_t number_of_streams,
+               AudioFrame* audio_frame_for_mixing);
+
+  // Upper bounds of the mixing buffer: up to 8 channels of 10 ms at 48 kHz.
+  static constexpr size_t kMaximumNumberOfChannels = 8;
+  static constexpr size_t kMaximumChannelSize = 48 * 10;
+
+  using MixingBuffer = std::array<std::array<float, kMaximumChannelSize>,
+                                  kMaximumNumberOfChannels>;
+
+ private:
+  void LogMixingStats(rtc::ArrayView<const AudioFrame* const> mix_list,
+                      int sample_rate,
+                      size_t number_of_streams) const;
+
+  std::unique_ptr<ApmDataDumper> data_dumper_;
+  // Heap-allocated: the buffer is large (kMaximumChannelSize * 8 floats).
+  std::unique_ptr<MixingBuffer> mixing_buffer_;
+  Limiter limiter_;
+  const bool use_limiter_;
+  // Incremented per Combine() call; drives once-per-second UMA logging.
+  mutable int uma_logging_counter_ = 0;
+};
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_MIXER_FRAME_COMBINER_H_
diff --git a/third_party/libwebrtc/modules/audio_mixer/frame_combiner_unittest.cc b/third_party/libwebrtc/modules/audio_mixer/frame_combiner_unittest.cc
new file mode 100644
index 0000000000..fa1fef325c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_mixer/frame_combiner_unittest.cc
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_mixer/frame_combiner.h"
+
+#include <cstdint>
+#include <initializer_list>
+#include <numeric>
+#include <string>
+#include <type_traits>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/rtp_packet_info.h"
+#include "api/rtp_packet_infos.h"
+#include "audio/utility/audio_frame_operations.h"
+#include "modules/audio_mixer/gain_change_calculator.h"
+#include "modules/audio_mixer/sine_wave_generator.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+using ::testing::ElementsAreArray;
+using ::testing::IsEmpty;
+using ::testing::UnorderedElementsAreArray;
+
+using LimiterType = FrameCombiner::LimiterType;
+
+// One configuration for the gain-smoothness test: limiter on/off, mixing
+// rate/channels, and the sine-wave frequency fed through the combiner.
+struct FrameCombinerConfig {
+  bool use_limiter;
+  int sample_rate_hz;
+  int number_of_channels;
+  float wave_frequency;
+};
+
+// Builds a SCOPED_TRACE label from the rate/channel/source parameters.
+std::string ProduceDebugText(int sample_rate_hz,
+                             int number_of_channels,
+                             int number_of_sources) {
+  rtc::StringBuilder ss;
+  ss << "Sample rate: " << sample_rate_hz << " ,";
+  ss << "number of channels: " << number_of_channels << " ,";
+  ss << "number of sources: " << number_of_sources;
+  return ss.Release();
+}
+
+// Builds a SCOPED_TRACE label describing a FrameCombinerConfig.
+std::string ProduceDebugText(const FrameCombinerConfig& config) {
+  rtc::StringBuilder ss;
+  ss << "Sample rate: " << config.sample_rate_hz << " ,";
+  ss << "number of channels: " << config.number_of_channels << " ,";
+  ss << "limiter active: " << (config.use_limiter ? "on" : "off") << " ,";
+  ss << "wave frequency: " << config.wave_frequency << " ,";
+  return ss.Release();
+}
+
+// Fixture frames shared by all tests in this file; reinitialized by
+// SetUpFrames before each use.
+AudioFrame frame1;
+AudioFrame frame2;
+
+// Resets both shared frames to 10 ms of active speech at the given rate and
+// channel count, and attaches distinct RtpPacketInfos (one info on frame1,
+// two on frame2) so tests can verify packet-info propagation.
+void SetUpFrames(int sample_rate_hz, int number_of_channels) {
+  RtpPacketInfo packet_info1(
+      /*ssrc=*/1001, /*csrcs=*/{}, /*rtp_timestamp=*/1000,
+      /*audio_level=*/absl::nullopt, /*absolute_capture_time=*/absl::nullopt,
+      /*receive_time_ms=*/1);
+  RtpPacketInfo packet_info2(
+      /*ssrc=*/4004, /*csrcs=*/{}, /*rtp_timestamp=*/1234,
+      /*audio_level=*/absl::nullopt, /*absolute_capture_time=*/absl::nullopt,
+      /*receive_time_ms=*/2);
+  RtpPacketInfo packet_info3(
+      /*ssrc=*/7007, /*csrcs=*/{}, /*rtp_timestamp=*/1333,
+      /*audio_level=*/absl::nullopt, /*absolute_capture_time=*/absl::nullopt,
+      /*receive_time_ms=*/2);
+
+  frame1.packet_infos_ = RtpPacketInfos({packet_info1});
+  frame2.packet_infos_ = RtpPacketInfos({packet_info2, packet_info3});
+
+  for (auto* frame : {&frame1, &frame2}) {
+    frame->UpdateFrame(0, nullptr, rtc::CheckedDivExact(sample_rate_hz, 100),
+                       sample_rate_hz, AudioFrame::kNormalSpeech,
+                       AudioFrame::kVadActive, number_of_channels);
+  }
+}
+} // namespace
+
+// The limiter requires sample rate divisible by 2000.
+// Smoke test: with the limiter enabled, Combine() must accept 0-2 frames
+// over a grid of rates and channel counts without crashing.
+TEST(FrameCombiner, BasicApiCallsLimiter) {
+  FrameCombiner combiner(true);
+  for (const int rate : {8000, 18000, 34000, 48000}) {
+    for (const int number_of_channels : {1, 2, 4, 8}) {
+      const std::vector<AudioFrame*> all_frames = {&frame1, &frame2};
+      SetUpFrames(rate, number_of_channels);
+
+      for (const int number_of_frames : {0, 1, 2}) {
+        SCOPED_TRACE(
+            ProduceDebugText(rate, number_of_channels, number_of_frames));
+        const std::vector<AudioFrame*> frames_to_combine(
+            all_frames.begin(), all_frames.begin() + number_of_frames);
+        AudioFrame audio_frame_for_mixing;
+        combiner.Combine(frames_to_combine, number_of_channels, rate,
+                         frames_to_combine.size(), &audio_frame_for_mixing);
+      }
+    }
+  }
+}
+
+// The RtpPacketInfos field of the mixed packet should contain the union of the
+// RtpPacketInfos from the frames that were actually mixed.
+TEST(FrameCombiner, ContainsAllRtpPacketInfos) {
+  static constexpr int kSampleRateHz = 48000;
+  static constexpr int kNumChannels = 1;
+  FrameCombiner combiner(true);
+  const std::vector<AudioFrame*> all_frames = {&frame1, &frame2};
+  SetUpFrames(kSampleRateHz, kNumChannels);
+
+  for (const int number_of_frames : {0, 1, 2}) {
+    SCOPED_TRACE(
+        ProduceDebugText(kSampleRateHz, kNumChannels, number_of_frames));
+    const std::vector<AudioFrame*> frames_to_combine(
+        all_frames.begin(), all_frames.begin() + number_of_frames);
+
+    // Expected result: the union of the infos on the frames actually mixed.
+    std::vector<RtpPacketInfo> packet_infos;
+    for (const auto& frame : frames_to_combine) {
+      packet_infos.insert(packet_infos.end(), frame->packet_infos_.begin(),
+                          frame->packet_infos_.end());
+    }
+
+    AudioFrame audio_frame_for_mixing;
+    combiner.Combine(frames_to_combine, kNumChannels, kSampleRateHz,
+                     frames_to_combine.size(), &audio_frame_for_mixing);
+    EXPECT_THAT(audio_frame_for_mixing.packet_infos_,
+                UnorderedElementsAreArray(packet_infos));
+  }
+}
+
+// There are DCHECKs in place to check for invalid parameters.
+// Combining with more channels than FrameCombiner supports must trip a
+// DCHECK in debug builds; release builds must survive the call.
+TEST(FrameCombinerDeathTest, DebugBuildCrashesWithManyChannels) {
+  FrameCombiner combiner(true);
+  for (const int rate : {8000, 18000, 34000, 48000}) {
+    for (const int number_of_channels : {10, 20, 21}) {
+      // Skip combinations that would overflow an AudioFrame's payload.
+      if (static_cast<size_t>(rate / 100 * number_of_channels) >
+          AudioFrame::kMaxDataSizeSamples) {
+        continue;
+      }
+      const std::vector<AudioFrame*> all_frames = {&frame1, &frame2};
+      SetUpFrames(rate, number_of_channels);
+
+      const int number_of_frames = 2;
+      SCOPED_TRACE(
+          ProduceDebugText(rate, number_of_channels, number_of_frames));
+      const std::vector<AudioFrame*> frames_to_combine(
+          all_frames.begin(), all_frames.begin() + number_of_frames);
+      AudioFrame audio_frame_for_mixing;
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+      EXPECT_DEATH(
+          combiner.Combine(frames_to_combine, number_of_channels, rate,
+                           frames_to_combine.size(), &audio_frame_for_mixing),
+          "");
+#elif !RTC_DCHECK_IS_ON
+      combiner.Combine(frames_to_combine, number_of_channels, rate,
+                       frames_to_combine.size(), &audio_frame_for_mixing);
+#endif
+    }
+  }
+}
+
+// Combining at rates above the supported maximum (48 kHz) must trip a DCHECK
+// in debug builds; release builds must survive the call.
+TEST(FrameCombinerDeathTest, DebugBuildCrashesWithHighRate) {
+  FrameCombiner combiner(true);
+  for (const int rate : {50000, 96000, 128000, 196000}) {
+    for (const int number_of_channels : {1, 2, 3}) {
+      // Skip combinations that would overflow an AudioFrame's payload.
+      if (static_cast<size_t>(rate / 100 * number_of_channels) >
+          AudioFrame::kMaxDataSizeSamples) {
+        continue;
+      }
+      const std::vector<AudioFrame*> all_frames = {&frame1, &frame2};
+      SetUpFrames(rate, number_of_channels);
+
+      const int number_of_frames = 2;
+      SCOPED_TRACE(
+          ProduceDebugText(rate, number_of_channels, number_of_frames));
+      const std::vector<AudioFrame*> frames_to_combine(
+          all_frames.begin(), all_frames.begin() + number_of_frames);
+      AudioFrame audio_frame_for_mixing;
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+      EXPECT_DEATH(
+          combiner.Combine(frames_to_combine, number_of_channels, rate,
+                           frames_to_combine.size(), &audio_frame_for_mixing),
+          "");
+#elif !RTC_DCHECK_IS_ON
+      combiner.Combine(frames_to_combine, number_of_channels, rate,
+                       frames_to_combine.size(), &audio_frame_for_mixing);
+#endif
+    }
+  }
+}
+
+// With no limiter, the rate has to be divisible by 100 since we use
+// 10 ms frames.
+// Smoke test: without the limiter, Combine() must accept 0-2 frames at any
+// rate divisible by 100 (non-native rates included).
+TEST(FrameCombiner, BasicApiCallsNoLimiter) {
+  FrameCombiner combiner(false);
+  for (const int rate : {8000, 10000, 11000, 32000, 44100}) {
+    for (const int number_of_channels : {1, 2, 4, 8}) {
+      const std::vector<AudioFrame*> all_frames = {&frame1, &frame2};
+      SetUpFrames(rate, number_of_channels);
+
+      for (const int number_of_frames : {0, 1, 2}) {
+        SCOPED_TRACE(
+            ProduceDebugText(rate, number_of_channels, number_of_frames));
+        const std::vector<AudioFrame*> frames_to_combine(
+            all_frames.begin(), all_frames.begin() + number_of_frames);
+        AudioFrame audio_frame_for_mixing;
+        combiner.Combine(frames_to_combine, number_of_channels, rate,
+                         frames_to_combine.size(), &audio_frame_for_mixing);
+      }
+    }
+  }
+}
+
+// An empty mix list must yield all-zero samples and no packet infos.
+TEST(FrameCombiner, CombiningZeroFramesShouldProduceSilence) {
+  FrameCombiner combiner(false);
+  for (const int rate : {8000, 10000, 11000, 32000, 44100}) {
+    for (const int number_of_channels : {1, 2}) {
+      SCOPED_TRACE(ProduceDebugText(rate, number_of_channels, 0));
+
+      AudioFrame audio_frame_for_mixing;
+
+      const std::vector<AudioFrame*> frames_to_combine;
+      combiner.Combine(frames_to_combine, number_of_channels, rate,
+                       frames_to_combine.size(), &audio_frame_for_mixing);
+      const int16_t* audio_frame_for_mixing_data =
+          audio_frame_for_mixing.data();
+      const std::vector<int16_t> mixed_data(
+          audio_frame_for_mixing_data,
+          audio_frame_for_mixing_data + number_of_channels * rate / 100);
+
+      const std::vector<int16_t> expected(number_of_channels * rate / 100, 0);
+      EXPECT_EQ(mixed_data, expected);
+      EXPECT_THAT(audio_frame_for_mixing.packet_infos_, IsEmpty());
+    }
+  }
+}
+
+// A single-frame mix must pass the samples through unchanged (the single-
+// stream fast path copies them verbatim) and keep the frame's packet infos.
+TEST(FrameCombiner, CombiningOneFrameShouldNotChangeFrame) {
+  FrameCombiner combiner(false);
+  for (const int rate : {8000, 10000, 11000, 32000, 44100}) {
+    for (const int number_of_channels : {1, 2, 4, 8, 10}) {
+      SCOPED_TRACE(ProduceDebugText(rate, number_of_channels, 1));
+
+      AudioFrame audio_frame_for_mixing;
+
+      // Fill frame1 with a recognizable ramp 0, 1, 2, ...
+      SetUpFrames(rate, number_of_channels);
+      int16_t* frame1_data = frame1.mutable_data();
+      std::iota(frame1_data, frame1_data + number_of_channels * rate / 100, 0);
+      const std::vector<AudioFrame*> frames_to_combine = {&frame1};
+      combiner.Combine(frames_to_combine, number_of_channels, rate,
+                       frames_to_combine.size(), &audio_frame_for_mixing);
+
+      const int16_t* audio_frame_for_mixing_data =
+          audio_frame_for_mixing.data();
+      const std::vector<int16_t> mixed_data(
+          audio_frame_for_mixing_data,
+          audio_frame_for_mixing_data + number_of_channels * rate / 100);
+
+      std::vector<int16_t> expected(number_of_channels * rate / 100);
+      std::iota(expected.begin(), expected.end(), 0);
+      EXPECT_EQ(mixed_data, expected);
+      EXPECT_THAT(audio_frame_for_mixing.packet_infos_,
+                  ElementsAreArray(frame1.packet_infos_));
+    }
+  }
+}
+
+// Send a sine wave through the FrameCombiner, and check that the
+// difference between input and output varies smoothly. Also check
+// that it is inside reasonable bounds. This is to catch issues like
+// chromium:695993 and chromium:816875.
+TEST(FrameCombiner, GainCurveIsSmoothForAlternatingNumberOfStreams) {
+  // Rates are divisible by 2000 when limiter is active.
+  std::vector<FrameCombinerConfig> configs = {
+      {false, 30100, 2, 50.f},  {false, 16500, 1, 3200.f},
+      {true, 8000, 1, 3200.f},  {true, 16000, 1, 50.f},
+      {true, 18000, 8, 3200.f}, {true, 10000, 2, 50.f},
+  };
+
+  for (const auto& config : configs) {
+    SCOPED_TRACE(ProduceDebugText(config));
+
+    FrameCombiner combiner(config.use_limiter);
+
+    constexpr int16_t wave_amplitude = 30000;
+    SineWaveGenerator wave_generator(config.wave_frequency, wave_amplitude);
+
+    GainChangeCalculator change_calculator;
+    float cumulative_change = 0.f;
+
+    constexpr size_t iterations = 100;
+
+    for (size_t i = 0; i < iterations; ++i) {
+      SetUpFrames(config.sample_rate_hz, config.number_of_channels);
+      wave_generator.GenerateNextFrame(&frame1);
+      AudioFrameOperations::Mute(&frame2);
+
+      // Alternate between one and two frames so the mixed stream count
+      // changes every iteration, which is what stressed the limiter in
+      // chromium:695993 and chromium:816875.
+      std::vector<AudioFrame*> frames_to_combine = {&frame1};
+      if (i % 2 == 0) {
+        frames_to_combine.push_back(&frame2);
+      }
+      const size_t number_of_samples =
+          frame1.samples_per_channel_ * config.number_of_channels;
+
+      // Ensures limiter is on if 'use_limiter'.
+      constexpr size_t number_of_streams = 2;
+      AudioFrame audio_frame_for_mixing;
+      combiner.Combine(frames_to_combine, config.number_of_channels,
+                       config.sample_rate_hz, number_of_streams,
+                       &audio_frame_for_mixing);
+      cumulative_change += change_calculator.CalculateGainChange(
+          rtc::ArrayView<const int16_t>(frame1.data(), number_of_samples),
+          rtc::ArrayView<const int16_t>(audio_frame_for_mixing.data(),
+                                        number_of_samples));
+    }
+
+    // Check that the gain doesn't vary too much.
+    EXPECT_LT(cumulative_change, 10);
+
+    // Check that the latest gain is within reasonable bounds. It
+    // should be slightly less than 1.
+    EXPECT_LT(0.9f, change_calculator.LatestGain());
+    EXPECT_LT(change_calculator.LatestGain(), 1.01f);
+  }
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_mixer/g3doc/index.md b/third_party/libwebrtc/modules/audio_mixer/g3doc/index.md
new file mode 100644
index 0000000000..4ced289bf8
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_mixer/g3doc/index.md
@@ -0,0 +1,54 @@
+<?% config.freshness.owner = 'alessiob' %?>
+<?% config.freshness.reviewed = '2021-04-21' %?>
+
+# The WebRTC Audio Mixer Module
+
+The WebRTC audio mixer module is responsible for mixing multiple incoming audio
+streams (sources) into a single audio stream (mix). It works with 10 ms frames,
+it supports sample rates up to 48 kHz and up to 8 audio channels. The API is
+defined in
+[`api/audio/audio_mixer.h`](https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/api/audio/audio_mixer.h)
+and it includes the definition of
+[`AudioMixer::Source`](https://source.chromium.org/search?q=symbol:AudioMixer::Source%20file:third_party%2Fwebrtc%2Fapi%2Faudio%2Faudio_mixer.h),
+which describes an incoming audio stream, and the definition of
+[`AudioMixer`](https://source.chromium.org/search?q=symbol:AudioMixer%20file:third_party%2Fwebrtc%2Fapi%2Faudio%2Faudio_mixer.h),
+which operates on a collection of
+[`AudioMixer::Source`](https://source.chromium.org/search?q=symbol:AudioMixer::Source%20file:third_party%2Fwebrtc%2Fapi%2Faudio%2Faudio_mixer.h)
+objects to produce a mix.
+
+## AudioMixer::Source
+
+A source has different characteristics (e.g., sample rate, number of channels,
+muted state) and is identified by an SSRC[^1].
+[`AudioMixer::Source::GetAudioFrameWithInfo()`](https://source.chromium.org/search?q=symbol:AudioMixer::Source::GetAudioFrameWithInfo%20file:third_party%2Fwebrtc%2Fapi%2Faudio%2Faudio_mixer.h)
+is used to retrieve the next 10 ms chunk of audio to be mixed.
+
+[^1]: A synchronization source (SSRC) is the source of a stream of RTP packets,
+ identified by a 32-bit numeric SSRC identifier carried in the RTP header
+ so as not to be dependent upon the network address (see
+ [RFC 3550](https://tools.ietf.org/html/rfc3550#section-3)).
+
+## AudioMixer
+
+The interface allows adding and removing sources, and the
+[`AudioMixer::Mix()`](https://source.chromium.org/search?q=symbol:AudioMixer::Mix%20file:third_party%2Fwebrtc%2Fapi%2Faudio%2Faudio_mixer.h)
+method generates a mix with the desired number of channels.
+
+## WebRTC implementation
+
+The interface is implemented in different parts of WebRTC:
+
+* [`AudioMixer::Source`](https://source.chromium.org/search?q=symbol:AudioMixer::Source%20file:third_party%2Fwebrtc%2Fapi%2Faudio%2Faudio_mixer.h):
+ [`audio/audio_receive_stream.h`](https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/audio/audio_receive_stream.h)
+* [`AudioMixer`](https://source.chromium.org/search?q=symbol:AudioMixer%20file:third_party%2Fwebrtc%2Fapi%2Faudio%2Faudio_mixer.h):
+ [`modules/audio_mixer/audio_mixer_impl.h`](https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_mixer/audio_mixer_impl.h)
+
+[`AudioMixer`](https://source.chromium.org/search?q=symbol:AudioMixer%20file:third_party%2Fwebrtc%2Fapi%2Faudio%2Faudio_mixer.h)
+is thread-safe. The output sample rate of the generated mix is automatically
+assigned depending on the sample rate of the sources; whereas the number of
+output channels is defined by the caller[^2]. Samples from the non-muted sources
+are summed up and then a limiter is used to apply soft-clipping when needed.
+
+[^2]: [`audio/utility/channel_mixer.h`](https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/audio/utility/channel_mixer.h)
+ is used to mix channels in the non-trivial cases - i.e., if the number of
+ channels for a source or the mix is greater than 3.
diff --git a/third_party/libwebrtc/modules/audio_mixer/gain_change_calculator.cc b/third_party/libwebrtc/modules/audio_mixer/gain_change_calculator.cc
new file mode 100644
index 0000000000..dbd0945239
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_mixer/gain_change_calculator.cc
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_mixer/gain_change_calculator.h"
+
+#include <math.h>
+
+#include <cstdlib>
+#include <vector>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
namespace {
// Minimum input sample magnitude for which out[i] / in[i] is treated as a
// numerically reliable gain estimate (see CalculateGain below).
constexpr int16_t kReliabilityThreshold = 100;
}  // namespace
+
+float GainChangeCalculator::CalculateGainChange(
+ rtc::ArrayView<const int16_t> in,
+ rtc::ArrayView<const int16_t> out) {
+ RTC_DCHECK_EQ(in.size(), out.size());
+
+ std::vector<float> gain(in.size());
+ CalculateGain(in, out, gain);
+ return CalculateDifferences(gain);
+}
+
// Returns the most recent gain estimate computed from samples whose input
// magnitude reached kReliabilityThreshold; 1.0 until one has been observed.
float GainChangeCalculator::LatestGain() const {
  return last_reliable_gain_;
}
+
+void GainChangeCalculator::CalculateGain(rtc::ArrayView<const int16_t> in,
+ rtc::ArrayView<const int16_t> out,
+ rtc::ArrayView<float> gain) {
+ RTC_DCHECK_EQ(in.size(), out.size());
+ RTC_DCHECK_EQ(in.size(), gain.size());
+
+ for (size_t i = 0; i < in.size(); ++i) {
+ if (std::abs(in[i]) >= kReliabilityThreshold) {
+ last_reliable_gain_ = out[i] / static_cast<float>(in[i]);
+ }
+ gain[i] = last_reliable_gain_;
+ }
+}
+
+float GainChangeCalculator::CalculateDifferences(
+ rtc::ArrayView<const float> values) {
+ float res = 0;
+ for (float f : values) {
+ res += fabs(f - last_value_);
+ last_value_ = f;
+ }
+ return res;
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_mixer/gain_change_calculator.h b/third_party/libwebrtc/modules/audio_mixer/gain_change_calculator.h
new file mode 100644
index 0000000000..3dde9be61e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_mixer/gain_change_calculator.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_MIXER_GAIN_CHANGE_CALCULATOR_H_
+#define MODULES_AUDIO_MIXER_GAIN_CHANGE_CALCULATOR_H_
+
+#include <stdint.h>
+
+#include "api/array_view.h"
+
+namespace webrtc {
+
// Estimates the time-varying gain applied between an input and an output
// signal (used in tests to verify limiter behavior). Stateful: the last
// gain estimate and last processed value are carried across calls, so
// consecutive frames are analyzed as one continuous stream.
class GainChangeCalculator {
 public:
  // The 'out' signal is assumed to be produced from 'in' by applying
  // a smoothly varying gain. This method computes variations of the
  // gain and handles special cases when the samples are small.
  float CalculateGainChange(rtc::ArrayView<const int16_t> in,
                            rtc::ArrayView<const int16_t> out);

  // Returns the most recent reliable gain estimate (1.0 before any
  // sufficiently large input sample has been seen).
  float LatestGain() const;

 private:
  // Computes per-sample gain estimates out[i] / in[i], reusing the last
  // reliable estimate when in[i] is too small for a stable ratio.
  void CalculateGain(rtc::ArrayView<const int16_t> in,
                     rtc::ArrayView<const int16_t> out,
                     rtc::ArrayView<float> gain);

  // Returns the sum of absolute sample-to-sample differences of `values`,
  // continuing from the final value of the previous call.
  float CalculateDifferences(rtc::ArrayView<const float> values);

  // Last element processed by CalculateDifferences().
  float last_value_ = 0.f;
  // Latest gain computed from a sample above the reliability threshold.
  float last_reliable_gain_ = 1.0f;
};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_MIXER_GAIN_CHANGE_CALCULATOR_H_
diff --git a/third_party/libwebrtc/modules/audio_mixer/output_rate_calculator.h b/third_party/libwebrtc/modules/audio_mixer/output_rate_calculator.h
new file mode 100644
index 0000000000..46b65a8b57
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_mixer/output_rate_calculator.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_MIXER_OUTPUT_RATE_CALCULATOR_H_
+#define MODULES_AUDIO_MIXER_OUTPUT_RATE_CALCULATOR_H_
+
+#include <vector>
+
+#include "api/array_view.h"
+
+namespace webrtc {
+
+// Decides the sample rate of a mixing iteration given the preferred
+// sample rates of the sources.
+class OutputRateCalculator {
+ public:
+ virtual int CalculateOutputRateFromRange(
+ rtc::ArrayView<const int> preferred_sample_rates) = 0;
+
+ virtual ~OutputRateCalculator() {}
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_MIXER_OUTPUT_RATE_CALCULATOR_H_
diff --git a/third_party/libwebrtc/modules/audio_mixer/sine_wave_generator.cc b/third_party/libwebrtc/modules/audio_mixer/sine_wave_generator.cc
new file mode 100644
index 0000000000..591fe14e8c
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_mixer/sine_wave_generator.cc
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_mixer/sine_wave_generator.h"
+
+#include <math.h>
+#include <stddef.h>
+
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+
namespace {
// Single-precision pi, used for the per-sample phase increment below.
constexpr float kPi = 3.14159265f;
}  // namespace
+
+void SineWaveGenerator::GenerateNextFrame(AudioFrame* frame) {
+ RTC_DCHECK(frame);
+ int16_t* frame_data = frame->mutable_data();
+ for (size_t i = 0; i < frame->samples_per_channel_; ++i) {
+ for (size_t ch = 0; ch < frame->num_channels_; ++ch) {
+ frame_data[frame->num_channels_ * i + ch] =
+ rtc::saturated_cast<int16_t>(amplitude_ * sinf(phase_));
+ }
+ phase_ += wave_frequency_hz_ * 2 * kPi / frame->sample_rate_hz_;
+ }
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_mixer/sine_wave_generator.h b/third_party/libwebrtc/modules/audio_mixer/sine_wave_generator.h
new file mode 100644
index 0000000000..ec0fcd24bd
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_mixer/sine_wave_generator.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_MIXER_SINE_WAVE_GENERATOR_H_
+#define MODULES_AUDIO_MIXER_SINE_WAVE_GENERATOR_H_
+
+#include <stdint.h>
+
+#include "api/audio/audio_frame.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
// Fills AudioFrames with a fixed-frequency, fixed-amplitude sine wave.
// The oscillator phase is carried across calls, so successive frames form
// a continuous waveform. Used by the audio mixer tests as a signal source.
class SineWaveGenerator {
 public:
  SineWaveGenerator(float wave_frequency_hz, int16_t amplitude)
      : wave_frequency_hz_(wave_frequency_hz), amplitude_(amplitude) {
    RTC_DCHECK_GT(wave_frequency_hz, 0);
  }

  // Produces appropriate output based on frame->num_channels_,
  // frame->sample_rate_hz_.
  void GenerateNextFrame(AudioFrame* frame);

 private:
  float phase_ = 0.f;  // Current oscillator phase, in radians.
  const float wave_frequency_hz_;
  const int16_t amplitude_;  // Peak amplitude of the generated wave.
};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_MIXER_SINE_WAVE_GENERATOR_H_