summaryrefslogtreecommitdiffstats
path: root/third_party/libwebrtc/webrtc/voice_engine
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-28 14:29:10 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-28 14:29:10 +0000
commit2aa4a82499d4becd2284cdb482213d541b8804dd (patch)
treeb80bf8bf13c3766139fbacc530efd0dd9d54394c /third_party/libwebrtc/webrtc/voice_engine
parentInitial commit. (diff)
downloadfirefox-2aa4a82499d4becd2284cdb482213d541b8804dd.tar.xz
firefox-2aa4a82499d4becd2284cdb482213d541b8804dd.zip
Adding upstream version 86.0.1.upstream/86.0.1upstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/libwebrtc/webrtc/voice_engine')
-rw-r--r--third_party/libwebrtc/webrtc/voice_engine/BUILD.gn155
-rw-r--r--third_party/libwebrtc/webrtc/voice_engine/DEPS14
-rw-r--r--third_party/libwebrtc/webrtc/voice_engine/OWNERS10
-rw-r--r--third_party/libwebrtc/webrtc/voice_engine/audio_level.cc111
-rw-r--r--third_party/libwebrtc/webrtc/voice_engine/audio_level.h60
-rw-r--r--third_party/libwebrtc/webrtc/voice_engine/audio_level_gn/moz.build228
-rw-r--r--third_party/libwebrtc/webrtc/voice_engine/channel.cc2019
-rw-r--r--third_party/libwebrtc/webrtc/voice_engine/channel.h492
-rw-r--r--third_party/libwebrtc/webrtc/voice_engine/channel_manager.cc134
-rw-r--r--third_party/libwebrtc/webrtc/voice_engine/channel_manager.h127
-rw-r--r--third_party/libwebrtc/webrtc/voice_engine/channel_proxy.cc431
-rw-r--r--third_party/libwebrtc/webrtc/voice_engine/channel_proxy.h164
-rw-r--r--third_party/libwebrtc/webrtc/voice_engine/channel_unittest.cc15
-rw-r--r--third_party/libwebrtc/webrtc/voice_engine/include/voe_base.h169
-rw-r--r--third_party/libwebrtc/webrtc/voice_engine/include/voe_errors.h165
-rw-r--r--third_party/libwebrtc/webrtc/voice_engine/shared_data.cc85
-rw-r--r--third_party/libwebrtc/webrtc/voice_engine/shared_data.h69
-rw-r--r--third_party/libwebrtc/webrtc/voice_engine/transmit_mixer.cc242
-rw-r--r--third_party/libwebrtc/webrtc/voice_engine/transmit_mixer.h131
-rw-r--r--third_party/libwebrtc/webrtc/voice_engine/transport_feedback_packet_loss_tracker.cc366
-rw-r--r--third_party/libwebrtc/webrtc/voice_engine/transport_feedback_packet_loss_tracker.h141
-rw-r--r--third_party/libwebrtc/webrtc/voice_engine/transport_feedback_packet_loss_tracker_unittest.cc574
-rw-r--r--third_party/libwebrtc/webrtc/voice_engine/utility.cc91
-rw-r--r--third_party/libwebrtc/webrtc/voice_engine/utility.h51
-rw-r--r--third_party/libwebrtc/webrtc/voice_engine/utility_unittest.cc275
-rw-r--r--third_party/libwebrtc/webrtc/voice_engine/voe_base_impl.cc410
-rw-r--r--third_party/libwebrtc/webrtc/voice_engine/voe_base_impl.h112
-rw-r--r--third_party/libwebrtc/webrtc/voice_engine/voe_base_unittest.cc55
-rw-r--r--third_party/libwebrtc/webrtc/voice_engine/voice_engine_defines.h11
-rw-r--r--third_party/libwebrtc/webrtc/voice_engine/voice_engine_gn/moz.build243
-rw-r--r--third_party/libwebrtc/webrtc/voice_engine/voice_engine_impl.cc81
-rw-r--r--third_party/libwebrtc/webrtc/voice_engine/voice_engine_impl.h52
32 files changed, 7283 insertions, 0 deletions
diff --git a/third_party/libwebrtc/webrtc/voice_engine/BUILD.gn b/third_party/libwebrtc/webrtc/voice_engine/BUILD.gn
new file mode 100644
index 0000000000..299c7a3043
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/voice_engine/BUILD.gn
@@ -0,0 +1,155 @@
+# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../webrtc.gni")
+
+rtc_static_library("voice_engine") {
+ sources = [
+ "channel.cc",
+ "channel.h",
+ "channel_manager.cc",
+ "channel_manager.h",
+ "channel_proxy.cc",
+ "channel_proxy.h",
+ "include/voe_base.h",
+ "include/voe_errors.h",
+ "shared_data.cc",
+ "shared_data.h",
+ "transmit_mixer.cc",
+ "transmit_mixer.h",
+ "transport_feedback_packet_loss_tracker.cc",
+ "transport_feedback_packet_loss_tracker.h",
+ "utility.cc",
+ "utility.h",
+ "voe_base_impl.cc",
+ "voe_base_impl.h",
+ "voice_engine_impl.cc",
+ "voice_engine_impl.h",
+ ]
+
+ if (is_win) {
+ defines = [ "WEBRTC_DRIFT_COMPENSATION_SUPPORTED" ]
+
+ cflags = [
+ # TODO(kjellander): Bug 261: fix this warning.
+ "/wd4373", # Virtual function override.
+ ]
+ }
+
+ if (!build_with_chromium && is_clang) {
+ # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
+ suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+ }
+
+ public_deps = [
+ "../modules/audio_coding",
+ ]
+ deps = [
+ ":audio_level",
+ "..:webrtc_common",
+ "../api:array_view",
+ "../api:audio_mixer_api",
+ "../api:call_api",
+ "../api:optional",
+ "../api:refcountedbase",
+ "../api:transport_api",
+ "../api/audio_codecs:audio_codecs_api",
+ "../audio/utility:audio_frame_operations",
+ "../call:rtp_interfaces",
+ "../common_audio",
+ "../logging:rtc_event_log_api",
+ "../modules:module_api",
+ "../modules/audio_coding:audio_format_conversion",
+ "../modules/audio_coding:audio_network_adaptor_config",
+ "../modules/audio_device",
+ "../modules/audio_processing",
+ "../modules/bitrate_controller",
+ "../modules/media_file",
+ "../modules/pacing",
+ "../modules/rtp_rtcp",
+ "../modules/utility",
+ "../rtc_base:rtc_base_approved",
+ "../rtc_base:rtc_task_queue",
+ "../system_wrappers",
+ ]
+
+ if (!build_with_mozilla) {
+ deps += [ "../api:libjingle_peerconnection_api" ]
+ }
+}
+
+rtc_static_library("audio_level") {
+ sources = [
+ "audio_level.cc",
+ "audio_level.h",
+ ]
+
+ deps = [
+ "..:webrtc_common",
+ "../common_audio",
+ "../modules:module_api",
+ "../rtc_base:rtc_base_approved",
+ ]
+}
+
+if (rtc_include_tests) {
+ rtc_test("voice_engine_unittests") {
+ deps = [
+ ":voice_engine",
+ "../api/audio_codecs:builtin_audio_decoder_factory",
+ "../common_audio",
+ "../modules:module_api",
+ "../modules/audio_coding",
+ "../modules/audio_device",
+ "../modules/audio_processing",
+ "../modules/media_file",
+ "../modules/rtp_rtcp",
+ "../modules/utility",
+ "../modules/video_capture:video_capture",
+ "../rtc_base:rtc_base_approved",
+ "../rtc_base:rtc_base_tests_utils",
+ "../system_wrappers",
+ "../test:test_common",
+ "../test:test_main",
+ "../test:video_test_common",
+ "//testing/gmock",
+ "//testing/gtest",
+ ]
+
+ if (is_android) {
+ deps += [ "//testing/android/native_test:native_test_native_code" ]
+ shard_timeout = 900
+ }
+
+ sources = [
+ "channel_unittest.cc",
+ "transport_feedback_packet_loss_tracker_unittest.cc",
+ "utility_unittest.cc",
+ "voe_base_unittest.cc",
+ ]
+
+ data = [
+ "../resources/utility/encapsulated_pcm16b_8khz.wav",
+ "../resources/utility/encapsulated_pcmu_8khz.wav",
+ ]
+
+ if (is_win) {
+ defines = [ "WEBRTC_DRIFT_COMPENSATION_SUPPORTED" ]
+
+ cflags = [
+ # TODO(kjellander): Bug 261: fix this warning.
+ "/wd4373", # Virtual function override.
+ ]
+ }
+
+ if (!build_with_chromium && is_clang) {
+ # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
+ suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+ }
+ }
+}
diff --git a/third_party/libwebrtc/webrtc/voice_engine/DEPS b/third_party/libwebrtc/webrtc/voice_engine/DEPS
new file mode 100644
index 0000000000..c8e9a1cac6
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/voice_engine/DEPS
@@ -0,0 +1,14 @@
+include_rules = [
+ "+audio/utility/audio_frame_operations.h",
+ "+call",
+ "+common_audio",
+ "+logging/rtc_event_log",
+ "+modules/audio_coding",
+ "+modules/audio_device",
+ "+modules/audio_processing",
+ "+modules/media_file",
+ "+modules/pacing",
+ "+modules/rtp_rtcp",
+ "+modules/utility",
+ "+system_wrappers",
+]
diff --git a/third_party/libwebrtc/webrtc/voice_engine/OWNERS b/third_party/libwebrtc/webrtc/voice_engine/OWNERS
new file mode 100644
index 0000000000..0430ede769
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/voice_engine/OWNERS
@@ -0,0 +1,10 @@
+henrikg@webrtc.org
+henrika@webrtc.org
+niklas.enbom@webrtc.org
+solenberg@webrtc.org
+
+
+# These are for the common case of adding or renaming files. If you're doing
+# structural changes, please get a review from a reviewer in this file.
+per-file *.gn=*
+per-file *.gni=*
diff --git a/third_party/libwebrtc/webrtc/voice_engine/audio_level.cc b/third_party/libwebrtc/webrtc/voice_engine/audio_level.cc
new file mode 100644
index 0000000000..57b485546b
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/voice_engine/audio_level.cc
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voice_engine/audio_level.h"
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/include/module_common_types.h"
+
+namespace webrtc {
+namespace voe {
+
+// Number of bars on the indicator.
+// Note that the number of elements is specified because we are indexing it
+// in the range of 0-32
+constexpr int8_t kPermutation[33] = {0, 1, 2, 3, 4, 4, 5, 5, 5, 5, 6,
+ 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8,
+ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9};
+
+AudioLevel::AudioLevel()
+ : abs_max_(0), count_(0), current_level_(0), current_level_full_range_(0) {
+ WebRtcSpl_Init();
+}
+
+AudioLevel::~AudioLevel() {}
+
+int8_t AudioLevel::Level() const {
+ rtc::CritScope cs(&crit_sect_);
+ return current_level_;
+}
+
+int16_t AudioLevel::LevelFullRange() const {
+ rtc::CritScope cs(&crit_sect_);
+ return current_level_full_range_;
+}
+
+void AudioLevel::Clear() {
+ rtc::CritScope cs(&crit_sect_);
+ abs_max_ = 0;
+ count_ = 0;
+ current_level_ = 0;
+ current_level_full_range_ = 0;
+}
+
+double AudioLevel::TotalEnergy() const {
+ rtc::CritScope cs(&crit_sect_);
+ return total_energy_;
+}
+
+double AudioLevel::TotalDuration() const {
+ rtc::CritScope cs(&crit_sect_);
+ return total_duration_;
+}
+
+void AudioLevel::ComputeLevel(const AudioFrame& audioFrame, double duration) {
+ // Check speech level (works for 2 channels as well)
+ int16_t abs_value = audioFrame.muted() ? 0 :
+ WebRtcSpl_MaxAbsValueW16(
+ audioFrame.data(),
+ audioFrame.samples_per_channel_ * audioFrame.num_channels_);
+
+ // Protect member access using a lock since this method is called on a
+ // dedicated audio thread in the RecordedDataIsAvailable() callback.
+ rtc::CritScope cs(&crit_sect_);
+
+ if (abs_value > abs_max_)
+ abs_max_ = abs_value;
+
+ // Update level approximately 10 times per second
+ if (count_++ == kUpdateFrequency) {
+ current_level_full_range_ = abs_max_;
+
+ count_ = 0;
+
+ // Highest value for a int16_t is 0x7fff = 32767
+ // Divide with 1000 to get in the range of 0-32 which is the range of the
+ // permutation vector
+ int32_t position = abs_max_ / 1000;
+
+ // Make it less likely that the bar stays at position 0. I.e. only if it's
+ // in the range 0-250 (instead of 0-1000)
+ if ((position == 0) && (abs_max_ > 250)) {
+ position = 1;
+ }
+ current_level_ = kPermutation[position];
+
+ // Decay the absolute maximum (divide by 4)
+ abs_max_ >>= 2;
+ }
+
+ // See the description for "totalAudioEnergy" in the WebRTC stats spec
+ // (https://w3c.github.io/webrtc-stats/#dom-rtcmediastreamtrackstats-totalaudioenergy)
+ // for an explanation of these formulas. In short, we need a value that can
+ // be used to compute RMS audio levels over different time intervals, by
+ // taking the difference between the results from two getStats calls. To do
+ // this, the value needs to be of units "squared sample value * time".
+ double additional_energy =
+ static_cast<double>(current_level_full_range_) / INT16_MAX;
+ additional_energy *= additional_energy;
+ total_energy_ += additional_energy * duration;
+ total_duration_ += duration;
+}
+
+} // namespace voe
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/voice_engine/audio_level.h b/third_party/libwebrtc/webrtc/voice_engine/audio_level.h
new file mode 100644
index 0000000000..a1951edba9
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/voice_engine/audio_level.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VOICE_ENGINE_AUDIO_LEVEL_H_
+#define VOICE_ENGINE_AUDIO_LEVEL_H_
+
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/thread_annotations.h"
+#include "typedefs.h" // NOLINT(build/include)
+
+namespace webrtc {
+
+class AudioFrame;
+namespace voe {
+
+class AudioLevel {
+ public:
+ AudioLevel();
+ ~AudioLevel();
+
+ // Called on "API thread(s)" from APIs like VoEBase::CreateChannel(),
+ // VoEBase::StopSend(), VoEVolumeControl::GetSpeechOutputLevel().
+ int8_t Level() const;
+ int16_t LevelFullRange() const;
+ void Clear();
+ // See the description for "totalAudioEnergy" in the WebRTC stats spec
+ // (https://w3c.github.io/webrtc-stats/#dom-rtcmediastreamtrackstats-totalaudioenergy)
+ double TotalEnergy() const;
+ double TotalDuration() const;
+
+ // Called on a native capture audio thread (platform dependent) from the
+ // AudioTransport::RecordedDataIsAvailable() callback.
+ // In Chrome, this method is called on the AudioInputDevice thread.
+ void ComputeLevel(const AudioFrame& audioFrame, double duration);
+
+ private:
+ enum { kUpdateFrequency = 10 };
+
+ rtc::CriticalSection crit_sect_;
+
+ int16_t abs_max_ RTC_GUARDED_BY(crit_sect_);
+ int16_t count_ RTC_GUARDED_BY(crit_sect_);
+ int8_t current_level_ RTC_GUARDED_BY(crit_sect_);
+ int16_t current_level_full_range_ RTC_GUARDED_BY(crit_sect_);
+
+ double total_energy_ RTC_GUARDED_BY(crit_sect_) = 0.0;
+ double total_duration_ RTC_GUARDED_BY(crit_sect_) = 0.0;
+};
+
+} // namespace voe
+} // namespace webrtc
+
+#endif // VOICE_ENGINE_AUDIO_LEVEL_H_
diff --git a/third_party/libwebrtc/webrtc/voice_engine/audio_level_gn/moz.build b/third_party/libwebrtc/webrtc/voice_engine/audio_level_gn/moz.build
new file mode 100644
index 0000000000..d0ff0ed948
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/voice_engine/audio_level_gn/moz.build
@@ -0,0 +1,228 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["CHROMIUM_BUILD"] = True
+DEFINES["V8_DEPRECATION_WARNINGS"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_RESTRICT_LOGGING"] = True
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/ipc/glue",
+ "/third_party/libwebrtc/webrtc/",
+ "/third_party/libwebrtc/webrtc/common_audio/resampler/include/",
+ "/third_party/libwebrtc/webrtc/common_audio/signal_processing/include/",
+ "/third_party/libwebrtc/webrtc/common_audio/vad/include/"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/webrtc/voice_engine/audio_level.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+ DEFINES["WTF_USE_DYNAMIC_ANNOTATIONS"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION"] = "r12b"
+ DEFINES["DISABLE_NACL"] = True
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["NO_TCMALLOC"] = True
+ DEFINES["USE_OPENSSL_CERTS"] = "1"
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["__GNU_SOURCE"] = "1"
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["NO_TCMALLOC"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORE"] = "0"
+
+ OS_LIBS += [
+ "-framework Foundation"
+ ]
+
+if CONFIG["OS_TARGET"] == "DragonFly":
+
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+
+if CONFIG["OS_TARGET"] == "FreeBSD":
+
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+
+ OS_LIBS += [
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "NetBSD":
+
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NO_TCMALLOC"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "1"
+ DEFINES["UNICODE"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_CRT_SECURE_NO_WARNINGS"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_USING_V110_SDK71_"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if not CONFIG["MOZ_DEBUG"] and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_FORTIFY_SOURCE"] = "2"
+
+if not CONFIG["MOZ_DEBUG"] and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_FORTIFY_SOURCE"] = "2"
+
+if not CONFIG["MOZ_DEBUG"] and CONFIG["OS_TARGET"] == "DragonFly":
+
+ DEFINES["_FORTIFY_SOURCE"] = "2"
+
+if not CONFIG["MOZ_DEBUG"] and CONFIG["OS_TARGET"] == "FreeBSD":
+
+ DEFINES["_FORTIFY_SOURCE"] = "2"
+
+if not CONFIG["MOZ_DEBUG"] and CONFIG["OS_TARGET"] == "NetBSD":
+
+ DEFINES["_FORTIFY_SOURCE"] = "2"
+
+if not CONFIG["MOZ_DEBUG"] and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_FORTIFY_SOURCE"] = "2"
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["CR_XCODE_VERSION"] = "0120"
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["CR_XCODE_VERSION"] = "0920"
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "FreeBSD":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["DISABLE_NACL"] = True
+ DEFINES["NO_TCMALLOC"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "NetBSD":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+Library("audio_level_gn")
diff --git a/third_party/libwebrtc/webrtc/voice_engine/channel.cc b/third_party/libwebrtc/webrtc/voice_engine/channel.cc
new file mode 100644
index 0000000000..fa2989e0bc
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/voice_engine/channel.cc
@@ -0,0 +1,2019 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voice_engine/channel.h"
+
+#include <algorithm>
+#include <map>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "api/array_view.h"
+#include "audio/utility/audio_frame_operations.h"
+#include "call/rtp_transport_controller_send_interface.h"
+#include "logging/rtc_event_log/rtc_event_log.h"
+#include "logging/rtc_event_log/events/rtc_event_audio_playout.h"
+#include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor_config.h"
+#include "modules/audio_coding/codecs/audio_format_conversion.h"
+#include "modules/audio_device/include/audio_device.h"
+#include "modules/audio_processing/include/audio_processing.h"
+#include "modules/include/module_common_types.h"
+#include "modules/pacing/packet_router.h"
+#include "modules/rtp_rtcp/include/receive_statistics.h"
+#include "modules/rtp_rtcp/include/rtp_packet_observer.h"
+#include "modules/rtp_rtcp/include/rtp_payload_registry.h"
+#include "modules/rtp_rtcp/include/rtp_receiver.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "modules/rtp_rtcp/source/rtp_receiver_strategy.h"
+#include "modules/utility/include/process_thread.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/format_macros.h"
+#include "rtc_base/location.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/ptr_util.h"
+#include "rtc_base/rate_limiter.h"
+#include "rtc_base/task_queue.h"
+#include "rtc_base/thread_checker.h"
+#include "rtc_base/timeutils.h"
+#include "system_wrappers/include/field_trial.h"
+#include "system_wrappers/include/metrics.h"
+#include "voice_engine/utility.h"
+
+namespace webrtc {
+namespace voe {
+
+namespace {
+
+constexpr double kAudioSampleDurationSeconds = 0.01;
+constexpr int64_t kMaxRetransmissionWindowMs = 1000;
+constexpr int64_t kMinRetransmissionWindowMs = 30;
+
+// Video Sync.
+constexpr int kVoiceEngineMinMinPlayoutDelayMs = 0;
+constexpr int kVoiceEngineMaxMinPlayoutDelayMs = 10000;
+
+} // namespace
+
+const int kTelephoneEventAttenuationdB = 10;
+
+class RtcEventLogProxy final : public webrtc::RtcEventLog {
+ public:
+ RtcEventLogProxy() : event_log_(nullptr) {}
+
+ bool StartLogging(std::unique_ptr<RtcEventLogOutput> output,
+ int64_t output_period_ms) override {
+ RTC_NOTREACHED();
+ return false;
+ }
+
+ void StopLogging() override { RTC_NOTREACHED(); }
+
+ void Log(std::unique_ptr<RtcEvent> event) override {
+ rtc::CritScope lock(&crit_);
+ if (event_log_) {
+ event_log_->Log(std::move(event));
+ }
+ }
+
+ void SetEventLog(RtcEventLog* event_log) {
+ rtc::CritScope lock(&crit_);
+ event_log_ = event_log;
+ }
+
+ private:
+ rtc::CriticalSection crit_;
+ RtcEventLog* event_log_ RTC_GUARDED_BY(crit_);
+ RTC_DISALLOW_COPY_AND_ASSIGN(RtcEventLogProxy);
+};
+
+class RtcpRttStatsProxy final : public RtcpRttStats {
+ public:
+ RtcpRttStatsProxy() : rtcp_rtt_stats_(nullptr) {}
+
+ void OnRttUpdate(int64_t rtt) override {
+ rtc::CritScope lock(&crit_);
+ if (rtcp_rtt_stats_)
+ rtcp_rtt_stats_->OnRttUpdate(rtt);
+ }
+
+ int64_t LastProcessedRtt() const override {
+ rtc::CritScope lock(&crit_);
+ if (!rtcp_rtt_stats_)
+ return 0;
+ return rtcp_rtt_stats_->LastProcessedRtt();
+ }
+
+ void SetRtcpRttStats(RtcpRttStats* rtcp_rtt_stats) {
+ rtc::CritScope lock(&crit_);
+ rtcp_rtt_stats_ = rtcp_rtt_stats;
+ }
+
+ private:
+ rtc::CriticalSection crit_;
+ RtcpRttStats* rtcp_rtt_stats_ RTC_GUARDED_BY(crit_);
+ RTC_DISALLOW_COPY_AND_ASSIGN(RtcpRttStatsProxy);
+};
+
+// Extend the default RTCP statistics struct with max_jitter, defined as the
+// maximum jitter value seen in an RTCP report block.
+struct ChannelStatistics : public RtcpStatistics {
+ ChannelStatistics() : rtcp(), max_jitter(0) {}
+
+ RtcpStatistics rtcp;
+ uint32_t max_jitter;
+};
+
+// Statistics callback, called at each generation of a new RTCP report block.
+class StatisticsProxy : public RtcpStatisticsCallback,
+ public RtcpPacketTypeCounterObserver {
+ public:
+ StatisticsProxy(uint32_t ssrc) : ssrc_(ssrc) {}
+ virtual ~StatisticsProxy() {}
+
+ void StatisticsUpdated(const RtcpStatistics& statistics,
+ uint32_t ssrc) override {
+ rtc::CritScope cs(&stats_lock_);
+ if (ssrc != ssrc_)
+ return;
+
+ stats_.rtcp = statistics;
+ if (statistics.jitter > stats_.max_jitter) {
+ stats_.max_jitter = statistics.jitter;
+ }
+ }
+
+ void CNameChanged(const char* cname, uint32_t ssrc) override {}
+
+ void SetSSRC(uint32_t ssrc) {
+ rtc::CritScope cs(&stats_lock_);
+ ssrc_ = ssrc;
+ mReceiverReportDerivedStats.clear();
+ mInitialSequenceNumber.reset();
+ }
+
+ ChannelStatistics GetStats() {
+ rtc::CritScope cs(&stats_lock_);
+ return stats_;
+ }
+
+ // These can be created before reports are received so that information
+ // needed to derive certain stats (e.g. PacketsReceived) can be stored.
+ class ReceiverReportDerivedStats {
+ public:
+ // Event handler for incoming RTCP Receiver Reports
+ void UpdateWithReceiverReport(const RTCPReportBlock& aReceiverReport,
+ rtc::Optional<uint32_t> initialSequenceNum,
+ int64_t aRoundTripTime,
+ uint32_t aEncoderFrequencyHz,
+ int64_t aReceptionTime)
+ {
+ if (!mFirstExtendedSequenceNumber && initialSequenceNum) {
+ mFirstExtendedSequenceNumber = *initialSequenceNum;
+ }
+
+ // No initial sequence number available!
+ if (!mFirstExtendedSequenceNumber) {
+ RTC_LOG(LS_WARNING) <<
+ "ReceiverReportDerivedStats::UpdateWithReceiverReport()"
+ " called before a first sequence number is known to the"
+ " StatisticsProxy";
+ // This is as good a guess as we can get if the initial
+ // sequence number is not known
+ mFirstExtendedSequenceNumber = static_cast<uint32_t>(
+ std::max<int64_t>(0, aReceiverReport.extended_highest_sequence_number -
+ aReceiverReport.packets_lost));
+ }
+
+ mReceiverSsrc = aReceiverReport.sender_ssrc;
+ mSenderSsrc = aReceiverReport.source_ssrc;
+ mLatestHighExtendedSequenceNumber = aReceiverReport.extended_highest_sequence_number;
+ mLatestReceiverReportReceptionTime = aReceptionTime;
+ mFractionOfPacketsLostInQ8 = aReceiverReport.fraction_lost;
+ mJitterInSamples = aReceiverReport.jitter;
+ mEncoderFrequencyHz = aEncoderFrequencyHz;
+ mCumulativePacketsLost = aReceiverReport.packets_lost;
+ mLastSenderReportTimestamp = aReceiverReport.last_sender_report_timestamp;
+ mDelaySinceLastSenderReport = aReceiverReport.delay_since_last_sender_report;
+ mRoundTripTime = aRoundTripTime;
+ }
+ bool HasReceivedReport() { return mFirstReceiverReportReceptionTime; }
+    // This is the SSRC of the entity sending the RTCP Receiver Reports.
+    // That is, it is the SSRC of the RTP receiver.
+ uint32_t mReceiverSsrc = 0;
+    // This is the SSRC of the entity receiving the RTCP Receiver Reports.
+    // That is, it is the SSRC of the RTP sender.
+ uint32_t mSenderSsrc = 0;
+ // Reception time for the RTCP packet containing this data
+    // Only available if a receiver report has been received
+ int64_t mLatestReceiverReportReceptionTime = 0;
+    // Reception time for the first RTCP packet containing a
+    // Receiver Report matching mReceiverSsrc.
+ int64_t mFirstReceiverReportReceptionTime = 0;
+    // The total number of packets known to be lost
+ uint32_t mCumulativePacketsLost = 0;
+ // The RTP sender must record the first sequence number used
+ // so that number of packets received can be calculated from ...
+ uint32_t mFirstExtendedSequenceNumber = 0;
+ // The most recent sequence number seen by the receiver at the time
+ // Receiver Report was generated
+ uint32_t mLatestHighExtendedSequenceNumber = 0;
+ int64_t mRoundTripTime = 0;
+ // The amount of jitter measured in MS, derived from the
+ // RTCP reported jitter (measured in frames), and the
+ // effective playout frequency.
+ double JitterMs() const {
+ if (!mEncoderFrequencyHz) {
+ if (!mHasWarnedAboutNoFrequency) {
+ mHasWarnedAboutNoFrequency = true;
+ RTC_LOG(LS_WARNING) <<
+ "ReceiverReportDerivedStats::JitterMs() called before"
+ " the playout frequency is known.";
+ }
+ return 0;
+ }
+ return (mJitterInSamples * 1000) / mEncoderFrequencyHz;
+ }
+ // Fraction of packets lost
+ double FractionOfPacketsLost() const {
+ return (double) mFractionOfPacketsLostInQ8 / 256;
+ }
+ uint32_t PacketsReceived() const {
+ return static_cast<uint32_t>(std::max<int64_t>(0,
+ (int64_t) mLatestHighExtendedSequenceNumber -
+ (mFirstExtendedSequenceNumber + mCumulativePacketsLost)));
+ }
+ private:
+ // The ratio of packets lost to total packets sent expressed
+ // as the dividend in X / 256.
+ uint8_t mFractionOfPacketsLostInQ8 = 0;
+ // Jitter in the RTCP packet is in Time Units,
+ // which is the sample rate of the audio.
+ uint32_t mJitterInSamples = 0;
+ // Use to calculate the jitter
+ uint32_t mEncoderFrequencyHz = 0;
+ // Used to calculate the RTT
+ uint32_t mLastSenderReportTimestamp = 0;
+ // Used to calculate the RTT
+ uint32_t mDelaySinceLastSenderReport = 0;
+ // Only warn about jitter calculation once per instance
+ mutable bool mHasWarnedAboutNoFrequency = false;
+ };
+
+ void RtcpPacketTypesCounterUpdated(uint32_t ssrc,
+ const RtcpPacketTypeCounter& packet_counter) override {
+ rtc::CritScope cs(&stats_lock_);
+ if (ssrc != ssrc_) {
+ return;
+ }
+ packet_counter_ = packet_counter;
+ };
+
+ // Called when we receive RTCP receiver reports
+ void OnIncomingReceiverReports(const ReportBlockList & mReceiverReports,
+ const int64_t aRoundTripTime,
+ const int64_t aReceptionTime) {
+ if (!mReceiverReports.empty()) { // Don't lock if we have nothing to do.
+ rtc::CritScope cs(&stats_lock_);
+ for(const auto& report : mReceiverReports) {
+ // Creates a new report if necessary before updating
+ ReceiverReportDerivedStats newStats;
+ mReceiverReportDerivedStats.emplace(report.source_ssrc, newStats)
+ .first->second.UpdateWithReceiverReport(report,
+ mInitialSequenceNumber,
+ aRoundTripTime,
+ mPlayoutFrequency,
+ aReceptionTime);
+ }
+ }
+ }
+
+ void OnSendCodecFrequencyChanged(uint32_t aFrequency) {
+ rtc::CritScope cs(&stats_lock_);
+ mPlayoutFrequency = aFrequency;
+ }
+
+ void OnInitialSequenceNumberSet(uint32_t aSequenceNumber) {
+ rtc::CritScope cs(&stats_lock_);
+ mInitialSequenceNumber.emplace(aSequenceNumber);
+ mReceiverReportDerivedStats.clear();
+ }
+
+ const rtc::Optional<ReceiverReportDerivedStats>
+ GetReceiverReportDerivedStats(const uint32_t receiverSsrc) const {
+ rtc::CritScope cs(&stats_lock_);
+ const auto& it = mReceiverReportDerivedStats.find(receiverSsrc);
+ if (it != mReceiverReportDerivedStats.end()) {
+ return rtc::Optional<ReceiverReportDerivedStats>(it->second);
+ }
+ return rtc::Optional<ReceiverReportDerivedStats>();
+ }
+
+ void GetPacketTypeCounter(RtcpPacketTypeCounter& aPacketTypeCounter) {
+ rtc::CritScope cs(&stats_lock_);
+ aPacketTypeCounter = packet_counter_;
+ }
+
 private:
  // StatisticsUpdated calls are triggered from threads in the RTP module,
  // while GetStats calls can be triggered from the public voice engine API,
  // hence synchronization is needed.
  rtc::CriticalSection stats_lock_;
  // SSRC this proxy tracks; updates for other SSRCs are ignored.
  uint32_t ssrc_;
  // Latest RTCP-derived channel statistics.
  ChannelStatistics stats_;
  // Latest RTCP packet-type counters for |ssrc_|.
  RtcpPacketTypeCounter packet_counter_;

  // receiver report handling, maps ssrc -> stats
  std::map<uint32_t, ReceiverReportDerivedStats> mReceiverReportDerivedStats;
  // store initial sender sequence number
  rtc::Optional<uint32_t> mInitialSequenceNumber;
  // RTP clock rate (Hz) used to convert jitter/timestamp units.
  uint32_t mPlayoutFrequency;
};
+
// Thread-safe forwarder between the RTP module and a transport-feedback
// observer that may be installed (or replaced) after construction.
// AddPacket() arrives on the pacer thread and OnTransportFeedback() on the
// network thread; both checkers are detached in the constructor so they bind
// lazily to whichever thread calls first.
class TransportFeedbackProxy : public TransportFeedbackObserver {
 public:
  TransportFeedbackProxy() : feedback_observer_(nullptr) {
    pacer_thread_.DetachFromThread();
    network_thread_.DetachFromThread();
  }

  // Installs (or clears, with nullptr) the downstream observer. Must be
  // called on the construction thread.
  void SetTransportFeedbackObserver(
      TransportFeedbackObserver* feedback_observer) {
    RTC_DCHECK(thread_checker_.CalledOnValidThread());
    rtc::CritScope lock(&crit_);
    feedback_observer_ = feedback_observer;
  }

  // Implements TransportFeedbackObserver.
  void AddPacket(uint32_t ssrc,
                 uint16_t sequence_number,
                 size_t length,
                 const PacedPacketInfo& pacing_info) override {
    RTC_DCHECK(pacer_thread_.CalledOnValidThread());
    rtc::CritScope lock(&crit_);
    if (feedback_observer_)
      feedback_observer_->AddPacket(ssrc, sequence_number, length, pacing_info);
  }

  void OnTransportFeedback(const rtcp::TransportFeedback& feedback) override {
    RTC_DCHECK(network_thread_.CalledOnValidThread());
    rtc::CritScope lock(&crit_);
    if (feedback_observer_)
      feedback_observer_->OnTransportFeedback(feedback);
  }
  // Not expected to be called on this proxy.
  std::vector<PacketFeedback> GetTransportFeedbackVector() const override {
    RTC_NOTREACHED();
    return std::vector<PacketFeedback>();
  }

 private:
  rtc::CriticalSection crit_;
  rtc::ThreadChecker thread_checker_;
  rtc::ThreadChecker pacer_thread_;
  rtc::ThreadChecker network_thread_;
  TransportFeedbackObserver* feedback_observer_ RTC_GUARDED_BY(&crit_);
};
+
+class TransportSequenceNumberProxy : public TransportSequenceNumberAllocator {
+ public:
+ TransportSequenceNumberProxy() : seq_num_allocator_(nullptr) {
+ pacer_thread_.DetachFromThread();
+ }
+
+ void SetSequenceNumberAllocator(
+ TransportSequenceNumberAllocator* seq_num_allocator) {
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ rtc::CritScope lock(&crit_);
+ seq_num_allocator_ = seq_num_allocator;
+ }
+
+ // Implements TransportSequenceNumberAllocator.
+ uint16_t AllocateSequenceNumber() override {
+ RTC_DCHECK(pacer_thread_.CalledOnValidThread());
+ rtc::CritScope lock(&crit_);
+ if (!seq_num_allocator_)
+ return 0;
+ return seq_num_allocator_->AllocateSequenceNumber();
+ }
+
+ private:
+ rtc::CriticalSection crit_;
+ rtc::ThreadChecker thread_checker_;
+ rtc::ThreadChecker pacer_thread_;
+ TransportSequenceNumberAllocator* seq_num_allocator_ RTC_GUARDED_BY(&crit_);
+};
+
+class RtpPacketSenderProxy : public RtpPacketSender {
+ public:
+ RtpPacketSenderProxy() : rtp_packet_sender_(nullptr) {}
+
+ void SetPacketSender(RtpPacketSender* rtp_packet_sender) {
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ rtc::CritScope lock(&crit_);
+ rtp_packet_sender_ = rtp_packet_sender;
+ }
+
+ // Implements RtpPacketSender.
+ void InsertPacket(Priority priority,
+ uint32_t ssrc,
+ uint16_t sequence_number,
+ int64_t capture_time_ms,
+ size_t bytes,
+ bool retransmission) override {
+ rtc::CritScope lock(&crit_);
+ if (rtp_packet_sender_) {
+ rtp_packet_sender_->InsertPacket(priority, ssrc, sequence_number,
+ capture_time_ms, bytes, retransmission);
+ }
+ }
+
+ void SetAccountForAudioPackets(bool account_for_audio) override {
+ RTC_NOTREACHED();
+ }
+
+ private:
+ rtc::ThreadChecker thread_checker_;
+ rtc::CriticalSection crit_;
+ RtpPacketSender* rtp_packet_sender_ RTC_GUARDED_BY(&crit_);
+};
+
// Receives RTCP-driven callbacks (bandwidth estimates, receiver reports,
// BYE/timeout events) from the RTP module and fans them out to the owning
// Channel and to optionally-installed bandwidth/event observers. The
// observer pointers may be (re)set at any time, hence the lock around every
// use. |owner_| is set once at construction and never changes.
class VoERtcpObserver : public RtcpBandwidthObserver, public RtcpEventObserver {
 public:
  explicit VoERtcpObserver(Channel* owner)
      : owner_(owner), bandwidth_observer_(nullptr), event_observer_(nullptr) {}
  virtual ~VoERtcpObserver() {}

  // Installs (or clears, with nullptr) the downstream bandwidth observer.
  void SetBandwidthObserver(RtcpBandwidthObserver* bandwidth_observer) {
    rtc::CritScope lock(&crit_);
    bandwidth_observer_ = bandwidth_observer;
  }

  // Installs (or clears, with nullptr) the downstream RTCP event observer.
  void SetEventObserver(RtcpEventObserver* event_observer) {
    rtc::CritScope lock(&crit_);
    event_observer_ = event_observer;
  }

  void OnReceivedEstimatedBitrate(uint32_t bitrate) override {
    rtc::CritScope lock(&crit_);
    if (bandwidth_observer_) {
      bandwidth_observer_->OnReceivedEstimatedBitrate(bitrate);
    }
  }

  // Forwards the report to the bandwidth observer, then computes an overall
  // packet-loss fraction — weighted by how many packets each report block
  // covers since its previous report — and pushes it to the owning channel.
  void OnReceivedRtcpReceiverReport(const ReportBlockList& report_blocks,
                                    int64_t rtt,
                                    int64_t now_ms) override {
    {
      rtc::CritScope lock(&crit_);
      if (bandwidth_observer_) {
        bandwidth_observer_->OnReceivedRtcpReceiverReport(report_blocks, rtt,
                                                          now_ms);
      }
    }
    // TODO(mflodman): Do we need to aggregate reports here or can we just send
    // what we get? I.e. do we ever get multiple reports bundled into one RTCP
    // report for VoiceEngine?
    if (report_blocks.empty())
      return;

    int fraction_lost_aggregate = 0;
    int total_number_of_packets = 0;

    // If receiving multiple report blocks, calculate the weighted average based
    // on the number of packets a report refers to.
    for (ReportBlockList::const_iterator block_it = report_blocks.begin();
         block_it != report_blocks.end(); ++block_it) {
      // Find the previous extended high sequence number for this remote SSRC,
      // to calculate the number of RTP packets this report refers to. Ignore if
      // we haven't seen this SSRC before.
      std::map<uint32_t, uint32_t>::iterator seq_num_it =
          extended_max_sequence_number_.find(block_it->source_ssrc);
      int number_of_packets = 0;
      if (seq_num_it != extended_max_sequence_number_.end()) {
        number_of_packets =
            block_it->extended_highest_sequence_number - seq_num_it->second;
      }
      fraction_lost_aggregate += number_of_packets * block_it->fraction_lost;
      total_number_of_packets += number_of_packets;

      extended_max_sequence_number_[block_it->source_ssrc] =
          block_it->extended_highest_sequence_number;
    }
    int weighted_fraction_lost = 0;
    if (total_number_of_packets > 0) {
      // Adding half the denominator rounds to nearest instead of truncating.
      weighted_fraction_lost =
          (fraction_lost_aggregate + total_number_of_packets / 2) /
          total_number_of_packets;
    }
    // fraction_lost is in Q8 (0..255), hence the division by 255.
    owner_->OnUplinkPacketLossRate(weighted_fraction_lost / 255.0f);
    owner_->OnIncomingReceiverReports(report_blocks, rtt, now_ms);
  }

  void OnRtcpBye() override {
    rtc::CritScope lock(&crit_);
    if (event_observer_) {
      event_observer_->OnRtcpBye();
    }
  }

  void OnRtcpTimeout() override {
    rtc::CritScope lock(&crit_);
    if (event_observer_) {
      event_observer_->OnRtcpTimeout();
    }
  }

 private:
  Channel* owner_;
  // Maps remote side ssrc to extended highest sequence number received.
  std::map<uint32_t, uint32_t> extended_max_sequence_number_;
  rtc::CriticalSection crit_;
  RtcpBandwidthObserver* bandwidth_observer_ RTC_GUARDED_BY(crit_);
  RtcpEventObserver* event_observer_ RTC_GUARDED_BY(crit_);
};
+
+class Channel::ProcessAndEncodeAudioTask : public rtc::QueuedTask {
+ public:
+ ProcessAndEncodeAudioTask(std::unique_ptr<AudioFrame> audio_frame,
+ Channel* channel)
+ : audio_frame_(std::move(audio_frame)), channel_(channel) {
+ RTC_DCHECK(channel_);
+ }
+
+ private:
+ bool Run() override {
+ RTC_DCHECK_RUN_ON(channel_->encoder_queue_);
+ channel_->ProcessAndEncodeAudioOnTaskQueue(audio_frame_.get());
+ return true;
+ }
+
+ std::unique_ptr<AudioFrame> audio_frame_;
+ Channel* const channel_;
+};
+
+void Channel::OnIncomingReceiverReports(const ReportBlockList& aReportBlocks,
+ const int64_t aRoundTripTime,
+ const int64_t aReceptionTime) {
+
+ statistics_proxy_->OnIncomingReceiverReports(aReportBlocks,
+ aRoundTripTime,
+ aReceptionTime);
+}
+
// Fills receiver-side statistics (as derived from remote RTCP receiver
// reports about our send SSRC) into the out-parameters. Returns false — and
// leaves the out-parameters untouched — when no receiver report with a
// non-zero packet count has been seen yet.
bool Channel::GetRTCPReceiverStatistics(int64_t* timestamp,
                                        uint32_t* jitterMs,
                                        uint32_t* cumulativeLost,
                                        uint32_t* packetsReceived,
                                        uint64_t* bytesReceived,
                                        double* packetsFractionLost,
                                        int64_t* rtt) const {
  uint32_t ssrc = _rtpRtcpModule->SSRC();

  const auto& stats = statistics_proxy_->GetReceiverReportDerivedStats(ssrc);
  if (!stats || !stats->PacketsReceived()) {
    return false;
  }
  *timestamp = stats->mLatestReceiverReportReceptionTime;
  *jitterMs = stats->JitterMs();
  *cumulativeLost = stats->mCumulativePacketsLost;
  *packetsReceived = stats->PacketsReceived();
  *packetsFractionLost = stats->FractionOfPacketsLost();
  *rtt = stats->mRoundTripTime;

  // bytesReceived is only an estimate, which we derive from the locally
  // generated RTCP sender reports, and the remotely generated receiver
  // reports.
  // There is an open issue in the spec as to if this should be included
  // here where it is only a guess.
  // https://github.com/w3c/webrtc-stats/issues/241
  *bytesReceived = 0;
  if (*packetsReceived) {
    // GetDataCounters has internal CS lock within RtpSender
    StreamDataCounters rtpCounters;
    StreamDataCounters rtxCounters;  // unused
    _rtpRtcpModule->GetSendStreamDataCounters(&rtpCounters, &rtxCounters);
    uint64_t sentPackets = rtpCounters.transmitted.packets;
    if (sentPackets) {
      // Scale our sent byte count by the remote's received/sent packet ratio.
      uint64_t sentBytes = rtpCounters.MediaPayloadBytes();
      *bytesReceived = sentBytes * (*packetsReceived) / sentPackets;
    }
  }

  return true;
}
+
// Installs the observer notified for each received RTP packet (used for
// getContributingSources/getSynchronizationSources). The pointer is written
// without synchronization; NOTE(review): presumably installed before packets
// flow — confirm against callers.
void Channel::SetRtpPacketObserver(RtpPacketObserver* observer) {
  rtp_source_observer_ = observer;
}
+
// Implements AudioPacketizationCallback. Called by the ACM (on the encoder
// task queue) with one encoded frame; forwards it to the RTP/RTCP module for
// packetization and transmission. Returns 0 on success, -1 on failure.
int32_t Channel::SendData(FrameType frameType,
                          uint8_t payloadType,
                          uint32_t timeStamp,
                          const uint8_t* payloadData,
                          size_t payloadSize,
                          const RTPFragmentationHeader* fragmentation) {
  RTC_DCHECK_RUN_ON(encoder_queue_);
  if (_includeAudioLevelIndication) {
    // Store current audio level in the RTP/RTCP module.
    // The level will be used in combination with voice-activity state
    // (frameType) to add an RTP header extension
    _rtpRtcpModule->SetAudioLevel(rms_level_.Average());
  }

  // Push data from ACM to RTP/RTCP-module to deliver audio frame for
  // packetization.
  // This call will trigger Transport::SendPacket() from the RTP/RTCP module.
  // NOTE(review): the C-style (FrameType&) cast appears to exist only to
  // match SendOutgoingData()'s parameter type — confirm the callee does not
  // mutate frameType, and prefer a const-correct call.
  if (!_rtpRtcpModule->SendOutgoingData(
          (FrameType&)frameType, payloadType, timeStamp,
          // Leaving the time when this frame was
          // received from the capture device as
          // undefined for voice for now.
          -1, payloadData, payloadSize, fragmentation, nullptr, nullptr)) {
    RTC_LOG(LS_ERROR)
        << "Channel::SendData() failed to send data to RTP/RTCP module";
    return -1;
  }

  return 0;
}
+
+bool Channel::SendRtp(const uint8_t* data,
+ size_t len,
+ const PacketOptions& options) {
+ rtc::CritScope cs(&_callbackCritSect);
+
+ if (_transportPtr == NULL) {
+ RTC_LOG(LS_ERROR)
+ << "Channel::SendPacket() failed to send RTP packet due to"
+ << " invalid transport object";
+ return false;
+ }
+
+ uint8_t* bufferToSendPtr = (uint8_t*)data;
+ size_t bufferLength = len;
+
+ if (!_transportPtr->SendRtp(bufferToSendPtr, bufferLength, options)) {
+ RTC_LOG(LS_ERROR) << "Channel::SendPacket() RTP transmission failed";
+ return false;
+ }
+ return true;
+}
+
+bool Channel::SendRtcp(const uint8_t* data, size_t len) {
+ rtc::CritScope cs(&_callbackCritSect);
+ if (_transportPtr == NULL) {
+ RTC_LOG(LS_ERROR) << "Channel::SendRtcp() failed to send RTCP packet due to"
+ << " invalid transport object";
+ return false;
+ }
+
+ uint8_t* bufferToSendPtr = (uint8_t*)data;
+ size_t bufferLength = len;
+
+ int n = _transportPtr->SendRtcp(bufferToSendPtr, bufferLength);
+ if (n < 0) {
+ RTC_LOG(LS_ERROR) << "Channel::SendRtcp() transmission failed";
+ return false;
+ }
+ return true;
+}
+
// Called by the RTP receiver when the remote media SSRC changes.
void Channel::OnIncomingSSRCChanged(uint32_t ssrc) {
  // Update ssrc so that NTP for AV sync can be updated.
  _rtpRtcpModule->SetRemoteSSRC(ssrc);

  // Update stats proxy to receive stats for new ssrc
  statistics_proxy_->SetSSRC(ssrc);
}
+
// Intentionally a no-op: CSRC changes are not tracked by voice engine.
void Channel::OnIncomingCSRCChanged(uint32_t CSRC, bool added) {
  // TODO(saza): remove.
}
+
+int32_t Channel::OnInitializeDecoder(int payload_type,
+ const SdpAudioFormat& audio_format,
+ uint32_t rate) {
+ if (!audio_coding_->RegisterReceiveCodec(payload_type, audio_format)) {
+ RTC_LOG(LS_WARNING) << "Channel::OnInitializeDecoder() invalid codec (pt="
+ << payload_type << ", " << audio_format
+ << ") received -1";
+ return -1;
+ }
+
+ return 0;
+}
+
// Implements RtpData. Delivers a parsed RTP payload to the ACM for decoding,
// notifies the RTP source observer (for getContributingSources /
// getSynchronizationSources), and refreshes the NACK list using the current
// round-trip time. Returns 0 on success (or when not playing), -1 when the
// ACM rejects the packet.
int32_t Channel::OnReceivedPayloadData(const uint8_t* payloadData,
                                       size_t payloadSize,
                                       const WebRtcRTPHeader* rtpHeader) {
  if (!channel_state_.Get().playing) {
    // Avoid inserting into NetEQ when we are not playing. Count the
    // packet as discarded.
    return 0;
  }

  // Push the incoming payload (parsed and ready for decoding) into the ACM
  if (audio_coding_->IncomingPacket(payloadData, payloadSize, *rtpHeader) !=
      0) {
    RTC_LOG(LS_ERROR)
        << "Channel::OnReceivedPayloadData() unable to push data to the ACM";
    return -1;
  }

  // Observe incoming packets for getContributingSources and
  // getSynchronizationSources.
  if (rtp_source_observer_) {
    const auto playoutFrequency = audio_coding_->PlayoutFrequency();
    uint32_t jitter = 0;
    if (playoutFrequency > 0) {
      // Convert jitter from RTP timestamp units to milliseconds.
      const ChannelStatistics stats = statistics_proxy_->GetStats();
      jitter = stats.rtcp.jitter / (playoutFrequency / 1000);
    }
    rtp_source_observer_->OnRtpPacket(rtpHeader->header,
        webrtc::Clock::GetRealTimeClock()->TimeInMilliseconds(), jitter);
  }

  int64_t round_trip_time = 0;
  _rtpRtcpModule->RTT(rtp_receiver_->SSRC(), &round_trip_time, NULL, NULL,
                      NULL);

  std::vector<uint16_t> nack_list = audio_coding_->GetNackList(round_trip_time);
  if (!nack_list.empty()) {
    // Can't use nack_list.data() since it's not supported by all
    // compilers.
    ResendPackets(&(nack_list[0]), static_cast<int>(nack_list.size()));
  }
  return 0;
}
+
// Implements RecoveredPacketReceiver. Re-injects an RTP packet recovered by
// FEC: parses the header, attaches the payload's clock rate, and hands the
// packet to the regular receive path. Returns false on a malformed header or
// an unknown payload type.
bool Channel::OnRecoveredPacket(const uint8_t* rtp_packet,
                                size_t rtp_packet_length) {
  RTPHeader header;
  if (!rtp_header_parser_->Parse(rtp_packet, rtp_packet_length, &header)) {
    RTC_LOG(LS_WARNING) << "IncomingPacket invalid RTP header";
    return false;
  }
  header.payload_type_frequency =
      rtp_payload_registry_->GetPayloadTypeFrequency(header.payloadType);
  if (header.payload_type_frequency < 0)
    return false;
  // TODO(nisse): Pass RtpPacketReceived with |recovered()| true.
  return ReceivePacket(rtp_packet, rtp_packet_length, header);
}
+
// Implements AudioMixer::Source. Pulls one 10 ms frame of decoded audio from
// the ACM/NetEQ at |sample_rate_hz|, passes it to an optional sink, applies
// the output gain, updates level metering and NTP/AV-sync bookkeeping, and
// reports whether the frame is normal, muted, or in error.
AudioMixer::Source::AudioFrameInfo Channel::GetAudioFrameWithInfo(
    int sample_rate_hz,
    AudioFrame* audio_frame) {
  audio_frame->sample_rate_hz_ = sample_rate_hz;

  // Get 10ms raw PCM data from the ACM (mixer limits output frequency)
  bool muted;
  if (audio_coding_->PlayoutData10Ms(audio_frame->sample_rate_hz_, audio_frame,
                                     &muted) == -1) {
    RTC_LOG(LS_ERROR) << "Channel::GetAudioFrame() PlayoutData10Ms() failed!";
    // In all likelihood, the audio in this frame is garbage. We return an
    // error so that the audio mixer module doesn't add it to the mix. As
    // a result, it won't be played out and the actions skipped here are
    // irrelevant.
    return AudioMixer::Source::AudioFrameInfo::kError;
  }

  if (muted) {
    // TODO(henrik.lundin): We should be able to do better than this. But we
    // will have to go through all the cases below where the audio samples may
    // be used, and handle the muted case in some way.
    AudioFrameOperations::Mute(audio_frame);
  }

  // Store speech type for dead-or-alive detection
  _outputSpeechType = audio_frame->speech_type_;

  {
    // Pass the audio buffers to an optional sink callback, before applying
    // scaling/panning, as that applies to the mix operation.
    // External recipients of the audio (e.g. via AudioTrack), will do their
    // own mixing/dynamic processing.
    rtc::CritScope cs(&_callbackCritSect);
    if (audio_sink_) {
      AudioSinkInterface::Data data(
          audio_frame->data(), audio_frame->samples_per_channel_,
          audio_frame->sample_rate_hz_, audio_frame->num_channels_,
          audio_frame->timestamp_);
      audio_sink_->OnData(data);
    }
  }

  float output_gain = 1.0f;
  {
    rtc::CritScope cs(&volume_settings_critsect_);
    output_gain = _outputGain;
  }

  // Output volume scaling
  // Only rescale when the gain differs measurably from unity.
  if (output_gain < 0.99f || output_gain > 1.01f) {
    // TODO(solenberg): Combine with mute state - this can cause clicks!
    AudioFrameOperations::ScaleWithSat(output_gain, audio_frame);
  }

  // Measure audio level (0-9)
  // TODO(henrik.lundin) Use the |muted| information here too.
  // TODO(deadbeef): Use RmsLevel for |_outputAudioLevel| (see
  // https://crbug.com/webrtc/7517).
  _outputAudioLevel.ComputeLevel(*audio_frame, kAudioSampleDurationSeconds);

  if (capture_start_rtp_time_stamp_ < 0 && audio_frame->timestamp_ != 0) {
    // The first frame with a valid rtp timestamp.
    capture_start_rtp_time_stamp_ = audio_frame->timestamp_;
  }

  if (capture_start_rtp_time_stamp_ >= 0) {
    // audio_frame.timestamp_ should be valid from now on.

    // Compute elapsed time.
    int64_t unwrap_timestamp =
        rtp_ts_wraparound_handler_->Unwrap(audio_frame->timestamp_);
    audio_frame->elapsed_time_ms_ =
        (unwrap_timestamp - capture_start_rtp_time_stamp_) /
        (GetRtpTimestampRateHz() / 1000);

    {
      rtc::CritScope lock(&ts_stats_lock_);
      // Compute ntp time.
      audio_frame->ntp_time_ms_ =
          ntp_estimator_.Estimate(audio_frame->timestamp_);
      // |ntp_time_ms_| won't be valid until at least 2 RTCP SRs are received.
      if (audio_frame->ntp_time_ms_ > 0) {
        // Compute |capture_start_ntp_time_ms_| so that
        // |capture_start_ntp_time_ms_| + |elapsed_time_ms_| == |ntp_time_ms_|
        capture_start_ntp_time_ms_ =
            audio_frame->ntp_time_ms_ - audio_frame->elapsed_time_ms_;
      }
    }
  }

  {
    RTC_HISTOGRAM_COUNTS_1000("WebRTC.Audio.TargetJitterBufferDelayMs",
                              audio_coding_->TargetDelayMs());
    const int jitter_buffer_delay = audio_coding_->FilteredCurrentDelayMs();
    rtc::CritScope lock(&video_sync_lock_);
    RTC_HISTOGRAM_COUNTS_1000("WebRTC.Audio.ReceiverDelayEstimateMs",
                              jitter_buffer_delay + playout_delay_ms_);
    RTC_HISTOGRAM_COUNTS_1000("WebRTC.Audio.ReceiverJitterBufferDelayMs",
                              jitter_buffer_delay);
    RTC_HISTOGRAM_COUNTS_1000("WebRTC.Audio.ReceiverDeviceDelayMs",
                              playout_delay_ms_);
  }

  return muted ? AudioMixer::Source::AudioFrameInfo::kMuted
               : AudioMixer::Source::AudioFrameInfo::kNormal;
}
+
+int Channel::PreferredSampleRate() const {
+ // Return the bigger of playout and receive frequency in the ACM.
+ return std::max(audio_coding_->ReceiveFrequency(),
+ audio_coding_->PlayoutFrequency());
+}
+
+int32_t Channel::CreateChannel(Channel*& channel,
+ int32_t channelId,
+ uint32_t instanceId,
+ const VoEBase::ChannelConfig& config) {
+ channel = new Channel(channelId, instanceId, config);
+ if (channel == NULL) {
+ RTC_LOG(LS_ERROR) << "unable to allocate memory for new channel";
+ return -1;
+ }
+ return 0;
+}
+
// Constructs a channel: wires up the ACM, the RTP/RTCP module and the
// various proxy/observer helpers. Further setup happens in
// SetEngineInformation() and Init().
Channel::Channel(int32_t channelId,
                 uint32_t instanceId,
                 const VoEBase::ChannelConfig& config)
    : _instanceId(instanceId),
      _channelId(channelId),
      event_log_proxy_(new RtcEventLogProxy()),
      rtcp_rtt_stats_proxy_(new RtcpRttStatsProxy()),
      rtp_header_parser_(RtpHeaderParser::Create()),
      rtp_payload_registry_(new RTPPayloadRegistry()),
      rtp_receive_statistics_(
          ReceiveStatistics::Create(Clock::GetRealTimeClock())),
      rtp_receiver_(
          RtpReceiver::CreateAudioReceiver(Clock::GetRealTimeClock(),
                                           this,
                                           this,
                                           rtp_payload_registry_.get())),
      telephone_event_handler_(rtp_receiver_->GetTelephoneEventHandler()),
      _outputAudioLevel(),
      _timeStamp(0),  // This is just an offset, RTP module will add it's own
                      // random offset
      ntp_estimator_(Clock::GetRealTimeClock()),
      playout_timestamp_rtp_(0),
      playout_delay_ms_(0),
      send_sequence_number_(0),
      rtp_ts_wraparound_handler_(new rtc::TimestampWrapAroundHandler()),
      capture_start_rtp_time_stamp_(-1),
      capture_start_ntp_time_ms_(-1),
      _moduleProcessThreadPtr(NULL),
      _audioDeviceModulePtr(NULL),
      _transportPtr(NULL),
      input_mute_(false),
      previous_frame_muted_(false),
      _outputGain(1.0f),
      _includeAudioLevelIndication(false),
      transport_overhead_per_packet_(0),
      rtp_overhead_per_packet_(0),
      _outputSpeechType(AudioFrame::kNormalSpeech),
      rtcp_observer_(new VoERtcpObserver(this)),
      associate_send_channel_(ChannelOwner(nullptr)),
      pacing_enabled_(config.enable_voice_pacing),
      feedback_observer_proxy_(new TransportFeedbackProxy()),
      seq_num_allocator_proxy_(new TransportSequenceNumberProxy()),
      rtp_packet_sender_proxy_(new RtpPacketSenderProxy()),
      retransmission_rate_limiter_(new RateLimiter(Clock::GetRealTimeClock(),
                                                   kMaxRetransmissionWindowMs)),
      decoder_factory_(config.acm_config.decoder_factory),
      use_twcc_plr_for_ana_(
          webrtc::field_trial::FindFullName("UseTwccPlrForAna") == "Enabled") {
  AudioCodingModule::Config acm_config(config.acm_config);
  // Keep NetEQ producing (muted) output while no packets arrive.
  acm_config.neteq_config.enable_muted_state = true;
  audio_coding_.reset(AudioCodingModule::Create(acm_config));

  _outputAudioLevel.Clear();

  RtpRtcp::Configuration configuration;
  configuration.audio = true;
  configuration.outgoing_transport = this;
  configuration.overhead_observer = this;
  configuration.receive_statistics = rtp_receive_statistics_.get();
  configuration.bandwidth_callback = rtcp_observer_.get();
  configuration.event_callback = rtcp_observer_.get();
  if (pacing_enabled_) {
    configuration.paced_sender = rtp_packet_sender_proxy_.get();
    configuration.transport_sequence_number_allocator =
        seq_num_allocator_proxy_.get();
    configuration.transport_feedback_callback = feedback_observer_proxy_.get();
  }
  configuration.event_log = &(*event_log_proxy_);
  configuration.rtt_stats = &(*rtcp_rtt_stats_proxy_);
  configuration.retransmission_rate_limiter =
      retransmission_rate_limiter_.get();

  _rtpRtcpModule.reset(RtpRtcp::CreateRtpRtcp(configuration));
  _rtpRtcpModule->SetSendingMediaStatus(false);

  statistics_proxy_.reset(new StatisticsProxy(_rtpRtcpModule->SSRC()));
  rtp_receive_statistics_->RegisterRtcpStatisticsCallback(
      statistics_proxy_.get());
  // NOTE(review): |configuration| was already consumed by CreateRtpRtcp()
  // above, so this assignment appears to have no effect — the packet-type
  // counter observer is likely never registered with the module. Confirm;
  // fixing would require creating the proxy before the module (the proxy's
  // ctor currently needs the module's SSRC).
  configuration.rtcp_packet_type_counter_observer = statistics_proxy_.get();
}
+
// The channel must be stopped (StopSend()/StopPlayout(), normally via
// Terminate()) before destruction.
Channel::~Channel() {
  RTC_DCHECK(!channel_state_.Get().sending);
  RTC_DCHECK(!channel_state_.Get().playing);
}
+
// Second-stage initialization; must run on the construction thread after
// SetEngineInformation(). Registers the RTP/RTCP module with the process
// thread, initializes the ACM receiver, enables RTCP and hooks up the ACM
// transport callback. Returns 0 on success, -1 on any failure.
int32_t Channel::Init() {
  RTC_DCHECK(construction_thread_.CalledOnValidThread());

  channel_state_.Reset();

  // --- Initial sanity

  if (_moduleProcessThreadPtr == NULL) {
    RTC_LOG(LS_ERROR)
        << "Channel::Init() must call SetEngineInformation() first";
    return -1;
  }

  // --- Add modules to process thread (for periodic scheduling)

  _moduleProcessThreadPtr->RegisterModule(_rtpRtcpModule.get(), RTC_FROM_HERE);

  // --- ACM initialization

  if (audio_coding_->InitializeReceiver() == -1) {
    RTC_LOG(LS_ERROR) << "Channel::Init() unable to initialize the ACM - 1";
    return -1;
  }

  // --- RTP/RTCP module initialization

  // Ensure that RTCP is enabled by default for the created channel.
  // Note that, the module will keep generating RTCP until it is explicitly
  // disabled by the user.
  // After StopListen (when no sockets exists), RTCP packets will no longer
  // be transmitted since the Transport object will then be invalid.
  telephone_event_handler_->SetTelephoneEventForwardToDecoder(true);
  // RTCP is enabled by default.
  _rtpRtcpModule->SetRTCPStatus(RtcpMode::kCompound);
  // --- Register all permanent callbacks
  if (audio_coding_->RegisterTransportCallback(this) == -1) {
    RTC_LOG(LS_ERROR) << "Channel::Init() callbacks not registered";
    return -1;
  }

  return 0;
}
+
// Tears the channel down in the reverse order of Init(): stop activity,
// de-register callbacks, then de-register the module from the process
// thread. Must be called on the same thread as Init().
void Channel::Terminate() {
  RTC_DCHECK(construction_thread_.CalledOnValidThread());
  // Must be called on the same thread as Init().
  rtp_receive_statistics_->RegisterRtcpStatisticsCallback(NULL);

  StopSend();
  StopPlayout();

  // The order to safely shutdown modules in a channel is:
  // 1. De-register callbacks in modules
  // 2. De-register modules in process thread
  // 3. Destroy modules
  if (audio_coding_->RegisterTransportCallback(NULL) == -1) {
    RTC_LOG(LS_WARNING)
        << "Terminate() failed to de-register transport callback"
        << " (Audio coding module)";
  }

  // De-register modules in process thread
  if (_moduleProcessThreadPtr)
    _moduleProcessThreadPtr->DeRegisterModule(_rtpRtcpModule.get());

  // End of modules shutdown
}
+
+int32_t Channel::SetEngineInformation(ProcessThread& moduleProcessThread,
+ AudioDeviceModule& audioDeviceModule,
+ rtc::TaskQueue* encoder_queue) {
+ RTC_DCHECK(encoder_queue);
+ RTC_DCHECK(!encoder_queue_);
+ _moduleProcessThreadPtr = &moduleProcessThread;
+ _audioDeviceModulePtr = &audioDeviceModule;
+ encoder_queue_ = encoder_queue;
+ return 0;
+}
+
// Installs (or clears, by passing an empty unique_ptr) the sink that
// receives a copy of each decoded 10 ms frame in GetAudioFrameWithInfo().
void Channel::SetSink(std::unique_ptr<AudioSinkInterface> sink) {
  rtc::CritScope cs(&_callbackCritSect);
  audio_sink_ = std::move(sink);
}
+
// Returns the decoder factory this channel was configured with.
const rtc::scoped_refptr<AudioDecoderFactory>&
Channel::GetAudioDecoderFactory() const {
  return decoder_factory_;
}
+
+int32_t Channel::StartPlayout() {
+ if (channel_state_.Get().playing) {
+ return 0;
+ }
+
+ channel_state_.SetPlaying(true);
+
+ return 0;
+}
+
+int32_t Channel::StopPlayout() {
+ if (!channel_state_.Get().playing) {
+ return 0;
+ }
+
+ channel_state_.SetPlaying(false);
+ _outputAudioLevel.Clear();
+
+ return 0;
+}
+
// Starts sending: activates the encoder task queue, restores the RTP
// sequence number saved by StopSend(), and flips the RTP/RTCP module into
// sending mode. Idempotent; returns 0 on success, -1 if the RTP/RTCP module
// refuses to start (state is rolled back in that case).
int32_t Channel::StartSend() {
  if (channel_state_.Get().sending) {
    return 0;
  }
  channel_state_.SetSending(true);
  {
    // It is now OK to start posting tasks to the encoder task queue.
    rtc::CritScope cs(&encoder_queue_lock_);
    encoder_queue_is_active_ = true;
  }
  // Resume the previous sequence number which was reset by StopSend(). This
  // needs to be done before |sending| is set to true on the RTP/RTCP module.
  if (send_sequence_number_) {
    _rtpRtcpModule->SetSequenceNumber(send_sequence_number_);
  }
  _rtpRtcpModule->SetSendingMediaStatus(true);
  if (_rtpRtcpModule->SetSendingStatus(true) != 0) {
    RTC_LOG(LS_ERROR) << "StartSend() RTP/RTCP failed to start sending";
    _rtpRtcpModule->SetSendingMediaStatus(false);
    rtc::CritScope cs(&_callbackCritSect);
    channel_state_.SetSending(false);
    return -1;
  }

  return 0;
}
+
// Stops sending: deactivates and flushes the encoder task queue (blocking
// until pending encode tasks have drained), saves the RTP sequence number
// for a later StartSend(), and turns off sending in the RTP/RTCP module,
// which triggers an RTCP BYE. Idempotent.
void Channel::StopSend() {
  if (!channel_state_.Get().sending) {
    return;
  }
  channel_state_.SetSending(false);

  // Post a task to the encoder thread which sets an event when the task is
  // executed. We know that no more encoding tasks will be added to the task
  // queue for this channel since sending is now deactivated. It means that,
  // if we wait for the event to be set, we know that no more pending tasks
  // exist and it is therefore guaranteed that the task queue will never try
  // to access an invalid channel object.
  RTC_DCHECK(encoder_queue_);

  rtc::Event flush(false, false);
  {
    // Clear |encoder_queue_is_active_| under lock to prevent any other tasks
    // than this final "flush task" to be posted on the queue.
    rtc::CritScope cs(&encoder_queue_lock_);
    encoder_queue_is_active_ = false;
    encoder_queue_->PostTask([&flush]() { flush.Set(); });
  }
  flush.Wait(rtc::Event::kForever);

  // Store the sequence number to be able to pick up the same sequence for
  // the next StartSend(). This is needed for restarting device, otherwise
  // it might cause libSRTP to complain about packets being replayed.
  // TODO(xians): Remove this workaround after RtpRtcpModule's refactoring
  // CL is landed. See issue
  // https://code.google.com/p/webrtc/issues/detail?id=2111 .
  send_sequence_number_ = _rtpRtcpModule->SequenceNumber();

  // Reset sending SSRC and sequence number and triggers direct transmission
  // of RTCP BYE
  // NOTE(review): the log text below says "StartSend()" although this is the
  // stop path — message kept as-is (runtime string), worth correcting.
  if (_rtpRtcpModule->SetSendingStatus(false) == -1) {
    RTC_LOG(LS_ERROR) << "StartSend() RTP/RTCP failed to stop sending";
  }
  _rtpRtcpModule->SetSendingMediaStatus(false);
}
+
// Installs |encoder| as the send codec for |payload_type|: registers a
// synthetic CodecInst (carrying the encoder's clock rate, packet size and
// channel count) with the RTP/RTCP module, caches the encoder's sample
// rate/channels for GetEncoderProps(), and hands the encoder to the ACM.
// Returns false when the RTP/RTCP module rejects the payload even after a
// de-register/retry.
bool Channel::SetEncoder(int payload_type,
                         std::unique_ptr<AudioEncoder> encoder) {
  RTC_DCHECK_GE(payload_type, 0);
  RTC_DCHECK_LE(payload_type, 127);
  // TODO(ossu): Make CodecInsts up, for now: one for the RTP/RTCP module and
  // one for for us to keep track of sample rate and number of channels, etc.

  // The RTP/RTCP module needs to know the RTP timestamp rate (i.e. clockrate)
  // as well as some other things, so we collect this info and send it along.
  CodecInst rtp_codec;
  rtp_codec.pltype = payload_type;
  strncpy(rtp_codec.plname, "audio", sizeof(rtp_codec.plname));
  rtp_codec.plname[sizeof(rtp_codec.plname) - 1] = 0;
  // Seems unclear if it should be clock rate or sample rate. CodecInst
  // supposedly carries the sample rate, but only clock rate seems sensible to
  // send to the RTP/RTCP module.
  rtp_codec.plfreq = encoder->RtpTimestampRateHz();
  rtp_codec.pacsize = rtc::CheckedDivExact(
      static_cast<int>(encoder->Max10MsFramesInAPacket() * rtp_codec.plfreq),
      100);
  rtp_codec.channels = encoder->NumChannels();
  rtp_codec.rate = 0;

  cached_encoder_props_.emplace(
      EncoderProps{encoder->SampleRateHz(), encoder->NumChannels()});

  // If registration fails (e.g. the payload type is already taken with a
  // different mapping), de-register and try once more.
  if (_rtpRtcpModule->RegisterSendPayload(rtp_codec) != 0) {
    _rtpRtcpModule->DeRegisterSendPayload(payload_type);
    if (_rtpRtcpModule->RegisterSendPayload(rtp_codec) != 0) {
      RTC_LOG(LS_ERROR)
          << "SetEncoder() failed to register codec to RTP/RTCP module";
      return false;
    }
  }

  audio_coding_->SetEncoder(std::move(encoder));
  return true;
}
+
// Runs |modifier| on the ACM's encoder slot (the unique_ptr may be null when
// no encoder is set); used for in-place reconfiguration of the send codec.
void Channel::ModifyEncoder(
    rtc::FunctionView<void(std::unique_ptr<AudioEncoder>*)> modifier) {
  audio_coding_->ModifyEncoder(modifier);
}
+
// Returns the sample rate / channel count cached by the last SetEncoder()
// call, or an empty Optional if no encoder has been set.
rtc::Optional<Channel::EncoderProps> Channel::GetEncoderProps() const {
  return cached_encoder_props_;
}
+
+int32_t Channel::GetRecCodec(CodecInst& codec) {
+ return (audio_coding_->ReceiveCodec(&codec));
+}
+
+void Channel::SetBitRate(int bitrate_bps, int64_t probing_interval_ms) {
+ audio_coding_->ModifyEncoder([&](std::unique_ptr<AudioEncoder>* encoder) {
+ if (*encoder) {
+ (*encoder)->OnReceivedUplinkBandwidth(bitrate_bps, probing_interval_ms);
+ }
+ });
+ retransmission_rate_limiter_->SetMaxRate(bitrate_bps);
+}
+
+void Channel::OnTwccBasedUplinkPacketLossRate(float packet_loss_rate) {
+ if (!use_twcc_plr_for_ana_)
+ return;
+ audio_coding_->ModifyEncoder([&](std::unique_ptr<AudioEncoder>* encoder) {
+ if (*encoder) {
+ (*encoder)->OnReceivedUplinkPacketLossFraction(packet_loss_rate);
+ }
+ });
+}
+
+void Channel::OnRecoverableUplinkPacketLossRate(
+ float recoverable_packet_loss_rate) {
+ audio_coding_->ModifyEncoder([&](std::unique_ptr<AudioEncoder>* encoder) {
+ if (*encoder) {
+ (*encoder)->OnReceivedUplinkRecoverablePacketLossFraction(
+ recoverable_packet_loss_rate);
+ }
+ });
+}
+
// Forwards the RTCP event observer (BYE/timeout notifications) to the
// channel's RTCP observer proxy.
void Channel::SetRtcpEventObserver(RtcpEventObserver* observer) {
  rtcp_observer_->SetEventObserver(observer);
}
+
+void Channel::OnUplinkPacketLossRate(float packet_loss_rate) {
+ if (use_twcc_plr_for_ana_)
+ return;
+ audio_coding_->ModifyEncoder([&](std::unique_ptr<AudioEncoder>* encoder) {
+ if (*encoder) {
+ (*encoder)->OnReceivedUplinkPacketLossFraction(packet_loss_rate);
+ }
+ });
+}
+
// Registers the payload-type -> codec mapping with both the RTP payload
// registry (for header clock-rate lookups) and the ACM (for decoding).
void Channel::SetReceiveCodecs(const std::map<int, SdpAudioFormat>& codecs) {
  rtp_payload_registry_->SetAudioReceivePayloads(codecs);
  audio_coding_->SetReceiveCodecs(codecs);
}
+
+bool Channel::EnableAudioNetworkAdaptor(const std::string& config_string) {
+ bool success = false;
+ audio_coding_->ModifyEncoder([&](std::unique_ptr<AudioEncoder>* encoder) {
+ if (*encoder) {
+ success = (*encoder)->EnableAudioNetworkAdaptor(config_string,
+ event_log_proxy_.get());
+ }
+ });
+ return success;
+}
+
+void Channel::DisableAudioNetworkAdaptor() {
+ audio_coding_->ModifyEncoder([&](std::unique_ptr<AudioEncoder>* encoder) {
+ if (*encoder)
+ (*encoder)->DisableAudioNetworkAdaptor();
+ });
+}
+
+void Channel::SetReceiverFrameLengthRange(int min_frame_length_ms,
+ int max_frame_length_ms) {
+ audio_coding_->ModifyEncoder([&](std::unique_ptr<AudioEncoder>* encoder) {
+ if (*encoder) {
+ (*encoder)->SetReceiverFrameLengthRange(min_frame_length_ms,
+ max_frame_length_ms);
+ }
+ });
+}
+
// Installs the external transport used by SendRtp()/SendRtcp(). May be
// called with nullptr to detach the transport.
void Channel::RegisterTransport(Transport* transport) {
  rtc::CritScope cs(&_callbackCritSect);
  _transportPtr = transport;
}
+
// Implements RtpPacketSinkInterface. Entry point for received RTP packets:
// refreshes the playout timestamp, updates receive statistics, and hands the
// packet on to ReceivePacket(). Packets whose payload type is unknown to the
// payload registry are dropped.
void Channel::OnRtpPacket(const RtpPacketReceived& packet) {
  RTPHeader header;
  packet.GetHeader(&header);

  // Store playout timestamp for the received RTP packet
  UpdatePlayoutTimestamp(false);

  header.payload_type_frequency =
      rtp_payload_registry_->GetPayloadTypeFrequency(header.payloadType);
  if (header.payload_type_frequency >= 0) {
    bool in_order = IsPacketInOrder(header);
    // NOTE(review): despite its name, this records the *received* payload's
    // clock rate in the stats proxy (used there for jitter unit conversion)
    // — confirm the naming is intentional.
    statistics_proxy_->OnSendCodecFrequencyChanged(header.payload_type_frequency);
    rtp_receive_statistics_->IncomingPacket(
        header, packet.size(), IsPacketRetransmitted(header, in_order));
    rtp_payload_registry_->SetIncomingPayloadType(header);

    ReceivePacket(packet.data(), packet.size(), header);
  }
}
+
+// Strips the RTP header from |packet| and hands the payload to the RTP
+// receiver for depacketization. Returns false when the packet is malformed,
+// the payload type is unknown, or the receiver rejects the payload.
+bool Channel::ReceivePacket(const uint8_t* packet,
+                            size_t packet_length,
+                            const RTPHeader& header) {
+  // Reject malformed packets whose claimed header length exceeds the actual
+  // packet size. The previous assert() was compiled out in release builds,
+  // which allowed an out-of-bounds payload pointer and a size_t underflow
+  // below for untrusted network input.
+  if (packet_length < header.headerLength) {
+    return false;
+  }
+  // The payload starts directly after the RTP header.
+  const uint8_t* payload = packet + header.headerLength;
+  size_t payload_length = packet_length - header.headerLength;
+  const auto pl =
+      rtp_payload_registry_->PayloadTypeToPayload(header.payloadType);
+  if (!pl) {
+    return false;
+  }
+  return rtp_receiver_->IncomingRtpPacket(header, payload, payload_length,
+                                          pl->typeSpecific);
+}
+
+// Returns true if the per-SSRC stream statistician considers this sequence
+// number in order. With no statistician yet for the SSRC (no packets seen),
+// the packet is treated as out of order.
+bool Channel::IsPacketInOrder(const RTPHeader& header) const {
+  StreamStatistician* const statistician =
+      rtp_receive_statistics_->GetStatistician(header.ssrc);
+  return statistician != nullptr &&
+         statistician->IsPacketInOrder(header.sequenceNumber);
+}
+
+// Heuristic: an out-of-order packet counts as a retransmission when it is
+// older than plain network reordering could explain, using the minimum RTT
+// towards the remote SSRC as the reordering window.
+bool Channel::IsPacketRetransmitted(const RTPHeader& header,
+                                    bool in_order) const {
+  StreamStatistician* statistician =
+      rtp_receive_statistics_->GetStatistician(header.ssrc);
+  if (!statistician)
+    return false;
+  // Check if this is a retransmission.
+  int64_t min_rtt = 0;
+  _rtpRtcpModule->RTT(rtp_receiver_->SSRC(), NULL, NULL, &min_rtt, NULL);
+  return !in_order && statistician->IsRetransmitOfOldPacket(header, min_rtt);
+}
+
+// Entry point for received RTCP packets. Parses the packet, then uses the
+// resulting RTT to size the NACK retransmission window, notify the encoder,
+// and update the remote NTP time estimator. Always returns 0.
+int32_t Channel::ReceivedRTCPPacket(const uint8_t* data, size_t length) {
+  // Store playout timestamp for the received RTCP packet
+  UpdatePlayoutTimestamp(true);
+
+  // Deliver RTCP packet to RTP/RTCP module for parsing
+  _rtpRtcpModule->IncomingRtcpPacket(data, length);
+
+  int64_t rtt = GetRTT(true);
+  if (rtt == 0) {
+    // Waiting for valid RTT.
+    return 0;
+  }
+
+  // Clamp the retransmission window to [kMin, kMax] ms around the RTT.
+  int64_t nack_window_ms = rtt;
+  if (nack_window_ms < kMinRetransmissionWindowMs) {
+    nack_window_ms = kMinRetransmissionWindowMs;
+  } else if (nack_window_ms > kMaxRetransmissionWindowMs) {
+    nack_window_ms = kMaxRetransmissionWindowMs;
+  }
+  retransmission_rate_limiter_->SetWindowSize(nack_window_ms);
+
+  // Invoke audio encoders OnReceivedRtt().
+  audio_coding_->ModifyEncoder([&](std::unique_ptr<AudioEncoder>* encoder) {
+    if (*encoder)
+      (*encoder)->OnReceivedRtt(rtt);
+  });
+
+  // Update the NTP estimator only once a remote sender report is available.
+  uint32_t ntp_secs = 0;
+  uint32_t ntp_frac = 0;
+  uint32_t rtp_timestamp = 0;
+  if (0 !=
+      _rtpRtcpModule->RemoteNTP(&ntp_secs, &ntp_frac, NULL, NULL,
+                                &rtp_timestamp)) {
+    // Waiting for RTCP.
+    return 0;
+  }
+
+  {
+    rtc::CritScope lock(&ts_stats_lock_);
+    ntp_estimator_.UpdateRtcpTimestamp(rtt, ntp_secs, ntp_frac, rtp_timestamp);
+  }
+  return 0;
+}
+
+// Returns the current playout audio level (9-step scale from AudioLevel).
+int Channel::GetSpeechOutputLevel() const {
+  return _outputAudioLevel.Level();
+}
+
+// Returns the current playout audio level on the full linear sample range.
+int Channel::GetSpeechOutputLevelFullRange() const {
+  return _outputAudioLevel.LevelFullRange();
+}
+
+// Returns accumulated playout energy ("totalAudioEnergy" in webrtc-stats).
+double Channel::GetTotalOutputEnergy() const {
+  return _outputAudioLevel.TotalEnergy();
+}
+
+// Returns accumulated playout duration in seconds (webrtc-stats counterpart
+// of GetTotalOutputEnergy()).
+double Channel::GetTotalOutputDuration() const {
+  return _outputAudioLevel.TotalDuration();
+}
+
+// Mutes/unmutes the capture side; applied on the encoder task queue in
+// ProcessAndEncodeAudioOnTaskQueue().
+void Channel::SetInputMute(bool enable) {
+  rtc::CritScope cs(&volume_settings_critsect_);
+  input_mute_ = enable;
+}
+
+// Returns the current capture-mute flag (thread-safe read).
+bool Channel::InputMute() const {
+  rtc::CritScope cs(&volume_settings_critsect_);
+  return input_mute_;
+}
+
+// Sets the linear gain applied to this channel's playout audio.
+void Channel::SetChannelOutputVolumeScaling(float scaling) {
+  rtc::CritScope cs(&volume_settings_critsect_);
+  _outputGain = scaling;
+}
+
+// Sends an RFC 4733 telephone-event (DTMF) packet out of band. |event| must
+// be 0-255 and |duration_ms| 0-65535; requires an active send stream.
+// Returns 0 on success, -1 on failure.
+int Channel::SendTelephoneEventOutband(int event, int duration_ms) {
+  RTC_DCHECK_LE(0, event);
+  RTC_DCHECK_GE(255, event);
+  RTC_DCHECK_LE(0, duration_ms);
+  RTC_DCHECK_GE(65535, duration_ms);
+  if (!Sending()) {
+    return -1;
+  }
+  if (_rtpRtcpModule->SendTelephoneEventOutband(
+          event, duration_ms, kTelephoneEventAttenuationdB) != 0) {
+    RTC_LOG(LS_ERROR) << "SendTelephoneEventOutband() failed to send event";
+    return -1;
+  }
+  return 0;
+}
+
+// Registers the payload type/frequency used when sending telephone-event
+// packets. Returns 0 on success, -1 on failure.
+int Channel::SetSendTelephoneEventPayloadType(int payload_type,
+                                              int payload_frequency) {
+  RTC_DCHECK_LE(0, payload_type);
+  RTC_DCHECK_GE(127, payload_type);
+  CodecInst codec = {0};
+  codec.pltype = payload_type;
+  codec.plfreq = payload_frequency;
+  // 16 bytes copies the 15-character name plus its NUL terminator.
+  memcpy(codec.plname, "telephone-event", 16);
+  // Registration fails if the payload type is already taken with different
+  // parameters; deregister the old mapping and retry once.
+  if (_rtpRtcpModule->RegisterSendPayload(codec) != 0) {
+    _rtpRtcpModule->DeRegisterSendPayload(codec.pltype);
+    if (_rtpRtcpModule->RegisterSendPayload(codec) != 0) {
+      RTC_LOG(LS_ERROR)
+          << "SetSendTelephoneEventPayloadType() failed to register "
+             "send payload type";
+      return -1;
+    }
+  }
+  return 0;
+}
+
+// Sets the local RTP MID header-extension value. Refused (-1) while sending,
+// since the MID must not change mid-stream.
+int Channel::SetLocalMID(const char* mid) {
+  if (channel_state_.Get().sending) {
+    return -1;
+  }
+  _rtpRtcpModule->SetMID(mid);
+  return 0;
+}
+
+// Sets the local send SSRC. Refused (-1) while sending; changing the SSRC
+// of an active stream would corrupt it.
+int Channel::SetLocalSSRC(unsigned int ssrc) {
+  if (channel_state_.Get().sending) {
+    RTC_LOG(LS_ERROR) << "SetLocalSSRC() already sending";
+    return -1;
+  }
+  _rtpRtcpModule->SetSSRC(ssrc);
+  return 0;
+}
+/*
+int Channel::GetRemoteSSRC(unsigned int& ssrc) {
+ ssrc = rtp_receiver_->SSRC();
+ return 0;
+}
+*/
+// Enables/disables sending the audio-level RTP header extension with the
+// given extension id. Also records the flag so the capture path computes RMS
+// levels only when needed.
+int Channel::SetSendAudioLevelIndicationStatus(bool enable, unsigned char id) {
+  _includeAudioLevelIndication = enable;
+  return SetSendRtpHeaderExtension(enable, kRtpExtensionAudioLevel, id);
+}
+
+// Enables/disables sending the MID RTP header extension with the given id.
+int Channel::SetSendMIDStatus(bool enable, unsigned char id) {
+  return SetSendRtpHeaderExtension(enable, kRtpExtensionMid, id);
+}
+
+// Enables/disables parsing of a received audio-level header extension.
+// |isLevelSsrc| selects the ssrc-audio-level vs csrc-audio-level variant.
+// Deregister-then-register makes repeated calls and id changes idempotent.
+int Channel::SetReceiveAudioLevelIndicationStatus(bool enable,
+                                                  unsigned char id,
+                                                  bool isLevelSsrc) {
+  const webrtc::RTPExtensionType& rtpExt = isLevelSsrc ?
+      kRtpExtensionAudioLevel : kRtpExtensionCsrcAudioLevel;
+  rtp_header_parser_->DeregisterRtpHeaderExtension(rtpExt);
+  if (enable && !rtp_header_parser_->RegisterRtpHeaderExtension(rtpExt, id)) {
+    return -1;
+  }
+  return 0;
+}
+
+// Enables/disables parsing of the received csrc-audio-level extension; same
+// deregister-then-register idempotency pattern as the ssrc-level variant.
+int Channel::SetReceiveCsrcAudioLevelIndicationStatus(bool enable,
+                                                      unsigned char id) {
+  rtp_header_parser_->DeregisterRtpHeaderExtension(kRtpExtensionCsrcAudioLevel);
+  if (enable &&
+      !rtp_header_parser_->RegisterRtpHeaderExtension(kRtpExtensionCsrcAudioLevel,
+                                                      id)) {
+    return -1;
+  }
+  return 0;
+}
+
+
+// Enables the transport-wide sequence number header extension on the send
+// side; registration is expected to succeed (DCHECK).
+void Channel::EnableSendTransportSequenceNumber(int id) {
+  int ret =
+      SetSendRtpHeaderExtension(true, kRtpExtensionTransportSequenceNumber, id);
+  RTC_DCHECK_EQ(0, ret);
+}
+
+// Enables parsing of the transport-wide sequence number extension on the
+// receive side; deregisters first so a changed id takes effect.
+void Channel::EnableReceiveTransportSequenceNumber(int id) {
+  rtp_header_parser_->DeregisterRtpHeaderExtension(
+      kRtpExtensionTransportSequenceNumber);
+  bool ret = rtp_header_parser_->RegisterRtpHeaderExtension(
+      kRtpExtensionTransportSequenceNumber, id);
+  RTC_DCHECK(ret);
+}
+
+// Wires this channel's RTP module into the send-side congestion control
+// machinery: packet sender, transport feedback, sequence number allocation
+// and the packet router.
+void Channel::RegisterSenderCongestionControlObjects(
+    RtpTransportControllerSendInterface* transport,
+    RtcpBandwidthObserver* bandwidth_observer) {
+  RtpPacketSender* rtp_packet_sender = transport->packet_sender();
+  TransportFeedbackObserver* transport_feedback_observer =
+      transport->transport_feedback_observer();
+  PacketRouter* packet_router = transport->packet_router();
+  //This allows us to re-create streams but keep the same channel
+  if (packet_router_ == packet_router) {
+    return;
+  }
+
+  RTC_DCHECK(rtp_packet_sender);
+  RTC_DCHECK(transport_feedback_observer);
+  RTC_DCHECK(packet_router);
+  // A channel may be attached to at most one packet router at a time.
+  RTC_DCHECK(!packet_router_);
+  rtcp_observer_->SetBandwidthObserver(bandwidth_observer);
+  feedback_observer_proxy_->SetTransportFeedbackObserver(
+      transport_feedback_observer);
+  seq_num_allocator_proxy_->SetSequenceNumberAllocator(packet_router);
+  rtp_packet_sender_proxy_->SetPacketSender(rtp_packet_sender);
+  // Store up to 600 packets so the pacer/NACK can retransmit them later.
+  _rtpRtcpModule->SetStorePacketsStatus(true, 600);
+  constexpr bool remb_candidate = false;
+  packet_router->AddSendRtpModule(_rtpRtcpModule.get(), remb_candidate);
+  packet_router_ = packet_router;
+}
+
+// Adds this channel's RTP module to the packet router as a receive module
+// (not a REMB candidate).
+void Channel::RegisterReceiverCongestionControlObjects(
+    PacketRouter* packet_router) {
+  RTC_DCHECK(packet_router);
+  //This allows us to re-create streams but keep the same channel
+  if (packet_router_ == packet_router) {
+    return;
+  }
+
+  // A channel may be attached to at most one packet router at a time.
+  RTC_DCHECK(!packet_router_);
+  constexpr bool remb_candidate = false;
+  packet_router->AddReceiveRtpModule(_rtpRtcpModule.get(), remb_candidate);
+  packet_router_ = packet_router;
+}
+
+// Undoes RegisterSenderCongestionControlObjects(): detaches all proxies,
+// stops packet storage and removes the RTP module from the packet router.
+void Channel::ResetSenderCongestionControlObjects() {
+  RTC_DCHECK(packet_router_);
+  _rtpRtcpModule->SetStorePacketsStatus(false, 600);
+  rtcp_observer_->SetBandwidthObserver(nullptr);
+  feedback_observer_proxy_->SetTransportFeedbackObserver(nullptr);
+  seq_num_allocator_proxy_->SetSequenceNumberAllocator(nullptr);
+  packet_router_->RemoveSendRtpModule(_rtpRtcpModule.get());
+  packet_router_ = nullptr;
+  rtp_packet_sender_proxy_->SetPacketSender(nullptr);
+}
+
+// Undoes RegisterReceiverCongestionControlObjects().
+void Channel::ResetReceiverCongestionControlObjects() {
+  RTC_DCHECK(packet_router_);
+  packet_router_->RemoveReceiveRtpModule(_rtpRtcpModule.get());
+  packet_router_ = nullptr;
+}
+
+// Turns RTCP on (compound packets) or off for this channel.
+void Channel::SetRTCPStatus(bool enable) {
+  _rtpRtcpModule->SetRTCPStatus(enable ? RtcpMode::kCompound : RtcpMode::kOff);
+}
+
+// Sets the RTCP CNAME sent in SDES packets. Returns 0 on success, -1 on
+// failure.
+int Channel::SetRTCP_CNAME(const char cName[256]) {
+  if (_rtpRtcpModule->SetCNAME(cName) != 0) {
+    RTC_LOG(LS_ERROR) << "SetRTCP_CNAME() failed to set RTCP CNAME";
+    return -1;
+  }
+  return 0;
+}
+
+// Copies the report blocks of the latest received RTCP SR/RR into
+// |report_blocks|, converted to the public ReportBlock layout (RFC 3550
+// section 6.4.2 fields plus the sender's SSRC). Returns 0 on success, -1
+// when |report_blocks| is null or the stats query fails.
+int Channel::GetRemoteRTCPReportBlocks(
+    std::vector<ReportBlock>* report_blocks) {
+  if (report_blocks == NULL) {
+    RTC_LOG(LS_ERROR) << "GetRemoteRTCPReportBlock()s invalid report_blocks.";
+    return -1;
+  }
+
+  // Fetch the internal report-block list from the RTP/RTCP module.
+  std::vector<RTCPReportBlock> rtcp_report_blocks;
+  if (_rtpRtcpModule->RemoteRTCPStat(&rtcp_report_blocks) != 0) {
+    return -1;
+  }
+
+  // Translate each internal block into the public representation. An empty
+  // list simply appends nothing.
+  for (const RTCPReportBlock& block : rtcp_report_blocks) {
+    ReportBlock report_block;
+    report_block.sender_SSRC = block.sender_ssrc;
+    report_block.source_SSRC = block.source_ssrc;
+    report_block.fraction_lost = block.fraction_lost;
+    report_block.cumulative_num_packets_lost = block.packets_lost;
+    report_block.extended_highest_sequence_number =
+        block.extended_highest_sequence_number;
+    report_block.interarrival_jitter = block.jitter;
+    report_block.last_SR_timestamp = block.last_sender_report_timestamp;
+    report_block.delay_since_last_SR = block.delay_since_last_sender_report;
+    report_blocks->push_back(report_block);
+  }
+  return 0;
+}
+
+// Fills |stats| with receive-side RTCP statistics, round-trip time, RTP data
+// counters, remote sender info and the capture-start NTP time. Always
+// returns 0; missing sub-stats leave their fields at defaults.
+int Channel::GetRTPStatistics(CallStatistics& stats) {
+  // --- RtcpStatistics
+  // GetStatistics() grabs the stream_lock_ inside the object
+  // rtp_receiver_->SSRC grabs a lock too.
+
+  // The jitter statistics is updated for each received RTP packet and is
+  // based on received packets.
+  RtcpStatistics statistics;
+  StreamStatistician* statistician =
+      rtp_receive_statistics_->GetStatistician(rtp_receiver_->SSRC());
+  if (statistician) {
+    statistician->GetStatistics(&statistics,
+                                _rtpRtcpModule->RTCP() == RtcpMode::kOff);
+  }
+
+  stats.fractionLost = statistics.fraction_lost;
+  stats.cumulativeLost = statistics.packets_lost;
+  stats.extendedMax = statistics.extended_highest_sequence_number;
+  stats.jitterSamples = statistics.jitter;
+
+  // --- RTT
+  stats.rttMs = GetRTT(true);
+
+  // --- Data counters
+
+  size_t bytesSent(0);
+  uint32_t packetsSent(0);
+  size_t bytesReceived(0);
+  uint32_t packetsReceived(0);
+
+  if (statistician) {
+    statistician->GetDataCounters(&bytesReceived, &packetsReceived);
+  }
+
+  if (_rtpRtcpModule->DataCountersRTP(&bytesSent, &packetsSent) != 0) {
+    RTC_LOG(LS_WARNING)
+        << "GetRTPStatistics() failed to retrieve RTP datacounters"
+        << " => output will not be complete";
+  }
+
+  stats.bytesSent = bytesSent;
+  stats.packetsSent = packetsSent;
+  stats.bytesReceived = bytesReceived;
+  stats.packetsReceived = packetsReceived;
+
+  // Remote sender info from the last received RTCP sender report.
+  _rtpRtcpModule->RemoteRTCPSenderInfo(&stats.rtcp_sender_packets_sent,
+                                       &stats.rtcp_sender_octets_sent,
+                                       &stats.rtcp_sender_ntp_timestamp);
+
+  // --- Timestamps
+  {
+    rtc::CritScope lock(&ts_stats_lock_);
+    stats.capture_start_ntp_time_ms_ = capture_start_ntp_time_ms_;
+  }
+  return 0;
+}
+
+// Copies the RTCP packet-type counters into |stats|. Fails (-1) when RTCP
+// is disabled, since no counters are maintained then.
+int Channel::GetRTCPPacketTypeCounters(RtcpPacketTypeCounter& stats) {
+  if (_rtpRtcpModule->RTCP() == RtcpMode::kOff) {
+    return -1;
+  }
+
+  statistics_proxy_->GetPacketTypeCounter(stats);
+  return 0;
+}
+
+// Enables/disables NACK-based retransmission with the given history size.
+void Channel::SetNACKStatus(bool enable, int maxNumberOfPackets) {
+  // None of these functions can fail.
+  // If pacing is enabled we always store packets.
+  if (!pacing_enabled_)
+    _rtpRtcpModule->SetStorePacketsStatus(enable, maxNumberOfPackets);
+  rtp_receive_statistics_->SetMaxReorderingThreshold(maxNumberOfPackets);
+  if (enable)
+    audio_coding_->EnableNack(maxNumberOfPackets);
+  else
+    audio_coding_->DisableNack();
+}
+
+// Called when we are missing one or more packets.
+// Called when we are missing one or more packets.
+// Asks the RTP module to send a NACK for the given sequence numbers.
+int Channel::ResendPackets(const uint16_t* sequence_numbers, int length) {
+  return _rtpRtcpModule->SendNACK(sequence_numbers, length);
+}
+
+// Copies |audio_input| and posts it to the shared encoder task queue, where
+// ProcessAndEncodeAudioOnTaskQueue() performs the actual encoding. No-op
+// once StopSend() has deactivated the queue.
+void Channel::ProcessAndEncodeAudio(const AudioFrame& audio_input) {
+  // Avoid posting any new tasks if sending was already stopped in StopSend().
+  rtc::CritScope cs(&encoder_queue_lock_);
+  if (!encoder_queue_is_active_) {
+    return;
+  }
+  std::unique_ptr<AudioFrame> audio_frame(new AudioFrame());
+  // TODO(henrika): try to avoid copying by moving ownership of audio frame
+  // either into pool of frames or into the task itself.
+  audio_frame->CopyFrom(audio_input);
+  // Profile time between when the audio frame is added to the task queue and
+  // when the task is actually executed.
+  audio_frame->UpdateProfileTimeStamp();
+  encoder_queue_->PostTask(std::unique_ptr<rtc::QueuedTask>(
+      new ProcessAndEncodeAudioTask(std::move(audio_frame), this)));
+}
+
+// Raw-PCM variant: remixes/resamples the input towards the encoder's rate
+// and channel count (never upsampling beyond the source), then posts the
+// frame to the encoder task queue. No-op once StopSend() has deactivated
+// the queue.
+void Channel::ProcessAndEncodeAudio(const int16_t* audio_data,
+                                    int sample_rate,
+                                    size_t number_of_frames,
+                                    size_t number_of_channels) {
+  // Avoid posting as new task if sending was already stopped in StopSend().
+  rtc::CritScope cs(&encoder_queue_lock_);
+  if (!encoder_queue_is_active_) {
+    return;
+  }
+  std::unique_ptr<AudioFrame> audio_frame(new AudioFrame());
+  const auto props = GetEncoderProps();
+  RTC_CHECK(props);
+  // Target the lower of encoder and source rate/channels to avoid upmixing.
+  audio_frame->sample_rate_hz_ = std::min(props->sample_rate_hz, sample_rate);
+  audio_frame->num_channels_ =
+      std::min(props->num_channels, number_of_channels);
+  RemixAndResample(audio_data, number_of_frames, number_of_channels,
+                   sample_rate, &input_resampler_, audio_frame.get());
+  encoder_queue_->PostTask(std::unique_ptr<rtc::QueuedTask>(
+      new ProcessAndEncodeAudioTask(std::move(audio_frame), this)));
+}
+
+// Runs on the encoder task queue: applies mute, computes the RMS level for
+// the audio-level extension when enabled, stamps the RTP timestamp and
+// feeds the frame to the ACM for encoding/packetization.
+void Channel::ProcessAndEncodeAudioOnTaskQueue(AudioFrame* audio_input) {
+  RTC_DCHECK_RUN_ON(encoder_queue_);
+  RTC_DCHECK_GT(audio_input->samples_per_channel_, 0);
+  RTC_DCHECK_LE(audio_input->num_channels_, 2);
+
+  // Measure time between when the audio frame is added to the task queue and
+  // when the task is actually executed. Goal is to keep track of unwanted
+  // extra latency added by the task queue.
+  RTC_HISTOGRAM_COUNTS_10000("WebRTC.Audio.EncodingTaskQueueLatencyMs",
+                             audio_input->ElapsedProfileTimeMs());
+
+  // Mute (with a fade across the transition) when input mute is enabled.
+  bool is_muted = InputMute();
+  AudioFrameOperations::Mute(audio_input, previous_frame_muted_, is_muted);
+
+  if (_includeAudioLevelIndication) {
+    size_t length =
+        audio_input->samples_per_channel_ * audio_input->num_channels_;
+    RTC_CHECK_LE(length, AudioFrame::kMaxDataSizeBytes);
+    if (is_muted && previous_frame_muted_) {
+      rms_level_.AnalyzeMuted(length);
+    } else {
+      rms_level_.Analyze(
+          rtc::ArrayView<const int16_t>(audio_input->data(), length));
+    }
+  }
+  previous_frame_muted_ = is_muted;
+
+  // Add 10ms of raw (PCM) audio data to the encoder @ 32kHz.
+
+  // The ACM resamples internally.
+  audio_input->timestamp_ = _timeStamp;
+  // This call will trigger AudioPacketizationCallback::SendData if encoding
+  // is done and payload is ready for packetization and transmission.
+  // Otherwise, it will return without invoking the callback.
+  if (audio_coding_->Add10MsData(*audio_input) < 0) {
+    RTC_LOG(LS_ERROR) << "ACM::Add10MsData() failed for channel " << _channelId;
+    return;
+  }
+
+  // Advance the RTP timestamp by the number of samples just delivered.
+  _timeStamp += static_cast<uint32_t>(audio_input->samples_per_channel_);
+}
+
+// Associates a send channel whose RTT this (receive-only) channel may reuse
+// in GetRTT(). Self-association is disallowed.
+void Channel::set_associate_send_channel(const ChannelOwner& channel) {
+  RTC_DCHECK(!channel.channel() ||
+             channel.channel()->ChannelId() != _channelId);
+  rtc::CritScope lock(&assoc_send_channel_lock_);
+  associate_send_channel_ = channel;
+}
+
+// Clears the associated send channel, but only if it matches |channel_id|.
+void Channel::DisassociateSendChannel(int channel_id) {
+  rtc::CritScope lock(&assoc_send_channel_lock_);
+  Channel* channel = associate_send_channel_.channel();
+  if (channel && channel->ChannelId() == channel_id) {
+    // If this channel is associated with a send channel of the specified
+    // Channel ID, disassociate with it.
+    ChannelOwner ref(NULL);
+    associate_send_channel_ = ref;
+  }
+}
+
+// Points the event-log proxy at the given RtcEventLog (may be nullptr).
+void Channel::SetRtcEventLog(RtcEventLog* event_log) {
+  event_log_proxy_->SetEventLog(event_log);
+}
+
+// Points the RTT-stats proxy at the given RtcpRttStats (may be nullptr).
+void Channel::SetRtcpRttStats(RtcpRttStats* rtcp_rtt_stats) {
+  rtcp_rtt_stats_proxy_->SetRtcpRttStats(rtcp_rtt_stats);
+}
+
+// Pushes the combined transport+RTP per-packet overhead to the encoder.
+// Callers (SetTransportOverhead / OnOverheadChanged) hold
+// overhead_per_packet_lock_ while invoking this.
+void Channel::UpdateOverheadForEncoder() {
+  size_t overhead_per_packet =
+      transport_overhead_per_packet_ + rtp_overhead_per_packet_;
+  audio_coding_->ModifyEncoder([&](std::unique_ptr<AudioEncoder>* encoder) {
+    if (*encoder) {
+      (*encoder)->OnReceivedOverhead(overhead_per_packet);
+    }
+  });
+}
+
+// Records the transport-level (e.g. IP/UDP/TURN) per-packet overhead and
+// re-publishes the total overhead to the encoder.
+void Channel::SetTransportOverhead(size_t transport_overhead_per_packet) {
+  rtc::CritScope cs(&overhead_per_packet_lock_);
+  transport_overhead_per_packet_ = transport_overhead_per_packet;
+  UpdateOverheadForEncoder();
+}
+
+// TODO(solenberg): Make AudioSendStream an OverheadObserver instead.
+// TODO(solenberg): Make AudioSendStream an OverheadObserver instead.
+// Records the RTP-level per-packet overhead reported by the RTP module and
+// re-publishes the total overhead to the encoder.
+void Channel::OnOverheadChanged(size_t overhead_bytes_per_packet) {
+  rtc::CritScope cs(&overhead_per_packet_lock_);
+  rtp_overhead_per_packet_ = overhead_bytes_per_packet;
+  UpdateOverheadForEncoder();
+}
+
+// Fills |stats| with NetEq/jitter-buffer statistics from the ACM.
+int Channel::GetNetworkStatistics(NetworkStatistics& stats) {
+  return audio_coding_->GetNetworkStatistics(&stats);
+}
+
+// Fills |stats| with per-decoding-call counters from the ACM.
+void Channel::GetDecodingCallStatistics(AudioDecodingCallStats* stats) const {
+  audio_coding_->GetDecodingCallStatistics(stats);
+}
+
+// Returns audio-network-adaptor statistics from the ACM.
+ANAStats Channel::GetANAStatistics() const {
+  return audio_coding_->GetANAStats();
+}
+
+// Returns the current receive-side delay estimate in ms: filtered jitter
+// buffer delay plus the most recently observed playout (device) delay.
+uint32_t Channel::GetDelayEstimate() const {
+  rtc::CritScope lock(&video_sync_lock_);
+  return audio_coding_->FilteredCurrentDelayMs() + playout_delay_ms_;
+}
+
+// Sets a lower bound on the jitter-buffer playout delay (used for A/V sync).
+// |delayMs| must lie within the engine's allowed min/max range. Returns 0 on
+// success, -1 on invalid input or ACM failure.
+int Channel::SetMinimumPlayoutDelay(int delayMs) {
+  if ((delayMs < kVoiceEngineMinMinPlayoutDelayMs) ||
+      (delayMs > kVoiceEngineMaxMinPlayoutDelayMs)) {
+    RTC_LOG(LS_ERROR) << "SetMinimumPlayoutDelay() invalid min delay";
+    return -1;
+  }
+  if (audio_coding_->SetMinimumPlayoutDelay(delayMs) != 0) {
+    RTC_LOG(LS_ERROR)
+        << "SetMinimumPlayoutDelay() failed to set min playout delay";
+    return -1;
+  }
+  return 0;
+}
+
+// Returns the RTP timestamp currently being played out. Fails (-1) before
+// the first RTP packet has produced a playout timestamp (value still 0).
+int Channel::GetPlayoutTimestamp(unsigned int& timestamp) {
+  uint32_t playout_timestamp_rtp = 0;
+  {
+    rtc::CritScope lock(&video_sync_lock_);
+    playout_timestamp_rtp = playout_timestamp_rtp_;
+  }
+  if (playout_timestamp_rtp == 0) {
+    RTC_LOG(LS_ERROR) << "GetPlayoutTimestamp() failed to retrieve timestamp";
+    return -1;
+  }
+  timestamp = playout_timestamp_rtp;
+  return 0;
+}
+
+// Exposes raw (non-owning) pointers to the channel's RTP/RTCP module and
+// RTP receiver; both remain owned by the channel.
+int Channel::GetRtpRtcp(RtpRtcp** rtpRtcpModule,
+                        RtpReceiver** rtp_receiver) const {
+  *rtpRtcpModule = _rtpRtcpModule.get();
+  *rtp_receiver = rtp_receiver_.get();
+  return 0;
+}
+
+// Recomputes the playout RTP timestamp from NetEq's playout timestamp minus
+// the device playout delay. |rtcp| distinguishes the RTCP trigger, which
+// only refreshes the delay and not the RTP playout timestamp.
+void Channel::UpdatePlayoutTimestamp(bool rtcp) {
+  jitter_buffer_playout_timestamp_ = audio_coding_->PlayoutTimestamp();
+
+  if (!jitter_buffer_playout_timestamp_) {
+    // This can happen if this channel has not received any RTP packets. In
+    // this case, NetEq is not capable of computing a playout timestamp.
+    return;
+  }
+
+  uint16_t delay_ms = 0;
+  if (_audioDeviceModulePtr->PlayoutDelay(&delay_ms) == -1) {
+    RTC_LOG(LS_WARNING) << "Channel::UpdatePlayoutTimestamp() failed to read"
+                        << " playout delay from the ADM";
+    return;
+  }
+
+  RTC_DCHECK(jitter_buffer_playout_timestamp_);
+  uint32_t playout_timestamp = *jitter_buffer_playout_timestamp_;
+
+  // Remove the playout delay.
+  playout_timestamp -= (delay_ms * (GetRtpTimestampRateHz() / 1000));
+
+  {
+    rtc::CritScope lock(&video_sync_lock_);
+    if (!rtcp) {
+      playout_timestamp_rtp_ = playout_timestamp;
+    }
+    playout_delay_ms_ = delay_ms;
+  }
+}
+
+// Registers every ACM-known codec that the decoder factory supports with the
+// RTP receiver; unsupported or failing registrations are logged and skipped.
+void Channel::RegisterReceiveCodecsToRTPModule() {
+  // TODO(kwiberg): Iterate over the factory's supported codecs instead?
+  const int nSupportedCodecs = AudioCodingModule::NumberOfCodecs();
+  for (int idx = 0; idx < nSupportedCodecs; idx++) {
+    CodecInst codec;
+    if (audio_coding_->Codec(idx, &codec) == -1) {
+      RTC_LOG(LS_WARNING) << "Unable to register codec #" << idx
+                          << " for RTP/RTCP receiver.";
+      continue;
+    }
+    const SdpAudioFormat format = CodecInstToSdp(codec);
+    if (!decoder_factory_->IsSupportedDecoder(format) ||
+        rtp_receiver_->RegisterReceivePayload(codec.pltype, format) == -1) {
+      RTC_LOG(LS_WARNING) << "Unable to register " << format
+                          << " for RTP/RTCP receiver.";
+    }
+  }
+}
+
+// Helper: (re)registers or removes an outgoing RTP header extension.
+// Deregisters first so changing an extension's id takes effect. Returns the
+// RTP module's error code (0 on success).
+int Channel::SetSendRtpHeaderExtension(bool enable,
+                                       RTPExtensionType type,
+                                       unsigned char id) {
+  int error = 0;
+  _rtpRtcpModule->DeregisterSendRtpHeaderExtension(type);
+  if (enable) {
+    error = _rtpRtcpModule->RegisterSendRtpHeaderExtension(type, id);
+  }
+  return error;
+}
+
+// Returns the RTP timestamp clock rate of the received stream.
+int Channel::GetRtpTimestampRateHz() const {
+  int sampleRate = audio_coding_->ReceiveSampleRate();
+  // Default to the playout frequency if we've not gotten any packets yet.
+  // TODO(ossu): Zero clockrate can only happen if we've added an external
+  // decoder for a format we don't support internally. Remove once that way of
+  // adding decoders is gone!
+  return sampleRate != 0 ? sampleRate
+                         : audio_coding_->PlayoutFrequency();
+}
+
+// Returns the last RTT in ms towards the remote peer, or 0 when RTCP is off
+// or no RTT is available yet. Receive-only channels may borrow the RTT of
+// their associated send channel when |allow_associate_channel| is true.
+int64_t Channel::GetRTT(bool allow_associate_channel) const {
+  RtcpMode method = _rtpRtcpModule->RTCP();
+  if (method == RtcpMode::kOff) {
+    return 0;
+  }
+  std::vector<RTCPReportBlock> report_blocks;
+  _rtpRtcpModule->RemoteRTCPStat(&report_blocks);
+
+  int64_t rtt = 0;
+  if (report_blocks.empty()) {
+    if (allow_associate_channel) {
+      rtc::CritScope lock(&assoc_send_channel_lock_);
+      Channel* channel = associate_send_channel_.channel();
+      // Tries to get RTT from an associated channel. This is important for
+      // receive-only channels.
+      if (channel) {
+        // To prevent infinite recursion and deadlock, calling GetRTT of
+        // associate channel should always use "false" for argument:
+        // |allow_associate_channel|.
+        rtt = channel->GetRTT(false);
+      }
+    }
+    return rtt;
+  }
+
+  // Prefer the report block matching the remote SSRC we are receiving from.
+  uint32_t remoteSSRC = rtp_receiver_->SSRC();
+  std::vector<RTCPReportBlock>::const_iterator it = report_blocks.begin();
+  for (; it != report_blocks.end(); ++it) {
+    if (it->sender_ssrc == remoteSSRC)
+      break;
+  }
+  if (it == report_blocks.end()) {
+    // We have not received packets with SSRC matching the report blocks.
+    // To calculate RTT we try with the SSRC of the first report block.
+    // This is very important for send-only channels where we don't know
+    // the SSRC of the other end.
+    remoteSSRC = report_blocks[0].sender_ssrc;
+  }
+
+  int64_t avg_rtt = 0;
+  int64_t max_rtt = 0;
+  int64_t min_rtt = 0;
+  if (_rtpRtcpModule->RTT(remoteSSRC, &rtt, &avg_rtt, &min_rtt, &max_rtt) !=
+      0) {
+    return 0;
+  }
+  return rtt;
+}
+
+} // namespace voe
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/voice_engine/channel.h b/third_party/libwebrtc/webrtc/voice_engine/channel.h
new file mode 100644
index 0000000000..975dfc259c
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/voice_engine/channel.h
@@ -0,0 +1,492 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VOICE_ENGINE_CHANNEL_H_
+#define VOICE_ENGINE_CHANNEL_H_
+
+#include <memory>
+
+#include "api/audio/audio_mixer.h"
+#include "api/audio_codecs/audio_encoder.h"
+#include "api/call/audio_sink.h"
+#include "api/call/transport.h"
+#include "api/optional.h"
+#include "common_audio/resampler/include/push_resampler.h"
+#include "common_types.h" // NOLINT(build/include)
+#include "modules/audio_coding/include/audio_coding_module.h"
+#include "modules/audio_processing/rms_level.h"
+#include "modules/rtp_rtcp/include/remote_ntp_time_estimator.h"
+#include "modules/rtp_rtcp/include/rtp_header_parser.h"
+#include "modules/rtp_rtcp/include/rtp_receiver.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp.h"
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/event.h"
+#include "rtc_base/thread_checker.h"
+#include "voice_engine/audio_level.h"
+#include "voice_engine/include/voe_base.h"
+#include "voice_engine/shared_data.h"
+
+namespace rtc {
+class TimestampWrapAroundHandler;
+}
+
+namespace webrtc {
+
+class AudioDeviceModule;
+class PacketRouter;
+class ProcessThread;
+class RateLimiter;
+class ReceiveStatistics;
+class RemoteNtpTimeEstimator;
+class RtcEventLog;
+class RtpPacketObserver;
+class RTPPayloadRegistry;
+class RTPReceiverAudio;
+class RtpPacketReceived;
+class RtpRtcp;
+class RtpTransportControllerSendInterface;
+class TelephoneEventHandler;
+
+struct SenderInfo;
+
+// Aggregated per-channel RTP/RTCP statistics, filled by
+// Channel::GetRTPStatistics().
+struct CallStatistics {
+  unsigned short fractionLost;  // Fraction lost from the last report block.
+  unsigned int cumulativeLost;  // Cumulative packets lost (RFC 3550).
+  unsigned int extendedMax;     // Extended highest sequence number received.
+  unsigned int jitterSamples;   // Interarrival jitter, in timestamp units.
+  int64_t rttMs;                // Last round-trip time, 0 if unknown.
+  size_t bytesSent;
+  int packetsSent;
+  size_t bytesReceived;
+  int packetsReceived;
+  // The capture ntp time (in local timebase) of the first played out audio
+  // frame.
+  int64_t capture_start_ntp_time_ms_;
+
+  // From the last received RTCP sender report.
+  uint32_t rtcp_sender_packets_sent;
+  uint32_t rtcp_sender_octets_sent;
+  NtpTime rtcp_sender_ntp_timestamp;
+};
+
+// See section 6.4.2 in http://www.ietf.org/rfc/rfc3550.txt for details.
+struct ReportBlock {
+ uint32_t sender_SSRC; // SSRC of sender
+ uint32_t source_SSRC;
+ uint8_t fraction_lost;
+ uint32_t cumulative_num_packets_lost;
+ uint32_t extended_highest_sequence_number;
+ uint32_t interarrival_jitter;
+ uint32_t last_SR_timestamp;
+ uint32_t delay_since_last_SR;
+};
+
+namespace voe {
+
+class RtcEventLogProxy;
+class RtcpRttStatsProxy;
+class RtpPacketSenderProxy;
+class StatisticsProxy;
+class TransportFeedbackProxy;
+class TransportSequenceNumberProxy;
+class VoERtcpObserver;
+
+// Helper class to simplify locking scheme for members that are accessed from
+// multiple threads.
+// Example: a member can be set on thread T1 and read by an internal audio
+// thread T2. Accessing the member via this class ensures that we are
+// safe and also avoid TSan v2 warnings.
+// Helper class to simplify locking scheme for members that are accessed from
+// multiple threads.
+// Example: a member can be set on thread T1 and read by an internal audio
+// thread T2. Accessing the member via this class ensures that we are
+// safe and also avoid TSan v2 warnings.
+class ChannelState {
+ public:
+  // Snapshot of the channel's playing/sending flags, copied out under lock.
+  struct State {
+    bool playing = false;
+    bool sending = false;
+  };
+
+  ChannelState() {}
+  virtual ~ChannelState() {}
+
+  // Resets both flags to their defaults (false/false).
+  void Reset() {
+    rtc::CritScope lock(&lock_);
+    state_ = State();
+  }
+
+  // Returns a consistent copy of both flags.
+  State Get() const {
+    rtc::CritScope lock(&lock_);
+    return state_;
+  }
+
+  void SetPlaying(bool enable) {
+    rtc::CritScope lock(&lock_);
+    state_.playing = enable;
+  }
+
+  void SetSending(bool enable) {
+    rtc::CritScope lock(&lock_);
+    state_.sending = enable;
+  }
+
+ private:
+  rtc::CriticalSection lock_;
+  State state_;
+};
+
+class Channel
+ : public RtpData,
+ public RtpFeedback,
+ public Transport,
+ public AudioPacketizationCallback, // receive encoded packets from the
+ // ACM
+ public OverheadObserver {
+ public:
+ friend class VoERtcpObserver;
+
+ enum { KNumSocketThreads = 1 };
+ enum { KNumberOfSocketBuffers = 8 };
+ virtual ~Channel();
+ static int32_t CreateChannel(Channel*& channel,
+ int32_t channelId,
+ uint32_t instanceId,
+ const VoEBase::ChannelConfig& config);
+ Channel(int32_t channelId,
+ uint32_t instanceId,
+ const VoEBase::ChannelConfig& config);
+ int32_t Init();
+ void Terminate();
+ int32_t SetEngineInformation(ProcessThread& moduleProcessThread,
+ AudioDeviceModule& audioDeviceModule,
+ rtc::TaskQueue* encoder_queue);
+
+ void SetSink(std::unique_ptr<AudioSinkInterface> sink);
+
+ // TODO(ossu): Don't use! It's only here to confirm that the decoder factory
+ // passed into AudioReceiveStream is the same as the one set when creating the
+ // ADM. Once Channel creation is moved into Audio{Send,Receive}Stream this can
+ // go.
+ const rtc::scoped_refptr<AudioDecoderFactory>& GetAudioDecoderFactory() const;
+
+ void SetReceiveCodecs(const std::map<int, SdpAudioFormat>& codecs);
+
+ // Send using this encoder, with this payload type.
+ bool SetEncoder(int payload_type, std::unique_ptr<AudioEncoder> encoder);
+ void ModifyEncoder(
+ rtc::FunctionView<void(std::unique_ptr<AudioEncoder>*)> modifier);
+
+ // API methods
+
+ // VoEBase
+ int32_t StartPlayout();
+ int32_t StopPlayout();
+ int32_t StartSend();
+ void StopSend();
+
+ // Codecs
+ struct EncoderProps {
+ int sample_rate_hz;
+ size_t num_channels;
+ };
+ rtc::Optional<EncoderProps> GetEncoderProps() const;
+ int32_t GetRecCodec(CodecInst& codec);
+ void SetBitRate(int bitrate_bps, int64_t probing_interval_ms);
+ bool EnableAudioNetworkAdaptor(const std::string& config_string);
+ void DisableAudioNetworkAdaptor();
+ void SetReceiverFrameLengthRange(int min_frame_length_ms,
+ int max_frame_length_ms);
+
+ // Network
+ void RegisterTransport(Transport* transport);
+ // TODO(nisse, solenberg): Delete when VoENetwork is deleted.
+ int32_t ReceivedRTCPPacket(const uint8_t* data, size_t length);
+ void OnRtpPacket(const RtpPacketReceived& packet);
+
+ // Muting, Volume and Level.
+ void SetInputMute(bool enable);
+ void SetChannelOutputVolumeScaling(float scaling);
+ int GetSpeechOutputLevel() const;
+ int GetSpeechOutputLevelFullRange() const;
+ // See description of "totalAudioEnergy" in the WebRTC stats spec:
+ // https://w3c.github.io/webrtc-stats/#dom-rtcmediastreamtrackstats-totalaudioenergy
+ double GetTotalOutputEnergy() const;
+ double GetTotalOutputDuration() const;
+
+ // Stats.
+ int GetNetworkStatistics(NetworkStatistics& stats);
+ void GetDecodingCallStatistics(AudioDecodingCallStats* stats) const;
+ ANAStats GetANAStatistics() const;
+
+ // Audio+Video Sync.
+ uint32_t GetDelayEstimate() const;
+ int SetMinimumPlayoutDelay(int delayMs);
+ int GetPlayoutTimestamp(unsigned int& timestamp);
+ int GetRtpRtcp(RtpRtcp** rtpRtcpModule, RtpReceiver** rtp_receiver) const;
+
+ // DTMF.
+ int SendTelephoneEventOutband(int event, int duration_ms);
+ int SetSendTelephoneEventPayloadType(int payload_type, int payload_frequency);
+
+ // RTP+RTCP
+ int SetLocalMID(const char* mid);
+ int SetLocalSSRC(unsigned int ssrc);
+ int SetSendAudioLevelIndicationStatus(bool enable, unsigned char id);
+ int SetSendMIDStatus(bool enable, unsigned char id);
+ int SetReceiveAudioLevelIndicationStatus(bool enable, unsigned char id, bool isLevelSsrc);
+ int SetReceiveCsrcAudioLevelIndicationStatus(bool enable, unsigned char id);
+ void EnableSendTransportSequenceNumber(int id);
+ void EnableReceiveTransportSequenceNumber(int id);
+
+ void RegisterSenderCongestionControlObjects(
+ RtpTransportControllerSendInterface* transport,
+ RtcpBandwidthObserver* bandwidth_observer);
+ void RegisterReceiverCongestionControlObjects(PacketRouter* packet_router);
+ void ResetSenderCongestionControlObjects();
+ void ResetReceiverCongestionControlObjects();
+ void SetRTCPStatus(bool enable);
+ int SetRTCP_CNAME(const char cName[256]);
+ int GetRTCPPacketTypeCounters(RtcpPacketTypeCounter& stats);
+ int GetRemoteRTCPReportBlocks(std::vector<ReportBlock>* report_blocks);
+ int GetRTPStatistics(CallStatistics& stats);
+ void SetNACKStatus(bool enable, int maxNumberOfPackets);
+
+ // From AudioPacketizationCallback in the ACM
+ int32_t SendData(FrameType frameType,
+ uint8_t payloadType,
+ uint32_t timeStamp,
+ const uint8_t* payloadData,
+ size_t payloadSize,
+ const RTPFragmentationHeader* fragmentation) override;
+
+ // From RtpData in the RTP/RTCP module
+ int32_t OnReceivedPayloadData(const uint8_t* payloadData,
+ size_t payloadSize,
+ const WebRtcRTPHeader* rtpHeader) override;
+
+ // From RtpFeedback in the RTP/RTCP module
+ int32_t OnInitializeDecoder(int payload_type,
+ const SdpAudioFormat& audio_format,
+ uint32_t rate) override;
+ void OnIncomingSSRCChanged(uint32_t ssrc) override;
+ void OnIncomingCSRCChanged(uint32_t CSRC, bool added) override;
+
+ void OnIncomingReceiverReports(const ReportBlockList& aReportBlocks,
+ const int64_t aRoundTripTime,
+ const int64_t aReceptionTime);
+
+ // From Transport (called by the RTP/RTCP module)
+ bool SendRtp(const uint8_t* data,
+ size_t len,
+ const PacketOptions& packet_options) override;
+ bool SendRtcp(const uint8_t* data, size_t len) override;
+
+ // From AudioMixer::Source.
+ AudioMixer::Source::AudioFrameInfo GetAudioFrameWithInfo(
+ int sample_rate_hz,
+ AudioFrame* audio_frame);
+
+ int PreferredSampleRate() const;
+
+ uint32_t InstanceId() const { return _instanceId; }
+ int32_t ChannelId() const { return _channelId; }
+ bool Playing() const { return channel_state_.Get().playing; }
+ bool Sending() const { return channel_state_.Get().sending; }
+ RtpRtcp* RtpRtcpModulePtr() const { return _rtpRtcpModule.get(); }
+ int8_t OutputEnergyLevel() const { return _outputAudioLevel.Level(); }
+
+ // ProcessAndEncodeAudio() creates an audio frame copy and posts a task
+  // on the shared encoder task queue, which in turn calls (on the queue)
+ // ProcessAndEncodeAudioOnTaskQueue() where the actual processing of the
+ // audio takes place. The processing mainly consists of encoding and preparing
+ // the result for sending by adding it to a send queue.
+ // The main reason for using a task queue here is to release the native,
+ // OS-specific, audio capture thread as soon as possible to ensure that it
+  // can go back to sleep and be prepared to deliver a new captured audio
+ // packet.
+ void ProcessAndEncodeAudio(const AudioFrame& audio_input);
+
+ // This version of ProcessAndEncodeAudio() is used by PushCaptureData() in
+ // VoEBase and the audio in |audio_data| has not been subject to any APM
+  // processing. Some extra steps are therefore needed when building up the
+ // audio frame copy before using the same task as in the default call to
+ // ProcessAndEncodeAudio(const AudioFrame& audio_input).
+ void ProcessAndEncodeAudio(const int16_t* audio_data,
+ int sample_rate,
+ size_t number_of_frames,
+ size_t number_of_channels);
+
+ // Associate to a send channel.
+ // Used for obtaining RTT for a receive-only channel.
+ void set_associate_send_channel(const ChannelOwner& channel);
+ // Disassociate a send channel if it was associated.
+ void DisassociateSendChannel(int channel_id);
+
+ // Set a RtcEventLog logging object.
+ void SetRtcEventLog(RtcEventLog* event_log);
+
+ void SetRtcpRttStats(RtcpRttStats* rtcp_rtt_stats);
+ void SetTransportOverhead(size_t transport_overhead_per_packet);
+
+ // From OverheadObserver in the RTP/RTCP module
+ void OnOverheadChanged(size_t overhead_bytes_per_packet) override;
+
+ bool GetRTCPReceiverStatistics(int64_t* timestamp,
+ uint32_t* jitterMs,
+ uint32_t* cumulativeLost,
+ uint32_t* packetsReceived,
+ uint64_t* bytesReceived,
+ double* packetsFractionLost,
+ int64_t* rtt) const;
+ virtual void SetRtpPacketObserver(RtpPacketObserver* observer);
+
+ // The existence of this function alongside OnUplinkPacketLossRate is
+ // a compromise. We want the encoder to be agnostic of the PLR source, but
+ // we also don't want it to receive conflicting information from TWCC and
+ // from RTCP-XR.
+ void OnTwccBasedUplinkPacketLossRate(float packet_loss_rate);
+
+ void OnRecoverableUplinkPacketLossRate(float recoverable_packet_loss_rate);
+
+ std::vector<RtpSource> GetSources() const {
+ return rtp_receiver_->GetSources();
+ }
+
+ int GetPlayoutFrequency() const {
+ if (audio_coding_) {
+ return audio_coding_->PlayoutFrequency();
+ }
+ return 0;
+ }
+
+ void SetRtcpEventObserver(RtcpEventObserver* observer);
+
+ private:
+ class ProcessAndEncodeAudioTask;
+
+ void OnUplinkPacketLossRate(float packet_loss_rate);
+ bool InputMute() const;
+ bool OnRecoveredPacket(const uint8_t* packet, size_t packet_length);
+
+ bool ReceivePacket(const uint8_t* packet,
+ size_t packet_length,
+ const RTPHeader& header);
+ bool IsPacketInOrder(const RTPHeader& header) const;
+ bool IsPacketRetransmitted(const RTPHeader& header, bool in_order) const;
+ int ResendPackets(const uint16_t* sequence_numbers, int length);
+ void UpdatePlayoutTimestamp(bool rtcp);
+ void RegisterReceiveCodecsToRTPModule();
+
+ int SetSendRtpHeaderExtension(bool enable,
+ RTPExtensionType type,
+ unsigned char id);
+
+ void UpdateOverheadForEncoder()
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(overhead_per_packet_lock_);
+
+ int GetRtpTimestampRateHz() const;
+ int64_t GetRTT(bool allow_associate_channel) const;
+
+ // Called on the encoder task queue when a new input audio frame is ready
+ // for encoding.
+ void ProcessAndEncodeAudioOnTaskQueue(AudioFrame* audio_input);
+
+ uint32_t _instanceId;
+ int32_t _channelId;
+
+ rtc::CriticalSection _callbackCritSect;
+ rtc::CriticalSection volume_settings_critsect_;
+
+ ChannelState channel_state_;
+
+ std::unique_ptr<voe::RtcEventLogProxy> event_log_proxy_;
+ std::unique_ptr<voe::RtcpRttStatsProxy> rtcp_rtt_stats_proxy_;
+
+ std::unique_ptr<RtpHeaderParser> rtp_header_parser_;
+ std::unique_ptr<RTPPayloadRegistry> rtp_payload_registry_;
+ std::unique_ptr<ReceiveStatistics> rtp_receive_statistics_;
+ std::unique_ptr<StatisticsProxy> statistics_proxy_;
+ std::unique_ptr<RtpReceiver> rtp_receiver_;
+ TelephoneEventHandler* telephone_event_handler_;
+ std::unique_ptr<RtpRtcp> _rtpRtcpModule;
+ std::unique_ptr<AudioCodingModule> audio_coding_;
+ std::unique_ptr<AudioSinkInterface> audio_sink_;
+ AudioLevel _outputAudioLevel;
+ // Downsamples to the codec rate if necessary.
+ PushResampler<int16_t> input_resampler_;
+ uint32_t _timeStamp RTC_ACCESS_ON(encoder_queue_);
+
+ RemoteNtpTimeEstimator ntp_estimator_ RTC_GUARDED_BY(ts_stats_lock_);
+
+ // Timestamp of the audio pulled from NetEq.
+ rtc::Optional<uint32_t> jitter_buffer_playout_timestamp_;
+
+ rtc::CriticalSection video_sync_lock_;
+ uint32_t playout_timestamp_rtp_ RTC_GUARDED_BY(video_sync_lock_);
+ uint32_t playout_delay_ms_ RTC_GUARDED_BY(video_sync_lock_);
+ uint16_t send_sequence_number_;
+
+ rtc::CriticalSection ts_stats_lock_;
+
+ std::unique_ptr<rtc::TimestampWrapAroundHandler> rtp_ts_wraparound_handler_;
+ // The rtp timestamp of the first played out audio frame.
+ int64_t capture_start_rtp_time_stamp_;
+ // The capture ntp time (in local timebase) of the first played out audio
+ // frame.
+ int64_t capture_start_ntp_time_ms_ RTC_GUARDED_BY(ts_stats_lock_);
+
+ // uses
+ ProcessThread* _moduleProcessThreadPtr;
+ AudioDeviceModule* _audioDeviceModulePtr;
+ Transport* _transportPtr; // WebRtc socket or external transport
+ RmsLevel rms_level_ RTC_ACCESS_ON(encoder_queue_);
+ bool input_mute_ RTC_GUARDED_BY(volume_settings_critsect_);
+ bool previous_frame_muted_ RTC_ACCESS_ON(encoder_queue_);
+ float _outputGain RTC_GUARDED_BY(volume_settings_critsect_);
+ // VoeRTP_RTCP
+ // TODO(henrika): can today be accessed on the main thread and on the
+ // task queue; hence potential race.
+ bool _includeAudioLevelIndication;
+ size_t transport_overhead_per_packet_
+ RTC_GUARDED_BY(overhead_per_packet_lock_);
+ size_t rtp_overhead_per_packet_ RTC_GUARDED_BY(overhead_per_packet_lock_);
+ rtc::CriticalSection overhead_per_packet_lock_;
+ // VoENetwork
+ AudioFrame::SpeechType _outputSpeechType;
+ // RtcpBandwidthObserver
+ std::unique_ptr<VoERtcpObserver> rtcp_observer_;
+ // An associated send channel.
+ rtc::CriticalSection assoc_send_channel_lock_;
+ ChannelOwner associate_send_channel_ RTC_GUARDED_BY(assoc_send_channel_lock_);
+
+ bool pacing_enabled_;
+ PacketRouter* packet_router_ = nullptr;
+ std::unique_ptr<TransportFeedbackProxy> feedback_observer_proxy_;
+ std::unique_ptr<TransportSequenceNumberProxy> seq_num_allocator_proxy_;
+ std::unique_ptr<RtpPacketSenderProxy> rtp_packet_sender_proxy_;
+ std::unique_ptr<RateLimiter> retransmission_rate_limiter_;
+
+ // TODO(ossu): Remove once GetAudioDecoderFactory() is no longer needed.
+ rtc::scoped_refptr<AudioDecoderFactory> decoder_factory_;
+
+ RtpPacketObserver* rtp_source_observer_ = nullptr;
+
+ rtc::Optional<EncoderProps> cached_encoder_props_;
+
+ rtc::ThreadChecker construction_thread_;
+
+ const bool use_twcc_plr_for_ana_;
+
+ rtc::CriticalSection encoder_queue_lock_;
+
+ bool encoder_queue_is_active_ RTC_GUARDED_BY(encoder_queue_lock_) = false;
+
+ rtc::TaskQueue* encoder_queue_ = nullptr;
+};
+
+} // namespace voe
+} // namespace webrtc
+
+#endif // VOICE_ENGINE_CHANNEL_H_
diff --git a/third_party/libwebrtc/webrtc/voice_engine/channel_manager.cc b/third_party/libwebrtc/webrtc/voice_engine/channel_manager.cc
new file mode 100644
index 0000000000..9a82d2445f
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/voice_engine/channel_manager.cc
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voice_engine/channel_manager.h"
+
+#include "rtc_base/timeutils.h"
+#include "voice_engine/channel.h"
+
+namespace webrtc {
+namespace voe {
+
+ChannelOwner::ChannelOwner(class Channel* channel)
+ : channel_ref_(new ChannelRef(channel)) {}
+
+ChannelOwner::ChannelRef::ChannelRef(class Channel* channel)
+ : channel(channel) {}
+
+ChannelManager::ChannelManager(uint32_t instance_id)
+ : instance_id_(instance_id),
+ last_channel_id_(-1),
+ random_(rtc::TimeNanos()) {}
+
+ChannelOwner ChannelManager::CreateChannel(
+ const VoEBase::ChannelConfig& config) {
+ Channel* channel;
+ Channel::CreateChannel(channel, ++last_channel_id_, instance_id_, config);
+ // TODO(solenberg): Delete this, users should configure ssrc
+ // explicitly.
+ channel->SetLocalSSRC(random_.Rand<uint32_t>());
+
+ ChannelOwner channel_owner(channel);
+
+ rtc::CritScope crit(&lock_);
+
+ channels_.push_back(channel_owner);
+
+ return channel_owner;
+}
+
+ChannelOwner ChannelManager::GetChannel(int32_t channel_id) {
+ rtc::CritScope crit(&lock_);
+
+ for (size_t i = 0; i < channels_.size(); ++i) {
+ if (channels_[i].channel()->ChannelId() == channel_id)
+ return channels_[i];
+ }
+ return ChannelOwner(NULL);
+}
+
+void ChannelManager::GetAllChannels(std::vector<ChannelOwner>* channels) {
+ rtc::CritScope crit(&lock_);
+
+ *channels = channels_;
+}
+
+void ChannelManager::DestroyChannel(int32_t channel_id) {
+ assert(channel_id >= 0);
+ // Holds a reference to a channel, this is used so that we never delete
+ // Channels while holding a lock, but rather when the method returns.
+ ChannelOwner reference(NULL);
+ {
+ rtc::CritScope crit(&lock_);
+ std::vector<ChannelOwner>::iterator to_delete = channels_.end();
+ for (auto it = channels_.begin(); it != channels_.end(); ++it) {
+ Channel* channel = it->channel();
+ // For channels associated with the channel to be deleted, disassociate
+ // with that channel.
+ channel->DisassociateSendChannel(channel_id);
+
+ if (channel->ChannelId() == channel_id) {
+ to_delete = it;
+ }
+ }
+ if (to_delete != channels_.end()) {
+ reference = *to_delete;
+ channels_.erase(to_delete);
+ }
+ }
+ if (reference.channel()) {
+ // Ensure the channel is torn down now, on this thread, since a reference
+ // may still be held on a different thread (e.g. in the audio capture
+ // thread).
+ reference.channel()->Terminate();
+ }
+}
+
+void ChannelManager::DestroyAllChannels() {
+ // Holds references so that Channels are not destroyed while holding this
+ // lock, but rather when the method returns.
+ std::vector<ChannelOwner> references;
+ {
+ rtc::CritScope crit(&lock_);
+ references = channels_;
+ channels_.clear();
+ }
+ for (auto& owner : references) {
+ if (owner.channel())
+ owner.channel()->Terminate();
+ }
+}
+
+size_t ChannelManager::NumOfChannels() const {
+ rtc::CritScope crit(&lock_);
+ return channels_.size();
+}
+
+ChannelManager::Iterator::Iterator(ChannelManager* channel_manager)
+ : iterator_pos_(0) {
+ channel_manager->GetAllChannels(&channels_);
+}
+
+Channel* ChannelManager::Iterator::GetChannel() {
+ if (iterator_pos_ < channels_.size())
+ return channels_[iterator_pos_].channel();
+ return NULL;
+}
+
+bool ChannelManager::Iterator::IsValid() {
+ return iterator_pos_ < channels_.size();
+}
+
+void ChannelManager::Iterator::Increment() {
+ ++iterator_pos_;
+}
+
+} // namespace voe
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/voice_engine/channel_manager.h b/third_party/libwebrtc/webrtc/voice_engine/channel_manager.h
new file mode 100644
index 0000000000..f7cf5f4dac
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/voice_engine/channel_manager.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VOICE_ENGINE_CHANNEL_MANAGER_H_
+#define VOICE_ENGINE_CHANNEL_MANAGER_H_
+
+#include <memory>
+#include <vector>
+
+#include "api/refcountedbase.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/random.h"
+#include "rtc_base/scoped_ref_ptr.h"
+#include "system_wrappers/include/atomic32.h"
+#include "typedefs.h" // NOLINT(build/include)
+#include "voice_engine/include/voe_base.h"
+
+namespace webrtc {
+
+class AudioDecoderFactory;
+
+namespace voe {
+
+class Channel;
+
+// Shared-pointer implementation for keeping track of Channels. The underlying
+// shared instance will be dropped when no more ChannelOwners point to it.
+//
+// One common source of ChannelOwner instances are
+// ChannelManager::CreateChannel() and ChannelManager::GetChannel(...).
+// It has a similar use case to shared_ptr in C++11. Should this move to C++11
+// in the future, this class should be replaced by exactly that.
+//
+// To access the underlying Channel, use .channel().
+// IsValid() implements a convenience method as an alternative for checking
+// whether the underlying pointer is NULL or not.
+//
+//   ChannelOwner channel_owner = channel_manager.GetChannel(channel_id);
+// if (channel_owner.IsValid())
+// channel_owner.channel()->...;
+//
+class ChannelOwner {
+ public:
+ explicit ChannelOwner(Channel* channel);
+ ChannelOwner(const ChannelOwner& channel_owner) = default;
+
+ ~ChannelOwner() = default;
+
+ ChannelOwner& operator=(const ChannelOwner& other) = default;
+
+ Channel* channel() const { return channel_ref_->channel.get(); }
+ bool IsValid() { return channel_ref_->channel.get() != NULL; }
+ private:
+ // Shared instance of a Channel. Copying ChannelOwners increase the reference
+ // count and destroying ChannelOwners decrease references. Channels are
+ // deleted when no references to them are held.
+ struct ChannelRef : public rtc::RefCountedBase {
+ ChannelRef(Channel* channel);
+ const std::unique_ptr<Channel> channel;
+ };
+
+ rtc::scoped_refptr<ChannelRef> channel_ref_;
+};
+
+class ChannelManager {
+ public:
+ ChannelManager(uint32_t instance_id);
+
+ // Upon construction of an Iterator it will grab a copy of the channel list of
+ // the ChannelManager. The iteration will then occur over this state, not the
+ // current one of the ChannelManager. As the Iterator holds its own references
+ // to the Channels, they will remain valid even if they are removed from the
+ // ChannelManager.
+ class Iterator {
+ public:
+ explicit Iterator(ChannelManager* channel_manager);
+
+ Channel* GetChannel();
+ bool IsValid();
+
+ void Increment();
+
+ private:
+ size_t iterator_pos_;
+ std::vector<ChannelOwner> channels_;
+
+ RTC_DISALLOW_COPY_AND_ASSIGN(Iterator);
+ };
+
+ // CreateChannel will always return a valid ChannelOwner instance.
+ ChannelOwner CreateChannel(const VoEBase::ChannelConfig& config);
+
+ // ChannelOwner.channel() will be NULL if channel_id is invalid or no longer
+ // exists. This should be checked with ChannelOwner::IsValid().
+ ChannelOwner GetChannel(int32_t channel_id);
+ void GetAllChannels(std::vector<ChannelOwner>* channels);
+
+ void DestroyChannel(int32_t channel_id);
+ void DestroyAllChannels();
+
+ size_t NumOfChannels() const;
+
+ private:
+ uint32_t instance_id_;
+
+ Atomic32 last_channel_id_;
+
+ rtc::CriticalSection lock_;
+ std::vector<ChannelOwner> channels_;
+
+ // For generation of random ssrc:s.
+ webrtc::Random random_;
+
+ RTC_DISALLOW_COPY_AND_ASSIGN(ChannelManager);
+};
+} // namespace voe
+} // namespace webrtc
+
+#endif // VOICE_ENGINE_CHANNEL_MANAGER_H_
diff --git a/third_party/libwebrtc/webrtc/voice_engine/channel_proxy.cc b/third_party/libwebrtc/webrtc/voice_engine/channel_proxy.cc
new file mode 100644
index 0000000000..ab10e9a737
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/voice_engine/channel_proxy.cc
@@ -0,0 +1,431 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voice_engine/channel_proxy.h"
+
+#include <utility>
+
+#include "api/call/audio_sink.h"
+#include "call/rtp_transport_controller_send_interface.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_minmax.h"
+#include "voice_engine/channel.h"
+
+namespace webrtc {
+namespace voe {
+ChannelProxy::ChannelProxy() : channel_owner_(nullptr) {}
+
+ChannelProxy::ChannelProxy(const ChannelOwner& channel_owner) :
+ channel_owner_(channel_owner) {
+ RTC_CHECK(channel_owner_.channel());
+ module_process_thread_checker_.DetachFromThread();
+}
+
+ChannelProxy::~ChannelProxy() {}
+
+bool ChannelProxy::SetEncoder(int payload_type,
+ std::unique_ptr<AudioEncoder> encoder) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ return channel()->SetEncoder(payload_type, std::move(encoder));
+}
+
+void ChannelProxy::ModifyEncoder(
+ rtc::FunctionView<void(std::unique_ptr<AudioEncoder>*)> modifier) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ channel()->ModifyEncoder(modifier);
+}
+
+void ChannelProxy::SetRTCPStatus(bool enable) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ channel()->SetRTCPStatus(enable);
+}
+
+void ChannelProxy::SetLocalMID(const char* mid) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ channel()->SetLocalMID(mid);
+}
+
+void ChannelProxy::SetLocalSSRC(uint32_t ssrc) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ int error = channel()->SetLocalSSRC(ssrc);
+ RTC_DCHECK_EQ(0, error);
+}
+
+void ChannelProxy::SetRTCP_CNAME(const std::string& c_name) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ // Note: VoERTP_RTCP::SetRTCP_CNAME() accepts a char[256] array.
+ std::string c_name_limited = c_name.substr(0, 255);
+ int error = channel()->SetRTCP_CNAME(c_name_limited.c_str());
+ RTC_DCHECK_EQ(0, error);
+}
+
+void ChannelProxy::SetNACKStatus(bool enable, int max_packets) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ channel()->SetNACKStatus(enable, max_packets);
+}
+
+void ChannelProxy::SetSendAudioLevelIndicationStatus(bool enable, int id) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ int error = channel()->SetSendAudioLevelIndicationStatus(enable, id);
+ RTC_DCHECK_EQ(0, error);
+}
+
+void ChannelProxy::SetReceiveAudioLevelIndicationStatus(bool enable, int id,
+ bool isLevelSsrc) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ int error = channel()->SetReceiveAudioLevelIndicationStatus(enable, id,
+ isLevelSsrc);
+ RTC_DCHECK_EQ(0, error);
+}
+
+void ChannelProxy::SetReceiveCsrcAudioLevelIndicationStatus(bool enable, int id) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ int error = channel()->SetReceiveCsrcAudioLevelIndicationStatus(enable, id);
+ RTC_DCHECK_EQ(0, error);
+}
+
+void ChannelProxy::SetSendMIDStatus(bool enable, int id) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ int error = channel()->SetSendMIDStatus(enable, id);
+ RTC_DCHECK_EQ(0, error);
+}
+
+void ChannelProxy::EnableSendTransportSequenceNumber(int id) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ channel()->EnableSendTransportSequenceNumber(id);
+}
+
+void ChannelProxy::EnableReceiveTransportSequenceNumber(int id) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ channel()->EnableReceiveTransportSequenceNumber(id);
+}
+
+void ChannelProxy::RegisterSenderCongestionControlObjects(
+ RtpTransportControllerSendInterface* transport,
+ RtcpBandwidthObserver* bandwidth_observer) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ channel()->RegisterSenderCongestionControlObjects(transport,
+ bandwidth_observer);
+}
+
+void ChannelProxy::RegisterReceiverCongestionControlObjects(
+ PacketRouter* packet_router) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ channel()->RegisterReceiverCongestionControlObjects(packet_router);
+}
+
+void ChannelProxy::ResetSenderCongestionControlObjects() {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ channel()->ResetSenderCongestionControlObjects();
+}
+
+void ChannelProxy::ResetReceiverCongestionControlObjects() {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ channel()->ResetReceiverCongestionControlObjects();
+}
+
+bool ChannelProxy::GetRTCPPacketTypeCounters(RtcpPacketTypeCounter& stats)
+{
+ //Called on STS Thread to get stats
+ //RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ return channel()->GetRTCPPacketTypeCounters(stats) == 0;
+}
+
+bool ChannelProxy::GetRTCPReceiverStatistics(int64_t* timestamp,
+ uint32_t* jitterMs,
+ uint32_t* cumulativeLost,
+ uint32_t* packetsReceived,
+ uint64_t* bytesReceived,
+ double* packetsFractionLost,
+ int64_t* rtt) const {
+ // No thread check necessary, we are synchronizing on the lock in StatsProxy
+ return channel()->GetRTCPReceiverStatistics(timestamp,
+ jitterMs,
+ cumulativeLost,
+ packetsReceived,
+ bytesReceived,
+ packetsFractionLost,
+ rtt);
+}
+
+CallStatistics ChannelProxy::GetRTCPStatistics() const {
+ // Since we (Mozilla) need to collect stats on STS, we can't
+ // use the thread-checker (which will want to be called on MainThread)
+ // without refactor of ExecuteStatsQuery_s().
+ // However, GetRTPStatistics internally locks in the SSRC()
+ // and statistician methods.
+
+ // RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ CallStatistics stats = {0};
+ int error = channel()->GetRTPStatistics(stats);
+ RTC_DCHECK_EQ(0, error);
+ return stats;
+}
+
+int ChannelProxy::GetRTPStatistics(unsigned int& averageJitterMs,
+ unsigned int& cumulativeLost) const {
+ // Since we (Mozilla) need to collect stats on STS, we can't
+ // use the thread-checker (which will want to be called on MainThread)
+ // without refactor of ExecuteStatsQuery_s().
+ // However, GetRTPStatistics internally locks in the SSRC()
+ // and statistician methods. PlayoutFrequency() should also be safe.
+ // statistics_proxy_->GetStats() also locks
+
+ CallStatistics stats;
+ int result = channel()->GetRTPStatistics(stats);
+ int32_t playoutFrequency = channel()->GetPlayoutFrequency() / 1000;
+ if (playoutFrequency) {
+ averageJitterMs = stats.jitterSamples / playoutFrequency;
+ }
+ cumulativeLost = stats.cumulativeLost;
+ return result;
+}
+
+std::vector<ReportBlock> ChannelProxy::GetRemoteRTCPReportBlocks() const {
+ //Called on STS Thread to get stats
+ //RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ std::vector<webrtc::ReportBlock> blocks;
+ int error = channel()->GetRemoteRTCPReportBlocks(&blocks);
+ RTC_DCHECK_EQ(0, error);
+ return blocks;
+}
+
+NetworkStatistics ChannelProxy::GetNetworkStatistics() const {
+ //Called on STS Thread to get stats
+ //RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ NetworkStatistics stats = {0};
+ int error = channel()->GetNetworkStatistics(stats);
+ RTC_DCHECK_EQ(0, error);
+ return stats;
+}
+
+AudioDecodingCallStats ChannelProxy::GetDecodingCallStatistics() const {
+ //Called on STS Thread to get stats
+ //RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ AudioDecodingCallStats stats;
+ channel()->GetDecodingCallStatistics(&stats);
+ return stats;
+}
+
+ANAStats ChannelProxy::GetANAStatistics() const {
+ //Called on STS Thread to get stats
+ //RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ return channel()->GetANAStatistics();
+}
+
+int ChannelProxy::GetSpeechOutputLevel() const {
+ //Called on STS Thread to get stats
+ //RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ return channel()->GetSpeechOutputLevel();
+}
+
+int ChannelProxy::GetSpeechOutputLevelFullRange() const {
+ //Called on STS Thread to get stats
+ //RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ return channel()->GetSpeechOutputLevelFullRange();
+}
+
+double ChannelProxy::GetTotalOutputEnergy() const {
+ //Called on STS Thread to get stats
+ //RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ return channel()->GetTotalOutputEnergy();
+}
+
+double ChannelProxy::GetTotalOutputDuration() const {
+ //Called on STS Thread to get stats
+ //RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ return channel()->GetTotalOutputDuration();
+}
+
+uint32_t ChannelProxy::GetDelayEstimate() const {
+ //Called on STS Thread to get stats
+ //RTC_DCHECK(worker_thread_checker_.CalledOnValidThread() ||
+ // module_process_thread_checker_.CalledOnValidThread());
+ return channel()->GetDelayEstimate();
+}
+
+bool ChannelProxy::SetSendTelephoneEventPayloadType(int payload_type,
+ int payload_frequency) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ return channel()->SetSendTelephoneEventPayloadType(payload_type,
+ payload_frequency) == 0;
+}
+
+bool ChannelProxy::SendTelephoneEventOutband(int event, int duration_ms) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ return channel()->SendTelephoneEventOutband(event, duration_ms) == 0;
+}
+
+void ChannelProxy::SetBitrate(int bitrate_bps, int64_t probing_interval_ms) {
+ // This method can be called on the worker thread, module process thread
+ // or on a TaskQueue via VideoSendStreamImpl::OnEncoderConfigurationChanged.
+ // TODO(solenberg): Figure out a good way to check this or enforce calling
+ // rules.
+ // RTC_DCHECK(worker_thread_checker_.CalledOnValidThread() ||
+ // module_process_thread_checker_.CalledOnValidThread());
+ channel()->SetBitRate(bitrate_bps, probing_interval_ms);
+}
+
+void ChannelProxy::SetReceiveCodecs(
+ const std::map<int, SdpAudioFormat>& codecs) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ channel()->SetReceiveCodecs(codecs);
+}
+
+void ChannelProxy::SetSink(std::unique_ptr<AudioSinkInterface> sink) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ channel()->SetSink(std::move(sink));
+}
+
+void ChannelProxy::SetInputMute(bool muted) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ channel()->SetInputMute(muted);
+}
+
+void ChannelProxy::RegisterTransport(Transport* transport) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ channel()->RegisterTransport(transport);
+}
+
+void ChannelProxy::OnRtpPacket(const RtpPacketReceived& packet) {
+ // May be called on either worker thread or network thread.
+ channel()->OnRtpPacket(packet);
+}
+
+bool ChannelProxy::ReceivedRTCPPacket(const uint8_t* packet, size_t length) {
+ // May be called on either worker thread or network thread.
+ return channel()->ReceivedRTCPPacket(packet, length) == 0;
+}
+
+const rtc::scoped_refptr<AudioDecoderFactory>&
+ ChannelProxy::GetAudioDecoderFactory() const {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ return channel()->GetAudioDecoderFactory();
+}
+
+void ChannelProxy::SetChannelOutputVolumeScaling(float scaling) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ channel()->SetChannelOutputVolumeScaling(scaling);
+}
+
+void ChannelProxy::SetRtcEventLog(RtcEventLog* event_log) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ channel()->SetRtcEventLog(event_log);
+}
+
+AudioMixer::Source::AudioFrameInfo ChannelProxy::GetAudioFrameWithInfo(
+ int sample_rate_hz,
+ AudioFrame* audio_frame) {
+ RTC_DCHECK_RUNS_SERIALIZED(&audio_thread_race_checker_);
+ return channel()->GetAudioFrameWithInfo(sample_rate_hz, audio_frame);
+}
+
+int ChannelProxy::PreferredSampleRate() const {
+ RTC_DCHECK_RUNS_SERIALIZED(&audio_thread_race_checker_);
+ return channel()->PreferredSampleRate();
+}
+
+void ChannelProxy::SetTransportOverhead(int transport_overhead_per_packet) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ channel()->SetTransportOverhead(transport_overhead_per_packet);
+}
+
+void ChannelProxy::AssociateSendChannel(
+ const ChannelProxy& send_channel_proxy) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ channel()->set_associate_send_channel(send_channel_proxy.channel_owner_);
+}
+
+void ChannelProxy::DisassociateSendChannel() {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ channel()->set_associate_send_channel(ChannelOwner(nullptr));
+}
+
+void ChannelProxy::GetRtpRtcp(RtpRtcp** rtp_rtcp,
+ RtpReceiver** rtp_receiver) const {
+ RTC_DCHECK(module_process_thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(rtp_rtcp);
+ RTC_DCHECK(rtp_receiver);
+ int error = channel()->GetRtpRtcp(rtp_rtcp, rtp_receiver);
+ RTC_DCHECK_EQ(0, error);
+}
+
+uint32_t ChannelProxy::GetPlayoutTimestamp() const {
+ RTC_DCHECK_RUNS_SERIALIZED(&video_capture_thread_race_checker_);
+ unsigned int timestamp = 0;
+ int error = channel()->GetPlayoutTimestamp(timestamp);
+ RTC_DCHECK(!error || timestamp == 0);
+ return timestamp;
+}
+
+void ChannelProxy::SetMinimumPlayoutDelay(int delay_ms) {
+ RTC_DCHECK(module_process_thread_checker_.CalledOnValidThread());
+ // Limit to range accepted by both VoE and ACM, so we're at least getting as
+ // close as possible, instead of failing.
+ delay_ms = rtc::SafeClamp(delay_ms, 0, 10000);
+ int error = channel()->SetMinimumPlayoutDelay(delay_ms);
+ if (0 != error) {
+ RTC_LOG(LS_WARNING) << "Error setting minimum playout delay.";
+ }
+}
+
+void ChannelProxy::SetRtcpRttStats(RtcpRttStats* rtcp_rtt_stats) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ channel()->SetRtcpRttStats(rtcp_rtt_stats);
+}
+
+bool ChannelProxy::GetRecCodec(CodecInst* codec_inst) const {
+ //Called on STS Thread to get stats
+ //RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ return channel()->GetRecCodec(*codec_inst) == 0;
+}
+
+void ChannelProxy::OnTwccBasedUplinkPacketLossRate(float packet_loss_rate) {
+ //Called on STS Thread as a result of delivering a packet.
+ //OnTwccBasedUplinkPacketLossRate does its work using
+ //AudioCodingModuleImpl::ModifyEncoder, which takes a lock, so this should
+ //be safe.
+ //RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ channel()->OnTwccBasedUplinkPacketLossRate(packet_loss_rate);
+}
+
+void ChannelProxy::OnRecoverableUplinkPacketLossRate(
+ float recoverable_packet_loss_rate) {
+ //Called on STS Thread as a result of delivering a packet.
+ //OnRecoverableUplinkPacketLossRate does its work using
+ //AudioCodingModuleImpl::ModifyEncoder, which takes a lock, so this should
+ //be safe.
+ //RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ channel()->OnRecoverableUplinkPacketLossRate(recoverable_packet_loss_rate);
+}
+
+std::vector<RtpSource> ChannelProxy::GetSources() const {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ return channel()->GetSources();
+}
+
+void ChannelProxy::SetRtpPacketObserver(RtpPacketObserver* observer) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ channel()->SetRtpPacketObserver(observer);
+}
+
+void ChannelProxy::SetRtcpEventObserver(RtcpEventObserver* observer) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ channel()->SetRtcpEventObserver(observer);
+}
+
+Channel* ChannelProxy::channel() const {
+ RTC_DCHECK(channel_owner_.channel());
+ return channel_owner_.channel();
+}
+
+} // namespace voe
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/voice_engine/channel_proxy.h b/third_party/libwebrtc/webrtc/voice_engine/channel_proxy.h
new file mode 100644
index 0000000000..caaa0bc584
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/voice_engine/channel_proxy.h
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VOICE_ENGINE_CHANNEL_PROXY_H_
+#define VOICE_ENGINE_CHANNEL_PROXY_H_
+
+#include "api/audio/audio_mixer.h"
+#include "api/audio_codecs/audio_encoder.h"
+#include "api/rtpreceiverinterface.h"
+#include "call/rtp_packet_sink_interface.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/race_checker.h"
+#include "rtc_base/thread_checker.h"
+#include "voice_engine/channel.h"
+#include "voice_engine/channel_manager.h"
+
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace webrtc {
+
+class AudioSinkInterface;
+class PacketRouter;
+class RtcEventLog;
+class RtcpBandwidthObserver;
+class RtcpRttStats;
+class RtpPacketObserver;
+class RtpPacketSender;
+class RtpPacketReceived;
+class RtpReceiver;
+class RtpRtcp;
+class RtpTransportControllerSendInterface;
+class Transport;
+class TransportFeedbackObserver;
+
+namespace voe {
+
+// This class provides the "view" of a voe::Channel that we need to implement
+// webrtc::AudioSendStream and webrtc::AudioReceiveStream. It serves two
+// purposes:
+// 1. Allow mocking just the interfaces used, instead of the entire
+// voe::Channel class.
+// 2. Provide a refined interface for the stream classes, including assumptions
+// on return values and input adaptation.
+class ChannelProxy : public RtpPacketSinkInterface {
+ public:
+ ChannelProxy();
+ explicit ChannelProxy(const ChannelOwner& channel_owner);
+ virtual ~ChannelProxy();
+
+ virtual bool SetEncoder(int payload_type,
+ std::unique_ptr<AudioEncoder> encoder);
+ virtual void ModifyEncoder(
+ rtc::FunctionView<void(std::unique_ptr<AudioEncoder>*)> modifier);
+
+ virtual void SetRTCPStatus(bool enable);
+ virtual void SetLocalMID(const char* mid);
+ virtual void SetLocalSSRC(uint32_t ssrc);
+ virtual void SetRTCP_CNAME(const std::string& c_name);
+ virtual void SetNACKStatus(bool enable, int max_packets);
+ virtual void SetSendAudioLevelIndicationStatus(bool enable, int id);
+ virtual void SetReceiveAudioLevelIndicationStatus(bool enable, int id,
+ bool isLevelSsrc = true);
+ virtual void SetReceiveCsrcAudioLevelIndicationStatus(bool enable, int id);
+ virtual void SetSendMIDStatus(bool enable, int id);
+ virtual void EnableSendTransportSequenceNumber(int id);
+ virtual void EnableReceiveTransportSequenceNumber(int id);
+ virtual void RegisterSenderCongestionControlObjects(
+ RtpTransportControllerSendInterface* transport,
+ RtcpBandwidthObserver* bandwidth_observer);
+ virtual void RegisterReceiverCongestionControlObjects(
+ PacketRouter* packet_router);
+ virtual void ResetSenderCongestionControlObjects();
+ virtual void ResetReceiverCongestionControlObjects();
+ virtual bool GetRTCPPacketTypeCounters(RtcpPacketTypeCounter& stats);
+ virtual bool GetRTCPReceiverStatistics(int64_t* timestamp,
+ uint32_t* jitterMs,
+ uint32_t* cumulativeLost,
+ uint32_t* packetsReceived,
+ uint64_t* bytesReceived,
+ double* packetsFractionLost,
+ int64_t* rtt) const;
+ virtual CallStatistics GetRTCPStatistics() const;
+ virtual int GetRTPStatistics(unsigned int& averageJitterMs,
+ unsigned int& cumulativeLost) const;
+ virtual std::vector<ReportBlock> GetRemoteRTCPReportBlocks() const;
+ virtual NetworkStatistics GetNetworkStatistics() const;
+ virtual AudioDecodingCallStats GetDecodingCallStatistics() const;
+ virtual ANAStats GetANAStatistics() const;
+ virtual int GetSpeechOutputLevel() const;
+ virtual int GetSpeechOutputLevelFullRange() const;
+ // See description of "totalAudioEnergy" in the WebRTC stats spec:
+ // https://w3c.github.io/webrtc-stats/#dom-rtcmediastreamtrackstats-totalaudioenergy
+ virtual double GetTotalOutputEnergy() const;
+ virtual double GetTotalOutputDuration() const;
+ virtual uint32_t GetDelayEstimate() const;
+ virtual bool SetSendTelephoneEventPayloadType(int payload_type,
+ int payload_frequency);
+ virtual bool SendTelephoneEventOutband(int event, int duration_ms);
+ virtual void SetBitrate(int bitrate_bps, int64_t probing_interval_ms);
+ virtual void SetReceiveCodecs(const std::map<int, SdpAudioFormat>& codecs);
+ virtual void SetSink(std::unique_ptr<AudioSinkInterface> sink);
+ virtual void SetInputMute(bool muted);
+ virtual void RegisterTransport(Transport* transport);
+
+ // Implements RtpPacketSinkInterface
+ void OnRtpPacket(const RtpPacketReceived& packet) override;
+ virtual bool ReceivedRTCPPacket(const uint8_t* packet, size_t length);
+ virtual const rtc::scoped_refptr<AudioDecoderFactory>&
+ GetAudioDecoderFactory() const;
+ virtual void SetChannelOutputVolumeScaling(float scaling);
+ virtual void SetRtcEventLog(RtcEventLog* event_log);
+ virtual AudioMixer::Source::AudioFrameInfo GetAudioFrameWithInfo(
+ int sample_rate_hz,
+ AudioFrame* audio_frame);
+ virtual int PreferredSampleRate() const;
+ virtual void SetTransportOverhead(int transport_overhead_per_packet);
+ virtual void AssociateSendChannel(const ChannelProxy& send_channel_proxy);
+ virtual void DisassociateSendChannel();
+ virtual void GetRtpRtcp(RtpRtcp** rtp_rtcp,
+ RtpReceiver** rtp_receiver) const;
+ virtual uint32_t GetPlayoutTimestamp() const;
+ virtual void SetMinimumPlayoutDelay(int delay_ms);
+ virtual void SetRtcpRttStats(RtcpRttStats* rtcp_rtt_stats);
+ virtual bool GetRecCodec(CodecInst* codec_inst) const;
+ virtual void OnTwccBasedUplinkPacketLossRate(float packet_loss_rate);
+ virtual void OnRecoverableUplinkPacketLossRate(
+ float recoverable_packet_loss_rate);
+ virtual std::vector<webrtc::RtpSource> GetSources() const;
+
+ virtual void SetRtpPacketObserver(RtpPacketObserver* observer);
+ virtual void SetRtcpEventObserver(RtcpEventObserver* observer);
+
+ private:
+ Channel* channel() const;
+
+ // Thread checkers document and lock usage of some methods on voe::Channel to
+ // specific threads we know about. The goal is to eventually split up
+ // voe::Channel into parts with single-threaded semantics, and thereby reduce
+ // the need for locks.
+ rtc::ThreadChecker worker_thread_checker_;
+ rtc::ThreadChecker module_process_thread_checker_;
+ // Methods accessed from audio and video threads are checked for sequential-
+ // only access. We don't necessarily own and control these threads, so thread
+ // checkers cannot be used. E.g. Chromium may transfer "ownership" from one
+ // audio thread to another, but access is still sequential.
+ rtc::RaceChecker audio_thread_race_checker_;
+ rtc::RaceChecker video_capture_thread_race_checker_;
+ ChannelOwner channel_owner_;
+
+ RTC_DISALLOW_COPY_AND_ASSIGN(ChannelProxy);
+};
+} // namespace voe
+} // namespace webrtc
+
+#endif // VOICE_ENGINE_CHANNEL_PROXY_H_
diff --git a/third_party/libwebrtc/webrtc/voice_engine/channel_unittest.cc b/third_party/libwebrtc/webrtc/voice_engine/channel_unittest.cc
new file mode 100644
index 0000000000..37dd9b54dd
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/voice_engine/channel_unittest.cc
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "test/gtest.h"
+#include "voice_engine/channel.h"
+
+// Empty test just to get coverage metrics.
+TEST(ChannelTest, EmptyTestToGetCodeCoverage) {}
diff --git a/third_party/libwebrtc/webrtc/voice_engine/include/voe_base.h b/third_party/libwebrtc/webrtc/voice_engine/include/voe_base.h
new file mode 100644
index 0000000000..de25526db8
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/voice_engine/include/voe_base.h
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This sub-API supports the following functionalities:
+//
+// - Enables full duplex VoIP sessions via RTP using G.711 (mu-Law or A-Law).
+// - Initialization and termination.
+// - Trace information on text files or via callbacks.
+// - Multi-channel support (mixing, sending to multiple destinations etc.).
+//
+// To support other codecs than G.711, the VoECodec sub-API must be utilized.
+//
+// Usage example, omitting error checking:
+//
+// using namespace webrtc;
+// VoiceEngine* voe = VoiceEngine::Create();
+// VoEBase* base = VoEBase::GetInterface(voe);
+// base->Init();
+// int ch = base->CreateChannel();
+// base->StartPlayout(ch);
+// ...
+// base->DeleteChannel(ch);
+// base->Terminate();
+// base->Release();
+// VoiceEngine::Delete(voe);
+//
+#ifndef VOICE_ENGINE_VOE_BASE_H_
+#define VOICE_ENGINE_VOE_BASE_H_
+
+#include "api/audio_codecs/audio_decoder_factory.h"
+#include "common_types.h" // NOLINT(build/include)
+#include "modules/audio_coding/include/audio_coding_module.h"
+#include "rtc_base/scoped_ref_ptr.h"
+
+namespace webrtc {
+
+class AudioDeviceModule;
+class AudioProcessing;
+class AudioTransport;
+namespace voe {
+class TransmitMixer;
+} // namespace voe
+
+// VoiceEngine
+class WEBRTC_DLLEXPORT VoiceEngine {
+ public:
+ // Creates a VoiceEngine object, which can then be used to acquire
+ // sub-APIs. Returns NULL on failure.
+ static VoiceEngine* Create();
+
+ // Deletes a created VoiceEngine object and releases the utilized resources.
+ // Note that if there are outstanding references held via other interfaces,
+ // the voice engine instance will not actually be deleted until those
+ // references have been released.
+ static bool Delete(VoiceEngine*& voiceEngine);
+
+ protected:
+ VoiceEngine() {}
+ ~VoiceEngine() {}
+
+ private:
+ // VS 2015 (others?) gets confused by a base class with no vtbl, and
+ // static_cast<VoiceEngineImpl*>(mVoiceEngine) produces a bad ptr. It
+ // might also be related to the total size of the object.
+
+ // Add a virtual method to assuage the poor compiler.
+ virtual void DummyVS2015BugFix() {};
+};
+
+// VoEBase
+class WEBRTC_DLLEXPORT VoEBase {
+ public:
+ struct ChannelConfig {
+ AudioCodingModule::Config acm_config;
+ bool enable_voice_pacing = false;
+ };
+
+ // Factory for the VoEBase sub-API. Increases an internal reference
+ // counter if successful. Returns NULL if the API is not supported or if
+ // construction fails.
+ static VoEBase* GetInterface(VoiceEngine* voiceEngine);
+
+ // Releases the VoEBase sub-API and decreases an internal reference
+ // counter. Returns the new reference count. This value should be zero
+ // for all sub-APIs before the VoiceEngine object can be safely deleted.
+ virtual int Release() = 0;
+
+ // Initializes all common parts of the VoiceEngine; e.g. all
+ // encoders/decoders, the sound card and core receiving components.
+ // This method also makes it possible to install some user-defined external
+ // modules:
+ // - The Audio Device Module (ADM) which implements all the audio layer
+ // functionality in a separate (reference counted) module.
+ // - The AudioProcessing module handles capture-side processing.
+ // - An AudioDecoderFactory - used to create audio decoders.
+ virtual int Init(
+ AudioDeviceModule* audio_device,
+ AudioProcessing* audio_processing,
+ const rtc::scoped_refptr<AudioDecoderFactory>& decoder_factory) = 0;
+
+ // This method is WIP - DO NOT USE!
+ // Returns NULL before Init() is called.
+ virtual voe::TransmitMixer* transmit_mixer() = 0;
+
+ // Terminates all VoiceEngine functions and releases allocated resources.
+ virtual void Terminate() = 0;
+
+ // Creates a new channel and allocates the required resources for it.
+ // The second version accepts a |config| struct which includes an Audio Coding
+ // Module config and an option to enable voice pacing. Note that the
+ // decoder_factory member of the ACM config will be ignored (the decoder
+ // factory set through Init() will always be used).
+ // Returns channel ID or -1 in case of an error.
+ virtual int CreateChannel() = 0;
+ virtual int CreateChannel(const ChannelConfig& config) = 0;
+
+ // Deletes an existing channel and releases the utilized resources.
+ // Returns -1 in case of an error, 0 otherwise.
+ virtual int DeleteChannel(int channel) = 0;
+
+ // Starts forwarding the packets to the mixer/soundcard for a
+ // specified |channel|.
+ virtual int StartPlayout(int channel) = 0;
+
+ // Stops forwarding the packets to the mixer/soundcard for a
+ // specified |channel|.
+ virtual int StopPlayout(int channel) = 0;
+
+ // Starts sending packets to an already specified IP address and
+ // port number for a specified |channel|.
+ virtual int StartSend(int channel) = 0;
+
+ // Stops sending packets from a specified |channel|.
+ virtual int StopSend(int channel) = 0;
+
+ // Enable or disable playout to the underlying device. Takes precedence over
+ // StartPlayout. Though calls to StartPlayout are remembered; if
+ // SetPlayout(true) is called after StartPlayout, playout will be started.
+ //
+ // By default, playout is enabled.
+ virtual int SetPlayout(bool enabled) = 0;
+
+ // Enable or disable recording (which drives sending of encoded audio packets)
+ // from the underlying device. Takes precedence over StartSend. Though calls
+ // to StartSend are remembered; if SetRecording(true) is called after
+ // StartSend, recording will be started.
+ //
+ // By default, recording is enabled.
+ virtual int SetRecording(bool enabled) = 0;
+
+ // TODO(xians): Make the interface pure virtual after libjingle
+ // implements the interface in its FakeWebRtcVoiceEngine.
+ virtual AudioTransport* audio_transport() { return NULL; }
+
+ protected:
+ VoEBase() {}
+ virtual ~VoEBase() {}
+};
+
+} // namespace webrtc
+
+#endif // VOICE_ENGINE_VOE_BASE_H_
diff --git a/third_party/libwebrtc/webrtc/voice_engine/include/voe_errors.h b/third_party/libwebrtc/webrtc/voice_engine/include/voe_errors.h
new file mode 100644
index 0000000000..7479ab3957
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/voice_engine/include/voe_errors.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VOICE_ENGINE_VOE_ERRORS_H_
+#define VOICE_ENGINE_VOE_ERRORS_H_
+
+// Warnings
+#define VE_PORT_NOT_DEFINED 8001
+#define VE_CHANNEL_NOT_VALID 8002
+#define VE_FUNC_NOT_SUPPORTED 8003
+#define VE_INVALID_LISTNR 8004
+#define VE_INVALID_ARGUMENT 8005
+#define VE_INVALID_PORT_NMBR 8006
+#define VE_INVALID_PLNAME 8007
+#define VE_INVALID_PLFREQ 8008
+#define VE_INVALID_PLTYPE 8009
+#define VE_INVALID_PACSIZE 8010
+#define VE_NOT_SUPPORTED 8011
+#define VE_ALREADY_LISTENING 8012
+#define VE_CHANNEL_NOT_CREATED 8013
+#define VE_MAX_ACTIVE_CHANNELS_REACHED 8014
+#define VE_REC_CANNOT_PREPARE_HEADER 8015
+#define VE_REC_CANNOT_ADD_BUFFER 8016
+#define VE_PLAY_CANNOT_PREPARE_HEADER 8017
+#define VE_ALREADY_SENDING 8018
+#define VE_INVALID_IP_ADDRESS 8019
+#define VE_ALREADY_PLAYING 8020
+#define VE_NOT_ALL_VERSION_INFO 8021
+// 8022 is not used
+#define VE_INVALID_CHANNELS 8023
+#define VE_SET_PLTYPE_FAILED 8024
+// 8025 is not used
+#define VE_NOT_INITED 8026
+#define VE_NOT_SENDING 8027
+#define VE_EXT_TRANSPORT_NOT_SUPPORTED 8028
+#define VE_EXTERNAL_TRANSPORT_ENABLED 8029
+#define VE_STOP_RECORDING_FAILED 8030
+#define VE_INVALID_RATE 8031
+#define VE_INVALID_PACKET 8032
+#define VE_NO_GQOS 8033
+#define VE_INVALID_TIMESTAMP 8034
+#define VE_RECEIVE_PACKET_TIMEOUT 8035
+// 8036 is not used
+#define VE_INIT_FAILED_WRONG_EXPIRY 8037
+#define VE_SENDING 8038
+#define VE_ENABLE_IPV6_FAILED 8039
+#define VE_FUNC_NO_STEREO 8040
+// Range 8041-8060 is not used (8061-8080 are reserved for other platforms; see end of file)
+#define VE_FW_TRAVERSAL_ALREADY_INITIALIZED 8081
+#define VE_PACKET_RECEIPT_RESTARTED 8082
+#define VE_NOT_ALL_INFO 8083
+#define VE_CANNOT_SET_SEND_CODEC 8084
+#define VE_CODEC_ERROR 8085
+#define VE_NETEQ_ERROR 8086
+#define VE_RTCP_ERROR 8087
+#define VE_INVALID_OPERATION 8088
+#define VE_CPU_INFO_ERROR 8089
+#define VE_SOUNDCARD_ERROR 8090
+#define VE_SPEECH_LEVEL_ERROR 8091
+#define VE_SEND_ERROR 8092
+#define VE_CANNOT_REMOVE_CONF_CHANNEL 8093
+#define VE_PLTYPE_ERROR 8094
+#define VE_SET_RED_FAILED 8095
+#define VE_CANNOT_GET_PLAY_DATA 8096
+#define VE_APM_ERROR 8097
+#define VE_RUNTIME_PLAY_WARNING 8098
+#define VE_RUNTIME_REC_WARNING 8099
+#define VE_NOT_PLAYING 8100
+#define VE_SOCKETS_NOT_INITED 8101
+#define VE_CANNOT_GET_SOCKET_INFO 8102
+#define VE_INVALID_MULTICAST_ADDRESS 8103
+#define VE_DESTINATION_NOT_INITED 8104
+#define VE_RECEIVE_SOCKETS_CONFLICT 8105
+#define VE_SEND_SOCKETS_CONFLICT 8106
+// 8107 and 8108 are not used
+#define VE_NOISE_WARNING 8109
+#define VE_CANNOT_GET_SEND_CODEC 8110
+#define VE_CANNOT_GET_REC_CODEC 8111
+#define VE_ALREADY_INITED 8112
+#define VE_CANNOT_SET_SECONDARY_SEND_CODEC 8113
+#define VE_CANNOT_GET_SECONDARY_SEND_CODEC 8114
+#define VE_CANNOT_REMOVE_SECONDARY_SEND_CODEC 8115
+// 8116 is not used
+
+// Errors causing limited functionality
+#define VE_RTCP_SOCKET_ERROR 9001
+#define VE_MIC_VOL_ERROR 9002
+#define VE_SPEAKER_VOL_ERROR 9003
+#define VE_CANNOT_ACCESS_MIC_VOL 9004
+#define VE_CANNOT_ACCESS_SPEAKER_VOL 9005
+#define VE_GET_MIC_VOL_ERROR 9006
+#define VE_GET_SPEAKER_VOL_ERROR 9007
+#define VE_THREAD_RTCP_ERROR 9008
+#define VE_CANNOT_INIT_APM 9009
+#define VE_SEND_SOCKET_TOS_ERROR 9010
+#define VE_CANNOT_RETRIEVE_DEVICE_NAME 9013
+#define VE_SRTP_ERROR 9014
+// 9015 is not used
+#define VE_INTERFACE_NOT_FOUND 9016
+#define VE_TOS_GQOS_CONFLICT 9017
+#define VE_CANNOT_ADD_CONF_CHANNEL 9018
+#define VE_BUFFER_TOO_SMALL 9019
+#define VE_CANNOT_EXECUTE_SETTING 9020
+#define VE_CANNOT_RETRIEVE_SETTING 9021
+// 9022 is not used
+#define VE_RTP_KEEPALIVE_FAILED 9023
+#define VE_SEND_DTMF_FAILED 9024
+#define VE_CANNOT_RETRIEVE_CNAME 9025
+// 9026 is not used
+// 9027 is not used
+#define VE_CANNOT_RETRIEVE_RTP_STAT 9028
+#define VE_GQOS_ERROR 9029
+#define VE_BINDING_SOCKET_TO_LOCAL_ADDRESS_FAILED 9030
+#define VE_TOS_INVALID 9031
+#define VE_TOS_ERROR 9032
+#define VE_CANNOT_RETRIEVE_VALUE 9033
+
+// Critical errors that stops voice functionality
+#define VE_PLAY_UNDEFINED_SC_ERR 10001
+#define VE_REC_CANNOT_OPEN_SC 10002
+#define VE_SOCKET_ERROR 10003
+#define VE_MMSYSERR_INVALHANDLE 10004
+#define VE_MMSYSERR_NODRIVER 10005
+#define VE_MMSYSERR_NOMEM 10006
+#define VE_WAVERR_UNPREPARED 10007
+#define VE_WAVERR_STILLPLAYING 10008
+#define VE_UNDEFINED_SC_ERR 10009
+#define VE_UNDEFINED_SC_REC_ERR 10010
+#define VE_THREAD_ERROR 10011
+#define VE_CANNOT_START_RECORDING 10012
+#define VE_PLAY_CANNOT_OPEN_SC 10013
+#define VE_NO_WINSOCK_2 10014
+#define VE_SEND_SOCKET_ERROR 10015
+#define VE_BAD_FILE 10016
+#define VE_EXPIRED_COPY 10017
+#define VE_NOT_AUTHORISED 10018
+#define VE_RUNTIME_PLAY_ERROR 10019
+#define VE_RUNTIME_REC_ERROR 10020
+#define VE_BAD_ARGUMENT 10021
+#define VE_LINUX_API_ONLY 10022
+#define VE_REC_DEVICE_REMOVED 10023
+#define VE_NO_MEMORY 10024
+#define VE_BAD_HANDLE 10025
+#define VE_RTP_RTCP_MODULE_ERROR 10026
+#define VE_AUDIO_CODING_MODULE_ERROR 10027
+#define VE_AUDIO_DEVICE_MODULE_ERROR 10028
+#define VE_CANNOT_START_PLAYOUT 10029
+#define VE_CANNOT_STOP_RECORDING 10030
+#define VE_CANNOT_STOP_PLAYOUT 10031
+#define VE_CANNOT_INIT_CHANNEL 10032
+#define VE_RECV_SOCKET_ERROR 10033
+#define VE_SOCKET_TRANSPORT_MODULE_ERROR 10034
+#define VE_AUDIO_CONF_MIX_MODULE_ERROR 10035
+
+// Warnings for other platforms (reserved range 8061-8080)
+#define VE_IGNORED_FUNCTION 8061
+
+#endif // VOICE_ENGINE_VOE_ERRORS_H_
diff --git a/third_party/libwebrtc/webrtc/voice_engine/shared_data.cc b/third_party/libwebrtc/webrtc/voice_engine/shared_data.cc
new file mode 100644
index 0000000000..01e163b394
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/voice_engine/shared_data.cc
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voice_engine/shared_data.h"
+
+#include "modules/audio_processing/include/audio_processing.h"
+#include "voice_engine/channel.h"
+#include "voice_engine/transmit_mixer.h"
+
+namespace webrtc {
+
+namespace voe {
+
+static int32_t _gInstanceCounter = 0;
+
+SharedData::SharedData()
+ : _instanceId(++_gInstanceCounter),
+ _channelManager(_gInstanceCounter),
+ _audioDevicePtr(NULL),
+ _moduleProcessThreadPtr(ProcessThread::Create("VoiceProcessThread")),
+ encoder_queue_("AudioEncoderQueue") {
+ if (TransmitMixer::Create(_transmitMixerPtr) == 0) {
+ _transmitMixerPtr->SetEngineInformation(&_channelManager);
+ }
+}
+
+SharedData::~SharedData()
+{
+ TransmitMixer::Destroy(_transmitMixerPtr);
+ if (_audioDevicePtr) {
+ _audioDevicePtr->Release();
+ }
+ _moduleProcessThreadPtr->Stop();
+}
+
+rtc::TaskQueue* SharedData::encoder_queue() {
+ RTC_DCHECK_RUN_ON(&construction_thread_);
+ return &encoder_queue_;
+}
+
+void SharedData::set_audio_device(
+ const rtc::scoped_refptr<AudioDeviceModule>& audio_device) {
+ _audioDevicePtr = audio_device;
+}
+
+void SharedData::set_audio_processing(AudioProcessing* audioproc) {
+ _transmitMixerPtr->SetAudioProcessingModule(audioproc);
+}
+
+int SharedData::NumOfSendingChannels() {
+ ChannelManager::Iterator it(&_channelManager);
+ int sending_channels = 0;
+
+ for (ChannelManager::Iterator it(&_channelManager); it.IsValid();
+ it.Increment()) {
+ if (it.GetChannel()->Sending())
+ ++sending_channels;
+ }
+
+ return sending_channels;
+}
+
+int SharedData::NumOfPlayingChannels() {
+ ChannelManager::Iterator it(&_channelManager);
+ int playout_channels = 0;
+
+ for (ChannelManager::Iterator it(&_channelManager); it.IsValid();
+ it.Increment()) {
+ if (it.GetChannel()->Playing())
+ ++playout_channels;
+ }
+
+ return playout_channels;
+}
+
+} // namespace voe
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/voice_engine/shared_data.h b/third_party/libwebrtc/webrtc/voice_engine/shared_data.h
new file mode 100644
index 0000000000..7978ff7497
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/voice_engine/shared_data.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VOICE_ENGINE_SHARED_DATA_H_
+#define VOICE_ENGINE_SHARED_DATA_H_
+
+#include <memory>
+
+#include "modules/audio_device/include/audio_device.h"
+#include "modules/audio_processing/include/audio_processing.h"
+#include "modules/utility/include/process_thread.h"
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/scoped_ref_ptr.h"
+#include "rtc_base/task_queue.h"
+#include "rtc_base/thread_annotations.h"
+#include "rtc_base/thread_checker.h"
+#include "voice_engine/channel_manager.h"
+
+class ProcessThread;
+
+namespace webrtc {
+namespace voe {
+
+class TransmitMixer;
+
+class SharedData
+{
+public:
+ // Public accessors.
+ uint32_t instance_id() const { return _instanceId; }
+ ChannelManager& channel_manager() { return _channelManager; }
+ AudioDeviceModule* audio_device() { return _audioDevicePtr.get(); }
+ void set_audio_device(
+ const rtc::scoped_refptr<AudioDeviceModule>& audio_device);
+ void set_audio_processing(AudioProcessing* audio_processing);
+ TransmitMixer* transmit_mixer() { return _transmitMixerPtr; }
+ rtc::CriticalSection* crit_sec() { return &_apiCritPtr; }
+ ProcessThread* process_thread() { return _moduleProcessThreadPtr.get(); }
+ rtc::TaskQueue* encoder_queue();
+
+ int NumOfSendingChannels();
+ int NumOfPlayingChannels();
+
+protected:
+ rtc::ThreadChecker construction_thread_;
+ const uint32_t _instanceId;
+ rtc::CriticalSection _apiCritPtr;
+ ChannelManager _channelManager;
+ rtc::scoped_refptr<AudioDeviceModule> _audioDevicePtr;
+ TransmitMixer* _transmitMixerPtr;
+ std::unique_ptr<ProcessThread> _moduleProcessThreadPtr;
+ // |encoder_queue| is defined last to ensure all pending tasks are cancelled
+ // and deleted before any other members.
+ rtc::TaskQueue encoder_queue_ RTC_ACCESS_ON(construction_thread_);
+
+ SharedData();
+ virtual ~SharedData();
+};
+
+} // namespace voe
+} // namespace webrtc
+#endif // VOICE_ENGINE_SHARED_DATA_H_
diff --git a/third_party/libwebrtc/webrtc/voice_engine/transmit_mixer.cc b/third_party/libwebrtc/webrtc/voice_engine/transmit_mixer.cc
new file mode 100644
index 0000000000..049a64c2e8
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/voice_engine/transmit_mixer.cc
@@ -0,0 +1,242 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voice_engine/transmit_mixer.h"
+
+#include <memory>
+
+#include "audio/utility/audio_frame_operations.h"
+#include "rtc_base/format_macros.h"
+#include "rtc_base/location.h"
+#include "rtc_base/logging.h"
+#include "system_wrappers/include/event_wrapper.h"
+#include "voice_engine/channel.h"
+#include "voice_engine/channel_manager.h"
+#include "voice_engine/utility.h"
+
+namespace webrtc {
+namespace voe {
+
+// TODO(solenberg): The thread safety in this class is dubious.
+
+int32_t
+TransmitMixer::Create(TransmitMixer*& mixer)
+{
+ mixer = new TransmitMixer();
+ if (mixer == NULL)
+ {
+ RTC_DLOG(LS_ERROR) <<
+ "TransmitMixer::Create() unable to allocate memory for mixer";
+ return -1;
+ }
+ return 0;
+}
+
+void
+TransmitMixer::Destroy(TransmitMixer*& mixer)
+{
+ if (mixer)
+ {
+ delete mixer;
+ mixer = NULL;
+ }
+}
+
+TransmitMixer::~TransmitMixer() = default;
+
+void TransmitMixer::SetEngineInformation(ChannelManager* channelManager) {
+ _channelManagerPtr = channelManager;
+}
+
+int32_t
+TransmitMixer::SetAudioProcessingModule(AudioProcessing* audioProcessingModule)
+{
+ audioproc_ = audioProcessingModule;
+ return 0;
+}
+
+void TransmitMixer::GetSendCodecInfo(int* max_sample_rate,
+ size_t* max_channels) {
+ *max_sample_rate = 8000;
+ *max_channels = 1;
+ for (ChannelManager::Iterator it(_channelManagerPtr); it.IsValid();
+ it.Increment()) {
+ Channel* channel = it.GetChannel();
+ if (channel->Sending()) {
+ const auto props = channel->GetEncoderProps();
+ RTC_CHECK(props);
+ *max_sample_rate = std::max(*max_sample_rate, props->sample_rate_hz);
+ *max_channels = std::max(*max_channels, props->num_channels);
+ }
+ }
+}
+
+int32_t
+TransmitMixer::PrepareDemux(const void* audioSamples,
+ size_t nSamples,
+ size_t nChannels,
+ uint32_t samplesPerSec,
+ uint16_t totalDelayMS,
+ int32_t clockDrift,
+ uint16_t currentMicLevel,
+ bool keyPressed)
+{
+ // --- Resample input audio and create/store the initial audio frame
+ GenerateAudioFrame(static_cast<const int16_t*>(audioSamples),
+ nSamples,
+ nChannels,
+ samplesPerSec);
+
+ // --- Near-end audio processing.
+ ProcessAudio(totalDelayMS, clockDrift, currentMicLevel, keyPressed);
+
+ if (swap_stereo_channels_ && stereo_codec_)
+ // Only bother swapping if we're using a stereo codec.
+ AudioFrameOperations::SwapStereoChannels(&_audioFrame);
+
+ // --- Annoying typing detection (utilizes the APM/VAD decision)
+#if WEBRTC_VOICE_ENGINE_TYPING_DETECTION
+ TypingDetection(keyPressed);
+#endif
+
+ // --- Measure audio level of speech after all processing.
+ double sample_duration = static_cast<double>(nSamples) / samplesPerSec;
+ _audioLevel.ComputeLevel(_audioFrame, sample_duration);
+
+ return 0;
+}
+
+void TransmitMixer::ProcessAndEncodeAudio() {
+ RTC_DCHECK_GT(_audioFrame.samples_per_channel_, 0);
+ for (ChannelManager::Iterator it(_channelManagerPtr); it.IsValid();
+ it.Increment()) {
+ Channel* const channel = it.GetChannel();
+ if (channel->Sending()) {
+ channel->ProcessAndEncodeAudio(_audioFrame);
+ }
+ }
+}
+
+uint32_t TransmitMixer::CaptureLevel() const
+{
+ return _captureLevel;
+}
+
+int32_t
+TransmitMixer::StopSend()
+{
+ _audioLevel.Clear();
+ return 0;
+}
+
+int8_t TransmitMixer::AudioLevel() const
+{
+ // Speech + file level [0,9]
+ return _audioLevel.Level();
+}
+
+int16_t TransmitMixer::AudioLevelFullRange() const
+{
+ // Speech + file level [0,32767]
+ return _audioLevel.LevelFullRange();
+}
+
+double TransmitMixer::GetTotalInputEnergy() const {
+ return _audioLevel.TotalEnergy();
+}
+
+double TransmitMixer::GetTotalInputDuration() const {
+ return _audioLevel.TotalDuration();
+}
+
+void TransmitMixer::GenerateAudioFrame(const int16_t* audio,
+ size_t samples_per_channel,
+ size_t num_channels,
+ int sample_rate_hz) {
+ int codec_rate;
+ size_t num_codec_channels;
+ GetSendCodecInfo(&codec_rate, &num_codec_channels);
+ stereo_codec_ = num_codec_channels == 2;
+
+ // We want to process at the lowest rate possible without losing information.
+ // Choose the lowest native rate at least equal to the input and codec rates.
+ const int min_processing_rate = std::min(sample_rate_hz, codec_rate);
+ for (size_t i = 0; i < AudioProcessing::kNumNativeSampleRates; ++i) {
+ _audioFrame.sample_rate_hz_ = AudioProcessing::kNativeSampleRatesHz[i];
+ if (_audioFrame.sample_rate_hz_ >= min_processing_rate) {
+ break;
+ }
+ }
+ _audioFrame.num_channels_ = std::min(num_channels, num_codec_channels);
+ RemixAndResample(audio, samples_per_channel, num_channels, sample_rate_hz,
+ &resampler_, &_audioFrame);
+}
+
+void TransmitMixer::ProcessAudio(int delay_ms, int clock_drift,
+ int current_mic_level, bool key_pressed) {
+ if (audioproc_->set_stream_delay_ms(delay_ms) != 0) {
+ // Silently ignore this failure to avoid flooding the logs.
+ }
+
+ GainControl* agc = audioproc_->gain_control();
+ if (agc->set_stream_analog_level(current_mic_level) != 0) {
+ RTC_DLOG(LS_ERROR) << "set_stream_analog_level failed: current_mic_level = "
+ << current_mic_level;
+ assert(false);
+ }
+
+ EchoCancellation* aec = audioproc_->echo_cancellation();
+ if (aec->is_drift_compensation_enabled()) {
+ aec->set_stream_drift_samples(clock_drift);
+ }
+
+ audioproc_->set_stream_key_pressed(key_pressed);
+
+ int err = audioproc_->ProcessStream(&_audioFrame);
+ if (err != 0) {
+ RTC_DLOG(LS_ERROR) << "ProcessStream() error: " << err;
+ assert(false);
+ }
+
+ // Store new capture level. Only updated when analog AGC is enabled.
+ _captureLevel = agc->stream_analog_level();
+}
+
+#if WEBRTC_VOICE_ENGINE_TYPING_DETECTION
+void TransmitMixer::TypingDetection(bool key_pressed)
+{
+ // We let the VAD determine if we're using this feature or not.
+ if (_audioFrame.vad_activity_ == AudioFrame::kVadUnknown) {
+ return;
+ }
+
+ bool vad_active = _audioFrame.vad_activity_ == AudioFrame::kVadActive;
+ bool typing_detected = typing_detection_.Process(key_pressed, vad_active);
+
+ rtc::CritScope cs(&lock_);
+ typing_noise_detected_ = typing_detected;
+}
+#endif
+
+void TransmitMixer::EnableStereoChannelSwapping(bool enable) {
+ swap_stereo_channels_ = enable;
+}
+
+bool TransmitMixer::IsStereoChannelSwappingEnabled() {
+ return swap_stereo_channels_;
+}
+
+bool TransmitMixer::typing_noise_detected() const {
+ rtc::CritScope cs(&lock_);
+ return typing_noise_detected_;
+}
+
+} // namespace voe
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/voice_engine/transmit_mixer.h b/third_party/libwebrtc/webrtc/voice_engine/transmit_mixer.h
new file mode 100644
index 0000000000..42b6212bfe
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/voice_engine/transmit_mixer.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VOICE_ENGINE_TRANSMIT_MIXER_H_
+#define VOICE_ENGINE_TRANSMIT_MIXER_H_
+
+#include <memory>
+
+#include "common_audio/resampler/include/push_resampler.h"
+#include "common_types.h" // NOLINT(build/include)
+#include "modules/audio_processing/typing_detection.h"
+#include "modules/include/module_common_types.h"
+#include "rtc_base/criticalsection.h"
+#include "voice_engine/audio_level.h"
+#include "voice_engine/include/voe_base.h"
+
+#if !defined(WEBRTC_ANDROID) && !defined(WEBRTC_IOS)
+#define WEBRTC_VOICE_ENGINE_TYPING_DETECTION 1
+#else
+#define WEBRTC_VOICE_ENGINE_TYPING_DETECTION 0
+#endif
+
+namespace webrtc {
+class AudioProcessing;
+class ProcessThread;
+
+namespace voe {
+
+class ChannelManager;
+class MixedAudio;
+
+// Pre-processes locally captured audio before it is encoded and sent:
+// resampling to the mixing rate, APM processing, input level/energy
+// accounting, optional stereo channel swapping and (desktop-only)
+// typing-noise detection.
+class TransmitMixer {
+public:
+ static int32_t Create(TransmitMixer*& mixer);
+
+ static void Destroy(TransmitMixer*& mixer);
+
+ void SetEngineInformation(ChannelManager* channelManager);
+
+ int32_t SetAudioProcessingModule(AudioProcessing* audioProcessingModule);
+
+ int32_t PrepareDemux(const void* audioSamples,
+ size_t nSamples,
+ size_t nChannels,
+ uint32_t samplesPerSec,
+ uint16_t totalDelayMS,
+ int32_t clockDrift,
+ uint16_t currentMicLevel,
+ bool keyPressed);
+
+ void ProcessAndEncodeAudio();
+
+ // Must be called on the same thread as PrepareDemux().
+ uint32_t CaptureLevel() const;
+
+ int32_t StopSend();
+
+ // TODO(solenberg): Remove, once AudioMonitor is gone.
+ int8_t AudioLevel() const;
+
+ // 'virtual' to allow mocking.
+ virtual int16_t AudioLevelFullRange() const;
+
+ // See description of "totalAudioEnergy" in the WebRTC stats spec:
+ // https://w3c.github.io/webrtc-stats/#dom-rtcmediastreamtrackstats-totalaudioenergy
+ // 'virtual' to allow mocking.
+ virtual double GetTotalInputEnergy() const;
+
+ // 'virtual' to allow mocking.
+ virtual double GetTotalInputDuration() const;
+
+ virtual ~TransmitMixer();
+
+ // Virtual to allow mocking.
+ virtual void EnableStereoChannelSwapping(bool enable);
+ bool IsStereoChannelSwappingEnabled();
+
+ // Virtual to allow mocking.
+ virtual bool typing_noise_detected() const;
+
+protected:
+ TransmitMixer() = default;
+
+private:
+ // Gets the maximum sample rate and number of channels over all currently
+ // sending codecs.
+ void GetSendCodecInfo(int* max_sample_rate, size_t* max_channels);
+
+ void GenerateAudioFrame(const int16_t audioSamples[],
+ size_t nSamples,
+ size_t nChannels,
+ int samplesPerSec);
+
+ void ProcessAudio(int delay_ms, int clock_drift, int current_mic_level,
+ bool key_pressed);
+
+#if WEBRTC_VOICE_ENGINE_TYPING_DETECTION
+ void TypingDetection(bool key_pressed);
+#endif
+
+ // uses
+ ChannelManager* _channelManagerPtr = nullptr;
+ AudioProcessing* audioproc_ = nullptr;
+
+ // owns
+ AudioFrame _audioFrame;
+ PushResampler<int16_t> resampler_; // ADM sample rate -> mixing rate
+ voe::AudioLevel _audioLevel;
+
+#if WEBRTC_VOICE_ENGINE_TYPING_DETECTION
+ webrtc::TypingDetection typing_detection_;
+#endif
+
+ // Guards |typing_noise_detected_|, which is written by TypingDetection()
+ // and read via typing_noise_detected().
+ rtc::CriticalSection lock_;
+ bool typing_noise_detected_ RTC_GUARDED_BY(lock_) = false;
+
+ // Analog mic level after APM processing; see ProcessAudio()/CaptureLevel().
+ uint32_t _captureLevel = 0;
+ bool stereo_codec_ = false;
+ bool swap_stereo_channels_ = false;
+};
+} // namespace voe
+} // namespace webrtc
+
+#endif // VOICE_ENGINE_TRANSMIT_MIXER_H_
diff --git a/third_party/libwebrtc/webrtc/voice_engine/transport_feedback_packet_loss_tracker.cc b/third_party/libwebrtc/webrtc/voice_engine/transport_feedback_packet_loss_tracker.cc
new file mode 100644
index 0000000000..774faf5219
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/voice_engine/transport_feedback_packet_loss_tracker.cc
@@ -0,0 +1,366 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voice_engine/transport_feedback_packet_loss_tracker.h"
+
+#include <limits>
+#include <utility>
+
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/mod_ops.h"
+
+namespace {
+// Half of the 16-bit sequence-number space; the threshold used when deciding
+// whether one sequence number is "newer" than another.
+constexpr uint16_t kSeqNumHalf = 0x8000u;
+// Increments or decrements |*counter|, DCHECKing against wrap-around.
+// Used to symmetrically apply (increment) or undo a metric contribution.
+void UpdateCounter(size_t* counter, bool increment) {
+ if (increment) {
+ RTC_DCHECK_LT(*counter, std::numeric_limits<std::size_t>::max());
+ ++(*counter);
+ } else {
+ RTC_DCHECK_GT(*counter, 0);
+ --(*counter);
+ }
+}
+} // namespace
+
+namespace webrtc {
+
+// |max_window_size_ms| bounds the send-time span of retained packets;
+// |plr_min_num_acked_packets| / |rplr_min_num_acked_pairs| are the minimum
+// sample counts before PLR / RPLR are reported. All three must be > 0.
+TransportFeedbackPacketLossTracker::TransportFeedbackPacketLossTracker(
+ int64_t max_window_size_ms,
+ size_t plr_min_num_acked_packets,
+ size_t rplr_min_num_acked_pairs)
+ : max_window_size_ms_(max_window_size_ms),
+ ref_packet_status_(packet_status_window_.begin()),
+ plr_state_(plr_min_num_acked_packets),
+ rplr_state_(rplr_min_num_acked_pairs) {
+ RTC_DCHECK_GT(max_window_size_ms, 0);
+ RTC_DCHECK_GT(plr_min_num_acked_packets, 0);
+ RTC_DCHECK_GT(rplr_min_num_acked_pairs, 0);
+ Reset();
+}
+
+// Drops all window contents and metric state. Called from the constructor
+// and whenever sequence-number continuity is broken (see OnPacketAdded).
+void TransportFeedbackPacketLossTracker::Reset() {
+ acked_packets_ = 0;
+ plr_state_.Reset();
+ rplr_state_.Reset();
+ packet_status_window_.clear();
+ ref_packet_status_ = packet_status_window_.begin();
+}
+
+// Oldest tracked sequence number. The window must be non-empty.
+uint16_t TransportFeedbackPacketLossTracker::ReferenceSequenceNumber() const {
+ RTC_DCHECK(!packet_status_window_.empty());
+ return ref_packet_status_->first;
+}
+
+// Newest tracked sequence number (the entry logically preceding end() in
+// circular send order). The window must be non-empty.
+uint16_t TransportFeedbackPacketLossTracker::NewestSequenceNumber() const {
+ RTC_DCHECK(!packet_status_window_.empty());
+ return PreviousPacketStatus(packet_status_window_.end())->first;
+}
+
+// Registers a newly sent packet at |send_time_ms|. If |seq_num| is already
+// tracked, or is not newer than the newest tracked entry (the stream lay
+// dormant long enough for sequence numbers to wrap), the window is reset
+// first. Old entries are then evicted so that the window always spans less
+// than half of the 16-bit sequence-number space.
+void TransportFeedbackPacketLossTracker::OnPacketAdded(uint16_t seq_num,
+ int64_t send_time_ms) {
+ // Sanity - time can't flow backwards.
+ RTC_DCHECK(
+ packet_status_window_.empty() ||
+ PreviousPacketStatus(packet_status_window_.end())->second.send_time_ms <=
+ send_time_ms);
+
+ if (packet_status_window_.find(seq_num) != packet_status_window_.end() ||
+ (!packet_status_window_.empty() &&
+ ForwardDiff(seq_num, NewestSequenceNumber()) <= kSeqNumHalf)) {
+ // The only way for these two to happen is when the stream lies dormant for
+ // long enough for the sequence numbers to wrap. Everything in the window in
+ // such a case would be too old to use.
+ Reset();
+ }
+
+ // Maintain a window where the newest sequence number is at most 0x7fff away
+ // from the oldest, so that we could still distinguish old/new.
+ while (!packet_status_window_.empty() &&
+ ForwardDiff(ref_packet_status_->first, seq_num) >= kSeqNumHalf) {
+ RemoveOldestPacketStatus();
+ }
+
+ SentPacket sent_packet(send_time_ms, PacketStatus::Unacked);
+ packet_status_window_.insert(packet_status_window_.end(),
+ std::make_pair(seq_num, sent_packet));
+
+ // If this was the first entry, (re)initialize the reference iterator.
+ if (packet_status_window_.size() == 1) {
+ ref_packet_status_ = packet_status_window_.cbegin();
+ }
+}
+
+// Applies a batch of transport feedback. Entries not present in the window
+// (other streams, or already shifted out) are ignored; the rest are marked
+// Received or Lost (kNotReceived arrival time) and the metrics updated.
+void TransportFeedbackPacketLossTracker::OnPacketFeedbackVector(
+ const std::vector<PacketFeedback>& packet_feedback_vector) {
+ for (const PacketFeedback& packet : packet_feedback_vector) {
+ const auto& it = packet_status_window_.find(packet.sequence_number);
+
+ // Packets which aren't at least marked as unacked either do not belong to
+ // this media stream, or have been shifted out of window.
+ if (it == packet_status_window_.end())
+ continue;
+
+ const bool lost = packet.arrival_time_ms == PacketFeedback::kNotReceived;
+ const PacketStatus packet_status =
+ lost ? PacketStatus::Lost : PacketStatus::Received;
+
+ UpdatePacketStatus(it, packet_status);
+ }
+}
+
+// Lost/acked ratio over the window, or empty while too few packets acked.
+rtc::Optional<float>
+TransportFeedbackPacketLossTracker::GetPacketLossRate() const {
+ return plr_state_.GetMetric();
+}
+
+// Recoverable-loss/acked-pair ratio, or empty while too few pairs observed.
+rtc::Optional<float>
+TransportFeedbackPacketLossTracker::GetRecoverablePacketLossRate() const {
+ return rplr_state_.GetMetric();
+}
+
+// Transitions |it| to |new_status| and updates the metrics. A Lost->Received
+// re-report wins over the earlier one; repeats and Received->Lost are
+// ignored. Finally trims entries whose send time is more than
+// |max_window_size_ms_| older than |it|'s.
+void TransportFeedbackPacketLossTracker::UpdatePacketStatus(
+ SentPacketStatusMap::iterator it,
+ PacketStatus new_status) {
+ if (it->second.status != PacketStatus::Unacked) {
+ // Normally, packets are sent (inserted into window as "unacked"), then we
+ // receive one feedback for them.
+ // But it is possible that a packet would receive two feedbacks. Then:
+ if (it->second.status == PacketStatus::Lost &&
+ new_status == PacketStatus::Received) {
+ // If older status said that the packet was lost but newer one says it
+ // is received, we take the newer one.
+ UpdateMetrics(it, false);
+ it->second.status =
+ PacketStatus::Unacked; // For clarity; overwritten shortly.
+ } else {
+ // If the value is unchanged or if older status said that the packet was
+ // received but the newer one says it is lost, we ignore it.
+ // The standard allows for previously-reported packets to carry
+ // no report when the reports overlap, which also looks like the
+ // packet is being reported as lost.
+ return;
+ }
+ }
+
+ // Change from UNACKED to RECEIVED/LOST.
+ it->second.status = new_status;
+ UpdateMetrics(it, true);
+
+ // Remove packets from the beginning of the window until we only hold packets,
+ // be they acked or unacked, which are not more than |max_window_size_ms|
+ // older from the newest packet. (If the packet we're now inserting into the
+ // window isn't the newest, it would not trigger any removals; the newest
+ // already removed all relevant.)
+ while (ref_packet_status_ != packet_status_window_.end() &&
+ (it->second.send_time_ms - ref_packet_status_->second.send_time_ms) >
+ max_window_size_ms_) {
+ RemoveOldestPacketStatus();
+ }
+}
+
+// Evicts the oldest entry: undoes its metric contributions, advances
+// |ref_packet_status_| (circularly, via NextPacketStatus) and erases the
+// map entry.
+void TransportFeedbackPacketLossTracker::RemoveOldestPacketStatus() {
+ UpdateMetrics(ref_packet_status_, false);
+ const auto it = ref_packet_status_;
+ ref_packet_status_ = NextPacketStatus(it);
+ packet_status_window_.erase(it);
+}
+
+// Applies (|apply| == true) or undoes (|apply| == false) |it|'s contribution
+// to the acked-packet count and to the PLR/RPLR counters.
+void TransportFeedbackPacketLossTracker::UpdateMetrics(
+ ConstPacketStatusIterator it,
+ bool apply /* false = undo */) {
+ RTC_DCHECK(it != packet_status_window_.end());
+ // Metrics are dependent on feedbacks from the other side. We don't want
+ // to update the metrics each time a packet is sent, except for the case
+ // when it shifts old sent-but-unacked-packets out of window.
+ RTC_DCHECK(!apply || it->second.status != PacketStatus::Unacked);
+
+ if (it->second.status != PacketStatus::Unacked) {
+ UpdateCounter(&acked_packets_, apply);
+ }
+
+ UpdatePlr(it, apply);
+ UpdateRplr(it, apply);
+}
+
+// Updates the received/lost counters feeding the packet-loss-rate metric;
+// unacked packets contribute nothing.
+void TransportFeedbackPacketLossTracker::UpdatePlr(
+ ConstPacketStatusIterator it,
+ bool apply /* false = undo */) {
+ switch (it->second.status) {
+ case PacketStatus::Unacked:
+ return;
+ case PacketStatus::Received:
+ UpdateCounter(&plr_state_.num_received_packets_, apply);
+ break;
+ case PacketStatus::Lost:
+ UpdateCounter(&plr_state_.num_lost_packets_, apply);
+ break;
+ default:
+ RTC_NOTREACHED();
+ }
+}
+
+// Updates the acked-pair / recoverable-loss counters feeding RPLR. A "pair"
+// is two consecutively tracked, both-acked packets; it counts as a
+// recoverable loss when the first was lost and the second received.
+// Both the (prev, it) and (it, next) pairs are (un)applied here.
+void TransportFeedbackPacketLossTracker::UpdateRplr(
+ ConstPacketStatusIterator it,
+ bool apply /* false = undo */) {
+ if (it->second.status == PacketStatus::Unacked) {
+ // Unacked packets cannot compose a pair.
+ return;
+ }
+
+ // Previous packet and current packet might compose a pair.
+ if (it != ref_packet_status_) {
+ const auto& prev = PreviousPacketStatus(it);
+ if (prev->second.status != PacketStatus::Unacked) {
+ UpdateCounter(&rplr_state_.num_acked_pairs_, apply);
+ if (prev->second.status == PacketStatus::Lost &&
+ it->second.status == PacketStatus::Received) {
+ UpdateCounter(
+ &rplr_state_.num_recoverable_losses_, apply);
+ }
+ }
+ }
+
+ // Current packet and next packet might compose a pair.
+ const auto& next = NextPacketStatus(it);
+ if (next != packet_status_window_.end() &&
+ next->second.status != PacketStatus::Unacked) {
+ UpdateCounter(&rplr_state_.num_acked_pairs_, apply);
+ if (it->second.status == PacketStatus::Lost &&
+ next->second.status == PacketStatus::Received) {
+ UpdateCounter(&rplr_state_.num_recoverable_losses_, apply);
+ }
+ }
+}
+
+// Returns the entry logically preceding |it| in circular send order;
+// PreviousPacketStatus(end()) yields the newest entry. Must not be called
+// on |ref_packet_status_| (the oldest entry has no predecessor).
+TransportFeedbackPacketLossTracker::ConstPacketStatusIterator
+TransportFeedbackPacketLossTracker::PreviousPacketStatus(
+ ConstPacketStatusIterator it) const {
+ RTC_DCHECK(it != ref_packet_status_);
+ if (it == packet_status_window_.end()) {
+ // This is to make PreviousPacketStatus(packet_status_window_.end()) point
+ // to the last element.
+ it = ref_packet_status_;
+ }
+
+ if (it == packet_status_window_.begin()) {
+ // Due to the circular nature of sequence numbers, we let the iterator
+ // go to the end.
+ it = packet_status_window_.end();
+ }
+ return --it;
+}
+
+// Returns the entry logically following |it| in circular send order, or
+// end() when |it| is the newest entry. Must not be called on end().
+TransportFeedbackPacketLossTracker::ConstPacketStatusIterator
+TransportFeedbackPacketLossTracker::NextPacketStatus(
+ ConstPacketStatusIterator it) const {
+ RTC_DCHECK(it != packet_status_window_.end());
+ ++it;
+ if (it == packet_status_window_.end()) {
+ // Due to the circular nature of sequence numbers, we let the iterator
+ // go back to the beginning.
+ it = packet_status_window_.begin();
+ }
+ if (it == ref_packet_status_) {
+ // This is to make the NextPacketStatus of the last element to return the
+ // beyond-the-end iterator.
+ it = packet_status_window_.end();
+ }
+ return it;
+}
+
+// TODO(minyue): This method checks the states of this class do not misbehave.
+// The method is used both in unit tests and a fuzzer test. The fuzzer test
+// is present to help finding potential errors. Once the fuzzer test shows no
+// error after long period, we can remove the fuzzer test, and move this method
+// to unit test.
+void TransportFeedbackPacketLossTracker::Validate() const { // Testing only!
+ RTC_CHECK_EQ(plr_state_.num_received_packets_ + plr_state_.num_lost_packets_,
+ acked_packets_);
+ RTC_CHECK_LE(acked_packets_, packet_status_window_.size());
+ RTC_CHECK_LE(rplr_state_.num_recoverable_losses_,
+ rplr_state_.num_acked_pairs_);
+ // When |acked_packets_| is 0, the unsigned wrap of |acked_packets_ - 1|
+ // makes this check trivially true.
+ RTC_CHECK_LE(rplr_state_.num_acked_pairs_, acked_packets_ - 1);
+
+ size_t unacked_packets = 0;
+ size_t received_packets = 0;
+ size_t lost_packets = 0;
+ size_t acked_pairs = 0;
+ size_t recoverable_losses = 0;
+
+ // Walk the window once in circular send order, recomputing every counter
+ // from scratch and cross-checking against the incrementally kept state.
+ if (!packet_status_window_.empty()) {
+ ConstPacketStatusIterator it = ref_packet_status_;
+ do {
+ switch (it->second.status) {
+ case PacketStatus::Unacked:
+ ++unacked_packets;
+ break;
+ case PacketStatus::Received:
+ ++received_packets;
+ break;
+ case PacketStatus::Lost:
+ ++lost_packets;
+ break;
+ default:
+ RTC_NOTREACHED();
+ }
+
+ auto next = std::next(it);
+ if (next == packet_status_window_.end())
+ next = packet_status_window_.begin();
+
+ if (next != ref_packet_status_) { // If we have a next packet...
+ RTC_CHECK_GE(next->second.send_time_ms, it->second.send_time_ms);
+
+ if (it->second.status != PacketStatus::Unacked &&
+ next->second.status != PacketStatus::Unacked) {
+ ++acked_pairs;
+ if (it->second.status == PacketStatus::Lost &&
+ next->second.status == PacketStatus::Received) {
+ ++recoverable_losses;
+ }
+ }
+ }
+
+ RTC_CHECK_LT(ForwardDiff(ReferenceSequenceNumber(), it->first),
+ kSeqNumHalf);
+
+ it = next;
+ } while (it != ref_packet_status_);
+ }
+
+ RTC_CHECK_EQ(plr_state_.num_received_packets_, received_packets);
+ RTC_CHECK_EQ(plr_state_.num_lost_packets_, lost_packets);
+ RTC_CHECK_EQ(packet_status_window_.size(),
+ unacked_packets + received_packets + lost_packets);
+ RTC_CHECK_EQ(rplr_state_.num_acked_pairs_, acked_pairs);
+ RTC_CHECK_EQ(rplr_state_.num_recoverable_losses_, recoverable_losses);
+}
+
+// Returns lost / (lost + received), or empty until at least
+// |min_num_acked_packets_| packets have been acked.
+rtc::Optional<float>
+TransportFeedbackPacketLossTracker::PlrState::GetMetric() const {
+ const size_t total = num_lost_packets_ + num_received_packets_;
+ if (total < min_num_acked_packets_) {
+ return rtc::nullopt;
+ } else {
+ return static_cast<float>(num_lost_packets_) / total;
+ }
+}
+
+// Returns recoverable-losses / acked-pairs, or empty until at least
+// |min_num_acked_pairs_| pairs have been observed.
+rtc::Optional<float>
+TransportFeedbackPacketLossTracker::RplrState::GetMetric() const {
+ if (num_acked_pairs_ < min_num_acked_pairs_) {
+ return rtc::nullopt;
+ } else {
+ return static_cast<float>(num_recoverable_losses_) / num_acked_pairs_;
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/voice_engine/transport_feedback_packet_loss_tracker.h b/third_party/libwebrtc/webrtc/voice_engine/transport_feedback_packet_loss_tracker.h
new file mode 100644
index 0000000000..d7420785f7
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/voice_engine/transport_feedback_packet_loss_tracker.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VOICE_ENGINE_TRANSPORT_FEEDBACK_PACKET_LOSS_TRACKER_H_
+#define VOICE_ENGINE_TRANSPORT_FEEDBACK_PACKET_LOSS_TRACKER_H_
+
+#include <map>
+
+#include "api/optional.h"
+#include "modules/include/module_common_types.h"
+
+namespace webrtc {
+
+namespace rtcp {
+class TransportFeedback;
+}
+
+struct PacketFeedback;
+
+// Computes the packet loss rate (PLR) and the first-order-FEC recoverable
+// packet loss rate (RPLR) over a sliding, send-time-bounded window of sent
+// packets, driven by send notifications and transport-wide feedback.
+class TransportFeedbackPacketLossTracker final {
+ public:
+ // * We count up to |max_window_size_ms| from the sent
+ // time of the latest acked packet for the calculation of the metrics.
+ // * PLR (packet-loss-rate) is reliably computable once the statuses of
+ // |plr_min_num_acked_packets| packets are known.
+ // * RPLR (recoverable-packet-loss-rate) is reliably computable once the
+ // statuses of |rplr_min_num_acked_pairs| pairs are known.
+ TransportFeedbackPacketLossTracker(int64_t max_window_size_ms,
+ size_t plr_min_num_acked_packets,
+ size_t rplr_min_num_acked_pairs);
+
+ void OnPacketAdded(uint16_t seq_num, int64_t send_time_ms);
+
+ void OnPacketFeedbackVector(
+ const std::vector<PacketFeedback>& packet_feedbacks_vector);
+
+ // Returns the packet loss rate, if the window has enough packet statuses to
+ // reliably compute it. Otherwise, returns empty.
+ rtc::Optional<float> GetPacketLossRate() const;
+
+ // Returns the first-order-FEC recoverable packet loss rate, if the window has
+ // enough status pairs to reliably compute it. Otherwise, returns empty.
+ rtc::Optional<float> GetRecoverablePacketLossRate() const;
+
+ // Verifies that the internal states are correct. Only used for tests.
+ void Validate() const;
+
+ private:
+ // When a packet is sent, we memorize its association with the stream by
+ // marking it as (sent-but-so-far-) unacked. If we ever receive a feedback
+ // that reports it as received/lost, we update the state and
+ // metrics accordingly.
+
+ enum class PacketStatus { Unacked = 0, Received = 1, Lost = 2 };
+ struct SentPacket {
+ SentPacket(int64_t send_time_ms, PacketStatus status)
+ : send_time_ms(send_time_ms), status(status) {}
+ int64_t send_time_ms;
+ PacketStatus status;
+ };
+ typedef std::map<uint16_t, SentPacket> SentPacketStatusMap;
+ typedef SentPacketStatusMap::const_iterator ConstPacketStatusIterator;
+
+ void Reset();
+
+ // ReferenceSequenceNumber() provides a sequence number that defines the
+ // order of packet reception info stored in |packet_status_window_|. In
+ // particular, given any sequence number |x|,
+ // (2^16 + x - ref_seq_num_) % 2^16 defines its actual position in
+ // |packet_status_window_|.
+ uint16_t ReferenceSequenceNumber() const;
+ uint16_t NewestSequenceNumber() const;
+ void UpdatePacketStatus(SentPacketStatusMap::iterator it,
+ PacketStatus new_status);
+ void RemoveOldestPacketStatus();
+
+ void UpdateMetrics(ConstPacketStatusIterator it,
+ bool apply /* false = undo */);
+ void UpdatePlr(ConstPacketStatusIterator it, bool apply /* false = undo */);
+ void UpdateRplr(ConstPacketStatusIterator it, bool apply /* false = undo */);
+
+ // Circular-order navigation helpers; see the .cc for their end()/wrap
+ // conventions.
+ ConstPacketStatusIterator PreviousPacketStatus(
+ ConstPacketStatusIterator it) const;
+ ConstPacketStatusIterator NextPacketStatus(
+ ConstPacketStatusIterator it) const;
+
+ const int64_t max_window_size_ms_;
+ size_t acked_packets_;
+
+ SentPacketStatusMap packet_status_window_;
+ // |ref_packet_status_| points to the oldest item in |packet_status_window_|.
+ ConstPacketStatusIterator ref_packet_status_;
+
+ // Packet-loss-rate calculation (lost / all-known-packets).
+ struct PlrState {
+ explicit PlrState(size_t min_num_acked_packets)
+ : min_num_acked_packets_(min_num_acked_packets) {
+ Reset();
+ }
+ void Reset() {
+ num_received_packets_ = 0;
+ num_lost_packets_ = 0;
+ }
+ rtc::Optional<float> GetMetric() const;
+ const size_t min_num_acked_packets_;
+ size_t num_received_packets_;
+ size_t num_lost_packets_;
+ } plr_state_;
+
+ // Recoverable packet loss calculation (first-order-FEC recoverable).
+ struct RplrState {
+ explicit RplrState(size_t min_num_acked_pairs)
+ : min_num_acked_pairs_(min_num_acked_pairs) {
+ Reset();
+ }
+ void Reset() {
+ num_acked_pairs_ = 0;
+ num_recoverable_losses_ = 0;
+ }
+ rtc::Optional<float> GetMetric() const;
+ // Recoverable packets are those which were lost, but immediately followed
+ // by a properly received packet. If that second packet carried FEC,
+ // the data from the former (lost) packet could be recovered.
+ // The RPLR is calculated as the fraction of such pairs (lost-received) out
+ // of all pairs of consecutive acked packets.
+ const size_t min_num_acked_pairs_;
+ size_t num_acked_pairs_;
+ size_t num_recoverable_losses_;
+ } rplr_state_;
+};
+
+} // namespace webrtc
+
+#endif // VOICE_ENGINE_TRANSPORT_FEEDBACK_PACKET_LOSS_TRACKER_H_
diff --git a/third_party/libwebrtc/webrtc/voice_engine/transport_feedback_packet_loss_tracker_unittest.cc b/third_party/libwebrtc/webrtc/voice_engine/transport_feedback_packet_loss_tracker_unittest.cc
new file mode 100644
index 0000000000..55626bedd0
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/voice_engine/transport_feedback_packet_loss_tracker_unittest.cc
@@ -0,0 +1,574 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <limits>
+#include <memory>
+#include <numeric>
+#include <vector>
+
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
+#include "rtc_base/checks.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "voice_engine/transport_feedback_packet_loss_tracker.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr int64_t kDefaultSendIntervalMs = 10;
+constexpr int64_t kDefaultMaxWindowSizeMs = 500 * kDefaultSendIntervalMs;
+
+// Fixture parameterized on the base sequence number (GetParam()), so each
+// test can be instantiated at different points of the sequence-number space.
+class TransportFeedbackPacketLossTrackerTest
+ : public ::testing::TestWithParam<uint16_t> {
+ public:
+ TransportFeedbackPacketLossTrackerTest() = default;
+ virtual ~TransportFeedbackPacketLossTrackerTest() = default;
+
+ protected:
+ // Feeds |sequence_numbers| into the tracker, advancing the fake clock by
+ // |send_time_interval_ms| after each packet. Validates the tracker's
+ // invariants after each packet, or only once at the end when
+ // |validate_all| is false (to keep long tests fast).
+ void SendPackets(TransportFeedbackPacketLossTracker* tracker,
+ const std::vector<uint16_t>& sequence_numbers,
+ int64_t send_time_interval_ms,
+ bool validate_all = true) {
+ RTC_CHECK_GE(send_time_interval_ms, 0);
+ for (uint16_t sequence_number : sequence_numbers) {
+ tracker->OnPacketAdded(sequence_number, time_ms_);
+ if (validate_all) {
+ tracker->Validate();
+ }
+ time_ms_ += send_time_interval_ms;
+ }
+
+ // We've either validated after each packet, or, for making sure the UT
+ // doesn't run too long, we might validate only at the end of the range.
+ if (!validate_all) {
+ tracker->Validate();
+ }
+ }
+
+ // Convenience overload: sends |num_of_packets| consecutive sequence
+ // numbers starting at |first_seq_num|.
+ void SendPackets(TransportFeedbackPacketLossTracker* tracker,
+ uint16_t first_seq_num,
+ size_t num_of_packets,
+ int64_t send_time_interval_ms,
+ bool validate_all = true) {
+ RTC_CHECK_GE(send_time_interval_ms, 0);
+ std::vector<uint16_t> sequence_numbers(num_of_packets);
+ std::iota(sequence_numbers.begin(), sequence_numbers.end(), first_seq_num);
+ SendPackets(tracker, sequence_numbers, send_time_interval_ms, validate_all);
+ }
+
+ void AdvanceClock(int64_t time_delta_ms) {
+ RTC_CHECK_GT(time_delta_ms, 0);
+ time_ms_ += time_delta_ms;
+ }
+
+ // Builds a feedback vector for consecutive sequence numbers starting at
+ // |base_sequence_num| (true = received, false = lost), applies it, and
+ // validates the tracker's invariants.
+ void AddTransportFeedbackAndValidate(
+ TransportFeedbackPacketLossTracker* tracker,
+ uint16_t base_sequence_num,
+ const std::vector<bool>& reception_status_vec) {
+ // Any positive integer signals reception. kNotReceived signals loss.
+ // Other values are just illegal.
+ constexpr int64_t kArrivalTimeMs = 1234;
+
+ std::vector<PacketFeedback> packet_feedback_vector;
+ uint16_t seq_num = base_sequence_num;
+ for (bool received : reception_status_vec) {
+ packet_feedback_vector.emplace_back(PacketFeedback(
+ received ? kArrivalTimeMs : PacketFeedback::kNotReceived, seq_num));
+ ++seq_num;
+ }
+
+ tracker->OnPacketFeedbackVector(packet_feedback_vector);
+ tracker->Validate();
+ }
+
+ // Checks that validity is as expected. If valid, checks also that
+ // value is as expected.
+ void ValidatePacketLossStatistics(
+ const TransportFeedbackPacketLossTracker& tracker,
+ rtc::Optional<float> expected_plr,
+ rtc::Optional<float> expected_rplr) {
+ // TODO(eladalon): Comparing the rtc::Optional<float> directly would have
+ // given concise code, but less readable error messages. If we modify
+ // the way rtc::Optional is printed, we can get rid of this.
+ rtc::Optional<float> plr = tracker.GetPacketLossRate();
+ EXPECT_EQ(static_cast<bool>(expected_plr), static_cast<bool>(plr));
+ if (expected_plr && plr) {
+ EXPECT_EQ(*expected_plr, *plr);
+ }
+
+ rtc::Optional<float> rplr = tracker.GetRecoverablePacketLossRate();
+ EXPECT_EQ(static_cast<bool>(expected_rplr), static_cast<bool>(rplr));
+ if (expected_rplr && rplr) {
+ EXPECT_EQ(*expected_rplr, *rplr);
+ }
+ }
+
+ // First sequence number used by each test, taken from the test parameter.
+ uint16_t base_{GetParam()};
+
+ private:
+ // Monotonically advancing fake send-time clock (milliseconds).
+ int64_t time_ms_{0};
+
+ RTC_DISALLOW_COPY_AND_ASSIGN(TransportFeedbackPacketLossTrackerTest);
+};
+
+} // namespace
+
+// Sanity check on an empty window.
+TEST_P(TransportFeedbackPacketLossTrackerTest, EmptyWindow) {
+ TransportFeedbackPacketLossTracker tracker(kDefaultMaxWindowSizeMs, 5, 5);
+
+ // PLR and RPLR reported as unknown before reception of first feedback.
+ ValidatePacketLossStatistics(tracker, rtc::nullopt, rtc::nullopt);
+}
+
+// A feedback received for an empty window has no effect.
+TEST_P(TransportFeedbackPacketLossTrackerTest, EmptyWindowFeedback) {
+ TransportFeedbackPacketLossTracker tracker(kDefaultMaxWindowSizeMs, 3, 2);
+
+ // Feedback doesn't correspond to any packets - ignored.
+ AddTransportFeedbackAndValidate(&tracker, base_, {true, false, true});
+ ValidatePacketLossStatistics(tracker, rtc::nullopt, rtc::nullopt);
+
+ // After the packets are transmitted, acking them would have an effect.
+ SendPackets(&tracker, base_, 3, kDefaultSendIntervalMs);
+ AddTransportFeedbackAndValidate(&tracker, base_, {true, false, true});
+ // One loss among three acked packets; one recoverable (lost->received)
+ // pair out of two acked pairs.
+ ValidatePacketLossStatistics(tracker, 1.0f / 3.0f, 0.5f);
+}
+
+// Sanity check on partially filled window.
+TEST_P(TransportFeedbackPacketLossTrackerTest, PartiallyFilledWindow) {
+ TransportFeedbackPacketLossTracker tracker(kDefaultMaxWindowSizeMs, 5, 4);
+
+ // PLR unknown before minimum window size reached.
+ // RPLR unknown before minimum pairs reached.
+ // Expected window contents: [] -> [1001].
+ SendPackets(&tracker, base_, 3, kDefaultSendIntervalMs);
+ AddTransportFeedbackAndValidate(&tracker, base_, {true, false, false, true});
+ ValidatePacketLossStatistics(tracker, rtc::nullopt, rtc::nullopt);
+}
+
+// Sanity check on minimum filled window - PLR known, RPLR unknown.
+TEST_P(TransportFeedbackPacketLossTrackerTest, PlrMinimumFilledWindow) {
+ TransportFeedbackPacketLossTracker tracker(kDefaultMaxWindowSizeMs, 5, 5);
+
+ // PLR correctly calculated after minimum window size reached.
+ // RPLR not necessarily known at that time (not if min-pairs not reached).
+ // Expected window contents: [] -> [10011].
+ SendPackets(&tracker, base_, 5, kDefaultSendIntervalMs);
+ AddTransportFeedbackAndValidate(&tracker, base_,
+ {true, false, false, true, true});
+ // 5 acked packets meet the PLR minimum, but only 4 pairs < min-pairs (5).
+ ValidatePacketLossStatistics(tracker, 2.0f / 5.0f, rtc::nullopt);
+}
+
+// Sanity check on minimum filled window - PLR unknown, RPLR known.
+TEST_P(TransportFeedbackPacketLossTrackerTest, RplrMinimumFilledWindow) {
+ TransportFeedbackPacketLossTracker tracker(kDefaultMaxWindowSizeMs, 6, 4);
+
+ // RPLR correctly calculated after minimum pairs reached.
+ // PLR not necessarily known at that time (not if min window not reached).
+ // Expected window contents: [] -> [10011].
+ SendPackets(&tracker, base_, 5, kDefaultSendIntervalMs);
+ AddTransportFeedbackAndValidate(&tracker, base_,
+ {true, false, false, true, true});
+ ValidatePacketLossStatistics(tracker, rtc::nullopt, 1.0f / 4.0f);
+}
+
+// If packets are sent close enough together that the clock reading for both
+// is the same, that's handled properly.
+TEST_P(TransportFeedbackPacketLossTrackerTest, SameSentTime) {
+ TransportFeedbackPacketLossTracker tracker(kDefaultMaxWindowSizeMs, 3, 2);
+
+ // Expected window contents: [] -> [101].
+ SendPackets(&tracker, base_, 3, 0); // Note: time interval = 0ms.
+ AddTransportFeedbackAndValidate(&tracker, base_, {true, false, true});
+
+ ValidatePacketLossStatistics(tracker, 1.0f / 3.0f, 0.5f);
+}
+
+// Additional reports update PLR and RPLR.
+TEST_P(TransportFeedbackPacketLossTrackerTest, ExtendWindow) {
+ TransportFeedbackPacketLossTracker tracker(kDefaultMaxWindowSizeMs, 5, 5);
+
+ SendPackets(&tracker, base_, 25, kDefaultSendIntervalMs);
+
+ // Expected window contents: [] -> [10011].
+ AddTransportFeedbackAndValidate(&tracker, base_,
+ {true, false, false, true, true});
+ ValidatePacketLossStatistics(tracker, 2.0f / 5.0f, rtc::nullopt);
+
+ // Expected window contents: [10011] -> [1001110101].
+ AddTransportFeedbackAndValidate(&tracker, base_ + 5,
+ {true, false, true, false, true});
+ ValidatePacketLossStatistics(tracker, 4.0f / 10.0f, 3.0f / 9.0f);
+
+ // Expected window contents: [1001110101] -> [1001110101-GAP-10001].
+ AddTransportFeedbackAndValidate(&tracker, base_ + 20,
+ {true, false, false, false, true});
+ ValidatePacketLossStatistics(tracker, 7.0f / 15.0f, 4.0f / 13.0f);
+}
+
+// Correct calculation with different packet lengths.
+TEST_P(TransportFeedbackPacketLossTrackerTest, DifferentSentIntervals) {
+ TransportFeedbackPacketLossTracker tracker(kDefaultMaxWindowSizeMs, 5, 4);
+
+ // Per-packet send intervals (ms) used as the clock advance for each send.
+ int64_t frames[] = {20, 60, 120, 20, 60};
+ for (size_t i = 0; i < sizeof(frames) / sizeof(frames[0]); i++) {
+ SendPackets(&tracker, {static_cast<uint16_t>(base_ + i)}, frames[i]);
+ }
+
+ // Expected window contents: [] -> [10011].
+ AddTransportFeedbackAndValidate(&tracker, base_,
+ {true, false, false, true, true});
+ ValidatePacketLossStatistics(tracker, 2.0f / 5.0f, 1.0f / 4.0f);
+}
+
+// The window retains information up to sent times that exceed the max
+// window size. The oldest packets get shifted out of window to make room
+// for the newer ones.
+TEST_P(TransportFeedbackPacketLossTrackerTest, MaxWindowSize) {
+ TransportFeedbackPacketLossTracker tracker(4 * kDefaultSendIntervalMs, 5, 1);
+
+ SendPackets(&tracker, base_, 6, kDefaultSendIntervalMs, true);
+
+ // Up to the maximum time-span retained (first + 4 * kDefaultSendIntervalMs).
+ // Expected window contents: [] -> [01001].
+ AddTransportFeedbackAndValidate(&tracker, base_,
+ {false, true, false, false, true});
+ ValidatePacketLossStatistics(tracker, 3.0f / 5.0f, 2.0f / 4.0f);
+
+ // After the maximum time-span, older entries are discarded to accommodate
+ // newer ones.
+ // Expected window contents: [01001] -> [10011].
+ AddTransportFeedbackAndValidate(&tracker, base_ + 5, {true});
+ ValidatePacketLossStatistics(tracker, 2.0f / 5.0f, 1.0f / 4.0f);
+}
+
+// All packets received.
+TEST_P(TransportFeedbackPacketLossTrackerTest, AllReceived) {
+ TransportFeedbackPacketLossTracker tracker(kDefaultMaxWindowSizeMs, 5, 4);
+
+ // Expected window contents: [] -> [11111].
+ SendPackets(&tracker, base_, 5, kDefaultSendIntervalMs);
+ AddTransportFeedbackAndValidate(&tracker, base_,
+ {true, true, true, true, true});
+ ValidatePacketLossStatistics(tracker, 0.0f, 0.0f);
+}
+
+// All packets lost.
+TEST_P(TransportFeedbackPacketLossTrackerTest, AllLost) {
+ TransportFeedbackPacketLossTracker tracker(kDefaultMaxWindowSizeMs, 5, 4);
+
+ // Note: The last packet in the feedback does not belong to the stream.
+ // It's only there because we're not allowed to end a feedback with a loss.
+ // Expected window contents: [] -> [00000].
+ SendPackets(&tracker, base_, 5, kDefaultSendIntervalMs);
+ AddTransportFeedbackAndValidate(&tracker, base_,
+ {false, false, false, false, false, true});
+ ValidatePacketLossStatistics(tracker, 1.0f, 0.0f);
+}
+
+// Repeated reports are ignored.
+TEST_P(TransportFeedbackPacketLossTrackerTest, ReportRepetition) {
+ TransportFeedbackPacketLossTracker tracker(kDefaultMaxWindowSizeMs, 5, 4);
+
+ SendPackets(&tracker, base_, 5, kDefaultSendIntervalMs);
+
+ // Expected window contents: [] -> [10011].
+ AddTransportFeedbackAndValidate(&tracker, base_,
+ {true, false, false, true, true});
+ ValidatePacketLossStatistics(tracker, 2.0f / 5.0f, 1.0f / 4.0f);
+
+ // Repeat entire previous feedback
+ // Expected window contents: [10011] -> [10011].
+ AddTransportFeedbackAndValidate(&tracker, base_,
+ {true, false, false, true, true});
+ ValidatePacketLossStatistics(tracker, 2.0f / 5.0f, 1.0f / 4.0f);
+}
+
+// Report overlap.
+TEST_P(TransportFeedbackPacketLossTrackerTest, ReportOverlap) {
+ TransportFeedbackPacketLossTracker tracker(kDefaultMaxWindowSizeMs, 5, 1);
+
+ SendPackets(&tracker, base_, 15, kDefaultSendIntervalMs);
+
+ // Expected window contents: [] -> [10011].
+ AddTransportFeedbackAndValidate(&tracker, base_,
+ {true, false, false, true, true});
+ ValidatePacketLossStatistics(tracker, 2.0f / 5.0f, 1.0f / 4.0f);
+
+ // Expected window contents: [10011] -> [1001101].
+ AddTransportFeedbackAndValidate(&tracker, base_ + 3,
+ {true, true, false, true});
+ ValidatePacketLossStatistics(tracker, 3.0f / 7.0f, 2.0f / 6.0f);
+}
+
+// Report conflict.
+TEST_P(TransportFeedbackPacketLossTrackerTest, ReportConflict) {
+ TransportFeedbackPacketLossTracker tracker(kDefaultMaxWindowSizeMs, 5, 4);
+
+ SendPackets(&tracker, base_, 15, 10);
+
+ // Expected window contents: [] -> [01001].
+ AddTransportFeedbackAndValidate(&tracker, base_,
+ {false, true, false, false, true});
+ ValidatePacketLossStatistics(tracker, 3.0f / 5.0f, 2.0f / 4.0f);
+
+ // Expected window contents: [01001] -> [11101].
+ // While false->true will be applied, true -> false will be ignored.
+ AddTransportFeedbackAndValidate(&tracker, base_, {true, false, true});
+ ValidatePacketLossStatistics(tracker, 1.0f / 5.0f, 1.0f / 4.0f);
+}
+
+// Skipped packets treated as unknown (not lost).
+TEST_P(TransportFeedbackPacketLossTrackerTest, SkippedPackets) {
+ TransportFeedbackPacketLossTracker tracker(200 * kDefaultSendIntervalMs, 5,
+ 1);
+
+ SendPackets(&tracker, base_, 200, kDefaultSendIntervalMs);
+
+ // Expected window contents: [] -> [10011].
+ AddTransportFeedbackAndValidate(&tracker, base_,
+ {true, false, false, true, true});
+ ValidatePacketLossStatistics(tracker, 2.0f / 5.0f, 1.0f / 4.0f);
+
+ // Expected window contents: [10011] -> [10011-GAP-101].
+ AddTransportFeedbackAndValidate(&tracker, base_ + 100, {true, false, true});
+ ValidatePacketLossStatistics(tracker, 3.0f / 8.0f, 2.0f / 6.0f);
+}
+
+// Moving a window, if it excludes some old acked messages, can leave
+// in-window unacked messages intact, and ready to be used later.
+TEST_P(TransportFeedbackPacketLossTrackerTest, MovedWindowRetainsRelevantInfo) {
+ constexpr int64_t max_window_size_ms = 100;
+ TransportFeedbackPacketLossTracker tracker(max_window_size_ms, 5, 1);
+
+ // Note: All messages in this test are sent 1ms apart from each other.
+ // Therefore, the delta in sequence numbers equals the timestamps delta.
+ SendPackets(&tracker, base_, 4 * max_window_size_ms, 1);
+
+ // Expected window contents: [] -> [10101].
+ AddTransportFeedbackAndValidate(&tracker, base_,
+ {true, false, true, false, true});
+ ValidatePacketLossStatistics(tracker, 2.0f / 5.0f, 2.0f / 4.0f);
+
+ // Expected window contents: [10101] -> [100011].
+ const int64_t moved_oldest_acked = base_ + 2 * max_window_size_ms;
+ const std::vector<bool> feedback = {true, false, false, false, true, true};
+ AddTransportFeedbackAndValidate(&tracker, moved_oldest_acked, feedback);
+ ValidatePacketLossStatistics(tracker, 3.0f / 6.0f, 1.0f / 5.0f);
+
+ // Having acked |feedback.size()| starting with |moved_oldest_acked|, the
+ // newest of the acked ones is now:
+ const int64_t moved_newest_acked = moved_oldest_acked + feedback.size() - 1;
+
+ // Messages that *are* more than the span-limit away from the newest
+ // acked message *are* too old. Acking them would have no effect.
+ AddTransportFeedbackAndValidate(
+ &tracker, moved_newest_acked - max_window_size_ms - 1, {true});
+ ValidatePacketLossStatistics(tracker, 3.0f / 6.0f, 1.0f / 5.0f);
+
+ // Messages that are *not* more than the span-limit away from the newest
+ // acked message are *not* too old. Acking them would have an effect.
+ AddTransportFeedbackAndValidate(
+ &tracker, moved_newest_acked - max_window_size_ms, {true});
+ ValidatePacketLossStatistics(tracker, 3.0f / 7.0f, 1.0f / 5.0f);
+}
+
+// Inserting feedback into the middle of a window works correctly - can
+// complete two pairs.
+TEST_P(TransportFeedbackPacketLossTrackerTest, InsertionCompletesTwoPairs) {
+ TransportFeedbackPacketLossTracker tracker(150 * kDefaultSendIntervalMs, 5,
+ 1);
+
+ SendPackets(&tracker, base_, 15, kDefaultSendIntervalMs);
+
+ // Expected window contents: [] -> [10111].
+ AddTransportFeedbackAndValidate(&tracker, base_,
+ {true, false, true, true, true});
+ ValidatePacketLossStatistics(tracker, 1.0f / 5.0f, 1.0f / 4.0f);
+
+ // Expected window contents: [10111] -> [10111-GAP-10101].
+ AddTransportFeedbackAndValidate(&tracker, base_ + 7,
+ {true, false, true, false, true});
+ ValidatePacketLossStatistics(tracker, 3.0f / 10.0f, 3.0f / 8.0f);
+
+ // Insert in between, closing the gap completely.
+ // Expected window contents: [10111-GAP-10101] -> [101110110101].
+ AddTransportFeedbackAndValidate(&tracker, base_ + 5, {false, true});
+ ValidatePacketLossStatistics(tracker, 4.0f / 12.0f, 4.0f / 11.0f);
+}
+
+// Sequence number gaps are not gaps in reception. However, gaps in reception
+// are still possible, if a packet which WAS sent on the stream is not acked.
+TEST_P(TransportFeedbackPacketLossTrackerTest, SanityGapsInSequenceNumbers) {
+ TransportFeedbackPacketLossTracker tracker(50 * kDefaultSendIntervalMs, 5, 1);
+
+ SendPackets(&tracker,
+ {static_cast<uint16_t>(base_),
+ static_cast<uint16_t>(base_ + 2),
+ static_cast<uint16_t>(base_ + 4),
+ static_cast<uint16_t>(base_ + 6),
+ static_cast<uint16_t>(base_ + 8)},
+ kDefaultSendIntervalMs);
+
+ // Gaps in sequence numbers not considered as gaps in window, because only
+ // those sequence numbers which were associated with the stream count.
+ // Expected window contents: [] -> [11011].
+ AddTransportFeedbackAndValidate(
+ // Note: Left packets belong to this stream, right ones ignored.
+ &tracker, base_, {true, false,
+ true, false,
+ false, false,
+ true, false,
+ true, true});
+ ValidatePacketLossStatistics(tracker, 1.0f / 5.0f, 1.0f / 4.0f);
+
+ // Create gap by sending [base + 10] but not acking it.
+ // Note: Acks for [base + 11] and [base + 13] ignored (other stream).
+ // Expected window contents: [11011] -> [11011-GAP-01].
+ SendPackets(&tracker,
+ {static_cast<uint16_t>(base_ + 10),
+ static_cast<uint16_t>(base_ + 12),
+ static_cast<uint16_t>(base_ + 14)},
+ kDefaultSendIntervalMs);
+ AddTransportFeedbackAndValidate(&tracker, base_ + 11,
+ {false, false, false, true, true});
+ ValidatePacketLossStatistics(tracker, 2.0f / 7.0f, 2.0f / 5.0f);
+}
+
+// The window cannot span more than 0x8000 in sequence numbers, regardless
+// of time stamps and acked/unacked status.
+TEST_P(TransportFeedbackPacketLossTrackerTest, MaxUnackedPackets) {
+ TransportFeedbackPacketLossTracker tracker(0x10000, 4, 1);
+
+ SendPackets(&tracker, base_, 0x2000, 1, false);
+
+ // Expected window contents: [] -> [10011].
+ AddTransportFeedbackAndValidate(&tracker, base_,
+ {true, false, false, true, true});
+ ValidatePacketLossStatistics(tracker, 2.0f / 5.0f, 1.0f / 4.0f);
+
+ // Sending more unacked packets, up to 0x7fff from the base, does not
+ // move the window or discard any information.
+ SendPackets(&tracker, static_cast<uint16_t>(base_ + 0x8000 - 0x2000), 0x2000,
+ 1, false);
+ ValidatePacketLossStatistics(tracker, 2.0f / 5.0f, 1.0f / 4.0f);
+
+// Sending a packet whose sequence number is 0x8000 past the base slides
+// the window forward, discarding the oldest entry.
+ // Expected window contents: [10011] -> [0011].
+ SendPackets(&tracker, static_cast<uint16_t>(base_ + 0x8000), 1, 1);
+ ValidatePacketLossStatistics(tracker, 2.0f / 4.0f, 1.0f / 3.0f);
+}
+
+// The window holds acked packets up until the difference in timestamps between
+// the oldest and newest reaches the configured maximum. Once this maximum
+// is exceeded, old packets are shifted out of window until the maximum is
+// once again observed.
+TEST_P(TransportFeedbackPacketLossTrackerTest, TimeDifferenceMaximumObserved) {
+ constexpr int64_t max_window_size_ms = 500;
+ TransportFeedbackPacketLossTracker tracker(max_window_size_ms, 3, 1);
+
+ // Note: All messages in this test are sent 1ms apart from each other.
+ // Therefore, the delta in sequence numbers equals the timestamps delta.
+
+ // Baseline - window has acked messages.
+ // Expected window contents: [] -> [01101].
+ const std::vector<bool> feedback = {false, true, true, false, true};
+ SendPackets(&tracker, base_, feedback.size(), 1);
+ AddTransportFeedbackAndValidate(&tracker, base_, feedback);
+ ValidatePacketLossStatistics(tracker, 2.0f / 5.0f, 2.0f / 4.0f);
+
+ // Test - window base not moved.
+ // Expected window contents: [01101] -> [011011].
+ AdvanceClock(max_window_size_ms - feedback.size());
+ SendPackets(&tracker, static_cast<uint16_t>(base_ + feedback.size()), 1, 1);
+ AddTransportFeedbackAndValidate(
+ &tracker, static_cast<uint16_t>(base_ + feedback.size()), {true});
+ ValidatePacketLossStatistics(tracker, 2.0f / 6.0f, 2.0f / 5.0f);
+
+ // Another packet, sent 1ms later, would already be too late. The window will
+ // be moved, but only after the ACK is received.
+ const uint16_t new_packet_seq_num =
+ static_cast<uint16_t>(base_ + feedback.size() + 1);
+ SendPackets(&tracker, {new_packet_seq_num}, 1);
+ ValidatePacketLossStatistics(tracker, 2.0f / 6.0f, 2.0f / 5.0f);
+ // Expected window contents: [011011] -> [110111].
+ AddTransportFeedbackAndValidate(&tracker, new_packet_seq_num, {true});
+ ValidatePacketLossStatistics(tracker, 1.0f / 6.0f, 1.0f / 5.0f);
+}
+
+TEST_P(TransportFeedbackPacketLossTrackerTest, RepeatedSeqNumResetsWindow) {
+ TransportFeedbackPacketLossTracker tracker(50 * kDefaultSendIntervalMs, 2, 1);
+
+ // Baseline - window has acked messages.
+ // Expected window contents: [] -> [01101].
+ SendPackets(&tracker, base_, 5, kDefaultSendIntervalMs);
+ AddTransportFeedbackAndValidate(&tracker, base_,
+ {false, true, true, false, true});
+ ValidatePacketLossStatistics(tracker, 2.0f / 5.0f, 2.0f / 4.0f);
+
+ // A reset occurs.
+ SendPackets(&tracker, {static_cast<uint16_t>(base_ + 2)},
+ kDefaultSendIntervalMs);
+ ValidatePacketLossStatistics(tracker, rtc::nullopt, rtc::nullopt);
+}
+
+// The window is reset by the sending of a packet which is 0x8000 or more
+// away from the newest acked/unacked packet.
+TEST_P(TransportFeedbackPacketLossTrackerTest,
+ SendAfterLongSuspensionResetsWindow) {
+ TransportFeedbackPacketLossTracker tracker(50 * kDefaultSendIntervalMs, 2, 1);
+
+ // Baseline - window has acked messages.
+ // Expected window contents: [] -> [01101].
+ SendPackets(&tracker, base_, 5, kDefaultSendIntervalMs);
+ AddTransportFeedbackAndValidate(&tracker, base_,
+ {false, true, true, false, true});
+ ValidatePacketLossStatistics(tracker, 2.0f / 5.0f, 2.0f / 4.0f);
+
+ // A reset occurs.
+ SendPackets(&tracker, {static_cast<uint16_t>(base_ + 5 + 0x8000)},
+ kDefaultSendIntervalMs);
+ ValidatePacketLossStatistics(tracker, rtc::nullopt, rtc::nullopt);
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+TEST(TransportFeedbackPacketLossTrackerTest, InvalidConfigMaxWindowSize) {
+ EXPECT_DEATH(TransportFeedbackPacketLossTracker tracker(0, 20, 10), "");
+}
+
+TEST(TransportFeedbackPacketLossTrackerTest, InvalidConfigPlrMinAcked) {
+ EXPECT_DEATH(TransportFeedbackPacketLossTracker tracker(5000, 0, 10), "");
+}
+
+TEST(TransportFeedbackPacketLossTrackerTest, InvalidConfigRplrMinPairs) {
+ EXPECT_DEATH(TransportFeedbackPacketLossTracker tracker(5000, 20, 0), "");
+}
+
+TEST(TransportFeedbackPacketLossTrackerTest, TimeCantFlowBackwards) {
+ TransportFeedbackPacketLossTracker tracker(5000, 2, 1);
+ tracker.OnPacketAdded(100, 0);
+ tracker.OnPacketAdded(101, 2);
+ EXPECT_DEATH(tracker.OnPacketAdded(102, 1), "");
+}
+#endif
+
+// All tests are run multiple times with various baseline sequence number,
+// to weed out potential bugs with wrap-around handling.
+constexpr uint16_t kBases[] = {0x0000, 0x3456, 0xc032, 0xfffe};
+
+INSTANTIATE_TEST_CASE_P(_,
+ TransportFeedbackPacketLossTrackerTest,
+ testing::ValuesIn(kBases));
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/voice_engine/utility.cc b/third_party/libwebrtc/webrtc/voice_engine/utility.cc
new file mode 100644
index 0000000000..939870245f
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/voice_engine/utility.cc
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voice_engine/utility.h"
+
+#include "audio/utility/audio_frame_operations.h"
+#include "common_audio/resampler/include/push_resampler.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "common_types.h" // NOLINT(build/include)
+#include "modules/include/module_common_types.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace voe {
+
+void RemixAndResample(const AudioFrame& src_frame,
+ PushResampler<int16_t>* resampler,
+ AudioFrame* dst_frame) {
+ RemixAndResample(src_frame.data(), src_frame.samples_per_channel_,
+ src_frame.num_channels_, src_frame.sample_rate_hz_,
+ resampler, dst_frame);
+ dst_frame->timestamp_ = src_frame.timestamp_;
+ dst_frame->elapsed_time_ms_ = src_frame.elapsed_time_ms_;
+ dst_frame->ntp_time_ms_ = src_frame.ntp_time_ms_;
+}
+
+void RemixAndResample(const int16_t* src_data,
+ size_t samples_per_channel,
+ size_t num_channels,
+ int sample_rate_hz,
+ PushResampler<int16_t>* resampler,
+ AudioFrame* dst_frame) {
+ const int16_t* audio_ptr = src_data;
+ size_t audio_ptr_num_channels = num_channels;
+ int16_t downmixed_audio[AudioFrame::kMaxDataSizeSamples];
+
+ // Downmix before resampling.
+ if (num_channels > dst_frame->num_channels_) {
+ RTC_DCHECK(num_channels == 2 || num_channels == 4)
+ << "num_channels: " << num_channels;
+ RTC_DCHECK(dst_frame->num_channels_ == 1 || dst_frame->num_channels_ == 2)
+ << "dst_frame->num_channels_: " << dst_frame->num_channels_;
+
+ AudioFrameOperations::DownmixChannels(
+ src_data, num_channels, samples_per_channel, dst_frame->num_channels_,
+ downmixed_audio);
+ audio_ptr = downmixed_audio;
+ audio_ptr_num_channels = dst_frame->num_channels_;
+ }
+
+ if (resampler->InitializeIfNeeded(sample_rate_hz, dst_frame->sample_rate_hz_,
+ audio_ptr_num_channels) == -1) {
+ FATAL() << "InitializeIfNeeded failed: sample_rate_hz = " << sample_rate_hz
+ << ", dst_frame->sample_rate_hz_ = " << dst_frame->sample_rate_hz_
+ << ", audio_ptr_num_channels = " << audio_ptr_num_channels;
+ }
+
+ // TODO(yujo): for muted input frames, don't resample. Either 1) allow
+ // resampler to return output length without doing the resample, so we know
+ // how much to zero here; or 2) make resampler accept a hint that the input is
+ // zeroed.
+ const size_t src_length = samples_per_channel * audio_ptr_num_channels;
+ int out_length = resampler->Resample(audio_ptr, src_length,
+ dst_frame->mutable_data(),
+ AudioFrame::kMaxDataSizeSamples);
+ if (out_length == -1) {
+ FATAL() << "Resample failed: audio_ptr = " << audio_ptr
+ << ", src_length = " << src_length
+ << ", dst_frame->mutable_data() = " << dst_frame->mutable_data();
+ }
+ dst_frame->samples_per_channel_ = out_length / audio_ptr_num_channels;
+
+ // Upmix after resampling.
+ if (num_channels == 1 && dst_frame->num_channels_ == 2) {
+ // The audio in dst_frame really is mono at this point; MonoToStereo will
+ // set this back to stereo.
+ dst_frame->num_channels_ = 1;
+ AudioFrameOperations::MonoToStereo(dst_frame);
+ }
+}
+
+} // namespace voe
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/voice_engine/utility.h b/third_party/libwebrtc/webrtc/voice_engine/utility.h
new file mode 100644
index 0000000000..dc23e1667d
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/voice_engine/utility.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * Contains functions often used by different parts of VoiceEngine.
+ */
+
+#ifndef VOICE_ENGINE_UTILITY_H_
+#define VOICE_ENGINE_UTILITY_H_
+
+#include "common_audio/resampler/include/push_resampler.h"
+#include "typedefs.h" // NOLINT(build/include)
+
+namespace webrtc {
+
+class AudioFrame;
+
+namespace voe {
+
+// Upmix or downmix and resample the audio to |dst_frame|. Expects |dst_frame|
+// to have its sample rate and channels members set to the desired values.
+// Updates the |samples_per_channel_| member accordingly.
+//
+// This version has an AudioFrame |src_frame| as input and sets the output
+// |timestamp_|, |elapsed_time_ms_| and |ntp_time_ms_| members equals to the
+// input ones.
+void RemixAndResample(const AudioFrame& src_frame,
+ PushResampler<int16_t>* resampler,
+ AudioFrame* dst_frame);
+
+// This version has a pointer to the samples |src_data| as input and receives
+// |samples_per_channel|, |num_channels| and |sample_rate_hz| of the data as
+// parameters.
+void RemixAndResample(const int16_t* src_data,
+ size_t samples_per_channel,
+ size_t num_channels,
+ int sample_rate_hz,
+ PushResampler<int16_t>* resampler,
+ AudioFrame* dst_frame);
+
+} // namespace voe
+} // namespace webrtc
+
+#endif // VOICE_ENGINE_UTILITY_H_
diff --git a/third_party/libwebrtc/webrtc/voice_engine/utility_unittest.cc b/third_party/libwebrtc/webrtc/voice_engine/utility_unittest.cc
new file mode 100644
index 0000000000..c798582d4b
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/voice_engine/utility_unittest.cc
@@ -0,0 +1,275 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+
+#include "common_audio/resampler/include/push_resampler.h"
+#include "modules/include/module_common_types.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/format_macros.h"
+#include "test/gtest.h"
+#include "voice_engine/utility.h"
+
+namespace webrtc {
+namespace voe {
+namespace {
+
+class UtilityTest : public ::testing::Test {
+ protected:
+ UtilityTest() {
+ src_frame_.sample_rate_hz_ = 16000;
+ src_frame_.samples_per_channel_ = src_frame_.sample_rate_hz_ / 100;
+ src_frame_.num_channels_ = 1;
+ dst_frame_.CopyFrom(src_frame_);
+ golden_frame_.CopyFrom(src_frame_);
+ }
+
+ void RunResampleTest(int src_channels,
+ int src_sample_rate_hz,
+ int dst_channels,
+ int dst_sample_rate_hz);
+
+ PushResampler<int16_t> resampler_;
+ AudioFrame src_frame_;
+ AudioFrame dst_frame_;
+ AudioFrame golden_frame_;
+};
+
+// Sets the signal value to increase by |data| with every sample. Floats are
+// used so non-integer values result in rounding error, but not an accumulating
+// error.
+void SetMonoFrame(float data, int sample_rate_hz, AudioFrame* frame) {
+ frame->Mute();
+ frame->num_channels_ = 1;
+ frame->sample_rate_hz_ = sample_rate_hz;
+ frame->samples_per_channel_ = rtc::CheckedDivExact(sample_rate_hz, 100);
+ int16_t* frame_data = frame->mutable_data();
+ for (size_t i = 0; i < frame->samples_per_channel_; i++) {
+ frame_data[i] = static_cast<int16_t>(data * i);
+ }
+}
+
+// Keep the existing sample rate.
+void SetMonoFrame(float data, AudioFrame* frame) {
+ SetMonoFrame(data, frame->sample_rate_hz_, frame);
+}
+
+// Sets the signal value to increase by |left| and |right| with every sample in
+// each channel respectively.
+void SetStereoFrame(float left,
+ float right,
+ int sample_rate_hz,
+ AudioFrame* frame) {
+ frame->Mute();
+ frame->num_channels_ = 2;
+ frame->sample_rate_hz_ = sample_rate_hz;
+ frame->samples_per_channel_ = rtc::CheckedDivExact(sample_rate_hz, 100);
+ int16_t* frame_data = frame->mutable_data();
+ for (size_t i = 0; i < frame->samples_per_channel_; i++) {
+ frame_data[i * 2] = static_cast<int16_t>(left * i);
+ frame_data[i * 2 + 1] = static_cast<int16_t>(right * i);
+ }
+}
+
+// Keep the existing sample rate.
+void SetStereoFrame(float left, float right, AudioFrame* frame) {
+ SetStereoFrame(left, right, frame->sample_rate_hz_, frame);
+}
+
+// Sets the signal value to increase by |ch1|, |ch2|, |ch3|, |ch4| with every
+// sample in each channel respectively.
+void SetQuadFrame(float ch1,
+ float ch2,
+ float ch3,
+ float ch4,
+ int sample_rate_hz,
+ AudioFrame* frame) {
+ frame->Mute();
+ frame->num_channels_ = 4;
+ frame->sample_rate_hz_ = sample_rate_hz;
+ frame->samples_per_channel_ = rtc::CheckedDivExact(sample_rate_hz, 100);
+ int16_t* frame_data = frame->mutable_data();
+ for (size_t i = 0; i < frame->samples_per_channel_; i++) {
+ frame_data[i * 4] = static_cast<int16_t>(ch1 * i);
+ frame_data[i * 4 + 1] = static_cast<int16_t>(ch2 * i);
+ frame_data[i * 4 + 2] = static_cast<int16_t>(ch3 * i);
+ frame_data[i * 4 + 3] = static_cast<int16_t>(ch4 * i);
+ }
+}
+
+void VerifyParams(const AudioFrame& ref_frame, const AudioFrame& test_frame) {
+ EXPECT_EQ(ref_frame.num_channels_, test_frame.num_channels_);
+ EXPECT_EQ(ref_frame.samples_per_channel_, test_frame.samples_per_channel_);
+ EXPECT_EQ(ref_frame.sample_rate_hz_, test_frame.sample_rate_hz_);
+}
+
+// Computes the best SNR based on the error between |ref_frame| and
+// |test_frame|. It allows for up to a |max_delay| in samples between the
+// signals to compensate for the resampling delay.
+float ComputeSNR(const AudioFrame& ref_frame, const AudioFrame& test_frame,
+ size_t max_delay) {
+ VerifyParams(ref_frame, test_frame);
+ float best_snr = 0;
+ size_t best_delay = 0;
+ for (size_t delay = 0; delay <= max_delay; delay++) {
+ float mse = 0;
+ float variance = 0;
+ const int16_t* ref_frame_data = ref_frame.data();
+ const int16_t* test_frame_data = test_frame.data();
+ for (size_t i = 0; i < ref_frame.samples_per_channel_ *
+ ref_frame.num_channels_ - delay; i++) {
+ int error = ref_frame_data[i] - test_frame_data[i + delay];
+ mse += error * error;
+ variance += ref_frame_data[i] * ref_frame_data[i];
+ }
+ float snr = 100; // We assign 100 dB to the zero-error case.
+ if (mse > 0)
+ snr = 10 * log10(variance / mse);
+ if (snr > best_snr) {
+ best_snr = snr;
+ best_delay = delay;
+ }
+ }
+ printf("SNR=%.1f dB at delay=%" PRIuS "\n", best_snr, best_delay);
+ return best_snr;
+}
+
+void VerifyFramesAreEqual(const AudioFrame& ref_frame,
+ const AudioFrame& test_frame) {
+ VerifyParams(ref_frame, test_frame);
+ const int16_t* ref_frame_data = ref_frame.data();
+ const int16_t* test_frame_data = test_frame.data();
+ for (size_t i = 0;
+ i < ref_frame.samples_per_channel_ * ref_frame.num_channels_; i++) {
+ EXPECT_EQ(ref_frame_data[i], test_frame_data[i]);
+ }
+}
+
+void UtilityTest::RunResampleTest(int src_channels,
+ int src_sample_rate_hz,
+ int dst_channels,
+ int dst_sample_rate_hz) {
+ PushResampler<int16_t> resampler; // Create a new one with every test.
+ const int16_t kSrcCh1 = 30; // Shouldn't overflow for any used sample rate.
+ const int16_t kSrcCh2 = 15;
+ const int16_t kSrcCh3 = 22;
+ const int16_t kSrcCh4 = 8;
+ const float resampling_factor = (1.0 * src_sample_rate_hz) /
+ dst_sample_rate_hz;
+ const float dst_ch1 = resampling_factor * kSrcCh1;
+ const float dst_ch2 = resampling_factor * kSrcCh2;
+ const float dst_ch3 = resampling_factor * kSrcCh3;
+ const float dst_ch4 = resampling_factor * kSrcCh4;
+ const float dst_stereo_to_mono = (dst_ch1 + dst_ch2) / 2;
+ const float dst_quad_to_mono = (dst_ch1 + dst_ch2 + dst_ch3 + dst_ch4) / 4;
+ const float dst_quad_to_stereo_ch1 = (dst_ch1 + dst_ch2) / 2;
+ const float dst_quad_to_stereo_ch2 = (dst_ch3 + dst_ch4) / 2;
+ if (src_channels == 1)
+ SetMonoFrame(kSrcCh1, src_sample_rate_hz, &src_frame_);
+ else if (src_channels == 2)
+ SetStereoFrame(kSrcCh1, kSrcCh2, src_sample_rate_hz, &src_frame_);
+ else
+ SetQuadFrame(kSrcCh1, kSrcCh2, kSrcCh3, kSrcCh4, src_sample_rate_hz,
+ &src_frame_);
+
+ if (dst_channels == 1) {
+ SetMonoFrame(0, dst_sample_rate_hz, &dst_frame_);
+ if (src_channels == 1)
+ SetMonoFrame(dst_ch1, dst_sample_rate_hz, &golden_frame_);
+ else if (src_channels == 2)
+ SetMonoFrame(dst_stereo_to_mono, dst_sample_rate_hz, &golden_frame_);
+ else
+ SetMonoFrame(dst_quad_to_mono, dst_sample_rate_hz, &golden_frame_);
+ } else {
+ SetStereoFrame(0, 0, dst_sample_rate_hz, &dst_frame_);
+ if (src_channels == 1)
+ SetStereoFrame(dst_ch1, dst_ch1, dst_sample_rate_hz, &golden_frame_);
+ else if (src_channels == 2)
+ SetStereoFrame(dst_ch1, dst_ch2, dst_sample_rate_hz, &golden_frame_);
+ else
+ SetStereoFrame(dst_quad_to_stereo_ch1, dst_quad_to_stereo_ch2,
+ dst_sample_rate_hz, &golden_frame_);
+ }
+
+ // The sinc resampler has a known delay, which we compute here. Multiplying by
+ // two gives us a crude maximum for any resampling, as the old resampler
+ // typically (but not always) has lower delay.
+ static const size_t kInputKernelDelaySamples = 16;
+ const size_t max_delay = static_cast<size_t>(
+ static_cast<double>(dst_sample_rate_hz) / src_sample_rate_hz *
+ kInputKernelDelaySamples * dst_channels * 2);
+ printf("(%d, %d Hz) -> (%d, %d Hz) ", // SNR reported on the same line later.
+ src_channels, src_sample_rate_hz, dst_channels, dst_sample_rate_hz);
+ RemixAndResample(src_frame_, &resampler, &dst_frame_);
+
+ if (src_sample_rate_hz == 96000 && dst_sample_rate_hz == 8000) {
+ // The sinc resampler gives poor SNR at this extreme conversion, but we
+ // expect to see this rarely in practice.
+ EXPECT_GT(ComputeSNR(golden_frame_, dst_frame_, max_delay), 14.0f);
+ } else {
+ EXPECT_GT(ComputeSNR(golden_frame_, dst_frame_, max_delay), 46.0f);
+ }
+}
+
+TEST_F(UtilityTest, RemixAndResampleCopyFrameSucceeds) {
+ // Stereo -> stereo.
+ SetStereoFrame(10, 10, &src_frame_);
+ SetStereoFrame(0, 0, &dst_frame_);
+ RemixAndResample(src_frame_, &resampler_, &dst_frame_);
+ VerifyFramesAreEqual(src_frame_, dst_frame_);
+
+ // Mono -> mono.
+ SetMonoFrame(20, &src_frame_);
+ SetMonoFrame(0, &dst_frame_);
+ RemixAndResample(src_frame_, &resampler_, &dst_frame_);
+ VerifyFramesAreEqual(src_frame_, dst_frame_);
+}
+
+TEST_F(UtilityTest, RemixAndResampleMixingOnlySucceeds) {
+ // Stereo -> mono.
+ SetStereoFrame(0, 0, &dst_frame_);
+ SetMonoFrame(10, &src_frame_);
+ SetStereoFrame(10, 10, &golden_frame_);
+ RemixAndResample(src_frame_, &resampler_, &dst_frame_);
+ VerifyFramesAreEqual(dst_frame_, golden_frame_);
+
+ // Mono -> stereo.
+ SetMonoFrame(0, &dst_frame_);
+ SetStereoFrame(10, 20, &src_frame_);
+ SetMonoFrame(15, &golden_frame_);
+ RemixAndResample(src_frame_, &resampler_, &dst_frame_);
+ VerifyFramesAreEqual(golden_frame_, dst_frame_);
+}
+
+TEST_F(UtilityTest, RemixAndResampleSucceeds) {
+ const int kSampleRates[] = {8000, 16000, 32000, 44100, 48000, 96000};
+ const int kSampleRatesSize = arraysize(kSampleRates);
+ const int kSrcChannels[] = {1, 2, 4};
+ const int kSrcChannelsSize = arraysize(kSrcChannels);
+ const int kDstChannels[] = {1, 2};
+ const int kDstChannelsSize = arraysize(kDstChannels);
+
+ for (int src_rate = 0; src_rate < kSampleRatesSize; src_rate++) {
+ for (int dst_rate = 0; dst_rate < kSampleRatesSize; dst_rate++) {
+ for (int src_channel = 0; src_channel < kSrcChannelsSize;
+ src_channel++) {
+ for (int dst_channel = 0; dst_channel < kDstChannelsSize;
+ dst_channel++) {
+ RunResampleTest(kSrcChannels[src_channel], kSampleRates[src_rate],
+ kDstChannels[dst_channel], kSampleRates[dst_rate]);
+ }
+ }
+ }
+ }
+}
+
+} // namespace
+} // namespace voe
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/voice_engine/voe_base_impl.cc b/third_party/libwebrtc/webrtc/voice_engine/voe_base_impl.cc
new file mode 100644
index 0000000000..5d49872620
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/voice_engine/voe_base_impl.cc
@@ -0,0 +1,410 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voice_engine/voe_base_impl.h"
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_coding/include/audio_coding_module.h"
+#include "modules/audio_device/audio_device_impl.h"
+#include "modules/audio_processing/include/audio_processing.h"
+#include "rtc_base/format_macros.h"
+#include "rtc_base/location.h"
+#include "rtc_base/logging.h"
+#include "voice_engine/channel.h"
+#include "voice_engine/include/voe_errors.h"
+#include "voice_engine/transmit_mixer.h"
+#include "voice_engine/voice_engine_impl.h"
+
+namespace webrtc {
+
+VoEBase* VoEBase::GetInterface(VoiceEngine* voiceEngine) {
+ if (nullptr == voiceEngine) {
+ return nullptr;
+ }
+ VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine);
+ s->AddRef();
+ return s;
+}
+
+VoEBaseImpl::VoEBaseImpl(voe::SharedData* shared)
+ : shared_(shared) {}
+
+VoEBaseImpl::~VoEBaseImpl() {
+ TerminateInternal();
+}
+
+int32_t VoEBaseImpl::RecordedDataIsAvailable(
+ const void* audio_data,
+ const size_t number_of_frames,
+ const size_t bytes_per_sample,
+ const size_t number_of_channels,
+ const uint32_t sample_rate,
+ const uint32_t audio_delay_milliseconds,
+ const int32_t clock_drift,
+ const uint32_t volume,
+ const bool key_pressed,
+ uint32_t& new_mic_volume) {
+ RTC_DCHECK_EQ(2 * number_of_channels, bytes_per_sample);
+ RTC_DCHECK(shared_->transmit_mixer() != nullptr);
+ RTC_DCHECK(shared_->audio_device() != nullptr);
+
+ constexpr uint32_t kMaxVolumeLevel = 255;
+
+ uint32_t max_volume = 0;
+ uint16_t voe_mic_level = 0;
+ // Check for zero to skip this calculation; the consumer may use this to
+ // indicate no volume is available.
+ if (volume != 0) {
+ // Scale from ADM to VoE level range
+ if (shared_->audio_device()->MaxMicrophoneVolume(&max_volume) == 0) {
+ if (max_volume) {
+ voe_mic_level = static_cast<uint16_t>(
+ (volume * kMaxVolumeLevel + static_cast<int>(max_volume / 2)) /
+ max_volume);
+ }
+ }
+  // We learned that on certain systems (e.g. Linux) the voe_mic_level
+  // can be greater than kMaxVolumeLevel; therefore,
+  // we cap voe_mic_level at kMaxVolumeLevel
+  // and change max_volume to volume if it turns out that
+  // voe_mic_level is indeed greater than kMaxVolumeLevel.
+ if (voe_mic_level > kMaxVolumeLevel) {
+ voe_mic_level = kMaxVolumeLevel;
+ max_volume = volume;
+ }
+ }
+
+ // Perform channel-independent operations
+ // (APM, mix with file, record to file, mute, etc.)
+ shared_->transmit_mixer()->PrepareDemux(
+ audio_data, number_of_frames, number_of_channels, sample_rate,
+ static_cast<uint16_t>(audio_delay_milliseconds), clock_drift,
+ voe_mic_level, key_pressed);
+
+ // Copy the audio frame to each sending channel and perform
+ // channel-dependent operations (file mixing, mute, etc.), encode and
+ // packetize+transmit the RTP packet.
+ shared_->transmit_mixer()->ProcessAndEncodeAudio();
+
+ // Scale from VoE to ADM level range.
+ uint32_t new_voe_mic_level = shared_->transmit_mixer()->CaptureLevel();
+ if (new_voe_mic_level != voe_mic_level) {
+ // Return the new volume if AGC has changed the volume.
+ return static_cast<int>((new_voe_mic_level * max_volume +
+ static_cast<int>(kMaxVolumeLevel / 2)) /
+ kMaxVolumeLevel);
+ }
+
+ return 0;
+}
+
+int32_t VoEBaseImpl::NeedMorePlayData(const size_t nSamples,
+ const size_t nBytesPerSample,
+ const size_t nChannels,
+ const uint32_t samplesPerSec,
+ void* audioSamples,
+ size_t& nSamplesOut,
+ int64_t* elapsed_time_ms,
+ int64_t* ntp_time_ms) {
+ RTC_NOTREACHED();
+ return 0;
+}
+
+void VoEBaseImpl::PushCaptureData(int voe_channel, const void* audio_data,
+ int bits_per_sample, int sample_rate,
+ size_t number_of_channels,
+ size_t number_of_frames) {
+ voe::ChannelOwner ch = shared_->channel_manager().GetChannel(voe_channel);
+ voe::Channel* channel = ch.channel();
+ if (!channel)
+ return;
+ if (channel->Sending()) {
+ // Send the audio to each channel directly without using the APM in the
+ // transmit mixer.
+ channel->ProcessAndEncodeAudio(static_cast<const int16_t*>(audio_data),
+ sample_rate, number_of_frames,
+ number_of_channels);
+ }
+}
+
+void VoEBaseImpl::PullRenderData(int bits_per_sample,
+ int sample_rate,
+ size_t number_of_channels,
+ size_t number_of_frames,
+ void* audio_data, int64_t* elapsed_time_ms,
+ int64_t* ntp_time_ms) {
+ RTC_NOTREACHED();
+}
+
+int VoEBaseImpl::Init(
+ AudioDeviceModule* audio_device,
+ AudioProcessing* audio_processing,
+ const rtc::scoped_refptr<AudioDecoderFactory>& decoder_factory) {
+ RTC_DCHECK(audio_device);
+ RTC_DCHECK(audio_processing);
+ rtc::CritScope cs(shared_->crit_sec());
+ if (shared_->process_thread()) {
+ shared_->process_thread()->Start();
+ }
+
+ shared_->set_audio_device(audio_device);
+ shared_->set_audio_processing(audio_processing);
+
+ RTC_DCHECK(decoder_factory);
+ decoder_factory_ = decoder_factory;
+
+ return 0;
+}
+
+void VoEBaseImpl::Terminate() {
+ rtc::CritScope cs(shared_->crit_sec());
+ TerminateInternal();
+}
+
+int VoEBaseImpl::CreateChannel() {
+ return CreateChannel(ChannelConfig());
+}
+
+int VoEBaseImpl::CreateChannel(const ChannelConfig& config) {
+ rtc::CritScope cs(shared_->crit_sec());
+ ChannelConfig config_copy(config);
+ config_copy.acm_config.decoder_factory = decoder_factory_;
+ voe::ChannelOwner channel_owner =
+ shared_->channel_manager().CreateChannel(config_copy);
+ return InitializeChannel(&channel_owner);
+}
+
+int VoEBaseImpl::InitializeChannel(voe::ChannelOwner* channel_owner) {
+ if (channel_owner->channel()->SetEngineInformation(
+ *shared_->process_thread(), *shared_->audio_device(),
+ shared_->encoder_queue()) != 0) {
+ RTC_LOG(LS_ERROR)
+ << "CreateChannel() failed to associate engine and channel."
+ " Destroying channel.";
+ shared_->channel_manager().DestroyChannel(
+ channel_owner->channel()->ChannelId());
+ return -1;
+ } else if (channel_owner->channel()->Init() != 0) {
+ RTC_LOG(LS_ERROR)
+ << "CreateChannel() failed to initialize channel. Destroying"
+ " channel.";
+ shared_->channel_manager().DestroyChannel(
+ channel_owner->channel()->ChannelId());
+ return -1;
+ }
+ return channel_owner->channel()->ChannelId();
+}
+
+int VoEBaseImpl::DeleteChannel(int channel) {
+ rtc::CritScope cs(shared_->crit_sec());
+ {
+ voe::ChannelOwner ch = shared_->channel_manager().GetChannel(channel);
+ voe::Channel* channelPtr = ch.channel();
+ if (channelPtr == nullptr) {
+ RTC_LOG(LS_ERROR) << "DeleteChannel() failed to locate channel";
+ return -1;
+ }
+ }
+
+ shared_->channel_manager().DestroyChannel(channel);
+ if (StopSend() != 0) {
+ return -1;
+ }
+ if (StopPlayout() != 0) {
+ return -1;
+ }
+ return 0;
+}
+
+int VoEBaseImpl::StartPlayout(int channel) {
+ rtc::CritScope cs(shared_->crit_sec());
+ voe::ChannelOwner ch = shared_->channel_manager().GetChannel(channel);
+ voe::Channel* channelPtr = ch.channel();
+ if (channelPtr == nullptr) {
+ RTC_LOG(LS_ERROR) << "StartPlayout() failed to locate channel";
+ return -1;
+ }
+ if (channelPtr->Playing()) {
+ return 0;
+ }
+ if (StartPlayout() != 0) {
+ RTC_LOG(LS_ERROR) << "StartPlayout() failed to start playout";
+ return -1;
+ }
+ return channelPtr->StartPlayout();
+}
+
+int VoEBaseImpl::StopPlayout(int channel) {
+ rtc::CritScope cs(shared_->crit_sec());
+ voe::ChannelOwner ch = shared_->channel_manager().GetChannel(channel);
+ voe::Channel* channelPtr = ch.channel();
+ if (channelPtr == nullptr) {
+ RTC_LOG(LS_ERROR) << "StopPlayout() failed to locate channel";
+ return -1;
+ }
+ if (channelPtr->StopPlayout() != 0) {
+ RTC_LOG_F(LS_WARNING) << "StopPlayout() failed to stop playout for channel "
+ << channel;
+ }
+ return StopPlayout();
+}
+
+int VoEBaseImpl::StartSend(int channel) {
+ rtc::CritScope cs(shared_->crit_sec());
+ voe::ChannelOwner ch = shared_->channel_manager().GetChannel(channel);
+ voe::Channel* channelPtr = ch.channel();
+ if (channelPtr == nullptr) {
+ RTC_LOG(LS_ERROR) << "StartSend() failed to locate channel";
+ return -1;
+ }
+ if (channelPtr->Sending()) {
+ return 0;
+ }
+ if (StartSend() != 0) {
+ RTC_LOG(LS_ERROR) << "StartSend() failed to start recording";
+ return -1;
+ }
+ return channelPtr->StartSend();
+}
+
+int VoEBaseImpl::StopSend(int channel) {
+ rtc::CritScope cs(shared_->crit_sec());
+ voe::ChannelOwner ch = shared_->channel_manager().GetChannel(channel);
+ voe::Channel* channelPtr = ch.channel();
+ if (channelPtr == nullptr) {
+ RTC_LOG(LS_ERROR) << "StopSend() failed to locate channel";
+ return -1;
+ }
+ channelPtr->StopSend();
+ return StopSend();
+}
+
+int32_t VoEBaseImpl::StartPlayout() {
+ if (!shared_->audio_device()->Playing()) {
+ if (shared_->audio_device()->InitPlayout() != 0) {
+ RTC_LOG_F(LS_ERROR) << "Failed to initialize playout";
+ return -1;
+ }
+ if (playout_enabled_ && shared_->audio_device()->StartPlayout() != 0) {
+ RTC_LOG_F(LS_ERROR) << "Failed to start playout";
+ return -1;
+ }
+ }
+ return 0;
+}
+
+int32_t VoEBaseImpl::StopPlayout() {
+ if (!playout_enabled_) {
+ return 0;
+ }
+ // Stop audio-device playing if no channel is playing out.
+ if (shared_->NumOfPlayingChannels() == 0) {
+ if (shared_->audio_device()->StopPlayout() != 0) {
+ RTC_LOG(LS_ERROR) << "StopPlayout() failed to stop playout";
+ return -1;
+ }
+ }
+ return 0;
+}
+
+int32_t VoEBaseImpl::StartSend() {
+ if (!shared_->audio_device()->Recording()) {
+ if (shared_->audio_device()->InitRecording() != 0) {
+ RTC_LOG_F(LS_ERROR) << "Failed to initialize recording";
+ return -1;
+ }
+ if (recording_enabled_ && shared_->audio_device()->StartRecording() != 0) {
+ RTC_LOG_F(LS_ERROR) << "Failed to start recording";
+ return -1;
+ }
+ }
+ return 0;
+}
+
+int32_t VoEBaseImpl::StopSend() {
+ if (!recording_enabled_) {
+ return 0;
+ }
+ // Stop audio-device recording if no channel is recording.
+ if (shared_->NumOfSendingChannels() == 0) {
+ if (shared_->audio_device()->StopRecording() != 0) {
+ RTC_LOG(LS_ERROR) << "StopSend() failed to stop recording";
+ return -1;
+ }
+ shared_->transmit_mixer()->StopSend();
+ }
+
+ return 0;
+}
+
+int32_t VoEBaseImpl::SetPlayout(bool enabled) {
+ RTC_LOG(INFO) << "SetPlayout(" << enabled << ")";
+ if (playout_enabled_ == enabled) {
+ return 0;
+ }
+ playout_enabled_ = enabled;
+ if (shared_->NumOfPlayingChannels() == 0) {
+ // If there are no channels attempting to play out yet, there's nothing to
+ // be done; we should be in a "not playing out" state either way.
+ return 0;
+ }
+ int32_t ret;
+ if (enabled) {
+ ret = shared_->audio_device()->StartPlayout();
+ if (ret != 0) {
+ RTC_LOG(LS_ERROR) << "SetPlayout(true) failed to start playout";
+ }
+ } else {
+ ret = shared_->audio_device()->StopPlayout();
+ if (ret != 0) {
+ RTC_LOG(LS_ERROR) << "SetPlayout(false) failed to stop playout";
+ }
+ }
+ return ret;
+}
+
+int32_t VoEBaseImpl::SetRecording(bool enabled) {
+ RTC_LOG(INFO) << "SetRecording(" << enabled << ")";
+ if (recording_enabled_ == enabled) {
+ return 0;
+ }
+ recording_enabled_ = enabled;
+ if (shared_->NumOfSendingChannels() == 0) {
+    // If there are no channels attempting to record yet, there's nothing to
+    // be done; we should be in a "not recording" state either way.
+ return 0;
+ }
+ int32_t ret;
+ if (enabled) {
+ ret = shared_->audio_device()->StartRecording();
+ if (ret != 0) {
+ RTC_LOG(LS_ERROR) << "SetRecording(true) failed to start recording";
+ }
+ } else {
+ ret = shared_->audio_device()->StopRecording();
+ if (ret != 0) {
+ RTC_LOG(LS_ERROR) << "SetRecording(false) failed to stop recording";
+ }
+ }
+ return ret;
+}
+
+void VoEBaseImpl::TerminateInternal() {
+ // Delete any remaining channel objects
+ shared_->channel_manager().DestroyAllChannels();
+
+ if (shared_->process_thread()) {
+ shared_->process_thread()->Stop();
+ }
+
+ shared_->set_audio_device(nullptr);
+ shared_->set_audio_processing(nullptr);
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/voice_engine/voe_base_impl.h b/third_party/libwebrtc/webrtc/voice_engine/voe_base_impl.h
new file mode 100644
index 0000000000..b96be9e343
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/voice_engine/voe_base_impl.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VOICE_ENGINE_VOE_BASE_IMPL_H_
+#define VOICE_ENGINE_VOE_BASE_IMPL_H_
+
+#include "voice_engine/include/voe_base.h"
+
+#include "modules/include/module_common_types.h"
+#include "rtc_base/criticalsection.h"
+#include "voice_engine/shared_data.h"
+
+namespace webrtc {
+
+class ProcessThread;
+
+class VoEBaseImpl : public VoEBase,
+ public AudioTransport {
+ public:
+ int Init(
+ AudioDeviceModule* audio_device,
+ AudioProcessing* audio_processing,
+ const rtc::scoped_refptr<AudioDecoderFactory>& decoder_factory) override;
+ voe::TransmitMixer* transmit_mixer() override {
+ return shared_->transmit_mixer();
+ }
+ void Terminate() override;
+
+ int CreateChannel() override;
+ int CreateChannel(const ChannelConfig& config) override;
+ int DeleteChannel(int channel) override;
+
+ int StartPlayout(int channel) override;
+ int StartSend(int channel) override;
+ int StopPlayout(int channel) override;
+ int StopSend(int channel) override;
+
+ int SetPlayout(bool enabled) override;
+ int SetRecording(bool enabled) override;
+
+ AudioTransport* audio_transport() override { return this; }
+
+ // AudioTransport
+ int32_t RecordedDataIsAvailable(const void* audio_data,
+ const size_t number_of_frames,
+ const size_t bytes_per_sample,
+ const size_t number_of_channels,
+ const uint32_t sample_rate,
+ const uint32_t audio_delay_milliseconds,
+ const int32_t clock_drift,
+ const uint32_t volume,
+ const bool key_pressed,
+ uint32_t& new_mic_volume) override;
+ RTC_DEPRECATED int32_t NeedMorePlayData(const size_t nSamples,
+ const size_t nBytesPerSample,
+ const size_t nChannels,
+ const uint32_t samplesPerSec,
+ void* audioSamples,
+ size_t& nSamplesOut,
+ int64_t* elapsed_time_ms,
+ int64_t* ntp_time_ms) override;
+ void PushCaptureData(int voe_channel,
+ const void* audio_data,
+ int bits_per_sample,
+ int sample_rate,
+ size_t number_of_channels,
+ size_t number_of_frames) override;
+ RTC_DEPRECATED void PullRenderData(int bits_per_sample,
+ int sample_rate,
+ size_t number_of_channels,
+ size_t number_of_frames,
+ void* audio_data,
+ int64_t* elapsed_time_ms,
+ int64_t* ntp_time_ms) override;
+
+ protected:
+ VoEBaseImpl(voe::SharedData* shared);
+ ~VoEBaseImpl() override;
+
+ private:
+ int32_t StartPlayout();
+ int32_t StopPlayout();
+ int32_t StartSend();
+ int32_t StopSend();
+ void TerminateInternal();
+
+ void GetPlayoutData(int sample_rate, size_t number_of_channels,
+ size_t number_of_frames, bool feed_data_to_apm,
+ void* audio_data, int64_t* elapsed_time_ms,
+ int64_t* ntp_time_ms);
+
+ // Initialize channel by setting Engine Information then initializing
+ // channel.
+ int InitializeChannel(voe::ChannelOwner* channel_owner);
+ rtc::scoped_refptr<AudioDecoderFactory> decoder_factory_;
+
+ AudioFrame audioFrame_;
+ voe::SharedData* shared_;
+ bool playout_enabled_ = true;
+ bool recording_enabled_ = true;
+};
+
+} // namespace webrtc
+
+#endif // VOICE_ENGINE_VOE_BASE_IMPL_H_
diff --git a/third_party/libwebrtc/webrtc/voice_engine/voe_base_unittest.cc b/third_party/libwebrtc/webrtc/voice_engine/voe_base_unittest.cc
new file mode 100644
index 0000000000..56c3d13a00
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/voice_engine/voe_base_unittest.cc
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voice_engine/include/voe_base.h"
+
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "modules/audio_device/include/fake_audio_device.h"
+#include "modules/audio_processing/include/mock_audio_processing.h"
+#include "rtc_base/refcountedobject.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+class VoEBaseTest : public ::testing::Test {
+ protected:
+ VoEBaseTest()
+ : voe_(VoiceEngine::Create()),
+ base_(VoEBase::GetInterface(voe_)) {
+ EXPECT_NE(nullptr, base_);
+ apm_ = new rtc::RefCountedObject<test::MockAudioProcessing>();
+ }
+
+ ~VoEBaseTest() {
+ base_->Terminate();
+ EXPECT_EQ(1, base_->Release());
+ EXPECT_TRUE(VoiceEngine::Delete(voe_));
+ }
+
+ VoiceEngine* voe_;
+ VoEBase* base_;
+ FakeAudioDeviceModule adm_;
+ rtc::scoped_refptr<AudioProcessing> apm_;
+};
+
+TEST_F(VoEBaseTest, InitWithExternalAudioDevice) {
+ EXPECT_EQ(0,
+ base_->Init(&adm_, apm_.get(), CreateBuiltinAudioDecoderFactory()));
+}
+
+TEST_F(VoEBaseTest, CreateChannelAfterInit) {
+ EXPECT_EQ(0,
+ base_->Init(&adm_, apm_.get(), CreateBuiltinAudioDecoderFactory()));
+ int channelID = base_->CreateChannel();
+ EXPECT_NE(channelID, -1);
+ EXPECT_EQ(0, base_->DeleteChannel(channelID));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/voice_engine/voice_engine_defines.h b/third_party/libwebrtc/webrtc/voice_engine/voice_engine_defines.h
new file mode 100644
index 0000000000..b4d928ab87
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/voice_engine/voice_engine_defines.h
@@ -0,0 +1,11 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// TODO(solenberg): Remove this file once downstream dependencies are removed.
diff --git a/third_party/libwebrtc/webrtc/voice_engine/voice_engine_gn/moz.build b/third_party/libwebrtc/webrtc/voice_engine/voice_engine_gn/moz.build
new file mode 100644
index 0000000000..03c08b9abe
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/voice_engine/voice_engine_gn/moz.build
@@ -0,0 +1,243 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["CHROMIUM_BUILD"] = True
+DEFINES["V8_DEPRECATION_WARNINGS"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_RESTRICT_LOGGING"] = True
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/ipc/glue",
+ "/third_party/libwebrtc/webrtc/",
+ "/third_party/libwebrtc/webrtc/common_audio/resampler/include/",
+ "/third_party/libwebrtc/webrtc/common_audio/signal_processing/include/",
+ "/third_party/libwebrtc/webrtc/common_audio/vad/include/",
+ "/third_party/libwebrtc/webrtc/modules/audio_coding/include/",
+ "/third_party/libwebrtc/webrtc/modules/audio_device/dummy/",
+ "/third_party/libwebrtc/webrtc/modules/audio_device/include/",
+ "/third_party/libwebrtc/webrtc/modules/include/",
+ "/third_party/libwebrtc/webrtc/modules/include/"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/webrtc/voice_engine/channel.cc",
+ "/third_party/libwebrtc/webrtc/voice_engine/channel_manager.cc",
+ "/third_party/libwebrtc/webrtc/voice_engine/channel_proxy.cc",
+ "/third_party/libwebrtc/webrtc/voice_engine/shared_data.cc",
+ "/third_party/libwebrtc/webrtc/voice_engine/transmit_mixer.cc",
+ "/third_party/libwebrtc/webrtc/voice_engine/transport_feedback_packet_loss_tracker.cc",
+ "/third_party/libwebrtc/webrtc/voice_engine/utility.cc",
+ "/third_party/libwebrtc/webrtc/voice_engine/voe_base_impl.cc",
+ "/third_party/libwebrtc/webrtc/voice_engine/voice_engine_impl.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+ DEFINES["WTF_USE_DYNAMIC_ANNOTATIONS"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION"] = "r12b"
+ DEFINES["DISABLE_NACL"] = True
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["NO_TCMALLOC"] = True
+ DEFINES["USE_OPENSSL_CERTS"] = "1"
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["__GNU_SOURCE"] = "1"
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["NO_TCMALLOC"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORE"] = "0"
+
+ OS_LIBS += [
+ "-framework Foundation"
+ ]
+
+if CONFIG["OS_TARGET"] == "DragonFly":
+
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+
+if CONFIG["OS_TARGET"] == "FreeBSD":
+
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+
+ OS_LIBS += [
+ "m",
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "NetBSD":
+
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NO_TCMALLOC"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "1"
+ DEFINES["UNICODE"] = True
+ DEFINES["WEBRTC_DRIFT_COMPENSATION_SUPPORTED"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_CRT_SECURE_NO_WARNINGS"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_USING_V110_SDK71_"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if not CONFIG["MOZ_DEBUG"] and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_FORTIFY_SOURCE"] = "2"
+
+if not CONFIG["MOZ_DEBUG"] and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_FORTIFY_SOURCE"] = "2"
+
+if not CONFIG["MOZ_DEBUG"] and CONFIG["OS_TARGET"] == "DragonFly":
+
+ DEFINES["_FORTIFY_SOURCE"] = "2"
+
+if not CONFIG["MOZ_DEBUG"] and CONFIG["OS_TARGET"] == "FreeBSD":
+
+ DEFINES["_FORTIFY_SOURCE"] = "2"
+
+if not CONFIG["MOZ_DEBUG"] and CONFIG["OS_TARGET"] == "NetBSD":
+
+ DEFINES["_FORTIFY_SOURCE"] = "2"
+
+if not CONFIG["MOZ_DEBUG"] and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_FORTIFY_SOURCE"] = "2"
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["CR_XCODE_VERSION"] = "0120"
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["CR_XCODE_VERSION"] = "0920"
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "FreeBSD":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["DISABLE_NACL"] = True
+ DEFINES["NO_TCMALLOC"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "NetBSD":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+Library("voice_engine_gn")
diff --git a/third_party/libwebrtc/webrtc/voice_engine/voice_engine_impl.cc b/third_party/libwebrtc/webrtc/voice_engine/voice_engine_impl.cc
new file mode 100644
index 0000000000..b0ff0849ba
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/voice_engine/voice_engine_impl.cc
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#if defined(WEBRTC_ANDROID)
+#include "modules/audio_device/android/audio_device_template.h"
+#include "modules/audio_device/android/audio_record_jni.h"
+#include "modules/audio_device/android/audio_track_jni.h"
+#endif
+
+#include "modules/audio_coding/include/audio_coding_module.h"
+#include "rtc_base/checks.h"
+#include "voice_engine/channel_proxy.h"
+#include "voice_engine/voice_engine_impl.h"
+
+namespace webrtc {
+
+// Counter used to ensure that we can add a correct ID in all static trace
+// methods. It is not the nicest solution, especially not since we already
+// have a counter in VoEBaseImpl. In other words, there is room for
+// improvement here.
+static int32_t gVoiceEngineInstanceCounter = 0;
+
+VoiceEngine* GetVoiceEngine() {
+ VoiceEngineImpl* self = new VoiceEngineImpl();
+ if (self != NULL) {
+ self->AddRef(); // First reference. Released in VoiceEngine::Delete.
+ gVoiceEngineInstanceCounter++;
+ }
+ return self;
+}
+
+int VoiceEngineImpl::AddRef() {
+ return ++_ref_count;
+}
+
+// This implements the Release() method for all the inherited interfaces.
+int VoiceEngineImpl::Release() {
+ int new_ref = --_ref_count;
+ assert(new_ref >= 0);
+ if (new_ref == 0) {
+ // Clear any pointers before starting destruction. Otherwise worker-
+ // threads will still have pointers to a partially destructed object.
+ // Example: AudioDeviceBuffer::RequestPlayoutData() can access a
+ // partially deconstructed |_ptrCbAudioTransport| during destruction
+ // if we don't call Terminate here.
+ Terminate();
+ delete this;
+ }
+
+ return new_ref;
+}
+
+std::unique_ptr<voe::ChannelProxy> VoiceEngineImpl::GetChannelProxy(
+ int channel_id) {
+ RTC_DCHECK(channel_id >= 0);
+ rtc::CritScope cs(crit_sec());
+ return std::unique_ptr<voe::ChannelProxy>(
+ new voe::ChannelProxy(channel_manager().GetChannel(channel_id)));
+}
+
+VoiceEngine* VoiceEngine::Create() {
+ return GetVoiceEngine();
+}
+
+bool VoiceEngine::Delete(VoiceEngine*& voiceEngine) {
+ if (voiceEngine == NULL)
+ return false;
+
+ VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine);
+ s->Release();
+ voiceEngine = NULL;
+ return true;
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/voice_engine/voice_engine_impl.h b/third_party/libwebrtc/webrtc/voice_engine/voice_engine_impl.h
new file mode 100644
index 0000000000..917cc1aa68
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/voice_engine/voice_engine_impl.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VOICE_ENGINE_VOICE_ENGINE_IMPL_H_
+#define VOICE_ENGINE_VOICE_ENGINE_IMPL_H_
+
+#include <memory>
+
+#include "system_wrappers/include/atomic32.h"
+#include "typedefs.h" // NOLINT(build/include)
+#include "voice_engine/voe_base_impl.h"
+
+namespace webrtc {
+namespace voe {
+class ChannelProxy;
+} // namespace voe
+
+class VoiceEngineImpl : public voe::SharedData, // Must be the first base class
+ public VoiceEngine,
+ public VoEBaseImpl {
+ public:
+ VoiceEngineImpl()
+ : SharedData(),
+ VoEBaseImpl(this),
+ _ref_count(0) {}
+ ~VoiceEngineImpl() override { assert(_ref_count.Value() == 0); }
+
+ int AddRef();
+
+ // This implements the Release() method for all the inherited interfaces.
+ int Release() override;
+
+  // Backdoor to access a voe::Channel object given only a channel ID. This is
+  // only to be used while refactoring the VoE API!
+ virtual std::unique_ptr<voe::ChannelProxy> GetChannelProxy(int channel_id);
+
+ // This is *protected* so that FakeVoiceEngine can inherit from the class and
+ // manipulate the reference count. See: fake_voice_engine.h.
+ protected:
+ Atomic32 _ref_count;
+};
+
+} // namespace webrtc
+
+#endif // VOICE_ENGINE_VOICE_ENGINE_IMPL_H_