Diffstat
-rw-r--r--  third_party/libwebrtc/audio/voip/test/BUILD.gn                    101
-rw-r--r--  third_party/libwebrtc/audio/voip/test/audio_channel_unittest.cc   357
-rw-r--r--  third_party/libwebrtc/audio/voip/test/audio_egress_unittest.cc    327
-rw-r--r--  third_party/libwebrtc/audio/voip/test/audio_ingress_unittest.cc   238
-rw-r--r--  third_party/libwebrtc/audio/voip/test/mock_task_queue.h            55
-rw-r--r--  third_party/libwebrtc/audio/voip/test/voip_core_unittest.cc       193
6 files changed, 1271 insertions, 0 deletions
diff --git a/third_party/libwebrtc/audio/voip/test/BUILD.gn b/third_party/libwebrtc/audio/voip/test/BUILD.gn
new file mode 100644
index 0000000000..e89f2b001a
--- /dev/null
+++ b/third_party/libwebrtc/audio/voip/test/BUILD.gn
@@ -0,0 +1,101 @@
+# Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../../webrtc.gni")
+
+if (rtc_include_tests) {
+ rtc_source_set("mock_task_queue") {
+ testonly = true
+ visibility = [ "*" ]
+ sources = [ "mock_task_queue.h" ]
+ deps = [
+ "../../../api/task_queue:task_queue",
+ "../../../api/task_queue/test:mock_task_queue_base",
+ "../../../test:test_support",
+ ]
+ }
+
+ if (!build_with_chromium) {
+ rtc_library("voip_core_unittests") {
+ testonly = true
+ sources = [ "voip_core_unittest.cc" ]
+ deps = [
+ "..:voip_core",
+ "../../../api/audio_codecs:builtin_audio_decoder_factory",
+ "../../../api/audio_codecs:builtin_audio_encoder_factory",
+ "../../../api/task_queue:default_task_queue_factory",
+ "../../../modules/audio_device:mock_audio_device",
+ "../../../modules/audio_processing:mocks",
+ "../../../test:audio_codec_mocks",
+ "../../../test:mock_transport",
+ "../../../test:run_loop",
+ "../../../test:test_support",
+ ]
+ }
+ }
+
+ rtc_library("audio_channel_unittests") {
+ testonly = true
+ sources = [ "audio_channel_unittest.cc" ]
+ deps = [
+ ":mock_task_queue",
+ "..:audio_channel",
+ "../../../api:transport_api",
+ "../../../api/audio_codecs:builtin_audio_decoder_factory",
+ "../../../api/audio_codecs:builtin_audio_encoder_factory",
+ "../../../api/task_queue:task_queue",
+ "../../../modules/audio_mixer:audio_mixer_impl",
+ "../../../modules/audio_mixer:audio_mixer_test_utils",
+ "../../../modules/rtp_rtcp:rtp_rtcp",
+ "../../../modules/rtp_rtcp:rtp_rtcp_format",
+ "../../../rtc_base:logging",
+ "../../../test:mock_transport",
+ "../../../test:test_support",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/functional:any_invocable" ]
+ }
+
+ rtc_library("audio_ingress_unittests") {
+ testonly = true
+ sources = [ "audio_ingress_unittest.cc" ]
+ deps = [
+ "..:audio_egress",
+ "..:audio_ingress",
+ "../../../api:transport_api",
+ "../../../api/audio_codecs:builtin_audio_decoder_factory",
+ "../../../api/audio_codecs:builtin_audio_encoder_factory",
+ "../../../api/task_queue:default_task_queue_factory",
+ "../../../modules/audio_mixer:audio_mixer_test_utils",
+ "../../../modules/rtp_rtcp:rtp_rtcp",
+ "../../../rtc_base:logging",
+ "../../../rtc_base:rtc_event",
+ "../../../test:mock_transport",
+ "../../../test:run_loop",
+ "../../../test:test_support",
+ ]
+ }
+
+ rtc_library("audio_egress_unittests") {
+ testonly = true
+ sources = [ "audio_egress_unittest.cc" ]
+ deps = [
+ "..:audio_egress",
+ "../../../api:transport_api",
+ "../../../api/audio_codecs:builtin_audio_encoder_factory",
+ "../../../api/task_queue:default_task_queue_factory",
+ "../../../modules/audio_mixer:audio_mixer_test_utils",
+ "../../../modules/rtp_rtcp:rtp_rtcp",
+ "../../../modules/rtp_rtcp:rtp_rtcp_format",
+ "../../../rtc_base:logging",
+ "../../../rtc_base:rtc_event",
+ "../../../test:mock_transport",
+ "../../../test:run_loop",
+ "../../../test:test_support",
+ ]
+ }
+}
diff --git a/third_party/libwebrtc/audio/voip/test/audio_channel_unittest.cc b/third_party/libwebrtc/audio/voip/test/audio_channel_unittest.cc
new file mode 100644
index 0000000000..8955810429
--- /dev/null
+++ b/third_party/libwebrtc/audio/voip/test/audio_channel_unittest.cc
@@ -0,0 +1,357 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio/voip/audio_channel.h"
+
+#include "absl/functional/any_invocable.h"
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "api/audio_codecs/builtin_audio_encoder_factory.h"
+#include "api/call/transport.h"
+#include "api/task_queue/task_queue_factory.h"
+#include "audio/voip/test/mock_task_queue.h"
+#include "modules/audio_mixer/audio_mixer_impl.h"
+#include "modules/audio_mixer/sine_wave_generator.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "rtc_base/logging.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/mock_transport.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::Invoke;
+using ::testing::NiceMock;
+using ::testing::Return;
+using ::testing::Unused;
+
+constexpr uint64_t kStartTime = 123456789;
+constexpr uint32_t kLocalSsrc = 0xdeadc0de;
+constexpr int16_t kAudioLevel = 3004; // used for sine wave level
+constexpr int kPcmuPayload = 0;
+
+class AudioChannelTest : public ::testing::Test {
+ public:
+ const SdpAudioFormat kPcmuFormat = {"pcmu", 8000, 1};
+
+ AudioChannelTest()
+ : fake_clock_(kStartTime), wave_generator_(1000.0, kAudioLevel) {
+ task_queue_factory_ = std::make_unique<MockTaskQueueFactory>(&task_queue_);
+ audio_mixer_ = AudioMixerImpl::Create();
+ encoder_factory_ = CreateBuiltinAudioEncoderFactory();
+ decoder_factory_ = CreateBuiltinAudioDecoderFactory();
+
+ // By default, run the queued task immediately.
+ ON_CALL(task_queue_, PostTask)
+ .WillByDefault(
+ [](absl::AnyInvocable<void() &&> task) { std::move(task)(); });
+ }
+
+ void SetUp() override { audio_channel_ = CreateAudioChannel(kLocalSsrc); }
+
+ void TearDown() override { audio_channel_ = nullptr; }
+
+ rtc::scoped_refptr<AudioChannel> CreateAudioChannel(uint32_t ssrc) {
+ // Use the same audio mixer here for simplicity's sake, as we are not
+ // checking the audio activity of RTP in our test cases. If we need to test
+ // audio signal activity, then we need to assign an audio mixer to each
+ // channel. This also uses the same transport object for the different
+ // audio channels to simplify the network routing logic.
+ rtc::scoped_refptr<AudioChannel> audio_channel =
+ rtc::make_ref_counted<AudioChannel>(
+ &transport_, ssrc, task_queue_factory_.get(), audio_mixer_.get(),
+ decoder_factory_);
+ audio_channel->SetEncoder(kPcmuPayload, kPcmuFormat,
+ encoder_factory_->MakeAudioEncoder(
+ kPcmuPayload, kPcmuFormat, absl::nullopt));
+ audio_channel->SetReceiveCodecs({{kPcmuPayload, kPcmuFormat}});
+ audio_channel->StartSend();
+ audio_channel->StartPlay();
+ return audio_channel;
+ }
+
+ std::unique_ptr<AudioFrame> GetAudioFrame(int order) {
+ auto frame = std::make_unique<AudioFrame>();
+ frame->sample_rate_hz_ = kPcmuFormat.clockrate_hz;
+ frame->samples_per_channel_ = kPcmuFormat.clockrate_hz / 100; // 10 ms.
+ frame->num_channels_ = kPcmuFormat.num_channels;
+ frame->timestamp_ = frame->samples_per_channel_ * order;
+ wave_generator_.GenerateNextFrame(frame.get());
+ return frame;
+ }
+
+ SimulatedClock fake_clock_;
+ SineWaveGenerator wave_generator_;
+ NiceMock<MockTransport> transport_;
+ NiceMock<MockTaskQueue> task_queue_;
+ std::unique_ptr<TaskQueueFactory> task_queue_factory_;
+ rtc::scoped_refptr<AudioMixer> audio_mixer_;
+ rtc::scoped_refptr<AudioDecoderFactory> decoder_factory_;
+ rtc::scoped_refptr<AudioEncoderFactory> encoder_factory_;
+ rtc::scoped_refptr<AudioChannel> audio_channel_;
+};
+
+// Validate RTP packet generation by feeding audio frames with a sine wave.
+// The resulting RTP packet is looped back into AudioChannel and decoded into
+// an audio frame to see whether it carries some signal indicating its
+// validity.
+TEST_F(AudioChannelTest, PlayRtpByLocalLoop) {
+ auto loop_rtp = [&](const uint8_t* packet, size_t length, Unused) {
+ audio_channel_->ReceivedRTPPacket(
+ rtc::ArrayView<const uint8_t>(packet, length));
+ return true;
+ };
+ EXPECT_CALL(transport_, SendRtp).WillOnce(Invoke(loop_rtp));
+
+ auto audio_sender = audio_channel_->GetAudioSender();
+ audio_sender->SendAudioData(GetAudioFrame(0));
+ audio_sender->SendAudioData(GetAudioFrame(1));
+
+ AudioFrame empty_frame, audio_frame;
+ empty_frame.Mute();
+ empty_frame.mutable_data(); // This will zero out the data.
+ audio_frame.CopyFrom(empty_frame);
+ audio_mixer_->Mix(/*number_of_channels*/ 1, &audio_frame);
+
+ // We now expect the audio frame to have picked up something.
+ EXPECT_NE(memcmp(empty_frame.data(), audio_frame.data(),
+ AudioFrame::kMaxDataSizeBytes),
+ 0);
+}
+
+// Validate that the assigned local SSRC ends up in the generated RTP packet.
+TEST_F(AudioChannelTest, VerifyLocalSsrcAsAssigned) {
+ RtpPacketReceived rtp;
+ auto loop_rtp = [&](const uint8_t* packet, size_t length, Unused) {
+ rtp.Parse(packet, length);
+ return true;
+ };
+ EXPECT_CALL(transport_, SendRtp).WillOnce(Invoke(loop_rtp));
+
+ auto audio_sender = audio_channel_->GetAudioSender();
+ audio_sender->SendAudioData(GetAudioFrame(0));
+ audio_sender->SendAudioData(GetAudioFrame(1));
+
+ EXPECT_EQ(rtp.Ssrc(), kLocalSsrc);
+}
+
+// Check metrics after processing an RTP packet.
+TEST_F(AudioChannelTest, TestIngressStatistics) {
+ auto loop_rtp = [&](const uint8_t* packet, size_t length, Unused) {
+ audio_channel_->ReceivedRTPPacket(
+ rtc::ArrayView<const uint8_t>(packet, length));
+ return true;
+ };
+ EXPECT_CALL(transport_, SendRtp).WillRepeatedly(Invoke(loop_rtp));
+
+ auto audio_sender = audio_channel_->GetAudioSender();
+ audio_sender->SendAudioData(GetAudioFrame(0));
+ audio_sender->SendAudioData(GetAudioFrame(1));
+
+ AudioFrame audio_frame;
+ audio_mixer_->Mix(/*number_of_channels=*/1, &audio_frame);
+ audio_mixer_->Mix(/*number_of_channels=*/1, &audio_frame);
+
+ absl::optional<IngressStatistics> ingress_stats =
+ audio_channel_->GetIngressStatistics();
+ EXPECT_TRUE(ingress_stats);
+ EXPECT_EQ(ingress_stats->neteq_stats.total_samples_received, 160ULL);
+ EXPECT_EQ(ingress_stats->neteq_stats.concealed_samples, 0ULL);
+ EXPECT_EQ(ingress_stats->neteq_stats.concealment_events, 0ULL);
+ EXPECT_EQ(ingress_stats->neteq_stats.inserted_samples_for_deceleration, 0ULL);
+ EXPECT_EQ(ingress_stats->neteq_stats.removed_samples_for_acceleration, 0ULL);
+ EXPECT_EQ(ingress_stats->neteq_stats.silent_concealed_samples, 0ULL);
+ // To extract the jitter buffer length in milliseconds, jitter_buffer_delay_ms
+ // needs to be divided by jitter_buffer_emitted_count (number of samples).
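+ // With the values asserted below, that is 1600 / 160 = 10 ms.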
+ EXPECT_EQ(ingress_stats->neteq_stats.jitter_buffer_delay_ms, 1600ULL);
+ EXPECT_EQ(ingress_stats->neteq_stats.jitter_buffer_emitted_count, 160ULL);
+ EXPECT_GT(ingress_stats->neteq_stats.jitter_buffer_target_delay_ms, 0ULL);
+ EXPECT_EQ(ingress_stats->neteq_stats.interruption_count, 0);
+ EXPECT_EQ(ingress_stats->neteq_stats.total_interruption_duration_ms, 0);
+ EXPECT_DOUBLE_EQ(ingress_stats->total_duration, 0.02);
+
+ // Now, without any RTP pending in the jitter buffer, pull more audio.
+ audio_mixer_->Mix(/*number_of_channels=*/1, &audio_frame);
+ audio_mixer_->Mix(/*number_of_channels=*/1, &audio_frame);
+
+ // Send another RTP packet to intentionally break PLC.
+ audio_sender->SendAudioData(GetAudioFrame(2));
+ audio_sender->SendAudioData(GetAudioFrame(3));
+
+ ingress_stats = audio_channel_->GetIngressStatistics();
+ EXPECT_TRUE(ingress_stats);
+ EXPECT_EQ(ingress_stats->neteq_stats.total_samples_received, 320ULL);
+ EXPECT_EQ(ingress_stats->neteq_stats.concealed_samples, 168ULL);
+ EXPECT_EQ(ingress_stats->neteq_stats.concealment_events, 1ULL);
+ EXPECT_EQ(ingress_stats->neteq_stats.inserted_samples_for_deceleration, 0ULL);
+ EXPECT_EQ(ingress_stats->neteq_stats.removed_samples_for_acceleration, 0ULL);
+ EXPECT_EQ(ingress_stats->neteq_stats.silent_concealed_samples, 0ULL);
+ EXPECT_EQ(ingress_stats->neteq_stats.jitter_buffer_delay_ms, 1600ULL);
+ EXPECT_EQ(ingress_stats->neteq_stats.jitter_buffer_emitted_count, 160ULL);
+ EXPECT_GT(ingress_stats->neteq_stats.jitter_buffer_target_delay_ms, 0ULL);
+ EXPECT_EQ(ingress_stats->neteq_stats.interruption_count, 0);
+ EXPECT_EQ(ingress_stats->neteq_stats.total_interruption_duration_ms, 0);
+ EXPECT_DOUBLE_EQ(ingress_stats->total_duration, 0.04);
+
+ // Pull the last RTP packet.
+ audio_mixer_->Mix(/*number_of_channels=*/1, &audio_frame);
+ audio_mixer_->Mix(/*number_of_channels=*/1, &audio_frame);
+
+ ingress_stats = audio_channel_->GetIngressStatistics();
+ EXPECT_TRUE(ingress_stats);
+ EXPECT_EQ(ingress_stats->neteq_stats.total_samples_received, 480ULL);
+ EXPECT_EQ(ingress_stats->neteq_stats.concealed_samples, 168ULL);
+ EXPECT_EQ(ingress_stats->neteq_stats.concealment_events, 1ULL);
+ EXPECT_EQ(ingress_stats->neteq_stats.inserted_samples_for_deceleration, 0ULL);
+ EXPECT_EQ(ingress_stats->neteq_stats.removed_samples_for_acceleration, 0ULL);
+ EXPECT_EQ(ingress_stats->neteq_stats.silent_concealed_samples, 0ULL);
+ EXPECT_EQ(ingress_stats->neteq_stats.jitter_buffer_delay_ms, 3200ULL);
+ EXPECT_EQ(ingress_stats->neteq_stats.jitter_buffer_emitted_count, 320ULL);
+ EXPECT_GT(ingress_stats->neteq_stats.jitter_buffer_target_delay_ms, 0ULL);
+ EXPECT_EQ(ingress_stats->neteq_stats.interruption_count, 0);
+ EXPECT_EQ(ingress_stats->neteq_stats.total_interruption_duration_ms, 0);
+ EXPECT_DOUBLE_EQ(ingress_stats->total_duration, 0.06);
+}
+
+// Check ChannelStatistics metric after processing RTP and RTCP packets.
+TEST_F(AudioChannelTest, TestChannelStatistics) {
+ auto loop_rtp = [&](const uint8_t* packet, size_t length, Unused) {
+ audio_channel_->ReceivedRTPPacket(
+ rtc::ArrayView<const uint8_t>(packet, length));
+ return true;
+ };
+ auto loop_rtcp = [&](const uint8_t* packet, size_t length) {
+ audio_channel_->ReceivedRTCPPacket(
+ rtc::ArrayView<const uint8_t>(packet, length));
+ return true;
+ };
+ EXPECT_CALL(transport_, SendRtp).WillRepeatedly(Invoke(loop_rtp));
+ EXPECT_CALL(transport_, SendRtcp).WillRepeatedly(Invoke(loop_rtcp));
+
+ // Simulate the microphone providing a 10 ms audio frame. This will trigger
+ // the transport to send RTP, as handled in loop_rtp above.
+ auto audio_sender = audio_channel_->GetAudioSender();
+ audio_sender->SendAudioData(GetAudioFrame(0));
+ audio_sender->SendAudioData(GetAudioFrame(1));
+
+ // Simulate the speaker requesting an audio frame (10 ms). This will trigger
+ // the VoIP engine to fetch audio samples from RTP packets stored in the
+ // jitter buffer.
+ AudioFrame audio_frame;
+ audio_mixer_->Mix(/*number_of_channels=*/1, &audio_frame);
+ audio_mixer_->Mix(/*number_of_channels=*/1, &audio_frame);
+
+ // Force sending an RTCP SR report in order to have the remote_rtcp field
+ // available in the channel statistics. This will trigger the transport to
+ // send RTCP, as handled in loop_rtcp above.
+ audio_channel_->SendRTCPReportForTesting(kRtcpSr);
+
+ absl::optional<ChannelStatistics> channel_stats =
+ audio_channel_->GetChannelStatistics();
+ EXPECT_TRUE(channel_stats);
+
+ EXPECT_EQ(channel_stats->packets_sent, 1ULL);
+ EXPECT_EQ(channel_stats->bytes_sent, 160ULL);
+
+ EXPECT_EQ(channel_stats->packets_received, 1ULL);
+ EXPECT_EQ(channel_stats->bytes_received, 160ULL);
+ EXPECT_EQ(channel_stats->jitter, 0);
+ EXPECT_EQ(channel_stats->packets_lost, 0);
+ EXPECT_EQ(channel_stats->remote_ssrc.value(), kLocalSsrc);
+
+ EXPECT_TRUE(channel_stats->remote_rtcp.has_value());
+
+ EXPECT_EQ(channel_stats->remote_rtcp->jitter, 0);
+ EXPECT_EQ(channel_stats->remote_rtcp->packets_lost, 0);
+ EXPECT_EQ(channel_stats->remote_rtcp->fraction_lost, 0);
+ EXPECT_GT(channel_stats->remote_rtcp->last_report_received_timestamp_ms, 0);
+ EXPECT_FALSE(channel_stats->remote_rtcp->round_trip_time.has_value());
+}
+
+// Check the ChannelStatistics RTT metric after processing RTP and RTCP
+// packets using three audio channels, each representing a media endpoint.
+//
+// 1) AC1 <- RTP/RTCP -> AC2
+// 2) AC1 <- RTP/RTCP -> AC3
+//
+// During step 1), AC1 should be able to check RTT from AC2's SSRC.
+// During step 2), AC1 should be able to check RTT from AC3's SSRC.
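+// Note: in both steps the RTT is derived from the RTCP report exchange (the
+// LSR/DLSR fields of the returned report blocks), so RTCP has to flow in both
+// directions before a round trip time becomes available.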
+TEST_F(AudioChannelTest, RttIsAvailableAfterChangeOfRemoteSsrc) {
+ // Create AC2 and AC3.
+ constexpr uint32_t kAc2Ssrc = 0xdeadbeef;
+ constexpr uint32_t kAc3Ssrc = 0xdeafbeef;
+
+ auto ac_2 = CreateAudioChannel(kAc2Ssrc);
+ auto ac_3 = CreateAudioChannel(kAc3Ssrc);
+
+ auto send_recv_rtp = [&](rtc::scoped_refptr<AudioChannel> rtp_sender,
+ rtc::scoped_refptr<AudioChannel> rtp_receiver) {
+ // Set up routing logic via transport_.
+ auto route_rtp = [&](const uint8_t* packet, size_t length, Unused) {
+ rtp_receiver->ReceivedRTPPacket(rtc::MakeArrayView(packet, length));
+ return true;
+ };
+ ON_CALL(transport_, SendRtp).WillByDefault(route_rtp);
+
+ // This will trigger route_rtp callback via transport_.
+ rtp_sender->GetAudioSender()->SendAudioData(GetAudioFrame(0));
+ rtp_sender->GetAudioSender()->SendAudioData(GetAudioFrame(1));
+
+ // Process received RTP in receiver.
+ AudioFrame audio_frame;
+ audio_mixer_->Mix(/*number_of_channels=*/1, &audio_frame);
+ audio_mixer_->Mix(/*number_of_channels=*/1, &audio_frame);
+
+ // Revert to the default to avoid using the references captured by the
+ // route_rtp lambda.
+ ON_CALL(transport_, SendRtp).WillByDefault(Return(true));
+ };
+
+ auto send_recv_rtcp = [&](rtc::scoped_refptr<AudioChannel> rtcp_sender,
+ rtc::scoped_refptr<AudioChannel> rtcp_receiver) {
+ // Set up routing logic via transport_.
+ auto route_rtcp = [&](const uint8_t* packet, size_t length) {
+ rtcp_receiver->ReceivedRTCPPacket(rtc::MakeArrayView(packet, length));
+ return true;
+ };
+ ON_CALL(transport_, SendRtcp).WillByDefault(route_rtcp);
+
+ // This will trigger route_rtcp callback via transport_.
+ rtcp_sender->SendRTCPReportForTesting(kRtcpSr);
+
+ // Revert to the default to avoid using the references captured by the
+ // route_rtcp lambda.
+ ON_CALL(transport_, SendRtcp).WillByDefault(Return(true));
+ };
+
+ // AC1 <-- RTP/RTCP --> AC2
+ send_recv_rtp(audio_channel_, ac_2);
+ send_recv_rtp(ac_2, audio_channel_);
+ send_recv_rtcp(audio_channel_, ac_2);
+ send_recv_rtcp(ac_2, audio_channel_);
+
+ absl::optional<ChannelStatistics> channel_stats =
+ audio_channel_->GetChannelStatistics();
+ ASSERT_TRUE(channel_stats);
+ EXPECT_EQ(channel_stats->remote_ssrc, kAc2Ssrc);
+ ASSERT_TRUE(channel_stats->remote_rtcp);
+ EXPECT_GT(channel_stats->remote_rtcp->round_trip_time, 0.0);
+
+ // AC1 <-- RTP/RTCP --> AC3
+ send_recv_rtp(audio_channel_, ac_3);
+ send_recv_rtp(ac_3, audio_channel_);
+ send_recv_rtcp(audio_channel_, ac_3);
+ send_recv_rtcp(ac_3, audio_channel_);
+
+ channel_stats = audio_channel_->GetChannelStatistics();
+ ASSERT_TRUE(channel_stats);
+ EXPECT_EQ(channel_stats->remote_ssrc, kAc3Ssrc);
+ ASSERT_TRUE(channel_stats->remote_rtcp);
+ EXPECT_GT(channel_stats->remote_rtcp->round_trip_time, 0.0);
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/audio/voip/test/audio_egress_unittest.cc b/third_party/libwebrtc/audio/voip/test/audio_egress_unittest.cc
new file mode 100644
index 0000000000..34c5585347
--- /dev/null
+++ b/third_party/libwebrtc/audio/voip/test/audio_egress_unittest.cc
@@ -0,0 +1,327 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio/voip/audio_egress.h"
+
+#include "api/audio_codecs/builtin_audio_encoder_factory.h"
+#include "api/call/transport.h"
+#include "api/task_queue/default_task_queue_factory.h"
+#include "modules/audio_mixer/sine_wave_generator.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h"
+#include "rtc_base/event.h"
+#include "rtc_base/logging.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/mock_transport.h"
+#include "test/run_loop.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::Invoke;
+using ::testing::NiceMock;
+using ::testing::Unused;
+
+std::unique_ptr<ModuleRtpRtcpImpl2> CreateRtpStack(Clock* clock,
+ Transport* transport,
+ uint32_t remote_ssrc) {
+ RtpRtcpInterface::Configuration rtp_config;
+ rtp_config.clock = clock;
+ rtp_config.audio = true;
+ rtp_config.rtcp_report_interval_ms = 5000;
+ rtp_config.outgoing_transport = transport;
+ rtp_config.local_media_ssrc = remote_ssrc;
+ auto rtp_rtcp = ModuleRtpRtcpImpl2::Create(rtp_config);
+ rtp_rtcp->SetSendingMediaStatus(false);
+ rtp_rtcp->SetRTCPStatus(RtcpMode::kCompound);
+ return rtp_rtcp;
+}
+
+constexpr int16_t kAudioLevel = 3004; // Used for sine wave level.
+
+// AudioEgressTest configures audio egress using the RTP stack, a fake clock,
+// and a task queue factory. The encoder factory is needed to create the codec
+// and configure the RTP stack in audio egress.
+class AudioEgressTest : public ::testing::Test {
+ public:
+ static constexpr uint16_t kSeqNum = 12345;
+ static constexpr uint64_t kStartTime = 123456789;
+ static constexpr uint32_t kRemoteSsrc = 0xDEADBEEF;
+ const SdpAudioFormat kPcmuFormat = {"pcmu", 8000, 1};
+
+ AudioEgressTest()
+ : fake_clock_(kStartTime), wave_generator_(1000.0, kAudioLevel) {
+ task_queue_factory_ = CreateDefaultTaskQueueFactory();
+ encoder_factory_ = CreateBuiltinAudioEncoderFactory();
+ }
+
+ // Prepare the audio egress test by using the PCMu codec with a specific
+ // sequence number and setting its status to running.
+ void SetUp() override {
+ rtp_rtcp_ = CreateRtpStack(&fake_clock_, &transport_, kRemoteSsrc);
+ egress_ = std::make_unique<AudioEgress>(rtp_rtcp_.get(), &fake_clock_,
+ task_queue_factory_.get());
+ constexpr int kPcmuPayload = 0;
+ egress_->SetEncoder(kPcmuPayload, kPcmuFormat,
+ encoder_factory_->MakeAudioEncoder(
+ kPcmuPayload, kPcmuFormat, absl::nullopt));
+ egress_->StartSend();
+ rtp_rtcp_->SetSequenceNumber(kSeqNum);
+ rtp_rtcp_->SetSendingStatus(true);
+ }
+
+ // Make sure we shut down the RTP stack and reset egress after each test.
+ void TearDown() override {
+ egress_->StopSend();
+ rtp_rtcp_->SetSendingStatus(false);
+ egress_.reset();
+ rtp_rtcp_.reset();
+ }
+
+ // Create an audio frame prepared for PCMu encoding. Per the RTP
+ // specification, the timestamp is increased by the number of samples the
+ // frame contains. The wave generator writes a sine wave whose expected level
+ // is set by kAudioLevel.
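+ // For example, at 8000 Hz a 10 ms frame holds 80 samples, so the frame with
+ // `order` N carries timestamp 80 * N.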
+ std::unique_ptr<AudioFrame> GetAudioFrame(int order) {
+ auto frame = std::make_unique<AudioFrame>();
+ frame->sample_rate_hz_ = kPcmuFormat.clockrate_hz;
+ frame->samples_per_channel_ = kPcmuFormat.clockrate_hz / 100; // 10 ms.
+ frame->num_channels_ = kPcmuFormat.num_channels;
+ frame->timestamp_ = frame->samples_per_channel_ * order;
+ wave_generator_.GenerateNextFrame(frame.get());
+ return frame;
+ }
+
+ test::RunLoop run_loop_;
+ // SimulatedClock doesn't directly affect this test case, as the AudioFrame's
+ // timestamp is driven by GetAudioFrame.
+ SimulatedClock fake_clock_;
+ NiceMock<MockTransport> transport_;
+ SineWaveGenerator wave_generator_;
+ std::unique_ptr<ModuleRtpRtcpImpl2> rtp_rtcp_;
+ std::unique_ptr<TaskQueueFactory> task_queue_factory_;
+ rtc::scoped_refptr<AudioEncoderFactory> encoder_factory_;
+ std::unique_ptr<AudioEgress> egress_;
+};
+
+TEST_F(AudioEgressTest, SendingStatusAfterStartAndStop) {
+ EXPECT_TRUE(egress_->IsSending());
+ egress_->StopSend();
+ EXPECT_FALSE(egress_->IsSending());
+}
+
+TEST_F(AudioEgressTest, ProcessAudioWithMute) {
+ constexpr int kExpected = 10;
+ rtc::Event event;
+ int rtp_count = 0;
+ RtpPacketReceived rtp;
+ auto rtp_sent = [&](const uint8_t* packet, size_t length, Unused) {
+ rtp.Parse(packet, length);
+ if (++rtp_count == kExpected) {
+ event.Set();
+ }
+ return true;
+ };
+
+ EXPECT_CALL(transport_, SendRtp).WillRepeatedly(Invoke(rtp_sent));
+
+ egress_->SetMute(true);
+
+ // Two 10 ms audio frames will result in an RTP packet with ptime 20.
+ for (size_t i = 0; i < kExpected * 2; i++) {
+ egress_->SendAudioData(GetAudioFrame(i));
+ fake_clock_.AdvanceTimeMilliseconds(10);
+ }
+
+ event.Wait(TimeDelta::Seconds(1));
+ EXPECT_EQ(rtp_count, kExpected);
+
+ // We expect every byte of the PCMu payload to be 255 for the silenced input.
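+ // (In G.711 mu-law, a zero-valued sample encodes to 0xFF.)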
+ RTPHeader header;
+ rtp.GetHeader(&header);
+ size_t packet_length = rtp.size();
+ size_t payload_length = packet_length - header.headerLength;
+ size_t payload_data_length = payload_length - header.paddingLength;
+ const uint8_t* payload = rtp.data() + header.headerLength;
+ for (size_t i = 0; i < payload_data_length; ++i) {
+ EXPECT_EQ(*payload++, 255);
+ }
+}
+
+TEST_F(AudioEgressTest, ProcessAudioWithSineWave) {
+ constexpr int kExpected = 10;
+ rtc::Event event;
+ int rtp_count = 0;
+ RtpPacketReceived rtp;
+ auto rtp_sent = [&](const uint8_t* packet, size_t length, Unused) {
+ rtp.Parse(packet, length);
+ if (++rtp_count == kExpected) {
+ event.Set();
+ }
+ return true;
+ };
+
+ EXPECT_CALL(transport_, SendRtp).WillRepeatedly(Invoke(rtp_sent));
+
+ // Two 10 ms audio frames will result in an RTP packet with ptime 20.
+ for (size_t i = 0; i < kExpected * 2; i++) {
+ egress_->SendAudioData(GetAudioFrame(i));
+ fake_clock_.AdvanceTimeMilliseconds(10);
+ }
+
+ event.Wait(TimeDelta::Seconds(1));
+ EXPECT_EQ(rtp_count, kExpected);
+
+ // We expect the PCMu payload bytes to differ from 255 for a payload carrying
+ // a sine wave.
+ RTPHeader header;
+ rtp.GetHeader(&header);
+ size_t packet_length = rtp.size();
+ size_t payload_length = packet_length - header.headerLength;
+ size_t payload_data_length = payload_length - header.paddingLength;
+ const uint8_t* payload = rtp.data() + header.headerLength;
+ for (size_t i = 0; i < payload_data_length; ++i) {
+ EXPECT_NE(*payload++, 255);
+ }
+}
+
+TEST_F(AudioEgressTest, SkipAudioEncodingAfterStopSend) {
+ constexpr int kExpected = 10;
+ rtc::Event event;
+ int rtp_count = 0;
+ auto rtp_sent = [&](const uint8_t* packet, size_t length, Unused) {
+ if (++rtp_count == kExpected) {
+ event.Set();
+ }
+ return true;
+ };
+
+ EXPECT_CALL(transport_, SendRtp).WillRepeatedly(Invoke(rtp_sent));
+
+ // Two 10 ms audio frames will result in an RTP packet with ptime 20.
+ for (size_t i = 0; i < kExpected * 2; i++) {
+ egress_->SendAudioData(GetAudioFrame(i));
+ fake_clock_.AdvanceTimeMilliseconds(10);
+ }
+
+ event.Wait(TimeDelta::Seconds(1));
+ EXPECT_EQ(rtp_count, kExpected);
+
+ // Now stop send and yet feed more data.
+ egress_->StopSend();
+
+ // It should be safe to exit the test case while encoder_queue_ has
+ // outstanding data to process. We are making sure that this doesn't
+ // result in crashes or sanitizer errors due to remaining data.
+ for (size_t i = 0; i < kExpected * 2; i++) {
+ egress_->SendAudioData(GetAudioFrame(i));
+ fake_clock_.AdvanceTimeMilliseconds(10);
+ }
+}
+
+TEST_F(AudioEgressTest, ChangeEncoderFromPcmuToOpus) {
+ absl::optional<SdpAudioFormat> pcmu = egress_->GetEncoderFormat();
+ EXPECT_TRUE(pcmu);
+ EXPECT_EQ(pcmu->clockrate_hz, kPcmuFormat.clockrate_hz);
+ EXPECT_EQ(pcmu->num_channels, kPcmuFormat.num_channels);
+
+ constexpr int kOpusPayload = 120;
+ const SdpAudioFormat kOpusFormat = {"opus", 48000, 2};
+
+ egress_->SetEncoder(kOpusPayload, kOpusFormat,
+ encoder_factory_->MakeAudioEncoder(
+ kOpusPayload, kOpusFormat, absl::nullopt));
+
+ absl::optional<SdpAudioFormat> opus = egress_->GetEncoderFormat();
+ EXPECT_TRUE(opus);
+ EXPECT_EQ(opus->clockrate_hz, kOpusFormat.clockrate_hz);
+ EXPECT_EQ(opus->num_channels, kOpusFormat.num_channels);
+}
+
+TEST_F(AudioEgressTest, SendDTMF) {
+ constexpr int kExpected = 7;
+ constexpr int kPayloadType = 100;
+ constexpr int kDurationMs = 100;
+ constexpr int kSampleRate = 8000;
+ constexpr int kEvent = 3;
+
+ egress_->RegisterTelephoneEventType(kPayloadType, kSampleRate);
+ // A 100 ms duration will produce a total of 7 DTMF packets:
+ // 1 @ 20 ms, 2 @ 40 ms, 3 @ 60 ms, 4 @ 80 ms,
+ // 5, 6, 7 @ 100 ms (the last interval sends 3 DTMF packets).
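+ // (Per RFC 4733, the packet that ends the event is sent a total of three
+ // times for robustness, which is why the last interval yields packets 5-7.)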
+ egress_->SendTelephoneEvent(kEvent, kDurationMs);
+
+ rtc::Event event;
+ int dtmf_count = 0;
+ auto is_dtmf = [&](RtpPacketReceived& rtp) {
+ return (rtp.PayloadType() == kPayloadType &&
+ rtp.SequenceNumber() == kSeqNum + dtmf_count &&
+ rtp.padding_size() == 0 && rtp.Marker() == (dtmf_count == 0) &&
+ rtp.Ssrc() == kRemoteSsrc);
+ };
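+ // Note: per RFC 4733 the marker bit is set only on the first packet of the
+ // event, hence the Marker() == (dtmf_count == 0) check above.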
+
+ // It's possible that we may have actual audio RTP packets along with
+ // DTMF packets. We are only interested in the exact number of DTMF
+ // packets the RTP stack is emitting.
+ auto rtp_sent = [&](const uint8_t* packet, size_t length, Unused) {
+ RtpPacketReceived rtp;
+ rtp.Parse(packet, length);
+ if (is_dtmf(rtp) && ++dtmf_count == kExpected) {
+ event.Set();
+ }
+ return true;
+ };
+
+ EXPECT_CALL(transport_, SendRtp).WillRepeatedly(Invoke(rtp_sent));
+
+ // Two 10 ms audio frames will result in an RTP packet with ptime 20.
+ for (size_t i = 0; i < kExpected * 2; i++) {
+ egress_->SendAudioData(GetAudioFrame(i));
+ fake_clock_.AdvanceTimeMilliseconds(10);
+ }
+
+ event.Wait(TimeDelta::Seconds(1));
+ EXPECT_EQ(dtmf_count, kExpected);
+}
+
+TEST_F(AudioEgressTest, TestAudioInputLevelAndEnergyDuration) {
+ // Per audio_level's kUpdateFrequency, we need more than 10 audio samples to
+ // get the audio level from the input source.
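+ // Six 20 ms packets correspond to twelve 10 ms input frames (120 ms of
+ // audio), which also matches kExpectedDuration below.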
+ constexpr int kExpected = 6;
+ rtc::Event event;
+ int rtp_count = 0;
+ auto rtp_sent = [&](const uint8_t* packet, size_t length, Unused) {
+ if (++rtp_count == kExpected) {
+ event.Set();
+ }
+ return true;
+ };
+
+ EXPECT_CALL(transport_, SendRtp).WillRepeatedly(Invoke(rtp_sent));
+
+ // Two 10 ms audio frames will result in an RTP packet with ptime 20.
+ for (size_t i = 0; i < kExpected * 2; i++) {
+ egress_->SendAudioData(GetAudioFrame(i));
+ fake_clock_.AdvanceTimeMilliseconds(10);
+ }
+
+ event.Wait(/*give_up_after=*/TimeDelta::Seconds(1));
+ EXPECT_EQ(rtp_count, kExpected);
+
+ constexpr double kExpectedEnergy = 0.00016809565587789564;
+ constexpr double kExpectedDuration = 0.11999999999999998;
+
+ EXPECT_EQ(egress_->GetInputAudioLevel(), kAudioLevel);
+ EXPECT_DOUBLE_EQ(egress_->GetInputTotalEnergy(), kExpectedEnergy);
+ EXPECT_DOUBLE_EQ(egress_->GetInputTotalDuration(), kExpectedDuration);
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/audio/voip/test/audio_ingress_unittest.cc b/third_party/libwebrtc/audio/voip/test/audio_ingress_unittest.cc
new file mode 100644
index 0000000000..3c309dbf82
--- /dev/null
+++ b/third_party/libwebrtc/audio/voip/test/audio_ingress_unittest.cc
@@ -0,0 +1,238 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio/voip/audio_ingress.h"
+
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "api/audio_codecs/builtin_audio_encoder_factory.h"
+#include "api/call/transport.h"
+#include "api/task_queue/default_task_queue_factory.h"
+#include "audio/voip/audio_egress.h"
+#include "modules/audio_mixer/sine_wave_generator.h"
+#include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h"
+#include "rtc_base/event.h"
+#include "rtc_base/logging.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/mock_transport.h"
+#include "test/run_loop.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::Invoke;
+using ::testing::NiceMock;
+using ::testing::Unused;
+
+constexpr int16_t kAudioLevel = 3004; // Used for sine wave level.
+
+class AudioIngressTest : public ::testing::Test {
+ public:
+ const SdpAudioFormat kPcmuFormat = {"pcmu", 8000, 1};
+
+ AudioIngressTest()
+ : fake_clock_(123456789), wave_generator_(1000.0, kAudioLevel) {
+ receive_statistics_ = ReceiveStatistics::Create(&fake_clock_);
+
+ RtpRtcpInterface::Configuration rtp_config;
+ rtp_config.clock = &fake_clock_;
+ rtp_config.audio = true;
+ rtp_config.receive_statistics = receive_statistics_.get();
+ rtp_config.rtcp_report_interval_ms = 5000;
+ rtp_config.outgoing_transport = &transport_;
+ rtp_config.local_media_ssrc = 0xdeadc0de;
+ rtp_rtcp_ = ModuleRtpRtcpImpl2::Create(rtp_config);
+
+ rtp_rtcp_->SetSendingMediaStatus(false);
+ rtp_rtcp_->SetRTCPStatus(RtcpMode::kCompound);
+
+ task_queue_factory_ = CreateDefaultTaskQueueFactory();
+ encoder_factory_ = CreateBuiltinAudioEncoderFactory();
+ decoder_factory_ = CreateBuiltinAudioDecoderFactory();
+ }
+
+ void SetUp() override {
+ constexpr int kPcmuPayload = 0;
+ ingress_ = std::make_unique<AudioIngress>(rtp_rtcp_.get(), &fake_clock_,
+ receive_statistics_.get(),
+ decoder_factory_);
+ ingress_->SetReceiveCodecs({{kPcmuPayload, kPcmuFormat}});
+
+ egress_ = std::make_unique<AudioEgress>(rtp_rtcp_.get(), &fake_clock_,
+ task_queue_factory_.get());
+ egress_->SetEncoder(kPcmuPayload, kPcmuFormat,
+ encoder_factory_->MakeAudioEncoder(
+ kPcmuPayload, kPcmuFormat, absl::nullopt));
+ egress_->StartSend();
+ ingress_->StartPlay();
+ rtp_rtcp_->SetSendingStatus(true);
+ }
+
+ void TearDown() override {
+ rtp_rtcp_->SetSendingStatus(false);
+ ingress_->StopPlay();
+ egress_->StopSend();
+ egress_.reset();
+ ingress_.reset();
+ }
+
+ std::unique_ptr<AudioFrame> GetAudioFrame(int order) {
+ auto frame = std::make_unique<AudioFrame>();
+ frame->sample_rate_hz_ = kPcmuFormat.clockrate_hz;
+ frame->samples_per_channel_ = kPcmuFormat.clockrate_hz / 100; // 10 ms.
+ frame->num_channels_ = kPcmuFormat.num_channels;
+ frame->timestamp_ = frame->samples_per_channel_ * order;
+ wave_generator_.GenerateNextFrame(frame.get());
+ return frame;
+ }
+
+ test::RunLoop run_loop_;
+ SimulatedClock fake_clock_;
+ SineWaveGenerator wave_generator_;
+ NiceMock<MockTransport> transport_;
+ std::unique_ptr<ReceiveStatistics> receive_statistics_;
+ std::unique_ptr<ModuleRtpRtcpImpl2> rtp_rtcp_;
+ rtc::scoped_refptr<AudioEncoderFactory> encoder_factory_;
+ rtc::scoped_refptr<AudioDecoderFactory> decoder_factory_;
+ std::unique_ptr<TaskQueueFactory> task_queue_factory_;
+ std::unique_ptr<AudioIngress> ingress_;
+ std::unique_ptr<AudioEgress> egress_;
+};
+
+TEST_F(AudioIngressTest, PlayingAfterStartAndStop) {
+ EXPECT_EQ(ingress_->IsPlaying(), true);
+ ingress_->StopPlay();
+ EXPECT_EQ(ingress_->IsPlaying(), false);
+}
+
+TEST_F(AudioIngressTest, GetAudioFrameAfterRtpReceived) {
+ rtc::Event event;
+ auto handle_rtp = [&](const uint8_t* packet, size_t length, Unused) {
+ ingress_->ReceivedRTPPacket(rtc::ArrayView<const uint8_t>(packet, length));
+ event.Set();
+ return true;
+ };
+ EXPECT_CALL(transport_, SendRtp).WillRepeatedly(Invoke(handle_rtp));
+ egress_->SendAudioData(GetAudioFrame(0));
+ egress_->SendAudioData(GetAudioFrame(1));
+ event.Wait(TimeDelta::Seconds(1));
+
+ AudioFrame audio_frame;
+ EXPECT_EQ(
+ ingress_->GetAudioFrameWithInfo(kPcmuFormat.clockrate_hz, &audio_frame),
+ AudioMixer::Source::AudioFrameInfo::kNormal);
+ EXPECT_FALSE(audio_frame.muted());
+ EXPECT_EQ(audio_frame.num_channels_, 1u);
+ EXPECT_EQ(audio_frame.samples_per_channel_,
+ static_cast<size_t>(kPcmuFormat.clockrate_hz / 100));
+ EXPECT_EQ(audio_frame.sample_rate_hz_, kPcmuFormat.clockrate_hz);
+ EXPECT_NE(audio_frame.timestamp_, 0u);
+ EXPECT_EQ(audio_frame.elapsed_time_ms_, 0);
+}
+
+TEST_F(AudioIngressTest, TestSpeechOutputLevelAndEnergyDuration) {
+ // Per audio_level's kUpdateFrequency, we need more than 10 audio samples to
+ // get the audio level from the output source.
+ constexpr int kNumRtp = 6;
+ int rtp_count = 0;
+ rtc::Event event;
+ auto handle_rtp = [&](const uint8_t* packet, size_t length, Unused) {
+ ingress_->ReceivedRTPPacket(rtc::ArrayView<const uint8_t>(packet, length));
+ if (++rtp_count == kNumRtp) {
+ event.Set();
+ }
+ return true;
+ };
+ EXPECT_CALL(transport_, SendRtp).WillRepeatedly(Invoke(handle_rtp));
+ for (int i = 0; i < kNumRtp * 2; i++) {
+ egress_->SendAudioData(GetAudioFrame(i));
+ fake_clock_.AdvanceTimeMilliseconds(10);
+ }
+ event.Wait(/*give_up_after=*/TimeDelta::Seconds(1));
+
+ for (int i = 0; i < kNumRtp * 2; ++i) {
+ AudioFrame audio_frame;
+ EXPECT_EQ(
+ ingress_->GetAudioFrameWithInfo(kPcmuFormat.clockrate_hz, &audio_frame),
+ AudioMixer::Source::AudioFrameInfo::kNormal);
+ }
+ EXPECT_EQ(ingress_->GetOutputAudioLevel(), kAudioLevel);
+
+ constexpr double kExpectedEnergy = 0.00016809565587789564;
+ constexpr double kExpectedDuration = 0.11999999999999998;
+
+ EXPECT_DOUBLE_EQ(ingress_->GetOutputTotalEnergy(), kExpectedEnergy);
+ EXPECT_DOUBLE_EQ(ingress_->GetOutputTotalDuration(), kExpectedDuration);
+}
+
+TEST_F(AudioIngressTest, PreferredSampleRate) {
+ rtc::Event event;
+ auto handle_rtp = [&](const uint8_t* packet, size_t length, Unused) {
+ ingress_->ReceivedRTPPacket(rtc::ArrayView<const uint8_t>(packet, length));
+ event.Set();
+ return true;
+ };
+ EXPECT_CALL(transport_, SendRtp).WillRepeatedly(Invoke(handle_rtp));
+ egress_->SendAudioData(GetAudioFrame(0));
+ egress_->SendAudioData(GetAudioFrame(1));
+ event.Wait(TimeDelta::Seconds(1));
+
+ AudioFrame audio_frame;
+ EXPECT_EQ(
+ ingress_->GetAudioFrameWithInfo(kPcmuFormat.clockrate_hz, &audio_frame),
+ AudioMixer::Source::AudioFrameInfo::kNormal);
+ EXPECT_EQ(ingress_->PreferredSampleRate(), kPcmuFormat.clockrate_hz);
+}
+
+// This test covers the case where the caller invokes StopPlay(), after which
+// AudioIngress should produce silence frames.
+TEST_F(AudioIngressTest, GetMutedAudioFrameAfterRtpReceivedAndStopPlay) {
+ // StopPlay before we start sending RTP packet with sine wave.
+ ingress_->StopPlay();
+
+ // Send 6 RTP packets to generate more than 100 ms of audio samples and get a
+ // valid speech level.
+ constexpr int kNumRtp = 6;
+ int rtp_count = 0;
+ rtc::Event event;
+ auto handle_rtp = [&](const uint8_t* packet, size_t length, Unused) {
+ ingress_->ReceivedRTPPacket(rtc::ArrayView<const uint8_t>(packet, length));
+ if (++rtp_count == kNumRtp) {
+ event.Set();
+ }
+ return true;
+ };
+ EXPECT_CALL(transport_, SendRtp).WillRepeatedly(Invoke(handle_rtp));
+ for (int i = 0; i < kNumRtp * 2; i++) {
+ egress_->SendAudioData(GetAudioFrame(i));
+ fake_clock_.AdvanceTimeMilliseconds(10);
+ }
+ event.Wait(/*give_up_after=*/TimeDelta::Seconds(1));
+
+ for (int i = 0; i < kNumRtp * 2; ++i) {
+ AudioFrame audio_frame;
+ EXPECT_EQ(
+ ingress_->GetAudioFrameWithInfo(kPcmuFormat.clockrate_hz, &audio_frame),
+ AudioMixer::Source::AudioFrameInfo::kMuted);
+ const int16_t* audio_data = audio_frame.data();
+ size_t length =
+ audio_frame.samples_per_channel_ * audio_frame.num_channels_;
+ for (size_t j = 0; j < length; ++j) {
+ EXPECT_EQ(audio_data[j], 0);
+ }
+ }
+
+ // We should still see a valid speech output level, as StopPlay won't affect
+ // the measurement.
+ EXPECT_EQ(ingress_->GetOutputAudioLevel(), kAudioLevel);
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/audio/voip/test/mock_task_queue.h b/third_party/libwebrtc/audio/voip/test/mock_task_queue.h
new file mode 100644
index 0000000000..547b0d3f75
--- /dev/null
+++ b/third_party/libwebrtc/audio/voip/test/mock_task_queue.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_VOIP_TEST_MOCK_TASK_QUEUE_H_
+#define AUDIO_VOIP_TEST_MOCK_TASK_QUEUE_H_
+
+#include <memory>
+
+#include "api/task_queue/task_queue_factory.h"
+#include "api/task_queue/test/mock_task_queue_base.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+
+// MockTaskQueue installs itself as the current global TaskQueueBase so that
+// tasks can be run immediately. It's necessary for some tests that depend on
+// TaskQueueBase internally.
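+// Typical usage (see audio_channel_unittest.cc): wrap an instance in the
+// MockTaskQueueFactory below and stub PostTask so that queued work runs
+// inline, e.g.
+//   ON_CALL(task_queue_, PostTask)
+//       .WillByDefault(
+//           [](absl::AnyInvocable<void() &&> task) { std::move(task)(); });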
+class MockTaskQueue : public MockTaskQueueBase {
+ public:
+ MockTaskQueue() : current_(this) {}
+
+ // Delete is deliberately defined as a no-op, as MockTaskQueue is expected
+ // to remain the current global TaskQueueBase throughout the test.
+ void Delete() override {}
+
+ private:
+ CurrentTaskQueueSetter current_;
+};
+
+class MockTaskQueueFactory : public TaskQueueFactory {
+ public:
+ explicit MockTaskQueueFactory(MockTaskQueue* task_queue)
+ : task_queue_(task_queue) {}
+
+ std::unique_ptr<TaskQueueBase, TaskQueueDeleter> CreateTaskQueue(
+ absl::string_view name,
+ Priority priority) const override {
+ // The default MockTaskQueue::Delete is a no-op, therefore it's safe to
+ // pass the raw pointer.
+ return std::unique_ptr<TaskQueueBase, TaskQueueDeleter>(task_queue_);
+ }
+
+ private:
+ MockTaskQueue* task_queue_;
+};
+
+} // namespace webrtc
+
+#endif // AUDIO_VOIP_TEST_MOCK_TASK_QUEUE_H_
diff --git a/third_party/libwebrtc/audio/voip/test/voip_core_unittest.cc b/third_party/libwebrtc/audio/voip/test/voip_core_unittest.cc
new file mode 100644
index 0000000000..b432506b12
--- /dev/null
+++ b/third_party/libwebrtc/audio/voip/test/voip_core_unittest.cc
@@ -0,0 +1,193 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio/voip/voip_core.h"
+
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "api/audio_codecs/builtin_audio_encoder_factory.h"
+#include "api/task_queue/default_task_queue_factory.h"
+#include "modules/audio_device/include/mock_audio_device.h"
+#include "modules/audio_processing/include/mock_audio_processing.h"
+#include "test/gtest.h"
+#include "test/mock_transport.h"
+#include "test/run_loop.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::NiceMock;
+using ::testing::Return;
+
+constexpr int kPcmuPayload = 0;
+constexpr int kPcmuSampleRateHz = 8000;
+constexpr int kDtmfEventDurationMs = 1000;
+constexpr DtmfEvent kDtmfEventCode = DtmfEvent::kDigitZero;
+
+class VoipCoreTest : public ::testing::Test {
+ public:
+ const SdpAudioFormat kPcmuFormat = {"pcmu", 8000, 1};
+
+ VoipCoreTest() { audio_device_ = test::MockAudioDeviceModule::CreateNice(); }
+
+ void SetUp() override {
+ auto encoder_factory = CreateBuiltinAudioEncoderFactory();
+ auto decoder_factory = CreateBuiltinAudioDecoderFactory();
+ rtc::scoped_refptr<AudioProcessing> audio_processing =
+ rtc::make_ref_counted<NiceMock<test::MockAudioProcessing>>();
+
+ voip_core_ = std::make_unique<VoipCore>(
+ std::move(encoder_factory), std::move(decoder_factory),
+ CreateDefaultTaskQueueFactory(), audio_device_,
+ std::move(audio_processing));
+ }
+
+ test::RunLoop run_loop_;
+ std::unique_ptr<VoipCore> voip_core_;
+ NiceMock<MockTransport> transport_;
+ rtc::scoped_refptr<test::MockAudioDeviceModule> audio_device_;
+};
+
+// Validate the expected API calls involving VoipCore. Some verification is
+// done by checking the mock audio device.
+TEST_F(VoipCoreTest, BasicVoipCoreOperation) {
+ // Program mock as non-operational and ready to start.
+ EXPECT_CALL(*audio_device_, Recording()).WillOnce(Return(false));
+ EXPECT_CALL(*audio_device_, Playing()).WillOnce(Return(false));
+ EXPECT_CALL(*audio_device_, InitRecording()).WillOnce(Return(0));
+ EXPECT_CALL(*audio_device_, InitPlayout()).WillOnce(Return(0));
+ EXPECT_CALL(*audio_device_, StartRecording()).WillOnce(Return(0));
+ EXPECT_CALL(*audio_device_, StartPlayout()).WillOnce(Return(0));
+
+ auto channel = voip_core_->CreateChannel(&transport_, 0xdeadc0de);
+
+ EXPECT_EQ(voip_core_->SetSendCodec(channel, kPcmuPayload, kPcmuFormat),
+ VoipResult::kOk);
+ EXPECT_EQ(
+ voip_core_->SetReceiveCodecs(channel, {{kPcmuPayload, kPcmuFormat}}),
+ VoipResult::kOk);
+
+ EXPECT_EQ(voip_core_->StartSend(channel), VoipResult::kOk);
+ EXPECT_EQ(voip_core_->StartPlayout(channel), VoipResult::kOk);
+
+ EXPECT_EQ(voip_core_->RegisterTelephoneEventType(channel, kPcmuPayload,
+ kPcmuSampleRateHz),
+ VoipResult::kOk);
+
+ EXPECT_EQ(
+ voip_core_->SendDtmfEvent(channel, kDtmfEventCode, kDtmfEventDurationMs),
+ VoipResult::kOk);
+
+ // Program mock as operational and ready to be stopped.
+ EXPECT_CALL(*audio_device_, Recording()).WillOnce(Return(true));
+ EXPECT_CALL(*audio_device_, Playing()).WillOnce(Return(true));
+ EXPECT_CALL(*audio_device_, StopRecording()).WillOnce(Return(0));
+ EXPECT_CALL(*audio_device_, StopPlayout()).WillOnce(Return(0));
+
+ EXPECT_EQ(voip_core_->StopSend(channel), VoipResult::kOk);
+ EXPECT_EQ(voip_core_->StopPlayout(channel), VoipResult::kOk);
+ EXPECT_EQ(voip_core_->ReleaseChannel(channel), VoipResult::kOk);
+}
+
+TEST_F(VoipCoreTest, ExpectFailToUseReleasedChannelId) {
+ auto channel = voip_core_->CreateChannel(&transport_, 0xdeadc0de);
+
+ // Release right after creation.
+ EXPECT_EQ(voip_core_->ReleaseChannel(channel), VoipResult::kOk);
+
+ // Now use released channel.
+
+ EXPECT_EQ(voip_core_->SetSendCodec(channel, kPcmuPayload, kPcmuFormat),
+ VoipResult::kInvalidArgument);
+ EXPECT_EQ(
+ voip_core_->SetReceiveCodecs(channel, {{kPcmuPayload, kPcmuFormat}}),
+ VoipResult::kInvalidArgument);
+ EXPECT_EQ(voip_core_->RegisterTelephoneEventType(channel, kPcmuPayload,
+ kPcmuSampleRateHz),
+ VoipResult::kInvalidArgument);
+ EXPECT_EQ(voip_core_->StartSend(channel), VoipResult::kInvalidArgument);
+ EXPECT_EQ(voip_core_->StartPlayout(channel), VoipResult::kInvalidArgument);
+ EXPECT_EQ(
+ voip_core_->SendDtmfEvent(channel, kDtmfEventCode, kDtmfEventDurationMs),
+ VoipResult::kInvalidArgument);
+}
+
+TEST_F(VoipCoreTest, SendDtmfEventWithoutRegistering) {
+ // Program mock as non-operational and ready to start send.
+ EXPECT_CALL(*audio_device_, Recording()).WillOnce(Return(false));
+ EXPECT_CALL(*audio_device_, InitRecording()).WillOnce(Return(0));
+ EXPECT_CALL(*audio_device_, StartRecording()).WillOnce(Return(0));
+
+ auto channel = voip_core_->CreateChannel(&transport_, 0xdeadc0de);
+
+ EXPECT_EQ(voip_core_->SetSendCodec(channel, kPcmuPayload, kPcmuFormat),
+ VoipResult::kOk);
+
+ EXPECT_EQ(voip_core_->StartSend(channel), VoipResult::kOk);
+ // Send a DTMF event without registering it beforehand; the payload
+ // type is therefore not set and kFailedPrecondition is expected.
+ EXPECT_EQ(
+ voip_core_->SendDtmfEvent(channel, kDtmfEventCode, kDtmfEventDurationMs),
+ VoipResult::kFailedPrecondition);
+
+ // Program mock as sending and ready to be stopped.
+ EXPECT_CALL(*audio_device_, Recording()).WillOnce(Return(true));
+ EXPECT_CALL(*audio_device_, StopRecording()).WillOnce(Return(0));
+
+ EXPECT_EQ(voip_core_->StopSend(channel), VoipResult::kOk);
+ EXPECT_EQ(voip_core_->ReleaseChannel(channel), VoipResult::kOk);
+}
+
+TEST_F(VoipCoreTest, SendDtmfEventWithoutStartSend) {
+ auto channel = voip_core_->CreateChannel(&transport_, 0xdeadc0de);
+
+ EXPECT_EQ(voip_core_->RegisterTelephoneEventType(channel, kPcmuPayload,
+ kPcmuSampleRateHz),
+ VoipResult::kOk);
+
+ // Send a DTMF event without calling StartSend beforehand; DTMF events
+ // therefore cannot be sent and kFailedPrecondition is expected.
+ EXPECT_EQ(
+ voip_core_->SendDtmfEvent(channel, kDtmfEventCode, kDtmfEventDurationMs),
+ VoipResult::kFailedPrecondition);
+
+ EXPECT_EQ(voip_core_->ReleaseChannel(channel), VoipResult::kOk);
+}
+
+TEST_F(VoipCoreTest, StartSendAndPlayoutWithoutSettingCodec) {
+ auto channel = voip_core_->CreateChannel(&transport_, 0xdeadc0de);
+
+ // Call StartSend and StartPlayout without setting the send/receive
+ // codecs. The code should see that the codecs aren't set and return
+ // kFailedPrecondition.
+ EXPECT_EQ(voip_core_->StartSend(channel), VoipResult::kFailedPrecondition);
+ EXPECT_EQ(voip_core_->StartPlayout(channel), VoipResult::kFailedPrecondition);
+
+ EXPECT_EQ(voip_core_->ReleaseChannel(channel), VoipResult::kOk);
+}
+
+TEST_F(VoipCoreTest, StopSendAndPlayoutWithoutStarting) {
+ auto channel = voip_core_->CreateChannel(&transport_, 0xdeadc0de);
+
+ EXPECT_EQ(voip_core_->SetSendCodec(channel, kPcmuPayload, kPcmuFormat),
+ VoipResult::kOk);
+ EXPECT_EQ(
+ voip_core_->SetReceiveCodecs(channel, {{kPcmuPayload, kPcmuFormat}}),
+ VoipResult::kOk);
+
+ // Call StopSend and StopPlayout without starting them in the first
+ // place. The code should see that they are already in the stopped
+ // state and return kOk.
+ EXPECT_EQ(voip_core_->StopSend(channel), VoipResult::kOk);
+ EXPECT_EQ(voip_core_->StopPlayout(channel), VoipResult::kOk);
+
+ EXPECT_EQ(voip_core_->ReleaseChannel(channel), VoipResult::kOk);
+}
+
+} // namespace
+} // namespace webrtc