author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 14:29:10 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 14:29:10 +0000
commit     2aa4a82499d4becd2284cdb482213d541b8804dd
tree       b80bf8bf13c3766139fbacc530efd0dd9d54394c  /third_party/libwebrtc/webrtc/video
parent     Initial commit.
Adding upstream version 86.0.1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/libwebrtc/webrtc/video')
-rw-r--r--  third_party/libwebrtc/webrtc/video/BUILD.gn  317
-rw-r--r--  third_party/libwebrtc/webrtc/video/DEPS  19
-rw-r--r--  third_party/libwebrtc/webrtc/video/OWNERS  9
-rw-r--r--  third_party/libwebrtc/webrtc/video/call_stats.cc  189
-rw-r--r--  third_party/libwebrtc/webrtc/video/call_stats.h  88
-rw-r--r--  third_party/libwebrtc/webrtc/video/call_stats_unittest.cc  221
-rw-r--r--  third_party/libwebrtc/webrtc/video/encoder_rtcp_feedback.cc  66
-rw-r--r--  third_party/libwebrtc/webrtc/video/encoder_rtcp_feedback.h  45
-rw-r--r--  third_party/libwebrtc/webrtc/video/encoder_rtcp_feedback_unittest.cc  80
-rw-r--r--  third_party/libwebrtc/webrtc/video/end_to_end_tests.cc  5112
-rw-r--r--  third_party/libwebrtc/webrtc/video/full_stack_tests.cc  853
-rwxr-xr-x  third_party/libwebrtc/webrtc/video/full_stack_tests_plot.py  414
-rw-r--r--  third_party/libwebrtc/webrtc/video/overuse_frame_detector.cc  564
-rw-r--r--  third_party/libwebrtc/webrtc/video/overuse_frame_detector.h  164
-rw-r--r--  third_party/libwebrtc/webrtc/video/overuse_frame_detector_unittest.cc  483
-rw-r--r--  third_party/libwebrtc/webrtc/video/payload_router.cc  255
-rw-r--r--  third_party/libwebrtc/webrtc/video/payload_router.h  76
-rw-r--r--  third_party/libwebrtc/webrtc/video/payload_router_unittest.cc  504
-rw-r--r--  third_party/libwebrtc/webrtc/video/picture_id_tests.cc  382
-rw-r--r--  third_party/libwebrtc/webrtc/video/quality_threshold.cc  104
-rw-r--r--  third_party/libwebrtc/webrtc/video/quality_threshold.h  52
-rw-r--r--  third_party/libwebrtc/webrtc/video/quality_threshold_unittest.cc  133
-rw-r--r--  third_party/libwebrtc/webrtc/video/receive_statistics_proxy.cc  827
-rw-r--r--  third_party/libwebrtc/webrtc/video/receive_statistics_proxy.h  193
-rw-r--r--  third_party/libwebrtc/webrtc/video/receive_statistics_proxy_unittest.cc  1002
-rw-r--r--  third_party/libwebrtc/webrtc/video/replay.cc  349
-rw-r--r--  third_party/libwebrtc/webrtc/video/report_block_stats.cc  113
-rw-r--r--  third_party/libwebrtc/webrtc/video/report_block_stats.h  62
-rw-r--r--  third_party/libwebrtc/webrtc/video/report_block_stats_unittest.cc  146
-rw-r--r--  third_party/libwebrtc/webrtc/video/rtp_streams_synchronizer.cc  154
-rw-r--r--  third_party/libwebrtc/webrtc/video/rtp_streams_synchronizer.h  66
-rw-r--r--  third_party/libwebrtc/webrtc/video/rtp_video_stream_receiver.cc  704
-rw-r--r--  third_party/libwebrtc/webrtc/video/rtp_video_stream_receiver.h  216
-rw-r--r--  third_party/libwebrtc/webrtc/video/rtp_video_stream_receiver_unittest.cc  476
-rw-r--r--  third_party/libwebrtc/webrtc/video/screenshare_loopback.cc  336
-rw-r--r--  third_party/libwebrtc/webrtc/video/send_delay_stats.cc  120
-rw-r--r--  third_party/libwebrtc/webrtc/video/send_delay_stats.h  86
-rw-r--r--  third_party/libwebrtc/webrtc/video/send_delay_stats_unittest.cc  127
-rw-r--r--  third_party/libwebrtc/webrtc/video/send_statistics_proxy.cc  1145
-rw-r--r--  third_party/libwebrtc/webrtc/video/send_statistics_proxy.h  298
-rw-r--r--  third_party/libwebrtc/webrtc/video/send_statistics_proxy_unittest.cc  2032
-rw-r--r--  third_party/libwebrtc/webrtc/video/stats_counter.cc  462
-rw-r--r--  third_party/libwebrtc/webrtc/video/stats_counter.h  293
-rw-r--r--  third_party/libwebrtc/webrtc/video/stats_counter_unittest.cc  603
-rw-r--r--  third_party/libwebrtc/webrtc/video/stream_synchronization.cc  199
-rw-r--r--  third_party/libwebrtc/webrtc/video/stream_synchronization.h  63
-rw-r--r--  third_party/libwebrtc/webrtc/video/stream_synchronization_unittest.cc  568
-rw-r--r--  third_party/libwebrtc/webrtc/video/transport_adapter.cc  48
-rw-r--r--  third_party/libwebrtc/webrtc/video/transport_adapter.h  40
-rw-r--r--  third_party/libwebrtc/webrtc/video/video_gn/moz.build  250
-rw-r--r--  third_party/libwebrtc/webrtc/video/video_loopback.cc  334
-rw-r--r--  third_party/libwebrtc/webrtc/video/video_quality_test.cc  2177
-rw-r--r--  third_party/libwebrtc/webrtc/video/video_quality_test.h  168
-rw-r--r--  third_party/libwebrtc/webrtc/video/video_receive_stream.cc  473
-rw-r--r--  third_party/libwebrtc/webrtc/video/video_receive_stream.h  168
-rw-r--r--  third_party/libwebrtc/webrtc/video/video_receive_stream_unittest.cc  140
-rw-r--r--  third_party/libwebrtc/webrtc/video/video_send_stream.cc  1336
-rw-r--r--  third_party/libwebrtc/webrtc/video/video_send_stream.h  115
-rw-r--r--  third_party/libwebrtc/webrtc/video/video_send_stream_tests.cc  3619
-rw-r--r--  third_party/libwebrtc/webrtc/video/video_stream_decoder.cc  135
-rw-r--r--  third_party/libwebrtc/webrtc/video/video_stream_decoder.h  105
-rw-r--r--  third_party/libwebrtc/webrtc/video/video_stream_encoder.cc  1273
-rw-r--r--  third_party/libwebrtc/webrtc/video/video_stream_encoder.h  306
-rw-r--r--  third_party/libwebrtc/webrtc/video/video_stream_encoder_unittest.cc  3280
64 files changed, 34737 insertions, 0 deletions
diff --git a/third_party/libwebrtc/webrtc/video/BUILD.gn b/third_party/libwebrtc/webrtc/video/BUILD.gn
new file mode 100644
index 0000000000..7dd6c040e1
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/BUILD.gn
@@ -0,0 +1,317 @@
+# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../webrtc.gni")
+
+rtc_static_library("video") {
+ sources = [
+ "call_stats.cc",
+ "call_stats.h",
+ "encoder_rtcp_feedback.cc",
+ "encoder_rtcp_feedback.h",
+ "overuse_frame_detector.cc",
+ "overuse_frame_detector.h",
+ "payload_router.cc",
+ "payload_router.h",
+ "quality_threshold.cc",
+ "quality_threshold.h",
+ "receive_statistics_proxy.cc",
+ "receive_statistics_proxy.h",
+ "report_block_stats.cc",
+ "report_block_stats.h",
+ "rtp_streams_synchronizer.cc",
+ "rtp_streams_synchronizer.h",
+ "rtp_video_stream_receiver.cc",
+ "rtp_video_stream_receiver.h",
+ "send_delay_stats.cc",
+ "send_delay_stats.h",
+ "send_statistics_proxy.cc",
+ "send_statistics_proxy.h",
+ "stats_counter.cc",
+ "stats_counter.h",
+ "stream_synchronization.cc",
+ "stream_synchronization.h",
+ "transport_adapter.cc",
+ "transport_adapter.h",
+ "video_receive_stream.cc",
+ "video_receive_stream.h",
+ "video_send_stream.cc",
+ "video_send_stream.h",
+ "video_stream_decoder.cc",
+ "video_stream_decoder.h",
+ "video_stream_encoder.cc",
+ "video_stream_encoder.h",
+ ]
+
+ if (!build_with_chromium && is_clang) {
+ # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
+ suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+ }
+
+ deps = [
+ "..:webrtc_common",
+ "../api:optional",
+ "../api:transport_api",
+ "../api:video_frame_api_i420",
+ "../api/video_codecs:video_codecs_api",
+ "../call:bitrate_allocator",
+ "../call:call_interfaces",
+ "../call:rtp_interfaces",
+ "../call:video_stream_api",
+
+ # For RtxReceiveStream.
+ "../call:rtp_receiver",
+ "../common_video",
+ "../logging:rtc_event_log_api",
+ "../modules:module_api",
+ "../modules/bitrate_controller",
+ "../modules/congestion_controller",
+ "../modules/pacing",
+ "../modules/remote_bitrate_estimator",
+ "../modules/rtp_rtcp",
+ "../modules/utility",
+ "../modules/video_coding",
+ "../modules/video_coding:video_coding_utility",
+ "../modules/video_coding:webrtc_vp8",
+ "../modules/video_processing",
+ "../rtc_base:rtc_base_approved",
+ "../rtc_base:rtc_numerics",
+ "../rtc_base:rtc_task_queue",
+ "../rtc_base:sequenced_task_checker",
+ "../rtc_base:weak_ptr",
+ "../system_wrappers",
+ "../voice_engine",
+ ]
+
+ if (!build_with_mozilla) {
+ deps += [ "../media:rtc_media_base" ]
+ }
+}
+
+if (rtc_include_tests) {
+ rtc_source_set("video_quality_test") {
+ testonly = true
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
+ sources = [
+ "video_quality_test.cc",
+ "video_quality_test.h",
+ ]
+ deps = [
+ "../api:optional",
+ "../call:call_interfaces",
+ "../common_video",
+ "../logging:rtc_event_log_api",
+ "../media:rtc_media",
+ "../media:rtc_media_base",
+ "../modules/audio_mixer:audio_mixer_impl",
+ "../modules/rtp_rtcp",
+ "../modules/video_coding:webrtc_h264",
+ "../modules/video_coding:webrtc_vp8",
+ "../modules/video_coding:webrtc_vp9",
+ "../rtc_base:rtc_base_approved",
+ "../rtc_base:rtc_base_tests_utils",
+ "../rtc_base:rtc_task_queue",
+ "../system_wrappers",
+ "../test:rtp_test_utils",
+ "../test:test_common",
+ "../test:test_renderer",
+ "../test:test_renderer",
+ "../test:test_support",
+ "../test:test_support_test_artifacts",
+ "../test:video_test_common",
+ "../test:video_test_common",
+ "../test:video_test_support",
+ "../voice_engine",
+ "//testing/gtest",
+ ]
+ if (!build_with_chromium && is_clang) {
+ # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
+ suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+ }
+ }
+
+ rtc_source_set("video_full_stack_tests") {
+ testonly = true
+
+ sources = [
+ "full_stack_tests.cc",
+ ]
+ deps = [
+ ":video_quality_test",
+ "../modules/pacing:pacing",
+ "../test:field_trial",
+ "../test:test_common",
+ "../test:test_support",
+ "//testing/gtest",
+ ]
+ if (!build_with_chromium && is_clang) {
+ # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
+ suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+ }
+ if (rtc_use_h264) {
+ defines = [ "WEBRTC_USE_H264" ]
+ }
+ }
+
+ rtc_executable("video_loopback") {
+ testonly = true
+ sources = [
+ "video_loopback.cc",
+ ]
+ deps = [
+ ":video_quality_test",
+ "../rtc_base:rtc_base_approved",
+ "../system_wrappers:metrics_default",
+ "../test:field_trial",
+ "../test:run_test",
+ "../test:run_test_interface",
+ "../test:test_common",
+ "../test:test_renderer",
+ "../test:test_support",
+ "//testing/gmock",
+ "//testing/gtest",
+ ]
+ if (!build_with_chromium && is_clang) {
+ # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
+ suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+ }
+ }
+
+ rtc_executable("screenshare_loopback") {
+ testonly = true
+ sources = [
+ "screenshare_loopback.cc",
+ ]
+
+ deps = [
+ ":video_quality_test",
+ "../rtc_base:rtc_base_approved",
+ "../system_wrappers:metrics_default",
+ "../test:field_trial",
+ "../test:run_test",
+ "../test:run_test_interface",
+ "../test:test_common",
+ "../test:test_renderer",
+ "../test:test_support",
+ ]
+ if (!build_with_chromium && is_clang) {
+ # Suppress warnings from Chrome's Clang plugins.
+ # See http://code.google.com/p/webrtc/issues/detail?id=163 for details.
+ suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+ }
+ }
+
+ rtc_executable("video_replay") {
+ testonly = true
+ sources = [
+ "replay.cc",
+ ]
+ deps = [
+ "..:webrtc_common",
+ "../api/video_codecs:video_codecs_api",
+ "../call:call_interfaces",
+ "../common_video",
+ "../logging:rtc_event_log_api",
+ "../modules/rtp_rtcp",
+ "../rtc_base:rtc_base_approved",
+ "../system_wrappers",
+ "../system_wrappers:metrics_default",
+ "../test:field_trial",
+ "../test:rtp_test_utils",
+ "../test:run_test",
+ "../test:run_test_interface",
+ "../test:test_common",
+ "../test:test_renderer",
+ "../test:test_support",
+ "../test:video_test_common",
+ "../test:video_test_support",
+ ]
+ if (!build_with_chromium && is_clang) {
+ # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
+ suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+ }
+ }
+
+ # TODO(pbos): Rename test suite.
+ rtc_source_set("video_tests") {
+ testonly = true
+
+ defines = []
+ sources = [
+ "call_stats_unittest.cc",
+ "encoder_rtcp_feedback_unittest.cc",
+ "end_to_end_tests.cc",
+ "overuse_frame_detector_unittest.cc",
+ "payload_router_unittest.cc",
+ "picture_id_tests.cc",
+ "quality_threshold_unittest.cc",
+ "receive_statistics_proxy_unittest.cc",
+ "report_block_stats_unittest.cc",
+ "rtp_video_stream_receiver_unittest.cc",
+ "send_delay_stats_unittest.cc",
+ "send_statistics_proxy_unittest.cc",
+ "stats_counter_unittest.cc",
+ "stream_synchronization_unittest.cc",
+ "video_receive_stream_unittest.cc",
+ "video_send_stream_tests.cc",
+ "video_stream_encoder_unittest.cc",
+ ]
+ deps = [
+ ":video",
+ "../api:optional",
+ "../api:video_frame_api",
+ "../api:video_frame_api_i420",
+ "../api/video_codecs:video_codecs_api",
+ "../call:call_interfaces",
+ "../call:mock_rtp_interfaces",
+ "../call:rtp_receiver",
+ "../call:rtp_sender",
+ "../call:video_stream_api",
+ "../common_video",
+ "../logging:rtc_event_log_api",
+ "../media:rtc_media",
+ "../media:rtc_media_base",
+ "../media:rtc_media_tests_utils",
+ "../modules:module_api",
+ "../modules/pacing",
+ "../modules/rtp_rtcp",
+ "../modules/rtp_rtcp:mock_rtp_rtcp",
+ "../modules/utility",
+ "../modules/video_coding",
+ "../modules/video_coding:video_coding_utility",
+ "../modules/video_coding:webrtc_h264",
+ "../modules/video_coding:webrtc_vp8",
+ "../modules/video_coding:webrtc_vp9",
+ "../rtc_base:rtc_base_approved",
+ "../rtc_base:rtc_base_tests_utils",
+ "../rtc_base:rtc_numerics",
+ "../system_wrappers",
+ "../system_wrappers:field_trial_default",
+ "../system_wrappers:metrics_api",
+ "../system_wrappers:metrics_default",
+ "../test:direct_transport",
+ "../test:field_trial",
+ "../test:rtp_test_utils",
+ "../test:test_common",
+ "../test:test_support",
+ "../test:video_test_common",
+ "//testing/gmock",
+ "//testing/gtest",
+ ]
+ if (!build_with_chromium && is_clang) {
+ # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
+ suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+ }
+ if (rtc_use_h264) {
+ defines += [ "WEBRTC_USE_H264" ]
+ }
+ if (!build_with_mozilla) {
+ deps += [ "../media:rtc_media_base" ]
+ }
+ }
+}
diff --git a/third_party/libwebrtc/webrtc/video/DEPS b/third_party/libwebrtc/webrtc/video/DEPS
new file mode 100644
index 0000000000..b94455edd0
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/DEPS
@@ -0,0 +1,19 @@
+include_rules = [
+ "+call",
+ "+common_video",
+ "+logging/rtc_event_log",
+ "+media/base",
+ "+media/engine",
+ "+modules/audio_mixer",
+ "+modules/bitrate_controller",
+ "+modules/congestion_controller",
+ "+modules/pacing",
+ "+modules/remote_bitrate_estimator",
+ "+modules/rtp_rtcp",
+ "+modules/utility",
+ "+modules/video_coding",
+ "+modules/video_capture",
+ "+modules/video_processing",
+ "+system_wrappers",
+ "+voice_engine",
+]
diff --git a/third_party/libwebrtc/webrtc/video/OWNERS b/third_party/libwebrtc/webrtc/video/OWNERS
new file mode 100644
index 0000000000..39f4c7196c
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/OWNERS
@@ -0,0 +1,9 @@
+mflodman@webrtc.org
+stefan@webrtc.org
+asapersson@webrtc.org
+sprang@webrtc.org
+
+# These are for the common case of adding or renaming files. If you're making
+# structural changes, please get a review from a reviewer in this file.
+per-file *.gn=*
+per-file *.gni=*
diff --git a/third_party/libwebrtc/webrtc/video/call_stats.cc b/third_party/libwebrtc/webrtc/video/call_stats.cc
new file mode 100644
index 0000000000..c9f019929f
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/call_stats.cc
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/call_stats.h"
+
+#include <algorithm>
+
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/constructormagic.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+namespace {
+// Time interval for updating the observers.
+const int64_t kUpdateIntervalMs = 1000;
+// Weight factor to apply to the average rtt.
+const float kWeightFactor = 0.3f;
+
+void RemoveOldReports(int64_t now, std::list<CallStats::RttTime>* reports) {
+ // A rtt report is considered valid for this long.
+ const int64_t kRttTimeoutMs = 1500;
+ while (!reports->empty() &&
+ (now - reports->front().time) > kRttTimeoutMs) {
+ reports->pop_front();
+ }
+}
+
+int64_t GetMaxRttMs(std::list<CallStats::RttTime>* reports) {
+ if (reports->empty())
+ return -1;
+ int64_t max_rtt_ms = 0;
+ for (const CallStats::RttTime& rtt_time : *reports)
+ max_rtt_ms = std::max(rtt_time.rtt, max_rtt_ms);
+ return max_rtt_ms;
+}
+
+int64_t GetAvgRttMs(std::list<CallStats::RttTime>* reports) {
+ if (reports->empty()) {
+ return -1;
+ }
+ int64_t sum = 0;
+ for (std::list<CallStats::RttTime>::const_iterator it = reports->begin();
+ it != reports->end(); ++it) {
+ sum += it->rtt;
+ }
+ return sum / reports->size();
+}
+
+void UpdateAvgRttMs(std::list<CallStats::RttTime>* reports, int64_t* avg_rtt) {
+ int64_t cur_rtt_ms = GetAvgRttMs(reports);
+ if (cur_rtt_ms == -1) {
+ // Reset.
+ *avg_rtt = -1;
+ return;
+ }
+ if (*avg_rtt == -1) {
+ // Initialize.
+ *avg_rtt = cur_rtt_ms;
+ return;
+ }
+ *avg_rtt = *avg_rtt * (1.0f - kWeightFactor) + cur_rtt_ms * kWeightFactor;
+}
+} // namespace
+
+class RtcpObserver : public RtcpRttStats {
+ public:
+ explicit RtcpObserver(CallStats* owner) : owner_(owner) {}
+ virtual ~RtcpObserver() {}
+
+ virtual void OnRttUpdate(int64_t rtt) {
+ owner_->OnRttUpdate(rtt);
+ }
+
+ // Returns the average RTT.
+ virtual int64_t LastProcessedRtt() const {
+ return owner_->avg_rtt_ms();
+ }
+
+ private:
+ CallStats* owner_;
+
+ RTC_DISALLOW_COPY_AND_ASSIGN(RtcpObserver);
+};
+
+CallStats::CallStats(Clock* clock)
+ : clock_(clock),
+ rtcp_rtt_stats_(new RtcpObserver(this)),
+ last_process_time_(clock_->TimeInMilliseconds()),
+ max_rtt_ms_(-1),
+ avg_rtt_ms_(-1),
+ sum_avg_rtt_ms_(0),
+ num_avg_rtt_(0),
+ time_of_first_rtt_ms_(-1) {}
+
+CallStats::~CallStats() {
+ RTC_DCHECK(observers_.empty());
+ UpdateHistograms();
+}
+
+int64_t CallStats::TimeUntilNextProcess() {
+ return last_process_time_ + kUpdateIntervalMs - clock_->TimeInMilliseconds();
+}
+
+void CallStats::Process() {
+ rtc::CritScope cs(&crit_);
+ int64_t now = clock_->TimeInMilliseconds();
+ if (now < last_process_time_ + kUpdateIntervalMs)
+ return;
+
+ last_process_time_ = now;
+
+ RemoveOldReports(now, &reports_);
+ max_rtt_ms_ = GetMaxRttMs(&reports_);
+ UpdateAvgRttMs(&reports_, &avg_rtt_ms_);
+
+ // If there is a valid rtt, update all observers with the max rtt.
+ if (max_rtt_ms_ >= 0) {
+ RTC_DCHECK_GE(avg_rtt_ms_, 0);
+ for (std::list<CallStatsObserver*>::iterator it = observers_.begin();
+ it != observers_.end(); ++it) {
+ (*it)->OnRttUpdate(avg_rtt_ms_, max_rtt_ms_);
+ }
+ // Sum for Histogram of average RTT reported over the entire call.
+ sum_avg_rtt_ms_ += avg_rtt_ms_;
+ ++num_avg_rtt_;
+ }
+}
+
+int64_t CallStats::avg_rtt_ms() const {
+ rtc::CritScope cs(&crit_);
+ return avg_rtt_ms_;
+}
+
+RtcpRttStats* CallStats::rtcp_rtt_stats() const {
+ return rtcp_rtt_stats_.get();
+}
+
+void CallStats::RegisterStatsObserver(CallStatsObserver* observer) {
+ rtc::CritScope cs(&crit_);
+ for (std::list<CallStatsObserver*>::iterator it = observers_.begin();
+ it != observers_.end(); ++it) {
+ if (*it == observer)
+ return;
+ }
+ observers_.push_back(observer);
+}
+
+void CallStats::DeregisterStatsObserver(CallStatsObserver* observer) {
+ rtc::CritScope cs(&crit_);
+ for (std::list<CallStatsObserver*>::iterator it = observers_.begin();
+ it != observers_.end(); ++it) {
+ if (*it == observer) {
+ observers_.erase(it);
+ return;
+ }
+ }
+}
+
+void CallStats::OnRttUpdate(int64_t rtt) {
+ rtc::CritScope cs(&crit_);
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ reports_.push_back(RttTime(rtt, now_ms));
+ if (time_of_first_rtt_ms_ == -1)
+ time_of_first_rtt_ms_ = now_ms;
+}
+
+void CallStats::UpdateHistograms() {
+ rtc::CritScope cs(&crit_);
+ if (time_of_first_rtt_ms_ == -1 || num_avg_rtt_ < 1)
+ return;
+
+ int64_t elapsed_sec =
+ (clock_->TimeInMilliseconds() - time_of_first_rtt_ms_) / 1000;
+ if (elapsed_sec >= metrics::kMinRunTimeInSeconds) {
+ int64_t avg_rtt_ms = (sum_avg_rtt_ms_ + num_avg_rtt_ / 2) / num_avg_rtt_;
+ RTC_HISTOGRAM_COUNTS_10000(
+ "WebRTC.Video.AverageRoundTripTimeInMilliseconds", avg_rtt_ms);
+ }
+}
+
+} // namespace webrtc
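
The smoothing in UpdateAvgRttMs() above is a plain exponentially weighted moving average: each Process() tick computes the mean RTT over the reports still inside the 1.5 s validity window, then folds it into the long-running average with weight kWeightFactor = 0.3, with -1 standing for "no valid estimate". A minimal standalone restatement of that update rule (the class and method names here are illustrative, not part of the WebRTC API):

#include <cstdint>

// Sketch of CallStats' RTT smoothing: avg <- avg * (1 - w) + current * w,
// with w = 0.3 and -1 meaning "no valid estimate yet".
class SmoothedRtt {
 public:
  void Update(int64_t window_avg_rtt_ms) {
    if (window_avg_rtt_ms == -1) {
      avg_rtt_ms_ = -1;  // No reports in the window: reset.
    } else if (avg_rtt_ms_ == -1) {
      avg_rtt_ms_ = window_avg_rtt_ms;  // First valid sample: initialize.
    } else {
      avg_rtt_ms_ = static_cast<int64_t>(avg_rtt_ms_ * (1.0f - kWeight) +
                                         window_avg_rtt_ms * kWeight);
    }
  }
  int64_t value() const { return avg_rtt_ms_; }

 private:
  static constexpr float kWeight = 0.3f;  // kWeightFactor in call_stats.cc.
  int64_t avg_rtt_ms_ = -1;
};

This is what the ChangeRtt unit test below exercises: after an average of 100 ms, a window containing reports of 100 ms and 120 ms has mean 110 ms, and 0.7 * 100 + 0.3 * 110 = 103, the test's kAvgRtt1.
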
diff --git a/third_party/libwebrtc/webrtc/video/call_stats.h b/third_party/libwebrtc/webrtc/video/call_stats.h
new file mode 100644
index 0000000000..af5c45c996
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/call_stats.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_CALL_STATS_H_
+#define VIDEO_CALL_STATS_H_
+
+#include <list>
+#include <memory>
+
+#include "modules/include/module.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/criticalsection.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+class CallStatsObserver;
+class RtcpRttStats;
+
+// CallStats keeps track of statistics for a call.
+class CallStats : public Module {
+ public:
+ friend class RtcpObserver;
+
+ explicit CallStats(Clock* clock);
+ ~CallStats();
+
+ // Implements Module, to use the process thread.
+ int64_t TimeUntilNextProcess() override;
+ void Process() override;
+
+ // Returns a RtcpRttStats to register at a statistics provider. The object
+ // has the same lifetime as the CallStats instance.
+ RtcpRttStats* rtcp_rtt_stats() const;
+
+ // Registers/deregisters a new observer to receive statistics updates.
+ void RegisterStatsObserver(CallStatsObserver* observer);
+ void DeregisterStatsObserver(CallStatsObserver* observer);
+
+ // Helper struct keeping track of the time an rtt value is reported.
+ struct RttTime {
+ RttTime(int64_t new_rtt, int64_t rtt_time)
+ : rtt(new_rtt), time(rtt_time) {}
+ const int64_t rtt;
+ const int64_t time;
+ };
+
+ protected:
+ void OnRttUpdate(int64_t rtt);
+
+ int64_t avg_rtt_ms() const;
+
+ private:
+ void UpdateHistograms();
+
+ Clock* const clock_;
+ // Protecting all members.
+ rtc::CriticalSection crit_;
+ // Observer receiving statistics updates.
+ std::unique_ptr<RtcpRttStats> rtcp_rtt_stats_;
+ // The last time 'Process' resulted in a statistics update.
+ int64_t last_process_time_;
+ // The last RTT in the statistics update (-1 if there is no valid estimate).
+ int64_t max_rtt_ms_;
+ int64_t avg_rtt_ms_;
+ int64_t sum_avg_rtt_ms_ RTC_GUARDED_BY(crit_);
+ int64_t num_avg_rtt_ RTC_GUARDED_BY(crit_);
+ int64_t time_of_first_rtt_ms_ RTC_GUARDED_BY(crit_);
+
+ // All Rtt reports within valid time interval, oldest first.
+ std::list<RttTime> reports_;
+
+ // Observers getting stats reports.
+ std::list<CallStatsObserver*> observers_;
+
+ RTC_DISALLOW_COPY_AND_ASSIGN(CallStats);
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_CALL_STATS_H_
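
CallStats implements the Module interface, so it is driven by a shared process thread rather than by timers of its own: the thread asks TimeUntilNextProcess() how long until the next update is due, sleeps roughly that long, then calls Process(), which re-checks the interval internally. A hedged sketch of that contract (DriveOnce is illustrative; the real driver is WebRTC's process thread):

#include <chrono>
#include <cstdint>
#include <thread>

// Minimal restatement of the Module contract that CallStats implements.
struct Module {
  virtual ~Module() = default;
  virtual int64_t TimeUntilNextProcess() = 0;
  virtual void Process() = 0;
};

// One iteration of a process-thread loop: sleep until the module is due,
// then let it update its state and notify its observers.
void DriveOnce(Module* module) {
  int64_t wait_ms = module->TimeUntilNextProcess();
  if (wait_ms > 0)
    std::this_thread::sleep_for(std::chrono::milliseconds(wait_ms));
  module->Process();
}

This is also why the unit tests below advance a SimulatedClock and call Process() directly: they stand in for the process thread.
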
diff --git a/third_party/libwebrtc/webrtc/video/call_stats_unittest.cc b/third_party/libwebrtc/webrtc/video/call_stats_unittest.cc
new file mode 100644
index 0000000000..989722d29f
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/call_stats_unittest.cc
@@ -0,0 +1,221 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "system_wrappers/include/metrics.h"
+#include "system_wrappers/include/metrics_default.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "video/call_stats.h"
+
+using ::testing::_;
+using ::testing::AnyNumber;
+using ::testing::Return;
+
+namespace webrtc {
+
+class MockStatsObserver : public CallStatsObserver {
+ public:
+ MockStatsObserver() {}
+ virtual ~MockStatsObserver() {}
+
+ MOCK_METHOD2(OnRttUpdate, void(int64_t, int64_t));
+};
+
+class CallStatsTest : public ::testing::Test {
+ public:
+ CallStatsTest() : fake_clock_(12345) {}
+
+ protected:
+ virtual void SetUp() { call_stats_.reset(new CallStats(&fake_clock_)); }
+ SimulatedClock fake_clock_;
+ std::unique_ptr<CallStats> call_stats_;
+};
+
+TEST_F(CallStatsTest, AddAndTriggerCallback) {
+ MockStatsObserver stats_observer;
+ RtcpRttStats* rtcp_rtt_stats = call_stats_->rtcp_rtt_stats();
+ call_stats_->RegisterStatsObserver(&stats_observer);
+ fake_clock_.AdvanceTimeMilliseconds(1000);
+ EXPECT_EQ(-1, rtcp_rtt_stats->LastProcessedRtt());
+
+ const int64_t kRtt = 25;
+ rtcp_rtt_stats->OnRttUpdate(kRtt);
+ EXPECT_CALL(stats_observer, OnRttUpdate(kRtt, kRtt)).Times(1);
+ call_stats_->Process();
+ EXPECT_EQ(kRtt, rtcp_rtt_stats->LastProcessedRtt());
+
+ const int64_t kRttTimeOutMs = 1500 + 10;
+ fake_clock_.AdvanceTimeMilliseconds(kRttTimeOutMs);
+ EXPECT_CALL(stats_observer, OnRttUpdate(_, _)).Times(0);
+ call_stats_->Process();
+ EXPECT_EQ(-1, rtcp_rtt_stats->LastProcessedRtt());
+
+ call_stats_->DeregisterStatsObserver(&stats_observer);
+}
+
+TEST_F(CallStatsTest, ProcessTime) {
+ MockStatsObserver stats_observer;
+ call_stats_->RegisterStatsObserver(&stats_observer);
+ RtcpRttStats* rtcp_rtt_stats = call_stats_->rtcp_rtt_stats();
+ rtcp_rtt_stats->OnRttUpdate(100);
+
+ // Time isn't updated yet.
+ EXPECT_CALL(stats_observer, OnRttUpdate(_, _)).Times(0);
+ call_stats_->Process();
+
+ // Advance clock and verify we get an update.
+ fake_clock_.AdvanceTimeMilliseconds(1000);
+ EXPECT_CALL(stats_observer, OnRttUpdate(_, _)).Times(1);
+ call_stats_->Process();
+
+ // Advance clock just too little to get an update.
+ fake_clock_.AdvanceTimeMilliseconds(999);
+ rtcp_rtt_stats->OnRttUpdate(100);
+ EXPECT_CALL(stats_observer, OnRttUpdate(_, _)).Times(0);
+ call_stats_->Process();
+
+ // Advance enough to trigger a new update.
+ fake_clock_.AdvanceTimeMilliseconds(1);
+ EXPECT_CALL(stats_observer, OnRttUpdate(_, _)).Times(1);
+ call_stats_->Process();
+
+ call_stats_->DeregisterStatsObserver(&stats_observer);
+}
+
+// Verify all observers get correct estimates and observers can be added and
+// removed.
+TEST_F(CallStatsTest, MultipleObservers) {
+ MockStatsObserver stats_observer_1;
+ call_stats_->RegisterStatsObserver(&stats_observer_1);
+ // Add the second observer twice; there should still be only one report to
+ // the observer.
+ MockStatsObserver stats_observer_2;
+ call_stats_->RegisterStatsObserver(&stats_observer_2);
+ call_stats_->RegisterStatsObserver(&stats_observer_2);
+
+ RtcpRttStats* rtcp_rtt_stats = call_stats_->rtcp_rtt_stats();
+ const int64_t kRtt = 100;
+ rtcp_rtt_stats->OnRttUpdate(kRtt);
+
+ // Verify both observers are updated.
+ fake_clock_.AdvanceTimeMilliseconds(1000);
+ EXPECT_CALL(stats_observer_1, OnRttUpdate(kRtt, kRtt)).Times(1);
+ EXPECT_CALL(stats_observer_2, OnRttUpdate(kRtt, kRtt)).Times(1);
+ call_stats_->Process();
+
+ // Deregister the second observer and verify update is only sent to the first
+ // observer.
+ call_stats_->DeregisterStatsObserver(&stats_observer_2);
+ rtcp_rtt_stats->OnRttUpdate(kRtt);
+ fake_clock_.AdvanceTimeMilliseconds(1000);
+ EXPECT_CALL(stats_observer_1, OnRttUpdate(kRtt, kRtt)).Times(1);
+ EXPECT_CALL(stats_observer_2, OnRttUpdate(kRtt, kRtt)).Times(0);
+ call_stats_->Process();
+
+ // Deregister the first observer.
+ call_stats_->DeregisterStatsObserver(&stats_observer_1);
+ rtcp_rtt_stats->OnRttUpdate(kRtt);
+ fake_clock_.AdvanceTimeMilliseconds(1000);
+ EXPECT_CALL(stats_observer_1, OnRttUpdate(kRtt, kRtt)).Times(0);
+ EXPECT_CALL(stats_observer_2, OnRttUpdate(kRtt, kRtt)).Times(0);
+ call_stats_->Process();
+}
+
+// Verify increasing and decreasing rtt triggers callbacks with correct values.
+TEST_F(CallStatsTest, ChangeRtt) {
+ MockStatsObserver stats_observer;
+ call_stats_->RegisterStatsObserver(&stats_observer);
+ RtcpRttStats* rtcp_rtt_stats = call_stats_->rtcp_rtt_stats();
+
+ // Advance clock to be ready for an update.
+ fake_clock_.AdvanceTimeMilliseconds(1000);
+
+ // Set a first value and verify the callback is triggered.
+ const int64_t kFirstRtt = 100;
+ rtcp_rtt_stats->OnRttUpdate(kFirstRtt);
+ EXPECT_CALL(stats_observer, OnRttUpdate(kFirstRtt, kFirstRtt)).Times(1);
+ call_stats_->Process();
+
+ // Increase rtt and verify the new value is reported.
+ fake_clock_.AdvanceTimeMilliseconds(1000);
+ const int64_t kHighRtt = kFirstRtt + 20;
+ const int64_t kAvgRtt1 = 103;
+ rtcp_rtt_stats->OnRttUpdate(kHighRtt);
+ EXPECT_CALL(stats_observer, OnRttUpdate(kAvgRtt1, kHighRtt)).Times(1);
+ call_stats_->Process();
+
+ // Increase time enough for a new update, but not so much that the rtt
+ // becomes invalid. Report a lower rtt and verify the old/high value is
+ // still sent in the callback.
+ fake_clock_.AdvanceTimeMilliseconds(1000);
+ const int64_t kLowRtt = kFirstRtt - 20;
+ const int64_t kAvgRtt2 = 102;
+ rtcp_rtt_stats->OnRttUpdate(kLowRtt);
+ EXPECT_CALL(stats_observer, OnRttUpdate(kAvgRtt2, kHighRtt)).Times(1);
+ call_stats_->Process();
+
+ // Advance time to make the high report invalid, the lower rtt should now be
+ // in the callback.
+ fake_clock_.AdvanceTimeMilliseconds(1000);
+ const int64_t kAvgRtt3 = 95;
+ EXPECT_CALL(stats_observer, OnRttUpdate(kAvgRtt3, kLowRtt)).Times(1);
+ call_stats_->Process();
+
+ call_stats_->DeregisterStatsObserver(&stats_observer);
+}
+
+TEST_F(CallStatsTest, LastProcessedRtt) {
+ MockStatsObserver stats_observer;
+ call_stats_->RegisterStatsObserver(&stats_observer);
+ RtcpRttStats* rtcp_rtt_stats = call_stats_->rtcp_rtt_stats();
+ fake_clock_.AdvanceTimeMilliseconds(1000);
+
+ // Set the first values and verify that LastProcessedRtt initially returns the
+ // average rtt.
+ const int64_t kRttLow = 10;
+ const int64_t kRttHigh = 30;
+ const int64_t kAvgRtt = 20;
+ rtcp_rtt_stats->OnRttUpdate(kRttLow);
+ rtcp_rtt_stats->OnRttUpdate(kRttHigh);
+ EXPECT_CALL(stats_observer, OnRttUpdate(kAvgRtt, kRttHigh)).Times(1);
+ call_stats_->Process();
+ EXPECT_EQ(kAvgRtt, rtcp_rtt_stats->LastProcessedRtt());
+
+ // Update values and verify LastProcessedRtt.
+ fake_clock_.AdvanceTimeMilliseconds(1000);
+ rtcp_rtt_stats->OnRttUpdate(kRttLow);
+ rtcp_rtt_stats->OnRttUpdate(kRttHigh);
+ EXPECT_CALL(stats_observer, OnRttUpdate(kAvgRtt, kRttHigh)).Times(1);
+ call_stats_->Process();
+ EXPECT_EQ(kAvgRtt, rtcp_rtt_stats->LastProcessedRtt());
+
+ call_stats_->DeregisterStatsObserver(&stats_observer);
+}
+
+TEST_F(CallStatsTest, ProducesHistogramMetrics) {
+ metrics::Reset();
+ const int64_t kRtt = 123;
+ RtcpRttStats* rtcp_rtt_stats = call_stats_->rtcp_rtt_stats();
+ rtcp_rtt_stats->OnRttUpdate(kRtt);
+ fake_clock_.AdvanceTimeMilliseconds(metrics::kMinRunTimeInSeconds * 1000);
+ rtcp_rtt_stats->OnRttUpdate(kRtt);
+ call_stats_->Process();
+ call_stats_.reset();
+
+ EXPECT_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.AverageRoundTripTimeInMilliseconds"));
+ EXPECT_EQ(1, metrics::NumEvents(
+ "WebRTC.Video.AverageRoundTripTimeInMilliseconds", kRtt));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/video/encoder_rtcp_feedback.cc b/third_party/libwebrtc/webrtc/video/encoder_rtcp_feedback.cc
new file mode 100644
index 0000000000..5a1194f966
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/encoder_rtcp_feedback.cc
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/encoder_rtcp_feedback.h"
+
+#include "rtc_base/checks.h"
+#include "video/video_stream_encoder.h"
+
+static const int kMinKeyFrameRequestIntervalMs = 300;
+
+namespace webrtc {
+
+EncoderRtcpFeedback::EncoderRtcpFeedback(Clock* clock,
+ const std::vector<uint32_t>& ssrcs,
+ VideoStreamEncoder* encoder)
+ : clock_(clock),
+ ssrcs_(ssrcs),
+ video_stream_encoder_(encoder),
+ time_last_intra_request_ms_(ssrcs.size(), -1) {
+ RTC_DCHECK(!ssrcs.empty());
+}
+
+bool EncoderRtcpFeedback::HasSsrc(uint32_t ssrc) {
+ for (uint32_t registered_ssrc : ssrcs_) {
+ if (registered_ssrc == ssrc) {
+ return true;
+ }
+ }
+ return false;
+}
+
+size_t EncoderRtcpFeedback::GetStreamIndex(uint32_t ssrc) {
+ for (size_t i = 0; i < ssrcs_.size(); ++i) {
+ if (ssrcs_[i] == ssrc)
+ return i;
+ }
+ RTC_NOTREACHED() << "Unknown ssrc " << ssrc;
+ return 0;
+}
+
+void EncoderRtcpFeedback::OnReceivedIntraFrameRequest(uint32_t ssrc) {
+ RTC_DCHECK(HasSsrc(ssrc));
+ size_t index = GetStreamIndex(ssrc);
+ {
+ // TODO(mflodman): Move to VideoStreamEncoder after some more changes making
+ // it easier to test there.
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ rtc::CritScope lock(&crit_);
+ if (time_last_intra_request_ms_[index] + kMinKeyFrameRequestIntervalMs >
+ now_ms) {
+ return;
+ }
+ time_last_intra_request_ms_[index] = now_ms;
+ }
+
+ video_stream_encoder_->OnReceivedIntraFrameRequest(index);
+}
+
+} // namespace webrtc
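
OnReceivedIntraFrameRequest() above rate-limits key frame requests per simulcast stream: a request arriving within kMinKeyFrameRequestIntervalMs (300 ms) of the previous granted request for the same stream index is dropped before it reaches the encoder. A standalone sketch of that throttling logic (names are illustrative; the real class additionally maps SSRCs to stream indices):

#include <cstdint>
#include <vector>

class KeyFrameRequestThrottler {
 public:
  explicit KeyFrameRequestThrottler(size_t num_streams)
      : last_granted_ms_(num_streams, -1) {}

  // Returns true if the request for |stream_index| should be forwarded to
  // the encoder, false if it falls inside the 300 ms back-off window.
  bool OnRequest(size_t stream_index, int64_t now_ms) {
    if (last_granted_ms_[stream_index] + kMinIntervalMs > now_ms)
      return false;
    last_granted_ms_[stream_index] = now_ms;
    return true;
  }

 private:
  static constexpr int64_t kMinIntervalMs = 300;
  std::vector<int64_t> last_granted_ms_;  // -1 = no request granted yet.
};

The unit test that follows pins this behavior down: three requests within 10 ms produce a single encoder callback, and only after a further 300 ms does the next request go through.
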
diff --git a/third_party/libwebrtc/webrtc/video/encoder_rtcp_feedback.h b/third_party/libwebrtc/webrtc/video/encoder_rtcp_feedback.h
new file mode 100644
index 0000000000..fd57558fc6
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/encoder_rtcp_feedback.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef VIDEO_ENCODER_RTCP_FEEDBACK_H_
+#define VIDEO_ENCODER_RTCP_FEEDBACK_H_
+
+#include <vector>
+
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "rtc_base/criticalsection.h"
+#include "system_wrappers/include/clock.h"
+#include "typedefs.h" // NOLINT(build/include)
+
+namespace webrtc {
+
+class VideoStreamEncoder;
+
+class EncoderRtcpFeedback : public RtcpIntraFrameObserver {
+ public:
+ EncoderRtcpFeedback(Clock* clock,
+ const std::vector<uint32_t>& ssrcs,
+ VideoStreamEncoder* encoder);
+ void OnReceivedIntraFrameRequest(uint32_t ssrc) override;
+
+ private:
+ bool HasSsrc(uint32_t ssrc);
+ size_t GetStreamIndex(uint32_t ssrc);
+
+ Clock* const clock_;
+ const std::vector<uint32_t> ssrcs_;
+ VideoStreamEncoder* const video_stream_encoder_;
+
+ rtc::CriticalSection crit_;
+ std::vector<int64_t> time_last_intra_request_ms_ RTC_GUARDED_BY(crit_);
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_ENCODER_RTCP_FEEDBACK_H_
diff --git a/third_party/libwebrtc/webrtc/video/encoder_rtcp_feedback_unittest.cc b/third_party/libwebrtc/webrtc/video/encoder_rtcp_feedback_unittest.cc
new file mode 100644
index 0000000000..dd09540ce8
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/encoder_rtcp_feedback_unittest.cc
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/encoder_rtcp_feedback.h"
+
+#include <memory>
+
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "video/send_statistics_proxy.h"
+#include "video/video_stream_encoder.h"
+
+using ::testing::NiceMock;
+
+namespace webrtc {
+
+class MockVideoStreamEncoder : public VideoStreamEncoder {
+ public:
+ explicit MockVideoStreamEncoder(SendStatisticsProxy* send_stats_proxy)
+ : VideoStreamEncoder(1,
+ send_stats_proxy,
+ VideoSendStream::Config::EncoderSettings("fake", 0,
+ nullptr),
+ nullptr,
+ nullptr,
+ std::unique_ptr<OveruseFrameDetector>()) {}
+ ~MockVideoStreamEncoder() { Stop(); }
+
+ MOCK_METHOD1(OnReceivedIntraFrameRequest, void(size_t));
+};
+
+class VieKeyRequestTest : public ::testing::Test {
+ public:
+ VieKeyRequestTest()
+ : simulated_clock_(123456789),
+ send_stats_proxy_(&simulated_clock_,
+ VideoSendStream::Config(nullptr),
+ VideoEncoderConfig::ContentType::kRealtimeVideo),
+ encoder_(&send_stats_proxy_),
+ encoder_rtcp_feedback_(
+ &simulated_clock_,
+ std::vector<uint32_t>(1, VieKeyRequestTest::kSsrc),
+ &encoder_) {}
+
+ protected:
+ const uint32_t kSsrc = 1234;
+
+ SimulatedClock simulated_clock_;
+ SendStatisticsProxy send_stats_proxy_;
+ MockVideoStreamEncoder encoder_;
+ EncoderRtcpFeedback encoder_rtcp_feedback_;
+};
+
+TEST_F(VieKeyRequestTest, CreateAndTriggerRequests) {
+ EXPECT_CALL(encoder_, OnReceivedIntraFrameRequest(0)).Times(1);
+ encoder_rtcp_feedback_.OnReceivedIntraFrameRequest(kSsrc);
+}
+
+TEST_F(VieKeyRequestTest, TooManyOnReceivedIntraFrameRequest) {
+ EXPECT_CALL(encoder_, OnReceivedIntraFrameRequest(0)).Times(1);
+ encoder_rtcp_feedback_.OnReceivedIntraFrameRequest(kSsrc);
+ encoder_rtcp_feedback_.OnReceivedIntraFrameRequest(kSsrc);
+ simulated_clock_.AdvanceTimeMilliseconds(10);
+ encoder_rtcp_feedback_.OnReceivedIntraFrameRequest(kSsrc);
+
+ EXPECT_CALL(encoder_, OnReceivedIntraFrameRequest(0)).Times(1);
+ simulated_clock_.AdvanceTimeMilliseconds(300);
+ encoder_rtcp_feedback_.OnReceivedIntraFrameRequest(kSsrc);
+ encoder_rtcp_feedback_.OnReceivedIntraFrameRequest(kSsrc);
+ encoder_rtcp_feedback_.OnReceivedIntraFrameRequest(kSsrc);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/video/end_to_end_tests.cc b/third_party/libwebrtc/webrtc/video/end_to_end_tests.cc
new file mode 100644
index 0000000000..5b5be324bb
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/end_to_end_tests.cc
@@ -0,0 +1,5112 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include <algorithm>
+#include <list>
+#include <map>
+#include <memory>
+#include <sstream>
+#include <string>
+#include <vector>
+
+#include "api/optional.h"
+#include "api/video_codecs/video_encoder.h"
+#include "call/call.h"
+#include "common_video/include/frame_callback.h"
+#include "logging/rtc_event_log/rtc_event_log.h"
+#include "media/base/fakevideorenderer.h"
+#include "media/base/mediaconstants.h"
+#include "media/engine/internalencoderfactory.h"
+#include "media/engine/simulcast_encoder_adapter.h"
+#include "media/engine/webrtcvideoencoderfactory.h"
+#include "modules/include/module_common_types.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp.h"
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/nack.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/rapid_resync_request.h"
+#include "modules/rtp_rtcp/source/rtp_format.h"
+#include "modules/rtp_rtcp/source/rtp_utility.h"
+#include "modules/video_coding/codecs/h264/include/h264.h"
+#include "modules/video_coding/codecs/vp8/include/vp8.h"
+#include "modules/video_coding/codecs/vp9/include/vp9.h"
+#include "modules/video_coding/include/video_coding_defines.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/event.h"
+#include "rtc_base/file.h"
+#include "rtc_base/ptr_util.h"
+#include "rtc_base/random.h"
+#include "rtc_base/rate_limiter.h"
+#include "system_wrappers/include/metrics.h"
+#include "system_wrappers/include/metrics_default.h"
+#include "system_wrappers/include/sleep.h"
+#include "test/call_test.h"
+#include "test/direct_transport.h"
+#include "test/encoder_settings.h"
+#include "test/fake_decoder.h"
+#include "test/fake_encoder.h"
+#include "test/field_trial.h"
+#include "test/frame_generator.h"
+#include "test/frame_generator_capturer.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/null_transport.h"
+#include "test/rtcp_packet_parser.h"
+#include "test/rtp_rtcp_observer.h"
+#include "test/testsupport/fileutils.h"
+#include "test/testsupport/perf_test.h"
+#include "video/transport_adapter.h"
+
+// Flaky under MemorySanitizer: bugs.webrtc.org/7419
+#if defined(MEMORY_SANITIZER)
+#define MAYBE_InitialProbing DISABLED_InitialProbing
+// Fails on iOS bots: bugs.webrtc.org/7851
+#elif defined(TARGET_IPHONE_SIMULATOR) && TARGET_IPHONE_SIMULATOR
+#define MAYBE_InitialProbing DISABLED_InitialProbing
+#else
+#define MAYBE_InitialProbing InitialProbing
+#endif
+
+namespace webrtc {
+
+namespace {
+constexpr int kSilenceTimeoutMs = 2000;
+}
+
+class EndToEndTest : public test::CallTest,
+ public testing::WithParamInterface<std::string> {
+ public:
+ EndToEndTest() : field_trial_(GetParam()) {}
+
+ virtual ~EndToEndTest() {
+ EXPECT_EQ(nullptr, video_send_stream_);
+ EXPECT_TRUE(video_receive_streams_.empty());
+ }
+
+ protected:
+ class UnusedTransport : public Transport {
+ private:
+ bool SendRtp(const uint8_t* packet,
+ size_t length,
+ const PacketOptions& options) override {
+ ADD_FAILURE() << "Unexpected RTP sent.";
+ return false;
+ }
+
+ bool SendRtcp(const uint8_t* packet, size_t length) override {
+ ADD_FAILURE() << "Unexpected RTCP sent.";
+ return false;
+ }
+ };
+
+ class RequiredTransport : public Transport {
+ public:
+ RequiredTransport(bool rtp_required, bool rtcp_required)
+ : need_rtp_(rtp_required), need_rtcp_(rtcp_required) {}
+ ~RequiredTransport() {
+ if (need_rtp_) {
+ ADD_FAILURE() << "Expected RTP packet not sent.";
+ }
+ if (need_rtcp_) {
+ ADD_FAILURE() << "Expected RTCP packet not sent.";
+ }
+ }
+
+ private:
+ bool SendRtp(const uint8_t* packet,
+ size_t length,
+ const PacketOptions& options) override {
+ rtc::CritScope lock(&crit_);
+ need_rtp_ = false;
+ return true;
+ }
+
+ bool SendRtcp(const uint8_t* packet, size_t length) override {
+ rtc::CritScope lock(&crit_);
+ need_rtcp_ = false;
+ return true;
+ }
+ bool need_rtp_;
+ bool need_rtcp_;
+ rtc::CriticalSection crit_;
+ };
+
+ void DecodesRetransmittedFrame(bool enable_rtx, bool enable_red);
+ void ReceivesPliAndRecovers(int rtp_history_ms);
+ void RespectsRtcpMode(RtcpMode rtcp_mode);
+ void TestSendsSetSsrcs(size_t num_ssrcs, bool send_single_ssrc_first);
+ void TestRtpStatePreservation(bool use_rtx, bool provoke_rtcpsr_before_rtp);
+ void VerifyHistogramStats(bool use_rtx, bool use_fec, bool screenshare);
+ void VerifyNewVideoSendStreamsRespectNetworkState(
+ MediaType network_to_bring_up,
+ VideoEncoder* encoder,
+ Transport* transport);
+ void VerifyNewVideoReceiveStreamsRespectNetworkState(
+ MediaType network_to_bring_up,
+ Transport* transport);
+
+ test::ScopedFieldTrials field_trial_;
+};
+
+TEST_P(EndToEndTest, ReceiverCanBeStartedTwice) {
+ CreateCalls(Call::Config(event_log_.get()), Call::Config(event_log_.get()));
+
+ test::NullTransport transport;
+ CreateSendConfig(1, 0, 0, &transport);
+ CreateMatchingReceiveConfigs(&transport);
+
+ CreateVideoStreams();
+
+ video_receive_streams_[0]->Start();
+ video_receive_streams_[0]->Start();
+
+ DestroyStreams();
+}
+
+TEST_P(EndToEndTest, ReceiverCanBeStoppedTwice) {
+ CreateCalls(Call::Config(event_log_.get()), Call::Config(event_log_.get()));
+
+ test::NullTransport transport;
+ CreateSendConfig(1, 0, 0, &transport);
+ CreateMatchingReceiveConfigs(&transport);
+
+ CreateVideoStreams();
+
+ video_receive_streams_[0]->Stop();
+ video_receive_streams_[0]->Stop();
+
+ DestroyStreams();
+}
+
+TEST_P(EndToEndTest, ReceiverCanBeStoppedAndRestarted) {
+ CreateCalls(Call::Config(event_log_.get()), Call::Config(event_log_.get()));
+
+ test::NullTransport transport;
+ CreateSendConfig(1, 0, 0, &transport);
+ CreateMatchingReceiveConfigs(&transport);
+
+ CreateVideoStreams();
+
+ video_receive_streams_[0]->Stop();
+ video_receive_streams_[0]->Start();
+ video_receive_streams_[0]->Stop();
+
+ DestroyStreams();
+}
+
+TEST_P(EndToEndTest, RendersSingleDelayedFrame) {
+ static const int kWidth = 320;
+ static const int kHeight = 240;
+ // This constant is chosen to be higher than the timeout in the video_render
+ // module. This makes sure that frames aren't dropped if there are no other
+ // frames in the queue.
+ static const int kRenderDelayMs = 1000;
+
+ class Renderer : public rtc::VideoSinkInterface<VideoFrame> {
+ public:
+ Renderer() : event_(false, false) {}
+
+ void OnFrame(const VideoFrame& video_frame) override {
+ SleepMs(kRenderDelayMs);
+ event_.Set();
+ }
+
+ bool Wait() { return event_.Wait(kDefaultTimeoutMs); }
+
+ rtc::Event event_;
+ } renderer;
+
+ test::FrameForwarder frame_forwarder;
+ std::unique_ptr<test::DirectTransport> sender_transport;
+ std::unique_ptr<test::DirectTransport> receiver_transport;
+
+ task_queue_.SendTask([this, &renderer, &frame_forwarder, &sender_transport,
+ &receiver_transport]() {
+ CreateCalls(Call::Config(event_log_.get()), Call::Config(event_log_.get()));
+
+ sender_transport = rtc::MakeUnique<test::DirectTransport>(
+ &task_queue_, sender_call_.get(), payload_type_map_);
+ receiver_transport = rtc::MakeUnique<test::DirectTransport>(
+ &task_queue_, receiver_call_.get(), payload_type_map_);
+ sender_transport->SetReceiver(receiver_call_->Receiver());
+ receiver_transport->SetReceiver(sender_call_->Receiver());
+
+ CreateSendConfig(1, 0, 0, sender_transport.get());
+ CreateMatchingReceiveConfigs(receiver_transport.get());
+
+ video_receive_configs_[0].renderer = &renderer;
+
+ CreateVideoStreams();
+ Start();
+
+ // Create frames that are smaller than the send width/height; this is done
+ // to check that the callbacks are done after processing video.
+ std::unique_ptr<test::FrameGenerator> frame_generator(
+ test::FrameGenerator::CreateSquareGenerator(kWidth, kHeight));
+ video_send_stream_->SetSource(
+ &frame_forwarder,
+ VideoSendStream::DegradationPreference::kMaintainFramerate);
+
+ frame_forwarder.IncomingCapturedFrame(*frame_generator->NextFrame());
+ });
+
+ EXPECT_TRUE(renderer.Wait())
+ << "Timed out while waiting for the frame to render.";
+
+ task_queue_.SendTask([this, &sender_transport, &receiver_transport]() {
+ Stop();
+ DestroyStreams();
+ sender_transport.reset();
+ receiver_transport.reset();
+ DestroyCalls();
+ });
+}
+
+TEST_P(EndToEndTest, TransmitsFirstFrame) {
+ class Renderer : public rtc::VideoSinkInterface<VideoFrame> {
+ public:
+ Renderer() : event_(false, false) {}
+
+ void OnFrame(const VideoFrame& video_frame) override { event_.Set(); }
+
+ bool Wait() { return event_.Wait(kDefaultTimeoutMs); }
+
+ rtc::Event event_;
+ } renderer;
+
+ std::unique_ptr<test::FrameGenerator> frame_generator;
+ test::FrameForwarder frame_forwarder;
+
+ std::unique_ptr<test::DirectTransport> sender_transport;
+ std::unique_ptr<test::DirectTransport> receiver_transport;
+
+ task_queue_.SendTask([this, &renderer, &frame_generator, &frame_forwarder,
+ &sender_transport, &receiver_transport]() {
+ CreateCalls(Call::Config(event_log_.get()), Call::Config(event_log_.get()));
+
+ sender_transport = rtc::MakeUnique<test::DirectTransport>(
+ &task_queue_, sender_call_.get(), payload_type_map_);
+ receiver_transport = rtc::MakeUnique<test::DirectTransport>(
+ &task_queue_, receiver_call_.get(), payload_type_map_);
+ sender_transport->SetReceiver(receiver_call_->Receiver());
+ receiver_transport->SetReceiver(sender_call_->Receiver());
+
+ CreateSendConfig(1, 0, 0, sender_transport.get());
+ CreateMatchingReceiveConfigs(receiver_transport.get());
+ video_receive_configs_[0].renderer = &renderer;
+
+ CreateVideoStreams();
+ Start();
+
+ frame_generator = test::FrameGenerator::CreateSquareGenerator(
+ kDefaultWidth, kDefaultHeight);
+ video_send_stream_->SetSource(
+ &frame_forwarder,
+ VideoSendStream::DegradationPreference::kMaintainFramerate);
+ frame_forwarder.IncomingCapturedFrame(*frame_generator->NextFrame());
+ });
+
+ EXPECT_TRUE(renderer.Wait())
+ << "Timed out while waiting for the frame to render.";
+
+ task_queue_.SendTask([this, &sender_transport, &receiver_transport]() {
+ Stop();
+ DestroyStreams();
+ sender_transport.reset();
+ receiver_transport.reset();
+ DestroyCalls();
+ });
+}
+
+class CodecObserver : public test::EndToEndTest,
+ public rtc::VideoSinkInterface<VideoFrame> {
+ public:
+ CodecObserver(int no_frames_to_wait_for,
+ VideoRotation rotation_to_test,
+ const std::string& payload_name,
+ std::unique_ptr<webrtc::VideoEncoder> encoder,
+ std::unique_ptr<webrtc::VideoDecoder> decoder)
+ : EndToEndTest(4 * webrtc::EndToEndTest::kDefaultTimeoutMs),
+ // TODO(hta): This timeout (120 seconds) is excessive.
+ // https://bugs.webrtc.org/6830
+ no_frames_to_wait_for_(no_frames_to_wait_for),
+ expected_rotation_(rotation_to_test),
+ payload_name_(payload_name),
+ encoder_(std::move(encoder)),
+ decoder_(std::move(decoder)),
+ frame_counter_(0) {}
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait())
+ << "Timed out while waiting for enough frames to be decoded.";
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->encoder_settings.encoder = encoder_.get();
+ send_config->encoder_settings.payload_name = payload_name_;
+ send_config->encoder_settings.payload_type =
+ test::CallTest::kVideoSendPayloadType;
+
+ (*receive_configs)[0].renderer = this;
+ (*receive_configs)[0].decoders.resize(1);
+ (*receive_configs)[0].decoders[0].payload_type =
+ send_config->encoder_settings.payload_type;
+ (*receive_configs)[0].decoders[0].payload_name =
+ send_config->encoder_settings.payload_name;
+ (*receive_configs)[0].decoders[0].decoder = decoder_.get();
+ }
+
+ void OnFrame(const VideoFrame& video_frame) override {
+ EXPECT_EQ(expected_rotation_, video_frame.rotation());
+ if (++frame_counter_ == no_frames_to_wait_for_)
+ observation_complete_.Set();
+ }
+
+ void OnFrameGeneratorCapturerCreated(
+ test::FrameGeneratorCapturer* frame_generator_capturer) override {
+ frame_generator_capturer->SetFakeRotation(expected_rotation_);
+ }
+
+ private:
+ int no_frames_to_wait_for_;
+ VideoRotation expected_rotation_;
+ std::string payload_name_;
+ std::unique_ptr<webrtc::VideoEncoder> encoder_;
+ std::unique_ptr<webrtc::VideoDecoder> decoder_;
+ int frame_counter_;
+};
+
+TEST_P(EndToEndTest, SendsAndReceivesVP8) {
+ CodecObserver test(5, kVideoRotation_0, "VP8", VP8Encoder::Create(),
+ VP8Decoder::Create());
+ RunBaseTest(&test);
+}
+
+TEST_P(EndToEndTest, SendsAndReceivesVP8Rotation90) {
+ CodecObserver test(5, kVideoRotation_90, "VP8", VP8Encoder::Create(),
+ VP8Decoder::Create());
+ RunBaseTest(&test);
+}
+
+#if !defined(RTC_DISABLE_VP9)
+TEST_P(EndToEndTest, SendsAndReceivesVP9) {
+ CodecObserver test(500, kVideoRotation_0, "VP9", VP9Encoder::Create(),
+ VP9Decoder::Create());
+ RunBaseTest(&test);
+}
+
+TEST_P(EndToEndTest, SendsAndReceivesVP9VideoRotation90) {
+ CodecObserver test(5, kVideoRotation_90, "VP9", VP9Encoder::Create(),
+ VP9Decoder::Create());
+ RunBaseTest(&test);
+}
+#endif // !defined(RTC_DISABLE_VP9)
+
+#if defined(WEBRTC_USE_H264)
+class EndToEndTestH264 : public EndToEndTest {};
+
+const auto h264_field_trial_combinations = ::testing::Values(
+ "WebRTC-SpsPpsIdrIsH264Keyframe/Disabled/WebRTC-RoundRobinPacing/Disabled/",
+ "WebRTC-SpsPpsIdrIsH264Keyframe/Enabled/WebRTC-RoundRobinPacing/Disabled/",
+ "WebRTC-SpsPpsIdrIsH264Keyframe/Disabled/WebRTC-RoundRobinPacing/Enabled/",
+ "WebRTC-SpsPpsIdrIsH264Keyframe/Enabled/WebRTC-RoundRobinPacing/Enabled/");
+INSTANTIATE_TEST_CASE_P(SpsPpsIdrIsKeyframe,
+ EndToEndTestH264,
+ h264_field_trial_combinations);
+
+TEST_P(EndToEndTestH264, SendsAndReceivesH264) {
+ CodecObserver test(500, kVideoRotation_0, "H264",
+ H264Encoder::Create(cricket::VideoCodec("H264")),
+ H264Decoder::Create());
+ RunBaseTest(&test);
+}
+
+TEST_P(EndToEndTestH264, SendsAndReceivesH264VideoRotation90) {
+ CodecObserver test(5, kVideoRotation_90, "H264",
+ H264Encoder::Create(cricket::VideoCodec("H264")),
+ H264Decoder::Create());
+ RunBaseTest(&test);
+}
+
+TEST_P(EndToEndTestH264, SendsAndReceivesH264PacketizationMode0) {
+ cricket::VideoCodec codec = cricket::VideoCodec("H264");
+ codec.SetParam(cricket::kH264FmtpPacketizationMode, "0");
+ CodecObserver test(500, kVideoRotation_0, "H264", H264Encoder::Create(codec),
+ H264Decoder::Create());
+ RunBaseTest(&test);
+}
+
+TEST_P(EndToEndTestH264, SendsAndReceivesH264PacketizationMode1) {
+ cricket::VideoCodec codec = cricket::VideoCodec("H264");
+ codec.SetParam(cricket::kH264FmtpPacketizationMode, "1");
+ CodecObserver test(500, kVideoRotation_0, "H264", H264Encoder::Create(codec),
+ H264Decoder::Create());
+ RunBaseTest(&test);
+}
+#endif // defined(WEBRTC_USE_H264)
+
+TEST_P(EndToEndTest, ReceiverUsesLocalSsrc) {
+ class SyncRtcpObserver : public test::EndToEndTest {
+ public:
+ SyncRtcpObserver() : EndToEndTest(kDefaultTimeoutMs) {}
+
+ Action OnReceiveRtcp(const uint8_t* packet, size_t length) override {
+ test::RtcpPacketParser parser;
+ EXPECT_TRUE(parser.Parse(packet, length));
+ EXPECT_EQ(kReceiverLocalVideoSsrc, parser.sender_ssrc());
+ observation_complete_.Set();
+
+ return SEND_PACKET;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait())
+ << "Timed out while waiting for a receiver RTCP packet to be sent.";
+ }
+ } test;
+
+ RunBaseTest(&test);
+}
+
+TEST_P(EndToEndTest, ReceivesAndRetransmitsNack) {
+ static const int kNumberOfNacksToObserve = 2;
+ static const int kLossBurstSize = 2;
+ static const int kPacketsBetweenLossBursts = 9;
+ class NackObserver : public test::EndToEndTest {
+ public:
+ NackObserver()
+ : EndToEndTest(kLongTimeoutMs),
+ sent_rtp_packets_(0),
+ packets_left_to_drop_(0),
+ nacks_left_(kNumberOfNacksToObserve) {}
+
+ private:
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ rtc::CritScope lock(&crit_);
+ RTPHeader header;
+ EXPECT_TRUE(parser_->Parse(packet, length, &header));
+
+ // Never drop retransmitted packets.
+ if (dropped_packets_.find(header.sequenceNumber) !=
+ dropped_packets_.end()) {
+ retransmitted_packets_.insert(header.sequenceNumber);
+ return SEND_PACKET;
+ }
+
+ if (nacks_left_ <= 0 &&
+ retransmitted_packets_.size() == dropped_packets_.size()) {
+ observation_complete_.Set();
+ }
+
+ ++sent_rtp_packets_;
+
+ // Enough NACKs received, stop dropping packets.
+ if (nacks_left_ <= 0)
+ return SEND_PACKET;
+
+ // Check if it's time for a new loss burst.
+ if (sent_rtp_packets_ % kPacketsBetweenLossBursts == 0)
+ packets_left_to_drop_ = kLossBurstSize;
+
+ // Never drop padding packets as those won't be retransmitted.
+ if (packets_left_to_drop_ > 0 && header.paddingLength == 0) {
+ --packets_left_to_drop_;
+ dropped_packets_.insert(header.sequenceNumber);
+ return DROP_PACKET;
+ }
+
+ return SEND_PACKET;
+ }
+
+ Action OnReceiveRtcp(const uint8_t* packet, size_t length) override {
+ rtc::CritScope lock(&crit_);
+ test::RtcpPacketParser parser;
+ EXPECT_TRUE(parser.Parse(packet, length));
+ nacks_left_ -= parser.nack()->num_packets();
+ return SEND_PACKET;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ (*receive_configs)[0].rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait())
+ << "Timed out waiting for packets to be NACKed, retransmitted and "
+ "rendered.";
+ }
+
+ rtc::CriticalSection crit_;
+ std::set<uint16_t> dropped_packets_;
+ std::set<uint16_t> retransmitted_packets_;
+ uint64_t sent_rtp_packets_;
+ int packets_left_to_drop_;
+ int nacks_left_ RTC_GUARDED_BY(&crit_);
+ } test;
+
+ RunBaseTest(&test);
+}
+
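+// Audio-only variant: the test itself builds and sends a NACK for one audio
+// packet and waits until that packet is retransmitted.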
+TEST_P(EndToEndTest, ReceivesNackAndRetransmitsAudio) {
+ class NackObserver : public test::EndToEndTest {
+ public:
+ NackObserver()
+ : EndToEndTest(kLongTimeoutMs),
+ local_ssrc_(0),
+ remote_ssrc_(0),
+ receive_transport_(nullptr) {}
+
+ private:
+ size_t GetNumVideoStreams() const override { return 0; }
+ size_t GetNumAudioStreams() const override { return 1; }
+
+ test::PacketTransport* CreateReceiveTransport(
+ test::SingleThreadedTaskQueueForTesting* task_queue) override {
+ test::PacketTransport* receive_transport = new test::PacketTransport(
+ task_queue, nullptr, this, test::PacketTransport::kReceiver,
+ payload_type_map_, FakeNetworkPipe::Config());
+ receive_transport_ = receive_transport;
+ return receive_transport;
+ }
+
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ RTPHeader header;
+ EXPECT_TRUE(parser_->Parse(packet, length, &header));
+
+ if (!sequence_number_to_retransmit_) {
+ sequence_number_to_retransmit_ =
+ rtc::Optional<uint16_t>(header.sequenceNumber);
+
+      // Don't ask for retransmission straight away; the request may be
+      // deduped in the pacer.
+ } else if (header.sequenceNumber == *sequence_number_to_retransmit_) {
+ observation_complete_.Set();
+ } else {
+ // Send a NACK as often as necessary until retransmission is received.
+ rtcp::Nack nack;
+ nack.SetSenderSsrc(local_ssrc_);
+ nack.SetMediaSsrc(remote_ssrc_);
+ uint16_t nack_list[] = {*sequence_number_to_retransmit_};
+ nack.SetPacketIds(nack_list, 1);
+ rtc::Buffer buffer = nack.Build();
+
+ EXPECT_TRUE(receive_transport_->SendRtcp(buffer.data(), buffer.size()));
+ }
+
+ return SEND_PACKET;
+ }
+
+ void ModifyAudioConfigs(
+ AudioSendStream::Config* send_config,
+ std::vector<AudioReceiveStream::Config>* receive_configs) override {
+ send_config->rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ (*receive_configs)[0].rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ local_ssrc_ = (*receive_configs)[0].rtp.local_ssrc;
+ remote_ssrc_ = (*receive_configs)[0].rtp.remote_ssrc;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait())
+          << "Timed out waiting for the packet to be NACKed and "
+             "retransmitted.";
+ }
+
+ uint32_t local_ssrc_;
+ uint32_t remote_ssrc_;
+ Transport* receive_transport_;
+ rtc::Optional<uint16_t> sequence_number_to_retransmit_;
+ } test;
+
+ RunBaseTest(&test);
+}
+
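+// Drops roughly 5% of media packets (FEC packets are always let through) and
+// succeeds once a frame whose packets were dropped is rendered, i.e. once the
+// frame has been recovered via ULPFEC.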
+TEST_P(EndToEndTest, ReceivesUlpfec) {
+ class UlpfecRenderObserver : public test::EndToEndTest,
+ public rtc::VideoSinkInterface<VideoFrame> {
+ public:
+ UlpfecRenderObserver()
+ : EndToEndTest(kDefaultTimeoutMs),
+ encoder_(VP8Encoder::Create()),
+ random_(0xcafef00d1),
+ num_packets_sent_(0) {}
+
+ private:
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ rtc::CritScope lock(&crit_);
+ RTPHeader header;
+ EXPECT_TRUE(parser_->Parse(packet, length, &header));
+
+ EXPECT_TRUE(header.payloadType == kVideoSendPayloadType ||
+ header.payloadType == kRedPayloadType)
+ << "Unknown payload type received.";
+ EXPECT_EQ(kVideoSendSsrcs[0], header.ssrc) << "Unknown SSRC received.";
+
+ // Parse RED header.
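+      // The first payload octet of a RED packet is the block header: the F
+      // bit plus the encapsulated payload type. With a single block the F
+      // bit is zero, so the octet can be read directly as the payload type.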
+ int encapsulated_payload_type = -1;
+ if (header.payloadType == kRedPayloadType) {
+ encapsulated_payload_type =
+ static_cast<int>(packet[header.headerLength]);
+
+ EXPECT_TRUE(encapsulated_payload_type == kVideoSendPayloadType ||
+ encapsulated_payload_type == kUlpfecPayloadType)
+ << "Unknown encapsulated payload type received.";
+ }
+
+ // To minimize test flakiness, always let ULPFEC packets through.
+ if (encapsulated_payload_type == kUlpfecPayloadType) {
+ return SEND_PACKET;
+ }
+
+ // Simulate 5% video packet loss after rampup period. Record the
+ // corresponding timestamps that were dropped.
+ if (num_packets_sent_++ > 100 && random_.Rand(1, 100) <= 5) {
+ if (encapsulated_payload_type == kVideoSendPayloadType) {
+ dropped_sequence_numbers_.insert(header.sequenceNumber);
+ dropped_timestamps_.insert(header.timestamp);
+ }
+ return DROP_PACKET;
+ }
+
+ return SEND_PACKET;
+ }
+
+ void OnFrame(const VideoFrame& video_frame) override {
+ rtc::CritScope lock(&crit_);
+ // Rendering frame with timestamp of packet that was dropped -> FEC
+ // protection worked.
+ auto it = dropped_timestamps_.find(video_frame.timestamp());
+ if (it != dropped_timestamps_.end()) {
+ observation_complete_.Set();
+ }
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ // Use VP8 instead of FAKE, since the latter does not have PictureID
+ // in the packetization headers.
+ send_config->encoder_settings.encoder = encoder_.get();
+ send_config->encoder_settings.payload_name = "VP8";
+ send_config->encoder_settings.payload_type = kVideoSendPayloadType;
+ VideoReceiveStream::Decoder decoder =
+ test::CreateMatchingDecoder(send_config->encoder_settings);
+ decoder_.reset(decoder.decoder);
+ (*receive_configs)[0].decoders.clear();
+ (*receive_configs)[0].decoders.push_back(decoder);
+
+ // Enable ULPFEC over RED.
+ send_config->rtp.ulpfec.red_payload_type = kRedPayloadType;
+ send_config->rtp.ulpfec.ulpfec_payload_type = kUlpfecPayloadType;
+ (*receive_configs)[0].rtp.red_payload_type = kRedPayloadType;
+ (*receive_configs)[0].rtp.ulpfec_payload_type = kUlpfecPayloadType;
+
+ (*receive_configs)[0].renderer = this;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait())
+ << "Timed out waiting for dropped frames to be rendered.";
+ }
+
+ rtc::CriticalSection crit_;
+ std::unique_ptr<VideoEncoder> encoder_;
+ std::unique_ptr<VideoDecoder> decoder_;
+ std::set<uint32_t> dropped_sequence_numbers_ RTC_GUARDED_BY(crit_);
+ // Several packets can have the same timestamp.
+ std::multiset<uint32_t> dropped_timestamps_ RTC_GUARDED_BY(crit_);
+ Random random_;
+ int num_packets_sent_ RTC_GUARDED_BY(crit_);
+ } test;
+
+ RunBaseTest(&test);
+}
+
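+// Verifies that frames whose media packets were dropped are recovered via
+// FlexFEC, optionally in combination with NACK/RTX, and (when requested) that
+// the receiver sends RTCP receiver reports on the FlexFEC local SSRC.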
+class FlexfecRenderObserver : public test::EndToEndTest,
+ public rtc::VideoSinkInterface<VideoFrame> {
+ public:
+ static constexpr uint32_t kVideoLocalSsrc = 123;
+ static constexpr uint32_t kFlexfecLocalSsrc = 456;
+
+ explicit FlexfecRenderObserver(bool enable_nack, bool expect_flexfec_rtcp)
+ : test::EndToEndTest(test::CallTest::kDefaultTimeoutMs),
+ enable_nack_(enable_nack),
+ expect_flexfec_rtcp_(expect_flexfec_rtcp),
+ received_flexfec_rtcp_(false),
+ random_(0xcafef00d1),
+ num_packets_sent_(0) {}
+
+ size_t GetNumFlexfecStreams() const override { return 1; }
+
+ private:
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ rtc::CritScope lock(&crit_);
+ RTPHeader header;
+ EXPECT_TRUE(parser_->Parse(packet, length, &header));
+
+ EXPECT_TRUE(header.payloadType ==
+ test::CallTest::kFakeVideoSendPayloadType ||
+ header.payloadType == test::CallTest::kFlexfecPayloadType ||
+ (enable_nack_ &&
+ header.payloadType == test::CallTest::kSendRtxPayloadType))
+ << "Unknown payload type received.";
+ EXPECT_TRUE(
+ header.ssrc == test::CallTest::kVideoSendSsrcs[0] ||
+ header.ssrc == test::CallTest::kFlexfecSendSsrc ||
+ (enable_nack_ && header.ssrc == test::CallTest::kSendRtxSsrcs[0]))
+ << "Unknown SSRC received.";
+
+ // To reduce test flakiness, always let FlexFEC packets through.
+ if (header.payloadType == test::CallTest::kFlexfecPayloadType) {
+ EXPECT_EQ(test::CallTest::kFlexfecSendSsrc, header.ssrc);
+
+ return SEND_PACKET;
+ }
+
+ // To reduce test flakiness, always let RTX packets through.
+ if (header.payloadType == test::CallTest::kSendRtxPayloadType) {
+ EXPECT_EQ(test::CallTest::kSendRtxSsrcs[0], header.ssrc);
+
+ // Parse RTX header.
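+      // Per RFC 4588, the first two bytes of the RTX payload carry the
+      // original media packet's sequence number.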
+ uint16_t original_sequence_number =
+ ByteReader<uint16_t>::ReadBigEndian(&packet[header.headerLength]);
+
+ // From the perspective of FEC, a retransmitted packet is no longer
+ // dropped, so remove it from list of dropped packets.
+ auto seq_num_it =
+ dropped_sequence_numbers_.find(original_sequence_number);
+ if (seq_num_it != dropped_sequence_numbers_.end()) {
+ dropped_sequence_numbers_.erase(seq_num_it);
+ auto ts_it = dropped_timestamps_.find(header.timestamp);
+ EXPECT_NE(ts_it, dropped_timestamps_.end());
+ dropped_timestamps_.erase(ts_it);
+ }
+
+ return SEND_PACKET;
+ }
+
+ // Simulate 5% video packet loss after rampup period. Record the
+ // corresponding timestamps that were dropped.
+ if (num_packets_sent_++ > 100 && random_.Rand(1, 100) <= 5) {
+ EXPECT_EQ(test::CallTest::kFakeVideoSendPayloadType, header.payloadType);
+ EXPECT_EQ(test::CallTest::kVideoSendSsrcs[0], header.ssrc);
+
+ dropped_sequence_numbers_.insert(header.sequenceNumber);
+ dropped_timestamps_.insert(header.timestamp);
+
+ return DROP_PACKET;
+ }
+
+ return SEND_PACKET;
+ }
+
+ Action OnReceiveRtcp(const uint8_t* data, size_t length) override {
+ test::RtcpPacketParser parser;
+
+ parser.Parse(data, length);
+ if (parser.sender_ssrc() == kFlexfecLocalSsrc) {
+ EXPECT_EQ(1, parser.receiver_report()->num_packets());
+ const std::vector<rtcp::ReportBlock>& report_blocks =
+ parser.receiver_report()->report_blocks();
+ if (!report_blocks.empty()) {
+ EXPECT_EQ(1U, report_blocks.size());
+ EXPECT_EQ(test::CallTest::kFlexfecSendSsrc,
+ report_blocks[0].source_ssrc());
+ rtc::CritScope lock(&crit_);
+ received_flexfec_rtcp_ = true;
+ }
+ }
+
+ return SEND_PACKET;
+ }
+
+ test::PacketTransport* CreateSendTransport(
+ test::SingleThreadedTaskQueueForTesting* task_queue,
+ Call* sender_call) override {
+    // At low RTT (< kLowRttNackMs) only NACK is used and no FEC is sent, so
+    // configure enough network delay to keep the RTT above that threshold.
+ const int kNetworkDelayMs = 100;
+ FakeNetworkPipe::Config config;
+ config.queue_delay_ms = kNetworkDelayMs;
+ return new test::PacketTransport(task_queue, sender_call, this,
+ test::PacketTransport::kSender,
+ test::CallTest::payload_type_map_, config);
+ }
+
+ void OnFrame(const VideoFrame& video_frame) override {
+ EXPECT_EQ(kVideoRotation_90, video_frame.rotation());
+
+ rtc::CritScope lock(&crit_);
+ // Rendering frame with timestamp of packet that was dropped -> FEC
+ // protection worked.
+ auto it = dropped_timestamps_.find(video_frame.timestamp());
+ if (it != dropped_timestamps_.end()) {
+ if (!expect_flexfec_rtcp_ || received_flexfec_rtcp_) {
+ observation_complete_.Set();
+ }
+ }
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ (*receive_configs)[0].rtp.local_ssrc = kVideoLocalSsrc;
+ (*receive_configs)[0].renderer = this;
+
+ if (enable_nack_) {
+ send_config->rtp.nack.rtp_history_ms = test::CallTest::kNackRtpHistoryMs;
+ send_config->rtp.rtx.ssrcs.push_back(test::CallTest::kSendRtxSsrcs[0]);
+ send_config->rtp.rtx.payload_type = test::CallTest::kSendRtxPayloadType;
+
+ (*receive_configs)[0].rtp.nack.rtp_history_ms =
+ test::CallTest::kNackRtpHistoryMs;
+ (*receive_configs)[0].rtp.rtx_ssrc = test::CallTest::kSendRtxSsrcs[0];
+ (*receive_configs)[0]
+ .rtp
+ .rtx_associated_payload_types[test::CallTest::kSendRtxPayloadType] =
+ test::CallTest::kVideoSendPayloadType;
+ }
+ }
+
+ void OnFrameGeneratorCapturerCreated(
+ test::FrameGeneratorCapturer* frame_generator_capturer) override {
+ frame_generator_capturer->SetFakeRotation(kVideoRotation_90);
+ }
+
+ void ModifyFlexfecConfigs(
+ std::vector<FlexfecReceiveStream::Config>* receive_configs) override {
+ (*receive_configs)[0].local_ssrc = kFlexfecLocalSsrc;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait())
+ << "Timed out waiting for dropped frames to be rendered.";
+ }
+
+ rtc::CriticalSection crit_;
+ std::set<uint32_t> dropped_sequence_numbers_ RTC_GUARDED_BY(crit_);
+ // Several packets can have the same timestamp.
+ std::multiset<uint32_t> dropped_timestamps_ RTC_GUARDED_BY(crit_);
+ const bool enable_nack_;
+ const bool expect_flexfec_rtcp_;
+ bool received_flexfec_rtcp_ RTC_GUARDED_BY(crit_);
+ Random random_;
+ int num_packets_sent_;
+};
+
+TEST_P(EndToEndTest, RecoversWithFlexfec) {
+ FlexfecRenderObserver test(false, false);
+ RunBaseTest(&test);
+}
+
+TEST_P(EndToEndTest, RecoversWithFlexfecAndNack) {
+ FlexfecRenderObserver test(true, false);
+ RunBaseTest(&test);
+}
+
+TEST_P(EndToEndTest, RecoversWithFlexfecAndSendsCorrespondingRtcp) {
+ FlexfecRenderObserver test(false, true);
+ RunBaseTest(&test);
+}
+
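+// Verifies that the sequence number of a received ULPFEC packet never shows
+// up in a NACK, even while surrounding media packets are being dropped.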
+TEST_P(EndToEndTest, ReceivedUlpfecPacketsNotNacked) {
+ class UlpfecNackObserver : public test::EndToEndTest {
+ public:
+ UlpfecNackObserver()
+ : EndToEndTest(kDefaultTimeoutMs),
+ state_(kFirstPacket),
+ ulpfec_sequence_number_(0),
+ has_last_sequence_number_(false),
+ last_sequence_number_(0),
+ encoder_(VP8Encoder::Create()),
+ decoder_(VP8Decoder::Create()) {}
+
+ private:
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ rtc::CritScope lock_(&crit_);
+ RTPHeader header;
+ EXPECT_TRUE(parser_->Parse(packet, length, &header));
+
+ int encapsulated_payload_type = -1;
+ if (header.payloadType == kRedPayloadType) {
+ encapsulated_payload_type =
+ static_cast<int>(packet[header.headerLength]);
+ if (encapsulated_payload_type != kFakeVideoSendPayloadType)
+ EXPECT_EQ(kUlpfecPayloadType, encapsulated_payload_type);
+ } else {
+ EXPECT_EQ(kFakeVideoSendPayloadType, header.payloadType);
+ }
+
+ if (has_last_sequence_number_ &&
+ !IsNewerSequenceNumber(header.sequenceNumber,
+ last_sequence_number_)) {
+ // Drop retransmitted packets.
+ return DROP_PACKET;
+ }
+ last_sequence_number_ = header.sequenceNumber;
+ has_last_sequence_number_ = true;
+
+ bool ulpfec_packet = encapsulated_payload_type == kUlpfecPayloadType;
+ switch (state_) {
+ case kFirstPacket:
+ state_ = kDropEveryOtherPacketUntilUlpfec;
+ break;
+ case kDropEveryOtherPacketUntilUlpfec:
+ if (ulpfec_packet) {
+ state_ = kDropAllMediaPacketsUntilUlpfec;
+ } else if (header.sequenceNumber % 2 == 0) {
+ return DROP_PACKET;
+ }
+ break;
+ case kDropAllMediaPacketsUntilUlpfec:
+ if (!ulpfec_packet)
+ return DROP_PACKET;
+ ulpfec_sequence_number_ = header.sequenceNumber;
+ state_ = kDropOneMediaPacket;
+ break;
+ case kDropOneMediaPacket:
+ if (ulpfec_packet)
+ return DROP_PACKET;
+ state_ = kPassOneMediaPacket;
+ return DROP_PACKET;
+ case kPassOneMediaPacket:
+ if (ulpfec_packet)
+ return DROP_PACKET;
+          // Pass one media packet after the packet dropped following the
+          // last FEC packet; otherwise the receiver might never see a
+          // sequence number newer than |ulpfec_sequence_number_|.
+ state_ = kVerifyUlpfecPacketNotInNackList;
+ break;
+ case kVerifyUlpfecPacketNotInNackList:
+ // Continue to drop packets. Make sure no frame can be decoded.
+ if (ulpfec_packet || header.sequenceNumber % 2 == 0)
+ return DROP_PACKET;
+ break;
+ }
+ return SEND_PACKET;
+ }
+
+ Action OnReceiveRtcp(const uint8_t* packet, size_t length) override {
+ rtc::CritScope lock_(&crit_);
+ if (state_ == kVerifyUlpfecPacketNotInNackList) {
+ test::RtcpPacketParser rtcp_parser;
+ rtcp_parser.Parse(packet, length);
+ const std::vector<uint16_t>& nacks = rtcp_parser.nack()->packet_ids();
+ EXPECT_TRUE(std::find(nacks.begin(), nacks.end(),
+ ulpfec_sequence_number_) == nacks.end())
+ << "Got nack for ULPFEC packet";
+ if (!nacks.empty() &&
+ IsNewerSequenceNumber(nacks.back(), ulpfec_sequence_number_)) {
+ observation_complete_.Set();
+ }
+ }
+ return SEND_PACKET;
+ }
+
+ test::PacketTransport* CreateSendTransport(
+ test::SingleThreadedTaskQueueForTesting* task_queue,
+ Call* sender_call) override {
+      // At low RTT (< kLowRttNackMs) only NACK is used and no FEC is sent,
+      // so configure some network delay to keep the RTT above that threshold.
+ const int kNetworkDelayMs = 50;
+ FakeNetworkPipe::Config config;
+ config.queue_delay_ms = kNetworkDelayMs;
+ return new test::PacketTransport(task_queue, sender_call, this,
+ test::PacketTransport::kSender,
+ payload_type_map_, config);
+ }
+
+ // TODO(holmer): Investigate why we don't send FEC packets when the bitrate
+ // is 10 kbps.
+ Call::Config GetSenderCallConfig() override {
+ Call::Config config(event_log_.get());
+ const int kMinBitrateBps = 30000;
+ config.bitrate_config.min_bitrate_bps = kMinBitrateBps;
+ return config;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ // Configure hybrid NACK/FEC.
+ send_config->rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ send_config->rtp.ulpfec.red_payload_type = kRedPayloadType;
+ send_config->rtp.ulpfec.ulpfec_payload_type = kUlpfecPayloadType;
+ // Set codec to VP8, otherwise NACK/FEC hybrid will be disabled.
+ send_config->encoder_settings.encoder = encoder_.get();
+ send_config->encoder_settings.payload_name = "VP8";
+ send_config->encoder_settings.payload_type = kFakeVideoSendPayloadType;
+
+ (*receive_configs)[0].rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ (*receive_configs)[0].rtp.red_payload_type = kRedPayloadType;
+ (*receive_configs)[0].rtp.ulpfec_payload_type = kUlpfecPayloadType;
+
+ (*receive_configs)[0].decoders.resize(1);
+ (*receive_configs)[0].decoders[0].payload_type =
+ send_config->encoder_settings.payload_type;
+ (*receive_configs)[0].decoders[0].payload_name =
+ send_config->encoder_settings.payload_name;
+ (*receive_configs)[0].decoders[0].decoder = decoder_.get();
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait())
+ << "Timed out while waiting for FEC packets to be received.";
+ }
+
+ enum {
+ kFirstPacket,
+ kDropEveryOtherPacketUntilUlpfec,
+ kDropAllMediaPacketsUntilUlpfec,
+ kDropOneMediaPacket,
+ kPassOneMediaPacket,
+ kVerifyUlpfecPacketNotInNackList,
+ } state_;
+
+ rtc::CriticalSection crit_;
+ uint16_t ulpfec_sequence_number_ RTC_GUARDED_BY(&crit_);
+ bool has_last_sequence_number_;
+ uint16_t last_sequence_number_;
+ std::unique_ptr<webrtc::VideoEncoder> encoder_;
+ std::unique_ptr<webrtc::VideoDecoder> decoder_;
+ } test;
+
+ RunBaseTest(&test);
+}
+
+// This test drops the final RTP packet (marker bit set) of the tenth frame,
+// makes sure it is retransmitted and that the frame still renders.
+// Retransmission SSRCs and payload types are also checked.
+void EndToEndTest::DecodesRetransmittedFrame(bool enable_rtx, bool enable_red) {
+ static const int kDroppedFrameNumber = 10;
+ class RetransmissionObserver : public test::EndToEndTest,
+ public rtc::VideoSinkInterface<VideoFrame> {
+ public:
+ RetransmissionObserver(bool enable_rtx, bool enable_red)
+ : EndToEndTest(kDefaultTimeoutMs),
+ payload_type_(GetPayloadType(false, enable_red)),
+ retransmission_ssrc_(enable_rtx ? kSendRtxSsrcs[0]
+ : kVideoSendSsrcs[0]),
+ retransmission_payload_type_(GetPayloadType(enable_rtx, enable_red)),
+ encoder_(VP8Encoder::Create()),
+ marker_bits_observed_(0),
+ retransmitted_timestamp_(0) {}
+
+ private:
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ rtc::CritScope lock(&crit_);
+ RTPHeader header;
+ EXPECT_TRUE(parser_->Parse(packet, length, &header));
+
+ // Ignore padding-only packets over RTX.
+ if (header.payloadType != payload_type_) {
+ EXPECT_EQ(retransmission_ssrc_, header.ssrc);
+ if (length == header.headerLength + header.paddingLength)
+ return SEND_PACKET;
+ }
+
+ if (header.timestamp == retransmitted_timestamp_) {
+ EXPECT_EQ(retransmission_ssrc_, header.ssrc);
+ EXPECT_EQ(retransmission_payload_type_, header.payloadType);
+ return SEND_PACKET;
+ }
+
+      // Found the final packet of the frame to inflict loss on; drop this
+      // packet and expect a retransmission.
+ if (header.payloadType == payload_type_ && header.markerBit &&
+ ++marker_bits_observed_ == kDroppedFrameNumber) {
+ // This should be the only dropped packet.
+ EXPECT_EQ(0u, retransmitted_timestamp_);
+ retransmitted_timestamp_ = header.timestamp;
+ if (std::find(rendered_timestamps_.begin(), rendered_timestamps_.end(),
+ retransmitted_timestamp_) != rendered_timestamps_.end()) {
+        // The frame was rendered before its last packet was scheduled for
+        // sending. This is an extremely rare but possible scenario, because
+        // the prober can resend a packet before its first transmission.
+        // TODO(danilchap): Remove this corner case when the prober can no
+        // longer sneak in between a packet being saved to the history for
+        // resending and the pacer being notified that the packet exists.
+ // See https://bugs.chromium.org/p/webrtc/issues/detail?id=5540 for
+ // details.
+ observation_complete_.Set();
+ }
+ return DROP_PACKET;
+ }
+
+ return SEND_PACKET;
+ }
+
+ void OnFrame(const VideoFrame& frame) override {
+ EXPECT_EQ(kVideoRotation_90, frame.rotation());
+ {
+ rtc::CritScope lock(&crit_);
+ if (frame.timestamp() == retransmitted_timestamp_)
+ observation_complete_.Set();
+ rendered_timestamps_.push_back(frame.timestamp());
+ }
+ orig_renderer_->OnFrame(frame);
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+
+ // Insert ourselves into the rendering pipeline.
+ RTC_DCHECK(!orig_renderer_);
+ orig_renderer_ = (*receive_configs)[0].renderer;
+ RTC_DCHECK(orig_renderer_);
+ (*receive_configs)[0].disable_prerenderer_smoothing = true;
+ (*receive_configs)[0].renderer = this;
+
+ (*receive_configs)[0].rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+
+ if (payload_type_ == kRedPayloadType) {
+ send_config->rtp.ulpfec.ulpfec_payload_type = kUlpfecPayloadType;
+ send_config->rtp.ulpfec.red_payload_type = kRedPayloadType;
+ if (retransmission_ssrc_ == kSendRtxSsrcs[0])
+ send_config->rtp.ulpfec.red_rtx_payload_type = kRtxRedPayloadType;
+ (*receive_configs)[0].rtp.ulpfec_payload_type =
+ send_config->rtp.ulpfec.ulpfec_payload_type;
+ (*receive_configs)[0].rtp.red_payload_type =
+ send_config->rtp.ulpfec.red_payload_type;
+ }
+
+ if (retransmission_ssrc_ == kSendRtxSsrcs[0]) {
+ send_config->rtp.rtx.ssrcs.push_back(kSendRtxSsrcs[0]);
+ send_config->rtp.rtx.payload_type = kSendRtxPayloadType;
+ (*receive_configs)[0].rtp.rtx_ssrc = kSendRtxSsrcs[0];
+ (*receive_configs)[0]
+ .rtp.rtx_associated_payload_types[(payload_type_ == kRedPayloadType)
+ ? kRtxRedPayloadType
+ : kSendRtxPayloadType] =
+ payload_type_;
+ }
+ // Configure encoding and decoding with VP8, since generic packetization
+ // doesn't support FEC with NACK.
+ RTC_DCHECK_EQ(1, (*receive_configs)[0].decoders.size());
+ send_config->encoder_settings.encoder = encoder_.get();
+ send_config->encoder_settings.payload_name = "VP8";
+ (*receive_configs)[0].decoders[0].payload_name = "VP8";
+ }
+
+ void OnFrameGeneratorCapturerCreated(
+ test::FrameGeneratorCapturer* frame_generator_capturer) override {
+ frame_generator_capturer->SetFakeRotation(kVideoRotation_90);
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait())
+ << "Timed out while waiting for retransmission to render.";
+ }
+
+ int GetPayloadType(bool use_rtx, bool use_fec) {
+ if (use_fec) {
+ if (use_rtx)
+ return kRtxRedPayloadType;
+ return kRedPayloadType;
+ }
+ if (use_rtx)
+ return kSendRtxPayloadType;
+ return kFakeVideoSendPayloadType;
+ }
+
+ rtc::CriticalSection crit_;
+ rtc::VideoSinkInterface<VideoFrame>* orig_renderer_ = nullptr;
+ const int payload_type_;
+ const uint32_t retransmission_ssrc_;
+ const int retransmission_payload_type_;
+ std::unique_ptr<VideoEncoder> encoder_;
+ const std::string payload_name_;
+ int marker_bits_observed_;
+ uint32_t retransmitted_timestamp_ RTC_GUARDED_BY(&crit_);
+ std::vector<uint32_t> rendered_timestamps_ RTC_GUARDED_BY(&crit_);
+ } test(enable_rtx, enable_red);
+
+ RunBaseTest(&test);
+}
+
+TEST_P(EndToEndTest, DecodesRetransmittedFrame) {
+ DecodesRetransmittedFrame(false, false);
+}
+
+TEST_P(EndToEndTest, DecodesRetransmittedFrameOverRtx) {
+ DecodesRetransmittedFrame(true, false);
+}
+
+TEST_P(EndToEndTest, DecodesRetransmittedFrameByRed) {
+ DecodesRetransmittedFrame(false, true);
+}
+
+TEST_P(EndToEndTest, DecodesRetransmittedFrameByRedOverRtx) {
+ DecodesRetransmittedFrame(true, true);
+}
+
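+// Drops every packet of one frame, as well as all retransmissions, so the
+// only way for the receiver to recover is to request a keyframe via PLI.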
+void EndToEndTest::ReceivesPliAndRecovers(int rtp_history_ms) {
+ static const int kPacketsToDrop = 1;
+
+ class PliObserver : public test::EndToEndTest,
+ public rtc::VideoSinkInterface<VideoFrame> {
+ public:
+ explicit PliObserver(int rtp_history_ms)
+ : EndToEndTest(kLongTimeoutMs),
+ rtp_history_ms_(rtp_history_ms),
+ nack_enabled_(rtp_history_ms > 0),
+ highest_dropped_timestamp_(0),
+ frames_to_drop_(0),
+ received_pli_(false) {}
+
+ private:
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ rtc::CritScope lock(&crit_);
+ RTPHeader header;
+ EXPECT_TRUE(parser_->Parse(packet, length, &header));
+
+ // Drop all retransmitted packets to force a PLI.
+ if (header.timestamp <= highest_dropped_timestamp_)
+ return DROP_PACKET;
+
+ if (frames_to_drop_ > 0) {
+ highest_dropped_timestamp_ = header.timestamp;
+ --frames_to_drop_;
+ return DROP_PACKET;
+ }
+
+ return SEND_PACKET;
+ }
+
+ Action OnReceiveRtcp(const uint8_t* packet, size_t length) override {
+ rtc::CritScope lock(&crit_);
+ test::RtcpPacketParser parser;
+ EXPECT_TRUE(parser.Parse(packet, length));
+ if (!nack_enabled_)
+ EXPECT_EQ(0, parser.nack()->num_packets());
+ if (parser.pli()->num_packets() > 0)
+ received_pli_ = true;
+ return SEND_PACKET;
+ }
+
+ void OnFrame(const VideoFrame& video_frame) override {
+ rtc::CritScope lock(&crit_);
+ if (received_pli_ &&
+ video_frame.timestamp() > highest_dropped_timestamp_) {
+ observation_complete_.Set();
+ }
+ if (!received_pli_)
+ frames_to_drop_ = kPacketsToDrop;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->rtp.nack.rtp_history_ms = rtp_history_ms_;
+ (*receive_configs)[0].rtp.nack.rtp_history_ms = rtp_history_ms_;
+ (*receive_configs)[0].renderer = this;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timed out waiting for PLI to be "
+ "received and a frame to be "
+ "rendered afterwards.";
+ }
+
+ rtc::CriticalSection crit_;
+ int rtp_history_ms_;
+ bool nack_enabled_;
+ uint32_t highest_dropped_timestamp_ RTC_GUARDED_BY(&crit_);
+ int frames_to_drop_ RTC_GUARDED_BY(&crit_);
+ bool received_pli_ RTC_GUARDED_BY(&crit_);
+ } test(rtp_history_ms);
+
+ RunBaseTest(&test);
+}
+
+TEST_P(EndToEndTest, ReceivesPliAndRecoversWithNack) {
+ ReceivesPliAndRecovers(1000);
+}
+
+TEST_P(EndToEndTest, ReceivesPliAndRecoversWithoutNack) {
+ ReceivesPliAndRecovers(0);
+}
+
+TEST_P(EndToEndTest, UnknownRtpPacketGivesUnknownSsrcReturnCode) {
+ class PacketInputObserver : public PacketReceiver {
+ public:
+ explicit PacketInputObserver(PacketReceiver* receiver)
+ : receiver_(receiver), delivered_packet_(false, false) {}
+
+ bool Wait() { return delivered_packet_.Wait(kDefaultTimeoutMs); }
+
+ private:
+ DeliveryStatus DeliverPacket(MediaType media_type,
+ const uint8_t* packet,
+ size_t length,
+ const PacketTime& packet_time) override {
+ if (RtpHeaderParser::IsRtcp(packet, length)) {
+ return receiver_->DeliverPacket(media_type, packet, length,
+ packet_time);
+ } else {
+ DeliveryStatus delivery_status =
+ receiver_->DeliverPacket(media_type, packet, length, packet_time);
+ EXPECT_EQ(DELIVERY_UNKNOWN_SSRC, delivery_status);
+ delivered_packet_.Set();
+ return delivery_status;
+ }
+ }
+
+ PacketReceiver* receiver_;
+ rtc::Event delivered_packet_;
+ };
+
+ std::unique_ptr<test::DirectTransport> send_transport;
+ std::unique_ptr<test::DirectTransport> receive_transport;
+ std::unique_ptr<PacketInputObserver> input_observer;
+
+ task_queue_.SendTask([this, &send_transport, &receive_transport,
+ &input_observer]() {
+ CreateCalls(Call::Config(event_log_.get()), Call::Config(event_log_.get()));
+
+ send_transport = rtc::MakeUnique<test::DirectTransport>(
+ &task_queue_, sender_call_.get(), payload_type_map_);
+ receive_transport = rtc::MakeUnique<test::DirectTransport>(
+ &task_queue_, receiver_call_.get(), payload_type_map_);
+ input_observer =
+ rtc::MakeUnique<PacketInputObserver>(receiver_call_->Receiver());
+ send_transport->SetReceiver(input_observer.get());
+ receive_transport->SetReceiver(sender_call_->Receiver());
+
+ CreateSendConfig(1, 0, 0, send_transport.get());
+ CreateMatchingReceiveConfigs(receive_transport.get());
+
+ CreateVideoStreams();
+ CreateFrameGeneratorCapturer(kDefaultFramerate, kDefaultWidth,
+ kDefaultHeight);
+ Start();
+
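+    // Tear down the only receive stream so that subsequent media packets
+    // arrive with an SSRC that is unknown to the receiver call.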
+ receiver_call_->DestroyVideoReceiveStream(video_receive_streams_[0]);
+ video_receive_streams_.clear();
+ });
+
+ // Wait() waits for a received packet.
+ EXPECT_TRUE(input_observer->Wait());
+
+ task_queue_.SendTask([this, &send_transport, &receive_transport]() {
+ Stop();
+ DestroyStreams();
+ send_transport.reset();
+ receive_transport.reset();
+ DestroyCalls();
+ });
+}
+
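+// Drops every third RTP packet to provoke RTCP feedback and checks that the
+// receiver's RTCP packets are consistent with the negotiated RTCP mode.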
+void EndToEndTest::RespectsRtcpMode(RtcpMode rtcp_mode) {
+ static const int kNumCompoundRtcpPacketsToObserve = 10;
+ class RtcpModeObserver : public test::EndToEndTest {
+ public:
+ explicit RtcpModeObserver(RtcpMode rtcp_mode)
+ : EndToEndTest(kDefaultTimeoutMs),
+ rtcp_mode_(rtcp_mode),
+ sent_rtp_(0),
+ sent_rtcp_(0) {}
+
+ private:
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ rtc::CritScope lock(&crit_);
+ if (++sent_rtp_ % 3 == 0)
+ return DROP_PACKET;
+
+ return SEND_PACKET;
+ }
+
+ Action OnReceiveRtcp(const uint8_t* packet, size_t length) override {
+ rtc::CritScope lock(&crit_);
+ ++sent_rtcp_;
+ test::RtcpPacketParser parser;
+ EXPECT_TRUE(parser.Parse(packet, length));
+
+ EXPECT_EQ(0, parser.sender_report()->num_packets());
+
+ switch (rtcp_mode_) {
+ case RtcpMode::kCompound:
+ // TODO(holmer): We shouldn't send transport feedback alone if
+ // compound RTCP is negotiated.
+ if (parser.receiver_report()->num_packets() == 0 &&
+ parser.transport_feedback()->num_packets() == 0) {
+ ADD_FAILURE() << "Received RTCP packet without receiver report for "
+ "RtcpMode::kCompound.";
+ observation_complete_.Set();
+ }
+
+ if (sent_rtcp_ >= kNumCompoundRtcpPacketsToObserve)
+ observation_complete_.Set();
+
+ break;
+ case RtcpMode::kReducedSize:
+ if (parser.receiver_report()->num_packets() == 0)
+ observation_complete_.Set();
+ break;
+ case RtcpMode::kOff:
+ RTC_NOTREACHED();
+ break;
+ }
+
+ return SEND_PACKET;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ (*receive_configs)[0].rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ (*receive_configs)[0].rtp.rtcp_mode = rtcp_mode_;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait())
+ << (rtcp_mode_ == RtcpMode::kCompound
+ ? "Timed out before observing enough compound packets."
+ : "Timed out before receiving a non-compound RTCP packet.");
+ }
+
+ RtcpMode rtcp_mode_;
+ rtc::CriticalSection crit_;
+ // Must be protected since RTCP can be sent by both the process thread
+ // and the pacer thread.
+ int sent_rtp_ RTC_GUARDED_BY(&crit_);
+ int sent_rtcp_ RTC_GUARDED_BY(&crit_);
+ } test(rtcp_mode);
+
+ RunBaseTest(&test);
+}
+
+TEST_P(EndToEndTest, UsesRtcpCompoundMode) {
+ RespectsRtcpMode(RtcpMode::kCompound);
+}
+
+TEST_P(EndToEndTest, UsesRtcpReducedSizeMode) {
+ RespectsRtcpMode(RtcpMode::kReducedSize);
+}
+
+// Sets up a Call with three send streams, using different resolutions and
+// SSRCs. Another Call is set up to receive all three of these with different
+// renderers.
+class MultiStreamTest {
+ public:
+ static constexpr size_t kNumStreams = 3;
+ const uint8_t kVideoPayloadType = 124;
+ const std::map<uint8_t, MediaType> payload_type_map_ = {
+ {kVideoPayloadType, MediaType::VIDEO}};
+
+ struct CodecSettings {
+ uint32_t ssrc;
+ int width;
+ int height;
+ } codec_settings[kNumStreams];
+
+ explicit MultiStreamTest(test::SingleThreadedTaskQueueForTesting* task_queue)
+ : task_queue_(task_queue) {
+ // TODO(sprang): Cleanup when msvc supports explicit initializers for array.
+ codec_settings[0] = {1, 640, 480};
+ codec_settings[1] = {2, 320, 240};
+ codec_settings[2] = {3, 240, 160};
+ }
+
+ virtual ~MultiStreamTest() {}
+
+ void RunTest() {
+ webrtc::RtcEventLogNullImpl event_log;
+ Call::Config config(&event_log);
+ std::unique_ptr<Call> sender_call;
+ std::unique_ptr<Call> receiver_call;
+ std::unique_ptr<test::DirectTransport> sender_transport;
+ std::unique_ptr<test::DirectTransport> receiver_transport;
+
+ VideoSendStream* send_streams[kNumStreams];
+ VideoReceiveStream* receive_streams[kNumStreams];
+ test::FrameGeneratorCapturer* frame_generators[kNumStreams];
+ std::vector<std::unique_ptr<VideoDecoder>> allocated_decoders;
+ std::unique_ptr<VideoEncoder> encoders[kNumStreams];
+
+ task_queue_->SendTask([&]() {
+ sender_call = rtc::WrapUnique(Call::Create(config));
+ receiver_call = rtc::WrapUnique(Call::Create(config));
+ sender_transport =
+ rtc::WrapUnique(CreateSendTransport(task_queue_, sender_call.get()));
+ receiver_transport = rtc::WrapUnique(
+ CreateReceiveTransport(task_queue_, receiver_call.get()));
+
+ sender_transport->SetReceiver(receiver_call->Receiver());
+ receiver_transport->SetReceiver(sender_call->Receiver());
+
+ for (size_t i = 0; i < kNumStreams; ++i)
+ encoders[i] = VP8Encoder::Create();
+
+ for (size_t i = 0; i < kNumStreams; ++i) {
+ uint32_t ssrc = codec_settings[i].ssrc;
+ int width = codec_settings[i].width;
+ int height = codec_settings[i].height;
+
+ VideoSendStream::Config send_config(sender_transport.get());
+ send_config.rtp.ssrcs.push_back(ssrc);
+ send_config.encoder_settings.encoder = encoders[i].get();
+ send_config.encoder_settings.payload_name = "VP8";
+ send_config.encoder_settings.payload_type = kVideoPayloadType;
+ VideoEncoderConfig encoder_config;
+ test::FillEncoderConfiguration(1, &encoder_config);
+ encoder_config.max_bitrate_bps = 100000;
+
+ UpdateSendConfig(i, &send_config, &encoder_config,
+ &frame_generators[i]);
+
+ send_streams[i] = sender_call->CreateVideoSendStream(
+ send_config.Copy(), encoder_config.Copy());
+ send_streams[i]->Start();
+
+ VideoReceiveStream::Config receive_config(receiver_transport.get());
+ receive_config.rtp.remote_ssrc = ssrc;
+ receive_config.rtp.local_ssrc = test::CallTest::kReceiverLocalVideoSsrc;
+ VideoReceiveStream::Decoder decoder =
+ test::CreateMatchingDecoder(send_config.encoder_settings);
+ allocated_decoders.push_back(
+ std::unique_ptr<VideoDecoder>(decoder.decoder));
+ receive_config.decoders.push_back(decoder);
+
+ UpdateReceiveConfig(i, &receive_config);
+
+ receive_streams[i] =
+ receiver_call->CreateVideoReceiveStream(std::move(receive_config));
+ receive_streams[i]->Start();
+
+ frame_generators[i] = test::FrameGeneratorCapturer::Create(
+ width, height, 30, Clock::GetRealTimeClock());
+ send_streams[i]->SetSource(
+ frame_generators[i],
+ VideoSendStream::DegradationPreference::kMaintainFramerate);
+ frame_generators[i]->Start();
+ }
+ });
+
+ Wait();
+
+ task_queue_->SendTask([&]() {
+ for (size_t i = 0; i < kNumStreams; ++i) {
+ frame_generators[i]->Stop();
+ sender_call->DestroyVideoSendStream(send_streams[i]);
+ receiver_call->DestroyVideoReceiveStream(receive_streams[i]);
+ delete frame_generators[i];
+ }
+
+ sender_transport.reset();
+ receiver_transport.reset();
+
+ sender_call.reset();
+ receiver_call.reset();
+ });
+ }
+
+ protected:
+ virtual void Wait() = 0;
+  // Note: frame_generator is a pointer-to-pointer, since the actual instance
+ // hasn't been created at the time of this call. Only when packets/frames
+ // start flowing should this be dereferenced.
+ virtual void UpdateSendConfig(
+ size_t stream_index,
+ VideoSendStream::Config* send_config,
+ VideoEncoderConfig* encoder_config,
+ test::FrameGeneratorCapturer** frame_generator) {}
+ virtual void UpdateReceiveConfig(size_t stream_index,
+ VideoReceiveStream::Config* receive_config) {
+ }
+ virtual test::DirectTransport* CreateSendTransport(
+ test::SingleThreadedTaskQueueForTesting* task_queue,
+ Call* sender_call) {
+ return new test::DirectTransport(task_queue, sender_call,
+ payload_type_map_);
+ }
+ virtual test::DirectTransport* CreateReceiveTransport(
+ test::SingleThreadedTaskQueueForTesting* task_queue,
+ Call* receiver_call) {
+ return new test::DirectTransport(task_queue, receiver_call,
+ payload_type_map_);
+ }
+
+ test::SingleThreadedTaskQueueForTesting* const task_queue_;
+};
+
+// Each renderer verifies that it receives the expected resolution, and as soon
+// as every renderer has received a frame, the test finishes.
+TEST_P(EndToEndTest, SendsAndReceivesMultipleStreams) {
+ class VideoOutputObserver : public rtc::VideoSinkInterface<VideoFrame> {
+ public:
+ VideoOutputObserver(const MultiStreamTest::CodecSettings& settings,
+ uint32_t ssrc,
+ test::FrameGeneratorCapturer** frame_generator)
+ : settings_(settings),
+ ssrc_(ssrc),
+ frame_generator_(frame_generator),
+ done_(false, false) {}
+
+ void OnFrame(const VideoFrame& video_frame) override {
+ EXPECT_EQ(settings_.width, video_frame.width());
+ EXPECT_EQ(settings_.height, video_frame.height());
+ (*frame_generator_)->Stop();
+ done_.Set();
+ }
+
+ uint32_t Ssrc() { return ssrc_; }
+
+ bool Wait() { return done_.Wait(kDefaultTimeoutMs); }
+
+ private:
+ const MultiStreamTest::CodecSettings& settings_;
+ const uint32_t ssrc_;
+ test::FrameGeneratorCapturer** const frame_generator_;
+ rtc::Event done_;
+ };
+
+ class Tester : public MultiStreamTest {
+ public:
+ explicit Tester(test::SingleThreadedTaskQueueForTesting* task_queue)
+ : MultiStreamTest(task_queue) {}
+ virtual ~Tester() {}
+
+ protected:
+ void Wait() override {
+ for (const auto& observer : observers_) {
+        EXPECT_TRUE(observer->Wait())
+            << "Timed out waiting for a frame on SSRC " << observer->Ssrc();
+ }
+ }
+
+ void UpdateSendConfig(
+ size_t stream_index,
+ VideoSendStream::Config* send_config,
+ VideoEncoderConfig* encoder_config,
+ test::FrameGeneratorCapturer** frame_generator) override {
+ observers_[stream_index].reset(new VideoOutputObserver(
+ codec_settings[stream_index], send_config->rtp.ssrcs.front(),
+ frame_generator));
+ }
+
+ void UpdateReceiveConfig(
+ size_t stream_index,
+ VideoReceiveStream::Config* receive_config) override {
+ receive_config->renderer = observers_[stream_index].get();
+ }
+
+ private:
+ std::unique_ptr<VideoOutputObserver> observers_[kNumStreams];
+ } tester(&task_queue_);
+
+ tester.RunTest();
+}
+
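+// Verifies that media, RTX retransmissions and padding all carry the
+// transport-wide sequence number extension, that the value matches the
+// packet_id reported through PacketOptions, and that the received sequence
+// is unique and free of gaps.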
+TEST_P(EndToEndTest, AssignsTransportSequenceNumbers) {
+ static const int kExtensionId = 5;
+
+ class RtpExtensionHeaderObserver : public test::DirectTransport {
+ public:
+ RtpExtensionHeaderObserver(
+ test::SingleThreadedTaskQueueForTesting* task_queue,
+ Call* sender_call,
+ const uint32_t& first_media_ssrc,
+ const std::map<uint32_t, uint32_t>& ssrc_map,
+ const std::map<uint8_t, MediaType>& payload_type_map)
+ : DirectTransport(task_queue, sender_call, payload_type_map),
+ done_(false, false),
+ parser_(RtpHeaderParser::Create()),
+ first_media_ssrc_(first_media_ssrc),
+ rtx_to_media_ssrcs_(ssrc_map),
+ padding_observed_(false),
+ rtx_padding_observed_(false),
+ retransmit_observed_(false),
+ started_(false) {
+ parser_->RegisterRtpHeaderExtension(kRtpExtensionTransportSequenceNumber,
+ kExtensionId);
+ }
+ virtual ~RtpExtensionHeaderObserver() {}
+
+ bool SendRtp(const uint8_t* data,
+ size_t length,
+ const PacketOptions& options) override {
+ {
+ rtc::CritScope cs(&lock_);
+
+ if (IsDone())
+ return false;
+
+ if (started_) {
+ RTPHeader header;
+ EXPECT_TRUE(parser_->Parse(data, length, &header));
+ bool drop_packet = false;
+
+ EXPECT_TRUE(header.extension.hasTransportSequenceNumber);
+ EXPECT_EQ(options.packet_id,
+ header.extension.transportSequenceNumber);
+ if (!streams_observed_.empty()) {
+ // Unwrap packet id and verify uniqueness.
+ int64_t packet_id = unwrapper_.Unwrap(options.packet_id);
+ EXPECT_TRUE(received_packed_ids_.insert(packet_id).second);
+ }
+
+ // Drop (up to) every 17th packet, so we get retransmits.
+ // Only drop media, and not on the first stream (otherwise it will be
+ // hard to distinguish from padding, which is always sent on the first
+ // stream).
+ if (header.payloadType != kSendRtxPayloadType &&
+ header.ssrc != first_media_ssrc_ &&
+ header.extension.transportSequenceNumber % 17 == 0) {
+ dropped_seq_[header.ssrc].insert(header.sequenceNumber);
+ drop_packet = true;
+ }
+
+ if (header.payloadType == kSendRtxPayloadType) {
+ uint16_t original_sequence_number =
+ ByteReader<uint16_t>::ReadBigEndian(&data[header.headerLength]);
+ uint32_t original_ssrc =
+ rtx_to_media_ssrcs_.find(header.ssrc)->second;
+ std::set<uint16_t>* seq_no_map = &dropped_seq_[original_ssrc];
+ auto it = seq_no_map->find(original_sequence_number);
+ if (it != seq_no_map->end()) {
+ retransmit_observed_ = true;
+ seq_no_map->erase(it);
+ } else {
+ rtx_padding_observed_ = true;
+ }
+ } else {
+ streams_observed_.insert(header.ssrc);
+ }
+
+ if (IsDone())
+ done_.Set();
+
+ if (drop_packet)
+ return true;
+ }
+ }
+
+ return test::DirectTransport::SendRtp(data, length, options);
+ }
+
+ bool IsDone() {
+ bool observed_types_ok =
+ streams_observed_.size() == MultiStreamTest::kNumStreams &&
+ retransmit_observed_ && rtx_padding_observed_;
+ if (!observed_types_ok)
+ return false;
+ // We should not have any gaps in the sequence number range.
+ size_t seqno_range =
+ *received_packed_ids_.rbegin() - *received_packed_ids_.begin() + 1;
+ return seqno_range == received_packed_ids_.size();
+ }
+
+ bool Wait() {
+ {
+      // Can't be sure until this point that rtx_to_media_ssrcs_ etc. have
+ // been initialized and are OK to read.
+ rtc::CritScope cs(&lock_);
+ started_ = true;
+ }
+ return done_.Wait(kDefaultTimeoutMs);
+ }
+
+ rtc::CriticalSection lock_;
+ rtc::Event done_;
+ std::unique_ptr<RtpHeaderParser> parser_;
+ SequenceNumberUnwrapper unwrapper_;
+ std::set<int64_t> received_packed_ids_;
+ std::set<uint32_t> streams_observed_;
+ std::map<uint32_t, std::set<uint16_t>> dropped_seq_;
+ const uint32_t& first_media_ssrc_;
+ const std::map<uint32_t, uint32_t>& rtx_to_media_ssrcs_;
+ bool padding_observed_;
+ bool rtx_padding_observed_;
+ bool retransmit_observed_;
+ bool started_;
+ };
+
+ class TransportSequenceNumberTester : public MultiStreamTest {
+ public:
+ explicit TransportSequenceNumberTester(
+ test::SingleThreadedTaskQueueForTesting* task_queue)
+ : MultiStreamTest(task_queue),
+ first_media_ssrc_(0),
+ observer_(nullptr) {}
+ virtual ~TransportSequenceNumberTester() {}
+
+ protected:
+ void Wait() override {
+ RTC_DCHECK(observer_);
+ EXPECT_TRUE(observer_->Wait());
+ }
+
+ void UpdateSendConfig(
+ size_t stream_index,
+ VideoSendStream::Config* send_config,
+ VideoEncoderConfig* encoder_config,
+ test::FrameGeneratorCapturer** frame_generator) override {
+ send_config->rtp.extensions.clear();
+ send_config->rtp.extensions.push_back(RtpExtension(
+ RtpExtension::kTransportSequenceNumberUri, kExtensionId));
+
+      // Force some padding to be sent. Note that since we do send media
+      // packets we cannot guarantee that a padding-only packet is sent.
+      // Instead, padding will most likely be sent as an RTX packet.
+ const int kPaddingBitrateBps = 50000;
+ encoder_config->max_bitrate_bps = 200000;
+ encoder_config->min_transmit_bitrate_bps =
+ encoder_config->max_bitrate_bps + kPaddingBitrateBps;
+
+ // Configure RTX for redundant payload padding.
+ send_config->rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ send_config->rtp.rtx.ssrcs.push_back(kSendRtxSsrcs[stream_index]);
+ send_config->rtp.rtx.payload_type = kSendRtxPayloadType;
+ rtx_to_media_ssrcs_[kSendRtxSsrcs[stream_index]] =
+ send_config->rtp.ssrcs[0];
+
+ if (stream_index == 0)
+ first_media_ssrc_ = send_config->rtp.ssrcs[0];
+ }
+
+ void UpdateReceiveConfig(
+ size_t stream_index,
+ VideoReceiveStream::Config* receive_config) override {
+ receive_config->rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ receive_config->rtp.extensions.clear();
+ receive_config->rtp.extensions.push_back(RtpExtension(
+ RtpExtension::kTransportSequenceNumberUri, kExtensionId));
+ receive_config->renderer = &fake_renderer_;
+ }
+
+ test::DirectTransport* CreateSendTransport(
+ test::SingleThreadedTaskQueueForTesting* task_queue,
+ Call* sender_call) override {
+ std::map<uint8_t, MediaType> payload_type_map =
+ MultiStreamTest::payload_type_map_;
+ RTC_DCHECK(payload_type_map.find(kSendRtxPayloadType) ==
+ payload_type_map.end());
+ payload_type_map[kSendRtxPayloadType] = MediaType::VIDEO;
+ observer_ = new RtpExtensionHeaderObserver(
+ task_queue, sender_call, first_media_ssrc_, rtx_to_media_ssrcs_,
+ payload_type_map);
+ return observer_;
+ }
+
+ private:
+ test::FakeVideoRenderer fake_renderer_;
+ uint32_t first_media_ssrc_;
+ std::map<uint32_t, uint32_t> rtx_to_media_ssrcs_;
+ RtpExtensionHeaderObserver* observer_;
+ } tester(&task_queue_);
+
+ tester.RunTest();
+}
+
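+// Verifies that RTCP transport feedback is sent by the receiver if and only
+// if transport_cc is enabled on the receive stream.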
+class TransportFeedbackTester : public test::EndToEndTest {
+ public:
+ TransportFeedbackTester(bool feedback_enabled,
+ size_t num_video_streams,
+ size_t num_audio_streams)
+ : EndToEndTest(::webrtc::EndToEndTest::kDefaultTimeoutMs),
+ feedback_enabled_(feedback_enabled),
+ num_video_streams_(num_video_streams),
+ num_audio_streams_(num_audio_streams),
+ receiver_call_(nullptr) {
+    // Only one stream of each type is supported for now.
+ EXPECT_LE(num_video_streams, 1u);
+ EXPECT_LE(num_audio_streams, 1u);
+ }
+
+ protected:
+ Action OnSendRtcp(const uint8_t* data, size_t length) override {
+ EXPECT_FALSE(HasTransportFeedback(data, length));
+ return SEND_PACKET;
+ }
+
+ Action OnReceiveRtcp(const uint8_t* data, size_t length) override {
+ if (HasTransportFeedback(data, length))
+ observation_complete_.Set();
+ return SEND_PACKET;
+ }
+
+ bool HasTransportFeedback(const uint8_t* data, size_t length) const {
+ test::RtcpPacketParser parser;
+ EXPECT_TRUE(parser.Parse(data, length));
+ return parser.transport_feedback()->num_packets() > 0;
+ }
+
+ void PerformTest() override {
+ const int64_t kDisabledFeedbackTimeoutMs = 5000;
+ EXPECT_EQ(feedback_enabled_,
+ observation_complete_.Wait(feedback_enabled_
+ ? test::CallTest::kDefaultTimeoutMs
+ : kDisabledFeedbackTimeoutMs));
+ }
+
+ void OnCallsCreated(Call* sender_call, Call* receiver_call) override {
+ receiver_call_ = receiver_call;
+ }
+
+ size_t GetNumVideoStreams() const override { return num_video_streams_; }
+ size_t GetNumAudioStreams() const override { return num_audio_streams_; }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ (*receive_configs)[0].rtp.transport_cc = feedback_enabled_;
+ }
+
+ void ModifyAudioConfigs(
+ AudioSendStream::Config* send_config,
+ std::vector<AudioReceiveStream::Config>* receive_configs) override {
+ send_config->rtp.extensions.clear();
+ send_config->rtp.extensions.push_back(
+ RtpExtension(RtpExtension::kTransportSequenceNumberUri, kExtensionId));
+ (*receive_configs)[0].rtp.extensions.clear();
+ (*receive_configs)[0].rtp.extensions = send_config->rtp.extensions;
+ (*receive_configs)[0].rtp.transport_cc = feedback_enabled_;
+ }
+
+ private:
+ static const int kExtensionId = 5;
+ const bool feedback_enabled_;
+ const size_t num_video_streams_;
+ const size_t num_audio_streams_;
+ Call* receiver_call_;
+};
+
+TEST_P(EndToEndTest, VideoReceivesTransportFeedback) {
+ TransportFeedbackTester test(true, 1, 0);
+ RunBaseTest(&test);
+}
+
+TEST_P(EndToEndTest, VideoTransportFeedbackNotConfigured) {
+ TransportFeedbackTester test(false, 1, 0);
+ RunBaseTest(&test);
+}
+
+TEST_P(EndToEndTest, AudioReceivesTransportFeedback) {
+ TransportFeedbackTester test(true, 0, 1);
+ RunBaseTest(&test);
+}
+
+TEST_P(EndToEndTest, AudioTransportFeedbackNotConfigured) {
+ TransportFeedbackTester test(false, 0, 1);
+ RunBaseTest(&test);
+}
+
+TEST_P(EndToEndTest, AudioVideoReceivesTransportFeedback) {
+ TransportFeedbackTester test(true, 1, 1);
+ RunBaseTest(&test);
+}
+
+TEST_P(EndToEndTest, StopsSendingMediaWithoutFeedback) {
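+  // Enable the congestion window experiment; the trailing 250 presumably
+  // caps the congestion window at roughly 250 ms worth of in-flight data,
+  // so the sender must stop sending media once feedback stops arriving.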
+ test::ScopedFieldTrials override_field_trials(
+ "WebRTC-CwndExperiment/Enabled-250/");
+
+ class TransportFeedbackTester : public test::EndToEndTest {
+ public:
+ TransportFeedbackTester(size_t num_video_streams, size_t num_audio_streams)
+ : EndToEndTest(::webrtc::EndToEndTest::kDefaultTimeoutMs),
+ num_video_streams_(num_video_streams),
+ num_audio_streams_(num_audio_streams),
+ media_sent_(0),
+ padding_sent_(0) {
+      // Only one stream of each type is supported for now.
+ EXPECT_LE(num_video_streams, 1u);
+ EXPECT_LE(num_audio_streams, 1u);
+ }
+
+ protected:
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ RTPHeader header;
+ EXPECT_TRUE(parser_->Parse(packet, length, &header));
+ const bool only_padding =
+ header.headerLength + header.paddingLength == length;
+ rtc::CritScope lock(&crit_);
+ if (only_padding) {
+ ++padding_sent_;
+ } else {
+ ++media_sent_;
+ EXPECT_LT(media_sent_, 40) << "Media sent without feedback.";
+ }
+
+ return SEND_PACKET;
+ }
+
+ Action OnReceiveRtcp(const uint8_t* data, size_t length) override {
+ rtc::CritScope lock(&crit_);
+ if (media_sent_ > 20 && HasTransportFeedback(data, length)) {
+ return DROP_PACKET;
+ }
+ return SEND_PACKET;
+ }
+
+ bool HasTransportFeedback(const uint8_t* data, size_t length) const {
+ test::RtcpPacketParser parser;
+ EXPECT_TRUE(parser.Parse(data, length));
+ return parser.transport_feedback()->num_packets() > 0;
+ }
+
+ Call::Config GetSenderCallConfig() override {
+ Call::Config config = EndToEndTest::GetSenderCallConfig();
+ config.bitrate_config.max_bitrate_bps = 300000;
+ return config;
+ }
+
+ void PerformTest() override {
+ const int64_t kDisabledFeedbackTimeoutMs = 10000;
+ observation_complete_.Wait(kDisabledFeedbackTimeoutMs);
+ rtc::CritScope lock(&crit_);
+ EXPECT_GT(padding_sent_, 0);
+ }
+
+ size_t GetNumVideoStreams() const override { return num_video_streams_; }
+ size_t GetNumAudioStreams() const override { return num_audio_streams_; }
+
+ private:
+ const size_t num_video_streams_;
+ const size_t num_audio_streams_;
+ rtc::CriticalSection crit_;
+ int media_sent_ RTC_GUARDED_BY(crit_);
+ int padding_sent_ RTC_GUARDED_BY(crit_);
+ } test(1, 0);
+ RunBaseTest(&test);
+}
+
+TEST_P(EndToEndTest, ObserversEncodedFrames) {
+ class EncodedFrameTestObserver : public EncodedFrameObserver {
+ public:
+ EncodedFrameTestObserver()
+ : length_(0), frame_type_(kEmptyFrame), called_(false, false) {}
+ virtual ~EncodedFrameTestObserver() {}
+
+ virtual void EncodedFrameCallback(const EncodedFrame& encoded_frame) {
+ frame_type_ = encoded_frame.frame_type_;
+ length_ = encoded_frame.length_;
+ buffer_.reset(new uint8_t[length_]);
+ memcpy(buffer_.get(), encoded_frame.data_, length_);
+ called_.Set();
+ }
+
+ bool Wait() { return called_.Wait(kDefaultTimeoutMs); }
+
+ void ExpectEqualFrames(const EncodedFrameTestObserver& observer) {
+ ASSERT_EQ(length_, observer.length_)
+ << "Observed frames are of different lengths.";
+ EXPECT_EQ(frame_type_, observer.frame_type_)
+ << "Observed frames have different frame types.";
+ EXPECT_EQ(0, memcmp(buffer_.get(), observer.buffer_.get(), length_))
+ << "Observed encoded frames have different content.";
+ }
+
+ private:
+ std::unique_ptr<uint8_t[]> buffer_;
+ size_t length_;
+ FrameType frame_type_;
+ rtc::Event called_;
+ };
+
+ EncodedFrameTestObserver post_encode_observer;
+ EncodedFrameTestObserver pre_decode_observer;
+ test::FrameForwarder forwarder;
+ std::unique_ptr<test::FrameGenerator> frame_generator;
+
+ std::unique_ptr<test::DirectTransport> sender_transport;
+ std::unique_ptr<test::DirectTransport> receiver_transport;
+
+ task_queue_.SendTask([&]() {
+ CreateCalls(Call::Config(event_log_.get()), Call::Config(event_log_.get()));
+
+ sender_transport = rtc::MakeUnique<test::DirectTransport>(
+ &task_queue_, sender_call_.get(), payload_type_map_);
+ receiver_transport = rtc::MakeUnique<test::DirectTransport>(
+ &task_queue_, receiver_call_.get(), payload_type_map_);
+ sender_transport->SetReceiver(receiver_call_->Receiver());
+ receiver_transport->SetReceiver(sender_call_->Receiver());
+
+ CreateSendConfig(1, 0, 0, sender_transport.get());
+ CreateMatchingReceiveConfigs(receiver_transport.get());
+ video_send_config_.post_encode_callback = &post_encode_observer;
+ video_receive_configs_[0].pre_decode_callback = &pre_decode_observer;
+
+ CreateVideoStreams();
+ Start();
+
+ frame_generator = test::FrameGenerator::CreateSquareGenerator(
+ kDefaultWidth, kDefaultHeight);
+ video_send_stream_->SetSource(
+ &forwarder, VideoSendStream::DegradationPreference::kMaintainFramerate);
+ forwarder.IncomingCapturedFrame(*frame_generator->NextFrame());
+ });
+
+ EXPECT_TRUE(post_encode_observer.Wait())
+ << "Timed out while waiting for send-side encoded-frame callback.";
+
+ EXPECT_TRUE(pre_decode_observer.Wait())
+ << "Timed out while waiting for pre-decode encoded-frame callback.";
+
+ post_encode_observer.ExpectEqualFrames(pre_decode_observer);
+
+ task_queue_.SendTask([this, &sender_transport, &receiver_transport]() {
+ Stop();
+ DestroyStreams();
+ sender_transport.reset();
+ receiver_transport.reset();
+ DestroyCalls();
+ });
+}
+
+TEST_P(EndToEndTest, ReceiveStreamSendsRemb) {
+ class RembObserver : public test::EndToEndTest {
+ public:
+ RembObserver() : EndToEndTest(kDefaultTimeoutMs) {}
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->rtp.extensions.clear();
+ send_config->rtp.extensions.push_back(RtpExtension(
+ RtpExtension::kAbsSendTimeUri, test::kAbsSendTimeExtensionId));
+ (*receive_configs)[0].rtp.remb = true;
+ (*receive_configs)[0].rtp.transport_cc = false;
+ }
+
+ Action OnReceiveRtcp(const uint8_t* packet, size_t length) override {
+ test::RtcpPacketParser parser;
+ EXPECT_TRUE(parser.Parse(packet, length));
+
+ if (parser.remb()->num_packets() > 0) {
+ EXPECT_EQ(kReceiverLocalVideoSsrc, parser.remb()->sender_ssrc());
+ EXPECT_LT(0U, parser.remb()->bitrate_bps());
+ EXPECT_EQ(1U, parser.remb()->ssrcs().size());
+ EXPECT_EQ(kVideoSendSsrcs[0], parser.remb()->ssrcs()[0]);
+ observation_complete_.Set();
+ }
+
+ return SEND_PACKET;
+ }
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timed out while waiting for a "
+ "receiver RTCP REMB packet to be "
+ "sent.";
+ }
+ } test;
+
+ RunBaseTest(&test);
+}
+
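+// Waits for both the send-side bandwidth estimate and the pacer delay stats
+// to become non-zero (and, when receive-side BWE is used, for the receiver's
+// estimate as well).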
+class BandwidthStatsTest : public test::EndToEndTest {
+ public:
+ explicit BandwidthStatsTest(bool send_side_bwe)
+ : EndToEndTest(test::CallTest::kDefaultTimeoutMs),
+ sender_call_(nullptr),
+ receiver_call_(nullptr),
+ has_seen_pacer_delay_(false),
+ send_side_bwe_(send_side_bwe) {}
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ if (!send_side_bwe_) {
+ send_config->rtp.extensions.clear();
+ send_config->rtp.extensions.push_back(RtpExtension(
+ RtpExtension::kAbsSendTimeUri, test::kAbsSendTimeExtensionId));
+ (*receive_configs)[0].rtp.remb = true;
+ (*receive_configs)[0].rtp.transport_cc = false;
+ }
+ }
+
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ Call::Stats sender_stats = sender_call_->GetStats();
+ Call::Stats receiver_stats = receiver_call_->GetStats();
+ if (!has_seen_pacer_delay_)
+ has_seen_pacer_delay_ = sender_stats.pacer_delay_ms > 0;
+ if (sender_stats.send_bandwidth_bps > 0 && has_seen_pacer_delay_) {
+ if (send_side_bwe_ || receiver_stats.recv_bandwidth_bps > 0)
+ observation_complete_.Set();
+ }
+ return SEND_PACKET;
+ }
+
+ void OnCallsCreated(Call* sender_call, Call* receiver_call) override {
+ sender_call_ = sender_call;
+ receiver_call_ = receiver_call;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timed out while waiting for "
+ "non-zero bandwidth stats.";
+ }
+
+ private:
+ Call* sender_call_;
+ Call* receiver_call_;
+ bool has_seen_pacer_delay_;
+ const bool send_side_bwe_;
+};
+
+TEST_P(EndToEndTest, VerifySendSideBweStats) {
+ BandwidthStatsTest test(true);
+ RunBaseTest(&test);
+}
+
+TEST_P(EndToEndTest, VerifyRecvSideBweStats) {
+ BandwidthStatsTest test(false);
+ RunBaseTest(&test);
+}
+
+// Verifies that it's possible to limit the send BWE by sending a REMB.
+// This is verified by allowing the send BWE to ramp up to >1000 kbps, then
+// having the test generate a REMB of 500 kbps and verifying that the send BWE
+// is reduced to exactly 500 kbps. Then a REMB of 1000 kbps is generated and
+// the test verifies that the send BWE ramps back up to exactly 1000 kbps.
+TEST_P(EndToEndTest, RembWithSendSideBwe) {
+ class BweObserver : public test::EndToEndTest {
+ public:
+ BweObserver()
+ : EndToEndTest(kDefaultTimeoutMs),
+ sender_call_(nullptr),
+ clock_(Clock::GetRealTimeClock()),
+ sender_ssrc_(0),
+ remb_bitrate_bps_(1000000),
+ receive_transport_(nullptr),
+ stop_event_(false, false),
+ poller_thread_(&BitrateStatsPollingThread,
+ this,
+ "BitrateStatsPollingThread"),
+ state_(kWaitForFirstRampUp),
+ retransmission_rate_limiter_(clock_, 1000) {}
+
+ ~BweObserver() {}
+
+ test::PacketTransport* CreateReceiveTransport(
+ test::SingleThreadedTaskQueueForTesting* task_queue) override {
+ receive_transport_ = new test::PacketTransport(
+ task_queue, nullptr, this, test::PacketTransport::kReceiver,
+ payload_type_map_, FakeNetworkPipe::Config());
+ return receive_transport_;
+ }
+
+ Call::Config GetSenderCallConfig() override {
+ Call::Config config(event_log_.get());
+ // Set a high start bitrate to reduce the test completion time.
+ config.bitrate_config.start_bitrate_bps = remb_bitrate_bps_;
+ return config;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ ASSERT_EQ(1u, send_config->rtp.ssrcs.size());
+ sender_ssrc_ = send_config->rtp.ssrcs[0];
+
+ encoder_config->max_bitrate_bps = 2000000;
+
+ ASSERT_EQ(1u, receive_configs->size());
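+ // Set up a minimal receiver-only RtpRtcp module so the test can inject
+ // REMB feedback on the receive transport.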
+ RtpRtcp::Configuration config;
+ config.receiver_only = true;
+ config.clock = clock_;
+ config.outgoing_transport = receive_transport_;
+ config.retransmission_rate_limiter = &retransmission_rate_limiter_;
+ rtp_rtcp_.reset(RtpRtcp::CreateRtpRtcp(config));
+ rtp_rtcp_->SetRemoteSSRC((*receive_configs)[0].rtp.remote_ssrc);
+ rtp_rtcp_->SetSSRC((*receive_configs)[0].rtp.local_ssrc);
+ rtp_rtcp_->SetRTCPStatus(RtcpMode::kReducedSize);
+ }
+
+ void OnCallsCreated(Call* sender_call, Call* receiver_call) override {
+ sender_call_ = sender_call;
+ }
+
+ static void BitrateStatsPollingThread(void* obj) {
+ static_cast<BweObserver*>(obj)->PollStats();
+ }
+
+ void PollStats() {
+ do {
+ if (sender_call_) {
+ Call::Stats stats = sender_call_->GetStats();
+ switch (state_) {
+ case kWaitForFirstRampUp:
+ if (stats.send_bandwidth_bps >= remb_bitrate_bps_) {
+ state_ = kWaitForRemb;
+ remb_bitrate_bps_ /= 2;
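+ // Cap the estimate at half the ramped-up bitrate and push the REMB
+ // out immediately in a reduced-size RTCP RR.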
+ rtp_rtcp_->SetRemb(
+ remb_bitrate_bps_,
+ std::vector<uint32_t>(&sender_ssrc_, &sender_ssrc_ + 1));
+ rtp_rtcp_->SendRTCP(kRtcpRr);
+ }
+ break;
+
+ case kWaitForRemb:
+ if (stats.send_bandwidth_bps == remb_bitrate_bps_) {
+ state_ = kWaitForSecondRampUp;
+ remb_bitrate_bps_ *= 2;
+ rtp_rtcp_->SetRemb(
+ remb_bitrate_bps_,
+ std::vector<uint32_t>(&sender_ssrc_, &sender_ssrc_ + 1));
+ rtp_rtcp_->SendRTCP(kRtcpRr);
+ }
+ break;
+
+ case kWaitForSecondRampUp:
+ if (stats.send_bandwidth_bps == remb_bitrate_bps_) {
+ observation_complete_.Set();
+ }
+ break;
+ }
+ }
+ } while (!stop_event_.Wait(1000));
+ }
+
+ void PerformTest() override {
+ poller_thread_.Start();
+ EXPECT_TRUE(Wait())
+ << "Timed out while waiting for bitrate to change according to REMB.";
+ stop_event_.Set();
+ poller_thread_.Stop();
+ }
+
+ private:
+ enum TestState { kWaitForFirstRampUp, kWaitForRemb, kWaitForSecondRampUp };
+
+ Call* sender_call_;
+ Clock* const clock_;
+ uint32_t sender_ssrc_;
+ int remb_bitrate_bps_;
+ std::unique_ptr<RtpRtcp> rtp_rtcp_;
+ test::PacketTransport* receive_transport_;
+ rtc::Event stop_event_;
+ rtc::PlatformThread poller_thread_;
+ TestState state_;
+ RateLimiter retransmission_rate_limiter_;
+ } test;
+
+ RunBaseTest(&test);
+}
+
+TEST_P(EndToEndTest, StopSendingKeyframeRequestsForInactiveStream) {
+ class KeyframeRequestObserver : public test::EndToEndTest {
+ public:
+ explicit KeyframeRequestObserver(
+ test::SingleThreadedTaskQueueForTesting* task_queue)
+ : clock_(Clock::GetRealTimeClock()), task_queue_(task_queue) {}
+
+ void OnVideoStreamsCreated(
+ VideoSendStream* send_stream,
+ const std::vector<VideoReceiveStream*>& receive_streams) override {
+ RTC_DCHECK_EQ(1, receive_streams.size());
+ send_stream_ = send_stream;
+ receive_stream_ = receive_streams[0];
+ }
+
+ void PerformTest() override {
+ bool frame_decoded = false;
+ int64_t start_time = clock_->TimeInMilliseconds();
+ while (clock_->TimeInMilliseconds() - start_time <= 5000) {
+ if (receive_stream_->GetStats().frames_decoded > 0) {
+ frame_decoded = true;
+ break;
+ }
+ SleepMs(100);
+ }
+ ASSERT_TRUE(frame_decoded);
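+ // Stop sending; exactly one keyframe request (PLI) should be issued for
+ // the now-inactive stream, after which the requests must stop.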
+ task_queue_->SendTask([this]() { send_stream_->Stop(); });
+ SleepMs(10000);
+ ASSERT_EQ(
+ 1U, receive_stream_->GetStats().rtcp_packet_type_counts.pli_packets);
+ }
+
+ private:
+ Clock* clock_;
+ VideoSendStream* send_stream_;
+ VideoReceiveStream* receive_stream_;
+ test::SingleThreadedTaskQueueForTesting* const task_queue_;
+ } test(&task_queue_);
+
+ RunBaseTest(&test);
+}
+
+class ProbingTest : public test::EndToEndTest {
+ public:
+ explicit ProbingTest(int start_bitrate_bps)
+ : clock_(Clock::GetRealTimeClock()),
+ start_bitrate_bps_(start_bitrate_bps),
+ state_(0),
+ sender_call_(nullptr) {}
+
+ ~ProbingTest() {}
+
+ Call::Config GetSenderCallConfig() override {
+ Call::Config config(event_log_.get());
+ config.bitrate_config.start_bitrate_bps = start_bitrate_bps_;
+ return config;
+ }
+
+ void OnCallsCreated(Call* sender_call, Call* receiver_call) override {
+ sender_call_ = sender_call;
+ }
+
+ protected:
+ Clock* const clock_;
+ const int start_bitrate_bps_;
+ int state_;
+ Call* sender_call_;
+};
+
+TEST_P(EndToEndTest, MAYBE_InitialProbing) {
+ class InitialProbingTest : public ProbingTest {
+ public:
+ explicit InitialProbingTest(bool* success)
+ : ProbingTest(300000), success_(success) {
+ *success_ = false;
+ }
+
+ void PerformTest() override {
+ int64_t start_time_ms = clock_->TimeInMilliseconds();
+ do {
+ if (clock_->TimeInMilliseconds() - start_time_ms > kTimeoutMs)
+ break;
+
+ Call::Stats stats = sender_call_->GetStats();
+ // Initial probing is done with 3x and 6x multipliers of the start
+ // bitrate, so a 4x multiplier is a high enough threshold.
+ if (stats.send_bandwidth_bps > 4 * 300000) {
+ *success_ = true;
+ break;
+ }
+ } while (!observation_complete_.Wait(20));
+ }
+
+ private:
+ const int kTimeoutMs = 1000;
+ bool* const success_;
+ };
+
+ bool success = false;
+ const int kMaxAttempts = 3;
+ for (int i = 0; i < kMaxAttempts; ++i) {
+ InitialProbingTest test(&success);
+ RunBaseTest(&test);
+ if (success)
+ return;
+ }
+ EXPECT_TRUE(success) << "Failed to perform initial probing ("
+ << kMaxAttempts << " attempts).";
+}
+
+// Fails on Linux MSan: bugs.webrtc.org/7428
+#if defined(MEMORY_SANITIZER)
+TEST_P(EndToEndTest, DISABLED_TriggerMidCallProbing) {
+// Fails on iOS bots: bugs.webrtc.org/7851
+#elif defined(TARGET_IPHONE_SIMULATOR) && TARGET_IPHONE_SIMULATOR
+TEST_P(EndToEndTest, DISABLED_TriggerMidCallProbing) {
+#else
+TEST_P(EndToEndTest, TriggerMidCallProbing) {
+#endif
+
+ class TriggerMidCallProbingTest : public ProbingTest {
+ public:
+ TriggerMidCallProbingTest(
+ test::SingleThreadedTaskQueueForTesting* task_queue,
+ bool* success)
+ : ProbingTest(300000), success_(success), task_queue_(task_queue) {}
+
+ void PerformTest() override {
+ *success_ = false;
+ int64_t start_time_ms = clock_->TimeInMilliseconds();
+ do {
+ if (clock_->TimeInMilliseconds() - start_time_ms > kTimeoutMs)
+ break;
+
+ Call::Stats stats = sender_call_->GetStats();
+
+ switch (state_) {
+ case 0:
+ if (stats.send_bandwidth_bps > 5 * 300000) {
+ Call::Config::BitrateConfig bitrate_config;
+ bitrate_config.max_bitrate_bps = 100000;
+ task_queue_->SendTask([this, &bitrate_config]() {
+ sender_call_->SetBitrateConfig(bitrate_config);
+ });
+ ++state_;
+ }
+ break;
+ case 1:
+ if (stats.send_bandwidth_bps < 110000) {
+ Call::Config::BitrateConfig bitrate_config;
+ bitrate_config.max_bitrate_bps = 2500000;
+ task_queue_->SendTask([this, &bitrate_config]() {
+ sender_call_->SetBitrateConfig(bitrate_config);
+ });
+ ++state_;
+ }
+ break;
+ case 2:
+ // Under high CPU load the pacer may not be able to pace packets
+ // at the correct speed, but if we go from 110 to 1250 kbps
+ // in 5 seconds then it is due to probing.
+ if (stats.send_bandwidth_bps > 1250000) {
+ *success_ = true;
+ observation_complete_.Set();
+ }
+ break;
+ }
+ } while (!observation_complete_.Wait(20));
+ }
+
+ private:
+ const int kTimeoutMs = 5000;
+ bool* const success_;
+ test::SingleThreadedTaskQueueForTesting* const task_queue_;
+ };
+
+ bool success = false;
+ const int kMaxAttempts = 3;
+ for (int i = 0; i < kMaxAttempts; ++i) {
+ TriggerMidCallProbingTest test(&task_queue_, &success);
+ RunBaseTest(&test);
+ if (success)
+ return;
+ }
+ EXPECT_TRUE(success) << "Failed to perform mid call probing (" << kMaxAttempts
+ << " attempts).";
+}
+
+TEST_P(EndToEndTest, VerifyNackStats) {
+ static const int kPacketNumberToDrop = 200;
+ class NackObserver : public test::EndToEndTest {
+ public:
+ NackObserver()
+ : EndToEndTest(kLongTimeoutMs),
+ sent_rtp_packets_(0),
+ dropped_rtp_packet_(0),
+ dropped_rtp_packet_requested_(false),
+ send_stream_(nullptr),
+ start_runtime_ms_(-1) {}
+
+ private:
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ rtc::CritScope lock(&crit_);
+ if (++sent_rtp_packets_ == kPacketNumberToDrop) {
+ std::unique_ptr<RtpHeaderParser> parser(RtpHeaderParser::Create());
+ RTPHeader header;
+ EXPECT_TRUE(parser->Parse(packet, length, &header));
+ dropped_rtp_packet_ = header.sequenceNumber;
+ return DROP_PACKET;
+ }
+ VerifyStats();
+ return SEND_PACKET;
+ }
+
+ Action OnReceiveRtcp(const uint8_t* packet, size_t length) override {
+ rtc::CritScope lock(&crit_);
+ test::RtcpPacketParser rtcp_parser;
+ rtcp_parser.Parse(packet, length);
+ const std::vector<uint16_t>& nacks = rtcp_parser.nack()->packet_ids();
+ if (!nacks.empty() && std::find(
+ nacks.begin(), nacks.end(), dropped_rtp_packet_) != nacks.end()) {
+ dropped_rtp_packet_requested_ = true;
+ }
+ return SEND_PACKET;
+ }
+
+ void VerifyStats() RTC_EXCLUSIVE_LOCKS_REQUIRED(&crit_) {
+ if (!dropped_rtp_packet_requested_)
+ return;
+ int send_stream_nack_packets = 0;
+ int receive_stream_nack_packets = 0;
+ VideoSendStream::Stats stats = send_stream_->GetStats();
+ for (std::map<uint32_t, VideoSendStream::StreamStats>::const_iterator it =
+ stats.substreams.begin(); it != stats.substreams.end(); ++it) {
+ const VideoSendStream::StreamStats& stream_stats = it->second;
+ send_stream_nack_packets +=
+ stream_stats.rtcp_packet_type_counts.nack_packets;
+ }
+ for (size_t i = 0; i < receive_streams_.size(); ++i) {
+ VideoReceiveStream::Stats stats = receive_streams_[i]->GetStats();
+ receive_stream_nack_packets +=
+ stats.rtcp_packet_type_counts.nack_packets;
+ }
+ if (send_stream_nack_packets >= 1 && receive_stream_nack_packets >= 1) {
+ // A NACK was sent by the receive stream and received by the send stream.
+ if (MinMetricRunTimePassed())
+ observation_complete_.Set();
+ }
+ }
+
+ bool MinMetricRunTimePassed() {
+ int64_t now = Clock::GetRealTimeClock()->TimeInMilliseconds();
+ if (start_runtime_ms_ == -1) {
+ start_runtime_ms_ = now;
+ return false;
+ }
+ int64_t elapsed_sec = (now - start_runtime_ms_) / 1000;
+ return elapsed_sec > metrics::kMinRunTimeInSeconds;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ (*receive_configs)[0].rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ (*receive_configs)[0].renderer = &fake_renderer_;
+ }
+
+ void OnVideoStreamsCreated(
+ VideoSendStream* send_stream,
+ const std::vector<VideoReceiveStream*>& receive_streams) override {
+ send_stream_ = send_stream;
+ receive_streams_ = receive_streams;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timed out waiting for packet to be NACKed.";
+ }
+
+ test::FakeVideoRenderer fake_renderer_;
+ rtc::CriticalSection crit_;
+ uint64_t sent_rtp_packets_;
+ uint16_t dropped_rtp_packet_ RTC_GUARDED_BY(&crit_);
+ bool dropped_rtp_packet_requested_ RTC_GUARDED_BY(&crit_);
+ std::vector<VideoReceiveStream*> receive_streams_;
+ VideoSendStream* send_stream_;
+ int64_t start_runtime_ms_;
+ } test;
+
+ metrics::Reset();
+ RunBaseTest(&test);
+
+ EXPECT_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.UniqueNackRequestsSentInPercent"));
+ EXPECT_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.UniqueNackRequestsReceivedInPercent"));
+ EXPECT_GT(metrics::MinSample("WebRTC.Video.NackPacketsSentPerMinute"), 0);
+}
+
+void EndToEndTest::VerifyHistogramStats(bool use_rtx,
+ bool use_fec,
+ bool screenshare) {
+ class StatsObserver : public test::EndToEndTest,
+ public rtc::VideoSinkInterface<VideoFrame> {
+ public:
+ StatsObserver(bool use_rtx, bool use_fec, bool screenshare)
+ : EndToEndTest(kLongTimeoutMs),
+ use_rtx_(use_rtx),
+ use_fec_(use_fec),
+ screenshare_(screenshare),
+ // This test uses NACK, so to send FEC we can't use a fake encoder.
+ vp8_encoder_(use_fec ? VP8Encoder::Create() : nullptr),
+ sender_call_(nullptr),
+ receiver_call_(nullptr),
+ start_runtime_ms_(-1),
+ num_frames_received_(0) {}
+
+ private:
+ void OnFrame(const VideoFrame& video_frame) override {
+ // The RTT is needed to estimate |ntp_time_ms| which is used by
+ // end-to-end delay stats. Therefore, start counting received frames once
+ // |ntp_time_ms| is valid.
+ if (video_frame.ntp_time_ms() > 0 &&
+ Clock::GetRealTimeClock()->CurrentNtpInMilliseconds() >=
+ video_frame.ntp_time_ms()) {
+ rtc::CritScope lock(&crit_);
+ ++num_frames_received_;
+ }
+ }
+
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ if (MinMetricRunTimePassed() && MinNumberOfFramesReceived())
+ observation_complete_.Set();
+
+ return SEND_PACKET;
+ }
+
+ bool MinMetricRunTimePassed() {
+ int64_t now = Clock::GetRealTimeClock()->TimeInMilliseconds();
+ if (start_runtime_ms_ == -1) {
+ start_runtime_ms_ = now;
+ return false;
+ }
+ int64_t elapsed_sec = (now - start_runtime_ms_) / 1000;
+ return elapsed_sec > metrics::kMinRunTimeInSeconds * 2;
+ }
+
+ bool MinNumberOfFramesReceived() const {
+ const int kMinRequiredHistogramSamples = 200;
+ rtc::CritScope lock(&crit_);
+ return num_frames_received_ > kMinRequiredHistogramSamples;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ // NACK
+ send_config->rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ (*receive_configs)[0].rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ (*receive_configs)[0].renderer = this;
+ // FEC
+ if (use_fec_) {
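+ // ULPFEC is carried over RED (RFC 2198), so both payload types must be
+ // configured on the sender and the receiver.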
+ send_config->rtp.ulpfec.ulpfec_payload_type = kUlpfecPayloadType;
+ send_config->rtp.ulpfec.red_payload_type = kRedPayloadType;
+ send_config->encoder_settings.encoder = vp8_encoder_.get();
+ send_config->encoder_settings.payload_name = "VP8";
+ (*receive_configs)[0].decoders[0].payload_name = "VP8";
+ (*receive_configs)[0].rtp.red_payload_type = kRedPayloadType;
+ (*receive_configs)[0].rtp.ulpfec_payload_type = kUlpfecPayloadType;
+ }
+ // RTX
+ if (use_rtx_) {
+ send_config->rtp.rtx.ssrcs.push_back(kSendRtxSsrcs[0]);
+ send_config->rtp.rtx.payload_type = kSendRtxPayloadType;
+ (*receive_configs)[0].rtp.rtx_ssrc = kSendRtxSsrcs[0];
+ (*receive_configs)[0]
+ .rtp.rtx_associated_payload_types[kSendRtxPayloadType] =
+ kFakeVideoSendPayloadType;
+ if (use_fec_) {
+ send_config->rtp.ulpfec.red_rtx_payload_type = kRtxRedPayloadType;
+ (*receive_configs)[0]
+ .rtp.rtx_associated_payload_types[kRtxRedPayloadType] =
+ kSendRtxPayloadType;
+ }
+ }
+ // RTT needed for RemoteNtpTimeEstimator for the receive stream.
+ (*receive_configs)[0].rtp.rtcp_xr.receiver_reference_time_report = true;
+ encoder_config->content_type =
+ screenshare_ ? VideoEncoderConfig::ContentType::kScreen
+ : VideoEncoderConfig::ContentType::kRealtimeVideo;
+ }
+
+ void OnCallsCreated(Call* sender_call, Call* receiver_call) override {
+ sender_call_ = sender_call;
+ receiver_call_ = receiver_call;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timed out waiting for packet to be NACKed.";
+ }
+
+ rtc::CriticalSection crit_;
+ const bool use_rtx_;
+ const bool use_fec_;
+ const bool screenshare_;
+ const std::unique_ptr<VideoEncoder> vp8_encoder_;
+ Call* sender_call_;
+ Call* receiver_call_;
+ int64_t start_runtime_ms_;
+ int num_frames_received_ RTC_GUARDED_BY(&crit_);
+ } test(use_rtx, use_fec, screenshare);
+
+ metrics::Reset();
+ RunBaseTest(&test);
+
+ std::string video_prefix =
+ screenshare ? "WebRTC.Video.Screenshare." : "WebRTC.Video.";
+ // The content type extension is disabled in the non-screenshare test,
+ // therefore no slicing on simulcast id should be present.
+ std::string video_suffix = screenshare ? ".S0" : "";
+ // Verify that stats have been updated once. The call lifetime metric is
+ // recorded once per Call (sender and receiver), hence two samples.
+ EXPECT_EQ(2, metrics::NumSamples("WebRTC.Call.LifetimeInSeconds"));
+ EXPECT_EQ(1, metrics::NumSamples(
+ "WebRTC.Call.TimeReceivingVideoRtpPacketsInSeconds"));
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Call.VideoBitrateReceivedInKbps"));
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Call.RtcpBitrateReceivedInBps"));
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Call.BitrateReceivedInKbps"));
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Call.EstimatedSendBitrateInKbps"));
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Call.PacerBitrateInKbps"));
+
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.SendStreamLifetimeInSeconds"));
+ EXPECT_EQ(1,
+ metrics::NumSamples("WebRTC.Video.ReceiveStreamLifetimeInSeconds"));
+
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.NackPacketsSentPerMinute"));
+ EXPECT_EQ(1,
+ metrics::NumSamples(video_prefix + "NackPacketsReceivedPerMinute"));
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.FirPacketsSentPerMinute"));
+ EXPECT_EQ(1,
+ metrics::NumSamples(video_prefix + "FirPacketsReceivedPerMinute"));
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.PliPacketsSentPerMinute"));
+ EXPECT_EQ(1,
+ metrics::NumSamples(video_prefix + "PliPacketsReceivedPerMinute"));
+
+ EXPECT_EQ(1, metrics::NumSamples(video_prefix + "KeyFramesSentInPermille"));
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.KeyFramesReceivedInPermille"));
+
+ EXPECT_EQ(1, metrics::NumSamples(video_prefix + "SentPacketsLostInPercent"));
+ EXPECT_EQ(1,
+ metrics::NumSamples("WebRTC.Video.ReceivedPacketsLostInPercent"));
+
+ EXPECT_EQ(1, metrics::NumSamples(video_prefix + "InputWidthInPixels"));
+ EXPECT_EQ(1, metrics::NumSamples(video_prefix + "InputHeightInPixels"));
+ EXPECT_EQ(1, metrics::NumSamples(video_prefix + "SentWidthInPixels"));
+ EXPECT_EQ(1, metrics::NumSamples(video_prefix + "SentHeightInPixels"));
+ EXPECT_EQ(1, metrics::NumSamples(video_prefix + "ReceivedWidthInPixels"));
+ EXPECT_EQ(1, metrics::NumSamples(video_prefix + "ReceivedHeightInPixels"));
+
+ EXPECT_EQ(1, metrics::NumEvents(video_prefix + "InputWidthInPixels",
+ kDefaultWidth));
+ EXPECT_EQ(1, metrics::NumEvents(video_prefix + "InputHeightInPixels",
+ kDefaultHeight));
+ EXPECT_EQ(
+ 1, metrics::NumEvents(video_prefix + "SentWidthInPixels", kDefaultWidth));
+ EXPECT_EQ(1, metrics::NumEvents(video_prefix + "SentHeightInPixels",
+ kDefaultHeight));
+ EXPECT_EQ(1, metrics::NumEvents(video_prefix + "ReceivedWidthInPixels",
+ kDefaultWidth));
+ EXPECT_EQ(1, metrics::NumEvents(video_prefix + "ReceivedHeightInPixels",
+ kDefaultHeight));
+
+ EXPECT_EQ(1, metrics::NumSamples(video_prefix + "InputFramesPerSecond"));
+ EXPECT_EQ(1, metrics::NumSamples(video_prefix + "SentFramesPerSecond"));
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.DecodedFramesPerSecond"));
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.RenderFramesPerSecond"));
+
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.JitterBufferDelayInMs"));
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.TargetDelayInMs"));
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.CurrentDelayInMs"));
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.OnewayDelayInMs"));
+
+ EXPECT_EQ(1, metrics::NumSamples(video_prefix + "EndToEndDelayInMs" +
+ video_suffix));
+ EXPECT_EQ(1, metrics::NumSamples(video_prefix + "EndToEndDelayMaxInMs" +
+ video_suffix));
+ EXPECT_EQ(1, metrics::NumSamples(video_prefix + "InterframeDelayInMs" +
+ video_suffix));
+ EXPECT_EQ(1, metrics::NumSamples(video_prefix + "InterframeDelayMaxInMs" +
+ video_suffix));
+
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.RenderSqrtPixelsPerSecond"));
+
+ EXPECT_EQ(1, metrics::NumSamples(video_prefix + "EncodeTimeInMs"));
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.DecodeTimeInMs"));
+
+ EXPECT_EQ(1, metrics::NumSamples(video_prefix + "NumberOfPauseEvents"));
+ EXPECT_EQ(1, metrics::NumSamples(video_prefix + "PausedTimeInPercent"));
+
+ EXPECT_EQ(1, metrics::NumSamples(video_prefix + "BitrateSentInKbps"));
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.BitrateReceivedInKbps"));
+ EXPECT_EQ(1, metrics::NumSamples(video_prefix + "MediaBitrateSentInKbps"));
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.MediaBitrateReceivedInKbps"));
+ EXPECT_EQ(1, metrics::NumSamples(video_prefix + "PaddingBitrateSentInKbps"));
+ EXPECT_EQ(1,
+ metrics::NumSamples("WebRTC.Video.PaddingBitrateReceivedInKbps"));
+ EXPECT_EQ(
+ 1, metrics::NumSamples(video_prefix + "RetransmittedBitrateSentInKbps"));
+ EXPECT_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.RetransmittedBitrateReceivedInKbps"));
+
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.SendDelayInMs"));
+ EXPECT_EQ(1, metrics::NumSamples(video_prefix + "SendSideDelayInMs"));
+ EXPECT_EQ(1, metrics::NumSamples(video_prefix + "SendSideDelayMaxInMs"));
+
+ int num_rtx_samples = use_rtx ? 1 : 0;
+ EXPECT_EQ(num_rtx_samples,
+ metrics::NumSamples("WebRTC.Video.RtxBitrateSentInKbps"));
+ EXPECT_EQ(num_rtx_samples,
+ metrics::NumSamples("WebRTC.Video.RtxBitrateReceivedInKbps"));
+
+ int num_red_samples = use_fec ? 1 : 0;
+ EXPECT_EQ(num_red_samples,
+ metrics::NumSamples("WebRTC.Video.FecBitrateSentInKbps"));
+ EXPECT_EQ(num_red_samples,
+ metrics::NumSamples("WebRTC.Video.FecBitrateReceivedInKbps"));
+ EXPECT_EQ(num_red_samples,
+ metrics::NumSamples("WebRTC.Video.ReceivedFecPacketsInPercent"));
+}
+
+#if defined(WEBRTC_WIN)
+// Disabled due to flakiness on Windows (bugs.webrtc.org/7483).
+#define MAYBE_ContentTypeSwitches DISABLED_ContentTypeSwitches
+#else
+#define MAYBE_ContentTypeSwitches ContentTypeSwitches
+#endif
+TEST_P(EndToEndTest, MAYBE_ContentTypeSwitches) {
+ class StatsObserver : public test::BaseTest,
+ public rtc::VideoSinkInterface<VideoFrame> {
+ public:
+ StatsObserver() : BaseTest(kLongTimeoutMs), num_frames_received_(0) {}
+
+ bool ShouldCreateReceivers() const override { return true; }
+
+ void OnFrame(const VideoFrame& video_frame) override {
+ // The RTT is needed to estimate |ntp_time_ms| which is used by
+ // end-to-end delay stats. Therefore, start counting received frames once
+ // |ntp_time_ms| is valid.
+ if (video_frame.ntp_time_ms() > 0 &&
+ Clock::GetRealTimeClock()->CurrentNtpInMilliseconds() >=
+ video_frame.ntp_time_ms()) {
+ rtc::CritScope lock(&crit_);
+ ++num_frames_received_;
+ }
+ }
+
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ if (MinNumberOfFramesReceived())
+ observation_complete_.Set();
+ return SEND_PACKET;
+ }
+
+ bool MinNumberOfFramesReceived() const {
+ // Leave some room for frames with the wrong content type during the
+ // switch.
+ const int kMinRequiredHistogramSamples = 200 + 50;
+ rtc::CritScope lock(&crit_);
+ return num_frames_received_ > kMinRequiredHistogramSamples;
+ }
+
+ // May be called several times.
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timed out waiting for enough packets.";
+ // Reset frame counter so next PerformTest() call will do something.
+ {
+ rtc::CritScope lock(&crit_);
+ num_frames_received_ = 0;
+ }
+ }
+
+ rtc::CriticalSection crit_;
+ int num_frames_received_ RTC_GUARDED_BY(&crit_);
+ } test;
+
+ metrics::Reset();
+
+ Call::Config send_config(test.GetSenderCallConfig());
+ Call::Config recv_config(test.GetReceiverCallConfig());
+ VideoEncoderConfig encoder_config_with_screenshare;
+
+ task_queue_.SendTask([this, &test, &send_config,
+ &recv_config, &encoder_config_with_screenshare]() {
+ CreateSenderCall(send_config);
+ CreateReceiverCall(recv_config);
+
+ receive_transport_.reset(test.CreateReceiveTransport(&task_queue_));
+ send_transport_.reset(
+ test.CreateSendTransport(&task_queue_, sender_call_.get()));
+ send_transport_->SetReceiver(receiver_call_->Receiver());
+ receive_transport_->SetReceiver(sender_call_->Receiver());
+
+ receiver_call_->SignalChannelNetworkState(MediaType::VIDEO, kNetworkUp);
+ CreateSendConfig(1, 0, 0, send_transport_.get());
+ CreateMatchingReceiveConfigs(receive_transport_.get());
+
+ // Modify send and receive configs.
+ video_send_config_.rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ video_receive_configs_[0].rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ video_receive_configs_[0].renderer = &test;
+ // RTT needed for RemoteNtpTimeEstimator for the receive stream.
+ video_receive_configs_[0].rtp.rtcp_xr.receiver_reference_time_report = true;
+ // Start with realtime video.
+ video_encoder_config_.content_type =
+ VideoEncoderConfig::ContentType::kRealtimeVideo;
+ // Second encoder config for the second part of the test uses screenshare.
+ encoder_config_with_screenshare = video_encoder_config_.Copy();
+ encoder_config_with_screenshare.content_type =
+ VideoEncoderConfig::ContentType::kScreen;
+
+ CreateVideoStreams();
+ CreateFrameGeneratorCapturer(kDefaultFramerate, kDefaultWidth,
+ kDefaultHeight);
+ Start();
+ });
+
+ test.PerformTest();
+
+ // Replace old send stream.
+ task_queue_.SendTask([this, &encoder_config_with_screenshare]() {
+ sender_call_->DestroyVideoSendStream(video_send_stream_);
+ video_send_stream_ = sender_call_->CreateVideoSendStream(
+ video_send_config_.Copy(), encoder_config_with_screenshare.Copy());
+ video_send_stream_->SetSource(
+ frame_generator_capturer_.get(),
+ VideoSendStream::DegradationPreference::kBalanced);
+ video_send_stream_->Start();
+ });
+
+ // Continue to run test but now with screenshare.
+ test.PerformTest();
+
+ task_queue_.SendTask([this]() {
+ Stop();
+ DestroyStreams();
+ send_transport_.reset();
+ receive_transport_.reset();
+ DestroyCalls();
+ });
+
+ // Verify that stats have been updated for both screenshare and video.
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.EndToEndDelayInMs"));
+ EXPECT_EQ(1,
+ metrics::NumSamples("WebRTC.Video.Screenshare.EndToEndDelayInMs"));
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.EndToEndDelayMaxInMs"));
+ EXPECT_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.Screenshare.EndToEndDelayMaxInMs"));
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.InterframeDelayInMs"));
+ EXPECT_EQ(1,
+ metrics::NumSamples(
+ "WebRTC.Video.Screenshare.InterframeDelayInMs"));
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.InterframeDelayMaxInMs"));
+ EXPECT_EQ(1,
+ metrics::NumSamples(
+ "WebRTC.Video.Screenshare.InterframeDelayMaxInMs"));
+}
+
+TEST_P(EndToEndTest, VerifyHistogramStatsWithRtx) {
+ const bool kEnabledRtx = true;
+ const bool kEnabledRed = false;
+ const bool kScreenshare = false;
+ VerifyHistogramStats(kEnabledRtx, kEnabledRed, kScreenshare);
+}
+
+TEST_P(EndToEndTest, VerifyHistogramStatsWithRed) {
+ const bool kEnabledRtx = false;
+ const bool kEnabledRed = true;
+ const bool kScreenshare = false;
+ VerifyHistogramStats(kEnabledRtx, kEnabledRed, kScreenshare);
+}
+
+TEST_P(EndToEndTest, VerifyHistogramStatsWithScreenshare) {
+ const bool kEnabledRtx = false;
+ const bool kEnabledRed = false;
+ const bool kScreenshare = true;
+ VerifyHistogramStats(kEnabledRtx, kEnabledRed, kScreenshare);
+}
+
+void EndToEndTest::TestSendsSetSsrcs(size_t num_ssrcs,
+ bool send_single_ssrc_first) {
+ class SendsSetSsrcs : public test::EndToEndTest {
+ public:
+ SendsSetSsrcs(const uint32_t* ssrcs,
+ size_t num_ssrcs,
+ bool send_single_ssrc_first)
+ : EndToEndTest(kDefaultTimeoutMs),
+ num_ssrcs_(num_ssrcs),
+ send_single_ssrc_first_(send_single_ssrc_first),
+ ssrcs_to_observe_(num_ssrcs),
+ expect_single_ssrc_(send_single_ssrc_first),
+ send_stream_(nullptr) {
+ for (size_t i = 0; i < num_ssrcs; ++i)
+ valid_ssrcs_[ssrcs[i]] = true;
+ }
+
+ private:
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ RTPHeader header;
+ EXPECT_TRUE(parser_->Parse(packet, length, &header));
+
+ EXPECT_TRUE(valid_ssrcs_[header.ssrc])
+ << "Received unknown SSRC: " << header.ssrc;
+
+ if (!valid_ssrcs_[header.ssrc])
+ observation_complete_.Set();
+
+ if (!is_observed_[header.ssrc]) {
+ is_observed_[header.ssrc] = true;
+ --ssrcs_to_observe_;
+ if (expect_single_ssrc_) {
+ expect_single_ssrc_ = false;
+ observation_complete_.Set();
+ }
+ }
+
+ if (ssrcs_to_observe_ == 0)
+ observation_complete_.Set();
+
+ return SEND_PACKET;
+ }
+
+ size_t GetNumVideoStreams() const override { return num_ssrcs_; }
+
+ // This test uses other VideoStream settings than the default settings
+ // implemented in DefaultVideoStreamFactory. Therefore this test implements
+ // its own VideoEncoderConfig::VideoStreamFactoryInterface, which is created
+ // in ModifyVideoConfigs.
+ class VideoStreamFactory
+ : public VideoEncoderConfig::VideoStreamFactoryInterface {
+ public:
+ VideoStreamFactory() {}
+
+ private:
+ std::vector<VideoStream> CreateEncoderStreams(
+ int width,
+ int height,
+ const VideoEncoderConfig& encoder_config) override {
+ std::vector<VideoStream> streams =
+ test::CreateVideoStreams(width, height, encoder_config);
+ // Set low simulcast bitrates to not have to wait for bandwidth ramp-up.
+ for (size_t i = 0; i < encoder_config.number_of_streams; ++i) {
+ streams[i].min_bitrate_bps = 10000;
+ streams[i].target_bitrate_bps = 15000;
+ streams[i].max_bitrate_bps = 20000;
+ }
+ return streams;
+ }
+ };
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ encoder_config->video_stream_factory =
+ new rtc::RefCountedObject<VideoStreamFactory>();
+ video_encoder_config_all_streams_ = encoder_config->Copy();
+ if (send_single_ssrc_first_)
+ encoder_config->number_of_streams = 1;
+ }
+
+ void OnVideoStreamsCreated(
+ VideoSendStream* send_stream,
+ const std::vector<VideoReceiveStream*>& receive_streams) override {
+ send_stream_ = send_stream;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timed out while waiting for "
+ << (send_single_ssrc_first_ ? "first SSRC."
+ : "SSRCs.");
+
+ if (send_single_ssrc_first_) {
+ // Set full simulcast and continue with the rest of the SSRCs.
+ send_stream_->ReconfigureVideoEncoder(
+ std::move(video_encoder_config_all_streams_));
+ EXPECT_TRUE(Wait()) << "Timed out while waiting on additional SSRCs.";
+ }
+ }
+
+ private:
+ std::map<uint32_t, bool> valid_ssrcs_;
+ std::map<uint32_t, bool> is_observed_;
+
+ const size_t num_ssrcs_;
+ const bool send_single_ssrc_first_;
+
+ size_t ssrcs_to_observe_;
+ bool expect_single_ssrc_;
+
+ VideoSendStream* send_stream_;
+ VideoEncoderConfig video_encoder_config_all_streams_;
+ } test(kVideoSendSsrcs, num_ssrcs, send_single_ssrc_first);
+
+ RunBaseTest(&test);
+}
+
+TEST_P(EndToEndTest, ReportsSetEncoderRates) {
+ class EncoderRateStatsTest : public test::EndToEndTest,
+ public test::FakeEncoder {
+ public:
+ explicit EncoderRateStatsTest(
+ test::SingleThreadedTaskQueueForTesting* task_queue)
+ : EndToEndTest(kDefaultTimeoutMs),
+ FakeEncoder(Clock::GetRealTimeClock()),
+ task_queue_(task_queue),
+ send_stream_(nullptr),
+ bitrate_kbps_(0) {}
+
+ void OnVideoStreamsCreated(
+ VideoSendStream* send_stream,
+ const std::vector<VideoReceiveStream*>& receive_streams) override {
+ send_stream_ = send_stream;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->encoder_settings.encoder = this;
+ RTC_DCHECK_EQ(1, encoder_config->number_of_streams);
+ }
+
+ int32_t SetRateAllocation(const BitrateAllocation& rate_allocation,
+ uint32_t framerate) override {
+ // Make sure not to trigger on any default zero bitrates.
+ if (rate_allocation.get_sum_bps() == 0)
+ return 0;
+ rtc::CritScope lock(&crit_);
+ bitrate_kbps_ = rate_allocation.get_sum_kbps();
+ observation_complete_.Set();
+ return 0;
+ }
+
+ void PerformTest() override {
+ ASSERT_TRUE(Wait())
+ << "Timed out while waiting for encoder SetRates() call.";
+
+ task_queue_->SendTask([this]() {
+ WaitForEncoderTargetBitrateMatchStats();
+ send_stream_->Stop();
+ WaitForStatsReportZeroTargetBitrate();
+ send_stream_->Start();
+ WaitForEncoderTargetBitrateMatchStats();
+ });
+ }
+
+ void WaitForEncoderTargetBitrateMatchStats() {
+ for (int i = 0; i < kDefaultTimeoutMs; ++i) {
+ VideoSendStream::Stats stats = send_stream_->GetStats();
+ {
+ rtc::CritScope lock(&crit_);
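+ // Round the reported target bitrate to the nearest kbps to match the
+ // granularity of the rate passed to SetRateAllocation().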
+ if ((stats.target_media_bitrate_bps + 500) / 1000 ==
+ static_cast<int>(bitrate_kbps_)) {
+ return;
+ }
+ }
+ SleepMs(1);
+ }
+ FAIL()
+ << "Timed out waiting for stats reporting the currently set bitrate.";
+ }
+
+ void WaitForStatsReportZeroTargetBitrate() {
+ for (int i = 0; i < kDefaultTimeoutMs; ++i) {
+ if (send_stream_->GetStats().target_media_bitrate_bps == 0) {
+ return;
+ }
+ SleepMs(1);
+ }
+ FAIL() << "Timed out waiting for stats reporting zero bitrate.";
+ }
+
+ private:
+ test::SingleThreadedTaskQueueForTesting* const task_queue_;
+ rtc::CriticalSection crit_;
+ VideoSendStream* send_stream_;
+ uint32_t bitrate_kbps_ RTC_GUARDED_BY(crit_);
+ } test(&task_queue_);
+
+ RunBaseTest(&test);
+}
+
+TEST_P(EndToEndTest, GetStats) {
+ static const int kStartBitrateBps = 3000000;
+ static const int kExpectedRenderDelayMs = 20;
+
+ class ReceiveStreamRenderer : public rtc::VideoSinkInterface<VideoFrame> {
+ public:
+ ReceiveStreamRenderer() {}
+
+ private:
+ void OnFrame(const VideoFrame& video_frame) override {}
+ };
+
+ class StatsObserver : public test::EndToEndTest,
+ public rtc::VideoSinkInterface<VideoFrame> {
+ public:
+ StatsObserver()
+ : EndToEndTest(kLongTimeoutMs),
+ encoder_(Clock::GetRealTimeClock(), 10),
+ send_stream_(nullptr),
+ expected_send_ssrcs_(),
+ check_stats_event_(false, false) {}
+
+ private:
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ // Drop every 25th packet => 4% loss.
+ static const int kPacketLossFrac = 25;
+ RTPHeader header;
+ RtpUtility::RtpHeaderParser parser(packet, length);
+ if (parser.Parse(&header) &&
+ expected_send_ssrcs_.find(header.ssrc) !=
+ expected_send_ssrcs_.end() &&
+ header.sequenceNumber % kPacketLossFrac == 0) {
+ return DROP_PACKET;
+ }
+ check_stats_event_.Set();
+ return SEND_PACKET;
+ }
+
+ Action OnSendRtcp(const uint8_t* packet, size_t length) override {
+ check_stats_event_.Set();
+ return SEND_PACKET;
+ }
+
+ Action OnReceiveRtp(const uint8_t* packet, size_t length) override {
+ check_stats_event_.Set();
+ return SEND_PACKET;
+ }
+
+ Action OnReceiveRtcp(const uint8_t* packet, size_t length) override {
+ check_stats_event_.Set();
+ return SEND_PACKET;
+ }
+
+ void OnFrame(const VideoFrame& video_frame) override {
+ // Ensure that we have at least 5ms send side delay.
+ SleepMs(5);
+ }
+
+ bool CheckReceiveStats() {
+ for (size_t i = 0; i < receive_streams_.size(); ++i) {
+ VideoReceiveStream::Stats stats = receive_streams_[i]->GetStats();
+ EXPECT_EQ(expected_receive_ssrcs_[i], stats.ssrc);
+
+ // Make sure all fields have been populated.
+ // TODO(pbos): Use CompoundKey if/when we ever know that all stats are
+ // always filled for all receivers.
+ receive_stats_filled_["IncomingRate"] |=
+ stats.network_frame_rate != 0 || stats.total_bitrate_bps != 0;
+
+ send_stats_filled_["DecoderImplementationName"] |=
+ stats.decoder_implementation_name ==
+ test::FakeDecoder::kImplementationName;
+ receive_stats_filled_["RenderDelayAsHighAsExpected"] |=
+ stats.render_delay_ms >= kExpectedRenderDelayMs;
+
+ receive_stats_filled_["FrameCallback"] |= stats.decode_frame_rate != 0;
+
+ receive_stats_filled_["FrameRendered"] |= stats.render_frame_rate != 0;
+
+ receive_stats_filled_["StatisticsUpdated"] |=
+ stats.rtcp_stats.packets_lost != 0 ||
+ stats.rtcp_stats.extended_highest_sequence_number != 0 ||
+ stats.rtcp_stats.fraction_lost != 0 || stats.rtcp_stats.jitter != 0;
+
+ receive_stats_filled_["DataCountersUpdated"] |=
+ stats.rtp_stats.transmitted.payload_bytes != 0 ||
+ stats.rtp_stats.fec.packets != 0 ||
+ stats.rtp_stats.transmitted.header_bytes != 0 ||
+ stats.rtp_stats.transmitted.packets != 0 ||
+ stats.rtp_stats.transmitted.padding_bytes != 0 ||
+ stats.rtp_stats.retransmitted.packets != 0;
+
+ receive_stats_filled_["CodecStats"] |=
+ stats.target_delay_ms != 0 || stats.discarded_packets != 0;
+
+ receive_stats_filled_["FrameCounts"] |=
+ stats.frame_counts.key_frames != 0 ||
+ stats.frame_counts.delta_frames != 0;
+
+ receive_stats_filled_["CName"] |= !stats.c_name.empty();
+
+ receive_stats_filled_["RtcpPacketTypeCount"] |=
+ stats.rtcp_packet_type_counts.fir_packets != 0 ||
+ stats.rtcp_packet_type_counts.nack_packets != 0 ||
+ stats.rtcp_packet_type_counts.pli_packets != 0 ||
+ stats.rtcp_packet_type_counts.nack_requests != 0 ||
+ stats.rtcp_packet_type_counts.unique_nack_requests != 0;
+
+ assert(stats.current_payload_type == -1 ||
+ stats.current_payload_type == kFakeVideoSendPayloadType);
+ receive_stats_filled_["IncomingPayloadType"] |=
+ stats.current_payload_type == kFakeVideoSendPayloadType;
+ }
+
+ return AllStatsFilled(receive_stats_filled_);
+ }
+
+ bool CheckSendStats() {
+ RTC_DCHECK(send_stream_);
+ VideoSendStream::Stats stats = send_stream_->GetStats();
+
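+ // Expect one substream entry per RTX SSRC in addition to the media SSRCs.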
+ size_t expected_num_streams = kNumSsrcs + expected_send_ssrcs_.size();
+ send_stats_filled_["NumStreams"] |=
+ stats.substreams.size() == expected_num_streams;
+
+ send_stats_filled_["CpuOveruseMetrics"] |=
+ stats.avg_encode_time_ms != 0 && stats.encode_usage_percent != 0;
+
+ send_stats_filled_["EncoderImplementationName"] |=
+ stats.encoder_implementation_name ==
+ test::FakeEncoder::kImplementationName;
+
+ send_stats_filled_["EncoderPreferredBitrate"] |=
+ stats.preferred_media_bitrate_bps > 0;
+
+ for (std::map<uint32_t, VideoSendStream::StreamStats>::const_iterator it =
+ stats.substreams.begin();
+ it != stats.substreams.end(); ++it) {
+ if (expected_send_ssrcs_.find(it->first) == expected_send_ssrcs_.end())
+ continue; // Probably RTX.
+
+ send_stats_filled_[CompoundKey("CapturedFrameRate", it->first)] |=
+ stats.input_frame_rate != 0;
+
+ const VideoSendStream::StreamStats& stream_stats = it->second;
+
+ send_stats_filled_[CompoundKey("StatisticsUpdated", it->first)] |=
+ stream_stats.rtcp_stats.packets_lost != 0 ||
+ stream_stats.rtcp_stats.extended_highest_sequence_number != 0 ||
+ stream_stats.rtcp_stats.fraction_lost != 0;
+
+ send_stats_filled_[CompoundKey("DataCountersUpdated", it->first)] |=
+ stream_stats.rtp_stats.fec.packets != 0 ||
+ stream_stats.rtp_stats.transmitted.padding_bytes != 0 ||
+ stream_stats.rtp_stats.retransmitted.packets != 0 ||
+ stream_stats.rtp_stats.transmitted.packets != 0;
+
+ send_stats_filled_[CompoundKey("BitrateStatisticsObserver.Total",
+ it->first)] |=
+ stream_stats.total_bitrate_bps != 0;
+
+ send_stats_filled_[CompoundKey("BitrateStatisticsObserver.Retransmit",
+ it->first)] |=
+ stream_stats.retransmit_bitrate_bps != 0;
+
+ send_stats_filled_[CompoundKey("FrameCountObserver", it->first)] |=
+ stream_stats.frame_counts.delta_frames != 0 ||
+ stream_stats.frame_counts.key_frames != 0;
+
+ send_stats_filled_[CompoundKey("OutgoingRate", it->first)] |=
+ stats.encode_frame_rate != 0;
+
+ send_stats_filled_[CompoundKey("Delay", it->first)] |=
+ stream_stats.avg_delay_ms != 0 || stream_stats.max_delay_ms != 0;
+
+ // TODO(pbos): Use CompoundKey when the test makes sure that all SSRCs
+ // report dropped packets.
+ send_stats_filled_["RtcpPacketTypeCount"] |=
+ stream_stats.rtcp_packet_type_counts.fir_packets != 0 ||
+ stream_stats.rtcp_packet_type_counts.nack_packets != 0 ||
+ stream_stats.rtcp_packet_type_counts.pli_packets != 0 ||
+ stream_stats.rtcp_packet_type_counts.nack_requests != 0 ||
+ stream_stats.rtcp_packet_type_counts.unique_nack_requests != 0;
+ }
+
+ return AllStatsFilled(send_stats_filled_);
+ }
+
+ std::string CompoundKey(const char* name, uint32_t ssrc) {
+ std::ostringstream oss;
+ oss << name << "_" << ssrc;
+ return oss.str();
+ }
+
+ bool AllStatsFilled(const std::map<std::string, bool>& stats_map) {
+ for (const auto& stat : stats_map) {
+ if (!stat.second)
+ return false;
+ }
+ return true;
+ }
+
+ test::PacketTransport* CreateSendTransport(
+ test::SingleThreadedTaskQueueForTesting* task_queue,
+ Call* sender_call) override {
+ FakeNetworkPipe::Config network_config;
+ network_config.loss_percent = 5;
+ return new test::PacketTransport(task_queue, sender_call, this,
+ test::PacketTransport::kSender,
+ payload_type_map_, network_config);
+ }
+
+ Call::Config GetSenderCallConfig() override {
+ Call::Config config = EndToEndTest::GetSenderCallConfig();
+ config.bitrate_config.start_bitrate_bps = kStartBitrateBps;
+ return config;
+ }
+
+ // This test uses other VideoStream settings than the default settings
+ // implemented in DefaultVideoStreamFactory. Therefore this test implements
+ // its own VideoEncoderConfig::VideoStreamFactoryInterface, which is created
+ // in ModifyVideoConfigs.
+ class VideoStreamFactory
+ : public VideoEncoderConfig::VideoStreamFactoryInterface {
+ public:
+ VideoStreamFactory() {}
+
+ private:
+ std::vector<VideoStream> CreateEncoderStreams(
+ int width,
+ int height,
+ const VideoEncoderConfig& encoder_config) override {
+ std::vector<VideoStream> streams =
+ test::CreateVideoStreams(width, height, encoder_config);
+ // Set low simulcast bitrates to not have to wait for bandwidth ramp-up.
+ for (size_t i = 0; i < encoder_config.number_of_streams; ++i) {
+ streams[i].min_bitrate_bps = 10000;
+ streams[i].target_bitrate_bps = 15000;
+ streams[i].max_bitrate_bps = 20000;
+ }
+ return streams;
+ }
+ };
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ encoder_config->video_stream_factory =
+ new rtc::RefCountedObject<VideoStreamFactory>();
+ send_config->pre_encode_callback = this; // Used to inject delay.
+ expected_cname_ = send_config->rtp.c_name = "SomeCName";
+
+ send_config->rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ send_config->rtp.rtx.payload_type = kSendRtxPayloadType;
+
+ const std::vector<uint32_t>& ssrcs = send_config->rtp.ssrcs;
+ for (size_t i = 0; i < ssrcs.size(); ++i) {
+ expected_send_ssrcs_.insert(ssrcs[i]);
+ expected_receive_ssrcs_.push_back(
+ (*receive_configs)[i].rtp.remote_ssrc);
+ (*receive_configs)[i].render_delay_ms = kExpectedRenderDelayMs;
+ (*receive_configs)[i].renderer = &receive_stream_renderer_;
+ (*receive_configs)[i].rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+
+ (*receive_configs)[i].rtp.rtx_ssrc = kSendRtxSsrcs[i];
+ (*receive_configs)[i]
+ .rtp.rtx_associated_payload_types[kSendRtxPayloadType] =
+ kFakeVideoSendPayloadType;
+ }
+
+ for (size_t i = 0; i < kNumSsrcs; ++i)
+ send_config->rtp.rtx.ssrcs.push_back(kSendRtxSsrcs[i]);
+
+ // Use a delayed encoder to make sure we see CpuOveruseMetrics stats that
+ // are non-zero.
+ send_config->encoder_settings.encoder = &encoder_;
+ }
+
+ size_t GetNumVideoStreams() const override { return kNumSsrcs; }
+
+ void OnVideoStreamsCreated(
+ VideoSendStream* send_stream,
+ const std::vector<VideoReceiveStream*>& receive_streams) override {
+ send_stream_ = send_stream;
+ receive_streams_ = receive_streams;
+ }
+
+ void PerformTest() override {
+ Clock* clock = Clock::GetRealTimeClock();
+ int64_t now = clock->TimeInMilliseconds();
+ int64_t stop_time = now + test::CallTest::kLongTimeoutMs;
+ bool receive_ok = false;
+ bool send_ok = false;
+
+ while (now < stop_time) {
+ if (!receive_ok)
+ receive_ok = CheckReceiveStats();
+ if (!send_ok)
+ send_ok = CheckSendStats();
+
+ if (receive_ok && send_ok)
+ return;
+
+ int64_t time_until_timeout = stop_time - now;
+ if (time_until_timeout > 0)
+ check_stats_event_.Wait(time_until_timeout);
+ now = clock->TimeInMilliseconds();
+ }
+
+ ADD_FAILURE() << "Timed out waiting for filled stats.";
+ for (std::map<std::string, bool>::const_iterator it =
+ receive_stats_filled_.begin();
+ it != receive_stats_filled_.end(); ++it) {
+ if (!it->second) {
+ ADD_FAILURE() << "Missing receive stats: " << it->first;
+ }
+ }
+
+ for (std::map<std::string, bool>::const_iterator it =
+ send_stats_filled_.begin();
+ it != send_stats_filled_.end(); ++it) {
+ if (!it->second) {
+ ADD_FAILURE() << "Missing send stats: " << it->first;
+ }
+ }
+ }
+
+ test::DelayedEncoder encoder_;
+ std::vector<VideoReceiveStream*> receive_streams_;
+ std::map<std::string, bool> receive_stats_filled_;
+
+ VideoSendStream* send_stream_;
+ std::map<std::string, bool> send_stats_filled_;
+
+ std::vector<uint32_t> expected_receive_ssrcs_;
+ std::set<uint32_t> expected_send_ssrcs_;
+ std::string expected_cname_;
+
+ rtc::Event check_stats_event_;
+ ReceiveStreamRenderer receive_stream_renderer_;
+ } test;
+
+ RunBaseTest(&test);
+}
+
+TEST_P(EndToEndTest, TimingFramesAreReported) {
+ static const int kExtensionId = 5;
+
+ class StatsObserver : public test::EndToEndTest {
+ public:
+ StatsObserver() : EndToEndTest(kLongTimeoutMs) {}
+
+ private:
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->rtp.extensions.clear();
+ send_config->rtp.extensions.push_back(
+ RtpExtension(RtpExtension::kVideoTimingUri, kExtensionId));
+ for (size_t i = 0; i < receive_configs->size(); ++i) {
+ (*receive_configs)[i].rtp.extensions.clear();
+ (*receive_configs)[i].rtp.extensions.push_back(
+ RtpExtension(RtpExtension::kVideoTimingUri, kExtensionId));
+ }
+ }
+
+ void OnVideoStreamsCreated(
+ VideoSendStream* send_stream,
+ const std::vector<VideoReceiveStream*>& receive_streams) override {
+ receive_streams_ = receive_streams;
+ }
+
+ void PerformTest() override {
+ // No frames reported initially.
+ for (size_t i = 0; i < receive_streams_.size(); ++i) {
+ EXPECT_FALSE(receive_streams_[i]->GetStats().timing_frame_info);
+ }
+ // Wait for at least one timing frame to be sent, with a 100 ms grace
+ // period.
+ SleepMs(kDefaultTimingFramesDelayMs + 100);
+ // Check that timing frames are reported for each stream.
+ for (size_t i = 0; i < receive_streams_.size(); ++i) {
+ EXPECT_TRUE(receive_streams_[i]->GetStats().timing_frame_info);
+ }
+ }
+
+ std::vector<VideoReceiveStream*> receive_streams_;
+ } test;
+
+ RunBaseTest(&test);
+}
+
+class RtcpXrObserver : public test::EndToEndTest {
+ public:
+ RtcpXrObserver(bool enable_rrtr, bool enable_target_bitrate,
+ bool enable_zero_target_bitrate)
+ : EndToEndTest(test::CallTest::kDefaultTimeoutMs),
+ enable_rrtr_(enable_rrtr),
+ enable_target_bitrate_(enable_target_bitrate),
+ enable_zero_target_bitrate_(enable_zero_target_bitrate),
+ sent_rtcp_sr_(0),
+ sent_rtcp_rr_(0),
+ sent_rtcp_rrtr_(0),
+ sent_rtcp_target_bitrate_(false),
+ sent_zero_rtcp_target_bitrate_(false),
+ sent_rtcp_dlrr_(0) {}
+
+ private:
+ // Receive stream should send RR packets (and RRTR packets if enabled).
+ Action OnReceiveRtcp(const uint8_t* packet, size_t length) override {
+ rtc::CritScope lock(&crit_);
+ test::RtcpPacketParser parser;
+ EXPECT_TRUE(parser.Parse(packet, length));
+
+ sent_rtcp_rr_ += parser.receiver_report()->num_packets();
+ EXPECT_EQ(0, parser.sender_report()->num_packets());
+ EXPECT_LE(parser.xr()->num_packets(), 1);
+ if (parser.xr()->num_packets() > 0) {
+ if (parser.xr()->rrtr())
+ ++sent_rtcp_rrtr_;
+ EXPECT_FALSE(parser.xr()->dlrr());
+ }
+
+ return SEND_PACKET;
+ }
+ // Send stream should send SR packets (and DLRR packets if enabled).
+ Action OnSendRtcp(const uint8_t* packet, size_t length) override {
+ rtc::CritScope lock(&crit_);
+ test::RtcpPacketParser parser;
+ EXPECT_TRUE(parser.Parse(packet, length));
+
+ sent_rtcp_sr_ += parser.sender_report()->num_packets();
+ EXPECT_LE(parser.xr()->num_packets(), 1);
+ if (parser.xr()->num_packets() > 0) {
+ EXPECT_FALSE(parser.xr()->rrtr());
+ if (parser.xr()->dlrr())
+ ++sent_rtcp_dlrr_;
+ if (parser.xr()->target_bitrate()) {
+ sent_rtcp_target_bitrate_ = true;
+ for (const rtcp::TargetBitrate::BitrateItem& item :
+ parser.xr()->target_bitrate()->GetTargetBitrates()) {
+ if (item.target_bitrate_kbps == 0) {
+ sent_zero_rtcp_target_bitrate_ = true;
+ break;
+ }
+ }
+ }
+ }
+
+ if (sent_rtcp_sr_ > kNumRtcpReportPacketsToObserve &&
+ sent_rtcp_rr_ > kNumRtcpReportPacketsToObserve &&
+ (sent_rtcp_target_bitrate_ || !enable_target_bitrate_) &&
+ (sent_zero_rtcp_target_bitrate_ || !enable_zero_target_bitrate_)) {
+ if (enable_rrtr_) {
+ EXPECT_GT(sent_rtcp_rrtr_, 0);
+ EXPECT_GT(sent_rtcp_dlrr_, 0);
+ } else {
+ EXPECT_EQ(sent_rtcp_rrtr_, 0);
+ EXPECT_EQ(sent_rtcp_dlrr_, 0);
+ }
+ EXPECT_EQ(enable_target_bitrate_, sent_rtcp_target_bitrate_);
+ EXPECT_EQ(enable_zero_target_bitrate_, sent_zero_rtcp_target_bitrate_);
+ observation_complete_.Set();
+ }
+ return SEND_PACKET;
+ }
+
+ size_t GetNumVideoStreams() const override {
+ // When sending a zero target bitrate, we use two simulcast streams so
+ // that we'll still have a stream with a non-zero bitrate.
+ return enable_zero_target_bitrate_ ? 2 : 1;
+ }
+
+ // This test uses VideoStream settings different from the default ones
+ // implemented in DefaultVideoStreamFactory, so it implements its own
+ // VideoEncoderConfig::VideoStreamFactoryInterface which is created
+ // in ModifyVideoConfigs.
+ class ZeroTargetVideoStreamFactory
+ : public VideoEncoderConfig::VideoStreamFactoryInterface {
+ public:
+ ZeroTargetVideoStreamFactory() {}
+
+ private:
+ std::vector<VideoStream> CreateEncoderStreams(
+ int width,
+ int height,
+ const VideoEncoderConfig& encoder_config) override {
+ std::vector<VideoStream> streams =
+ test::CreateVideoStreams(width, height, encoder_config);
+ // Set one of the streams' target bitrates to zero to test that a
+ // bitrate of 0 can be signalled.
+ streams[encoder_config.number_of_streams - 1].min_bitrate_bps = 0;
+ streams[encoder_config.number_of_streams - 1].target_bitrate_bps = 0;
+ streams[encoder_config.number_of_streams - 1].max_bitrate_bps = 0;
+ return streams;
+ }
+ };
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ if (enable_zero_target_bitrate_) {
+ encoder_config->video_stream_factory =
+ new rtc::RefCountedObject<ZeroTargetVideoStreamFactory>();
+
+ // Configure VP8 to be able to use simulcast.
+ send_config->encoder_settings.payload_name = "VP8";
+ (*receive_configs)[0].decoders.resize(1);
+ (*receive_configs)[0].decoders[0].payload_type =
+ send_config->encoder_settings.payload_type;
+ (*receive_configs)[0].decoders[0].payload_name =
+ send_config->encoder_settings.payload_name;
+ }
+ if (enable_target_bitrate_) {
+ // The TargetBitrate XR block is only signaled for screensharing.
+ encoder_config->content_type = VideoEncoderConfig::ContentType::kScreen;
+ }
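+ // Reduced-size RTCP (RFC 5506) lets the receiver send RR/XR reports
+ // without full compound packets.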
+ (*receive_configs)[0].rtp.rtcp_mode = RtcpMode::kReducedSize;
+ (*receive_configs)[0].rtp.rtcp_xr.receiver_reference_time_report =
+ enable_rrtr_;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait())
+ << "Timed out while waiting for RTCP SR/RR packets to be sent.";
+ }
+
+ static const int kNumRtcpReportPacketsToObserve = 5;
+
+ rtc::CriticalSection crit_;
+ const bool enable_rrtr_;
+ const bool enable_target_bitrate_;
+ const bool enable_zero_target_bitrate_;
+ int sent_rtcp_sr_ RTC_GUARDED_BY(&crit_);
+ int sent_rtcp_rr_ RTC_GUARDED_BY(&crit_);
+ int sent_rtcp_rrtr_ RTC_GUARDED_BY(&crit_);
+ bool sent_rtcp_target_bitrate_ RTC_GUARDED_BY(&crit_);
+ bool sent_zero_rtcp_target_bitrate_ RTC_GUARDED_BY(&crit_);
+ int sent_rtcp_dlrr_ RTC_GUARDED_BY(&crit_);
+};
+
+TEST_P(EndToEndTest, TestExtendedReportsWithRrtrWithoutTargetBitrate) {
+ RtcpXrObserver test(/*enable_rrtr=*/true, /*enable_target_bitrate=*/false,
+ /*enable_zero_target_bitrate=*/false);
+ RunBaseTest(&test);
+}
+
+TEST_P(EndToEndTest, TestExtendedReportsWithoutRrtrWithoutTargetBitrate) {
+ RtcpXrObserver test(/*enable_rrtr=*/false, /*enable_target_bitrate=*/false,
+ /*enable_zero_target_bitrate=*/false);
+ RunBaseTest(&test);
+}
+
+TEST_P(EndToEndTest, TestExtendedReportsWithRrtrWithTargetBitrate) {
+ RtcpXrObserver test(/*enable_rrtr=*/true, /*enable_target_bitrate=*/true,
+ /*enable_zero_target_bitrate=*/false);
+ RunBaseTest(&test);
+}
+
+TEST_P(EndToEndTest, TestExtendedReportsWithoutRrtrWithTargetBitrate) {
+ RtcpXrObserver test(/*enable_rrtr=*/false, /*enable_target_bitrate=*/true,
+ /*enable_zero_target_bitrate=*/false);
+ RunBaseTest(&test);
+}
+
+TEST_P(EndToEndTest, TestExtendedReportsCanSignalZeroTargetBitrate) {
+ RtcpXrObserver test(/*enable_rrtr=*/false, /*enable_target_bitrate=*/true,
+ /*enable_zero_target_bitrate=*/true);
+ RunBaseTest(&test);
+}
+
+TEST_P(EndToEndTest, TestReceivedRtpPacketStats) {
+ static const size_t kNumRtpPacketsToSend = 5;
+ class ReceivedRtpStatsObserver : public test::EndToEndTest {
+ public:
+ ReceivedRtpStatsObserver()
+ : EndToEndTest(kDefaultTimeoutMs),
+ receive_stream_(nullptr),
+ sent_rtp_(0) {}
+
+ private:
+ void OnVideoStreamsCreated(
+ VideoSendStream* send_stream,
+ const std::vector<VideoReceiveStream*>& receive_streams) override {
+ receive_stream_ = receive_streams[0];
+ }
+
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ if (sent_rtp_ >= kNumRtpPacketsToSend) {
+ VideoReceiveStream::Stats stats = receive_stream_->GetStats();
+ if (kNumRtpPacketsToSend == stats.rtp_stats.transmitted.packets) {
+ observation_complete_.Set();
+ }
+ return DROP_PACKET;
+ }
+ ++sent_rtp_;
+ return SEND_PACKET;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait())
+ << "Timed out while verifying number of received RTP packets.";
+ }
+
+ VideoReceiveStream* receive_stream_;
+ uint32_t sent_rtp_;
+ } test;
+
+ RunBaseTest(&test);
+}
+
+TEST_P(EndToEndTest, SendsSetSsrc) {
+ TestSendsSetSsrcs(1, false);
+}
+
+TEST_P(EndToEndTest, SendsSetSimulcastSsrcs) {
+ TestSendsSetSsrcs(kNumSsrcs, false);
+}
+
+TEST_P(EndToEndTest, CanSwitchToUseAllSsrcs) {
+ TestSendsSetSsrcs(kNumSsrcs, true);
+}
+
+TEST_P(EndToEndTest, DISABLED_RedundantPayloadsTransmittedOnAllSsrcs) {
+ class ObserveRedundantPayloads: public test::EndToEndTest {
+ public:
+ ObserveRedundantPayloads()
+ : EndToEndTest(kDefaultTimeoutMs), ssrcs_to_observe_(kNumSsrcs) {
+ for (size_t i = 0; i < kNumSsrcs; ++i) {
+ registered_rtx_ssrc_[kSendRtxSsrcs[i]] = true;
+ }
+ }
+
+ private:
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ RTPHeader header;
+ EXPECT_TRUE(parser_->Parse(packet, length, &header));
+
+ if (!registered_rtx_ssrc_[header.ssrc])
+ return SEND_PACKET;
+
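+ // A packet longer than its header plus padding carries actual payload,
+ // i.e. a redundant payload on the RTX SSRC rather than pure padding.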
+ EXPECT_LE(header.headerLength + header.paddingLength, length);
+ const bool packet_is_redundant_payload =
+ header.headerLength + header.paddingLength < length;
+
+ if (!packet_is_redundant_payload)
+ return SEND_PACKET;
+
+ if (!observed_redundant_retransmission_[header.ssrc]) {
+ observed_redundant_retransmission_[header.ssrc] = true;
+ if (--ssrcs_to_observe_ == 0)
+ observation_complete_.Set();
+ }
+
+ return SEND_PACKET;
+ }
+
+ size_t GetNumVideoStreams() const override { return kNumSsrcs; }
+
+ // This test uses other VideoStream settings than the default settings
+ // implemented in DefaultVideoStreamFactory. Therefore this test implements
+ // its own VideoEncoderConfig::VideoStreamFactoryInterface, which is created
+ // in ModifyVideoConfigs.
+ class VideoStreamFactory
+ : public VideoEncoderConfig::VideoStreamFactoryInterface {
+ public:
+ VideoStreamFactory() {}
+
+ private:
+ std::vector<VideoStream> CreateEncoderStreams(
+ int width,
+ int height,
+ const VideoEncoderConfig& encoder_config) override {
+ std::vector<VideoStream> streams =
+ test::CreateVideoStreams(width, height, encoder_config);
+ // Set low simulcast bitrates to not have to wait for bandwidth ramp-up.
+ for (size_t i = 0; i < encoder_config.number_of_streams; ++i) {
+ streams[i].min_bitrate_bps = 10000;
+ streams[i].target_bitrate_bps = 15000;
+ streams[i].max_bitrate_bps = 20000;
+ }
+ return streams;
+ }
+ };
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ // Set low simulcast bitrates to not have to wait for bandwidth ramp-up.
+ encoder_config->video_stream_factory =
+ new rtc::RefCountedObject<VideoStreamFactory>();
+ send_config->rtp.rtx.payload_type = kSendRtxPayloadType;
+
+ for (size_t i = 0; i < kNumSsrcs; ++i)
+ send_config->rtp.rtx.ssrcs.push_back(kSendRtxSsrcs[i]);
+
+ // Significantly higher than the max bitrates for all video streams ->
+ // forces padding, which triggers redundant payloads on all RTX SSRCs.
+ encoder_config->min_transmit_bitrate_bps = 100000;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait())
+ << "Timed out while waiting for redundant payloads on all SSRCs.";
+ }
+
+ private:
+ size_t ssrcs_to_observe_;
+ std::map<uint32_t, bool> observed_redundant_retransmission_;
+ std::map<uint32_t, bool> registered_rtx_ssrc_;
+ } test;
+
+ RunBaseTest(&test);
+}
+
+void EndToEndTest::TestRtpStatePreservation(bool use_rtx,
+ bool provoke_rtcpsr_before_rtp) {
+  // This test uses other VideoStream settings than the default settings
+ // implemented in DefaultVideoStreamFactory. Therefore this test implements
+ // its own VideoEncoderConfig::VideoStreamFactoryInterface which is created
+ // in ModifyVideoConfigs.
+ class VideoStreamFactory
+ : public VideoEncoderConfig::VideoStreamFactoryInterface {
+ public:
+ VideoStreamFactory() {}
+
+ private:
+ std::vector<VideoStream> CreateEncoderStreams(
+ int width,
+ int height,
+ const VideoEncoderConfig& encoder_config) override {
+ std::vector<VideoStream> streams =
+ test::CreateVideoStreams(width, height, encoder_config);
+
+ if (encoder_config.number_of_streams > 1) {
+ // Lower bitrates so that all streams send initially.
+ RTC_DCHECK_EQ(3, encoder_config.number_of_streams);
+ for (size_t i = 0; i < encoder_config.number_of_streams; ++i) {
+ streams[i].min_bitrate_bps = 10000;
+ streams[i].target_bitrate_bps = 15000;
+ streams[i].max_bitrate_bps = 20000;
+ }
+ } else {
+        // Use the same total bitrates when sending a single stream to avoid
+        // lowering the bitrate estimate and requiring a subsequent ramp-up.
+ streams[0].min_bitrate_bps = 3 * 10000;
+ streams[0].target_bitrate_bps = 3 * 15000;
+ streams[0].max_bitrate_bps = 3 * 20000;
+ }
+ return streams;
+ }
+ };
+
+ class RtpSequenceObserver : public test::RtpRtcpObserver {
+ public:
+ explicit RtpSequenceObserver(bool use_rtx)
+ : test::RtpRtcpObserver(kDefaultTimeoutMs),
+ ssrcs_to_observe_(kNumSsrcs) {
+ for (size_t i = 0; i < kNumSsrcs; ++i) {
+ ssrc_is_rtx_[kVideoSendSsrcs[i]] = false;
+ if (use_rtx)
+ ssrc_is_rtx_[kSendRtxSsrcs[i]] = true;
+ }
+ }
+
+ void ResetExpectedSsrcs(size_t num_expected_ssrcs) {
+ rtc::CritScope lock(&crit_);
+ ssrc_observed_.clear();
+ ssrcs_to_observe_ = num_expected_ssrcs;
+ }
+
+ private:
+ void ValidateTimestampGap(uint32_t ssrc,
+ uint32_t timestamp,
+ bool only_padding)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_) {
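+      // The RTP video clock runs at 90 kHz, i.e. 90 ticks per millisecond,
+      // which converts the timeout from milliseconds to RTP timestamp units.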
+ static const int32_t kMaxTimestampGap = kDefaultTimeoutMs * 90;
+ auto timestamp_it = last_observed_timestamp_.find(ssrc);
+ if (timestamp_it == last_observed_timestamp_.end()) {
+ EXPECT_FALSE(only_padding);
+ last_observed_timestamp_[ssrc] = timestamp;
+ } else {
+ // Verify timestamps are reasonably close.
+ uint32_t latest_observed = timestamp_it->second;
+ // Wraparound handling is unnecessary here as long as an int variable
+ // is used to store the result.
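+        // For example, 0x00000002 - 0xFFFFFFFF wraps to 3 when the unsigned
+        // difference is stored in an int32_t.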
+ int32_t timestamp_gap = timestamp - latest_observed;
+ EXPECT_LE(std::abs(timestamp_gap), kMaxTimestampGap)
+ << "Gap in timestamps (" << latest_observed << " -> " << timestamp
+ << ") too large for SSRC: " << ssrc << ".";
+ timestamp_it->second = timestamp;
+ }
+ }
+
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ RTPHeader header;
+ EXPECT_TRUE(parser_->Parse(packet, length, &header));
+ const uint32_t ssrc = header.ssrc;
+ const int64_t sequence_number =
+ seq_numbers_unwrapper_.Unwrap(header.sequenceNumber);
+ const uint32_t timestamp = header.timestamp;
+ const bool only_padding =
+ header.headerLength + header.paddingLength == length;
+
+ EXPECT_TRUE(ssrc_is_rtx_.find(ssrc) != ssrc_is_rtx_.end())
+ << "Received SSRC that wasn't configured: " << ssrc;
+
+ static const int64_t kMaxSequenceNumberGap = 100;
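+      // The per-SSRC list below doubles as a replay-detection window covering
+      // roughly the last kMaxSequenceNumberGap sequence numbers.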
+ std::list<int64_t>* seq_numbers = &last_observed_seq_numbers_[ssrc];
+ if (seq_numbers->empty()) {
+ seq_numbers->push_back(sequence_number);
+ } else {
+ // We shouldn't get replays of previous sequence numbers.
+ for (int64_t observed : *seq_numbers) {
+ EXPECT_NE(observed, sequence_number)
+ << "Received sequence number " << sequence_number
+              << " for SSRC " << ssrc << " a second time.";
+ }
+ // Verify sequence numbers are reasonably close.
+ int64_t latest_observed = seq_numbers->back();
+ int64_t sequence_number_gap = sequence_number - latest_observed;
+ EXPECT_LE(std::abs(sequence_number_gap), kMaxSequenceNumberGap)
+ << "Gap in sequence numbers (" << latest_observed << " -> "
+ << sequence_number << ") too large for SSRC: " << ssrc << ".";
+ seq_numbers->push_back(sequence_number);
+ if (seq_numbers->size() >= kMaxSequenceNumberGap) {
+ seq_numbers->pop_front();
+ }
+ }
+
+ if (!ssrc_is_rtx_[ssrc]) {
+ rtc::CritScope lock(&crit_);
+ ValidateTimestampGap(ssrc, timestamp, only_padding);
+
+ // Wait for media packets on all ssrcs.
+ if (!ssrc_observed_[ssrc] && !only_padding) {
+ ssrc_observed_[ssrc] = true;
+ if (--ssrcs_to_observe_ == 0)
+ observation_complete_.Set();
+ }
+ }
+
+ return SEND_PACKET;
+ }
+
+ Action OnSendRtcp(const uint8_t* packet, size_t length) override {
+ test::RtcpPacketParser rtcp_parser;
+ rtcp_parser.Parse(packet, length);
+ if (rtcp_parser.sender_report()->num_packets() > 0) {
+ uint32_t ssrc = rtcp_parser.sender_report()->sender_ssrc();
+ uint32_t rtcp_timestamp = rtcp_parser.sender_report()->rtp_timestamp();
+
+ rtc::CritScope lock(&crit_);
+ ValidateTimestampGap(ssrc, rtcp_timestamp, false);
+ }
+ return SEND_PACKET;
+ }
+
+ SequenceNumberUnwrapper seq_numbers_unwrapper_;
+ std::map<uint32_t, std::list<int64_t>> last_observed_seq_numbers_;
+ std::map<uint32_t, uint32_t> last_observed_timestamp_;
+ std::map<uint32_t, bool> ssrc_is_rtx_;
+
+ rtc::CriticalSection crit_;
+ size_t ssrcs_to_observe_ RTC_GUARDED_BY(crit_);
+ std::map<uint32_t, bool> ssrc_observed_ RTC_GUARDED_BY(crit_);
+ } observer(use_rtx);
+
+ std::unique_ptr<test::PacketTransport> send_transport;
+ std::unique_ptr<test::PacketTransport> receive_transport;
+
+ Call::Config config(event_log_.get());
+ VideoEncoderConfig one_stream;
+
+ task_queue_.SendTask([this, &observer, &send_transport, &receive_transport,
+ &config, &one_stream, use_rtx]() {
+ CreateCalls(config, config);
+
+ send_transport = rtc::MakeUnique<test::PacketTransport>(
+ &task_queue_, sender_call_.get(), &observer,
+ test::PacketTransport::kSender, payload_type_map_,
+ FakeNetworkPipe::Config());
+ receive_transport = rtc::MakeUnique<test::PacketTransport>(
+ &task_queue_, nullptr, &observer, test::PacketTransport::kReceiver,
+ payload_type_map_, FakeNetworkPipe::Config());
+ send_transport->SetReceiver(receiver_call_->Receiver());
+ receive_transport->SetReceiver(sender_call_->Receiver());
+
+ CreateSendConfig(kNumSsrcs, 0, 0, send_transport.get());
+
+ if (use_rtx) {
+ for (size_t i = 0; i < kNumSsrcs; ++i) {
+ video_send_config_.rtp.rtx.ssrcs.push_back(kSendRtxSsrcs[i]);
+ }
+ video_send_config_.rtp.rtx.payload_type = kSendRtxPayloadType;
+ }
+
+ video_encoder_config_.video_stream_factory =
+ new rtc::RefCountedObject<VideoStreamFactory>();
+    // Use the same total bitrates when sending a single stream to avoid
+    // lowering the bitrate estimate and requiring a subsequent ramp-up.
+    one_stream = video_encoder_config_.Copy();
+    one_stream.number_of_streams = 1;
+ CreateMatchingReceiveConfigs(receive_transport.get());
+
+ CreateVideoStreams();
+ CreateFrameGeneratorCapturer(30, 1280, 720);
+
+ Start();
+ });
+
+ EXPECT_TRUE(observer.Wait())
+ << "Timed out waiting for all SSRCs to send packets.";
+
+  // Test stream resetting more than once to make sure that the state isn't
+  // only set the first time (as could happen if, for instance,
+  // std::map::insert were used).
+ for (size_t i = 0; i < 3; ++i) {
+ task_queue_.SendTask([&]() {
+ frame_generator_capturer_->Stop();
+ sender_call_->DestroyVideoSendStream(video_send_stream_);
+
+ // Re-create VideoSendStream with only one stream.
+ video_send_stream_ = sender_call_->CreateVideoSendStream(
+ video_send_config_.Copy(), one_stream.Copy());
+ video_send_stream_->Start();
+ if (provoke_rtcpsr_before_rtp) {
+        // A Rapid Resync Request forces the sender to send an RTCP Sender
+        // Report in response. Using this request speeds up the test, since
+        // there is then no need to wait up to a second for the periodic
+        // Sender Report.
+ rtcp::RapidResyncRequest force_send_sr_back_request;
+ rtc::Buffer packet = force_send_sr_back_request.Build();
+ static_cast<webrtc::test::DirectTransport*>(receive_transport.get())
+ ->SendRtcp(packet.data(), packet.size());
+ }
+ CreateFrameGeneratorCapturer(30, 1280, 720);
+ frame_generator_capturer_->Start();
+ });
+
+ observer.ResetExpectedSsrcs(1);
+ EXPECT_TRUE(observer.Wait()) << "Timed out waiting for single RTP packet.";
+
+ // Reconfigure back to use all streams.
+ task_queue_.SendTask([this]() {
+ video_send_stream_->ReconfigureVideoEncoder(video_encoder_config_.Copy());
+ });
+ observer.ResetExpectedSsrcs(kNumSsrcs);
+ EXPECT_TRUE(observer.Wait())
+ << "Timed out waiting for all SSRCs to send packets.";
+
+ // Reconfigure down to one stream.
+ task_queue_.SendTask([this, &one_stream]() {
+ video_send_stream_->ReconfigureVideoEncoder(one_stream.Copy());
+ });
+ observer.ResetExpectedSsrcs(1);
+ EXPECT_TRUE(observer.Wait()) << "Timed out waiting for single RTP packet.";
+
+ // Reconfigure back to use all streams.
+ task_queue_.SendTask([this]() {
+ video_send_stream_->ReconfigureVideoEncoder(video_encoder_config_.Copy());
+ });
+ observer.ResetExpectedSsrcs(kNumSsrcs);
+ EXPECT_TRUE(observer.Wait())
+ << "Timed out waiting for all SSRCs to send packets.";
+ }
+
+ task_queue_.SendTask([this, &send_transport, &receive_transport]() {
+ Stop();
+ DestroyStreams();
+ send_transport.reset();
+ receive_transport.reset();
+ DestroyCalls();
+ });
+}
+
+TEST_P(EndToEndTest, RestartingSendStreamPreservesRtpState) {
+ TestRtpStatePreservation(false, false);
+}
+
+TEST_P(EndToEndTest, RestartingSendStreamPreservesRtpStatesWithRtx) {
+ TestRtpStatePreservation(true, false);
+}
+
+TEST_P(EndToEndTest, RestartingSendStreamKeepsRtpAndRtcpTimestampsSynced) {
+ TestRtpStatePreservation(true, true);
+}
+
+// This test is flaky on linux_memcheck. Disabled on all Linux bots until the
+// flakiness has been fixed.
+// https://bugs.chromium.org/p/webrtc/issues/detail?id=7737
+#if defined(WEBRTC_LINUX)
+TEST_P(EndToEndTest, DISABLED_TestFlexfecRtpStatePreservation) {
+#else
+TEST_P(EndToEndTest, TestFlexfecRtpStatePreservation) {
+#endif
+ class RtpSequenceObserver : public test::RtpRtcpObserver {
+ public:
+ RtpSequenceObserver()
+ : test::RtpRtcpObserver(kDefaultTimeoutMs),
+ num_flexfec_packets_sent_(0) {}
+
+ void ResetPacketCount() {
+ rtc::CritScope lock(&crit_);
+ num_flexfec_packets_sent_ = 0;
+ }
+
+ private:
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ rtc::CritScope lock(&crit_);
+
+ RTPHeader header;
+ EXPECT_TRUE(parser_->Parse(packet, length, &header));
+ const uint16_t sequence_number = header.sequenceNumber;
+ const uint32_t timestamp = header.timestamp;
+ const uint32_t ssrc = header.ssrc;
+
+ if (ssrc == kVideoSendSsrcs[0] || ssrc == kSendRtxSsrcs[0]) {
+ return SEND_PACKET;
+ }
+ EXPECT_EQ(kFlexfecSendSsrc, ssrc) << "Unknown SSRC sent.";
+
+ ++num_flexfec_packets_sent_;
+
+ // If this is the first packet, we have nothing to compare to.
+ if (!last_observed_sequence_number_) {
+ last_observed_sequence_number_.emplace(sequence_number);
+ last_observed_timestamp_.emplace(timestamp);
+
+ return SEND_PACKET;
+ }
+
+ // Verify continuity and monotonicity of RTP sequence numbers.
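+      // The uint16_t cast handles 16-bit wraparound, e.g. 0xFFFF + 1 -> 0.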
+ EXPECT_EQ(static_cast<uint16_t>(*last_observed_sequence_number_ + 1),
+ sequence_number);
+ last_observed_sequence_number_.emplace(sequence_number);
+
+ // Timestamps should be non-decreasing...
+ const bool timestamp_is_same_or_newer =
+ timestamp == *last_observed_timestamp_ ||
+ IsNewerTimestamp(timestamp, *last_observed_timestamp_);
+ EXPECT_TRUE(timestamp_is_same_or_newer);
+ // ...but reasonably close in time.
+ const int k10SecondsInRtpTimestampBase = 10 * kVideoPayloadTypeFrequency;
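+      // kVideoPayloadTypeFrequency is 90000 (90 kHz), so this corresponds to
+      // 900000 RTP timestamp ticks.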
+ EXPECT_TRUE(IsNewerTimestamp(
+ *last_observed_timestamp_ + k10SecondsInRtpTimestampBase, timestamp));
+ last_observed_timestamp_.emplace(timestamp);
+
+ // Pass test when enough packets have been let through.
+ if (num_flexfec_packets_sent_ >= 10) {
+ observation_complete_.Set();
+ }
+
+ return SEND_PACKET;
+ }
+
+ rtc::Optional<uint16_t> last_observed_sequence_number_
+ RTC_GUARDED_BY(crit_);
+ rtc::Optional<uint32_t> last_observed_timestamp_ RTC_GUARDED_BY(crit_);
+ size_t num_flexfec_packets_sent_ RTC_GUARDED_BY(crit_);
+ rtc::CriticalSection crit_;
+ } observer;
+
+ static constexpr int kFrameMaxWidth = 320;
+ static constexpr int kFrameMaxHeight = 180;
+ static constexpr int kFrameRate = 15;
+
+ Call::Config config(event_log_.get());
+
+ std::unique_ptr<test::PacketTransport> send_transport;
+ std::unique_ptr<test::PacketTransport> receive_transport;
+ std::unique_ptr<VideoEncoder> encoder;
+
+ task_queue_.SendTask([&]() {
+ CreateCalls(config, config);
+
+ FakeNetworkPipe::Config lossy_delayed_link;
+ lossy_delayed_link.loss_percent = 2;
+ lossy_delayed_link.queue_delay_ms = 50;
+
+ send_transport = rtc::MakeUnique<test::PacketTransport>(
+ &task_queue_, sender_call_.get(), &observer,
+ test::PacketTransport::kSender, payload_type_map_, lossy_delayed_link);
+ send_transport->SetReceiver(receiver_call_->Receiver());
+
+ FakeNetworkPipe::Config flawless_link;
+ receive_transport = rtc::MakeUnique<test::PacketTransport>(
+ &task_queue_, nullptr, &observer, test::PacketTransport::kReceiver,
+ payload_type_map_, flawless_link);
+ receive_transport->SetReceiver(sender_call_->Receiver());
+
+    // For reduced flakiness, we use a real VP8 encoder together with NACK
+    // and RTX.
+ const int kNumVideoStreams = 1;
+ const int kNumFlexfecStreams = 1;
+ CreateSendConfig(kNumVideoStreams, 0, kNumFlexfecStreams,
+ send_transport.get());
+ encoder = VP8Encoder::Create();
+ video_send_config_.encoder_settings.encoder = encoder.get();
+ video_send_config_.encoder_settings.payload_name = "VP8";
+ video_send_config_.encoder_settings.payload_type = kVideoSendPayloadType;
+ video_send_config_.rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ video_send_config_.rtp.rtx.ssrcs.push_back(kSendRtxSsrcs[0]);
+ video_send_config_.rtp.rtx.payload_type = kSendRtxPayloadType;
+
+ CreateMatchingReceiveConfigs(receive_transport.get());
+ video_receive_configs_[0].rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ video_receive_configs_[0].rtp.rtx_ssrc = kSendRtxSsrcs[0];
+ video_receive_configs_[0]
+ .rtp.rtx_associated_payload_types[kSendRtxPayloadType] =
+ kVideoSendPayloadType;
+
+ // The matching FlexFEC receive config is not created by
+ // CreateMatchingReceiveConfigs since this is not a test::BaseTest.
+ // Set up the receive config manually instead.
+ FlexfecReceiveStream::Config flexfec_receive_config(
+ receive_transport.get());
+ flexfec_receive_config.payload_type =
+ video_send_config_.rtp.flexfec.payload_type;
+ flexfec_receive_config.remote_ssrc = video_send_config_.rtp.flexfec.ssrc;
+ flexfec_receive_config.protected_media_ssrcs =
+ video_send_config_.rtp.flexfec.protected_media_ssrcs;
+ flexfec_receive_config.local_ssrc = kReceiverLocalVideoSsrc;
+ flexfec_receive_config.transport_cc = true;
+ flexfec_receive_config.rtp_header_extensions.emplace_back(
+ RtpExtension::kTransportSequenceNumberUri,
+ test::kTransportSequenceNumberExtensionId);
+ flexfec_receive_configs_.push_back(flexfec_receive_config);
+
+ CreateFlexfecStreams();
+ CreateVideoStreams();
+
+ // RTCP might be disabled if the network is "down".
+ sender_call_->SignalChannelNetworkState(MediaType::VIDEO, kNetworkUp);
+ receiver_call_->SignalChannelNetworkState(MediaType::VIDEO, kNetworkUp);
+
+ CreateFrameGeneratorCapturer(kFrameRate, kFrameMaxWidth, kFrameMaxHeight);
+
+ Start();
+ });
+
+ // Initial test.
+ EXPECT_TRUE(observer.Wait()) << "Timed out waiting for packets.";
+
+ task_queue_.SendTask([this, &observer]() {
+ // Ensure monotonicity when the VideoSendStream is restarted.
+ Stop();
+ observer.ResetPacketCount();
+ Start();
+ });
+
+ EXPECT_TRUE(observer.Wait()) << "Timed out waiting for packets.";
+
+ task_queue_.SendTask([this, &observer]() {
+ // Ensure monotonicity when the VideoSendStream is recreated.
+ frame_generator_capturer_->Stop();
+ sender_call_->DestroyVideoSendStream(video_send_stream_);
+ observer.ResetPacketCount();
+ video_send_stream_ = sender_call_->CreateVideoSendStream(
+ video_send_config_.Copy(), video_encoder_config_.Copy());
+ video_send_stream_->Start();
+ CreateFrameGeneratorCapturer(kFrameRate, kFrameMaxWidth, kFrameMaxHeight);
+ frame_generator_capturer_->Start();
+ });
+
+ EXPECT_TRUE(observer.Wait()) << "Timed out waiting for packets.";
+
+ // Cleanup.
+ task_queue_.SendTask([this, &send_transport, &receive_transport]() {
+ Stop();
+ DestroyStreams();
+ send_transport.reset();
+ receive_transport.reset();
+ DestroyCalls();
+ });
+}
+
+TEST_P(EndToEndTest, RespectsNetworkState) {
+ // TODO(pbos): Remove accepted downtime packets etc. when signaling network
+ // down blocks until no more packets will be sent.
+
+  // The pacer will send from its packet list and then send the required
+  // padding before checking paused_ again. This should be enough for one
+  // round of pacing; otherwise, increase it.
+ static const int kNumAcceptedDowntimeRtp = 5;
+ // A single RTCP may be in the pipeline.
+ static const int kNumAcceptedDowntimeRtcp = 1;
+ class NetworkStateTest : public test::EndToEndTest, public test::FakeEncoder {
+ public:
+ explicit NetworkStateTest(
+ test::SingleThreadedTaskQueueForTesting* task_queue)
+ : EndToEndTest(kDefaultTimeoutMs),
+ FakeEncoder(Clock::GetRealTimeClock()),
+ task_queue_(task_queue),
+ encoded_frames_(false, false),
+ packet_event_(false, false),
+ sender_call_(nullptr),
+ receiver_call_(nullptr),
+ sender_state_(kNetworkUp),
+ sender_rtp_(0),
+ sender_padding_(0),
+ sender_rtcp_(0),
+ receiver_rtcp_(0),
+ down_frames_(0) {}
+
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ rtc::CritScope lock(&test_crit_);
+ RTPHeader header;
+ EXPECT_TRUE(parser_->Parse(packet, length, &header));
+ if (length == header.headerLength + header.paddingLength)
+ ++sender_padding_;
+ ++sender_rtp_;
+ packet_event_.Set();
+ return SEND_PACKET;
+ }
+
+ Action OnSendRtcp(const uint8_t* packet, size_t length) override {
+ rtc::CritScope lock(&test_crit_);
+ ++sender_rtcp_;
+ packet_event_.Set();
+ return SEND_PACKET;
+ }
+
+ Action OnReceiveRtp(const uint8_t* packet, size_t length) override {
+ ADD_FAILURE() << "Unexpected receiver RTP, should not be sending.";
+ return SEND_PACKET;
+ }
+
+ Action OnReceiveRtcp(const uint8_t* packet, size_t length) override {
+ rtc::CritScope lock(&test_crit_);
+ ++receiver_rtcp_;
+ packet_event_.Set();
+ return SEND_PACKET;
+ }
+
+ void OnCallsCreated(Call* sender_call, Call* receiver_call) override {
+ sender_call_ = sender_call;
+ receiver_call_ = receiver_call;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->encoder_settings.encoder = this;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(encoded_frames_.Wait(kDefaultTimeoutMs))
+ << "No frames received by the encoder.";
+
+ task_queue_->SendTask([this]() {
+ // Wait for packets from both sender/receiver.
+ WaitForPacketsOrSilence(false, false);
+
+        // Sender-side network down for audio; there should be no effect on
+        // video.
+ sender_call_->SignalChannelNetworkState(MediaType::AUDIO, kNetworkDown);
+ WaitForPacketsOrSilence(false, false);
+
+        // Receiver-side network down for audio; no change is expected.
+ receiver_call_->SignalChannelNetworkState(MediaType::AUDIO,
+ kNetworkDown);
+ WaitForPacketsOrSilence(false, false);
+
+ // Sender-side network down.
+ sender_call_->SignalChannelNetworkState(MediaType::VIDEO, kNetworkDown);
+ {
+ rtc::CritScope lock(&test_crit_);
+          // After the network goes down, we shouldn't be encoding more
+          // frames.
+ sender_state_ = kNetworkDown;
+ }
+ // Wait for receiver-packets and no sender packets.
+ WaitForPacketsOrSilence(true, false);
+
+ // Receiver-side network down.
+ receiver_call_->SignalChannelNetworkState(MediaType::VIDEO,
+ kNetworkDown);
+ WaitForPacketsOrSilence(true, true);
+
+        // Network up for audio on both sides; video is still not expected to
+        // start.
+ sender_call_->SignalChannelNetworkState(MediaType::AUDIO, kNetworkUp);
+ receiver_call_->SignalChannelNetworkState(MediaType::AUDIO, kNetworkUp);
+ WaitForPacketsOrSilence(true, true);
+
+ // Network back up again for both.
+ {
+ rtc::CritScope lock(&test_crit_);
+ // It's OK to encode frames again, as we're about to bring up the
+ // network.
+ sender_state_ = kNetworkUp;
+ }
+ sender_call_->SignalChannelNetworkState(MediaType::VIDEO, kNetworkUp);
+ receiver_call_->SignalChannelNetworkState(MediaType::VIDEO, kNetworkUp);
+ WaitForPacketsOrSilence(false, false);
+
+ // TODO(skvlad): add tests to verify that the audio streams are stopped
+ // when the network goes down for audio once the workaround in
+ // paced_sender.cc is removed.
+ });
+ }
+
+ int32_t Encode(const VideoFrame& input_image,
+ const CodecSpecificInfo* codec_specific_info,
+ const std::vector<FrameType>* frame_types) override {
+ {
+ rtc::CritScope lock(&test_crit_);
+ if (sender_state_ == kNetworkDown) {
+ ++down_frames_;
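+            // One frame may already be in flight when the network goes down,
+            // so tolerate exactly one encoded frame in this state.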
+ EXPECT_LE(down_frames_, 1)
+ << "Encoding more than one frame while network is down.";
+ if (down_frames_ > 1)
+ encoded_frames_.Set();
+ } else {
+ encoded_frames_.Set();
+ }
+ }
+ return test::FakeEncoder::Encode(
+ input_image, codec_specific_info, frame_types);
+ }
+
+ private:
+ void WaitForPacketsOrSilence(bool sender_down, bool receiver_down) {
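+      // For a side that is expected to be down, assert that no more than the
+      // accepted number of packets leak through while waiting out the full
+      // silence timeout; for a side that is up, simply wait until enough
+      // packets have been observed.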
+ int64_t initial_time_ms = clock_->TimeInMilliseconds();
+ int initial_sender_rtp;
+ int initial_sender_rtcp;
+ int initial_receiver_rtcp;
+ {
+ rtc::CritScope lock(&test_crit_);
+ initial_sender_rtp = sender_rtp_;
+ initial_sender_rtcp = sender_rtcp_;
+ initial_receiver_rtcp = receiver_rtcp_;
+ }
+ bool sender_done = false;
+ bool receiver_done = false;
+ while (!sender_done || !receiver_done) {
+ packet_event_.Wait(kSilenceTimeoutMs);
+ int64_t time_now_ms = clock_->TimeInMilliseconds();
+ rtc::CritScope lock(&test_crit_);
+ if (sender_down) {
+ ASSERT_LE(sender_rtp_ - initial_sender_rtp - sender_padding_,
+ kNumAcceptedDowntimeRtp)
+ << "RTP sent during sender-side downtime.";
+ ASSERT_LE(sender_rtcp_ - initial_sender_rtcp,
+ kNumAcceptedDowntimeRtcp)
+ << "RTCP sent during sender-side downtime.";
+ if (time_now_ms - initial_time_ms >=
+ static_cast<int64_t>(kSilenceTimeoutMs)) {
+ sender_done = true;
+ }
+ } else {
+ if (sender_rtp_ > initial_sender_rtp + kNumAcceptedDowntimeRtp)
+ sender_done = true;
+ }
+ if (receiver_down) {
+ ASSERT_LE(receiver_rtcp_ - initial_receiver_rtcp,
+ kNumAcceptedDowntimeRtcp)
+ << "RTCP sent during receiver-side downtime.";
+ if (time_now_ms - initial_time_ms >=
+ static_cast<int64_t>(kSilenceTimeoutMs)) {
+ receiver_done = true;
+ }
+ } else {
+ if (receiver_rtcp_ > initial_receiver_rtcp + kNumAcceptedDowntimeRtcp)
+ receiver_done = true;
+ }
+ }
+ }
+
+ test::SingleThreadedTaskQueueForTesting* const task_queue_;
+ rtc::CriticalSection test_crit_;
+ rtc::Event encoded_frames_;
+ rtc::Event packet_event_;
+ Call* sender_call_;
+ Call* receiver_call_;
+ NetworkState sender_state_ RTC_GUARDED_BY(test_crit_);
+ int sender_rtp_ RTC_GUARDED_BY(test_crit_);
+ int sender_padding_ RTC_GUARDED_BY(test_crit_);
+ int sender_rtcp_ RTC_GUARDED_BY(test_crit_);
+ int receiver_rtcp_ RTC_GUARDED_BY(test_crit_);
+ int down_frames_ RTC_GUARDED_BY(test_crit_);
+ } test(&task_queue_);
+
+ RunBaseTest(&test);
+}
+
+TEST_P(EndToEndTest, CallReportsRttForSender) {
+ static const int kSendDelayMs = 30;
+ static const int kReceiveDelayMs = 70;
+
+ std::unique_ptr<test::DirectTransport> sender_transport;
+ std::unique_ptr<test::DirectTransport> receiver_transport;
+
+ task_queue_.SendTask([this, &sender_transport, &receiver_transport]() {
+ FakeNetworkPipe::Config config;
+ config.queue_delay_ms = kSendDelayMs;
+ CreateCalls(Call::Config(event_log_.get()), Call::Config(event_log_.get()));
+ sender_transport = rtc::MakeUnique<test::DirectTransport>(
+ &task_queue_, config, sender_call_.get(), payload_type_map_);
+ config.queue_delay_ms = kReceiveDelayMs;
+ receiver_transport = rtc::MakeUnique<test::DirectTransport>(
+ &task_queue_, config, receiver_call_.get(), payload_type_map_);
+ sender_transport->SetReceiver(receiver_call_->Receiver());
+ receiver_transport->SetReceiver(sender_call_->Receiver());
+
+ CreateSendConfig(1, 0, 0, sender_transport.get());
+ CreateMatchingReceiveConfigs(receiver_transport.get());
+
+ CreateVideoStreams();
+ CreateFrameGeneratorCapturer(kDefaultFramerate, kDefaultWidth,
+ kDefaultHeight);
+ Start();
+ });
+
+ int64_t start_time_ms = clock_->TimeInMilliseconds();
+ while (true) {
+ Call::Stats stats = sender_call_->GetStats();
+ ASSERT_GE(start_time_ms + kDefaultTimeoutMs,
+ clock_->TimeInMilliseconds())
+ << "No RTT stats before timeout!";
+ if (stats.rtt_ms != -1) {
+      // To avoid failures caused by rounding or minor NTP clock adjustments,
+      // relax the expectation by 1 ms.
+ constexpr int kAllowedErrorMs = 1;
+ EXPECT_GE(stats.rtt_ms, kSendDelayMs + kReceiveDelayMs - kAllowedErrorMs);
+ break;
+ }
+ SleepMs(10);
+ }
+
+ task_queue_.SendTask([this, &sender_transport, &receiver_transport]() {
+ Stop();
+ DestroyStreams();
+ sender_transport.reset();
+ receiver_transport.reset();
+ DestroyCalls();
+ });
+}
+
+void EndToEndTest::VerifyNewVideoSendStreamsRespectNetworkState(
+ MediaType network_to_bring_up,
+ VideoEncoder* encoder,
+ Transport* transport) {
+ task_queue_.SendTask([this, network_to_bring_up, encoder, transport]() {
+ CreateSenderCall(Call::Config(event_log_.get()));
+ sender_call_->SignalChannelNetworkState(network_to_bring_up, kNetworkUp);
+
+ CreateSendConfig(1, 0, 0, transport);
+ video_send_config_.encoder_settings.encoder = encoder;
+ CreateVideoStreams();
+ CreateFrameGeneratorCapturer(kDefaultFramerate, kDefaultWidth,
+ kDefaultHeight);
+
+ Start();
+ });
+
+ SleepMs(kSilenceTimeoutMs);
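+  // The injected encoder and transport perform the actual verification:
+  // depending on the test, they either add failures on unexpected activity
+  // during this window or require that expected activity occurred.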
+
+ task_queue_.SendTask([this]() {
+ Stop();
+ DestroyStreams();
+ DestroyCalls();
+ });
+}
+
+void EndToEndTest::VerifyNewVideoReceiveStreamsRespectNetworkState(
+ MediaType network_to_bring_up,
+ Transport* transport) {
+ std::unique_ptr<test::DirectTransport> sender_transport;
+
+ task_queue_.SendTask([this, &sender_transport, network_to_bring_up,
+ transport]() {
+ Call::Config config(event_log_.get());
+ CreateCalls(config, config);
+ receiver_call_->SignalChannelNetworkState(network_to_bring_up, kNetworkUp);
+ sender_transport = rtc::MakeUnique<test::DirectTransport>(
+ &task_queue_, sender_call_.get(), payload_type_map_);
+ sender_transport->SetReceiver(receiver_call_->Receiver());
+ CreateSendConfig(1, 0, 0, sender_transport.get());
+ CreateMatchingReceiveConfigs(transport);
+ CreateVideoStreams();
+ CreateFrameGeneratorCapturer(kDefaultFramerate, kDefaultWidth,
+ kDefaultHeight);
+ Start();
+ });
+
+ SleepMs(kSilenceTimeoutMs);
+
+ task_queue_.SendTask([this, &sender_transport]() {
+ Stop();
+ DestroyStreams();
+ sender_transport.reset();
+ DestroyCalls();
+ });
+}
+
+TEST_P(EndToEndTest, NewVideoSendStreamsRespectVideoNetworkDown) {
+ class UnusedEncoder : public test::FakeEncoder {
+ public:
+ UnusedEncoder() : FakeEncoder(Clock::GetRealTimeClock()) {}
+
+ int32_t InitEncode(const VideoCodec* config,
+ int32_t number_of_cores,
+ size_t max_payload_size) override {
+ EXPECT_GT(config->startBitrate, 0u);
+ return 0;
+ }
+ int32_t Encode(const VideoFrame& input_image,
+ const CodecSpecificInfo* codec_specific_info,
+ const std::vector<FrameType>* frame_types) override {
+ ADD_FAILURE() << "Unexpected frame encode.";
+ return test::FakeEncoder::Encode(input_image, codec_specific_info,
+ frame_types);
+ }
+ };
+
+ UnusedEncoder unused_encoder;
+ UnusedTransport unused_transport;
+ VerifyNewVideoSendStreamsRespectNetworkState(
+ MediaType::AUDIO, &unused_encoder, &unused_transport);
+}
+
+TEST_P(EndToEndTest, NewVideoSendStreamsIgnoreAudioNetworkDown) {
+ class RequiredEncoder : public test::FakeEncoder {
+ public:
+ RequiredEncoder()
+ : FakeEncoder(Clock::GetRealTimeClock()), encoded_frame_(false) {}
+ ~RequiredEncoder() {
+ if (!encoded_frame_) {
+ ADD_FAILURE() << "Didn't encode an expected frame";
+ }
+ }
+ int32_t Encode(const VideoFrame& input_image,
+ const CodecSpecificInfo* codec_specific_info,
+ const std::vector<FrameType>* frame_types) override {
+ encoded_frame_ = true;
+ return test::FakeEncoder::Encode(input_image, codec_specific_info,
+ frame_types);
+ }
+
+ private:
+ bool encoded_frame_;
+ };
+
+ RequiredTransport required_transport(true /*rtp*/, false /*rtcp*/);
+ RequiredEncoder required_encoder;
+ VerifyNewVideoSendStreamsRespectNetworkState(
+ MediaType::VIDEO, &required_encoder, &required_transport);
+}
+
+TEST_P(EndToEndTest, NewVideoReceiveStreamsRespectVideoNetworkDown) {
+ UnusedTransport transport;
+ VerifyNewVideoReceiveStreamsRespectNetworkState(MediaType::AUDIO, &transport);
+}
+
+TEST_P(EndToEndTest, NewVideoReceiveStreamsIgnoreAudioNetworkDown) {
+ RequiredTransport transport(false /*rtp*/, true /*rtcp*/);
+ VerifyNewVideoReceiveStreamsRespectNetworkState(MediaType::VIDEO, &transport);
+}
+
+void VerifyEmptyNackConfig(const NackConfig& config) {
+ EXPECT_EQ(0, config.rtp_history_ms)
+ << "Enabling NACK requires rtcp-fb: nack negotiation.";
+}
+
+void VerifyEmptyUlpfecConfig(const UlpfecConfig& config) {
+ EXPECT_EQ(-1, config.ulpfec_payload_type)
+ << "Enabling ULPFEC requires rtpmap: ulpfec negotiation.";
+ EXPECT_EQ(-1, config.red_payload_type)
+ << "Enabling ULPFEC requires rtpmap: red negotiation.";
+ EXPECT_EQ(-1, config.red_rtx_payload_type)
+ << "Enabling RTX in ULPFEC requires rtpmap: rtx negotiation.";
+}
+
+void VerifyEmptyFlexfecConfig(
+ const VideoSendStream::Config::Rtp::Flexfec& config) {
+ EXPECT_EQ(-1, config.payload_type)
+ << "Enabling FlexFEC requires rtpmap: flexfec negotiation.";
+ EXPECT_EQ(0U, config.ssrc)
+ << "Enabling FlexFEC requires ssrc-group: FEC-FR negotiation.";
+ EXPECT_TRUE(config.protected_media_ssrcs.empty())
+ << "Enabling FlexFEC requires ssrc-group: FEC-FR negotiation.";
+}
+
+TEST_P(EndToEndTest, VerifyDefaultSendConfigParameters) {
+ VideoSendStream::Config default_send_config(nullptr);
+ EXPECT_EQ(0, default_send_config.rtp.nack.rtp_history_ms)
+      << "Enabling NACK requires rtcp-fb: nack negotiation.";
+ EXPECT_TRUE(default_send_config.rtp.rtx.ssrcs.empty())
+ << "Enabling RTX requires rtpmap: rtx negotiation.";
+ EXPECT_TRUE(default_send_config.rtp.extensions.empty())
+      << "Enabling RTP extensions requires negotiation.";
+
+ VerifyEmptyNackConfig(default_send_config.rtp.nack);
+ VerifyEmptyUlpfecConfig(default_send_config.rtp.ulpfec);
+ VerifyEmptyFlexfecConfig(default_send_config.rtp.flexfec);
+}
+
+TEST_P(EndToEndTest, VerifyDefaultVideoReceiveConfigParameters) {
+ VideoReceiveStream::Config default_receive_config(nullptr);
+ EXPECT_EQ(RtcpMode::kCompound, default_receive_config.rtp.rtcp_mode)
+      << "Reduced-size RTCP requires rtcp-rsize to be negotiated.";
+ EXPECT_FALSE(default_receive_config.rtp.remb)
+      << "REMB requires rtcp-fb: goog-remb to be negotiated.";
+ EXPECT_FALSE(
+ default_receive_config.rtp.rtcp_xr.receiver_reference_time_report)
+ << "RTCP XR settings require rtcp-xr to be negotiated.";
+ EXPECT_EQ(0U, default_receive_config.rtp.rtx_ssrc)
+      << "Enabling RTX requires ssrc-group: FID negotiation.";
+ EXPECT_TRUE(default_receive_config.rtp.rtx_associated_payload_types.empty())
+ << "Enabling RTX requires rtpmap: rtx negotiation.";
+ EXPECT_TRUE(default_receive_config.rtp.extensions.empty())
+      << "Enabling RTP extensions requires negotiation.";
+
+ VerifyEmptyNackConfig(default_receive_config.rtp.nack);
+ EXPECT_EQ(-1, default_receive_config.rtp.ulpfec_payload_type)
+ << "Enabling ULPFEC requires rtpmap: ulpfec negotiation.";
+ EXPECT_EQ(-1, default_receive_config.rtp.red_payload_type)
+ << "Enabling ULPFEC requires rtpmap: red negotiation.";
+}
+
+TEST_P(EndToEndTest, VerifyDefaultFlexfecReceiveConfigParameters) {
+ test::NullTransport rtcp_send_transport;
+ FlexfecReceiveStream::Config default_receive_config(&rtcp_send_transport);
+ EXPECT_EQ(-1, default_receive_config.payload_type)
+ << "Enabling FlexFEC requires rtpmap: flexfec negotiation.";
+ EXPECT_EQ(0U, default_receive_config.remote_ssrc)
+ << "Enabling FlexFEC requires ssrc-group: FEC-FR negotiation.";
+ EXPECT_TRUE(default_receive_config.protected_media_ssrcs.empty())
+ << "Enabling FlexFEC requires ssrc-group: FEC-FR negotiation.";
+}
+
+TEST_P(EndToEndTest, TransportSeqNumOnAudioAndVideo) {
+ static constexpr int kExtensionId = 8;
+ static constexpr size_t kMinPacketsToWaitFor = 50;
+ class TransportSequenceNumberTest : public test::EndToEndTest {
+ public:
+ TransportSequenceNumberTest()
+ : EndToEndTest(kDefaultTimeoutMs),
+ video_observed_(false),
+ audio_observed_(false) {
+ parser_->RegisterRtpHeaderExtension(kRtpExtensionTransportSequenceNumber,
+ kExtensionId);
+ }
+
+ size_t GetNumVideoStreams() const override { return 1; }
+ size_t GetNumAudioStreams() const override { return 1; }
+
+ void ModifyAudioConfigs(
+ AudioSendStream::Config* send_config,
+ std::vector<AudioReceiveStream::Config>* receive_configs) override {
+ send_config->rtp.extensions.clear();
+ send_config->rtp.extensions.push_back(RtpExtension(
+ RtpExtension::kTransportSequenceNumberUri, kExtensionId));
+ (*receive_configs)[0].rtp.extensions.clear();
+ (*receive_configs)[0].rtp.extensions = send_config->rtp.extensions;
+ }
+
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ RTPHeader header;
+ EXPECT_TRUE(parser_->Parse(packet, length, &header));
+ EXPECT_TRUE(header.extension.hasTransportSequenceNumber);
+ // Unwrap packet id and verify uniqueness.
+ int64_t packet_id =
+ unwrapper_.Unwrap(header.extension.transportSequenceNumber);
+ EXPECT_TRUE(received_packet_ids_.insert(packet_id).second);
+
+ if (header.ssrc == kVideoSendSsrcs[0])
+ video_observed_ = true;
+ if (header.ssrc == kAudioSendSsrc)
+ audio_observed_ = true;
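+      // Audio and video are assumed to share a single transport-wide sequence
+      // number counter, so once both are observed the set of unwrapped ids
+      // should be gapless: its size must equal the range it spans.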
+ if (audio_observed_ && video_observed_ &&
+ received_packet_ids_.size() >= kMinPacketsToWaitFor) {
+ size_t packet_id_range =
+ *received_packet_ids_.rbegin() - *received_packet_ids_.begin() + 1;
+ EXPECT_EQ(received_packet_ids_.size(), packet_id_range);
+ observation_complete_.Set();
+ }
+ return SEND_PACKET;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timed out while waiting for audio and video "
+ "packets with transport sequence number.";
+ }
+
+ void ExpectSuccessful() {
+ EXPECT_TRUE(video_observed_);
+ EXPECT_TRUE(audio_observed_);
+ EXPECT_GE(received_packet_ids_.size(), kMinPacketsToWaitFor);
+ }
+
+ private:
+ bool video_observed_;
+ bool audio_observed_;
+ SequenceNumberUnwrapper unwrapper_;
+ std::set<int64_t> received_packet_ids_;
+ } test;
+
+ RunBaseTest(&test);
+  // Double-check the conditions for a successful test to produce a better
+  // error message when the test fails.
+ test.ExpectSuccessful();
+}
+
+class EndToEndLogTest : public EndToEndTest {
+ void SetUp() { paths_.clear(); }
+ void TearDown() {
+ for (const auto& path : paths_) {
+ rtc::RemoveFile(path);
+ }
+ }
+
+ public:
+ int AddFile() {
+ paths_.push_back(test::TempFilename(test::OutputPath(), "test_file"));
+ return static_cast<int>(paths_.size()) - 1;
+ }
+
+ rtc::PlatformFile OpenFile(int idx) {
+ return rtc::OpenPlatformFile(paths_[idx]);
+ }
+
+ void LogSend(bool open) {
+ if (open) {
+ video_send_stream_->EnableEncodedFrameRecording(
+ std::vector<rtc::PlatformFile>(1, OpenFile(AddFile())), 0);
+ } else {
+ video_send_stream_->DisableEncodedFrameRecording();
+ }
+ }
+ void LogReceive(bool open) {
+ if (open) {
+ video_receive_streams_[0]->EnableEncodedFrameRecording(
+ OpenFile(AddFile()), 0);
+ } else {
+ video_receive_streams_[0]->DisableEncodedFrameRecording();
+ }
+ }
+
+ std::vector<std::string> paths_;
+};
+
+TEST_P(EndToEndLogTest, LogsEncodedFramesWhenRequested) {
+ static const int kNumFramesToRecord = 10;
+ class LogEncodingObserver : public test::EndToEndTest,
+ public EncodedFrameObserver {
+ public:
+ explicit LogEncodingObserver(EndToEndLogTest* fixture)
+ : EndToEndTest(kDefaultTimeoutMs),
+ fixture_(fixture),
+ recorded_frames_(0) {}
+
+ void PerformTest() override {
+ fixture_->LogSend(true);
+ fixture_->LogReceive(true);
+ ASSERT_TRUE(Wait()) << "Timed out while waiting for frame logging.";
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ encoder_ = VP8Encoder::Create();
+ decoder_ = VP8Decoder::Create();
+
+ send_config->post_encode_callback = this;
+ send_config->encoder_settings.payload_name = "VP8";
+ send_config->encoder_settings.encoder = encoder_.get();
+
+ (*receive_configs)[0].decoders.resize(1);
+ (*receive_configs)[0].decoders[0].payload_type =
+ send_config->encoder_settings.payload_type;
+ (*receive_configs)[0].decoders[0].payload_name =
+ send_config->encoder_settings.payload_name;
+ (*receive_configs)[0].decoders[0].decoder = decoder_.get();
+ }
+
+ void EncodedFrameCallback(const EncodedFrame& encoded_frame) override {
+ rtc::CritScope lock(&crit_);
+ if (recorded_frames_++ > kNumFramesToRecord) {
+ fixture_->LogSend(false);
+ fixture_->LogReceive(false);
+ rtc::File send_file(fixture_->OpenFile(0));
+ rtc::File receive_file(fixture_->OpenFile(1));
+ uint8_t out[100];
+        // If logging has worked correctly, neither file should be empty, i.e.
+        // we should be able to read something from them.
+ EXPECT_LT(0u, send_file.Read(out, 100));
+ EXPECT_LT(0u, receive_file.Read(out, 100));
+ observation_complete_.Set();
+ }
+ }
+
+ private:
+ EndToEndLogTest* const fixture_;
+ std::unique_ptr<VideoEncoder> encoder_;
+ std::unique_ptr<VideoDecoder> decoder_;
+ rtc::CriticalSection crit_;
+ int recorded_frames_ RTC_GUARDED_BY(crit_);
+ } test(this);
+
+ RunBaseTest(&test);
+}
+
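+// Each EndToEndTest above is instantiated twice: once with round-robin
+// pacing disabled and once with it enabled.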
+INSTANTIATE_TEST_CASE_P(RoundRobin,
+ EndToEndTest,
+ ::testing::Values("WebRTC-RoundRobinPacing/Disabled/",
+ "WebRTC-RoundRobinPacing/Enabled/"));
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/video/full_stack_tests.cc b/third_party/libwebrtc/webrtc/video/full_stack_tests.cc
new file mode 100644
index 0000000000..c3dfb798e6
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/full_stack_tests.cc
@@ -0,0 +1,853 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include <stdio.h>
+
+#include "modules/pacing/alr_detector.h"
+#include "test/field_trial.h"
+#include "test/gtest.h"
+#include "video/video_quality_test.h"
+
+namespace webrtc {
+
+namespace {
+static const int kFullStackTestDurationSecs = 45;
+} // namespace
+
+class FullStackTest : public VideoQualityTest {
+ public:
+ void RunTest(const VideoQualityTest::Params &params) {
+ RunWithAnalyzer(params);
+ }
+
+ protected:
+ const std::string kScreenshareSimulcastExperiment =
+ "WebRTC-SimulcastScreenshare/Enabled/";
+ const std::string kAlrProbingExperiment =
+ std::string(AlrDetector::kScreenshareProbingBweExperimentName) +
+ "/1.1,2875,85,20,-20,0/";
+};
+
+// VideoQualityTest::Params params = {
+// { ... }, // Common.
+// { ... }, // Video-specific settings.
+// { ... }, // Screenshare-specific settings.
+// { ... }, // Analyzer settings.
+// pipe, // FakeNetworkPipe::Config
+// { ... }, // Spatial scalability.
+// logs // bool
+// };
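+//
+// In the tests below, the braced |video| initializers follow the field order
+// of VideoQualityTest::Params::Video, which here appears to be: enabled,
+// width, height, fps, min/target/max bitrate (bps), suspend_below_min_bitrate,
+// codec, num_temporal_layers, selected_tl, min_transmit_bps, ulpfec, flexfec,
+// clip_name. The |analyzer| initializers are: test_label, avg_psnr_threshold,
+// avg_ssim_threshold, test duration (seconds).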
+
+#if !defined(RTC_DISABLE_VP9)
+TEST_F(FullStackTest, ForemanCifWithoutPacketLossVp9) {
+ // TODO(pbos): Decide on psnr/ssim thresholds for foreman_cif.
+ VideoQualityTest::Params foreman_cif;
+ foreman_cif.call.send_side_bwe = true;
+ foreman_cif.video = {true, 352, 288, 30, 700000, 700000, 700000, false,
+ "VP9", 1, 0, 0, false, false, "foreman_cif"};
+ foreman_cif.analyzer = {"foreman_cif_net_delay_0_0_plr_0_VP9", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ RunTest(foreman_cif);
+}
+
+TEST_F(FullStackTest, ForemanCifPlr5Vp9) {
+ VideoQualityTest::Params foreman_cif;
+ foreman_cif.call.send_side_bwe = true;
+ foreman_cif.video = {true, 352, 288, 30, 30000, 500000, 2000000, false,
+ "VP9", 1, 0, 0, false, false, "foreman_cif"};
+ foreman_cif.analyzer = {"foreman_cif_delay_50_0_plr_5_VP9", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ foreman_cif.pipe.loss_percent = 5;
+ foreman_cif.pipe.queue_delay_ms = 50;
+ RunTest(foreman_cif);
+}
+#endif // !defined(RTC_DISABLE_VP9)
+
+TEST_F(FullStackTest, ParisQcifWithoutPacketLoss) {
+ VideoQualityTest::Params paris_qcif;
+ paris_qcif.call.send_side_bwe = true;
+ paris_qcif.video = {true, 176, 144, 30, 300000, 300000, 300000, false,
+ "VP8", 1, 0, 0, false, false, "paris_qcif"};
+ paris_qcif.analyzer = {"net_delay_0_0_plr_0", 36.0, 0.96,
+ kFullStackTestDurationSecs};
+ RunTest(paris_qcif);
+}
+
+TEST_F(FullStackTest, ForemanCifWithoutPacketLoss) {
+ // TODO(pbos): Decide on psnr/ssim thresholds for foreman_cif.
+ VideoQualityTest::Params foreman_cif;
+ foreman_cif.call.send_side_bwe = true;
+ foreman_cif.video = {true, 352, 288, 30, 700000, 700000, 700000, false,
+ "VP8", 1, 0, 0, false, false, "foreman_cif"};
+ foreman_cif.analyzer = {"foreman_cif_net_delay_0_0_plr_0", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ RunTest(foreman_cif);
+}
+
+TEST_F(FullStackTest, ForemanCif30kbpsWithoutPacketLoss) {
+ VideoQualityTest::Params foreman_cif;
+ foreman_cif.call.send_side_bwe = true;
+ foreman_cif.video = {true, 352, 288, 10, 30000, 30000, 30000, false,
+ "VP8", 1, 0, 0, false, false, "foreman_cif"};
+ foreman_cif.analyzer = {"foreman_cif_30kbps_net_delay_0_0_plr_0", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ RunTest(foreman_cif);
+}
+
+TEST_F(FullStackTest, ForemanCifPlr5) {
+ VideoQualityTest::Params foreman_cif;
+ foreman_cif.call.send_side_bwe = true;
+ foreman_cif.video = {true, 352, 288, 30, 30000, 500000, 2000000, false,
+ "VP8", 1, 0, 0, false, false, "foreman_cif"};
+ foreman_cif.analyzer = {"foreman_cif_delay_50_0_plr_5", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ foreman_cif.pipe.loss_percent = 5;
+ foreman_cif.pipe.queue_delay_ms = 50;
+ RunTest(foreman_cif);
+}
+
+TEST_F(FullStackTest, ForemanCifPlr5Ulpfec) {
+ VideoQualityTest::Params foreman_cif;
+ foreman_cif.call.send_side_bwe = true;
+ foreman_cif.video = {true, 352, 288, 30, 30000, 500000, 2000000, false,
+ "VP8", 1, 0, 0, true, false, "foreman_cif"};
+ foreman_cif.analyzer = {"foreman_cif_delay_50_0_plr_5_ulpfec", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ foreman_cif.pipe.loss_percent = 5;
+ foreman_cif.pipe.queue_delay_ms = 50;
+ RunTest(foreman_cif);
+}
+
+TEST_F(FullStackTest, ForemanCifPlr5Flexfec) {
+ VideoQualityTest::Params foreman_cif;
+ foreman_cif.call.send_side_bwe = true;
+ foreman_cif.video = {true, 352, 288, 30, 30000, 500000, 2000000, false,
+ "VP8", 1, 0, 0, false, true, "foreman_cif"};
+ foreman_cif.analyzer = {"foreman_cif_delay_50_0_plr_5_flexfec", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ foreman_cif.pipe.loss_percent = 5;
+ foreman_cif.pipe.queue_delay_ms = 50;
+ RunTest(foreman_cif);
+}
+
+TEST_F(FullStackTest, ForemanCif500kbpsPlr3Flexfec) {
+ VideoQualityTest::Params foreman_cif;
+ foreman_cif.call.send_side_bwe = true;
+ foreman_cif.video = {true, 352, 288, 30, 30000, 500000, 2000000, false,
+ "VP8", 1, 0, 0, false, true, "foreman_cif"};
+ foreman_cif.analyzer = {"foreman_cif_500kbps_delay_50_0_plr_3_flexfec", 0.0,
+ 0.0, kFullStackTestDurationSecs};
+ foreman_cif.pipe.loss_percent = 3;
+ foreman_cif.pipe.link_capacity_kbps = 500;
+ foreman_cif.pipe.queue_delay_ms = 50;
+ RunTest(foreman_cif);
+}
+
+TEST_F(FullStackTest, ForemanCif500kbpsPlr3Ulpfec) {
+ VideoQualityTest::Params foreman_cif;
+ foreman_cif.call.send_side_bwe = true;
+ foreman_cif.video = {true, 352, 288, 30, 30000, 500000, 2000000, false,
+ "VP8", 1, 0, 0, true, false, "foreman_cif"};
+ foreman_cif.analyzer = {"foreman_cif_500kbps_delay_50_0_plr_3_ulpfec", 0.0,
+ 0.0, kFullStackTestDurationSecs};
+ foreman_cif.pipe.loss_percent = 3;
+ foreman_cif.pipe.link_capacity_kbps = 500;
+ foreman_cif.pipe.queue_delay_ms = 50;
+ RunTest(foreman_cif);
+}
+
+#if defined(WEBRTC_USE_H264)
+TEST_F(FullStackTest, ForemanCifWithoutPacketlossH264) {
+ // TODO(pbos): Decide on psnr/ssim thresholds for foreman_cif.
+ VideoQualityTest::Params foreman_cif;
+ foreman_cif.call.send_side_bwe = true;
+ foreman_cif.video = {true, 352, 288, 30, 700000,
+ 700000, 700000, false, "H264", 1,
+ 0, 0, false, false, "foreman_cif"};
+ foreman_cif.analyzer = {"foreman_cif_net_delay_0_0_plr_0_H264", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ RunTest(foreman_cif);
+}
+
+TEST_F(FullStackTest, ForemanCif30kbpsWithoutPacketlossH264) {
+ VideoQualityTest::Params foreman_cif;
+ foreman_cif.call.send_side_bwe = true;
+ foreman_cif.video = {true, 352, 288, 10, 30000, 30000, 30000, false,
+ "H264", 1, 0, 0, false, false, "foreman_cif"};
+ foreman_cif.analyzer = {"foreman_cif_30kbps_net_delay_0_0_plr_0_H264", 0.0,
+ 0.0, kFullStackTestDurationSecs};
+ RunTest(foreman_cif);
+}
+
+TEST_F(FullStackTest, ForemanCifPlr5H264) {
+ VideoQualityTest::Params foreman_cif;
+ foreman_cif.call.send_side_bwe = true;
+ foreman_cif.video = {true, 352, 288, 30, 30000, 500000, 2000000, false,
+ "H264", 1, 0, 0, false, false, "foreman_cif"};
+ foreman_cif.analyzer = {"foreman_cif_delay_50_0_plr_5_H264", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ foreman_cif.pipe.loss_percent = 5;
+ foreman_cif.pipe.queue_delay_ms = 50;
+ RunTest(foreman_cif);
+}
+
+TEST_F(FullStackTest, ForemanCifPlr5H264SpsPpsIdrIsKeyframe) {
+ test::ScopedFieldTrials override_field_trials(
+ "WebRTC-SpsPpsIdrIsH264Keyframe/Enabled/");
+
+ VideoQualityTest::Params foreman_cif;
+ foreman_cif.call.send_side_bwe = true;
+ foreman_cif.video = {true, 352, 288, 30, 30000, 500000, 2000000, false,
+ "H264", 1, 0, 0, false, false, "foreman_cif"};
+ foreman_cif.analyzer = {"foreman_cif_delay_50_0_plr_5_H264_sps_pps_idr", 0.0,
+ 0.0, kFullStackTestDurationSecs};
+ foreman_cif.pipe.loss_percent = 5;
+ foreman_cif.pipe.queue_delay_ms = 50;
+ RunTest(foreman_cif);
+}
+
+// Verify that this is worth the bot time before enabling.
+TEST_F(FullStackTest, ForemanCifPlr5H264Flexfec) {
+ VideoQualityTest::Params foreman_cif;
+ foreman_cif.call.send_side_bwe = true;
+ foreman_cif.video = {true, 352, 288, 30, 30000, 500000, 2000000, false,
+ "H264", 1, 0, 0, false, true, "foreman_cif"};
+ foreman_cif.analyzer = {"foreman_cif_delay_50_0_plr_5_H264_flexfec", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ foreman_cif.pipe.loss_percent = 5;
+ foreman_cif.pipe.queue_delay_ms = 50;
+ RunTest(foreman_cif);
+}
+
+// ULPFEC with H264 is an unsupported combination, so this test is only useful
+// for debugging. It is therefore disabled by default.
+TEST_F(FullStackTest, DISABLED_ForemanCifPlr5H264Ulpfec) {
+ VideoQualityTest::Params foreman_cif;
+ foreman_cif.call.send_side_bwe = true;
+ foreman_cif.video = {true, 352, 288, 30, 30000, 500000, 2000000, false,
+ "H264", 1, 0, 0, true, false, "foreman_cif"};
+ foreman_cif.analyzer = {"foreman_cif_delay_50_0_plr_5_H264_ulpfec", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ foreman_cif.pipe.loss_percent = 5;
+ foreman_cif.pipe.queue_delay_ms = 50;
+ RunTest(foreman_cif);
+}
+#endif // defined(WEBRTC_USE_H264)
+
+TEST_F(FullStackTest, ForemanCif500kbps) {
+ VideoQualityTest::Params foreman_cif;
+ foreman_cif.call.send_side_bwe = true;
+ foreman_cif.video = {true, 352, 288, 30, 30000, 500000, 2000000, false,
+ "VP8", 1, 0, 0, false, false, "foreman_cif"};
+ foreman_cif.analyzer = {"foreman_cif_500kbps", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ foreman_cif.pipe.queue_length_packets = 0;
+ foreman_cif.pipe.queue_delay_ms = 0;
+ foreman_cif.pipe.link_capacity_kbps = 500;
+ RunTest(foreman_cif);
+}
+
+TEST_F(FullStackTest, ForemanCif500kbpsLimitedQueue) {
+ VideoQualityTest::Params foreman_cif;
+ foreman_cif.call.send_side_bwe = true;
+ foreman_cif.video = {true, 352, 288, 30, 30000, 500000, 2000000, false,
+ "VP8", 1, 0, 0, false, false, "foreman_cif"};
+ foreman_cif.analyzer = {"foreman_cif_500kbps_32pkts_queue", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ foreman_cif.pipe.queue_length_packets = 32;
+ foreman_cif.pipe.queue_delay_ms = 0;
+ foreman_cif.pipe.link_capacity_kbps = 500;
+ RunTest(foreman_cif);
+}
+
+TEST_F(FullStackTest, ForemanCif500kbps100ms) {
+ VideoQualityTest::Params foreman_cif;
+ foreman_cif.call.send_side_bwe = true;
+ foreman_cif.video = {true, 352, 288, 30, 30000, 500000, 2000000, false,
+ "VP8", 1, 0, 0, false, false, "foreman_cif"};
+ foreman_cif.analyzer = {"foreman_cif_500kbps_100ms", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ foreman_cif.pipe.queue_length_packets = 0;
+ foreman_cif.pipe.queue_delay_ms = 100;
+ foreman_cif.pipe.link_capacity_kbps = 500;
+ RunTest(foreman_cif);
+}
+
+TEST_F(FullStackTest, ForemanCif500kbps100msLimitedQueue) {
+ VideoQualityTest::Params foreman_cif;
+ foreman_cif.call.send_side_bwe = true;
+ foreman_cif.video = {true, 352, 288, 30, 30000, 500000, 2000000, false,
+ "VP8", 1, 0, 0, false, false, "foreman_cif"};
+ foreman_cif.analyzer = {"foreman_cif_500kbps_100ms_32pkts_queue", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ foreman_cif.pipe.queue_length_packets = 32;
+ foreman_cif.pipe.queue_delay_ms = 100;
+ foreman_cif.pipe.link_capacity_kbps = 500;
+ RunTest(foreman_cif);
+}
+
+TEST_F(FullStackTest, ForemanCif500kbps100msLimitedQueueRecvBwe) {
+ VideoQualityTest::Params foreman_cif;
+ foreman_cif.call.send_side_bwe = false;
+ foreman_cif.video = {true, 352, 288, 30, 30000, 500000, 2000000, false,
+ "VP8", 1, 0, 0, false, false, "foreman_cif"};
+ foreman_cif.analyzer = {"foreman_cif_500kbps_100ms_32pkts_queue_recv_bwe",
+ 0.0, 0.0, kFullStackTestDurationSecs};
+ foreman_cif.pipe.queue_length_packets = 32;
+ foreman_cif.pipe.queue_delay_ms = 100;
+ foreman_cif.pipe.link_capacity_kbps = 500;
+ RunTest(foreman_cif);
+}
+
+TEST_F(FullStackTest, ForemanCif1000kbps100msLimitedQueue) {
+ VideoQualityTest::Params foreman_cif;
+ foreman_cif.call.send_side_bwe = true;
+ foreman_cif.video = {true, 352, 288, 30, 30000, 2000000, 2000000, false,
+ "VP8", 1, 0, 0, false, false, "foreman_cif"};
+ foreman_cif.analyzer = {"foreman_cif_1000kbps_100ms_32pkts_queue", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ foreman_cif.pipe.queue_length_packets = 32;
+ foreman_cif.pipe.queue_delay_ms = 100;
+ foreman_cif.pipe.link_capacity_kbps = 1000;
+ RunTest(foreman_cif);
+}
+
+// TODO(sprang): Remove this if the similar ModerateLimits tests below suffice?
+TEST_F(FullStackTest, ConferenceMotionHd2000kbps100msLimitedQueue) {
+ VideoQualityTest::Params conf_motion_hd;
+ conf_motion_hd.call.send_side_bwe = true;
+ conf_motion_hd.video = {
+ true, 1280, 720, 50, 30000,
+ 3000000, 3000000, false, "VP8", 1,
+ 0, 0, false, false, "ConferenceMotion_1280_720_50"};
+ conf_motion_hd.analyzer = {"conference_motion_hd_2000kbps_100ms_32pkts_queue",
+ 0.0, 0.0, kFullStackTestDurationSecs};
+ conf_motion_hd.pipe.queue_length_packets = 32;
+ conf_motion_hd.pipe.queue_delay_ms = 100;
+ conf_motion_hd.pipe.link_capacity_kbps = 2000;
+ RunTest(conf_motion_hd);
+}
+
+TEST_F(FullStackTest, ConferenceMotionHd1TLModerateLimits) {
+ VideoQualityTest::Params conf_motion_hd;
+ conf_motion_hd.call.send_side_bwe = true;
+ conf_motion_hd.video = {
+ true, 1280, 720, 50, 30000,
+ 3000000, 3000000, false, "VP8", 1,
+ -1, 0, false, false, "ConferenceMotion_1280_720_50"};
+ conf_motion_hd.analyzer = {"conference_motion_hd_1tl_moderate_limits", 0.0,
+ 0.0, kFullStackTestDurationSecs};
+ conf_motion_hd.pipe.queue_length_packets = 50;
+ conf_motion_hd.pipe.loss_percent = 3;
+ conf_motion_hd.pipe.queue_delay_ms = 100;
+ conf_motion_hd.pipe.link_capacity_kbps = 2000;
+ RunTest(conf_motion_hd);
+}
+
+TEST_F(FullStackTest, ConferenceMotionHd2TLModerateLimits) {
+ VideoQualityTest::Params conf_motion_hd;
+ conf_motion_hd.call.send_side_bwe = true;
+ conf_motion_hd.video = {
+ true, 1280, 720, 50, 30000,
+ 3000000, 3000000, false, "VP8", 2,
+ -1, 0, false, false, "ConferenceMotion_1280_720_50"};
+ conf_motion_hd.analyzer = {"conference_motion_hd_2tl_moderate_limits", 0.0,
+ 0.0, kFullStackTestDurationSecs};
+ conf_motion_hd.pipe.queue_length_packets = 50;
+ conf_motion_hd.pipe.loss_percent = 3;
+ conf_motion_hd.pipe.queue_delay_ms = 100;
+ conf_motion_hd.pipe.link_capacity_kbps = 2000;
+ RunTest(conf_motion_hd);
+}
+
+TEST_F(FullStackTest, ConferenceMotionHd3TLModerateLimits) {
+ VideoQualityTest::Params conf_motion_hd;
+ conf_motion_hd.call.send_side_bwe = true;
+ conf_motion_hd.video = {
+ true, 1280, 720, 50, 30000,
+ 3000000, 3000000, false, "VP8", 3,
+ -1, 0, false, false, "ConferenceMotion_1280_720_50"};
+ conf_motion_hd.analyzer = {"conference_motion_hd_3tl_moderate_limits", 0.0,
+ 0.0, kFullStackTestDurationSecs};
+ conf_motion_hd.pipe.queue_length_packets = 50;
+ conf_motion_hd.pipe.loss_percent = 3;
+ conf_motion_hd.pipe.queue_delay_ms = 100;
+ conf_motion_hd.pipe.link_capacity_kbps = 2000;
+ RunTest(conf_motion_hd);
+}
+
+TEST_F(FullStackTest, ConferenceMotionHd4TLModerateLimits) {
+ VideoQualityTest::Params conf_motion_hd;
+ conf_motion_hd.call.send_side_bwe = true;
+ conf_motion_hd.video = {
+ true, 1280, 720, 50, 30000,
+ 3000000, 3000000, false, "VP8", 4,
+ -1, 0, false, false, "ConferenceMotion_1280_720_50"};
+ conf_motion_hd.analyzer = {"conference_motion_hd_4tl_moderate_limits", 0.0,
+ 0.0, kFullStackTestDurationSecs};
+ conf_motion_hd.pipe.queue_length_packets = 50;
+ conf_motion_hd.pipe.loss_percent = 3;
+ conf_motion_hd.pipe.queue_delay_ms = 100;
+ conf_motion_hd.pipe.link_capacity_kbps = 2000;
+ RunTest(conf_motion_hd);
+}
+
+TEST_F(FullStackTest, ConferenceMotionHd3TLModerateLimitsAltTLPattern) {
+ test::ScopedFieldTrials field_trial("WebRTC-UseShortVP8TL3Pattern/Enabled/");
+ VideoQualityTest::Params conf_motion_hd;
+ conf_motion_hd.call.send_side_bwe = true;
+ conf_motion_hd.video = {
+ true, 1280, 720, 50, 30000,
+ 3000000, 3000000, false, "VP8", 3,
+ -1, 0, false, false, "ConferenceMotion_1280_720_50"};
+ conf_motion_hd.analyzer = {"conference_motion_hd_3tl_alt_moderate_limits",
+ 0.0, 0.0, kFullStackTestDurationSecs};
+ conf_motion_hd.pipe.queue_length_packets = 50;
+ conf_motion_hd.pipe.loss_percent = 3;
+ conf_motion_hd.pipe.queue_delay_ms = 100;
+ conf_motion_hd.pipe.link_capacity_kbps = 2000;
+ RunTest(conf_motion_hd);
+}
+
+#if !defined(RTC_DISABLE_VP9)
+TEST_F(FullStackTest, ConferenceMotionHd2000kbps100msLimitedQueueVP9) {
+ VideoQualityTest::Params conf_motion_hd;
+ conf_motion_hd.call.send_side_bwe = true;
+ conf_motion_hd.video = {
+ true, 1280, 720, 50, 30000,
+ 3000000, 3000000, false, "VP9", 1,
+ 0, 0, false, false, "ConferenceMotion_1280_720_50"};
+ conf_motion_hd.analyzer = {
+ "conference_motion_hd_2000kbps_100ms_32pkts_queue_vp9", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ conf_motion_hd.pipe.queue_length_packets = 32;
+ conf_motion_hd.pipe.queue_delay_ms = 100;
+ conf_motion_hd.pipe.link_capacity_kbps = 2000;
+ RunTest(conf_motion_hd);
+}
+#endif
+
+TEST_F(FullStackTest, ScreenshareSlidesVP8_2TL) {
+ VideoQualityTest::Params screenshare;
+ screenshare.call.send_side_bwe = true;
+ screenshare.video = {true, 1850, 1110, 5, 50000, 200000, 2000000, false,
+ "VP8", 2, 1, 400000, false, false, ""};
+ screenshare.screenshare = {true, false, 10};
+ screenshare.analyzer = {"screenshare_slides", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ RunTest(screenshare);
+}
+
+TEST_F(FullStackTest, ScreenshareSlidesVP8_3TL_Simulcast) {
+ test::ScopedFieldTrials field_trial(kScreenshareSimulcastExperiment);
+ VideoQualityTest::Params screenshare;
+ screenshare.call.send_side_bwe = true;
+ screenshare.screenshare = {true, false, 10};
+ screenshare.video = {true, 1850, 1110, 5, 800000,
+ 2500000, 2500000, false, "VP8", 3,
+ 2, 400000, false, false, ""};
+ screenshare.analyzer = {"screenshare_slides_simulcast", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ VideoQualityTest::Params screenshare_params_high;
+ screenshare_params_high.video = {true, 1850, 1110, 5, 800000,
+ 2500000, 2500000, false, "VP8", 3,
+ 0, 400000, false, false, ""};
+ VideoQualityTest::Params screenshare_params_low;
+ screenshare_params_low.video = {true, 1850, 1110, 5, 50000,
+ 200000, 2000000, false, "VP8", 2,
+ 0, 400000, false, false, ""};
+
+ std::vector<VideoStream> streams = {
+ DefaultVideoStream(screenshare_params_low),
+ DefaultVideoStream(screenshare_params_high)};
+ screenshare.ss = {streams, 1, 1, 0, std::vector<SpatialLayer>(), false};
+ RunTest(screenshare);
+}
+
+TEST_F(FullStackTest, ScreenshareSlidesVP8_2TL_Scroll) {
+ VideoQualityTest::Params config;
+ config.call.send_side_bwe = true;
+ config.video = {true, 1850, 1110 / 2, 5, 50000, 200000, 2000000, false,
+ "VP8", 2, 1, 400000, false, false, ""};
+ config.screenshare = {true, false, 10, 2};
+ config.analyzer = {"screenshare_slides_scrolling", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ RunTest(config);
+}
+
+TEST_F(FullStackTest, ScreenshareSlidesVP8_2TL_LossyNet) {
+ VideoQualityTest::Params screenshare;
+ screenshare.call.send_side_bwe = true;
+ screenshare.video = {true, 1850, 1110, 5, 50000, 200000, 2000000, false,
+ "VP8", 2, 1, 400000, false, false, ""};
+ screenshare.screenshare = {true, false, 10};
+ screenshare.analyzer = {"screenshare_slides_lossy_net", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ screenshare.pipe.loss_percent = 5;
+ screenshare.pipe.queue_delay_ms = 200;
+ screenshare.pipe.link_capacity_kbps = 500;
+ RunTest(screenshare);
+}
+
+TEST_F(FullStackTest, ScreenshareSlidesVP8_2TL_VeryLossyNet) {
+ VideoQualityTest::Params screenshare;
+ screenshare.call.send_side_bwe = true;
+ screenshare.video = {true, 1850, 1110, 5, 50000, 200000, 2000000, false,
+ "VP8", 2, 1, 400000, false, false, ""};
+ screenshare.screenshare = {true, false, 10};
+ screenshare.analyzer = {"screenshare_slides_very_lossy", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ screenshare.pipe.loss_percent = 10;
+ screenshare.pipe.queue_delay_ms = 200;
+ screenshare.pipe.link_capacity_kbps = 500;
+ RunTest(screenshare);
+}
+
+TEST_F(FullStackTest, ScreenshareSlidesVP8_2TL_LossyNetRestrictedQueue) {
+ VideoQualityTest::Params screenshare;
+ screenshare.call.send_side_bwe = true;
+ screenshare.video = {true, 1850, 1110, 5, 50000, 200000, 2000000, false,
+ "VP8", 2, 1, 400000, false, false, ""};
+ screenshare.screenshare = {true, false, 10};
+ screenshare.analyzer = {"screenshare_slides_lossy_limited", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ screenshare.pipe.loss_percent = 5;
+ screenshare.pipe.link_capacity_kbps = 200;
+ screenshare.pipe.queue_length_packets = 30;
+
+ RunTest(screenshare);
+}
+
+TEST_F(FullStackTest, ScreenshareSlidesVP8_2TL_ModeratelyRestricted) {
+ VideoQualityTest::Params screenshare;
+ screenshare.call.send_side_bwe = true;
+ screenshare.video = {true, 1850, 1110, 5, 50000, 200000, 2000000, false,
+ "VP8", 2, 1, 400000, false, false, ""};
+ screenshare.screenshare = {true, false, 10};
+ screenshare.analyzer = {"screenshare_slides_moderately_restricted", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ screenshare.pipe.loss_percent = 1;
+ screenshare.pipe.link_capacity_kbps = 1200;
+ screenshare.pipe.queue_length_packets = 30;
+
+ RunTest(screenshare);
+}
+
+// TODO(sprang): Retire these tests once experiment is removed.
+TEST_F(FullStackTest, ScreenshareSlidesVP8_2TL_LossyNetRestrictedQueue_ALR) {
+ test::ScopedFieldTrials field_trial(kAlrProbingExperiment);
+ VideoQualityTest::Params screenshare;
+ screenshare.call.send_side_bwe = true;
+ screenshare.video = {true, 1850, 1110, 5, 50000, 200000, 2000000, false,
+ "VP8", 2, 1, 400000, false, false, ""};
+ screenshare.screenshare = {true, false, 10};
+ screenshare.analyzer = {"screenshare_slides_lossy_limited_ALR", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ screenshare.pipe.loss_percent = 5;
+ screenshare.pipe.link_capacity_kbps = 200;
+ screenshare.pipe.queue_length_packets = 30;
+
+ RunTest(screenshare);
+}
+
+TEST_F(FullStackTest, ScreenshareSlidesVP8_2TL_ALR) {
+ test::ScopedFieldTrials field_trial(kAlrProbingExperiment);
+ VideoQualityTest::Params screenshare;
+ screenshare.call.send_side_bwe = true;
+ screenshare.video = {true, 1850, 1110, 5, 50000, 200000, 2000000, false,
+ "VP8", 2, 1, 400000, false, false, ""};
+ screenshare.screenshare = {true, false, 10};
+ screenshare.analyzer = {"screenshare_slides_ALR", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ RunTest(screenshare);
+}
+
+TEST_F(FullStackTest, ScreenshareSlidesVP8_2TL_ModeratelyRestricted_ALR) {
+ test::ScopedFieldTrials field_trial(kAlrProbingExperiment);
+ VideoQualityTest::Params screenshare;
+ screenshare.call.send_side_bwe = true;
+ screenshare.video = {true, 1850, 1110, 5, 50000, 200000, 2000000, false,
+ "VP8", 2, 1, 400000, false, false, ""};
+ screenshare.screenshare = {true, false, 10};
+ screenshare.analyzer = {"screenshare_slides_moderately_restricted_ALR", 0.0,
+ 0.0, kFullStackTestDurationSecs};
+ screenshare.pipe.loss_percent = 1;
+ screenshare.pipe.link_capacity_kbps = 1200;
+ screenshare.pipe.queue_length_packets = 30;
+
+ RunTest(screenshare);
+}
+
+TEST_F(FullStackTest, ScreenshareSlidesVP8_3TL_Simulcast_ALR) {
+ test::ScopedFieldTrials field_trial(kScreenshareSimulcastExperiment +
+ kAlrProbingExperiment);
+ VideoQualityTest::Params screenshare;
+ screenshare.call.send_side_bwe = true;
+ screenshare.screenshare = {true, false, 10};
+ screenshare.video = {true, 1850, 1110, 5, 800000,
+ 2500000, 2500000, false, "VP8", 3,
+ 2, 400000, false, false, ""};
+ screenshare.analyzer = {"screenshare_slides_simulcast_alr", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ VideoQualityTest::Params screenshare_params_high;
+ screenshare_params_high.video = {true, 1850, 1110, 5, 800000,
+ 2500000, 2500000, false, "VP8", 3,
+ 0, 400000, false, false, ""};
+ VideoQualityTest::Params screenshare_params_low;
+ screenshare_params_low.video = {true, 1850, 1110, 5, 50000,
+ 200000, 2000000, false, "VP8", 2,
+ 0, 400000, false, false, ""};
+
+ std::vector<VideoStream> streams = {
+ DefaultVideoStream(screenshare_params_low),
+ DefaultVideoStream(screenshare_params_high)};
+ screenshare.ss = {streams, 1, 1, 0, std::vector<SpatialLayer>(), false};
+ RunTest(screenshare);
+}
+
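+// The brace-initializers below follow VideoQualityTest::Params::Video; the
+// assumed field order is: enabled, width, height, fps, min/target/max bitrate
+// (bps), suspend_below_min_bitrate, codec, number of temporal layers,
+// selected temporal layer, min transmit bps, ULPFEC, FlexFEC, clip name.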
+const VideoQualityTest::Params::Video kSvcVp9Video = {
+ true, 1280, 720, 30, 800000,
+ 2500000, 2500000, false, "VP9", 3,
+ 2, 400000, false, false, "ConferenceMotion_1280_720_50"};
+
+const VideoQualityTest::Params::Video kSimulcastVp8VideoHigh = {
+ true, 1280, 720, 30, 800000,
+ 2500000, 2500000, false, "VP8", 3,
+ 2, 400000, false, false, "ConferenceMotion_1280_720_50"};
+
+const VideoQualityTest::Params::Video kSimulcastVp8VideoMedium = {
+ true, 640, 360, 30, 150000,
+ 500000, 700000, false, "VP8", 3,
+ 2, 400000, false, false, "ConferenceMotion_1280_720_50"};
+
+const VideoQualityTest::Params::Video kSimulcastVp8VideoLow = {
+ true, 320, 180, 30, 30000,
+ 150000, 200000, false, "VP8", 3,
+ 2, 400000, false, false, "ConferenceMotion_1280_720_50"};
+
+#if !defined(RTC_DISABLE_VP9)
+TEST_F(FullStackTest, ScreenshareSlidesVP9_2SL) {
+ VideoQualityTest::Params screenshare;
+ screenshare.call.send_side_bwe = true;
+ screenshare.video = {true, 1850, 1110, 5, 50000, 200000, 2000000, false,
+ "VP9", 1, 0, 400000, false, false, ""};
+ screenshare.screenshare = {true, false, 10};
+ screenshare.analyzer = {"screenshare_slides_vp9_2sl", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ screenshare.ss = {std::vector<VideoStream>(), 0, 2, 1,
+ std::vector<SpatialLayer>(), false};
+ RunTest(screenshare);
+}
+
+TEST_F(FullStackTest, VP9SVC_3SL_High) {
+ VideoQualityTest::Params simulcast;
+ simulcast.call.send_side_bwe = true;
+ simulcast.video = kSvcVp9Video;
+ simulcast.analyzer = {"vp9svc_3sl_high", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ simulcast.ss = {std::vector<VideoStream>(), 0, 3, 2,
+ std::vector<SpatialLayer>(), false};
+ RunTest(simulcast);
+}
+
+TEST_F(FullStackTest, VP9SVC_3SL_Medium) {
+ VideoQualityTest::Params simulcast;
+ simulcast.call.send_side_bwe = true;
+ simulcast.video = kSvcVp9Video;
+ simulcast.analyzer = {"vp9svc_3sl_medium", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ simulcast.ss = {std::vector<VideoStream>(), 0, 3, 1,
+ std::vector<SpatialLayer>(), false};
+ RunTest(simulcast);
+}
+
+TEST_F(FullStackTest, VP9SVC_3SL_Low) {
+ VideoQualityTest::Params simulcast;
+ simulcast.call.send_side_bwe = true;
+ simulcast.video = kSvcVp9Video;
+ simulcast.analyzer = {"vp9svc_3sl_low", 0.0, 0.0, kFullStackTestDurationSecs};
+ simulcast.ss = {std::vector<VideoStream>(), 0, 3, 0,
+ std::vector<SpatialLayer>(), false};
+ RunTest(simulcast);
+}
+#endif // !defined(RTC_DISABLE_VP9)
+
+// Android bots can't handle FullHD, so disable the test.
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_SimulcastFullHdOveruse DISABLED_SimulcastFullHdOveruse
+#else
+#define MAYBE_SimulcastFullHdOveruse SimulcastFullHdOveruse
+#endif
+
+TEST_F(FullStackTest, MAYBE_SimulcastFullHdOveruse) {
+ VideoQualityTest::Params simulcast;
+ simulcast.call.send_side_bwe = true;
+ simulcast.video = {true, 1920, 1080, 30, 800000,
+ 2500000, 2500000, false, "VP8", 3,
+ 2, 400000, false, false, "Generator"};
+ simulcast.analyzer = {"simulcast_HD_high", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ simulcast.pipe.loss_percent = 0;
+ simulcast.pipe.queue_delay_ms = 100;
+ std::vector<VideoStream> streams = {DefaultVideoStream(simulcast),
+ DefaultVideoStream(simulcast),
+ DefaultVideoStream(simulcast)};
+ simulcast.ss = {streams, 2, 1, 0, std::vector<SpatialLayer>(), true};
+ webrtc::test::ScopedFieldTrials override_trials(
+ "WebRTC-ForceSimulatedOveruseIntervalMs/1000-50000-300/");
+ RunTest(simulcast);
+}
+
+TEST_F(FullStackTest, SimulcastVP8_3SL_High) {
+ VideoQualityTest::Params simulcast;
+ simulcast.call.send_side_bwe = true;
+ simulcast.video = kSimulcastVp8VideoHigh;
+ simulcast.analyzer = {"simulcast_vp8_3sl_high", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ simulcast.pipe.loss_percent = 0;
+ simulcast.pipe.queue_delay_ms = 100;
+ VideoQualityTest::Params video_params_high;
+ video_params_high.video = kSimulcastVp8VideoHigh;
+ VideoQualityTest::Params video_params_medium;
+ video_params_medium.video = kSimulcastVp8VideoMedium;
+ VideoQualityTest::Params video_params_low;
+ video_params_low.video = kSimulcastVp8VideoLow;
+
+ std::vector<VideoStream> streams = {DefaultVideoStream(video_params_low),
+ DefaultVideoStream(video_params_medium),
+ DefaultVideoStream(video_params_high)};
+ simulcast.ss = {streams, 2, 1, 0, std::vector<SpatialLayer>(), false};
+ RunTest(simulcast);
+}
+
+TEST_F(FullStackTest, SimulcastVP8_3SL_Medium) {
+ VideoQualityTest::Params simulcast;
+ simulcast.call.send_side_bwe = true;
+ simulcast.video = kSimulcastVp8VideoHigh;
+ simulcast.analyzer = {"simulcast_vp8_3sl_medium", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ simulcast.pipe.loss_percent = 0;
+ simulcast.pipe.queue_delay_ms = 100;
+ VideoQualityTest::Params video_params_high;
+ video_params_high.video = kSimulcastVp8VideoHigh;
+ VideoQualityTest::Params video_params_medium;
+ video_params_medium.video = kSimulcastVp8VideoMedium;
+ VideoQualityTest::Params video_params_low;
+ video_params_low.video = kSimulcastVp8VideoLow;
+
+ std::vector<VideoStream> streams = {DefaultVideoStream(video_params_low),
+ DefaultVideoStream(video_params_medium),
+ DefaultVideoStream(video_params_high)};
+ simulcast.ss = {streams, 1, 1, 0, std::vector<SpatialLayer>(), false};
+ RunTest(simulcast);
+}
+
+TEST_F(FullStackTest, SimulcastVP8_3SL_Low) {
+ VideoQualityTest::Params simulcast;
+ simulcast.call.send_side_bwe = true;
+ simulcast.video = kSimulcastVp8VideoHigh;
+ simulcast.analyzer = {"simulcast_vp8_3sl_low", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ simulcast.pipe.loss_percent = 0;
+ simulcast.pipe.queue_delay_ms = 100;
+ VideoQualityTest::Params video_params_high;
+ video_params_high.video = kSimulcastVp8VideoHigh;
+ VideoQualityTest::Params video_params_medium;
+ video_params_medium.video = kSimulcastVp8VideoMedium;
+ VideoQualityTest::Params video_params_low;
+ video_params_low.video = kSimulcastVp8VideoLow;
+
+ std::vector<VideoStream> streams = {DefaultVideoStream(video_params_low),
+ DefaultVideoStream(video_params_medium),
+ DefaultVideoStream(video_params_high)};
+ simulcast.ss = {streams, 0, 1, 0, std::vector<SpatialLayer>(), false};
+ RunTest(simulcast);
+}
+
+TEST_F(FullStackTest, LargeRoomVP8_5thumb) {
+ VideoQualityTest::Params large_room;
+ large_room.call.send_side_bwe = true;
+ large_room.video = kSimulcastVp8VideoHigh;
+ large_room.analyzer = {"largeroom_5thumb", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ large_room.pipe.loss_percent = 0;
+ large_room.pipe.queue_delay_ms = 100;
+ VideoQualityTest::Params video_params_high;
+ video_params_high.video = kSimulcastVp8VideoHigh;
+ VideoQualityTest::Params video_params_medium;
+ video_params_medium.video = kSimulcastVp8VideoMedium;
+ VideoQualityTest::Params video_params_low;
+ video_params_low.video = kSimulcastVp8VideoLow;
+
+ std::vector<VideoStream> streams = {DefaultVideoStream(video_params_low),
+ DefaultVideoStream(video_params_medium),
+ DefaultVideoStream(video_params_high)};
+ large_room.call.num_thumbnails = 5;
+ large_room.ss = {streams, 2, 1, 0, std::vector<SpatialLayer>(), false};
+ RunTest(large_room);
+}
+
+#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
+// Fails on mobile devices:
+// https://bugs.chromium.org/p/webrtc/issues/detail?id=7301
+#define MAYBE_LargeRoomVP8_50thumb DISABLED_LargeRoomVP8_50thumb
+#define MAYBE_LargeRoomVP8_15thumb DISABLED_LargeRoomVP8_15thumb
+#else
+#define MAYBE_LargeRoomVP8_50thumb LargeRoomVP8_50thumb
+#define MAYBE_LargeRoomVP8_15thumb LargeRoomVP8_15thumb
+#endif
+
+TEST_F(FullStackTest, MAYBE_LargeRoomVP8_15thumb) {
+ VideoQualityTest::Params large_room;
+ large_room.call.send_side_bwe = true;
+ large_room.video = kSimulcastVp8VideoHigh;
+ large_room.analyzer = {"largeroom_15thumb", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ large_room.pipe.loss_percent = 0;
+ large_room.pipe.queue_delay_ms = 100;
+ VideoQualityTest::Params video_params_high;
+ video_params_high.video = kSimulcastVp8VideoHigh;
+ VideoQualityTest::Params video_params_medium;
+ video_params_medium.video = kSimulcastVp8VideoMedium;
+ VideoQualityTest::Params video_params_low;
+ video_params_low.video = kSimulcastVp8VideoLow;
+
+ std::vector<VideoStream> streams = {DefaultVideoStream(video_params_low),
+ DefaultVideoStream(video_params_medium),
+ DefaultVideoStream(video_params_high)};
+ large_room.call.num_thumbnails = 15;
+ large_room.ss = {streams, 2, 1, 0, std::vector<SpatialLayer>(), false};
+ RunTest(large_room);
+}
+
+TEST_F(FullStackTest, MAYBE_LargeRoomVP8_50thumb) {
+ VideoQualityTest::Params large_room;
+ large_room.call.send_side_bwe = true;
+ large_room.video = kSimulcastVp8VideoHigh;
+ large_room.analyzer = {"largeroom_50thumb", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ large_room.pipe.loss_percent = 0;
+ large_room.pipe.queue_delay_ms = 100;
+ VideoQualityTest::Params video_params_high;
+ video_params_high.video = kSimulcastVp8VideoHigh;
+ VideoQualityTest::Params video_params_medium;
+ video_params_medium.video = kSimulcastVp8VideoMedium;
+ VideoQualityTest::Params video_params_low;
+ video_params_low.video = kSimulcastVp8VideoLow;
+
+ std::vector<VideoStream> streams = {DefaultVideoStream(video_params_low),
+ DefaultVideoStream(video_params_medium),
+ DefaultVideoStream(video_params_high)};
+ large_room.call.num_thumbnails = 50;
+ large_room.ss = {streams, 2, 1, 0, std::vector<SpatialLayer>(), false};
+ RunTest(large_room);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/video/full_stack_tests_plot.py b/third_party/libwebrtc/webrtc/video/full_stack_tests_plot.py
new file mode 100755
index 0000000000..3b324da67b
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/full_stack_tests_plot.py
@@ -0,0 +1,414 @@
+#!/usr/bin/env python
+# Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+"""Generate graphs for data generated by loopback tests.
+
+Usage examples:
+ Show end to end time for a single full stack test.
+ ./full_stack_tests_plot.py -df end_to_end -o 600 --frames 1000 vp9_data.txt
+
+  Show PSNR and encoded frame size simultaneously for two different runs of
+  the full stack test, averaged over a cycle of 200 frames. Used e.g. for
+  the screenshare slide test.
+ ./full_stack_tests_plot.py -c 200 -df psnr -drf encoded_frame_size \\
+ before.txt after.txt
+
+ Similar to the previous test, but multiple graphs.
+ ./full_stack_tests_plot.py -c 200 -df psnr vp8.txt vp9.txt --next \\
+ -c 200 -df sender_time vp8.txt vp9.txt --next \\
+ -c 200 -df end_to_end vp8.txt vp9.txt
+"""
+
+import argparse
+from collections import defaultdict
+import itertools
+import sys
+import matplotlib.pyplot as plt
+import numpy
+
+# Fields
+DROPPED = 0
+INPUT_TIME = 1 # ms (timestamp)
+SEND_TIME = 2 # ms (timestamp)
+RECV_TIME = 3 # ms (timestamp)
+RENDER_TIME = 4 # ms (timestamp)
+ENCODED_FRAME_SIZE = 5 # bytes
+PSNR = 6
+SSIM = 7
+ENCODE_TIME = 8 # ms (time interval)
+
+TOTAL_RAW_FIELDS = 9
+
+SENDER_TIME = TOTAL_RAW_FIELDS + 0
+RECEIVER_TIME = TOTAL_RAW_FIELDS + 1
+END_TO_END = TOTAL_RAW_FIELDS + 2
+RENDERED_DELTA = TOTAL_RAW_FIELDS + 3
+
+FIELD_MASK = 255
+
+# Options
+HIDE_DROPPED = 256
+RIGHT_Y_AXIS = 512
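+# For example, a field spec of PSNR | HIDE_DROPPED encodes to 6 | 256 == 262;
+# 262 & FIELD_MASK recovers the field id (6 == PSNR) and 262 & HIDE_DROPPED
+# tests the option bit.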
+
+# internal field id, field name, title
+_FIELDS = [
+ # Raw
+ (DROPPED, "dropped", "dropped"),
+ (INPUT_TIME, "input_time_ms", "input time"),
+ (SEND_TIME, "send_time_ms", "send time"),
+ (RECV_TIME, "recv_time_ms", "recv time"),
+ (ENCODED_FRAME_SIZE, "encoded_frame_size", "encoded frame size"),
+ (PSNR, "psnr", "PSNR"),
+ (SSIM, "ssim", "SSIM"),
+ (RENDER_TIME, "render_time_ms", "render time"),
+ (ENCODE_TIME, "encode_time_ms", "encode time"),
+ # Auto-generated
+ (SENDER_TIME, "sender_time", "sender time"),
+ (RECEIVER_TIME, "receiver_time", "receiver time"),
+ (END_TO_END, "end_to_end", "end to end"),
+ (RENDERED_DELTA, "rendered_delta", "rendered delta"),
+]
+
+NAME_TO_ID = {field[1]: field[0] for field in _FIELDS}
+ID_TO_TITLE = {field[0]: field[2] for field in _FIELDS}
+
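+# For example, FieldArgToId("psnr") returns PSNR directly, while
+# FieldArgToId("send_time") resolves through the "send_time_ms" alias;
+# "none" returns None, which later skips a color in the plot.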
+def FieldArgToId(arg):
+ if arg == "none":
+ return None
+ if arg in NAME_TO_ID:
+ return NAME_TO_ID[arg]
+ if arg + "_ms" in NAME_TO_ID:
+ return NAME_TO_ID[arg + "_ms"]
+ raise Exception("Unrecognized field name \"{}\"".format(arg))
+
+
+class PlotLine(object):
+ """Data for a single graph line."""
+
+ def __init__(self, label, values, flags):
+ self.label = label
+ self.values = values
+ self.flags = flags
+
+
+class Data(object):
+ """Object representing one full stack test."""
+
+ def __init__(self, filename):
+ self.title = ""
+ self.length = 0
+ self.samples = defaultdict(list)
+
+ self._ReadSamples(filename)
+
+ def _ReadSamples(self, filename):
+ """Reads graph data from the given file."""
+ f = open(filename)
+ it = iter(f)
+
+ self.title = it.next().strip()
+ self.length = int(it.next())
+ field_names = [name.strip() for name in it.next().split()]
+ field_ids = [NAME_TO_ID[name] for name in field_names]
+
+ for field_id in field_ids:
+ self.samples[field_id] = [0.0] * self.length
+
+ for sample_id in xrange(self.length):
+ for col, value in enumerate(it.next().split()):
+ self.samples[field_ids[col]][sample_id] = float(value)
+
+ self._SubtractFirstInputTime()
+ self._GenerateAdditionalData()
+
+ f.close()
+
+ def _SubtractFirstInputTime(self):
+ offset = self.samples[INPUT_TIME][0]
+ for field in [INPUT_TIME, SEND_TIME, RECV_TIME, RENDER_TIME]:
+ if field in self.samples:
+ self.samples[field] = [x - offset for x in self.samples[field]]
+
+ def _GenerateAdditionalData(self):
+ """Calculates sender time, receiver time etc. from the raw data."""
+ s = self.samples
+ last_render_time = 0
+ for field_id in [SENDER_TIME, RECEIVER_TIME, END_TO_END, RENDERED_DELTA]:
+ s[field_id] = [0] * self.length
+
+ for k in range(self.length):
+ s[SENDER_TIME][k] = s[SEND_TIME][k] - s[INPUT_TIME][k]
+
+ decoded_time = s[RENDER_TIME][k]
+ s[RECEIVER_TIME][k] = decoded_time - s[RECV_TIME][k]
+ s[END_TO_END][k] = decoded_time - s[INPUT_TIME][k]
+ if not s[DROPPED][k]:
+ if k > 0:
+ s[RENDERED_DELTA][k] = decoded_time - last_render_time
+ last_render_time = decoded_time
+
+ def _Hide(self, values):
+ """
+ Replaces values for dropped frames with None.
+ These values are then skipped by the Plot() method.
+ """
+
+ return [None if self.samples[DROPPED][k] else values[k]
+ for k in range(len(values))]
+
+ def AddSamples(self, config, target_lines_list):
+ """Creates graph lines from the current data set with given config."""
+ for field in config.fields:
+      # A field of None means the user just wants to skip the color.
+ if field is None:
+ target_lines_list.append(None)
+ continue
+
+ field_id = field & FIELD_MASK
+ values = self.samples[field_id]
+
+ if field & HIDE_DROPPED:
+ values = self._Hide(values)
+
+ target_lines_list.append(PlotLine(
+ self.title + " " + ID_TO_TITLE[field_id],
+ values, field & ~FIELD_MASK))
+
+
+def AverageOverCycle(values, length):
+ """
+ Returns the list:
+ [
+ avg(values[0], values[length], ...),
+ avg(values[1], values[length + 1], ...),
+ ...
+ avg(values[length - 1], values[2 * length - 1], ...),
+ ]
+
+ Skips None values when calculating the average value.
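+
+  For example, AverageOverCycle([1, 3, 5, 7], 2) returns [3.0, 5.0],
+  i.e. [(1 + 5) / 2, (3 + 7) / 2].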
+ """
+
+ total = [0.0] * length
+ count = [0] * length
+ for k in range(len(values)):
+ if values[k] is not None:
+ total[k % length] += values[k]
+ count[k % length] += 1
+
+ result = [0.0] * length
+ for k in range(length):
+ result[k] = total[k] / count[k] if count[k] else None
+ return result
+
+
+class PlotConfig(object):
+ """Object representing a single graph."""
+
+ def __init__(self, fields, data_list, cycle_length=None, frames=None,
+ offset=0, output_filename=None, title="Graph"):
+ self.fields = fields
+ self.data_list = data_list
+ self.cycle_length = cycle_length
+ self.frames = frames
+ self.offset = offset
+ self.output_filename = output_filename
+ self.title = title
+
+ def Plot(self, ax1):
+ lines = []
+ for data in self.data_list:
+ if not data:
+ # Add None lines to skip the colors.
+ lines.extend([None] * len(self.fields))
+ else:
+ data.AddSamples(self, lines)
+
+ def _SliceValues(values):
+ if self.offset:
+ values = values[self.offset:]
+ if self.frames:
+ values = values[:self.frames]
+ return values
+
+ length = None
+ for line in lines:
+ if line is None:
+ continue
+
+ line.values = _SliceValues(line.values)
+ if self.cycle_length:
+ line.values = AverageOverCycle(line.values, self.cycle_length)
+
+ if length is None:
+ length = len(line.values)
+ elif length != len(line.values):
+ raise Exception("All arrays should have the same length!")
+
+ ax1.set_xlabel("Frame", fontsize="large")
+ if any(line.flags & RIGHT_Y_AXIS for line in lines if line):
+ ax2 = ax1.twinx()
+ ax2.set_xlabel("Frame", fontsize="large")
+ else:
+ ax2 = None
+
+  # Implement the color cycle manually, since a graph may use two y scales.
+ color_cycle = ["b", "r", "g", "c", "m", "y", "k"]
+ color_iter = itertools.cycle(color_cycle)
+
+ for line in lines:
+ if not line:
+ color_iter.next()
+ continue
+
+ if self.cycle_length:
+ x = numpy.array(range(self.cycle_length))
+ else:
+ x = numpy.array(range(self.offset, self.offset + len(line.values)))
+ y = numpy.array(line.values)
+ ax = ax2 if line.flags & RIGHT_Y_AXIS else ax1
+      ax.plot(x, y, "o-", label=line.label, markersize=3.0, linewidth=1.0,
+ color=color_iter.next())
+
+ ax1.grid(True)
+ if ax2:
+ ax1.legend(loc="upper left", shadow=True, fontsize="large")
+ ax2.legend(loc="upper right", shadow=True, fontsize="large")
+ else:
+ ax1.legend(loc="best", shadow=True, fontsize="large")
+
+
+def LoadFiles(filenames):
+ result = []
+ for filename in filenames:
+ if filename in LoadFiles.cache:
+ result.append(LoadFiles.cache[filename])
+ else:
+ data = Data(filename)
+ LoadFiles.cache[filename] = data
+ result.append(data)
+ return result
+LoadFiles.cache = {}
+
+
+def GetParser():
+ class CustomAction(argparse.Action):
+ def __call__(self, parser, namespace, values, option_string=None):
+ if "ordered_args" not in namespace:
+ namespace.ordered_args = []
+ namespace.ordered_args.append((self.dest, values))
+
+ parser = argparse.ArgumentParser(
+ description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
+
+ parser.add_argument(
+ "-c", "--cycle_length", nargs=1, action=CustomAction,
+ type=int, help="Cycle length over which to average the values.")
+ parser.add_argument(
+ "-f", "--field", nargs=1, action=CustomAction,
+ help="Name of the field to show. Use 'none' to skip a color.")
+ parser.add_argument("-r", "--right", nargs=0, action=CustomAction,
+ help="Use right Y axis for given field.")
+ parser.add_argument("-d", "--drop", nargs=0, action=CustomAction,
+ help="Hide values for dropped frames.")
+ parser.add_argument("-o", "--offset", nargs=1, action=CustomAction, type=int,
+ help="Frame offset.")
+ parser.add_argument("-n", "--next", nargs=0, action=CustomAction,
+ help="Separator for multiple graphs.")
+ parser.add_argument(
+ "--frames", nargs=1, action=CustomAction, type=int,
+ help="Frame count to show or take into account while averaging.")
+ parser.add_argument("-t", "--title", nargs=1, action=CustomAction,
+ help="Title of the graph.")
+ parser.add_argument(
+ "-O", "--output_filename", nargs=1, action=CustomAction,
+ help="Use to save the graph into a file. "
+ "Otherwise, a window will be shown.")
+ parser.add_argument(
+ "files", nargs="+", action=CustomAction,
+ help="List of text-based files generated by loopback tests.")
+ return parser
+
+
+def _PlotConfigFromArgs(args, graph_num):
+ # Pylint complains about using kwargs, so have to do it this way.
+ cycle_length = None
+ frames = None
+ offset = 0
+ output_filename = None
+ title = "Graph"
+
+ fields = []
+ files = []
+ mask = 0
+ for key, values in args:
+ if key == "cycle_length":
+ cycle_length = values[0]
+ elif key == "frames":
+ frames = values[0]
+ elif key == "offset":
+ offset = values[0]
+ elif key == "output_filename":
+ output_filename = values[0]
+ elif key == "title":
+ title = values[0]
+ elif key == "drop":
+ mask |= HIDE_DROPPED
+ elif key == "right":
+ mask |= RIGHT_Y_AXIS
+ elif key == "field":
+ field_id = FieldArgToId(values[0])
+ fields.append(field_id | mask if field_id is not None else None)
+ mask = 0 # Reset mask after the field argument.
+ elif key == "files":
+ files.extend(values)
+
+ if not files:
+ raise Exception("Missing file argument(s) for graph #{}".format(graph_num))
+ if not fields:
+ raise Exception("Missing field argument(s) for graph #{}".format(graph_num))
+
+ return PlotConfig(fields, LoadFiles(files), cycle_length=cycle_length,
+ frames=frames, offset=offset, output_filename=output_filename,
+ title=title)
+
+
+def PlotConfigsFromArgs(args):
+ """Generates plot configs for given command line arguments."""
+ # The way it works:
+ # First we detect separators -n/--next and split arguments into groups, one
+ # for each plot. For each group, we partially parse it with
+ # argparse.ArgumentParser, modified to remember the order of arguments.
+ # Then we traverse the argument list and fill the PlotConfig.
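+  # For example, ["-df", "psnr", "a.txt", "--next", "-df", "ssim", "a.txt"]
+  # splits into two argument groups and therefore produces two plot configs.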
+ args = itertools.groupby(args, lambda x: x in ["-n", "--next"])
+ args = list(list(group) for match, group in args if not match)
+
+ parser = GetParser()
+ plot_configs = []
+ for index, raw_args in enumerate(args):
+ graph_args = parser.parse_args(raw_args).ordered_args
+ plot_configs.append(_PlotConfigFromArgs(graph_args, index))
+ return plot_configs
+
+
+def ShowOrSavePlots(plot_configs):
+ for config in plot_configs:
+ fig = plt.figure(figsize=(14.0, 10.0))
+    ax = fig.add_subplot(1, 1, 1)
+
+ plt.title(config.title)
+ config.Plot(ax)
+ if config.output_filename:
+ print "Saving to", config.output_filename
+ fig.savefig(config.output_filename)
+ plt.close(fig)
+
+ plt.show()
+
+if __name__ == "__main__":
+ ShowOrSavePlots(PlotConfigsFromArgs(sys.argv[1:]))
diff --git a/third_party/libwebrtc/webrtc/video/overuse_frame_detector.cc b/third_party/libwebrtc/webrtc/video/overuse_frame_detector.cc
new file mode 100644
index 0000000000..7b64c10597
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/overuse_frame_detector.cc
@@ -0,0 +1,564 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/overuse_frame_detector.h"
+
+#include <assert.h>
+#include <math.h>
+
+#include <algorithm>
+#include <list>
+#include <map>
+#include <string>
+#include <utility>
+
+#include "api/video/video_frame.h"
+#include "common_video/include/frame_callback.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/exp_filter.h"
+#include "rtc_base/timeutils.h"
+#include "system_wrappers/include/field_trial.h"
+
+#if defined(WEBRTC_MAC) && !defined(WEBRTC_IOS)
+#include <mach/mach.h>
+#endif // defined(WEBRTC_MAC) && !defined(WEBRTC_IOS)
+
+namespace webrtc {
+
+namespace {
+const int64_t kCheckForOveruseIntervalMs = 5000;
+const int64_t kTimeToFirstCheckForOveruseMs = 100;
+
+// Delay between consecutive rampups. (Used for quick recovery.)
+const int kQuickRampUpDelayMs = 10 * 1000;
+// Delay between rampup attempts. Initially uses standard, scales up to max.
+const int kStandardRampUpDelayMs = 40 * 1000;
+const int kMaxRampUpDelayMs = 240 * 1000;
+// Exponential back-off factor, to prevent annoying up-down behavior.
+const double kRampUpBackoffFactor = 2.0;
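+// E.g. repeated short-lived rampups scale the delay 40 s -> 80 s -> 160 s,
+// then cap at 240 s.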
+
+// Max number of overuses detected before always applying the rampup delay.
+const int kMaxOverusesBeforeApplyRampupDelay = 4;
+
+// The maximum exponent to use in rtc::ExpFilter.
+const float kMaxExp = 7.0f;
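+// (rtc::ExpFilter weights the previous estimate by alpha^exp, so a larger
+// exponent, i.e. a longer gap between samples, discounts history faster.)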
+// Default value used before first reconfiguration.
+const int kDefaultFrameRate = 30;
+// Default sample diff, default frame rate.
+const float kDefaultSampleDiffMs = 1000.0f / kDefaultFrameRate;
+// A factor applied to the sample diff on OnTargetFramerateUpdated to determine
+// a max limit for the sample diff. For instance, with a framerate of 30fps,
+// the sample diff is capped to (1000 / 30) * 1.35 = 45ms. This prevents
+// triggering too soon if there are individual very large outliers.
+const float kMaxSampleDiffMarginFactor = 1.35f;
+// Minimum framerate allowed for usage calculation. This prevents extremely
+// long encode times from being accepted if the frame rate happens to be low.
+const int kMinFramerate = 7;
+const int kMaxFramerate = 30;
+
+const auto kScaleReasonCpu = AdaptationObserverInterface::AdaptReason::kCpu;
+} // namespace
+
+CpuOveruseOptions::CpuOveruseOptions()
+ : high_encode_usage_threshold_percent(85),
+ frame_timeout_interval_ms(1500),
+ min_frame_samples(120),
+ min_process_count(3),
+ high_threshold_consecutive_count(2) {
+#if defined(WEBRTC_MAC) && !defined(WEBRTC_IOS)
+ // This is proof-of-concept code for letting the physical core count affect
+ // the interval into which we attempt to scale. For now, the code is Mac OS
+  // specific, since that's the platform where we saw most problems.
+ // TODO(torbjorng): Enhance SystemInfo to return this metric.
+
+ mach_port_t mach_host = mach_host_self();
+ host_basic_info hbi = {};
+ mach_msg_type_number_t info_count = HOST_BASIC_INFO_COUNT;
+ kern_return_t kr =
+ host_info(mach_host, HOST_BASIC_INFO, reinterpret_cast<host_info_t>(&hbi),
+ &info_count);
+ mach_port_deallocate(mach_task_self(), mach_host);
+
+ int n_physical_cores;
+ if (kr != KERN_SUCCESS) {
+ // If we couldn't get # of physical CPUs, don't panic. Assume we have 1.
+ n_physical_cores = 1;
+ RTC_LOG(LS_ERROR)
+ << "Failed to determine number of physical cores, assuming 1";
+ } else {
+ n_physical_cores = hbi.physical_cpu;
+ RTC_LOG(LS_INFO) << "Number of physical cores:" << n_physical_cores;
+ }
+
+  // Change the init-list defaults for systems with few cores. We assume that
+ // encoding, which we measure here, takes about 1/4 of the processing of a
+ // two-way call. This is roughly true for x86 using both vp8 and vp9 without
+ // hardware encoding. Since we don't affect the incoming stream here, we only
+ // control about 1/2 of the total processing needs, but this is not taken into
+ // account.
+ if (n_physical_cores == 1)
+ high_encode_usage_threshold_percent = 20; // Roughly 1/4 of 100%.
+ else if (n_physical_cores == 2)
+ high_encode_usage_threshold_percent = 40; // Roughly 1/4 of 200%.
+#endif // defined(WEBRTC_MAC) && !defined(WEBRTC_IOS)
+
+ // Note that we make the interval 2x+epsilon wide, since libyuv scaling steps
+ // are close to that (when squared). This wide interval makes sure that
+ // scaling up or down does not jump all the way across the interval.
+ low_encode_usage_threshold_percent =
+ (high_encode_usage_threshold_percent - 1) / 2;
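+  // E.g. with the default 85% high threshold, the low threshold becomes
+  // (85 - 1) / 2 = 42%, making the [42, 85] interval just over 2x wide.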
+}
+
+// Class for calculating the processing usage on the send-side (the average
+// processing time of a frame divided by the average time difference between
+// captured frames).
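+// For example, an average encode time of 10 ms with 33 ms between captured
+// frames (~30 fps) yields a usage of roughly 30%.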
+class OveruseFrameDetector::SendProcessingUsage {
+ public:
+ explicit SendProcessingUsage(const CpuOveruseOptions& options)
+ : kWeightFactorFrameDiff(0.998f),
+ kWeightFactorProcessing(0.995f),
+ kInitialSampleDiffMs(40.0f),
+ count_(0),
+ options_(options),
+ max_sample_diff_ms_(kDefaultSampleDiffMs * kMaxSampleDiffMarginFactor),
+ filtered_processing_ms_(new rtc::ExpFilter(kWeightFactorProcessing)),
+ filtered_frame_diff_ms_(new rtc::ExpFilter(kWeightFactorFrameDiff)) {
+ Reset();
+ }
+ virtual ~SendProcessingUsage() {}
+
+ void Reset() {
+ count_ = 0;
+ max_sample_diff_ms_ = kDefaultSampleDiffMs * kMaxSampleDiffMarginFactor;
+ filtered_frame_diff_ms_->Reset(kWeightFactorFrameDiff);
+ filtered_frame_diff_ms_->Apply(1.0f, kInitialSampleDiffMs);
+ filtered_processing_ms_->Reset(kWeightFactorProcessing);
+ filtered_processing_ms_->Apply(1.0f, InitialProcessingMs());
+ }
+
+ void SetMaxSampleDiffMs(float diff_ms) { max_sample_diff_ms_ = diff_ms; }
+
+ void AddCaptureSample(float sample_ms) {
+ float exp = sample_ms / kDefaultSampleDiffMs;
+ exp = std::min(exp, kMaxExp);
+ filtered_frame_diff_ms_->Apply(exp, sample_ms);
+ }
+
+ void AddSample(float processing_ms, int64_t diff_last_sample_ms) {
+ ++count_;
+ float exp = diff_last_sample_ms / kDefaultSampleDiffMs;
+ exp = std::min(exp, kMaxExp);
+ filtered_processing_ms_->Apply(exp, processing_ms);
+ }
+
+ virtual int Value() {
+ if (count_ < static_cast<uint32_t>(options_.min_frame_samples)) {
+ return static_cast<int>(InitialUsageInPercent() + 0.5f);
+ }
+ float frame_diff_ms = std::max(filtered_frame_diff_ms_->filtered(), 1.0f);
+ frame_diff_ms = std::min(frame_diff_ms, max_sample_diff_ms_);
+ float encode_usage_percent =
+ 100.0f * filtered_processing_ms_->filtered() / frame_diff_ms;
+ return static_cast<int>(encode_usage_percent + 0.5);
+ }
+
+ private:
+ float InitialUsageInPercent() const {
+ // Start in between the underuse and overuse threshold.
+ return (options_.low_encode_usage_threshold_percent +
+ options_.high_encode_usage_threshold_percent) / 2.0f;
+ }
+
+ float InitialProcessingMs() const {
+ return InitialUsageInPercent() * kInitialSampleDiffMs / 100;
+ }
+
+ const float kWeightFactorFrameDiff;
+ const float kWeightFactorProcessing;
+ const float kInitialSampleDiffMs;
+ uint64_t count_;
+ const CpuOveruseOptions options_;
+ float max_sample_diff_ms_;
+ std::unique_ptr<rtc::ExpFilter> filtered_processing_ms_;
+ std::unique_ptr<rtc::ExpFilter> filtered_frame_diff_ms_;
+};
+
+// Class used for manual testing of overuse, enabled via field trial flag.
+class OveruseFrameDetector::OverdoseInjector
+ : public OveruseFrameDetector::SendProcessingUsage {
+ public:
+ OverdoseInjector(const CpuOveruseOptions& options,
+ int64_t normal_period_ms,
+ int64_t overuse_period_ms,
+ int64_t underuse_period_ms)
+ : OveruseFrameDetector::SendProcessingUsage(options),
+ normal_period_ms_(normal_period_ms),
+ overuse_period_ms_(overuse_period_ms),
+ underuse_period_ms_(underuse_period_ms),
+ state_(State::kNormal),
+ last_toggling_ms_(-1) {
+ RTC_DCHECK_GT(overuse_period_ms, 0);
+ RTC_DCHECK_GT(normal_period_ms, 0);
+ RTC_LOG(LS_INFO) << "Simulating overuse with intervals " << normal_period_ms
+ << "ms normal mode, " << overuse_period_ms
+ << "ms overuse mode.";
+ }
+
+ ~OverdoseInjector() override {}
+
+ int Value() override {
+ int64_t now_ms = rtc::TimeMillis();
+ if (last_toggling_ms_ == -1) {
+ last_toggling_ms_ = now_ms;
+ } else {
+ switch (state_) {
+ case State::kNormal:
+ if (now_ms > last_toggling_ms_ + normal_period_ms_) {
+ state_ = State::kOveruse;
+ last_toggling_ms_ = now_ms;
+ RTC_LOG(LS_INFO) << "Simulating CPU overuse.";
+ }
+ break;
+ case State::kOveruse:
+ if (now_ms > last_toggling_ms_ + overuse_period_ms_) {
+ state_ = State::kUnderuse;
+ last_toggling_ms_ = now_ms;
+ RTC_LOG(LS_INFO) << "Simulating CPU underuse.";
+ }
+ break;
+ case State::kUnderuse:
+ if (now_ms > last_toggling_ms_ + underuse_period_ms_) {
+ state_ = State::kNormal;
+ last_toggling_ms_ = now_ms;
+ RTC_LOG(LS_INFO) << "Actual CPU overuse measurements in effect.";
+ }
+ break;
+ }
+ }
+
+    rtc::Optional<int> overridden_usage_value;
+    switch (state_) {
+      case State::kNormal:
+        break;
+      case State::kOveruse:
+        overridden_usage_value.emplace(250);
+        break;
+      case State::kUnderuse:
+        overridden_usage_value.emplace(5);
+        break;
+    }
+
+    return overridden_usage_value.value_or(SendProcessingUsage::Value());
+ }
+
+ private:
+ const int64_t normal_period_ms_;
+ const int64_t overuse_period_ms_;
+ const int64_t underuse_period_ms_;
+ enum class State { kNormal, kOveruse, kUnderuse } state_;
+ int64_t last_toggling_ms_;
+};
+
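+// The "WebRTC-ForceSimulatedOveruseIntervalMs" field trial value is parsed as
+// three dash-separated periods in milliseconds, <normal>-<overuse>-<underuse>;
+// e.g. "1000-50000-300" gives 1 s of real measurements, 50 s of simulated
+// overuse and 0.3 s of simulated underuse.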
+std::unique_ptr<OveruseFrameDetector::SendProcessingUsage>
+OveruseFrameDetector::CreateSendProcessingUsage(
+ const CpuOveruseOptions& options) {
+ std::unique_ptr<SendProcessingUsage> instance;
+ std::string toggling_interval =
+ field_trial::FindFullName("WebRTC-ForceSimulatedOveruseIntervalMs");
+ if (!toggling_interval.empty()) {
+ int normal_period_ms = 0;
+ int overuse_period_ms = 0;
+ int underuse_period_ms = 0;
+ if (sscanf(toggling_interval.c_str(), "%d-%d-%d", &normal_period_ms,
+ &overuse_period_ms, &underuse_period_ms) == 3) {
+ if (normal_period_ms > 0 && overuse_period_ms > 0 &&
+ underuse_period_ms > 0) {
+ instance.reset(new OverdoseInjector(
+ options, normal_period_ms, overuse_period_ms, underuse_period_ms));
+ } else {
+ RTC_LOG(LS_WARNING)
+ << "Invalid (non-positive) normal/overuse/underuse periods: "
+ << normal_period_ms << " / " << overuse_period_ms << " / "
+ << underuse_period_ms;
+ }
+ } else {
+ RTC_LOG(LS_WARNING) << "Malformed toggling interval: "
+ << toggling_interval;
+ }
+ }
+
+ if (!instance) {
+ // No valid overuse simulation parameters set, use normal usage class.
+ instance.reset(new SendProcessingUsage(options));
+ }
+
+ return instance;
+}
+
+class OveruseFrameDetector::CheckOveruseTask : public rtc::QueuedTask {
+ public:
+ explicit CheckOveruseTask(OveruseFrameDetector* overuse_detector)
+ : overuse_detector_(overuse_detector) {
+ rtc::TaskQueue::Current()->PostDelayedTask(
+ std::unique_ptr<rtc::QueuedTask>(this), kTimeToFirstCheckForOveruseMs);
+ }
+
+ void Stop() {
+ RTC_CHECK(task_checker_.CalledSequentially());
+ overuse_detector_ = nullptr;
+ }
+
+ private:
+ bool Run() override {
+ RTC_CHECK(task_checker_.CalledSequentially());
+ if (!overuse_detector_)
+ return true; // This will make the task queue delete this task.
+ overuse_detector_->CheckForOveruse();
+
+ rtc::TaskQueue::Current()->PostDelayedTask(
+ std::unique_ptr<rtc::QueuedTask>(this), kCheckForOveruseIntervalMs);
+    // Return false to prevent this task from being deleted. Ownership was
+    // transferred to the task queue when PostDelayedTask was called.
+ return false;
+ }
+ rtc::SequencedTaskChecker task_checker_;
+ OveruseFrameDetector* overuse_detector_;
+};
+
+OveruseFrameDetector::OveruseFrameDetector(
+ const CpuOveruseOptions& options,
+ AdaptationObserverInterface* observer,
+ EncodedFrameObserver* encoder_timing,
+ CpuOveruseMetricsObserver* metrics_observer)
+ : check_overuse_task_(nullptr),
+ options_(options),
+ observer_(observer),
+ encoder_timing_(encoder_timing),
+ metrics_observer_(metrics_observer),
+ num_process_times_(0),
+ // TODO(nisse): Use rtc::Optional
+ last_capture_time_us_(-1),
+ last_processed_capture_time_us_(-1),
+ num_pixels_(0),
+ max_framerate_(kDefaultFrameRate),
+ last_overuse_time_ms_(-1),
+ checks_above_threshold_(0),
+ num_overuse_detections_(0),
+ last_rampup_time_ms_(-1),
+ in_quick_rampup_(false),
+ current_rampup_delay_ms_(kStandardRampUpDelayMs),
+ usage_(CreateSendProcessingUsage(options)) {
+ task_checker_.Detach();
+}
+
+OveruseFrameDetector::~OveruseFrameDetector() {
+ RTC_DCHECK(!check_overuse_task_) << "StopCheckForOverUse must be called.";
+}
+
+void OveruseFrameDetector::StartCheckForOveruse() {
+ RTC_DCHECK_CALLED_SEQUENTIALLY(&task_checker_);
+ RTC_DCHECK(!check_overuse_task_);
+ check_overuse_task_ = new CheckOveruseTask(this);
+}
+
+void OveruseFrameDetector::StopCheckForOveruse() {
+ RTC_DCHECK_CALLED_SEQUENTIALLY(&task_checker_);
+ check_overuse_task_->Stop();
+ check_overuse_task_ = nullptr;
+}
+
+void OveruseFrameDetector::EncodedFrameTimeMeasured(int encode_duration_ms) {
+ RTC_DCHECK_CALLED_SEQUENTIALLY(&task_checker_);
+ if (!metrics_)
+ metrics_ = rtc::Optional<CpuOveruseMetrics>(CpuOveruseMetrics());
+ metrics_->encode_usage_percent = usage_->Value();
+
+ metrics_observer_->OnEncodedFrameTimeMeasured(encode_duration_ms, *metrics_);
+}
+
+bool OveruseFrameDetector::FrameSizeChanged(int num_pixels) const {
+  RTC_DCHECK_CALLED_SEQUENTIALLY(&task_checker_);
+  return num_pixels != num_pixels_;
+}
+
+bool OveruseFrameDetector::FrameTimeoutDetected(int64_t now_us) const {
+ RTC_DCHECK_CALLED_SEQUENTIALLY(&task_checker_);
+ if (last_capture_time_us_ == -1)
+ return false;
+ return (now_us - last_capture_time_us_) >
+ options_.frame_timeout_interval_ms * rtc::kNumMicrosecsPerMillisec;
+}
+
+void OveruseFrameDetector::ResetAll(int num_pixels) {
+  // Reset the state, e.g. because the resolution changed. Do not, however,
+  // change the current frame rate back to the default.
+ RTC_DCHECK_CALLED_SEQUENTIALLY(&task_checker_);
+ num_pixels_ = num_pixels;
+ usage_->Reset();
+ frame_timing_.clear();
+ last_capture_time_us_ = -1;
+ last_processed_capture_time_us_ = -1;
+ num_process_times_ = 0;
+ metrics_ = rtc::Optional<CpuOveruseMetrics>();
+ OnTargetFramerateUpdated(max_framerate_);
+}
+
+void OveruseFrameDetector::OnTargetFramerateUpdated(int framerate_fps) {
+ RTC_DCHECK_CALLED_SEQUENTIALLY(&task_checker_);
+ RTC_DCHECK_GE(framerate_fps, 0);
+ max_framerate_ = std::min(kMaxFramerate, framerate_fps);
+ usage_->SetMaxSampleDiffMs((1000 / std::max(kMinFramerate, max_framerate_)) *
+ kMaxSampleDiffMarginFactor);
+}
+
+void OveruseFrameDetector::FrameCaptured(const VideoFrame& frame,
+ int64_t time_when_first_seen_us) {
+ RTC_DCHECK_CALLED_SEQUENTIALLY(&task_checker_);
+
+ if (FrameSizeChanged(frame.width() * frame.height()) ||
+ FrameTimeoutDetected(time_when_first_seen_us)) {
+ ResetAll(frame.width() * frame.height());
+ }
+
+ if (last_capture_time_us_ != -1)
+ usage_->AddCaptureSample(
+ 1e-3 * (time_when_first_seen_us - last_capture_time_us_));
+
+ last_capture_time_us_ = time_when_first_seen_us;
+
+ frame_timing_.push_back(FrameTiming(frame.timestamp_us(), frame.timestamp(),
+ time_when_first_seen_us));
+}
+
+void OveruseFrameDetector::FrameSent(uint32_t timestamp,
+ int64_t time_sent_in_us) {
+ RTC_DCHECK_CALLED_SEQUENTIALLY(&task_checker_);
+  // Delay before reporting the actual encoding time, so that the total
+  // encoding time can be measured when more than one layer is encoded.
+  // Encoding is assumed to finish within a second (or to yield enough
+  // long-duration samples within that second to trigger an overuse even when
+  // it does not).
+ static const int64_t kEncodingTimeMeasureWindowMs = 1000;
+ for (auto& it : frame_timing_) {
+ if (it.timestamp == timestamp) {
+ it.last_send_us = time_sent_in_us;
+ break;
+ }
+ }
+ // TODO(pbos): Handle the case/log errors when not finding the corresponding
+  // frame (either very slow encoding or incorrect timestamps returned from
+  // the encoder).
+ // This is currently the case for all frames on ChromeOS, so logging them
+ // would be spammy, and triggering overuse would be wrong.
+ // https://crbug.com/350106
+ while (!frame_timing_.empty()) {
+ FrameTiming timing = frame_timing_.front();
+ if (time_sent_in_us - timing.capture_us <
+ kEncodingTimeMeasureWindowMs * rtc::kNumMicrosecsPerMillisec) {
+ break;
+ }
+ if (timing.last_send_us != -1) {
+ int encode_duration_us =
+ static_cast<int>(timing.last_send_us - timing.capture_us);
+ if (encoder_timing_) {
+ // TODO(nisse): Update encoder_timing_ to also use us units.
+ encoder_timing_->OnEncodeTiming(timing.capture_time_us /
+ rtc::kNumMicrosecsPerMillisec,
+ encode_duration_us /
+ rtc::kNumMicrosecsPerMillisec);
+ }
+ if (last_processed_capture_time_us_ != -1) {
+ int64_t diff_us = timing.capture_us - last_processed_capture_time_us_;
+ usage_->AddSample(1e-3 * encode_duration_us, 1e-3 * diff_us);
+ }
+ last_processed_capture_time_us_ = timing.capture_us;
+ EncodedFrameTimeMeasured(encode_duration_us /
+ rtc::kNumMicrosecsPerMillisec);
+ }
+ frame_timing_.pop_front();
+ }
+}
+
+void OveruseFrameDetector::CheckForOveruse() {
+ RTC_DCHECK_CALLED_SEQUENTIALLY(&task_checker_);
+ ++num_process_times_;
+ if (num_process_times_ <= options_.min_process_count || !metrics_)
+ return;
+
+ int64_t now_ms = rtc::TimeMillis();
+
+ if (IsOverusing(*metrics_)) {
+    // If the last thing we did was ramp up, and we now have to back down,
+    // check whether that peak was short-lived. If so, back off to avoid
+    // oscillating around a load the system doesn't seem to handle.
+ bool check_for_backoff = last_rampup_time_ms_ > last_overuse_time_ms_;
+ if (check_for_backoff) {
+ if (now_ms - last_rampup_time_ms_ < kStandardRampUpDelayMs ||
+ num_overuse_detections_ > kMaxOverusesBeforeApplyRampupDelay) {
+ // Going up was not ok for very long, back off.
+ current_rampup_delay_ms_ *= kRampUpBackoffFactor;
+ if (current_rampup_delay_ms_ > kMaxRampUpDelayMs)
+ current_rampup_delay_ms_ = kMaxRampUpDelayMs;
+ } else {
+ // Not currently backing off, reset rampup delay.
+ current_rampup_delay_ms_ = kStandardRampUpDelayMs;
+ }
+ }
+
+ last_overuse_time_ms_ = now_ms;
+ in_quick_rampup_ = false;
+ checks_above_threshold_ = 0;
+ ++num_overuse_detections_;
+
+ if (observer_)
+ observer_->AdaptDown(kScaleReasonCpu);
+ } else if (IsUnderusing(*metrics_, now_ms)) {
+ last_rampup_time_ms_ = now_ms;
+ in_quick_rampup_ = true;
+
+ if (observer_)
+ observer_->AdaptUp(kScaleReasonCpu);
+ }
+
+ int rampup_delay =
+ in_quick_rampup_ ? kQuickRampUpDelayMs : current_rampup_delay_ms_;
+
+ RTC_LOG(LS_VERBOSE) << " Frame stats: "
+ << " encode usage " << metrics_->encode_usage_percent
+ << " overuse detections " << num_overuse_detections_
+ << " rampup delay " << rampup_delay;
+}
+
+bool OveruseFrameDetector::IsOverusing(const CpuOveruseMetrics& metrics) {
+ RTC_DCHECK_CALLED_SEQUENTIALLY(&task_checker_);
+
+ if (metrics.encode_usage_percent >=
+ options_.high_encode_usage_threshold_percent) {
+ ++checks_above_threshold_;
+ } else {
+ checks_above_threshold_ = 0;
+ }
+ return checks_above_threshold_ >= options_.high_threshold_consecutive_count;
+}
+
+bool OveruseFrameDetector::IsUnderusing(const CpuOveruseMetrics& metrics,
+ int64_t time_now) {
+ RTC_DCHECK_CALLED_SEQUENTIALLY(&task_checker_);
+ int delay = in_quick_rampup_ ? kQuickRampUpDelayMs : current_rampup_delay_ms_;
+ if (time_now < last_rampup_time_ms_ + delay)
+ return false;
+
+ return metrics.encode_usage_percent <
+ options_.low_encode_usage_threshold_percent;
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/video/overuse_frame_detector.h b/third_party/libwebrtc/webrtc/video/overuse_frame_detector.h
new file mode 100644
index 0000000000..3cc9262a94
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/overuse_frame_detector.h
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_OVERUSE_FRAME_DETECTOR_H_
+#define VIDEO_OVERUSE_FRAME_DETECTOR_H_
+
+#include <list>
+#include <memory>
+
+#include "api/optional.h"
+#include "modules/video_coding/utility/quality_scaler.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/numerics/exp_filter.h"
+#include "rtc_base/sequenced_task_checker.h"
+#include "rtc_base/task_queue.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace webrtc {
+
+class EncodedFrameObserver;
+class VideoFrame;
+
+struct CpuOveruseOptions {
+ CpuOveruseOptions();
+
+ int low_encode_usage_threshold_percent; // Threshold for triggering underuse.
+ int high_encode_usage_threshold_percent; // Threshold for triggering overuse.
+ // General settings.
+ int frame_timeout_interval_ms; // The maximum allowed interval between two
+ // frames before resetting estimations.
+ int min_frame_samples; // The minimum number of frames required.
+ int min_process_count; // The number of initial process times required before
+ // triggering an overuse/underuse.
+ int high_threshold_consecutive_count; // The number of consecutive checks
+ // above the high threshold before
+ // triggering an overuse.
+};
+
+struct CpuOveruseMetrics {
+ CpuOveruseMetrics() : encode_usage_percent(-1) {}
+
+ int encode_usage_percent; // Average encode time divided by the average time
+ // difference between incoming captured frames.
+};
+
+class CpuOveruseMetricsObserver {
+ public:
+ virtual ~CpuOveruseMetricsObserver() {}
+ virtual void OnEncodedFrameTimeMeasured(int encode_duration_ms,
+ const CpuOveruseMetrics& metrics) = 0;
+};
+
+// Used to detect system overuse based on the send-side processing time of
+// incoming frames. All methods must be called on a single task queue but it can
+// be created and destroyed on an arbitrary thread.
+// OveruseFrameDetector::StartCheckForOveruse must be called to periodically
+// check for overuse.
+class OveruseFrameDetector {
+ public:
+ OveruseFrameDetector(const CpuOveruseOptions& options,
+ AdaptationObserverInterface* overuse_observer,
+                       EncodedFrameObserver* encoder_timing,
+ CpuOveruseMetricsObserver* metrics_observer);
+ virtual ~OveruseFrameDetector();
+
+ // Start to periodically check for overuse.
+ void StartCheckForOveruse();
+
+ // StopCheckForOveruse must be called before destruction if
+ // StartCheckForOveruse has been called.
+ void StopCheckForOveruse();
+
+ // Defines the current maximum framerate targeted by the capturer. This is
+ // used to make sure the encode usage percent doesn't drop unduly if the
+ // capturer has quiet periods (for instance caused by screen capturers with
+ // variable capture rate depending on content updates), otherwise we might
+ // experience adaptation toggling.
+ virtual void OnTargetFramerateUpdated(int framerate_fps);
+
+ // Called for each captured frame.
+ void FrameCaptured(const VideoFrame& frame, int64_t time_when_first_seen_us);
+
+ // Called for each sent frame.
+ void FrameSent(uint32_t timestamp, int64_t time_sent_in_us);
+
+ protected:
+ void CheckForOveruse(); // Protected for test purposes.
+
+ private:
+ class OverdoseInjector;
+ class SendProcessingUsage;
+ class CheckOveruseTask;
+ struct FrameTiming {
+ FrameTiming(int64_t capture_time_us, uint32_t timestamp, int64_t now)
+ : capture_time_us(capture_time_us),
+ timestamp(timestamp),
+ capture_us(now),
+ last_send_us(-1) {}
+ int64_t capture_time_us;
+ uint32_t timestamp;
+ int64_t capture_us;
+ int64_t last_send_us;
+ };
+
+ void EncodedFrameTimeMeasured(int encode_duration_ms);
+ bool IsOverusing(const CpuOveruseMetrics& metrics);
+ bool IsUnderusing(const CpuOveruseMetrics& metrics, int64_t time_now);
+
+ bool FrameTimeoutDetected(int64_t now) const;
+ bool FrameSizeChanged(int num_pixels) const;
+
+ void ResetAll(int num_pixels);
+
+ static std::unique_ptr<SendProcessingUsage> CreateSendProcessingUsage(
+ const CpuOveruseOptions& options);
+
+ rtc::SequencedTaskChecker task_checker_;
+ // Owned by the task queue from where StartCheckForOveruse is called.
+ CheckOveruseTask* check_overuse_task_;
+
+ const CpuOveruseOptions options_;
+
+ // Observer getting overuse reports.
+ AdaptationObserverInterface* const observer_;
+ EncodedFrameObserver* const encoder_timing_;
+
+ // Stats metrics.
+ CpuOveruseMetricsObserver* const metrics_observer_;
+ rtc::Optional<CpuOveruseMetrics> metrics_ RTC_GUARDED_BY(task_checker_);
+
+ int64_t num_process_times_ RTC_GUARDED_BY(task_checker_);
+
+ int64_t last_capture_time_us_ RTC_GUARDED_BY(task_checker_);
+ int64_t last_processed_capture_time_us_ RTC_GUARDED_BY(task_checker_);
+
+ // Number of pixels of last captured frame.
+ int num_pixels_ RTC_GUARDED_BY(task_checker_);
+ int max_framerate_ RTC_GUARDED_BY(task_checker_);
+ int64_t last_overuse_time_ms_ RTC_GUARDED_BY(task_checker_);
+ int checks_above_threshold_ RTC_GUARDED_BY(task_checker_);
+ int num_overuse_detections_ RTC_GUARDED_BY(task_checker_);
+ int64_t last_rampup_time_ms_ RTC_GUARDED_BY(task_checker_);
+ bool in_quick_rampup_ RTC_GUARDED_BY(task_checker_);
+ int current_rampup_delay_ms_ RTC_GUARDED_BY(task_checker_);
+
+ // TODO(asapersson): Can these be regular members (avoid separate heap
+ // allocs)?
+ const std::unique_ptr<SendProcessingUsage> usage_
+ RTC_GUARDED_BY(task_checker_);
+ std::list<FrameTiming> frame_timing_ RTC_GUARDED_BY(task_checker_);
+
+ RTC_DISALLOW_COPY_AND_ASSIGN(OveruseFrameDetector);
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_OVERUSE_FRAME_DETECTOR_H_
diff --git a/third_party/libwebrtc/webrtc/video/overuse_frame_detector_unittest.cc b/third_party/libwebrtc/webrtc/video/overuse_frame_detector_unittest.cc
new file mode 100644
index 0000000000..0f3dd8643e
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/overuse_frame_detector_unittest.cc
@@ -0,0 +1,483 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "api/video/i420_buffer.h"
+#include "common_video/include/video_frame.h"
+#include "modules/video_coding/utility/quality_scaler.h"
+#include "rtc_base/event.h"
+#include "rtc_base/fakeclock.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "video/overuse_frame_detector.h"
+
+namespace webrtc {
+
+using ::testing::InvokeWithoutArgs;
+
+namespace {
+ const int kWidth = 640;
+ const int kHeight = 480;
+ const int kFrameIntervalUs = 33 * rtc::kNumMicrosecsPerMillisec;
+ const int kProcessTimeUs = 5 * rtc::kNumMicrosecsPerMillisec;
+} // namespace
+
+class MockCpuOveruseObserver : public AdaptationObserverInterface {
+ public:
+ MockCpuOveruseObserver() {}
+ virtual ~MockCpuOveruseObserver() {}
+
+ MOCK_METHOD1(AdaptUp, void(AdaptReason));
+ MOCK_METHOD1(AdaptDown, void(AdaptReason));
+};
+
+class CpuOveruseObserverImpl : public AdaptationObserverInterface {
+ public:
+ CpuOveruseObserverImpl() :
+ overuse_(0),
+ normaluse_(0) {}
+ virtual ~CpuOveruseObserverImpl() {}
+
+ void AdaptDown(AdaptReason) { ++overuse_; }
+ void AdaptUp(AdaptReason) { ++normaluse_; }
+
+ int overuse_;
+ int normaluse_;
+};
+
+class OveruseFrameDetectorUnderTest : public OveruseFrameDetector {
+ public:
+ OveruseFrameDetectorUnderTest(const CpuOveruseOptions& options,
+ AdaptationObserverInterface* overuse_observer,
+ EncodedFrameObserver* encoder_timing,
+ CpuOveruseMetricsObserver* metrics_observer)
+ : OveruseFrameDetector(options,
+ overuse_observer,
+ encoder_timing,
+ metrics_observer) {}
+ ~OveruseFrameDetectorUnderTest() {}
+
+ using OveruseFrameDetector::CheckForOveruse;
+};
+
+class OveruseFrameDetectorTest : public ::testing::Test,
+ public CpuOveruseMetricsObserver {
+ protected:
+ void SetUp() override {
+ observer_.reset(new MockCpuOveruseObserver());
+ options_.min_process_count = 0;
+ ReinitializeOveruseDetector();
+ }
+
+ void ReinitializeOveruseDetector() {
+ overuse_detector_.reset(new OveruseFrameDetectorUnderTest(
+ options_, observer_.get(), nullptr, this));
+ }
+
+ void OnEncodedFrameTimeMeasured(int encode_time_ms,
+ const CpuOveruseMetrics& metrics) override {
+ metrics_ = metrics;
+ }
+
+ int InitialUsage() {
+ return ((options_.low_encode_usage_threshold_percent +
+ options_.high_encode_usage_threshold_percent) / 2.0f) + 0.5;
+ }
+
+ void InsertAndSendFramesWithInterval(int num_frames,
+ int interval_us,
+ int width,
+ int height,
+ int delay_us) {
+ VideoFrame frame(I420Buffer::Create(width, height),
+ webrtc::kVideoRotation_0, 0);
+ uint32_t timestamp = 0;
+ while (num_frames-- > 0) {
+ frame.set_timestamp(timestamp);
+ overuse_detector_->FrameCaptured(frame, rtc::TimeMicros());
+ clock_.AdvanceTimeMicros(delay_us);
+ overuse_detector_->FrameSent(timestamp, rtc::TimeMicros());
+ clock_.AdvanceTimeMicros(interval_us - delay_us);
+ timestamp += interval_us * 90 / 1000;
+ }
+ }
+
+ void ForceUpdate(int width, int height) {
+    // Insert one frame, wait a second, and then insert another to force an
+    // update of the usage estimate. In the tests where this is used, the
+    // extra sample does not affect the expected outcome (this mainly checks
+    // initial values and whether the overuse detector has been reset).
+ InsertAndSendFramesWithInterval(2, rtc::kNumMicrosecsPerSec,
+ width, height, kFrameIntervalUs);
+  }
+
+  void TriggerOveruse(int num_times) {
+ const int kDelayUs = 32 * rtc::kNumMicrosecsPerMillisec;
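+    // 32 ms of encode time per 33 ms frame interval is ~97% usage, well above
+    // the default high usage threshold, so each round pushes towards overuse.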
+ for (int i = 0; i < num_times; ++i) {
+ InsertAndSendFramesWithInterval(
+ 1000, kFrameIntervalUs, kWidth, kHeight, kDelayUs);
+ overuse_detector_->CheckForOveruse();
+ }
+ }
+
+ void TriggerUnderuse() {
+ const int kDelayUs1 = 5000;
+ const int kDelayUs2 = 6000;
+ InsertAndSendFramesWithInterval(
+ 1300, kFrameIntervalUs, kWidth, kHeight, kDelayUs1);
+ InsertAndSendFramesWithInterval(
+ 1, kFrameIntervalUs, kWidth, kHeight, kDelayUs2);
+ overuse_detector_->CheckForOveruse();
+ }
+
+ int UsagePercent() { return metrics_.encode_usage_percent; }
+
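+  // Per-frame processing time above which usage exceeds the high (overuse)
+  // threshold at the given frame rate.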
+ int64_t OveruseProcessingTimeLimitForFramerate(int fps) const {
+ int64_t frame_interval = rtc::kNumMicrosecsPerSec / fps;
+ int64_t max_processing_time_us =
+ (frame_interval * options_.high_encode_usage_threshold_percent) / 100;
+ return max_processing_time_us;
+ }
+
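+  // Per-frame processing time below which usage falls under the low
+  // (underuse) threshold at the given frame rate.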
+ int64_t UnderuseProcessingTimeLimitForFramerate(int fps) const {
+ int64_t frame_interval = rtc::kNumMicrosecsPerSec / fps;
+ int64_t max_processing_time_us =
+ (frame_interval * options_.low_encode_usage_threshold_percent) / 100;
+ return max_processing_time_us;
+ }
+
+ CpuOveruseOptions options_;
+ rtc::ScopedFakeClock clock_;
+ std::unique_ptr<MockCpuOveruseObserver> observer_;
+ std::unique_ptr<OveruseFrameDetectorUnderTest> overuse_detector_;
+ CpuOveruseMetrics metrics_;
+
+ static const auto reason_ = AdaptationObserverInterface::AdaptReason::kCpu;
+};
+
+// UsagePercent() > high_encode_usage_threshold_percent => overuse.
+// UsagePercent() < low_encode_usage_threshold_percent => underuse.
+TEST_F(OveruseFrameDetectorTest, TriggerOveruse) {
+ // usage > high => overuse
+ EXPECT_CALL(*(observer_.get()), AdaptDown(reason_)).Times(1);
+ TriggerOveruse(options_.high_threshold_consecutive_count);
+}
+
+TEST_F(OveruseFrameDetectorTest, OveruseAndRecover) {
+ // usage > high => overuse
+ EXPECT_CALL(*(observer_.get()), AdaptDown(reason_)).Times(1);
+ TriggerOveruse(options_.high_threshold_consecutive_count);
+ // usage < low => underuse
+ EXPECT_CALL(*(observer_.get()), AdaptUp(reason_)).Times(testing::AtLeast(1));
+ TriggerUnderuse();
+}
+
+TEST_F(OveruseFrameDetectorTest, OveruseAndRecoverWithNoObserver) {
+ overuse_detector_.reset(new OveruseFrameDetectorUnderTest(
+ options_, nullptr, nullptr, this));
+ EXPECT_CALL(*(observer_.get()), AdaptDown(reason_)).Times(0);
+ TriggerOveruse(options_.high_threshold_consecutive_count);
+ EXPECT_CALL(*(observer_.get()), AdaptUp(reason_)).Times(0);
+ TriggerUnderuse();
+}
+
+TEST_F(OveruseFrameDetectorTest, DoubleOveruseAndRecover) {
+ EXPECT_CALL(*(observer_.get()), AdaptDown(reason_)).Times(2);
+ TriggerOveruse(options_.high_threshold_consecutive_count);
+ TriggerOveruse(options_.high_threshold_consecutive_count);
+ EXPECT_CALL(*(observer_.get()), AdaptUp(reason_)).Times(testing::AtLeast(1));
+ TriggerUnderuse();
+}
+
+TEST_F(OveruseFrameDetectorTest, TriggerUnderuseWithMinProcessCount) {
+ const int kProcessIntervalUs = 5 * rtc::kNumMicrosecsPerSec;
+ options_.min_process_count = 1;
+ CpuOveruseObserverImpl overuse_observer;
+ overuse_detector_.reset(new OveruseFrameDetectorUnderTest(
+ options_, &overuse_observer, nullptr, this));
+ InsertAndSendFramesWithInterval(
+ 1200, kFrameIntervalUs, kWidth, kHeight, kProcessTimeUs);
+ overuse_detector_->CheckForOveruse();
+ EXPECT_EQ(0, overuse_observer.normaluse_);
+ clock_.AdvanceTimeMicros(kProcessIntervalUs);
+ overuse_detector_->CheckForOveruse();
+ EXPECT_EQ(1, overuse_observer.normaluse_);
+}
+
+TEST_F(OveruseFrameDetectorTest, ConstantOveruseGivesNoNormalUsage) {
+ EXPECT_CALL(*(observer_.get()), AdaptUp(reason_)).Times(0);
+ EXPECT_CALL(*(observer_.get()), AdaptDown(reason_)).Times(64);
+ for (size_t i = 0; i < 64; ++i) {
+ TriggerOveruse(options_.high_threshold_consecutive_count);
+ }
+}
+
+TEST_F(OveruseFrameDetectorTest, ConsecutiveCountTriggersOveruse) {
+ EXPECT_CALL(*(observer_.get()), AdaptDown(reason_)).Times(1);
+ options_.high_threshold_consecutive_count = 2;
+ ReinitializeOveruseDetector();
+ TriggerOveruse(2);
+}
+
+TEST_F(OveruseFrameDetectorTest, IncorrectConsecutiveCountTriggersNoOveruse) {
+ EXPECT_CALL(*(observer_.get()), AdaptDown(reason_)).Times(0);
+ options_.high_threshold_consecutive_count = 2;
+ ReinitializeOveruseDetector();
+ TriggerOveruse(1);
+}
+
+TEST_F(OveruseFrameDetectorTest, ProcessingUsage) {
+ InsertAndSendFramesWithInterval(
+ 1000, kFrameIntervalUs, kWidth, kHeight, kProcessTimeUs);
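+  // 5 ms of processing per 33 ms frame interval: the expected usage is
+  // 5000 * 100 / 33000 = 15 percent (integer division).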
+ EXPECT_EQ(kProcessTimeUs * 100 / kFrameIntervalUs, UsagePercent());
+}
+
+TEST_F(OveruseFrameDetectorTest, ResetAfterResolutionChange) {
+ ForceUpdate(kWidth, kHeight);
+ EXPECT_EQ(InitialUsage(), UsagePercent());
+ InsertAndSendFramesWithInterval(
+ 1000, kFrameIntervalUs, kWidth, kHeight, kProcessTimeUs);
+ EXPECT_NE(InitialUsage(), UsagePercent());
+ // Verify reset (with new width/height).
+ ForceUpdate(kWidth, kHeight + 1);
+ EXPECT_EQ(InitialUsage(), UsagePercent());
+}
+
+TEST_F(OveruseFrameDetectorTest, ResetAfterFrameTimeout) {
+ ForceUpdate(kWidth, kHeight);
+ EXPECT_EQ(InitialUsage(), UsagePercent());
+ InsertAndSendFramesWithInterval(
+ 1000, kFrameIntervalUs, kWidth, kHeight, kProcessTimeUs);
+ EXPECT_NE(InitialUsage(), UsagePercent());
+ InsertAndSendFramesWithInterval(
+ 2, options_.frame_timeout_interval_ms *
+ rtc::kNumMicrosecsPerMillisec, kWidth, kHeight, kProcessTimeUs);
+ EXPECT_NE(InitialUsage(), UsagePercent());
+ // Verify reset.
+ InsertAndSendFramesWithInterval(
+ 2, (options_.frame_timeout_interval_ms + 1) *
+ rtc::kNumMicrosecsPerMillisec, kWidth, kHeight, kProcessTimeUs);
+ ForceUpdate(kWidth, kHeight);
+ EXPECT_EQ(InitialUsage(), UsagePercent());
+}
+
+TEST_F(OveruseFrameDetectorTest, MinFrameSamplesBeforeUpdating) {
+ options_.min_frame_samples = 40;
+ ReinitializeOveruseDetector();
+ InsertAndSendFramesWithInterval(
+ 40, kFrameIntervalUs, kWidth, kHeight, kProcessTimeUs);
+ EXPECT_EQ(InitialUsage(), UsagePercent());
+ // Pass time far enough to digest all previous samples.
+ clock_.AdvanceTimeMicros(rtc::kNumMicrosecsPerSec);
+ InsertAndSendFramesWithInterval(1, kFrameIntervalUs, kWidth, kHeight,
+ kProcessTimeUs);
+  // The last sample has not yet been processed at this point.
+ EXPECT_EQ(InitialUsage(), UsagePercent());
+
+ // Pass time far enough to digest all previous samples, 41 in total.
+ clock_.AdvanceTimeMicros(rtc::kNumMicrosecsPerSec);
+ InsertAndSendFramesWithInterval(
+ 1, kFrameIntervalUs, kWidth, kHeight, kProcessTimeUs);
+ EXPECT_NE(InitialUsage(), UsagePercent());
+}
+
+TEST_F(OveruseFrameDetectorTest, InitialProcessingUsage) {
+ ForceUpdate(kWidth, kHeight);
+ EXPECT_EQ(InitialUsage(), UsagePercent());
+}
+
+TEST_F(OveruseFrameDetectorTest, MeasuresMultipleConcurrentSamples) {
+ EXPECT_CALL(*(observer_.get()), AdaptDown(reason_))
+ .Times(testing::AtLeast(1));
+ static const int kIntervalUs = 33 * rtc::kNumMicrosecsPerMillisec;
+ static const size_t kNumFramesEncodingDelay = 3;
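+  // Simulates a pipelined encoder: each frame's FrameSent() arrives
+  // kNumFramesEncodingDelay captures later, so several samples are in flight
+  // concurrently.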
+ VideoFrame frame(I420Buffer::Create(kWidth, kHeight),
+ webrtc::kVideoRotation_0, 0);
+ for (size_t i = 0; i < 1000; ++i) {
+ // Unique timestamps.
+ frame.set_timestamp(static_cast<uint32_t>(i));
+ overuse_detector_->FrameCaptured(frame, rtc::TimeMicros());
+ clock_.AdvanceTimeMicros(kIntervalUs);
+ if (i > kNumFramesEncodingDelay) {
+ overuse_detector_->FrameSent(
+ static_cast<uint32_t>(i - kNumFramesEncodingDelay),
+ rtc::TimeMicros());
+ }
+ overuse_detector_->CheckForOveruse();
+ }
+}
+
+TEST_F(OveruseFrameDetectorTest, UpdatesExistingSamples) {
+ // >85% encoding time should trigger overuse.
+ EXPECT_CALL(*(observer_.get()), AdaptDown(reason_))
+ .Times(testing::AtLeast(1));
+ static const int kIntervalUs = 33 * rtc::kNumMicrosecsPerMillisec;
+ static const int kDelayUs = 30 * rtc::kNumMicrosecsPerMillisec;
+ VideoFrame frame(I420Buffer::Create(kWidth, kHeight),
+ webrtc::kVideoRotation_0, 0);
+ uint32_t timestamp = 0;
+ for (size_t i = 0; i < 1000; ++i) {
+ frame.set_timestamp(timestamp);
+ overuse_detector_->FrameCaptured(frame, rtc::TimeMicros());
+ // Encode and send first parts almost instantly.
+ clock_.AdvanceTimeMicros(rtc::kNumMicrosecsPerMillisec);
+ overuse_detector_->FrameSent(timestamp, rtc::TimeMicros());
+ // Encode heavier part, resulting in >85% usage total.
+ clock_.AdvanceTimeMicros(kDelayUs - rtc::kNumMicrosecsPerMillisec);
+ overuse_detector_->FrameSent(timestamp, rtc::TimeMicros());
+ clock_.AdvanceTimeMicros(kIntervalUs - kDelayUs);
+ timestamp += kIntervalUs * 90 / 1000;
+ overuse_detector_->CheckForOveruse();
+ }
+}
+
+TEST_F(OveruseFrameDetectorTest, RunOnTqNormalUsage) {
+ rtc::TaskQueue queue("OveruseFrameDetectorTestQueue");
+
+ rtc::Event event(false, false);
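+  // StartCheckForOveruse() is expected to schedule the periodic overuse
+  // checks on the current task queue, which is why it is invoked from a task
+  // posted to |queue| (and later stopped from the same queue).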
+ queue.PostTask([this, &event] {
+ overuse_detector_->StartCheckForOveruse();
+ event.Set();
+ });
+ event.Wait(rtc::Event::kForever);
+
+  // Expect AdaptUp() to be called. When called, stop the |overuse_detector_|
+  // and then set |event| to end the test.
+ EXPECT_CALL(*(observer_.get()), AdaptUp(reason_))
+ .WillOnce(InvokeWithoutArgs([this, &event] {
+ overuse_detector_->StopCheckForOveruse();
+ event.Set();
+ }));
+
+ queue.PostTask([this] {
+ const int kDelayUs1 = 5 * rtc::kNumMicrosecsPerMillisec;
+ const int kDelayUs2 = 6 * rtc::kNumMicrosecsPerMillisec;
+ InsertAndSendFramesWithInterval(1300, kFrameIntervalUs, kWidth, kHeight,
+ kDelayUs1);
+ InsertAndSendFramesWithInterval(1, kFrameIntervalUs, kWidth, kHeight,
+ kDelayUs2);
+ });
+
+ EXPECT_TRUE(event.Wait(10000));
+}
+
+TEST_F(OveruseFrameDetectorTest, MaxIntervalScalesWithFramerate) {
+ const int kCapturerMaxFrameRate = 30;
+ const int kEncodeMaxFrameRate = 20; // Maximum fps the encoder can sustain.
+
+ // Trigger overuse.
+ int64_t frame_interval_us = rtc::kNumMicrosecsPerSec / kCapturerMaxFrameRate;
+  // Processing time just below the overuse limit given kEncodeMaxFrameRate.
+ int64_t processing_time_us =
+ (98 * OveruseProcessingTimeLimitForFramerate(kEncodeMaxFrameRate)) / 100;
+ EXPECT_CALL(*(observer_.get()), AdaptDown(reason_)).Times(1);
+ for (int i = 0; i < options_.high_threshold_consecutive_count; ++i) {
+ InsertAndSendFramesWithInterval(1200, frame_interval_us, kWidth, kHeight,
+ processing_time_us);
+ overuse_detector_->CheckForOveruse();
+ }
+
+ // Simulate frame rate reduction and normal usage.
+ frame_interval_us = rtc::kNumMicrosecsPerSec / kEncodeMaxFrameRate;
+ overuse_detector_->OnTargetFramerateUpdated(kEncodeMaxFrameRate);
+ EXPECT_CALL(*(observer_.get()), AdaptDown(reason_)).Times(0);
+ for (int i = 0; i < options_.high_threshold_consecutive_count; ++i) {
+ InsertAndSendFramesWithInterval(1200, frame_interval_us, kWidth, kHeight,
+ processing_time_us);
+ overuse_detector_->CheckForOveruse();
+ }
+
+ // Reduce processing time to trigger underuse.
+ processing_time_us =
+ (98 * UnderuseProcessingTimeLimitForFramerate(kEncodeMaxFrameRate)) / 100;
+ EXPECT_CALL(*(observer_.get()), AdaptUp(reason_)).Times(1);
+ InsertAndSendFramesWithInterval(1200, frame_interval_us, kWidth, kHeight,
+ processing_time_us);
+ overuse_detector_->CheckForOveruse();
+}
+
+TEST_F(OveruseFrameDetectorTest, RespectsMinFramerate) {
+ const int kMinFrameRate = 7; // Minimum fps allowed by current detector impl.
+ overuse_detector_->OnTargetFramerateUpdated(kMinFrameRate);
+
+ // Normal usage just at the limit.
+ int64_t frame_interval_us = rtc::kNumMicrosecsPerSec / kMinFrameRate;
+  // Processing time just below the overuse limit given kMinFrameRate.
+ int64_t processing_time_us =
+ (98 * OveruseProcessingTimeLimitForFramerate(kMinFrameRate)) / 100;
+ EXPECT_CALL(*(observer_.get()), AdaptDown(reason_)).Times(0);
+ for (int i = 0; i < options_.high_threshold_consecutive_count; ++i) {
+ InsertAndSendFramesWithInterval(1200, frame_interval_us, kWidth, kHeight,
+ processing_time_us);
+ overuse_detector_->CheckForOveruse();
+ }
+
+ // Over the limit to overuse.
+ processing_time_us =
+ (102 * OveruseProcessingTimeLimitForFramerate(kMinFrameRate)) / 100;
+ EXPECT_CALL(*(observer_.get()), AdaptDown(reason_)).Times(1);
+ for (int i = 0; i < options_.high_threshold_consecutive_count; ++i) {
+ InsertAndSendFramesWithInterval(1200, frame_interval_us, kWidth, kHeight,
+ processing_time_us);
+ overuse_detector_->CheckForOveruse();
+ }
+
+ // Reduce input frame rate. Should still trigger overuse.
+ overuse_detector_->OnTargetFramerateUpdated(kMinFrameRate - 1);
+ EXPECT_CALL(*(observer_.get()), AdaptDown(reason_)).Times(1);
+ for (int i = 0; i < options_.high_threshold_consecutive_count; ++i) {
+ InsertAndSendFramesWithInterval(1200, frame_interval_us, kWidth, kHeight,
+ processing_time_us);
+ overuse_detector_->CheckForOveruse();
+ }
+}
+
+TEST_F(OveruseFrameDetectorTest, LimitsMaxFrameInterval) {
+ const int kMaxFrameRate = 20;
+ overuse_detector_->OnTargetFramerateUpdated(kMaxFrameRate);
+ int64_t frame_interval_us = rtc::kNumMicrosecsPerSec / kMaxFrameRate;
+ // Maximum frame interval allowed is 35% above ideal.
+ int64_t max_frame_interval_us = (135 * frame_interval_us) / 100;
+  // Maximum processing time allowed with the above frame interval without
+  // triggering overuse.
+ int64_t max_processing_time_us =
+ (max_frame_interval_us * options_.high_encode_usage_threshold_percent) /
+ 100;
+
+ // Processing time just below overuse limit given kMaxFrameRate.
+ int64_t processing_time_us = (98 * max_processing_time_us) / 100;
+ EXPECT_CALL(*(observer_.get()), AdaptDown(reason_)).Times(0);
+ for (int i = 0; i < options_.high_threshold_consecutive_count; ++i) {
+ InsertAndSendFramesWithInterval(1200, max_frame_interval_us, kWidth,
+ kHeight, processing_time_us);
+ overuse_detector_->CheckForOveruse();
+ }
+
+ // Go above limit, trigger overuse.
+ processing_time_us = (102 * max_processing_time_us) / 100;
+ EXPECT_CALL(*(observer_.get()), AdaptDown(reason_)).Times(1);
+ for (int i = 0; i < options_.high_threshold_consecutive_count; ++i) {
+ InsertAndSendFramesWithInterval(1200, max_frame_interval_us, kWidth,
+ kHeight, processing_time_us);
+ overuse_detector_->CheckForOveruse();
+ }
+
+ // Increase frame interval, should still trigger overuse.
+ max_frame_interval_us *= 2;
+ EXPECT_CALL(*(observer_.get()), AdaptDown(reason_)).Times(1);
+ for (int i = 0; i < options_.high_threshold_consecutive_count; ++i) {
+ InsertAndSendFramesWithInterval(1200, max_frame_interval_us, kWidth,
+ kHeight, processing_time_us);
+ overuse_detector_->CheckForOveruse();
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/video/payload_router.cc b/third_party/libwebrtc/webrtc/video/payload_router.cc
new file mode 100644
index 0000000000..f43a773e5d
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/payload_router.cc
@@ -0,0 +1,255 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/payload_router.h"
+
+#include "modules/rtp_rtcp/include/rtp_rtcp.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/random.h"
+#include "rtc_base/timeutils.h"
+#include "system_wrappers/include/field_trial.h"
+
+namespace webrtc {
+
+namespace {
+// Maps the codec-specific information in |info| onto the RTP video header
+// |rtp|.
+void CopyCodecSpecific(const CodecSpecificInfo* info, RTPVideoHeader* rtp) {
+ RTC_DCHECK(info);
+ switch (info->codecType) {
+ case kVideoCodecVP8: {
+ rtp->codec = kRtpVideoVp8;
+ rtp->codecHeader.VP8.InitRTPVideoHeaderVP8();
+ rtp->codecHeader.VP8.pictureId = info->codecSpecific.VP8.pictureId;
+ rtp->codecHeader.VP8.nonReference = info->codecSpecific.VP8.nonReference;
+ rtp->codecHeader.VP8.temporalIdx = info->codecSpecific.VP8.temporalIdx;
+ rtp->codecHeader.VP8.layerSync = info->codecSpecific.VP8.layerSync;
+ rtp->codecHeader.VP8.tl0PicIdx = info->codecSpecific.VP8.tl0PicIdx;
+ rtp->codecHeader.VP8.keyIdx = info->codecSpecific.VP8.keyIdx;
+ rtp->simulcastIdx = info->codecSpecific.VP8.simulcastIdx;
+ return;
+ }
+ case kVideoCodecVP9: {
+ rtp->codec = kRtpVideoVp9;
+ rtp->codecHeader.VP9.InitRTPVideoHeaderVP9();
+ rtp->codecHeader.VP9.inter_pic_predicted =
+ info->codecSpecific.VP9.inter_pic_predicted;
+ rtp->codecHeader.VP9.flexible_mode =
+ info->codecSpecific.VP9.flexible_mode;
+ rtp->codecHeader.VP9.ss_data_available =
+ info->codecSpecific.VP9.ss_data_available;
+ rtp->codecHeader.VP9.picture_id = info->codecSpecific.VP9.picture_id;
+ rtp->codecHeader.VP9.tl0_pic_idx = info->codecSpecific.VP9.tl0_pic_idx;
+ rtp->codecHeader.VP9.temporal_idx = info->codecSpecific.VP9.temporal_idx;
+ rtp->codecHeader.VP9.spatial_idx = info->codecSpecific.VP9.spatial_idx;
+ rtp->codecHeader.VP9.temporal_up_switch =
+ info->codecSpecific.VP9.temporal_up_switch;
+ rtp->codecHeader.VP9.inter_layer_predicted =
+ info->codecSpecific.VP9.inter_layer_predicted;
+ rtp->codecHeader.VP9.gof_idx = info->codecSpecific.VP9.gof_idx;
+ rtp->codecHeader.VP9.num_spatial_layers =
+ info->codecSpecific.VP9.num_spatial_layers;
+
+ if (info->codecSpecific.VP9.ss_data_available) {
+ rtp->codecHeader.VP9.spatial_layer_resolution_present =
+ info->codecSpecific.VP9.spatial_layer_resolution_present;
+ if (info->codecSpecific.VP9.spatial_layer_resolution_present) {
+ for (size_t i = 0; i < info->codecSpecific.VP9.num_spatial_layers;
+ ++i) {
+ rtp->codecHeader.VP9.width[i] = info->codecSpecific.VP9.width[i];
+ rtp->codecHeader.VP9.height[i] = info->codecSpecific.VP9.height[i];
+ }
+ }
+ rtp->codecHeader.VP9.gof.CopyGofInfoVP9(info->codecSpecific.VP9.gof);
+ }
+
+ rtp->codecHeader.VP9.num_ref_pics = info->codecSpecific.VP9.num_ref_pics;
+ for (int i = 0; i < info->codecSpecific.VP9.num_ref_pics; ++i)
+ rtp->codecHeader.VP9.pid_diff[i] = info->codecSpecific.VP9.p_diff[i];
+ return;
+ }
+ case kVideoCodecH264:
+ rtp->codec = kRtpVideoH264;
+ rtp->codecHeader.H264.packetization_mode =
+ info->codecSpecific.H264.packetization_mode;
+ return;
+ case kVideoCodecGeneric:
+ rtp->codec = kRtpVideoGeneric;
+ rtp->simulcastIdx = info->codecSpecific.generic.simulcast_idx;
+ return;
+ default:
+ return;
+ }
+}
+
+} // namespace
+
+// Currently only used if forced fallback for VP8 is enabled.
+// Consider adding tl0PicIdx and setting it for both VP8 and VP9.
+// Consider making the picture id not codec specific.
+class PayloadRouter::RtpPayloadParams final {
+ public:
+ RtpPayloadParams(const uint32_t ssrc, const RtpPayloadState* state)
+ : ssrc_(ssrc) {
+ Random random(rtc::TimeMicros());
+ state_.picture_id =
+ state ? state->picture_id : (random.Rand<int16_t>() & 0x7FFF);
+ }
+ ~RtpPayloadParams() {}
+
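+  // If the encoder provided a picture id (i.e. not kNoPictureId), overwrites
+  // it with the router-managed sequence and advances the state modulo 2^15,
+  // the 15-bit picture id space of the VP8 RTP payload descriptor.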
+ void Set(RTPVideoHeader* rtp_video_header) {
+ if (rtp_video_header->codec == kRtpVideoVp8 &&
+ rtp_video_header->codecHeader.VP8.pictureId != kNoPictureId) {
+ rtp_video_header->codecHeader.VP8.pictureId = state_.picture_id;
+ state_.picture_id = (state_.picture_id + 1) & 0x7FFF;
+ }
+ }
+
+ uint32_t ssrc() const { return ssrc_; }
+
+ RtpPayloadState state() const { return state_; }
+
+ private:
+ const uint32_t ssrc_;
+ RtpPayloadState state_;
+};
+
+PayloadRouter::PayloadRouter(const std::vector<RtpRtcp*>& rtp_modules,
+ const std::vector<uint32_t>& ssrcs,
+ int payload_type,
+ const std::map<uint32_t, RtpPayloadState>& states)
+ : active_(false),
+ rtp_modules_(rtp_modules),
+ payload_type_(payload_type),
+ forced_fallback_enabled_((webrtc::field_trial::IsEnabled(
+ "WebRTC-VP8-Forced-Fallback-Encoder-v2"))) {
+ RTC_DCHECK_EQ(ssrcs.size(), rtp_modules.size());
+ // SSRCs are assumed to be sorted in the same order as |rtp_modules|.
+ for (uint32_t ssrc : ssrcs) {
+ // Restore state if it previously existed.
+ const RtpPayloadState* state = nullptr;
+ auto it = states.find(ssrc);
+ if (it != states.end()) {
+ state = &it->second;
+ }
+ params_.push_back(RtpPayloadParams(ssrc, state));
+ }
+}
+
+PayloadRouter::~PayloadRouter() {}
+
+void PayloadRouter::SetActive(bool active) {
+ rtc::CritScope lock(&crit_);
+ if (active_ == active)
+ return;
+ active_ = active;
+
+ for (auto& module : rtp_modules_) {
+ module->SetSendingStatus(active_);
+ module->SetSendingMediaStatus(active_);
+ }
+}
+
+bool PayloadRouter::IsActive() {
+ rtc::CritScope lock(&crit_);
+ return active_ && !rtp_modules_.empty();
+}
+
+std::map<uint32_t, RtpPayloadState> PayloadRouter::GetRtpPayloadStates() const {
+ rtc::CritScope lock(&crit_);
+ std::map<uint32_t, RtpPayloadState> payload_states;
+ for (const auto& param : params_) {
+ payload_states[param.ssrc()] = param.state();
+ }
+ return payload_states;
+}
+
+EncodedImageCallback::Result PayloadRouter::OnEncodedImage(
+ const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info,
+ const RTPFragmentationHeader* fragmentation) {
+ rtc::CritScope lock(&crit_);
+ RTC_DCHECK(!rtp_modules_.empty());
+ if (!active_)
+ return Result(Result::ERROR_SEND_FAILED);
+
+ RTPVideoHeader rtp_video_header;
+ memset(&rtp_video_header, 0, sizeof(RTPVideoHeader));
+ if (codec_specific_info)
+ CopyCodecSpecific(codec_specific_info, &rtp_video_header);
+ rtp_video_header.rotation = encoded_image.rotation_;
+ rtp_video_header.content_type = encoded_image.content_type_;
+ if (encoded_image.timing_.flags != TimingFrameFlags::kInvalid &&
+ encoded_image.timing_.flags != TimingFrameFlags::kNotTriggered) {
+ rtp_video_header.video_timing.encode_start_delta_ms =
+ VideoSendTiming::GetDeltaCappedMs(
+ encoded_image.capture_time_ms_,
+ encoded_image.timing_.encode_start_ms);
+ rtp_video_header.video_timing.encode_finish_delta_ms =
+ VideoSendTiming::GetDeltaCappedMs(
+ encoded_image.capture_time_ms_,
+ encoded_image.timing_.encode_finish_ms);
+ rtp_video_header.video_timing.packetization_finish_delta_ms = 0;
+ rtp_video_header.video_timing.pacer_exit_delta_ms = 0;
+ rtp_video_header.video_timing.network_timestamp_delta_ms = 0;
+ rtp_video_header.video_timing.network2_timestamp_delta_ms = 0;
+ rtp_video_header.video_timing.flags = encoded_image.timing_.flags;
+ } else {
+ rtp_video_header.video_timing.flags = TimingFrameFlags::kInvalid;
+ }
+ rtp_video_header.playout_delay = encoded_image.playout_delay_;
+
+ int stream_index = rtp_video_header.simulcastIdx;
+ RTC_DCHECK_LT(stream_index, rtp_modules_.size());
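+  // The simulcast index selects which RTP module (and thus which SSRC)
+  // carries this layer; modules are sorted in simulcast index order.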
+ if (forced_fallback_enabled_) {
+    // Sets the picture id. The SW and HW encoders have separate picture id
+    // sequences; overwrite the picture id here so that an encoder change does
+    // not cause a sequence discontinuity.
+ params_[stream_index].Set(&rtp_video_header);
+ }
+ uint32_t frame_id;
+ bool send_result = rtp_modules_[stream_index]->SendOutgoingData(
+ encoded_image._frameType, payload_type_, encoded_image._timeStamp,
+ encoded_image.capture_time_ms_, encoded_image._buffer,
+ encoded_image._length, fragmentation, &rtp_video_header, &frame_id);
+ if (!send_result)
+ return Result(Result::ERROR_SEND_FAILED);
+
+ return Result(Result::OK, frame_id);
+}
+
+void PayloadRouter::OnBitrateAllocationUpdated(
+ const BitrateAllocation& bitrate) {
+ rtc::CritScope lock(&crit_);
+ if (IsActive()) {
+ if (rtp_modules_.size() == 1) {
+ // If spatial scalability is enabled, it is covered by a single stream.
+ rtp_modules_[0]->SetVideoBitrateAllocation(bitrate);
+ } else {
+ // Simulcast is in use, split the BitrateAllocation into one struct per
+ // rtp stream, moving over the temporal layer allocation.
+ for (size_t si = 0; si < rtp_modules_.size(); ++si) {
+ // Don't send empty TargetBitrate messages on streams not being relayed.
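+        // Layers are assumed to be allocated contiguously from the lowest
+        // index, so the first unused layer ends the loop.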
+ if (!bitrate.IsSpatialLayerUsed(si))
+ break;
+
+ BitrateAllocation layer_bitrate;
+ for (int tl = 0; tl < kMaxTemporalStreams; ++tl) {
+ if (bitrate.HasBitrate(si, tl))
+ layer_bitrate.SetBitrate(0, tl, bitrate.GetBitrate(si, tl));
+ }
+ rtp_modules_[si]->SetVideoBitrateAllocation(layer_bitrate);
+ }
+ }
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/video/payload_router.h b/third_party/libwebrtc/webrtc/video/payload_router.h
new file mode 100644
index 0000000000..13b6cae7ce
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/payload_router.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_PAYLOAD_ROUTER_H_
+#define VIDEO_PAYLOAD_ROUTER_H_
+
+#include <map>
+#include <vector>
+
+#include "api/video_codecs/video_encoder.h"
+#include "common_types.h" // NOLINT(build/include)
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace webrtc {
+
+class RTPFragmentationHeader;
+class RtpRtcp;
+struct RTPVideoHeader;
+
+// PayloadRouter routes outgoing data to the correct sending RTP module, based
+// on the simulcast layer in RTPVideoHeader.
+class PayloadRouter : public EncodedImageCallback {
+ public:
+ // Rtp modules are assumed to be sorted in simulcast index order.
+ PayloadRouter(const std::vector<RtpRtcp*>& rtp_modules,
+ const std::vector<uint32_t>& ssrcs,
+ int payload_type,
+ const std::map<uint32_t, RtpPayloadState>& states);
+ ~PayloadRouter();
+
+  // PayloadRouter will only route packets while active; all packets are
+  // dropped otherwise.
+ void SetActive(bool active);
+ bool IsActive();
+
+ std::map<uint32_t, RtpPayloadState> GetRtpPayloadStates() const;
+
+ // Implements EncodedImageCallback.
+  // Returns Result::OK if the frame was routed to an RTP module and sent,
+  // Result::ERROR_SEND_FAILED otherwise.
+ EncodedImageCallback::Result OnEncodedImage(
+ const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info,
+ const RTPFragmentationHeader* fragmentation) override;
+
+ void OnBitrateAllocationUpdated(const BitrateAllocation& bitrate);
+
+ private:
+ class RtpPayloadParams;
+
+ void UpdateModuleSendingState() RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_);
+
+ rtc::CriticalSection crit_;
+ bool active_ RTC_GUARDED_BY(crit_);
+
+ // Rtp modules are assumed to be sorted in simulcast index order. Not owned.
+ const std::vector<RtpRtcp*> rtp_modules_;
+ const int payload_type_;
+
+ const bool forced_fallback_enabled_;
+ std::vector<RtpPayloadParams> params_ RTC_GUARDED_BY(crit_);
+
+ RTC_DISALLOW_COPY_AND_ASSIGN(PayloadRouter);
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_PAYLOAD_ROUTER_H_
diff --git a/third_party/libwebrtc/webrtc/video/payload_router_unittest.cc b/third_party/libwebrtc/webrtc/video/payload_router_unittest.cc
new file mode 100644
index 0000000000..d670124ddf
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/payload_router_unittest.cc
@@ -0,0 +1,504 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+#include <string>
+
+#include "call/video_config.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp.h"
+#include "modules/rtp_rtcp/mocks/mock_rtp_rtcp.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "test/field_trial.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "video/payload_router.h"
+
+using ::testing::_;
+using ::testing::AnyNumber;
+using ::testing::Invoke;
+using ::testing::NiceMock;
+using ::testing::Return;
+using ::testing::Unused;
+
+namespace webrtc {
+namespace {
+const int8_t kPayloadType = 96;
+const uint32_t kSsrc1 = 12345;
+const uint32_t kSsrc2 = 23456;
+const int16_t kPictureId = 123;
+const int16_t kTl0PicIdx = 20;
+const uint8_t kTemporalIdx = 1;
+const int16_t kInitialPictureId1 = 222;
+const int16_t kInitialPictureId2 = 44;
+} // namespace
+
+TEST(PayloadRouterTest, SendOnOneModule) {
+ NiceMock<MockRtpRtcp> rtp;
+ std::vector<RtpRtcp*> modules(1, &rtp);
+
+ uint8_t payload = 'a';
+ EncodedImage encoded_image;
+ encoded_image._timeStamp = 1;
+ encoded_image.capture_time_ms_ = 2;
+ encoded_image._frameType = kVideoFrameKey;
+ encoded_image._buffer = &payload;
+ encoded_image._length = 1;
+
+ PayloadRouter payload_router(modules, {kSsrc1}, kPayloadType, {});
+
+ EXPECT_CALL(rtp, SendOutgoingData(encoded_image._frameType, kPayloadType,
+ encoded_image._timeStamp,
+ encoded_image.capture_time_ms_, &payload,
+ encoded_image._length, nullptr, _, _))
+ .Times(0);
+ EXPECT_NE(
+ EncodedImageCallback::Result::OK,
+ payload_router.OnEncodedImage(encoded_image, nullptr, nullptr).error);
+
+ payload_router.SetActive(true);
+ EXPECT_CALL(rtp, SendOutgoingData(encoded_image._frameType, kPayloadType,
+ encoded_image._timeStamp,
+ encoded_image.capture_time_ms_, &payload,
+ encoded_image._length, nullptr, _, _))
+ .Times(1)
+ .WillOnce(Return(true));
+ EXPECT_EQ(
+ EncodedImageCallback::Result::OK,
+ payload_router.OnEncodedImage(encoded_image, nullptr, nullptr).error);
+
+ payload_router.SetActive(false);
+ EXPECT_CALL(rtp, SendOutgoingData(encoded_image._frameType, kPayloadType,
+ encoded_image._timeStamp,
+ encoded_image.capture_time_ms_, &payload,
+ encoded_image._length, nullptr, _, _))
+ .Times(0);
+ EXPECT_NE(
+ EncodedImageCallback::Result::OK,
+ payload_router.OnEncodedImage(encoded_image, nullptr, nullptr).error);
+
+ payload_router.SetActive(true);
+ EXPECT_CALL(rtp, SendOutgoingData(encoded_image._frameType, kPayloadType,
+ encoded_image._timeStamp,
+ encoded_image.capture_time_ms_, &payload,
+ encoded_image._length, nullptr, _, _))
+ .Times(1)
+ .WillOnce(Return(true));
+ EXPECT_EQ(
+ EncodedImageCallback::Result::OK,
+ payload_router.OnEncodedImage(encoded_image, nullptr, nullptr).error);
+}
+
+TEST(PayloadRouterTest, SendSimulcast) {
+ NiceMock<MockRtpRtcp> rtp_1;
+ NiceMock<MockRtpRtcp> rtp_2;
+ std::vector<RtpRtcp*> modules = {&rtp_1, &rtp_2};
+
+ uint8_t payload = 'a';
+ EncodedImage encoded_image;
+ encoded_image._timeStamp = 1;
+ encoded_image.capture_time_ms_ = 2;
+ encoded_image._frameType = kVideoFrameKey;
+ encoded_image._buffer = &payload;
+ encoded_image._length = 1;
+
+ PayloadRouter payload_router(modules, {kSsrc1, kSsrc2}, kPayloadType, {});
+
+ CodecSpecificInfo codec_info_1;
+ memset(&codec_info_1, 0, sizeof(CodecSpecificInfo));
+ codec_info_1.codecType = kVideoCodecVP8;
+ codec_info_1.codecSpecific.VP8.simulcastIdx = 0;
+
+ payload_router.SetActive(true);
+ EXPECT_CALL(rtp_1, SendOutgoingData(encoded_image._frameType, kPayloadType,
+ encoded_image._timeStamp,
+ encoded_image.capture_time_ms_, &payload,
+ encoded_image._length, nullptr, _, _))
+ .Times(1)
+ .WillOnce(Return(true));
+ EXPECT_CALL(rtp_2, SendOutgoingData(_, _, _, _, _, _, _, _, _)).Times(0);
+ EXPECT_EQ(EncodedImageCallback::Result::OK,
+ payload_router.OnEncodedImage(encoded_image, &codec_info_1, nullptr)
+ .error);
+
+ CodecSpecificInfo codec_info_2;
+ memset(&codec_info_2, 0, sizeof(CodecSpecificInfo));
+ codec_info_2.codecType = kVideoCodecVP8;
+ codec_info_2.codecSpecific.VP8.simulcastIdx = 1;
+
+ EXPECT_CALL(rtp_2, SendOutgoingData(encoded_image._frameType, kPayloadType,
+ encoded_image._timeStamp,
+ encoded_image.capture_time_ms_, &payload,
+ encoded_image._length, nullptr, _, _))
+ .Times(1)
+ .WillOnce(Return(true));
+ EXPECT_CALL(rtp_1, SendOutgoingData(_, _, _, _, _, _, _, _, _))
+ .Times(0);
+ EXPECT_EQ(EncodedImageCallback::Result::OK,
+ payload_router.OnEncodedImage(encoded_image, &codec_info_2, nullptr)
+ .error);
+
+ // Inactive.
+ payload_router.SetActive(false);
+ EXPECT_CALL(rtp_1, SendOutgoingData(_, _, _, _, _, _, _, _, _))
+ .Times(0);
+ EXPECT_CALL(rtp_2, SendOutgoingData(_, _, _, _, _, _, _, _, _))
+ .Times(0);
+ EXPECT_NE(EncodedImageCallback::Result::OK,
+ payload_router.OnEncodedImage(encoded_image, &codec_info_1, nullptr)
+ .error);
+ EXPECT_NE(EncodedImageCallback::Result::OK,
+ payload_router.OnEncodedImage(encoded_image, &codec_info_2, nullptr)
+ .error);
+}
+
+TEST(PayloadRouterTest, SimulcastTargetBitrate) {
+ NiceMock<MockRtpRtcp> rtp_1;
+ NiceMock<MockRtpRtcp> rtp_2;
+ std::vector<RtpRtcp*> modules = {&rtp_1, &rtp_2};
+
+ PayloadRouter payload_router(modules, {kSsrc1, kSsrc2}, kPayloadType, {});
+ payload_router.SetActive(true);
+
+ BitrateAllocation bitrate;
+ bitrate.SetBitrate(0, 0, 10000);
+ bitrate.SetBitrate(0, 1, 20000);
+ bitrate.SetBitrate(1, 0, 40000);
+ bitrate.SetBitrate(1, 1, 80000);
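+  // The router is expected to split this two-stream allocation into one
+  // BitrateAllocation per RTP module, moving each spatial layer's temporal
+  // rates to spatial index 0 of the per-stream struct.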
+
+ BitrateAllocation layer0_bitrate;
+ layer0_bitrate.SetBitrate(0, 0, 10000);
+ layer0_bitrate.SetBitrate(0, 1, 20000);
+
+ BitrateAllocation layer1_bitrate;
+ layer1_bitrate.SetBitrate(0, 0, 40000);
+ layer1_bitrate.SetBitrate(0, 1, 80000);
+
+ EXPECT_CALL(rtp_1, SetVideoBitrateAllocation(layer0_bitrate)).Times(1);
+ EXPECT_CALL(rtp_2, SetVideoBitrateAllocation(layer1_bitrate)).Times(1);
+
+ payload_router.OnBitrateAllocationUpdated(bitrate);
+}
+
+TEST(PayloadRouterTest, SimulcastTargetBitrateWithInactiveStream) {
+ // Set up two active rtp modules.
+ NiceMock<MockRtpRtcp> rtp_1;
+ NiceMock<MockRtpRtcp> rtp_2;
+ std::vector<RtpRtcp*> modules = {&rtp_1, &rtp_2};
+ PayloadRouter payload_router(modules, {kSsrc1, kSsrc2}, kPayloadType, {});
+ payload_router.SetActive(true);
+
+ // Create bitrate allocation with bitrate only for the first stream.
+ BitrateAllocation bitrate;
+ bitrate.SetBitrate(0, 0, 10000);
+ bitrate.SetBitrate(0, 1, 20000);
+
+  // Expect only the first rtp module to be asked to send a TargetBitrate
+  // message. (No TargetBitrate message of 0 bps is sent from the second one.)
+ EXPECT_CALL(rtp_1, SetVideoBitrateAllocation(bitrate)).Times(1);
+ EXPECT_CALL(rtp_2, SetVideoBitrateAllocation(_)).Times(0);
+
+ payload_router.OnBitrateAllocationUpdated(bitrate);
+}
+
+TEST(PayloadRouterTest, SvcTargetBitrate) {
+ NiceMock<MockRtpRtcp> rtp_1;
+ std::vector<RtpRtcp*> modules = {&rtp_1};
+ PayloadRouter payload_router(modules, {kSsrc1}, kPayloadType, {});
+ payload_router.SetActive(true);
+
+ BitrateAllocation bitrate;
+ bitrate.SetBitrate(0, 0, 10000);
+ bitrate.SetBitrate(0, 1, 20000);
+ bitrate.SetBitrate(1, 0, 40000);
+ bitrate.SetBitrate(1, 1, 80000);
+
+ EXPECT_CALL(rtp_1, SetVideoBitrateAllocation(bitrate)).Times(1);
+
+ payload_router.OnBitrateAllocationUpdated(bitrate);
+}
+
+TEST(PayloadRouterTest, InfoMappedToRtpVideoHeader_Vp8) {
+ NiceMock<MockRtpRtcp> rtp1;
+ NiceMock<MockRtpRtcp> rtp2;
+ std::vector<RtpRtcp*> modules = {&rtp1, &rtp2};
+ PayloadRouter payload_router(modules, {kSsrc1, kSsrc2}, kPayloadType, {});
+ payload_router.SetActive(true);
+
+ EncodedImage encoded_image;
+ encoded_image.rotation_ = kVideoRotation_90;
+ encoded_image.content_type_ = VideoContentType::SCREENSHARE;
+
+ CodecSpecificInfo codec_info;
+ memset(&codec_info, 0, sizeof(CodecSpecificInfo));
+ codec_info.codecType = kVideoCodecVP8;
+ codec_info.codecSpecific.VP8.simulcastIdx = 1;
+ codec_info.codecSpecific.VP8.pictureId = kPictureId;
+ codec_info.codecSpecific.VP8.temporalIdx = kTemporalIdx;
+ codec_info.codecSpecific.VP8.tl0PicIdx = kTl0PicIdx;
+ codec_info.codecSpecific.VP8.keyIdx = kNoKeyIdx;
+ codec_info.codecSpecific.VP8.layerSync = true;
+ codec_info.codecSpecific.VP8.nonReference = true;
+
+ EXPECT_CALL(rtp2, SendOutgoingData(_, _, _, _, _, _, nullptr, _, _))
+ .WillOnce(Invoke([](Unused, Unused, Unused, Unused, Unused, Unused,
+ Unused, const RTPVideoHeader* header, Unused) {
+ EXPECT_EQ(kVideoRotation_90, header->rotation);
+ EXPECT_EQ(VideoContentType::SCREENSHARE, header->content_type);
+ EXPECT_EQ(1, header->simulcastIdx);
+ EXPECT_EQ(kRtpVideoVp8, header->codec);
+ EXPECT_EQ(kPictureId, header->codecHeader.VP8.pictureId);
+ EXPECT_EQ(kTemporalIdx, header->codecHeader.VP8.temporalIdx);
+ EXPECT_EQ(kTl0PicIdx, header->codecHeader.VP8.tl0PicIdx);
+ EXPECT_EQ(kNoKeyIdx, header->codecHeader.VP8.keyIdx);
+ EXPECT_TRUE(header->codecHeader.VP8.layerSync);
+ EXPECT_TRUE(header->codecHeader.VP8.nonReference);
+ return true;
+ }));
+
+ EXPECT_EQ(
+ EncodedImageCallback::Result::OK,
+ payload_router.OnEncodedImage(encoded_image, &codec_info, nullptr).error);
+}
+
+TEST(PayloadRouterTest, InfoMappedToRtpVideoHeader_H264) {
+ NiceMock<MockRtpRtcp> rtp1;
+ std::vector<RtpRtcp*> modules = {&rtp1};
+ PayloadRouter payload_router(modules, {kSsrc1}, kPayloadType, {});
+ payload_router.SetActive(true);
+
+ EncodedImage encoded_image;
+ CodecSpecificInfo codec_info;
+ memset(&codec_info, 0, sizeof(CodecSpecificInfo));
+ codec_info.codecType = kVideoCodecH264;
+ codec_info.codecSpecific.H264.packetization_mode =
+ H264PacketizationMode::SingleNalUnit;
+
+ EXPECT_CALL(rtp1, SendOutgoingData(_, _, _, _, _, _, nullptr, _, _))
+ .WillOnce(Invoke([](Unused, Unused, Unused, Unused, Unused, Unused,
+ Unused, const RTPVideoHeader* header, Unused) {
+ EXPECT_EQ(0, header->simulcastIdx);
+ EXPECT_EQ(kRtpVideoH264, header->codec);
+ EXPECT_EQ(H264PacketizationMode::SingleNalUnit,
+ header->codecHeader.H264.packetization_mode);
+ return true;
+ }));
+
+ EXPECT_EQ(
+ EncodedImageCallback::Result::OK,
+ payload_router.OnEncodedImage(encoded_image, &codec_info, nullptr).error);
+}
+
+TEST(PayloadRouterTest, CreateWithNoPreviousStates) {
+ NiceMock<MockRtpRtcp> rtp1;
+ NiceMock<MockRtpRtcp> rtp2;
+ std::vector<RtpRtcp*> modules = {&rtp1, &rtp2};
+ PayloadRouter payload_router(modules, {kSsrc1, kSsrc2}, kPayloadType, {});
+ payload_router.SetActive(true);
+
+ std::map<uint32_t, RtpPayloadState> initial_states =
+ payload_router.GetRtpPayloadStates();
+ EXPECT_EQ(2u, initial_states.size());
+ EXPECT_NE(initial_states.find(kSsrc1), initial_states.end());
+ EXPECT_NE(initial_states.find(kSsrc2), initial_states.end());
+}
+
+TEST(PayloadRouterTest, CreateWithPreviousStates) {
+ RtpPayloadState state1;
+ state1.picture_id = kInitialPictureId1;
+ RtpPayloadState state2;
+ state2.picture_id = kInitialPictureId2;
+ std::map<uint32_t, RtpPayloadState> states = {{kSsrc1, state1},
+ {kSsrc2, state2}};
+
+ NiceMock<MockRtpRtcp> rtp1;
+ NiceMock<MockRtpRtcp> rtp2;
+ std::vector<RtpRtcp*> modules = {&rtp1, &rtp2};
+ PayloadRouter payload_router(modules, {kSsrc1, kSsrc2}, kPayloadType, states);
+ payload_router.SetActive(true);
+
+ std::map<uint32_t, RtpPayloadState> initial_states =
+ payload_router.GetRtpPayloadStates();
+ EXPECT_EQ(2u, initial_states.size());
+ EXPECT_EQ(kInitialPictureId1, initial_states[kSsrc1].picture_id);
+ EXPECT_EQ(kInitialPictureId2, initial_states[kSsrc2].picture_id);
+}
+
+class PayloadRouterTest : public ::testing::Test {
+ public:
+ explicit PayloadRouterTest(const std::string& field_trials)
+ : override_field_trials_(field_trials) {}
+ virtual ~PayloadRouterTest() {}
+
+ protected:
+ virtual void SetUp() { memset(&codec_info_, 0, sizeof(CodecSpecificInfo)); }
+
+ test::ScopedFieldTrials override_field_trials_;
+ EncodedImage image_;
+ CodecSpecificInfo codec_info_;
+};
+
+class TestWithForcedFallbackDisabled : public PayloadRouterTest {
+ public:
+ TestWithForcedFallbackDisabled()
+ : PayloadRouterTest("WebRTC-VP8-Forced-Fallback-Encoder-v2/Disabled/") {}
+};
+
+class TestWithForcedFallbackEnabled : public PayloadRouterTest {
+ public:
+ TestWithForcedFallbackEnabled()
+ : PayloadRouterTest(
+ "WebRTC-VP8-Forced-Fallback-Encoder-v2/Enabled-1,2,3/") {}
+};
+
+TEST_F(TestWithForcedFallbackDisabled, PictureIdIsNotChangedForVp8) {
+ NiceMock<MockRtpRtcp> rtp;
+ std::vector<RtpRtcp*> modules = {&rtp};
+ PayloadRouter router(modules, {kSsrc1}, kPayloadType, {});
+ router.SetActive(true);
+
+ codec_info_.codecType = kVideoCodecVP8;
+ codec_info_.codecSpecific.VP8.pictureId = kPictureId;
+
+ EXPECT_CALL(rtp, SendOutgoingData(_, _, _, _, _, _, nullptr, _, _))
+ .WillOnce(Invoke([](Unused, Unused, Unused, Unused, Unused, Unused,
+ Unused, const RTPVideoHeader* header, Unused) {
+ EXPECT_EQ(kRtpVideoVp8, header->codec);
+ EXPECT_EQ(kPictureId, header->codecHeader.VP8.pictureId);
+ return true;
+ }));
+
+ EXPECT_EQ(EncodedImageCallback::Result::OK,
+ router.OnEncodedImage(image_, &codec_info_, nullptr).error);
+}
+
+TEST_F(TestWithForcedFallbackEnabled, PictureIdIsSetForVp8) {
+ RtpPayloadState state1;
+ state1.picture_id = kInitialPictureId1;
+ RtpPayloadState state2;
+ state2.picture_id = kInitialPictureId2;
+ std::map<uint32_t, RtpPayloadState> states = {{kSsrc1, state1},
+ {kSsrc2, state2}};
+
+ NiceMock<MockRtpRtcp> rtp1;
+ NiceMock<MockRtpRtcp> rtp2;
+ std::vector<RtpRtcp*> modules = {&rtp1, &rtp2};
+ PayloadRouter router(modules, {kSsrc1, kSsrc2}, kPayloadType, states);
+ router.SetActive(true);
+
+ // OnEncodedImage, simulcastIdx: 0.
+ codec_info_.codecType = kVideoCodecVP8;
+ codec_info_.codecSpecific.VP8.pictureId = kPictureId;
+ codec_info_.codecSpecific.VP8.simulcastIdx = 0;
+
+ EXPECT_CALL(rtp1, SendOutgoingData(_, _, _, _, _, _, nullptr, _, _))
+ .WillOnce(Invoke([](Unused, Unused, Unused, Unused, Unused, Unused,
+ Unused, const RTPVideoHeader* header, Unused) {
+ EXPECT_EQ(kRtpVideoVp8, header->codec);
+ EXPECT_EQ(kInitialPictureId1, header->codecHeader.VP8.pictureId);
+ return true;
+ }));
+
+ EXPECT_EQ(EncodedImageCallback::Result::OK,
+ router.OnEncodedImage(image_, &codec_info_, nullptr).error);
+
+ // OnEncodedImage, simulcastIdx: 1.
+ codec_info_.codecSpecific.VP8.pictureId = kPictureId;
+ codec_info_.codecSpecific.VP8.simulcastIdx = 1;
+
+ EXPECT_CALL(rtp2, SendOutgoingData(_, _, _, _, _, _, nullptr, _, _))
+ .WillOnce(Invoke([](Unused, Unused, Unused, Unused, Unused, Unused,
+ Unused, const RTPVideoHeader* header, Unused) {
+ EXPECT_EQ(kRtpVideoVp8, header->codec);
+ EXPECT_EQ(kInitialPictureId2, header->codecHeader.VP8.pictureId);
+ return true;
+ }));
+
+ EXPECT_EQ(EncodedImageCallback::Result::OK,
+ router.OnEncodedImage(image_, &codec_info_, nullptr).error);
+
+ // State should hold next picture id to use.
+ states = router.GetRtpPayloadStates();
+ EXPECT_EQ(2u, states.size());
+ EXPECT_EQ(kInitialPictureId1 + 1, states[kSsrc1].picture_id);
+ EXPECT_EQ(kInitialPictureId2 + 1, states[kSsrc2].picture_id);
+}
+
+TEST_F(TestWithForcedFallbackEnabled, PictureIdWraps) {
+ RtpPayloadState state1;
+ state1.picture_id = kMaxTwoBytePictureId;
+
+ NiceMock<MockRtpRtcp> rtp;
+ std::vector<RtpRtcp*> modules = {&rtp};
+ PayloadRouter router(modules, {kSsrc1}, kPayloadType, {{kSsrc1, state1}});
+ router.SetActive(true);
+
+ codec_info_.codecType = kVideoCodecVP8;
+ codec_info_.codecSpecific.VP8.pictureId = kPictureId;
+
+ EXPECT_CALL(rtp, SendOutgoingData(_, _, _, _, _, _, nullptr, _, _))
+ .WillOnce(Invoke([](Unused, Unused, Unused, Unused, Unused, Unused,
+ Unused, const RTPVideoHeader* header, Unused) {
+ EXPECT_EQ(kRtpVideoVp8, header->codec);
+ EXPECT_EQ(kMaxTwoBytePictureId, header->codecHeader.VP8.pictureId);
+ return true;
+ }));
+
+ EXPECT_EQ(EncodedImageCallback::Result::OK,
+ router.OnEncodedImage(image_, &codec_info_, nullptr).error);
+
+ // State should hold next picture id to use.
+ std::map<uint32_t, RtpPayloadState> states = router.GetRtpPayloadStates();
+ EXPECT_EQ(1u, states.size());
+ EXPECT_EQ(0, states[kSsrc1].picture_id); // Wrapped.
+}
+
+TEST_F(TestWithForcedFallbackEnabled, PictureIdIsNotSetIfNoPictureId) {
+ NiceMock<MockRtpRtcp> rtp;
+ std::vector<RtpRtcp*> modules = {&rtp};
+ PayloadRouter router(modules, {kSsrc1}, kPayloadType, {});
+ router.SetActive(true);
+
+ codec_info_.codecType = kVideoCodecVP8;
+ codec_info_.codecSpecific.VP8.pictureId = kNoPictureId;
+
+ EXPECT_CALL(rtp, SendOutgoingData(_, _, _, _, _, _, nullptr, _, _))
+ .WillOnce(Invoke([](Unused, Unused, Unused, Unused, Unused, Unused,
+ Unused, const RTPVideoHeader* header, Unused) {
+ EXPECT_EQ(kRtpVideoVp8, header->codec);
+ EXPECT_EQ(kNoPictureId, header->codecHeader.VP8.pictureId);
+ return true;
+ }));
+
+ EXPECT_EQ(EncodedImageCallback::Result::OK,
+ router.OnEncodedImage(image_, &codec_info_, nullptr).error);
+}
+
+TEST_F(TestWithForcedFallbackEnabled, PictureIdIsNotSetForVp9) {
+ NiceMock<MockRtpRtcp> rtp;
+ std::vector<RtpRtcp*> modules = {&rtp};
+ PayloadRouter router(modules, {kSsrc1}, kPayloadType, {});
+ router.SetActive(true);
+
+ codec_info_.codecType = kVideoCodecVP9;
+ codec_info_.codecSpecific.VP9.picture_id = kPictureId;
+
+ EXPECT_CALL(rtp, SendOutgoingData(_, _, _, _, _, _, nullptr, _, _))
+ .WillOnce(Invoke([](Unused, Unused, Unused, Unused, Unused, Unused,
+ Unused, const RTPVideoHeader* header, Unused) {
+ EXPECT_EQ(kRtpVideoVp9, header->codec);
+ EXPECT_EQ(kPictureId, header->codecHeader.VP9.picture_id);
+ return true;
+ }));
+
+ EXPECT_EQ(EncodedImageCallback::Result::OK,
+ router.OnEncodedImage(image_, &codec_info_, nullptr).error);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/video/picture_id_tests.cc b/third_party/libwebrtc/webrtc/video/picture_id_tests.cc
new file mode 100644
index 0000000000..f1545d80e3
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/picture_id_tests.cc
@@ -0,0 +1,382 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "media/engine/internalencoderfactory.h"
+#include "media/engine/simulcast_encoder_adapter.h"
+#include "modules/rtp_rtcp/source/rtp_format.h"
+#include "rtc_base/numerics/sequence_number_util.h"
+#include "test/call_test.h"
+#include "test/field_trial.h"
+
+namespace webrtc {
+namespace {
+const int kFrameMaxWidth = 1280;
+const int kFrameMaxHeight = 720;
+const int kFrameRate = 30;
+const int kMaxSecondsLost = 5;
+const int kMaxFramesLost = kFrameRate * kMaxSecondsLost;
+const int kMinPacketsToObserve = 10;
+const int kEncoderBitrateBps = 100000;
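+// The VP8 RTP payload descriptor carries a 15-bit picture id, so the
+// sequence wraps at 2^15.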
+const uint32_t kPictureIdWraparound = (1 << 15);
+
+const char kVp8ForcedFallbackEncoderEnabled[] =
+ "WebRTC-VP8-Forced-Fallback-Encoder-v2/Enabled/";
+} // namespace
+
+class PictureIdObserver : public test::RtpRtcpObserver {
+ public:
+ PictureIdObserver()
+ : test::RtpRtcpObserver(test::CallTest::kDefaultTimeoutMs),
+ max_expected_picture_id_gap_(0),
+ num_ssrcs_to_observe_(1) {}
+
+ void SetExpectedSsrcs(size_t num_expected_ssrcs) {
+ rtc::CritScope lock(&crit_);
+ num_ssrcs_to_observe_ = num_expected_ssrcs;
+ }
+
+ void ResetObservedSsrcs() {
+ rtc::CritScope lock(&crit_);
+ // Do not clear the timestamp and picture_id, to ensure that we check
+ // consistency between reinits and recreations.
+ num_packets_sent_.clear();
+ observed_ssrcs_.clear();
+ }
+
+ void SetMaxExpectedPictureIdGap(int max_expected_picture_id_gap) {
+ rtc::CritScope lock(&crit_);
+ max_expected_picture_id_gap_ = max_expected_picture_id_gap;
+ }
+
+ private:
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ rtc::CritScope lock(&crit_);
+
+ // RTP header.
+ RTPHeader header;
+ EXPECT_TRUE(parser_->Parse(packet, length, &header));
+ const uint32_t timestamp = header.timestamp;
+ const uint32_t ssrc = header.ssrc;
+
+ const bool known_ssrc = (ssrc == test::CallTest::kVideoSendSsrcs[0] ||
+ ssrc == test::CallTest::kVideoSendSsrcs[1] ||
+ ssrc == test::CallTest::kVideoSendSsrcs[2]);
+ EXPECT_TRUE(known_ssrc) << "Unknown SSRC sent.";
+
+ const bool is_padding =
+ (length == header.headerLength + header.paddingLength);
+ if (is_padding) {
+ return SEND_PACKET;
+ }
+
+ // VP8 header.
+ std::unique_ptr<RtpDepacketizer> depacketizer(
+ RtpDepacketizer::Create(kRtpVideoVp8));
+ RtpDepacketizer::ParsedPayload parsed_payload;
+ EXPECT_TRUE(depacketizer->Parse(
+ &parsed_payload, &packet[header.headerLength],
+ length - header.headerLength - header.paddingLength));
+ const uint16_t picture_id =
+ parsed_payload.type.Video.codecHeader.VP8.pictureId;
+
+ // If this is the first packet, we have nothing to compare to.
+ if (last_observed_timestamp_.find(ssrc) == last_observed_timestamp_.end()) {
+ last_observed_timestamp_[ssrc] = timestamp;
+ last_observed_picture_id_[ssrc] = picture_id;
+ ++num_packets_sent_[ssrc];
+
+ return SEND_PACKET;
+ }
+
+ // Verify continuity and monotonicity of picture_id sequence.
+ if (last_observed_timestamp_[ssrc] == timestamp) {
+ // Packet belongs to same frame as before.
+ EXPECT_EQ(last_observed_picture_id_[ssrc], picture_id);
+ } else {
+ // Packet is a new frame.
+
+ // Picture id should be increasing.
+ const bool picture_id_is_increasing =
+ AheadOf<uint16_t, kPictureIdWraparound>(
+ picture_id, last_observed_picture_id_[ssrc]);
+ EXPECT_TRUE(picture_id_is_increasing);
+
+ // Picture id should not increase more than expected.
+ const int picture_id_diff = ForwardDiff<uint16_t, kPictureIdWraparound>(
+ last_observed_picture_id_[ssrc], picture_id);
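+      // ForwardDiff() gives the wrap-aware distance from the previously
+      // observed picture id to the current one, modulo kPictureIdWraparound.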
+
+ // For delta frames, expect continuously increasing picture id.
+ if (parsed_payload.frame_type != kVideoFrameKey) {
+ EXPECT_EQ(picture_id_diff, 1);
+ }
+      // Any frames still in the queue are lost when a VideoSendStream is
+      // destroyed.
+ // The first frame after recreation should be a key frame.
+ if (picture_id_diff > 1) {
+ EXPECT_EQ(kVideoFrameKey, parsed_payload.frame_type);
+ EXPECT_LE(picture_id_diff - 1, max_expected_picture_id_gap_);
+ }
+ }
+ last_observed_timestamp_[ssrc] = timestamp;
+ last_observed_picture_id_[ssrc] = picture_id;
+
+ // Pass the test when enough media packets have been received
+ // on all streams.
+ if (++num_packets_sent_[ssrc] >= kMinPacketsToObserve &&
+ observed_ssrcs_.find(ssrc) == observed_ssrcs_.end()) {
+ observed_ssrcs_.insert(ssrc);
+ if (observed_ssrcs_.size() == num_ssrcs_to_observe_) {
+ observation_complete_.Set();
+ }
+ }
+
+ return SEND_PACKET;
+ }
+
+ rtc::CriticalSection crit_;
+ std::map<uint32_t, uint32_t> last_observed_timestamp_ RTC_GUARDED_BY(crit_);
+ std::map<uint32_t, uint16_t> last_observed_picture_id_ RTC_GUARDED_BY(crit_);
+ std::map<uint32_t, size_t> num_packets_sent_ RTC_GUARDED_BY(crit_);
+ int max_expected_picture_id_gap_ RTC_GUARDED_BY(crit_);
+ size_t num_ssrcs_to_observe_ RTC_GUARDED_BY(crit_);
+ std::set<uint32_t> observed_ssrcs_ RTC_GUARDED_BY(crit_);
+};
+
+class PictureIdTest : public test::CallTest,
+ public ::testing::WithParamInterface<std::string> {
+ public:
+ PictureIdTest() : scoped_field_trial_(GetParam()) {}
+
+ virtual ~PictureIdTest() {
+ EXPECT_EQ(nullptr, video_send_stream_);
+ EXPECT_TRUE(video_receive_streams_.empty());
+
+ task_queue_.SendTask([this]() {
+ Stop();
+ DestroyStreams();
+ send_transport_.reset();
+ receive_transport_.reset();
+ DestroyCalls();
+ });
+ }
+
+ void SetupEncoder(VideoEncoder* encoder);
+ void TestPictureIdContinuousAfterReconfigure(
+ const std::vector<int>& ssrc_counts);
+ void TestPictureIdIncreaseAfterRecreateStreams(
+ const std::vector<int>& ssrc_counts);
+
+ private:
+ test::ScopedFieldTrials scoped_field_trial_;
+ PictureIdObserver observer;
+};
+
+INSTANTIATE_TEST_CASE_P(TestWithForcedFallbackEncoderEnabled,
+ PictureIdTest,
+ ::testing::Values(kVp8ForcedFallbackEncoderEnabled,
+ ""));
+
+// Use a special stream factory to ensure that all simulcast streams are being
+// sent.
+class VideoStreamFactory
+ : public VideoEncoderConfig::VideoStreamFactoryInterface {
+ public:
+ VideoStreamFactory() = default;
+
+ private:
+ std::vector<VideoStream> CreateEncoderStreams(
+ int width,
+ int height,
+ const VideoEncoderConfig& encoder_config) override {
+ std::vector<VideoStream> streams =
+ test::CreateVideoStreams(width, height, encoder_config);
+
+ if (encoder_config.number_of_streams > 1) {
+ RTC_DCHECK_EQ(3, encoder_config.number_of_streams);
+
+ for (size_t i = 0; i < encoder_config.number_of_streams; ++i) {
+ streams[i].min_bitrate_bps = kEncoderBitrateBps;
+ streams[i].target_bitrate_bps = kEncoderBitrateBps;
+ streams[i].max_bitrate_bps = kEncoderBitrateBps;
+ }
+
+      // test::CreateVideoStreams does not return frame sizes for the lower
+      // streams that VP8Impl::InitEncode will accept.
+ // TODO(brandtr): Fix the problem in test::CreateVideoStreams, rather
+ // than overriding the values here.
+ streams[1].width = streams[2].width / 2;
+ streams[1].height = streams[2].height / 2;
+ streams[0].width = streams[1].width / 2;
+ streams[0].height = streams[1].height / 2;
+ } else {
+ // Use the same total bitrates when sending a single stream to avoid
+ // lowering the bitrate estimate and requiring a subsequent rampup.
+ streams[0].min_bitrate_bps = 3 * kEncoderBitrateBps;
+ streams[0].target_bitrate_bps = 3 * kEncoderBitrateBps;
+ streams[0].max_bitrate_bps = 3 * kEncoderBitrateBps;
+ }
+
+ return streams;
+ }
+};
+
+void PictureIdTest::SetupEncoder(VideoEncoder* encoder) {
+ task_queue_.SendTask([this, &encoder]() {
+ Call::Config config(event_log_.get());
+ CreateCalls(config, config);
+
+ send_transport_.reset(new test::PacketTransport(
+ &task_queue_, sender_call_.get(), &observer,
+ test::PacketTransport::kSender, payload_type_map_,
+ FakeNetworkPipe::Config()));
+
+ CreateSendConfig(kNumSsrcs, 0, 0, send_transport_.get());
+ video_send_config_.encoder_settings.encoder = encoder;
+ video_send_config_.encoder_settings.payload_name = "VP8";
+ video_encoder_config_.video_stream_factory =
+ new rtc::RefCountedObject<VideoStreamFactory>();
+ video_encoder_config_.number_of_streams = 1;
+ });
+}
+
+void PictureIdTest::TestPictureIdContinuousAfterReconfigure(
+ const std::vector<int>& ssrc_counts) {
+ task_queue_.SendTask([this]() {
+ CreateVideoStreams();
+ CreateFrameGeneratorCapturer(kFrameRate, kFrameMaxWidth, kFrameMaxHeight);
+
+ // Initial test with a single stream.
+ Start();
+ });
+
+ EXPECT_TRUE(observer.Wait()) << "Timed out waiting for packets.";
+
+ // Reconfigure VideoEncoder and test picture id increase.
+ // Expect continuously increasing picture id, equivalent to no gaps.
+ observer.SetMaxExpectedPictureIdGap(0);
+ for (int ssrc_count : ssrc_counts) {
+ video_encoder_config_.number_of_streams = ssrc_count;
+ observer.SetExpectedSsrcs(ssrc_count);
+ observer.ResetObservedSsrcs();
+ // Make sure the picture_id sequence is continuous on reinit and recreate.
+ task_queue_.SendTask([this]() {
+ video_send_stream_->ReconfigureVideoEncoder(video_encoder_config_.Copy());
+ });
+ EXPECT_TRUE(observer.Wait()) << "Timed out waiting for packets.";
+ }
+
+ task_queue_.SendTask([this]() {
+ Stop();
+ DestroyStreams();
+ send_transport_.reset();
+ receive_transport_.reset();
+ DestroyCalls();
+ });
+}
+
+void PictureIdTest::TestPictureIdIncreaseAfterRecreateStreams(
+ const std::vector<int>& ssrc_counts) {
+ task_queue_.SendTask([this]() {
+ CreateVideoStreams();
+ CreateFrameGeneratorCapturer(kFrameRate, kFrameMaxWidth, kFrameMaxHeight);
+
+ // Initial test with a single stream.
+ Start();
+ });
+
+ EXPECT_TRUE(observer.Wait()) << "Timed out waiting for packets.";
+
+ // Recreate VideoSendStream and test picture id increase.
+  // When the VideoSendStream is destroyed, any frames still in the queue are
+  // lost with it, so it is expected that some frames might be lost.
+ observer.SetMaxExpectedPictureIdGap(kMaxFramesLost);
+ for (int ssrc_count : ssrc_counts) {
+ task_queue_.SendTask([this, &ssrc_count]() {
+ video_encoder_config_.number_of_streams = ssrc_count;
+
+ frame_generator_capturer_->Stop();
+ sender_call_->DestroyVideoSendStream(video_send_stream_);
+
+ observer.SetExpectedSsrcs(ssrc_count);
+ observer.ResetObservedSsrcs();
+
+ video_send_stream_ = sender_call_->CreateVideoSendStream(
+ video_send_config_.Copy(), video_encoder_config_.Copy());
+ video_send_stream_->Start();
+ CreateFrameGeneratorCapturer(kFrameRate, kFrameMaxWidth, kFrameMaxHeight);
+ frame_generator_capturer_->Start();
+ });
+
+ EXPECT_TRUE(observer.Wait()) << "Timed out waiting for packets.";
+ }
+
+ task_queue_.SendTask([this]() {
+ Stop();
+ DestroyStreams();
+ send_transport_.reset();
+ receive_transport_.reset();
+ });
+}
+
+TEST_P(PictureIdTest, PictureIdContinuousAfterReconfigureVp8) {
+ std::unique_ptr<VideoEncoder> encoder(VP8Encoder::Create());
+ SetupEncoder(encoder.get());
+ TestPictureIdContinuousAfterReconfigure({1, 3, 3, 1, 1});
+}
+
+TEST_P(PictureIdTest, PictureIdIncreasingAfterRecreateStreamVp8) {
+ std::unique_ptr<VideoEncoder> encoder(VP8Encoder::Create());
+ SetupEncoder(encoder.get());
+ TestPictureIdIncreaseAfterRecreateStreams({1, 3, 3, 1, 1});
+}
+
+TEST_P(PictureIdTest, PictureIdIncreasingAfterStreamCountChangeVp8) {
+ std::unique_ptr<VideoEncoder> encoder(VP8Encoder::Create());
+  // Make sure that the picture id is not reset if the stream count goes
+  // down and then up.
+ std::vector<int> ssrc_counts = {3, 1, 3};
+ SetupEncoder(encoder.get());
+ TestPictureIdContinuousAfterReconfigure(ssrc_counts);
+}
+
+TEST_P(PictureIdTest,
+ PictureIdContinuousAfterReconfigureSimulcastEncoderAdapter) {
+ InternalEncoderFactory internal_encoder_factory;
+ SimulcastEncoderAdapter simulcast_encoder_adapter(&internal_encoder_factory);
+ SetupEncoder(&simulcast_encoder_adapter);
+ TestPictureIdContinuousAfterReconfigure({1, 3, 3, 1, 1});
+}
+
+TEST_P(PictureIdTest,
+ PictureIdIncreasingAfterRecreateStreamSimulcastEncoderAdapter) {
+ InternalEncoderFactory internal_encoder_factory;
+ SimulcastEncoderAdapter simulcast_encoder_adapter(&internal_encoder_factory);
+ SetupEncoder(&simulcast_encoder_adapter);
+ TestPictureIdIncreaseAfterRecreateStreams({1, 3, 3, 1, 1});
+}
+
+// When using the simulcast encoder adapter, the picture id is randomly set
+// when the ssrc count is reduced and then increased. This means that we are
+// not spec compliant in that particular case.
+TEST_P(PictureIdTest,
+ PictureIdIncreasingAfterStreamCountChangeSimulcastEncoderAdapter) {
+ // If forced fallback is enabled, the picture id is set in the PayloadRouter
+ // and the sequence should be continuous.
+ if (GetParam() == kVp8ForcedFallbackEncoderEnabled) {
+ InternalEncoderFactory internal_encoder_factory;
+ SimulcastEncoderAdapter simulcast_encoder_adapter(
+ &internal_encoder_factory);
+    // Make sure that the picture id is not reset if the stream count goes
+    // down and then up.
+ std::vector<int> ssrc_counts = {3, 1, 3};
+ SetupEncoder(&simulcast_encoder_adapter);
+ TestPictureIdContinuousAfterReconfigure(ssrc_counts);
+ }
+}
+
+} // namespace webrtc
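For reference, the picture id gap checked via SetMaxExpectedPictureIdGap() above wraps modulo 2^15, the range of VP8's 15-bit picture id. A minimal sketch of a wraparound-aware gap computation (an illustration only, not part of this patch; PictureIdGap is a hypothetical helper):

    #include <cstdint>

    // Number of picture ids skipped between |last| and |current|, assuming the
    // ids advance modulo 2^15. Returns 0 when the ids are consecutive.
    int PictureIdGap(uint16_t last, uint16_t current) {
      const int kPictureIdModulo = 1 << 15;
      int forward_diff = (current + kPictureIdModulo - last) % kPictureIdModulo;
      return forward_diff - 1;
    }

With this definition, last = 32767 followed by current = 0 gives a gap of 0, matching the "no gaps" expectation in TestPictureIdContinuousAfterReconfigure().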
diff --git a/third_party/libwebrtc/webrtc/video/quality_threshold.cc b/third_party/libwebrtc/webrtc/video/quality_threshold.cc
new file mode 100644
index 0000000000..8ee9fbb1ff
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/quality_threshold.cc
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/quality_threshold.h"
+
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+QualityThreshold::QualityThreshold(int low_threshold,
+ int high_threshold,
+ float fraction,
+ int max_measurements)
+ : buffer_(new int[max_measurements]),
+ max_measurements_(max_measurements),
+ fraction_(fraction),
+ low_threshold_(low_threshold),
+ high_threshold_(high_threshold),
+ until_full_(max_measurements),
+ next_index_(0),
+ sum_(0),
+ count_low_(0),
+ count_high_(0),
+ num_high_states_(0),
+ num_certain_states_(0) {
+ RTC_CHECK_GT(fraction, 0.5f);
+ RTC_CHECK_GT(max_measurements, 1);
+ RTC_CHECK_LT(low_threshold, high_threshold);
+}
+
+void QualityThreshold::AddMeasurement(int measurement) {
+ int prev_val = until_full_ > 0 ? 0 : buffer_[next_index_];
+ buffer_[next_index_] = measurement;
+ next_index_ = (next_index_ + 1) % max_measurements_;
+
+ sum_ += measurement - prev_val;
+
+ if (until_full_ == 0) {
+ if (prev_val <= low_threshold_) {
+ --count_low_;
+ } else if (prev_val >= high_threshold_) {
+ --count_high_;
+ }
+ }
+
+ if (measurement <= low_threshold_) {
+ ++count_low_;
+ } else if (measurement >= high_threshold_) {
+ ++count_high_;
+ }
+
+ float sufficient_majority = fraction_ * max_measurements_;
+ if (count_high_ >= sufficient_majority) {
+ is_high_ = rtc::Optional<bool>(true);
+ } else if (count_low_ >= sufficient_majority) {
+ is_high_ = rtc::Optional<bool>(false);
+ }
+
+ if (until_full_ > 0)
+ --until_full_;
+
+ if (is_high_) {
+ if (*is_high_)
+ ++num_high_states_;
+ ++num_certain_states_;
+ }
+}
+
+rtc::Optional<bool> QualityThreshold::IsHigh() const {
+ return is_high_;
+}
+
+rtc::Optional<double> QualityThreshold::CalculateVariance() const {
+ if (until_full_ > 0) {
+ return rtc::Optional<double>();
+ }
+
+ double variance = 0;
+ double mean = static_cast<double>(sum_) / max_measurements_;
+ for (int i = 0; i < max_measurements_; ++i) {
+ variance += (buffer_[i] - mean) * (buffer_[i] - mean);
+ }
+ return rtc::Optional<double>(variance / (max_measurements_ - 1));
+}
+
+rtc::Optional<double> QualityThreshold::FractionHigh(
+ int min_required_samples) const {
+ RTC_DCHECK_GT(min_required_samples, 0);
+ if (num_certain_states_ < min_required_samples)
+ return rtc::Optional<double>();
+
+ return rtc::Optional<double>(static_cast<double>(num_high_states_) /
+ num_certain_states_);
+}
+
+} // namespace webrtc
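A minimal usage sketch of the class above (an illustration, not part of this patch): with a fraction of 0.75 over a 10-measurement window, IsHigh() stays unset until one state reaches a 75% majority of the window.

    webrtc::QualityThreshold fps_threshold(/*low_threshold=*/12,
                                           /*high_threshold=*/14,
                                           /*fraction=*/0.75f,
                                           /*max_measurements=*/10);
    for (int i = 0; i < 8; ++i)
      fps_threshold.AddMeasurement(30);  // >= high_threshold, counts as high.
    // After the 8th measurement, count_high_ (8) exceeds the sufficient
    // majority (0.75 * 10 = 7.5), so IsHigh() switches from unset to true.
    bool is_high = *fps_threshold.IsHigh();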
diff --git a/third_party/libwebrtc/webrtc/video/quality_threshold.h b/third_party/libwebrtc/webrtc/video/quality_threshold.h
new file mode 100644
index 0000000000..519e2c33de
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/quality_threshold.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_QUALITY_THRESHOLD_H_
+#define VIDEO_QUALITY_THRESHOLD_H_
+
+#include <memory>
+
+#include "api/optional.h"
+
+namespace webrtc {
+
+class QualityThreshold {
+ public:
+ // Both thresholds are inclusive, i.e. measurement >= high signifies a high
+ // state, while measurement <= low signifies a low state.
+ QualityThreshold(int low_threshold,
+ int high_threshold,
+ float fraction,
+ int max_measurements);
+
+ void AddMeasurement(int measurement);
+ rtc::Optional<bool> IsHigh() const;
+ rtc::Optional<double> CalculateVariance() const;
+ rtc::Optional<double> FractionHigh(int min_required_samples) const;
+
+ private:
+ const std::unique_ptr<int[]> buffer_;
+ const int max_measurements_;
+ const float fraction_;
+ const int low_threshold_;
+ const int high_threshold_;
+ int until_full_;
+ int next_index_;
+ rtc::Optional<bool> is_high_;
+ int sum_;
+ int count_low_;
+ int count_high_;
+ int num_high_states_;
+ int num_certain_states_;
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_QUALITY_THRESHOLD_H_
diff --git a/third_party/libwebrtc/webrtc/video/quality_threshold_unittest.cc b/third_party/libwebrtc/webrtc/video/quality_threshold_unittest.cc
new file mode 100644
index 0000000000..c9396d7188
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/quality_threshold_unittest.cc
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/quality_threshold.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(QualityThresholdTest, BackAndForth) {
+ const int kLowThreshold = 0;
+ const int kHighThreshold = 1;
+ const float kFraction = 0.75f;
+ const int kMaxMeasurements = 10;
+
+ QualityThreshold thresh(kLowThreshold, kHighThreshold, kFraction,
+ kMaxMeasurements);
+
+ const int kNeededMeasurements =
+ static_cast<int>(kFraction * kMaxMeasurements + 1);
+ for (int i = 0; i < kNeededMeasurements; ++i) {
+ EXPECT_FALSE(thresh.IsHigh());
+ thresh.AddMeasurement(kLowThreshold);
+ }
+ ASSERT_TRUE(thresh.IsHigh());
+ for (int i = 0; i < kNeededMeasurements; ++i) {
+ EXPECT_FALSE(*thresh.IsHigh());
+ thresh.AddMeasurement(kHighThreshold);
+ }
+ EXPECT_TRUE(*thresh.IsHigh());
+
+ for (int i = 0; i < kNeededMeasurements; ++i) {
+ EXPECT_TRUE(*thresh.IsHigh());
+ thresh.AddMeasurement(kLowThreshold);
+ }
+ EXPECT_FALSE(*thresh.IsHigh());
+}
+
+TEST(QualityThresholdTest, Variance) {
+ const int kLowThreshold = 0;
+ const int kHighThreshold = 1;
+ const float kFraction = 0.8f;
+ const int kMaxMeasurements = 10;
+ const double kMaxError = 0.01;
+
+ // Previously randomly generated values...
+ int values[] = {51, 79, 80, 56, 19, 20, 48, 57, 48, 25, 2, 25, 38, 37, 25};
+ // ...with precomputed variances.
+ double variances[] = {476.9, 687.6, 552, 336.4, 278.767, 265.167};
+
+ QualityThreshold thresh(kLowThreshold, kHighThreshold, kFraction,
+ kMaxMeasurements);
+
+ for (int i = 0; i < kMaxMeasurements; ++i) {
+ EXPECT_FALSE(thresh.CalculateVariance());
+ thresh.AddMeasurement(values[i]);
+ }
+
+ ASSERT_TRUE(thresh.CalculateVariance());
+ EXPECT_NEAR(variances[0], *thresh.CalculateVariance(), kMaxError);
+ for (unsigned int i = 1; i < sizeof(variances) / sizeof(double); ++i) {
+ thresh.AddMeasurement(values[i + kMaxMeasurements - 1]);
+ EXPECT_NEAR(variances[i], *thresh.CalculateVariance(), kMaxError);
+ }
+
+ for (int i = 0; i < kMaxMeasurements; ++i) {
+ thresh.AddMeasurement(42);
+ }
+ EXPECT_NEAR(0, *thresh.CalculateVariance(), kMaxError);
+}
+
+TEST(QualityThresholdTest, BetweenThresholds) {
+ const int kLowThreshold = 0;
+ const int kHighThreshold = 2;
+ const float kFraction = 0.6f;
+ const int kMaxMeasurements = 10;
+
+ const int kBetweenThresholds = (kLowThreshold + kHighThreshold) / 2;
+
+ QualityThreshold thresh(kLowThreshold, kHighThreshold, kFraction,
+ kMaxMeasurements);
+
+ for (int i = 0; i < 2 * kMaxMeasurements; ++i) {
+ EXPECT_FALSE(thresh.IsHigh());
+ thresh.AddMeasurement(kBetweenThresholds);
+ }
+ EXPECT_FALSE(thresh.IsHigh());
+}
+
+TEST(QualityThresholdTest, FractionHigh) {
+ const int kLowThreshold = 0;
+ const int kHighThreshold = 2;
+ const float kFraction = 0.75f;
+ const int kMaxMeasurements = 10;
+
+ const int kBetweenThresholds = (kLowThreshold + kHighThreshold) / 2;
+ const int kNeededMeasurements =
+ static_cast<int>(kFraction * kMaxMeasurements + 1);
+
+ QualityThreshold thresh(kLowThreshold, kHighThreshold, kFraction,
+ kMaxMeasurements);
+
+ for (int i = 0; i < kMaxMeasurements; ++i) {
+ EXPECT_FALSE(thresh.FractionHigh(1));
+ thresh.AddMeasurement(kBetweenThresholds);
+ }
+
+ for (int i = 0; i < kNeededMeasurements; i++) {
+ EXPECT_FALSE(thresh.FractionHigh(1));
+ thresh.AddMeasurement(kHighThreshold);
+ }
+ EXPECT_FALSE(thresh.FractionHigh(2));
+ ASSERT_TRUE(thresh.FractionHigh(1));
+ EXPECT_NEAR(*thresh.FractionHigh(1), 1, 0.001);
+
+ for (int i = 0; i < kNeededMeasurements; i++) {
+ EXPECT_NEAR(*thresh.FractionHigh(1), 1, 0.001);
+ thresh.AddMeasurement(kLowThreshold);
+ }
+ EXPECT_NEAR(
+ *thresh.FractionHigh(1),
+ static_cast<double>(kNeededMeasurements) / (kNeededMeasurements + 1),
+ 0.001);
+}
+
+} // namespace webrtc
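A quick check of the constant used in these tests: with kFraction = 0.75f and kMaxMeasurements = 10, the sufficient majority inside QualityThreshold is 0.75 * 10 = 7.5, so the 8th matching measurement is the first one that can flip the state; kNeededMeasurements = static_cast<int>(0.75f * 10 + 1) = 8 agrees with that.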
diff --git a/third_party/libwebrtc/webrtc/video/receive_statistics_proxy.cc b/third_party/libwebrtc/webrtc/video/receive_statistics_proxy.cc
new file mode 100644
index 0000000000..0fc298a1bf
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/receive_statistics_proxy.cc
@@ -0,0 +1,827 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/receive_statistics_proxy.h"
+
+#include <algorithm>
+#include <cmath>
+#include <sstream>
+#include <utility>
+
+#include "modules/pacing/alr_detector.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "system_wrappers/include/clock.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+namespace {
+// Periodic time interval for processing samples for |freq_offset_counter_|.
+const int64_t kFreqOffsetProcessIntervalMs = 40000;
+
+// Configuration for bad call detection.
+const int kBadCallMinRequiredSamples = 10;
+const int kMinSampleLengthMs = 990;
+const int kNumMeasurements = 10;
+const int kNumMeasurementsVariance = kNumMeasurements * 1.5;
+const float kBadFraction = 0.8f;
+// For fps:
+// Low means low enough to be bad, high means high enough to be good.
+const int kLowFpsThreshold = 12;
+const int kHighFpsThreshold = 14;
+// For qp and fps variance:
+// Low means low enough to be good, high means high enough to be bad.
+const int kLowQpThresholdVp8 = 60;
+const int kHighQpThresholdVp8 = 70;
+const int kLowVarianceThreshold = 1;
+const int kHighVarianceThreshold = 2;
+
+// Some metrics are reported as a maximum over this period.
+// This should be synchronized with a typical getStats polling interval in
+// the clients.
+const int kMovingMaxWindowMs = 1000;
+
+// Size of the window used to calculate the framerate/bitrate.
+const int kRateStatisticsWindowSizeMs = 1000;
+
+// A ballpark estimate of the maximum common inter-frame delay.
+// Values below this are stored explicitly in the array;
+// values above it are stored in the map.
+const int kMaxCommonInterframeDelayMs = 500;
+
+std::string UmaPrefixForContentType(VideoContentType content_type) {
+ std::stringstream ss;
+ ss << "WebRTC.Video";
+ if (videocontenttypehelpers::IsScreenshare(content_type)) {
+ ss << ".Screenshare";
+ }
+ return ss.str();
+}
+
+std::string UmaSuffixForContentType(VideoContentType content_type) {
+ std::stringstream ss;
+ int simulcast_id = videocontenttypehelpers::GetSimulcastId(content_type);
+ if (simulcast_id > 0) {
+ ss << ".S" << simulcast_id - 1;
+ }
+ int experiment_id = videocontenttypehelpers::GetExperimentId(content_type);
+ if (experiment_id > 0) {
+ ss << ".ExperimentGroup" << experiment_id - 1;
+ }
+ return ss.str();
+}
+} // namespace
+
+ReceiveStatisticsProxy::ReceiveStatisticsProxy(
+ const VideoReceiveStream::Config* config,
+ Clock* clock)
+ : clock_(clock),
+ config_(*config),
+ start_ms_(clock->TimeInMilliseconds()),
+ last_sample_time_(clock->TimeInMilliseconds()),
+ fps_threshold_(kLowFpsThreshold,
+ kHighFpsThreshold,
+ kBadFraction,
+ kNumMeasurements),
+ qp_threshold_(kLowQpThresholdVp8,
+ kHighQpThresholdVp8,
+ kBadFraction,
+ kNumMeasurements),
+ variance_threshold_(kLowVarianceThreshold,
+ kHighVarianceThreshold,
+ kBadFraction,
+ kNumMeasurementsVariance),
+ num_bad_states_(0),
+ num_certain_states_(0),
+ // 1000ms window, scale 1000 for ms to s.
+ decode_fps_estimator_(1000, 1000),
+ renders_fps_estimator_(1000, 1000),
+ render_fps_tracker_(100, 10u),
+ render_pixel_tracker_(100, 10u),
+ total_byte_tracker_(100, 10u), // bucket_interval_ms, bucket_count
+ interframe_delay_max_moving_(kMovingMaxWindowMs),
+ freq_offset_counter_(clock, nullptr, kFreqOffsetProcessIntervalMs),
+ first_report_block_time_ms_(-1),
+ avg_rtt_ms_(0),
+ last_content_type_(VideoContentType::UNSPECIFIED),
+ timing_frame_info_counter_(kMovingMaxWindowMs) {
+ stats_.ssrc = config_.rtp.remote_ssrc;
+ // TODO(brandtr): Replace |rtx_stats_| with a single instance of
+ // StreamDataCounters.
+ if (config_.rtp.rtx_ssrc) {
+ rtx_stats_[config_.rtp.rtx_ssrc] = StreamDataCounters();
+ }
+}
+
+ReceiveStatisticsProxy::~ReceiveStatisticsProxy() {
+ UpdateHistograms();
+}
+
+void ReceiveStatisticsProxy::UpdateHistograms() {
+ int stream_duration_sec = (clock_->TimeInMilliseconds() - start_ms_) / 1000;
+ if (stats_.frame_counts.key_frames > 0 ||
+ stats_.frame_counts.delta_frames > 0) {
+ RTC_HISTOGRAM_COUNTS_100000("WebRTC.Video.ReceiveStreamLifetimeInSeconds",
+ stream_duration_sec);
+ RTC_LOG(LS_INFO) << "WebRTC.Video.ReceiveStreamLifetimeInSeconds "
+ << stream_duration_sec;
+ }
+
+ if (first_report_block_time_ms_ != -1 &&
+ ((clock_->TimeInMilliseconds() - first_report_block_time_ms_) / 1000) >=
+ metrics::kMinRunTimeInSeconds) {
+ int fraction_lost = report_block_stats_.FractionLostInPercent();
+ if (fraction_lost != -1) {
+ RTC_HISTOGRAM_PERCENTAGE("WebRTC.Video.ReceivedPacketsLostInPercent",
+ fraction_lost);
+ RTC_LOG(LS_INFO) << "WebRTC.Video.ReceivedPacketsLostInPercent "
+ << fraction_lost;
+ }
+ }
+
+ const int kMinRequiredSamples = 200;
+ int samples = static_cast<int>(render_fps_tracker_.TotalSampleCount());
+ if (samples >= kMinRequiredSamples) {
+ RTC_HISTOGRAM_COUNTS_100("WebRTC.Video.RenderFramesPerSecond",
+ round(render_fps_tracker_.ComputeTotalRate()));
+ RTC_HISTOGRAM_COUNTS_100000(
+ "WebRTC.Video.RenderSqrtPixelsPerSecond",
+ round(render_pixel_tracker_.ComputeTotalRate()));
+ }
+
+ int sync_offset_ms = sync_offset_counter_.Avg(kMinRequiredSamples);
+ if (sync_offset_ms != -1) {
+ RTC_HISTOGRAM_COUNTS_10000("WebRTC.Video.AVSyncOffsetInMs", sync_offset_ms);
+ RTC_LOG(LS_INFO) << "WebRTC.Video.AVSyncOffsetInMs " << sync_offset_ms;
+ }
+ AggregatedStats freq_offset_stats = freq_offset_counter_.GetStats();
+ if (freq_offset_stats.num_samples > 0) {
+ RTC_HISTOGRAM_COUNTS_10000("WebRTC.Video.RtpToNtpFreqOffsetInKhz",
+ freq_offset_stats.average);
+ RTC_LOG(LS_INFO) << "WebRTC.Video.RtpToNtpFreqOffsetInKhz, "
+ << freq_offset_stats.ToString();
+ }
+
+ int num_total_frames =
+ stats_.frame_counts.key_frames + stats_.frame_counts.delta_frames;
+ if (num_total_frames >= kMinRequiredSamples) {
+ int num_key_frames = stats_.frame_counts.key_frames;
+ int key_frames_permille =
+ (num_key_frames * 1000 + num_total_frames / 2) / num_total_frames;
+ RTC_HISTOGRAM_COUNTS_1000("WebRTC.Video.KeyFramesReceivedInPermille",
+ key_frames_permille);
+ RTC_LOG(LS_INFO) << "WebRTC.Video.KeyFramesReceivedInPermille "
+ << key_frames_permille;
+ }
+
+ int qp = qp_counters_.vp8.Avg(kMinRequiredSamples);
+ if (qp != -1) {
+ RTC_HISTOGRAM_COUNTS_200("WebRTC.Video.Decoded.Vp8.Qp", qp);
+ RTC_LOG(LS_INFO) << "WebRTC.Video.Decoded.Vp8.Qp " << qp;
+ }
+ int decode_ms = decode_time_counter_.Avg(kMinRequiredSamples);
+ if (decode_ms != -1) {
+ RTC_HISTOGRAM_COUNTS_1000("WebRTC.Video.DecodeTimeInMs", decode_ms);
+ RTC_LOG(LS_INFO) << "WebRTC.Video.DecodeTimeInMs " << decode_ms;
+ }
+ int jb_delay_ms = jitter_buffer_delay_counter_.Avg(kMinRequiredSamples);
+ if (jb_delay_ms != -1) {
+ RTC_HISTOGRAM_COUNTS_10000("WebRTC.Video.JitterBufferDelayInMs",
+ jb_delay_ms);
+ RTC_LOG(LS_INFO) << "WebRTC.Video.JitterBufferDelayInMs " << jb_delay_ms;
+ }
+
+ int target_delay_ms = target_delay_counter_.Avg(kMinRequiredSamples);
+ if (target_delay_ms != -1) {
+ RTC_HISTOGRAM_COUNTS_10000("WebRTC.Video.TargetDelayInMs", target_delay_ms);
+ RTC_LOG(LS_INFO) << "WebRTC.Video.TargetDelayInMs " << target_delay_ms;
+ }
+ int current_delay_ms = current_delay_counter_.Avg(kMinRequiredSamples);
+ if (current_delay_ms != -1) {
+ RTC_HISTOGRAM_COUNTS_10000("WebRTC.Video.CurrentDelayInMs",
+ current_delay_ms);
+ RTC_LOG(LS_INFO) << "WebRTC.Video.CurrentDelayInMs " << current_delay_ms;
+ }
+ int delay_ms = delay_counter_.Avg(kMinRequiredSamples);
+ if (delay_ms != -1)
+ RTC_HISTOGRAM_COUNTS_10000("WebRTC.Video.OnewayDelayInMs", delay_ms);
+
+ // Aggregate content_specific_stats_ by removing experiment or simulcast
+  // information.
+ std::map<VideoContentType, ContentSpecificStats> aggregated_stats;
+ for (auto it : content_specific_stats_) {
+ // Calculate simulcast specific metrics (".S0" ... ".S2" suffixes).
+ VideoContentType content_type = it.first;
+ if (videocontenttypehelpers::GetSimulcastId(content_type) > 0) {
+ // Aggregate on experiment id.
+ videocontenttypehelpers::SetExperimentId(&content_type, 0);
+ aggregated_stats[content_type].Add(it.second);
+ }
+ // Calculate experiment specific metrics (".ExperimentGroup[0-7]" suffixes).
+ content_type = it.first;
+ if (videocontenttypehelpers::GetExperimentId(content_type) > 0) {
+ // Aggregate on simulcast id.
+ videocontenttypehelpers::SetSimulcastId(&content_type, 0);
+ aggregated_stats[content_type].Add(it.second);
+ }
+ // Calculate aggregated metrics (no suffixes. Aggregated on everything).
+ content_type = it.first;
+ videocontenttypehelpers::SetSimulcastId(&content_type, 0);
+ videocontenttypehelpers::SetExperimentId(&content_type, 0);
+ aggregated_stats[content_type].Add(it.second);
+ }
+
+ for (auto it : aggregated_stats) {
+ // For the metric Foo we report the following slices:
+ // WebRTC.Video.Foo,
+ // WebRTC.Video.Screenshare.Foo,
+ // WebRTC.Video.Foo.S[0-3],
+ // WebRTC.Video.Foo.ExperimentGroup[0-7],
+ // WebRTC.Video.Screenshare.Foo.S[0-3],
+ // WebRTC.Video.Screenshare.Foo.ExperimentGroup[0-7].
+ auto content_type = it.first;
+ auto stats = it.second;
+ std::string uma_prefix = UmaPrefixForContentType(content_type);
+ std::string uma_suffix = UmaSuffixForContentType(content_type);
+ // Metrics can be sliced on either simulcast id or experiment id but not
+ // both.
+ RTC_DCHECK(videocontenttypehelpers::GetExperimentId(content_type) == 0 ||
+ videocontenttypehelpers::GetSimulcastId(content_type) == 0);
+
+ int e2e_delay_ms = stats.e2e_delay_counter.Avg(kMinRequiredSamples);
+ if (e2e_delay_ms != -1) {
+ RTC_HISTOGRAM_COUNTS_SPARSE_10000(
+ uma_prefix + ".EndToEndDelayInMs" + uma_suffix, e2e_delay_ms);
+ RTC_LOG(LS_INFO) << uma_prefix << ".EndToEndDelayInMs" << uma_suffix
+ << " " << e2e_delay_ms;
+ }
+ int e2e_delay_max_ms = stats.e2e_delay_counter.Max();
+ if (e2e_delay_max_ms != -1 && e2e_delay_ms != -1) {
+ RTC_HISTOGRAM_COUNTS_SPARSE_100000(
+ uma_prefix + ".EndToEndDelayMaxInMs" + uma_suffix, e2e_delay_max_ms);
+ RTC_LOG(LS_INFO) << uma_prefix << ".EndToEndDelayMaxInMs" << uma_suffix
+ << " " << e2e_delay_max_ms;
+ }
+ int interframe_delay_ms =
+ stats.interframe_delay_counter.Avg(kMinRequiredSamples);
+ if (interframe_delay_ms != -1) {
+ RTC_HISTOGRAM_COUNTS_SPARSE_10000(
+ uma_prefix + ".InterframeDelayInMs" + uma_suffix,
+ interframe_delay_ms);
+ RTC_LOG(LS_INFO) << uma_prefix << ".InterframeDelayInMs" << uma_suffix
+ << " " << interframe_delay_ms;
+ }
+ int interframe_delay_max_ms = stats.interframe_delay_counter.Max();
+ if (interframe_delay_max_ms != -1 && interframe_delay_ms != -1) {
+ RTC_HISTOGRAM_COUNTS_SPARSE_10000(
+ uma_prefix + ".InterframeDelayMaxInMs" + uma_suffix,
+ interframe_delay_max_ms);
+ RTC_LOG(LS_INFO) << uma_prefix << ".InterframeDelayMaxInMs" << uma_suffix
+ << " " << interframe_delay_max_ms;
+ }
+
+ rtc::Optional<uint32_t> interframe_delay_95p_ms =
+ stats.interframe_delay_percentiles.GetPercentile(0.95f);
+ if (interframe_delay_95p_ms && interframe_delay_ms != -1) {
+ RTC_HISTOGRAM_COUNTS_SPARSE_10000(
+ uma_prefix + ".InterframeDelay95PercentileInMs" + uma_suffix,
+ *interframe_delay_95p_ms);
+ RTC_LOG(LS_INFO) << uma_prefix << ".InterframeDelay95PercentileInMs"
+ << uma_suffix << " " << *interframe_delay_95p_ms;
+ }
+
+ int width = stats.received_width.Avg(kMinRequiredSamples);
+ if (width != -1) {
+ RTC_HISTOGRAM_COUNTS_SPARSE_10000(
+ uma_prefix + ".ReceivedWidthInPixels" + uma_suffix, width);
+ RTC_LOG(LS_INFO) << uma_prefix << ".ReceivedWidthInPixels" << uma_suffix
+ << " " << width;
+ }
+
+ int height = stats.received_height.Avg(kMinRequiredSamples);
+ if (height != -1) {
+ RTC_HISTOGRAM_COUNTS_SPARSE_10000(
+ uma_prefix + ".ReceivedHeightInPixels" + uma_suffix, height);
+ RTC_LOG(LS_INFO) << uma_prefix << ".ReceivedHeightInPixels" << uma_suffix
+ << " " << height;
+ }
+
+ if (content_type != VideoContentType::UNSPECIFIED) {
+ // Don't report these 3 metrics unsliced, as more precise variants
+ // are reported separately in this method.
+ float flow_duration_sec = stats.flow_duration_ms / 1000.0;
+ if (flow_duration_sec >= metrics::kMinRunTimeInSeconds) {
+ int media_bitrate_kbps = static_cast<int>(stats.total_media_bytes * 8 /
+ flow_duration_sec / 1000);
+ RTC_HISTOGRAM_COUNTS_SPARSE_10000(
+ uma_prefix + ".MediaBitrateReceivedInKbps" + uma_suffix,
+ media_bitrate_kbps);
+ RTC_LOG(LS_INFO) << uma_prefix << ".MediaBitrateReceivedInKbps"
+ << uma_suffix << " " << media_bitrate_kbps;
+ }
+
+ int num_total_frames =
+ stats.frame_counts.key_frames + stats.frame_counts.delta_frames;
+ if (num_total_frames >= kMinRequiredSamples) {
+ int num_key_frames = stats.frame_counts.key_frames;
+ int key_frames_permille =
+ (num_key_frames * 1000 + num_total_frames / 2) / num_total_frames;
+ RTC_HISTOGRAM_COUNTS_SPARSE_1000(
+ uma_prefix + ".KeyFramesReceivedInPermille" + uma_suffix,
+ key_frames_permille);
+ RTC_LOG(LS_INFO) << uma_prefix << ".KeyFramesReceivedInPermille"
+ << uma_suffix << " " << key_frames_permille;
+ }
+
+ int qp = stats.qp_counter.Avg(kMinRequiredSamples);
+ if (qp != -1) {
+ RTC_HISTOGRAM_COUNTS_SPARSE_200(
+ uma_prefix + ".Decoded.Vp8.Qp" + uma_suffix, qp);
+ RTC_LOG(LS_INFO) << uma_prefix << ".Decoded.Vp8.Qp" << uma_suffix << " "
+ << qp;
+ }
+ }
+ }
+
+ StreamDataCounters rtp = stats_.rtp_stats;
+ StreamDataCounters rtx;
+ for (auto it : rtx_stats_)
+ rtx.Add(it.second);
+ StreamDataCounters rtp_rtx = rtp;
+ rtp_rtx.Add(rtx);
+ int64_t elapsed_sec =
+ rtp_rtx.TimeSinceFirstPacketInMs(clock_->TimeInMilliseconds()) / 1000;
+ if (elapsed_sec >= metrics::kMinRunTimeInSeconds) {
+ RTC_HISTOGRAM_COUNTS_10000(
+ "WebRTC.Video.BitrateReceivedInKbps",
+ static_cast<int>(rtp_rtx.transmitted.TotalBytes() * 8 / elapsed_sec /
+ 1000));
+ int media_bitrate_kbs =
+ static_cast<int>(rtp.MediaPayloadBytes() * 8 / elapsed_sec / 1000);
+ RTC_HISTOGRAM_COUNTS_10000("WebRTC.Video.MediaBitrateReceivedInKbps",
+ media_bitrate_kbs);
+ RTC_LOG(LS_INFO) << "WebRTC.Video.MediaBitrateReceivedInKbps "
+ << media_bitrate_kbs;
+ RTC_HISTOGRAM_COUNTS_10000(
+ "WebRTC.Video.PaddingBitrateReceivedInKbps",
+ static_cast<int>(rtp_rtx.transmitted.padding_bytes * 8 / elapsed_sec /
+ 1000));
+ RTC_HISTOGRAM_COUNTS_10000(
+ "WebRTC.Video.RetransmittedBitrateReceivedInKbps",
+ static_cast<int>(rtp_rtx.retransmitted.TotalBytes() * 8 / elapsed_sec /
+ 1000));
+ if (!rtx_stats_.empty()) {
+ RTC_HISTOGRAM_COUNTS_10000("WebRTC.Video.RtxBitrateReceivedInKbps",
+ static_cast<int>(rtx.transmitted.TotalBytes() *
+ 8 / elapsed_sec / 1000));
+ }
+ if (config_.rtp.ulpfec_payload_type != -1) {
+ RTC_HISTOGRAM_COUNTS_10000(
+ "WebRTC.Video.FecBitrateReceivedInKbps",
+ static_cast<int>(rtp_rtx.fec.TotalBytes() * 8 / elapsed_sec / 1000));
+ }
+ const RtcpPacketTypeCounter& counters = stats_.rtcp_packet_type_counts;
+ RTC_HISTOGRAM_COUNTS_10000("WebRTC.Video.NackPacketsSentPerMinute",
+ counters.nack_packets * 60 / elapsed_sec);
+ RTC_HISTOGRAM_COUNTS_10000("WebRTC.Video.FirPacketsSentPerMinute",
+ counters.fir_packets * 60 / elapsed_sec);
+ RTC_HISTOGRAM_COUNTS_10000("WebRTC.Video.PliPacketsSentPerMinute",
+ counters.pli_packets * 60 / elapsed_sec);
+ if (counters.nack_requests > 0) {
+ RTC_HISTOGRAM_PERCENTAGE("WebRTC.Video.UniqueNackRequestsSentInPercent",
+ counters.UniqueNackRequestsInPercent());
+ }
+ }
+
+ if (num_certain_states_ >= kBadCallMinRequiredSamples) {
+ RTC_HISTOGRAM_PERCENTAGE("WebRTC.Video.BadCall.Any",
+ 100 * num_bad_states_ / num_certain_states_);
+ }
+ rtc::Optional<double> fps_fraction =
+ fps_threshold_.FractionHigh(kBadCallMinRequiredSamples);
+ if (fps_fraction) {
+ RTC_HISTOGRAM_PERCENTAGE("WebRTC.Video.BadCall.FrameRate",
+ static_cast<int>(100 * (1 - *fps_fraction)));
+ }
+ rtc::Optional<double> variance_fraction =
+ variance_threshold_.FractionHigh(kBadCallMinRequiredSamples);
+ if (variance_fraction) {
+ RTC_HISTOGRAM_PERCENTAGE("WebRTC.Video.BadCall.FrameRateVariance",
+ static_cast<int>(100 * *variance_fraction));
+ }
+ rtc::Optional<double> qp_fraction =
+ qp_threshold_.FractionHigh(kBadCallMinRequiredSamples);
+ if (qp_fraction) {
+ RTC_HISTOGRAM_PERCENTAGE("WebRTC.Video.BadCall.Qp",
+ static_cast<int>(100 * *qp_fraction));
+ }
+}
+
+void ReceiveStatisticsProxy::QualitySample() {
+ int64_t now = clock_->TimeInMilliseconds();
+ if (last_sample_time_ + kMinSampleLengthMs > now)
+ return;
+
+ double fps =
+ render_fps_tracker_.ComputeRateForInterval(now - last_sample_time_);
+ int qp = qp_sample_.Avg(1);
+
+ bool prev_fps_bad = !fps_threshold_.IsHigh().value_or(true);
+ bool prev_qp_bad = qp_threshold_.IsHigh().value_or(false);
+ bool prev_variance_bad = variance_threshold_.IsHigh().value_or(false);
+ bool prev_any_bad = prev_fps_bad || prev_qp_bad || prev_variance_bad;
+
+ fps_threshold_.AddMeasurement(static_cast<int>(fps));
+ if (qp != -1)
+ qp_threshold_.AddMeasurement(qp);
+ rtc::Optional<double> fps_variance_opt = fps_threshold_.CalculateVariance();
+ double fps_variance = fps_variance_opt.value_or(0);
+ if (fps_variance_opt) {
+ variance_threshold_.AddMeasurement(static_cast<int>(fps_variance));
+ }
+
+ bool fps_bad = !fps_threshold_.IsHigh().value_or(true);
+ bool qp_bad = qp_threshold_.IsHigh().value_or(false);
+ bool variance_bad = variance_threshold_.IsHigh().value_or(false);
+ bool any_bad = fps_bad || qp_bad || variance_bad;
+
+ if (!prev_any_bad && any_bad) {
+ RTC_LOG(LS_INFO) << "Bad call (any) start: " << now;
+ } else if (prev_any_bad && !any_bad) {
+ RTC_LOG(LS_INFO) << "Bad call (any) end: " << now;
+ }
+
+ if (!prev_fps_bad && fps_bad) {
+ RTC_LOG(LS_INFO) << "Bad call (fps) start: " << now;
+ } else if (prev_fps_bad && !fps_bad) {
+ RTC_LOG(LS_INFO) << "Bad call (fps) end: " << now;
+ }
+
+ if (!prev_qp_bad && qp_bad) {
+ RTC_LOG(LS_INFO) << "Bad call (qp) start: " << now;
+ } else if (prev_qp_bad && !qp_bad) {
+ RTC_LOG(LS_INFO) << "Bad call (qp) end: " << now;
+ }
+
+ if (!prev_variance_bad && variance_bad) {
+ RTC_LOG(LS_INFO) << "Bad call (variance) start: " << now;
+ } else if (prev_variance_bad && !variance_bad) {
+ RTC_LOG(LS_INFO) << "Bad call (variance) end: " << now;
+ }
+
+ RTC_LOG(LS_VERBOSE) << "SAMPLE: sample_length: " << (now - last_sample_time_)
+ << " fps: " << fps << " fps_bad: " << fps_bad
+ << " qp: " << qp << " qp_bad: " << qp_bad
+ << " variance_bad: " << variance_bad
+ << " fps_variance: " << fps_variance;
+
+ last_sample_time_ = now;
+ qp_sample_.Reset();
+
+ if (fps_threshold_.IsHigh() || variance_threshold_.IsHigh() ||
+ qp_threshold_.IsHigh()) {
+ if (any_bad)
+ ++num_bad_states_;
+ ++num_certain_states_;
+ }
+}
+
+void ReceiveStatisticsProxy::UpdateFramerate(int64_t now_ms) const {
+ int64_t old_frames_ms = now_ms - kRateStatisticsWindowSizeMs;
+ while (!frame_window_.empty() &&
+ frame_window_.begin()->first < old_frames_ms) {
+ frame_window_.erase(frame_window_.begin());
+ }
+
+ size_t framerate =
+ (frame_window_.size() * 1000 + 500) / kRateStatisticsWindowSizeMs;
+ stats_.network_frame_rate = static_cast<int>(framerate);
+}
+
+VideoReceiveStream::Stats ReceiveStatisticsProxy::GetStats() const {
+ rtc::CritScope lock(&crit_);
+  // Get the current frame rates here, as updating them only on new frames
+  // prevents us from ever correctly displaying a frame rate of 0.
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ UpdateFramerate(now_ms);
+ stats_.render_frame_rate = renders_fps_estimator_.Rate(now_ms).value_or(0);
+ stats_.decode_frame_rate = decode_fps_estimator_.Rate(now_ms).value_or(0);
+ stats_.total_bitrate_bps =
+ static_cast<int>(total_byte_tracker_.ComputeRate() * 8);
+ stats_.interframe_delay_max_ms =
+ interframe_delay_max_moving_.Max(now_ms).value_or(-1);
+ stats_.timing_frame_info = timing_frame_info_counter_.Max(now_ms);
+ stats_.content_type = last_content_type_;
+ return stats_;
+}
+
+void ReceiveStatisticsProxy::OnIncomingPayloadType(int payload_type) {
+ rtc::CritScope lock(&crit_);
+ stats_.current_payload_type = payload_type;
+}
+
+void ReceiveStatisticsProxy::OnDecoderImplementationName(
+ const char* implementation_name) {
+ rtc::CritScope lock(&crit_);
+ stats_.decoder_implementation_name = implementation_name;
+}
+
+void ReceiveStatisticsProxy::OnIncomingRate(unsigned int framerate,
+ unsigned int bitrate_bps) {
+ rtc::CritScope lock(&crit_);
+ if (stats_.rtp_stats.first_packet_time_ms != -1)
+ QualitySample();
+}
+
+void ReceiveStatisticsProxy::OnFrameBufferTimingsUpdated(
+ int decode_ms,
+ int max_decode_ms,
+ int current_delay_ms,
+ int target_delay_ms,
+ int jitter_buffer_ms,
+ int min_playout_delay_ms,
+ int render_delay_ms) {
+ rtc::CritScope lock(&crit_);
+ stats_.decode_ms = decode_ms;
+ stats_.max_decode_ms = max_decode_ms;
+ stats_.current_delay_ms = current_delay_ms;
+ stats_.target_delay_ms = target_delay_ms;
+ stats_.jitter_buffer_ms = jitter_buffer_ms;
+ stats_.min_playout_delay_ms = min_playout_delay_ms;
+ stats_.render_delay_ms = render_delay_ms;
+ decode_time_counter_.Add(decode_ms);
+ jitter_buffer_delay_counter_.Add(jitter_buffer_ms);
+ target_delay_counter_.Add(target_delay_ms);
+ current_delay_counter_.Add(current_delay_ms);
+ // Network delay (rtt/2) + target_delay_ms (jitter delay + decode time +
+ // render delay).
+ delay_counter_.Add(target_delay_ms + avg_rtt_ms_ / 2);
+}
+
+void ReceiveStatisticsProxy::OnTimingFrameInfoUpdated(
+ const TimingFrameInfo& info) {
+ rtc::CritScope lock(&crit_);
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ timing_frame_info_counter_.Add(info, now_ms);
+}
+
+void ReceiveStatisticsProxy::RtcpPacketTypesCounterUpdated(
+ uint32_t ssrc,
+ const RtcpPacketTypeCounter& packet_counter) {
+ rtc::CritScope lock(&crit_);
+ if (stats_.ssrc != ssrc)
+ return;
+ stats_.rtcp_packet_type_counts = packet_counter;
+}
+
+void ReceiveStatisticsProxy::StatisticsUpdated(
+ const webrtc::RtcpStatistics& statistics,
+ uint32_t ssrc) {
+ rtc::CritScope lock(&crit_);
+ // TODO(pbos): Handle both local and remote ssrcs here and RTC_DCHECK that we
+ // receive stats from one of them.
+ if (stats_.ssrc != ssrc)
+ return;
+ stats_.rtcp_stats = statistics;
+ report_block_stats_.Store(statistics, ssrc, 0);
+
+ if (first_report_block_time_ms_ == -1)
+ first_report_block_time_ms_ = clock_->TimeInMilliseconds();
+}
+
+void ReceiveStatisticsProxy::CNameChanged(const char* cname, uint32_t ssrc) {
+ rtc::CritScope lock(&crit_);
+ // TODO(pbos): Handle both local and remote ssrcs here and RTC_DCHECK that we
+ // receive stats from one of them.
+ if (stats_.ssrc != ssrc)
+ return;
+ stats_.c_name = cname;
+}
+
+void ReceiveStatisticsProxy::DataCountersUpdated(
+ const webrtc::StreamDataCounters& counters,
+ uint32_t ssrc) {
+ size_t last_total_bytes = 0;
+ size_t total_bytes = 0;
+ rtc::CritScope lock(&crit_);
+ if (ssrc == stats_.ssrc) {
+ last_total_bytes = stats_.rtp_stats.transmitted.TotalBytes();
+ total_bytes = counters.transmitted.TotalBytes();
+ stats_.rtp_stats = counters;
+ } else {
+ auto it = rtx_stats_.find(ssrc);
+ if (it != rtx_stats_.end()) {
+ last_total_bytes = it->second.transmitted.TotalBytes();
+ total_bytes = counters.transmitted.TotalBytes();
+ it->second = counters;
+ } else {
+ RTC_NOTREACHED() << "Unexpected stream ssrc: " << ssrc;
+ }
+ }
+ if (total_bytes > last_total_bytes)
+ total_byte_tracker_.AddSamples(total_bytes - last_total_bytes);
+}
+
+void ReceiveStatisticsProxy::OnDecodedFrame(rtc::Optional<uint8_t> qp,
+ VideoContentType content_type) {
+ rtc::CritScope lock(&crit_);
+
+ uint64_t now = clock_->TimeInMilliseconds();
+
+ ContentSpecificStats* content_specific_stats =
+ &content_specific_stats_[content_type];
+ ++stats_.frames_decoded;
+ if (qp) {
+ if (!stats_.qp_sum) {
+ if (stats_.frames_decoded != 1) {
+ RTC_LOG(LS_WARNING)
+ << "Frames decoded was not 1 when first qp value was received.";
+ stats_.frames_decoded = 1;
+ }
+ stats_.qp_sum = rtc::Optional<uint64_t>(0);
+ }
+ *stats_.qp_sum += *qp;
+ content_specific_stats->qp_counter.Add(*qp);
+ } else if (stats_.qp_sum) {
+ RTC_LOG(LS_WARNING)
+ << "QP sum was already set and no QP was given for a frame.";
+ stats_.qp_sum = rtc::Optional<uint64_t>();
+ }
+ last_content_type_ = content_type;
+ decode_fps_estimator_.Update(1, now);
+ if (last_decoded_frame_time_ms_) {
+ int64_t interframe_delay_ms = now - *last_decoded_frame_time_ms_;
+ RTC_DCHECK_GE(interframe_delay_ms, 0);
+ interframe_delay_max_moving_.Add(interframe_delay_ms, now);
+ content_specific_stats->interframe_delay_counter.Add(interframe_delay_ms);
+ content_specific_stats->interframe_delay_percentiles.Add(
+ interframe_delay_ms);
+ content_specific_stats->flow_duration_ms += interframe_delay_ms;
+ }
+ last_decoded_frame_time_ms_.emplace(now);
+}
+
+void ReceiveStatisticsProxy::OnRenderedFrame(const VideoFrame& frame) {
+ int width = frame.width();
+ int height = frame.height();
+ RTC_DCHECK_GT(width, 0);
+ RTC_DCHECK_GT(height, 0);
+ uint64_t now = clock_->TimeInMilliseconds();
+ rtc::CritScope lock(&crit_);
+ ContentSpecificStats* content_specific_stats =
+ &content_specific_stats_[last_content_type_];
+ renders_fps_estimator_.Update(1, now);
+ ++stats_.frames_rendered;
+ stats_.width = width;
+ stats_.height = height;
+ render_fps_tracker_.AddSamples(1);
+ render_pixel_tracker_.AddSamples(sqrt(width * height));
+ content_specific_stats->received_width.Add(width);
+ content_specific_stats->received_height.Add(height);
+
+ if (frame.ntp_time_ms() > 0) {
+ int64_t delay_ms = clock_->CurrentNtpInMilliseconds() - frame.ntp_time_ms();
+ if (delay_ms >= 0) {
+ content_specific_stats->e2e_delay_counter.Add(delay_ms);
+ }
+ }
+}
+
+void ReceiveStatisticsProxy::OnSyncOffsetUpdated(int64_t sync_offset_ms,
+ double estimated_freq_khz) {
+ rtc::CritScope lock(&crit_);
+ sync_offset_counter_.Add(std::abs(sync_offset_ms));
+ stats_.sync_offset_ms = sync_offset_ms;
+
+ const double kMaxFreqKhz = 10000.0;
+ int offset_khz = kMaxFreqKhz;
+ // Should not be zero or negative. If so, report max.
+ if (estimated_freq_khz < kMaxFreqKhz && estimated_freq_khz > 0.0)
+ offset_khz = static_cast<int>(std::fabs(estimated_freq_khz - 90.0) + 0.5);
+
+ freq_offset_counter_.Add(offset_khz);
+}
+
+void ReceiveStatisticsProxy::OnReceiveRatesUpdated(uint32_t bitRate,
+ uint32_t frameRate) {
+}
+
+void ReceiveStatisticsProxy::OnCompleteFrame(bool is_keyframe,
+ size_t size_bytes,
+ VideoContentType content_type) {
+ rtc::CritScope lock(&crit_);
+ if (is_keyframe) {
+ ++stats_.frame_counts.key_frames;
+ } else {
+ ++stats_.frame_counts.delta_frames;
+ }
+
+ ContentSpecificStats* content_specific_stats =
+ &content_specific_stats_[content_type];
+
+ content_specific_stats->total_media_bytes += size_bytes;
+ if (is_keyframe) {
+ ++content_specific_stats->frame_counts.key_frames;
+ } else {
+ ++content_specific_stats->frame_counts.delta_frames;
+ }
+
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ frame_window_.insert(std::make_pair(now_ms, size_bytes));
+ UpdateFramerate(now_ms);
+}
+
+void ReceiveStatisticsProxy::OnFrameCountsUpdated(
+ const FrameCounts& frame_counts) {
+ rtc::CritScope lock(&crit_);
+ stats_.frame_counts = frame_counts;
+}
+
+void ReceiveStatisticsProxy::OnDiscardedPacketsUpdated(int discarded_packets) {
+ rtc::CritScope lock(&crit_);
+ stats_.discarded_packets = discarded_packets;
+}
+
+void ReceiveStatisticsProxy::OnPreDecode(
+ const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info) {
+ if (!codec_specific_info || encoded_image.qp_ == -1) {
+ return;
+ }
+ if (codec_specific_info->codecType == kVideoCodecVP8) {
+ qp_counters_.vp8.Add(encoded_image.qp_);
+ rtc::CritScope lock(&crit_);
+ qp_sample_.Add(encoded_image.qp_);
+ }
+}
+
+void ReceiveStatisticsProxy::OnStreamInactive() {
+ // TODO(sprang): Figure out any other state that should be reset.
+
+ rtc::CritScope lock(&crit_);
+ // Don't report inter-frame delay if stream was paused.
+ last_decoded_frame_time_ms_.reset();
+}
+
+void ReceiveStatisticsProxy::SampleCounter::Add(int sample) {
+ sum += sample;
+ ++num_samples;
+ if (!max || sample > *max) {
+ max.emplace(sample);
+ }
+}
+
+void ReceiveStatisticsProxy::SampleCounter::Add(const SampleCounter& other) {
+ sum += other.sum;
+ num_samples += other.num_samples;
+ if (other.max && (!max || *max < *other.max))
+ max = other.max;
+}
+
+int ReceiveStatisticsProxy::SampleCounter::Avg(
+ int64_t min_required_samples) const {
+ if (num_samples < min_required_samples || num_samples == 0)
+ return -1;
+ return static_cast<int>(sum / num_samples);
+}
+
+int ReceiveStatisticsProxy::SampleCounter::Max() const {
+ return max.value_or(-1);
+}
+
+void ReceiveStatisticsProxy::SampleCounter::Reset() {
+ num_samples = 0;
+ sum = 0;
+ max.reset();
+}
+
+void ReceiveStatisticsProxy::OnRttUpdate(int64_t avg_rtt_ms,
+ int64_t max_rtt_ms) {
+ rtc::CritScope lock(&crit_);
+ avg_rtt_ms_ = avg_rtt_ms;
+}
+
+ReceiveStatisticsProxy::ContentSpecificStats::ContentSpecificStats()
+ : interframe_delay_percentiles(kMaxCommonInterframeDelayMs) {}
+
+void ReceiveStatisticsProxy::ContentSpecificStats::Add(
+ const ContentSpecificStats& other) {
+ e2e_delay_counter.Add(other.e2e_delay_counter);
+ interframe_delay_counter.Add(other.interframe_delay_counter);
+ flow_duration_ms += other.flow_duration_ms;
+ total_media_bytes += other.total_media_bytes;
+ received_height.Add(other.received_height);
+ received_width.Add(other.received_width);
+ qp_counter.Add(other.qp_counter);
+ frame_counts.key_frames += other.frame_counts.key_frames;
+ frame_counts.delta_frames += other.frame_counts.delta_frames;
+ interframe_delay_percentiles.Add(other.interframe_delay_percentiles);
+}
+} // namespace webrtc
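An illustration of the metric-name slicing performed in UpdateHistograms() above (a sketch, not part of this patch; SlicedMetricName is a hypothetical helper):

    #include <string>

    // Composes a sliced histogram name the way UpdateHistograms() does, e.g.
    // prefix "WebRTC.Video.Screenshare", metric "InterframeDelayInMs" and
    // suffix ".S1" (simulcast id 2) yield
    // "WebRTC.Video.Screenshare.InterframeDelayInMs.S1".
    std::string SlicedMetricName(const std::string& uma_prefix,
                                 const std::string& metric,
                                 const std::string& uma_suffix) {
      return uma_prefix + "." + metric + uma_suffix;
    }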
diff --git a/third_party/libwebrtc/webrtc/video/receive_statistics_proxy.h b/third_party/libwebrtc/webrtc/video/receive_statistics_proxy.h
new file mode 100644
index 0000000000..0e0c5b45b5
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/receive_statistics_proxy.h
@@ -0,0 +1,193 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_RECEIVE_STATISTICS_PROXY_H_
+#define VIDEO_RECEIVE_STATISTICS_PROXY_H_
+
+#include <map>
+#include <string>
+#include <vector>
+
+#include "api/optional.h"
+#include "call/video_receive_stream.h"
+#include "common_types.h" // NOLINT(build/include)
+#include "common_video/include/frame_callback.h"
+#include "modules/video_coding/include/video_coding_defines.h"
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/numerics/histogram_percentile_counter.h"
+#include "rtc_base/numerics/moving_max_counter.h"
+#include "rtc_base/rate_statistics.h"
+#include "rtc_base/ratetracker.h"
+#include "rtc_base/thread_annotations.h"
+#include "video/quality_threshold.h"
+#include "video/report_block_stats.h"
+#include "video/stats_counter.h"
+#include "video/video_stream_decoder.h"
+
+namespace webrtc {
+
+class Clock;
+class ViECodec;
+class ViEDecoderObserver;
+struct CodecSpecificInfo;
+
+class ReceiveStatisticsProxy : public VCMReceiveStatisticsCallback,
+ public RtcpStatisticsCallback,
+ public RtcpPacketTypeCounterObserver,
+ public StreamDataCountersCallback,
+ public CallStatsObserver {
+ public:
+ ReceiveStatisticsProxy(const VideoReceiveStream::Config* config,
+ Clock* clock);
+ virtual ~ReceiveStatisticsProxy();
+
+ VideoReceiveStream::Stats GetStats() const;
+
+ void OnDecodedFrame(rtc::Optional<uint8_t> qp, VideoContentType content_type);
+ void OnSyncOffsetUpdated(int64_t sync_offset_ms, double estimated_freq_khz);
+ void OnRenderedFrame(const VideoFrame& frame);
+ void OnIncomingPayloadType(int payload_type);
+ void OnDecoderImplementationName(const char* implementation_name);
+ void OnIncomingRate(unsigned int framerate, unsigned int bitrate_bps);
+
+ void OnPreDecode(const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info);
+
+ // Indicates video stream has been paused (no incoming packets).
+ void OnStreamInactive();
+
+ // Overrides VCMReceiveStatisticsCallback.
+ void OnReceiveRatesUpdated(uint32_t bitRate, uint32_t frameRate) override;
+ void OnFrameCountsUpdated(const FrameCounts& frame_counts) override;
+ void OnDiscardedPacketsUpdated(int discarded_packets) override;
+ void OnCompleteFrame(bool is_keyframe,
+ size_t size_bytes,
+ VideoContentType content_type) override;
+ void OnFrameBufferTimingsUpdated(int decode_ms,
+ int max_decode_ms,
+ int current_delay_ms,
+ int target_delay_ms,
+ int jitter_buffer_ms,
+ int min_playout_delay_ms,
+ int render_delay_ms) override;
+
+ void OnTimingFrameInfoUpdated(const TimingFrameInfo& info) override;
+
+ // Overrides RtcpStatisticsCallback.
+ void StatisticsUpdated(const webrtc::RtcpStatistics& statistics,
+ uint32_t ssrc) override;
+ void CNameChanged(const char* cname, uint32_t ssrc) override;
+
+ // Overrides RtcpPacketTypeCounterObserver.
+ void RtcpPacketTypesCounterUpdated(
+ uint32_t ssrc,
+ const RtcpPacketTypeCounter& packet_counter) override;
+ // Overrides StreamDataCountersCallback.
+ void DataCountersUpdated(const webrtc::StreamDataCounters& counters,
+ uint32_t ssrc) override;
+
+ // Implements CallStatsObserver.
+ void OnRttUpdate(int64_t avg_rtt_ms, int64_t max_rtt_ms) override;
+
+ private:
+ struct SampleCounter {
+ SampleCounter() : sum(0), num_samples(0) {}
+ void Add(int sample);
+ int Avg(int64_t min_required_samples) const;
+ int Max() const;
+ void Reset();
+ void Add(const SampleCounter& other);
+
+ private:
+ int64_t sum;
+ int64_t num_samples;
+ rtc::Optional<int> max;
+ };
+
+ struct QpCounters {
+ SampleCounter vp8;
+ };
+
+ struct ContentSpecificStats {
+ ContentSpecificStats();
+
+ void Add(const ContentSpecificStats& other);
+
+ SampleCounter e2e_delay_counter;
+ SampleCounter interframe_delay_counter;
+ int64_t flow_duration_ms = 0;
+ int64_t total_media_bytes = 0;
+ SampleCounter received_width;
+ SampleCounter received_height;
+ SampleCounter qp_counter;
+ FrameCounts frame_counts;
+ rtc::HistogramPercentileCounter interframe_delay_percentiles;
+ };
+
+ void UpdateHistograms() RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_);
+
+ void QualitySample() RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_);
+
+ // Removes info about old frames and then updates the framerate.
+ void UpdateFramerate(int64_t now_ms) const
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_);
+
+ Clock* const clock_;
+  // The config is owned by the owner of this ReceiveStatisticsProxy instance
+  // and is guaranteed to outlive |this|.
+ // TODO(tommi): In practice the config_ reference is only used for accessing
+ // config_.rtp.ulpfec.ulpfec_payload_type. Instead of holding a pointer back,
+ // we could just store the value of ulpfec_payload_type and change the
+ // ReceiveStatisticsProxy() ctor to accept a const& of Config (since we'll
+ // then no longer store a pointer to the object).
+ const VideoReceiveStream::Config& config_;
+ const int64_t start_ms_;
+
+ rtc::CriticalSection crit_;
+ int64_t last_sample_time_ RTC_GUARDED_BY(crit_);
+ QualityThreshold fps_threshold_ RTC_GUARDED_BY(crit_);
+ QualityThreshold qp_threshold_ RTC_GUARDED_BY(crit_);
+ QualityThreshold variance_threshold_ RTC_GUARDED_BY(crit_);
+ SampleCounter qp_sample_ RTC_GUARDED_BY(crit_);
+ int num_bad_states_ RTC_GUARDED_BY(crit_);
+ int num_certain_states_ RTC_GUARDED_BY(crit_);
+ mutable VideoReceiveStream::Stats stats_ RTC_GUARDED_BY(crit_);
+ RateStatistics decode_fps_estimator_ RTC_GUARDED_BY(crit_);
+ RateStatistics renders_fps_estimator_ RTC_GUARDED_BY(crit_);
+ rtc::RateTracker render_fps_tracker_ RTC_GUARDED_BY(crit_);
+ rtc::RateTracker render_pixel_tracker_ RTC_GUARDED_BY(crit_);
+ rtc::RateTracker total_byte_tracker_ RTC_GUARDED_BY(crit_);
+ SampleCounter sync_offset_counter_ RTC_GUARDED_BY(crit_);
+ SampleCounter decode_time_counter_ RTC_GUARDED_BY(crit_);
+ SampleCounter jitter_buffer_delay_counter_ RTC_GUARDED_BY(crit_);
+ SampleCounter target_delay_counter_ RTC_GUARDED_BY(crit_);
+ SampleCounter current_delay_counter_ RTC_GUARDED_BY(crit_);
+ SampleCounter delay_counter_ RTC_GUARDED_BY(crit_);
+ mutable rtc::MovingMaxCounter<int> interframe_delay_max_moving_
+ RTC_GUARDED_BY(crit_);
+ std::map<VideoContentType, ContentSpecificStats> content_specific_stats_
+ RTC_GUARDED_BY(crit_);
+ MaxCounter freq_offset_counter_ RTC_GUARDED_BY(crit_);
+ int64_t first_report_block_time_ms_ RTC_GUARDED_BY(crit_);
+ ReportBlockStats report_block_stats_ RTC_GUARDED_BY(crit_);
+ QpCounters qp_counters_; // Only accessed on the decoding thread.
+ std::map<uint32_t, StreamDataCounters> rtx_stats_ RTC_GUARDED_BY(crit_);
+ int64_t avg_rtt_ms_ RTC_GUARDED_BY(crit_);
+ mutable std::map<int64_t, size_t> frame_window_ RTC_GUARDED_BY(&crit_);
+ VideoContentType last_content_type_ RTC_GUARDED_BY(&crit_);
+ rtc::Optional<int64_t> last_decoded_frame_time_ms_ RTC_GUARDED_BY(&crit_);
+  // Mutable because calling Max() on MovingMaxCounter is not const, yet it
+  // is called from the const GetStats().
+ mutable rtc::MovingMaxCounter<TimingFrameInfo> timing_frame_info_counter_
+ RTC_GUARDED_BY(&crit_);
+};
+
+} // namespace webrtc
+#endif // VIDEO_RECEIVE_STATISTICS_PROXY_H_
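A standalone analogue of the private SampleCounter declared above, illustrating its -1 "not enough samples" sentinel (a sketch under that assumption, not part of this patch):

    #include <cstdint>

    struct MiniSampleCounter {
      int64_t sum = 0;
      int64_t num_samples = 0;
      void Add(int sample) { sum += sample; ++num_samples; }
      int Avg(int64_t min_required_samples) const {
        // -1 tells callers to skip histogram reporting for this metric.
        if (num_samples < min_required_samples || num_samples == 0)
          return -1;
        return static_cast<int>(sum / num_samples);
      }
    };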
diff --git a/third_party/libwebrtc/webrtc/video/receive_statistics_proxy_unittest.cc b/third_party/libwebrtc/webrtc/video/receive_statistics_proxy_unittest.cc
new file mode 100644
index 0000000000..44fdc8bd15
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/receive_statistics_proxy_unittest.cc
@@ -0,0 +1,1002 @@
+/*
+ * Copyright 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/receive_statistics_proxy.h"
+
+#include <limits>
+#include <memory>
+#include <utility>
+
+#include "api/video/i420_buffer.h"
+#include "api/video/video_frame.h"
+#include "api/video/video_rotation.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "system_wrappers/include/metrics.h"
+#include "system_wrappers/include/metrics_default.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+const int64_t kFreqOffsetProcessIntervalInMs = 40000;
+const uint32_t kLocalSsrc = 123;
+const uint32_t kRemoteSsrc = 456;
+const int kMinRequiredSamples = 200;
+} // namespace
+
+// TODO(sakal): ReceiveStatisticsProxy is lacking unit tests.
+class ReceiveStatisticsProxyTest
+ : public ::testing::TestWithParam<webrtc::VideoContentType> {
+ public:
+ ReceiveStatisticsProxyTest() : fake_clock_(1234), config_(GetTestConfig()) {}
+ virtual ~ReceiveStatisticsProxyTest() {}
+
+ protected:
+ virtual void SetUp() {
+ metrics::Reset();
+ statistics_proxy_.reset(new ReceiveStatisticsProxy(&config_, &fake_clock_));
+ }
+
+ VideoReceiveStream::Config GetTestConfig() {
+ VideoReceiveStream::Config config(nullptr);
+ config.rtp.local_ssrc = kLocalSsrc;
+ config.rtp.remote_ssrc = kRemoteSsrc;
+ return config;
+ }
+
+ void InsertFirstRtpPacket(uint32_t ssrc) {
+ StreamDataCounters counters;
+ counters.first_packet_time_ms = fake_clock_.TimeInMilliseconds();
+ statistics_proxy_->DataCountersUpdated(counters, ssrc);
+ }
+
+ VideoFrame CreateFrame(int width, int height) {
+ VideoFrame frame(I420Buffer::Create(width, height), 0, 0, kVideoRotation_0);
+ frame.set_ntp_time_ms(fake_clock_.CurrentNtpInMilliseconds());
+ return frame;
+ }
+
+ SimulatedClock fake_clock_;
+ const VideoReceiveStream::Config config_;
+ std::unique_ptr<ReceiveStatisticsProxy> statistics_proxy_;
+};
+
+TEST_F(ReceiveStatisticsProxyTest, OnDecodedFrameIncreasesFramesDecoded) {
+ EXPECT_EQ(0u, statistics_proxy_->GetStats().frames_decoded);
+ for (uint32_t i = 1; i <= 3; ++i) {
+ statistics_proxy_->OnDecodedFrame(rtc::Optional<uint8_t>(),
+ VideoContentType::UNSPECIFIED);
+ EXPECT_EQ(i, statistics_proxy_->GetStats().frames_decoded);
+ }
+}
+
+TEST_F(ReceiveStatisticsProxyTest, OnDecodedFrameWithQpResetsFramesDecoded) {
+ EXPECT_EQ(0u, statistics_proxy_->GetStats().frames_decoded);
+ for (uint32_t i = 1; i <= 3; ++i) {
+ statistics_proxy_->OnDecodedFrame(rtc::Optional<uint8_t>(),
+ VideoContentType::UNSPECIFIED);
+ EXPECT_EQ(i, statistics_proxy_->GetStats().frames_decoded);
+ }
+ statistics_proxy_->OnDecodedFrame(rtc::Optional<uint8_t>(1u),
+ VideoContentType::UNSPECIFIED);
+ EXPECT_EQ(1u, statistics_proxy_->GetStats().frames_decoded);
+}
+
+TEST_F(ReceiveStatisticsProxyTest, OnDecodedFrameIncreasesQpSum) {
+ EXPECT_EQ(rtc::Optional<uint64_t>(), statistics_proxy_->GetStats().qp_sum);
+ statistics_proxy_->OnDecodedFrame(rtc::Optional<uint8_t>(3u),
+ VideoContentType::UNSPECIFIED);
+ EXPECT_EQ(rtc::Optional<uint64_t>(3u), statistics_proxy_->GetStats().qp_sum);
+ statistics_proxy_->OnDecodedFrame(rtc::Optional<uint8_t>(127u),
+ VideoContentType::UNSPECIFIED);
+ EXPECT_EQ(rtc::Optional<uint64_t>(130u),
+ statistics_proxy_->GetStats().qp_sum);
+}
+
+TEST_F(ReceiveStatisticsProxyTest, ReportsContentType) {
+ const std::string kRealtimeString("realtime");
+ const std::string kScreenshareString("screen");
+ EXPECT_EQ(kRealtimeString, videocontenttypehelpers::ToString(
+ statistics_proxy_->GetStats().content_type));
+ statistics_proxy_->OnDecodedFrame(rtc::Optional<uint8_t>(3u),
+ VideoContentType::SCREENSHARE);
+ EXPECT_EQ(kScreenshareString, videocontenttypehelpers::ToString(
+ statistics_proxy_->GetStats().content_type));
+ statistics_proxy_->OnDecodedFrame(rtc::Optional<uint8_t>(3u),
+ VideoContentType::UNSPECIFIED);
+ EXPECT_EQ(kRealtimeString, videocontenttypehelpers::ToString(
+ statistics_proxy_->GetStats().content_type));
+}
+
+TEST_F(ReceiveStatisticsProxyTest, ReportsMaxInterframeDelay) {
+ const int64_t kInterframeDelayMs1 = 100;
+ const int64_t kInterframeDelayMs2 = 200;
+ const int64_t kInterframeDelayMs3 = 100;
+ EXPECT_EQ(-1, statistics_proxy_->GetStats().interframe_delay_max_ms);
+ statistics_proxy_->OnDecodedFrame(rtc::Optional<uint8_t>(3u),
+ VideoContentType::UNSPECIFIED);
+ EXPECT_EQ(-1, statistics_proxy_->GetStats().interframe_delay_max_ms);
+
+ fake_clock_.AdvanceTimeMilliseconds(kInterframeDelayMs1);
+ statistics_proxy_->OnDecodedFrame(rtc::Optional<uint8_t>(127u),
+ VideoContentType::UNSPECIFIED);
+ EXPECT_EQ(kInterframeDelayMs1,
+ statistics_proxy_->GetStats().interframe_delay_max_ms);
+
+ fake_clock_.AdvanceTimeMilliseconds(kInterframeDelayMs2);
+ statistics_proxy_->OnDecodedFrame(rtc::Optional<uint8_t>(127u),
+ VideoContentType::UNSPECIFIED);
+ EXPECT_EQ(kInterframeDelayMs2,
+ statistics_proxy_->GetStats().interframe_delay_max_ms);
+
+ fake_clock_.AdvanceTimeMilliseconds(kInterframeDelayMs3);
+ statistics_proxy_->OnDecodedFrame(rtc::Optional<uint8_t>(127u),
+ VideoContentType::UNSPECIFIED);
+ // kInterframeDelayMs3 is smaller than kInterframeDelayMs2.
+ EXPECT_EQ(kInterframeDelayMs2,
+ statistics_proxy_->GetStats().interframe_delay_max_ms);
+}
+
+TEST_F(ReceiveStatisticsProxyTest, ReportInterframeDelayInWindow) {
+ const int64_t kInterframeDelayMs1 = 900;
+ const int64_t kInterframeDelayMs2 = 750;
+ const int64_t kInterframeDelayMs3 = 700;
+ EXPECT_EQ(-1, statistics_proxy_->GetStats().interframe_delay_max_ms);
+ statistics_proxy_->OnDecodedFrame(rtc::Optional<uint8_t>(3u),
+ VideoContentType::UNSPECIFIED);
+ EXPECT_EQ(-1, statistics_proxy_->GetStats().interframe_delay_max_ms);
+
+ fake_clock_.AdvanceTimeMilliseconds(kInterframeDelayMs1);
+ statistics_proxy_->OnDecodedFrame(rtc::Optional<uint8_t>(127u),
+ VideoContentType::UNSPECIFIED);
+ EXPECT_EQ(kInterframeDelayMs1,
+ statistics_proxy_->GetStats().interframe_delay_max_ms);
+
+ fake_clock_.AdvanceTimeMilliseconds(kInterframeDelayMs2);
+ statistics_proxy_->OnDecodedFrame(rtc::Optional<uint8_t>(127u),
+ VideoContentType::UNSPECIFIED);
+  // The first delay is still the maximum.
+ EXPECT_EQ(kInterframeDelayMs1,
+ statistics_proxy_->GetStats().interframe_delay_max_ms);
+
+ fake_clock_.AdvanceTimeMilliseconds(kInterframeDelayMs3);
+ statistics_proxy_->OnDecodedFrame(rtc::Optional<uint8_t>(127u),
+ VideoContentType::UNSPECIFIED);
+ // Now the first sample is out of the window, so the second is the maximum.
+ EXPECT_EQ(kInterframeDelayMs2,
+ statistics_proxy_->GetStats().interframe_delay_max_ms);
+}
+
+TEST_F(ReceiveStatisticsProxyTest, OnDecodedFrameWithoutQpQpSumWontExist) {
+ EXPECT_EQ(rtc::Optional<uint64_t>(), statistics_proxy_->GetStats().qp_sum);
+ statistics_proxy_->OnDecodedFrame(rtc::Optional<uint8_t>(),
+ VideoContentType::UNSPECIFIED);
+ EXPECT_EQ(rtc::Optional<uint64_t>(), statistics_proxy_->GetStats().qp_sum);
+}
+
+TEST_F(ReceiveStatisticsProxyTest, OnDecodedFrameWithoutQpResetsQpSum) {
+ EXPECT_EQ(rtc::Optional<uint64_t>(), statistics_proxy_->GetStats().qp_sum);
+ statistics_proxy_->OnDecodedFrame(rtc::Optional<uint8_t>(3u),
+ VideoContentType::UNSPECIFIED);
+ EXPECT_EQ(rtc::Optional<uint64_t>(3u), statistics_proxy_->GetStats().qp_sum);
+ statistics_proxy_->OnDecodedFrame(rtc::Optional<uint8_t>(),
+ VideoContentType::UNSPECIFIED);
+ EXPECT_EQ(rtc::Optional<uint64_t>(), statistics_proxy_->GetStats().qp_sum);
+}
+
+TEST_F(ReceiveStatisticsProxyTest, OnRenderedFrameIncreasesFramesRendered) {
+ EXPECT_EQ(0u, statistics_proxy_->GetStats().frames_rendered);
+ webrtc::VideoFrame frame(webrtc::I420Buffer::Create(1, 1), 0, 0,
+ webrtc::kVideoRotation_0);
+ for (uint32_t i = 1; i <= 3; ++i) {
+ statistics_proxy_->OnRenderedFrame(frame);
+ EXPECT_EQ(i, statistics_proxy_->GetStats().frames_rendered);
+ }
+}
+
+TEST_F(ReceiveStatisticsProxyTest, GetStatsReportsSsrc) {
+ EXPECT_EQ(kRemoteSsrc, statistics_proxy_->GetStats().ssrc);
+}
+
+TEST_F(ReceiveStatisticsProxyTest, GetStatsReportsIncomingPayloadType) {
+ const int kPayloadType = 111;
+ statistics_proxy_->OnIncomingPayloadType(kPayloadType);
+ EXPECT_EQ(kPayloadType, statistics_proxy_->GetStats().current_payload_type);
+}
+
+TEST_F(ReceiveStatisticsProxyTest, GetStatsReportsDecoderImplementationName) {
+ const char* kName = "decoderName";
+ statistics_proxy_->OnDecoderImplementationName(kName);
+ EXPECT_STREQ(
+ kName, statistics_proxy_->GetStats().decoder_implementation_name.c_str());
+}
+
+TEST_F(ReceiveStatisticsProxyTest, GetStatsReportsOnCompleteFrame) {
+ const int kFrameSizeBytes = 1000;
+ statistics_proxy_->OnCompleteFrame(true, kFrameSizeBytes,
+ VideoContentType::UNSPECIFIED);
+ VideoReceiveStream::Stats stats = statistics_proxy_->GetStats();
+ EXPECT_EQ(1, stats.network_frame_rate);
+ EXPECT_EQ(1, stats.frame_counts.key_frames);
+ EXPECT_EQ(0, stats.frame_counts.delta_frames);
+}
+
+TEST_F(ReceiveStatisticsProxyTest, GetStatsReportsDecodeTimingStats) {
+ const int kDecodeMs = 1;
+ const int kMaxDecodeMs = 2;
+ const int kCurrentDelayMs = 3;
+ const int kTargetDelayMs = 4;
+ const int kJitterBufferMs = 5;
+ const int kMinPlayoutDelayMs = 6;
+ const int kRenderDelayMs = 7;
+ const int64_t kRttMs = 8;
+ statistics_proxy_->OnRttUpdate(kRttMs, 0);
+ statistics_proxy_->OnFrameBufferTimingsUpdated(
+ kDecodeMs, kMaxDecodeMs, kCurrentDelayMs, kTargetDelayMs, kJitterBufferMs,
+ kMinPlayoutDelayMs, kRenderDelayMs);
+ VideoReceiveStream::Stats stats = statistics_proxy_->GetStats();
+ EXPECT_EQ(kDecodeMs, stats.decode_ms);
+ EXPECT_EQ(kMaxDecodeMs, stats.max_decode_ms);
+ EXPECT_EQ(kCurrentDelayMs, stats.current_delay_ms);
+ EXPECT_EQ(kTargetDelayMs, stats.target_delay_ms);
+ EXPECT_EQ(kJitterBufferMs, stats.jitter_buffer_ms);
+ EXPECT_EQ(kMinPlayoutDelayMs, stats.min_playout_delay_ms);
+ EXPECT_EQ(kRenderDelayMs, stats.render_delay_ms);
+}
+
+TEST_F(ReceiveStatisticsProxyTest, GetStatsReportsRtcpPacketTypeCounts) {
+ const uint32_t kFirPackets = 33;
+ const uint32_t kPliPackets = 44;
+ const uint32_t kNackPackets = 55;
+ RtcpPacketTypeCounter counter;
+ counter.fir_packets = kFirPackets;
+ counter.pli_packets = kPliPackets;
+ counter.nack_packets = kNackPackets;
+ statistics_proxy_->RtcpPacketTypesCounterUpdated(kRemoteSsrc, counter);
+ VideoReceiveStream::Stats stats = statistics_proxy_->GetStats();
+ EXPECT_EQ(kFirPackets, stats.rtcp_packet_type_counts.fir_packets);
+ EXPECT_EQ(kPliPackets, stats.rtcp_packet_type_counts.pli_packets);
+ EXPECT_EQ(kNackPackets, stats.rtcp_packet_type_counts.nack_packets);
+}
+
+TEST_F(ReceiveStatisticsProxyTest,
+ GetStatsReportsNoRtcpPacketTypeCountsForUnknownSsrc) {
+ RtcpPacketTypeCounter counter;
+ counter.fir_packets = 33;
+ statistics_proxy_->RtcpPacketTypesCounterUpdated(kRemoteSsrc + 1, counter);
+ EXPECT_EQ(0u,
+ statistics_proxy_->GetStats().rtcp_packet_type_counts.fir_packets);
+}
+
+TEST_F(ReceiveStatisticsProxyTest, GetStatsReportsFrameCounts) {
+ const int kKeyFrames = 3;
+ const int kDeltaFrames = 22;
+ FrameCounts frame_counts;
+ frame_counts.key_frames = kKeyFrames;
+ frame_counts.delta_frames = kDeltaFrames;
+ statistics_proxy_->OnFrameCountsUpdated(frame_counts);
+ VideoReceiveStream::Stats stats = statistics_proxy_->GetStats();
+ EXPECT_EQ(kKeyFrames, stats.frame_counts.key_frames);
+ EXPECT_EQ(kDeltaFrames, stats.frame_counts.delta_frames);
+}
+
+TEST_F(ReceiveStatisticsProxyTest, GetStatsReportsDiscardedPackets) {
+ const int kDiscardedPackets = 12;
+ statistics_proxy_->OnDiscardedPacketsUpdated(kDiscardedPackets);
+ EXPECT_EQ(kDiscardedPackets, statistics_proxy_->GetStats().discarded_packets);
+}
+
+TEST_F(ReceiveStatisticsProxyTest, GetStatsReportsRtcpStats) {
+ const uint8_t kFracLost = 0;
+ const uint32_t kCumLost = 1;
+ const uint32_t kExtSeqNum = 10;
+ const uint32_t kJitter = 4;
+
+ RtcpStatistics rtcp_stats;
+ rtcp_stats.fraction_lost = kFracLost;
+ rtcp_stats.packets_lost = kCumLost;
+ rtcp_stats.extended_highest_sequence_number = kExtSeqNum;
+ rtcp_stats.jitter = kJitter;
+ statistics_proxy_->StatisticsUpdated(rtcp_stats, kRemoteSsrc);
+
+ VideoReceiveStream::Stats stats = statistics_proxy_->GetStats();
+ EXPECT_EQ(kFracLost, stats.rtcp_stats.fraction_lost);
+ EXPECT_EQ(kCumLost, stats.rtcp_stats.packets_lost);
+ EXPECT_EQ(kExtSeqNum, stats.rtcp_stats.extended_highest_sequence_number);
+ EXPECT_EQ(kJitter, stats.rtcp_stats.jitter);
+}
+
+TEST_F(ReceiveStatisticsProxyTest, GetStatsReportsCName) {
+ const char* kName = "cName";
+ statistics_proxy_->CNameChanged(kName, kRemoteSsrc);
+ EXPECT_STREQ(kName, statistics_proxy_->GetStats().c_name.c_str());
+}
+
+TEST_F(ReceiveStatisticsProxyTest, GetStatsReportsNoCNameForUnknownSsrc) {
+ const char* kName = "cName";
+ statistics_proxy_->CNameChanged(kName, kRemoteSsrc + 1);
+ EXPECT_STREQ("", statistics_proxy_->GetStats().c_name.c_str());
+}
+
+TEST_F(ReceiveStatisticsProxyTest, ReportsLongestTimingFrameInfo) {
+ const int64_t kShortEndToEndDelay = 10;
+ const int64_t kMedEndToEndDelay = 20;
+ const int64_t kLongEndToEndDelay = 100;
+ const uint32_t kExpectedRtpTimestamp = 2;
+ TimingFrameInfo info;
+ rtc::Optional<TimingFrameInfo> result;
+ info.rtp_timestamp = kExpectedRtpTimestamp - 1;
+ info.capture_time_ms = 0;
+ info.decode_finish_ms = kShortEndToEndDelay;
+ statistics_proxy_->OnTimingFrameInfoUpdated(info);
+  info.rtp_timestamp =
+      kExpectedRtpTimestamp;  // This frame should be reported in the end.
+ info.capture_time_ms = 0;
+ info.decode_finish_ms = kLongEndToEndDelay;
+ statistics_proxy_->OnTimingFrameInfoUpdated(info);
+ info.rtp_timestamp = kExpectedRtpTimestamp + 1;
+ info.capture_time_ms = 0;
+ info.decode_finish_ms = kMedEndToEndDelay;
+ statistics_proxy_->OnTimingFrameInfoUpdated(info);
+ result = statistics_proxy_->GetStats().timing_frame_info;
+ EXPECT_TRUE(result);
+ EXPECT_EQ(kExpectedRtpTimestamp, result->rtp_timestamp);
+}
+
+TEST_F(ReceiveStatisticsProxyTest, RespectsReportingIntervalForTimingFrames) {
+ TimingFrameInfo info;
+ const int64_t kShortEndToEndDelay = 10;
+ const uint32_t kExpectedRtpTimestamp = 2;
+ const int64_t kShortDelayMs = 1000;
+ const int64_t kLongDelayMs = 10000;
+ rtc::Optional<TimingFrameInfo> result;
+ info.rtp_timestamp = kExpectedRtpTimestamp;
+ info.capture_time_ms = 0;
+ info.decode_finish_ms = kShortEndToEndDelay;
+ statistics_proxy_->OnTimingFrameInfoUpdated(info);
+ fake_clock_.AdvanceTimeMilliseconds(kShortDelayMs);
+ result = statistics_proxy_->GetStats().timing_frame_info;
+ EXPECT_TRUE(result);
+ EXPECT_EQ(kExpectedRtpTimestamp, result->rtp_timestamp);
+ fake_clock_.AdvanceTimeMilliseconds(kLongDelayMs);
+ result = statistics_proxy_->GetStats().timing_frame_info;
+ EXPECT_FALSE(result);
+}
+
+TEST_F(ReceiveStatisticsProxyTest, LifetimeHistogramIsUpdated) {
+ const int64_t kTimeSec = 3;
+ fake_clock_.AdvanceTimeMilliseconds(kTimeSec * 1000);
+ // Need at least one frame to report stream lifetime.
+ statistics_proxy_->OnCompleteFrame(true, 1000, VideoContentType::UNSPECIFIED);
+ // Histograms are updated when the statistics_proxy_ is deleted.
+ statistics_proxy_.reset();
+ EXPECT_EQ(1,
+ metrics::NumSamples("WebRTC.Video.ReceiveStreamLifetimeInSeconds"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.ReceiveStreamLifetimeInSeconds",
+ kTimeSec));
+}
+
+TEST_F(ReceiveStatisticsProxyTest,
+ LifetimeHistogramNotReportedForEmptyStreams) {
+ const int64_t kTimeSec = 3;
+ fake_clock_.AdvanceTimeMilliseconds(kTimeSec * 1000);
+ // No frames received.
+ // Histograms are updated when the statistics_proxy_ is deleted.
+ statistics_proxy_.reset();
+ EXPECT_EQ(0,
+ metrics::NumSamples("WebRTC.Video.ReceiveStreamLifetimeInSeconds"));
+}
+
+TEST_F(ReceiveStatisticsProxyTest, BadCallHistogramsAreUpdated) {
+  // Based on the tuning parameters, this will produce 7 uncertain states
+  // followed by 10 certainly bad states. There have to be 10 certain states
+  // before any histograms are recorded.
+ const int kNumBadSamples = 17;
+
+ StreamDataCounters counters;
+ counters.first_packet_time_ms = fake_clock_.TimeInMilliseconds();
+ statistics_proxy_->DataCountersUpdated(counters, config_.rtp.remote_ssrc);
+
+ for (int i = 0; i < kNumBadSamples; ++i) {
+    // Since OnRenderedFrame is never called, the fps in each sample will be
+    // 0, i.e. bad.
+ fake_clock_.AdvanceTimeMilliseconds(1000);
+ statistics_proxy_->OnIncomingRate(0, 0);
+ }
+ // Histograms are updated when the statistics_proxy_ is deleted.
+ statistics_proxy_.reset();
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.BadCall.Any"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.BadCall.Any", 100));
+
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.BadCall.FrameRate"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.BadCall.FrameRate", 100));
+
+ EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.BadCall.FrameRateVariance"));
+
+ EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.BadCall.Qp"));
+}
+
+TEST_F(ReceiveStatisticsProxyTest, PacketLossHistogramIsUpdated) {
+ const uint32_t kCumLost1 = 1;
+ const uint32_t kExtSeqNum1 = 10;
+ const uint32_t kCumLost2 = 2;
+ const uint32_t kExtSeqNum2 = 20;
+
+ // One report block received.
+ RtcpStatistics rtcp_stats1;
+ rtcp_stats1.packets_lost = kCumLost1;
+ rtcp_stats1.extended_highest_sequence_number = kExtSeqNum1;
+ statistics_proxy_->StatisticsUpdated(rtcp_stats1, kRemoteSsrc);
+
+ // Two report blocks received.
+ RtcpStatistics rtcp_stats2;
+ rtcp_stats2.packets_lost = kCumLost2;
+ rtcp_stats2.extended_highest_sequence_number = kExtSeqNum2;
+ statistics_proxy_->StatisticsUpdated(rtcp_stats2, kRemoteSsrc);
+
+ // Two received report blocks but min run time has not passed.
+ fake_clock_.AdvanceTimeMilliseconds(metrics::kMinRunTimeInSeconds * 1000 - 1);
+  SetUp();  // Resetting the stats proxy causes histograms to be updated.
+ EXPECT_EQ(0,
+ metrics::NumSamples("WebRTC.Video.ReceivedPacketsLostInPercent"));
+
+ // Two report blocks received.
+ statistics_proxy_->StatisticsUpdated(rtcp_stats1, kRemoteSsrc);
+ statistics_proxy_->StatisticsUpdated(rtcp_stats2, kRemoteSsrc);
+
+ // Two received report blocks and min run time has passed.
+ fake_clock_.AdvanceTimeMilliseconds(metrics::kMinRunTimeInSeconds * 1000);
+ SetUp();
+ EXPECT_EQ(1,
+ metrics::NumSamples("WebRTC.Video.ReceivedPacketsLostInPercent"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.ReceivedPacketsLostInPercent",
+ (kCumLost2 - kCumLost1) * 100 /
+ (kExtSeqNum2 - kExtSeqNum1)));
+}
+
+TEST_F(ReceiveStatisticsProxyTest,
+ PacketLossHistogramIsNotUpdatedIfLessThanTwoReportBlocksAreReceived) {
+ RtcpStatistics rtcp_stats1;
+ rtcp_stats1.packets_lost = 1;
+ rtcp_stats1.extended_highest_sequence_number = 10;
+
+ // Min run time has passed but no received report block.
+ fake_clock_.AdvanceTimeMilliseconds(metrics::kMinRunTimeInSeconds * 1000);
+  SetUp();  // Resetting the stats proxy causes histograms to be updated.
+ EXPECT_EQ(0,
+ metrics::NumSamples("WebRTC.Video.ReceivedPacketsLostInPercent"));
+
+ // Min run time has passed but only one received report block.
+ statistics_proxy_->StatisticsUpdated(rtcp_stats1, kRemoteSsrc);
+ fake_clock_.AdvanceTimeMilliseconds(metrics::kMinRunTimeInSeconds * 1000);
+ SetUp();
+ EXPECT_EQ(0,
+ metrics::NumSamples("WebRTC.Video.ReceivedPacketsLostInPercent"));
+}
+
+TEST_F(ReceiveStatisticsProxyTest, GetStatsReportsAvSyncOffset) {
+ const int64_t kSyncOffsetMs = 22;
+ const double kFreqKhz = 90.0;
+ EXPECT_EQ(std::numeric_limits<int>::max(),
+ statistics_proxy_->GetStats().sync_offset_ms);
+ statistics_proxy_->OnSyncOffsetUpdated(kSyncOffsetMs, kFreqKhz);
+ EXPECT_EQ(kSyncOffsetMs, statistics_proxy_->GetStats().sync_offset_ms);
+}
+
+TEST_F(ReceiveStatisticsProxyTest, AvSyncOffsetHistogramIsUpdated) {
+ const int64_t kSyncOffsetMs = 22;
+ const double kFreqKhz = 90.0;
+ for (int i = 0; i < kMinRequiredSamples; ++i)
+ statistics_proxy_->OnSyncOffsetUpdated(kSyncOffsetMs, kFreqKhz);
+ // Histograms are updated when the statistics_proxy_ is deleted.
+ statistics_proxy_.reset();
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.AVSyncOffsetInMs"));
+ EXPECT_EQ(1,
+ metrics::NumEvents("WebRTC.Video.AVSyncOffsetInMs", kSyncOffsetMs));
+}
+
+TEST_F(ReceiveStatisticsProxyTest, RtpToNtpFrequencyOffsetHistogramIsUpdated) {
+ const int64_t kSyncOffsetMs = 22;
+ const double kFreqKhz = 90.0;
+ statistics_proxy_->OnSyncOffsetUpdated(kSyncOffsetMs, kFreqKhz);
+ statistics_proxy_->OnSyncOffsetUpdated(kSyncOffsetMs, kFreqKhz + 2.2);
+ fake_clock_.AdvanceTimeMilliseconds(kFreqOffsetProcessIntervalInMs);
+ // Process interval passed, max diff: 2.
+ statistics_proxy_->OnSyncOffsetUpdated(kSyncOffsetMs, kFreqKhz + 1.1);
+ statistics_proxy_->OnSyncOffsetUpdated(kSyncOffsetMs, kFreqKhz - 4.2);
+ statistics_proxy_->OnSyncOffsetUpdated(kSyncOffsetMs, kFreqKhz - 0.9);
+ fake_clock_.AdvanceTimeMilliseconds(kFreqOffsetProcessIntervalInMs);
+ // Process interval passed, max diff: 4.
+ statistics_proxy_->OnSyncOffsetUpdated(kSyncOffsetMs, kFreqKhz);
+ statistics_proxy_.reset();
+ // Average reported: (2 + 4) / 2 = 3.
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.RtpToNtpFreqOffsetInKhz"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.RtpToNtpFreqOffsetInKhz", 3));
+}
+
+TEST_F(ReceiveStatisticsProxyTest, Vp8QpHistogramIsUpdated) {
+ const int kQp = 22;
+ EncodedImage encoded_image;
+ encoded_image.qp_ = kQp;
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecVP8;
+
+ for (int i = 0; i < kMinRequiredSamples; ++i)
+ statistics_proxy_->OnPreDecode(encoded_image, &codec_info);
+
+ statistics_proxy_.reset();
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.Decoded.Vp8.Qp"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.Decoded.Vp8.Qp", kQp));
+}
+
+TEST_F(ReceiveStatisticsProxyTest, Vp8QpHistogramIsNotUpdatedForTooFewSamples) {
+ EncodedImage encoded_image;
+ encoded_image.qp_ = 22;
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecVP8;
+
+ for (int i = 0; i < kMinRequiredSamples - 1; ++i)
+ statistics_proxy_->OnPreDecode(encoded_image, &codec_info);
+
+ statistics_proxy_.reset();
+ EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.Decoded.Vp8.Qp"));
+}
+
+TEST_F(ReceiveStatisticsProxyTest, Vp8QpHistogramIsNotUpdatedIfNoQpValue) {
+ EncodedImage encoded_image;
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecVP8;
+
+ for (int i = 0; i < kMinRequiredSamples; ++i)
+ statistics_proxy_->OnPreDecode(encoded_image, &codec_info);
+
+ statistics_proxy_.reset();
+ EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.Decoded.Vp8.Qp"));
+}
+
+TEST_F(ReceiveStatisticsProxyTest,
+ KeyFrameHistogramNotUpdatedForTooFewSamples) {
+ const bool kIsKeyFrame = false;
+ const int kFrameSizeBytes = 1000;
+
+ for (int i = 0; i < kMinRequiredSamples - 1; ++i)
+ statistics_proxy_->OnCompleteFrame(kIsKeyFrame, kFrameSizeBytes,
+ VideoContentType::UNSPECIFIED);
+
+ EXPECT_EQ(0, statistics_proxy_->GetStats().frame_counts.key_frames);
+ EXPECT_EQ(kMinRequiredSamples - 1,
+ statistics_proxy_->GetStats().frame_counts.delta_frames);
+
+ statistics_proxy_.reset();
+ EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.KeyFramesReceivedInPermille"));
+}
+
+TEST_F(ReceiveStatisticsProxyTest,
+ KeyFrameHistogramUpdatedForMinRequiredSamples) {
+ const bool kIsKeyFrame = false;
+ const int kFrameSizeBytes = 1000;
+
+ for (int i = 0; i < kMinRequiredSamples; ++i)
+ statistics_proxy_->OnCompleteFrame(kIsKeyFrame, kFrameSizeBytes,
+ VideoContentType::UNSPECIFIED);
+
+ EXPECT_EQ(0, statistics_proxy_->GetStats().frame_counts.key_frames);
+ EXPECT_EQ(kMinRequiredSamples,
+ statistics_proxy_->GetStats().frame_counts.delta_frames);
+
+ statistics_proxy_.reset();
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.KeyFramesReceivedInPermille"));
+ EXPECT_EQ(1,
+ metrics::NumEvents("WebRTC.Video.KeyFramesReceivedInPermille", 0));
+}
+
+TEST_F(ReceiveStatisticsProxyTest, KeyFrameHistogramIsUpdated) {
+ const int kFrameSizeBytes = 1000;
+
+ for (int i = 0; i < kMinRequiredSamples; ++i)
+ statistics_proxy_->OnCompleteFrame(true, kFrameSizeBytes,
+ VideoContentType::UNSPECIFIED);
+
+ for (int i = 0; i < kMinRequiredSamples; ++i)
+ statistics_proxy_->OnCompleteFrame(false, kFrameSizeBytes,
+ VideoContentType::UNSPECIFIED);
+
+ EXPECT_EQ(kMinRequiredSamples,
+ statistics_proxy_->GetStats().frame_counts.key_frames);
+ EXPECT_EQ(kMinRequiredSamples,
+ statistics_proxy_->GetStats().frame_counts.delta_frames);
+
+ statistics_proxy_.reset();
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.KeyFramesReceivedInPermille"));
+ EXPECT_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.KeyFramesReceivedInPermille", 500));
+}
+
+TEST_F(ReceiveStatisticsProxyTest, TimingHistogramsNotUpdatedForTooFewSamples) {
+ const int kDecodeMs = 1;
+ const int kMaxDecodeMs = 2;
+ const int kCurrentDelayMs = 3;
+ const int kTargetDelayMs = 4;
+ const int kJitterBufferMs = 5;
+ const int kMinPlayoutDelayMs = 6;
+ const int kRenderDelayMs = 7;
+
+ for (int i = 0; i < kMinRequiredSamples - 1; ++i) {
+ statistics_proxy_->OnFrameBufferTimingsUpdated(
+ kDecodeMs, kMaxDecodeMs, kCurrentDelayMs, kTargetDelayMs,
+ kJitterBufferMs, kMinPlayoutDelayMs, kRenderDelayMs);
+ }
+
+ statistics_proxy_.reset();
+ EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.DecodeTimeInMs"));
+ EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.JitterBufferDelayInMs"));
+ EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.TargetDelayInMs"));
+ EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.CurrentDelayInMs"));
+ EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.OnewayDelayInMs"));
+}
+
+TEST_F(ReceiveStatisticsProxyTest, TimingHistogramsAreUpdated) {
+ const int kDecodeMs = 1;
+ const int kMaxDecodeMs = 2;
+ const int kCurrentDelayMs = 3;
+ const int kTargetDelayMs = 4;
+ const int kJitterBufferMs = 5;
+ const int kMinPlayoutDelayMs = 6;
+ const int kRenderDelayMs = 7;
+
+ for (int i = 0; i < kMinRequiredSamples; ++i) {
+ statistics_proxy_->OnFrameBufferTimingsUpdated(
+ kDecodeMs, kMaxDecodeMs, kCurrentDelayMs, kTargetDelayMs,
+ kJitterBufferMs, kMinPlayoutDelayMs, kRenderDelayMs);
+ }
+
+ statistics_proxy_.reset();
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.DecodeTimeInMs"));
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.JitterBufferDelayInMs"));
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.TargetDelayInMs"));
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.CurrentDelayInMs"));
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.OnewayDelayInMs"));
+
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.DecodeTimeInMs", kDecodeMs));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.JitterBufferDelayInMs",
+ kJitterBufferMs));
+ EXPECT_EQ(1,
+ metrics::NumEvents("WebRTC.Video.TargetDelayInMs", kTargetDelayMs));
+ EXPECT_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.CurrentDelayInMs", kCurrentDelayMs));
+ EXPECT_EQ(1,
+ metrics::NumEvents("WebRTC.Video.OnewayDelayInMs", kTargetDelayMs));
+}
+
+TEST_F(ReceiveStatisticsProxyTest, DoesNotReportStaleFramerates) {
+ const int kDefaultFps = 30;
+ const int kWidth = 320;
+ const int kHeight = 240;
+
+ rtc::scoped_refptr<VideoFrameBuffer> video_frame_buffer(
+ I420Buffer::Create(kWidth, kHeight));
+ VideoFrame frame(video_frame_buffer, kVideoRotation_0, 0);
+
+ for (int i = 0; i < kDefaultFps; ++i) {
+    // Decode and render one frame per frame interval so both FPS trackers
+    // observe a steady frame rate.
+ frame.set_ntp_time_ms(fake_clock_.CurrentNtpInMilliseconds());
+ statistics_proxy_->OnDecodedFrame(rtc::Optional<uint8_t>(),
+ VideoContentType::UNSPECIFIED);
+ statistics_proxy_->OnRenderedFrame(frame);
+ fake_clock_.AdvanceTimeMilliseconds(1000 / kDefaultFps);
+ }
+
+ EXPECT_EQ(kDefaultFps, statistics_proxy_->GetStats().decode_frame_rate);
+ EXPECT_EQ(kDefaultFps, statistics_proxy_->GetStats().render_frame_rate);
+
+ // FPS trackers in stats proxy have a 1000ms sliding window.
+ fake_clock_.AdvanceTimeMilliseconds(1000);
+ EXPECT_EQ(0, statistics_proxy_->GetStats().decode_frame_rate);
+ EXPECT_EQ(0, statistics_proxy_->GetStats().render_frame_rate);
+}
+
+TEST_F(ReceiveStatisticsProxyTest, GetStatsReportsReceivedFrameStats) {
+ const int kWidth = 160;
+ const int kHeight = 120;
+ EXPECT_EQ(0, statistics_proxy_->GetStats().width);
+ EXPECT_EQ(0, statistics_proxy_->GetStats().height);
+ EXPECT_EQ(0u, statistics_proxy_->GetStats().frames_rendered);
+
+ statistics_proxy_->OnRenderedFrame(CreateFrame(kWidth, kHeight));
+
+ EXPECT_EQ(kWidth, statistics_proxy_->GetStats().width);
+ EXPECT_EQ(kHeight, statistics_proxy_->GetStats().height);
+ EXPECT_EQ(1u, statistics_proxy_->GetStats().frames_rendered);
+}
+
+TEST_F(ReceiveStatisticsProxyTest,
+ ReceivedFrameHistogramsAreNotUpdatedForTooFewSamples) {
+ const int kWidth = 160;
+ const int kHeight = 120;
+
+ for (int i = 0; i < kMinRequiredSamples - 1; ++i)
+ statistics_proxy_->OnRenderedFrame(CreateFrame(kWidth, kHeight));
+
+ statistics_proxy_.reset();
+ EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.ReceivedWidthInPixels"));
+ EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.ReceivedHeightInPixels"));
+ EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.RenderFramesPerSecond"));
+ EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.RenderSqrtPixelsPerSecond"));
+}
+
+TEST_F(ReceiveStatisticsProxyTest, ReceivedFrameHistogramsAreUpdated) {
+ const int kWidth = 160;
+ const int kHeight = 120;
+
+ for (int i = 0; i < kMinRequiredSamples; ++i)
+ statistics_proxy_->OnRenderedFrame(CreateFrame(kWidth, kHeight));
+
+ statistics_proxy_.reset();
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.ReceivedWidthInPixels"));
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.ReceivedHeightInPixels"));
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.RenderFramesPerSecond"));
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.RenderSqrtPixelsPerSecond"));
+ EXPECT_EQ(1,
+ metrics::NumEvents("WebRTC.Video.ReceivedWidthInPixels", kWidth));
+ EXPECT_EQ(1,
+ metrics::NumEvents("WebRTC.Video.ReceivedHeightInPixels", kHeight));
+}
+
+TEST_F(ReceiveStatisticsProxyTest,
+ RtcpHistogramsNotUpdatedIfMinRuntimeHasNotPassed) {
+ InsertFirstRtpPacket(kRemoteSsrc);
+ fake_clock_.AdvanceTimeMilliseconds((metrics::kMinRunTimeInSeconds * 1000) -
+ 1);
+
+ RtcpPacketTypeCounter counter;
+ statistics_proxy_->RtcpPacketTypesCounterUpdated(kRemoteSsrc, counter);
+
+ statistics_proxy_.reset();
+ EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.FirPacketsSentPerMinute"));
+ EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.PliPacketsSentPerMinute"));
+ EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.NackPacketsSentPerMinute"));
+}
+
+TEST_F(ReceiveStatisticsProxyTest, RtcpHistogramsAreUpdated) {
+ InsertFirstRtpPacket(kRemoteSsrc);
+ fake_clock_.AdvanceTimeMilliseconds(metrics::kMinRunTimeInSeconds * 1000);
+
+ const uint32_t kFirPackets = 100;
+ const uint32_t kPliPackets = 200;
+ const uint32_t kNackPackets = 300;
+
+ RtcpPacketTypeCounter counter;
+ counter.fir_packets = kFirPackets;
+ counter.pli_packets = kPliPackets;
+ counter.nack_packets = kNackPackets;
+ statistics_proxy_->RtcpPacketTypesCounterUpdated(kRemoteSsrc, counter);
+
+ statistics_proxy_.reset();
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.FirPacketsSentPerMinute"));
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.PliPacketsSentPerMinute"));
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.NackPacketsSentPerMinute"));
+ EXPECT_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.FirPacketsSentPerMinute",
+ kFirPackets * 60 / metrics::kMinRunTimeInSeconds));
+ EXPECT_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.PliPacketsSentPerMinute",
+ kPliPackets * 60 / metrics::kMinRunTimeInSeconds));
+ EXPECT_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.NackPacketsSentPerMinute",
+ kNackPackets * 60 / metrics::kMinRunTimeInSeconds));
+}
+
+INSTANTIATE_TEST_CASE_P(ContentTypes,
+ ReceiveStatisticsProxyTest,
+ ::testing::Values(VideoContentType::UNSPECIFIED,
+ VideoContentType::SCREENSHARE));
+
+TEST_P(ReceiveStatisticsProxyTest, InterFrameDelaysAreReported) {
+ const VideoContentType content_type = GetParam();
+ const int kInterFrameDelayMs = 33;
+ for (int i = 0; i < kMinRequiredSamples; ++i) {
+ statistics_proxy_->OnDecodedFrame(rtc::Optional<uint8_t>(), content_type);
+ fake_clock_.AdvanceTimeMilliseconds(kInterFrameDelayMs);
+ }
+  // One extra frame with double the interval.
+ fake_clock_.AdvanceTimeMilliseconds(kInterFrameDelayMs);
+ statistics_proxy_->OnDecodedFrame(rtc::Optional<uint8_t>(), content_type);
+
+ statistics_proxy_.reset();
+ const int kExpectedInterFrame =
+ (kInterFrameDelayMs * (kMinRequiredSamples - 1) +
+ kInterFrameDelayMs * 2) /
+ kMinRequiredSamples;
+ if (videocontenttypehelpers::IsScreenshare(content_type)) {
+ EXPECT_EQ(
+ kExpectedInterFrame,
+ metrics::MinSample("WebRTC.Video.Screenshare.InterframeDelayInMs"));
+ EXPECT_EQ(
+ kInterFrameDelayMs * 2,
+ metrics::MinSample("WebRTC.Video.Screenshare.InterframeDelayMaxInMs"));
+ } else {
+ EXPECT_EQ(kExpectedInterFrame,
+ metrics::MinSample("WebRTC.Video.InterframeDelayInMs"));
+ EXPECT_EQ(kInterFrameDelayMs * 2,
+ metrics::MinSample("WebRTC.Video.InterframeDelayMaxInMs"));
+ }
+}
+
+TEST_P(ReceiveStatisticsProxyTest, InterFrameDelaysPercentilesAreReported) {
+ const VideoContentType content_type = GetParam();
+ const int kInterFrameDelayMs = 33;
+ const int kLastFivePercentsSamples = kMinRequiredSamples * 5 / 100;
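+  // Roughly the first 95% of samples use the nominal interval.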
+ for (int i = 0; i <= kMinRequiredSamples - kLastFivePercentsSamples; ++i) {
+ fake_clock_.AdvanceTimeMilliseconds(kInterFrameDelayMs);
+ statistics_proxy_->OnDecodedFrame(rtc::Optional<uint8_t>(), content_type);
+ }
+ // Last 5% of intervals are double in size.
+ for (int i = 0; i < kLastFivePercentsSamples; ++i) {
+ fake_clock_.AdvanceTimeMilliseconds(2 * kInterFrameDelayMs);
+ statistics_proxy_->OnDecodedFrame(rtc::Optional<uint8_t>(), content_type);
+ }
+  // The final sample is an outlier and 10 times as big.
+ fake_clock_.AdvanceTimeMilliseconds(10 * kInterFrameDelayMs);
+ statistics_proxy_->OnDecodedFrame(rtc::Optional<uint8_t>(), content_type);
+
+ statistics_proxy_.reset();
+ const int kExpectedInterFrame = kInterFrameDelayMs * 2;
+ if (videocontenttypehelpers::IsScreenshare(content_type)) {
+ EXPECT_EQ(kExpectedInterFrame,
+ metrics::MinSample(
+ "WebRTC.Video.Screenshare.InterframeDelay95PercentileInMs"));
+ } else {
+ EXPECT_EQ(
+ kExpectedInterFrame,
+ metrics::MinSample("WebRTC.Video.InterframeDelay95PercentileInMs"));
+ }
+}
+
+TEST_P(ReceiveStatisticsProxyTest, MaxInterFrameDelayOnlyWithValidAverage) {
+ const VideoContentType content_type = GetParam();
+ const int kInterFrameDelayMs = 33;
+ for (int i = 0; i < kMinRequiredSamples; ++i) {
+ statistics_proxy_->OnDecodedFrame(rtc::Optional<uint8_t>(), content_type);
+ fake_clock_.AdvanceTimeMilliseconds(kInterFrameDelayMs);
+ }
+
+  // |kMinRequiredSamples| samples, and thereby intervals, are required. That
+ // means we're one frame short of having a valid data set.
+ statistics_proxy_.reset();
+ EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.InterframeDelayInMs"));
+ EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.InterframeDelayMaxInMs"));
+ EXPECT_EQ(
+ 0, metrics::NumSamples("WebRTC.Video.Screenshare.InterframeDelayInMs"));
+ EXPECT_EQ(0, metrics::NumSamples(
+ "WebRTC.Video.Screenshare.InterframeDelayMaxInMs"));
+}
+
+TEST_P(ReceiveStatisticsProxyTest, MaxInterFrameDelayOnlyWithPause) {
+ const VideoContentType content_type = GetParam();
+ const int kInterFrameDelayMs = 33;
+ for (int i = 0; i <= kMinRequiredSamples; ++i) {
+ statistics_proxy_->OnDecodedFrame(rtc::Optional<uint8_t>(), content_type);
+ fake_clock_.AdvanceTimeMilliseconds(kInterFrameDelayMs);
+ }
+
+  // At this point, we should have a valid inter-frame delay.
+ // Indicate stream paused and make a large jump in time.
+ statistics_proxy_->OnStreamInactive();
+ fake_clock_.AdvanceTimeMilliseconds(5000);
+
+  // Insert two more frames. The interval during the pause should be
+  // disregarded in the stats.
+ statistics_proxy_->OnDecodedFrame(rtc::Optional<uint8_t>(), content_type);
+ fake_clock_.AdvanceTimeMilliseconds(kInterFrameDelayMs);
+ statistics_proxy_->OnDecodedFrame(rtc::Optional<uint8_t>(), content_type);
+
+ statistics_proxy_.reset();
+ if (videocontenttypehelpers::IsScreenshare(content_type)) {
+ EXPECT_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.Screenshare.InterframeDelayInMs"));
+ EXPECT_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.Screenshare.InterframeDelayMaxInMs"));
+ EXPECT_EQ(
+ kInterFrameDelayMs,
+ metrics::MinSample("WebRTC.Video.Screenshare.InterframeDelayInMs"));
+ EXPECT_EQ(
+ kInterFrameDelayMs,
+ metrics::MinSample("WebRTC.Video.Screenshare.InterframeDelayMaxInMs"));
+ } else {
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.InterframeDelayInMs"));
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.InterframeDelayMaxInMs"));
+ EXPECT_EQ(kInterFrameDelayMs,
+ metrics::MinSample("WebRTC.Video.InterframeDelayInMs"));
+ EXPECT_EQ(kInterFrameDelayMs,
+ metrics::MinSample("WebRTC.Video.InterframeDelayMaxInMs"));
+ }
+}
+
+TEST_P(ReceiveStatisticsProxyTest, StatsAreSlicedOnSimulcastAndExperiment) {
+ VideoContentType content_type = GetParam();
+ const uint8_t experiment_id = 1;
+ videocontenttypehelpers::SetExperimentId(&content_type, experiment_id);
+ const int kInterFrameDelayMs1 = 30;
+ const int kInterFrameDelayMs2 = 50;
+
+ videocontenttypehelpers::SetSimulcastId(&content_type, 1);
+ for (int i = 0; i <= kMinRequiredSamples; ++i) {
+ fake_clock_.AdvanceTimeMilliseconds(kInterFrameDelayMs1);
+ statistics_proxy_->OnDecodedFrame(rtc::Optional<uint8_t>(), content_type);
+ }
+
+ videocontenttypehelpers::SetSimulcastId(&content_type, 2);
+ for (int i = 0; i <= kMinRequiredSamples; ++i) {
+ fake_clock_.AdvanceTimeMilliseconds(kInterFrameDelayMs2);
+ statistics_proxy_->OnDecodedFrame(rtc::Optional<uint8_t>(), content_type);
+ }
+ statistics_proxy_.reset();
+
+ if (videocontenttypehelpers::IsScreenshare(content_type)) {
+ EXPECT_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.Screenshare.InterframeDelayInMs"));
+ EXPECT_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.Screenshare.InterframeDelayMaxInMs"));
+ EXPECT_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.Screenshare.InterframeDelayInMs.S0"));
+ EXPECT_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.Screenshare.InterframeDelayMaxInMs.S0"));
+ EXPECT_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.Screenshare.InterframeDelayInMs.S1"));
+ EXPECT_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.Screenshare.InterframeDelayMaxInMs.S1"));
+ EXPECT_EQ(1,
+ metrics::NumSamples("WebRTC.Video.Screenshare.InterframeDelayInMs"
+ ".ExperimentGroup0"));
+ EXPECT_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.Screenshare.InterframeDelayMaxInMs"
+ ".ExperimentGroup0"));
+ EXPECT_EQ(
+ kInterFrameDelayMs1,
+ metrics::MinSample("WebRTC.Video.Screenshare.InterframeDelayInMs.S0"));
+ EXPECT_EQ(
+ kInterFrameDelayMs2,
+ metrics::MinSample("WebRTC.Video.Screenshare.InterframeDelayInMs.S1"));
+ EXPECT_EQ(
+ (kInterFrameDelayMs1 + kInterFrameDelayMs2) / 2,
+ metrics::MinSample("WebRTC.Video.Screenshare.InterframeDelayInMs"));
+ EXPECT_EQ(
+ kInterFrameDelayMs2,
+ metrics::MinSample("WebRTC.Video.Screenshare.InterframeDelayMaxInMs"));
+ EXPECT_EQ(
+ (kInterFrameDelayMs1 + kInterFrameDelayMs2) / 2,
+ metrics::MinSample(
+ "WebRTC.Video.Screenshare.InterframeDelayInMs.ExperimentGroup0"));
+ } else {
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.InterframeDelayInMs"));
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.InterframeDelayMaxInMs"));
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.InterframeDelayInMs.S0"));
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.InterframeDelayMaxInMs.S0"));
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.InterframeDelayInMs.S1"));
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.InterframeDelayMaxInMs.S1"));
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.InterframeDelayInMs"
+ ".ExperimentGroup0"));
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.InterframeDelayMaxInMs"
+ ".ExperimentGroup0"));
+ EXPECT_EQ(kInterFrameDelayMs1,
+ metrics::MinSample("WebRTC.Video.InterframeDelayInMs.S0"));
+ EXPECT_EQ(kInterFrameDelayMs2,
+ metrics::MinSample("WebRTC.Video.InterframeDelayInMs.S1"));
+ EXPECT_EQ((kInterFrameDelayMs1 + kInterFrameDelayMs2) / 2,
+ metrics::MinSample("WebRTC.Video.InterframeDelayInMs"));
+ EXPECT_EQ(kInterFrameDelayMs2,
+ metrics::MinSample("WebRTC.Video.InterframeDelayMaxInMs"));
+ EXPECT_EQ((kInterFrameDelayMs1 + kInterFrameDelayMs2) / 2,
+ metrics::MinSample(
+ "WebRTC.Video.InterframeDelayInMs.ExperimentGroup0"));
+ }
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/video/replay.cc b/third_party/libwebrtc/webrtc/video/replay.cc
new file mode 100644
index 0000000000..b9214b8ae6
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/replay.cc
@@ -0,0 +1,349 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+
+#include <map>
+#include <memory>
+#include <sstream>
+
+#include "api/video_codecs/video_decoder.h"
+#include "call/call.h"
+#include "common_video/libyuv/include/webrtc_libyuv.h"
+#include "logging/rtc_event_log/rtc_event_log.h"
+#include "modules/rtp_rtcp/include/rtp_header_parser.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/flags.h"
+#include "rtc_base/string_to_number.h"
+#include "system_wrappers/include/clock.h"
+#include "system_wrappers/include/sleep.h"
+#include "test/call_test.h"
+#include "test/encoder_settings.h"
+#include "test/fake_decoder.h"
+#include "test/gtest.h"
+#include "test/null_transport.h"
+#include "test/rtp_file_reader.h"
+#include "test/run_loop.h"
+#include "test/run_test.h"
+#include "test/testsupport/frame_writer.h"
+#include "test/video_capturer.h"
+#include "test/video_renderer.h"
+#include "typedefs.h" // NOLINT(build/include)
+
+namespace {
+
+bool ValidatePayloadType(int32_t payload_type) {
+  return payload_type > 0 && payload_type <= 127;
+}
+
+bool ValidateSsrc(const char* ssrc_string) {
+  return rtc::StringToNumber<uint32_t>(ssrc_string).has_value();
+}
+
+bool ValidateOptionalPayloadType(int32_t payload_type) {
+  return payload_type == -1 || ValidatePayloadType(payload_type);
+}
+
+bool ValidateRtpHeaderExtensionId(int32_t extension_id) {
+  return extension_id >= -1 && extension_id < 15;
+}
+
+bool ValidateInputFilenameNotEmpty(const std::string& string) {
+ return !string.empty();
+}
+
+} // namespace
+
+namespace webrtc {
+namespace flags {
+
+// TODO(pbos): Multiple receivers.
+
+// Flag for payload type.
+DEFINE_int(payload_type, test::CallTest::kPayloadTypeVP8, "Payload type");
+static int PayloadType() { return static_cast<int>(FLAG_payload_type); }
+
+DEFINE_int(payload_type_rtx,
+ test::CallTest::kSendRtxPayloadType,
+ "RTX payload type");
+static int PayloadTypeRtx() {
+ return static_cast<int>(FLAG_payload_type_rtx);
+}
+
+// Flag for SSRC.
+const std::string& DefaultSsrc() {
+ static const std::string ssrc = std::to_string(
+ test::CallTest::kVideoSendSsrcs[0]);
+ return ssrc;
+}
+DEFINE_string(ssrc, DefaultSsrc().c_str(), "Incoming SSRC");
+static uint32_t Ssrc() {
+ return rtc::StringToNumber<uint32_t>(FLAG_ssrc).value();
+}
+
+const std::string& DefaultSsrcRtx() {
+ static const std::string ssrc_rtx = std::to_string(
+ test::CallTest::kSendRtxSsrcs[0]);
+ return ssrc_rtx;
+}
+DEFINE_string(ssrc_rtx, DefaultSsrcRtx().c_str(), "Incoming RTX SSRC");
+static uint32_t SsrcRtx() {
+ return rtc::StringToNumber<uint32_t>(FLAG_ssrc_rtx).value();
+}
+
+// Flag for RED payload type.
+DEFINE_int(red_payload_type, -1, "RED payload type");
+static int RedPayloadType() {
+ return static_cast<int>(FLAG_red_payload_type);
+}
+
+// Flag for ULPFEC payload type.
+DEFINE_int(fec_payload_type, -1, "ULPFEC payload type");
+static int FecPayloadType() {
+ return static_cast<int>(FLAG_fec_payload_type);
+}
+
+// Flag for abs-send-time id.
+DEFINE_int(abs_send_time_id, -1, "RTP extension ID for abs-send-time");
+static int AbsSendTimeId() { return static_cast<int>(FLAG_abs_send_time_id); }
+
+// Flag for transmission-offset id.
+DEFINE_int(transmission_offset_id,
+ -1,
+ "RTP extension ID for transmission-offset");
+static int TransmissionOffsetId() {
+ return static_cast<int>(FLAG_transmission_offset_id);
+}
+
+// Flag for rtpdump input file.
+DEFINE_string(input_file, "", "input file");
+static std::string InputFile() {
+ return static_cast<std::string>(FLAG_input_file);
+}
+
+// Flag for raw output files.
+DEFINE_string(out_base, "", "Basename (excluding .jpg) for raw output");
+static std::string OutBase() {
+ return static_cast<std::string>(FLAG_out_base);
+}
+
+DEFINE_string(decoder_bitstream_filename, "", "Decoder bitstream output file");
+static std::string DecoderBitstreamFilename() {
+ return static_cast<std::string>(FLAG_decoder_bitstream_filename);
+}
+
+// Flag for video codec.
+DEFINE_string(codec, "VP8", "Video codec");
+static std::string Codec() { return static_cast<std::string>(FLAG_codec); }
+
+DEFINE_bool(help, false, "Print this message.");
+} // namespace flags
+
+static const uint32_t kReceiverLocalSsrc = 0x123456;
+
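+// Passes rendered frames through to an optional renderer and, if a basename
+// is given, also writes each frame to a numbered .jpg file.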
+class FileRenderPassthrough : public rtc::VideoSinkInterface<VideoFrame> {
+ public:
+ FileRenderPassthrough(const std::string& basename,
+ rtc::VideoSinkInterface<VideoFrame>* renderer)
+ : basename_(basename), renderer_(renderer), file_(nullptr), count_(0) {}
+
+ ~FileRenderPassthrough() {
+ if (file_)
+ fclose(file_);
+ }
+
+ private:
+ void OnFrame(const VideoFrame& video_frame) override {
+ if (renderer_)
+ renderer_->OnFrame(video_frame);
+
+ if (basename_.empty())
+ return;
+
+ std::stringstream filename;
+ filename << basename_ << count_++ << "_" << video_frame.timestamp()
+ << ".jpg";
+
+ test::JpegFrameWriter frame_writer(filename.str());
+ RTC_CHECK(frame_writer.WriteFrame(video_frame, 100));
+ }
+
+ const std::string basename_;
+ rtc::VideoSinkInterface<VideoFrame>* const renderer_;
+ FILE* file_;
+ size_t count_;
+};
+
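+// Writes each incoming encoded frame back-to-back to a raw bitstream dump
+// file.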
+class DecoderBitstreamFileWriter : public EncodedFrameObserver {
+ public:
+ explicit DecoderBitstreamFileWriter(const char* filename)
+ : file_(fopen(filename, "wb")) {
+ RTC_DCHECK(file_);
+ }
+  ~DecoderBitstreamFileWriter() {
+    if (file_)
+      fclose(file_);
+  }
+
+  void EncodedFrameCallback(const EncodedFrame& encoded_frame) override {
+ fwrite(encoded_frame.data_, 1, encoded_frame.length_, file_);
+ }
+
+ private:
+ FILE* file_;
+};
+
+void RtpReplay() {
+ std::stringstream window_title;
+ window_title << "Playback Video (" << flags::InputFile() << ")";
+ std::unique_ptr<test::VideoRenderer> playback_video(
+ test::VideoRenderer::Create(window_title.str().c_str(), 640, 480));
+ FileRenderPassthrough file_passthrough(flags::OutBase(),
+ playback_video.get());
+
+ webrtc::RtcEventLogNullImpl event_log;
+ std::unique_ptr<Call> call(Call::Create(Call::Config(&event_log)));
+
+ test::NullTransport transport;
+ VideoReceiveStream::Config receive_config(&transport);
+ receive_config.rtp.remote_ssrc = flags::Ssrc();
+ receive_config.rtp.local_ssrc = kReceiverLocalSsrc;
+ receive_config.rtp.rtx_ssrc = flags::SsrcRtx();
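+  // Map the RTX payload type back to the media payload type.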
+ receive_config.rtp.rtx_associated_payload_types[flags::PayloadTypeRtx()] =
+ flags::PayloadType();
+ receive_config.rtp.ulpfec_payload_type = flags::FecPayloadType();
+ receive_config.rtp.red_payload_type = flags::RedPayloadType();
+ receive_config.rtp.nack.rtp_history_ms = 1000;
+ if (flags::TransmissionOffsetId() != -1) {
+ receive_config.rtp.extensions.push_back(RtpExtension(
+ RtpExtension::kTimestampOffsetUri, flags::TransmissionOffsetId()));
+ }
+ if (flags::AbsSendTimeId() != -1) {
+ receive_config.rtp.extensions.push_back(
+ RtpExtension(RtpExtension::kAbsSendTimeUri, flags::AbsSendTimeId()));
+ }
+ receive_config.renderer = &file_passthrough;
+
+ VideoSendStream::Config::EncoderSettings encoder_settings;
+ encoder_settings.payload_name = flags::Codec();
+ encoder_settings.payload_type = flags::PayloadType();
+ VideoReceiveStream::Decoder decoder;
+ std::unique_ptr<DecoderBitstreamFileWriter> bitstream_writer;
+ if (!flags::DecoderBitstreamFilename().empty()) {
+ bitstream_writer.reset(new DecoderBitstreamFileWriter(
+ flags::DecoderBitstreamFilename().c_str()));
+ receive_config.pre_decode_callback = bitstream_writer.get();
+ }
+ decoder = test::CreateMatchingDecoder(encoder_settings);
+ if (!flags::DecoderBitstreamFilename().empty()) {
+ // Replace with a null decoder if we're writing the bitstream to a file
+ // instead.
+ delete decoder.decoder;
+ decoder.decoder = new test::FakeNullDecoder();
+ }
+ receive_config.decoders.push_back(decoder);
+
+ VideoReceiveStream* receive_stream =
+ call->CreateVideoReceiveStream(std::move(receive_config));
+
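+  // Try to open the input as an rtpdump first, then fall back to pcap.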
+ std::unique_ptr<test::RtpFileReader> rtp_reader(test::RtpFileReader::Create(
+ test::RtpFileReader::kRtpDump, flags::InputFile()));
+ if (!rtp_reader) {
+ rtp_reader.reset(test::RtpFileReader::Create(test::RtpFileReader::kPcap,
+ flags::InputFile()));
+ if (!rtp_reader) {
+ fprintf(stderr,
+              "Couldn't open input file as either an rtpdump or a .pcap "
+              "file. Note that .pcapng is not supported.\nTrying to "
+              "interpret the file as length/packet interleaved.\n");
+ rtp_reader.reset(test::RtpFileReader::Create(
+ test::RtpFileReader::kLengthPacketInterleaved, flags::InputFile()));
+ if (!rtp_reader) {
+ fprintf(stderr,
+ "Unable to open input file with any supported format\n");
+ return;
+ }
+ }
+ }
+ receive_stream->Start();
+
+ uint32_t last_time_ms = 0;
+ int num_packets = 0;
+ std::map<uint32_t, int> unknown_packets;
+ while (true) {
+ test::RtpPacket packet;
+ if (!rtp_reader->NextPacket(&packet))
+ break;
+ ++num_packets;
+ switch (call->Receiver()->DeliverPacket(
+ webrtc::MediaType::VIDEO, packet.data, packet.length, PacketTime())) {
+ case PacketReceiver::DELIVERY_OK:
+ break;
+ case PacketReceiver::DELIVERY_UNKNOWN_SSRC: {
+ RTPHeader header;
+ std::unique_ptr<RtpHeaderParser> parser(RtpHeaderParser::Create());
+ parser->Parse(packet.data, packet.length, &header);
+ if (unknown_packets[header.ssrc] == 0)
+ fprintf(stderr, "Unknown SSRC: %u!\n", header.ssrc);
+ ++unknown_packets[header.ssrc];
+ break;
+ }
+ case PacketReceiver::DELIVERY_PACKET_ERROR: {
+ fprintf(stderr, "Packet error, corrupt packets or incorrect setup?\n");
+ RTPHeader header;
+ std::unique_ptr<RtpHeaderParser> parser(RtpHeaderParser::Create());
+ parser->Parse(packet.data, packet.length, &header);
+ fprintf(stderr, "Packet len=%zu pt=%u seq=%u ts=%u ssrc=0x%8x\n",
+ packet.length, header.payloadType, header.sequenceNumber,
+ header.timestamp, header.ssrc);
+ break;
+ }
+ }
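+    // Pace the replay so packets are delivered with the recorded spacing.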
+ if (last_time_ms != 0 && last_time_ms != packet.time_ms) {
+ SleepMs(packet.time_ms - last_time_ms);
+ }
+ last_time_ms = packet.time_ms;
+ }
+ fprintf(stderr, "num_packets: %d\n", num_packets);
+
+ for (std::map<uint32_t, int>::const_iterator it = unknown_packets.begin();
+ it != unknown_packets.end();
+ ++it) {
+ fprintf(
+ stderr, "Packets for unknown ssrc '%u': %d\n", it->first, it->second);
+ }
+
+ call->DestroyVideoReceiveStream(receive_stream);
+
+ delete decoder.decoder;
+}
+} // namespace webrtc
+
+int main(int argc, char* argv[]) {
+ ::testing::InitGoogleTest(&argc, argv);
+ if (rtc::FlagList::SetFlagsFromCommandLine(&argc, argv, true)) {
+ return 1;
+ }
+ if (webrtc::flags::FLAG_help) {
+ rtc::FlagList::Print(nullptr, false);
+ return 0;
+ }
+
+ RTC_CHECK(ValidatePayloadType(webrtc::flags::FLAG_payload_type));
+ RTC_CHECK(ValidatePayloadType(webrtc::flags::FLAG_payload_type_rtx));
+ RTC_CHECK(ValidateSsrc(webrtc::flags::FLAG_ssrc));
+ RTC_CHECK(ValidateSsrc(webrtc::flags::FLAG_ssrc_rtx));
+ RTC_CHECK(ValidateOptionalPayloadType(webrtc::flags::FLAG_red_payload_type));
+ RTC_CHECK(ValidateOptionalPayloadType(webrtc::flags::FLAG_fec_payload_type));
+ RTC_CHECK(ValidateRtpHeaderExtensionId(webrtc::flags::FLAG_abs_send_time_id));
+ RTC_CHECK(ValidateRtpHeaderExtensionId(
+ webrtc::flags::FLAG_transmission_offset_id));
+ RTC_CHECK(ValidateInputFilenameNotEmpty(webrtc::flags::FLAG_input_file));
+
+ webrtc::test::RunTest(webrtc::RtpReplay);
+ return 0;
+}
diff --git a/third_party/libwebrtc/webrtc/video/report_block_stats.cc b/third_party/libwebrtc/webrtc/video/report_block_stats.cc
new file mode 100644
index 0000000000..4726a46745
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/report_block_stats.cc
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/report_block_stats.h"
+
+namespace webrtc {
+
+namespace {
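+// Returns the loss rate as an RTCP-style fraction in Q8 (0-255), rounded to
+// nearest.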
+int FractionLost(uint32_t num_lost_sequence_numbers,
+ uint32_t num_sequence_numbers) {
+ if (num_sequence_numbers == 0) {
+ return 0;
+ }
+ return ((num_lost_sequence_numbers * 255) + (num_sequence_numbers / 2)) /
+ num_sequence_numbers;
+}
+} // namespace
+
+// Helper class for RTCP statistics.
+ReportBlockStats::ReportBlockStats()
+ : num_sequence_numbers_(0),
+ num_lost_sequence_numbers_(0) {
+}
+
+void ReportBlockStats::Store(const RtcpStatistics& rtcp_stats,
+ uint32_t remote_ssrc,
+ uint32_t source_ssrc) {
+ RTCPReportBlock block;
+ block.packets_lost = rtcp_stats.packets_lost;
+ block.fraction_lost = rtcp_stats.fraction_lost;
+ block.extended_highest_sequence_number =
+ rtcp_stats.extended_highest_sequence_number;
+ block.jitter = rtcp_stats.jitter;
+ block.sender_ssrc = remote_ssrc;
+ block.source_ssrc = source_ssrc;
+ uint32_t num_sequence_numbers = 0;
+ uint32_t num_lost_sequence_numbers = 0;
+ StoreAndAddPacketIncrement(
+ block, &num_sequence_numbers, &num_lost_sequence_numbers);
+}
+
+RTCPReportBlock ReportBlockStats::AggregateAndStore(
+ const ReportBlockVector& report_blocks) {
+ RTCPReportBlock aggregate;
+ if (report_blocks.empty()) {
+ return aggregate;
+ }
+ uint32_t num_sequence_numbers = 0;
+ uint32_t num_lost_sequence_numbers = 0;
+ ReportBlockVector::const_iterator report_block = report_blocks.begin();
+ for (; report_block != report_blocks.end(); ++report_block) {
+ aggregate.packets_lost += report_block->packets_lost;
+ aggregate.jitter += report_block->jitter;
+ StoreAndAddPacketIncrement(*report_block,
+ &num_sequence_numbers,
+ &num_lost_sequence_numbers);
+ }
+
+ if (report_blocks.size() == 1) {
+ // No aggregation needed.
+ return report_blocks[0];
+ }
+ // Fraction lost since previous report block.
+ aggregate.fraction_lost =
+ FractionLost(num_lost_sequence_numbers, num_sequence_numbers);
+ aggregate.jitter = static_cast<uint32_t>(
+ (aggregate.jitter + report_blocks.size() / 2) / report_blocks.size());
+ return aggregate;
+}
+
+void ReportBlockStats::StoreAndAddPacketIncrement(
+ const RTCPReportBlock& report_block,
+ uint32_t* num_sequence_numbers,
+ uint32_t* num_lost_sequence_numbers) {
+ // Get diff with previous report block.
+ ReportBlockMap::iterator prev_report_block =
+ prev_report_blocks_.find(report_block.source_ssrc);
+ if (prev_report_block != prev_report_blocks_.end()) {
+ int seq_num_diff =
+ report_block.extended_highest_sequence_number -
+ prev_report_block->second.extended_highest_sequence_number;
+ int cum_loss_diff =
+ report_block.packets_lost - prev_report_block->second.packets_lost;
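+    // Ignore blocks where the counters moved backwards (e.g. reordered or
+    // reset reports); only monotonic increments are accumulated.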
+ if (seq_num_diff >= 0 && cum_loss_diff >= 0) {
+ *num_sequence_numbers += seq_num_diff;
+ *num_lost_sequence_numbers += cum_loss_diff;
+ // Update total number of packets/lost packets.
+ num_sequence_numbers_ += seq_num_diff;
+ num_lost_sequence_numbers_ += cum_loss_diff;
+ }
+ }
+ // Store current report block.
+ prev_report_blocks_[report_block.source_ssrc] = report_block;
+}
+
+int ReportBlockStats::FractionLostInPercent() const {
+ if (num_sequence_numbers_ == 0) {
+ return -1;
+ }
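+  // Scale the accumulated Q8 fraction (0-255) to a percentage.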
+ return FractionLost(
+ num_lost_sequence_numbers_, num_sequence_numbers_) * 100 / 255;
+}
+
+} // namespace webrtc
+
diff --git a/third_party/libwebrtc/webrtc/video/report_block_stats.h b/third_party/libwebrtc/webrtc/video/report_block_stats.h
new file mode 100644
index 0000000000..033ba7deb5
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/report_block_stats.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_REPORT_BLOCK_STATS_H_
+#define VIDEO_REPORT_BLOCK_STATS_H_
+
+#include <map>
+#include <vector>
+
+#include "common_types.h" // NOLINT(build/include)
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+
+namespace webrtc {
+
+// Helper class for RTCP statistics.
+class ReportBlockStats {
+ public:
+ typedef std::map<uint32_t, RTCPReportBlock> ReportBlockMap;
+ typedef std::vector<RTCPReportBlock> ReportBlockVector;
+ ReportBlockStats();
+ ~ReportBlockStats() {}
+
+ // Updates stats and stores report blocks.
+ // Returns an aggregate of the |report_blocks|.
+ RTCPReportBlock AggregateAndStore(const ReportBlockVector& report_blocks);
+
+ // Updates stats and stores report block.
+ void Store(const RtcpStatistics& rtcp_stats,
+ uint32_t remote_ssrc,
+ uint32_t source_ssrc);
+
+  // Returns the total fraction of lost packets (or -1 if fewer than two
+  // report blocks have been stored).
+ int FractionLostInPercent() const;
+
+ private:
+ // Updates the total number of packets/lost packets.
+ // Stores the report block.
+ // Returns the number of packets/lost packets since previous report block.
+ void StoreAndAddPacketIncrement(const RTCPReportBlock& report_block,
+ uint32_t* num_sequence_numbers,
+ uint32_t* num_lost_sequence_numbers);
+
+ // The total number of packets/lost packets.
+ uint32_t num_sequence_numbers_;
+ uint32_t num_lost_sequence_numbers_;
+
+ // Map holding the last stored report block (mapped by the source SSRC).
+ ReportBlockMap prev_report_blocks_;
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_REPORT_BLOCK_STATS_H_
+
diff --git a/third_party/libwebrtc/webrtc/video/report_block_stats_unittest.cc b/third_party/libwebrtc/webrtc/video/report_block_stats_unittest.cc
new file mode 100644
index 0000000000..983dcfcf34
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/report_block_stats_unittest.cc
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "test/gtest.h"
+#include "video/report_block_stats.h"
+
+namespace webrtc {
+
+class ReportBlockStatsTest : public ::testing::Test {
+ protected:
+ ReportBlockStatsTest() : kSsrc1(0x12345), kSsrc2(0x23456) {}
+
+ void SetUp() override {
+ // kSsrc1: block 1-3.
+ block1_1_.packets_lost = 10;
+ block1_1_.fraction_lost = 123;
+ block1_1_.extended_highest_sequence_number = 24000;
+ block1_1_.jitter = 777;
+ block1_1_.source_ssrc = kSsrc1;
+ block1_2_.packets_lost = 15;
+ block1_2_.fraction_lost = 0;
+ block1_2_.extended_highest_sequence_number = 24100;
+ block1_2_.jitter = 222;
+ block1_2_.source_ssrc = kSsrc1;
+ block1_3_.packets_lost = 50;
+ block1_3_.fraction_lost = 0;
+ block1_3_.extended_highest_sequence_number = 24200;
+ block1_3_.jitter = 333;
+ block1_3_.source_ssrc = kSsrc1;
+ // kSsrc2: block 1,2.
+ block2_1_.packets_lost = 111;
+ block2_1_.fraction_lost = 222;
+ block2_1_.extended_highest_sequence_number = 8500;
+ block2_1_.jitter = 555;
+ block2_1_.source_ssrc = kSsrc2;
+ block2_2_.packets_lost = 136;
+ block2_2_.fraction_lost = 0;
+ block2_2_.extended_highest_sequence_number = 8800;
+ block2_2_.jitter = 888;
+ block2_2_.source_ssrc = kSsrc2;
+
+ ssrc1block1_.push_back(block1_1_);
+ ssrc1block2_.push_back(block1_2_);
+ ssrc12block1_.push_back(block1_1_);
+ ssrc12block1_.push_back(block2_1_);
+ ssrc12block2_.push_back(block1_2_);
+ ssrc12block2_.push_back(block2_2_);
+ }
+
+ RtcpStatistics RtcpReportBlockToRtcpStatistics(
+ const RTCPReportBlock& stats) {
+ RtcpStatistics block;
+ block.packets_lost = stats.packets_lost;
+ block.fraction_lost = stats.fraction_lost;
+ block.extended_highest_sequence_number =
+ stats.extended_highest_sequence_number;
+ block.jitter = stats.jitter;
+ return block;
+ }
+
+ const uint32_t kSsrc1;
+ const uint32_t kSsrc2;
+ RTCPReportBlock block1_1_;
+ RTCPReportBlock block1_2_;
+ RTCPReportBlock block1_3_;
+ RTCPReportBlock block2_1_;
+ RTCPReportBlock block2_2_;
+ std::vector<RTCPReportBlock> ssrc1block1_;
+ std::vector<RTCPReportBlock> ssrc1block2_;
+ std::vector<RTCPReportBlock> ssrc12block1_;
+ std::vector<RTCPReportBlock> ssrc12block2_;
+};
+
+TEST_F(ReportBlockStatsTest, AggregateAndStore_NoSsrc) {
+ ReportBlockStats stats;
+ std::vector<RTCPReportBlock> empty;
+ RTCPReportBlock aggregated = stats.AggregateAndStore(empty);
+ EXPECT_EQ(0U, aggregated.fraction_lost);
+ EXPECT_EQ(0U, aggregated.packets_lost);
+ EXPECT_EQ(0U, aggregated.jitter);
+ EXPECT_EQ(0U, aggregated.extended_highest_sequence_number);
+}
+
+TEST_F(ReportBlockStatsTest, AggregateAndStore_OneSsrc) {
+ ReportBlockStats stats;
+ RTCPReportBlock aggregated = stats.AggregateAndStore(ssrc1block1_);
+ // One ssrc, no aggregation done.
+ EXPECT_EQ(123U, aggregated.fraction_lost);
+ EXPECT_EQ(10U, aggregated.packets_lost);
+ EXPECT_EQ(777U, aggregated.jitter);
+ EXPECT_EQ(24000U, aggregated.extended_highest_sequence_number);
+
+ aggregated = stats.AggregateAndStore(ssrc1block2_);
+ EXPECT_EQ(0U, aggregated.fraction_lost);
+ EXPECT_EQ(15U, aggregated.packets_lost);
+ EXPECT_EQ(222U, aggregated.jitter);
+ EXPECT_EQ(24100U, aggregated.extended_highest_sequence_number);
+
+ // fl: 100 * (15-10) / (24100-24000) = 5%
+ EXPECT_EQ(5, stats.FractionLostInPercent());
+}
+
+TEST_F(ReportBlockStatsTest, AggregateAndStore_TwoSsrcs) {
+ ReportBlockStats stats;
+ RTCPReportBlock aggregated = stats.AggregateAndStore(ssrc12block1_);
+ EXPECT_EQ(0U, aggregated.fraction_lost);
+ EXPECT_EQ(10U + 111U, aggregated.packets_lost);
+ EXPECT_EQ((777U + 555U) / 2, aggregated.jitter);
+ EXPECT_EQ(0U, aggregated.extended_highest_sequence_number);
+
+ aggregated = stats.AggregateAndStore(ssrc12block2_);
+ // fl: 255 * ((15-10) + (136-111)) / ((24100-24000) + (8800-8500)) = 19
+ EXPECT_EQ(19U, aggregated.fraction_lost);
+ EXPECT_EQ(15U + 136U, aggregated.packets_lost);
+ EXPECT_EQ((222U + 888U) / 2, aggregated.jitter);
+ EXPECT_EQ(0U, aggregated.extended_highest_sequence_number);
+
+ // fl: 100 * ((15-10) + (136-111)) / ((24100-24000) + (8800-8500)) = 7%
+ EXPECT_EQ(7, stats.FractionLostInPercent());
+}
+
+TEST_F(ReportBlockStatsTest, StoreAndGetFractionLost) {
+ const uint32_t kRemoteSsrc = 1;
+ ReportBlockStats stats;
+ EXPECT_EQ(-1, stats.FractionLostInPercent());
+
+ // First block.
+ stats.Store(RtcpReportBlockToRtcpStatistics(block1_1_), kRemoteSsrc, kSsrc1);
+ EXPECT_EQ(-1, stats.FractionLostInPercent());
+ // fl: 100 * (15-10) / (24100-24000) = 5%
+ stats.Store(RtcpReportBlockToRtcpStatistics(block1_2_), kRemoteSsrc, kSsrc1);
+ EXPECT_EQ(5, stats.FractionLostInPercent());
+ // fl: 100 * (50-10) / (24200-24000) = 20%
+ stats.Store(RtcpReportBlockToRtcpStatistics(block1_3_), kRemoteSsrc, kSsrc1);
+ EXPECT_EQ(20, stats.FractionLostInPercent());
+}
+
+} // namespace webrtc
+
diff --git a/third_party/libwebrtc/webrtc/video/rtp_streams_synchronizer.cc b/third_party/libwebrtc/webrtc/video/rtp_streams_synchronizer.cc
new file mode 100644
index 0000000000..c330a4434a
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/rtp_streams_synchronizer.cc
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/rtp_streams_synchronizer.h"
+
+#include "call/syncable.h"
+#include "modules/video_coding/video_coding_impl.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/timeutils.h"
+#include "rtc_base/trace_event.h"
+
+namespace webrtc {
+namespace {
+bool UpdateMeasurements(StreamSynchronization::Measurements* stream,
+ const Syncable::Info& info) {
+ RTC_DCHECK(stream);
+ stream->latest_timestamp = info.latest_received_capture_timestamp;
+ stream->latest_receive_time_ms = info.latest_receive_time_ms;
+  bool new_rtcp_sr = false;
+  return stream->rtp_to_ntp.UpdateMeasurements(info.capture_time_ntp_secs,
+                                               info.capture_time_ntp_frac,
+                                               info.capture_time_source_clock,
+                                               &new_rtcp_sr);
+}
+} // namespace
+
+RtpStreamsSynchronizer::RtpStreamsSynchronizer(Syncable* syncable_video)
+ : syncable_video_(syncable_video),
+ syncable_audio_(nullptr),
+ sync_(),
+ last_sync_time_(rtc::TimeNanos()) {
+ RTC_DCHECK(syncable_video);
+ process_thread_checker_.DetachFromThread();
+}
+
+void RtpStreamsSynchronizer::ConfigureSync(Syncable* syncable_audio) {
+ rtc::CritScope lock(&crit_);
+ if (syncable_audio == syncable_audio_) {
+ // This prevents expensive no-ops.
+ return;
+ }
+
+ syncable_audio_ = syncable_audio;
+ sync_.reset(nullptr);
+ if (syncable_audio_) {
+ sync_.reset(new StreamSynchronization(syncable_video_->id(),
+ syncable_audio_->id()));
+ }
+}
+
+int64_t RtpStreamsSynchronizer::TimeUntilNextProcess() {
+ RTC_DCHECK_RUN_ON(&process_thread_checker_);
+ const int64_t kSyncIntervalMs = 1000;
+ return kSyncIntervalMs -
+ (rtc::TimeNanos() - last_sync_time_) / rtc::kNumNanosecsPerMillisec;
+}
+
+void RtpStreamsSynchronizer::Process() {
+ RTC_DCHECK_RUN_ON(&process_thread_checker_);
+ last_sync_time_ = rtc::TimeNanos();
+
+ rtc::CritScope lock(&crit_);
+ if (!syncable_audio_) {
+ return;
+ }
+ RTC_DCHECK(sync_.get());
+
+ rtc::Optional<Syncable::Info> audio_info = syncable_audio_->GetInfo();
+ if (!audio_info || !UpdateMeasurements(&audio_measurement_, *audio_info)) {
+ return;
+ }
+
+ int64_t last_video_receive_ms = video_measurement_.latest_receive_time_ms;
+ rtc::Optional<Syncable::Info> video_info = syncable_video_->GetInfo();
+ if (!video_info || !UpdateMeasurements(&video_measurement_, *video_info)) {
+ return;
+ }
+
+ if (last_video_receive_ms == video_measurement_.latest_receive_time_ms) {
+ // No new video packet has been received since last update.
+ return;
+ }
+
+ int relative_delay_ms;
+ // Calculate how much later or earlier the audio stream is compared to video.
+ if (!sync_->ComputeRelativeDelay(audio_measurement_, video_measurement_,
+ &relative_delay_ms)) {
+ return;
+ }
+
+ TRACE_COUNTER1("webrtc", "SyncCurrentVideoDelay",
+ video_info->current_delay_ms);
+ TRACE_COUNTER1("webrtc", "SyncCurrentAudioDelay",
+ audio_info->current_delay_ms);
+ TRACE_COUNTER1("webrtc", "SyncRelativeDelay", relative_delay_ms);
+ int target_audio_delay_ms = 0;
+ int target_video_delay_ms = video_info->current_delay_ms;
+ // Calculate the necessary extra audio delay and desired total video
+ // delay to get the streams in sync.
+ if (!sync_->ComputeDelays(relative_delay_ms,
+ audio_info->current_delay_ms,
+ &target_audio_delay_ms,
+ &target_video_delay_ms)) {
+ return;
+ }
+
+ syncable_audio_->SetMinimumPlayoutDelay(target_audio_delay_ms);
+ syncable_video_->SetMinimumPlayoutDelay(target_video_delay_ms);
+}
+
+bool RtpStreamsSynchronizer::GetStreamSyncOffsetInMs(
+ uint32_t timestamp,
+ int64_t render_time_ms,
+ int64_t* stream_offset_ms,
+ double* estimated_freq_khz) const {
+ rtc::CritScope lock(&crit_);
+ if (!syncable_audio_) {
+ return false;
+ }
+
+ uint32_t playout_timestamp = syncable_audio_->GetPlayoutTimestamp();
+
+ int64_t latest_audio_ntp;
+ if (!audio_measurement_.rtp_to_ntp.Estimate(playout_timestamp,
+ &latest_audio_ntp)) {
+ return false;
+ }
+
+ int64_t latest_video_ntp;
+ if (!video_measurement_.rtp_to_ntp.Estimate(timestamp, &latest_video_ntp)) {
+ return false;
+ }
+
+ int64_t time_to_render_ms = render_time_ms - rtc::TimeMillis();
+ if (time_to_render_ms > 0)
+ latest_video_ntp += time_to_render_ms;
+
+ *stream_offset_ms = latest_audio_ntp - latest_video_ntp;
+ *estimated_freq_khz = video_measurement_.rtp_to_ntp.params()->frequency_khz;
+ return true;
+}
+
+} // namespace webrtc
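[Editor's note] Once both NTP estimates are available, the core of GetStreamSyncOffsetInMs() above is a small piece of arithmetic. A sketch with plain inputs, for illustration only:

#include <cstdint>

// Offset of audio relative to video, as computed at the end of
// GetStreamSyncOffsetInMs(). Positive means audio is ahead of video.
int64_t StreamOffsetMs(int64_t audio_playout_ntp_ms,
                       int64_t video_ntp_ms,
                       int64_t render_time_ms,
                       int64_t now_ms) {
  const int64_t time_to_render_ms = render_time_ms - now_ms;
  if (time_to_render_ms > 0)
    video_ntp_ms += time_to_render_ms;  // Project video to its render time.
  return audio_playout_ntp_ms - video_ntp_ms;
}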
diff --git a/third_party/libwebrtc/webrtc/video/rtp_streams_synchronizer.h b/third_party/libwebrtc/webrtc/video/rtp_streams_synchronizer.h
new file mode 100644
index 0000000000..0d3b205b41
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/rtp_streams_synchronizer.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// RtpStreamsSynchronizer is responsible for synchronizing audio and video
+// for a given voice engine channel and video receive stream.
+
+#ifndef VIDEO_RTP_STREAMS_SYNCHRONIZER_H_
+#define VIDEO_RTP_STREAMS_SYNCHRONIZER_H_
+
+#include <memory>
+
+#include "modules/include/module.h"
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/thread_checker.h"
+#include "video/stream_synchronization.h"
+
+namespace webrtc {
+
+class Syncable;
+
+namespace vcm {
+class VideoReceiver;
+} // namespace vcm
+
+class RtpStreamsSynchronizer : public Module {
+ public:
+ explicit RtpStreamsSynchronizer(Syncable* syncable_video);
+
+ void ConfigureSync(Syncable* syncable_audio);
+
+ // Implements Module.
+ int64_t TimeUntilNextProcess() override;
+ void Process() override;
+
+ // Gets the sync offset between the current played out audio frame and the
+ // video |frame|. Returns true on success, false otherwise.
+ // The estimated frequency is the frequency used in the RTP to NTP timestamp
+ // conversion.
+ bool GetStreamSyncOffsetInMs(uint32_t timestamp,
+ int64_t render_time_ms,
+ int64_t* stream_offset_ms,
+ double* estimated_freq_khz) const;
+
+ private:
+ Syncable* syncable_video_;
+
+ rtc::CriticalSection crit_;
+ Syncable* syncable_audio_ RTC_GUARDED_BY(crit_);
+ std::unique_ptr<StreamSynchronization> sync_ RTC_GUARDED_BY(crit_);
+ StreamSynchronization::Measurements audio_measurement_ RTC_GUARDED_BY(crit_);
+ StreamSynchronization::Measurements video_measurement_ RTC_GUARDED_BY(crit_);
+
+ rtc::ThreadChecker process_thread_checker_;
+ int64_t last_sync_time_ RTC_ACCESS_ON(&process_thread_checker_);
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_RTP_STREAMS_SYNCHRONIZER_H_
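[Editor's note] The Module interface implemented above pairs TimeUntilNextProcess() with Process(); in production a ProcessThread drives this scheduling. A single hand-rolled iteration would look roughly like the following sketch (not how the real code runs the module):

#include <chrono>
#include <thread>

// One manual scheduling pass over the Module contract declared above.
void RunOneSyncIteration(webrtc::RtpStreamsSynchronizer* sync) {
  const int64_t wait_ms = sync->TimeUntilNextProcess();
  if (wait_ms > 0)
    std::this_thread::sleep_for(std::chrono::milliseconds(wait_ms));
  sync->Process();  // Re-evaluates relative delay and playout targets.
}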
diff --git a/third_party/libwebrtc/webrtc/video/rtp_video_stream_receiver.cc b/third_party/libwebrtc/webrtc/video/rtp_video_stream_receiver.cc
new file mode 100644
index 0000000000..7ed05621bc
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/rtp_video_stream_receiver.cc
@@ -0,0 +1,704 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/rtp_video_stream_receiver.h"
+
+#include <algorithm>
+#include <utility>
+#include <vector>
+
+#include "call/video_config.h"
+#include "common_types.h" // NOLINT(build/include)
+#include "media/base/mediaconstants.h"
+#include "modules/pacing/packet_router.h"
+#include "modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h"
+#include "modules/rtp_rtcp/include/receive_statistics.h"
+#include "modules/rtp_rtcp/include/rtp_cvo.h"
+#include "modules/rtp_rtcp/include/rtp_receiver.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp.h"
+#include "modules/rtp_rtcp/include/ulpfec_receiver.h"
+#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "modules/video_coding/frame_object.h"
+#include "modules/video_coding/h264_sprop_parameter_sets.h"
+#include "modules/video_coding/h264_sps_pps_tracker.h"
+#include "modules/video_coding/packet_buffer.h"
+#include "modules/video_coding/video_coding_impl.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/location.h"
+#include "rtc_base/logging.h"
+#include "system_wrappers/include/field_trial.h"
+#include "system_wrappers/include/metrics.h"
+#include "system_wrappers/include/timestamp_extrapolator.h"
+#include "video/receive_statistics_proxy.h"
+
+namespace webrtc {
+
+namespace {
+// TODO(philipel): Change kPacketBufferStartSize back to 32 in M63; see
+// crbug.com/752886.
+constexpr int kPacketBufferStartSize = 512;
+constexpr int kPacketBufferMaxSize = 2048;
+}  // namespace
+
+std::unique_ptr<RtpRtcp> CreateRtpRtcpModule(
+ ReceiveStatistics* receive_statistics,
+ Transport* outgoing_transport,
+ RtcpEventObserver* rtcp_event_observer,
+ RtcpRttStats* rtt_stats,
+ RtcpPacketTypeCounterObserver* rtcp_packet_type_counter_observer,
+ TransportSequenceNumberAllocator* transport_sequence_number_allocator) {
+ RtpRtcp::Configuration configuration;
+ configuration.audio = false;
+ configuration.receiver_only = true;
+ configuration.receive_statistics = receive_statistics;
+ configuration.outgoing_transport = outgoing_transport;
+ configuration.event_callback = rtcp_event_observer;
+ configuration.intra_frame_callback = nullptr;
+ configuration.rtt_stats = rtt_stats;
+ configuration.rtcp_packet_type_counter_observer =
+ rtcp_packet_type_counter_observer;
+ configuration.transport_sequence_number_allocator =
+ transport_sequence_number_allocator;
+ configuration.send_bitrate_observer = nullptr;
+ configuration.send_frame_count_observer = nullptr;
+ configuration.send_side_delay_observer = nullptr;
+ configuration.send_packet_observer = nullptr;
+ configuration.bandwidth_callback = nullptr;
+ configuration.transport_feedback_callback = nullptr;
+
+ std::unique_ptr<RtpRtcp> rtp_rtcp(RtpRtcp::CreateRtpRtcp(configuration));
+ rtp_rtcp->SetRTCPStatus(RtcpMode::kCompound);
+
+ return rtp_rtcp;
+}
+
+static const int kPacketLogIntervalMs = 10000;
+
+RtpVideoStreamReceiver::RtpVideoStreamReceiver(
+ Transport* transport,
+ RtcpRttStats* rtt_stats,
+ PacketRouter* packet_router,
+ const VideoReceiveStream::Config* config,
+ ReceiveStatistics* rtp_receive_statistics,
+ ReceiveStatisticsProxy* receive_stats_proxy,
+ ProcessThread* process_thread,
+ NackSender* nack_sender,
+ KeyFrameRequestSender* keyframe_request_sender,
+ video_coding::OnCompleteFrameCallback* complete_frame_callback,
+ VCMTiming* timing)
+ : clock_(Clock::GetRealTimeClock()),
+ config_(*config),
+ packet_router_(packet_router),
+ process_thread_(process_thread),
+ ntp_estimator_(clock_),
+ rtp_header_extensions_(config_.rtp.extensions),
+ rtp_receiver_(RtpReceiver::CreateVideoReceiver(clock_,
+ this,
+ this,
+ &rtp_payload_registry_)),
+ rtp_receive_statistics_(rtp_receive_statistics),
+ ulpfec_receiver_(UlpfecReceiver::Create(config->rtp.remote_ssrc, this)),
+ receiving_(false),
+ last_packet_log_ms_(-1),
+ rtp_rtcp_(CreateRtpRtcpModule(rtp_receive_statistics_,
+ transport,
+ config->rtcp_event_observer,
+ rtt_stats,
+ receive_stats_proxy,
+ packet_router)),
+ complete_frame_callback_(complete_frame_callback),
+ keyframe_request_sender_(keyframe_request_sender),
+ timing_(timing),
+ has_received_frame_(false) {
+ constexpr bool remb_candidate = true;
+ packet_router_->AddReceiveRtpModule(rtp_rtcp_.get(), remb_candidate);
+ rtp_receive_statistics_->RegisterRtpStatisticsCallback(receive_stats_proxy);
+ rtp_receive_statistics_->RegisterRtcpStatisticsCallback(receive_stats_proxy);
+
+ RTC_DCHECK(config_.rtp.rtcp_mode != RtcpMode::kOff)
+ << "A stream should not be configured with RTCP disabled. This value is "
+ "reserved for internal usage.";
+ RTC_DCHECK(config_.rtp.remote_ssrc != 0);
+ // TODO(pbos): What's an appropriate local_ssrc for receive-only streams?
+ RTC_DCHECK(config_.rtp.local_ssrc != 0);
+ RTC_DCHECK(config_.rtp.remote_ssrc != config_.rtp.local_ssrc);
+
+ rtp_rtcp_->SetRTCPStatus(config_.rtp.rtcp_mode);
+ rtp_rtcp_->SetSSRC(config_.rtp.local_ssrc);
+ rtp_rtcp_->SetRemoteSSRC(config_.rtp.remote_ssrc);
+ rtp_rtcp_->SetKeyFrameRequestMethod(kKeyFrameReqPliRtcp);
+
+ static const int kMaxPacketAgeToNack = 450;
+ const int max_reordering_threshold = (config_.rtp.nack.rtp_history_ms > 0)
+ ? kMaxPacketAgeToNack
+ : kDefaultMaxReorderingThreshold;
+ rtp_receive_statistics_->SetMaxReorderingThreshold(max_reordering_threshold);
+
+ if (config_.rtp.rtx_ssrc) {
+ // Needed for rtp_payload_registry_.RtxEnabled().
+ rtp_payload_registry_.SetRtxSsrc(config_.rtp.rtx_ssrc);
+ }
+
+ if (IsUlpfecEnabled()) {
+ VideoCodec ulpfec_codec = {};
+ ulpfec_codec.codecType = kVideoCodecULPFEC;
+ strncpy(ulpfec_codec.plName, "ulpfec", sizeof(ulpfec_codec.plName));
+ ulpfec_codec.plType = config_.rtp.ulpfec_payload_type;
+ RTC_CHECK(AddReceiveCodec(ulpfec_codec));
+ }
+
+ if (IsRedEnabled()) {
+ VideoCodec red_codec = {};
+ red_codec.codecType = kVideoCodecRED;
+ strncpy(red_codec.plName, "red", sizeof(red_codec.plName));
+ red_codec.plType = config_.rtp.red_payload_type;
+ RTC_CHECK(AddReceiveCodec(red_codec));
+ }
+
+ rtp_rtcp_->SetTMMBRStatus(config_.rtp.tmmbr);
+
+ rtp_rtcp_->SetKeyFrameRequestMethod(config_.rtp.keyframe_method);
+
+ if (config_.rtp.rtcp_xr.receiver_reference_time_report)
+ rtp_rtcp_->SetRtcpXrRrtrStatus(true);
+
+ // Stats callback for CNAME changes.
+ rtp_rtcp_->RegisterRtcpStatisticsCallback(receive_stats_proxy);
+
+ process_thread_->RegisterModule(rtp_rtcp_.get(), RTC_FROM_HERE);
+
+ if (config_.rtp.nack.rtp_history_ms != 0) {
+ nack_module_.reset(
+ new NackModule(clock_, nack_sender, keyframe_request_sender));
+ process_thread_->RegisterModule(nack_module_.get(), RTC_FROM_HERE);
+ }
+
+  packet_buffer_ = video_coding::PacketBuffer::Create(
+      clock_, kPacketBufferStartSize, kPacketBufferMaxSize, this);
+ reference_finder_.reset(new video_coding::RtpFrameReferenceFinder(this));
+}
+
+RtpVideoStreamReceiver::~RtpVideoStreamReceiver() {
+ RTC_DCHECK(secondary_sinks_.empty());
+
+ if (nack_module_) {
+ process_thread_->DeRegisterModule(nack_module_.get());
+ }
+
+ process_thread_->DeRegisterModule(rtp_rtcp_.get());
+
+ packet_router_->RemoveReceiveRtpModule(rtp_rtcp_.get());
+ UpdateHistograms();
+}
+
+bool RtpVideoStreamReceiver::AddReceiveCodec(
+ const VideoCodec& video_codec,
+ const std::map<std::string, std::string>& codec_params) {
+  pt_codec_params_.insert(std::make_pair(video_codec.plType, codec_params));
+ return AddReceiveCodec(video_codec);
+}
+
+bool RtpVideoStreamReceiver::AddReceiveCodec(const VideoCodec& video_codec) {
+ int8_t old_pltype = -1;
+ if (rtp_payload_registry_.ReceivePayloadType(video_codec, &old_pltype) !=
+ -1) {
+ rtp_payload_registry_.DeRegisterReceivePayload(old_pltype);
+ }
+ return rtp_payload_registry_.RegisterReceivePayload(video_codec) == 0;
+}
+
+uint32_t RtpVideoStreamReceiver::GetRemoteSsrc() const {
+ return config_.rtp.remote_ssrc;
+}
+
+int RtpVideoStreamReceiver::GetCsrcs(uint32_t* csrcs) const {
+ return rtp_receiver_->CSRCs(csrcs);
+}
+
+void RtpVideoStreamReceiver::GetRID(char rid[256]) const {
+ rtp_receiver_->GetRID(rid);
+}
+
+RtpReceiver* RtpVideoStreamReceiver::GetRtpReceiver() const {
+ return rtp_receiver_.get();
+}
+
+int32_t RtpVideoStreamReceiver::OnReceivedPayloadData(
+ const uint8_t* payload_data,
+ size_t payload_size,
+ const WebRtcRTPHeader* rtp_header) {
+ WebRtcRTPHeader rtp_header_with_ntp = *rtp_header;
+ rtp_header_with_ntp.ntp_time_ms =
+ ntp_estimator_.Estimate(rtp_header->header.timestamp);
+ VCMPacket packet(payload_data, payload_size, rtp_header_with_ntp);
+ packet.timesNacked =
+ nack_module_ ? nack_module_->OnReceivedPacket(packet) : -1;
+ packet.receive_time_ms = clock_->TimeInMilliseconds();
+
+  // In the case of a video stream without picture ids and no RTX, the
+  // RtpFrameReferenceFinder will need to know about padding to correctly
+  // calculate frame references.
+ if (packet.sizeBytes == 0) {
+ reference_finder_->PaddingReceived(packet.seqNum);
+ packet_buffer_->PaddingReceived(packet.seqNum);
+ return 0;
+ }
+
+ if (packet.codec == kVideoCodecH264) {
+    // Only when we start to receive packets do we know which payload type
+    // will be used. Once the payload type is known, insert the correct
+    // SPS/PPS into the tracker.
+ if (packet.payloadType != last_payload_type_) {
+ last_payload_type_ = packet.payloadType;
+ InsertSpsPpsIntoTracker(packet.payloadType);
+ }
+
+ switch (tracker_.CopyAndFixBitstream(&packet)) {
+ case video_coding::H264SpsPpsTracker::kRequestKeyframe:
+ keyframe_request_sender_->RequestKeyFrame();
+ FALLTHROUGH();
+ case video_coding::H264SpsPpsTracker::kDrop:
+ return 0;
+ case video_coding::H264SpsPpsTracker::kInsert:
+ break;
+ }
+
+ } else {
+ uint8_t* data = new uint8_t[packet.sizeBytes];
+ memcpy(data, packet.dataPtr, packet.sizeBytes);
+ packet.dataPtr = data;
+ }
+
+ packet_buffer_->InsertPacket(&packet);
+ return 0;
+}
+
+void RtpVideoStreamReceiver::OnRecoveredPacket(const uint8_t* rtp_packet,
+ size_t rtp_packet_length) {
+ RtpPacketReceived packet;
+ if (!packet.Parse(rtp_packet, rtp_packet_length))
+ return;
+ packet.IdentifyExtensions(rtp_header_extensions_);
+ packet.set_payload_type_frequency(kVideoPayloadTypeFrequency);
+
+ RTPHeader header;
+ packet.GetHeader(&header);
+ ReceivePacket(rtp_packet, rtp_packet_length, header);
+}
+
+// TODO(pbos): Remove as soon as audio can handle a changing payload type
+// without this callback.
+int32_t RtpVideoStreamReceiver::OnInitializeDecoder(
+ const int payload_type,
+ const SdpAudioFormat& audio_format,
+ const uint32_t rate) {
+ RTC_NOTREACHED();
+ return 0;
+}
+
+// This method handles both regular RTP packets and packets recovered
+// via FlexFEC.
+void RtpVideoStreamReceiver::OnRtpPacket(const RtpPacketReceived& packet) {
+ {
+ rtc::CritScope lock(&receive_cs_);
+ if (!receiving_) {
+ return;
+ }
+ }
+
+ if (!packet.recovered()) {
+ int64_t now_ms = clock_->TimeInMilliseconds();
+
+ // Periodically log the RTP header of incoming packets.
+ if (now_ms - last_packet_log_ms_ > kPacketLogIntervalMs) {
+ std::stringstream ss;
+ ss << "Packet received on SSRC: " << packet.Ssrc()
+ << " with payload type: " << static_cast<int>(packet.PayloadType())
+ << ", timestamp: " << packet.Timestamp()
+ << ", sequence number: " << packet.SequenceNumber()
+ << ", arrival time: " << packet.arrival_time_ms();
+ int32_t time_offset;
+ if (packet.GetExtension<TransmissionOffset>(&time_offset)) {
+ ss << ", toffset: " << time_offset;
+ }
+ uint32_t send_time;
+ if (packet.GetExtension<AbsoluteSendTime>(&send_time)) {
+ ss << ", abs send time: " << send_time;
+ }
+ StringRtpHeaderExtension rtp_stream_id;
+ if (packet.GetExtension<RtpStreamId>(&rtp_stream_id)) {
+ ss << ", rid: " << rtp_stream_id.data();
+ }
+ StringRtpHeaderExtension repaired_rtp_stream_id;
+ if (packet.GetExtension<RepairedRtpStreamId>(&repaired_rtp_stream_id)) {
+ ss << ", repaired rid: " << repaired_rtp_stream_id.data();
+ }
+ StringRtpHeaderExtension mid;
+ if (packet.GetExtension<RtpMid>(&mid)) {
+ ss << ", mid: " << mid.data();
+ }
+ CsrcAudioLevelList csrc_audio_levels;
+ if (packet.GetExtension<CsrcAudioLevel>(&csrc_audio_levels)) {
+ if (csrc_audio_levels.numAudioLevels) {
+ ss << ", csrc audio levels : {" << csrc_audio_levels.arrOfAudioLevels[0];
+ for (uint8_t i = 1; i < csrc_audio_levels.numAudioLevels; i++) {
+ ss << ", " << csrc_audio_levels.arrOfAudioLevels[i];
+ }
+ ss << "}";
+ }
+ }
+ RTC_LOG(LS_INFO) << ss.str();
+ last_packet_log_ms_ = now_ms;
+ }
+ }
+
+ // TODO(nisse): Delete use of GetHeader, but needs refactoring of
+ // ReceivePacket and IncomingPacket methods below.
+ RTPHeader header;
+ packet.GetHeader(&header);
+
+ header.payload_type_frequency = kVideoPayloadTypeFrequency;
+
+ bool in_order = IsPacketInOrder(header);
+ if (!packet.recovered()) {
+ // TODO(nisse): Why isn't this done for recovered packets?
+ rtp_payload_registry_.SetIncomingPayloadType(header);
+ }
+ ReceivePacket(packet.data(), packet.size(), header);
+ // Update receive statistics after ReceivePacket.
+ // Receive statistics will be reset if the payload type changes (make sure
+ // that the first packet is included in the stats).
+ if (!packet.recovered()) {
+    // TODO(nisse): We should pass a recovered flag to stats, to aid in
+    // fixing bugs.webrtc.org/6339.
+ rtp_receive_statistics_->IncomingPacket(
+ header, packet.size(), IsPacketRetransmitted(header, in_order));
+ }
+
+ rtc::CritScope lock(&receive_cs_);
+ for (RtpPacketSinkInterface* secondary_sink : secondary_sinks_) {
+ secondary_sink->OnRtpPacket(packet);
+ }
+}
+
+int32_t RtpVideoStreamReceiver::RequestKeyFrame() {
+ return rtp_rtcp_->RequestKeyFrame();
+}
+
+bool RtpVideoStreamReceiver::IsUlpfecEnabled() const {
+ return config_.rtp.ulpfec_payload_type != -1;
+}
+
+bool RtpVideoStreamReceiver::IsRedEnabled() const {
+ return config_.rtp.red_payload_type != -1;
+}
+
+bool RtpVideoStreamReceiver::IsRetransmissionsEnabled() const {
+ return config_.rtp.nack.rtp_history_ms > 0;
+}
+
+void RtpVideoStreamReceiver::RequestPacketRetransmit(
+ const std::vector<uint16_t>& sequence_numbers) {
+ rtp_rtcp_->SendNack(sequence_numbers);
+}
+
+int32_t RtpVideoStreamReceiver::ResendPackets(const uint16_t* sequence_numbers,
+ uint16_t length) {
+ return rtp_rtcp_->SendNACK(sequence_numbers, length);
+}
+
+void RtpVideoStreamReceiver::OnReceivedFrame(
+ std::unique_ptr<video_coding::RtpFrameObject> frame) {
+ if (!has_received_frame_) {
+ has_received_frame_ = true;
+ if (frame->FrameType() != kVideoFrameKey)
+ keyframe_request_sender_->RequestKeyFrame();
+ }
+
+ if (!frame->delayed_by_retransmission())
+ timing_->IncomingTimestamp(frame->timestamp, clock_->TimeInMilliseconds());
+ reference_finder_->ManageFrame(std::move(frame));
+}
+
+void RtpVideoStreamReceiver::OnCompleteFrame(
+ std::unique_ptr<video_coding::FrameObject> frame) {
+ {
+ rtc::CritScope lock(&last_seq_num_cs_);
+ video_coding::RtpFrameObject* rtp_frame =
+ static_cast<video_coding::RtpFrameObject*>(frame.get());
+ last_seq_num_for_pic_id_[rtp_frame->picture_id] = rtp_frame->last_seq_num();
+ }
+ complete_frame_callback_->OnCompleteFrame(std::move(frame));
+}
+
+void RtpVideoStreamReceiver::OnRttUpdate(int64_t avg_rtt_ms,
+ int64_t max_rtt_ms) {
+ if (nack_module_)
+ nack_module_->UpdateRtt(max_rtt_ms);
+}
+
+rtc::Optional<int64_t> RtpVideoStreamReceiver::LastReceivedPacketMs() const {
+ return packet_buffer_->LastReceivedPacketMs();
+}
+
+rtc::Optional<int64_t> RtpVideoStreamReceiver::LastReceivedKeyframePacketMs()
+ const {
+ return packet_buffer_->LastReceivedKeyframePacketMs();
+}
+
+void RtpVideoStreamReceiver::AddSecondarySink(RtpPacketSinkInterface* sink) {
+ rtc::CritScope lock(&receive_cs_);
+ RTC_DCHECK(std::find(secondary_sinks_.cbegin(), secondary_sinks_.cend(),
+ sink) == secondary_sinks_.cend());
+ secondary_sinks_.push_back(sink);
+}
+
+void RtpVideoStreamReceiver::RemoveSecondarySink(
+ const RtpPacketSinkInterface* sink) {
+ rtc::CritScope lock(&receive_cs_);
+ auto it = std::find(secondary_sinks_.begin(), secondary_sinks_.end(), sink);
+ if (it == secondary_sinks_.end()) {
+    // We might be rolling back a call whose setup failed mid-way. In such a
+ // case, it's simpler to remove "everything" rather than remember what
+ // has already been added.
+ RTC_LOG(LS_WARNING) << "Removal of unknown sink.";
+ return;
+ }
+ secondary_sinks_.erase(it);
+}
+
+void RtpVideoStreamReceiver::ReceivePacket(const uint8_t* packet,
+ size_t packet_length,
+ const RTPHeader& header) {
+ if (rtp_payload_registry_.IsRed(header)) {
+ ParseAndHandleEncapsulatingHeader(packet, packet_length, header);
+ return;
+ }
+ const uint8_t* payload = packet + header.headerLength;
+  RTC_DCHECK_GE(packet_length, header.headerLength);
+ size_t payload_length = packet_length - header.headerLength;
+ const auto pl =
+ rtp_payload_registry_.PayloadTypeToPayload(header.payloadType);
+ if (pl) {
+ rtp_receiver_->IncomingRtpPacket(header, payload, payload_length,
+ pl->typeSpecific);
+ }
+}
+
+void RtpVideoStreamReceiver::ParseAndHandleEncapsulatingHeader(
+ const uint8_t* packet, size_t packet_length, const RTPHeader& header) {
+ if (rtp_payload_registry_.IsRed(header)) {
+ int8_t ulpfec_pt = rtp_payload_registry_.ulpfec_payload_type();
+ if (packet[header.headerLength] == ulpfec_pt) {
+ rtp_receive_statistics_->FecPacketReceived(header, packet_length);
+      // Notify video_receiver about received FEC packets to avoid NACKing
+      // these packets.
+ NotifyReceiverOfFecPacket(header);
+ }
+ if (ulpfec_receiver_->AddReceivedRedPacket(header, packet, packet_length,
+ ulpfec_pt) != 0) {
+ return;
+ }
+ ulpfec_receiver_->ProcessReceivedFec();
+ }
+}
+
+void RtpVideoStreamReceiver::NotifyReceiverOfFecPacket(
+ const RTPHeader& header) {
+ int8_t last_media_payload_type =
+ rtp_payload_registry_.last_received_media_payload_type();
+ if (last_media_payload_type < 0) {
+ RTC_LOG(LS_WARNING) << "Failed to get last media payload type.";
+ return;
+ }
+ // Fake an empty media packet.
+ WebRtcRTPHeader rtp_header = {};
+ rtp_header.header = header;
+ rtp_header.header.payloadType = last_media_payload_type;
+ rtp_header.header.paddingLength = 0;
+ const auto pl =
+ rtp_payload_registry_.PayloadTypeToPayload(last_media_payload_type);
+ if (!pl) {
+ RTC_LOG(LS_WARNING) << "Failed to get payload specifics.";
+ return;
+ }
+  rtp_header.type.Video.codec =
+      pl->typeSpecific.video_payload().videoCodecType;
+ rtp_header.type.Video.rotation = kVideoRotation_0;
+ if (header.extension.hasVideoRotation) {
+ rtp_header.type.Video.rotation = header.extension.videoRotation;
+ }
+ rtp_header.type.Video.content_type = VideoContentType::UNSPECIFIED;
+ if (header.extension.hasVideoContentType) {
+ rtp_header.type.Video.content_type = header.extension.videoContentType;
+ }
+ rtp_header.type.Video.video_timing = {0u, 0u, 0u, 0u, 0u, 0u, false};
+ if (header.extension.has_video_timing) {
+ rtp_header.type.Video.video_timing = header.extension.video_timing;
+ }
+ rtp_header.type.Video.playout_delay = header.extension.playout_delay;
+
+ OnReceivedPayloadData(nullptr, 0, &rtp_header);
+}
+
+bool RtpVideoStreamReceiver::DeliverRtcp(const uint8_t* rtcp_packet,
+ size_t rtcp_packet_length) {
+ {
+ rtc::CritScope lock(&receive_cs_);
+ if (!receiving_) {
+ return false;
+ }
+ }
+
+ rtp_rtcp_->IncomingRtcpPacket(rtcp_packet, rtcp_packet_length);
+
+ int64_t rtt = 0;
+ rtp_rtcp_->RTT(rtp_receiver_->SSRC(), &rtt, nullptr, nullptr, nullptr);
+ if (rtt == 0) {
+ // Waiting for valid rtt.
+ return true;
+ }
+ uint32_t ntp_secs = 0;
+ uint32_t ntp_frac = 0;
+ uint32_t rtp_timestamp = 0;
+  uint32_t received_ntp_secs = 0;
+  uint32_t received_ntp_frac = 0;
+  if (rtp_rtcp_->RemoteNTP(&ntp_secs, &ntp_frac, &received_ntp_secs,
+                           &received_ntp_frac, &rtp_timestamp) != 0) {
+    // Waiting for RTCP.
+    return true;
+  }
+  NtpTime received_ntp(received_ntp_secs, received_ntp_frac);
+  int64_t time_since_received =
+      clock_->CurrentNtpInMilliseconds() - received_ntp.ToMs();
+  // Don't use old SRs to estimate time.
+  if (time_since_received <= 1) {
+ ntp_estimator_.UpdateRtcpTimestamp(rtt, ntp_secs, ntp_frac, rtp_timestamp);
+ }
+
+ return true;
+}
+
+void RtpVideoStreamReceiver::FrameContinuous(int64_t picture_id) {
+ if (!nack_module_)
+ return;
+
+ int seq_num = -1;
+ {
+ rtc::CritScope lock(&last_seq_num_cs_);
+ auto seq_num_it = last_seq_num_for_pic_id_.find(picture_id);
+ if (seq_num_it != last_seq_num_for_pic_id_.end())
+ seq_num = seq_num_it->second;
+ }
+ if (seq_num != -1)
+ nack_module_->ClearUpTo(seq_num);
+}
+
+void RtpVideoStreamReceiver::FrameDecoded(int64_t picture_id) {
+ int seq_num = -1;
+ {
+ rtc::CritScope lock(&last_seq_num_cs_);
+ auto seq_num_it = last_seq_num_for_pic_id_.find(picture_id);
+ if (seq_num_it != last_seq_num_for_pic_id_.end()) {
+ seq_num = seq_num_it->second;
+ last_seq_num_for_pic_id_.erase(last_seq_num_for_pic_id_.begin(),
+ ++seq_num_it);
+ }
+ }
+ if (seq_num != -1) {
+ packet_buffer_->ClearTo(seq_num);
+ reference_finder_->ClearTo(seq_num);
+ }
+}
+
+void RtpVideoStreamReceiver::SignalNetworkState(NetworkState state) {
+ rtp_rtcp_->SetRTCPStatus(state == kNetworkUp ? config_.rtp.rtcp_mode
+ : RtcpMode::kOff);
+}
+
+void RtpVideoStreamReceiver::StartReceive() {
+ rtc::CritScope lock(&receive_cs_);
+ receiving_ = true;
+}
+
+void RtpVideoStreamReceiver::StopReceive() {
+ rtc::CritScope lock(&receive_cs_);
+ receiving_ = false;
+}
+
+bool RtpVideoStreamReceiver::IsPacketInOrder(const RTPHeader& header) const {
+ StreamStatistician* statistician =
+ rtp_receive_statistics_->GetStatistician(header.ssrc);
+ if (!statistician)
+ return false;
+ return statistician->IsPacketInOrder(header.sequenceNumber);
+}
+
+bool RtpVideoStreamReceiver::IsPacketRetransmitted(const RTPHeader& header,
+ bool in_order) const {
+ // Retransmissions are handled separately if RTX is enabled.
+ if (rtp_payload_registry_.RtxEnabled())
+ return false;
+ StreamStatistician* statistician =
+ rtp_receive_statistics_->GetStatistician(header.ssrc);
+ if (!statistician)
+ return false;
+ // Check if this is a retransmission.
+ int64_t min_rtt = 0;
+ rtp_rtcp_->RTT(config_.rtp.remote_ssrc, nullptr, nullptr, &min_rtt, nullptr);
+ return !in_order &&
+ statistician->IsRetransmitOfOldPacket(header, min_rtt);
+}
+
+void RtpVideoStreamReceiver::UpdateHistograms() {
+ FecPacketCounter counter = ulpfec_receiver_->GetPacketCounter();
+ if (counter.first_packet_time_ms == -1)
+ return;
+
+ int64_t elapsed_sec =
+ (clock_->TimeInMilliseconds() - counter.first_packet_time_ms) / 1000;
+ if (elapsed_sec < metrics::kMinRunTimeInSeconds)
+ return;
+
+ if (counter.num_packets > 0) {
+ RTC_HISTOGRAM_PERCENTAGE(
+ "WebRTC.Video.ReceivedFecPacketsInPercent",
+ static_cast<int>(counter.num_fec_packets * 100 / counter.num_packets));
+ }
+ if (counter.num_fec_packets > 0) {
+ RTC_HISTOGRAM_PERCENTAGE("WebRTC.Video.RecoveredMediaPacketsInPercentOfFec",
+ static_cast<int>(counter.num_recovered_packets *
+ 100 / counter.num_fec_packets));
+ }
+}
+
+void RtpVideoStreamReceiver::InsertSpsPpsIntoTracker(uint8_t payload_type) {
+ auto codec_params_it = pt_codec_params_.find(payload_type);
+ if (codec_params_it == pt_codec_params_.end())
+ return;
+
+ RTC_LOG(LS_INFO) << "Found out of band supplied codec parameters for"
+ << " payload type: " << static_cast<int>(payload_type);
+
+ H264SpropParameterSets sprop_decoder;
+ auto sprop_base64_it =
+ codec_params_it->second.find(cricket::kH264FmtpSpropParameterSets);
+
+ if (sprop_base64_it == codec_params_it->second.end())
+ return;
+
+ if (!sprop_decoder.DecodeSprop(sprop_base64_it->second.c_str()))
+ return;
+
+ tracker_.InsertSpsPpsNalus(sprop_decoder.sps_nalu(),
+ sprop_decoder.pps_nalu());
+}
+
+} // namespace webrtc
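[Editor's note] DeliverRtcp() above only feeds the RTP-to-NTP estimator when the remote sender report is effectively as fresh as the local NTP clock reading. Isolated as a pure function, the guard is simply (hypothetical name, illustration only):

#include <cstdint>

// True when a sender report arrived within ~1 ms of the local NTP reading;
// older SRs are dropped so they cannot skew the RTP-to-NTP mapping.
bool SenderReportIsFresh(int64_t local_ntp_now_ms, int64_t sr_arrival_ntp_ms) {
  return local_ntp_now_ms - sr_arrival_ntp_ms <= 1;
}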
diff --git a/third_party/libwebrtc/webrtc/video/rtp_video_stream_receiver.h b/third_party/libwebrtc/webrtc/video/rtp_video_stream_receiver.h
new file mode 100644
index 0000000000..7874687c39
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/rtp_video_stream_receiver.h
@@ -0,0 +1,216 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_RTP_VIDEO_STREAM_RECEIVER_H_
+#define VIDEO_RTP_VIDEO_STREAM_RECEIVER_H_
+
+#include <list>
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "call/rtp_packet_sink_interface.h"
+#include "call/video_receive_stream.h"
+#include "modules/include/module_common_types.h"
+#include "modules/rtp_rtcp/include/receive_statistics.h"
+#include "modules/rtp_rtcp/include/remote_ntp_time_estimator.h"
+#include "modules/rtp_rtcp/include/rtp_header_extension_map.h"
+#include "modules/rtp_rtcp/include/rtp_payload_registry.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/video_coding/h264_sps_pps_tracker.h"
+#include "modules/video_coding/include/video_coding_defines.h"
+#include "modules/video_coding/packet_buffer.h"
+#include "modules/video_coding/rtp_frame_reference_finder.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/numerics/sequence_number_util.h"
+#include "rtc_base/sequenced_task_checker.h"
+#include "typedefs.h" // NOLINT(build/include)
+
+namespace webrtc {
+
+class NackModule;
+class PacedSender;
+class PacketRouter;
+class ProcessThread;
+class ReceiveStatistics;
+class ReceiveStatisticsProxy;
+class RemoteNtpTimeEstimator;
+class RtcpRttStats;
+class RtpHeaderParser;
+class RtpPacketReceived;
+class RTPPayloadRegistry;
+class RtpReceiver;
+class Transport;
+class UlpfecReceiver;
+class VCMTiming;
+
+namespace vcm {
+class VideoReceiver;
+} // namespace vcm
+
+class RtpVideoStreamReceiver : public RtpData,
+ public RecoveredPacketReceiver,
+ public RtpFeedback,
+ public RtpPacketSinkInterface,
+ public VCMFrameTypeCallback,
+ public VCMPacketRequestCallback,
+ public video_coding::OnReceivedFrameCallback,
+ public video_coding::OnCompleteFrameCallback,
+ public CallStatsObserver {
+ public:
+ RtpVideoStreamReceiver(
+ Transport* transport,
+ RtcpRttStats* rtt_stats,
+ PacketRouter* packet_router,
+ const VideoReceiveStream::Config* config,
+ ReceiveStatistics* rtp_receive_statistics,
+ ReceiveStatisticsProxy* receive_stats_proxy,
+ ProcessThread* process_thread,
+ NackSender* nack_sender,
+ KeyFrameRequestSender* keyframe_request_sender,
+ video_coding::OnCompleteFrameCallback* complete_frame_callback,
+ VCMTiming* timing);
+ ~RtpVideoStreamReceiver();
+
+ bool AddReceiveCodec(const VideoCodec& video_codec,
+ const std::map<std::string, std::string>& codec_params);
+ uint32_t GetRemoteSsrc() const;
+ int GetCsrcs(uint32_t* csrcs) const;
+ void GetRID(char rid[256]) const;
+
+ RtpReceiver* GetRtpReceiver() const;
+ RtpRtcp* rtp_rtcp() const { return rtp_rtcp_.get(); }
+
+ void StartReceive();
+ void StopReceive();
+
+ bool DeliverRtcp(const uint8_t* rtcp_packet, size_t rtcp_packet_length);
+
+  void FrameContinuous(int64_t picture_id);
+
+  void FrameDecoded(int64_t picture_id);
+
+ void SignalNetworkState(NetworkState state);
+
+ // Implements RtpPacketSinkInterface.
+ void OnRtpPacket(const RtpPacketReceived& packet) override;
+
+ // Implements RtpData.
+ int32_t OnReceivedPayloadData(const uint8_t* payload_data,
+ size_t payload_size,
+ const WebRtcRTPHeader* rtp_header) override;
+ // Implements RecoveredPacketReceiver.
+ void OnRecoveredPacket(const uint8_t* packet, size_t packet_length) override;
+
+ // Implements RtpFeedback.
+ int32_t OnInitializeDecoder(int payload_type,
+ const SdpAudioFormat& audio_format,
+ uint32_t rate) override;
+ void OnIncomingSSRCChanged(uint32_t ssrc) override {}
+ void OnIncomingCSRCChanged(uint32_t CSRC, bool added) override {}
+
+ // Implements VCMFrameTypeCallback.
+ int32_t RequestKeyFrame() override;
+
+ bool IsUlpfecEnabled() const;
+ bool IsRetransmissionsEnabled() const;
+ // Don't use, still experimental.
+ void RequestPacketRetransmit(const std::vector<uint16_t>& sequence_numbers);
+
+ // Implements VCMPacketRequestCallback.
+  int32_t ResendPackets(const uint16_t* sequence_numbers,
+                        uint16_t length) override;
+
+ // Implements OnReceivedFrameCallback.
+ void OnReceivedFrame(
+ std::unique_ptr<video_coding::RtpFrameObject> frame) override;
+
+ // Implements OnCompleteFrameCallback.
+ void OnCompleteFrame(
+ std::unique_ptr<video_coding::FrameObject> frame) override;
+
+ void OnRttUpdate(int64_t avg_rtt_ms, int64_t max_rtt_ms) override;
+
+ rtc::Optional<int64_t> LastReceivedPacketMs() const;
+ rtc::Optional<int64_t> LastReceivedKeyframePacketMs() const;
+
+  // RtpDemuxer only forwards a given RTP packet to one sink. However, some
+  // sinks, such as FlexFEC, might wish to observe every packet that reaches
+  // the primary sink. They may do so by registering themselves as secondary
+  // sinks.
+ void AddSecondarySink(RtpPacketSinkInterface* sink);
+ void RemoveSecondarySink(const RtpPacketSinkInterface* sink);
+
+ private:
+ bool AddReceiveCodec(const VideoCodec& video_codec);
+ void ReceivePacket(const uint8_t* packet,
+ size_t packet_length,
+ const RTPHeader& header);
+  // Parses and handles encapsulating headers such as RED and RTX.
+ // This function assumes that it's being called from only one thread.
+ void ParseAndHandleEncapsulatingHeader(const uint8_t* packet,
+ size_t packet_length,
+ const RTPHeader& header);
+ void NotifyReceiverOfFecPacket(const RTPHeader& header);
+ bool IsPacketInOrder(const RTPHeader& header) const;
+ bool IsPacketRetransmitted(const RTPHeader& header, bool in_order) const;
+ void UpdateHistograms();
+ bool IsRedEnabled() const;
+ void InsertSpsPpsIntoTracker(uint8_t payload_type);
+
+ Clock* const clock_;
+  // Ownership of the config is with VideoReceiveStream, which also owns
+  // |this|.
+ const VideoReceiveStream::Config& config_;
+ PacketRouter* const packet_router_;
+ ProcessThread* const process_thread_;
+
+ RemoteNtpTimeEstimator ntp_estimator_;
+ RTPPayloadRegistry rtp_payload_registry_;
+
+ RtpHeaderExtensionMap rtp_header_extensions_;
+ const std::unique_ptr<RtpReceiver> rtp_receiver_;
+ ReceiveStatistics* const rtp_receive_statistics_;
+ std::unique_ptr<UlpfecReceiver> ulpfec_receiver_;
+
+ rtc::CriticalSection receive_cs_;
+ bool receiving_ RTC_GUARDED_BY(receive_cs_);
+ int64_t last_packet_log_ms_ RTC_GUARDED_BY(receive_cs_);
+
+ const std::unique_ptr<RtpRtcp> rtp_rtcp_;
+
+ // Members for the new jitter buffer experiment.
+ video_coding::OnCompleteFrameCallback* complete_frame_callback_;
+ KeyFrameRequestSender* keyframe_request_sender_;
+ VCMTiming* timing_;
+ std::unique_ptr<NackModule> nack_module_;
+ rtc::scoped_refptr<video_coding::PacketBuffer> packet_buffer_;
+ std::unique_ptr<video_coding::RtpFrameReferenceFinder> reference_finder_;
+ rtc::CriticalSection last_seq_num_cs_;
+ std::map<int64_t, uint16_t> last_seq_num_for_pic_id_
+ RTC_GUARDED_BY(last_seq_num_cs_);
+ video_coding::H264SpsPpsTracker tracker_;
+ // TODO(johan): Remove pt_codec_params_ once
+ // https://bugs.chromium.org/p/webrtc/issues/detail?id=6883 is resolved.
+ // Maps a payload type to a map of out-of-band supplied codec parameters.
+ std::map<uint8_t, std::map<std::string, std::string>> pt_codec_params_;
+ int16_t last_payload_type_ = -1;
+
+ bool has_received_frame_;
+
+ std::vector<RtpPacketSinkInterface*> secondary_sinks_
+ RTC_GUARDED_BY(receive_cs_);
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_RTP_VIDEO_STREAM_RECEIVER_H_
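[Editor's note] The secondary-sink API above carries a teardown requirement enforced by the destructor. A hypothetical usage sketch; AttachPacketObserver illustrates the add/remove contract and is not a WebRTC API:

void AttachPacketObserver(webrtc::RtpVideoStreamReceiver* receiver,
                          webrtc::RtpPacketSinkInterface* observer) {
  receiver->AddSecondarySink(observer);  // Observer now sees every packet.
  // ... run the call ...
  // Removal is mandatory: ~RtpVideoStreamReceiver() DCHECKs that the
  // secondary-sink list is empty.
  receiver->RemoveSecondarySink(observer);
}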
diff --git a/third_party/libwebrtc/webrtc/video/rtp_video_stream_receiver_unittest.cc b/third_party/libwebrtc/webrtc/video/rtp_video_stream_receiver_unittest.cc
new file mode 100644
index 0000000000..297218205f
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/rtp_video_stream_receiver_unittest.cc
@@ -0,0 +1,476 @@
+/*
+ * Copyright 2017 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "test/gtest.h"
+#include "test/gmock.h"
+
+#include "common_video/h264/h264_common.h"
+#include "media/base/mediaconstants.h"
+#include "modules/pacing/packet_router.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "modules/utility/include/process_thread.h"
+#include "modules/video_coding/frame_object.h"
+#include "modules/video_coding/include/video_coding_defines.h"
+#include "modules/video_coding/packet.h"
+#include "modules/video_coding/rtp_frame_reference_finder.h"
+#include "modules/video_coding/timing.h"
+#include "rtc_base/bytebuffer.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/ptr_util.h"
+#include "system_wrappers/include/clock.h"
+#include "system_wrappers/include/field_trial_default.h"
+#include "test/field_trial.h"
+#include "video/rtp_video_stream_receiver.h"
+
+using testing::_;
+
+namespace webrtc {
+
+namespace {
+
+const uint8_t kH264StartCode[] = {0x00, 0x00, 0x00, 0x01};
+
+class MockTransport : public Transport {
+ public:
+ MOCK_METHOD3(SendRtp,
+ bool(const uint8_t* packet,
+ size_t length,
+ const PacketOptions& options));
+ MOCK_METHOD2(SendRtcp, bool(const uint8_t* packet, size_t length));
+};
+
+class MockNackSender : public NackSender {
+ public:
+ MOCK_METHOD1(SendNack, void(const std::vector<uint16_t>& sequence_numbers));
+};
+
+class MockKeyFrameRequestSender : public KeyFrameRequestSender {
+ public:
+ MOCK_METHOD0(RequestKeyFrame, void());
+};
+
+class MockOnCompleteFrameCallback
+ : public video_coding::OnCompleteFrameCallback {
+ public:
+ MockOnCompleteFrameCallback() : buffer_(rtc::ByteBuffer::ORDER_NETWORK) {}
+
+ MOCK_METHOD1(DoOnCompleteFrame, void(video_coding::FrameObject* frame));
+ MOCK_METHOD1(DoOnCompleteFrameFailNullptr,
+ void(video_coding::FrameObject* frame));
+ MOCK_METHOD1(DoOnCompleteFrameFailLength,
+ void(video_coding::FrameObject* frame));
+ MOCK_METHOD1(DoOnCompleteFrameFailBitstream,
+ void(video_coding::FrameObject* frame));
+ void OnCompleteFrame(std::unique_ptr<video_coding::FrameObject> frame) {
+ if (!frame) {
+ DoOnCompleteFrameFailNullptr(nullptr);
+ return;
+ }
+ EXPECT_EQ(buffer_.Length(), frame->size());
+ if (buffer_.Length() != frame->size()) {
+ DoOnCompleteFrameFailLength(frame.get());
+ return;
+ }
+ std::vector<uint8_t> actual_data(frame->size());
+ frame->GetBitstream(actual_data.data());
+ if (memcmp(buffer_.Data(), actual_data.data(), buffer_.Length()) != 0) {
+ DoOnCompleteFrameFailBitstream(frame.get());
+ return;
+ }
+ DoOnCompleteFrame(frame.get());
+ }
+ void AppendExpectedBitstream(const uint8_t data[], size_t size_in_bytes) {
+ // TODO(Johan): Let rtc::ByteBuffer handle uint8_t* instead of char*.
+ buffer_.WriteBytes(reinterpret_cast<const char*>(data), size_in_bytes);
+ }
+ rtc::ByteBufferWriter buffer_;
+};
+
+class MockRtpPacketSink : public RtpPacketSinkInterface {
+ public:
+ MOCK_METHOD1(OnRtpPacket, void(const RtpPacketReceived&));
+};
+
+constexpr uint32_t kSsrc = 111;
+constexpr uint16_t kSequenceNumber = 222;
+std::unique_ptr<RtpPacketReceived> CreateRtpPacketReceived(
+ uint32_t ssrc = kSsrc,
+ uint16_t sequence_number = kSequenceNumber) {
+ auto packet = rtc::MakeUnique<RtpPacketReceived>();
+ packet->SetSsrc(ssrc);
+ packet->SetSequenceNumber(sequence_number);
+ return packet;
+}
+
+MATCHER_P(SamePacketAs, other, "") {
+ return arg.Ssrc() == other.Ssrc() &&
+ arg.SequenceNumber() == other.SequenceNumber();
+}
+
+} // namespace
+
+class RtpVideoStreamReceiverTest : public testing::Test {
+ public:
+ RtpVideoStreamReceiverTest() : RtpVideoStreamReceiverTest("") {}
+ explicit RtpVideoStreamReceiverTest(std::string field_trials)
+ : override_field_trials_(field_trials),
+ config_(CreateConfig()),
+ timing_(Clock::GetRealTimeClock()),
+ process_thread_(ProcessThread::Create("TestThread")) {}
+
+ void SetUp() {
+ rtp_receive_statistics_ =
+ rtc::WrapUnique(ReceiveStatistics::Create(Clock::GetRealTimeClock()));
+ rtp_video_stream_receiver_ = rtc::MakeUnique<RtpVideoStreamReceiver>(
+ &mock_transport_, nullptr, &packet_router_, &config_,
+ rtp_receive_statistics_.get(), nullptr, process_thread_.get(),
+ &mock_nack_sender_,
+ &mock_key_frame_request_sender_, &mock_on_complete_frame_callback_,
+ &timing_);
+ }
+
+ WebRtcRTPHeader GetDefaultPacket() {
+ WebRtcRTPHeader packet;
+ memset(&packet, 0, sizeof(packet));
+ packet.type.Video.codec = kRtpVideoH264;
+ return packet;
+ }
+
+ // TODO(Johan): refactor h264_sps_pps_tracker_unittests.cc to avoid duplicate
+ // code.
+ void AddSps(WebRtcRTPHeader* packet,
+ uint8_t sps_id,
+ std::vector<uint8_t>* data) {
+ NaluInfo info;
+ info.type = H264::NaluType::kSps;
+ info.sps_id = sps_id;
+ info.pps_id = -1;
+ data->push_back(H264::NaluType::kSps);
+ data->push_back(sps_id);
+ packet->type.Video.codecHeader.H264
+ .nalus[packet->type.Video.codecHeader.H264.nalus_length++] = info;
+ }
+
+ void AddPps(WebRtcRTPHeader* packet,
+ uint8_t sps_id,
+ uint8_t pps_id,
+ std::vector<uint8_t>* data) {
+ NaluInfo info;
+ info.type = H264::NaluType::kPps;
+ info.sps_id = sps_id;
+ info.pps_id = pps_id;
+ data->push_back(H264::NaluType::kPps);
+ data->push_back(pps_id);
+ packet->type.Video.codecHeader.H264
+ .nalus[packet->type.Video.codecHeader.H264.nalus_length++] = info;
+ }
+
+ void AddIdr(WebRtcRTPHeader* packet, int pps_id) {
+ NaluInfo info;
+ info.type = H264::NaluType::kIdr;
+ info.sps_id = -1;
+ info.pps_id = pps_id;
+ packet->type.Video.codecHeader.H264
+ .nalus[packet->type.Video.codecHeader.H264.nalus_length++] = info;
+ }
+
+ protected:
+ static VideoReceiveStream::Config CreateConfig() {
+ VideoReceiveStream::Config config(nullptr);
+ config.rtp.remote_ssrc = 1111;
+ config.rtp.local_ssrc = 2222;
+ return config;
+ }
+
+ const webrtc::test::ScopedFieldTrials override_field_trials_;
+ VideoReceiveStream::Config config_;
+ MockNackSender mock_nack_sender_;
+ MockKeyFrameRequestSender mock_key_frame_request_sender_;
+ MockTransport mock_transport_;
+ MockOnCompleteFrameCallback mock_on_complete_frame_callback_;
+ PacketRouter packet_router_;
+ VCMTiming timing_;
+ std::unique_ptr<ProcessThread> process_thread_;
+ std::unique_ptr<ReceiveStatistics> rtp_receive_statistics_;
+ std::unique_ptr<RtpVideoStreamReceiver> rtp_video_stream_receiver_;
+};
+
+TEST_F(RtpVideoStreamReceiverTest, GenericKeyFrame) {
+ WebRtcRTPHeader rtp_header;
+ const std::vector<uint8_t> data({1, 2, 3, 4});
+ memset(&rtp_header, 0, sizeof(rtp_header));
+ rtp_header.header.sequenceNumber = 1;
+ rtp_header.header.markerBit = 1;
+ rtp_header.type.Video.is_first_packet_in_frame = true;
+ rtp_header.frameType = kVideoFrameKey;
+ rtp_header.type.Video.codec = kRtpVideoGeneric;
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
+ data.size());
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
+ rtp_video_stream_receiver_->OnReceivedPayloadData(data.data(), data.size(),
+ &rtp_header);
+}
+
+TEST_F(RtpVideoStreamReceiverTest, GenericKeyFrameBitstreamError) {
+ WebRtcRTPHeader rtp_header;
+ const std::vector<uint8_t> data({1, 2, 3, 4});
+ memset(&rtp_header, 0, sizeof(rtp_header));
+ rtp_header.header.sequenceNumber = 1;
+ rtp_header.header.markerBit = 1;
+ rtp_header.type.Video.is_first_packet_in_frame = true;
+ rtp_header.frameType = kVideoFrameKey;
+ rtp_header.type.Video.codec = kRtpVideoGeneric;
+  constexpr uint8_t expected_bitstream[] = {1, 2, 3, 0xff};
+  mock_on_complete_frame_callback_.AppendExpectedBitstream(
+      expected_bitstream, sizeof(expected_bitstream));
+ EXPECT_CALL(mock_on_complete_frame_callback_,
+ DoOnCompleteFrameFailBitstream(_));
+ rtp_video_stream_receiver_->OnReceivedPayloadData(data.data(), data.size(),
+ &rtp_header);
+}
+
+class RtpVideoStreamReceiverTestH264
+ : public RtpVideoStreamReceiverTest,
+ public testing::WithParamInterface<std::string> {
+ protected:
+ RtpVideoStreamReceiverTestH264() : RtpVideoStreamReceiverTest(GetParam()) {}
+};
+
+INSTANTIATE_TEST_CASE_P(
+ SpsPpsIdrIsKeyframe,
+ RtpVideoStreamReceiverTestH264,
+ ::testing::Values("", "WebRTC-SpsPpsIdrIsH264Keyframe/Enabled/"));
+
+TEST_P(RtpVideoStreamReceiverTestH264, InBandSpsPps) {
+ std::vector<uint8_t> sps_data;
+ WebRtcRTPHeader sps_packet = GetDefaultPacket();
+ AddSps(&sps_packet, 0, &sps_data);
+ sps_packet.header.sequenceNumber = 0;
+ sps_packet.type.Video.is_first_packet_in_frame = true;
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(
+ kH264StartCode, sizeof(kH264StartCode));
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(sps_data.data(),
+ sps_data.size());
+ rtp_video_stream_receiver_->OnReceivedPayloadData(
+ sps_data.data(), sps_data.size(), &sps_packet);
+
+ std::vector<uint8_t> pps_data;
+ WebRtcRTPHeader pps_packet = GetDefaultPacket();
+ AddPps(&pps_packet, 0, 1, &pps_data);
+ pps_packet.header.sequenceNumber = 1;
+ pps_packet.type.Video.is_first_packet_in_frame = true;
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(
+ kH264StartCode, sizeof(kH264StartCode));
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(pps_data.data(),
+ pps_data.size());
+ rtp_video_stream_receiver_->OnReceivedPayloadData(
+ pps_data.data(), pps_data.size(), &pps_packet);
+
+ std::vector<uint8_t> idr_data;
+ WebRtcRTPHeader idr_packet = GetDefaultPacket();
+ AddIdr(&idr_packet, 1);
+ idr_packet.type.Video.is_first_packet_in_frame = true;
+ idr_packet.header.sequenceNumber = 2;
+ idr_packet.header.markerBit = 1;
+ idr_packet.frameType = kVideoFrameKey;
+ idr_data.insert(idr_data.end(), {0x65, 1, 2, 3});
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(
+ kH264StartCode, sizeof(kH264StartCode));
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(idr_data.data(),
+ idr_data.size());
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
+ rtp_video_stream_receiver_->OnReceivedPayloadData(
+ idr_data.data(), idr_data.size(), &idr_packet);
+}
+
+TEST_P(RtpVideoStreamReceiverTestH264, OutOfBandFmtpSpsPps) {
+ constexpr int kPayloadType = 99;
+ VideoCodec codec;
+ codec.plType = kPayloadType;
+ std::map<std::string, std::string> codec_params;
+  // Example parameter sets from
+  // https://tools.ietf.org/html/rfc3984#section-8.2.
+ codec_params.insert(
+ {cricket::kH264FmtpSpropParameterSets, "Z0IACpZTBYmI,aMljiA=="});
+ rtp_video_stream_receiver_->AddReceiveCodec(codec, codec_params);
+ const uint8_t binary_sps[] = {0x67, 0x42, 0x00, 0x0a, 0x96,
+ 0x53, 0x05, 0x89, 0x88};
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(
+ kH264StartCode, sizeof(kH264StartCode));
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(binary_sps,
+ sizeof(binary_sps));
+ const uint8_t binary_pps[] = {0x68, 0xc9, 0x63, 0x88};
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(
+ kH264StartCode, sizeof(kH264StartCode));
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(binary_pps,
+ sizeof(binary_pps));
+
+ std::vector<uint8_t> data;
+ WebRtcRTPHeader idr_packet = GetDefaultPacket();
+ AddIdr(&idr_packet, 0);
+  idr_packet.header.payloadType = kPayloadType;
+  idr_packet.type.Video.is_first_packet_in_frame = true;
+  idr_packet.header.sequenceNumber = 2;
+  idr_packet.header.markerBit = 1;
+  idr_packet.frameType = kVideoFrameKey;
+  idr_packet.type.Video.codec = kRtpVideoH264;
+ data.insert(data.end(), {1, 2, 3});
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(
+ kH264StartCode, sizeof(kH264StartCode));
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
+ data.size());
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
+ rtp_video_stream_receiver_->OnReceivedPayloadData(data.data(), data.size(),
+ &idr_packet);
+}
+
+TEST_F(RtpVideoStreamReceiverTest, PaddingInMediaStream) {
+ WebRtcRTPHeader header = GetDefaultPacket();
+ std::vector<uint8_t> data;
+ data.insert(data.end(), {1, 2, 3});
+ header.header.payloadType = 99;
+ header.type.Video.is_first_packet_in_frame = true;
+ header.header.sequenceNumber = 2;
+ header.header.markerBit = true;
+ header.frameType = kVideoFrameKey;
+ header.type.Video.codec = kRtpVideoGeneric;
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
+ data.size());
+
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
+ rtp_video_stream_receiver_->OnReceivedPayloadData(data.data(), data.size(),
+ &header);
+
+ header.header.sequenceNumber = 3;
+ rtp_video_stream_receiver_->OnReceivedPayloadData(nullptr, 0, &header);
+
+ header.frameType = kVideoFrameDelta;
+ header.header.sequenceNumber = 4;
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
+ rtp_video_stream_receiver_->OnReceivedPayloadData(data.data(), data.size(),
+ &header);
+
+ header.header.sequenceNumber = 6;
+ rtp_video_stream_receiver_->OnReceivedPayloadData(data.data(), data.size(),
+ &header);
+
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
+ header.header.sequenceNumber = 5;
+ rtp_video_stream_receiver_->OnReceivedPayloadData(nullptr, 0, &header);
+}
+
+TEST_F(RtpVideoStreamReceiverTest, RequestKeyframeIfFirstFrameIsDelta) {
+ WebRtcRTPHeader rtp_header;
+ const std::vector<uint8_t> data({1, 2, 3, 4});
+ memset(&rtp_header, 0, sizeof(rtp_header));
+ rtp_header.header.sequenceNumber = 1;
+ rtp_header.header.markerBit = 1;
+ rtp_header.type.Video.is_first_packet_in_frame = true;
+ rtp_header.frameType = kVideoFrameDelta;
+ rtp_header.type.Video.codec = kRtpVideoGeneric;
+
+ EXPECT_CALL(mock_key_frame_request_sender_, RequestKeyFrame());
+ rtp_video_stream_receiver_->OnReceivedPayloadData(data.data(), data.size(),
+ &rtp_header);
+}
+
+TEST_F(RtpVideoStreamReceiverTest, SecondarySinksGetRtpNotifications) {
+ rtp_video_stream_receiver_->StartReceive();
+
+ MockRtpPacketSink secondary_sink_1;
+ MockRtpPacketSink secondary_sink_2;
+
+ rtp_video_stream_receiver_->AddSecondarySink(&secondary_sink_1);
+ rtp_video_stream_receiver_->AddSecondarySink(&secondary_sink_2);
+
+ auto rtp_packet = CreateRtpPacketReceived();
+ EXPECT_CALL(secondary_sink_1, OnRtpPacket(SamePacketAs(*rtp_packet)));
+ EXPECT_CALL(secondary_sink_2, OnRtpPacket(SamePacketAs(*rtp_packet)));
+
+ rtp_video_stream_receiver_->OnRtpPacket(*rtp_packet);
+
+ // Test tear-down.
+ rtp_video_stream_receiver_->StopReceive();
+ rtp_video_stream_receiver_->RemoveSecondarySink(&secondary_sink_1);
+ rtp_video_stream_receiver_->RemoveSecondarySink(&secondary_sink_2);
+}
+
+TEST_F(RtpVideoStreamReceiverTest, RemovedSecondarySinksGetNoRtpNotifications) {
+ rtp_video_stream_receiver_->StartReceive();
+
+ MockRtpPacketSink secondary_sink;
+
+ rtp_video_stream_receiver_->AddSecondarySink(&secondary_sink);
+ rtp_video_stream_receiver_->RemoveSecondarySink(&secondary_sink);
+
+ auto rtp_packet = CreateRtpPacketReceived();
+
+ EXPECT_CALL(secondary_sink, OnRtpPacket(_)).Times(0);
+
+ rtp_video_stream_receiver_->OnRtpPacket(*rtp_packet);
+
+ // Test tear-down.
+ rtp_video_stream_receiver_->StopReceive();
+}
+
+TEST_F(RtpVideoStreamReceiverTest,
+ OnlyRemovedSecondarySinksExcludedFromNotifications) {
+ rtp_video_stream_receiver_->StartReceive();
+
+ MockRtpPacketSink kept_secondary_sink;
+ MockRtpPacketSink removed_secondary_sink;
+
+ rtp_video_stream_receiver_->AddSecondarySink(&kept_secondary_sink);
+ rtp_video_stream_receiver_->AddSecondarySink(&removed_secondary_sink);
+ rtp_video_stream_receiver_->RemoveSecondarySink(&removed_secondary_sink);
+
+ auto rtp_packet = CreateRtpPacketReceived();
+ EXPECT_CALL(kept_secondary_sink, OnRtpPacket(SamePacketAs(*rtp_packet)));
+
+ rtp_video_stream_receiver_->OnRtpPacket(*rtp_packet);
+
+ // Test tear-down.
+ rtp_video_stream_receiver_->StopReceive();
+ rtp_video_stream_receiver_->RemoveSecondarySink(&kept_secondary_sink);
+}
+
+TEST_F(RtpVideoStreamReceiverTest,
+ SecondariesOfNonStartedStreamGetNoNotifications) {
+ // Explicitly showing that the stream is not in the |started| state,
+ // regardless of whether streams start out |started| or |stopped|.
+ rtp_video_stream_receiver_->StopReceive();
+
+ MockRtpPacketSink secondary_sink;
+ rtp_video_stream_receiver_->AddSecondarySink(&secondary_sink);
+
+ auto rtp_packet = CreateRtpPacketReceived();
+ EXPECT_CALL(secondary_sink, OnRtpPacket(_)).Times(0);
+
+ rtp_video_stream_receiver_->OnRtpPacket(*rtp_packet);
+
+ // Test tear-down.
+ rtp_video_stream_receiver_->RemoveSecondarySink(&secondary_sink);
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+TEST_F(RtpVideoStreamReceiverTest, RepeatedSecondarySinkDisallowed) {
+ MockRtpPacketSink secondary_sink;
+
+ rtp_video_stream_receiver_->AddSecondarySink(&secondary_sink);
+ EXPECT_DEATH(rtp_video_stream_receiver_->AddSecondarySink(&secondary_sink),
+ "");
+
+ // Test tear-down.
+ rtp_video_stream_receiver_->RemoveSecondarySink(&secondary_sink);
+}
+#endif
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/video/screenshare_loopback.cc b/third_party/libwebrtc/webrtc/video/screenshare_loopback.cc
new file mode 100644
index 0000000000..8cb81937e2
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/screenshare_loopback.cc
@@ -0,0 +1,336 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+
+#include "rtc_base/flags.h"
+#include "rtc_base/stringencode.h"
+#include "test/field_trial.h"
+#include "test/gtest.h"
+#include "test/run_test.h"
+#include "video/video_quality_test.h"
+
+namespace webrtc {
+namespace flags {
+
+// Flags common with video loopback, with different default values.
+DEFINE_int(width, 1850, "Video width (crops source).");
+size_t Width() {
+ return static_cast<size_t>(FLAG_width);
+}
+
+DEFINE_int(height, 1110, "Video height (crops source).");
+size_t Height() {
+ return static_cast<size_t>(FLAG_height);
+}
+
+DEFINE_int(fps, 5, "Frames per second.");
+int Fps() {
+ return static_cast<int>(FLAG_fps);
+}
+
+DEFINE_int(min_bitrate, 50, "Call and stream min bitrate in kbps.");
+int MinBitrateKbps() {
+ return static_cast<int>(FLAG_min_bitrate);
+}
+
+DEFINE_int(start_bitrate,
+ Call::Config::kDefaultStartBitrateBps / 1000,
+ "Call start bitrate in kbps.");
+int StartBitrateKbps() {
+ return static_cast<int>(FLAG_start_bitrate);
+}
+
+DEFINE_int(target_bitrate, 200, "Stream target bitrate in kbps.");
+int TargetBitrateKbps() {
+ return static_cast<int>(FLAG_target_bitrate);
+}
+
+DEFINE_int(max_bitrate, 2000, "Call and stream max bitrate in kbps.");
+int MaxBitrateKbps() {
+ return static_cast<int>(FLAG_max_bitrate);
+}
+
+DEFINE_int(num_temporal_layers, 2, "Number of temporal layers to use.");
+int NumTemporalLayers() {
+ return static_cast<int>(FLAG_num_temporal_layers);
+}
+
+// Flags common with video loopback, with equal default values.
+DEFINE_string(codec, "VP8", "Video codec to use.");
+std::string Codec() {
+ return static_cast<std::string>(FLAG_codec);
+}
+
+DEFINE_string(rtc_event_log_name, "", "Filename for rtc event log.");
+std::string RtcEventLogName() {
+ return static_cast<std::string>(FLAG_rtc_event_log_name);
+}
+
+DEFINE_string(rtp_dump_name, "", "Filename for dumped received RTP stream.");
+std::string RtpDumpName() {
+ return static_cast<std::string>(FLAG_rtp_dump_name);
+}
+
+DEFINE_int(selected_tl,
+ -1,
+ "Temporal layer to show or analyze. -1 to disable filtering.");
+int SelectedTL() {
+ return static_cast<int>(FLAG_selected_tl);
+}
+
+DEFINE_int(
+ duration,
+ 0,
+ "Duration of the test in seconds. If 0, rendered will be shown instead.");
+int DurationSecs() {
+ return static_cast<int>(FLAG_duration);
+}
+
+DEFINE_string(output_filename, "", "Target graph data filename.");
+std::string OutputFilename() {
+ return static_cast<std::string>(FLAG_output_filename);
+}
+
+DEFINE_string(graph_title,
+ "",
+ "If empty, title will be generated automatically.");
+std::string GraphTitle() {
+ return static_cast<std::string>(FLAG_graph_title);
+}
+
+DEFINE_int(loss_percent, 0, "Percentage of packets randomly lost.");
+int LossPercent() {
+ return static_cast<int>(FLAG_loss_percent);
+}
+
+DEFINE_int(link_capacity,
+ 0,
+ "Capacity (kbps) of the fake link. 0 means infinite.");
+int LinkCapacityKbps() {
+ return static_cast<int>(FLAG_link_capacity);
+}
+
+DEFINE_int(queue_size, 0, "Size of the bottleneck link queue in packets.");
+int QueueSize() {
+ return static_cast<int>(FLAG_queue_size);
+}
+
+DEFINE_int(avg_propagation_delay_ms,
+ 0,
+ "Average link propagation delay in ms.");
+int AvgPropagationDelayMs() {
+ return static_cast<int>(FLAG_avg_propagation_delay_ms);
+}
+
+DEFINE_int(std_propagation_delay_ms,
+ 0,
+ "Link propagation delay standard deviation in ms.");
+int StdPropagationDelayMs() {
+ return static_cast<int>(FLAG_std_propagation_delay_ms);
+}
+
+DEFINE_int(num_streams, 0, "Number of streams to show or analyze.");
+int NumStreams() {
+ return static_cast<int>(FLAG_num_streams);
+}
+
+DEFINE_int(selected_stream,
+ 0,
+ "ID of the stream to show or analyze. "
+ "Set to the number of streams to show them all.");
+int SelectedStream() {
+ return static_cast<int>(FLAG_selected_stream);
+}
+
+DEFINE_int(num_spatial_layers, 1, "Number of spatial layers to use.");
+int NumSpatialLayers() {
+ return static_cast<int>(FLAG_num_spatial_layers);
+}
+
+DEFINE_int(selected_sl,
+ -1,
+ "Spatial layer to show or analyze. -1 to disable filtering.");
+int SelectedSL() {
+ return static_cast<int>(FLAG_selected_sl);
+}
+
+DEFINE_string(stream0,
+ "",
+ "Comma separated values describing VideoStream for stream #0.");
+std::string Stream0() {
+ return static_cast<std::string>(FLAG_stream0);
+}
+
+DEFINE_string(stream1,
+ "",
+ "Comma separated values describing VideoStream for stream #1.");
+std::string Stream1() {
+ return static_cast<std::string>(FLAG_stream1);
+}
+
+DEFINE_string(sl0,
+ "",
+ "Comma separated values describing SpatialLayer for layer #0.");
+std::string SL0() {
+ return static_cast<std::string>(FLAG_sl0);
+}
+
+DEFINE_string(sl1,
+ "",
+ "Comma separated values describing SpatialLayer for layer #1.");
+std::string SL1() {
+ return static_cast<std::string>(FLAG_sl1);
+}
+
+DEFINE_string(encoded_frame_path,
+ "",
+ "The base path for encoded frame logs. Created files will have "
+ "the form <encoded_frame_path>.<n>.(recv|send.<m>).ivf");
+std::string EncodedFramePath() {
+ return static_cast<std::string>(FLAG_encoded_frame_path);
+}
+
+DEFINE_bool(logs, false, "Print logs to stderr.");
+
+DEFINE_bool(send_side_bwe, true, "Use send-side bandwidth estimation.");
+
+DEFINE_bool(allow_reordering, false, "Allow packet reordering to occur.");
+
+DEFINE_string(
+ force_fieldtrials,
+ "",
+ "Field trials control experimental feature code which can be forced. "
+ "E.g. running with --force_fieldtrials=WebRTC-FooFeature/Enable/"
+ " will assign the group Enable to field trial WebRTC-FooFeature. Multiple "
+ "trials are separated by \"/\"");
+
+// Screenshare-specific flags.
+DEFINE_int(min_transmit_bitrate, 400, "Min transmit bitrate incl. padding.");
+int MinTransmitBitrateKbps() {
+ return FLAG_min_transmit_bitrate;
+}
+
+DEFINE_bool(generate_slides,
+ false,
+ "Whether to use randomly generated slides or read them from files.");
+bool GenerateSlides() {
+  return FLAG_generate_slides;
+}
+
+DEFINE_int(slide_change_interval,
+ 10,
+ "Interval (in seconds) between simulated slide changes.");
+int SlideChangeInterval() {
+ return static_cast<int>(FLAG_slide_change_interval);
+}
+
+DEFINE_int(
+ scroll_duration,
+ 0,
+ "Duration (in seconds) during which a slide will be scrolled into place.");
+int ScrollDuration() {
+ return static_cast<int>(FLAG_scroll_duration);
+}
+
+DEFINE_string(slides,
+ "",
+ "Comma-separated list of *.yuv files to display as slides.");
+std::vector<std::string> Slides() {
+ std::vector<std::string> slides;
+ std::string slides_list = FLAG_slides;
+ rtc::tokenize(slides_list, ',', &slides);
+ return slides;
+}
+
+DEFINE_bool(help, false, "Prints this message.");
+
+} // namespace flags
+
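+// Illustrative invocation (hypothetical values; flags as defined above):
+//   ./screenshare_loopback --duration=60 --output_filename=graph.txt \
+//       --loss_percent=2 --link_capacity=1000 --generate_slides=true
+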
+void Loopback() {
+ FakeNetworkPipe::Config pipe_config;
+ pipe_config.loss_percent = flags::LossPercent();
+ pipe_config.link_capacity_kbps = flags::LinkCapacityKbps();
+ pipe_config.queue_length_packets = flags::QueueSize();
+ pipe_config.queue_delay_ms = flags::AvgPropagationDelayMs();
+ pipe_config.delay_standard_deviation_ms = flags::StdPropagationDelayMs();
+ pipe_config.allow_reordering = flags::FLAG_allow_reordering;
+
+ Call::Config::BitrateConfig call_bitrate_config;
+ call_bitrate_config.min_bitrate_bps = flags::MinBitrateKbps() * 1000;
+ call_bitrate_config.start_bitrate_bps = flags::StartBitrateKbps() * 1000;
+ call_bitrate_config.max_bitrate_bps = flags::MaxBitrateKbps() * 1000;
+
+ VideoQualityTest::Params params;
+ params.call = {flags::FLAG_send_side_bwe, call_bitrate_config};
+ params.video = {true,
+ flags::Width(),
+ flags::Height(),
+ flags::Fps(),
+ flags::MinBitrateKbps() * 1000,
+ flags::TargetBitrateKbps() * 1000,
+ flags::MaxBitrateKbps() * 1000,
+ false,
+ flags::Codec(),
+ flags::NumTemporalLayers(),
+ flags::SelectedTL(),
+ flags::MinTransmitBitrateKbps() * 1000,
+ false, // ULPFEC disabled.
+ false, // FlexFEC disabled.
+ ""};
+ params.screenshare = {true, flags::GenerateSlides(),
+ flags::SlideChangeInterval(),
+ flags::ScrollDuration(), flags::Slides()};
+ params.analyzer = {"screenshare", 0.0, 0.0, flags::DurationSecs(),
+ flags::OutputFilename(), flags::GraphTitle()};
+ params.pipe = pipe_config;
+ params.logging = {flags::FLAG_logs, flags::RtcEventLogName(),
+ flags::RtpDumpName(), flags::EncodedFramePath()};
+
+ if (flags::NumStreams() > 1 && flags::Stream0().empty() &&
+ flags::Stream1().empty()) {
+ params.ss.infer_streams = true;
+ }
+
+ std::vector<std::string> stream_descriptors;
+ stream_descriptors.push_back(flags::Stream0());
+ stream_descriptors.push_back(flags::Stream1());
+ std::vector<std::string> SL_descriptors;
+ SL_descriptors.push_back(flags::SL0());
+ SL_descriptors.push_back(flags::SL1());
+ VideoQualityTest::FillScalabilitySettings(
+ &params, stream_descriptors, flags::NumStreams(), flags::SelectedStream(),
+ flags::NumSpatialLayers(), flags::SelectedSL(), SL_descriptors);
+
+ VideoQualityTest test;
+ if (flags::DurationSecs()) {
+ test.RunWithAnalyzer(params);
+ } else {
+ test.RunWithRenderers(params);
+ }
+}
+} // namespace webrtc
+
+int main(int argc, char* argv[]) {
+ ::testing::InitGoogleTest(&argc, argv);
+ rtc::FlagList::SetFlagsFromCommandLine(&argc, argv, true);
+ if (webrtc::flags::FLAG_help) {
+ rtc::FlagList::Print(nullptr, false);
+ return 0;
+ }
+
+ // InitFieldTrialsFromString needs a reference to an std::string instance,
+ // with a scope that outlives the test.
+ std::string field_trials = webrtc::flags::FLAG_force_fieldtrials;
+ webrtc::test::InitFieldTrialsFromString(field_trials);
+
+ webrtc::test::RunTest(webrtc::Loopback);
+ return 0;
+}
diff --git a/third_party/libwebrtc/webrtc/video/send_delay_stats.cc b/third_party/libwebrtc/webrtc/video/send_delay_stats.cc
new file mode 100644
index 0000000000..a243eda292
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/send_delay_stats.cc
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/send_delay_stats.h"
+
+#include <utility>
+
+#include "rtc_base/logging.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+namespace {
+// Packets with a larger delay are removed and excluded from the delay stats.
+// Set larger than the max histogram delay, which is 10000 ms.
+const int64_t kMaxSentPacketDelayMs = 11000;
+const size_t kMaxPacketMapSize = 2000;
+
+// Maximum number of streams to calculate stats for.
+const size_t kMaxSsrcMapSize = 50;
+const int kMinRequiredPeriodicSamples = 5;
+} // namespace
+
+SendDelayStats::SendDelayStats(Clock* clock)
+ : clock_(clock), num_old_packets_(0), num_skipped_packets_(0) {}
+
+SendDelayStats::~SendDelayStats() {
+ if (num_old_packets_ > 0 || num_skipped_packets_ > 0) {
+ RTC_LOG(LS_WARNING) << "Delay stats: number of old packets "
+ << num_old_packets_ << ", skipped packets "
+ << num_skipped_packets_ << ". Number of streams "
+ << send_delay_counters_.size();
+ }
+ UpdateHistograms();
+}
+
+void SendDelayStats::UpdateHistograms() {
+ rtc::CritScope lock(&crit_);
+ for (const auto& it : send_delay_counters_) {
+ AggregatedStats stats = it.second->GetStats();
+ if (stats.num_samples >= kMinRequiredPeriodicSamples) {
+ RTC_HISTOGRAM_COUNTS_10000("WebRTC.Video.SendDelayInMs", stats.average);
+ RTC_LOG(LS_INFO) << "WebRTC.Video.SendDelayInMs, " << stats.ToString();
+ }
+ }
+}
+
+void SendDelayStats::AddSsrcs(const VideoSendStream::Config& config) {
+ rtc::CritScope lock(&crit_);
+ if (ssrcs_.size() > kMaxSsrcMapSize)
+ return;
+ for (const auto& ssrc : config.rtp.ssrcs)
+ ssrcs_.insert(ssrc);
+}
+
+AvgCounter* SendDelayStats::GetSendDelayCounter(uint32_t ssrc) {
+ const auto& it = send_delay_counters_.find(ssrc);
+ if (it != send_delay_counters_.end())
+ return it->second.get();
+
+ AvgCounter* counter = new AvgCounter(clock_, nullptr, false);
+ send_delay_counters_[ssrc].reset(counter);
+ return counter;
+}
+
+void SendDelayStats::OnSendPacket(uint16_t packet_id,
+ int64_t capture_time_ms,
+ uint32_t ssrc) {
+ // Packet sent to transport.
+ rtc::CritScope lock(&crit_);
+ if (ssrcs_.find(ssrc) == ssrcs_.end())
+ return;
+
+ int64_t now = clock_->TimeInMilliseconds();
+ RemoveOld(now, &packets_);
+
+ if (packets_.size() > kMaxPacketMapSize) {
+ ++num_skipped_packets_;
+ return;
+ }
+ packets_.insert(
+ std::make_pair(packet_id, Packet(ssrc, capture_time_ms, now)));
+}
+
+bool SendDelayStats::OnSentPacket(int packet_id, int64_t time_ms) {
+ // Packet leaving socket.
+ if (packet_id == -1)
+ return false;
+
+ rtc::CritScope lock(&crit_);
+ auto it = packets_.find(packet_id);
+ if (it == packets_.end())
+ return false;
+
+ // TODO(asapersson): Remove SendSideDelayUpdated(), use capture -> sent.
+ // Elapsed time from send (to transport) -> sent (leaving socket).
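+  // Illustrative: OnSendPacket() registered the packet at t=100 ms; if the
+  // socket reports it sent at t=107 ms, diff_ms = 7 is added to the counter.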
+ int diff_ms = time_ms - it->second.send_time_ms;
+ GetSendDelayCounter(it->second.ssrc)->Add(diff_ms);
+ packets_.erase(it);
+ return true;
+}
+
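+// Removes packets, oldest sequence number first, whose capture time is at
+// least kMaxSentPacketDelayMs old; stops at the first packet still within
+// the window.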
+void SendDelayStats::RemoveOld(int64_t now, PacketMap* packets) {
+ while (!packets->empty()) {
+ auto it = packets->begin();
+ if (now - it->second.capture_time_ms < kMaxSentPacketDelayMs)
+ break;
+
+ packets->erase(it);
+ ++num_old_packets_;
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/video/send_delay_stats.h b/third_party/libwebrtc/webrtc/video/send_delay_stats.h
new file mode 100644
index 0000000000..71bca2d37d
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/send_delay_stats.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_SEND_DELAY_STATS_H_
+#define VIDEO_SEND_DELAY_STATS_H_
+
+#include <map>
+#include <memory>
+#include <set>
+
+#include "common_types.h" // NOLINT(build/include)
+#include "modules/include/module_common_types.h"
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/thread_annotations.h"
+#include "system_wrappers/include/clock.h"
+#include "video/stats_counter.h"
+#include "call/video_send_stream.h"
+
+namespace webrtc {
+
+class SendDelayStats : public SendPacketObserver {
+ public:
+ explicit SendDelayStats(Clock* clock);
+ virtual ~SendDelayStats();
+
+ // Adds the configured ssrcs for the rtp streams.
+ // Stats will be calculated for these streams.
+ void AddSsrcs(const VideoSendStream::Config& config);
+
+ // Called when a packet is sent (leaving socket).
+ bool OnSentPacket(int packet_id, int64_t time_ms);
+
+ protected:
+ // From SendPacketObserver.
+ // Called when a packet is sent to the transport.
+ void OnSendPacket(uint16_t packet_id,
+ int64_t capture_time_ms,
+ uint32_t ssrc) override;
+
+ private:
+ // Map holding sent packets (mapped by sequence number).
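+  // Ordered oldest-first with wraparound handled, e.g. seq 0xFFFF sorts
+  // before seq 0 because IsNewerSequenceNumber(0, 0xFFFF) is true.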
+ struct SequenceNumberOlderThan {
+ bool operator()(uint16_t seq1, uint16_t seq2) const {
+ return IsNewerSequenceNumber(seq2, seq1);
+ }
+ };
+ struct Packet {
+ Packet(uint32_t ssrc, int64_t capture_time_ms, int64_t send_time_ms)
+ : ssrc(ssrc),
+ capture_time_ms(capture_time_ms),
+ send_time_ms(send_time_ms) {}
+ uint32_t ssrc;
+ int64_t capture_time_ms;
+ int64_t send_time_ms;
+ };
+ typedef std::map<uint16_t, Packet, SequenceNumberOlderThan> PacketMap;
+
+ void UpdateHistograms();
+ void RemoveOld(int64_t now, PacketMap* packets)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_);
+ AvgCounter* GetSendDelayCounter(uint32_t ssrc)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_);
+
+ Clock* const clock_;
+ rtc::CriticalSection crit_;
+
+ PacketMap packets_ RTC_GUARDED_BY(crit_);
+ size_t num_old_packets_ RTC_GUARDED_BY(crit_);
+ size_t num_skipped_packets_ RTC_GUARDED_BY(crit_);
+
+ std::set<uint32_t> ssrcs_ RTC_GUARDED_BY(crit_);
+
+ // Mapped by SSRC.
+ std::map<uint32_t, std::unique_ptr<AvgCounter>> send_delay_counters_
+ RTC_GUARDED_BY(crit_);
+};
+
+} // namespace webrtc
+#endif // VIDEO_SEND_DELAY_STATS_H_
diff --git a/third_party/libwebrtc/webrtc/video/send_delay_stats_unittest.cc b/third_party/libwebrtc/webrtc/video/send_delay_stats_unittest.cc
new file mode 100644
index 0000000000..174e70df10
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/send_delay_stats_unittest.cc
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/send_delay_stats.h"
+
+#include "system_wrappers/include/metrics.h"
+#include "system_wrappers/include/metrics_default.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+const uint32_t kSsrc1 = 17;
+const uint32_t kSsrc2 = 42;
+const uint32_t kRtxSsrc1 = 18;
+const uint32_t kRtxSsrc2 = 43;
+const uint16_t kPacketId = 2345;
+const int64_t kMaxPacketDelayMs = 11000;
+const int kMinRequiredPeriodicSamples = 5;
+const int kProcessIntervalMs = 2000;
+} // namespace
+
+class SendDelayStatsTest : public ::testing::Test {
+ public:
+ SendDelayStatsTest() : clock_(1234), config_(CreateConfig()) {}
+ virtual ~SendDelayStatsTest() {}
+
+ protected:
+ virtual void SetUp() {
+ stats_.reset(new SendDelayStats(&clock_));
+ stats_->AddSsrcs(config_);
+ }
+
+ VideoSendStream::Config CreateConfig() {
+ VideoSendStream::Config config(nullptr);
+ config.rtp.ssrcs.push_back(kSsrc1);
+ config.rtp.ssrcs.push_back(kSsrc2);
+ config.rtp.rtx.ssrcs.push_back(kRtxSsrc1);
+ config.rtp.rtx.ssrcs.push_back(kRtxSsrc2);
+ return config;
+ }
+
+ void OnSendPacket(uint16_t id, uint32_t ssrc) {
+ OnSendPacket(id, ssrc, clock_.TimeInMilliseconds());
+ }
+
+ void OnSendPacket(uint16_t id, uint32_t ssrc, int64_t capture_ms) {
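+    // OnSendPacket() is protected in SendDelayStats; call it through the
+    // SendPacketObserver interface it implements.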
+ SendPacketObserver* observer = stats_.get();
+ observer->OnSendPacket(id, capture_ms, ssrc);
+ }
+
+ bool OnSentPacket(uint16_t id) {
+ return stats_->OnSentPacket(id, clock_.TimeInMilliseconds());
+ }
+
+ SimulatedClock clock_;
+ VideoSendStream::Config config_;
+ std::unique_ptr<SendDelayStats> stats_;
+};
+
+TEST_F(SendDelayStatsTest, SentPacketFound) {
+ EXPECT_FALSE(OnSentPacket(kPacketId));
+ OnSendPacket(kPacketId, kSsrc1);
+ EXPECT_TRUE(OnSentPacket(kPacketId)); // Packet found.
+ EXPECT_FALSE(OnSentPacket(kPacketId)); // Packet removed when found.
+}
+
+TEST_F(SendDelayStatsTest, SentPacketNotFoundForNonRegisteredSsrc) {
+ OnSendPacket(kPacketId, kSsrc1);
+ EXPECT_TRUE(OnSentPacket(kPacketId));
+ OnSendPacket(kPacketId + 1, kSsrc2);
+ EXPECT_TRUE(OnSentPacket(kPacketId + 1));
+ OnSendPacket(kPacketId + 2, kRtxSsrc1); // RTX SSRC not registered.
+ EXPECT_FALSE(OnSentPacket(kPacketId + 2));
+}
+
+TEST_F(SendDelayStatsTest, SentPacketFoundWithMaxSendDelay) {
+ OnSendPacket(kPacketId, kSsrc1);
+ clock_.AdvanceTimeMilliseconds(kMaxPacketDelayMs - 1);
+ OnSendPacket(kPacketId + 1, kSsrc1); // kPacketId -> not old/removed.
+ EXPECT_TRUE(OnSentPacket(kPacketId)); // Packet found.
+ EXPECT_TRUE(OnSentPacket(kPacketId + 1)); // Packet found.
+}
+
+TEST_F(SendDelayStatsTest, OldPacketsRemoved) {
+ const int64_t kCaptureTimeMs = clock_.TimeInMilliseconds();
+ OnSendPacket(0xffffu, kSsrc1, kCaptureTimeMs);
+ OnSendPacket(0u, kSsrc1, kCaptureTimeMs);
+ OnSendPacket(1u, kSsrc1, kCaptureTimeMs + 1);
+ clock_.AdvanceTimeMilliseconds(kMaxPacketDelayMs); // 0xffff, 0 -> old.
+ OnSendPacket(2u, kSsrc1, kCaptureTimeMs + 2);
+
+ EXPECT_FALSE(OnSentPacket(0xffffu)); // Old removed.
+ EXPECT_FALSE(OnSentPacket(0u)); // Old removed.
+ EXPECT_TRUE(OnSentPacket(1u));
+ EXPECT_TRUE(OnSentPacket(2u));
+}
+
+TEST_F(SendDelayStatsTest, HistogramsAreUpdated) {
+ metrics::Reset();
+ const int64_t kDelayMs1 = 5;
+ const int64_t kDelayMs2 = 15;
+ const int kNumSamples = kMinRequiredPeriodicSamples * kProcessIntervalMs /
+ (kDelayMs1 + kDelayMs2) + 1;
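+  // With the constants above: 5 * 2000 / (5 + 15) + 1 = 501 iterations, each
+  // advancing the clock 20 ms, so both counters cover >= 10 s and report at
+  // least kMinRequiredPeriodicSamples periodic samples.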
+
+ uint16_t id = 0;
+ for (int i = 0; i < kNumSamples; ++i) {
+ OnSendPacket(++id, kSsrc1);
+ clock_.AdvanceTimeMilliseconds(kDelayMs1);
+ EXPECT_TRUE(OnSentPacket(id));
+ OnSendPacket(++id, kSsrc2);
+ clock_.AdvanceTimeMilliseconds(kDelayMs2);
+ EXPECT_TRUE(OnSentPacket(id));
+ }
+ stats_.reset();
+ EXPECT_EQ(2, metrics::NumSamples("WebRTC.Video.SendDelayInMs"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.SendDelayInMs", kDelayMs1));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.SendDelayInMs", kDelayMs2));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/video/send_statistics_proxy.cc b/third_party/libwebrtc/webrtc/video/send_statistics_proxy.cc
new file mode 100644
index 0000000000..71c9348e3a
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/send_statistics_proxy.cc
@@ -0,0 +1,1145 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/send_statistics_proxy.h"
+
+#include <algorithm>
+#include <cmath>
+#include <map>
+#include <vector>
+
+#include "common_types.h" // NOLINT(build/include)
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "system_wrappers/include/field_trial.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+namespace {
+const float kEncodeTimeWeigthFactor = 0.5f;
+const size_t kMaxEncodedFrameMapSize = 150;
+const int64_t kMaxEncodedFrameWindowMs = 800;
+const int64_t kBucketSizeMs = 100;
+const size_t kBucketCount = 10;
+
+const char kVp8ForcedFallbackEncoderFieldTrial[] =
+ "WebRTC-VP8-Forced-Fallback-Encoder-v2";
+const char kVp8SwCodecName[] = "libvpx";
+
+// Used by histograms. Values of entries should not be changed.
+enum HistogramCodecType {
+ kVideoUnknown = 0,
+ kVideoVp8 = 1,
+ kVideoVp9 = 2,
+ kVideoH264 = 3,
+ kVideoMax = 64,
+};
+
+const char* kRealtimePrefix = "WebRTC.Video.";
+const char* kScreenPrefix = "WebRTC.Video.Screenshare.";
+
+const char* GetUmaPrefix(VideoEncoderConfig::ContentType content_type) {
+ switch (content_type) {
+ case VideoEncoderConfig::ContentType::kRealtimeVideo:
+ return kRealtimePrefix;
+ case VideoEncoderConfig::ContentType::kScreen:
+ return kScreenPrefix;
+ }
+ RTC_NOTREACHED();
+ return nullptr;
+}
+
+HistogramCodecType PayloadNameToHistogramCodecType(
+ const std::string& payload_name) {
+ VideoCodecType codecType = PayloadStringToCodecType(payload_name);
+ switch (codecType) {
+ case kVideoCodecVP8:
+ return kVideoVp8;
+ case kVideoCodecVP9:
+ return kVideoVp9;
+ case kVideoCodecH264:
+ return kVideoH264;
+ default:
+ return kVideoUnknown;
+ }
+}
+
+void UpdateCodecTypeHistogram(const std::string& payload_name) {
+ RTC_HISTOGRAM_ENUMERATION("WebRTC.Video.Encoder.CodecType",
+ PayloadNameToHistogramCodecType(payload_name),
+ kVideoMax);
+}
+
+bool IsForcedFallbackPossible(const CodecSpecificInfo* codec_info) {
+ return codec_info->codecType == kVideoCodecVP8 &&
+ codec_info->codecSpecific.VP8.simulcastIdx == 0 &&
+ (codec_info->codecSpecific.VP8.temporalIdx == 0 ||
+ codec_info->codecSpecific.VP8.temporalIdx == kNoTemporalIdx);
+}
+
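+// Parses a trial group suffix "-<min_pixels>,<max_pixels>,<min_bps>", e.g.
+// (illustrative) "-1,320,30000" yields max_pixels = 320.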
+rtc::Optional<int> GetFallbackMaxPixels(const std::string& group) {
+ if (group.empty())
+ return rtc::Optional<int>();
+
+ int min_pixels;
+ int max_pixels;
+ int min_bps;
+ if (sscanf(group.c_str(), "-%d,%d,%d", &min_pixels, &max_pixels, &min_bps) !=
+ 3) {
+ return rtc::Optional<int>();
+ }
+
+ if (min_pixels <= 0 || max_pixels <= 0 || max_pixels < min_pixels)
+ return rtc::Optional<int>();
+
+ return rtc::Optional<int>(max_pixels);
+}
+
+rtc::Optional<int> GetFallbackMaxPixelsIfFieldTrialEnabled() {
+ std::string group =
+ webrtc::field_trial::FindFullName(kVp8ForcedFallbackEncoderFieldTrial);
+ return (group.find("Enabled") == 0) ? GetFallbackMaxPixels(group.substr(7))
+ : rtc::Optional<int>();
+}
+
+rtc::Optional<int> GetFallbackMaxPixelsIfFieldTrialDisabled() {
+ std::string group =
+ webrtc::field_trial::FindFullName(kVp8ForcedFallbackEncoderFieldTrial);
+ return (group.find("Disabled") == 0) ? GetFallbackMaxPixels(group.substr(8))
+ : rtc::Optional<int>();
+}
+} // namespace
+
+const int SendStatisticsProxy::kStatsTimeoutMs = 5000;
+
+SendStatisticsProxy::SendStatisticsProxy(
+ Clock* clock,
+ const VideoSendStream::Config& config,
+ VideoEncoderConfig::ContentType content_type)
+ : clock_(clock),
+ payload_name_(config.encoder_settings.payload_name),
+ rtp_config_(config.rtp),
+ fallback_max_pixels_(GetFallbackMaxPixelsIfFieldTrialEnabled()),
+ fallback_max_pixels_disabled_(GetFallbackMaxPixelsIfFieldTrialDisabled()),
+ content_type_(content_type),
+ start_ms_(clock->TimeInMilliseconds()),
+ encode_time_(kEncodeTimeWeigthFactor),
+ quality_downscales_(-1),
+ cpu_downscales_(-1),
+ media_byte_rate_tracker_(kBucketSizeMs, kBucketCount),
+ encoded_frame_rate_tracker_(kBucketSizeMs, kBucketCount),
+ uma_container_(
+ new UmaSamplesContainer(GetUmaPrefix(content_type_), stats_, clock)) {
+}
+
+SendStatisticsProxy::~SendStatisticsProxy() {
+ rtc::CritScope lock(&crit_);
+ uma_container_->UpdateHistograms(rtp_config_, stats_);
+
+ int64_t elapsed_sec = (clock_->TimeInMilliseconds() - start_ms_) / 1000;
+ RTC_HISTOGRAM_COUNTS_100000("WebRTC.Video.SendStreamLifetimeInSeconds",
+ elapsed_sec);
+
+ if (elapsed_sec >= metrics::kMinRunTimeInSeconds)
+ UpdateCodecTypeHistogram(payload_name_);
+}
+
+SendStatisticsProxy::UmaSamplesContainer::UmaSamplesContainer(
+ const char* prefix,
+ const VideoSendStream::Stats& stats,
+ Clock* const clock)
+ : uma_prefix_(prefix),
+ clock_(clock),
+ input_frame_rate_tracker_(100, 10u),
+ input_fps_counter_(clock, nullptr, true),
+ sent_fps_counter_(clock, nullptr, true),
+ total_byte_counter_(clock, nullptr, true),
+ media_byte_counter_(clock, nullptr, true),
+ rtx_byte_counter_(clock, nullptr, true),
+ padding_byte_counter_(clock, nullptr, true),
+ retransmit_byte_counter_(clock, nullptr, true),
+ fec_byte_counter_(clock, nullptr, true),
+ first_rtcp_stats_time_ms_(-1),
+ first_rtp_stats_time_ms_(-1),
+ start_stats_(stats) {
+ InitializeBitrateCounters(stats);
+}
+
+SendStatisticsProxy::UmaSamplesContainer::~UmaSamplesContainer() {}
+
+void SendStatisticsProxy::UmaSamplesContainer::InitializeBitrateCounters(
+ const VideoSendStream::Stats& stats) {
+ for (const auto& it : stats.substreams) {
+ uint32_t ssrc = it.first;
+ total_byte_counter_.SetLast(it.second.rtp_stats.transmitted.TotalBytes(),
+ ssrc);
+ padding_byte_counter_.SetLast(it.second.rtp_stats.transmitted.padding_bytes,
+ ssrc);
+ retransmit_byte_counter_.SetLast(
+ it.second.rtp_stats.retransmitted.TotalBytes(), ssrc);
+ fec_byte_counter_.SetLast(it.second.rtp_stats.fec.TotalBytes(), ssrc);
+ if (it.second.is_rtx) {
+ rtx_byte_counter_.SetLast(it.second.rtp_stats.transmitted.TotalBytes(),
+ ssrc);
+ } else {
+ media_byte_counter_.SetLast(it.second.rtp_stats.MediaPayloadBytes(),
+ ssrc);
+ }
+ }
+}
+
+void SendStatisticsProxy::UmaSamplesContainer::RemoveOld(int64_t now_ms) {
+ while (!encoded_frames_.empty()) {
+ auto it = encoded_frames_.begin();
+ if (now_ms - it->second.send_ms < kMaxEncodedFrameWindowMs)
+ break;
+
+ // Use max per timestamp.
+ sent_width_counter_.Add(it->second.max_width);
+ sent_height_counter_.Add(it->second.max_height);
+ encoded_frames_.erase(it);
+ }
+}
+
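+// Returns true if this is the first encoded frame for the given RTP
+// timestamp, so that simulcast layers of one frame add a single fps sample.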
+bool SendStatisticsProxy::UmaSamplesContainer::InsertEncodedFrame(
+ const EncodedImage& encoded_frame) {
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ RemoveOld(now_ms);
+ if (encoded_frames_.size() > kMaxEncodedFrameMapSize) {
+ encoded_frames_.clear();
+ }
+
+ auto it = encoded_frames_.find(encoded_frame._timeStamp);
+ if (it == encoded_frames_.end()) {
+ // First frame with this timestamp.
+ encoded_frames_.insert(std::make_pair(
+ encoded_frame._timeStamp, Frame(now_ms, encoded_frame._encodedWidth,
+ encoded_frame._encodedHeight)));
+ sent_fps_counter_.Add(1);
+ return true;
+ }
+
+ it->second.max_width =
+ std::max(it->second.max_width, encoded_frame._encodedWidth);
+ it->second.max_height =
+ std::max(it->second.max_height, encoded_frame._encodedHeight);
+ return false;
+}
+
+void SendStatisticsProxy::UmaSamplesContainer::UpdateHistograms(
+ const VideoSendStream::Config::Rtp& rtp_config,
+ const VideoSendStream::Stats& current_stats) {
+ RTC_DCHECK(uma_prefix_ == kRealtimePrefix || uma_prefix_ == kScreenPrefix);
+ const int kIndex = uma_prefix_ == kScreenPrefix ? 1 : 0;
+ const int kMinRequiredPeriodicSamples = 6;
+ int in_width = input_width_counter_.Avg(kMinRequiredMetricsSamples);
+ int in_height = input_height_counter_.Avg(kMinRequiredMetricsSamples);
+ if (in_width != -1) {
+ RTC_HISTOGRAMS_COUNTS_10000(kIndex, uma_prefix_ + "InputWidthInPixels",
+ in_width);
+ RTC_HISTOGRAMS_COUNTS_10000(kIndex, uma_prefix_ + "InputHeightInPixels",
+ in_height);
+ RTC_LOG(LS_INFO) << uma_prefix_ << "InputWidthInPixels " << in_width;
+ RTC_LOG(LS_INFO) << uma_prefix_ << "InputHeightInPixels " << in_height;
+ }
+ AggregatedStats in_fps = input_fps_counter_.GetStats();
+ if (in_fps.num_samples >= kMinRequiredPeriodicSamples) {
+ RTC_HISTOGRAMS_COUNTS_100(kIndex, uma_prefix_ + "InputFramesPerSecond",
+ in_fps.average);
+ RTC_LOG(LS_INFO) << uma_prefix_ + "InputFramesPerSecond, "
+ << in_fps.ToString();
+ }
+
+ int sent_width = sent_width_counter_.Avg(kMinRequiredMetricsSamples);
+ int sent_height = sent_height_counter_.Avg(kMinRequiredMetricsSamples);
+ if (sent_width != -1) {
+ RTC_HISTOGRAMS_COUNTS_10000(kIndex, uma_prefix_ + "SentWidthInPixels",
+ sent_width);
+ RTC_HISTOGRAMS_COUNTS_10000(kIndex, uma_prefix_ + "SentHeightInPixels",
+ sent_height);
+ RTC_LOG(LS_INFO) << uma_prefix_ << "SentWidthInPixels " << sent_width;
+ RTC_LOG(LS_INFO) << uma_prefix_ << "SentHeightInPixels " << sent_height;
+ }
+ AggregatedStats sent_fps = sent_fps_counter_.GetStats();
+ if (sent_fps.num_samples >= kMinRequiredPeriodicSamples) {
+ RTC_HISTOGRAMS_COUNTS_100(kIndex, uma_prefix_ + "SentFramesPerSecond",
+ sent_fps.average);
+ RTC_LOG(LS_INFO) << uma_prefix_ + "SentFramesPerSecond, "
+ << sent_fps.ToString();
+ }
+
+ if (in_fps.num_samples > kMinRequiredPeriodicSamples &&
+ sent_fps.num_samples >= kMinRequiredPeriodicSamples) {
+ int in_fps_avg = in_fps.average;
+ if (in_fps_avg > 0) {
+ int sent_fps_avg = sent_fps.average;
+ int sent_to_in_fps_ratio_percent =
+ (100 * sent_fps_avg + in_fps_avg / 2) / in_fps_avg;
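+      // Rounds to the nearest percent, e.g. (illustrative) 14 sent fps of
+      // 29 input fps -> (1400 + 14) / 29 = 48.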
+ // If reported period is small, it may happen that sent_fps is larger than
+ // input_fps briefly on average. This should be treated as 100% sent to
+ // input ratio.
+ if (sent_to_in_fps_ratio_percent > 100)
+ sent_to_in_fps_ratio_percent = 100;
+ RTC_HISTOGRAMS_PERCENTAGE(kIndex,
+ uma_prefix_ + "SentToInputFpsRatioPercent",
+ sent_to_in_fps_ratio_percent);
+ RTC_LOG(LS_INFO) << uma_prefix_ << "SentToInputFpsRatioPercent "
+ << sent_to_in_fps_ratio_percent;
+ }
+ }
+
+ int encode_ms = encode_time_counter_.Avg(kMinRequiredMetricsSamples);
+ if (encode_ms != -1) {
+ RTC_HISTOGRAMS_COUNTS_1000(kIndex, uma_prefix_ + "EncodeTimeInMs",
+ encode_ms);
+ RTC_LOG(LS_INFO) << uma_prefix_ << "EncodeTimeInMs " << encode_ms;
+ }
+ int key_frames_permille =
+ key_frame_counter_.Permille(kMinRequiredMetricsSamples);
+ if (key_frames_permille != -1) {
+ RTC_HISTOGRAMS_COUNTS_1000(kIndex, uma_prefix_ + "KeyFramesSentInPermille",
+ key_frames_permille);
+ RTC_LOG(LS_INFO) << uma_prefix_ << "KeyFramesSentInPermille "
+ << key_frames_permille;
+ }
+ int quality_limited =
+ quality_limited_frame_counter_.Percent(kMinRequiredMetricsSamples);
+ if (quality_limited != -1) {
+ RTC_HISTOGRAMS_PERCENTAGE(kIndex,
+ uma_prefix_ + "QualityLimitedResolutionInPercent",
+ quality_limited);
+ RTC_LOG(LS_INFO) << uma_prefix_ << "QualityLimitedResolutionInPercent "
+ << quality_limited;
+ }
+ int downscales = quality_downscales_counter_.Avg(kMinRequiredMetricsSamples);
+ if (downscales != -1) {
+ RTC_HISTOGRAMS_ENUMERATION(
+ kIndex, uma_prefix_ + "QualityLimitedResolutionDownscales", downscales,
+ 20);
+ }
+ int cpu_limited =
+ cpu_limited_frame_counter_.Percent(kMinRequiredMetricsSamples);
+ if (cpu_limited != -1) {
+ RTC_HISTOGRAMS_PERCENTAGE(
+ kIndex, uma_prefix_ + "CpuLimitedResolutionInPercent", cpu_limited);
+ }
+ int bw_limited =
+ bw_limited_frame_counter_.Percent(kMinRequiredMetricsSamples);
+ if (bw_limited != -1) {
+ RTC_HISTOGRAMS_PERCENTAGE(
+ kIndex, uma_prefix_ + "BandwidthLimitedResolutionInPercent",
+ bw_limited);
+ }
+ int num_disabled =
+ bw_resolutions_disabled_counter_.Avg(kMinRequiredMetricsSamples);
+ if (num_disabled != -1) {
+ RTC_HISTOGRAMS_ENUMERATION(
+ kIndex, uma_prefix_ + "BandwidthLimitedResolutionsDisabled",
+ num_disabled, 10);
+ }
+ int delay_ms = delay_counter_.Avg(kMinRequiredMetricsSamples);
+ if (delay_ms != -1)
+ RTC_HISTOGRAMS_COUNTS_100000(kIndex, uma_prefix_ + "SendSideDelayInMs",
+ delay_ms);
+
+ int max_delay_ms = max_delay_counter_.Avg(kMinRequiredMetricsSamples);
+ if (max_delay_ms != -1) {
+ RTC_HISTOGRAMS_COUNTS_100000(kIndex, uma_prefix_ + "SendSideDelayMaxInMs",
+ max_delay_ms);
+ }
+
+ for (const auto& it : qp_counters_) {
+ int qp_vp8 = it.second.vp8.Avg(kMinRequiredMetricsSamples);
+ if (qp_vp8 != -1) {
+ int spatial_idx = it.first;
+ if (spatial_idx == -1) {
+ RTC_HISTOGRAMS_COUNTS_200(kIndex, uma_prefix_ + "Encoded.Qp.Vp8",
+ qp_vp8);
+ } else if (spatial_idx == 0) {
+ RTC_HISTOGRAMS_COUNTS_200(kIndex, uma_prefix_ + "Encoded.Qp.Vp8.S0",
+ qp_vp8);
+ } else if (spatial_idx == 1) {
+ RTC_HISTOGRAMS_COUNTS_200(kIndex, uma_prefix_ + "Encoded.Qp.Vp8.S1",
+ qp_vp8);
+ } else if (spatial_idx == 2) {
+ RTC_HISTOGRAMS_COUNTS_200(kIndex, uma_prefix_ + "Encoded.Qp.Vp8.S2",
+ qp_vp8);
+ } else {
+ RTC_LOG(LS_WARNING)
+ << "QP stats not recorded for VP8 spatial idx " << spatial_idx;
+ }
+ }
+ int qp_vp9 = it.second.vp9.Avg(kMinRequiredMetricsSamples);
+ if (qp_vp9 != -1) {
+ int spatial_idx = it.first;
+ if (spatial_idx == -1) {
+ RTC_HISTOGRAMS_COUNTS_500(kIndex, uma_prefix_ + "Encoded.Qp.Vp9",
+ qp_vp9);
+ } else if (spatial_idx == 0) {
+ RTC_HISTOGRAMS_COUNTS_500(kIndex, uma_prefix_ + "Encoded.Qp.Vp9.S0",
+ qp_vp9);
+ } else if (spatial_idx == 1) {
+ RTC_HISTOGRAMS_COUNTS_500(kIndex, uma_prefix_ + "Encoded.Qp.Vp9.S1",
+ qp_vp9);
+ } else if (spatial_idx == 2) {
+ RTC_HISTOGRAMS_COUNTS_500(kIndex, uma_prefix_ + "Encoded.Qp.Vp9.S2",
+ qp_vp9);
+ } else {
+ RTC_LOG(LS_WARNING)
+ << "QP stats not recorded for VP9 spatial layer " << spatial_idx;
+ }
+ }
+ int qp_h264 = it.second.h264.Avg(kMinRequiredMetricsSamples);
+ if (qp_h264 != -1) {
+ int spatial_idx = it.first;
+ RTC_DCHECK_EQ(-1, spatial_idx);
+ RTC_HISTOGRAMS_COUNTS_100(kIndex, uma_prefix_ + "Encoded.Qp.H264",
+ qp_h264);
+ }
+ }
+
+ if (first_rtp_stats_time_ms_ != -1) {
+ quality_adapt_timer_.Stop(clock_->TimeInMilliseconds());
+ int64_t elapsed_sec = quality_adapt_timer_.total_ms / 1000;
+ if (elapsed_sec >= metrics::kMinRunTimeInSeconds) {
+ int quality_changes = current_stats.number_of_quality_adapt_changes -
+ start_stats_.number_of_quality_adapt_changes;
+ RTC_HISTOGRAMS_COUNTS_100(kIndex,
+ uma_prefix_ + "AdaptChangesPerMinute.Quality",
+ quality_changes * 60 / elapsed_sec);
+ }
+ cpu_adapt_timer_.Stop(clock_->TimeInMilliseconds());
+ elapsed_sec = cpu_adapt_timer_.total_ms / 1000;
+ if (elapsed_sec >= metrics::kMinRunTimeInSeconds) {
+ int cpu_changes = current_stats.number_of_cpu_adapt_changes -
+ start_stats_.number_of_cpu_adapt_changes;
+ RTC_HISTOGRAMS_COUNTS_100(kIndex,
+ uma_prefix_ + "AdaptChangesPerMinute.Cpu",
+ cpu_changes * 60 / elapsed_sec);
+ }
+ }
+
+ if (first_rtcp_stats_time_ms_ != -1) {
+ int64_t elapsed_sec =
+ (clock_->TimeInMilliseconds() - first_rtcp_stats_time_ms_) / 1000;
+ if (elapsed_sec >= metrics::kMinRunTimeInSeconds) {
+ int fraction_lost = report_block_stats_.FractionLostInPercent();
+ if (fraction_lost != -1) {
+ RTC_HISTOGRAMS_PERCENTAGE(
+ kIndex, uma_prefix_ + "SentPacketsLostInPercent", fraction_lost);
+ RTC_LOG(LS_INFO) << uma_prefix_ << "SentPacketsLostInPercent "
+ << fraction_lost;
+ }
+
+ // The RTCP packet type counters, delivered via the
+ // RtcpPacketTypeCounterObserver interface, are aggregates over the entire
+ // life of the send stream and are not reset when switching content type.
+ // For the purpose of these statistics though, we want new counts when
+ // switching since we switch histogram name. On every reset of the
+ // UmaSamplesContainer, we save the initial state of the counters, so that
+ // we can calculate the delta here and aggregate over all ssrcs.
+ RtcpPacketTypeCounter counters;
+ for (uint32_t ssrc : rtp_config.ssrcs) {
+ auto kv = current_stats.substreams.find(ssrc);
+ if (kv == current_stats.substreams.end())
+ continue;
+
+ RtcpPacketTypeCounter stream_counters =
+ kv->second.rtcp_packet_type_counts;
+ kv = start_stats_.substreams.find(ssrc);
+ if (kv != start_stats_.substreams.end())
+ stream_counters.Subtract(kv->second.rtcp_packet_type_counts);
+
+ counters.Add(stream_counters);
+ }
+ RTC_HISTOGRAMS_COUNTS_10000(kIndex,
+ uma_prefix_ + "NackPacketsReceivedPerMinute",
+ counters.nack_packets * 60 / elapsed_sec);
+ RTC_HISTOGRAMS_COUNTS_10000(kIndex,
+ uma_prefix_ + "FirPacketsReceivedPerMinute",
+ counters.fir_packets * 60 / elapsed_sec);
+ RTC_HISTOGRAMS_COUNTS_10000(kIndex,
+ uma_prefix_ + "PliPacketsReceivedPerMinute",
+ counters.pli_packets * 60 / elapsed_sec);
+ if (counters.nack_requests > 0) {
+ RTC_HISTOGRAMS_PERCENTAGE(
+ kIndex, uma_prefix_ + "UniqueNackRequestsReceivedInPercent",
+ counters.UniqueNackRequestsInPercent());
+ }
+ }
+ }
+
+ if (first_rtp_stats_time_ms_ != -1) {
+ int64_t elapsed_sec =
+ (clock_->TimeInMilliseconds() - first_rtp_stats_time_ms_) / 1000;
+ if (elapsed_sec >= metrics::kMinRunTimeInSeconds) {
+ RTC_HISTOGRAMS_COUNTS_100(kIndex, uma_prefix_ + "NumberOfPauseEvents",
+ target_rate_updates_.pause_resume_events);
+ RTC_LOG(LS_INFO) << uma_prefix_ << "NumberOfPauseEvents "
+ << target_rate_updates_.pause_resume_events;
+
+ int paused_time_percent =
+ paused_time_counter_.Percent(metrics::kMinRunTimeInSeconds * 1000);
+ if (paused_time_percent != -1) {
+ RTC_HISTOGRAMS_PERCENTAGE(kIndex, uma_prefix_ + "PausedTimeInPercent",
+ paused_time_percent);
+ RTC_LOG(LS_INFO) << uma_prefix_ << "PausedTimeInPercent "
+ << paused_time_percent;
+ }
+ }
+ }
+
+ if (fallback_info_.is_possible) {
+ // Double interval since there is some time before fallback may occur.
+ const int kMinRunTimeMs = 2 * metrics::kMinRunTimeInSeconds * 1000;
+ int64_t elapsed_ms = fallback_info_.elapsed_ms;
+ int fallback_time_percent = fallback_active_counter_.Percent(kMinRunTimeMs);
+ if (fallback_time_percent != -1 && elapsed_ms >= kMinRunTimeMs) {
+ RTC_HISTOGRAMS_PERCENTAGE(
+ kIndex, uma_prefix_ + "Encoder.ForcedSwFallbackTimeInPercent.Vp8",
+ fallback_time_percent);
+ RTC_HISTOGRAMS_COUNTS_100(
+ kIndex, uma_prefix_ + "Encoder.ForcedSwFallbackChangesPerMinute.Vp8",
+ fallback_info_.on_off_events * 60 / (elapsed_ms / 1000));
+ }
+ }
+
+ AggregatedStats total_bytes_per_sec = total_byte_counter_.GetStats();
+ if (total_bytes_per_sec.num_samples > kMinRequiredPeriodicSamples) {
+ RTC_HISTOGRAMS_COUNTS_10000(kIndex, uma_prefix_ + "BitrateSentInKbps",
+ total_bytes_per_sec.average * 8 / 1000);
+ RTC_LOG(LS_INFO) << uma_prefix_ << "BitrateSentInBps, "
+ << total_bytes_per_sec.ToStringWithMultiplier(8);
+ }
+ AggregatedStats media_bytes_per_sec = media_byte_counter_.GetStats();
+ if (media_bytes_per_sec.num_samples > kMinRequiredPeriodicSamples) {
+ RTC_HISTOGRAMS_COUNTS_10000(kIndex, uma_prefix_ + "MediaBitrateSentInKbps",
+ media_bytes_per_sec.average * 8 / 1000);
+ RTC_LOG(LS_INFO) << uma_prefix_ << "MediaBitrateSentInBps, "
+ << media_bytes_per_sec.ToStringWithMultiplier(8);
+ }
+ AggregatedStats padding_bytes_per_sec = padding_byte_counter_.GetStats();
+ if (padding_bytes_per_sec.num_samples > kMinRequiredPeriodicSamples) {
+ RTC_HISTOGRAMS_COUNTS_10000(kIndex,
+ uma_prefix_ + "PaddingBitrateSentInKbps",
+ padding_bytes_per_sec.average * 8 / 1000);
+ RTC_LOG(LS_INFO) << uma_prefix_ << "PaddingBitrateSentInBps, "
+ << padding_bytes_per_sec.ToStringWithMultiplier(8);
+ }
+ AggregatedStats retransmit_bytes_per_sec =
+ retransmit_byte_counter_.GetStats();
+ if (retransmit_bytes_per_sec.num_samples > kMinRequiredPeriodicSamples) {
+ RTC_HISTOGRAMS_COUNTS_10000(kIndex,
+ uma_prefix_ + "RetransmittedBitrateSentInKbps",
+ retransmit_bytes_per_sec.average * 8 / 1000);
+ RTC_LOG(LS_INFO) << uma_prefix_ << "RetransmittedBitrateSentInBps, "
+ << retransmit_bytes_per_sec.ToStringWithMultiplier(8);
+ }
+ if (!rtp_config.rtx.ssrcs.empty()) {
+ AggregatedStats rtx_bytes_per_sec = rtx_byte_counter_.GetStats();
+ int rtx_bytes_per_sec_avg = -1;
+ if (rtx_bytes_per_sec.num_samples > kMinRequiredPeriodicSamples) {
+ rtx_bytes_per_sec_avg = rtx_bytes_per_sec.average;
+ RTC_LOG(LS_INFO) << uma_prefix_ << "RtxBitrateSentInBps, "
+ << rtx_bytes_per_sec.ToStringWithMultiplier(8);
+ } else if (total_bytes_per_sec.num_samples > kMinRequiredPeriodicSamples) {
+ rtx_bytes_per_sec_avg = 0; // RTX enabled but no RTX data sent, record 0.
+ }
+ if (rtx_bytes_per_sec_avg != -1) {
+ RTC_HISTOGRAMS_COUNTS_10000(kIndex, uma_prefix_ + "RtxBitrateSentInKbps",
+ rtx_bytes_per_sec_avg * 8 / 1000);
+ }
+ }
+ if (rtp_config.flexfec.payload_type != -1 ||
+ rtp_config.ulpfec.red_payload_type != -1) {
+ AggregatedStats fec_bytes_per_sec = fec_byte_counter_.GetStats();
+ if (fec_bytes_per_sec.num_samples > kMinRequiredPeriodicSamples) {
+ RTC_HISTOGRAMS_COUNTS_10000(kIndex, uma_prefix_ + "FecBitrateSentInKbps",
+ fec_bytes_per_sec.average * 8 / 1000);
+ RTC_LOG(LS_INFO) << uma_prefix_ << "FecBitrateSentInBps, "
+ << fec_bytes_per_sec.ToStringWithMultiplier(8);
+ }
+ }
+ RTC_LOG(LS_INFO) << "Frames encoded " << current_stats.frames_encoded;
+ RTC_LOG(LS_INFO) << uma_prefix_ << "DroppedFrames.Capturer "
+ << current_stats.frames_dropped_by_capturer;
+ RTC_HISTOGRAMS_COUNTS_1000(kIndex, uma_prefix_ + "DroppedFrames.Capturer",
+ current_stats.frames_dropped_by_capturer);
+ RTC_LOG(LS_INFO) << uma_prefix_ << "DroppedFrames.EncoderQueue "
+ << current_stats.frames_dropped_by_encoder_queue;
+ RTC_HISTOGRAMS_COUNTS_1000(kIndex, uma_prefix_ + "DroppedFrames.EncoderQueue",
+ current_stats.frames_dropped_by_encoder_queue);
+ RTC_LOG(LS_INFO) << uma_prefix_ << "DroppedFrames.Encoder "
+ << current_stats.frames_dropped_by_encoder;
+ RTC_HISTOGRAMS_COUNTS_1000(kIndex, uma_prefix_ + "DroppedFrames.Encoder",
+ current_stats.frames_dropped_by_encoder);
+ RTC_LOG(LS_INFO) << uma_prefix_ << "DroppedFrames.Ratelimiter "
+ << current_stats.frames_dropped_by_rate_limiter;
+ RTC_HISTOGRAMS_COUNTS_1000(kIndex, uma_prefix_ + "DroppedFrames.Ratelimiter",
+ current_stats.frames_dropped_by_rate_limiter);
+}
+
+void SendStatisticsProxy::OnEncoderReconfigured(
+ const VideoEncoderConfig& config,
+ uint32_t preferred_bitrate_bps) {
+ rtc::CritScope lock(&crit_);
+ stats_.preferred_media_bitrate_bps = preferred_bitrate_bps;
+
+ if (content_type_ != config.content_type) {
+ uma_container_->UpdateHistograms(rtp_config_, stats_);
+ uma_container_.reset(new UmaSamplesContainer(
+ GetUmaPrefix(config.content_type), stats_, clock_));
+ content_type_ = config.content_type;
+ }
+}
+
+void SendStatisticsProxy::OnEncodedFrameTimeMeasured(
+ int encode_time_ms,
+ const CpuOveruseMetrics& metrics) {
+ rtc::CritScope lock(&crit_);
+ uma_container_->encode_time_counter_.Add(encode_time_ms);
+ encode_time_.Apply(1.0f, encode_time_ms);
+ stats_.avg_encode_time_ms = round(encode_time_.filtered());
+ stats_.encode_usage_percent = metrics.encode_usage_percent;
+}
+
+void SendStatisticsProxy::OnSuspendChange(bool is_suspended) {
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ rtc::CritScope lock(&crit_);
+ stats_.suspended = is_suspended;
+ if (is_suspended) {
+ // Pause framerate (add min pause time since there may be frames/packets
+ // that are not yet sent).
+ const int64_t kMinMs = 500;
+ uma_container_->input_fps_counter_.ProcessAndPauseForDuration(kMinMs);
+ uma_container_->sent_fps_counter_.ProcessAndPauseForDuration(kMinMs);
+ // Pause bitrate stats.
+ uma_container_->total_byte_counter_.ProcessAndPauseForDuration(kMinMs);
+ uma_container_->media_byte_counter_.ProcessAndPauseForDuration(kMinMs);
+ uma_container_->rtx_byte_counter_.ProcessAndPauseForDuration(kMinMs);
+ uma_container_->padding_byte_counter_.ProcessAndPauseForDuration(kMinMs);
+ uma_container_->retransmit_byte_counter_.ProcessAndPauseForDuration(kMinMs);
+ uma_container_->fec_byte_counter_.ProcessAndPauseForDuration(kMinMs);
+ // Stop adaptation stats.
+ uma_container_->cpu_adapt_timer_.Stop(now_ms);
+ uma_container_->quality_adapt_timer_.Stop(now_ms);
+ } else {
+ // Start adaptation stats if scaling is enabled.
+ if (cpu_downscales_ >= 0)
+ uma_container_->cpu_adapt_timer_.Start(now_ms);
+ if (quality_downscales_ >= 0)
+ uma_container_->quality_adapt_timer_.Start(now_ms);
+ // Stop pause explicitly for stats that may be zero/not updated for some
+ // time.
+ uma_container_->rtx_byte_counter_.ProcessAndStopPause();
+ uma_container_->padding_byte_counter_.ProcessAndStopPause();
+ uma_container_->retransmit_byte_counter_.ProcessAndStopPause();
+ uma_container_->fec_byte_counter_.ProcessAndStopPause();
+ }
+}
+
+VideoSendStream::Stats SendStatisticsProxy::GetStats() {
+ rtc::CritScope lock(&crit_);
+ PurgeOldStats();
+ stats_.input_frame_rate =
+ round(uma_container_->input_frame_rate_tracker_.ComputeRate());
+ stats_.content_type =
+ content_type_ == VideoEncoderConfig::ContentType::kRealtimeVideo
+ ? VideoContentType::UNSPECIFIED
+ : VideoContentType::SCREENSHARE;
+ stats_.encode_frame_rate = round(encoded_frame_rate_tracker_.ComputeRate());
+ stats_.media_bitrate_bps = media_byte_rate_tracker_.ComputeRate() * 8;
+ return stats_;
+}
+
+void SendStatisticsProxy::PurgeOldStats() {
+ int64_t old_stats_ms = clock_->TimeInMilliseconds() - kStatsTimeoutMs;
+ for (std::map<uint32_t, VideoSendStream::StreamStats>::iterator it =
+ stats_.substreams.begin();
+ it != stats_.substreams.end(); ++it) {
+ uint32_t ssrc = it->first;
+ if (update_times_[ssrc].resolution_update_ms <= old_stats_ms) {
+ it->second.width = 0;
+ it->second.height = 0;
+ }
+ }
+}
+
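+// Returns the stats entry for |ssrc|, creating it on first use if the ssrc
+// belongs to a configured media, FlexFEC or RTX stream; otherwise nullptr.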
+VideoSendStream::StreamStats* SendStatisticsProxy::GetStatsEntry(
+ uint32_t ssrc) {
+ std::map<uint32_t, VideoSendStream::StreamStats>::iterator it =
+ stats_.substreams.find(ssrc);
+ if (it != stats_.substreams.end())
+ return &it->second;
+
+ bool is_media = std::find(rtp_config_.ssrcs.begin(), rtp_config_.ssrcs.end(),
+ ssrc) != rtp_config_.ssrcs.end();
+ bool is_flexfec = rtp_config_.flexfec.payload_type != -1 &&
+ ssrc == rtp_config_.flexfec.ssrc;
+ bool is_rtx =
+ std::find(rtp_config_.rtx.ssrcs.begin(), rtp_config_.rtx.ssrcs.end(),
+ ssrc) != rtp_config_.rtx.ssrcs.end();
+ if (!is_media && !is_flexfec && !is_rtx)
+ return nullptr;
+
+ // Insert new entry and return ptr.
+ VideoSendStream::StreamStats* entry = &stats_.substreams[ssrc];
+ entry->is_rtx = is_rtx;
+ entry->is_flexfec = is_flexfec;
+
+ return entry;
+}
+
+void SendStatisticsProxy::OnInactiveSsrc(uint32_t ssrc) {
+ rtc::CritScope lock(&crit_);
+ VideoSendStream::StreamStats* stats = GetStatsEntry(ssrc);
+ if (!stats)
+ return;
+
+ stats->total_bitrate_bps = 0;
+ stats->retransmit_bitrate_bps = 0;
+ stats->height = 0;
+ stats->width = 0;
+}
+
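+// Tracks time spent paused (target rate == 0) and pause/resume transitions.
+// Illustrative example: updates of 300, 0, 0, 300 kbps at 1 s intervals add
+// 1 s of paused time for each interval during which the target was zero;
+// each pause/resume event is counted on the update that follows it.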
+void SendStatisticsProxy::OnSetEncoderTargetRate(uint32_t bitrate_bps) {
+ rtc::CritScope lock(&crit_);
+ if (uma_container_->target_rate_updates_.last_ms == -1 && bitrate_bps == 0)
+ return; // Start on first non-zero bitrate, may initially be zero.
+
+ int64_t now = clock_->TimeInMilliseconds();
+ if (uma_container_->target_rate_updates_.last_ms != -1) {
+ bool was_paused = stats_.target_media_bitrate_bps == 0;
+ int64_t diff_ms = now - uma_container_->target_rate_updates_.last_ms;
+ uma_container_->paused_time_counter_.Add(was_paused, diff_ms);
+
+    // Count the previous update here, so that the final update (made when
+    // the stream is stopped and video is disabled) is not included.
+ if (uma_container_->target_rate_updates_.last_paused_or_resumed)
+ ++uma_container_->target_rate_updates_.pause_resume_events;
+
+ // Check if video is paused/resumed.
+ uma_container_->target_rate_updates_.last_paused_or_resumed =
+ (bitrate_bps == 0) != was_paused;
+ }
+ uma_container_->target_rate_updates_.last_ms = now;
+
+ stats_.target_media_bitrate_bps = bitrate_bps;
+}
+
+void SendStatisticsProxy::UpdateEncoderFallbackStats(
+ const CodecSpecificInfo* codec_info,
+ int pixels) {
+ UpdateFallbackDisabledStats(codec_info, pixels);
+
+ if (!fallback_max_pixels_ || !uma_container_->fallback_info_.is_possible) {
+ return;
+ }
+
+ if (!IsForcedFallbackPossible(codec_info)) {
+ uma_container_->fallback_info_.is_possible = false;
+ return;
+ }
+
+ FallbackEncoderInfo* fallback_info = &uma_container_->fallback_info_;
+
+ const int64_t now_ms = clock_->TimeInMilliseconds();
+ bool is_active = fallback_info->is_active;
+ if (codec_info->codec_name != stats_.encoder_implementation_name) {
+ // Implementation changed.
+ is_active = strcmp(codec_info->codec_name, kVp8SwCodecName) == 0;
+ if (!is_active && stats_.encoder_implementation_name != kVp8SwCodecName) {
+      // Either the first implementation change or one not involving the VP8
+      // SW encoder; update the stats on the next call.
+ return;
+ }
+ if (is_active && (pixels > *fallback_max_pixels_)) {
+      // Pixels should not be above |fallback_max_pixels_|. If they are, the
+      // fallback was caused by a failure rather than the forced-fallback
+      // trial, so stop tracking to avoid skewing the stats.
+ fallback_info->is_possible = false;
+ return;
+ }
+ stats_.has_entered_low_resolution = true;
+ ++fallback_info->on_off_events;
+ }
+
+ if (fallback_info->last_update_ms) {
+ int64_t diff_ms = now_ms - *(fallback_info->last_update_ms);
+ // If the time diff since last update is greater than |max_frame_diff_ms|,
+ // video is considered paused/muted and the change is not included.
+ if (diff_ms < fallback_info->max_frame_diff_ms) {
+ uma_container_->fallback_active_counter_.Add(fallback_info->is_active,
+ diff_ms);
+ fallback_info->elapsed_ms += diff_ms;
+ }
+ }
+ fallback_info->is_active = is_active;
+ fallback_info->last_update_ms.emplace(now_ms);
+}
+
+void SendStatisticsProxy::UpdateFallbackDisabledStats(
+ const CodecSpecificInfo* codec_info,
+ int pixels) {
+ if (!fallback_max_pixels_disabled_ ||
+ !uma_container_->fallback_info_disabled_.is_possible ||
+ stats_.has_entered_low_resolution) {
+ return;
+ }
+
+ if (!IsForcedFallbackPossible(codec_info) ||
+ strcmp(codec_info->codec_name, kVp8SwCodecName) == 0) {
+ uma_container_->fallback_info_disabled_.is_possible = false;
+ return;
+ }
+
+ if (pixels <= *fallback_max_pixels_disabled_ ||
+ uma_container_->fallback_info_disabled_.min_pixel_limit_reached) {
+ stats_.has_entered_low_resolution = true;
+ }
+}
+
+void SendStatisticsProxy::OnMinPixelLimitReached() {
+ rtc::CritScope lock(&crit_);
+ uma_container_->fallback_info_disabled_.min_pixel_limit_reached = true;
+}
+
+void SendStatisticsProxy::OnSendEncodedImage(
+ const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_info) {
+ size_t simulcast_idx = 0;
+
+ rtc::CritScope lock(&crit_);
+ ++stats_.frames_encoded;
+ if (codec_info) {
+ if (codec_info->codecType == kVideoCodecVP8) {
+ simulcast_idx = codec_info->codecSpecific.VP8.simulcastIdx;
+ } else if (codec_info->codecType == kVideoCodecGeneric) {
+ simulcast_idx = codec_info->codecSpecific.generic.simulcast_idx;
+ }
+ if (codec_info->codec_name) {
+ UpdateEncoderFallbackStats(codec_info, encoded_image._encodedWidth *
+ encoded_image._encodedHeight);
+ stats_.encoder_implementation_name = codec_info->codec_name;
+ }
+ }
+
+ if (simulcast_idx >= rtp_config_.ssrcs.size()) {
+ RTC_LOG(LS_ERROR) << "Encoded image outside simulcast range ("
+ << simulcast_idx << " >= " << rtp_config_.ssrcs.size()
+ << ").";
+ return;
+ }
+ uint32_t ssrc = rtp_config_.ssrcs[simulcast_idx];
+
+ VideoSendStream::StreamStats* stats = GetStatsEntry(ssrc);
+ if (!stats)
+ return;
+
+ stats->width = encoded_image._encodedWidth;
+ stats->height = encoded_image._encodedHeight;
+ update_times_[ssrc].resolution_update_ms = clock_->TimeInMilliseconds();
+
+ uma_container_->key_frame_counter_.Add(encoded_image._frameType ==
+ kVideoFrameKey);
+ stats_.bw_limited_resolution =
+ encoded_image.adapt_reason_.bw_resolutions_disabled > 0 ||
+ quality_downscales_ > 0;
+
+ if (quality_downscales_ != -1) {
+ uma_container_->quality_limited_frame_counter_.Add(quality_downscales_ > 0);
+ if (quality_downscales_ > 0)
+ uma_container_->quality_downscales_counter_.Add(quality_downscales_);
+ }
+ if (encoded_image.adapt_reason_.bw_resolutions_disabled != -1) {
+ bool bw_limited = encoded_image.adapt_reason_.bw_resolutions_disabled > 0;
+ uma_container_->bw_limited_frame_counter_.Add(bw_limited);
+ if (bw_limited) {
+ uma_container_->bw_resolutions_disabled_counter_.Add(
+ encoded_image.adapt_reason_.bw_resolutions_disabled);
+ }
+ }
+
+ if (encoded_image.qp_ != -1) {
+ if (!stats_.qp_sum)
+ stats_.qp_sum = rtc::Optional<uint64_t>(0);
+ *stats_.qp_sum += encoded_image.qp_;
+
+ if (codec_info) {
+ if (codec_info->codecType == kVideoCodecVP8) {
+ int spatial_idx = (rtp_config_.ssrcs.size() == 1)
+ ? -1
+ : static_cast<int>(simulcast_idx);
+ uma_container_->qp_counters_[spatial_idx].vp8.Add(encoded_image.qp_);
+ } else if (codec_info->codecType == kVideoCodecVP9) {
+ int spatial_idx =
+ (codec_info->codecSpecific.VP9.num_spatial_layers == 1)
+ ? -1
+ : codec_info->codecSpecific.VP9.spatial_idx;
+ uma_container_->qp_counters_[spatial_idx].vp9.Add(encoded_image.qp_);
+ } else if (codec_info->codecType == kVideoCodecH264) {
+ int spatial_idx = -1;
+ uma_container_->qp_counters_[spatial_idx].h264.Add(encoded_image.qp_);
+ }
+ }
+ }
+
+ media_byte_rate_tracker_.AddSamples(encoded_image._length);
+ if (uma_container_->InsertEncodedFrame(encoded_image))
+ encoded_frame_rate_tracker_.AddSamples(1);
+}
+
+int SendStatisticsProxy::GetSendFrameRate() const {
+ rtc::CritScope lock(&crit_);
+ return round(encoded_frame_rate_tracker_.ComputeRate());
+}
+
+void SendStatisticsProxy::OnIncomingFrame(int width, int height) {
+ rtc::CritScope lock(&crit_);
+ uma_container_->input_frame_rate_tracker_.AddSamples(1);
+ uma_container_->input_fps_counter_.Add(1);
+ uma_container_->input_width_counter_.Add(width);
+ uma_container_->input_height_counter_.Add(height);
+ if (cpu_downscales_ >= 0) {
+ uma_container_->cpu_limited_frame_counter_.Add(
+ stats_.cpu_limited_resolution);
+ }
+ if (encoded_frame_rate_tracker_.TotalSampleCount() == 0) {
+ // Set start time now instead of when first key frame is encoded to avoid a
+ // too high initial estimate.
+ encoded_frame_rate_tracker_.AddSamples(0);
+ }
+}
+
+void SendStatisticsProxy::OnFrameDroppedBySource() {
+ rtc::CritScope lock(&crit_);
+ ++stats_.frames_dropped_by_capturer;
+}
+
+void SendStatisticsProxy::OnFrameDroppedInEncoderQueue() {
+ rtc::CritScope lock(&crit_);
+ ++stats_.frames_dropped_by_encoder_queue;
+}
+
+void SendStatisticsProxy::OnFrameDroppedByEncoder() {
+ rtc::CritScope lock(&crit_);
+ ++stats_.frames_dropped_by_encoder;
+}
+
+void SendStatisticsProxy::OnFrameDroppedByMediaOptimizations() {
+ rtc::CritScope lock(&crit_);
+ ++stats_.frames_dropped_by_rate_limiter;
+}
+
+void SendStatisticsProxy::SetAdaptationStats(
+ const VideoStreamEncoder::AdaptCounts& cpu_counts,
+ const VideoStreamEncoder::AdaptCounts& quality_counts) {
+ rtc::CritScope lock(&crit_);
+ SetAdaptTimer(cpu_counts, &uma_container_->cpu_adapt_timer_);
+ SetAdaptTimer(quality_counts, &uma_container_->quality_adapt_timer_);
+ UpdateAdaptationStats(cpu_counts, quality_counts);
+}
+
+void SendStatisticsProxy::OnCpuAdaptationChanged(
+ const VideoStreamEncoder::AdaptCounts& cpu_counts,
+ const VideoStreamEncoder::AdaptCounts& quality_counts) {
+ rtc::CritScope lock(&crit_);
+ ++stats_.number_of_cpu_adapt_changes;
+ UpdateAdaptationStats(cpu_counts, quality_counts);
+}
+
+void SendStatisticsProxy::OnQualityAdaptationChanged(
+ const VideoStreamEncoder::AdaptCounts& cpu_counts,
+ const VideoStreamEncoder::AdaptCounts& quality_counts) {
+ rtc::CritScope lock(&crit_);
+ ++stats_.number_of_quality_adapt_changes;
+ UpdateAdaptationStats(cpu_counts, quality_counts);
+}
+
+void SendStatisticsProxy::UpdateAdaptationStats(
+ const VideoStreamEncoder::AdaptCounts& cpu_counts,
+ const VideoStreamEncoder::AdaptCounts& quality_counts) {
+ cpu_downscales_ = cpu_counts.resolution;
+ quality_downscales_ = quality_counts.resolution;
+
+ stats_.cpu_limited_resolution = cpu_counts.resolution > 0;
+ stats_.cpu_limited_framerate = cpu_counts.fps > 0;
+ stats_.bw_limited_resolution = quality_counts.resolution > 0;
+ stats_.bw_limited_framerate = quality_counts.fps > 0;
+}
+
+void SendStatisticsProxy::SetAdaptTimer(
+ const VideoStreamEncoder::AdaptCounts& counts,
+ StatsTimer* timer) {
+ if (counts.resolution >= 0 || counts.fps >= 0) {
+ // Adaptation enabled.
+ if (!stats_.suspended)
+ timer->Start(clock_->TimeInMilliseconds());
+ return;
+ }
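+ // Neither resolution nor framerate adaptation is enabled; stop the timer.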
+ timer->Stop(clock_->TimeInMilliseconds());
+}
+
+void SendStatisticsProxy::RtcpPacketTypesCounterUpdated(
+ uint32_t ssrc,
+ const RtcpPacketTypeCounter& packet_counter) {
+ rtc::CritScope lock(&crit_);
+ VideoSendStream::StreamStats* stats = GetStatsEntry(ssrc);
+ if (!stats)
+ return;
+
+ stats->rtcp_packet_type_counts = packet_counter;
+ if (uma_container_->first_rtcp_stats_time_ms_ == -1)
+ uma_container_->first_rtcp_stats_time_ms_ = clock_->TimeInMilliseconds();
+}
+
+void SendStatisticsProxy::StatisticsUpdated(const RtcpStatistics& statistics,
+ uint32_t ssrc) {
+ rtc::CritScope lock(&crit_);
+ VideoSendStream::StreamStats* stats = GetStatsEntry(ssrc);
+ if (!stats)
+ return;
+
+ stats->rtcp_stats = statistics;
+ uma_container_->report_block_stats_.Store(statistics, 0, ssrc);
+}
+
+void SendStatisticsProxy::CNameChanged(const char* cname, uint32_t ssrc) {}
+
+void SendStatisticsProxy::DataCountersUpdated(
+ const StreamDataCounters& counters,
+ uint32_t ssrc) {
+ rtc::CritScope lock(&crit_);
+ VideoSendStream::StreamStats* stats = GetStatsEntry(ssrc);
+ RTC_DCHECK(stats) << "DataCountersUpdated reported for unknown ssrc " << ssrc;
+
+ if (stats->is_flexfec) {
+ // The same counters are reported for both the media SSRC and the FlexFEC
+ // SSRC. Bitrate stats are summed for all SSRCs, so use the FEC stats from
+ // the media SSRC update.
+ return;
+ }
+
+ stats->rtp_stats = counters;
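+ // On the first RTP stats update, restart the adaptation timers so that
+ // time elapsed before any media was sent is excluded from the stats.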
+ if (uma_container_->first_rtp_stats_time_ms_ == -1) {
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ uma_container_->first_rtp_stats_time_ms_ = now_ms;
+ uma_container_->cpu_adapt_timer_.Restart(now_ms);
+ uma_container_->quality_adapt_timer_.Restart(now_ms);
+ }
+
+ uma_container_->total_byte_counter_.Set(counters.transmitted.TotalBytes(),
+ ssrc);
+ uma_container_->padding_byte_counter_.Set(counters.transmitted.padding_bytes,
+ ssrc);
+ uma_container_->retransmit_byte_counter_.Set(
+ counters.retransmitted.TotalBytes(), ssrc);
+ uma_container_->fec_byte_counter_.Set(counters.fec.TotalBytes(), ssrc);
+ if (stats->is_rtx) {
+ uma_container_->rtx_byte_counter_.Set(counters.transmitted.TotalBytes(),
+ ssrc);
+ } else {
+ uma_container_->media_byte_counter_.Set(counters.MediaPayloadBytes(), ssrc);
+ }
+}
+
+void SendStatisticsProxy::Notify(uint32_t total_bitrate_bps,
+ uint32_t retransmit_bitrate_bps,
+ uint32_t ssrc) {
+ rtc::CritScope lock(&crit_);
+ VideoSendStream::StreamStats* stats = GetStatsEntry(ssrc);
+ if (!stats)
+ return;
+
+ stats->total_bitrate_bps = total_bitrate_bps;
+ stats->retransmit_bitrate_bps = retransmit_bitrate_bps;
+}
+
+void SendStatisticsProxy::FrameCountUpdated(const FrameCounts& frame_counts,
+ uint32_t ssrc) {
+ rtc::CritScope lock(&crit_);
+ VideoSendStream::StreamStats* stats = GetStatsEntry(ssrc);
+ if (!stats)
+ return;
+
+ stats->frame_counts = frame_counts;
+}
+
+void SendStatisticsProxy::SendSideDelayUpdated(int avg_delay_ms,
+ int max_delay_ms,
+ uint32_t ssrc) {
+ rtc::CritScope lock(&crit_);
+ VideoSendStream::StreamStats* stats = GetStatsEntry(ssrc);
+ if (!stats)
+ return;
+ stats->avg_delay_ms = avg_delay_ms;
+ stats->max_delay_ms = max_delay_ms;
+
+ uma_container_->delay_counter_.Add(avg_delay_ms);
+ uma_container_->max_delay_counter_.Add(max_delay_ms);
+}
+
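+// StatsTimer accumulates elapsed time across Start/Stop cycles; Start is a
+// no-op while the timer is running and Stop is a no-op while it is stopped.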
+void SendStatisticsProxy::StatsTimer::Start(int64_t now_ms) {
+ if (start_ms == -1)
+ start_ms = now_ms;
+}
+
+void SendStatisticsProxy::StatsTimer::Stop(int64_t now_ms) {
+ if (start_ms != -1) {
+ total_ms += now_ms - start_ms;
+ start_ms = -1;
+ }
+}
+
+void SendStatisticsProxy::StatsTimer::Restart(int64_t now_ms) {
+ total_ms = 0;
+ if (start_ms != -1)
+ start_ms = now_ms;
+}
+
+void SendStatisticsProxy::SampleCounter::Add(int sample) {
+ sum += sample;
+ ++num_samples;
+}
+
+int SendStatisticsProxy::SampleCounter::Avg(
+ int64_t min_required_samples) const {
+ if (num_samples < min_required_samples || num_samples == 0)
+ return -1;
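+ // Round to nearest integer: add half of num_samples before dividing.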
+ return static_cast<int>((sum + (num_samples / 2)) / num_samples);
+}
+
+void SendStatisticsProxy::BoolSampleCounter::Add(bool sample) {
+ if (sample)
+ ++sum;
+ ++num_samples;
+}
+
+void SendStatisticsProxy::BoolSampleCounter::Add(bool sample, int64_t count) {
+ if (sample)
+ sum += count;
+ num_samples += count;
+}
+
+int SendStatisticsProxy::BoolSampleCounter::Percent(
+ int64_t min_required_samples) const {
+ return Fraction(min_required_samples, 100.0f);
+}
+
+int SendStatisticsProxy::BoolSampleCounter::Permille(
+ int64_t min_required_samples) const {
+ return Fraction(min_required_samples, 1000.0f);
+}
+
+int SendStatisticsProxy::BoolSampleCounter::Fraction(
+ int64_t min_required_samples,
+ float multiplier) const {
+ if (num_samples < min_required_samples || num_samples == 0)
+ return -1;
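+ // Scale by the multiplier and round to nearest by adding 0.5f before
+ // truncating to int.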
+ return static_cast<int>((sum * multiplier / num_samples) + 0.5f);
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/video/send_statistics_proxy.h b/third_party/libwebrtc/webrtc/video/send_statistics_proxy.h
new file mode 100644
index 0000000000..094e78fa71
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/send_statistics_proxy.h
@@ -0,0 +1,298 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_SEND_STATISTICS_PROXY_H_
+#define VIDEO_SEND_STATISTICS_PROXY_H_
+
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "common_types.h" // NOLINT(build/include)
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "modules/video_coding/include/video_coding_defines.h"
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/numerics/exp_filter.h"
+#include "rtc_base/ratetracker.h"
+#include "rtc_base/thread_annotations.h"
+#include "system_wrappers/include/clock.h"
+#include "video/overuse_frame_detector.h"
+#include "video/report_block_stats.h"
+#include "video/stats_counter.h"
+#include "video/video_stream_encoder.h"
+#include "call/video_send_stream.h"
+
+namespace webrtc {
+
+class SendStatisticsProxy : public CpuOveruseMetricsObserver,
+ public RtcpStatisticsCallback,
+ public RtcpPacketTypeCounterObserver,
+ public StreamDataCountersCallback,
+ public BitrateStatisticsObserver,
+ public FrameCountObserver,
+ public SendSideDelayObserver {
+ public:
+ static const int kStatsTimeoutMs;
+ // Number of samples that must be collected before a metric is added to an
+ // RTC histogram.
+ static const int kMinRequiredMetricsSamples = 200;
+
+ SendStatisticsProxy(Clock* clock,
+ const VideoSendStream::Config& config,
+ VideoEncoderConfig::ContentType content_type);
+ virtual ~SendStatisticsProxy();
+
+ virtual VideoSendStream::Stats GetStats();
+
+ virtual void OnSendEncodedImage(const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_info);
+ // Used to update incoming frame rate.
+ void OnIncomingFrame(int width, int height);
+
+ // Dropped frame stats.
+ void OnFrameDroppedBySource();
+ void OnFrameDroppedInEncoderQueue();
+ void OnFrameDroppedByEncoder();
+ void OnFrameDroppedByMediaOptimizations();
+
+ // Adaptation stats.
+ void SetAdaptationStats(
+ const VideoStreamEncoder::AdaptCounts& cpu_counts,
+ const VideoStreamEncoder::AdaptCounts& quality_counts);
+ void OnCpuAdaptationChanged(
+ const VideoStreamEncoder::AdaptCounts& cpu_counts,
+ const VideoStreamEncoder::AdaptCounts& quality_counts);
+ void OnQualityAdaptationChanged(
+ const VideoStreamEncoder::AdaptCounts& cpu_counts,
+ const VideoStreamEncoder::AdaptCounts& quality_counts);
+ void OnMinPixelLimitReached();
+
+ void OnSuspendChange(bool is_suspended);
+ void OnInactiveSsrc(uint32_t ssrc);
+
+ // Used to indicate a change in content type, which may require a change in
+ // how stats are collected, and to set the configured preferred media
+ // bitrate.
+ void OnEncoderReconfigured(const VideoEncoderConfig& encoder_config,
+ uint32_t preferred_bitrate_bps);
+
+ // Used to update the encoder target rate.
+ void OnSetEncoderTargetRate(uint32_t bitrate_bps);
+
+ // Implements CpuOveruseMetricsObserver.
+ void OnEncodedFrameTimeMeasured(int encode_time_ms,
+ const CpuOveruseMetrics& metrics) override;
+
+ int GetSendFrameRate() const;
+
+ protected:
+ // From RtcpStatisticsCallback.
+ void StatisticsUpdated(const RtcpStatistics& statistics,
+ uint32_t ssrc) override;
+ void CNameChanged(const char* cname, uint32_t ssrc) override;
+ // From RtcpPacketTypeCounterObserver.
+ void RtcpPacketTypesCounterUpdated(
+ uint32_t ssrc,
+ const RtcpPacketTypeCounter& packet_counter) override;
+ // From StreamDataCountersCallback.
+ void DataCountersUpdated(const StreamDataCounters& counters,
+ uint32_t ssrc) override;
+
+ // From BitrateStatisticsObserver.
+ void Notify(uint32_t total_bitrate_bps,
+ uint32_t retransmit_bitrate_bps,
+ uint32_t ssrc) override;
+
+ // From FrameCountObserver.
+ void FrameCountUpdated(const FrameCounts& frame_counts,
+ uint32_t ssrc) override;
+
+ void SendSideDelayUpdated(int avg_delay_ms,
+ int max_delay_ms,
+ uint32_t ssrc) override;
+
+ private:
+ class SampleCounter {
+ public:
+ SampleCounter() : sum(0), num_samples(0) {}
+ ~SampleCounter() {}
+ void Add(int sample);
+ int Avg(int64_t min_required_samples) const;
+
+ private:
+ int64_t sum;
+ int64_t num_samples;
+ };
+ class BoolSampleCounter {
+ public:
+ BoolSampleCounter() : sum(0), num_samples(0) {}
+ ~BoolSampleCounter() {}
+ void Add(bool sample);
+ void Add(bool sample, int64_t count);
+ int Percent(int64_t min_required_samples) const;
+ int Permille(int64_t min_required_samples) const;
+
+ private:
+ int Fraction(int64_t min_required_samples, float multiplier) const;
+ int64_t sum;
+ int64_t num_samples;
+ };
+ struct StatsUpdateTimes {
+ StatsUpdateTimes() : resolution_update_ms(0), bitrate_update_ms(0) {}
+ int64_t resolution_update_ms;
+ int64_t bitrate_update_ms;
+ };
+ struct TargetRateUpdates {
+ TargetRateUpdates()
+ : pause_resume_events(0), last_paused_or_resumed(false), last_ms(-1) {}
+ int pause_resume_events;
+ bool last_paused_or_resumed;
+ int64_t last_ms;
+ };
+ struct FallbackEncoderInfo {
+ bool is_possible = true;
+ bool is_active = false;
+ int on_off_events = 0;
+ int64_t elapsed_ms = 0;
+ rtc::Optional<int64_t> last_update_ms;
+ const int max_frame_diff_ms = 2000;
+ };
+ struct FallbackEncoderInfoDisabled {
+ bool is_possible = true;
+ bool min_pixel_limit_reached = false;
+ };
+ struct StatsTimer {
+ void Start(int64_t now_ms);
+ void Stop(int64_t now_ms);
+ void Restart(int64_t now_ms);
+ int64_t start_ms = -1;
+ int64_t total_ms = 0;
+ };
+ struct QpCounters {
+ SampleCounter vp8; // QP range: 0-127.
+ SampleCounter vp9; // QP range: 0-255.
+ SampleCounter h264; // QP range: 0-51.
+ };
+
+ // Map holding encoded frames, keyed by RTP timestamp.
+ // If simulcast layers are encoded on different threads, there is no
+ // guarantee that a frame from every layer is encoded before the next frame
+ // starts.
+ struct TimestampOlderThan {
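+ // Wraparound-aware ordering of 32-bit RTP timestamps: ts1 precedes ts2
+ // when ts2 is the newer timestamp.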
+ bool operator()(uint32_t ts1, uint32_t ts2) const {
+ return IsNewerTimestamp(ts2, ts1);
+ }
+ };
+ struct Frame {
+ Frame(int64_t send_ms, uint32_t width, uint32_t height)
+ : send_ms(send_ms), max_width(width), max_height(height) {}
+ const int64_t send_ms; // Time when the first frame with this timestamp is sent.
+ uint32_t max_width; // Max width with this timestamp.
+ uint32_t max_height; // Max height with this timestamp.
+ };
+ typedef std::map<uint32_t, Frame, TimestampOlderThan> EncodedFrameMap;
+
+ void PurgeOldStats() RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_);
+ VideoSendStream::StreamStats* GetStatsEntry(uint32_t ssrc)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_);
+
+ void SetAdaptTimer(const VideoStreamEncoder::AdaptCounts& counts,
+ StatsTimer* timer) RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_);
+ void UpdateAdaptationStats(
+ const VideoStreamEncoder::AdaptCounts& cpu_counts,
+ const VideoStreamEncoder::AdaptCounts& quality_counts)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_);
+
+ void UpdateEncoderFallbackStats(const CodecSpecificInfo* codec_info,
+ int pixels)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_);
+ void UpdateFallbackDisabledStats(const CodecSpecificInfo* codec_info,
+ int pixels)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_);
+
+ Clock* const clock_;
+ const std::string payload_name_;
+ const VideoSendStream::Config::Rtp rtp_config_;
+ const rtc::Optional<int> fallback_max_pixels_;
+ const rtc::Optional<int> fallback_max_pixels_disabled_;
+ rtc::CriticalSection crit_;
+ VideoEncoderConfig::ContentType content_type_ RTC_GUARDED_BY(crit_);
+ const int64_t start_ms_;
+ VideoSendStream::Stats stats_ RTC_GUARDED_BY(crit_);
+ std::map<uint32_t, StatsUpdateTimes> update_times_ RTC_GUARDED_BY(crit_);
+ rtc::ExpFilter encode_time_ RTC_GUARDED_BY(crit_);
+ int quality_downscales_ RTC_GUARDED_BY(crit_);
+ int cpu_downscales_ RTC_GUARDED_BY(crit_);
+ rtc::RateTracker media_byte_rate_tracker_ RTC_GUARDED_BY(crit_);
+ rtc::RateTracker encoded_frame_rate_tracker_ RTC_GUARDED_BY(crit_);
+
+ // Contains stats used for UMA histograms. These stats will be reset if
+ // content type changes between real-time video and screenshare, since these
+ // will be reported separately.
+ struct UmaSamplesContainer {
+ UmaSamplesContainer(const char* prefix,
+ const VideoSendStream::Stats& start_stats,
+ Clock* clock);
+ ~UmaSamplesContainer();
+
+ void UpdateHistograms(const VideoSendStream::Config::Rtp& rtp_config,
+ const VideoSendStream::Stats& current_stats);
+
+ void InitializeBitrateCounters(const VideoSendStream::Stats& stats);
+
+ bool InsertEncodedFrame(const EncodedImage& encoded_frame);
+ void RemoveOld(int64_t now_ms);
+
+ const std::string uma_prefix_;
+ Clock* const clock_;
+ SampleCounter input_width_counter_;
+ SampleCounter input_height_counter_;
+ SampleCounter sent_width_counter_;
+ SampleCounter sent_height_counter_;
+ SampleCounter encode_time_counter_;
+ BoolSampleCounter key_frame_counter_;
+ BoolSampleCounter quality_limited_frame_counter_;
+ SampleCounter quality_downscales_counter_;
+ BoolSampleCounter cpu_limited_frame_counter_;
+ BoolSampleCounter bw_limited_frame_counter_;
+ SampleCounter bw_resolutions_disabled_counter_;
+ SampleCounter delay_counter_;
+ SampleCounter max_delay_counter_;
+ rtc::RateTracker input_frame_rate_tracker_;
+ RateCounter input_fps_counter_;
+ RateCounter sent_fps_counter_;
+ RateAccCounter total_byte_counter_;
+ RateAccCounter media_byte_counter_;
+ RateAccCounter rtx_byte_counter_;
+ RateAccCounter padding_byte_counter_;
+ RateAccCounter retransmit_byte_counter_;
+ RateAccCounter fec_byte_counter_;
+ int64_t first_rtcp_stats_time_ms_;
+ int64_t first_rtp_stats_time_ms_;
+ StatsTimer cpu_adapt_timer_;
+ StatsTimer quality_adapt_timer_;
+ BoolSampleCounter paused_time_counter_;
+ TargetRateUpdates target_rate_updates_;
+ BoolSampleCounter fallback_active_counter_;
+ FallbackEncoderInfo fallback_info_;
+ FallbackEncoderInfoDisabled fallback_info_disabled_;
+ ReportBlockStats report_block_stats_;
+ const VideoSendStream::Stats start_stats_;
+ EncodedFrameMap encoded_frames_;
+
+ std::map<int, QpCounters>
+ qp_counters_; // QP counters mapped by spatial idx.
+ };
+
+ std::unique_ptr<UmaSamplesContainer> uma_container_ RTC_GUARDED_BY(crit_);
+};
+
+} // namespace webrtc
+#endif // VIDEO_SEND_STATISTICS_PROXY_H_
diff --git a/third_party/libwebrtc/webrtc/video/send_statistics_proxy_unittest.cc b/third_party/libwebrtc/webrtc/video/send_statistics_proxy_unittest.cc
new file mode 100644
index 0000000000..a204a469aa
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/send_statistics_proxy_unittest.cc
@@ -0,0 +1,2032 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/send_statistics_proxy.h"
+
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "system_wrappers/include/metrics.h"
+#include "system_wrappers/include/metrics_default.h"
+#include "test/field_trial.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+const uint32_t kFirstSsrc = 17;
+const uint32_t kSecondSsrc = 42;
+const uint32_t kFirstRtxSsrc = 18;
+const uint32_t kSecondRtxSsrc = 43;
+const uint32_t kFlexFecSsrc = 55;
+const int kFpsPeriodicIntervalMs = 2000;
+const int kWidth = 640;
+const int kHeight = 480;
+const int kQpIdx0 = 21;
+const int kQpIdx1 = 39;
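+// Const-initialized via an immediately invoked lambda.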
+const CodecSpecificInfo kDefaultCodecInfo = []() {
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecVP8;
+ codec_info.codecSpecific.VP8.simulcastIdx = 0;
+ return codec_info;
+}();
+} // namespace
+
+class SendStatisticsProxyTest : public ::testing::Test {
+ public:
+ SendStatisticsProxyTest() : SendStatisticsProxyTest("") {}
+ explicit SendStatisticsProxyTest(const std::string& field_trials)
+ : override_field_trials_(field_trials),
+ fake_clock_(1234),
+ config_(GetTestConfig()),
+ avg_delay_ms_(0),
+ max_delay_ms_(0) {}
+ virtual ~SendStatisticsProxyTest() {}
+
+ protected:
+ virtual void SetUp() {
+ metrics::Reset();
+ statistics_proxy_.reset(new SendStatisticsProxy(
+ &fake_clock_, GetTestConfig(),
+ VideoEncoderConfig::ContentType::kRealtimeVideo));
+ expected_ = VideoSendStream::Stats();
+ for (const auto& ssrc : config_.rtp.ssrcs)
+ expected_.substreams[ssrc].is_rtx = false;
+ for (const auto& ssrc : config_.rtp.rtx.ssrcs)
+ expected_.substreams[ssrc].is_rtx = true;
+ }
+
+ VideoSendStream::Config GetTestConfig() {
+ VideoSendStream::Config config(nullptr);
+ config.rtp.ssrcs.push_back(kFirstSsrc);
+ config.rtp.ssrcs.push_back(kSecondSsrc);
+ config.rtp.rtx.ssrcs.push_back(kFirstRtxSsrc);
+ config.rtp.rtx.ssrcs.push_back(kSecondRtxSsrc);
+ config.rtp.ulpfec.red_payload_type = 17;
+ return config;
+ }
+
+ VideoSendStream::Config GetTestConfigWithFlexFec() {
+ VideoSendStream::Config config(nullptr);
+ config.rtp.ssrcs.push_back(kFirstSsrc);
+ config.rtp.ssrcs.push_back(kSecondSsrc);
+ config.rtp.rtx.ssrcs.push_back(kFirstRtxSsrc);
+ config.rtp.rtx.ssrcs.push_back(kSecondRtxSsrc);
+ config.rtp.flexfec.payload_type = 50;
+ config.rtp.flexfec.ssrc = kFlexFecSsrc;
+ return config;
+ }
+
+ VideoSendStream::StreamStats GetStreamStats(uint32_t ssrc) {
+ VideoSendStream::Stats stats = statistics_proxy_->GetStats();
+ std::map<uint32_t, VideoSendStream::StreamStats>::iterator it =
+ stats.substreams.find(ssrc);
+ EXPECT_NE(it, stats.substreams.end());
+ return it->second;
+ }
+
+ void UpdateDataCounters(uint32_t ssrc) {
+ StreamDataCountersCallback* proxy =
+ static_cast<StreamDataCountersCallback*>(statistics_proxy_.get());
+ StreamDataCounters counters;
+ proxy->DataCountersUpdated(counters, ssrc);
+ }
+
+ void ExpectEqual(VideoSendStream::Stats one, VideoSendStream::Stats other) {
+ EXPECT_EQ(one.input_frame_rate, other.input_frame_rate);
+ EXPECT_EQ(one.encode_frame_rate, other.encode_frame_rate);
+ EXPECT_EQ(one.media_bitrate_bps, other.media_bitrate_bps);
+ EXPECT_EQ(one.preferred_media_bitrate_bps,
+ other.preferred_media_bitrate_bps);
+ EXPECT_EQ(one.suspended, other.suspended);
+
+ EXPECT_EQ(one.substreams.size(), other.substreams.size());
+ for (std::map<uint32_t, VideoSendStream::StreamStats>::const_iterator it =
+ one.substreams.begin();
+ it != one.substreams.end(); ++it) {
+ std::map<uint32_t, VideoSendStream::StreamStats>::const_iterator
+ corresponding_it = other.substreams.find(it->first);
+ ASSERT_TRUE(corresponding_it != other.substreams.end());
+ const VideoSendStream::StreamStats& a = it->second;
+ const VideoSendStream::StreamStats& b = corresponding_it->second;
+
+ EXPECT_EQ(a.is_rtx, b.is_rtx);
+ EXPECT_EQ(a.frame_counts.key_frames, b.frame_counts.key_frames);
+ EXPECT_EQ(a.frame_counts.delta_frames, b.frame_counts.delta_frames);
+ EXPECT_EQ(a.total_bitrate_bps, b.total_bitrate_bps);
+ EXPECT_EQ(a.avg_delay_ms, b.avg_delay_ms);
+ EXPECT_EQ(a.max_delay_ms, b.max_delay_ms);
+
+ EXPECT_EQ(a.rtp_stats.transmitted.payload_bytes,
+ b.rtp_stats.transmitted.payload_bytes);
+ EXPECT_EQ(a.rtp_stats.transmitted.header_bytes,
+ b.rtp_stats.transmitted.header_bytes);
+ EXPECT_EQ(a.rtp_stats.transmitted.padding_bytes,
+ b.rtp_stats.transmitted.padding_bytes);
+ EXPECT_EQ(a.rtp_stats.transmitted.packets,
+ b.rtp_stats.transmitted.packets);
+ EXPECT_EQ(a.rtp_stats.retransmitted.packets,
+ b.rtp_stats.retransmitted.packets);
+ EXPECT_EQ(a.rtp_stats.fec.packets, b.rtp_stats.fec.packets);
+
+ EXPECT_EQ(a.rtcp_stats.fraction_lost, b.rtcp_stats.fraction_lost);
+ EXPECT_EQ(a.rtcp_stats.packets_lost, b.rtcp_stats.packets_lost);
+ EXPECT_EQ(a.rtcp_stats.extended_highest_sequence_number,
+ b.rtcp_stats.extended_highest_sequence_number);
+ EXPECT_EQ(a.rtcp_stats.jitter, b.rtcp_stats.jitter);
+ }
+ }
+
+ test::ScopedFieldTrials override_field_trials_;
+ SimulatedClock fake_clock_;
+ std::unique_ptr<SendStatisticsProxy> statistics_proxy_;
+ VideoSendStream::Config config_;
+ int avg_delay_ms_;
+ int max_delay_ms_;
+ VideoSendStream::Stats expected_;
+ typedef std::map<uint32_t, VideoSendStream::StreamStats>::const_iterator
+ StreamIterator;
+};
+
+TEST_F(SendStatisticsProxyTest, RtcpStatistics) {
+ RtcpStatisticsCallback* callback = statistics_proxy_.get();
+ for (const auto& ssrc : config_.rtp.ssrcs) {
+ VideoSendStream::StreamStats& ssrc_stats = expected_.substreams[ssrc];
+
+ // Add statistics with some arbitrary, but unique, numbers.
+ uint32_t offset = ssrc * sizeof(RtcpStatistics);
+ ssrc_stats.rtcp_stats.packets_lost = offset;
+ ssrc_stats.rtcp_stats.extended_highest_sequence_number = offset + 1;
+ ssrc_stats.rtcp_stats.fraction_lost = offset + 2;
+ ssrc_stats.rtcp_stats.jitter = offset + 3;
+ callback->StatisticsUpdated(ssrc_stats.rtcp_stats, ssrc);
+ }
+ for (const auto& ssrc : config_.rtp.rtx.ssrcs) {
+ VideoSendStream::StreamStats& ssrc_stats = expected_.substreams[ssrc];
+
+ // Add statistics with some arbitrary, but unique, numbers.
+ uint32_t offset = ssrc * sizeof(RtcpStatistics);
+ ssrc_stats.rtcp_stats.packets_lost = offset;
+ ssrc_stats.rtcp_stats.extended_highest_sequence_number = offset + 1;
+ ssrc_stats.rtcp_stats.fraction_lost = offset + 2;
+ ssrc_stats.rtcp_stats.jitter = offset + 3;
+ callback->StatisticsUpdated(ssrc_stats.rtcp_stats, ssrc);
+ }
+ VideoSendStream::Stats stats = statistics_proxy_->GetStats();
+ ExpectEqual(expected_, stats);
+}
+
+TEST_F(SendStatisticsProxyTest, Suspended) {
+ // Verify that the value is false by default.
+ EXPECT_FALSE(statistics_proxy_->GetStats().suspended);
+
+ // Verify that we can set it to true.
+ statistics_proxy_->OnSuspendChange(true);
+ EXPECT_TRUE(statistics_proxy_->GetStats().suspended);
+
+ // Verify that we can set it back to false again.
+ statistics_proxy_->OnSuspendChange(false);
+ EXPECT_FALSE(statistics_proxy_->GetStats().suspended);
+}
+
+TEST_F(SendStatisticsProxyTest, FrameCounts) {
+ FrameCountObserver* observer = statistics_proxy_.get();
+ for (const auto& ssrc : config_.rtp.ssrcs) {
+ // Add statistics with some arbitrary, but unique, numbers.
+ VideoSendStream::StreamStats& stats = expected_.substreams[ssrc];
+ uint32_t offset = ssrc * sizeof(VideoSendStream::StreamStats);
+ FrameCounts frame_counts;
+ frame_counts.key_frames = offset;
+ frame_counts.delta_frames = offset + 1;
+ stats.frame_counts = frame_counts;
+ observer->FrameCountUpdated(frame_counts, ssrc);
+ }
+ for (const auto& ssrc : config_.rtp.rtx.ssrcs) {
+ // Add statistics with some arbitrary, but unique, numbers.
+ VideoSendStream::StreamStats& stats = expected_.substreams[ssrc];
+ uint32_t offset = ssrc * sizeof(VideoSendStream::StreamStats);
+ FrameCounts frame_counts;
+ frame_counts.key_frames = offset;
+ frame_counts.delta_frames = offset + 1;
+ stats.frame_counts = frame_counts;
+ observer->FrameCountUpdated(frame_counts, ssrc);
+ }
+
+ VideoSendStream::Stats stats = statistics_proxy_->GetStats();
+ ExpectEqual(expected_, stats);
+}
+
+TEST_F(SendStatisticsProxyTest, DataCounters) {
+ StreamDataCountersCallback* callback = statistics_proxy_.get();
+ for (const auto& ssrc : config_.rtp.ssrcs) {
+ StreamDataCounters& counters = expected_.substreams[ssrc].rtp_stats;
+ // Add statistics with some arbitrary, but unique, numbers.
+ size_t offset = ssrc * sizeof(StreamDataCounters);
+ uint32_t offset_uint32 = static_cast<uint32_t>(offset);
+ counters.transmitted.payload_bytes = offset;
+ counters.transmitted.header_bytes = offset + 1;
+ counters.fec.packets = offset_uint32 + 2;
+ counters.transmitted.padding_bytes = offset + 3;
+ counters.retransmitted.packets = offset_uint32 + 4;
+ counters.transmitted.packets = offset_uint32 + 5;
+ callback->DataCountersUpdated(counters, ssrc);
+ }
+ for (const auto& ssrc : config_.rtp.rtx.ssrcs) {
+ StreamDataCounters& counters = expected_.substreams[ssrc].rtp_stats;
+ // Add statistics with some arbitrary, but unique, numbers.
+ size_t offset = ssrc * sizeof(StreamDataCounters);
+ uint32_t offset_uint32 = static_cast<uint32_t>(offset);
+ counters.transmitted.payload_bytes = offset;
+ counters.transmitted.header_bytes = offset + 1;
+ counters.fec.packets = offset_uint32 + 2;
+ counters.transmitted.padding_bytes = offset + 3;
+ counters.retransmitted.packets = offset_uint32 + 4;
+ counters.transmitted.packets = offset_uint32 + 5;
+ callback->DataCountersUpdated(counters, ssrc);
+ }
+
+ VideoSendStream::Stats stats = statistics_proxy_->GetStats();
+ ExpectEqual(expected_, stats);
+}
+
+TEST_F(SendStatisticsProxyTest, Bitrate) {
+ BitrateStatisticsObserver* observer = statistics_proxy_.get();
+ for (const auto& ssrc : config_.rtp.ssrcs) {
+ // Use ssrc as bitrate_bps to get a unique value for each stream.
+ uint32_t total = ssrc;
+ uint32_t retransmit = ssrc + 1;
+ observer->Notify(total, retransmit, ssrc);
+ expected_.substreams[ssrc].total_bitrate_bps = total;
+ expected_.substreams[ssrc].retransmit_bitrate_bps = retransmit;
+ }
+ for (const auto& ssrc : config_.rtp.rtx.ssrcs) {
+ // Use ssrc as bitrate_bps to get a unique value for each stream.
+ uint32_t total = ssrc;
+ uint32_t retransmit = ssrc + 1;
+ observer->Notify(total, retransmit, ssrc);
+ expected_.substreams[ssrc].total_bitrate_bps = total;
+ expected_.substreams[ssrc].retransmit_bitrate_bps = retransmit;
+ }
+
+ VideoSendStream::Stats stats = statistics_proxy_->GetStats();
+ ExpectEqual(expected_, stats);
+}
+
+TEST_F(SendStatisticsProxyTest, SendSideDelay) {
+ SendSideDelayObserver* observer = statistics_proxy_.get();
+ for (const auto& ssrc : config_.rtp.ssrcs) {
+ // Use ssrc as avg_delay_ms and max_delay_ms to get a unique value for each
+ // stream.
+ int avg_delay_ms = ssrc;
+ int max_delay_ms = ssrc + 1;
+ observer->SendSideDelayUpdated(avg_delay_ms, max_delay_ms, ssrc);
+ expected_.substreams[ssrc].avg_delay_ms = avg_delay_ms;
+ expected_.substreams[ssrc].max_delay_ms = max_delay_ms;
+ }
+ for (const auto& ssrc : config_.rtp.rtx.ssrcs) {
+ // Use ssrc as avg_delay_ms and max_delay_ms to get a unique value for each
+ // stream.
+ int avg_delay_ms = ssrc;
+ int max_delay_ms = ssrc + 1;
+ observer->SendSideDelayUpdated(avg_delay_ms, max_delay_ms, ssrc);
+ expected_.substreams[ssrc].avg_delay_ms = avg_delay_ms;
+ expected_.substreams[ssrc].max_delay_ms = max_delay_ms;
+ }
+ VideoSendStream::Stats stats = statistics_proxy_->GetStats();
+ ExpectEqual(expected_, stats);
+}
+
+TEST_F(SendStatisticsProxyTest, OnEncodedFrameTimeMeasured) {
+ const int kEncodeTimeMs = 11;
+ CpuOveruseMetrics metrics;
+ metrics.encode_usage_percent = 80;
+ statistics_proxy_->OnEncodedFrameTimeMeasured(kEncodeTimeMs, metrics);
+
+ VideoSendStream::Stats stats = statistics_proxy_->GetStats();
+ EXPECT_EQ(kEncodeTimeMs, stats.avg_encode_time_ms);
+ EXPECT_EQ(metrics.encode_usage_percent, stats.encode_usage_percent);
+}
+
+TEST_F(SendStatisticsProxyTest, OnEncoderReconfiguredChangePreferredBitrate) {
+ VideoSendStream::Stats stats = statistics_proxy_->GetStats();
+ EXPECT_EQ(0, stats.preferred_media_bitrate_bps);
+ const int kPreferredMediaBitrateBps = 50;
+
+ VideoEncoderConfig config;
+ statistics_proxy_->OnEncoderReconfigured(config, kPreferredMediaBitrateBps);
+ stats = statistics_proxy_->GetStats();
+ EXPECT_EQ(kPreferredMediaBitrateBps, stats.preferred_media_bitrate_bps);
+}
+
+TEST_F(SendStatisticsProxyTest, OnSendEncodedImageIncreasesFramesEncoded) {
+ EncodedImage encoded_image;
+ CodecSpecificInfo codec_info;
+ EXPECT_EQ(0u, statistics_proxy_->GetStats().frames_encoded);
+ for (uint32_t i = 1; i <= 3; ++i) {
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+ EXPECT_EQ(i, statistics_proxy_->GetStats().frames_encoded);
+ }
+}
+
+TEST_F(SendStatisticsProxyTest, OnSendEncodedImageIncreasesQpSum) {
+ EncodedImage encoded_image;
+ CodecSpecificInfo codec_info;
+ EXPECT_EQ(rtc::Optional<uint64_t>(), statistics_proxy_->GetStats().qp_sum);
+ encoded_image.qp_ = 3;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+ EXPECT_EQ(rtc::Optional<uint64_t>(3u), statistics_proxy_->GetStats().qp_sum);
+ encoded_image.qp_ = 127;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+ EXPECT_EQ(rtc::Optional<uint64_t>(130u),
+ statistics_proxy_->GetStats().qp_sum);
+}
+
+TEST_F(SendStatisticsProxyTest, OnSendEncodedImageWithoutQpQpSumWontExist) {
+ EncodedImage encoded_image;
+ CodecSpecificInfo codec_info;
+ encoded_image.qp_ = -1;
+ EXPECT_EQ(rtc::Optional<uint64_t>(), statistics_proxy_->GetStats().qp_sum);
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+ EXPECT_EQ(rtc::Optional<uint64_t>(), statistics_proxy_->GetStats().qp_sum);
+}
+
+TEST_F(SendStatisticsProxyTest, GetCpuAdaptationStats) {
+ VideoStreamEncoder::AdaptCounts cpu_counts;
+ VideoStreamEncoder::AdaptCounts quality_counts;
+ EXPECT_FALSE(statistics_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_FALSE(statistics_proxy_->GetStats().cpu_limited_resolution);
+ cpu_counts.fps = 1;
+ cpu_counts.resolution = 0;
+ statistics_proxy_->SetAdaptationStats(cpu_counts, quality_counts);
+ EXPECT_TRUE(statistics_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_FALSE(statistics_proxy_->GetStats().cpu_limited_resolution);
+ cpu_counts.fps = 0;
+ cpu_counts.resolution = 1;
+ statistics_proxy_->SetAdaptationStats(cpu_counts, quality_counts);
+ EXPECT_FALSE(statistics_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_TRUE(statistics_proxy_->GetStats().cpu_limited_resolution);
+ cpu_counts.fps = 1;
+ cpu_counts.resolution = -1;
+ statistics_proxy_->SetAdaptationStats(cpu_counts, quality_counts);
+ EXPECT_TRUE(statistics_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_FALSE(statistics_proxy_->GetStats().cpu_limited_resolution);
+ cpu_counts.fps = -1;
+ cpu_counts.resolution = -1;
+ statistics_proxy_->SetAdaptationStats(cpu_counts, quality_counts);
+ EXPECT_FALSE(statistics_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_FALSE(statistics_proxy_->GetStats().cpu_limited_resolution);
+}
+
+TEST_F(SendStatisticsProxyTest, GetQualityAdaptationStats) {
+ VideoStreamEncoder::AdaptCounts cpu_counts;
+ VideoStreamEncoder::AdaptCounts quality_counts;
+ EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_resolution);
+ quality_counts.fps = 1;
+ quality_counts.resolution = 0;
+ statistics_proxy_->SetAdaptationStats(cpu_counts, quality_counts);
+ EXPECT_TRUE(statistics_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_resolution);
+ quality_counts.fps = 0;
+ quality_counts.resolution = 1;
+ statistics_proxy_->SetAdaptationStats(cpu_counts, quality_counts);
+ EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_TRUE(statistics_proxy_->GetStats().bw_limited_resolution);
+ quality_counts.fps = 1;
+ quality_counts.resolution = -1;
+ statistics_proxy_->SetAdaptationStats(cpu_counts, quality_counts);
+ EXPECT_TRUE(statistics_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_resolution);
+ quality_counts.fps = -1;
+ quality_counts.resolution = -1;
+ statistics_proxy_->SetAdaptationStats(cpu_counts, quality_counts);
+ EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_resolution);
+}
+
+TEST_F(SendStatisticsProxyTest, GetStatsReportsCpuAdaptChanges) {
+ VideoStreamEncoder::AdaptCounts cpu_counts;
+ VideoStreamEncoder::AdaptCounts quality_counts;
+ EXPECT_EQ(0, statistics_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ cpu_counts.resolution = 1;
+ statistics_proxy_->OnCpuAdaptationChanged(cpu_counts, quality_counts);
+ EXPECT_FALSE(statistics_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_TRUE(statistics_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_EQ(1, statistics_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ cpu_counts.resolution = 2;
+ statistics_proxy_->OnCpuAdaptationChanged(cpu_counts, quality_counts);
+ EXPECT_FALSE(statistics_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_TRUE(statistics_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_EQ(2, statistics_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(0, statistics_proxy_->GetStats().number_of_quality_adapt_changes);
+}
+
+TEST_F(SendStatisticsProxyTest, GetStatsReportsQualityAdaptChanges) {
+ VideoStreamEncoder::AdaptCounts cpu_counts;
+ VideoStreamEncoder::AdaptCounts quality_counts;
+ EXPECT_EQ(0, statistics_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ quality_counts.fps = 1;
+ statistics_proxy_->OnQualityAdaptationChanged(cpu_counts, quality_counts);
+ EXPECT_TRUE(statistics_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(1, statistics_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ quality_counts.fps = 0;
+ statistics_proxy_->OnQualityAdaptationChanged(cpu_counts, quality_counts);
+ EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(2, statistics_proxy_->GetStats().number_of_quality_adapt_changes);
+ EXPECT_EQ(0, statistics_proxy_->GetStats().number_of_cpu_adapt_changes);
+}
+
+TEST_F(SendStatisticsProxyTest, AdaptChangesNotReported_AdaptationNotEnabled) {
+ // First RTP packet sent.
+ UpdateDataCounters(kFirstSsrc);
+ // Min runtime has passed.
+ fake_clock_.AdvanceTimeMilliseconds(metrics::kMinRunTimeInSeconds * 1000);
+ statistics_proxy_.reset();
+ EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Cpu"));
+ EXPECT_EQ(0,
+ metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Quality"));
+}
+
+TEST_F(SendStatisticsProxyTest, AdaptChangesNotReported_MinRuntimeNotPassed) {
+ // First RTP packet sent.
+ UpdateDataCounters(kFirstSsrc);
+ // Enable adaptation.
+ VideoStreamEncoder::AdaptCounts cpu_counts;
+ VideoStreamEncoder::AdaptCounts quality_counts;
+ statistics_proxy_->SetAdaptationStats(cpu_counts, quality_counts);
+ // Min runtime has not passed.
+ fake_clock_.AdvanceTimeMilliseconds(metrics::kMinRunTimeInSeconds * 1000 - 1);
+ statistics_proxy_.reset();
+ EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Cpu"));
+ EXPECT_EQ(0,
+ metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Quality"));
+}
+
+TEST_F(SendStatisticsProxyTest, ZeroAdaptChangesReported) {
+ // First RTP packet sent.
+ UpdateDataCounters(kFirstSsrc);
+ // Enable adaptation.
+ VideoStreamEncoder::AdaptCounts cpu_counts;
+ VideoStreamEncoder::AdaptCounts quality_counts;
+ statistics_proxy_->SetAdaptationStats(cpu_counts, quality_counts);
+ // Min runtime has passed.
+ fake_clock_.AdvanceTimeMilliseconds(metrics::kMinRunTimeInSeconds * 1000);
+ statistics_proxy_.reset();
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Cpu"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.AdaptChangesPerMinute.Cpu", 0));
+ EXPECT_EQ(1,
+ metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Quality"));
+ EXPECT_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.AdaptChangesPerMinute.Quality", 0));
+}
+
+TEST_F(SendStatisticsProxyTest, CpuAdaptChangesReported) {
+ // First RTP packet sent.
+ UpdateDataCounters(kFirstSsrc);
+ // Enable adaptation.
+ VideoStreamEncoder::AdaptCounts cpu_counts;
+ VideoStreamEncoder::AdaptCounts quality_counts;
+ statistics_proxy_->SetAdaptationStats(cpu_counts, quality_counts);
+ // Adapt changes: 1, elapsed time: 10 sec => 6 per minute.
+ statistics_proxy_->OnCpuAdaptationChanged(cpu_counts, quality_counts);
+ fake_clock_.AdvanceTimeMilliseconds(10000);
+ statistics_proxy_.reset();
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Cpu"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.AdaptChangesPerMinute.Cpu", 6));
+}
+
+TEST_F(SendStatisticsProxyTest, AdaptChangesStatsExcludesDisabledTime) {
+ // First RTP packet sent.
+ UpdateDataCounters(kFirstSsrc);
+
+ // Disable quality adaptation.
+ VideoStreamEncoder::AdaptCounts cpu_counts;
+ VideoStreamEncoder::AdaptCounts quality_counts;
+ quality_counts.fps = -1;
+ quality_counts.resolution = -1;
+ statistics_proxy_->SetAdaptationStats(cpu_counts, quality_counts);
+ fake_clock_.AdvanceTimeMilliseconds(10000);
+
+ // Enable quality adaptation.
+ // Adapt changes: 2, elapsed time: 20 sec.
+ quality_counts.fps = 0;
+ statistics_proxy_->SetAdaptationStats(cpu_counts, quality_counts);
+ fake_clock_.AdvanceTimeMilliseconds(5000);
+ statistics_proxy_->SetAdaptationStats(cpu_counts, quality_counts);
+ fake_clock_.AdvanceTimeMilliseconds(9000);
+ statistics_proxy_->OnQualityAdaptationChanged(cpu_counts, quality_counts);
+ fake_clock_.AdvanceTimeMilliseconds(6000);
+ statistics_proxy_->OnQualityAdaptationChanged(cpu_counts, quality_counts);
+
+ // Disable quality adaptation.
+ quality_counts.fps = -1;
+ statistics_proxy_->SetAdaptationStats(cpu_counts, quality_counts);
+ fake_clock_.AdvanceTimeMilliseconds(30000);
+
+ // Enable quality adaptation.
+ // Adapt changes: 1, elapsed time: 10 sec.
+ quality_counts.resolution = 0;
+ statistics_proxy_->SetAdaptationStats(cpu_counts, quality_counts);
+ statistics_proxy_->OnQualityAdaptationChanged(cpu_counts, quality_counts);
+ fake_clock_.AdvanceTimeMilliseconds(10000);
+
+ // Disable quality adaptation.
+ quality_counts.resolution = -1;
+ statistics_proxy_->SetAdaptationStats(cpu_counts, quality_counts);
+ fake_clock_.AdvanceTimeMilliseconds(5000);
+ statistics_proxy_->SetAdaptationStats(cpu_counts, quality_counts);
+ fake_clock_.AdvanceTimeMilliseconds(20000);
+
+ // Adapt changes: 3, elapsed time: 30 sec => 6 per minute.
+ statistics_proxy_.reset();
+ EXPECT_EQ(1,
+ metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Quality"));
+ EXPECT_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.AdaptChangesPerMinute.Quality", 6));
+}
+
+TEST_F(SendStatisticsProxyTest,
+ AdaptChangesNotReported_ScalingNotEnabledVideoResumed) {
+ // First RTP packet sent.
+ UpdateDataCounters(kFirstSsrc);
+
+ // Suspend and resume video.
+ statistics_proxy_->OnSuspendChange(true);
+ fake_clock_.AdvanceTimeMilliseconds(5000);
+ statistics_proxy_->OnSuspendChange(false);
+
+ // Min runtime has passed but scaling not enabled.
+ fake_clock_.AdvanceTimeMilliseconds(metrics::kMinRunTimeInSeconds * 1000);
+ statistics_proxy_.reset();
+ EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Cpu"));
+ EXPECT_EQ(0,
+ metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Quality"));
+}
+
+TEST_F(SendStatisticsProxyTest, QualityAdaptChangesStatsExcludesSuspendedTime) {
+ // First RTP packet sent.
+ UpdateDataCounters(kFirstSsrc);
+
+ // Enable adaptation.
+ VideoStreamEncoder::AdaptCounts cpu_counts;
+ VideoStreamEncoder::AdaptCounts quality_counts;
+ // Adapt changes: 2, elapsed time: 20 sec.
+ statistics_proxy_->SetAdaptationStats(cpu_counts, quality_counts);
+ fake_clock_.AdvanceTimeMilliseconds(20000);
+ statistics_proxy_->OnQualityAdaptationChanged(cpu_counts, quality_counts);
+ statistics_proxy_->OnQualityAdaptationChanged(cpu_counts, quality_counts);
+
+ // Suspend and resume video.
+ statistics_proxy_->OnSuspendChange(true);
+ fake_clock_.AdvanceTimeMilliseconds(30000);
+ statistics_proxy_->OnSuspendChange(false);
+
+ // Adapt changes: 1, elapsed time: 10 sec.
+ statistics_proxy_->OnQualityAdaptationChanged(cpu_counts, quality_counts);
+ fake_clock_.AdvanceTimeMilliseconds(10000);
+
+ // Adapt changes: 3, elapsed time: 30 sec => 6 per minute.
+ statistics_proxy_.reset();
+ EXPECT_EQ(1,
+ metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Quality"));
+ EXPECT_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.AdaptChangesPerMinute.Quality", 6));
+}
+
+TEST_F(SendStatisticsProxyTest, CpuAdaptChangesStatsExcludesSuspendedTime) {
+ // First RTP packet sent.
+ UpdateDataCounters(kFirstSsrc);
+
+ // Video not suspended.
+ statistics_proxy_->OnSuspendChange(false);
+ fake_clock_.AdvanceTimeMilliseconds(30000);
+
+ // Enable adaptation.
+ VideoStreamEncoder::AdaptCounts cpu_counts;
+ VideoStreamEncoder::AdaptCounts quality_counts;
+ // Adapt changes: 1, elapsed time: 20 sec.
+ statistics_proxy_->SetAdaptationStats(cpu_counts, quality_counts);
+ fake_clock_.AdvanceTimeMilliseconds(10000);
+ statistics_proxy_->OnCpuAdaptationChanged(cpu_counts, quality_counts);
+
+ // Video not suspended, stats time already started.
+ statistics_proxy_->OnSuspendChange(false);
+ fake_clock_.AdvanceTimeMilliseconds(10000);
+
+ // Disable adaptation.
+ cpu_counts.fps = -1;
+ cpu_counts.resolution = -1;
+ statistics_proxy_->SetAdaptationStats(cpu_counts, quality_counts);
+ fake_clock_.AdvanceTimeMilliseconds(30000);
+
+ // Suspend and resume video, stats time not started when scaling not enabled.
+ statistics_proxy_->OnSuspendChange(true);
+ fake_clock_.AdvanceTimeMilliseconds(30000);
+ statistics_proxy_->OnSuspendChange(false);
+ fake_clock_.AdvanceTimeMilliseconds(30000);
+
+ // Enable adaptation.
+ // Adapt changes: 1, elapsed time: 10 sec.
+ cpu_counts.fps = 0;
+ cpu_counts.resolution = 0;
+ statistics_proxy_->SetAdaptationStats(cpu_counts, quality_counts);
+ fake_clock_.AdvanceTimeMilliseconds(10000);
+ statistics_proxy_->OnCpuAdaptationChanged(cpu_counts, quality_counts);
+
+ // Adapt changes: 2, elapsed time: 30 sec => 4 per minute.
+ statistics_proxy_.reset();
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Cpu"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.AdaptChangesPerMinute.Cpu", 4));
+}
+
+TEST_F(SendStatisticsProxyTest, AdaptChangesStatsNotStartedIfVideoSuspended) {
+ // First RTP packet sent.
+ UpdateDataCounters(kFirstSsrc);
+
+ // Video suspended.
+ statistics_proxy_->OnSuspendChange(true);
+
+ // Enable adaptation, stats time not started when suspended.
+ VideoStreamEncoder::AdaptCounts cpu_counts;
+ VideoStreamEncoder::AdaptCounts quality_counts;
+ statistics_proxy_->SetAdaptationStats(cpu_counts, quality_counts);
+ fake_clock_.AdvanceTimeMilliseconds(10000);
+
+ // Resume video, stats time started.
+ // Adapt changes: 1, elapsed time: 10 sec.
+ statistics_proxy_->OnSuspendChange(false);
+ fake_clock_.AdvanceTimeMilliseconds(10000);
+ statistics_proxy_->OnCpuAdaptationChanged(cpu_counts, quality_counts);
+
+ // Adapt changes: 1, elapsed time: 10 sec => 6 per minute.
+ statistics_proxy_.reset();
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Cpu"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.AdaptChangesPerMinute.Cpu", 6));
+}
+
+TEST_F(SendStatisticsProxyTest, AdaptChangesStatsRestartsOnFirstSentPacket) {
+ // Send first packet, adaptation enabled.
+ // Elapsed time before first packet is sent should be excluded.
+ VideoStreamEncoder::AdaptCounts cpu_counts;
+ VideoStreamEncoder::AdaptCounts quality_counts;
+ statistics_proxy_->SetAdaptationStats(cpu_counts, quality_counts);
+ fake_clock_.AdvanceTimeMilliseconds(10000);
+ UpdateDataCounters(kFirstSsrc);
+
+ // Adapt changes: 1, elapsed time: 10 sec.
+ fake_clock_.AdvanceTimeMilliseconds(10000);
+ statistics_proxy_->OnQualityAdaptationChanged(cpu_counts, quality_counts);
+ UpdateDataCounters(kFirstSsrc);
+
+ // Adapt changes: 1, elapsed time: 10 sec => 6 per minute.
+ statistics_proxy_.reset();
+ EXPECT_EQ(1,
+ metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Quality"));
+ EXPECT_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.AdaptChangesPerMinute.Quality", 6));
+}
+
+TEST_F(SendStatisticsProxyTest, AdaptChangesStatsStartedAfterFirstSentPacket) {
+ // Enable and disable adaptation.
+ VideoStreamEncoder::AdaptCounts cpu_counts;
+ VideoStreamEncoder::AdaptCounts quality_counts;
+ statistics_proxy_->SetAdaptationStats(cpu_counts, quality_counts);
+ fake_clock_.AdvanceTimeMilliseconds(60000);
+ cpu_counts.fps = -1;
+ cpu_counts.resolution = -1;
+ statistics_proxy_->SetAdaptationStats(cpu_counts, quality_counts);
+
+ // Send first packet, scaling disabled.
+ // Elapsed time before first packet is sent should be excluded.
+ UpdateDataCounters(kFirstSsrc);
+ fake_clock_.AdvanceTimeMilliseconds(60000);
+
+ // Enable adaptation.
+ cpu_counts.resolution = 0;
+ statistics_proxy_->SetAdaptationStats(cpu_counts, quality_counts);
+ fake_clock_.AdvanceTimeMilliseconds(10000);
+ UpdateDataCounters(kFirstSsrc);
+
+ // Adapt changes: 1, elapsed time: 20 sec.
+ fake_clock_.AdvanceTimeMilliseconds(10000);
+ statistics_proxy_->OnCpuAdaptationChanged(cpu_counts, quality_counts);
+
+ // Adapt changes: 1, elapsed time: 20 sec => 3 per minute.
+ statistics_proxy_.reset();
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Cpu"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.AdaptChangesPerMinute.Cpu", 3));
+}
+
+TEST_F(SendStatisticsProxyTest, AdaptChangesReportedAfterContentSwitch) {
+ // First RTP packet sent, cpu adaptation enabled.
+ UpdateDataCounters(kFirstSsrc);
+ VideoStreamEncoder::AdaptCounts cpu_counts;
+ VideoStreamEncoder::AdaptCounts quality_counts;
+ quality_counts.fps = -1;
+ quality_counts.resolution = -1;
+ statistics_proxy_->SetAdaptationStats(cpu_counts, quality_counts);
+
+ // Adapt changes: 2, elapsed time: 15 sec => 8 per minute.
+ statistics_proxy_->OnCpuAdaptationChanged(cpu_counts, quality_counts);
+ fake_clock_.AdvanceTimeMilliseconds(6000);
+ statistics_proxy_->OnCpuAdaptationChanged(cpu_counts, quality_counts);
+ fake_clock_.AdvanceTimeMilliseconds(9000);
+
+ // Switch content type, real-time stats should be updated.
+ VideoEncoderConfig config;
+ config.content_type = VideoEncoderConfig::ContentType::kScreen;
+ statistics_proxy_->OnEncoderReconfigured(config, 50);
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Cpu"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.AdaptChangesPerMinute.Cpu", 8));
+ EXPECT_EQ(0,
+ metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Quality"));
+
+ // First RTP packet sent, scaling enabled.
+ UpdateDataCounters(kFirstSsrc);
+ statistics_proxy_->SetAdaptationStats(cpu_counts, quality_counts);
+
+ // Adapt changes: 4, elapsed time: 120 sec => 2 per minute.
+ statistics_proxy_->OnCpuAdaptationChanged(cpu_counts, quality_counts);
+ statistics_proxy_->OnCpuAdaptationChanged(cpu_counts, quality_counts);
+ statistics_proxy_->OnCpuAdaptationChanged(cpu_counts, quality_counts);
+ statistics_proxy_->OnCpuAdaptationChanged(cpu_counts, quality_counts);
+ fake_clock_.AdvanceTimeMilliseconds(120000);
+
+ statistics_proxy_.reset();
+ EXPECT_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.Screenshare.AdaptChangesPerMinute.Cpu"));
+ EXPECT_EQ(1, metrics::NumEvents(
+ "WebRTC.Video.Screenshare.AdaptChangesPerMinute.Cpu", 2));
+ EXPECT_EQ(0, metrics::NumSamples(
+ "WebRTC.Video.Screenshare.AdaptChangesPerMinute.Quality"));
+}
+
+TEST_F(SendStatisticsProxyTest, SwitchContentTypeUpdatesHistograms) {
+ for (int i = 0; i < SendStatisticsProxy::kMinRequiredMetricsSamples; ++i)
+ statistics_proxy_->OnIncomingFrame(kWidth, kHeight);
+
+ // No switch, stats should not be updated.
+ VideoEncoderConfig config;
+ config.content_type = VideoEncoderConfig::ContentType::kRealtimeVideo;
+ statistics_proxy_->OnEncoderReconfigured(config, 50);
+ EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.InputWidthInPixels"));
+
+ // Switch to screenshare, real-time stats should be updated.
+ config.content_type = VideoEncoderConfig::ContentType::kScreen;
+ statistics_proxy_->OnEncoderReconfigured(config, 50);
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.InputWidthInPixels"));
+}
+
+TEST_F(SendStatisticsProxyTest, InputResolutionHistogramsAreUpdated) {
+ for (int i = 0; i < SendStatisticsProxy::kMinRequiredMetricsSamples; ++i)
+ statistics_proxy_->OnIncomingFrame(kWidth, kHeight);
+
+ statistics_proxy_.reset();
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.InputWidthInPixels"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.InputWidthInPixels", kWidth));
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.InputHeightInPixels"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.InputHeightInPixels", kHeight));
+}
+
+TEST_F(SendStatisticsProxyTest, SentResolutionHistogramsAreUpdated) {
+ const int64_t kMaxEncodedFrameWindowMs = 800;
+ const int kFps = 20;
+ const int kNumFramesPerWindow = kFps * kMaxEncodedFrameWindowMs / 1000;
+ const int kMinSamples = // Sample added when removed from EncodedFrameMap.
+ SendStatisticsProxy::kMinRequiredMetricsSamples + kNumFramesPerWindow;
+ EncodedImage encoded_image;
+
+ // Not enough samples, stats should not be updated.
+ for (int i = 0; i < kMinSamples - 1; ++i) {
+ fake_clock_.AdvanceTimeMilliseconds(1000 / kFps);
+ ++encoded_image._timeStamp;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
+ }
+ SetUp(); // Resetting the stats proxy also causes histograms to be reported.
+ EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.SentWidthInPixels"));
+ EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.SentHeightInPixels"));
+
+ // Enough samples, max resolution per frame should be reported.
+ encoded_image._timeStamp = 0xfffffff0; // Will wrap.
+ for (int i = 0; i < kMinSamples; ++i) {
+ fake_clock_.AdvanceTimeMilliseconds(1000 / kFps);
+ ++encoded_image._timeStamp;
+ encoded_image._encodedWidth = kWidth;
+ encoded_image._encodedHeight = kHeight;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
+ encoded_image._encodedWidth = kWidth / 2;
+ encoded_image._encodedHeight = kHeight / 2;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
+ }
+
+ statistics_proxy_.reset();
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.SentWidthInPixels"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.SentWidthInPixels", kWidth));
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.SentHeightInPixels"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.SentHeightInPixels", kHeight));
+}
+
+TEST_F(SendStatisticsProxyTest, InputFpsHistogramIsUpdated) {
+ const int kFps = 20;
+ const int kMinPeriodicSamples = 6;
+ int frames = kMinPeriodicSamples * kFpsPeriodicIntervalMs * kFps / 1000;
+ for (int i = 0; i <= frames; ++i) {
+ fake_clock_.AdvanceTimeMilliseconds(1000 / kFps);
+ statistics_proxy_->OnIncomingFrame(kWidth, kHeight);
+ }
+ statistics_proxy_.reset();
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.InputFramesPerSecond"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.InputFramesPerSecond", kFps));
+}
+
+TEST_F(SendStatisticsProxyTest, SentFpsHistogramIsUpdated) {
+ EncodedImage encoded_image;
+ const int kFps = 20;
+ const int kMinPeriodicSamples = 6;
+ int frames = kMinPeriodicSamples * kFpsPeriodicIntervalMs * kFps / 1000 + 1;
+ for (int i = 0; i < frames; ++i) {
+ fake_clock_.AdvanceTimeMilliseconds(1000 / kFps);
+ ++encoded_image._timeStamp;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
+ // A frame with the same timestamp should not be counted.
+ statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
+ }
+ statistics_proxy_.reset();
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.SentFramesPerSecond"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.SentFramesPerSecond", kFps));
+}
+
+TEST_F(SendStatisticsProxyTest, InputFpsHistogramExcludesSuspendedTime) {
+ const int kFps = 20;
+ const int kSuspendTimeMs = 10000;
+ const int kMinPeriodicSamples = 6;
+ int frames = kMinPeriodicSamples * kFpsPeriodicIntervalMs * kFps / 1000;
+ for (int i = 0; i < frames; ++i) {
+ fake_clock_.AdvanceTimeMilliseconds(1000 / kFps);
+ statistics_proxy_->OnIncomingFrame(kWidth, kHeight);
+ }
+ // Suspend.
+ statistics_proxy_->OnSuspendChange(true);
+ fake_clock_.AdvanceTimeMilliseconds(kSuspendTimeMs);
+
+ for (int i = 0; i < frames; ++i) {
+ fake_clock_.AdvanceTimeMilliseconds(1000 / kFps);
+ statistics_proxy_->OnIncomingFrame(kWidth, kHeight);
+ }
+ // Suspended time interval should not affect the framerate.
+ statistics_proxy_.reset();
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.InputFramesPerSecond"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.InputFramesPerSecond", kFps));
+}
+
+TEST_F(SendStatisticsProxyTest, SentFpsHistogramExcludesSuspendedTime) {
+ EncodedImage encoded_image;
+ const int kFps = 20;
+ const int kSuspendTimeMs = 10000;
+ const int kMinPeriodicSamples = 6;
+ int frames = kMinPeriodicSamples * kFpsPeriodicIntervalMs * kFps / 1000;
+ for (int i = 0; i < frames; ++i) {
+ fake_clock_.AdvanceTimeMilliseconds(1000 / kFps);
+ encoded_image._timeStamp = i + 1;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
+ }
+ // Suspend.
+ statistics_proxy_->OnSuspendChange(true);
+ fake_clock_.AdvanceTimeMilliseconds(kSuspendTimeMs);
+
+ for (int i = 0; i < frames; ++i) {
+ fake_clock_.AdvanceTimeMilliseconds(1000 / kFps);
+ encoded_image._timeStamp = i + 1;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
+ }
+ // Suspended time interval should not affect the framerate.
+ statistics_proxy_.reset();
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.SentFramesPerSecond"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.SentFramesPerSecond", kFps));
+}
+
+TEST_F(SendStatisticsProxyTest, CpuLimitedHistogramNotUpdatedWhenDisabled) {
+ VideoStreamEncoder::AdaptCounts cpu_counts;
+ VideoStreamEncoder::AdaptCounts quality_counts;
+ cpu_counts.resolution = -1;
+ statistics_proxy_->SetAdaptationStats(cpu_counts, quality_counts);
+
+ for (int i = 0; i < SendStatisticsProxy::kMinRequiredMetricsSamples; ++i)
+ statistics_proxy_->OnIncomingFrame(kWidth, kHeight);
+
+ statistics_proxy_.reset();
+ EXPECT_EQ(0,
+ metrics::NumSamples("WebRTC.Video.CpuLimitedResolutionInPercent"));
+}
+
+TEST_F(SendStatisticsProxyTest, CpuLimitedHistogramUpdated) {
+ VideoStreamEncoder::AdaptCounts cpu_counts;
+ VideoStreamEncoder::AdaptCounts quality_counts;
+ cpu_counts.resolution = 0;
+ statistics_proxy_->SetAdaptationStats(cpu_counts, quality_counts);
+
+ for (int i = 0; i < SendStatisticsProxy::kMinRequiredMetricsSamples; ++i)
+ statistics_proxy_->OnIncomingFrame(kWidth, kHeight);
+
+ cpu_counts.resolution = 1;
+ statistics_proxy_->OnCpuAdaptationChanged(cpu_counts, quality_counts);
+
+ for (int i = 0; i < SendStatisticsProxy::kMinRequiredMetricsSamples; ++i)
+ statistics_proxy_->OnIncomingFrame(kWidth, kHeight);
+
+ statistics_proxy_.reset();
+ EXPECT_EQ(1,
+ metrics::NumSamples("WebRTC.Video.CpuLimitedResolutionInPercent"));
+ EXPECT_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.CpuLimitedResolutionInPercent", 50));
+}
+
+TEST_F(SendStatisticsProxyTest, LifetimeHistogramIsUpdated) {
+ const int64_t kTimeSec = 3;
+ fake_clock_.AdvanceTimeMilliseconds(kTimeSec * 1000);
+ statistics_proxy_.reset();
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.SendStreamLifetimeInSeconds"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.SendStreamLifetimeInSeconds",
+ kTimeSec));
+}
+
+TEST_F(SendStatisticsProxyTest, CodecTypeHistogramIsUpdated) {
+ fake_clock_.AdvanceTimeMilliseconds(metrics::kMinRunTimeInSeconds * 1000);
+ statistics_proxy_.reset();
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.Encoder.CodecType"));
+}
+
+TEST_F(SendStatisticsProxyTest, PauseEventHistogramIsUpdated) {
+ // First RTP packet sent.
+ UpdateDataCounters(kFirstSsrc);
+
+ // Min runtime has passed.
+ fake_clock_.AdvanceTimeMilliseconds(metrics::kMinRunTimeInSeconds * 1000);
+ statistics_proxy_.reset();
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.NumberOfPauseEvents"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.NumberOfPauseEvents", 0));
+}
+
+TEST_F(SendStatisticsProxyTest,
+ PauseEventHistogramIsNotUpdatedIfMinRuntimeHasNotPassed) {
+ // First RTP packet sent.
+ UpdateDataCounters(kFirstSsrc);
+
+ // Min runtime has not passed.
+ fake_clock_.AdvanceTimeMilliseconds(metrics::kMinRunTimeInSeconds * 1000 - 1);
+ statistics_proxy_.reset();
+ EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.NumberOfPauseEvents"));
+ EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.PausedTimeInPercent"));
+}
+
+TEST_F(SendStatisticsProxyTest,
+ PauseEventHistogramIsNotUpdatedIfNoMediaIsSent) {
+ // First RTP packet not sent.
+ fake_clock_.AdvanceTimeMilliseconds(metrics::kMinRunTimeInSeconds * 1000);
+ statistics_proxy_.reset();
+ EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.NumberOfPauseEvents"));
+}
+
+TEST_F(SendStatisticsProxyTest, NoPauseEvent) {
+ // First RTP packet sent and min runtime passed.
+ UpdateDataCounters(kFirstSsrc);
+
+ // No change. Video: 10000 ms, paused: 0 ms (0%).
+ statistics_proxy_->OnSetEncoderTargetRate(50000);
+ fake_clock_.AdvanceTimeMilliseconds(metrics::kMinRunTimeInSeconds * 1000);
+ statistics_proxy_->OnSetEncoderTargetRate(0); // VideoSendStream::Stop
+
+ statistics_proxy_.reset();
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.NumberOfPauseEvents"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.NumberOfPauseEvents", 0));
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.PausedTimeInPercent"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.PausedTimeInPercent", 0));
+}
+
+TEST_F(SendStatisticsProxyTest, OnePauseEvent) {
+ // First RTP packet sent and min runtime passed.
+ UpdateDataCounters(kFirstSsrc);
+
+ // One change. Video: 7000 ms, paused: 3000 ms (30%).
+ statistics_proxy_->OnSetEncoderTargetRate(50000);
+ fake_clock_.AdvanceTimeMilliseconds(7000);
+ statistics_proxy_->OnSetEncoderTargetRate(0);
+ fake_clock_.AdvanceTimeMilliseconds(3000);
+ statistics_proxy_->OnSetEncoderTargetRate(0); // VideoSendStream::Stop
+
+ statistics_proxy_.reset();
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.NumberOfPauseEvents"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.NumberOfPauseEvents", 1));
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.PausedTimeInPercent"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.PausedTimeInPercent", 30));
+}
+
+TEST_F(SendStatisticsProxyTest, TwoPauseEvents) {
+ // First RTP packet sent.
+ UpdateDataCounters(kFirstSsrc);
+
+ // Two changes. Video: 19000 ms, paused: 1000 ms (5%).
+ statistics_proxy_->OnSetEncoderTargetRate(0);
+ fake_clock_.AdvanceTimeMilliseconds(1000);
+ statistics_proxy_->OnSetEncoderTargetRate(50000); // Starts on bitrate > 0.
+ fake_clock_.AdvanceTimeMilliseconds(7000);
+ statistics_proxy_->OnSetEncoderTargetRate(60000);
+ fake_clock_.AdvanceTimeMilliseconds(3000);
+ statistics_proxy_->OnSetEncoderTargetRate(0);
+ fake_clock_.AdvanceTimeMilliseconds(250);
+ statistics_proxy_->OnSetEncoderTargetRate(0);
+ fake_clock_.AdvanceTimeMilliseconds(750);
+ statistics_proxy_->OnSetEncoderTargetRate(60000);
+ fake_clock_.AdvanceTimeMilliseconds(5000);
+ statistics_proxy_->OnSetEncoderTargetRate(50000);
+ fake_clock_.AdvanceTimeMilliseconds(4000);
+ statistics_proxy_->OnSetEncoderTargetRate(0); // VideoSendStream::Stop
+
+ statistics_proxy_.reset();
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.NumberOfPauseEvents"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.NumberOfPauseEvents", 2));
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.PausedTimeInPercent"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.PausedTimeInPercent", 5));
+}
+
+TEST_F(SendStatisticsProxyTest,
+ PausedTimeHistogramIsNotUpdatedIfMinRuntimeHasNotPassed) {
+ // First RTP packet sent.
+ UpdateDataCounters(kFirstSsrc);
+ fake_clock_.AdvanceTimeMilliseconds(metrics::kMinRunTimeInSeconds * 1000);
+
+ // Min runtime has not passed.
+ statistics_proxy_->OnSetEncoderTargetRate(50000);
+ fake_clock_.AdvanceTimeMilliseconds(metrics::kMinRunTimeInSeconds * 1000 - 1);
+ statistics_proxy_->OnSetEncoderTargetRate(0); // VideoSendStream::Stop
+
+ statistics_proxy_.reset();
+ EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.PausedTimeInPercent"));
+}
+
+TEST_F(SendStatisticsProxyTest, VerifyQpHistogramStats_Vp8) {
+ EncodedImage encoded_image;
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecVP8;
+
+ for (int i = 0; i < SendStatisticsProxy::kMinRequiredMetricsSamples; ++i) {
+ codec_info.codecSpecific.VP8.simulcastIdx = 0;
+ encoded_image.qp_ = kQpIdx0;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+ codec_info.codecSpecific.VP8.simulcastIdx = 1;
+ encoded_image.qp_ = kQpIdx1;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+ }
+ statistics_proxy_.reset();
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.Encoded.Qp.Vp8.S0"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.Encoded.Qp.Vp8.S0", kQpIdx0));
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.Encoded.Qp.Vp8.S1"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.Encoded.Qp.Vp8.S1", kQpIdx1));
+}
+
+TEST_F(SendStatisticsProxyTest, VerifyQpHistogramStats_Vp8OneSsrc) {
+ VideoSendStream::Config config(nullptr);
+ config.rtp.ssrcs.push_back(kFirstSsrc);
+ statistics_proxy_.reset(new SendStatisticsProxy(
+ &fake_clock_, config, VideoEncoderConfig::ContentType::kRealtimeVideo));
+
+ EncodedImage encoded_image;
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecVP8;
+
+ for (int i = 0; i < SendStatisticsProxy::kMinRequiredMetricsSamples; ++i) {
+ codec_info.codecSpecific.VP8.simulcastIdx = 0;
+ encoded_image.qp_ = kQpIdx0;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+ }
+ statistics_proxy_.reset();
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.Encoded.Qp.Vp8"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.Encoded.Qp.Vp8", kQpIdx0));
+}
+
+TEST_F(SendStatisticsProxyTest, VerifyQpHistogramStats_Vp9) {
+ EncodedImage encoded_image;
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecVP9;
+ codec_info.codecSpecific.VP9.num_spatial_layers = 2;
+
+ for (int i = 0; i < SendStatisticsProxy::kMinRequiredMetricsSamples; ++i) {
+ encoded_image.qp_ = kQpIdx0;
+ codec_info.codecSpecific.VP9.spatial_idx = 0;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+ encoded_image.qp_ = kQpIdx1;
+ codec_info.codecSpecific.VP9.spatial_idx = 1;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+ }
+ statistics_proxy_.reset();
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.Encoded.Qp.Vp9.S0"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.Encoded.Qp.Vp9.S0", kQpIdx0));
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.Encoded.Qp.Vp9.S1"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.Encoded.Qp.Vp9.S1", kQpIdx1));
+}
+
+TEST_F(SendStatisticsProxyTest, VerifyQpHistogramStats_Vp9OneSpatialLayer) {
+ VideoSendStream::Config config(nullptr);
+ config.rtp.ssrcs.push_back(kFirstSsrc);
+ statistics_proxy_.reset(new SendStatisticsProxy(
+ &fake_clock_, config, VideoEncoderConfig::ContentType::kRealtimeVideo));
+
+ EncodedImage encoded_image;
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecVP9;
+ codec_info.codecSpecific.VP9.num_spatial_layers = 1;
+
+ for (int i = 0; i < SendStatisticsProxy::kMinRequiredMetricsSamples; ++i) {
+ encoded_image.qp_ = kQpIdx0;
+ codec_info.codecSpecific.VP9.spatial_idx = 0;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+ }
+ statistics_proxy_.reset();
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.Encoded.Qp.Vp9"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.Encoded.Qp.Vp9", kQpIdx0));
+}
+
+TEST_F(SendStatisticsProxyTest, VerifyQpHistogramStats_H264) {
+ EncodedImage encoded_image;
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecH264;
+
+ for (int i = 0; i < SendStatisticsProxy::kMinRequiredMetricsSamples; ++i) {
+ encoded_image.qp_ = kQpIdx0;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+ }
+ statistics_proxy_.reset();
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.Encoded.Qp.H264"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.Encoded.Qp.H264", kQpIdx0));
+}
+
+TEST_F(SendStatisticsProxyTest,
+ BandwidthLimitedHistogramsNotUpdatedWhenDisabled) {
+ EncodedImage encoded_image;
+  // encoded_image.adapt_reason_.bw_resolutions_disabled defaults to -1
+  // (disabled).
+ for (int i = 0; i < SendStatisticsProxy::kMinRequiredMetricsSamples; ++i)
+ statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
+
+ // Histograms are updated when the statistics_proxy_ is deleted.
+ statistics_proxy_.reset();
+ EXPECT_EQ(0, metrics::NumSamples(
+ "WebRTC.Video.BandwidthLimitedResolutionInPercent"));
+ EXPECT_EQ(0, metrics::NumSamples(
+ "WebRTC.Video.BandwidthLimitedResolutionsDisabled"));
+}
+
+TEST_F(SendStatisticsProxyTest,
+ BandwidthLimitedHistogramsUpdatedWhenEnabled_NoResolutionDisabled) {
+ const int kResolutionsDisabled = 0;
+ EncodedImage encoded_image;
+ encoded_image.adapt_reason_.bw_resolutions_disabled = kResolutionsDisabled;
+ for (int i = 0; i < SendStatisticsProxy::kMinRequiredMetricsSamples; ++i)
+ statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
+
+ // Histograms are updated when the statistics_proxy_ is deleted.
+ statistics_proxy_.reset();
+ EXPECT_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.BandwidthLimitedResolutionInPercent"));
+ EXPECT_EQ(1, metrics::NumEvents(
+ "WebRTC.Video.BandwidthLimitedResolutionInPercent", 0));
+ // No resolution disabled.
+ EXPECT_EQ(0, metrics::NumSamples(
+ "WebRTC.Video.BandwidthLimitedResolutionsDisabled"));
+}
+
+TEST_F(SendStatisticsProxyTest,
+ BandwidthLimitedHistogramsUpdatedWhenEnabled_OneResolutionDisabled) {
+ const int kResolutionsDisabled = 1;
+ EncodedImage encoded_image;
+ encoded_image.adapt_reason_.bw_resolutions_disabled = kResolutionsDisabled;
+ for (int i = 0; i < SendStatisticsProxy::kMinRequiredMetricsSamples; ++i)
+ statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
+
+ // Histograms are updated when the statistics_proxy_ is deleted.
+ statistics_proxy_.reset();
+ EXPECT_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.BandwidthLimitedResolutionInPercent"));
+ EXPECT_EQ(1, metrics::NumEvents(
+ "WebRTC.Video.BandwidthLimitedResolutionInPercent", 100));
+ // Resolutions disabled.
+ EXPECT_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.BandwidthLimitedResolutionsDisabled"));
+ EXPECT_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.BandwidthLimitedResolutionsDisabled",
+ kResolutionsDisabled));
+}
+
+TEST_F(SendStatisticsProxyTest,
+ QualityLimitedHistogramsNotUpdatedWhenDisabled) {
+ VideoStreamEncoder::AdaptCounts cpu_counts;
+ VideoStreamEncoder::AdaptCounts quality_counts;
+ quality_counts.resolution = -1;
+ statistics_proxy_->SetAdaptationStats(cpu_counts, quality_counts);
+ EncodedImage encoded_image;
+ for (int i = 0; i < SendStatisticsProxy::kMinRequiredMetricsSamples; ++i)
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &kDefaultCodecInfo);
+
+ // Histograms are updated when the statistics_proxy_ is deleted.
+ statistics_proxy_.reset();
+ EXPECT_EQ(
+ 0, metrics::NumSamples("WebRTC.Video.QualityLimitedResolutionInPercent"));
+ EXPECT_EQ(0, metrics::NumSamples(
+ "WebRTC.Video.QualityLimitedResolutionDownscales"));
+}
+
+TEST_F(SendStatisticsProxyTest,
+ QualityLimitedHistogramsUpdatedWhenEnabled_NoResolutionDownscale) {
+ VideoStreamEncoder::AdaptCounts cpu_counts;
+ VideoStreamEncoder::AdaptCounts quality_counts;
+ quality_counts.resolution = 0;
+ statistics_proxy_->SetAdaptationStats(cpu_counts, quality_counts);
+ EncodedImage encoded_image;
+ for (int i = 0; i < SendStatisticsProxy::kMinRequiredMetricsSamples; ++i)
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &kDefaultCodecInfo);
+
+ // Histograms are updated when the statistics_proxy_ is deleted.
+ statistics_proxy_.reset();
+ EXPECT_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.QualityLimitedResolutionInPercent"));
+ EXPECT_EQ(1, metrics::NumEvents(
+ "WebRTC.Video.QualityLimitedResolutionInPercent", 0));
+ // No resolution downscale.
+ EXPECT_EQ(0, metrics::NumSamples(
+ "WebRTC.Video.QualityLimitedResolutionDownscales"));
+}
+
+TEST_F(SendStatisticsProxyTest,
+ QualityLimitedHistogramsUpdatedWhenEnabled_TwoResolutionDownscales) {
+ const int kDownscales = 2;
+ VideoStreamEncoder::AdaptCounts cpu_counts;
+ VideoStreamEncoder::AdaptCounts quality_counts;
+ quality_counts.resolution = kDownscales;
+ statistics_proxy_->SetAdaptationStats(cpu_counts, quality_counts);
+ EncodedImage encoded_image;
+ for (int i = 0; i < SendStatisticsProxy::kMinRequiredMetricsSamples; ++i)
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &kDefaultCodecInfo);
+ // Histograms are updated when the statistics_proxy_ is deleted.
+ statistics_proxy_.reset();
+ EXPECT_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.QualityLimitedResolutionInPercent"));
+ EXPECT_EQ(1, metrics::NumEvents(
+ "WebRTC.Video.QualityLimitedResolutionInPercent", 100));
+ // Resolution downscales.
+ EXPECT_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.QualityLimitedResolutionDownscales"));
+ EXPECT_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.QualityLimitedResolutionDownscales",
+ kDownscales));
+}
+
+TEST_F(SendStatisticsProxyTest, GetStatsReportsBandwidthLimitedResolution) {
+ // Initially false.
+ EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_resolution);
+ // No resolution scale by default.
+ EncodedImage encoded_image;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
+ EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_resolution);
+
+  // Simulcast resolutions disabled due to bandwidth.
+ encoded_image.adapt_reason_.bw_resolutions_disabled = 1;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
+ EXPECT_TRUE(statistics_proxy_->GetStats().bw_limited_resolution);
+
+ encoded_image.adapt_reason_.bw_resolutions_disabled = 0;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
+ EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_resolution);
+
+ // Resolution scaled due to quality.
+ VideoStreamEncoder::AdaptCounts cpu_counts;
+ VideoStreamEncoder::AdaptCounts quality_counts;
+ quality_counts.resolution = 1;
+ statistics_proxy_->OnQualityAdaptationChanged(cpu_counts, quality_counts);
+ statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
+ EXPECT_TRUE(statistics_proxy_->GetStats().bw_limited_resolution);
+}
+
+TEST_F(SendStatisticsProxyTest, GetStatsReportsTargetMediaBitrate) {
+ // Initially zero.
+ EXPECT_EQ(0, statistics_proxy_->GetStats().target_media_bitrate_bps);
+
+ const int kBitrate = 100000;
+ statistics_proxy_->OnSetEncoderTargetRate(kBitrate);
+ EXPECT_EQ(kBitrate, statistics_proxy_->GetStats().target_media_bitrate_bps);
+
+ statistics_proxy_->OnSetEncoderTargetRate(0);
+ EXPECT_EQ(0, statistics_proxy_->GetStats().target_media_bitrate_bps);
+}
+
+TEST_F(SendStatisticsProxyTest, NoSubstreams) {
+ uint32_t excluded_ssrc =
+ std::max(
+ *std::max_element(config_.rtp.ssrcs.begin(), config_.rtp.ssrcs.end()),
+ *std::max_element(config_.rtp.rtx.ssrcs.begin(),
+ config_.rtp.rtx.ssrcs.end())) +
+ 1;
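+  // |excluded_ssrc| does not match any configured media or RTX SSRC, so none
+  // of the callbacks below should create a substream entry.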
+ // From RtcpStatisticsCallback.
+ RtcpStatistics rtcp_stats;
+ RtcpStatisticsCallback* rtcp_callback = statistics_proxy_.get();
+ rtcp_callback->StatisticsUpdated(rtcp_stats, excluded_ssrc);
+
+ // From BitrateStatisticsObserver.
+ uint32_t total = 0;
+ uint32_t retransmit = 0;
+ BitrateStatisticsObserver* bitrate_observer = statistics_proxy_.get();
+ bitrate_observer->Notify(total, retransmit, excluded_ssrc);
+
+ // From FrameCountObserver.
+ FrameCountObserver* fps_observer = statistics_proxy_.get();
+ FrameCounts frame_counts;
+ frame_counts.key_frames = 1;
+ fps_observer->FrameCountUpdated(frame_counts, excluded_ssrc);
+
+ VideoSendStream::Stats stats = statistics_proxy_->GetStats();
+ EXPECT_TRUE(stats.substreams.empty());
+}
+
+TEST_F(SendStatisticsProxyTest, EncodedResolutionTimesOut) {
+ static const int kEncodedWidth = 123;
+ static const int kEncodedHeight = 81;
+ EncodedImage encoded_image;
+ encoded_image._encodedWidth = kEncodedWidth;
+ encoded_image._encodedHeight = kEncodedHeight;
+
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecVP8;
+ codec_info.codecSpecific.VP8.simulcastIdx = 0;
+
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+ codec_info.codecSpecific.VP8.simulcastIdx = 1;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+
+ VideoSendStream::Stats stats = statistics_proxy_->GetStats();
+ EXPECT_EQ(kEncodedWidth, stats.substreams[config_.rtp.ssrcs[0]].width);
+ EXPECT_EQ(kEncodedHeight, stats.substreams[config_.rtp.ssrcs[0]].height);
+ EXPECT_EQ(kEncodedWidth, stats.substreams[config_.rtp.ssrcs[1]].width);
+ EXPECT_EQ(kEncodedHeight, stats.substreams[config_.rtp.ssrcs[1]].height);
+
+  // Forward almost to the timeout; this should not have removed stats.
+ fake_clock_.AdvanceTimeMilliseconds(SendStatisticsProxy::kStatsTimeoutMs - 1);
+ stats = statistics_proxy_->GetStats();
+ EXPECT_EQ(kEncodedWidth, stats.substreams[config_.rtp.ssrcs[0]].width);
+ EXPECT_EQ(kEncodedHeight, stats.substreams[config_.rtp.ssrcs[0]].height);
+
+ // Update the first SSRC with bogus RTCP stats to make sure that encoded
+ // resolution still times out (no global timeout for all stats).
+ RtcpStatistics rtcp_statistics;
+ RtcpStatisticsCallback* rtcp_stats = statistics_proxy_.get();
+ rtcp_stats->StatisticsUpdated(rtcp_statistics, config_.rtp.ssrcs[0]);
+
+  // Report stats for the second SSRC to make sure it does not time out along
+  // with the first SSRC.
+ codec_info.codecSpecific.VP8.simulcastIdx = 1;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+
+  // Forward 1 ms to reach the timeout; substream 0 should have no resolution
+  // reported, but substream 1 should.
+ fake_clock_.AdvanceTimeMilliseconds(1);
+ stats = statistics_proxy_->GetStats();
+ EXPECT_EQ(0, stats.substreams[config_.rtp.ssrcs[0]].width);
+ EXPECT_EQ(0, stats.substreams[config_.rtp.ssrcs[0]].height);
+ EXPECT_EQ(kEncodedWidth, stats.substreams[config_.rtp.ssrcs[1]].width);
+ EXPECT_EQ(kEncodedHeight, stats.substreams[config_.rtp.ssrcs[1]].height);
+}
+
+TEST_F(SendStatisticsProxyTest, ClearsResolutionFromInactiveSsrcs) {
+ static const int kEncodedWidth = 123;
+ static const int kEncodedHeight = 81;
+ EncodedImage encoded_image;
+ encoded_image._encodedWidth = kEncodedWidth;
+ encoded_image._encodedHeight = kEncodedHeight;
+
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecVP8;
+ codec_info.codecSpecific.VP8.simulcastIdx = 0;
+
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+ codec_info.codecSpecific.VP8.simulcastIdx = 1;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+
+ statistics_proxy_->OnInactiveSsrc(config_.rtp.ssrcs[1]);
+ VideoSendStream::Stats stats = statistics_proxy_->GetStats();
+ EXPECT_EQ(kEncodedWidth, stats.substreams[config_.rtp.ssrcs[0]].width);
+ EXPECT_EQ(kEncodedHeight, stats.substreams[config_.rtp.ssrcs[0]].height);
+ EXPECT_EQ(0, stats.substreams[config_.rtp.ssrcs[1]].width);
+ EXPECT_EQ(0, stats.substreams[config_.rtp.ssrcs[1]].height);
+}
+
+TEST_F(SendStatisticsProxyTest, ClearsBitratesFromInactiveSsrcs) {
+ uint32_t bitrate = 42;
+ BitrateStatisticsObserver* observer = statistics_proxy_.get();
+ observer->Notify(bitrate, bitrate, config_.rtp.ssrcs[0]);
+ observer->Notify(bitrate, bitrate, config_.rtp.ssrcs[1]);
+
+ statistics_proxy_->OnInactiveSsrc(config_.rtp.ssrcs[1]);
+
+ VideoSendStream::Stats stats = statistics_proxy_->GetStats();
+ EXPECT_EQ(static_cast<int>(bitrate),
+ stats.substreams[config_.rtp.ssrcs[0]].total_bitrate_bps);
+ EXPECT_EQ(static_cast<int>(bitrate),
+ stats.substreams[config_.rtp.ssrcs[0]].retransmit_bitrate_bps);
+ EXPECT_EQ(0, stats.substreams[config_.rtp.ssrcs[1]].total_bitrate_bps);
+ EXPECT_EQ(0, stats.substreams[config_.rtp.ssrcs[1]].retransmit_bitrate_bps);
+}
+
+TEST_F(SendStatisticsProxyTest, ResetsRtcpCountersOnContentChange) {
+ RtcpPacketTypeCounterObserver* proxy =
+ static_cast<RtcpPacketTypeCounterObserver*>(statistics_proxy_.get());
+ RtcpPacketTypeCounter counters;
+ counters.first_packet_time_ms = fake_clock_.TimeInMilliseconds();
+ proxy->RtcpPacketTypesCounterUpdated(kFirstSsrc, counters);
+ proxy->RtcpPacketTypesCounterUpdated(kSecondSsrc, counters);
+
+ fake_clock_.AdvanceTimeMilliseconds(1000 * metrics::kMinRunTimeInSeconds);
+
+ counters.nack_packets += 1 * metrics::kMinRunTimeInSeconds;
+ counters.fir_packets += 2 * metrics::kMinRunTimeInSeconds;
+ counters.pli_packets += 3 * metrics::kMinRunTimeInSeconds;
+ counters.unique_nack_requests += 4 * metrics::kMinRunTimeInSeconds;
+ counters.nack_requests += 5 * metrics::kMinRunTimeInSeconds;
+
+ proxy->RtcpPacketTypesCounterUpdated(kFirstSsrc, counters);
+ proxy->RtcpPacketTypesCounterUpdated(kSecondSsrc, counters);
+
+ // Changing content type causes histograms to be reported.
+ VideoEncoderConfig config;
+ config.content_type = VideoEncoderConfig::ContentType::kScreen;
+ statistics_proxy_->OnEncoderReconfigured(config, 50);
+
+ EXPECT_EQ(1,
+ metrics::NumSamples("WebRTC.Video.NackPacketsReceivedPerMinute"));
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.FirPacketsReceivedPerMinute"));
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.PliPacketsReceivedPerMinute"));
+ EXPECT_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.UniqueNackRequestsReceivedInPercent"));
+
+ const int kRate = 60 * 2; // Packets per minute with two streams.
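+  // Each counter grew by N * kMinRunTimeInSeconds packets per stream over
+  // kMinRunTimeInSeconds, i.e. N packets per second, which sums to
+  // N * 60 * 2 packets per minute across the two streams.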
+
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.NackPacketsReceivedPerMinute",
+ 1 * kRate));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.FirPacketsReceivedPerMinute",
+ 2 * kRate));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.PliPacketsReceivedPerMinute",
+ 3 * kRate));
+ EXPECT_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.UniqueNackRequestsReceivedInPercent",
+ 4 * 100 / 5));
+
+ // New start time but same counter values.
+ proxy->RtcpPacketTypesCounterUpdated(kFirstSsrc, counters);
+ proxy->RtcpPacketTypesCounterUpdated(kSecondSsrc, counters);
+
+ fake_clock_.AdvanceTimeMilliseconds(1000 * metrics::kMinRunTimeInSeconds);
+
+ counters.nack_packets += 1 * metrics::kMinRunTimeInSeconds;
+ counters.fir_packets += 2 * metrics::kMinRunTimeInSeconds;
+ counters.pli_packets += 3 * metrics::kMinRunTimeInSeconds;
+ counters.unique_nack_requests += 4 * metrics::kMinRunTimeInSeconds;
+ counters.nack_requests += 5 * metrics::kMinRunTimeInSeconds;
+
+ proxy->RtcpPacketTypesCounterUpdated(kFirstSsrc, counters);
+ proxy->RtcpPacketTypesCounterUpdated(kSecondSsrc, counters);
+
+  // Resetting the stats proxy also causes histograms to be reported.
+  SetUp();
+
+ EXPECT_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.Screenshare.NackPacketsReceivedPerMinute"));
+ EXPECT_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.Screenshare.FirPacketsReceivedPerMinute"));
+ EXPECT_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.Screenshare.PliPacketsReceivedPerMinute"));
+ EXPECT_EQ(
+ 1, metrics::NumSamples(
+ "WebRTC.Video.Screenshare.UniqueNackRequestsReceivedInPercent"));
+
+ EXPECT_EQ(1, metrics::NumEvents(
+ "WebRTC.Video.Screenshare.NackPacketsReceivedPerMinute",
+ 1 * kRate));
+ EXPECT_EQ(1, metrics::NumEvents(
+ "WebRTC.Video.Screenshare.FirPacketsReceivedPerMinute",
+ 2 * kRate));
+ EXPECT_EQ(1, metrics::NumEvents(
+ "WebRTC.Video.Screenshare.PliPacketsReceivedPerMinute",
+ 3 * kRate));
+ EXPECT_EQ(1,
+ metrics::NumEvents(
+ "WebRTC.Video.Screenshare.UniqueNackRequestsReceivedInPercent",
+ 4 * 100 / 5));
+}
+
+TEST_F(SendStatisticsProxyTest, GetStatsReportsIsFlexFec) {
+ statistics_proxy_.reset(
+ new SendStatisticsProxy(&fake_clock_, GetTestConfigWithFlexFec(),
+ VideoEncoderConfig::ContentType::kRealtimeVideo));
+
+ StreamDataCountersCallback* proxy =
+ static_cast<StreamDataCountersCallback*>(statistics_proxy_.get());
+ StreamDataCounters counters;
+ proxy->DataCountersUpdated(counters, kFirstSsrc);
+ proxy->DataCountersUpdated(counters, kFlexFecSsrc);
+
+ EXPECT_FALSE(GetStreamStats(kFirstSsrc).is_flexfec);
+ EXPECT_TRUE(GetStreamStats(kFlexFecSsrc).is_flexfec);
+}
+
+TEST_F(SendStatisticsProxyTest, SendBitratesAreReportedWithFlexFecEnabled) {
+ statistics_proxy_.reset(
+ new SendStatisticsProxy(&fake_clock_, GetTestConfigWithFlexFec(),
+ VideoEncoderConfig::ContentType::kRealtimeVideo));
+
+ StreamDataCountersCallback* proxy =
+ static_cast<StreamDataCountersCallback*>(statistics_proxy_.get());
+ StreamDataCounters counters;
+ StreamDataCounters rtx_counters;
+
+ const int kMinRequiredPeriodSamples = 8;
+ const int kPeriodIntervalMs = 2000;
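+  // Each iteration adds 500 header + 1000 padding + 2000 payload = 3500
+  // transmitted bytes per media stream; the expected kbps values below follow
+  // from these per-interval increments.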
+ for (int i = 0; i < kMinRequiredPeriodSamples; ++i) {
+ counters.transmitted.packets += 20;
+ counters.transmitted.header_bytes += 500;
+ counters.transmitted.padding_bytes += 1000;
+ counters.transmitted.payload_bytes += 2000;
+ counters.retransmitted.packets += 2;
+ counters.retransmitted.header_bytes += 25;
+ counters.retransmitted.padding_bytes += 100;
+ counters.retransmitted.payload_bytes += 250;
+ counters.fec = counters.retransmitted;
+ rtx_counters.transmitted = counters.transmitted;
+ // Advance one interval and update counters.
+ fake_clock_.AdvanceTimeMilliseconds(kPeriodIntervalMs);
+ proxy->DataCountersUpdated(counters, kFirstSsrc);
+ proxy->DataCountersUpdated(counters, kSecondSsrc);
+ proxy->DataCountersUpdated(rtx_counters, kFirstRtxSsrc);
+ proxy->DataCountersUpdated(rtx_counters, kSecondRtxSsrc);
+ proxy->DataCountersUpdated(counters, kFlexFecSsrc);
+ }
+
+ statistics_proxy_.reset();
+ // Interval: 3500 bytes * 4 / 2 sec = 7000 bytes / sec = 56 kbps
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.BitrateSentInKbps"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.BitrateSentInKbps", 56));
+ // Interval: 3500 bytes * 2 / 2 sec = 3500 bytes / sec = 28 kbps
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.RtxBitrateSentInKbps"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.RtxBitrateSentInKbps", 28));
+ // Interval: (2000 - 2 * 250) bytes / 2 sec = 1500 bytes / sec = 12 kbps
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.MediaBitrateSentInKbps"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.MediaBitrateSentInKbps", 12));
+ // Interval: 1000 bytes * 4 / 2 sec = 2000 bytes / sec = 16 kbps
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.PaddingBitrateSentInKbps"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.PaddingBitrateSentInKbps", 16));
+ // Interval: 375 bytes * 2 / 2 sec = 375 bytes / sec = 3 kbps
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.FecBitrateSentInKbps"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.FecBitrateSentInKbps", 3));
+ // Interval: 375 bytes * 2 / 2 sec = 375 bytes / sec = 3 kbps
+ EXPECT_EQ(1,
+ metrics::NumSamples("WebRTC.Video.RetransmittedBitrateSentInKbps"));
+ EXPECT_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.RetransmittedBitrateSentInKbps", 3));
+}
+
+TEST_F(SendStatisticsProxyTest, ResetsRtpCountersOnContentChange) {
+ StreamDataCountersCallback* proxy =
+ static_cast<StreamDataCountersCallback*>(statistics_proxy_.get());
+ StreamDataCounters counters;
+ StreamDataCounters rtx_counters;
+ counters.first_packet_time_ms = fake_clock_.TimeInMilliseconds();
+
+ const int kMinRequiredPeriodSamples = 8;
+ const int kPeriodIntervalMs = 2000;
+ for (int i = 0; i < kMinRequiredPeriodSamples; ++i) {
+ counters.transmitted.packets += 20;
+ counters.transmitted.header_bytes += 500;
+ counters.transmitted.padding_bytes += 1000;
+ counters.transmitted.payload_bytes += 2000;
+ counters.retransmitted.packets += 2;
+ counters.retransmitted.header_bytes += 25;
+ counters.retransmitted.padding_bytes += 100;
+ counters.retransmitted.payload_bytes += 250;
+ counters.fec = counters.retransmitted;
+ rtx_counters.transmitted = counters.transmitted;
+ // Advance one interval and update counters.
+ fake_clock_.AdvanceTimeMilliseconds(kPeriodIntervalMs);
+ proxy->DataCountersUpdated(counters, kFirstSsrc);
+ proxy->DataCountersUpdated(counters, kSecondSsrc);
+ proxy->DataCountersUpdated(rtx_counters, kFirstRtxSsrc);
+ proxy->DataCountersUpdated(rtx_counters, kSecondRtxSsrc);
+ }
+
+ // Changing content type causes histograms to be reported.
+ VideoEncoderConfig config;
+ config.content_type = VideoEncoderConfig::ContentType::kScreen;
+ statistics_proxy_->OnEncoderReconfigured(config, 50000);
+
+ // Interval: 3500 bytes * 4 / 2 sec = 7000 bytes / sec = 56 kbps
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.BitrateSentInKbps"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.BitrateSentInKbps", 56));
+ // Interval: 3500 bytes * 2 / 2 sec = 3500 bytes / sec = 28 kbps
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.RtxBitrateSentInKbps"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.RtxBitrateSentInKbps", 28));
+ // Interval: (2000 - 2 * 250) bytes / 2 sec = 1500 bytes / sec = 12 kbps
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.MediaBitrateSentInKbps"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.MediaBitrateSentInKbps", 12));
+ // Interval: 1000 bytes * 4 / 2 sec = 2000 bytes / sec = 16 kbps
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.PaddingBitrateSentInKbps"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.PaddingBitrateSentInKbps", 16));
+ // Interval: 375 bytes * 2 / 2 sec = 375 bytes / sec = 3 kbps
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.FecBitrateSentInKbps"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.FecBitrateSentInKbps", 3));
+ // Interval: 375 bytes * 2 / 2 sec = 375 bytes / sec = 3 kbps
+ EXPECT_EQ(1,
+ metrics::NumSamples("WebRTC.Video.RetransmittedBitrateSentInKbps"));
+ EXPECT_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.RetransmittedBitrateSentInKbps", 3));
+
+  // New metric counters but same data counters.
+  // Doubling the counter values should result in the same rates as before,
+  // but reported under the new (screenshare) histogram names.
+ for (int i = 0; i < kMinRequiredPeriodSamples; ++i) {
+ counters.transmitted.packets += 20;
+ counters.transmitted.header_bytes += 500;
+ counters.transmitted.padding_bytes += 1000;
+ counters.transmitted.payload_bytes += 2000;
+ counters.retransmitted.packets += 2;
+ counters.retransmitted.header_bytes += 25;
+ counters.retransmitted.padding_bytes += 100;
+ counters.retransmitted.payload_bytes += 250;
+ counters.fec = counters.retransmitted;
+ rtx_counters.transmitted = counters.transmitted;
+ // Advance one interval and update counters.
+ fake_clock_.AdvanceTimeMilliseconds(kPeriodIntervalMs);
+ proxy->DataCountersUpdated(counters, kFirstSsrc);
+ proxy->DataCountersUpdated(counters, kSecondSsrc);
+ proxy->DataCountersUpdated(rtx_counters, kFirstRtxSsrc);
+ proxy->DataCountersUpdated(rtx_counters, kSecondRtxSsrc);
+ }
+
+  // Resetting the stats proxy also causes histograms to be reported.
+ statistics_proxy_.reset();
+
+ // Interval: 3500 bytes * 4 / 2 sec = 7000 bytes / sec = 56 kbps
+ EXPECT_EQ(1,
+ metrics::NumSamples("WebRTC.Video.Screenshare.BitrateSentInKbps"));
+ EXPECT_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.Screenshare.BitrateSentInKbps", 56));
+ // Interval: 3500 bytes * 2 / 2 sec = 3500 bytes / sec = 28 kbps
+ EXPECT_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.Screenshare.RtxBitrateSentInKbps"));
+ EXPECT_EQ(1, metrics::NumEvents(
+ "WebRTC.Video.Screenshare.RtxBitrateSentInKbps", 28));
+ // Interval: (2000 - 2 * 250) bytes / 2 sec = 1500 bytes / sec = 12 kbps
+ EXPECT_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.Screenshare.MediaBitrateSentInKbps"));
+ EXPECT_EQ(1, metrics::NumEvents(
+ "WebRTC.Video.Screenshare.MediaBitrateSentInKbps", 12));
+ // Interval: 1000 bytes * 4 / 2 sec = 2000 bytes / sec = 16 kbps
+ EXPECT_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.Screenshare.PaddingBitrateSentInKbps"));
+ EXPECT_EQ(1, metrics::NumEvents(
+ "WebRTC.Video.Screenshare.PaddingBitrateSentInKbps", 16));
+ // Interval: 375 bytes * 2 / 2 sec = 375 bytes / sec = 3 kbps
+ EXPECT_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.Screenshare.FecBitrateSentInKbps"));
+ EXPECT_EQ(1, metrics::NumEvents(
+ "WebRTC.Video.Screenshare.FecBitrateSentInKbps", 3));
+ // Interval: 375 bytes * 2 / 2 sec = 375 bytes / sec = 3 kbps
+ EXPECT_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.Screenshare.RetransmittedBitrateSentInKbps"));
+ EXPECT_EQ(1,
+ metrics::NumEvents(
+ "WebRTC.Video.Screenshare.RetransmittedBitrateSentInKbps", 3));
+}
+
+TEST_F(SendStatisticsProxyTest, RtxBitrateIsZeroWhenEnabledAndNoRtxDataIsSent) {
+ StreamDataCountersCallback* proxy =
+ static_cast<StreamDataCountersCallback*>(statistics_proxy_.get());
+ StreamDataCounters counters;
+ StreamDataCounters rtx_counters;
+
+ const int kMinRequiredPeriodSamples = 8;
+ const int kPeriodIntervalMs = 2000;
+ for (int i = 0; i < kMinRequiredPeriodSamples; ++i) {
+ counters.transmitted.packets += 20;
+ counters.transmitted.header_bytes += 500;
+ counters.transmitted.payload_bytes += 2000;
+ counters.fec = counters.retransmitted;
+ // Advance one interval and update counters.
+ fake_clock_.AdvanceTimeMilliseconds(kPeriodIntervalMs);
+ proxy->DataCountersUpdated(counters, kFirstSsrc);
+ }
+
+ // RTX enabled. No data sent over RTX.
+ statistics_proxy_.reset();
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.RtxBitrateSentInKbps"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.RtxBitrateSentInKbps", 0));
+}
+
+TEST_F(SendStatisticsProxyTest, RtxBitrateNotReportedWhenNotEnabled) {
+ VideoSendStream::Config config(nullptr);
+ config.rtp.ssrcs.push_back(kFirstSsrc); // RTX not configured.
+ statistics_proxy_.reset(new SendStatisticsProxy(
+ &fake_clock_, config, VideoEncoderConfig::ContentType::kRealtimeVideo));
+
+ StreamDataCountersCallback* proxy =
+ static_cast<StreamDataCountersCallback*>(statistics_proxy_.get());
+ StreamDataCounters counters;
+
+ const int kMinRequiredPeriodSamples = 8;
+ const int kPeriodIntervalMs = 2000;
+ for (int i = 0; i < kMinRequiredPeriodSamples; ++i) {
+ counters.transmitted.packets += 20;
+ counters.transmitted.header_bytes += 500;
+ counters.transmitted.payload_bytes += 2000;
+ counters.fec = counters.retransmitted;
+ // Advance one interval and update counters.
+ fake_clock_.AdvanceTimeMilliseconds(kPeriodIntervalMs);
+ proxy->DataCountersUpdated(counters, kFirstSsrc);
+ }
+
+ // RTX not enabled.
+ statistics_proxy_.reset();
+ EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.RtxBitrateSentInKbps"));
+}
+
+TEST_F(SendStatisticsProxyTest, FecBitrateIsZeroWhenEnabledAndNoFecDataIsSent) {
+ StreamDataCountersCallback* proxy =
+ static_cast<StreamDataCountersCallback*>(statistics_proxy_.get());
+ StreamDataCounters counters;
+ StreamDataCounters rtx_counters;
+
+ const int kMinRequiredPeriodSamples = 8;
+ const int kPeriodIntervalMs = 2000;
+ for (int i = 0; i < kMinRequiredPeriodSamples; ++i) {
+ counters.transmitted.packets += 20;
+ counters.transmitted.header_bytes += 500;
+ counters.transmitted.payload_bytes += 2000;
+ // Advance one interval and update counters.
+ fake_clock_.AdvanceTimeMilliseconds(kPeriodIntervalMs);
+ proxy->DataCountersUpdated(counters, kFirstSsrc);
+ }
+
+ // FEC enabled. No FEC data sent.
+ statistics_proxy_.reset();
+ EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.FecBitrateSentInKbps"));
+ EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.FecBitrateSentInKbps", 0));
+}
+
+TEST_F(SendStatisticsProxyTest, FecBitrateNotReportedWhenNotEnabled) {
+ VideoSendStream::Config config(nullptr);
+ config.rtp.ssrcs.push_back(kFirstSsrc); // FEC not configured.
+ statistics_proxy_.reset(new SendStatisticsProxy(
+ &fake_clock_, config, VideoEncoderConfig::ContentType::kRealtimeVideo));
+
+ StreamDataCountersCallback* proxy =
+ static_cast<StreamDataCountersCallback*>(statistics_proxy_.get());
+ StreamDataCounters counters;
+
+ const int kMinRequiredPeriodSamples = 8;
+ const int kPeriodIntervalMs = 2000;
+ for (int i = 0; i < kMinRequiredPeriodSamples; ++i) {
+ counters.transmitted.packets += 20;
+ counters.transmitted.header_bytes += 500;
+ counters.transmitted.payload_bytes += 2000;
+ counters.fec = counters.retransmitted;
+ // Advance one interval and update counters.
+ fake_clock_.AdvanceTimeMilliseconds(kPeriodIntervalMs);
+ proxy->DataCountersUpdated(counters, kFirstSsrc);
+ }
+
+ // FEC not enabled.
+ statistics_proxy_.reset();
+ EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.FecBitrateSentInKbps"));
+}
+
+TEST_F(SendStatisticsProxyTest, GetStatsReportsEncoderImplementationName) {
+ const char* kName = "encoderName";
+ EncodedImage encoded_image;
+ CodecSpecificInfo codec_info;
+ codec_info.codec_name = kName;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+ EXPECT_STREQ(
+ kName, statistics_proxy_->GetStats().encoder_implementation_name.c_str());
+}
+
+class ForcedFallbackTest : public SendStatisticsProxyTest {
+ public:
+ explicit ForcedFallbackTest(const std::string& field_trials)
+ : SendStatisticsProxyTest(field_trials) {
+ codec_info_.codecType = kVideoCodecVP8;
+ codec_info_.codecSpecific.VP8.simulcastIdx = 0;
+ codec_info_.codecSpecific.VP8.temporalIdx = 0;
+ codec_info_.codec_name = "fake_codec";
+ encoded_image_._encodedWidth = kWidth;
+ encoded_image_._encodedHeight = kHeight;
+ }
+
+ ~ForcedFallbackTest() override {}
+
+ protected:
+ void InsertEncodedFrames(int num_frames, int interval_ms) {
+ // First frame is not updating stats, insert initial frame.
+ if (statistics_proxy_->GetStats().frames_encoded == 0) {
+ statistics_proxy_->OnSendEncodedImage(encoded_image_, &codec_info_);
+ }
+ for (int i = 0; i < num_frames; ++i) {
+ statistics_proxy_->OnSendEncodedImage(encoded_image_, &codec_info_);
+ fake_clock_.AdvanceTimeMilliseconds(interval_ms);
+ }
+ // Add frame to include last time interval.
+ statistics_proxy_->OnSendEncodedImage(encoded_image_, &codec_info_);
+ }
+
+ EncodedImage encoded_image_;
+ CodecSpecificInfo codec_info_;
+ const std::string kPrefix = "WebRTC.Video.Encoder.ForcedSw";
+ const int kFrameIntervalMs = 1000;
+ const int kMinFrames = 20; // Min run time 20 sec.
+};
+
+class ForcedFallbackDisabled : public ForcedFallbackTest {
+ public:
+ ForcedFallbackDisabled()
+ : ForcedFallbackTest("WebRTC-VP8-Forced-Fallback-Encoder-v2/Disabled-1," +
+ std::to_string(kWidth * kHeight) + ",3/") {}
+};
+
+class ForcedFallbackEnabled : public ForcedFallbackTest {
+ public:
+ ForcedFallbackEnabled()
+ : ForcedFallbackTest("WebRTC-VP8-Forced-Fallback-Encoder-v2/Enabled-1," +
+ std::to_string(kWidth * kHeight) + ",3/") {}
+};
+
+TEST_F(ForcedFallbackEnabled, StatsNotUpdatedIfMinRunTimeHasNotPassed) {
+ InsertEncodedFrames(kMinFrames, kFrameIntervalMs - 1);
+ statistics_proxy_.reset();
+ EXPECT_EQ(0, metrics::NumSamples(kPrefix + "FallbackTimeInPercent.Vp8"));
+ EXPECT_EQ(0, metrics::NumSamples(kPrefix + "FallbackChangesPerMinute.Vp8"));
+}
+
+TEST_F(ForcedFallbackEnabled, StatsUpdated) {
+ InsertEncodedFrames(kMinFrames, kFrameIntervalMs);
+ EXPECT_FALSE(statistics_proxy_->GetStats().has_entered_low_resolution);
+ statistics_proxy_.reset();
+ EXPECT_EQ(1, metrics::NumSamples(kPrefix + "FallbackTimeInPercent.Vp8"));
+ EXPECT_EQ(1, metrics::NumEvents(kPrefix + "FallbackTimeInPercent.Vp8", 0));
+ EXPECT_EQ(1, metrics::NumSamples(kPrefix + "FallbackChangesPerMinute.Vp8"));
+ EXPECT_EQ(1, metrics::NumEvents(kPrefix + "FallbackChangesPerMinute.Vp8", 0));
+}
+
+TEST_F(ForcedFallbackEnabled, StatsNotUpdatedIfNotVp8) {
+ codec_info_.codecType = kVideoCodecVP9;
+ InsertEncodedFrames(kMinFrames, kFrameIntervalMs);
+ statistics_proxy_.reset();
+ EXPECT_EQ(0, metrics::NumSamples(kPrefix + "FallbackTimeInPercent.Vp8"));
+ EXPECT_EQ(0, metrics::NumSamples(kPrefix + "FallbackChangesPerMinute.Vp8"));
+}
+
+TEST_F(ForcedFallbackEnabled, StatsNotUpdatedForTemporalLayers) {
+ codec_info_.codecSpecific.VP8.temporalIdx = 1;
+ InsertEncodedFrames(kMinFrames, kFrameIntervalMs);
+ statistics_proxy_.reset();
+ EXPECT_EQ(0, metrics::NumSamples(kPrefix + "FallbackTimeInPercent.Vp8"));
+ EXPECT_EQ(0, metrics::NumSamples(kPrefix + "FallbackChangesPerMinute.Vp8"));
+}
+
+TEST_F(ForcedFallbackEnabled, StatsNotUpdatedForSimulcast) {
+ codec_info_.codecSpecific.VP8.simulcastIdx = 1;
+ InsertEncodedFrames(kMinFrames, kFrameIntervalMs);
+ statistics_proxy_.reset();
+ EXPECT_EQ(0, metrics::NumSamples(kPrefix + "FallbackTimeInPercent.Vp8"));
+ EXPECT_EQ(0, metrics::NumSamples(kPrefix + "FallbackChangesPerMinute.Vp8"));
+}
+
+TEST_F(ForcedFallbackDisabled, StatsNotUpdatedIfNoFieldTrial) {
+ InsertEncodedFrames(kMinFrames, kFrameIntervalMs);
+ statistics_proxy_.reset();
+ EXPECT_EQ(0, metrics::NumSamples(kPrefix + "FallbackTimeInPercent.Vp8"));
+ EXPECT_EQ(0, metrics::NumSamples(kPrefix + "FallbackChangesPerMinute.Vp8"));
+}
+
+TEST_F(ForcedFallbackDisabled, EnteredLowResolutionSetIfAtMaxPixels) {
+ InsertEncodedFrames(1, kFrameIntervalMs);
+ EXPECT_TRUE(statistics_proxy_->GetStats().has_entered_low_resolution);
+}
+
+TEST_F(ForcedFallbackEnabled, EnteredLowResolutionNotSetIfNotLibvpx) {
+ InsertEncodedFrames(1, kFrameIntervalMs);
+ EXPECT_FALSE(statistics_proxy_->GetStats().has_entered_low_resolution);
+}
+
+TEST_F(ForcedFallbackEnabled, EnteredLowResolutionSetIfLibvpx) {
+ codec_info_.codec_name = "libvpx";
+ InsertEncodedFrames(1, kFrameIntervalMs);
+ EXPECT_TRUE(statistics_proxy_->GetStats().has_entered_low_resolution);
+}
+
+TEST_F(ForcedFallbackDisabled, EnteredLowResolutionNotSetIfAboveMaxPixels) {
+ encoded_image_._encodedWidth = kWidth + 1;
+ InsertEncodedFrames(1, kFrameIntervalMs);
+ EXPECT_FALSE(statistics_proxy_->GetStats().has_entered_low_resolution);
+}
+
+TEST_F(ForcedFallbackDisabled, EnteredLowResolutionNotSetIfLibvpx) {
+ codec_info_.codec_name = "libvpx";
+ InsertEncodedFrames(1, kFrameIntervalMs);
+ EXPECT_FALSE(statistics_proxy_->GetStats().has_entered_low_resolution);
+}
+
+TEST_F(ForcedFallbackDisabled,
+ EnteredLowResolutionSetIfOnMinPixelLimitReached) {
+ encoded_image_._encodedWidth = kWidth + 1;
+ statistics_proxy_->OnMinPixelLimitReached();
+ InsertEncodedFrames(1, kFrameIntervalMs);
+ EXPECT_TRUE(statistics_proxy_->GetStats().has_entered_low_resolution);
+}
+
+TEST_F(ForcedFallbackEnabled, OneFallbackEvent) {
+ // One change. Video: 20000 ms, fallback: 5000 ms (25%).
+ EXPECT_FALSE(statistics_proxy_->GetStats().has_entered_low_resolution);
+ InsertEncodedFrames(15, 1000);
+ EXPECT_FALSE(statistics_proxy_->GetStats().has_entered_low_resolution);
+ codec_info_.codec_name = "libvpx";
+ InsertEncodedFrames(5, 1000);
+ EXPECT_TRUE(statistics_proxy_->GetStats().has_entered_low_resolution);
+
+ statistics_proxy_.reset();
+ EXPECT_EQ(1, metrics::NumSamples(kPrefix + "FallbackTimeInPercent.Vp8"));
+ EXPECT_EQ(1, metrics::NumEvents(kPrefix + "FallbackTimeInPercent.Vp8", 25));
+ EXPECT_EQ(1, metrics::NumSamples(kPrefix + "FallbackChangesPerMinute.Vp8"));
+ EXPECT_EQ(1, metrics::NumEvents(kPrefix + "FallbackChangesPerMinute.Vp8", 3));
+}
+
+TEST_F(ForcedFallbackEnabled, ThreeFallbackEvents) {
+  // Stats should also be updated with kNoTemporalIdx.
+  codec_info_.codecSpecific.VP8.temporalIdx = kNoTemporalIdx;
+ const int kMaxFrameDiffMs = 2000;
+
+ // Three changes. Video: 60000 ms, fallback: 15000 ms (25%).
+ InsertEncodedFrames(10, 1000);
+ EXPECT_FALSE(statistics_proxy_->GetStats().has_entered_low_resolution);
+ codec_info_.codec_name = "libvpx";
+ InsertEncodedFrames(15, 500);
+ EXPECT_TRUE(statistics_proxy_->GetStats().has_entered_low_resolution);
+ codec_info_.codec_name = "notlibvpx";
+ InsertEncodedFrames(20, 1000);
+ InsertEncodedFrames(3, kMaxFrameDiffMs); // Should not be included.
+ InsertEncodedFrames(10, 1000);
+ EXPECT_TRUE(statistics_proxy_->GetStats().has_entered_low_resolution);
+ codec_info_.codec_name = "notlibvpx2";
+ InsertEncodedFrames(10, 500);
+ EXPECT_TRUE(statistics_proxy_->GetStats().has_entered_low_resolution);
+ codec_info_.codec_name = "libvpx";
+ InsertEncodedFrames(15, 500);
+ EXPECT_TRUE(statistics_proxy_->GetStats().has_entered_low_resolution);
+
+ statistics_proxy_.reset();
+ EXPECT_EQ(1, metrics::NumSamples(kPrefix + "FallbackTimeInPercent.Vp8"));
+ EXPECT_EQ(1, metrics::NumEvents(kPrefix + "FallbackTimeInPercent.Vp8", 25));
+ EXPECT_EQ(1, metrics::NumSamples(kPrefix + "FallbackChangesPerMinute.Vp8"));
+ EXPECT_EQ(1, metrics::NumEvents(kPrefix + "FallbackChangesPerMinute.Vp8", 3));
+}
+
+TEST_F(ForcedFallbackEnabled, NoFallbackIfAboveMaxPixels) {
+ encoded_image_._encodedWidth = kWidth + 1;
+ codec_info_.codec_name = "libvpx";
+ InsertEncodedFrames(kMinFrames, kFrameIntervalMs);
+
+ EXPECT_FALSE(statistics_proxy_->GetStats().has_entered_low_resolution);
+ statistics_proxy_.reset();
+ EXPECT_EQ(0, metrics::NumSamples(kPrefix + "FallbackTimeInPercent.Vp8"));
+ EXPECT_EQ(0, metrics::NumSamples(kPrefix + "FallbackChangesPerMinute.Vp8"));
+}
+
+TEST_F(ForcedFallbackEnabled, FallbackIfAtMaxPixels) {
+ encoded_image_._encodedWidth = kWidth;
+ codec_info_.codec_name = "libvpx";
+ InsertEncodedFrames(kMinFrames, kFrameIntervalMs);
+
+ EXPECT_TRUE(statistics_proxy_->GetStats().has_entered_low_resolution);
+ statistics_proxy_.reset();
+ EXPECT_EQ(1, metrics::NumSamples(kPrefix + "FallbackTimeInPercent.Vp8"));
+ EXPECT_EQ(1, metrics::NumSamples(kPrefix + "FallbackChangesPerMinute.Vp8"));
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/video/stats_counter.cc b/third_party/libwebrtc/webrtc/video/stats_counter.cc
new file mode 100644
index 0000000000..bcbf9124a2
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/stats_counter.cc
@@ -0,0 +1,462 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/stats_counter.h"
+
+#include <algorithm>
+#include <limits>
+#include <map>
+
+#include "rtc_base/checks.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+namespace {
+// Default periodic time interval for processing samples.
+const int64_t kDefaultProcessIntervalMs = 2000;
+const uint32_t kStreamId0 = 0;
+} // namespace
+
+std::string AggregatedStats::ToString() const {
+ return ToStringWithMultiplier(1);
+}
+
+std::string AggregatedStats::ToStringWithMultiplier(int multiplier) const {
+ std::stringstream ss;
+ ss << "periodic_samples:" << num_samples << ", {";
+ ss << "min:" << (min * multiplier) << ", ";
+ ss << "avg:" << (average * multiplier) << ", ";
+ ss << "max:" << (max * multiplier) << "}";
+ return ss.str();
+}
+
+// Class holding periodically computed metrics.
+class AggregatedCounter {
+ public:
+ AggregatedCounter() : last_sample_(0), sum_samples_(0) {}
+ ~AggregatedCounter() {}
+
+ void Add(int sample) {
+ last_sample_ = sample;
+ sum_samples_ += sample;
+ ++stats_.num_samples;
+ if (stats_.num_samples == 1) {
+ stats_.min = sample;
+ stats_.max = sample;
+ }
+ stats_.min = std::min(sample, stats_.min);
+ stats_.max = std::max(sample, stats_.max);
+ }
+
+ AggregatedStats ComputeStats() {
+ Compute();
+ return stats_;
+ }
+
+ bool Empty() const { return stats_.num_samples == 0; }
+
+ int last_sample() const { return last_sample_; }
+
+ private:
+ void Compute() {
+ if (stats_.num_samples == 0)
+ return;
+
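+    // Integer average, rounded to nearest: adding num_samples / 2 before
+    // dividing rounds instead of truncating.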
+ stats_.average =
+ (sum_samples_ + stats_.num_samples / 2) / stats_.num_samples;
+ }
+ int last_sample_;
+ int64_t sum_samples_;
+ AggregatedStats stats_;
+};
+
+// Class holding gathered samples within a process interval.
+class Samples {
+ public:
+ Samples() : total_count_(0) {}
+ ~Samples() {}
+
+ void Add(int sample, uint32_t stream_id) {
+ samples_[stream_id].Add(sample);
+ ++total_count_;
+ }
+ void Set(int64_t sample, uint32_t stream_id) {
+ samples_[stream_id].Set(sample);
+ ++total_count_;
+ }
+ void SetLast(int64_t sample, uint32_t stream_id) {
+ samples_[stream_id].SetLast(sample);
+ }
+ int64_t GetLast(uint32_t stream_id) { return samples_[stream_id].GetLast(); }
+
+ int64_t Count() const { return total_count_; }
+ bool Empty() const { return total_count_ == 0; }
+
+ int64_t Sum() const {
+ int64_t sum = 0;
+ for (const auto& it : samples_)
+ sum += it.second.sum_;
+ return sum;
+ }
+
+ int Max() const {
+ int max = std::numeric_limits<int>::min();
+ for (const auto& it : samples_)
+ max = std::max(it.second.max_, max);
+ return max;
+ }
+
+ void Reset() {
+ for (auto& it : samples_)
+ it.second.Reset();
+ total_count_ = 0;
+ }
+
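+  // Sum of the per-stream deltas since the last Reset(). A stream only
+  // contributes if it received samples this interval and its counter did not
+  // decrease; returns -1 if no stream contributed.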
+ int64_t Diff() const {
+ int64_t sum_diff = 0;
+ int count = 0;
+ for (const auto& it : samples_) {
+ if (it.second.count_ > 0) {
+ int64_t diff = it.second.sum_ - it.second.last_sum_;
+ if (diff >= 0) {
+ sum_diff += diff;
+ ++count;
+ }
+ }
+ }
+ return (count > 0) ? sum_diff : -1;
+ }
+
+ private:
+ struct Stats {
+ void Add(int sample) {
+ sum_ += sample;
+ ++count_;
+ max_ = std::max(sample, max_);
+ }
+ void Set(int64_t sample) {
+ sum_ = sample;
+ ++count_;
+ }
+ void SetLast(int64_t sample) { last_sum_ = sample; }
+ int64_t GetLast() const { return last_sum_; }
+ void Reset() {
+ if (count_ > 0)
+ last_sum_ = sum_;
+ sum_ = 0;
+ count_ = 0;
+ max_ = std::numeric_limits<int>::min();
+ }
+
+ int max_ = std::numeric_limits<int>::min();
+ int64_t count_ = 0;
+ int64_t sum_ = 0;
+ int64_t last_sum_ = 0;
+ };
+
+ int64_t total_count_;
+ std::map<uint32_t, Stats> samples_; // Gathered samples mapped by stream id.
+};
+
+// StatsCounter class.
+StatsCounter::StatsCounter(Clock* clock,
+ int64_t process_intervals_ms,
+ bool include_empty_intervals,
+ StatsCounterObserver* observer)
+ : include_empty_intervals_(include_empty_intervals),
+ process_intervals_ms_(process_intervals_ms),
+ aggregated_counter_(new AggregatedCounter()),
+ samples_(new Samples()),
+ clock_(clock),
+ observer_(observer),
+ last_process_time_ms_(-1),
+ paused_(false),
+ pause_time_ms_(-1),
+ min_pause_time_ms_(0) {
+ RTC_DCHECK_GT(process_intervals_ms_, 0);
+}
+
+StatsCounter::~StatsCounter() {}
+
+AggregatedStats StatsCounter::GetStats() {
+ return aggregated_counter_->ComputeStats();
+}
+
+AggregatedStats StatsCounter::ProcessAndGetStats() {
+ if (HasSample())
+ TryProcess();
+ return aggregated_counter_->ComputeStats();
+}
+
+void StatsCounter::ProcessAndPauseForDuration(int64_t min_pause_time_ms) {
+ ProcessAndPause();
+ min_pause_time_ms_ = min_pause_time_ms;
+}
+
+void StatsCounter::ProcessAndPause() {
+ if (HasSample())
+ TryProcess();
+ paused_ = true;
+ pause_time_ms_ = clock_->TimeInMilliseconds();
+}
+
+void StatsCounter::ProcessAndStopPause() {
+ if (HasSample())
+ TryProcess();
+ Resume();
+}
+
+bool StatsCounter::HasSample() const {
+ return last_process_time_ms_ != -1;
+}
+
+bool StatsCounter::TimeToProcess(int* elapsed_intervals) {
+ int64_t now = clock_->TimeInMilliseconds();
+ if (last_process_time_ms_ == -1)
+ last_process_time_ms_ = now;
+
+ int64_t diff_ms = now - last_process_time_ms_;
+ if (diff_ms < process_intervals_ms_)
+ return false;
+
+  // Advance by the number of complete |process_intervals_ms_| intervals that
+  // have passed.
+ int64_t num_intervals = diff_ms / process_intervals_ms_;
+ last_process_time_ms_ += num_intervals * process_intervals_ms_;
+
+ *elapsed_intervals = num_intervals;
+ return true;
+}
+
+void StatsCounter::Add(int sample) {
+ TryProcess();
+ samples_->Add(sample, kStreamId0);
+ ResumeIfMinTimePassed();
+}
+
+void StatsCounter::Set(int64_t sample, uint32_t stream_id) {
+ if (paused_ && sample == samples_->GetLast(stream_id)) {
+    // Do not add the same sample while paused (adding would reset the pause).
+ return;
+ }
+ TryProcess();
+ samples_->Set(sample, stream_id);
+ ResumeIfMinTimePassed();
+}
+
+void StatsCounter::SetLast(int64_t sample, uint32_t stream_id) {
+ RTC_DCHECK(!HasSample()) << "Should be set before first sample is added.";
+ samples_->SetLast(sample, stream_id);
+}
+
+// Reports periodically computed metric.
+void StatsCounter::ReportMetricToAggregatedCounter(
+ int value,
+ int num_values_to_add) const {
+ for (int i = 0; i < num_values_to_add; ++i) {
+ aggregated_counter_->Add(value);
+ if (observer_)
+ observer_->OnMetricUpdated(value);
+ }
+}
+
+void StatsCounter::TryProcess() {
+ int elapsed_intervals;
+ if (!TimeToProcess(&elapsed_intervals))
+ return;
+
+ // Get and report periodically computed metric.
+ int metric;
+ if (GetMetric(&metric))
+ ReportMetricToAggregatedCounter(metric, 1);
+
+ // Report value for elapsed intervals without samples.
+ if (IncludeEmptyIntervals()) {
+    // If there are no samples, all elapsed intervals are empty; otherwise one
+    // interval contains the sample(s) and is discarded.
+ int empty_intervals =
+ samples_->Empty() ? elapsed_intervals : (elapsed_intervals - 1);
+ ReportMetricToAggregatedCounter(GetValueForEmptyInterval(),
+ empty_intervals);
+ }
+
+ // Reset samples for elapsed interval.
+ samples_->Reset();
+}
+
+bool StatsCounter::IncludeEmptyIntervals() const {
+ return include_empty_intervals_ && !paused_ && !aggregated_counter_->Empty();
+}
+
+void StatsCounter::ResumeIfMinTimePassed() {
+ if (paused_ &&
+ (clock_->TimeInMilliseconds() - pause_time_ms_) >= min_pause_time_ms_) {
+ Resume();
+ }
+}
+
+void StatsCounter::Resume() {
+ paused_ = false;
+ min_pause_time_ms_ = 0;
+}
+
+// StatsCounter sub-classes.
+AvgCounter::AvgCounter(Clock* clock,
+ StatsCounterObserver* observer,
+ bool include_empty_intervals)
+ : StatsCounter(clock,
+ kDefaultProcessIntervalMs,
+ include_empty_intervals,
+ observer) {}
+
+void AvgCounter::Add(int sample) {
+ StatsCounter::Add(sample);
+}
+
+bool AvgCounter::GetMetric(int* metric) const {
+ int64_t count = samples_->Count();
+ if (count == 0)
+ return false;
+
+ *metric = (samples_->Sum() + count / 2) / count;
+ return true;
+}
+
+int AvgCounter::GetValueForEmptyInterval() const {
+ return aggregated_counter_->last_sample();
+}
+
+MaxCounter::MaxCounter(Clock* clock,
+ StatsCounterObserver* observer,
+ int64_t process_intervals_ms)
+ : StatsCounter(clock,
+ process_intervals_ms,
+ false, // |include_empty_intervals|
+ observer) {}
+
+void MaxCounter::Add(int sample) {
+ StatsCounter::Add(sample);
+}
+
+bool MaxCounter::GetMetric(int* metric) const {
+ if (samples_->Empty())
+ return false;
+
+ *metric = samples_->Max();
+ return true;
+}
+
+int MaxCounter::GetValueForEmptyInterval() const {
+ RTC_NOTREACHED();
+ return 0;
+}
+
+PercentCounter::PercentCounter(Clock* clock, StatsCounterObserver* observer)
+ : StatsCounter(clock,
+ kDefaultProcessIntervalMs,
+ false, // |include_empty_intervals|
+ observer) {}
+
+void PercentCounter::Add(bool sample) {
+ StatsCounter::Add(sample ? 1 : 0);
+}
+
+bool PercentCounter::GetMetric(int* metric) const {
+ int64_t count = samples_->Count();
+ if (count == 0)
+ return false;
+
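+  // Boolean samples are stored as 0/1, so the rounded mean times 100 is the
+  // percentage of true samples.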
+ *metric = (samples_->Sum() * 100 + count / 2) / count;
+ return true;
+}
+
+int PercentCounter::GetValueForEmptyInterval() const {
+ RTC_NOTREACHED();
+ return 0;
+}
+
+PermilleCounter::PermilleCounter(Clock* clock, StatsCounterObserver* observer)
+ : StatsCounter(clock,
+ kDefaultProcessIntervalMs,
+ false, // |include_empty_intervals|
+ observer) {}
+
+void PermilleCounter::Add(bool sample) {
+ StatsCounter::Add(sample ? 1 : 0);
+}
+
+bool PermilleCounter::GetMetric(int* metric) const {
+ int64_t count = samples_->Count();
+ if (count == 0)
+ return false;
+
+ *metric = (samples_->Sum() * 1000 + count / 2) / count;
+ return true;
+}
+
+int PermilleCounter::GetValueForEmptyInterval() const {
+ RTC_NOTREACHED();
+ return 0;
+}
+
+RateCounter::RateCounter(Clock* clock,
+ StatsCounterObserver* observer,
+ bool include_empty_intervals)
+ : StatsCounter(clock,
+ kDefaultProcessIntervalMs,
+ include_empty_intervals,
+ observer) {}
+
+void RateCounter::Add(int sample) {
+ StatsCounter::Add(sample);
+}
+
+bool RateCounter::GetMetric(int* metric) const {
+ if (samples_->Empty())
+ return false;
+
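+  // Accumulated sample units per second over the process interval, rounded to
+  // nearest.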
+ *metric = (samples_->Sum() * 1000 + process_intervals_ms_ / 2) /
+ process_intervals_ms_;
+ return true;
+}
+
+int RateCounter::GetValueForEmptyInterval() const {
+ return 0;
+}
+
+RateAccCounter::RateAccCounter(Clock* clock,
+ StatsCounterObserver* observer,
+ bool include_empty_intervals)
+ : StatsCounter(clock,
+ kDefaultProcessIntervalMs,
+ include_empty_intervals,
+ observer) {}
+
+void RateAccCounter::Set(int64_t sample, uint32_t stream_id) {
+ StatsCounter::Set(sample, stream_id);
+}
+
+void RateAccCounter::SetLast(int64_t sample, uint32_t stream_id) {
+ StatsCounter::SetLast(sample, stream_id);
+}
+
+bool RateAccCounter::GetMetric(int* metric) const {
+ int64_t diff = samples_->Diff();
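+  // A negative diff means the underlying value decreased; such intervals are
+  // ignored. A zero diff is reported only if empty intervals are included.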
+ if (diff < 0 || (!include_empty_intervals_ && diff == 0))
+ return false;
+
+ *metric = (diff * 1000 + process_intervals_ms_ / 2) / process_intervals_ms_;
+ return true;
+}
+
+int RateAccCounter::GetValueForEmptyInterval() const {
+ return 0;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/video/stats_counter.h b/third_party/libwebrtc/webrtc/video/stats_counter.h
new file mode 100644
index 0000000000..5a52e86bc9
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/stats_counter.h
@@ -0,0 +1,293 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_STATS_COUNTER_H_
+#define VIDEO_STATS_COUNTER_H_
+
+#include <memory>
+#include <string>
+
+#include "rtc_base/constructormagic.h"
+#include "typedefs.h" // NOLINT(build/include)
+
+namespace webrtc {
+
+class AggregatedCounter;
+class Clock;
+class Samples;
+
+// |StatsCounterObserver| is called periodically when a metric is updated.
+class StatsCounterObserver {
+ public:
+ virtual void OnMetricUpdated(int sample) = 0;
+
+ virtual ~StatsCounterObserver() {}
+};
+
+struct AggregatedStats {
+ std::string ToString() const;
+ std::string ToStringWithMultiplier(int multiplier) const;
+
+ int64_t num_samples = 0;
+ int min = -1;
+ int max = -1;
+ int average = -1;
+ // TODO(asapersson): Consider adding median/percentiles.
+};
+
+// Classes that periodically compute a metric.
+//
+// During a period, |kDefaultProcessIntervalMs|, different metrics can be
+// computed, e.g.:
+// - |AvgCounter|: average of samples
+// - |PercentCounter|: percentage of samples
+// - |PermilleCounter|: permille of samples
+//
+// Each periodic metric can be either:
+// - reported to an |observer| each period
+// - aggregated during the call (e.g. min, max, average)
+//
+// periodically computed
+// GetMetric() GetMetric() => AggregatedStats
+// ^ ^ (e.g. min/max/avg)
+// | |
+// | * * * * | ** * * * * | ...
+// |<- process interval ->|
+//
+// (*) - samples
+//
+//
+// Example usage:
+//
+//   AvgCounter counter(&clock, nullptr, false);
+// counter.Add(5);
+// counter.Add(1);
+// counter.Add(6); // process interval passed -> GetMetric() avg:4
+// counter.Add(7);
+// counter.Add(3); // process interval passed -> GetMetric() avg:5
+// counter.Add(10);
+// counter.Add(20); // process interval passed -> GetMetric() avg:15
+// AggregatedStats stats = counter.GetStats();
+// stats: {min:4, max:15, avg:8}
+//
+
+// Note: StatsCounter takes ownership of |observer|.
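+//
+// Example observer (a minimal sketch; |LoggingObserver| is a hypothetical
+// name, not part of this API):
+//
+//   class LoggingObserver : public StatsCounterObserver {
+//    public:
+//     void OnMetricUpdated(int sample) override {
+//       RTC_LOG(LS_INFO) << "Periodic metric: " << sample;
+//     }
+//   };
+//   // The counter takes ownership of the observer.
+//   AvgCounter counter(&clock, new LoggingObserver(), false);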
+
+class StatsCounter {
+ public:
+ virtual ~StatsCounter();
+
+  // Gets metric within an interval. Returns true on success, false otherwise.
+ virtual bool GetMetric(int* metric) const = 0;
+
+ // Gets the value to use for an interval without samples.
+ virtual int GetValueForEmptyInterval() const = 0;
+
+ // Gets aggregated stats (i.e. aggregate of periodically computed metrics).
+ AggregatedStats GetStats();
+
+ // Reports metrics for elapsed intervals to AggregatedCounter and GetStats.
+ AggregatedStats ProcessAndGetStats();
+
+ // Reports metrics for elapsed intervals to AggregatedCounter and pauses stats
+  // (i.e. empty intervals will be discarded until the next sample is added).
+ void ProcessAndPause();
+
+  // As above, but with a minimum pause time. Samples added within this
+  // interval will not resume the stats (i.e. will not stop the pause).
+ void ProcessAndPauseForDuration(int64_t min_pause_time_ms);
+
+ // Reports metrics for elapsed intervals to AggregatedCounter and stops pause.
+ void ProcessAndStopPause();
+
+ // Checks if a sample has been added (i.e. Add or Set called).
+ bool HasSample() const;
+
+ protected:
+ StatsCounter(Clock* clock,
+ int64_t process_intervals_ms,
+ bool include_empty_intervals,
+ StatsCounterObserver* observer);
+
+ void Add(int sample);
+ void Set(int64_t sample, uint32_t stream_id);
+ void SetLast(int64_t sample, uint32_t stream_id);
+
+ const bool include_empty_intervals_;
+ const int64_t process_intervals_ms_;
+ const std::unique_ptr<AggregatedCounter> aggregated_counter_;
+ const std::unique_ptr<Samples> samples_;
+
+ private:
+ bool TimeToProcess(int* num_elapsed_intervals);
+ void TryProcess();
+ void ReportMetricToAggregatedCounter(int value, int num_values_to_add) const;
+ bool IncludeEmptyIntervals() const;
+ void Resume();
+ void ResumeIfMinTimePassed();
+
+ Clock* const clock_;
+ const std::unique_ptr<StatsCounterObserver> observer_;
+ int64_t last_process_time_ms_;
+ bool paused_;
+ int64_t pause_time_ms_;
+ int64_t min_pause_time_ms_;
+};
+
+// AvgCounter: average of samples
+//
+// | * * * | * * | ...
+// | Add(5) Add(1) Add(6) | Add(5) Add(5) |
+// GetMetric | (5 + 1 + 6) / 3 | (5 + 5) / 2 |
+//
+// |include_empty_intervals|: If set, intervals without samples will be included
+// in the stats. The value for an interval is
+// determined by GetValueForEmptyInterval().
+//
+class AvgCounter : public StatsCounter {
+ public:
+ AvgCounter(Clock* clock,
+ StatsCounterObserver* observer,
+ bool include_empty_intervals);
+ ~AvgCounter() override {}
+
+ void Add(int sample);
+
+ private:
+ bool GetMetric(int* metric) const override;
+
+ // Returns the last computed metric (i.e. from GetMetric).
+ int GetValueForEmptyInterval() const override;
+
+ RTC_DISALLOW_COPY_AND_ASSIGN(AvgCounter);
+};
+
+// MaxCounter: maximum of samples
+//
+// | * * * | * * | ...
+// | Add(5) Add(1) Add(6) | Add(5) Add(5) |
+// GetMetric | max: (5, 1, 6) | max: (5, 5) |
+//
+class MaxCounter : public StatsCounter {
+ public:
+ MaxCounter(Clock* clock,
+ StatsCounterObserver* observer,
+ int64_t process_intervals_ms);
+ ~MaxCounter() override {}
+
+ void Add(int sample);
+
+ private:
+ bool GetMetric(int* metric) const override;
+ int GetValueForEmptyInterval() const override;
+
+ RTC_DISALLOW_COPY_AND_ASSIGN(MaxCounter);
+};
+
+// PercentCounter: percentage of samples
+//
+// | * * * | * * | ...
+// | Add(T) Add(F) Add(T) | Add(F) Add(T) |
+// GetMetric | 100 * 2 / 3 | 100 * 1 / 2 |
+//
+class PercentCounter : public StatsCounter {
+ public:
+ PercentCounter(Clock* clock, StatsCounterObserver* observer);
+ ~PercentCounter() override {}
+
+ void Add(bool sample);
+
+ private:
+ bool GetMetric(int* metric) const override;
+ int GetValueForEmptyInterval() const override;
+
+ RTC_DISALLOW_COPY_AND_ASSIGN(PercentCounter);
+};
+
+// PermilleCounter: permille of samples
+//
+// | * * * | * * | ...
+// | Add(T) Add(F) Add(T) | Add(F) Add(T) |
+// GetMetric | 1000 * 2 / 3 | 1000 * 1 / 2 |
+//
+class PermilleCounter : public StatsCounter {
+ public:
+ PermilleCounter(Clock* clock, StatsCounterObserver* observer);
+ ~PermilleCounter() override {}
+
+ void Add(bool sample);
+
+ private:
+ bool GetMetric(int* metric) const override;
+ int GetValueForEmptyInterval() const override;
+
+ RTC_DISALLOW_COPY_AND_ASSIGN(PermilleCounter);
+};
+
+// RateCounter: units per second
+//
+// | * * * | * * | ...
+// | Add(5) Add(1) Add(6) | Add(5) Add(5) |
+// |<------ 2 sec ------->| |
+// GetMetric | (5 + 1 + 6) / 2 | (5 + 5) / 2 |
+//
+// |include_empty_intervals|: If set, intervals without samples will be included
+// in the stats. The value for an interval is
+// determined by GetValueForEmptyInterval().
+//
+class RateCounter : public StatsCounter {
+ public:
+ RateCounter(Clock* clock,
+ StatsCounterObserver* observer,
+ bool include_empty_intervals);
+ ~RateCounter() override {}
+
+ void Add(int sample);
+
+ private:
+ bool GetMetric(int* metric) const override;
+ int GetValueForEmptyInterval() const override; // Returns zero.
+
+ RTC_DISALLOW_COPY_AND_ASSIGN(RateCounter);
+};
+
+// RateAccCounter: units per second (used for counters)
+//
+// | * * * | * * | ...
+// | Set(5) Set(6) Set(8) | Set(11) Set(13) |
+// |<------ 2 sec ------->| |
+// GetMetric | (8 - 0) / 2 | (13 - 8) / 2 |
+//
+// |include_empty_intervals|: If set, intervals without samples will be included
+// in the stats. The value for an interval is
+// determined by GetValueForEmptyInterval().
+//
+class RateAccCounter : public StatsCounter {
+ public:
+ RateAccCounter(Clock* clock,
+ StatsCounterObserver* observer,
+ bool include_empty_intervals);
+ ~RateAccCounter() override {}
+
+ void Set(int64_t sample, uint32_t stream_id);
+
+  // Sets the value for the previous interval.
+  // To be used if a value other than zero is initially required.
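+  // E.g. SetLast(98, ssrc) followed by Set(188, ssrc) makes the first
+  // interval's rate be computed from (188 - 98) instead of (188 - 0).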
+ void SetLast(int64_t sample, uint32_t stream_id);
+
+ private:
+ bool GetMetric(int* metric) const override;
+ int GetValueForEmptyInterval() const override; // Returns zero.
+
+ RTC_DISALLOW_COPY_AND_ASSIGN(RateAccCounter);
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_STATS_COUNTER_H_
diff --git a/third_party/libwebrtc/webrtc/video/stats_counter_unittest.cc b/third_party/libwebrtc/webrtc/video/stats_counter_unittest.cc
new file mode 100644
index 0000000000..3f00d24eec
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/stats_counter_unittest.cc
@@ -0,0 +1,603 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/stats_counter.h"
+
+#include "system_wrappers/include/clock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+const int kDefaultProcessIntervalMs = 2000;
+const uint32_t kStreamId = 123456;
+
+class StatsCounterObserverImpl : public StatsCounterObserver {
+ public:
+ StatsCounterObserverImpl() : num_calls_(0), last_sample_(-1) {}
+ void OnMetricUpdated(int sample) override {
+ ++num_calls_;
+ last_sample_ = sample;
+ }
+ int num_calls_;
+ int last_sample_;
+};
+} // namespace
+
+class StatsCounterTest : public ::testing::Test {
+ protected:
+ StatsCounterTest()
+ : clock_(1234) {}
+
+ void AddSampleAndAdvance(int sample, int interval_ms, AvgCounter* counter) {
+ counter->Add(sample);
+ clock_.AdvanceTimeMilliseconds(interval_ms);
+ }
+
+ void SetSampleAndAdvance(int sample,
+ int interval_ms,
+ RateAccCounter* counter) {
+ counter->Set(sample, kStreamId);
+ clock_.AdvanceTimeMilliseconds(interval_ms);
+ }
+
+ void VerifyStatsIsNotSet(const AggregatedStats& stats) {
+ EXPECT_EQ(0, stats.num_samples);
+ EXPECT_EQ(-1, stats.min);
+ EXPECT_EQ(-1, stats.max);
+ EXPECT_EQ(-1, stats.average);
+ }
+
+ SimulatedClock clock_;
+};
+
+TEST_F(StatsCounterTest, NoSamples) {
+ AvgCounter counter(&clock_, nullptr, false);
+ VerifyStatsIsNotSet(counter.GetStats());
+}
+
+TEST_F(StatsCounterTest, TestRegisterObserver) {
+ StatsCounterObserverImpl* observer = new StatsCounterObserverImpl();
+ const int kSample = 22;
+ AvgCounter counter(&clock_, observer, false);
+ AddSampleAndAdvance(kSample, kDefaultProcessIntervalMs, &counter);
+ // Trigger process (sample included in next interval).
+ counter.Add(111);
+ EXPECT_EQ(1, observer->num_calls_);
+}
+
+TEST_F(StatsCounterTest, HasSample) {
+ AvgCounter counter(&clock_, nullptr, false);
+ EXPECT_FALSE(counter.HasSample());
+ counter.Add(1);
+ EXPECT_TRUE(counter.HasSample());
+}
+
+TEST_F(StatsCounterTest, VerifyProcessInterval) {
+ StatsCounterObserverImpl* observer = new StatsCounterObserverImpl();
+ AvgCounter counter(&clock_, observer, false);
+ counter.Add(4);
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs - 1);
+ // Try trigger process (interval has not passed).
+ counter.Add(8);
+ EXPECT_EQ(0, observer->num_calls_);
+ VerifyStatsIsNotSet(counter.GetStats());
+ // Make process interval pass.
+ clock_.AdvanceTimeMilliseconds(1);
+ // Trigger process (sample included in next interval).
+ counter.Add(111);
+ EXPECT_EQ(1, observer->num_calls_);
+ EXPECT_EQ(6, observer->last_sample_);
+ // Aggregated stats.
+ AggregatedStats stats = counter.GetStats();
+ EXPECT_EQ(1, stats.num_samples);
+}
+
+TEST_F(StatsCounterTest, TestMetric_AvgCounter) {
+ StatsCounterObserverImpl* observer = new StatsCounterObserverImpl();
+ AvgCounter counter(&clock_, observer, false);
+ counter.Add(4);
+ counter.Add(8);
+ counter.Add(9);
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs);
+ // Trigger process (sample included in next interval).
+ counter.Add(111);
+ // Average per interval.
+ EXPECT_EQ(1, observer->num_calls_);
+ EXPECT_EQ(7, observer->last_sample_);
+ // Aggregated stats.
+ AggregatedStats stats = counter.GetStats();
+ EXPECT_EQ(1, stats.num_samples);
+ EXPECT_EQ(7, stats.min);
+ EXPECT_EQ(7, stats.max);
+ EXPECT_EQ(7, stats.average);
+}
+
+TEST_F(StatsCounterTest, TestMetric_MaxCounter) {
+ const int64_t kProcessIntervalMs = 1000;
+ StatsCounterObserverImpl* observer = new StatsCounterObserverImpl();
+ MaxCounter counter(&clock_, observer, kProcessIntervalMs);
+ counter.Add(4);
+ counter.Add(9);
+ counter.Add(8);
+ clock_.AdvanceTimeMilliseconds(kProcessIntervalMs);
+ // Trigger process (sample included in next interval).
+ counter.Add(111);
+ // Average per interval.
+ EXPECT_EQ(1, observer->num_calls_);
+ EXPECT_EQ(9, observer->last_sample_);
+ // Aggregated stats.
+ AggregatedStats stats = counter.GetStats();
+ EXPECT_EQ(1, stats.num_samples);
+ EXPECT_EQ(9, stats.min);
+ EXPECT_EQ(9, stats.max);
+ EXPECT_EQ(9, stats.average);
+}
+
+TEST_F(StatsCounterTest, TestMetric_PercentCounter) {
+ StatsCounterObserverImpl* observer = new StatsCounterObserverImpl();
+ PercentCounter counter(&clock_, observer);
+ counter.Add(true);
+ counter.Add(false);
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs);
+ // Trigger process (sample included in next interval).
+ counter.Add(false);
+ // Percentage per interval.
+ EXPECT_EQ(1, observer->num_calls_);
+ EXPECT_EQ(50, observer->last_sample_);
+ // Aggregated stats.
+ AggregatedStats stats = counter.GetStats();
+ EXPECT_EQ(1, stats.num_samples);
+ EXPECT_EQ(50, stats.min);
+ EXPECT_EQ(50, stats.max);
+}
+
+TEST_F(StatsCounterTest, TestMetric_PermilleCounter) {
+ StatsCounterObserverImpl* observer = new StatsCounterObserverImpl();
+ PermilleCounter counter(&clock_, observer);
+ counter.Add(true);
+ counter.Add(false);
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs);
+ // Trigger process (sample included in next interval).
+ counter.Add(false);
+ // Permille per interval.
+ EXPECT_EQ(1, observer->num_calls_);
+ EXPECT_EQ(500, observer->last_sample_);
+ // Aggregated stats.
+ AggregatedStats stats = counter.GetStats();
+ EXPECT_EQ(1, stats.num_samples);
+ EXPECT_EQ(500, stats.min);
+ EXPECT_EQ(500, stats.max);
+}
+
+TEST_F(StatsCounterTest, TestMetric_RateCounter) {
+ StatsCounterObserverImpl* observer = new StatsCounterObserverImpl();
+ RateCounter counter(&clock_, observer, true);
+ counter.Add(186);
+ counter.Add(350);
+ counter.Add(22);
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs);
+ // Trigger process (sample included in next interval).
+ counter.Add(111);
+ // Rate per interval, (186 + 350 + 22) / 2 sec = 279 samples/sec
+ EXPECT_EQ(1, observer->num_calls_);
+ EXPECT_EQ(279, observer->last_sample_);
+ // Aggregated stats.
+ AggregatedStats stats = counter.GetStats();
+ EXPECT_EQ(1, stats.num_samples);
+ EXPECT_EQ(279, stats.min);
+ EXPECT_EQ(279, stats.max);
+}
+
+TEST_F(StatsCounterTest, TestMetric_RateAccCounter) {
+ StatsCounterObserverImpl* observer = new StatsCounterObserverImpl();
+ RateAccCounter counter(&clock_, observer, true);
+ counter.Set(175, kStreamId);
+ counter.Set(188, kStreamId);
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs);
+ // Trigger process (sample included in next interval).
+ counter.Set(192, kStreamId);
+ // Rate per interval: (188 - 0) / 2 sec = 94 samples/sec
+ EXPECT_EQ(1, observer->num_calls_);
+ EXPECT_EQ(94, observer->last_sample_);
+ // Aggregated stats.
+ AggregatedStats stats = counter.GetStats();
+ EXPECT_EQ(1, stats.num_samples);
+ EXPECT_EQ(94, stats.min);
+ EXPECT_EQ(94, stats.max);
+}
+
+TEST_F(StatsCounterTest, TestMetric_RateAccCounterWithSetLast) {
+ StatsCounterObserverImpl* observer = new StatsCounterObserverImpl();
+ RateAccCounter counter(&clock_, observer, true);
+ counter.SetLast(98, kStreamId);
+ counter.Set(175, kStreamId);
+ counter.Set(188, kStreamId);
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs);
+ // Trigger process (sample included in next interval).
+ counter.Set(192, kStreamId);
+ // Rate per interval: (188 - 98) / 2 sec = 45 samples/sec
+ EXPECT_EQ(1, observer->num_calls_);
+ EXPECT_EQ(45, observer->last_sample_);
+}
+
+TEST_F(StatsCounterTest, TestMetric_RateAccCounterWithMultipleStreamIds) {
+ StatsCounterObserverImpl* observer = new StatsCounterObserverImpl();
+ RateAccCounter counter(&clock_, observer, true);
+ counter.Set(175, kStreamId);
+ counter.Set(188, kStreamId);
+ counter.Set(100, kStreamId + 1);
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs);
+ // Trigger process (sample included in next interval).
+ counter.Set(150, kStreamId + 1);
+ // Rate per interval: ((188 - 0) + (100 - 0)) / 2 sec = 144 samples/sec
+ EXPECT_EQ(1, observer->num_calls_);
+ EXPECT_EQ(144, observer->last_sample_);
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs);
+ // Trigger process (sample included in next interval).
+ counter.Set(198, kStreamId);
+ // Rate per interval: (0 + (150 - 100)) / 2 sec = 25 samples/sec
+ EXPECT_EQ(2, observer->num_calls_);
+ EXPECT_EQ(25, observer->last_sample_);
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs);
+ // Trigger process (sample included in next interval).
+ counter.Set(200, kStreamId);
+ // Rate per interval: ((198 - 188) + (0)) / 2 sec = 5 samples/sec
+ EXPECT_EQ(3, observer->num_calls_);
+ EXPECT_EQ(5, observer->last_sample_);
+ // Aggregated stats.
+ AggregatedStats stats = counter.GetStats();
+ EXPECT_EQ(3, stats.num_samples);
+ EXPECT_EQ(5, stats.min);
+ EXPECT_EQ(144, stats.max);
+}
+
+TEST_F(StatsCounterTest, TestGetStats_MultipleIntervals) {
+ AvgCounter counter(&clock_, nullptr, false);
+ const int kSample1 = 1;
+ const int kSample2 = 5;
+ const int kSample3 = 8;
+ const int kSample4 = 11;
+ const int kSample5 = 50;
+ AddSampleAndAdvance(kSample1, kDefaultProcessIntervalMs, &counter);
+ AddSampleAndAdvance(kSample2, kDefaultProcessIntervalMs, &counter);
+ AddSampleAndAdvance(kSample3, kDefaultProcessIntervalMs, &counter);
+ AddSampleAndAdvance(kSample4, kDefaultProcessIntervalMs, &counter);
+ AddSampleAndAdvance(kSample5, kDefaultProcessIntervalMs, &counter);
+ // Trigger process (sample included in next interval).
+ counter.Add(111);
+ AggregatedStats stats = counter.GetStats();
+ EXPECT_EQ(5, stats.num_samples);
+ EXPECT_EQ(kSample1, stats.min);
+ EXPECT_EQ(kSample5, stats.max);
+ EXPECT_EQ(15, stats.average);
+}
+
+TEST_F(StatsCounterTest, TestGetStatsTwice) {
+ const int kSample1 = 4;
+ const int kSample2 = 7;
+ AvgCounter counter(&clock_, nullptr, false);
+ AddSampleAndAdvance(kSample1, kDefaultProcessIntervalMs, &counter);
+ // Trigger process (sample included in next interval).
+ counter.Add(kSample2);
+ AggregatedStats stats = counter.GetStats();
+ EXPECT_EQ(1, stats.num_samples);
+ EXPECT_EQ(kSample1, stats.min);
+ EXPECT_EQ(kSample1, stats.max);
+ // Trigger process (sample included in next interval).
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs);
+ counter.Add(111);
+ stats = counter.GetStats();
+ EXPECT_EQ(2, stats.num_samples);
+ EXPECT_EQ(kSample1, stats.min);
+ EXPECT_EQ(kSample2, stats.max);
+ EXPECT_EQ(6, stats.average);
+}
+
+TEST_F(StatsCounterTest, TestRateAccCounter_NegativeRateIgnored) {
+ StatsCounterObserverImpl* observer = new StatsCounterObserverImpl();
+ const int kSample1 = 200; // 200 / 2 sec
+ const int kSample2 = 100; // -100 / 2 sec - negative ignored
+ const int kSample3 = 700; // 600 / 2 sec
+ RateAccCounter counter(&clock_, observer, true);
+ SetSampleAndAdvance(kSample1, kDefaultProcessIntervalMs, &counter);
+ SetSampleAndAdvance(kSample2, kDefaultProcessIntervalMs, &counter);
+ SetSampleAndAdvance(kSample3, kDefaultProcessIntervalMs, &counter);
+ EXPECT_EQ(1, observer->num_calls_);
+ EXPECT_EQ(100, observer->last_sample_);
+ // Trigger process (sample included in next interval).
+ counter.Set(2000, kStreamId);
+ EXPECT_EQ(2, observer->num_calls_);
+ EXPECT_EQ(300, observer->last_sample_);
+ // Aggregated stats.
+ AggregatedStats stats = counter.GetStats();
+ EXPECT_EQ(2, stats.num_samples);
+ EXPECT_EQ(100, stats.min);
+ EXPECT_EQ(300, stats.max);
+ EXPECT_EQ(200, stats.average);
+}
+
+TEST_F(StatsCounterTest, TestAvgCounter_IntervalsWithoutSamplesIncluded) {
+ // Samples: | 6 | x | x | 8 | // x: empty interval
+ // Stats: | 6 | 6 | 6 | 8 | // x -> last value reported
+ StatsCounterObserverImpl* observer = new StatsCounterObserverImpl();
+ AvgCounter counter(&clock_, observer, true);
+ AddSampleAndAdvance(6, kDefaultProcessIntervalMs * 4 - 1, &counter);
+ // Trigger process (sample included in next interval).
+ counter.Add(8);
+ // [6:3], 3 intervals passed (2 without samples -> last value reported).
+ AggregatedStats stats = counter.ProcessAndGetStats();
+ EXPECT_EQ(3, stats.num_samples);
+ EXPECT_EQ(6, stats.min);
+ EXPECT_EQ(6, stats.max);
+ // Make next interval pass and verify stats: [6:3],[8:1]
+ clock_.AdvanceTimeMilliseconds(1);
+ counter.ProcessAndGetStats();
+ EXPECT_EQ(4, observer->num_calls_);
+ EXPECT_EQ(8, observer->last_sample_);
+}
+
+TEST_F(StatsCounterTest, TestAvgCounter_WithPause) {
+ // Samples: | 6 | x | x | x | - | 22 | x | // x: empty interval, -: paused
+ // Stats: | 6 | 6 | 6 | 6 | - | 22 | 22 | // x -> last value reported
+ StatsCounterObserverImpl* observer = new StatsCounterObserverImpl();
+ AvgCounter counter(&clock_, observer, true);
+ // Add sample and advance 3 intervals (2 w/o samples -> last value reported).
+ AddSampleAndAdvance(6, kDefaultProcessIntervalMs * 4 - 1, &counter);
+ // Trigger process and verify stats: [6:3]
+ counter.ProcessAndGetStats();
+ EXPECT_EQ(3, observer->num_calls_);
+ EXPECT_EQ(6, observer->last_sample_);
+ // Make next interval pass (1 without samples).
+ // Process and pause. Verify stats: [6:4].
+ clock_.AdvanceTimeMilliseconds(1);
+ counter.ProcessAndPause();
+ EXPECT_EQ(4, observer->num_calls_); // Last value reported.
+ EXPECT_EQ(6, observer->last_sample_);
+ // Make next interval pass (1 without samples -> ignored while paused).
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs * 2 - 1);
+ counter.Add(22); // Stops pause.
+ EXPECT_EQ(4, observer->num_calls_);
+ EXPECT_EQ(6, observer->last_sample_);
+ // Make next interval pass, [6:4][22:1]
+ clock_.AdvanceTimeMilliseconds(1);
+ counter.ProcessAndGetStats();
+ EXPECT_EQ(5, observer->num_calls_);
+ EXPECT_EQ(22, observer->last_sample_);
+ // Make 1 interval pass (1 w/o samples -> pause stopped, last value reported).
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs);
+ counter.ProcessAndGetStats();
+ EXPECT_EQ(6, observer->num_calls_);
+ EXPECT_EQ(22, observer->last_sample_);
+}
+
+TEST_F(StatsCounterTest, TestRateAccCounter_AddSampleStopsPause) {
+ // Samples: | 12 | 24 | // -: paused
+ // Stats: | 6 | 6 |
+ StatsCounterObserverImpl* observer = new StatsCounterObserverImpl();
+ RateAccCounter counter(&clock_, observer, true);
+  // Add sample and advance 1 interval.
+ counter.Set(12, kStreamId);
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs);
+ // Trigger process and verify stats: [6:1]
+ counter.ProcessAndPause();
+ EXPECT_EQ(1, observer->num_calls_);
+ EXPECT_EQ(6, observer->last_sample_);
+  // Add sample and advance 1 interval.
+ counter.Set(24, kStreamId); // Pause stopped.
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs);
+ counter.ProcessAndGetStats();
+ EXPECT_EQ(2, observer->num_calls_);
+ EXPECT_EQ(6, observer->last_sample_);
+}
+
+TEST_F(StatsCounterTest, TestRateAccCounter_AddSameSampleDoesNotStopPause) {
+ // Samples: | 12 | 12 | 24 | // -: paused
+ // Stats: | 6 | - | 6 |
+ StatsCounterObserverImpl* observer = new StatsCounterObserverImpl();
+ RateAccCounter counter(&clock_, observer, true);
+  // Add sample and advance 1 interval.
+ counter.Set(12, kStreamId);
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs);
+ // Trigger process and verify stats: [6:1]
+ counter.ProcessAndPause();
+ EXPECT_EQ(1, observer->num_calls_);
+ EXPECT_EQ(6, observer->last_sample_);
+  // Add same sample and advance 1 interval.
+ counter.Set(12, kStreamId); // Pause not stopped.
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs);
+ counter.ProcessAndGetStats();
+ EXPECT_EQ(1, observer->num_calls_);
+ EXPECT_EQ(6, observer->last_sample_);
+  // Add new sample and advance 1 interval.
+ counter.Set(24, kStreamId); // Pause stopped.
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs);
+ counter.ProcessAndGetStats();
+ EXPECT_EQ(2, observer->num_calls_);
+ EXPECT_EQ(6, observer->last_sample_);
+}
+
+TEST_F(StatsCounterTest, TestRateAccCounter_PauseAndStopPause) {
+ // Samples: | 12 | 12 | 12 | // -: paused
+ // Stats: | 6 | - | 0 |
+ StatsCounterObserverImpl* observer = new StatsCounterObserverImpl();
+ RateAccCounter counter(&clock_, observer, true);
+  // Add sample and advance 1 interval.
+ counter.Set(12, kStreamId);
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs);
+ // Trigger process and verify stats: [6:1]
+ counter.ProcessAndPause();
+ EXPECT_EQ(1, observer->num_calls_);
+ EXPECT_EQ(6, observer->last_sample_);
+  // Add same sample and advance 1 interval.
+ counter.Set(12, kStreamId); // Pause not stopped.
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs);
+ counter.ProcessAndGetStats();
+ EXPECT_EQ(1, observer->num_calls_);
+ EXPECT_EQ(6, observer->last_sample_);
+  // Stop pause, add sample and advance 1 interval.
+ counter.ProcessAndStopPause();
+ counter.Set(12, kStreamId);
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs);
+ counter.ProcessAndGetStats();
+ EXPECT_EQ(2, observer->num_calls_);
+ EXPECT_EQ(0, observer->last_sample_);
+}
+
+TEST_F(StatsCounterTest, TestAvgCounter_WithoutMinPauseTimePassed) {
+ // Samples: | 6 | 2 | - | // x: empty interval, -: paused
+ // Stats: | 6 | 2 | - | // x -> last value reported
+ StatsCounterObserverImpl* observer = new StatsCounterObserverImpl();
+ AvgCounter counter(&clock_, observer, true);
+  // Add sample and advance 1 interval.
+ AddSampleAndAdvance(6, kDefaultProcessIntervalMs, &counter);
+ // Process and pause. Verify stats: [6:1].
+ const int64_t kMinMs = 500;
+ counter.ProcessAndPauseForDuration(kMinMs);
+ EXPECT_EQ(1, observer->num_calls_); // Last value reported.
+ EXPECT_EQ(6, observer->last_sample_);
+  // Min pause time has not passed.
+ clock_.AdvanceTimeMilliseconds(kMinMs - 1);
+ counter.Add(2); // Pause not stopped.
+ // Make two intervals pass (1 without samples -> ignored while paused).
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs * 2 - (kMinMs - 1));
+ counter.ProcessAndGetStats();
+ EXPECT_EQ(2, observer->num_calls_);
+ EXPECT_EQ(2, observer->last_sample_);
+}
+
+TEST_F(StatsCounterTest, TestAvgCounter_WithMinPauseTimePassed) {
+ // Samples: | 6 | 2 | x | // x: empty interval, -: paused
+ // Stats: | 6 | 2 | 2 | // x -> last value reported
+ StatsCounterObserverImpl* observer = new StatsCounterObserverImpl();
+ AvgCounter counter(&clock_, observer, true);
+  // Add sample and advance 1 interval.
+ AddSampleAndAdvance(6, kDefaultProcessIntervalMs, &counter);
+ // Process and pause. Verify stats: [6:1].
+ const int64_t kMinMs = 500;
+ counter.ProcessAndPauseForDuration(kMinMs);
+ EXPECT_EQ(1, observer->num_calls_); // Last value reported.
+ EXPECT_EQ(6, observer->last_sample_);
+ // Make min pause time pass.
+ clock_.AdvanceTimeMilliseconds(kMinMs);
+ counter.Add(2); // Stop pause.
+ // Make two intervals pass (1 without samples -> last value reported).
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs * 2 - kMinMs);
+ counter.ProcessAndGetStats();
+ EXPECT_EQ(3, observer->num_calls_);
+ EXPECT_EQ(2, observer->last_sample_);
+}
+
+TEST_F(StatsCounterTest, TestRateCounter_IntervalsWithoutSamplesIgnored) {
+ // Samples: | 50 | x | 20 | // x: empty interval
+ // Stats: | 25 | x | 10 | // x -> ignored
+ const bool kIncludeEmptyIntervals = false;
+ StatsCounterObserverImpl* observer = new StatsCounterObserverImpl();
+ const int kSample1 = 50; // 50 / 2 sec
+ const int kSample2 = 20; // 20 / 2 sec
+ RateCounter counter(&clock_, observer, kIncludeEmptyIntervals);
+ counter.Add(kSample1);
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs * 3 - 1);
+ // Trigger process (sample included in next interval).
+ counter.Add(kSample2);
+ // [25:1], 2 intervals passed (1 without samples -> ignored).
+ EXPECT_EQ(1, observer->num_calls_);
+ EXPECT_EQ(25, observer->last_sample_);
+ // Make next interval pass and verify stats: [10:1],[25:1]
+ clock_.AdvanceTimeMilliseconds(1);
+ counter.ProcessAndGetStats();
+ EXPECT_EQ(2, observer->num_calls_);
+ EXPECT_EQ(10, observer->last_sample_);
+}
+
+TEST_F(StatsCounterTest, TestRateCounter_IntervalsWithoutSamplesIncluded) {
+ // Samples: | 50 | x | 20 | // x: empty interval
+ // Stats: | 25 | 0 | 10 | // x -> zero reported
+ const bool kIncludeEmptyIntervals = true;
+ StatsCounterObserverImpl* observer = new StatsCounterObserverImpl();
+ const int kSample1 = 50; // 50 / 2 sec
+ const int kSample2 = 20; // 20 / 2 sec
+ RateCounter counter(&clock_, observer, kIncludeEmptyIntervals);
+ counter.Add(kSample1);
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs * 3 - 1);
+ // Trigger process (sample included in next interval).
+ counter.Add(kSample2);
+ // [0:1],[25:1], 2 intervals passed (1 without samples -> zero reported).
+ EXPECT_EQ(2, observer->num_calls_);
+ EXPECT_EQ(0, observer->last_sample_);
+ // Make last interval pass and verify stats: [0:1],[10:1],[25:1]
+ clock_.AdvanceTimeMilliseconds(1);
+ AggregatedStats stats = counter.ProcessAndGetStats();
+ EXPECT_EQ(25, stats.max);
+ EXPECT_EQ(3, observer->num_calls_);
+ EXPECT_EQ(10, observer->last_sample_);
+}
+
+TEST_F(StatsCounterTest, TestRateAccCounter_IntervalsWithoutSamplesIncluded) {
+ // Samples: | 12 | x | x | x | 60 | // x: empty interval
+ // Stats: | 6 | 0 | 0 | 0 | 24 | // x -> zero reported
+ StatsCounterObserverImpl* observer = new StatsCounterObserverImpl();
+ RateAccCounter counter(&clock_, observer, true);
+ VerifyStatsIsNotSet(counter.ProcessAndGetStats());
+ // Advance one interval and verify stats.
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs);
+ VerifyStatsIsNotSet(counter.ProcessAndGetStats());
+ // Add sample and advance 3 intervals (2 w/o samples -> zero reported).
+ counter.Set(12, kStreamId);
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs * 4 - 1);
+ // Trigger process and verify stats: [0:2][6:1]
+ counter.ProcessAndGetStats();
+ EXPECT_EQ(3, observer->num_calls_);
+ EXPECT_EQ(0, observer->last_sample_);
+ // Make next interval pass (1 w/o samples -> zero reported), [0:3][6:1]
+ clock_.AdvanceTimeMilliseconds(1);
+ counter.ProcessAndGetStats();
+ EXPECT_EQ(4, observer->num_calls_);
+ EXPECT_EQ(0, observer->last_sample_);
+ // Insert sample and advance non-complete interval, no change, [0:3][6:1]
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs - 1);
+ counter.Set(60, kStreamId);
+ EXPECT_EQ(4, observer->num_calls_);
+ // Make next interval pass, [0:3][6:1][24:1]
+ clock_.AdvanceTimeMilliseconds(1);
+ AggregatedStats stats = counter.ProcessAndGetStats();
+ EXPECT_EQ(5, observer->num_calls_);
+ EXPECT_EQ(24, observer->last_sample_);
+ EXPECT_EQ(6, stats.average);
+}
+
+TEST_F(StatsCounterTest, TestRateAccCounter_IntervalsWithoutSamplesIgnored) {
+ // Samples: | 12 | x | x | x | 60 | // x: empty interval
+ // Stats: | 6 | x | x | x | 24 | // x -> ignored
+ StatsCounterObserverImpl* observer = new StatsCounterObserverImpl();
+ RateAccCounter counter(&clock_, observer, false);
+ // Add sample and advance 3 intervals (2 w/o samples -> ignored).
+ counter.Set(12, kStreamId);
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs * 4 - 1);
+ // Trigger process and verify stats: [6:1]
+ counter.ProcessAndGetStats();
+ EXPECT_EQ(1, observer->num_calls_);
+ EXPECT_EQ(6, observer->last_sample_);
+ // Make next interval pass (1 w/o samples -> ignored), [6:1]
+ clock_.AdvanceTimeMilliseconds(1);
+ counter.ProcessAndGetStats();
+ EXPECT_EQ(1, observer->num_calls_);
+ // Insert sample and advance non-complete interval, no change, [6:1]
+ clock_.AdvanceTimeMilliseconds(kDefaultProcessIntervalMs - 1);
+ counter.Set(60, kStreamId);
+ counter.ProcessAndGetStats();
+ EXPECT_EQ(1, observer->num_calls_);
+ // Make next interval pass, [6:1][24:1]
+ clock_.AdvanceTimeMilliseconds(1);
+ counter.ProcessAndGetStats();
+ EXPECT_EQ(2, observer->num_calls_);
+ EXPECT_EQ(24, observer->last_sample_);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/video/stream_synchronization.cc b/third_party/libwebrtc/webrtc/video/stream_synchronization.cc
new file mode 100644
index 0000000000..6b800d1119
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/stream_synchronization.cc
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/stream_synchronization.h"
+
+#include <assert.h>
+#include <math.h>
+#include <stdlib.h>
+
+#include <algorithm>
+
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+static const int kMaxChangeMs = 80;
+static const int kMaxDeltaDelayMs = 10000;
+static const int kFilterLength = 4;
+// Minimum difference between audio and video to warrant a change.
+static const int kMinDeltaMs = 30;
+
+StreamSynchronization::StreamSynchronization(int video_stream_id,
+ int audio_stream_id)
+ : video_stream_id_(video_stream_id),
+ audio_stream_id_(audio_stream_id),
+ base_target_delay_ms_(0),
+ avg_diff_ms_(0) {
+}
+
+bool StreamSynchronization::ComputeRelativeDelay(
+ const Measurements& audio_measurement,
+ const Measurements& video_measurement,
+ int* relative_delay_ms) {
+ assert(relative_delay_ms);
+ int64_t audio_last_capture_time_ms;
+ if (!audio_measurement.rtp_to_ntp.Estimate(audio_measurement.latest_timestamp,
+ &audio_last_capture_time_ms)) {
+ return false;
+ }
+ int64_t video_last_capture_time_ms;
+ if (!video_measurement.rtp_to_ntp.Estimate(video_measurement.latest_timestamp,
+ &video_last_capture_time_ms)) {
+ return false;
+ }
+ if (video_last_capture_time_ms < 0) {
+ return false;
+ }
+ // Positive diff means that video_measurement is behind audio_measurement.
+ *relative_delay_ms = video_measurement.latest_receive_time_ms -
+ audio_measurement.latest_receive_time_ms -
+ (video_last_capture_time_ms - audio_last_capture_time_ms);
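+  // E.g. with equal capture times, video received 50 ms after audio gives
+  // *relative_delay_ms = 50 (video is 50 ms behind audio).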
+ if (*relative_delay_ms > kMaxDeltaDelayMs ||
+ *relative_delay_ms < -kMaxDeltaDelayMs) {
+ return false;
+ }
+ return true;
+}
+
+bool StreamSynchronization::ComputeDelays(int relative_delay_ms,
+ int current_audio_delay_ms,
+ int* total_audio_delay_target_ms,
+ int* total_video_delay_target_ms) {
+ assert(total_audio_delay_target_ms && total_video_delay_target_ms);
+
+ int current_video_delay_ms = *total_video_delay_target_ms;
+ RTC_LOG(LS_VERBOSE) << "Audio delay: " << current_audio_delay_ms
+ << " current diff: " << relative_delay_ms
+ << " for stream " << audio_stream_id_;
+ // Calculate the difference between the lowest possible video delay and
+ // the current audio delay.
+ int current_diff_ms = current_video_delay_ms - current_audio_delay_ms +
+ relative_delay_ms;
+
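+  // Smooth the diff with a recursive averaging filter of length
+  // |kFilterLength|.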
+ avg_diff_ms_ = ((kFilterLength - 1) * avg_diff_ms_ +
+ current_diff_ms) / kFilterLength;
+ if (abs(avg_diff_ms_) < kMinDeltaMs) {
+ // Don't adjust if the diff is within our margin.
+ return false;
+ }
+
+ // Make sure we don't move too fast.
+ int diff_ms = avg_diff_ms_ / 2;
+ diff_ms = std::min(diff_ms, kMaxChangeMs);
+ diff_ms = std::max(diff_ms, -kMaxChangeMs);
+
+ // Reset the average after a move to prevent overshooting reaction.
+ avg_diff_ms_ = 0;
+
+ if (diff_ms > 0) {
+ // The minimum video delay is longer than the current audio delay.
+ // We need to decrease extra video delay, or add extra audio delay.
+ if (channel_delay_.extra_video_delay_ms > base_target_delay_ms_) {
+ // We have extra delay added to ViE. Reduce this delay before adding
+ // extra delay to VoE.
+ channel_delay_.extra_video_delay_ms -= diff_ms;
+ channel_delay_.extra_audio_delay_ms = base_target_delay_ms_;
+    } else {  // channel_delay_.extra_video_delay_ms <= base_target_delay_ms_
+ // We have no extra video delay to remove, increase the audio delay.
+ channel_delay_.extra_audio_delay_ms += diff_ms;
+ channel_delay_.extra_video_delay_ms = base_target_delay_ms_;
+ }
+ } else { // if (diff_ms > 0)
+ // The video delay is lower than the current audio delay.
+ // We need to decrease extra audio delay, or add extra video delay.
+ if (channel_delay_.extra_audio_delay_ms > base_target_delay_ms_) {
+ // We have extra delay in VoiceEngine.
+ // Start with decreasing the voice delay.
+ // Note: diff_ms is negative; add the negative difference.
+ channel_delay_.extra_audio_delay_ms += diff_ms;
+ channel_delay_.extra_video_delay_ms = base_target_delay_ms_;
+    } else {  // channel_delay_.extra_audio_delay_ms <= base_target_delay_ms_
+ // We have no extra delay in VoiceEngine, increase the video delay.
+ // Note: diff_ms is negative; subtract the negative difference.
+ channel_delay_.extra_video_delay_ms -= diff_ms; // X - (-Y) = X + Y.
+ channel_delay_.extra_audio_delay_ms = base_target_delay_ms_;
+ }
+ }
+
+ // Make sure that video is never below our target.
+ channel_delay_.extra_video_delay_ms = std::max(
+ channel_delay_.extra_video_delay_ms, base_target_delay_ms_);
+
+ int new_video_delay_ms;
+ if (channel_delay_.extra_video_delay_ms > base_target_delay_ms_) {
+ new_video_delay_ms = channel_delay_.extra_video_delay_ms;
+ } else {
+    // No change to the extra video delay. We are changing audio, and we only
+    // allow changing one at a time.
+ new_video_delay_ms = channel_delay_.last_video_delay_ms;
+ }
+
+ // Make sure that we don't go below the extra video delay.
+ new_video_delay_ms = std::max(
+ new_video_delay_ms, channel_delay_.extra_video_delay_ms);
+
+ // Verify we don't go above the maximum allowed video delay.
+ new_video_delay_ms =
+ std::min(new_video_delay_ms, base_target_delay_ms_ + kMaxDeltaDelayMs);
+
+ int new_audio_delay_ms;
+ if (channel_delay_.extra_audio_delay_ms > base_target_delay_ms_) {
+ new_audio_delay_ms = channel_delay_.extra_audio_delay_ms;
+ } else {
+    // No change to the audio delay. We are changing video, and we only
+    // allow changing one at a time.
+ new_audio_delay_ms = channel_delay_.last_audio_delay_ms;
+ }
+
+ // Make sure that we don't go below the extra audio delay.
+ new_audio_delay_ms = std::max(
+ new_audio_delay_ms, channel_delay_.extra_audio_delay_ms);
+
+ // Verify we don't go above the maximum allowed audio delay.
+ new_audio_delay_ms =
+ std::min(new_audio_delay_ms, base_target_delay_ms_ + kMaxDeltaDelayMs);
+
+ // Remember our last audio and video delays.
+ channel_delay_.last_video_delay_ms = new_video_delay_ms;
+ channel_delay_.last_audio_delay_ms = new_audio_delay_ms;
+
+ RTC_LOG(LS_VERBOSE) << "Sync video delay " << new_video_delay_ms
+ << " for video stream " << video_stream_id_
+ << " and audio delay "
+ << channel_delay_.extra_audio_delay_ms
+ << " for audio stream " << audio_stream_id_;
+
+ // Return values.
+ *total_video_delay_target_ms = new_video_delay_ms;
+ *total_audio_delay_target_ms = new_audio_delay_ms;
+ return true;
+}
+
+void StreamSynchronization::SetTargetBufferingDelay(int target_delay_ms) {
+ // Initial extra delay for audio (accounting for existing extra delay).
+ channel_delay_.extra_audio_delay_ms +=
+ target_delay_ms - base_target_delay_ms_;
+ channel_delay_.last_audio_delay_ms +=
+ target_delay_ms - base_target_delay_ms_;
+
+ // The video delay is compared to the last value (and how much we can update
+ // is limited by that as well).
+ channel_delay_.last_video_delay_ms +=
+ target_delay_ms - base_target_delay_ms_;
+
+ channel_delay_.extra_video_delay_ms +=
+ target_delay_ms - base_target_delay_ms_;
+
+ // Video is already delayed by the desired amount.
+ base_target_delay_ms_ = target_delay_ms;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/video/stream_synchronization.h b/third_party/libwebrtc/webrtc/video/stream_synchronization.h
new file mode 100644
index 0000000000..52b8bde21d
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/stream_synchronization.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_STREAM_SYNCHRONIZATION_H_
+#define VIDEO_STREAM_SYNCHRONIZATION_H_
+
+#include <list>
+
+#include "system_wrappers/include/rtp_to_ntp_estimator.h"
+#include "typedefs.h" // NOLINT(build/include)
+
+namespace webrtc {
+
+class StreamSynchronization {
+ public:
+ struct Measurements {
+ Measurements() : latest_receive_time_ms(0), latest_timestamp(0) {}
+ RtpToNtpEstimator rtp_to_ntp;
+ int64_t latest_receive_time_ms;
+ uint32_t latest_timestamp;
+ };
+
+ StreamSynchronization(int video_stream_id, int audio_stream_id);
+
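+  // Computes the extra audio delay and the total video delay target needed
+  // to get the streams in sync. Returns true if the targets were updated,
+  // false if the streams are within the sync margin and no change is needed.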
+ bool ComputeDelays(int relative_delay_ms,
+ int current_audio_delay_ms,
+ int* extra_audio_delay_ms,
+ int* total_video_delay_target_ms);
+
+  // On success, |relative_delay_ms| contains the number of milliseconds by
+  // which video is rendered later than audio. If audio is played back later
+  // than video, |relative_delay_ms| will be negative.
+ static bool ComputeRelativeDelay(const Measurements& audio_measurement,
+ const Measurements& video_measurement,
+ int* relative_delay_ms);
+  // Sets the target buffering delay. All audio and video will be delayed by
+  // at least |target_delay_ms|.
+ void SetTargetBufferingDelay(int target_delay_ms);
+
+ private:
+ struct SynchronizationDelays {
+ int extra_video_delay_ms = 0;
+ int last_video_delay_ms = 0;
+ int extra_audio_delay_ms = 0;
+ int last_audio_delay_ms = 0;
+ };
+
+ SynchronizationDelays channel_delay_;
+ const int video_stream_id_;
+ const int audio_stream_id_;
+ int base_target_delay_ms_;
+ int avg_diff_ms_;
+};
+} // namespace webrtc
+
+#endif // VIDEO_STREAM_SYNCHRONIZATION_H_
diff --git a/third_party/libwebrtc/webrtc/video/stream_synchronization_unittest.cc b/third_party/libwebrtc/webrtc/video/stream_synchronization_unittest.cc
new file mode 100644
index 0000000000..f9ae37d0bb
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/stream_synchronization_unittest.cc
@@ -0,0 +1,568 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+
+#include <algorithm>
+
+#include "test/gtest.h"
+#include "video/stream_synchronization.h"
+
+namespace webrtc {
+
+// These correspond to the constants defined in stream_synchronization.cc.
+enum { kMaxVideoDiffMs = 80 };
+enum { kMaxAudioDiffMs = 80 };
+enum { kMaxDelay = 1500 };
+
+// Test constants.
+enum { kDefaultAudioFrequency = 8000 };
+enum { kDefaultVideoFrequency = 90000 };
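+// NTP fractions per millisecond: 2^32 fractions per second / 1000.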
+const double kNtpFracPerMs = 4.294967296E6;
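+// kFilterLength (4) in stream_synchronization.cc, times 2 since only half of
+// the filtered diff is applied per update.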
+static const int kSmoothingFilter = 4 * 2;
+
+class Time {
+ public:
+ explicit Time(int64_t offset)
+ : kNtpJan1970(2208988800UL),
+ time_now_ms_(offset) {}
+
+ NtpTime GetNowNtp() const {
+ uint32_t ntp_secs = time_now_ms_ / 1000 + kNtpJan1970;
+ int64_t remainder_ms = time_now_ms_ % 1000;
+ uint32_t ntp_frac = static_cast<uint32_t>(
+ static_cast<double>(remainder_ms) * kNtpFracPerMs + 0.5);
+ return NtpTime(ntp_secs, ntp_frac);
+ }
+
+ uint32_t GetNowRtp(int frequency, uint32_t offset) const {
+ return frequency * time_now_ms_ / 1000 + offset;
+ }
+
+ void IncreaseTimeMs(int64_t inc) {
+ time_now_ms_ += inc;
+ }
+
+ int64_t time_now_ms() const {
+ return time_now_ms_;
+ }
+
+ private:
+ // January 1970, in NTP seconds.
+ const uint32_t kNtpJan1970;
+ int64_t time_now_ms_;
+};
+
+class StreamSynchronizationTest : public ::testing::Test {
+ protected:
+ virtual void SetUp() {
+ sync_ = new StreamSynchronization(0, 0);
+ send_time_ = new Time(kSendTimeOffsetMs);
+ receive_time_ = new Time(kReceiveTimeOffsetMs);
+ audio_clock_drift_ = 1.0;
+ video_clock_drift_ = 1.0;
+ }
+
+ virtual void TearDown() {
+ delete sync_;
+ delete send_time_;
+ delete receive_time_;
+ }
+
+ // Generates the necessary RTCP measurements and RTP timestamps and computes
+ // the audio and video delays needed to get the two streams in sync.
+  // |audio_delay_ms| and |video_delay_ms| are the number of milliseconds
+  // after capture at which the frames are rendered.
+ // |current_audio_delay_ms| is the number of milliseconds which audio is
+ // currently being delayed by the receiver.
+ bool DelayedStreams(int audio_delay_ms,
+ int video_delay_ms,
+ int current_audio_delay_ms,
+ int* extra_audio_delay_ms,
+ int* total_video_delay_ms) {
+ int audio_frequency = static_cast<int>(kDefaultAudioFrequency *
+ audio_clock_drift_ + 0.5);
+ int audio_offset = 0;
+ int video_frequency = static_cast<int>(kDefaultVideoFrequency *
+ video_clock_drift_ + 0.5);
+ bool new_sr;
+ int video_offset = 0;
+ StreamSynchronization::Measurements audio;
+ StreamSynchronization::Measurements video;
+ // Generate NTP/RTP timestamp pair for both streams corresponding to RTCP.
+ NtpTime ntp_time = send_time_->GetNowNtp();
+ uint32_t rtp_timestamp =
+ send_time_->GetNowRtp(audio_frequency, audio_offset);
+ EXPECT_TRUE(audio.rtp_to_ntp.UpdateMeasurements(
+ ntp_time.seconds(), ntp_time.fractions(), rtp_timestamp, &new_sr));
+ send_time_->IncreaseTimeMs(100);
+ receive_time_->IncreaseTimeMs(100);
+ ntp_time = send_time_->GetNowNtp();
+ rtp_timestamp = send_time_->GetNowRtp(video_frequency, video_offset);
+ EXPECT_TRUE(video.rtp_to_ntp.UpdateMeasurements(
+ ntp_time.seconds(), ntp_time.fractions(), rtp_timestamp, &new_sr));
+ send_time_->IncreaseTimeMs(900);
+ receive_time_->IncreaseTimeMs(900);
+ ntp_time = send_time_->GetNowNtp();
+ rtp_timestamp = send_time_->GetNowRtp(audio_frequency, audio_offset);
+ EXPECT_TRUE(audio.rtp_to_ntp.UpdateMeasurements(
+ ntp_time.seconds(), ntp_time.fractions(), rtp_timestamp, &new_sr));
+ send_time_->IncreaseTimeMs(100);
+ receive_time_->IncreaseTimeMs(100);
+ ntp_time = send_time_->GetNowNtp();
+ rtp_timestamp = send_time_->GetNowRtp(video_frequency, video_offset);
+ EXPECT_TRUE(video.rtp_to_ntp.UpdateMeasurements(
+ ntp_time.seconds(), ntp_time.fractions(), rtp_timestamp, &new_sr));
+
+ send_time_->IncreaseTimeMs(900);
+ receive_time_->IncreaseTimeMs(900);
+
+ // Capture an audio and a video frame at the same time.
+ audio.latest_timestamp =
+ send_time_->GetNowRtp(audio_frequency, audio_offset);
+ video.latest_timestamp =
+ send_time_->GetNowRtp(video_frequency, video_offset);
+
+ if (audio_delay_ms > video_delay_ms) {
+ // Audio later than video.
+ receive_time_->IncreaseTimeMs(video_delay_ms);
+ video.latest_receive_time_ms = receive_time_->time_now_ms();
+ receive_time_->IncreaseTimeMs(audio_delay_ms - video_delay_ms);
+ audio.latest_receive_time_ms = receive_time_->time_now_ms();
+ } else {
+ // Video later than audio.
+ receive_time_->IncreaseTimeMs(audio_delay_ms);
+ audio.latest_receive_time_ms = receive_time_->time_now_ms();
+ receive_time_->IncreaseTimeMs(video_delay_ms - audio_delay_ms);
+ video.latest_receive_time_ms = receive_time_->time_now_ms();
+ }
+ int relative_delay_ms;
+ StreamSynchronization::ComputeRelativeDelay(audio, video,
+ &relative_delay_ms);
+ EXPECT_EQ(video_delay_ms - audio_delay_ms, relative_delay_ms);
+ return sync_->ComputeDelays(relative_delay_ms,
+ current_audio_delay_ms,
+ extra_audio_delay_ms,
+ total_video_delay_ms);
+ }
+
+ // Simulate audio playback 300 ms after capture and video rendering 100 ms
+ // after capture. Verify that the correct extra delays are calculated for
+ // audio and video, and that they change correctly when we simulate that
+ // NetEQ or the VCM adds more delay to the streams.
+ // TODO(holmer): This is currently wrong! We should simply change
+ // audio_delay_ms or video_delay_ms since those now include VCM and NetEQ
+ // delays.
+ void BothDelayedAudioLaterTest(int base_target_delay) {
+ int current_audio_delay_ms = base_target_delay;
+ int audio_delay_ms = base_target_delay + 300;
+ int video_delay_ms = base_target_delay + 100;
+ int extra_audio_delay_ms = 0;
+ int total_video_delay_ms = base_target_delay;
+ int filtered_move = (audio_delay_ms - video_delay_ms) / kSmoothingFilter;
+ const int kNeteqDelayIncrease = 50;
+ const int kNeteqDelayDecrease = 10;
+
+ EXPECT_TRUE(DelayedStreams(audio_delay_ms,
+ video_delay_ms,
+ current_audio_delay_ms,
+ &extra_audio_delay_ms,
+ &total_video_delay_ms));
+ EXPECT_EQ(base_target_delay + filtered_move, total_video_delay_ms);
+ EXPECT_EQ(base_target_delay, extra_audio_delay_ms);
+ current_audio_delay_ms = extra_audio_delay_ms;
+
+ send_time_->IncreaseTimeMs(1000);
+ receive_time_->IncreaseTimeMs(1000 - std::max(audio_delay_ms,
+ video_delay_ms));
+ // Simulate base_target_delay minimum delay in the VCM.
+ total_video_delay_ms = base_target_delay;
+ EXPECT_TRUE(DelayedStreams(audio_delay_ms,
+ video_delay_ms,
+ current_audio_delay_ms,
+ &extra_audio_delay_ms,
+ &total_video_delay_ms));
+ EXPECT_EQ(base_target_delay + 2 * filtered_move, total_video_delay_ms);
+ EXPECT_EQ(base_target_delay, extra_audio_delay_ms);
+ current_audio_delay_ms = extra_audio_delay_ms;
+
+ send_time_->IncreaseTimeMs(1000);
+ receive_time_->IncreaseTimeMs(1000 - std::max(audio_delay_ms,
+ video_delay_ms));
+ // Simulate base_target_delay minimum delay in the VCM.
+ total_video_delay_ms = base_target_delay;
+ EXPECT_TRUE(DelayedStreams(audio_delay_ms,
+ video_delay_ms,
+ current_audio_delay_ms,
+ &extra_audio_delay_ms,
+ &total_video_delay_ms));
+ EXPECT_EQ(base_target_delay + 3 * filtered_move, total_video_delay_ms);
+ EXPECT_EQ(base_target_delay, extra_audio_delay_ms);
+
+ // Simulate that NetEQ introduces some audio delay.
+ current_audio_delay_ms = base_target_delay + kNeteqDelayIncrease;
+ send_time_->IncreaseTimeMs(1000);
+ receive_time_->IncreaseTimeMs(1000 - std::max(audio_delay_ms,
+ video_delay_ms));
+ // Simulate base_target_delay minimum delay in the VCM.
+ total_video_delay_ms = base_target_delay;
+ EXPECT_TRUE(DelayedStreams(audio_delay_ms,
+ video_delay_ms,
+ current_audio_delay_ms,
+ &extra_audio_delay_ms,
+ &total_video_delay_ms));
+ filtered_move = 3 * filtered_move +
+ (kNeteqDelayIncrease + audio_delay_ms - video_delay_ms) /
+ kSmoothingFilter;
+ EXPECT_EQ(base_target_delay + filtered_move, total_video_delay_ms);
+ EXPECT_EQ(base_target_delay, extra_audio_delay_ms);
+
+ // Simulate that NetEQ reduces its delay.
+ current_audio_delay_ms = base_target_delay + kNeteqDelayDecrease;
+ send_time_->IncreaseTimeMs(1000);
+ receive_time_->IncreaseTimeMs(1000 - std::max(audio_delay_ms,
+ video_delay_ms));
+ // Simulate base_target_delay minimum delay in the VCM.
+ total_video_delay_ms = base_target_delay;
+ EXPECT_TRUE(DelayedStreams(audio_delay_ms,
+ video_delay_ms,
+ current_audio_delay_ms,
+ &extra_audio_delay_ms,
+ &total_video_delay_ms));
+
+ filtered_move = filtered_move +
+ (kNeteqDelayDecrease + audio_delay_ms - video_delay_ms) /
+ kSmoothingFilter;
+
+ EXPECT_EQ(base_target_delay + filtered_move, total_video_delay_ms);
+ EXPECT_EQ(base_target_delay, extra_audio_delay_ms);
+ }
+
+ void BothDelayedVideoLaterTest(int base_target_delay) {
+ int current_audio_delay_ms = base_target_delay;
+ int audio_delay_ms = base_target_delay + 100;
+ int video_delay_ms = base_target_delay + 300;
+ int extra_audio_delay_ms = 0;
+ int total_video_delay_ms = base_target_delay;
+
+ EXPECT_TRUE(DelayedStreams(audio_delay_ms,
+ video_delay_ms,
+ current_audio_delay_ms,
+ &extra_audio_delay_ms,
+ &total_video_delay_ms));
+ EXPECT_EQ(base_target_delay, total_video_delay_ms);
+ // The audio delay is not allowed to change more than this in 1 second.
+ EXPECT_GE(base_target_delay + kMaxAudioDiffMs, extra_audio_delay_ms);
+ current_audio_delay_ms = extra_audio_delay_ms;
+ int current_extra_delay_ms = extra_audio_delay_ms;
+
+ send_time_->IncreaseTimeMs(1000);
+ receive_time_->IncreaseTimeMs(800);
+ EXPECT_TRUE(DelayedStreams(audio_delay_ms,
+ video_delay_ms,
+ current_audio_delay_ms,
+ &extra_audio_delay_ms,
+ &total_video_delay_ms));
+ EXPECT_EQ(base_target_delay, total_video_delay_ms);
+    // The audio delay is not allowed to change by more than half of the
+    // required change in delay.
+ EXPECT_EQ(current_extra_delay_ms + MaxAudioDelayIncrease(
+ current_audio_delay_ms,
+ base_target_delay + video_delay_ms - audio_delay_ms),
+ extra_audio_delay_ms);
+ current_audio_delay_ms = extra_audio_delay_ms;
+ current_extra_delay_ms = extra_audio_delay_ms;
+
+ send_time_->IncreaseTimeMs(1000);
+ receive_time_->IncreaseTimeMs(800);
+ EXPECT_TRUE(DelayedStreams(audio_delay_ms,
+ video_delay_ms,
+ current_audio_delay_ms,
+ &extra_audio_delay_ms,
+ &total_video_delay_ms));
+ EXPECT_EQ(base_target_delay, total_video_delay_ms);
+    // The audio delay is not allowed to change by more than half of the
+    // required change in delay.
+ EXPECT_EQ(current_extra_delay_ms + MaxAudioDelayIncrease(
+ current_audio_delay_ms,
+ base_target_delay + video_delay_ms - audio_delay_ms),
+ extra_audio_delay_ms);
+ current_extra_delay_ms = extra_audio_delay_ms;
+
+ // Simulate that NetEQ for some reason reduced the delay.
+ current_audio_delay_ms = base_target_delay + 10;
+ send_time_->IncreaseTimeMs(1000);
+ receive_time_->IncreaseTimeMs(800);
+ EXPECT_TRUE(DelayedStreams(audio_delay_ms,
+ video_delay_ms,
+ current_audio_delay_ms,
+ &extra_audio_delay_ms,
+ &total_video_delay_ms));
+ EXPECT_EQ(base_target_delay, total_video_delay_ms);
+    // Since we can only ask NetEQ for a certain amount of extra delay, and
+ // we only measure the total NetEQ delay, we will ask for additional delay
+ // here to try to stay in sync.
+ EXPECT_EQ(current_extra_delay_ms + MaxAudioDelayIncrease(
+ current_audio_delay_ms,
+ base_target_delay + video_delay_ms - audio_delay_ms),
+ extra_audio_delay_ms);
+ current_extra_delay_ms = extra_audio_delay_ms;
+
+ // Simulate that NetEQ for some reason significantly increased the delay.
+ current_audio_delay_ms = base_target_delay + 350;
+ send_time_->IncreaseTimeMs(1000);
+ receive_time_->IncreaseTimeMs(800);
+ EXPECT_TRUE(DelayedStreams(audio_delay_ms,
+ video_delay_ms,
+ current_audio_delay_ms,
+ &extra_audio_delay_ms,
+ &total_video_delay_ms));
+ EXPECT_EQ(base_target_delay, total_video_delay_ms);
+    // The audio delay is not allowed to change by more than half of the
+    // required change in delay.
+ EXPECT_EQ(current_extra_delay_ms + MaxAudioDelayIncrease(
+ current_audio_delay_ms,
+ base_target_delay + video_delay_ms - audio_delay_ms),
+ extra_audio_delay_ms);
+ }
+
+ int MaxAudioDelayIncrease(int current_audio_delay_ms, int delay_ms) {
+ return std::min((delay_ms - current_audio_delay_ms) / kSmoothingFilter,
+ static_cast<int>(kMaxAudioDiffMs));
+ }
+
+ int MaxAudioDelayDecrease(int current_audio_delay_ms, int delay_ms) {
+ return std::max((delay_ms - current_audio_delay_ms) / kSmoothingFilter,
+ -kMaxAudioDiffMs);
+ }
+
+ enum { kSendTimeOffsetMs = 98765 };
+ enum { kReceiveTimeOffsetMs = 43210 };
+
+ StreamSynchronization* sync_;
+ Time* send_time_; // The simulated clock at the sender.
+ Time* receive_time_; // The simulated clock at the receiver.
+ double audio_clock_drift_;
+ double video_clock_drift_;
+};
+
+TEST_F(StreamSynchronizationTest, NoDelay) {
+ uint32_t current_audio_delay_ms = 0;
+ int extra_audio_delay_ms = 0;
+ int total_video_delay_ms = 0;
+
+ EXPECT_FALSE(DelayedStreams(0, 0, current_audio_delay_ms,
+ &extra_audio_delay_ms, &total_video_delay_ms));
+ EXPECT_EQ(0, extra_audio_delay_ms);
+ EXPECT_EQ(0, total_video_delay_ms);
+}
+
+TEST_F(StreamSynchronizationTest, VideoDelay) {
+ uint32_t current_audio_delay_ms = 0;
+ int delay_ms = 200;
+ int extra_audio_delay_ms = 0;
+ int total_video_delay_ms = 0;
+
+ EXPECT_TRUE(DelayedStreams(delay_ms, 0, current_audio_delay_ms,
+ &extra_audio_delay_ms, &total_video_delay_ms));
+ EXPECT_EQ(0, extra_audio_delay_ms);
+ // The video delay is not allowed to change more than this in 1 second.
+ EXPECT_EQ(delay_ms / kSmoothingFilter, total_video_delay_ms);
+
+ send_time_->IncreaseTimeMs(1000);
+ receive_time_->IncreaseTimeMs(800);
+ // Simulate 0 minimum delay in the VCM.
+ total_video_delay_ms = 0;
+ EXPECT_TRUE(DelayedStreams(delay_ms, 0, current_audio_delay_ms,
+ &extra_audio_delay_ms, &total_video_delay_ms));
+ EXPECT_EQ(0, extra_audio_delay_ms);
+ // The video delay is not allowed to change more than this in 1 second.
+ EXPECT_EQ(2 * delay_ms / kSmoothingFilter, total_video_delay_ms);
+
+ send_time_->IncreaseTimeMs(1000);
+ receive_time_->IncreaseTimeMs(800);
+ // Simulate 0 minimum delay in the VCM.
+ total_video_delay_ms = 0;
+ EXPECT_TRUE(DelayedStreams(delay_ms, 0, current_audio_delay_ms,
+ &extra_audio_delay_ms, &total_video_delay_ms));
+ EXPECT_EQ(0, extra_audio_delay_ms);
+ EXPECT_EQ(3 * delay_ms / kSmoothingFilter, total_video_delay_ms);
+}
+
+TEST_F(StreamSynchronizationTest, AudioDelay) {
+ int current_audio_delay_ms = 0;
+ int delay_ms = 200;
+ int extra_audio_delay_ms = 0;
+ int total_video_delay_ms = 0;
+
+ EXPECT_TRUE(DelayedStreams(0, delay_ms, current_audio_delay_ms,
+ &extra_audio_delay_ms, &total_video_delay_ms));
+ EXPECT_EQ(0, total_video_delay_ms);
+ // The audio delay is not allowed to change more than this in 1 second.
+ EXPECT_EQ(delay_ms / kSmoothingFilter, extra_audio_delay_ms);
+ current_audio_delay_ms = extra_audio_delay_ms;
+ int current_extra_delay_ms = extra_audio_delay_ms;
+
+ send_time_->IncreaseTimeMs(1000);
+ receive_time_->IncreaseTimeMs(800);
+ EXPECT_TRUE(DelayedStreams(0, delay_ms, current_audio_delay_ms,
+ &extra_audio_delay_ms, &total_video_delay_ms));
+ EXPECT_EQ(0, total_video_delay_ms);
+  // The audio delay is not allowed to change by more than half of the
+  // required change in delay.
+ EXPECT_EQ(current_extra_delay_ms +
+ MaxAudioDelayIncrease(current_audio_delay_ms, delay_ms),
+ extra_audio_delay_ms);
+ current_audio_delay_ms = extra_audio_delay_ms;
+ current_extra_delay_ms = extra_audio_delay_ms;
+
+ send_time_->IncreaseTimeMs(1000);
+ receive_time_->IncreaseTimeMs(800);
+ EXPECT_TRUE(DelayedStreams(0, delay_ms, current_audio_delay_ms,
+ &extra_audio_delay_ms, &total_video_delay_ms));
+ EXPECT_EQ(0, total_video_delay_ms);
+  // The audio delay is not allowed to change by more than half of the
+  // required change in delay.
+ EXPECT_EQ(current_extra_delay_ms +
+ MaxAudioDelayIncrease(current_audio_delay_ms, delay_ms),
+ extra_audio_delay_ms);
+ current_extra_delay_ms = extra_audio_delay_ms;
+
+ // Simulate that NetEQ for some reason reduced the delay.
+ current_audio_delay_ms = 10;
+ send_time_->IncreaseTimeMs(1000);
+ receive_time_->IncreaseTimeMs(800);
+ EXPECT_TRUE(DelayedStreams(0, delay_ms, current_audio_delay_ms,
+ &extra_audio_delay_ms, &total_video_delay_ms));
+ EXPECT_EQ(0, total_video_delay_ms);
+  // Since we can only ask NetEQ for a certain amount of extra delay, and
+  // we only measure the total NetEQ delay, we will ask for additional delay
+  // here to try to stay in sync.
+ EXPECT_EQ(current_extra_delay_ms +
+ MaxAudioDelayIncrease(current_audio_delay_ms, delay_ms),
+ extra_audio_delay_ms);
+ current_extra_delay_ms = extra_audio_delay_ms;
+
+ // Simulate that NetEQ for some reason significantly increased the delay.
+ current_audio_delay_ms = 350;
+ send_time_->IncreaseTimeMs(1000);
+ receive_time_->IncreaseTimeMs(800);
+ EXPECT_TRUE(DelayedStreams(0, delay_ms, current_audio_delay_ms,
+ &extra_audio_delay_ms, &total_video_delay_ms));
+ EXPECT_EQ(0, total_video_delay_ms);
+  // The audio delay is not allowed to change by more than half of the
+  // required change in delay.
+ EXPECT_EQ(current_extra_delay_ms +
+ MaxAudioDelayDecrease(current_audio_delay_ms, delay_ms),
+ extra_audio_delay_ms);
+}
+
+TEST_F(StreamSynchronizationTest, BothDelayedVideoLater) {
+ BothDelayedVideoLaterTest(0);
+}
+
+TEST_F(StreamSynchronizationTest, BothDelayedVideoLaterAudioClockDrift) {
+ audio_clock_drift_ = 1.05;
+ BothDelayedVideoLaterTest(0);
+}
+
+TEST_F(StreamSynchronizationTest, BothDelayedVideoLaterVideoClockDrift) {
+ video_clock_drift_ = 1.05;
+ BothDelayedVideoLaterTest(0);
+}
+
+TEST_F(StreamSynchronizationTest, BothDelayedAudioLater) {
+ BothDelayedAudioLaterTest(0);
+}
+
+TEST_F(StreamSynchronizationTest, BothDelayedAudioClockDrift) {
+ audio_clock_drift_ = 1.05;
+ BothDelayedAudioLaterTest(0);
+}
+
+TEST_F(StreamSynchronizationTest, BothDelayedVideoClockDrift) {
+ video_clock_drift_ = 1.05;
+ BothDelayedAudioLaterTest(0);
+}
+
+TEST_F(StreamSynchronizationTest, BaseDelay) {
+ int base_target_delay_ms = 2000;
+ int current_audio_delay_ms = 2000;
+ int extra_audio_delay_ms = 0;
+ int total_video_delay_ms = base_target_delay_ms;
+ sync_->SetTargetBufferingDelay(base_target_delay_ms);
+  // We are in sync; don't change.
+ EXPECT_FALSE(DelayedStreams(base_target_delay_ms, base_target_delay_ms,
+ current_audio_delay_ms,
+ &extra_audio_delay_ms, &total_video_delay_ms));
+ // Triggering another call with the same values. Delay should not be modified.
+ base_target_delay_ms = 2000;
+ current_audio_delay_ms = base_target_delay_ms;
+ total_video_delay_ms = base_target_delay_ms;
+ sync_->SetTargetBufferingDelay(base_target_delay_ms);
+  // We are in sync; don't change.
+ EXPECT_FALSE(DelayedStreams(base_target_delay_ms, base_target_delay_ms,
+ current_audio_delay_ms,
+ &extra_audio_delay_ms, &total_video_delay_ms));
+  // Changing the delay value - intended to test this module only. In
+  // practice it would take VoE time to adapt.
+ base_target_delay_ms = 5000;
+ current_audio_delay_ms = base_target_delay_ms;
+ total_video_delay_ms = base_target_delay_ms;
+ sync_->SetTargetBufferingDelay(base_target_delay_ms);
+  // We are in sync; don't change.
+ EXPECT_FALSE(DelayedStreams(base_target_delay_ms, base_target_delay_ms,
+ current_audio_delay_ms,
+ &extra_audio_delay_ms, &total_video_delay_ms));
+}
+
+TEST_F(StreamSynchronizationTest, BothDelayedAudioLaterWithBaseDelay) {
+ int base_target_delay_ms = 3000;
+ sync_->SetTargetBufferingDelay(base_target_delay_ms);
+ BothDelayedAudioLaterTest(base_target_delay_ms);
+}
+
+TEST_F(StreamSynchronizationTest, BothDelayedAudioClockDriftWithBaseDelay) {
+ int base_target_delay_ms = 3000;
+ sync_->SetTargetBufferingDelay(base_target_delay_ms);
+ audio_clock_drift_ = 1.05;
+ BothDelayedAudioLaterTest(base_target_delay_ms);
+}
+
+TEST_F(StreamSynchronizationTest, BothDelayedVideoClockDriftWithBaseDelay) {
+ int base_target_delay_ms = 3000;
+ sync_->SetTargetBufferingDelay(base_target_delay_ms);
+ video_clock_drift_ = 1.05;
+ BothDelayedAudioLaterTest(base_target_delay_ms);
+}
+
+TEST_F(StreamSynchronizationTest, BothDelayedVideoLaterWithBaseDelay) {
+ int base_target_delay_ms = 2000;
+ sync_->SetTargetBufferingDelay(base_target_delay_ms);
+ BothDelayedVideoLaterTest(base_target_delay_ms);
+}
+
+TEST_F(StreamSynchronizationTest,
+ BothDelayedVideoLaterAudioClockDriftWithBaseDelay) {
+ int base_target_delay_ms = 2000;
+ audio_clock_drift_ = 1.05;
+ sync_->SetTargetBufferingDelay(base_target_delay_ms);
+ BothDelayedVideoLaterTest(base_target_delay_ms);
+}
+
+TEST_F(StreamSynchronizationTest,
+ BothDelayedVideoLaterVideoClockDriftWithBaseDelay) {
+ int base_target_delay_ms = 2000;
+ video_clock_drift_ = 1.05;
+ sync_->SetTargetBufferingDelay(base_target_delay_ms);
+ BothDelayedVideoLaterTest(base_target_delay_ms);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/video/transport_adapter.cc b/third_party/libwebrtc/webrtc/video/transport_adapter.cc
new file mode 100644
index 0000000000..4a478858ba
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/transport_adapter.cc
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/transport_adapter.h"
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace internal {
+
+TransportAdapter::TransportAdapter(Transport* transport)
+ : transport_(transport), enabled_(false) {
+ RTC_DCHECK(nullptr != transport);
+}
+
+bool TransportAdapter::SendRtp(const uint8_t* packet,
+ size_t length,
+ const PacketOptions& options) {
+ if (!enabled_.load())
+ return false;
+
+ return transport_->SendRtp(packet, length, options);
+}
+
+bool TransportAdapter::SendRtcp(const uint8_t* packet, size_t length) {
+ if (!enabled_.load())
+ return false;
+
+ return transport_->SendRtcp(packet, length);
+}
+
+void TransportAdapter::Enable() {
+ enabled_.store(true);
+}
+
+void TransportAdapter::Disable() {
+ enabled_.store(false);
+}
+
+} // namespace internal
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/video/transport_adapter.h b/third_party/libwebrtc/webrtc/video/transport_adapter.h
new file mode 100644
index 0000000000..0168cc5b86
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/transport_adapter.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef VIDEO_TRANSPORT_ADAPTER_H_
+#define VIDEO_TRANSPORT_ADAPTER_H_
+
+#include <atomic>
+
+#include "api/call/transport.h"
+#include "common_types.h" // NOLINT(build/include)
+
+namespace webrtc {
+namespace internal {
+
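+// Wraps an outgoing Transport and adds an atomic on/off switch: while
+// disabled, SendRtp()/SendRtcp() drop packets and return false instead of
+// forwarding them to the wrapped transport.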
+class TransportAdapter : public Transport {
+ public:
+ explicit TransportAdapter(Transport* transport);
+
+ bool SendRtp(const uint8_t* packet,
+ size_t length,
+ const PacketOptions& options) override;
+ bool SendRtcp(const uint8_t* packet, size_t length) override;
+
+ void Enable();
+ void Disable();
+
+ private:
+  Transport* transport_;
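+  // Atomic so Enable()/Disable() can safely race with concurrent
+  // SendRtp()/SendRtcp() calls from other threads.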
+ std::atomic<bool> enabled_;
+};
+} // namespace internal
+} // namespace webrtc
+
+#endif // VIDEO_TRANSPORT_ADAPTER_H_
diff --git a/third_party/libwebrtc/webrtc/video/video_gn/moz.build b/third_party/libwebrtc/webrtc/video/video_gn/moz.build
new file mode 100644
index 0000000000..939d2f4b99
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/video_gn/moz.build
@@ -0,0 +1,250 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ###
+ ### DO NOT edit it by hand. ###
+
+COMPILE_FLAGS["OS_INCLUDES"] = []
+AllowCompilerWarnings()
+
+DEFINES["CHROMIUM_BUILD"] = True
+DEFINES["V8_DEPRECATION_WARNINGS"] = True
+DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0"
+DEFINES["WEBRTC_MOZILLA_BUILD"] = True
+DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0"
+DEFINES["WEBRTC_RESTRICT_LOGGING"] = True
+
+FINAL_LIBRARY = "webrtc"
+
+
+LOCAL_INCLUDES += [
+ "!/ipc/ipdl/_ipdlheaders",
+ "/ipc/chromium/src",
+ "/ipc/glue",
+ "/media/libyuv/libyuv/include/",
+ "/third_party/libwebrtc/webrtc/",
+ "/third_party/libwebrtc/webrtc/common_video/include/",
+ "/third_party/libwebrtc/webrtc/modules/audio_coding/include/",
+ "/third_party/libwebrtc/webrtc/modules/include/"
+]
+
+SOURCES += [
+ "/third_party/libwebrtc/webrtc/video/overuse_frame_detector.cc"
+]
+
+UNIFIED_SOURCES += [
+ "/third_party/libwebrtc/webrtc/video/call_stats.cc",
+ "/third_party/libwebrtc/webrtc/video/encoder_rtcp_feedback.cc",
+ "/third_party/libwebrtc/webrtc/video/payload_router.cc",
+ "/third_party/libwebrtc/webrtc/video/quality_threshold.cc",
+ "/third_party/libwebrtc/webrtc/video/receive_statistics_proxy.cc",
+ "/third_party/libwebrtc/webrtc/video/report_block_stats.cc",
+ "/third_party/libwebrtc/webrtc/video/rtp_streams_synchronizer.cc",
+ "/third_party/libwebrtc/webrtc/video/rtp_video_stream_receiver.cc",
+ "/third_party/libwebrtc/webrtc/video/send_delay_stats.cc",
+ "/third_party/libwebrtc/webrtc/video/send_statistics_proxy.cc",
+ "/third_party/libwebrtc/webrtc/video/stats_counter.cc",
+ "/third_party/libwebrtc/webrtc/video/stream_synchronization.cc",
+ "/third_party/libwebrtc/webrtc/video/transport_adapter.cc",
+ "/third_party/libwebrtc/webrtc/video/video_receive_stream.cc",
+ "/third_party/libwebrtc/webrtc/video/video_send_stream.cc",
+ "/third_party/libwebrtc/webrtc/video/video_stream_decoder.cc",
+ "/third_party/libwebrtc/webrtc/video/video_stream_encoder.cc"
+]
+
+if not CONFIG["MOZ_DEBUG"]:
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0"
+ DEFINES["NDEBUG"] = True
+ DEFINES["NVALGRIND"] = True
+
+if CONFIG["MOZ_DEBUG"] == "1":
+
+ DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1"
+ DEFINES["WTF_USE_DYNAMIC_ANNOTATIONS"] = "1"
+
+if CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["ANDROID"] = True
+ DEFINES["ANDROID_NDK_VERSION"] = "r12b"
+ DEFINES["DISABLE_NACL"] = True
+ DEFINES["HAVE_SYS_UIO_H"] = True
+ DEFINES["NO_TCMALLOC"] = True
+ DEFINES["USE_OPENSSL_CERTS"] = "1"
+ DEFINES["WEBRTC_ANDROID"] = True
+ DEFINES["WEBRTC_ANDROID_OPENSLES"] = True
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+ DEFINES["__GNU_SOURCE"] = "1"
+
+ OS_LIBS += [
+ "log"
+ ]
+
+if CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["NO_TCMALLOC"] = True
+ DEFINES["WEBRTC_MAC"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORE"] = "0"
+
+ OS_LIBS += [
+ "-framework Foundation"
+ ]
+
+if CONFIG["OS_TARGET"] == "DragonFly":
+
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+
+if CONFIG["OS_TARGET"] == "FreeBSD":
+
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+
+if CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["USE_NSS_CERTS"] = "1"
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_LINUX"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+
+ OS_LIBS += [
+ "m",
+ "rt"
+ ]
+
+if CONFIG["OS_TARGET"] == "NetBSD":
+
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+
+if CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["USE_X11"] = "1"
+ DEFINES["WEBRTC_BSD"] = True
+ DEFINES["WEBRTC_POSIX"] = True
+ DEFINES["_FILE_OFFSET_BITS"] = "64"
+
+if CONFIG["OS_TARGET"] == "WINNT":
+
+ DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True
+ DEFINES["NOMINMAX"] = True
+ DEFINES["NO_TCMALLOC"] = True
+ DEFINES["NTDDI_VERSION"] = "0x0A000000"
+ DEFINES["PSAPI_VERSION"] = "1"
+ DEFINES["UNICODE"] = True
+ DEFINES["WEBRTC_WIN"] = True
+ DEFINES["WIN32"] = True
+ DEFINES["WIN32_LEAN_AND_MEAN"] = True
+ DEFINES["WINVER"] = "0x0A00"
+ DEFINES["_ATL_NO_OPENGL"] = True
+ DEFINES["_CRT_RAND_S"] = True
+ DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_CRT_SECURE_NO_WARNINGS"] = True
+ DEFINES["_HAS_EXCEPTIONS"] = "0"
+ DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True
+ DEFINES["_SECURE_ATL"] = True
+ DEFINES["_UNICODE"] = True
+ DEFINES["_USING_V110_SDK71_"] = True
+ DEFINES["_WIN32_WINNT"] = "0x0A00"
+ DEFINES["_WINDOWS"] = True
+ DEFINES["__STD_C"] = True
+
+ OS_LIBS += [
+ "winmm"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+
+ DEFINES["WEBRTC_ARCH_ARM64"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if CONFIG["CPU_ARCH"] == "arm":
+
+ CXXFLAGS += [
+ "-mfpu=neon"
+ ]
+
+ DEFINES["WEBRTC_ARCH_ARM"] = True
+ DEFINES["WEBRTC_ARCH_ARM_V7"] = True
+ DEFINES["WEBRTC_HAS_NEON"] = True
+
+if not CONFIG["MOZ_DEBUG"] and CONFIG["OS_TARGET"] == "Android":
+
+ DEFINES["_FORTIFY_SOURCE"] = "2"
+
+if not CONFIG["MOZ_DEBUG"] and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["_FORTIFY_SOURCE"] = "2"
+
+if not CONFIG["MOZ_DEBUG"] and CONFIG["OS_TARGET"] == "DragonFly":
+
+ DEFINES["_FORTIFY_SOURCE"] = "2"
+
+if not CONFIG["MOZ_DEBUG"] and CONFIG["OS_TARGET"] == "FreeBSD":
+
+ DEFINES["_FORTIFY_SOURCE"] = "2"
+
+if not CONFIG["MOZ_DEBUG"] and CONFIG["OS_TARGET"] == "NetBSD":
+
+ DEFINES["_FORTIFY_SOURCE"] = "2"
+
+if not CONFIG["MOZ_DEBUG"] and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ DEFINES["_FORTIFY_SOURCE"] = "2"
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Android":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["CR_XCODE_VERSION"] = "0120"
+
+if CONFIG["CPU_ARCH"] == "x86_64" and CONFIG["OS_TARGET"] == "Darwin":
+
+ DEFINES["CR_XCODE_VERSION"] = "0920"
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "FreeBSD":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+if CONFIG["CPU_ARCH"] == "aarch64" and CONFIG["OS_TARGET"] == "Linux":
+
+ DEFINES["DISABLE_NACL"] = True
+ DEFINES["NO_TCMALLOC"] = True
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "Linux":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "NetBSD":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+if CONFIG["CPU_ARCH"] == "x86" and CONFIG["OS_TARGET"] == "OpenBSD":
+
+ CXXFLAGS += [
+ "-msse2"
+ ]
+
+Library("video_gn")
diff --git a/third_party/libwebrtc/webrtc/video/video_loopback.cc b/third_party/libwebrtc/webrtc/video/video_loopback.cc
new file mode 100644
index 0000000000..d5d525aa12
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/video_loopback.cc
@@ -0,0 +1,334 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+
+#include "rtc_base/flags.h"
+#include "test/field_trial.h"
+#include "test/gtest.h"
+#include "test/run_test.h"
+#include "video/video_quality_test.h"
+
+namespace webrtc {
+namespace flags {
+
+// Flags common with screenshare loopback, with different default values.
+DEFINE_int(width, 640, "Video width.");
+size_t Width() {
+ return static_cast<size_t>(FLAG_width);
+}
+
+DEFINE_int(height, 480, "Video height.");
+size_t Height() {
+ return static_cast<size_t>(FLAG_height);
+}
+
+DEFINE_int(fps, 30, "Frames per second.");
+int Fps() {
+ return static_cast<int>(FLAG_fps);
+}
+
+DEFINE_int(capture_device_index, 0, "Capture device to select");
+size_t GetCaptureDevice() {
+ return static_cast<size_t>(FLAG_capture_device_index);
+}
+
+DEFINE_int(min_bitrate, 50, "Call and stream min bitrate in kbps.");
+int MinBitrateKbps() {
+ return static_cast<int>(FLAG_min_bitrate);
+}
+
+DEFINE_int(start_bitrate, 300, "Call start bitrate in kbps.");
+int StartBitrateKbps() {
+ return static_cast<int>(FLAG_start_bitrate);
+}
+
+DEFINE_int(target_bitrate, 800, "Stream target bitrate in kbps.");
+int TargetBitrateKbps() {
+ return static_cast<int>(FLAG_target_bitrate);
+}
+
+DEFINE_int(max_bitrate, 800, "Call and stream max bitrate in kbps.");
+int MaxBitrateKbps() {
+ return static_cast<int>(FLAG_max_bitrate);
+}
+
+DEFINE_bool(suspend_below_min_bitrate,
+ false,
+ "Suspends video below the configured min bitrate.");
+
+DEFINE_int(num_temporal_layers,
+ 1,
+ "Number of temporal layers. Set to 1-4 to override.");
+int NumTemporalLayers() {
+ return static_cast<int>(FLAG_num_temporal_layers);
+}
+
+// Flags common with screenshare loopback, with equal default values.
+DEFINE_string(codec, "VP8", "Video codec to use.");
+std::string Codec() {
+ return static_cast<std::string>(FLAG_codec);
+}
+
+DEFINE_int(selected_tl,
+ -1,
+ "Temporal layer to show or analyze. -1 to disable filtering.");
+int SelectedTL() {
+ return static_cast<int>(FLAG_selected_tl);
+}
+
+DEFINE_int(
+ duration,
+ 0,
+ "Duration of the test in seconds. If 0, rendered will be shown instead.");
+int DurationSecs() {
+ return static_cast<int>(FLAG_duration);
+}
+
+DEFINE_string(output_filename, "", "Target graph data filename.");
+std::string OutputFilename() {
+ return static_cast<std::string>(FLAG_output_filename);
+}
+
+DEFINE_string(graph_title,
+ "",
+ "If empty, title will be generated automatically.");
+std::string GraphTitle() {
+ return static_cast<std::string>(FLAG_graph_title);
+}
+
+DEFINE_int(loss_percent, 0, "Percentage of packets randomly lost.");
+int LossPercent() {
+ return static_cast<int>(FLAG_loss_percent);
+}
+
+DEFINE_int(avg_burst_loss_length, -1, "Average burst length of lost packets.");
+int AvgBurstLossLength() {
+ return static_cast<int>(FLAG_avg_burst_loss_length);
+}
+
+DEFINE_int(link_capacity,
+ 0,
+ "Capacity (kbps) of the fake link. 0 means infinite.");
+int LinkCapacityKbps() {
+ return static_cast<int>(FLAG_link_capacity);
+}
+
+DEFINE_int(queue_size, 0, "Size of the bottleneck link queue in packets.");
+int QueueSize() {
+ return static_cast<int>(FLAG_queue_size);
+}
+
+DEFINE_int(avg_propagation_delay_ms,
+ 0,
+ "Average link propagation delay in ms.");
+int AvgPropagationDelayMs() {
+ return static_cast<int>(FLAG_avg_propagation_delay_ms);
+}
+
+DEFINE_string(rtc_event_log_name, "", "Filename for rtc event log.");
+std::string RtcEventLogName() {
+ return static_cast<std::string>(FLAG_rtc_event_log_name);
+}
+
+DEFINE_string(rtp_dump_name, "", "Filename for dumped received RTP stream.");
+std::string RtpDumpName() {
+ return static_cast<std::string>(FLAG_rtp_dump_name);
+}
+
+DEFINE_int(std_propagation_delay_ms,
+ 0,
+ "Link propagation delay standard deviation in ms.");
+int StdPropagationDelayMs() {
+ return static_cast<int>(FLAG_std_propagation_delay_ms);
+}
+
+DEFINE_int(num_streams, 0, "Number of streams to show or analyze.");
+int NumStreams() {
+ return static_cast<int>(FLAG_num_streams);
+}
+
+DEFINE_int(selected_stream,
+ 0,
+ "ID of the stream to show or analyze. "
+ "Set to the number of streams to show them all.");
+int SelectedStream() {
+ return static_cast<int>(FLAG_selected_stream);
+}
+
+DEFINE_int(num_spatial_layers, 1, "Number of spatial layers to use.");
+int NumSpatialLayers() {
+ return static_cast<int>(FLAG_num_spatial_layers);
+}
+
+DEFINE_int(selected_sl,
+ -1,
+ "Spatial layer to show or analyze. -1 to disable filtering.");
+int SelectedSL() {
+ return static_cast<int>(FLAG_selected_sl);
+}
+
+DEFINE_string(stream0,
+ "",
+ "Comma separated values describing VideoStream for stream #0.");
+std::string Stream0() {
+ return static_cast<std::string>(FLAG_stream0);
+}
+
+DEFINE_string(stream1,
+ "",
+ "Comma separated values describing VideoStream for stream #1.");
+std::string Stream1() {
+ return static_cast<std::string>(FLAG_stream1);
+}
+
+DEFINE_string(sl0,
+ "",
+ "Comma separated values describing SpatialLayer for layer #0.");
+std::string SL0() {
+ return static_cast<std::string>(FLAG_sl0);
+}
+
+DEFINE_string(sl1,
+ "",
+ "Comma separated values describing SpatialLayer for layer #1.");
+std::string SL1() {
+ return static_cast<std::string>(FLAG_sl1);
+}
+
+DEFINE_string(encoded_frame_path,
+ "",
+ "The base path for encoded frame logs. Created files will have "
+ "the form <encoded_frame_path>.<n>.(recv|send.<m>).ivf");
+std::string EncodedFramePath() {
+ return static_cast<std::string>(FLAG_encoded_frame_path);
+}
+
+DEFINE_bool(logs, false, "print logs to stderr");
+
+DEFINE_bool(send_side_bwe, true, "Use send-side bandwidth estimation");
+
+DEFINE_bool(allow_reordering, false, "Allow packet reordering to occur");
+
+DEFINE_bool(use_ulpfec, false, "Use RED+ULPFEC forward error correction.");
+
+DEFINE_bool(use_flexfec, false, "Use FlexFEC forward error correction.");
+
+DEFINE_bool(audio, false, "Add audio stream");
+
+DEFINE_bool(audio_video_sync, false, "Sync audio and video stream (no effect if"
+ " audio is false)");
+
+DEFINE_bool(audio_dtx, false, "Enable audio DTX (no effect if audio is false)");
+
+DEFINE_bool(video, true, "Add video stream");
+
+DEFINE_string(
+ force_fieldtrials,
+ "",
+ "Field trials control experimental feature code which can be forced. "
+ "E.g. running with --force_fieldtrials=WebRTC-FooFeature/Enable/"
+ " will assign the group Enable to field trial WebRTC-FooFeature. Multiple "
+ "trials are separated by \"/\"");
+
+// Video-specific flags.
+DEFINE_string(clip,
+ "",
+ "Name of the clip to show. If empty, using chroma generator.");
+std::string Clip() {
+ return static_cast<std::string>(FLAG_clip);
+}
+
+DEFINE_bool(help, false, "prints this message");
+
+} // namespace flags
+
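+// Builds a VideoQualityTest configuration from the flags above: a fake
+// network pipe (loss, capacity, queueing, propagation delay), call-level
+// bitrate limits and one video stream (plus optional audio), then runs
+// either the analyzer or the on-screen renderers depending on --duration.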
+void Loopback() {
+ FakeNetworkPipe::Config pipe_config;
+ pipe_config.loss_percent = flags::LossPercent();
+ pipe_config.avg_burst_loss_length = flags::AvgBurstLossLength();
+ pipe_config.link_capacity_kbps = flags::LinkCapacityKbps();
+ pipe_config.queue_length_packets = flags::QueueSize();
+ pipe_config.queue_delay_ms = flags::AvgPropagationDelayMs();
+ pipe_config.delay_standard_deviation_ms = flags::StdPropagationDelayMs();
+ pipe_config.allow_reordering = flags::FLAG_allow_reordering;
+
+ Call::Config::BitrateConfig call_bitrate_config;
+ call_bitrate_config.min_bitrate_bps = flags::MinBitrateKbps() * 1000;
+ call_bitrate_config.start_bitrate_bps = flags::StartBitrateKbps() * 1000;
+ call_bitrate_config.max_bitrate_bps = flags::MaxBitrateKbps() * 1000;
+
+ VideoQualityTest::Params params;
+ params.call = {flags::FLAG_send_side_bwe, call_bitrate_config, 0};
+ params.video = {flags::FLAG_video,
+ flags::Width(),
+ flags::Height(),
+ flags::Fps(),
+ flags::MinBitrateKbps() * 1000,
+ flags::TargetBitrateKbps() * 1000,
+ flags::MaxBitrateKbps() * 1000,
+ flags::FLAG_suspend_below_min_bitrate,
+ flags::Codec(),
+ flags::NumTemporalLayers(),
+ flags::SelectedTL(),
+ 0, // No min transmit bitrate.
+ flags::FLAG_use_ulpfec,
+ flags::FLAG_use_flexfec,
+ flags::Clip(),
+ flags::GetCaptureDevice()};
+ params.audio = {flags::FLAG_audio, flags::FLAG_audio_video_sync,
+ flags::FLAG_audio_dtx};
+ params.logging = {flags::FLAG_logs, flags::FLAG_rtc_event_log_name,
+ flags::FLAG_rtp_dump_name, flags::FLAG_encoded_frame_path};
+ params.screenshare.enabled = false;
+ params.analyzer = {"video", 0.0, 0.0, flags::DurationSecs(),
+ flags::OutputFilename(), flags::GraphTitle()};
+ params.pipe = pipe_config;
+
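+  // With multiple streams but no explicit stream descriptors, let the test
+  // infer per-stream settings from the top-level video parameters.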
+ if (flags::NumStreams() > 1 && flags::Stream0().empty() &&
+ flags::Stream1().empty()) {
+ params.ss.infer_streams = true;
+ }
+
+ std::vector<std::string> stream_descriptors;
+ stream_descriptors.push_back(flags::Stream0());
+ stream_descriptors.push_back(flags::Stream1());
+ std::vector<std::string> SL_descriptors;
+ SL_descriptors.push_back(flags::SL0());
+ SL_descriptors.push_back(flags::SL1());
+ VideoQualityTest::FillScalabilitySettings(
+ &params, stream_descriptors, flags::NumStreams(), flags::SelectedStream(),
+ flags::NumSpatialLayers(), flags::SelectedSL(), SL_descriptors);
+
+ VideoQualityTest test;
+ if (flags::DurationSecs()) {
+ test.RunWithAnalyzer(params);
+ } else {
+ test.RunWithRenderers(params);
+ }
+}
+} // namespace webrtc
+
+int main(int argc, char* argv[]) {
+ ::testing::InitGoogleTest(&argc, argv);
+ rtc::FlagList::SetFlagsFromCommandLine(&argc, argv, true);
+ if (webrtc::flags::FLAG_help) {
+ rtc::FlagList::Print(nullptr, false);
+ return 0;
+ }
+
+ // InitFieldTrialsFromString needs a reference to an std::string instance,
+ // with a scope that outlives the test.
+ std::string field_trials = webrtc::flags::FLAG_force_fieldtrials;
+ webrtc::test::InitFieldTrialsFromString(field_trials);
+
+ webrtc::test::RunTest(webrtc::Loopback);
+ return 0;
+}
diff --git a/third_party/libwebrtc/webrtc/video/video_quality_test.cc b/third_party/libwebrtc/webrtc/video/video_quality_test.cc
new file mode 100644
index 0000000000..e40d2955f0
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/video_quality_test.cc
@@ -0,0 +1,2177 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "video/video_quality_test.h"
+
+#include <stdio.h>
+#include <algorithm>
+#include <deque>
+#include <map>
+#include <sstream>
+#include <string>
+#include <vector>
+
+#include "api/optional.h"
+#include "call/call.h"
+#include "common_video/libyuv/include/webrtc_libyuv.h"
+#include "logging/rtc_event_log/output/rtc_event_log_output_file.h"
+#include "logging/rtc_event_log/rtc_event_log.h"
+#include "media/engine/internalencoderfactory.h"
+#include "media/engine/webrtcvideoengine.h"
+#include "modules/audio_mixer/audio_mixer_impl.h"
+#include "modules/rtp_rtcp/include/rtp_header_parser.h"
+#include "modules/rtp_rtcp/source/rtp_format.h"
+#include "modules/rtp_rtcp/source/rtp_utility.h"
+#include "modules/video_coding/codecs/h264/include/h264.h"
+#include "modules/video_coding/codecs/vp8/include/vp8.h"
+#include "modules/video_coding/codecs/vp8/include/vp8_common_types.h"
+#include "modules/video_coding/codecs/vp9/include/vp9.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/cpu_time.h"
+#include "rtc_base/event.h"
+#include "rtc_base/flags.h"
+#include "rtc_base/format_macros.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/memory_usage.h"
+#include "rtc_base/pathutils.h"
+#include "rtc_base/platform_file.h"
+#include "rtc_base/ptr_util.h"
+#include "rtc_base/timeutils.h"
+#include "system_wrappers/include/cpu_info.h"
+#include "system_wrappers/include/field_trial.h"
+#include "test/gtest.h"
+#include "test/layer_filtering_transport.h"
+#include "test/run_loop.h"
+#include "test/statistics.h"
+#include "test/testsupport/fileutils.h"
+#include "test/testsupport/frame_writer.h"
+#include "test/testsupport/test_artifacts.h"
+#include "test/vcm_capturer.h"
+#include "test/video_renderer.h"
+#include "voice_engine/include/voe_base.h"
+
+#include "test/rtp_file_writer.h"
+
+DEFINE_bool(save_worst_frame,
+ false,
+ "Enable saving a frame with the lowest PSNR to a jpeg file in the "
+ "test_artifacts_dir");
+
+namespace {
+
+constexpr int kSendStatsPollingIntervalMs = 1000;
+
+constexpr size_t kMaxComparisons = 10;
+constexpr char kSyncGroup[] = "av_sync";
+constexpr int kOpusMinBitrateBps = 6000;
+constexpr int kOpusBitrateFbBps = 32000;
+constexpr int kFramesSentInQuickTest = 1;
+constexpr uint32_t kThumbnailSendSsrcStart = 0xE0000;
+constexpr uint32_t kThumbnailRtxSsrcStart = 0xF0000;
+
+constexpr int kDefaultMaxQp = cricket::WebRtcVideoChannel::kDefaultQpMax;
+
+struct VoiceEngineState {
+ VoiceEngineState()
+ : voice_engine(nullptr),
+ base(nullptr),
+ send_channel_id(-1),
+ receive_channel_id(-1) {}
+
+ webrtc::VoiceEngine* voice_engine;
+ webrtc::VoEBase* base;
+ int send_channel_id;
+ int receive_channel_id;
+};
+
+void CreateVoiceEngine(
+ VoiceEngineState* voe,
+ webrtc::AudioDeviceModule* adm,
+ webrtc::AudioProcessing* apm,
+ rtc::scoped_refptr<webrtc::AudioDecoderFactory> decoder_factory) {
+ voe->voice_engine = webrtc::VoiceEngine::Create();
+ voe->base = webrtc::VoEBase::GetInterface(voe->voice_engine);
+ EXPECT_EQ(0, adm->Init());
+ EXPECT_EQ(0, voe->base->Init(adm, apm, decoder_factory));
+ webrtc::VoEBase::ChannelConfig config;
+ config.enable_voice_pacing = true;
+ voe->send_channel_id = voe->base->CreateChannel(config);
+ EXPECT_GE(voe->send_channel_id, 0);
+ voe->receive_channel_id = voe->base->CreateChannel();
+ EXPECT_GE(voe->receive_channel_id, 0);
+}
+
+void DestroyVoiceEngine(VoiceEngineState* voe) {
+ voe->base->DeleteChannel(voe->send_channel_id);
+ voe->send_channel_id = -1;
+ voe->base->DeleteChannel(voe->receive_channel_id);
+ voe->receive_channel_id = -1;
+ voe->base->Release();
+ voe->base = nullptr;
+
+ webrtc::VoiceEngine::Delete(voe->voice_engine);
+ voe->voice_engine = nullptr;
+}
+
+class VideoStreamFactory
+ : public webrtc::VideoEncoderConfig::VideoStreamFactoryInterface {
+ public:
+ explicit VideoStreamFactory(const std::vector<webrtc::VideoStream>& streams)
+ : streams_(streams) {}
+
+ private:
+ std::vector<webrtc::VideoStream> CreateEncoderStreams(
+ int width,
+ int height,
+ const webrtc::VideoEncoderConfig& encoder_config) override {
+ // The highest layer must match the incoming resolution.
+ std::vector<webrtc::VideoStream> streams = streams_;
+ streams[streams_.size() - 1].height = height;
+ streams[streams_.size() - 1].width = width;
+ return streams;
+ }
+
+ std::vector<webrtc::VideoStream> streams_;
+};
+
+bool IsFlexfec(int payload_type) {
+ return payload_type == webrtc::VideoQualityTest::kFlexfecPayloadType;
+}
+
+} // namespace
+
+namespace webrtc {
+
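+// Intercepts captured frames and sent/received RTP packets to compute
+// per-frame PSNR/SSIM, delay and drop statistics for the quality tests in
+// this file.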
+class VideoAnalyzer : public PacketReceiver,
+ public Transport,
+ public rtc::VideoSinkInterface<VideoFrame> {
+ public:
+ VideoAnalyzer(test::LayerFilteringTransport* transport,
+ const std::string& test_label,
+ double avg_psnr_threshold,
+ double avg_ssim_threshold,
+ int duration_frames,
+ FILE* graph_data_output_file,
+ const std::string& graph_title,
+ uint32_t ssrc_to_analyze,
+ uint32_t rtx_ssrc_to_analyze,
+ size_t selected_stream,
+ int selected_sl,
+ int selected_tl,
+ bool is_quick_test_enabled,
+ Clock* clock,
+ std::string rtp_dump_name)
+ : transport_(transport),
+ receiver_(nullptr),
+ call_(nullptr),
+ send_stream_(nullptr),
+ receive_stream_(nullptr),
+ captured_frame_forwarder_(this, clock),
+ test_label_(test_label),
+ graph_data_output_file_(graph_data_output_file),
+ graph_title_(graph_title),
+ ssrc_to_analyze_(ssrc_to_analyze),
+ rtx_ssrc_to_analyze_(rtx_ssrc_to_analyze),
+ selected_stream_(selected_stream),
+ selected_sl_(selected_sl),
+ selected_tl_(selected_tl),
+ pre_encode_proxy_(this),
+ encode_timing_proxy_(this),
+ last_fec_bytes_(0),
+ frames_to_process_(duration_frames),
+ frames_recorded_(0),
+ frames_processed_(0),
+ dropped_frames_(0),
+ dropped_frames_before_first_encode_(0),
+ dropped_frames_before_rendering_(0),
+ last_render_time_(0),
+ rtp_timestamp_delta_(0),
+ total_media_bytes_(0),
+ first_sending_time_(0),
+ last_sending_time_(0),
+ cpu_time_(0),
+ wallclock_time_(0),
+ avg_psnr_threshold_(avg_psnr_threshold),
+ avg_ssim_threshold_(avg_ssim_threshold),
+ is_quick_test_enabled_(is_quick_test_enabled),
+ stats_polling_thread_(&PollStatsThread, this, "StatsPoller"),
+ comparison_available_event_(false, false),
+ done_(true, false),
+ clock_(clock),
+ start_ms_(clock->TimeInMilliseconds()) {
+ // Create thread pool for CPU-expensive PSNR/SSIM calculations.
+
+    // Try to use about as many threads as cores, but leave kMinCoresLeft
+    // cores free, so that we don't accidentally starve "real" worker threads
+    // (codec etc).
+ // Also, don't allocate more than kMaxComparisonThreads, even if there are
+ // spare cores.
+
+ uint32_t num_cores = CpuInfo::DetectNumberOfCores();
+ RTC_DCHECK_GE(num_cores, 1);
+ static const uint32_t kMinCoresLeft = 4;
+ static const uint32_t kMaxComparisonThreads = 8;
+
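+    // E.g. a 12-core machine yields min(12 - 4, 8) = 8 comparison threads,
+    // while 4 or fewer cores fall back to a single thread.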
+ if (num_cores <= kMinCoresLeft) {
+ num_cores = 1;
+ } else {
+ num_cores -= kMinCoresLeft;
+ num_cores = std::min(num_cores, kMaxComparisonThreads);
+ }
+
+ for (uint32_t i = 0; i < num_cores; ++i) {
+ rtc::PlatformThread* thread =
+ new rtc::PlatformThread(&FrameComparisonThread, this, "Analyzer");
+ thread->Start();
+ comparison_thread_pool_.push_back(thread);
+ }
+
+ if (!rtp_dump_name.empty()) {
+ fprintf(stdout, "Writing rtp dump to %s\n", rtp_dump_name.c_str());
+ rtp_file_writer_.reset(test::RtpFileWriter::Create(
+ test::RtpFileWriter::kRtpDump, rtp_dump_name));
+ }
+ }
+
+ ~VideoAnalyzer() {
+ for (rtc::PlatformThread* thread : comparison_thread_pool_) {
+ thread->Stop();
+ delete thread;
+ }
+ }
+
+ virtual void SetReceiver(PacketReceiver* receiver) { receiver_ = receiver; }
+
+ void SetSource(test::VideoCapturer* video_capturer, bool respect_sink_wants) {
+ if (respect_sink_wants)
+ captured_frame_forwarder_.SetSource(video_capturer);
+ rtc::VideoSinkWants wants;
+ video_capturer->AddOrUpdateSink(InputInterface(), wants);
+ }
+
+ void SetCall(Call* call) {
+ rtc::CritScope lock(&crit_);
+ RTC_DCHECK(!call_);
+ call_ = call;
+ }
+
+ void SetSendStream(VideoSendStream* stream) {
+ rtc::CritScope lock(&crit_);
+ RTC_DCHECK(!send_stream_);
+ send_stream_ = stream;
+ }
+
+ void SetReceiveStream(VideoReceiveStream* stream) {
+ rtc::CritScope lock(&crit_);
+ RTC_DCHECK(!receive_stream_);
+ receive_stream_ = stream;
+ }
+
+ rtc::VideoSinkInterface<VideoFrame>* InputInterface() {
+ return &captured_frame_forwarder_;
+ }
+ rtc::VideoSourceInterface<VideoFrame>* OutputInterface() {
+ return &captured_frame_forwarder_;
+ }
+
+ DeliveryStatus DeliverPacket(MediaType media_type,
+ const uint8_t* packet,
+ size_t length,
+ const PacketTime& packet_time) override {
+ // Ignore timestamps of RTCP packets. They're not synchronized with
+ // RTP packet timestamps and so they would confuse wrap_handler_.
+ if (RtpHeaderParser::IsRtcp(packet, length)) {
+ return receiver_->DeliverPacket(media_type, packet, length, packet_time);
+ }
+
+ if (rtp_file_writer_) {
+ test::RtpPacket p;
+ memcpy(p.data, packet, length);
+ p.length = length;
+ p.original_length = length;
+ p.time_ms = clock_->TimeInMilliseconds() - start_ms_;
+ rtp_file_writer_->WritePacket(&p);
+ }
+
+ RtpUtility::RtpHeaderParser parser(packet, length);
+ RTPHeader header;
+ parser.Parse(&header);
+ if (!IsFlexfec(header.payloadType) &&
+ (header.ssrc == ssrc_to_analyze_ ||
+ header.ssrc == rtx_ssrc_to_analyze_)) {
+ // Ignore FlexFEC timestamps, to avoid collisions with media timestamps.
+ // (FlexFEC and media are sent on different SSRCs, which have different
+      // timestamp spaces.)
+ // Also ignore packets from wrong SSRC, but include retransmits.
+ rtc::CritScope lock(&crit_);
+ int64_t timestamp =
+ wrap_handler_.Unwrap(header.timestamp - rtp_timestamp_delta_);
+ recv_times_[timestamp] =
+ Clock::GetRealTimeClock()->CurrentNtpInMilliseconds();
+ }
+
+ return receiver_->DeliverPacket(media_type, packet, length, packet_time);
+ }
+
+ void MeasuredEncodeTiming(int64_t ntp_time_ms, int encode_time_ms) {
+ rtc::CritScope crit(&comparison_lock_);
+ samples_encode_time_ms_[ntp_time_ms] = encode_time_ms;
+ }
+
+ void PreEncodeOnFrame(const VideoFrame& video_frame) {
+ rtc::CritScope lock(&crit_);
+ if (!first_encoded_timestamp_) {
+ while (frames_.front().timestamp() != video_frame.timestamp()) {
+ ++dropped_frames_before_first_encode_;
+ frames_.pop_front();
+ RTC_CHECK(!frames_.empty());
+ }
+ first_encoded_timestamp_ =
+ rtc::Optional<uint32_t>(video_frame.timestamp());
+ }
+ }
+
+ void PostEncodeFrameCallback(const EncodedFrame& encoded_frame) {
+ rtc::CritScope lock(&crit_);
+ if (!first_sent_timestamp_ &&
+ encoded_frame.stream_id_ == selected_stream_) {
+ first_sent_timestamp_ = rtc::Optional<uint32_t>(encoded_frame.timestamp_);
+ }
+ }
+
+ bool SendRtp(const uint8_t* packet,
+ size_t length,
+ const PacketOptions& options) override {
+ RtpUtility::RtpHeaderParser parser(packet, length);
+ RTPHeader header;
+ parser.Parse(&header);
+
+ int64_t current_time =
+ Clock::GetRealTimeClock()->CurrentNtpInMilliseconds();
+
+ bool result = transport_->SendRtp(packet, length, options);
+ {
+ rtc::CritScope lock(&crit_);
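+      // Record, once, the constant offset between sent RTP timestamps and
+      // the original capture timestamps, so that packets and rendered frames
+      // can be matched back to the captured frames they stem from.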
+ if (rtp_timestamp_delta_ == 0 && header.ssrc == ssrc_to_analyze_) {
+ RTC_CHECK(static_cast<bool>(first_sent_timestamp_));
+ rtp_timestamp_delta_ = header.timestamp - *first_sent_timestamp_;
+ }
+
+ if (!IsFlexfec(header.payloadType) && header.ssrc == ssrc_to_analyze_) {
+ // Ignore FlexFEC timestamps, to avoid collisions with media timestamps.
+ // (FlexFEC and media are sent on different SSRCs, which have different
+        // timestamp spaces.)
+ // Also ignore packets from wrong SSRC and retransmits.
+ int64_t timestamp =
+ wrap_handler_.Unwrap(header.timestamp - rtp_timestamp_delta_);
+ send_times_[timestamp] = current_time;
+
+ if (IsInSelectedSpatialAndTemporalLayer(packet, length, header)) {
+ encoded_frame_sizes_[timestamp] +=
+ length - (header.headerLength + header.paddingLength);
+ total_media_bytes_ +=
+ length - (header.headerLength + header.paddingLength);
+ }
+ if (first_sending_time_ == 0)
+ first_sending_time_ = current_time;
+ last_sending_time_ = current_time;
+ }
+ }
+ return result;
+ }
+
+ bool SendRtcp(const uint8_t* packet, size_t length) override {
+ return transport_->SendRtcp(packet, length);
+ }
+
+ void OnFrame(const VideoFrame& video_frame) override {
+ int64_t render_time_ms =
+ Clock::GetRealTimeClock()->CurrentNtpInMilliseconds();
+
+ rtc::CritScope lock(&crit_);
+
+ StartExcludingCpuThreadTime();
+
+ int64_t send_timestamp =
+ wrap_handler_.Unwrap(video_frame.timestamp() - rtp_timestamp_delta_);
+
+ while (wrap_handler_.Unwrap(frames_.front().timestamp()) < send_timestamp) {
+ if (!last_rendered_frame_) {
+ // No previous frame rendered, this one was dropped after sending but
+ // before rendering.
+ ++dropped_frames_before_rendering_;
+ } else {
+ AddFrameComparison(frames_.front(), *last_rendered_frame_, true,
+ render_time_ms);
+ }
+ frames_.pop_front();
+ RTC_DCHECK(!frames_.empty());
+ }
+
+ VideoFrame reference_frame = frames_.front();
+ frames_.pop_front();
+ int64_t reference_timestamp =
+ wrap_handler_.Unwrap(reference_frame.timestamp());
+ if (send_timestamp == reference_timestamp - 1) {
+ // TODO(ivica): Make this work for > 2 streams.
+ // Look at RTPSender::BuildRTPHeader.
+ ++send_timestamp;
+ }
+ ASSERT_EQ(reference_timestamp, send_timestamp);
+
+ AddFrameComparison(reference_frame, video_frame, false, render_time_ms);
+
+ last_rendered_frame_ = rtc::Optional<VideoFrame>(video_frame);
+
+ StopExcludingCpuThreadTime();
+ }
+
+ void Wait() {
+    // Frame comparisons can be very expensive. Wait for the test to be done,
+    // but at time-out check whether frames_processed is going up. If so,
+    // give it more time, otherwise fail. Hopefully this will reduce test
+    // flakiness.
+
+ stats_polling_thread_.Start();
+
+ int last_frames_processed = -1;
+ int iteration = 0;
+ while (!done_.Wait(VideoQualityTest::kDefaultTimeoutMs)) {
+ int frames_processed;
+ {
+ rtc::CritScope crit(&comparison_lock_);
+ frames_processed = frames_processed_;
+ }
+
+ // Print some output so test infrastructure won't think we've crashed.
+ const char* kKeepAliveMessages[3] = {
+ "Uh, I'm-I'm not quite dead, sir.",
+ "Uh, I-I think uh, I could pull through, sir.",
+ "Actually, I think I'm all right to come with you--"};
+ printf("- %s\n", kKeepAliveMessages[iteration++ % 3]);
+
+ if (last_frames_processed == -1) {
+ last_frames_processed = frames_processed;
+ continue;
+ }
+ if (frames_processed == last_frames_processed) {
+ EXPECT_GT(frames_processed, last_frames_processed)
+ << "Analyzer stalled while waiting for test to finish.";
+ done_.Set();
+ break;
+ }
+ last_frames_processed = frames_processed;
+ }
+
+ if (iteration > 0)
+ printf("- Farewell, sweet Concorde!\n");
+
+ stats_polling_thread_.Stop();
+ }
+
+ rtc::VideoSinkInterface<VideoFrame>* pre_encode_proxy() {
+ return &pre_encode_proxy_;
+ }
+ EncodedFrameObserver* encode_timing_proxy() { return &encode_timing_proxy_; }
+
+ void StartMeasuringCpuProcessTime() {
+ rtc::CritScope lock(&cpu_measurement_lock_);
+ cpu_time_ -= rtc::GetProcessCpuTimeNanos();
+ wallclock_time_ -= rtc::SystemTimeNanos();
+ }
+
+ void StopMeasuringCpuProcessTime() {
+ rtc::CritScope lock(&cpu_measurement_lock_);
+ cpu_time_ += rtc::GetProcessCpuTimeNanos();
+ wallclock_time_ += rtc::SystemTimeNanos();
+ }
+
+ void StartExcludingCpuThreadTime() {
+ rtc::CritScope lock(&cpu_measurement_lock_);
+ cpu_time_ += rtc::GetThreadCpuTimeNanos();
+ }
+
+ void StopExcludingCpuThreadTime() {
+ rtc::CritScope lock(&cpu_measurement_lock_);
+ cpu_time_ -= rtc::GetThreadCpuTimeNanos();
+ }
+
+ double GetCpuUsagePercent() {
+ rtc::CritScope lock(&cpu_measurement_lock_);
+ return static_cast<double>(cpu_time_) / wallclock_time_ * 100.0;
+ }
+
+ test::LayerFilteringTransport* const transport_;
+ PacketReceiver* receiver_;
+
+ private:
+ struct FrameComparison {
+ FrameComparison()
+ : dropped(false),
+ input_time_ms(0),
+ send_time_ms(0),
+ recv_time_ms(0),
+ render_time_ms(0),
+ encoded_frame_size(0) {}
+
+ FrameComparison(const VideoFrame& reference,
+ const VideoFrame& render,
+ bool dropped,
+ int64_t input_time_ms,
+ int64_t send_time_ms,
+ int64_t recv_time_ms,
+ int64_t render_time_ms,
+ size_t encoded_frame_size)
+ : reference(reference),
+ render(render),
+ dropped(dropped),
+ input_time_ms(input_time_ms),
+ send_time_ms(send_time_ms),
+ recv_time_ms(recv_time_ms),
+ render_time_ms(render_time_ms),
+ encoded_frame_size(encoded_frame_size) {}
+
+ FrameComparison(bool dropped,
+ int64_t input_time_ms,
+ int64_t send_time_ms,
+ int64_t recv_time_ms,
+ int64_t render_time_ms,
+ size_t encoded_frame_size)
+ : dropped(dropped),
+ input_time_ms(input_time_ms),
+ send_time_ms(send_time_ms),
+ recv_time_ms(recv_time_ms),
+ render_time_ms(render_time_ms),
+ encoded_frame_size(encoded_frame_size) {}
+
+ rtc::Optional<VideoFrame> reference;
+ rtc::Optional<VideoFrame> render;
+ bool dropped;
+ int64_t input_time_ms;
+ int64_t send_time_ms;
+ int64_t recv_time_ms;
+ int64_t render_time_ms;
+ size_t encoded_frame_size;
+ };
+
+ struct Sample {
+ Sample(int dropped,
+ int64_t input_time_ms,
+ int64_t send_time_ms,
+ int64_t recv_time_ms,
+ int64_t render_time_ms,
+ size_t encoded_frame_size,
+ double psnr,
+ double ssim)
+ : dropped(dropped),
+ input_time_ms(input_time_ms),
+ send_time_ms(send_time_ms),
+ recv_time_ms(recv_time_ms),
+ render_time_ms(render_time_ms),
+ encoded_frame_size(encoded_frame_size),
+ psnr(psnr),
+ ssim(ssim) {}
+
+ int dropped;
+ int64_t input_time_ms;
+ int64_t send_time_ms;
+ int64_t recv_time_ms;
+ int64_t render_time_ms;
+ size_t encoded_frame_size;
+ double psnr;
+ double ssim;
+ };
+
+  // This class receives the send-side OnEncodeTiming and is provided so that
+  // it does not conflict with the receiver-side pre_decode_callback.
+ class OnEncodeTimingProxy : public EncodedFrameObserver {
+ public:
+ explicit OnEncodeTimingProxy(VideoAnalyzer* parent) : parent_(parent) {}
+
+ void OnEncodeTiming(int64_t ntp_time_ms, int encode_time_ms) override {
+ parent_->MeasuredEncodeTiming(ntp_time_ms, encode_time_ms);
+ }
+ void EncodedFrameCallback(const EncodedFrame& frame) override {
+ parent_->PostEncodeFrameCallback(frame);
+ }
+
+ private:
+ VideoAnalyzer* const parent_;
+ };
+
+  // This class receives the send-side OnFrame callback and is provided so
+  // that it does not conflict with the receiver-side renderer callback.
+ class PreEncodeProxy : public rtc::VideoSinkInterface<VideoFrame> {
+ public:
+ explicit PreEncodeProxy(VideoAnalyzer* parent) : parent_(parent) {}
+
+ void OnFrame(const VideoFrame& video_frame) override {
+ parent_->PreEncodeOnFrame(video_frame);
+ }
+
+ private:
+ VideoAnalyzer* const parent_;
+ };
+
+ bool IsInSelectedSpatialAndTemporalLayer(const uint8_t* packet,
+ size_t length,
+ const RTPHeader& header) {
+ if (header.payloadType != test::CallTest::kPayloadTypeVP9 &&
+ header.payloadType != test::CallTest::kPayloadTypeVP8) {
+ return true;
+ } else {
+ // Get VP8 and VP9 specific header to check layers indexes.
+ const uint8_t* payload = packet + header.headerLength;
+ const size_t payload_length = length - header.headerLength;
+ const size_t payload_data_length = payload_length - header.paddingLength;
+ const bool is_vp8 = header.payloadType == test::CallTest::kPayloadTypeVP8;
+ std::unique_ptr<RtpDepacketizer> depacketizer(
+ RtpDepacketizer::Create(is_vp8 ? kRtpVideoVp8 : kRtpVideoVp9));
+ RtpDepacketizer::ParsedPayload parsed_payload;
+ bool result =
+ depacketizer->Parse(&parsed_payload, payload, payload_data_length);
+ RTC_DCHECK(result);
+ const int temporal_idx = static_cast<int>(
+ is_vp8 ? parsed_payload.type.Video.codecHeader.VP8.temporalIdx
+ : parsed_payload.type.Video.codecHeader.VP9.temporal_idx);
+ const int spatial_idx = static_cast<int>(
+ is_vp8 ? kNoSpatialIdx
+ : parsed_payload.type.Video.codecHeader.VP9.spatial_idx);
+ return (selected_tl_ < 0 || temporal_idx == kNoTemporalIdx ||
+ temporal_idx <= selected_tl_) &&
+ (selected_sl_ < 0 || spatial_idx == kNoSpatialIdx ||
+ spatial_idx <= selected_sl_);
+ }
+ }
+
+ void AddFrameComparison(const VideoFrame& reference,
+ const VideoFrame& render,
+ bool dropped,
+ int64_t render_time_ms)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_) {
+ int64_t reference_timestamp = wrap_handler_.Unwrap(reference.timestamp());
+ int64_t send_time_ms = send_times_[reference_timestamp];
+ send_times_.erase(reference_timestamp);
+ int64_t recv_time_ms = recv_times_[reference_timestamp];
+ recv_times_.erase(reference_timestamp);
+
+ // TODO(ivica): Make this work for > 2 streams.
+ auto it = encoded_frame_sizes_.find(reference_timestamp);
+ if (it == encoded_frame_sizes_.end())
+ it = encoded_frame_sizes_.find(reference_timestamp - 1);
+ size_t encoded_size = it == encoded_frame_sizes_.end() ? 0 : it->second;
+ if (it != encoded_frame_sizes_.end())
+ encoded_frame_sizes_.erase(it);
+
+ rtc::CritScope crit(&comparison_lock_);
+ if (comparisons_.size() < kMaxComparisons) {
+ comparisons_.push_back(FrameComparison(reference, render, dropped,
+ reference.ntp_time_ms(),
+ send_time_ms, recv_time_ms,
+ render_time_ms, encoded_size));
+ } else {
+ comparisons_.push_back(FrameComparison(dropped,
+ reference.ntp_time_ms(),
+ send_time_ms, recv_time_ms,
+ render_time_ms, encoded_size));
+ }
+ comparison_available_event_.Set();
+ }
+
+ static void PollStatsThread(void* obj) {
+ static_cast<VideoAnalyzer*>(obj)->PollStats();
+ }
+
+ void PollStats() {
+ while (!done_.Wait(kSendStatsPollingIntervalMs)) {
+ rtc::CritScope crit(&comparison_lock_);
+
+ Call::Stats call_stats = call_->GetStats();
+ send_bandwidth_bps_.AddSample(call_stats.send_bandwidth_bps);
+
+ VideoSendStream::Stats send_stats = send_stream_->GetStats();
+      // It's not certain that we have estimates for any of these stats yet.
+      // Check that they are positive before mixing them in.
+ if (send_stats.encode_frame_rate > 0)
+ encode_frame_rate_.AddSample(send_stats.encode_frame_rate);
+ if (send_stats.avg_encode_time_ms > 0)
+ encode_time_ms_.AddSample(send_stats.avg_encode_time_ms);
+ if (send_stats.encode_usage_percent > 0)
+ encode_usage_percent_.AddSample(send_stats.encode_usage_percent);
+ if (send_stats.media_bitrate_bps > 0)
+ media_bitrate_bps_.AddSample(send_stats.media_bitrate_bps);
+ size_t fec_bytes = 0;
+ for (auto kv : send_stats.substreams) {
+ fec_bytes += kv.second.rtp_stats.fec.payload_bytes +
+ kv.second.rtp_stats.fec.padding_bytes;
+ }
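+      // The per-substream FEC counters are cumulative, so the delta since
+      // the last poll, times 8, is the FEC bitrate over the 1 s polling
+      // interval.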
+ fec_bitrate_bps_.AddSample((fec_bytes - last_fec_bytes_) * 8);
+ last_fec_bytes_ = fec_bytes;
+
+ if (receive_stream_ != nullptr) {
+ VideoReceiveStream::Stats receive_stats = receive_stream_->GetStats();
+ if (receive_stats.decode_ms > 0)
+ decode_time_ms_.AddSample(receive_stats.decode_ms);
+ if (receive_stats.max_decode_ms > 0)
+ decode_time_max_ms_.AddSample(receive_stats.max_decode_ms);
+ }
+
+ memory_usage_.AddSample(rtc::GetProcessResidentSizeBytes());
+ }
+ }
+
+ static bool FrameComparisonThread(void* obj) {
+ return static_cast<VideoAnalyzer*>(obj)->CompareFrames();
+ }
+
+ bool CompareFrames() {
+ if (AllFramesRecorded())
+ return false;
+
+ FrameComparison comparison;
+
+ if (!PopComparison(&comparison)) {
+ // Wait until new comparison task is available, or test is done.
+ // If done, wake up remaining threads waiting.
+ comparison_available_event_.Wait(1000);
+ if (AllFramesRecorded()) {
+ comparison_available_event_.Set();
+ return false;
+ }
+ return true; // Try again.
+ }
+
+ StartExcludingCpuThreadTime();
+
+ PerformFrameComparison(comparison);
+
+ StopExcludingCpuThreadTime();
+
+ if (FrameProcessed()) {
+ PrintResults();
+ if (graph_data_output_file_)
+ PrintSamplesToFile();
+ done_.Set();
+ comparison_available_event_.Set();
+ return false;
+ }
+
+ return true;
+ }
+
+ bool PopComparison(FrameComparison* comparison) {
+ rtc::CritScope crit(&comparison_lock_);
+    // If AllFramesRecorded() is true, it means we have already popped
+    // frames_to_process_ frames from comparisons_, so there is no more work
+    // for this thread to do. frames_processed_ might still be lower if not
+    // all comparisons are done, but those frames are currently being worked
+    // on by other threads.
+ if (comparisons_.empty() || AllFramesRecorded())
+ return false;
+
+ *comparison = comparisons_.front();
+ comparisons_.pop_front();
+
+ FrameRecorded();
+ return true;
+ }
+
+ // Increment counter for number of frames received for comparison.
+ void FrameRecorded() {
+ rtc::CritScope crit(&comparison_lock_);
+ ++frames_recorded_;
+ }
+
+ // Returns true if all frames to be compared have been taken from the queue.
+ bool AllFramesRecorded() {
+ rtc::CritScope crit(&comparison_lock_);
+ assert(frames_recorded_ <= frames_to_process_);
+ return frames_recorded_ == frames_to_process_;
+ }
+
+ // Increase count of number of frames processed. Returns true if this was the
+ // last frame to be processed.
+ bool FrameProcessed() {
+ rtc::CritScope crit(&comparison_lock_);
+ ++frames_processed_;
+ assert(frames_processed_ <= frames_to_process_);
+ return frames_processed_ == frames_to_process_;
+ }
+
+ void PrintResults() {
+ StopMeasuringCpuProcessTime();
+ rtc::CritScope crit(&comparison_lock_);
+ PrintResult("psnr", psnr_, " dB");
+ PrintResult("ssim", ssim_, " score");
+ PrintResult("sender_time", sender_time_, " ms");
+ PrintResult("receiver_time", receiver_time_, " ms");
+ PrintResult("total_delay_incl_network", end_to_end_, " ms");
+ PrintResult("time_between_rendered_frames", rendered_delta_, " ms");
+ PrintResult("encode_frame_rate", encode_frame_rate_, " fps");
+ PrintResult("encode_time", encode_time_ms_, " ms");
+ PrintResult("media_bitrate", media_bitrate_bps_, " bps");
+ PrintResult("fec_bitrate", fec_bitrate_bps_, " bps");
+ PrintResult("send_bandwidth", send_bandwidth_bps_, " bps");
+
+ if (worst_frame_) {
+ printf("RESULT min_psnr: %s = %lf dB\n", test_label_.c_str(),
+ worst_frame_->psnr);
+ }
+
+ if (receive_stream_ != nullptr) {
+ PrintResult("decode_time", decode_time_ms_, " ms");
+ }
+
+ printf("RESULT dropped_frames: %s = %d frames\n", test_label_.c_str(),
+ dropped_frames_);
+ printf("RESULT cpu_usage: %s = %lf %%\n", test_label_.c_str(),
+ GetCpuUsagePercent());
+
+#if defined(WEBRTC_WIN)
+ // On Linux and Mac in Resident Set some unused pages may be counted.
+ // Therefore this metric will depend on order in which tests are run and
+ // will be flaky.
+ PrintResult("memory_usage", memory_usage_, " bytes");
+#endif
+
+    // Save only the worst frame for manual analysis. The intention here is
+    // only to detect video corruption, not to track picture quality. Thus,
+    // JPEG is used here.
+ if (FLAG_save_worst_frame && worst_frame_) {
+ std::string output_dir;
+ test::GetTestArtifactsDir(&output_dir);
+ std::string output_path =
+ rtc::Pathname(output_dir, test_label_ + ".jpg").pathname();
+ RTC_LOG(LS_INFO) << "Saving worst frame to " << output_path;
+ test::JpegFrameWriter frame_writer(output_path);
+ RTC_CHECK(frame_writer.WriteFrame(worst_frame_->frame,
+ 100 /*best quality*/));
+ }
+
+ // Disable quality check for quick test, as quality checks may fail
+ // because too few samples were collected.
+ if (!is_quick_test_enabled_) {
+ EXPECT_GT(psnr_.Mean(), avg_psnr_threshold_);
+ EXPECT_GT(ssim_.Mean(), avg_ssim_threshold_);
+ }
+ }
+
+ void PerformFrameComparison(const FrameComparison& comparison) {
+ // Perform expensive psnr and ssim calculations while not holding lock.
+ double psnr = -1.0;
+ double ssim = -1.0;
+ if (comparison.reference && !comparison.dropped) {
+ psnr = I420PSNR(&*comparison.reference, &*comparison.render);
+ ssim = I420SSIM(&*comparison.reference, &*comparison.render);
+ }
+
+ rtc::CritScope crit(&comparison_lock_);
+
+ if (psnr >= 0.0 && (!worst_frame_ || worst_frame_->psnr > psnr)) {
+ worst_frame_.emplace(FrameWithPsnr{psnr, *comparison.render});
+ }
+
+ if (graph_data_output_file_) {
+ samples_.push_back(Sample(
+ comparison.dropped, comparison.input_time_ms, comparison.send_time_ms,
+ comparison.recv_time_ms, comparison.render_time_ms,
+ comparison.encoded_frame_size, psnr, ssim));
+ }
+ if (psnr >= 0.0)
+ psnr_.AddSample(psnr);
+ if (ssim >= 0.0)
+ ssim_.AddSample(ssim);
+
+ if (comparison.dropped) {
+ ++dropped_frames_;
+ return;
+ }
+ if (last_render_time_ != 0)
+ rendered_delta_.AddSample(comparison.render_time_ms - last_render_time_);
+ last_render_time_ = comparison.render_time_ms;
+
+ sender_time_.AddSample(comparison.send_time_ms - comparison.input_time_ms);
+ if (comparison.recv_time_ms > 0) {
+      // If recv_time_ms == 0, all of this frame's packets were lost in the
+      // transport. Since we were still able to render the frame, the dropped
+      // packets must have been recovered by FlexFEC. The FlexFEC recovery
+      // happens internally in Call, so here we cannot know which FEC packets
+      // protected the lost media packets. Consequently, we were not able to
+      // record a meaningful recv_time_ms, and we therefore skip this sample.
+ //
+ // The reasoning above does not hold for ULPFEC and RTX, as for those
+ // strategies the timestamp of the received packets is set to the
+ // timestamp of the protected/retransmitted media packet. I.e., then
+ // recv_time_ms != 0, even though the media packets were lost.
+ receiver_time_.AddSample(comparison.render_time_ms -
+ comparison.recv_time_ms);
+ }
+ end_to_end_.AddSample(comparison.render_time_ms - comparison.input_time_ms);
+ encoded_frame_size_.AddSample(comparison.encoded_frame_size);
+ }
+
+ void PrintResult(const char* result_type,
+ test::Statistics stats,
+ const char* unit) {
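+    // Emits perf-dashboard style result lines, e.g. (illustrative values):
+    //   RESULT psnr: MyTest = {34.567890, 1.234567} dB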
+ printf("RESULT %s: %s = {%f, %f}%s\n",
+ result_type,
+ test_label_.c_str(),
+ stats.Mean(),
+ stats.StandardDeviation(),
+ unit);
+ }
+
+ void PrintSamplesToFile(void) {
+ FILE* out = graph_data_output_file_;
+ rtc::CritScope crit(&comparison_lock_);
+ std::sort(samples_.begin(), samples_.end(),
+ [](const Sample& A, const Sample& B) -> bool {
+ return A.input_time_ms < B.input_time_ms;
+ });
+
+ fprintf(out, "%s\n", graph_title_.c_str());
+ fprintf(out, "%" PRIuS "\n", samples_.size());
+ fprintf(out,
+ "dropped "
+ "input_time_ms "
+ "send_time_ms "
+ "recv_time_ms "
+ "render_time_ms "
+ "encoded_frame_size "
+ "psnr "
+ "ssim "
+ "encode_time_ms\n");
+ int missing_encode_time_samples = 0;
+ for (const Sample& sample : samples_) {
+ auto it = samples_encode_time_ms_.find(sample.input_time_ms);
+ int encode_time_ms;
+ if (it != samples_encode_time_ms_.end()) {
+ encode_time_ms = it->second;
+ } else {
+ ++missing_encode_time_samples;
+ encode_time_ms = -1;
+ }
+ fprintf(out, "%d %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRIuS
+ " %lf %lf %d\n",
+ sample.dropped, sample.input_time_ms, sample.send_time_ms,
+ sample.recv_time_ms, sample.render_time_ms,
+ sample.encoded_frame_size, sample.psnr, sample.ssim,
+ encode_time_ms);
+ }
+ if (missing_encode_time_samples) {
+ fprintf(stderr,
+ "Warning: Missing encode_time_ms samples for %d frame(s).\n",
+ missing_encode_time_samples);
+ }
+ }
+
+ double GetAverageMediaBitrateBps() {
+ if (last_sending_time_ == first_sending_time_) {
+ return 0;
+ } else {
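+      // total_media_bytes_ * 8 converts bytes to bits; the sending interval
+      // is in milliseconds, so scale by kNumMillisecsPerSec to get bits/s.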
+ return static_cast<double>(total_media_bytes_) * 8 /
+ (last_sending_time_ - first_sending_time_) *
+ rtc::kNumMillisecsPerSec;
+ }
+ }
+
+ // Implements VideoSinkInterface to receive captured frames from a
+ // FrameGeneratorCapturer. Implements VideoSourceInterface to be able to act
+ // as a source to VideoSendStream.
+ // It forwards all input frames to the VideoAnalyzer for later comparison and
+ // forwards the captured frames to the VideoSendStream.
+ class CapturedFrameForwarder : public rtc::VideoSinkInterface<VideoFrame>,
+ public rtc::VideoSourceInterface<VideoFrame> {
+ public:
+ explicit CapturedFrameForwarder(VideoAnalyzer* analyzer, Clock* clock)
+ : analyzer_(analyzer),
+ send_stream_input_(nullptr),
+ video_capturer_(nullptr),
+ clock_(clock) {}
+
+ void SetSource(test::VideoCapturer* video_capturer) {
+ video_capturer_ = video_capturer;
+ }
+
+ private:
+ void OnFrame(const VideoFrame& video_frame) override {
+ VideoFrame copy = video_frame;
+      // Frames from the capturer do not have an RTP timestamp.
+      // Create one so it can be used for comparison.
+ RTC_DCHECK_EQ(0, video_frame.timestamp());
+ if (video_frame.ntp_time_ms() == 0)
+ copy.set_ntp_time_ms(clock_->CurrentNtpInMilliseconds());
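+      // RTP video timestamps use a 90 kHz clock, i.e. 90 ticks per
+      // millisecond of NTP time.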
+ copy.set_timestamp(copy.ntp_time_ms() * 90);
+ analyzer_->AddCapturedFrameForComparison(copy);
+ rtc::CritScope lock(&crit_);
+ if (send_stream_input_)
+ send_stream_input_->OnFrame(copy);
+ }
+
+ // Called when |send_stream_.SetSource()| is called.
+ void AddOrUpdateSink(rtc::VideoSinkInterface<VideoFrame>* sink,
+ const rtc::VideoSinkWants& wants) override {
+ {
+ rtc::CritScope lock(&crit_);
+ RTC_DCHECK(!send_stream_input_ || send_stream_input_ == sink);
+ send_stream_input_ = sink;
+ }
+ if (video_capturer_) {
+ video_capturer_->AddOrUpdateSink(this, wants);
+ }
+ }
+
+ // Called by |send_stream_| when |send_stream_.SetSource()| is called.
+ void RemoveSink(rtc::VideoSinkInterface<VideoFrame>* sink) override {
+ rtc::CritScope lock(&crit_);
+ RTC_DCHECK(sink == send_stream_input_);
+ send_stream_input_ = nullptr;
+ }
+
+ VideoAnalyzer* const analyzer_;
+ rtc::CriticalSection crit_;
+ rtc::VideoSinkInterface<VideoFrame>* send_stream_input_
+ RTC_GUARDED_BY(crit_);
+ test::VideoCapturer* video_capturer_;
+ Clock* clock_;
+ };
+
+ void AddCapturedFrameForComparison(const VideoFrame& video_frame) {
+ rtc::CritScope lock(&crit_);
+ frames_.push_back(video_frame);
+ }
+
+ Call* call_;
+ VideoSendStream* send_stream_;
+ VideoReceiveStream* receive_stream_;
+ CapturedFrameForwarder captured_frame_forwarder_;
+ const std::string test_label_;
+ FILE* const graph_data_output_file_;
+ const std::string graph_title_;
+ const uint32_t ssrc_to_analyze_;
+ const uint32_t rtx_ssrc_to_analyze_;
+ const size_t selected_stream_;
+ const int selected_sl_;
+ const int selected_tl_;
+ PreEncodeProxy pre_encode_proxy_;
+ OnEncodeTimingProxy encode_timing_proxy_;
+ std::vector<Sample> samples_ RTC_GUARDED_BY(comparison_lock_);
+ std::map<int64_t, int> samples_encode_time_ms_
+ RTC_GUARDED_BY(comparison_lock_);
+ test::Statistics sender_time_ RTC_GUARDED_BY(comparison_lock_);
+ test::Statistics receiver_time_ RTC_GUARDED_BY(comparison_lock_);
+ test::Statistics psnr_ RTC_GUARDED_BY(comparison_lock_);
+ test::Statistics ssim_ RTC_GUARDED_BY(comparison_lock_);
+ test::Statistics end_to_end_ RTC_GUARDED_BY(comparison_lock_);
+ test::Statistics rendered_delta_ RTC_GUARDED_BY(comparison_lock_);
+ test::Statistics encoded_frame_size_ RTC_GUARDED_BY(comparison_lock_);
+ test::Statistics encode_frame_rate_ RTC_GUARDED_BY(comparison_lock_);
+ test::Statistics encode_time_ms_ RTC_GUARDED_BY(comparison_lock_);
+ test::Statistics encode_usage_percent_ RTC_GUARDED_BY(comparison_lock_);
+ test::Statistics decode_time_ms_ RTC_GUARDED_BY(comparison_lock_);
+ test::Statistics decode_time_max_ms_ RTC_GUARDED_BY(comparison_lock_);
+ test::Statistics media_bitrate_bps_ RTC_GUARDED_BY(comparison_lock_);
+ test::Statistics fec_bitrate_bps_ RTC_GUARDED_BY(comparison_lock_);
+ test::Statistics send_bandwidth_bps_ RTC_GUARDED_BY(comparison_lock_);
+ test::Statistics memory_usage_ RTC_GUARDED_BY(comparison_lock_);
+
+ struct FrameWithPsnr {
+ double psnr;
+ VideoFrame frame;
+ };
+
+ // Rendered frame with worst PSNR is saved for further analysis.
+ rtc::Optional<FrameWithPsnr> worst_frame_ RTC_GUARDED_BY(comparison_lock_);
+
+ size_t last_fec_bytes_;
+
+ const int frames_to_process_;
+ int frames_recorded_;
+ int frames_processed_;
+ int dropped_frames_;
+ int dropped_frames_before_first_encode_;
+ int dropped_frames_before_rendering_;
+ int64_t last_render_time_;
+ uint32_t rtp_timestamp_delta_;
+ int64_t total_media_bytes_;
+ int64_t first_sending_time_;
+ int64_t last_sending_time_;
+
+ int64_t cpu_time_ RTC_GUARDED_BY(cpu_measurement_lock_);
+ int64_t wallclock_time_ RTC_GUARDED_BY(cpu_measurement_lock_);
+ rtc::CriticalSection cpu_measurement_lock_;
+
+ rtc::CriticalSection crit_;
+ std::deque<VideoFrame> frames_ RTC_GUARDED_BY(crit_);
+ rtc::Optional<VideoFrame> last_rendered_frame_ RTC_GUARDED_BY(crit_);
+ rtc::TimestampWrapAroundHandler wrap_handler_ RTC_GUARDED_BY(crit_);
+ std::map<int64_t, int64_t> send_times_ RTC_GUARDED_BY(crit_);
+ std::map<int64_t, int64_t> recv_times_ RTC_GUARDED_BY(crit_);
+ std::map<int64_t, size_t> encoded_frame_sizes_ RTC_GUARDED_BY(crit_);
+ rtc::Optional<uint32_t> first_encoded_timestamp_ RTC_GUARDED_BY(crit_);
+ rtc::Optional<uint32_t> first_sent_timestamp_ RTC_GUARDED_BY(crit_);
+ const double avg_psnr_threshold_;
+ const double avg_ssim_threshold_;
+ bool is_quick_test_enabled_;
+
+ rtc::CriticalSection comparison_lock_;
+ std::vector<rtc::PlatformThread*> comparison_thread_pool_;
+ rtc::PlatformThread stats_polling_thread_;
+ rtc::Event comparison_available_event_;
+ std::deque<FrameComparison> comparisons_ RTC_GUARDED_BY(comparison_lock_);
+ rtc::Event done_;
+
+ std::unique_ptr<test::RtpFileWriter> rtp_file_writer_;
+ Clock* const clock_;
+ const int64_t start_ms_;
+};
+
+VideoQualityTest::VideoQualityTest()
+ : clock_(Clock::GetRealTimeClock()), receive_logs_(0), send_logs_(0) {
+ payload_type_map_ = test::CallTest::payload_type_map_;
+ RTC_DCHECK(payload_type_map_.find(kPayloadTypeH264) ==
+ payload_type_map_.end());
+ RTC_DCHECK(payload_type_map_.find(kPayloadTypeVP8) ==
+ payload_type_map_.end());
+ RTC_DCHECK(payload_type_map_.find(kPayloadTypeVP9) ==
+ payload_type_map_.end());
+ payload_type_map_[kPayloadTypeH264] = webrtc::MediaType::VIDEO;
+ payload_type_map_[kPayloadTypeVP8] = webrtc::MediaType::VIDEO;
+ payload_type_map_[kPayloadTypeVP9] = webrtc::MediaType::VIDEO;
+}
+
+VideoQualityTest::Params::Params()
+ : call({false, Call::Config::BitrateConfig(), 0}),
+ video({false, 640, 480, 30, 50, 800, 800, false, "VP8", 1, -1, 0, false,
+ false, ""}),
+ audio({false, false, false}),
+ screenshare({false, false, 10, 0}),
+ analyzer({"", 0.0, 0.0, 0, "", ""}),
+ pipe(),
+ ss({std::vector<VideoStream>(), 0, 0, -1, std::vector<SpatialLayer>()}),
+ logging({false, "", "", ""}) {}
+
+VideoQualityTest::Params::~Params() = default;
+
+void VideoQualityTest::TestBody() {}
+
+std::string VideoQualityTest::GenerateGraphTitle() const {
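+  // Produces a title such as (illustrative): "VP8 (800kbps, 30 FPS)".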
+ std::stringstream ss;
+ ss << params_.video.codec;
+ ss << " (" << params_.video.target_bitrate_bps / 1000 << "kbps";
+ ss << ", " << params_.video.fps << " FPS";
+ if (params_.screenshare.scroll_duration)
+ ss << ", " << params_.screenshare.scroll_duration << "s scroll";
+ if (params_.ss.streams.size() > 1)
+ ss << ", Stream #" << params_.ss.selected_stream;
+ if (params_.ss.num_spatial_layers > 1)
+ ss << ", Layer #" << params_.ss.selected_sl;
+ ss << ")";
+ return ss.str();
+}
+
+void VideoQualityTest::CheckParams() {
+ if (!params_.video.enabled)
+ return;
+  // Add a default stream if none is specified.
+ if (params_.ss.streams.empty())
+ params_.ss.streams.push_back(VideoQualityTest::DefaultVideoStream(params_));
+ if (params_.ss.num_spatial_layers == 0)
+ params_.ss.num_spatial_layers = 1;
+
+ if (params_.pipe.loss_percent != 0 ||
+ params_.pipe.queue_length_packets != 0) {
+    // Since LayerFilteringTransport changes the sequence numbers, we can't
+    // use that feature with packet loss, since the NACK requests would end up
+    // retransmitting the wrong packets.
+ RTC_CHECK(params_.ss.selected_sl == -1 ||
+ params_.ss.selected_sl == params_.ss.num_spatial_layers - 1);
+ RTC_CHECK(params_.video.selected_tl == -1 ||
+ params_.video.selected_tl ==
+ params_.video.num_temporal_layers - 1);
+ }
+
+ // TODO(ivica): Should max_bitrate_bps == -1 represent inf max bitrate, as it
+ // does in some parts of the code?
+ RTC_CHECK_GE(params_.video.max_bitrate_bps, params_.video.target_bitrate_bps);
+ RTC_CHECK_GE(params_.video.target_bitrate_bps, params_.video.min_bitrate_bps);
+ RTC_CHECK_LT(params_.video.selected_tl, params_.video.num_temporal_layers);
+ RTC_CHECK_LE(params_.ss.selected_stream, params_.ss.streams.size());
+ for (const VideoStream& stream : params_.ss.streams) {
+ RTC_CHECK_GE(stream.min_bitrate_bps, 0);
+ RTC_CHECK_GE(stream.target_bitrate_bps, stream.min_bitrate_bps);
+ RTC_CHECK_GE(stream.max_bitrate_bps, stream.target_bitrate_bps);
+ }
+  // TODO(ivica): Should we check if the sum of all streams/layers is equal to
+  // the total bitrate? We have to update them anyway in case the bitrate
+  // estimator changes the total bitrates.
+ RTC_CHECK_GE(params_.ss.num_spatial_layers, 1);
+ RTC_CHECK_LE(params_.ss.selected_sl, params_.ss.num_spatial_layers);
+ RTC_CHECK(params_.ss.spatial_layers.empty() ||
+ params_.ss.spatial_layers.size() ==
+ static_cast<size_t>(params_.ss.num_spatial_layers));
+ if (params_.video.codec == "VP8") {
+ RTC_CHECK_EQ(params_.ss.num_spatial_layers, 1);
+ } else if (params_.video.codec == "VP9") {
+ RTC_CHECK_EQ(params_.ss.streams.size(), 1);
+ }
+ RTC_CHECK_GE(params_.call.num_thumbnails, 0);
+ if (params_.call.num_thumbnails > 0) {
+ RTC_CHECK_EQ(params_.ss.num_spatial_layers, 1);
+ RTC_CHECK_EQ(params_.ss.streams.size(), 3);
+ RTC_CHECK_EQ(params_.video.num_temporal_layers, 3);
+ RTC_CHECK_EQ(params_.video.codec, "VP8");
+ }
+}
+
+// Static.
+std::vector<int> VideoQualityTest::ParseCSV(const std::string& str) {
+  // Parse comma separated nonnegative integers, where some elements may be
+  // empty. The empty values are replaced with -1.
+  // E.g. "10,20,,30,40" --> {10, 20, -1, 30, 40}
+  // E.g. ",,10,,20," --> {-1, -1, 10, -1, 20, -1}
+ std::vector<int> result;
+ if (str.empty())
+ return result;
+
+ const char* p = str.c_str();
+ int value = -1;
+ int pos;
+ while (*p) {
+ if (*p == ',') {
+ result.push_back(value);
+ value = -1;
+ ++p;
+ continue;
+ }
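+    // The %n conversion stores into |pos| the number of characters consumed
+    // so far, so we can advance |p| past the parsed integer.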
+ RTC_CHECK_EQ(sscanf(p, "%d%n", &value, &pos), 1)
+ << "Unexpected non-number value.";
+ p += pos;
+ }
+ result.push_back(value);
+ return result;
+}
+
+// Static.
+VideoStream VideoQualityTest::DefaultVideoStream(const Params& params) {
+ VideoStream stream;
+ stream.width = params.video.width;
+ stream.height = params.video.height;
+ stream.max_framerate = params.video.fps;
+ stream.min_bitrate_bps = params.video.min_bitrate_bps;
+ stream.target_bitrate_bps = params.video.target_bitrate_bps;
+ stream.max_bitrate_bps = params.video.max_bitrate_bps;
+ stream.max_qp = kDefaultMaxQp;
+ // TODO(sprang): Can we make this less of a hack?
+ if (params.video.num_temporal_layers == 2) {
+ stream.temporal_layer_thresholds_bps.push_back(stream.target_bitrate_bps);
+ } else if (params.video.num_temporal_layers == 3) {
+ stream.temporal_layer_thresholds_bps.push_back(stream.max_bitrate_bps / 4);
+ stream.temporal_layer_thresholds_bps.push_back(stream.target_bitrate_bps);
+ } else {
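+    // For other layer counts, use the standard VP8 per-layer rate allocation
+    // fractions of the maximum bitrate.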
+ RTC_CHECK_LE(params.video.num_temporal_layers, kMaxTemporalStreams);
+ for (int i = 0; i < params.video.num_temporal_layers - 1; ++i) {
+ stream.temporal_layer_thresholds_bps.push_back(static_cast<int>(
+ stream.max_bitrate_bps * kVp8LayerRateAlloction[0][i] + 0.5));
+ }
+ }
+ return stream;
+}
+
+// Static.
+VideoStream VideoQualityTest::DefaultThumbnailStream() {
+ VideoStream stream;
+ stream.width = 320;
+ stream.height = 180;
+ stream.max_framerate = 7;
+ stream.min_bitrate_bps = 7500;
+ stream.target_bitrate_bps = 37500;
+ stream.max_bitrate_bps = 50000;
+ stream.max_qp = kDefaultMaxQp;
+ return stream;
+}
+
+// Static.
+void VideoQualityTest::FillScalabilitySettings(
+ Params* params,
+ const std::vector<std::string>& stream_descriptors,
+ int num_streams,
+ size_t selected_stream,
+ int num_spatial_layers,
+ int selected_sl,
+ const std::vector<std::string>& sl_descriptors) {
+ if (params->ss.streams.empty() && params->ss.infer_streams) {
+ webrtc::VideoEncoderConfig encoder_config;
+ encoder_config.content_type =
+ params->screenshare.enabled
+ ? webrtc::VideoEncoderConfig::ContentType::kScreen
+ : webrtc::VideoEncoderConfig::ContentType::kRealtimeVideo;
+ encoder_config.max_bitrate_bps = params->video.max_bitrate_bps;
+ encoder_config.min_transmit_bitrate_bps = params->video.min_transmit_bps;
+ encoder_config.number_of_streams = num_streams;
+ encoder_config.spatial_layers = params->ss.spatial_layers;
+ encoder_config.video_stream_factory =
+ new rtc::RefCountedObject<cricket::EncoderStreamFactory>(
+ params->video.codec, kDefaultMaxQp, params->video.fps,
+ params->screenshare.enabled, true);
+ params->ss.streams =
+ encoder_config.video_stream_factory->CreateEncoderStreams(
+ static_cast<int>(params->video.width),
+ static_cast<int>(params->video.height), encoder_config);
+ } else {
+    // Read VideoStream and SpatialLayer elements from a list of
+    // comma-separated lists. To use a default value for an element, use -1 or
+    // leave it empty. Validity checks are performed in CheckParams.
+ RTC_CHECK(params->ss.streams.empty());
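+    // Each descriptor is expected to list the fields in this order:
+    //   width,height,max_framerate,min_bitrate_bps,target_bitrate_bps,
+    //   max_bitrate_bps[,max_qp[,tl_threshold_bps...]]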
+ for (auto descriptor : stream_descriptors) {
+ if (descriptor.empty())
+ continue;
+ VideoStream stream = VideoQualityTest::DefaultVideoStream(*params);
+ std::vector<int> v = VideoQualityTest::ParseCSV(descriptor);
+ if (v[0] != -1)
+ stream.width = static_cast<size_t>(v[0]);
+ if (v[1] != -1)
+ stream.height = static_cast<size_t>(v[1]);
+ if (v[2] != -1)
+ stream.max_framerate = v[2];
+ if (v[3] != -1)
+ stream.min_bitrate_bps = v[3];
+ if (v[4] != -1)
+ stream.target_bitrate_bps = v[4];
+ if (v[5] != -1)
+ stream.max_bitrate_bps = v[5];
+ if (v.size() > 6 && v[6] != -1)
+ stream.max_qp = v[6];
+ if (v.size() > 7) {
+ stream.temporal_layer_thresholds_bps.clear();
+ stream.temporal_layer_thresholds_bps.insert(
+ stream.temporal_layer_thresholds_bps.end(), v.begin() + 7, v.end());
+ } else {
+        // Automatic TL thresholds are not supported for more than two layers.
+ RTC_CHECK_LE(params->video.num_temporal_layers, 2);
+ }
+ params->ss.streams.push_back(stream);
+ }
+ }
+
+ params->ss.num_spatial_layers = std::max(1, num_spatial_layers);
+ params->ss.selected_stream = selected_stream;
+
+ params->ss.selected_sl = selected_sl;
+ RTC_CHECK(params->ss.spatial_layers.empty());
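+  // Each descriptor is expected to be on the form
+  //   scaling_factor_num,scaling_factor_den,target_bitrate_bps
+  // where -1 for a scaling factor means 1 (no scaling).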
+ for (auto descriptor : sl_descriptors) {
+ if (descriptor.empty())
+ continue;
+ std::vector<int> v = VideoQualityTest::ParseCSV(descriptor);
+ RTC_CHECK_GT(v[2], 0);
+
+ SpatialLayer layer;
+ layer.scaling_factor_num = v[0] == -1 ? 1 : v[0];
+ layer.scaling_factor_den = v[1] == -1 ? 1 : v[1];
+ layer.target_bitrate_bps = v[2];
+ params->ss.spatial_layers.push_back(layer);
+ }
+}
+
+void VideoQualityTest::SetupVideo(Transport* send_transport,
+ Transport* recv_transport) {
+ size_t num_video_streams = params_.ss.streams.size();
+ size_t num_flexfec_streams = params_.video.flexfec ? 1 : 0;
+ CreateSendConfig(num_video_streams, 0, num_flexfec_streams, send_transport);
+
+ int payload_type;
+ if (params_.video.codec == "H264") {
+ video_encoder_ = H264Encoder::Create(cricket::VideoCodec("H264"));
+ payload_type = kPayloadTypeH264;
+ } else if (params_.video.codec == "VP8") {
+ if (params_.screenshare.enabled && params_.ss.streams.size() > 1) {
+ // Simulcast screenshare needs a simulcast encoder adapter to work, since
+ // encoders usually can't natively do simulcast with different frame rates
+ // for the different layers.
+ video_encoder_.reset(
+ new SimulcastEncoderAdapter(new InternalEncoderFactory()));
+ } else {
+ video_encoder_ = VP8Encoder::Create();
+ }
+ payload_type = kPayloadTypeVP8;
+ } else if (params_.video.codec == "VP9") {
+ video_encoder_ = VP9Encoder::Create();
+ payload_type = kPayloadTypeVP9;
+ } else {
+ RTC_NOTREACHED() << "Codec not supported!";
+ return;
+ }
+ video_send_config_.encoder_settings.encoder = video_encoder_.get();
+ video_send_config_.encoder_settings.payload_name = params_.video.codec;
+ video_send_config_.encoder_settings.payload_type = payload_type;
+ video_send_config_.rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ video_send_config_.rtp.rtx.payload_type = kSendRtxPayloadType;
+ for (size_t i = 0; i < num_video_streams; ++i)
+ video_send_config_.rtp.rtx.ssrcs.push_back(kSendRtxSsrcs[i]);
+
+ video_send_config_.rtp.extensions.clear();
+ if (params_.call.send_side_bwe) {
+ video_send_config_.rtp.extensions.push_back(
+ RtpExtension(RtpExtension::kTransportSequenceNumberUri,
+ test::kTransportSequenceNumberExtensionId));
+ } else {
+ video_send_config_.rtp.extensions.push_back(RtpExtension(
+ RtpExtension::kAbsSendTimeUri, test::kAbsSendTimeExtensionId));
+ }
+ video_send_config_.rtp.extensions.push_back(RtpExtension(
+ RtpExtension::kVideoContentTypeUri, test::kVideoContentTypeExtensionId));
+ video_send_config_.rtp.extensions.push_back(RtpExtension(
+ RtpExtension::kVideoTimingUri, test::kVideoTimingExtensionId));
+
+ video_encoder_config_.min_transmit_bitrate_bps =
+ params_.video.min_transmit_bps;
+
+ video_send_config_.suspend_below_min_bitrate =
+ params_.video.suspend_below_min_bitrate;
+
+ video_encoder_config_.number_of_streams = params_.ss.streams.size();
+ video_encoder_config_.max_bitrate_bps = 0;
+ for (size_t i = 0; i < params_.ss.streams.size(); ++i) {
+ video_encoder_config_.max_bitrate_bps +=
+ params_.ss.streams[i].max_bitrate_bps;
+ }
+ if (params_.ss.infer_streams) {
+ video_encoder_config_.video_stream_factory =
+ new rtc::RefCountedObject<cricket::EncoderStreamFactory>(
+ params_.video.codec, params_.ss.streams[0].max_qp,
+ params_.video.fps, params_.screenshare.enabled, true);
+ } else {
+ video_encoder_config_.video_stream_factory =
+ new rtc::RefCountedObject<VideoStreamFactory>(params_.ss.streams);
+ }
+
+ video_encoder_config_.spatial_layers = params_.ss.spatial_layers;
+
+ CreateMatchingReceiveConfigs(recv_transport);
+
+ const bool decode_all_receive_streams =
+ params_.ss.selected_stream == params_.ss.streams.size();
+
+ for (size_t i = 0; i < num_video_streams; ++i) {
+ video_receive_configs_[i].rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ video_receive_configs_[i].rtp.rtx_ssrc = kSendRtxSsrcs[i];
+ video_receive_configs_[i]
+ .rtp.rtx_associated_payload_types[kSendRtxPayloadType] = payload_type;
+ video_receive_configs_[i].rtp.transport_cc = params_.call.send_side_bwe;
+ video_receive_configs_[i].rtp.remb = !params_.call.send_side_bwe;
+    // Enable RTT calculation so the NTP time estimator will work.
+ video_receive_configs_[i].rtp.rtcp_xr.receiver_reference_time_report = true;
+ // Force fake decoders on non-selected simulcast streams.
+ if (!decode_all_receive_streams && i != params_.ss.selected_stream) {
+ VideoReceiveStream::Decoder decoder;
+ decoder.decoder = new test::FakeDecoder();
+ decoder.payload_type = video_send_config_.encoder_settings.payload_type;
+ decoder.payload_name = video_send_config_.encoder_settings.payload_name;
+ video_receive_configs_[i].decoders.clear();
+ allocated_decoders_.emplace_back(decoder.decoder);
+ video_receive_configs_[i].decoders.push_back(decoder);
+ }
+ }
+
+ if (params_.video.flexfec) {
+ // Override send config constructed by CreateSendConfig.
+ if (decode_all_receive_streams) {
+ for (uint32_t media_ssrc : video_send_config_.rtp.ssrcs) {
+ video_send_config_.rtp.flexfec.protected_media_ssrcs.push_back(
+ media_ssrc);
+ }
+ } else {
+ video_send_config_.rtp.flexfec.protected_media_ssrcs = {
+ kVideoSendSsrcs[params_.ss.selected_stream]};
+ }
+
+ // The matching receive config is _not_ created by
+ // CreateMatchingReceiveConfigs, since VideoQualityTest is not a BaseTest.
+ // Set up the receive config manually instead.
+ FlexfecReceiveStream::Config flexfec_receive_config(recv_transport);
+ flexfec_receive_config.payload_type =
+ video_send_config_.rtp.flexfec.payload_type;
+ flexfec_receive_config.remote_ssrc = video_send_config_.rtp.flexfec.ssrc;
+ flexfec_receive_config.protected_media_ssrcs =
+ video_send_config_.rtp.flexfec.protected_media_ssrcs;
+ flexfec_receive_config.local_ssrc = kReceiverLocalVideoSsrc;
+ flexfec_receive_config.transport_cc = params_.call.send_side_bwe;
+ if (params_.call.send_side_bwe) {
+ flexfec_receive_config.rtp_header_extensions.push_back(
+ RtpExtension(RtpExtension::kTransportSequenceNumberUri,
+ test::kTransportSequenceNumberExtensionId));
+ } else {
+ flexfec_receive_config.rtp_header_extensions.push_back(RtpExtension(
+ RtpExtension::kAbsSendTimeUri, test::kAbsSendTimeExtensionId));
+ }
+ flexfec_receive_configs_.push_back(flexfec_receive_config);
+ if (num_video_streams > 0) {
+ video_receive_configs_[0].rtp.protected_by_flexfec = true;
+ }
+ }
+
+ if (params_.video.ulpfec) {
+ video_send_config_.rtp.ulpfec.red_payload_type = kRedPayloadType;
+ video_send_config_.rtp.ulpfec.ulpfec_payload_type = kUlpfecPayloadType;
+ video_send_config_.rtp.ulpfec.red_rtx_payload_type = kRtxRedPayloadType;
+
+ if (decode_all_receive_streams) {
+ for (auto it = video_receive_configs_.begin();
+ it != video_receive_configs_.end(); ++it) {
+ it->rtp.red_payload_type =
+ video_send_config_.rtp.ulpfec.red_payload_type;
+ it->rtp.ulpfec_payload_type =
+ video_send_config_.rtp.ulpfec.ulpfec_payload_type;
+ it->rtp.rtx_associated_payload_types[video_send_config_.rtp.ulpfec
+ .red_rtx_payload_type] =
+ video_send_config_.rtp.ulpfec.red_payload_type;
+ }
+ } else {
+ video_receive_configs_[params_.ss.selected_stream].rtp.red_payload_type =
+ video_send_config_.rtp.ulpfec.red_payload_type;
+ video_receive_configs_[params_.ss.selected_stream]
+ .rtp.ulpfec_payload_type =
+ video_send_config_.rtp.ulpfec.ulpfec_payload_type;
+ video_receive_configs_[params_.ss.selected_stream]
+ .rtp.rtx_associated_payload_types[video_send_config_.rtp.ulpfec
+ .red_rtx_payload_type] =
+ video_send_config_.rtp.ulpfec.red_payload_type;
+ }
+ }
+}
+
+void VideoQualityTest::SetupThumbnails(Transport* send_transport,
+ Transport* recv_transport) {
+ for (int i = 0; i < params_.call.num_thumbnails; ++i) {
+ thumbnail_encoders_.emplace_back(VP8Encoder::Create());
+
+    // Thumbnails are sent in the opposite direction: from receiver_call to
+    // sender_call.
+ VideoSendStream::Config thumbnail_send_config(recv_transport);
+ thumbnail_send_config.rtp.ssrcs.push_back(kThumbnailSendSsrcStart + i);
+ thumbnail_send_config.encoder_settings.encoder =
+ thumbnail_encoders_.back().get();
+ thumbnail_send_config.encoder_settings.payload_name = params_.video.codec;
+ thumbnail_send_config.encoder_settings.payload_type = kPayloadTypeVP8;
+ thumbnail_send_config.rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ thumbnail_send_config.rtp.rtx.payload_type = kSendRtxPayloadType;
+ thumbnail_send_config.rtp.rtx.ssrcs.push_back(kThumbnailRtxSsrcStart + i);
+ thumbnail_send_config.rtp.extensions.clear();
+ if (params_.call.send_side_bwe) {
+ thumbnail_send_config.rtp.extensions.push_back(
+ RtpExtension(RtpExtension::kTransportSequenceNumberUri,
+ test::kTransportSequenceNumberExtensionId));
+ } else {
+ thumbnail_send_config.rtp.extensions.push_back(RtpExtension(
+ RtpExtension::kAbsSendTimeUri, test::kAbsSendTimeExtensionId));
+ }
+
+ VideoEncoderConfig thumbnail_encoder_config;
+ thumbnail_encoder_config.min_transmit_bitrate_bps = 7500;
+ thumbnail_send_config.suspend_below_min_bitrate =
+ params_.video.suspend_below_min_bitrate;
+ thumbnail_encoder_config.number_of_streams = 1;
+ thumbnail_encoder_config.max_bitrate_bps = 50000;
+ if (params_.ss.infer_streams) {
+ thumbnail_encoder_config.video_stream_factory =
+ new rtc::RefCountedObject<VideoStreamFactory>(params_.ss.streams);
+ } else {
+ thumbnail_encoder_config.video_stream_factory =
+ new rtc::RefCountedObject<cricket::EncoderStreamFactory>(
+ params_.video.codec, params_.ss.streams[0].max_qp,
+ params_.video.fps, params_.screenshare.enabled, true);
+ }
+ thumbnail_encoder_config.spatial_layers = params_.ss.spatial_layers;
+
+ VideoReceiveStream::Config thumbnail_receive_config(send_transport);
+ thumbnail_receive_config.rtp.remb = false;
+ thumbnail_receive_config.rtp.transport_cc = true;
+ thumbnail_receive_config.rtp.local_ssrc = kReceiverLocalVideoSsrc;
+ for (const RtpExtension& extension : thumbnail_send_config.rtp.extensions)
+ thumbnail_receive_config.rtp.extensions.push_back(extension);
+ thumbnail_receive_config.renderer = &fake_renderer_;
+
+ VideoReceiveStream::Decoder decoder =
+ test::CreateMatchingDecoder(thumbnail_send_config.encoder_settings);
+ allocated_decoders_.push_back(
+ std::unique_ptr<VideoDecoder>(decoder.decoder));
+ thumbnail_receive_config.decoders.clear();
+ thumbnail_receive_config.decoders.push_back(decoder);
+ thumbnail_receive_config.rtp.remote_ssrc =
+ thumbnail_send_config.rtp.ssrcs[0];
+
+ thumbnail_receive_config.rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ thumbnail_receive_config.rtp.rtx_ssrc = kThumbnailRtxSsrcStart + i;
+ thumbnail_receive_config.rtp
+ .rtx_associated_payload_types[kSendRtxPayloadType] = kPayloadTypeVP8;
+ thumbnail_receive_config.rtp.transport_cc = params_.call.send_side_bwe;
+ thumbnail_receive_config.rtp.remb = !params_.call.send_side_bwe;
+
+ thumbnail_encoder_configs_.push_back(thumbnail_encoder_config.Copy());
+ thumbnail_send_configs_.push_back(thumbnail_send_config.Copy());
+ thumbnail_receive_configs_.push_back(thumbnail_receive_config.Copy());
+ }
+
+ for (int i = 0; i < params_.call.num_thumbnails; ++i) {
+ thumbnail_send_streams_.push_back(receiver_call_->CreateVideoSendStream(
+ thumbnail_send_configs_[i].Copy(),
+ thumbnail_encoder_configs_[i].Copy()));
+ thumbnail_receive_streams_.push_back(sender_call_->CreateVideoReceiveStream(
+ thumbnail_receive_configs_[i].Copy()));
+ }
+}
+
+void VideoQualityTest::DestroyThumbnailStreams() {
+ for (VideoSendStream* thumbnail_send_stream : thumbnail_send_streams_)
+ receiver_call_->DestroyVideoSendStream(thumbnail_send_stream);
+ thumbnail_send_streams_.clear();
+ for (VideoReceiveStream* thumbnail_receive_stream :
+ thumbnail_receive_streams_)
+ sender_call_->DestroyVideoReceiveStream(thumbnail_receive_stream);
+ thumbnail_receive_streams_.clear();
+  for (std::unique_ptr<test::VideoCapturer>& video_capturer :
+       thumbnail_capturers_) {
+    video_capturer.reset();
+ }
+}
+
+void VideoQualityTest::SetupScreenshareOrSVC() {
+ if (params_.screenshare.enabled) {
+ // Fill out codec settings.
+ video_encoder_config_.content_type =
+ VideoEncoderConfig::ContentType::kScreen;
+ degradation_preference_ =
+ VideoSendStream::DegradationPreference::kMaintainResolution;
+ if (params_.video.codec == "VP8") {
+ VideoCodecVP8 vp8_settings = VideoEncoder::GetDefaultVp8Settings();
+ vp8_settings.denoisingOn = false;
+ vp8_settings.frameDroppingOn = false;
+ vp8_settings.numberOfTemporalLayers =
+ static_cast<unsigned char>(params_.video.num_temporal_layers);
+ video_encoder_config_.encoder_specific_settings =
+ new rtc::RefCountedObject<
+ VideoEncoderConfig::Vp8EncoderSpecificSettings>(vp8_settings);
+ } else if (params_.video.codec == "VP9") {
+ VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings();
+ vp9_settings.denoisingOn = false;
+ vp9_settings.frameDroppingOn = false;
+ vp9_settings.numberOfTemporalLayers =
+ static_cast<unsigned char>(params_.video.num_temporal_layers);
+ vp9_settings.numberOfSpatialLayers =
+ static_cast<unsigned char>(params_.ss.num_spatial_layers);
+ video_encoder_config_.encoder_specific_settings =
+ new rtc::RefCountedObject<
+ VideoEncoderConfig::Vp9EncoderSpecificSettings>(vp9_settings);
+ }
+    // Set up the frame generator.
+ const size_t kWidth = 1850;
+ const size_t kHeight = 1110;
+ if (params_.screenshare.generate_slides) {
+ frame_generator_ = test::FrameGenerator::CreateSlideGenerator(
+ kWidth, kHeight,
+ params_.screenshare.slide_change_interval * params_.video.fps);
+ } else {
+ std::vector<std::string> slides = params_.screenshare.slides;
+      if (slides.empty()) {
+ slides.push_back(test::ResourcePath("web_screenshot_1850_1110", "yuv"));
+ slides.push_back(test::ResourcePath("presentation_1850_1110", "yuv"));
+ slides.push_back(test::ResourcePath("photo_1850_1110", "yuv"));
+ slides.push_back(
+ test::ResourcePath("difficult_photo_1850_1110", "yuv"));
+ }
+ if (params_.screenshare.scroll_duration == 0) {
+ // Cycle image every slide_change_interval seconds.
+ frame_generator_ = test::FrameGenerator::CreateFromYuvFile(
+ slides, kWidth, kHeight,
+ params_.screenshare.slide_change_interval * params_.video.fps);
+ } else {
+ RTC_CHECK_LE(params_.video.width, kWidth);
+ RTC_CHECK_LE(params_.video.height, kHeight);
+ RTC_CHECK_GT(params_.screenshare.slide_change_interval, 0);
+ const int kPauseDurationMs =
+ (params_.screenshare.slide_change_interval -
+ params_.screenshare.scroll_duration) *
+ 1000;
+ RTC_CHECK_LE(params_.screenshare.scroll_duration,
+ params_.screenshare.slide_change_interval);
+
+ frame_generator_ =
+ test::FrameGenerator::CreateScrollingInputFromYuvFiles(
+ clock_, slides, kWidth, kHeight, params_.video.width,
+ params_.video.height,
+ params_.screenshare.scroll_duration * 1000, kPauseDurationMs);
+ }
+ }
+ } else if (params_.ss.num_spatial_layers > 1) { // For non-screenshare case.
+ RTC_CHECK(params_.video.codec == "VP9");
+ VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings();
+ vp9_settings.numberOfTemporalLayers =
+ static_cast<unsigned char>(params_.video.num_temporal_layers);
+ vp9_settings.numberOfSpatialLayers =
+ static_cast<unsigned char>(params_.ss.num_spatial_layers);
+ video_encoder_config_.encoder_specific_settings = new rtc::RefCountedObject<
+ VideoEncoderConfig::Vp9EncoderSpecificSettings>(vp9_settings);
+ }
+}
+
+void VideoQualityTest::SetupThumbnailCapturers(size_t num_thumbnail_streams) {
+ VideoStream thumbnail = DefaultThumbnailStream();
+ for (size_t i = 0; i < num_thumbnail_streams; ++i) {
+ thumbnail_capturers_.emplace_back(test::FrameGeneratorCapturer::Create(
+ static_cast<int>(thumbnail.width), static_cast<int>(thumbnail.height),
+ thumbnail.max_framerate, clock_));
+ RTC_DCHECK(thumbnail_capturers_.back());
+ }
+}
+
+void VideoQualityTest::CreateCapturer() {
+ if (params_.screenshare.enabled) {
+ test::FrameGeneratorCapturer* frame_generator_capturer =
+ new test::FrameGeneratorCapturer(clock_, std::move(frame_generator_),
+ params_.video.fps);
+ EXPECT_TRUE(frame_generator_capturer->Init());
+ video_capturer_.reset(frame_generator_capturer);
+ } else {
+ if (params_.video.clip_name == "Generator") {
+ video_capturer_.reset(test::FrameGeneratorCapturer::Create(
+ static_cast<int>(params_.video.width),
+ static_cast<int>(params_.video.height), params_.video.fps, clock_));
+ } else if (params_.video.clip_name.empty()) {
+ video_capturer_.reset(test::VcmCapturer::Create(
+ params_.video.width, params_.video.height, params_.video.fps,
+ params_.video.capture_device_index));
+ if (!video_capturer_) {
+ // Failed to get actual camera, use chroma generator as backup.
+ video_capturer_.reset(test::FrameGeneratorCapturer::Create(
+ static_cast<int>(params_.video.width),
+ static_cast<int>(params_.video.height), params_.video.fps, clock_));
+ }
+ } else {
+ video_capturer_.reset(test::FrameGeneratorCapturer::CreateFromYuvFile(
+ test::ResourcePath(params_.video.clip_name, "yuv"),
+ params_.video.width, params_.video.height, params_.video.fps,
+ clock_));
+ ASSERT_TRUE(video_capturer_) << "Could not create capturer for "
+ << params_.video.clip_name
+ << ".yuv. Is this resource file present?";
+ }
+ }
+ RTC_DCHECK(video_capturer_.get());
+}
+
+std::unique_ptr<test::LayerFilteringTransport>
+VideoQualityTest::CreateSendTransport() {
+ return rtc::MakeUnique<test::LayerFilteringTransport>(
+ &task_queue_, params_.pipe, sender_call_.get(), kPayloadTypeVP8,
+ kPayloadTypeVP9, params_.video.selected_tl, params_.ss.selected_sl,
+ payload_type_map_);
+}
+
+std::unique_ptr<test::DirectTransport>
+VideoQualityTest::CreateReceiveTransport() {
+ return rtc::MakeUnique<test::DirectTransport>(
+ &task_queue_, params_.pipe, receiver_call_.get(), payload_type_map_);
+}
+
+void VideoQualityTest::RunWithAnalyzer(const Params& params) {
+ std::unique_ptr<test::LayerFilteringTransport> send_transport;
+ std::unique_ptr<test::DirectTransport> recv_transport;
+ FILE* graph_data_output_file = nullptr;
+ std::unique_ptr<VideoAnalyzer> analyzer;
+
+ params_ = params;
+
+ RTC_CHECK(!params_.audio.enabled);
+ // TODO(ivica): Merge with RunWithRenderer and use a flag / argument to
+ // differentiate between the analyzer and the renderer case.
+ CheckParams();
+
+ if (!params_.analyzer.graph_data_output_filename.empty()) {
+ graph_data_output_file =
+ fopen(params_.analyzer.graph_data_output_filename.c_str(), "w");
+ RTC_CHECK(graph_data_output_file)
+ << "Can't open the file " << params_.analyzer.graph_data_output_filename
+ << "!";
+ }
+
+ if (!params.logging.rtc_event_log_name.empty()) {
+ event_log_ = RtcEventLog::Create(clock_, RtcEventLog::EncodingType::Legacy);
+ std::unique_ptr<RtcEventLogOutputFile> output(
+ rtc::MakeUnique<RtcEventLogOutputFile>(
+ params.logging.rtc_event_log_name, RtcEventLog::kUnlimitedOutput));
+ bool event_log_started = event_log_->StartLogging(
+ std::move(output), RtcEventLog::kImmediateOutput);
+ RTC_DCHECK(event_log_started);
+ }
+
+ Call::Config call_config(event_log_.get());
+ call_config.bitrate_config = params.call.call_bitrate_config;
+
+ task_queue_.SendTask(
+ [this, &call_config, &send_transport, &recv_transport]() {
+ CreateCalls(call_config, call_config);
+ send_transport = CreateSendTransport();
+ recv_transport = CreateReceiveTransport();
+ });
+
+ std::string graph_title = params_.analyzer.graph_title;
+ if (graph_title.empty())
+ graph_title = VideoQualityTest::GenerateGraphTitle();
+ bool is_quick_test_enabled = field_trial::IsEnabled("WebRTC-QuickPerfTest");
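+  // The analyzer processes duration * fps frames, unless the quick-test
+  // field trial caps the count at kFramesSentInQuickTest.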
+ analyzer = rtc::MakeUnique<VideoAnalyzer>(
+ send_transport.get(), params_.analyzer.test_label,
+ params_.analyzer.avg_psnr_threshold, params_.analyzer.avg_ssim_threshold,
+ is_quick_test_enabled
+ ? kFramesSentInQuickTest
+ : params_.analyzer.test_durations_secs * params_.video.fps,
+ graph_data_output_file, graph_title,
+ kVideoSendSsrcs[params_.ss.selected_stream],
+ kSendRtxSsrcs[params_.ss.selected_stream],
+ static_cast<size_t>(params_.ss.selected_stream), params.ss.selected_sl,
+ params_.video.selected_tl, is_quick_test_enabled, clock_,
+ params_.logging.rtp_dump_name);
+
+ task_queue_.SendTask([&]() {
+ analyzer->SetCall(sender_call_.get());
+ analyzer->SetReceiver(receiver_call_->Receiver());
+ send_transport->SetReceiver(analyzer.get());
+ recv_transport->SetReceiver(sender_call_->Receiver());
+
+ SetupVideo(analyzer.get(), recv_transport.get());
+ SetupThumbnails(analyzer.get(), recv_transport.get());
+ video_receive_configs_[params_.ss.selected_stream].renderer =
+ analyzer.get();
+ video_send_config_.pre_encode_callback = analyzer->pre_encode_proxy();
+ RTC_DCHECK(!video_send_config_.post_encode_callback);
+ video_send_config_.post_encode_callback = analyzer->encode_timing_proxy();
+
+ SetupScreenshareOrSVC();
+
+ CreateFlexfecStreams();
+ CreateVideoStreams();
+ analyzer->SetSendStream(video_send_stream_);
+ if (video_receive_streams_.size() == 1)
+ analyzer->SetReceiveStream(video_receive_streams_[0]);
+
+ video_send_stream_->SetSource(analyzer->OutputInterface(),
+ degradation_preference_);
+
+ SetupThumbnailCapturers(params_.call.num_thumbnails);
+ for (size_t i = 0; i < thumbnail_send_streams_.size(); ++i) {
+ thumbnail_send_streams_[i]->SetSource(thumbnail_capturers_[i].get(),
+ degradation_preference_);
+ }
+
+ CreateCapturer();
+
+ analyzer->SetSource(video_capturer_.get(), params_.ss.infer_streams);
+
+ StartEncodedFrameLogs(video_send_stream_);
+ StartEncodedFrameLogs(video_receive_streams_[params_.ss.selected_stream]);
+ video_send_stream_->Start();
+ for (VideoSendStream* thumbnail_send_stream : thumbnail_send_streams_)
+ thumbnail_send_stream->Start();
+ for (VideoReceiveStream* receive_stream : video_receive_streams_)
+ receive_stream->Start();
+ for (VideoReceiveStream* thumbnail_receive_stream :
+ thumbnail_receive_streams_)
+ thumbnail_receive_stream->Start();
+
+ analyzer->StartMeasuringCpuProcessTime();
+
+ video_capturer_->Start();
+    for (std::unique_ptr<test::VideoCapturer>& video_capturer :
+         thumbnail_capturers_) {
+      video_capturer->Start();
+ }
+ });
+
+ analyzer->Wait();
+
+ event_log_->StopLogging();
+
+ task_queue_.SendTask([&]() {
+    for (std::unique_ptr<test::VideoCapturer>& video_capturer :
+         thumbnail_capturers_)
+      video_capturer->Stop();
+ video_capturer_->Stop();
+ for (VideoReceiveStream* thumbnail_receive_stream :
+ thumbnail_receive_streams_)
+ thumbnail_receive_stream->Stop();
+ for (VideoReceiveStream* receive_stream : video_receive_streams_)
+ receive_stream->Stop();
+ for (VideoSendStream* thumbnail_send_stream : thumbnail_send_streams_)
+ thumbnail_send_stream->Stop();
+ video_send_stream_->Stop();
+
+ DestroyStreams();
+ DestroyThumbnailStreams();
+
+ if (graph_data_output_file)
+ fclose(graph_data_output_file);
+
+ video_capturer_.reset();
+ send_transport.reset();
+ recv_transport.reset();
+
+ DestroyCalls();
+ });
+}
+
+void VideoQualityTest::SetupAudio(int send_channel_id,
+ int receive_channel_id,
+ Transport* transport,
+ AudioReceiveStream** audio_receive_stream) {
+ audio_send_config_ = AudioSendStream::Config(transport);
+ audio_send_config_.voe_channel_id = send_channel_id;
+ audio_send_config_.rtp.ssrc = kAudioSendSsrc;
+
+ // Add extension to enable audio send side BWE, and allow audio bit rate
+ // adaptation.
+ audio_send_config_.rtp.extensions.clear();
+ if (params_.call.send_side_bwe) {
+ audio_send_config_.rtp.extensions.push_back(
+ webrtc::RtpExtension(webrtc::RtpExtension::kTransportSequenceNumberUri,
+ test::kTransportSequenceNumberExtensionId));
+ audio_send_config_.min_bitrate_bps = kOpusMinBitrateBps;
+ audio_send_config_.max_bitrate_bps = kOpusBitrateFbBps;
+ }
+ audio_send_config_.send_codec_spec =
+ rtc::Optional<AudioSendStream::Config::SendCodecSpec>(
+ {kAudioSendPayloadType,
+ {"OPUS", 48000, 2,
+ {{"usedtx", (params_.audio.dtx ? "1" : "0")},
+ {"stereo", "1"}}}});
+ audio_send_config_.encoder_factory = encoder_factory_;
+ audio_send_stream_ = sender_call_->CreateAudioSendStream(audio_send_config_);
+
+ AudioReceiveStream::Config audio_config;
+ audio_config.rtp.local_ssrc = kReceiverLocalAudioSsrc;
+ audio_config.rtcp_send_transport = transport;
+ audio_config.voe_channel_id = receive_channel_id;
+ audio_config.rtp.remote_ssrc = audio_send_config_.rtp.ssrc;
+ audio_config.rtp.transport_cc = params_.call.send_side_bwe;
+ audio_config.rtp.extensions = audio_send_config_.rtp.extensions;
+ audio_config.decoder_factory = decoder_factory_;
+ audio_config.decoder_map = {{kAudioSendPayloadType, {"OPUS", 48000, 2}}};
+ if (params_.video.enabled && params_.audio.sync_video)
+ audio_config.sync_group = kSyncGroup;
+
+ *audio_receive_stream =
+ receiver_call_->CreateAudioReceiveStream(audio_config);
+}
+
+void VideoQualityTest::RunWithRenderers(const Params& params) {
+ std::unique_ptr<test::LayerFilteringTransport> send_transport;
+ std::unique_ptr<test::DirectTransport> recv_transport;
+ std::unique_ptr<test::FakeAudioDevice> fake_audio_device;
+ ::VoiceEngineState voe;
+ std::unique_ptr<test::VideoRenderer> local_preview;
+ std::vector<std::unique_ptr<test::VideoRenderer>> loopback_renderers;
+ AudioReceiveStream* audio_receive_stream = nullptr;
+
+ task_queue_.SendTask([&]() {
+ params_ = params;
+ CheckParams();
+
+ // TODO(ivica): Remove bitrate_config and use the default Call::Config(), to
+ // match the full stack tests.
+ Call::Config call_config(event_log_.get());
+ call_config.bitrate_config = params_.call.call_bitrate_config;
+
+ fake_audio_device.reset(new test::FakeAudioDevice(
+ test::FakeAudioDevice::CreatePulsedNoiseCapturer(32000, 48000),
+ test::FakeAudioDevice::CreateDiscardRenderer(48000),
+ 1.f));
+
+ rtc::scoped_refptr<webrtc::AudioProcessing> audio_processing(
+ webrtc::AudioProcessing::Create());
+
+ if (params_.audio.enabled) {
+ CreateVoiceEngine(&voe, fake_audio_device.get(), audio_processing.get(),
+ decoder_factory_);
+ AudioState::Config audio_state_config;
+ audio_state_config.voice_engine = voe.voice_engine;
+ audio_state_config.audio_mixer = AudioMixerImpl::Create();
+ audio_state_config.audio_processing = audio_processing;
+ call_config.audio_state = AudioState::Create(audio_state_config);
+ fake_audio_device->RegisterAudioCallback(
+ call_config.audio_state->audio_transport());
+ }
+
+ CreateCalls(call_config, call_config);
+
+    // TODO(minyue): consider whether this is a good transport even for
+    // audio-only calls.
+ send_transport = rtc::MakeUnique<test::LayerFilteringTransport>(
+ &task_queue_, params.pipe, sender_call_.get(), kPayloadTypeVP8,
+ kPayloadTypeVP9, params.video.selected_tl, params_.ss.selected_sl,
+ payload_type_map_);
+
+ recv_transport = rtc::MakeUnique<test::DirectTransport>(
+ &task_queue_, params_.pipe, receiver_call_.get(), payload_type_map_);
+
+ // TODO(ivica): Use two calls to be able to merge with RunWithAnalyzer or at
+ // least share as much code as possible. That way this test would also match
+ // the full stack tests better.
+ send_transport->SetReceiver(receiver_call_->Receiver());
+ recv_transport->SetReceiver(sender_call_->Receiver());
+
+ if (params_.video.enabled) {
+ // Create video renderers.
+ local_preview.reset(test::VideoRenderer::Create(
+ "Local Preview", params_.video.width, params_.video.height));
+
+ const size_t selected_stream_id = params_.ss.selected_stream;
+ const size_t num_streams = params_.ss.streams.size();
+
+ if (selected_stream_id == num_streams) {
+ for (size_t stream_id = 0; stream_id < num_streams; ++stream_id) {
+ std::ostringstream oss;
+ oss << "Loopback Video - Stream #" << static_cast<int>(stream_id);
+ loopback_renderers.emplace_back(test::VideoRenderer::Create(
+ oss.str().c_str(), params_.ss.streams[stream_id].width,
+ params_.ss.streams[stream_id].height));
+ }
+ } else {
+ loopback_renderers.emplace_back(test::VideoRenderer::Create(
+ "Loopback Video", params_.ss.streams[selected_stream_id].width,
+ params_.ss.streams[selected_stream_id].height));
+ }
+
+ SetupVideo(send_transport.get(), recv_transport.get());
+
+ video_send_config_.pre_encode_callback = local_preview.get();
+ if (selected_stream_id == num_streams) {
+ for (size_t stream_id = 0; stream_id < num_streams; ++stream_id) {
+ video_receive_configs_[stream_id].renderer =
+ loopback_renderers[stream_id].get();
+ if (params_.audio.enabled && params_.audio.sync_video)
+ video_receive_configs_[stream_id].sync_group = kSyncGroup;
+ }
+ } else {
+ video_receive_configs_[selected_stream_id].renderer =
+ loopback_renderers.back().get();
+ if (params_.audio.enabled && params_.audio.sync_video)
+ video_receive_configs_[selected_stream_id].sync_group = kSyncGroup;
+ }
+
+ SetupScreenshareOrSVC();
+
+ CreateFlexfecStreams();
+ CreateVideoStreams();
+
+ CreateCapturer();
+ video_send_stream_->SetSource(video_capturer_.get(),
+ degradation_preference_);
+ }
+
+ if (params_.audio.enabled) {
+ SetupAudio(voe.send_channel_id, voe.receive_channel_id,
+ send_transport.get(), &audio_receive_stream);
+ }
+
+ for (VideoReceiveStream* receive_stream : video_receive_streams_)
+ StartEncodedFrameLogs(receive_stream);
+ StartEncodedFrameLogs(video_send_stream_);
+
+ // Start sending and receiving video.
+ if (params_.video.enabled) {
+ for (VideoReceiveStream* video_receive_stream : video_receive_streams_)
+ video_receive_stream->Start();
+
+ video_send_stream_->Start();
+ video_capturer_->Start();
+ }
+
+ if (params_.audio.enabled) {
+ // Start receiving audio.
+ audio_receive_stream->Start();
+ EXPECT_EQ(0, voe.base->StartPlayout(voe.receive_channel_id));
+
+ // Start sending audio.
+ audio_send_stream_->Start();
+ EXPECT_EQ(0, voe.base->StartSend(voe.send_channel_id));
+ }
+ });
+
+ test::PressEnterToContinue();
+
+ task_queue_.SendTask([&]() {
+ if (params_.audio.enabled) {
+ // Stop sending audio.
+ EXPECT_EQ(0, voe.base->StopSend(voe.send_channel_id));
+ audio_send_stream_->Stop();
+
+ // Stop receiving audio.
+ EXPECT_EQ(0, voe.base->StopPlayout(voe.receive_channel_id));
+ audio_receive_stream->Stop();
+ sender_call_->DestroyAudioSendStream(audio_send_stream_);
+ receiver_call_->DestroyAudioReceiveStream(audio_receive_stream);
+ }
+
+ // Stop receiving and sending video.
+ if (params_.video.enabled) {
+ video_capturer_->Stop();
+ video_send_stream_->Stop();
+ for (FlexfecReceiveStream* flexfec_receive_stream :
+ flexfec_receive_streams_) {
+ for (VideoReceiveStream* video_receive_stream :
+ video_receive_streams_) {
+ video_receive_stream->RemoveSecondarySink(flexfec_receive_stream);
+ }
+ receiver_call_->DestroyFlexfecReceiveStream(flexfec_receive_stream);
+ }
+ for (VideoReceiveStream* receive_stream : video_receive_streams_) {
+ receive_stream->Stop();
+ receiver_call_->DestroyVideoReceiveStream(receive_stream);
+ }
+ sender_call_->DestroyVideoSendStream(video_send_stream_);
+ }
+
+ video_capturer_.reset();
+ send_transport.reset();
+ recv_transport.reset();
+
+ if (params_.audio.enabled)
+ DestroyVoiceEngine(&voe);
+
+ local_preview.reset();
+ loopback_renderers.clear();
+
+ DestroyCalls();
+ });
+}
+
+void VideoQualityTest::StartEncodedFrameLogs(VideoSendStream* stream) {
+ if (!params_.logging.encoded_frame_base_path.empty()) {
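+    // Three IVF output files are registered here, presumably one per
+    // simulcast stream, together with a byte limit of 100000000.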
+ std::ostringstream str;
+ str << send_logs_++;
+ std::string prefix =
+ params_.logging.encoded_frame_base_path + "." + str.str() + ".send.";
+ stream->EnableEncodedFrameRecording(
+ std::vector<rtc::PlatformFile>(
+ {rtc::CreatePlatformFile(prefix + "1.ivf"),
+ rtc::CreatePlatformFile(prefix + "2.ivf"),
+ rtc::CreatePlatformFile(prefix + "3.ivf")}),
+ 100000000);
+ }
+}
+
+void VideoQualityTest::StartEncodedFrameLogs(VideoReceiveStream* stream) {
+ if (!params_.logging.encoded_frame_base_path.empty()) {
+ std::ostringstream str;
+ str << receive_logs_++;
+ std::string path =
+ params_.logging.encoded_frame_base_path + "." + str.str() + ".recv.ivf";
+ stream->EnableEncodedFrameRecording(rtc::CreatePlatformFile(path),
+ 100000000);
+ }
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/video/video_quality_test.h b/third_party/libwebrtc/webrtc/video/video_quality_test.h
new file mode 100644
index 0000000000..b7ba74dca6
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/video_quality_test.h
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef VIDEO_VIDEO_QUALITY_TEST_H_
+#define VIDEO_VIDEO_QUALITY_TEST_H_
+
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "media/engine/simulcast_encoder_adapter.h"
+#include "test/call_test.h"
+#include "test/frame_generator.h"
+#include "test/layer_filtering_transport.h"
+
+namespace webrtc {
+
+class VideoQualityTest : public test::CallTest {
+ public:
+  // Parameters are grouped into smaller structs to make it easier to set the
+  // desired elements and skip unused ones, using aggregate initialization.
+ // Unfortunately, C++11 (as opposed to C11) doesn't support unnamed structs,
+ // which makes the implementation of VideoQualityTest a bit uglier.
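+  // A typical caller default-constructs Params and overwrites only the
+  // fields of interest, e.g. (illustrative sketch):
+  //   VideoQualityTest::Params params;
+  //   params.video.enabled = true;
+  //   params.video.codec = "VP8";
+  //   params.analyzer = {"my_test", 30.0, 0.9, 60, "", ""};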
+ struct Params {
+ Params();
+ ~Params();
+ struct CallConfig {
+ bool send_side_bwe;
+ Call::Config::BitrateConfig call_bitrate_config;
+ int num_thumbnails;
+ } call;
+ struct Video {
+ bool enabled;
+ size_t width;
+ size_t height;
+ int32_t fps;
+ int min_bitrate_bps;
+ int target_bitrate_bps;
+ int max_bitrate_bps;
+ bool suspend_below_min_bitrate;
+ std::string codec;
+ int num_temporal_layers;
+ int selected_tl;
+ int min_transmit_bps;
+ bool ulpfec;
+ bool flexfec;
+ std::string clip_name; // "Generator" to generate frames instead.
+ size_t capture_device_index;
+ } video;
+ struct Audio {
+ bool enabled;
+ bool sync_video;
+ bool dtx;
+ } audio;
+ struct Screenshare {
+ bool enabled;
+ bool generate_slides;
+ int32_t slide_change_interval;
+ int32_t scroll_duration;
+ std::vector<std::string> slides;
+ } screenshare;
+ struct Analyzer {
+ std::string test_label;
+ double avg_psnr_threshold; // (*)
+ double avg_ssim_threshold; // (*)
+ int test_durations_secs;
+ std::string graph_data_output_filename;
+ std::string graph_title;
+ } analyzer;
+ FakeNetworkPipe::Config pipe;
+ struct SS { // Spatial scalability.
+ std::vector<VideoStream> streams; // If empty, one stream is assumed.
+ size_t selected_stream;
+ int num_spatial_layers;
+ int selected_sl;
+ // If empty, bitrates are generated in VP9Impl automatically.
+ std::vector<SpatialLayer> spatial_layers;
+ // If set, default parameters will be used instead of |streams|.
+ bool infer_streams;
+ } ss;
+ struct Logging {
+ bool logs;
+ std::string rtc_event_log_name;
+ std::string rtp_dump_name;
+ std::string encoded_frame_base_path;
+ } logging;
+ };
+
+ VideoQualityTest();
+ void RunWithAnalyzer(const Params& params);
+ void RunWithRenderers(const Params& params);
+
+ static void FillScalabilitySettings(
+ Params* params,
+ const std::vector<std::string>& stream_descriptors,
+ int num_streams,
+ size_t selected_stream,
+ int num_spatial_layers,
+ int selected_sl,
+ const std::vector<std::string>& sl_descriptors);
+
+ protected:
+ std::map<uint8_t, webrtc::MediaType> payload_type_map_;
+
+ // No-op implementation to be able to instantiate this class from non-TEST_F
+ // locations.
+ void TestBody() override;
+
+ // Helper methods accessing only params_.
+ std::string GenerateGraphTitle() const;
+ void CheckParams();
+
+ // Helper static methods.
+ static VideoStream DefaultVideoStream(const Params& params);
+ static VideoStream DefaultThumbnailStream();
+ static std::vector<int> ParseCSV(const std::string& str);
+
+ // Helper methods for setting up the call.
+ void CreateCapturer();
+ void SetupThumbnailCapturers(size_t num_thumbnail_streams);
+ void SetupVideo(Transport* send_transport, Transport* recv_transport);
+ void SetupThumbnails(Transport* send_transport, Transport* recv_transport);
+ void DestroyThumbnailStreams();
+ void SetupScreenshareOrSVC();
+ void SetupAudio(int send_channel_id,
+ int receive_channel_id,
+ Transport* transport,
+ AudioReceiveStream** audio_receive_stream);
+
+ void StartEncodedFrameLogs(VideoSendStream* stream);
+ void StartEncodedFrameLogs(VideoReceiveStream* stream);
+
+ virtual std::unique_ptr<test::LayerFilteringTransport> CreateSendTransport();
+ virtual std::unique_ptr<test::DirectTransport> CreateReceiveTransport();
+
+ // We need a more general capturer than the FrameGeneratorCapturer.
+ std::unique_ptr<test::VideoCapturer> video_capturer_;
+ std::vector<std::unique_ptr<test::VideoCapturer>> thumbnail_capturers_;
+ std::unique_ptr<test::FrameGenerator> frame_generator_;
+ std::unique_ptr<VideoEncoder> video_encoder_;
+
+ std::vector<std::unique_ptr<VideoEncoder>> thumbnail_encoders_;
+ std::vector<VideoSendStream::Config> thumbnail_send_configs_;
+ std::vector<VideoEncoderConfig> thumbnail_encoder_configs_;
+ std::vector<VideoSendStream*> thumbnail_send_streams_;
+ std::vector<VideoReceiveStream::Config> thumbnail_receive_configs_;
+ std::vector<VideoReceiveStream*> thumbnail_receive_streams_;
+
+ Clock* const clock_;
+
+ int receive_logs_;
+ int send_logs_;
+
+ VideoSendStream::DegradationPreference degradation_preference_ =
+ VideoSendStream::DegradationPreference::kMaintainFramerate;
+ Params params_;
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_VIDEO_QUALITY_TEST_H_
diff --git a/third_party/libwebrtc/webrtc/video/video_receive_stream.cc b/third_party/libwebrtc/webrtc/video/video_receive_stream.cc
new file mode 100644
index 0000000000..33a9173a5d
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/video_receive_stream.cc
@@ -0,0 +1,475 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/video_receive_stream.h"
+
+#include <stdlib.h>
+
+#include <set>
+#include <string>
+#include <utility>
+
+#include "api/optional.h"
+#include "call/rtp_stream_receiver_controller_interface.h"
+#include "call/rtx_receive_stream.h"
+#include "common_types.h" // NOLINT(build/include)
+#include "common_video/h264/profile_level_id.h"
+#include "common_video/libyuv/include/webrtc_libyuv.h"
+#include "modules/rtp_rtcp/include/rtp_receiver.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp.h"
+#include "modules/utility/include/process_thread.h"
+#include "modules/video_coding/frame_object.h"
+#include "modules/video_coding/include/video_coding.h"
+#include "modules/video_coding/jitter_estimator.h"
+#include "modules/video_coding/timing.h"
+#include "modules/video_coding/utility/ivf_file_writer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/location.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/ptr_util.h"
+#include "rtc_base/trace_event.h"
+#include "system_wrappers/include/clock.h"
+#include "system_wrappers/include/field_trial.h"
+#include "video/call_stats.h"
+#include "video/receive_statistics_proxy.h"
+
+namespace webrtc {
+
+namespace {
+VideoCodec CreateDecoderVideoCodec(const VideoReceiveStream::Decoder& decoder) {
+ VideoCodec codec;
+ memset(&codec, 0, sizeof(codec));
+
+ codec.plType = decoder.payload_type;
+ strncpy(codec.plName, decoder.payload_name.c_str(), sizeof(codec.plName));
+ codec.codecType = PayloadStringToCodecType(decoder.payload_name);
+
+ if (codec.codecType == kVideoCodecVP8) {
+ *(codec.VP8()) = VideoEncoder::GetDefaultVp8Settings();
+ } else if (codec.codecType == kVideoCodecVP9) {
+ *(codec.VP9()) = VideoEncoder::GetDefaultVp9Settings();
+ } else if (codec.codecType == kVideoCodecH264) {
+ *(codec.H264()) = VideoEncoder::GetDefaultH264Settings();
+ codec.H264()->profile =
+ H264::ParseSdpProfileLevelId(decoder.codec_params)->profile;
+ }
+
+ codec.width = 320;
+ codec.height = 180;
+ const int kDefaultStartBitrate = 300;
+ codec.startBitrate = codec.minBitrate = codec.maxBitrate =
+ kDefaultStartBitrate;
+
+ return codec;
+}
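+
+// A worked example with hypothetical values:
+//
+//   VideoReceiveStream::Decoder decoder;
+//   decoder.payload_type = 96;
+//   decoder.payload_name = "VP8";
+//   VideoCodec codec = CreateDecoderVideoCodec(decoder);
+//   // codec.plType == 96, codec.codecType == kVideoCodecVP8, default VP8
+//   // settings; the 320x180 size and 300 kbps rates are just the
+//   // placeholder defaults set above.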
+} // namespace
+
+namespace internal {
+
+VideoReceiveStream::VideoReceiveStream(
+ RtpStreamReceiverControllerInterface* receiver_controller,
+ int num_cpu_cores,
+ PacketRouter* packet_router,
+ VideoReceiveStream::Config config,
+ ProcessThread* process_thread,
+ CallStats* call_stats)
+ : transport_adapter_(config.rtcp_send_transport),
+ config_(std::move(config)),
+ num_cpu_cores_(num_cpu_cores),
+ process_thread_(process_thread),
+ clock_(Clock::GetRealTimeClock()),
+ decode_thread_(&DecodeThreadFunction,
+ this,
+ "DecodingThread",
+ rtc::kHighestPriority),
+ call_stats_(call_stats),
+ rtp_receive_statistics_(ReceiveStatistics::Create(clock_)),
+ timing_(new VCMTiming(clock_)),
+ video_receiver_(clock_, nullptr, this, timing_.get(), this, this),
+ stats_proxy_(&config_, clock_),
+ rtp_video_stream_receiver_(&transport_adapter_,
+ call_stats_->rtcp_rtt_stats(),
+ packet_router,
+ &config_,
+ rtp_receive_statistics_.get(),
+ &stats_proxy_,
+ process_thread_,
+ this, // NackSender
+ this, // KeyFrameRequestSender
+ this, // OnCompleteFrameCallback
+ timing_.get()),
+ rtp_stream_sync_(this) {
+ RTC_LOG(LS_INFO) << "VideoReceiveStream: " << config_.ToString();
+
+ RTC_DCHECK(process_thread_);
+ RTC_DCHECK(call_stats_);
+
+ module_process_sequence_checker_.Detach();
+
+ RTC_DCHECK(!config_.decoders.empty());
+ std::set<int> decoder_payload_types;
+ for (const Decoder& decoder : config_.decoders) {
+ RTC_CHECK(decoder.decoder);
+ RTC_CHECK(decoder_payload_types.find(decoder.payload_type) ==
+ decoder_payload_types.end())
+ << "Duplicate payload type (" << decoder.payload_type
+ << ") for different decoders.";
+ decoder_payload_types.insert(decoder.payload_type);
+ }
+
+ video_receiver_.SetRenderDelay(config_.render_delay_ms);
+
+ jitter_estimator_.reset(new VCMJitterEstimator(clock_));
+ frame_buffer_.reset(new video_coding::FrameBuffer(
+ clock_, jitter_estimator_.get(), timing_.get(), &stats_proxy_));
+
+ process_thread_->RegisterModule(&rtp_stream_sync_, RTC_FROM_HERE);
+
+ // Register with RtpStreamReceiverController.
+ media_receiver_ = receiver_controller->CreateReceiver(
+ config_.rtp.remote_ssrc, &rtp_video_stream_receiver_);
+ if (config_.rtp.rtx_ssrc) {
+    // Read from |config_| here: |config| was moved from in the member
+    // initializer list above.
+    rtx_receive_stream_ = rtc::MakeUnique<RtxReceiveStream>(
+        &rtp_video_stream_receiver_, config_.rtp.rtx_associated_payload_types,
+ config_.rtp.remote_ssrc, rtp_receive_statistics_.get());
+ rtx_receiver_ = receiver_controller->CreateReceiver(
+ config_.rtp.rtx_ssrc, rtx_receive_stream_.get());
+ }
+}
+
+VideoReceiveStream::~VideoReceiveStream() {
+ RTC_DCHECK_CALLED_SEQUENTIALLY(&worker_sequence_checker_);
+ RTC_LOG(LS_INFO) << "~VideoReceiveStream: " << config_.ToString();
+ Stop();
+
+ process_thread_->DeRegisterModule(&rtp_stream_sync_);
+}
+
+void VideoReceiveStream::SignalNetworkState(NetworkState state) {
+ RTC_DCHECK_CALLED_SEQUENTIALLY(&worker_sequence_checker_);
+ rtp_video_stream_receiver_.SignalNetworkState(state);
+}
+
+bool VideoReceiveStream::DeliverRtcp(const uint8_t* packet, size_t length) {
+ return rtp_video_stream_receiver_.DeliverRtcp(packet, length);
+}
+
+void VideoReceiveStream::SetSync(Syncable* audio_syncable) {
+ RTC_DCHECK_CALLED_SEQUENTIALLY(&worker_sequence_checker_);
+ rtp_stream_sync_.ConfigureSync(audio_syncable);
+}
+
+void VideoReceiveStream::Start() {
+ RTC_DCHECK_CALLED_SEQUENTIALLY(&worker_sequence_checker_);
+ if (decode_thread_.IsRunning())
+ return;
+
+ bool protected_by_fec = config_.rtp.protected_by_flexfec ||
+ rtp_video_stream_receiver_.IsUlpfecEnabled();
+
+ frame_buffer_->Start();
+ call_stats_->RegisterStatsObserver(&rtp_video_stream_receiver_);
+ call_stats_->RegisterStatsObserver(this);
+
+ if (rtp_video_stream_receiver_.IsRetransmissionsEnabled() &&
+ protected_by_fec) {
+ frame_buffer_->SetProtectionMode(kProtectionNackFEC);
+ }
+
+ transport_adapter_.Enable();
+ rtc::VideoSinkInterface<VideoFrame>* renderer = nullptr;
+ if (config_.renderer) {
+ if (config_.disable_prerenderer_smoothing) {
+ renderer = this;
+ } else {
+ incoming_video_stream_.reset(
+ new IncomingVideoStream(config_.render_delay_ms, this));
+ renderer = incoming_video_stream_.get();
+ }
+ }
+ RTC_DCHECK(renderer != nullptr);
+
+ for (const Decoder& decoder : config_.decoders) {
+ video_receiver_.RegisterExternalDecoder(decoder.decoder,
+ decoder.payload_type);
+ VideoCodec codec = CreateDecoderVideoCodec(decoder);
+ RTC_CHECK(rtp_video_stream_receiver_.AddReceiveCodec(codec,
+ decoder.codec_params));
+ RTC_CHECK_EQ(VCM_OK, video_receiver_.RegisterReceiveCodec(
+ &codec, num_cpu_cores_, false));
+ }
+
+ video_stream_decoder_.reset(new VideoStreamDecoder(
+ &video_receiver_, &rtp_video_stream_receiver_,
+ &rtp_video_stream_receiver_,
+ rtp_video_stream_receiver_.IsRetransmissionsEnabled(), protected_by_fec,
+ &stats_proxy_, renderer));
+ // Register the channel to receive stats updates.
+ call_stats_->RegisterStatsObserver(video_stream_decoder_.get());
+
+ process_thread_->RegisterModule(&video_receiver_, RTC_FROM_HERE);
+
+  // Start the decode thread.
+ decode_thread_.Start();
+ rtp_video_stream_receiver_.StartReceive();
+}
+
+void VideoReceiveStream::Stop() {
+ RTC_DCHECK_CALLED_SEQUENTIALLY(&worker_sequence_checker_);
+ rtp_video_stream_receiver_.StopReceive();
+
+ frame_buffer_->Stop();
+ call_stats_->DeregisterStatsObserver(this);
+ call_stats_->DeregisterStatsObserver(&rtp_video_stream_receiver_);
+ process_thread_->DeRegisterModule(&video_receiver_);
+
+ if (decode_thread_.IsRunning()) {
+ // TriggerDecoderShutdown will release any waiting decoder thread and make
+ // it stop immediately, instead of waiting for a timeout. Needs to be called
+ // before joining the decoder thread.
+ video_receiver_.TriggerDecoderShutdown();
+
+ decode_thread_.Stop();
+ // Deregister external decoders so they are no longer running during
+ // destruction. This effectively stops the VCM since the decoder thread is
+ // stopped, the VCM is deregistered and no asynchronous decoder threads are
+ // running.
+ for (const Decoder& decoder : config_.decoders)
+ video_receiver_.RegisterExternalDecoder(nullptr, decoder.payload_type);
+ }
+
+ call_stats_->DeregisterStatsObserver(video_stream_decoder_.get());
+ video_stream_decoder_.reset();
+ incoming_video_stream_.reset();
+ transport_adapter_.Disable();
+}
+
+VideoReceiveStream::Stats VideoReceiveStream::GetStats() const {
+ auto stats = stats_proxy_.GetStats();
+ stats.rtcp_sender_packets_sent = 0;
+ stats.rtcp_sender_octets_sent = 0;
+ RtpRtcp* rtp_rtcp = rtp_video_stream_receiver_.rtp_rtcp();
+ if (rtp_rtcp) {
+ rtp_rtcp->RemoteRTCPSenderInfo(&stats.rtcp_sender_packets_sent,
+ &stats.rtcp_sender_octets_sent,
+ &stats.rtcp_sender_ntp_timestamp);
+ }
+
+ return stats;
+}
+
+void VideoReceiveStream::EnableEncodedFrameRecording(rtc::PlatformFile file,
+ size_t byte_limit) {
+ {
+ rtc::CritScope lock(&ivf_writer_lock_);
+ if (file == rtc::kInvalidPlatformFileValue) {
+ ivf_writer_.reset();
+ } else {
+ ivf_writer_ = IvfFileWriter::Wrap(rtc::File(file), byte_limit);
+ }
+ }
+
+ if (file != rtc::kInvalidPlatformFileValue) {
+    // Request a keyframe so one appears as early as possible in the log,
+    // making the recorded output actually decodable.
+ RequestKeyFrame();
+ }
+}
+
+void VideoReceiveStream::AddSecondarySink(RtpPacketSinkInterface* sink) {
+ rtp_video_stream_receiver_.AddSecondarySink(sink);
+}
+
+void VideoReceiveStream::RemoveSecondarySink(
+ const RtpPacketSinkInterface* sink) {
+ rtp_video_stream_receiver_.RemoveSecondarySink(sink);
+}
+
+// TODO(tommi): This method grabs a lock 6 times.
+void VideoReceiveStream::OnFrame(const VideoFrame& video_frame) {
+ int64_t sync_offset_ms;
+ double estimated_freq_khz;
+ // TODO(tommi): GetStreamSyncOffsetInMs grabs three locks. One inside the
+ // function itself, another in GetChannel() and a third in
+ // GetPlayoutTimestamp. Seems excessive. Anyhow, I'm assuming the function
+ // succeeds most of the time, which leads to grabbing a fourth lock.
+ if (rtp_stream_sync_.GetStreamSyncOffsetInMs(video_frame.timestamp(),
+ video_frame.render_time_ms(),
+ &sync_offset_ms,
+ &estimated_freq_khz)) {
+ // TODO(tommi): OnSyncOffsetUpdated grabs a lock.
+ stats_proxy_.OnSyncOffsetUpdated(sync_offset_ms, estimated_freq_khz);
+ }
+ // config_.renderer must never be null if we're getting this callback.
+ config_.renderer->OnFrame(video_frame);
+
+ // TODO(tommi): OnRenderFrame grabs a lock too.
+ stats_proxy_.OnRenderedFrame(video_frame);
+}
+
+// TODO(asapersson): Consider moving callback from video_encoder.h or
+// creating a different callback.
+EncodedImageCallback::Result VideoReceiveStream::OnEncodedImage(
+ const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info,
+ const RTPFragmentationHeader* fragmentation) {
+ stats_proxy_.OnPreDecode(encoded_image, codec_specific_info);
+ size_t simulcast_idx = 0;
+ if (codec_specific_info->codecType == kVideoCodecVP8) {
+ simulcast_idx = codec_specific_info->codecSpecific.VP8.simulcastIdx;
+ }
+ if (config_.pre_decode_callback) {
+ config_.pre_decode_callback->EncodedFrameCallback(EncodedFrame(
+ encoded_image._buffer, encoded_image._length, encoded_image._frameType,
+ simulcast_idx, encoded_image._timeStamp));
+ }
+ {
+ rtc::CritScope lock(&ivf_writer_lock_);
+ if (ivf_writer_.get()) {
+ RTC_DCHECK(codec_specific_info);
+ bool ok = ivf_writer_->WriteFrame(encoded_image,
+ codec_specific_info->codecType);
+ RTC_DCHECK(ok);
+ }
+ }
+
+ return Result(Result::OK, encoded_image._timeStamp);
+}
+
+void VideoReceiveStream::SendNack(
+ const std::vector<uint16_t>& sequence_numbers) {
+ rtp_video_stream_receiver_.RequestPacketRetransmit(sequence_numbers);
+}
+
+void VideoReceiveStream::RequestKeyFrame() {
+ rtp_video_stream_receiver_.RequestKeyFrame();
+}
+
+void VideoReceiveStream::OnCompleteFrame(
+ std::unique_ptr<video_coding::FrameObject> frame) {
+ int last_continuous_pid = frame_buffer_->InsertFrame(std::move(frame));
+ if (last_continuous_pid != -1)
+ rtp_video_stream_receiver_.FrameContinuous(last_continuous_pid);
+}
+
+void VideoReceiveStream::OnRttUpdate(int64_t avg_rtt_ms, int64_t max_rtt_ms) {
+ frame_buffer_->UpdateRtt(max_rtt_ms);
+}
+
+int VideoReceiveStream::id() const {
+ RTC_DCHECK_CALLED_SEQUENTIALLY(&worker_sequence_checker_);
+ return config_.rtp.remote_ssrc;
+}
+
+rtc::Optional<Syncable::Info> VideoReceiveStream::GetInfo() const {
+ RTC_DCHECK_CALLED_SEQUENTIALLY(&module_process_sequence_checker_);
+ Syncable::Info info;
+
+ RtpReceiver* rtp_receiver = rtp_video_stream_receiver_.GetRtpReceiver();
+ RTC_DCHECK(rtp_receiver);
+ if (!rtp_receiver->GetLatestTimestamps(
+ &info.latest_received_capture_timestamp,
+ &info.latest_receive_time_ms))
+ return rtc::Optional<Syncable::Info>();
+
+ RtpRtcp* rtp_rtcp = rtp_video_stream_receiver_.rtp_rtcp();
+ RTC_DCHECK(rtp_rtcp);
+ if (rtp_rtcp->RemoteNTP(&info.capture_time_ntp_secs,
+ &info.capture_time_ntp_frac,
+ nullptr,
+ nullptr,
+ &info.capture_time_source_clock) != 0) {
+ return rtc::Optional<Syncable::Info>();
+ }
+
+ info.current_delay_ms = video_receiver_.Delay();
+ return rtc::Optional<Syncable::Info>(info);
+}
+
+uint32_t VideoReceiveStream::GetPlayoutTimestamp() const {
+ RTC_NOTREACHED();
+ return 0;
+}
+
+void VideoReceiveStream::SetMinimumPlayoutDelay(int delay_ms) {
+ RTC_DCHECK_CALLED_SEQUENTIALLY(&module_process_sequence_checker_);
+ video_receiver_.SetMinimumPlayoutDelay(delay_ms);
+}
+
+void VideoReceiveStream::DecodeThreadFunction(void* ptr) {
+ while (static_cast<VideoReceiveStream*>(ptr)->Decode()) {
+ }
+}
+
+bool VideoReceiveStream::Decode() {
+ TRACE_EVENT0("webrtc", "VideoReceiveStream::Decode");
+ static const int kMaxWaitForFrameMs = 3000;
+ static const int kMaxWaitForKeyFrameMs = 200;
+
+ int wait_ms = keyframe_required_ ? kMaxWaitForKeyFrameMs : kMaxWaitForFrameMs;
+ std::unique_ptr<video_coding::FrameObject> frame;
+ // TODO(philipel): Call NextFrame with |keyframe_required| argument when
+ // downstream project has been fixed.
+ video_coding::FrameBuffer::ReturnReason res =
+ frame_buffer_->NextFrame(wait_ms, &frame);
+
+ if (res == video_coding::FrameBuffer::ReturnReason::kStopped) {
+ video_receiver_.DecodingStopped();
+ return false;
+ }
+
+ if (frame) {
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ RTC_DCHECK_EQ(res, video_coding::FrameBuffer::ReturnReason::kFrameFound);
+ if (video_receiver_.Decode(frame.get()) == VCM_OK) {
+ keyframe_required_ = false;
+ frame_decoded_ = true;
+ rtp_video_stream_receiver_.FrameDecoded(frame->picture_id);
+ } else if (!frame_decoded_ || !keyframe_required_ ||
+ (last_keyframe_request_ms_ + kMaxWaitForKeyFrameMs < now_ms)) {
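+      // Decode failed. Request a keyframe, but only if we have never decoded
+      // a frame, we are not already waiting for a keyframe, or the previous
+      // request is older than kMaxWaitForKeyFrameMs; this rate-limits
+      // keyframe requests on repeated decode errors.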
+ keyframe_required_ = true;
+ // TODO(philipel): Remove this keyframe request when downstream project
+ // has been fixed.
+ RequestKeyFrame();
+ last_keyframe_request_ms_ = now_ms;
+ }
+ } else {
+ RTC_DCHECK_EQ(res, video_coding::FrameBuffer::ReturnReason::kTimeout);
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ rtc::Optional<int64_t> last_packet_ms =
+ rtp_video_stream_receiver_.LastReceivedPacketMs();
+ rtc::Optional<int64_t> last_keyframe_packet_ms =
+ rtp_video_stream_receiver_.LastReceivedKeyframePacketMs();
+
+    // To avoid spamming keyframe requests for a stream that is not active, we
+    // check whether we have received a packet within the last 5 seconds.
+ bool stream_is_active = last_packet_ms && now_ms - *last_packet_ms < 5000;
+ if (!stream_is_active)
+ stats_proxy_.OnStreamInactive();
+
+    // If we have recently been receiving packets belonging to a keyframe,
+    // we assume a keyframe is currently being received.
+ bool receiving_keyframe =
+ last_keyframe_packet_ms &&
+ now_ms - *last_keyframe_packet_ms < kMaxWaitForKeyFrameMs;
+
+ if (stream_is_active && !receiving_keyframe) {
+ RTC_LOG(LS_WARNING) << "No decodable frame in " << wait_ms
+ << " ms, requesting keyframe.";
+ RequestKeyFrame();
+ }
+ }
+ return true;
+}
+
+} // namespace internal
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/video/video_receive_stream.h b/third_party/libwebrtc/webrtc/video/video_receive_stream.h
new file mode 100644
index 0000000000..a5f9108b4a
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/video_receive_stream.h
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_VIDEO_RECEIVE_STREAM_H_
+#define VIDEO_VIDEO_RECEIVE_STREAM_H_
+
+#include <memory>
+#include <vector>
+
+#include "call/rtp_packet_sink_interface.h"
+#include "call/syncable.h"
+#include "call/video_receive_stream.h"
+#include "common_video/include/incoming_video_stream.h"
+#include "common_video/libyuv/include/webrtc_libyuv.h"
+#include "modules/rtp_rtcp/include/flexfec_receiver.h"
+#include "modules/video_coding/frame_buffer2.h"
+#include "modules/video_coding/video_coding_impl.h"
+#include "rtc_base/sequenced_task_checker.h"
+#include "system_wrappers/include/clock.h"
+#include "video/receive_statistics_proxy.h"
+#include "video/rtp_streams_synchronizer.h"
+#include "video/rtp_video_stream_receiver.h"
+#include "video/transport_adapter.h"
+#include "video/video_stream_decoder.h"
+
+namespace webrtc {
+
+class CallStats;
+class IvfFileWriter;
+class ProcessThread;
+class RTPFragmentationHeader;
+class RtpStreamReceiverInterface;
+class RtpStreamReceiverControllerInterface;
+class RtxReceiveStream;
+class VCMTiming;
+class VCMJitterEstimator;
+
+namespace internal {
+
+class VideoReceiveStream : public webrtc::VideoReceiveStream,
+ public rtc::VideoSinkInterface<VideoFrame>,
+ public EncodedImageCallback,
+ public NackSender,
+ public KeyFrameRequestSender,
+ public video_coding::OnCompleteFrameCallback,
+ public Syncable,
+ public CallStatsObserver {
+ public:
+ VideoReceiveStream(RtpStreamReceiverControllerInterface* receiver_controller,
+ int num_cpu_cores,
+ PacketRouter* packet_router,
+ VideoReceiveStream::Config config,
+ ProcessThread* process_thread,
+ CallStats* call_stats);
+ ~VideoReceiveStream() override;
+
+ const Config& config() const { return config_; }
+
+ void SignalNetworkState(NetworkState state);
+ bool DeliverRtcp(const uint8_t* packet, size_t length);
+
+ void SetSync(Syncable* audio_syncable);
+
+ // Implements webrtc::VideoReceiveStream.
+ void Start() override;
+ void Stop() override;
+
+ webrtc::VideoReceiveStream::Stats GetStats() const override;
+
+  // Takes ownership of the file and is responsible for closing it later.
+ // Calling this method will close and finalize any current log.
+ // Giving rtc::kInvalidPlatformFileValue disables logging.
+ // If a frame to be written would make the log too large the write fails and
+ // the log is closed and finalized. A |byte_limit| of 0 means no limit.
+ void EnableEncodedFrameRecording(rtc::PlatformFile file,
+ size_t byte_limit) override;
+
+ void AddSecondarySink(RtpPacketSinkInterface* sink) override;
+ void RemoveSecondarySink(const RtpPacketSinkInterface* sink) override;
+
+ // Implements rtc::VideoSinkInterface<VideoFrame>.
+ void OnFrame(const VideoFrame& video_frame) override;
+
+ // Implements EncodedImageCallback.
+ EncodedImageCallback::Result OnEncodedImage(
+ const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info,
+ const RTPFragmentationHeader* fragmentation) override;
+
+ // Implements NackSender.
+ void SendNack(const std::vector<uint16_t>& sequence_numbers) override;
+
+ // Implements KeyFrameRequestSender.
+ void RequestKeyFrame() override;
+
+ // Implements video_coding::OnCompleteFrameCallback.
+ void OnCompleteFrame(
+ std::unique_ptr<video_coding::FrameObject> frame) override;
+
+ // Implements CallStatsObserver::OnRttUpdate
+ void OnRttUpdate(int64_t avg_rtt_ms, int64_t max_rtt_ms) override;
+
+ // Implements Syncable.
+ int id() const override;
+ rtc::Optional<Syncable::Info> GetInfo() const override;
+ uint32_t GetPlayoutTimestamp() const override;
+ void SetMinimumPlayoutDelay(int delay_ms) override;
+
+ private:
+ static void DecodeThreadFunction(void* ptr);
+ bool Decode();
+
+ rtc::SequencedTaskChecker worker_sequence_checker_;
+ rtc::SequencedTaskChecker module_process_sequence_checker_;
+
+ TransportAdapter transport_adapter_;
+ const VideoReceiveStream::Config config_;
+ const int num_cpu_cores_;
+ ProcessThread* const process_thread_;
+ Clock* const clock_;
+
+ rtc::PlatformThread decode_thread_;
+
+ CallStats* const call_stats_;
+
+ // Shared by media and rtx stream receivers, since the latter has no RtpRtcp
+ // module of its own.
+ const std::unique_ptr<ReceiveStatistics> rtp_receive_statistics_;
+
+ std::unique_ptr<VCMTiming> timing_; // Jitter buffer experiment.
+ vcm::VideoReceiver video_receiver_;
+ std::unique_ptr<rtc::VideoSinkInterface<VideoFrame>> incoming_video_stream_;
+ ReceiveStatisticsProxy stats_proxy_;
+ RtpVideoStreamReceiver rtp_video_stream_receiver_;
+ std::unique_ptr<VideoStreamDecoder> video_stream_decoder_;
+ RtpStreamsSynchronizer rtp_stream_sync_;
+
+ rtc::CriticalSection ivf_writer_lock_;
+ std::unique_ptr<IvfFileWriter> ivf_writer_ RTC_GUARDED_BY(ivf_writer_lock_);
+
+ // Members for the new jitter buffer experiment.
+ std::unique_ptr<VCMJitterEstimator> jitter_estimator_;
+ std::unique_ptr<video_coding::FrameBuffer> frame_buffer_;
+
+ std::unique_ptr<RtpStreamReceiverInterface> media_receiver_;
+ std::unique_ptr<RtxReceiveStream> rtx_receive_stream_;
+ std::unique_ptr<RtpStreamReceiverInterface> rtx_receiver_;
+
+  // Whenever we are in an undecodable state (the stream has just started, or
+  // a decoding error occurred), we require a keyframe to restart the stream.
+ bool keyframe_required_ = true;
+
+  // Whether we have successfully decoded any frame.
+ bool frame_decoded_ = false;
+
+ int64_t last_keyframe_request_ms_ = 0;
+};
+} // namespace internal
+} // namespace webrtc
+
+#endif // VIDEO_VIDEO_RECEIVE_STREAM_H_
diff --git a/third_party/libwebrtc/webrtc/video/video_receive_stream_unittest.cc b/third_party/libwebrtc/webrtc/video/video_receive_stream_unittest.cc
new file mode 100644
index 0000000000..97a447ba77
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/video_receive_stream_unittest.cc
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2017 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <vector>
+
+#include "test/gtest.h"
+#include "test/gmock.h"
+
+#include "api/video_codecs/video_decoder.h"
+#include "call/rtp_stream_receiver_controller.h"
+#include "media/base/fakevideorenderer.h"
+#include "modules/pacing/packet_router.h"
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+#include "modules/utility/include/process_thread.h"
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/event.h"
+#include "system_wrappers/include/clock.h"
+#include "test/field_trial.h"
+#include "video/call_stats.h"
+#include "video/video_receive_stream.h"
+
+namespace webrtc {
+namespace {
+
+using testing::_;
+using testing::Invoke;
+
+constexpr int kDefaultTimeOutMs = 50;
+
+const char kNewJitterBufferFieldTrialEnabled[] =
+ "WebRTC-NewVideoJitterBuffer/Enabled/";
+
+class MockTransport : public Transport {
+ public:
+ MOCK_METHOD3(SendRtp,
+ bool(const uint8_t* packet,
+ size_t length,
+ const PacketOptions& options));
+ MOCK_METHOD2(SendRtcp, bool(const uint8_t* packet, size_t length));
+};
+
+class MockVideoDecoder : public VideoDecoder {
+ public:
+ MOCK_METHOD2(InitDecode,
+ int32_t(const VideoCodec* config, int32_t number_of_cores));
+ MOCK_METHOD5(Decode,
+ int32_t(const EncodedImage& input,
+ bool missing_frames,
+ const RTPFragmentationHeader* fragmentation,
+ const CodecSpecificInfo* codec_specific_info,
+ int64_t render_time_ms));
+ MOCK_METHOD1(RegisterDecodeCompleteCallback,
+ int32_t(DecodedImageCallback* callback));
+ MOCK_METHOD0(Release, int32_t(void));
+  const char* ImplementationName() const override {
+    return "MockVideoDecoder";
+  }
+};
+
+} // namespace
+
+class VideoReceiveStreamTest : public testing::Test {
+ public:
+ VideoReceiveStreamTest()
+ : override_field_trials_(kNewJitterBufferFieldTrialEnabled),
+ config_(&mock_transport_),
+ call_stats_(Clock::GetRealTimeClock()),
+ process_thread_(ProcessThread::Create("TestThread")) {}
+
+ void SetUp() {
+ constexpr int kDefaultNumCpuCores = 2;
+ config_.rtp.remote_ssrc = 1111;
+ config_.rtp.local_ssrc = 2222;
+ config_.renderer = &fake_renderer_;
+ VideoReceiveStream::Decoder h264_decoder;
+ h264_decoder.payload_type = 99;
+ h264_decoder.payload_name = "H264";
+ h264_decoder.codec_params.insert(
+ {"sprop-parameter-sets", "Z0IACpZTBYmI,aMljiA=="});
+ h264_decoder.decoder = &mock_h264_video_decoder_;
+ config_.decoders.push_back(h264_decoder);
+ VideoReceiveStream::Decoder null_decoder;
+ null_decoder.payload_type = 98;
+ null_decoder.payload_name = "null";
+ null_decoder.decoder = &mock_null_video_decoder_;
+ config_.decoders.push_back(null_decoder);
+
+ video_receive_stream_.reset(new webrtc::internal::VideoReceiveStream(
+ &rtp_stream_receiver_controller_, kDefaultNumCpuCores,
+ &packet_router_, config_.Copy(), process_thread_.get(), &call_stats_));
+ }
+
+ protected:
+ webrtc::test::ScopedFieldTrials override_field_trials_;
+ VideoReceiveStream::Config config_;
+ CallStats call_stats_;
+ MockVideoDecoder mock_h264_video_decoder_;
+ MockVideoDecoder mock_null_video_decoder_;
+ cricket::FakeVideoRenderer fake_renderer_;
+ MockTransport mock_transport_;
+ PacketRouter packet_router_;
+ std::unique_ptr<ProcessThread> process_thread_;
+ RtpStreamReceiverController rtp_stream_receiver_controller_;
+ std::unique_ptr<webrtc::internal::VideoReceiveStream> video_receive_stream_;
+};
+
+TEST_F(VideoReceiveStreamTest, CreateFrameFromH264FmtpSpropAndIdr) {
+ constexpr uint8_t idr_nalu[] = {0x05, 0xFF, 0xFF, 0xFF};
+ RtpPacketToSend rtppacket(nullptr);
+ uint8_t* payload = rtppacket.AllocatePayload(sizeof(idr_nalu));
+ memcpy(payload, idr_nalu, sizeof(idr_nalu));
+ rtppacket.SetMarker(true);
+ rtppacket.SetSsrc(1111);
+ rtppacket.SetPayloadType(99);
+ rtppacket.SetSequenceNumber(1);
+ rtppacket.SetTimestamp(0);
+  rtc::Event init_decode_event(false, false);
+  EXPECT_CALL(mock_h264_video_decoder_, InitDecode(_, _))
+      .WillOnce(Invoke([&init_decode_event](const VideoCodec* config,
+                                            int32_t number_of_cores) {
+        init_decode_event.Set();
+        return 0;
+      }));
+ EXPECT_CALL(mock_h264_video_decoder_, RegisterDecodeCompleteCallback(_));
+ video_receive_stream_->Start();
+ EXPECT_CALL(mock_h264_video_decoder_, Decode(_, false, _, _, _));
+ RtpPacketReceived parsed_packet;
+ ASSERT_TRUE(parsed_packet.Parse(rtppacket.data(), rtppacket.size()));
+ rtp_stream_receiver_controller_.OnRtpPacket(parsed_packet);
+ EXPECT_CALL(mock_h264_video_decoder_, Release());
+ // Make sure the decoder thread had a chance to run.
+  init_decode_event.Wait(kDefaultTimeOutMs);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/video/video_send_stream.cc b/third_party/libwebrtc/webrtc/video/video_send_stream.cc
new file mode 100644
index 0000000000..765a096857
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/video_send_stream.cc
@@ -0,0 +1,1336 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "video/video_send_stream.h"
+
+#include <algorithm>
+#include <cmath>
+#include <sstream>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "call/rtp_transport_controller_send_interface.h"
+#include "common_types.h" // NOLINT(build/include)
+#include "common_video/include/video_bitrate_allocator.h"
+#include "modules/bitrate_controller/include/bitrate_controller.h"
+#include "modules/congestion_controller/include/send_side_congestion_controller.h"
+#include "modules/pacing/alr_detector.h"
+#include "modules/pacing/packet_router.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp.h"
+#include "modules/rtp_rtcp/source/rtp_sender.h"
+#include "modules/utility/include/process_thread.h"
+#include "modules/video_coding/utility/ivf_file_writer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/file.h"
+#include "rtc_base/location.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/trace_event.h"
+#include "rtc_base/weak_ptr.h"
+#include "system_wrappers/include/field_trial.h"
+#include "video/call_stats.h"
+#include "video/payload_router.h"
+#include "call/video_send_stream.h"
+
+namespace webrtc {
+
+static const int kMinSendSidePacketHistorySize = 600;
+namespace {
+
+// We don't do MTU discovery, so assume that we have the standard Ethernet MTU.
+const size_t kPathMTU = 1500;
+
+std::vector<RtpRtcp*> CreateRtpRtcpModules(
+ Transport* outgoing_transport,
+ RtcpIntraFrameObserver* intra_frame_callback,
+ RtcpBandwidthObserver* bandwidth_callback,
+ RtpTransportControllerSendInterface* transport,
+ RtcpRttStats* rtt_stats,
+ FlexfecSender* flexfec_sender,
+ SendStatisticsProxy* stats_proxy,
+ SendDelayStats* send_delay_stats,
+ RtcEventLog* event_log,
+ RateLimiter* retransmission_rate_limiter,
+ OverheadObserver* overhead_observer,
+ size_t num_modules,
+ RtpKeepAliveConfig keepalive_config) {
+ RTC_DCHECK_GT(num_modules, 0);
+ RtpRtcp::Configuration configuration;
+ configuration.audio = false;
+ configuration.receiver_only = false;
+ configuration.flexfec_sender = flexfec_sender;
+ configuration.outgoing_transport = outgoing_transport;
+ configuration.intra_frame_callback = intra_frame_callback;
+ configuration.bandwidth_callback = bandwidth_callback;
+ configuration.transport_feedback_callback =
+ transport->transport_feedback_observer();
+ configuration.rtt_stats = rtt_stats;
+ configuration.rtcp_packet_type_counter_observer = stats_proxy;
+ configuration.paced_sender = transport->packet_sender();
+ configuration.transport_sequence_number_allocator =
+ transport->packet_router();
+ configuration.send_bitrate_observer = stats_proxy;
+ configuration.send_frame_count_observer = stats_proxy;
+ configuration.send_side_delay_observer = stats_proxy;
+ configuration.send_packet_observer = send_delay_stats;
+ configuration.event_log = event_log;
+ configuration.retransmission_rate_limiter = retransmission_rate_limiter;
+ configuration.overhead_observer = overhead_observer;
+ configuration.keepalive_config = keepalive_config;
+ std::vector<RtpRtcp*> modules;
+ for (size_t i = 0; i < num_modules; ++i) {
+ RtpRtcp* rtp_rtcp = RtpRtcp::CreateRtpRtcp(configuration);
+ rtp_rtcp->SetSendingStatus(false);
+ rtp_rtcp->SetSendingMediaStatus(false);
+ rtp_rtcp->SetRTCPStatus(RtcpMode::kCompound);
+ modules.push_back(rtp_rtcp);
+ }
+ return modules;
+}
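+
+// Note: each module above is created with sending disabled and compound RTCP
+// enabled; actual sending is toggled per stream later via the PayloadRouter
+// (see VideoSendStreamImpl::Start and Stop).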
+
+// TODO(brandtr): Update this function when we support multistream protection.
+std::unique_ptr<FlexfecSender> MaybeCreateFlexfecSender(
+ const VideoSendStream::Config& config,
+ const std::map<uint32_t, RtpState>& suspended_ssrcs) {
+ if (config.rtp.flexfec.payload_type < 0) {
+ return nullptr;
+ }
+ RTC_DCHECK_GE(config.rtp.flexfec.payload_type, 0);
+ RTC_DCHECK_LE(config.rtp.flexfec.payload_type, 127);
+ if (config.rtp.flexfec.ssrc == 0) {
+ RTC_LOG(LS_WARNING) << "FlexFEC is enabled, but no FlexFEC SSRC given. "
+ "Therefore disabling FlexFEC.";
+ return nullptr;
+ }
+ if (config.rtp.flexfec.protected_media_ssrcs.empty()) {
+ RTC_LOG(LS_WARNING)
+ << "FlexFEC is enabled, but no protected media SSRC given. "
+ "Therefore disabling FlexFEC.";
+ return nullptr;
+ }
+
+ if (config.rtp.ssrcs.size() > 1) {
+    RTC_LOG(LS_WARNING)
+        << "Both FlexFEC and simulcast are enabled. This combination "
+           "is, however, not supported by our current FlexFEC "
+           "implementation. Therefore disabling FlexFEC.";
+ return nullptr;
+ }
+
+ if (config.rtp.flexfec.protected_media_ssrcs.size() > 1) {
+ RTC_LOG(LS_WARNING)
+ << "The supplied FlexfecConfig contained multiple protected "
+ "media streams, but our implementation currently only "
+ "supports protecting a single media stream. "
+ "To avoid confusion, disabling FlexFEC completely.";
+ return nullptr;
+ }
+
+ const RtpState* rtp_state = nullptr;
+ auto it = suspended_ssrcs.find(config.rtp.flexfec.ssrc);
+ if (it != suspended_ssrcs.end()) {
+ rtp_state = &it->second;
+ }
+
+ RTC_DCHECK_EQ(1U, config.rtp.flexfec.protected_media_ssrcs.size());
+ return std::unique_ptr<FlexfecSender>(new FlexfecSender(
+ config.rtp.flexfec.payload_type, config.rtp.flexfec.ssrc,
+ config.rtp.flexfec.protected_media_ssrcs[0], config.rtp.extensions,
+ RTPSender::FecExtensionSizes(), rtp_state, Clock::GetRealTimeClock()));
+}
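+
+// A minimal sketch of a config that passes all of the checks above, with
+// hypothetical values:
+//
+//   config.rtp.flexfec.payload_type = 115;                // valid PT in 0-127
+//   config.rtp.flexfec.ssrc = 0x1234;                     // FlexFEC SSRC
+//   config.rtp.flexfec.protected_media_ssrcs = {0x5678};  // exactly one
+//   config.rtp.ssrcs = {0x5678};                          // no simulcast
+//
+// Anything else (no SSRC, zero or multiple protected SSRCs, simulcast) logs a
+// warning and returns nullptr, i.e. FlexFEC is disabled.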
+
+bool TransportSeqNumExtensionConfigured(const VideoSendStream::Config& config) {
+ const std::vector<RtpExtension>& extensions = config.rtp.extensions;
+ return std::find_if(
+ extensions.begin(), extensions.end(), [](const RtpExtension& ext) {
+ return ext.uri == RtpExtension::kTransportSequenceNumberUri;
+ }) != extensions.end();
+}
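+
+// A negotiated transport-wide sequence number extension is what the
+// VideoSendStreamImpl constructor below treats as "send-side BWE is enabled"
+// when deciding whether to apply the ALR probing/pacing experiments.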
+
+const char kForcedFallbackFieldTrial[] =
+ "WebRTC-VP8-Forced-Fallback-Encoder-v2";
+
+rtc::Optional<int> GetFallbackMinBpsFromFieldTrial() {
+ if (!webrtc::field_trial::IsEnabled(kForcedFallbackFieldTrial))
+ return rtc::Optional<int>();
+
+ std::string group =
+ webrtc::field_trial::FindFullName(kForcedFallbackFieldTrial);
+ if (group.empty())
+ return rtc::Optional<int>();
+
+ int min_pixels;
+ int max_pixels;
+ int min_bps;
+ if (sscanf(group.c_str(), "Enabled-%d,%d,%d", &min_pixels, &max_pixels,
+ &min_bps) != 3) {
+ return rtc::Optional<int>();
+ }
+
+ if (min_bps <= 0)
+ return rtc::Optional<int>();
+
+ return rtc::Optional<int>(min_bps);
+}
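+
+// For example, a field trial value of the (hypothetical) form
+// "Enabled-320,1280,30000" parses as min_pixels = 320, max_pixels = 1280 and
+// min_bps = 30000, so the function returns 30000. Only min_bps is used here;
+// the pixel bounds are parsed but ignored by this function.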
+
+int GetEncoderMinBitrateBps() {
+ const int kDefaultEncoderMinBitrateBps = 30000;
+ return GetFallbackMinBpsFromFieldTrial().value_or(
+ kDefaultEncoderMinBitrateBps);
+}
+
+bool PayloadTypeSupportsSkippingFecPackets(const std::string& payload_name) {
+ const VideoCodecType codecType = PayloadStringToCodecType(payload_name);
+ if (codecType == kVideoCodecVP8 || codecType == kVideoCodecVP9) {
+ return true;
+ }
+ return false;
+}
+
+int CalculateMaxPadBitrateBps(std::vector<VideoStream> streams,
+ int min_transmit_bitrate_bps,
+ bool pad_to_min_bitrate) {
+ int pad_up_to_bitrate_bps = 0;
+ // Calculate max padding bitrate for a multi layer codec.
+ if (streams.size() > 1) {
+ // Pad to min bitrate of the highest layer.
+ pad_up_to_bitrate_bps = streams[streams.size() - 1].min_bitrate_bps;
+ // Add target_bitrate_bps of the lower layers.
+ for (size_t i = 0; i < streams.size() - 1; ++i)
+ pad_up_to_bitrate_bps += streams[i].target_bitrate_bps;
+ } else if (pad_to_min_bitrate) {
+ pad_up_to_bitrate_bps = streams[0].min_bitrate_bps;
+ }
+
+ pad_up_to_bitrate_bps =
+ std::max(pad_up_to_bitrate_bps, min_transmit_bitrate_bps);
+
+ return pad_up_to_bitrate_bps;
+}
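+
+// Worked example with hypothetical rates: for three simulcast layers with
+// target_bitrate_bps = 200'000 and 500'000 for the two lower layers and
+// min_bitrate_bps = 700'000 for the top layer, padding goes up to
+// 700'000 + 200'000 + 500'000 = 1'400'000 bps, and never below
+// min_transmit_bitrate_bps.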
+
+uint32_t CalculateOverheadRateBps(int packets_per_second,
+ size_t overhead_bytes_per_packet,
+ uint32_t max_overhead_bps) {
+ uint32_t overhead_bps =
+ static_cast<uint32_t>(8 * overhead_bytes_per_packet * packets_per_second);
+ return std::min(overhead_bps, max_overhead_bps);
+}
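+
+// E.g. 50 packets/s with 40 bytes of per-packet overhead gives
+// 8 * 40 * 50 = 16'000 bps of overhead rate, capped at max_overhead_bps.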
+
+int CalculatePacketRate(uint32_t bitrate_bps, size_t packet_size_bytes) {
+ size_t packet_size_bits = 8 * packet_size_bytes;
+ // Ceil for int value of bitrate_bps / packet_size_bits.
+ return static_cast<int>((bitrate_bps + packet_size_bits - 1) /
+ packet_size_bits);
+}
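+
+// E.g. 1'000'000 bps with 1200-byte (9600-bit) packets gives
+// (1'000'000 + 9599) / 9600 = 105 packets/s, the ceiling of the exact
+// quotient ~104.2.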
+
+} // namespace
+
+namespace internal {
+
+// VideoSendStreamImpl implements internal::VideoSendStream.
+// It is created and destroyed on |worker_queue|. The intent is to decrease the
+// need for locking and to ensure methods are called in sequence.
+// Public methods except |DeliverRtcp| must be called on |worker_queue|.
+// DeliverRtcp is called on the libjingle worker thread or a network thread.
+// An encoder may deliver frames through the EncodedImageCallback on an
+// arbitrary thread.
+class VideoSendStreamImpl : public webrtc::BitrateAllocatorObserver,
+ public webrtc::OverheadObserver,
+ public webrtc::VCMProtectionCallback,
+ public VideoStreamEncoder::EncoderSink,
+ public VideoBitrateAllocationObserver {
+ public:
+ VideoSendStreamImpl(
+ SendStatisticsProxy* stats_proxy,
+ rtc::TaskQueue* worker_queue,
+ CallStats* call_stats,
+ RtpTransportControllerSendInterface* transport,
+ BitrateAllocator* bitrate_allocator,
+ SendDelayStats* send_delay_stats,
+ VideoStreamEncoder* video_stream_encoder,
+ RtcEventLog* event_log,
+ const VideoSendStream::Config* config,
+ int initial_encoder_max_bitrate,
+ std::map<uint32_t, RtpState> suspended_ssrcs,
+ std::map<uint32_t, RtpPayloadState> suspended_payload_states,
+ VideoEncoderConfig::ContentType content_type);
+ ~VideoSendStreamImpl() override;
+
+  // RegisterProcessThread registers |module_process_thread| with the objects
+  // that use it. Registration has to happen on the thread where
+  // |module_process_thread| was created (libjingle's worker thread).
+ // TODO(perkj): Replace the use of |module_process_thread| with a TaskQueue,
+ // maybe |worker_queue|.
+ void RegisterProcessThread(ProcessThread* module_process_thread);
+ void DeRegisterProcessThread();
+
+ void SignalNetworkState(NetworkState state);
+ bool DeliverRtcp(const uint8_t* packet, size_t length);
+ void Start();
+ void Stop();
+
+ VideoSendStream::RtpStateMap GetRtpStates() const;
+ VideoSendStream::RtpPayloadStateMap GetRtpPayloadStates() const;
+
+ void EnableEncodedFrameRecording(const std::vector<rtc::PlatformFile>& files,
+ size_t byte_limit);
+
+ void SetTransportOverhead(size_t transport_overhead_per_packet);
+
+ private:
+ class CheckEncoderActivityTask;
+ class EncoderReconfiguredTask;
+
+ // Implements BitrateAllocatorObserver.
+ uint32_t OnBitrateUpdated(uint32_t bitrate_bps,
+ uint8_t fraction_loss,
+ int64_t rtt,
+ int64_t probing_interval_ms) override;
+
+ // Implements webrtc::VCMProtectionCallback.
+ int ProtectionRequest(const FecProtectionParams* delta_params,
+ const FecProtectionParams* key_params,
+ uint32_t* sent_video_rate_bps,
+ uint32_t* sent_nack_rate_bps,
+ uint32_t* sent_fec_rate_bps) override;
+
+ // Implements OverheadObserver.
+ void OnOverheadChanged(size_t overhead_bytes_per_packet) override;
+
+ void OnEncoderConfigurationChanged(std::vector<VideoStream> streams,
+ int min_transmit_bitrate_bps) override;
+
+ // Implements EncodedImageCallback. The implementation routes encoded frames
+ // to the |payload_router_| and |config.pre_encode_callback| if set.
+ // Called on an arbitrary encoder callback thread.
+ EncodedImageCallback::Result OnEncodedImage(
+ const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info,
+ const RTPFragmentationHeader* fragmentation) override;
+
+ // Implements VideoBitrateAllocationObserver.
+ void OnBitrateAllocationUpdated(const BitrateAllocation& allocation) override;
+
+ void ConfigureProtection();
+ void ConfigureSsrcs();
+ void SignalEncoderTimedOut();
+ void SignalEncoderActive();
+
+ const bool send_side_bwe_with_overhead_;
+
+ SendStatisticsProxy* const stats_proxy_;
+ const VideoSendStream::Config* const config_;
+ std::map<uint32_t, RtpState> suspended_ssrcs_;
+
+ ProcessThread* module_process_thread_;
+ rtc::ThreadChecker module_process_thread_checker_;
+ rtc::TaskQueue* const worker_queue_;
+
+ rtc::CriticalSection encoder_activity_crit_sect_;
+ CheckEncoderActivityTask* check_encoder_activity_task_
+ RTC_GUARDED_BY(encoder_activity_crit_sect_);
+
+ CallStats* const call_stats_;
+ RtpTransportControllerSendInterface* const transport_;
+ BitrateAllocator* const bitrate_allocator_;
+
+ // TODO(brandtr): Move ownership to PayloadRouter.
+ std::unique_ptr<FlexfecSender> flexfec_sender_;
+
+ rtc::CriticalSection ivf_writers_crit_;
+ std::unique_ptr<IvfFileWriter>
+ file_writers_[kMaxSimulcastStreams] RTC_GUARDED_BY(ivf_writers_crit_);
+
+ int max_padding_bitrate_;
+ int encoder_min_bitrate_bps_;
+ uint32_t encoder_max_bitrate_bps_;
+ uint32_t encoder_target_rate_bps_;
+
+ VideoStreamEncoder* const video_stream_encoder_;
+ EncoderRtcpFeedback encoder_feedback_;
+ ProtectionBitrateCalculator protection_bitrate_calculator_;
+
+ RtcpBandwidthObserver* const bandwidth_observer_;
+ // RtpRtcp modules, declared here as they use other members on construction.
+ const std::vector<RtpRtcp*> rtp_rtcp_modules_;
+ PayloadRouter payload_router_;
+
+  // |weak_ptr_| to ourselves. This is used since we cannot call
+  // |weak_ptr_factory_.GetWeakPtr| from multiple sequences, but it is OK to
+  // copy an existing WeakPtr.
+ rtc::WeakPtr<VideoSendStreamImpl> weak_ptr_;
+ // |weak_ptr_factory_| must be declared last to make sure all WeakPtr's are
+ // invalidated before any other members are destroyed.
+ rtc::WeakPtrFactory<VideoSendStreamImpl> weak_ptr_factory_;
+
+ rtc::CriticalSection overhead_bytes_per_packet_crit_;
+ size_t overhead_bytes_per_packet_
+ RTC_GUARDED_BY(overhead_bytes_per_packet_crit_);
+ size_t transport_overhead_bytes_per_packet_;
+};
+
+// TODO(tommi): See if there's a more elegant way to create a task that creates
+// an object on the correct task queue.
+class VideoSendStream::ConstructionTask : public rtc::QueuedTask {
+ public:
+ ConstructionTask(
+ std::unique_ptr<VideoSendStreamImpl>* send_stream,
+ rtc::Event* done_event,
+ SendStatisticsProxy* stats_proxy,
+ VideoStreamEncoder* video_stream_encoder,
+ ProcessThread* module_process_thread,
+ CallStats* call_stats,
+ RtpTransportControllerSendInterface* transport,
+ BitrateAllocator* bitrate_allocator,
+ SendDelayStats* send_delay_stats,
+ RtcEventLog* event_log,
+ const VideoSendStream::Config* config,
+ int initial_encoder_max_bitrate,
+ const std::map<uint32_t, RtpState>& suspended_ssrcs,
+ const std::map<uint32_t, RtpPayloadState>& suspended_payload_states,
+ VideoEncoderConfig::ContentType content_type)
+ : send_stream_(send_stream),
+ done_event_(done_event),
+ stats_proxy_(stats_proxy),
+ video_stream_encoder_(video_stream_encoder),
+ call_stats_(call_stats),
+ transport_(transport),
+ bitrate_allocator_(bitrate_allocator),
+ send_delay_stats_(send_delay_stats),
+ event_log_(event_log),
+ config_(config),
+ initial_encoder_max_bitrate_(initial_encoder_max_bitrate),
+ suspended_ssrcs_(suspended_ssrcs),
+ suspended_payload_states_(suspended_payload_states),
+ content_type_(content_type) {}
+
+ ~ConstructionTask() override { done_event_->Set(); }
+
+ private:
+ bool Run() override {
+ send_stream_->reset(new VideoSendStreamImpl(
+ stats_proxy_, rtc::TaskQueue::Current(), call_stats_, transport_,
+ bitrate_allocator_, send_delay_stats_, video_stream_encoder_,
+ event_log_, config_, initial_encoder_max_bitrate_,
+ std::move(suspended_ssrcs_), std::move(suspended_payload_states_),
+ content_type_));
+ return true;
+ }
+
+ std::unique_ptr<VideoSendStreamImpl>* const send_stream_;
+ rtc::Event* const done_event_;
+ SendStatisticsProxy* const stats_proxy_;
+ VideoStreamEncoder* const video_stream_encoder_;
+ CallStats* const call_stats_;
+ RtpTransportControllerSendInterface* const transport_;
+ BitrateAllocator* const bitrate_allocator_;
+ SendDelayStats* const send_delay_stats_;
+ RtcEventLog* const event_log_;
+ const VideoSendStream::Config* config_;
+ int initial_encoder_max_bitrate_;
+ std::map<uint32_t, RtpState> suspended_ssrcs_;
+ std::map<uint32_t, RtpPayloadState> suspended_payload_states_;
+ const VideoEncoderConfig::ContentType content_type_;
+};
+
+class VideoSendStream::DestructAndGetRtpStateTask : public rtc::QueuedTask {
+ public:
+ DestructAndGetRtpStateTask(
+ VideoSendStream::RtpStateMap* state_map,
+ VideoSendStream::RtpPayloadStateMap* payload_state_map,
+ std::unique_ptr<VideoSendStreamImpl> send_stream,
+ rtc::Event* done_event)
+ : state_map_(state_map),
+ payload_state_map_(payload_state_map),
+ send_stream_(std::move(send_stream)),
+ done_event_(done_event) {}
+
+ ~DestructAndGetRtpStateTask() override { RTC_CHECK(!send_stream_); }
+
+ private:
+ bool Run() override {
+ send_stream_->Stop();
+ *state_map_ = send_stream_->GetRtpStates();
+ *payload_state_map_ = send_stream_->GetRtpPayloadStates();
+ send_stream_.reset();
+ done_event_->Set();
+ return true;
+ }
+
+ VideoSendStream::RtpStateMap* state_map_;
+ VideoSendStream::RtpPayloadStateMap* payload_state_map_;
+ std::unique_ptr<VideoSendStreamImpl> send_stream_;
+ rtc::Event* done_event_;
+};
+
+// CheckEncoderActivityTask is used for tracking when the encoder last produced
+// an encoded video frame. If the encoder has not produced anything during the
+// last kEncoderTimeOutMs, we want to stop sending padding.
+class VideoSendStreamImpl::CheckEncoderActivityTask : public rtc::QueuedTask {
+ public:
+ static const int kEncoderTimeOutMs = 2000;
+ explicit CheckEncoderActivityTask(
+ const rtc::WeakPtr<VideoSendStreamImpl>& send_stream)
+ : activity_(0), send_stream_(std::move(send_stream)), timed_out_(false) {}
+
+ void Stop() {
+ RTC_CHECK(task_checker_.CalledSequentially());
+ send_stream_.reset();
+ }
+
+ void UpdateEncoderActivity() {
+    // UpdateEncoderActivity is called from VideoSendStreamImpl::Encoded on
+    // whatever thread the real encoder implementation runs on. In the case of
+    // hardware encoders, there might be several encoders running in parallel
+    // on different threads.
+ rtc::AtomicOps::ReleaseStore(&activity_, 1);
+ }
+
+ private:
+ bool Run() override {
+ RTC_CHECK(task_checker_.CalledSequentially());
+ if (!send_stream_)
+ return true;
+ if (!rtc::AtomicOps::AcquireLoad(&activity_)) {
+ if (!timed_out_) {
+ send_stream_->SignalEncoderTimedOut();
+ }
+ timed_out_ = true;
+ } else if (timed_out_) {
+ send_stream_->SignalEncoderActive();
+ timed_out_ = false;
+ }
+ rtc::AtomicOps::ReleaseStore(&activity_, 0);
+
+ rtc::TaskQueue::Current()->PostDelayedTask(
+ std::unique_ptr<rtc::QueuedTask>(this), kEncoderTimeOutMs);
+    // Return false to prevent this task from being deleted. Ownership was
+    // transferred to the task queue when PostDelayedTask was called.
+ return false;
+ }
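+
+  // Set to 1 by encoder threads for every encoded frame and cleared by each
+  // periodic Run(); a full kEncoderTimeOutMs period with no store means the
+  // encoder has stopped producing output.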
+ volatile int activity_;
+
+ rtc::SequencedTaskChecker task_checker_;
+ rtc::WeakPtr<VideoSendStreamImpl> send_stream_;
+ bool timed_out_;
+};
+
+class VideoSendStreamImpl::EncoderReconfiguredTask : public rtc::QueuedTask {
+ public:
+ EncoderReconfiguredTask(const rtc::WeakPtr<VideoSendStreamImpl>& send_stream,
+ std::vector<VideoStream> streams,
+ int min_transmit_bitrate_bps)
+ : send_stream_(std::move(send_stream)),
+ streams_(std::move(streams)),
+ min_transmit_bitrate_bps_(min_transmit_bitrate_bps) {}
+
+ private:
+ bool Run() override {
+ if (send_stream_)
+ send_stream_->OnEncoderConfigurationChanged(std::move(streams_),
+ min_transmit_bitrate_bps_);
+ return true;
+ }
+
+ rtc::WeakPtr<VideoSendStreamImpl> send_stream_;
+ std::vector<VideoStream> streams_;
+ int min_transmit_bitrate_bps_;
+};
+
+VideoSendStream::VideoSendStream(
+ int num_cpu_cores,
+ ProcessThread* module_process_thread,
+ rtc::TaskQueue* worker_queue,
+ CallStats* call_stats,
+ RtpTransportControllerSendInterface* transport,
+ BitrateAllocator* bitrate_allocator,
+ SendDelayStats* send_delay_stats,
+ RtcEventLog* event_log,
+ VideoSendStream::Config config,
+ VideoEncoderConfig encoder_config,
+ const std::map<uint32_t, RtpState>& suspended_ssrcs,
+ const std::map<uint32_t, RtpPayloadState>& suspended_payload_states)
+ : worker_queue_(worker_queue),
+ thread_sync_event_(false /* manual_reset */, false),
+ stats_proxy_(Clock::GetRealTimeClock(),
+ config,
+ encoder_config.content_type),
+ config_(std::move(config)),
+ content_type_(encoder_config.content_type) {
+ video_stream_encoder_.reset(
+ new VideoStreamEncoder(num_cpu_cores, &stats_proxy_,
+ config_.encoder_settings,
+ config_.pre_encode_callback,
+ config_.post_encode_callback,
+ std::unique_ptr<OveruseFrameDetector>()));
+ worker_queue_->PostTask(std::unique_ptr<rtc::QueuedTask>(new ConstructionTask(
+ &send_stream_, &thread_sync_event_, &stats_proxy_,
+ video_stream_encoder_.get(), module_process_thread, call_stats, transport,
+ bitrate_allocator, send_delay_stats, event_log, &config_,
+ encoder_config.max_bitrate_bps, suspended_ssrcs, suspended_payload_states,
+ encoder_config.content_type)));
+
+ // Wait for ConstructionTask to complete so that |send_stream_| can be used.
+ // |module_process_thread| must be registered and deregistered on the thread
+ // it was created on.
+ thread_sync_event_.Wait(rtc::Event::kForever);
+ send_stream_->RegisterProcessThread(module_process_thread);
+ // TODO(sprang): Enable this also for regular video calls if it works well.
+ if (encoder_config.content_type == VideoEncoderConfig::ContentType::kScreen) {
+ // Only signal target bitrate for screenshare streams, for now.
+ video_stream_encoder_->SetBitrateObserver(send_stream_.get());
+ }
+
+ ReconfigureVideoEncoder(std::move(encoder_config));
+}
+
+VideoSendStream::~VideoSendStream() {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RTC_DCHECK(!send_stream_);
+}
+
+void VideoSendStream::Start() {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RTC_LOG(LS_INFO) << "VideoSendStream::Start";
+ VideoSendStreamImpl* send_stream = send_stream_.get();
+ worker_queue_->PostTask([this, send_stream] {
+ send_stream->Start();
+ thread_sync_event_.Set();
+ });
+
+ // It is expected that after VideoSendStream::Start has been called, incoming
+ // frames are not dropped in VideoStreamEncoder. To ensure this, Start has to
+ // be synchronized.
+ thread_sync_event_.Wait(rtc::Event::kForever);
+}
+
+void VideoSendStream::Stop() {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RTC_LOG(LS_INFO) << "VideoSendStream::Stop";
+ VideoSendStreamImpl* send_stream = send_stream_.get();
+ worker_queue_->PostTask([send_stream] { send_stream->Stop(); });
+}
+
+void VideoSendStream::SetSource(
+ rtc::VideoSourceInterface<webrtc::VideoFrame>* source,
+ const DegradationPreference& degradation_preference) {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ video_stream_encoder_->SetSource(source, degradation_preference);
+}
+
+void VideoSendStream::ReconfigureVideoEncoder(VideoEncoderConfig config) {
+ // TODO(perkj): Some test cases in VideoSendStreamTest call
+ // ReconfigureVideoEncoder from the network thread.
+ // RTC_DCHECK_RUN_ON(&thread_checker_);
+ RTC_DCHECK(content_type_ == config.content_type);
+ video_stream_encoder_->ConfigureEncoder(std::move(config),
+ config_.rtp.max_packet_size,
+ config_.rtp.nack.rtp_history_ms > 0);
+}
+
+VideoSendStream::Stats VideoSendStream::GetStats() {
+ // TODO(perkj, solenberg): Some test cases in EndToEndTest call GetStats from
+ // a network thread. See comment in Call::GetStats().
+ // RTC_DCHECK_RUN_ON(&thread_checker_);
+ return stats_proxy_.GetStats();
+}
+
+void VideoSendStream::SignalNetworkState(NetworkState state) {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ VideoSendStreamImpl* send_stream = send_stream_.get();
+ worker_queue_->PostTask(
+ [send_stream, state] { send_stream->SignalNetworkState(state); });
+}
+
+void VideoSendStream::StopPermanentlyAndGetRtpStates(
+ VideoSendStream::RtpStateMap* rtp_state_map,
+ VideoSendStream::RtpPayloadStateMap* payload_state_map) {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ video_stream_encoder_->Stop();
+ send_stream_->DeRegisterProcessThread();
+ worker_queue_->PostTask(
+ std::unique_ptr<rtc::QueuedTask>(new DestructAndGetRtpStateTask(
+ rtp_state_map, payload_state_map, std::move(send_stream_),
+ &thread_sync_event_)));
+ thread_sync_event_.Wait(rtc::Event::kForever);
+}
+
+void VideoSendStream::SetTransportOverhead(
+ size_t transport_overhead_per_packet) {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ VideoSendStreamImpl* send_stream = send_stream_.get();
+ worker_queue_->PostTask([send_stream, transport_overhead_per_packet] {
+ send_stream->SetTransportOverhead(transport_overhead_per_packet);
+ });
+}
+
+bool VideoSendStream::DeliverRtcp(const uint8_t* packet, size_t length) {
+ // Called on a network thread.
+ return send_stream_->DeliverRtcp(packet, length);
+}
+
+void VideoSendStream::EnableEncodedFrameRecording(
+ const std::vector<rtc::PlatformFile>& files,
+ size_t byte_limit) {
+ send_stream_->EnableEncodedFrameRecording(files, byte_limit);
+}
+
+VideoSendStreamImpl::VideoSendStreamImpl(
+ SendStatisticsProxy* stats_proxy,
+ rtc::TaskQueue* worker_queue,
+ CallStats* call_stats,
+ RtpTransportControllerSendInterface* transport,
+ BitrateAllocator* bitrate_allocator,
+ SendDelayStats* send_delay_stats,
+ VideoStreamEncoder* video_stream_encoder,
+ RtcEventLog* event_log,
+ const VideoSendStream::Config* config,
+ int initial_encoder_max_bitrate,
+ std::map<uint32_t, RtpState> suspended_ssrcs,
+ std::map<uint32_t, RtpPayloadState> suspended_payload_states,
+ VideoEncoderConfig::ContentType content_type)
+ : send_side_bwe_with_overhead_(
+ webrtc::field_trial::IsEnabled("WebRTC-SendSideBwe-WithOverhead")),
+ stats_proxy_(stats_proxy),
+ config_(config),
+ suspended_ssrcs_(std::move(suspended_ssrcs)),
+ module_process_thread_(nullptr),
+ worker_queue_(worker_queue),
+ check_encoder_activity_task_(nullptr),
+ call_stats_(call_stats),
+ transport_(transport),
+ bitrate_allocator_(bitrate_allocator),
+ flexfec_sender_(MaybeCreateFlexfecSender(*config_, suspended_ssrcs_)),
+ max_padding_bitrate_(0),
+ encoder_min_bitrate_bps_(0),
+ encoder_max_bitrate_bps_(initial_encoder_max_bitrate),
+ encoder_target_rate_bps_(0),
+ video_stream_encoder_(video_stream_encoder),
+ encoder_feedback_(Clock::GetRealTimeClock(),
+ config_->rtp.ssrcs,
+ video_stream_encoder),
+ protection_bitrate_calculator_(Clock::GetRealTimeClock(), this),
+ bandwidth_observer_(transport->send_side_cc()->GetBandwidthObserver()),
+ rtp_rtcp_modules_(CreateRtpRtcpModules(
+ config_->send_transport,
+ &encoder_feedback_,
+ bandwidth_observer_,
+ transport,
+ call_stats_->rtcp_rtt_stats(),
+ flexfec_sender_.get(),
+ stats_proxy_,
+ send_delay_stats,
+ event_log,
+ transport->send_side_cc()->GetRetransmissionRateLimiter(),
+ this,
+ config_->rtp.ssrcs.size(),
+ transport->keepalive_config())),
+ payload_router_(rtp_rtcp_modules_,
+ config_->rtp.ssrcs,
+ config_->encoder_settings.payload_type,
+ suspended_payload_states),
+ weak_ptr_factory_(this),
+ overhead_bytes_per_packet_(0),
+ transport_overhead_bytes_per_packet_(0) {
+ RTC_DCHECK_RUN_ON(worker_queue_);
+ RTC_LOG(LS_INFO) << "VideoSendStreamInternal: " << config_->ToString();
+ weak_ptr_ = weak_ptr_factory_.GetWeakPtr();
+ module_process_thread_checker_.DetachFromThread();
+
+ RTC_DCHECK(!config_->rtp.ssrcs.empty());
+ RTC_DCHECK(call_stats_);
+ RTC_DCHECK(transport_);
+ RTC_DCHECK(transport_->send_side_cc());
+ RTC_CHECK(field_trial::FindFullName(
+ AlrDetector::kStrictPacingAndProbingExperimentName)
+ .empty() ||
+ field_trial::FindFullName(
+ AlrDetector::kScreenshareProbingBweExperimentName)
+ .empty());
+ // If send-side BWE is enabled, check if we should apply updated probing and
+ // pacing settings.
+ if (TransportSeqNumExtensionConfigured(*config_)) {
+ rtc::Optional<AlrDetector::AlrExperimentSettings> alr_settings;
+ if (content_type == VideoEncoderConfig::ContentType::kScreen) {
+ alr_settings = AlrDetector::ParseAlrSettingsFromFieldTrial(
+ AlrDetector::kScreenshareProbingBweExperimentName);
+ } else {
+ alr_settings = AlrDetector::ParseAlrSettingsFromFieldTrial(
+ AlrDetector::kStrictPacingAndProbingExperimentName);
+ }
+ if (alr_settings) {
+ transport->send_side_cc()->EnablePeriodicAlrProbing(true);
+ transport->pacer()->SetPacingFactor(alr_settings->pacing_factor);
+ transport->pacer()->SetQueueTimeLimit(alr_settings->max_paced_queue_time);
+ }
+ }
+
+ if (config_->periodic_alr_bandwidth_probing) {
+ transport->send_side_cc()->EnablePeriodicAlrProbing(true);
+ }
+
+ // RTP/RTCP initialization.
+
+  // We add the highest spatial layer first so that it is prioritized when
+  // sending padding: its packet rate is expected to be lower, and it is more
+  // important to protect than the lower layers.
+ for (RtpRtcp* rtp_rtcp : rtp_rtcp_modules_) {
+ constexpr bool remb_candidate = true;
+ transport->packet_router()->AddSendRtpModule(rtp_rtcp, remb_candidate);
+ }
+
+ for (size_t i = 0; i < config_->rtp.extensions.size(); ++i) {
+ const std::string& extension = config_->rtp.extensions[i].uri;
+ int id = config_->rtp.extensions[i].id;
+ // One-byte-extension local identifiers are in the range 1-14 inclusive.
+ RTC_DCHECK_GE(id, 1);
+ RTC_DCHECK_LE(id, 14);
+ RTC_DCHECK(RtpExtension::IsSupportedForVideo(extension));
+ for (RtpRtcp* rtp_rtcp : rtp_rtcp_modules_) {
+ RTC_CHECK_EQ(0, rtp_rtcp->RegisterSendRtpHeaderExtension(
+ StringToRtpExtensionType(extension), id));
+ }
+ }
+
+ ConfigureProtection();
+ ConfigureSsrcs();
+
+  // Configure the MID for each of the RTP modules.
+ for (RtpRtcp* rtp_rtcp : rtp_rtcp_modules_) {
+ rtp_rtcp->SetMID(config_->rtp.mid.c_str());
+ }
+
+ // TODO(pbos): Should we set CNAME on all RTP modules?
+ rtp_rtcp_modules_.front()->SetCNAME(config_->rtp.c_name.c_str());
+
+ for (RtpRtcp* rtp_rtcp : rtp_rtcp_modules_) {
+ rtp_rtcp->RegisterRtcpStatisticsCallback(stats_proxy_);
+ rtp_rtcp->RegisterSendChannelRtpStatisticsCallback(stats_proxy_);
+ rtp_rtcp->SetMaxRtpPacketSize(config_->rtp.max_packet_size);
+ rtp_rtcp->RegisterVideoSendPayload(
+ config_->encoder_settings.payload_type,
+ config_->encoder_settings.payload_name.c_str());
+ }
+
+ RTC_DCHECK(config_->encoder_settings.encoder);
+ RTC_DCHECK_GE(config_->encoder_settings.payload_type, 0);
+ RTC_DCHECK_LE(config_->encoder_settings.payload_type, 127);
+
+ video_stream_encoder_->SetStartBitrate(
+ bitrate_allocator_->GetStartBitrate(this));
+
+ // Only request rotation at the source when we positively know that the remote
+ // side doesn't support the rotation extension. This allows us to prepare the
+ // encoder in the expectation that rotation is supported - which is the common
+ // case.
+ bool rotation_applied =
+ std::find_if(config_->rtp.extensions.begin(),
+ config_->rtp.extensions.end(),
+ [](const RtpExtension& extension) {
+ return extension.uri == RtpExtension::kVideoRotationUri;
+ }) == config_->rtp.extensions.end();
+
+ video_stream_encoder_->SetSink(this, rotation_applied);
+}
+
+void VideoSendStreamImpl::RegisterProcessThread(
+ ProcessThread* module_process_thread) {
+ RTC_DCHECK_RUN_ON(&module_process_thread_checker_);
+ RTC_DCHECK(!module_process_thread_);
+ module_process_thread_ = module_process_thread;
+
+ for (RtpRtcp* rtp_rtcp : rtp_rtcp_modules_)
+ module_process_thread_->RegisterModule(rtp_rtcp, RTC_FROM_HERE);
+}
+
+void VideoSendStreamImpl::DeRegisterProcessThread() {
+ RTC_DCHECK_RUN_ON(&module_process_thread_checker_);
+ for (RtpRtcp* rtp_rtcp : rtp_rtcp_modules_)
+ module_process_thread_->DeRegisterModule(rtp_rtcp);
+}
+
+VideoSendStreamImpl::~VideoSendStreamImpl() {
+ RTC_DCHECK_RUN_ON(worker_queue_);
+ RTC_DCHECK(!payload_router_.IsActive())
+ << "VideoSendStreamImpl::Stop not called";
+ RTC_LOG(LS_INFO) << "~VideoSendStreamInternal: " << config_->ToString();
+
+ for (RtpRtcp* rtp_rtcp : rtp_rtcp_modules_) {
+ transport_->packet_router()->RemoveSendRtpModule(rtp_rtcp);
+ delete rtp_rtcp;
+ }
+}
+
+bool VideoSendStreamImpl::DeliverRtcp(const uint8_t* packet, size_t length) {
+ // Runs on a network thread.
+ RTC_DCHECK(!worker_queue_->IsCurrent());
+ for (RtpRtcp* rtp_rtcp : rtp_rtcp_modules_)
+ rtp_rtcp->IncomingRtcpPacket(packet, length);
+ return true;
+}
+
+void VideoSendStreamImpl::Start() {
+ RTC_DCHECK_RUN_ON(worker_queue_);
+ RTC_LOG(LS_INFO) << "VideoSendStream::Start";
+ if (payload_router_.IsActive())
+ return;
+ TRACE_EVENT_INSTANT0("webrtc", "VideoSendStream::Start");
+ payload_router_.SetActive(true);
+
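+  // Register with the bitrate allocator; OnBitrateUpdated will be called with
+  // this stream's share of the estimated bandwidth.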
+ bitrate_allocator_->AddObserver(
+ this, encoder_min_bitrate_bps_, encoder_max_bitrate_bps_,
+ max_padding_bitrate_, !config_->suspend_below_min_bitrate,
+ config_->track_id);
+
+ // Start monitoring encoder activity.
+ {
+ rtc::CritScope lock(&encoder_activity_crit_sect_);
+ RTC_DCHECK(!check_encoder_activity_task_);
+ check_encoder_activity_task_ = new CheckEncoderActivityTask(weak_ptr_);
+ worker_queue_->PostDelayedTask(
+ std::unique_ptr<rtc::QueuedTask>(check_encoder_activity_task_),
+ CheckEncoderActivityTask::kEncoderTimeOutMs);
+ }
+
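+  // Request a key frame so that the (re)started stream begins with decodable
+  // output.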
+ video_stream_encoder_->SendKeyFrame();
+}
+
+void VideoSendStreamImpl::Stop() {
+ RTC_DCHECK_RUN_ON(worker_queue_);
+ RTC_LOG(LS_INFO) << "VideoSendStream::Stop";
+ if (!payload_router_.IsActive())
+ return;
+ TRACE_EVENT_INSTANT0("webrtc", "VideoSendStream::Stop");
+ payload_router_.SetActive(false);
+ bitrate_allocator_->RemoveObserver(this);
+ {
+ rtc::CritScope lock(&encoder_activity_crit_sect_);
+ check_encoder_activity_task_->Stop();
+ check_encoder_activity_task_ = nullptr;
+ }
+ video_stream_encoder_->OnBitrateUpdated(0, 0, 0);
+ stats_proxy_->OnSetEncoderTargetRate(0);
+}
+
+void VideoSendStreamImpl::SignalEncoderTimedOut() {
+ RTC_DCHECK_RUN_ON(worker_queue_);
+  // If the encoder has not produced anything in the last kEncoderTimeOutMs
+  // even though it is supposed to, deregister as a BitrateAllocatorObserver.
+  // This can happen if a camera stops producing frames.
+ if (encoder_target_rate_bps_ > 0) {
+ RTC_LOG(LS_INFO) << "SignalEncoderTimedOut, Encoder timed out.";
+ bitrate_allocator_->RemoveObserver(this);
+ }
+}
+
+void VideoSendStreamImpl::OnBitrateAllocationUpdated(
+ const BitrateAllocation& allocation) {
+ payload_router_.OnBitrateAllocationUpdated(allocation);
+}
+
+void VideoSendStreamImpl::SignalEncoderActive() {
+ RTC_DCHECK_RUN_ON(worker_queue_);
+ RTC_LOG(LS_INFO) << "SignalEncoderActive, Encoder is active.";
+ bitrate_allocator_->AddObserver(
+ this, encoder_min_bitrate_bps_, encoder_max_bitrate_bps_,
+ max_padding_bitrate_, !config_->suspend_below_min_bitrate,
+ config_->track_id);
+}
+
+void VideoSendStreamImpl::OnEncoderConfigurationChanged(
+ std::vector<VideoStream> streams,
+ int min_transmit_bitrate_bps) {
+ if (!worker_queue_->IsCurrent()) {
+ worker_queue_->PostTask(
+ std::unique_ptr<rtc::QueuedTask>(new EncoderReconfiguredTask(
+ weak_ptr_, std::move(streams), min_transmit_bitrate_bps)));
+ return;
+ }
+  TRACE_EVENT0("webrtc", "VideoSendStream::OnEncoderConfigurationChanged");
+  RTC_DCHECK_GE(config_->rtp.ssrcs.size(), streams.size());
+ RTC_DCHECK_RUN_ON(worker_queue_);
+
+ encoder_min_bitrate_bps_ =
+ std::max(streams[0].min_bitrate_bps, GetEncoderMinBitrateBps());
+ encoder_max_bitrate_bps_ = 0;
+ for (const auto& stream : streams)
+ encoder_max_bitrate_bps_ += stream.max_bitrate_bps;
+ max_padding_bitrate_ = CalculateMaxPadBitrateBps(
+ streams, min_transmit_bitrate_bps, config_->suspend_below_min_bitrate);
+
+ // Clear stats for disabled layers.
+ for (size_t i = streams.size(); i < config_->rtp.ssrcs.size(); ++i) {
+ stats_proxy_->OnInactiveSsrc(config_->rtp.ssrcs[i]);
+ }
+
+ size_t number_of_temporal_layers =
+ streams.back().temporal_layer_thresholds_bps.size() + 1;
+ protection_bitrate_calculator_.SetEncodingData(
+ streams[0].width, streams[0].height, number_of_temporal_layers,
+ config_->rtp.max_packet_size);
+
+ if (payload_router_.IsActive()) {
+ // The send stream is started already. Update the allocator with new bitrate
+ // limits.
+ bitrate_allocator_->AddObserver(
+ this, encoder_min_bitrate_bps_, encoder_max_bitrate_bps_,
+ max_padding_bitrate_, !config_->suspend_below_min_bitrate,
+ config_->track_id);
+ }
+}
+
+EncodedImageCallback::Result VideoSendStreamImpl::OnEncodedImage(
+ const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info,
+ const RTPFragmentationHeader* fragmentation) {
+  // OnEncodedImage is called on whatever thread the real encoder
+  // implementation runs on. In the case of hardware encoders, there might be
+  // several encoders running in parallel on different threads.
+  RTC_DCHECK(codec_specific_info);
+  size_t simulcast_idx = 0;
+  if (codec_specific_info->codecType == kVideoCodecVP8) {
+    simulcast_idx = codec_specific_info->codecSpecific.VP8.simulcastIdx;
+  }
+ if (config_->post_encode_callback) {
+ config_->post_encode_callback->EncodedFrameCallback(EncodedFrame(
+ encoded_image._buffer, encoded_image._length, encoded_image._frameType,
+ simulcast_idx, encoded_image._timeStamp));
+ }
+ {
+ rtc::CritScope lock(&encoder_activity_crit_sect_);
+ if (check_encoder_activity_task_)
+ check_encoder_activity_task_->UpdateEncoderActivity();
+ }
+
+ protection_bitrate_calculator_.UpdateWithEncodedData(encoded_image);
+ EncodedImageCallback::Result result = payload_router_.OnEncodedImage(
+ encoded_image, codec_specific_info, fragmentation);
+
+ int layer = codec_specific_info->codecType == kVideoCodecVP8
+ ? codec_specific_info->codecSpecific.VP8.simulcastIdx
+ : 0;
+ {
+ rtc::CritScope lock(&ivf_writers_crit_);
+ if (file_writers_[layer].get()) {
+ bool ok = file_writers_[layer]->WriteFrame(
+ encoded_image, codec_specific_info->codecType);
+ RTC_DCHECK(ok);
+ }
+ }
+
+ return result;
+}
+
+void VideoSendStreamImpl::ConfigureProtection() {
+ RTC_DCHECK_RUN_ON(worker_queue_);
+
+ // Consistency of FlexFEC parameters is checked in MaybeCreateFlexfecSender.
+ const bool flexfec_enabled = (flexfec_sender_ != nullptr);
+
+ // Consistency of NACK and RED+ULPFEC parameters is checked in this function.
+ const bool nack_enabled = config_->rtp.nack.rtp_history_ms > 0;
+ int red_payload_type = config_->rtp.ulpfec.red_payload_type;
+ int ulpfec_payload_type = config_->rtp.ulpfec.ulpfec_payload_type;
+
+ // Shorthands.
+ auto IsRedEnabled = [&]() { return red_payload_type >= 0; };
+ auto DisableRed = [&]() { red_payload_type = -1; };
+ auto IsUlpfecEnabled = [&]() { return ulpfec_payload_type >= 0; };
+ auto DisableUlpfec = [&]() { ulpfec_payload_type = -1; };
+
+ if (webrtc::field_trial::IsEnabled("WebRTC-DisableUlpFecExperiment")) {
+ RTC_LOG(LS_INFO) << "Experiment to disable sending ULPFEC is enabled.";
+ DisableUlpfec();
+ }
+
+ // If enabled, FlexFEC takes priority over RED+ULPFEC.
+ if (flexfec_enabled) {
+ // We can safely disable RED here, because if the remote supports FlexFEC,
+ // we know that it has a receiver without the RED/RTX workaround.
+ // See http://crbug.com/webrtc/6650 for more information.
+ if (IsRedEnabled()) {
+ RTC_LOG(LS_INFO) << "Both FlexFEC and RED are configured. Disabling RED.";
+ DisableRed();
+ }
+ if (IsUlpfecEnabled()) {
+ RTC_LOG(LS_INFO)
+ << "Both FlexFEC and ULPFEC are configured. Disabling ULPFEC.";
+ DisableUlpfec();
+ }
+ }
+
+  // Payload types without picture ID cannot determine that a stream is
+  // complete without retransmitting FEC, so using ULPFEC + NACK for H.264
+  // (for instance) is a waste of bandwidth since FEC packets still have to be
+  // retransmitted. Note that this is not the case with FlexFEC.
+ if (nack_enabled && IsUlpfecEnabled() &&
+ !PayloadTypeSupportsSkippingFecPackets(
+ config_->encoder_settings.payload_name)) {
+ RTC_LOG(LS_WARNING)
+ << "Transmitting payload type without picture ID using "
+ "NACK+ULPFEC is a waste of bandwidth since ULPFEC packets "
+ "also have to be retransmitted. Disabling ULPFEC.";
+ DisableUlpfec();
+ }
+
+ // Verify payload types.
+ //
+ // Due to how old receivers work, we need to always send RED if it has been
+ // negotiated. This is a remnant of an old RED/RTX workaround, see
+ // https://codereview.webrtc.org/2469093003.
+ // TODO(brandtr): This change went into M56, so we can remove it in ~M59.
+ // At that time, we can disable RED whenever ULPFEC is disabled, as there is
+ // no point in using RED without ULPFEC.
+ if (IsRedEnabled()) {
+ RTC_DCHECK_GE(red_payload_type, 0);
+ RTC_DCHECK_LE(red_payload_type, 127);
+ }
+ if (IsUlpfecEnabled()) {
+ RTC_DCHECK_GE(ulpfec_payload_type, 0);
+ RTC_DCHECK_LE(ulpfec_payload_type, 127);
+ if (!IsRedEnabled()) {
+ RTC_LOG(LS_WARNING)
+ << "ULPFEC is enabled but RED is disabled. Disabling ULPFEC.";
+ DisableUlpfec();
+ }
+ }
+
+  for (RtpRtcp* rtp_rtcp : rtp_rtcp_modules_) {
+    // Set NACK.
+    rtp_rtcp->SetStorePacketsStatus(true, kMinSendSidePacketHistorySize);
+    // Set RED/ULPFEC information.
+    rtp_rtcp->SetUlpfecConfig(red_payload_type, ulpfec_payload_type);
+  }
+
+  // Currently, both ULPFEC and FlexFEC use the same FEC rate calculation
+  // logic, so enable that logic if either of those FEC schemes is enabled.
+ protection_bitrate_calculator_.SetProtectionMethod(
+ flexfec_enabled || IsUlpfecEnabled(), nack_enabled);
+}
+
+void VideoSendStreamImpl::ConfigureSsrcs() {
+ RTC_DCHECK_RUN_ON(worker_queue_);
+ // Configure regular SSRCs.
+  const bool has_rids = !config_->rtp.rids.empty();
+  if (has_rids) {
+    // If we have RIDs, we must have a RID entry for every SSRC (even if it
+    // is the empty string).
+    RTC_DCHECK_EQ(config_->rtp.rids.size(), config_->rtp.ssrcs.size());
+  }
+ for (size_t i = 0; i < config_->rtp.ssrcs.size(); ++i) {
+ uint32_t ssrc = config_->rtp.ssrcs[i];
+ RtpRtcp* const rtp_rtcp = rtp_rtcp_modules_[i];
+ rtp_rtcp->SetSSRC(ssrc);
+ if (has_rids && config_->rtp.rids[i] != "") {
+ rtp_rtcp->SetRID(config_->rtp.rids[i].c_str());
+ }
+
+    // Restore the RTP state if a previous state exists.
+ VideoSendStream::RtpStateMap::iterator it = suspended_ssrcs_.find(ssrc);
+ if (it != suspended_ssrcs_.end())
+ rtp_rtcp->SetRtpState(it->second);
+ }
+
+ // Set up RTX if available.
+ if (config_->rtp.rtx.ssrcs.empty())
+ return;
+
+ // Configure RTX SSRCs.
+ RTC_DCHECK_EQ(config_->rtp.rtx.ssrcs.size(), config_->rtp.ssrcs.size());
+ for (size_t i = 0; i < config_->rtp.rtx.ssrcs.size(); ++i) {
+ uint32_t ssrc = config_->rtp.rtx.ssrcs[i];
+ RtpRtcp* const rtp_rtcp = rtp_rtcp_modules_[i];
+ rtp_rtcp->SetRtxSsrc(ssrc);
+ VideoSendStream::RtpStateMap::iterator it = suspended_ssrcs_.find(ssrc);
+ if (it != suspended_ssrcs_.end())
+ rtp_rtcp->SetRtxState(it->second);
+ }
+
+ // Configure RTX payload types.
+ RTC_DCHECK_GE(config_->rtp.rtx.payload_type, 0);
+ for (RtpRtcp* rtp_rtcp : rtp_rtcp_modules_) {
+ rtp_rtcp->SetRtxSendPayloadType(config_->rtp.rtx.payload_type,
+ config_->encoder_settings.payload_type);
+ rtp_rtcp->SetRtxSendStatus(kRtxRetransmitted | kRtxRedundantPayloads);
+ }
+ if (config_->rtp.ulpfec.red_payload_type != -1 &&
+ config_->rtp.ulpfec.red_rtx_payload_type != -1) {
+ for (RtpRtcp* rtp_rtcp : rtp_rtcp_modules_) {
+ rtp_rtcp->SetRtxSendPayloadType(config_->rtp.ulpfec.red_rtx_payload_type,
+ config_->rtp.ulpfec.red_payload_type);
+ }
+ }
+}
+
+std::map<uint32_t, RtpState> VideoSendStreamImpl::GetRtpStates() const {
+ RTC_DCHECK_RUN_ON(worker_queue_);
+ std::map<uint32_t, RtpState> rtp_states;
+
+ for (size_t i = 0; i < config_->rtp.ssrcs.size(); ++i) {
+ uint32_t ssrc = config_->rtp.ssrcs[i];
+ RTC_DCHECK_EQ(ssrc, rtp_rtcp_modules_[i]->SSRC());
+ rtp_states[ssrc] = rtp_rtcp_modules_[i]->GetRtpState();
+ }
+
+ for (size_t i = 0; i < config_->rtp.rtx.ssrcs.size(); ++i) {
+ uint32_t ssrc = config_->rtp.rtx.ssrcs[i];
+ rtp_states[ssrc] = rtp_rtcp_modules_[i]->GetRtxState();
+ }
+
+ if (flexfec_sender_) {
+ uint32_t ssrc = config_->rtp.flexfec.ssrc;
+ rtp_states[ssrc] = flexfec_sender_->GetRtpState();
+ }
+
+ return rtp_states;
+}
+
+std::map<uint32_t, RtpPayloadState> VideoSendStreamImpl::GetRtpPayloadStates()
+ const {
+ RTC_DCHECK_RUN_ON(worker_queue_);
+ return payload_router_.GetRtpPayloadStates();
+}
+
+void VideoSendStreamImpl::SignalNetworkState(NetworkState state) {
+ RTC_DCHECK_RUN_ON(worker_queue_);
+ for (RtpRtcp* rtp_rtcp : rtp_rtcp_modules_) {
+ rtp_rtcp->SetRTCPStatus(state == kNetworkUp ? config_->rtp.rtcp_mode
+ : RtcpMode::kOff);
+ }
+}
+
+uint32_t VideoSendStreamImpl::OnBitrateUpdated(uint32_t bitrate_bps,
+ uint8_t fraction_loss,
+ int64_t rtt,
+ int64_t probing_interval_ms) {
+ RTC_DCHECK_RUN_ON(worker_queue_);
+ RTC_DCHECK(payload_router_.IsActive())
+ << "VideoSendStream::Start has not been called.";
+
+  // Subtract overhead from the bitrate.
+ rtc::CritScope lock(&overhead_bytes_per_packet_crit_);
+ uint32_t payload_bitrate_bps = bitrate_bps;
+ if (send_side_bwe_with_overhead_) {
+ payload_bitrate_bps -= CalculateOverheadRateBps(
+ CalculatePacketRate(bitrate_bps,
+ config_->rtp.max_packet_size +
+ transport_overhead_bytes_per_packet_),
+ overhead_bytes_per_packet_ + transport_overhead_bytes_per_packet_,
+ bitrate_bps);
+ }
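+  // Illustrative example, assuming CalculatePacketRate() divides the bitrate
+  // by the packet size in bits and CalculateOverheadRateBps() multiplies the
+  // packet rate by the per-packet overhead in bits (capped by its last
+  // argument): with bitrate_bps = 1'000'000, max_packet_size = 1200 bytes,
+  // 48 bytes of transport overhead and 20 bytes of RTP overhead per packet,
+  // roughly 100 packets/s are sent and about 54 kbps is deducted from the
+  // payload bitrate.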
+
+ // Get the encoder target rate. It is the estimated network rate -
+ // protection overhead.
+ encoder_target_rate_bps_ = protection_bitrate_calculator_.SetTargetRates(
+ payload_bitrate_bps, stats_proxy_->GetSendFrameRate(), fraction_loss,
+ rtt);
+
+ uint32_t encoder_overhead_rate_bps =
+ send_side_bwe_with_overhead_
+ ? CalculateOverheadRateBps(
+ CalculatePacketRate(encoder_target_rate_bps_,
+ config_->rtp.max_packet_size +
+ transport_overhead_bytes_per_packet_ -
+ overhead_bytes_per_packet_),
+ overhead_bytes_per_packet_ +
+ transport_overhead_bytes_per_packet_,
+ bitrate_bps - encoder_target_rate_bps_)
+ : 0;
+
+  // When the field trial "WebRTC-SendSideBwe-WithOverhead" is enabled,
+  // protection_bitrate includes overhead.
+ uint32_t protection_bitrate =
+ bitrate_bps - (encoder_target_rate_bps_ + encoder_overhead_rate_bps);
+
+ encoder_target_rate_bps_ =
+ std::min(encoder_max_bitrate_bps_, encoder_target_rate_bps_);
+ video_stream_encoder_->OnBitrateUpdated(encoder_target_rate_bps_,
+ fraction_loss, rtt);
+ stats_proxy_->OnSetEncoderTargetRate(encoder_target_rate_bps_);
+ return protection_bitrate;
+}
+
+void VideoSendStreamImpl::EnableEncodedFrameRecording(
+ const std::vector<rtc::PlatformFile>& files,
+ size_t byte_limit) {
+ {
+ rtc::CritScope lock(&ivf_writers_crit_);
+ for (unsigned int i = 0; i < kMaxSimulcastStreams; ++i) {
+ if (i < files.size()) {
+ file_writers_[i] = IvfFileWriter::Wrap(rtc::File(files[i]), byte_limit);
+ } else {
+ file_writers_[i].reset();
+ }
+ }
+ }
+
+ if (!files.empty()) {
+    // Request a key frame as early as possible so that the recording starts
+    // with decodable output.
+ video_stream_encoder_->SendKeyFrame();
+ }
+}
+
+int VideoSendStreamImpl::ProtectionRequest(
+ const FecProtectionParams* delta_params,
+ const FecProtectionParams* key_params,
+ uint32_t* sent_video_rate_bps,
+ uint32_t* sent_nack_rate_bps,
+ uint32_t* sent_fec_rate_bps) {
+ RTC_DCHECK_RUN_ON(worker_queue_);
+ *sent_video_rate_bps = 0;
+ *sent_nack_rate_bps = 0;
+ *sent_fec_rate_bps = 0;
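+  // Apply the new FEC parameters to, and aggregate the send rates reported
+  // by, each RTP module (one module per simulcast stream).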
+ for (RtpRtcp* rtp_rtcp : rtp_rtcp_modules_) {
+ uint32_t not_used = 0;
+ uint32_t module_video_rate = 0;
+ uint32_t module_fec_rate = 0;
+ uint32_t module_nack_rate = 0;
+ rtp_rtcp->SetFecParameters(*delta_params, *key_params);
+ rtp_rtcp->BitrateSent(&not_used, &module_video_rate, &module_fec_rate,
+ &module_nack_rate);
+ *sent_video_rate_bps += module_video_rate;
+ *sent_nack_rate_bps += module_nack_rate;
+ *sent_fec_rate_bps += module_fec_rate;
+ }
+ return 0;
+}
+
+void VideoSendStreamImpl::OnOverheadChanged(size_t overhead_bytes_per_packet) {
+ rtc::CritScope lock(&overhead_bytes_per_packet_crit_);
+ overhead_bytes_per_packet_ = overhead_bytes_per_packet;
+}
+
+void VideoSendStreamImpl::SetTransportOverhead(
+ size_t transport_overhead_bytes_per_packet) {
+  if (transport_overhead_bytes_per_packet >= kPathMTU) {
+ RTC_LOG(LS_ERROR) << "Transport overhead exceeds size of ethernet frame";
+ return;
+ }
+
+ transport_overhead_bytes_per_packet_ = transport_overhead_bytes_per_packet;
+
+ transport_->send_side_cc()->SetTransportOverhead(
+ transport_overhead_bytes_per_packet_);
+
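+  // Keep RTP packets small enough that, together with the transport overhead,
+  // they still fit within the assumed path MTU (kPathMTU).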
+ size_t rtp_packet_size =
+ std::min(config_->rtp.max_packet_size,
+ kPathMTU - transport_overhead_bytes_per_packet_);
+
+ for (RtpRtcp* rtp_rtcp : rtp_rtcp_modules_) {
+ rtp_rtcp->SetMaxRtpPacketSize(rtp_packet_size);
+ }
+}
+
+} // namespace internal
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/video/video_send_stream.h b/third_party/libwebrtc/webrtc/video/video_send_stream.h
new file mode 100644
index 0000000000..998250cd6e
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/video_send_stream.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_VIDEO_SEND_STREAM_H_
+#define VIDEO_VIDEO_SEND_STREAM_H_
+
+#include <map>
+#include <memory>
+#include <vector>
+
+#include "call/bitrate_allocator.h"
+#include "call/video_receive_stream.h"
+#include "call/video_send_stream.h"
+#include "common_video/libyuv/include/webrtc_libyuv.h"
+#include "modules/video_coding/protection_bitrate_calculator.h"
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/event.h"
+#include "rtc_base/task_queue.h"
+#include "video/encoder_rtcp_feedback.h"
+#include "video/send_delay_stats.h"
+#include "video/send_statistics_proxy.h"
+#include "video/video_stream_encoder.h"
+
+namespace webrtc {
+
+class CallStats;
+class SendSideCongestionController;
+class IvfFileWriter;
+class ProcessThread;
+class RtpRtcp;
+class RtpTransportControllerSendInterface;
+class RtcEventLog;
+
+namespace internal {
+
+class VideoSendStreamImpl;
+
+// VideoSendStream implements webrtc::VideoSendStream.
+// Internally, it delegates all public methods to VideoSendStreamImpl and/or
+// VideoStreamEncoder. VideoSendStreamImpl is created and deleted on
+// |worker_queue|.
+class VideoSendStream : public webrtc::VideoSendStream {
+ public:
+ VideoSendStream(
+ int num_cpu_cores,
+ ProcessThread* module_process_thread,
+ rtc::TaskQueue* worker_queue,
+ CallStats* call_stats,
+ RtpTransportControllerSendInterface* transport,
+ BitrateAllocator* bitrate_allocator,
+ SendDelayStats* send_delay_stats,
+ RtcEventLog* event_log,
+ VideoSendStream::Config config,
+ VideoEncoderConfig encoder_config,
+ const std::map<uint32_t, RtpState>& suspended_ssrcs,
+ const std::map<uint32_t, RtpPayloadState>& suspended_payload_states);
+
+ ~VideoSendStream() override;
+
+ void SignalNetworkState(NetworkState state);
+ bool DeliverRtcp(const uint8_t* packet, size_t length);
+
+ // webrtc::VideoSendStream implementation.
+ void Start() override;
+ void Stop() override;
+
+ void SetSource(rtc::VideoSourceInterface<webrtc::VideoFrame>* source,
+ const DegradationPreference& degradation_preference) override;
+
+ void ReconfigureVideoEncoder(VideoEncoderConfig) override;
+ Stats GetStats() override;
+
+ typedef std::map<uint32_t, RtpState> RtpStateMap;
+ typedef std::map<uint32_t, RtpPayloadState> RtpPayloadStateMap;
+
+  // Takes ownership of each file and is responsible for closing them later.
+  // Calling this method will close and finalize any current logs.
+  // Passing rtc::kInvalidPlatformFileValue in any position disables logging
+  // for the corresponding stream.
+  // If a frame to be written would make the log too large, the write fails
+  // and the log is closed and finalized. A |byte_limit| of 0 means no limit.
+ void EnableEncodedFrameRecording(const std::vector<rtc::PlatformFile>& files,
+ size_t byte_limit) override;
+
+ void StopPermanentlyAndGetRtpStates(RtpStateMap* rtp_state_map,
+ RtpPayloadStateMap* payload_state_map);
+
+ void SetTransportOverhead(size_t transport_overhead_per_packet);
+
+ private:
+ class ConstructionTask;
+ class DestructAndGetRtpStateTask;
+
+ rtc::ThreadChecker thread_checker_;
+ rtc::TaskQueue* const worker_queue_;
+ rtc::Event thread_sync_event_;
+
+ SendStatisticsProxy stats_proxy_;
+ const VideoSendStream::Config config_;
+ const VideoEncoderConfig::ContentType content_type_;
+ std::unique_ptr<VideoSendStreamImpl> send_stream_;
+ std::unique_ptr<VideoStreamEncoder> video_stream_encoder_;
+};
+
+} // namespace internal
+} // namespace webrtc
+
+#endif // VIDEO_VIDEO_SEND_STREAM_H_
diff --git a/third_party/libwebrtc/webrtc/video/video_send_stream_tests.cc b/third_party/libwebrtc/webrtc/video/video_send_stream_tests.cc
new file mode 100644
index 0000000000..98a32cc858
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/video_send_stream_tests.cc
@@ -0,0 +1,3619 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include <algorithm> // max
+#include <memory>
+#include <vector>
+
+#include "call/call.h"
+#include "call/rtp_transport_controller_send.h"
+#include "common_video/include/frame_callback.h"
+#include "common_video/include/video_frame.h"
+#include "modules/pacing/alr_detector.h"
+#include "modules/rtp_rtcp/include/rtp_header_parser.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp.h"
+#include "modules/rtp_rtcp/source/rtcp_sender.h"
+#include "modules/rtp_rtcp/source/rtp_format_vp9.h"
+#include "modules/video_coding/codecs/vp8/include/vp8.h"
+#include "modules/video_coding/codecs/vp9/include/vp9.h"
+#include "rtc_base/bind.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/event.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/rate_limiter.h"
+#include "rtc_base/timeutils.h"
+#include "system_wrappers/include/sleep.h"
+#include "test/call_test.h"
+#include "test/configurable_frame_size_encoder.h"
+#include "test/fake_texture_frame.h"
+#include "test/field_trial.h"
+#include "test/frame_generator.h"
+#include "test/frame_generator_capturer.h"
+#include "test/frame_utils.h"
+#include "test/gtest.h"
+#include "test/null_transport.h"
+#include "test/rtcp_packet_parser.h"
+#include "test/testsupport/perf_test.h"
+
+#include "video/send_statistics_proxy.h"
+#include "video/transport_adapter.h"
+#include "call/video_send_stream.h"
+
+namespace webrtc {
+
+enum VideoFormat { kGeneric, kVP8 };
+
+void ExpectEqualFramesVector(const std::vector<VideoFrame>& frames1,
+ const std::vector<VideoFrame>& frames2);
+VideoFrame CreateVideoFrame(int width, int height, uint8_t data);
+
+class VideoSendStreamTest : public test::CallTest {
+ protected:
+ void TestNackRetransmission(uint32_t retransmit_ssrc,
+ uint8_t retransmit_payload_type);
+ void TestPacketFragmentationSize(VideoFormat format, bool with_fec);
+
+ void TestVp9NonFlexMode(uint8_t num_temporal_layers,
+ uint8_t num_spatial_layers);
+
+ void TestRequestSourceRotateVideo(bool support_orientation_ext);
+};
+
+TEST_F(VideoSendStreamTest, CanStartStartedStream) {
+ task_queue_.SendTask([this]() {
+ CreateSenderCall(Call::Config(event_log_.get()));
+
+ test::NullTransport transport;
+ CreateSendConfig(1, 0, 0, &transport);
+ CreateVideoStreams();
+ video_send_stream_->Start();
+ video_send_stream_->Start();
+ DestroyStreams();
+ DestroyCalls();
+ });
+}
+
+TEST_F(VideoSendStreamTest, CanStopStoppedStream) {
+ task_queue_.SendTask([this]() {
+ CreateSenderCall(Call::Config(event_log_.get()));
+
+ test::NullTransport transport;
+ CreateSendConfig(1, 0, 0, &transport);
+ CreateVideoStreams();
+ video_send_stream_->Stop();
+ video_send_stream_->Stop();
+ DestroyStreams();
+ DestroyCalls();
+ });
+}
+
+TEST_F(VideoSendStreamTest, SupportsCName) {
+  static const std::string kCName =
+      "PjQatC14dGfbVwGPUOA9IH7RlsFDbWl4AhXEiDsBizo=";
+ class CNameObserver : public test::SendTest {
+ public:
+ CNameObserver() : SendTest(kDefaultTimeoutMs) {}
+
+ private:
+ Action OnSendRtcp(const uint8_t* packet, size_t length) override {
+ test::RtcpPacketParser parser;
+ EXPECT_TRUE(parser.Parse(packet, length));
+ if (parser.sdes()->num_packets() > 0) {
+ EXPECT_EQ(1u, parser.sdes()->chunks().size());
+ EXPECT_EQ(kCName, parser.sdes()->chunks()[0].cname);
+
+ observation_complete_.Set();
+ }
+
+ return SEND_PACKET;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->rtp.c_name = kCName;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timed out while waiting for RTCP with CNAME.";
+ }
+ } test;
+
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, SupportsAbsoluteSendTime) {
+ class AbsoluteSendTimeObserver : public test::SendTest {
+ public:
+ AbsoluteSendTimeObserver() : SendTest(kDefaultTimeoutMs) {
+ EXPECT_TRUE(parser_->RegisterRtpHeaderExtension(
+ kRtpExtensionAbsoluteSendTime, test::kAbsSendTimeExtensionId));
+ }
+
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ RTPHeader header;
+ EXPECT_TRUE(parser_->Parse(packet, length, &header));
+
+ EXPECT_FALSE(header.extension.hasTransmissionTimeOffset);
+ EXPECT_TRUE(header.extension.hasAbsoluteSendTime);
+ EXPECT_EQ(header.extension.transmissionTimeOffset, 0);
+ if (header.extension.absoluteSendTime != 0) {
+ // Wait for at least one packet with a non-zero send time. The send time
+      // is a 24-bit value derived from the system clock, and it is valid
+ // for a packet to have a zero send time. To tell that from an
+ // unpopulated value we'll wait for a packet with non-zero send time.
+ observation_complete_.Set();
+ } else {
+ RTC_LOG(LS_WARNING)
+ << "Got a packet with zero absoluteSendTime, waiting"
+ " for another packet...";
+ }
+
+ return SEND_PACKET;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->rtp.extensions.clear();
+ send_config->rtp.extensions.push_back(RtpExtension(
+ RtpExtension::kAbsSendTimeUri, test::kAbsSendTimeExtensionId));
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timed out while waiting for single RTP packet.";
+ }
+ } test;
+
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, SupportsTransmissionTimeOffset) {
+ static const int kEncodeDelayMs = 5;
+ class TransmissionTimeOffsetObserver : public test::SendTest {
+ public:
+ TransmissionTimeOffsetObserver()
+ : SendTest(kDefaultTimeoutMs),
+ encoder_(Clock::GetRealTimeClock(), kEncodeDelayMs) {
+ EXPECT_TRUE(parser_->RegisterRtpHeaderExtension(
+ kRtpExtensionTransmissionTimeOffset, test::kTOffsetExtensionId));
+ }
+
+ private:
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ RTPHeader header;
+ EXPECT_TRUE(parser_->Parse(packet, length, &header));
+
+ EXPECT_TRUE(header.extension.hasTransmissionTimeOffset);
+ EXPECT_FALSE(header.extension.hasAbsoluteSendTime);
+ EXPECT_GT(header.extension.transmissionTimeOffset, 0);
+ EXPECT_EQ(header.extension.absoluteSendTime, 0u);
+ observation_complete_.Set();
+
+ return SEND_PACKET;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->encoder_settings.encoder = &encoder_;
+ send_config->rtp.extensions.clear();
+ send_config->rtp.extensions.push_back(RtpExtension(
+ RtpExtension::kTimestampOffsetUri, test::kTOffsetExtensionId));
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timed out while waiting for a single RTP packet.";
+ }
+
+ test::DelayedEncoder encoder_;
+ } test;
+
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, SupportsTransportWideSequenceNumbers) {
+ static const uint8_t kExtensionId = test::kTransportSequenceNumberExtensionId;
+ class TransportWideSequenceNumberObserver : public test::SendTest {
+ public:
+ TransportWideSequenceNumberObserver()
+ : SendTest(kDefaultTimeoutMs), encoder_(Clock::GetRealTimeClock()) {
+ EXPECT_TRUE(parser_->RegisterRtpHeaderExtension(
+ kRtpExtensionTransportSequenceNumber, kExtensionId));
+ }
+
+ private:
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ RTPHeader header;
+ EXPECT_TRUE(parser_->Parse(packet, length, &header));
+
+ EXPECT_TRUE(header.extension.hasTransportSequenceNumber);
+ EXPECT_FALSE(header.extension.hasTransmissionTimeOffset);
+ EXPECT_FALSE(header.extension.hasAbsoluteSendTime);
+
+ observation_complete_.Set();
+
+ return SEND_PACKET;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->encoder_settings.encoder = &encoder_;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timed out while waiting for a single RTP packet.";
+ }
+
+ test::FakeEncoder encoder_;
+ } test;
+
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, SupportsVideoRotation) {
+ class VideoRotationObserver : public test::SendTest {
+ public:
+ VideoRotationObserver() : SendTest(kDefaultTimeoutMs) {
+ EXPECT_TRUE(parser_->RegisterRtpHeaderExtension(
+ kRtpExtensionVideoRotation, test::kVideoRotationExtensionId));
+ }
+
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ RTPHeader header;
+ EXPECT_TRUE(parser_->Parse(packet, length, &header));
+ // Only the last packet of the frame is required to have the extension.
+ if (!header.markerBit)
+ return SEND_PACKET;
+ EXPECT_TRUE(header.extension.hasVideoRotation);
+ EXPECT_EQ(kVideoRotation_90, header.extension.videoRotation);
+ observation_complete_.Set();
+ return SEND_PACKET;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->rtp.extensions.clear();
+ send_config->rtp.extensions.push_back(RtpExtension(
+ RtpExtension::kVideoRotationUri, test::kVideoRotationExtensionId));
+ }
+
+ void OnFrameGeneratorCapturerCreated(
+ test::FrameGeneratorCapturer* frame_generator_capturer) override {
+ frame_generator_capturer->SetFakeRotation(kVideoRotation_90);
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timed out while waiting for single RTP packet.";
+ }
+ } test;
+
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, SupportsVideoContentType) {
+ class VideoContentTypeObserver : public test::SendTest {
+ public:
+ VideoContentTypeObserver() : SendTest(kDefaultTimeoutMs) {
+ EXPECT_TRUE(parser_->RegisterRtpHeaderExtension(
+ kRtpExtensionVideoContentType, test::kVideoContentTypeExtensionId));
+ }
+
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ RTPHeader header;
+ EXPECT_TRUE(parser_->Parse(packet, length, &header));
+ // Only the last packet of the frame must have extension.
+ if (!header.markerBit)
+ return SEND_PACKET;
+ EXPECT_TRUE(header.extension.hasVideoContentType);
+ EXPECT_TRUE(videocontenttypehelpers::IsScreenshare(
+ header.extension.videoContentType));
+ observation_complete_.Set();
+ return SEND_PACKET;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->rtp.extensions.clear();
+ send_config->rtp.extensions.push_back(
+ RtpExtension(RtpExtension::kVideoContentTypeUri,
+ test::kVideoContentTypeExtensionId));
+ encoder_config->content_type = VideoEncoderConfig::ContentType::kScreen;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timed out while waiting for single RTP packet.";
+ }
+ } test;
+
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, SupportsVideoTimingFrames) {
+ class VideoTimingObserver : public test::SendTest {
+ public:
+ VideoTimingObserver() : SendTest(kDefaultTimeoutMs) {
+ EXPECT_TRUE(parser_->RegisterRtpHeaderExtension(
+ kRtpExtensionVideoTiming, test::kVideoTimingExtensionId));
+ }
+
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ RTPHeader header;
+ EXPECT_TRUE(parser_->Parse(packet, length, &header));
+ // Only the last packet of the frame must have extension.
+ if (!header.markerBit)
+ return SEND_PACKET;
+ EXPECT_TRUE(header.extension.has_video_timing);
+ observation_complete_.Set();
+ return SEND_PACKET;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->rtp.extensions.clear();
+ send_config->rtp.extensions.push_back(RtpExtension(
+ RtpExtension::kVideoTimingUri, test::kVideoTimingExtensionId));
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timed out while waiting for timing frames.";
+ }
+ } test;
+
+ RunBaseTest(&test);
+}
+
+class FakeReceiveStatistics : public ReceiveStatisticsProvider {
+ public:
+ FakeReceiveStatistics(uint32_t send_ssrc,
+ uint32_t last_sequence_number,
+ uint32_t cumulative_lost,
+ uint8_t fraction_lost) {
+ stat_.SetMediaSsrc(send_ssrc);
+ stat_.SetExtHighestSeqNum(last_sequence_number);
+ stat_.SetCumulativeLost(cumulative_lost);
+ stat_.SetFractionLost(fraction_lost);
+ }
+
+ std::vector<rtcp::ReportBlock> RtcpReportBlocks(size_t max_blocks) override {
+ EXPECT_GE(max_blocks, 1u);
+ return {stat_};
+ }
+
+ private:
+ rtcp::ReportBlock stat_;
+};
+
+class UlpfecObserver : public test::EndToEndTest {
+ public:
+ UlpfecObserver(bool header_extensions_enabled,
+ bool use_nack,
+ bool expect_red,
+ bool expect_ulpfec,
+ const std::string& codec,
+ VideoEncoder* encoder)
+ : EndToEndTest(kTimeoutMs),
+ encoder_(encoder),
+ payload_name_(codec),
+ use_nack_(use_nack),
+ expect_red_(expect_red),
+ expect_ulpfec_(expect_ulpfec),
+ sent_media_(false),
+ sent_ulpfec_(false),
+ header_extensions_enabled_(header_extensions_enabled) {}
+
+ // Some of the test cases are expected to time out and thus we are using
+ // a shorter timeout window than the default here.
+ static constexpr size_t kTimeoutMs = 10000;
+
+ private:
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ RTPHeader header;
+ EXPECT_TRUE(parser_->Parse(packet, length, &header));
+
+ int encapsulated_payload_type = -1;
+ if (header.payloadType == VideoSendStreamTest::kRedPayloadType) {
+ EXPECT_TRUE(expect_red_);
+ encapsulated_payload_type = static_cast<int>(packet[header.headerLength]);
+ if (encapsulated_payload_type !=
+ VideoSendStreamTest::kFakeVideoSendPayloadType) {
+ EXPECT_EQ(VideoSendStreamTest::kUlpfecPayloadType,
+ encapsulated_payload_type);
+ }
+ } else {
+ EXPECT_EQ(VideoSendStreamTest::kFakeVideoSendPayloadType,
+ header.payloadType);
+ if (static_cast<size_t>(header.headerLength + header.paddingLength) <
+ length) {
+ // Not padding-only, media received outside of RED.
+ EXPECT_FALSE(expect_red_);
+ sent_media_ = true;
+ }
+ }
+
+ if (header_extensions_enabled_) {
+ EXPECT_TRUE(header.extension.hasAbsoluteSendTime);
+ uint32_t kHalf24BitsSpace = 0xFFFFFF / 2;
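+      // Example: prev = 0xFFFFF0 and cur = 0x000010 indicates that the
+      // 24-bit send time counter wrapped.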
+ if (header.extension.absoluteSendTime <= kHalf24BitsSpace &&
+ prev_header_.extension.absoluteSendTime > kHalf24BitsSpace) {
+ // 24 bits wrap.
+ EXPECT_GT(prev_header_.extension.absoluteSendTime,
+ header.extension.absoluteSendTime);
+ } else {
+ EXPECT_GE(header.extension.absoluteSendTime,
+ prev_header_.extension.absoluteSendTime);
+ }
+ EXPECT_TRUE(header.extension.hasTransportSequenceNumber);
+ uint16_t seq_num_diff = header.extension.transportSequenceNumber -
+ prev_header_.extension.transportSequenceNumber;
+ EXPECT_EQ(1, seq_num_diff);
+ }
+
+ if (encapsulated_payload_type != -1) {
+ if (encapsulated_payload_type ==
+ VideoSendStreamTest::kUlpfecPayloadType) {
+ EXPECT_TRUE(expect_ulpfec_);
+ sent_ulpfec_ = true;
+ } else {
+ sent_media_ = true;
+ }
+ }
+
+ if (sent_media_ && sent_ulpfec_) {
+ observation_complete_.Set();
+ }
+
+ prev_header_ = header;
+
+ return SEND_PACKET;
+ }
+
+ test::PacketTransport* CreateSendTransport(
+ test::SingleThreadedTaskQueueForTesting* task_queue,
+ Call* sender_call) override {
+ // At low RTT (< kLowRttNackMs) -> NACK only, no FEC.
+ // Configure some network delay.
+ const int kNetworkDelayMs = 100;
+ FakeNetworkPipe::Config config;
+ config.loss_percent = 5;
+ config.queue_delay_ms = kNetworkDelayMs;
+ return new test::PacketTransport(
+ task_queue, sender_call, this, test::PacketTransport::kSender,
+ VideoSendStreamTest::payload_type_map_, config);
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ if (use_nack_) {
+ send_config->rtp.nack.rtp_history_ms =
+ (*receive_configs)[0].rtp.nack.rtp_history_ms =
+ VideoSendStreamTest::kNackRtpHistoryMs;
+ }
+ send_config->encoder_settings.encoder = encoder_;
+ send_config->encoder_settings.payload_name = payload_name_;
+ send_config->rtp.ulpfec.red_payload_type =
+ VideoSendStreamTest::kRedPayloadType;
+ send_config->rtp.ulpfec.ulpfec_payload_type =
+ VideoSendStreamTest::kUlpfecPayloadType;
+ EXPECT_FALSE(send_config->rtp.extensions.empty());
+ if (!header_extensions_enabled_) {
+ send_config->rtp.extensions.clear();
+ } else {
+ send_config->rtp.extensions.push_back(RtpExtension(
+ RtpExtension::kAbsSendTimeUri, test::kAbsSendTimeExtensionId));
+ }
+ (*receive_configs)[0].rtp.red_payload_type =
+ send_config->rtp.ulpfec.red_payload_type;
+ (*receive_configs)[0].rtp.ulpfec_payload_type =
+ send_config->rtp.ulpfec.ulpfec_payload_type;
+ }
+
+ void PerformTest() override {
+ EXPECT_EQ(expect_ulpfec_, Wait())
+ << "Timed out waiting for ULPFEC and/or media packets.";
+ }
+
+ VideoEncoder* const encoder_;
+ std::string payload_name_;
+ const bool use_nack_;
+ const bool expect_red_;
+ const bool expect_ulpfec_;
+ bool sent_media_;
+ bool sent_ulpfec_;
+ bool header_extensions_enabled_;
+ RTPHeader prev_header_;
+};
+
+TEST_F(VideoSendStreamTest, SupportsUlpfecWithExtensions) {
+ std::unique_ptr<VideoEncoder> encoder(VP8Encoder::Create());
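+  // Args: header extensions enabled, use NACK, expect RED, expect ULPFEC,
+  // codec, encoder.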
+ UlpfecObserver test(true, false, true, true, "VP8", encoder.get());
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, SupportsUlpfecWithoutExtensions) {
+ std::unique_ptr<VideoEncoder> encoder(VP8Encoder::Create());
+ UlpfecObserver test(false, false, true, true, "VP8", encoder.get());
+ RunBaseTest(&test);
+}
+
+class VideoSendStreamWithoutUlpfecTest : public VideoSendStreamTest {
+ protected:
+ VideoSendStreamWithoutUlpfecTest()
+ : field_trial_("WebRTC-DisableUlpFecExperiment/Enabled/") {}
+
+ test::ScopedFieldTrials field_trial_;
+};
+
+TEST_F(VideoSendStreamWithoutUlpfecTest, NoUlpfecIfDisabledThroughFieldTrial) {
+ std::unique_ptr<VideoEncoder> encoder(VP8Encoder::Create());
+ UlpfecObserver test(false, false, true, false, "VP8", encoder.get());
+ RunBaseTest(&test);
+}
+
+// The FEC scheme used is not efficient for H264, so we should not use RED/FEC
+// with NACK enabled: we would still have to re-request lost FEC packets,
+// effectively wasting bandwidth, since the receiver has to wait for FEC
+// retransmissions to determine that the received state is actually decodable.
+TEST_F(VideoSendStreamTest, DoesNotUtilizeUlpfecForH264WithNackEnabled) {
+ std::unique_ptr<VideoEncoder> encoder(
+ new test::FakeH264Encoder(Clock::GetRealTimeClock()));
+ UlpfecObserver test(false, true, true, false, "H264", encoder.get());
+ RunBaseTest(&test);
+}
+
+// Without retransmissions FEC for H264 is fine.
+TEST_F(VideoSendStreamTest, DoesUtilizeUlpfecForH264WithoutNackEnabled) {
+ std::unique_ptr<VideoEncoder> encoder(
+ new test::FakeH264Encoder(Clock::GetRealTimeClock()));
+ UlpfecObserver test(false, false, true, true, "H264", encoder.get());
+ RunBaseTest(&test);
+}
+
+// Disabled as flaky, see https://crbug.com/webrtc/7285 for details.
+TEST_F(VideoSendStreamTest, DISABLED_DoesUtilizeUlpfecForVp8WithNackEnabled) {
+ std::unique_ptr<VideoEncoder> encoder(VP8Encoder::Create());
+ UlpfecObserver test(false, true, true, true, "VP8", encoder.get());
+ RunBaseTest(&test);
+}
+
+#if !defined(RTC_DISABLE_VP9)
+// Disabled as flaky, see https://crbug.com/webrtc/7285 for details.
+TEST_F(VideoSendStreamTest, DISABLED_DoesUtilizeUlpfecForVp9WithNackEnabled) {
+ std::unique_ptr<VideoEncoder> encoder(VP9Encoder::Create());
+ UlpfecObserver test(false, true, true, true, "VP9", encoder.get());
+ RunBaseTest(&test);
+}
+#endif // !defined(RTC_DISABLE_VP9)
+
+TEST_F(VideoSendStreamTest, SupportsUlpfecWithMultithreadedH264) {
+ std::unique_ptr<VideoEncoder> encoder(
+ new test::MultithreadedFakeH264Encoder(Clock::GetRealTimeClock()));
+ UlpfecObserver test(false, false, true, true, "H264", encoder.get());
+ RunBaseTest(&test);
+}
+
+// TODO(brandtr): Move these FlexFEC tests when we have created
+// FlexfecSendStream.
+class FlexfecObserver : public test::EndToEndTest {
+ public:
+ FlexfecObserver(bool header_extensions_enabled,
+ bool use_nack,
+ const std::string& codec,
+ VideoEncoder* encoder)
+ : EndToEndTest(VideoSendStreamTest::kDefaultTimeoutMs),
+ encoder_(encoder),
+ payload_name_(codec),
+ use_nack_(use_nack),
+ sent_media_(false),
+ sent_flexfec_(false),
+ header_extensions_enabled_(header_extensions_enabled) {}
+
+ size_t GetNumFlexfecStreams() const override { return 1; }
+
+ private:
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ RTPHeader header;
+ EXPECT_TRUE(parser_->Parse(packet, length, &header));
+
+ if (header.payloadType == VideoSendStreamTest::kFlexfecPayloadType) {
+ EXPECT_EQ(VideoSendStreamTest::kFlexfecSendSsrc, header.ssrc);
+ sent_flexfec_ = true;
+ } else {
+ EXPECT_EQ(VideoSendStreamTest::kFakeVideoSendPayloadType,
+ header.payloadType);
+ EXPECT_EQ(VideoSendStreamTest::kVideoSendSsrcs[0], header.ssrc);
+ sent_media_ = true;
+ }
+
+ if (header_extensions_enabled_) {
+ EXPECT_TRUE(header.extension.hasAbsoluteSendTime);
+ EXPECT_TRUE(header.extension.hasTransmissionTimeOffset);
+ EXPECT_TRUE(header.extension.hasTransportSequenceNumber);
+ }
+
+ if (sent_media_ && sent_flexfec_) {
+ observation_complete_.Set();
+ }
+
+ return SEND_PACKET;
+ }
+
+ test::PacketTransport* CreateSendTransport(
+ test::SingleThreadedTaskQueueForTesting* task_queue,
+ Call* sender_call) override {
+ // At low RTT (< kLowRttNackMs) -> NACK only, no FEC.
+ // Therefore we need some network delay.
+ const int kNetworkDelayMs = 100;
+ FakeNetworkPipe::Config config;
+ config.loss_percent = 5;
+ config.queue_delay_ms = kNetworkDelayMs;
+ return new test::PacketTransport(
+ task_queue, sender_call, this, test::PacketTransport::kSender,
+ VideoSendStreamTest::payload_type_map_, config);
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ if (use_nack_) {
+ send_config->rtp.nack.rtp_history_ms =
+ (*receive_configs)[0].rtp.nack.rtp_history_ms =
+ VideoSendStreamTest::kNackRtpHistoryMs;
+ }
+ send_config->encoder_settings.encoder = encoder_;
+ send_config->encoder_settings.payload_name = payload_name_;
+ if (header_extensions_enabled_) {
+ send_config->rtp.extensions.push_back(RtpExtension(
+ RtpExtension::kAbsSendTimeUri, test::kAbsSendTimeExtensionId));
+ send_config->rtp.extensions.push_back(RtpExtension(
+ RtpExtension::kTimestampOffsetUri, test::kTOffsetExtensionId));
+ } else {
+ send_config->rtp.extensions.clear();
+ }
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait())
+ << "Timed out waiting for FlexFEC and/or media packets.";
+ }
+
+ VideoEncoder* const encoder_;
+ std::string payload_name_;
+ const bool use_nack_;
+ bool sent_media_;
+ bool sent_flexfec_;
+ bool header_extensions_enabled_;
+};
+
+TEST_F(VideoSendStreamTest, SupportsFlexfecVp8) {
+ std::unique_ptr<VideoEncoder> encoder(VP8Encoder::Create());
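+  // Args: header extensions enabled, use NACK, codec, encoder.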
+ FlexfecObserver test(false, false, "VP8", encoder.get());
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, SupportsFlexfecWithNackVp8) {
+ std::unique_ptr<VideoEncoder> encoder(VP8Encoder::Create());
+ FlexfecObserver test(false, true, "VP8", encoder.get());
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, SupportsFlexfecWithRtpExtensionsVp8) {
+ std::unique_ptr<VideoEncoder> encoder(VP8Encoder::Create());
+ FlexfecObserver test(true, false, "VP8", encoder.get());
+ RunBaseTest(&test);
+}
+
+#if !defined(RTC_DISABLE_VP9)
+TEST_F(VideoSendStreamTest, SupportsFlexfecVp9) {
+ std::unique_ptr<VideoEncoder> encoder(VP9Encoder::Create());
+ FlexfecObserver test(false, false, "VP9", encoder.get());
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, SupportsFlexfecWithNackVp9) {
+ std::unique_ptr<VideoEncoder> encoder(VP9Encoder::Create());
+ FlexfecObserver test(false, true, "VP9", encoder.get());
+ RunBaseTest(&test);
+}
+#endif // !defined(RTC_DISABLE_VP9)
+
+TEST_F(VideoSendStreamTest, SupportsFlexfecH264) {
+ std::unique_ptr<VideoEncoder> encoder(
+ new test::FakeH264Encoder(Clock::GetRealTimeClock()));
+ FlexfecObserver test(false, false, "H264", encoder.get());
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, SupportsFlexfecWithNackH264) {
+ std::unique_ptr<VideoEncoder> encoder(
+ new test::FakeH264Encoder(Clock::GetRealTimeClock()));
+ FlexfecObserver test(false, true, "H264", encoder.get());
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, SupportsFlexfecWithMultithreadedH264) {
+ std::unique_ptr<VideoEncoder> encoder(
+ new test::MultithreadedFakeH264Encoder(Clock::GetRealTimeClock()));
+ FlexfecObserver test(false, false, "H264", encoder.get());
+ RunBaseTest(&test);
+}
+
+void VideoSendStreamTest::TestNackRetransmission(
+ uint32_t retransmit_ssrc,
+ uint8_t retransmit_payload_type) {
+ class NackObserver : public test::SendTest {
+ public:
+ explicit NackObserver(uint32_t retransmit_ssrc,
+ uint8_t retransmit_payload_type)
+ : SendTest(kDefaultTimeoutMs),
+ send_count_(0),
+ retransmit_ssrc_(retransmit_ssrc),
+ retransmit_payload_type_(retransmit_payload_type),
+ nacked_sequence_number_(-1) {
+ }
+
+ private:
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ RTPHeader header;
+ EXPECT_TRUE(parser_->Parse(packet, length, &header));
+
+ // Nack second packet after receiving the third one.
+ if (++send_count_ == 3) {
+ uint16_t nack_sequence_number = header.sequenceNumber - 1;
+ nacked_sequence_number_ = nack_sequence_number;
+ RTCPSender rtcp_sender(false, Clock::GetRealTimeClock(), nullptr,
+ nullptr, nullptr, transport_adapter_.get());
+
+ rtcp_sender.SetRTCPStatus(RtcpMode::kReducedSize);
+ rtcp_sender.SetRemoteSSRC(kVideoSendSsrcs[0]);
+
+ RTCPSender::FeedbackState feedback_state;
+
+ EXPECT_EQ(0,
+ rtcp_sender.SendRTCP(
+ feedback_state, kRtcpNack, 1, &nack_sequence_number));
+ }
+
+ uint16_t sequence_number = header.sequenceNumber;
+
+ if (header.ssrc == retransmit_ssrc_ &&
+ retransmit_ssrc_ != kVideoSendSsrcs[0]) {
+ // Not kVideoSendSsrcs[0], assume correct RTX packet. Extract sequence
+ // number.
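+        // Per RFC 4588, the first two bytes of the RTX payload carry the
+        // original sequence number (OSN).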
+ const uint8_t* rtx_header = packet + header.headerLength;
+ sequence_number = (rtx_header[0] << 8) + rtx_header[1];
+ }
+
+ if (sequence_number == nacked_sequence_number_) {
+ EXPECT_EQ(retransmit_ssrc_, header.ssrc);
+ EXPECT_EQ(retransmit_payload_type_, header.payloadType);
+ observation_complete_.Set();
+ }
+
+ return SEND_PACKET;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ transport_adapter_.reset(
+ new internal::TransportAdapter(send_config->send_transport));
+ transport_adapter_->Enable();
+ send_config->rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ send_config->rtp.rtx.payload_type = retransmit_payload_type_;
+ if (retransmit_ssrc_ != kVideoSendSsrcs[0])
+ send_config->rtp.rtx.ssrcs.push_back(retransmit_ssrc_);
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timed out while waiting for NACK retransmission.";
+ }
+
+ std::unique_ptr<internal::TransportAdapter> transport_adapter_;
+ int send_count_;
+ uint32_t retransmit_ssrc_;
+ uint8_t retransmit_payload_type_;
+ int nacked_sequence_number_;
+ } test(retransmit_ssrc, retransmit_payload_type);
+
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, RetransmitsNack) {
+ // Normal NACKs should use the send SSRC.
+ TestNackRetransmission(kVideoSendSsrcs[0], kFakeVideoSendPayloadType);
+}
+
+TEST_F(VideoSendStreamTest, RetransmitsNackOverRtx) {
+ // NACKs over RTX should use a separate SSRC.
+ TestNackRetransmission(kSendRtxSsrcs[0], kSendRtxPayloadType);
+}
+
+void VideoSendStreamTest::TestPacketFragmentationSize(VideoFormat format,
+ bool with_fec) {
+ // Use a fake encoder to output a frame of every size in the range [90, 290],
+ // for each size making sure that the exact number of payload bytes received
+ // is correct and that packets are fragmented to respect max packet size.
+ static const size_t kMaxPacketSize = 128;
+ static const size_t start = 90;
+ static const size_t stop = 290;
+
+ // Observer that verifies that the expected number of packets and bytes
+ // arrive for each frame size, from start_size to stop_size.
+ class FrameFragmentationTest : public test::SendTest,
+ public EncodedFrameObserver {
+ public:
+ FrameFragmentationTest(size_t max_packet_size,
+ size_t start_size,
+ size_t stop_size,
+ bool test_generic_packetization,
+ bool use_fec)
+ : SendTest(kLongTimeoutMs),
+ encoder_(stop),
+ max_packet_size_(max_packet_size),
+ stop_size_(stop_size),
+ test_generic_packetization_(test_generic_packetization),
+ use_fec_(use_fec),
+ packet_count_(0),
+ accumulated_size_(0),
+ accumulated_payload_(0),
+ fec_packet_received_(false),
+ current_size_rtp_(start_size),
+ current_size_frame_(static_cast<int>(start_size)) {
+      // Fragmentation is required; this test doesn't make sense without it.
+ encoder_.SetFrameSize(start_size);
+ RTC_DCHECK_GT(stop_size, max_packet_size);
+ }
+
+ private:
+    Action OnSendRtp(const uint8_t* packet, size_t length) override {
+      RTPHeader header;
+      EXPECT_TRUE(parser_->Parse(packet, length, &header));
+
+ EXPECT_LE(length, max_packet_size_);
+
+ if (use_fec_) {
+ uint8_t payload_type = packet[header.headerLength];
+ bool is_fec = header.payloadType == kRedPayloadType &&
+ payload_type == kUlpfecPayloadType;
+ if (is_fec) {
+ fec_packet_received_ = true;
+ return SEND_PACKET;
+ }
+ }
+
+ accumulated_size_ += length;
+
+ if (use_fec_)
+ TriggerLossReport(header);
+
+ if (test_generic_packetization_) {
+ size_t overhead = header.headerLength + header.paddingLength;
+ // Only remove payload header and RED header if the packet actually
+ // contains payload.
+ if (length > overhead) {
+ overhead += (1 /* Generic header */);
+ if (use_fec_)
+ overhead += 1; // RED for FEC header.
+ }
+ EXPECT_GE(length, overhead);
+ accumulated_payload_ += length - overhead;
+ }
+
+ // Marker bit set indicates last packet of a frame.
+ if (header.markerBit) {
+ if (use_fec_ && accumulated_payload_ == current_size_rtp_ - 1) {
+ // With FEC enabled, frame size is incremented asynchronously, so
+ // "old" frames one byte too small may arrive. Accept, but don't
+ // increase expected frame size.
+ accumulated_size_ = 0;
+ accumulated_payload_ = 0;
+ return SEND_PACKET;
+ }
+
+ EXPECT_GE(accumulated_size_, current_size_rtp_);
+ if (test_generic_packetization_) {
+ EXPECT_EQ(current_size_rtp_, accumulated_payload_);
+ }
+
+ // Last packet of frame; reset counters.
+ accumulated_size_ = 0;
+ accumulated_payload_ = 0;
+ if (current_size_rtp_ == stop_size_) {
+          // Done! (Don't increase the size again; more frames may still
+          // arrive at stop_size.)
+ observation_complete_.Set();
+ } else {
+ // Increase next expected frame size. If testing with FEC, make sure
+ // a FEC packet has been received for this frame size before
+ // proceeding, to make sure that redundancy packets don't exceed
+ // size limit.
+ if (!use_fec_) {
+ ++current_size_rtp_;
+ } else if (fec_packet_received_) {
+ fec_packet_received_ = false;
+ ++current_size_rtp_;
+
+ rtc::CritScope lock(&mutex_);
+ ++current_size_frame_;
+ }
+ }
+ }
+
+ return SEND_PACKET;
+ }
+
+ void TriggerLossReport(const RTPHeader& header) {
+ // Send lossy receive reports to trigger FEC enabling.
+ const int kLossPercent = 5;
+ if (packet_count_++ % (100 / kLossPercent) != 0) {
+ FakeReceiveStatistics lossy_receive_stats(
+ kVideoSendSsrcs[0], header.sequenceNumber,
+ (packet_count_ * (100 - kLossPercent)) / 100, // Cumulative lost.
+ static_cast<uint8_t>((255 * kLossPercent) / 100)); // Loss percent.
+ RTCPSender rtcp_sender(false, Clock::GetRealTimeClock(),
+ &lossy_receive_stats, nullptr, nullptr,
+ transport_adapter_.get());
+
+ rtcp_sender.SetRTCPStatus(RtcpMode::kReducedSize);
+ rtcp_sender.SetRemoteSSRC(kVideoSendSsrcs[0]);
+
+ RTCPSender::FeedbackState feedback_state;
+
+ EXPECT_EQ(0, rtcp_sender.SendRTCP(feedback_state, kRtcpRr));
+ }
+ }
+
+ void EncodedFrameCallback(const EncodedFrame& encoded_frame) override {
+ rtc::CritScope lock(&mutex_);
+ // Increase frame size for next encoded frame, in the context of the
+ // encoder thread.
+ if (!use_fec_ && current_size_frame_ < static_cast<int32_t>(stop_size_)) {
+ ++current_size_frame_;
+ }
+ encoder_.SetFrameSize(static_cast<size_t>(current_size_frame_));
+ }
+
+ Call::Config GetSenderCallConfig() override {
+ Call::Config config(event_log_.get());
+ const int kMinBitrateBps = 30000;
+ config.bitrate_config.min_bitrate_bps = kMinBitrateBps;
+ return config;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ transport_adapter_.reset(
+ new internal::TransportAdapter(send_config->send_transport));
+ transport_adapter_->Enable();
+ if (use_fec_) {
+ send_config->rtp.ulpfec.red_payload_type = kRedPayloadType;
+ send_config->rtp.ulpfec.ulpfec_payload_type = kUlpfecPayloadType;
+ }
+
+ if (!test_generic_packetization_)
+ send_config->encoder_settings.payload_name = "VP8";
+
+ send_config->encoder_settings.encoder = &encoder_;
+ send_config->rtp.max_packet_size = kMaxPacketSize;
+ send_config->post_encode_callback = this;
+
+ // Make sure there is at least one extension header, to make the RTP
+ // header larger than the base length of 12 bytes.
+ EXPECT_FALSE(send_config->rtp.extensions.empty());
+
+      // Set up screen content, which disables frame dropping and makes this
+      // test easier.
+ class VideoStreamFactory
+ : public VideoEncoderConfig::VideoStreamFactoryInterface {
+ public:
+ explicit VideoStreamFactory(size_t num_temporal_layers)
+ : num_temporal_layers_(num_temporal_layers) {
+ EXPECT_GT(num_temporal_layers, 0u);
+ }
+
+ private:
+ std::vector<VideoStream> CreateEncoderStreams(
+ int width,
+ int height,
+ const VideoEncoderConfig& encoder_config) override {
+ std::vector<VideoStream> streams =
+ test::CreateVideoStreams(width, height, encoder_config);
+ for (VideoStream& stream : streams) {
+ stream.temporal_layer_thresholds_bps.resize(num_temporal_layers_ -
+ 1);
+ }
+ return streams;
+ }
+ const size_t num_temporal_layers_;
+ };
+
+ encoder_config->video_stream_factory =
+ new rtc::RefCountedObject<VideoStreamFactory>(2);
+ encoder_config->content_type = VideoEncoderConfig::ContentType::kScreen;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timed out while observing incoming RTP packets.";
+ }
+
+ std::unique_ptr<internal::TransportAdapter> transport_adapter_;
+ test::ConfigurableFrameSizeEncoder encoder_;
+
+ const size_t max_packet_size_;
+ const size_t stop_size_;
+ const bool test_generic_packetization_;
+ const bool use_fec_;
+
+ uint32_t packet_count_;
+ size_t accumulated_size_;
+ size_t accumulated_payload_;
+ bool fec_packet_received_;
+
+ size_t current_size_rtp_;
+ rtc::CriticalSection mutex_;
+ int current_size_frame_ RTC_GUARDED_BY(mutex_);
+ };
+
+  // With FEC, don't auto-increment the frame size; keep sending the current
+  // size until a FEC packet has been received for it.
+ FrameFragmentationTest test(
+ kMaxPacketSize, start, stop, format == kGeneric, with_fec);
+
+ RunBaseTest(&test);
+}
+
+// TODO(sprang): Is there any way of speeding up these tests?
+TEST_F(VideoSendStreamTest, FragmentsGenericAccordingToMaxPacketSize) {
+ TestPacketFragmentationSize(kGeneric, false);
+}
+
+TEST_F(VideoSendStreamTest, FragmentsGenericAccordingToMaxPacketSizeWithFec) {
+ TestPacketFragmentationSize(kGeneric, true);
+}
+
+TEST_F(VideoSendStreamTest, FragmentsVp8AccordingToMaxPacketSize) {
+ TestPacketFragmentationSize(kVP8, false);
+}
+
+TEST_F(VideoSendStreamTest, FragmentsVp8AccordingToMaxPacketSizeWithFec) {
+ TestPacketFragmentationSize(kVP8, true);
+}
+
+// The test will go through a number of phases.
+// 1. Start sending packets.
+// 2. As soon as the RTP stream has been detected, signal a low REMB value to
+// suspend the stream.
+// 3. Wait until |kSuspendTimeFrames| have been captured without seeing any RTP
+// packets.
+// 4. Signal a high REMB and then wait for the RTP stream to start again.
+// When the stream is detected again, and the stats show that the stream
+// is no longer suspended, the test ends.
+TEST_F(VideoSendStreamTest, SuspendBelowMinBitrate) {
+ static const int kSuspendTimeFrames = 60; // Suspend for 2 seconds @ 30 fps.
+
+ class RembObserver : public test::SendTest,
+ public rtc::VideoSinkInterface<VideoFrame> {
+ public:
+ RembObserver()
+ : SendTest(kDefaultTimeoutMs),
+ clock_(Clock::GetRealTimeClock()),
+ stream_(nullptr),
+ test_state_(kBeforeSuspend),
+ rtp_count_(0),
+ last_sequence_number_(0),
+ suspended_frame_count_(0),
+ low_remb_bps_(0),
+ high_remb_bps_(0) {}
+
+ private:
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ rtc::CritScope lock(&crit_);
+ ++rtp_count_;
+ RTPHeader header;
+ EXPECT_TRUE(parser_->Parse(packet, length, &header));
+ last_sequence_number_ = header.sequenceNumber;
+
+ if (test_state_ == kBeforeSuspend) {
+ // The stream has started. Try to suspend it.
+ SendRtcpFeedback(low_remb_bps_);
+ test_state_ = kDuringSuspend;
+ } else if (test_state_ == kDuringSuspend) {
+ if (header.paddingLength == 0) {
+ // Received non-padding packet during suspension period. Reset the
+ // counter.
+ suspended_frame_count_ = 0;
+ }
+ SendRtcpFeedback(0); // REMB is only sent if value is > 0.
+ } else if (test_state_ == kWaitingForPacket) {
+ if (header.paddingLength == 0) {
+ // Non-padding packet observed. Test is almost complete. Will just
+ // have to wait for the stats to change.
+ test_state_ = kWaitingForStats;
+ }
+ SendRtcpFeedback(0); // REMB is only sent if value is > 0.
+ } else if (test_state_ == kWaitingForStats) {
+ VideoSendStream::Stats stats = stream_->GetStats();
+ if (stats.suspended == false) {
+ // Stats flipped to false. Test is complete.
+ observation_complete_.Set();
+ }
+ SendRtcpFeedback(0); // REMB is only sent if value is > 0.
+ }
+
+ return SEND_PACKET;
+ }
+
+ // This method implements the rtc::VideoSinkInterface. This is called when
+ // a frame is provided to the VideoSendStream.
+ void OnFrame(const VideoFrame& video_frame) override {
+ rtc::CritScope lock(&crit_);
+ if (test_state_ == kDuringSuspend &&
+ ++suspended_frame_count_ > kSuspendTimeFrames) {
+ VideoSendStream::Stats stats = stream_->GetStats();
+ EXPECT_TRUE(stats.suspended);
+ SendRtcpFeedback(high_remb_bps_);
+ test_state_ = kWaitingForPacket;
+ }
+ }
+
+ void set_low_remb_bps(int value) {
+ rtc::CritScope lock(&crit_);
+ low_remb_bps_ = value;
+ }
+
+ void set_high_remb_bps(int value) {
+ rtc::CritScope lock(&crit_);
+ high_remb_bps_ = value;
+ }
+
+ void OnVideoStreamsCreated(
+ VideoSendStream* send_stream,
+ const std::vector<VideoReceiveStream*>& receive_streams) override {
+ stream_ = send_stream;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ RTC_DCHECK_EQ(1, encoder_config->number_of_streams);
+ transport_adapter_.reset(
+ new internal::TransportAdapter(send_config->send_transport));
+ transport_adapter_->Enable();
+ send_config->rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ send_config->pre_encode_callback = this;
+ send_config->suspend_below_min_bitrate = true;
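+      // Choose a REMB safely below the stream's min bitrate to force
+      // suspension, and one above the min bitrate plus a threshold window
+      // to resume the stream.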
+ int min_bitrate_bps =
+ test::DefaultVideoStreamFactory::kDefaultMinBitratePerStream[0];
+ set_low_remb_bps(min_bitrate_bps - 10000);
+ int threshold_window = std::max(min_bitrate_bps / 10, 20000);
+ ASSERT_GT(encoder_config->max_bitrate_bps,
+ min_bitrate_bps + threshold_window + 5000);
+ set_high_remb_bps(min_bitrate_bps + threshold_window + 5000);
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timed out during suspend-below-min-bitrate test.";
+ }
+
+ enum TestState {
+ kBeforeSuspend,
+ kDuringSuspend,
+ kWaitingForPacket,
+ kWaitingForStats
+ };
+
+ virtual void SendRtcpFeedback(int remb_value)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_) {
+ FakeReceiveStatistics receive_stats(kVideoSendSsrcs[0],
+ last_sequence_number_, rtp_count_, 0);
+ RTCPSender rtcp_sender(false, clock_, &receive_stats, nullptr, nullptr,
+ transport_adapter_.get());
+
+ rtcp_sender.SetRTCPStatus(RtcpMode::kReducedSize);
+ rtcp_sender.SetRemoteSSRC(kVideoSendSsrcs[0]);
+ if (remb_value > 0) {
+ rtcp_sender.SetRemb(remb_value, std::vector<uint32_t>());
+ }
+ RTCPSender::FeedbackState feedback_state;
+ EXPECT_EQ(0, rtcp_sender.SendRTCP(feedback_state, kRtcpRr));
+ }
+
+ std::unique_ptr<internal::TransportAdapter> transport_adapter_;
+ Clock* const clock_;
+ VideoSendStream* stream_;
+
+ rtc::CriticalSection crit_;
+ TestState test_state_ RTC_GUARDED_BY(crit_);
+ int rtp_count_ RTC_GUARDED_BY(crit_);
+ int last_sequence_number_ RTC_GUARDED_BY(crit_);
+ int suspended_frame_count_ RTC_GUARDED_BY(crit_);
+ int low_remb_bps_ RTC_GUARDED_BY(crit_);
+ int high_remb_bps_ RTC_GUARDED_BY(crit_);
+ } test;
+
+ RunBaseTest(&test);
+}
+
+// This test verifies that padding stops being sent after a while if the
+// camera stops producing video frames, and that padding resumes once the
+// camera restarts.
+TEST_F(VideoSendStreamTest, NoPaddingWhenVideoIsMuted) {
+ class NoPaddingWhenVideoIsMuted : public test::SendTest {
+ public:
+ NoPaddingWhenVideoIsMuted()
+ : SendTest(kDefaultTimeoutMs),
+ clock_(Clock::GetRealTimeClock()),
+ last_packet_time_ms_(-1),
+ capturer_(nullptr) {
+ }
+
+ private:
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ rtc::CritScope lock(&crit_);
+ last_packet_time_ms_ = clock_->TimeInMilliseconds();
+
+ RTPHeader header;
+ parser_->Parse(packet, length, &header);
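+      // A packet is padding-only when the header and padding account for
+      // its entire length.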
+ const bool only_padding =
+ header.headerLength + header.paddingLength == length;
+
+ if (test_state_ == kBeforeStopCapture) {
+ capturer_->Stop();
+ test_state_ = kWaitingForPadding;
+ } else if (test_state_ == kWaitingForPadding && only_padding) {
+ test_state_ = kWaitingForNoPackets;
+ } else if (test_state_ == kWaitingForPaddingAfterCameraRestart &&
+ only_padding) {
+ observation_complete_.Set();
+ }
+ return SEND_PACKET;
+ }
+
+ Action OnSendRtcp(const uint8_t* packet, size_t length) override {
+ rtc::CritScope lock(&crit_);
+ const int kNoPacketsThresholdMs = 2000;
+ if (test_state_ == kWaitingForNoPackets &&
+ (last_packet_time_ms_ > 0 &&
+ clock_->TimeInMilliseconds() - last_packet_time_ms_ >
+ kNoPacketsThresholdMs)) {
+ capturer_->Start();
+ test_state_ = kWaitingForPaddingAfterCameraRestart;
+ }
+ return SEND_PACKET;
+ }
+
+ size_t GetNumVideoStreams() const override { return 3; }
+
+ void OnFrameGeneratorCapturerCreated(
+ test::FrameGeneratorCapturer* frame_generator_capturer) override {
+ rtc::CritScope lock(&crit_);
+ capturer_ = frame_generator_capturer;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait())
+ << "Timed out while waiting for RTP packets to stop being sent.";
+ }
+
+ enum TestState {
+ kBeforeStopCapture,
+ kWaitingForPadding,
+ kWaitingForNoPackets,
+ kWaitingForPaddingAfterCameraRestart
+ };
+
+ TestState test_state_ = kBeforeStopCapture;
+ Clock* const clock_;
+ std::unique_ptr<internal::TransportAdapter> transport_adapter_;
+ rtc::CriticalSection crit_;
+ int64_t last_packet_time_ms_ RTC_GUARDED_BY(crit_);
+ test::FrameGeneratorCapturer* capturer_ RTC_GUARDED_BY(crit_);
+ } test;
+
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, PaddingIsPrimarilyRetransmissions) {
+ const int kCapacityKbps = 10000; // 10 Mbps
+ class PaddingIsPrimarilyRetransmissions : public test::EndToEndTest {
+ public:
+ PaddingIsPrimarilyRetransmissions()
+ : EndToEndTest(kDefaultTimeoutMs),
+ clock_(Clock::GetRealTimeClock()),
+ padding_length_(0),
+ total_length_(0),
+ call_(nullptr) {}
+
+ private:
+ void OnCallsCreated(Call* sender_call, Call* receiver_call) override {
+ call_ = sender_call;
+ }
+
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ rtc::CritScope lock(&crit_);
+
+ RTPHeader header;
+ parser_->Parse(packet, length, &header);
+ padding_length_ += header.paddingLength;
+ total_length_ += length;
+ return SEND_PACKET;
+ }
+
+ test::PacketTransport* CreateSendTransport(
+ test::SingleThreadedTaskQueueForTesting* task_queue,
+ Call* sender_call) override {
+ const int kNetworkDelayMs = 50;
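+      // Simulate a constrained, lossy link. With RTX enabled below, the
+      // lost packets induce retransmissions, which should carry most of the
+      // redundant traffic instead of padding payloads.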
+ FakeNetworkPipe::Config config;
+ config.loss_percent = 10;
+ config.link_capacity_kbps = kCapacityKbps;
+ config.queue_delay_ms = kNetworkDelayMs;
+ return new test::PacketTransport(task_queue, sender_call, this,
+ test::PacketTransport::kSender,
+ payload_type_map_, config);
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ // Turn on RTX.
+ send_config->rtp.rtx.payload_type = kFakeVideoSendPayloadType;
+ send_config->rtp.rtx.ssrcs.push_back(kVideoSendSsrcs[0]);
+ }
+
+ void PerformTest() override {
+ // TODO(isheriff): Some platforms do not ramp up as expected to full
+ // capacity due to packet scheduling delays. Fix that before getting
+ // rid of this.
+ SleepMs(5000);
+ {
+ rtc::CritScope lock(&crit_);
+ // Expect padding to be a small percentage of total bytes sent.
+ EXPECT_LT(padding_length_, .1 * total_length_);
+ }
+ }
+
+ rtc::CriticalSection crit_;
+ Clock* const clock_;
+ size_t padding_length_ RTC_GUARDED_BY(crit_);
+ size_t total_length_ RTC_GUARDED_BY(crit_);
+ Call* call_;
+ } test;
+
+ RunBaseTest(&test);
+}
+
+// This test first observes "high" bitrate use, at which point it sends a REMB
+// indicating that the bitrate should be lowered significantly. It then
+// observes the bitrate sink well below the min-transmit-bitrate threshold,
+// verifying that the min-transmit bitrate respects the incoming REMB.
+//
+// Note that the test starts at "high" bitrate and does not ramp up to "higher"
+// bitrate since no receiver block or remb is sent in the initial phase.
+TEST_F(VideoSendStreamTest, MinTransmitBitrateRespectsRemb) {
+ static const int kMinTransmitBitrateBps = 400000;
+ static const int kHighBitrateBps = 150000;
+ static const int kRembBitrateBps = 80000;
+ static const int kRembRespectedBitrateBps = 100000;
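+  // The stream pads toward kMinTransmitBitrateBps, so the measured rate
+  // first exceeds kHighBitrateBps; after the REMB of kRembBitrateBps it
+  // must drop below kRembRespectedBitrateBps.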
+ class BitrateObserver : public test::SendTest {
+ public:
+ BitrateObserver()
+ : SendTest(kDefaultTimeoutMs),
+          retransmission_rate_limiter_(Clock::GetRealTimeClock(), 1000),
+ stream_(nullptr),
+ bitrate_capped_(false) {}
+
+ private:
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ if (RtpHeaderParser::IsRtcp(packet, length))
+ return DROP_PACKET;
+
+ RTPHeader header;
+ if (!parser_->Parse(packet, length, &header))
+ return DROP_PACKET;
+ RTC_DCHECK(stream_);
+ VideoSendStream::Stats stats = stream_->GetStats();
+ if (!stats.substreams.empty()) {
+ EXPECT_EQ(1u, stats.substreams.size());
+ int total_bitrate_bps =
+ stats.substreams.begin()->second.total_bitrate_bps;
+ test::PrintResult("bitrate_stats_",
+ "min_transmit_bitrate_low_remb",
+ "bitrate_bps",
+ static_cast<size_t>(total_bitrate_bps),
+ "bps",
+ false);
+ if (total_bitrate_bps > kHighBitrateBps) {
+ rtp_rtcp_->SetRemb(kRembBitrateBps,
+ std::vector<uint32_t>(1, header.ssrc));
+ rtp_rtcp_->Process();
+ bitrate_capped_ = true;
+ } else if (bitrate_capped_ &&
+ total_bitrate_bps < kRembRespectedBitrateBps) {
+ observation_complete_.Set();
+ }
+ }
+      // Packets don't have to be delivered, since this test acts as the
+      // receiver.
+ return DROP_PACKET;
+ }
+
+ void OnVideoStreamsCreated(
+ VideoSendStream* send_stream,
+ const std::vector<VideoReceiveStream*>& receive_streams) override {
+ stream_ = send_stream;
+ RtpRtcp::Configuration config;
+ config.outgoing_transport = feedback_transport_.get();
+      config.retransmission_rate_limiter = &retransmission_rate_limiter_;
+ rtp_rtcp_.reset(RtpRtcp::CreateRtpRtcp(config));
+ rtp_rtcp_->SetRTCPStatus(RtcpMode::kReducedSize);
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ feedback_transport_.reset(
+ new internal::TransportAdapter(send_config->send_transport));
+ feedback_transport_->Enable();
+ encoder_config->min_transmit_bitrate_bps = kMinTransmitBitrateBps;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait())
+ << "Timeout while waiting for low bitrate stats after REMB.";
+ }
+
+ std::unique_ptr<RtpRtcp> rtp_rtcp_;
+ std::unique_ptr<internal::TransportAdapter> feedback_transport_;
+    RateLimiter retransmission_rate_limiter_;
+ VideoSendStream* stream_;
+ bool bitrate_capped_;
+ } test;
+
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, ChangingNetworkRoute) {
+ static const int kStartBitrateBps = 300000;
+ static const int kNewMaxBitrateBps = 1234567;
+ static const uint8_t kExtensionId = test::kTransportSequenceNumberExtensionId;
+ class ChangingNetworkRouteTest : public test::EndToEndTest {
+ public:
+ explicit ChangingNetworkRouteTest(
+ test::SingleThreadedTaskQueueForTesting* task_queue)
+ : EndToEndTest(test::CallTest::kDefaultTimeoutMs),
+ task_queue_(task_queue),
+ call_(nullptr) {
+ EXPECT_TRUE(parser_->RegisterRtpHeaderExtension(
+ kRtpExtensionTransportSequenceNumber, kExtensionId));
+ }
+
+ void OnCallsCreated(Call* sender_call, Call* receiver_call) override {
+ call_ = sender_call;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->rtp.extensions.clear();
+ send_config->rtp.extensions.push_back(RtpExtension(
+ RtpExtension::kTransportSequenceNumberUri, kExtensionId));
+ (*receive_configs)[0].rtp.extensions = send_config->rtp.extensions;
+ (*receive_configs)[0].rtp.transport_cc = true;
+ }
+
+ void ModifyAudioConfigs(
+ AudioSendStream::Config* send_config,
+ std::vector<AudioReceiveStream::Config>* receive_configs) override {
+ send_config->rtp.extensions.clear();
+ send_config->rtp.extensions.push_back(RtpExtension(
+ RtpExtension::kTransportSequenceNumberUri, kExtensionId));
+ (*receive_configs)[0].rtp.extensions.clear();
+ (*receive_configs)[0].rtp.extensions = send_config->rtp.extensions;
+ (*receive_configs)[0].rtp.transport_cc = true;
+ }
+
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ if (call_->GetStats().send_bandwidth_bps > kStartBitrateBps) {
+ observation_complete_.Set();
+ }
+
+ return SEND_PACKET;
+ }
+
+ void PerformTest() override {
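+      // New route: connected, with local network id 10, remote network id
+      // 20, and no last-sent packet id (-1).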
+ rtc::NetworkRoute new_route(true, 10, 20, -1);
+ Call::Config::BitrateConfig bitrate_config;
+
+ task_queue_->SendTask([this, &new_route, &bitrate_config]() {
+ call_->OnNetworkRouteChanged("transport", new_route);
+ bitrate_config.start_bitrate_bps = kStartBitrateBps;
+ call_->SetBitrateConfig(bitrate_config);
+ });
+
+ EXPECT_TRUE(Wait())
+ << "Timed out while waiting for start bitrate to be exceeded.";
+
+ task_queue_->SendTask([this, &new_route, &bitrate_config]() {
+ bitrate_config.start_bitrate_bps = -1;
+ bitrate_config.max_bitrate_bps = kNewMaxBitrateBps;
+ call_->SetBitrateConfig(bitrate_config);
+ // TODO(holmer): We should set the last sent packet id here and verify
+ // that we correctly ignore any packet loss reported prior to that id.
+ ++new_route.local_network_id;
+ call_->OnNetworkRouteChanged("transport", new_route);
+ EXPECT_GE(call_->GetStats().send_bandwidth_bps, kStartBitrateBps);
+ });
+ }
+
+ private:
+ test::SingleThreadedTaskQueueForTesting* const task_queue_;
+ Call* call_;
+ } test(&task_queue_);
+
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, ChangingTransportOverhead) {
+ class ChangingTransportOverheadTest : public test::EndToEndTest {
+ public:
+ explicit ChangingTransportOverheadTest(
+ test::SingleThreadedTaskQueueForTesting* task_queue)
+ : EndToEndTest(test::CallTest::kDefaultTimeoutMs),
+ task_queue_(task_queue),
+ call_(nullptr),
+ packets_sent_(0),
+ transport_overhead_(0) {}
+
+ void OnCallsCreated(Call* sender_call, Call* receiver_call) override {
+ call_ = sender_call;
+ }
+
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
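+      // Sent packets must stay within the configured max RTP packet size,
+      // regardless of the transport overhead set in PerformTest().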
+ EXPECT_LE(length, kMaxRtpPacketSize);
+ rtc::CritScope cs(&lock_);
+ if (++packets_sent_ < 100)
+ return SEND_PACKET;
+ observation_complete_.Set();
+ return SEND_PACKET;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->rtp.max_packet_size = kMaxRtpPacketSize;
+ }
+
+ void PerformTest() override {
+ task_queue_->SendTask([this]() {
+ transport_overhead_ = 100;
+ call_->OnTransportOverheadChanged(webrtc::MediaType::VIDEO,
+ transport_overhead_);
+ });
+
+ EXPECT_TRUE(Wait());
+
+ {
+ rtc::CritScope cs(&lock_);
+ packets_sent_ = 0;
+ }
+
+ task_queue_->SendTask([this]() {
+ transport_overhead_ = 500;
+ call_->OnTransportOverheadChanged(webrtc::MediaType::VIDEO,
+ transport_overhead_);
+ });
+
+ EXPECT_TRUE(Wait());
+ }
+
+ private:
+ test::SingleThreadedTaskQueueForTesting* const task_queue_;
+ Call* call_;
+ rtc::CriticalSection lock_;
+ int packets_sent_ RTC_GUARDED_BY(lock_);
+ int transport_overhead_;
+ const size_t kMaxRtpPacketSize = 1000;
+ } test(&task_queue_);
+
+ RunBaseTest(&test);
+}
+
+// This test class takes as arguments a switch selecting whether a content-type
+// switch should occur, and a function object used to reset the send stream.
+// The reset is necessary because the content type of a VideoSendStream cannot
+// be changed in place; the stream must be recreated. Stopping and recreating
+// the stream can only be done on the main thread and in the context of
+// VideoSendStreamTest (not BaseTest).
+template <typename T>
+class MaxPaddingSetTest : public test::SendTest {
+ public:
+ static const uint32_t kMinTransmitBitrateBps = 400000;
+ static const uint32_t kActualEncodeBitrateBps = 40000;
+ static const uint32_t kMinPacketsToSend = 50;
+
+ explicit MaxPaddingSetTest(bool test_switch_content_type, T* stream_reset_fun)
+ : SendTest(test::CallTest::kDefaultTimeoutMs),
+ content_switch_event_(false, false),
+ call_(nullptr),
+ send_stream_(nullptr),
+ send_stream_config_(nullptr),
+ packets_sent_(0),
+ running_without_padding_(test_switch_content_type),
+ stream_resetter_(stream_reset_fun) {
+ RTC_DCHECK(stream_resetter_);
+ }
+
+ void OnVideoStreamsCreated(
+ VideoSendStream* send_stream,
+ const std::vector<VideoReceiveStream*>& receive_streams) override {
+ rtc::CritScope lock(&crit_);
+ send_stream_ = send_stream;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ RTC_DCHECK_EQ(1, encoder_config->number_of_streams);
+ if (RunningWithoutPadding()) {
+ encoder_config->min_transmit_bitrate_bps = 0;
+ encoder_config->content_type =
+ VideoEncoderConfig::ContentType::kRealtimeVideo;
+ } else {
+ encoder_config->min_transmit_bitrate_bps = kMinTransmitBitrateBps;
+ encoder_config->content_type = VideoEncoderConfig::ContentType::kScreen;
+ }
+ send_stream_config_ = send_config->Copy();
+ encoder_config_ = encoder_config->Copy();
+ }
+
+ void OnCallsCreated(Call* sender_call, Call* receiver_call) override {
+ call_ = sender_call;
+ }
+
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ rtc::CritScope lock(&crit_);
+ if (running_without_padding_)
+ EXPECT_EQ(0, call_->GetStats().max_padding_bitrate_bps);
+
+    // Wait until at least kMinPacketsToSend packets have been sent, so that
+    // we have reliable data.
+ if (++packets_sent_ < kMinPacketsToSend)
+ return SEND_PACKET;
+
+ if (running_without_padding_) {
+      // We've sent kMinPacketsToSend packets with the default configuration;
+      // now switch to screen content and set a min transmit bitrate.
+      // Note that the stream must be recreated when changing content type.
+ packets_sent_ = 0;
+ encoder_config_.min_transmit_bitrate_bps = kMinTransmitBitrateBps;
+ encoder_config_.content_type = VideoEncoderConfig::ContentType::kScreen;
+ running_without_padding_ = false;
+ content_switch_event_.Set();
+ return SEND_PACKET;
+ }
+
+ // Make sure the pacer has been configured with a min transmit bitrate.
+ if (call_->GetStats().max_padding_bitrate_bps > 0)
+ observation_complete_.Set();
+
+ return SEND_PACKET;
+ }
+
+ void PerformTest() override {
+ if (RunningWithoutPadding()) {
+ ASSERT_TRUE(
+ content_switch_event_.Wait(test::CallTest::kDefaultTimeoutMs));
+ (*stream_resetter_)(send_stream_config_, encoder_config_);
+ }
+
+ ASSERT_TRUE(Wait()) << "Timed out waiting for a valid padding bitrate.";
+ }
+
+ private:
+ bool RunningWithoutPadding() const {
+ rtc::CritScope lock(&crit_);
+ return running_without_padding_;
+ }
+
+ rtc::CriticalSection crit_;
+ rtc::Event content_switch_event_;
+ Call* call_;
+ VideoSendStream* send_stream_ RTC_GUARDED_BY(crit_);
+ VideoSendStream::Config send_stream_config_;
+ VideoEncoderConfig encoder_config_;
+ uint32_t packets_sent_ RTC_GUARDED_BY(crit_);
+ bool running_without_padding_;
+ T* const stream_resetter_;
+};
+
+TEST_F(VideoSendStreamTest, RespectsMinTransmitBitrate) {
+ auto reset_fun = [](const VideoSendStream::Config& send_stream_config,
+ const VideoEncoderConfig& encoder_config) {};
+ MaxPaddingSetTest<decltype(reset_fun)> test(false, &reset_fun);
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, RespectsMinTransmitBitrateAfterContentSwitch) {
+ // Function for removing and recreating the send stream with a new config.
+ auto reset_fun = [this](const VideoSendStream::Config& send_stream_config,
+ const VideoEncoderConfig& encoder_config) {
+ task_queue_.SendTask([this, &send_stream_config, &encoder_config]() {
+ Stop();
+ sender_call_->DestroyVideoSendStream(video_send_stream_);
+ video_send_config_ = send_stream_config.Copy();
+ video_encoder_config_ = encoder_config.Copy();
+ video_send_stream_ = sender_call_->CreateVideoSendStream(
+ video_send_config_.Copy(), video_encoder_config_.Copy());
+ video_send_stream_->SetSource(
+ frame_generator_capturer_.get(),
+ VideoSendStream::DegradationPreference::kMaintainResolution);
+ Start();
+ });
+ };
+ MaxPaddingSetTest<decltype(reset_fun)> test(true, &reset_fun);
+ RunBaseTest(&test);
+}
+
+// This test verifies that a new frame size reconfigures the encoder even when
+// the stream is not (yet) sending. The purpose is to permit encoding as soon
+// as possible once we start sending. The input frames are likely from the
+// same source that will be sent later, so reconfiguring early simply means
+// we are ready earlier.
+TEST_F(VideoSendStreamTest,
+ EncoderReconfigureOnResolutionChangeWhenNotSending) {
+ class EncoderObserver : public test::FakeEncoder {
+ public:
+ EncoderObserver()
+ : FakeEncoder(Clock::GetRealTimeClock()),
+ init_encode_called_(false, false),
+ number_of_initializations_(0),
+ last_initialized_frame_width_(0),
+ last_initialized_frame_height_(0) {}
+
+ void WaitForResolution(int width, int height) {
+ {
+ rtc::CritScope lock(&crit_);
+ if (last_initialized_frame_width_ == width &&
+ last_initialized_frame_height_ == height) {
+ return;
+ }
+ }
+ EXPECT_TRUE(
+ init_encode_called_.Wait(VideoSendStreamTest::kDefaultTimeoutMs));
+ {
+ rtc::CritScope lock(&crit_);
+ EXPECT_EQ(width, last_initialized_frame_width_);
+ EXPECT_EQ(height, last_initialized_frame_height_);
+ }
+ }
+
+ private:
+ int32_t InitEncode(const VideoCodec* config,
+ int32_t number_of_cores,
+ size_t max_payload_size) override {
+ rtc::CritScope lock(&crit_);
+ last_initialized_frame_width_ = config->width;
+ last_initialized_frame_height_ = config->height;
+ ++number_of_initializations_;
+ init_encode_called_.Set();
+ return FakeEncoder::InitEncode(config, number_of_cores, max_payload_size);
+ }
+
+ int32_t Encode(const VideoFrame& input_image,
+ const CodecSpecificInfo* codec_specific_info,
+ const std::vector<FrameType>* frame_types) override {
+ ADD_FAILURE()
+ << "Unexpected Encode call since the send stream is not started";
+ return 0;
+ }
+
+ rtc::CriticalSection crit_;
+ rtc::Event init_encode_called_;
+ size_t number_of_initializations_ RTC_GUARDED_BY(&crit_);
+ int last_initialized_frame_width_ RTC_GUARDED_BY(&crit_);
+ int last_initialized_frame_height_ RTC_GUARDED_BY(&crit_);
+ };
+
+ test::NullTransport transport;
+ EncoderObserver encoder;
+
+ task_queue_.SendTask([this, &transport, &encoder]() {
+ CreateSenderCall(Call::Config(event_log_.get()));
+ CreateSendConfig(1, 0, 0, &transport);
+ video_send_config_.encoder_settings.encoder = &encoder;
+ CreateVideoStreams();
+ CreateFrameGeneratorCapturer(kDefaultFramerate, kDefaultWidth,
+ kDefaultHeight);
+ frame_generator_capturer_->Start();
+ });
+
+ encoder.WaitForResolution(kDefaultWidth, kDefaultHeight);
+
+ task_queue_.SendTask([this]() {
+ frame_generator_capturer_->ChangeResolution(kDefaultWidth * 2,
+ kDefaultHeight * 2);
+ });
+
+ encoder.WaitForResolution(kDefaultWidth * 2, kDefaultHeight * 2);
+
+ task_queue_.SendTask([this]() {
+ DestroyStreams();
+ DestroyCalls();
+ });
+}
+
+TEST_F(VideoSendStreamTest, CanReconfigureToUseStartBitrateAbovePreviousMax) {
+ class StartBitrateObserver : public test::FakeEncoder {
+ public:
+ StartBitrateObserver()
+ : FakeEncoder(Clock::GetRealTimeClock()),
+ start_bitrate_changed_(false, false),
+ start_bitrate_kbps_(0) {}
+ int32_t InitEncode(const VideoCodec* config,
+ int32_t number_of_cores,
+ size_t max_payload_size) override {
+ rtc::CritScope lock(&crit_);
+ start_bitrate_kbps_ = config->startBitrate;
+ start_bitrate_changed_.Set();
+ return FakeEncoder::InitEncode(config, number_of_cores, max_payload_size);
+ }
+
+ int32_t SetRates(uint32_t new_target_bitrate, uint32_t framerate) override {
+ rtc::CritScope lock(&crit_);
+ start_bitrate_kbps_ = new_target_bitrate;
+ start_bitrate_changed_.Set();
+ return FakeEncoder::SetRates(new_target_bitrate, framerate);
+ }
+
+ int GetStartBitrateKbps() const {
+ rtc::CritScope lock(&crit_);
+ return start_bitrate_kbps_;
+ }
+
+ bool WaitForStartBitrate() {
+ return start_bitrate_changed_.Wait(
+ VideoSendStreamTest::kDefaultTimeoutMs);
+ }
+
+ private:
+ rtc::CriticalSection crit_;
+ rtc::Event start_bitrate_changed_;
+ int start_bitrate_kbps_ RTC_GUARDED_BY(crit_);
+ };
+
+ CreateSenderCall(Call::Config(event_log_.get()));
+
+ test::NullTransport transport;
+ CreateSendConfig(1, 0, 0, &transport);
+
+ Call::Config::BitrateConfig bitrate_config;
+ bitrate_config.start_bitrate_bps = 2 * video_encoder_config_.max_bitrate_bps;
+ sender_call_->SetBitrateConfig(bitrate_config);
+
+ StartBitrateObserver encoder;
+ video_send_config_.encoder_settings.encoder = &encoder;
+  // Since this test does not use a capturer, set |internal_source| = true.
+  // Otherwise, the encoder configuration would only be updated on the next
+  // incoming video frame.
+ video_send_config_.encoder_settings.internal_source = true;
+
+ CreateVideoStreams();
+
+ EXPECT_TRUE(encoder.WaitForStartBitrate());
+ EXPECT_EQ(video_encoder_config_.max_bitrate_bps / 1000,
+ encoder.GetStartBitrateKbps());
+
+ video_encoder_config_.max_bitrate_bps = 2 * bitrate_config.start_bitrate_bps;
+ video_send_stream_->ReconfigureVideoEncoder(video_encoder_config_.Copy());
+
+  // The new start bitrate is above the previous max, so the encoder should
+  // be reconfigured with it. As there's no network connection this shouldn't
+  // be flaky, since no bitrate estimate should have been reported in between.
+ EXPECT_TRUE(encoder.WaitForStartBitrate());
+ EXPECT_EQ(bitrate_config.start_bitrate_bps / 1000,
+ encoder.GetStartBitrateKbps());
+
+ DestroyStreams();
+}
+
+// This test verifies that if the encoder uses an internal source, starting
+// the VideoSendStream triggers a rate allocation with a non-zero bitrate,
+// and that VideoSendStream::Stop triggers a rate allocation with zero
+// bitrate.
+TEST_F(VideoSendStreamTest, VideoSendStreamStopSetEncoderRateToZero) {
+ class StartStopBitrateObserver : public test::FakeEncoder {
+ public:
+ StartStopBitrateObserver()
+ : FakeEncoder(Clock::GetRealTimeClock()),
+ encoder_init_(false, false),
+ bitrate_changed_(false, false) {}
+ int32_t InitEncode(const VideoCodec* config,
+ int32_t number_of_cores,
+ size_t max_payload_size) override {
+ rtc::CritScope lock(&crit_);
+ encoder_init_.Set();
+ return FakeEncoder::InitEncode(config, number_of_cores, max_payload_size);
+ }
+
+ int32_t SetRateAllocation(const BitrateAllocation& bitrate,
+ uint32_t framerate) override {
+ rtc::CritScope lock(&crit_);
+ bitrate_kbps_ = rtc::Optional<int>(bitrate.get_sum_kbps());
+ bitrate_changed_.Set();
+ return FakeEncoder::SetRateAllocation(bitrate, framerate);
+ }
+
+ bool WaitForEncoderInit() {
+ return encoder_init_.Wait(VideoSendStreamTest::kDefaultTimeoutMs);
+ }
+
+ bool WaitBitrateChanged(bool non_zero) {
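+      // Poll the most recently reported bitrate; the do-while condition
+      // blocks on bitrate_changed_, so each iteration waits for a new rate
+      // callback.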
+ do {
+ rtc::Optional<int> bitrate_kbps;
+ {
+ rtc::CritScope lock(&crit_);
+ bitrate_kbps = bitrate_kbps_;
+ }
+ if (!bitrate_kbps)
+ continue;
+
+ if ((non_zero && *bitrate_kbps > 0) ||
+ (!non_zero && *bitrate_kbps == 0)) {
+ return true;
+ }
+ } while (bitrate_changed_.Wait(VideoSendStreamTest::kDefaultTimeoutMs));
+ return false;
+ }
+
+ private:
+ rtc::CriticalSection crit_;
+ rtc::Event encoder_init_;
+ rtc::Event bitrate_changed_;
+ rtc::Optional<int> bitrate_kbps_ RTC_GUARDED_BY(crit_);
+ };
+
+ test::NullTransport transport;
+ StartStopBitrateObserver encoder;
+
+ task_queue_.SendTask([this, &transport, &encoder]() {
+ CreateSenderCall(Call::Config(event_log_.get()));
+ CreateSendConfig(1, 0, 0, &transport);
+
+ sender_call_->SignalChannelNetworkState(MediaType::VIDEO, kNetworkUp);
+
+ video_send_config_.encoder_settings.encoder = &encoder;
+ video_send_config_.encoder_settings.internal_source = true;
+
+ CreateVideoStreams();
+ });
+
+ EXPECT_TRUE(encoder.WaitForEncoderInit());
+
+ task_queue_.SendTask([this]() {
+ video_send_stream_->Start();
+ });
+ EXPECT_TRUE(encoder.WaitBitrateChanged(true));
+
+ task_queue_.SendTask([this]() {
+ video_send_stream_->Stop();
+ });
+ EXPECT_TRUE(encoder.WaitBitrateChanged(false));
+
+ task_queue_.SendTask([this]() {
+ video_send_stream_->Start();
+ });
+ EXPECT_TRUE(encoder.WaitBitrateChanged(true));
+
+ task_queue_.SendTask([this]() {
+ DestroyStreams();
+ DestroyCalls();
+ });
+}
+
+TEST_F(VideoSendStreamTest, CapturesTextureAndVideoFrames) {
+ class FrameObserver : public rtc::VideoSinkInterface<VideoFrame> {
+ public:
+ FrameObserver() : output_frame_event_(false, false) {}
+
+ void OnFrame(const VideoFrame& video_frame) override {
+ output_frames_.push_back(video_frame);
+ output_frame_event_.Set();
+ }
+
+ void WaitOutputFrame() {
+ const int kWaitFrameTimeoutMs = 3000;
+ EXPECT_TRUE(output_frame_event_.Wait(kWaitFrameTimeoutMs))
+ << "Timeout while waiting for output frames.";
+ }
+
+ const std::vector<VideoFrame>& output_frames() const {
+ return output_frames_;
+ }
+
+ private:
+ // Delivered output frames.
+ std::vector<VideoFrame> output_frames_;
+
+ // Indicate an output frame has arrived.
+ rtc::Event output_frame_event_;
+ };
+
+ test::NullTransport transport;
+ FrameObserver observer;
+ std::vector<VideoFrame> input_frames;
+
+ task_queue_.SendTask([this, &transport, &observer, &input_frames]() {
+ // Initialize send stream.
+ CreateSenderCall(Call::Config(event_log_.get()));
+
+ CreateSendConfig(1, 0, 0, &transport);
+ video_send_config_.pre_encode_callback = &observer;
+ CreateVideoStreams();
+
+ // Prepare five input frames. Send ordinary VideoFrame and texture frames
+ // alternatively.
+ int width = 168;
+ int height = 132;
+
+ input_frames.push_back(test::FakeNativeBuffer::CreateFrame(
+ width, height, 1, 1, kVideoRotation_0));
+ input_frames.push_back(test::FakeNativeBuffer::CreateFrame(
+ width, height, 2, 2, kVideoRotation_0));
+ input_frames.push_back(CreateVideoFrame(width, height, 3));
+ input_frames.push_back(CreateVideoFrame(width, height, 4));
+ input_frames.push_back(test::FakeNativeBuffer::CreateFrame(
+ width, height, 5, 5, kVideoRotation_0));
+
+ video_send_stream_->Start();
+ test::FrameForwarder forwarder;
+ video_send_stream_->SetSource(
+ &forwarder, VideoSendStream::DegradationPreference::kMaintainFramerate);
+ for (size_t i = 0; i < input_frames.size(); i++) {
+ forwarder.IncomingCapturedFrame(input_frames[i]);
+      // Wait until the output frame is received before sending the next
+      // input frame; otherwise the previous input frame may be replaced
+      // without being delivered.
+ observer.WaitOutputFrame();
+ }
+ video_send_stream_->Stop();
+ video_send_stream_->SetSource(
+ nullptr, VideoSendStream::DegradationPreference::kMaintainFramerate);
+ });
+
+  // Check that the input and output frames are the same. render_time_ms and
+  // timestamp are not compared, because the capturer sets those values.
+ ExpectEqualFramesVector(input_frames, observer.output_frames());
+
+ task_queue_.SendTask([this]() {
+ DestroyStreams();
+ DestroyCalls();
+ });
+}
+
+void ExpectEqualFramesVector(const std::vector<VideoFrame>& frames1,
+ const std::vector<VideoFrame>& frames2) {
+ EXPECT_EQ(frames1.size(), frames2.size());
+ for (size_t i = 0; i < std::min(frames1.size(), frames2.size()); ++i)
+ // Compare frame buffers, since we don't care about differing timestamps.
+ EXPECT_TRUE(test::FrameBufsEqual(frames1[i].video_frame_buffer(),
+ frames2[i].video_frame_buffer()));
+}
+
+VideoFrame CreateVideoFrame(int width, int height, uint8_t data) {
+ const int kSizeY = width * height * 2;
+ std::unique_ptr<uint8_t[]> buffer(new uint8_t[kSizeY]);
+ memset(buffer.get(), data, kSizeY);
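+  // Note: the scratch buffer above is never attached to the frame; the
+  // frame's pixels come from the newly created I420Buffer below.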
+ VideoFrame frame(I420Buffer::Create(width, height), kVideoRotation_0, data);
+ frame.set_timestamp(data);
+ // Use data as a ms timestamp.
+ frame.set_timestamp_us(data * rtc::kNumMicrosecsPerMillisec);
+ return frame;
+}
+
+TEST_F(VideoSendStreamTest, EncoderIsProperlyInitializedAndDestroyed) {
+ class EncoderStateObserver : public test::SendTest, public VideoEncoder {
+ public:
+ explicit EncoderStateObserver(
+ test::SingleThreadedTaskQueueForTesting* task_queue)
+ : SendTest(kDefaultTimeoutMs),
+ task_queue_(task_queue),
+ stream_(nullptr),
+ initialized_(false),
+ callback_registered_(false),
+ num_releases_(0),
+ released_(false) {}
+
+ bool IsReleased() {
+ rtc::CritScope lock(&crit_);
+ return released_;
+ }
+
+ bool IsReadyForEncode() {
+ rtc::CritScope lock(&crit_);
+ return initialized_ && callback_registered_;
+ }
+
+ size_t num_releases() {
+ rtc::CritScope lock(&crit_);
+ return num_releases_;
+ }
+
+ private:
+ int32_t InitEncode(const VideoCodec* codecSettings,
+ int32_t numberOfCores,
+ size_t maxPayloadSize) override {
+ rtc::CritScope lock(&crit_);
+ EXPECT_FALSE(initialized_);
+ initialized_ = true;
+ released_ = false;
+ return 0;
+ }
+
+ int32_t Encode(const VideoFrame& inputImage,
+ const CodecSpecificInfo* codecSpecificInfo,
+ const std::vector<FrameType>* frame_types) override {
+ EXPECT_TRUE(IsReadyForEncode());
+
+ observation_complete_.Set();
+ return 0;
+ }
+
+ int32_t RegisterEncodeCompleteCallback(
+ EncodedImageCallback* callback) override {
+ rtc::CritScope lock(&crit_);
+ EXPECT_TRUE(initialized_);
+ callback_registered_ = true;
+ return 0;
+ }
+
+ int32_t Release() override {
+ rtc::CritScope lock(&crit_);
+ EXPECT_TRUE(IsReadyForEncode());
+ EXPECT_FALSE(released_);
+ initialized_ = false;
+ callback_registered_ = false;
+ released_ = true;
+ ++num_releases_;
+ return 0;
+ }
+
+ int32_t SetChannelParameters(uint32_t packetLoss, int64_t rtt) override {
+ EXPECT_TRUE(IsReadyForEncode());
+ return 0;
+ }
+
+ int32_t SetRates(uint32_t newBitRate, uint32_t frameRate) override {
+ EXPECT_TRUE(IsReadyForEncode());
+ return 0;
+ }
+
+ void OnVideoStreamsCreated(
+ VideoSendStream* send_stream,
+ const std::vector<VideoReceiveStream*>& receive_streams) override {
+ stream_ = send_stream;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->encoder_settings.encoder = this;
+ encoder_config_ = encoder_config->Copy();
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timed out while waiting for Encode.";
+
+ task_queue_->SendTask([this]() {
+ EXPECT_EQ(0u, num_releases());
+ stream_->ReconfigureVideoEncoder(std::move(encoder_config_));
+ EXPECT_EQ(0u, num_releases());
+ stream_->Stop();
+ // Encoder should not be released before destroying the VideoSendStream.
+ EXPECT_FALSE(IsReleased());
+ EXPECT_TRUE(IsReadyForEncode());
+ stream_->Start();
+ });
+
+ // Sanity check, make sure we still encode frames with this encoder.
+ EXPECT_TRUE(Wait()) << "Timed out while waiting for Encode.";
+ }
+
+ test::SingleThreadedTaskQueueForTesting* const task_queue_;
+ rtc::CriticalSection crit_;
+ VideoSendStream* stream_;
+ bool initialized_ RTC_GUARDED_BY(crit_);
+ bool callback_registered_ RTC_GUARDED_BY(crit_);
+ size_t num_releases_ RTC_GUARDED_BY(crit_);
+ bool released_ RTC_GUARDED_BY(crit_);
+ VideoEncoderConfig encoder_config_;
+ } test_encoder(&task_queue_);
+
+ RunBaseTest(&test_encoder);
+
+ EXPECT_TRUE(test_encoder.IsReleased());
+ EXPECT_EQ(1u, test_encoder.num_releases());
+}
+
+TEST_F(VideoSendStreamTest, EncoderSetupPropagatesCommonEncoderConfigValues) {
+ class VideoCodecConfigObserver : public test::SendTest,
+ public test::FakeEncoder {
+ public:
+ VideoCodecConfigObserver()
+ : SendTest(kDefaultTimeoutMs),
+ FakeEncoder(Clock::GetRealTimeClock()),
+ init_encode_event_(false, false),
+ num_initializations_(0),
+ stream_(nullptr) {}
+
+ private:
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->encoder_settings.encoder = this;
+ encoder_config->max_bitrate_bps = kFirstMaxBitrateBps;
+ encoder_config_ = encoder_config->Copy();
+ }
+
+ void OnVideoStreamsCreated(
+ VideoSendStream* send_stream,
+ const std::vector<VideoReceiveStream*>& receive_streams) override {
+ stream_ = send_stream;
+ }
+
+ int32_t InitEncode(const VideoCodec* config,
+ int32_t number_of_cores,
+ size_t max_payload_size) override {
+ if (num_initializations_ == 0) {
+        // Verify the initially configured value.
+ EXPECT_EQ(kFirstMaxBitrateBps / 1000, config->maxBitrate);
+ } else {
+ // Verify that changed values are propagated.
+ EXPECT_EQ(kSecondMaxBitrateBps / 1000, config->maxBitrate);
+ }
+ ++num_initializations_;
+ init_encode_event_.Set();
+ return FakeEncoder::InitEncode(config, number_of_cores, max_payload_size);
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(init_encode_event_.Wait(kDefaultTimeoutMs));
+ EXPECT_EQ(1u, num_initializations_) << "VideoEncoder not initialized.";
+
+ encoder_config_.max_bitrate_bps = kSecondMaxBitrateBps;
+ stream_->ReconfigureVideoEncoder(std::move(encoder_config_));
+ EXPECT_TRUE(init_encode_event_.Wait(kDefaultTimeoutMs));
+ EXPECT_EQ(2u, num_initializations_)
+ << "ReconfigureVideoEncoder did not reinitialize the encoder with "
+ "new encoder settings.";
+ }
+
+ const uint32_t kFirstMaxBitrateBps = 1000000;
+ const uint32_t kSecondMaxBitrateBps = 2000000;
+
+ rtc::Event init_encode_event_;
+ size_t num_initializations_;
+ VideoSendStream* stream_;
+ VideoEncoderConfig encoder_config_;
+ } test;
+
+ RunBaseTest(&test);
+}
+
+static const size_t kVideoCodecConfigObserverNumberOfTemporalLayers = 4;
+template <typename T>
+class VideoCodecConfigObserver : public test::SendTest,
+ public test::FakeEncoder {
+ public:
+ VideoCodecConfigObserver(VideoCodecType video_codec_type,
+ const char* codec_name)
+ : SendTest(VideoSendStreamTest::kDefaultTimeoutMs),
+ FakeEncoder(Clock::GetRealTimeClock()),
+ video_codec_type_(video_codec_type),
+ codec_name_(codec_name),
+ init_encode_event_(false, false),
+ num_initializations_(0),
+ stream_(nullptr) {
+ memset(&encoder_settings_, 0, sizeof(encoder_settings_));
+ }
+
+ private:
+ class VideoStreamFactory
+ : public VideoEncoderConfig::VideoStreamFactoryInterface {
+ public:
+ VideoStreamFactory() {}
+
+ private:
+ std::vector<VideoStream> CreateEncoderStreams(
+ int width,
+ int height,
+ const VideoEncoderConfig& encoder_config) override {
+ std::vector<VideoStream> streams =
+ test::CreateVideoStreams(width, height, encoder_config);
+ for (size_t i = 0; i < streams.size(); ++i) {
+ streams[i].temporal_layer_thresholds_bps.resize(
+ kVideoCodecConfigObserverNumberOfTemporalLayers - 1);
+ }
+ return streams;
+ }
+ };
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->encoder_settings.encoder = this;
+ send_config->encoder_settings.payload_name = codec_name_;
+
+ encoder_config->encoder_specific_settings = GetEncoderSpecificSettings();
+ encoder_config->video_stream_factory =
+ new rtc::RefCountedObject<VideoStreamFactory>();
+ encoder_config_ = encoder_config->Copy();
+ }
+
+ void OnVideoStreamsCreated(
+ VideoSendStream* send_stream,
+ const std::vector<VideoReceiveStream*>& receive_streams) override {
+ stream_ = send_stream;
+ }
+
+ int32_t InitEncode(const VideoCodec* config,
+ int32_t number_of_cores,
+ size_t max_payload_size) override {
+ EXPECT_EQ(video_codec_type_, config->codecType);
+ VerifyCodecSpecifics(*config);
+ ++num_initializations_;
+ init_encode_event_.Set();
+ return FakeEncoder::InitEncode(config, number_of_cores, max_payload_size);
+ }
+
+ void VerifyCodecSpecifics(const VideoCodec& config) const;
+ rtc::scoped_refptr<VideoEncoderConfig::EncoderSpecificSettings>
+ GetEncoderSpecificSettings() const;
+
+ void PerformTest() override {
+ EXPECT_TRUE(
+ init_encode_event_.Wait(VideoSendStreamTest::kDefaultTimeoutMs));
+ ASSERT_EQ(1u, num_initializations_) << "VideoEncoder not initialized.";
+
+ encoder_settings_.frameDroppingOn = true;
+ encoder_config_.encoder_specific_settings = GetEncoderSpecificSettings();
+ stream_->ReconfigureVideoEncoder(std::move(encoder_config_));
+ ASSERT_TRUE(
+ init_encode_event_.Wait(VideoSendStreamTest::kDefaultTimeoutMs));
+ EXPECT_EQ(2u, num_initializations_)
+ << "ReconfigureVideoEncoder did not reinitialize the encoder with "
+ "new encoder settings.";
+ }
+
+ int32_t Encode(const VideoFrame& input_image,
+ const CodecSpecificInfo* codec_specific_info,
+ const std::vector<FrameType>* frame_types) override {
+    // Silently skip the encode; FakeEncoder::Encode doesn't produce VP8.
+ return 0;
+ }
+
+ T encoder_settings_;
+ const VideoCodecType video_codec_type_;
+ const char* const codec_name_;
+ rtc::Event init_encode_event_;
+ size_t num_initializations_;
+ VideoSendStream* stream_;
+ VideoEncoderConfig encoder_config_;
+};
+
+template <>
+void VideoCodecConfigObserver<VideoCodecH264>::VerifyCodecSpecifics(
+ const VideoCodec& config) const {
+ EXPECT_EQ(
+ 0, memcmp(&config.H264(), &encoder_settings_, sizeof(encoder_settings_)));
+}
+
+template <>
+rtc::scoped_refptr<VideoEncoderConfig::EncoderSpecificSettings>
+VideoCodecConfigObserver<VideoCodecH264>::GetEncoderSpecificSettings() const {
+ return new rtc::RefCountedObject<
+ VideoEncoderConfig::H264EncoderSpecificSettings>(encoder_settings_);
+}
+
+template <>
+void VideoCodecConfigObserver<VideoCodecVP8>::VerifyCodecSpecifics(
+ const VideoCodec& config) const {
+ // Check that the number of temporal layers has propagated properly to
+ // VideoCodec.
+ EXPECT_EQ(kVideoCodecConfigObserverNumberOfTemporalLayers,
+ config.VP8().numberOfTemporalLayers);
+
+ for (unsigned char i = 0; i < config.numberOfSimulcastStreams; ++i) {
+ EXPECT_EQ(kVideoCodecConfigObserverNumberOfTemporalLayers,
+ config.simulcastStream[i].numberOfTemporalLayers);
+ }
+
+  // Set the expected number of temporal layers as it should have been set
+  // when reconfiguring the encoder, since it does not match the stored
+  // config. Also copy the TemporalLayersFactory pointer that has been
+  // injected by VideoStreamEncoder.
+ VideoCodecVP8 encoder_settings = encoder_settings_;
+ encoder_settings.numberOfTemporalLayers =
+ kVideoCodecConfigObserverNumberOfTemporalLayers;
+ encoder_settings.tl_factory = config.VP8().tl_factory;
+ EXPECT_EQ(
+ 0, memcmp(&config.VP8(), &encoder_settings, sizeof(encoder_settings_)));
+}
+
+template <>
+rtc::scoped_refptr<VideoEncoderConfig::EncoderSpecificSettings>
+VideoCodecConfigObserver<VideoCodecVP8>::GetEncoderSpecificSettings() const {
+ return new rtc::RefCountedObject<
+ VideoEncoderConfig::Vp8EncoderSpecificSettings>(encoder_settings_);
+}
+
+template <>
+void VideoCodecConfigObserver<VideoCodecVP9>::VerifyCodecSpecifics(
+ const VideoCodec& config) const {
+ // Check that the number of temporal layers has propagated properly to
+ // VideoCodec.
+ EXPECT_EQ(kVideoCodecConfigObserverNumberOfTemporalLayers,
+ config.VP9().numberOfTemporalLayers);
+
+ for (unsigned char i = 0; i < config.numberOfSimulcastStreams; ++i) {
+ EXPECT_EQ(kVideoCodecConfigObserverNumberOfTemporalLayers,
+ config.simulcastStream[i].numberOfTemporalLayers);
+ }
+
+  // Set the expected number of temporal layers as it should have been set
+  // when reconfiguring the encoder, since it does not match the stored
+  // config.
+ VideoCodecVP9 encoder_settings = encoder_settings_;
+ encoder_settings.numberOfTemporalLayers =
+ kVideoCodecConfigObserverNumberOfTemporalLayers;
+ EXPECT_EQ(
+ 0, memcmp(&(config.VP9()), &encoder_settings, sizeof(encoder_settings_)));
+}
+
+template <>
+rtc::scoped_refptr<VideoEncoderConfig::EncoderSpecificSettings>
+VideoCodecConfigObserver<VideoCodecVP9>::GetEncoderSpecificSettings() const {
+ return new rtc::RefCountedObject<
+ VideoEncoderConfig::Vp9EncoderSpecificSettings>(encoder_settings_);
+}
+
+TEST_F(VideoSendStreamTest, EncoderSetupPropagatesVp8Config) {
+ VideoCodecConfigObserver<VideoCodecVP8> test(kVideoCodecVP8, "VP8");
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, EncoderSetupPropagatesVp9Config) {
+ VideoCodecConfigObserver<VideoCodecVP9> test(kVideoCodecVP9, "VP9");
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, EncoderSetupPropagatesH264Config) {
+ VideoCodecConfigObserver<VideoCodecH264> test(kVideoCodecH264, "H264");
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, RtcpSenderReportContainsMediaBytesSent) {
+ class RtcpSenderReportTest : public test::SendTest {
+ public:
+    RtcpSenderReportTest()
+        : SendTest(kDefaultTimeoutMs),
+          rtp_packets_sent_(0),
+          media_bytes_sent_(0) {}
+
+ private:
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ rtc::CritScope lock(&crit_);
+ RTPHeader header;
+ EXPECT_TRUE(parser_->Parse(packet, length, &header));
+ ++rtp_packets_sent_;
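+      // Count media payload bytes only: strip the RTP header and padding.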
+ media_bytes_sent_ += length - header.headerLength - header.paddingLength;
+ return SEND_PACKET;
+ }
+
+ Action OnSendRtcp(const uint8_t* packet, size_t length) override {
+ rtc::CritScope lock(&crit_);
+ test::RtcpPacketParser parser;
+ EXPECT_TRUE(parser.Parse(packet, length));
+
+ if (parser.sender_report()->num_packets() > 0) {
+        // Only compare sent media bytes if the sender packet count matches
+        // the number of sent RTP packets (a new RTP packet could be sent
+        // before the RTCP packet).
+ if (parser.sender_report()->sender_octet_count() > 0 &&
+ parser.sender_report()->sender_packet_count() ==
+ rtp_packets_sent_) {
+ EXPECT_EQ(media_bytes_sent_,
+ parser.sender_report()->sender_octet_count());
+ observation_complete_.Set();
+ }
+ }
+
+ return SEND_PACKET;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timed out while waiting for RTCP sender report.";
+ }
+
+ rtc::CriticalSection crit_;
+ size_t rtp_packets_sent_ RTC_GUARDED_BY(&crit_);
+ size_t media_bytes_sent_ RTC_GUARDED_BY(&crit_);
+ } test;
+
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, TranslatesTwoLayerScreencastToTargetBitrate) {
+ static const int kScreencastTargetBitrateKbps = 200;
+
+ class VideoStreamFactory
+ : public VideoEncoderConfig::VideoStreamFactoryInterface {
+ public:
+ VideoStreamFactory() {}
+
+ private:
+ std::vector<VideoStream> CreateEncoderStreams(
+ int width,
+ int height,
+ const VideoEncoderConfig& encoder_config) override {
+ std::vector<VideoStream> streams =
+ test::CreateVideoStreams(width, height, encoder_config);
+ EXPECT_TRUE(streams[0].temporal_layer_thresholds_bps.empty());
+ streams[0].temporal_layer_thresholds_bps.push_back(
+ kScreencastTargetBitrateKbps * 1000);
+ return streams;
+ }
+ };
+
+ class ScreencastTargetBitrateTest : public test::SendTest,
+ public test::FakeEncoder {
+ public:
+ ScreencastTargetBitrateTest()
+ : SendTest(kDefaultTimeoutMs),
+ test::FakeEncoder(Clock::GetRealTimeClock()) {}
+
+ private:
+ int32_t InitEncode(const VideoCodec* config,
+ int32_t number_of_cores,
+ size_t max_payload_size) override {
+ EXPECT_EQ(static_cast<unsigned int>(kScreencastTargetBitrateKbps),
+ config->targetBitrate);
+ observation_complete_.Set();
+ return test::FakeEncoder::InitEncode(
+ config, number_of_cores, max_payload_size);
+ }
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->encoder_settings.encoder = this;
+ EXPECT_EQ(1u, encoder_config->number_of_streams);
+ encoder_config->video_stream_factory =
+ new rtc::RefCountedObject<VideoStreamFactory>();
+ encoder_config->content_type = VideoEncoderConfig::ContentType::kScreen;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait())
+ << "Timed out while waiting for the encoder to be initialized.";
+ }
+ } test;
+
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, ReconfigureBitratesSetsEncoderBitratesCorrectly) {
+  // These are chosen to be "kind of odd" so that they are not accidentally
+  // mistaken for default values.
+ static const int kMinBitrateKbps = 137;
+ static const int kStartBitrateKbps = 345;
+ static const int kLowerMaxBitrateKbps = 312;
+ static const int kMaxBitrateKbps = 413;
+ static const int kIncreasedStartBitrateKbps = 451;
+ static const int kIncreasedMaxBitrateKbps = 597;
+ class EncoderBitrateThresholdObserver : public test::SendTest,
+ public test::FakeEncoder {
+ public:
+ explicit EncoderBitrateThresholdObserver(
+ test::SingleThreadedTaskQueueForTesting* task_queue)
+ : SendTest(kDefaultTimeoutMs),
+ FakeEncoder(Clock::GetRealTimeClock()),
+ task_queue_(task_queue),
+ init_encode_event_(false, false),
+ bitrate_changed_event_(false, false),
+ target_bitrate_(0),
+ num_initializations_(0),
+ call_(nullptr),
+ send_stream_(nullptr) {}
+
+ private:
+ int32_t InitEncode(const VideoCodec* codecSettings,
+ int32_t numberOfCores,
+ size_t maxPayloadSize) override {
+ EXPECT_GE(codecSettings->startBitrate, codecSettings->minBitrate);
+ EXPECT_LE(codecSettings->startBitrate, codecSettings->maxBitrate);
+ if (num_initializations_ == 0) {
+ EXPECT_EQ(static_cast<unsigned int>(kMinBitrateKbps),
+ codecSettings->minBitrate);
+ EXPECT_EQ(static_cast<unsigned int>(kStartBitrateKbps),
+ codecSettings->startBitrate);
+ EXPECT_EQ(static_cast<unsigned int>(kMaxBitrateKbps),
+ codecSettings->maxBitrate);
+ observation_complete_.Set();
+ } else if (num_initializations_ == 1) {
+ EXPECT_EQ(static_cast<unsigned int>(kLowerMaxBitrateKbps),
+ codecSettings->maxBitrate);
+        // The start bitrate should be kept (-1) and capped to the max
+        // bitrate. Since this is not an end-to-end call, no receiver should
+        // have returned a REMB that could lower this estimate.
+ EXPECT_EQ(codecSettings->startBitrate, codecSettings->maxBitrate);
+ } else if (num_initializations_ == 2) {
+ EXPECT_EQ(static_cast<unsigned int>(kIncreasedMaxBitrateKbps),
+ codecSettings->maxBitrate);
+        // The start bitrate will be whatever rate the bitrate controller
+        // has currently configured, within the span of the configured min
+        // and max bitrates.
+ }
+ ++num_initializations_;
+ init_encode_event_.Set();
+
+ return FakeEncoder::InitEncode(codecSettings, numberOfCores,
+ maxPayloadSize);
+ }
+
+ int32_t SetRateAllocation(const BitrateAllocation& bitrate,
+ uint32_t frameRate) override {
+ {
+ rtc::CritScope lock(&crit_);
+ if (target_bitrate_ == bitrate.get_sum_kbps()) {
+ return FakeEncoder::SetRateAllocation(bitrate, frameRate);
+ }
+ target_bitrate_ = bitrate.get_sum_kbps();
+ }
+ bitrate_changed_event_.Set();
+ return FakeEncoder::SetRateAllocation(bitrate, frameRate);
+ }
+
+ void WaitForSetRates(uint32_t expected_bitrate) {
+ EXPECT_TRUE(
+ bitrate_changed_event_.Wait(VideoSendStreamTest::kDefaultTimeoutMs))
+ << "Timed out while waiting encoder rate to be set.";
+ rtc::CritScope lock(&crit_);
+ EXPECT_EQ(expected_bitrate, target_bitrate_);
+ }
+
+ Call::Config GetSenderCallConfig() override {
+ Call::Config config(event_log_.get());
+ config.bitrate_config.min_bitrate_bps = kMinBitrateKbps * 1000;
+ config.bitrate_config.start_bitrate_bps = kStartBitrateKbps * 1000;
+ config.bitrate_config.max_bitrate_bps = kMaxBitrateKbps * 1000;
+ return config;
+ }
+
+ class VideoStreamFactory
+ : public VideoEncoderConfig::VideoStreamFactoryInterface {
+ public:
+ explicit VideoStreamFactory(int min_bitrate_bps)
+ : min_bitrate_bps_(min_bitrate_bps) {}
+
+ private:
+ std::vector<VideoStream> CreateEncoderStreams(
+ int width,
+ int height,
+ const VideoEncoderConfig& encoder_config) override {
+ std::vector<VideoStream> streams =
+ test::CreateVideoStreams(width, height, encoder_config);
+ streams[0].min_bitrate_bps = min_bitrate_bps_;
+ return streams;
+ }
+
+ const int min_bitrate_bps_;
+ };
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->encoder_settings.encoder = this;
+ // Set bitrates lower/higher than min/max to make sure they are properly
+ // capped.
+ encoder_config->max_bitrate_bps = kMaxBitrateKbps * 1000;
+ // Create a new StreamFactory to be able to set
+ // |VideoStream.min_bitrate_bps|.
+ encoder_config->video_stream_factory =
+ new rtc::RefCountedObject<VideoStreamFactory>(kMinBitrateKbps * 1000);
+ encoder_config_ = encoder_config->Copy();
+ }
+
+ void OnCallsCreated(Call* sender_call, Call* receiver_call) override {
+ call_ = sender_call;
+ }
+
+ void OnVideoStreamsCreated(
+ VideoSendStream* send_stream,
+ const std::vector<VideoReceiveStream*>& receive_streams) override {
+ send_stream_ = send_stream;
+ }
+
+ void PerformTest() override {
+ ASSERT_TRUE(
+ init_encode_event_.Wait(VideoSendStreamTest::kDefaultTimeoutMs))
+ << "Timed out while waiting for encoder to be configured.";
+ WaitForSetRates(kStartBitrateKbps);
+ Call::Config::BitrateConfig bitrate_config;
+ bitrate_config.start_bitrate_bps = kIncreasedStartBitrateKbps * 1000;
+ bitrate_config.max_bitrate_bps = kIncreasedMaxBitrateKbps * 1000;
+ task_queue_->SendTask([this, &bitrate_config]() {
+ call_->SetBitrateConfig(bitrate_config);
+ });
+ // Encoder rate is capped by EncoderConfig max_bitrate_bps.
+ WaitForSetRates(kMaxBitrateKbps);
+ encoder_config_.max_bitrate_bps = kLowerMaxBitrateKbps * 1000;
+ send_stream_->ReconfigureVideoEncoder(encoder_config_.Copy());
+ ASSERT_TRUE(
+ init_encode_event_.Wait(VideoSendStreamTest::kDefaultTimeoutMs));
+ EXPECT_EQ(2, num_initializations_)
+ << "Encoder should have been reconfigured with the new value.";
+ WaitForSetRates(kLowerMaxBitrateKbps);
+
+ encoder_config_.max_bitrate_bps = kIncreasedMaxBitrateKbps * 1000;
+ send_stream_->ReconfigureVideoEncoder(encoder_config_.Copy());
+ ASSERT_TRUE(
+ init_encode_event_.Wait(VideoSendStreamTest::kDefaultTimeoutMs));
+ EXPECT_EQ(3, num_initializations_)
+ << "Encoder should have been reconfigured with the new value.";
+ // Expected target bitrate is the start bitrate set in the call to
+ // call_->SetBitrateConfig.
+ WaitForSetRates(kIncreasedStartBitrateKbps);
+ }
+
+ test::SingleThreadedTaskQueueForTesting* const task_queue_;
+ rtc::Event init_encode_event_;
+ rtc::Event bitrate_changed_event_;
+ rtc::CriticalSection crit_;
+ uint32_t target_bitrate_ RTC_GUARDED_BY(&crit_);
+
+ int num_initializations_;
+ webrtc::Call* call_;
+ webrtc::VideoSendStream* send_stream_;
+ webrtc::VideoEncoderConfig encoder_config_;
+ } test(&task_queue_);
+
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, ReportsSentResolution) {
+ static const size_t kNumStreams = 3;
+ // Unusual resolutions to make sure that they are the ones being reported.
+ static const struct {
+ int width;
+ int height;
+ } kEncodedResolution[kNumStreams] = {
+ {241, 181}, {300, 121}, {121, 221}};
+ class ScreencastTargetBitrateTest : public test::SendTest,
+ public test::FakeEncoder {
+ public:
+ ScreencastTargetBitrateTest()
+ : SendTest(kDefaultTimeoutMs),
+ test::FakeEncoder(Clock::GetRealTimeClock()),
+ send_stream_(nullptr) {}
+
+ private:
+ int32_t Encode(const VideoFrame& input_image,
+ const CodecSpecificInfo* codecSpecificInfo,
+ const std::vector<FrameType>* frame_types) override {
+ CodecSpecificInfo specifics;
+ specifics.codecType = kVideoCodecGeneric;
+
+ uint8_t buffer[16] = {0};
+ EncodedImage encoded(buffer, sizeof(buffer), sizeof(buffer));
+ encoded._timeStamp = input_image.timestamp();
+ encoded.capture_time_ms_ = input_image.render_time_ms();
+
+ for (size_t i = 0; i < kNumStreams; ++i) {
+ specifics.codecSpecific.generic.simulcast_idx = static_cast<uint8_t>(i);
+ encoded._frameType = (*frame_types)[i];
+ encoded._encodedWidth = kEncodedResolution[i].width;
+ encoded._encodedHeight = kEncodedResolution[i].height;
+ EncodedImageCallback* callback;
+ {
+ rtc::CritScope cs(&crit_sect_);
+ callback = callback_;
+ }
+ RTC_DCHECK(callback);
+ if (callback->OnEncodedImage(encoded, &specifics, nullptr).error !=
+ EncodedImageCallback::Result::OK) {
+ return -1;
+ }
+ }
+
+ observation_complete_.Set();
+ return 0;
+ }
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->encoder_settings.encoder = this;
+ EXPECT_EQ(kNumStreams, encoder_config->number_of_streams);
+ }
+
+ size_t GetNumVideoStreams() const override { return kNumStreams; }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait())
+ << "Timed out while waiting for the encoder to send one frame.";
+ VideoSendStream::Stats stats = send_stream_->GetStats();
+
+ for (size_t i = 0; i < kNumStreams; ++i) {
+ ASSERT_TRUE(stats.substreams.find(kVideoSendSsrcs[i]) !=
+ stats.substreams.end())
+ << "No stats for SSRC: " << kVideoSendSsrcs[i]
+ << ", stats should exist as soon as frames have been encoded.";
+ VideoSendStream::StreamStats ssrc_stats =
+ stats.substreams[kVideoSendSsrcs[i]];
+ EXPECT_EQ(kEncodedResolution[i].width, ssrc_stats.width);
+ EXPECT_EQ(kEncodedResolution[i].height, ssrc_stats.height);
+ }
+ }
+
+ void OnVideoStreamsCreated(
+ VideoSendStream* send_stream,
+ const std::vector<VideoReceiveStream*>& receive_streams) override {
+ send_stream_ = send_stream;
+ }
+
+ VideoSendStream* send_stream_;
+ } test;
+
+ RunBaseTest(&test);
+}
+
+#if !defined(RTC_DISABLE_VP9)
+class Vp9HeaderObserver : public test::SendTest {
+ public:
+ Vp9HeaderObserver()
+ : SendTest(VideoSendStreamTest::kLongTimeoutMs),
+ vp9_encoder_(VP9Encoder::Create()),
+ vp9_settings_(VideoEncoder::GetDefaultVp9Settings()),
+ packets_sent_(0),
+ frames_sent_(0),
+ expected_width_(0),
+ expected_height_(0) {}
+
+ virtual void ModifyVideoConfigsHook(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) {}
+
+ virtual void InspectHeader(const RTPVideoHeaderVP9& vp9) = 0;
+
+ private:
+ const int kVp9PayloadType = test::CallTest::kVideoSendPayloadType;
+
+ class VideoStreamFactory
+ : public VideoEncoderConfig::VideoStreamFactoryInterface {
+ public:
+ explicit VideoStreamFactory(size_t number_of_temporal_layers)
+ : number_of_temporal_layers_(number_of_temporal_layers) {}
+
+ private:
+ std::vector<VideoStream> CreateEncoderStreams(
+ int width,
+ int height,
+ const VideoEncoderConfig& encoder_config) override {
+ std::vector<VideoStream> streams =
+ test::CreateVideoStreams(width, height, encoder_config);
+ streams[0].temporal_layer_thresholds_bps.resize(
+ number_of_temporal_layers_ - 1);
+ return streams;
+ }
+
+ const size_t number_of_temporal_layers_;
+ };
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->encoder_settings.encoder = vp9_encoder_.get();
+ send_config->encoder_settings.payload_name = "VP9";
+ send_config->encoder_settings.payload_type = kVp9PayloadType;
+ ModifyVideoConfigsHook(send_config, receive_configs, encoder_config);
+ encoder_config->encoder_specific_settings = new rtc::RefCountedObject<
+ VideoEncoderConfig::Vp9EncoderSpecificSettings>(vp9_settings_);
+ EXPECT_EQ(1u, encoder_config->number_of_streams);
+ encoder_config->video_stream_factory =
+ new rtc::RefCountedObject<VideoStreamFactory>(
+ vp9_settings_.numberOfTemporalLayers);
+ encoder_config_ = encoder_config->Copy();
+ }
+
+ void ModifyVideoCaptureStartResolution(int* width,
+ int* height,
+ int* frame_rate) override {
+ expected_width_ = *width;
+ expected_height_ = *height;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Test timed out waiting for VP9 packet, num frames "
+ << frames_sent_;
+ }
+
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ RTPHeader header;
+ EXPECT_TRUE(parser_->Parse(packet, length, &header));
+
+ EXPECT_EQ(kVp9PayloadType, header.payloadType);
+ const uint8_t* payload = packet + header.headerLength;
+ size_t payload_length = length - header.headerLength - header.paddingLength;
+
+ bool new_packet = packets_sent_ == 0 ||
+ IsNewerSequenceNumber(header.sequenceNumber,
+ last_header_.sequenceNumber);
+ if (payload_length > 0 && new_packet) {
+ RtpDepacketizer::ParsedPayload parsed;
+ RtpDepacketizerVp9 depacketizer;
+ EXPECT_TRUE(depacketizer.Parse(&parsed, payload, payload_length));
+ EXPECT_EQ(RtpVideoCodecTypes::kRtpVideoVp9, parsed.type.Video.codec);
+ // Verify common fields for all configurations.
+ VerifyCommonHeader(parsed.type.Video.codecHeader.VP9);
+ CompareConsecutiveFrames(header, parsed.type.Video);
+ // Verify configuration specific settings.
+ InspectHeader(parsed.type.Video.codecHeader.VP9);
+
+ ++packets_sent_;
+ if (header.markerBit) {
+ ++frames_sent_;
+ }
+ last_header_ = header;
+ last_vp9_ = parsed.type.Video.codecHeader.VP9;
+ }
+ return SEND_PACKET;
+ }
+
+ protected:
+ bool ContinuousPictureId(const RTPVideoHeaderVP9& vp9) const {
+ if (last_vp9_.picture_id > vp9.picture_id) {
+ return vp9.picture_id == 0; // Wrap.
+ } else {
+ return vp9.picture_id == last_vp9_.picture_id + 1;
+ }
+ }
+
+ void VerifySpatialIdxWithinFrame(const RTPVideoHeaderVP9& vp9) const {
+ bool new_layer = vp9.spatial_idx != last_vp9_.spatial_idx;
+ EXPECT_EQ(new_layer, vp9.beginning_of_frame);
+ EXPECT_EQ(new_layer, last_vp9_.end_of_frame);
+ EXPECT_EQ(new_layer ? last_vp9_.spatial_idx + 1 : last_vp9_.spatial_idx,
+ vp9.spatial_idx);
+ }
+
+ void VerifyFixedTemporalLayerStructure(const RTPVideoHeaderVP9& vp9,
+ uint8_t num_layers) const {
+ switch (num_layers) {
+ case 0:
+ VerifyTemporalLayerStructure0(vp9);
+ break;
+ case 1:
+ VerifyTemporalLayerStructure1(vp9);
+ break;
+ case 2:
+ VerifyTemporalLayerStructure2(vp9);
+ break;
+ case 3:
+ VerifyTemporalLayerStructure3(vp9);
+ break;
+ default:
+ RTC_NOTREACHED();
+ }
+ }
+
+ void VerifyTemporalLayerStructure0(const RTPVideoHeaderVP9& vp9) const {
+ EXPECT_EQ(kNoTl0PicIdx, vp9.tl0_pic_idx);
+ EXPECT_EQ(kNoTemporalIdx, vp9.temporal_idx); // no tid
+ EXPECT_FALSE(vp9.temporal_up_switch);
+ }
+
+ void VerifyTemporalLayerStructure1(const RTPVideoHeaderVP9& vp9) const {
+ EXPECT_NE(kNoTl0PicIdx, vp9.tl0_pic_idx);
+ EXPECT_EQ(0, vp9.temporal_idx); // 0,0,0,...
+ EXPECT_FALSE(vp9.temporal_up_switch);
+ }
+
+ void VerifyTemporalLayerStructure2(const RTPVideoHeaderVP9& vp9) const {
+ EXPECT_NE(kNoTl0PicIdx, vp9.tl0_pic_idx);
+ EXPECT_GE(vp9.temporal_idx, 0); // 0,1,0,1,... (tid reset on I-frames).
+ EXPECT_LE(vp9.temporal_idx, 1);
+ EXPECT_EQ(vp9.temporal_idx > 0, vp9.temporal_up_switch);
+ if (IsNewPictureId(vp9)) {
+ uint8_t expected_tid =
+ (!vp9.inter_pic_predicted || last_vp9_.temporal_idx == 1) ? 0 : 1;
+ EXPECT_EQ(expected_tid, vp9.temporal_idx);
+ }
+ }
+
+ void VerifyTemporalLayerStructure3(const RTPVideoHeaderVP9& vp9) const {
+ EXPECT_NE(kNoTl0PicIdx, vp9.tl0_pic_idx);
+ EXPECT_GE(vp9.temporal_idx, 0); // 0,2,1,2,... (tid reset on I-frames).
+ EXPECT_LE(vp9.temporal_idx, 2);
+ if (IsNewPictureId(vp9) && vp9.inter_pic_predicted) {
+ EXPECT_NE(vp9.temporal_idx, last_vp9_.temporal_idx);
+ switch (vp9.temporal_idx) {
+ case 0:
+ EXPECT_EQ(2, last_vp9_.temporal_idx);
+ EXPECT_FALSE(vp9.temporal_up_switch);
+ break;
+ case 1:
+ EXPECT_EQ(2, last_vp9_.temporal_idx);
+ EXPECT_TRUE(vp9.temporal_up_switch);
+ break;
+ case 2:
+ EXPECT_EQ(last_vp9_.temporal_idx == 0, vp9.temporal_up_switch);
+ break;
+ }
+ }
+ }
+
+ void VerifyTl0Idx(const RTPVideoHeaderVP9& vp9) const {
+ if (vp9.tl0_pic_idx == kNoTl0PicIdx)
+ return;
+
+ uint8_t expected_tl0_idx = last_vp9_.tl0_pic_idx;
+ if (vp9.temporal_idx == 0)
+ ++expected_tl0_idx;
+ EXPECT_EQ(expected_tl0_idx, vp9.tl0_pic_idx);
+ }
+
+ bool IsNewPictureId(const RTPVideoHeaderVP9& vp9) const {
+ return frames_sent_ > 0 && (vp9.picture_id != last_vp9_.picture_id);
+ }
+
+ // Flexible mode (F=1): Non-flexible mode (F=0):
+ //
+ // +-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+
+ // |I|P|L|F|B|E|V|-| |I|P|L|F|B|E|V|-|
+ // +-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+
+ // I: |M| PICTURE ID | I: |M| PICTURE ID |
+ // +-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+
+ // M: | EXTENDED PID | M: | EXTENDED PID |
+ // +-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+
+ // L: | T |U| S |D| L: | T |U| S |D|
+ // +-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+
+ // P,F: | P_DIFF |X|N| | TL0PICIDX |
+ // +-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+
+ // X: |EXTENDED P_DIFF| V: | SS .. |
+ // +-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+
+ // V: | SS .. |
+ // +-+-+-+-+-+-+-+-+
+ void VerifyCommonHeader(const RTPVideoHeaderVP9& vp9) const {
+ EXPECT_EQ(kMaxTwoBytePictureId, vp9.max_picture_id); // M:1
+ EXPECT_NE(kNoPictureId, vp9.picture_id); // I:1
+ EXPECT_EQ(vp9_settings_.flexibleMode, vp9.flexible_mode); // F
+
+ if (vp9_settings_.numberOfSpatialLayers > 1) {
+ EXPECT_LT(vp9.spatial_idx, vp9_settings_.numberOfSpatialLayers);
+ } else if (vp9_settings_.numberOfTemporalLayers > 1) {
+ EXPECT_EQ(vp9.spatial_idx, 0);
+ } else {
+ EXPECT_EQ(vp9.spatial_idx, kNoSpatialIdx);
+ }
+
+ if (vp9_settings_.numberOfTemporalLayers > 1) {
+ EXPECT_LT(vp9.temporal_idx, vp9_settings_.numberOfTemporalLayers);
+ } else if (vp9_settings_.numberOfSpatialLayers > 1) {
+ EXPECT_EQ(vp9.temporal_idx, 0);
+ } else {
+ EXPECT_EQ(vp9.temporal_idx, kNoTemporalIdx);
+ }
+
+ if (vp9.ss_data_available) // V
+ VerifySsData(vp9);
+
+ if (frames_sent_ == 0)
+ EXPECT_FALSE(vp9.inter_pic_predicted); // P
+
+ if (!vp9.inter_pic_predicted) {
+ EXPECT_TRUE(vp9.temporal_idx == 0 || vp9.temporal_idx == kNoTemporalIdx);
+ EXPECT_FALSE(vp9.temporal_up_switch);
+ }
+ }
+
+ // Scalability structure (SS).
+ //
+ // +-+-+-+-+-+-+-+-+
+ // V: | N_S |Y|G|-|-|-|
+ // +-+-+-+-+-+-+-+-+
+ // Y: | WIDTH | N_S + 1 times
+ // +-+-+-+-+-+-+-+-+
+ // | HEIGHT |
+ // +-+-+-+-+-+-+-+-+
+ // G: | N_G |
+ // +-+-+-+-+-+-+-+-+
+ // N_G: | T |U| R |-|-| N_G times
+ // +-+-+-+-+-+-+-+-+
+ // | P_DIFF | R times
+ // +-+-+-+-+-+-+-+-+
+ void VerifySsData(const RTPVideoHeaderVP9& vp9) const {
+ EXPECT_TRUE(vp9.ss_data_available); // V
+ EXPECT_EQ(vp9_settings_.numberOfSpatialLayers, // N_S + 1
+ vp9.num_spatial_layers);
+ EXPECT_TRUE(vp9.spatial_layer_resolution_present); // Y:1
+ int expected_width = expected_width_;
+ int expected_height = expected_height_;
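+    // The layers are assumed to be downscaled by a factor of two per spatial
+    // layer, with the top layer (highest index) at full capture resolution.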
+ for (int i = static_cast<int>(vp9.num_spatial_layers) - 1; i >= 0; --i) {
+ EXPECT_EQ(expected_width, vp9.width[i]); // WIDTH
+ EXPECT_EQ(expected_height, vp9.height[i]); // HEIGHT
+ expected_width /= 2;
+ expected_height /= 2;
+ }
+ }
+
+ void CompareConsecutiveFrames(const RTPHeader& header,
+ const RTPVideoHeader& video) const {
+ const RTPVideoHeaderVP9& vp9 = video.codecHeader.VP9;
+
+ bool new_frame = packets_sent_ == 0 ||
+ IsNewerTimestamp(header.timestamp, last_header_.timestamp);
+ EXPECT_EQ(new_frame, video.is_first_packet_in_frame);
+ if (!new_frame) {
+ EXPECT_FALSE(last_header_.markerBit);
+ EXPECT_EQ(last_header_.timestamp, header.timestamp);
+ EXPECT_EQ(last_vp9_.picture_id, vp9.picture_id);
+ EXPECT_EQ(last_vp9_.temporal_idx, vp9.temporal_idx);
+ EXPECT_EQ(last_vp9_.tl0_pic_idx, vp9.tl0_pic_idx);
+ VerifySpatialIdxWithinFrame(vp9);
+ return;
+ }
+ // New frame.
+ EXPECT_TRUE(vp9.beginning_of_frame);
+
+ // Compare with last packet in previous frame.
+ if (frames_sent_ == 0)
+ return;
+ EXPECT_TRUE(last_vp9_.end_of_frame);
+ EXPECT_TRUE(last_header_.markerBit);
+ EXPECT_TRUE(ContinuousPictureId(vp9));
+ VerifyTl0Idx(vp9);
+ }
+
+ std::unique_ptr<VP9Encoder> vp9_encoder_;
+ VideoCodecVP9 vp9_settings_;
+ webrtc::VideoEncoderConfig encoder_config_;
+ RTPHeader last_header_;
+ RTPVideoHeaderVP9 last_vp9_;
+ size_t packets_sent_;
+ size_t frames_sent_;
+ int expected_width_;
+ int expected_height_;
+};
+
+TEST_F(VideoSendStreamTest, Vp9NonFlexMode_1Tl1SLayers) {
+ const uint8_t kNumTemporalLayers = 1;
+ const uint8_t kNumSpatialLayers = 1;
+ TestVp9NonFlexMode(kNumTemporalLayers, kNumSpatialLayers);
+}
+
+TEST_F(VideoSendStreamTest, Vp9NonFlexMode_2Tl1SLayers) {
+ const uint8_t kNumTemporalLayers = 2;
+ const uint8_t kNumSpatialLayers = 1;
+ TestVp9NonFlexMode(kNumTemporalLayers, kNumSpatialLayers);
+}
+
+TEST_F(VideoSendStreamTest, Vp9NonFlexMode_3Tl1SLayers) {
+ const uint8_t kNumTemporalLayers = 3;
+ const uint8_t kNumSpatialLayers = 1;
+ TestVp9NonFlexMode(kNumTemporalLayers, kNumSpatialLayers);
+}
+
+TEST_F(VideoSendStreamTest, Vp9NonFlexMode_1Tl2SLayers) {
+ const uint8_t kNumTemporalLayers = 1;
+ const uint8_t kNumSpatialLayers = 2;
+ TestVp9NonFlexMode(kNumTemporalLayers, kNumSpatialLayers);
+}
+
+TEST_F(VideoSendStreamTest, Vp9NonFlexMode_2Tl2SLayers) {
+ const uint8_t kNumTemporalLayers = 2;
+ const uint8_t kNumSpatialLayers = 2;
+ TestVp9NonFlexMode(kNumTemporalLayers, kNumSpatialLayers);
+}
+
+TEST_F(VideoSendStreamTest, Vp9NonFlexMode_3Tl2SLayers) {
+ const uint8_t kNumTemporalLayers = 3;
+ const uint8_t kNumSpatialLayers = 2;
+ TestVp9NonFlexMode(kNumTemporalLayers, kNumSpatialLayers);
+}
+
+void VideoSendStreamTest::TestVp9NonFlexMode(uint8_t num_temporal_layers,
+ uint8_t num_spatial_layers) {
+ static const size_t kNumFramesToSend = 100;
+  // Set to a value < kNumFramesToSend and coprime to the length of the
+  // temporal layer structures to verify that the temporal id is reset on key
+  // frames.
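+  // (For example, with three temporal layers the fixed pattern 0,2,1,2 has
+  // length four; gcd(31, 4) == 1, so key frames land at every position in the
+  // pattern over the course of the test.)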
+ static const int kKeyFrameInterval = 31;
+ class NonFlexibleMode : public Vp9HeaderObserver {
+ public:
+ NonFlexibleMode(uint8_t num_temporal_layers, uint8_t num_spatial_layers)
+ : num_temporal_layers_(num_temporal_layers),
+ num_spatial_layers_(num_spatial_layers),
+ l_field_(num_temporal_layers > 1 || num_spatial_layers > 1) {}
+ void ModifyVideoConfigsHook(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ vp9_settings_.flexibleMode = false;
+ vp9_settings_.frameDroppingOn = false;
+ vp9_settings_.keyFrameInterval = kKeyFrameInterval;
+ vp9_settings_.numberOfTemporalLayers = num_temporal_layers_;
+ vp9_settings_.numberOfSpatialLayers = num_spatial_layers_;
+ }
+
+ void InspectHeader(const RTPVideoHeaderVP9& vp9) override {
+ bool ss_data_expected =
+ !vp9.inter_pic_predicted && vp9.beginning_of_frame &&
+ (vp9.spatial_idx == 0 || vp9.spatial_idx == kNoSpatialIdx);
+ EXPECT_EQ(ss_data_expected, vp9.ss_data_available);
+ if (num_spatial_layers_ > 1) {
+ EXPECT_EQ(vp9.spatial_idx > 0, vp9.inter_layer_predicted);
+ } else {
+ EXPECT_FALSE(vp9.inter_layer_predicted);
+ }
+
+ EXPECT_EQ(!vp9.inter_pic_predicted,
+ frames_sent_ % kKeyFrameInterval == 0);
+
+ if (IsNewPictureId(vp9)) {
+ if (num_temporal_layers_ == 1 && num_spatial_layers_ == 1) {
+ EXPECT_EQ(kNoSpatialIdx, vp9.spatial_idx);
+ } else {
+ EXPECT_EQ(0, vp9.spatial_idx);
+ }
+ if (num_spatial_layers_ > 1)
+ EXPECT_EQ(num_spatial_layers_ - 1, last_vp9_.spatial_idx);
+ }
+
+ VerifyFixedTemporalLayerStructure(vp9,
+ l_field_ ? num_temporal_layers_ : 0);
+
+ if (frames_sent_ > kNumFramesToSend)
+ observation_complete_.Set();
+ }
+ const uint8_t num_temporal_layers_;
+ const uint8_t num_spatial_layers_;
+ const bool l_field_;
+ } test(num_temporal_layers, num_spatial_layers);
+
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, Vp9NonFlexModeSmallResolution) {
+ static const size_t kNumFramesToSend = 50;
+ static const int kWidth = 4;
+ static const int kHeight = 4;
+ class NonFlexibleModeResolution : public Vp9HeaderObserver {
+ void ModifyVideoConfigsHook(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ vp9_settings_.flexibleMode = false;
+ vp9_settings_.numberOfTemporalLayers = 1;
+ vp9_settings_.numberOfSpatialLayers = 1;
+
+ EXPECT_EQ(1u, encoder_config->number_of_streams);
+ }
+
+ void InspectHeader(const RTPVideoHeaderVP9& vp9_header) override {
+ if (frames_sent_ > kNumFramesToSend)
+ observation_complete_.Set();
+ }
+
+ void ModifyVideoCaptureStartResolution(int* width,
+ int* height,
+ int* frame_rate) override {
+ expected_width_ = kWidth;
+ expected_height_ = kHeight;
+ *width = kWidth;
+ *height = kHeight;
+ }
+ } test;
+
+ RunBaseTest(&test);
+}
+
+#if defined(WEBRTC_ANDROID)
+// Crashes on Android; bugs.webrtc.org/7401
+#define MAYBE_Vp9FlexModeRefCount DISABLED_Vp9FlexModeRefCount
+#else
+#define MAYBE_Vp9FlexModeRefCount Vp9FlexModeRefCount
+#endif
+TEST_F(VideoSendStreamTest, MAYBE_Vp9FlexModeRefCount) {
+ class FlexibleMode : public Vp9HeaderObserver {
+ void ModifyVideoConfigsHook(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ encoder_config->content_type = VideoEncoderConfig::ContentType::kScreen;
+ vp9_settings_.flexibleMode = true;
+ vp9_settings_.numberOfTemporalLayers = 1;
+ vp9_settings_.numberOfSpatialLayers = 2;
+ }
+
+ void InspectHeader(const RTPVideoHeaderVP9& vp9_header) override {
+ EXPECT_TRUE(vp9_header.flexible_mode);
+ EXPECT_EQ(kNoTl0PicIdx, vp9_header.tl0_pic_idx);
+ if (vp9_header.inter_pic_predicted) {
+ EXPECT_GT(vp9_header.num_ref_pics, 0u);
+ observation_complete_.Set();
+ }
+ }
+ } test;
+
+ RunBaseTest(&test);
+}
+#endif // !defined(RTC_DISABLE_VP9)
+
+void VideoSendStreamTest::TestRequestSourceRotateVideo(
+ bool support_orientation_ext) {
+ CreateSenderCall(Call::Config(event_log_.get()));
+
+ test::NullTransport transport;
+ CreateSendConfig(1, 0, 0, &transport);
+ video_send_config_.rtp.extensions.clear();
+ if (support_orientation_ext) {
+ video_send_config_.rtp.extensions.push_back(
+ RtpExtension(RtpExtension::kVideoRotationUri, 1));
+ }
+
+ CreateVideoStreams();
+ test::FrameForwarder forwarder;
+ video_send_stream_->SetSource(
+ &forwarder, VideoSendStream::DegradationPreference::kMaintainFramerate);
+
+ EXPECT_TRUE(forwarder.sink_wants().rotation_applied !=
+ support_orientation_ext);
+
+ DestroyStreams();
+}
+
+TEST_F(VideoSendStreamTest,
+ RequestSourceRotateIfVideoOrientationExtensionNotSupported) {
+ TestRequestSourceRotateVideo(false);
+}
+
+TEST_F(VideoSendStreamTest,
+ DoNotRequestsRotationIfVideoOrientationExtensionSupported) {
+ TestRequestSourceRotateVideo(true);
+}
+
+// This test verifies that overhead is removed from the bandwidth estimate by
+// testing that the maximum possible target payload rate is smaller than the
+// maximum bandwidth estimate by the overhead rate.
+TEST_F(VideoSendStreamTest, RemoveOverheadFromBandwidth) {
+ test::ScopedFieldTrials override_field_trials(
+ "WebRTC-SendSideBwe-WithOverhead/Enabled/");
+ class RemoveOverheadFromBandwidthTest : public test::EndToEndTest,
+ public test::FakeEncoder {
+ public:
+ explicit RemoveOverheadFromBandwidthTest(
+ test::SingleThreadedTaskQueueForTesting* task_queue)
+ : EndToEndTest(test::CallTest::kDefaultTimeoutMs),
+ FakeEncoder(Clock::GetRealTimeClock()),
+ task_queue_(task_queue),
+ call_(nullptr),
+ max_bitrate_bps_(0),
+ first_packet_sent_(false),
+ bitrate_changed_event_(false, false) {}
+
+ int32_t SetRateAllocation(const BitrateAllocation& bitrate,
+ uint32_t frameRate) override {
+ rtc::CritScope lock(&crit_);
+      // Wait for the first sent packet so that the VideoSendStream knows the
+      // rtp_overhead.
+ if (first_packet_sent_) {
+ max_bitrate_bps_ = bitrate.get_sum_bps();
+ bitrate_changed_event_.Set();
+ }
+ return FakeEncoder::SetRateAllocation(bitrate, frameRate);
+ }
+
+ void OnCallsCreated(Call* sender_call, Call* receiver_call) override {
+ call_ = sender_call;
+ }
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ send_config->rtp.max_packet_size = 1200;
+ send_config->encoder_settings.encoder = this;
+ EXPECT_FALSE(send_config->rtp.extensions.empty());
+ }
+
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ rtc::CritScope lock(&crit_);
+ first_packet_sent_ = true;
+ return SEND_PACKET;
+ }
+
+ void PerformTest() override {
+ Call::Config::BitrateConfig bitrate_config;
+ constexpr int kStartBitrateBps = 60000;
+ constexpr int kMaxBitrateBps = 60000;
+ constexpr int kMinBitrateBps = 10000;
+ bitrate_config.start_bitrate_bps = kStartBitrateBps;
+ bitrate_config.max_bitrate_bps = kMaxBitrateBps;
+ bitrate_config.min_bitrate_bps = kMinBitrateBps;
+ task_queue_->SendTask([this, &bitrate_config]() {
+ call_->SetBitrateConfig(bitrate_config);
+ call_->OnTransportOverheadChanged(webrtc::MediaType::VIDEO, 40);
+ });
+
+      // At a bitrate of 60kbps with a packet size of 1200B and an overhead of
+      // 40B per packet, video produces 2240bps of overhead, so the encoder
+      // bandwidth should be set to 57760bps.
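+      // (60000 bps / (1200 B * 8) = 6.25, i.e. at most 7 packets per second;
+      // 7 packets * 40 B * 8 = 2240 bps; 60000 - 2240 = 57760 bps.)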
+ bitrate_changed_event_.Wait(VideoSendStreamTest::kDefaultTimeoutMs);
+ {
+ rtc::CritScope lock(&crit_);
+ EXPECT_LE(max_bitrate_bps_, 57760u);
+ }
+ }
+
+ private:
+ test::SingleThreadedTaskQueueForTesting* const task_queue_;
+ Call* call_;
+ rtc::CriticalSection crit_;
+ uint32_t max_bitrate_bps_ RTC_GUARDED_BY(&crit_);
+ bool first_packet_sent_ RTC_GUARDED_BY(&crit_);
+ rtc::Event bitrate_changed_event_;
+ } test(&task_queue_);
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, SendsKeepAlive) {
+ const int kTimeoutMs = 50; // Really short timeout for testing.
+
+ class KeepaliveObserver : public test::SendTest {
+ public:
+ KeepaliveObserver() : SendTest(kDefaultTimeoutMs) {}
+
+ void OnRtpTransportControllerSendCreated(
+ RtpTransportControllerSend* controller) override {
+ RtpKeepAliveConfig config;
+ config.timeout_interval_ms = kTimeoutMs;
+ config.payload_type = CallTest::kDefaultKeepalivePayloadType;
+ controller->SetKeepAliveConfig(config);
+ }
+
+ private:
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ RTPHeader header;
+ EXPECT_TRUE(parser_->Parse(packet, length, &header));
+
+ if (header.payloadType != CallTest::kDefaultKeepalivePayloadType) {
+ // The video stream has started. Stop it now.
+ if (capturer_)
+ capturer_->Stop();
+ } else {
+ observation_complete_.Set();
+ }
+
+ return SEND_PACKET;
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timed out while waiting for keep-alive packet.";
+ }
+
+ void OnFrameGeneratorCapturerCreated(
+ test::FrameGeneratorCapturer* frame_generator_capturer) override {
+ capturer_ = frame_generator_capturer;
+ }
+
+ test::FrameGeneratorCapturer* capturer_ = nullptr;
+ } test;
+
+ RunBaseTest(&test);
+}
+
+TEST_F(VideoSendStreamTest, ConfiguresAlrWhenSendSideOn) {
+ const std::string kAlrProbingExperiment =
+ std::string(AlrDetector::kScreenshareProbingBweExperimentName) +
+ "/1.0,2875,80,40,-60,3/";
+ test::ScopedFieldTrials alr_experiment(kAlrProbingExperiment);
+ class PacingFactorObserver : public test::SendTest {
+ public:
+ PacingFactorObserver(bool configure_send_side, float expected_pacing_factor)
+ : test::SendTest(kDefaultTimeoutMs),
+ configure_send_side_(configure_send_side),
+ expected_pacing_factor_(expected_pacing_factor),
+ paced_sender_(nullptr) {}
+
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ // Check if send-side bwe extension is already present, and remove it if
+ // it is not desired.
+ bool has_send_side = false;
+ for (auto it = send_config->rtp.extensions.begin();
+ it != send_config->rtp.extensions.end(); ++it) {
+ if (it->uri == RtpExtension::kTransportSequenceNumberUri) {
+ if (configure_send_side_) {
+ has_send_side = true;
+ } else {
+ send_config->rtp.extensions.erase(it);
+ }
+ break;
+ }
+ }
+
+ if (configure_send_side_ && !has_send_side) {
+ // Want send side, not present by default, so add it.
+ send_config->rtp.extensions.emplace_back(
+ RtpExtension::kTransportSequenceNumberUri,
+ RtpExtension::kTransportSequenceNumberDefaultId);
+ }
+
+ // ALR only enabled for screenshare.
+ encoder_config->content_type = VideoEncoderConfig::ContentType::kScreen;
+ }
+
+ void OnRtpTransportControllerSendCreated(
+ RtpTransportControllerSend* controller) override {
+ // Grab a reference to the pacer.
+ paced_sender_ = controller->pacer();
+ }
+
+ void OnVideoStreamsCreated(
+ VideoSendStream* send_stream,
+ const std::vector<VideoReceiveStream*>& receive_streams) override {
+ // Video streams created, check that pacer is correctly configured.
+ EXPECT_EQ(expected_pacing_factor_, paced_sender_->GetPacingFactor());
+ observation_complete_.Set();
+ }
+
+ void PerformTest() override {
+ EXPECT_TRUE(Wait()) << "Timed out while waiting for pacer config.";
+ }
+
+ private:
+ const bool configure_send_side_;
+ const float expected_pacing_factor_;
+ const PacedSender* paced_sender_;
+ };
+
+ // Send-side bwe on, use pacing factor from |kAlrProbingExperiment| above.
+ PacingFactorObserver test_with_send_side(true, 1.0f);
+ RunBaseTest(&test_with_send_side);
+
+ // Send-side bwe off, use default pacing factor.
+ PacingFactorObserver test_without_send_side(
+ false, PacedSender::kDefaultPaceMultiplier);
+ RunBaseTest(&test_without_send_side);
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/video/video_stream_decoder.cc b/third_party/libwebrtc/webrtc/video/video_stream_decoder.cc
new file mode 100644
index 0000000000..b27a52ea89
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/video_stream_decoder.cc
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/video_stream_decoder.h"
+
+#include <algorithm>
+#include <map>
+#include <vector>
+
+#include "common_video/include/frame_callback.h"
+#include "modules/video_coding/video_coding_impl.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "system_wrappers/include/metrics.h"
+#include "video/call_stats.h"
+#include "video/payload_router.h"
+#include "video/receive_statistics_proxy.h"
+
+namespace webrtc {
+
+VideoStreamDecoder::VideoStreamDecoder(
+ vcm::VideoReceiver* video_receiver,
+ VCMFrameTypeCallback* vcm_frame_type_callback,
+ VCMPacketRequestCallback* vcm_packet_request_callback,
+ bool enable_nack,
+ bool enable_fec,
+ ReceiveStatisticsProxy* receive_statistics_proxy,
+ rtc::VideoSinkInterface<VideoFrame>* incoming_video_stream)
+ : video_receiver_(video_receiver),
+ receive_stats_callback_(receive_statistics_proxy),
+ incoming_video_stream_(incoming_video_stream),
+ last_rtt_ms_(0) {
+ RTC_DCHECK(video_receiver_);
+
+ static const int kMaxPacketAgeToNack = 450;
+ static const int kMaxNackListSize = 250;
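+  // These settings bound the NACK list to kMaxNackListSize packets and stop
+  // renacking packets older than kMaxPacketAgeToNack; the trailing 0 (the
+  // maximum incomplete time) leaves that check disabled.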
+ video_receiver_->SetNackSettings(kMaxNackListSize,
+ kMaxPacketAgeToNack, 0);
+ video_receiver_->RegisterReceiveCallback(this);
+ video_receiver_->RegisterFrameTypeCallback(vcm_frame_type_callback);
+ video_receiver_->RegisterReceiveStatisticsCallback(this);
+
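+  // Map the nack/fec flags onto a VCM protection mode. Note that FEC without
+  // NACK maps to kProtectionNone here.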
+ VCMVideoProtection video_protection =
+ enable_nack ? (enable_fec ? kProtectionNackFEC : kProtectionNack)
+ : kProtectionNone;
+
+ VCMDecodeErrorMode decode_error_mode = enable_nack ? kNoErrors : kWithErrors;
+ video_receiver_->SetVideoProtection(video_protection, true);
+ video_receiver_->SetDecodeErrorMode(decode_error_mode);
+ VCMPacketRequestCallback* packet_request_callback =
+ enable_nack ? vcm_packet_request_callback : nullptr;
+ video_receiver_->RegisterPacketRequestCallback(packet_request_callback);
+}
+
+VideoStreamDecoder::~VideoStreamDecoder() {
+ // Note: There's an assumption at this point that the decoder thread is
+  // *not* running. If it were, there could be a race on each of these
+ // callbacks.
+
+ // Unset all the callback pointers that we set in the ctor.
+ video_receiver_->RegisterPacketRequestCallback(nullptr);
+ video_receiver_->RegisterReceiveStatisticsCallback(nullptr);
+ video_receiver_->RegisterFrameTypeCallback(nullptr);
+ video_receiver_->RegisterReceiveCallback(nullptr);
+}
+
+// Do not acquire the lock of |video_receiver_| in this function. Decode
+// callback won't necessarily be called from the decoding thread. The decoding
+// thread may have held the lock when calling VideoDecoder::Decode, Reset, or
+// Release. Acquiring the same lock in the path of decode callback can deadlock.
+int32_t VideoStreamDecoder::FrameToRender(VideoFrame& video_frame,
+ rtc::Optional<uint8_t> qp,
+ VideoContentType content_type) {
+ receive_stats_callback_->OnDecodedFrame(qp, content_type);
+ incoming_video_stream_->OnFrame(video_frame);
+ return 0;
+}
+
+int32_t VideoStreamDecoder::ReceivedDecodedReferenceFrame(
+ const uint64_t picture_id) {
+ RTC_NOTREACHED();
+ return 0;
+}
+
+void VideoStreamDecoder::OnIncomingPayloadType(int payload_type) {
+ receive_stats_callback_->OnIncomingPayloadType(payload_type);
+}
+
+void VideoStreamDecoder::OnDecoderImplementationName(
+ const char* implementation_name) {
+ receive_stats_callback_->OnDecoderImplementationName(implementation_name);
+}
+
+void VideoStreamDecoder::OnReceiveRatesUpdated(uint32_t bit_rate,
+ uint32_t frame_rate) {
+ receive_stats_callback_->OnIncomingRate(frame_rate, bit_rate);
+}
+
+void VideoStreamDecoder::OnDiscardedPacketsUpdated(int discarded_packets) {
+ receive_stats_callback_->OnDiscardedPacketsUpdated(discarded_packets);
+}
+
+void VideoStreamDecoder::OnFrameCountsUpdated(const FrameCounts& frame_counts) {
+ receive_stats_callback_->OnFrameCountsUpdated(frame_counts);
+}
+
+void VideoStreamDecoder::OnFrameBufferTimingsUpdated(int decode_ms,
+ int max_decode_ms,
+ int current_delay_ms,
+ int target_delay_ms,
+ int jitter_buffer_ms,
+ int min_playout_delay_ms,
+ int render_delay_ms) {}
+
+void VideoStreamDecoder::OnTimingFrameInfoUpdated(const TimingFrameInfo& info) {
+}
+
+void VideoStreamDecoder::OnCompleteFrame(bool is_keyframe,
+ size_t size_bytes,
+ VideoContentType content_type) {}
+
+void VideoStreamDecoder::OnRttUpdate(int64_t avg_rtt_ms, int64_t max_rtt_ms) {
+ video_receiver_->SetReceiveChannelParameters(max_rtt_ms);
+
+ rtc::CritScope lock(&crit_);
+ last_rtt_ms_ = avg_rtt_ms;
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/video/video_stream_decoder.h b/third_party/libwebrtc/webrtc/video/video_stream_decoder.h
new file mode 100644
index 0000000000..4f996bdeba
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/video_stream_decoder.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_VIDEO_STREAM_DECODER_H_
+#define VIDEO_VIDEO_STREAM_DECODER_H_
+
+#include <list>
+#include <map>
+#include <memory>
+#include <vector>
+
+#include "media/base/videosinkinterface.h"
+#include "modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h"
+#include "modules/video_coding/include/video_coding_defines.h"
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/scoped_ref_ptr.h"
+#include "typedefs.h" // NOLINT(build/include)
+
+namespace webrtc {
+
+class CallStatsObserver;
+class ChannelStatsObserver;
+class EncodedImageCallback;
+class ReceiveStatisticsProxy;
+class VideoRenderCallback;
+
+namespace vcm {
+class VideoReceiver;
+} // namespace vcm
+
+enum StreamType {
+ kViEStreamTypeNormal = 0, // Normal media stream
+ kViEStreamTypeRtx = 1 // Retransmission media stream
+};
+
+class VideoStreamDecoder : public VCMReceiveCallback,
+ public VCMReceiveStatisticsCallback,
+ public CallStatsObserver {
+ public:
+ friend class ChannelStatsObserver;
+
+ VideoStreamDecoder(
+ vcm::VideoReceiver* video_receiver,
+ VCMFrameTypeCallback* vcm_frame_type_callback,
+ VCMPacketRequestCallback* vcm_packet_request_callback,
+ bool enable_nack,
+ bool enable_fec,
+ ReceiveStatisticsProxy* receive_statistics_proxy,
+ rtc::VideoSinkInterface<VideoFrame>* incoming_video_stream);
+ ~VideoStreamDecoder();
+
+ // Implements VCMReceiveCallback.
+ int32_t FrameToRender(VideoFrame& video_frame,
+ rtc::Optional<uint8_t> qp,
+ VideoContentType content_type) override;
+ int32_t ReceivedDecodedReferenceFrame(const uint64_t picture_id) override;
+ void OnIncomingPayloadType(int payload_type) override;
+ void OnDecoderImplementationName(const char* implementation_name) override;
+
+ // Implements VCMReceiveStatisticsCallback.
+ void OnReceiveRatesUpdated(uint32_t bit_rate, uint32_t frame_rate) override;
+ void OnDiscardedPacketsUpdated(int discarded_packets) override;
+ void OnFrameCountsUpdated(const FrameCounts& frame_counts) override;
+ void OnCompleteFrame(bool is_keyframe,
+ size_t size_bytes,
+ VideoContentType content_type) override;
+ void OnFrameBufferTimingsUpdated(int decode_ms,
+ int max_decode_ms,
+ int current_delay_ms,
+ int target_delay_ms,
+ int jitter_buffer_ms,
+ int min_playout_delay_ms,
+ int render_delay_ms) override;
+
+ void OnTimingFrameInfoUpdated(const TimingFrameInfo& info) override;
+
+ void RegisterReceiveStatisticsProxy(
+ ReceiveStatisticsProxy* receive_statistics_proxy);
+
+ // Implements StatsObserver.
+ void OnRttUpdate(int64_t avg_rtt_ms, int64_t max_rtt_ms) override;
+
+ private:
+ // Used for all registered callbacks except rendering.
+ rtc::CriticalSection crit_;
+
+ vcm::VideoReceiver* const video_receiver_;
+
+ ReceiveStatisticsProxy* const receive_stats_callback_;
+ rtc::VideoSinkInterface<VideoFrame>* const incoming_video_stream_;
+
+ int64_t last_rtt_ms_ RTC_GUARDED_BY(crit_);
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_VIDEO_STREAM_DECODER_H_
diff --git a/third_party/libwebrtc/webrtc/video/video_stream_encoder.cc b/third_party/libwebrtc/webrtc/video/video_stream_encoder.cc
new file mode 100644
index 0000000000..61e7f00ab0
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/video_stream_encoder.cc
@@ -0,0 +1,1273 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/video_stream_encoder.h"
+
+#include <algorithm>
+#include <limits>
+#include <numeric>
+#include <utility>
+
+#include "api/video/i420_buffer.h"
+#include "common_video/include/video_bitrate_allocator.h"
+#include "common_video/include/video_frame.h"
+#include "modules/pacing/paced_sender.h"
+#include "modules/video_coding/codecs/vp8/temporal_layers.h"
+#include "modules/video_coding/include/video_codec_initializer.h"
+#include "modules/video_coding/include/video_coding.h"
+#include "modules/video_coding/include/video_coding_defines.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/location.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/timeutils.h"
+#include "rtc_base/trace_event.h"
+#include "video/overuse_frame_detector.h"
+#include "video/send_statistics_proxy.h"
+
+namespace webrtc {
+
+namespace {
+
+// Time interval for logging frame counts.
+const int64_t kFrameLogIntervalMs = 60000;
+const int kMinFramerateFps = 2;
+const int kMaxFramerateFps = 120;
+
+// The maximum number of frames to drop at the beginning of the stream to
+// try to achieve the desired bitrate.
+const int kMaxInitialFramedrop = 4;
+
+uint32_t MaximumFrameSizeForBitrate(uint32_t kbps) {
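+  // E.g. an estimate of 250 kbps caps frames at QVGA (320 * 240) and one of
+  // 400 kbps at VGA (640 * 480); at 500 kbps and above no cap applies.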
+ if (kbps > 0) {
+ if (kbps < 300 /* qvga */) {
+ return 320 * 240;
+ } else if (kbps < 500 /* vga */) {
+ return 640 * 480;
+ }
+ }
+ return std::numeric_limits<uint32_t>::max();
+}
+
+// Initial limits for kBalanced degradation preference.
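+// Taken together, MinFps/MaxFps give a 7-10 fps window at QVGA, 10-15 fps up
+// to 480x270, and a lower bound of 15 fps up to VGA; beyond that the limits
+// fall away (numeric_limits<int>::max()).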
+int MinFps(int pixels) {
+ if (pixels <= 320 * 240) {
+ return 7;
+ } else if (pixels <= 480 * 270) {
+ return 10;
+ } else if (pixels <= 640 * 480) {
+ return 15;
+ } else {
+ return std::numeric_limits<int>::max();
+ }
+}
+
+int MaxFps(int pixels) {
+ if (pixels <= 320 * 240) {
+ return 10;
+ } else if (pixels <= 480 * 270) {
+ return 15;
+ } else {
+ return std::numeric_limits<int>::max();
+ }
+}
+
+bool IsResolutionScalingEnabled(
+ VideoSendStream::DegradationPreference degradation_preference) {
+ return degradation_preference ==
+ VideoSendStream::DegradationPreference::kMaintainFramerate ||
+ degradation_preference ==
+ VideoSendStream::DegradationPreference::kBalanced;
+}
+
+bool IsFramerateScalingEnabled(
+ VideoSendStream::DegradationPreference degradation_preference) {
+ return degradation_preference ==
+ VideoSendStream::DegradationPreference::kMaintainResolution ||
+ degradation_preference ==
+ VideoSendStream::DegradationPreference::kBalanced;
+}
+
+} // namespace
+
+class VideoStreamEncoder::ConfigureEncoderTask : public rtc::QueuedTask {
+ public:
+ ConfigureEncoderTask(VideoStreamEncoder* video_stream_encoder,
+ VideoEncoderConfig config,
+ size_t max_data_payload_length,
+ bool nack_enabled)
+ : video_stream_encoder_(video_stream_encoder),
+ config_(std::move(config)),
+ max_data_payload_length_(max_data_payload_length),
+ nack_enabled_(nack_enabled) {}
+
+ private:
+ bool Run() override {
+ video_stream_encoder_->ConfigureEncoderOnTaskQueue(
+ std::move(config_), max_data_payload_length_, nack_enabled_);
+ return true;
+ }
+
+ VideoStreamEncoder* const video_stream_encoder_;
+ VideoEncoderConfig config_;
+ size_t max_data_payload_length_;
+ bool nack_enabled_;
+};
+
+class VideoStreamEncoder::EncodeTask : public rtc::QueuedTask {
+ public:
+ EncodeTask(const VideoFrame& frame,
+ VideoStreamEncoder* video_stream_encoder,
+ int64_t time_when_posted_us,
+ bool log_stats)
+ : frame_(frame),
+ video_stream_encoder_(video_stream_encoder),
+ time_when_posted_us_(time_when_posted_us),
+ log_stats_(log_stats) {
+ ++video_stream_encoder_->posted_frames_waiting_for_encode_;
+ }
+
+ private:
+ bool Run() override {
+ RTC_DCHECK_RUN_ON(&video_stream_encoder_->encoder_queue_);
+ video_stream_encoder_->stats_proxy_->OnIncomingFrame(frame_.width(),
+ frame_.height());
+ ++video_stream_encoder_->captured_frame_count_;
+ const int posted_frames_waiting_for_encode =
+ video_stream_encoder_->posted_frames_waiting_for_encode_.fetch_sub(1);
+ RTC_DCHECK_GT(posted_frames_waiting_for_encode, 0);
+ if (posted_frames_waiting_for_encode == 1) {
+ video_stream_encoder_->EncodeVideoFrame(frame_, time_when_posted_us_);
+ } else {
+ // There is a newer frame in flight. Do not encode this frame.
+ RTC_LOG(LS_VERBOSE)
+ << "Incoming frame dropped due to that the encoder is blocked.";
+ ++video_stream_encoder_->dropped_frame_count_;
+ video_stream_encoder_->stats_proxy_->OnFrameDroppedInEncoderQueue();
+ }
+ if (log_stats_) {
+ RTC_LOG(LS_INFO) << "Number of frames: captured "
+ << video_stream_encoder_->captured_frame_count_
+ << ", dropped (due to encoder blocked) "
+ << video_stream_encoder_->dropped_frame_count_
+ << ", interval_ms " << kFrameLogIntervalMs;
+ video_stream_encoder_->captured_frame_count_ = 0;
+ video_stream_encoder_->dropped_frame_count_ = 0;
+ }
+ return true;
+ }
+ VideoFrame frame_;
+ VideoStreamEncoder* const video_stream_encoder_;
+ const int64_t time_when_posted_us_;
+ const bool log_stats_;
+};
+
+// VideoSourceProxy is responsible for ensuring thread safety between calls to
+// VideoStreamEncoder::SetSource, which happen on libjingle's worker thread
+// when a video capturer is connected to the encoder, and the encoder task
+// queue (encoder_queue_), where the encoder reports its VideoSinkWants.
+class VideoStreamEncoder::VideoSourceProxy {
+ public:
+ explicit VideoSourceProxy(VideoStreamEncoder* video_stream_encoder)
+ : video_stream_encoder_(video_stream_encoder),
+ degradation_preference_(
+ VideoSendStream::DegradationPreference::kDegradationDisabled),
+ source_(nullptr) {}
+
+ void SetSource(
+ rtc::VideoSourceInterface<VideoFrame>* source,
+ const VideoSendStream::DegradationPreference& degradation_preference) {
+ // Called on libjingle's worker thread.
+ RTC_DCHECK_CALLED_SEQUENTIALLY(&main_checker_);
+ rtc::VideoSourceInterface<VideoFrame>* old_source = nullptr;
+ rtc::VideoSinkWants wants;
+ {
+ rtc::CritScope lock(&crit_);
+ degradation_preference_ = degradation_preference;
+ old_source = source_;
+ source_ = source;
+ wants = GetActiveSinkWantsInternal();
+ }
+
+ if (old_source != source && old_source != nullptr) {
+ old_source->RemoveSink(video_stream_encoder_);
+ }
+
+ if (!source) {
+ return;
+ }
+
+ source->AddOrUpdateSink(video_stream_encoder_, wants);
+ }
+
+ void SetWantsRotationApplied(bool rotation_applied) {
+ rtc::CritScope lock(&crit_);
+ sink_wants_.rotation_applied = rotation_applied;
+ if (source_)
+ source_->AddOrUpdateSink(video_stream_encoder_, sink_wants_);
+ }
+
+ rtc::VideoSinkWants GetActiveSinkWants() {
+ rtc::CritScope lock(&crit_);
+ return GetActiveSinkWantsInternal();
+ }
+
+ void ResetPixelFpsCount() {
+ rtc::CritScope lock(&crit_);
+ sink_wants_.max_pixel_count = std::numeric_limits<int>::max();
+ sink_wants_.target_pixel_count.reset();
+ sink_wants_.max_framerate_fps = std::numeric_limits<int>::max();
+ if (source_)
+ source_->AddOrUpdateSink(video_stream_encoder_, sink_wants_);
+ }
+
+ bool RequestResolutionLowerThan(int pixel_count,
+ int min_pixels_per_frame,
+ bool* min_pixels_reached) {
+ // Called on the encoder task queue.
+ rtc::CritScope lock(&crit_);
+ if (!source_ || !IsResolutionScalingEnabled(degradation_preference_)) {
+ // This can happen since |degradation_preference_| is set on libjingle's
+ // worker thread but the adaptation is done on the encoder task queue.
+ return false;
+ }
+    // The input video frames will be scaled down to a resolution of at most
+    // |max_pixel_count| pixels, depending on how the source can scale them.
+ const int pixels_wanted = (pixel_count * 3) / 5;
+ if (pixels_wanted >= sink_wants_.max_pixel_count) {
+ return false;
+ }
+ if (pixels_wanted < min_pixels_per_frame) {
+ *min_pixels_reached = true;
+ return false;
+ }
+ RTC_LOG(LS_INFO) << "Scaling down resolution, max pixels: "
+ << pixels_wanted;
+ sink_wants_.max_pixel_count = pixels_wanted;
+ sink_wants_.target_pixel_count = rtc::Optional<int>();
+ source_->AddOrUpdateSink(video_stream_encoder_,
+ GetActiveSinkWantsInternal());
+ return true;
+ }
+
+ int RequestFramerateLowerThan(int fps) {
+ // Called on the encoder task queue.
+ // The input video frame rate will be scaled down to 2/3, rounding down.
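+    // E.g. repeated requests step 30 fps -> 20 fps -> 13 fps -> 8 fps.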
+ int framerate_wanted = (fps * 2) / 3;
+ return RestrictFramerate(framerate_wanted) ? framerate_wanted : -1;
+ }
+
+ bool RequestHigherResolutionThan(int pixel_count) {
+ // Called on the encoder task queue.
+ rtc::CritScope lock(&crit_);
+ if (!source_ || !IsResolutionScalingEnabled(degradation_preference_)) {
+ // This can happen since |degradation_preference_| is set on libjingle's
+ // worker thread but the adaptation is done on the encoder task queue.
+ return false;
+ }
+ int max_pixels_wanted = pixel_count;
+ if (max_pixels_wanted != std::numeric_limits<int>::max())
+ max_pixels_wanted = pixel_count * 4;
+
+ if (max_pixels_wanted <= sink_wants_.max_pixel_count)
+ return false;
+
+ sink_wants_.max_pixel_count = max_pixels_wanted;
+ if (max_pixels_wanted == std::numeric_limits<int>::max()) {
+ // Remove any constraints.
+ sink_wants_.target_pixel_count.reset();
+ } else {
+      // On a step down we request at most 3/5 of the pixel count of the
+      // previous resolution, so in order to take "one step up" we request a
+      // resolution as close as possible to 5/3 of the current resolution. The
+      // actual pixel count selected depends on the capabilities of the source.
+      // To avoid taking too large a step up, we cap the requested pixel count
+      // at four times the current number of pixels.
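+      // E.g. from 230400 pixels (640x360) the step-up target is 384000 pixels
+      // and the request is capped at 921600 pixels.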
+ sink_wants_.target_pixel_count =
+ rtc::Optional<int>((pixel_count * 5) / 3);
+ }
+ RTC_LOG(LS_INFO) << "Scaling up resolution, max pixels: "
+ << max_pixels_wanted;
+ source_->AddOrUpdateSink(video_stream_encoder_,
+ GetActiveSinkWantsInternal());
+ return true;
+ }
+
+  // Requests an upgrade in framerate. Returns the new requested framerate, or
+  // -1 if no change was requested. Note that maxint may be returned if the
+  // limits due to adaptation requests are removed completely. In that case,
+  // consider |max_framerate_| to be the current limit (assuming the capturer
+  // complies).
+ int RequestHigherFramerateThan(int fps) {
+ // Called on the encoder task queue.
+ // The input frame rate will be scaled up to the last step, with rounding.
+ int framerate_wanted = fps;
+ if (fps != std::numeric_limits<int>::max())
+ framerate_wanted = (fps * 3) / 2;
+
+ return IncreaseFramerate(framerate_wanted) ? framerate_wanted : -1;
+ }
+
+ bool RestrictFramerate(int fps) {
+ // Called on the encoder task queue.
+ rtc::CritScope lock(&crit_);
+ if (!source_ || !IsFramerateScalingEnabled(degradation_preference_))
+ return false;
+
+ const int fps_wanted = std::max(kMinFramerateFps, fps);
+ if (fps_wanted >= sink_wants_.max_framerate_fps)
+ return false;
+
+ RTC_LOG(LS_INFO) << "Scaling down framerate: " << fps_wanted;
+ sink_wants_.max_framerate_fps = fps_wanted;
+ source_->AddOrUpdateSink(video_stream_encoder_,
+ GetActiveSinkWantsInternal());
+ return true;
+ }
+
+ bool IncreaseFramerate(int fps) {
+ // Called on the encoder task queue.
+ rtc::CritScope lock(&crit_);
+ if (!source_ || !IsFramerateScalingEnabled(degradation_preference_))
+ return false;
+
+ const int fps_wanted = std::max(kMinFramerateFps, fps);
+ if (fps_wanted <= sink_wants_.max_framerate_fps)
+ return false;
+
+ RTC_LOG(LS_INFO) << "Scaling up framerate: " << fps_wanted;
+ sink_wants_.max_framerate_fps = fps_wanted;
+ source_->AddOrUpdateSink(video_stream_encoder_,
+ GetActiveSinkWantsInternal());
+ return true;
+ }
+
+ private:
+ rtc::VideoSinkWants GetActiveSinkWantsInternal()
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(&crit_) {
+ rtc::VideoSinkWants wants = sink_wants_;
+ // Clear any constraints from the current sink wants that don't apply to
+ // the used degradation_preference.
+ switch (degradation_preference_) {
+ case VideoSendStream::DegradationPreference::kBalanced:
+ break;
+ case VideoSendStream::DegradationPreference::kMaintainFramerate:
+ wants.max_framerate_fps = std::numeric_limits<int>::max();
+ break;
+ case VideoSendStream::DegradationPreference::kMaintainResolution:
+ wants.max_pixel_count = std::numeric_limits<int>::max();
+ wants.target_pixel_count.reset();
+ break;
+ case VideoSendStream::DegradationPreference::kDegradationDisabled:
+ wants.max_pixel_count = std::numeric_limits<int>::max();
+ wants.target_pixel_count.reset();
+ wants.max_framerate_fps = std::numeric_limits<int>::max();
+ }
+ return wants;
+ }
+
+ rtc::CriticalSection crit_;
+ rtc::SequencedTaskChecker main_checker_;
+ VideoStreamEncoder* const video_stream_encoder_;
+ rtc::VideoSinkWants sink_wants_ RTC_GUARDED_BY(&crit_);
+ VideoSendStream::DegradationPreference degradation_preference_
+ RTC_GUARDED_BY(&crit_);
+ rtc::VideoSourceInterface<VideoFrame>* source_ RTC_GUARDED_BY(&crit_);
+
+ RTC_DISALLOW_COPY_AND_ASSIGN(VideoSourceProxy);
+};
+
+VideoStreamEncoder::VideoStreamEncoder(
+ uint32_t number_of_cores,
+ SendStatisticsProxy* stats_proxy,
+ const VideoSendStream::Config::EncoderSettings& settings,
+ rtc::VideoSinkInterface<VideoFrame>* pre_encode_callback,
+ EncodedFrameObserver* encoder_timing,
+ std::unique_ptr<OveruseFrameDetector> overuse_detector)
+ : shutdown_event_(true /* manual_reset */, false),
+ number_of_cores_(number_of_cores),
+ initial_rampup_(0),
+ source_proxy_(new VideoSourceProxy(this)),
+ sink_(nullptr),
+ settings_(settings),
+ codec_type_(PayloadStringToCodecType(settings.payload_name)),
+ video_sender_(Clock::GetRealTimeClock(), this),
+ overuse_detector_(
+ overuse_detector.get()
+ ? overuse_detector.release()
+ : new OveruseFrameDetector(
+ GetCpuOveruseOptions(settings.full_overuse_time),
+ this,
+ encoder_timing,
+ stats_proxy)),
+ stats_proxy_(stats_proxy),
+ pre_encode_callback_(pre_encode_callback),
+ max_framerate_(-1),
+ pending_encoder_reconfiguration_(false),
+ encoder_start_bitrate_bps_(0),
+ max_data_payload_length_(0),
+ nack_enabled_(false),
+ last_observed_bitrate_bps_(0),
+ encoder_paused_and_dropped_frame_(false),
+ clock_(Clock::GetRealTimeClock()),
+ degradation_preference_(
+ VideoSendStream::DegradationPreference::kDegradationDisabled),
+ posted_frames_waiting_for_encode_(0),
+ last_captured_timestamp_(0),
+ delta_ntp_internal_ms_(clock_->CurrentNtpInMilliseconds() -
+ clock_->TimeInMilliseconds()),
+ last_frame_log_ms_(clock_->TimeInMilliseconds()),
+ captured_frame_count_(0),
+ dropped_frame_count_(0),
+ bitrate_observer_(nullptr),
+ encoder_queue_("EncoderQueue") {
+ RTC_DCHECK(stats_proxy);
+ encoder_queue_.PostTask([this] {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ overuse_detector_->StartCheckForOveruse();
+ video_sender_.RegisterExternalEncoder(
+ settings_.encoder, settings_.payload_type, settings_.internal_source);
+ });
+}
+
+VideoStreamEncoder::~VideoStreamEncoder() {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RTC_DCHECK(shutdown_event_.Wait(0))
+ << "Must call ::Stop() before destruction.";
+}
+
+// TODO(pbos): Lower these thresholds (to closer to 100%) when we handle
+// pipelining encoders better (multiple input frames before something comes
+// out). This should effectively turn off CPU adaptations for systems that
+// remotely cope with the load right now.
+CpuOveruseOptions VideoStreamEncoder::GetCpuOveruseOptions(
+ bool full_overuse_time) {
+ CpuOveruseOptions options;
+ if (full_overuse_time) {
+ options.low_encode_usage_threshold_percent = 150;
+ options.high_encode_usage_threshold_percent = 200;
+ }
+ return options;
+}
+
+void VideoStreamEncoder::Stop() {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ source_proxy_->SetSource(nullptr, VideoSendStream::DegradationPreference());
+ encoder_queue_.PostTask([this] {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ overuse_detector_->StopCheckForOveruse();
+ rate_allocator_.reset();
+ bitrate_observer_ = nullptr;
+ video_sender_.RegisterExternalEncoder(nullptr, settings_.payload_type,
+ false);
+ quality_scaler_ = nullptr;
+ shutdown_event_.Set();
+ });
+
+ shutdown_event_.Wait(rtc::Event::kForever);
+}
+
+void VideoStreamEncoder::SetBitrateObserver(
+ VideoBitrateAllocationObserver* bitrate_observer) {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ encoder_queue_.PostTask([this, bitrate_observer] {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ RTC_DCHECK(!bitrate_observer_);
+ bitrate_observer_ = bitrate_observer;
+ });
+}
+
+void VideoStreamEncoder::SetSource(
+ rtc::VideoSourceInterface<VideoFrame>* source,
+ const VideoSendStream::DegradationPreference& degradation_preference) {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ source_proxy_->SetSource(source, degradation_preference);
+ encoder_queue_.PostTask([this, degradation_preference] {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ if (degradation_preference_ != degradation_preference) {
+ // Reset adaptation state, so that we're not tricked into thinking there's
+ // an already pending request of the same type.
+ last_adaptation_request_.reset();
+ if (degradation_preference ==
+ VideoSendStream::DegradationPreference::kBalanced ||
+ degradation_preference_ ==
+ VideoSendStream::DegradationPreference::kBalanced) {
+ // TODO(asapersson): Consider removing |adapt_counters_| map and use one
+ // AdaptCounter for all modes.
+ source_proxy_->ResetPixelFpsCount();
+ adapt_counters_.clear();
+ }
+ }
+ degradation_preference_ = degradation_preference;
+ bool allow_scaling = IsResolutionScalingEnabled(degradation_preference_);
+ initial_rampup_ = allow_scaling ? 0 : kMaxInitialFramedrop;
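+    // Starting the counter at kMaxInitialFramedrop effectively disables
+    // initial frame dropping when resolution scaling is not allowed.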
+ ConfigureQualityScaler();
+ if (!IsFramerateScalingEnabled(degradation_preference) &&
+ max_framerate_ != -1) {
+ // If frame rate scaling is no longer allowed, remove any potential
+ // allowance for longer frame intervals.
+ overuse_detector_->OnTargetFramerateUpdated(max_framerate_);
+ }
+ });
+}
+
+void VideoStreamEncoder::SetSink(EncoderSink* sink, bool rotation_applied) {
+ source_proxy_->SetWantsRotationApplied(rotation_applied);
+ encoder_queue_.PostTask([this, sink] {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ sink_ = sink;
+ });
+}
+
+void VideoStreamEncoder::SetStartBitrate(int start_bitrate_bps) {
+ encoder_queue_.PostTask([this, start_bitrate_bps] {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ encoder_start_bitrate_bps_ = start_bitrate_bps;
+ });
+}
+
+void VideoStreamEncoder::ConfigureEncoder(VideoEncoderConfig config,
+ size_t max_data_payload_length,
+ bool nack_enabled) {
+ encoder_queue_.PostTask(
+ std::unique_ptr<rtc::QueuedTask>(new ConfigureEncoderTask(
+ this, std::move(config), max_data_payload_length, nack_enabled)));
+}
+
+void VideoStreamEncoder::ConfigureEncoderOnTaskQueue(
+ VideoEncoderConfig config,
+ size_t max_data_payload_length,
+ bool nack_enabled) {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ RTC_DCHECK(sink_);
+ RTC_LOG(LS_INFO) << "ConfigureEncoder requested.";
+
+ max_data_payload_length_ = max_data_payload_length;
+ nack_enabled_ = nack_enabled;
+ encoder_config_ = std::move(config);
+ pending_encoder_reconfiguration_ = true;
+
+ // Reconfigure the encoder now if the encoder has an internal source or
+ // if the frame resolution is known. Otherwise, the reconfiguration is
+ // deferred until the next frame to minimize the number of reconfigurations.
+  // The codec configuration depends on the incoming video frame size.
+ if (last_frame_info_) {
+ ReconfigureEncoder();
+ } else if (settings_.internal_source) {
+ last_frame_info_ =
+ rtc::Optional<VideoFrameInfo>(VideoFrameInfo(176, 144, false));
+ ReconfigureEncoder();
+ }
+}
+
+void VideoStreamEncoder::ReconfigureEncoder() {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ RTC_DCHECK(pending_encoder_reconfiguration_);
+ std::vector<VideoStream> streams =
+ encoder_config_.video_stream_factory->CreateEncoderStreams(
+ last_frame_info_->width, last_frame_info_->height, encoder_config_);
+
+ // TODO(ilnik): If configured resolution is significantly less than provided,
+ // e.g. because there are not enough SSRCs for all simulcast streams,
+ // signal new resolutions via SinkWants to video source.
+
+  // Stream dimensions may not be equal to the given frame dimensions due to
+  // simulcast restrictions.
+ auto highest_stream = std::max_element(
+ streams.begin(), streams.end(),
+ [](const webrtc::VideoStream& a, const webrtc::VideoStream& b) {
+ return std::tie(a.width, a.height) < std::tie(b.width, b.height);
+ });
+ int highest_stream_width = static_cast<int>(highest_stream->width);
+ int highest_stream_height = static_cast<int>(highest_stream->height);
+  // Dimensions may be reduced to be, e.g., divisible by 4.
+ RTC_CHECK_GE(last_frame_info_->width, highest_stream_width);
+ RTC_CHECK_GE(last_frame_info_->height, highest_stream_height);
+ crop_width_ = last_frame_info_->width - highest_stream_width;
+ crop_height_ = last_frame_info_->height - highest_stream_height;
+
+ VideoCodec codec;
+ if (!VideoCodecInitializer::SetupCodec(encoder_config_, settings_, streams,
+ nack_enabled_, &codec,
+ &rate_allocator_)) {
+ RTC_LOG(LS_ERROR) << "Failed to create encoder configuration.";
+ }
+
+ codec.startBitrate =
+ std::max(encoder_start_bitrate_bps_ / 1000, codec.minBitrate);
+ codec.startBitrate = std::min(codec.startBitrate, codec.maxBitrate);
+ codec.expect_encode_from_texture = last_frame_info_->is_texture;
+ max_framerate_ = codec.maxFramerate;
+ RTC_DCHECK_LE(max_framerate_, kMaxFramerateFps);
+
+ bool success = video_sender_.RegisterSendCodec(
+ &codec, number_of_cores_,
+ static_cast<uint32_t>(max_data_payload_length_)) == VCM_OK;
+ if (!success) {
+ RTC_LOG(LS_ERROR) << "Failed to configure encoder.";
+ rate_allocator_.reset();
+ }
+
+ video_sender_.UpdateChannelParemeters(rate_allocator_.get(),
+ bitrate_observer_);
+
+ // Get the current actual framerate, as measured by the stats proxy. This is
+ // used to get the correct bitrate layer allocation.
+ int current_framerate = stats_proxy_->GetSendFrameRate();
+ if (current_framerate == 0)
+ current_framerate = codec.maxFramerate;
+ stats_proxy_->OnEncoderReconfigured(
+ encoder_config_,
+ rate_allocator_.get()
+ ? rate_allocator_->GetPreferredBitrateBps(current_framerate)
+ : codec.maxBitrate);
+
+ pending_encoder_reconfiguration_ = false;
+
+ sink_->OnEncoderConfigurationChanged(
+ std::move(streams), encoder_config_.min_transmit_bitrate_bps);
+
+  // Get the current target framerate, i.e. the maximum framerate as specified
+  // by the current codec configuration, or any limit imposed by CPU
+  // adaptation in maintain-resolution or balanced mode. This is used to make
+  // sure overuse detection doesn't needlessly trigger in low and/or variable
+  // framerate scenarios.
+ int target_framerate = std::min(
+ max_framerate_, source_proxy_->GetActiveSinkWants().max_framerate_fps);
+ overuse_detector_->OnTargetFramerateUpdated(target_framerate);
+
+ ConfigureQualityScaler();
+}
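+
+// Illustrative note (not part of the upstream code): the startBitrate
+// clamping in ReconfigureEncoder() is a plain clamp into the codec's
+// [minBitrate, maxBitrate] range, in kbps. For example, assuming
+// encoder_start_bitrate_bps_ = 300000, minBitrate = 30 and maxBitrate = 2000:
+//
+//   uint32_t start_kbps = std::max(300000u / 1000, 30u);  // 300
+//   start_kbps = std::min(start_kbps, 2000u);             // stays 300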
+
+void VideoStreamEncoder::ConfigureQualityScaler() {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ const auto scaling_settings = settings_.encoder->GetScalingSettings();
+ const bool quality_scaling_allowed =
+ IsResolutionScalingEnabled(degradation_preference_) &&
+ scaling_settings.enabled;
+
+ if (quality_scaling_allowed) {
+ if (quality_scaler_.get() == nullptr) {
+ // Quality scaler has not already been configured.
+ // Drop frames and scale down until desired quality is achieved.
+ if (scaling_settings.thresholds) {
+ quality_scaler_.reset(
+ new QualityScaler(this, *(scaling_settings.thresholds)));
+ } else {
+ quality_scaler_.reset(new QualityScaler(this, codec_type_));
+ }
+ }
+ } else {
+ quality_scaler_.reset(nullptr);
+ initial_rampup_ = kMaxInitialFramedrop;
+ }
+
+ stats_proxy_->SetAdaptationStats(GetActiveCounts(kCpu),
+ GetActiveCounts(kQuality));
+}
+
+void VideoStreamEncoder::OnFrame(const VideoFrame& video_frame) {
+ RTC_DCHECK_RUNS_SERIALIZED(&incoming_frame_race_checker_);
+ VideoFrame incoming_frame = video_frame;
+
+ // Local time in webrtc time base.
+ int64_t current_time_us = clock_->TimeInMicroseconds();
+ int64_t current_time_ms = current_time_us / rtc::kNumMicrosecsPerMillisec;
+  // In some cases, e.g. when a frame from the decoder is fed to the encoder,
+  // the timestamp may be set in the future. As the encoding pipeline assumes
+  // capture time to be less than present time, we should reset the capture
+  // timestamps here. Otherwise there may be issues with the RTP send stream.
+
+ if (incoming_frame.timestamp_us() > current_time_us)
+ incoming_frame.set_timestamp_us(current_time_us);
+
+ // Capture time may come from clock with an offset and drift from clock_.
+ int64_t capture_ntp_time_ms;
+ if (video_frame.ntp_time_ms() > 0) {
+ capture_ntp_time_ms = video_frame.ntp_time_ms();
+ } else if (video_frame.render_time_ms() != 0) {
+ capture_ntp_time_ms = video_frame.render_time_ms() + delta_ntp_internal_ms_;
+ } else {
+ capture_ntp_time_ms = current_time_ms + delta_ntp_internal_ms_;
+ }
+ incoming_frame.set_ntp_time_ms(capture_ntp_time_ms);
+
+ // Convert NTP time, in ms, to RTP timestamp.
+ const int kMsToRtpTimestamp = 90;
+ incoming_frame.set_timestamp(
+ kMsToRtpTimestamp * static_cast<uint32_t>(incoming_frame.ntp_time_ms()));
+
+ if (incoming_frame.ntp_time_ms() <= last_captured_timestamp_) {
+    // We don't allow the same capture time for two frames; drop this one.
+ RTC_LOG(LS_WARNING) << "Same/old NTP timestamp ("
+ << incoming_frame.ntp_time_ms()
+ << " <= " << last_captured_timestamp_
+ << ") for incoming frame. Dropping.";
+ return;
+ }
+
+ bool log_stats = false;
+ if (current_time_ms - last_frame_log_ms_ > kFrameLogIntervalMs) {
+ last_frame_log_ms_ = current_time_ms;
+ log_stats = true;
+ }
+
+ last_captured_timestamp_ = incoming_frame.ntp_time_ms();
+ encoder_queue_.PostTask(std::unique_ptr<rtc::QueuedTask>(new EncodeTask(
+ incoming_frame, this, rtc::TimeMicros(), log_stats)));
+}
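+
+// Illustrative sketch (not upstream code): the conversion above uses video's
+// fixed 90 kHz RTP clock, i.e. 90 ticks per millisecond, wrapping modulo 2^32
+// via the uint32_t cast:
+//
+//   uint32_t NtpMsToRtpTimestamp(int64_t ntp_ms) {
+//     return 90 * static_cast<uint32_t>(ntp_ms);
+//   }
+//   // NtpMsToRtpTimestamp(1000) == 90000, i.e. one second of media.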
+
+void VideoStreamEncoder::OnDiscardedFrame() {
+ stats_proxy_->OnFrameDroppedBySource();
+}
+
+bool VideoStreamEncoder::EncoderPaused() const {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ // Pause video if paused by caller or as long as the network is down or the
+ // pacer queue has grown too large in buffered mode.
+ // If the pacer queue has grown too large or the network is down,
+ // last_observed_bitrate_bps_ will be 0.
+ return last_observed_bitrate_bps_ == 0;
+}
+
+void VideoStreamEncoder::TraceFrameDropStart() {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ // Start trace event only on the first frame after encoder is paused.
+ if (!encoder_paused_and_dropped_frame_) {
+ TRACE_EVENT_ASYNC_BEGIN0("webrtc", "EncoderPaused", this);
+ }
+ encoder_paused_and_dropped_frame_ = true;
+}
+
+void VideoStreamEncoder::TraceFrameDropEnd() {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ // End trace event on first frame after encoder resumes, if frame was dropped.
+ if (encoder_paused_and_dropped_frame_) {
+ TRACE_EVENT_ASYNC_END0("webrtc", "EncoderPaused", this);
+ }
+ encoder_paused_and_dropped_frame_ = false;
+}
+
+void VideoStreamEncoder::EncodeVideoFrame(const VideoFrame& video_frame,
+ int64_t time_when_posted_us) {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+
+ if (pre_encode_callback_)
+ pre_encode_callback_->OnFrame(video_frame);
+
+ if (!last_frame_info_ || video_frame.width() != last_frame_info_->width ||
+ video_frame.height() != last_frame_info_->height ||
+ video_frame.is_texture() != last_frame_info_->is_texture) {
+ pending_encoder_reconfiguration_ = true;
+ last_frame_info_ = rtc::Optional<VideoFrameInfo>(VideoFrameInfo(
+ video_frame.width(), video_frame.height(), video_frame.is_texture()));
+ RTC_LOG(LS_INFO) << "Video frame parameters changed: dimensions="
+ << last_frame_info_->width << "x"
+ << last_frame_info_->height
+ << ", texture=" << last_frame_info_->is_texture << ".";
+ }
+
+ if (initial_rampup_ < kMaxInitialFramedrop &&
+ video_frame.size() >
+ MaximumFrameSizeForBitrate(encoder_start_bitrate_bps_ / 1000)) {
+ RTC_LOG(LS_INFO) << "Dropping frame. Too large for target bitrate.";
+ AdaptDown(kQuality);
+ ++initial_rampup_;
+ return;
+ }
+ initial_rampup_ = kMaxInitialFramedrop;
+
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ if (pending_encoder_reconfiguration_) {
+ ReconfigureEncoder();
+ last_parameters_update_ms_.emplace(now_ms);
+ } else if (!last_parameters_update_ms_ ||
+ now_ms - *last_parameters_update_ms_ >=
+ vcm::VCMProcessTimer::kDefaultProcessIntervalMs) {
+ video_sender_.UpdateChannelParemeters(rate_allocator_.get(),
+ bitrate_observer_);
+ last_parameters_update_ms_.emplace(now_ms);
+ }
+
+ if (EncoderPaused()) {
+ TraceFrameDropStart();
+ return;
+ }
+ TraceFrameDropEnd();
+
+ VideoFrame out_frame(video_frame);
+ // Crop frame if needed.
+ if (crop_width_ > 0 || crop_height_ > 0) {
+ int cropped_width = video_frame.width() - crop_width_;
+ int cropped_height = video_frame.height() - crop_height_;
+ rtc::scoped_refptr<I420Buffer> cropped_buffer =
+ I420Buffer::Create(cropped_width, cropped_height);
+ // TODO(ilnik): Remove scaling if cropping is too big, as it should never
+ // happen after SinkWants signaled correctly from ReconfigureEncoder.
+ if (crop_width_ < 4 && crop_height_ < 4) {
+ cropped_buffer->CropAndScaleFrom(
+ *video_frame.video_frame_buffer()->ToI420(), crop_width_ / 2,
+ crop_height_ / 2, cropped_width, cropped_height);
+ } else {
+ cropped_buffer->ScaleFrom(
+ *video_frame.video_frame_buffer()->ToI420().get());
+ }
+ out_frame =
+ VideoFrame(cropped_buffer, video_frame.timestamp(),
+ video_frame.render_time_ms(), video_frame.rotation());
+ out_frame.set_ntp_time_ms(video_frame.ntp_time_ms());
+ }
+
+ TRACE_EVENT_ASYNC_STEP0("webrtc", "Video", video_frame.render_time_ms(),
+ "Encode");
+
+ overuse_detector_->FrameCaptured(out_frame, time_when_posted_us);
+
+ video_sender_.AddVideoFrame(out_frame, nullptr);
+}
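+
+// Illustrative example (hypothetical numbers, not upstream code): with a
+// 642x482 input and a highest configured stream of 640x480,
+// ReconfigureEncoder() leaves crop_width_ = 2 and crop_height_ = 2, so the
+// small-crop branch above removes one pixel from each edge:
+//
+//   rtc::scoped_refptr<I420Buffer> cropped = I420Buffer::Create(640, 480);
+//   cropped->CropAndScaleFrom(*frame.video_frame_buffer()->ToI420(),
+//                             /*offset_x=*/1, /*offset_y=*/1, 640, 480);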
+
+void VideoStreamEncoder::SendKeyFrame() {
+ if (!encoder_queue_.IsCurrent()) {
+ encoder_queue_.PostTask([this] { SendKeyFrame(); });
+ return;
+ }
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ video_sender_.IntraFrameRequest(0);
+}
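+
+// The re-posting pattern used by SendKeyFrame() above (and by
+// OnReceivedIntraFrameRequest() and OnBitrateUpdated() below) is,
+// schematically:
+//
+//   void Method(Args... args) {
+//     if (!encoder_queue_.IsCurrent()) {
+//       encoder_queue_.PostTask([this, args...] { Method(args...); });
+//       return;  // The posted task re-runs Method() on |encoder_queue_|.
+//     }
+//     RTC_DCHECK_RUN_ON(&encoder_queue_);
+//     // ...actual work, now serialized on the encoder queue.
+//   }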
+
+EncodedImageCallback::Result VideoStreamEncoder::OnEncodedImage(
+ const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info,
+ const RTPFragmentationHeader* fragmentation) {
+  // Encoded is called on whatever thread the real encoder implementation runs
+  // on. In the case of hardware encoders, there might be several encoders
+  // running in parallel on different threads.
+ stats_proxy_->OnSendEncodedImage(encoded_image, codec_specific_info);
+
+ EncodedImageCallback::Result result =
+ sink_->OnEncodedImage(encoded_image, codec_specific_info, fragmentation);
+
+ int64_t time_sent_us = rtc::TimeMicros();
+ uint32_t timestamp = encoded_image._timeStamp;
+ const int qp = encoded_image.qp_;
+ encoder_queue_.PostTask([this, timestamp, time_sent_us, qp] {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ overuse_detector_->FrameSent(timestamp, time_sent_us);
+ if (quality_scaler_ && qp >= 0)
+ quality_scaler_->ReportQP(qp);
+ });
+
+ return result;
+}
+
+void VideoStreamEncoder::OnDroppedFrame(DropReason reason) {
+ switch (reason) {
+ case DropReason::kDroppedByMediaOptimizations:
+ stats_proxy_->OnFrameDroppedByMediaOptimizations();
+ encoder_queue_.PostTask([this] {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ if (quality_scaler_)
+ quality_scaler_->ReportDroppedFrame();
+ });
+ break;
+ case DropReason::kDroppedByEncoder:
+ stats_proxy_->OnFrameDroppedByEncoder();
+ break;
+ }
+}
+
+void VideoStreamEncoder::OnReceivedIntraFrameRequest(size_t stream_index) {
+ if (!encoder_queue_.IsCurrent()) {
+ encoder_queue_.PostTask(
+ [this, stream_index] { OnReceivedIntraFrameRequest(stream_index); });
+ return;
+ }
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+  // Key frame request from the remote side; signal to the VCM.
+ TRACE_EVENT0("webrtc", "OnKeyFrameRequest");
+ video_sender_.IntraFrameRequest(stream_index);
+}
+
+void VideoStreamEncoder::OnBitrateUpdated(uint32_t bitrate_bps,
+ uint8_t fraction_lost,
+ int64_t round_trip_time_ms) {
+ if (!encoder_queue_.IsCurrent()) {
+ encoder_queue_.PostTask(
+ [this, bitrate_bps, fraction_lost, round_trip_time_ms] {
+ OnBitrateUpdated(bitrate_bps, fraction_lost, round_trip_time_ms);
+ });
+ return;
+ }
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ RTC_DCHECK(sink_) << "sink_ must be set before the encoder is active.";
+
+ RTC_LOG(LS_VERBOSE) << "OnBitrateUpdated, bitrate " << bitrate_bps
+ << " packet loss " << static_cast<int>(fraction_lost)
+ << " rtt " << round_trip_time_ms;
+
+ video_sender_.SetChannelParameters(bitrate_bps, fraction_lost,
+ round_trip_time_ms, rate_allocator_.get(),
+ bitrate_observer_);
+
+ encoder_start_bitrate_bps_ =
+ bitrate_bps != 0 ? bitrate_bps : encoder_start_bitrate_bps_;
+ bool video_is_suspended = bitrate_bps == 0;
+ bool video_suspension_changed = video_is_suspended != EncoderPaused();
+ last_observed_bitrate_bps_ = bitrate_bps;
+
+ if (video_suspension_changed) {
+ RTC_LOG(LS_INFO) << "Video suspend state changed to: "
+ << (video_is_suspended ? "suspended" : "not suspended");
+ stats_proxy_->OnSuspendChange(video_is_suspended);
+ }
+}
+
+void VideoStreamEncoder::AdaptDown(AdaptReason reason) {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ AdaptationRequest adaptation_request = {
+ last_frame_info_->pixel_count(),
+ stats_proxy_->GetStats().input_frame_rate,
+ AdaptationRequest::Mode::kAdaptDown};
+
+ bool downgrade_requested =
+ last_adaptation_request_ &&
+ last_adaptation_request_->mode_ == AdaptationRequest::Mode::kAdaptDown;
+
+ switch (degradation_preference_) {
+ case VideoSendStream::DegradationPreference::kBalanced:
+ break;
+ case VideoSendStream::DegradationPreference::kMaintainFramerate:
+ if (downgrade_requested &&
+ adaptation_request.input_pixel_count_ >=
+ last_adaptation_request_->input_pixel_count_) {
+ // Don't request lower resolution if the current resolution is not
+ // lower than the last time we asked for the resolution to be lowered.
+ return;
+ }
+ break;
+ case VideoSendStream::DegradationPreference::kMaintainResolution:
+ if (adaptation_request.framerate_fps_ <= 0 ||
+ (downgrade_requested &&
+ adaptation_request.framerate_fps_ < kMinFramerateFps)) {
+        // If no input fps estimate is available, we can't determine how to
+        // scale down the framerate. Also don't request a lower framerate if
+        // a downgrade was already requested and the framerate is below the
+        // allowed minimum. Since framerate, unlike resolution, is a measure
+        // we have to estimate and one that can fluctuate naturally over time,
+        // don't impose the same kind of limitations as for resolution; trust
+        // the overuse detector to not trigger too often instead.
+ return;
+ }
+ break;
+ case VideoSendStream::DegradationPreference::kDegradationDisabled:
+ return;
+ }
+
+ if (reason == kCpu) {
+ if (GetConstAdaptCounter().ResolutionCount(kCpu) >=
+ kMaxCpuResolutionDowngrades ||
+ GetConstAdaptCounter().FramerateCount(kCpu) >=
+ kMaxCpuFramerateDowngrades) {
+ return;
+ }
+ }
+
+ switch (degradation_preference_) {
+ case VideoSendStream::DegradationPreference::kBalanced: {
+      // Try to scale down framerate, if lower.
+ int fps = MinFps(last_frame_info_->pixel_count());
+ if (source_proxy_->RestrictFramerate(fps)) {
+ GetAdaptCounter().IncrementFramerate(reason);
+ break;
+ }
+ // Scale down resolution.
+ FALLTHROUGH();
+ }
+ case VideoSendStream::DegradationPreference::kMaintainFramerate: {
+ // Scale down resolution.
+ bool min_pixels_reached = false;
+ if (!source_proxy_->RequestResolutionLowerThan(
+ adaptation_request.input_pixel_count_,
+ settings_.encoder->GetScalingSettings().min_pixels_per_frame,
+ &min_pixels_reached)) {
+ if (min_pixels_reached)
+ stats_proxy_->OnMinPixelLimitReached();
+ return;
+ }
+ GetAdaptCounter().IncrementResolution(reason);
+ break;
+ }
+ case VideoSendStream::DegradationPreference::kMaintainResolution: {
+ // Scale down framerate.
+ const int requested_framerate = source_proxy_->RequestFramerateLowerThan(
+ adaptation_request.framerate_fps_);
+ if (requested_framerate == -1)
+ return;
+ RTC_DCHECK_NE(max_framerate_, -1);
+ overuse_detector_->OnTargetFramerateUpdated(
+ std::min(max_framerate_, requested_framerate));
+ GetAdaptCounter().IncrementFramerate(reason);
+ break;
+ }
+ case VideoSendStream::DegradationPreference::kDegradationDisabled:
+ RTC_NOTREACHED();
+ }
+
+ last_adaptation_request_.emplace(adaptation_request);
+
+ UpdateAdaptationStats(reason);
+
+ RTC_LOG(LS_INFO) << GetConstAdaptCounter().ToString();
+}
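+
+// Summary of the adapt-down policy above, per degradation preference:
+//   kBalanced:            first try a lower framerate, else fall through to
+//                         lowering resolution.
+//   kMaintainFramerate:   lower resolution only.
+//   kMaintainResolution:  lower framerate only.
+//   kDegradationDisabled: never adapt.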
+
+void VideoStreamEncoder::AdaptUp(AdaptReason reason) {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+
+ const AdaptCounter& adapt_counter = GetConstAdaptCounter();
+ int num_downgrades = adapt_counter.TotalCount(reason);
+ if (num_downgrades == 0)
+ return;
+ RTC_DCHECK_GT(num_downgrades, 0);
+
+ AdaptationRequest adaptation_request = {
+ last_frame_info_->pixel_count(),
+ stats_proxy_->GetStats().input_frame_rate,
+ AdaptationRequest::Mode::kAdaptUp};
+
+ bool adapt_up_requested =
+ last_adaptation_request_ &&
+ last_adaptation_request_->mode_ == AdaptationRequest::Mode::kAdaptUp;
+
+ if (degradation_preference_ ==
+ VideoSendStream::DegradationPreference::kMaintainFramerate) {
+ if (adapt_up_requested &&
+ adaptation_request.input_pixel_count_ <=
+ last_adaptation_request_->input_pixel_count_) {
+ // Don't request higher resolution if the current resolution is not
+ // higher than the last time we asked for the resolution to be higher.
+ return;
+ }
+ }
+
+ switch (degradation_preference_) {
+ case VideoSendStream::DegradationPreference::kBalanced: {
+      // Try to scale up framerate, if higher.
+ int fps = MaxFps(last_frame_info_->pixel_count());
+ if (source_proxy_->IncreaseFramerate(fps)) {
+ GetAdaptCounter().DecrementFramerate(reason, fps);
+ // Reset framerate in case of fewer fps steps down than up.
+ if (adapt_counter.FramerateCount() == 0 &&
+ fps != std::numeric_limits<int>::max()) {
+ RTC_LOG(LS_INFO) << "Removing framerate down-scaling setting.";
+ source_proxy_->IncreaseFramerate(std::numeric_limits<int>::max());
+ }
+ break;
+ }
+ // Scale up resolution.
+ FALLTHROUGH();
+ }
+ case VideoSendStream::DegradationPreference::kMaintainFramerate: {
+ // Scale up resolution.
+ int pixel_count = adaptation_request.input_pixel_count_;
+ if (adapt_counter.ResolutionCount() == 1) {
+ RTC_LOG(LS_INFO) << "Removing resolution down-scaling setting.";
+ pixel_count = std::numeric_limits<int>::max();
+ }
+ if (!source_proxy_->RequestHigherResolutionThan(pixel_count))
+ return;
+ GetAdaptCounter().DecrementResolution(reason);
+ break;
+ }
+ case VideoSendStream::DegradationPreference::kMaintainResolution: {
+ // Scale up framerate.
+ int fps = adaptation_request.framerate_fps_;
+ if (adapt_counter.FramerateCount() == 1) {
+ RTC_LOG(LS_INFO) << "Removing framerate down-scaling setting.";
+ fps = std::numeric_limits<int>::max();
+ }
+
+ const int requested_framerate =
+ source_proxy_->RequestHigherFramerateThan(fps);
+ if (requested_framerate == -1) {
+ overuse_detector_->OnTargetFramerateUpdated(max_framerate_);
+ return;
+ }
+ overuse_detector_->OnTargetFramerateUpdated(
+ std::min(max_framerate_, requested_framerate));
+ GetAdaptCounter().DecrementFramerate(reason);
+ break;
+ }
+ case VideoSendStream::DegradationPreference::kDegradationDisabled:
+ return;
+ }
+
+ last_adaptation_request_.emplace(adaptation_request);
+
+ UpdateAdaptationStats(reason);
+
+ RTC_LOG(LS_INFO) << adapt_counter.ToString();
+}
+
+void VideoStreamEncoder::UpdateAdaptationStats(AdaptReason reason) {
+ switch (reason) {
+ case kCpu:
+ stats_proxy_->OnCpuAdaptationChanged(GetActiveCounts(kCpu),
+ GetActiveCounts(kQuality));
+ break;
+ case kQuality:
+ stats_proxy_->OnQualityAdaptationChanged(GetActiveCounts(kCpu),
+ GetActiveCounts(kQuality));
+ break;
+ }
+}
+
+VideoStreamEncoder::AdaptCounts VideoStreamEncoder::GetActiveCounts(
+ AdaptReason reason) {
+ VideoStreamEncoder::AdaptCounts counts =
+ GetConstAdaptCounter().Counts(reason);
+ switch (reason) {
+ case kCpu:
+ if (!IsFramerateScalingEnabled(degradation_preference_))
+ counts.fps = -1;
+ if (!IsResolutionScalingEnabled(degradation_preference_))
+ counts.resolution = -1;
+ break;
+ case kQuality:
+ if (!IsFramerateScalingEnabled(degradation_preference_) ||
+ !quality_scaler_) {
+ counts.fps = -1;
+ }
+ if (!IsResolutionScalingEnabled(degradation_preference_) ||
+ !quality_scaler_) {
+ counts.resolution = -1;
+ }
+ break;
+ }
+ return counts;
+}
+
+VideoStreamEncoder::AdaptCounter& VideoStreamEncoder::GetAdaptCounter() {
+ return adapt_counters_[degradation_preference_];
+}
+
+const VideoStreamEncoder::AdaptCounter&
+VideoStreamEncoder::GetConstAdaptCounter() {
+ return adapt_counters_[degradation_preference_];
+}
+
+// Class holding adaptation information.
+VideoStreamEncoder::AdaptCounter::AdaptCounter() {
+ fps_counters_.resize(kScaleReasonSize);
+ resolution_counters_.resize(kScaleReasonSize);
+ static_assert(kScaleReasonSize == 2, "Update MoveCount.");
+}
+
+VideoStreamEncoder::AdaptCounter::~AdaptCounter() {}
+
+std::string VideoStreamEncoder::AdaptCounter::ToString() const {
+ std::stringstream ss;
+ ss << "Downgrade counts: fps: {" << ToString(fps_counters_);
+ ss << "}, resolution: {" << ToString(resolution_counters_) << "}";
+ return ss.str();
+}
+
+VideoStreamEncoder::AdaptCounts VideoStreamEncoder::AdaptCounter::Counts(
+ int reason) const {
+ AdaptCounts counts;
+ counts.fps = fps_counters_[reason];
+ counts.resolution = resolution_counters_[reason];
+ return counts;
+}
+
+void VideoStreamEncoder::AdaptCounter::IncrementFramerate(int reason) {
+ ++(fps_counters_[reason]);
+}
+
+void VideoStreamEncoder::AdaptCounter::IncrementResolution(int reason) {
+ ++(resolution_counters_[reason]);
+}
+
+void VideoStreamEncoder::AdaptCounter::DecrementFramerate(int reason) {
+ if (fps_counters_[reason] == 0) {
+ // Balanced mode: Adapt up is in a different order, switch reason.
+ // E.g. framerate adapt down: quality (2), framerate adapt up: cpu (3).
+ // 1. Down resolution (cpu): res={quality:0,cpu:1}, fps={quality:0,cpu:0}
+ // 2. Down fps (quality): res={quality:0,cpu:1}, fps={quality:1,cpu:0}
+ // 3. Up fps (cpu): res={quality:1,cpu:0}, fps={quality:0,cpu:0}
+ // 4. Up resolution (quality): res={quality:0,cpu:0}, fps={quality:0,cpu:0}
+ RTC_DCHECK_GT(TotalCount(reason), 0) << "No downgrade for reason.";
+ RTC_DCHECK_GT(FramerateCount(), 0) << "Framerate not downgraded.";
+ MoveCount(&resolution_counters_, reason);
+ MoveCount(&fps_counters_, (reason + 1) % kScaleReasonSize);
+ }
+ --(fps_counters_[reason]);
+ RTC_DCHECK_GE(fps_counters_[reason], 0);
+}
+
+void VideoStreamEncoder::AdaptCounter::DecrementResolution(int reason) {
+ if (resolution_counters_[reason] == 0) {
+ // Balanced mode: Adapt up is in a different order, switch reason.
+ RTC_DCHECK_GT(TotalCount(reason), 0) << "No downgrade for reason.";
+ RTC_DCHECK_GT(ResolutionCount(), 0) << "Resolution not downgraded.";
+ MoveCount(&fps_counters_, reason);
+ MoveCount(&resolution_counters_, (reason + 1) % kScaleReasonSize);
+ }
+ --(resolution_counters_[reason]);
+ RTC_DCHECK_GE(resolution_counters_[reason], 0);
+}
+
+void VideoStreamEncoder::AdaptCounter::DecrementFramerate(int reason,
+ int cur_fps) {
+ DecrementFramerate(reason);
+ // Reset if at max fps (i.e. in case of fewer steps up than down).
+ if (cur_fps == std::numeric_limits<int>::max())
+ std::fill(fps_counters_.begin(), fps_counters_.end(), 0);
+}
+
+int VideoStreamEncoder::AdaptCounter::FramerateCount() const {
+ return Count(fps_counters_);
+}
+
+int VideoStreamEncoder::AdaptCounter::ResolutionCount() const {
+ return Count(resolution_counters_);
+}
+
+int VideoStreamEncoder::AdaptCounter::FramerateCount(int reason) const {
+ return fps_counters_[reason];
+}
+
+int VideoStreamEncoder::AdaptCounter::ResolutionCount(int reason) const {
+ return resolution_counters_[reason];
+}
+
+int VideoStreamEncoder::AdaptCounter::TotalCount(int reason) const {
+ return FramerateCount(reason) + ResolutionCount(reason);
+}
+
+int VideoStreamEncoder::AdaptCounter::Count(
+ const std::vector<int>& counters) const {
+ return std::accumulate(counters.begin(), counters.end(), 0);
+}
+
+void VideoStreamEncoder::AdaptCounter::MoveCount(std::vector<int>* counters,
+ int from_reason) {
+ int to_reason = (from_reason + 1) % kScaleReasonSize;
+ ++((*counters)[to_reason]);
+ --((*counters)[from_reason]);
+}
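+
+// Illustrative example (not upstream code): calling MoveCount() on
+// fps_counters_ == {quality: 1, cpu: 0} with |from_reason| = quality yields
+// {quality: 0, cpu: 1}; the recorded downgrade is re-attributed to the other
+// reason while the total count stays unchanged.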
+
+std::string VideoStreamEncoder::AdaptCounter::ToString(
+ const std::vector<int>& counters) const {
+ std::stringstream ss;
+ for (size_t reason = 0; reason < kScaleReasonSize; ++reason) {
+ ss << (reason ? " cpu" : "quality") << ":" << counters[reason];
+ }
+ return ss.str();
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/webrtc/video/video_stream_encoder.h b/third_party/libwebrtc/webrtc/video/video_stream_encoder.h
new file mode 100644
index 0000000000..421b733961
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/video_stream_encoder.h
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_VIDEO_STREAM_ENCODER_H_
+#define VIDEO_VIDEO_STREAM_ENCODER_H_
+
+#include <atomic>
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "api/video/video_rotation.h"
+#include "api/video_codecs/video_encoder.h"
+#include "call/call.h"
+#include "common_types.h" // NOLINT(build/include)
+#include "common_video/include/video_bitrate_allocator.h"
+#include "media/base/videosinkinterface.h"
+#include "modules/video_coding/include/video_coding_defines.h"
+#include "modules/video_coding/utility/quality_scaler.h"
+#include "modules/video_coding/video_coding_impl.h"
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/event.h"
+#include "rtc_base/sequenced_task_checker.h"
+#include "rtc_base/task_queue.h"
+#include "typedefs.h" // NOLINT(build/include)
+#include "video/overuse_frame_detector.h"
+#include "call/video_send_stream.h"
+
+namespace webrtc {
+
+class SendStatisticsProxy;
+class VideoBitrateAllocationObserver;
+
+// VideoStreamEncoder represents a video encoder that accepts raw video frames
+// as input and produces an encoded bit stream.
+// Usage:
+// Instantiate.
+// Call SetSink.
+// Call SetSource.
+// Call ConfigureEncoder with the codec settings.
+// Call Stop() when done.
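+//
+// A minimal usage sketch (hypothetical |sink|, |source| and |config| objects;
+// constructor arguments elided, see the declaration below):
+//
+//   VideoStreamEncoder encoder(...);
+//   encoder.SetSink(&sink, /*rotation_applied=*/false);
+//   encoder.SetSource(&source,
+//       VideoSendStream::DegradationPreference::kMaintainFramerate);
+//   encoder.ConfigureEncoder(std::move(config), max_data_payload_length,
+//                            /*nack_enabled=*/true);
+//   ...
+//   encoder.Stop();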
+class VideoStreamEncoder : public rtc::VideoSinkInterface<VideoFrame>,
+ public EncodedImageCallback,
+ public AdaptationObserverInterface {
+ public:
+ // Interface for receiving encoded video frames and notifications about
+ // configuration changes.
+ class EncoderSink : public EncodedImageCallback {
+ public:
+ virtual void OnEncoderConfigurationChanged(
+ std::vector<VideoStream> streams,
+ int min_transmit_bitrate_bps) = 0;
+ };
+
+ // Number of resolution and framerate reductions (-1: disabled).
+ struct AdaptCounts {
+ int resolution = 0;
+ int fps = 0;
+ };
+
+ // Downscale resolution at most 2 times for CPU reasons.
+ static const int kMaxCpuResolutionDowngrades = 2;
+ // Downscale framerate at most 4 times.
+ static const int kMaxCpuFramerateDowngrades = 4;
+
+ VideoStreamEncoder(uint32_t number_of_cores,
+ SendStatisticsProxy* stats_proxy,
+ const VideoSendStream::Config::EncoderSettings& settings,
+ rtc::VideoSinkInterface<VideoFrame>* pre_encode_callback,
+ EncodedFrameObserver* encoder_timing,
+ std::unique_ptr<OveruseFrameDetector> overuse_detector);
+ ~VideoStreamEncoder();
+
+ // Sets the source that will provide I420 video frames.
+  // |degradation_preference| controls whether or not resolution or frame rate
+  // may be reduced.
+ void SetSource(
+ rtc::VideoSourceInterface<VideoFrame>* source,
+ const VideoSendStream::DegradationPreference& degradation_preference);
+
+ // Sets the |sink| that gets the encoded frames. |rotation_applied| means
+ // that the source must support rotation. Only set |rotation_applied| if the
+ // remote side does not support the rotation extension.
+ void SetSink(EncoderSink* sink, bool rotation_applied);
+
+ // TODO(perkj): Can we remove VideoCodec.startBitrate ?
+ void SetStartBitrate(int start_bitrate_bps);
+
+ void SetBitrateObserver(VideoBitrateAllocationObserver* bitrate_observer);
+
+ void ConfigureEncoder(VideoEncoderConfig config,
+ size_t max_data_payload_length,
+ bool nack_enabled);
+
+ // Permanently stop encoding. After this method has returned, it is
+ // guaranteed that no encoded frames will be delivered to the sink.
+ void Stop();
+
+ void SendKeyFrame();
+
+ // virtual to test EncoderStateFeedback with mocks.
+ virtual void OnReceivedIntraFrameRequest(size_t stream_index);
+
+ void OnBitrateUpdated(uint32_t bitrate_bps,
+ uint8_t fraction_lost,
+ int64_t round_trip_time_ms);
+
+ protected:
+  // Used for testing. For example, the |ScalingObserverInterface| methods
+  // must be called on |encoder_queue_|.
+ rtc::TaskQueue* encoder_queue() { return &encoder_queue_; }
+
+ // AdaptationObserverInterface implementation.
+ // These methods are protected for easier testing.
+ void AdaptUp(AdaptReason reason) override;
+ void AdaptDown(AdaptReason reason) override;
+ static CpuOveruseOptions GetCpuOveruseOptions(bool full_overuse_time);
+
+ private:
+ class ConfigureEncoderTask;
+ class EncodeTask;
+ class VideoSourceProxy;
+
+ class VideoFrameInfo {
+ public:
+ VideoFrameInfo(int width,
+ int height,
+ bool is_texture)
+ : width(width),
+ height(height),
+ is_texture(is_texture) {}
+ int width;
+ int height;
+ bool is_texture;
+ int pixel_count() const { return width * height; }
+ };
+
+ void ConfigureEncoderOnTaskQueue(VideoEncoderConfig config,
+ size_t max_data_payload_length,
+ bool nack_enabled);
+ void ReconfigureEncoder();
+
+ void ConfigureQualityScaler();
+
+ // Implements VideoSinkInterface.
+ void OnFrame(const VideoFrame& video_frame) override;
+ void OnDiscardedFrame() override;
+
+ void EncodeVideoFrame(const VideoFrame& frame,
+ int64_t time_when_posted_in_ms);
+
+ // Implements EncodedImageCallback.
+ EncodedImageCallback::Result OnEncodedImage(
+ const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info,
+ const RTPFragmentationHeader* fragmentation) override;
+
+ void OnDroppedFrame(EncodedImageCallback::DropReason reason) override;
+
+ bool EncoderPaused() const;
+ void TraceFrameDropStart();
+ void TraceFrameDropEnd();
+
+ // Class holding adaptation information.
+ class AdaptCounter final {
+ public:
+ AdaptCounter();
+ ~AdaptCounter();
+
+ // Get number of adaptation downscales for |reason|.
+ AdaptCounts Counts(int reason) const;
+
+ std::string ToString() const;
+
+ void IncrementFramerate(int reason);
+ void IncrementResolution(int reason);
+ void DecrementFramerate(int reason);
+ void DecrementResolution(int reason);
+ void DecrementFramerate(int reason, int cur_fps);
+
+ // Gets the total number of downgrades (for all adapt reasons).
+ int FramerateCount() const;
+ int ResolutionCount() const;
+
+ // Gets the total number of downgrades for |reason|.
+ int FramerateCount(int reason) const;
+ int ResolutionCount(int reason) const;
+ int TotalCount(int reason) const;
+
+ private:
+ std::string ToString(const std::vector<int>& counters) const;
+ int Count(const std::vector<int>& counters) const;
+ void MoveCount(std::vector<int>* counters, int from_reason);
+
+ // Degradation counters holding number of framerate/resolution reductions
+ // per adapt reason.
+ std::vector<int> fps_counters_;
+ std::vector<int> resolution_counters_;
+ };
+
+ AdaptCounter& GetAdaptCounter() RTC_RUN_ON(&encoder_queue_);
+ const AdaptCounter& GetConstAdaptCounter() RTC_RUN_ON(&encoder_queue_);
+ void UpdateAdaptationStats(AdaptReason reason) RTC_RUN_ON(&encoder_queue_);
+ AdaptCounts GetActiveCounts(AdaptReason reason) RTC_RUN_ON(&encoder_queue_);
+
+ rtc::Event shutdown_event_;
+
+ const uint32_t number_of_cores_;
+ // Counts how many frames we've dropped in the initial rampup phase.
+ int initial_rampup_;
+
+ const std::unique_ptr<VideoSourceProxy> source_proxy_;
+ EncoderSink* sink_;
+ const VideoSendStream::Config::EncoderSettings settings_;
+ const VideoCodecType codec_type_;
+
+ vcm::VideoSender video_sender_ RTC_ACCESS_ON(&encoder_queue_);
+ std::unique_ptr<OveruseFrameDetector> overuse_detector_
+ RTC_ACCESS_ON(&encoder_queue_);
+ std::unique_ptr<QualityScaler> quality_scaler_ RTC_ACCESS_ON(&encoder_queue_);
+
+ SendStatisticsProxy* const stats_proxy_;
+ rtc::VideoSinkInterface<VideoFrame>* const pre_encode_callback_;
+ // |thread_checker_| checks that public methods that are related to lifetime
+ // of VideoStreamEncoder are called on the same thread.
+ rtc::ThreadChecker thread_checker_;
+
+ VideoEncoderConfig encoder_config_ RTC_ACCESS_ON(&encoder_queue_);
+ std::unique_ptr<VideoBitrateAllocator> rate_allocator_
+ RTC_ACCESS_ON(&encoder_queue_);
+ // The maximum frame rate of the current codec configuration, as determined
+ // at the last ReconfigureEncoder() call.
+ int max_framerate_ RTC_ACCESS_ON(&encoder_queue_);
+
+  // Set when ConfigureEncoder has been called in order to lazily reconfigure
+  // the encoder on the next frame.
+ bool pending_encoder_reconfiguration_ RTC_ACCESS_ON(&encoder_queue_);
+ rtc::Optional<VideoFrameInfo> last_frame_info_ RTC_ACCESS_ON(&encoder_queue_);
+ int crop_width_ RTC_ACCESS_ON(&encoder_queue_);
+ int crop_height_ RTC_ACCESS_ON(&encoder_queue_);
+ uint32_t encoder_start_bitrate_bps_ RTC_ACCESS_ON(&encoder_queue_);
+ size_t max_data_payload_length_ RTC_ACCESS_ON(&encoder_queue_);
+ bool nack_enabled_ RTC_ACCESS_ON(&encoder_queue_);
+ uint32_t last_observed_bitrate_bps_ RTC_ACCESS_ON(&encoder_queue_);
+ bool encoder_paused_and_dropped_frame_ RTC_ACCESS_ON(&encoder_queue_);
+ Clock* const clock_;
+ // Counters used for deciding if the video resolution or framerate is
+ // currently restricted, and if so, why, on a per degradation preference
+ // basis.
+ // TODO(sprang): Replace this with a state holding a relative overuse measure
+ // instead, that can be translated into suitable down-scale or fps limit.
+ std::map<const VideoSendStream::DegradationPreference, AdaptCounter>
+ adapt_counters_ RTC_ACCESS_ON(&encoder_queue_);
+ // Set depending on degradation preferences.
+ VideoSendStream::DegradationPreference degradation_preference_
+ RTC_ACCESS_ON(&encoder_queue_);
+
+ struct AdaptationRequest {
+ // The pixel count produced by the source at the time of the adaptation.
+ int input_pixel_count_;
+ // Framerate received from the source at the time of the adaptation.
+ int framerate_fps_;
+ // Indicates if request was to adapt up or down.
+ enum class Mode { kAdaptUp, kAdaptDown } mode_;
+ };
+ // Stores a snapshot of the last adaptation request triggered by an AdaptUp
+ // or AdaptDown signal.
+ rtc::Optional<AdaptationRequest> last_adaptation_request_
+ RTC_ACCESS_ON(&encoder_queue_);
+
+ rtc::RaceChecker incoming_frame_race_checker_
+ RTC_GUARDED_BY(incoming_frame_race_checker_);
+ std::atomic<int> posted_frames_waiting_for_encode_;
+ // Used to make sure incoming time stamp is increasing for every frame.
+ int64_t last_captured_timestamp_ RTC_GUARDED_BY(incoming_frame_race_checker_);
+ // Delta used for translating between NTP and internal timestamps.
+ const int64_t delta_ntp_internal_ms_
+ RTC_GUARDED_BY(incoming_frame_race_checker_);
+
+ int64_t last_frame_log_ms_ RTC_GUARDED_BY(incoming_frame_race_checker_);
+ int captured_frame_count_ RTC_ACCESS_ON(&encoder_queue_);
+ int dropped_frame_count_ RTC_ACCESS_ON(&encoder_queue_);
+
+ VideoBitrateAllocationObserver* bitrate_observer_
+ RTC_ACCESS_ON(&encoder_queue_);
+ rtc::Optional<int64_t> last_parameters_update_ms_
+ RTC_ACCESS_ON(&encoder_queue_);
+
+  // All public methods are proxied to |encoder_queue_|. It must be destroyed
+  // first to make sure no tasks are run that use other members.
+ rtc::TaskQueue encoder_queue_;
+
+ RTC_DISALLOW_COPY_AND_ASSIGN(VideoStreamEncoder);
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_VIDEO_STREAM_ENCODER_H_
diff --git a/third_party/libwebrtc/webrtc/video/video_stream_encoder_unittest.cc b/third_party/libwebrtc/webrtc/video/video_stream_encoder_unittest.cc
new file mode 100644
index 0000000000..5afbe27d2b
--- /dev/null
+++ b/third_party/libwebrtc/webrtc/video/video_stream_encoder_unittest.cc
@@ -0,0 +1,3280 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <algorithm>
+#include <limits>
+#include <utility>
+
+#include "api/video/i420_buffer.h"
+#include "media/base/videoadapter.h"
+#include "modules/video_coding/codecs/vp8/temporal_layers.h"
+#include "modules/video_coding/utility/default_video_bitrate_allocator.h"
+#include "rtc_base/fakeclock.h"
+#include "rtc_base/logging.h"
+#include "system_wrappers/include/metrics_default.h"
+#include "system_wrappers/include/sleep.h"
+#include "test/encoder_settings.h"
+#include "test/fake_encoder.h"
+#include "test/frame_generator.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "video/send_statistics_proxy.h"
+#include "video/video_stream_encoder.h"
+
+namespace {
+const int kMinPixelsPerFrame = 320 * 180;
+const int kMinFramerateFps = 2;
+const int64_t kFrameTimeoutMs = 100;
+const unsigned char kNumSlDummy = 0;
+} // namespace
+
+namespace webrtc {
+
+using DegredationPreference = VideoSendStream::DegradationPreference;
+using ScaleReason = AdaptationObserverInterface::AdaptReason;
+using ::testing::_;
+using ::testing::Return;
+
+namespace {
+const size_t kMaxPayloadLength = 1440;
+const int kTargetBitrateBps = 1000000;
+const int kLowTargetBitrateBps = kTargetBitrateBps / 10;
+const int kMaxInitialFramedrop = 4;
+const int kDefaultFramerate = 30;
+
+class TestBuffer : public webrtc::I420Buffer {
+ public:
+ TestBuffer(rtc::Event* event, int width, int height)
+ : I420Buffer(width, height), event_(event) {}
+
+ private:
+ friend class rtc::RefCountedObject<TestBuffer>;
+ ~TestBuffer() override {
+ if (event_)
+ event_->Set();
+ }
+ rtc::Event* const event_;
+};
+
+class CpuOveruseDetectorProxy : public OveruseFrameDetector {
+ public:
+ CpuOveruseDetectorProxy(const CpuOveruseOptions& options,
+ AdaptationObserverInterface* overuse_observer,
+ EncodedFrameObserver* encoder_timing_,
+ CpuOveruseMetricsObserver* metrics_observer)
+ : OveruseFrameDetector(options,
+ overuse_observer,
+ encoder_timing_,
+ metrics_observer),
+ last_target_framerate_fps_(-1) {}
+ virtual ~CpuOveruseDetectorProxy() {}
+
+ void OnTargetFramerateUpdated(int framerate_fps) override {
+ rtc::CritScope cs(&lock_);
+ last_target_framerate_fps_ = framerate_fps;
+ OveruseFrameDetector::OnTargetFramerateUpdated(framerate_fps);
+ }
+
+ int GetLastTargetFramerate() {
+ rtc::CritScope cs(&lock_);
+ return last_target_framerate_fps_;
+ }
+
+ private:
+ rtc::CriticalSection lock_;
+ int last_target_framerate_fps_ RTC_GUARDED_BY(lock_);
+};
+
+class VideoStreamEncoderUnderTest : public VideoStreamEncoder {
+ public:
+ VideoStreamEncoderUnderTest(SendStatisticsProxy* stats_proxy,
+ const VideoSendStream::Config::EncoderSettings& settings)
+ : VideoStreamEncoder(
+ 1 /* number_of_cores */,
+ stats_proxy,
+ settings,
+ nullptr /* pre_encode_callback */,
+ nullptr /* encoder_timing */,
+ std::unique_ptr<OveruseFrameDetector>(
+ overuse_detector_proxy_ = new CpuOveruseDetectorProxy(
+ GetCpuOveruseOptions(settings.full_overuse_time),
+ this,
+ nullptr,
+ stats_proxy))) {}
+
+ void PostTaskAndWait(bool down, AdaptReason reason) {
+ rtc::Event event(false, false);
+ encoder_queue()->PostTask([this, &event, reason, down] {
+ down ? AdaptDown(reason) : AdaptUp(reason);
+ event.Set();
+ });
+ ASSERT_TRUE(event.Wait(5000));
+ }
+
+ // This is used as a synchronisation mechanism, to make sure that the
+ // encoder queue is not blocked before we start sending it frames.
+ void WaitUntilTaskQueueIsIdle() {
+ rtc::Event event(false, false);
+ encoder_queue()->PostTask([&event] {
+ event.Set();
+ });
+ ASSERT_TRUE(event.Wait(5000));
+ }
+
+ void TriggerCpuOveruse() { PostTaskAndWait(true, AdaptReason::kCpu); }
+
+ void TriggerCpuNormalUsage() { PostTaskAndWait(false, AdaptReason::kCpu); }
+
+ void TriggerQualityLow() { PostTaskAndWait(true, AdaptReason::kQuality); }
+
+ void TriggerQualityHigh() { PostTaskAndWait(false, AdaptReason::kQuality); }
+
+ CpuOveruseDetectorProxy* overuse_detector_proxy_;
+};
+
+class VideoStreamFactory
+ : public VideoEncoderConfig::VideoStreamFactoryInterface {
+ public:
+ explicit VideoStreamFactory(size_t num_temporal_layers, int framerate)
+ : num_temporal_layers_(num_temporal_layers), framerate_(framerate) {
+ EXPECT_GT(num_temporal_layers, 0u);
+ EXPECT_GT(framerate, 0);
+ }
+
+ private:
+ std::vector<VideoStream> CreateEncoderStreams(
+ int width,
+ int height,
+ const VideoEncoderConfig& encoder_config) override {
+ std::vector<VideoStream> streams =
+ test::CreateVideoStreams(width, height, encoder_config);
+ for (VideoStream& stream : streams) {
+ stream.temporal_layer_thresholds_bps.resize(num_temporal_layers_ - 1);
+ stream.max_framerate = framerate_;
+ }
+ return streams;
+ }
+
+ const size_t num_temporal_layers_;
+ const int framerate_;
+};
+
+class AdaptingFrameForwarder : public test::FrameForwarder {
+ public:
+ AdaptingFrameForwarder() : adaptation_enabled_(false) {}
+ ~AdaptingFrameForwarder() override {}
+
+ void set_adaptation_enabled(bool enabled) {
+ rtc::CritScope cs(&crit_);
+ adaptation_enabled_ = enabled;
+ }
+
+ bool adaption_enabled() const {
+ rtc::CritScope cs(&crit_);
+ return adaptation_enabled_;
+ }
+
+ rtc::VideoSinkWants last_wants() const {
+ rtc::CritScope cs(&crit_);
+ return last_wants_;
+ }
+
+ void IncomingCapturedFrame(const VideoFrame& video_frame) override {
+ int cropped_width = 0;
+ int cropped_height = 0;
+ int out_width = 0;
+ int out_height = 0;
+ if (adaption_enabled()) {
+ if (adapter_.AdaptFrameResolution(
+ video_frame.width(), video_frame.height(),
+ video_frame.timestamp_us() * 1000, &cropped_width,
+ &cropped_height, &out_width, &out_height)) {
+ VideoFrame adapted_frame(new rtc::RefCountedObject<TestBuffer>(
+ nullptr, out_width, out_height),
+ 99, 99, kVideoRotation_0);
+ adapted_frame.set_ntp_time_ms(video_frame.ntp_time_ms());
+ test::FrameForwarder::IncomingCapturedFrame(adapted_frame);
+ }
+ } else {
+ test::FrameForwarder::IncomingCapturedFrame(video_frame);
+ }
+ }
+
+ void AddOrUpdateSink(rtc::VideoSinkInterface<VideoFrame>* sink,
+ const rtc::VideoSinkWants& wants) override {
+ rtc::CritScope cs(&crit_);
+ last_wants_ = sink_wants();
+ adapter_.OnResolutionFramerateRequest(wants.target_pixel_count,
+ wants.max_pixel_count,
+ wants.max_framerate_fps);
+ test::FrameForwarder::AddOrUpdateSink(sink, wants);
+ }
+ cricket::VideoAdapter adapter_;
+ bool adaptation_enabled_ RTC_GUARDED_BY(crit_);
+ rtc::VideoSinkWants last_wants_ RTC_GUARDED_BY(crit_);
+};
+
+class MockableSendStatisticsProxy : public SendStatisticsProxy {
+ public:
+ MockableSendStatisticsProxy(Clock* clock,
+ const VideoSendStream::Config& config,
+ VideoEncoderConfig::ContentType content_type)
+ : SendStatisticsProxy(clock, config, content_type) {}
+
+ VideoSendStream::Stats GetStats() override {
+ rtc::CritScope cs(&lock_);
+ if (mock_stats_)
+ return *mock_stats_;
+ return SendStatisticsProxy::GetStats();
+ }
+
+ void SetMockStats(const VideoSendStream::Stats& stats) {
+ rtc::CritScope cs(&lock_);
+ mock_stats_.emplace(stats);
+ }
+
+ void ResetMockStats() {
+ rtc::CritScope cs(&lock_);
+ mock_stats_.reset();
+ }
+
+ private:
+ rtc::CriticalSection lock_;
+ rtc::Optional<VideoSendStream::Stats> mock_stats_ RTC_GUARDED_BY(lock_);
+};
+
+class MockBitrateObserver : public VideoBitrateAllocationObserver {
+ public:
+ MOCK_METHOD1(OnBitrateAllocationUpdated, void(const BitrateAllocation&));
+};
+
+} // namespace
+
+class VideoStreamEncoderTest : public ::testing::Test {
+ public:
+ static const int kDefaultTimeoutMs = 30 * 1000;
+
+ VideoStreamEncoderTest()
+ : video_send_config_(VideoSendStream::Config(nullptr)),
+ codec_width_(320),
+ codec_height_(240),
+ max_framerate_(30),
+ fake_encoder_(),
+ stats_proxy_(new MockableSendStatisticsProxy(
+ Clock::GetRealTimeClock(),
+ video_send_config_,
+ webrtc::VideoEncoderConfig::ContentType::kRealtimeVideo)),
+ sink_(&fake_encoder_) {}
+
+ void SetUp() override {
+ metrics::Reset();
+ video_send_config_ = VideoSendStream::Config(nullptr);
+ video_send_config_.encoder_settings.encoder = &fake_encoder_;
+ video_send_config_.encoder_settings.payload_name = "FAKE";
+ video_send_config_.encoder_settings.payload_type = 125;
+
+ VideoEncoderConfig video_encoder_config;
+ test::FillEncoderConfiguration(1, &video_encoder_config);
+ video_encoder_config.video_stream_factory =
+ new rtc::RefCountedObject<VideoStreamFactory>(1, max_framerate_);
+ video_encoder_config_ = video_encoder_config.Copy();
+
+ // Framerate limit is specified by the VideoStreamFactory.
+ std::vector<VideoStream> streams =
+ video_encoder_config.video_stream_factory->CreateEncoderStreams(
+ codec_width_, codec_height_, video_encoder_config);
+ max_framerate_ = streams[0].max_framerate;
+ fake_clock_.SetTimeMicros(1234);
+
+ ConfigureEncoder(std::move(video_encoder_config), true /* nack_enabled */);
+ }
+
+ void ConfigureEncoder(VideoEncoderConfig video_encoder_config,
+ bool nack_enabled) {
+ if (video_stream_encoder_)
+ video_stream_encoder_->Stop();
+ video_stream_encoder_.reset(new VideoStreamEncoderUnderTest(
+ stats_proxy_.get(), video_send_config_.encoder_settings));
+ video_stream_encoder_->SetSink(&sink_, false /* rotation_applied */);
+ video_stream_encoder_->SetSource(
+ &video_source_,
+ VideoSendStream::DegradationPreference::kMaintainFramerate);
+ video_stream_encoder_->SetStartBitrate(kTargetBitrateBps);
+ video_stream_encoder_->ConfigureEncoder(std::move(video_encoder_config),
+ kMaxPayloadLength, nack_enabled);
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+ }
+
+ void ResetEncoder(const std::string& payload_name,
+ size_t num_streams,
+ size_t num_temporal_layers,
+ unsigned char num_spatial_layers,
+ bool nack_enabled,
+ bool screenshare) {
+ video_send_config_.encoder_settings.payload_name = payload_name;
+
+ VideoEncoderConfig video_encoder_config;
+ video_encoder_config.number_of_streams = num_streams;
+ video_encoder_config.max_bitrate_bps = kTargetBitrateBps;
+ video_encoder_config.video_stream_factory =
+ new rtc::RefCountedObject<VideoStreamFactory>(num_temporal_layers,
+ kDefaultFramerate);
+ video_encoder_config.content_type =
+ screenshare ? VideoEncoderConfig::ContentType::kScreen
+ : VideoEncoderConfig::ContentType::kRealtimeVideo;
+ if (payload_name == "VP9") {
+ VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings();
+ vp9_settings.numberOfSpatialLayers = num_spatial_layers;
+ video_encoder_config.encoder_specific_settings =
+ new rtc::RefCountedObject<
+ VideoEncoderConfig::Vp9EncoderSpecificSettings>(vp9_settings);
+ }
+ ConfigureEncoder(std::move(video_encoder_config), nack_enabled);
+ }
+
+ VideoFrame CreateFrame(int64_t ntp_time_ms,
+ rtc::Event* destruction_event) const {
+ VideoFrame frame(new rtc::RefCountedObject<TestBuffer>(
+ destruction_event, codec_width_, codec_height_),
+ 99, 99, kVideoRotation_0);
+ frame.set_ntp_time_ms(ntp_time_ms);
+ return frame;
+ }
+
+ VideoFrame CreateFrame(int64_t ntp_time_ms, int width, int height) const {
+ VideoFrame frame(
+ new rtc::RefCountedObject<TestBuffer>(nullptr, width, height), 99, 99,
+ kVideoRotation_0);
+ frame.set_ntp_time_ms(ntp_time_ms);
+ frame.set_timestamp_us(ntp_time_ms * 1000);
+ return frame;
+ }
+
+ void VerifyNoLimitation(const rtc::VideoSinkWants& wants) {
+ EXPECT_EQ(std::numeric_limits<int>::max(), wants.max_framerate_fps);
+ EXPECT_EQ(std::numeric_limits<int>::max(), wants.max_pixel_count);
+ EXPECT_FALSE(wants.target_pixel_count);
+ }
+
+ void VerifyFpsEqResolutionEq(const rtc::VideoSinkWants& wants1,
+ const rtc::VideoSinkWants& wants2) {
+ EXPECT_EQ(wants1.max_framerate_fps, wants2.max_framerate_fps);
+ EXPECT_EQ(wants1.max_pixel_count, wants2.max_pixel_count);
+ }
+
+ void VerifyFpsMaxResolutionLt(const rtc::VideoSinkWants& wants1,
+ const rtc::VideoSinkWants& wants2) {
+ EXPECT_EQ(std::numeric_limits<int>::max(), wants1.max_framerate_fps);
+ EXPECT_LT(wants1.max_pixel_count, wants2.max_pixel_count);
+ EXPECT_GT(wants1.max_pixel_count, 0);
+ }
+
+ void VerifyFpsMaxResolutionGt(const rtc::VideoSinkWants& wants1,
+ const rtc::VideoSinkWants& wants2) {
+ EXPECT_EQ(std::numeric_limits<int>::max(), wants1.max_framerate_fps);
+ EXPECT_GT(wants1.max_pixel_count, wants2.max_pixel_count);
+ }
+
+ void VerifyFpsMaxResolutionEq(const rtc::VideoSinkWants& wants1,
+ const rtc::VideoSinkWants& wants2) {
+ EXPECT_EQ(std::numeric_limits<int>::max(), wants1.max_framerate_fps);
+ EXPECT_EQ(wants1.max_pixel_count, wants2.max_pixel_count);
+ }
+
+ void VerifyFpsLtResolutionEq(const rtc::VideoSinkWants& wants1,
+ const rtc::VideoSinkWants& wants2) {
+ EXPECT_LT(wants1.max_framerate_fps, wants2.max_framerate_fps);
+ EXPECT_EQ(wants1.max_pixel_count, wants2.max_pixel_count);
+ }
+
+ void VerifyFpsGtResolutionEq(const rtc::VideoSinkWants& wants1,
+ const rtc::VideoSinkWants& wants2) {
+ EXPECT_GT(wants1.max_framerate_fps, wants2.max_framerate_fps);
+ EXPECT_EQ(wants1.max_pixel_count, wants2.max_pixel_count);
+ }
+
+ void VerifyFpsEqResolutionLt(const rtc::VideoSinkWants& wants1,
+ const rtc::VideoSinkWants& wants2) {
+ EXPECT_EQ(wants1.max_framerate_fps, wants2.max_framerate_fps);
+ EXPECT_LT(wants1.max_pixel_count, wants2.max_pixel_count);
+ EXPECT_GT(wants1.max_pixel_count, 0);
+ }
+
+ void VerifyFpsEqResolutionGt(const rtc::VideoSinkWants& wants1,
+ const rtc::VideoSinkWants& wants2) {
+ EXPECT_EQ(wants1.max_framerate_fps, wants2.max_framerate_fps);
+ EXPECT_GT(wants1.max_pixel_count, wants2.max_pixel_count);
+ }
+
+ void VerifyFpsMaxResolutionLt(const rtc::VideoSinkWants& wants,
+ int pixel_count) {
+ EXPECT_EQ(std::numeric_limits<int>::max(), wants.max_framerate_fps);
+ EXPECT_LT(wants.max_pixel_count, pixel_count);
+ EXPECT_GT(wants.max_pixel_count, 0);
+ }
+
+ void VerifyFpsLtResolutionMax(const rtc::VideoSinkWants& wants, int fps) {
+ EXPECT_LT(wants.max_framerate_fps, fps);
+ EXPECT_EQ(std::numeric_limits<int>::max(), wants.max_pixel_count);
+ EXPECT_FALSE(wants.target_pixel_count);
+ }
+
+ void VerifyFpsEqResolutionMax(const rtc::VideoSinkWants& wants,
+ int expected_fps) {
+ EXPECT_EQ(expected_fps, wants.max_framerate_fps);
+ EXPECT_EQ(std::numeric_limits<int>::max(), wants.max_pixel_count);
+ EXPECT_FALSE(wants.target_pixel_count);
+ }
+
+ void WaitForEncodedFrame(int64_t expected_ntp_time) {
+ sink_.WaitForEncodedFrame(expected_ntp_time);
+ fake_clock_.AdvanceTimeMicros(rtc::kNumMicrosecsPerSec / max_framerate_);
+ }
+
+ bool TimedWaitForEncodedFrame(int64_t expected_ntp_time, int64_t timeout_ms) {
+ bool ok = sink_.TimedWaitForEncodedFrame(expected_ntp_time, timeout_ms);
+ fake_clock_.AdvanceTimeMicros(rtc::kNumMicrosecsPerSec / max_framerate_);
+ return ok;
+ }
+
+ void WaitForEncodedFrame(uint32_t expected_width, uint32_t expected_height) {
+ sink_.WaitForEncodedFrame(expected_width, expected_height);
+ fake_clock_.AdvanceTimeMicros(rtc::kNumMicrosecsPerSec / max_framerate_);
+ }
+
+ void ExpectDroppedFrame() {
+ sink_.ExpectDroppedFrame();
+ fake_clock_.AdvanceTimeMicros(rtc::kNumMicrosecsPerSec / max_framerate_);
+ }
+
+ bool WaitForFrame(int64_t timeout_ms) {
+ bool ok = sink_.WaitForFrame(timeout_ms);
+ fake_clock_.AdvanceTimeMicros(rtc::kNumMicrosecsPerSec / max_framerate_);
+ return ok;
+ }
+
+ class TestEncoder : public test::FakeEncoder {
+ public:
+ TestEncoder()
+ : FakeEncoder(Clock::GetRealTimeClock()),
+ continue_encode_event_(false, false) {}
+
+ VideoCodec codec_config() const {
+ rtc::CritScope lock(&crit_sect_);
+ return config_;
+ }
+
+ void BlockNextEncode() {
+ rtc::CritScope lock(&local_crit_sect_);
+ block_next_encode_ = true;
+ }
+
+ VideoEncoder::ScalingSettings GetScalingSettings() const override {
+ rtc::CritScope lock(&local_crit_sect_);
+ if (quality_scaling_)
+ return VideoEncoder::ScalingSettings(true, 1, 2, kMinPixelsPerFrame);
+ return VideoEncoder::ScalingSettings(false);
+ }
+
+ void ContinueEncode() { continue_encode_event_.Set(); }
+
+ void CheckLastTimeStampsMatch(int64_t ntp_time_ms,
+ uint32_t timestamp) const {
+ rtc::CritScope lock(&local_crit_sect_);
+ EXPECT_EQ(timestamp_, timestamp);
+ EXPECT_EQ(ntp_time_ms_, ntp_time_ms);
+ }
+
+ void SetQualityScaling(bool b) {
+ rtc::CritScope lock(&local_crit_sect_);
+ quality_scaling_ = b;
+ }
+
+ void ForceInitEncodeFailure(bool force_failure) {
+ rtc::CritScope lock(&local_crit_sect_);
+ force_init_encode_failed_ = force_failure;
+ }
+
+ private:
+ int32_t Encode(const VideoFrame& input_image,
+ const CodecSpecificInfo* codec_specific_info,
+ const std::vector<FrameType>* frame_types) override {
+ bool block_encode;
+ {
+ rtc::CritScope lock(&local_crit_sect_);
+ EXPECT_GT(input_image.timestamp(), timestamp_);
+ EXPECT_GT(input_image.ntp_time_ms(), ntp_time_ms_);
+ EXPECT_EQ(input_image.timestamp(), input_image.ntp_time_ms() * 90);
+
+ timestamp_ = input_image.timestamp();
+ ntp_time_ms_ = input_image.ntp_time_ms();
+ last_input_width_ = input_image.width();
+ last_input_height_ = input_image.height();
+ block_encode = block_next_encode_;
+ block_next_encode_ = false;
+ }
+ int32_t result =
+ FakeEncoder::Encode(input_image, codec_specific_info, frame_types);
+ if (block_encode)
+ EXPECT_TRUE(continue_encode_event_.Wait(kDefaultTimeoutMs));
+ return result;
+ }
+
+ int32_t InitEncode(const VideoCodec* config,
+ int32_t number_of_cores,
+ size_t max_payload_size) override {
+ int res =
+ FakeEncoder::InitEncode(config, number_of_cores, max_payload_size);
+ rtc::CritScope lock(&local_crit_sect_);
+ if (config->codecType == kVideoCodecVP8 && config->VP8().tl_factory) {
+ // Simulate setting up temporal layers, in order to validate the life
+ // cycle of these objects.
+ int num_streams = std::max<int>(1, config->numberOfSimulcastStreams);
+ int num_temporal_layers =
+ std::max<int>(1, config->VP8().numberOfTemporalLayers);
+ for (int i = 0; i < num_streams; ++i) {
+ allocated_temporal_layers_.emplace_back(
+ config->VP8().tl_factory->Create(i, num_temporal_layers, 42));
+ }
+ }
+ if (force_init_encode_failed_)
+ return -1;
+ return res;
+ }
+
+ rtc::CriticalSection local_crit_sect_;
+ bool block_next_encode_ RTC_GUARDED_BY(local_crit_sect_) = false;
+ rtc::Event continue_encode_event_;
+ uint32_t timestamp_ RTC_GUARDED_BY(local_crit_sect_) = 0;
+ int64_t ntp_time_ms_ RTC_GUARDED_BY(local_crit_sect_) = 0;
+ int last_input_width_ RTC_GUARDED_BY(local_crit_sect_) = 0;
+ int last_input_height_ RTC_GUARDED_BY(local_crit_sect_) = 0;
+ bool quality_scaling_ RTC_GUARDED_BY(local_crit_sect_) = true;
+ std::vector<std::unique_ptr<TemporalLayers>> allocated_temporal_layers_
+ RTC_GUARDED_BY(local_crit_sect_);
+ bool force_init_encode_failed_ RTC_GUARDED_BY(local_crit_sect_) = false;
+ };
+
+ class TestSink : public VideoStreamEncoder::EncoderSink {
+ public:
+ explicit TestSink(TestEncoder* test_encoder)
+ : test_encoder_(test_encoder), encoded_frame_event_(false, false) {}
+
+ void WaitForEncodedFrame(int64_t expected_ntp_time) {
+ EXPECT_TRUE(
+ TimedWaitForEncodedFrame(expected_ntp_time, kDefaultTimeoutMs));
+ }
+
+ bool TimedWaitForEncodedFrame(int64_t expected_ntp_time,
+ int64_t timeout_ms) {
+ uint32_t timestamp = 0;
+ if (!encoded_frame_event_.Wait(timeout_ms))
+ return false;
+ {
+ rtc::CritScope lock(&crit_);
+ timestamp = last_timestamp_;
+ }
+ test_encoder_->CheckLastTimeStampsMatch(expected_ntp_time, timestamp);
+ return true;
+ }
+
+ void WaitForEncodedFrame(uint32_t expected_width,
+ uint32_t expected_height) {
+ EXPECT_TRUE(encoded_frame_event_.Wait(kDefaultTimeoutMs));
+ CheckLastFrameSizeMathces(expected_width, expected_height);
+ }
+
+ void CheckLastFrameSizeMathces(uint32_t expected_width,
+ uint32_t expected_height) {
+ uint32_t width = 0;
+ uint32_t height = 0;
+ {
+ rtc::CritScope lock(&crit_);
+ width = last_width_;
+ height = last_height_;
+ }
+ EXPECT_EQ(expected_height, height);
+ EXPECT_EQ(expected_width, width);
+ }
+
+ void ExpectDroppedFrame() { EXPECT_FALSE(encoded_frame_event_.Wait(100)); }
+
+ bool WaitForFrame(int64_t timeout_ms) {
+ return encoded_frame_event_.Wait(timeout_ms);
+ }
+
+ void SetExpectNoFrames() {
+ rtc::CritScope lock(&crit_);
+ expect_frames_ = false;
+ }
+
+ int number_of_reconfigurations() const {
+ rtc::CritScope lock(&crit_);
+ return number_of_reconfigurations_;
+ }
+
+ int last_min_transmit_bitrate() const {
+ rtc::CritScope lock(&crit_);
+ return min_transmit_bitrate_bps_;
+ }
+
+ private:
+ Result OnEncodedImage(
+ const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info,
+ const RTPFragmentationHeader* fragmentation) override {
+ rtc::CritScope lock(&crit_);
+ EXPECT_TRUE(expect_frames_);
+ last_timestamp_ = encoded_image._timeStamp;
+ last_width_ = encoded_image._encodedWidth;
+ last_height_ = encoded_image._encodedHeight;
+ encoded_frame_event_.Set();
+ return Result(Result::OK, last_timestamp_);
+ }
+
+ void OnEncoderConfigurationChanged(std::vector<VideoStream> streams,
+ int min_transmit_bitrate_bps) override {
+      rtc::CritScope lock(&crit_);
+ ++number_of_reconfigurations_;
+ min_transmit_bitrate_bps_ = min_transmit_bitrate_bps;
+ }
+
+ rtc::CriticalSection crit_;
+ TestEncoder* test_encoder_;
+ rtc::Event encoded_frame_event_;
+ uint32_t last_timestamp_ = 0;
+ uint32_t last_height_ = 0;
+ uint32_t last_width_ = 0;
+ bool expect_frames_ = true;
+ int number_of_reconfigurations_ = 0;
+ int min_transmit_bitrate_bps_ = 0;
+ };
+
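+  // Fixture state: the fake encoder and sink wired up to the encoder under
+  // test, plus an adapting frame source and a fake clock.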
+ VideoSendStream::Config video_send_config_;
+ VideoEncoderConfig video_encoder_config_;
+ int codec_width_;
+ int codec_height_;
+ int max_framerate_;
+ TestEncoder fake_encoder_;
+ std::unique_ptr<MockableSendStatisticsProxy> stats_proxy_;
+ TestSink sink_;
+ AdaptingFrameForwarder video_source_;
+ std::unique_ptr<VideoStreamEncoderUnderTest> video_stream_encoder_;
+ rtc::ScopedFakeClock fake_clock_;
+};
+
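+// The tests below drive encoding by pushing frames into the test source and
+// waiting on the sink until the encoder thread has processed them.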
+TEST_F(VideoStreamEncoderTest, EncodeOneFrame) {
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+ rtc::Event frame_destroyed_event(false, false);
+ video_source_.IncomingCapturedFrame(CreateFrame(1, &frame_destroyed_event));
+ WaitForEncodedFrame(1);
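+  // Once the frame has been encoded, the encoder should release its reference
+  // to the frame buffer, signaling the destruction event.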
+ EXPECT_TRUE(frame_destroyed_event.Wait(kDefaultTimeoutMs));
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, DropsFramesBeforeFirstOnBitrateUpdated) {
+ // Dropped since no target bitrate has been set.
+ rtc::Event frame_destroyed_event(false, false);
+ video_source_.IncomingCapturedFrame(CreateFrame(1, &frame_destroyed_event));
+ EXPECT_TRUE(frame_destroyed_event.Wait(kDefaultTimeoutMs));
+
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+
+ video_source_.IncomingCapturedFrame(CreateFrame(2, nullptr));
+ WaitForEncodedFrame(2);
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, DropsFramesWhenRateSetToZero) {
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+ video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
+ WaitForEncodedFrame(1);
+
+ video_stream_encoder_->OnBitrateUpdated(0, 0, 0);
+ // Dropped since bitrate is zero.
+ video_source_.IncomingCapturedFrame(CreateFrame(2, nullptr));
+
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+ video_source_.IncomingCapturedFrame(CreateFrame(3, nullptr));
+ WaitForEncodedFrame(3);
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, DropsFramesWithSameOrOldNtpTimestamp) {
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+ video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
+ WaitForEncodedFrame(1);
+
+ // This frame will be dropped since it has the same ntp timestamp.
+ video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
+
+ video_source_.IncomingCapturedFrame(CreateFrame(2, nullptr));
+ WaitForEncodedFrame(2);
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, DropsFrameAfterStop) {
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+
+ video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
+ WaitForEncodedFrame(1);
+
+ video_stream_encoder_->Stop();
+ sink_.SetExpectNoFrames();
+ rtc::Event frame_destroyed_event(false, false);
+ video_source_.IncomingCapturedFrame(CreateFrame(2, &frame_destroyed_event));
+ EXPECT_TRUE(frame_destroyed_event.Wait(kDefaultTimeoutMs));
+}
+
+TEST_F(VideoStreamEncoderTest, DropsPendingFramesOnSlowEncode) {
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+
+ fake_encoder_.BlockNextEncode();
+ video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
+ WaitForEncodedFrame(1);
+ // Here, the encoder thread will be blocked in the TestEncoder waiting for a
+ // call to ContinueEncode.
+ video_source_.IncomingCapturedFrame(CreateFrame(2, nullptr));
+ video_source_.IncomingCapturedFrame(CreateFrame(3, nullptr));
+ fake_encoder_.ContinueEncode();
+ WaitForEncodedFrame(3);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ ConfigureEncoderTriggersOnEncoderConfigurationChanged) {
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+ EXPECT_EQ(0, sink_.number_of_reconfigurations());
+
+ // Capture a frame and wait for it to synchronize with the encoder thread.
+ video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
+ WaitForEncodedFrame(1);
+ // The encoder will have been configured once when the first frame is
+ // received.
+ EXPECT_EQ(1, sink_.number_of_reconfigurations());
+
+ VideoEncoderConfig video_encoder_config;
+ test::FillEncoderConfiguration(1, &video_encoder_config);
+ video_encoder_config.min_transmit_bitrate_bps = 9999;
+ video_stream_encoder_->ConfigureEncoder(std::move(video_encoder_config),
+ kMaxPayloadLength,
+ true /* nack_enabled */);
+
+ // Capture a frame and wait for it to synchronize with the encoder thread.
+ video_source_.IncomingCapturedFrame(CreateFrame(2, nullptr));
+ WaitForEncodedFrame(2);
+ EXPECT_EQ(2, sink_.number_of_reconfigurations());
+ EXPECT_EQ(9999, sink_.last_min_transmit_bitrate());
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, FrameResolutionChangeReconfigureEncoder) {
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+
+ // Capture a frame and wait for it to synchronize with the encoder thread.
+ video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
+ WaitForEncodedFrame(1);
+ // The encoder will have been configured once.
+ EXPECT_EQ(1, sink_.number_of_reconfigurations());
+ EXPECT_EQ(codec_width_, fake_encoder_.codec_config().width);
+ EXPECT_EQ(codec_height_, fake_encoder_.codec_config().height);
+
+ codec_width_ *= 2;
+ codec_height_ *= 2;
+ // Capture a frame with a higher resolution and wait for it to synchronize
+ // with the encoder thread.
+ video_source_.IncomingCapturedFrame(CreateFrame(2, nullptr));
+ WaitForEncodedFrame(2);
+ EXPECT_EQ(codec_width_, fake_encoder_.codec_config().width);
+ EXPECT_EQ(codec_height_, fake_encoder_.codec_config().height);
+ EXPECT_EQ(2, sink_.number_of_reconfigurations());
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, Vp8ResilienceIsOffFor1S1TLWithNackEnabled) {
+ const bool kNackEnabled = true;
+ const size_t kNumStreams = 1;
+ const size_t kNumTl = 1;
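+  // The spatial-layer count only applies to VP9, so a dummy value is passed
+  // for VP8.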
+ ResetEncoder("VP8", kNumStreams, kNumTl, kNumSlDummy, kNackEnabled, false);
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+
+ // Capture a frame and wait for it to synchronize with the encoder thread.
+ video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
+ WaitForEncodedFrame(1);
+  // The encoder has been configured once when the first frame is received.
+ EXPECT_EQ(1, sink_.number_of_reconfigurations());
+ EXPECT_EQ(kVideoCodecVP8, fake_encoder_.codec_config().codecType);
+ EXPECT_EQ(kNumStreams, fake_encoder_.codec_config().numberOfSimulcastStreams);
+ EXPECT_EQ(kNumTl, fake_encoder_.codec_config().VP8()->numberOfTemporalLayers);
+  // Resilience is off when there are no temporal layers and nack is on.
+ EXPECT_EQ(kResilienceOff, fake_encoder_.codec_config().VP8()->resilience);
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, Vp8ResilienceIsOffFor2S1TlWithNackEnabled) {
+ const bool kNackEnabled = true;
+ const size_t kNumStreams = 2;
+ const size_t kNumTl = 1;
+ ResetEncoder("VP8", kNumStreams, kNumTl, kNumSlDummy, kNackEnabled, false);
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+
+ // Capture a frame and wait for it to synchronize with the encoder thread.
+ video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
+ WaitForEncodedFrame(1);
+  // The encoder has been configured once when the first frame is received.
+ EXPECT_EQ(1, sink_.number_of_reconfigurations());
+ EXPECT_EQ(kVideoCodecVP8, fake_encoder_.codec_config().codecType);
+ EXPECT_EQ(kNumStreams, fake_encoder_.codec_config().numberOfSimulcastStreams);
+ EXPECT_EQ(kNumTl, fake_encoder_.codec_config().VP8()->numberOfTemporalLayers);
+  // Resilience is off with no temporal layers and >1 streams when nack is on.
+ EXPECT_EQ(kResilienceOff, fake_encoder_.codec_config().VP8()->resilience);
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, Vp8ResilienceIsOnFor1S1TLWithNackDisabled) {
+ const bool kNackEnabled = false;
+ const size_t kNumStreams = 1;
+ const size_t kNumTl = 1;
+ ResetEncoder("VP8", kNumStreams, kNumTl, kNumSlDummy, kNackEnabled, false);
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+
+ // Capture a frame and wait for it to synchronize with the encoder thread.
+ video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
+ WaitForEncodedFrame(1);
+  // The encoder has been configured once when the first frame is received.
+ EXPECT_EQ(1, sink_.number_of_reconfigurations());
+ EXPECT_EQ(kVideoCodecVP8, fake_encoder_.codec_config().codecType);
+ EXPECT_EQ(kNumStreams, fake_encoder_.codec_config().numberOfSimulcastStreams);
+ EXPECT_EQ(kNumTl, fake_encoder_.codec_config().VP8()->numberOfTemporalLayers);
+  // Resilience is on when there are no temporal layers but nack is off.
+ EXPECT_EQ(kResilientStream, fake_encoder_.codec_config().VP8()->resilience);
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, Vp8ResilienceIsOnFor1S2TlWithNackEnabled) {
+ const bool kNackEnabled = true;
+ const size_t kNumStreams = 1;
+ const size_t kNumTl = 2;
+ ResetEncoder("VP8", kNumStreams, kNumTl, kNumSlDummy, kNackEnabled, false);
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+
+ // Capture a frame and wait for it to synchronize with the encoder thread.
+ video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
+ WaitForEncodedFrame(1);
+  // The encoder has been configured once when the first frame is received.
+ EXPECT_EQ(1, sink_.number_of_reconfigurations());
+ EXPECT_EQ(kVideoCodecVP8, fake_encoder_.codec_config().codecType);
+ EXPECT_EQ(kNumStreams, fake_encoder_.codec_config().numberOfSimulcastStreams);
+ EXPECT_EQ(kNumTl, fake_encoder_.codec_config().VP8()->numberOfTemporalLayers);
+ // Resilience is on for temporal layers.
+ EXPECT_EQ(kResilientStream, fake_encoder_.codec_config().VP8()->resilience);
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, Vp9ResilienceIsOffFor1SL1TLWithNackEnabled) {
+ const bool kNackEnabled = true;
+ const size_t kNumStreams = 1;
+ const size_t kNumTl = 1;
+ const unsigned char kNumSl = 1;
+ ResetEncoder("VP9", kNumStreams, kNumTl, kNumSl, kNackEnabled, false);
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+
+ // Capture a frame and wait for it to synchronize with the encoder thread.
+ video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
+ sink_.WaitForEncodedFrame(1);
+  // The encoder has been configured once when the first frame is received.
+ EXPECT_EQ(1, sink_.number_of_reconfigurations());
+ EXPECT_EQ(kVideoCodecVP9, fake_encoder_.codec_config().codecType);
+ EXPECT_EQ(kNumStreams, fake_encoder_.codec_config().numberOfSimulcastStreams);
+ EXPECT_EQ(kNumTl, fake_encoder_.codec_config().VP9()->numberOfTemporalLayers);
+ EXPECT_EQ(kNumSl, fake_encoder_.codec_config().VP9()->numberOfSpatialLayers);
+  // Resilience is off with no spatial or temporal layers when nack is on.
+ EXPECT_FALSE(fake_encoder_.codec_config().VP9()->resilienceOn);
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, Vp9ResilienceIsOnFor1SL1TLWithNackDisabled) {
+ const bool kNackEnabled = false;
+ const size_t kNumStreams = 1;
+ const size_t kNumTl = 1;
+ const unsigned char kNumSl = 1;
+ ResetEncoder("VP9", kNumStreams, kNumTl, kNumSl, kNackEnabled, false);
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+
+ // Capture a frame and wait for it to synchronize with the encoder thread.
+ video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
+ sink_.WaitForEncodedFrame(1);
+  // The encoder has been configured once when the first frame is received.
+ EXPECT_EQ(1, sink_.number_of_reconfigurations());
+ EXPECT_EQ(kVideoCodecVP9, fake_encoder_.codec_config().codecType);
+ EXPECT_EQ(kNumStreams, fake_encoder_.codec_config().numberOfSimulcastStreams);
+ EXPECT_EQ(kNumTl, fake_encoder_.codec_config().VP9()->numberOfTemporalLayers);
+ EXPECT_EQ(kNumSl, fake_encoder_.codec_config().VP9()->numberOfSpatialLayers);
+ // Resilience is on if nack is off.
+ EXPECT_TRUE(fake_encoder_.codec_config().VP9()->resilienceOn);
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, Vp9ResilienceIsOnFor2SL1TLWithNackEnabled) {
+ const bool kNackEnabled = true;
+ const size_t kNumStreams = 1;
+ const size_t kNumTl = 1;
+ const unsigned char kNumSl = 2;
+ ResetEncoder("VP9", kNumStreams, kNumTl, kNumSl, kNackEnabled, false);
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+
+ // Capture a frame and wait for it to synchronize with the encoder thread.
+ video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
+ sink_.WaitForEncodedFrame(1);
+  // The encoder has been configured once when the first frame is received.
+ EXPECT_EQ(1, sink_.number_of_reconfigurations());
+ EXPECT_EQ(kVideoCodecVP9, fake_encoder_.codec_config().codecType);
+ EXPECT_EQ(kNumStreams, fake_encoder_.codec_config().numberOfSimulcastStreams);
+ EXPECT_EQ(kNumTl, fake_encoder_.codec_config().VP9()->numberOfTemporalLayers);
+ EXPECT_EQ(kNumSl, fake_encoder_.codec_config().VP9()->numberOfSpatialLayers);
+ // Resilience is on for spatial layers.
+ EXPECT_TRUE(fake_encoder_.codec_config().VP9()->resilienceOn);
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, Vp9ResilienceIsOnFor1SL2TLWithNackEnabled) {
+ const bool kNackEnabled = true;
+ const size_t kNumStreams = 1;
+ const size_t kNumTl = 2;
+ const unsigned char kNumSl = 1;
+ ResetEncoder("VP9", kNumStreams, kNumTl, kNumSl, kNackEnabled, false);
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+
+ // Capture a frame and wait for it to synchronize with the encoder thread.
+ video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
+ sink_.WaitForEncodedFrame(1);
+  // The encoder has been configured once when the first frame is received.
+ EXPECT_EQ(1, sink_.number_of_reconfigurations());
+ EXPECT_EQ(kVideoCodecVP9, fake_encoder_.codec_config().codecType);
+ EXPECT_EQ(kNumStreams, fake_encoder_.codec_config().numberOfSimulcastStreams);
+ EXPECT_EQ(kNumTl, fake_encoder_.codec_config().VP9()->numberOfTemporalLayers);
+ EXPECT_EQ(kNumSl, fake_encoder_.codec_config().VP9()->numberOfSpatialLayers);
+ // Resilience is on for temporal layers.
+ EXPECT_TRUE(fake_encoder_.codec_config().VP9()->resilienceOn);
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, SwitchSourceDeregisterEncoderAsSink) {
+ EXPECT_TRUE(video_source_.has_sinks());
+ test::FrameForwarder new_video_source;
+ video_stream_encoder_->SetSource(
+ &new_video_source,
+ VideoSendStream::DegradationPreference::kMaintainFramerate);
+ EXPECT_FALSE(video_source_.has_sinks());
+ EXPECT_TRUE(new_video_source.has_sinks());
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, SinkWantsRotationApplied) {
+ EXPECT_FALSE(video_source_.sink_wants().rotation_applied);
+ video_stream_encoder_->SetSink(&sink_, true /*rotation_applied*/);
+ EXPECT_TRUE(video_source_.sink_wants().rotation_applied);
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, SinkWantsFromOveruseDetector) {
+ const int kMaxDowngrades = VideoStreamEncoder::kMaxCpuResolutionDowngrades;
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+
+ VerifyNoLimitation(video_source_.sink_wants());
+
+ int frame_width = 1280;
+ int frame_height = 720;
+
+  // Trigger CPU overuse kMaxDowngrades times. Each time, VideoStreamEncoder
+  // should request a lower resolution.
+ for (int i = 1; i <= kMaxDowngrades; ++i) {
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(i, frame_width, frame_height));
+ WaitForEncodedFrame(i);
+
+ video_stream_encoder_->TriggerCpuOveruse();
+
+ EXPECT_FALSE(video_source_.sink_wants().target_pixel_count);
+ EXPECT_LT(video_source_.sink_wants().max_pixel_count,
+ frame_width * frame_height);
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_EQ(i, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
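+    // Feed the next frame at half the resolution so that a further downgrade
+    // is possible.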
+ frame_width /= 2;
+ frame_height /= 2;
+ }
+
+ // Trigger CPU overuse one more time. This should not trigger a request for
+ // lower resolution.
+ rtc::VideoSinkWants current_wants = video_source_.sink_wants();
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(kMaxDowngrades + 1, frame_width, frame_height));
+ WaitForEncodedFrame(kMaxDowngrades + 1);
+ video_stream_encoder_->TriggerCpuOveruse();
+ EXPECT_EQ(video_source_.sink_wants().target_pixel_count,
+ current_wants.target_pixel_count);
+ EXPECT_EQ(video_source_.sink_wants().max_pixel_count,
+ current_wants.max_pixel_count);
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_EQ(kMaxDowngrades,
+ stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ // Trigger CPU normal use.
+ video_stream_encoder_->TriggerCpuNormalUsage();
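+  // Adapting up requests a target of ~5/3 of the current pixel count and
+  // allows at most a 4x increase.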
+ EXPECT_EQ(frame_width * frame_height * 5 / 3,
+ video_source_.sink_wants().target_pixel_count.value_or(0));
+ EXPECT_EQ(frame_width * frame_height * 4,
+ video_source_.sink_wants().max_pixel_count);
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_EQ(kMaxDowngrades + 1,
+ stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ TestMaxCpuResolutionDowngrades_BalancedMode_NoFpsLimit) {
+ const int kMaxDowngrades = VideoStreamEncoder::kMaxCpuResolutionDowngrades;
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+
+ // Enable kBalanced preference, no initial limitation.
+ AdaptingFrameForwarder source;
+ source.set_adaptation_enabled(true);
+ video_stream_encoder_->SetSource(
+ &source,
+ VideoSendStream::DegradationPreference::kBalanced);
+ VerifyNoLimitation(source.sink_wants());
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+  // Trigger adapt down kMaxDowngrades times.
+ int t = 1;
+ for (int i = 1; i <= kMaxDowngrades; ++i) {
+ source.IncomingCapturedFrame(CreateFrame(t, kWidth, kHeight));
+ sink_.WaitForEncodedFrame(t++);
+ video_stream_encoder_->TriggerCpuOveruse();
+ VerifyFpsMaxResolutionLt(source.sink_wants(), source.last_wants());
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_EQ(i, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ }
+
+  // Trigger adapt down, max cpu downgrades reached, expect no change.
+ rtc::VideoSinkWants last_wants = source.sink_wants();
+ source.IncomingCapturedFrame(CreateFrame(t, kWidth, kHeight));
+ sink_.WaitForEncodedFrame(t++);
+ video_stream_encoder_->TriggerCpuOveruse();
+ VerifyFpsEqResolutionEq(source.sink_wants(), last_wants);
+ EXPECT_EQ(last_wants.max_pixel_count, source.sink_wants().max_pixel_count);
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_EQ(kMaxDowngrades,
+ stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ // Trigger adapt up kMaxCpuDowngrades times.
+ for (int i = 1; i <= kMaxDowngrades; ++i) {
+ source.IncomingCapturedFrame(CreateFrame(t, kWidth, kHeight));
+ sink_.WaitForEncodedFrame(t++);
+ video_stream_encoder_->TriggerCpuNormalUsage();
+ VerifyFpsMaxResolutionGt(source.sink_wants(), source.last_wants());
+ EXPECT_GT(source.sink_wants().max_pixel_count, last_wants.max_pixel_count);
+ EXPECT_EQ(kMaxDowngrades + i,
+ stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ }
+
+ VerifyNoLimitation(source.sink_wants());
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, SinkWantsStoredByDegradationPreference) {
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+ VerifyNoLimitation(video_source_.sink_wants());
+
+ const int kFrameWidth = 1280;
+ const int kFrameHeight = 720;
+ const int kFrameIntervalMs = 1000 / 30;
+
+ int frame_timestamp = 1;
+
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(frame_timestamp, kFrameWidth, kFrameHeight));
+ WaitForEncodedFrame(frame_timestamp);
+ frame_timestamp += kFrameIntervalMs;
+
+ // Trigger CPU overuse.
+ video_stream_encoder_->TriggerCpuOveruse();
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(frame_timestamp, kFrameWidth, kFrameHeight));
+ WaitForEncodedFrame(frame_timestamp);
+ frame_timestamp += kFrameIntervalMs;
+
+  // Default degradation preference is maintain-framerate, so the encoder
+  // lowers the maximum wanted resolution but leaves the framerate unlimited.
+ EXPECT_FALSE(video_source_.sink_wants().target_pixel_count);
+ EXPECT_LT(video_source_.sink_wants().max_pixel_count,
+ kFrameWidth * kFrameHeight);
+ EXPECT_EQ(std::numeric_limits<int>::max(),
+ video_source_.sink_wants().max_framerate_fps);
+
+ // Set new source, switch to maintain-resolution.
+ test::FrameForwarder new_video_source;
+ video_stream_encoder_->SetSource(
+ &new_video_source,
+ VideoSendStream::DegradationPreference::kMaintainResolution);
+
+ // Initially no degradation registered.
+ VerifyNoLimitation(new_video_source.sink_wants());
+
+ // Force an input frame rate to be available, or the adaptation call won't
+  // know what framerate to adapt from.
+ const int kInputFps = 30;
+ VideoSendStream::Stats stats = stats_proxy_->GetStats();
+ stats.input_frame_rate = kInputFps;
+ stats_proxy_->SetMockStats(stats);
+
+ video_stream_encoder_->TriggerCpuOveruse();
+ new_video_source.IncomingCapturedFrame(
+ CreateFrame(frame_timestamp, kFrameWidth, kFrameHeight));
+ WaitForEncodedFrame(frame_timestamp);
+ frame_timestamp += kFrameIntervalMs;
+
+ // Some framerate constraint should be set.
+ EXPECT_FALSE(new_video_source.sink_wants().target_pixel_count);
+ EXPECT_EQ(std::numeric_limits<int>::max(),
+ new_video_source.sink_wants().max_pixel_count);
+ EXPECT_LT(new_video_source.sink_wants().max_framerate_fps, kInputFps);
+
+ // Turn off degradation completely.
+ video_stream_encoder_->SetSource(
+ &new_video_source,
+ VideoSendStream::DegradationPreference::kDegradationDisabled);
+ VerifyNoLimitation(new_video_source.sink_wants());
+
+ video_stream_encoder_->TriggerCpuOveruse();
+ new_video_source.IncomingCapturedFrame(
+ CreateFrame(frame_timestamp, kFrameWidth, kFrameHeight));
+ WaitForEncodedFrame(frame_timestamp);
+ frame_timestamp += kFrameIntervalMs;
+
+ // Still no degradation.
+ VerifyNoLimitation(new_video_source.sink_wants());
+
+  // Calling SetSource with resolution scaling enabled applies the old
+  // SinkWants.
+ video_stream_encoder_->SetSource(
+ &new_video_source,
+ VideoSendStream::DegradationPreference::kMaintainFramerate);
+ EXPECT_LT(new_video_source.sink_wants().max_pixel_count,
+ kFrameWidth * kFrameHeight);
+ EXPECT_FALSE(new_video_source.sink_wants().target_pixel_count);
+ EXPECT_EQ(std::numeric_limits<int>::max(),
+ new_video_source.sink_wants().max_framerate_fps);
+
+  // Calling SetSource with framerate scaling enabled applies the old
+  // SinkWants.
+ video_stream_encoder_->SetSource(
+ &new_video_source,
+ VideoSendStream::DegradationPreference::kMaintainResolution);
+ EXPECT_FALSE(new_video_source.sink_wants().target_pixel_count);
+ EXPECT_EQ(std::numeric_limits<int>::max(),
+ new_video_source.sink_wants().max_pixel_count);
+ EXPECT_LT(new_video_source.sink_wants().max_framerate_fps, kInputFps);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, StatsTracksQualityAdaptationStats) {
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ video_source_.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
+ WaitForEncodedFrame(1);
+ VideoSendStream::Stats stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.bw_limited_resolution);
+ EXPECT_EQ(0, stats.number_of_quality_adapt_changes);
+
+ // Trigger adapt down.
+ video_stream_encoder_->TriggerQualityLow();
+ video_source_.IncomingCapturedFrame(CreateFrame(2, kWidth, kHeight));
+ WaitForEncodedFrame(2);
+
+ stats = stats_proxy_->GetStats();
+ EXPECT_TRUE(stats.bw_limited_resolution);
+ EXPECT_EQ(1, stats.number_of_quality_adapt_changes);
+
+ // Trigger adapt up.
+ video_stream_encoder_->TriggerQualityHigh();
+ video_source_.IncomingCapturedFrame(CreateFrame(3, kWidth, kHeight));
+ WaitForEncodedFrame(3);
+
+ stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.bw_limited_resolution);
+ EXPECT_EQ(2, stats.number_of_quality_adapt_changes);
+ EXPECT_EQ(0, stats.number_of_cpu_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, StatsTracksCpuAdaptationStats) {
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ video_source_.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
+ WaitForEncodedFrame(1);
+ VideoSendStream::Stats stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.cpu_limited_resolution);
+ EXPECT_EQ(0, stats.number_of_cpu_adapt_changes);
+
+ // Trigger CPU overuse.
+ video_stream_encoder_->TriggerCpuOveruse();
+ video_source_.IncomingCapturedFrame(CreateFrame(2, kWidth, kHeight));
+ WaitForEncodedFrame(2);
+
+ stats = stats_proxy_->GetStats();
+ EXPECT_TRUE(stats.cpu_limited_resolution);
+ EXPECT_EQ(1, stats.number_of_cpu_adapt_changes);
+
+ // Trigger CPU normal use.
+ video_stream_encoder_->TriggerCpuNormalUsage();
+ video_source_.IncomingCapturedFrame(CreateFrame(3, kWidth, kHeight));
+ WaitForEncodedFrame(3);
+
+ stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.cpu_limited_resolution);
+ EXPECT_EQ(2, stats.number_of_cpu_adapt_changes);
+ EXPECT_EQ(0, stats.number_of_quality_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, SwitchingSourceKeepsCpuAdaptation) {
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ video_source_.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
+ WaitForEncodedFrame(1);
+ VideoSendStream::Stats stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.bw_limited_resolution);
+ EXPECT_FALSE(stats.cpu_limited_resolution);
+ EXPECT_EQ(0, stats.number_of_cpu_adapt_changes);
+
+ // Trigger CPU overuse.
+ video_stream_encoder_->TriggerCpuOveruse();
+ video_source_.IncomingCapturedFrame(CreateFrame(2, kWidth, kHeight));
+ WaitForEncodedFrame(2);
+ stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.bw_limited_resolution);
+ EXPECT_TRUE(stats.cpu_limited_resolution);
+ EXPECT_EQ(1, stats.number_of_cpu_adapt_changes);
+
+ // Set new source with adaptation still enabled.
+ test::FrameForwarder new_video_source;
+ video_stream_encoder_->SetSource(
+ &new_video_source,
+ VideoSendStream::DegradationPreference::kMaintainFramerate);
+
+ new_video_source.IncomingCapturedFrame(CreateFrame(3, kWidth, kHeight));
+ WaitForEncodedFrame(3);
+ stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.bw_limited_resolution);
+ EXPECT_TRUE(stats.cpu_limited_resolution);
+ EXPECT_EQ(1, stats.number_of_cpu_adapt_changes);
+
+ // Set adaptation disabled.
+ video_stream_encoder_->SetSource(
+ &new_video_source,
+ VideoSendStream::DegradationPreference::kDegradationDisabled);
+
+ new_video_source.IncomingCapturedFrame(CreateFrame(4, kWidth, kHeight));
+ WaitForEncodedFrame(4);
+ stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.bw_limited_resolution);
+ EXPECT_FALSE(stats.cpu_limited_resolution);
+ EXPECT_EQ(1, stats.number_of_cpu_adapt_changes);
+
+ // Set adaptation back to enabled.
+ video_stream_encoder_->SetSource(
+ &new_video_source,
+ VideoSendStream::DegradationPreference::kMaintainFramerate);
+
+ new_video_source.IncomingCapturedFrame(CreateFrame(5, kWidth, kHeight));
+ WaitForEncodedFrame(5);
+ stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.bw_limited_resolution);
+ EXPECT_TRUE(stats.cpu_limited_resolution);
+ EXPECT_EQ(1, stats.number_of_cpu_adapt_changes);
+
+ // Trigger CPU normal use.
+ video_stream_encoder_->TriggerCpuNormalUsage();
+ new_video_source.IncomingCapturedFrame(CreateFrame(6, kWidth, kHeight));
+ WaitForEncodedFrame(6);
+ stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.bw_limited_resolution);
+ EXPECT_FALSE(stats.cpu_limited_resolution);
+ EXPECT_EQ(2, stats.number_of_cpu_adapt_changes);
+ EXPECT_EQ(0, stats.number_of_quality_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, SwitchingSourceKeepsQualityAdaptation) {
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ video_source_.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
+ WaitForEncodedFrame(1);
+ VideoSendStream::Stats stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.bw_limited_resolution);
+ EXPECT_FALSE(stats.bw_limited_framerate);
+ EXPECT_EQ(0, stats.number_of_quality_adapt_changes);
+
+ // Set new source with adaptation still enabled.
+ test::FrameForwarder new_video_source;
+ video_stream_encoder_->SetSource(
+ &new_video_source,
+ VideoSendStream::DegradationPreference::kBalanced);
+
+ new_video_source.IncomingCapturedFrame(CreateFrame(2, kWidth, kHeight));
+ WaitForEncodedFrame(2);
+ stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.bw_limited_resolution);
+ EXPECT_FALSE(stats.bw_limited_framerate);
+ EXPECT_EQ(0, stats.number_of_quality_adapt_changes);
+
+ // Trigger adapt down.
+ video_stream_encoder_->TriggerQualityLow();
+ new_video_source.IncomingCapturedFrame(CreateFrame(3, kWidth, kHeight));
+ WaitForEncodedFrame(3);
+ stats = stats_proxy_->GetStats();
+ EXPECT_TRUE(stats.bw_limited_resolution);
+ EXPECT_FALSE(stats.bw_limited_framerate);
+ EXPECT_EQ(1, stats.number_of_quality_adapt_changes);
+
+ // Set new source with adaptation still enabled.
+ video_stream_encoder_->SetSource(
+ &new_video_source,
+ VideoSendStream::DegradationPreference::kBalanced);
+
+ new_video_source.IncomingCapturedFrame(CreateFrame(4, kWidth, kHeight));
+ WaitForEncodedFrame(4);
+ stats = stats_proxy_->GetStats();
+ EXPECT_TRUE(stats.bw_limited_resolution);
+ EXPECT_FALSE(stats.bw_limited_framerate);
+ EXPECT_EQ(1, stats.number_of_quality_adapt_changes);
+
+ // Disable resolution scaling.
+ video_stream_encoder_->SetSource(
+ &new_video_source,
+ VideoSendStream::DegradationPreference::kMaintainResolution);
+
+ new_video_source.IncomingCapturedFrame(CreateFrame(5, kWidth, kHeight));
+ WaitForEncodedFrame(5);
+ stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.bw_limited_resolution);
+ EXPECT_FALSE(stats.bw_limited_framerate);
+ EXPECT_EQ(1, stats.number_of_quality_adapt_changes);
+ EXPECT_EQ(0, stats.number_of_cpu_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ QualityAdaptationStatsAreResetWhenScalerIsDisabled) {
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ video_source_.set_adaptation_enabled(true);
+ video_source_.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
+ WaitForEncodedFrame(1);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ // Trigger adapt down.
+ video_stream_encoder_->TriggerQualityLow();
+ video_source_.IncomingCapturedFrame(CreateFrame(2, kWidth, kHeight));
+ WaitForEncodedFrame(2);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ // Trigger overuse.
+ video_stream_encoder_->TriggerCpuOveruse();
+ video_source_.IncomingCapturedFrame(CreateFrame(3, kWidth, kHeight));
+ WaitForEncodedFrame(3);
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ // Set source with adaptation still enabled but quality scaler is off.
+ fake_encoder_.SetQualityScaling(false);
+ video_stream_encoder_->SetSource(
+ &video_source_,
+ VideoSendStream::DegradationPreference::kMaintainFramerate);
+
+ video_source_.IncomingCapturedFrame(CreateFrame(4, kWidth, kHeight));
+ WaitForEncodedFrame(4);
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ StatsTracksCpuAdaptationStatsWhenSwitchingSource) {
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ int sequence = 1;
+
+ video_source_.IncomingCapturedFrame(CreateFrame(sequence, kWidth, kHeight));
+ WaitForEncodedFrame(sequence++);
+ VideoSendStream::Stats stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.cpu_limited_resolution);
+ EXPECT_FALSE(stats.cpu_limited_framerate);
+ EXPECT_EQ(0, stats.number_of_cpu_adapt_changes);
+
+ // Trigger CPU overuse, should now adapt down.
+ video_stream_encoder_->TriggerCpuOveruse();
+ video_source_.IncomingCapturedFrame(CreateFrame(sequence, kWidth, kHeight));
+ WaitForEncodedFrame(sequence++);
+ stats = stats_proxy_->GetStats();
+ EXPECT_TRUE(stats.cpu_limited_resolution);
+ EXPECT_FALSE(stats.cpu_limited_framerate);
+ EXPECT_EQ(1, stats.number_of_cpu_adapt_changes);
+
+ // Set new source with adaptation still enabled.
+ test::FrameForwarder new_video_source;
+ video_stream_encoder_->SetSource(
+ &new_video_source,
+ VideoSendStream::DegradationPreference::kMaintainFramerate);
+
+ new_video_source.IncomingCapturedFrame(
+ CreateFrame(sequence, kWidth, kHeight));
+ WaitForEncodedFrame(sequence++);
+ stats = stats_proxy_->GetStats();
+ EXPECT_TRUE(stats.cpu_limited_resolution);
+ EXPECT_FALSE(stats.cpu_limited_framerate);
+ EXPECT_EQ(1, stats.number_of_cpu_adapt_changes);
+
+ // Set cpu adaptation by frame dropping.
+ video_stream_encoder_->SetSource(
+ &new_video_source,
+ VideoSendStream::DegradationPreference::kMaintainResolution);
+ new_video_source.IncomingCapturedFrame(
+ CreateFrame(sequence, kWidth, kHeight));
+ WaitForEncodedFrame(sequence++);
+ stats = stats_proxy_->GetStats();
+ // Not adapted at first.
+ EXPECT_FALSE(stats.cpu_limited_resolution);
+ EXPECT_FALSE(stats.cpu_limited_framerate);
+ EXPECT_EQ(1, stats.number_of_cpu_adapt_changes);
+
+ // Force an input frame rate to be available, or the adaptation call won't
+ // know what framerate to adapt from.
+ VideoSendStream::Stats mock_stats = stats_proxy_->GetStats();
+ mock_stats.input_frame_rate = 30;
+ stats_proxy_->SetMockStats(mock_stats);
+ video_stream_encoder_->TriggerCpuOveruse();
+ stats_proxy_->ResetMockStats();
+
+ new_video_source.IncomingCapturedFrame(
+ CreateFrame(sequence, kWidth, kHeight));
+ WaitForEncodedFrame(sequence++);
+
+ // Framerate now adapted.
+ stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.cpu_limited_resolution);
+ EXPECT_TRUE(stats.cpu_limited_framerate);
+ EXPECT_EQ(2, stats.number_of_cpu_adapt_changes);
+
+ // Disable CPU adaptation.
+ video_stream_encoder_->SetSource(
+ &new_video_source,
+ VideoSendStream::DegradationPreference::kDegradationDisabled);
+ new_video_source.IncomingCapturedFrame(
+ CreateFrame(sequence, kWidth, kHeight));
+ WaitForEncodedFrame(sequence++);
+
+ stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.cpu_limited_resolution);
+ EXPECT_FALSE(stats.cpu_limited_framerate);
+ EXPECT_EQ(2, stats.number_of_cpu_adapt_changes);
+
+ // Try to trigger overuse. Should not succeed.
+ stats_proxy_->SetMockStats(mock_stats);
+ video_stream_encoder_->TriggerCpuOveruse();
+ stats_proxy_->ResetMockStats();
+
+ stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.cpu_limited_resolution);
+ EXPECT_FALSE(stats.cpu_limited_framerate);
+ EXPECT_EQ(2, stats.number_of_cpu_adapt_changes);
+
+ // Switch back the source with resolution adaptation enabled.
+ video_stream_encoder_->SetSource(
+ &video_source_,
+ VideoSendStream::DegradationPreference::kMaintainFramerate);
+ video_source_.IncomingCapturedFrame(CreateFrame(sequence, kWidth, kHeight));
+ WaitForEncodedFrame(sequence++);
+ stats = stats_proxy_->GetStats();
+ EXPECT_TRUE(stats.cpu_limited_resolution);
+ EXPECT_FALSE(stats.cpu_limited_framerate);
+ EXPECT_EQ(2, stats.number_of_cpu_adapt_changes);
+
+ // Trigger CPU normal usage.
+ video_stream_encoder_->TriggerCpuNormalUsage();
+ video_source_.IncomingCapturedFrame(CreateFrame(sequence, kWidth, kHeight));
+ WaitForEncodedFrame(sequence++);
+ stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.cpu_limited_resolution);
+ EXPECT_FALSE(stats.cpu_limited_framerate);
+ EXPECT_EQ(3, stats.number_of_cpu_adapt_changes);
+
+ // Back to the source with adaptation off, set it back to maintain-resolution.
+ video_stream_encoder_->SetSource(
+ &new_video_source,
+ VideoSendStream::DegradationPreference::kMaintainResolution);
+ new_video_source.IncomingCapturedFrame(
+ CreateFrame(sequence, kWidth, kHeight));
+ WaitForEncodedFrame(sequence++);
+ stats = stats_proxy_->GetStats();
+  // The resolution limit was cleared while degradation was disabled, but the
+  // framerate limit stored for maintain-resolution mode is reapplied.
+ EXPECT_FALSE(stats.cpu_limited_resolution);
+ EXPECT_TRUE(stats.cpu_limited_framerate);
+ EXPECT_EQ(3, stats.number_of_cpu_adapt_changes);
+
+ // Trigger CPU normal usage.
+ video_stream_encoder_->TriggerCpuNormalUsage();
+ new_video_source.IncomingCapturedFrame(
+ CreateFrame(sequence, kWidth, kHeight));
+ WaitForEncodedFrame(sequence++);
+ stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.cpu_limited_resolution);
+ EXPECT_FALSE(stats.cpu_limited_framerate);
+ EXPECT_EQ(4, stats.number_of_cpu_adapt_changes);
+ EXPECT_EQ(0, stats.number_of_quality_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, StatsTracksPreferredBitrate) {
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ video_source_.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
+ WaitForEncodedFrame(1);
+
+ VideoSendStream::Stats stats = stats_proxy_->GetStats();
+ EXPECT_EQ(video_encoder_config_.max_bitrate_bps,
+ stats.preferred_media_bitrate_bps);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ ScalingUpAndDownDoesNothingWithMaintainResolution) {
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+
+ // Expect no scaling to begin with.
+ VerifyNoLimitation(video_source_.sink_wants());
+
+ video_source_.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
+ WaitForEncodedFrame(1);
+
+ // Trigger scale down.
+ video_stream_encoder_->TriggerQualityLow();
+
+ video_source_.IncomingCapturedFrame(CreateFrame(2, kWidth, kHeight));
+ WaitForEncodedFrame(2);
+
+ // Expect a scale down.
+ EXPECT_TRUE(video_source_.sink_wants().max_pixel_count);
+ EXPECT_LT(video_source_.sink_wants().max_pixel_count, kWidth * kHeight);
+
+ // Set resolution scaling disabled.
+ test::FrameForwarder new_video_source;
+ video_stream_encoder_->SetSource(
+ &new_video_source,
+ VideoSendStream::DegradationPreference::kMaintainResolution);
+
+ // Trigger scale down.
+ video_stream_encoder_->TriggerQualityLow();
+ new_video_source.IncomingCapturedFrame(CreateFrame(3, kWidth, kHeight));
+ WaitForEncodedFrame(3);
+
+ // Expect no scaling.
+ EXPECT_EQ(std::numeric_limits<int>::max(),
+ new_video_source.sink_wants().max_pixel_count);
+
+ // Trigger scale up.
+ video_stream_encoder_->TriggerQualityHigh();
+ new_video_source.IncomingCapturedFrame(CreateFrame(4, kWidth, kHeight));
+ WaitForEncodedFrame(4);
+
+ // Expect nothing to change, still no scaling.
+ EXPECT_EQ(std::numeric_limits<int>::max(),
+ new_video_source.sink_wants().max_pixel_count);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ SkipsSameAdaptDownRequest_MaintainFramerateMode) {
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+
+ // Enable kMaintainFramerate preference, no initial limitation.
+ test::FrameForwarder source;
+ video_stream_encoder_->SetSource(
+ &source, VideoSendStream::DegradationPreference::kMaintainFramerate);
+
+ source.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
+ WaitForEncodedFrame(1);
+ VerifyNoLimitation(source.sink_wants());
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ // Trigger adapt down, expect scaled down resolution.
+ video_stream_encoder_->TriggerCpuOveruse();
+ VerifyFpsMaxResolutionLt(source.sink_wants(), kWidth * kHeight);
+ const int kLastMaxPixelCount = source.sink_wants().max_pixel_count;
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ // Trigger adapt down for same input resolution, expect no change.
+ video_stream_encoder_->TriggerCpuOveruse();
+ EXPECT_EQ(kLastMaxPixelCount, source.sink_wants().max_pixel_count);
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, SkipsSameOrLargerAdaptDownRequest_BalancedMode) {
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+
+ // Enable kBalanced preference, no initial limitation.
+ test::FrameForwarder source;
+ video_stream_encoder_->SetSource(
+ &source,
+ VideoSendStream::DegradationPreference::kBalanced);
+ source.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
+ sink_.WaitForEncodedFrame(1);
+ VerifyNoLimitation(source.sink_wants());
+
+ // Trigger adapt down, expect scaled down resolution.
+ video_stream_encoder_->TriggerQualityLow();
+ VerifyFpsMaxResolutionLt(source.sink_wants(), kWidth * kHeight);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+ const int kLastMaxPixelCount = source.sink_wants().max_pixel_count;
+
+ // Trigger adapt down for same input resolution, expect no change.
+ source.IncomingCapturedFrame(CreateFrame(2, kWidth, kHeight));
+ sink_.WaitForEncodedFrame(2);
+ video_stream_encoder_->TriggerQualityLow();
+ EXPECT_EQ(kLastMaxPixelCount, source.sink_wants().max_pixel_count);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt down for larger input resolution, expect no change.
+ source.IncomingCapturedFrame(CreateFrame(3, kWidth + 1, kHeight + 1));
+ sink_.WaitForEncodedFrame(3);
+ video_stream_encoder_->TriggerQualityLow();
+ EXPECT_EQ(kLastMaxPixelCount, source.sink_wants().max_pixel_count);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ NoChangeForInitialNormalUsage_MaintainFramerateMode) {
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+
+ // Enable kMaintainFramerate preference, no initial limitation.
+ test::FrameForwarder source;
+ video_stream_encoder_->SetSource(
+ &source, VideoSendStream::DegradationPreference::kMaintainFramerate);
+
+ source.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
+ WaitForEncodedFrame(kWidth, kHeight);
+ VerifyNoLimitation(source.sink_wants());
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ // Trigger adapt up, expect no change.
+ video_stream_encoder_->TriggerCpuNormalUsage();
+ VerifyNoLimitation(source.sink_wants());
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ NoChangeForInitialNormalUsage_MaintainResolutionMode) {
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+
+ // Enable kMaintainResolution preference, no initial limitation.
+ test::FrameForwarder source;
+ video_stream_encoder_->SetSource(
+ &source, VideoSendStream::DegradationPreference::kMaintainResolution);
+
+ source.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
+ WaitForEncodedFrame(kWidth, kHeight);
+ VerifyNoLimitation(source.sink_wants());
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ // Trigger adapt up, expect no change.
+ video_stream_encoder_->TriggerCpuNormalUsage();
+ VerifyNoLimitation(source.sink_wants());
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, NoChangeForInitialNormalUsage_BalancedMode) {
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+
+ // Enable kBalanced preference, no initial limitation.
+ test::FrameForwarder source;
+ video_stream_encoder_->SetSource(
+ &source,
+ VideoSendStream::DegradationPreference::kBalanced);
+
+ source.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
+ sink_.WaitForEncodedFrame(kWidth, kHeight);
+ VerifyNoLimitation(source.sink_wants());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt up, expect no change.
+ video_stream_encoder_->TriggerQualityHigh();
+ VerifyNoLimitation(source.sink_wants());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, NoChangeForInitialNormalUsage_DisabledMode) {
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+
+ // Enable kDegradationDisabled preference, no initial limitation.
+ test::FrameForwarder source;
+ video_stream_encoder_->SetSource(
+ &source, VideoSendStream::DegradationPreference::kDegradationDisabled);
+
+ source.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
+ sink_.WaitForEncodedFrame(kWidth, kHeight);
+ VerifyNoLimitation(source.sink_wants());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt up, expect no change.
+ video_stream_encoder_->TriggerQualityHigh();
+ VerifyNoLimitation(source.sink_wants());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ AdaptsResolutionForLowQuality_MaintainFramerateMode) {
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+
+ // Enable kMaintainFramerate preference, no initial limitation.
+ AdaptingFrameForwarder source;
+ source.set_adaptation_enabled(true);
+ video_stream_encoder_->SetSource(
+ &source, VideoSendStream::DegradationPreference::kMaintainFramerate);
+
+ source.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
+ WaitForEncodedFrame(1);
+ VerifyNoLimitation(source.sink_wants());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt down, expect scaled down resolution.
+ video_stream_encoder_->TriggerQualityLow();
+ source.IncomingCapturedFrame(CreateFrame(2, kWidth, kHeight));
+ WaitForEncodedFrame(2);
+ VerifyFpsMaxResolutionLt(source.sink_wants(), kWidth * kHeight);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt up, expect no restriction.
+ video_stream_encoder_->TriggerQualityHigh();
+ VerifyNoLimitation(source.sink_wants());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ AdaptsFramerateForLowQuality_MaintainResolutionMode) {
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ const int kInputFps = 30;
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+
+ VideoSendStream::Stats stats = stats_proxy_->GetStats();
+ stats.input_frame_rate = kInputFps;
+ stats_proxy_->SetMockStats(stats);
+
+ // Expect no scaling to begin with (preference: kMaintainFramerate).
+ video_source_.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
+ sink_.WaitForEncodedFrame(1);
+ VerifyNoLimitation(video_source_.sink_wants());
+
+ // Trigger adapt down, expect scaled down resolution.
+ video_stream_encoder_->TriggerQualityLow();
+ video_source_.IncomingCapturedFrame(CreateFrame(2, kWidth, kHeight));
+ sink_.WaitForEncodedFrame(2);
+ VerifyFpsMaxResolutionLt(video_source_.sink_wants(), kWidth * kHeight);
+
+ // Enable kMaintainResolution preference.
+ test::FrameForwarder new_video_source;
+ video_stream_encoder_->SetSource(
+ &new_video_source,
+ VideoSendStream::DegradationPreference::kMaintainResolution);
+ VerifyNoLimitation(new_video_source.sink_wants());
+
+ // Trigger adapt down, expect reduced framerate.
+ video_stream_encoder_->TriggerQualityLow();
+ new_video_source.IncomingCapturedFrame(CreateFrame(3, kWidth, kHeight));
+ sink_.WaitForEncodedFrame(3);
+ VerifyFpsLtResolutionMax(new_video_source.sink_wants(), kInputFps);
+
+ // Trigger adapt up, expect no restriction.
+ video_stream_encoder_->TriggerQualityHigh();
+ VerifyNoLimitation(new_video_source.sink_wants());
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, DoesNotScaleBelowSetResolutionLimit) {
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ const size_t kNumFrames = 10;
+
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+
+ // Enable adapter, expected input resolutions when downscaling:
+ // 1280x720 -> 960x540 -> 640x360 -> 480x270 -> 320x180 (kMinPixelsPerFrame)
+ video_source_.set_adaptation_enabled(true);
+
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ int downscales = 0;
+ for (size_t i = 1; i <= kNumFrames; i++) {
+ video_source_.IncomingCapturedFrame(CreateFrame(i, kWidth, kHeight));
+ WaitForEncodedFrame(i);
+
+ // Trigger scale down.
+ rtc::VideoSinkWants last_wants = video_source_.sink_wants();
+ video_stream_encoder_->TriggerQualityLow();
+ EXPECT_GE(video_source_.sink_wants().max_pixel_count, kMinPixelsPerFrame);
+
+ if (video_source_.sink_wants().max_pixel_count < last_wants.max_pixel_count)
+ ++downscales;
+
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(downscales,
+ stats_proxy_->GetStats().number_of_quality_adapt_changes);
+ EXPECT_GT(downscales, 0);
+ }
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ AdaptsResolutionUpAndDownTwiceOnOveruse_MaintainFramerateMode) {
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+
+ // Enable kMaintainFramerate preference, no initial limitation.
+ AdaptingFrameForwarder source;
+ source.set_adaptation_enabled(true);
+ video_stream_encoder_->SetSource(
+ &source, VideoSendStream::DegradationPreference::kMaintainFramerate);
+
+ source.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
+ WaitForEncodedFrame(kWidth, kHeight);
+ VerifyNoLimitation(source.sink_wants());
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ // Trigger adapt down, expect scaled down resolution.
+ video_stream_encoder_->TriggerCpuOveruse();
+ source.IncomingCapturedFrame(CreateFrame(2, kWidth, kHeight));
+ WaitForEncodedFrame(2);
+ VerifyFpsMaxResolutionLt(source.sink_wants(), kWidth * kHeight);
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ // Trigger adapt up, expect no restriction.
+ video_stream_encoder_->TriggerCpuNormalUsage();
+ source.IncomingCapturedFrame(CreateFrame(3, kWidth, kHeight));
+ WaitForEncodedFrame(kWidth, kHeight);
+ VerifyNoLimitation(source.sink_wants());
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_EQ(2, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ // Trigger adapt down, expect scaled down resolution.
+ video_stream_encoder_->TriggerCpuOveruse();
+ source.IncomingCapturedFrame(CreateFrame(4, kWidth, kHeight));
+ WaitForEncodedFrame(4);
+ VerifyFpsMaxResolutionLt(source.sink_wants(), kWidth * kHeight);
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_EQ(3, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ // Trigger adapt up, expect no restriction.
+ video_stream_encoder_->TriggerCpuNormalUsage();
+ source.IncomingCapturedFrame(CreateFrame(5, kWidth, kHeight));
+ sink_.WaitForEncodedFrame(kWidth, kHeight);
+ VerifyNoLimitation(source.sink_wants());
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_EQ(4, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ AdaptsResolutionUpAndDownTwiceForLowQuality_BalancedMode_NoFpsLimit) {
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+
+ // Enable kBalanced preference, no initial limitation.
+ AdaptingFrameForwarder source;
+ source.set_adaptation_enabled(true);
+ video_stream_encoder_->SetSource(
+ &source,
+ VideoSendStream::DegradationPreference::kBalanced);
+
+ source.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
+ sink_.WaitForEncodedFrame(kWidth, kHeight);
+ VerifyNoLimitation(source.sink_wants());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt down, expect scaled down resolution.
+ video_stream_encoder_->TriggerQualityLow();
+ source.IncomingCapturedFrame(CreateFrame(2, kWidth, kHeight));
+ sink_.WaitForEncodedFrame(2);
+ VerifyFpsMaxResolutionLt(source.sink_wants(), kWidth * kHeight);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt up, expect no restriction.
+ video_stream_encoder_->TriggerQualityHigh();
+ source.IncomingCapturedFrame(CreateFrame(3, kWidth, kHeight));
+ sink_.WaitForEncodedFrame(kWidth, kHeight);
+ VerifyNoLimitation(source.sink_wants());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt down, expect scaled down resolution.
+ video_stream_encoder_->TriggerQualityLow();
+ source.IncomingCapturedFrame(CreateFrame(4, kWidth, kHeight));
+ sink_.WaitForEncodedFrame(4);
+ VerifyFpsMaxResolutionLt(source.sink_wants(), kWidth * kHeight);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt up, expect no restriction.
+ video_stream_encoder_->TriggerQualityHigh();
+ source.IncomingCapturedFrame(CreateFrame(5, kWidth, kHeight));
+ sink_.WaitForEncodedFrame(kWidth, kHeight);
+ VerifyNoLimitation(source.sink_wants());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(4, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ AdaptsResolutionOnOveruseAndLowQuality_MaintainFramerateMode) {
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+
+ // Enable kMaintainFramerate preference, no initial limitation.
+ AdaptingFrameForwarder source;
+ source.set_adaptation_enabled(true);
+ video_stream_encoder_->SetSource(
+ &source, VideoSendStream::DegradationPreference::kMaintainFramerate);
+
+ source.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
+ WaitForEncodedFrame(kWidth, kHeight);
+ VerifyNoLimitation(source.sink_wants());
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger cpu adapt down, expect scaled down resolution (960x540).
+ video_stream_encoder_->TriggerCpuOveruse();
+ source.IncomingCapturedFrame(CreateFrame(2, kWidth, kHeight));
+ WaitForEncodedFrame(2);
+ VerifyFpsMaxResolutionLt(source.sink_wants(), kWidth * kHeight);
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger cpu adapt down, expect scaled down resolution (640x360).
+ video_stream_encoder_->TriggerCpuOveruse();
+ source.IncomingCapturedFrame(CreateFrame(3, kWidth, kHeight));
+ WaitForEncodedFrame(3);
+ VerifyFpsMaxResolutionLt(source.sink_wants(), source.last_wants());
+ rtc::VideoSinkWants last_wants = source.sink_wants();
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(2, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger cpu adapt down, max cpu downgrades reached, expect no change.
+ video_stream_encoder_->TriggerCpuOveruse();
+ source.IncomingCapturedFrame(CreateFrame(4, kWidth, kHeight));
+ WaitForEncodedFrame(4);
+ VerifyFpsEqResolutionEq(source.sink_wants(), last_wants);
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(2, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger quality adapt down, expect scaled down resolution (480x270).
+ video_stream_encoder_->TriggerQualityLow();
+ source.IncomingCapturedFrame(CreateFrame(5, kWidth, kHeight));
+ WaitForEncodedFrame(5);
+ VerifyFpsMaxResolutionLt(source.sink_wants(), source.last_wants());
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(2, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger cpu adapt up, expect upscaled resolution (640x360).
+ video_stream_encoder_->TriggerCpuNormalUsage();
+ source.IncomingCapturedFrame(CreateFrame(6, kWidth, kHeight));
+ WaitForEncodedFrame(6);
+ VerifyFpsMaxResolutionGt(source.sink_wants(), source.last_wants());
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(3, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger cpu adapt up, expect upscaled resolution (960x540).
+ video_stream_encoder_->TriggerCpuNormalUsage();
+ source.IncomingCapturedFrame(CreateFrame(7, kWidth, kHeight));
+ WaitForEncodedFrame(7);
+ VerifyFpsMaxResolutionGt(source.sink_wants(), source.last_wants());
+ last_wants = source.sink_wants();
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(4, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger cpu adapt up, no cpu downgrades, expect no change (960x540).
+ video_stream_encoder_->TriggerCpuNormalUsage();
+ source.IncomingCapturedFrame(CreateFrame(8, kWidth, kHeight));
+ WaitForEncodedFrame(8);
+ VerifyFpsEqResolutionEq(source.sink_wants(), last_wants);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(4, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger quality adapt up, expect no restriction (1280x720).
+ video_stream_encoder_->TriggerQualityHigh();
+ source.IncomingCapturedFrame(CreateFrame(9, kWidth, kHeight));
+ WaitForEncodedFrame(kWidth, kHeight);
+ VerifyFpsMaxResolutionGt(source.sink_wants(), source.last_wants());
+ VerifyNoLimitation(source.sink_wants());
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_EQ(4, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, CpuLimitedHistogramIsReported) {
+ const int kWidth = 640;
+ const int kHeight = 360;
+
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+
+ for (int i = 1; i <= SendStatisticsProxy::kMinRequiredMetricsSamples; ++i) {
+ video_source_.IncomingCapturedFrame(CreateFrame(i, kWidth, kHeight));
+ WaitForEncodedFrame(i);
+ }
+
+ video_stream_encoder_->TriggerCpuOveruse();
+ for (int i = 1; i <= SendStatisticsProxy::kMinRequiredMetricsSamples; ++i) {
+ video_source_.IncomingCapturedFrame(CreateFrame(
+ SendStatisticsProxy::kMinRequiredMetricsSamples + i, kWidth, kHeight));
+ WaitForEncodedFrame(SendStatisticsProxy::kMinRequiredMetricsSamples + i);
+ }
+
+ video_stream_encoder_->Stop();
+ video_stream_encoder_.reset();
+ stats_proxy_.reset();
+
+ EXPECT_EQ(1,
+ metrics::NumSamples("WebRTC.Video.CpuLimitedResolutionInPercent"));
+ EXPECT_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.CpuLimitedResolutionInPercent", 50));
+}
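+
+// Editor's note (illustrative sketch, not upstream code): the expected 50%
+// event above follows from the sample counts in this test. Half of the frames
+// are encoded before TriggerCpuOveruse() and half after, so:
+//   int limited = SendStatisticsProxy::kMinRequiredMetricsSamples;
+//   int total = 2 * limited;
+//   int percent = 100 * limited / total;  // == 50, the recorded event.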
+
+TEST_F(VideoStreamEncoderTest,
+ CpuLimitedHistogramIsNotReportedForDisabledDegradation) {
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+ const int kWidth = 640;
+ const int kHeight = 360;
+
+ video_stream_encoder_->SetSource(
+ &video_source_,
+ VideoSendStream::DegradationPreference::kDegradationDisabled);
+
+ for (int i = 1; i <= SendStatisticsProxy::kMinRequiredMetricsSamples; ++i) {
+ video_source_.IncomingCapturedFrame(CreateFrame(i, kWidth, kHeight));
+ WaitForEncodedFrame(i);
+ }
+
+ video_stream_encoder_->Stop();
+ video_stream_encoder_.reset();
+ stats_proxy_.reset();
+
+ EXPECT_EQ(0,
+ metrics::NumSamples("WebRTC.Video.CpuLimitedResolutionInPercent"));
+}
+
+TEST_F(VideoStreamEncoderTest, CallsBitrateObserver) {
+ MockBitrateObserver bitrate_observer;
+ video_stream_encoder_->SetBitrateObserver(&bitrate_observer);
+
+ const int kDefaultFps = 30;
+ const BitrateAllocation expected_bitrate =
+ DefaultVideoBitrateAllocator(fake_encoder_.codec_config())
+ .GetAllocation(kLowTargetBitrateBps, kDefaultFps);
+
+ // First called on bitrate updated, then again on first frame.
+ EXPECT_CALL(bitrate_observer, OnBitrateAllocationUpdated(expected_bitrate))
+ .Times(2);
+ video_stream_encoder_->OnBitrateUpdated(kLowTargetBitrateBps, 0, 0);
+
+ const int64_t kStartTimeMs = 1;
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(kStartTimeMs, codec_width_, codec_height_));
+ WaitForEncodedFrame(kStartTimeMs);
+
+ // Not called on second frame.
+ EXPECT_CALL(bitrate_observer, OnBitrateAllocationUpdated(expected_bitrate))
+ .Times(0);
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(kStartTimeMs + 1, codec_width_, codec_height_));
+ WaitForEncodedFrame(kStartTimeMs + 1);
+
+ // Called after a process interval.
+ const int64_t kProcessIntervalMs =
+ vcm::VCMProcessTimer::kDefaultProcessIntervalMs;
+ fake_clock_.AdvanceTimeMicros(rtc::kNumMicrosecsPerMillisec *
+ (kProcessIntervalMs + (1000 / kDefaultFps)));
+ EXPECT_CALL(bitrate_observer, OnBitrateAllocationUpdated(expected_bitrate))
+ .Times(1);
+ video_source_.IncomingCapturedFrame(CreateFrame(
+ kStartTimeMs + kProcessIntervalMs, codec_width_, codec_height_));
+ WaitForEncodedFrame(kStartTimeMs + kProcessIntervalMs);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, OveruseDetectorUpdatedOnReconfigureAndAdaption) {
+ const int kFrameWidth = 1280;
+ const int kFrameHeight = 720;
+ const int kFramerate = 24;
+
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+ test::FrameForwarder source;
+ video_stream_encoder_->SetSource(
+ &source, VideoSendStream::DegradationPreference::kMaintainResolution);
+
+ // Insert a single frame, triggering initial configuration.
+ source.IncomingCapturedFrame(CreateFrame(1, kFrameWidth, kFrameHeight));
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+
+ EXPECT_EQ(
+ video_stream_encoder_->overuse_detector_proxy_->GetLastTargetFramerate(),
+ kDefaultFramerate);
+
+ // Trigger an encoder reconfiguration (without resetting the entire instance).
+ VideoEncoderConfig video_encoder_config;
+ video_encoder_config.max_bitrate_bps = kTargetBitrateBps;
+ video_encoder_config.number_of_streams = 1;
+ video_encoder_config.video_stream_factory =
+ new rtc::RefCountedObject<VideoStreamFactory>(1, kFramerate);
+ video_stream_encoder_->ConfigureEncoder(std::move(video_encoder_config),
+ kMaxPayloadLength, false);
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+
+ // Detector should be updated with fps limit from codec config.
+ EXPECT_EQ(
+ video_stream_encoder_->overuse_detector_proxy_->GetLastTargetFramerate(),
+ kFramerate);
+
+ // Trigger overuse, max framerate should be reduced.
+ VideoSendStream::Stats stats = stats_proxy_->GetStats();
+ stats.input_frame_rate = kFramerate;
+ stats_proxy_->SetMockStats(stats);
+ video_stream_encoder_->TriggerCpuOveruse();
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+ int adapted_framerate =
+ video_stream_encoder_->overuse_detector_proxy_->GetLastTargetFramerate();
+ EXPECT_LT(adapted_framerate, kFramerate);
+
+ // Trigger underuse, max framerate should go back to the codec-configured
+ // fps. Set an extra-low fps to make sure it's actually reset, not just
+ // incremented.
+ stats = stats_proxy_->GetStats();
+ stats.input_frame_rate = adapted_framerate / 2;
+ stats_proxy_->SetMockStats(stats);
+ video_stream_encoder_->TriggerCpuNormalUsage();
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+ EXPECT_EQ(
+ video_stream_encoder_->overuse_detector_proxy_->GetLastTargetFramerate(),
+ kFramerate);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ OveruseDetectorUpdatedRespectsFramerateAfterUnderuse) {
+ const int kFrameWidth = 1280;
+ const int kFrameHeight = 720;
+ const int kLowFramerate = 15;
+ const int kHighFramerate = 25;
+
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+ test::FrameForwarder source;
+ video_stream_encoder_->SetSource(
+ &source, VideoSendStream::DegradationPreference::kMaintainResolution);
+
+ // Trigger initial configuration.
+ VideoEncoderConfig video_encoder_config;
+ video_encoder_config.max_bitrate_bps = kTargetBitrateBps;
+ video_encoder_config.number_of_streams = 1;
+ video_encoder_config.video_stream_factory =
+ new rtc::RefCountedObject<VideoStreamFactory>(1, kLowFramerate);
+ source.IncomingCapturedFrame(CreateFrame(1, kFrameWidth, kFrameHeight));
+ video_stream_encoder_->ConfigureEncoder(std::move(video_encoder_config),
+ kMaxPayloadLength, false);
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+
+ EXPECT_EQ(
+ video_stream_encoder_->overuse_detector_proxy_->GetLastTargetFramerate(),
+ kLowFramerate);
+
+ // Trigger overuse, max framerate should be reduced.
+ VideoSendStream::Stats stats = stats_proxy_->GetStats();
+ stats.input_frame_rate = kLowFramerate;
+ stats_proxy_->SetMockStats(stats);
+ video_stream_encoder_->TriggerCpuOveruse();
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+ int adapted_framerate =
+ video_stream_encoder_->overuse_detector_proxy_->GetLastTargetFramerate();
+ EXPECT_LT(adapted_framerate, kLowFramerate);
+
+ // Reconfigure the encoder with a new (higher) max framerate; the max fps
+ // should still respect the adaptation.
+ video_encoder_config.video_stream_factory =
+ new rtc::RefCountedObject<VideoStreamFactory>(1, kHighFramerate);
+ source.IncomingCapturedFrame(CreateFrame(1, kFrameWidth, kFrameHeight));
+ video_stream_encoder_->ConfigureEncoder(std::move(video_encoder_config),
+ kMaxPayloadLength, false);
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+
+ EXPECT_EQ(
+ video_stream_encoder_->overuse_detector_proxy_->GetLastTargetFramerate(),
+ adapted_framerate);
+
+ // Trigger underuse, max framerate should go back to codec configured fps.
+ stats = stats_proxy_->GetStats();
+ stats.input_frame_rate = adapted_framerate;
+ stats_proxy_->SetMockStats(stats);
+ video_stream_encoder_->TriggerCpuNormalUsage();
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+ EXPECT_EQ(
+ video_stream_encoder_->overuse_detector_proxy_->GetLastTargetFramerate(),
+ kHighFramerate);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ OveruseDetectorUpdatedOnDegradationPreferenceChange) {
+ const int kFrameWidth = 1280;
+ const int kFrameHeight = 720;
+ const int kFramerate = 24;
+
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+ test::FrameForwarder source;
+ video_stream_encoder_->SetSource(
+ &source, VideoSendStream::DegradationPreference::kMaintainResolution);
+
+ // Trigger initial configuration.
+ VideoEncoderConfig video_encoder_config;
+ video_encoder_config.max_bitrate_bps = kTargetBitrateBps;
+ video_encoder_config.number_of_streams = 1;
+ video_encoder_config.video_stream_factory =
+ new rtc::RefCountedObject<VideoStreamFactory>(1, kFramerate);
+ source.IncomingCapturedFrame(CreateFrame(1, kFrameWidth, kFrameHeight));
+ video_stream_encoder_->ConfigureEncoder(std::move(video_encoder_config),
+ kMaxPayloadLength, false);
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+
+ EXPECT_EQ(
+ video_stream_encoder_->overuse_detector_proxy_->GetLastTargetFramerate(),
+ kFramerate);
+
+ // Trigger overuse, max framerate should be reduced.
+ VideoSendStream::Stats stats = stats_proxy_->GetStats();
+ stats.input_frame_rate = kFramerate;
+ stats_proxy_->SetMockStats(stats);
+ video_stream_encoder_->TriggerCpuOveruse();
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+ int adapted_framerate =
+ video_stream_encoder_->overuse_detector_proxy_->GetLastTargetFramerate();
+ EXPECT_LT(adapted_framerate, kFramerate);
+
+ // Change degradation preference to one without framerate scaling. The target
+ // framerate should be reset to the codec-defined limit.
+ video_stream_encoder_->SetSource(
+ &source, VideoSendStream::DegradationPreference::kMaintainFramerate);
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+ EXPECT_EQ(
+ video_stream_encoder_->overuse_detector_proxy_->GetLastTargetFramerate(),
+ kFramerate);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, DropsFramesAndScalesWhenBitrateIsTooLow) {
+ const int kTooLowBitrateForFrameSizeBps = 10000;
+ video_stream_encoder_->OnBitrateUpdated(kTooLowBitrateForFrameSizeBps, 0, 0);
+ const int kWidth = 640;
+ const int kHeight = 360;
+
+ video_source_.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
+
+ // Expect this frame to be dropped; the wait should time out.
+ ExpectDroppedFrame();
+
+ // Expect the sink_wants to specify a scaled frame.
+ EXPECT_LT(video_source_.sink_wants().max_pixel_count, kWidth * kHeight);
+
+ int last_pixel_count = video_source_.sink_wants().max_pixel_count;
+
+ // Next frame is scaled.
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(2, kWidth * 3 / 4, kHeight * 3 / 4));
+
+ // Expect this frame to be dropped; the wait should time out.
+ ExpectDroppedFrame();
+
+ EXPECT_LT(video_source_.sink_wants().max_pixel_count, last_pixel_count);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ NumberOfDroppedFramesLimitedWhenBitrateIsTooLow) {
+ const int kTooLowBitrateForFrameSizeBps = 10000;
+ video_stream_encoder_->OnBitrateUpdated(kTooLowBitrateForFrameSizeBps, 0, 0);
+ const int kWidth = 640;
+ const int kHeight = 360;
+
+ // We expect the first kMaxInitialFramedrop frames to be dropped.
+ int i;
+ for (i = 1; i <= kMaxInitialFramedrop; ++i) {
+ video_source_.IncomingCapturedFrame(CreateFrame(i, kWidth, kHeight));
+ ExpectDroppedFrame();
+ }
+ // The (kMaxInitialFramedrop + 1)th frame should not be dropped, even though
+ // its size is too large for the bitrate.
+ video_source_.IncomingCapturedFrame(CreateFrame(i, kWidth, kHeight));
+ WaitForEncodedFrame(i);
+
+ // Expect the sink_wants to specify a scaled frame.
+ EXPECT_LT(video_source_.sink_wants().max_pixel_count, kWidth * kHeight);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ InitialFrameDropOffWithMaintainResolutionPreference) {
+ const int kWidth = 640;
+ const int kHeight = 360;
+ video_stream_encoder_->OnBitrateUpdated(kLowTargetBitrateBps, 0, 0);
+
+ // Set degradation preference.
+ video_stream_encoder_->SetSource(
+ &video_source_,
+ VideoSendStream::DegradationPreference::kMaintainResolution);
+
+ video_source_.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
+ // Frame should not be dropped, even if it's too large.
+ WaitForEncodedFrame(1);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, InitialFrameDropOffWhenEncoderDisabledScaling) {
+ const int kWidth = 640;
+ const int kHeight = 360;
+ fake_encoder_.SetQualityScaling(false);
+ video_stream_encoder_->OnBitrateUpdated(kLowTargetBitrateBps, 0, 0);
+
+ // Force quality scaler reconfiguration by resetting the source.
+ video_stream_encoder_->SetSource(
+ &video_source_,
+ VideoSendStream::DegradationPreference::kBalanced);
+
+ video_source_.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
+ // Frame should not be dropped, even if it's too large.
+ WaitForEncodedFrame(1);
+
+ video_stream_encoder_->Stop();
+ fake_encoder_.SetQualityScaling(true);
+}
+
+TEST_F(VideoStreamEncoderTest,
+ ResolutionNotAdaptedForTooSmallFrame_MaintainFramerateMode) {
+ const int kTooSmallWidth = 10;
+ const int kTooSmallHeight = 10;
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+
+ // Enable kMaintainFramerate preference, no initial limitation.
+ test::FrameForwarder source;
+ video_stream_encoder_->SetSource(
+ &source, VideoSendStream::DegradationPreference::kMaintainFramerate);
+ VerifyNoLimitation(source.sink_wants());
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+
+ // Trigger adapt down, too small frame, expect no change.
+ source.IncomingCapturedFrame(CreateFrame(1, kTooSmallWidth, kTooSmallHeight));
+ WaitForEncodedFrame(1);
+ video_stream_encoder_->TriggerCpuOveruse();
+ VerifyNoLimitation(source.sink_wants());
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ ResolutionNotAdaptedForTooSmallFrame_BalancedMode) {
+ const int kTooSmallWidth = 10;
+ const int kTooSmallHeight = 10;
+ const int kFpsLimit = 7;
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+
+ // Enable kBalanced preference, no initial limitation.
+ test::FrameForwarder source;
+ video_stream_encoder_->SetSource(
+ &source,
+ VideoSendStream::DegradationPreference::kBalanced);
+ VerifyNoLimitation(source.sink_wants());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
+
+ // Trigger adapt down, expect limited framerate.
+ source.IncomingCapturedFrame(CreateFrame(1, kTooSmallWidth, kTooSmallHeight));
+ WaitForEncodedFrame(1);
+ video_stream_encoder_->TriggerQualityLow();
+ VerifyFpsEqResolutionMax(source.sink_wants(), kFpsLimit);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt down, too small frame, expect no change.
+ source.IncomingCapturedFrame(CreateFrame(2, kTooSmallWidth, kTooSmallHeight));
+ WaitForEncodedFrame(2);
+ video_stream_encoder_->TriggerQualityLow();
+ VerifyFpsEqResolutionMax(source.sink_wants(), kFpsLimit);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, FailingInitEncodeDoesntCauseCrash) {
+ fake_encoder_.ForceInitEncodeFailure(true);
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+ ResetEncoder("VP8", 2, 1, 1, true, false);
+ const int kFrameWidth = 1280;
+ const int kFrameHeight = 720;
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(1, kFrameWidth, kFrameHeight));
+ ExpectDroppedFrame();
+ video_stream_encoder_->Stop();
+}
+
+// TODO(sprang): Extend this with fps throttling and any "balanced" extensions.
+TEST_F(VideoStreamEncoderTest,
+ AdaptsResolutionOnOveruse_MaintainFramerateMode) {
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+
+ const int kFrameWidth = 1280;
+ const int kFrameHeight = 720;
+ // Enable default VideoAdapter downscaling. The first step is 3/4, not the
+ // 3/5 requested by
+ // VideoStreamEncoder::VideoSourceProxy::RequestResolutionLowerThan().
+ video_source_.set_adaptation_enabled(true);
+
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(1, kFrameWidth, kFrameHeight));
+ WaitForEncodedFrame(kFrameWidth, kFrameHeight);
+
+ // Trigger CPU overuse, downscale each dimension to 3/4.
+ video_stream_encoder_->TriggerCpuOveruse();
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(2, kFrameWidth, kFrameHeight));
+ WaitForEncodedFrame((kFrameWidth * 3) / 4, (kFrameHeight * 3) / 4);
+
+ // Trigger CPU normal use, return to original resolution.
+ video_stream_encoder_->TriggerCpuNormalUsage();
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(3, kFrameWidth, kFrameHeight));
+ WaitForEncodedFrame(kFrameWidth, kFrameHeight);
+
+ video_stream_encoder_->Stop();
+}
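+
+// Editor's note (illustrative sketch, not upstream code): the frame size
+// waited for above is one default 3/4 VideoAdapter step per dimension:
+//   static_assert(1280 * 3 / 4 == 960, "width");
+//   static_assert(720 * 3 / 4 == 540, "height");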
+
+TEST_F(VideoStreamEncoderTest,
+ AdaptsFramerateOnOveruse_MaintainResolutionMode) {
+ const int kFrameWidth = 1280;
+ const int kFrameHeight = 720;
+ int kFrameIntervalMs = rtc::kNumMillisecsPerSec / max_framerate_;
+
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+ video_stream_encoder_->SetSource(
+ &video_source_,
+ VideoSendStream::DegradationPreference::kMaintainResolution);
+ video_source_.set_adaptation_enabled(true);
+
+ int64_t timestamp_ms = fake_clock_.TimeNanos() / rtc::kNumNanosecsPerMillisec;
+
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight));
+ WaitForEncodedFrame(timestamp_ms);
+
+ // Try to trigger overuse. No fps estimate available => no effect.
+ video_stream_encoder_->TriggerCpuOveruse();
+
+ // Insert frames for one second to get a stable estimate.
+ for (int i = 0; i < max_framerate_; ++i) {
+ timestamp_ms += kFrameIntervalMs;
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ }
+
+ // Trigger CPU overuse, reduce framerate to 2/3 of the maximum.
+ video_stream_encoder_->TriggerCpuOveruse();
+ int num_frames_dropped = 0;
+ for (int i = 0; i < max_framerate_; ++i) {
+ timestamp_ms += kFrameIntervalMs;
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight));
+ if (!WaitForFrame(kFrameTimeoutMs)) {
+ ++num_frames_dropped;
+ } else {
+ sink_.CheckLastFrameSizeMathces(kFrameWidth, kFrameHeight);
+ }
+ }
+
+ // Add some slack to account for frames dropped by the frame dropper.
+ const int kErrorMargin = 1;
+ EXPECT_NEAR(num_frames_dropped, max_framerate_ - (max_framerate_ * 2 / 3),
+ kErrorMargin);
+
+ // Trigger CPU overuse, reduce framerate to 2/3 of the current rate again.
+ video_stream_encoder_->TriggerCpuOveruse();
+ num_frames_dropped = 0;
+ for (int i = 0; i < max_framerate_; ++i) {
+ timestamp_ms += kFrameIntervalMs;
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight));
+ if (!WaitForFrame(kFrameTimeoutMs)) {
+ ++num_frames_dropped;
+ } else {
+ sink_.CheckLastFrameSizeMathces(kFrameWidth, kFrameHeight);
+ }
+ }
+ EXPECT_NEAR(num_frames_dropped, max_framerate_ - (max_framerate_ * 4 / 9),
+ kErrorMargin);
+
+ // Go back up one step.
+ video_stream_encoder_->TriggerCpuNormalUsage();
+ num_frames_dropped = 0;
+ for (int i = 0; i < max_framerate_; ++i) {
+ timestamp_ms += kFrameIntervalMs;
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight));
+ if (!WaitForFrame(kFrameTimeoutMs)) {
+ ++num_frames_dropped;
+ } else {
+ sink_.CheckLastFrameSizeMathces(kFrameWidth, kFrameHeight);
+ }
+ }
+ EXPECT_NEAR(num_frames_dropped, max_framerate_ - (max_framerate_ * 2 / 3),
+ kErrorMargin);
+
+ // Go back up to original mode.
+ video_stream_encoder_->TriggerCpuNormalUsage();
+ num_frames_dropped = 0;
+ for (int i = 0; i < max_framerate_; ++i) {
+ timestamp_ms += kFrameIntervalMs;
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight));
+ if (!WaitForFrame(kFrameTimeoutMs)) {
+ ++num_frames_dropped;
+ } else {
+ sink_.CheckLastFrameSizeMathces(kFrameWidth, kFrameHeight);
+ }
+ }
+ EXPECT_NEAR(num_frames_dropped, 0, kErrorMargin);
+
+ video_stream_encoder_->Stop();
+}
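+
+// Editor's note (illustrative sketch, not upstream code): the drop counts
+// asserted above assume each overuse step scales the target framerate to 2/3
+// of its current value. Per second of input at max_framerate_ fps:
+//   after one step:  dropped ~= max_framerate_ - max_framerate_ * 2 / 3
+//   after two steps: dropped ~= max_framerate_ - max_framerate_ * 4 / 9
+// each within kErrorMargin of the measured count.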
+
+TEST_F(VideoStreamEncoderTest, DoesntAdaptDownPastMinFramerate) {
+ const int kFramerateFps = 5;
+ const int kFrameIntervalMs = rtc::kNumMillisecsPerSec / kFramerateFps;
+ const int kMinFpsFrameInterval = rtc::kNumMillisecsPerSec / kMinFramerateFps;
+ const int kFrameWidth = 1280;
+ const int kFrameHeight = 720;
+
+ // Reconfigure encoder with two temporal layers and screensharing, which will
+ // disable frame dropping and make testing easier.
+ ResetEncoder("VP8", 1, 2, 1, true, true);
+
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+ video_stream_encoder_->SetSource(
+ &video_source_,
+ VideoSendStream::DegradationPreference::kMaintainResolution);
+ video_source_.set_adaptation_enabled(true);
+
+ int64_t timestamp_ms = fake_clock_.TimeNanos() / rtc::kNumNanosecsPerMillisec;
+
+ // Trigger overuse as much as we can.
+ for (int i = 0; i < VideoStreamEncoder::kMaxCpuResolutionDowngrades; ++i) {
+ // Insert frames to get a new fps estimate...
+ for (int j = 0; j < kFramerateFps; ++j) {
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight));
+ timestamp_ms += kFrameIntervalMs;
+ }
+ // ...and then try to adapt again.
+ video_stream_encoder_->TriggerCpuOveruse();
+ }
+
+ // Drain any frame in the pipeline.
+ WaitForFrame(kDefaultTimeoutMs);
+
+ // Insert frames at min fps; all should go through.
+ for (int i = 0; i < 10; ++i) {
+ timestamp_ms += kMinFpsFrameInterval;
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ }
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ AdaptsResolutionAndFramerateForLowQuality_BalancedMode) {
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ const int64_t kFrameIntervalMs = 150;
+ int64_t timestamp_ms = kFrameIntervalMs;
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+
+ // Enable kBalanced preference, no initial limitation.
+ AdaptingFrameForwarder source;
+ source.set_adaptation_enabled(true);
+ video_stream_encoder_->SetSource(
+ &source,
+ VideoSendStream::DegradationPreference::kBalanced);
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(kWidth, kHeight);
+ VerifyNoLimitation(source.sink_wants());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt down, expect scaled down resolution (960x540@30fps).
+ video_stream_encoder_->TriggerQualityLow();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ VerifyFpsMaxResolutionLt(source.sink_wants(), kWidth * kHeight);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt down, expect scaled down resolution (640x360@30fps).
+ video_stream_encoder_->TriggerQualityLow();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ VerifyFpsMaxResolutionLt(source.sink_wants(), source.last_wants());
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt down, expect reduced fps (640x360@15fps).
+ video_stream_encoder_->TriggerQualityLow();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ VerifyFpsLtResolutionEq(source.sink_wants(), source.last_wants());
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt down, expect scaled down resolution (480x270@15fps).
+ video_stream_encoder_->TriggerQualityLow();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ VerifyFpsEqResolutionLt(source.sink_wants(), source.last_wants());
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(4, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt down, expect reduced fps (480x270@10fps).
+ video_stream_encoder_->TriggerQualityLow();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ VerifyFpsLtResolutionEq(source.sink_wants(), source.last_wants());
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(5, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt down, expect scaled down resolution (320x180@10fps).
+ video_stream_encoder_->TriggerQualityLow();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ VerifyFpsEqResolutionLt(source.sink_wants(), source.last_wants());
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(6, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt down, expect reduced fps (320x180@7fps).
+ video_stream_encoder_->TriggerQualityLow();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ VerifyFpsLtResolutionEq(source.sink_wants(), source.last_wants());
+ rtc::VideoSinkWants last_wants = source.sink_wants();
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(7, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt down, min resolution reached, expect no change.
+ video_stream_encoder_->TriggerQualityLow();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ VerifyFpsEqResolutionEq(source.sink_wants(), last_wants);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(7, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt up, expect increased fps (320x180@10fps).
+ video_stream_encoder_->TriggerQualityHigh();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ VerifyFpsGtResolutionEq(source.sink_wants(), source.last_wants());
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(8, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt up, expect upscaled resolution (480x270@10fps).
+ video_stream_encoder_->TriggerQualityHigh();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ VerifyFpsEqResolutionGt(source.sink_wants(), source.last_wants());
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(9, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt up, expect increased fps (480x270@15fps).
+ video_stream_encoder_->TriggerQualityHigh();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ VerifyFpsGtResolutionEq(source.sink_wants(), source.last_wants());
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(10, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt up, expect upscaled resolution (640x360@15fps).
+ video_stream_encoder_->TriggerQualityHigh();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ VerifyFpsEqResolutionGt(source.sink_wants(), source.last_wants());
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(11, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt up, expect increased fps (640x360@30fps).
+ video_stream_encoder_->TriggerQualityHigh();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ VerifyFpsMaxResolutionEq(source.sink_wants(), source.last_wants());
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(12, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt up, expect upscaled resolution (960x540@30fps).
+ video_stream_encoder_->TriggerQualityHigh();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ VerifyFpsMaxResolutionGt(source.sink_wants(), source.last_wants());
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(13, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt up, expect no restriction (1280x720@30fps).
+ video_stream_encoder_->TriggerQualityHigh();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(kWidth, kHeight);
+ VerifyFpsMaxResolutionGt(source.sink_wants(), source.last_wants());
+ VerifyNoLimitation(source.sink_wants());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_EQ(14, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt up, expect no change.
+ video_stream_encoder_->TriggerQualityHigh();
+ VerifyNoLimitation(source.sink_wants());
+ EXPECT_EQ(14, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
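+
+// Editor's note (summary of the step comments above, not upstream code): the
+// balanced-mode ladder alternates resolution and framerate steps:
+//   1280x720@30 -> 960x540@30 -> 640x360@30 -> 640x360@15 -> 480x270@15
+//   -> 480x270@10 -> 320x180@10 -> 320x180@7 (minimum reached),
+// then climbs back up in reverse order until no limitation remains.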
+
+TEST_F(VideoStreamEncoderTest, AdaptWithTwoReasonsAndDifferentOrder_Framerate) {
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ const int64_t kFrameIntervalMs = 150;
+ int64_t timestamp_ms = kFrameIntervalMs;
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+
+ // Enable kBalanced preference, no initial limitation.
+ AdaptingFrameForwarder source;
+ source.set_adaptation_enabled(true);
+ video_stream_encoder_->SetSource(
+ &source,
+ VideoSendStream::DegradationPreference::kBalanced);
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(kWidth, kHeight);
+ VerifyNoLimitation(source.sink_wants());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger cpu adapt down, expect scaled down resolution (960x540@30fps).
+ video_stream_encoder_->TriggerCpuOveruse();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ VerifyFpsMaxResolutionLt(source.sink_wants(), kWidth * kHeight);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger cpu adapt down, expect scaled down resolution (640x360@30fps).
+ video_stream_encoder_->TriggerCpuOveruse();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ VerifyFpsMaxResolutionLt(source.sink_wants(), source.last_wants());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_EQ(2, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger quality adapt down, expect reduced fps (640x360@15fps).
+ video_stream_encoder_->TriggerQualityLow();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ VerifyFpsLtResolutionEq(source.sink_wants(), source.last_wants());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_EQ(2, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger cpu adapt up, expect increased fps (640x360@30fps).
+ video_stream_encoder_->TriggerCpuNormalUsage();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ VerifyFpsMaxResolutionEq(source.sink_wants(), source.last_wants());
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_EQ(3, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger quality adapt up, expect upscaled resolution (960x540@30fps).
+ video_stream_encoder_->TriggerQualityHigh();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ VerifyFpsMaxResolutionGt(source.sink_wants(), source.last_wants());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_EQ(3, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger cpu adapt up, expect no restriction (1280x720@30fps).
+ video_stream_encoder_->TriggerCpuNormalUsage();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(kWidth, kHeight);
+ VerifyFpsMaxResolutionGt(source.sink_wants(), source.last_wants());
+ VerifyNoLimitation(source.sink_wants());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_EQ(4, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt up, expect no change.
+ video_stream_encoder_->TriggerQualityHigh();
+ VerifyNoLimitation(source.sink_wants());
+ EXPECT_EQ(4, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
+ AdaptWithTwoReasonsAndDifferentOrder_Resolution) {
+ const int kWidth = 640;
+ const int kHeight = 360;
+ const int kFpsLimit = 15;
+ const int64_t kFrameIntervalMs = 150;
+ int64_t timestamp_ms = kFrameIntervalMs;
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+
+ // Enable kBalanced preference, no initial limitation.
+ AdaptingFrameForwarder source;
+ source.set_adaptation_enabled(true);
+ video_stream_encoder_->SetSource(
+ &source,
+ VideoSendStream::DegradationPreference::kBalanced);
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(kWidth, kHeight);
+ VerifyNoLimitation(source.sink_wants());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger cpu adapt down, expect reduced framerate (640x360@15fps).
+ video_stream_encoder_->TriggerCpuOveruse();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ VerifyFpsEqResolutionMax(source.sink_wants(), kFpsLimit);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger quality adapt down, expect scaled down resolution (480x270@15fps).
+ video_stream_encoder_->TriggerQualityLow();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ VerifyFpsEqResolutionLt(source.sink_wants(), source.last_wants());
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger cpu adapt up, expect upscaled resolution (640x360@15fps).
+ video_stream_encoder_->TriggerCpuNormalUsage();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ VerifyFpsEqResolutionGt(source.sink_wants(), source.last_wants());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_EQ(2, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger quality adapt up, expect increased fps (640x360@30fps).
+ video_stream_encoder_->TriggerQualityHigh();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ VerifyNoLimitation(source.sink_wants());
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_EQ(2, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ // Trigger adapt up, expect no change.
+ video_stream_encoder_->TriggerQualityHigh();
+ VerifyNoLimitation(source.sink_wants());
+ EXPECT_EQ(2, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, AcceptsFullHdAdaptedDownSimulcastFrames) {
+ // Simulates simulcast behavior by making the highest stream's resolution
+ // divisible by 4.
+ class CroppingVideoStreamFactory
+ : public VideoEncoderConfig::VideoStreamFactoryInterface {
+ public:
+ explicit CroppingVideoStreamFactory(size_t num_temporal_layers,
+ int framerate)
+ : num_temporal_layers_(num_temporal_layers), framerate_(framerate) {
+ EXPECT_GT(num_temporal_layers, 0u);
+ EXPECT_GT(framerate, 0);
+ }
+
+ private:
+ std::vector<VideoStream> CreateEncoderStreams(
+ int width,
+ int height,
+ const VideoEncoderConfig& encoder_config) override {
+ std::vector<VideoStream> streams =
+ test::CreateVideoStreams(width - width % 4, height - height % 4,
+ encoder_config);
+ for (VideoStream& stream : streams) {
+ stream.temporal_layer_thresholds_bps.resize(num_temporal_layers_ - 1);
+ stream.max_framerate = framerate_;
+ }
+ return streams;
+ }
+
+ const size_t num_temporal_layers_;
+ const int framerate_;
+ };
+
+ const int kFrameWidth = 1920;
+ const int kFrameHeight = 1080;
+ // 3/4 of 1920.
+ const int kAdaptedFrameWidth = 1440;
+ // 3/4 of 1080, rounded down to a multiple of 4.
+ const int kAdaptedFrameHeight = 808;
+ const int kFramerate = 24;
+
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+ // Trigger an encoder reconfiguration (without resetting the entire instance).
+ VideoEncoderConfig video_encoder_config;
+ video_encoder_config.max_bitrate_bps = kTargetBitrateBps;
+ video_encoder_config.number_of_streams = 1;
+ video_encoder_config.video_stream_factory =
+ new rtc::RefCountedObject<CroppingVideoStreamFactory>(1, kFramerate);
+ video_stream_encoder_->ConfigureEncoder(std::move(video_encoder_config),
+ kMaxPayloadLength, false);
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+
+ video_source_.set_adaptation_enabled(true);
+
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(1, kFrameWidth, kFrameHeight));
+ WaitForEncodedFrame(kFrameWidth, kFrameHeight);
+
+ // Trigger CPU overuse, downscale each dimension to 3/4.
+ video_stream_encoder_->TriggerCpuOveruse();
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(2, kFrameWidth, kFrameHeight));
+ WaitForEncodedFrame(kAdaptedFrameWidth, kAdaptedFrameHeight);
+
+ video_stream_encoder_->Stop();
+}
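+
+// Editor's note (illustrative sketch, not upstream code): the adapted size
+// above combines the 3/4 downscale step with the factory's crop to a multiple
+// of four. The same arithmetic as a constexpr check:
+//   constexpr int Adapted(int dim) { return (dim * 3 / 4) & ~3; }
+//   static_assert(Adapted(1920) == 1440, "width");
+//   static_assert(Adapted(1080) == 808, "height");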
+
+TEST_F(VideoStreamEncoderTest, PeriodicallyUpdatesChannelParameters) {
+ const int kFrameWidth = 1280;
+ const int kFrameHeight = 720;
+ const int kLowFps = 2;
+ const int kHighFps = 30;
+
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+
+ int64_t timestamp_ms = fake_clock_.TimeNanos() / rtc::kNumNanosecsPerMillisec;
+ max_framerate_ = kLowFps;
+
+ // Insert 2 seconds of 2fps video.
+ for (int i = 0; i < kLowFps * 2; ++i) {
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ timestamp_ms += 1000 / kLowFps;
+ }
+
+ // Make sure encoder is updated with new target.
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ timestamp_ms += 1000 / kLowFps;
+
+ EXPECT_EQ(kLowFps, fake_encoder_.GetConfiguredInputFramerate());
+
+ // Insert 30fps frames for just a little more than the forced update period.
+ const int kVcmTimerIntervalFrames =
+ (vcm::VCMProcessTimer::kDefaultProcessIntervalMs * kHighFps) / 1000;
+ const int kFrameIntervalMs = 1000 / kHighFps;
+ max_framerate_ = kHighFps;
+ for (int i = 0; i < kVcmTimerIntervalFrames + 2; ++i) {
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight));
+ // Wait for the encoded frame, but skip ahead if it doesn't arrive; it might
+ // be dropped if the encoder hasn't been updated with the new, higher target
+ // framerate yet, causing it to overshoot the target bitrate and then suffer
+ // the wrath of the media optimizer.
+ TimedWaitForEncodedFrame(timestamp_ms, 2 * kFrameIntervalMs);
+ timestamp_ms += kFrameIntervalMs;
+ }
+
+ // Don't expect a correct measurement just yet, but it should be higher than
+ // before.
+ EXPECT_GT(fake_encoder_.GetConfiguredInputFramerate(), kLowFps);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, DoesNotUpdateBitrateAllocationWhenSuspended) {
+ const int kFrameWidth = 1280;
+ const int kFrameHeight = 720;
+ const int kTargetBitrateBps = 1000000;
+
+ MockBitrateObserver bitrate_observer;
+ video_stream_encoder_->SetBitrateObserver(&bitrate_observer);
+
+ EXPECT_CALL(bitrate_observer, OnBitrateAllocationUpdated(_)).Times(1);
+ // Initial bitrate update.
+ video_stream_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+
+ // Insert a first video frame; this causes another bitrate update.
+ int64_t timestamp_ms = fake_clock_.TimeNanos() / rtc::kNumNanosecsPerMillisec;
+ EXPECT_CALL(bitrate_observer, OnBitrateAllocationUpdated(_)).Times(1);
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight));
+ WaitForEncodedFrame(timestamp_ms);
+
+ // Next, simulate video suspension due to pacer queue overrun.
+ video_stream_encoder_->OnBitrateUpdated(0, 0, 1);
+
+ // Skip ahead until a new periodic parameter update should have occurred.
+ timestamp_ms += vcm::VCMProcessTimer::kDefaultProcessIntervalMs;
+ fake_clock_.AdvanceTimeMicros(
+ vcm::VCMProcessTimer::kDefaultProcessIntervalMs *
+ rtc::kNumMicrosecsPerMillisec);
+
+ // Bitrate observer should not be called.
+ EXPECT_CALL(bitrate_observer, OnBitrateAllocationUpdated(_)).Times(0);
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight));
+ ExpectDroppedFrame();
+
+ video_stream_encoder_->Stop();
+}
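+
+// Editor's note (illustrative, not upstream documentation): in the test above,
+// OnBitrateUpdated(0, 0, 1) models suspension by setting the target bitrate to
+// zero; with no usable rate the encoder drops the frame instead of emitting a
+// fresh allocation, which is what the final .Times(0) expectation verifies.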
+
+} // namespace webrtc