Diffstat (limited to 'third_party/libwebrtc/rtc_tools/frame_analyzer')
-rw-r--r--third_party/libwebrtc/rtc_tools/frame_analyzer/frame_analyzer.cc197
-rw-r--r--third_party/libwebrtc/rtc_tools/frame_analyzer/linear_least_squares.cc206
-rw-r--r--third_party/libwebrtc/rtc_tools/frame_analyzer/linear_least_squares.h56
-rw-r--r--third_party/libwebrtc/rtc_tools/frame_analyzer/linear_least_squares_unittest.cc93
-rw-r--r--third_party/libwebrtc/rtc_tools/frame_analyzer/reference_less_video_analysis.cc41
-rw-r--r--third_party/libwebrtc/rtc_tools/frame_analyzer/reference_less_video_analysis_lib.cc139
-rw-r--r--third_party/libwebrtc/rtc_tools/frame_analyzer/reference_less_video_analysis_lib.h48
-rw-r--r--third_party/libwebrtc/rtc_tools/frame_analyzer/reference_less_video_analysis_unittest.cc49
-rw-r--r--third_party/libwebrtc/rtc_tools/frame_analyzer/video_color_aligner.cc237
-rw-r--r--third_party/libwebrtc/rtc_tools/frame_analyzer/video_color_aligner.h51
-rw-r--r--third_party/libwebrtc/rtc_tools/frame_analyzer/video_color_aligner_unittest.cc176
-rw-r--r--third_party/libwebrtc/rtc_tools/frame_analyzer/video_geometry_aligner.cc178
-rw-r--r--third_party/libwebrtc/rtc_tools/frame_analyzer/video_geometry_aligner.h57
-rw-r--r--third_party/libwebrtc/rtc_tools/frame_analyzer/video_geometry_aligner_unittest.cc153
-rw-r--r--third_party/libwebrtc/rtc_tools/frame_analyzer/video_quality_analysis.cc160
-rw-r--r--third_party/libwebrtc/rtc_tools/frame_analyzer/video_quality_analysis.h103
-rw-r--r--third_party/libwebrtc/rtc_tools/frame_analyzer/video_quality_analysis_unittest.cc249
-rw-r--r--third_party/libwebrtc/rtc_tools/frame_analyzer/video_temporal_aligner.cc238
-rw-r--r--third_party/libwebrtc/rtc_tools/frame_analyzer/video_temporal_aligner.h61
-rw-r--r--third_party/libwebrtc/rtc_tools/frame_analyzer/video_temporal_aligner_unittest.cc129
20 files changed, 2621 insertions, 0 deletions
diff --git a/third_party/libwebrtc/rtc_tools/frame_analyzer/frame_analyzer.cc b/third_party/libwebrtc/rtc_tools/frame_analyzer/frame_analyzer.cc
new file mode 100644
index 0000000000..501a6142a8
--- /dev/null
+++ b/third_party/libwebrtc/rtc_tools/frame_analyzer/frame_analyzer.cc
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <cstddef>
+#include <string>
+#include <vector>
+
+#include "absl/flags/flag.h"
+#include "absl/flags/parse.h"
+#include "absl/strings/match.h"
+#include "api/scoped_refptr.h"
+#include "api/test/metrics/chrome_perf_dashboard_metrics_exporter.h"
+#include "api/test/metrics/global_metrics_logger_and_exporter.h"
+#include "api/test/metrics/metrics_exporter.h"
+#include "api/test/metrics/stdout_metrics_exporter.h"
+#include "rtc_base/strings/string_builder.h"
+#include "rtc_tools/frame_analyzer/video_color_aligner.h"
+#include "rtc_tools/frame_analyzer/video_geometry_aligner.h"
+#include "rtc_tools/frame_analyzer/video_quality_analysis.h"
+#include "rtc_tools/frame_analyzer/video_temporal_aligner.h"
+#include "rtc_tools/video_file_reader.h"
+#include "rtc_tools/video_file_writer.h"
+
+ABSL_FLAG(int32_t, width, -1, "The width of the reference and test files");
+ABSL_FLAG(int32_t, height, -1, "The height of the reference and test files");
+ABSL_FLAG(std::string,
+ label,
+ "MY_TEST",
+ "The label to use for the perf output");
+ABSL_FLAG(std::string,
+ reference_file,
+ "ref.yuv",
+ "The reference YUV file to run the analysis against");
+ABSL_FLAG(std::string,
+ test_file,
+ "test.yuv",
+ "The test YUV file to run the analysis for");
+ABSL_FLAG(std::string,
+ aligned_output_file,
+ "",
+ "Where to write aligned YUV/Y4M output file, f not present, no files "
+ "will be written");
+ABSL_FLAG(std::string,
+ yuv_directory,
+ "",
+ "Where to write aligned YUV ref+test output files, if not present, "
+ "no files will be written");
+ABSL_FLAG(std::string,
+ chartjson_result_file,
+ "",
+ "Where to store perf result in chartjson format, if not present, no "
+ "perf result will be stored");
+
+namespace {
+
+#ifdef WIN32
+const char* const kPathDelimiter = "\\";
+#else
+const char* const kPathDelimiter = "/";
+#endif
+
+std::string JoinFilename(std::string directory, std::string filename) {
+ return directory + kPathDelimiter + filename;
+}
+
+} // namespace
+
+/*
+ * A command line tool running PSNR and SSIM on a reference video and a test
+ * video. The test video is a record of the reference video which can start at
+ * an arbitrary point. It is possible that there will be repeated frames or
+ * skipped frames as well. The video files should be I420 .y4m or .yuv videos.
+ * If both files are .y4m, there is no need to specify width/height. The tool
+ * prints the result to standard output in the Chromium perf format:
+ * RESULT <metric>:<label>= <values>
+ *
+ * The max value for PSNR is 48.0 (between equal frames), while for SSIM it is 1.0.
+ *
+ * Usage:
+ * frame_analyzer --label=<test_label> --reference_file=<name_of_file>
+ * --test_file=<name_of_file> --width=<frame_width> --height=<frame_height>
+ */
+int main(int argc, char* argv[]) {
+ absl::ParseCommandLine(argc, argv);
+
+ int width = absl::GetFlag(FLAGS_width);
+ int height = absl::GetFlag(FLAGS_height);
+ const std::string reference_file_name = absl::GetFlag(FLAGS_reference_file);
+ const std::string test_file_name = absl::GetFlag(FLAGS_test_file);
+
+ // .yuv files require explicit resolution.
+ if ((absl::EndsWith(reference_file_name, ".yuv") ||
+ absl::EndsWith(test_file_name, ".yuv")) &&
+ (width <= 0 || height <= 0)) {
+ fprintf(stderr,
+ "Error: You need to specify width and height when using .yuv "
+ "files\n");
+ return -1;
+ }
+
+ webrtc::test::ResultsContainer results;
+
+ rtc::scoped_refptr<webrtc::test::Video> reference_video =
+ webrtc::test::OpenYuvOrY4mFile(reference_file_name, width, height);
+ rtc::scoped_refptr<webrtc::test::Video> test_video =
+ webrtc::test::OpenYuvOrY4mFile(test_file_name, width, height);
+
+ if (!reference_video || !test_video) {
+ fprintf(stderr, "Error opening video files\n");
+ return 1;
+ }
+
+ const std::vector<size_t> matching_indices =
+ webrtc::test::FindMatchingFrameIndices(reference_video, test_video);
+
+ // Align the reference video both temporally and geometrically. That is,
+ // reorder the reference frames to match the frame order of the test video,
+ // and crop/zoom the reference video to match up to the test video.
+ const rtc::scoped_refptr<webrtc::test::Video> aligned_reference_video =
+ AdjustCropping(ReorderVideo(reference_video, matching_indices),
+ test_video);
+
+ // Calculate whether there is any systematic color difference between the
+ // reference and the test video.
+ const webrtc::test::ColorTransformationMatrix color_transformation =
+ CalculateColorTransformationMatrix(aligned_reference_video, test_video);
+
+ char buf[256];
+ rtc::SimpleStringBuilder string_builder(buf);
+ for (int i = 0; i < 3; ++i) {
+ string_builder << "\n";
+ for (int j = 0; j < 4; ++j)
+ string_builder.AppendFormat("%6.2f ", color_transformation[i][j]);
+ }
+ printf("Adjusting test video with color transformation: %s\n",
+ string_builder.str());
+
+ // Adjust all frames in the test video with the calculated color
+ // transformation.
+ const rtc::scoped_refptr<webrtc::test::Video> color_adjusted_test_video =
+ AdjustColors(color_transformation, test_video);
+
+ results.frames = webrtc::test::RunAnalysis(
+ aligned_reference_video, color_adjusted_test_video, matching_indices);
+
+ const std::vector<webrtc::test::Cluster> clusters =
+ webrtc::test::CalculateFrameClusters(matching_indices);
+ results.max_repeated_frames = webrtc::test::GetMaxRepeatedFrames(clusters);
+ results.max_skipped_frames = webrtc::test::GetMaxSkippedFrames(clusters);
+ results.total_skipped_frames =
+ webrtc::test::GetTotalNumberOfSkippedFrames(clusters);
+ results.decode_errors_ref = 0;
+ results.decode_errors_test = 0;
+
+ webrtc::test::PrintAnalysisResults(absl::GetFlag(FLAGS_label), results,
+ *webrtc::test::GetGlobalMetricsLogger());
+
+ std::vector<std::unique_ptr<webrtc::test::MetricsExporter>> exporters;
+ exporters.push_back(std::make_unique<webrtc::test::StdoutMetricsExporter>());
+ std::string chartjson_result_file =
+ absl::GetFlag(FLAGS_chartjson_result_file);
+ if (!chartjson_result_file.empty()) {
+ exporters.push_back(
+ std::make_unique<webrtc::test::ChromePerfDashboardMetricsExporter>(
+ chartjson_result_file));
+ }
+ if (!webrtc::test::ExportPerfMetric(*webrtc::test::GetGlobalMetricsLogger(),
+ std::move(exporters))) {
+ return 1;
+ }
+ std::string aligned_output_file = absl::GetFlag(FLAGS_aligned_output_file);
+ if (!aligned_output_file.empty()) {
+ webrtc::test::WriteVideoToFile(aligned_reference_video, aligned_output_file,
+ /*fps=*/30);
+ }
+ std::string yuv_directory = absl::GetFlag(FLAGS_yuv_directory);
+ if (!yuv_directory.empty()) {
+ webrtc::test::WriteVideoToFile(aligned_reference_video,
+ JoinFilename(yuv_directory, "ref.yuv"),
+ /*fps=*/30);
+ webrtc::test::WriteVideoToFile(color_adjusted_test_video,
+ JoinFilename(yuv_directory, "test.yuv"),
+ /*fps=*/30);
+ }
+
+ return 0;
+}
diff --git a/third_party/libwebrtc/rtc_tools/frame_analyzer/linear_least_squares.cc b/third_party/libwebrtc/rtc_tools/frame_analyzer/linear_least_squares.cc
new file mode 100644
index 0000000000..93a6f90e69
--- /dev/null
+++ b/third_party/libwebrtc/rtc_tools/frame_analyzer/linear_least_squares.cc
@@ -0,0 +1,206 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_tools/frame_analyzer/linear_least_squares.h"
+
+#include <math.h>
+
+#include <cstdint>
+#include <cstdlib>
+#include <functional>
+#include <numeric>
+#include <type_traits>
+#include <utility>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace test {
+
+template <class T>
+using Matrix = std::valarray<std::valarray<T>>;
+
+namespace {
+
+template <typename R, typename T>
+R DotProduct(const std::valarray<T>& a, const std::valarray<T>& b) {
+ RTC_CHECK_EQ(a.size(), b.size());
+ return std::inner_product(std::begin(a), std::end(a), std::begin(b), R(0));
+}
+
+// Calculates a^T * b.
+template <typename R, typename T>
+Matrix<R> MatrixMultiply(const Matrix<T>& a, const Matrix<T>& b) {
+ Matrix<R> result(std::valarray<R>(a.size()), b.size());
+ for (size_t i = 0; i < a.size(); ++i) {
+ for (size_t j = 0; j < b.size(); ++j)
+ result[j][i] = DotProduct<R>(a[i], b[j]);
+ }
+
+ return result;
+}
+
+template <typename T>
+Matrix<T> Transpose(const Matrix<T>& matrix) {
+ if (matrix.size() == 0)
+ return Matrix<T>();
+ const size_t rows = matrix.size();
+ const size_t columns = matrix[0].size();
+ Matrix<T> result(std::valarray<T>(rows), columns);
+
+ for (size_t i = 0; i < rows; ++i) {
+ for (size_t j = 0; j < columns; ++j)
+ result[j][i] = matrix[i][j];
+ }
+
+ return result;
+}
+
+// Convert valarray from type T to type R.
+template <typename R, typename T>
+std::valarray<R> ConvertTo(const std::valarray<T>& v) {
+ std::valarray<R> result(v.size());
+ for (size_t i = 0; i < v.size(); ++i)
+ result[i] = static_cast<R>(v[i]);
+ return result;
+}
+
+// Convert valarray Matrix from type T to type R.
+template <typename R, typename T>
+Matrix<R> ConvertTo(const Matrix<T>& mat) {
+ Matrix<R> result(mat.size());
+ for (size_t i = 0; i < mat.size(); ++i)
+ result[i] = ConvertTo<R>(mat[i]);
+ return result;
+}
+
+// Convert from valarray Matrix back to the more conventional std::vector.
+template <typename T>
+std::vector<std::vector<T>> ToVectorMatrix(const Matrix<T>& m) {
+ std::vector<std::vector<T>> result;
+ for (const std::valarray<T>& v : m)
+ result.emplace_back(std::begin(v), std::end(v));
+ return result;
+}
+
+// Create a valarray Matrix from a conventional std::vector.
+template <typename T>
+Matrix<T> FromVectorMatrix(const std::vector<std::vector<T>>& mat) {
+ Matrix<T> result(mat.size());
+ for (size_t i = 0; i < mat.size(); ++i)
+ result[i] = std::valarray<T>(mat[i].data(), mat[i].size());
+ return result;
+}
+
+// Returns `matrix_to_invert`^-1 * `right_hand_matrix`. `matrix_to_invert` must
+// have square size.
+Matrix<double> GaussianElimination(Matrix<double> matrix_to_invert,
+ Matrix<double> right_hand_matrix) {
+ // `n` is the width/height of `matrix_to_invert`.
+ const size_t n = matrix_to_invert.size();
+ // Make sure `matrix_to_invert` has square size.
+ for (const std::valarray<double>& column : matrix_to_invert)
+ RTC_CHECK_EQ(n, column.size());
+ // Make sure `right_hand_matrix` has correct size.
+ for (const std::valarray<double>& column : right_hand_matrix)
+ RTC_CHECK_EQ(n, column.size());
+
+ // Transpose the matrices before and after so that we can perform Gaussian
+ // elimination on the columns instead of the rows, since that is easier with
+ // our representation.
+ matrix_to_invert = Transpose(matrix_to_invert);
+ right_hand_matrix = Transpose(right_hand_matrix);
+
+ // Loop over the diagonal of `matrix_to_invert` and perform column reduction.
+ // Column reduction is a sequence of elementary column operations that is
+ // performed on both `matrix_to_invert` and `right_hand_matrix` until
+ // `matrix_to_invert` has been transformed to the identity matrix.
+ for (size_t diagonal_index = 0; diagonal_index < n; ++diagonal_index) {
+ // Make sure the diagonal element has the highest absolute value by
+ // swapping columns if necessary.
+ for (size_t column = diagonal_index + 1; column < n; ++column) {
+ if (std::abs(matrix_to_invert[column][diagonal_index]) >
+ std::abs(matrix_to_invert[diagonal_index][diagonal_index])) {
+ std::swap(matrix_to_invert[column], matrix_to_invert[diagonal_index]);
+ std::swap(right_hand_matrix[column], right_hand_matrix[diagonal_index]);
+ }
+ }
+
+ // Reduce the diagonal element to be 1, by dividing the column with that
+ // value. If the diagonal element is 0, it means the system of equations has
+ // many solutions, and in that case we will return an arbitrary solution.
+ if (matrix_to_invert[diagonal_index][diagonal_index] == 0.0) {
+ RTC_LOG(LS_WARNING) << "Matrix is not invertible, ignoring.";
+ continue;
+ }
+ const double diagonal_element =
+ matrix_to_invert[diagonal_index][diagonal_index];
+ matrix_to_invert[diagonal_index] /= diagonal_element;
+ right_hand_matrix[diagonal_index] /= diagonal_element;
+
+ // Eliminate the other entries in row `diagonal_index` by making them zero.
+ for (size_t column = 0; column < n; ++column) {
+ if (column == diagonal_index)
+ continue;
+ const double row_element = matrix_to_invert[column][diagonal_index];
+ matrix_to_invert[column] -=
+ row_element * matrix_to_invert[diagonal_index];
+ right_hand_matrix[column] -=
+ row_element * right_hand_matrix[diagonal_index];
+ }
+ }
+
+ // Transpose the result before returning it, as explained in the comment above.
+ return Transpose(right_hand_matrix);
+}
+
+} // namespace
+
+IncrementalLinearLeastSquares::IncrementalLinearLeastSquares() = default;
+IncrementalLinearLeastSquares::~IncrementalLinearLeastSquares() = default;
+
+void IncrementalLinearLeastSquares::AddObservations(
+ const std::vector<std::vector<uint8_t>>& x,
+ const std::vector<std::vector<uint8_t>>& y) {
+ if (x.empty() || y.empty())
+ return;
+ // Make sure all columns are the same size.
+ const size_t n = x[0].size();
+ for (const std::vector<uint8_t>& column : x)
+ RTC_CHECK_EQ(n, column.size());
+ for (const std::vector<uint8_t>& column : y)
+ RTC_CHECK_EQ(n, column.size());
+
+ // We will multiply the uint8_t values together, so we need to expand to a
+ // type that can safely store those values, i.e. uint16_t.
+ const Matrix<uint16_t> unpacked_x = ConvertTo<uint16_t>(FromVectorMatrix(x));
+ const Matrix<uint16_t> unpacked_y = ConvertTo<uint16_t>(FromVectorMatrix(y));
+
+ const Matrix<uint64_t> xx = MatrixMultiply<uint64_t>(unpacked_x, unpacked_x);
+ const Matrix<uint64_t> xy = MatrixMultiply<uint64_t>(unpacked_x, unpacked_y);
+ if (sum_xx && sum_xy) {
+ *sum_xx += xx;
+ *sum_xy += xy;
+ } else {
+ sum_xx = xx;
+ sum_xy = xy;
+ }
+}
+
+std::vector<std::vector<double>>
+IncrementalLinearLeastSquares::GetBestSolution() const {
+ RTC_CHECK(sum_xx && sum_xy) << "No observations have been added";
+ return ToVectorMatrix(GaussianElimination(ConvertTo<double>(*sum_xx),
+ ConvertTo<double>(*sum_xy)));
+}
+
+} // namespace test
+} // namespace webrtc
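For orientation, a minimal sketch of how this incremental API is meant to be driven, mirroring the unit tests later in this patch. Only AddObservations() and GetBestSolution() come from the file above; the scaffolding (includes, main) is illustrative:

#include <cstdio>
#include <vector>

#include "rtc_tools/frame_analyzer/linear_least_squares.h"

int main() {
  webrtc::test::IncrementalLinearLeastSquares lls;
  // Feed observations of y = 2 * x in two batches; this is equivalent to one
  // AddObservations() call with all five observations concatenated.
  lls.AddObservations({{1, 2, 3}}, {{2, 4, 6}});
  lls.AddObservations({{4, 5}}, {{8, 10}});
  // Prints the least-squares slope, 2.0.
  std::printf("b = %f\n", lls.GetBestSolution()[0][0]);
  return 0;
}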
diff --git a/third_party/libwebrtc/rtc_tools/frame_analyzer/linear_least_squares.h b/third_party/libwebrtc/rtc_tools/frame_analyzer/linear_least_squares.h
new file mode 100644
index 0000000000..7006db1d65
--- /dev/null
+++ b/third_party/libwebrtc/rtc_tools/frame_analyzer/linear_least_squares.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_TOOLS_FRAME_ANALYZER_LINEAR_LEAST_SQUARES_H_
+#define RTC_TOOLS_FRAME_ANALYZER_LINEAR_LEAST_SQUARES_H_
+
+#include <stdint.h>
+
+#include <valarray>
+#include <vector>
+
+#include "absl/types/optional.h"
+
+namespace webrtc {
+namespace test {
+
+// This class is used for finding a matrix b that roughly solves the equation:
+// y = x * b. This is generally impossible to do exactly, so the problem is
+// rephrased as finding the matrix b that minimizes the difference:
+// |y - x * b|^2. Calling AddObservations() multiple times is equivalent to
+// concatenating the observation vectors and calling AddObservations() once.
+// The reason for doing it incrementally is that we can't store the raw YUV
+// values for a whole video file in memory at once. This class has a constant
+// memory footprint, regardless of how many times AddObservations() is called.
+class IncrementalLinearLeastSquares {
+ public:
+ IncrementalLinearLeastSquares();
+ ~IncrementalLinearLeastSquares();
+
+ // Add a number of observations. The subvectors of x and y must have the same
+ // length.
+ void AddObservations(const std::vector<std::vector<uint8_t>>& x,
+ const std::vector<std::vector<uint8_t>>& y);
+
+ // Calculate and return the best linear solution, given the observations so
+ // far.
+ std::vector<std::vector<double>> GetBestSolution() const;
+
+ private:
+ // Running sum of x^T * x.
+ absl::optional<std::valarray<std::valarray<uint64_t>>> sum_xx;
+ // Running sum of x^T * y.
+ absl::optional<std::valarray<std::valarray<uint64_t>>> sum_xy;
+};
+
+} // namespace test
+} // namespace webrtc
+
+#endif // RTC_TOOLS_FRAME_ANALYZER_LINEAR_LEAST_SQUARES_H_
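One step of reasoning the header leaves implicit (a standard least-squares identity, stated here for orientation rather than taken from the source): the matrix b minimizing |y - x * b|^2 satisfies the normal equations, so only the two running sums declared above are needed:

\hat{b} = (x^\top x)^{-1} (x^\top y)
        = \Big(\sum_i x_i^\top x_i\Big)^{-1} \Big(\sum_i x_i^\top y_i\Big)

Each AddObservations() batch contributes additively to both sums, which is why GetBestSolution() can recover b with constant memory by Gaussian-eliminating sum_xx against sum_xy, exactly as the .cc file above does.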
diff --git a/third_party/libwebrtc/rtc_tools/frame_analyzer/linear_least_squares_unittest.cc b/third_party/libwebrtc/rtc_tools/frame_analyzer/linear_least_squares_unittest.cc
new file mode 100644
index 0000000000..d4a23e87a6
--- /dev/null
+++ b/third_party/libwebrtc/rtc_tools/frame_analyzer/linear_least_squares_unittest.cc
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_tools/frame_analyzer/linear_least_squares.h"
+
+#include <cstdint>
+
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace test {
+
+TEST(LinearLeastSquares, ScalarIdentityOneObservation) {
+ IncrementalLinearLeastSquares lls;
+ lls.AddObservations({{1}}, {{1}});
+ EXPECT_EQ(std::vector<std::vector<double>>({{1.0}}), lls.GetBestSolution());
+}
+
+TEST(LinearLeastSquares, ScalarIdentityTwoObservationsOneCall) {
+ IncrementalLinearLeastSquares lls;
+ lls.AddObservations({{1, 2}}, {{1, 2}});
+ EXPECT_EQ(std::vector<std::vector<double>>({{1.0}}), lls.GetBestSolution());
+}
+
+TEST(LinearLeastSquares, ScalarIdentityTwoObservationsTwoCalls) {
+ IncrementalLinearLeastSquares lls;
+ lls.AddObservations({{1}}, {{1}});
+ lls.AddObservations({{2}}, {{2}});
+ EXPECT_EQ(std::vector<std::vector<double>>({{1.0}}), lls.GetBestSolution());
+}
+
+TEST(LinearLeastSquares, MatrixIdentityOneObservation) {
+ IncrementalLinearLeastSquares lls;
+ lls.AddObservations({{1, 2}, {3, 4}}, {{1, 2}, {3, 4}});
+ EXPECT_EQ(std::vector<std::vector<double>>({{1.0, 0.0}, {0.0, 1.0}}),
+ lls.GetBestSolution());
+}
+
+TEST(LinearLeastSquares, MatrixManyObservations) {
+ IncrementalLinearLeastSquares lls;
+ // Test that we can find the solution of the overdetermined equation system:
+ // [1, 2] [1, 3] = [5, 11]
+ // [3, 4] [2, 4] [11, 25]
+ // [5, 6] [17, 39]
+ lls.AddObservations({{1}, {2}}, {{5}, {11}});
+ lls.AddObservations({{3}, {4}}, {{11}, {25}});
+ lls.AddObservations({{5}, {6}}, {{17}, {39}});
+
+ const std::vector<std::vector<double>> result = lls.GetBestSolution();
+ // We allow some numerical flexibility here.
+ EXPECT_DOUBLE_EQ(1.0, result[0][0]);
+ EXPECT_DOUBLE_EQ(2.0, result[0][1]);
+ EXPECT_DOUBLE_EQ(3.0, result[1][0]);
+ EXPECT_DOUBLE_EQ(4.0, result[1][1]);
+}
+
+TEST(LinearLeastSquares, MatrixVectorOneObservation) {
+ IncrementalLinearLeastSquares lls;
+ // Test that we can find the solution of the overdetermined equation system:
+ // [1, 2] [1] = [5]
+ // [3, 4] [2] [11]
+ // [5, 6] [17]
+ lls.AddObservations({{1, 3, 5}, {2, 4, 6}}, {{5, 11, 17}});
+
+ const std::vector<std::vector<double>> result = lls.GetBestSolution();
+ // We allow some numerical flexibility here.
+ EXPECT_DOUBLE_EQ(1.0, result[0][0]);
+ EXPECT_DOUBLE_EQ(2.0, result[0][1]);
+}
+
+TEST(LinearLeastSquares, LinearLeastSquaresNonPerfectSolution) {
+ IncrementalLinearLeastSquares lls;
+ // Test that we can find the non-perfect solution of the overdetermined
+ // equation system:
+ // [1] [20] = [21]
+ // [2] [39]
+ // [3] [60]
+ // [2] [41]
+ // [1] [19]
+ lls.AddObservations({{1, 2, 3, 2, 1}}, {{21, 39, 60, 41, 19}});
+
+ EXPECT_DOUBLE_EQ(20.0, lls.GetBestSolution()[0][0]);
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/rtc_tools/frame_analyzer/reference_less_video_analysis.cc b/third_party/libwebrtc/rtc_tools/frame_analyzer/reference_less_video_analysis.cc
new file mode 100644
index 0000000000..fca89a8f2f
--- /dev/null
+++ b/third_party/libwebrtc/rtc_tools/frame_analyzer/reference_less_video_analysis.cc
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <string>
+
+#include "absl/flags/flag.h"
+#include "absl/flags/parse.h"
+#include "absl/flags/usage.h"
+#include "rtc_tools/frame_analyzer/reference_less_video_analysis_lib.h"
+
+ABSL_FLAG(std::string,
+ video_file,
+ "",
+ "Path of the video file to be analyzed, only y4m file format is "
+ "supported");
+
+int main(int argc, char* argv[]) {
+ absl::SetProgramUsageMessage(
+ "Outputs the freezing score by comparing "
+ "current frame with the previous frame.\n"
+ "Example usage:\n"
+ "./reference_less_video_analysis "
+ "--video_file=video_file.y4m\n");
+ absl::ParseCommandLine(argc, argv);
+
+ std::string video_file = absl::GetFlag(FLAGS_video_file);
+ if (video_file.empty()) {
+ exit(EXIT_FAILURE);
+ }
+
+ return run_analysis(video_file);
+}
diff --git a/third_party/libwebrtc/rtc_tools/frame_analyzer/reference_less_video_analysis_lib.cc b/third_party/libwebrtc/rtc_tools/frame_analyzer/reference_less_video_analysis_lib.cc
new file mode 100644
index 0000000000..ebfc6650b2
--- /dev/null
+++ b/third_party/libwebrtc/rtc_tools/frame_analyzer/reference_less_video_analysis_lib.cc
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "rtc_tools/frame_analyzer/reference_less_video_analysis_lib.h"
+
+#include <stdio.h>
+
+#include <numeric>
+#include <vector>
+
+#include "api/video/video_frame_buffer.h"
+#include "rtc_tools/frame_analyzer/video_quality_analysis.h"
+
+#define STATS_LINE_LENGTH 28
+#define PSNR_FREEZE_THRESHOLD 47
+#define SSIM_FREEZE_THRESHOLD .999
+
+#if defined(_WIN32) || defined(_WIN64)
+#define strtok_r strtok_s
+#endif
+
+bool frozen_frame(std::vector<double> psnr_per_frame,
+ std::vector<double> ssim_per_frame,
+ size_t frame) {
+ if (psnr_per_frame[frame] >= PSNR_FREEZE_THRESHOLD ||
+ ssim_per_frame[frame] >= SSIM_FREEZE_THRESHOLD)
+ return true;
+ return false;
+}
+
+std::vector<int> find_frame_clusters(
+ const std::vector<double>& psnr_per_frame,
+ const std::vector<double>& ssim_per_frame) {
+ std::vector<int> identical_frame_clusters;
+ int num_frozen = 0;
+ size_t total_no_of_frames = psnr_per_frame.size();
+
+ for (size_t each_frame = 0; each_frame < total_no_of_frames; each_frame++) {
+ if (frozen_frame(psnr_per_frame, ssim_per_frame, each_frame)) {
+ num_frozen++;
+ } else if (num_frozen > 0) {
+ // Not frozen anymore.
+ identical_frame_clusters.push_back(num_frozen);
+ num_frozen = 0;
+ }
+ }
+ return identical_frame_clusters;
+}
+
+void print_freezing_metrics(const std::vector<double>& psnr_per_frame,
+ const std::vector<double>& ssim_per_frame) {
+ /*
+ * Prints the following metrics:
+ * 1) Identical frame numbers with their PSNR and SSIM values.
+ * 2) Lengths of continuous runs of frozen frames.
+ * 3) Max length of a continuous run of frozen frames.
+ * 4) Number of unique frames found.
+ * 5) Total number of identical frames found.
+ *
+ * Sample output:
+ * Printing metrics for file: /src/rtc_tools/test_3.y4m
+ =============================
+ Total number of frames received: 74
+ Total identical frames: 5
+ Number of unique frames: 69
+ Printing Identical Frames:
+ Frame Number: 29 PSNR: 48.000000 SSIM: 0.999618
+ Frame Number: 30 PSNR: 48.000000 SSIM: 0.999898
+ Frame Number: 60 PSNR: 48.000000 SSIM: 0.999564
+ Frame Number: 64 PSNR: 48.000000 SSIM: 0.999651
+ Frame Number: 69 PSNR: 48.000000 SSIM: 0.999684
+ Printing identical frame clusters:
+ 2 1 1 1
+ *
+ */
+ size_t total_no_of_frames = psnr_per_frame.size();
+ std::vector<int> identical_frame_clusters =
+ find_frame_clusters(psnr_per_frame, ssim_per_frame);
+ int total_identical_frames = std::accumulate(
+ identical_frame_clusters.begin(), identical_frame_clusters.end(), 0);
+ size_t unique_frames = total_no_of_frames - total_identical_frames;
+
+ printf("Total number of frames received: %zu\n", total_no_of_frames);
+ printf("Total identical frames: %d\n", total_identical_frames);
+ printf("Number of unique frames: %zu\n", unique_frames);
+
+ printf("Printing Identical Frames: \n");
+ for (size_t frame = 0; frame < total_no_of_frames; frame++) {
+ if (frozen_frame(psnr_per_frame, ssim_per_frame, frame)) {
+ printf(" Frame Number: %zu PSNR: %f SSIM: %f \n", frame,
+ psnr_per_frame[frame], ssim_per_frame[frame]);
+ }
+ }
+
+ printf("Print identical frame which appears in clusters : \n");
+ for (int cluster = 0;
+ cluster < static_cast<int>(identical_frame_clusters.size()); cluster++)
+ printf("%d ", identical_frame_clusters[cluster]);
+ printf("\n");
+}
+
+void compute_metrics(const rtc::scoped_refptr<webrtc::test::Video>& video,
+ std::vector<double>* psnr_per_frame,
+ std::vector<double>* ssim_per_frame) {
+ for (size_t i = 0; i < video->number_of_frames() - 1; ++i) {
+ const rtc::scoped_refptr<webrtc::I420BufferInterface> current_frame =
+ video->GetFrame(i);
+ const rtc::scoped_refptr<webrtc::I420BufferInterface> next_frame =
+ video->GetFrame(i + 1);
+ double result_psnr = webrtc::test::Psnr(current_frame, next_frame);
+ double result_ssim = webrtc::test::Ssim(current_frame, next_frame);
+
+ psnr_per_frame->push_back(result_psnr);
+ ssim_per_frame->push_back(result_ssim);
+ }
+}
+
+int run_analysis(const std::string& video_file) {
+ std::vector<double> psnr_per_frame;
+ std::vector<double> ssim_per_frame;
+ rtc::scoped_refptr<webrtc::test::Video> video =
+ webrtc::test::OpenY4mFile(video_file);
+ if (video) {
+ compute_metrics(video, &psnr_per_frame, &ssim_per_frame);
+ } else {
+ return -1;
+ }
+ printf("=============================\n");
+ printf("Printing metrics for file: %s\n", video_file.c_str());
+ printf("=============================\n");
+ print_freezing_metrics(psnr_per_frame, ssim_per_frame);
+ return 0;
+}
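A quick worked example of the clustering rule above (thresholds from the #defines earlier in this file; the PSNR/SSIM values are made up for illustration). Note that the loop only flushes a frozen run once a non-frozen frame follows it:

#include <cstdio>
#include <vector>

#include "rtc_tools/frame_analyzer/reference_less_video_analysis_lib.h"

int main() {
  // Frames 0-1 and 3 are frozen (PSNR >= 47); frames 2 and 4 are not.
  const std::vector<double> psnr = {48.0, 48.0, 30.0, 48.0, 20.0};
  const std::vector<double> ssim = {0.9999, 0.9999, 0.9, 0.9999, 0.8};
  // Expected output: "2 1" -- one cluster of two frames, one single frame.
  for (int cluster : find_frame_clusters(psnr, ssim))
    std::printf("%d ", cluster);
  std::printf("\n");
  return 0;
}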
diff --git a/third_party/libwebrtc/rtc_tools/frame_analyzer/reference_less_video_analysis_lib.h b/third_party/libwebrtc/rtc_tools/frame_analyzer/reference_less_video_analysis_lib.h
new file mode 100644
index 0000000000..3c93119905
--- /dev/null
+++ b/third_party/libwebrtc/rtc_tools/frame_analyzer/reference_less_video_analysis_lib.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_TOOLS_FRAME_ANALYZER_REFERENCE_LESS_VIDEO_ANALYSIS_LIB_H_
+#define RTC_TOOLS_FRAME_ANALYZER_REFERENCE_LESS_VIDEO_ANALYSIS_LIB_H_
+
+#include <stddef.h>
+
+#include <string>
+#include <vector>
+
+#include "api/scoped_refptr.h"
+#include "rtc_tools/video_file_reader.h"
+
+// Returns true if the frame is frozen based on psnr and ssim freezing
+// threshold values.
+bool frozen_frame(std::vector<double> psnr_per_frame,
+ std::vector<double> ssim_per_frame,
+ size_t frame);
+
+// Returns a vector with the lengths of the clusters of consecutive frozen
+// (identical) frames.
+std::vector<int> find_frame_clusters(const std::vector<double>& psnr_per_frame,
+ const std::vector<double>& ssim_per_frame);
+
+// Prints various freezing metrics like identical frames,
+// total unique frames etc.
+void print_freezing_metrics(const std::vector<double>& psnr_per_frame,
+ const std::vector<double>& ssim_per_frame);
+
+// Computes the PSNR and SSIM values between each pair of consecutive frames
+// of the given video; the freezing metrics are based on these values.
+void compute_metrics(const rtc::scoped_refptr<webrtc::test::Video>& video,
+ std::vector<double>* psnr_per_frame,
+ std::vector<double>* ssim_per_frame);
+
+// Computes and prints the freezing metrics for a single video file.
+int run_analysis(const std::string& video_file);
+
+#endif // RTC_TOOLS_FRAME_ANALYZER_REFERENCE_LESS_VIDEO_ANALYSIS_LIB_H_
diff --git a/third_party/libwebrtc/rtc_tools/frame_analyzer/reference_less_video_analysis_unittest.cc b/third_party/libwebrtc/rtc_tools/frame_analyzer/reference_less_video_analysis_unittest.cc
new file mode 100644
index 0000000000..b98a014989
--- /dev/null
+++ b/third_party/libwebrtc/rtc_tools/frame_analyzer/reference_less_video_analysis_unittest.cc
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include <vector>
+
+#include "api/scoped_refptr.h"
+#include "rtc_tools/frame_analyzer/reference_less_video_analysis_lib.h"
+#include "rtc_tools/video_file_reader.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+class ReferenceLessVideoAnalysisTest : public ::testing::Test {
+ public:
+ void SetUp() override {
+ video = webrtc::test::OpenY4mFile(
+ webrtc::test::ResourcePath("reference_less_video_test_file", "y4m"));
+ ASSERT_TRUE(video);
+ }
+
+ rtc::scoped_refptr<webrtc::test::Video> video;
+ std::vector<double> psnr_per_frame;
+ std::vector<double> ssim_per_frame;
+};
+
+TEST_F(ReferenceLessVideoAnalysisTest, MatchComputedMetrics) {
+ compute_metrics(video, &psnr_per_frame, &ssim_per_frame);
+ EXPECT_EQ(74, (int)psnr_per_frame.size());
+
+ ASSERT_NEAR(27.2f, psnr_per_frame[1], 0.1f);
+ ASSERT_NEAR(24.9f, psnr_per_frame[5], 0.1f);
+
+ ASSERT_NEAR(0.9f, ssim_per_frame[1], 0.1f);
+ ASSERT_NEAR(0.9f, ssim_per_frame[5], 0.1f);
+}
+
+TEST_F(ReferenceLessVideoAnalysisTest, MatchIdenticalFrameClusters) {
+ compute_metrics(video, &psnr_per_frame, &ssim_per_frame);
+ std::vector<int> identical_frame_clusters =
+ find_frame_clusters(psnr_per_frame, ssim_per_frame);
+ EXPECT_EQ(5, (int)identical_frame_clusters.size());
+ EXPECT_EQ(1, identical_frame_clusters[0]);
+ EXPECT_EQ(1, identical_frame_clusters[4]);
+}
diff --git a/third_party/libwebrtc/rtc_tools/frame_analyzer/video_color_aligner.cc b/third_party/libwebrtc/rtc_tools/frame_analyzer/video_color_aligner.cc
new file mode 100644
index 0000000000..5983e47f69
--- /dev/null
+++ b/third_party/libwebrtc/rtc_tools/frame_analyzer/video_color_aligner.cc
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_tools/frame_analyzer/video_color_aligner.h"
+
+#include <stddef.h>
+
+#include <algorithm>
+#include <cmath>
+#include <cstdint>
+#include <vector>
+
+#include "api/array_view.h"
+#include "api/make_ref_counted.h"
+#include "api/video/i420_buffer.h"
+#include "rtc_base/checks.h"
+#include "rtc_tools/frame_analyzer/linear_least_squares.h"
+#include "third_party/libyuv/include/libyuv/planar_functions.h"
+#include "third_party/libyuv/include/libyuv/scale.h"
+
+namespace webrtc {
+namespace test {
+
+namespace {
+
+// Helper function for AdjustColors(). This function calculates a single output
+// row for y with the given color coefficients. The u/v channels are assumed to
+// be subsampled by a factor of 2, which is the case for I420.
+void CalculateYChannel(rtc::ArrayView<const uint8_t> y_data,
+ rtc::ArrayView<const uint8_t> u_data,
+ rtc::ArrayView<const uint8_t> v_data,
+ const std::array<float, 4>& coeff,
+ rtc::ArrayView<uint8_t> output) {
+ RTC_CHECK_EQ(y_data.size(), output.size());
+ // Each u/v element represents two y elements. Make sure we have enough to
+ // cover the Y values.
+ RTC_CHECK_GE(u_data.size() * 2, y_data.size());
+ RTC_CHECK_GE(v_data.size() * 2, y_data.size());
+
+ // Do two pixels at a time since u/v are subsampled.
+ for (size_t i = 0; i * 2 < y_data.size() - 1; ++i) {
+ const float uv_contribution =
+ coeff[1] * u_data[i] + coeff[2] * v_data[i] + coeff[3];
+
+ const float val0 = coeff[0] * y_data[i * 2 + 0] + uv_contribution;
+ const float val1 = coeff[0] * y_data[i * 2 + 1] + uv_contribution;
+
+ // Clamp result to a byte.
+ output[i * 2 + 0] = static_cast<uint8_t>(
+ std::round(std::max(0.0f, std::min(val0, 255.0f))));
+ output[i * 2 + 1] = static_cast<uint8_t>(
+ std::round(std::max(0.0f, std::min(val1, 255.0f))));
+ }
+
+ // Handle the last pixel for odd widths.
+ if (y_data.size() % 2 == 1) {
+ const float val = coeff[0] * y_data[y_data.size() - 1] +
+ coeff[1] * u_data[(y_data.size() - 1) / 2] +
+ coeff[2] * v_data[(y_data.size() - 1) / 2] + coeff[3];
+ output[y_data.size() - 1] =
+ static_cast<uint8_t>(std::round(std::max(0.0f, std::min(val, 255.0f))));
+ }
+}
+
+// Helper function for AdjustColors(). This function calculates a single output
+// row for either u or v, with the given color coefficients. Y, U, and V are
+// assumed to be the same size, i.e. no subsampling.
+void CalculateUVChannel(rtc::ArrayView<const uint8_t> y_data,
+ rtc::ArrayView<const uint8_t> u_data,
+ rtc::ArrayView<const uint8_t> v_data,
+ const std::array<float, 4>& coeff,
+ rtc::ArrayView<uint8_t> output) {
+ RTC_CHECK_EQ(y_data.size(), u_data.size());
+ RTC_CHECK_EQ(y_data.size(), v_data.size());
+ RTC_CHECK_EQ(y_data.size(), output.size());
+
+ for (size_t x = 0; x < y_data.size(); ++x) {
+ const float val = coeff[0] * y_data[x] + coeff[1] * u_data[x] +
+ coeff[2] * v_data[x] + coeff[3];
+ // Clamp result to a byte.
+ output[x] =
+ static_cast<uint8_t>(std::round(std::max(0.0f, std::min(val, 255.0f))));
+ }
+}
+
+// Convert a frame to four vectors consisting of [y, u, v, 1].
+std::vector<std::vector<uint8_t>> FlattenYuvData(
+ const rtc::scoped_refptr<I420BufferInterface>& frame) {
+ std::vector<std::vector<uint8_t>> result(
+ 4, std::vector<uint8_t>(frame->ChromaWidth() * frame->ChromaHeight()));
+
+ // Downscale the Y plane so that all YUV planes are the same size.
+ libyuv::ScalePlane(frame->DataY(), frame->StrideY(), frame->width(),
+ frame->height(), result[0].data(), frame->ChromaWidth(),
+ frame->ChromaWidth(), frame->ChromaHeight(),
+ libyuv::kFilterBox);
+
+ libyuv::CopyPlane(frame->DataU(), frame->StrideU(), result[1].data(),
+ frame->ChromaWidth(), frame->ChromaWidth(),
+ frame->ChromaHeight());
+
+ libyuv::CopyPlane(frame->DataV(), frame->StrideV(), result[2].data(),
+ frame->ChromaWidth(), frame->ChromaWidth(),
+ frame->ChromaHeight());
+
+ std::fill(result[3].begin(), result[3].end(), 1u);
+
+ return result;
+}
+
+ColorTransformationMatrix VectorToColorMatrix(
+ const std::vector<std::vector<double>>& v) {
+ ColorTransformationMatrix color_transformation;
+ for (int i = 0; i < 3; ++i) {
+ for (int j = 0; j < 4; ++j)
+ color_transformation[i][j] = v[i][j];
+ }
+ return color_transformation;
+}
+
+} // namespace
+
+ColorTransformationMatrix CalculateColorTransformationMatrix(
+ const rtc::scoped_refptr<I420BufferInterface>& reference_frame,
+ const rtc::scoped_refptr<I420BufferInterface>& test_frame) {
+ IncrementalLinearLeastSquares incremental_lls;
+ incremental_lls.AddObservations(FlattenYuvData(test_frame),
+ FlattenYuvData(reference_frame));
+ return VectorToColorMatrix(incremental_lls.GetBestSolution());
+}
+
+ColorTransformationMatrix CalculateColorTransformationMatrix(
+ const rtc::scoped_refptr<Video>& reference_video,
+ const rtc::scoped_refptr<Video>& test_video) {
+ RTC_CHECK_GE(reference_video->number_of_frames(),
+ test_video->number_of_frames());
+
+ IncrementalLinearLeastSquares incremental_lls;
+ for (size_t i = 0; i < test_video->number_of_frames(); ++i) {
+ incremental_lls.AddObservations(
+ FlattenYuvData(test_video->GetFrame(i)),
+ FlattenYuvData(reference_video->GetFrame(i)));
+ }
+
+ return VectorToColorMatrix(incremental_lls.GetBestSolution());
+}
+
+rtc::scoped_refptr<Video> AdjustColors(
+ const ColorTransformationMatrix& color_transformation,
+ const rtc::scoped_refptr<Video>& video) {
+ class ColorAdjustedVideo : public Video {
+ public:
+ ColorAdjustedVideo(const ColorTransformationMatrix& color_transformation,
+ const rtc::scoped_refptr<Video>& video)
+ : color_transformation_(color_transformation), video_(video) {}
+
+ int width() const override { return video_->width(); }
+ int height() const override { return video_->height(); }
+ size_t number_of_frames() const override {
+ return video_->number_of_frames();
+ }
+
+ rtc::scoped_refptr<I420BufferInterface> GetFrame(
+ size_t index) const override {
+ return AdjustColors(color_transformation_, video_->GetFrame(index));
+ }
+
+ private:
+ const ColorTransformationMatrix color_transformation_;
+ const rtc::scoped_refptr<Video> video_;
+ };
+
+ return rtc::make_ref_counted<ColorAdjustedVideo>(color_transformation, video);
+}
+
+rtc::scoped_refptr<I420BufferInterface> AdjustColors(
+ const ColorTransformationMatrix& color_matrix,
+ const rtc::scoped_refptr<I420BufferInterface>& frame) {
+ // Allocate I420 buffer that will hold the color adjusted frame.
+ rtc::scoped_refptr<I420Buffer> adjusted_frame =
+ I420Buffer::Create(frame->width(), frame->height());
+
+ // Create a downscaled Y plane with the same size as the U/V planes to
+ // simplify converting the U/V planes.
+ std::vector<uint8_t> downscaled_y_plane(frame->ChromaWidth() *
+ frame->ChromaHeight());
+ libyuv::ScalePlane(frame->DataY(), frame->StrideY(), frame->width(),
+ frame->height(), downscaled_y_plane.data(),
+ frame->ChromaWidth(), frame->ChromaWidth(),
+ frame->ChromaHeight(), libyuv::kFilterBox);
+
+ // Fill in the adjusted data row by row.
+ for (int y = 0; y < frame->height(); ++y) {
+ const int half_y = y / 2;
+ rtc::ArrayView<const uint8_t> y_row(frame->DataY() + frame->StrideY() * y,
+ frame->width());
+ rtc::ArrayView<const uint8_t> u_row(
+ frame->DataU() + frame->StrideU() * half_y, frame->ChromaWidth());
+ rtc::ArrayView<const uint8_t> v_row(
+ frame->DataV() + frame->StrideV() * half_y, frame->ChromaWidth());
+ rtc::ArrayView<uint8_t> output_y_row(
+ adjusted_frame->MutableDataY() + adjusted_frame->StrideY() * y,
+ frame->width());
+
+ CalculateYChannel(y_row, u_row, v_row, color_matrix[0], output_y_row);
+
+ // Chroma channels only exist every second row for I420.
+ if (y % 2 == 0) {
+ rtc::ArrayView<const uint8_t> downscaled_y_row(
+ downscaled_y_plane.data() + frame->ChromaWidth() * half_y,
+ frame->ChromaWidth());
+ rtc::ArrayView<uint8_t> output_u_row(
+ adjusted_frame->MutableDataU() + adjusted_frame->StrideU() * half_y,
+ frame->ChromaWidth());
+ rtc::ArrayView<uint8_t> output_v_row(
+ adjusted_frame->MutableDataV() + adjusted_frame->StrideV() * half_y,
+ frame->ChromaWidth());
+
+ CalculateUVChannel(downscaled_y_row, u_row, v_row, color_matrix[1],
+ output_u_row);
+ CalculateUVChannel(downscaled_y_row, u_row, v_row, color_matrix[2],
+ output_v_row);
+ }
+ }
+
+ return adjusted_frame;
+}
+
+} // namespace test
+} // namespace webrtc
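To make the row convention concrete: each output channel is the dot product of [y, u, v, 1] with one row of the matrix, clamped to a byte. A minimal 1x1 sketch, paraphrasing the AdjustColorsFrame1x1 unit test later in this patch (the matrix values are arbitrary):

#include <cstdint>
#include <cstdio>

#include "api/video/i420_buffer.h"
#include "rtc_tools/frame_analyzer/video_color_aligner.h"

int main() {
  const webrtc::test::ColorTransformationMatrix color_matrix = {
      {{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}}};

  const uint8_t data_y[] = {2};
  const uint8_t data_u[] = {6};
  const uint8_t data_v[] = {7};
  const auto frame =
      webrtc::I420Buffer::Copy(1, 1, data_y, 1, data_u, 1, data_v, 1);

  const auto adjusted = webrtc::test::AdjustColors(color_matrix, frame);
  // Y' = 1*2 + 2*6 + 3*7 + 4 = 39; U' and V' follow the same pattern with
  // rows 1 and 2 of the matrix.
  std::printf("Y'=%d U'=%d V'=%d\n", adjusted->DataY()[0],
              adjusted->DataU()[0], adjusted->DataV()[0]);
  return 0;
}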
diff --git a/third_party/libwebrtc/rtc_tools/frame_analyzer/video_color_aligner.h b/third_party/libwebrtc/rtc_tools/frame_analyzer/video_color_aligner.h
new file mode 100644
index 0000000000..b51e06060b
--- /dev/null
+++ b/third_party/libwebrtc/rtc_tools/frame_analyzer/video_color_aligner.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_TOOLS_FRAME_ANALYZER_VIDEO_COLOR_ALIGNER_H_
+#define RTC_TOOLS_FRAME_ANALYZER_VIDEO_COLOR_ALIGNER_H_
+
+#include <array>
+
+#include "api/scoped_refptr.h"
+#include "api/video/video_frame_buffer.h"
+#include "rtc_tools/video_file_reader.h"
+
+namespace webrtc {
+namespace test {
+
+// Represents a linear color transformation from [y, u, v] to [y', u', v']
+// through the equation: [y', u', v'] = [y, u, v, 1] * matrix.
+using ColorTransformationMatrix = std::array<std::array<float, 4>, 3>;
+
+// Calculate the optimal color transformation that should be applied to the
+// test video so that it matches the reference video as closely as possible.
+ColorTransformationMatrix CalculateColorTransformationMatrix(
+ const rtc::scoped_refptr<Video>& reference_video,
+ const rtc::scoped_refptr<Video>& test_video);
+
+// Calculate color transformation for a single I420 frame.
+ColorTransformationMatrix CalculateColorTransformationMatrix(
+ const rtc::scoped_refptr<I420BufferInterface>& reference_frame,
+ const rtc::scoped_refptr<I420BufferInterface>& test_frame);
+
+// Apply a color transformation to a video.
+rtc::scoped_refptr<Video> AdjustColors(
+ const ColorTransformationMatrix& color_matrix,
+ const rtc::scoped_refptr<Video>& video);
+
+// Apply a color transformation to a single I420 frame.
+rtc::scoped_refptr<I420BufferInterface> AdjustColors(
+ const ColorTransformationMatrix& color_matrix,
+ const rtc::scoped_refptr<I420BufferInterface>& frame);
+
+} // namespace test
+} // namespace webrtc
+
+#endif // RTC_TOOLS_FRAME_ANALYZER_VIDEO_COLOR_ALIGNER_H_
diff --git a/third_party/libwebrtc/rtc_tools/frame_analyzer/video_color_aligner_unittest.cc b/third_party/libwebrtc/rtc_tools/frame_analyzer/video_color_aligner_unittest.cc
new file mode 100644
index 0000000000..980898b6bd
--- /dev/null
+++ b/third_party/libwebrtc/rtc_tools/frame_analyzer/video_color_aligner_unittest.cc
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_tools/frame_analyzer/video_color_aligner.h"
+
+#include <stdint.h>
+
+#include "api/video/i420_buffer.h"
+#include "rtc_tools/frame_analyzer/video_quality_analysis.h"
+#include "rtc_tools/video_file_reader.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+namespace test {
+
+namespace {
+
+const ColorTransformationMatrix kIdentityColorMatrix = {
+ {{1, 0, 0, 0}, {0, 1, 0, 0}, {0, 0, 1, 0}}};
+
+void ExpectNear(const ColorTransformationMatrix& expected,
+ const ColorTransformationMatrix& actual) {
+ // The scaling factor on y/u/v should be pretty precise.
+ for (int i = 0; i < 3; ++i) {
+ for (int j = 0; j < 3; ++j)
+ EXPECT_NEAR(expected[i][j], actual[i][j], /* abs_error= */ 1.0e-3)
+ << "at element i: " << i << ", j: " << j;
+ }
+ // The offset can be less precise since the range is [0, 255].
+ for (int i = 0; i < 3; ++i)
+ EXPECT_NEAR(expected[i][3], actual[i][3], /* abs_error= */ 0.1)
+ << "at element i: " << i;
+}
+
+} // namespace
+
+class VideoColorAlignerTest : public ::testing::Test {
+ protected:
+ void SetUp() {
+ reference_video_ =
+ OpenYuvFile(ResourcePath("foreman_128x96", "yuv"), 128, 96);
+ ASSERT_TRUE(reference_video_);
+ }
+
+ rtc::scoped_refptr<Video> reference_video_;
+};
+
+TEST_F(VideoColorAlignerTest, AdjustColorsFrameIdentity) {
+ const rtc::scoped_refptr<I420BufferInterface> test_frame =
+ reference_video_->GetFrame(0);
+
+ // Assume perfect match, i.e. ssim == 1.
+ EXPECT_EQ(1.0,
+ Ssim(test_frame, AdjustColors(kIdentityColorMatrix, test_frame)));
+}
+
+TEST_F(VideoColorAlignerTest, AdjustColorsFrame1x1) {
+ const ColorTransformationMatrix color_matrix = {
+ {{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}}};
+
+ const uint8_t data_y[] = {2};
+ const uint8_t data_u[] = {6};
+ const uint8_t data_v[] = {7};
+ const rtc::scoped_refptr<I420BufferInterface> i420_buffer = I420Buffer::Copy(
+ /* width= */ 1, /* height= */ 1, data_y, /* stride_y= */ 1, data_u,
+ /* stride_u= */ 1, data_v, /* stride_v= */ 1);
+
+ const rtc::scoped_refptr<I420BufferInterface> adjusted_buffer =
+ AdjustColors(color_matrix, i420_buffer);
+
+ EXPECT_EQ(2 * 1 + 6 * 2 + 7 * 3 + 4, adjusted_buffer->DataY()[0]);
+ EXPECT_EQ(2 * 5 + 6 * 6 + 7 * 7 + 8, adjusted_buffer->DataU()[0]);
+ EXPECT_EQ(2 * 9 + 6 * 10 + 7 * 11 + 12, adjusted_buffer->DataV()[0]);
+}
+
+TEST_F(VideoColorAlignerTest, AdjustColorsFrame1x1Negative) {
+ const ColorTransformationMatrix color_matrix = {
+ {{-1, 0, 0, 255}, {0, -1, 0, 255}, {0, 0, -1, 255}}};
+
+ const uint8_t data_y[] = {2};
+ const uint8_t data_u[] = {6};
+ const uint8_t data_v[] = {7};
+ const rtc::scoped_refptr<I420BufferInterface> i420_buffer = I420Buffer::Copy(
+ /* width= */ 1, /* height= */ 1, data_y, /* stride_y= */ 1, data_u,
+ /* stride_u= */ 1, data_v, /* stride_v= */ 1);
+
+ const rtc::scoped_refptr<I420BufferInterface> adjusted_buffer =
+ AdjustColors(color_matrix, i420_buffer);
+
+ EXPECT_EQ(255 - 2, adjusted_buffer->DataY()[0]);
+ EXPECT_EQ(255 - 6, adjusted_buffer->DataU()[0]);
+ EXPECT_EQ(255 - 7, adjusted_buffer->DataV()[0]);
+}
+
+TEST_F(VideoColorAlignerTest, AdjustColorsFrame2x2) {
+ const ColorTransformationMatrix color_matrix = {
+ {{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}}};
+
+ const uint8_t data_y[] = {0, 1, 3, 4};
+ const uint8_t data_u[] = {6};
+ const uint8_t data_v[] = {7};
+ const rtc::scoped_refptr<I420BufferInterface> i420_buffer = I420Buffer::Copy(
+ /* width= */ 2, /* height= */ 2, data_y, /* stride_y= */ 2, data_u,
+ /* stride_u= */ 1, data_v, /* stride_v= */ 1);
+
+ const rtc::scoped_refptr<I420BufferInterface> adjusted_buffer =
+ AdjustColors(color_matrix, i420_buffer);
+
+ EXPECT_EQ(0 * 1 + 6 * 2 + 7 * 3 + 4, adjusted_buffer->DataY()[0]);
+ EXPECT_EQ(1 * 1 + 6 * 2 + 7 * 3 + 4, adjusted_buffer->DataY()[1]);
+ EXPECT_EQ(3 * 1 + 6 * 2 + 7 * 3 + 4, adjusted_buffer->DataY()[2]);
+ EXPECT_EQ(4 * 1 + 6 * 2 + 7 * 3 + 4, adjusted_buffer->DataY()[3]);
+
+ EXPECT_EQ(2 * 5 + 6 * 6 + 7 * 7 + 8, adjusted_buffer->DataU()[0]);
+ EXPECT_EQ(2 * 9 + 6 * 10 + 7 * 11 + 12, adjusted_buffer->DataV()[0]);
+}
+
+TEST_F(VideoColorAlignerTest, CalculateColorTransformationMatrixIdentity) {
+ EXPECT_EQ(kIdentityColorMatrix, CalculateColorTransformationMatrix(
+ reference_video_, reference_video_));
+}
+
+TEST_F(VideoColorAlignerTest, CalculateColorTransformationMatrixOffset) {
+ const uint8_t small_data_y[] = {0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15};
+ const uint8_t small_data_u[] = {15, 13, 17, 29};
+ const uint8_t small_data_v[] = {3, 200, 170, 29};
+ const rtc::scoped_refptr<I420BufferInterface> small_i420_buffer =
+ I420Buffer::Copy(
+ /* width= */ 4, /* height= */ 4, small_data_y, /* stride_y= */ 4,
+ small_data_u, /* stride_u= */ 2, small_data_v, /* stride_v= */ 2);
+
+ uint8_t big_data_y[16];
+ uint8_t big_data_u[4];
+ uint8_t big_data_v[4];
+ // Create another I420 frame where all values are 10 bigger.
+ for (int i = 0; i < 16; ++i)
+ big_data_y[i] = small_data_y[i] + 10;
+ for (int i = 0; i < 4; ++i)
+ big_data_u[i] = small_data_u[i] + 10;
+ for (int i = 0; i < 4; ++i)
+ big_data_v[i] = small_data_v[i] + 10;
+
+ const rtc::scoped_refptr<I420BufferInterface> big_i420_buffer =
+ I420Buffer::Copy(
+ /* width= */ 4, /* height= */ 4, big_data_y, /* stride_y= */ 4,
+ big_data_u, /* stride_u= */ 2, big_data_v, /* stride_v= */ 2);
+
+ const ColorTransformationMatrix color_matrix =
+ CalculateColorTransformationMatrix(big_i420_buffer, small_i420_buffer);
+
+ ExpectNear({{{1, 0, 0, 10}, {0, 1, 0, 10}, {0, 0, 1, 10}}}, color_matrix);
+}
+
+TEST_F(VideoColorAlignerTest, CalculateColorTransformationMatrix) {
+ // Arbitrary color transformation matrix.
+ const ColorTransformationMatrix org_color_matrix = {
+ {{0.8, 0.05, 0.04, -4}, {-0.2, 0.7, 0.1, 10}, {0.1, 0.2, 0.4, 20}}};
+
+ const ColorTransformationMatrix result_color_matrix =
+ CalculateColorTransformationMatrix(
+ AdjustColors(org_color_matrix, reference_video_), reference_video_);
+
+ ExpectNear(org_color_matrix, result_color_matrix);
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/rtc_tools/frame_analyzer/video_geometry_aligner.cc b/third_party/libwebrtc/rtc_tools/frame_analyzer/video_geometry_aligner.cc
new file mode 100644
index 0000000000..efb033317a
--- /dev/null
+++ b/third_party/libwebrtc/rtc_tools/frame_analyzer/video_geometry_aligner.cc
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_tools/frame_analyzer/video_geometry_aligner.h"
+
+#include <map>
+
+#include "api/make_ref_counted.h"
+#include "api/video/i420_buffer.h"
+#include "rtc_base/checks.h"
+#include "rtc_tools/frame_analyzer/video_quality_analysis.h"
+#include "third_party/libyuv/include/libyuv/scale.h"
+
+namespace webrtc {
+namespace test {
+
+namespace {
+
+bool IsValidRegion(const CropRegion& region,
+ const rtc::scoped_refptr<I420BufferInterface>& frame) {
+ return region.left >= 0 && region.right >= 0 && region.top >= 0 &&
+ region.bottom >= 0 && region.left + region.right < frame->width() &&
+ region.top + region.bottom < frame->height();
+}
+
+} // namespace
+
+rtc::scoped_refptr<I420BufferInterface> CropAndZoom(
+ const CropRegion& crop_region,
+ const rtc::scoped_refptr<I420BufferInterface>& frame) {
+ RTC_CHECK(IsValidRegion(crop_region, frame));
+
+ const int uv_crop_left = crop_region.left / 2;
+ const int uv_crop_top = crop_region.top / 2;
+
+ const int cropped_width =
+ frame->width() - crop_region.left - crop_region.right;
+ const int cropped_height =
+ frame->height() - crop_region.top - crop_region.bottom;
+
+ // Crop by only adjusting pointers.
+ const uint8_t* y_plane =
+ frame->DataY() + frame->StrideY() * crop_region.top + crop_region.left;
+ const uint8_t* u_plane =
+ frame->DataU() + frame->StrideU() * uv_crop_top + uv_crop_left;
+ const uint8_t* v_plane =
+ frame->DataV() + frame->StrideV() * uv_crop_top + uv_crop_left;
+
+ // Stretch the cropped frame to the original size using libyuv.
+ rtc::scoped_refptr<I420Buffer> adjusted_frame =
+ I420Buffer::Create(frame->width(), frame->height());
+ libyuv::I420Scale(y_plane, frame->StrideY(), u_plane, frame->StrideU(),
+ v_plane, frame->StrideV(), cropped_width, cropped_height,
+ adjusted_frame->MutableDataY(), adjusted_frame->StrideY(),
+ adjusted_frame->MutableDataU(), adjusted_frame->StrideU(),
+ adjusted_frame->MutableDataV(), adjusted_frame->StrideV(),
+ frame->width(), frame->height(), libyuv::kFilterBox);
+
+ return adjusted_frame;
+}
+
+CropRegion CalculateCropRegion(
+ const rtc::scoped_refptr<I420BufferInterface>& reference_frame,
+ const rtc::scoped_refptr<I420BufferInterface>& test_frame) {
+ RTC_CHECK_EQ(reference_frame->width(), test_frame->width());
+ RTC_CHECK_EQ(reference_frame->height(), test_frame->height());
+
+ CropRegion best_region;
+ double best_ssim = Ssim(reference_frame, test_frame);
+
+ typedef int CropRegion::*CropParameter;
+ CropParameter crop_parameters[4] = {&CropRegion::left, &CropRegion::top,
+ &CropRegion::right, &CropRegion::bottom};
+
+ while (true) {
+ // Find the parameter in which direction SSIM improves the most.
+ CropParameter best_parameter = nullptr;
+ const CropRegion prev_best_region = best_region;
+
+ for (CropParameter crop_parameter : crop_parameters) {
+ CropRegion test_region = prev_best_region;
+ ++(test_region.*crop_parameter);
+
+ if (!IsValidRegion(test_region, reference_frame))
+ continue;
+
+ const double ssim =
+ Ssim(CropAndZoom(test_region, reference_frame), test_frame);
+
+ if (ssim > best_ssim) {
+ best_ssim = ssim;
+ best_parameter = crop_parameter;
+ best_region = test_region;
+ }
+ }
+
+ // No improvement among any direction, stop iteration.
+ if (best_parameter == nullptr)
+ break;
+
+ // Iterate in the best direction as long as it improves SSIM.
+ for (CropRegion test_region = best_region;
+ IsValidRegion(test_region, reference_frame);
+ ++(test_region.*best_parameter)) {
+ const double ssim =
+ Ssim(CropAndZoom(test_region, reference_frame), test_frame);
+ if (ssim <= best_ssim)
+ break;
+
+ best_ssim = ssim;
+ best_region = test_region;
+ }
+ }
+
+ return best_region;
+}
+
+rtc::scoped_refptr<I420BufferInterface> AdjustCropping(
+ const rtc::scoped_refptr<I420BufferInterface>& reference_frame,
+ const rtc::scoped_refptr<I420BufferInterface>& test_frame) {
+ return CropAndZoom(CalculateCropRegion(reference_frame, test_frame),
+ reference_frame);
+}
+
+rtc::scoped_refptr<Video> AdjustCropping(
+ const rtc::scoped_refptr<Video>& reference_video,
+ const rtc::scoped_refptr<Video>& test_video) {
+ class CroppedVideo : public Video {
+ public:
+ CroppedVideo(const rtc::scoped_refptr<Video>& reference_video,
+ const rtc::scoped_refptr<Video>& test_video)
+ : reference_video_(reference_video), test_video_(test_video) {
+ RTC_CHECK_EQ(reference_video->number_of_frames(),
+ test_video->number_of_frames());
+ RTC_CHECK_EQ(reference_video->width(), test_video->width());
+ RTC_CHECK_EQ(reference_video->height(), test_video->height());
+ }
+
+ int width() const override { return test_video_->width(); }
+ int height() const override { return test_video_->height(); }
+ size_t number_of_frames() const override {
+ return test_video_->number_of_frames();
+ }
+
+ rtc::scoped_refptr<I420BufferInterface> GetFrame(
+ size_t index) const override {
+ const rtc::scoped_refptr<I420BufferInterface> reference_frame =
+ reference_video_->GetFrame(index);
+
+      // Only calculate the cropping region once per frame since it's
+      // expensive.
+ if (!crop_regions_.count(index)) {
+ crop_regions_[index] =
+ CalculateCropRegion(reference_frame, test_video_->GetFrame(index));
+ }
+
+ return CropAndZoom(crop_regions_[index], reference_frame);
+ }
+
+ private:
+ const rtc::scoped_refptr<Video> reference_video_;
+ const rtc::scoped_refptr<Video> test_video_;
+ // Mutable since this is a cache that affects performance and not logical
+ // behavior.
+ mutable std::map<size_t, CropRegion> crop_regions_;
+ };
+
+ return rtc::make_ref_counted<CroppedVideo>(reference_video, test_video);
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/rtc_tools/frame_analyzer/video_geometry_aligner.h b/third_party/libwebrtc/rtc_tools/frame_analyzer/video_geometry_aligner.h
new file mode 100644
index 0000000000..47667b0d13
--- /dev/null
+++ b/third_party/libwebrtc/rtc_tools/frame_analyzer/video_geometry_aligner.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_TOOLS_FRAME_ANALYZER_VIDEO_GEOMETRY_ALIGNER_H_
+#define RTC_TOOLS_FRAME_ANALYZER_VIDEO_GEOMETRY_ALIGNER_H_
+
+#include "api/video/video_frame_buffer.h"
+#include "rtc_tools/video_file_reader.h"
+
+namespace webrtc {
+namespace test {
+
+struct CropRegion {
+  // Each value represents how much to crop from each side. Left is where x=0,
+  // and top is where y=0. All values equal to zero represent no cropping.
+ int left = 0;
+ int right = 0;
+ int top = 0;
+ int bottom = 0;
+};
+
+// Crops and zooms in on the cropped region so that the returned frame has the
+// same resolution as the input frame.
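+//
+// A minimal usage sketch (values illustrative): crop two pixels from the left
+// and stretch the result back to the input resolution:
+//
+//   CropRegion region;
+//   region.left = 2;
+//   rtc::scoped_refptr<I420BufferInterface> out = CropAndZoom(region, frame);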
+rtc::scoped_refptr<I420BufferInterface> CropAndZoom(
+ const CropRegion& crop_region,
+ const rtc::scoped_refptr<I420BufferInterface>& frame);
+
+// Calculates the optimal cropping region on the reference frame, i.e. the one
+// that maximizes SSIM against the test frame.
+CropRegion CalculateCropRegion(
+ const rtc::scoped_refptr<I420BufferInterface>& reference_frame,
+ const rtc::scoped_refptr<I420BufferInterface>& test_frame);
+
+// Returns a cropped and zoomed version of the reference frame that matches up
+// to the test frame. This is a simple helper function on top of
+// CalculateCropRegion() and CropAndZoom().
+rtc::scoped_refptr<I420BufferInterface> AdjustCropping(
+ const rtc::scoped_refptr<I420BufferInterface>& reference_frame,
+ const rtc::scoped_refptr<I420BufferInterface>& test_frame);
+
+// Returns a cropped and zoomed version of the reference video that matches up
+// to the test video. Frames are individually adjusted for cropping.
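+//
+// A typical pipeline sketch (file names illustrative; OpenYuvFile is declared
+// in rtc_tools/video_file_reader.h):
+//
+//   rtc::scoped_refptr<Video> reference = OpenYuvFile("ref.yuv", 1280, 720);
+//   rtc::scoped_refptr<Video> test = OpenYuvFile("test.yuv", 1280, 720);
+//   rtc::scoped_refptr<Video> adjusted = AdjustCropping(reference, test);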
+rtc::scoped_refptr<Video> AdjustCropping(
+ const rtc::scoped_refptr<Video>& reference_video,
+ const rtc::scoped_refptr<Video>& test_video);
+
+} // namespace test
+} // namespace webrtc
+
+#endif // RTC_TOOLS_FRAME_ANALYZER_VIDEO_GEOMETRY_ALIGNER_H_
diff --git a/third_party/libwebrtc/rtc_tools/frame_analyzer/video_geometry_aligner_unittest.cc b/third_party/libwebrtc/rtc_tools/frame_analyzer/video_geometry_aligner_unittest.cc
new file mode 100644
index 0000000000..a86b8c5f4c
--- /dev/null
+++ b/third_party/libwebrtc/rtc_tools/frame_analyzer/video_geometry_aligner_unittest.cc
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_tools/frame_analyzer/video_geometry_aligner.h"
+
+#include <vector>
+
+#include "api/video/i420_buffer.h"
+#include "rtc_tools/frame_analyzer/video_quality_analysis.h"
+#include "rtc_tools/video_file_reader.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+namespace test {
+
+class VideoGeometryAlignerTest : public ::testing::Test {
+ protected:
+ void SetUp() {
+ reference_video_ =
+ OpenYuvFile(ResourcePath("foreman_128x96", "yuv"), 128, 96);
+ ASSERT_TRUE(reference_video_);
+
+    // Very simple 4x4 frame used for verifying CropAndZoom.
+ const uint8_t data_y[] = {0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15};
+ const uint8_t data_u[] = {0, 1, 2, 3};
+ const uint8_t data_v[] = {0, 1, 2, 3};
+ test_frame_ = I420Buffer::Copy(
+ /* width= */ 4, /* height= */ 4, data_y, /* stride_y= */ 4, data_u,
+ /* stride_u= */ 2, data_v, /* stride_v= */ 2);
+ }
+
+ rtc::scoped_refptr<Video> reference_video_;
+ rtc::scoped_refptr<I420BufferInterface> test_frame_;
+};
+
+// Teach gtest how to compare CropRegions.
+bool operator==(const CropRegion& a, const CropRegion& b) {
+ return a.left == b.left && a.top == b.top && a.right == b.right &&
+ a.bottom == b.bottom;
+}
+
+TEST_F(VideoGeometryAlignerTest, CropAndZoomIdentity) {
+ const rtc::scoped_refptr<I420BufferInterface> frame =
+ reference_video_->GetFrame(0);
+
+ // Assume perfect match, i.e. SSIM == 1.
+ CropRegion identity_region;
+ EXPECT_EQ(1.0, Ssim(frame, CropAndZoom(identity_region, frame)));
+}
+
+TEST_F(VideoGeometryAlignerTest, CropAndZoomLeft) {
+ CropRegion region;
+ region.left = 2;
+ const rtc::scoped_refptr<I420BufferInterface> cropped_frame =
+ CropAndZoom(region, test_frame_);
+ EXPECT_EQ(std::vector<uint8_t>(
+ {2, 2, 3, 3, 6, 6, 7, 7, 10, 10, 11, 11, 14, 14, 15, 15}),
+ std::vector<uint8_t>(cropped_frame->DataY(),
+ cropped_frame->DataY() + 16));
+ EXPECT_EQ(
+ std::vector<uint8_t>({1, 1, 3, 3}),
+ std::vector<uint8_t>(cropped_frame->DataU(), cropped_frame->DataU() + 4));
+ EXPECT_EQ(
+ std::vector<uint8_t>({1, 1, 3, 3}),
+ std::vector<uint8_t>(cropped_frame->DataV(), cropped_frame->DataV() + 4));
+}
+
+// TODO(magjed): Re-enable when libyuv filtering is updated.
+TEST_F(VideoGeometryAlignerTest, DISABLED_CropAndZoomTop) {
+ CropRegion region;
+ region.top = 2;
+ const rtc::scoped_refptr<I420BufferInterface> cropped_frame =
+ CropAndZoom(region, test_frame_);
+ EXPECT_EQ(std::vector<uint8_t>(
+ {8, 9, 10, 11, 10, 11, 12, 13, 12, 13, 14, 15, 12, 13, 14, 15}),
+ std::vector<uint8_t>(cropped_frame->DataY(),
+ cropped_frame->DataY() + 16));
+ EXPECT_EQ(
+ std::vector<uint8_t>({2, 3, 2, 3}),
+ std::vector<uint8_t>(cropped_frame->DataU(), cropped_frame->DataU() + 4));
+ EXPECT_EQ(
+ std::vector<uint8_t>({2, 3, 2, 3}),
+ std::vector<uint8_t>(cropped_frame->DataV(), cropped_frame->DataV() + 4));
+}
+
+TEST_F(VideoGeometryAlignerTest, CropAndZoomRight) {
+ CropRegion region;
+ region.right = 2;
+ const rtc::scoped_refptr<I420BufferInterface> cropped_frame =
+ CropAndZoom(region, test_frame_);
+ EXPECT_EQ(std::vector<uint8_t>(
+ {0, 0, 1, 1, 4, 4, 5, 5, 8, 8, 9, 9, 12, 12, 13, 13}),
+ std::vector<uint8_t>(cropped_frame->DataY(),
+ cropped_frame->DataY() + 16));
+ EXPECT_EQ(
+ std::vector<uint8_t>({0, 0, 2, 2}),
+ std::vector<uint8_t>(cropped_frame->DataU(), cropped_frame->DataU() + 4));
+ EXPECT_EQ(
+ std::vector<uint8_t>({0, 0, 2, 2}),
+ std::vector<uint8_t>(cropped_frame->DataV(), cropped_frame->DataV() + 4));
+}
+
+// TODO(magjed): Re-enable when libyuv filtering is updated.
+TEST_F(VideoGeometryAlignerTest, DISABLED_CropAndZoomBottom) {
+ CropRegion region;
+ region.bottom = 2;
+ const rtc::scoped_refptr<I420BufferInterface> cropped_frame =
+ CropAndZoom(region, test_frame_);
+ EXPECT_EQ(
+ std::vector<uint8_t>({0, 1, 2, 3, 2, 3, 4, 5, 4, 5, 6, 7, 4, 5, 6, 7}),
+ std::vector<uint8_t>(cropped_frame->DataY(),
+ cropped_frame->DataY() + 16));
+ EXPECT_EQ(
+ std::vector<uint8_t>({0, 1, 0, 1}),
+ std::vector<uint8_t>(cropped_frame->DataU(), cropped_frame->DataU() + 4));
+ EXPECT_EQ(
+ std::vector<uint8_t>({0, 1, 0, 1}),
+ std::vector<uint8_t>(cropped_frame->DataV(), cropped_frame->DataV() + 4));
+}
+
+TEST_F(VideoGeometryAlignerTest, CalculateCropRegionIdentity) {
+ const rtc::scoped_refptr<I420BufferInterface> frame =
+ reference_video_->GetFrame(0);
+ CropRegion identity_region;
+ EXPECT_EQ(identity_region, CalculateCropRegion(frame, frame));
+}
+
+TEST_F(VideoGeometryAlignerTest, CalculateCropRegionArbitrary) {
+ // Arbitrary crop region.
+ CropRegion crop_region;
+ crop_region.left = 2;
+ crop_region.top = 4;
+ crop_region.right = 5;
+ crop_region.bottom = 3;
+
+ const rtc::scoped_refptr<I420BufferInterface> frame =
+ reference_video_->GetFrame(0);
+
+ EXPECT_EQ(crop_region,
+ CalculateCropRegion(frame, CropAndZoom(crop_region, frame)));
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/rtc_tools/frame_analyzer/video_quality_analysis.cc b/third_party/libwebrtc/rtc_tools/frame_analyzer/video_quality_analysis.cc
new file mode 100644
index 0000000000..1832438b75
--- /dev/null
+++ b/third_party/libwebrtc/rtc_tools/frame_analyzer/video_quality_analysis.cc
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_tools/frame_analyzer/video_quality_analysis.h"
+
+#include <algorithm>
+#include <array>
+#include <cstddef>
+
+#include "api/numerics/samples_stats_counter.h"
+#include "api/test/metrics/metric.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "third_party/libyuv/include/libyuv/compare.h"
+
+namespace webrtc {
+namespace test {
+
+ResultsContainer::ResultsContainer() {}
+ResultsContainer::~ResultsContainer() {}
+
+template <typename FrameMetricFunction>
+static double CalculateMetric(
+ const FrameMetricFunction& frame_metric_function,
+ const rtc::scoped_refptr<I420BufferInterface>& ref_buffer,
+ const rtc::scoped_refptr<I420BufferInterface>& test_buffer) {
+ RTC_CHECK_EQ(ref_buffer->width(), test_buffer->width());
+ RTC_CHECK_EQ(ref_buffer->height(), test_buffer->height());
+ return frame_metric_function(
+ ref_buffer->DataY(), ref_buffer->StrideY(), ref_buffer->DataU(),
+ ref_buffer->StrideU(), ref_buffer->DataV(), ref_buffer->StrideV(),
+ test_buffer->DataY(), test_buffer->StrideY(), test_buffer->DataU(),
+ test_buffer->StrideU(), test_buffer->DataV(), test_buffer->StrideV(),
+ test_buffer->width(), test_buffer->height());
+}
+
+double Psnr(const rtc::scoped_refptr<I420BufferInterface>& ref_buffer,
+ const rtc::scoped_refptr<I420BufferInterface>& test_buffer) {
+  // libyuv sets the max PSNR value to 128; we restrict it to 48. A zero-MSE
+  // frame reported as 128 would otherwise skew the results significantly.
+ return std::min(48.0,
+ CalculateMetric(&libyuv::I420Psnr, ref_buffer, test_buffer));
+}
+
+double Ssim(const rtc::scoped_refptr<I420BufferInterface>& ref_buffer,
+ const rtc::scoped_refptr<I420BufferInterface>& test_buffer) {
+ return CalculateMetric(&libyuv::I420Ssim, ref_buffer, test_buffer);
+}
+
+std::vector<AnalysisResult> RunAnalysis(
+ const rtc::scoped_refptr<webrtc::test::Video>& reference_video,
+ const rtc::scoped_refptr<webrtc::test::Video>& test_video,
+ const std::vector<size_t>& test_frame_indices) {
+ std::vector<AnalysisResult> results;
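+  // The reference video is assumed to be temporally aligned already (see
+  // video_temporal_aligner.h), so test frame i is compared against reference
+  // frame i.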
+ for (size_t i = 0; i < test_video->number_of_frames(); ++i) {
+ const rtc::scoped_refptr<I420BufferInterface>& test_frame =
+ test_video->GetFrame(i);
+ const rtc::scoped_refptr<I420BufferInterface>& reference_frame =
+ reference_video->GetFrame(i);
+
+ // Fill in the result struct.
+ AnalysisResult result;
+    result.frame_number = static_cast<int>(test_frame_indices[i]);
+ result.psnr_value = Psnr(reference_frame, test_frame);
+ result.ssim_value = Ssim(reference_frame, test_frame);
+ results.push_back(result);
+ }
+
+ return results;
+}
+
+std::vector<Cluster> CalculateFrameClusters(
+ const std::vector<size_t>& indices) {
+ std::vector<Cluster> clusters;
+
+ for (size_t index : indices) {
+ if (!clusters.empty() && clusters.back().index == index) {
+ // This frame belongs to the previous cluster.
+ ++clusters.back().number_of_repeated_frames;
+ } else {
+ // Start a new cluster.
+ clusters.push_back({index, /* number_of_repeated_frames= */ 1});
+ }
+ }
+
+ return clusters;
+}
+
+int GetMaxRepeatedFrames(const std::vector<Cluster>& clusters) {
+ int max_number_of_repeated_frames = 0;
+ for (const Cluster& cluster : clusters) {
+ max_number_of_repeated_frames = std::max(max_number_of_repeated_frames,
+ cluster.number_of_repeated_frames);
+ }
+ return max_number_of_repeated_frames;
+}
+
+int GetMaxSkippedFrames(const std::vector<Cluster>& clusters) {
+ size_t max_skipped_frames = 0;
+ for (size_t i = 1; i < clusters.size(); ++i) {
+ const size_t skipped_frames = clusters[i].index - clusters[i - 1].index - 1;
+ max_skipped_frames = std::max(max_skipped_frames, skipped_frames);
+ }
+ return static_cast<int>(max_skipped_frames);
+}
+
+int GetTotalNumberOfSkippedFrames(const std::vector<Cluster>& clusters) {
+ // The number of reference frames the test video spans.
+ const size_t number_ref_frames =
+ clusters.empty() ? 0 : 1 + clusters.back().index - clusters.front().index;
+ return static_cast<int>(number_ref_frames - clusters.size());
+}
+
+void PrintAnalysisResults(const std::string& label,
+ ResultsContainer& results,
+ MetricsLogger& logger) {
+ if (results.frames.size() > 0u) {
+ logger.LogSingleValueMetric("Unique_frames_count", label,
+ results.frames.size(), Unit::kUnitless,
+ ImprovementDirection::kNeitherIsBetter);
+
+ SamplesStatsCounter psnr_values;
+ SamplesStatsCounter ssim_values;
+ for (const auto& frame : results.frames) {
+ psnr_values.AddSample(frame.psnr_value);
+ ssim_values.AddSample(frame.ssim_value);
+ }
+
+ logger.LogMetric("PSNR_dB", label, psnr_values, Unit::kUnitless,
+ ImprovementDirection::kNeitherIsBetter);
+ logger.LogMetric("SSIM", label, ssim_values, Unit::kUnitless,
+ ImprovementDirection::kNeitherIsBetter);
+ }
+
+ logger.LogSingleValueMetric("Max_repeated", label,
+ results.max_repeated_frames, Unit::kUnitless,
+ ImprovementDirection::kNeitherIsBetter);
+ logger.LogSingleValueMetric("Max_skipped", label, results.max_skipped_frames,
+ Unit::kUnitless,
+ ImprovementDirection::kNeitherIsBetter);
+ logger.LogSingleValueMetric("Total_skipped", label,
+ results.total_skipped_frames, Unit::kUnitless,
+ ImprovementDirection::kNeitherIsBetter);
+ logger.LogSingleValueMetric("Decode_errors_reference", label,
+ results.decode_errors_ref, Unit::kUnitless,
+ ImprovementDirection::kNeitherIsBetter);
+ logger.LogSingleValueMetric("Decode_errors_test", label,
+ results.decode_errors_test, Unit::kUnitless,
+ ImprovementDirection::kNeitherIsBetter);
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/rtc_tools/frame_analyzer/video_quality_analysis.h b/third_party/libwebrtc/rtc_tools/frame_analyzer/video_quality_analysis.h
new file mode 100644
index 0000000000..701b5859b0
--- /dev/null
+++ b/third_party/libwebrtc/rtc_tools/frame_analyzer/video_quality_analysis.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_TOOLS_FRAME_ANALYZER_VIDEO_QUALITY_ANALYSIS_H_
+#define RTC_TOOLS_FRAME_ANALYZER_VIDEO_QUALITY_ANALYSIS_H_
+
+#include <stdio.h>
+
+#include <string>
+#include <vector>
+
+#include "api/scoped_refptr.h"
+#include "api/test/metrics/metrics_logger.h"
+#include "api/video/video_frame_buffer.h"
+#include "rtc_tools/video_file_reader.h"
+
+namespace webrtc {
+namespace test {
+
+struct AnalysisResult {
+ AnalysisResult() {}
+ AnalysisResult(int frame_number, double psnr_value, double ssim_value)
+ : frame_number(frame_number),
+ psnr_value(psnr_value),
+ ssim_value(ssim_value) {}
+ int frame_number;
+ double psnr_value;
+ double ssim_value;
+};
+
+struct ResultsContainer {
+ ResultsContainer();
+ ~ResultsContainer();
+
+ std::vector<AnalysisResult> frames;
+ int max_repeated_frames = 0;
+ int max_skipped_frames = 0;
+ int total_skipped_frames = 0;
+ int decode_errors_ref = 0;
+ int decode_errors_test = 0;
+};
+
+// Runs the PSNR and SSIM analysis on the test video, which comprises the
+// frames captured during the quality measurement test. There may be missing
+// or duplicated frames, and the capture may start at an arbitrary position in
+// the original video, so a map from test frame indices to reference frame
+// indices must also be provided.
+std::vector<AnalysisResult> RunAnalysis(
+ const rtc::scoped_refptr<webrtc::test::Video>& reference_video,
+ const rtc::scoped_refptr<webrtc::test::Video>& test_video,
+ const std::vector<size_t>& test_frame_indices);
+
+// Compute PSNR for an I420 buffer (all planes). The max return value (in the
+// case where the test and reference frames are exactly the same) will be 48.
+double Psnr(const rtc::scoped_refptr<I420BufferInterface>& ref_buffer,
+ const rtc::scoped_refptr<I420BufferInterface>& test_buffer);
+
+// Compute SSIM for an I420 buffer (all planes). The max return value (in the
+// case where the test and reference frames are exactly the same) will be 1.
+double Ssim(const rtc::scoped_refptr<I420BufferInterface>& ref_buffer,
+ const rtc::scoped_refptr<I420BufferInterface>& test_buffer);
+
+// Logs the analysis results in a Chromium perf dashboard compatible format.
+// If the results object contains no frames, the per-frame metrics (unique
+// frame count, PSNR and SSIM) are skipped, but the repeat/skip/decode-error
+// counters are still logged.
+void PrintAnalysisResults(const std::string& label,
+ ResultsContainer& results,
+ MetricsLogger& logger);
+
+struct Cluster {
+ // Corresponding reference frame index for this cluster.
+ size_t index;
+ // The number of sequential frames that mapped to the same reference frame
+ // index.
+ int number_of_repeated_frames;
+};
+
+// Clusters sequentially repeated frames. For example, the sequence {100, 102,
+// 102, 103} will be mapped to {{100, 1}, {102, 2}, {103, 1}}.
+std::vector<Cluster> CalculateFrameClusters(const std::vector<size_t>& indices);
+
+// Returns the maximum number of sequentially repeated frames in the test
+// video. This number is one if the test video only contains unique frames
+// (see the worked example below).
+int GetMaxRepeatedFrames(const std::vector<Cluster>& clusters);
+
+// Get the longest sequence of skipped reference frames. This corresponds to the
+// longest freeze in the test video.
+int GetMaxSkippedFrames(const std::vector<Cluster>& clusters);
+
+// Get total number of skipped frames in the test video.
+int GetTotalNumberOfSkippedFrames(const std::vector<Cluster>& clusters);
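+
+// Worked example for the cluster helpers above: the test frame indices
+// {100, 102, 102, 103} produce clusters {{100, 1}, {102, 2}, {103, 1}}, so
+// GetMaxRepeatedFrames() == 2, GetMaxSkippedFrames() == 1 (reference frame
+// 101 was skipped) and GetTotalNumberOfSkippedFrames() == 1.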
+
+} // namespace test
+} // namespace webrtc
+
+#endif // RTC_TOOLS_FRAME_ANALYZER_VIDEO_QUALITY_ANALYSIS_H_
diff --git a/third_party/libwebrtc/rtc_tools/frame_analyzer/video_quality_analysis_unittest.cc b/third_party/libwebrtc/rtc_tools/frame_analyzer/video_quality_analysis_unittest.cc
new file mode 100644
index 0000000000..d0227fb4b3
--- /dev/null
+++ b/third_party/libwebrtc/rtc_tools/frame_analyzer/video_quality_analysis_unittest.cc
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "rtc_tools/frame_analyzer/video_quality_analysis.h"
+
+#include <string>
+#include <vector>
+
+#include "api/test/metrics/metric.h"
+#include "api/test/metrics/metrics_logger.h"
+#include "system_wrappers/include/clock.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+namespace test {
+namespace {
+
+using ::testing::IsSupersetOf;
+
+// Metric fields to assert on.
+struct MetricValidationInfo {
+ std::string test_case;
+ std::string name;
+ Unit unit;
+ ImprovementDirection improvement_direction;
+ double mean;
+};
+
+bool operator==(const MetricValidationInfo& a, const MetricValidationInfo& b) {
+  return a.name == b.name && a.test_case == b.test_case && a.unit == b.unit &&
+         a.improvement_direction == b.improvement_direction &&
+         a.mean == b.mean;
+}
+
+std::ostream& operator<<(std::ostream& os, const MetricValidationInfo& m) {
+  os << "{ test_case=" << m.test_case << "; name=" << m.name
+     << "; unit=" << test::ToString(m.unit)
+     << "; improvement_direction=" << test::ToString(m.improvement_direction)
+     << "; mean=" << m.mean << " }";
+  return os;
+}
+
+std::vector<MetricValidationInfo> ToValidationInfo(
+ const std::vector<Metric>& metrics) {
+ std::vector<MetricValidationInfo> out;
+ for (const Metric& m : metrics) {
+ out.push_back(
+ MetricValidationInfo{.test_case = m.test_case,
+ .name = m.name,
+ .unit = m.unit,
+ .improvement_direction = m.improvement_direction,
+ .mean = *m.stats.mean});
+ }
+ return out;
+}
+
+TEST(VideoQualityAnalysisTest, PrintAnalysisResultsEmpty) {
+ ResultsContainer result;
+ DefaultMetricsLogger logger(Clock::GetRealTimeClock());
+ PrintAnalysisResults("Empty", result, logger);
+}
+
+TEST(VideoQualityAnalysisTest, PrintAnalysisResultsOneFrame) {
+ ResultsContainer result;
+ result.frames.push_back(AnalysisResult(0, 35.0, 0.9));
+ DefaultMetricsLogger logger(Clock::GetRealTimeClock());
+ PrintAnalysisResults("OneFrame", result, logger);
+}
+
+TEST(VideoQualityAnalysisTest, PrintAnalysisResultsThreeFrames) {
+ ResultsContainer result;
+ result.frames.push_back(AnalysisResult(0, 35.0, 0.9));
+ result.frames.push_back(AnalysisResult(1, 34.0, 0.8));
+ result.frames.push_back(AnalysisResult(2, 33.0, 0.7));
+ DefaultMetricsLogger logger(Clock::GetRealTimeClock());
+ PrintAnalysisResults("ThreeFrames", result, logger);
+}
+
+TEST(VideoQualityAnalysisTest, PrintMaxRepeatedAndSkippedFramesSkippedFrames) {
+ ResultsContainer result;
+
+ result.max_repeated_frames = 2;
+ result.max_skipped_frames = 2;
+ result.total_skipped_frames = 3;
+ result.decode_errors_ref = 0;
+ result.decode_errors_test = 0;
+
+ DefaultMetricsLogger logger(Clock::GetRealTimeClock());
+ PrintAnalysisResults("NormalStatsFile", result, logger);
+
+ std::vector<MetricValidationInfo> metrics =
+ ToValidationInfo(logger.GetCollectedMetrics());
+ EXPECT_THAT(
+ metrics,
+ IsSupersetOf(
+ {MetricValidationInfo{
+ .test_case = "NormalStatsFile",
+ .name = "Max_repeated",
+ .unit = Unit::kUnitless,
+ .improvement_direction = ImprovementDirection::kNeitherIsBetter,
+ .mean = 2},
+ MetricValidationInfo{
+ .test_case = "NormalStatsFile",
+ .name = "Max_skipped",
+ .unit = Unit::kUnitless,
+ .improvement_direction = ImprovementDirection::kNeitherIsBetter,
+ .mean = 2},
+ MetricValidationInfo{
+ .test_case = "NormalStatsFile",
+ .name = "Total_skipped",
+ .unit = Unit::kUnitless,
+ .improvement_direction = ImprovementDirection::kNeitherIsBetter,
+ .mean = 3},
+ MetricValidationInfo{
+ .test_case = "NormalStatsFile",
+ .name = "Decode_errors_reference",
+ .unit = Unit::kUnitless,
+ .improvement_direction = ImprovementDirection::kNeitherIsBetter,
+ .mean = 0},
+ MetricValidationInfo{
+ .test_case = "NormalStatsFile",
+ .name = "Decode_errors_test",
+ .unit = Unit::kUnitless,
+ .improvement_direction = ImprovementDirection::kNeitherIsBetter,
+ .mean = 0}}));
+}
+
+TEST(VideoQualityAnalysisTest,
+ PrintMaxRepeatedAndSkippedFramesDecodeErrorInTest) {
+ ResultsContainer result;
+
+ result.max_repeated_frames = 1;
+ result.max_skipped_frames = 0;
+ result.total_skipped_frames = 0;
+ result.decode_errors_ref = 0;
+ result.decode_errors_test = 3;
+
+ DefaultMetricsLogger logger(Clock::GetRealTimeClock());
+ PrintAnalysisResults("NormalStatsFile", result, logger);
+
+ std::vector<MetricValidationInfo> metrics =
+ ToValidationInfo(logger.GetCollectedMetrics());
+ EXPECT_THAT(
+ metrics,
+ IsSupersetOf(
+ {MetricValidationInfo{
+ .test_case = "NormalStatsFile",
+ .name = "Max_repeated",
+ .unit = Unit::kUnitless,
+ .improvement_direction = ImprovementDirection::kNeitherIsBetter,
+ .mean = 1},
+ MetricValidationInfo{
+ .test_case = "NormalStatsFile",
+ .name = "Max_skipped",
+ .unit = Unit::kUnitless,
+ .improvement_direction = ImprovementDirection::kNeitherIsBetter,
+ .mean = 0},
+ MetricValidationInfo{
+ .test_case = "NormalStatsFile",
+ .name = "Total_skipped",
+ .unit = Unit::kUnitless,
+ .improvement_direction = ImprovementDirection::kNeitherIsBetter,
+ .mean = 0},
+ MetricValidationInfo{
+ .test_case = "NormalStatsFile",
+ .name = "Decode_errors_reference",
+ .unit = Unit::kUnitless,
+ .improvement_direction = ImprovementDirection::kNeitherIsBetter,
+ .mean = 0},
+ MetricValidationInfo{
+ .test_case = "NormalStatsFile",
+ .name = "Decode_errors_test",
+ .unit = Unit::kUnitless,
+ .improvement_direction = ImprovementDirection::kNeitherIsBetter,
+ .mean = 3}}));
+}
+
+TEST(VideoQualityAnalysisTest, CalculateFrameClustersOneValue) {
+ const std::vector<Cluster> result = CalculateFrameClusters({1});
+ EXPECT_EQ(1u, result.size());
+ EXPECT_EQ(1u, result[0].index);
+ EXPECT_EQ(1, result[0].number_of_repeated_frames);
+}
+
+TEST(VideoQualityAnalysisTest, GetMaxRepeatedFramesOneValue) {
+ EXPECT_EQ(1, GetMaxRepeatedFrames(CalculateFrameClusters({1})));
+}
+
+TEST(VideoQualityAnalysisTest, GetMaxSkippedFramesOneValue) {
+ EXPECT_EQ(0, GetMaxSkippedFrames(CalculateFrameClusters({1})));
+}
+
+TEST(VideoQualityAnalysisTest, GetTotalNumberOfSkippedFramesOneValue) {
+ EXPECT_EQ(0, GetTotalNumberOfSkippedFrames(CalculateFrameClusters({1})));
+}
+
+TEST(VideoQualityAnalysisTest, CalculateFrameClustersOneOneTwo) {
+ const std::vector<Cluster> result = CalculateFrameClusters({1, 1, 2});
+ EXPECT_EQ(2u, result.size());
+ EXPECT_EQ(1u, result[0].index);
+ EXPECT_EQ(2, result[0].number_of_repeated_frames);
+ EXPECT_EQ(2u, result[1].index);
+ EXPECT_EQ(1, result[1].number_of_repeated_frames);
+}
+
+TEST(VideoQualityAnalysisTest, GetMaxRepeatedFramesOneOneTwo) {
+ EXPECT_EQ(2, GetMaxRepeatedFrames(CalculateFrameClusters({1, 1, 2})));
+}
+
+TEST(VideoQualityAnalysisTest, GetMaxSkippedFramesOneOneTwo) {
+ EXPECT_EQ(0, GetMaxSkippedFrames(CalculateFrameClusters({1, 1, 2})));
+}
+
+TEST(VideoQualityAnalysisTest, GetTotalNumberOfSkippedFramesOneOneTwo) {
+ EXPECT_EQ(0,
+ GetTotalNumberOfSkippedFrames(CalculateFrameClusters({1, 1, 2})));
+}
+
+TEST(VideoQualityAnalysisTest, CalculateFrameClustersEmpty) {
+ EXPECT_TRUE(CalculateFrameClusters({}).empty());
+}
+
+TEST(VideoQualityAnalysisTest, GetMaxRepeatedFramesEmpty) {
+ EXPECT_EQ(0, GetMaxRepeatedFrames({}));
+}
+
+TEST(VideoQualityAnalysisTest, GetMaxSkippedFramesEmpty) {
+ EXPECT_EQ(0, GetMaxSkippedFrames({}));
+}
+
+TEST(VideoQualityAnalysisTest, GetTotalNumberOfSkippedFramesEmpty) {
+ EXPECT_EQ(0, GetTotalNumberOfSkippedFrames({}));
+}
+
+} // namespace
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/rtc_tools/frame_analyzer/video_temporal_aligner.cc b/third_party/libwebrtc/rtc_tools/frame_analyzer/video_temporal_aligner.cc
new file mode 100644
index 0000000000..4b940d0fe3
--- /dev/null
+++ b/third_party/libwebrtc/rtc_tools/frame_analyzer/video_temporal_aligner.cc
@@ -0,0 +1,238 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_tools/frame_analyzer/video_temporal_aligner.h"
+
+#include <algorithm>
+#include <cmath>
+#include <cstddef>
+#include <deque>
+#include <iterator>
+#include <limits>
+#include <vector>
+
+#include "api/make_ref_counted.h"
+#include "api/video/i420_buffer.h"
+#include "api/video/video_frame_buffer.h"
+#include "rtc_tools/frame_analyzer/video_quality_analysis.h"
+
+namespace webrtc {
+namespace test {
+
+namespace {
+
+// This constant controls how many frames we look ahead while seeking for the
+// match for the next frame. Note that we may span bigger gaps than this number
+// since we reset the counter as soon as we find a better match. The seeking
+// will stop when there is no improvement in the next kNumberOfFramesLookAhead
+// frames. Typically, the SSIM will improve as we get closer and closer to the
+// real match.
+const int kNumberOfFramesLookAhead = 60;
+
+// Helper class that takes a video and generates an infinite looping video.
+class LoopingVideo : public Video {
+ public:
+ explicit LoopingVideo(const rtc::scoped_refptr<Video>& video)
+ : video_(video) {}
+
+ int width() const override { return video_->width(); }
+ int height() const override { return video_->height(); }
+ size_t number_of_frames() const override {
+ return std::numeric_limits<size_t>::max();
+ }
+
+ rtc::scoped_refptr<I420BufferInterface> GetFrame(
+ size_t index) const override {
+ return video_->GetFrame(index % video_->number_of_frames());
+ }
+
+ private:
+ const rtc::scoped_refptr<Video> video_;
+};
+
+// Helper class that takes a vector of frame indices and a video and produces
+// a new video where the frames have been reshuffled.
+class ReorderedVideo : public Video {
+ public:
+ ReorderedVideo(const rtc::scoped_refptr<Video>& video,
+ const std::vector<size_t>& indices)
+ : video_(video), indices_(indices) {}
+
+ int width() const override { return video_->width(); }
+ int height() const override { return video_->height(); }
+ size_t number_of_frames() const override { return indices_.size(); }
+
+ rtc::scoped_refptr<I420BufferInterface> GetFrame(
+ size_t index) const override {
+ return video_->GetFrame(indices_.at(index));
+ }
+
+ private:
+ const rtc::scoped_refptr<Video> video_;
+ const std::vector<size_t> indices_;
+};
+
+// Helper class that takes a video and produces a downscaled video.
+class DownscaledVideo : public Video {
+ public:
+ DownscaledVideo(float scale_factor, const rtc::scoped_refptr<Video>& video)
+ : downscaled_width_(
+ static_cast<int>(std::round(scale_factor * video->width()))),
+ downscaled_height_(
+ static_cast<int>(std::round(scale_factor * video->height()))),
+ video_(video) {}
+
+ int width() const override { return downscaled_width_; }
+ int height() const override { return downscaled_height_; }
+ size_t number_of_frames() const override {
+ return video_->number_of_frames();
+ }
+
+ rtc::scoped_refptr<I420BufferInterface> GetFrame(
+ size_t index) const override {
+ const rtc::scoped_refptr<I420BufferInterface> frame =
+ video_->GetFrame(index);
+ rtc::scoped_refptr<I420Buffer> downscaled_frame =
+ I420Buffer::Create(downscaled_width_, downscaled_height_);
+ downscaled_frame->ScaleFrom(*frame);
+ return downscaled_frame;
+ }
+
+ private:
+ const int downscaled_width_;
+ const int downscaled_height_;
+ const rtc::scoped_refptr<Video> video_;
+};
+
+// Helper class that takes a video and caches the most recently accessed
+// frames. This improves performance a lot since the original source is often
+// read from a file.
+class CachedVideo : public Video {
+ public:
+ CachedVideo(int max_cache_size, const rtc::scoped_refptr<Video>& video)
+ : max_cache_size_(max_cache_size), video_(video) {}
+
+ int width() const override { return video_->width(); }
+ int height() const override { return video_->height(); }
+ size_t number_of_frames() const override {
+ return video_->number_of_frames();
+ }
+
+ rtc::scoped_refptr<I420BufferInterface> GetFrame(
+ size_t index) const override {
+ for (const CachedFrame& cached_frame : cache_) {
+ if (cached_frame.index == index)
+ return cached_frame.frame;
+ }
+
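+    // Cache miss: fetch from the underlying video and insert at the front;
+    // evict the least recently inserted frame when over capacity.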
+ rtc::scoped_refptr<I420BufferInterface> frame = video_->GetFrame(index);
+ cache_.push_front({index, frame});
+ if (cache_.size() > max_cache_size_)
+ cache_.pop_back();
+
+ return frame;
+ }
+
+ private:
+ struct CachedFrame {
+ size_t index;
+ rtc::scoped_refptr<I420BufferInterface> frame;
+ };
+
+ const size_t max_cache_size_;
+ const rtc::scoped_refptr<Video> video_;
+ mutable std::deque<CachedFrame> cache_;
+};
+
+// Try matching the test frame against all frames in the reference video and
+// return the index of the best matching frame.
+size_t FindBestMatch(const rtc::scoped_refptr<I420BufferInterface>& test_frame,
+ const Video& reference_video) {
+ std::vector<double> ssim;
+ for (const auto& ref_frame : reference_video)
+ ssim.push_back(Ssim(test_frame, ref_frame));
+ return std::distance(ssim.begin(),
+ std::max_element(ssim.begin(), ssim.end()));
+}
+
+// Find and return the index of the frame matching the test frame. The search
+// starts at the starting index and continues until there is no better match
+// within the next kNumberOfFramesLookAhead frames.
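+// For example, if the frame at start_index + 3 scores a higher SSIM, the
+// search restarts from there, so the final match can be arbitrarily far ahead
+// even though each step only scans a fixed look-ahead window.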
+size_t FindNextMatch(const rtc::scoped_refptr<I420BufferInterface>& test_frame,
+ const Video& reference_video,
+ size_t start_index) {
+ const double start_ssim =
+ Ssim(test_frame, reference_video.GetFrame(start_index));
+ for (int i = 1; i < kNumberOfFramesLookAhead; ++i) {
+ const size_t next_index = start_index + i;
+ // If we find a better match, restart the search at that point.
+ if (start_ssim < Ssim(test_frame, reference_video.GetFrame(next_index)))
+ return FindNextMatch(test_frame, reference_video, next_index);
+ }
+ // The starting index was the best match.
+ return start_index;
+}
+
+} // namespace
+
+std::vector<size_t> FindMatchingFrameIndices(
+ const rtc::scoped_refptr<Video>& reference_video,
+ const rtc::scoped_refptr<Video>& test_video) {
+  // Downscale and cache to get roughly a 10x speedup: full resolution is not
+  // needed to match frames, and caching limits file access so the same data
+  // is not read tens of times.
+ const float kScaleFactor = 0.25f;
+ const rtc::scoped_refptr<Video> cached_downscaled_reference_video =
+ rtc::make_ref_counted<CachedVideo>(kNumberOfFramesLookAhead,
+ rtc::make_ref_counted<DownscaledVideo>(
+ kScaleFactor, reference_video));
+ const rtc::scoped_refptr<Video> downscaled_test_video =
+ rtc::make_ref_counted<DownscaledVideo>(kScaleFactor, test_video);
+
+ // Assume the video is looping around.
+ const rtc::scoped_refptr<Video> looping_reference_video =
+ rtc::make_ref_counted<LoopingVideo>(cached_downscaled_reference_video);
+
+ std::vector<size_t> match_indices;
+ for (const rtc::scoped_refptr<I420BufferInterface>& test_frame :
+ *downscaled_test_video) {
+ if (match_indices.empty()) {
+ // First frame.
+ match_indices.push_back(
+ FindBestMatch(test_frame, *cached_downscaled_reference_video));
+ } else {
+ match_indices.push_back(FindNextMatch(
+ test_frame, *looping_reference_video, match_indices.back()));
+ }
+ }
+
+ return match_indices;
+}
+
+rtc::scoped_refptr<Video> ReorderVideo(const rtc::scoped_refptr<Video>& video,
+ const std::vector<size_t>& indices) {
+ return rtc::make_ref_counted<ReorderedVideo>(
+ rtc::make_ref_counted<LoopingVideo>(video), indices);
+}
+
+rtc::scoped_refptr<Video> GenerateAlignedReferenceVideo(
+ const rtc::scoped_refptr<Video>& reference_video,
+ const rtc::scoped_refptr<Video>& test_video) {
+ return GenerateAlignedReferenceVideo(
+ reference_video, FindMatchingFrameIndices(reference_video, test_video));
+}
+
+rtc::scoped_refptr<Video> GenerateAlignedReferenceVideo(
+ const rtc::scoped_refptr<Video>& reference_video,
+ const std::vector<size_t>& indices) {
+ return ReorderVideo(reference_video, indices);
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/third_party/libwebrtc/rtc_tools/frame_analyzer/video_temporal_aligner.h b/third_party/libwebrtc/rtc_tools/frame_analyzer/video_temporal_aligner.h
new file mode 100644
index 0000000000..26a4088815
--- /dev/null
+++ b/third_party/libwebrtc/rtc_tools/frame_analyzer/video_temporal_aligner.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_TOOLS_FRAME_ANALYZER_VIDEO_TEMPORAL_ALIGNER_H_
+#define RTC_TOOLS_FRAME_ANALYZER_VIDEO_TEMPORAL_ALIGNER_H_
+
+#include <stddef.h>
+
+#include <vector>
+
+#include "api/scoped_refptr.h"
+#include "rtc_tools/video_file_reader.h"
+
+namespace webrtc {
+namespace test {
+
+// Returns a vector with the same size as the given test video. Each element
+// is the index of the reference frame that the corresponding test frame
+// matches best. These indices are strictly increasing and might loop around
+// the reference video, i.e. they can be bigger than the number of frames in
+// the reference video and should be interpreted modulo that size. The
+// matching frames are determined by maximizing SSIM.
+std::vector<size_t> FindMatchingFrameIndices(
+ const rtc::scoped_refptr<Video>& reference_video,
+ const rtc::scoped_refptr<Video>& test_video);
+
+// Generates a new video using the frames from the original video. The
+// returned video will have the same number of frames as the size of
+// `indices`, and frame number i in the returned video will point to frame
+// number indices[i] in the original video.
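+//
+// For example, ReorderVideo(video, {2, 2, 5}) returns a three-frame video
+// consisting of frames 2, 2 and 5 of the original video.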
+rtc::scoped_refptr<Video> ReorderVideo(const rtc::scoped_refptr<Video>& video,
+ const std::vector<size_t>& indices);
+
+// Returns a modified version of the reference video where the frames have
+// been aligned to the test video. The test video is assumed to be captured
+// during a quality measurement test where the reference video is the source.
+// The test video may start at an arbitrary position in the reference video
+// and there might be missing frames. The reference video is assumed to loop
+// over when it reaches the end. The returned result is a version of the
+// reference video where the missing frames are left out so it aligns to the
+// test video.
+rtc::scoped_refptr<Video> GenerateAlignedReferenceVideo(
+ const rtc::scoped_refptr<Video>& reference_video,
+ const rtc::scoped_refptr<Video>& test_video);
+
+// As above, but using precalculated indices.
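+//
+// A sketch of the full alignment flow (variable names illustrative):
+//
+//   std::vector<size_t> indices = FindMatchingFrameIndices(reference, test);
+//   rtc::scoped_refptr<Video> aligned_reference =
+//       GenerateAlignedReferenceVideo(reference, indices);
+//   // aligned_reference->GetFrame(i) now corresponds to test->GetFrame(i).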
+rtc::scoped_refptr<Video> GenerateAlignedReferenceVideo(
+ const rtc::scoped_refptr<Video>& reference_video,
+ const std::vector<size_t>& indices);
+
+} // namespace test
+} // namespace webrtc
+
+#endif // RTC_TOOLS_FRAME_ANALYZER_VIDEO_TEMPORAL_ALIGNER_H_
diff --git a/third_party/libwebrtc/rtc_tools/frame_analyzer/video_temporal_aligner_unittest.cc b/third_party/libwebrtc/rtc_tools/frame_analyzer/video_temporal_aligner_unittest.cc
new file mode 100644
index 0000000000..9519a7462b
--- /dev/null
+++ b/third_party/libwebrtc/rtc_tools/frame_analyzer/video_temporal_aligner_unittest.cc
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_tools/frame_analyzer/video_temporal_aligner.h"
+
+#include <cstddef>
+
+#include "rtc_tools/frame_analyzer/video_quality_analysis.h"
+#include "rtc_tools/video_file_reader.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+namespace webrtc {
+namespace test {
+
+class VideoTemporalAlignerTest : public ::testing::Test {
+ protected:
+ void SetUp() {
+ reference_video =
+ OpenYuvFile(ResourcePath("foreman_128x96", "yuv"), 128, 96);
+ ASSERT_TRUE(reference_video);
+ }
+
+ rtc::scoped_refptr<Video> reference_video;
+};
+
+TEST_F(VideoTemporalAlignerTest, FindMatchingFrameIndicesEmpty) {
+ rtc::scoped_refptr<Video> empty_test_video =
+ ReorderVideo(reference_video, std::vector<size_t>());
+
+ const std::vector<size_t> matched_indices =
+ FindMatchingFrameIndices(reference_video, empty_test_video);
+
+ EXPECT_TRUE(matched_indices.empty());
+}
+
+TEST_F(VideoTemporalAlignerTest, FindMatchingFrameIndicesIdentity) {
+ const std::vector<size_t> indices =
+ FindMatchingFrameIndices(reference_video, reference_video);
+
+ EXPECT_EQ(indices.size(), reference_video->number_of_frames());
+ for (size_t i = 0; i < indices.size(); ++i)
+ EXPECT_EQ(i, indices[i]);
+}
+
+TEST_F(VideoTemporalAlignerTest, FindMatchingFrameIndicesDuplicateFrames) {
+ const std::vector<size_t> indices = {2, 2, 2, 2};
+
+ // Generate a test video based on this sequence.
+ rtc::scoped_refptr<Video> test_video = ReorderVideo(reference_video, indices);
+
+ const std::vector<size_t> matched_indices =
+ FindMatchingFrameIndices(reference_video, test_video);
+
+ EXPECT_EQ(indices, matched_indices);
+}
+
+TEST_F(VideoTemporalAlignerTest, FindMatchingFrameIndicesLoopAround) {
+ std::vector<size_t> indices;
+ for (size_t i = 0; i < reference_video->number_of_frames() * 2; ++i)
+ indices.push_back(i % reference_video->number_of_frames());
+
+ // Generate a test video based on this sequence.
+ rtc::scoped_refptr<Video> test_video = ReorderVideo(reference_video, indices);
+
+ const std::vector<size_t> matched_indices =
+ FindMatchingFrameIndices(reference_video, test_video);
+
+ for (size_t i = 0; i < matched_indices.size(); ++i)
+ EXPECT_EQ(i, matched_indices[i]);
+}
+
+TEST_F(VideoTemporalAlignerTest, FindMatchingFrameIndicesStressTest) {
+ std::vector<size_t> indices;
+ // Arbitrary start index.
+ const size_t start_index = 12345;
+ // Generate some generic sequence of frames.
+ indices.push_back(start_index % reference_video->number_of_frames());
+ indices.push_back((start_index + 1) % reference_video->number_of_frames());
+ indices.push_back((start_index + 2) % reference_video->number_of_frames());
+ indices.push_back((start_index + 5) % reference_video->number_of_frames());
+ indices.push_back((start_index + 10) % reference_video->number_of_frames());
+ indices.push_back((start_index + 20) % reference_video->number_of_frames());
+ indices.push_back((start_index + 20) % reference_video->number_of_frames());
+ indices.push_back((start_index + 22) % reference_video->number_of_frames());
+ indices.push_back((start_index + 32) % reference_video->number_of_frames());
+
+ // Generate a test video based on this sequence.
+ rtc::scoped_refptr<Video> test_video = ReorderVideo(reference_video, indices);
+
+ const std::vector<size_t> matched_indices =
+ FindMatchingFrameIndices(reference_video, test_video);
+
+ EXPECT_EQ(indices, matched_indices);
+}
+
+TEST_F(VideoTemporalAlignerTest, GenerateAlignedReferenceVideo) {
+ // Arbitrary start index.
+ const size_t start_index = 12345;
+ std::vector<size_t> indices;
+ const size_t frame_step = 10;
+ for (size_t i = 0; i < reference_video->number_of_frames() / frame_step;
+ ++i) {
+ indices.push_back((start_index + i * frame_step) %
+ reference_video->number_of_frames());
+ }
+
+ // Generate a test video based on this sequence.
+ rtc::scoped_refptr<Video> test_video = ReorderVideo(reference_video, indices);
+
+ rtc::scoped_refptr<Video> aligned_reference_video =
+ GenerateAlignedReferenceVideo(reference_video, test_video);
+
+ // Assume perfect match, i.e. ssim == 1, for all frames.
+ for (size_t i = 0; i < test_video->number_of_frames(); ++i) {
+ EXPECT_EQ(1.0, Ssim(test_video->GetFrame(i),
+ aligned_reference_video->GetFrame(i)));
+ }
+}
+
+} // namespace test
+} // namespace webrtc