/*
 *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "rtc_tools/frame_analyzer/video_color_aligner.h"

#include <algorithm>
#include <array>
#include <cmath>
#include <cstdint>
#include <vector>

#include "api/array_view.h"
#include "api/make_ref_counted.h"
#include "api/video/i420_buffer.h"
#include "rtc_base/checks.h"
#include "rtc_tools/frame_analyzer/linear_least_squares.h"
#include "third_party/libyuv/include/libyuv/planar_functions.h"
#include "third_party/libyuv/include/libyuv/scale.h"

namespace webrtc {
namespace test {

namespace {

// Helper function for AdjustColors(). This function calculates a single output
// row for y with the given color coefficients. The u/v channels are assumed to
// be subsampled by a factor of 2, which is the case for I420.
void CalculateYChannel(rtc::ArrayView<const uint8_t> y_data,
                       rtc::ArrayView<const uint8_t> u_data,
                       rtc::ArrayView<const uint8_t> v_data,
                       const std::array<float, 4>& coeff,
                       rtc::ArrayView<uint8_t> output) {
  RTC_CHECK_EQ(y_data.size(), output.size());
  // Each u/v element represents two y elements. Make sure we have enough to
  // cover the Y values.
  RTC_CHECK_GE(u_data.size() * 2, y_data.size());
  RTC_CHECK_GE(v_data.size() * 2, y_data.size());

  // Do two pixels at a time since u/v are subsampled.
  for (size_t i = 0; i * 2 < y_data.size() - 1; ++i) {
    const float uv_contribution =
        coeff[1] * u_data[i] + coeff[2] * v_data[i] + coeff[3];

    const float val0 = coeff[0] * y_data[i * 2 + 0] + uv_contribution;
    const float val1 = coeff[0] * y_data[i * 2 + 1] + uv_contribution;

    // Clamp result to a byte.
    output[i * 2 + 0] = static_cast<uint8_t>(
        std::round(std::max(0.0f, std::min(val0, 255.0f))));
    output[i * 2 + 1] = static_cast<uint8_t>(
        std::round(std::max(0.0f, std::min(val1, 255.0f))));
  }

  // Handle the last pixel for odd widths.
  if (y_data.size() % 2 == 1) {
    const float val = coeff[0] * y_data[y_data.size() - 1] +
                      coeff[1] * u_data[(y_data.size() - 1) / 2] +
                      coeff[2] * v_data[(y_data.size() - 1) / 2] + coeff[3];
    output[y_data.size() - 1] = static_cast<uint8_t>(
        std::round(std::max(0.0f, std::min(val, 255.0f))));
  }
}

// Helper function for AdjustColors(). This function calculates a single output
// row for either u or v, with the given color coefficients. Y, U, and V are
// assumed to be the same size, i.e. no subsampling.
void CalculateUVChannel(rtc::ArrayView<const uint8_t> y_data,
                        rtc::ArrayView<const uint8_t> u_data,
                        rtc::ArrayView<const uint8_t> v_data,
                        const std::array<float, 4>& coeff,
                        rtc::ArrayView<uint8_t> output) {
  RTC_CHECK_EQ(y_data.size(), u_data.size());
  RTC_CHECK_EQ(y_data.size(), v_data.size());
  RTC_CHECK_EQ(y_data.size(), output.size());

  for (size_t x = 0; x < y_data.size(); ++x) {
    const float val = coeff[0] * y_data[x] + coeff[1] * u_data[x] +
                      coeff[2] * v_data[x] + coeff[3];
    // Clamp result to a byte.
    output[x] = static_cast<uint8_t>(
        std::round(std::max(0.0f, std::min(val, 255.0f))));
  }
}

// Convert a frame to four vectors consisting of [y, u, v, 1].
std::vector<std::vector<uint8_t>> FlattenYuvData(
    const rtc::scoped_refptr<I420BufferInterface>& frame) {
  std::vector<std::vector<uint8_t>> result(
      4, std::vector<uint8_t>(frame->ChromaWidth() * frame->ChromaHeight()));

  // Downscale the Y plane so that all YUV planes are the same size.
  libyuv::ScalePlane(frame->DataY(), frame->StrideY(), frame->width(),
                     frame->height(), result[0].data(), frame->ChromaWidth(),
                     frame->ChromaWidth(), frame->ChromaHeight(),
                     libyuv::kFilterBox);

  libyuv::CopyPlane(frame->DataU(), frame->StrideU(), result[1].data(),
                    frame->ChromaWidth(), frame->ChromaWidth(),
                    frame->ChromaHeight());
  libyuv::CopyPlane(frame->DataV(), frame->StrideV(), result[2].data(),
                    frame->ChromaWidth(), frame->ChromaWidth(),
                    frame->ChromaHeight());

  std::fill(result[3].begin(), result[3].end(), 1u);

  return result;
}

ColorTransformationMatrix VectorToColorMatrix(
    const std::vector<std::vector<double>>& v) {
  ColorTransformationMatrix color_transformation;
  for (int i = 0; i < 3; ++i) {
    for (int j = 0; j < 4; ++j)
      color_transformation[i][j] = v[i][j];
  }
  return color_transformation;
}

}  // namespace

ColorTransformationMatrix CalculateColorTransformationMatrix(
    const rtc::scoped_refptr<I420BufferInterface>& reference_frame,
    const rtc::scoped_refptr<I420BufferInterface>& test_frame) {
  IncrementalLinearLeastSquares incremental_lls;
  incremental_lls.AddObservations(FlattenYuvData(test_frame),
                                  FlattenYuvData(reference_frame));
  return VectorToColorMatrix(incremental_lls.GetBestSolution());
}

ColorTransformationMatrix CalculateColorTransformationMatrix(
    const rtc::scoped_refptr
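// Illustrative sketch, not part of the original file: the helper name
// AdjustSinglePixel below is hypothetical. It applies a full 3x4
// ColorTransformationMatrix to one [y, u, v] pixel, i.e.
// [y', u', v']^T = M * [y, u, v, 1]^T clamped to [0, 255], which is the same
// arithmetic that CalculateYChannel()/CalculateUVChannel() above perform one
// row at a time on whole planes.
inline std::array<uint8_t, 3> AdjustSinglePixel(
    const ColorTransformationMatrix& m, uint8_t y, uint8_t u, uint8_t v) {
  std::array<uint8_t, 3> result;
  for (int i = 0; i < 3; ++i) {
    const float val = m[i][0] * y + m[i][1] * u + m[i][2] * v + m[i][3];
    // Clamp the result to a byte, matching the helpers above.
    result[i] = static_cast<uint8_t>(
        std::round(std::max(0.0f, std::min(val, 255.0f))));
  }
  return result;
}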