author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-06-12 05:35:37 +0000
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-06-12 05:35:37 +0000
commit | a90a5cba08fdf6c0ceb95101c275108a152a3aed (patch)
tree | 532507288f3defd7f4dcf1af49698bcb76034855 /third_party/libwebrtc/modules/video_coding
parent | Adding debian version 126.0.1-1. (diff)
download | firefox-a90a5cba08fdf6c0ceb95101c275108a152a3aed.tar.xz firefox-a90a5cba08fdf6c0ceb95101c275108a152a3aed.zip
Merging upstream version 127.0.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/libwebrtc/modules/video_coding')
30 files changed, 1575 insertions, 865 deletions
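Many of the hunks below migrate decoder creation from `VideoDecoderFactory::CreateVideoDecoder(format)` to the `Environment`-aware `Create(env, format)` overload, with tests building the environment via `CreateEnvironment()`. A minimal sketch of the new call pattern, assuming the WebRTC source tree (the `MakeVp8Decoder` helper is illustrative, not part of this commit):

```cpp
#include <memory>

#include "api/environment/environment.h"
#include "api/environment/environment_factory.h"
#include "api/video_codecs/sdp_video_format.h"
#include "api/video_codecs/video_decoder.h"
#include "api/video_codecs/video_decoder_factory.h"

// Create a decoder through the Environment-aware factory method that the
// tests in this diff switch to (instead of CreateVideoDecoder(format)).
std::unique_ptr<webrtc::VideoDecoder> MakeVp8Decoder(
    webrtc::VideoDecoderFactory& factory) {
  const webrtc::Environment env = webrtc::CreateEnvironment();
  return factory.Create(env, webrtc::SdpVideoFormat("VP8"));
}
```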
diff --git a/third_party/libwebrtc/modules/video_coding/BUILD.gn b/third_party/libwebrtc/modules/video_coding/BUILD.gn index 0457b818c3..db5b57dff4 100644 --- a/third_party/libwebrtc/modules/video_coding/BUILD.gn +++ b/third_party/libwebrtc/modules/video_coding/BUILD.gn @@ -124,10 +124,10 @@ rtc_library("packet_buffer") { ] } -rtc_library("h264_packet_buffer") { +rtc_library("h26x_packet_buffer") { sources = [ - "h264_packet_buffer.cc", - "h264_packet_buffer.h", + "h26x_packet_buffer.cc", + "h26x_packet_buffer.h", ] deps = [ ":codec_globals_headers", @@ -287,6 +287,8 @@ rtc_library("video_codec_interface") { "include/video_codec_interface.h", "include/video_coding_defines.h", "include/video_error_codes.h", + "include/video_error_codes_utils.cc", + "include/video_error_codes_utils.h", "video_coding_defines.cc", ] deps = [ @@ -527,6 +529,7 @@ rtc_library("webrtc_multiplex") { ":video_coding_utility", "../../api:fec_controller_api", "../../api:scoped_refptr", + "../../api/environment", "../../api/video:encoded_image", "../../api/video:video_frame", "../../api/video:video_rtp_headers", @@ -583,7 +586,10 @@ rtc_library("webrtc_vp8") { ":webrtc_vp8_scalability", ":webrtc_vp8_temporal_layers", "../../api:fec_controller_api", + "../../api:field_trials_view", "../../api:scoped_refptr", + "../../api/environment", + "../../api/transport:field_trial_based_config", "../../api/units:time_delta", "../../api/units:timestamp", "../../api/video:encoded_image", @@ -820,6 +826,8 @@ if (rtc_include_tests) { "../../api:mock_video_decoder", "../../api:mock_video_encoder", "../../api:simulcast_test_fixture_api", + "../../api/environment", + "../../api/environment:environment_factory", "../../api/video:encoded_image", "../../api/video:video_frame", "../../api/video:video_rtp_headers", @@ -932,6 +940,8 @@ if (rtc_include_tests) { ":webrtc_vp9_helpers", "../../api:array_view", "../../api:videocodec_test_fixture_api", + "../../api/environment", + "../../api/environment:environment_factory", "../../api/test/metrics:global_metrics_logger_and_exporter", "../../api/test/metrics:metric", "../../api/test/video:function_video_factory", @@ -999,6 +1009,8 @@ if (rtc_include_tests) { deps = [ ":video_codec_interface", + "../../api/environment", + "../../api/environment:environment_factory", "../../api/test/metrics:global_metrics_logger_and_exporter", "../../api/units:data_rate", "../../api/units:frequency", @@ -1008,6 +1020,8 @@ if (rtc_include_tests) { "../../modules/video_coding/svc:scalability_mode_util", "../../rtc_base:logging", "../../rtc_base:stringutils", + "../../test:explicit_key_value_config", + "../../test:field_trial", "../../test:fileutils", "../../test:test_flags", "../../test:test_main", @@ -1077,6 +1091,8 @@ if (rtc_include_tests) { "../../api:scoped_refptr", "../../api:videocodec_test_fixture_api", "../../api:videocodec_test_stats_api", + "../../api/environment", + "../../api/environment:environment_factory", "../../api/test/metrics:global_metrics_logger_and_exporter", "../../api/test/video:function_video_factory", "../../api/video:encoded_image", @@ -1152,9 +1168,9 @@ if (rtc_include_tests) { "frame_dependencies_calculator_unittest.cc", "frame_helpers_unittest.cc", "generic_decoder_unittest.cc", - "h264_packet_buffer_unittest.cc", "h264_sprop_parameter_sets_unittest.cc", "h264_sps_pps_tracker_unittest.cc", + "h26x_packet_buffer_unittest.cc", "histogram_unittest.cc", "loss_notification_controller_unittest.cc", "nack_requester_unittest.cc", @@ -1189,7 +1205,7 @@ if (rtc_include_tests) { ":encoded_frame", 
":frame_dependencies_calculator", ":frame_helpers", - ":h264_packet_buffer", + ":h26x_packet_buffer", ":nack_requester", ":packet_buffer", ":simulcast_test_fixture_impl", diff --git a/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_encoder.cc b/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_encoder.cc index 4ff22bfe34..03bb367fe0 100644 --- a/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_encoder.cc +++ b/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_encoder.cc @@ -133,6 +133,7 @@ class LibaomAv1Encoder final : public VideoEncoder { // TODO(webrtc:15225): Kill switch for disabling frame dropping. Remove it // after frame dropping is fully rolled out. bool disable_frame_dropping_; + int max_consec_frame_drop_; }; int32_t VerifyCodecSettings(const VideoCodec& codec_settings) { @@ -163,6 +164,14 @@ int32_t VerifyCodecSettings(const VideoCodec& codec_settings) { return WEBRTC_VIDEO_CODEC_OK; } +int GetMaxConsecutiveFrameDrop(const FieldTrialsView& field_trials) { + webrtc::FieldTrialParameter<int> maxdrop("maxdrop", 0); + webrtc::ParseFieldTrial( + {&maxdrop}, + field_trials.Lookup("WebRTC-LibaomAv1Encoder-MaxConsecFrameDrop")); + return maxdrop; +} + LibaomAv1Encoder::LibaomAv1Encoder( const absl::optional<LibaomAv1EncoderAuxConfig>& aux_config, const FieldTrialsView& trials) @@ -174,7 +183,8 @@ LibaomAv1Encoder::LibaomAv1Encoder( timestamp_(0), disable_frame_dropping_(absl::StartsWith( trials.Lookup("WebRTC-LibaomAv1Encoder-DisableFrameDropping"), - "Enabled")) {} + "Enabled")), + max_consec_frame_drop_(GetMaxConsecutiveFrameDrop(trials)) {} LibaomAv1Encoder::~LibaomAv1Encoder() { Release(); @@ -297,6 +307,12 @@ int LibaomAv1Encoder::InitEncode(const VideoCodec* codec_settings, SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_PALETTE, 0); } + if (codec_settings->mode == VideoCodecMode::kRealtimeVideo && + encoder_settings_.GetFrameDropEnabled() && max_consec_frame_drop_ > 0) { + SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_MAX_CONSEC_FRAME_DROP_CBR, + max_consec_frame_drop_); + } + if (cfg_.g_threads == 8) { // Values passed to AV1E_SET_TILE_ROWS and AV1E_SET_TILE_COLUMNS are log2() // based. 
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_encoder_unittest.cc b/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_encoder_unittest.cc index 04ee9162ba..127aadb275 100644 --- a/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_encoder_unittest.cc +++ b/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_encoder_unittest.cc @@ -188,6 +188,31 @@ TEST(LibaomAv1EncoderTest, CheckOddDimensionsWithSpatialLayers) { ASSERT_THAT(encoded_frames, SizeIs(6)); } +TEST(LibaomAv1EncoderTest, WithMaximumConsecutiveFrameDrop) { + test::ScopedFieldTrials field_trials( + "WebRTC-LibaomAv1Encoder-MaxConsecFrameDrop/maxdrop:2/"); + VideoBitrateAllocation allocation; + allocation.SetBitrate(0, 0, 1000); // some very low bitrate + std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder(); + VideoCodec codec_settings = DefaultCodecSettings(); + codec_settings.SetFrameDropEnabled(true); + codec_settings.SetScalabilityMode(ScalabilityMode::kL1T1); + codec_settings.startBitrate = allocation.get_sum_kbps(); + ASSERT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()), + WEBRTC_VIDEO_CODEC_OK); + encoder->SetRates(VideoEncoder::RateControlParameters( + allocation, codec_settings.maxFramerate)); + EncodedVideoFrameProducer evfp(*encoder); + evfp.SetResolution( + RenderResolution{codec_settings.width, codec_settings.height}); + // We should code the first frame, skip two, then code another frame. + std::vector<EncodedVideoFrameProducer::EncodedFrame> encoded_frames = + evfp.SetNumInputFrames(4).Encode(); + ASSERT_THAT(encoded_frames, SizeIs(2)); + // The 4 frames have default Rtp-timestamps of 1000, 4000, 7000, 10000. + ASSERT_THAT(encoded_frames[1].encoded_image.RtpTimestamp(), 10000); +} + TEST(LibaomAv1EncoderTest, EncoderInfoWithoutResolutionBitrateLimits) { std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder(); EXPECT_TRUE(encoder->GetEncoderInfo().resolution_bitrate_limits.empty()); diff --git a/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_unittest.cc b/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_unittest.cc index d486c1d062..6a135e2bab 100644 --- a/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_unittest.cc +++ b/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_unittest.cc @@ -62,6 +62,7 @@ VideoCodec DefaultCodecSettings() { codec_settings.height = kHeight; codec_settings.maxFramerate = kFramerate; codec_settings.maxBitrate = 1000; + codec_settings.startBitrate = 1; codec_settings.qpMax = 63; return codec_settings; } diff --git a/third_party/libwebrtc/modules/video_coding/codecs/multiplex/include/multiplex_decoder_adapter.h b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/include/multiplex_decoder_adapter.h index d58981e4b2..ed02f2d72b 100644 --- a/third_party/libwebrtc/modules/video_coding/codecs/multiplex/include/multiplex_decoder_adapter.h +++ b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/include/multiplex_decoder_adapter.h @@ -15,6 +15,7 @@ #include <memory> #include <vector> +#include "api/environment/environment.h" #include "api/video_codecs/sdp_video_format.h" #include "api/video_codecs/video_decoder.h" #include "api/video_codecs/video_decoder_factory.h" @@ -25,7 +26,8 @@ namespace webrtc { class MultiplexDecoderAdapter : public VideoDecoder { public: // `factory` is not owned and expected to outlive this class. 
- MultiplexDecoderAdapter(VideoDecoderFactory* factory, + MultiplexDecoderAdapter(const Environment& env, + VideoDecoderFactory* factory, const SdpVideoFormat& associated_format, bool supports_augmenting_data = false); virtual ~MultiplexDecoderAdapter(); @@ -62,6 +64,7 @@ class MultiplexDecoderAdapter : public VideoDecoder { std::unique_ptr<uint8_t[]> augmenting_data, uint16_t augmenting_data_length); + const Environment env_; VideoDecoderFactory* const factory_; const SdpVideoFormat associated_format_; std::vector<std::unique_ptr<VideoDecoder>> decoders_; diff --git a/third_party/libwebrtc/modules/video_coding/codecs/multiplex/multiplex_decoder_adapter.cc b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/multiplex_decoder_adapter.cc index 551a9490b0..7cebbe14d0 100644 --- a/third_party/libwebrtc/modules/video_coding/codecs/multiplex/multiplex_decoder_adapter.cc +++ b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/multiplex_decoder_adapter.cc @@ -10,6 +10,7 @@ #include "modules/video_coding/codecs/multiplex/include/multiplex_decoder_adapter.h" +#include "api/environment/environment.h" #include "api/video/encoded_image.h" #include "api/video/i420_buffer.h" #include "api/video/video_frame_buffer.h" @@ -93,10 +94,12 @@ struct MultiplexDecoderAdapter::AugmentingData { }; MultiplexDecoderAdapter::MultiplexDecoderAdapter( + const Environment& env, VideoDecoderFactory* factory, const SdpVideoFormat& associated_format, bool supports_augmenting_data) - : factory_(factory), + : env_(env), + factory_(factory), associated_format_(associated_format), supports_augmenting_data_(supports_augmenting_data) {} @@ -111,7 +114,7 @@ bool MultiplexDecoderAdapter::Configure(const Settings& settings) { PayloadStringToCodecType(associated_format_.name)); for (size_t i = 0; i < kAlphaCodecStreams; ++i) { std::unique_ptr<VideoDecoder> decoder = - factory_->CreateVideoDecoder(associated_format_); + factory_->Create(env_, associated_format_); if (!decoder->Configure(associated_settings)) { return false; } diff --git a/third_party/libwebrtc/modules/video_coding/codecs/multiplex/test/multiplex_adapter_unittest.cc b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/test/multiplex_adapter_unittest.cc index a2f36a306d..9c6300e368 100644 --- a/third_party/libwebrtc/modules/video_coding/codecs/multiplex/test/multiplex_adapter_unittest.cc +++ b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/test/multiplex_adapter_unittest.cc @@ -16,6 +16,8 @@ #include <vector> #include "absl/types/optional.h" +#include "api/environment/environment.h" +#include "api/environment/environment_factory.h" #include "api/scoped_refptr.h" #include "api/test/mock_video_decoder_factory.h" #include "api/test/mock_video_encoder_factory.h" @@ -63,7 +65,8 @@ class TestMultiplexAdapter : public VideoCodecUnitTest, protected: std::unique_ptr<VideoDecoder> CreateDecoder() override { return std::make_unique<MultiplexDecoderAdapter>( - decoder_factory_.get(), SdpVideoFormat(kMultiplexAssociatedCodecName), + env_, decoder_factory_.get(), + SdpVideoFormat(kMultiplexAssociatedCodecName), supports_augmenting_data_); } @@ -182,9 +185,9 @@ class TestMultiplexAdapter : public VideoCodecUnitTest, EXPECT_CALL(*decoder_factory_, Die); // The decoders/encoders will be owned by the caller of // CreateVideoDecoder()/CreateVideoEncoder(). 
- EXPECT_CALL(*decoder_factory_, CreateVideoDecoder) - .Times(2) - .WillRepeatedly([] { return VP9Decoder::Create(); }); + EXPECT_CALL(*decoder_factory_, Create).Times(2).WillRepeatedly([] { + return VP9Decoder::Create(); + }); EXPECT_CALL(*encoder_factory_, Die); EXPECT_CALL(*encoder_factory_, CreateVideoEncoder) @@ -194,6 +197,7 @@ class TestMultiplexAdapter : public VideoCodecUnitTest, VideoCodecUnitTest::SetUp(); } + const Environment env_ = CreateEnvironment(); const std::unique_ptr<webrtc::MockVideoDecoderFactory> decoder_factory_; const std::unique_ptr<webrtc::MockVideoEncoderFactory> encoder_factory_; const bool supports_augmenting_data_; diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_test.cc b/third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_test.cc index 2ab1106a59..0811685e33 100644 --- a/third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_test.cc +++ b/third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_test.cc @@ -14,6 +14,8 @@ #include "absl/flags/flag.h" #include "absl/functional/any_invocable.h" +#include "api/environment/environment.h" +#include "api/environment/environment_factory.h" #include "api/test/metrics/global_metrics_logger_and_exporter.h" #include "api/units/data_rate.h" #include "api/units/frequency.h" @@ -26,6 +28,8 @@ #include "modules/video_coding/svc/scalability_mode_util.h" #include "rtc_base/logging.h" #include "rtc_base/strings/string_builder.h" +#include "test/explicit_key_value_config.h" +#include "test/field_trial.h" #include "test/gtest.h" #include "test/test_flags.h" #include "test/testsupport/file_utils.h" @@ -58,6 +62,7 @@ ABSL_FLAG(double, 30.0, "Encode target frame rate of the top temporal layer in fps."); ABSL_FLAG(int, num_frames, 300, "Number of frames to encode and/or decode."); +ABSL_FLAG(std::string, field_trials, "", "Field trials to apply."); ABSL_FLAG(std::string, test_name, "", "Test name."); ABSL_FLAG(bool, dump_decoder_input, false, "Dump decoder input."); ABSL_FLAG(bool, dump_decoder_output, false, "Dump decoder output."); @@ -178,6 +183,7 @@ std::string TestOutputPath() { } // namespace std::unique_ptr<VideoCodecStats> RunEncodeDecodeTest( + const Environment& env, std::string encoder_impl, std::string decoder_impl, const VideoInfo& video_info, @@ -247,7 +253,7 @@ std::unique_ptr<VideoCodecStats> RunEncodeDecodeTest( } return VideoCodecTester::RunEncodeDecodeTest( - source_settings, encoder_factory.get(), decoder_factory.get(), + env, source_settings, encoder_factory.get(), decoder_factory.get(), encoder_settings, decoder_settings, encoding_settings); } @@ -313,6 +319,7 @@ class SpatialQualityTest : public ::testing::TestWithParam<std::tuple< }; TEST_P(SpatialQualityTest, SpatialQuality) { + const Environment env = CreateEnvironment(); auto [codec_type, codec_impl, video_info, coding_settings] = GetParam(); auto [width, height, framerate_fps, bitrate_kbps, expected_min_psnr] = coding_settings; @@ -324,8 +331,8 @@ TEST_P(SpatialQualityTest, SpatialQuality) { codec_type, /*scalability_mode=*/"L1T1", width, height, {bitrate_kbps}, framerate_fps, num_frames); - std::unique_ptr<VideoCodecStats> stats = - RunEncodeDecodeTest(codec_impl, codec_impl, video_info, frames_settings); + std::unique_ptr<VideoCodecStats> stats = RunEncodeDecodeTest( + env, codec_impl, codec_impl, video_info, frames_settings); VideoCodecStats::Stream stream; if (stats != nullptr) { @@ -527,6 +534,11 @@ INSTANTIATE_TEST_SUITE_P( FramerateAdaptationTest::TestParamsToString); 
TEST(VideoCodecTest, DISABLED_EncodeDecode) { + ScopedFieldTrials field_trials(absl::GetFlag(FLAGS_field_trials)); + const Environment env = + CreateEnvironment(std::make_unique<ExplicitKeyValueConfig>( + absl::GetFlag(FLAGS_field_trials))); + std::vector<std::string> bitrate_str = absl::GetFlag(FLAGS_bitrate_kbps); std::vector<int> bitrate_kbps; std::transform(bitrate_str.begin(), bitrate_str.end(), @@ -544,7 +556,7 @@ TEST(VideoCodecTest, DISABLED_EncodeDecode) { // logged test name (implies lossing history in the chromeperf dashboard). // Sync with changes in Stream::LogMetrics (see TODOs there). std::unique_ptr<VideoCodecStats> stats = RunEncodeDecodeTest( - CodecNameToCodecImpl(absl::GetFlag(FLAGS_encoder)), + env, CodecNameToCodecImpl(absl::GetFlag(FLAGS_encoder)), CodecNameToCodecImpl(absl::GetFlag(FLAGS_decoder)), kRawVideos.at(absl::GetFlag(FLAGS_video_name)), frames_settings); ASSERT_NE(nullptr, stats); diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/video_encoder_decoder_instantiation_tests.cc b/third_party/libwebrtc/modules/video_coding/codecs/test/video_encoder_decoder_instantiation_tests.cc index 41f2304748..581750768d 100644 --- a/third_party/libwebrtc/modules/video_coding/codecs/test/video_encoder_decoder_instantiation_tests.cc +++ b/third_party/libwebrtc/modules/video_coding/codecs/test/video_encoder_decoder_instantiation_tests.cc @@ -11,6 +11,8 @@ #include <memory> #include <vector> +#include "api/environment/environment.h" +#include "api/environment/environment_factory.h" #include "api/video_codecs/sdp_video_format.h" #include "api/video_codecs/video_decoder.h" #include "api/video_codecs/video_decoder_factory.h" @@ -86,6 +88,8 @@ class VideoEncoderDecoderInstantiationTest } } + const Environment env_ = CreateEnvironment(); + const SdpVideoFormat vp8_format_; const SdpVideoFormat vp9_format_; const SdpVideoFormat h264cbp_format_; @@ -126,7 +130,7 @@ TEST_P(VideoEncoderDecoderInstantiationTest, DISABLED_InstantiateVp8Codecs) { for (int i = 0; i < num_decoders_; ++i) { std::unique_ptr<VideoDecoder> decoder = - decoder_factory_->CreateVideoDecoder(vp8_format_); + decoder_factory_->Create(env_, vp8_format_); ASSERT_THAT(decoder, NotNull()); EXPECT_TRUE(decoder->Configure(DecoderSettings(kVideoCodecVP8))); decoders_.emplace_back(std::move(decoder)); @@ -144,7 +148,7 @@ TEST_P(VideoEncoderDecoderInstantiationTest, for (int i = 0; i < num_decoders_; ++i) { std::unique_ptr<VideoDecoder> decoder = - decoder_factory_->CreateVideoDecoder(h264cbp_format_); + decoder_factory_->Create(env_, h264cbp_format_); ASSERT_THAT(decoder, NotNull()); EXPECT_TRUE(decoder->Configure(DecoderSettings(kVideoCodecH264))); decoders_.push_back(std::move(decoder)); diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_fixture_impl.cc b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_fixture_impl.cc index 35355d4387..508ac384b0 100644 --- a/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_fixture_impl.cc +++ b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_fixture_impl.cc @@ -24,6 +24,8 @@ #include "absl/strings/string_view.h" #include "absl/types/optional.h" #include "api/array_view.h" +#include "api/environment/environment.h" +#include "api/environment/environment_factory.h" #include "api/test/metrics/global_metrics_logger_and_exporter.h" #include "api/test/metrics/metric.h" #include "api/transport/field_trial_based_config.h" @@ -685,6 +687,8 @@ void 
VideoCodecTestFixtureImpl::VerifyVideoStatistic( } bool VideoCodecTestFixtureImpl::CreateEncoderAndDecoder() { + const Environment env = CreateEnvironment(); + SdpVideoFormat encoder_format(CreateSdpVideoFormat(config_)); SdpVideoFormat decoder_format = encoder_format; @@ -709,7 +713,7 @@ bool VideoCodecTestFixtureImpl::CreateEncoderAndDecoder() { config_.NumberOfSimulcastStreams(), config_.NumberOfSpatialLayers()); for (size_t i = 0; i < num_simulcast_or_spatial_layers; ++i) { std::unique_ptr<VideoDecoder> decoder = - decoder_factory_->CreateVideoDecoder(decoder_format); + decoder_factory_->Create(env, decoder_format); EXPECT_TRUE(decoder) << "Decoder not successfully created."; if (decoder == nullptr) { return false; diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/include/vp8.h b/third_party/libwebrtc/modules/video_coding/codecs/vp8/include/vp8.h index 2fc647874f..45b7cee00a 100644 --- a/third_party/libwebrtc/modules/video_coding/codecs/vp8/include/vp8.h +++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/include/vp8.h @@ -14,6 +14,7 @@ #include <memory> #include <vector> +#include "api/environment/environment.h" #include "api/video_codecs/video_encoder.h" #include "api/video_codecs/vp8_frame_buffer_controller.h" #include "modules/video_coding/include/video_codec_interface.h" @@ -40,11 +41,15 @@ class VP8Encoder { static std::unique_ptr<VideoEncoder> Create(Settings settings); }; +// TODO: bugs.webrtc.org/15791 - Deprecate and delete in favor of the +// CreateVp8Decoder function. class VP8Decoder { public: static std::unique_ptr<VideoDecoder> Create(); }; +std::unique_ptr<VideoDecoder> CreateVp8Decoder(const Environment& env); + } // namespace webrtc #endif // MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_H_ diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc index 9b77388f10..4c06aca5ad 100644 --- a/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc +++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc @@ -18,7 +18,10 @@ #include <string> #include "absl/types/optional.h" +#include "api/environment/environment.h" +#include "api/field_trials_view.h" #include "api/scoped_refptr.h" +#include "api/transport/field_trial_based_config.h" #include "api/video/i420_buffer.h" #include "api/video/video_frame.h" #include "api/video/video_frame_buffer.h" @@ -28,7 +31,6 @@ #include "rtc_base/checks.h" #include "rtc_base/numerics/exp_filter.h" #include "rtc_base/time_utils.h" -#include "system_wrappers/include/field_trial.h" #include "system_wrappers/include/metrics.h" #include "third_party/libyuv/include/libyuv/convert.h" #include "vpx/vp8.h" @@ -59,9 +61,9 @@ absl::optional<LibvpxVp8Decoder::DeblockParams> DefaultDeblockParams() { } absl::optional<LibvpxVp8Decoder::DeblockParams> -GetPostProcParamsFromFieldTrialGroup() { - std::string group = webrtc::field_trial::FindFullName( - kIsArm ? kVp8PostProcArmFieldTrial : kVp8PostProcFieldTrial); +GetPostProcParamsFromFieldTrialGroup(const FieldTrialsView& field_trials) { + std::string group = field_trials.Lookup(kIsArm ? 
kVp8PostProcArmFieldTrial + : kVp8PostProcFieldTrial); if (group.empty()) { return DefaultDeblockParams(); } @@ -89,6 +91,10 @@ std::unique_ptr<VideoDecoder> VP8Decoder::Create() { return std::make_unique<LibvpxVp8Decoder>(); } +std::unique_ptr<VideoDecoder> CreateVp8Decoder(const Environment& env) { + return std::make_unique<LibvpxVp8Decoder>(env); +} + class LibvpxVp8Decoder::QpSmoother { public: QpSmoother() : last_sample_ms_(rtc::TimeMillis()), smoother_(kAlpha) {} @@ -114,9 +120,14 @@ class LibvpxVp8Decoder::QpSmoother { }; LibvpxVp8Decoder::LibvpxVp8Decoder() - : use_postproc_( - kIsArm ? webrtc::field_trial::IsEnabled(kVp8PostProcArmFieldTrial) - : true), + : LibvpxVp8Decoder(FieldTrialBasedConfig()) {} + +LibvpxVp8Decoder::LibvpxVp8Decoder(const Environment& env) + : LibvpxVp8Decoder(env.field_trials()) {} + +LibvpxVp8Decoder::LibvpxVp8Decoder(const FieldTrialsView& field_trials) + : use_postproc_(kIsArm ? field_trials.IsEnabled(kVp8PostProcArmFieldTrial) + : true), buffer_pool_(false, 300 /* max_number_of_buffers*/), decode_complete_callback_(NULL), inited_(false), @@ -124,8 +135,9 @@ LibvpxVp8Decoder::LibvpxVp8Decoder() last_frame_width_(0), last_frame_height_(0), key_frame_required_(true), - deblock_params_(use_postproc_ ? GetPostProcParamsFromFieldTrialGroup() - : absl::nullopt), + deblock_params_(use_postproc_ + ? GetPostProcParamsFromFieldTrialGroup(field_trials) + : absl::nullopt), qp_smoother_(use_postproc_ ? new QpSmoother() : nullptr) {} LibvpxVp8Decoder::~LibvpxVp8Decoder() { diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.h b/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.h index 74f4dc7c89..8ed8e7ca88 100644 --- a/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.h +++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.h @@ -14,6 +14,8 @@ #include <memory> #include "absl/types/optional.h" +#include "api/environment/environment.h" +#include "api/field_trials_view.h" #include "api/video/encoded_image.h" #include "api/video_codecs/video_decoder.h" #include "common_video/include/video_frame_buffer_pool.h" @@ -26,7 +28,10 @@ namespace webrtc { class LibvpxVp8Decoder : public VideoDecoder { public: + // TODO: bugs.webrtc.org/15791 - Delete default constructor when + // Environment is always propagated. 
LibvpxVp8Decoder(); + explicit LibvpxVp8Decoder(const Environment& env); ~LibvpxVp8Decoder() override; bool Configure(const Settings& settings) override; @@ -56,6 +61,7 @@ class LibvpxVp8Decoder : public VideoDecoder { private: class QpSmoother; + explicit LibvpxVp8Decoder(const FieldTrialsView& field_trials); int ReturnFrame(const vpx_image_t* img, uint32_t timeStamp, int qp, diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_simulcast_test.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_simulcast_test.cc index 4ca3de20d5..3f13066892 100644 --- a/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_simulcast_test.cc +++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_simulcast_test.cc @@ -27,7 +27,9 @@ std::unique_ptr<SimulcastTestFixture> CreateSpecificSimulcastTestFixture() { []() { return VP8Encoder::Create(); }); std::unique_ptr<VideoDecoderFactory> decoder_factory = std::make_unique<FunctionVideoDecoderFactory>( - []() { return VP8Decoder::Create(); }); + [](const Environment& env, const SdpVideoFormat& format) { + return CreateVp8Decoder(env); + }); return CreateSimulcastTestFixture(std::move(encoder_factory), std::move(decoder_factory), SdpVideoFormat("VP8")); diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc index a6f570f855..514d3d7e1d 100644 --- a/third_party/libwebrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc +++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc @@ -13,6 +13,7 @@ #include <algorithm> #include <memory> +#include "api/environment/environment_factory.h" #include "api/test/create_frame_generator.h" #include "api/test/frame_generator_interface.h" #include "api/test/mock_video_decoder.h" @@ -70,7 +71,7 @@ class TestVp8Impl : public VideoCodecUnitTest { } std::unique_ptr<VideoDecoder> CreateDecoder() override { - return VP8Decoder::Create(); + return CreateVp8Decoder(CreateEnvironment()); } void ModifyCodecSettings(VideoCodec* codec_settings) override { diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.cc index 5330eb7e8c..edbe781639 100644 --- a/third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.cc +++ b/third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.cc @@ -267,7 +267,8 @@ LibvpxVp9Encoder::LibvpxVp9Encoder(const cricket::VideoCodec& codec, "Disabled")), performance_flags_(ParsePerformanceFlagsFromTrials(trials)), num_steady_state_frames_(0), - config_changed_(true) { + config_changed_(true), + svc_frame_drop_config_(ParseSvcFrameDropConfig(trials)) { codec_ = {}; memset(&svc_params_, 0, sizeof(vpx_svc_extra_cfg_t)); } @@ -838,6 +839,8 @@ int LibvpxVp9Encoder::InitAndSetControlSettings(const VideoCodec* inst) { // 1:2 scaling in each dimension. svc_params_.scaling_factor_num[i] = scaling_factor_num; svc_params_.scaling_factor_den[i] = 256; + if (inst->mode != VideoCodecMode::kScreensharing) + scaling_factor_num /= 2; } } @@ -924,11 +927,24 @@ int LibvpxVp9Encoder::InitAndSetControlSettings(const VideoCodec* inst) { svc_drop_frame_.framedrop_thresh[i] = config_->rc_dropframe_thresh; } } else { - // Configure encoder to drop entire superframe whenever it needs to drop - // a layer. 
This mode is preferred over per-layer dropping which causes - // quality flickering and is not compatible with RTP non-flexible mode. - svc_drop_frame_.framedrop_mode = FULL_SUPERFRAME_DROP; - svc_drop_frame_.max_consec_drop = std::numeric_limits<int>::max(); + if (svc_frame_drop_config_.enabled && + svc_frame_drop_config_.layer_drop_mode == LAYER_DROP && + is_flexible_mode_ && svc_controller_ && + (inter_layer_pred_ == InterLayerPredMode::kOff || + inter_layer_pred_ == InterLayerPredMode::kOnKeyPic)) { + // SVC controller is required since it properly accounts for dropped + // refs (unlike SetReferences(), which assumes full superframe drop). + svc_drop_frame_.framedrop_mode = LAYER_DROP; + } else { + // Configure encoder to drop entire superframe whenever it needs to drop + // a layer. This mode is preferred over per-layer dropping which causes + // quality flickering and is not compatible with RTP non-flexible mode. + svc_drop_frame_.framedrop_mode = FULL_SUPERFRAME_DROP; + } + svc_drop_frame_.max_consec_drop = + svc_frame_drop_config_.enabled + ? svc_frame_drop_config_.max_consec_drop + : std::numeric_limits<int>::max(); for (size_t i = 0; i < num_spatial_layers_; ++i) { svc_drop_frame_.framedrop_thresh[i] = config_->rc_dropframe_thresh; } @@ -1960,6 +1976,26 @@ LibvpxVp9Encoder::ParseQualityScalerConfig(const FieldTrialsView& trials) { return config; } +LibvpxVp9Encoder::SvcFrameDropConfig LibvpxVp9Encoder::ParseSvcFrameDropConfig( + const FieldTrialsView& trials) { + FieldTrialFlag enabled = FieldTrialFlag("Enabled"); + FieldTrialParameter<int> layer_drop_mode("layer_drop_mode", + FULL_SUPERFRAME_DROP); + FieldTrialParameter<int> max_consec_drop("max_consec_drop", + std::numeric_limits<int>::max()); + ParseFieldTrial({&enabled, &layer_drop_mode, &max_consec_drop}, + trials.Lookup("WebRTC-LibvpxVp9Encoder-SvcFrameDropConfig")); + SvcFrameDropConfig config; + config.enabled = enabled.Get(); + config.layer_drop_mode = layer_drop_mode.Get(); + config.max_consec_drop = max_consec_drop.Get(); + RTC_LOG(LS_INFO) << "Libvpx VP9 encoder SVC frame drop config: " + << (config.enabled ? 
"enabled" : "disabled") + << " layer_drop_mode " << config.layer_drop_mode + << " max_consec_drop " << config.max_consec_drop; + return config; +} + void LibvpxVp9Encoder::UpdatePerformanceFlags() { flat_map<int, PerformanceFlags::ParameterSet> params_by_resolution; if (codec_.GetVideoEncoderComplexity() == diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.h b/third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.h index 1953923f81..ea4e5810ac 100644 --- a/third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.h +++ b/third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.h @@ -240,6 +240,14 @@ class LibvpxVp9Encoder : public VP9Encoder { bool config_changed_; const LibvpxVp9EncoderInfoSettings encoder_info_override_; + + const struct SvcFrameDropConfig { + bool enabled; + int layer_drop_mode; // SVC_LAYER_DROP_MODE + int max_consec_drop; + } svc_frame_drop_config_; + static SvcFrameDropConfig ParseSvcFrameDropConfig( + const FieldTrialsView& trials); }; } // namespace webrtc diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp9/svc_config.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp9/svc_config.cc index 7af8cab3cb..555af835a5 100644 --- a/third_party/libwebrtc/modules/video_coding/codecs/vp9/svc_config.cc +++ b/third_party/libwebrtc/modules/video_coding/codecs/vp9/svc_config.cc @@ -190,6 +190,9 @@ std::vector<SpatialLayer> GetVp9SvcConfig(VideoCodec& codec) { codec.SetScalabilityMode(limited_scalability_mode); } + codec.VP9()->interLayerPred = + ScalabilityModeToInterLayerPredMode(*scalability_mode); + absl::optional<ScalableVideoController::StreamLayersConfig> info = ScalabilityStructureConfig(*scalability_mode); if (!info.has_value()) { diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp9/svc_config_unittest.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp9/svc_config_unittest.cc index 1b1abe0f6d..2515b1ce4b 100644 --- a/third_party/libwebrtc/modules/video_coding/codecs/vp9/svc_config_unittest.cc +++ b/third_party/libwebrtc/modules/video_coding/codecs/vp9/svc_config_unittest.cc @@ -13,6 +13,7 @@ #include <cstddef> #include <vector> +#include "api/video_codecs/video_encoder.h" #include "modules/video_coding/codecs/vp9/include/vp9_globals.h" #include "test/gmock.h" #include "test/gtest.h" @@ -65,6 +66,25 @@ TEST(SvcConfig, NumSpatialLayersWithScalabilityMode) { EXPECT_EQ(codec.GetScalabilityMode(), ScalabilityMode::kL3T3_KEY); } +TEST(SvcConfig, UpdatesInterLayerPredModeBasedOnScalabilityMode) { + VideoCodec codec; + codec.codecType = kVideoCodecVP9; + codec.width = 1280; + codec.height = 720; + codec.SetScalabilityMode(ScalabilityMode::kL3T3_KEY); + + std::vector<SpatialLayer> spatial_layers = GetVp9SvcConfig(codec); + EXPECT_EQ(codec.VP9()->interLayerPred, InterLayerPredMode::kOnKeyPic); + + codec.SetScalabilityMode(ScalabilityMode::kL3T3); + spatial_layers = GetVp9SvcConfig(codec); + EXPECT_EQ(codec.VP9()->interLayerPred, InterLayerPredMode::kOn); + + codec.SetScalabilityMode(ScalabilityMode::kS3T3); + spatial_layers = GetVp9SvcConfig(codec); + EXPECT_EQ(codec.VP9()->interLayerPred, InterLayerPredMode::kOff); +} + TEST(SvcConfig, NumSpatialLayersLimitedWithScalabilityMode) { VideoCodec codec; codec.codecType = kVideoCodecVP9; diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp9/test/vp9_impl_unittest.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp9/test/vp9_impl_unittest.cc index 993fd245ad..50e9cf2369 100644 --- 
a/third_party/libwebrtc/modules/video_coding/codecs/vp9/test/vp9_impl_unittest.cc +++ b/third_party/libwebrtc/modules/video_coding/codecs/vp9/test/vp9_impl_unittest.cc @@ -2459,4 +2459,113 @@ TEST(Vp9SpeedSettingsTrialsTest, DefaultPerLayerFlagsWithSvc) { } } +struct SvcFrameDropConfigTestParameters { + bool flexible_mode; + absl::optional<ScalabilityMode> scalability_mode; + std::string field_trial; + int expected_framedrop_mode; + int expected_max_consec_drop; +}; + +class TestVp9ImplSvcFrameDropConfig + : public ::testing::TestWithParam<SvcFrameDropConfigTestParameters> {}; + +TEST_P(TestVp9ImplSvcFrameDropConfig, SvcFrameDropConfig) { + SvcFrameDropConfigTestParameters test_params = GetParam(); + auto* const vpx = new NiceMock<MockLibvpxInterface>(); + LibvpxVp9Encoder encoder( + cricket::CreateVideoCodec(cricket::kVp9CodecName), + absl::WrapUnique<LibvpxInterface>(vpx), + test::ExplicitKeyValueConfig(test_params.field_trial)); + + vpx_image_t img; + ON_CALL(*vpx, img_wrap).WillByDefault(GetWrapImageFunction(&img)); + + EXPECT_CALL(*vpx, + codec_control(_, VP9E_SET_SVC_FRAME_DROP_LAYER, + SafeMatcherCast<vpx_svc_frame_drop_t*>(AllOf( + Field(&vpx_svc_frame_drop_t::framedrop_mode, + test_params.expected_framedrop_mode), + Field(&vpx_svc_frame_drop_t::max_consec_drop, + test_params.expected_max_consec_drop))))); + + VideoCodec settings = DefaultCodecSettings(); + settings.VP9()->flexibleMode = test_params.flexible_mode; + if (test_params.scalability_mode.has_value()) { + settings.SetScalabilityMode(*test_params.scalability_mode); + } + settings.VP9()->numberOfSpatialLayers = + 3; // to execute SVC code paths even when scalability_mode is not set. + + EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder.InitEncode(&settings, kSettings)); +} + +INSTANTIATE_TEST_SUITE_P( + All, + TestVp9ImplSvcFrameDropConfig, + ::testing::Values( + // Flexible mode is disabled. Layer drop is not allowed. Ignore + // layer_drop_mode from field trial. + SvcFrameDropConfigTestParameters{ + .flexible_mode = false, + .scalability_mode = ScalabilityMode::kL3T3_KEY, + .field_trial = "WebRTC-LibvpxVp9Encoder-SvcFrameDropConfig/" + "Enabled,layer_drop_mode:1,max_consec_drop:7/", + .expected_framedrop_mode = FULL_SUPERFRAME_DROP, + .expected_max_consec_drop = 7}, + // Flexible mode is enabled but the field trial is not set. Use default + // settings. + SvcFrameDropConfigTestParameters{ + .flexible_mode = true, + .scalability_mode = ScalabilityMode::kL3T3_KEY, + .field_trial = "", + .expected_framedrop_mode = FULL_SUPERFRAME_DROP, + .expected_max_consec_drop = std::numeric_limits<int>::max()}, + // Flexible mode is enabled but the field trial is disabled. Use default + // settings. + SvcFrameDropConfigTestParameters{ + .flexible_mode = true, + .scalability_mode = ScalabilityMode::kL3T3_KEY, + .field_trial = "WebRTC-LibvpxVp9Encoder-SvcFrameDropConfig/" + "Disabled,layer_drop_mode:1,max_consec_drop:7/", + .expected_framedrop_mode = FULL_SUPERFRAME_DROP, + .expected_max_consec_drop = std::numeric_limits<int>::max()}, + // Flexible mode is enabled, layer drop is enabled, KSVC. Apply config + // from field trial. + SvcFrameDropConfigTestParameters{ + .flexible_mode = true, + .scalability_mode = ScalabilityMode::kL3T3_KEY, + .field_trial = "WebRTC-LibvpxVp9Encoder-SvcFrameDropConfig/" + "Enabled,layer_drop_mode:1,max_consec_drop:7/", + .expected_framedrop_mode = LAYER_DROP, + .expected_max_consec_drop = 7}, + // Flexible mode is enabled, layer drop is enabled, simulcast. Apply + // config from field trial. 
+ SvcFrameDropConfigTestParameters{ + .flexible_mode = true, + .scalability_mode = ScalabilityMode::kS3T3, + .field_trial = "WebRTC-LibvpxVp9Encoder-SvcFrameDropConfig/" + "Enabled,layer_drop_mode:1,max_consec_drop:7/", + .expected_framedrop_mode = LAYER_DROP, + .expected_max_consec_drop = 7}, + // Flexible mode is enabled, layer drop is enabled, full SVC. Apply + // config from field trial. + SvcFrameDropConfigTestParameters{ + .flexible_mode = false, + .scalability_mode = ScalabilityMode::kL3T3, + .field_trial = "WebRTC-LibvpxVp9Encoder-SvcFrameDropConfig/" + "Enabled,layer_drop_mode:1,max_consec_drop:7/", + .expected_framedrop_mode = FULL_SUPERFRAME_DROP, + .expected_max_consec_drop = 7}, + // Flexible mode is enabled, layer-drop is enabled, scalability mode is + // not set (i.e., SVC controller is not enabled). Ignore layer_drop_mode + // from field trial. + SvcFrameDropConfigTestParameters{ + .flexible_mode = true, + .scalability_mode = absl::nullopt, + .field_trial = "WebRTC-LibvpxVp9Encoder-SvcFrameDropConfig/" + "Enabled,layer_drop_mode:1,max_consec_drop:7/", + .expected_framedrop_mode = FULL_SUPERFRAME_DROP, + .expected_max_consec_drop = 7})); + } // namespace webrtc diff --git a/third_party/libwebrtc/modules/video_coding/h264_packet_buffer_unittest.cc b/third_party/libwebrtc/modules/video_coding/h264_packet_buffer_unittest.cc deleted file mode 100644 index 4f2331da28..0000000000 --- a/third_party/libwebrtc/modules/video_coding/h264_packet_buffer_unittest.cc +++ /dev/null @@ -1,778 +0,0 @@ -/* - * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ -#include "modules/video_coding/h264_packet_buffer.h" - -#include <cstring> -#include <limits> -#include <ostream> -#include <string> -#include <utility> - -#include "api/array_view.h" -#include "api/video/render_resolution.h" -#include "common_video/h264/h264_common.h" -#include "rtc_base/system/unused.h" -#include "test/gmock.h" -#include "test/gtest.h" - -namespace webrtc { -namespace { - -using ::testing::ElementsAreArray; -using ::testing::Eq; -using ::testing::IsEmpty; -using ::testing::SizeIs; - -using H264::NaluType::kAud; -using H264::NaluType::kFuA; -using H264::NaluType::kIdr; -using H264::NaluType::kPps; -using H264::NaluType::kSlice; -using H264::NaluType::kSps; -using H264::NaluType::kStapA; - -constexpr int kBufferSize = 2048; - -std::vector<uint8_t> StartCode() { - return {0, 0, 0, 1}; -} - -NaluInfo MakeNaluInfo(uint8_t type) { - NaluInfo res; - res.type = type; - res.sps_id = -1; - res.pps_id = -1; - return res; -} - -class Packet { - public: - explicit Packet(H264PacketizationTypes type); - - Packet& Idr(std::vector<uint8_t> payload = {9, 9, 9}); - Packet& Slice(std::vector<uint8_t> payload = {9, 9, 9}); - Packet& Sps(std::vector<uint8_t> payload = {9, 9, 9}); - Packet& SpsWithResolution(RenderResolution resolution, - std::vector<uint8_t> payload = {9, 9, 9}); - Packet& Pps(std::vector<uint8_t> payload = {9, 9, 9}); - Packet& Aud(); - Packet& Marker(); - Packet& AsFirstFragment(); - Packet& Time(uint32_t rtp_timestamp); - Packet& SeqNum(uint16_t rtp_seq_num); - - std::unique_ptr<H264PacketBuffer::Packet> Build(); - - private: - rtc::CopyOnWriteBuffer BuildFuaPayload() const; - rtc::CopyOnWriteBuffer BuildSingleNaluPayload() const; - rtc::CopyOnWriteBuffer BuildStapAPayload() const; - - RTPVideoHeaderH264& H264Header() { - return absl::get<RTPVideoHeaderH264>(video_header_.video_type_header); - } - const RTPVideoHeaderH264& H264Header() const { - return absl::get<RTPVideoHeaderH264>(video_header_.video_type_header); - } - - H264PacketizationTypes type_; - RTPVideoHeader video_header_; - bool first_fragment_ = false; - bool marker_bit_ = false; - uint32_t rtp_timestamp_ = 0; - uint16_t rtp_seq_num_ = 0; - std::vector<std::vector<uint8_t>> nalu_payloads_; -}; - -Packet::Packet(H264PacketizationTypes type) : type_(type) { - video_header_.video_type_header.emplace<RTPVideoHeaderH264>(); -} - -Packet& Packet::Idr(std::vector<uint8_t> payload) { - auto& h264_header = H264Header(); - h264_header.nalus[h264_header.nalus_length++] = MakeNaluInfo(kIdr); - nalu_payloads_.push_back(std::move(payload)); - return *this; -} - -Packet& Packet::Slice(std::vector<uint8_t> payload) { - auto& h264_header = H264Header(); - h264_header.nalus[h264_header.nalus_length++] = MakeNaluInfo(kSlice); - nalu_payloads_.push_back(std::move(payload)); - return *this; -} - -Packet& Packet::Sps(std::vector<uint8_t> payload) { - auto& h264_header = H264Header(); - h264_header.nalus[h264_header.nalus_length++] = MakeNaluInfo(kSps); - nalu_payloads_.push_back(std::move(payload)); - return *this; -} - -Packet& Packet::SpsWithResolution(RenderResolution resolution, - std::vector<uint8_t> payload) { - auto& h264_header = H264Header(); - h264_header.nalus[h264_header.nalus_length++] = MakeNaluInfo(kSps); - video_header_.width = resolution.Width(); - video_header_.height = resolution.Height(); - nalu_payloads_.push_back(std::move(payload)); - return *this; -} - -Packet& Packet::Pps(std::vector<uint8_t> payload) { - auto& h264_header = H264Header(); - h264_header.nalus[h264_header.nalus_length++] = 
MakeNaluInfo(kPps); - nalu_payloads_.push_back(std::move(payload)); - return *this; -} - -Packet& Packet::Aud() { - auto& h264_header = H264Header(); - h264_header.nalus[h264_header.nalus_length++] = MakeNaluInfo(kAud); - nalu_payloads_.push_back({}); - return *this; -} - -Packet& Packet::Marker() { - marker_bit_ = true; - return *this; -} - -Packet& Packet::AsFirstFragment() { - first_fragment_ = true; - return *this; -} - -Packet& Packet::Time(uint32_t rtp_timestamp) { - rtp_timestamp_ = rtp_timestamp; - return *this; -} - -Packet& Packet::SeqNum(uint16_t rtp_seq_num) { - rtp_seq_num_ = rtp_seq_num; - return *this; -} - -std::unique_ptr<H264PacketBuffer::Packet> Packet::Build() { - auto res = std::make_unique<H264PacketBuffer::Packet>(); - - auto& h264_header = H264Header(); - switch (type_) { - case kH264FuA: { - RTC_CHECK_EQ(h264_header.nalus_length, 1); - res->video_payload = BuildFuaPayload(); - break; - } - case kH264SingleNalu: { - RTC_CHECK_EQ(h264_header.nalus_length, 1); - res->video_payload = BuildSingleNaluPayload(); - break; - } - case kH264StapA: { - RTC_CHECK_GT(h264_header.nalus_length, 1); - RTC_CHECK_LE(h264_header.nalus_length, kMaxNalusPerPacket); - res->video_payload = BuildStapAPayload(); - break; - } - } - - if (type_ == kH264FuA && !first_fragment_) { - h264_header.nalus_length = 0; - } - - h264_header.packetization_type = type_; - res->marker_bit = marker_bit_; - res->video_header = video_header_; - res->timestamp = rtp_timestamp_; - res->seq_num = rtp_seq_num_; - res->video_header.codec = kVideoCodecH264; - - return res; -} - -rtc::CopyOnWriteBuffer Packet::BuildFuaPayload() const { - return rtc::CopyOnWriteBuffer(nalu_payloads_[0]); -} - -rtc::CopyOnWriteBuffer Packet::BuildSingleNaluPayload() const { - rtc::CopyOnWriteBuffer res; - auto& h264_header = H264Header(); - res.AppendData(&h264_header.nalus[0].type, 1); - res.AppendData(nalu_payloads_[0]); - return res; -} - -rtc::CopyOnWriteBuffer Packet::BuildStapAPayload() const { - rtc::CopyOnWriteBuffer res; - - const uint8_t indicator = H264::NaluType::kStapA; - res.AppendData(&indicator, 1); - - auto& h264_header = H264Header(); - for (size_t i = 0; i < h264_header.nalus_length; ++i) { - // The two first bytes indicates the nalu segment size. 
- uint8_t length_as_array[2] = { - 0, static_cast<uint8_t>(nalu_payloads_[i].size() + 1)}; - res.AppendData(length_as_array); - - res.AppendData(&h264_header.nalus[i].type, 1); - res.AppendData(nalu_payloads_[i]); - } - return res; -} - -rtc::ArrayView<const uint8_t> PacketPayload( - const std::unique_ptr<H264PacketBuffer::Packet>& packet) { - return packet->video_payload; -} - -std::vector<uint8_t> FlatVector( - const std::vector<std::vector<uint8_t>>& elems) { - std::vector<uint8_t> res; - for (const auto& elem : elems) { - res.insert(res.end(), elem.begin(), elem.end()); - } - return res; -} - -TEST(H264PacketBufferTest, IdrIsKeyframe) { - H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/true); - - EXPECT_THAT( - packet_buffer.InsertPacket(Packet(kH264SingleNalu).Idr().Marker().Build()) - .packets, - SizeIs(1)); -} - -TEST(H264PacketBufferTest, IdrIsNotKeyframe) { - H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false); - - EXPECT_THAT( - packet_buffer.InsertPacket(Packet(kH264SingleNalu).Idr().Marker().Build()) - .packets, - IsEmpty()); -} - -TEST(H264PacketBufferTest, IdrIsKeyframeFuaRequiresFirstFragmet) { - H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/true); - - // Not marked as the first fragment - EXPECT_THAT( - packet_buffer - .InsertPacket(Packet(kH264FuA).Idr().SeqNum(0).Time(0).Build()) - .packets, - IsEmpty()); - - EXPECT_THAT(packet_buffer - .InsertPacket( - Packet(kH264FuA).Idr().SeqNum(1).Time(0).Marker().Build()) - .packets, - IsEmpty()); - - // Marked as first fragment - EXPECT_THAT(packet_buffer - .InsertPacket(Packet(kH264FuA) - .Idr() - .SeqNum(2) - .Time(1) - .AsFirstFragment() - .Build()) - .packets, - IsEmpty()); - - EXPECT_THAT(packet_buffer - .InsertPacket( - Packet(kH264FuA).Idr().SeqNum(3).Time(1).Marker().Build()) - .packets, - SizeIs(2)); -} - -TEST(H264PacketBufferTest, SpsPpsIdrIsKeyframeSingleNalus) { - H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false); - - RTC_UNUSED(packet_buffer.InsertPacket( - Packet(kH264SingleNalu).Sps().SeqNum(0).Time(0).Build())); - RTC_UNUSED(packet_buffer.InsertPacket( - Packet(kH264SingleNalu).Pps().SeqNum(1).Time(0).Build())); - EXPECT_THAT( - packet_buffer - .InsertPacket( - Packet(kH264SingleNalu).Idr().SeqNum(2).Time(0).Marker().Build()) - .packets, - SizeIs(3)); -} - -TEST(H264PacketBufferTest, PpsIdrIsNotKeyframeSingleNalus) { - H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false); - - RTC_UNUSED(packet_buffer.InsertPacket( - Packet(kH264SingleNalu).Pps().SeqNum(0).Time(0).Build())); - EXPECT_THAT( - packet_buffer - .InsertPacket( - Packet(kH264SingleNalu).Idr().SeqNum(1).Time(0).Marker().Build()) - .packets, - IsEmpty()); -} - -TEST(H264PacketBufferTest, SpsIdrIsNotKeyframeSingleNalus) { - H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false); - - RTC_UNUSED(packet_buffer.InsertPacket( - Packet(kH264SingleNalu).Sps().SeqNum(0).Time(0).Build())); - EXPECT_THAT( - packet_buffer - .InsertPacket( - Packet(kH264SingleNalu).Idr().SeqNum(1).Time(0).Marker().Build()) - .packets, - IsEmpty()); -} - -TEST(H264PacketBufferTest, SpsPpsIdrIsKeyframeStapA) { - H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false); - - EXPECT_THAT(packet_buffer - .InsertPacket(Packet(kH264StapA) - .Sps() - .Pps() - .Idr() - .SeqNum(0) - .Time(0) - .Marker() - .Build()) - .packets, - SizeIs(1)); -} - -TEST(H264PacketBufferTest, PpsIdrIsNotKeyframeStapA) { - H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false); - - EXPECT_THAT( - packet_buffer - 
.InsertPacket( - Packet(kH264StapA).Pps().Idr().SeqNum(0).Time(0).Marker().Build()) - .packets, - IsEmpty()); -} - -TEST(H264PacketBufferTest, SpsIdrIsNotKeyframeStapA) { - H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false); - - EXPECT_THAT( - packet_buffer - .InsertPacket( - Packet(kH264StapA).Sps().Idr().SeqNum(2).Time(2).Marker().Build()) - .packets, - IsEmpty()); - - EXPECT_THAT(packet_buffer - .InsertPacket(Packet(kH264StapA) - .Sps() - .Pps() - .Idr() - .SeqNum(3) - .Time(3) - .Marker() - .Build()) - .packets, - SizeIs(1)); -} - -TEST(H264PacketBufferTest, InsertingSpsPpsLastCompletesKeyframe) { - H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false); - - RTC_UNUSED(packet_buffer.InsertPacket( - Packet(kH264SingleNalu).Idr().SeqNum(2).Time(1).Marker().Build())); - - EXPECT_THAT(packet_buffer - .InsertPacket( - Packet(kH264StapA).Sps().Pps().SeqNum(1).Time(1).Build()) - .packets, - SizeIs(2)); -} - -TEST(H264PacketBufferTest, InsertingMidFuaCompletesFrame) { - H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false); - - EXPECT_THAT(packet_buffer - .InsertPacket(Packet(kH264StapA) - .Sps() - .Pps() - .Idr() - .SeqNum(0) - .Time(0) - .Marker() - .Build()) - .packets, - SizeIs(1)); - - RTC_UNUSED(packet_buffer.InsertPacket( - Packet(kH264FuA).Slice().SeqNum(1).Time(1).AsFirstFragment().Build())); - RTC_UNUSED(packet_buffer.InsertPacket( - Packet(kH264FuA).Slice().SeqNum(3).Time(1).Marker().Build())); - EXPECT_THAT( - packet_buffer - .InsertPacket(Packet(kH264FuA).Slice().SeqNum(2).Time(1).Build()) - .packets, - SizeIs(3)); -} - -TEST(H264PacketBufferTest, SeqNumJumpDoesNotCompleteFrame) { - H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false); - - EXPECT_THAT(packet_buffer - .InsertPacket(Packet(kH264StapA) - .Sps() - .Pps() - .Idr() - .SeqNum(0) - .Time(0) - .Marker() - .Build()) - .packets, - SizeIs(1)); - - EXPECT_THAT( - packet_buffer - .InsertPacket(Packet(kH264FuA).Slice().SeqNum(1).Time(1).Build()) - .packets, - IsEmpty()); - - // Add `kBufferSize` to make the index of the sequence number wrap and end up - // where the packet with sequence number 2 would have ended up. - EXPECT_THAT(packet_buffer - .InsertPacket(Packet(kH264FuA) - .Slice() - .SeqNum(2 + kBufferSize) - .Time(3) - .Marker() - .Build()) - .packets, - IsEmpty()); -} - -TEST(H264PacketBufferTest, OldFramesAreNotCompletedAfterBufferWrap) { - H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false); - - EXPECT_THAT(packet_buffer - .InsertPacket(Packet(kH264SingleNalu) - .Slice() - .SeqNum(1) - .Time(1) - .Marker() - .Build()) - .packets, - IsEmpty()); - - // New keyframe, preceedes packet with sequence number 1 in the buffer. 
- EXPECT_THAT(packet_buffer - .InsertPacket(Packet(kH264StapA) - .Sps() - .Pps() - .Idr() - .SeqNum(kBufferSize) - .Time(kBufferSize) - .Marker() - .Build()) - .packets, - SizeIs(1)); -} - -TEST(H264PacketBufferTest, OldPacketsDontBlockNewPackets) { - H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false); - EXPECT_THAT(packet_buffer - .InsertPacket(Packet(kH264StapA) - .Sps() - .Pps() - .Idr() - .SeqNum(kBufferSize) - .Time(kBufferSize) - .Marker() - .Build()) - .packets, - SizeIs(1)); - - RTC_UNUSED(packet_buffer.InsertPacket(Packet(kH264FuA) - .Slice() - .SeqNum(kBufferSize + 1) - .Time(kBufferSize + 1) - .AsFirstFragment() - .Build())); - - RTC_UNUSED(packet_buffer.InsertPacket(Packet(kH264FuA) - .Slice() - .SeqNum(kBufferSize + 3) - .Time(kBufferSize + 1) - .Marker() - .Build())); - EXPECT_THAT( - packet_buffer - .InsertPacket(Packet(kH264FuA).Slice().SeqNum(2).Time(2).Build()) - .packets, - IsEmpty()); - - EXPECT_THAT(packet_buffer - .InsertPacket(Packet(kH264FuA) - .Slice() - .SeqNum(kBufferSize + 2) - .Time(kBufferSize + 1) - .Build()) - .packets, - SizeIs(3)); -} - -TEST(H264PacketBufferTest, OldPacketDoesntCompleteFrame) { - H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false); - - EXPECT_THAT(packet_buffer - .InsertPacket(Packet(kH264StapA) - .Sps() - .Pps() - .Idr() - .SeqNum(kBufferSize) - .Time(kBufferSize) - .Marker() - .Build()) - .packets, - SizeIs(1)); - - EXPECT_THAT(packet_buffer - .InsertPacket(Packet(kH264FuA) - .Slice() - .SeqNum(kBufferSize + 3) - .Time(kBufferSize + 1) - .Marker() - .Build()) - .packets, - IsEmpty()); - - EXPECT_THAT( - packet_buffer - .InsertPacket( - Packet(kH264FuA).Slice().SeqNum(2).Time(2).Marker().Build()) - .packets, - IsEmpty()); - - EXPECT_THAT(packet_buffer - .InsertPacket(Packet(kH264FuA) - .Slice() - .SeqNum(kBufferSize + 1) - .Time(kBufferSize + 1) - .AsFirstFragment() - .Build()) - .packets, - IsEmpty()); -} - -TEST(H264PacketBufferTest, FrameBoundariesAreSet) { - H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false); - - auto key = packet_buffer.InsertPacket( - Packet(kH264StapA).Sps().Pps().Idr().SeqNum(1).Time(1).Marker().Build()); - - ASSERT_THAT(key.packets, SizeIs(1)); - EXPECT_TRUE(key.packets[0]->video_header.is_first_packet_in_frame); - EXPECT_TRUE(key.packets[0]->video_header.is_last_packet_in_frame); - - RTC_UNUSED(packet_buffer.InsertPacket( - Packet(kH264FuA).Slice().SeqNum(2).Time(2).Build())); - RTC_UNUSED(packet_buffer.InsertPacket( - Packet(kH264FuA).Slice().SeqNum(3).Time(2).Build())); - auto delta = packet_buffer.InsertPacket( - Packet(kH264FuA).Slice().SeqNum(4).Time(2).Marker().Build()); - - ASSERT_THAT(delta.packets, SizeIs(3)); - EXPECT_TRUE(delta.packets[0]->video_header.is_first_packet_in_frame); - EXPECT_FALSE(delta.packets[0]->video_header.is_last_packet_in_frame); - - EXPECT_FALSE(delta.packets[1]->video_header.is_first_packet_in_frame); - EXPECT_FALSE(delta.packets[1]->video_header.is_last_packet_in_frame); - - EXPECT_FALSE(delta.packets[2]->video_header.is_first_packet_in_frame); - EXPECT_TRUE(delta.packets[2]->video_header.is_last_packet_in_frame); -} - -TEST(H264PacketBufferTest, ResolutionSetOnFirstPacket) { - H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false); - - RTC_UNUSED(packet_buffer.InsertPacket( - Packet(kH264SingleNalu).Aud().SeqNum(1).Time(1).Build())); - auto res = packet_buffer.InsertPacket(Packet(kH264StapA) - .SpsWithResolution({320, 240}) - .Pps() - .Idr() - .SeqNum(2) - .Time(1) - .Marker() - .Build()); - - ASSERT_THAT(res.packets, 
SizeIs(2)); - EXPECT_THAT(res.packets[0]->video_header.width, Eq(320)); - EXPECT_THAT(res.packets[0]->video_header.height, Eq(240)); -} - -TEST(H264PacketBufferTest, KeyframeAndDeltaFrameSetOnFirstPacket) { - H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false); - - RTC_UNUSED(packet_buffer.InsertPacket( - Packet(kH264SingleNalu).Aud().SeqNum(1).Time(1).Build())); - auto key = packet_buffer.InsertPacket( - Packet(kH264StapA).Sps().Pps().Idr().SeqNum(2).Time(1).Marker().Build()); - - auto delta = packet_buffer.InsertPacket( - Packet(kH264SingleNalu).Slice().SeqNum(3).Time(2).Marker().Build()); - - ASSERT_THAT(key.packets, SizeIs(2)); - EXPECT_THAT(key.packets[0]->video_header.frame_type, - Eq(VideoFrameType::kVideoFrameKey)); - ASSERT_THAT(delta.packets, SizeIs(1)); - EXPECT_THAT(delta.packets[0]->video_header.frame_type, - Eq(VideoFrameType::kVideoFrameDelta)); -} - -TEST(H264PacketBufferTest, RtpSeqNumWrap) { - H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false); - - RTC_UNUSED(packet_buffer.InsertPacket( - Packet(kH264StapA).Sps().Pps().SeqNum(0xffff).Time(0).Build())); - - RTC_UNUSED(packet_buffer.InsertPacket( - Packet(kH264FuA).Idr().SeqNum(0).Time(0).Build())); - EXPECT_THAT(packet_buffer - .InsertPacket( - Packet(kH264FuA).Idr().SeqNum(1).Time(0).Marker().Build()) - .packets, - SizeIs(3)); -} - -TEST(H264PacketBufferTest, StapAFixedBitstream) { - H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false); - - auto packets = packet_buffer - .InsertPacket(Packet(kH264StapA) - .Sps({1, 2, 3}) - .Pps({4, 5, 6}) - .Idr({7, 8, 9}) - .SeqNum(0) - .Time(0) - .Marker() - .Build()) - .packets; - - ASSERT_THAT(packets, SizeIs(1)); - EXPECT_THAT(PacketPayload(packets[0]), - ElementsAreArray(FlatVector({StartCode(), - {kSps, 1, 2, 3}, - StartCode(), - {kPps, 4, 5, 6}, - StartCode(), - {kIdr, 7, 8, 9}}))); -} - -TEST(H264PacketBufferTest, SingleNaluFixedBitstream) { - H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false); - - RTC_UNUSED(packet_buffer.InsertPacket( - Packet(kH264SingleNalu).Sps({1, 2, 3}).SeqNum(0).Time(0).Build())); - RTC_UNUSED(packet_buffer.InsertPacket( - Packet(kH264SingleNalu).Pps({4, 5, 6}).SeqNum(1).Time(0).Build())); - auto packets = packet_buffer - .InsertPacket(Packet(kH264SingleNalu) - .Idr({7, 8, 9}) - .SeqNum(2) - .Time(0) - .Marker() - .Build()) - .packets; - - ASSERT_THAT(packets, SizeIs(3)); - EXPECT_THAT(PacketPayload(packets[0]), - ElementsAreArray(FlatVector({StartCode(), {kSps, 1, 2, 3}}))); - EXPECT_THAT(PacketPayload(packets[1]), - ElementsAreArray(FlatVector({StartCode(), {kPps, 4, 5, 6}}))); - EXPECT_THAT(PacketPayload(packets[2]), - ElementsAreArray(FlatVector({StartCode(), {kIdr, 7, 8, 9}}))); -} - -TEST(H264PacketBufferTest, StapaAndFuaFixedBitstream) { - H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false); - - RTC_UNUSED(packet_buffer.InsertPacket(Packet(kH264StapA) - .Sps({1, 2, 3}) - .Pps({4, 5, 6}) - .SeqNum(0) - .Time(0) - .Build())); - RTC_UNUSED(packet_buffer.InsertPacket(Packet(kH264FuA) - .Idr({8, 8, 8}) - .SeqNum(1) - .Time(0) - .AsFirstFragment() - .Build())); - auto packets = packet_buffer - .InsertPacket(Packet(kH264FuA) - .Idr({9, 9, 9}) - .SeqNum(2) - .Time(0) - .Marker() - .Build()) - .packets; - - ASSERT_THAT(packets, SizeIs(3)); - EXPECT_THAT( - PacketPayload(packets[0]), - ElementsAreArray(FlatVector( - {StartCode(), {kSps, 1, 2, 3}, StartCode(), {kPps, 4, 5, 6}}))); - EXPECT_THAT(PacketPayload(packets[1]), - ElementsAreArray(FlatVector({StartCode(), {8, 8, 8}}))); - // 
Third is a continuation of second, so only the payload is expected. - EXPECT_THAT(PacketPayload(packets[2]), - ElementsAreArray(FlatVector({{9, 9, 9}}))); -} - -TEST(H264PacketBufferTest, FullPacketBufferDoesNotBlockKeyframe) { - H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false); - - for (int i = 0; i < kBufferSize; ++i) { - EXPECT_THAT( - packet_buffer - .InsertPacket( - Packet(kH264SingleNalu).Slice().SeqNum(i).Time(0).Build()) - .packets, - IsEmpty()); - } - - EXPECT_THAT(packet_buffer - .InsertPacket(Packet(kH264StapA) - .Sps() - .Pps() - .Idr() - .SeqNum(kBufferSize) - .Time(1) - .Marker() - .Build()) - .packets, - SizeIs(1)); -} - -TEST(H264PacketBufferTest, TooManyNalusInPacket) { - H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false); - - std::unique_ptr<H264PacketBuffer::Packet> packet( - Packet(kH264StapA).Sps().Pps().Idr().SeqNum(1).Time(1).Marker().Build()); - auto& h264_header = - absl::get<RTPVideoHeaderH264>(packet->video_header.video_type_header); - h264_header.nalus_length = kMaxNalusPerPacket + 1; - - EXPECT_THAT(packet_buffer.InsertPacket(std::move(packet)).packets, IsEmpty()); -} - -} // namespace -} // namespace webrtc diff --git a/third_party/libwebrtc/modules/video_coding/h264_packet_buffer.cc b/third_party/libwebrtc/modules/video_coding/h26x_packet_buffer.cc index 6096665bda..bca2b5ce29 100644 --- a/third_party/libwebrtc/modules/video_coding/h264_packet_buffer.cc +++ b/third_party/libwebrtc/modules/video_coding/h26x_packet_buffer.cc @@ -8,7 +8,7 @@ * be found in the AUTHORS file in the root of the source tree. */ -#include "modules/video_coding/h264_packet_buffer.h" +#include "modules/video_coding/h26x_packet_buffer.h" #include <algorithm> #include <cstdint> @@ -27,9 +27,13 @@ #include "rtc_base/copy_on_write_buffer.h" #include "rtc_base/logging.h" #include "rtc_base/numerics/sequence_number_util.h" +#ifdef RTC_ENABLE_H265 +#include "common_video/h265/h265_common.h" +#endif namespace webrtc { namespace { + int64_t EuclideanMod(int64_t n, int64_t div) { RTC_DCHECK_GT(div, 0); return (n %= div) < 0 ? n + div : n; @@ -48,7 +52,7 @@ bool IsFirstPacketOfFragment(const RTPVideoHeaderH264& h264_header) { return h264_header.nalus_length > 0; } -bool BeginningOfIdr(const H264PacketBuffer::Packet& packet) { +bool BeginningOfIdr(const H26xPacketBuffer::Packet& packet) { const auto& h264_header = absl::get<RTPVideoHeaderH264>(packet.video_header.video_type_header); const bool contains_idr_nalu = @@ -66,7 +70,7 @@ bool BeginningOfIdr(const H264PacketBuffer::Packet& packet) { } } -bool HasSps(const H264PacketBuffer::Packet& packet) { +bool HasSps(const H26xPacketBuffer::Packet& packet) { auto& h264_header = absl::get<RTPVideoHeaderH264>(packet.video_header.video_type_header); return absl::c_any_of(GetNaluInfos(h264_header), [](const auto& nalu_info) { @@ -74,10 +78,24 @@ bool HasSps(const H264PacketBuffer::Packet& packet) { }); } +#ifdef RTC_ENABLE_H265 +bool HasVps(const H26xPacketBuffer::Packet& packet) { + std::vector<H265::NaluIndex> nalu_indices = H265::FindNaluIndices( + packet.video_payload.cdata(), packet.video_payload.size()); + return absl::c_any_of((nalu_indices), [&packet]( + const H265::NaluIndex& nalu_index) { + return H265::ParseNaluType( + packet.video_payload.cdata()[nalu_index.payload_start_offset]) == + H265::NaluType::kVps; + }); +} +#endif + // TODO(bugs.webrtc.org/13157): Update the H264 depacketizer so we don't have to // fiddle with the payload at this point. 
-rtc::CopyOnWriteBuffer FixVideoPayload(rtc::ArrayView<const uint8_t> payload, - const RTPVideoHeader& video_header) { +rtc::CopyOnWriteBuffer FixH264VideoPayload( + rtc::ArrayView<const uint8_t> payload, + const RTPVideoHeader& video_header) { constexpr uint8_t kStartCode[] = {0, 0, 0, 1}; const auto& h264_header = @@ -124,18 +142,15 @@ rtc::CopyOnWriteBuffer FixVideoPayload(rtc::ArrayView<const uint8_t> payload, } // namespace -H264PacketBuffer::H264PacketBuffer(bool idr_only_keyframes_allowed) - : idr_only_keyframes_allowed_(idr_only_keyframes_allowed) {} +H26xPacketBuffer::H26xPacketBuffer(bool h264_idr_only_keyframes_allowed) + : h264_idr_only_keyframes_allowed_(h264_idr_only_keyframes_allowed) {} -H264PacketBuffer::InsertResult H264PacketBuffer::InsertPacket( +H26xPacketBuffer::InsertResult H26xPacketBuffer::InsertPacket( std::unique_ptr<Packet> packet) { - RTC_DCHECK(packet->video_header.codec == kVideoCodecH264); + RTC_DCHECK(packet->video_header.codec == kVideoCodecH264 || + packet->video_header.codec == kVideoCodecH265); InsertResult result; - if (!absl::holds_alternative<RTPVideoHeaderH264>( - packet->video_header.video_type_header)) { - return result; - } int64_t unwrapped_seq_num = seq_num_unwrapper_.Unwrap(packet->seq_num); auto& packet_slot = GetPacket(unwrapped_seq_num); @@ -151,19 +166,27 @@ H264PacketBuffer::InsertResult H264PacketBuffer::InsertPacket( return result; } -std::unique_ptr<H264PacketBuffer::Packet>& H264PacketBuffer::GetPacket( +std::unique_ptr<H26xPacketBuffer::Packet>& H26xPacketBuffer::GetPacket( int64_t unwrapped_seq_num) { return buffer_[EuclideanMod(unwrapped_seq_num, kBufferSize)]; } -bool H264PacketBuffer::BeginningOfStream( - const H264PacketBuffer::Packet& packet) const { - return HasSps(packet) || - (idr_only_keyframes_allowed_ && BeginningOfIdr(packet)); +bool H26xPacketBuffer::BeginningOfStream( + const H26xPacketBuffer::Packet& packet) const { + if (packet.codec() == kVideoCodecH264) { + return HasSps(packet) || + (h264_idr_only_keyframes_allowed_ && BeginningOfIdr(packet)); +#ifdef RTC_ENABLE_H265 + } else if (packet.codec() == kVideoCodecH265) { + return HasVps(packet); +#endif + } + RTC_DCHECK_NOTREACHED(); + return false; } -std::vector<std::unique_ptr<H264PacketBuffer::Packet>> -H264PacketBuffer::FindFrames(int64_t unwrapped_seq_num) { +std::vector<std::unique_ptr<H26xPacketBuffer::Packet>> +H26xPacketBuffer::FindFrames(int64_t unwrapped_seq_num) { std::vector<std::unique_ptr<Packet>> found_frames; Packet* packet = GetPacket(unwrapped_seq_num).get(); @@ -223,13 +246,17 @@ H264PacketBuffer::FindFrames(int64_t unwrapped_seq_num) { return found_frames; } -bool H264PacketBuffer::MaybeAssembleFrame( +bool H26xPacketBuffer::MaybeAssembleFrame( int64_t start_seq_num_unwrapped, int64_t end_sequence_number_unwrapped, std::vector<std::unique_ptr<Packet>>& frames) { +#ifdef RTC_ENABLE_H265 + bool has_vps = false; +#endif bool has_sps = false; bool has_pps = false; bool has_idr = false; + bool has_irap = false; int width = -1; int height = -1; @@ -237,24 +264,44 @@ bool H264PacketBuffer::MaybeAssembleFrame( for (int64_t seq_num = start_seq_num_unwrapped; seq_num <= end_sequence_number_unwrapped; ++seq_num) { const auto& packet = GetPacket(seq_num); - const auto& h264_header = - absl::get<RTPVideoHeaderH264>(packet->video_header.video_type_header); - for (const auto& nalu : GetNaluInfos(h264_header)) { - has_idr |= nalu.type == H264::NaluType::kIdr; - has_sps |= nalu.type == H264::NaluType::kSps; - has_pps |= nalu.type == H264::NaluType::kPps; + if 
(packet->codec() == kVideoCodecH264) { + const auto& h264_header = + absl::get<RTPVideoHeaderH264>(packet->video_header.video_type_header); + for (const auto& nalu : GetNaluInfos(h264_header)) { + has_idr |= nalu.type == H264::NaluType::kIdr; + has_sps |= nalu.type == H264::NaluType::kSps; + has_pps |= nalu.type == H264::NaluType::kPps; + } + if (has_idr) { + if (!h264_idr_only_keyframes_allowed_ && (!has_sps || !has_pps)) { + return false; + } + } +#ifdef RTC_ENABLE_H265 + } else if (packet->codec() == kVideoCodecH265) { + std::vector<H265::NaluIndex> nalu_indices = H265::FindNaluIndices( + packet->video_payload.cdata(), packet->video_payload.size()); + for (const auto& nalu_index : nalu_indices) { + uint8_t nalu_type = H265::ParseNaluType( + packet->video_payload.cdata()[nalu_index.payload_start_offset]); + has_irap |= (nalu_type >= H265::NaluType::kBlaWLp && + nalu_type <= H265::NaluType::kRsvIrapVcl23); + has_vps |= nalu_type == H265::NaluType::kVps; + has_sps |= nalu_type == H265::NaluType::kSps; + has_pps |= nalu_type == H265::NaluType::kPps; + } + if (has_irap) { + if (!has_vps || !has_sps || !has_pps) { + return false; + } + } +#endif // RTC_ENABLE_H265 } width = std::max<int>(packet->video_header.width, width); height = std::max<int>(packet->video_header.height, height); } - if (has_idr) { - if (!idr_only_keyframes_allowed_ && (!has_sps || !has_pps)) { - return false; - } - } - for (int64_t seq_num = start_seq_num_unwrapped; seq_num <= end_sequence_number_unwrapped; ++seq_num) { auto& packet = GetPacket(seq_num); @@ -270,13 +317,16 @@ bool H264PacketBuffer::MaybeAssembleFrame( packet->video_header.height = height; } - packet->video_header.frame_type = has_idr + packet->video_header.frame_type = has_idr || has_irap ? VideoFrameType::kVideoFrameKey : VideoFrameType::kVideoFrameDelta; } - packet->video_payload = - FixVideoPayload(packet->video_payload, packet->video_header); + // Start code is inserted by depacktizer for H.265. + if (packet->codec() == kVideoCodecH264) { + packet->video_payload = + FixH264VideoPayload(packet->video_payload, packet->video_header); + } frames.push_back(std::move(packet)); } diff --git a/third_party/libwebrtc/modules/video_coding/h264_packet_buffer.h b/third_party/libwebrtc/modules/video_coding/h26x_packet_buffer.h index a72c240e82..21601562c5 100644 --- a/third_party/libwebrtc/modules/video_coding/h264_packet_buffer.h +++ b/third_party/libwebrtc/modules/video_coding/h26x_packet_buffer.h @@ -8,8 +8,8 @@ * be found in the AUTHORS file in the root of the source tree. */ -#ifndef MODULES_VIDEO_CODING_H264_PACKET_BUFFER_H_ -#define MODULES_VIDEO_CODING_H264_PACKET_BUFFER_H_ +#ifndef MODULES_VIDEO_CODING_H26X_PACKET_BUFFER_H_ +#define MODULES_VIDEO_CODING_H26X_PACKET_BUFFER_H_ #include <array> #include <memory> @@ -22,15 +22,16 @@ namespace webrtc { -class H264PacketBuffer { +class H26xPacketBuffer { public: - // The H264PacketBuffer does the same job as the PacketBuffer but for H264 - // only. To make it fit in with surronding code the PacketBuffer input/output - // classes are used. + // The H26xPacketBuffer does the same job as the PacketBuffer but for H264 and + // H265 only. To make it fit in with surronding code the PacketBuffer + // input/output classes are used. using Packet = video_coding::PacketBuffer::Packet; using InsertResult = video_coding::PacketBuffer::InsertResult; - explicit H264PacketBuffer(bool idr_only_keyframes_allowed); + // |h264_idr_only_keyframes_allowed| is ignored if H.265 is used. 
+ explicit H26xPacketBuffer(bool h264_idr_only_keyframes_allowed); ABSL_MUST_USE_RESULT InsertResult InsertPacket(std::unique_ptr<Packet> packet); @@ -45,7 +46,7 @@ class H264PacketBuffer { int64_t end_sequence_number_unwrapped, std::vector<std::unique_ptr<Packet>>& packets); - const bool idr_only_keyframes_allowed_; + const bool h264_idr_only_keyframes_allowed_; std::array<std::unique_ptr<Packet>, kBufferSize> buffer_; absl::optional<int64_t> last_continuous_unwrapped_seq_num_; SeqNumUnwrapper<uint16_t> seq_num_unwrapper_; @@ -53,4 +54,4 @@ class H264PacketBuffer { } // namespace webrtc -#endif // MODULES_VIDEO_CODING_H264_PACKET_BUFFER_H_ +#endif // MODULES_VIDEO_CODING_H26X_PACKET_BUFFER_H_ diff --git a/third_party/libwebrtc/modules/video_coding/h26x_packet_buffer_unittest.cc b/third_party/libwebrtc/modules/video_coding/h26x_packet_buffer_unittest.cc new file mode 100644 index 0000000000..ac5bcb735b --- /dev/null +++ b/third_party/libwebrtc/modules/video_coding/h26x_packet_buffer_unittest.cc @@ -0,0 +1,1058 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "modules/video_coding/h26x_packet_buffer.h" + +#include <cstring> +#include <limits> +#include <ostream> +#include <string> +#include <utility> + +#include "api/array_view.h" +#include "api/video/render_resolution.h" +#include "common_video/h264/h264_common.h" +#include "rtc_base/system/unused.h" +#include "test/gmock.h" +#include "test/gtest.h" +#ifdef RTC_ENABLE_H265 +#include "common_video/h265/h265_common.h" +#endif + +namespace webrtc { +namespace { + +using ::testing::ElementsAreArray; +using ::testing::Eq; +using ::testing::IsEmpty; +using ::testing::SizeIs; + +using H264::NaluType::kAud; +using H264::NaluType::kFuA; +using H264::NaluType::kIdr; +using H264::NaluType::kPps; +using H264::NaluType::kSlice; +using H264::NaluType::kSps; +using H264::NaluType::kStapA; + +constexpr int kBufferSize = 2048; + +std::vector<uint8_t> StartCode() { + return {0, 0, 0, 1}; +} + +NaluInfo MakeNaluInfo(uint8_t type) { + NaluInfo res; + res.type = type; + res.sps_id = -1; + res.pps_id = -1; + return res; +} + +class H264Packet { + public: + explicit H264Packet(H264PacketizationTypes type); + + H264Packet& Idr(std::vector<uint8_t> payload = {9, 9, 9}); + H264Packet& Slice(std::vector<uint8_t> payload = {9, 9, 9}); + H264Packet& Sps(std::vector<uint8_t> payload = {9, 9, 9}); + H264Packet& SpsWithResolution(RenderResolution resolution, + std::vector<uint8_t> payload = {9, 9, 9}); + H264Packet& Pps(std::vector<uint8_t> payload = {9, 9, 9}); + H264Packet& Aud(); + H264Packet& Marker(); + H264Packet& AsFirstFragment(); + H264Packet& Time(uint32_t rtp_timestamp); + H264Packet& SeqNum(uint16_t rtp_seq_num); + + std::unique_ptr<H26xPacketBuffer::Packet> Build(); + + private: + rtc::CopyOnWriteBuffer BuildFuaPayload() const; + rtc::CopyOnWriteBuffer BuildSingleNaluPayload() const; + rtc::CopyOnWriteBuffer BuildStapAPayload() const; + + RTPVideoHeaderH264& H264Header() { + return absl::get<RTPVideoHeaderH264>(video_header_.video_type_header); + } + const RTPVideoHeaderH264& H264Header() const { + return absl::get<RTPVideoHeaderH264>(video_header_.video_type_header); + } + + 
H264PacketizationTypes type_; + RTPVideoHeader video_header_; + bool first_fragment_ = false; + bool marker_bit_ = false; + uint32_t rtp_timestamp_ = 0; + uint16_t rtp_seq_num_ = 0; + std::vector<std::vector<uint8_t>> nalu_payloads_; +}; + +H264Packet::H264Packet(H264PacketizationTypes type) : type_(type) { + video_header_.video_type_header.emplace<RTPVideoHeaderH264>(); +} + +H264Packet& H264Packet::Idr(std::vector<uint8_t> payload) { + auto& h264_header = H264Header(); + h264_header.nalus[h264_header.nalus_length++] = MakeNaluInfo(kIdr); + nalu_payloads_.push_back(std::move(payload)); + return *this; +} + +H264Packet& H264Packet::Slice(std::vector<uint8_t> payload) { + auto& h264_header = H264Header(); + h264_header.nalus[h264_header.nalus_length++] = MakeNaluInfo(kSlice); + nalu_payloads_.push_back(std::move(payload)); + return *this; +} + +H264Packet& H264Packet::Sps(std::vector<uint8_t> payload) { + auto& h264_header = H264Header(); + h264_header.nalus[h264_header.nalus_length++] = MakeNaluInfo(kSps); + nalu_payloads_.push_back(std::move(payload)); + return *this; +} + +H264Packet& H264Packet::SpsWithResolution(RenderResolution resolution, + std::vector<uint8_t> payload) { + auto& h264_header = H264Header(); + h264_header.nalus[h264_header.nalus_length++] = MakeNaluInfo(kSps); + video_header_.width = resolution.Width(); + video_header_.height = resolution.Height(); + nalu_payloads_.push_back(std::move(payload)); + return *this; +} + +H264Packet& H264Packet::Pps(std::vector<uint8_t> payload) { + auto& h264_header = H264Header(); + h264_header.nalus[h264_header.nalus_length++] = MakeNaluInfo(kPps); + nalu_payloads_.push_back(std::move(payload)); + return *this; +} + +H264Packet& H264Packet::Aud() { + auto& h264_header = H264Header(); + h264_header.nalus[h264_header.nalus_length++] = MakeNaluInfo(kAud); + nalu_payloads_.push_back({}); + return *this; +} + +H264Packet& H264Packet::Marker() { + marker_bit_ = true; + return *this; +} + +H264Packet& H264Packet::AsFirstFragment() { + first_fragment_ = true; + return *this; +} + +H264Packet& H264Packet::Time(uint32_t rtp_timestamp) { + rtp_timestamp_ = rtp_timestamp; + return *this; +} + +H264Packet& H264Packet::SeqNum(uint16_t rtp_seq_num) { + rtp_seq_num_ = rtp_seq_num; + return *this; +} + +std::unique_ptr<H26xPacketBuffer::Packet> H264Packet::Build() { + auto res = std::make_unique<H26xPacketBuffer::Packet>(); + + auto& h264_header = H264Header(); + switch (type_) { + case kH264FuA: { + RTC_CHECK_EQ(h264_header.nalus_length, 1); + res->video_payload = BuildFuaPayload(); + break; + } + case kH264SingleNalu: { + RTC_CHECK_EQ(h264_header.nalus_length, 1); + res->video_payload = BuildSingleNaluPayload(); + break; + } + case kH264StapA: { + RTC_CHECK_GT(h264_header.nalus_length, 1); + RTC_CHECK_LE(h264_header.nalus_length, kMaxNalusPerPacket); + res->video_payload = BuildStapAPayload(); + break; + } + } + + if (type_ == kH264FuA && !first_fragment_) { + h264_header.nalus_length = 0; + } + + h264_header.packetization_type = type_; + res->marker_bit = marker_bit_; + res->video_header = video_header_; + res->timestamp = rtp_timestamp_; + res->seq_num = rtp_seq_num_; + res->video_header.codec = kVideoCodecH264; + + return res; +} + +rtc::CopyOnWriteBuffer H264Packet::BuildFuaPayload() const { + return rtc::CopyOnWriteBuffer(nalu_payloads_[0]); +} + +rtc::CopyOnWriteBuffer H264Packet::BuildSingleNaluPayload() const { + rtc::CopyOnWriteBuffer res; + auto& h264_header = H264Header(); + res.AppendData(&h264_header.nalus[0].type, 1); + 
res.AppendData(nalu_payloads_[0]); + return res; +} + +rtc::CopyOnWriteBuffer H264Packet::BuildStapAPayload() const { + rtc::CopyOnWriteBuffer res; + + const uint8_t indicator = H264::NaluType::kStapA; + res.AppendData(&indicator, 1); + + auto& h264_header = H264Header(); + for (size_t i = 0; i < h264_header.nalus_length; ++i) { + // The two first bytes indicates the nalu segment size. + uint8_t length_as_array[2] = { + 0, static_cast<uint8_t>(nalu_payloads_[i].size() + 1)}; + res.AppendData(length_as_array); + + res.AppendData(&h264_header.nalus[i].type, 1); + res.AppendData(nalu_payloads_[i]); + } + return res; +} + +#ifdef RTC_ENABLE_H265 +class H265Packet { + public: + H265Packet() = default; + + H265Packet& Idr(std::vector<uint8_t> payload = {9, 9, 9}); + H265Packet& Slice(H265::NaluType type, + std::vector<uint8_t> payload = {9, 9, 9}); + H265Packet& Vps(std::vector<uint8_t> payload = {9, 9, 9}); + H265Packet& Sps(std::vector<uint8_t> payload = {9, 9, 9}); + H265Packet& SpsWithResolution(RenderResolution resolution, + std::vector<uint8_t> payload = {9, 9, 9}); + H265Packet& Pps(std::vector<uint8_t> payload = {9, 9, 9}); + H265Packet& Aud(); + H265Packet& Marker(); + H265Packet& AsFirstFragment(); + H265Packet& Time(uint32_t rtp_timestamp); + H265Packet& SeqNum(uint16_t rtp_seq_num); + + std::unique_ptr<H26xPacketBuffer::Packet> Build(); + + private: + H265Packet& StartCode(); + + RTPVideoHeader video_header_; + bool first_fragment_ = false; + bool marker_bit_ = false; + uint32_t rtp_timestamp_ = 0; + uint16_t rtp_seq_num_ = 0; + std::vector<std::vector<uint8_t>> nalu_payloads_; +}; + +H265Packet& H265Packet::Idr(std::vector<uint8_t> payload) { + return Slice(H265::NaluType::kIdrNLp, std::move(payload)); +} + +H265Packet& H265Packet::Slice(H265::NaluType type, + std::vector<uint8_t> payload) { + StartCode(); + // Nalu header. Assume layer ID is 0 and TID is 2. 
+ nalu_payloads_.push_back({static_cast<uint8_t>(type << 1), 0x02}); + nalu_payloads_.push_back(std::move(payload)); + return *this; +} + +H265Packet& H265Packet::Vps(std::vector<uint8_t> payload) { + return Slice(H265::NaluType::kVps, std::move(payload)); +} + +H265Packet& H265Packet::Sps(std::vector<uint8_t> payload) { + return Slice(H265::NaluType::kSps, std::move(payload)); +} + +H265Packet& H265Packet::SpsWithResolution(RenderResolution resolution, + std::vector<uint8_t> payload) { + video_header_.width = resolution.Width(); + video_header_.height = resolution.Height(); + return Sps(std::move(payload)); +} + +H265Packet& H265Packet::Pps(std::vector<uint8_t> payload) { + return Slice(H265::NaluType::kPps, std::move(payload)); +} + +H265Packet& H265Packet::Aud() { + return Slice(H265::NaluType::kAud, {}); +} + +H265Packet& H265Packet::Marker() { + marker_bit_ = true; + return *this; +} + +H265Packet& H265Packet::StartCode() { + nalu_payloads_.push_back({0x00, 0x00, 0x00, 0x01}); + return *this; +} + +std::unique_ptr<H26xPacketBuffer::Packet> H265Packet::Build() { + auto res = std::make_unique<H26xPacketBuffer::Packet>(); + res->marker_bit = marker_bit_; + res->video_header = video_header_; + res->timestamp = rtp_timestamp_; + res->seq_num = rtp_seq_num_; + res->video_header.codec = kVideoCodecH265; + res->video_payload = rtc::CopyOnWriteBuffer(); + for (const auto& payload : nalu_payloads_) { + res->video_payload.AppendData(payload); + } + + return res; +} + +H265Packet& H265Packet::AsFirstFragment() { + first_fragment_ = true; + return *this; +} + +H265Packet& H265Packet::Time(uint32_t rtp_timestamp) { + rtp_timestamp_ = rtp_timestamp; + return *this; +} + +H265Packet& H265Packet::SeqNum(uint16_t rtp_seq_num) { + rtp_seq_num_ = rtp_seq_num; + return *this; +} +#endif + +rtc::ArrayView<const uint8_t> PacketPayload( + const std::unique_ptr<H26xPacketBuffer::Packet>& packet) { + return packet->video_payload; +} + +std::vector<uint8_t> FlatVector( + const std::vector<std::vector<uint8_t>>& elems) { + std::vector<uint8_t> res; + for (const auto& elem : elems) { + res.insert(res.end(), elem.begin(), elem.end()); + } + return res; +} + +TEST(H26xPacketBufferTest, IdrIsKeyframe) { + H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/true); + + EXPECT_THAT( + packet_buffer + .InsertPacket(H264Packet(kH264SingleNalu).Idr().Marker().Build()) + .packets, + SizeIs(1)); +} + +TEST(H26xPacketBufferTest, IdrIsNotKeyframe) { + H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false); + + EXPECT_THAT( + packet_buffer + .InsertPacket(H264Packet(kH264SingleNalu).Idr().Marker().Build()) + .packets, + IsEmpty()); +} + +TEST(H26xPacketBufferTest, IdrIsKeyframeFuaRequiresFirstFragmet) { + H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/true); + + // Not marked as the first fragment + EXPECT_THAT( + packet_buffer + .InsertPacket(H264Packet(kH264FuA).Idr().SeqNum(0).Time(0).Build()) + .packets, + IsEmpty()); + + EXPECT_THAT( + packet_buffer + .InsertPacket( + H264Packet(kH264FuA).Idr().SeqNum(1).Time(0).Marker().Build()) + .packets, + IsEmpty()); + + // Marked as first fragment + EXPECT_THAT(packet_buffer + .InsertPacket(H264Packet(kH264FuA) + .Idr() + .SeqNum(2) + .Time(1) + .AsFirstFragment() + .Build()) + .packets, + IsEmpty()); + + EXPECT_THAT( + packet_buffer + .InsertPacket( + H264Packet(kH264FuA).Idr().SeqNum(3).Time(1).Marker().Build()) + .packets, + SizeIs(2)); +} + +TEST(H26xPacketBufferTest, SpsPpsIdrIsKeyframeSingleNalus) { + H26xPacketBuffer 
packet_buffer(/*h264_allow_idr_only_keyframes=*/false); + + RTC_UNUSED(packet_buffer.InsertPacket( + H264Packet(kH264SingleNalu).Sps().SeqNum(0).Time(0).Build())); + RTC_UNUSED(packet_buffer.InsertPacket( + H264Packet(kH264SingleNalu).Pps().SeqNum(1).Time(0).Build())); + EXPECT_THAT(packet_buffer + .InsertPacket(H264Packet(kH264SingleNalu) + .Idr() + .SeqNum(2) + .Time(0) + .Marker() + .Build()) + .packets, + SizeIs(3)); +} + +TEST(H26xPacketBufferTest, PpsIdrIsNotKeyframeSingleNalus) { + H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false); + + RTC_UNUSED(packet_buffer.InsertPacket( + H264Packet(kH264SingleNalu).Pps().SeqNum(0).Time(0).Build())); + EXPECT_THAT(packet_buffer + .InsertPacket(H264Packet(kH264SingleNalu) + .Idr() + .SeqNum(1) + .Time(0) + .Marker() + .Build()) + .packets, + IsEmpty()); +} + +TEST(H26xPacketBufferTest, SpsIdrIsNotKeyframeSingleNalus) { + H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false); + + RTC_UNUSED(packet_buffer.InsertPacket( + H264Packet(kH264SingleNalu).Sps().SeqNum(0).Time(0).Build())); + EXPECT_THAT(packet_buffer + .InsertPacket(H264Packet(kH264SingleNalu) + .Idr() + .SeqNum(1) + .Time(0) + .Marker() + .Build()) + .packets, + IsEmpty()); +} + +TEST(H26xPacketBufferTest, SpsPpsIdrIsKeyframeStapA) { + H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false); + + EXPECT_THAT(packet_buffer + .InsertPacket(H264Packet(kH264StapA) + .Sps() + .Pps() + .Idr() + .SeqNum(0) + .Time(0) + .Marker() + .Build()) + .packets, + SizeIs(1)); +} + +TEST(H26xPacketBufferTest, PpsIdrIsNotKeyframeStapA) { + H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false); + + EXPECT_THAT(packet_buffer + .InsertPacket(H264Packet(kH264StapA) + .Pps() + .Idr() + .SeqNum(0) + .Time(0) + .Marker() + .Build()) + .packets, + IsEmpty()); +} + +TEST(H26xPacketBufferTest, SpsIdrIsNotKeyframeStapA) { + H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false); + + EXPECT_THAT(packet_buffer + .InsertPacket(H264Packet(kH264StapA) + .Sps() + .Idr() + .SeqNum(2) + .Time(2) + .Marker() + .Build()) + .packets, + IsEmpty()); + + EXPECT_THAT(packet_buffer + .InsertPacket(H264Packet(kH264StapA) + .Sps() + .Pps() + .Idr() + .SeqNum(3) + .Time(3) + .Marker() + .Build()) + .packets, + SizeIs(1)); +} + +TEST(H26xPacketBufferTest, InsertingSpsPpsLastCompletesKeyframe) { + H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false); + + RTC_UNUSED(packet_buffer.InsertPacket( + H264Packet(kH264SingleNalu).Idr().SeqNum(2).Time(1).Marker().Build())); + + EXPECT_THAT( + packet_buffer + .InsertPacket( + H264Packet(kH264StapA).Sps().Pps().SeqNum(1).Time(1).Build()) + .packets, + SizeIs(2)); +} + +TEST(H26xPacketBufferTest, InsertingMidFuaCompletesFrame) { + H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false); + + EXPECT_THAT(packet_buffer + .InsertPacket(H264Packet(kH264StapA) + .Sps() + .Pps() + .Idr() + .SeqNum(0) + .Time(0) + .Marker() + .Build()) + .packets, + SizeIs(1)); + + RTC_UNUSED(packet_buffer.InsertPacket(H264Packet(kH264FuA) + .Slice() + .SeqNum(1) + .Time(1) + .AsFirstFragment() + .Build())); + RTC_UNUSED(packet_buffer.InsertPacket( + H264Packet(kH264FuA).Slice().SeqNum(3).Time(1).Marker().Build())); + EXPECT_THAT( + packet_buffer + .InsertPacket(H264Packet(kH264FuA).Slice().SeqNum(2).Time(1).Build()) + .packets, + SizeIs(3)); +} + +TEST(H26xPacketBufferTest, SeqNumJumpDoesNotCompleteFrame) { + H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false); + + 
EXPECT_THAT(packet_buffer + .InsertPacket(H264Packet(kH264StapA) + .Sps() + .Pps() + .Idr() + .SeqNum(0) + .Time(0) + .Marker() + .Build()) + .packets, + SizeIs(1)); + + EXPECT_THAT( + packet_buffer + .InsertPacket(H264Packet(kH264FuA).Slice().SeqNum(1).Time(1).Build()) + .packets, + IsEmpty()); + + // Add `kBufferSize` to make the index of the sequence number wrap and end up + // where the packet with sequence number 2 would have ended up. + EXPECT_THAT(packet_buffer + .InsertPacket(H264Packet(kH264FuA) + .Slice() + .SeqNum(2 + kBufferSize) + .Time(3) + .Marker() + .Build()) + .packets, + IsEmpty()); +} + +TEST(H26xPacketBufferTest, OldFramesAreNotCompletedAfterBufferWrap) { + H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false); + + EXPECT_THAT(packet_buffer + .InsertPacket(H264Packet(kH264SingleNalu) + .Slice() + .SeqNum(1) + .Time(1) + .Marker() + .Build()) + .packets, + IsEmpty()); + + // New keyframe, preceedes packet with sequence number 1 in the buffer. + EXPECT_THAT(packet_buffer + .InsertPacket(H264Packet(kH264StapA) + .Sps() + .Pps() + .Idr() + .SeqNum(kBufferSize) + .Time(kBufferSize) + .Marker() + .Build()) + .packets, + SizeIs(1)); +} + +TEST(H26xPacketBufferTest, OldPacketsDontBlockNewPackets) { + H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false); + EXPECT_THAT(packet_buffer + .InsertPacket(H264Packet(kH264StapA) + .Sps() + .Pps() + .Idr() + .SeqNum(kBufferSize) + .Time(kBufferSize) + .Marker() + .Build()) + .packets, + SizeIs(1)); + + RTC_UNUSED(packet_buffer.InsertPacket(H264Packet(kH264FuA) + .Slice() + .SeqNum(kBufferSize + 1) + .Time(kBufferSize + 1) + .AsFirstFragment() + .Build())); + + RTC_UNUSED(packet_buffer.InsertPacket(H264Packet(kH264FuA) + .Slice() + .SeqNum(kBufferSize + 3) + .Time(kBufferSize + 1) + .Marker() + .Build())); + EXPECT_THAT( + packet_buffer + .InsertPacket(H264Packet(kH264FuA).Slice().SeqNum(2).Time(2).Build()) + .packets, + IsEmpty()); + + EXPECT_THAT(packet_buffer + .InsertPacket(H264Packet(kH264FuA) + .Slice() + .SeqNum(kBufferSize + 2) + .Time(kBufferSize + 1) + .Build()) + .packets, + SizeIs(3)); +} + +TEST(H26xPacketBufferTest, OldPacketDoesntCompleteFrame) { + H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false); + + EXPECT_THAT(packet_buffer + .InsertPacket(H264Packet(kH264StapA) + .Sps() + .Pps() + .Idr() + .SeqNum(kBufferSize) + .Time(kBufferSize) + .Marker() + .Build()) + .packets, + SizeIs(1)); + + EXPECT_THAT(packet_buffer + .InsertPacket(H264Packet(kH264FuA) + .Slice() + .SeqNum(kBufferSize + 3) + .Time(kBufferSize + 1) + .Marker() + .Build()) + .packets, + IsEmpty()); + + EXPECT_THAT( + packet_buffer + .InsertPacket( + H264Packet(kH264FuA).Slice().SeqNum(2).Time(2).Marker().Build()) + .packets, + IsEmpty()); + + EXPECT_THAT(packet_buffer + .InsertPacket(H264Packet(kH264FuA) + .Slice() + .SeqNum(kBufferSize + 1) + .Time(kBufferSize + 1) + .AsFirstFragment() + .Build()) + .packets, + IsEmpty()); +} + +TEST(H26xPacketBufferTest, FrameBoundariesAreSet) { + H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false); + + auto key = packet_buffer.InsertPacket(H264Packet(kH264StapA) + .Sps() + .Pps() + .Idr() + .SeqNum(1) + .Time(1) + .Marker() + .Build()); + + ASSERT_THAT(key.packets, SizeIs(1)); + EXPECT_TRUE(key.packets[0]->video_header.is_first_packet_in_frame); + EXPECT_TRUE(key.packets[0]->video_header.is_last_packet_in_frame); + + RTC_UNUSED(packet_buffer.InsertPacket( + H264Packet(kH264FuA).Slice().SeqNum(2).Time(2).Build())); + 
RTC_UNUSED(packet_buffer.InsertPacket( + H264Packet(kH264FuA).Slice().SeqNum(3).Time(2).Build())); + auto delta = packet_buffer.InsertPacket( + H264Packet(kH264FuA).Slice().SeqNum(4).Time(2).Marker().Build()); + + ASSERT_THAT(delta.packets, SizeIs(3)); + EXPECT_TRUE(delta.packets[0]->video_header.is_first_packet_in_frame); + EXPECT_FALSE(delta.packets[0]->video_header.is_last_packet_in_frame); + + EXPECT_FALSE(delta.packets[1]->video_header.is_first_packet_in_frame); + EXPECT_FALSE(delta.packets[1]->video_header.is_last_packet_in_frame); + + EXPECT_FALSE(delta.packets[2]->video_header.is_first_packet_in_frame); + EXPECT_TRUE(delta.packets[2]->video_header.is_last_packet_in_frame); +} + +TEST(H26xPacketBufferTest, ResolutionSetOnFirstPacket) { + H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false); + + RTC_UNUSED(packet_buffer.InsertPacket( + H264Packet(kH264SingleNalu).Aud().SeqNum(1).Time(1).Build())); + auto res = packet_buffer.InsertPacket(H264Packet(kH264StapA) + .SpsWithResolution({320, 240}) + .Pps() + .Idr() + .SeqNum(2) + .Time(1) + .Marker() + .Build()); + + ASSERT_THAT(res.packets, SizeIs(2)); + EXPECT_THAT(res.packets[0]->video_header.width, Eq(320)); + EXPECT_THAT(res.packets[0]->video_header.height, Eq(240)); +} + +TEST(H26xPacketBufferTest, KeyframeAndDeltaFrameSetOnFirstPacket) { + H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false); + + RTC_UNUSED(packet_buffer.InsertPacket( + H264Packet(kH264SingleNalu).Aud().SeqNum(1).Time(1).Build())); + auto key = packet_buffer.InsertPacket(H264Packet(kH264StapA) + .Sps() + .Pps() + .Idr() + .SeqNum(2) + .Time(1) + .Marker() + .Build()); + + auto delta = packet_buffer.InsertPacket( + H264Packet(kH264SingleNalu).Slice().SeqNum(3).Time(2).Marker().Build()); + + ASSERT_THAT(key.packets, SizeIs(2)); + EXPECT_THAT(key.packets[0]->video_header.frame_type, + Eq(VideoFrameType::kVideoFrameKey)); + ASSERT_THAT(delta.packets, SizeIs(1)); + EXPECT_THAT(delta.packets[0]->video_header.frame_type, + Eq(VideoFrameType::kVideoFrameDelta)); +} + +TEST(H26xPacketBufferTest, RtpSeqNumWrap) { + H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false); + + RTC_UNUSED(packet_buffer.InsertPacket( + H264Packet(kH264StapA).Sps().Pps().SeqNum(0xffff).Time(0).Build())); + + RTC_UNUSED(packet_buffer.InsertPacket( + H264Packet(kH264FuA).Idr().SeqNum(0).Time(0).Build())); + EXPECT_THAT( + packet_buffer + .InsertPacket( + H264Packet(kH264FuA).Idr().SeqNum(1).Time(0).Marker().Build()) + .packets, + SizeIs(3)); +} + +TEST(H26xPacketBufferTest, StapAFixedBitstream) { + H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false); + + auto packets = packet_buffer + .InsertPacket(H264Packet(kH264StapA) + .Sps({1, 2, 3}) + .Pps({4, 5, 6}) + .Idr({7, 8, 9}) + .SeqNum(0) + .Time(0) + .Marker() + .Build()) + .packets; + + ASSERT_THAT(packets, SizeIs(1)); + EXPECT_THAT(PacketPayload(packets[0]), + ElementsAreArray(FlatVector({StartCode(), + {kSps, 1, 2, 3}, + StartCode(), + {kPps, 4, 5, 6}, + StartCode(), + {kIdr, 7, 8, 9}}))); +} + +TEST(H26xPacketBufferTest, SingleNaluFixedBitstream) { + H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false); + + RTC_UNUSED(packet_buffer.InsertPacket( + H264Packet(kH264SingleNalu).Sps({1, 2, 3}).SeqNum(0).Time(0).Build())); + RTC_UNUSED(packet_buffer.InsertPacket( + H264Packet(kH264SingleNalu).Pps({4, 5, 6}).SeqNum(1).Time(0).Build())); + auto packets = packet_buffer + .InsertPacket(H264Packet(kH264SingleNalu) + .Idr({7, 8, 9}) + .SeqNum(2) + .Time(0) + 
.Marker() + .Build()) + .packets; + + ASSERT_THAT(packets, SizeIs(3)); + EXPECT_THAT(PacketPayload(packets[0]), + ElementsAreArray(FlatVector({StartCode(), {kSps, 1, 2, 3}}))); + EXPECT_THAT(PacketPayload(packets[1]), + ElementsAreArray(FlatVector({StartCode(), {kPps, 4, 5, 6}}))); + EXPECT_THAT(PacketPayload(packets[2]), + ElementsAreArray(FlatVector({StartCode(), {kIdr, 7, 8, 9}}))); +} + +TEST(H26xPacketBufferTest, StapaAndFuaFixedBitstream) { + H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false); + + RTC_UNUSED(packet_buffer.InsertPacket(H264Packet(kH264StapA) + .Sps({1, 2, 3}) + .Pps({4, 5, 6}) + .SeqNum(0) + .Time(0) + .Build())); + RTC_UNUSED(packet_buffer.InsertPacket(H264Packet(kH264FuA) + .Idr({8, 8, 8}) + .SeqNum(1) + .Time(0) + .AsFirstFragment() + .Build())); + auto packets = packet_buffer + .InsertPacket(H264Packet(kH264FuA) + .Idr({9, 9, 9}) + .SeqNum(2) + .Time(0) + .Marker() + .Build()) + .packets; + + ASSERT_THAT(packets, SizeIs(3)); + EXPECT_THAT( + PacketPayload(packets[0]), + ElementsAreArray(FlatVector( + {StartCode(), {kSps, 1, 2, 3}, StartCode(), {kPps, 4, 5, 6}}))); + EXPECT_THAT(PacketPayload(packets[1]), + ElementsAreArray(FlatVector({StartCode(), {8, 8, 8}}))); + // Third is a continuation of second, so only the payload is expected. + EXPECT_THAT(PacketPayload(packets[2]), + ElementsAreArray(FlatVector({{9, 9, 9}}))); +} + +TEST(H26xPacketBufferTest, FullPacketBufferDoesNotBlockKeyframe) { + H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false); + + for (int i = 0; i < kBufferSize; ++i) { + EXPECT_THAT( + packet_buffer + .InsertPacket( + H264Packet(kH264SingleNalu).Slice().SeqNum(i).Time(0).Build()) + .packets, + IsEmpty()); + } + + EXPECT_THAT(packet_buffer + .InsertPacket(H264Packet(kH264StapA) + .Sps() + .Pps() + .Idr() + .SeqNum(kBufferSize) + .Time(1) + .Marker() + .Build()) + .packets, + SizeIs(1)); +} + +TEST(H26xPacketBufferTest, TooManyNalusInPacket) { + H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false); + + std::unique_ptr<H26xPacketBuffer::Packet> packet(H264Packet(kH264StapA) + .Sps() + .Pps() + .Idr() + .SeqNum(1) + .Time(1) + .Marker() + .Build()); + auto& h264_header = + absl::get<RTPVideoHeaderH264>(packet->video_header.video_type_header); + h264_header.nalus_length = kMaxNalusPerPacket + 1; + + EXPECT_THAT(packet_buffer.InsertPacket(std::move(packet)).packets, IsEmpty()); +} + +#ifdef RTC_ENABLE_H265 +TEST(H26xPacketBufferTest, H265VpsSpsPpsIdrIsKeyframe) { + H26xPacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false); + + EXPECT_THAT( + packet_buffer + .InsertPacket(H265Packet().Vps().Sps().Pps().Idr().Marker().Build()) + .packets, + SizeIs(1)); +} + +TEST(H26xPacketBufferTest, H265IrapIsNotKeyframe) { + std::vector<const H265::NaluType> irap_types = { + H265::NaluType::kBlaWLp, H265::NaluType::kBlaWRadl, + H265::NaluType::kBlaNLp, H265::NaluType::kIdrWRadl, + H265::NaluType::kIdrNLp, H265::NaluType::kCra, + H265::NaluType::kRsvIrapVcl23}; + for (const H265::NaluType type : irap_types) { + H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false); + + EXPECT_THAT( + packet_buffer.InsertPacket(H265Packet().Slice(type).Marker().Build()) + .packets, + IsEmpty()); + } +} + +TEST(H26xPacketBufferTest, H265IdrIsNotKeyFrame) { + H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false); + + EXPECT_THAT( + packet_buffer.InsertPacket(H265Packet().Idr().Marker().Build()).packets, + IsEmpty()); +} + +TEST(H26xPacketBufferTest, H265SpsPpsIdrIsNotKeyFrame) { + 
H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false); + + EXPECT_THAT(packet_buffer + .InsertPacket(H265Packet().Sps().Pps().Idr().Marker().Build()) + .packets, + IsEmpty()); +} + +TEST(H26xPacketBufferTest, H265VpsPpsIdrIsNotKeyFrame) { + H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false); + + EXPECT_THAT(packet_buffer + .InsertPacket(H265Packet().Vps().Pps().Idr().Marker().Build()) + .packets, + IsEmpty()); +} + +TEST(H26xPacketBufferTest, H265VpsSpsIdrIsNotKeyFrame) { + H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false); + + EXPECT_THAT(packet_buffer + .InsertPacket(H265Packet().Vps().Sps().Idr().Marker().Build()) + .packets, + IsEmpty()); +} + +TEST(H26xPacketBufferTest, H265VpsIdrIsNotKeyFrame) { + H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false); + + EXPECT_THAT( + packet_buffer.InsertPacket(H265Packet().Vps().Idr().Marker().Build()) + .packets, + IsEmpty()); +} + +TEST(H26xPacketBufferTest, H265SpsIdrIsNotKeyFrame) { + H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false); + + EXPECT_THAT( + packet_buffer.InsertPacket(H265Packet().Sps().Idr().Marker().Build()) + .packets, + IsEmpty()); +} + +TEST(H26xPacketBufferTest, H265PpsIdrIsNotKeyFrame) { + H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false); + + EXPECT_THAT( + packet_buffer.InsertPacket(H265Packet().Pps().Idr().Marker().Build()) + .packets, + IsEmpty()); +} + +TEST(H26xPacketBufferTest, H265ResolutionSetOnSpsPacket) { + H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false); + + RTC_UNUSED( + packet_buffer.InsertPacket(H265Packet().Aud().SeqNum(1).Time(1).Build())); + auto res = packet_buffer.InsertPacket(H265Packet() + .Vps() + .SpsWithResolution({320, 240}) + .Pps() + .Idr() + .SeqNum(2) + .Time(1) + .Marker() + .Build()); + + ASSERT_THAT(res.packets, SizeIs(2)); + EXPECT_THAT(res.packets[0]->video_header.width, Eq(320)); + EXPECT_THAT(res.packets[0]->video_header.height, Eq(240)); +} + +TEST(H26xPacketBufferTest, H265InsertingVpsSpsPpsLastCompletesKeyframe) { + H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false); + + RTC_UNUSED(packet_buffer.InsertPacket( + H265Packet().Idr().SeqNum(2).Time(1).Marker().Build())); + + EXPECT_THAT(packet_buffer + .InsertPacket( + H265Packet().Vps().Sps().Pps().SeqNum(1).Time(1).Build()) + .packets, + SizeIs(2)); +} +#endif // RTC_ENABLE_H265 + +} // namespace +} // namespace webrtc diff --git a/third_party/libwebrtc/modules/video_coding/include/video_error_codes.h b/third_party/libwebrtc/modules/video_coding/include/video_error_codes.h index 17146ce205..d7d54f3989 100644 --- a/third_party/libwebrtc/modules/video_coding/include/video_error_codes.h +++ b/third_party/libwebrtc/modules/video_coding/include/video_error_codes.h @@ -11,10 +11,6 @@ #ifndef MODULES_VIDEO_CODING_INCLUDE_VIDEO_ERROR_CODES_H_ #define MODULES_VIDEO_CODING_INCLUDE_VIDEO_ERROR_CODES_H_ -// NOTE: in sync with video_coding_module_defines.h - -// Define return values - #define WEBRTC_VIDEO_CODEC_TARGET_BITRATE_OVERSHOOT 5 #define WEBRTC_VIDEO_CODEC_OK_REQUEST_KEYFRAME 4 #define WEBRTC_VIDEO_CODEC_NO_OUTPUT 1 diff --git a/third_party/libwebrtc/modules/video_coding/include/video_error_codes_utils.cc b/third_party/libwebrtc/modules/video_coding/include/video_error_codes_utils.cc new file mode 100644 index 0000000000..7e2c08d518 --- /dev/null +++ b/third_party/libwebrtc/modules/video_coding/include/video_error_codes_utils.cc @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2024 The 
WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/video_coding/include/video_error_codes_utils.h" + +#include "modules/video_coding/include/video_error_codes.h" + +namespace webrtc { + +const char* WebRtcVideoCodecErrorToString(int32_t error_code) { + switch (error_code) { + case WEBRTC_VIDEO_CODEC_TARGET_BITRATE_OVERSHOOT: + return "WEBRTC_VIDEO_CODEC_TARGET_BITRATE_OVERSHOOT"; + case WEBRTC_VIDEO_CODEC_OK_REQUEST_KEYFRAME: + return "WEBRTC_VIDEO_CODEC_OK_REQUEST_KEYFRAME"; + case WEBRTC_VIDEO_CODEC_NO_OUTPUT: + return "WEBRTC_VIDEO_CODEC_NO_OUTPUT"; + case WEBRTC_VIDEO_CODEC_ERROR: + return "WEBRTC_VIDEO_CODEC_ERROR"; + case WEBRTC_VIDEO_CODEC_MEMORY: + return "WEBRTC_VIDEO_CODEC_MEMORY"; + case WEBRTC_VIDEO_CODEC_ERR_PARAMETER: + return "WEBRTC_VIDEO_CODEC_ERR_PARAMETER"; + case WEBRTC_VIDEO_CODEC_TIMEOUT: + return "WEBRTC_VIDEO_CODEC_TIMEOUT"; + case WEBRTC_VIDEO_CODEC_UNINITIALIZED: + return "WEBRTC_VIDEO_CODEC_UNINITIALIZED"; + case WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE: + return "WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE"; + case WEBRTC_VIDEO_CODEC_ERR_SIMULCAST_PARAMETERS_NOT_SUPPORTED: + return "WEBRTC_VIDEO_CODEC_ERR_SIMULCAST_PARAMETERS_NOT_SUPPORTED"; + case WEBRTC_VIDEO_CODEC_ENCODER_FAILURE: + return "WEBRTC_VIDEO_CODEC_ENCODER_FAILURE"; + default: + return "WEBRTC_VIDEO_CODEC_UNKNOWN"; + } +} + +} // namespace webrtc diff --git a/third_party/libwebrtc/modules/video_coding/include/video_error_codes_utils.h b/third_party/libwebrtc/modules/video_coding/include/video_error_codes_utils.h new file mode 100644 index 0000000000..ae17e29636 --- /dev/null +++ b/third_party/libwebrtc/modules/video_coding/include/video_error_codes_utils.h @@ -0,0 +1,22 @@ +/* + * Copyright (c) 2024 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef MODULES_VIDEO_CODING_INCLUDE_VIDEO_ERROR_CODES_UTILS_H_ +#define MODULES_VIDEO_CODING_INCLUDE_VIDEO_ERROR_CODES_UTILS_H_ + +#include <stdint.h> + +namespace webrtc { + +const char* WebRtcVideoCodecErrorToString(int32_t error_code); + +} // namespace webrtc + +#endif // MODULES_VIDEO_CODING_INCLUDE_VIDEO_ERROR_CODES_UTILS_H_ diff --git a/third_party/libwebrtc/modules/video_coding/utility/simulcast_test_fixture_impl.cc b/third_party/libwebrtc/modules/video_coding/utility/simulcast_test_fixture_impl.cc index c6e51e8068..ac076fde71 100644 --- a/third_party/libwebrtc/modules/video_coding/utility/simulcast_test_fixture_impl.cc +++ b/third_party/libwebrtc/modules/video_coding/utility/simulcast_test_fixture_impl.cc @@ -15,6 +15,8 @@ #include <memory> #include <vector> +#include "api/environment/environment.h" +#include "api/environment/environment_factory.h" #include "api/video/encoded_image.h" #include "api/video_codecs/sdp_video_format.h" #include "api/video_codecs/video_encoder.h" @@ -258,8 +260,9 @@ SimulcastTestFixtureImpl::SimulcastTestFixtureImpl( std::unique_ptr<VideoDecoderFactory> decoder_factory, SdpVideoFormat video_format) : codec_type_(PayloadStringToCodecType(video_format.name)) { + Environment env = CreateEnvironment(); encoder_ = encoder_factory->CreateVideoEncoder(video_format); - decoder_ = decoder_factory->CreateVideoDecoder(video_format); + decoder_ = decoder_factory->Create(env, video_format); SetUpCodec((codec_type_ == kVideoCodecVP8 || codec_type_ == kVideoCodecH264) ? kDefaultTemporalLayerProfile : kNoTemporalLayerProfile); diff --git a/third_party/libwebrtc/modules/video_coding/video_codec_initializer_unittest.cc b/third_party/libwebrtc/modules/video_coding/video_codec_initializer_unittest.cc index b0edab6004..60ef7aece0 100644 --- a/third_party/libwebrtc/modules/video_coding/video_codec_initializer_unittest.cc +++ b/third_party/libwebrtc/modules/video_coding/video_codec_initializer_unittest.cc @@ -631,4 +631,25 @@ TEST_F(VideoCodecInitializerTest, Vp9TwoSpatialLayersBitratesAreConsistent) { codec.spatialLayers[0].maxBitrate); } +TEST_F(VideoCodecInitializerTest, UpdatesVp9SpecificFieldsWithScalabilityMode) { + VideoEncoderConfig config; + config.codec_type = VideoCodecType::kVideoCodecVP9; + std::vector<VideoStream> streams = {DefaultStream()}; + streams[0].scalability_mode = ScalabilityMode::kL2T3_KEY; + + VideoCodec codec; + EXPECT_TRUE(VideoCodecInitializer::SetupCodec(config, streams, &codec)); + + EXPECT_EQ(codec.VP9()->numberOfSpatialLayers, 2u); + EXPECT_EQ(codec.VP9()->numberOfTemporalLayers, 3u); + EXPECT_EQ(codec.VP9()->interLayerPred, InterLayerPredMode::kOnKeyPic); + + streams[0].scalability_mode = ScalabilityMode::kS3T1; + EXPECT_TRUE(VideoCodecInitializer::SetupCodec(config, streams, &codec)); + + EXPECT_EQ(codec.VP9()->numberOfSpatialLayers, 3u); + EXPECT_EQ(codec.VP9()->numberOfTemporalLayers, 1u); + EXPECT_EQ(codec.VP9()->interLayerPred, InterLayerPredMode::kOff); +} + } // namespace webrtc diff --git a/third_party/libwebrtc/modules/video_coding/video_codec_interface_gn/moz.build b/third_party/libwebrtc/modules/video_coding/video_codec_interface_gn/moz.build index 141def9090..c0d139fc6d 100644 --- a/third_party/libwebrtc/modules/video_coding/video_codec_interface_gn/moz.build +++ b/third_party/libwebrtc/modules/video_coding/video_codec_interface_gn/moz.build @@ -32,6 +32,7 @@ LOCAL_INCLUDES += [ UNIFIED_SOURCES += [ "/third_party/libwebrtc/modules/video_coding/include/video_codec_interface.cc", + 
"/third_party/libwebrtc/modules/video_coding/include/video_error_codes_utils.cc", "/third_party/libwebrtc/modules/video_coding/video_coding_defines.cc" ] |