summaryrefslogtreecommitdiffstats
path: root/third_party/libwebrtc/modules
diff options
context:
space:
mode:
Diffstat (limited to '')
-rw-r--r--third_party/libwebrtc/modules/audio_coding/BUILD.gn3
-rw-r--r--third_party/libwebrtc/modules/audio_coding/acm2/acm_receiver.cc6
-rw-r--r--third_party/libwebrtc/modules/audio_coding/acm2/acm_receiver_unittest.cc59
-rw-r--r--third_party/libwebrtc/modules/audio_coding/codecs/g711/audio_decoder_pcm.cc10
-rw-r--r--third_party/libwebrtc/modules/audio_coding/codecs/g711/audio_decoder_pcm.h4
-rw-r--r--third_party/libwebrtc/modules/audio_coding/codecs/g722/audio_decoder_g722.cc5
-rw-r--r--third_party/libwebrtc/modules/audio_coding/codecs/g722/audio_decoder_g722.h2
-rw-r--r--third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_decoder_opus.cc23
-rw-r--r--third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_decoder_opus.h3
-rw-r--r--third_party/libwebrtc/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.cc5
-rw-r--r--third_party/libwebrtc/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.h2
-rw-r--r--third_party/libwebrtc/modules/audio_coding/neteq/background_noise.cc29
-rw-r--r--third_party/libwebrtc/modules/audio_coding/neteq/background_noise.h4
-rw-r--r--third_party/libwebrtc/modules/audio_coding/neteq/decision_logic.cc14
-rw-r--r--third_party/libwebrtc/modules/audio_coding/neteq/decision_logic_unittest.cc13
-rw-r--r--third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_packet_arrival_history.h9
-rw-r--r--third_party/libwebrtc/modules/audio_coding/neteq/neteq_impl.cc140
-rw-r--r--third_party/libwebrtc/modules/audio_coding/neteq/neteq_impl.h15
-rw-r--r--third_party/libwebrtc/modules/audio_coding/neteq/neteq_unittest.cc9
-rw-r--r--third_party/libwebrtc/modules/audio_coding/neteq/packet_arrival_history.cc131
-rw-r--r--third_party/libwebrtc/modules/audio_coding/neteq/packet_arrival_history.h79
-rw-r--r--third_party/libwebrtc/modules/audio_coding/neteq/packet_arrival_history_unittest.cc50
-rw-r--r--third_party/libwebrtc/modules/audio_coding/neteq/post_decode_vad.cc90
-rw-r--r--third_party/libwebrtc/modules/audio_coding/neteq/post_decode_vad.h71
-rw-r--r--third_party/libwebrtc/modules/audio_coding/neteq/post_decode_vad_unittest.cc25
-rw-r--r--third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_replacement_input.cc2
-rw-r--r--third_party/libwebrtc/modules/audio_coding/neteq_gn/moz.build1
-rw-r--r--third_party/libwebrtc/modules/audio_device/include/test_audio_device_unittest.cc20
-rw-r--r--third_party/libwebrtc/modules/audio_processing/agc2/input_volume_controller.h2
-rw-r--r--third_party/libwebrtc/modules/audio_processing/audio_processing_impl.cc2
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/goog_cc_network_control.cc3
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/loss_based_bwe_v2.cc11
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/loss_based_bwe_v2.h2
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/loss_based_bwe_v2_test.cc36
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/probe_controller.cc31
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/probe_controller.h3
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/probe_controller_unittest.cc36
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation.cc8
-rw-r--r--third_party/libwebrtc/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation.h1
-rw-r--r--third_party/libwebrtc/modules/desktop_capture/win/dxgi_output_duplicator.cc10
-rw-r--r--third_party/libwebrtc/modules/pacing/bitrate_prober.cc32
-rw-r--r--third_party/libwebrtc/modules/pacing/bitrate_prober.h6
-rw-r--r--third_party/libwebrtc/modules/pacing/pacing_controller.cc4
-rw-r--r--third_party/libwebrtc/modules/pacing/pacing_controller.h3
-rw-r--r--third_party/libwebrtc/modules/pacing/pacing_controller_unittest.cc41
-rw-r--r--third_party/libwebrtc/modules/pacing/packet_router.cc10
-rw-r--r--third_party/libwebrtc/modules/pacing/packet_router.h2
-rw-r--r--third_party/libwebrtc/modules/pacing/packet_router_unittest.cc25
-rw-r--r--third_party/libwebrtc/modules/pacing/task_queue_paced_sender.cc5
-rw-r--r--third_party/libwebrtc/modules/pacing/task_queue_paced_sender.h3
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/BUILD.gn12
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/create_video_rtp_depacketizer.cc8
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/frame_transformer_factory_unittest.cc2
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet_h265_common.h54
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packetizer_h265.cc58
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packetizer_h265_unittest.cc91
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate_unittest.cc2
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_video_unittest.cc2
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/rtp_video_stream_receiver_frame_transformer_delegate_unittest.cc2
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_h265.cc244
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_h265.h28
-rw-r--r--third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_h265_unittest.cc400
-rw-r--r--third_party/libwebrtc/modules/video_capture/linux/video_capture_v4l2.cc46
-rw-r--r--third_party/libwebrtc/modules/video_capture/linux/video_capture_v4l2.h8
-rw-r--r--third_party/libwebrtc/modules/video_coding/BUILD.gn26
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_encoder.cc18
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_encoder_unittest.cc25
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_unittest.cc1
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/multiplex/include/multiplex_decoder_adapter.h5
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/multiplex/multiplex_decoder_adapter.cc7
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/multiplex/test/multiplex_adapter_unittest.cc12
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_test.cc20
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/test/video_encoder_decoder_instantiation_tests.cc8
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_fixture_impl.cc6
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/vp8/include/vp8.h5
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc30
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.h6
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_simulcast_test.cc4
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc3
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.cc48
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.h8
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/vp9/svc_config.cc3
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/vp9/svc_config_unittest.cc20
-rw-r--r--third_party/libwebrtc/modules/video_coding/codecs/vp9/test/vp9_impl_unittest.cc109
-rw-r--r--third_party/libwebrtc/modules/video_coding/h264_packet_buffer.cc287
-rw-r--r--third_party/libwebrtc/modules/video_coding/h264_packet_buffer.h56
-rw-r--r--third_party/libwebrtc/modules/video_coding/h264_packet_buffer_unittest.cc778
-rw-r--r--third_party/libwebrtc/modules/video_coding/h26x_packet_buffer.cc337
-rw-r--r--third_party/libwebrtc/modules/video_coding/h26x_packet_buffer.h57
-rw-r--r--third_party/libwebrtc/modules/video_coding/h26x_packet_buffer_unittest.cc1058
-rw-r--r--third_party/libwebrtc/modules/video_coding/include/video_error_codes.h4
-rw-r--r--third_party/libwebrtc/modules/video_coding/include/video_error_codes_utils.cc46
-rw-r--r--third_party/libwebrtc/modules/video_coding/include/video_error_codes_utils.h22
-rw-r--r--third_party/libwebrtc/modules/video_coding/utility/simulcast_test_fixture_impl.cc5
-rw-r--r--third_party/libwebrtc/modules/video_coding/video_codec_initializer_unittest.cc21
-rw-r--r--third_party/libwebrtc/modules/video_coding/video_codec_interface_gn/moz.build1
96 files changed, 3288 insertions, 1805 deletions
diff --git a/third_party/libwebrtc/modules/audio_coding/BUILD.gn b/third_party/libwebrtc/modules/audio_coding/BUILD.gn
index ddd1fd2656..a49df7e7d2 100644
--- a/third_party/libwebrtc/modules/audio_coding/BUILD.gn
+++ b/third_party/libwebrtc/modules/audio_coding/BUILD.gn
@@ -689,8 +689,6 @@ rtc_library("neteq") {
"neteq/packet_arrival_history.h",
"neteq/packet_buffer.cc",
"neteq/packet_buffer.h",
- "neteq/post_decode_vad.cc",
- "neteq/post_decode_vad.h",
"neteq/preemptive_expand.cc",
"neteq/preemptive_expand.h",
"neteq/random_vector.cc",
@@ -1655,7 +1653,6 @@ if (rtc_include_tests) {
"neteq/normal_unittest.cc",
"neteq/packet_arrival_history_unittest.cc",
"neteq/packet_buffer_unittest.cc",
- "neteq/post_decode_vad_unittest.cc",
"neteq/random_vector_unittest.cc",
"neteq/red_payload_splitter_unittest.cc",
"neteq/reorder_optimizer_unittest.cc",
diff --git a/third_party/libwebrtc/modules/audio_coding/acm2/acm_receiver.cc b/third_party/libwebrtc/modules/audio_coding/acm2/acm_receiver.cc
index a5bf88e547..4deabdf7ff 100644
--- a/third_party/libwebrtc/modules/audio_coding/acm2/acm_receiver.cc
+++ b/third_party/libwebrtc/modules/audio_coding/acm2/acm_receiver.cc
@@ -50,11 +50,7 @@ std::unique_ptr<NetEq> CreateNetEq(
AcmReceiver::Config::Config(
rtc::scoped_refptr<AudioDecoderFactory> decoder_factory)
- : clock(*Clock::GetRealTimeClockRaw()), decoder_factory(decoder_factory) {
- // Post-decode VAD is disabled by default in NetEq, however, Audio
- // Conference Mixer relies on VAD decisions and fails without them.
- neteq_config.enable_post_decode_vad = true;
-}
+ : clock(*Clock::GetRealTimeClockRaw()), decoder_factory(decoder_factory) {}
AcmReceiver::Config::Config(const Config&) = default;
AcmReceiver::Config::~Config() = default;
diff --git a/third_party/libwebrtc/modules/audio_coding/acm2/acm_receiver_unittest.cc b/third_party/libwebrtc/modules/audio_coding/acm2/acm_receiver_unittest.cc
index cda6688157..8b35f4a621 100644
--- a/third_party/libwebrtc/modules/audio_coding/acm2/acm_receiver_unittest.cc
+++ b/third_party/libwebrtc/modules/audio_coding/acm2/acm_receiver_unittest.cc
@@ -190,9 +190,6 @@ class AcmReceiverTestFaxModeOldApi : public AcmReceiverTestOldApi {
const size_t output_channels = info.num_channels;
const size_t samples_per_ms = rtc::checked_cast<size_t>(
rtc::CheckedDivExact(output_sample_rate_hz, 1000));
- const AudioFrame::VADActivity expected_vad_activity =
- output_sample_rate_hz > 16000 ? AudioFrame::kVadActive
- : AudioFrame::kVadPassive;
// Expect the first output timestamp to be 5*fs/8000 samples before the
// first inserted timestamp (because of NetEq's look-ahead). (This value is
@@ -217,7 +214,6 @@ class AcmReceiverTestFaxModeOldApi : public AcmReceiverTestOldApi {
EXPECT_EQ(output_sample_rate_hz, frame.sample_rate_hz_);
EXPECT_EQ(output_channels, frame.num_channels_);
EXPECT_EQ(AudioFrame::kNormalSpeech, frame.speech_type_);
- EXPECT_EQ(expected_vad_activity, frame.vad_activity_);
EXPECT_FALSE(muted);
}
}
@@ -243,61 +239,6 @@ TEST_F(AcmReceiverTestFaxModeOldApi, MAYBE_VerifyAudioFrameOpus) {
}
#if defined(WEBRTC_ANDROID)
-#define MAYBE_PostdecodingVad DISABLED_PostdecodingVad
-#else
-#define MAYBE_PostdecodingVad PostdecodingVad
-#endif
-TEST_F(AcmReceiverTestOldApi, MAYBE_PostdecodingVad) {
- EXPECT_TRUE(config_.neteq_config.enable_post_decode_vad);
- constexpr int payload_type = 34;
- const SdpAudioFormat codec = {"L16", 16000, 1};
- const AudioCodecInfo info = SetEncoder(payload_type, codec);
- receiver_->SetCodecs({{payload_type, codec}});
- constexpr int kNumPackets = 5;
- AudioFrame frame;
- for (int n = 0; n < kNumPackets; ++n) {
- const int num_10ms_frames = InsertOnePacketOfSilence(info);
- for (int k = 0; k < num_10ms_frames; ++k) {
- bool muted;
- ASSERT_EQ(0, receiver_->GetAudio(info.sample_rate_hz, &frame, &muted));
- }
- }
- EXPECT_EQ(AudioFrame::kVadPassive, frame.vad_activity_);
-}
-
-class AcmReceiverTestPostDecodeVadPassiveOldApi : public AcmReceiverTestOldApi {
- protected:
- AcmReceiverTestPostDecodeVadPassiveOldApi() {
- config_.neteq_config.enable_post_decode_vad = false;
- }
-};
-
-#if defined(WEBRTC_ANDROID)
-#define MAYBE_PostdecodingVad DISABLED_PostdecodingVad
-#else
-#define MAYBE_PostdecodingVad PostdecodingVad
-#endif
-TEST_F(AcmReceiverTestPostDecodeVadPassiveOldApi, MAYBE_PostdecodingVad) {
- EXPECT_FALSE(config_.neteq_config.enable_post_decode_vad);
- constexpr int payload_type = 34;
- const SdpAudioFormat codec = {"L16", 16000, 1};
- const AudioCodecInfo info = SetEncoder(payload_type, codec);
- auto const value = encoder_factory_->QueryAudioEncoder(codec);
- ASSERT_TRUE(value.has_value());
- receiver_->SetCodecs({{payload_type, codec}});
- const int kNumPackets = 5;
- AudioFrame frame;
- for (int n = 0; n < kNumPackets; ++n) {
- const int num_10ms_frames = InsertOnePacketOfSilence(info);
- for (int k = 0; k < num_10ms_frames; ++k) {
- bool muted;
- ASSERT_EQ(0, receiver_->GetAudio(info.sample_rate_hz, &frame, &muted));
- }
- }
- EXPECT_EQ(AudioFrame::kVadUnknown, frame.vad_activity_);
-}
-
-#if defined(WEBRTC_ANDROID)
#define MAYBE_LastAudioCodec DISABLED_LastAudioCodec
#else
#define MAYBE_LastAudioCodec LastAudioCodec
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/g711/audio_decoder_pcm.cc b/third_party/libwebrtc/modules/audio_coding/codecs/g711/audio_decoder_pcm.cc
index 46ac671b30..ff7e919d9b 100644
--- a/third_party/libwebrtc/modules/audio_coding/codecs/g711/audio_decoder_pcm.cc
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/g711/audio_decoder_pcm.cc
@@ -58,6 +58,11 @@ int AudioDecoderPcmU::PacketDuration(const uint8_t* encoded,
return static_cast<int>(encoded_len / Channels());
}
+int AudioDecoderPcmU::PacketDurationRedundant(const uint8_t* encoded,
+ size_t encoded_len) const {
+ return PacketDuration(encoded, encoded_len);
+}
+
void AudioDecoderPcmA::Reset() {}
std::vector<AudioDecoder::ParseResult> AudioDecoderPcmA::ParsePayload(
@@ -99,4 +104,9 @@ int AudioDecoderPcmA::PacketDuration(const uint8_t* encoded,
return static_cast<int>(encoded_len / Channels());
}
+int AudioDecoderPcmA::PacketDurationRedundant(const uint8_t* encoded,
+ size_t encoded_len) const {
+ return PacketDuration(encoded, encoded_len);
+}
+
} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/g711/audio_decoder_pcm.h b/third_party/libwebrtc/modules/audio_coding/codecs/g711/audio_decoder_pcm.h
index 3fa42cba30..5531d6e7f0 100644
--- a/third_party/libwebrtc/modules/audio_coding/codecs/g711/audio_decoder_pcm.h
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/g711/audio_decoder_pcm.h
@@ -35,6 +35,8 @@ class AudioDecoderPcmU final : public AudioDecoder {
std::vector<ParseResult> ParsePayload(rtc::Buffer&& payload,
uint32_t timestamp) override;
int PacketDuration(const uint8_t* encoded, size_t encoded_len) const override;
+ int PacketDurationRedundant(const uint8_t* encoded,
+ size_t encoded_len) const override;
int SampleRateHz() const override;
size_t Channels() const override;
@@ -62,6 +64,8 @@ class AudioDecoderPcmA final : public AudioDecoder {
std::vector<ParseResult> ParsePayload(rtc::Buffer&& payload,
uint32_t timestamp) override;
int PacketDuration(const uint8_t* encoded, size_t encoded_len) const override;
+ int PacketDurationRedundant(const uint8_t* encoded,
+ size_t encoded_len) const override;
int SampleRateHz() const override;
size_t Channels() const override;
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/g722/audio_decoder_g722.cc b/third_party/libwebrtc/modules/audio_coding/codecs/g722/audio_decoder_g722.cc
index e969ed1189..bca47cea13 100644
--- a/third_party/libwebrtc/modules/audio_coding/codecs/g722/audio_decoder_g722.cc
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/g722/audio_decoder_g722.cc
@@ -63,6 +63,11 @@ int AudioDecoderG722Impl::PacketDuration(const uint8_t* encoded,
return static_cast<int>(2 * encoded_len / Channels());
}
+int AudioDecoderG722Impl::PacketDurationRedundant(const uint8_t* encoded,
+ size_t encoded_len) const {
+ return PacketDuration(encoded, encoded_len);
+}
+
int AudioDecoderG722Impl::SampleRateHz() const {
return 16000;
}
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/g722/audio_decoder_g722.h b/third_party/libwebrtc/modules/audio_coding/codecs/g722/audio_decoder_g722.h
index 5872fad5de..e7083c3fd6 100644
--- a/third_party/libwebrtc/modules/audio_coding/codecs/g722/audio_decoder_g722.h
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/g722/audio_decoder_g722.h
@@ -30,6 +30,8 @@ class AudioDecoderG722Impl final : public AudioDecoder {
std::vector<ParseResult> ParsePayload(rtc::Buffer&& payload,
uint32_t timestamp) override;
int PacketDuration(const uint8_t* encoded, size_t encoded_len) const override;
+ int PacketDurationRedundant(const uint8_t* encoded,
+ size_t encoded_len) const override;
int SampleRateHz() const override;
size_t Channels() const override;
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_decoder_opus.cc b/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_decoder_opus.cc
index cff9685548..0f53409f48 100644
--- a/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_decoder_opus.cc
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_decoder_opus.cc
@@ -17,12 +17,15 @@
#include "api/array_view.h"
#include "modules/audio_coding/codecs/opus/audio_coder_opus_common.h"
#include "rtc_base/checks.h"
+#include "system_wrappers/include/field_trial.h"
namespace webrtc {
AudioDecoderOpusImpl::AudioDecoderOpusImpl(size_t num_channels,
int sample_rate_hz)
- : channels_{num_channels}, sample_rate_hz_{sample_rate_hz} {
+ : channels_(num_channels),
+ sample_rate_hz_(sample_rate_hz),
+ generate_plc_(field_trial::IsEnabled("WebRTC-Audio-OpusGeneratePlc")) {
RTC_DCHECK(num_channels == 1 || num_channels == 2);
RTC_DCHECK(sample_rate_hz == 16000 || sample_rate_hz == 48000);
const int error =
@@ -125,4 +128,22 @@ size_t AudioDecoderOpusImpl::Channels() const {
return channels_;
}
+void AudioDecoderOpusImpl::GeneratePlc(
+ size_t requested_samples_per_channel,
+ rtc::BufferT<int16_t>* concealment_audio) {
+ if (!generate_plc_) {
+ return;
+ }
+ int plc_size = WebRtcOpus_PlcDuration(dec_state_) * channels_;
+ concealment_audio->AppendData(plc_size, [&](rtc::ArrayView<int16_t> decoded) {
+ int16_t temp_type = 1;
+ int ret =
+ WebRtcOpus_Decode(dec_state_, nullptr, 0, decoded.data(), &temp_type);
+ if (ret < 0) {
+ return 0;
+ }
+ return ret;
+ });
+}
+
} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_decoder_opus.h b/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_decoder_opus.h
index e8fd0440bc..2dd62fd4ee 100644
--- a/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_decoder_opus.h
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/opus/audio_decoder_opus.h
@@ -40,6 +40,8 @@ class AudioDecoderOpusImpl final : public AudioDecoder {
bool PacketHasFec(const uint8_t* encoded, size_t encoded_len) const override;
int SampleRateHz() const override;
size_t Channels() const override;
+ void GeneratePlc(size_t requested_samples_per_channel,
+ rtc::BufferT<int16_t>* concealment_audio) override;
protected:
int DecodeInternal(const uint8_t* encoded,
@@ -57,6 +59,7 @@ class AudioDecoderOpusImpl final : public AudioDecoder {
OpusDecInst* dec_state_;
const size_t channels_;
const int sample_rate_hz_;
+ const bool generate_plc_;
};
} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.cc b/third_party/libwebrtc/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.cc
index 7761efe8b3..1e2b5db331 100644
--- a/third_party/libwebrtc/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.cc
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.cc
@@ -67,4 +67,9 @@ int AudioDecoderPcm16B::PacketDuration(const uint8_t* encoded,
return static_cast<int>(encoded_len / (2 * Channels()));
}
+int AudioDecoderPcm16B::PacketDurationRedundant(const uint8_t* encoded,
+ size_t encoded_len) const {
+ return PacketDuration(encoded, encoded_len);
+}
+
} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.h b/third_party/libwebrtc/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.h
index 6f50161d3f..c31cc5d0a2 100644
--- a/third_party/libwebrtc/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.h
+++ b/third_party/libwebrtc/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.h
@@ -32,6 +32,8 @@ class AudioDecoderPcm16B final : public AudioDecoder {
std::vector<ParseResult> ParsePayload(rtc::Buffer&& payload,
uint32_t timestamp) override;
int PacketDuration(const uint8_t* encoded, size_t encoded_len) const override;
+ int PacketDurationRedundant(const uint8_t* encoded,
+ size_t encoded_len) const override;
int SampleRateHz() const override;
size_t Channels() const override;
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/background_noise.cc b/third_party/libwebrtc/modules/audio_coding/neteq/background_noise.cc
index 2c95d3b390..0c33dba47a 100644
--- a/third_party/libwebrtc/modules/audio_coding/neteq/background_noise.cc
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/background_noise.cc
@@ -17,7 +17,6 @@
#include "common_audio/signal_processing/include/signal_processing_library.h"
#include "modules/audio_coding/neteq/audio_multi_vector.h"
#include "modules/audio_coding/neteq/cross_correlation.h"
-#include "modules/audio_coding/neteq/post_decode_vad.h"
namespace webrtc {
namespace {
@@ -44,17 +43,11 @@ void BackgroundNoise::Reset() {
}
}
-bool BackgroundNoise::Update(const AudioMultiVector& input,
- const PostDecodeVad& vad) {
+bool BackgroundNoise::Update(const AudioMultiVector& sync_buffer) {
bool filter_params_saved = false;
- if (vad.running() && vad.active_speech()) {
- // Do not update the background noise parameters if we know that the signal
- // is active speech.
- return filter_params_saved;
- }
int32_t auto_correlation[kMaxLpcOrder + 1];
- int16_t fiter_output[kMaxLpcOrder + kResidualLength];
+ int16_t filter_output[kMaxLpcOrder + kResidualLength];
int16_t reflection_coefficients[kMaxLpcOrder];
int16_t lpc_coefficients[kMaxLpcOrder + 1];
@@ -62,14 +55,13 @@ bool BackgroundNoise::Update(const AudioMultiVector& input,
ChannelParameters& parameters = channel_parameters_[channel_ix];
int16_t temp_signal_array[kVecLen + kMaxLpcOrder] = {0};
int16_t* temp_signal = &temp_signal_array[kMaxLpcOrder];
- RTC_DCHECK_GE(input.Size(), kVecLen);
- input[channel_ix].CopyTo(kVecLen, input.Size() - kVecLen, temp_signal);
+ RTC_DCHECK_GE(sync_buffer.Size(), kVecLen);
+ sync_buffer[channel_ix].CopyTo(kVecLen, sync_buffer.Size() - kVecLen,
+ temp_signal);
int32_t sample_energy =
CalculateAutoCorrelation(temp_signal, kVecLen, auto_correlation);
- if ((!vad.running() &&
- sample_energy < parameters.energy_update_threshold) ||
- (vad.running() && !vad.active_speech())) {
+ if (sample_energy < parameters.energy_update_threshold) {
// Generate LPC coefficients.
if (auto_correlation[0] <= 0) {
// Center value in auto-correlation is not positive. Do not update.
@@ -95,10 +87,10 @@ bool BackgroundNoise::Update(const AudioMultiVector& input,
// Generate the CNG gain factor by looking at the energy of the residual.
WebRtcSpl_FilterMAFastQ12(temp_signal + kVecLen - kResidualLength,
- fiter_output, lpc_coefficients,
+ filter_output, lpc_coefficients,
kMaxLpcOrder + 1, kResidualLength);
int32_t residual_energy = WebRtcSpl_DotProductWithScale(
- fiter_output, fiter_output, kResidualLength, 0);
+ filter_output, filter_output, kResidualLength, 0);
// Check spectral flatness.
// Comparing the residual variance with the input signal variance tells
@@ -117,9 +109,8 @@ bool BackgroundNoise::Update(const AudioMultiVector& input,
filter_params_saved = true;
}
} else {
- // Will only happen if post-decode VAD is disabled and `sample_energy` is
- // not low enough. Increase the threshold for update so that it increases
- // by a factor 4 in 4 seconds.
+ // Will only happen if `sample_energy` is not low enough. Increase the
+ // threshold for update so that it increases by a factor 4 in 4 seconds.
IncrementEnergyThreshold(channel_ix, sample_energy);
}
}
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/background_noise.h b/third_party/libwebrtc/modules/audio_coding/neteq/background_noise.h
index 8e6d5890a0..9ef0131c92 100644
--- a/third_party/libwebrtc/modules/audio_coding/neteq/background_noise.h
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/background_noise.h
@@ -39,9 +39,9 @@ class BackgroundNoise {
void Reset();
// Updates the parameter estimates based on the signal currently in the
- // `sync_buffer`, and on the latest decision in `vad` if it is running.
+ // `sync_buffer`.
// Returns true if the filter parameters are updated.
- bool Update(const AudioMultiVector& sync_buffer, const PostDecodeVad& vad);
+ bool Update(const AudioMultiVector& sync_buffer);
// Generates background noise given a random vector and writes the output to
// `buffer`.
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/decision_logic.cc b/third_party/libwebrtc/modules/audio_coding/neteq/decision_logic.cc
index 6648fd8709..f68c05767d 100644
--- a/third_party/libwebrtc/modules/audio_coding/neteq/decision_logic.cc
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/decision_logic.cc
@@ -14,7 +14,6 @@
#include <cstdint>
#include <memory>
-#include <string>
#include "absl/types/optional.h"
#include "api/neteq/neteq.h"
@@ -22,7 +21,6 @@
#include "modules/audio_coding/neteq/packet_arrival_history.h"
#include "modules/audio_coding/neteq/packet_buffer.h"
#include "rtc_base/checks.h"
-#include "rtc_base/experiments/field_trial_parser.h"
#include "rtc_base/experiments/struct_parameters_parser.h"
#include "rtc_base/logging.h"
#include "rtc_base/numerics/safe_conversions.h"
@@ -102,6 +100,7 @@ DecisionLogic::DecisionLogic(
packet_arrival_history_(packet_arrival_history
? std::move(packet_arrival_history)
: std::make_unique<PacketArrivalHistory>(
+ config.tick_timer,
config_.packet_history_size_ms)),
tick_timer_(config.tick_timer),
disallow_time_stretching_(!config.allow_time_stretching),
@@ -221,14 +220,14 @@ absl::optional<int> DecisionLogic::PacketArrived(
packet_length_samples_ = info.packet_length_samples;
delay_manager_->SetPacketAudioLength(packet_length_samples_ * 1000 / fs_hz);
}
- int64_t time_now_ms = tick_timer_->ticks() * tick_timer_->ms_per_tick();
- packet_arrival_history_->Insert(info.main_timestamp, time_now_ms);
- if (packet_arrival_history_->size() < 2) {
+ bool inserted = packet_arrival_history_->Insert(info.main_timestamp,
+ info.packet_length_samples);
+ if (!inserted || packet_arrival_history_->size() < 2) {
// No meaningful delay estimate unless at least 2 packets have arrived.
return absl::nullopt;
}
int arrival_delay_ms =
- packet_arrival_history_->GetDelayMs(info.main_timestamp, time_now_ms);
+ packet_arrival_history_->GetDelayMs(info.main_timestamp);
bool reordered =
!packet_arrival_history_->IsNewestRtpTimestamp(info.main_timestamp);
delay_manager_->Update(arrival_delay_ms, reordered);
@@ -464,8 +463,7 @@ int DecisionLogic::GetPlayoutDelayMs(
NetEqController::NetEqStatus status) const {
uint32_t playout_timestamp =
status.target_timestamp - status.sync_buffer_samples;
- return packet_arrival_history_->GetDelayMs(
- playout_timestamp, tick_timer_->ticks() * tick_timer_->ms_per_tick());
+ return packet_arrival_history_->GetDelayMs(playout_timestamp);
}
} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/decision_logic_unittest.cc b/third_party/libwebrtc/modules/audio_coding/neteq/decision_logic_unittest.cc
index 9e9902af50..4b306f2639 100644
--- a/third_party/libwebrtc/modules/audio_coding/neteq/decision_logic_unittest.cc
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/decision_logic_unittest.cc
@@ -14,12 +14,10 @@
#include "api/neteq/neteq_controller.h"
#include "api/neteq/tick_timer.h"
-#include "modules/audio_coding/neteq/buffer_level_filter.h"
#include "modules/audio_coding/neteq/delay_manager.h"
#include "modules/audio_coding/neteq/mock/mock_buffer_level_filter.h"
#include "modules/audio_coding/neteq/mock/mock_delay_manager.h"
#include "modules/audio_coding/neteq/mock/mock_packet_arrival_history.h"
-#include "test/field_trial.h"
#include "test/gtest.h"
namespace webrtc {
@@ -64,7 +62,8 @@ class DecisionLogicTest : public ::testing::Test {
mock_delay_manager_ = delay_manager.get();
auto buffer_level_filter = std::make_unique<MockBufferLevelFilter>();
mock_buffer_level_filter_ = buffer_level_filter.get();
- auto packet_arrival_history = std::make_unique<MockPacketArrivalHistory>();
+ auto packet_arrival_history =
+ std::make_unique<MockPacketArrivalHistory>(&tick_timer_);
mock_packet_arrival_history_ = packet_arrival_history.get();
decision_logic_ = std::make_unique<DecisionLogic>(
config, std::move(delay_manager), std::move(buffer_level_filter),
@@ -82,7 +81,7 @@ class DecisionLogicTest : public ::testing::Test {
TEST_F(DecisionLogicTest, NormalOperation) {
EXPECT_CALL(*mock_delay_manager_, TargetDelayMs())
.WillRepeatedly(Return(100));
- EXPECT_CALL(*mock_packet_arrival_history_, GetDelayMs(_, _))
+ EXPECT_CALL(*mock_packet_arrival_history_, GetDelayMs(_))
.WillRepeatedly(Return(100));
EXPECT_CALL(*mock_packet_arrival_history_, GetMaxDelayMs())
.WillRepeatedly(Return(0));
@@ -98,7 +97,7 @@ TEST_F(DecisionLogicTest, NormalOperation) {
TEST_F(DecisionLogicTest, Accelerate) {
EXPECT_CALL(*mock_delay_manager_, TargetDelayMs())
.WillRepeatedly(Return(100));
- EXPECT_CALL(*mock_packet_arrival_history_, GetDelayMs(_, _))
+ EXPECT_CALL(*mock_packet_arrival_history_, GetDelayMs(_))
.WillRepeatedly(Return(150));
EXPECT_CALL(*mock_packet_arrival_history_, GetMaxDelayMs())
.WillRepeatedly(Return(0));
@@ -114,7 +113,7 @@ TEST_F(DecisionLogicTest, Accelerate) {
TEST_F(DecisionLogicTest, FastAccelerate) {
EXPECT_CALL(*mock_delay_manager_, TargetDelayMs())
.WillRepeatedly(Return(100));
- EXPECT_CALL(*mock_packet_arrival_history_, GetDelayMs(_, _))
+ EXPECT_CALL(*mock_packet_arrival_history_, GetDelayMs(_))
.WillRepeatedly(Return(500));
EXPECT_CALL(*mock_packet_arrival_history_, GetMaxDelayMs())
.WillRepeatedly(Return(0));
@@ -130,7 +129,7 @@ TEST_F(DecisionLogicTest, FastAccelerate) {
TEST_F(DecisionLogicTest, PreemptiveExpand) {
EXPECT_CALL(*mock_delay_manager_, TargetDelayMs())
.WillRepeatedly(Return(100));
- EXPECT_CALL(*mock_packet_arrival_history_, GetDelayMs(_, _))
+ EXPECT_CALL(*mock_packet_arrival_history_, GetDelayMs(_))
.WillRepeatedly(Return(50));
EXPECT_CALL(*mock_packet_arrival_history_, GetMaxDelayMs())
.WillRepeatedly(Return(0));
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_packet_arrival_history.h b/third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_packet_arrival_history.h
index 1b2080cd94..d4217cf2f8 100644
--- a/third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_packet_arrival_history.h
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/mock/mock_packet_arrival_history.h
@@ -11,6 +11,7 @@
#ifndef MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_PACKET_ARRIVAL_HISTORY_H_
#define MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_PACKET_ARRIVAL_HISTORY_H_
+#include "api/neteq/tick_timer.h"
#include "modules/audio_coding/neteq/packet_arrival_history.h"
#include "test/gmock.h"
@@ -18,12 +19,10 @@ namespace webrtc {
class MockPacketArrivalHistory : public PacketArrivalHistory {
public:
- MockPacketArrivalHistory() : PacketArrivalHistory(0) {}
+ MockPacketArrivalHistory(const TickTimer* tick_timer)
+ : PacketArrivalHistory(tick_timer, 0) {}
- MOCK_METHOD(int,
- GetDelayMs,
- (uint32_t rtp_timestamp, int64_t time_ms),
- (const override));
+ MOCK_METHOD(int, GetDelayMs, (uint32_t rtp_timestamp), (const override));
MOCK_METHOD(int, GetMaxDelayMs, (), (const override));
};
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/neteq_impl.cc b/third_party/libwebrtc/modules/audio_coding/neteq/neteq_impl.cc
index e5c8bf6c08..6a76096b49 100644
--- a/third_party/libwebrtc/modules/audio_coding/neteq/neteq_impl.cc
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/neteq_impl.cc
@@ -20,6 +20,7 @@
#include <vector>
#include "api/audio_codecs/audio_decoder.h"
+#include "api/neteq/neteq_controller.h"
#include "api/neteq/tick_timer.h"
#include "common_audio/signal_processing/include/signal_processing_library.h"
#include "modules/audio_coding/codecs/cng/webrtc_cng.h"
@@ -36,7 +37,6 @@
#include "modules/audio_coding/neteq/normal.h"
#include "modules/audio_coding/neteq/packet.h"
#include "modules/audio_coding/neteq/packet_buffer.h"
-#include "modules/audio_coding/neteq/post_decode_vad.h"
#include "modules/audio_coding/neteq/preemptive_expand.h"
#include "modules/audio_coding/neteq/red_payload_splitter.h"
#include "modules/audio_coding/neteq/statistics_calculator.h"
@@ -50,6 +50,7 @@
#include "rtc_base/strings/audio_format_to_string.h"
#include "rtc_base/trace_event.h"
#include "system_wrappers/include/clock.h"
+#include "system_wrappers/include/field_trial.h"
namespace webrtc {
namespace {
@@ -70,49 +71,26 @@ std::unique_ptr<NetEqController> CreateNetEqController(
return controller_factory.CreateNetEqController(config);
}
-void SetAudioFrameActivityAndType(bool vad_enabled,
- NetEqImpl::OutputType type,
- AudioFrame::VADActivity last_vad_activity,
- AudioFrame* audio_frame) {
+AudioFrame::SpeechType ToSpeechType(NetEqImpl::OutputType type) {
switch (type) {
case NetEqImpl::OutputType::kNormalSpeech: {
- audio_frame->speech_type_ = AudioFrame::kNormalSpeech;
- audio_frame->vad_activity_ = AudioFrame::kVadActive;
- break;
- }
- case NetEqImpl::OutputType::kVadPassive: {
- // This should only be reached if the VAD is enabled.
- RTC_DCHECK(vad_enabled);
- audio_frame->speech_type_ = AudioFrame::kNormalSpeech;
- audio_frame->vad_activity_ = AudioFrame::kVadPassive;
- break;
+ return AudioFrame::kNormalSpeech;
}
case NetEqImpl::OutputType::kCNG: {
- audio_frame->speech_type_ = AudioFrame::kCNG;
- audio_frame->vad_activity_ = AudioFrame::kVadPassive;
- break;
+ return AudioFrame::kCNG;
}
case NetEqImpl::OutputType::kPLC: {
- audio_frame->speech_type_ = AudioFrame::kPLC;
- audio_frame->vad_activity_ = last_vad_activity;
- break;
+ return AudioFrame::kPLC;
}
case NetEqImpl::OutputType::kPLCCNG: {
- audio_frame->speech_type_ = AudioFrame::kPLCCNG;
- audio_frame->vad_activity_ = AudioFrame::kVadPassive;
- break;
+ return AudioFrame::kPLCCNG;
}
case NetEqImpl::OutputType::kCodecPLC: {
- audio_frame->speech_type_ = AudioFrame::kCodecPLC;
- audio_frame->vad_activity_ = last_vad_activity;
- break;
+ return AudioFrame::kCodecPLC;
}
default:
RTC_DCHECK_NOTREACHED();
- }
- if (!vad_enabled) {
- // Always set kVadUnknown when receive VAD is inactive.
- audio_frame->vad_activity_ = AudioFrame::kVadUnknown;
+ return AudioFrame::kUndefined;
}
}
@@ -169,11 +147,12 @@ NetEqImpl::NetEqImpl(const NetEq::Config& config,
packet_buffer_(std::move(deps.packet_buffer)),
red_payload_splitter_(std::move(deps.red_payload_splitter)),
timestamp_scaler_(std::move(deps.timestamp_scaler)),
- vad_(new PostDecodeVad()),
expand_factory_(std::move(deps.expand_factory)),
accelerate_factory_(std::move(deps.accelerate_factory)),
preemptive_expand_factory_(std::move(deps.preemptive_expand_factory)),
stats_(std::move(deps.stats)),
+ enable_fec_delay_adaptation_(
+ !field_trial::IsDisabled("WebRTC-Audio-NetEqFecDelayAdaptation")),
controller_(std::move(deps.neteq_controller)),
last_mode_(Mode::kNormal),
decoded_buffer_length_(kMaxFrameSize),
@@ -211,10 +190,6 @@ NetEqImpl::NetEqImpl(const NetEq::Config& config,
if (create_components) {
SetSampleRateAndChannels(fs, 1); // Default is 1 channel.
}
- RTC_DCHECK(!vad_->enabled());
- if (config.enable_post_decode_vad) {
- vad_->Enable();
- }
}
NetEqImpl::~NetEqImpl() = default;
@@ -252,9 +227,7 @@ int NetEqImpl::GetAudio(AudioFrame* audio_frame,
audio_frame->sample_rate_hz_,
rtc::dchecked_cast<int>(audio_frame->samples_per_channel_ * 100));
RTC_DCHECK_EQ(*muted, audio_frame->muted());
- SetAudioFrameActivityAndType(vad_->enabled(), LastOutputType(),
- last_vad_activity_, audio_frame);
- last_vad_activity_ = audio_frame->vad_activity_;
+ audio_frame->speech_type_ = ToSpeechType(LastOutputType());
last_output_sample_rate_hz_ = audio_frame->sample_rate_hz_;
RTC_DCHECK(last_output_sample_rate_hz_ == 8000 ||
last_output_sample_rate_hz_ == 16000 ||
@@ -398,18 +371,6 @@ NetEqOperationsAndState NetEqImpl::GetOperationsAndState() const {
return result;
}
-void NetEqImpl::EnableVad() {
- MutexLock lock(&mutex_);
- RTC_DCHECK(vad_.get());
- vad_->Enable();
-}
-
-void NetEqImpl::DisableVad() {
- MutexLock lock(&mutex_);
- RTC_DCHECK(vad_.get());
- vad_->Disable();
-}
-
absl::optional<uint32_t> NetEqImpl::GetPlayoutTimestamp() const {
MutexLock lock(&mutex_);
if (first_packet_ || last_mode_ == Mode::kRfc3389Cng ||
@@ -695,6 +656,7 @@ int NetEqImpl::InsertPacketInternal(const RTPHeader& rtp_header,
packet_buffer_->Flush();
buffer_flush_occured = true;
}
+ NetEqController::PacketArrivedInfo info = ToPacketArrivedInfo(packet);
int return_val = packet_buffer_->InsertPacket(std::move(packet));
if (return_val == PacketBuffer::kFlushed) {
buffer_flush_occured = true;
@@ -702,6 +664,15 @@ int NetEqImpl::InsertPacketInternal(const RTPHeader& rtp_header,
// An error occurred.
return kOtherError;
}
+ if (enable_fec_delay_adaptation_) {
+ info.buffer_flush = buffer_flush_occured;
+ const bool should_update_stats = !new_codec_ && !buffer_flush_occured;
+ auto relative_delay =
+ controller_->PacketArrived(fs_hz_, should_update_stats, info);
+ if (relative_delay) {
+ stats_->RelativePacketArrivalDelay(relative_delay.value());
+ }
+ }
}
if (buffer_flush_occured) {
@@ -752,24 +723,26 @@ int NetEqImpl::InsertPacketInternal(const RTPHeader& rtp_header,
}
}
- const DecoderDatabase::DecoderInfo* dec_info =
- decoder_database_->GetDecoderInfo(main_payload_type);
- RTC_DCHECK(dec_info); // Already checked that the payload type is known.
-
- NetEqController::PacketArrivedInfo info;
- info.is_cng_or_dtmf = dec_info->IsComfortNoise() || dec_info->IsDtmf();
- info.packet_length_samples =
- number_of_primary_packets * decoder_frame_length_;
- info.main_timestamp = main_timestamp;
- info.main_sequence_number = main_sequence_number;
- info.is_dtx = is_dtx;
- info.buffer_flush = buffer_flush_occured;
-
- const bool should_update_stats = !new_codec_;
- auto relative_delay =
- controller_->PacketArrived(fs_hz_, should_update_stats, info);
- if (relative_delay) {
- stats_->RelativePacketArrivalDelay(relative_delay.value());
+ if (!enable_fec_delay_adaptation_) {
+ const DecoderDatabase::DecoderInfo* dec_info =
+ decoder_database_->GetDecoderInfo(main_payload_type);
+ RTC_DCHECK(dec_info); // Already checked that the payload type is known.
+
+ NetEqController::PacketArrivedInfo info;
+ info.is_cng_or_dtmf = dec_info->IsComfortNoise() || dec_info->IsDtmf();
+ info.packet_length_samples =
+ number_of_primary_packets * decoder_frame_length_;
+ info.main_timestamp = main_timestamp;
+ info.main_sequence_number = main_sequence_number;
+ info.is_dtx = is_dtx;
+ info.buffer_flush = buffer_flush_occured;
+
+ const bool should_update_stats = !new_codec_;
+ auto relative_delay =
+ controller_->PacketArrived(fs_hz_, should_update_stats, info);
+ if (relative_delay) {
+ stats_->RelativePacketArrivalDelay(relative_delay.value());
+ }
}
return 0;
}
@@ -858,11 +831,8 @@ int NetEqImpl::GetAudioInternal(AudioFrame* audio_frame,
last_decoded_type_ = speech_type;
}
- RTC_DCHECK(vad_.get());
bool sid_frame_available =
(operation == Operation::kRfc3389Cng && !packet_list.empty());
- vad_->Update(decoded_buffer_.get(), static_cast<size_t>(length), speech_type,
- sid_frame_available, fs_hz_);
// This is the criterion that we did decode some data through the speech
// decoder, and the operation resulted in comfort noise.
@@ -1012,7 +982,7 @@ int NetEqImpl::GetAudioInternal(AudioFrame* audio_frame,
(last_mode_ == Mode::kPreemptiveExpandFail) ||
(last_mode_ == Mode::kRfc3389Cng) ||
(last_mode_ == Mode::kCodecInternalCng)) {
- background_noise_->Update(*sync_buffer_, *vad_.get());
+ background_noise_->Update(*sync_buffer_);
}
if (operation == Operation::kDtmf) {
@@ -2088,10 +2058,6 @@ void NetEqImpl::SetSampleRateAndChannels(int fs_hz, size_t channels) {
if (cng_decoder)
cng_decoder->Reset();
- // Reinit post-decode VAD with new sample rate.
- RTC_DCHECK(vad_.get()); // Cannot be NULL here.
- vad_->Init();
-
// Delete algorithm buffer and create a new one.
algorithm_buffer_.reset(new AudioMultiVector(channels));
@@ -2132,7 +2098,6 @@ void NetEqImpl::SetSampleRateAndChannels(int fs_hz, size_t channels) {
}
NetEqImpl::OutputType NetEqImpl::LastOutputType() {
- RTC_DCHECK(vad_.get());
RTC_DCHECK(expand_.get());
if (last_mode_ == Mode::kCodecInternalCng ||
last_mode_ == Mode::kRfc3389Cng) {
@@ -2142,12 +2107,27 @@ NetEqImpl::OutputType NetEqImpl::LastOutputType() {
return OutputType::kPLCCNG;
} else if (last_mode_ == Mode::kExpand) {
return OutputType::kPLC;
- } else if (vad_->running() && !vad_->active_speech()) {
- return OutputType::kVadPassive;
} else if (last_mode_ == Mode::kCodecPlc) {
return OutputType::kCodecPLC;
} else {
return OutputType::kNormalSpeech;
}
}
+
+NetEqController::PacketArrivedInfo NetEqImpl::ToPacketArrivedInfo(
+ const Packet& packet) const {
+ const DecoderDatabase::DecoderInfo* dec_info =
+ decoder_database_->GetDecoderInfo(packet.payload_type);
+
+ NetEqController::PacketArrivedInfo info;
+ info.is_cng_or_dtmf =
+ dec_info && (dec_info->IsComfortNoise() || dec_info->IsDtmf());
+ info.packet_length_samples =
+ packet.frame ? packet.frame->Duration() : decoder_frame_length_;
+ info.main_timestamp = packet.timestamp;
+ info.main_sequence_number = packet.sequence_number;
+ info.is_dtx = packet.frame && packet.frame->IsDtxPacket();
+ return info;
+}
+
} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/neteq_impl.h b/third_party/libwebrtc/modules/audio_coding/neteq/neteq_impl.h
index f8f2b06410..eed7645e7d 100644
--- a/third_party/libwebrtc/modules/audio_coding/neteq/neteq_impl.h
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/neteq_impl.h
@@ -48,7 +48,6 @@ class Merge;
class NackTracker;
class Normal;
class RedPayloadSplitter;
-class PostDecodeVad;
class PreemptiveExpand;
class RandomVector;
class SyncBuffer;
@@ -171,13 +170,6 @@ class NetEqImpl : public webrtc::NetEq {
NetEqOperationsAndState GetOperationsAndState() const override;
- // Enables post-decode VAD. When enabled, GetAudio() will return
- // kOutputVADPassive when the signal contains no speech.
- void EnableVad() override;
-
- // Disables post-decode VAD.
- void DisableVad() override;
-
absl::optional<uint32_t> GetPlayoutTimestamp() const override;
int last_output_sample_rate_hz() const override;
@@ -342,6 +334,9 @@ class NetEqImpl : public webrtc::NetEq {
NetEqNetworkStatistics CurrentNetworkStatisticsInternal() const
RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ NetEqController::PacketArrivedInfo ToPacketArrivedInfo(
+ const Packet& packet) const RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
Clock* const clock_;
mutable Mutex mutex_;
@@ -356,13 +351,13 @@ class NetEqImpl : public webrtc::NetEq {
RTC_GUARDED_BY(mutex_);
const std::unique_ptr<TimestampScaler> timestamp_scaler_
RTC_GUARDED_BY(mutex_);
- const std::unique_ptr<PostDecodeVad> vad_ RTC_GUARDED_BY(mutex_);
const std::unique_ptr<ExpandFactory> expand_factory_ RTC_GUARDED_BY(mutex_);
const std::unique_ptr<AccelerateFactory> accelerate_factory_
RTC_GUARDED_BY(mutex_);
const std::unique_ptr<PreemptiveExpandFactory> preemptive_expand_factory_
RTC_GUARDED_BY(mutex_);
const std::unique_ptr<StatisticsCalculator> stats_ RTC_GUARDED_BY(mutex_);
+ const bool enable_fec_delay_adaptation_ RTC_GUARDED_BY(mutex_);
std::unique_ptr<BackgroundNoise> background_noise_ RTC_GUARDED_BY(mutex_);
std::unique_ptr<NetEqController> controller_ RTC_GUARDED_BY(mutex_);
@@ -397,8 +392,6 @@ class NetEqImpl : public webrtc::NetEq {
std::unique_ptr<NackTracker> nack_ RTC_GUARDED_BY(mutex_);
bool nack_enabled_ RTC_GUARDED_BY(mutex_);
const bool enable_muted_state_ RTC_GUARDED_BY(mutex_);
- AudioFrame::VADActivity last_vad_activity_ RTC_GUARDED_BY(mutex_) =
- AudioFrame::kVadPassive;
std::unique_ptr<TickTimer::Stopwatch> generated_noise_stopwatch_
RTC_GUARDED_BY(mutex_);
std::vector<RtpPacketInfo> last_decoded_packet_infos_ RTC_GUARDED_BY(mutex_);
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/neteq_unittest.cc b/third_party/libwebrtc/modules/audio_coding/neteq/neteq_unittest.cc
index aec7e580ec..7104b7a6dc 100644
--- a/third_party/libwebrtc/modules/audio_coding/neteq/neteq_unittest.cc
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/neteq_unittest.cc
@@ -76,12 +76,13 @@ TEST_F(NetEqDecodingTest, MAYBE_TestOpusBitExactness) {
webrtc::test::ResourcePath("audio_coding/neteq_opus", "rtp");
const std::string output_checksum =
- "2efdbea92c3fb2383c59f89d881efec9f94001d0|"
- "a6831b946b59913852ae3e53f99fa8f209bb23cd";
+ "434bdc4ec08546510ee903d001c8be1a01c44e24|"
+ "4336be0091e2faad7a194c16ee0a05e727325727|"
+ "cefd2de4adfa8f6a9b66a3639ad63c2f6779d0cd";
const std::string network_stats_checksum =
- "dfaf4399fd60293405290476ccf1c05c807c71a0|"
- "076662525572dba753b11578330bd491923f7f5e";
+ "5f2c8e3dff9cff55dd7a9f4167939de001566d95|"
+ "80ab17c17da030d4f2dfbf314ac44aacdadd7f0c";
DecodeAndCompare(input_rtp_file, output_checksum, network_stats_checksum,
absl::GetFlag(FLAGS_gen_ref));
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/packet_arrival_history.cc b/third_party/libwebrtc/modules/audio_coding/neteq/packet_arrival_history.cc
index 2077383f76..a36c8a2b06 100644
--- a/third_party/libwebrtc/modules/audio_coding/neteq/packet_arrival_history.cc
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/packet_arrival_history.cc
@@ -11,95 +11,122 @@
#include "modules/audio_coding/neteq/packet_arrival_history.h"
#include <algorithm>
+#include <cstdint>
#include "api/neteq/tick_timer.h"
+#include "rtc_base/checks.h"
namespace webrtc {
-PacketArrivalHistory::PacketArrivalHistory(int window_size_ms)
- : window_size_ms_(window_size_ms) {}
+PacketArrivalHistory::PacketArrivalHistory(const TickTimer* tick_timer,
+ int window_size_ms)
+ : tick_timer_(tick_timer), window_size_ms_(window_size_ms) {}
-void PacketArrivalHistory::Insert(uint32_t rtp_timestamp,
- int64_t arrival_time_ms) {
- RTC_DCHECK(sample_rate_khz_ > 0);
- int64_t unwrapped_rtp_timestamp = timestamp_unwrapper_.Unwrap(rtp_timestamp);
- if (!newest_rtp_timestamp_ ||
- unwrapped_rtp_timestamp > *newest_rtp_timestamp_) {
- newest_rtp_timestamp_ = unwrapped_rtp_timestamp;
+bool PacketArrivalHistory::Insert(uint32_t rtp_timestamp,
+ int packet_length_samples) {
+ int64_t arrival_timestamp =
+ tick_timer_->ticks() * tick_timer_->ms_per_tick() * sample_rate_khz_;
+ PacketArrival packet(timestamp_unwrapper_.Unwrap(rtp_timestamp),
+ arrival_timestamp, packet_length_samples);
+ if (IsObsolete(packet)) {
+ return false;
}
- history_.emplace_back(unwrapped_rtp_timestamp / sample_rate_khz_,
- arrival_time_ms);
- MaybeUpdateCachedArrivals(history_.back());
- while (history_.front().rtp_timestamp_ms + window_size_ms_ <
- unwrapped_rtp_timestamp / sample_rate_khz_) {
- if (&history_.front() == min_packet_arrival_) {
- min_packet_arrival_ = nullptr;
- }
- if (&history_.front() == max_packet_arrival_) {
- max_packet_arrival_ = nullptr;
- }
- history_.pop_front();
+ if (Contains(packet)) {
+ return false;
+ }
+ history_.emplace(packet.rtp_timestamp, packet);
+ if (packet != history_.rbegin()->second) {
+ // Packet was reordered.
+ return true;
}
- if (!min_packet_arrival_ || !max_packet_arrival_) {
- for (const PacketArrival& packet : history_) {
- MaybeUpdateCachedArrivals(packet);
+ // Remove old packets.
+ while (IsObsolete(history_.begin()->second)) {
+ if (history_.begin()->second == min_packet_arrivals_.front()) {
+ min_packet_arrivals_.pop_front();
}
+ if (history_.begin()->second == max_packet_arrivals_.front()) {
+ max_packet_arrivals_.pop_front();
+ }
+ history_.erase(history_.begin());
}
-}
-
-void PacketArrivalHistory::MaybeUpdateCachedArrivals(
- const PacketArrival& packet_arrival) {
- if (!min_packet_arrival_ || packet_arrival <= *min_packet_arrival_) {
- min_packet_arrival_ = &packet_arrival;
+ // Ensure ordering constraints.
+ while (!min_packet_arrivals_.empty() &&
+ packet <= min_packet_arrivals_.back()) {
+ min_packet_arrivals_.pop_back();
}
- if (!max_packet_arrival_ || packet_arrival >= *max_packet_arrival_) {
- max_packet_arrival_ = &packet_arrival;
+ while (!max_packet_arrivals_.empty() &&
+ packet >= max_packet_arrivals_.back()) {
+ max_packet_arrivals_.pop_back();
}
+ min_packet_arrivals_.push_back(packet);
+ max_packet_arrivals_.push_back(packet);
+ return true;
}
void PacketArrivalHistory::Reset() {
history_.clear();
- min_packet_arrival_ = nullptr;
- max_packet_arrival_ = nullptr;
+ min_packet_arrivals_.clear();
+ max_packet_arrivals_.clear();
timestamp_unwrapper_.Reset();
- newest_rtp_timestamp_ = absl::nullopt;
}
-int PacketArrivalHistory::GetDelayMs(uint32_t rtp_timestamp,
- int64_t time_ms) const {
- RTC_DCHECK(sample_rate_khz_ > 0);
- int64_t unwrapped_rtp_timestamp_ms =
- timestamp_unwrapper_.PeekUnwrap(rtp_timestamp) / sample_rate_khz_;
- PacketArrival packet(unwrapped_rtp_timestamp_ms, time_ms);
+int PacketArrivalHistory::GetDelayMs(uint32_t rtp_timestamp) const {
+ int64_t unwrapped_rtp_timestamp =
+ timestamp_unwrapper_.PeekUnwrap(rtp_timestamp);
+ int64_t current_timestamp =
+ tick_timer_->ticks() * tick_timer_->ms_per_tick() * sample_rate_khz_;
+ PacketArrival packet(unwrapped_rtp_timestamp, current_timestamp,
+ /*duration_ms=*/0);
return GetPacketArrivalDelayMs(packet);
}
int PacketArrivalHistory::GetMaxDelayMs() const {
- if (!max_packet_arrival_) {
+ if (max_packet_arrivals_.empty()) {
return 0;
}
- return GetPacketArrivalDelayMs(*max_packet_arrival_);
+ return GetPacketArrivalDelayMs(max_packet_arrivals_.front());
}
bool PacketArrivalHistory::IsNewestRtpTimestamp(uint32_t rtp_timestamp) const {
- if (!newest_rtp_timestamp_) {
- return false;
+ if (history_.empty()) {
+ return true;
}
int64_t unwrapped_rtp_timestamp =
timestamp_unwrapper_.PeekUnwrap(rtp_timestamp);
- return unwrapped_rtp_timestamp == *newest_rtp_timestamp_;
+ return unwrapped_rtp_timestamp == history_.rbegin()->second.rtp_timestamp;
}
int PacketArrivalHistory::GetPacketArrivalDelayMs(
const PacketArrival& packet_arrival) const {
- if (!min_packet_arrival_) {
+ if (min_packet_arrivals_.empty()) {
return 0;
}
- return std::max(static_cast<int>(packet_arrival.arrival_time_ms -
- min_packet_arrival_->arrival_time_ms -
- (packet_arrival.rtp_timestamp_ms -
- min_packet_arrival_->rtp_timestamp_ms)),
- 0);
+ RTC_DCHECK_NE(sample_rate_khz_, 0);
+ // TODO(jakobi): Timestamps are first converted to millis for bit-exactness.
+ return std::max<int>(
+ packet_arrival.arrival_timestamp / sample_rate_khz_ -
+ min_packet_arrivals_.front().arrival_timestamp / sample_rate_khz_ -
+ (packet_arrival.rtp_timestamp / sample_rate_khz_ -
+ min_packet_arrivals_.front().rtp_timestamp / sample_rate_khz_),
+ 0);
+}
+
+bool PacketArrivalHistory::IsObsolete(
+ const PacketArrival& packet_arrival) const {
+ if (history_.empty()) {
+ return false;
+ }
+ return packet_arrival.rtp_timestamp + window_size_ms_ * sample_rate_khz_ <
+ history_.rbegin()->second.rtp_timestamp;
+}
+
+bool PacketArrivalHistory::Contains(const PacketArrival& packet_arrival) const {
+ auto it = history_.upper_bound(packet_arrival.rtp_timestamp);
+ if (it == history_.begin()) {
+ return false;
+ }
+ --it;
+ return it->second.contains(packet_arrival);
}
} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/packet_arrival_history.h b/third_party/libwebrtc/modules/audio_coding/neteq/packet_arrival_history.h
index 722caf5688..3fa1ea1fa9 100644
--- a/third_party/libwebrtc/modules/audio_coding/neteq/packet_arrival_history.h
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/packet_arrival_history.h
@@ -11,10 +11,11 @@
#ifndef MODULES_AUDIO_CODING_NETEQ_PACKET_ARRIVAL_HISTORY_H_
#define MODULES_AUDIO_CODING_NETEQ_PACKET_ARRIVAL_HISTORY_H_
+#include <cstddef>
#include <cstdint>
#include <deque>
+#include <map>
-#include "absl/types/optional.h"
#include "api/neteq/tick_timer.h"
#include "rtc_base/numerics/sequence_number_unwrapper.h"
@@ -25,19 +26,22 @@ namespace webrtc {
// pruned.
class PacketArrivalHistory {
public:
- explicit PacketArrivalHistory(int window_size_ms);
+ explicit PacketArrivalHistory(const TickTimer* tick_timer,
+ int window_size_ms);
virtual ~PacketArrivalHistory() = default;
- // Insert packet with `rtp_timestamp` and `arrival_time_ms` into the history.
- void Insert(uint32_t rtp_timestamp, int64_t arrival_time_ms);
+ // Insert packet with `rtp_timestamp` into the history. Returns true if the
+ // packet was inserted, false if the timestamp is too old or if the timestamp
+ // already exists.
+ bool Insert(uint32_t rtp_timestamp, int packet_length_samples);
- // The delay for `rtp_timestamp` at `time_ms` is calculated as
- // `(time_ms - p.arrival_time_ms) - (rtp_timestamp - p.rtp_timestamp)`
- // where `p` is chosen as the packet arrival in the history that maximizes the
- // delay.
- virtual int GetDelayMs(uint32_t rtp_timestamp, int64_t time_ms) const;
+ // The delay for `rtp_timestamp` at time `now` is calculated as
+ // `(now - p.arrival_timestamp) - (rtp_timestamp - p.rtp_timestamp)` where `p`
+ // is chosen as the packet arrival in the history that maximizes the delay.
+ virtual int GetDelayMs(uint32_t rtp_timestamp) const;
- // Get the maximum packet arrival delay observed in the history.
+ // Get the maximum packet arrival delay observed in the history, excluding
+ // reordered packets.
virtual int GetMaxDelayMs() const;
bool IsNewestRtpTimestamp(uint32_t rtp_timestamp) const;
@@ -52,30 +56,53 @@ class PacketArrivalHistory {
private:
struct PacketArrival {
- PacketArrival(int64_t rtp_timestamp_ms, int64_t arrival_time_ms)
- : rtp_timestamp_ms(rtp_timestamp_ms),
- arrival_time_ms(arrival_time_ms) {}
- int64_t rtp_timestamp_ms;
- int64_t arrival_time_ms;
+ PacketArrival(int64_t rtp_timestamp,
+ int64_t arrival_timestamp,
+ int length_samples)
+ : rtp_timestamp(rtp_timestamp),
+ arrival_timestamp(arrival_timestamp),
+ length_samples(length_samples) {}
+ PacketArrival() = default;
+ int64_t rtp_timestamp;
+ int64_t arrival_timestamp;
+ int length_samples;
+ bool operator==(const PacketArrival& other) const {
+ return rtp_timestamp == other.rtp_timestamp &&
+ arrival_timestamp == other.arrival_timestamp &&
+ length_samples == other.length_samples;
+ }
+ bool operator!=(const PacketArrival& other) const {
+ return !(*this == other);
+ }
bool operator<=(const PacketArrival& other) const {
- return arrival_time_ms - rtp_timestamp_ms <=
- other.arrival_time_ms - other.rtp_timestamp_ms;
+ return arrival_timestamp - rtp_timestamp <=
+ other.arrival_timestamp - other.rtp_timestamp;
}
bool operator>=(const PacketArrival& other) const {
- return arrival_time_ms - rtp_timestamp_ms >=
- other.arrival_time_ms - other.rtp_timestamp_ms;
+ return arrival_timestamp - rtp_timestamp >=
+ other.arrival_timestamp - other.rtp_timestamp;
+ }
+ bool contains(const PacketArrival& other) const {
+ return rtp_timestamp <= other.rtp_timestamp &&
+ rtp_timestamp + length_samples >=
+ other.rtp_timestamp + other.length_samples;
}
};
- std::deque<PacketArrival> history_;
int GetPacketArrivalDelayMs(const PacketArrival& packet_arrival) const;
- // Updates `min_packet_arrival_` and `max_packet_arrival_`.
- void MaybeUpdateCachedArrivals(const PacketArrival& packet);
- const PacketArrival* min_packet_arrival_ = nullptr;
- const PacketArrival* max_packet_arrival_ = nullptr;
+ // Checks if the packet is older than the window size.
+ bool IsObsolete(const PacketArrival& packet_arrival) const;
+ // Check if the packet exists or fully overlaps with a packet in the history.
+ bool Contains(const PacketArrival& packet_arrival) const;
+ const TickTimer* tick_timer_;
const int window_size_ms_;
- RtpTimestampUnwrapper timestamp_unwrapper_;
- absl::optional<int64_t> newest_rtp_timestamp_;
int sample_rate_khz_ = 0;
+ RtpTimestampUnwrapper timestamp_unwrapper_;
+ // Packet history ordered by rtp timestamp.
+ std::map<int64_t, PacketArrival> history_;
+ // Tracks min/max packet arrivals in `history_` in ascending/descending order.
+ // Reordered packets are excluded.
+ std::deque<PacketArrival> min_packet_arrivals_;
+ std::deque<PacketArrival> max_packet_arrivals_;
};
} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/packet_arrival_history_unittest.cc b/third_party/libwebrtc/modules/audio_coding/neteq/packet_arrival_history_unittest.cc
index 539a318fe1..dd95fec0f7 100644
--- a/third_party/libwebrtc/modules/audio_coding/neteq/packet_arrival_history_unittest.cc
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/packet_arrival_history_unittest.cc
@@ -21,32 +21,36 @@ namespace {
constexpr int kFs = 8000;
constexpr int kFsKhz = kFs / 1000;
constexpr int kFrameSizeMs = 20;
+constexpr int kFrameSizeSamples = kFrameSizeMs * kFsKhz;
constexpr int kWindowSizeMs = 1000;
class PacketArrivalHistoryTest : public testing::Test {
public:
- PacketArrivalHistoryTest() : history_(kWindowSizeMs) {
+ PacketArrivalHistoryTest() : history_(&tick_timer_, kWindowSizeMs) {
history_.set_sample_rate(kFs);
}
- void IncrementTime(int delta_ms) { time_ms_ += delta_ms; }
+ void IncrementTime(int delta_ms) {
+ tick_timer_.Increment(delta_ms / tick_timer_.ms_per_tick());
+ }
int InsertPacketAndGetDelay(int timestamp_delta_ms) {
uint32_t timestamp = timestamp_ + timestamp_delta_ms * kFsKhz;
if (timestamp_delta_ms > 0) {
timestamp_ = timestamp;
}
- history_.Insert(timestamp, time_ms_);
+ EXPECT_TRUE(history_.Insert(timestamp, kFrameSizeSamples));
EXPECT_EQ(history_.IsNewestRtpTimestamp(timestamp),
timestamp_delta_ms >= 0);
- return history_.GetDelayMs(timestamp, time_ms_);
+ return history_.GetDelayMs(timestamp);
}
protected:
- int64_t time_ms_ = 0;
+ TickTimer tick_timer_;
PacketArrivalHistory history_;
uint32_t timestamp_ = 0x12345678;
};
TEST_F(PacketArrivalHistoryTest, RelativeArrivalDelay) {
+ // Insert first packet.
EXPECT_EQ(InsertPacketAndGetDelay(0), 0);
IncrementTime(kFrameSizeMs);
@@ -56,7 +60,7 @@ TEST_F(PacketArrivalHistoryTest, RelativeArrivalDelay) {
EXPECT_EQ(InsertPacketAndGetDelay(kFrameSizeMs), 20);
// Reordered packet.
- EXPECT_EQ(InsertPacketAndGetDelay(-2 * kFrameSizeMs), 60);
+ EXPECT_EQ(InsertPacketAndGetDelay(-3 * kFrameSizeMs), 80);
IncrementTime(2 * kFrameSizeMs);
EXPECT_EQ(InsertPacketAndGetDelay(kFrameSizeMs), 40);
@@ -68,7 +72,7 @@ TEST_F(PacketArrivalHistoryTest, RelativeArrivalDelay) {
EXPECT_EQ(InsertPacketAndGetDelay(kFrameSizeMs), 20);
// Earlier packet is now more delayed due to the new reference packet.
- EXPECT_EQ(history_.GetMaxDelayMs(), 100);
+ EXPECT_EQ(history_.GetMaxDelayMs(), 80);
}
TEST_F(PacketArrivalHistoryTest, ReorderedPackets) {
@@ -86,7 +90,7 @@ TEST_F(PacketArrivalHistoryTest, ReorderedPackets) {
IncrementTime(4 * kFrameSizeMs);
EXPECT_EQ(InsertPacketAndGetDelay(kFrameSizeMs), 60);
- EXPECT_EQ(history_.GetMaxDelayMs(), 80);
+ EXPECT_EQ(history_.GetMaxDelayMs(), 60);
}
TEST_F(PacketArrivalHistoryTest, MaxHistorySize) {
@@ -117,7 +121,7 @@ TEST_F(PacketArrivalHistoryTest, TimestampWraparound) {
// Insert another in-order packet after the wraparound.
EXPECT_EQ(InsertPacketAndGetDelay(kFrameSizeMs), 0);
- EXPECT_EQ(history_.GetMaxDelayMs(), 3 * kFrameSizeMs);
+ EXPECT_EQ(history_.GetMaxDelayMs(), kFrameSizeMs);
}
TEST_F(PacketArrivalHistoryTest, TimestampWraparoundBackwards) {
@@ -134,7 +138,33 @@ TEST_F(PacketArrivalHistoryTest, TimestampWraparoundBackwards) {
// Insert another in-order packet after the wraparound.
EXPECT_EQ(InsertPacketAndGetDelay(kFrameSizeMs), 0);
- EXPECT_EQ(history_.GetMaxDelayMs(), 3 * kFrameSizeMs);
+ EXPECT_EQ(history_.GetMaxDelayMs(), kFrameSizeMs);
+}
+
+TEST_F(PacketArrivalHistoryTest, OldPacketShouldNotBeInserted) {
+ // Insert first packet as reference.
+ EXPECT_EQ(InsertPacketAndGetDelay(0), 0);
+ // Insert packet with timestamp older than the window size compared to the
+ // first packet.
+ EXPECT_FALSE(history_.Insert(timestamp_ - kWindowSizeMs * kFsKhz - 1,
+ kFrameSizeSamples));
+}
+
+TEST_F(PacketArrivalHistoryTest, DuplicatePacketShouldNotBeInserted) {
+ // Insert first packet as reference.
+ uint32_t first_timestamp = timestamp_;
+ EXPECT_EQ(InsertPacketAndGetDelay(0), 0);
+ EXPECT_EQ(InsertPacketAndGetDelay(kFrameSizeMs), 0);
+ // Same timestamp as the first packet.
+ EXPECT_FALSE(history_.Insert(first_timestamp, kFrameSizeSamples));
+}
+
+TEST_F(PacketArrivalHistoryTest, OverlappingPacketShouldNotBeInserted) {
+ // Insert first packet as reference.
+ EXPECT_EQ(InsertPacketAndGetDelay(0), 0);
+ // 10 ms overlap with the previous packet.
+ EXPECT_FALSE(history_.Insert(timestamp_ + kFrameSizeSamples / 2,
+ kFrameSizeSamples / 2));
}
} // namespace
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/post_decode_vad.cc b/third_party/libwebrtc/modules/audio_coding/neteq/post_decode_vad.cc
deleted file mode 100644
index 9999d6764b..0000000000
--- a/third_party/libwebrtc/modules/audio_coding/neteq/post_decode_vad.cc
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "modules/audio_coding/neteq/post_decode_vad.h"
-
-namespace webrtc {
-
-PostDecodeVad::~PostDecodeVad() {
- if (vad_instance_)
- WebRtcVad_Free(vad_instance_);
-}
-
-void PostDecodeVad::Enable() {
- if (!vad_instance_) {
- // Create the instance.
- vad_instance_ = WebRtcVad_Create();
- if (vad_instance_ == nullptr) {
- // Failed to create instance.
- Disable();
- return;
- }
- }
- Init();
- enabled_ = true;
-}
-
-void PostDecodeVad::Disable() {
- enabled_ = false;
- running_ = false;
-}
-
-void PostDecodeVad::Init() {
- running_ = false;
- if (vad_instance_) {
- WebRtcVad_Init(vad_instance_);
- WebRtcVad_set_mode(vad_instance_, kVadMode);
- running_ = true;
- }
-}
-
-void PostDecodeVad::Update(int16_t* signal,
- size_t length,
- AudioDecoder::SpeechType speech_type,
- bool sid_frame,
- int fs_hz) {
- if (!vad_instance_ || !enabled_) {
- return;
- }
-
- if (speech_type == AudioDecoder::kComfortNoise || sid_frame ||
- fs_hz > 16000) {
- // TODO(hlundin): Remove restriction on fs_hz.
- running_ = false;
- active_speech_ = true;
- sid_interval_counter_ = 0;
- } else if (!running_) {
- ++sid_interval_counter_;
- }
-
- if (sid_interval_counter_ >= kVadAutoEnable) {
- Init();
- }
-
- if (length > 0 && running_) {
- size_t vad_sample_index = 0;
- active_speech_ = false;
- // Loop through frame sizes 30, 20, and 10 ms.
- for (int vad_frame_size_ms = 30; vad_frame_size_ms >= 10;
- vad_frame_size_ms -= 10) {
- size_t vad_frame_size_samples =
- static_cast<size_t>(vad_frame_size_ms * fs_hz / 1000);
- while (length - vad_sample_index >= vad_frame_size_samples) {
- int vad_return =
- WebRtcVad_Process(vad_instance_, fs_hz, &signal[vad_sample_index],
- vad_frame_size_samples);
- active_speech_ |= (vad_return == 1);
- vad_sample_index += vad_frame_size_samples;
- }
- }
- }
-}
-
-} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/post_decode_vad.h b/third_party/libwebrtc/modules/audio_coding/neteq/post_decode_vad.h
deleted file mode 100644
index 3bd91b9edb..0000000000
--- a/third_party/libwebrtc/modules/audio_coding/neteq/post_decode_vad.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef MODULES_AUDIO_CODING_NETEQ_POST_DECODE_VAD_H_
-#define MODULES_AUDIO_CODING_NETEQ_POST_DECODE_VAD_H_
-
-#include <stddef.h>
-#include <stdint.h>
-
-#include "api/audio_codecs/audio_decoder.h"
-#include "common_audio/vad/include/webrtc_vad.h"
-
-namespace webrtc {
-
-class PostDecodeVad {
- public:
- PostDecodeVad()
- : enabled_(false),
- running_(false),
- active_speech_(true),
- sid_interval_counter_(0),
- vad_instance_(NULL) {}
-
- virtual ~PostDecodeVad();
-
- PostDecodeVad(const PostDecodeVad&) = delete;
- PostDecodeVad& operator=(const PostDecodeVad&) = delete;
-
- // Enables post-decode VAD.
- void Enable();
-
- // Disables post-decode VAD.
- void Disable();
-
- // Initializes post-decode VAD.
- void Init();
-
- // Updates post-decode VAD with the audio data in `signal` having `length`
- // samples. The data is of type `speech_type`, at the sample rate `fs_hz`.
- void Update(int16_t* signal,
- size_t length,
- AudioDecoder::SpeechType speech_type,
- bool sid_frame,
- int fs_hz);
-
- // Accessors.
- bool enabled() const { return enabled_; }
- bool running() const { return running_; }
- bool active_speech() const { return active_speech_; }
-
- private:
- static const int kVadMode = 0; // Sets aggressiveness to "Normal".
- // Number of Update() calls without CNG/SID before re-enabling VAD.
- static const int kVadAutoEnable = 3000;
-
- bool enabled_;
- bool running_;
- bool active_speech_;
- int sid_interval_counter_;
- ::VadInst* vad_instance_;
-};
-
-} // namespace webrtc
-#endif // MODULES_AUDIO_CODING_NETEQ_POST_DECODE_VAD_H_
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/post_decode_vad_unittest.cc b/third_party/libwebrtc/modules/audio_coding/neteq/post_decode_vad_unittest.cc
deleted file mode 100644
index da3e4e864e..0000000000
--- a/third_party/libwebrtc/modules/audio_coding/neteq/post_decode_vad_unittest.cc
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-// Unit tests for PostDecodeVad class.
-
-#include "modules/audio_coding/neteq/post_decode_vad.h"
-
-#include "test/gtest.h"
-
-namespace webrtc {
-
-TEST(PostDecodeVad, CreateAndDestroy) {
- PostDecodeVad vad;
-}
-
-// TODO(hlundin): Write more tests.
-
-} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_replacement_input.cc b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_replacement_input.cc
index 081bd9631f..f1a46cd2df 100644
--- a/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_replacement_input.cc
+++ b/third_party/libwebrtc/modules/audio_coding/neteq/tools/neteq_replacement_input.cc
@@ -107,7 +107,7 @@ void NetEqReplacementInput::ReplacePacket() {
next_hdr->timestamp - packet_->header.timestamp;
const bool opus_dtx = packet_->payload.size() <= 2;
if (next_hdr->sequenceNumber == packet_->header.sequenceNumber + 1 &&
- timestamp_diff <= 120 * 48 && !opus_dtx) {
+ timestamp_diff <= 120 * 48 && timestamp_diff > 0 && !opus_dtx) {
// Packets are in order and the timestamp diff is less than 5760 samples.
// Accept the timestamp diff as a valid frame size.
input_frame_size_timestamps = timestamp_diff;
diff --git a/third_party/libwebrtc/modules/audio_coding/neteq_gn/moz.build b/third_party/libwebrtc/modules/audio_coding/neteq_gn/moz.build
index 834a8d1265..9b2996fa22 100644
--- a/third_party/libwebrtc/modules/audio_coding/neteq_gn/moz.build
+++ b/third_party/libwebrtc/modules/audio_coding/neteq_gn/moz.build
@@ -58,7 +58,6 @@ UNIFIED_SOURCES += [
"/third_party/libwebrtc/modules/audio_coding/neteq/packet.cc",
"/third_party/libwebrtc/modules/audio_coding/neteq/packet_arrival_history.cc",
"/third_party/libwebrtc/modules/audio_coding/neteq/packet_buffer.cc",
- "/third_party/libwebrtc/modules/audio_coding/neteq/post_decode_vad.cc",
"/third_party/libwebrtc/modules/audio_coding/neteq/preemptive_expand.cc",
"/third_party/libwebrtc/modules/audio_coding/neteq/random_vector.cc",
"/third_party/libwebrtc/modules/audio_coding/neteq/red_payload_splitter.cc",
diff --git a/third_party/libwebrtc/modules/audio_device/include/test_audio_device_unittest.cc b/third_party/libwebrtc/modules/audio_device/include/test_audio_device_unittest.cc
index 7a122ca84b..cca82977e8 100644
--- a/third_party/libwebrtc/modules/audio_device/include/test_audio_device_unittest.cc
+++ b/third_party/libwebrtc/modules/audio_device/include/test_audio_device_unittest.cc
@@ -39,9 +39,9 @@ void RunWavTest(const std::vector<int16_t>& input_samples,
const ::testing::TestInfo* const test_info =
::testing::UnitTest::GetInstance()->current_test_info();
- const std::string output_filename =
- test::OutputPath() + "BoundedWavFileWriterTest_" + test_info->name() +
- "_" + std::to_string(std::rand()) + ".wav";
+ const std::string output_filename = test::OutputPathWithRandomDirectory() +
+ "BoundedWavFileWriterTest_" +
+ test_info->name() + ".wav";
static const size_t kSamplesPerFrame = 8;
static const int kSampleRate = kSamplesPerFrame * 100;
@@ -136,9 +136,9 @@ TEST(WavFileReaderTest, RepeatedTrueWithSingleFrameFileReadTwice) {
static const rtc::BufferT<int16_t> kExpectedSamples(kInputSamples.data(),
kInputSamples.size());
- const std::string output_filename = test::OutputPath() +
+ const std::string output_filename = test::OutputPathWithRandomDirectory() +
"WavFileReaderTest_RepeatedTrue_" +
- std::to_string(std::rand()) + ".wav";
+ ".wav";
static const size_t kSamplesPerFrame = 8;
static const int kSampleRate = kSamplesPerFrame * 100;
@@ -175,9 +175,9 @@ void RunRawTestNoRepeat(const std::vector<int16_t>& input_samples,
const ::testing::TestInfo* const test_info =
::testing::UnitTest::GetInstance()->current_test_info();
- const std::string output_filename = test::OutputPath() + "RawFileTest_" +
- test_info->name() + "_" +
- std::to_string(std::rand()) + ".raw";
+ const std::string output_filename = test::OutputPathWithRandomDirectory() +
+ "RawFileTest_" + test_info->name() +
+ ".raw";
static const size_t kSamplesPerFrame = 8;
static const int kSampleRate = kSamplesPerFrame * 100;
@@ -281,8 +281,8 @@ TEST(RawFileWriterTest, Repeat) {
const ::testing::TestInfo* const test_info =
::testing::UnitTest::GetInstance()->current_test_info();
- const std::string output_filename = test::OutputPath() + "RawFileTest_" +
- test_info->name() + "_" +
+ const std::string output_filename = test::OutputPathWithRandomDirectory() +
+ "RawFileTest_" + test_info->name() + "_" +
std::to_string(std::rand()) + ".raw";
static const size_t kSamplesPerFrame = 8;
diff --git a/third_party/libwebrtc/modules/audio_processing/agc2/input_volume_controller.h b/third_party/libwebrtc/modules/audio_processing/agc2/input_volume_controller.h
index 21405542dc..0bec7af450 100644
--- a/third_party/libwebrtc/modules/audio_processing/agc2/input_volume_controller.h
+++ b/third_party/libwebrtc/modules/audio_processing/agc2/input_volume_controller.h
@@ -50,7 +50,7 @@ class InputVolumeController final {
// Limited to values higher than 0.
int clipped_wait_frames = 300;
// Enables clipping prediction functionality.
- bool enable_clipping_predictor = false;
+ bool enable_clipping_predictor = true;
// Speech level target range (dBFS). If the speech level is in the range
// [`target_range_min_dbfs`, `target_range_max_dbfs`], no input volume
// adjustments are done based on the speech level. For speech levels below
diff --git a/third_party/libwebrtc/modules/audio_processing/audio_processing_impl.cc b/third_party/libwebrtc/modules/audio_processing/audio_processing_impl.cc
index 4ac074526c..5f6dd59d02 100644
--- a/third_party/libwebrtc/modules/audio_processing/audio_processing_impl.cc
+++ b/third_party/libwebrtc/modules/audio_processing/audio_processing_impl.cc
@@ -2382,7 +2382,7 @@ void AudioProcessingImpl::InitializeGainController2() {
!UseApmVadSubModule(config_, gain_controller2_experiment_params_);
submodules_.gain_controller2 = std::make_unique<GainController2>(
config_.gain_controller2, input_volume_controller_config,
- proc_fullband_sample_rate_hz(), num_proc_channels(), use_internal_vad);
+ proc_fullband_sample_rate_hz(), num_output_channels(), use_internal_vad);
submodules_.gain_controller2->SetCaptureOutputUsed(
capture_.capture_output_used);
}
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/goog_cc_network_control.cc b/third_party/libwebrtc/modules/congestion_controller/goog_cc/goog_cc_network_control.cc
index 94645dcc4a..d8a0ce9d64 100644
--- a/third_party/libwebrtc/modules/congestion_controller/goog_cc/goog_cc_network_control.cc
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/goog_cc_network_control.cc
@@ -720,7 +720,8 @@ PacerConfig GoogCcNetworkController::GetPacingRates(Timestamp at_time) const {
// Pacing rate is based on target rate before congestion window pushback,
// because we don't want to build queues in the pacer when pushback occurs.
DataRate pacing_rate = DataRate::Zero();
- if (pace_at_max_of_bwe_and_lower_link_capacity_ && estimate_) {
+ if (pace_at_max_of_bwe_and_lower_link_capacity_ && estimate_ &&
+ !bandwidth_estimation_->PaceAtLossBasedEstimate()) {
pacing_rate =
std::max({min_total_allocated_bitrate_, estimate_->link_capacity_lower,
last_loss_based_target_rate_}) *
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/loss_based_bwe_v2.cc b/third_party/libwebrtc/modules/congestion_controller/goog_cc/loss_based_bwe_v2.cc
index 8e1a3c4698..2f47ee0f18 100644
--- a/third_party/libwebrtc/modules/congestion_controller/goog_cc/loss_based_bwe_v2.cc
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/loss_based_bwe_v2.cc
@@ -499,6 +499,8 @@ absl::optional<LossBasedBweV2::Config> LossBasedBweV2::CreateConfig(
FieldTrialParameter<TimeDelta> padding_duration("PaddingDuration",
TimeDelta::Zero());
FieldTrialParameter<bool> bound_best_candidate("BoundBestCandidate", false);
+ FieldTrialParameter<bool> pace_at_loss_based_estimate(
+ "PaceAtLossBasedEstimate", false);
if (key_value_config) {
ParseFieldTrial({&enabled,
&bandwidth_rampup_upper_bound_factor,
@@ -538,7 +540,8 @@ absl::optional<LossBasedBweV2::Config> LossBasedBweV2::CreateConfig(
&hold_duration_factor,
&use_byte_loss_rate,
&padding_duration,
- &bound_best_candidate},
+ &bound_best_candidate,
+ &pace_at_loss_based_estimate},
key_value_config->Lookup("WebRTC-Bwe-LossBasedBweV2"));
}
@@ -604,6 +607,7 @@ absl::optional<LossBasedBweV2::Config> LossBasedBweV2::CreateConfig(
config->use_byte_loss_rate = use_byte_loss_rate.Get();
config->padding_duration = padding_duration.Get();
config->bound_best_candidate = bound_best_candidate.Get();
+ config->pace_at_loss_based_estimate = pace_at_loss_based_estimate.Get();
return config;
}
@@ -1199,4 +1203,9 @@ bool LossBasedBweV2::CanKeepIncreasingState(DataRate estimate) const {
last_padding_info_.padding_rate < estimate;
}
+bool LossBasedBweV2::PaceAtLossBasedEstimate() const {
+ return config_->pace_at_loss_based_estimate &&
+ loss_based_result_.state != LossBasedState::kDelayBasedEstimate;
+}
+
} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/loss_based_bwe_v2.h b/third_party/libwebrtc/modules/congestion_controller/goog_cc/loss_based_bwe_v2.h
index 9afbb11f1f..34c96c66d9 100644
--- a/third_party/libwebrtc/modules/congestion_controller/goog_cc/loss_based_bwe_v2.h
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/loss_based_bwe_v2.h
@@ -74,6 +74,7 @@ class LossBasedBweV2 {
rtc::ArrayView<const PacketResult> packet_results,
DataRate delay_based_estimate,
bool in_alr);
+ bool PaceAtLossBasedEstimate() const;
// For unit testing only.
void SetBandwidthEstimate(DataRate bandwidth_estimate);
@@ -124,6 +125,7 @@ class LossBasedBweV2 {
bool use_byte_loss_rate = false;
TimeDelta padding_duration = TimeDelta::Zero();
bool bound_best_candidate = false;
+ bool pace_at_loss_based_estimate = false;
};
struct Derivatives {
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/loss_based_bwe_v2_test.cc b/third_party/libwebrtc/modules/congestion_controller/goog_cc/loss_based_bwe_v2_test.cc
index 9b7ad03148..bb867f4fb0 100644
--- a/third_party/libwebrtc/modules/congestion_controller/goog_cc/loss_based_bwe_v2_test.cc
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/loss_based_bwe_v2_test.cc
@@ -1776,5 +1776,41 @@ TEST_F(LossBasedBweV2Test, UseByteLossRate) {
DataRate::KilobitsPerSec(150));
}
+TEST_F(LossBasedBweV2Test, PaceAtLossBasedEstimate) {
+ ExplicitKeyValueConfig key_value_config(ShortObservationConfig(
+ "PaceAtLossBasedEstimate:true,PaddingDuration:1000ms"));
+ LossBasedBweV2 loss_based_bandwidth_estimator(&key_value_config);
+ loss_based_bandwidth_estimator.SetBandwidthEstimate(
+ DataRate::KilobitsPerSec(1000));
+ loss_based_bandwidth_estimator.UpdateBandwidthEstimate(
+ CreatePacketResultsWithReceivedPackets(
+ /*first_packet_timestamp=*/Timestamp::Zero()),
+ /*delay_based_estimate=*/DataRate::KilobitsPerSec(1000),
+ /*in_alr=*/false);
+ EXPECT_EQ(loss_based_bandwidth_estimator.GetLossBasedResult().state,
+ LossBasedState::kDelayBasedEstimate);
+ EXPECT_FALSE(loss_based_bandwidth_estimator.PaceAtLossBasedEstimate());
+
+ loss_based_bandwidth_estimator.UpdateBandwidthEstimate(
+ CreatePacketResultsWith100pLossRate(
+ /*first_packet_timestamp=*/Timestamp::Zero() +
+ kObservationDurationLowerBound),
+ /*delay_based_estimate=*/DataRate::KilobitsPerSec(1000),
+ /*in_alr=*/false);
+ EXPECT_EQ(loss_based_bandwidth_estimator.GetLossBasedResult().state,
+ LossBasedState::kDecreasing);
+ EXPECT_TRUE(loss_based_bandwidth_estimator.PaceAtLossBasedEstimate());
+
+ loss_based_bandwidth_estimator.UpdateBandwidthEstimate(
+ CreatePacketResultsWithReceivedPackets(
+ /*first_packet_timestamp=*/Timestamp::Zero() +
+ kObservationDurationLowerBound * 2),
+ /*delay_based_estimate=*/DataRate::KilobitsPerSec(1000),
+ /*in_alr=*/false);
+ EXPECT_EQ(loss_based_bandwidth_estimator.GetLossBasedResult().state,
+ LossBasedState::kIncreaseUsingPadding);
+ EXPECT_TRUE(loss_based_bandwidth_estimator.PaceAtLossBasedEstimate());
+}
+
} // namespace
} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/probe_controller.cc b/third_party/libwebrtc/modules/congestion_controller/goog_cc/probe_controller.cc
index 32b1b93c0b..31727051a8 100644
--- a/third_party/libwebrtc/modules/congestion_controller/goog_cc/probe_controller.cc
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/probe_controller.cc
@@ -105,8 +105,7 @@ ProbeControllerConfig::ProbeControllerConfig(
probe_on_max_allocated_bitrate_change("probe_max_allocation", true),
first_allocation_probe_scale("alloc_p1", 1),
second_allocation_probe_scale("alloc_p2", 2),
- allocation_allow_further_probing("alloc_probe_further", false),
- allocation_probe_max("alloc_probe_max", DataRate::PlusInfinity()),
+ allocation_probe_limit_by_current_scale("alloc_current_bwe_limit"),
min_probe_packets_sent("min_probe_packets_sent", 5),
min_probe_duration("min_probe_duration", TimeDelta::Millis(15)),
loss_limited_probe_scale("loss_limited_scale", 1.5),
@@ -118,7 +117,7 @@ ProbeControllerConfig::ProbeControllerConfig(
&further_exponential_probe_scale, &further_probe_threshold,
&alr_probing_interval, &alr_probe_scale,
&probe_on_max_allocated_bitrate_change, &first_allocation_probe_scale,
- &second_allocation_probe_scale, &allocation_allow_further_probing,
+ &second_allocation_probe_scale, &allocation_probe_limit_by_current_scale,
&min_probe_duration, &network_state_estimate_probing_interval,
&probe_if_estimate_lower_than_network_state_estimate_ratio,
&estimate_lower_than_network_state_estimate_probing_interval,
@@ -138,7 +137,7 @@ ProbeControllerConfig::ProbeControllerConfig(
key_value_config->Lookup("WebRTC-Bwe-AlrProbing"));
ParseFieldTrial(
{&first_allocation_probe_scale, &second_allocation_probe_scale,
- &allocation_allow_further_probing, &allocation_probe_max},
+ &allocation_probe_limit_by_current_scale},
key_value_config->Lookup("WebRTC-Bwe-AllocationProbing"));
ParseFieldTrial({&min_probe_packets_sent, &min_probe_duration},
key_value_config->Lookup("WebRTC-Bwe-ProbingBehavior"));
@@ -220,19 +219,31 @@ std::vector<ProbeClusterConfig> ProbeController::OnMaxTotalAllocatedBitrate(
DataRate first_probe_rate = max_total_allocated_bitrate *
config_.first_allocation_probe_scale.Value();
- DataRate probe_cap = config_.allocation_probe_max.Get();
- first_probe_rate = std::min(first_probe_rate, probe_cap);
+ DataRate current_bwe_limit =
+ !config_.allocation_probe_limit_by_current_scale
+ ? DataRate::PlusInfinity()
+ : estimated_bitrate_ *
+ config_.allocation_probe_limit_by_current_scale.Value();
+ bool limited_by_current_bwe = current_bwe_limit < first_probe_rate;
+ if (limited_by_current_bwe) {
+ first_probe_rate = current_bwe_limit;
+ }
+
std::vector<DataRate> probes = {first_probe_rate};
- if (config_.second_allocation_probe_scale) {
+ if (!limited_by_current_bwe && config_.second_allocation_probe_scale) {
DataRate second_probe_rate =
max_total_allocated_bitrate *
config_.second_allocation_probe_scale.Value();
- second_probe_rate = std::min(second_probe_rate, probe_cap);
+ limited_by_current_bwe = current_bwe_limit < second_probe_rate;
+ if (limited_by_current_bwe) {
+ second_probe_rate = current_bwe_limit;
+ }
if (second_probe_rate > first_probe_rate)
probes.push_back(second_probe_rate);
}
- return InitiateProbing(at_time, probes,
- config_.allocation_allow_further_probing.Get());
+ bool allow_further_probing = limited_by_current_bwe;
+
+ return InitiateProbing(at_time, probes, allow_further_probing);
}
max_total_allocated_bitrate_ = max_total_allocated_bitrate;
return std::vector<ProbeClusterConfig>();
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/probe_controller.h b/third_party/libwebrtc/modules/congestion_controller/goog_cc/probe_controller.h
index feec81f2dc..25f02aee69 100644
--- a/third_party/libwebrtc/modules/congestion_controller/goog_cc/probe_controller.h
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/probe_controller.h
@@ -64,8 +64,7 @@ struct ProbeControllerConfig {
FieldTrialParameter<bool> probe_on_max_allocated_bitrate_change;
FieldTrialOptional<double> first_allocation_probe_scale;
FieldTrialOptional<double> second_allocation_probe_scale;
- FieldTrialFlag allocation_allow_further_probing;
- FieldTrialParameter<DataRate> allocation_probe_max;
+ FieldTrialOptional<double> allocation_probe_limit_by_current_scale;
// The minimum number probing packets used.
FieldTrialParameter<int> min_probe_packets_sent;
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/probe_controller_unittest.cc b/third_party/libwebrtc/modules/congestion_controller/goog_cc/probe_controller_unittest.cc
index 94025b30ea..6e34a2962d 100644
--- a/third_party/libwebrtc/modules/congestion_controller/goog_cc/probe_controller_unittest.cc
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/probe_controller_unittest.cc
@@ -213,6 +213,42 @@ TEST(ProbeControllerTest, ProbesOnMaxAllocatedBitrateIncreaseOnlyWhenInAlr) {
EXPECT_TRUE(probes.empty());
}
+TEST(ProbeControllerTest, ProbesOnMaxAllocatedBitrateLimitedByCurrentBwe) {
+ ProbeControllerFixture fixture(
+ "WebRTC-Bwe-ProbingConfiguration/"
+ "alloc_current_bwe_limit:1.5/");
+ ASSERT_TRUE(kMaxBitrate > 1.5 * kStartBitrate);
+ std::unique_ptr<ProbeController> probe_controller =
+ fixture.CreateController();
+ ASSERT_THAT(
+ probe_controller->OnNetworkAvailability({.network_available = true}),
+ IsEmpty());
+ auto probes = probe_controller->SetBitrates(
+ kMinBitrate, kStartBitrate, kMaxBitrate, fixture.CurrentTime());
+ probes = probe_controller->SetEstimatedBitrate(
+ kStartBitrate, BandwidthLimitedCause::kDelayBasedLimited,
+ fixture.CurrentTime());
+
+ // Wait long enough to time out exponential probing.
+ fixture.AdvanceTime(kExponentialProbingTimeout);
+ probes = probe_controller->Process(fixture.CurrentTime());
+ EXPECT_TRUE(probes.empty());
+
+ // Probe when in alr.
+ probe_controller->SetAlrStartTimeMs(fixture.CurrentTime().ms());
+ probes = probe_controller->OnMaxTotalAllocatedBitrate(kMaxBitrate,
+ fixture.CurrentTime());
+ EXPECT_EQ(probes.size(), 1u);
+ EXPECT_EQ(probes.at(0).target_data_rate, 1.5 * kStartBitrate);
+
+ // Continue probing if probe succeeds.
+ probes = probe_controller->SetEstimatedBitrate(
+ 1.5 * kStartBitrate, BandwidthLimitedCause::kDelayBasedLimited,
+ fixture.CurrentTime());
+ EXPECT_EQ(probes.size(), 1u);
+ EXPECT_GT(probes.at(0).target_data_rate, 1.5 * kStartBitrate);
+}
+
TEST(ProbeControllerTest, CanDisableProbingOnMaxTotalAllocatedBitrateIncrease) {
ProbeControllerFixture fixture(
"WebRTC-Bwe-ProbingConfiguration/"
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation.cc b/third_party/libwebrtc/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation.cc
index 211d86c95d..7b305f12f1 100644
--- a/third_party/libwebrtc/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation.cc
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation.cc
@@ -700,8 +700,12 @@ bool SendSideBandwidthEstimation::LossBasedBandwidthEstimatorV2Enabled() const {
bool SendSideBandwidthEstimation::LossBasedBandwidthEstimatorV2ReadyForUse()
const {
- return LossBasedBandwidthEstimatorV2Enabled() &&
- loss_based_bandwidth_estimator_v2_->IsReady();
+ return loss_based_bandwidth_estimator_v2_->IsReady();
+}
+
+bool SendSideBandwidthEstimation::PaceAtLossBasedEstimate() const {
+ return LossBasedBandwidthEstimatorV2ReadyForUse() &&
+ loss_based_bandwidth_estimator_v2_->PaceAtLossBasedEstimate();
}
} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation.h b/third_party/libwebrtc/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation.h
index dd4d25a236..1d919af7b6 100644
--- a/third_party/libwebrtc/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation.h
+++ b/third_party/libwebrtc/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation.h
@@ -129,6 +129,7 @@ class SendSideBandwidthEstimation {
BandwidthUsage delay_detector_state,
absl::optional<DataRate> probe_bitrate,
bool in_alr);
+ bool PaceAtLossBasedEstimate() const;
private:
friend class GoogCcStatePrinter;
diff --git a/third_party/libwebrtc/modules/desktop_capture/win/dxgi_output_duplicator.cc b/third_party/libwebrtc/modules/desktop_capture/win/dxgi_output_duplicator.cc
index 9c64125b4e..ac028ce38b 100644
--- a/third_party/libwebrtc/modules/desktop_capture/win/dxgi_output_duplicator.cc
+++ b/third_party/libwebrtc/modules/desktop_capture/win/dxgi_output_duplicator.cc
@@ -112,9 +112,13 @@ bool DxgiOutputDuplicator::DuplicateOutput() {
memset(&desc_, 0, sizeof(desc_));
duplication_->GetDesc(&desc_);
- if (desc_.ModeDesc.Format != DXGI_FORMAT_B8G8R8A8_UNORM) {
- RTC_LOG(LS_ERROR) << "IDXGIDuplicateOutput does not use RGBA (8 bit) "
- << "format, which is required by downstream components, "
+
+ // DXGI_FORMAT_R16G16B16A16_FLOAT is returned for HDR monitor,
+ // DXGI_FORMAT_B8G8R8A8_UNORM for others.
+ if ((desc_.ModeDesc.Format != DXGI_FORMAT_B8G8R8A8_UNORM) &&
+ (desc_.ModeDesc.Format != DXGI_FORMAT_R16G16B16A16_FLOAT)) {
+ RTC_LOG(LS_ERROR) << "IDXGIDuplicateOutput does not use RGBA (8, 16 bit) "
+ << "format, which is required by downstream components, "
<< "format is " << desc_.ModeDesc.Format;
return false;
}
diff --git a/third_party/libwebrtc/modules/pacing/bitrate_prober.cc b/third_party/libwebrtc/modules/pacing/bitrate_prober.cc
index e60a1e5283..17729b5775 100644
--- a/third_party/libwebrtc/modules/pacing/bitrate_prober.cc
+++ b/third_party/libwebrtc/modules/pacing/bitrate_prober.cc
@@ -52,6 +52,18 @@ void BitrateProber::SetEnabled(bool enable) {
}
}
+void BitrateProber::SetAllowProbeWithoutMediaPacket(bool allow) {
+ config_.allow_start_probing_immediately = allow;
+ MaybeSetActiveState(/*packet_size=*/DataSize::Zero());
+}
+
+void BitrateProber::MaybeSetActiveState(DataSize packet_size) {
+ if (ReadyToSetActiveState(packet_size)) {
+ next_probe_time_ = Timestamp::MinusInfinity();
+ probing_state_ = ProbingState::kActive;
+ }
+}
+
bool BitrateProber::ReadyToSetActiveState(DataSize packet_size) const {
if (clusters_.empty()) {
RTC_DCHECK(probing_state_ == ProbingState::kDisabled ||
@@ -63,19 +75,19 @@ bool BitrateProber::ReadyToSetActiveState(DataSize packet_size) const {
case ProbingState::kActive:
return false;
case ProbingState::kInactive:
- // If config_.min_packet_size > 0, a "large enough" packet must be sent
- // first, before a probe can be generated and sent. Otherwise, send the
- // probe asap.
+ if (config_.allow_start_probing_immediately) {
+ return true;
+ }
+ // If config_.min_packet_size > 0, a "large enough" packet must be
+ // sent first, before a probe can be generated and sent. Otherwise,
+ // send the probe asap.
return packet_size >=
std::min(RecommendedMinProbeSize(), config_.min_packet_size.Get());
}
}
void BitrateProber::OnIncomingPacket(DataSize packet_size) {
- if (ReadyToSetActiveState(packet_size)) {
- next_probe_time_ = Timestamp::MinusInfinity();
- probing_state_ = ProbingState::kActive;
- }
+ MaybeSetActiveState(packet_size);
}
void BitrateProber::CreateProbeCluster(
@@ -101,10 +113,8 @@ void BitrateProber::CreateProbeCluster(
cluster.pace_info.probe_cluster_id = cluster_config.id;
clusters_.push(cluster);
- if (ReadyToSetActiveState(/*packet_size=*/DataSize::Zero())) {
- next_probe_time_ = Timestamp::MinusInfinity();
- probing_state_ = ProbingState::kActive;
- }
+ MaybeSetActiveState(/*packet_size=*/DataSize::Zero());
+
RTC_DCHECK(probing_state_ == ProbingState::kActive ||
probing_state_ == ProbingState::kInactive);
diff --git a/third_party/libwebrtc/modules/pacing/bitrate_prober.h b/third_party/libwebrtc/modules/pacing/bitrate_prober.h
index 82aba6ee3a..821bbf32eb 100644
--- a/third_party/libwebrtc/modules/pacing/bitrate_prober.h
+++ b/third_party/libwebrtc/modules/pacing/bitrate_prober.h
@@ -38,6 +38,9 @@ struct BitrateProberConfig {
// This defines the max min packet size, meaning that on high bitrates
// a packet of at least this size is needed to trigger sending a probe.
FieldTrialParameter<DataSize> min_packet_size;
+
+ // If true, `min_packet_size` is ignored.
+ bool allow_start_probing_immediately = false;
};
// Note that this class isn't thread-safe by itself and therefore relies
@@ -48,6 +51,7 @@ class BitrateProber {
~BitrateProber() = default;
void SetEnabled(bool enable);
+ void SetAllowProbeWithoutMediaPacket(bool allow);
// Returns true if the prober is in a probing session, i.e., it currently
// wants packets to be sent out according to the time returned by
@@ -105,6 +109,8 @@ class BitrateProber {
};
Timestamp CalculateNextProbeTime(const ProbeCluster& cluster) const;
+
+ void MaybeSetActiveState(DataSize packet_size);
bool ReadyToSetActiveState(DataSize packet_size) const;
ProbingState probing_state_;
diff --git a/third_party/libwebrtc/modules/pacing/pacing_controller.cc b/third_party/libwebrtc/modules/pacing/pacing_controller.cc
index 41f97a37fb..a45c5d8f63 100644
--- a/third_party/libwebrtc/modules/pacing/pacing_controller.cc
+++ b/third_party/libwebrtc/modules/pacing/pacing_controller.cc
@@ -252,6 +252,10 @@ void PacingController::SetSendBurstInterval(TimeDelta burst_interval) {
send_burst_interval_ = burst_interval;
}
+void PacingController::SetAllowProbeWithoutMediaPacket(bool allow) {
+ prober_.SetAllowProbeWithoutMediaPacket(allow);
+}
+
TimeDelta PacingController::ExpectedQueueTime() const {
RTC_DCHECK_GT(adjusted_media_rate_, DataRate::Zero());
return QueueSizeData() / adjusted_media_rate_;
diff --git a/third_party/libwebrtc/modules/pacing/pacing_controller.h b/third_party/libwebrtc/modules/pacing/pacing_controller.h
index fe6ee737a9..bdf8bef392 100644
--- a/third_party/libwebrtc/modules/pacing/pacing_controller.h
+++ b/third_party/libwebrtc/modules/pacing/pacing_controller.h
@@ -160,6 +160,9 @@ class PacingController {
// 'burst_interval'.
void SetSendBurstInterval(TimeDelta burst_interval);
+ // A probe may be sent without first waiting for a media packet.
+ void SetAllowProbeWithoutMediaPacket(bool allow);
+
// Returns the time when the oldest packet was queued.
Timestamp OldestPacketEnqueueTime() const;
diff --git a/third_party/libwebrtc/modules/pacing/pacing_controller_unittest.cc b/third_party/libwebrtc/modules/pacing/pacing_controller_unittest.cc
index 2c3a71b369..8a37292b95 100644
--- a/third_party/libwebrtc/modules/pacing/pacing_controller_unittest.cc
+++ b/third_party/libwebrtc/modules/pacing/pacing_controller_unittest.cc
@@ -1366,10 +1366,9 @@ TEST_F(PacingControllerTest, CanProbeWithPaddingBeforeFirstMediaPacket) {
const int kInitialBitrateBps = 300000;
PacingControllerProbing packet_sender;
- const test::ExplicitKeyValueConfig trials(
- "WebRTC-Bwe-ProbingBehavior/min_packet_size:0/");
auto pacer =
- std::make_unique<PacingController>(&clock_, &packet_sender, trials);
+ std::make_unique<PacingController>(&clock_, &packet_sender, trials_);
+ pacer->SetAllowProbeWithoutMediaPacket(true);
std::vector<ProbeClusterConfig> probe_clusters = {
{.at_time = clock_.CurrentTime(),
.target_data_rate = kFirstClusterRate,
@@ -1393,16 +1392,46 @@ TEST_F(PacingControllerTest, CanProbeWithPaddingBeforeFirstMediaPacket) {
EXPECT_GT(packet_sender.padding_packets_sent(), 5);
}
+TEST_F(PacingControllerTest, ProbeSentAfterSetAllowProbeWithoutMediaPacket) {
+ const int kInitialBitrateBps = 300000;
+
+ PacingControllerProbing packet_sender;
+ auto pacer =
+ std::make_unique<PacingController>(&clock_, &packet_sender, trials_);
+ std::vector<ProbeClusterConfig> probe_clusters = {
+ {.at_time = clock_.CurrentTime(),
+ .target_data_rate = kFirstClusterRate,
+ .target_duration = TimeDelta::Millis(15),
+ .target_probe_count = 5,
+ .id = 0}};
+ pacer->CreateProbeClusters(probe_clusters);
+
+ pacer->SetPacingRates(
+ DataRate::BitsPerSec(kInitialBitrateBps * kPaceMultiplier),
+ DataRate::Zero());
+
+ pacer->SetAllowProbeWithoutMediaPacket(true);
+
+ Timestamp start = clock_.CurrentTime();
+ Timestamp next_process = pacer->NextSendTime();
+ while (clock_.CurrentTime() < start + TimeDelta::Millis(100) &&
+ next_process.IsFinite()) {
+ AdvanceTimeUntil(next_process);
+ pacer->ProcessPackets();
+ next_process = pacer->NextSendTime();
+ }
+ EXPECT_GT(packet_sender.padding_packets_sent(), 5);
+}
+
TEST_F(PacingControllerTest, CanNotProbeWithPaddingIfGeneratePaddingFails) {
// const size_t kPacketSize = 1200;
const int kInitialBitrateBps = 300000;
PacingControllerProbing packet_sender;
packet_sender.SetCanGeneratePadding(false);
- const test::ExplicitKeyValueConfig trials(
- "WebRTC-Bwe-ProbingBehavior/min_packet_size:0/");
auto pacer =
- std::make_unique<PacingController>(&clock_, &packet_sender, trials);
+ std::make_unique<PacingController>(&clock_, &packet_sender, trials_);
+ pacer->SetAllowProbeWithoutMediaPacket(true);
std::vector<ProbeClusterConfig> probe_clusters = {
{.at_time = clock_.CurrentTime(),
.target_data_rate = kFirstClusterRate,
diff --git a/third_party/libwebrtc/modules/pacing/packet_router.cc b/third_party/libwebrtc/modules/pacing/packet_router.cc
index 4c986ad027..0ad64f212d 100644
--- a/third_party/libwebrtc/modules/pacing/packet_router.cc
+++ b/third_party/libwebrtc/modules/pacing/packet_router.cc
@@ -65,6 +65,16 @@ void PacketRouter::AddSendRtpModule(RtpRtcpInterface* rtp_module,
}
}
+bool PacketRouter::SupportsRtxPayloadPadding() const {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ for (RtpRtcpInterface* rtp_module : send_modules_list_) {
+ if (rtp_module->SupportsRtxPayloadPadding()) {
+ return true;
+ }
+ }
+ return false;
+}
+
void PacketRouter::AddSendRtpModuleToMap(RtpRtcpInterface* rtp_module,
uint32_t ssrc) {
RTC_DCHECK_RUN_ON(&thread_checker_);
diff --git a/third_party/libwebrtc/modules/pacing/packet_router.h b/third_party/libwebrtc/modules/pacing/packet_router.h
index 61779f49e5..4c5747f7e3 100644
--- a/third_party/libwebrtc/modules/pacing/packet_router.h
+++ b/third_party/libwebrtc/modules/pacing/packet_router.h
@@ -50,6 +50,8 @@ class PacketRouter : public PacingController::PacketSender {
void AddSendRtpModule(RtpRtcpInterface* rtp_module, bool remb_candidate);
void RemoveSendRtpModule(RtpRtcpInterface* rtp_module);
+ bool SupportsRtxPayloadPadding() const;
+
void AddReceiveRtpModule(RtcpFeedbackSenderInterface* rtcp_sender,
bool remb_candidate);
void RemoveReceiveRtpModule(RtcpFeedbackSenderInterface* rtcp_sender);
diff --git a/third_party/libwebrtc/modules/pacing/packet_router_unittest.cc b/third_party/libwebrtc/modules/pacing/packet_router_unittest.cc
index af8534316c..b91c309eec 100644
--- a/third_party/libwebrtc/modules/pacing/packet_router_unittest.cc
+++ b/third_party/libwebrtc/modules/pacing/packet_router_unittest.cc
@@ -125,6 +125,31 @@ TEST_F(PacketRouterTest, GeneratePaddingPrioritizesRtx) {
packet_router_.RemoveSendRtpModule(&rtp_2);
}
+TEST_F(PacketRouterTest, SupportsRtxPayloadPaddingFalseIfNoRtxSendModule) {
+ EXPECT_FALSE(packet_router_.SupportsRtxPayloadPadding());
+
+ NiceMock<MockRtpRtcpInterface> none_rtx_module;
+ ON_CALL(none_rtx_module, SupportsRtxPayloadPadding())
+ .WillByDefault(Return(false));
+
+ packet_router_.AddSendRtpModule(&none_rtx_module, false);
+ EXPECT_FALSE(packet_router_.SupportsRtxPayloadPadding());
+
+ packet_router_.RemoveSendRtpModule(&none_rtx_module);
+ EXPECT_FALSE(packet_router_.SupportsRtxPayloadPadding());
+}
+
+TEST_F(PacketRouterTest, SupportsRtxPayloadPaddingTrueIfRtxSendModule) {
+ NiceMock<MockRtpRtcpInterface> rtx_module;
+ ON_CALL(rtx_module, SupportsRtxPayloadPadding()).WillByDefault(Return(true));
+
+ packet_router_.AddSendRtpModule(&rtx_module, false);
+ EXPECT_TRUE(packet_router_.SupportsRtxPayloadPadding());
+
+ packet_router_.RemoveSendRtpModule(&rtx_module);
+ EXPECT_FALSE(packet_router_.SupportsRtxPayloadPadding());
+}
+
TEST_F(PacketRouterTest, GeneratePaddingPrioritizesVideo) {
// Two RTP modules. Neither support RTX, both support padding,
// but the first one is for audio and second for video.
diff --git a/third_party/libwebrtc/modules/pacing/task_queue_paced_sender.cc b/third_party/libwebrtc/modules/pacing/task_queue_paced_sender.cc
index f7218e48a1..5559153251 100644
--- a/third_party/libwebrtc/modules/pacing/task_queue_paced_sender.cc
+++ b/third_party/libwebrtc/modules/pacing/task_queue_paced_sender.cc
@@ -52,6 +52,11 @@ void TaskQueuePacedSender::SetSendBurstInterval(TimeDelta burst_interval) {
pacing_controller_.SetSendBurstInterval(burst_interval);
}
+void TaskQueuePacedSender::SetAllowProbeWithoutMediaPacket(bool allow) {
+ RTC_DCHECK_RUN_ON(task_queue_);
+ pacing_controller_.SetAllowProbeWithoutMediaPacket(allow);
+}
+
void TaskQueuePacedSender::EnsureStarted() {
RTC_DCHECK_RUN_ON(task_queue_);
is_started_ = true;
diff --git a/third_party/libwebrtc/modules/pacing/task_queue_paced_sender.h b/third_party/libwebrtc/modules/pacing/task_queue_paced_sender.h
index e29acdf878..a1d2474ca1 100644
--- a/third_party/libwebrtc/modules/pacing/task_queue_paced_sender.h
+++ b/third_party/libwebrtc/modules/pacing/task_queue_paced_sender.h
@@ -60,6 +60,9 @@ class TaskQueuePacedSender : public RtpPacketPacer, public RtpPacketSender {
// 'burst_interval'.
void SetSendBurstInterval(TimeDelta burst_interval);
+ // A probe may be sent without first waiting for a media packet.
+ void SetAllowProbeWithoutMediaPacket(bool allow);
+
// Ensure that necessary delayed tasks are scheduled.
void EnsureStarted();
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/BUILD.gn b/third_party/libwebrtc/modules/rtp_rtcp/BUILD.gn
index b471c2fa76..2c42e53d36 100644
--- a/third_party/libwebrtc/modules/rtp_rtcp/BUILD.gn
+++ b/third_party/libwebrtc/modules/rtp_rtcp/BUILD.gn
@@ -260,8 +260,11 @@ rtc_library("rtp_rtcp") {
if (rtc_use_h265) {
sources += [
+ "source/rtp_packet_h265_common.h",
"source/rtp_packetizer_h265.cc",
"source/rtp_packetizer_h265.h",
+ "source/video_rtp_depacketizer_h265.cc",
+ "source/video_rtp_depacketizer_h265.h",
]
}
@@ -632,7 +635,10 @@ if (rtc_include_tests) {
"source/video_rtp_depacketizer_vp9_unittest.cc",
]
if (rtc_use_h265) {
- sources += [ "source/rtp_packetizer_h265_unittest.cc" ]
+ sources += [
+ "source/rtp_packetizer_h265_unittest.cc",
+ "source/video_rtp_depacketizer_h265_unittest.cc",
+ ]
}
deps = [
@@ -652,6 +658,7 @@ if (rtc_include_tests) {
"../../api:frame_transformer_factory",
"../../api:make_ref_counted",
"../../api:mock_frame_encryptor",
+ "../../api:mock_frame_transformer",
"../../api:mock_transformable_video_frame",
"../../api:rtp_headers",
"../../api:rtp_packet_info",
@@ -698,7 +705,6 @@ if (rtc_include_tests) {
"../../rtc_base:timeutils",
"../../system_wrappers",
"../../test:explicit_key_value_config",
- "../../test:mock_frame_transformer",
"../../test:mock_transport",
"../../test:rtp_test_utils",
"../../test:run_loop",
@@ -720,13 +726,13 @@ if (rtc_include_tests) {
sources = [ "source/frame_transformer_factory_unittest.cc" ]
deps = [
"../../api:frame_transformer_factory",
+ "../../api:mock_frame_transformer",
"../../api:mock_transformable_audio_frame",
"../../api:mock_transformable_video_frame",
"../../api:transport_api",
"../../call:video_stream_api",
"../../modules/rtp_rtcp",
"../../rtc_base:rtc_event",
- "../../test:mock_frame_transformer",
"../../test:test_support",
"../../video",
]
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/create_video_rtp_depacketizer.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/create_video_rtp_depacketizer.cc
index 95db212bef..598a86d4ad 100644
--- a/third_party/libwebrtc/modules/rtp_rtcp/source/create_video_rtp_depacketizer.cc
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/create_video_rtp_depacketizer.cc
@@ -19,6 +19,9 @@
#include "modules/rtp_rtcp/source/video_rtp_depacketizer_h264.h"
#include "modules/rtp_rtcp/source/video_rtp_depacketizer_vp8.h"
#include "modules/rtp_rtcp/source/video_rtp_depacketizer_vp9.h"
+#ifdef RTC_ENABLE_H265
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer_h265.h"
+#endif
namespace webrtc {
@@ -34,8 +37,11 @@ std::unique_ptr<VideoRtpDepacketizer> CreateVideoRtpDepacketizer(
case kVideoCodecAV1:
return std::make_unique<VideoRtpDepacketizerAv1>();
case kVideoCodecH265:
- // TODO(bugs.webrtc.org/13485): Implement VideoRtpDepacketizerH265.
+#ifdef RTC_ENABLE_H265
+ return std::make_unique<VideoRtpDepacketizerH265>();
+#else
return nullptr;
+#endif
case kVideoCodecGeneric:
case kVideoCodecMultiplex:
return std::make_unique<VideoRtpDepacketizerGeneric>();
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/frame_transformer_factory_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/frame_transformer_factory_unittest.cc
index a61179e9d3..788052da39 100644
--- a/third_party/libwebrtc/modules/rtp_rtcp/source/frame_transformer_factory_unittest.cc
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/frame_transformer_factory_unittest.cc
@@ -17,6 +17,7 @@
#include "absl/memory/memory.h"
#include "api/call/transport.h"
+#include "api/test/mock_frame_transformer.h"
#include "api/test/mock_transformable_audio_frame.h"
#include "api/test/mock_transformable_video_frame.h"
#include "call/video_receive_stream.h"
@@ -24,7 +25,6 @@
#include "rtc_base/event.h"
#include "test/gmock.h"
#include "test/gtest.h"
-#include "test/mock_frame_transformer.h"
namespace webrtc {
namespace {
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet_h265_common.h b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet_h265_common.h
new file mode 100644
index 0000000000..8655a02001
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packet_h265_common.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2024 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef MODULES_RTP_RTCP_SOURCE_RTP_PACKET_H265_COMMON_H_
+#define MODULES_RTP_RTCP_SOURCE_RTP_PACKET_H265_COMMON_H_
+
+#include <string>
+#include <vector>
+
+namespace webrtc {
+// The payload header consists of the same
+// fields (F, Type, LayerId and TID) as the NAL unit header. Refer to
+// section 4.2 in RFC 7798.
+constexpr size_t kH265PayloadHeaderSizeBytes = 2;
+// Unlike H.264, H.265 NAL header is 2-bytes.
+constexpr size_t kH265NalHeaderSizeBytes = 2;
+// H.265's FU is constructed of 2-byte payload header, 1-byte FU header and FU
+// payload.
+constexpr size_t kH265FuHeaderSizeBytes = 1;
+// The NALU size for H.265 RTP aggregated packet indicates the size of the NAL
+// unit is 2-bytes.
+constexpr size_t kH265LengthFieldSizeBytes = 2;
+constexpr size_t kH265ApHeaderSizeBytes =
+ kH265NalHeaderSizeBytes + kH265LengthFieldSizeBytes;
+
+// Bit masks for NAL headers.
+enum NalHdrMasks {
+ kH265FBit = 0x80,
+ kH265TypeMask = 0x7E,
+ kH265LayerIDHMask = 0x1,
+ kH265LayerIDLMask = 0xF8,
+ kH265TIDMask = 0x7,
+ kH265TypeMaskN = 0x81,
+ kH265TypeMaskInFuHeader = 0x3F
+};
+
+// Bit masks for FU headers.
+enum FuBitmasks {
+ kH265SBitMask = 0x80,
+ kH265EBitMask = 0x40,
+ kH265FuTypeBitMask = 0x3F
+};
+
+constexpr uint8_t kStartCode[] = {0, 0, 0, 1};
+
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_RTP_PACKET_H265_COMMON_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packetizer_h265.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packetizer_h265.cc
index 313680cc87..5f10120d81 100644
--- a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packetizer_h265.cc
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packetizer_h265.cc
@@ -16,42 +16,10 @@
#include "common_video/h264/h264_common.h"
#include "common_video/h265/h265_common.h"
#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/rtp_rtcp/source/rtp_packet_h265_common.h"
#include "rtc_base/logging.h"
namespace webrtc {
-namespace {
-
-// The payload header consists of the same
-// fields (F, Type, LayerId and TID) as the NAL unit header. Refer to
-// section 4.2 in RFC 7798.
-constexpr size_t kH265PayloadHeaderSize = 2;
-// Unlike H.264, H265 NAL header is 2-bytes.
-constexpr size_t kH265NalHeaderSize = 2;
-// H265's FU is constructed of 2-byte payload header, 1-byte FU header and FU
-// payload.
-constexpr size_t kH265FuHeaderSize = 1;
-// The NALU size for H265 RTP aggregated packet indicates the size of the NAL
-// unit is 2-bytes.
-constexpr size_t kH265LengthFieldSize = 2;
-
-enum H265NalHdrMasks {
- kH265FBit = 0x80,
- kH265TypeMask = 0x7E,
- kH265LayerIDHMask = 0x1,
- kH265LayerIDLMask = 0xF8,
- kH265TIDMask = 0x7,
- kH265TypeMaskN = 0x81,
- kH265TypeMaskInFuHeader = 0x3F
-};
-
-// Bit masks for FU headers.
-enum H265FuBitmasks {
- kH265SBitMask = 0x80,
- kH265EBitMask = 0x40,
- kH265FuTypeBitMask = 0x3F
-};
-
-} // namespace
RtpPacketizerH265::RtpPacketizerH265(rtc::ArrayView<const uint8_t> payload,
PayloadSizeLimits limits)
@@ -112,7 +80,8 @@ bool RtpPacketizerH265::PacketizeFu(size_t fragment_index) {
// Refer to section 4.4.3 in RFC7798, each FU fragment will have a 2-bytes
// payload header and a one-byte FU header. DONL is not supported so ignore
// its size when calculating max_payload_len.
- limits.max_payload_len -= kH265FuHeaderSize + kH265PayloadHeaderSize;
+ limits.max_payload_len -=
+ kH265FuHeaderSizeBytes + kH265PayloadHeaderSizeBytes;
// Update single/first/last packet reductions unless it is single/first/last
// fragment.
@@ -135,8 +104,8 @@ bool RtpPacketizerH265::PacketizeFu(size_t fragment_index) {
}
// Strip out the original header.
- size_t payload_left = fragment.size() - kH265NalHeaderSize;
- int offset = kH265NalHeaderSize;
+ size_t payload_left = fragment.size() - kH265NalHeaderSizeBytes;
+ int offset = kH265NalHeaderSizeBytes;
std::vector<int> payload_sizes = SplitAboutEqually(payload_left, limits);
if (payload_sizes.empty()) {
@@ -198,12 +167,13 @@ int RtpPacketizerH265::PacketizeAp(size_t fragment_index) {
payload_size_left -= fragment.size();
payload_size_left -= fragment_headers_length;
- fragment_headers_length = kH265LengthFieldSize;
+ fragment_headers_length = kH265LengthFieldSizeBytes;
// If we are going to try to aggregate more fragments into this packet
// we need to add the AP NALU header and a length field for the first
// NALU of this packet.
if (aggregated_fragments == 0) {
- fragment_headers_length += kH265PayloadHeaderSize + kH265LengthFieldSize;
+ fragment_headers_length +=
+ kH265PayloadHeaderSizeBytes + kH265LengthFieldSizeBytes;
}
++aggregated_fragments;
@@ -248,7 +218,7 @@ bool RtpPacketizerH265::NextPacket(RtpPacketToSend* rtp_packet) {
void RtpPacketizerH265::NextAggregatePacket(RtpPacketToSend* rtp_packet) {
size_t payload_capacity = rtp_packet->FreeCapacity();
- RTC_CHECK_GE(payload_capacity, kH265PayloadHeaderSize);
+ RTC_CHECK_GE(payload_capacity, kH265PayloadHeaderSizeBytes);
uint8_t* buffer = rtp_packet->AllocatePayload(payload_capacity);
RTC_CHECK(buffer);
PacketUnit* packet = &packets_.front();
@@ -272,13 +242,13 @@ void RtpPacketizerH265::NextAggregatePacket(RtpPacketToSend* rtp_packet) {
buffer[0] = payload_hdr_h;
buffer[1] = payload_hdr_l;
- int index = kH265PayloadHeaderSize;
+ int index = kH265PayloadHeaderSizeBytes;
bool is_last_fragment = packet->last_fragment;
while (packet->aggregated) {
// Add NAL unit length field.
rtc::ArrayView<const uint8_t> fragment = packet->source_fragment;
ByteWriter<uint16_t>::WriteBigEndian(&buffer[index], fragment.size());
- index += kH265LengthFieldSize;
+ index += kH265LengthFieldSizeBytes;
// Add NAL unit.
memcpy(&buffer[index], fragment.data(), fragment.size());
index += fragment.size();
@@ -332,15 +302,15 @@ void RtpPacketizerH265::NextFragmentPacket(RtpPacketToSend* rtp_packet) {
(H265::NaluType::kFu << 1) | layer_id_h;
rtc::ArrayView<const uint8_t> fragment = packet->source_fragment;
uint8_t* buffer = rtp_packet->AllocatePayload(
- kH265FuHeaderSize + kH265PayloadHeaderSize + fragment.size());
+ kH265FuHeaderSizeBytes + kH265PayloadHeaderSizeBytes + fragment.size());
RTC_CHECK(buffer);
buffer[0] = payload_hdr_h;
buffer[1] = payload_hdr_l;
buffer[2] = fu_header;
// Do not support DONL for fragmentation units, DONL field is not present.
- memcpy(buffer + kH265FuHeaderSize + kH265PayloadHeaderSize, fragment.data(),
- fragment.size());
+ memcpy(buffer + kH265FuHeaderSizeBytes + kH265PayloadHeaderSizeBytes,
+ fragment.data(), fragment.size());
if (packet->last_fragment) {
input_fragments_.pop_front();
}
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packetizer_h265_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packetizer_h265_unittest.cc
index cb1de334c0..8f739e8618 100644
--- a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packetizer_h265_unittest.cc
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_packetizer_h265_unittest.cc
@@ -15,6 +15,7 @@
#include "common_video/h265/h265_common.h"
#include "modules/rtp_rtcp/mocks/mock_rtp_rtcp.h"
#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/rtp_rtcp/source/rtp_packet_h265_common.h"
#include "test/gmock.h"
#include "test/gtest.h"
@@ -29,18 +30,12 @@ using ::testing::IsEmpty;
using ::testing::SizeIs;
constexpr RtpPacketToSend::ExtensionManager* kNoExtensions = nullptr;
-constexpr size_t kMaxPayloadSize = 1200;
-constexpr size_t kLengthFieldLength = 2;
+constexpr size_t kMaxPayloadSizeBytes = 1200;
+constexpr size_t kH265LengthFieldSizeBytes = 2;
constexpr RtpPacketizer::PayloadSizeLimits kNoLimits;
-constexpr size_t kNalHeaderSize = 2;
-constexpr size_t kFuHeaderSize = 3;
-
-constexpr uint8_t kNaluTypeMask = 0x7E;
-
-// Bit masks for FU headers.
-constexpr uint8_t kH265SBit = 0x80;
-constexpr uint8_t kH265EBit = 0x40;
+constexpr size_t kFuHeaderSizeBytes =
+ kH265FuHeaderSizeBytes + kH265PayloadHeaderSizeBytes;
// Creates Buffer that looks like nal unit of given size.
rtc::Buffer GenerateNalUnit(size_t size) {
@@ -127,8 +122,8 @@ TEST(RtpPacketizerH265Test, SingleNalu) {
TEST(RtpPacketizerH265Test, SingleNaluTwoPackets) {
RtpPacketizer::PayloadSizeLimits limits;
- limits.max_payload_len = kMaxPayloadSize;
- rtc::Buffer nalus[] = {GenerateNalUnit(kMaxPayloadSize),
+ limits.max_payload_len = kMaxPayloadSizeBytes;
+ rtc::Buffer nalus[] = {GenerateNalUnit(kMaxPayloadSizeBytes),
GenerateNalUnit(100)};
rtc::Buffer frame = CreateFrame(nalus);
@@ -205,27 +200,28 @@ TEST(RtpPacketizerH265Test, ApRespectsNoPacketReduction) {
ASSERT_THAT(packets, SizeIs(1));
auto payload = packets[0].payload();
int type = H265::ParseNaluType(payload[0]);
- EXPECT_EQ(payload.size(),
- kNalHeaderSize + 3 * kLengthFieldLength + 2 + 2 + 0x123);
+ EXPECT_EQ(payload.size(), kH265NalHeaderSizeBytes +
+ 3 * kH265LengthFieldSizeBytes + 2 + 2 + 0x123);
EXPECT_EQ(type, H265::NaluType::kAp);
- payload = payload.subview(kNalHeaderSize);
+ payload = payload.subview(kH265NalHeaderSizeBytes);
// 1st fragment.
- EXPECT_THAT(payload.subview(0, kLengthFieldLength),
+ EXPECT_THAT(payload.subview(0, kH265LengthFieldSizeBytes),
ElementsAre(0, 2)); // Size.
- EXPECT_THAT(payload.subview(kLengthFieldLength, 2),
+ EXPECT_THAT(payload.subview(kH265LengthFieldSizeBytes, 2),
ElementsAreArray(nalus[0]));
- payload = payload.subview(kLengthFieldLength + 2);
+ payload = payload.subview(kH265LengthFieldSizeBytes + 2);
// 2nd fragment.
- EXPECT_THAT(payload.subview(0, kLengthFieldLength),
+ EXPECT_THAT(payload.subview(0, kH265LengthFieldSizeBytes),
ElementsAre(0, 2)); // Size.
- EXPECT_THAT(payload.subview(kLengthFieldLength, 2),
+ EXPECT_THAT(payload.subview(kH265LengthFieldSizeBytes, 2),
ElementsAreArray(nalus[1]));
- payload = payload.subview(kLengthFieldLength + 2);
+ payload = payload.subview(kH265LengthFieldSizeBytes + 2);
// 3rd fragment.
- EXPECT_THAT(payload.subview(0, kLengthFieldLength),
+ EXPECT_THAT(payload.subview(0, kH265LengthFieldSizeBytes),
ElementsAre(0x1, 0x23)); // Size.
- EXPECT_THAT(payload.subview(kLengthFieldLength), ElementsAreArray(nalus[2]));
+ EXPECT_THAT(payload.subview(kH265LengthFieldSizeBytes),
+ ElementsAreArray(nalus[2]));
}
TEST(RtpPacketizerH265Test, ApRespectsFirstPacketReduction) {
@@ -284,7 +280,7 @@ TEST(RtpPacketizerH265Test, TooSmallForApHeaders) {
RtpPacketizer::PayloadSizeLimits limits;
limits.max_payload_len = 1000;
const size_t kLastFragmentSize =
- limits.max_payload_len - 3 * kLengthFieldLength - 4;
+ limits.max_payload_len - 3 * kH265LengthFieldSizeBytes - 4;
rtc::Buffer nalus[] = {GenerateNalUnit(/*size=*/2),
GenerateNalUnit(/*size=*/2),
GenerateNalUnit(/*size=*/kLastFragmentSize)};
@@ -326,7 +322,8 @@ TEST(RtpPacketizerH265Test, LastFragmentFitsInSingleButNotLastPacket) {
// Returns sizes of the payloads excluding FU headers.
std::vector<int> TestFu(size_t frame_payload_size,
const RtpPacketizer::PayloadSizeLimits& limits) {
- rtc::Buffer nalu[] = {GenerateNalUnit(kNalHeaderSize + frame_payload_size)};
+ rtc::Buffer nalu[] = {
+ GenerateNalUnit(kH265NalHeaderSizeBytes + frame_payload_size)};
rtc::Buffer frame = CreateFrame(nalu);
RtpPacketizerH265 packetizer(frame, limits);
@@ -338,18 +335,18 @@ std::vector<int> TestFu(size_t frame_payload_size,
for (const RtpPacketToSend& packet : packets) {
auto payload = packet.payload();
- EXPECT_GT(payload.size(), kFuHeaderSize);
+ EXPECT_GT(payload.size(), kFuHeaderSizeBytes);
// FU header is after the 2-bytes size PayloadHdr according to 4.4.3 in spec
fu_header.push_back(payload[2]);
- payload_sizes.push_back(payload.size() - kFuHeaderSize);
+ payload_sizes.push_back(payload.size() - kFuHeaderSizeBytes);
}
- EXPECT_TRUE(fu_header.front() & kH265SBit);
- EXPECT_TRUE(fu_header.back() & kH265EBit);
+ EXPECT_TRUE(fu_header.front() & kH265SBitMask);
+ EXPECT_TRUE(fu_header.back() & kH265EBitMask);
// Clear S and E bits before testing all are duplicating same original header.
- fu_header.front() &= ~kH265SBit;
- fu_header.back() &= ~kH265EBit;
- uint8_t nalu_type = (nalu[0][0] & kNaluTypeMask) >> 1;
+ fu_header.front() &= ~kH265SBitMask;
+ fu_header.back() &= ~kH265EBitMask;
+ uint8_t nalu_type = (nalu[0][0] & kH265TypeMask) >> 1;
EXPECT_THAT(fu_header, Each(Eq(nalu_type)));
return payload_sizes;
@@ -403,7 +400,7 @@ TEST(RtpPacketizerH265Test, FuBig) {
limits.max_payload_len = 1200;
// Generate 10 full sized packets, leave room for FU headers.
EXPECT_THAT(
- TestFu(10 * (1200 - kFuHeaderSize), limits),
+ TestFu(10 * (1200 - kFuHeaderSizeBytes), limits),
ElementsAre(1197, 1197, 1197, 1197, 1197, 1197, 1197, 1197, 1197, 1197));
}
@@ -449,30 +446,30 @@ TEST_P(RtpPacketizerH265ParametrizedTest, MixedApFu) {
if (expected_packet.aggregated) {
int type = H265::ParseNaluType(packets[i].payload()[0]);
EXPECT_THAT(type, H265::NaluType::kAp);
- auto payload = packets[i].payload().subview(kNalHeaderSize);
+ auto payload = packets[i].payload().subview(kH265NalHeaderSizeBytes);
int offset = 0;
// Generated AP packet header and payload align
for (int j = expected_packet.nalu_index; j < expected_packet.nalu_number;
j++) {
- EXPECT_THAT(payload.subview(0, kLengthFieldLength),
+ EXPECT_THAT(payload.subview(0, kH265LengthFieldSizeBytes),
ElementsAre(0, nalus[j].size()));
- EXPECT_THAT(
- payload.subview(offset + kLengthFieldLength, nalus[j].size()),
- ElementsAreArray(nalus[j]));
- offset += kLengthFieldLength + nalus[j].size();
+ EXPECT_THAT(payload.subview(offset + kH265LengthFieldSizeBytes,
+ nalus[j].size()),
+ ElementsAreArray(nalus[j]));
+ offset += kH265LengthFieldSizeBytes + nalus[j].size();
}
} else {
uint8_t fu_header = 0;
- fu_header |= (expected_packet.first_fragment ? kH265SBit : 0);
- fu_header |= (expected_packet.last_fragment ? kH265EBit : 0);
+ fu_header |= (expected_packet.first_fragment ? kH265SBitMask : 0);
+ fu_header |= (expected_packet.last_fragment ? kH265EBitMask : 0);
fu_header |= H265::NaluType::kTrailR;
- EXPECT_THAT(packets[i].payload().subview(0, kFuHeaderSize),
+ EXPECT_THAT(packets[i].payload().subview(0, kFuHeaderSizeBytes),
ElementsAre(98, 2, fu_header));
- EXPECT_THAT(
- packets[i].payload().subview(kFuHeaderSize),
- ElementsAreArray(nalus[expected_packet.nalu_index].data() +
- kNalHeaderSize + expected_packet.start_offset,
- expected_packet.payload_size));
+ EXPECT_THAT(packets[i].payload().subview(kFuHeaderSizeBytes),
+ ElementsAreArray(nalus[expected_packet.nalu_index].data() +
+ kH265NalHeaderSizeBytes +
+ expected_packet.start_offset,
+ expected_packet.payload_size));
}
}
}
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate_unittest.cc
index 6790fc3a71..586836a90e 100644
--- a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate_unittest.cc
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate_unittest.cc
@@ -12,11 +12,11 @@
#include <utility>
+#include "api/test/mock_frame_transformer.h"
#include "api/test/mock_transformable_video_frame.h"
#include "rtc_base/event.h"
#include "test/gmock.h"
#include "test/gtest.h"
-#include "test/mock_frame_transformer.h"
#include "test/time_controller/simulated_time_controller.h"
namespace webrtc {
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_video_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_video_unittest.cc
index 9641d617d9..112a2979fd 100644
--- a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_video_unittest.cc
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_sender_video_unittest.cc
@@ -21,6 +21,7 @@
#include "api/task_queue/task_queue_base.h"
#include "api/task_queue/task_queue_factory.h"
#include "api/test/mock_frame_encryptor.h"
+#include "api/test/mock_frame_transformer.h"
#include "api/transport/rtp/dependency_descriptor.h"
#include "api/units/timestamp.h"
#include "api/video/video_codec_constants.h"
@@ -46,7 +47,6 @@
#include "test/explicit_key_value_config.h"
#include "test/gmock.h"
#include "test/gtest.h"
-#include "test/mock_frame_transformer.h"
#include "test/time_controller/simulated_time_controller.h"
namespace webrtc {
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_video_stream_receiver_frame_transformer_delegate_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_video_stream_receiver_frame_transformer_delegate_unittest.cc
index cf3062610f..192e239535 100644
--- a/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_video_stream_receiver_frame_transformer_delegate_unittest.cc
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/rtp_video_stream_receiver_frame_transformer_delegate_unittest.cc
@@ -17,6 +17,7 @@
#include "absl/memory/memory.h"
#include "api/call/transport.h"
+#include "api/test/mock_frame_transformer.h"
#include "api/test/mock_transformable_video_frame.h"
#include "api/units/timestamp.h"
#include "call/video_receive_stream.h"
@@ -24,7 +25,6 @@
#include "rtc_base/event.h"
#include "test/gmock.h"
#include "test/gtest.h"
-#include "test/mock_frame_transformer.h"
namespace webrtc {
namespace {
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_h265.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_h265.cc
new file mode 100644
index 0000000000..b54df7c271
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_h265.cc
@@ -0,0 +1,244 @@
+/*
+ * Copyright (c) 2024 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer_h265.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "absl/base/macros.h"
+#include "absl/types/optional.h"
+#include "absl/types/variant.h"
+#include "api/video/video_codec_type.h"
+#include "common_video/h264/h264_common.h"
+#include "common_video/h265/h265_bitstream_parser.h"
+#include "common_video/h265/h265_common.h"
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/rtp_rtcp/source/rtp_packet_h265_common.h"
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace {
+
+bool ParseApStartOffsets(const uint8_t* nalu_ptr,
+ size_t length_remaining,
+ std::vector<size_t>* offsets) {
+ size_t offset = 0;
+ while (length_remaining > 0) {
+ // Buffer doesn't contain room for additional NALU length.
+ if (length_remaining < kH265LengthFieldSizeBytes)
+ return false;
+ // Read 16-bit NALU size defined in RFC7798 section 4.4.2.
+ uint16_t nalu_size = ByteReader<uint16_t>::ReadBigEndian(nalu_ptr);
+ nalu_ptr += kH265LengthFieldSizeBytes;
+ length_remaining -= kH265LengthFieldSizeBytes;
+ if (nalu_size > length_remaining)
+ return false;
+ nalu_ptr += nalu_size;
+ length_remaining -= nalu_size;
+
+ offsets->push_back(offset + kH265ApHeaderSizeBytes);
+ offset += kH265LengthFieldSizeBytes + nalu_size;
+ }
+ return true;
+}
+
+absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> ProcessApOrSingleNalu(
+ rtc::CopyOnWriteBuffer rtp_payload) {
+ // Skip the single NALU header (payload header), aggregated packet case will
+ // be checked later.
+ if (rtp_payload.size() <= kH265PayloadHeaderSizeBytes) {
+ RTC_LOG(LS_ERROR) << "Single NALU header truncated.";
+ return absl::nullopt;
+ }
+ const uint8_t* const payload_data = rtp_payload.cdata();
+ absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> parsed_payload(
+ absl::in_place);
+ parsed_payload->video_header.width = 0;
+ parsed_payload->video_header.height = 0;
+ parsed_payload->video_header.codec = kVideoCodecH265;
+ parsed_payload->video_header.is_first_packet_in_frame = true;
+
+ const uint8_t* nalu_start = payload_data + kH265PayloadHeaderSizeBytes;
+ const size_t nalu_length = rtp_payload.size() - kH265PayloadHeaderSizeBytes;
+ uint8_t nal_type = (payload_data[0] & kH265TypeMask) >> 1;
+ std::vector<size_t> nalu_start_offsets;
+ rtc::CopyOnWriteBuffer video_payload;
+ if (nal_type == H265::NaluType::kAp) {
+ // Skip the aggregated packet header (Aggregated packet NAL type + length).
+ if (rtp_payload.size() <= kH265ApHeaderSizeBytes) {
+ RTC_LOG(LS_ERROR) << "Aggregated packet header truncated.";
+ return absl::nullopt;
+ }
+
+ if (!ParseApStartOffsets(nalu_start, nalu_length, &nalu_start_offsets)) {
+ RTC_LOG(LS_ERROR)
+ << "Aggregated packet with incorrect NALU packet lengths.";
+ return absl::nullopt;
+ }
+
+ nal_type = (payload_data[kH265ApHeaderSizeBytes] & kH265TypeMask) >> 1;
+ } else {
+ nalu_start_offsets.push_back(0);
+ }
+ parsed_payload->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+
+ nalu_start_offsets.push_back(rtp_payload.size() +
+ kH265LengthFieldSizeBytes); // End offset.
+ for (size_t i = 0; i < nalu_start_offsets.size() - 1; ++i) {
+ size_t start_offset = nalu_start_offsets[i];
+ // End offset is actually start offset for next unit, excluding length field
+ // so remove that from this units length.
+ size_t end_offset = nalu_start_offsets[i + 1] - kH265LengthFieldSizeBytes;
+ if (end_offset - start_offset < kH265NalHeaderSizeBytes) {
+ RTC_LOG(LS_ERROR) << "Aggregated packet too short";
+ return absl::nullopt;
+ }
+
+ // Insert start code before each NALU in aggregated packet.
+ video_payload.AppendData(kStartCode);
+ video_payload.AppendData(&payload_data[start_offset],
+ end_offset - start_offset);
+
+ uint8_t nalu_type = (payload_data[start_offset] & kH265TypeMask) >> 1;
+ start_offset += kH265NalHeaderSizeBytes;
+ switch (nalu_type) {
+ case H265::NaluType::kBlaWLp:
+ case H265::NaluType::kBlaWRadl:
+ case H265::NaluType::kBlaNLp:
+ case H265::NaluType::kIdrWRadl:
+ case H265::NaluType::kIdrNLp:
+ case H265::NaluType::kCra:
+ case H265::NaluType::kRsvIrapVcl23:
+ parsed_payload->video_header.frame_type =
+ VideoFrameType::kVideoFrameKey;
+ ABSL_FALLTHROUGH_INTENDED;
+ case H265::NaluType::kSps: {
+ // Copy any previous data first (likely just the first header).
+ std::unique_ptr<rtc::Buffer> output_buffer(new rtc::Buffer());
+ if (start_offset)
+ output_buffer->AppendData(payload_data, start_offset);
+
+ absl::optional<H265SpsParser::SpsState> sps = H265SpsParser::ParseSps(
+ &payload_data[start_offset], end_offset - start_offset);
+
+ if (sps) {
+ // TODO(bugs.webrtc.org/13485): Implement the size calculation taking
+ // SPS->vui_parameters.def_disp_win_xx_offset into account.
+ parsed_payload->video_header.width = sps->width;
+ parsed_payload->video_header.height = sps->height;
+ } else {
+ RTC_LOG(LS_WARNING) << "Failed to parse SPS from SPS slice.";
+ }
+ }
+ ABSL_FALLTHROUGH_INTENDED;
+ case H265::NaluType::kVps:
+ case H265::NaluType::kPps:
+ case H265::NaluType::kTrailN:
+ case H265::NaluType::kTrailR:
+ // Slices below don't contain SPS or PPS ids.
+ case H265::NaluType::kAud:
+ case H265::NaluType::kTsaN:
+ case H265::NaluType::kTsaR:
+ case H265::NaluType::kStsaN:
+ case H265::NaluType::kStsaR:
+ case H265::NaluType::kRadlN:
+ case H265::NaluType::kRadlR:
+ case H265::NaluType::kPrefixSei:
+ case H265::NaluType::kSuffixSei:
+ break;
+ case H265::NaluType::kAp:
+ case H265::NaluType::kFu:
+ case H265::NaluType::kPaci:
+ RTC_LOG(LS_WARNING) << "Unexpected AP, FU or PACI received.";
+ return absl::nullopt;
+ }
+ }
+ parsed_payload->video_payload = video_payload;
+ return parsed_payload;
+}
+
+absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> ParseFuNalu(
+ rtc::CopyOnWriteBuffer rtp_payload) {
+ if (rtp_payload.size() < kH265FuHeaderSizeBytes + kH265NalHeaderSizeBytes) {
+ RTC_LOG(LS_ERROR) << "FU NAL units truncated.";
+ return absl::nullopt;
+ }
+ absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> parsed_payload(
+ absl::in_place);
+
+ uint8_t f = rtp_payload.cdata()[0] & kH265FBit;
+ uint8_t layer_id_h = rtp_payload.cdata()[0] & kH265LayerIDHMask;
+ uint8_t layer_id_l_unshifted = rtp_payload.cdata()[1] & kH265LayerIDLMask;
+ uint8_t tid = rtp_payload.cdata()[1] & kH265TIDMask;
+
+ uint8_t original_nal_type = rtp_payload.cdata()[2] & kH265TypeMaskInFuHeader;
+ bool first_fragment = rtp_payload.cdata()[2] & kH265SBitMask;
+ if (first_fragment) {
+ rtp_payload = rtp_payload.Slice(
+ kH265FuHeaderSizeBytes, rtp_payload.size() - kH265FuHeaderSizeBytes);
+ rtp_payload.MutableData()[0] = f | original_nal_type << 1 | layer_id_h;
+ rtp_payload.MutableData()[1] = layer_id_l_unshifted | tid;
+ rtc::CopyOnWriteBuffer video_payload;
+ // Insert start code before the first fragment in FU.
+ video_payload.AppendData(kStartCode);
+ video_payload.AppendData(rtp_payload);
+ parsed_payload->video_payload = video_payload;
+ } else {
+ parsed_payload->video_payload = rtp_payload.Slice(
+ kH265NalHeaderSizeBytes + kH265FuHeaderSizeBytes,
+ rtp_payload.size() - kH265NalHeaderSizeBytes - kH265FuHeaderSizeBytes);
+ }
+
+ if (original_nal_type == H265::NaluType::kIdrWRadl ||
+ original_nal_type == H265::NaluType::kIdrNLp ||
+ original_nal_type == H265::NaluType::kCra) {
+ parsed_payload->video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ } else {
+ parsed_payload->video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ }
+ parsed_payload->video_header.width = 0;
+ parsed_payload->video_header.height = 0;
+ parsed_payload->video_header.codec = kVideoCodecH265;
+ parsed_payload->video_header.is_first_packet_in_frame = first_fragment;
+
+ return parsed_payload;
+}
+
+} // namespace
+
+absl::optional<VideoRtpDepacketizer::ParsedRtpPayload>
+VideoRtpDepacketizerH265::Parse(rtc::CopyOnWriteBuffer rtp_payload) {
+ if (rtp_payload.empty()) {
+ RTC_LOG(LS_ERROR) << "Empty payload.";
+ return absl::nullopt;
+ }
+
+ uint8_t nal_type = (rtp_payload.cdata()[0] & kH265TypeMask) >> 1;
+
+ if (nal_type == H265::NaluType::kFu) {
+ // Fragmented NAL units (FU).
+ return ParseFuNalu(std::move(rtp_payload));
+ } else if (nal_type == H265::NaluType::kPaci) {
+ // TODO(bugs.webrtc.org/13485): Implement PACI parse for H265
+ RTC_LOG(LS_ERROR) << "Not support type:" << nal_type;
+ return absl::nullopt;
+ } else {
+ // Single NAL unit packet or Aggregated packets (AP).
+ return ProcessApOrSingleNalu(std::move(rtp_payload));
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_h265.h b/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_h265.h
new file mode 100644
index 0000000000..ed5290d1cb
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_h265.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2024 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_VIDEO_RTP_DEPACKETIZER_H265_H_
+#define MODULES_RTP_RTCP_SOURCE_VIDEO_RTP_DEPACKETIZER_H265_H_
+
+#include "absl/types/optional.h"
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer.h"
+#include "rtc_base/copy_on_write_buffer.h"
+
+namespace webrtc {
+class VideoRtpDepacketizerH265 : public VideoRtpDepacketizer {
+ public:
+ ~VideoRtpDepacketizerH265() override = default;
+
+ absl::optional<ParsedRtpPayload> Parse(
+ rtc::CopyOnWriteBuffer rtp_payload) override;
+};
+} // namespace webrtc
+
+#endif // MODULES_RTP_RTCP_SOURCE_VIDEO_RTP_DEPACKETIZER_H265_H_
diff --git a/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_h265_unittest.cc b/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_h265_unittest.cc
new file mode 100644
index 0000000000..a630671a71
--- /dev/null
+++ b/third_party/libwebrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_h265_unittest.cc
@@ -0,0 +1,400 @@
+/*
+ * Copyright (c) 2024 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer_h265.h"
+
+#include <cstdint>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "common_video/h265/h265_common.h"
+#include "modules/rtp_rtcp/mocks/mock_rtp_rtcp.h"
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/rtp_rtcp/source/rtp_packet_h265_common.h"
+#include "rtc_base/copy_on_write_buffer.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::Each;
+using ::testing::ElementsAre;
+using ::testing::ElementsAreArray;
+using ::testing::Eq;
+using ::testing::IsEmpty;
+using ::testing::SizeIs;
+
+TEST(VideoRtpDepacketizerH265Test, SingleNalu) {
+ uint8_t packet[3] = {0x26, 0x02,
+ 0xFF}; // F=0, Type=19 (Idr), LayerId=0, TID=2.
+ uint8_t expected_packet[] = {0x00, 0x00, 0x00, 0x01, 0x26, 0x02, 0xff};
+ rtc::CopyOnWriteBuffer rtp_payload(packet);
+
+ VideoRtpDepacketizerH265 depacketizer;
+ absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> parsed =
+ depacketizer.Parse(rtp_payload);
+ ASSERT_TRUE(parsed);
+
+ EXPECT_THAT(rtc::MakeArrayView(parsed->video_payload.cdata(),
+ parsed->video_payload.size()),
+ ElementsAreArray(expected_packet));
+ EXPECT_EQ(parsed->video_header.frame_type, VideoFrameType::kVideoFrameKey);
+ EXPECT_EQ(parsed->video_header.codec, kVideoCodecH265);
+ EXPECT_TRUE(parsed->video_header.is_first_packet_in_frame);
+}
+
+TEST(VideoRtpDepacketizerH265Test, SingleNaluSpsWithResolution) {
+ // SPS for a 1280x720 camera capture from ffmpeg on linux. Contains
+ // emulation bytes but no cropping. This buffer is generated
+ // with following command:
+ // 1) ffmpeg -i /dev/video0 -r 30 -c:v libx265 -s 1280x720 camera.h265
+ //
+ // 2) Open camera.h265 and find the SPS, generally everything between the
+ // second and third start codes (0 0 0 1 or 0 0 1). The first two bytes
+ // 0x42 and 0x02 show the NAL header of the SPS.
+ uint8_t packet[] = {0x42, 0x02, 0x01, 0x04, 0x08, 0x00, 0x00, 0x03,
+ 0x00, 0x9d, 0x08, 0x00, 0x00, 0x03, 0x00, 0x00,
+ 0x5d, 0xb0, 0x02, 0x80, 0x80, 0x2d, 0x16, 0x59,
+ 0x59, 0xa4, 0x93, 0x2b, 0x80, 0x40, 0x00, 0x00,
+ 0x03, 0x00, 0x40, 0x00, 0x00, 0x07, 0x82};
+ uint8_t expected_packet[] = {
+ 0x00, 0x00, 0x00, 0x01, 0x42, 0x02, 0x01, 0x04, 0x08, 0x00, 0x00,
+ 0x03, 0x00, 0x9d, 0x08, 0x00, 0x00, 0x03, 0x00, 0x00, 0x5d, 0xb0,
+ 0x02, 0x80, 0x80, 0x2d, 0x16, 0x59, 0x59, 0xa4, 0x93, 0x2b, 0x80,
+ 0x40, 0x00, 0x00, 0x03, 0x00, 0x40, 0x00, 0x00, 0x07, 0x82};
+ rtc::CopyOnWriteBuffer rtp_payload(packet);
+
+ VideoRtpDepacketizerH265 depacketizer;
+ absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> parsed =
+ depacketizer.Parse(rtp_payload);
+ ASSERT_TRUE(parsed);
+
+ EXPECT_THAT(rtc::MakeArrayView(parsed->video_payload.cdata(),
+ parsed->video_payload.size()),
+ ElementsAreArray(expected_packet));
+ EXPECT_EQ(parsed->video_header.codec, kVideoCodecH265);
+ EXPECT_TRUE(parsed->video_header.is_first_packet_in_frame);
+ EXPECT_EQ(parsed->video_header.width, 1280u);
+ EXPECT_EQ(parsed->video_header.height, 720u);
+}
+
+TEST(VideoRtpDepacketizerH265Test, PaciPackets) {
+ uint8_t packet[2] = {0x64, 0x02}; // F=0, Type=50 (PACI), LayerId=0, TID=2.
+ rtc::CopyOnWriteBuffer rtp_payload(packet);
+
+ VideoRtpDepacketizerH265 depacketizer;
+ absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> parsed =
+ depacketizer.Parse(rtp_payload);
+ ASSERT_FALSE(parsed);
+}
+
+TEST(VideoRtpDepacketizerH265Test, ApKey) {
+ uint8_t payload_header[] = {0x60, 0x02};
+ uint8_t vps_nalu_size[] = {0, 0x17};
+ uint8_t sps_nalu_size[] = {0, 0x27};
+ uint8_t pps_nalu_size[] = {0, 0x32};
+ uint8_t slice_nalu_size[] = {0, 0xa};
+ uint8_t start_code[] = {0x00, 0x00, 0x00, 0x01};
+ // VPS/SPS/PPS/IDR for a 1280x720 camera capture from ffmpeg on linux.
+ // Contains emulation bytes but no cropping. This buffer is generated with
+ // following command: 1) ffmpeg -i /dev/video0 -r 30 -c:v libx265 -s 1280x720
+ // camera.h265
+ //
+ // 2) Open camera.h265 and find:
+ // VPS - generally everything between the first and second start codes (0 0 0
+ // 1 or 0 0 1). The first two bytes 0x40 and 0x02 show the NAL header of VPS.
+ // SPS - generally everything between the
+ // second and third start codes (0 0 0 1 or 0 0 1). The first two bytes
+ // 0x42 and 0x02 show the NAL header of SPS.
+ // PPS - generally everything between the third and fourth start codes (0 0 0
+ // 1 or 0 0 1). The first two bytes 0x44 and 0x02 show the NAL header of PPS.
+ // IDR - Part of the keyframe bitstream (no need to show all the bytes for
+ // depacketizer testing). The first two bytes 0x26 and 0x02 show the NAL
+ // header of the IDR frame.
+ uint8_t vps[] = {
+ 0x40, 0x02, 0x1c, 0x01, 0xff, 0xff, 0x04, 0x08, 0x00, 0x00, 0x03, 0x00,
+ 0x9d, 0x08, 0x00, 0x00, 0x03, 0x00, 0x00, 0x78, 0x95, 0x98, 0x09,
+ };
+ uint8_t sps[] = {0x42, 0x02, 0x01, 0x04, 0x08, 0x00, 0x00, 0x03, 0x00, 0x9d,
+ 0x08, 0x00, 0x00, 0x03, 0x00, 0x00, 0x5d, 0xb0, 0x02, 0x80,
+ 0x80, 0x2d, 0x16, 0x59, 0x59, 0xa4, 0x93, 0x2b, 0x80, 0x40,
+ 0x00, 0x00, 0x03, 0x00, 0x40, 0x00, 0x00, 0x07, 0x82};
+ uint8_t pps[] = {0x44, 0x02, 0xa4, 0x04, 0x55, 0xa2, 0x6d, 0xce, 0xc0, 0xc3,
+ 0xed, 0x0b, 0xac, 0xbc, 0x00, 0xc4, 0x44, 0x2e, 0xf7, 0x55,
+ 0xfd, 0x05, 0x86, 0x92, 0x19, 0xdf, 0x58, 0xec, 0x38, 0x36,
+ 0xb7, 0x7c, 0x00, 0x15, 0x33, 0x78, 0x03, 0x67, 0x26, 0x0f,
+ 0x7b, 0x30, 0x1c, 0xd7, 0xd4, 0x3a, 0xec, 0xad, 0xef, 0x73};
+ uint8_t idr[] = {0x26, 0x02, 0xaf, 0x08, 0x4a, 0x31, 0x11, 0x15, 0xe5, 0xc0};
+
+ rtc::Buffer packet;
+ packet.AppendData(payload_header);
+ packet.AppendData(vps_nalu_size);
+ packet.AppendData(vps);
+ packet.AppendData(sps_nalu_size);
+ packet.AppendData(sps);
+ packet.AppendData(pps_nalu_size);
+ packet.AppendData(pps);
+ packet.AppendData(slice_nalu_size);
+ packet.AppendData(idr);
+
+ rtc::Buffer expected_packet;
+ expected_packet.AppendData(start_code);
+ expected_packet.AppendData(vps);
+ expected_packet.AppendData(start_code);
+ expected_packet.AppendData(sps);
+ expected_packet.AppendData(start_code);
+ expected_packet.AppendData(pps);
+ expected_packet.AppendData(start_code);
+ expected_packet.AppendData(idr);
+
+ // Wrap the assembled AP packet in a CopyOnWriteBuffer for parsing.
+ rtc::CopyOnWriteBuffer rtp_payload(packet);
+
+ VideoRtpDepacketizerH265 depacketizer;
+ absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> parsed =
+ depacketizer.Parse(rtp_payload);
+ ASSERT_TRUE(parsed);
+
+ EXPECT_THAT(rtc::MakeArrayView(parsed->video_payload.cdata(),
+ parsed->video_payload.size()),
+ ElementsAreArray(expected_packet));
+ EXPECT_EQ(parsed->video_header.frame_type, VideoFrameType::kVideoFrameKey);
+ EXPECT_EQ(parsed->video_header.codec, kVideoCodecH265);
+ EXPECT_TRUE(parsed->video_header.is_first_packet_in_frame);
+}
+
+TEST(VideoRtpDepacketizerH265Test, ApNaluSpsWithResolution) {
+ uint8_t payload_header[] = {0x60, 0x02};
+ uint8_t vps_nalu_size[] = {0, 0x17};
+ uint8_t sps_nalu_size[] = {0, 0x27};
+ uint8_t pps_nalu_size[] = {0, 0x32};
+ uint8_t slice_nalu_size[] = {0, 0xa};
+ uint8_t start_code[] = {0x00, 0x00, 0x00, 0x01};
+ // The VPS/SPS/PPS/IDR bytes are generated using the same way as above case.
+ uint8_t vps[] = {
+ 0x40, 0x02, 0x1c, 0x01, 0xff, 0xff, 0x04, 0x08, 0x00, 0x00, 0x03, 0x00,
+ 0x9d, 0x08, 0x00, 0x00, 0x03, 0x00, 0x00, 0x78, 0x95, 0x98, 0x09,
+ };
+ uint8_t sps[] = {0x42, 0x02, 0x01, 0x04, 0x08, 0x00, 0x00, 0x03, 0x00, 0x9d,
+ 0x08, 0x00, 0x00, 0x03, 0x00, 0x00, 0x5d, 0xb0, 0x02, 0x80,
+ 0x80, 0x2d, 0x16, 0x59, 0x59, 0xa4, 0x93, 0x2b, 0x80, 0x40,
+ 0x00, 0x00, 0x03, 0x00, 0x40, 0x00, 0x00, 0x07, 0x82};
+ uint8_t pps[] = {0x44, 0x02, 0xa4, 0x04, 0x55, 0xa2, 0x6d, 0xce, 0xc0, 0xc3,
+ 0xed, 0x0b, 0xac, 0xbc, 0x00, 0xc4, 0x44, 0x2e, 0xf7, 0x55,
+ 0xfd, 0x05, 0x86, 0x92, 0x19, 0xdf, 0x58, 0xec, 0x38, 0x36,
+ 0xb7, 0x7c, 0x00, 0x15, 0x33, 0x78, 0x03, 0x67, 0x26, 0x0f,
+ 0x7b, 0x30, 0x1c, 0xd7, 0xd4, 0x3a, 0xec, 0xad, 0xef, 0x73};
+ uint8_t idr[] = {0x26, 0x02, 0xaf, 0x08, 0x4a, 0x31, 0x11, 0x15, 0xe5, 0xc0};
+
+ rtc::Buffer packet;
+ packet.AppendData(payload_header);
+ packet.AppendData(vps_nalu_size);
+ packet.AppendData(vps);
+ packet.AppendData(sps_nalu_size);
+ packet.AppendData(sps);
+ packet.AppendData(pps_nalu_size);
+ packet.AppendData(pps);
+ packet.AppendData(slice_nalu_size);
+ packet.AppendData(idr);
+
+ rtc::Buffer expected_packet;
+ expected_packet.AppendData(start_code);
+ expected_packet.AppendData(vps);
+ expected_packet.AppendData(start_code);
+ expected_packet.AppendData(sps);
+ expected_packet.AppendData(start_code);
+ expected_packet.AppendData(pps);
+ expected_packet.AppendData(start_code);
+ expected_packet.AppendData(idr);
+
+ rtc::CopyOnWriteBuffer rtp_payload(packet);
+
+ VideoRtpDepacketizerH265 depacketizer;
+ absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> parsed =
+ depacketizer.Parse(rtp_payload);
+ ASSERT_TRUE(parsed);
+
+ EXPECT_THAT(rtc::MakeArrayView(parsed->video_payload.cdata(),
+ parsed->video_payload.size()),
+ ElementsAreArray(expected_packet));
+ EXPECT_EQ(parsed->video_header.frame_type, VideoFrameType::kVideoFrameKey);
+ EXPECT_EQ(parsed->video_header.codec, kVideoCodecH265);
+ EXPECT_TRUE(parsed->video_header.is_first_packet_in_frame);
+ EXPECT_EQ(parsed->video_header.width, 1280u);
+ EXPECT_EQ(parsed->video_header.height, 720u);
+}
+
+TEST(VideoRtpDepacketizerH265Test, EmptyApRejected) {
+ uint8_t lone_empty_packet[] = {0x60, 0x02, // F=0, Type=48 (kH265Ap).
+ 0x00, 0x00};
+ uint8_t leading_empty_packet[] = {0x60, 0x02, // F=0, Type=48 (kH265Ap).
+ 0x00, 0x00, 0x00, 0x05, 0x26,
+ 0x02, 0xFF, 0x00, 0x11}; // kIdrWRadl
+ uint8_t middle_empty_packet[] = {0x60, 0x02, // F=0, Type=48 (kH265Ap).
+ 0x00, 0x04, 0x26, 0x02, 0xFF,
+ 0x00, 0x00, 0x00, 0x00, 0x05,
+ 0x26, 0x02, 0xFF, 0x00, 0x11}; // kIdrWRadl
+ uint8_t trailing_empty_packet[] = {0x60, 0x02, // F=0, Type=48 (kH265Ap).
+ 0x00, 0x04, 0x26,
+ 0x02, 0xFF, 0x00, // kIdrWRadl
+ 0x00, 0x00};
+
+ VideoRtpDepacketizerH265 depacketizer;
+ EXPECT_FALSE(depacketizer.Parse(rtc::CopyOnWriteBuffer(lone_empty_packet)));
+ EXPECT_FALSE(
+ depacketizer.Parse(rtc::CopyOnWriteBuffer(leading_empty_packet)));
+ EXPECT_FALSE(depacketizer.Parse(rtc::CopyOnWriteBuffer(middle_empty_packet)));
+ EXPECT_FALSE(
+ depacketizer.Parse(rtc::CopyOnWriteBuffer(trailing_empty_packet)));
+}
+
+TEST(VideoRtpDepacketizerH265Test, ApDelta) {
+ uint8_t packet[20] = {0x60, 0x02, // F=0, Type=48 (kH265Ap).
+ // Length, nal header, payload.
+ 0, 0x03, 0x02, 0x02, 0xFF, // TrailR
+ 0, 0x04, 0x02, 0x02, 0xFF, 0x00, // TrailR
+ 0, 0x05, 0x02, 0x02, 0xFF, 0x00, 0x11}; // TrailR
+ uint8_t expected_packet[] = {
+ 0x00, 0x00, 0x00, 0x01, 0x02, 0x02, 0xFF, // TrailR
+ 0x00, 0x00, 0x00, 0x01, 0x02, 0x02, 0xFF, 0x00, // TrailR
+ 0x00, 0x00, 0x00, 0x01, 0x02, 0x02, 0xFF, 0x00, 0x11}; // TrailR
+ rtc::CopyOnWriteBuffer rtp_payload(packet);
+
+ VideoRtpDepacketizerH265 depacketizer;
+ absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> parsed =
+ depacketizer.Parse(rtp_payload);
+ ASSERT_TRUE(parsed);
+
+ EXPECT_THAT(rtc::MakeArrayView(parsed->video_payload.cdata(),
+ parsed->video_payload.size()),
+ ElementsAreArray(expected_packet));
+
+ EXPECT_EQ(parsed->video_header.frame_type, VideoFrameType::kVideoFrameDelta);
+ EXPECT_EQ(parsed->video_header.codec, kVideoCodecH265);
+ EXPECT_TRUE(parsed->video_header.is_first_packet_in_frame);
+}
+
+TEST(VideoRtpDepacketizerH265Test, Fu) {
+ // clang-format off
+ uint8_t packet1[] = {
+ 0x62, 0x02, // F=0, Type=49 (kH265Fu).
+ 0x93, // FU header kH265SBitMask | H265::kIdrWRadl.
+ 0xaf, 0x08, 0x4a, 0x31, 0x11, 0x15, 0xe5, 0xc0 // Payload.
+ };
+ // clang-format on
+ // F=0, Type=19, (kIdrWRadl), tid=1, nalu header: 00100110 00000010, which is
+ // 0x26, 0x02
+ const uint8_t kExpected1[] = {0x00, 0x00, 0x00, 0x01, 0x26, 0x02, 0xaf,
+ 0x08, 0x4a, 0x31, 0x11, 0x15, 0xe5, 0xc0};
+
+ uint8_t packet2[] = {
+ 0x62, 0x02, // F=0, Type=49 (kH265Fu).
+ H265::kIdrWRadl, // FU header.
+ 0x02 // Payload.
+ };
+ const uint8_t kExpected2[] = {0x02};
+
+ uint8_t packet3[] = {
+ 0x62, 0x02, // F=0, Type=49 (kH265Fu).
+ 0x33, // FU header kH265EBitMask | H265::kIdrWRadl.
+ 0x03 // Payload.
+ };
+ const uint8_t kExpected3[] = {0x03};
+
+ VideoRtpDepacketizerH265 depacketizer;
+ absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> parsed1 =
+ depacketizer.Parse(rtc::CopyOnWriteBuffer(packet1));
+ ASSERT_TRUE(parsed1);
+ // The reassembled NALU is one byte shorter than the RTP payload: the 3-byte
+ // PayloadHdr + FU header is replaced by the original 2-byte NAL header.
+ EXPECT_THAT(rtc::MakeArrayView(parsed1->video_payload.cdata(),
+ parsed1->video_payload.size()),
+ ElementsAreArray(kExpected1));
+ EXPECT_EQ(parsed1->video_header.frame_type, VideoFrameType::kVideoFrameKey);
+ EXPECT_EQ(parsed1->video_header.codec, kVideoCodecH265);
+ EXPECT_TRUE(parsed1->video_header.is_first_packet_in_frame);
+
+ // Subsequent fragments lose the 3-byte PayloadHdr + FU header entirely,
+ // since their payload is simply appended onto the first packet.
+ auto parsed2 = depacketizer.Parse(rtc::CopyOnWriteBuffer(packet2));
+ EXPECT_THAT(rtc::MakeArrayView(parsed2->video_payload.cdata(),
+ parsed2->video_payload.size()),
+ ElementsAreArray(kExpected2));
+ EXPECT_FALSE(parsed2->video_header.is_first_packet_in_frame);
+ EXPECT_EQ(parsed2->video_header.codec, kVideoCodecH265);
+
+ auto parsed3 = depacketizer.Parse(rtc::CopyOnWriteBuffer(packet3));
+ EXPECT_THAT(rtc::MakeArrayView(parsed3->video_payload.cdata(),
+ parsed3->video_payload.size()),
+ ElementsAreArray(kExpected3));
+ EXPECT_FALSE(parsed3->video_header.is_first_packet_in_frame);
+ EXPECT_EQ(parsed3->video_header.codec, kVideoCodecH265);
+}
+
+TEST(VideoRtpDepacketizerH265Test, EmptyPayload) {
+ rtc::CopyOnWriteBuffer empty;
+ VideoRtpDepacketizerH265 depacketizer;
+ EXPECT_FALSE(depacketizer.Parse(empty));
+}
+
+TEST(VideoRtpDepacketizerH265Test, TruncatedFuNalu) {
+ const uint8_t kPayload[] = {0x62};
+ VideoRtpDepacketizerH265 depacketizer;
+ EXPECT_FALSE(depacketizer.Parse(rtc::CopyOnWriteBuffer(kPayload)));
+}
+
+TEST(VideoRtpDepacketizerH265Test, TruncatedSingleApNalu) {
+ const uint8_t kPayload[] = {0xe0, 0x02, 0x40};
+ VideoRtpDepacketizerH265 depacketizer;
+ EXPECT_FALSE(depacketizer.Parse(rtc::CopyOnWriteBuffer(kPayload)));
+}
+
+TEST(VideoRtpDepacketizerH265Test, ApPacketWithTruncatedNalUnits) {
+ const uint8_t kPayload[] = {0x60, 0x02, 0xED, 0xDF};
+ VideoRtpDepacketizerH265 depacketizer;
+ EXPECT_FALSE(depacketizer.Parse(rtc::CopyOnWriteBuffer(kPayload)));
+}
+
+TEST(VideoRtpDepacketizerH265Test, TruncationJustAfterSingleApNalu) {
+ const uint8_t kPayload[] = {0x60, 0x02, 0x40, 0x40};
+ VideoRtpDepacketizerH265 depacketizer;
+ EXPECT_FALSE(depacketizer.Parse(rtc::CopyOnWriteBuffer(kPayload)));
+}
+
+TEST(VideoRtpDepacketizerH265Test, ShortSpsPacket) {
+ const uint8_t kPayload[] = {0x40, 0x80, 0x00};
+ VideoRtpDepacketizerH265 depacketizer;
+ EXPECT_TRUE(depacketizer.Parse(rtc::CopyOnWriteBuffer(kPayload)));
+}
+
+TEST(VideoRtpDepacketizerH265Test, InvalidNaluSizeApNalu) {
+ const uint8_t kPayload[] = {0x60, 0x02, // F=0, Type=48 (kH265Ap).
+ // Length, nal header, payload.
+ 0, 0xff, 0x02, 0x02, 0xFF, // TrailR
+ 0, 0x05, 0x02, 0x02, 0xFF, 0x00,
+ 0x11}; // TrailR
+ VideoRtpDepacketizerH265 depacketizer;
+ EXPECT_FALSE(depacketizer.Parse(rtc::CopyOnWriteBuffer(kPayload)));
+}
+
+TEST(VideoRtpDepacketizerH265Test, SeiPacket) {
+ const uint8_t kPayload[] = {
+ 0x4e, 0x02, // F=0, Type=39 (kPrefixSei).
+ 0x03, 0x03, 0x03, 0x03 // Payload.
+ };
+ VideoRtpDepacketizerH265 depacketizer;
+ auto parsed = depacketizer.Parse(rtc::CopyOnWriteBuffer(kPayload));
+ ASSERT_TRUE(parsed);
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_capture/linux/video_capture_v4l2.cc b/third_party/libwebrtc/modules/video_capture/linux/video_capture_v4l2.cc
index 08d23f7f58..b6e7e79a2a 100644
--- a/third_party/libwebrtc/modules/video_capture/linux/video_capture_v4l2.cc
+++ b/third_party/libwebrtc/modules/video_capture/linux/video_capture_v4l2.cc
@@ -62,6 +62,7 @@ VideoCaptureModuleV4L2::VideoCaptureModuleV4L2()
_deviceId(-1),
_deviceFd(-1),
_buffersAllocatedByDevice(-1),
+ _streaming(false),
_captureStarted(false),
_pool(NULL) {}
@@ -110,6 +111,7 @@ int32_t VideoCaptureModuleV4L2::Init(const char* deviceUniqueIdUTF8) {
VideoCaptureModuleV4L2::~VideoCaptureModuleV4L2() {
RTC_DCHECK_RUN_ON(&api_checker_);
+ RTC_CHECK_RUNS_SERIALIZED(&capture_checker_);
StopCapture();
if (_deviceFd != -1)
@@ -128,6 +130,14 @@ int32_t VideoCaptureModuleV4L2::StartCapture(
}
}
+ {
+ // We don't want members above to be guarded by capture_checker_ as
+ // it's meant to be for members that are accessed on the API thread
+ // only when we are not capturing. The code above can be called many
+ // times while sharing instance of VideoCaptureV4L2 between websites
+ // and therefore it would not follow the requirements of this checker.
+ RTC_CHECK_RUNS_SERIALIZED(&capture_checker_);
+
// Set a baseline of configured parameters. It is updated here during
// configuration, then read from the capture thread.
configured_capability_ = capability;
@@ -289,18 +299,23 @@ int32_t VideoCaptureModuleV4L2::StartCapture(
_requestedCapability = capability;
_captureStarted = true;
+ _streaming = true;
// start capture thread;
- if (_captureThread.empty()) {
- quit_ = false;
- _captureThread = rtc::PlatformThread::SpawnJoinable(
- [self = scoped_refptr(this)] {
- while (self->CaptureProcess()) {
- }
- },
- "CaptureThread",
- rtc::ThreadAttributes().SetPriority(rtc::ThreadPriority::kHigh));
+ if (!_captureThread.empty()) {
+ return 0;
+ }
+
+ quit_ = false;
}
+
+ _captureThread = rtc::PlatformThread::SpawnJoinable(
+ [self = scoped_refptr(this)] {
+ while (self->CaptureProcess()) {
+ }
+ },
+ "CaptureThread",
+ rtc::ThreadAttributes().SetPriority(rtc::ThreadPriority::kHigh));
return 0;
}
@@ -316,9 +331,12 @@ int32_t VideoCaptureModuleV4L2::StopCapture() {
_captureThread.Finalize();
}
+ _captureStarted = false;
+
+ RTC_CHECK_RUNS_SERIALIZED(&capture_checker_);
MutexLock lock(&capture_lock_);
- if (_captureStarted) {
- _captureStarted = false;
+ if (_streaming) {
+ _streaming = false;
DeAllocateVideoBuffers();
close(_deviceFd);
@@ -333,6 +351,7 @@ int32_t VideoCaptureModuleV4L2::StopCapture() {
// critical section protected by the caller
bool VideoCaptureModuleV4L2::AllocateVideoBuffers() {
+ RTC_CHECK_RUNS_SERIALIZED(&capture_checker_);
struct v4l2_requestbuffers rbuffer;
memset(&rbuffer, 0, sizeof(v4l2_requestbuffers));
@@ -383,6 +402,7 @@ bool VideoCaptureModuleV4L2::AllocateVideoBuffers() {
}
bool VideoCaptureModuleV4L2::DeAllocateVideoBuffers() {
+ RTC_CHECK_RUNS_SERIALIZED(&capture_checker_);
// unmap buffers
for (int i = 0; i < _buffersAllocatedByDevice; i++)
munmap(_pool[i].start, _pool[i].length);
@@ -400,10 +420,12 @@ bool VideoCaptureModuleV4L2::DeAllocateVideoBuffers() {
}
bool VideoCaptureModuleV4L2::CaptureStarted() {
+ RTC_DCHECK_RUN_ON(&api_checker_);
return _captureStarted;
}
bool VideoCaptureModuleV4L2::CaptureProcess() {
+ RTC_CHECK_RUNS_SERIALIZED(&capture_checker_);
int retVal = 0;
struct pollfd rSet;
@@ -432,7 +454,7 @@ bool VideoCaptureModuleV4L2::CaptureProcess() {
return true;
}
- if (_captureStarted) {
+ if (_streaming) {
struct v4l2_buffer buf;
memset(&buf, 0, sizeof(struct v4l2_buffer));
buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
diff --git a/third_party/libwebrtc/modules/video_capture/linux/video_capture_v4l2.h b/third_party/libwebrtc/modules/video_capture/linux/video_capture_v4l2.h
index 61358d0325..9bc4ce8402 100644
--- a/third_party/libwebrtc/modules/video_capture/linux/video_capture_v4l2.h
+++ b/third_party/libwebrtc/modules/video_capture/linux/video_capture_v4l2.h
@@ -45,11 +45,13 @@ class VideoCaptureModuleV4L2 : public VideoCaptureImpl {
Mutex capture_lock_ RTC_ACQUIRED_BEFORE(api_lock_);
bool quit_ RTC_GUARDED_BY(capture_lock_);
int32_t _deviceId RTC_GUARDED_BY(api_checker_);
- int32_t _deviceFd;
+ int32_t _deviceFd RTC_GUARDED_BY(capture_checker_);
int32_t _buffersAllocatedByDevice RTC_GUARDED_BY(capture_lock_);
- VideoCaptureCapability configured_capability_;
- bool _captureStarted;
+ VideoCaptureCapability configured_capability_
+ RTC_GUARDED_BY(capture_checker_);
+ bool _streaming RTC_GUARDED_BY(capture_checker_);
+ bool _captureStarted RTC_GUARDED_BY(api_checker_);
struct Buffer {
void* start;
size_t length;
diff --git a/third_party/libwebrtc/modules/video_coding/BUILD.gn b/third_party/libwebrtc/modules/video_coding/BUILD.gn
index 0457b818c3..db5b57dff4 100644
--- a/third_party/libwebrtc/modules/video_coding/BUILD.gn
+++ b/third_party/libwebrtc/modules/video_coding/BUILD.gn
@@ -124,10 +124,10 @@ rtc_library("packet_buffer") {
]
}
-rtc_library("h264_packet_buffer") {
+rtc_library("h26x_packet_buffer") {
sources = [
- "h264_packet_buffer.cc",
- "h264_packet_buffer.h",
+ "h26x_packet_buffer.cc",
+ "h26x_packet_buffer.h",
]
deps = [
":codec_globals_headers",
@@ -287,6 +287,8 @@ rtc_library("video_codec_interface") {
"include/video_codec_interface.h",
"include/video_coding_defines.h",
"include/video_error_codes.h",
+ "include/video_error_codes_utils.cc",
+ "include/video_error_codes_utils.h",
"video_coding_defines.cc",
]
deps = [
@@ -527,6 +529,7 @@ rtc_library("webrtc_multiplex") {
":video_coding_utility",
"../../api:fec_controller_api",
"../../api:scoped_refptr",
+ "../../api/environment",
"../../api/video:encoded_image",
"../../api/video:video_frame",
"../../api/video:video_rtp_headers",
@@ -583,7 +586,10 @@ rtc_library("webrtc_vp8") {
":webrtc_vp8_scalability",
":webrtc_vp8_temporal_layers",
"../../api:fec_controller_api",
+ "../../api:field_trials_view",
"../../api:scoped_refptr",
+ "../../api/environment",
+ "../../api/transport:field_trial_based_config",
"../../api/units:time_delta",
"../../api/units:timestamp",
"../../api/video:encoded_image",
@@ -820,6 +826,8 @@ if (rtc_include_tests) {
"../../api:mock_video_decoder",
"../../api:mock_video_encoder",
"../../api:simulcast_test_fixture_api",
+ "../../api/environment",
+ "../../api/environment:environment_factory",
"../../api/video:encoded_image",
"../../api/video:video_frame",
"../../api/video:video_rtp_headers",
@@ -932,6 +940,8 @@ if (rtc_include_tests) {
":webrtc_vp9_helpers",
"../../api:array_view",
"../../api:videocodec_test_fixture_api",
+ "../../api/environment",
+ "../../api/environment:environment_factory",
"../../api/test/metrics:global_metrics_logger_and_exporter",
"../../api/test/metrics:metric",
"../../api/test/video:function_video_factory",
@@ -999,6 +1009,8 @@ if (rtc_include_tests) {
deps = [
":video_codec_interface",
+ "../../api/environment",
+ "../../api/environment:environment_factory",
"../../api/test/metrics:global_metrics_logger_and_exporter",
"../../api/units:data_rate",
"../../api/units:frequency",
@@ -1008,6 +1020,8 @@ if (rtc_include_tests) {
"../../modules/video_coding/svc:scalability_mode_util",
"../../rtc_base:logging",
"../../rtc_base:stringutils",
+ "../../test:explicit_key_value_config",
+ "../../test:field_trial",
"../../test:fileutils",
"../../test:test_flags",
"../../test:test_main",
@@ -1077,6 +1091,8 @@ if (rtc_include_tests) {
"../../api:scoped_refptr",
"../../api:videocodec_test_fixture_api",
"../../api:videocodec_test_stats_api",
+ "../../api/environment",
+ "../../api/environment:environment_factory",
"../../api/test/metrics:global_metrics_logger_and_exporter",
"../../api/test/video:function_video_factory",
"../../api/video:encoded_image",
@@ -1152,9 +1168,9 @@ if (rtc_include_tests) {
"frame_dependencies_calculator_unittest.cc",
"frame_helpers_unittest.cc",
"generic_decoder_unittest.cc",
- "h264_packet_buffer_unittest.cc",
"h264_sprop_parameter_sets_unittest.cc",
"h264_sps_pps_tracker_unittest.cc",
+ "h26x_packet_buffer_unittest.cc",
"histogram_unittest.cc",
"loss_notification_controller_unittest.cc",
"nack_requester_unittest.cc",
@@ -1189,7 +1205,7 @@ if (rtc_include_tests) {
":encoded_frame",
":frame_dependencies_calculator",
":frame_helpers",
- ":h264_packet_buffer",
+ ":h26x_packet_buffer",
":nack_requester",
":packet_buffer",
":simulcast_test_fixture_impl",
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_encoder.cc b/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_encoder.cc
index 4ff22bfe34..03bb367fe0 100644
--- a/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_encoder.cc
+++ b/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_encoder.cc
@@ -133,6 +133,7 @@ class LibaomAv1Encoder final : public VideoEncoder {
// TODO(webrtc:15225): Kill switch for disabling frame dropping. Remove it
// after frame dropping is fully rolled out.
bool disable_frame_dropping_;
+ int max_consec_frame_drop_;
};
int32_t VerifyCodecSettings(const VideoCodec& codec_settings) {
@@ -163,6 +164,14 @@ int32_t VerifyCodecSettings(const VideoCodec& codec_settings) {
return WEBRTC_VIDEO_CODEC_OK;
}
+int GetMaxConsecutiveFrameDrop(const FieldTrialsView& field_trials) {
+ webrtc::FieldTrialParameter<int> maxdrop("maxdrop", 0);
+ webrtc::ParseFieldTrial(
+ {&maxdrop},
+ field_trials.Lookup("WebRTC-LibaomAv1Encoder-MaxConsecFrameDrop"));
+ return maxdrop;
+}
+
LibaomAv1Encoder::LibaomAv1Encoder(
const absl::optional<LibaomAv1EncoderAuxConfig>& aux_config,
const FieldTrialsView& trials)
@@ -174,7 +183,8 @@ LibaomAv1Encoder::LibaomAv1Encoder(
timestamp_(0),
disable_frame_dropping_(absl::StartsWith(
trials.Lookup("WebRTC-LibaomAv1Encoder-DisableFrameDropping"),
- "Enabled")) {}
+ "Enabled")),
+ max_consec_frame_drop_(GetMaxConsecutiveFrameDrop(trials)) {}
LibaomAv1Encoder::~LibaomAv1Encoder() {
Release();
@@ -297,6 +307,12 @@ int LibaomAv1Encoder::InitEncode(const VideoCodec* codec_settings,
SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_ENABLE_PALETTE, 0);
}
+ if (codec_settings->mode == VideoCodecMode::kRealtimeVideo &&
+ encoder_settings_.GetFrameDropEnabled() && max_consec_frame_drop_ > 0) {
+ SET_ENCODER_PARAM_OR_RETURN_ERROR(AV1E_SET_MAX_CONSEC_FRAME_DROP_CBR,
+ max_consec_frame_drop_);
+ }
+
if (cfg_.g_threads == 8) {
// Values passed to AV1E_SET_TILE_ROWS and AV1E_SET_TILE_COLUMNS are log2()
// based.
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_encoder_unittest.cc b/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_encoder_unittest.cc
index 04ee9162ba..127aadb275 100644
--- a/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_encoder_unittest.cc
+++ b/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_encoder_unittest.cc
@@ -188,6 +188,31 @@ TEST(LibaomAv1EncoderTest, CheckOddDimensionsWithSpatialLayers) {
ASSERT_THAT(encoded_frames, SizeIs(6));
}
+TEST(LibaomAv1EncoderTest, WithMaximumConsecutiveFrameDrop) {
+ test::ScopedFieldTrials field_trials(
+ "WebRTC-LibaomAv1Encoder-MaxConsecFrameDrop/maxdrop:2/");
+ VideoBitrateAllocation allocation;
+ allocation.SetBitrate(0, 0, 1000); // some very low bitrate
+ std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder();
+ VideoCodec codec_settings = DefaultCodecSettings();
+ codec_settings.SetFrameDropEnabled(true);
+ codec_settings.SetScalabilityMode(ScalabilityMode::kL1T1);
+ codec_settings.startBitrate = allocation.get_sum_kbps();
+ ASSERT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()),
+ WEBRTC_VIDEO_CODEC_OK);
+ encoder->SetRates(VideoEncoder::RateControlParameters(
+ allocation, codec_settings.maxFramerate));
+ EncodedVideoFrameProducer evfp(*encoder);
+ evfp.SetResolution(
+ RenderResolution{codec_settings.width, codec_settings.height});
+ // We should code the first frame, skip two, then code another frame.
+ std::vector<EncodedVideoFrameProducer::EncodedFrame> encoded_frames =
+ evfp.SetNumInputFrames(4).Encode();
+ ASSERT_THAT(encoded_frames, SizeIs(2));
+ // The 4 frames have default Rtp-timestamps of 1000, 4000, 7000, 10000.
+ ASSERT_THAT(encoded_frames[1].encoded_image.RtpTimestamp(), 10000);
+}
+
TEST(LibaomAv1EncoderTest, EncoderInfoWithoutResolutionBitrateLimits) {
std::unique_ptr<VideoEncoder> encoder = CreateLibaomAv1Encoder();
EXPECT_TRUE(encoder->GetEncoderInfo().resolution_bitrate_limits.empty());
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_unittest.cc b/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_unittest.cc
index d486c1d062..6a135e2bab 100644
--- a/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_unittest.cc
+++ b/third_party/libwebrtc/modules/video_coding/codecs/av1/libaom_av1_unittest.cc
@@ -62,6 +62,7 @@ VideoCodec DefaultCodecSettings() {
codec_settings.height = kHeight;
codec_settings.maxFramerate = kFramerate;
codec_settings.maxBitrate = 1000;
+ codec_settings.startBitrate = 1;
codec_settings.qpMax = 63;
return codec_settings;
}
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/multiplex/include/multiplex_decoder_adapter.h b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/include/multiplex_decoder_adapter.h
index d58981e4b2..ed02f2d72b 100644
--- a/third_party/libwebrtc/modules/video_coding/codecs/multiplex/include/multiplex_decoder_adapter.h
+++ b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/include/multiplex_decoder_adapter.h
@@ -15,6 +15,7 @@
#include <memory>
#include <vector>
+#include "api/environment/environment.h"
#include "api/video_codecs/sdp_video_format.h"
#include "api/video_codecs/video_decoder.h"
#include "api/video_codecs/video_decoder_factory.h"
@@ -25,7 +26,8 @@ namespace webrtc {
class MultiplexDecoderAdapter : public VideoDecoder {
public:
// `factory` is not owned and expected to outlive this class.
- MultiplexDecoderAdapter(VideoDecoderFactory* factory,
+ MultiplexDecoderAdapter(const Environment& env,
+ VideoDecoderFactory* factory,
const SdpVideoFormat& associated_format,
bool supports_augmenting_data = false);
virtual ~MultiplexDecoderAdapter();
@@ -62,6 +64,7 @@ class MultiplexDecoderAdapter : public VideoDecoder {
std::unique_ptr<uint8_t[]> augmenting_data,
uint16_t augmenting_data_length);
+ const Environment env_;
VideoDecoderFactory* const factory_;
const SdpVideoFormat associated_format_;
std::vector<std::unique_ptr<VideoDecoder>> decoders_;
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/multiplex/multiplex_decoder_adapter.cc b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/multiplex_decoder_adapter.cc
index 551a9490b0..7cebbe14d0 100644
--- a/third_party/libwebrtc/modules/video_coding/codecs/multiplex/multiplex_decoder_adapter.cc
+++ b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/multiplex_decoder_adapter.cc
@@ -10,6 +10,7 @@
#include "modules/video_coding/codecs/multiplex/include/multiplex_decoder_adapter.h"
+#include "api/environment/environment.h"
#include "api/video/encoded_image.h"
#include "api/video/i420_buffer.h"
#include "api/video/video_frame_buffer.h"
@@ -93,10 +94,12 @@ struct MultiplexDecoderAdapter::AugmentingData {
};
MultiplexDecoderAdapter::MultiplexDecoderAdapter(
+ const Environment& env,
VideoDecoderFactory* factory,
const SdpVideoFormat& associated_format,
bool supports_augmenting_data)
- : factory_(factory),
+ : env_(env),
+ factory_(factory),
associated_format_(associated_format),
supports_augmenting_data_(supports_augmenting_data) {}
@@ -111,7 +114,7 @@ bool MultiplexDecoderAdapter::Configure(const Settings& settings) {
PayloadStringToCodecType(associated_format_.name));
for (size_t i = 0; i < kAlphaCodecStreams; ++i) {
std::unique_ptr<VideoDecoder> decoder =
- factory_->CreateVideoDecoder(associated_format_);
+ factory_->Create(env_, associated_format_);
if (!decoder->Configure(associated_settings)) {
return false;
}
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/multiplex/test/multiplex_adapter_unittest.cc b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/test/multiplex_adapter_unittest.cc
index a2f36a306d..9c6300e368 100644
--- a/third_party/libwebrtc/modules/video_coding/codecs/multiplex/test/multiplex_adapter_unittest.cc
+++ b/third_party/libwebrtc/modules/video_coding/codecs/multiplex/test/multiplex_adapter_unittest.cc
@@ -16,6 +16,8 @@
#include <vector>
#include "absl/types/optional.h"
+#include "api/environment/environment.h"
+#include "api/environment/environment_factory.h"
#include "api/scoped_refptr.h"
#include "api/test/mock_video_decoder_factory.h"
#include "api/test/mock_video_encoder_factory.h"
@@ -63,7 +65,8 @@ class TestMultiplexAdapter : public VideoCodecUnitTest,
protected:
std::unique_ptr<VideoDecoder> CreateDecoder() override {
return std::make_unique<MultiplexDecoderAdapter>(
- decoder_factory_.get(), SdpVideoFormat(kMultiplexAssociatedCodecName),
+ env_, decoder_factory_.get(),
+ SdpVideoFormat(kMultiplexAssociatedCodecName),
supports_augmenting_data_);
}
@@ -182,9 +185,9 @@ class TestMultiplexAdapter : public VideoCodecUnitTest,
EXPECT_CALL(*decoder_factory_, Die);
// The decoders/encoders will be owned by the caller of
// CreateVideoDecoder()/CreateVideoEncoder().
- EXPECT_CALL(*decoder_factory_, CreateVideoDecoder)
- .Times(2)
- .WillRepeatedly([] { return VP9Decoder::Create(); });
+ EXPECT_CALL(*decoder_factory_, Create).Times(2).WillRepeatedly([] {
+ return VP9Decoder::Create();
+ });
EXPECT_CALL(*encoder_factory_, Die);
EXPECT_CALL(*encoder_factory_, CreateVideoEncoder)
@@ -194,6 +197,7 @@ class TestMultiplexAdapter : public VideoCodecUnitTest,
VideoCodecUnitTest::SetUp();
}
+ const Environment env_ = CreateEnvironment();
const std::unique_ptr<webrtc::MockVideoDecoderFactory> decoder_factory_;
const std::unique_ptr<webrtc::MockVideoEncoderFactory> encoder_factory_;
const bool supports_augmenting_data_;
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_test.cc b/third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_test.cc
index 2ab1106a59..0811685e33 100644
--- a/third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_test.cc
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/video_codec_test.cc
@@ -14,6 +14,8 @@
#include "absl/flags/flag.h"
#include "absl/functional/any_invocable.h"
+#include "api/environment/environment.h"
+#include "api/environment/environment_factory.h"
#include "api/test/metrics/global_metrics_logger_and_exporter.h"
#include "api/units/data_rate.h"
#include "api/units/frequency.h"
@@ -26,6 +28,8 @@
#include "modules/video_coding/svc/scalability_mode_util.h"
#include "rtc_base/logging.h"
#include "rtc_base/strings/string_builder.h"
+#include "test/explicit_key_value_config.h"
+#include "test/field_trial.h"
#include "test/gtest.h"
#include "test/test_flags.h"
#include "test/testsupport/file_utils.h"
@@ -58,6 +62,7 @@ ABSL_FLAG(double,
30.0,
"Encode target frame rate of the top temporal layer in fps.");
ABSL_FLAG(int, num_frames, 300, "Number of frames to encode and/or decode.");
+ABSL_FLAG(std::string, field_trials, "", "Field trials to apply.");
ABSL_FLAG(std::string, test_name, "", "Test name.");
ABSL_FLAG(bool, dump_decoder_input, false, "Dump decoder input.");
ABSL_FLAG(bool, dump_decoder_output, false, "Dump decoder output.");
@@ -178,6 +183,7 @@ std::string TestOutputPath() {
} // namespace
std::unique_ptr<VideoCodecStats> RunEncodeDecodeTest(
+ const Environment& env,
std::string encoder_impl,
std::string decoder_impl,
const VideoInfo& video_info,
@@ -247,7 +253,7 @@ std::unique_ptr<VideoCodecStats> RunEncodeDecodeTest(
}
return VideoCodecTester::RunEncodeDecodeTest(
- source_settings, encoder_factory.get(), decoder_factory.get(),
+ env, source_settings, encoder_factory.get(), decoder_factory.get(),
encoder_settings, decoder_settings, encoding_settings);
}
@@ -313,6 +319,7 @@ class SpatialQualityTest : public ::testing::TestWithParam<std::tuple<
};
TEST_P(SpatialQualityTest, SpatialQuality) {
+ const Environment env = CreateEnvironment();
auto [codec_type, codec_impl, video_info, coding_settings] = GetParam();
auto [width, height, framerate_fps, bitrate_kbps, expected_min_psnr] =
coding_settings;
@@ -324,8 +331,8 @@ TEST_P(SpatialQualityTest, SpatialQuality) {
codec_type, /*scalability_mode=*/"L1T1", width, height,
{bitrate_kbps}, framerate_fps, num_frames);
- std::unique_ptr<VideoCodecStats> stats =
- RunEncodeDecodeTest(codec_impl, codec_impl, video_info, frames_settings);
+ std::unique_ptr<VideoCodecStats> stats = RunEncodeDecodeTest(
+ env, codec_impl, codec_impl, video_info, frames_settings);
VideoCodecStats::Stream stream;
if (stats != nullptr) {
@@ -527,6 +534,11 @@ INSTANTIATE_TEST_SUITE_P(
FramerateAdaptationTest::TestParamsToString);
TEST(VideoCodecTest, DISABLED_EncodeDecode) {
+ ScopedFieldTrials field_trials(absl::GetFlag(FLAGS_field_trials));
+ const Environment env =
+ CreateEnvironment(std::make_unique<ExplicitKeyValueConfig>(
+ absl::GetFlag(FLAGS_field_trials)));
+
std::vector<std::string> bitrate_str = absl::GetFlag(FLAGS_bitrate_kbps);
std::vector<int> bitrate_kbps;
std::transform(bitrate_str.begin(), bitrate_str.end(),
@@ -544,7 +556,7 @@ TEST(VideoCodecTest, DISABLED_EncodeDecode) {
// logged test name (implies lossing history in the chromeperf dashboard).
// Sync with changes in Stream::LogMetrics (see TODOs there).
std::unique_ptr<VideoCodecStats> stats = RunEncodeDecodeTest(
- CodecNameToCodecImpl(absl::GetFlag(FLAGS_encoder)),
+ env, CodecNameToCodecImpl(absl::GetFlag(FLAGS_encoder)),
CodecNameToCodecImpl(absl::GetFlag(FLAGS_decoder)),
kRawVideos.at(absl::GetFlag(FLAGS_video_name)), frames_settings);
ASSERT_NE(nullptr, stats);
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/video_encoder_decoder_instantiation_tests.cc b/third_party/libwebrtc/modules/video_coding/codecs/test/video_encoder_decoder_instantiation_tests.cc
index 41f2304748..581750768d 100644
--- a/third_party/libwebrtc/modules/video_coding/codecs/test/video_encoder_decoder_instantiation_tests.cc
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/video_encoder_decoder_instantiation_tests.cc
@@ -11,6 +11,8 @@
#include <memory>
#include <vector>
+#include "api/environment/environment.h"
+#include "api/environment/environment_factory.h"
#include "api/video_codecs/sdp_video_format.h"
#include "api/video_codecs/video_decoder.h"
#include "api/video_codecs/video_decoder_factory.h"
@@ -86,6 +88,8 @@ class VideoEncoderDecoderInstantiationTest
}
}
+ const Environment env_ = CreateEnvironment();
+
const SdpVideoFormat vp8_format_;
const SdpVideoFormat vp9_format_;
const SdpVideoFormat h264cbp_format_;
@@ -126,7 +130,7 @@ TEST_P(VideoEncoderDecoderInstantiationTest, DISABLED_InstantiateVp8Codecs) {
for (int i = 0; i < num_decoders_; ++i) {
std::unique_ptr<VideoDecoder> decoder =
- decoder_factory_->CreateVideoDecoder(vp8_format_);
+ decoder_factory_->Create(env_, vp8_format_);
ASSERT_THAT(decoder, NotNull());
EXPECT_TRUE(decoder->Configure(DecoderSettings(kVideoCodecVP8)));
decoders_.emplace_back(std::move(decoder));
@@ -144,7 +148,7 @@ TEST_P(VideoEncoderDecoderInstantiationTest,
for (int i = 0; i < num_decoders_; ++i) {
std::unique_ptr<VideoDecoder> decoder =
- decoder_factory_->CreateVideoDecoder(h264cbp_format_);
+ decoder_factory_->Create(env_, h264cbp_format_);
ASSERT_THAT(decoder, NotNull());
EXPECT_TRUE(decoder->Configure(DecoderSettings(kVideoCodecH264)));
decoders_.push_back(std::move(decoder));
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_fixture_impl.cc b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_fixture_impl.cc
index 35355d4387..508ac384b0 100644
--- a/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_fixture_impl.cc
+++ b/third_party/libwebrtc/modules/video_coding/codecs/test/videocodec_test_fixture_impl.cc
@@ -24,6 +24,8 @@
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "api/array_view.h"
+#include "api/environment/environment.h"
+#include "api/environment/environment_factory.h"
#include "api/test/metrics/global_metrics_logger_and_exporter.h"
#include "api/test/metrics/metric.h"
#include "api/transport/field_trial_based_config.h"
@@ -685,6 +687,8 @@ void VideoCodecTestFixtureImpl::VerifyVideoStatistic(
}
bool VideoCodecTestFixtureImpl::CreateEncoderAndDecoder() {
+ const Environment env = CreateEnvironment();
+
SdpVideoFormat encoder_format(CreateSdpVideoFormat(config_));
SdpVideoFormat decoder_format = encoder_format;
@@ -709,7 +713,7 @@ bool VideoCodecTestFixtureImpl::CreateEncoderAndDecoder() {
config_.NumberOfSimulcastStreams(), config_.NumberOfSpatialLayers());
for (size_t i = 0; i < num_simulcast_or_spatial_layers; ++i) {
std::unique_ptr<VideoDecoder> decoder =
- decoder_factory_->CreateVideoDecoder(decoder_format);
+ decoder_factory_->Create(env, decoder_format);
EXPECT_TRUE(decoder) << "Decoder not successfully created.";
if (decoder == nullptr) {
return false;
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/include/vp8.h b/third_party/libwebrtc/modules/video_coding/codecs/vp8/include/vp8.h
index 2fc647874f..45b7cee00a 100644
--- a/third_party/libwebrtc/modules/video_coding/codecs/vp8/include/vp8.h
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/include/vp8.h
@@ -14,6 +14,7 @@
#include <memory>
#include <vector>
+#include "api/environment/environment.h"
#include "api/video_codecs/video_encoder.h"
#include "api/video_codecs/vp8_frame_buffer_controller.h"
#include "modules/video_coding/include/video_codec_interface.h"
@@ -40,11 +41,15 @@ class VP8Encoder {
static std::unique_ptr<VideoEncoder> Create(Settings settings);
};
+// TODO: bugs.webrtc.org/15791 - Deprecate and delete in favor of the
+// CreateVp8Decoder function.
class VP8Decoder {
public:
static std::unique_ptr<VideoDecoder> Create();
};
+std::unique_ptr<VideoDecoder> CreateVp8Decoder(const Environment& env);
+
} // namespace webrtc
#endif // MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_H_
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc
index 9b77388f10..4c06aca5ad 100644
--- a/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc
@@ -18,7 +18,10 @@
#include <string>
#include "absl/types/optional.h"
+#include "api/environment/environment.h"
+#include "api/field_trials_view.h"
#include "api/scoped_refptr.h"
+#include "api/transport/field_trial_based_config.h"
#include "api/video/i420_buffer.h"
#include "api/video/video_frame.h"
#include "api/video/video_frame_buffer.h"
@@ -28,7 +31,6 @@
#include "rtc_base/checks.h"
#include "rtc_base/numerics/exp_filter.h"
#include "rtc_base/time_utils.h"
-#include "system_wrappers/include/field_trial.h"
#include "system_wrappers/include/metrics.h"
#include "third_party/libyuv/include/libyuv/convert.h"
#include "vpx/vp8.h"
@@ -59,9 +61,9 @@ absl::optional<LibvpxVp8Decoder::DeblockParams> DefaultDeblockParams() {
}
absl::optional<LibvpxVp8Decoder::DeblockParams>
-GetPostProcParamsFromFieldTrialGroup() {
- std::string group = webrtc::field_trial::FindFullName(
- kIsArm ? kVp8PostProcArmFieldTrial : kVp8PostProcFieldTrial);
+GetPostProcParamsFromFieldTrialGroup(const FieldTrialsView& field_trials) {
+ std::string group = field_trials.Lookup(kIsArm ? kVp8PostProcArmFieldTrial
+ : kVp8PostProcFieldTrial);
if (group.empty()) {
return DefaultDeblockParams();
}
@@ -89,6 +91,10 @@ std::unique_ptr<VideoDecoder> VP8Decoder::Create() {
return std::make_unique<LibvpxVp8Decoder>();
}
+std::unique_ptr<VideoDecoder> CreateVp8Decoder(const Environment& env) {
+ return std::make_unique<LibvpxVp8Decoder>(env);
+}
+
class LibvpxVp8Decoder::QpSmoother {
public:
QpSmoother() : last_sample_ms_(rtc::TimeMillis()), smoother_(kAlpha) {}
@@ -114,9 +120,14 @@ class LibvpxVp8Decoder::QpSmoother {
};
LibvpxVp8Decoder::LibvpxVp8Decoder()
- : use_postproc_(
- kIsArm ? webrtc::field_trial::IsEnabled(kVp8PostProcArmFieldTrial)
- : true),
+ : LibvpxVp8Decoder(FieldTrialBasedConfig()) {}
+
+LibvpxVp8Decoder::LibvpxVp8Decoder(const Environment& env)
+ : LibvpxVp8Decoder(env.field_trials()) {}
+
+LibvpxVp8Decoder::LibvpxVp8Decoder(const FieldTrialsView& field_trials)
+ : use_postproc_(kIsArm ? field_trials.IsEnabled(kVp8PostProcArmFieldTrial)
+ : true),
buffer_pool_(false, 300 /* max_number_of_buffers*/),
decode_complete_callback_(NULL),
inited_(false),
@@ -124,8 +135,9 @@ LibvpxVp8Decoder::LibvpxVp8Decoder()
last_frame_width_(0),
last_frame_height_(0),
key_frame_required_(true),
- deblock_params_(use_postproc_ ? GetPostProcParamsFromFieldTrialGroup()
- : absl::nullopt),
+ deblock_params_(use_postproc_
+ ? GetPostProcParamsFromFieldTrialGroup(field_trials)
+ : absl::nullopt),
qp_smoother_(use_postproc_ ? new QpSmoother() : nullptr) {}
LibvpxVp8Decoder::~LibvpxVp8Decoder() {
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.h b/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.h
index 74f4dc7c89..8ed8e7ca88 100644
--- a/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.h
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.h
@@ -14,6 +14,8 @@
#include <memory>
#include "absl/types/optional.h"
+#include "api/environment/environment.h"
+#include "api/field_trials_view.h"
#include "api/video/encoded_image.h"
#include "api/video_codecs/video_decoder.h"
#include "common_video/include/video_frame_buffer_pool.h"
@@ -26,7 +28,10 @@ namespace webrtc {
class LibvpxVp8Decoder : public VideoDecoder {
public:
+ // TODO: bugs.webrtc.org/15791 - Delete default constructor when
+ // Environment is always propagated.
LibvpxVp8Decoder();
+ explicit LibvpxVp8Decoder(const Environment& env);
~LibvpxVp8Decoder() override;
bool Configure(const Settings& settings) override;
@@ -56,6 +61,7 @@ class LibvpxVp8Decoder : public VideoDecoder {
private:
class QpSmoother;
+ explicit LibvpxVp8Decoder(const FieldTrialsView& field_trials);
int ReturnFrame(const vpx_image_t* img,
uint32_t timeStamp,
int qp,
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_simulcast_test.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_simulcast_test.cc
index 4ca3de20d5..3f13066892 100644
--- a/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_simulcast_test.cc
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/libvpx_vp8_simulcast_test.cc
@@ -27,7 +27,9 @@ std::unique_ptr<SimulcastTestFixture> CreateSpecificSimulcastTestFixture() {
[]() { return VP8Encoder::Create(); });
std::unique_ptr<VideoDecoderFactory> decoder_factory =
std::make_unique<FunctionVideoDecoderFactory>(
- []() { return VP8Decoder::Create(); });
+ [](const Environment& env, const SdpVideoFormat& format) {
+ return CreateVp8Decoder(env);
+ });
return CreateSimulcastTestFixture(std::move(encoder_factory),
std::move(decoder_factory),
SdpVideoFormat("VP8"));
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
index a6f570f855..514d3d7e1d 100644
--- a/third_party/libwebrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
@@ -13,6 +13,7 @@
#include <algorithm>
#include <memory>
+#include "api/environment/environment_factory.h"
#include "api/test/create_frame_generator.h"
#include "api/test/frame_generator_interface.h"
#include "api/test/mock_video_decoder.h"
@@ -70,7 +71,7 @@ class TestVp8Impl : public VideoCodecUnitTest {
}
std::unique_ptr<VideoDecoder> CreateDecoder() override {
- return VP8Decoder::Create();
+ return CreateVp8Decoder(CreateEnvironment());
}
void ModifyCodecSettings(VideoCodec* codec_settings) override {
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.cc
index 5330eb7e8c..edbe781639 100644
--- a/third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.cc
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.cc
@@ -267,7 +267,8 @@ LibvpxVp9Encoder::LibvpxVp9Encoder(const cricket::VideoCodec& codec,
"Disabled")),
performance_flags_(ParsePerformanceFlagsFromTrials(trials)),
num_steady_state_frames_(0),
- config_changed_(true) {
+ config_changed_(true),
+ svc_frame_drop_config_(ParseSvcFrameDropConfig(trials)) {
codec_ = {};
memset(&svc_params_, 0, sizeof(vpx_svc_extra_cfg_t));
}
@@ -838,6 +839,8 @@ int LibvpxVp9Encoder::InitAndSetControlSettings(const VideoCodec* inst) {
// 1:2 scaling in each dimension.
svc_params_.scaling_factor_num[i] = scaling_factor_num;
svc_params_.scaling_factor_den[i] = 256;
+ if (inst->mode != VideoCodecMode::kScreensharing)
+ scaling_factor_num /= 2;
}
}
@@ -924,11 +927,24 @@ int LibvpxVp9Encoder::InitAndSetControlSettings(const VideoCodec* inst) {
svc_drop_frame_.framedrop_thresh[i] = config_->rc_dropframe_thresh;
}
} else {
- // Configure encoder to drop entire superframe whenever it needs to drop
- // a layer. This mode is preferred over per-layer dropping which causes
- // quality flickering and is not compatible with RTP non-flexible mode.
- svc_drop_frame_.framedrop_mode = FULL_SUPERFRAME_DROP;
- svc_drop_frame_.max_consec_drop = std::numeric_limits<int>::max();
+ if (svc_frame_drop_config_.enabled &&
+ svc_frame_drop_config_.layer_drop_mode == LAYER_DROP &&
+ is_flexible_mode_ && svc_controller_ &&
+ (inter_layer_pred_ == InterLayerPredMode::kOff ||
+ inter_layer_pred_ == InterLayerPredMode::kOnKeyPic)) {
+ // SVC controller is required since it properly accounts for dropped
+ // refs (unlike SetReferences(), which assumes full superframe drop).
+ svc_drop_frame_.framedrop_mode = LAYER_DROP;
+ } else {
+ // Configure encoder to drop entire superframe whenever it needs to drop
+ // a layer. This mode is preferred over per-layer dropping which causes
+ // quality flickering and is not compatible with RTP non-flexible mode.
+ svc_drop_frame_.framedrop_mode = FULL_SUPERFRAME_DROP;
+ }
+ svc_drop_frame_.max_consec_drop =
+ svc_frame_drop_config_.enabled
+ ? svc_frame_drop_config_.max_consec_drop
+ : std::numeric_limits<int>::max();
for (size_t i = 0; i < num_spatial_layers_; ++i) {
svc_drop_frame_.framedrop_thresh[i] = config_->rc_dropframe_thresh;
}
@@ -1960,6 +1976,26 @@ LibvpxVp9Encoder::ParseQualityScalerConfig(const FieldTrialsView& trials) {
return config;
}
+LibvpxVp9Encoder::SvcFrameDropConfig LibvpxVp9Encoder::ParseSvcFrameDropConfig(
+ const FieldTrialsView& trials) {
+ FieldTrialFlag enabled = FieldTrialFlag("Enabled");
+ FieldTrialParameter<int> layer_drop_mode("layer_drop_mode",
+ FULL_SUPERFRAME_DROP);
+ FieldTrialParameter<int> max_consec_drop("max_consec_drop",
+ std::numeric_limits<int>::max());
+ ParseFieldTrial({&enabled, &layer_drop_mode, &max_consec_drop},
+ trials.Lookup("WebRTC-LibvpxVp9Encoder-SvcFrameDropConfig"));
+ SvcFrameDropConfig config;
+ config.enabled = enabled.Get();
+ config.layer_drop_mode = layer_drop_mode.Get();
+ config.max_consec_drop = max_consec_drop.Get();
+ RTC_LOG(LS_INFO) << "Libvpx VP9 encoder SVC frame drop config: "
+ << (config.enabled ? "enabled" : "disabled")
+ << " layer_drop_mode " << config.layer_drop_mode
+ << " max_consec_drop " << config.max_consec_drop;
+ return config;
+}
+
void LibvpxVp9Encoder::UpdatePerformanceFlags() {
flat_map<int, PerformanceFlags::ParameterSet> params_by_resolution;
if (codec_.GetVideoEncoderComplexity() ==
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.h b/third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.h
index 1953923f81..ea4e5810ac 100644
--- a/third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.h
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.h
@@ -240,6 +240,14 @@ class LibvpxVp9Encoder : public VP9Encoder {
bool config_changed_;
const LibvpxVp9EncoderInfoSettings encoder_info_override_;
+
+ const struct SvcFrameDropConfig {
+ bool enabled;
+ int layer_drop_mode; // SVC_LAYER_DROP_MODE
+ int max_consec_drop;
+ } svc_frame_drop_config_;
+ static SvcFrameDropConfig ParseSvcFrameDropConfig(
+ const FieldTrialsView& trials);
};
} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp9/svc_config.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp9/svc_config.cc
index 7af8cab3cb..555af835a5 100644
--- a/third_party/libwebrtc/modules/video_coding/codecs/vp9/svc_config.cc
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp9/svc_config.cc
@@ -190,6 +190,9 @@ std::vector<SpatialLayer> GetVp9SvcConfig(VideoCodec& codec) {
codec.SetScalabilityMode(limited_scalability_mode);
}
+ codec.VP9()->interLayerPred =
+ ScalabilityModeToInterLayerPredMode(*scalability_mode);
+
absl::optional<ScalableVideoController::StreamLayersConfig> info =
ScalabilityStructureConfig(*scalability_mode);
if (!info.has_value()) {
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp9/svc_config_unittest.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp9/svc_config_unittest.cc
index 1b1abe0f6d..2515b1ce4b 100644
--- a/third_party/libwebrtc/modules/video_coding/codecs/vp9/svc_config_unittest.cc
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp9/svc_config_unittest.cc
@@ -13,6 +13,7 @@
#include <cstddef>
#include <vector>
+#include "api/video_codecs/video_encoder.h"
#include "modules/video_coding/codecs/vp9/include/vp9_globals.h"
#include "test/gmock.h"
#include "test/gtest.h"
@@ -65,6 +66,25 @@ TEST(SvcConfig, NumSpatialLayersWithScalabilityMode) {
EXPECT_EQ(codec.GetScalabilityMode(), ScalabilityMode::kL3T3_KEY);
}
+TEST(SvcConfig, UpdatesInterLayerPredModeBasedOnScalabilityMode) {
+ VideoCodec codec;
+ codec.codecType = kVideoCodecVP9;
+ codec.width = 1280;
+ codec.height = 720;
+ codec.SetScalabilityMode(ScalabilityMode::kL3T3_KEY);
+
+ std::vector<SpatialLayer> spatial_layers = GetVp9SvcConfig(codec);
+ EXPECT_EQ(codec.VP9()->interLayerPred, InterLayerPredMode::kOnKeyPic);
+
+ codec.SetScalabilityMode(ScalabilityMode::kL3T3);
+ spatial_layers = GetVp9SvcConfig(codec);
+ EXPECT_EQ(codec.VP9()->interLayerPred, InterLayerPredMode::kOn);
+
+ codec.SetScalabilityMode(ScalabilityMode::kS3T3);
+ spatial_layers = GetVp9SvcConfig(codec);
+ EXPECT_EQ(codec.VP9()->interLayerPred, InterLayerPredMode::kOff);
+}
+
TEST(SvcConfig, NumSpatialLayersLimitedWithScalabilityMode) {
VideoCodec codec;
codec.codecType = kVideoCodecVP9;
diff --git a/third_party/libwebrtc/modules/video_coding/codecs/vp9/test/vp9_impl_unittest.cc b/third_party/libwebrtc/modules/video_coding/codecs/vp9/test/vp9_impl_unittest.cc
index 993fd245ad..50e9cf2369 100644
--- a/third_party/libwebrtc/modules/video_coding/codecs/vp9/test/vp9_impl_unittest.cc
+++ b/third_party/libwebrtc/modules/video_coding/codecs/vp9/test/vp9_impl_unittest.cc
@@ -2459,4 +2459,113 @@ TEST(Vp9SpeedSettingsTrialsTest, DefaultPerLayerFlagsWithSvc) {
}
}
+struct SvcFrameDropConfigTestParameters {
+ bool flexible_mode;
+ absl::optional<ScalabilityMode> scalability_mode;
+ std::string field_trial;
+ int expected_framedrop_mode;
+ int expected_max_consec_drop;
+};
+
+class TestVp9ImplSvcFrameDropConfig
+ : public ::testing::TestWithParam<SvcFrameDropConfigTestParameters> {};
+
+TEST_P(TestVp9ImplSvcFrameDropConfig, SvcFrameDropConfig) {
+ SvcFrameDropConfigTestParameters test_params = GetParam();
+ auto* const vpx = new NiceMock<MockLibvpxInterface>();
+ LibvpxVp9Encoder encoder(
+ cricket::CreateVideoCodec(cricket::kVp9CodecName),
+ absl::WrapUnique<LibvpxInterface>(vpx),
+ test::ExplicitKeyValueConfig(test_params.field_trial));
+
+ vpx_image_t img;
+ ON_CALL(*vpx, img_wrap).WillByDefault(GetWrapImageFunction(&img));
+
+ EXPECT_CALL(*vpx,
+ codec_control(_, VP9E_SET_SVC_FRAME_DROP_LAYER,
+ SafeMatcherCast<vpx_svc_frame_drop_t*>(AllOf(
+ Field(&vpx_svc_frame_drop_t::framedrop_mode,
+ test_params.expected_framedrop_mode),
+ Field(&vpx_svc_frame_drop_t::max_consec_drop,
+ test_params.expected_max_consec_drop)))));
+
+ VideoCodec settings = DefaultCodecSettings();
+ settings.VP9()->flexibleMode = test_params.flexible_mode;
+ if (test_params.scalability_mode.has_value()) {
+ settings.SetScalabilityMode(*test_params.scalability_mode);
+ }
+ settings.VP9()->numberOfSpatialLayers =
+ 3; // to execute SVC code paths even when scalability_mode is not set.
+
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder.InitEncode(&settings, kSettings));
+}
+
+INSTANTIATE_TEST_SUITE_P(
+ All,
+ TestVp9ImplSvcFrameDropConfig,
+ ::testing::Values(
+ // Flexible mode is disabled. Layer drop is not allowed. Ignore
+ // layer_drop_mode from field trial.
+ SvcFrameDropConfigTestParameters{
+ .flexible_mode = false,
+ .scalability_mode = ScalabilityMode::kL3T3_KEY,
+ .field_trial = "WebRTC-LibvpxVp9Encoder-SvcFrameDropConfig/"
+ "Enabled,layer_drop_mode:1,max_consec_drop:7/",
+ .expected_framedrop_mode = FULL_SUPERFRAME_DROP,
+ .expected_max_consec_drop = 7},
+ // Flexible mode is enabled but the field trial is not set. Use default
+ // settings.
+ SvcFrameDropConfigTestParameters{
+ .flexible_mode = true,
+ .scalability_mode = ScalabilityMode::kL3T3_KEY,
+ .field_trial = "",
+ .expected_framedrop_mode = FULL_SUPERFRAME_DROP,
+ .expected_max_consec_drop = std::numeric_limits<int>::max()},
+ // Flexible mode is enabled but the field trial is disabled. Use default
+ // settings.
+ SvcFrameDropConfigTestParameters{
+ .flexible_mode = true,
+ .scalability_mode = ScalabilityMode::kL3T3_KEY,
+ .field_trial = "WebRTC-LibvpxVp9Encoder-SvcFrameDropConfig/"
+ "Disabled,layer_drop_mode:1,max_consec_drop:7/",
+ .expected_framedrop_mode = FULL_SUPERFRAME_DROP,
+ .expected_max_consec_drop = std::numeric_limits<int>::max()},
+ // Flexible mode is enabled, layer drop is enabled, KSVC. Apply config
+ // from field trial.
+ SvcFrameDropConfigTestParameters{
+ .flexible_mode = true,
+ .scalability_mode = ScalabilityMode::kL3T3_KEY,
+ .field_trial = "WebRTC-LibvpxVp9Encoder-SvcFrameDropConfig/"
+ "Enabled,layer_drop_mode:1,max_consec_drop:7/",
+ .expected_framedrop_mode = LAYER_DROP,
+ .expected_max_consec_drop = 7},
+ // Flexible mode is enabled, layer drop is enabled, simulcast. Apply
+ // config from field trial.
+ SvcFrameDropConfigTestParameters{
+ .flexible_mode = true,
+ .scalability_mode = ScalabilityMode::kS3T3,
+ .field_trial = "WebRTC-LibvpxVp9Encoder-SvcFrameDropConfig/"
+ "Enabled,layer_drop_mode:1,max_consec_drop:7/",
+ .expected_framedrop_mode = LAYER_DROP,
+ .expected_max_consec_drop = 7},
+ // Flexible mode is enabled, layer drop is enabled, full SVC. Apply
+ // config from field trial.
+ SvcFrameDropConfigTestParameters{
+ .flexible_mode = false,
+ .scalability_mode = ScalabilityMode::kL3T3,
+ .field_trial = "WebRTC-LibvpxVp9Encoder-SvcFrameDropConfig/"
+ "Enabled,layer_drop_mode:1,max_consec_drop:7/",
+ .expected_framedrop_mode = FULL_SUPERFRAME_DROP,
+ .expected_max_consec_drop = 7},
+ // Flexible mode is enabled, layer-drop is enabled, scalability mode is
+ // not set (i.e., SVC controller is not enabled). Ignore layer_drop_mode
+ // from field trial.
+ SvcFrameDropConfigTestParameters{
+ .flexible_mode = true,
+ .scalability_mode = absl::nullopt,
+ .field_trial = "WebRTC-LibvpxVp9Encoder-SvcFrameDropConfig/"
+ "Enabled,layer_drop_mode:1,max_consec_drop:7/",
+ .expected_framedrop_mode = FULL_SUPERFRAME_DROP,
+ .expected_max_consec_drop = 7}));
+
} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/h264_packet_buffer.cc b/third_party/libwebrtc/modules/video_coding/h264_packet_buffer.cc
deleted file mode 100644
index 6096665bda..0000000000
--- a/third_party/libwebrtc/modules/video_coding/h264_packet_buffer.cc
+++ /dev/null
@@ -1,287 +0,0 @@
-/*
- * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "modules/video_coding/h264_packet_buffer.h"
-
-#include <algorithm>
-#include <cstdint>
-#include <utility>
-#include <vector>
-
-#include "api/array_view.h"
-#include "api/rtp_packet_info.h"
-#include "api/video/video_frame_type.h"
-#include "common_video/h264/h264_common.h"
-#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
-#include "modules/rtp_rtcp/source/rtp_packet_received.h"
-#include "modules/rtp_rtcp/source/rtp_video_header.h"
-#include "modules/video_coding/codecs/h264/include/h264_globals.h"
-#include "rtc_base/checks.h"
-#include "rtc_base/copy_on_write_buffer.h"
-#include "rtc_base/logging.h"
-#include "rtc_base/numerics/sequence_number_util.h"
-
-namespace webrtc {
-namespace {
-int64_t EuclideanMod(int64_t n, int64_t div) {
- RTC_DCHECK_GT(div, 0);
- return (n %= div) < 0 ? n + div : n;
-}
-
-rtc::ArrayView<const NaluInfo> GetNaluInfos(
- const RTPVideoHeaderH264& h264_header) {
- if (h264_header.nalus_length > kMaxNalusPerPacket) {
- return {};
- }
-
- return rtc::MakeArrayView(h264_header.nalus, h264_header.nalus_length);
-}
-
-bool IsFirstPacketOfFragment(const RTPVideoHeaderH264& h264_header) {
- return h264_header.nalus_length > 0;
-}
-
-bool BeginningOfIdr(const H264PacketBuffer::Packet& packet) {
- const auto& h264_header =
- absl::get<RTPVideoHeaderH264>(packet.video_header.video_type_header);
- const bool contains_idr_nalu =
- absl::c_any_of(GetNaluInfos(h264_header), [](const auto& nalu_info) {
- return nalu_info.type == H264::NaluType::kIdr;
- });
- switch (h264_header.packetization_type) {
- case kH264StapA:
- case kH264SingleNalu: {
- return contains_idr_nalu;
- }
- case kH264FuA: {
- return contains_idr_nalu && IsFirstPacketOfFragment(h264_header);
- }
- }
-}
-
-bool HasSps(const H264PacketBuffer::Packet& packet) {
- auto& h264_header =
- absl::get<RTPVideoHeaderH264>(packet.video_header.video_type_header);
- return absl::c_any_of(GetNaluInfos(h264_header), [](const auto& nalu_info) {
- return nalu_info.type == H264::NaluType::kSps;
- });
-}
-
-// TODO(bugs.webrtc.org/13157): Update the H264 depacketizer so we don't have to
-// fiddle with the payload at this point.
-rtc::CopyOnWriteBuffer FixVideoPayload(rtc::ArrayView<const uint8_t> payload,
- const RTPVideoHeader& video_header) {
- constexpr uint8_t kStartCode[] = {0, 0, 0, 1};
-
- const auto& h264_header =
- absl::get<RTPVideoHeaderH264>(video_header.video_type_header);
-
- rtc::CopyOnWriteBuffer result;
- switch (h264_header.packetization_type) {
- case kH264StapA: {
- const uint8_t* payload_end = payload.data() + payload.size();
- const uint8_t* nalu_ptr = payload.data() + 1;
- while (nalu_ptr < payload_end - 1) {
- // The first two bytes describe the length of the segment, where a
- // segment is the nalu type plus nalu payload.
- uint16_t segment_length = nalu_ptr[0] << 8 | nalu_ptr[1];
- nalu_ptr += 2;
-
- if (nalu_ptr + segment_length <= payload_end) {
- result.AppendData(kStartCode);
- result.AppendData(nalu_ptr, segment_length);
- }
- nalu_ptr += segment_length;
- }
- return result;
- }
-
- case kH264FuA: {
- if (IsFirstPacketOfFragment(h264_header)) {
- result.AppendData(kStartCode);
- }
- result.AppendData(payload);
- return result;
- }
-
- case kH264SingleNalu: {
- result.AppendData(kStartCode);
- result.AppendData(payload);
- return result;
- }
- }
-
- RTC_DCHECK_NOTREACHED();
- return result;
-}
-
-} // namespace
-
-H264PacketBuffer::H264PacketBuffer(bool idr_only_keyframes_allowed)
- : idr_only_keyframes_allowed_(idr_only_keyframes_allowed) {}
-
-H264PacketBuffer::InsertResult H264PacketBuffer::InsertPacket(
- std::unique_ptr<Packet> packet) {
- RTC_DCHECK(packet->video_header.codec == kVideoCodecH264);
-
- InsertResult result;
- if (!absl::holds_alternative<RTPVideoHeaderH264>(
- packet->video_header.video_type_header)) {
- return result;
- }
-
- int64_t unwrapped_seq_num = seq_num_unwrapper_.Unwrap(packet->seq_num);
- auto& packet_slot = GetPacket(unwrapped_seq_num);
- if (packet_slot != nullptr &&
- AheadOrAt(packet_slot->timestamp, packet->timestamp)) {
- // The incoming `packet` is old or a duplicate.
- return result;
- } else {
- packet_slot = std::move(packet);
- }
-
- result.packets = FindFrames(unwrapped_seq_num);
- return result;
-}
-
-std::unique_ptr<H264PacketBuffer::Packet>& H264PacketBuffer::GetPacket(
- int64_t unwrapped_seq_num) {
- return buffer_[EuclideanMod(unwrapped_seq_num, kBufferSize)];
-}
-
-bool H264PacketBuffer::BeginningOfStream(
- const H264PacketBuffer::Packet& packet) const {
- return HasSps(packet) ||
- (idr_only_keyframes_allowed_ && BeginningOfIdr(packet));
-}
-
-std::vector<std::unique_ptr<H264PacketBuffer::Packet>>
-H264PacketBuffer::FindFrames(int64_t unwrapped_seq_num) {
- std::vector<std::unique_ptr<Packet>> found_frames;
-
- Packet* packet = GetPacket(unwrapped_seq_num).get();
- RTC_CHECK(packet != nullptr);
-
- // Check if the packet is continuous or the beginning of a new coded video
- // sequence.
- if (unwrapped_seq_num - 1 != last_continuous_unwrapped_seq_num_) {
- if (unwrapped_seq_num <= last_continuous_unwrapped_seq_num_ ||
- !BeginningOfStream(*packet)) {
- return found_frames;
- }
-
- last_continuous_unwrapped_seq_num_ = unwrapped_seq_num;
- }
-
- for (int64_t seq_num = unwrapped_seq_num;
- seq_num < unwrapped_seq_num + kBufferSize;) {
- RTC_DCHECK_GE(seq_num, *last_continuous_unwrapped_seq_num_);
-
- // Packets that were never assembled into a completed frame will stay in
- // the 'buffer_'. Check that the `packet` sequence number match the expected
- // unwrapped sequence number.
- if (static_cast<uint16_t>(seq_num) != packet->seq_num) {
- return found_frames;
- }
-
- last_continuous_unwrapped_seq_num_ = seq_num;
- // Last packet of the frame, try to assemble the frame.
- if (packet->marker_bit) {
- uint32_t rtp_timestamp = packet->timestamp;
-
- // Iterate backwards to find where the frame starts.
- for (int64_t seq_num_start = seq_num;
- seq_num_start > seq_num - kBufferSize; --seq_num_start) {
- auto& prev_packet = GetPacket(seq_num_start - 1);
-
- if (prev_packet == nullptr || prev_packet->timestamp != rtp_timestamp) {
- if (MaybeAssembleFrame(seq_num_start, seq_num, found_frames)) {
- // Frame was assembled, continue to look for more frames.
- break;
- } else {
- // Frame was not assembled, no subsequent frame will be continuous.
- return found_frames;
- }
- }
- }
- }
-
- seq_num++;
- packet = GetPacket(seq_num).get();
- if (packet == nullptr) {
- return found_frames;
- }
- }
-
- return found_frames;
-}
-
-bool H264PacketBuffer::MaybeAssembleFrame(
- int64_t start_seq_num_unwrapped,
- int64_t end_sequence_number_unwrapped,
- std::vector<std::unique_ptr<Packet>>& frames) {
- bool has_sps = false;
- bool has_pps = false;
- bool has_idr = false;
-
- int width = -1;
- int height = -1;
-
- for (int64_t seq_num = start_seq_num_unwrapped;
- seq_num <= end_sequence_number_unwrapped; ++seq_num) {
- const auto& packet = GetPacket(seq_num);
- const auto& h264_header =
- absl::get<RTPVideoHeaderH264>(packet->video_header.video_type_header);
- for (const auto& nalu : GetNaluInfos(h264_header)) {
- has_idr |= nalu.type == H264::NaluType::kIdr;
- has_sps |= nalu.type == H264::NaluType::kSps;
- has_pps |= nalu.type == H264::NaluType::kPps;
- }
-
- width = std::max<int>(packet->video_header.width, width);
- height = std::max<int>(packet->video_header.height, height);
- }
-
- if (has_idr) {
- if (!idr_only_keyframes_allowed_ && (!has_sps || !has_pps)) {
- return false;
- }
- }
-
- for (int64_t seq_num = start_seq_num_unwrapped;
- seq_num <= end_sequence_number_unwrapped; ++seq_num) {
- auto& packet = GetPacket(seq_num);
-
- packet->video_header.is_first_packet_in_frame =
- (seq_num == start_seq_num_unwrapped);
- packet->video_header.is_last_packet_in_frame =
- (seq_num == end_sequence_number_unwrapped);
-
- if (packet->video_header.is_first_packet_in_frame) {
- if (width > 0 && height > 0) {
- packet->video_header.width = width;
- packet->video_header.height = height;
- }
-
- packet->video_header.frame_type = has_idr
- ? VideoFrameType::kVideoFrameKey
- : VideoFrameType::kVideoFrameDelta;
- }
-
- packet->video_payload =
- FixVideoPayload(packet->video_payload, packet->video_header);
-
- frames.push_back(std::move(packet));
- }
-
- return true;
-}
-
-} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/h264_packet_buffer.h b/third_party/libwebrtc/modules/video_coding/h264_packet_buffer.h
deleted file mode 100644
index a72c240e82..0000000000
--- a/third_party/libwebrtc/modules/video_coding/h264_packet_buffer.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef MODULES_VIDEO_CODING_H264_PACKET_BUFFER_H_
-#define MODULES_VIDEO_CODING_H264_PACKET_BUFFER_H_
-
-#include <array>
-#include <memory>
-#include <vector>
-
-#include "absl/base/attributes.h"
-#include "absl/types/optional.h"
-#include "modules/video_coding/packet_buffer.h"
-#include "rtc_base/numerics/sequence_number_unwrapper.h"
-
-namespace webrtc {
-
-class H264PacketBuffer {
- public:
- // The H264PacketBuffer does the same job as the PacketBuffer but for H264
- // only. To make it fit in with surronding code the PacketBuffer input/output
- // classes are used.
- using Packet = video_coding::PacketBuffer::Packet;
- using InsertResult = video_coding::PacketBuffer::InsertResult;
-
- explicit H264PacketBuffer(bool idr_only_keyframes_allowed);
-
- ABSL_MUST_USE_RESULT InsertResult
- InsertPacket(std::unique_ptr<Packet> packet);
-
- private:
- static constexpr int kBufferSize = 2048;
-
- std::unique_ptr<Packet>& GetPacket(int64_t unwrapped_seq_num);
- bool BeginningOfStream(const Packet& packet) const;
- std::vector<std::unique_ptr<Packet>> FindFrames(int64_t unwrapped_seq_num);
- bool MaybeAssembleFrame(int64_t start_seq_num_unwrapped,
- int64_t end_sequence_number_unwrapped,
- std::vector<std::unique_ptr<Packet>>& packets);
-
- const bool idr_only_keyframes_allowed_;
- std::array<std::unique_ptr<Packet>, kBufferSize> buffer_;
- absl::optional<int64_t> last_continuous_unwrapped_seq_num_;
- SeqNumUnwrapper<uint16_t> seq_num_unwrapper_;
-};
-
-} // namespace webrtc
-
-#endif // MODULES_VIDEO_CODING_H264_PACKET_BUFFER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/h264_packet_buffer_unittest.cc b/third_party/libwebrtc/modules/video_coding/h264_packet_buffer_unittest.cc
deleted file mode 100644
index 4f2331da28..0000000000
--- a/third_party/libwebrtc/modules/video_coding/h264_packet_buffer_unittest.cc
+++ /dev/null
@@ -1,778 +0,0 @@
-/*
- * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-#include "modules/video_coding/h264_packet_buffer.h"
-
-#include <cstring>
-#include <limits>
-#include <ostream>
-#include <string>
-#include <utility>
-
-#include "api/array_view.h"
-#include "api/video/render_resolution.h"
-#include "common_video/h264/h264_common.h"
-#include "rtc_base/system/unused.h"
-#include "test/gmock.h"
-#include "test/gtest.h"
-
-namespace webrtc {
-namespace {
-
-using ::testing::ElementsAreArray;
-using ::testing::Eq;
-using ::testing::IsEmpty;
-using ::testing::SizeIs;
-
-using H264::NaluType::kAud;
-using H264::NaluType::kFuA;
-using H264::NaluType::kIdr;
-using H264::NaluType::kPps;
-using H264::NaluType::kSlice;
-using H264::NaluType::kSps;
-using H264::NaluType::kStapA;
-
-constexpr int kBufferSize = 2048;
-
-std::vector<uint8_t> StartCode() {
- return {0, 0, 0, 1};
-}
-
-NaluInfo MakeNaluInfo(uint8_t type) {
- NaluInfo res;
- res.type = type;
- res.sps_id = -1;
- res.pps_id = -1;
- return res;
-}
-
-class Packet {
- public:
- explicit Packet(H264PacketizationTypes type);
-
- Packet& Idr(std::vector<uint8_t> payload = {9, 9, 9});
- Packet& Slice(std::vector<uint8_t> payload = {9, 9, 9});
- Packet& Sps(std::vector<uint8_t> payload = {9, 9, 9});
- Packet& SpsWithResolution(RenderResolution resolution,
- std::vector<uint8_t> payload = {9, 9, 9});
- Packet& Pps(std::vector<uint8_t> payload = {9, 9, 9});
- Packet& Aud();
- Packet& Marker();
- Packet& AsFirstFragment();
- Packet& Time(uint32_t rtp_timestamp);
- Packet& SeqNum(uint16_t rtp_seq_num);
-
- std::unique_ptr<H264PacketBuffer::Packet> Build();
-
- private:
- rtc::CopyOnWriteBuffer BuildFuaPayload() const;
- rtc::CopyOnWriteBuffer BuildSingleNaluPayload() const;
- rtc::CopyOnWriteBuffer BuildStapAPayload() const;
-
- RTPVideoHeaderH264& H264Header() {
- return absl::get<RTPVideoHeaderH264>(video_header_.video_type_header);
- }
- const RTPVideoHeaderH264& H264Header() const {
- return absl::get<RTPVideoHeaderH264>(video_header_.video_type_header);
- }
-
- H264PacketizationTypes type_;
- RTPVideoHeader video_header_;
- bool first_fragment_ = false;
- bool marker_bit_ = false;
- uint32_t rtp_timestamp_ = 0;
- uint16_t rtp_seq_num_ = 0;
- std::vector<std::vector<uint8_t>> nalu_payloads_;
-};
-
-Packet::Packet(H264PacketizationTypes type) : type_(type) {
- video_header_.video_type_header.emplace<RTPVideoHeaderH264>();
-}
-
-Packet& Packet::Idr(std::vector<uint8_t> payload) {
- auto& h264_header = H264Header();
- h264_header.nalus[h264_header.nalus_length++] = MakeNaluInfo(kIdr);
- nalu_payloads_.push_back(std::move(payload));
- return *this;
-}
-
-Packet& Packet::Slice(std::vector<uint8_t> payload) {
- auto& h264_header = H264Header();
- h264_header.nalus[h264_header.nalus_length++] = MakeNaluInfo(kSlice);
- nalu_payloads_.push_back(std::move(payload));
- return *this;
-}
-
-Packet& Packet::Sps(std::vector<uint8_t> payload) {
- auto& h264_header = H264Header();
- h264_header.nalus[h264_header.nalus_length++] = MakeNaluInfo(kSps);
- nalu_payloads_.push_back(std::move(payload));
- return *this;
-}
-
-Packet& Packet::SpsWithResolution(RenderResolution resolution,
- std::vector<uint8_t> payload) {
- auto& h264_header = H264Header();
- h264_header.nalus[h264_header.nalus_length++] = MakeNaluInfo(kSps);
- video_header_.width = resolution.Width();
- video_header_.height = resolution.Height();
- nalu_payloads_.push_back(std::move(payload));
- return *this;
-}
-
-Packet& Packet::Pps(std::vector<uint8_t> payload) {
- auto& h264_header = H264Header();
- h264_header.nalus[h264_header.nalus_length++] = MakeNaluInfo(kPps);
- nalu_payloads_.push_back(std::move(payload));
- return *this;
-}
-
-Packet& Packet::Aud() {
- auto& h264_header = H264Header();
- h264_header.nalus[h264_header.nalus_length++] = MakeNaluInfo(kAud);
- nalu_payloads_.push_back({});
- return *this;
-}
-
-Packet& Packet::Marker() {
- marker_bit_ = true;
- return *this;
-}
-
-Packet& Packet::AsFirstFragment() {
- first_fragment_ = true;
- return *this;
-}
-
-Packet& Packet::Time(uint32_t rtp_timestamp) {
- rtp_timestamp_ = rtp_timestamp;
- return *this;
-}
-
-Packet& Packet::SeqNum(uint16_t rtp_seq_num) {
- rtp_seq_num_ = rtp_seq_num;
- return *this;
-}
-
-std::unique_ptr<H264PacketBuffer::Packet> Packet::Build() {
- auto res = std::make_unique<H264PacketBuffer::Packet>();
-
- auto& h264_header = H264Header();
- switch (type_) {
- case kH264FuA: {
- RTC_CHECK_EQ(h264_header.nalus_length, 1);
- res->video_payload = BuildFuaPayload();
- break;
- }
- case kH264SingleNalu: {
- RTC_CHECK_EQ(h264_header.nalus_length, 1);
- res->video_payload = BuildSingleNaluPayload();
- break;
- }
- case kH264StapA: {
- RTC_CHECK_GT(h264_header.nalus_length, 1);
- RTC_CHECK_LE(h264_header.nalus_length, kMaxNalusPerPacket);
- res->video_payload = BuildStapAPayload();
- break;
- }
- }
-
- if (type_ == kH264FuA && !first_fragment_) {
- h264_header.nalus_length = 0;
- }
-
- h264_header.packetization_type = type_;
- res->marker_bit = marker_bit_;
- res->video_header = video_header_;
- res->timestamp = rtp_timestamp_;
- res->seq_num = rtp_seq_num_;
- res->video_header.codec = kVideoCodecH264;
-
- return res;
-}
-
-rtc::CopyOnWriteBuffer Packet::BuildFuaPayload() const {
- return rtc::CopyOnWriteBuffer(nalu_payloads_[0]);
-}
-
-rtc::CopyOnWriteBuffer Packet::BuildSingleNaluPayload() const {
- rtc::CopyOnWriteBuffer res;
- auto& h264_header = H264Header();
- res.AppendData(&h264_header.nalus[0].type, 1);
- res.AppendData(nalu_payloads_[0]);
- return res;
-}
-
-rtc::CopyOnWriteBuffer Packet::BuildStapAPayload() const {
- rtc::CopyOnWriteBuffer res;
-
- const uint8_t indicator = H264::NaluType::kStapA;
- res.AppendData(&indicator, 1);
-
- auto& h264_header = H264Header();
- for (size_t i = 0; i < h264_header.nalus_length; ++i) {
- // The two first bytes indicates the nalu segment size.
- uint8_t length_as_array[2] = {
- 0, static_cast<uint8_t>(nalu_payloads_[i].size() + 1)};
- res.AppendData(length_as_array);
-
- res.AppendData(&h264_header.nalus[i].type, 1);
- res.AppendData(nalu_payloads_[i]);
- }
- return res;
-}
-
-rtc::ArrayView<const uint8_t> PacketPayload(
- const std::unique_ptr<H264PacketBuffer::Packet>& packet) {
- return packet->video_payload;
-}
-
-std::vector<uint8_t> FlatVector(
- const std::vector<std::vector<uint8_t>>& elems) {
- std::vector<uint8_t> res;
- for (const auto& elem : elems) {
- res.insert(res.end(), elem.begin(), elem.end());
- }
- return res;
-}
-
-TEST(H264PacketBufferTest, IdrIsKeyframe) {
- H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/true);
-
- EXPECT_THAT(
- packet_buffer.InsertPacket(Packet(kH264SingleNalu).Idr().Marker().Build())
- .packets,
- SizeIs(1));
-}
-
-TEST(H264PacketBufferTest, IdrIsNotKeyframe) {
- H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
-
- EXPECT_THAT(
- packet_buffer.InsertPacket(Packet(kH264SingleNalu).Idr().Marker().Build())
- .packets,
- IsEmpty());
-}
-
-TEST(H264PacketBufferTest, IdrIsKeyframeFuaRequiresFirstFragmet) {
- H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/true);
-
- // Not marked as the first fragment
- EXPECT_THAT(
- packet_buffer
- .InsertPacket(Packet(kH264FuA).Idr().SeqNum(0).Time(0).Build())
- .packets,
- IsEmpty());
-
- EXPECT_THAT(packet_buffer
- .InsertPacket(
- Packet(kH264FuA).Idr().SeqNum(1).Time(0).Marker().Build())
- .packets,
- IsEmpty());
-
- // Marked as first fragment
- EXPECT_THAT(packet_buffer
- .InsertPacket(Packet(kH264FuA)
- .Idr()
- .SeqNum(2)
- .Time(1)
- .AsFirstFragment()
- .Build())
- .packets,
- IsEmpty());
-
- EXPECT_THAT(packet_buffer
- .InsertPacket(
- Packet(kH264FuA).Idr().SeqNum(3).Time(1).Marker().Build())
- .packets,
- SizeIs(2));
-}
-
-TEST(H264PacketBufferTest, SpsPpsIdrIsKeyframeSingleNalus) {
- H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
-
- RTC_UNUSED(packet_buffer.InsertPacket(
- Packet(kH264SingleNalu).Sps().SeqNum(0).Time(0).Build()));
- RTC_UNUSED(packet_buffer.InsertPacket(
- Packet(kH264SingleNalu).Pps().SeqNum(1).Time(0).Build()));
- EXPECT_THAT(
- packet_buffer
- .InsertPacket(
- Packet(kH264SingleNalu).Idr().SeqNum(2).Time(0).Marker().Build())
- .packets,
- SizeIs(3));
-}
-
-TEST(H264PacketBufferTest, PpsIdrIsNotKeyframeSingleNalus) {
- H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
-
- RTC_UNUSED(packet_buffer.InsertPacket(
- Packet(kH264SingleNalu).Pps().SeqNum(0).Time(0).Build()));
- EXPECT_THAT(
- packet_buffer
- .InsertPacket(
- Packet(kH264SingleNalu).Idr().SeqNum(1).Time(0).Marker().Build())
- .packets,
- IsEmpty());
-}
-
-TEST(H264PacketBufferTest, SpsIdrIsNotKeyframeSingleNalus) {
- H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
-
- RTC_UNUSED(packet_buffer.InsertPacket(
- Packet(kH264SingleNalu).Sps().SeqNum(0).Time(0).Build()));
- EXPECT_THAT(
- packet_buffer
- .InsertPacket(
- Packet(kH264SingleNalu).Idr().SeqNum(1).Time(0).Marker().Build())
- .packets,
- IsEmpty());
-}
-
-TEST(H264PacketBufferTest, SpsPpsIdrIsKeyframeStapA) {
- H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
-
- EXPECT_THAT(packet_buffer
- .InsertPacket(Packet(kH264StapA)
- .Sps()
- .Pps()
- .Idr()
- .SeqNum(0)
- .Time(0)
- .Marker()
- .Build())
- .packets,
- SizeIs(1));
-}
-
-TEST(H264PacketBufferTest, PpsIdrIsNotKeyframeStapA) {
- H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
-
- EXPECT_THAT(
- packet_buffer
- .InsertPacket(
- Packet(kH264StapA).Pps().Idr().SeqNum(0).Time(0).Marker().Build())
- .packets,
- IsEmpty());
-}
-
-TEST(H264PacketBufferTest, SpsIdrIsNotKeyframeStapA) {
- H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
-
- EXPECT_THAT(
- packet_buffer
- .InsertPacket(
- Packet(kH264StapA).Sps().Idr().SeqNum(2).Time(2).Marker().Build())
- .packets,
- IsEmpty());
-
- EXPECT_THAT(packet_buffer
- .InsertPacket(Packet(kH264StapA)
- .Sps()
- .Pps()
- .Idr()
- .SeqNum(3)
- .Time(3)
- .Marker()
- .Build())
- .packets,
- SizeIs(1));
-}
-
-TEST(H264PacketBufferTest, InsertingSpsPpsLastCompletesKeyframe) {
- H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
-
- RTC_UNUSED(packet_buffer.InsertPacket(
- Packet(kH264SingleNalu).Idr().SeqNum(2).Time(1).Marker().Build()));
-
- EXPECT_THAT(packet_buffer
- .InsertPacket(
- Packet(kH264StapA).Sps().Pps().SeqNum(1).Time(1).Build())
- .packets,
- SizeIs(2));
-}
-
-TEST(H264PacketBufferTest, InsertingMidFuaCompletesFrame) {
- H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
-
- EXPECT_THAT(packet_buffer
- .InsertPacket(Packet(kH264StapA)
- .Sps()
- .Pps()
- .Idr()
- .SeqNum(0)
- .Time(0)
- .Marker()
- .Build())
- .packets,
- SizeIs(1));
-
- RTC_UNUSED(packet_buffer.InsertPacket(
- Packet(kH264FuA).Slice().SeqNum(1).Time(1).AsFirstFragment().Build()));
- RTC_UNUSED(packet_buffer.InsertPacket(
- Packet(kH264FuA).Slice().SeqNum(3).Time(1).Marker().Build()));
- EXPECT_THAT(
- packet_buffer
- .InsertPacket(Packet(kH264FuA).Slice().SeqNum(2).Time(1).Build())
- .packets,
- SizeIs(3));
-}
-
-TEST(H264PacketBufferTest, SeqNumJumpDoesNotCompleteFrame) {
- H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
-
- EXPECT_THAT(packet_buffer
- .InsertPacket(Packet(kH264StapA)
- .Sps()
- .Pps()
- .Idr()
- .SeqNum(0)
- .Time(0)
- .Marker()
- .Build())
- .packets,
- SizeIs(1));
-
- EXPECT_THAT(
- packet_buffer
- .InsertPacket(Packet(kH264FuA).Slice().SeqNum(1).Time(1).Build())
- .packets,
- IsEmpty());
-
- // Add `kBufferSize` to make the index of the sequence number wrap and end up
- // where the packet with sequence number 2 would have ended up.
- EXPECT_THAT(packet_buffer
- .InsertPacket(Packet(kH264FuA)
- .Slice()
- .SeqNum(2 + kBufferSize)
- .Time(3)
- .Marker()
- .Build())
- .packets,
- IsEmpty());
-}
-
-TEST(H264PacketBufferTest, OldFramesAreNotCompletedAfterBufferWrap) {
- H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
-
- EXPECT_THAT(packet_buffer
- .InsertPacket(Packet(kH264SingleNalu)
- .Slice()
- .SeqNum(1)
- .Time(1)
- .Marker()
- .Build())
- .packets,
- IsEmpty());
-
- // New keyframe, preceedes packet with sequence number 1 in the buffer.
- EXPECT_THAT(packet_buffer
- .InsertPacket(Packet(kH264StapA)
- .Sps()
- .Pps()
- .Idr()
- .SeqNum(kBufferSize)
- .Time(kBufferSize)
- .Marker()
- .Build())
- .packets,
- SizeIs(1));
-}
-
-TEST(H264PacketBufferTest, OldPacketsDontBlockNewPackets) {
- H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
- EXPECT_THAT(packet_buffer
- .InsertPacket(Packet(kH264StapA)
- .Sps()
- .Pps()
- .Idr()
- .SeqNum(kBufferSize)
- .Time(kBufferSize)
- .Marker()
- .Build())
- .packets,
- SizeIs(1));
-
- RTC_UNUSED(packet_buffer.InsertPacket(Packet(kH264FuA)
- .Slice()
- .SeqNum(kBufferSize + 1)
- .Time(kBufferSize + 1)
- .AsFirstFragment()
- .Build()));
-
- RTC_UNUSED(packet_buffer.InsertPacket(Packet(kH264FuA)
- .Slice()
- .SeqNum(kBufferSize + 3)
- .Time(kBufferSize + 1)
- .Marker()
- .Build()));
- EXPECT_THAT(
- packet_buffer
- .InsertPacket(Packet(kH264FuA).Slice().SeqNum(2).Time(2).Build())
- .packets,
- IsEmpty());
-
- EXPECT_THAT(packet_buffer
- .InsertPacket(Packet(kH264FuA)
- .Slice()
- .SeqNum(kBufferSize + 2)
- .Time(kBufferSize + 1)
- .Build())
- .packets,
- SizeIs(3));
-}
-
-TEST(H264PacketBufferTest, OldPacketDoesntCompleteFrame) {
- H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
-
- EXPECT_THAT(packet_buffer
- .InsertPacket(Packet(kH264StapA)
- .Sps()
- .Pps()
- .Idr()
- .SeqNum(kBufferSize)
- .Time(kBufferSize)
- .Marker()
- .Build())
- .packets,
- SizeIs(1));
-
- EXPECT_THAT(packet_buffer
- .InsertPacket(Packet(kH264FuA)
- .Slice()
- .SeqNum(kBufferSize + 3)
- .Time(kBufferSize + 1)
- .Marker()
- .Build())
- .packets,
- IsEmpty());
-
- EXPECT_THAT(
- packet_buffer
- .InsertPacket(
- Packet(kH264FuA).Slice().SeqNum(2).Time(2).Marker().Build())
- .packets,
- IsEmpty());
-
- EXPECT_THAT(packet_buffer
- .InsertPacket(Packet(kH264FuA)
- .Slice()
- .SeqNum(kBufferSize + 1)
- .Time(kBufferSize + 1)
- .AsFirstFragment()
- .Build())
- .packets,
- IsEmpty());
-}
-
-TEST(H264PacketBufferTest, FrameBoundariesAreSet) {
- H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
-
- auto key = packet_buffer.InsertPacket(
- Packet(kH264StapA).Sps().Pps().Idr().SeqNum(1).Time(1).Marker().Build());
-
- ASSERT_THAT(key.packets, SizeIs(1));
- EXPECT_TRUE(key.packets[0]->video_header.is_first_packet_in_frame);
- EXPECT_TRUE(key.packets[0]->video_header.is_last_packet_in_frame);
-
- RTC_UNUSED(packet_buffer.InsertPacket(
- Packet(kH264FuA).Slice().SeqNum(2).Time(2).Build()));
- RTC_UNUSED(packet_buffer.InsertPacket(
- Packet(kH264FuA).Slice().SeqNum(3).Time(2).Build()));
- auto delta = packet_buffer.InsertPacket(
- Packet(kH264FuA).Slice().SeqNum(4).Time(2).Marker().Build());
-
- ASSERT_THAT(delta.packets, SizeIs(3));
- EXPECT_TRUE(delta.packets[0]->video_header.is_first_packet_in_frame);
- EXPECT_FALSE(delta.packets[0]->video_header.is_last_packet_in_frame);
-
- EXPECT_FALSE(delta.packets[1]->video_header.is_first_packet_in_frame);
- EXPECT_FALSE(delta.packets[1]->video_header.is_last_packet_in_frame);
-
- EXPECT_FALSE(delta.packets[2]->video_header.is_first_packet_in_frame);
- EXPECT_TRUE(delta.packets[2]->video_header.is_last_packet_in_frame);
-}
-
-TEST(H264PacketBufferTest, ResolutionSetOnFirstPacket) {
- H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
-
- RTC_UNUSED(packet_buffer.InsertPacket(
- Packet(kH264SingleNalu).Aud().SeqNum(1).Time(1).Build()));
- auto res = packet_buffer.InsertPacket(Packet(kH264StapA)
- .SpsWithResolution({320, 240})
- .Pps()
- .Idr()
- .SeqNum(2)
- .Time(1)
- .Marker()
- .Build());
-
- ASSERT_THAT(res.packets, SizeIs(2));
- EXPECT_THAT(res.packets[0]->video_header.width, Eq(320));
- EXPECT_THAT(res.packets[0]->video_header.height, Eq(240));
-}
-
-TEST(H264PacketBufferTest, KeyframeAndDeltaFrameSetOnFirstPacket) {
- H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
-
- RTC_UNUSED(packet_buffer.InsertPacket(
- Packet(kH264SingleNalu).Aud().SeqNum(1).Time(1).Build()));
- auto key = packet_buffer.InsertPacket(
- Packet(kH264StapA).Sps().Pps().Idr().SeqNum(2).Time(1).Marker().Build());
-
- auto delta = packet_buffer.InsertPacket(
- Packet(kH264SingleNalu).Slice().SeqNum(3).Time(2).Marker().Build());
-
- ASSERT_THAT(key.packets, SizeIs(2));
- EXPECT_THAT(key.packets[0]->video_header.frame_type,
- Eq(VideoFrameType::kVideoFrameKey));
- ASSERT_THAT(delta.packets, SizeIs(1));
- EXPECT_THAT(delta.packets[0]->video_header.frame_type,
- Eq(VideoFrameType::kVideoFrameDelta));
-}
-
-TEST(H264PacketBufferTest, RtpSeqNumWrap) {
- H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
-
- RTC_UNUSED(packet_buffer.InsertPacket(
- Packet(kH264StapA).Sps().Pps().SeqNum(0xffff).Time(0).Build()));
-
- RTC_UNUSED(packet_buffer.InsertPacket(
- Packet(kH264FuA).Idr().SeqNum(0).Time(0).Build()));
- EXPECT_THAT(packet_buffer
- .InsertPacket(
- Packet(kH264FuA).Idr().SeqNum(1).Time(0).Marker().Build())
- .packets,
- SizeIs(3));
-}
-
-TEST(H264PacketBufferTest, StapAFixedBitstream) {
- H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
-
- auto packets = packet_buffer
- .InsertPacket(Packet(kH264StapA)
- .Sps({1, 2, 3})
- .Pps({4, 5, 6})
- .Idr({7, 8, 9})
- .SeqNum(0)
- .Time(0)
- .Marker()
- .Build())
- .packets;
-
- ASSERT_THAT(packets, SizeIs(1));
- EXPECT_THAT(PacketPayload(packets[0]),
- ElementsAreArray(FlatVector({StartCode(),
- {kSps, 1, 2, 3},
- StartCode(),
- {kPps, 4, 5, 6},
- StartCode(),
- {kIdr, 7, 8, 9}})));
-}
-
-TEST(H264PacketBufferTest, SingleNaluFixedBitstream) {
- H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
-
- RTC_UNUSED(packet_buffer.InsertPacket(
- Packet(kH264SingleNalu).Sps({1, 2, 3}).SeqNum(0).Time(0).Build()));
- RTC_UNUSED(packet_buffer.InsertPacket(
- Packet(kH264SingleNalu).Pps({4, 5, 6}).SeqNum(1).Time(0).Build()));
- auto packets = packet_buffer
- .InsertPacket(Packet(kH264SingleNalu)
- .Idr({7, 8, 9})
- .SeqNum(2)
- .Time(0)
- .Marker()
- .Build())
- .packets;
-
- ASSERT_THAT(packets, SizeIs(3));
- EXPECT_THAT(PacketPayload(packets[0]),
- ElementsAreArray(FlatVector({StartCode(), {kSps, 1, 2, 3}})));
- EXPECT_THAT(PacketPayload(packets[1]),
- ElementsAreArray(FlatVector({StartCode(), {kPps, 4, 5, 6}})));
- EXPECT_THAT(PacketPayload(packets[2]),
- ElementsAreArray(FlatVector({StartCode(), {kIdr, 7, 8, 9}})));
-}
-
-TEST(H264PacketBufferTest, StapaAndFuaFixedBitstream) {
- H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
-
- RTC_UNUSED(packet_buffer.InsertPacket(Packet(kH264StapA)
- .Sps({1, 2, 3})
- .Pps({4, 5, 6})
- .SeqNum(0)
- .Time(0)
- .Build()));
- RTC_UNUSED(packet_buffer.InsertPacket(Packet(kH264FuA)
- .Idr({8, 8, 8})
- .SeqNum(1)
- .Time(0)
- .AsFirstFragment()
- .Build()));
- auto packets = packet_buffer
- .InsertPacket(Packet(kH264FuA)
- .Idr({9, 9, 9})
- .SeqNum(2)
- .Time(0)
- .Marker()
- .Build())
- .packets;
-
- ASSERT_THAT(packets, SizeIs(3));
- EXPECT_THAT(
- PacketPayload(packets[0]),
- ElementsAreArray(FlatVector(
- {StartCode(), {kSps, 1, 2, 3}, StartCode(), {kPps, 4, 5, 6}})));
- EXPECT_THAT(PacketPayload(packets[1]),
- ElementsAreArray(FlatVector({StartCode(), {8, 8, 8}})));
- // Third is a continuation of second, so only the payload is expected.
- EXPECT_THAT(PacketPayload(packets[2]),
- ElementsAreArray(FlatVector({{9, 9, 9}})));
-}
-
-TEST(H264PacketBufferTest, FullPacketBufferDoesNotBlockKeyframe) {
- H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
-
- for (int i = 0; i < kBufferSize; ++i) {
- EXPECT_THAT(
- packet_buffer
- .InsertPacket(
- Packet(kH264SingleNalu).Slice().SeqNum(i).Time(0).Build())
- .packets,
- IsEmpty());
- }
-
- EXPECT_THAT(packet_buffer
- .InsertPacket(Packet(kH264StapA)
- .Sps()
- .Pps()
- .Idr()
- .SeqNum(kBufferSize)
- .Time(1)
- .Marker()
- .Build())
- .packets,
- SizeIs(1));
-}
-
-TEST(H264PacketBufferTest, TooManyNalusInPacket) {
- H264PacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
-
- std::unique_ptr<H264PacketBuffer::Packet> packet(
- Packet(kH264StapA).Sps().Pps().Idr().SeqNum(1).Time(1).Marker().Build());
- auto& h264_header =
- absl::get<RTPVideoHeaderH264>(packet->video_header.video_type_header);
- h264_header.nalus_length = kMaxNalusPerPacket + 1;
-
- EXPECT_THAT(packet_buffer.InsertPacket(std::move(packet)).packets, IsEmpty());
-}
-
-} // namespace
-} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/h26x_packet_buffer.cc b/third_party/libwebrtc/modules/video_coding/h26x_packet_buffer.cc
new file mode 100644
index 0000000000..bca2b5ce29
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/h26x_packet_buffer.cc
@@ -0,0 +1,337 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/h26x_packet_buffer.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <utility>
+#include <vector>
+
+#include "api/array_view.h"
+#include "api/rtp_packet_info.h"
+#include "api/video/video_frame_type.h"
+#include "common_video/h264/h264_common.h"
+#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "modules/rtp_rtcp/source/rtp_video_header.h"
+#include "modules/video_coding/codecs/h264/include/h264_globals.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/copy_on_write_buffer.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/sequence_number_util.h"
+#ifdef RTC_ENABLE_H265
+#include "common_video/h265/h265_common.h"
+#endif
+
+namespace webrtc {
+namespace {
+
+int64_t EuclideanMod(int64_t n, int64_t div) {
+ RTC_DCHECK_GT(div, 0);
+ return (n %= div) < 0 ? n + div : n;
+}
+
+rtc::ArrayView<const NaluInfo> GetNaluInfos(
+ const RTPVideoHeaderH264& h264_header) {
+ if (h264_header.nalus_length > kMaxNalusPerPacket) {
+ return {};
+ }
+
+ return rtc::MakeArrayView(h264_header.nalus, h264_header.nalus_length);
+}
+
+bool IsFirstPacketOfFragment(const RTPVideoHeaderH264& h264_header) {
+ return h264_header.nalus_length > 0;
+}
+
+bool BeginningOfIdr(const H26xPacketBuffer::Packet& packet) {
+ const auto& h264_header =
+ absl::get<RTPVideoHeaderH264>(packet.video_header.video_type_header);
+ const bool contains_idr_nalu =
+ absl::c_any_of(GetNaluInfos(h264_header), [](const auto& nalu_info) {
+ return nalu_info.type == H264::NaluType::kIdr;
+ });
+ switch (h264_header.packetization_type) {
+ case kH264StapA:
+ case kH264SingleNalu: {
+ return contains_idr_nalu;
+ }
+ case kH264FuA: {
+ return contains_idr_nalu && IsFirstPacketOfFragment(h264_header);
+ }
+ }
+}
+
+bool HasSps(const H26xPacketBuffer::Packet& packet) {
+ auto& h264_header =
+ absl::get<RTPVideoHeaderH264>(packet.video_header.video_type_header);
+ return absl::c_any_of(GetNaluInfos(h264_header), [](const auto& nalu_info) {
+ return nalu_info.type == H264::NaluType::kSps;
+ });
+}
+
+#ifdef RTC_ENABLE_H265
+bool HasVps(const H26xPacketBuffer::Packet& packet) {
+ std::vector<H265::NaluIndex> nalu_indices = H265::FindNaluIndices(
+ packet.video_payload.cdata(), packet.video_payload.size());
+ return absl::c_any_of((nalu_indices), [&packet](
+ const H265::NaluIndex& nalu_index) {
+ return H265::ParseNaluType(
+ packet.video_payload.cdata()[nalu_index.payload_start_offset]) ==
+ H265::NaluType::kVps;
+ });
+}
+#endif
+
+// TODO(bugs.webrtc.org/13157): Update the H264 depacketizer so we don't have to
+// fiddle with the payload at this point.
+rtc::CopyOnWriteBuffer FixH264VideoPayload(
+ rtc::ArrayView<const uint8_t> payload,
+ const RTPVideoHeader& video_header) {
+ constexpr uint8_t kStartCode[] = {0, 0, 0, 1};
+
+ const auto& h264_header =
+ absl::get<RTPVideoHeaderH264>(video_header.video_type_header);
+
+ rtc::CopyOnWriteBuffer result;
+ switch (h264_header.packetization_type) {
+ case kH264StapA: {
+ const uint8_t* payload_end = payload.data() + payload.size();
+ const uint8_t* nalu_ptr = payload.data() + 1;
+ while (nalu_ptr < payload_end - 1) {
+ // The first two bytes describe the length of the segment, where a
+ // segment is the nalu type plus nalu payload.
+ uint16_t segment_length = nalu_ptr[0] << 8 | nalu_ptr[1];
+ nalu_ptr += 2;
+
+ if (nalu_ptr + segment_length <= payload_end) {
+ result.AppendData(kStartCode);
+ result.AppendData(nalu_ptr, segment_length);
+ }
+ nalu_ptr += segment_length;
+ }
+ return result;
+ }
+
+ case kH264FuA: {
+ if (IsFirstPacketOfFragment(h264_header)) {
+ result.AppendData(kStartCode);
+ }
+ result.AppendData(payload);
+ return result;
+ }
+
+ case kH264SingleNalu: {
+ result.AppendData(kStartCode);
+ result.AppendData(payload);
+ return result;
+ }
+ }
+
+ RTC_DCHECK_NOTREACHED();
+ return result;
+}
+
+} // namespace
+
+H26xPacketBuffer::H26xPacketBuffer(bool h264_idr_only_keyframes_allowed)
+ : h264_idr_only_keyframes_allowed_(h264_idr_only_keyframes_allowed) {}
+
+H26xPacketBuffer::InsertResult H26xPacketBuffer::InsertPacket(
+ std::unique_ptr<Packet> packet) {
+ RTC_DCHECK(packet->video_header.codec == kVideoCodecH264 ||
+ packet->video_header.codec == kVideoCodecH265);
+
+ InsertResult result;
+
+ int64_t unwrapped_seq_num = seq_num_unwrapper_.Unwrap(packet->seq_num);
+ auto& packet_slot = GetPacket(unwrapped_seq_num);
+ if (packet_slot != nullptr &&
+ AheadOrAt(packet_slot->timestamp, packet->timestamp)) {
+ // The incoming `packet` is old or a duplicate.
+ return result;
+ } else {
+ packet_slot = std::move(packet);
+ }
+
+ result.packets = FindFrames(unwrapped_seq_num);
+ return result;
+}
+
+std::unique_ptr<H26xPacketBuffer::Packet>& H26xPacketBuffer::GetPacket(
+ int64_t unwrapped_seq_num) {
+ return buffer_[EuclideanMod(unwrapped_seq_num, kBufferSize)];
+}
+
+bool H26xPacketBuffer::BeginningOfStream(
+ const H26xPacketBuffer::Packet& packet) const {
+ if (packet.codec() == kVideoCodecH264) {
+ return HasSps(packet) ||
+ (h264_idr_only_keyframes_allowed_ && BeginningOfIdr(packet));
+#ifdef RTC_ENABLE_H265
+ } else if (packet.codec() == kVideoCodecH265) {
+ return HasVps(packet);
+#endif
+ }
+ RTC_DCHECK_NOTREACHED();
+ return false;
+}
+
+std::vector<std::unique_ptr<H26xPacketBuffer::Packet>>
+H26xPacketBuffer::FindFrames(int64_t unwrapped_seq_num) {
+ std::vector<std::unique_ptr<Packet>> found_frames;
+
+ Packet* packet = GetPacket(unwrapped_seq_num).get();
+ RTC_CHECK(packet != nullptr);
+
+ // Check if the packet is continuous or the beginning of a new coded video
+ // sequence.
+ if (unwrapped_seq_num - 1 != last_continuous_unwrapped_seq_num_) {
+ if (unwrapped_seq_num <= last_continuous_unwrapped_seq_num_ ||
+ !BeginningOfStream(*packet)) {
+ return found_frames;
+ }
+
+ last_continuous_unwrapped_seq_num_ = unwrapped_seq_num;
+ }
+
+ for (int64_t seq_num = unwrapped_seq_num;
+ seq_num < unwrapped_seq_num + kBufferSize;) {
+ RTC_DCHECK_GE(seq_num, *last_continuous_unwrapped_seq_num_);
+
+    // Packets that were never assembled into a completed frame will stay in
+    // `buffer_`. Check that the `packet` sequence number matches the expected
+    // unwrapped sequence number.
+ if (static_cast<uint16_t>(seq_num) != packet->seq_num) {
+ return found_frames;
+ }
+
+ last_continuous_unwrapped_seq_num_ = seq_num;
+ // Last packet of the frame, try to assemble the frame.
+ if (packet->marker_bit) {
+ uint32_t rtp_timestamp = packet->timestamp;
+
+ // Iterate backwards to find where the frame starts.
+ for (int64_t seq_num_start = seq_num;
+ seq_num_start > seq_num - kBufferSize; --seq_num_start) {
+ auto& prev_packet = GetPacket(seq_num_start - 1);
+
+ if (prev_packet == nullptr || prev_packet->timestamp != rtp_timestamp) {
+ if (MaybeAssembleFrame(seq_num_start, seq_num, found_frames)) {
+ // Frame was assembled, continue to look for more frames.
+ break;
+ } else {
+ // Frame was not assembled, no subsequent frame will be continuous.
+ return found_frames;
+ }
+ }
+ }
+ }
+
+ seq_num++;
+ packet = GetPacket(seq_num).get();
+ if (packet == nullptr) {
+ return found_frames;
+ }
+ }
+
+ return found_frames;
+}
+
+bool H26xPacketBuffer::MaybeAssembleFrame(
+ int64_t start_seq_num_unwrapped,
+ int64_t end_sequence_number_unwrapped,
+ std::vector<std::unique_ptr<Packet>>& frames) {
+#ifdef RTC_ENABLE_H265
+ bool has_vps = false;
+#endif
+ bool has_sps = false;
+ bool has_pps = false;
+ bool has_idr = false;
+ bool has_irap = false;
+
+ int width = -1;
+ int height = -1;
+
+ for (int64_t seq_num = start_seq_num_unwrapped;
+ seq_num <= end_sequence_number_unwrapped; ++seq_num) {
+ const auto& packet = GetPacket(seq_num);
+ if (packet->codec() == kVideoCodecH264) {
+ const auto& h264_header =
+ absl::get<RTPVideoHeaderH264>(packet->video_header.video_type_header);
+ for (const auto& nalu : GetNaluInfos(h264_header)) {
+ has_idr |= nalu.type == H264::NaluType::kIdr;
+ has_sps |= nalu.type == H264::NaluType::kSps;
+ has_pps |= nalu.type == H264::NaluType::kPps;
+ }
+ if (has_idr) {
+ if (!h264_idr_only_keyframes_allowed_ && (!has_sps || !has_pps)) {
+ return false;
+ }
+ }
+#ifdef RTC_ENABLE_H265
+ } else if (packet->codec() == kVideoCodecH265) {
+ std::vector<H265::NaluIndex> nalu_indices = H265::FindNaluIndices(
+ packet->video_payload.cdata(), packet->video_payload.size());
+ for (const auto& nalu_index : nalu_indices) {
+ uint8_t nalu_type = H265::ParseNaluType(
+ packet->video_payload.cdata()[nalu_index.payload_start_offset]);
+ has_irap |= (nalu_type >= H265::NaluType::kBlaWLp &&
+ nalu_type <= H265::NaluType::kRsvIrapVcl23);
+ has_vps |= nalu_type == H265::NaluType::kVps;
+ has_sps |= nalu_type == H265::NaluType::kSps;
+ has_pps |= nalu_type == H265::NaluType::kPps;
+ }
+ if (has_irap) {
+ if (!has_vps || !has_sps || !has_pps) {
+ return false;
+ }
+ }
+#endif // RTC_ENABLE_H265
+ }
+
+ width = std::max<int>(packet->video_header.width, width);
+ height = std::max<int>(packet->video_header.height, height);
+ }
+
+ for (int64_t seq_num = start_seq_num_unwrapped;
+ seq_num <= end_sequence_number_unwrapped; ++seq_num) {
+ auto& packet = GetPacket(seq_num);
+
+ packet->video_header.is_first_packet_in_frame =
+ (seq_num == start_seq_num_unwrapped);
+ packet->video_header.is_last_packet_in_frame =
+ (seq_num == end_sequence_number_unwrapped);
+
+ if (packet->video_header.is_first_packet_in_frame) {
+ if (width > 0 && height > 0) {
+ packet->video_header.width = width;
+ packet->video_header.height = height;
+ }
+
+ packet->video_header.frame_type = has_idr || has_irap
+ ? VideoFrameType::kVideoFrameKey
+ : VideoFrameType::kVideoFrameDelta;
+ }
+
+    // Start code is inserted by the depacketizer for H.265.
+ if (packet->codec() == kVideoCodecH264) {
+ packet->video_payload =
+ FixH264VideoPayload(packet->video_payload, packet->video_header);
+ }
+
+ frames.push_back(std::move(packet));
+ }
+
+ return true;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/h26x_packet_buffer.h b/third_party/libwebrtc/modules/video_coding/h26x_packet_buffer.h
new file mode 100644
index 0000000000..21601562c5
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/h26x_packet_buffer.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_H26X_PACKET_BUFFER_H_
+#define MODULES_VIDEO_CODING_H26X_PACKET_BUFFER_H_
+
+#include <array>
+#include <memory>
+#include <vector>
+
+#include "absl/base/attributes.h"
+#include "absl/types/optional.h"
+#include "modules/video_coding/packet_buffer.h"
+#include "rtc_base/numerics/sequence_number_unwrapper.h"
+
+namespace webrtc {
+
+class H26xPacketBuffer {
+ public:
+ // The H26xPacketBuffer does the same job as the PacketBuffer but for H264 and
+  // H265 only. To make it fit in with surrounding code the PacketBuffer
+ // input/output classes are used.
+ using Packet = video_coding::PacketBuffer::Packet;
+ using InsertResult = video_coding::PacketBuffer::InsertResult;
+
+ // |h264_idr_only_keyframes_allowed| is ignored if H.265 is used.
+ explicit H26xPacketBuffer(bool h264_idr_only_keyframes_allowed);
+
+ ABSL_MUST_USE_RESULT InsertResult
+ InsertPacket(std::unique_ptr<Packet> packet);
+
+ private:
+ static constexpr int kBufferSize = 2048;
+
+ std::unique_ptr<Packet>& GetPacket(int64_t unwrapped_seq_num);
+ bool BeginningOfStream(const Packet& packet) const;
+ std::vector<std::unique_ptr<Packet>> FindFrames(int64_t unwrapped_seq_num);
+ bool MaybeAssembleFrame(int64_t start_seq_num_unwrapped,
+ int64_t end_sequence_number_unwrapped,
+ std::vector<std::unique_ptr<Packet>>& packets);
+
+ const bool h264_idr_only_keyframes_allowed_;
+ std::array<std::unique_ptr<Packet>, kBufferSize> buffer_;
+ absl::optional<int64_t> last_continuous_unwrapped_seq_num_;
+ SeqNumUnwrapper<uint16_t> seq_num_unwrapper_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_H26X_PACKET_BUFFER_H_
diff --git a/third_party/libwebrtc/modules/video_coding/h26x_packet_buffer_unittest.cc b/third_party/libwebrtc/modules/video_coding/h26x_packet_buffer_unittest.cc
new file mode 100644
index 0000000000..ac5bcb735b
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/h26x_packet_buffer_unittest.cc
@@ -0,0 +1,1058 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/video_coding/h26x_packet_buffer.h"
+
+#include <cstring>
+#include <limits>
+#include <ostream>
+#include <string>
+#include <utility>
+
+#include "api/array_view.h"
+#include "api/video/render_resolution.h"
+#include "common_video/h264/h264_common.h"
+#include "rtc_base/system/unused.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#ifdef RTC_ENABLE_H265
+#include "common_video/h265/h265_common.h"
+#endif
+
+namespace webrtc {
+namespace {
+
+using ::testing::ElementsAreArray;
+using ::testing::Eq;
+using ::testing::IsEmpty;
+using ::testing::SizeIs;
+
+using H264::NaluType::kAud;
+using H264::NaluType::kFuA;
+using H264::NaluType::kIdr;
+using H264::NaluType::kPps;
+using H264::NaluType::kSlice;
+using H264::NaluType::kSps;
+using H264::NaluType::kStapA;
+
+constexpr int kBufferSize = 2048;
+
+std::vector<uint8_t> StartCode() {
+ return {0, 0, 0, 1};
+}
+
+NaluInfo MakeNaluInfo(uint8_t type) {
+ NaluInfo res;
+ res.type = type;
+ res.sps_id = -1;
+ res.pps_id = -1;
+ return res;
+}
+
+class H264Packet {
+ public:
+ explicit H264Packet(H264PacketizationTypes type);
+
+ H264Packet& Idr(std::vector<uint8_t> payload = {9, 9, 9});
+ H264Packet& Slice(std::vector<uint8_t> payload = {9, 9, 9});
+ H264Packet& Sps(std::vector<uint8_t> payload = {9, 9, 9});
+ H264Packet& SpsWithResolution(RenderResolution resolution,
+ std::vector<uint8_t> payload = {9, 9, 9});
+ H264Packet& Pps(std::vector<uint8_t> payload = {9, 9, 9});
+ H264Packet& Aud();
+ H264Packet& Marker();
+ H264Packet& AsFirstFragment();
+ H264Packet& Time(uint32_t rtp_timestamp);
+ H264Packet& SeqNum(uint16_t rtp_seq_num);
+
+ std::unique_ptr<H26xPacketBuffer::Packet> Build();
+
+ private:
+ rtc::CopyOnWriteBuffer BuildFuaPayload() const;
+ rtc::CopyOnWriteBuffer BuildSingleNaluPayload() const;
+ rtc::CopyOnWriteBuffer BuildStapAPayload() const;
+
+ RTPVideoHeaderH264& H264Header() {
+ return absl::get<RTPVideoHeaderH264>(video_header_.video_type_header);
+ }
+ const RTPVideoHeaderH264& H264Header() const {
+ return absl::get<RTPVideoHeaderH264>(video_header_.video_type_header);
+ }
+
+ H264PacketizationTypes type_;
+ RTPVideoHeader video_header_;
+ bool first_fragment_ = false;
+ bool marker_bit_ = false;
+ uint32_t rtp_timestamp_ = 0;
+ uint16_t rtp_seq_num_ = 0;
+ std::vector<std::vector<uint8_t>> nalu_payloads_;
+};
+
+H264Packet::H264Packet(H264PacketizationTypes type) : type_(type) {
+ video_header_.video_type_header.emplace<RTPVideoHeaderH264>();
+}
+
+H264Packet& H264Packet::Idr(std::vector<uint8_t> payload) {
+ auto& h264_header = H264Header();
+ h264_header.nalus[h264_header.nalus_length++] = MakeNaluInfo(kIdr);
+ nalu_payloads_.push_back(std::move(payload));
+ return *this;
+}
+
+H264Packet& H264Packet::Slice(std::vector<uint8_t> payload) {
+ auto& h264_header = H264Header();
+ h264_header.nalus[h264_header.nalus_length++] = MakeNaluInfo(kSlice);
+ nalu_payloads_.push_back(std::move(payload));
+ return *this;
+}
+
+H264Packet& H264Packet::Sps(std::vector<uint8_t> payload) {
+ auto& h264_header = H264Header();
+ h264_header.nalus[h264_header.nalus_length++] = MakeNaluInfo(kSps);
+ nalu_payloads_.push_back(std::move(payload));
+ return *this;
+}
+
+H264Packet& H264Packet::SpsWithResolution(RenderResolution resolution,
+ std::vector<uint8_t> payload) {
+ auto& h264_header = H264Header();
+ h264_header.nalus[h264_header.nalus_length++] = MakeNaluInfo(kSps);
+ video_header_.width = resolution.Width();
+ video_header_.height = resolution.Height();
+ nalu_payloads_.push_back(std::move(payload));
+ return *this;
+}
+
+H264Packet& H264Packet::Pps(std::vector<uint8_t> payload) {
+ auto& h264_header = H264Header();
+ h264_header.nalus[h264_header.nalus_length++] = MakeNaluInfo(kPps);
+ nalu_payloads_.push_back(std::move(payload));
+ return *this;
+}
+
+H264Packet& H264Packet::Aud() {
+ auto& h264_header = H264Header();
+ h264_header.nalus[h264_header.nalus_length++] = MakeNaluInfo(kAud);
+ nalu_payloads_.push_back({});
+ return *this;
+}
+
+H264Packet& H264Packet::Marker() {
+ marker_bit_ = true;
+ return *this;
+}
+
+H264Packet& H264Packet::AsFirstFragment() {
+ first_fragment_ = true;
+ return *this;
+}
+
+H264Packet& H264Packet::Time(uint32_t rtp_timestamp) {
+ rtp_timestamp_ = rtp_timestamp;
+ return *this;
+}
+
+H264Packet& H264Packet::SeqNum(uint16_t rtp_seq_num) {
+ rtp_seq_num_ = rtp_seq_num;
+ return *this;
+}
+
+std::unique_ptr<H26xPacketBuffer::Packet> H264Packet::Build() {
+ auto res = std::make_unique<H26xPacketBuffer::Packet>();
+
+ auto& h264_header = H264Header();
+ switch (type_) {
+ case kH264FuA: {
+ RTC_CHECK_EQ(h264_header.nalus_length, 1);
+ res->video_payload = BuildFuaPayload();
+ break;
+ }
+ case kH264SingleNalu: {
+ RTC_CHECK_EQ(h264_header.nalus_length, 1);
+ res->video_payload = BuildSingleNaluPayload();
+ break;
+ }
+ case kH264StapA: {
+ RTC_CHECK_GT(h264_header.nalus_length, 1);
+ RTC_CHECK_LE(h264_header.nalus_length, kMaxNalusPerPacket);
+ res->video_payload = BuildStapAPayload();
+ break;
+ }
+ }
+
+ if (type_ == kH264FuA && !first_fragment_) {
+ h264_header.nalus_length = 0;
+ }
+
+ h264_header.packetization_type = type_;
+ res->marker_bit = marker_bit_;
+ res->video_header = video_header_;
+ res->timestamp = rtp_timestamp_;
+ res->seq_num = rtp_seq_num_;
+ res->video_header.codec = kVideoCodecH264;
+
+ return res;
+}
+
+rtc::CopyOnWriteBuffer H264Packet::BuildFuaPayload() const {
+ return rtc::CopyOnWriteBuffer(nalu_payloads_[0]);
+}
+
+rtc::CopyOnWriteBuffer H264Packet::BuildSingleNaluPayload() const {
+ rtc::CopyOnWriteBuffer res;
+ auto& h264_header = H264Header();
+ res.AppendData(&h264_header.nalus[0].type, 1);
+ res.AppendData(nalu_payloads_[0]);
+ return res;
+}
+
+rtc::CopyOnWriteBuffer H264Packet::BuildStapAPayload() const {
+ rtc::CopyOnWriteBuffer res;
+
+ const uint8_t indicator = H264::NaluType::kStapA;
+ res.AppendData(&indicator, 1);
+
+ auto& h264_header = H264Header();
+ for (size_t i = 0; i < h264_header.nalus_length; ++i) {
+    // The first two bytes indicate the NALU segment size.
+ uint8_t length_as_array[2] = {
+ 0, static_cast<uint8_t>(nalu_payloads_[i].size() + 1)};
+ res.AppendData(length_as_array);
+
+ res.AppendData(&h264_header.nalus[i].type, 1);
+ res.AppendData(nalu_payloads_[i]);
+ }
+ return res;
+}
+
+#ifdef RTC_ENABLE_H265
+class H265Packet {
+ public:
+ H265Packet() = default;
+
+ H265Packet& Idr(std::vector<uint8_t> payload = {9, 9, 9});
+ H265Packet& Slice(H265::NaluType type,
+ std::vector<uint8_t> payload = {9, 9, 9});
+ H265Packet& Vps(std::vector<uint8_t> payload = {9, 9, 9});
+ H265Packet& Sps(std::vector<uint8_t> payload = {9, 9, 9});
+ H265Packet& SpsWithResolution(RenderResolution resolution,
+ std::vector<uint8_t> payload = {9, 9, 9});
+ H265Packet& Pps(std::vector<uint8_t> payload = {9, 9, 9});
+ H265Packet& Aud();
+ H265Packet& Marker();
+ H265Packet& AsFirstFragment();
+ H265Packet& Time(uint32_t rtp_timestamp);
+ H265Packet& SeqNum(uint16_t rtp_seq_num);
+
+ std::unique_ptr<H26xPacketBuffer::Packet> Build();
+
+ private:
+ H265Packet& StartCode();
+
+ RTPVideoHeader video_header_;
+ bool first_fragment_ = false;
+ bool marker_bit_ = false;
+ uint32_t rtp_timestamp_ = 0;
+ uint16_t rtp_seq_num_ = 0;
+ std::vector<std::vector<uint8_t>> nalu_payloads_;
+};
+
+H265Packet& H265Packet::Idr(std::vector<uint8_t> payload) {
+ return Slice(H265::NaluType::kIdrNLp, std::move(payload));
+}
+
+H265Packet& H265Packet::Slice(H265::NaluType type,
+ std::vector<uint8_t> payload) {
+ StartCode();
+ // Nalu header. Assume layer ID is 0 and TID is 2.
+ nalu_payloads_.push_back({static_cast<uint8_t>(type << 1), 0x02});
+ nalu_payloads_.push_back(std::move(payload));
+ return *this;
+}
+
+H265Packet& H265Packet::Vps(std::vector<uint8_t> payload) {
+ return Slice(H265::NaluType::kVps, std::move(payload));
+}
+
+H265Packet& H265Packet::Sps(std::vector<uint8_t> payload) {
+ return Slice(H265::NaluType::kSps, std::move(payload));
+}
+
+H265Packet& H265Packet::SpsWithResolution(RenderResolution resolution,
+ std::vector<uint8_t> payload) {
+ video_header_.width = resolution.Width();
+ video_header_.height = resolution.Height();
+ return Sps(std::move(payload));
+}
+
+H265Packet& H265Packet::Pps(std::vector<uint8_t> payload) {
+ return Slice(H265::NaluType::kPps, std::move(payload));
+}
+
+H265Packet& H265Packet::Aud() {
+ return Slice(H265::NaluType::kAud, {});
+}
+
+H265Packet& H265Packet::Marker() {
+ marker_bit_ = true;
+ return *this;
+}
+
+H265Packet& H265Packet::StartCode() {
+ nalu_payloads_.push_back({0x00, 0x00, 0x00, 0x01});
+ return *this;
+}
+
+std::unique_ptr<H26xPacketBuffer::Packet> H265Packet::Build() {
+ auto res = std::make_unique<H26xPacketBuffer::Packet>();
+ res->marker_bit = marker_bit_;
+ res->video_header = video_header_;
+ res->timestamp = rtp_timestamp_;
+ res->seq_num = rtp_seq_num_;
+ res->video_header.codec = kVideoCodecH265;
+ res->video_payload = rtc::CopyOnWriteBuffer();
+ for (const auto& payload : nalu_payloads_) {
+ res->video_payload.AppendData(payload);
+ }
+
+ return res;
+}
+
+H265Packet& H265Packet::AsFirstFragment() {
+ first_fragment_ = true;
+ return *this;
+}
+
+H265Packet& H265Packet::Time(uint32_t rtp_timestamp) {
+ rtp_timestamp_ = rtp_timestamp;
+ return *this;
+}
+
+H265Packet& H265Packet::SeqNum(uint16_t rtp_seq_num) {
+ rtp_seq_num_ = rtp_seq_num;
+ return *this;
+}
+#endif
+
+rtc::ArrayView<const uint8_t> PacketPayload(
+ const std::unique_ptr<H26xPacketBuffer::Packet>& packet) {
+ return packet->video_payload;
+}
+
+std::vector<uint8_t> FlatVector(
+ const std::vector<std::vector<uint8_t>>& elems) {
+ std::vector<uint8_t> res;
+ for (const auto& elem : elems) {
+ res.insert(res.end(), elem.begin(), elem.end());
+ }
+ return res;
+}
+
+TEST(H26xPacketBufferTest, IdrIsKeyframe) {
+ H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/true);
+
+ EXPECT_THAT(
+ packet_buffer
+ .InsertPacket(H264Packet(kH264SingleNalu).Idr().Marker().Build())
+ .packets,
+ SizeIs(1));
+}
+
+TEST(H26xPacketBufferTest, IdrIsNotKeyframe) {
+ H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false);
+
+ EXPECT_THAT(
+ packet_buffer
+ .InsertPacket(H264Packet(kH264SingleNalu).Idr().Marker().Build())
+ .packets,
+ IsEmpty());
+}
+
+TEST(H26xPacketBufferTest, IdrIsKeyframeFuaRequiresFirstFragmet) {
+ H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/true);
+
+ // Not marked as the first fragment
+ EXPECT_THAT(
+ packet_buffer
+ .InsertPacket(H264Packet(kH264FuA).Idr().SeqNum(0).Time(0).Build())
+ .packets,
+ IsEmpty());
+
+ EXPECT_THAT(
+ packet_buffer
+ .InsertPacket(
+ H264Packet(kH264FuA).Idr().SeqNum(1).Time(0).Marker().Build())
+ .packets,
+ IsEmpty());
+
+ // Marked as first fragment
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(H264Packet(kH264FuA)
+ .Idr()
+ .SeqNum(2)
+ .Time(1)
+ .AsFirstFragment()
+ .Build())
+ .packets,
+ IsEmpty());
+
+ EXPECT_THAT(
+ packet_buffer
+ .InsertPacket(
+ H264Packet(kH264FuA).Idr().SeqNum(3).Time(1).Marker().Build())
+ .packets,
+ SizeIs(2));
+}
+
+TEST(H26xPacketBufferTest, SpsPpsIdrIsKeyframeSingleNalus) {
+ H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false);
+
+ RTC_UNUSED(packet_buffer.InsertPacket(
+ H264Packet(kH264SingleNalu).Sps().SeqNum(0).Time(0).Build()));
+ RTC_UNUSED(packet_buffer.InsertPacket(
+ H264Packet(kH264SingleNalu).Pps().SeqNum(1).Time(0).Build()));
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(H264Packet(kH264SingleNalu)
+ .Idr()
+ .SeqNum(2)
+ .Time(0)
+ .Marker()
+ .Build())
+ .packets,
+ SizeIs(3));
+}
+
+TEST(H26xPacketBufferTest, PpsIdrIsNotKeyframeSingleNalus) {
+ H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false);
+
+ RTC_UNUSED(packet_buffer.InsertPacket(
+ H264Packet(kH264SingleNalu).Pps().SeqNum(0).Time(0).Build()));
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(H264Packet(kH264SingleNalu)
+ .Idr()
+ .SeqNum(1)
+ .Time(0)
+ .Marker()
+ .Build())
+ .packets,
+ IsEmpty());
+}
+
+TEST(H26xPacketBufferTest, SpsIdrIsNotKeyframeSingleNalus) {
+ H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false);
+
+ RTC_UNUSED(packet_buffer.InsertPacket(
+ H264Packet(kH264SingleNalu).Sps().SeqNum(0).Time(0).Build()));
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(H264Packet(kH264SingleNalu)
+ .Idr()
+ .SeqNum(1)
+ .Time(0)
+ .Marker()
+ .Build())
+ .packets,
+ IsEmpty());
+}
+
+TEST(H26xPacketBufferTest, SpsPpsIdrIsKeyframeStapA) {
+ H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false);
+
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(H264Packet(kH264StapA)
+ .Sps()
+ .Pps()
+ .Idr()
+ .SeqNum(0)
+ .Time(0)
+ .Marker()
+ .Build())
+ .packets,
+ SizeIs(1));
+}
+
+TEST(H26xPacketBufferTest, PpsIdrIsNotKeyframeStapA) {
+ H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false);
+
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(H264Packet(kH264StapA)
+ .Pps()
+ .Idr()
+ .SeqNum(0)
+ .Time(0)
+ .Marker()
+ .Build())
+ .packets,
+ IsEmpty());
+}
+
+TEST(H26xPacketBufferTest, SpsIdrIsNotKeyframeStapA) {
+ H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false);
+
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(H264Packet(kH264StapA)
+ .Sps()
+ .Idr()
+ .SeqNum(2)
+ .Time(2)
+ .Marker()
+ .Build())
+ .packets,
+ IsEmpty());
+
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(H264Packet(kH264StapA)
+ .Sps()
+ .Pps()
+ .Idr()
+ .SeqNum(3)
+ .Time(3)
+ .Marker()
+ .Build())
+ .packets,
+ SizeIs(1));
+}
+
+TEST(H26xPacketBufferTest, InsertingSpsPpsLastCompletesKeyframe) {
+ H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false);
+
+ RTC_UNUSED(packet_buffer.InsertPacket(
+ H264Packet(kH264SingleNalu).Idr().SeqNum(2).Time(1).Marker().Build()));
+
+ EXPECT_THAT(
+ packet_buffer
+ .InsertPacket(
+ H264Packet(kH264StapA).Sps().Pps().SeqNum(1).Time(1).Build())
+ .packets,
+ SizeIs(2));
+}
+
+TEST(H26xPacketBufferTest, InsertingMidFuaCompletesFrame) {
+ H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false);
+
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(H264Packet(kH264StapA)
+ .Sps()
+ .Pps()
+ .Idr()
+ .SeqNum(0)
+ .Time(0)
+ .Marker()
+ .Build())
+ .packets,
+ SizeIs(1));
+
+ RTC_UNUSED(packet_buffer.InsertPacket(H264Packet(kH264FuA)
+ .Slice()
+ .SeqNum(1)
+ .Time(1)
+ .AsFirstFragment()
+ .Build()));
+ RTC_UNUSED(packet_buffer.InsertPacket(
+ H264Packet(kH264FuA).Slice().SeqNum(3).Time(1).Marker().Build()));
+ EXPECT_THAT(
+ packet_buffer
+ .InsertPacket(H264Packet(kH264FuA).Slice().SeqNum(2).Time(1).Build())
+ .packets,
+ SizeIs(3));
+}
+
+TEST(H26xPacketBufferTest, SeqNumJumpDoesNotCompleteFrame) {
+ H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false);
+
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(H264Packet(kH264StapA)
+ .Sps()
+ .Pps()
+ .Idr()
+ .SeqNum(0)
+ .Time(0)
+ .Marker()
+ .Build())
+ .packets,
+ SizeIs(1));
+
+ EXPECT_THAT(
+ packet_buffer
+ .InsertPacket(H264Packet(kH264FuA).Slice().SeqNum(1).Time(1).Build())
+ .packets,
+ IsEmpty());
+
+ // Add `kBufferSize` to make the index of the sequence number wrap and end up
+ // where the packet with sequence number 2 would have ended up.
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(H264Packet(kH264FuA)
+ .Slice()
+ .SeqNum(2 + kBufferSize)
+ .Time(3)
+ .Marker()
+ .Build())
+ .packets,
+ IsEmpty());
+}
+
+TEST(H26xPacketBufferTest, OldFramesAreNotCompletedAfterBufferWrap) {
+ H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false);
+
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(H264Packet(kH264SingleNalu)
+ .Slice()
+ .SeqNum(1)
+ .Time(1)
+ .Marker()
+ .Build())
+ .packets,
+ IsEmpty());
+
+  // New keyframe, precedes the packet with sequence number 1 in the buffer.
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(H264Packet(kH264StapA)
+ .Sps()
+ .Pps()
+ .Idr()
+ .SeqNum(kBufferSize)
+ .Time(kBufferSize)
+ .Marker()
+ .Build())
+ .packets,
+ SizeIs(1));
+}
+
+TEST(H26xPacketBufferTest, OldPacketsDontBlockNewPackets) {
+ H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false);
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(H264Packet(kH264StapA)
+ .Sps()
+ .Pps()
+ .Idr()
+ .SeqNum(kBufferSize)
+ .Time(kBufferSize)
+ .Marker()
+ .Build())
+ .packets,
+ SizeIs(1));
+
+ RTC_UNUSED(packet_buffer.InsertPacket(H264Packet(kH264FuA)
+ .Slice()
+ .SeqNum(kBufferSize + 1)
+ .Time(kBufferSize + 1)
+ .AsFirstFragment()
+ .Build()));
+
+ RTC_UNUSED(packet_buffer.InsertPacket(H264Packet(kH264FuA)
+ .Slice()
+ .SeqNum(kBufferSize + 3)
+ .Time(kBufferSize + 1)
+ .Marker()
+ .Build()));
+ EXPECT_THAT(
+ packet_buffer
+ .InsertPacket(H264Packet(kH264FuA).Slice().SeqNum(2).Time(2).Build())
+ .packets,
+ IsEmpty());
+
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(H264Packet(kH264FuA)
+ .Slice()
+ .SeqNum(kBufferSize + 2)
+ .Time(kBufferSize + 1)
+ .Build())
+ .packets,
+ SizeIs(3));
+}
+
+TEST(H26xPacketBufferTest, OldPacketDoesntCompleteFrame) {
+ H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false);
+
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(H264Packet(kH264StapA)
+ .Sps()
+ .Pps()
+ .Idr()
+ .SeqNum(kBufferSize)
+ .Time(kBufferSize)
+ .Marker()
+ .Build())
+ .packets,
+ SizeIs(1));
+
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(H264Packet(kH264FuA)
+ .Slice()
+ .SeqNum(kBufferSize + 3)
+ .Time(kBufferSize + 1)
+ .Marker()
+ .Build())
+ .packets,
+ IsEmpty());
+
+ EXPECT_THAT(
+ packet_buffer
+ .InsertPacket(
+ H264Packet(kH264FuA).Slice().SeqNum(2).Time(2).Marker().Build())
+ .packets,
+ IsEmpty());
+
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(H264Packet(kH264FuA)
+ .Slice()
+ .SeqNum(kBufferSize + 1)
+ .Time(kBufferSize + 1)
+ .AsFirstFragment()
+ .Build())
+ .packets,
+ IsEmpty());
+}
+
+TEST(H26xPacketBufferTest, FrameBoundariesAreSet) {
+ H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false);
+
+ auto key = packet_buffer.InsertPacket(H264Packet(kH264StapA)
+ .Sps()
+ .Pps()
+ .Idr()
+ .SeqNum(1)
+ .Time(1)
+ .Marker()
+ .Build());
+
+ ASSERT_THAT(key.packets, SizeIs(1));
+ EXPECT_TRUE(key.packets[0]->video_header.is_first_packet_in_frame);
+ EXPECT_TRUE(key.packets[0]->video_header.is_last_packet_in_frame);
+
+ RTC_UNUSED(packet_buffer.InsertPacket(
+ H264Packet(kH264FuA).Slice().SeqNum(2).Time(2).Build()));
+ RTC_UNUSED(packet_buffer.InsertPacket(
+ H264Packet(kH264FuA).Slice().SeqNum(3).Time(2).Build()));
+ auto delta = packet_buffer.InsertPacket(
+ H264Packet(kH264FuA).Slice().SeqNum(4).Time(2).Marker().Build());
+
+ ASSERT_THAT(delta.packets, SizeIs(3));
+ EXPECT_TRUE(delta.packets[0]->video_header.is_first_packet_in_frame);
+ EXPECT_FALSE(delta.packets[0]->video_header.is_last_packet_in_frame);
+
+ EXPECT_FALSE(delta.packets[1]->video_header.is_first_packet_in_frame);
+ EXPECT_FALSE(delta.packets[1]->video_header.is_last_packet_in_frame);
+
+ EXPECT_FALSE(delta.packets[2]->video_header.is_first_packet_in_frame);
+ EXPECT_TRUE(delta.packets[2]->video_header.is_last_packet_in_frame);
+}
+
+TEST(H26xPacketBufferTest, ResolutionSetOnFirstPacket) {
+ H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false);
+
+ RTC_UNUSED(packet_buffer.InsertPacket(
+ H264Packet(kH264SingleNalu).Aud().SeqNum(1).Time(1).Build()));
+ auto res = packet_buffer.InsertPacket(H264Packet(kH264StapA)
+ .SpsWithResolution({320, 240})
+ .Pps()
+ .Idr()
+ .SeqNum(2)
+ .Time(1)
+ .Marker()
+ .Build());
+
+ ASSERT_THAT(res.packets, SizeIs(2));
+ EXPECT_THAT(res.packets[0]->video_header.width, Eq(320));
+ EXPECT_THAT(res.packets[0]->video_header.height, Eq(240));
+}
+
+TEST(H26xPacketBufferTest, KeyframeAndDeltaFrameSetOnFirstPacket) {
+ H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false);
+
+ RTC_UNUSED(packet_buffer.InsertPacket(
+ H264Packet(kH264SingleNalu).Aud().SeqNum(1).Time(1).Build()));
+ auto key = packet_buffer.InsertPacket(H264Packet(kH264StapA)
+ .Sps()
+ .Pps()
+ .Idr()
+ .SeqNum(2)
+ .Time(1)
+ .Marker()
+ .Build());
+
+ auto delta = packet_buffer.InsertPacket(
+ H264Packet(kH264SingleNalu).Slice().SeqNum(3).Time(2).Marker().Build());
+
+ ASSERT_THAT(key.packets, SizeIs(2));
+ EXPECT_THAT(key.packets[0]->video_header.frame_type,
+ Eq(VideoFrameType::kVideoFrameKey));
+ ASSERT_THAT(delta.packets, SizeIs(1));
+ EXPECT_THAT(delta.packets[0]->video_header.frame_type,
+ Eq(VideoFrameType::kVideoFrameDelta));
+}
+
+TEST(H26xPacketBufferTest, RtpSeqNumWrap) {
+ H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false);
+
+ RTC_UNUSED(packet_buffer.InsertPacket(
+ H264Packet(kH264StapA).Sps().Pps().SeqNum(0xffff).Time(0).Build()));
+
+ RTC_UNUSED(packet_buffer.InsertPacket(
+ H264Packet(kH264FuA).Idr().SeqNum(0).Time(0).Build()));
+ EXPECT_THAT(
+ packet_buffer
+ .InsertPacket(
+ H264Packet(kH264FuA).Idr().SeqNum(1).Time(0).Marker().Build())
+ .packets,
+ SizeIs(3));
+}
+
+TEST(H26xPacketBufferTest, StapAFixedBitstream) {
+ H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false);
+
+ auto packets = packet_buffer
+ .InsertPacket(H264Packet(kH264StapA)
+ .Sps({1, 2, 3})
+ .Pps({4, 5, 6})
+ .Idr({7, 8, 9})
+ .SeqNum(0)
+ .Time(0)
+ .Marker()
+ .Build())
+ .packets;
+
+ ASSERT_THAT(packets, SizeIs(1));
+ EXPECT_THAT(PacketPayload(packets[0]),
+ ElementsAreArray(FlatVector({StartCode(),
+ {kSps, 1, 2, 3},
+ StartCode(),
+ {kPps, 4, 5, 6},
+ StartCode(),
+ {kIdr, 7, 8, 9}})));
+}
+
+TEST(H26xPacketBufferTest, SingleNaluFixedBitstream) {
+ H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false);
+
+ RTC_UNUSED(packet_buffer.InsertPacket(
+ H264Packet(kH264SingleNalu).Sps({1, 2, 3}).SeqNum(0).Time(0).Build()));
+ RTC_UNUSED(packet_buffer.InsertPacket(
+ H264Packet(kH264SingleNalu).Pps({4, 5, 6}).SeqNum(1).Time(0).Build()));
+ auto packets = packet_buffer
+ .InsertPacket(H264Packet(kH264SingleNalu)
+ .Idr({7, 8, 9})
+ .SeqNum(2)
+ .Time(0)
+ .Marker()
+ .Build())
+ .packets;
+
+ ASSERT_THAT(packets, SizeIs(3));
+ EXPECT_THAT(PacketPayload(packets[0]),
+ ElementsAreArray(FlatVector({StartCode(), {kSps, 1, 2, 3}})));
+ EXPECT_THAT(PacketPayload(packets[1]),
+ ElementsAreArray(FlatVector({StartCode(), {kPps, 4, 5, 6}})));
+ EXPECT_THAT(PacketPayload(packets[2]),
+ ElementsAreArray(FlatVector({StartCode(), {kIdr, 7, 8, 9}})));
+}
+
+TEST(H26xPacketBufferTest, StapaAndFuaFixedBitstream) {
+ H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false);
+
+ RTC_UNUSED(packet_buffer.InsertPacket(H264Packet(kH264StapA)
+ .Sps({1, 2, 3})
+ .Pps({4, 5, 6})
+ .SeqNum(0)
+ .Time(0)
+ .Build()));
+ RTC_UNUSED(packet_buffer.InsertPacket(H264Packet(kH264FuA)
+ .Idr({8, 8, 8})
+ .SeqNum(1)
+ .Time(0)
+ .AsFirstFragment()
+ .Build()));
+ auto packets = packet_buffer
+ .InsertPacket(H264Packet(kH264FuA)
+ .Idr({9, 9, 9})
+ .SeqNum(2)
+ .Time(0)
+ .Marker()
+ .Build())
+ .packets;
+
+ ASSERT_THAT(packets, SizeIs(3));
+ EXPECT_THAT(
+ PacketPayload(packets[0]),
+ ElementsAreArray(FlatVector(
+ {StartCode(), {kSps, 1, 2, 3}, StartCode(), {kPps, 4, 5, 6}})));
+ EXPECT_THAT(PacketPayload(packets[1]),
+ ElementsAreArray(FlatVector({StartCode(), {8, 8, 8}})));
+ // Third is a continuation of second, so only the payload is expected.
+ EXPECT_THAT(PacketPayload(packets[2]),
+ ElementsAreArray(FlatVector({{9, 9, 9}})));
+}
+
+TEST(H26xPacketBufferTest, FullPacketBufferDoesNotBlockKeyframe) {
+ H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false);
+
+ for (int i = 0; i < kBufferSize; ++i) {
+ EXPECT_THAT(
+ packet_buffer
+ .InsertPacket(
+ H264Packet(kH264SingleNalu).Slice().SeqNum(i).Time(0).Build())
+ .packets,
+ IsEmpty());
+ }
+
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(H264Packet(kH264StapA)
+ .Sps()
+ .Pps()
+ .Idr()
+ .SeqNum(kBufferSize)
+ .Time(1)
+ .Marker()
+ .Build())
+ .packets,
+ SizeIs(1));
+}
+
+TEST(H26xPacketBufferTest, TooManyNalusInPacket) {
+ H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false);
+
+ std::unique_ptr<H26xPacketBuffer::Packet> packet(H264Packet(kH264StapA)
+ .Sps()
+ .Pps()
+ .Idr()
+ .SeqNum(1)
+ .Time(1)
+ .Marker()
+ .Build());
+ auto& h264_header =
+ absl::get<RTPVideoHeaderH264>(packet->video_header.video_type_header);
+ h264_header.nalus_length = kMaxNalusPerPacket + 1;
+
+ EXPECT_THAT(packet_buffer.InsertPacket(std::move(packet)).packets, IsEmpty());
+}
+
+#ifdef RTC_ENABLE_H265
+TEST(H26xPacketBufferTest, H265VpsSpsPpsIdrIsKeyframe) {
+ H26xPacketBuffer packet_buffer(/*allow_idr_only_keyframes=*/false);
+
+ EXPECT_THAT(
+ packet_buffer
+ .InsertPacket(H265Packet().Vps().Sps().Pps().Idr().Marker().Build())
+ .packets,
+ SizeIs(1));
+}
+
+TEST(H26xPacketBufferTest, H265IrapIsNotKeyframe) {
+ std::vector<const H265::NaluType> irap_types = {
+ H265::NaluType::kBlaWLp, H265::NaluType::kBlaWRadl,
+ H265::NaluType::kBlaNLp, H265::NaluType::kIdrWRadl,
+ H265::NaluType::kIdrNLp, H265::NaluType::kCra,
+ H265::NaluType::kRsvIrapVcl23};
+ for (const H265::NaluType type : irap_types) {
+ H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false);
+
+ EXPECT_THAT(
+ packet_buffer.InsertPacket(H265Packet().Slice(type).Marker().Build())
+ .packets,
+ IsEmpty());
+ }
+}
+
+TEST(H26xPacketBufferTest, H265IdrIsNotKeyFrame) {
+ H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false);
+
+ EXPECT_THAT(
+ packet_buffer.InsertPacket(H265Packet().Idr().Marker().Build()).packets,
+ IsEmpty());
+}
+
+TEST(H26xPacketBufferTest, H265SpsPpsIdrIsNotKeyFrame) {
+ H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false);
+
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(H265Packet().Sps().Pps().Idr().Marker().Build())
+ .packets,
+ IsEmpty());
+}
+
+TEST(H26xPacketBufferTest, H265VpsPpsIdrIsNotKeyFrame) {
+ H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false);
+
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(H265Packet().Vps().Pps().Idr().Marker().Build())
+ .packets,
+ IsEmpty());
+}
+
+TEST(H26xPacketBufferTest, H265VpsSpsIdrIsNotKeyFrame) {
+ H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false);
+
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(H265Packet().Vps().Sps().Idr().Marker().Build())
+ .packets,
+ IsEmpty());
+}
+
+TEST(H26xPacketBufferTest, H265VpsIdrIsNotKeyFrame) {
+ H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false);
+
+ EXPECT_THAT(
+ packet_buffer.InsertPacket(H265Packet().Vps().Idr().Marker().Build())
+ .packets,
+ IsEmpty());
+}
+
+TEST(H26xPacketBufferTest, H265SpsIdrIsNotKeyFrame) {
+ H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false);
+
+ EXPECT_THAT(
+ packet_buffer.InsertPacket(H265Packet().Sps().Idr().Marker().Build())
+ .packets,
+ IsEmpty());
+}
+
+TEST(H26xPacketBufferTest, H265PpsIdrIsNotKeyFrame) {
+ H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false);
+
+ EXPECT_THAT(
+ packet_buffer.InsertPacket(H265Packet().Pps().Idr().Marker().Build())
+ .packets,
+ IsEmpty());
+}
+
+TEST(H26xPacketBufferTest, H265ResolutionSetOnSpsPacket) {
+ H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false);
+
+ RTC_UNUSED(
+ packet_buffer.InsertPacket(H265Packet().Aud().SeqNum(1).Time(1).Build()));
+ auto res = packet_buffer.InsertPacket(H265Packet()
+ .Vps()
+ .SpsWithResolution({320, 240})
+ .Pps()
+ .Idr()
+ .SeqNum(2)
+ .Time(1)
+ .Marker()
+ .Build());
+
+ ASSERT_THAT(res.packets, SizeIs(2));
+ EXPECT_THAT(res.packets[0]->video_header.width, Eq(320));
+ EXPECT_THAT(res.packets[0]->video_header.height, Eq(240));
+}
+
+TEST(H26xPacketBufferTest, H265InsertingVpsSpsPpsLastCompletesKeyframe) {
+ H26xPacketBuffer packet_buffer(/*h264_allow_idr_only_keyframes=*/false);
+
+ RTC_UNUSED(packet_buffer.InsertPacket(
+ H265Packet().Idr().SeqNum(2).Time(1).Marker().Build()));
+
+ EXPECT_THAT(packet_buffer
+ .InsertPacket(
+ H265Packet().Vps().Sps().Pps().SeqNum(1).Time(1).Build())
+ .packets,
+ SizeIs(2));
+}
+#endif // RTC_ENABLE_H265
+
+} // namespace
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/include/video_error_codes.h b/third_party/libwebrtc/modules/video_coding/include/video_error_codes.h
index 17146ce205..d7d54f3989 100644
--- a/third_party/libwebrtc/modules/video_coding/include/video_error_codes.h
+++ b/third_party/libwebrtc/modules/video_coding/include/video_error_codes.h
@@ -11,10 +11,6 @@
#ifndef MODULES_VIDEO_CODING_INCLUDE_VIDEO_ERROR_CODES_H_
#define MODULES_VIDEO_CODING_INCLUDE_VIDEO_ERROR_CODES_H_
-// NOTE: in sync with video_coding_module_defines.h
-
-// Define return values
-
#define WEBRTC_VIDEO_CODEC_TARGET_BITRATE_OVERSHOOT 5
#define WEBRTC_VIDEO_CODEC_OK_REQUEST_KEYFRAME 4
#define WEBRTC_VIDEO_CODEC_NO_OUTPUT 1
diff --git a/third_party/libwebrtc/modules/video_coding/include/video_error_codes_utils.cc b/third_party/libwebrtc/modules/video_coding/include/video_error_codes_utils.cc
new file mode 100644
index 0000000000..7e2c08d518
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/include/video_error_codes_utils.cc
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2024 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/include/video_error_codes_utils.h"
+
+#include "modules/video_coding/include/video_error_codes.h"
+
+namespace webrtc {
+
+const char* WebRtcVideoCodecErrorToString(int32_t error_code) {
+ switch (error_code) {
+ case WEBRTC_VIDEO_CODEC_TARGET_BITRATE_OVERSHOOT:
+ return "WEBRTC_VIDEO_CODEC_TARGET_BITRATE_OVERSHOOT";
+ case WEBRTC_VIDEO_CODEC_OK_REQUEST_KEYFRAME:
+ return "WEBRTC_VIDEO_CODEC_OK_REQUEST_KEYFRAME";
+ case WEBRTC_VIDEO_CODEC_NO_OUTPUT:
+ return "WEBRTC_VIDEO_CODEC_NO_OUTPUT";
+ case WEBRTC_VIDEO_CODEC_ERROR:
+ return "WEBRTC_VIDEO_CODEC_ERROR";
+ case WEBRTC_VIDEO_CODEC_MEMORY:
+ return "WEBRTC_VIDEO_CODEC_MEMORY";
+ case WEBRTC_VIDEO_CODEC_ERR_PARAMETER:
+ return "WEBRTC_VIDEO_CODEC_ERR_PARAMETER";
+ case WEBRTC_VIDEO_CODEC_TIMEOUT:
+ return "WEBRTC_VIDEO_CODEC_TIMEOUT";
+ case WEBRTC_VIDEO_CODEC_UNINITIALIZED:
+ return "WEBRTC_VIDEO_CODEC_UNINITIALIZED";
+ case WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE:
+ return "WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE";
+ case WEBRTC_VIDEO_CODEC_ERR_SIMULCAST_PARAMETERS_NOT_SUPPORTED:
+ return "WEBRTC_VIDEO_CODEC_ERR_SIMULCAST_PARAMETERS_NOT_SUPPORTED";
+ case WEBRTC_VIDEO_CODEC_ENCODER_FAILURE:
+ return "WEBRTC_VIDEO_CODEC_ENCODER_FAILURE";
+ default:
+ return "WEBRTC_VIDEO_CODEC_UNKNOWN";
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/include/video_error_codes_utils.h b/third_party/libwebrtc/modules/video_coding/include/video_error_codes_utils.h
new file mode 100644
index 0000000000..ae17e29636
--- /dev/null
+++ b/third_party/libwebrtc/modules/video_coding/include/video_error_codes_utils.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2024 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_INCLUDE_VIDEO_ERROR_CODES_UTILS_H_
+#define MODULES_VIDEO_CODING_INCLUDE_VIDEO_ERROR_CODES_UTILS_H_
+
+#include <stdint.h>
+
+namespace webrtc {
+
+const char* WebRtcVideoCodecErrorToString(int32_t error_code);
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_INCLUDE_VIDEO_ERROR_CODES_UTILS_H_
diff --git a/third_party/libwebrtc/modules/video_coding/utility/simulcast_test_fixture_impl.cc b/third_party/libwebrtc/modules/video_coding/utility/simulcast_test_fixture_impl.cc
index c6e51e8068..ac076fde71 100644
--- a/third_party/libwebrtc/modules/video_coding/utility/simulcast_test_fixture_impl.cc
+++ b/third_party/libwebrtc/modules/video_coding/utility/simulcast_test_fixture_impl.cc
@@ -15,6 +15,8 @@
#include <memory>
#include <vector>
+#include "api/environment/environment.h"
+#include "api/environment/environment_factory.h"
#include "api/video/encoded_image.h"
#include "api/video_codecs/sdp_video_format.h"
#include "api/video_codecs/video_encoder.h"
@@ -258,8 +260,9 @@ SimulcastTestFixtureImpl::SimulcastTestFixtureImpl(
std::unique_ptr<VideoDecoderFactory> decoder_factory,
SdpVideoFormat video_format)
: codec_type_(PayloadStringToCodecType(video_format.name)) {
+ Environment env = CreateEnvironment();
encoder_ = encoder_factory->CreateVideoEncoder(video_format);
- decoder_ = decoder_factory->CreateVideoDecoder(video_format);
+ decoder_ = decoder_factory->Create(env, video_format);
SetUpCodec((codec_type_ == kVideoCodecVP8 || codec_type_ == kVideoCodecH264)
? kDefaultTemporalLayerProfile
: kNoTemporalLayerProfile);
diff --git a/third_party/libwebrtc/modules/video_coding/video_codec_initializer_unittest.cc b/third_party/libwebrtc/modules/video_coding/video_codec_initializer_unittest.cc
index b0edab6004..60ef7aece0 100644
--- a/third_party/libwebrtc/modules/video_coding/video_codec_initializer_unittest.cc
+++ b/third_party/libwebrtc/modules/video_coding/video_codec_initializer_unittest.cc
@@ -631,4 +631,25 @@ TEST_F(VideoCodecInitializerTest, Vp9TwoSpatialLayersBitratesAreConsistent) {
codec.spatialLayers[0].maxBitrate);
}
+TEST_F(VideoCodecInitializerTest, UpdatesVp9SpecificFieldsWithScalabilityMode) {
+ VideoEncoderConfig config;
+ config.codec_type = VideoCodecType::kVideoCodecVP9;
+ std::vector<VideoStream> streams = {DefaultStream()};
+ streams[0].scalability_mode = ScalabilityMode::kL2T3_KEY;
+
+ VideoCodec codec;
+ EXPECT_TRUE(VideoCodecInitializer::SetupCodec(config, streams, &codec));
+
+ EXPECT_EQ(codec.VP9()->numberOfSpatialLayers, 2u);
+ EXPECT_EQ(codec.VP9()->numberOfTemporalLayers, 3u);
+ EXPECT_EQ(codec.VP9()->interLayerPred, InterLayerPredMode::kOnKeyPic);
+
+ streams[0].scalability_mode = ScalabilityMode::kS3T1;
+ EXPECT_TRUE(VideoCodecInitializer::SetupCodec(config, streams, &codec));
+
+ EXPECT_EQ(codec.VP9()->numberOfSpatialLayers, 3u);
+ EXPECT_EQ(codec.VP9()->numberOfTemporalLayers, 1u);
+ EXPECT_EQ(codec.VP9()->interLayerPred, InterLayerPredMode::kOff);
+}
+
} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/video_coding/video_codec_interface_gn/moz.build b/third_party/libwebrtc/modules/video_coding/video_codec_interface_gn/moz.build
index 141def9090..c0d139fc6d 100644
--- a/third_party/libwebrtc/modules/video_coding/video_codec_interface_gn/moz.build
+++ b/third_party/libwebrtc/modules/video_coding/video_codec_interface_gn/moz.build
@@ -32,6 +32,7 @@ LOCAL_INCLUDES += [
UNIFIED_SOURCES += [
"/third_party/libwebrtc/modules/video_coding/include/video_codec_interface.cc",
+ "/third_party/libwebrtc/modules/video_coding/include/video_error_codes_utils.cc",
"/third_party/libwebrtc/modules/video_coding/video_coding_defines.cc"
]