author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-12 05:35:37 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-12 05:35:37 +0000
commit    a90a5cba08fdf6c0ceb95101c275108a152a3aed (patch)
tree      532507288f3defd7f4dcf1af49698bcb76034855 /dom/media
parent    Adding debian version 126.0.1-1. (diff)
Merging upstream version 127.0.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'dom/media')
-rw-r--r-- dom/media/AsyncLogger.h | 2
-rw-r--r-- dom/media/AudioInputSource.cpp | 105
-rw-r--r-- dom/media/AudioInputSource.h | 13
-rw-r--r-- dom/media/AudioRingBuffer.cpp | 14
-rw-r--r-- dom/media/AudioRingBuffer.h | 10
-rw-r--r-- dom/media/AudioStream.cpp | 3
-rw-r--r-- dom/media/AudioStream.h | 2
-rw-r--r-- dom/media/ChannelMediaResource.cpp | 14
-rw-r--r-- dom/media/CubebInputStream.cpp | 7
-rw-r--r-- dom/media/CubebInputStream.h | 3
-rw-r--r-- dom/media/CubebUtils.cpp | 45
-rw-r--r-- dom/media/CubebUtils.h | 9
-rw-r--r-- dom/media/DeviceInputTrack.cpp | 57
-rw-r--r-- dom/media/DeviceInputTrack.h | 11
-rw-r--r-- dom/media/EncoderTraits.cpp | 2
-rw-r--r-- dom/media/ExternalEngineStateMachine.h | 4
-rw-r--r-- dom/media/GraphDriver.cpp | 141
-rw-r--r-- dom/media/GraphDriver.h | 48
-rw-r--r-- dom/media/ImageConversion.cpp (renamed from dom/media/ImageToI420.cpp) | 55
-rw-r--r-- dom/media/ImageConversion.h (renamed from dom/media/ImageToI420.h) | 6
-rw-r--r-- dom/media/MediaFormatReader.cpp | 71
-rw-r--r-- dom/media/MediaFormatReader.h | 12
-rw-r--r-- dom/media/MediaInfo.h | 27
-rw-r--r-- dom/media/MediaManager.cpp | 257
-rw-r--r-- dom/media/MediaManager.h | 2
-rw-r--r-- dom/media/MediaResult.h | 2
-rw-r--r-- dom/media/MediaTimer.cpp | 4
-rw-r--r-- dom/media/MediaTimer.h | 6
-rw-r--r-- dom/media/MediaTrackGraph.cpp | 63
-rw-r--r-- dom/media/MediaTrackGraph.h | 18
-rw-r--r-- dom/media/MediaTrackGraphImpl.h | 13
-rw-r--r-- dom/media/SeekJob.cpp | 4
-rw-r--r-- dom/media/SeekJob.h | 4
-rw-r--r-- dom/media/VideoFrameConverter.h | 2
-rw-r--r-- dom/media/VideoUtils.cpp | 21
-rw-r--r-- dom/media/VideoUtils.h | 5
-rw-r--r-- dom/media/WavDumper.h | 15
-rw-r--r-- dom/media/doctor/DDLifetime.cpp | 1
-rw-r--r-- dom/media/doctor/test/gtest/moz.build | 7
-rw-r--r-- dom/media/driftcontrol/AudioDriftCorrection.cpp | 9
-rw-r--r-- dom/media/driftcontrol/AudioResampler.cpp | 17
-rw-r--r-- dom/media/driftcontrol/AudioResampler.h | 21
-rw-r--r-- dom/media/driftcontrol/DriftController.cpp | 32
-rw-r--r-- dom/media/driftcontrol/DriftController.h | 33
-rw-r--r-- dom/media/driftcontrol/DynamicResampler.cpp | 83
-rw-r--r-- dom/media/driftcontrol/DynamicResampler.h | 148
-rw-r--r-- dom/media/driftcontrol/gtest/TestAudioDriftCorrection.cpp | 60
-rw-r--r-- dom/media/driftcontrol/gtest/TestAudioResampler.cpp | 50
-rw-r--r-- dom/media/driftcontrol/gtest/TestDriftController.cpp | 97
-rw-r--r-- dom/media/driftcontrol/gtest/TestDynamicResampler.cpp | 76
-rwxr-xr-x dom/media/driftcontrol/plot.py | 8
-rw-r--r-- dom/media/encoder/VP8TrackEncoder.cpp | 2
-rw-r--r-- dom/media/gmp/CDMStorageIdProvider.cpp | 2
-rw-r--r-- dom/media/gmp/ChromiumCDMChild.h | 2
-rw-r--r-- dom/media/gmp/GMPVideoDecoderChild.cpp | 55
-rw-r--r-- dom/media/gmp/GMPVideoDecoderChild.h | 1
-rw-r--r-- dom/media/gmp/GMPVideoEncoderChild.cpp | 43
-rw-r--r-- dom/media/gmp/GMPVideoEncoderChild.h | 1
-rw-r--r-- dom/media/gmp/mozIGeckoMediaPluginService.idl | 2
-rw-r--r-- dom/media/gtest/AudioVerifier.h | 4
-rw-r--r-- dom/media/gtest/GMPTestMonitor.h | 4
-rw-r--r-- dom/media/gtest/MockCubeb.cpp | 301
-rw-r--r-- dom/media/gtest/MockCubeb.h | 184
-rw-r--r-- dom/media/gtest/TestAudioCallbackDriver.cpp | 457
-rw-r--r-- dom/media/gtest/TestAudioInputProcessing.cpp | 175
-rw-r--r-- dom/media/gtest/TestAudioInputSource.cpp | 114
-rw-r--r-- dom/media/gtest/TestAudioRingBuffer.cpp | 50
-rw-r--r-- dom/media/gtest/TestAudioTrackGraph.cpp | 740
-rw-r--r-- dom/media/gtest/TestCDMStorage.cpp | 24
-rw-r--r-- dom/media/gtest/TestDeviceInputTrack.cpp | 18
-rw-r--r-- dom/media/gtest/TestMP4Demuxer.cpp | 4
-rw-r--r-- dom/media/gtest/TestMediaDataEncoder.cpp | 2
-rw-r--r-- dom/media/gtest/TestWebMWriter.cpp | 6
-rw-r--r-- dom/media/ipc/MFCDMChild.cpp | 7
-rw-r--r-- dom/media/ipc/MFCDMChild.h | 2
-rw-r--r-- dom/media/ipc/RDDChild.cpp | 9
-rw-r--r-- dom/media/ipc/RDDParent.cpp | 16
-rw-r--r-- dom/media/ipc/RemoteImageHolder.h | 12
-rw-r--r-- dom/media/mediacapabilities/KeyValueStorage.cpp | 2
-rw-r--r-- dom/media/mediacontrol/ContentMediaController.cpp | 31
-rw-r--r-- dom/media/mediacontrol/ContentMediaController.h | 3
-rw-r--r-- dom/media/mediacontrol/MediaControlKeyManager.cpp | 7
-rw-r--r-- dom/media/mediacontrol/MediaControlService.cpp | 1
-rw-r--r-- dom/media/mediacontrol/MediaPlaybackStatus.cpp | 64
-rw-r--r-- dom/media/mediacontrol/MediaPlaybackStatus.h | 16
-rw-r--r-- dom/media/mediacontrol/MediaStatusManager.cpp | 29
-rw-r--r-- dom/media/mediacontrol/MediaStatusManager.h | 17
-rw-r--r-- dom/media/mediacontrol/tests/browser/browser_media_control_position_state.js | 243
-rw-r--r-- dom/media/mediacontrol/tests/browser/head.js | 81
-rw-r--r-- dom/media/mediasource/MediaSource.cpp | 3
-rw-r--r-- dom/media/mediasource/MediaSourceDemuxer.cpp | 20
-rw-r--r-- dom/media/mediasource/MediaSourceDemuxer.h | 11
-rw-r--r-- dom/media/metrics.yaml | 24
-rw-r--r-- dom/media/moz.build | 6
-rw-r--r-- dom/media/ogg/OggDemuxer.cpp | 4
-rw-r--r-- dom/media/platforms/EncoderConfig.cpp | 42
-rw-r--r-- dom/media/platforms/EncoderConfig.h | 2
-rw-r--r-- dom/media/platforms/MediaCodecsSupport.cpp | 9
-rw-r--r-- dom/media/platforms/MediaCodecsSupport.h | 2
-rw-r--r-- dom/media/platforms/PDMFactory.cpp | 62
-rw-r--r-- dom/media/platforms/PDMFactory.h | 5
-rw-r--r-- dom/media/platforms/SimpleMap.h | 86
-rw-r--r-- dom/media/platforms/agnostic/AOMDecoder.cpp | 2
-rw-r--r-- dom/media/platforms/agnostic/DAV1DDecoder.cpp | 2
-rw-r--r-- dom/media/platforms/agnostic/TheoraDecoder.cpp | 2
-rw-r--r-- dom/media/platforms/agnostic/VPXDecoder.cpp | 2
-rw-r--r-- dom/media/platforms/agnostic/gmp/GMPVideoDecoder.cpp | 32
-rw-r--r-- dom/media/platforms/agnostic/gmp/GMPVideoDecoder.h | 2
-rw-r--r-- dom/media/platforms/android/AndroidDecoderModule.cpp | 68
-rw-r--r-- dom/media/platforms/android/AndroidDecoderModule.h | 15
-rw-r--r-- dom/media/platforms/android/AndroidEncoderModule.cpp | 3
-rw-r--r-- dom/media/platforms/android/RemoteDataDecoder.cpp | 4
-rw-r--r-- dom/media/platforms/apple/AppleEncoderModule.cpp | 3
-rw-r--r-- dom/media/platforms/apple/AppleVTDecoder.cpp | 2
-rw-r--r-- dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp | 23
-rw-r--r-- dom/media/platforms/ffmpeg/FFmpegAudioEncoder.cpp | 11
-rw-r--r-- dom/media/platforms/ffmpeg/FFmpegDataDecoder.cpp | 21
-rw-r--r-- dom/media/platforms/ffmpeg/FFmpegDataEncoder.h | 2
-rw-r--r-- dom/media/platforms/ffmpeg/FFmpegEncoderModule.cpp | 7
-rw-r--r-- dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp | 121
-rw-r--r-- dom/media/platforms/ffmpeg/FFmpegLibWrapper.h | 4
-rw-r--r-- dom/media/platforms/ffmpeg/FFmpegRuntimeLinker.cpp | 8
-rw-r--r-- dom/media/platforms/ffmpeg/FFmpegUtils.h | 30
-rw-r--r-- dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp | 92
-rw-r--r-- dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h | 3
-rw-r--r-- dom/media/platforms/ffmpeg/FFmpegVideoEncoder.cpp | 4
-rw-r--r-- dom/media/platforms/ffmpeg/FFmpegVideoEncoder.h | 2
-rw-r--r-- dom/media/platforms/ffmpeg/ffmpeg61/include/COPYING.LGPLv2.1 | 504
-rw-r--r-- dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/avcodec.h | 3121
-rw-r--r-- dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/avdct.h | 85
-rw-r--r-- dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/bsf.h | 335
-rw-r--r-- dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/codec.h | 382
-rw-r--r-- dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/codec_desc.h | 134
-rw-r--r-- dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/codec_id.h | 676
-rw-r--r-- dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/codec_par.h | 250
-rw-r--r-- dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/defs.h | 344
-rw-r--r-- dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/packet.h | 871
-rw-r--r-- dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/vdpau.h | 168
-rw-r--r-- dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/version.h | 45
-rw-r--r-- dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/version_major.h | 52
-rw-r--r-- dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/attributes.h | 173
-rw-r--r-- dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/avconfig.h | 6
-rw-r--r-- dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/avutil.h | 363
-rw-r--r-- dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/buffer.h | 324
-rw-r--r-- dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/channel_layout.h | 804
-rw-r--r-- dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/common.h | 587
-rw-r--r-- dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/cpu.h | 153
-rw-r--r-- dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/dict.h | 259
-rw-r--r-- dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/error.h | 158
-rw-r--r-- dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/frame.h | 1112
-rw-r--r-- dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/hwcontext.h | 594
-rw-r--r-- dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/hwcontext_drm.h | 169
-rw-r--r-- dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/hwcontext_vaapi.h | 117
-rw-r--r-- dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/intfloat.h | 73
-rw-r--r-- dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/log.h | 388
-rw-r--r-- dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/macros.h | 87
-rw-r--r-- dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/mathematics.h | 305
-rw-r--r-- dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/mem.h | 611
-rw-r--r-- dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/pixfmt.h | 920
-rw-r--r-- dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/rational.h | 226
-rw-r--r-- dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/samplefmt.h | 275
-rw-r--r-- dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/version.h | 121
-rw-r--r-- dom/media/platforms/ffmpeg/ffmpeg61/moz.build | 47
-rw-r--r-- dom/media/platforms/ffmpeg/ffvpx/FFVPXRuntimeLinker.cpp | 48
-rw-r--r-- dom/media/platforms/ffmpeg/ffvpx/moz.build | 2
-rw-r--r-- dom/media/platforms/ffmpeg/moz.build | 1
-rw-r--r-- dom/media/platforms/omx/OmxDataDecoder.cpp | 6
-rw-r--r-- dom/media/platforms/omx/OmxDataDecoder.h | 4
-rw-r--r-- dom/media/platforms/wmf/MFTEncoder.cpp | 22
-rw-r--r-- dom/media/platforms/wmf/WMF.h | 43
-rw-r--r-- dom/media/platforms/wmf/WMFDataEncoderUtils.cpp | 221
-rw-r--r-- dom/media/platforms/wmf/WMFDataEncoderUtils.h | 140
-rw-r--r-- dom/media/platforms/wmf/WMFDecoderModule.cpp | 34
-rw-r--r-- dom/media/platforms/wmf/WMFDecoderModule.h | 15
-rw-r--r-- dom/media/platforms/wmf/WMFEncoderModule.cpp | 3
-rw-r--r-- dom/media/platforms/wmf/WMFMediaDataEncoder.cpp | 399
-rw-r--r-- dom/media/platforms/wmf/WMFMediaDataEncoder.h | 302
-rw-r--r-- dom/media/platforms/wmf/WMFVideoMFTManager.cpp | 6
-rw-r--r-- dom/media/platforms/wmf/moz.build | 2
-rw-r--r-- dom/media/test/complete_length_worker.js | 80
-rw-r--r-- dom/media/test/mochitest.toml | 4
-rw-r--r-- dom/media/test/rdd_process_xpcom/RddProcessTest.cpp | 3
-rw-r--r-- dom/media/test/reftest/reftest.list | 2
-rw-r--r-- dom/media/test/test_complete_length.html | 49
-rw-r--r-- dom/media/tests/crashtests/crashtests.list | 4
-rw-r--r-- dom/media/utils/PerformanceRecorder.cpp | 26
-rw-r--r-- dom/media/utils/PerformanceRecorder.h | 56
-rw-r--r-- dom/media/utils/TelemetryProbesReporter.cpp | 28
-rw-r--r-- dom/media/utils/TelemetryProbesReporter.h | 4
-rw-r--r-- dom/media/webaudio/FFTBlock.cpp | 3
-rw-r--r-- dom/media/webcodecs/DecoderAgent.cpp | 8
-rw-r--r-- dom/media/webcodecs/DecoderTemplate.cpp | 102
-rw-r--r-- dom/media/webcodecs/DecoderTemplate.h | 39
-rw-r--r-- dom/media/webcodecs/EncoderTemplate.cpp | 195
-rw-r--r-- dom/media/webcodecs/EncoderTemplate.h | 17
-rw-r--r-- dom/media/webcodecs/VideoEncoder.cpp | 15
-rw-r--r-- dom/media/webcodecs/crashtests/1889831.html | 21
-rw-r--r-- dom/media/webcodecs/crashtests/crashtests.list | 2
-rw-r--r-- dom/media/webrtc/MediaEnginePrefs.h | 2
-rw-r--r-- dom/media/webrtc/MediaEngineWebRTCAudio.cpp | 125
-rw-r--r-- dom/media/webrtc/MediaEngineWebRTCAudio.h | 36
-rw-r--r-- dom/media/webrtc/jsapi/RTCTransformEventRunnable.cpp | 4
-rw-r--r-- dom/media/webrtc/libwebrtcglue/VideoConduit.cpp | 14
-rw-r--r-- dom/media/webrtc/libwebrtcglue/VideoStreamFactory.cpp | 5
-rw-r--r-- dom/media/webrtc/metrics.yaml | 81
-rw-r--r-- dom/media/webrtc/sdp/rsdparsa_capi/src/lib.rs | 2
-rw-r--r-- dom/media/webrtc/sdp/rsdparsa_capi/src/types.rs | 1
-rw-r--r-- dom/media/webrtc/tests/mochitests/head.js | 2
-rw-r--r-- dom/media/webrtc/tests/mochitests/iceTestUtils.js | 21
-rw-r--r-- dom/media/webrtc/tests/mochitests/test_peerConnection_glean.html | 182
-rw-r--r-- dom/media/webrtc/third_party_build/default_config_env | 20
-rw-r--r-- dom/media/webrtc/third_party_build/elm_rebase.sh | 25
-rw-r--r-- dom/media/webrtc/third_party_build/fetch_github_repo.py | 11
-rw-r--r-- dom/media/webrtc/third_party_build/loop-ff.sh | 6
-rw-r--r-- dom/media/webrtc/third_party_build/prep_repo.sh | 48
-rw-r--r-- dom/media/webrtc/third_party_build/verify_vendoring.sh | 29
-rw-r--r-- dom/media/webrtc/transport/test/moz.build | 30
-rw-r--r-- dom/media/webrtc/transport/test_nr_socket.cpp | 38
-rw-r--r-- dom/media/webrtc/transport/third_party/nICEr/src/stun/addrs.c | 161
-rw-r--r-- dom/media/webrtc/transport/third_party/nrappkit/src/log/r_log.c | 20
-rw-r--r-- dom/media/webrtc/transport/third_party/nrappkit/src/registry/registry.c | 2
-rw-r--r-- dom/media/webrtc/transport/third_party/nrappkit/src/registry/registry_local.c | 36
-rw-r--r-- dom/media/webrtc/transport/third_party/nrappkit/src/registry/registrycb.c | 12
-rw-r--r-- dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_assoc.c | 18
-rw-r--r-- dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_crc32.c | 2
-rw-r--r-- dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_data.c | 4
-rw-r--r-- dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_list.c | 6
-rw-r--r-- dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_memory.c | 10
-rw-r--r-- dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_replace.c | 2
-rw-r--r-- dom/media/webrtc/transport/third_party/nrappkit/src/util/p_buf.c | 8
-rw-r--r-- dom/media/webrtc/transport/third_party/nrappkit/src/util/util.c | 83
-rw-r--r-- dom/media/webrtc/transport/third_party/nrappkit/src/util/util.h | 5
-rw-r--r-- dom/media/webrtc/transport/transportlayerdtls.cpp | 103
-rw-r--r-- dom/media/webrtc/transport/transportlayerdtls.h | 2
-rw-r--r-- dom/media/webvtt/vtt.sys.mjs | 62
235 files changed, 20787 insertions, 2367 deletions
diff --git a/dom/media/AsyncLogger.h b/dom/media/AsyncLogger.h
index adc4101382..2dedabea7c 100644
--- a/dom/media/AsyncLogger.h
+++ b/dom/media/AsyncLogger.h
@@ -17,7 +17,7 @@
#include "mozilla/Sprintf.h"
#include "mozilla/TimeStamp.h"
#include "GeckoProfiler.h"
-#include "MPSCQueue.h"
+#include "mozilla/dom/MPSCQueue.h"
#if defined(_WIN32)
# include <process.h>
diff --git a/dom/media/AudioInputSource.cpp b/dom/media/AudioInputSource.cpp
index 1ba2d81938..ef471530d6 100644
--- a/dom/media/AudioInputSource.cpp
+++ b/dom/media/AudioInputSource.cpp
@@ -55,46 +55,63 @@ AudioInputSource::AudioInputSource(RefPtr<EventListener>&& aListener,
mSandboxed(CubebUtils::SandboxEnabled()),
mAudioThreadId(ProfilerThreadId{}),
mEventListener(std::move(aListener)),
- mTaskThread(CUBEB_TASK_THREAD),
+ mTaskThread(CubebUtils::GetCubebOperationThread()),
mDriftCorrector(static_cast<uint32_t>(aSourceRate),
static_cast<uint32_t>(aTargetRate), aPrincipalHandle) {
MOZ_ASSERT(mChannelCount > 0);
MOZ_ASSERT(mEventListener);
}
-void AudioInputSource::Start() {
+void AudioInputSource::Init() {
// This is called on MediaTrackGraph's graph thread, which can be the cubeb
// stream's callback thread. Running cubeb operations within cubeb stream
// callback thread can cause the deadlock on Linux, so we dispatch those
// operations to the task thread.
MOZ_ASSERT(mTaskThread);
- LOG("AudioInputSource %p, start", this);
+ LOG("AudioInputSource %p, init", this);
MOZ_ALWAYS_SUCCEEDS(mTaskThread->Dispatch(
- NS_NewRunnableFunction(__func__, [self = RefPtr(this)]() mutable {
- self->mStream = CubebInputStream::Create(
- self->mDeviceId, self->mChannelCount,
- static_cast<uint32_t>(self->mRate), self->mIsVoice, self.get());
- if (!self->mStream) {
+ NS_NewRunnableFunction(__func__, [this, self = RefPtr(this)]() mutable {
+ mStream = CubebInputStream::Create(mDeviceId, mChannelCount,
+ static_cast<uint32_t>(mRate),
+ mIsVoice, this);
+ if (!mStream) {
LOGE("AudioInputSource %p, cannot create an audio input stream!",
self.get());
return;
}
+ })));
+}
- if (uint32_t latency = 0;
- self->mStream->Latency(&latency) == CUBEB_OK) {
- Data data(LatencyChangeData{media::TimeUnit(latency, self->mRate)});
- if (self->mSPSCQueue.Enqueue(data) == 0) {
+void AudioInputSource::Start() {
+ // This is called on MediaTrackGraph's graph thread, which can be the cubeb
+ // stream's callback thread. Running cubeb operations within cubeb stream
+ // callback thread can cause the deadlock on Linux, so we dispatch those
+ // operations to the task thread.
+ MOZ_ASSERT(mTaskThread);
+
+ LOG("AudioInputSource %p, start", this);
+ MOZ_ALWAYS_SUCCEEDS(mTaskThread->Dispatch(
+ NS_NewRunnableFunction(__func__, [this, self = RefPtr(this)]() mutable {
+ if (!mStream) {
+ LOGE("AudioInputSource %p, no audio input stream!", self.get());
+ return;
+ }
+
+ if (uint32_t latency = 0; mStream->Latency(&latency) == CUBEB_OK) {
+ Data data(LatencyChangeData{media::TimeUnit(latency, mRate)});
+ if (mSPSCQueue.Enqueue(data) == 0) {
LOGE("AudioInputSource %p, failed to enqueue latency change",
self.get());
}
}
- if (int r = self->mStream->Start(); r != CUBEB_OK) {
+ if (int r = mStream->Start(); r != CUBEB_OK) {
LOGE(
"AudioInputSource %p, cannot start its audio input stream! The "
"stream is destroyed directly!",
self.get());
- self->mStream = nullptr;
+ mStream = nullptr;
+ mConfiguredProcessingParams = CUBEB_INPUT_PROCESSING_PARAM_NONE;
}
})));
}
@@ -108,20 +125,72 @@ void AudioInputSource::Stop() {
LOG("AudioInputSource %p, stop", this);
MOZ_ALWAYS_SUCCEEDS(mTaskThread->Dispatch(
- NS_NewRunnableFunction(__func__, [self = RefPtr(this)]() mutable {
- if (!self->mStream) {
+ NS_NewRunnableFunction(__func__, [this, self = RefPtr(this)]() mutable {
+ if (!mStream) {
LOGE("AudioInputSource %p, has no audio input stream to stop!",
self.get());
return;
}
- if (int r = self->mStream->Stop(); r != CUBEB_OK) {
+ if (int r = mStream->Stop(); r != CUBEB_OK) {
LOGE(
"AudioInputSource %p, cannot stop its audio input stream! The "
"stream is going to be destroyed forcefully",
self.get());
}
- self->mStream = nullptr;
+ mStream = nullptr;
+ mConfiguredProcessingParams = CUBEB_INPUT_PROCESSING_PARAM_NONE;
+ })));
+}
+
+auto AudioInputSource::SetRequestedProcessingParams(
+ cubeb_input_processing_params aParams)
+ -> RefPtr<SetRequestedProcessingParamsPromise> {
+ // This is called on MediaTrackGraph's graph thread, which can be the cubeb
+ // stream's callback thread. Running cubeb operations within cubeb stream
+ // callback thread can cause the deadlock on Linux, so we dispatch those
+ // operations to the task thread.
+ MOZ_ASSERT(mTaskThread);
+
+ LOG("AudioInputSource %p, SetProcessingParams(%s)", this,
+ CubebUtils::ProcessingParamsToString(aParams).get());
+ using ProcessingPromise = SetRequestedProcessingParamsPromise;
+ MozPromiseHolder<ProcessingPromise> holder;
+ RefPtr<ProcessingPromise> p = holder.Ensure(__func__);
+ MOZ_ALWAYS_SUCCEEDS(mTaskThread->Dispatch(NS_NewRunnableFunction(
+ __func__, [this, self = RefPtr(this), holder = std::move(holder),
+ aParams]() mutable {
+ if (!mStream) {
+ LOGE(
+ "AudioInputSource %p, has no audio input stream to set "
+ "processing params on!",
+ this);
+ holder.Reject(CUBEB_ERROR,
+ "AudioInputSource::SetProcessingParams no stream");
+ return;
+ }
+ cubeb_input_processing_params supportedParams;
+ auto handle = CubebUtils::GetCubeb();
+ int r = cubeb_get_supported_input_processing_params(handle->Context(),
+ &supportedParams);
+ if (r != CUBEB_OK) {
+ holder.Reject(CUBEB_ERROR_NOT_SUPPORTED,
+ "AudioInputSource::SetProcessingParams");
+ return;
+ }
+ aParams &= supportedParams;
+ if (aParams == mConfiguredProcessingParams) {
+ holder.Resolve(aParams, "AudioInputSource::SetProcessingParams");
+ return;
+ }
+ mConfiguredProcessingParams = aParams;
+ r = mStream->SetProcessingParams(aParams);
+ if (r == CUBEB_OK) {
+ holder.Resolve(aParams, "AudioInputSource::SetProcessingParams");
+ return;
+ }
+ holder.Reject(r, "AudioInputSource::SetProcessingParams");
})));
+ return p;
}
AudioSegment AudioInputSource::GetAudioSegment(TrackTime aDuration,
diff --git a/dom/media/AudioInputSource.h b/dom/media/AudioInputSource.h
index b44a3ae43a..8f69165719 100644
--- a/dom/media/AudioInputSource.h
+++ b/dom/media/AudioInputSource.h
@@ -12,6 +12,7 @@
#include "CubebInputStream.h"
#include "CubebUtils.h"
#include "TimeUnits.h"
+#include "mozilla/MozPromise.h"
#include "mozilla/ProfilerUtils.h"
#include "mozilla/RefPtr.h"
#include "mozilla/SPSCQueue.h"
@@ -54,10 +55,17 @@ class AudioInputSource : public CubebInputStream::Listener {
// The following functions should always be called in the same thread: They
// are always run on MediaTrackGraph's graph thread.
+ // Sets up mStream.
+ void Init();
// Starts producing audio data.
void Start();
// Stops producing audio data.
void Stop();
+ // Set the params to be applied in the platform for this source.
+ using SetRequestedProcessingParamsPromise =
+ MozPromise<cubeb_input_processing_params, int, true>;
+ RefPtr<SetRequestedProcessingParamsPromise> SetRequestedProcessingParams(
+ cubeb_input_processing_params aParams);
// Returns the AudioSegment with aDuration of data inside.
// The graph thread can change behind the scene, e.g., cubeb stream reinit due
// to default output device changed). When this happens, we need to notify
@@ -118,6 +126,11 @@ class AudioInputSource : public CubebInputStream::Listener {
// An input-only cubeb stream operated within mTaskThread.
UniquePtr<CubebInputStream> mStream;
+ // The params configured on the cubeb stream, after filtering away unsupported
+ // params. mTaskThread only.
+ cubeb_input_processing_params mConfiguredProcessingParams =
+ CUBEB_INPUT_PROCESSING_PARAM_NONE;
+
struct Empty {};
struct LatencyChangeData {
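
For context, a minimal sketch (hypothetical call site, not part of this patch) of how the promise returned by SetRequestedProcessingParams() might be consumed:

    // Assumes a RefPtr<AudioInputSource> mSource obtained elsewhere.
    mSource->SetRequestedProcessingParams(
        CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION)
        ->Then(
            GetMainThreadSerialEventTarget(), __func__,
            [](cubeb_input_processing_params aApplied) {
              // Resolved with the request reduced to the backend-supported set.
            },
            [](int aCubebError) {
              // Rejected with a cubeb error, e.g. CUBEB_ERROR_NOT_SUPPORTED.
            });
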
diff --git a/dom/media/AudioRingBuffer.cpp b/dom/media/AudioRingBuffer.cpp
index 475de653b8..cbeb64a2f6 100644
--- a/dom/media/AudioRingBuffer.cpp
+++ b/dom/media/AudioRingBuffer.cpp
@@ -270,10 +270,13 @@ class RingBuffer final {
* Re-allocates memory if a larger buffer is requested than what is already
* allocated.
*/
- bool SetLengthBytes(uint32_t aLengthBytes) {
+ bool EnsureLengthBytes(uint32_t aLengthBytes) {
MOZ_ASSERT(aLengthBytes % sizeof(T) == 0,
"Length in bytes is not a whole number of samples");
+ if (mMemoryBuffer.Length() >= aLengthBytes) {
+ return true;
+ }
uint32_t lengthSamples = aLengthBytes / sizeof(T);
uint32_t oldLengthSamples = Capacity();
uint32_t availableRead = AvailableRead();
@@ -530,14 +533,17 @@ uint32_t AudioRingBuffer::Clear() {
return mPtr->mFloatRingBuffer->Clear();
}
-bool AudioRingBuffer::SetLengthBytes(uint32_t aLengthBytes) {
+bool AudioRingBuffer::EnsureLengthBytes(uint32_t aLengthBytes) {
if (mPtr->mFloatRingBuffer) {
- return mPtr->mFloatRingBuffer->SetLengthBytes(aLengthBytes);
+ return mPtr->mFloatRingBuffer->EnsureLengthBytes(aLengthBytes);
}
if (mPtr->mIntRingBuffer) {
- return mPtr->mIntRingBuffer->SetLengthBytes(aLengthBytes);
+ return mPtr->mIntRingBuffer->EnsureLengthBytes(aLengthBytes);
}
if (mPtr->mBackingBuffer) {
+ if (mPtr->mBackingBuffer->Length() >= aLengthBytes) {
+ return true;
+ }
return mPtr->mBackingBuffer->SetLength(aLengthBytes);
}
MOZ_ASSERT_UNREACHABLE("Unexpected");
diff --git a/dom/media/AudioRingBuffer.h b/dom/media/AudioRingBuffer.h
index 892a7cd408..a08211ba15 100644
--- a/dom/media/AudioRingBuffer.h
+++ b/dom/media/AudioRingBuffer.h
@@ -93,12 +93,12 @@ class AudioRingBuffer final {
uint32_t Clear();
/**
- * Set the length of the ring buffer in bytes. Must be divisible by the sample
- * size. Will not deallocate memory if the underlying buffer is large enough.
- * Returns false if setting the length requires allocating memory and the
- * allocation fails.
+ * Increase the ring buffer size if necessary to at least the specified length
+ * in bytes. Must be divisible by the sample size.
+ * Will not deallocate memory if the underlying buffer is large enough.
+ * Returns false if memory allocation is required and fails.
*/
- bool SetLengthBytes(uint32_t aLengthBytes);
+ bool EnsureLengthBytes(uint32_t aLengthBytes);
/**
* Return the number of samples this buffer can hold.
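
A short sketch of the grow-only semantics (hypothetical sizes; the constructor and SetSampleFormat usage are assumed from the existing API):

    AudioRingBuffer rb(1024 * sizeof(float));
    rb.SetSampleFormat(AUDIO_FORMAT_FLOAT32);
    MOZ_ALWAYS_TRUE(rb.EnsureLengthBytes(4096 * sizeof(float)));  // reallocates
    MOZ_ALWAYS_TRUE(rb.EnsureLengthBytes(2048 * sizeof(float)));  // no-op; capacity kept
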
diff --git a/dom/media/AudioStream.cpp b/dom/media/AudioStream.cpp
index 7d80a3738e..bb0248d942 100644
--- a/dom/media/AudioStream.cpp
+++ b/dom/media/AudioStream.cpp
@@ -316,7 +316,8 @@ void AudioStream::SetStreamName(const nsAString& aStreamName) {
}
MonitorAutoLock mon(mMonitor);
- if (InvokeCubeb(cubeb_stream_set_name, aRawStreamName.get()) != CUBEB_OK) {
+ int r = InvokeCubeb(cubeb_stream_set_name, aRawStreamName.get());
+ if (r && r != CUBEB_ERROR_NOT_SUPPORTED) {
LOGE("Could not set cubeb stream name.");
}
}
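
The new error handling tolerates backends that cannot rename streams; in isolation (hedged sketch, where stream is a hypothetical cubeb_stream*):

    int r = cubeb_stream_set_name(stream, "media playback");
    if (r != CUBEB_OK && r != CUBEB_ERROR_NOT_SUPPORTED) {
      // Only genuine failures are reported; CUBEB_ERROR_NOT_SUPPORTED is an
      // expected return value on backends without renaming support.
    }
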
diff --git a/dom/media/AudioStream.h b/dom/media/AudioStream.h
index 11a61b9fe7..708278314d 100644
--- a/dom/media/AudioStream.h
+++ b/dom/media/AudioStream.h
@@ -333,7 +333,7 @@ class AudioStream final {
const AudioConfig::ChannelLayout::ChannelMap mChannelMap;
// The monitor is held to protect all access to member variables below.
- Monitor mMonitor MOZ_UNANNOTATED;
+ Monitor mMonitor;
const uint32_t mOutChannels;
diff --git a/dom/media/ChannelMediaResource.cpp b/dom/media/ChannelMediaResource.cpp
index aefedb37d1..15f048d735 100644
--- a/dom/media/ChannelMediaResource.cpp
+++ b/dom/media/ChannelMediaResource.cpp
@@ -227,7 +227,7 @@ nsresult ChannelMediaResource::OnStartRequest(nsIRequest* aRequest,
// at this stage.
// For now, tell the decoder that the stream is infinite.
if (rangeTotal != -1) {
- contentLength = std::max(contentLength, rangeTotal);
+ length = std::max(contentLength, rangeTotal);
}
}
acceptsRanges = gotRangeHeader;
@@ -240,11 +240,9 @@ nsresult ChannelMediaResource::OnStartRequest(nsIRequest* aRequest,
// to assume seeking doesn't work.
acceptsRanges = false;
}
- }
- if (aRequestOffset == 0 && contentLength >= 0 &&
- (responseStatus == HTTP_OK_CODE ||
- responseStatus == HTTP_PARTIAL_RESPONSE_CODE)) {
- length = contentLength;
+ if (contentLength >= 0) {
+ length = contentLength;
+ }
}
// XXX we probably should examine the Content-Range header in case
// the server gave us a range which is not quite what we asked for
@@ -488,9 +486,9 @@ int64_t ChannelMediaResource::CalculateStreamLength() const {
bool gotRangeHeader = NS_SUCCEEDED(
ParseContentRangeHeader(hc, rangeStart, rangeEnd, rangeTotal));
if (gotRangeHeader && rangeTotal != -1) {
- contentLength = std::max(contentLength, rangeTotal);
+ return std::max(contentLength, rangeTotal);
}
- return contentLength;
+ return -1;
}
nsresult ChannelMediaResource::Open(nsIStreamListener** aStreamListener) {
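
A worked example of the length computation above, with hypothetical response values: a 206 reply carrying Content-Length: 1000 and Content-Range: bytes 0-999/5000 gives contentLength = 1000 and rangeTotal = 5000, so the stream length becomes std::max(1000, 5000) = 5000. Without a usable Content-Range total, CalculateStreamLength() now reports -1 (unknown) rather than a possibly partial Content-Length.
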
diff --git a/dom/media/CubebInputStream.cpp b/dom/media/CubebInputStream.cpp
index 38b66315f9..55677d2dc1 100644
--- a/dom/media/CubebInputStream.cpp
+++ b/dom/media/CubebInputStream.cpp
@@ -129,7 +129,7 @@ CubebInputStream::CubebInputStream(
void CubebInputStream::Init() {
// cubeb_stream_register_device_changed_callback is only supported on macOS
- // platform and MockCubebfor now.
+ // platform and MockCubeb for now.
InvokeCubebWithLog(cubeb_stream_register_device_changed_callback,
CubebInputStream::DeviceChangedCallback_s);
}
@@ -138,6 +138,11 @@ int CubebInputStream::Start() { return InvokeCubebWithLog(cubeb_stream_start); }
int CubebInputStream::Stop() { return InvokeCubebWithLog(cubeb_stream_stop); }
+int CubebInputStream::SetProcessingParams(
+ cubeb_input_processing_params aParams) {
+ return InvokeCubebWithLog(cubeb_stream_set_input_processing_params, aParams);
+}
+
int CubebInputStream::Latency(uint32_t* aLatencyFrames) {
return InvokeCubebWithLog(cubeb_stream_get_input_latency, aLatencyFrames);
}
diff --git a/dom/media/CubebInputStream.h b/dom/media/CubebInputStream.h
index a35924a976..107d0f4f6e 100644
--- a/dom/media/CubebInputStream.h
+++ b/dom/media/CubebInputStream.h
@@ -51,6 +51,9 @@ class CubebInputStream final {
// Stop producing audio data.
int Stop();
+ // Apply the given processing params.
+ int SetProcessingParams(cubeb_input_processing_params aParams);
+
// Gets the approximate stream latency in frames.
int Latency(uint32_t* aLatencyFrames);
diff --git a/dom/media/CubebUtils.cpp b/dom/media/CubebUtils.cpp
index dbdab3a56a..3c93e65095 100644
--- a/dom/media/CubebUtils.cpp
+++ b/dom/media/CubebUtils.cpp
@@ -14,6 +14,7 @@
#include "mozilla/Logging.h"
#include "mozilla/Preferences.h"
#include "mozilla/Components.h"
+#include "mozilla/SharedThreadPool.h"
#include "mozilla/Sprintf.h"
#include "mozilla/StaticMutex.h"
#include "mozilla/StaticPtr.h"
@@ -186,6 +187,43 @@ static const uint32_t CUBEB_NORMAL_LATENCY_MS = 100;
static const uint32_t CUBEB_NORMAL_LATENCY_FRAMES = 1024;
namespace CubebUtils {
+nsCString ProcessingParamsToString(cubeb_input_processing_params aParams) {
+ if (aParams == CUBEB_INPUT_PROCESSING_PARAM_NONE) {
+ return "NONE"_ns;
+ }
+ nsCString str;
+ for (auto p : {CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION,
+ CUBEB_INPUT_PROCESSING_PARAM_AUTOMATIC_GAIN_CONTROL,
+ CUBEB_INPUT_PROCESSING_PARAM_NOISE_SUPPRESSION,
+ CUBEB_INPUT_PROCESSING_PARAM_VOICE_ISOLATION}) {
+ if (!(aParams & p)) {
+ continue;
+ }
+ if (!str.IsEmpty()) {
+ str.Append(" | ");
+ }
+ str.Append([&p] {
+ switch (p) {
+ case CUBEB_INPUT_PROCESSING_PARAM_NONE:
+ // Handled above.
+ MOZ_CRASH(
+ "NONE is the absence of a param, thus not for logging here.");
+ case CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION:
+ return "AEC";
+ case CUBEB_INPUT_PROCESSING_PARAM_AUTOMATIC_GAIN_CONTROL:
+ return "AGC";
+ case CUBEB_INPUT_PROCESSING_PARAM_NOISE_SUPPRESSION:
+ return "NS";
+ case CUBEB_INPUT_PROCESSING_PARAM_VOICE_ISOLATION:
+ return "VOICE";
+ }
+ MOZ_ASSERT_UNREACHABLE("Unexpected input processing param");
+ return "<Unknown input processing param>";
+ }());
+ }
+ return str;
+}
+
RefPtr<CubebHandle> GetCubebUnlocked();
void GetPrefAndSetString(const char* aPref, StaticAutoPtr<char>& aStorage) {
@@ -765,6 +803,13 @@ bool SandboxEnabled() {
#endif
}
+already_AddRefed<SharedThreadPool> GetCubebOperationThread() {
+ RefPtr<SharedThreadPool> pool = SharedThreadPool::Get("CubebOperation"_ns, 1);
+ const uint32_t kIdleThreadTimeoutMs = 2000;
+ pool->SetIdleThreadTimeout(PR_MillisecondsToInterval(kIdleThreadTimeoutMs));
+ return pool.forget();
+}
+
uint32_t MaxNumberOfChannels() {
RefPtr<CubebHandle> handle = GetCubeb();
uint32_t maxNumberOfChannels;
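
For illustration, the string forms produced by ProcessingParamsToString() above:

    // ProcessingParamsToString(CUBEB_INPUT_PROCESSING_PARAM_NONE) -> "NONE"
    // ProcessingParamsToString(CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION |
    //                          CUBEB_INPUT_PROCESSING_PARAM_NOISE_SUPPRESSION)
    //                                                             -> "AEC | NS"
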
diff --git a/dom/media/CubebUtils.h b/dom/media/CubebUtils.h
index c05c8d2449..a59d72bbd6 100644
--- a/dom/media/CubebUtils.h
+++ b/dom/media/CubebUtils.h
@@ -16,10 +16,12 @@
class AudioDeviceInfo;
MOZ_MAKE_ENUM_CLASS_BITWISE_OPERATORS(cubeb_stream_prefs)
+MOZ_MAKE_ENUM_CLASS_BITWISE_OPERATORS(cubeb_input_processing_params)
namespace mozilla {
class CallbackThreadRegistry;
+class SharedThreadPool;
namespace CubebUtils {
@@ -35,6 +37,8 @@ struct ToCubebFormat<AUDIO_FORMAT_S16> {
static const cubeb_sample_format value = CUBEB_SAMPLE_S16NE;
};
+nsCString ProcessingParamsToString(cubeb_input_processing_params aParams);
+
class CubebHandle {
public:
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(CubebHandle)
@@ -62,6 +66,11 @@ void ShutdownLibrary();
bool SandboxEnabled();
+// A thread pool containing only one thread to execute the cubeb operations. We
+// should always use this thread to init, destroy, start, or stop cubeb streams,
+// to avoid data racing or deadlock issues across platforms.
+already_AddRefed<SharedThreadPool> GetCubebOperationThread();
+
// Returns the maximum number of channels supported by the audio hardware.
uint32_t MaxNumberOfChannels();
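
A minimal sketch (hypothetical call site) of serializing a cubeb operation through the shared single-thread pool declared above, mirroring how AudioInputSource dispatches its work:

    RefPtr<SharedThreadPool> thread = CubebUtils::GetCubebOperationThread();
    MOZ_ALWAYS_SUCCEEDS(thread->Dispatch(NS_NewRunnableFunction(
        "HypotheticalCubebOp", [] {
          // Init, start, stop, or destroy a cubeb stream here; running all such
          // operations on this one thread avoids cross-platform races/deadlocks.
        })));
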
diff --git a/dom/media/DeviceInputTrack.cpp b/dom/media/DeviceInputTrack.cpp
index 5d69f7107a..3bf2e87558 100644
--- a/dom/media/DeviceInputTrack.cpp
+++ b/dom/media/DeviceInputTrack.cpp
@@ -316,6 +316,20 @@ bool DeviceInputTrack::HasVoiceInput() const {
return false;
}
+cubeb_input_processing_params DeviceInputTrack::RequestedProcessingParams()
+ const {
+ AssertOnGraphThreadOrNotRunning();
+ Maybe<cubeb_input_processing_params> params;
+ for (const auto& listener : mListeners) {
+ if (params) {
+ *params &= listener->RequestedInputProcessingParams(mGraph);
+ } else {
+ params = Some(listener->RequestedInputProcessingParams(mGraph));
+ }
+ }
+ return params.valueOr(CUBEB_INPUT_PROCESSING_PARAM_NONE);
+}
+
void DeviceInputTrack::DeviceChanged(MediaTrackGraph* aGraph) const {
AssertOnGraphThreadOrNotRunning();
MOZ_ASSERT(aGraph == mGraph,
@@ -326,6 +340,16 @@ void DeviceInputTrack::DeviceChanged(MediaTrackGraph* aGraph) const {
}
}
+void DeviceInputTrack::NotifySetRequestedProcessingParamsResult(
+ MediaTrackGraph* aGraph, cubeb_input_processing_params aRequestedParams,
+ const Result<cubeb_input_processing_params, int>& aResult) {
+ AssertOnGraphThread();
+ for (const auto& listener : mListeners) {
+ listener->NotifySetRequestedInputProcessingParamsResult(
+ mGraph, aRequestedParams, aResult);
+ }
+}
+
void DeviceInputTrack::ReevaluateInputDevice() {
MOZ_ASSERT(NS_IsMainThread());
QueueControlMessageWithNoShutdown([self = RefPtr{this}, this] {
@@ -491,6 +515,8 @@ void NonNativeInputTrack::ProcessInput(GraphTime aFrom, GraphTime aTo,
// GraphRunner keeps the same thread.
MOZ_ASSERT(!HasGraphThreadChanged());
+ ReevaluateProcessingParams();
+
AudioSegment data = mAudioSource->GetAudioSegment(delta, consumer);
MOZ_ASSERT(data.GetDuration() == delta);
GetData<AudioSegment>()->AppendFrom(&data);
@@ -512,6 +538,8 @@ void NonNativeInputTrack::StartAudio(
mGraphThreadId = std::this_thread::get_id();
#endif
mAudioSource = std::move(aAudioInputSource);
+ mAudioSource->Init();
+ ReevaluateProcessingParams();
mAudioSource->Start();
}
@@ -571,6 +599,35 @@ AudioInputSource::Id NonNativeInputTrack::GenerateSourceId() {
return mSourceIdNumber++;
}
+void NonNativeInputTrack::ReevaluateProcessingParams() {
+ AssertOnGraphThread();
+ MOZ_ASSERT(mAudioSource);
+ auto params = RequestedProcessingParams();
+ if (mRequestedProcessingParams == params) {
+ return;
+ }
+ mRequestedProcessingParams = params;
+ using Promise = AudioInputSource::SetRequestedProcessingParamsPromise;
+ mAudioSource->SetRequestedProcessingParams(params)->Then(
+ GetMainThreadSerialEventTarget(), __func__,
+ [this, self = RefPtr(this),
+ params](Promise::ResolveOrRejectValue&& aValue) {
+ if (IsDestroyed()) {
+ return;
+ }
+ auto result = ([&]() -> Result<cubeb_input_processing_params, int> {
+ if (aValue.IsResolve()) {
+ return aValue.ResolveValue();
+ }
+ return Err(aValue.RejectValue());
+ })();
+ QueueControlMessageWithNoShutdown(
+ [this, self = RefPtr(this), params, result = std::move(result)] {
+ NotifySetRequestedProcessingParamsResult(Graph(), params, result);
+ });
+ });
+}
+
#ifdef DEBUG
bool NonNativeInputTrack::HasGraphThreadChanged() {
AssertOnGraphThread();
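
The aggregation in RequestedProcessingParams() intersects every listener's request, so a param survives only if all listeners ask for it. A toy sketch with hypothetical values:

    cubeb_input_processing_params a =
        CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION |
        CUBEB_INPUT_PROCESSING_PARAM_NOISE_SUPPRESSION;
    cubeb_input_processing_params b =
        CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION;
    cubeb_input_processing_params effective = a & b;  // == ECHO_CANCELLATION only
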
diff --git a/dom/media/DeviceInputTrack.h b/dom/media/DeviceInputTrack.h
index 0a92ded13c..8be53f415d 100644
--- a/dom/media/DeviceInputTrack.h
+++ b/dom/media/DeviceInputTrack.h
@@ -172,11 +172,19 @@ class DeviceInputTrack : public ProcessedMediaTrack {
// Main thread API:
const nsTArray<RefPtr<DeviceInputConsumerTrack>>& GetConsumerTracks() const;
+ // Handle the result of an async operation to set processing params on a cubeb
+ // stream. If the operation failed, signal this to listeners and then disable
+ // processing. If the operation succeeded, directly signal this to listeners.
+ void NotifySetRequestedProcessingParamsResult(
+ MediaTrackGraph* aGraph, cubeb_input_processing_params aRequestedParams,
+ const Result<cubeb_input_processing_params, int>& aResult);
// Graph thread APIs:
// Query audio settings from its users.
uint32_t MaxRequestedInputChannels() const;
bool HasVoiceInput() const;
+ // Query for the aggregate processing params from all users.
+ cubeb_input_processing_params RequestedProcessingParams() const;
// Deliver notification to its users.
void DeviceChanged(MediaTrackGraph* aGraph) const;
@@ -265,6 +273,7 @@ class NonNativeInputTrack final : public DeviceInputTrack {
void NotifyDeviceChanged(AudioInputSource::Id aSourceId);
void NotifyInputStopped(AudioInputSource::Id aSourceId);
AudioInputSource::Id GenerateSourceId();
+ void ReevaluateProcessingParams();
private:
~NonNativeInputTrack() = default;
@@ -272,6 +281,8 @@ class NonNativeInputTrack final : public DeviceInputTrack {
// Graph thread only.
RefPtr<AudioInputSource> mAudioSource;
AudioInputSource::Id mSourceIdNumber;
+ cubeb_input_processing_params mRequestedProcessingParams =
+ CUBEB_INPUT_PROCESSING_PARAM_NONE;
#ifdef DEBUG
// Graph thread only.
diff --git a/dom/media/EncoderTraits.cpp b/dom/media/EncoderTraits.cpp
index ba6d43f826..7abc8a0e14 100644
--- a/dom/media/EncoderTraits.cpp
+++ b/dom/media/EncoderTraits.cpp
@@ -14,4 +14,4 @@ bool Supports(const EncoderConfig& aConfig) {
RefPtr<PEMFactory> pem = new PEMFactory();
return pem->Supports(aConfig);
}
-}
+} // namespace mozilla::EncodeTraits
diff --git a/dom/media/ExternalEngineStateMachine.h b/dom/media/ExternalEngineStateMachine.h
index 83250b0f3c..e020695bcc 100644
--- a/dom/media/ExternalEngineStateMachine.h
+++ b/dom/media/ExternalEngineStateMachine.h
@@ -154,12 +154,12 @@ class ExternalEngineStateMachine final
mSeekJob = SeekJob();
mSeekJob.mTarget = Some(aTarget);
}
- void Resolve(const char* aCallSite) {
+ void Resolve(StaticString aCallSite) {
MOZ_ASSERT(mSeekJob.Exists());
mSeekJob.Resolve(aCallSite);
mSeekJob = SeekJob();
}
- void RejectIfExists(const char* aCallSite) {
+ void RejectIfExists(StaticString aCallSite) {
mSeekJob.RejectIfExists(aCallSite);
}
bool IsSeeking() const { return mSeekRequest.Exists(); }
diff --git a/dom/media/GraphDriver.cpp b/dom/media/GraphDriver.cpp
index 744de30bb5..b37aaa010a 100644
--- a/dom/media/GraphDriver.cpp
+++ b/dom/media/GraphDriver.cpp
@@ -342,6 +342,13 @@ class AudioCallbackDriver::FallbackWrapper : public GraphInterface {
uint32_t aAlreadyBuffered) override {
MOZ_CRASH("Unexpected NotifyInputData from fallback SystemClockDriver");
}
+ void NotifySetRequestedInputProcessingParamsResult(
+ AudioCallbackDriver* aDriver,
+ cubeb_input_processing_params aRequestedParams,
+ Result<cubeb_input_processing_params, int>&& aResult) override {
+ MOZ_CRASH(
+ "Unexpected processing params result from fallback SystemClockDriver");
+ }
void DeviceChanged() override {
MOZ_CRASH("Unexpected DeviceChanged from fallback SystemClockDriver");
}
@@ -448,20 +455,17 @@ NS_IMPL_ISUPPORTS0(AudioCallbackDriver::FallbackWrapper)
/* static */
already_AddRefed<TaskQueue> AudioCallbackDriver::CreateTaskQueue() {
- RefPtr<SharedThreadPool> pool = CUBEB_TASK_THREAD;
- const uint32_t kIdleThreadTimeoutMs = 2000;
- pool->SetIdleThreadTimeout(PR_MillisecondsToInterval(kIdleThreadTimeoutMs));
-
- RefPtr<TaskQueue> queue =
- TaskQueue::Create(pool.forget(), "AudioCallbackDriver cubeb task queue");
- return queue.forget();
+ return TaskQueue::Create(CubebUtils::GetCubebOperationThread(),
+ "AudioCallbackDriver cubeb task queue")
+ .forget();
}
AudioCallbackDriver::AudioCallbackDriver(
GraphInterface* aGraphInterface, GraphDriver* aPreviousDriver,
uint32_t aSampleRate, uint32_t aOutputChannelCount,
uint32_t aInputChannelCount, CubebUtils::AudioDeviceID aOutputDeviceID,
- CubebUtils::AudioDeviceID aInputDeviceID, AudioInputType aAudioInputType)
+ CubebUtils::AudioDeviceID aInputDeviceID, AudioInputType aAudioInputType,
+ cubeb_input_processing_params aRequestedInputProcessingParams)
: GraphDriver(aGraphInterface, aPreviousDriver, aSampleRate),
mOutputChannelCount(aOutputChannelCount),
mInputChannelCount(aInputChannelCount),
@@ -469,6 +473,7 @@ AudioCallbackDriver::AudioCallbackDriver(
mInputDeviceID(aInputDeviceID),
mIterationDurationMS(MEDIA_GRAPH_TARGET_PERIOD_MS),
mCubebOperationThread(CreateTaskQueue()),
+ mRequestedInputProcessingParams(aRequestedInputProcessingParams),
mAudioThreadId(ProfilerThreadId{}),
mAudioThreadIdInCb(std::thread::id()),
mFallback("AudioCallbackDriver::mFallback"),
@@ -485,7 +490,12 @@ AudioCallbackDriver::AudioCallbackDriver(
if (aAudioInputType == AudioInputType::Voice &&
StaticPrefs::
media_getusermedia_microphone_prefer_voice_stream_with_processing_enabled()) {
- LOG(LogLevel::Debug, ("VOICE."));
+ LOG(LogLevel::Debug,
+ ("%p: AudioCallbackDriver %p ctor - using VOICE and requesting input "
+ "processing params %s.",
+ Graph(), this,
+ CubebUtils::ProcessingParamsToString(aRequestedInputProcessingParams)
+ .get()));
mInputDevicePreference = CUBEB_DEVICE_PREF_VOICE;
CubebUtils::SetInCommunication(true);
} else {
@@ -670,6 +680,10 @@ void AudioCallbackDriver::Init(const nsCString& aStreamName) {
PanOutputIfNeeded(inputWanted);
#endif
+ if (inputWanted && InputDevicePreference() == AudioInputType::Voice) {
+ SetInputProcessingParams(mRequestedInputProcessingParams);
+ }
+
cubeb_stream_register_device_changed_callback(
mAudioStream, AudioCallbackDriver::DeviceChangedCallback_s);
@@ -1256,6 +1270,11 @@ TimeDuration AudioCallbackDriver::AudioOutputLatency() {
mSampleRate);
}
+bool AudioCallbackDriver::HasFallback() const {
+ MOZ_ASSERT(InIteration());
+ return mFallbackDriverState != FallbackDriverState::None;
+}
+
bool AudioCallbackDriver::OnFallback() const {
MOZ_ASSERT(InIteration());
return mFallbackDriverState == FallbackDriverState::Running;
@@ -1309,6 +1328,9 @@ void AudioCallbackDriver::FallbackToSystemClockDriver() {
void AudioCallbackDriver::FallbackDriverStopped(GraphTime aIterationEnd,
GraphTime aStateComputedTime,
FallbackDriverState aState) {
+ LOG(LogLevel::Debug,
+ ("%p: AudioCallbackDriver %p Fallback driver has stopped.", Graph(),
+ this));
mIterationEnd = aIterationEnd;
mStateComputedTime = aStateComputedTime;
mNextReInitAttempt = TimeStamp();
@@ -1367,6 +1389,107 @@ void AudioCallbackDriver::MaybeStartAudioStream() {
Start();
}
+cubeb_input_processing_params
+AudioCallbackDriver::RequestedInputProcessingParams() const {
+ MOZ_ASSERT(InIteration());
+ return mRequestedInputProcessingParams;
+}
+
+void AudioCallbackDriver::SetRequestedInputProcessingParams(
+ cubeb_input_processing_params aParams) {
+ MOZ_ASSERT(InIteration());
+ if (mRequestedInputProcessingParams == aParams) {
+ return;
+ }
+ LOG(LogLevel::Info,
+ ("AudioCallbackDriver %p, Input processing params %s requested.", this,
+ CubebUtils::ProcessingParamsToString(aParams).get()));
+ mRequestedInputProcessingParams = aParams;
+ MOZ_ALWAYS_SUCCEEDS(mCubebOperationThread->Dispatch(
+ NS_NewRunnableFunction(__func__, [this, self = RefPtr(this), aParams] {
+ SetInputProcessingParams(aParams);
+ })));
+}
+
+void AudioCallbackDriver::SetInputProcessingParams(
+ cubeb_input_processing_params aParams) {
+ MOZ_ASSERT(OnCubebOperationThread());
+ auto requested = aParams;
+ auto result = ([&]() -> Maybe<Result<cubeb_input_processing_params, int>> {
+ // This function decides how to handle the request.
+ // Returning Nothing() does nothing, because either
+ // 1) there is no update since the previous state, or
+ // 2) handling is deferred to a later time.
+ // Returning Some() result will forward that result to
+ // AudioDataListener::OnInputProcessingParamsResult on the callback
+ // thread.
+ if (!mAudioStream) {
+ // No Init yet.
+ LOG(LogLevel::Debug, ("AudioCallbackDriver %p, has no cubeb stream to "
+ "set processing params on!",
+ this));
+ return Nothing();
+ }
+ if (mAudioStreamState == AudioStreamState::None) {
+ // Driver (and cubeb stream) was stopped.
+ return Nothing();
+ }
+ cubeb_input_processing_params supported;
+ auto handle = CubebUtils::GetCubeb();
+ int r = cubeb_get_supported_input_processing_params(handle->Context(),
+ &supported);
+ if (r != CUBEB_OK) {
+ LOG(LogLevel::Debug,
+ ("AudioCallbackDriver %p, no supported processing params", this));
+ return Some(Err(CUBEB_ERROR_NOT_SUPPORTED));
+ }
+ aParams &= supported;
+ LOG(LogLevel::Debug,
+ ("AudioCallbackDriver %p, requested processing params %s reduced to %s "
+ "by supported params %s",
+ this, CubebUtils::ProcessingParamsToString(requested).get(),
+ CubebUtils::ProcessingParamsToString(aParams).get(),
+ CubebUtils::ProcessingParamsToString(supported).get()));
+ if (aParams == mConfiguredInputProcessingParams) {
+ LOG(LogLevel::Debug,
+ ("AudioCallbackDriver %p, no change in processing params %s. Not "
+ "attempting reconfiguration.",
+ this, CubebUtils::ProcessingParamsToString(aParams).get()));
+ return Some(aParams);
+ }
+ mConfiguredInputProcessingParams = aParams;
+ r = cubeb_stream_set_input_processing_params(mAudioStream, aParams);
+ if (r == CUBEB_OK) {
+ LOG(LogLevel::Info,
+ ("AudioCallbackDriver %p, input processing params set to %s", this,
+ CubebUtils::ProcessingParamsToString(aParams).get()));
+ return Some(aParams);
+ }
+ LOG(LogLevel::Info,
+ ("AudioCallbackDriver %p, failed setting input processing params to "
+ "%s. r=%d",
+ this, CubebUtils::ProcessingParamsToString(aParams).get(), r));
+ return Some(Err(r));
+ })();
+ if (!result) {
+ return;
+ }
+ MOZ_ALWAYS_SUCCEEDS(NS_DispatchToMainThread(
+ NS_NewRunnableFunction(__func__, [this, self = RefPtr(this), requested,
+ result = result.extract()]() mutable {
+ LOG(LogLevel::Debug,
+ ("AudioCallbackDriver %p, Notifying of input processing params %s. "
+ "r=%d",
+ this,
+ CubebUtils::ProcessingParamsToString(
+ result.unwrapOr(CUBEB_INPUT_PROCESSING_PARAM_NONE))
+ .get(),
+ result.isErr() ? result.inspectErr() : CUBEB_OK));
+ mGraphInterface->NotifySetRequestedInputProcessingParamsResult(
+ this, requested, std::move(result));
+ })));
+}
+
} // namespace mozilla
// avoid redefined macro in unified build
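
The backend filtering step used in SetInputProcessingParams(), in isolation (hedged sketch; ctx and stream stand for a cubeb context and stream obtained elsewhere):

    cubeb_input_processing_params supported;
    if (cubeb_get_supported_input_processing_params(ctx, &supported) == CUBEB_OK) {
      params &= supported;  // drop anything the backend cannot apply
      cubeb_stream_set_input_processing_params(stream, params);
    }
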
diff --git a/dom/media/GraphDriver.h b/dom/media/GraphDriver.h
index 9ada03e7e6..4608913c7f 100644
--- a/dom/media/GraphDriver.h
+++ b/dom/media/GraphDriver.h
@@ -30,12 +30,6 @@ class nsAutoRefTraits<cubeb_stream> : public nsPointerRefTraits<cubeb_stream> {
};
namespace mozilla {
-
-// A thread pool containing only one thread to execute the cubeb operations. We
-// should always use this thread to init, destroy, start, or stop cubeb streams,
-// to avoid data racing or deadlock issues across platforms.
-#define CUBEB_TASK_THREAD SharedThreadPool::Get("CubebOperation"_ns, 1)
-
/**
* Assume we can run an iteration of the MediaTrackGraph loop in this much time
* or less.
@@ -185,6 +179,12 @@ struct GraphInterface : public nsISupports {
virtual void NotifyInputData(const AudioDataValue* aBuffer, size_t aFrames,
TrackRate aRate, uint32_t aChannels,
uint32_t aAlreadyBuffered) = 0;
+ /* Called on the main thread after an AudioCallbackDriver has attempted an
+ * operation to set aRequestedParams on the cubeb stream. */
+ virtual void NotifySetRequestedInputProcessingParamsResult(
+ AudioCallbackDriver* aDriver,
+ cubeb_input_processing_params aRequestedParams,
+ Result<cubeb_input_processing_params, int>&& aResult) = 0;
/* Called every time there are changes to input/output audio devices like
* plug/unplug etc. This can be called on any thread, and posts a message to
* the main thread so that it can post a message to the graph thread. */
@@ -553,12 +553,12 @@ class AudioCallbackDriver : public GraphDriver, public MixerCallbackReceiver {
AudioCallbackDriver, mCubebOperationThread, override);
/** If aInputChannelCount is zero, then this driver is output-only. */
- AudioCallbackDriver(GraphInterface* aGraphInterface,
- GraphDriver* aPreviousDriver, uint32_t aSampleRate,
- uint32_t aOutputChannelCount, uint32_t aInputChannelCount,
- CubebUtils::AudioDeviceID aOutputDeviceID,
- CubebUtils::AudioDeviceID aInputDeviceID,
- AudioInputType aAudioInputType);
+ AudioCallbackDriver(
+ GraphInterface* aGraphInterface, GraphDriver* aPreviousDriver,
+ uint32_t aSampleRate, uint32_t aOutputChannelCount,
+ uint32_t aInputChannelCount, CubebUtils::AudioDeviceID aOutputDeviceID,
+ CubebUtils::AudioDeviceID aInputDeviceID, AudioInputType aAudioInputType,
+ cubeb_input_processing_params aRequestedInputProcessingParams);
void Start() override;
MOZ_CAN_RUN_SCRIPT void Shutdown() override;
@@ -610,11 +610,16 @@ class AudioCallbackDriver : public GraphDriver, public MixerCallbackReceiver {
return AudioInputType::Unknown;
}
+ /* Get the input processing params requested from this driver, so that an
+ * external caller can decide whether it is necessary to call the setter,
+ * since it may allocate or dispatch. */
+ cubeb_input_processing_params RequestedInputProcessingParams() const;
+
+ /* Set the input processing params requested from this driver. */
+ void SetRequestedInputProcessingParams(cubeb_input_processing_params aParams);
+
std::thread::id ThreadId() const { return mAudioThreadIdInCb.load(); }
- /* Called when the thread servicing the callback has changed. This can be
- * fairly expensive */
- void OnThreadIdChanged();
/* Called at the beginning of the audio callback to check if the thread id has
* changed. */
bool CheckThreadIdChanged();
@@ -637,6 +642,9 @@ class AudioCallbackDriver : public GraphDriver, public MixerCallbackReceiver {
// Returns the output latency for the current audio output stream.
TimeDuration AudioOutputLatency();
+ /* Returns true if this driver has a fallback driver and handover to the audio
+ * callback has not been completed. */
+ bool HasFallback() const;
/* Returns true if this driver is currently driven by the fallback driver. */
bool OnFallback() const;
@@ -655,6 +663,9 @@ class AudioCallbackDriver : public GraphDriver, public MixerCallbackReceiver {
void Init(const nsCString& aStreamName);
void SetCubebStreamName(const nsCString& aStreamName);
void Stop();
+ /* After the requested input processing params has changed, this applies them
+ * on the cubeb stream. */
+ void SetInputProcessingParams(cubeb_input_processing_params aParams);
/* Calls FallbackToSystemClockDriver() if in FallbackDriverState::None.
* Returns Ok(true) if the fallback driver was started, or the old
* FallbackDriverState in an Err otherwise. */
@@ -724,6 +735,13 @@ class AudioCallbackDriver : public GraphDriver, public MixerCallbackReceiver {
* must run serially for access to mAudioStream. */
const RefPtr<TaskQueue> mCubebOperationThread;
cubeb_device_pref mInputDevicePreference;
+ /* Params that have been attempted to set on mAudioStream, after filtering by
+ * supported processing params. Cubeb operation thread only. */
+ cubeb_input_processing_params mConfiguredInputProcessingParams =
+ CUBEB_INPUT_PROCESSING_PARAM_NONE;
+ /* The input processing params requested from this audio driver. Once started,
+ * audio callback thread only. */
+ cubeb_input_processing_params mRequestedInputProcessingParams;
/* Contains the id of the audio thread, from profiler_current_thread_id. */
std::atomic<ProfilerThreadId> mAudioThreadId;
/* This allows implementing AutoInCallback. This is equal to the current
diff --git a/dom/media/ImageToI420.cpp b/dom/media/ImageConversion.cpp
index 0f7976cb63..ea8d258279 100644
--- a/dom/media/ImageToI420.cpp
+++ b/dom/media/ImageConversion.cpp
@@ -3,10 +3,11 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
-#include "ImageToI420.h"
+#include "ImageConversion.h"
#include "ImageContainer.h"
#include "libyuv/convert.h"
+#include "libyuv/convert_from_argb.h"
#include "mozilla/dom/ImageBitmapBinding.h"
#include "mozilla/dom/ImageUtils.h"
#include "mozilla/gfx/Point.h"
@@ -151,4 +152,56 @@ nsresult ConvertToI420(Image* aImage, uint8_t* aDestY, int aDestStrideY,
}
}
+nsresult ConvertToNV12(layers::Image* aImage, uint8_t* aDestY, int aDestStrideY,
+ uint8_t* aDestUV, int aDestStrideUV) {
+ if (!aImage->IsValid()) {
+ return NS_ERROR_INVALID_ARG;
+ }
+
+ if (const PlanarYCbCrData* data = GetPlanarYCbCrData(aImage)) {
+ const ImageUtils imageUtils(aImage);
+ Maybe<dom::ImageBitmapFormat> format = imageUtils.GetFormat();
+ if (format.isNothing()) {
+ MOZ_ASSERT_UNREACHABLE("YUV format conversion not implemented");
+ return NS_ERROR_NOT_IMPLEMENTED;
+ }
+
+ if (format.value() != ImageBitmapFormat::YUV420P) {
+ NS_WARNING("ConvertToNV12: Convert YUV data in I420 only");
+ return NS_ERROR_NOT_IMPLEMENTED;
+ }
+
+ return MapRv(libyuv::I420ToNV12(
+ data->mYChannel, data->mYStride, data->mCbChannel, data->mCbCrStride,
+ data->mCrChannel, data->mCbCrStride, aDestY, aDestStrideY, aDestUV,
+ aDestStrideUV, aImage->GetSize().width, aImage->GetSize().height));
+ }
+
+ RefPtr<SourceSurface> surf = GetSourceSurface(aImage);
+ if (!surf) {
+ return NS_ERROR_FAILURE;
+ }
+
+ RefPtr<DataSourceSurface> data = surf->GetDataSurface();
+ if (!data) {
+ return NS_ERROR_FAILURE;
+ }
+
+ DataSourceSurface::ScopedMap map(data, DataSourceSurface::READ);
+ if (!map.IsMapped()) {
+ return NS_ERROR_FAILURE;
+ }
+
+ if (surf->GetFormat() != SurfaceFormat::B8G8R8A8 &&
+ surf->GetFormat() != SurfaceFormat::B8G8R8X8) {
+ NS_WARNING("ConvertToNV12: Convert SurfaceFormat in BGR* only");
+ return NS_ERROR_NOT_IMPLEMENTED;
+ }
+
+ return MapRv(
+ libyuv::ARGBToNV12(static_cast<uint8_t*>(map.GetData()), map.GetStride(),
+ aDestY, aDestStrideY, aDestUV, aDestStrideUV,
+ aImage->GetSize().width, aImage->GetSize().height));
+}
+
} // namespace mozilla
diff --git a/dom/media/ImageToI420.h b/dom/media/ImageConversion.h
index 24a66ebc9f..8f1396f9e9 100644
--- a/dom/media/ImageToI420.h
+++ b/dom/media/ImageConversion.h
@@ -21,6 +21,12 @@ nsresult ConvertToI420(layers::Image* aImage, uint8_t* aDestY, int aDestStrideY,
uint8_t* aDestU, int aDestStrideU, uint8_t* aDestV,
int aDestStrideV);
+/**
+ * Converts aImage to an NV12 image and writes it to the given buffers.
+ */
+nsresult ConvertToNV12(layers::Image* aImage, uint8_t* aDestY, int aDestStrideY,
+ uint8_t* aDestUV, int aDestStrideUV);
+
} // namespace mozilla
#endif /* ImageToI420Converter_h */
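
A sketch of sizing destination buffers for ConvertToNV12() (hypothetical frame size, even dimensions assumed; NV12 is a full-resolution Y plane plus one half-height interleaved UV plane):

    const int w = 640, h = 480;
    nsTArray<uint8_t> yPlane, uvPlane;
    yPlane.SetLength(w * h);          // Y plane, stride w
    uvPlane.SetLength(w * (h / 2));   // interleaved UV plane, stride w
    // image: a layers::Image* obtained elsewhere (hypothetical).
    nsresult rv = ConvertToNV12(image, yPlane.Elements(), w,
                                uvPlane.Elements(), w);
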
diff --git a/dom/media/MediaFormatReader.cpp b/dom/media/MediaFormatReader.cpp
index 7eb8e4e5e2..0d7daaa3d8 100644
--- a/dom/media/MediaFormatReader.cpp
+++ b/dom/media/MediaFormatReader.cpp
@@ -1770,6 +1770,13 @@ void MediaFormatReader::NotifyNewOutput(
if (aTrack == TrackInfo::kAudioTrack) {
decoder.mNumOfConsecutiveUtilityCrashes = 0;
}
+ decoder.mDecodePerfRecorder->Record(
+ sample->mTime.ToMicroseconds(),
+ [startTime = sample->mTime.ToMicroseconds(),
+ endTime =
+ sample->GetEndTime().ToMicroseconds()](PlaybackStage& aStage) {
+ aStage.SetStartTimeAndEndTime(startTime, endTime);
+ });
}
}
LOG("Done processing new %s samples", TrackTypeToStr(aTrack));
@@ -1962,6 +1969,38 @@ void MediaFormatReader::RequestDemuxSamples(TrackType aTrack) {
}
}
+void MediaFormatReader::DecoderData::StartRecordDecodingPerf(
+ const TrackType aTrack, const MediaRawData* aSample) {
+ if (!mDecodePerfRecorder) {
+ mDecodePerfRecorder.reset(new PerformanceRecorderMulti<PlaybackStage>());
+ }
+ const int32_t height = aTrack == TrackInfo::kVideoTrack
+ ? GetCurrentInfo()->GetAsVideoInfo()->mImage.height
+ : 0;
+ MediaInfoFlag flag = MediaInfoFlag::None;
+ flag |=
+ aSample->mKeyframe ? MediaInfoFlag::KeyFrame : MediaInfoFlag::NonKeyFrame;
+ if (aTrack == TrackInfo::kVideoTrack) {
+ flag |= mIsHardwareAccelerated ? MediaInfoFlag::HardwareDecoding
+ : MediaInfoFlag::SoftwareDecoding;
+ const nsCString& mimeType = GetCurrentInfo()->mMimeType;
+ if (MP4Decoder::IsH264(mimeType)) {
+ flag |= MediaInfoFlag::VIDEO_H264;
+ } else if (VPXDecoder::IsVPX(mimeType, VPXDecoder::VP8)) {
+ flag |= MediaInfoFlag::VIDEO_VP8;
+ } else if (VPXDecoder::IsVPX(mimeType, VPXDecoder::VP9)) {
+ flag |= MediaInfoFlag::VIDEO_VP9;
+ }
+#ifdef MOZ_AV1
+ else if (AOMDecoder::IsAV1(mimeType)) {
+ flag |= MediaInfoFlag::VIDEO_AV1;
+ }
+#endif
+ }
+ mDecodePerfRecorder->Start(aSample->mTime.ToMicroseconds(),
+ MediaStage::RequestDecode, height, flag);
+}
+
void MediaFormatReader::DecodeDemuxedSamples(TrackType aTrack,
MediaRawData* aSample) {
MOZ_ASSERT(OnTaskQueue());
@@ -1980,41 +2019,15 @@ void MediaFormatReader::DecodeDemuxedSamples(TrackType aTrack,
aSample->mDuration.ToMicroseconds(), aSample->mKeyframe ? " kf" : "",
aSample->mEOS ? " eos" : "");
- const int32_t height =
- aTrack == TrackInfo::kVideoTrack
- ? decoder.GetCurrentInfo()->GetAsVideoInfo()->mImage.height
- : 0;
- MediaInfoFlag flag = MediaInfoFlag::None;
- flag |=
- aSample->mKeyframe ? MediaInfoFlag::KeyFrame : MediaInfoFlag::NonKeyFrame;
- if (aTrack == TrackInfo::kVideoTrack) {
- flag |= VideoIsHardwareAccelerated() ? MediaInfoFlag::HardwareDecoding
- : MediaInfoFlag::SoftwareDecoding;
- const nsCString& mimeType = decoder.GetCurrentInfo()->mMimeType;
- if (MP4Decoder::IsH264(mimeType)) {
- flag |= MediaInfoFlag::VIDEO_H264;
- } else if (VPXDecoder::IsVPX(mimeType, VPXDecoder::VP8)) {
- flag |= MediaInfoFlag::VIDEO_VP8;
- } else if (VPXDecoder::IsVPX(mimeType, VPXDecoder::VP9)) {
- flag |= MediaInfoFlag::VIDEO_VP9;
- }
-#ifdef MOZ_AV1
- else if (AOMDecoder::IsAV1(mimeType)) {
- flag |= MediaInfoFlag::VIDEO_AV1;
- }
-#endif
- }
- PerformanceRecorder<PlaybackStage> perfRecorder(MediaStage::RequestDecode,
- height, flag);
+ decoder.StartRecordDecodingPerf(aTrack, aSample);
if (mMediaEngineId && aSample->mCrypto.IsEncrypted()) {
aSample->mShouldCopyCryptoToRemoteRawData = true;
}
decoder.mDecoder->Decode(aSample)
->Then(
mTaskQueue, __func__,
- [self, aTrack, &decoder, perfRecorder(std::move(perfRecorder))](
- MediaDataDecoder::DecodedData&& aResults) mutable {
- perfRecorder.Record();
+ [self, aTrack,
+ &decoder](MediaDataDecoder::DecodedData&& aResults) mutable {
decoder.mDecodeRequest.Complete();
self->NotifyNewOutput(aTrack, std::move(aResults));
},
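The refactor above replaces a stack PerformanceRecorder that each Decode() lambda had to capture with a single PerformanceRecorderMulti owned by DecoderData and keyed by the sample's start time: StartRecordDecodingPerf() opens a stage when a sample is submitted, and NotifyNewOutput() closes the stage whose key matches the returned frame, which also works for decoders that queue several inputs before emitting output. The pairing, reduced to a sketch with illustrative values:

// Sketch: start/record pairing keyed by sample start time in microseconds.
PerformanceRecorderMulti<PlaybackStage> recorder;
const int64_t id = aSample->mTime.ToMicroseconds();  // aSample: illustrative
MediaInfoFlag flag = MediaInfoFlag::KeyFrame | MediaInfoFlag::VIDEO_H264;
recorder.Start(id, MediaStage::RequestDecode, /* aHeight */ 1080, flag);
// ... later, when the decoded frame with the same start time comes back:
recorder.Record(id, [](PlaybackStage& aStage) {
  // annotate the stage, e.g. with start/end times as in NotifyNewOutput()
});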
diff --git a/dom/media/MediaFormatReader.h b/dom/media/MediaFormatReader.h
index 5c4e04172d..b29d25db83 100644
--- a/dom/media/MediaFormatReader.h
+++ b/dom/media/MediaFormatReader.h
@@ -430,6 +430,7 @@ class MediaFormatReader final
Maybe<TimeStamp> mWaitingForDataStartTime;
bool mWaitingForKey;
bool mReceivedNewData;
+ UniquePtr<PerformanceRecorderMulti<PlaybackStage>> mDecodePerfRecorder;
// Pending seek.
MozPromiseRequestHolder<MediaTrackDemuxer::SeekPromise> mSeekRequest;
@@ -474,6 +475,9 @@ class MediaFormatReader final
mDrainState = DrainState::DrainRequested;
}
+ void StartRecordDecodingPerf(const TrackType aTrack,
+ const MediaRawData* aSample);
+
// Track decoding error and fail when we hit the limit.
uint32_t mNumOfConsecutiveDecodingError;
uint32_t mMaxConsecutiveDecodingError;
@@ -552,7 +556,7 @@ class MediaFormatReader final
// Rejecting the promise will stop the reader from decoding ahead.
virtual bool HasPromise() const = 0;
virtual void RejectPromise(const MediaResult& aError,
- const char* aMethodName) = 0;
+ StaticString aMethodName) = 0;
// Clear track demuxer related data.
void ResetDemuxer() {
@@ -688,20 +692,20 @@ class MediaFormatReader final
bool HasPromise() const override { return mHasPromise; }
- RefPtr<DataPromise<Type>> EnsurePromise(const char* aMethodName) {
+ RefPtr<DataPromise<Type>> EnsurePromise(StaticString aMethodName) {
MOZ_ASSERT(mOwner->OnTaskQueue());
mHasPromise = true;
return mPromise.Ensure(aMethodName);
}
- void ResolvePromise(Type* aData, const char* aMethodName) {
+ void ResolvePromise(Type* aData, StaticString aMethodName) {
MOZ_ASSERT(mOwner->OnTaskQueue());
mPromise.Resolve(aData, aMethodName);
mHasPromise = false;
}
void RejectPromise(const MediaResult& aError,
- const char* aMethodName) override {
+ StaticString aMethodName) override {
MOZ_ASSERT(mOwner->OnTaskQueue());
mPromise.Reject(aError, aMethodName);
mHasPromise = false;
diff --git a/dom/media/MediaInfo.h b/dom/media/MediaInfo.h
index 7ab5df4e0a..00f322bac7 100644
--- a/dom/media/MediaInfo.h
+++ b/dom/media/MediaInfo.h
@@ -349,7 +349,32 @@ class VideoInfo : public TrackInfo {
mExtraData(new MediaByteBuffer),
mRotation(VideoRotation::kDegree_0) {}
- VideoInfo(const VideoInfo& aOther) = default;
+ VideoInfo(const VideoInfo& aOther) : TrackInfo(aOther) {
+ if (aOther.mCodecSpecificConfig) {
+ mCodecSpecificConfig = new MediaByteBuffer();
+ mCodecSpecificConfig->AppendElements(
+ reinterpret_cast<uint8_t*>(aOther.mCodecSpecificConfig->Elements()),
+ aOther.mCodecSpecificConfig->Length());
+ }
+ if (aOther.mExtraData) {
+ mExtraData = new MediaByteBuffer();
+ mExtraData->AppendElements(
+ reinterpret_cast<uint8_t*>(aOther.mExtraData->Elements()),
+ aOther.mExtraData->Length());
+ }
+ mDisplay = aOther.mDisplay;
+ mStereoMode = aOther.mStereoMode;
+ mImage = aOther.mImage;
+ mRotation = aOther.mRotation;
+ mColorDepth = aOther.mColorDepth;
+ mColorSpace = aOther.mColorSpace;
+ mColorPrimaries = aOther.mColorPrimaries;
+ mTransferFunction = aOther.mTransferFunction;
+ mColorRange = aOther.mColorRange;
+ mImageRect = aOther.mImageRect;
+ mAlphaPresent = aOther.mAlphaPresent;
+ mFrameRate = aOther.mFrameRate;
+  }
bool operator==(const VideoInfo& rhs) const;
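The hand-written copy constructor replaces the defaulted one because mCodecSpecificConfig and mExtraData are RefPtr<MediaByteBuffer>: a member-wise copy would share one buffer between both VideoInfo objects, so an in-place mutation through either copy would leak into the other. A standalone illustration of what the deep copy guarantees (values hypothetical):

// Sketch: the deep copy yields equal contents in distinct allocations.
VideoInfo a;
a.mExtraData->AppendElement(0x42);
VideoInfo b(a);
MOZ_ASSERT(a.mExtraData != b.mExtraData);    // no aliasing
MOZ_ASSERT(*a.mExtraData == *b.mExtraData);  // same bytes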
diff --git a/dom/media/MediaManager.cpp b/dom/media/MediaManager.cpp
index fb4384c826..0eb1a0977d 100644
--- a/dom/media/MediaManager.cpp
+++ b/dom/media/MediaManager.cpp
@@ -10,13 +10,16 @@
#include "AudioDeviceInfo.h"
#include "AudioStreamTrack.h"
#include "CubebDeviceEnumerator.h"
+#include "CubebInputStream.h"
#include "MediaTimer.h"
#include "MediaTrackConstraints.h"
#include "MediaTrackGraph.h"
#include "MediaTrackListener.h"
#include "VideoStreamTrack.h"
+#include "Tracing.h"
#include "VideoUtils.h"
#include "mozilla/Base64.h"
+#include "mozilla/EventTargetCapability.h"
#include "mozilla/MozPromise.h"
#include "mozilla/NullPrincipal.h"
#include "mozilla/PeerIdentity.h"
@@ -291,6 +294,72 @@ void MediaManager::CallOnSuccess(GetUserMediaSuccessCallback& aCallback,
aCallback.Call(aStream);
}
+enum class PersistentPermissionState : uint32_t {
+ Unknown = nsIPermissionManager::UNKNOWN_ACTION,
+ Allow = nsIPermissionManager::ALLOW_ACTION,
+ Deny = nsIPermissionManager::DENY_ACTION,
+ Prompt = nsIPermissionManager::PROMPT_ACTION,
+};
+
+static PersistentPermissionState CheckPermission(
+ PersistentPermissionState aPermission) {
+ switch (aPermission) {
+ case PersistentPermissionState::Unknown:
+ case PersistentPermissionState::Allow:
+ case PersistentPermissionState::Deny:
+ case PersistentPermissionState::Prompt:
+ return aPermission;
+ }
+ MOZ_CRASH("Unexpected permission value");
+}
+
+struct WindowPersistentPermissionState {
+ PersistentPermissionState mCameraPermission;
+ PersistentPermissionState mMicrophonePermission;
+};
+
+static Result<WindowPersistentPermissionState, nsresult>
+GetPersistentPermissions(uint64_t aWindowId) {
+ auto* window = nsGlobalWindowInner::GetInnerWindowWithId(aWindowId);
+ if (NS_WARN_IF(!window) || NS_WARN_IF(!window->GetPrincipal())) {
+ return Err(NS_ERROR_INVALID_ARG);
+ }
+
+ Document* doc = window->GetExtantDoc();
+ if (NS_WARN_IF(!doc)) {
+ return Err(NS_ERROR_INVALID_ARG);
+ }
+
+ nsIPrincipal* principal = window->GetPrincipal();
+ if (NS_WARN_IF(!principal)) {
+ return Err(NS_ERROR_INVALID_ARG);
+ }
+
+ nsresult rv;
+ RefPtr<PermissionDelegateHandler> permDelegate =
+ doc->GetPermissionDelegateHandler();
+ if (NS_WARN_IF(!permDelegate)) {
+ return Err(NS_ERROR_INVALID_ARG);
+ }
+
+ uint32_t audio = nsIPermissionManager::UNKNOWN_ACTION;
+ uint32_t video = nsIPermissionManager::UNKNOWN_ACTION;
+ {
+ rv = permDelegate->GetPermission("microphone"_ns, &audio, true);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return Err(rv);
+ }
+ rv = permDelegate->GetPermission("camera"_ns, &video, true);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return Err(rv);
+ }
+ }
+
+ return WindowPersistentPermissionState{
+ CheckPermission(static_cast<PersistentPermissionState>(video)),
+ CheckPermission(static_cast<PersistentPermissionState>(audio))};
+}
+
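GetPersistentPermissions() factors out the window, document, principal, and PermissionDelegateHandler chain that IsActivelyCapturingOrHasAPermission() used to inline (rewritten near the end of this file's diff), and returning a Result lets each caller choose its own fallback for lookup failure. The consumption pattern used twice in this patch, sketched with a hypothetical windowId:

// Sketch: map the Result to a bool, with an explicit fallback on error.
const bool micDenied =
    GetPersistentPermissions(windowId)
        .map([](auto&& aState) {
          return aState.mMicrophonePermission ==
                 PersistentPermissionState::Deny;
        })
        .unwrapOr(true);  // a failed lookup counts as denied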
/**
* DeviceListener has threadsafe refcounting for use across the main, media and
* MTG threads. But it has a non-threadsafe SupportsWeakPtr for WeakPtr usage
@@ -1458,9 +1527,63 @@ class GetUserMediaStreamTask final : public GetUserMediaTask {
const MediaStreamConstraints& GetConstraints() { return mConstraints; }
+ void PrimeVoiceProcessing() {
+ mPrimingStream = MakeAndAddRef<PrimingCubebVoiceInputStream>();
+ mPrimingStream->Init();
+ }
+
private:
void PrepareDOMStream();
+ class PrimingCubebVoiceInputStream {
+ class Listener final : public CubebInputStream::Listener {
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(Listener, override);
+
+ private:
+ ~Listener() = default;
+
+ long DataCallback(const void*, long) override {
+ MOZ_CRASH("Unexpected data callback");
+ }
+ void StateCallback(cubeb_state) override {}
+ void DeviceChangedCallback() override {}
+ };
+
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING_WITH_DELETE_ON_EVENT_TARGET(
+ PrimingCubebVoiceInputStream, mCubebThread.GetEventTarget())
+
+ public:
+ void Init() {
+ mCubebThread.GetEventTarget()->Dispatch(
+ NS_NewRunnableFunction(__func__, [this, self = RefPtr(this)] {
+ mCubebThread.AssertOnCurrentThread();
+ LOG("Priming voice processing with stream %p", this);
+ TRACE("PrimingCubebVoiceInputStream::Init");
+ const cubeb_devid default_device = nullptr;
+ const uint32_t mono = 1;
+ const uint32_t rate = CubebUtils::PreferredSampleRate(false);
+ const bool isVoice = true;
+ mCubebStream =
+ CubebInputStream::Create(default_device, mono, rate, isVoice,
+ MakeRefPtr<Listener>().get());
+ }));
+ }
+
+ private:
+ ~PrimingCubebVoiceInputStream() {
+ mCubebThread.AssertOnCurrentThread();
+ LOG("Releasing primed voice processing stream %p", this);
+ mCubebStream = nullptr;
+ }
+
+ const EventTargetCapability<nsISerialEventTarget> mCubebThread =
+ EventTargetCapability<nsISerialEventTarget>(
+ TaskQueue::Create(CubebUtils::GetCubebOperationThread(),
+ "PrimingCubebInputStream::mCubebThread")
+ .get());
+ UniquePtr<CubebInputStream> mCubebStream MOZ_GUARDED_BY(mCubebThread);
+ };
+
// Constraints derived from those passed to getUserMedia() but adjusted for
// preferences, defaults, and security
const MediaStreamConstraints mConstraints;
@@ -1473,6 +1596,7 @@ class GetUserMediaStreamTask final : public GetUserMediaTask {
// MediaDevices are set when selected and Allowed() by the UI.
RefPtr<LocalMediaDevice> mAudioDevice;
RefPtr<LocalMediaDevice> mVideoDevice;
+ RefPtr<PrimingCubebVoiceInputStream> mPrimingStream;
// Tracking id unique for a video frame source. Set when the corresponding
// device has been allocated.
Maybe<TrackingId> mVideoTrackingId;
@@ -2220,6 +2344,7 @@ MediaManager::MediaManager(already_AddRefed<TaskQueue> aMediaThread)
mPrefs.mWidth = 0; // adaptive default
mPrefs.mHeight = 0; // adaptive default
mPrefs.mFPS = MediaEnginePrefs::DEFAULT_VIDEO_FPS;
+ mPrefs.mUsePlatformProcessing = false;
mPrefs.mAecOn = false;
mPrefs.mUseAecMobile = false;
mPrefs.mAgcOn = false;
@@ -2272,14 +2397,14 @@ static void ForeachObservedPref(const Function& aFunction) {
aFunction("media.video_loopback_dev"_ns);
aFunction("media.getusermedia.fake-camera-name"_ns);
#ifdef MOZ_WEBRTC
- aFunction("media.getusermedia.aec_enabled"_ns);
- aFunction("media.getusermedia.aec"_ns);
- aFunction("media.getusermedia.agc_enabled"_ns);
- aFunction("media.getusermedia.agc"_ns);
- aFunction("media.getusermedia.hpf_enabled"_ns);
- aFunction("media.getusermedia.noise_enabled"_ns);
- aFunction("media.getusermedia.noise"_ns);
- aFunction("media.getusermedia.channels"_ns);
+ aFunction("media.getusermedia.audio.processing.aec.enabled"_ns);
+ aFunction("media.getusermedia.audio.processing.aec"_ns);
+ aFunction("media.getusermedia.audio.processing.agc.enabled"_ns);
+ aFunction("media.getusermedia.audio.processing.agc"_ns);
+ aFunction("media.getusermedia.audio.processing.hpf.enabled"_ns);
+ aFunction("media.getusermedia.audio.processing.noise.enabled"_ns);
+ aFunction("media.getusermedia.audio.processing.noise"_ns);
+ aFunction("media.getusermedia.audio.max_channels"_ns);
aFunction("media.navigator.streams.fake"_ns);
#endif
}
@@ -2392,7 +2517,7 @@ void MediaManager::Dispatch(already_AddRefed<Runnable> task) {
template <typename MozPromiseType, typename FunctionType>
/* static */
-RefPtr<MozPromiseType> MediaManager::Dispatch(const char* aName,
+RefPtr<MozPromiseType> MediaManager::Dispatch(StaticString aName,
FunctionType&& aFunction) {
MozPromiseHolder<MozPromiseType> holder;
RefPtr<MozPromiseType> promise = holder.Ensure(aName);
@@ -2851,7 +2976,7 @@ RefPtr<MediaManager::StreamPromise> MediaManager::GetUserMedia(
case MediaSourceEnum::AudioCapture:
// Only enable AudioCapture if the pref is enabled. If it's not, we can
// deny right away.
- if (!Preferences::GetBool("media.getusermedia.audiocapture.enabled")) {
+ if (!Preferences::GetBool("media.getusermedia.audio.capture.enabled")) {
return StreamPromise::CreateAndReject(
MakeRefPtr<MediaMgrError>(MediaMgrError::Name::NotAllowedError),
__func__);
@@ -3044,6 +3169,36 @@ RefPtr<MediaManager::StreamPromise> MediaManager::GetUserMedia(
std::move(audioListener), std::move(videoListener), prefs,
principalInfo, aCallerType, focusSource);
+    // Now that it is time to ask for user permission, prime voice
+    // processing. A local lambda enables a guard pattern with early returns.
+ [&] {
+ if (!StaticPrefs::
+ media_getusermedia_microphone_voice_stream_priming_enabled() ||
+ !StaticPrefs::
+ media_getusermedia_microphone_prefer_voice_stream_with_processing_enabled()) {
+ return;
+ }
+
+ if (const auto fc = FlattenedConstraints(
+ NormalizedConstraints(GetInvariant(c.mAudio)));
+ !fc.mEchoCancellation.Get(prefs.mAecOn) &&
+ !fc.mAutoGainControl.Get(prefs.mAgcOn && prefs.mAecOn) &&
+ !fc.mNoiseSuppression.Get(prefs.mNoiseOn && prefs.mAecOn)) {
+ return;
+ }
+
+ if (GetPersistentPermissions(windowID)
+ .map([](auto&& aState) {
+ return aState.mMicrophonePermission ==
+ PersistentPermissionState::Deny;
+ })
+ .unwrapOr(true)) {
+ return;
+ }
+
+ task->PrimeVoiceProcessing();
+ }();
+
size_t taskCount =
self->AddTaskAndGetCount(windowID, callID, std::move(task));
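The immediately-invoked lambda above exists purely so each precondition can bail out with a plain return instead of nesting if/else several levels deep inside GetUserMedia(). The shape of the pattern in isolation (guards and work are placeholders):

// Sketch of the guard pattern via an immediately-invoked lambda.
const bool prefEnabled = true;       // hypothetical guard inputs
const bool wantsProcessing = true;
[&] {
  if (!prefEnabled) {
    return;  // guard 1
  }
  if (!wantsProcessing) {
    return;  // guard 2
  }
  // ... the work, e.g. task->PrimeVoiceProcessing() above
}();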
@@ -3474,14 +3629,19 @@ void MediaManager::GetPrefs(nsIPrefBranch* aBranch, const char* aData) {
GetPref(aBranch, "media.navigator.audio.fake_frequency", aData,
&mPrefs.mFreq);
#ifdef MOZ_WEBRTC
- GetPrefBool(aBranch, "media.getusermedia.aec_enabled", aData, &mPrefs.mAecOn);
- GetPrefBool(aBranch, "media.getusermedia.agc_enabled", aData, &mPrefs.mAgcOn);
- GetPrefBool(aBranch, "media.getusermedia.hpf_enabled", aData, &mPrefs.mHPFOn);
- GetPrefBool(aBranch, "media.getusermedia.noise_enabled", aData,
- &mPrefs.mNoiseOn);
- GetPrefBool(aBranch, "media.getusermedia.transient_enabled", aData,
- &mPrefs.mTransientOn);
- GetPrefBool(aBranch, "media.getusermedia.agc2_forced", aData,
+ GetPrefBool(aBranch, "media.getusermedia.audio.processing.platform.enabled",
+ aData, &mPrefs.mUsePlatformProcessing);
+ GetPrefBool(aBranch, "media.getusermedia.audio.processing.aec.enabled", aData,
+ &mPrefs.mAecOn);
+ GetPrefBool(aBranch, "media.getusermedia.audio.processing.agc.enabled", aData,
+ &mPrefs.mAgcOn);
+ GetPrefBool(aBranch, "media.getusermedia.audio.processing.hpf.enabled", aData,
+ &mPrefs.mHPFOn);
+ GetPrefBool(aBranch, "media.getusermedia.audio.processing.noise.enabled",
+ aData, &mPrefs.mNoiseOn);
+ GetPrefBool(aBranch, "media.getusermedia.audio.processing.transient.enabled",
+ aData, &mPrefs.mTransientOn);
+ GetPrefBool(aBranch, "media.getusermedia.audio.processing.agc2.forced", aData,
&mPrefs.mAgc2Forced);
// Use 0 or 1 to force to false or true
// EchoCanceller3Config::echo_removal_control.has_clock_drift.
@@ -3489,14 +3649,19 @@ void MediaManager::GetPrefs(nsIPrefBranch* aBranch, const char* aData) {
// deemed appropriate.
GetPref(aBranch, "media.getusermedia.audio.processing.aec.expect_drift",
aData, &mPrefs.mExpectDrift);
- GetPref(aBranch, "media.getusermedia.agc", aData, &mPrefs.mAgc);
- GetPref(aBranch, "media.getusermedia.noise", aData, &mPrefs.mNoise);
- GetPref(aBranch, "media.getusermedia.channels", aData, &mPrefs.mChannels);
+ GetPref(aBranch, "media.getusermedia.audio.processing.agc", aData,
+ &mPrefs.mAgc);
+ GetPref(aBranch, "media.getusermedia.audio.processing.noise", aData,
+ &mPrefs.mNoise);
+ GetPref(aBranch, "media.getusermedia.audio.max_channels", aData,
+ &mPrefs.mChannels);
#endif
- LOG("%s: default prefs: %dx%d @%dfps, %dHz test tones, aec: %s, "
- "agc: %s, hpf: %s, noise: %s, drift: %s, agc level: %d, agc version: %s, "
- "noise level: %d, transient: %s, channels %d",
+ LOG("%s: default prefs: %dx%d @%dfps, %dHz test tones, platform processing: "
+ "%s, aec: %s, agc: %s, hpf: %s, noise: %s, drift: %s, agc level: %d, agc "
+      "version: %s, noise level: %d, transient: %s, channels %d",
__FUNCTION__, mPrefs.mWidth, mPrefs.mHeight, mPrefs.mFPS, mPrefs.mFreq,
+ mPrefs.mUsePlatformProcessing ? "on" : "off",
mPrefs.mAecOn ? "on" : "off", mPrefs.mAgcOn ? "on" : "off",
mPrefs.mHPFOn ? "on" : "off", mPrefs.mNoiseOn ? "on" : "off",
mPrefs.mExpectDrift < 0 ? "auto"
@@ -3948,43 +4113,13 @@ bool MediaManager::IsActivelyCapturingOrHasAPermission(uint64_t aWindowId) {
// Or are persistent permissions (audio or video) granted?
- auto* window = nsGlobalWindowInner::GetInnerWindowWithId(aWindowId);
- if (NS_WARN_IF(!window) || NS_WARN_IF(!window->GetPrincipal())) {
- return false;
- }
-
- Document* doc = window->GetExtantDoc();
- if (NS_WARN_IF(!doc)) {
- return false;
- }
-
- nsIPrincipal* principal = window->GetPrincipal();
- if (NS_WARN_IF(!principal)) {
- return false;
- }
-
- // Check if this site has persistent permissions.
- nsresult rv;
- RefPtr<PermissionDelegateHandler> permDelegate =
- doc->GetPermissionDelegateHandler();
- if (NS_WARN_IF(!permDelegate)) {
- return false;
- }
-
- uint32_t audio = nsIPermissionManager::UNKNOWN_ACTION;
- uint32_t video = nsIPermissionManager::UNKNOWN_ACTION;
- {
- rv = permDelegate->GetPermission("microphone"_ns, &audio, true);
- if (NS_WARN_IF(NS_FAILED(rv))) {
- return false;
- }
- rv = permDelegate->GetPermission("camera"_ns, &video, true);
- if (NS_WARN_IF(NS_FAILED(rv))) {
- return false;
- }
- }
- return audio == nsIPermissionManager::ALLOW_ACTION ||
- video == nsIPermissionManager::ALLOW_ACTION;
+ return GetPersistentPermissions(aWindowId)
+ .map([](auto&& aState) {
+ return aState.mMicrophonePermission ==
+ PersistentPermissionState::Allow ||
+ aState.mCameraPermission == PersistentPermissionState::Allow;
+ })
+ .unwrapOr(false);
}
DeviceListener::DeviceListener()
diff --git a/dom/media/MediaManager.h b/dom/media/MediaManager.h
index 738ccd795d..81f23bfe05 100644
--- a/dom/media/MediaManager.h
+++ b/dom/media/MediaManager.h
@@ -220,7 +220,7 @@ class MediaManager final : public nsIMediaManagerService,
* manager thread.
*/
template <typename MozPromiseType, typename FunctionType>
- static RefPtr<MozPromiseType> Dispatch(const char* aName,
+ static RefPtr<MozPromiseType> Dispatch(StaticString aName,
FunctionType&& aFunction);
#ifdef DEBUG
diff --git a/dom/media/MediaResult.h b/dom/media/MediaResult.h
index 58e3bf7671..9b4031e95c 100644
--- a/dom/media/MediaResult.h
+++ b/dom/media/MediaResult.h
@@ -9,7 +9,7 @@
#include "nsString.h" // Required before 'mozilla/ErrorNames.h'!?
#include "mozilla/ErrorNames.h"
-#include "mozilla/TimeStamp.h"
+#include "mozilla/IntegerPrintfMacros.h"
#include "nsError.h"
#include "nsPrintfCString.h"
diff --git a/dom/media/MediaTimer.cpp b/dom/media/MediaTimer.cpp
index c34eb816fa..231e0e2441 100644
--- a/dom/media/MediaTimer.cpp
+++ b/dom/media/MediaTimer.cpp
@@ -68,12 +68,12 @@ bool MediaTimer::OnMediaTimerThread() {
}
RefPtr<MediaTimerPromise> MediaTimer::WaitFor(const TimeDuration& aDuration,
- const char* aCallSite) {
+ StaticString aCallSite) {
return WaitUntil(TimeStamp::Now() + aDuration, aCallSite);
}
RefPtr<MediaTimerPromise> MediaTimer::WaitUntil(const TimeStamp& aTimeStamp,
- const char* aCallSite) {
+ StaticString aCallSite) {
MonitorAutoLock mon(mMonitor);
TIMER_LOG("MediaTimer::WaitUntil %" PRId64, RelativeMicroseconds(aTimeStamp));
Entry e(aTimeStamp, aCallSite);
diff --git a/dom/media/MediaTimer.h b/dom/media/MediaTimer.h
index 837a1591b3..2ab3f2e569 100644
--- a/dom/media/MediaTimer.h
+++ b/dom/media/MediaTimer.h
@@ -44,9 +44,9 @@ class MediaTimer {
DispatchDestroy());
RefPtr<MediaTimerPromise> WaitFor(const TimeDuration& aDuration,
- const char* aCallSite);
+ StaticString aCallSite);
RefPtr<MediaTimerPromise> WaitUntil(const TimeStamp& aTimeStamp,
- const char* aCallSite);
+ StaticString aCallSite);
void Cancel(); // Cancel and reject any unresolved promises with false.
private:
@@ -81,7 +81,7 @@ class MediaTimer {
TimeStamp mTimeStamp;
RefPtr<MediaTimerPromise::Private> mPromise;
- explicit Entry(const TimeStamp& aTimeStamp, const char* aCallSite)
+ explicit Entry(const TimeStamp& aTimeStamp, StaticString aCallSite)
: mTimeStamp(aTimeStamp),
mPromise(new MediaTimerPromise::Private(aCallSite)) {}
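The const char* to StaticString migrations in this patch (MediaTimer, SeekJob, MediaManager::Dispatch, the reader's promise helpers) share one motivation: StaticString is only constructible from a string literal (a char array), so the callee can hold the name without lifetime concerns. Existing call sites that pass __func__ or a literal compile unchanged; a hedged sketch, with timer standing for an existing RefPtr<MediaTimer>:

// Sketch: literals and __func__ convert implicitly.
RefPtr<MediaTimerPromise> p =
    timer->WaitFor(TimeDuration::FromMilliseconds(50), __func__);
// A heap-built name would no longer compile:
//   nsAutoCString name = BuildName();   // hypothetical
//   timer->WaitFor(delay, name.get());  // error: const char* is rejected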
diff --git a/dom/media/MediaTrackGraph.cpp b/dom/media/MediaTrackGraph.cpp
index a4f81d0071..e9356548b8 100644
--- a/dom/media/MediaTrackGraph.cpp
+++ b/dom/media/MediaTrackGraph.cpp
@@ -440,11 +440,10 @@ void MediaTrackGraphImpl::CheckDriver() {
NativeInputTrack* native =
mDeviceInputTrackManagerGraphThread.GetNativeInputTrack();
CubebUtils::AudioDeviceID inputDevice = native ? native->mDeviceId : nullptr;
- uint32_t inputChannelCount =
- native ? AudioInputChannelCount(native->mDeviceId) : 0;
- AudioInputType inputPreference =
- native ? AudioInputDevicePreference(native->mDeviceId)
- : AudioInputType::Unknown;
+ uint32_t inputChannelCount = AudioInputChannelCount(inputDevice);
+ AudioInputType inputPreference = AudioInputDevicePreference(inputDevice);
+ cubeb_input_processing_params inputProcessingParams =
+ RequestedAudioInputProcessingParams(inputDevice);
uint32_t primaryOutputChannelCount = PrimaryOutputChannelCount();
if (!audioCallbackDriver) {
@@ -452,7 +451,7 @@ void MediaTrackGraphImpl::CheckDriver() {
AudioCallbackDriver* driver = new AudioCallbackDriver(
this, CurrentDriver(), mSampleRate, primaryOutputChannelCount,
inputChannelCount, PrimaryOutputDeviceID(), inputDevice,
- inputPreference);
+ inputPreference, inputProcessingParams);
SwitchAtNextIteration(driver);
}
return;
@@ -468,9 +467,14 @@ void MediaTrackGraphImpl::CheckDriver() {
AudioCallbackDriver* driver = new AudioCallbackDriver(
this, CurrentDriver(), mSampleRate, primaryOutputChannelCount,
inputChannelCount, PrimaryOutputDeviceID(), inputDevice,
- inputPreference);
+ inputPreference, inputProcessingParams);
SwitchAtNextIteration(driver);
}
+
+ if (native) {
+ audioCallbackDriver->SetRequestedInputProcessingParams(
+ inputProcessingParams);
+ }
}
void MediaTrackGraphImpl::UpdateTrackOrder() {
@@ -770,7 +774,8 @@ void MediaTrackGraphImpl::OpenAudioInputImpl(DeviceInputTrack* aTrack) {
AudioCallbackDriver* driver = new AudioCallbackDriver(
this, CurrentDriver(), mSampleRate, PrimaryOutputChannelCount(),
AudioInputChannelCount(aTrack->mDeviceId), PrimaryOutputDeviceID(),
- aTrack->mDeviceId, AudioInputDevicePreference(aTrack->mDeviceId));
+ aTrack->mDeviceId, AudioInputDevicePreference(aTrack->mDeviceId),
+ aTrack->RequestedProcessingParams());
LOG(LogLevel::Debug,
("%p OpenAudioInputImpl: starting new AudioCallbackDriver(input) %p",
this, driver));
@@ -842,7 +847,8 @@ void MediaTrackGraphImpl::CloseAudioInputImpl(DeviceInputTrack* aTrack) {
driver = new AudioCallbackDriver(
this, CurrentDriver(), mSampleRate, PrimaryOutputChannelCount(),
AudioInputChannelCount(aTrack->mDeviceId), PrimaryOutputDeviceID(),
- nullptr, AudioInputDevicePreference(aTrack->mDeviceId));
+ nullptr, AudioInputDevicePreference(aTrack->mDeviceId),
+ aTrack->RequestedProcessingParams());
SwitchAtNextIteration(driver);
} else if (CurrentDriver()->AsAudioCallbackDriver()) {
LOG(LogLevel::Debug,
@@ -937,6 +943,32 @@ void MediaTrackGraphImpl::NotifyInputData(const AudioDataValue* aBuffer,
aAlreadyBuffered);
}
+void MediaTrackGraphImpl::NotifySetRequestedInputProcessingParamsResult(
+ AudioCallbackDriver* aDriver,
+ cubeb_input_processing_params aRequestedParams,
+ Result<cubeb_input_processing_params, int>&& aResult) {
+ MOZ_ASSERT(NS_IsMainThread());
+ NativeInputTrack* native =
+ mDeviceInputTrackManagerMainThread.GetNativeInputTrack();
+ if (!native) {
+ return;
+ }
+ QueueControlMessageWithNoShutdown([this, self = RefPtr(this),
+ driver = RefPtr(aDriver), aRequestedParams,
+ result = std::move(aResult)]() mutable {
+ NativeInputTrack* native =
+ mDeviceInputTrackManagerGraphThread.GetNativeInputTrack();
+ if (!native) {
+ return;
+ }
+ if (driver != mDriver) {
+ return;
+ }
+ native->NotifySetRequestedProcessingParamsResult(this, aRequestedParams,
+ result);
+ });
+}
+
void MediaTrackGraphImpl::DeviceChangedImpl() {
MOZ_ASSERT(OnGraphThread());
NativeInputTrack* native =
@@ -1115,7 +1147,7 @@ void MediaTrackGraphImpl::ReevaluateInputDevice(CubebUtils::AudioDeviceID aID) {
AudioCallbackDriver* newDriver = new AudioCallbackDriver(
this, CurrentDriver(), mSampleRate, PrimaryOutputChannelCount(),
AudioInputChannelCount(aID), PrimaryOutputDeviceID(), aID,
- AudioInputDevicePreference(aID));
+ AudioInputDevicePreference(aID), track->RequestedProcessingParams());
SwitchAtNextIteration(newDriver);
}
}
@@ -3459,7 +3491,7 @@ void MediaTrackGraphImpl::Init(GraphDriverType aDriverRequested,
// for the input channel.
mDriver = new AudioCallbackDriver(
this, nullptr, mSampleRate, aChannelCount, 0, PrimaryOutputDeviceID(),
- nullptr, AudioInputType::Unknown);
+ nullptr, AudioInputType::Unknown, CUBEB_INPUT_PROCESSING_PARAM_NONE);
} else {
mDriver = new SystemClockDriver(this, nullptr, mSampleRate);
}
@@ -4257,6 +4289,15 @@ AudioInputType MediaTrackGraphImpl::AudioInputDevicePreference(
: AudioInputType::Unknown;
}
+cubeb_input_processing_params
+MediaTrackGraphImpl::RequestedAudioInputProcessingParams(
+ CubebUtils::AudioDeviceID aID) {
+ MOZ_ASSERT(OnGraphThreadOrNotRunning());
+ DeviceInputTrack* t =
+ mDeviceInputTrackManagerGraphThread.GetDeviceInputTrack(aID);
+ return t ? t->RequestedProcessingParams() : CUBEB_INPUT_PROCESSING_PARAM_NONE;
+}
+
void MediaTrackGraphImpl::SetNewNativeInput() {
MOZ_ASSERT(NS_IsMainThread());
MOZ_ASSERT(!mDeviceInputTrackManagerMainThread.GetNativeInputTrack());
diff --git a/dom/media/MediaTrackGraph.h b/dom/media/MediaTrackGraph.h
index a754b158eb..53783aca27 100644
--- a/dom/media/MediaTrackGraph.h
+++ b/dom/media/MediaTrackGraph.h
@@ -112,12 +112,20 @@ class AudioDataListenerInterface {
/**
* Number of audio input channels.
*/
- virtual uint32_t RequestedInputChannelCount(MediaTrackGraph* aGraph) = 0;
+ virtual uint32_t RequestedInputChannelCount(
+ MediaTrackGraph* aGraph) const = 0;
+
+ /**
+ * The input processing params this listener wants the platform to apply.
+ */
+ virtual cubeb_input_processing_params RequestedInputProcessingParams(
+ MediaTrackGraph* aGraph) const = 0;
/**
* Whether the underlying audio device is used for voice input.
*/
virtual bool IsVoiceInput(MediaTrackGraph* aGraph) const = 0;
+
/**
* Called when the underlying audio device has changed.
*/
@@ -127,6 +135,14 @@ class AudioDataListenerInterface {
* Called when the underlying audio device is being closed.
*/
virtual void Disconnect(MediaTrackGraph* aGraph) = 0;
+
+ /**
+ * Called after an attempt to set the input processing params on the
+ * underlying input track.
+ */
+ virtual void NotifySetRequestedInputProcessingParamsResult(
+ MediaTrackGraph* aGraph, cubeb_input_processing_params aRequestedParams,
+ const Result<cubeb_input_processing_params, int>& aResult) = 0;
};
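cubeb_input_processing_params is a cubeb bitmask; CUBEB_INPUT_PROCESSING_PARAM_NONE, used throughout this patch, is its empty value, so a listener's request is a union of flags. A sketch, with the non-NONE flag names assumed from cubeb.h rather than taken from this patch:

// Sketch: composing a request from individual processing flags (assumed
// names; only CUBEB_INPUT_PROCESSING_PARAM_NONE appears in this diff).
cubeb_input_processing_params requested =
    static_cast<cubeb_input_processing_params>(
        CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION |
        CUBEB_INPUT_PROCESSING_PARAM_NOISE_SUPPRESSION);
if (requested & CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION) {
  // platform AEC was requested
}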
class AudioDataListener : public AudioDataListenerInterface {
diff --git a/dom/media/MediaTrackGraphImpl.h b/dom/media/MediaTrackGraphImpl.h
index 5daed83ef3..44c04caaa0 100644
--- a/dom/media/MediaTrackGraphImpl.h
+++ b/dom/media/MediaTrackGraphImpl.h
@@ -509,6 +509,12 @@ class MediaTrackGraphImpl : public MediaTrackGraph,
void NotifyInputData(const AudioDataValue* aBuffer, size_t aFrames,
TrackRate aRate, uint32_t aChannels,
uint32_t aAlreadyBuffered) override;
+  /* Called on the main thread after an AudioCallbackDriver has attempted to
+   * set aRequestedParams on the cubeb stream. */
+ void NotifySetRequestedInputProcessingParamsResult(
+ AudioCallbackDriver* aDriver,
+ cubeb_input_processing_params aRequestedParams,
+ Result<cubeb_input_processing_params, int>&& aResult) override;
/* Called every time there are changes to input/output audio devices like
* plug/unplug etc. This can be called on any thread, and posts a message to
* the main thread so that it can post a message to the graph thread. */
@@ -586,6 +592,13 @@ class MediaTrackGraphImpl : public MediaTrackGraph,
AudioInputType AudioInputDevicePreference(CubebUtils::AudioDeviceID aID);
+ /**
+ * The input processing params requested for any processing tracks tied to the
+ * input device with id aID.
+ */
+ cubeb_input_processing_params RequestedAudioInputProcessingParams(
+ CubebUtils::AudioDeviceID aID);
+
double MediaTimeToSeconds(GraphTime aTime) const {
NS_ASSERTION(aTime > -TRACK_TIME_MAX && aTime <= TRACK_TIME_MAX,
"Bad time");
diff --git a/dom/media/SeekJob.cpp b/dom/media/SeekJob.cpp
index 93911638ce..1760cceb35 100644
--- a/dom/media/SeekJob.cpp
+++ b/dom/media/SeekJob.cpp
@@ -18,12 +18,12 @@ bool SeekJob::Exists() const {
return mTarget.isSome();
}
-void SeekJob::Resolve(const char* aCallSite) {
+void SeekJob::Resolve(StaticString aCallSite) {
mPromise.Resolve(true, aCallSite);
mTarget.reset();
}
-void SeekJob::RejectIfExists(const char* aCallSite) {
+void SeekJob::RejectIfExists(StaticString aCallSite) {
mTarget.reset();
mPromise.RejectIfExists(true, aCallSite);
}
diff --git a/dom/media/SeekJob.h b/dom/media/SeekJob.h
index 1bdd6913f3..9e1e404d91 100644
--- a/dom/media/SeekJob.h
+++ b/dom/media/SeekJob.h
@@ -20,8 +20,8 @@ struct SeekJob {
~SeekJob();
bool Exists() const;
- void Resolve(const char* aCallSite);
- void RejectIfExists(const char* aCallSite);
+ void Resolve(StaticString aCallSite);
+ void RejectIfExists(StaticString aCallSite);
Maybe<SeekTarget> mTarget;
MozPromiseHolder<MediaDecoder::SeekPromise> mPromise;
diff --git a/dom/media/VideoFrameConverter.h b/dom/media/VideoFrameConverter.h
index 31b3104955..43a4075b04 100644
--- a/dom/media/VideoFrameConverter.h
+++ b/dom/media/VideoFrameConverter.h
@@ -7,7 +7,7 @@
#define VideoFrameConverter_h
#include "ImageContainer.h"
-#include "ImageToI420.h"
+#include "ImageConversion.h"
#include "Pacer.h"
#include "PerformanceRecorder.h"
#include "VideoSegment.h"
diff --git a/dom/media/VideoUtils.cpp b/dom/media/VideoUtils.cpp
index 24b1f0dd59..31fe3242dc 100644
--- a/dom/media/VideoUtils.cpp
+++ b/dom/media/VideoUtils.cpp
@@ -10,10 +10,13 @@
#include "ImageContainer.h"
#include "MediaContainerType.h"
#include "MediaResource.h"
+#include "PDMFactory.h"
#include "TimeUnits.h"
#include "mozilla/Base64.h"
#include "mozilla/dom/ContentChild.h"
+#include "mozilla/gfx/gfxVars.h"
#include "mozilla/SchedulerGroup.h"
+#include "mozilla/ScopeExit.h"
#include "mozilla/SharedThreadPool.h"
#include "mozilla/StaticPrefs_accessibility.h"
#include "mozilla/StaticPrefs_media.h"
@@ -28,6 +31,10 @@
#include "nsServiceManagerUtils.h"
#include "nsThreadUtils.h"
+#ifdef XP_WIN
+# include "WMFDecoderModule.h"
+#endif
+
namespace mozilla {
using gfx::ColorRange;
@@ -1247,4 +1254,18 @@ void DetermineResolutionForTelemetry(const MediaInfo& aInfo,
aResolutionOut.AppendASCII(resolution);
}
+bool ContainHardwareCodecsSupported(
+ const media::MediaCodecsSupported& aSupport) {
+ return aSupport.contains(
+ mozilla::media::MediaCodecsSupport::H264HardwareDecode) ||
+ aSupport.contains(
+ mozilla::media::MediaCodecsSupport::VP8HardwareDecode) ||
+ aSupport.contains(
+ mozilla::media::MediaCodecsSupport::VP9HardwareDecode) ||
+ aSupport.contains(
+ mozilla::media::MediaCodecsSupport::AV1HardwareDecode) ||
+ aSupport.contains(
+ mozilla::media::MediaCodecsSupport::HEVCHardwareDecode);
+}
+
} // end namespace mozilla
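A usage sketch for the new helper; the support set here is built by hand, whereas real callers receive one from the decoding infrastructure:

// Sketch: gate a fast path on any hardware decoding capability.
media::MediaCodecsSupported supported;
supported += media::MediaCodecsSupport::VP9HardwareDecode;  // illustrative
if (ContainHardwareCodecsSupported(supported)) {
  // at least one of H264/VP8/VP9/AV1/HEVC can be hardware-decoded
}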
diff --git a/dom/media/VideoUtils.h b/dom/media/VideoUtils.h
index b1dbb0cf2b..ce20226a02 100644
--- a/dom/media/VideoUtils.h
+++ b/dom/media/VideoUtils.h
@@ -9,6 +9,7 @@
#include "AudioSampleFormat.h"
#include "MediaInfo.h"
+#include "MediaCodecsSupport.h"
#include "VideoLimits.h"
#include "mozilla/AbstractThread.h"
#include "mozilla/Attributes.h"
@@ -553,6 +554,10 @@ bool IsWaveMimetype(const nsACString& aMimeType);
void DetermineResolutionForTelemetry(const MediaInfo& aInfo,
nsCString& aResolutionOut);
+// True if given MediaCodecsSupported contains any hardware decoding support.
+bool ContainHardwareCodecsSupported(
+ const media::MediaCodecsSupported& aSupport);
+
} // end namespace mozilla
#endif
diff --git a/dom/media/WavDumper.h b/dom/media/WavDumper.h
index de4195066a..971fa8a32f 100644
--- a/dom/media/WavDumper.h
+++ b/dom/media/WavDumper.h
@@ -107,13 +107,23 @@ class WavDumper {
if (!mFile) {
return;
}
- WriteDumpFileHelper(aBuffer, aSamples);
+ if (aBuffer) {
+ WriteDumpFileHelper(aBuffer, aSamples);
+ } else {
+ constexpr size_t blockSize = 128;
+ T block[blockSize] = {};
+ for (size_t remaining = aSamples; remaining;) {
+ size_t toWrite = std::min(remaining, blockSize);
+ fwrite(block, sizeof(T), toWrite, mFile);
+ remaining -= toWrite;
+ }
+ }
+ fflush(mFile);
}
private:
void WriteDumpFileHelper(const int16_t* aInput, size_t aSamples) {
mozilla::Unused << fwrite(aInput, sizeof(int16_t), aSamples, mFile);
- fflush(mFile);
}
void WriteDumpFileHelper(const float* aInput, size_t aSamples) {
@@ -127,7 +137,6 @@ class WavDumper {
MOZ_ASSERT(rv);
}
mozilla::Unused << fwrite(buf.Elements(), buf.Length(), 1, mFile);
- fflush(mFile);
}
FILE* mFile = nullptr;
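With this change, passing a null buffer means "write aSamples frames of silence", emitted in fixed 128-frame zero blocks to bound stack use, and the fflush() moves from the two helpers to the end of the write path so the silent branch is flushed too. A sketch of both call shapes; the Open() signature follows the DynamicResampler call sites later in this patch, while the Write name is assumed since the hunk starts mid-class:

// Sketch: dumping real samples, then an equal-length silent gap.
WavDumper dump;
dump.Open("DriftInput", /* channels */ 1, /* rate */ 48000);
float buffer[128] = {};                                // illustrative data
dump.Write(buffer, 128);                               // real samples
dump.Write(static_cast<const float*>(nullptr), 128);   // 128 frames silence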
diff --git a/dom/media/doctor/DDLifetime.cpp b/dom/media/doctor/DDLifetime.cpp
index 2d4c6cb966..c1d2a01a1c 100644
--- a/dom/media/doctor/DDLifetime.cpp
+++ b/dom/media/doctor/DDLifetime.cpp
@@ -5,6 +5,7 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "DDLifetime.h"
+#include "mozilla/IntegerPrintfMacros.h"
namespace mozilla {
diff --git a/dom/media/doctor/test/gtest/moz.build b/dom/media/doctor/test/gtest/moz.build
index 7ae9eae130..3ee3f23f4d 100644
--- a/dom/media/doctor/test/gtest/moz.build
+++ b/dom/media/doctor/test/gtest/moz.build
@@ -6,10 +6,15 @@
if CONFIG["OS_TARGET"] != "Android":
UNIFIED_SOURCES += [
- "TestMultiWriterQueue.cpp",
"TestRollingNumber.cpp",
]
+ # Bug 1894309 - Fails under TSAN
+ if not CONFIG["MOZ_TSAN"]:
+ UNIFIED_SOURCES += [
+ "TestMultiWriterQueue.cpp",
+ ]
+
include("/ipc/chromium/chromium-config.mozbuild")
LOCAL_INCLUDES += [
diff --git a/dom/media/driftcontrol/AudioDriftCorrection.cpp b/dom/media/driftcontrol/AudioDriftCorrection.cpp
index e66c435c36..1b86a99a44 100644
--- a/dom/media/driftcontrol/AudioDriftCorrection.cpp
+++ b/dom/media/driftcontrol/AudioDriftCorrection.cpp
@@ -35,8 +35,8 @@ AudioDriftCorrection::AudioDriftCorrection(
: mTargetRate(aTargetRate),
mDriftController(MakeUnique<DriftController>(aSourceRate, aTargetRate,
mDesiredBuffering)),
- mResampler(MakeUnique<AudioResampler>(
- aSourceRate, aTargetRate, mDesiredBuffering, aPrincipalHandle)) {}
+ mResampler(MakeUnique<AudioResampler>(aSourceRate, aTargetRate, 0,
+ aPrincipalHandle)) {}
AudioDriftCorrection::~AudioDriftCorrection() = default;
@@ -94,7 +94,7 @@ AudioSegment AudioDriftCorrection::RequestFrames(const AudioSegment& aInput,
mDriftController->UpdateClock(inputDuration, outputDuration,
CurrentBuffering(), BufferSize());
// Update resampler's rate if there is a new correction.
- mResampler->UpdateOutRate(mDriftController->GetCorrectedTargetRate());
+ mResampler->UpdateInRate(mDriftController->GetCorrectedSourceRate());
if (hasUnderrun) {
if (!mIsHandlingUnderrun) {
NS_WARNING("Drift-correction: Underrun");
@@ -171,7 +171,8 @@ void AudioDriftCorrection::SetDesiredBuffering(
media::TimeUnit aDesiredBuffering) {
mDesiredBuffering = aDesiredBuffering;
mDriftController->SetDesiredBuffering(mDesiredBuffering);
- mResampler->SetPreBufferDuration(mDesiredBuffering);
+ mResampler->SetInputPreBufferFrameCount(
+ mDesiredBuffering.ToTicksAtRate(mDriftController->mSourceRate));
}
} // namespace mozilla
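Taken together, these changes move the control variable from the resampler's output side to its input side: the controller measures buffering against the set point, and the resampler then consumes input at the corrected source rate while the output rate stays nominal. One RequestFrames() pass, reduced to a sketch (duration variables are illustrative):

// Sketch of the per-callback control loop after the input-side refactor.
mResampler->AppendInput(aInput);                  // buffer new source frames
bool hasUnderrun = false;
AudioSegment output = mResampler->Resample(outFrames, &hasUnderrun);
mDriftController->UpdateClock(inputDuration, outputDuration,
                              CurrentBuffering(), BufferSize());
// Feed the controller's output back into the resampler's *input* rate:
mResampler->UpdateInRate(mDriftController->GetCorrectedSourceRate());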
diff --git a/dom/media/driftcontrol/AudioResampler.cpp b/dom/media/driftcontrol/AudioResampler.cpp
index ecef033a5c..1402fae39e 100644
--- a/dom/media/driftcontrol/AudioResampler.cpp
+++ b/dom/media/driftcontrol/AudioResampler.cpp
@@ -5,12 +5,14 @@
#include "AudioResampler.h"
+#include "TimeUnits.h"
+
namespace mozilla {
AudioResampler::AudioResampler(uint32_t aInRate, uint32_t aOutRate,
- media::TimeUnit aPreBufferDuration,
+ uint32_t aInputPreBufferFrameCount,
const PrincipalHandle& aPrincipalHandle)
- : mResampler(aInRate, aOutRate, aPreBufferDuration),
+ : mResampler(aInRate, aOutRate, aInputPreBufferFrameCount),
mOutputChunks(aOutRate / 10, STEREO, aPrincipalHandle) {}
void AudioResampler::AppendInput(const AudioSegment& aInSegment) {
@@ -59,11 +61,11 @@ AudioSegment AudioResampler::Resample(uint32_t aOutFrames, bool* aHasUnderrun) {
return segment;
}
- media::TimeUnit outDuration(aOutFrames, mResampler.GetOutRate());
+ media::TimeUnit outDuration(aOutFrames, mResampler.mOutRate);
mResampler.EnsurePreBuffer(outDuration);
const media::TimeUnit chunkCapacity(mOutputChunks.ChunkCapacity(),
- mResampler.GetOutRate());
+ mResampler.mOutRate);
while (!outDuration.IsZero()) {
MOZ_ASSERT(outDuration.IsPositive());
@@ -71,8 +73,7 @@ AudioSegment AudioResampler::Resample(uint32_t aOutFrames, bool* aHasUnderrun) {
const media::TimeUnit chunkDuration = std::min(outDuration, chunkCapacity);
outDuration -= chunkDuration;
- const uint32_t outFrames =
- chunkDuration.ToTicksAtRate(mResampler.GetOutRate());
+ const uint32_t outFrames = chunkDuration.ToTicksAtRate(mResampler.mOutRate);
for (uint32_t i = 0; i < chunk.ChannelCount(); ++i) {
if (chunk.mBufferFormat == AUDIO_FORMAT_FLOAT32) {
*aHasUnderrun |= mResampler.Resample(
@@ -92,8 +93,8 @@ AudioSegment AudioResampler::Resample(uint32_t aOutFrames, bool* aHasUnderrun) {
return segment;
}
-void AudioResampler::Update(uint32_t aOutRate, uint32_t aChannels) {
- mResampler.UpdateResampler(aOutRate, aChannels);
+void AudioResampler::Update(uint32_t aInRate, uint32_t aChannels) {
+ mResampler.UpdateResampler(aInRate, aChannels);
mOutputChunks.Update(aChannels);
}
diff --git a/dom/media/driftcontrol/AudioResampler.h b/dom/media/driftcontrol/AudioResampler.h
index 20e4f1051b..c5982c3a39 100644
--- a/dom/media/driftcontrol/AudioResampler.h
+++ b/dom/media/driftcontrol/AudioResampler.h
@@ -9,12 +9,11 @@
#include "AudioChunkList.h"
#include "AudioSegment.h"
#include "DynamicResampler.h"
-#include "TimeUnits.h"
namespace mozilla {
/**
- * Audio Resampler is a resampler able to change the output rate and channels
+ * Audio Resampler is a resampler able to change the input rate and channels
* count on the fly. The API is simple and it is based in AudioSegment in order
* to be used MTG. Memory allocations, for input and output buffers, will happen
* in the constructor, when channel count changes and if the amount of input
@@ -36,7 +35,7 @@ namespace mozilla {
class AudioResampler final {
public:
AudioResampler(uint32_t aInRate, uint32_t aOutRate,
- media::TimeUnit aPreBufferDuration,
+ uint32_t aInputPreBufferFrameCount,
const PrincipalHandle& aPrincipalHandle);
/**
@@ -69,24 +68,24 @@ class AudioResampler final {
AudioSegment Resample(uint32_t aOutFrames, bool* aHasUnderrun);
/*
- * Updates the output rate that will be used by the resampler.
+ * Updates the input rate that will be used by the resampler.
*/
- void UpdateOutRate(uint32_t aOutRate) {
- Update(aOutRate, mResampler.GetChannels());
+ void UpdateInRate(uint32_t aInRate) {
+ Update(aInRate, mResampler.GetChannels());
}
/**
- * Set the duration that should be used for pre-buffering.
+ * Set the number of frames that should be used for input pre-buffering.
*/
- void SetPreBufferDuration(media::TimeUnit aPreBufferDuration) {
- mResampler.SetPreBufferDuration(aPreBufferDuration);
+ void SetInputPreBufferFrameCount(uint32_t aInputPreBufferFrameCount) {
+ mResampler.SetInputPreBufferFrameCount(aInputPreBufferFrameCount);
}
private:
void UpdateChannels(uint32_t aChannels) {
- Update(mResampler.GetOutRate(), aChannels);
+ Update(mResampler.GetInRate(), aChannels);
}
- void Update(uint32_t aOutRate, uint32_t aChannels);
+ void Update(uint32_t aInRate, uint32_t aChannels);
private:
DynamicResampler mResampler;
diff --git a/dom/media/driftcontrol/DriftController.cpp b/dom/media/driftcontrol/DriftController.cpp
index b5603f72bb..791bfe9614 100644
--- a/dom/media/driftcontrol/DriftController.cpp
+++ b/dom/media/driftcontrol/DriftController.cpp
@@ -50,7 +50,7 @@ DriftController::DriftController(uint32_t aSourceRate, uint32_t aTargetRate,
mSourceRate(aSourceRate),
mTargetRate(aTargetRate),
mDesiredBuffering(aDesiredBuffering),
- mCorrectedTargetRate(static_cast<float>(aTargetRate)),
+ mCorrectedSourceRate(static_cast<float>(aSourceRate)),
mMeasuredSourceLatency(5),
mMeasuredTargetLatency(5) {
LOG_CONTROLLER(
@@ -76,8 +76,8 @@ void DriftController::ResetAfterUnderrun() {
mTargetClock = mAdjustmentInterval;
}
-uint32_t DriftController::GetCorrectedTargetRate() const {
- return std::lround(mCorrectedTargetRate);
+uint32_t DriftController::GetCorrectedSourceRate() const {
+ return std::lround(mCorrectedSourceRate);
}
void DriftController::UpdateClock(media::TimeUnit aSourceDuration,
@@ -112,14 +112,16 @@ void DriftController::CalculateCorrection(uint32_t aBufferedFrames,
static constexpr float kDerivativeGain = 0.12;
// Maximum 0.1% change per update.
- const float cap = static_cast<float>(mTargetRate) / 1000.0f;
+ const float cap = static_cast<float>(mSourceRate) / 1000.0f;
// The integral term can make us grow far outside the cap. Impose a cap on
// it individually that is roughly equivalent to the final cap.
const float integralCap = cap / kIntegralGain;
- int32_t error = CheckedInt32(mDesiredBuffering.ToTicksAtRate(mSourceRate) -
- aBufferedFrames)
+ // Use nominal (not corrected) source rate when interpreting desired
+ // buffering so that the set point is independent of the control value.
+ int32_t error = CheckedInt32(aBufferedFrames -
+ mDesiredBuffering.ToTicksAtRate(mSourceRate))
.value();
int32_t proportional = error;
// targetClockSec is the number of target clock seconds since last
@@ -135,12 +137,12 @@ void DriftController::CalculateCorrection(uint32_t aBufferedFrames,
kIntegralGain * mIntegral +
kDerivativeGain * derivative;
float correctedRate =
- std::clamp(static_cast<float>(mTargetRate) + controlSignal,
- mCorrectedTargetRate - cap, mCorrectedTargetRate + cap);
+ std::clamp(static_cast<float>(mSourceRate) + controlSignal,
+ mCorrectedSourceRate - cap, mCorrectedSourceRate + cap);
// mDesiredBuffering is divided by this to calculate the amount of
// hysteresis to apply. With a denominator of 5, an error within +/- 20% of
- // the desired buffering will not make corrections to the target sample
+ // the desired buffering will not make corrections to the source sample
// rate.
static constexpr uint32_t kHysteresisDenominator = 5; // +/- 20%
@@ -183,7 +185,7 @@ void DriftController::CalculateCorrection(uint32_t aBufferedFrames,
return correctedRate;
}
- return mCorrectedTargetRate;
+ return mCorrectedSourceRate;
}();
if (mDurationWithinHysteresis > mIntegralCapTimeLimit) {
@@ -201,10 +203,10 @@ void DriftController::CalculateCorrection(uint32_t aBufferedFrames,
LOG_CONTROLLER(
LogLevel::Verbose, this,
"Recalculating Correction: Nominal: %uHz->%uHz, Corrected: "
- "%uHz->%.2fHz (diff %.2fHz), error: %.2fms (hysteresisThreshold: "
+ "%.2fHz->%uHz (diff %.2fHz), error: %.2fms (hysteresisThreshold: "
"%.2fms), buffering: %.2fms, desired buffering: %.2fms",
- mSourceRate, mTargetRate, mSourceRate, hysteresisCorrectedRate,
- hysteresisCorrectedRate - mCorrectedTargetRate,
+ mSourceRate, mTargetRate, hysteresisCorrectedRate, mTargetRate,
+ hysteresisCorrectedRate - mCorrectedSourceRate,
media::TimeUnit(error, mSourceRate).ToSeconds() * 1000.0,
media::TimeUnit(hysteresisThreshold, mSourceRate).ToSeconds() * 1000.0,
media::TimeUnit(aBufferedFrames, mSourceRate).ToSeconds() * 1000.0,
@@ -219,13 +221,13 @@ void DriftController::CalculateCorrection(uint32_t aBufferedFrames,
kProportionalGain * proportional, kIntegralGain * mIntegral,
kDerivativeGain * derivative, controlSignal);
- if (std::lround(mCorrectedTargetRate) !=
+ if (std::lround(mCorrectedSourceRate) !=
std::lround(hysteresisCorrectedRate)) {
++mNumCorrectionChanges;
}
mPreviousError = error;
- mCorrectedTargetRate = hysteresisCorrectedRate;
+ mCorrectedSourceRate = std::max(1.f, hysteresisCorrectedRate);
// Reset the counters to prepare for the next period.
mTargetClock = media::TimeUnit::Zero();
diff --git a/dom/media/driftcontrol/DriftController.h b/dom/media/driftcontrol/DriftController.h
index 0bd745c737..e8dbc57e0e 100644
--- a/dom/media/driftcontrol/DriftController.h
+++ b/dom/media/driftcontrol/DriftController.h
@@ -22,8 +22,8 @@ namespace mozilla {
* the calculations.
*
* The DriftController looks at how the current buffering level differs from the
- * desired buffering level and sets a corrected target rate. A resampler should
- * be configured to resample from the nominal source rate to the corrected
+ * desired buffering level and sets a corrected source rate. A resampler should
+ * be configured to resample from the corrected source rate to the nominal
* target rate. It assumes that the resampler is initially configured to
* resample from the nominal source rate to the nominal target rate.
*
@@ -53,12 +53,12 @@ class DriftController final {
void ResetAfterUnderrun();
/**
- * Returns the drift-corrected target rate.
+ * Returns the drift-corrected source rate.
*/
- uint32_t GetCorrectedTargetRate() const;
+ uint32_t GetCorrectedSourceRate() const;
/**
- * The number of times mCorrectedTargetRate has been changed to adjust to
+ * The number of times mCorrectedSourceRate has been changed to adjust to
* drift.
*/
uint32_t NumCorrectionChanges() const { return mNumCorrectionChanges; }
@@ -102,9 +102,12 @@ class DriftController final {
// This implements a simple PID controller with feedback.
// Set point: SP = mDesiredBuffering.
// Process value: PV(t) = aBufferedFrames. This is the feedback.
- // Error: e(t) = mDesiredBuffering - aBufferedFrames.
- // Control value: CV(t) = the number to add to the nominal target rate, i.e.
- // the corrected target rate = CV(t) + nominal target rate.
+ // Error: e(t) = aBufferedFrames - mDesiredBuffering.
+ // Error is positive when the process value is high, which is
+ // the opposite of conventional PID controllers because this
+ // is a reverse-acting system.
+ // Control value: CV(t) = the value to add to the nominal source rate, i.e.
+ // the corrected source rate = nominal source rate + CV(t).
//
// Controller:
// Proportional part: The error, p(t) = e(t), multiplied by a gain factor, Kp.
@@ -115,13 +118,13 @@ class DriftController final {
// Control signal: The sum of the parts' output,
// u(t) = Kp*p(t) + Ki*i(t) + Kd*d(t).
//
- // Control action: Converting the control signal to a target sample rate.
+ // Control action: Converting the control signal to a source sample rate.
// Simplified, a positive control signal means the buffer is
- // lower than desired (because the error is positive), so the
- // target sample rate must be increased in order to consume
- // input data slower. We calculate the corrected target rate
- // by simply adding the control signal, u(t), to the nominal
- // target rate.
+ // higher than desired (because the error is positive),
+ // so the source sample rate must be increased in order to
+ // consume input data faster.
+ // We calculate the corrected source rate by simply adding
+ // the control signal, u(t), to the nominal source rate.
//
// Hysteresis: As long as the error is within a threshold of 20% of the set
// point (desired buffering level) (up to 10ms for >50ms desired
@@ -144,7 +147,7 @@ class DriftController final {
int32_t mPreviousError = 0;
float mIntegral = 0.0;
Maybe<float> mIntegralCenterForCap;
- float mCorrectedTargetRate;
+ float mCorrectedSourceRate;
Maybe<int32_t> mLastHysteresisBoundaryCorrection;
media::TimeUnit mDurationWithinHysteresis;
uint32_t mNumCorrectionChanges = 0;
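For orientation, the controller documented above boils down to a few lines of state; this standalone sketch keeps the reverse-acting sign convention (positive error means over-buffered, so raise the source rate) but leaves out the per-update cap, hysteresis, and integral-cap handling that CalculateCorrection() layers on top. The gains are illustrative, not the class's constants:

// Minimal reverse-acting PID sketch (illustrative gains and names).
struct MiniPidSketch {
  float mKp = 0.07f, mKi = 0.006f, mKd = 0.12f;
  float mIntegral = 0.0f;
  int32_t mPreviousError = 0;
  float Update(int32_t aBufferedFrames, int32_t aDesiredFrames,
               float aNominalSourceRate, float aDtSeconds) {
    const int32_t error = aBufferedFrames - aDesiredFrames;  // + == too full
    mIntegral += aDtSeconds * error;
    const float derivative = (error - mPreviousError) / aDtSeconds;
    mPreviousError = error;
    const float u = mKp * error + mKi * mIntegral + mKd * derivative;
    return aNominalSourceRate + u;  // corrected source rate
  }
};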
diff --git a/dom/media/driftcontrol/DynamicResampler.cpp b/dom/media/driftcontrol/DynamicResampler.cpp
index e6f230278e..65a2ae3b57 100644
--- a/dom/media/driftcontrol/DynamicResampler.cpp
+++ b/dom/media/driftcontrol/DynamicResampler.cpp
@@ -8,14 +8,13 @@
namespace mozilla {
DynamicResampler::DynamicResampler(uint32_t aInRate, uint32_t aOutRate,
- media::TimeUnit aPreBufferDuration)
- : mInRate(aInRate),
- mPreBufferDuration(aPreBufferDuration),
- mOutRate(aOutRate) {
+ uint32_t aInputPreBufferFrameCount)
+ : mOutRate(aOutRate),
+ mInputPreBufferFrameCount(aInputPreBufferFrameCount),
+ mInRate(aInRate) {
MOZ_ASSERT(aInRate);
MOZ_ASSERT(aOutRate);
- MOZ_ASSERT(aPreBufferDuration.IsPositiveOrZero());
- UpdateResampler(mOutRate, STEREO);
+ UpdateResampler(mInRate, STEREO);
mInputStreamFile.Open("DynamicResamplerInFirstChannel", 1, mInRate);
mOutputStreamFile.Open("DynamicResamplerOutFirstChannel", 1, mOutRate);
}
@@ -35,7 +34,10 @@ void DynamicResampler::SetSampleFormat(AudioSampleFormat aFormat) {
b.SetSampleFormat(mSampleFormat);
}
- EnsureInputBufferDuration(CalculateInputBufferDuration());
+ // Pre-allocate something big.
+  // EnsureInputBufferSizeInFrames() adds 50ms for jitter to this first
+  // allocation, so the 50ms argument means at least 100ms.
+ EnsureInputBufferSizeInFrames(mInRate / 20);
}
void DynamicResampler::EnsurePreBuffer(media::TimeUnit aDuration) {
@@ -43,33 +45,36 @@ void DynamicResampler::EnsurePreBuffer(media::TimeUnit aDuration) {
return;
}
- media::TimeUnit buffered(mInternalInBuffer[0].AvailableRead(), mInRate);
- if (buffered.IsZero()) {
+ uint32_t buffered = mInternalInBuffer[0].AvailableRead();
+ if (buffered == 0) {
// Wait for the first input segment before deciding how much to pre-buffer.
// If it is large it indicates high-latency, and the buffer would have to
- // handle that.
+ // handle that. This also means that the pre-buffer is not set up just
+ // before a large input segment would extend the buffering beyond the
+ // desired level.
return;
}
mIsPreBufferSet = true;
- media::TimeUnit needed = aDuration + mPreBufferDuration;
- EnsureInputBufferDuration(needed);
+ uint32_t needed =
+ aDuration.ToTicksAtRate(mInRate) + mInputPreBufferFrameCount;
+ EnsureInputBufferSizeInFrames(needed);
if (needed > buffered) {
for (auto& b : mInternalInBuffer) {
- b.PrependSilence((needed - buffered).ToTicksAtRate(mInRate));
+ b.PrependSilence(needed - buffered);
}
} else if (needed < buffered) {
for (auto& b : mInternalInBuffer) {
- b.Discard((buffered - needed).ToTicksAtRate(mInRate));
+ b.Discard(buffered - needed);
}
}
}
-void DynamicResampler::SetPreBufferDuration(media::TimeUnit aDuration) {
- MOZ_ASSERT(aDuration.IsPositive());
- mPreBufferDuration = aDuration;
+void DynamicResampler::SetInputPreBufferFrameCount(
+ uint32_t aInputPreBufferFrameCount) {
+ mInputPreBufferFrameCount = aInputPreBufferFrameCount;
}
bool DynamicResampler::Resample(float* aOutBuffer, uint32_t aOutFrames,
@@ -93,7 +98,6 @@ void DynamicResampler::ResampleInternal(const float* aInBuffer,
MOZ_ASSERT(mInRate);
MOZ_ASSERT(mOutRate);
- MOZ_ASSERT(aInBuffer);
MOZ_ASSERT(aInFrames);
MOZ_ASSERT(*aInFrames > 0);
MOZ_ASSERT(aOutBuffer);
@@ -125,7 +129,6 @@ void DynamicResampler::ResampleInternal(const int16_t* aInBuffer,
MOZ_ASSERT(mInRate);
MOZ_ASSERT(mOutRate);
- MOZ_ASSERT(aInBuffer);
MOZ_ASSERT(aInFrames);
MOZ_ASSERT(*aInFrames > 0);
MOZ_ASSERT(aOutBuffer);
@@ -147,19 +150,20 @@ void DynamicResampler::ResampleInternal(const int16_t* aInBuffer,
}
}
-void DynamicResampler::UpdateResampler(uint32_t aOutRate, uint32_t aChannels) {
- MOZ_ASSERT(aOutRate);
+void DynamicResampler::UpdateResampler(uint32_t aInRate, uint32_t aChannels) {
+ MOZ_ASSERT(aInRate);
MOZ_ASSERT(aChannels);
if (mChannels != aChannels) {
+ uint32_t bufferSizeInFrames = InFramesBufferSize();
if (mResampler) {
speex_resampler_destroy(mResampler);
}
- mResampler = speex_resampler_init(aChannels, mInRate, aOutRate,
+ mResampler = speex_resampler_init(aChannels, aInRate, mOutRate,
SPEEX_RESAMPLER_QUALITY_MIN, nullptr);
MOZ_ASSERT(mResampler);
mChannels = aChannels;
- mOutRate = aOutRate;
+ mInRate = aInRate;
// Between mono and stereo changes, keep always allocated 2 channels to
// avoid reallocations in the most common case.
if ((mChannels == STEREO || mChannels == 1) &&
@@ -192,14 +196,12 @@ void DynamicResampler::UpdateResampler(uint32_t aOutRate, uint32_t aChannels) {
b->SetSampleFormat(mSampleFormat);
}
}
- media::TimeUnit d = mSetBufferDuration;
- mSetBufferDuration = media::TimeUnit::Zero();
- EnsureInputBufferDuration(d);
+ EnsureInputBufferSizeInFrames(bufferSizeInFrames);
mInputTail.SetLength(mChannels);
return;
}
- if (mOutRate != aOutRate) {
+ if (mInRate != aInRate) {
    // If the rates were the same, the resampler was not in use, so warm it up.
if (mOutRate == mInRate) {
WarmUpResampler(true);
@@ -208,9 +210,9 @@ void DynamicResampler::UpdateResampler(uint32_t aOutRate, uint32_t aChannels) {
#ifdef DEBUG
int rv =
#endif
- speex_resampler_set_rate(mResampler, mInRate, aOutRate);
+ speex_resampler_set_rate(mResampler, aInRate, mOutRate);
MOZ_ASSERT(rv == RESAMPLER_ERR_SUCCESS);
- mOutRate = aOutRate;
+ mInRate = aInRate;
}
}
@@ -236,13 +238,9 @@ void DynamicResampler::WarmUpResampler(bool aSkipLatency) {
}
}
if (aSkipLatency) {
- int inputLatency = speex_resampler_get_input_latency(mResampler);
- MOZ_ASSERT(inputLatency > 0);
- uint32_t ratioNum, ratioDen;
- speex_resampler_get_ratio(mResampler, &ratioNum, &ratioDen);
- // Ratio at this point is one so only skip the input latency. No special
- // calculations are needed.
- speex_resampler_set_skip_frac_num(mResampler, inputLatency * ratioDen);
+ // Don't generate output frames corresponding to times before the next
+ // input sample.
+ speex_resampler_skip_zeros(mResampler);
}
mIsWarmingUp = false;
}
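speex_resampler_skip_zeros() supersedes the removed hand-rolled computation: it discards the output corresponding to the resampler's initial input latency so the first real input sample lines up with output time zero, without the caller reasoning about the rate ratio. The two forms side by side, as the removed lines above imply:

/* What the removed code computed by hand (relying on a 1:1 ratio): */
int inputLatency = speex_resampler_get_input_latency(mResampler);
uint32_t num, den;
speex_resampler_get_ratio(mResampler, &num, &den);
speex_resampler_set_skip_frac_num(mResampler, inputLatency * den);
/* The one-call replacement used above: */
speex_resampler_skip_zeros(mResampler);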
@@ -268,7 +266,16 @@ void DynamicResampler::AppendInputSilence(const uint32_t aInFrames) {
}
uint32_t DynamicResampler::InFramesBufferSize() const {
- return mSetBufferDuration.ToTicksAtRate(mInRate);
+ if (mSampleFormat == AUDIO_FORMAT_SILENCE) {
+ return 0;
+ }
+ // Buffers may have different capacities if a memory allocation has failed.
+ MOZ_ASSERT(!mInternalInBuffer.IsEmpty());
+ uint32_t min = std::numeric_limits<uint32_t>::max();
+ for (const auto& b : mInternalInBuffer) {
+ min = std::min(min, b.Capacity());
+ }
+ return min;
}
uint32_t DynamicResampler::InFramesBuffered(uint32_t aChannelIndex) const {
@@ -276,7 +283,7 @@ uint32_t DynamicResampler::InFramesBuffered(uint32_t aChannelIndex) const {
MOZ_ASSERT(aChannelIndex <= mChannels);
MOZ_ASSERT(aChannelIndex <= mInternalInBuffer.Length());
if (!mIsPreBufferSet) {
- return mPreBufferDuration.ToTicksAtRate(mInRate);
+ return mInputPreBufferFrameCount;
}
return mInternalInBuffer[aChannelIndex].AvailableRead();
}
diff --git a/dom/media/driftcontrol/DynamicResampler.h b/dom/media/driftcontrol/DynamicResampler.h
index c1b9000aa0..1f601c898b 100644
--- a/dom/media/driftcontrol/DynamicResampler.h
+++ b/dom/media/driftcontrol/DynamicResampler.h
@@ -27,15 +27,13 @@ const uint32_t STEREO = 2;
 * to allow the requested frames to be resampled and returned.
*
* Input data buffering makes use of the AudioRingBuffer. The capacity of the
- * buffer is initially 100ms of float audio and it is pre-allocated at the
- * constructor. Should the input data grow beyond that, the input buffer is
- * re-allocated on the fly. In addition to that, due to special feature of
+ * buffer is initially 100ms of audio and it is pre-allocated during
+ * SetSampleFormat(). Should the input data grow beyond that, the input buffer
+ * is re-allocated on the fly. In addition to that, due to a special feature of
* AudioRingBuffer, no extra copies take place when the input data is fed to the
* resampler.
*
- * The sample format must be set before using any method. If the provided sample
- * format is of type short the pre-allocated capacity of the input buffer
- * becomes 200ms of short audio.
+ * The sample format must be set before using any method.
*
 * The DynamicResampler is not thread-safe, so all the methods apart from the
* constructor must be called on the same thread.
@@ -47,16 +45,15 @@ class DynamicResampler final {
* The channel count will be set to stereo. Memory allocation will take
* place. The input buffer is non-interleaved.
*/
- DynamicResampler(
- uint32_t aInRate, uint32_t aOutRate,
- media::TimeUnit aPreBufferDuration = media::TimeUnit::Zero());
+ DynamicResampler(uint32_t aInRate, uint32_t aOutRate,
+ uint32_t aInputPreBufferFrameCount = 0);
~DynamicResampler();
/**
* Set the sample format type to float or short.
*/
void SetSampleFormat(AudioSampleFormat aFormat);
- uint32_t GetOutRate() const { return mOutRate; }
+ uint32_t GetInRate() const { return mInRate; }
uint32_t GetChannels() const { return mChannels; }
/**
@@ -81,16 +78,16 @@ class DynamicResampler final {
/**
* Prepends existing input data with a silent pre-buffer if not already done.
- * Data will be prepended so that after resampling aOutFrames worth of output
- * data, the buffering level will be as close as possible to
- * mPreBufferDuration, which is the desired buffering level.
+ * Data will be prepended so that after resampling aDuration of data,
+ * the buffering level will be as close as possible to
+ * mInputPreBufferFrameCount, which is the desired buffering level.
*/
void EnsurePreBuffer(media::TimeUnit aDuration);
/**
- * Set the duration that should be used for pre-buffering.
+ * Set the number of frames that should be used for input pre-buffering.
*/
- void SetPreBufferDuration(media::TimeUnit aDuration);
+ void SetInputPreBufferFrameCount(uint32_t aInputPreBufferFrameCount);
/*
 * Resample as many frames as needed from the internal input buffer to the
@@ -114,14 +111,14 @@ class DynamicResampler final {
/**
 * Update the input rate and/or the channel count. If a value is not updated
- * compared to the current one nothing happens. Changing the `aOutRate`
+ * compared to the current one nothing happens. Changing the `aInRate`
* results in recalculation in the resampler. Changing `aChannels` results in
* the reallocation of the internal input buffer with the exception of
* changes between mono to stereo and vice versa where no reallocation takes
* place. A stereo internal input buffer is always maintained even if the
* sound is mono.
*/
- void UpdateResampler(uint32_t aOutRate, uint32_t aChannels);
+ void UpdateResampler(uint32_t aInRate, uint32_t aChannels);
private:
template <typename T>
@@ -174,24 +171,24 @@ class DynamicResampler final {
}
uint32_t totalOutFramesNeeded = aOutFrames;
- auto resample = [&] {
- mInternalInBuffer[aChannelIndex].ReadNoCopy(
- [&](const Span<const T>& aInBuffer) -> uint32_t {
- if (!totalOutFramesNeeded) {
- return 0;
- }
- uint32_t outFramesResampled = totalOutFramesNeeded;
- uint32_t inFrames = aInBuffer.Length();
- ResampleInternal(aInBuffer.data(), &inFrames, aOutBuffer,
- &outFramesResampled, aChannelIndex);
- aOutBuffer += outFramesResampled;
- totalOutFramesNeeded -= outFramesResampled;
- mInputTail[aChannelIndex].StoreTail<T>(aInBuffer.To(inFrames));
- return inFrames;
- });
+ auto resample = [&](const T* aInBuffer, uint32_t aInLength) -> uint32_t {
+ uint32_t outFramesResampled = totalOutFramesNeeded;
+ uint32_t inFrames = aInLength;
+ ResampleInternal(aInBuffer, &inFrames, aOutBuffer, &outFramesResampled,
+ aChannelIndex);
+ aOutBuffer += outFramesResampled;
+ totalOutFramesNeeded -= outFramesResampled;
+ mInputTail[aChannelIndex].StoreTail<T>(aInBuffer, inFrames);
+ return inFrames;
};
- resample();
+ mInternalInBuffer[aChannelIndex].ReadNoCopy(
+ [&](const Span<const T>& aInBuffer) -> uint32_t {
+ if (!totalOutFramesNeeded) {
+ return 0;
+ }
+ return resample(aInBuffer.Elements(), aInBuffer.Length());
+ });
if (totalOutFramesNeeded == 0) {
return false;
@@ -204,8 +201,7 @@ class DynamicResampler final {
((CheckedUint32(totalOutFramesNeeded) * mInRate + mOutRate - 1) /
mOutRate)
.value();
- mInternalInBuffer[aChannelIndex].WriteSilence(totalInFramesNeeded);
- resample();
+ resample(nullptr, totalInFramesNeeded);
}
mIsPreBufferSet = false;
return true;
@@ -219,33 +215,14 @@ class DynamicResampler final {
MOZ_ASSERT(mChannels);
MOZ_ASSERT(aChannelIndex < mChannels);
MOZ_ASSERT(aChannelIndex < mInternalInBuffer.Length());
- EnsureInputBufferDuration(media::TimeUnit(
- CheckedInt64(mInternalInBuffer[aChannelIndex].AvailableRead()) +
- aInFrames,
- mInRate));
+ EnsureInputBufferSizeInFrames(
+ mInternalInBuffer[aChannelIndex].AvailableRead() + aInFrames);
mInternalInBuffer[aChannelIndex].Write(Span(aInBuffer, aInFrames));
}
void WarmUpResampler(bool aSkipLatency);
- media::TimeUnit CalculateInputBufferDuration() const {
- // Pre-allocate something big, twice the pre-buffer, or at least 100ms.
- return std::max(mPreBufferDuration * 2, media::TimeUnit::FromSeconds(0.1));
- }
-
- bool EnsureInputBufferDuration(media::TimeUnit aDuration) {
- if (aDuration <= mSetBufferDuration) {
- // Buffer size is sufficient.
- return true;
- }
-
- // 5 second cap.
- const media::TimeUnit cap = media::TimeUnit::FromSeconds(5);
- if (mSetBufferDuration == cap) {
- // Already at the cap.
- return false;
- }
-
+ bool EnsureInputBufferSizeInFrames(uint32_t aSizeInFrames) {
uint32_t sampleSize = 0;
if (mSampleFormat == AUDIO_FORMAT_FLOAT32) {
sampleSize = sizeof(float);
@@ -258,53 +235,62 @@ class DynamicResampler final {
return true;
}
+ uint32_t sizeInFrames = InFramesBufferSize();
+ if (aSizeInFrames <= sizeInFrames) {
+ // Buffer size is sufficient.
+ return true; // no reallocation necessary
+ }
+
+ // 5 second cap.
+ const uint32_t cap = 5 * mInRate;
+ if (sizeInFrames >= cap) {
+ // Already at the cap.
+ return false;
+ }
+
// As a backoff strategy, at least double the previous size.
- media::TimeUnit duration = mSetBufferDuration * 2;
+ sizeInFrames *= 2;
- if (aDuration > duration) {
+ if (aSizeInFrames > sizeInFrames) {
// A larger buffer than the normal backoff strategy provides is needed, or
- // this is the first time setting the buffer size. Round up to the nearest
- // 100ms, some jitter is expected.
- duration = aDuration.ToBase<media::TimeUnit::CeilingPolicy>(10);
+ // this is the first time setting the buffer size. Add another 50ms, as
+ // some jitter is expected.
+ sizeInFrames = aSizeInFrames + mInRate / 20;
}
- duration = std::min(cap, duration);
+ // mInputPreBufferFrameCount is an indication of the desired average
+ // buffering. Provide for at least twice this.
+ sizeInFrames = std::max(sizeInFrames, mInputPreBufferFrameCount * 2);
+
+ sizeInFrames = std::min(cap, sizeInFrames);
bool success = true;
for (auto& b : mInternalInBuffer) {
- success = success &&
- b.SetLengthBytes(sampleSize * duration.ToTicksAtRate(mInRate));
+ success = success && b.EnsureLengthBytes(sampleSize * sizeInFrames);
}
if (success) {
// All buffers have the new size.
- mSetBufferDuration = duration;
return true;
}
- const uint32_t sizeInFrames =
- static_cast<uint32_t>(mSetBufferDuration.ToTicksAtRate(mInRate));
// Allocating an input buffer failed. We stick with the old buffer size.
NS_WARNING(nsPrintfCString("Failed to allocate a buffer of %u bytes (%u "
"frames). Expect glitches.",
sampleSize * sizeInFrames, sizeInFrames)
.get());
- for (auto& b : mInternalInBuffer) {
- MOZ_ALWAYS_TRUE(b.SetLengthBytes(sampleSize * sizeInFrames));
- }
return false;
}
public:
- const uint32_t mInRate;
+ const uint32_t mOutRate;
private:
bool mIsPreBufferSet = false;
bool mIsWarmingUp = false;
- media::TimeUnit mPreBufferDuration;
- media::TimeUnit mSetBufferDuration = media::TimeUnit::Zero();
+ uint32_t mInputPreBufferFrameCount;
uint32_t mChannels = 0;
- uint32_t mOutRate;
+ uint32_t mInRate;
AutoTArray<AudioRingBuffer, STEREO> mInternalInBuffer;
@@ -324,16 +310,16 @@ class DynamicResampler final {
}
template <typename T>
void StoreTail(const T* aInBuffer, uint32_t aInFrames) {
- if (aInFrames >= MAXSIZE) {
- PodCopy(Buffer<T>(), aInBuffer + aInFrames - MAXSIZE, MAXSIZE);
- mSize = MAXSIZE;
+ const T* inBuffer = aInBuffer;
+ mSize = std::min(aInFrames, MAXSIZE);
+ if (inBuffer) {
+ PodCopy(Buffer<T>(), inBuffer + aInFrames - mSize, mSize);
} else {
- PodCopy(Buffer<T>(), aInBuffer, aInFrames);
- mSize = aInFrames;
+ std::fill_n(Buffer<T>(), mSize, static_cast<T>(0));
}
}
uint32_t Length() { return mSize; }
- static const uint32_t MAXSIZE = 20;
+ static constexpr uint32_t MAXSIZE = 20;
private:
float mBuffer[MAXSIZE] = {};
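The buffer-growth policy above (double as a backoff, add about 50 ms of headroom when a larger size is requested outright, keep at least twice the desired pre-buffer, and cap at 5 seconds) can be summarized in a standalone sketch. This is illustrative only; the function name is hypothetical and the real code grows AudioRingBuffer allocations in bytes:

    #include <algorithm>
    #include <cstdint>

    uint32_t NextInputBufferSize(uint32_t aCurrentFrames, uint32_t aNeededFrames,
                                 uint32_t aInRate, uint32_t aPreBufferFrames) {
      const uint32_t cap = 5 * aInRate;  // 5 second cap
      if (aNeededFrames <= aCurrentFrames || aCurrentFrames >= cap) {
        return aCurrentFrames;  // sufficient, or already at the cap
      }
      uint32_t size = aCurrentFrames * 2;  // backoff: at least double
      if (aNeededFrames > size) {
        size = aNeededFrames + aInRate / 20;  // add 50ms headroom for jitter
      }
      size = std::max(size, aPreBufferFrames * 2);  // 2x desired buffering
      return std::min(cap, size);
    }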
diff --git a/dom/media/driftcontrol/gtest/TestAudioDriftCorrection.cpp b/dom/media/driftcontrol/gtest/TestAudioDriftCorrection.cpp
index c13f443d37..9d3f0f091a 100644
--- a/dom/media/driftcontrol/gtest/TestAudioDriftCorrection.cpp
+++ b/dom/media/driftcontrol/gtest/TestAudioDriftCorrection.cpp
@@ -260,10 +260,10 @@ TEST(TestAudioDriftCorrection, LargerTransmitterBlockSizeThanDesiredBuffering)
// Input is stable so no corrections should occur.
EXPECT_EQ(ad.NumCorrectionChanges(), 0U);
- // The drift correction buffer size had to be larger than the desired (the
- // buffer size is twice the initial buffering level), to accomodate the large
- // input block size.
- EXPECT_EQ(ad.BufferSize(), 9600U);
+ // The desired buffering and pre-buffering level was
+  // transmitterBlockSize * 11 / 10 to accommodate the large input block size.
+ // The buffer size was twice the pre-buffering level.
+ EXPECT_EQ(ad.BufferSize(), transmitterBlockSize * 11 / 10 * 2);
}
TEST(TestAudioDriftCorrection, LargerReceiverBlockSizeThanDesiredBuffering)
@@ -275,9 +275,9 @@ TEST(TestAudioDriftCorrection, LargerReceiverBlockSizeThanDesiredBuffering)
MakePrincipalHandle(nsContentUtils::GetSystemPrincipal());
AudioDriftCorrection ad(sampleRate, sampleRate, testPrincipal);
+ AudioSegment inSegment;
for (uint32_t i = 0; i < (sampleRate / 1000) * 500;
i += transmitterBlockSize) {
- AudioSegment inSegment;
AudioChunk chunk =
CreateAudioChunk<float>(transmitterBlockSize, 1, AUDIO_FORMAT_FLOAT32);
inSegment.AppendAndConsumeChunk(std::move(chunk));
@@ -285,6 +285,7 @@ TEST(TestAudioDriftCorrection, LargerReceiverBlockSizeThanDesiredBuffering)
if (i % receiverBlockSize == 0) {
AudioSegment outSegment = ad.RequestFrames(inSegment, receiverBlockSize);
EXPECT_EQ(outSegment.GetDuration(), receiverBlockSize);
+ inSegment.Clear();
}
if (i >= receiverBlockSize) {
@@ -294,11 +295,12 @@ TEST(TestAudioDriftCorrection, LargerReceiverBlockSizeThanDesiredBuffering)
// Input is stable so no corrections should occur.
EXPECT_EQ(ad.NumCorrectionChanges(), 0U);
+ EXPECT_EQ(ad.NumUnderruns(), 0U);
// The drift correction buffer size had to be larger than the desired (the
// buffer size is twice the initial buffering level), to accommodate the large
// input block size that gets buffered in the resampler only when processing
// output.
- EXPECT_EQ(ad.BufferSize(), 19200U);
+ EXPECT_EQ(ad.BufferSize(), 9600U);
}
TEST(TestAudioDriftCorrection, DynamicInputBufferSizeChanges)
@@ -329,9 +331,9 @@ TEST(TestAudioDriftCorrection, DynamicInputBufferSizeChanges)
if (((receivedFramesStart - transmittedFramesStart + i) /
aTransmitterBlockSize) > numBlocksTransmitted) {
tone.Generate(inSegment, aTransmitterBlockSize);
- MOZ_ASSERT(!inSegment.IsNull());
+ MOZ_RELEASE_ASSERT(!inSegment.IsNull());
inToneVerifier.AppendData(inSegment);
- MOZ_ASSERT(!inSegment.IsNull());
+ MOZ_RELEASE_ASSERT(!inSegment.IsNull());
++numBlocksTransmitted;
totalFramesTransmitted += aTransmitterBlockSize;
}
@@ -459,29 +461,50 @@ TEST(TestAudioDriftCorrection, DriftStepResponseUnderrunHighLatencyInput)
constexpr uint32_t iterations = 200;
const PrincipalHandle testPrincipal =
MakePrincipalHandle(nsContentUtils::GetSystemPrincipal());
- uint32_t inputRate = nominalRate * 1005 / 1000; // 0.5% drift
- uint32_t inputInterval = inputRate;
+ uint32_t inputRate1 = nominalRate * 1005 / 1000; // 0.5% drift
+ uint32_t inputInterval1 = inputRate1;
AudioGenerator<AudioDataValue> tone(1, nominalRate, 440);
AudioDriftCorrection ad(nominalRate, nominalRate, testPrincipal);
for (uint32_t i = 0; i < interval * iterations; i += interval / 100) {
AudioSegment inSegment;
if (i > 0 && i % interval == 0) {
- tone.Generate(inSegment, inputInterval);
+ tone.Generate(inSegment, inputInterval1);
}
ad.RequestFrames(inSegment, interval / 100);
}
- inputRate = nominalRate * 995 / 1000; // -0.5% drift
- inputInterval = inputRate;
+ uint32_t inputRate2 = nominalRate * 995 / 1000; // -0.5% drift
+ uint32_t inputInterval2 = inputRate2;
for (uint32_t i = 0; i < interval * iterations; i += interval / 100) {
AudioSegment inSegment;
+ // The first segment is skipped to cause an underrun.
if (i > 0 && i % interval == 0) {
- tone.Generate(inSegment, inputInterval);
+ tone.Generate(inSegment, inputInterval2);
}
ad.RequestFrames(inSegment, interval / 100);
+ if (i >= interval / 10 && i < interval) {
+ // While the DynamicResampler has not set its pre-buffer after the
+ // underrun, InFramesBuffered() reports the pre-buffer size.
+ // The initial desired buffer and pre-buffer size was
+      // inputInterval1 * 11 / 10 to accommodate the large input block size.
+ // This was doubled when the underrun occurred.
+ EXPECT_EQ(ad.CurrentBuffering(), inputInterval1 * 11 / 10 * 2)
+ << "for i=" << i;
+ } else if (i == interval) {
+ // After the pre-buffer was set and used to generate the first output
+ // block, the actual number of frames buffered almost matches the
+ // pre-buffer size, with some rounding from output to input frame count
+ // conversion.
+ EXPECT_EQ(ad.CurrentBuffering(), inputInterval1 * 11 / 10 * 2 - 1)
+ << "after first input after underrun";
+ }
}
- EXPECT_EQ(ad.BufferSize(), 220800U);
+ // The initial desired buffering and pre-buffering level was
+  // inputInterval1 * 11 / 10 to accommodate the large input block size.
+ // The buffer size was initially twice the pre-buffering level, and then
+ // doubled when the underrun occurred.
+ EXPECT_EQ(ad.BufferSize(), inputInterval1 * 11 / 10 * 2 * 2);
EXPECT_EQ(ad.NumUnderruns(), 1u);
}
@@ -511,7 +534,7 @@ TEST(TestAudioDriftCorrection, DriftStepResponseOverrun)
ad.RequestFrames(inSegment, interval / 100);
}
- // Change input callbacks to 2000ms (+0.5% drift) = 48200 frames, which will
+ // Change input callbacks to 1000ms (+0.5% drift) = 48200 frames, which will
// overrun the ring buffer.
for (uint32_t i = 0; i < interval * iterations; i += interval / 100) {
AudioSegment inSegment;
@@ -524,6 +547,9 @@ TEST(TestAudioDriftCorrection, DriftStepResponseOverrun)
ad.RequestFrames(inSegment, interval / 100);
}
- EXPECT_EQ(ad.BufferSize(), 105600U);
+ // The desired buffering and pre-buffering levels were increased to
+  // inputInterval * 11 / 10 to accommodate the large input block size.
+ // The buffer size was increased to twice the pre-buffering level.
+ EXPECT_EQ(ad.BufferSize(), inputInterval * 11 / 10 * 2);
EXPECT_EQ(ad.NumUnderruns(), 1u);
}
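The expected sizes in these tests all derive from one rule of thumb: the desired buffering is the largest input block plus a 10% margin, and the ring buffer is sized at twice that, doubling once more per underrun. A minimal sketch, assuming frame-count inputs; these helper names are not part of the tested API:

    uint32_t DesiredBuffering(uint32_t aLargestBlockFrames) {
      return aLargestBlockFrames * 11 / 10;  // block size plus 10% margin
    }

    uint32_t ExpectedBufferSize(uint32_t aLargestBlockFrames,
                                uint32_t aUnderruns) {
      // Twice the pre-buffering level, doubled again for each underrun.
      return DesiredBuffering(aLargestBlockFrames) * 2 * (1u << aUnderruns);
    }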
diff --git a/dom/media/driftcontrol/gtest/TestAudioResampler.cpp b/dom/media/driftcontrol/gtest/TestAudioResampler.cpp
index f04bc87314..7122b60a1a 100644
--- a/dom/media/driftcontrol/gtest/TestAudioResampler.cpp
+++ b/dom/media/driftcontrol/gtest/TestAudioResampler.cpp
@@ -64,8 +64,7 @@ TEST(TestAudioResampler, OutAudioSegment_Float)
uint32_t pre_buffer = 21;
- AudioResampler dr(in_rate, out_rate, media::TimeUnit(pre_buffer, in_rate),
- testPrincipal);
+ AudioResampler dr(in_rate, out_rate, pre_buffer, testPrincipal);
AudioSegment inSegment =
CreateAudioSegment<float>(in_frames, channels, AUDIO_FORMAT_FLOAT32);
@@ -91,9 +90,9 @@ TEST(TestAudioResampler, OutAudioSegment_Float)
}
}
- // Update out rate
- out_rate = 44100;
- dr.UpdateOutRate(out_rate);
+ // Update in rate
+ in_rate = 26122;
+ dr.UpdateInRate(in_rate);
out_frames = in_frames * out_rate / in_rate;
EXPECT_EQ(out_frames, 18u);
// Even if we provide no input if we have enough buffered input, we can create
@@ -121,8 +120,7 @@ TEST(TestAudioResampler, OutAudioSegment_Short)
uint32_t pre_buffer = 21;
- AudioResampler dr(in_rate, out_rate, media::TimeUnit(pre_buffer, in_rate),
- testPrincipal);
+ AudioResampler dr(in_rate, out_rate, pre_buffer, testPrincipal);
AudioSegment inSegment =
CreateAudioSegment<short>(in_frames, channels, AUDIO_FORMAT_S16);
@@ -148,9 +146,9 @@ TEST(TestAudioResampler, OutAudioSegment_Short)
}
}
- // Update out rate
- out_rate = 44100;
- dr.UpdateOutRate(out_rate);
+ // Update in rate
+ in_rate = 26122;
+  dr.UpdateInRate(in_rate);
out_frames = in_frames * out_rate / in_rate;
EXPECT_EQ(out_frames, 18u);
// Even if we provide no input if we have enough buffered input, we can create
@@ -175,8 +173,7 @@ TEST(TestAudioResampler, OutAudioSegmentLargerThanResampledInput_Float)
uint32_t pre_buffer = 5;
- AudioResampler dr(in_rate, out_rate, media::TimeUnit(pre_buffer, in_rate),
- PRINCIPAL_HANDLE_NONE);
+ AudioResampler dr(in_rate, out_rate, pre_buffer, PRINCIPAL_HANDLE_NONE);
AudioSegment inSegment =
CreateAudioSegment<float>(in_frames, channels, AUDIO_FORMAT_FLOAT32);
@@ -209,8 +206,7 @@ TEST(TestAudioResampler, InAudioSegment_Float)
uint32_t out_rate = 48000;
uint32_t pre_buffer = 10;
- AudioResampler dr(in_rate, out_rate, media::TimeUnit(pre_buffer, in_rate),
- testPrincipal);
+ AudioResampler dr(in_rate, out_rate, pre_buffer, testPrincipal);
AudioSegment inSegment;
@@ -275,8 +271,7 @@ TEST(TestAudioResampler, InAudioSegment_Short)
uint32_t out_rate = 48000;
uint32_t pre_buffer = 10;
- AudioResampler dr(in_rate, out_rate, media::TimeUnit(pre_buffer, in_rate),
- testPrincipal);
+ AudioResampler dr(in_rate, out_rate, pre_buffer, testPrincipal);
AudioSegment inSegment;
@@ -342,8 +337,7 @@ TEST(TestAudioResampler, ChannelChange_MonoToStereo)
uint32_t pre_buffer = 0;
- AudioResampler dr(in_rate, out_rate, media::TimeUnit(pre_buffer, in_rate),
- testPrincipal);
+ AudioResampler dr(in_rate, out_rate, pre_buffer, testPrincipal);
AudioChunk monoChunk =
CreateAudioChunk<float>(in_frames, 1, AUDIO_FORMAT_FLOAT32);
@@ -378,8 +372,7 @@ TEST(TestAudioResampler, ChannelChange_StereoToMono)
uint32_t pre_buffer = 0;
- AudioResampler dr(in_rate, out_rate, media::TimeUnit(pre_buffer, in_rate),
- testPrincipal);
+ AudioResampler dr(in_rate, out_rate, pre_buffer, testPrincipal);
AudioChunk monoChunk =
CreateAudioChunk<float>(in_frames, 1, AUDIO_FORMAT_FLOAT32);
@@ -414,8 +407,7 @@ TEST(TestAudioResampler, ChannelChange_StereoToQuad)
uint32_t pre_buffer = 0;
- AudioResampler dr(in_rate, out_rate, media::TimeUnit(pre_buffer, in_rate),
- testPrincipal);
+ AudioResampler dr(in_rate, out_rate, pre_buffer, testPrincipal);
AudioChunk stereoChunk =
CreateAudioChunk<float>(in_frames, 2, AUDIO_FORMAT_FLOAT32);
@@ -452,7 +444,7 @@ TEST(TestAudioResampler, ChannelChange_QuadToStereo)
uint32_t in_rate = 24000;
uint32_t out_rate = 48000;
- AudioResampler dr(in_rate, out_rate, media::TimeUnit::Zero(), testPrincipal);
+ AudioResampler dr(in_rate, out_rate, 0, testPrincipal);
AudioChunk stereoChunk =
CreateAudioChunk<float>(in_frames, 2, AUDIO_FORMAT_FLOAT32);
@@ -497,7 +489,7 @@ TEST(TestAudioResampler, ChannelChange_Discontinuity)
uint32_t in_frames = in_rate / 100;
uint32_t out_frames = out_rate / 100;
- AudioResampler dr(in_rate, out_rate, media::TimeUnit::Zero(), testPrincipal);
+ AudioResampler dr(in_rate, out_rate, 0, testPrincipal);
AudioChunk monoChunk =
CreateAudioChunk<float>(in_frames, 1, AUDIO_FORMAT_FLOAT32);
@@ -560,8 +552,7 @@ TEST(TestAudioResampler, ChannelChange_Discontinuity2)
uint32_t in_frames = in_rate / 100;
uint32_t out_frames = out_rate / 100;
- AudioResampler dr(in_rate, out_rate, media::TimeUnit(10, in_rate),
- testPrincipal);
+ AudioResampler dr(in_rate, out_rate, 10, testPrincipal);
AudioChunk monoChunk =
CreateAudioChunk<float>(in_frames / 2, 1, AUDIO_FORMAT_FLOAT32);
@@ -630,8 +621,7 @@ TEST(TestAudioResampler, ChannelChange_Discontinuity3)
uint32_t in_frames = in_rate / 100;
uint32_t out_frames = out_rate / 100;
- AudioResampler dr(in_rate, out_rate, media::TimeUnit(10, in_rate),
- testPrincipal);
+ AudioResampler dr(in_rate, out_rate, 10, testPrincipal);
AudioChunk stereoChunk =
CreateAudioChunk<float>(in_frames, 2, AUDIO_FORMAT_FLOAT32);
@@ -660,9 +650,9 @@ TEST(TestAudioResampler, ChannelChange_Discontinuity3)
// The resampler here is updated due to the rate change. This is because the
// in and out rates were the same, so pass-through logic was used. By updating
- // the out rate to something different than the in rate, the resampler will
+ // the in rate to something different than the out rate, the resampler will
// start being used and a discontinuity will exist.
- dr.UpdateOutRate(out_rate + 400);
+ dr.UpdateInRate(in_rate - 400);
dr.AppendInput(inSegment);
AudioSegment s2 = dr.Resample(out_frames, &hasUnderrun);
EXPECT_FALSE(hasUnderrun);
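The out_frames expectations in these tests rely on truncating integer division. For example, with values consistent with the visible EXPECT_EQ(out_frames, 18u) (in_frames and out_rate here are assumptions, not quoted from the test setup):

    uint32_t in_frames = 10;
    uint32_t in_rate = 26122;  // the updated in rate
    uint32_t out_rate = 48000;
    uint32_t out_frames = in_frames * out_rate / in_rate;  // 480000/26122 = 18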
diff --git a/dom/media/driftcontrol/gtest/TestDriftController.cpp b/dom/media/driftcontrol/gtest/TestDriftController.cpp
index 33486f945f..132577e44a 100644
--- a/dom/media/driftcontrol/gtest/TestDriftController.cpp
+++ b/dom/media/driftcontrol/gtest/TestDriftController.cpp
@@ -18,50 +18,50 @@ TEST(TestDriftController, Basic)
constexpr uint32_t bufferedHigh = 7 * 480;
DriftController c(48000, 48000, media::TimeUnit::FromSeconds(0.05));
- EXPECT_EQ(c.GetCorrectedTargetRate(), 48000U);
+ EXPECT_EQ(c.GetCorrectedSourceRate(), 48000U);
// The adjustment interval is 1s.
const auto oneSec = media::TimeUnit(48000, 48000);
c.UpdateClock(oneSec, oneSec, buffered, 0);
- EXPECT_EQ(c.GetCorrectedTargetRate(), 48000u);
+ EXPECT_EQ(c.GetCorrectedSourceRate(), 48000u);
c.UpdateClock(oneSec, oneSec, bufferedLow, 0);
- EXPECT_EQ(c.GetCorrectedTargetRate(), 48048u);
+ EXPECT_EQ(c.GetCorrectedSourceRate(), 47952u);
c.UpdateClock(oneSec, oneSec, bufferedHigh, 0);
- EXPECT_EQ(c.GetCorrectedTargetRate(), 48000u);
+ EXPECT_EQ(c.GetCorrectedSourceRate(), 48000u);
c.UpdateClock(oneSec, oneSec, bufferedHigh, 0);
- EXPECT_EQ(c.GetCorrectedTargetRate(), 47952u);
+ EXPECT_EQ(c.GetCorrectedSourceRate(), 48048u);
}
TEST(TestDriftController, BasicResampler)
{
// The buffer level is the only input to the controller logic.
- constexpr uint32_t buffered = 5 * 240;
- constexpr uint32_t bufferedLow = 3 * 240;
- constexpr uint32_t bufferedHigh = 7 * 240;
+ constexpr uint32_t buffered = 5 * 480;
+ constexpr uint32_t bufferedLow = 3 * 480;
+ constexpr uint32_t bufferedHigh = 7 * 480;
- DriftController c(24000, 48000, media::TimeUnit::FromSeconds(0.05));
+ DriftController c(48000, 24000, media::TimeUnit::FromSeconds(0.05));
// The adjustment interval is 1s.
const auto oneSec = media::TimeUnit(48000, 48000);
c.UpdateClock(oneSec, oneSec, buffered, 0);
- EXPECT_EQ(c.GetCorrectedTargetRate(), 48000u);
+ EXPECT_EQ(c.GetCorrectedSourceRate(), 48000u);
// low
c.UpdateClock(oneSec, oneSec, bufferedLow, 0);
- EXPECT_EQ(c.GetCorrectedTargetRate(), 48048u);
+ EXPECT_EQ(c.GetCorrectedSourceRate(), 47952u);
// high
c.UpdateClock(oneSec, oneSec, bufferedHigh, 0);
- EXPECT_EQ(c.GetCorrectedTargetRate(), 48000u);
+ EXPECT_EQ(c.GetCorrectedSourceRate(), 48000u);
// high
c.UpdateClock(oneSec, oneSec, bufferedHigh, 0);
- EXPECT_EQ(c.GetCorrectedTargetRate(), 47964u);
+ EXPECT_EQ(c.GetCorrectedSourceRate(), 48048u);
}
TEST(TestDriftController, BufferedInput)
@@ -72,56 +72,56 @@ TEST(TestDriftController, BufferedInput)
constexpr uint32_t bufferedHigh = 7 * 480;
DriftController c(48000, 48000, media::TimeUnit::FromSeconds(0.05));
- EXPECT_EQ(c.GetCorrectedTargetRate(), 48000u);
+ EXPECT_EQ(c.GetCorrectedSourceRate(), 48000u);
// The adjustment interval is 1s.
const auto oneSec = media::TimeUnit(48000, 48000);
c.UpdateClock(oneSec, oneSec, buffered, 0);
- EXPECT_EQ(c.GetCorrectedTargetRate(), 48000u);
+ EXPECT_EQ(c.GetCorrectedSourceRate(), 48000u);
// 0 buffered when updating correction
c.UpdateClock(oneSec, oneSec, 0, 0);
- EXPECT_EQ(c.GetCorrectedTargetRate(), 48048u);
+ EXPECT_EQ(c.GetCorrectedSourceRate(), 47952u);
c.UpdateClock(oneSec, oneSec, bufferedLow, 0);
- EXPECT_EQ(c.GetCorrectedTargetRate(), 48000u);
+ EXPECT_EQ(c.GetCorrectedSourceRate(), 48000u);
c.UpdateClock(oneSec, oneSec, buffered, 0);
- EXPECT_EQ(c.GetCorrectedTargetRate(), 48000u);
+ EXPECT_EQ(c.GetCorrectedSourceRate(), 48000u);
c.UpdateClock(oneSec, oneSec, bufferedHigh, 0);
- EXPECT_EQ(c.GetCorrectedTargetRate(), 47952u);
+ EXPECT_EQ(c.GetCorrectedSourceRate(), 48048u);
}
TEST(TestDriftController, BufferedInputWithResampling)
{
// The buffer level is the only input to the controller logic.
- constexpr uint32_t buffered = 5 * 240;
- constexpr uint32_t bufferedLow = 3 * 240;
- constexpr uint32_t bufferedHigh = 7 * 240;
+ constexpr uint32_t buffered = 5 * 480;
+ constexpr uint32_t bufferedLow = 3 * 480;
+ constexpr uint32_t bufferedHigh = 7 * 480;
- DriftController c(24000, 48000, media::TimeUnit::FromSeconds(0.05));
- EXPECT_EQ(c.GetCorrectedTargetRate(), 48000u);
+ DriftController c(48000, 24000, media::TimeUnit::FromSeconds(0.05));
+ EXPECT_EQ(c.GetCorrectedSourceRate(), 48000u);
// The adjustment interval is 1s.
const auto oneSec = media::TimeUnit(24000, 24000);
c.UpdateClock(oneSec, oneSec, buffered, 0);
- EXPECT_EQ(c.GetCorrectedTargetRate(), 48000u);
+ EXPECT_EQ(c.GetCorrectedSourceRate(), 48000u);
// 0 buffered when updating correction
c.UpdateClock(oneSec, oneSec, 0, 0);
- EXPECT_EQ(c.GetCorrectedTargetRate(), 48048u);
+ EXPECT_EQ(c.GetCorrectedSourceRate(), 47952u);
c.UpdateClock(oneSec, oneSec, bufferedLow, 0);
- EXPECT_EQ(c.GetCorrectedTargetRate(), 48000u);
+ EXPECT_EQ(c.GetCorrectedSourceRate(), 48000u);
c.UpdateClock(oneSec, oneSec, buffered, 0);
- EXPECT_EQ(c.GetCorrectedTargetRate(), 48000u);
+ EXPECT_EQ(c.GetCorrectedSourceRate(), 48000u);
c.UpdateClock(oneSec, oneSec, bufferedHigh, 0);
- EXPECT_EQ(c.GetCorrectedTargetRate(), 47952u);
+ EXPECT_EQ(c.GetCorrectedSourceRate(), 48048u);
}
TEST(TestDriftController, SmallError)
@@ -132,21 +132,21 @@ TEST(TestDriftController, SmallError)
constexpr uint32_t bufferedHigh = buffered + 48;
DriftController c(48000, 48000, media::TimeUnit::FromSeconds(0.05));
- EXPECT_EQ(c.GetCorrectedTargetRate(), 48000u);
+ EXPECT_EQ(c.GetCorrectedSourceRate(), 48000u);
// The adjustment interval is 1s.
const auto oneSec = media::TimeUnit(48000, 48000);
c.UpdateClock(oneSec, oneSec, buffered, 0);
- EXPECT_EQ(c.GetCorrectedTargetRate(), 48000u);
+ EXPECT_EQ(c.GetCorrectedSourceRate(), 48000u);
c.UpdateClock(oneSec, oneSec, bufferedLow, 0);
- EXPECT_EQ(c.GetCorrectedTargetRate(), 48000u);
+ EXPECT_EQ(c.GetCorrectedSourceRate(), 48000u);
c.UpdateClock(oneSec, oneSec, bufferedHigh, 0);
- EXPECT_EQ(c.GetCorrectedTargetRate(), 48000u);
+ EXPECT_EQ(c.GetCorrectedSourceRate(), 48000u);
c.UpdateClock(oneSec, oneSec, bufferedHigh, 0);
- EXPECT_EQ(c.GetCorrectedTargetRate(), 48000u);
+ EXPECT_EQ(c.GetCorrectedSourceRate(), 48000u);
}
TEST(TestDriftController, SmallBufferedFrames)
@@ -158,11 +158,34 @@ TEST(TestDriftController, SmallBufferedFrames)
media::TimeUnit oneSec = media::TimeUnit::FromSeconds(1);
media::TimeUnit hundredMillis = oneSec / 10;
- EXPECT_EQ(c.GetCorrectedTargetRate(), 48000U);
+ EXPECT_EQ(c.GetCorrectedSourceRate(), 48000U);
for (uint32_t i = 0; i < 9; ++i) {
c.UpdateClock(hundredMillis, hundredMillis, bufferedLow, 0);
}
- EXPECT_EQ(c.GetCorrectedTargetRate(), 48000U);
+ EXPECT_EQ(c.GetCorrectedSourceRate(), 48000U);
c.UpdateClock(hundredMillis, hundredMillis, bufferedLow, 0);
- EXPECT_EQ(c.GetCorrectedTargetRate(), 48048U);
+ EXPECT_EQ(c.GetCorrectedSourceRate(), 47952U);
+}
+
+TEST(TestDriftController, VerySmallBufferedFrames)
+{
+ // The buffer level is the only input to the controller logic.
+ uint32_t bufferedLow = 1;
+ uint32_t nominalRate = 48000;
+
+ DriftController c(nominalRate, nominalRate, media::TimeUnit::FromSeconds(1));
+ media::TimeUnit oneSec = media::TimeUnit::FromSeconds(1);
+ media::TimeUnit sourceDuration(1, nominalRate);
+
+ EXPECT_EQ(c.GetCorrectedSourceRate(), nominalRate);
+ uint32_t previousCorrected = nominalRate;
+ // Steps are limited to nominalRate/1000.
+ // Perform 1001 steps to check the corrected rate does not underflow zero.
+ for (uint32_t i = 0; i < 1001; ++i) {
+ c.UpdateClock(sourceDuration, oneSec, bufferedLow, 0);
+ uint32_t correctedRate = c.GetCorrectedSourceRate();
+ EXPECT_LE(correctedRate, previousCorrected) << "for i=" << i;
+ EXPECT_GT(correctedRate, 0u) << "for i=" << i;
+ previousCorrected = correctedRate;
+ }
}
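The GetCorrectedTargetRate() to GetCorrectedSourceRate() rename flips the sign of the correction, which is why the expected values swap between 47952 and 48048 throughout. A toy step function showing the directionality only, not the controller's actual math; names are hypothetical:

    #include <algorithm>
    #include <cstdint>

    uint32_t StepCorrectedSourceRate(uint32_t aNominalSourceRate,
                                     int64_t aBufferedFrames,
                                     int64_t aDesiredFrames) {
      // Steps are limited to nominalRate / 1000, and never reach zero.
      const int64_t step = aNominalSourceRate / 1000;
      if (aBufferedFrames < aDesiredFrames) {
        // Buffer too low: report a slower source so the resampler consumes
        // fewer input frames per output block and the buffer refills.
        return static_cast<uint32_t>(
            std::max<int64_t>(1, aNominalSourceRate - step));
      }
      if (aBufferedFrames > aDesiredFrames) {
        return aNominalSourceRate + step;  // buffer too high: drain faster
      }
      return aNominalSourceRate;
    }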
diff --git a/dom/media/driftcontrol/gtest/TestDynamicResampler.cpp b/dom/media/driftcontrol/gtest/TestDynamicResampler.cpp
index fb8ac52ae4..539dfbfbea 100644
--- a/dom/media/driftcontrol/gtest/TestDynamicResampler.cpp
+++ b/dom/media/driftcontrol/gtest/TestDynamicResampler.cpp
@@ -19,7 +19,7 @@ TEST(TestDynamicResampler, SameRates_Float1)
DynamicResampler dr(in_rate, out_rate);
dr.SetSampleFormat(AUDIO_FORMAT_FLOAT32);
- EXPECT_EQ(dr.GetOutRate(), out_rate);
+ EXPECT_EQ(dr.GetInRate(), in_rate);
EXPECT_EQ(dr.GetChannels(), channels);
// float in_ch1[] = {.1, .2, .3, .4, .5, .6, .7, .8, .9, 1.0};
@@ -76,7 +76,7 @@ TEST(TestDynamicResampler, SameRates_Short1)
DynamicResampler dr(in_rate, out_rate);
dr.SetSampleFormat(AUDIO_FORMAT_S16);
- EXPECT_EQ(dr.GetOutRate(), out_rate);
+ EXPECT_EQ(dr.GetInRate(), in_rate);
EXPECT_EQ(dr.GetChannels(), channels);
short in_ch1[] = {1, 2, 3};
@@ -298,9 +298,9 @@ TEST(TestDynamicResampler, UpdateOutRate_Float)
uint32_t pre_buffer = 20;
- DynamicResampler dr(in_rate, out_rate, media::TimeUnit(pre_buffer, in_rate));
+ DynamicResampler dr(in_rate, out_rate);
dr.SetSampleFormat(AUDIO_FORMAT_FLOAT32);
- EXPECT_EQ(dr.GetOutRate(), out_rate);
+ EXPECT_EQ(dr.GetInRate(), in_rate);
EXPECT_EQ(dr.GetChannels(), channels);
float in_ch1[10] = {};
@@ -329,10 +329,10 @@ TEST(TestDynamicResampler, UpdateOutRate_Float)
EXPECT_FLOAT_EQ(out_ch2[i], 0.0);
}
- // Update out rate
- out_rate = 44100;
- dr.UpdateResampler(out_rate, channels);
- EXPECT_EQ(dr.GetOutRate(), out_rate);
+ // Update in rate
+ in_rate = 26122;
+ dr.UpdateResampler(in_rate, channels);
+ EXPECT_EQ(dr.GetInRate(), in_rate);
EXPECT_EQ(dr.GetChannels(), channels);
out_frames = in_frames * out_rate / in_rate;
EXPECT_EQ(out_frames, 18u);
@@ -354,9 +354,9 @@ TEST(TestDynamicResampler, UpdateOutRate_Short)
uint32_t pre_buffer = 20;
- DynamicResampler dr(in_rate, out_rate, media::TimeUnit(pre_buffer, in_rate));
+ DynamicResampler dr(in_rate, out_rate);
dr.SetSampleFormat(AUDIO_FORMAT_S16);
- EXPECT_EQ(dr.GetOutRate(), out_rate);
+ EXPECT_EQ(dr.GetInRate(), in_rate);
EXPECT_EQ(dr.GetChannels(), channels);
short in_ch1[10] = {};
@@ -385,10 +385,10 @@ TEST(TestDynamicResampler, UpdateOutRate_Short)
EXPECT_EQ(out_ch2[i], 0.0);
}
- // Update out rate
- out_rate = 44100;
- dr.UpdateResampler(out_rate, channels);
- EXPECT_EQ(dr.GetOutRate(), out_rate);
+ // Update in rate
+ in_rate = 26122;
+ dr.UpdateResampler(in_rate, channels);
+ EXPECT_EQ(dr.GetInRate(), in_rate);
EXPECT_EQ(dr.GetChannels(), channels);
out_frames = in_frames * out_rate / in_rate;
EXPECT_EQ(out_frames, 18u);
@@ -400,16 +400,15 @@ TEST(TestDynamicResampler, UpdateOutRate_Short)
EXPECT_FALSE(hasUnderrun);
}
-TEST(TestDynamicResampler, BigRangeOutRates_Float)
+TEST(TestDynamicResampler, BigRangeInRates_Float)
{
uint32_t in_frames = 10;
uint32_t out_frames = 10;
uint32_t channels = 2;
uint32_t in_rate = 44100;
uint32_t out_rate = 44100;
- uint32_t pre_buffer = 20;
- DynamicResampler dr(in_rate, out_rate, media::TimeUnit(pre_buffer, in_rate));
+ DynamicResampler dr(in_rate, out_rate);
dr.SetSampleFormat(AUDIO_FORMAT_FLOAT32);
const uint32_t in_capacity = 40;
@@ -427,10 +426,14 @@ TEST(TestDynamicResampler, BigRangeOutRates_Float)
float out_ch1[out_capacity] = {};
float out_ch2[out_capacity] = {};
- for (uint32_t rate = 10000; rate < 90000; ++rate) {
- out_rate = rate;
- dr.UpdateResampler(out_rate, channels);
- EXPECT_EQ(dr.GetOutRate(), out_rate);
+ // Downsampling at a high enough ratio happens to have enough excess
+ // in_frames from rounding in the out_frames calculation to cover the
+ // skipped input latency when switching from zero-latency 44100->44100 to a
+ // non-1:1 ratio.
+ for (uint32_t rate = 100000; rate >= 10000; rate -= 2) {
+ in_rate = rate;
+ dr.UpdateResampler(in_rate, channels);
+ EXPECT_EQ(dr.GetInRate(), in_rate);
EXPECT_EQ(dr.GetChannels(), channels);
in_frames = 20; // more than we need
out_frames = in_frames * out_rate / in_rate;
@@ -444,16 +447,15 @@ TEST(TestDynamicResampler, BigRangeOutRates_Float)
}
}
-TEST(TestDynamicResampler, BigRangeOutRates_Short)
+TEST(TestDynamicResampler, BigRangeInRates_Short)
{
uint32_t in_frames = 10;
uint32_t out_frames = 10;
uint32_t channels = 2;
uint32_t in_rate = 44100;
uint32_t out_rate = 44100;
- uint32_t pre_buffer = 20;
- DynamicResampler dr(in_rate, out_rate, media::TimeUnit(pre_buffer, in_rate));
+ DynamicResampler dr(in_rate, out_rate);
dr.SetSampleFormat(AUDIO_FORMAT_S16);
const uint32_t in_capacity = 40;
@@ -471,9 +473,9 @@ TEST(TestDynamicResampler, BigRangeOutRates_Short)
short out_ch1[out_capacity] = {};
short out_ch2[out_capacity] = {};
- for (uint32_t rate = 10000; rate < 90000; ++rate) {
- out_rate = rate;
- dr.UpdateResampler(out_rate, channels);
+ for (uint32_t rate = 100000; rate >= 10000; rate -= 2) {
+ in_rate = rate;
+ dr.UpdateResampler(in_rate, channels);
in_frames = 20; // more than we need
out_frames = in_frames * out_rate / in_rate;
for (uint32_t y = 0; y < 2; ++y) {
@@ -517,8 +519,8 @@ TEST(TestDynamicResampler, UpdateChannels_Float)
EXPECT_FALSE(hasUnderrun);
// Add 3rd channel
- dr.UpdateResampler(out_rate, 3);
- EXPECT_EQ(dr.GetOutRate(), out_rate);
+ dr.UpdateResampler(in_rate, 3);
+ EXPECT_EQ(dr.GetInRate(), in_rate);
EXPECT_EQ(dr.GetChannels(), 3u);
float in_ch3[10] = {};
@@ -546,8 +548,8 @@ TEST(TestDynamicResampler, UpdateChannels_Float)
in_buffer[3] = in_ch4;
float out_ch4[10] = {};
- dr.UpdateResampler(out_rate, 4);
- EXPECT_EQ(dr.GetOutRate(), out_rate);
+ dr.UpdateResampler(in_rate, 4);
+ EXPECT_EQ(dr.GetInRate(), in_rate);
EXPECT_EQ(dr.GetChannels(), 4u);
dr.AppendInput(in_buffer, in_frames);
@@ -592,8 +594,8 @@ TEST(TestDynamicResampler, UpdateChannels_Short)
EXPECT_FALSE(hasUnderrun);
// Add 3rd channel
- dr.UpdateResampler(out_rate, 3);
- EXPECT_EQ(dr.GetOutRate(), out_rate);
+ dr.UpdateResampler(in_rate, 3);
+ EXPECT_EQ(dr.GetInRate(), in_rate);
EXPECT_EQ(dr.GetChannels(), 3u);
short in_ch3[10] = {};
@@ -622,8 +624,8 @@ TEST(TestDynamicResampler, UpdateChannels_Short)
in_buffer[3] = in_ch4;
short out_ch4[10] = {};
- dr.UpdateResampler(out_rate, 4);
- EXPECT_EQ(dr.GetOutRate(), out_rate);
+ dr.UpdateResampler(in_rate, 4);
+ EXPECT_EQ(dr.GetInRate(), in_rate);
EXPECT_EQ(dr.GetChannels(), 4u);
dr.AppendInput(in_buffer, in_frames);
@@ -647,7 +649,7 @@ TEST(TestDynamicResampler, Underrun)
DynamicResampler dr(in_rate, out_rate);
dr.SetSampleFormat(AUDIO_FORMAT_FLOAT32);
- EXPECT_EQ(dr.GetOutRate(), out_rate);
+ EXPECT_EQ(dr.GetInRate(), in_rate);
EXPECT_EQ(dr.GetChannels(), channels);
float in_ch1[in_frames] = {};
@@ -689,7 +691,7 @@ TEST(TestDynamicResampler, Underrun)
}
// Now try with resampling.
- dr.UpdateResampler(out_rate / 2, channels);
+ dr.UpdateResampler(in_rate * 2, channels);
dr.AppendInput(in_buffer, in_frames);
hasUnderrun = dr.Resample(out_ch1, out_frames, 0);
EXPECT_TRUE(hasUnderrun);
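The "excess in_frames from rounding" that the BigRangeInRates comment leans on can be made concrete. Because out_frames truncates down, the input actually consumed is a little less than what was appended, and the difference absorbs the resampler's startup latency (a sketch under those assumptions):

    uint32_t SlackFrames(uint32_t aInFrames, uint32_t aInRate,
                         uint32_t aOutRate) {
      uint32_t outFrames = aInFrames * aOutRate / aInRate;  // truncates down
      // Input frames needed to produce outFrames, rounded up.
      uint32_t consumed = (outFrames * aInRate + aOutRate - 1) / aOutRate;
      return aInFrames - consumed;  // leftover input frames of headroom
    }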
diff --git a/dom/media/driftcontrol/plot.py b/dom/media/driftcontrol/plot.py
index d55c0f7de0..c3685ead7c 100755
--- a/dom/media/driftcontrol/plot.py
+++ b/dom/media/driftcontrol/plot.py
@@ -86,23 +86,23 @@ MOZ_LOG_FILE=/tmp/driftcontrol.csv \
[d + h for (d, h) in zip(desired, hysteresisthreshold)],
alpha=0.2,
color="goldenrod",
- legend_label="Hysteresis Threshold (won't correct out rate within area)",
+ legend_label="Hysteresis Threshold (won't correct in rate within area)",
)
fig2 = figure(x_range=fig1.x_range)
fig2.line(t, inrate, color="hotpink", legend_label="Nominal in sample rate")
fig2.line(t, outrate, color="firebrick", legend_label="Nominal out sample rate")
fig2.line(
- t, corrected, color="dodgerblue", legend_label="Corrected out sample rate"
+ t, corrected, color="dodgerblue", legend_label="Corrected in sample rate"
)
fig2.line(
t,
hysteresiscorrected,
color="seagreen",
- legend_label="Hysteresis-corrected out sample rate",
+ legend_label="Hysteresis-corrected in sample rate",
)
fig2.line(
- t, configured, color="goldenrod", legend_label="Configured out sample rate"
+ t, configured, color="goldenrod", legend_label="Configured in sample rate"
)
fig3 = figure(x_range=fig1.x_range)
diff --git a/dom/media/encoder/VP8TrackEncoder.cpp b/dom/media/encoder/VP8TrackEncoder.cpp
index 0c7f3de1f4..36680b6552 100644
--- a/dom/media/encoder/VP8TrackEncoder.cpp
+++ b/dom/media/encoder/VP8TrackEncoder.cpp
@@ -9,7 +9,7 @@
#include <vpx/vpx_encoder.h>
#include "DriftCompensation.h"
-#include "ImageToI420.h"
+#include "ImageConversion.h"
#include "mozilla/gfx/2D.h"
#include "prsystem.h"
#include "VideoSegment.h"
diff --git a/dom/media/gmp/CDMStorageIdProvider.cpp b/dom/media/gmp/CDMStorageIdProvider.cpp
index 52255879b3..9af4580d9e 100644
--- a/dom/media/gmp/CDMStorageIdProvider.cpp
+++ b/dom/media/gmp/CDMStorageIdProvider.cpp
@@ -6,7 +6,7 @@
#include "CDMStorageIdProvider.h"
#include "GMPLog.h"
#include "nsCOMPtr.h"
-#include "nsComponentManagerUtils.h"
+#include "mozilla/IntegerPrintfMacros.h"
#include "nsICryptoHash.h"
#ifdef SUPPORT_STORAGE_ID
diff --git a/dom/media/gmp/ChromiumCDMChild.h b/dom/media/gmp/ChromiumCDMChild.h
index 1bf153a5d5..fae54bbb5c 100644
--- a/dom/media/gmp/ChromiumCDMChild.h
+++ b/dom/media/gmp/ChromiumCDMChild.h
@@ -125,7 +125,7 @@ class ChromiumCDMChild : public PChromiumCDMChild, public cdm::Host_10 {
GMPContentChild* mPlugin = nullptr;
cdm::ContentDecryptionModule_10* mCDM = nullptr;
- typedef SimpleMap<uint64_t> DurationMap;
+ typedef SimpleMap<int64_t, uint64_t, ThreadSafePolicy> DurationMap;
DurationMap mFrameDurations;
nsTArray<uint32_t> mLoadSessionPromiseIds;
diff --git a/dom/media/gmp/GMPVideoDecoderChild.cpp b/dom/media/gmp/GMPVideoDecoderChild.cpp
index 0f605cca9b..f60952ee51 100644
--- a/dom/media/gmp/GMPVideoDecoderChild.cpp
+++ b/dom/media/gmp/GMPVideoDecoderChild.cpp
@@ -36,12 +36,17 @@ void GMPVideoDecoderChild::Init(GMPVideoDecoder* aDecoder) {
GMPVideoHostImpl& GMPVideoDecoderChild::Host() { return mVideoHost; }
void GMPVideoDecoderChild::Decoded(GMPVideoi420Frame* aDecodedFrame) {
- MOZ_ASSERT(mPlugin->GMPMessageLoop() == MessageLoop::current());
-
if (!aDecodedFrame) {
MOZ_CRASH("Not given a decoded frame!");
}
+ if (NS_WARN_IF(!mPlugin)) {
+ aDecodedFrame->Destroy();
+ return;
+ }
+
+ MOZ_ASSERT(mPlugin->GMPMessageLoop() == MessageLoop::current());
+
auto df = static_cast<GMPVideoi420FrameImpl*>(aDecodedFrame);
GMPVideoi420FrameData frameData;
@@ -53,36 +58,60 @@ void GMPVideoDecoderChild::Decoded(GMPVideoi420Frame* aDecodedFrame) {
void GMPVideoDecoderChild::ReceivedDecodedReferenceFrame(
const uint64_t aPictureId) {
+ if (NS_WARN_IF(!mPlugin)) {
+ return;
+ }
+
MOZ_ASSERT(mPlugin->GMPMessageLoop() == MessageLoop::current());
SendReceivedDecodedReferenceFrame(aPictureId);
}
void GMPVideoDecoderChild::ReceivedDecodedFrame(const uint64_t aPictureId) {
+ if (NS_WARN_IF(!mPlugin)) {
+ return;
+ }
+
MOZ_ASSERT(mPlugin->GMPMessageLoop() == MessageLoop::current());
SendReceivedDecodedFrame(aPictureId);
}
void GMPVideoDecoderChild::InputDataExhausted() {
+ if (NS_WARN_IF(!mPlugin)) {
+ return;
+ }
+
MOZ_ASSERT(mPlugin->GMPMessageLoop() == MessageLoop::current());
SendInputDataExhausted();
}
void GMPVideoDecoderChild::DrainComplete() {
+ if (NS_WARN_IF(!mPlugin)) {
+ return;
+ }
+
MOZ_ASSERT(mPlugin->GMPMessageLoop() == MessageLoop::current());
SendDrainComplete();
}
void GMPVideoDecoderChild::ResetComplete() {
+ if (NS_WARN_IF(!mPlugin)) {
+ return;
+ }
+
MOZ_ASSERT(mPlugin->GMPMessageLoop() == MessageLoop::current());
SendResetComplete();
}
void GMPVideoDecoderChild::Error(GMPErr aError) {
+ if (NS_WARN_IF(!mPlugin)) {
+ return;
+ }
+
MOZ_ASSERT(mPlugin->GMPMessageLoop() == MessageLoop::current());
SendError(aError);
@@ -121,9 +150,9 @@ mozilla::ipc::IPCResult GMPVideoDecoderChild::RecvDecode(
mozilla::ipc::IPCResult GMPVideoDecoderChild::RecvChildShmemForPool(
Shmem&& aFrameBuffer) {
- if (aFrameBuffer.IsWritable()) {
- mVideoHost.SharedMemMgr()->MgrDeallocShmem(GMPSharedMem::kGMPFrameData,
- aFrameBuffer);
+ GMPSharedMemManager* memMgr = mVideoHost.SharedMemMgr();
+ if (memMgr && aFrameBuffer.IsWritable()) {
+ memMgr->MgrDeallocShmem(GMPSharedMem::kGMPFrameData, aFrameBuffer);
}
return IPC_OK();
}
@@ -153,6 +182,7 @@ mozilla::ipc::IPCResult GMPVideoDecoderChild::RecvDrain() {
}
mozilla::ipc::IPCResult GMPVideoDecoderChild::RecvDecodingComplete() {
+ MOZ_ASSERT(mPlugin);
MOZ_ASSERT(mPlugin->GMPMessageLoop() == MessageLoop::current());
if (mNeedShmemIntrCount) {
@@ -163,6 +193,13 @@ mozilla::ipc::IPCResult GMPVideoDecoderChild::RecvDecodingComplete() {
mPendingDecodeComplete = true;
return IPC_OK();
}
+
+ // This will call ActorDestroy.
+ Unused << Send__delete__(this);
+ return IPC_OK();
+}
+
+void GMPVideoDecoderChild::ActorDestroy(ActorDestroyReason why) {
if (mVideoDecoder) {
// Ignore any return code. It is OK for this to fail without killing the
// process.
@@ -173,13 +210,13 @@ mozilla::ipc::IPCResult GMPVideoDecoderChild::RecvDecodingComplete() {
mVideoHost.DoneWithAPI();
mPlugin = nullptr;
-
- Unused << Send__delete__(this);
-
- return IPC_OK();
}
bool GMPVideoDecoderChild::Alloc(size_t aSize, Shmem* aMem) {
+ if (NS_WARN_IF(!mPlugin)) {
+ return false;
+ }
+
MOZ_ASSERT(mPlugin->GMPMessageLoop() == MessageLoop::current());
bool rv;
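The decoder-side changes above follow one pattern: RecvDecodingComplete() now only sends __delete__, IPC then invokes ActorDestroy(), which owns the cleanup and clears mPlugin, and every callback that might arrive afterwards bails out early. The encoder below applies the same pattern. A condensed sketch of the guard (class and message names are placeholders):

    void Child::SomeCallback() {
      if (NS_WARN_IF(!mPlugin)) {
        return;  // actor already destroyed; sending IPC now would be unsafe
      }
      MOZ_ASSERT(mPlugin->GMPMessageLoop() == MessageLoop::current());
      SendSomeMessage();
    }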
diff --git a/dom/media/gmp/GMPVideoDecoderChild.h b/dom/media/gmp/GMPVideoDecoderChild.h
index 3c74e5f02c..527d6cad44 100644
--- a/dom/media/gmp/GMPVideoDecoderChild.h
+++ b/dom/media/gmp/GMPVideoDecoderChild.h
@@ -59,6 +59,7 @@ class GMPVideoDecoderChild : public PGMPVideoDecoderChild,
mozilla::ipc::IPCResult RecvReset();
mozilla::ipc::IPCResult RecvDrain();
mozilla::ipc::IPCResult RecvDecodingComplete();
+ void ActorDestroy(ActorDestroyReason why) override;
GMPContentChild* mPlugin;
GMPVideoDecoder* mVideoDecoder;
diff --git a/dom/media/gmp/GMPVideoEncoderChild.cpp b/dom/media/gmp/GMPVideoEncoderChild.cpp
index 19a96b5efe..01a913f920 100644
--- a/dom/media/gmp/GMPVideoEncoderChild.cpp
+++ b/dom/media/gmp/GMPVideoEncoderChild.cpp
@@ -38,6 +38,11 @@ GMPVideoHostImpl& GMPVideoEncoderChild::Host() { return mVideoHost; }
void GMPVideoEncoderChild::Encoded(GMPVideoEncodedFrame* aEncodedFrame,
const uint8_t* aCodecSpecificInfo,
uint32_t aCodecSpecificInfoLength) {
+ if (NS_WARN_IF(!mPlugin)) {
+ aEncodedFrame->Destroy();
+ return;
+ }
+
MOZ_ASSERT(mPlugin->GMPMessageLoop() == MessageLoop::current());
auto ef = static_cast<GMPVideoEncodedFrameImpl*>(aEncodedFrame);
@@ -53,6 +58,10 @@ void GMPVideoEncoderChild::Encoded(GMPVideoEncodedFrame* aEncodedFrame,
}
void GMPVideoEncoderChild::Error(GMPErr aError) {
+ if (NS_WARN_IF(!mPlugin)) {
+ return;
+ }
+
MOZ_ASSERT(mPlugin->GMPMessageLoop() == MessageLoop::current());
SendError(aError);
@@ -95,9 +104,9 @@ mozilla::ipc::IPCResult GMPVideoEncoderChild::RecvEncode(
mozilla::ipc::IPCResult GMPVideoEncoderChild::RecvChildShmemForPool(
Shmem&& aEncodedBuffer) {
- if (aEncodedBuffer.IsWritable()) {
- mVideoHost.SharedMemMgr()->MgrDeallocShmem(GMPSharedMem::kGMPEncodedData,
- aEncodedBuffer);
+ GMPSharedMemManager* memMgr = mVideoHost.SharedMemMgr();
+ if (memMgr && aEncodedBuffer.IsWritable()) {
+ memMgr->MgrDeallocShmem(GMPSharedMem::kGMPEncodedData, aEncodedBuffer);
}
return IPC_OK();
}
@@ -142,6 +151,7 @@ mozilla::ipc::IPCResult GMPVideoEncoderChild::RecvSetPeriodicKeyFrames(
}
mozilla::ipc::IPCResult GMPVideoEncoderChild::RecvEncodingComplete() {
+ MOZ_ASSERT(mPlugin);
MOZ_ASSERT(mPlugin->GMPMessageLoop() == MessageLoop::current());
if (mNeedShmemIntrCount) {
@@ -153,26 +163,29 @@ mozilla::ipc::IPCResult GMPVideoEncoderChild::RecvEncodingComplete() {
return IPC_OK();
}
- if (!mVideoEncoder) {
- // There is not much to clean up anymore.
- Unused << Send__delete__(this);
- return IPC_OK();
- }
+ // This will call ActorDestroy.
+ Unused << Send__delete__(this);
+ return IPC_OK();
+}
- // Ignore any return code. It is OK for this to fail without killing the
- // process.
- mVideoEncoder->EncodingComplete();
+void GMPVideoEncoderChild::ActorDestroy(ActorDestroyReason why) {
+ if (mVideoEncoder) {
+ // Ignore any return code. It is OK for this to fail without killing the
+ // process.
+ mVideoEncoder->EncodingComplete();
+ mVideoEncoder = nullptr;
+ }
mVideoHost.DoneWithAPI();
mPlugin = nullptr;
-
- Unused << Send__delete__(this);
-
- return IPC_OK();
}
bool GMPVideoEncoderChild::Alloc(size_t aSize, Shmem* aMem) {
+ if (NS_WARN_IF(!mPlugin)) {
+ return false;
+ }
+
MOZ_ASSERT(mPlugin->GMPMessageLoop() == MessageLoop::current());
bool rv;
diff --git a/dom/media/gmp/GMPVideoEncoderChild.h b/dom/media/gmp/GMPVideoEncoderChild.h
index dd3c0fdf37..344a55b388 100644
--- a/dom/media/gmp/GMPVideoEncoderChild.h
+++ b/dom/media/gmp/GMPVideoEncoderChild.h
@@ -59,6 +59,7 @@ class GMPVideoEncoderChild : public PGMPVideoEncoderChild,
const uint32_t& aFrameRate);
mozilla::ipc::IPCResult RecvSetPeriodicKeyFrames(const bool& aEnable);
mozilla::ipc::IPCResult RecvEncodingComplete();
+ void ActorDestroy(ActorDestroyReason why) override;
GMPContentChild* mPlugin;
GMPVideoEncoder* mVideoEncoder;
diff --git a/dom/media/gmp/mozIGeckoMediaPluginService.idl b/dom/media/gmp/mozIGeckoMediaPluginService.idl
index 000cfef2f5..a4e3253cba 100644
--- a/dom/media/gmp/mozIGeckoMediaPluginService.idl
+++ b/dom/media/gmp/mozIGeckoMediaPluginService.idl
@@ -57,7 +57,7 @@ native GetGMPVideoEncoderCallback(mozilla::UniquePtr<GetGMPVideoEncoderCallback>
native GetNodeIdCallback(mozilla::UniquePtr<GetNodeIdCallback>&&);
native GMPCrashHelperPtr(mozilla::GMPCrashHelper*);
-[scriptable, uuid(44d362ae-937a-4803-bee6-f2512a0149d1)]
+[scriptable, builtinclass, uuid(44d362ae-937a-4803-bee6-f2512a0149d1)]
interface mozIGeckoMediaPluginService : nsISupports
{
diff --git a/dom/media/gtest/AudioVerifier.h b/dom/media/gtest/AudioVerifier.h
index ba67f6e489..2ff8ed9269 100644
--- a/dom/media/gtest/AudioVerifier.h
+++ b/dom/media/gtest/AudioVerifier.h
@@ -99,7 +99,7 @@ class AudioVerifier {
void CountZeroCrossing(Sample aCurrentSample) {
if (mPrevious > 0 && aCurrentSample <= 0) {
if (mZeroCrossCount++) {
- MOZ_ASSERT(mZeroCrossCount > 1);
+ MOZ_RELEASE_ASSERT(mZeroCrossCount > 1);
mSumPeriodInSamples += mTotalFramesSoFar - mLastZeroCrossPosition;
}
mLastZeroCrossPosition = mTotalFramesSoFar;
@@ -120,7 +120,7 @@ class AudioVerifier {
return;
}
- MOZ_ASSERT(mCurrentDiscontinuityFrameCount == 0);
+ MOZ_RELEASE_ASSERT(mCurrentDiscontinuityFrameCount == 0);
if (!discontinuity) {
return;
}
diff --git a/dom/media/gtest/GMPTestMonitor.h b/dom/media/gtest/GMPTestMonitor.h
index 27477b6a42..9f4e8f0a84 100644
--- a/dom/media/gtest/GMPTestMonitor.h
+++ b/dom/media/gtest/GMPTestMonitor.h
@@ -15,7 +15,7 @@ class GMPTestMonitor {
GMPTestMonitor() : mFinished(false) {}
void AwaitFinished() {
- MOZ_ASSERT(NS_IsMainThread());
+ MOZ_RELEASE_ASSERT(NS_IsMainThread());
mozilla::SpinEventLoopUntil("GMPTestMonitor::AwaitFinished"_ns,
[&]() { return mFinished; });
mFinished = false;
@@ -23,7 +23,7 @@ class GMPTestMonitor {
private:
void MarkFinished() {
- MOZ_ASSERT(NS_IsMainThread());
+ MOZ_RELEASE_ASSERT(NS_IsMainThread());
mFinished = true;
}
diff --git a/dom/media/gtest/MockCubeb.cpp b/dom/media/gtest/MockCubeb.cpp
index ae3a676ac8..9d61ccf4b5 100644
--- a/dom/media/gtest/MockCubeb.cpp
+++ b/dom/media/gtest/MockCubeb.cpp
@@ -168,79 +168,93 @@ MockCubebStream::MockCubebStream(
mAudioVerifier(aInputStreamParams ? aInputStreamParams->rate
: aOutputStreamParams->rate,
100 /* aFrequency */) {
- MOZ_ASSERT(mAudioGenerator.ChannelCount() <= MAX_INPUT_CHANNELS,
- "mInputBuffer has no enough space to hold generated data");
- MOZ_ASSERT_IF(mFrozenStart, mRunningMode == RunningMode::Automatic);
+ MOZ_RELEASE_ASSERT(mAudioGenerator.ChannelCount() <= MAX_INPUT_CHANNELS,
+ "mInputBuffer has no enough space to hold generated data");
+ if (mFrozenStart) {
+ MOZ_RELEASE_ASSERT(mRunningMode == RunningMode::Automatic);
+ }
if (aInputStreamParams) {
mInputParams = *aInputStreamParams;
}
if (aOutputStreamParams) {
mOutputParams = *aOutputStreamParams;
- MOZ_ASSERT(SampleRate() == mOutputParams.rate);
+ MOZ_RELEASE_ASSERT(SampleRate() == mOutputParams.rate);
}
}
MockCubebStream::~MockCubebStream() = default;
int MockCubebStream::Start() {
- NotifyState(CUBEB_STATE_STARTED);
- mStreamStop = false;
- if (mFrozenStart) {
- // We need to grab mFrozenStartMonitor before returning to avoid races in
- // the calling code -- it controls when to mFrozenStartMonitor.Notify().
- // TempData helps facilitate this by holding what's needed to block the
- // calling thread until the background thread has grabbed the lock.
- struct TempData {
- NS_INLINE_DECL_THREADSAFE_REFCOUNTING(TempData)
- static_assert(HasThreadSafeRefCnt::value,
- "Silence a -Wunused-local-typedef warning");
- Monitor mMonitor{"MockCubebStream::Start::TempData::mMonitor"};
- bool mFinished = false;
-
- private:
- ~TempData() = default;
- };
- auto temp = MakeRefPtr<TempData>();
- MonitorAutoLock lock(temp->mMonitor);
- NS_DispatchBackgroundTask(NS_NewRunnableFunction(
- "MockCubebStream::WaitForThawBeforeStart",
- [temp, this, self = RefPtr<SmartMockCubebStream>(mSelf)]() mutable {
- MonitorAutoLock lock(mFrozenStartMonitor);
- {
- // Unblock MockCubebStream::Start now that we have locked the frozen
- // start monitor.
- MonitorAutoLock tempLock(temp->mMonitor);
- temp->mFinished = true;
- temp->mMonitor.Notify();
- temp = nullptr;
- }
- while (mFrozenStart) {
- mFrozenStartMonitor.Wait();
- }
- if (!mStreamStop) {
+ {
+ MutexAutoLock l(mMutex);
+ NotifyState(CUBEB_STATE_STARTED);
+ }
+ {
+ MonitorAutoLock lock(mFrozenStartMonitor);
+ if (mFrozenStart) {
+ // We need to grab mFrozenStartMonitor before returning to avoid races in
+      // the calling code -- it controls when mFrozenStartMonitor.Notify() runs.
+ // TempData helps facilitate this by holding what's needed to block the
+ // calling thread until the background thread has grabbed the lock.
+ struct TempData {
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(TempData)
+ static_assert(HasThreadSafeRefCnt::value,
+ "Silence a -Wunused-local-typedef warning");
+ Monitor mMonitor{"MockCubebStream::Start::TempData::mMonitor"};
+ bool mFinished = false;
+
+ private:
+ ~TempData() = default;
+ };
+ auto temp = MakeRefPtr<TempData>();
+ MonitorAutoLock lock(temp->mMonitor);
+ NS_DispatchBackgroundTask(NS_NewRunnableFunction(
+ "MockCubebStream::WaitForThawBeforeStart",
+ [temp, this, self = RefPtr<SmartMockCubebStream>(mSelf)]() mutable {
+ {
+ // Unblock MockCubebStream::Start now that we have locked the
+ // frozen start monitor.
+ MonitorAutoLock tempLock(temp->mMonitor);
+ temp->mFinished = true;
+ temp->mMonitor.Notify();
+ temp = nullptr;
+ }
+ {
+ MonitorAutoLock lock(mFrozenStartMonitor);
+ while (mFrozenStart) {
+ mFrozenStartMonitor.Wait();
+ }
+ }
+ if (MutexAutoLock l(mMutex);
+ !mState || *mState != CUBEB_STATE_STARTED) {
+ return;
+ }
MockCubeb::AsMock(context)->StartStream(mSelf);
- }
- }));
- while (!temp->mFinished) {
- temp->mMonitor.Wait();
+ }));
+ while (!temp->mFinished) {
+ temp->mMonitor.Wait();
+ }
+ return CUBEB_OK;
}
- return CUBEB_OK;
}
MockCubeb::AsMock(context)->StartStream(this);
return CUBEB_OK;
}
int MockCubebStream::Stop() {
+ MockCubeb::AsMock(context)->StopStream(this);
+ MutexAutoLock l(mMutex);
mOutputVerificationEvent.Notify(std::make_tuple(
mAudioVerifier.PreSilenceSamples(), mAudioVerifier.EstimatedFreq(),
mAudioVerifier.CountDiscontinuities()));
- MockCubeb::AsMock(context)->StopStream(this);
- mStreamStop = true;
NotifyState(CUBEB_STATE_STOPPED);
return CUBEB_OK;
}
-uint64_t MockCubebStream::Position() { return mPosition; }
+uint64_t MockCubebStream::Position() {
+ MutexAutoLock l(mMutex);
+ return mPosition;
+}
void MockCubebStream::Destroy() {
// Stop() even if cubeb_stream_stop() has already been called, as with
@@ -248,11 +262,15 @@ void MockCubebStream::Destroy() {
// This provides an extra STOPPED state callback as with audioipc.
// It also ensures that this stream is removed from MockCubeb::mLiveStreams.
Stop();
- mDestroyed = true;
+ {
+ MutexAutoLock l(mMutex);
+ mDestroyed = true;
+ }
MockCubeb::AsMock(context)->StreamDestroy(this);
}
int MockCubebStream::SetName(char const* aName) {
+ MutexAutoLock l(mMutex);
mName = aName;
mNameSetEvent.Notify(mName);
return CUBEB_OK;
@@ -260,6 +278,7 @@ int MockCubebStream::SetName(char const* aName) {
int MockCubebStream::RegisterDeviceChangedCallback(
cubeb_device_changed_callback aDeviceChangedCallback) {
+ MutexAutoLock l(mMutex);
if (mDeviceChangedCallback && aDeviceChangedCallback) {
return CUBEB_ERROR_INVALID_PARAMETER;
}
@@ -267,14 +286,41 @@ int MockCubebStream::RegisterDeviceChangedCallback(
return CUBEB_OK;
}
+int MockCubebStream::SetInputProcessingParams(
+ cubeb_input_processing_params aParams) {
+ MockCubeb* mock = MockCubeb::AsMock(context);
+ auto res = mock->SupportedInputProcessingParams();
+ if (res.isErr()) {
+ return CUBEB_ERROR_NOT_SUPPORTED;
+ }
+ cubeb_input_processing_params supported = res.unwrap();
+ if ((supported & aParams) != aParams) {
+ return CUBEB_ERROR_INVALID_PARAMETER;
+ }
+ return mock->InputProcessingApplyRv();
+}
+
cubeb_stream* MockCubebStream::AsCubebStream() {
- MOZ_ASSERT(!mDestroyed);
+ MutexAutoLock l(mMutex);
+ return AsCubebStreamLocked();
+}
+
+cubeb_stream* MockCubebStream::AsCubebStreamLocked() {
+ MOZ_RELEASE_ASSERT(!mDestroyed);
+ mMutex.AssertCurrentThreadOwns();
return reinterpret_cast<cubeb_stream*>(this);
}
MockCubebStream* MockCubebStream::AsMock(cubeb_stream* aStream) {
auto* mockStream = reinterpret_cast<MockCubebStream*>(aStream);
- MOZ_ASSERT(!mockStream->mDestroyed);
+ MutexAutoLock l(mockStream->mMutex);
+ return AsMockLocked(aStream);
+}
+
+MockCubebStream* MockCubebStream::AsMockLocked(cubeb_stream* aStream) {
+ auto* mockStream = reinterpret_cast<MockCubebStream*>(aStream);
+ mockStream->mMutex.AssertCurrentThreadOwns();
+ MOZ_RELEASE_ASSERT(!mockStream->mDestroyed);
return mockStream;
}
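The parameter check in SetInputProcessingParams() above is a plain subset test on bitmasks: a request is acceptable only if every bit it sets is also set in the supported mask. A minimal sketch of the equivalent predicate:

    bool IsSubsetOfSupported(cubeb_input_processing_params aSupported,
                             cubeb_input_processing_params aRequested) {
      return (aSupported & aRequested) == aRequested;
    }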
@@ -285,58 +331,96 @@ cubeb_devid MockCubebStream::GetOutputDeviceID() const {
}
uint32_t MockCubebStream::InputChannels() const {
+ MutexAutoLock l(mMutex);
+ return InputChannelsLocked();
+}
+
+uint32_t MockCubebStream::InputChannelsLocked() const {
+ mMutex.AssertCurrentThreadOwns();
return mAudioGenerator.ChannelCount();
}
uint32_t MockCubebStream::OutputChannels() const {
+ MutexAutoLock l(mMutex);
+ return OutputChannelsLocked();
+}
+
+uint32_t MockCubebStream::OutputChannelsLocked() const {
+ mMutex.AssertCurrentThreadOwns();
return mOutputParams.channels;
}
uint32_t MockCubebStream::SampleRate() const {
+ MutexAutoLock l(mMutex);
+ return SampleRateLocked();
+}
+
+uint32_t MockCubebStream::SampleRateLocked() const {
+ mMutex.AssertCurrentThreadOwns();
return mAudioGenerator.mSampleRate;
}
uint32_t MockCubebStream::InputFrequency() const {
+ MutexAutoLock l(mMutex);
+ return InputFrequencyLocked();
+}
+
+uint32_t MockCubebStream::InputFrequencyLocked() const {
+ mMutex.AssertCurrentThreadOwns();
return mAudioGenerator.mFrequency;
}
+Maybe<cubeb_state> MockCubebStream::State() const {
+ MutexAutoLock l(mMutex);
+ return mState;
+}
+
nsTArray<AudioDataValue>&& MockCubebStream::TakeRecordedOutput() {
+ MutexAutoLock l(mMutex);
return std::move(mRecordedOutput);
}
nsTArray<AudioDataValue>&& MockCubebStream::TakeRecordedInput() {
+ MutexAutoLock l(mMutex);
return std::move(mRecordedInput);
}
void MockCubebStream::SetDriftFactor(float aDriftFactor) {
- MOZ_ASSERT(mRunningMode == MockCubeb::RunningMode::Automatic);
+ MOZ_RELEASE_ASSERT(mRunningMode == MockCubeb::RunningMode::Automatic);
+ MutexAutoLock l(mMutex);
mDriftFactor = aDriftFactor;
}
-void MockCubebStream::ForceError() { mForceErrorState = true; }
+void MockCubebStream::ForceError() {
+ MutexAutoLock l(mMutex);
+ mForceErrorState = true;
+}
void MockCubebStream::ForceDeviceChanged() {
- MOZ_ASSERT(mRunningMode == RunningMode::Automatic);
+ MOZ_RELEASE_ASSERT(mRunningMode == RunningMode::Automatic);
+ MutexAutoLock l(mMutex);
mForceDeviceChanged = true;
};
void MockCubebStream::NotifyDeviceChangedNow() {
- MOZ_ASSERT(mRunningMode == RunningMode::Manual);
+ MOZ_RELEASE_ASSERT(mRunningMode == RunningMode::Manual);
NotifyDeviceChanged();
}
void MockCubebStream::Thaw() {
- MOZ_ASSERT(mRunningMode == RunningMode::Automatic);
+ MOZ_RELEASE_ASSERT(mRunningMode == RunningMode::Automatic);
MonitorAutoLock l(mFrozenStartMonitor);
mFrozenStart = false;
mFrozenStartMonitor.Notify();
}
void MockCubebStream::SetOutputRecordingEnabled(bool aEnabled) {
+ MutexAutoLock l(mMutex);
mOutputRecordingEnabled = aEnabled;
}
void MockCubebStream::SetInputRecordingEnabled(bool aEnabled) {
+ MutexAutoLock l(mMutex);
mInputRecordingEnabled = aEnabled;
}
@@ -370,33 +454,43 @@ MediaEventSource<void>& MockCubebStream::DeviceChangeForcedEvent() {
}
KeepProcessing MockCubebStream::ManualDataCallback(long aNrFrames) {
- MOZ_ASSERT(mRunningMode == RunningMode::Manual);
- MOZ_ASSERT(aNrFrames <= kMaxNrFrames);
+ MOZ_RELEASE_ASSERT(mRunningMode == RunningMode::Manual);
+ MOZ_RELEASE_ASSERT(aNrFrames <= kMaxNrFrames);
+ MutexAutoLock l(mMutex);
return Process(aNrFrames);
}
KeepProcessing MockCubebStream::Process(long aNrFrames) {
+ mMutex.AssertCurrentThreadOwns();
+ if (!mState || *mState != CUBEB_STATE_STARTED) {
+ return KeepProcessing::InvalidState;
+ }
if (mInputParams.rate) {
mAudioGenerator.GenerateInterleaved(mInputBuffer, aNrFrames);
}
- cubeb_stream* stream = AsCubebStream();
+ cubeb_stream* stream = AsCubebStreamLocked();
const long outframes =
mDataCallback(stream, mUserPtr, mHasInput ? mInputBuffer : nullptr,
mHasOutput ? mOutputBuffer : nullptr, aNrFrames);
- if (mInputRecordingEnabled && mHasInput) {
- mRecordedInput.AppendElements(mInputBuffer, outframes * InputChannels());
- }
- if (mOutputRecordingEnabled && mHasOutput) {
- mRecordedOutput.AppendElements(mOutputBuffer, outframes * OutputChannels());
- }
- mAudioVerifier.AppendDataInterleaved(mOutputBuffer, outframes,
- MAX_OUTPUT_CHANNELS);
- mPosition += outframes;
+ if (outframes > 0) {
+ if (mInputRecordingEnabled && mHasInput) {
+ mRecordedInput.AppendElements(mInputBuffer,
+ outframes * InputChannelsLocked());
+ }
+ if (mOutputRecordingEnabled && mHasOutput) {
+        mRecordedOutput.AppendElements(mOutputBuffer,
+                                       outframes * OutputChannelsLocked());
+ }
+ mAudioVerifier.AppendDataInterleaved(mOutputBuffer, outframes,
+ MAX_OUTPUT_CHANNELS);
+ mPosition += outframes;
- mFramesProcessedEvent.Notify(outframes);
- if (mAudioVerifier.PreSilenceEnded()) {
- mFramesVerifiedEvent.Notify(outframes);
+ mFramesProcessedEvent.Notify(outframes);
+ if (mAudioVerifier.PreSilenceEnded()) {
+ mFramesVerifiedEvent.Notify(outframes);
+ }
}
if (outframes < aNrFrames) {
@@ -422,8 +516,9 @@ KeepProcessing MockCubebStream::Process(long aNrFrames) {
}
KeepProcessing MockCubebStream::Process10Ms() {
- MOZ_ASSERT(mRunningMode == RunningMode::Automatic);
- uint32_t rate = SampleRate();
+ MOZ_RELEASE_ASSERT(mRunningMode == RunningMode::Automatic);
+ MutexAutoLock l(mMutex);
+ uint32_t rate = SampleRateLocked();
const long nrFrames =
static_cast<long>(static_cast<float>(rate * 10) * mDriftFactor) /
PR_MSEC_PER_SEC;
@@ -431,11 +526,14 @@ KeepProcessing MockCubebStream::Process10Ms() {
}
void MockCubebStream::NotifyState(cubeb_state aState) {
- mStateCallback(AsCubebStream(), mUserPtr, aState);
+ mMutex.AssertCurrentThreadOwns();
+ mState = Some(aState);
+ mStateCallback(AsCubebStreamLocked(), mUserPtr, aState);
mStateEvent.Notify(aState);
}
void MockCubebStream::NotifyDeviceChanged() {
+ MutexAutoLock l(mMutex);
mDeviceChangedCallback(this->mUserPtr);
mDeviceChangedForcedEvent.Notify();
}
@@ -445,20 +543,20 @@ MockCubeb::MockCubeb() : MockCubeb(MockCubeb::RunningMode::Automatic) {}
MockCubeb::MockCubeb(RunningMode aRunningMode)
: ops(&mock_ops), mRunningMode(aRunningMode) {}
-MockCubeb::~MockCubeb() { MOZ_ASSERT(!mFakeAudioThread); };
+MockCubeb::~MockCubeb() { MOZ_RELEASE_ASSERT(!mFakeAudioThread); };
void MockCubeb::Destroy() {
- MOZ_ASSERT(mHasCubebContext);
+ MOZ_RELEASE_ASSERT(mHasCubebContext);
{
auto streams = mLiveStreams.Lock();
- MOZ_ASSERT(streams->IsEmpty());
+ MOZ_RELEASE_ASSERT(streams->IsEmpty());
}
mDestroyed = true;
Release();
}
cubeb* MockCubeb::AsCubebContext() {
- MOZ_ASSERT(!mDestroyed);
+ MOZ_RELEASE_ASSERT(!mDestroyed);
if (mHasCubebContext.compareExchange(false, true)) {
AddRef();
}
@@ -467,7 +565,7 @@ cubeb* MockCubeb::AsCubebContext() {
MockCubeb* MockCubeb::AsMock(cubeb* aContext) {
auto* mockCubeb = reinterpret_cast<MockCubeb*>(aContext);
- MOZ_ASSERT(!mockCubeb->mDestroyed);
+ MOZ_RELEASE_ASSERT(!mockCubeb->mDestroyed);
return mockCubeb;
}
@@ -528,6 +626,28 @@ int MockCubeb::RegisterDeviceCollectionChangeCallback(
return CUBEB_OK;
}
+Result<cubeb_input_processing_params, int>
+MockCubeb::SupportedInputProcessingParams() const {
+ const auto& [params, rv] = mSupportedInputProcessingParams;
+ if (rv != CUBEB_OK) {
+ return Err(rv);
+ }
+ return params;
+}
+
+void MockCubeb::SetSupportedInputProcessingParams(
+ cubeb_input_processing_params aParams, int aRv) {
+ mSupportedInputProcessingParams = std::make_pair(aParams, aRv);
+}
+
+void MockCubeb::SetInputProcessingApplyRv(int aRv) {
+ mInputProcessingParamsApplyRv = aRv;
+}
+
+int MockCubeb::InputProcessingApplyRv() const {
+ return mInputProcessingParamsApplyRv;
+}
+
void MockCubeb::AddDevice(cubeb_device_info aDevice) {
if (aDevice.type == CUBEB_DEVICE_TYPE_INPUT) {
mInputDevices.AppendElement(aDevice);
@@ -613,12 +733,12 @@ void MockCubeb::SetSupportDeviceChangeCallback(bool aSupports) {
void MockCubeb::ForceStreamInitError() { mStreamInitErrorState = true; }
void MockCubeb::SetStreamStartFreezeEnabled(bool aEnabled) {
- MOZ_ASSERT(mRunningMode == RunningMode::Automatic);
+ MOZ_RELEASE_ASSERT(mRunningMode == RunningMode::Automatic);
mStreamStartFreezeEnabled = aEnabled;
}
auto MockCubeb::ForceAudioThread() -> RefPtr<ForcedAudioThreadPromise> {
- MOZ_ASSERT(mRunningMode == RunningMode::Automatic);
+ MOZ_RELEASE_ASSERT(mRunningMode == RunningMode::Automatic);
RefPtr<ForcedAudioThreadPromise> p =
mForcedAudioThreadPromise.Ensure(__func__);
mForcedAudioThread = true;
@@ -627,7 +747,7 @@ auto MockCubeb::ForceAudioThread() -> RefPtr<ForcedAudioThreadPromise> {
}
void MockCubeb::UnforceAudioThread() {
- MOZ_ASSERT(mRunningMode == RunningMode::Automatic);
+ MOZ_RELEASE_ASSERT(mRunningMode == RunningMode::Automatic);
mForcedAudioThread = false;
}
@@ -660,12 +780,12 @@ void MockCubeb::StreamDestroy(MockCubebStream* aStream) {
}
void MockCubeb::GoFaster() {
- MOZ_ASSERT(mRunningMode == RunningMode::Automatic);
+ MOZ_RELEASE_ASSERT(mRunningMode == RunningMode::Automatic);
mFastMode = true;
}
void MockCubeb::DontGoFaster() {
- MOZ_ASSERT(mRunningMode == RunningMode::Automatic);
+ MOZ_RELEASE_ASSERT(mRunningMode == RunningMode::Automatic);
mFastMode = false;
}
@@ -679,13 +799,17 @@ MockCubeb::StreamDestroyEvent() {
}
void MockCubeb::StartStream(MockCubebStream* aStream) {
+ if (aStream) {
+ aStream->mMutex.AssertNotCurrentThreadOwns();
+ }
auto streams = mLiveStreams.Lock();
- MOZ_ASSERT_IF(!aStream, mForcedAudioThread);
- // Forcing an audio thread must happen before starting streams
- MOZ_ASSERT_IF(!aStream, streams->IsEmpty());
if (aStream) {
- MOZ_ASSERT(!streams->Contains(aStream->mSelf));
+ MOZ_RELEASE_ASSERT(!streams->Contains(aStream->mSelf));
streams->AppendElement(aStream->mSelf);
+ } else {
+ MOZ_RELEASE_ASSERT(mForcedAudioThread);
+ // Forcing an audio thread must happen before starting streams
+ MOZ_RELEASE_ASSERT(streams->IsEmpty());
}
if (!mFakeAudioThread && mRunningMode == RunningMode::Automatic) {
AddRef(); // released when the thread exits
@@ -694,6 +818,7 @@ void MockCubeb::StartStream(MockCubebStream* aStream) {
}
void MockCubeb::StopStream(MockCubebStream* aStream) {
+ aStream->mMutex.AssertNotCurrentThreadOwns();
{
auto streams = mLiveStreams.Lock();
if (!streams->Contains(aStream->mSelf)) {
@@ -719,7 +844,7 @@ void MockCubeb::ThreadFunction() {
}
}
streams->RemoveElementsBy([](const auto& stream) { return !stream; });
- MOZ_ASSERT(mFakeAudioThread);
+ MOZ_RELEASE_ASSERT(mFakeAudioThread);
if (streams->IsEmpty() && !mForcedAudioThread) {
// This leaks the std::thread if Gecko's main thread has already been
// shut down.
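// Usage sketch (illustrative, not part of this patch): driving a stream in
// RunningMode::Manual with the APIs changed above. Setup of the client is
// elided; once the stream leaves CUBEB_STATE_STARTED, Process() reports
// KeepProcessing::InvalidState.
//
//   MockCubeb* cubeb = new MockCubeb(MockCubeb::RunningMode::Manual);
//   CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
//   // ... start a cubeb client, e.g. an AudioCallbackDriver ...
//   RefPtr<SmartMockCubebStream> stream = WaitFor(cubeb->StreamInitEvent());
//   // Synchronously process frames on this thread, up to kMaxNrFrames per
//   // call, while the stream stays started.
//   while (stream->ManualDataCallback(128) ==
//          MockCubebStream::KeepProcessing::Yes) {
//   }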
diff --git a/dom/media/gtest/MockCubeb.h b/dom/media/gtest/MockCubeb.h
index ed6342a779..15689a4a4e 100644
--- a/dom/media/gtest/MockCubeb.h
+++ b/dom/media/gtest/MockCubeb.h
@@ -11,12 +11,15 @@
#include "MediaEventSource.h"
#include "mozilla/DataMutex.h"
#include "mozilla/MozPromise.h"
+#include "mozilla/Result.h"
+#include "mozilla/ResultVariant.h"
#include "mozilla/ThreadSafeWeakPtr.h"
#include "nsTArray.h"
#include <thread>
#include <atomic>
#include <chrono>
+#include <utility>
namespace mozilla {
const uint32_t MAX_OUTPUT_CHANNELS = 2;
@@ -68,6 +71,8 @@ struct cubeb_ops {
// Keep those and the struct definition in sync with cubeb.h and
// cubeb-internal.h
void cubeb_mock_destroy(cubeb* context);
+static int cubeb_mock_get_supported_input_processing_params(
+ cubeb* context, cubeb_input_processing_params* params);
static int cubeb_mock_enumerate_devices(cubeb* context, cubeb_device_type type,
cubeb_device_collection* out);
@@ -101,6 +106,9 @@ static int cubeb_mock_stream_set_volume(cubeb_stream* stream, float volume);
static int cubeb_mock_stream_set_name(cubeb_stream* stream,
char const* stream_name);
+static int cubeb_mock_stream_set_input_processing_params(
+ cubeb_stream* stream, cubeb_input_processing_params);
+
static int cubeb_mock_stream_register_device_changed_callback(
cubeb_stream* stream,
cubeb_device_changed_callback device_changed_callback);
@@ -122,7 +130,7 @@ cubeb_ops const mock_ops = {
/*.get_min_latency =*/cubeb_mock_get_min_latency,
/*.get_preferred_sample_rate =*/cubeb_mock_get_preferred_sample_rate,
/*.get_supported_input_processing_params =*/
- NULL,
+ cubeb_mock_get_supported_input_processing_params,
/*.enumerate_devices =*/cubeb_mock_enumerate_devices,
/*.device_collection_destroy =*/cubeb_mock_device_collection_destroy,
/*.destroy =*/cubeb_mock_destroy,
@@ -139,7 +147,7 @@ cubeb_ops const mock_ops = {
/*.stream_set_input_mute =*/NULL,
/*.stream_set_input_processing_params =*/
- NULL,
+ cubeb_mock_stream_set_input_processing_params,
/*.stream_device_destroy =*/NULL,
/*.stream_register_device_changed_callback =*/
cubeb_mock_stream_register_device_changed_callback,
@@ -160,7 +168,7 @@ class MockCubebStream {
void* mUserPtr;
public:
- enum class KeepProcessing { No, Yes };
+ enum class KeepProcessing { No, Yes, InvalidState };
enum class RunningMode { Automatic, Manual };
MockCubebStream(cubeb* aContext, char const* aStreamName,
@@ -175,50 +183,56 @@ class MockCubebStream {
~MockCubebStream();
- int Start();
- int Stop();
- uint64_t Position();
- void Destroy();
- int SetName(char const* aName);
+ int Start() MOZ_EXCLUDES(mMutex);
+ int Stop() MOZ_EXCLUDES(mMutex);
+ uint64_t Position() MOZ_EXCLUDES(mMutex);
+ void Destroy() MOZ_EXCLUDES(mMutex);
+ int SetName(char const* aName) MOZ_EXCLUDES(mMutex);
int RegisterDeviceChangedCallback(
- cubeb_device_changed_callback aDeviceChangedCallback);
+ cubeb_device_changed_callback aDeviceChangedCallback)
+ MOZ_EXCLUDES(mMutex);
+ int SetInputProcessingParams(cubeb_input_processing_params aParams);
- cubeb_stream* AsCubebStream();
+ cubeb_stream* AsCubebStream() MOZ_EXCLUDES(mMutex);
static MockCubebStream* AsMock(cubeb_stream* aStream);
- char const* StreamName() const { return mName.get(); }
+ char const* StreamName() const MOZ_EXCLUDES(mMutex) {
+ MutexAutoLock l(mMutex);
+ return mName.get();
+ }
cubeb_devid GetInputDeviceID() const;
cubeb_devid GetOutputDeviceID() const;
- uint32_t InputChannels() const;
- uint32_t OutputChannels() const;
- uint32_t SampleRate() const;
- uint32_t InputFrequency() const;
+ uint32_t InputChannels() const MOZ_EXCLUDES(mMutex);
+ uint32_t OutputChannels() const MOZ_EXCLUDES(mMutex);
+ uint32_t SampleRate() const MOZ_EXCLUDES(mMutex);
+ uint32_t InputFrequency() const MOZ_EXCLUDES(mMutex);
+ Maybe<cubeb_state> State() const MOZ_EXCLUDES(mMutex);
- void SetDriftFactor(float aDriftFactor);
- void ForceError();
- void ForceDeviceChanged();
- void Thaw();
+ void SetDriftFactor(float aDriftFactor) MOZ_EXCLUDES(mMutex);
+ void ForceError() MOZ_EXCLUDES(mMutex);
+ void ForceDeviceChanged() MOZ_EXCLUDES(mMutex);
+ void Thaw() MOZ_EXCLUDES(mMutex);
// For RunningMode::Manual, drive this MockCubebStream forward.
- KeepProcessing ManualDataCallback(long aNrFrames);
+ KeepProcessing ManualDataCallback(long aNrFrames) MOZ_EXCLUDES(mMutex);
// For RunningMode::Manual, notify the client of a DeviceChanged event
// synchronously.
- void NotifyDeviceChangedNow();
+ void NotifyDeviceChangedNow() MOZ_EXCLUDES(mMutex);
// Enable output recording for this driver. This is best called before
// the thread is running, but is safe to call whenever.
- void SetOutputRecordingEnabled(bool aEnabled);
+ void SetOutputRecordingEnabled(bool aEnabled) MOZ_EXCLUDES(mMutex);
// Enable input recording for this driver. This is best called before
// the thread is running, but is safe to call whenever.
- void SetInputRecordingEnabled(bool aEnabled);
+ void SetInputRecordingEnabled(bool aEnabled) MOZ_EXCLUDES(mMutex);
// Get the recorded output from this stream. This doesn't copy, and therefore
// only works once.
- nsTArray<AudioDataValue>&& TakeRecordedOutput();
+ nsTArray<AudioDataValue>&& TakeRecordedOutput() MOZ_EXCLUDES(mMutex);
// Get the recorded input from this stream. This doesn't copy, and therefore
// only works once.
- nsTArray<AudioDataValue>&& TakeRecordedInput();
+ nsTArray<AudioDataValue>&& TakeRecordedInput() MOZ_EXCLUDES(mMutex);
MediaEventSource<nsCString>& NameSetEvent();
MediaEventSource<cubeb_state>& StateEvent();
@@ -232,7 +246,13 @@ class MockCubebStream {
MediaEventSource<void>& DeviceChangeForcedEvent();
private:
- KeepProcessing Process(long aNrFrames);
+ cubeb_stream* AsCubebStreamLocked() MOZ_REQUIRES(mMutex);
+ static MockCubebStream* AsMockLocked(cubeb_stream* aStream);
+ uint32_t InputChannelsLocked() const MOZ_REQUIRES(mMutex);
+ uint32_t OutputChannelsLocked() const MOZ_REQUIRES(mMutex);
+ uint32_t SampleRateLocked() const MOZ_REQUIRES(mMutex);
+ uint32_t InputFrequencyLocked() const MOZ_REQUIRES(mMutex);
+ KeepProcessing Process(long aNrFrames) MOZ_REQUIRES(mMutex);
KeepProcessing Process10Ms();
public:
@@ -242,52 +262,58 @@ class MockCubebStream {
SmartMockCubebStream* const mSelf;
private:
- void NotifyState(cubeb_state aState);
- void NotifyDeviceChanged();
+ void NotifyState(cubeb_state aState) MOZ_REQUIRES(mMutex);
+ void NotifyDeviceChanged() MOZ_EXCLUDES(mMutex);
static constexpr long kMaxNrFrames = 1920;
+ // Mutex guarding most members to ensure state is in sync.
+ mutable Mutex mMutex{"MockCubebStream::mMutex"};
// Monitor used to block start until mFrozenStart is false.
- Monitor mFrozenStartMonitor MOZ_UNANNOTATED;
+ Monitor mFrozenStartMonitor MOZ_ACQUIRED_BEFORE(mMutex);
// Whether this stream should wait for an explicit start request before
- // starting. Protected by FrozenStartMonitor.
- bool mFrozenStart;
+ // starting.
+ bool mFrozenStart MOZ_GUARDED_BY(mFrozenStartMonitor);
+ // The stream's most recently issued state change, if any has occurred.
// Used to abort a frozen start if cubeb_stream_stop() is called concurrently
// with a blocked cubeb_stream_start() call.
- std::atomic_bool mStreamStop{true};
+ Maybe<cubeb_state> mState MOZ_GUARDED_BY(mMutex);
// Whether or not the output-side of this stream (what is written from the
// callback output buffer) is recorded in an internal buffer. The data is then
// available via `TakeRecordedOutput`.
- std::atomic_bool mOutputRecordingEnabled{false};
+ bool mOutputRecordingEnabled MOZ_GUARDED_BY(mMutex) = false;
// Whether or not the input-side of this stream (what is written from the
// callback input buffer) is recorded in an internal buffer. The data is then
// available via `TakeRecordedInput`.
- std::atomic_bool mInputRecordingEnabled{false};
+ bool mInputRecordingEnabled MOZ_GUARDED_BY(mMutex) = false;
// The audio buffer used on data callback.
- AudioDataValue mOutputBuffer[MAX_OUTPUT_CHANNELS * kMaxNrFrames] = {};
- AudioDataValue mInputBuffer[MAX_INPUT_CHANNELS * kMaxNrFrames] = {};
+ AudioDataValue mOutputBuffer[MAX_OUTPUT_CHANNELS *
+ kMaxNrFrames] MOZ_GUARDED_BY(mMutex) = {};
+ AudioDataValue mInputBuffer[MAX_INPUT_CHANNELS * kMaxNrFrames] MOZ_GUARDED_BY(
+ mMutex) = {};
// The audio callback
- cubeb_data_callback mDataCallback = nullptr;
+ const cubeb_data_callback mDataCallback = nullptr;
// The stream state callback
- cubeb_state_callback mStateCallback = nullptr;
+ const cubeb_state_callback mStateCallback = nullptr;
// The device changed callback
- cubeb_device_changed_callback mDeviceChangedCallback = nullptr;
+ cubeb_device_changed_callback mDeviceChangedCallback MOZ_GUARDED_BY(mMutex) =
+ nullptr;
// A name for this stream
- nsCString mName;
+ nsCString mName MOZ_GUARDED_BY(mMutex);
// The stream params
- cubeb_stream_params mOutputParams = {};
- cubeb_stream_params mInputParams = {};
+ cubeb_stream_params mOutputParams MOZ_GUARDED_BY(mMutex) = {};
+ cubeb_stream_params mInputParams MOZ_GUARDED_BY(mMutex) = {};
/* Device IDs */
- cubeb_devid mInputDeviceID;
- cubeb_devid mOutputDeviceID;
-
- std::atomic<float> mDriftFactor{1.0};
- std::atomic_bool mFastMode{false};
- std::atomic_bool mForceErrorState{false};
- std::atomic_bool mForceDeviceChanged{false};
- std::atomic_bool mDestroyed{false};
- std::atomic<uint64_t> mPosition{0};
- AudioGenerator<AudioDataValue> mAudioGenerator;
- AudioVerifier<AudioDataValue> mAudioVerifier;
+ const cubeb_devid mInputDeviceID;
+ const cubeb_devid mOutputDeviceID;
+
+ float mDriftFactor MOZ_GUARDED_BY(mMutex) = 1.0;
+ bool mFastMode MOZ_GUARDED_BY(mMutex) = false;
+ bool mForceErrorState MOZ_GUARDED_BY(mMutex) = false;
+ bool mForceDeviceChanged MOZ_GUARDED_BY(mMutex) = false;
+ bool mDestroyed MOZ_GUARDED_BY(mMutex) = false;
+ uint64_t mPosition MOZ_GUARDED_BY(mMutex) = 0;
+ AudioGenerator<AudioDataValue> mAudioGenerator MOZ_GUARDED_BY(mMutex);
+ AudioVerifier<AudioDataValue> mAudioVerifier MOZ_GUARDED_BY(mMutex);
MediaEventProducer<nsCString> mNameSetEvent;
MediaEventProducer<cubeb_state> mStateEvent;
@@ -299,12 +325,29 @@ class MockCubebStream {
MediaEventProducer<void> mDeviceChangedForcedEvent;
// The recorded data, copied from the output_buffer of the callback.
// Interleaved.
- nsTArray<AudioDataValue> mRecordedOutput;
+ nsTArray<AudioDataValue> mRecordedOutput MOZ_GUARDED_BY(mMutex);
// The recorded data, copied from the input buffer of the callback.
// Interleaved.
- nsTArray<AudioDataValue> mRecordedInput;
+ nsTArray<AudioDataValue> mRecordedInput MOZ_GUARDED_BY(mMutex);
};
+inline std::ostream& operator<<(std::ostream& aStream,
+ const MockCubebStream::KeepProcessing& aVal) {
+ switch (aVal) {
+ case MockCubebStream::KeepProcessing::Yes:
+ aStream << "KeepProcessing::Yes";
+ return aStream;
+ case MockCubebStream::KeepProcessing::No:
+ aStream << "KeepProcessing::No";
+ return aStream;
+ case MockCubebStream::KeepProcessing::InvalidState:
+ aStream << "KeepProcessing::InvalidState";
+ return aStream;
+ }
+ aStream << "KeepProcessing(invalid " << static_cast<uint32_t>(aVal) << ")";
+ return aStream;
+}
+
class SmartMockCubebStream
: public MockCubebStream,
public SupportsThreadSafeWeakPtr<SmartMockCubebStream> {
@@ -362,6 +405,16 @@ class MockCubeb {
cubeb_device_type aDevType,
cubeb_device_collection_changed_callback aCallback, void* aUserPtr);
+ Result<cubeb_input_processing_params, int> SupportedInputProcessingParams()
+ const;
+ void SetSupportedInputProcessingParams(cubeb_input_processing_params aParams,
+ int aRv);
+ // Set the rv to be returned when SetInputProcessingParams for any stream of
+ // this context goes on to apply the params to the stream, i.e. after it has
+ // passed the supported-params check.
+ void SetInputProcessingApplyRv(int aRv);
+ int InputProcessingApplyRv() const;
+
// Control API
// Add an input or output device to this backend. This calls the device
@@ -455,6 +508,10 @@ class MockCubeb {
// notification via a system callback. If not, Gecko is expected to re-query
// the list every time.
bool mSupportsDeviceCollectionChangedCallback = true;
+ std::pair<cubeb_input_processing_params, int>
+ mSupportedInputProcessingParams = std::make_pair(
+ CUBEB_INPUT_PROCESSING_PARAM_NONE, CUBEB_ERROR_NOT_SUPPORTED);
+ int mInputProcessingParamsApplyRv = CUBEB_OK;
const RunningMode mRunningMode;
Atomic<bool> mStreamInitErrorState;
// Whether new MockCubebStreams should be frozen on start.
@@ -502,6 +559,18 @@ int cubeb_mock_register_device_collection_changed(
devtype, callback, user_ptr);
}
+int cubeb_mock_get_supported_input_processing_params(
+ cubeb* context, cubeb_input_processing_params* params) {
+ Result<cubeb_input_processing_params, int> res =
+ MockCubeb::AsMock(context)->SupportedInputProcessingParams();
+ if (res.isErr()) {
+ *params = CUBEB_INPUT_PROCESSING_PARAM_NONE;
+ return res.unwrapErr();
+ }
+ *params = res.unwrap();
+ return CUBEB_OK;
+}
+
int cubeb_mock_stream_init(
cubeb* context, cubeb_stream** stream, char const* stream_name,
cubeb_devid input_device, cubeb_stream_params* input_stream_params,
@@ -562,6 +631,11 @@ int cubeb_mock_stream_register_device_changed_callback(
device_changed_callback);
}
+static int cubeb_mock_stream_set_input_processing_params(
+ cubeb_stream* stream, cubeb_input_processing_params params) {
+ return MockCubebStream::AsMock(stream)->SetInputProcessingParams(params);
+}
+
int cubeb_mock_get_min_latency(cubeb* context, cubeb_stream_params params,
uint32_t* latency_ms) {
*latency_ms = 10;
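// Usage sketch (illustrative, not part of this patch): the new mock knobs
// for input processing, configured before the client under test runs.
//
//   MockCubeb* cubeb = new MockCubeb(MockCubeb::RunningMode::Manual);
//   // Advertise which params the backend claims to support, and the rv of
//   // the query itself.
//   cubeb->SetSupportedInputProcessingParams(
//       CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION, CUBEB_OK);
//   // Make the per-stream apply step (after the supported-params check)
//   // fail with a given error code.
//   cubeb->SetInputProcessingApplyRv(CUBEB_ERROR);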
diff --git a/dom/media/gtest/TestAudioCallbackDriver.cpp b/dom/media/gtest/TestAudioCallbackDriver.cpp
index 050395fa44..9c8c9bd107 100644
--- a/dom/media/gtest/TestAudioCallbackDriver.cpp
+++ b/dom/media/gtest/TestAudioCallbackDriver.cpp
@@ -9,34 +9,39 @@
#include "GraphDriver.h"
#include "gmock/gmock.h"
-#include "gtest/gtest-printers.h"
#include "gtest/gtest.h"
#include "MediaTrackGraphImpl.h"
#include "mozilla/gtest/WaitFor.h"
#include "mozilla/Attributes.h"
#include "mozilla/SyncRunnable.h"
-#include "mozilla/UniquePtr.h"
#include "nsTArray.h"
#include "MockCubeb.h"
-using namespace mozilla;
+namespace mozilla {
+
using IterationResult = GraphInterface::IterationResult;
using ::testing::_;
using ::testing::AnyNumber;
+using ::testing::AtMost;
+using ::testing::Eq;
+using ::testing::InSequence;
using ::testing::NiceMock;
class MockGraphInterface : public GraphInterface {
NS_DECL_THREADSAFE_ISUPPORTS
explicit MockGraphInterface(TrackRate aSampleRate)
: mSampleRate(aSampleRate) {}
- MOCK_METHOD0(NotifyInputStopped, void());
- MOCK_METHOD5(NotifyInputData, void(const AudioDataValue*, size_t, TrackRate,
- uint32_t, uint32_t));
- MOCK_METHOD0(DeviceChanged, void());
+ MOCK_METHOD(void, NotifyInputStopped, ());
+ MOCK_METHOD(void, NotifyInputData,
+ (const AudioDataValue*, size_t, TrackRate, uint32_t, uint32_t));
+ MOCK_METHOD(void, NotifySetRequestedInputProcessingParamsResult,
+ (AudioCallbackDriver*, cubeb_input_processing_params,
+ (Result<cubeb_input_processing_params, int>&&)));
+ MOCK_METHOD(void, DeviceChanged, ());
#ifdef DEBUG
- MOCK_CONST_METHOD1(InDriverIteration, bool(const GraphDriver*));
+ MOCK_METHOD(bool, InDriverIteration, (const GraphDriver*), (const));
#endif
/* OneIteration cannot be mocked because IterationResult is non-memmovable and
* cannot be passed as a parameter, which GMock does internally. */
@@ -81,7 +86,7 @@ class MockGraphInterface : public GraphInterface {
RefPtr<Runnable> aSwitchedRunnable = NS_NewRunnableFunction(
"DefaultNoopSwitchedRunnable", [] {})) {
auto guard = mNextDriver.Lock();
- MOZ_ASSERT(guard->isNothing());
+ MOZ_RELEASE_ASSERT(guard->isNothing());
*guard =
Some(std::make_tuple(std::move(aDriver), std::move(aSwitchedRunnable)));
}
@@ -108,7 +113,7 @@ class MockGraphInterface : public GraphInterface {
NS_IMPL_ISUPPORTS0(MockGraphInterface)
TEST(TestAudioCallbackDriver, StartStop)
-MOZ_CAN_RUN_SCRIPT_FOR_DEFINITION {
+MOZ_CAN_RUN_SCRIPT_BOUNDARY {
const TrackRate rate = 44100;
MockCubeb* cubeb = new MockCubeb();
CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
@@ -118,7 +123,8 @@ MOZ_CAN_RUN_SCRIPT_FOR_DEFINITION {
EXPECT_CALL(*graph, NotifyInputStopped).Times(0);
driver = MakeRefPtr<AudioCallbackDriver>(graph, nullptr, rate, 2, 0, nullptr,
- nullptr, AudioInputType::Unknown);
+ nullptr, AudioInputType::Unknown,
+ CUBEB_INPUT_PROCESSING_PARAM_NONE);
EXPECT_FALSE(driver->ThreadRunning()) << "Verify thread is not running";
EXPECT_FALSE(driver->IsStarted()) << "Verify thread is not started";
@@ -135,7 +141,7 @@ MOZ_CAN_RUN_SCRIPT_FOR_DEFINITION {
EXPECT_FALSE(driver->IsStarted()) << "Verify thread is not started";
}
-void TestSlowStart(const TrackRate aRate) MOZ_CAN_RUN_SCRIPT_FOR_DEFINITION {
+void TestSlowStart(const TrackRate aRate) MOZ_CAN_RUN_SCRIPT_BOUNDARY {
std::cerr << "TestSlowStart with rate " << aRate << std::endl;
MockCubeb* cubeb = new MockCubeb();
@@ -177,15 +183,17 @@ void TestSlowStart(const TrackRate aRate) MOZ_CAN_RUN_SCRIPT_FOR_DEFINITION {
});
driver = MakeRefPtr<AudioCallbackDriver>(graph, nullptr, aRate, 2, 2, nullptr,
- (void*)1, AudioInputType::Voice);
+ (void*)1, AudioInputType::Voice,
+ CUBEB_INPUT_PROCESSING_PARAM_NONE);
EXPECT_FALSE(driver->ThreadRunning()) << "Verify thread is not running";
EXPECT_FALSE(driver->IsStarted()) << "Verify thread is not started";
graph->SetCurrentDriver(driver);
graph->SetEnsureNextIteration(true);
+ auto initPromise = TakeN(cubeb->StreamInitEvent(), 1);
driver->Start();
- RefPtr<SmartMockCubebStream> stream = WaitFor(cubeb->StreamInitEvent());
+ auto [stream] = WaitFor(initPromise).unwrap()[0];
cubeb->SetStreamStartFreezeEnabled(false);
const size_t fallbackIterations = 3;
@@ -234,7 +242,7 @@ void TestSlowStart(const TrackRate aRate) MOZ_CAN_RUN_SCRIPT_FOR_DEFINITION {
}
TEST(TestAudioCallbackDriver, SlowStart)
-MOZ_CAN_RUN_SCRIPT_FOR_DEFINITION {
+{
TestSlowStart(1000); // 10ms = 10 <<< 128 samples
TestSlowStart(8000); // 10ms = 80 < 128 samples
TestSlowStart(44100); // 10ms = 441 > 128 samples
@@ -252,7 +260,7 @@ class MOZ_STACK_CLASS AutoSetter {
: mVal(aVal), mNew(aNew), mOld(mVal.exchange(aNew)) {}
~AutoSetter() {
DebugOnly<T> oldNew = mVal.exchange(mOld);
- MOZ_ASSERT(oldNew == mNew);
+ MOZ_RELEASE_ASSERT(oldNew == mNew);
}
};
#endif
@@ -265,12 +273,13 @@ MOZ_CAN_RUN_SCRIPT_BOUNDARY {
auto graph = MakeRefPtr<MockGraphInterface>(rate);
auto driver = MakeRefPtr<AudioCallbackDriver>(
- graph, nullptr, rate, 2, 1, nullptr, (void*)1, AudioInputType::Voice);
+ graph, nullptr, rate, 2, 1, nullptr, (void*)1, AudioInputType::Voice,
+ CUBEB_INPUT_PROCESSING_PARAM_NONE);
EXPECT_FALSE(driver->ThreadRunning()) << "Verify thread is not running";
EXPECT_FALSE(driver->IsStarted()) << "Verify thread is not started";
#ifdef DEBUG
- std::atomic<std::thread::id> threadInDriverIteration((std::thread::id()));
+ std::atomic<std::thread::id> threadInDriverIteration{std::thread::id()};
EXPECT_CALL(*graph, InDriverIteration(driver.get())).WillRepeatedly([&] {
return std::this_thread::get_id() == threadInDriverIteration;
});
@@ -279,17 +288,24 @@ MOZ_CAN_RUN_SCRIPT_BOUNDARY {
EXPECT_CALL(*graph, NotifyInputData(_, 0, rate, 1, _)).Times(AnyNumber());
EXPECT_CALL(*graph, NotifyInputData(_, ignoredFrameCount, _, _, _)).Times(0);
EXPECT_CALL(*graph, DeviceChanged);
+ Result<cubeb_input_processing_params, int> expected =
+ Err(CUBEB_ERROR_NOT_SUPPORTED);
+ EXPECT_CALL(*graph, NotifySetRequestedInputProcessingParamsResult(
+ driver.get(), CUBEB_INPUT_PROCESSING_PARAM_NONE,
+ Eq(std::ref(expected))));
graph->SetCurrentDriver(driver);
graph->SetEnsureNextIteration(true);
// This starts the fallback driver.
+ auto initPromise = TakeN(cubeb->StreamInitEvent(), 1);
driver->Start();
- RefPtr<SmartMockCubebStream> stream = WaitFor(cubeb->StreamInitEvent());
+ auto [stream] = WaitFor(initPromise).unwrap()[0];
// Wait for the audio driver to have started the stream before running data
// callbacks. driver->Start() does a dispatch to the cubeb operation thread
// and starts the stream there.
- nsCOMPtr<nsIEventTarget> cubebOpThread = CUBEB_TASK_THREAD;
+ nsCOMPtr<nsIEventTarget> cubebOpThread =
+ CubebUtils::GetCubebOperationThread();
MOZ_ALWAYS_SUCCEEDS(SyncRunnable::DispatchToThread(
cubebOpThread, NS_NewRunnableFunction(__func__, [] {})));
@@ -393,85 +409,412 @@ MOZ_CAN_RUN_SCRIPT_BOUNDARY {
auto graph = MakeRefPtr<MockGraphInterface>(rate);
auto driver = MakeRefPtr<AudioCallbackDriver>(
- graph, nullptr, rate, 2, 1, nullptr, (void*)1, AudioInputType::Voice);
+ graph, nullptr, rate, 2, 1, nullptr, (void*)1, AudioInputType::Voice,
+ CUBEB_INPUT_PROCESSING_PARAM_NONE);
EXPECT_FALSE(driver->ThreadRunning()) << "Verify thread is not running";
EXPECT_FALSE(driver->IsStarted()) << "Verify thread is not started";
auto newDriver = MakeRefPtr<AudioCallbackDriver>(
- graph, nullptr, rate, 2, 1, nullptr, (void*)1, AudioInputType::Voice);
+ graph, nullptr, rate, 2, 1, nullptr, (void*)1, AudioInputType::Voice,
+ CUBEB_INPUT_PROCESSING_PARAM_NONE);
EXPECT_FALSE(newDriver->ThreadRunning()) << "Verify thread is not running";
EXPECT_FALSE(newDriver->IsStarted()) << "Verify thread is not started";
#ifdef DEBUG
- std::atomic<std::thread::id> threadInDriverIteration(
- (std::this_thread::get_id()));
+ std::atomic<std::thread::id> threadInDriverIteration{
+ std::this_thread::get_id()};
EXPECT_CALL(*graph, InDriverIteration(_)).WillRepeatedly([&] {
return std::this_thread::get_id() == threadInDriverIteration;
});
#endif
EXPECT_CALL(*graph, NotifyInputData(_, 0, rate, 1, _)).Times(AnyNumber());
- EXPECT_CALL(*graph, DeviceChanged);
+ // This only happens if the first fallback driver is stopped by the audio
+ // driver handover rather than the driver switch. It happens when the
+ // subsequent audio callback performs the switch.
+ EXPECT_CALL(*graph, NotifyInputStopped()).Times(AtMost(1));
+ Result<cubeb_input_processing_params, int> expected =
+ Err(CUBEB_ERROR_NOT_SUPPORTED);
+ EXPECT_CALL(*graph, NotifySetRequestedInputProcessingParamsResult(
+ driver.get(), CUBEB_INPUT_PROCESSING_PARAM_NONE,
+ Eq(std::ref(expected))));
+ EXPECT_CALL(*graph, NotifySetRequestedInputProcessingParamsResult(
+ newDriver.get(), CUBEB_INPUT_PROCESSING_PARAM_NONE,
+ Eq(std::ref(expected))));
graph->SetCurrentDriver(driver);
graph->SetEnsureNextIteration(true);
+ auto initPromise = TakeN(cubeb->StreamInitEvent(), 1);
// This starts the fallback driver.
driver->Start();
- RefPtr<SmartMockCubebStream> stream = WaitFor(cubeb->StreamInitEvent());
+ RefPtr<SmartMockCubebStream> stream;
+ std::tie(stream) = WaitFor(initPromise).unwrap()[0];
// Wait for the audio driver to have started or the DeviceChanged event will
// be ignored. driver->Start() does a dispatch to the cubeb operation thread
// and starts the stream there.
- nsCOMPtr<nsIEventTarget> cubebOpThread = CUBEB_TASK_THREAD;
+ nsCOMPtr<nsIEventTarget> cubebOpThread =
+ CubebUtils::GetCubebOperationThread();
MOZ_ALWAYS_SUCCEEDS(SyncRunnable::DispatchToThread(
cubebOpThread, NS_NewRunnableFunction(__func__, [] {})));
-#ifdef DEBUG
- AutoSetter as(threadInDriverIteration, std::this_thread::get_id());
-#endif
+ initPromise = TakeN(cubeb->StreamInitEvent(), 1);
+ Monitor mon(__func__);
+ bool canContinueToStartNextDriver = false;
+ bool continued = false;
// This marks the audio driver as running.
EXPECT_EQ(stream->ManualDataCallback(0),
MockCubebStream::KeepProcessing::Yes);
- // If a fallback driver callback happens between the audio callback above, and
- // the SwitchTo below, the audio driver will perform the switch instead of the
- // fallback since the fallback will have stopped. This test may therefore
- // intermittently take different code paths.
-
- // Stop the fallback driver by switching audio driver in the graph.
- {
- Monitor mon(__func__);
- MonitorAutoLock lock(mon);
- bool switched = false;
- graph->SwitchTo(newDriver, NS_NewRunnableFunction(__func__, [&] {
- MonitorAutoLock lock(mon);
- switched = true;
- lock.Notify();
- }));
- while (!switched) {
- lock.Wait();
- }
+ // To satisfy TSAN's lock-order-inversion checking we avoid locking stream's
+ // mMutex (by calling ManualDataCallback) under mon. The SwitchTo runnable
+ // below already locks mon under stream's mMutex.
+ MonitorAutoLock lock(mon);
+
+ // If a fallback driver callback happens between the audio callback
+ // above, and the SwitchTo below, the driver will enter
+ // `FallbackDriverState::None`, relying on the audio driver to
+ // iterate the graph, including performing the driver switch. This
+ // test may therefore intermittently take different code paths.
+ // Note however that the fallback driver runs every ~10ms while the
+ // time from the manual callback above to telling the mock graph to
+ // switch drivers below is much much shorter. The vast majority of
+ // test runs will exercise the intended code path.
+
+ // Make the fallback driver enter FallbackDriverState::Stopped by
+ // switching audio driver in the graph.
+ graph->SwitchTo(newDriver, NS_NewRunnableFunction(__func__, [&] {
+ MonitorAutoLock lock(mon);
+ // Block the fallback driver on its thread until
+ // the test on main thread has finished testing
+ // what it needs.
+ while (!canContinueToStartNextDriver) {
+ lock.Wait();
+ }
+ // Notify the test that it can take these
+ // variables off the stack now.
+ continued = true;
+ lock.Notify();
+ }));
+
+ // Wait for the fallback driver to stop running.
+ while (driver->OnFallback()) {
+ std::this_thread::sleep_for(std::chrono::milliseconds(1));
}
- {
+ if (driver->HasFallback()) {
+ // Driver entered FallbackDriverState::Stopped as desired.
+ // Proceed with a DeviceChangedCallback.
+
+ EXPECT_CALL(*graph, DeviceChanged);
+
+ {
#ifdef DEBUG
- AutoSetter as(threadInDriverIteration, std::thread::id());
+ AutoSetter as(threadInDriverIteration, std::thread::id());
#endif
- // After stopping the fallback driver, but before newDriver has stopped the
- // old audio driver, fire a DeviceChanged event to ensure it is handled
- // properly.
- AudioCallbackDriver::DeviceChangedCallback_s(driver);
+ // After stopping the fallback driver, but before newDriver has
+ // stopped the old audio driver, fire a DeviceChanged event to
+ // ensure it is handled properly.
+ AudioCallbackDriver::DeviceChangedCallback_s(driver);
+ }
+
+ EXPECT_FALSE(driver->OnFallback())
+ << "DeviceChangedCallback after stopping must not start the "
+ "fallback driver again";
}
+ // Iterate the audio driver on a background thread in case the fallback
+ // driver completed the handover to the audio driver before the switch
+ // above. Doing the switch would deadlock as the switch runnable waits on
+ // mon.
+ NS_DispatchBackgroundTask(NS_NewRunnableFunction(
+ "DeviceChangeAfterStop::postSwitchManualAudioCallback", [stream] {
+ // An audio callback after switching must tell the stream to stop.
+ EXPECT_EQ(stream->ManualDataCallback(0),
+ MockCubebStream::KeepProcessing::No);
+ }));
+
+ // Unblock the fallback driver.
+ canContinueToStartNextDriver = true;
+ lock.Notify();
+
+ // Wait for the fallback driver to continue, so we can clear the
+ // stack.
+ while (!continued) {
+ lock.Wait();
+ }
+
+ // Wait for newDriver's cubeb stream to init.
+ std::tie(stream) = WaitFor(initPromise).unwrap()[0];
+
graph->StopIterating();
newDriver->EnsureNextIteration();
while (newDriver->OnFallback()) {
std::this_thread::sleep_for(std::chrono::milliseconds(1));
}
- // This will block until all events have been queued.
- MOZ_KnownLive(driver)->Shutdown();
- MOZ_KnownLive(newDriver)->Shutdown();
+ {
+#ifdef DEBUG
+ AutoSetter as(threadInDriverIteration, std::thread::id());
+#endif
+ EXPECT_EQ(stream->ManualDataCallback(0),
+ MockCubebStream::KeepProcessing::No);
+ }
+
// Drain the event queue.
NS_ProcessPendingEvents(nullptr);
}
+
+void TestInputProcessingOnStart(
+ MockCubeb* aCubeb, cubeb_input_processing_params aRequested,
+ const Result<cubeb_input_processing_params, int>& aExpected)
+ MOZ_CAN_RUN_SCRIPT_BOUNDARY {
+ const TrackRate rate = 44100;
+
+ auto graph = MakeRefPtr<NiceMock<MockGraphInterface>>(rate);
+ auto driver = MakeRefPtr<AudioCallbackDriver>(
+ graph, nullptr, rate, 2, 1, nullptr, nullptr, AudioInputType::Voice,
+ aRequested);
+ EXPECT_FALSE(driver->ThreadRunning()) << "Verify thread is not running";
+ EXPECT_FALSE(driver->IsStarted()) << "Verify thread is not started";
+
+#ifdef DEBUG
+ std::atomic_bool inGraphIteration{false};
+ ON_CALL(*graph, InDriverIteration(_)).WillByDefault([&] {
+ return inGraphIteration.load() && NS_IsMainThread();
+ });
+#endif
+ bool notified = false;
+ EXPECT_CALL(*graph, NotifyInputStopped).Times(0);
+ EXPECT_CALL(*graph, NotifySetRequestedInputProcessingParamsResult(
+ driver.get(), aRequested, Eq(std::ref(aExpected))))
+ .WillOnce([&] { notified = true; });
+
+ graph->SetCurrentDriver(driver);
+ auto initPromise = TakeN(aCubeb->StreamInitEvent(), 1);
+ driver->Start();
+ auto [stream] = WaitFor(initPromise).unwrap()[0];
+
+ // Wait for the audio driver to have started the stream before running data
+ // callbacks. driver->Start() does a dispatch to the cubeb operation thread
+ // and starts the stream there.
+ nsCOMPtr<nsIEventTarget> cubebOpThread =
+ CubebUtils::GetCubebOperationThread();
+ MOZ_ALWAYS_SUCCEEDS(SyncRunnable::DispatchToThread(
+ cubebOpThread, NS_NewRunnableFunction(__func__, [] {})));
+
+ // This makes the fallback driver stop on its next callback.
+ {
+#ifdef DEBUG
+ AutoSetter as(inGraphIteration, true);
+#endif
+ while (driver->OnFallback()) {
+ stream->ManualDataCallback(0);
+ std::this_thread::sleep_for(std::chrono::milliseconds(1));
+ }
+ }
+
+ while (!notified) {
+ NS_ProcessNextEvent();
+ }
+
+ // This will block until all events have been executed.
+ MOZ_KnownLive(driver)->Shutdown();
+ EXPECT_FALSE(driver->ThreadRunning()) << "Verify thread is not running";
+ EXPECT_FALSE(driver->IsStarted()) << "Verify thread is not started";
+}
+
+TEST(TestAudioCallbackDriver, InputProcessingOnStart)
+{
+ constexpr cubeb_input_processing_params allParams =
+ CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION |
+ CUBEB_INPUT_PROCESSING_PARAM_AUTOMATIC_GAIN_CONTROL |
+ CUBEB_INPUT_PROCESSING_PARAM_NOISE_SUPPRESSION |
+ CUBEB_INPUT_PROCESSING_PARAM_VOICE_ISOLATION;
+
+ MockCubeb* cubeb = new MockCubeb(MockCubeb::RunningMode::Manual);
+ CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
+
+ // Not supported by backend.
+ cubeb->SetSupportedInputProcessingParams(CUBEB_INPUT_PROCESSING_PARAM_NONE,
+ CUBEB_ERROR_NOT_SUPPORTED);
+ TestInputProcessingOnStart(cubeb,
+ CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION,
+ Err(CUBEB_ERROR_NOT_SUPPORTED));
+
+ // Not supported by params.
+ cubeb->SetSupportedInputProcessingParams(CUBEB_INPUT_PROCESSING_PARAM_NONE,
+ CUBEB_OK);
+ TestInputProcessingOnStart(cubeb,
+ CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION,
+ CUBEB_INPUT_PROCESSING_PARAM_NONE);
+
+ // Successful all.
+ cubeb->SetSupportedInputProcessingParams(allParams, CUBEB_OK);
+ TestInputProcessingOnStart(cubeb, allParams, allParams);
+
+ // Successful partial.
+ TestInputProcessingOnStart(cubeb,
+ CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION,
+ CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION);
+
+ // Not supported by stream.
+ cubeb->SetInputProcessingApplyRv(CUBEB_ERROR);
+ TestInputProcessingOnStart(
+ cubeb, CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION, Err(CUBEB_ERROR));
+}
+
+TEST(TestAudioCallbackDriver, InputProcessingWhileRunning)
+MOZ_CAN_RUN_SCRIPT_BOUNDARY {
+ constexpr TrackRate rate = 44100;
+ constexpr cubeb_input_processing_params allParams =
+ CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION |
+ CUBEB_INPUT_PROCESSING_PARAM_AUTOMATIC_GAIN_CONTROL |
+ CUBEB_INPUT_PROCESSING_PARAM_NOISE_SUPPRESSION |
+ CUBEB_INPUT_PROCESSING_PARAM_VOICE_ISOLATION;
+ constexpr int applyError = 99;
+
+ int numNotifications = 0;
+ const auto signal = [&]() mutable {
+ MOZ_ASSERT(NS_IsMainThread());
+ ++numNotifications;
+ };
+ const auto waitForSignal = [&](int aNotification) {
+ while (numNotifications < aNotification) {
+ NS_ProcessNextEvent();
+ }
+ };
+ MockCubeb* cubeb = new MockCubeb(MockCubeb::RunningMode::Manual);
+ CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
+
+ auto graph = MakeRefPtr<NiceMock<MockGraphInterface>>(rate);
+ auto driver = MakeRefPtr<AudioCallbackDriver>(
+ graph, nullptr, rate, 2, 1, nullptr, nullptr, AudioInputType::Voice,
+ CUBEB_INPUT_PROCESSING_PARAM_NONE);
+ EXPECT_FALSE(driver->ThreadRunning()) << "Verify thread is not running";
+ EXPECT_FALSE(driver->IsStarted()) << "Verify thread is not started";
+
+ EXPECT_CALL(*graph, NotifyInputStopped).Times(0);
+ // Expectations
+ const Result<cubeb_input_processing_params, int> noneResult =
+ CUBEB_INPUT_PROCESSING_PARAM_NONE;
+ const Result<cubeb_input_processing_params, int> aecResult =
+ CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION;
+ const Result<cubeb_input_processing_params, int> allResult = allParams;
+ const Result<cubeb_input_processing_params, int> notSupportedResult =
+ Err(CUBEB_ERROR_NOT_SUPPORTED);
+ const Result<cubeb_input_processing_params, int> applyErrorResult =
+ Err(applyError);
+ {
+ InSequence s;
+
+ // Notified on start.
+ EXPECT_CALL(*graph, NotifySetRequestedInputProcessingParamsResult(
+ driver.get(), CUBEB_INPUT_PROCESSING_PARAM_NONE,
+ Eq(std::ref(notSupportedResult))))
+ .WillOnce(signal);
+ // Not supported by backend.
+ EXPECT_CALL(*graph, NotifySetRequestedInputProcessingParamsResult(
+ driver.get(),
+ CUBEB_INPUT_PROCESSING_PARAM_NOISE_SUPPRESSION,
+ Eq(std::ref(notSupportedResult))))
+ .WillOnce(signal);
+ // Not supported by params.
+ EXPECT_CALL(*graph, NotifySetRequestedInputProcessingParamsResult(
+ driver.get(),
+ CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION,
+ Eq(std::ref(noneResult))))
+ .WillOnce(signal);
+ // Successful all.
+ EXPECT_CALL(*graph, NotifySetRequestedInputProcessingParamsResult(
+ driver.get(), allParams, Eq(std::ref(allResult))))
+ .WillOnce(signal);
+ // Successful partial.
+ EXPECT_CALL(*graph, NotifySetRequestedInputProcessingParamsResult(
+ driver.get(),
+ CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION,
+ Eq(std::ref(aecResult))))
+ .WillOnce(signal);
+ // Not supported by stream.
+ EXPECT_CALL(*graph, NotifySetRequestedInputProcessingParamsResult(
+ driver.get(),
+ CUBEB_INPUT_PROCESSING_PARAM_NOISE_SUPPRESSION,
+ Eq(std::ref(applyErrorResult))))
+ .WillOnce(signal);
+ }
+
+#ifdef DEBUG
+ std::atomic_bool inGraphIteration{false};
+ ON_CALL(*graph, InDriverIteration(_)).WillByDefault([&] {
+ return inGraphIteration.load() && NS_IsMainThread();
+ });
+#endif
+
+ const auto setParams = [&](cubeb_input_processing_params aParams) {
+ {
+#ifdef DEBUG
+ AutoSetter as(inGraphIteration, true);
+#endif
+ driver->SetRequestedInputProcessingParams(aParams);
+ }
+ };
+
+ graph->SetCurrentDriver(driver);
+ auto initPromise = TakeN(cubeb->StreamInitEvent(), 1);
+ driver->Start();
+ auto [stream] = WaitFor(initPromise).unwrap()[0];
+
+ // Wait for the audio driver to have started the stream before running data
+ // callbacks. driver->Start() does a dispatch to the cubeb operation thread
+ // and starts the stream there.
+ nsCOMPtr<nsIEventTarget> cubebOpThread =
+ CubebUtils::GetCubebOperationThread();
+ MOZ_ALWAYS_SUCCEEDS(SyncRunnable::DispatchToThread(
+ cubebOpThread, NS_NewRunnableFunction(__func__, [] {})));
+
+ // This makes the fallback driver stop on its next callback.
+
+ {
+#ifdef DEBUG
+ AutoSetter as(inGraphIteration, true);
+#endif
+ while (driver->OnFallback()) {
+ stream->ManualDataCallback(0);
+ std::this_thread::sleep_for(std::chrono::milliseconds(1));
+ }
+ }
+ waitForSignal(1);
+
+ // Not supported by backend.
+ cubeb->SetSupportedInputProcessingParams(CUBEB_INPUT_PROCESSING_PARAM_NONE,
+ CUBEB_ERROR_NOT_SUPPORTED);
+ setParams(CUBEB_INPUT_PROCESSING_PARAM_NOISE_SUPPRESSION);
+ waitForSignal(2);
+
+ // Not supported by params.
+ cubeb->SetSupportedInputProcessingParams(CUBEB_INPUT_PROCESSING_PARAM_NONE,
+ CUBEB_OK);
+ setParams(CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION);
+ waitForSignal(3);
+
+ // Successful all.
+ cubeb->SetSupportedInputProcessingParams(allParams, CUBEB_OK);
+ setParams(allParams);
+ waitForSignal(4);
+
+ // Successful partial.
+ setParams(CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION);
+ waitForSignal(5);
+
+ // Not supported by stream.
+ cubeb->SetInputProcessingApplyRv(applyError);
+ setParams(CUBEB_INPUT_PROCESSING_PARAM_NOISE_SUPPRESSION);
+ waitForSignal(6);
+
+ // This will block until all events have been executed.
+ MOZ_KnownLive(driver)->Shutdown();
+ EXPECT_FALSE(driver->ThreadRunning()) << "Verify thread is not running";
+ EXPECT_FALSE(driver->IsStarted()) << "Verify thread is not started";
+}
+
+} // namespace mozilla
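// Note on the TakeN pattern used in the tests above: the promise is armed
// before Start() so a StreamInitEvent fired from the cubeb operation thread
// cannot be missed. The shape, for reference (TakeN and WaitFor are provided
// by mozilla/gtest/WaitFor.h, included above):
//
//   auto initPromise = TakeN(cubeb->StreamInitEvent(), 1);  // arm first
//   driver->Start();                                        // then start
//   auto [stream] = WaitFor(initPromise).unwrap()[0];       // unpack 1-tuple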
diff --git a/dom/media/gtest/TestAudioInputProcessing.cpp b/dom/media/gtest/TestAudioInputProcessing.cpp
index d21c37a900..e357839768 100644
--- a/dom/media/gtest/TestAudioInputProcessing.cpp
+++ b/dom/media/gtest/TestAudioInputProcessing.cpp
@@ -428,3 +428,178 @@ TEST(TestAudioInputProcessing, Downmixing)
aip->Stop(graph);
track->Destroy();
}
+
+TEST(TestAudioInputProcessing, DisabledPlatformProcessing)
+{
+ const TrackRate rate = 44100;
+ const uint32_t channels = 1;
+ auto graph = MakeRefPtr<NiceMock<MockGraph>>(rate);
+ graph->Init(channels);
+
+ auto aip = MakeRefPtr<AudioInputProcessing>(channels);
+
+ MediaEnginePrefs settings;
+ settings.mUsePlatformProcessing = false;
+ settings.mAecOn = true;
+ aip->ApplySettings(graph, nullptr, settings);
+ aip->Start(graph);
+
+ EXPECT_EQ(aip->RequestedInputProcessingParams(graph),
+ CUBEB_INPUT_PROCESSING_PARAM_NONE);
+
+ aip->Stop(graph);
+ graph->Destroy();
+}
+
+TEST(TestAudioInputProcessing, EnabledPlatformProcessing)
+{
+ const TrackRate rate = 44100;
+ const uint32_t channels = 1;
+ auto graph = MakeRefPtr<NiceMock<MockGraph>>(rate);
+ graph->Init(channels);
+
+ auto aip = MakeRefPtr<AudioInputProcessing>(channels);
+
+ MediaEnginePrefs settings;
+ settings.mUsePlatformProcessing = true;
+ settings.mAecOn = true;
+ aip->ApplySettings(graph, nullptr, settings);
+ aip->Start(graph);
+
+ EXPECT_EQ(aip->RequestedInputProcessingParams(graph),
+ CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION);
+
+ aip->Stop(graph);
+ graph->Destroy();
+}
+
+namespace webrtc {
+bool operator==(const AudioProcessing::Config& aLhs,
+ const AudioProcessing::Config& aRhs) {
+ return aLhs.echo_canceller.enabled == aRhs.echo_canceller.enabled &&
+ (aLhs.gain_controller1.enabled == aRhs.gain_controller1.enabled ||
+ aLhs.gain_controller2.enabled == aRhs.gain_controller2.enabled) &&
+ aLhs.noise_suppression.enabled == aRhs.noise_suppression.enabled;
+}
+
+static std::ostream& operator<<(
+ std::ostream& aStream, const webrtc::AudioProcessing::Config& aConfig) {
+ aStream << "webrtc::AudioProcessing::Config[";
+ bool hadPrior = false;
+ if (aConfig.echo_canceller.enabled) {
+ aStream << "AEC";
+ hadPrior = true;
+ }
+ if (aConfig.gain_controller1.enabled || aConfig.gain_controller2.enabled) {
+ if (hadPrior) {
+ aStream << ", ";
+ }
+ aStream << "AGC";
+ }
+ if (aConfig.noise_suppression.enabled) {
+ if (hadPrior) {
+ aStream << ", ";
+ }
+ aStream << "NS";
+ }
+ aStream << "]";
+ return aStream;
+}
+} // namespace webrtc
+
+TEST(TestAudioInputProcessing, PlatformProcessing)
+{
+ const TrackRate rate = 44100;
+ const uint32_t channels = 1;
+ auto graph = MakeRefPtr<NiceMock<MockGraph>>(rate);
+ graph->Init(channels);
+
+ auto aip = MakeRefPtr<AudioInputProcessing>(channels);
+
+ MediaEnginePrefs settings;
+ settings.mUsePlatformProcessing = true;
+ settings.mAecOn = true;
+ aip->ApplySettings(graph, nullptr, settings);
+ aip->Start(graph);
+
+ webrtc::AudioProcessing::Config echoOnlyConfig;
+ echoOnlyConfig.echo_canceller.enabled = true;
+ webrtc::AudioProcessing::Config echoNoiseConfig = echoOnlyConfig;
+ echoNoiseConfig.noise_suppression.enabled = true;
+
+ // Config is applied, and platform processing requested.
+ EXPECT_EQ(aip->RequestedInputProcessingParams(graph),
+ CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION);
+ EXPECT_EQ(aip->AppliedConfig(graph), echoOnlyConfig);
+ EXPECT_FALSE(aip->IsPassThrough(graph));
+
+ // Platform processing params successfully applied.
+ aip->NotifySetRequestedInputProcessingParamsResult(
+ graph, CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION,
+ CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION);
+ // Turns off the equivalent APM config.
+ EXPECT_EQ(aip->AppliedConfig(graph), webrtc::AudioProcessing::Config());
+ EXPECT_TRUE(aip->IsPassThrough(graph));
+
+ // Simulate an error after a driver switch.
+ aip->NotifySetRequestedInputProcessingParamsResult(
+ graph, CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION, Err(CUBEB_ERROR));
+ // The APM config is turned back on, and platform processing is requested to
+ // be turned off.
+ EXPECT_EQ(aip->RequestedInputProcessingParams(graph),
+ CUBEB_INPUT_PROCESSING_PARAM_NONE);
+ EXPECT_EQ(aip->AppliedConfig(graph), echoOnlyConfig);
+ EXPECT_FALSE(aip->IsPassThrough(graph));
+
+ // Pretend there was a response for an old request.
+ aip->NotifySetRequestedInputProcessingParamsResult(
+ graph, CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION,
+ CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION);
+ // It does nothing since we are requesting NONE now.
+ EXPECT_EQ(aip->RequestedInputProcessingParams(graph),
+ CUBEB_INPUT_PROCESSING_PARAM_NONE);
+ EXPECT_EQ(aip->AppliedConfig(graph), echoOnlyConfig);
+ EXPECT_FALSE(aip->IsPassThrough(graph));
+
+ // Turn it off as requested.
+ aip->NotifySetRequestedInputProcessingParamsResult(
+ graph, CUBEB_INPUT_PROCESSING_PARAM_NONE,
+ CUBEB_INPUT_PROCESSING_PARAM_NONE);
+ EXPECT_EQ(aip->RequestedInputProcessingParams(graph),
+ CUBEB_INPUT_PROCESSING_PARAM_NONE);
+ EXPECT_EQ(aip->AppliedConfig(graph), echoOnlyConfig);
+ EXPECT_FALSE(aip->IsPassThrough(graph));
+
+ // Test partial support for the requested params.
+ settings.mNoiseOn = true;
+ aip->ApplySettings(graph, nullptr, settings);
+ EXPECT_EQ(aip->RequestedInputProcessingParams(graph),
+ CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION |
+ CUBEB_INPUT_PROCESSING_PARAM_NOISE_SUPPRESSION);
+ EXPECT_EQ(aip->AppliedConfig(graph), echoNoiseConfig);
+ EXPECT_FALSE(aip->IsPassThrough(graph));
+ // Only noise suppression was supported in the platform.
+ aip->NotifySetRequestedInputProcessingParamsResult(
+ graph,
+ CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION |
+ CUBEB_INPUT_PROCESSING_PARAM_NOISE_SUPPRESSION,
+ CUBEB_INPUT_PROCESSING_PARAM_NOISE_SUPPRESSION);
+ // In the APM only echo cancellation is applied.
+ EXPECT_EQ(aip->AppliedConfig(graph), echoOnlyConfig);
+ EXPECT_FALSE(aip->IsPassThrough(graph));
+
+ // Test error for partial support.
+ aip->NotifySetRequestedInputProcessingParamsResult(
+ graph,
+ CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION |
+ CUBEB_INPUT_PROCESSING_PARAM_NOISE_SUPPRESSION,
+ Err(CUBEB_ERROR));
+ // The full config is applied in the APM, and NONE is requested.
+ EXPECT_EQ(aip->RequestedInputProcessingParams(graph),
+ CUBEB_INPUT_PROCESSING_PARAM_NONE);
+ EXPECT_EQ(aip->AppliedConfig(graph), echoNoiseConfig);
+ EXPECT_FALSE(aip->IsPassThrough(graph));
+
+ aip->Stop(graph);
+ graph->Destroy();
+}
diff --git a/dom/media/gtest/TestAudioInputSource.cpp b/dom/media/gtest/TestAudioInputSource.cpp
index f3f18b26a9..5defd1d053 100644
--- a/dom/media/gtest/TestAudioInputSource.cpp
+++ b/dom/media/gtest/TestAudioInputSource.cpp
@@ -10,16 +10,22 @@
#include "gtest/gtest.h"
#include "MockCubeb.h"
+#include "mozilla/Result.h"
#include "mozilla/gtest/WaitFor.h"
#include "nsContentUtils.h"
using namespace mozilla;
using testing::ContainerEq;
-namespace {
+// Short-hand for DispatchToCurrentThread with a function.
#define DispatchFunction(f) \
NS_DispatchToCurrentThread(NS_NewRunnableFunction(__func__, f))
-} // namespace
+
+// Short-hand for draining the current thread's event queue, i.e. processing
+// those runnables dispatched per above.
+#define ProcessEventQueue() \
+ while (NS_ProcessNextEvent(nullptr, false)) { \
+ }
class MockEventListener : public AudioInputSource::EventListener {
public:
@@ -63,7 +69,10 @@ TEST(TestAudioInputSource, StartAndStop)
// Make sure start and stop works.
{
- DispatchFunction([&] { ais->Start(); });
+ DispatchFunction([&] {
+ ais->Init();
+ ais->Start();
+ });
RefPtr<SmartMockCubebStream> stream = WaitFor(cubeb->StreamInitEvent());
EXPECT_TRUE(stream->mHasInput);
EXPECT_FALSE(stream->mHasOutput);
@@ -79,7 +88,10 @@ TEST(TestAudioInputSource, StartAndStop)
// Make sure restart is ok.
{
- DispatchFunction([&] { ais->Start(); });
+ DispatchFunction([&] {
+ ais->Init();
+ ais->Start();
+ });
RefPtr<SmartMockCubebStream> stream = WaitFor(cubeb->StreamInitEvent());
EXPECT_TRUE(stream->mHasInput);
EXPECT_FALSE(stream->mHasOutput);
@@ -133,7 +145,10 @@ TEST(TestAudioInputSource, DataOutputBeforeStartAndAfterStop)
EXPECT_TRUE(data.IsNull());
}
- DispatchFunction([&] { ais->Start(); });
+ DispatchFunction([&] {
+ ais->Init();
+ ais->Start();
+ });
RefPtr<SmartMockCubebStream> stream = WaitFor(cubeb->StreamInitEvent());
EXPECT_TRUE(stream->mHasInput);
EXPECT_FALSE(stream->mHasOutput);
@@ -206,7 +221,10 @@ TEST(TestAudioInputSource, ErrorCallback)
sourceRate, targetRate);
ASSERT_TRUE(ais);
- DispatchFunction([&] { ais->Start(); });
+ DispatchFunction([&] {
+ ais->Init();
+ ais->Start();
+ });
RefPtr<SmartMockCubebStream> stream = WaitFor(cubeb->StreamInitEvent());
EXPECT_TRUE(stream->mHasInput);
EXPECT_FALSE(stream->mHasOutput);
@@ -251,7 +269,10 @@ TEST(TestAudioInputSource, DeviceChangedCallback)
sourceRate, targetRate);
ASSERT_TRUE(ais);
- DispatchFunction([&] { ais->Start(); });
+ DispatchFunction([&] {
+ ais->Init();
+ ais->Start();
+ });
RefPtr<SmartMockCubebStream> stream = WaitFor(cubeb->StreamInitEvent());
EXPECT_TRUE(stream->mHasInput);
EXPECT_FALSE(stream->mHasOutput);
@@ -267,3 +288,82 @@ TEST(TestAudioInputSource, DeviceChangedCallback)
ais = nullptr; // Drop the SharedThreadPool here.
}
+
+TEST(TestAudioInputSource, InputProcessing)
+{
+ MockCubeb* cubeb = new MockCubeb();
+ CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
+
+ const AudioInputSource::Id sourceId = 1;
+ const CubebUtils::AudioDeviceID deviceId = (CubebUtils::AudioDeviceID)1;
+ const uint32_t channels = 2;
+ const PrincipalHandle testPrincipal =
+ MakePrincipalHandle(nsContentUtils::GetSystemPrincipal());
+ const TrackRate sourceRate = 44100;
+ const TrackRate targetRate = 48000;
+ using ProcessingPromise =
+ AudioInputSource::SetRequestedProcessingParamsPromise;
+
+ auto listener = MakeRefPtr<MockEventListener>();
+ EXPECT_CALL(*listener,
+ AudioStateCallback(
+ sourceId, AudioInputSource::EventListener::State::Started))
+ .Times(0);
+ EXPECT_CALL(*listener,
+ AudioStateCallback(
+ sourceId, AudioInputSource::EventListener::State::Stopped))
+ .Times(10);
+
+ RefPtr<AudioInputSource> ais = MakeRefPtr<AudioInputSource>(
+ std::move(listener), sourceId, deviceId, channels, true, testPrincipal,
+ sourceRate, targetRate);
+
+ const auto test =
+ [&](cubeb_input_processing_params aRequested,
+ const Result<cubeb_input_processing_params, int>& aExpected) {
+ RefPtr<ProcessingPromise> p;
+ DispatchFunction([&] {
+ ais->Init();
+ p = ais->SetRequestedProcessingParams(aRequested);
+ });
+ ProcessEventQueue();
+ EXPECT_EQ(WaitFor(p), aExpected);
+
+ DispatchFunction([&] { ais->Stop(); });
+ Unused << WaitFor(cubeb->StreamDestroyEvent());
+ };
+
+ // Not supported by backend.
+ cubeb->SetSupportedInputProcessingParams(CUBEB_INPUT_PROCESSING_PARAM_NONE,
+ CUBEB_ERROR_NOT_SUPPORTED);
+ test(CUBEB_INPUT_PROCESSING_PARAM_NONE, Err(CUBEB_ERROR_NOT_SUPPORTED));
+
+ // Not supported by params.
+ cubeb->SetSupportedInputProcessingParams(CUBEB_INPUT_PROCESSING_PARAM_NONE,
+ CUBEB_OK);
+ test(CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION,
+ CUBEB_INPUT_PROCESSING_PARAM_NONE);
+
+ constexpr cubeb_input_processing_params allParams =
+ CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION |
+ CUBEB_INPUT_PROCESSING_PARAM_NOISE_SUPPRESSION |
+ CUBEB_INPUT_PROCESSING_PARAM_AUTOMATIC_GAIN_CONTROL |
+ CUBEB_INPUT_PROCESSING_PARAM_VOICE_ISOLATION;
+
+ // Successful all.
+ cubeb->SetSupportedInputProcessingParams(allParams, CUBEB_OK);
+ test(allParams, allParams);
+
+ // Successful partial.
+ test(CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION,
+ CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION);
+
+ // Not supported by stream.
+ // Note this also tests that AudioInputSource resets its configured params
+ // state from the previous successful test.
+ constexpr int propagatedError = 99;
+ cubeb->SetInputProcessingApplyRv(propagatedError);
+ test(CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION, Err(propagatedError));
+
+ ais = nullptr; // Drop the SharedThreadPool here.
+}
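// Note on the test helper above: draining the event queue with
// ProcessEventQueue() before WaitFor(p) matters, because the
// DispatchFunction lambda that assigns `p` only runs once this thread
// processes its pending runnables; waiting on `p` earlier would be waiting
// on a null promise.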
diff --git a/dom/media/gtest/TestAudioRingBuffer.cpp b/dom/media/gtest/TestAudioRingBuffer.cpp
index 082323efd1..3da39780f1 100644
--- a/dom/media/gtest/TestAudioRingBuffer.cpp
+++ b/dom/media/gtest/TestAudioRingBuffer.cpp
@@ -1094,7 +1094,7 @@ TEST(TestAudioRingBuffer, PrependSilenceNoWrapShort)
EXPECT_THAT(out, ElementsAre(2, 3, 4, 5, 0, 0, 6, 7));
}
-TEST(TestAudioRingBuffer, SetLengthBytesNoWrapFloat)
+TEST(TestAudioRingBuffer, EnsureLengthBytesNoWrapFloat)
{
AudioRingBuffer rb(6 * sizeof(float));
rb.SetSampleFormat(AUDIO_FORMAT_FLOAT32);
@@ -1106,7 +1106,7 @@ TEST(TestAudioRingBuffer, SetLengthBytesNoWrapFloat)
EXPECT_EQ(rb.AvailableWrite(), 0u);
EXPECT_EQ(rb.Capacity(), 6u);
- EXPECT_TRUE(rb.SetLengthBytes(11 * sizeof(float)));
+ EXPECT_TRUE(rb.EnsureLengthBytes(11 * sizeof(float)));
float out[10] = {};
rv = rb.Read(Span(out, 10));
EXPECT_EQ(rv, 5u);
@@ -1116,7 +1116,7 @@ TEST(TestAudioRingBuffer, SetLengthBytesNoWrapFloat)
EXPECT_THAT(out, ElementsAre(.1, .2, .3, .4, .5, 0, 0, 0, 0, 0));
}
-TEST(TestAudioRingBuffer, SetLengthBytesNoWrapShort)
+TEST(TestAudioRingBuffer, EnsureLengthBytesNoWrapShort)
{
AudioRingBuffer rb(6 * sizeof(short));
rb.SetSampleFormat(AUDIO_FORMAT_S16);
@@ -1128,7 +1128,7 @@ TEST(TestAudioRingBuffer, SetLengthBytesNoWrapShort)
EXPECT_EQ(rb.AvailableWrite(), 0u);
EXPECT_EQ(rb.Capacity(), 6u);
- EXPECT_TRUE(rb.SetLengthBytes(11 * sizeof(short)));
+ EXPECT_TRUE(rb.EnsureLengthBytes(11 * sizeof(short)));
short out[10] = {};
rv = rb.Read(Span(out, 10));
EXPECT_EQ(rv, 5u);
@@ -1138,7 +1138,7 @@ TEST(TestAudioRingBuffer, SetLengthBytesNoWrapShort)
EXPECT_THAT(out, ElementsAre(1, 2, 3, 4, 5, 0, 0, 0, 0, 0));
}
-TEST(TestAudioRingBuffer, SetLengthBytesWrap1PartFloat)
+TEST(TestAudioRingBuffer, EnsureLengthBytesWrap1PartFloat)
{
AudioRingBuffer rb(6 * sizeof(float));
rb.SetSampleFormat(AUDIO_FORMAT_FLOAT32);
@@ -1158,7 +1158,7 @@ TEST(TestAudioRingBuffer, SetLengthBytesWrap1PartFloat)
EXPECT_EQ(rb.AvailableRead(), 5u);
EXPECT_EQ(rb.AvailableWrite(), 0u);
- EXPECT_TRUE(rb.SetLengthBytes(11 * sizeof(float)));
+ EXPECT_TRUE(rb.EnsureLengthBytes(11 * sizeof(float)));
EXPECT_EQ(rb.AvailableRead(), 5u);
EXPECT_EQ(rb.AvailableWrite(), 5u);
@@ -1175,7 +1175,7 @@ TEST(TestAudioRingBuffer, SetLengthBytesWrap1PartFloat)
EXPECT_THAT(out, ElementsAre(.1, .2, .3, .4, .5, .6, .7, 0, 0, 0));
}
-TEST(TestAudioRingBuffer, SetLengthBytesWrap1PartShort)
+TEST(TestAudioRingBuffer, EnsureLengthBytesWrap1PartShort)
{
AudioRingBuffer rb(6 * sizeof(short));
rb.SetSampleFormat(AUDIO_FORMAT_S16);
@@ -1195,7 +1195,7 @@ TEST(TestAudioRingBuffer, SetLengthBytesWrap1PartShort)
EXPECT_EQ(rb.AvailableRead(), 5u);
EXPECT_EQ(rb.AvailableWrite(), 0u);
- EXPECT_TRUE(rb.SetLengthBytes(11 * sizeof(short)));
+ EXPECT_TRUE(rb.EnsureLengthBytes(11 * sizeof(short)));
EXPECT_EQ(rb.AvailableRead(), 5u);
EXPECT_EQ(rb.AvailableWrite(), 5u);
@@ -1212,7 +1212,7 @@ TEST(TestAudioRingBuffer, SetLengthBytesWrap1PartShort)
EXPECT_THAT(out, ElementsAre(1, 2, 3, 4, 5, 6, 7, 0, 0, 0));
}
-TEST(TestAudioRingBuffer, SetLengthBytesWrap2PartsFloat)
+TEST(TestAudioRingBuffer, EnsureLengthBytesWrap2PartsFloat)
{
AudioRingBuffer rb(6 * sizeof(float));
rb.SetSampleFormat(AUDIO_FORMAT_FLOAT32);
@@ -1232,7 +1232,7 @@ TEST(TestAudioRingBuffer, SetLengthBytesWrap2PartsFloat)
EXPECT_EQ(rb.AvailableRead(), 5u);
EXPECT_EQ(rb.AvailableWrite(), 0u);
- EXPECT_TRUE(rb.SetLengthBytes(8 * sizeof(float)));
+ EXPECT_TRUE(rb.EnsureLengthBytes(8 * sizeof(float)));
EXPECT_EQ(rb.AvailableRead(), 5u);
EXPECT_EQ(rb.AvailableWrite(), 2u);
@@ -1249,7 +1249,7 @@ TEST(TestAudioRingBuffer, SetLengthBytesWrap2PartsFloat)
EXPECT_THAT(out, ElementsAre(.1, .2, .3, .4, .5, .6, .7, 0));
}
-TEST(TestAudioRingBuffer, SetLengthBytesWrap2PartsShort)
+TEST(TestAudioRingBuffer, EnsureLengthBytesWrap2PartsShort)
{
AudioRingBuffer rb(6 * sizeof(short));
rb.SetSampleFormat(AUDIO_FORMAT_S16);
@@ -1269,7 +1269,7 @@ TEST(TestAudioRingBuffer, SetLengthBytesWrap2PartsShort)
EXPECT_EQ(rb.AvailableRead(), 5u);
EXPECT_EQ(rb.AvailableWrite(), 0u);
- EXPECT_TRUE(rb.SetLengthBytes(8 * sizeof(short)));
+ EXPECT_TRUE(rb.EnsureLengthBytes(8 * sizeof(short)));
EXPECT_EQ(rb.AvailableRead(), 5u);
EXPECT_EQ(rb.AvailableWrite(), 2u);
@@ -1285,3 +1285,29 @@ TEST(TestAudioRingBuffer, SetLengthBytesWrap2PartsShort)
EXPECT_EQ(rb.Capacity(), 8u);
EXPECT_THAT(out, ElementsAre(1, 2, 3, 4, 5, 6, 7, 0));
}
+
+TEST(TestAudioRingBuffer, EnsureLengthShorter)
+{
+ AudioRingBuffer rb(5 * sizeof(float));
+ rb.SetSampleFormat(AUDIO_FORMAT_FLOAT32);
+
+ float in[5] = {.1, .2, .3, .4, .5};
+ EXPECT_EQ(rb.Write(Span(in, 5)), 4u);
+ EXPECT_EQ(rb.AvailableRead(), 4u);
+ EXPECT_EQ(rb.AvailableWrite(), 0u);
+ EXPECT_EQ(rb.Capacity(), 5u);
+
+ float out[5] = {};
+ EXPECT_EQ(rb.Read(Span(out, 3)), 3u);
+ EXPECT_THAT(out, ElementsAre(.1, .2, .3, 0, 0));
+ EXPECT_EQ(rb.AvailableRead(), 1u);
+ EXPECT_EQ(rb.AvailableWrite(), 3u);
+
+ EXPECT_TRUE(rb.EnsureLengthBytes(3 * sizeof(float)));
+ EXPECT_EQ(rb.AvailableRead(), 1u);
+ EXPECT_EQ(rb.AvailableWrite(), 3u);
+ EXPECT_EQ(rb.Capacity(), 5u);
+ EXPECT_EQ(rb.Write(Span(in, 5)), 3u);
+ EXPECT_EQ(rb.Read(Span(out, 5)), 4u);
+ EXPECT_THAT(out, ElementsAre(.4, .1, .2, .3, 0));
+}
diff --git a/dom/media/gtest/TestAudioTrackGraph.cpp b/dom/media/gtest/TestAudioTrackGraph.cpp
index 7be1224ab9..b1202277ce 100644
--- a/dom/media/gtest/TestAudioTrackGraph.cpp
+++ b/dom/media/gtest/TestAudioTrackGraph.cpp
@@ -21,9 +21,13 @@
#include "WavDumper.h"
using namespace mozilla;
+using testing::Eq;
+using testing::InSequence;
+using testing::Return;
+using testing::StrictMock;
// Short-hand for InvokeAsync on the current thread.
-#define Invoke(f) InvokeAsync(GetCurrentSerialEventTarget(), __func__, f)
+#define InvokeAsync(f) InvokeAsync(GetCurrentSerialEventTarget(), __func__, f)
// Short-hand for DispatchToCurrentThread with a function.
#define DispatchFunction(f) \
@@ -33,6 +37,12 @@ using namespace mozilla;
#define DispatchMethod(t, m, args...) \
NS_DispatchToCurrentThread(NewRunnableMethod(__func__, t, m, ##args))
+// Short-hand for draining the current thread's event queue, i.e. processing
+// the runnables dispatched via the macros above.
+#define ProcessEventQueue() \
+ while (NS_ProcessNextEvent(nullptr, false)) { \
+ }
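+// Usage sketch: a DispatchFunction(...) call queues a runnable on the current
+// thread; a following ProcessEventQueue() runs it synchronously before the
+// test continues.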
+
namespace {
#ifdef MOZ_WEBRTC
/*
@@ -111,6 +121,44 @@ struct StopNonNativeInput : public ControlMessage {
void Run() override { mInputTrack->StopAudio(); }
};
+// Helper for detecting when fallback driver has been switched away, for use
+// with RunningMode::Manual.
+class OnFallbackListener : public MediaTrackListener {
+ const RefPtr<MediaTrack> mTrack;
+ Atomic<bool> mOnFallback{true};
+
+ public:
+ explicit OnFallbackListener(MediaTrack* aTrack) : mTrack(aTrack) {}
+
+ void Reset() { mOnFallback = true; }
+ bool OnFallback() { return mOnFallback; }
+
+ void NotifyOutput(MediaTrackGraph*, TrackTime) override {
+ if (auto* ad =
+ mTrack->GraphImpl()->CurrentDriver()->AsAudioCallbackDriver()) {
+ mOnFallback = ad->OnFallback();
+ }
+ }
+};
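+// Typical use (see the tests below): iterate the mock stream manually until
+// the AudioCallbackDriver has taken over from the fallback driver:
+//   while (fallbackListener->OnFallback()) {
+//     EXPECT_EQ(stream->ManualDataCallback(0),
+//               MockCubebStream::KeepProcessing::Yes);
+//   }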
+
+class MockAudioDataListener : public AudioDataListener {
+ protected:
+ ~MockAudioDataListener() = default;
+
+ public:
+ MockAudioDataListener() = default;
+
+ MOCK_METHOD(uint32_t, RequestedInputChannelCount, (MediaTrackGraph*),
+ (const));
+ MOCK_METHOD(cubeb_input_processing_params, RequestedInputProcessingParams,
+ (MediaTrackGraph*), (const));
+ MOCK_METHOD(bool, IsVoiceInput, (MediaTrackGraph*), (const));
+ MOCK_METHOD(void, DeviceChanged, (MediaTrackGraph*));
+ MOCK_METHOD(void, Disconnect, (MediaTrackGraph*));
+ MOCK_METHOD(void, NotifySetRequestedInputProcessingParamsResult,
+ (MediaTrackGraph*, cubeb_input_processing_params,
+ (const Result<cubeb_input_processing_params, int>&)));
+};
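+// The TestAudioDataListener helpers below wrap this mock in StrictMock<> and
+// install default actions via EXPECT_CALL(...).WillRepeatedly(Return(...)).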
} // namespace
/*
@@ -164,7 +212,7 @@ TEST(TestAudioTrackGraph, DifferentDeviceIDs)
// graph from the global hash table and let it shutdown.
using SourceTrackPromise = MozPromise<SourceMediaTrack*, nsresult, true>;
- auto p = Invoke([g] {
+ auto p = InvokeAsync([g] {
return SourceTrackPromise::CreateAndResolve(
g->CreateSourceTrack(MediaSegment::AUDIO), __func__);
});
@@ -256,7 +304,7 @@ TEST(TestAudioTrackGraph, NotifyDeviceStarted)
nullptr, GetMainThreadSerialEventTarget());
RefPtr<SourceMediaTrack> dummySource;
- Unused << WaitFor(Invoke([&] {
+ Unused << WaitFor(InvokeAsync([&] {
// Dummy track to make graph rolling. Add it and remove it to remove the
// graph from the global hash table and let it shutdown.
dummySource = graph->CreateSourceTrack(MediaSegment::AUDIO);
@@ -522,7 +570,7 @@ TEST(TestAudioTrackGraph, NonNativeInputTrackErrorCallback)
class TestDeviceInputConsumerTrack : public DeviceInputConsumerTrack {
public:
static TestDeviceInputConsumerTrack* Create(MediaTrackGraph* aGraph) {
- MOZ_ASSERT(NS_IsMainThread());
+ MOZ_RELEASE_ASSERT(NS_IsMainThread());
TestDeviceInputConsumerTrack* track =
new TestDeviceInputConsumerTrack(aGraph->GraphRate());
aGraph->AddTrack(track);
@@ -530,7 +578,7 @@ class TestDeviceInputConsumerTrack : public DeviceInputConsumerTrack {
}
void Destroy() {
- MOZ_ASSERT(NS_IsMainThread());
+ MOZ_RELEASE_ASSERT(NS_IsMainThread());
DisconnectDeviceInput();
DeviceInputConsumerTrack::Destroy();
}
@@ -543,7 +591,7 @@ class TestDeviceInputConsumerTrack : public DeviceInputConsumerTrack {
if (mInputs.IsEmpty()) {
GetData<AudioSegment>()->AppendNullData(aTo - aFrom);
} else {
- MOZ_ASSERT(mInputs.Length() == 1);
+ MOZ_RELEASE_ASSERT(mInputs.Length() == 1);
AudioSegment data;
DeviceInputConsumerTrack::GetInputSourceData(data, aFrom, aTo);
GetData<AudioSegment>()->AppendFrom(&data);
@@ -555,7 +603,7 @@ class TestDeviceInputConsumerTrack : public DeviceInputConsumerTrack {
return 0;
}
DeviceInputTrack* t = mInputs[0]->GetSource()->AsDeviceInputTrack();
- MOZ_ASSERT(t);
+ MOZ_RELEASE_ASSERT(t);
return t->NumberOfChannels();
}
@@ -574,30 +622,23 @@ TEST(TestAudioTrackGraph, DeviceChangedCallback)
CubebUtils::PreferredSampleRate(/* aShouldResistFingerprinting */ false),
nullptr, GetMainThreadSerialEventTarget());
- class TestAudioDataListener : public AudioDataListener {
+ class TestAudioDataListener : public StrictMock<MockAudioDataListener> {
public:
- TestAudioDataListener(uint32_t aChannelCount, bool aIsVoice)
- : mChannelCount(aChannelCount),
- mIsVoice(aIsVoice),
- mDeviceChangedCount(0) {}
-
- uint32_t RequestedInputChannelCount(MediaTrackGraph* aGraph) override {
- return mChannelCount;
- }
- bool IsVoiceInput(MediaTrackGraph* aGraph) const override {
- return mIsVoice;
- };
- void DeviceChanged(MediaTrackGraph* aGraph) override {
- ++mDeviceChangedCount;
+ TestAudioDataListener(uint32_t aChannelCount, bool aIsVoice) {
+ EXPECT_CALL(*this, RequestedInputChannelCount)
+ .WillRepeatedly(Return(aChannelCount));
+ EXPECT_CALL(*this, RequestedInputProcessingParams)
+ .WillRepeatedly(Return(CUBEB_INPUT_PROCESSING_PARAM_NONE));
+ EXPECT_CALL(*this, IsVoiceInput).WillRepeatedly(Return(aIsVoice));
+ {
+ InSequence s;
+ EXPECT_CALL(*this, DeviceChanged);
+ EXPECT_CALL(*this, Disconnect);
+ }
}
- void Disconnect(MediaTrackGraph* aGraph) override{/* Ignored */};
- uint32_t DeviceChangedCount() { return mDeviceChangedCount; }
private:
~TestAudioDataListener() = default;
- const uint32_t mChannelCount;
- const bool mIsVoice;
- std::atomic<uint32_t> mDeviceChangedCount;
};
// Create a full-duplex AudioCallbackDriver by creating a NativeInputTrack.
@@ -610,7 +651,7 @@ TEST(TestAudioTrackGraph, DeviceChangedCallback)
EXPECT_TRUE(track1->ConnectedToNativeDevice());
EXPECT_FALSE(track1->ConnectedToNonNativeDevice());
auto started =
- Invoke([&] { return graphImpl->NotifyWhenDeviceStarted(nullptr); });
+ InvokeAsync([&] { return graphImpl->NotifyWhenDeviceStarted(nullptr); });
RefPtr<SmartMockCubebStream> stream1 = WaitFor(cubeb->StreamInitEvent());
EXPECT_TRUE(stream1->mHasInput);
EXPECT_TRUE(stream1->mHasOutput);
@@ -648,9 +689,6 @@ TEST(TestAudioTrackGraph, DeviceChangedCallback)
WaitFor(cubeb->StreamDestroyEvent());
EXPECT_EQ(destroyedStream.get(), stream2.get());
- // Make sure we only have one device-changed event for the NativeInputTrack.
- EXPECT_EQ(listener2->DeviceChangedCount(), 1U);
-
// Destroy the NativeInputTrack.
DispatchFunction([&] {
track1->DisconnectDeviceInput();
@@ -658,9 +696,6 @@ TEST(TestAudioTrackGraph, DeviceChangedCallback)
});
destroyedStream = WaitFor(cubeb->StreamDestroyEvent());
EXPECT_EQ(destroyedStream.get(), stream1.get());
-
- // Make sure we only have one device-changed event for the NativeInputTrack.
- EXPECT_EQ(listener1->DeviceChangedCount(), 1U);
}
// The native audio stream (a.k.a. GraphDriver) and the non-native audio stream
@@ -692,57 +727,32 @@ TEST(TestAudioTrackGraph, RestartAudioIfMaxChannelCountChanged)
// A test-only AudioDataListener that simulates AudioInputProcessing's setter
// and getter for the input channel count.
- class TestAudioDataListener : public AudioDataListener {
+ class TestAudioDataListener : public StrictMock<MockAudioDataListener> {
public:
- TestAudioDataListener(uint32_t aChannelCount, bool aIsVoice)
- : mChannelCount(aChannelCount), mIsVoice(aIsVoice) {}
+ TestAudioDataListener(uint32_t aChannelCount, bool aIsVoice) {
+ EXPECT_CALL(*this, RequestedInputChannelCount)
+ .WillRepeatedly(Return(aChannelCount));
+ EXPECT_CALL(*this, RequestedInputProcessingParams)
+ .WillRepeatedly(Return(CUBEB_INPUT_PROCESSING_PARAM_NONE));
+ EXPECT_CALL(*this, IsVoiceInput).WillRepeatedly(Return(aIsVoice));
+ EXPECT_CALL(*this, Disconnect);
+ }
// Main thread API
void SetInputChannelCount(MediaTrackGraph* aGraph,
CubebUtils::AudioDeviceID aDevice,
uint32_t aChannelCount) {
- MOZ_ASSERT(NS_IsMainThread());
-
- struct Message : public ControlMessage {
- MediaTrackGraph* mGraph;
- TestAudioDataListener* mListener;
- CubebUtils::AudioDeviceID mDevice;
- uint32_t mChannelCount;
-
- Message(MediaTrackGraph* aGraph, TestAudioDataListener* aListener,
- CubebUtils::AudioDeviceID aDevice, uint32_t aChannelCount)
- : ControlMessage(nullptr),
- mGraph(aGraph),
- mListener(aListener),
- mDevice(aDevice),
- mChannelCount(aChannelCount) {}
- void Run() override {
- mListener->mChannelCount = mChannelCount;
- mGraph->ReevaluateInputDevice(mDevice);
- }
- };
-
- static_cast<MediaTrackGraphImpl*>(aGraph)->AppendMessage(
- MakeUnique<Message>(aGraph, this, aDevice, aChannelCount));
+ MOZ_RELEASE_ASSERT(NS_IsMainThread());
+ static_cast<MediaTrackGraphImpl*>(aGraph)
+ ->QueueControlMessageWithNoShutdown(
+ [this, self = RefPtr(this), aGraph, aDevice, aChannelCount] {
+ EXPECT_CALL(*this, RequestedInputChannelCount)
+ .WillRepeatedly(Return(aChannelCount));
+ aGraph->ReevaluateInputDevice(aDevice);
+ });
}
- // Graph thread APIs: AudioDataListenerInterface implementations.
- uint32_t RequestedInputChannelCount(MediaTrackGraph* aGraph) override {
- aGraph->AssertOnGraphThread();
- return mChannelCount;
- }
- bool IsVoiceInput(MediaTrackGraph* aGraph) const override {
- return mIsVoice;
- };
- void DeviceChanged(MediaTrackGraph* aGraph) override { /* Ignored */
- }
- void Disconnect(MediaTrackGraph* aGraph) override{/* Ignored */};
private:
~TestAudioDataListener() = default;
-
- // Graph thread-only.
- uint32_t mChannelCount;
- // Any thread.
- const bool mIsVoice;
};
// Request a new input channel count and expect to have a new stream.
@@ -841,8 +851,8 @@ TEST(TestAudioTrackGraph, RestartAudioIfMaxChannelCountChanged)
EXPECT_TRUE(track1->ConnectedToNativeDevice());
EXPECT_FALSE(track1->ConnectedToNonNativeDevice());
- auto started =
- Invoke([&] { return graphImpl->NotifyWhenDeviceStarted(nullptr); });
+ auto started = InvokeAsync(
+ [&] { return graphImpl->NotifyWhenDeviceStarted(nullptr); });
nativeStream = WaitFor(cubeb->StreamInitEvent());
EXPECT_TRUE(nativeStream->mHasInput);
EXPECT_TRUE(nativeStream->mHasOutput);
@@ -947,30 +957,18 @@ TEST(TestAudioTrackGraph, RestartAudioIfMaxChannelCountChanged)
// AudioDataListener. However, it only tests when MOZ_WEBRTC is defined.
TEST(TestAudioTrackGraph, SwitchNativeInputDevice)
{
- class TestAudioDataListener : public AudioDataListener {
+ class TestAudioDataListener : public StrictMock<MockAudioDataListener> {
public:
- TestAudioDataListener(uint32_t aChannelCount, bool aIsVoice)
- : mChannelCount(aChannelCount),
- mIsVoice(aIsVoice),
- mDeviceChangedCount(0) {}
-
- uint32_t RequestedInputChannelCount(MediaTrackGraph* aGraph) override {
- return mChannelCount;
- }
- bool IsVoiceInput(MediaTrackGraph* aGraph) const override {
- return mIsVoice;
- };
- void DeviceChanged(MediaTrackGraph* aGraph) override {
- ++mDeviceChangedCount;
+ TestAudioDataListener(uint32_t aChannelCount, bool aIsVoice) {
+ EXPECT_CALL(*this, RequestedInputChannelCount)
+ .WillRepeatedly(Return(aChannelCount));
+ EXPECT_CALL(*this, RequestedInputProcessingParams)
+ .WillRepeatedly(Return(CUBEB_INPUT_PROCESSING_PARAM_NONE));
+ EXPECT_CALL(*this, IsVoiceInput).WillRepeatedly(Return(aIsVoice));
}
- void Disconnect(MediaTrackGraph* aGraph) override{/* Ignored */};
- uint32_t DeviceChangedCount() { return mDeviceChangedCount; }
private:
~TestAudioDataListener() = default;
- const uint32_t mChannelCount;
- const bool mIsVoice;
- std::atomic<uint32_t> mDeviceChangedCount;
};
MockCubeb* cubeb = new MockCubeb();
@@ -1049,11 +1047,12 @@ TEST(TestAudioTrackGraph, SwitchNativeInputDevice)
RefPtr<TestDeviceInputConsumerTrack> track1 =
TestDeviceInputConsumerTrack::Create(graph);
RefPtr<TestAudioDataListener> listener1 = new TestAudioDataListener(1, false);
+ EXPECT_CALL(*listener1, Disconnect);
track1->ConnectDeviceInput(device1, listener1, PRINCIPAL_HANDLE_NONE);
EXPECT_EQ(track1->DeviceId().value(), device1);
auto started =
- Invoke([&] { return graph->NotifyWhenDeviceStarted(nullptr); });
+ InvokeAsync([&] { return graph->NotifyWhenDeviceStarted(nullptr); });
RefPtr<SmartMockCubebStream> stream1 = WaitFor(cubeb->StreamInitEvent());
EXPECT_TRUE(stream1->mHasInput);
@@ -1069,6 +1068,7 @@ TEST(TestAudioTrackGraph, SwitchNativeInputDevice)
RefPtr<TestDeviceInputConsumerTrack> track2 =
TestDeviceInputConsumerTrack::Create(graph);
RefPtr<TestAudioDataListener> listener2 = new TestAudioDataListener(2, false);
+ EXPECT_CALL(*listener2, Disconnect).Times(2);
track2->ConnectDeviceInput(device2, listener2, PRINCIPAL_HANDLE_NONE);
EXPECT_EQ(track2->DeviceId().value(), device2);
@@ -1085,6 +1085,7 @@ TEST(TestAudioTrackGraph, SwitchNativeInputDevice)
RefPtr<TestDeviceInputConsumerTrack> track3 =
TestDeviceInputConsumerTrack::Create(graph);
RefPtr<TestAudioDataListener> listener3 = new TestAudioDataListener(1, false);
+ EXPECT_CALL(*listener3, Disconnect).Times(2);
track3->ConnectDeviceInput(device3, listener3, PRINCIPAL_HANDLE_NONE);
EXPECT_EQ(track3->DeviceId().value(), device3);
@@ -1160,7 +1161,7 @@ TEST(TestAudioTrackGraph, ErrorCallback)
// output from the graph.
RefPtr<AudioProcessingTrack> processingTrack;
RefPtr<AudioInputProcessing> listener;
- auto started = Invoke([&] {
+ auto started = InvokeAsync([&] {
processingTrack = AudioProcessingTrack::Create(graph);
listener = new AudioInputProcessing(2);
QueueExpectIsPassThrough(processingTrack, listener);
@@ -1225,7 +1226,7 @@ TEST(TestAudioTrackGraph, AudioProcessingTrack)
RefPtr<ProcessedMediaTrack> outputTrack;
RefPtr<MediaInputPort> port;
RefPtr<AudioInputProcessing> listener;
- auto p = Invoke([&] {
+ auto p = InvokeAsync([&] {
processingTrack = AudioProcessingTrack::Create(graph);
outputTrack = graph->CreateForwardedInputTrack(MediaSegment::AUDIO);
outputTrack->QueueSetAutoend(false);
@@ -1282,7 +1283,7 @@ TEST(TestAudioTrackGraph, AudioProcessingTrack)
EXPECT_EQ(estimatedFreq, inputFrequency);
std::cerr << "PreSilence: " << preSilenceSamples << std::endl;
- // We buffer 128 frames. See DeviceInputTrack::ProcessInput.
+ // We buffer 128 frames. See NativeInputTrack::NotifyInputData.
EXPECT_GE(preSilenceSamples, 128U);
// If the fallback system clock driver is doing a graph iteration before the
// first audio driver iteration comes in, that iteration is ignored and
@@ -1297,13 +1298,17 @@ TEST(TestAudioTrackGraph, AudioProcessingTrack)
TEST(TestAudioTrackGraph, ReConnectDeviceInput)
{
- MockCubeb* cubeb = new MockCubeb();
+ MockCubeb* cubeb = new MockCubeb(MockCubeb::RunningMode::Manual);
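+ // In RunningMode::Manual the test drives audio callbacks itself via
+ // MockCubebStream::ManualDataCallback(), rather than MockCubeb running
+ // them on its own.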
CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
// 48k is a native processing rate, and avoids a resampling pass compared
// to 44.1k. The resampler may take a few frames to stabilize, which shows
// as unexpected discontinuities in the test.
const TrackRate rate = 48000;
+ // Use a drift factor so that we don't produce perfect 10ms-chunks.
+ // This will exercise whatever buffers are in the audio processing pipeline,
+ // and the bookkeeping surrounding them.
+ const long step = 10 * rate * 1111 / 1000 / PR_MSEC_PER_SEC;
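+ // With rate = 48000 this yields step = 10 * 48000 * 1111 / 1000 / 1000
+ // = 533 frames per callback, i.e. ~11.1ms rather than a 480-frame 10ms
+ // chunk.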
MediaTrackGraph* graph = MediaTrackGraphImpl::GetInstance(
MediaTrackGraph::SYSTEM_THREAD_DRIVER, /*Window ID*/ 1, rate, nullptr,
@@ -1315,7 +1320,8 @@ TEST(TestAudioTrackGraph, ReConnectDeviceInput)
RefPtr<ProcessedMediaTrack> outputTrack;
RefPtr<MediaInputPort> port;
RefPtr<AudioInputProcessing> listener;
- auto p = Invoke([&] {
+ RefPtr<OnFallbackListener> fallbackListener;
+ DispatchFunction([&] {
processingTrack = AudioProcessingTrack::Create(graph);
outputTrack = graph->CreateForwardedInputTrack(MediaSegment::AUDIO);
outputTrack->QueueSetAutoend(false);
@@ -1337,54 +1343,65 @@ TEST(TestAudioTrackGraph, ReConnectDeviceInput)
settings.mAgc2Forced = true;
QueueApplySettings(processingTrack, listener, settings);
- return graph->NotifyWhenDeviceStarted(nullptr);
+ fallbackListener = new OnFallbackListener(processingTrack);
+ processingTrack->AddListener(fallbackListener);
});
RefPtr<SmartMockCubebStream> stream = WaitFor(cubeb->StreamInitEvent());
EXPECT_TRUE(stream->mHasInput);
- Unused << WaitFor(p);
- // Set a drift factor so that we don't dont produce perfect 10ms-chunks. This
- // will exercise whatever buffers are in the audio processing pipeline, and
- // the bookkeeping surrounding them.
- stream->SetDriftFactor(1.111);
+ while (
+ stream->State()
+ .map([](cubeb_state aState) { return aState != CUBEB_STATE_STARTED; })
+ .valueOr(true)) {
+ std::this_thread::sleep_for(std::chrono::milliseconds(1));
+ }
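+ // (State() is a Maybe<>; it is Nothing until the first cubeb state
+ // callback fires, and valueOr(true) keeps the loop polling in that case.)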
- // Wait for a second worth of audio data. GoFaster is dispatched through a
- // ControlMessage so that it is called in the first audio driver iteration.
- // Otherwise the audio driver might be going very fast while the fallback
- // system clock driver is still in an iteration.
- DispatchFunction([&] {
- processingTrack->GraphImpl()->AppendMessage(MakeUnique<GoFaster>(cubeb));
- });
- {
- uint32_t totalFrames = 0;
- WaitUntil(stream->FramesProcessedEvent(), [&](uint32_t aFrames) {
- totalFrames += aFrames;
- return totalFrames > static_cast<uint32_t>(graph->GraphRate());
- });
+ // Wait for the AudioCallbackDriver to come into effect.
+ while (fallbackListener->OnFallback()) {
+ EXPECT_EQ(stream->ManualDataCallback(0),
+ MockCubebStream::KeepProcessing::Yes);
+ std::this_thread::sleep_for(std::chrono::milliseconds(1));
+ }
+
+ // Iterate for a second worth of audio data.
+ for (long frames = 0; frames < graph->GraphRate(); frames += step) {
+ stream->ManualDataCallback(step);
}
- cubeb->DontGoFaster();
// Close the input to see that no asserts go off due to bad state.
DispatchFunction([&] { processingTrack->DisconnectDeviceInput(); });
- stream = WaitFor(cubeb->StreamInitEvent());
+ // Dispatch the disconnect message.
+ ProcessEventQueue();
+ // Run the disconnect message.
+ EXPECT_EQ(stream->ManualDataCallback(0),
+ MockCubebStream::KeepProcessing::Yes);
+ // Switch driver.
+ auto initPromise = TakeN(cubeb->StreamInitEvent(), 1);
+ EXPECT_EQ(stream->ManualDataCallback(0), MockCubebStream::KeepProcessing::No);
+ std::tie(stream) = WaitFor(initPromise).unwrap()[0];
EXPECT_FALSE(stream->mHasInput);
- Unused << WaitFor(
- Invoke([&] { return graph->NotifyWhenDeviceStarted(nullptr); }));
- // Output-only. Wait for another second before unmuting.
- DispatchFunction([&] {
- processingTrack->GraphImpl()->AppendMessage(MakeUnique<GoFaster>(cubeb));
- });
- {
- uint32_t totalFrames = 0;
- WaitUntil(stream->FramesProcessedEvent(), [&](uint32_t aFrames) {
- totalFrames += aFrames;
- return totalFrames > static_cast<uint32_t>(graph->GraphRate());
- });
+ while (
+ stream->State()
+ .map([](cubeb_state aState) { return aState != CUBEB_STATE_STARTED; })
+ .valueOr(true)) {
+ std::this_thread::sleep_for(std::chrono::milliseconds(1));
+ }
+
+ // Wait for the new AudioCallbackDriver to come into effect.
+ fallbackListener->Reset();
+ while (fallbackListener->OnFallback()) {
+ EXPECT_EQ(stream->ManualDataCallback(0),
+ MockCubebStream::KeepProcessing::Yes);
+ std::this_thread::sleep_for(std::chrono::milliseconds(1));
+ }
+
+ // Output-only. Iterate for another second before unmuting.
+ for (long frames = 0; frames < graph->GraphRate(); frames += step) {
+ stream->ManualDataCallback(step);
}
- cubeb->DontGoFaster();
// Re-open the input to again see that no asserts go off due to bad state.
DispatchFunction([&] {
@@ -1392,27 +1409,40 @@ TEST(TestAudioTrackGraph, ReConnectDeviceInput)
processingTrack->ConnectDeviceInput(deviceId, listener,
PRINCIPAL_HANDLE_NONE);
});
-
- stream = WaitFor(cubeb->StreamInitEvent());
+ // Dispatch the connect message.
+ ProcessEventQueue();
+ // Run the connect message.
+ EXPECT_EQ(stream->ManualDataCallback(0),
+ MockCubebStream::KeepProcessing::Yes);
+ // Switch driver.
+ initPromise = TakeN(cubeb->StreamInitEvent(), 1);
+ EXPECT_EQ(stream->ManualDataCallback(0), MockCubebStream::KeepProcessing::No);
+ std::tie(stream) = WaitFor(initPromise).unwrap()[0];
EXPECT_TRUE(stream->mHasInput);
- Unused << WaitFor(
- Invoke([&] { return graph->NotifyWhenDeviceStarted(nullptr); }));
- // Full-duplex. Wait for another second before finishing.
- DispatchFunction([&] {
- processingTrack->GraphImpl()->AppendMessage(MakeUnique<GoFaster>(cubeb));
- });
- {
- uint32_t totalFrames = 0;
- WaitUntil(stream->FramesProcessedEvent(), [&](uint32_t aFrames) {
- totalFrames += aFrames;
- return totalFrames > static_cast<uint32_t>(graph->GraphRate());
- });
+ while (
+ stream->State()
+ .map([](cubeb_state aState) { return aState != CUBEB_STATE_STARTED; })
+ .valueOr(true)) {
+ std::this_thread::sleep_for(std::chrono::milliseconds(1));
+ }
+
+ // Wait for the new AudioCallbackDriver to come into effect.
+ fallbackListener->Reset();
+ while (fallbackListener->OnFallback()) {
+ EXPECT_EQ(stream->ManualDataCallback(0),
+ MockCubebStream::KeepProcessing::Yes);
+ std::this_thread::sleep_for(std::chrono::milliseconds(1));
+ }
+
+ // Full-duplex. Iterate for another second before finishing.
+ for (long frames = 0; frames < graph->GraphRate(); frames += step) {
+ stream->ManualDataCallback(step);
}
- cubeb->DontGoFaster();
// Clean up.
DispatchFunction([&] {
+ processingTrack->RemoveListener(fallbackListener);
outputTrack->RemoveAudioOutput((void*)1);
outputTrack->Destroy();
port->Destroy();
@@ -1422,7 +1452,14 @@ TEST(TestAudioTrackGraph, ReConnectDeviceInput)
processingTrack->Destroy();
});
- uint32_t inputRate = stream->SampleRate();
+ // Dispatch the clean-up messages.
+ ProcessEventQueue();
+ // Run the clean-up messages.
+ EXPECT_EQ(stream->ManualDataCallback(0),
+ MockCubebStream::KeepProcessing::Yes);
+ // Shut down driver.
+ EXPECT_EQ(stream->ManualDataCallback(0), MockCubebStream::KeepProcessing::No);
+
uint32_t inputFrequency = stream->InputFrequency();
uint64_t preSilenceSamples;
uint32_t estimatedFreq;
@@ -1431,17 +1468,12 @@ TEST(TestAudioTrackGraph, ReConnectDeviceInput)
WaitFor(stream->OutputVerificationEvent());
EXPECT_EQ(estimatedFreq, inputFrequency);
- std::cerr << "PreSilence: " << preSilenceSamples << std::endl;
- // We buffer 10ms worth of frames in non-passthrough mode, plus up to 128
- // frames as we round up to the nearest block. See
- // AudioInputProcessing::Process and DeviceInputTrack::PrcoessInput.
- EXPECT_GE(preSilenceSamples, 128U + inputRate / 100);
- // If the fallback system clock driver is doing a graph iteration before the
- // first audio driver iteration comes in, that iteration is ignored and
- // results in zeros. It takes one fallback driver iteration *after* the audio
- // driver has started to complete the switch, *usually* resulting two
- // 10ms-iterations of silence; sometimes only one.
- EXPECT_LE(preSilenceSamples, 128U + 3 * inputRate / 100 /* 3*10ms */);
+ std::cerr << "PreSilence: " << preSilenceSamples << "\n";
+ // We buffer 128 frames. See NativeInputTrack::NotifyInputData.
+ // When not in passthrough, the AudioInputProcessing packetizer also buffers
+ // 10ms of silence, pulled in from NativeInputTrack when being run by the
+ // fallback SystemClockDriver.
+ EXPECT_EQ(preSilenceSamples, WEBAUDIO_BLOCK_SIZE + rate / 100);
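+ // At rate = 48000 that is 128 + 480 = 608 frames of expected pre-silence.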
// The waveform from AudioGenerator starts at 0, but we don't control its
// ending, so we expect a discontinuity there. Note that this check is only
// for the waveform on the stream *after* re-opening the input.
@@ -1467,7 +1499,7 @@ float rmsf32(AudioDataValue* aSamples, uint32_t aChannels, uint32_t aFrames) {
TEST(TestAudioTrackGraph, AudioProcessingTrackDisabling)
{
- MockCubeb* cubeb = new MockCubeb();
+ MockCubeb* cubeb = new MockCubeb(MockCubeb::RunningMode::Manual);
CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
MediaTrackGraph* graph = MediaTrackGraphImpl::GetInstance(
@@ -1481,7 +1513,8 @@ TEST(TestAudioTrackGraph, AudioProcessingTrackDisabling)
RefPtr<ProcessedMediaTrack> outputTrack;
RefPtr<MediaInputPort> port;
RefPtr<AudioInputProcessing> listener;
- auto p = Invoke([&] {
+ RefPtr<OnFallbackListener> fallbackListener;
+ DispatchFunction([&] {
processingTrack = AudioProcessingTrack::Create(graph);
outputTrack = graph->CreateForwardedInputTrack(MediaSegment::AUDIO);
outputTrack->QueueSetAutoend(false);
@@ -1495,32 +1528,36 @@ TEST(TestAudioTrackGraph, AudioProcessingTrackDisabling)
PRINCIPAL_HANDLE_NONE);
processingTrack->GraphImpl()->AppendMessage(
MakeUnique<StartInputProcessing>(processingTrack, listener));
- return graph->NotifyWhenDeviceStarted(nullptr);
+ fallbackListener = new OnFallbackListener(processingTrack);
+ processingTrack->AddListener(fallbackListener);
});
+ ProcessEventQueue();
+
RefPtr<SmartMockCubebStream> stream = WaitFor(cubeb->StreamInitEvent());
EXPECT_TRUE(stream->mHasInput);
- Unused << WaitFor(p);
+
+ while (
+ stream->State()
+ .map([](cubeb_state aState) { return aState != CUBEB_STATE_STARTED; })
+ .valueOr(true)) {
+ std::this_thread::sleep_for(std::chrono::milliseconds(1));
+ }
+
+ // Wait for the AudioCallbackDriver to come into effect.
+ while (fallbackListener->OnFallback()) {
+ EXPECT_EQ(stream->ManualDataCallback(0),
+ MockCubebStream::KeepProcessing::Yes);
+ std::this_thread::sleep_for(std::chrono::milliseconds(1));
+ }
stream->SetOutputRecordingEnabled(true);
// Wait for a second worth of audio data.
- uint64_t targetPosition = graph->GraphRate();
- auto AdvanceToTargetPosition = [&] {
- DispatchFunction([&] {
- processingTrack->GraphImpl()->AppendMessage(MakeUnique<GoFaster>(cubeb));
- });
- WaitUntil(stream->FramesProcessedEvent(), [&](uint32_t aFrames) {
- // Position() gives a more up-to-date indication than summing aFrames if
- // multiple events are queued.
- if (stream->Position() < targetPosition) {
- return false;
- }
- cubeb->DontGoFaster();
- return true;
- });
- };
- AdvanceToTargetPosition();
+ const long step = graph->GraphRate() / 100; // 10ms
+ for (long frames = 0; frames < graph->GraphRate(); frames += step) {
+ stream->ManualDataCallback(step);
+ }
const uint32_t ITERATION_COUNT = 5;
uint32_t iterations = ITERATION_COUNT;
@@ -1537,8 +1574,11 @@ TEST(TestAudioTrackGraph, AudioProcessingTrackDisabling)
}
});
- targetPosition += graph->GraphRate();
- AdvanceToTargetPosition();
+ ProcessEventQueue();
+
+ for (long frames = 0; frames < graph->GraphRate(); frames += step) {
+ stream->ManualDataCallback(step);
+ }
}
// Clean up.
@@ -1548,10 +1588,18 @@ TEST(TestAudioTrackGraph, AudioProcessingTrackDisabling)
port->Destroy();
processingTrack->GraphImpl()->AppendMessage(
MakeUnique<StopInputProcessing>(processingTrack, listener));
+ processingTrack->RemoveListener(fallbackListener);
processingTrack->DisconnectDeviceInput();
processingTrack->Destroy();
});
+ ProcessEventQueue();
+
+ // Close the input and switch driver.
+ while (stream->ManualDataCallback(0) != MockCubebStream::KeepProcessing::No) {
+ std::cerr << "Waiting for switch...\n";
+ }
+
uint64_t preSilenceSamples;
uint32_t estimatedFreq;
uint32_t nrDiscontinuities;
@@ -1604,7 +1652,7 @@ TEST(TestAudioTrackGraph, SetRequestedInputChannelCount)
EXPECT_EQ(track1->DeviceId().value(), device1);
auto started =
- Invoke([&] { return graph->NotifyWhenDeviceStarted(nullptr); });
+ InvokeAsync([&] { return graph->NotifyWhenDeviceStarted(nullptr); });
RefPtr<SmartMockCubebStream> stream1 = WaitFor(cubeb->StreamInitEvent());
EXPECT_TRUE(stream1->mHasInput);
@@ -1829,7 +1877,7 @@ TEST(TestAudioTrackGraph, RestartAudioIfProcessingMaxChannelCountChanged)
EXPECT_EQ(track1->DeviceId().value(), nativeDevice);
auto started =
- Invoke([&] { return graph->NotifyWhenDeviceStarted(nullptr); });
+ InvokeAsync([&] { return graph->NotifyWhenDeviceStarted(nullptr); });
nativeStream = WaitFor(cubeb->StreamInitEvent());
EXPECT_TRUE(nativeStream->mHasInput);
@@ -1961,44 +2009,23 @@ TEST(TestAudioTrackGraph, SetInputChannelCountBeforeAudioCallbackDriver)
const CubebUtils::AudioDeviceID deviceId = (CubebUtils::AudioDeviceID)1;
RefPtr<AudioProcessingTrack> track;
RefPtr<AudioInputProcessing> listener;
- {
- MozPromiseHolder<GenericPromise> h;
- RefPtr<GenericPromise> p = h.Ensure(__func__);
-
- struct GuardMessage : public ControlMessage {
- MozPromiseHolder<GenericPromise> mHolder;
-
- GuardMessage(MediaTrack* aTrack,
- MozPromiseHolder<GenericPromise>&& aHolder)
- : ControlMessage(aTrack), mHolder(std::move(aHolder)) {}
- void Run() override {
- mTrack->GraphImpl()->Dispatch(NS_NewRunnableFunction(
- "TestAudioTrackGraph::SetInputChannel::Message::Resolver",
- [holder = std::move(mHolder)]() mutable {
- holder.Resolve(true, __func__);
- }));
- }
- };
-
- DispatchFunction([&] {
- track = AudioProcessingTrack::Create(graph);
- listener = new AudioInputProcessing(2);
- QueueExpectIsPassThrough(track, listener);
- track->SetInputProcessing(listener);
-
- MediaEnginePrefs settings;
- settings.mChannels = 1;
- QueueApplySettings(track, listener, settings);
+ DispatchFunction([&] {
+ track = AudioProcessingTrack::Create(graph);
+ listener = new AudioInputProcessing(2);
+ QueueExpectIsPassThrough(track, listener);
+ track->SetInputProcessing(listener);
- track->GraphImpl()->AppendMessage(
- MakeUnique<GuardMessage>(track, std::move(h)));
- });
+ MediaEnginePrefs settings;
+ settings.mChannels = 1;
+ QueueApplySettings(track, listener, settings);
+ });
- Unused << WaitFor(p);
- }
+ // Wait for AudioCallbackDriver to init output-only stream.
+ RefPtr<SmartMockCubebStream> stream = WaitFor(cubeb->StreamInitEvent());
+ EXPECT_FALSE(stream->mHasInput);
+ EXPECT_TRUE(stream->mHasOutput);
// Open a full-duplex AudioCallbackDriver.
-
RefPtr<MediaInputPort> port;
DispatchFunction([&] {
track->GraphImpl()->AppendMessage(
@@ -2006,22 +2033,13 @@ TEST(TestAudioTrackGraph, SetInputChannelCountBeforeAudioCallbackDriver)
track->ConnectDeviceInput(deviceId, listener, PRINCIPAL_HANDLE_NONE);
});
- // MediaTrackGraph will create a output-only AudioCallbackDriver in
- // CheckDriver before we open an audio input above, since AudioProcessingTrack
- // is a audio-type MediaTrack, so we need to wait here until the duplex
- // AudioCallbackDriver is created.
- RefPtr<SmartMockCubebStream> stream;
- SpinEventLoopUntil<ProcessFailureBehavior::IgnoreAndContinue>(
- "TEST(TestAudioTrackGraph, SetInputChannelCountBeforeAudioCallbackDriver)"_ns,
- [&] {
- stream = WaitFor(cubeb->StreamInitEvent());
- EXPECT_TRUE(stream->mHasOutput);
- return stream->mHasInput;
- });
+ stream = WaitFor(cubeb->StreamInitEvent());
+ EXPECT_TRUE(stream->mHasInput);
+ EXPECT_TRUE(stream->mHasOutput);
EXPECT_EQ(stream->InputChannels(), 1U);
Unused << WaitFor(
- Invoke([&] { return graph->NotifyWhenDeviceStarted(nullptr); }));
+ InvokeAsync([&] { return graph->NotifyWhenDeviceStarted(nullptr); }));
// Clean up.
DispatchFunction([&] {
@@ -2257,7 +2275,7 @@ TEST(TestAudioTrackGraph, SwitchNativeAudioProcessingTrack)
EXPECT_EQ(track1->DeviceId().value(), device1);
auto started =
- Invoke([&] { return graph->NotifyWhenDeviceStarted(nullptr); });
+ InvokeAsync([&] { return graph->NotifyWhenDeviceStarted(nullptr); });
RefPtr<SmartMockCubebStream> stream1 = WaitFor(cubeb->StreamInitEvent());
EXPECT_TRUE(stream1->mHasInput);
@@ -2350,23 +2368,6 @@ TEST(TestAudioTrackGraph, SwitchNativeAudioProcessingTrack)
std::cerr << "No native input now" << std::endl;
}
-class OnFallbackListener : public MediaTrackListener {
- const RefPtr<MediaTrack> mTrack;
- Atomic<bool> mOnFallback{true};
-
- public:
- explicit OnFallbackListener(MediaTrack* aTrack) : mTrack(aTrack) {}
-
- bool OnFallback() { return mOnFallback; }
-
- void NotifyOutput(MediaTrackGraph*, TrackTime) override {
- if (auto* ad =
- mTrack->GraphImpl()->CurrentDriver()->AsAudioCallbackDriver()) {
- mOnFallback = ad->OnFallback();
- }
- }
-};
-
void TestCrossGraphPort(uint32_t aInputRate, uint32_t aOutputRate,
float aDriftFactor, uint32_t aRunTimeSeconds = 10,
uint32_t aNumExpectedUnderruns = 0) {
@@ -2409,6 +2410,13 @@ void TestCrossGraphPort(uint32_t aInputRate, uint32_t aOutputRate,
RefPtr<SmartMockCubebStream> inputStream = WaitFor(cubeb->StreamInitEvent());
+ while (
+ inputStream->State()
+ .map([](cubeb_state aState) { return aState != CUBEB_STATE_STARTED; })
+ .valueOr(true)) {
+ std::this_thread::sleep_for(std::chrono::milliseconds(1));
+ }
+
// Wait for the primary AudioCallbackDriver to come into effect.
while (primaryFallbackListener->OnFallback()) {
EXPECT_EQ(inputStream->ManualDataCallback(0),
@@ -2441,6 +2449,13 @@ void TestCrossGraphPort(uint32_t aInputRate, uint32_t aOutputRate,
RefPtr<SmartMockCubebStream> partnerStream =
WaitFor(cubeb->StreamInitEvent());
+ while (
+ partnerStream->State()
+ .map([](cubeb_state aState) { return aState != CUBEB_STATE_STARTED; })
+ .valueOr(true)) {
+ std::this_thread::sleep_for(std::chrono::milliseconds(1));
+ }
+
// Process the CrossGraphTransmitter on the primary graph.
EXPECT_EQ(inputStream->ManualDataCallback(0),
MockCubebStream::KeepProcessing::Yes);
@@ -2453,8 +2468,7 @@ void TestCrossGraphPort(uint32_t aInputRate, uint32_t aOutputRate,
}
DispatchFunction([&] { receiver->RemoveListener(partnerFallbackListener); });
- while (NS_ProcessNextEvent(nullptr, false)) {
- }
+ ProcessEventQueue();
nsIThread* currentThread = NS_GetCurrentThread();
cubeb_state inputState = CUBEB_STATE_STARTED;
@@ -2499,8 +2513,7 @@ void TestCrossGraphPort(uint32_t aInputRate, uint32_t aOutputRate,
processingTrack->Destroy();
});
- while (NS_ProcessNextEvent(nullptr, false)) {
- }
+ ProcessEventQueue();
EXPECT_EQ(inputStream->ManualDataCallback(0),
MockCubebStream::KeepProcessing::Yes);
@@ -2816,6 +2829,221 @@ TEST(TestAudioInputProcessing, ClockDriftExpectation)
}
#endif // MOZ_WEBRTC
+TEST(TestAudioTrackGraph, PlatformProcessing)
+{
+ constexpr cubeb_input_processing_params allParams =
+ CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION |
+ CUBEB_INPUT_PROCESSING_PARAM_NOISE_SUPPRESSION |
+ CUBEB_INPUT_PROCESSING_PARAM_AUTOMATIC_GAIN_CONTROL |
+ CUBEB_INPUT_PROCESSING_PARAM_VOICE_ISOLATION;
+ MockCubeb* cubeb = new MockCubeb(MockCubeb::RunningMode::Manual);
+ cubeb->SetSupportedInputProcessingParams(allParams, CUBEB_OK);
+ CubebUtils::ForceSetCubebContext(cubeb->AsCubebContext());
+
+ MediaTrackGraph* graph = MediaTrackGraphImpl::GetInstance(
+ MediaTrackGraph::SYSTEM_THREAD_DRIVER, /*Window ID*/ 1,
+ CubebUtils::PreferredSampleRate(/* aShouldResistFingerprinting */ false),
+ nullptr, GetMainThreadSerialEventTarget());
+
+ const CubebUtils::AudioDeviceID device = (CubebUtils::AudioDeviceID)1;
+
+ // Set up mock listener.
+ RefPtr<MockAudioDataListener> listener = MakeRefPtr<MockAudioDataListener>();
+ EXPECT_CALL(*listener, IsVoiceInput).WillRepeatedly(Return(true));
+ EXPECT_CALL(*listener, RequestedInputChannelCount).WillRepeatedly(Return(1));
+ EXPECT_CALL(*listener, RequestedInputProcessingParams)
+ .WillRepeatedly(Return(CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION));
+ EXPECT_CALL(*listener, Disconnect);
+
+ // Expectations.
+ const Result<cubeb_input_processing_params, int> notSupportedResult(
+ Err(CUBEB_ERROR_NOT_SUPPORTED));
+ const Result<cubeb_input_processing_params, int> errorResult(
+ Err(CUBEB_ERROR));
+ const Result<cubeb_input_processing_params, int> noneResult(
+ CUBEB_INPUT_PROCESSING_PARAM_NONE);
+ const Result<cubeb_input_processing_params, int> echoResult(
+ CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION);
+ const Result<cubeb_input_processing_params, int> noiseResult(
+ CUBEB_INPUT_PROCESSING_PARAM_NOISE_SUPPRESSION);
+ Atomic<int> numProcessingParamsResults(0);
+ {
+ InSequence s;
+ // On first driver start.
+ EXPECT_CALL(*listener,
+ NotifySetRequestedInputProcessingParamsResult(
+ graph, CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION,
+ Eq(std::ref(echoResult))))
+ .WillOnce([&] { ++numProcessingParamsResults; });
+ // After requesting something else.
+ EXPECT_CALL(*listener,
+ NotifySetRequestedInputProcessingParamsResult(
+ graph, CUBEB_INPUT_PROCESSING_PARAM_NOISE_SUPPRESSION,
+ Eq(std::ref(noiseResult))))
+ .WillOnce([&] { ++numProcessingParamsResults; });
+ // After error request.
+ EXPECT_CALL(*listener,
+ NotifySetRequestedInputProcessingParamsResult(
+ graph, CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION,
+ Eq(std::ref(errorResult))))
+ .WillOnce([&] { ++numProcessingParamsResults; });
+ // After requesting None.
+ EXPECT_CALL(*listener, NotifySetRequestedInputProcessingParamsResult(
+ graph, CUBEB_INPUT_PROCESSING_PARAM_NONE,
+ Eq(std::ref(noneResult))))
+ .WillOnce([&] { ++numProcessingParamsResults; });
+ // After driver switch.
+ EXPECT_CALL(*listener, NotifySetRequestedInputProcessingParamsResult(
+ graph, CUBEB_INPUT_PROCESSING_PARAM_NONE,
+ Eq(std::ref(noneResult))))
+ .WillOnce([&] { ++numProcessingParamsResults; });
+ // After requesting something not supported.
+ EXPECT_CALL(*listener,
+ NotifySetRequestedInputProcessingParamsResult(
+ graph, CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION,
+ Eq(std::ref(noneResult))))
+ .WillOnce([&] { ++numProcessingParamsResults; });
+ // After a request while the backend does not support processing params.
+ EXPECT_CALL(*listener,
+ NotifySetRequestedInputProcessingParamsResult(
+ graph, CUBEB_INPUT_PROCESSING_PARAM_NOISE_SUPPRESSION,
+ Eq(std::ref(notSupportedResult))))
+ .WillOnce([&] { ++numProcessingParamsResults; });
+ }
+
+ // Open a device.
+ RefPtr<TestDeviceInputConsumerTrack> track;
+ RefPtr<OnFallbackListener> fallbackListener;
+ DispatchFunction([&] {
+ track = TestDeviceInputConsumerTrack::Create(graph);
+ track->ConnectDeviceInput(device, listener, PRINCIPAL_HANDLE_NONE);
+ fallbackListener = new OnFallbackListener(track);
+ track->AddListener(fallbackListener);
+ });
+
+ RefPtr<SmartMockCubebStream> stream = WaitFor(cubeb->StreamInitEvent());
+ EXPECT_TRUE(stream->mHasInput);
+ EXPECT_TRUE(stream->mHasOutput);
+ EXPECT_EQ(stream->InputChannels(), 1U);
+ EXPECT_EQ(stream->GetInputDeviceID(), device);
+
+ while (
+ stream->State()
+ .map([](cubeb_state aState) { return aState != CUBEB_STATE_STARTED; })
+ .valueOr(true)) {
+ std::this_thread::sleep_for(std::chrono::milliseconds(1));
+ }
+
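+ // Helper: pump the mock stream and the main-thread event queue until the
+ // listener has observed aNumResult processing-params results.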
+ const auto waitForResult = [&](int aNumResult) {
+ while (numProcessingParamsResults < aNumResult) {
+ EXPECT_EQ(stream->ManualDataCallback(0),
+ MockCubebStream::KeepProcessing::Yes);
+ NS_ProcessNextEvent();
+ std::this_thread::sleep_for(std::chrono::milliseconds(1));
+ }
+ };
+
+ // Wait for the AudioCallbackDriver to come into effect.
+ while (fallbackListener->OnFallback()) {
+ EXPECT_EQ(stream->ManualDataCallback(0),
+ MockCubebStream::KeepProcessing::Yes);
+ std::this_thread::sleep_for(std::chrono::milliseconds(1));
+ }
+
+ // Wait for the first result after driver creation.
+ waitForResult(1);
+
+ // Request new processing params.
+ EXPECT_CALL(*listener, RequestedInputProcessingParams)
+ .WillRepeatedly(Return(CUBEB_INPUT_PROCESSING_PARAM_NOISE_SUPPRESSION));
+ waitForResult(2);
+
+ // Test with returning error on new request.
+ cubeb->SetInputProcessingApplyRv(CUBEB_ERROR);
+ EXPECT_CALL(*listener, RequestedInputProcessingParams)
+ .WillRepeatedly(Return(CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION));
+ waitForResult(3);
+
+ // Test unsetting all params.
+ cubeb->SetInputProcessingApplyRv(CUBEB_OK);
+ EXPECT_CALL(*listener, RequestedInputProcessingParams)
+ .WillRepeatedly(Return(CUBEB_INPUT_PROCESSING_PARAM_NONE));
+ waitForResult(4);
+
+ // Switch driver.
+ EXPECT_CALL(*listener, RequestedInputChannelCount).WillRepeatedly(Return(2));
+ DispatchFunction([&] {
+ track->QueueControlMessageWithNoShutdown(
+ [&] { graph->ReevaluateInputDevice(device); });
+ });
+ ProcessEventQueue();
+ // Process the reevaluation message.
+ EXPECT_EQ(stream->ManualDataCallback(0),
+ MockCubebStream::KeepProcessing::Yes);
+ // Perform the switch.
+ auto initPromise = TakeN(cubeb->StreamInitEvent(), 1);
+ EXPECT_EQ(stream->ManualDataCallback(0), MockCubebStream::KeepProcessing::No);
+ std::tie(stream) = WaitFor(initPromise).unwrap()[0];
+ EXPECT_TRUE(stream->mHasInput);
+ EXPECT_TRUE(stream->mHasOutput);
+ EXPECT_EQ(stream->InputChannels(), 2U);
+ EXPECT_EQ(stream->GetInputDeviceID(), device);
+
+ while (
+ stream->State()
+ .map([](cubeb_state aState) { return aState != CUBEB_STATE_STARTED; })
+ .valueOr(true)) {
+ std::this_thread::sleep_for(std::chrono::milliseconds(1));
+ }
+
+ // Wait for the new AudioCallbackDriver to come into effect.
+ fallbackListener->Reset();
+ while (fallbackListener->OnFallback()) {
+ EXPECT_EQ(stream->ManualDataCallback(0),
+ MockCubebStream::KeepProcessing::Yes);
+ std::this_thread::sleep_for(std::chrono::milliseconds(1));
+ }
+
+ // Wait for the first result after driver creation.
+ waitForResult(5);
+
+ // Test requesting something not supported.
+ cubeb->SetSupportedInputProcessingParams(
+ CUBEB_INPUT_PROCESSING_PARAM_NOISE_SUPPRESSION |
+ CUBEB_INPUT_PROCESSING_PARAM_AUTOMATIC_GAIN_CONTROL |
+ CUBEB_INPUT_PROCESSING_PARAM_VOICE_ISOLATION,
+ CUBEB_OK);
+ EXPECT_CALL(*listener, RequestedInputProcessingParams)
+ .WillRepeatedly(Return(CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION));
+ waitForResult(6);
+
+ // Test requesting something when unsupported by backend.
+ cubeb->SetSupportedInputProcessingParams(CUBEB_INPUT_PROCESSING_PARAM_NONE,
+ CUBEB_ERROR_NOT_SUPPORTED);
+ EXPECT_CALL(*listener, RequestedInputProcessingParams)
+ .WillRepeatedly(Return(CUBEB_INPUT_PROCESSING_PARAM_NOISE_SUPPRESSION));
+ waitForResult(7);
+
+ // Clean up.
+ DispatchFunction([&] {
+ track->RemoveListener(fallbackListener);
+ track->Destroy();
+ });
+ ProcessEventQueue();
+ // Process the destroy message.
+ EXPECT_EQ(stream->ManualDataCallback(0),
+ MockCubebStream::KeepProcessing::Yes);
+ // Shut down.
+ EXPECT_EQ(stream->ManualDataCallback(0), MockCubebStream::KeepProcessing::No);
+ RefPtr<SmartMockCubebStream> destroyedStream =
+ WaitFor(cubeb->StreamDestroyEvent());
+ EXPECT_EQ(destroyedStream.get(), stream.get());
+ {
+ NativeInputTrack* native = graph->GetNativeInputTrackMainThread();
+ ASSERT_TRUE(!native);
+ }
+}
+
#undef Invoke
#undef DispatchFunction
#undef DispatchMethod
diff --git a/dom/media/gtest/TestCDMStorage.cpp b/dom/media/gtest/TestCDMStorage.cpp
index d6249cc95f..57bbbd9298 100644
--- a/dom/media/gtest/TestCDMStorage.cpp
+++ b/dom/media/gtest/TestCDMStorage.cpp
@@ -58,7 +58,7 @@ template <typename T>
static nsresult EnumerateCDMStorageDir(const nsACString& aDir, T&& aDirIter) {
RefPtr<GeckoMediaPluginServiceParent> service =
GeckoMediaPluginServiceParent::GetSingleton();
- MOZ_ASSERT(service);
+ MOZ_RELEASE_ASSERT(service);
// $profileDir/gmp/$platform/
nsCOMPtr<nsIFile> path;
@@ -94,7 +94,7 @@ class GMPShutdownObserver : public nsIRunnable, public nsIObserver {
NS_DECL_THREADSAFE_ISUPPORTS
NS_IMETHOD Run() override {
- MOZ_ASSERT(NS_IsMainThread());
+ MOZ_RELEASE_ASSERT(NS_IsMainThread());
nsCOMPtr<nsIObserverService> observerService =
mozilla::services::GetObserverService();
EXPECT_TRUE(observerService);
@@ -133,7 +133,7 @@ class NotifyObserversTask : public Runnable {
explicit NotifyObserversTask(const char* aTopic)
: mozilla::Runnable("NotifyObserversTask"), mTopic(aTopic) {}
NS_IMETHOD Run() override {
- MOZ_ASSERT(NS_IsMainThread());
+ MOZ_RELEASE_ASSERT(NS_IsMainThread());
nsCOMPtr<nsIObserverService> observerService =
mozilla::services::GetObserverService();
if (observerService) {
@@ -153,7 +153,7 @@ class ClearCDMStorageTask : public nsIRunnable, public nsIObserver {
NS_DECL_THREADSAFE_ISUPPORTS
NS_IMETHOD Run() override {
- MOZ_ASSERT(NS_IsMainThread());
+ MOZ_RELEASE_ASSERT(NS_IsMainThread());
nsCOMPtr<nsIObserverService> observerService =
mozilla::services::GetObserverService();
EXPECT_TRUE(observerService);
@@ -272,7 +272,7 @@ static nsCString GetNodeId(const nsAString& aOrigin,
static bool IsCDMStorageIsEmpty() {
RefPtr<GeckoMediaPluginServiceParent> service =
GeckoMediaPluginServiceParent::GetSingleton();
- MOZ_ASSERT(service);
+ MOZ_RELEASE_ASSERT(service);
nsCOMPtr<nsIFile> storage;
nsresult rv = service->GetStorageDir(getter_AddRefs(storage));
EXPECT_NS_SUCCEEDED(rv);
@@ -286,14 +286,14 @@ static bool IsCDMStorageIsEmpty() {
static void AssertIsOnGMPThread() {
RefPtr<GeckoMediaPluginService> service =
GeckoMediaPluginService::GetGeckoMediaPluginService();
- MOZ_ASSERT(service);
+ MOZ_RELEASE_ASSERT(service);
nsCOMPtr<nsIThread> thread;
service->GetThread(getter_AddRefs(thread));
- MOZ_ASSERT(thread);
+ MOZ_RELEASE_ASSERT(thread);
nsCOMPtr<nsIThread> currentThread;
- DebugOnly<nsresult> rv = NS_GetCurrentThread(getter_AddRefs(currentThread));
- MOZ_ASSERT(NS_SUCCEEDED(rv));
- MOZ_ASSERT(currentThread == thread);
+ nsresult rv = NS_GetCurrentThread(getter_AddRefs(currentThread));
+ MOZ_RELEASE_ASSERT(NS_SUCCEEDED(rv));
+ MOZ_RELEASE_ASSERT(currentThread == thread);
}
class CDMStorageTest {
@@ -1049,8 +1049,8 @@ class CDMStorageTest {
constexpr auto data = "Just_some_arbitrary_data."_ns;
- MOZ_ASSERT(longRecordName.Length() < GMP_MAX_RECORD_NAME_SIZE);
- MOZ_ASSERT(longRecordName.Length() > 260); // Windows MAX_PATH
+ MOZ_RELEASE_ASSERT(longRecordName.Length() < GMP_MAX_RECORD_NAME_SIZE);
+ MOZ_RELEASE_ASSERT(longRecordName.Length() > 260); // Windows MAX_PATH
nsCString response("stored ");
response.Append(longRecordName);
diff --git a/dom/media/gtest/TestDeviceInputTrack.cpp b/dom/media/gtest/TestDeviceInputTrack.cpp
index 14b5227f9d..fca06d5e4a 100644
--- a/dom/media/gtest/TestDeviceInputTrack.cpp
+++ b/dom/media/gtest/TestDeviceInputTrack.cpp
@@ -87,7 +87,7 @@ TEST_F(TestDeviceInputTrack, DeviceInputConsumerTrack) {
class TestDeviceInputConsumerTrack : public DeviceInputConsumerTrack {
public:
static TestDeviceInputConsumerTrack* Create(MediaTrackGraph* aGraph) {
- MOZ_ASSERT(NS_IsMainThread());
+ MOZ_RELEASE_ASSERT(NS_IsMainThread());
TestDeviceInputConsumerTrack* track =
new TestDeviceInputConsumerTrack(aGraph->GraphRate());
aGraph->AddTrack(track);
@@ -95,7 +95,7 @@ TEST_F(TestDeviceInputTrack, DeviceInputConsumerTrack) {
}
void Destroy() {
- MOZ_ASSERT(NS_IsMainThread());
+ MOZ_RELEASE_ASSERT(NS_IsMainThread());
DisconnectDeviceInput();
DeviceInputConsumerTrack::Destroy();
}
@@ -108,7 +108,7 @@ TEST_F(TestDeviceInputTrack, DeviceInputConsumerTrack) {
return 0;
}
DeviceInputTrack* t = mInputs[0]->GetSource()->AsDeviceInputTrack();
- MOZ_ASSERT(t);
+ MOZ_RELEASE_ASSERT(t);
return t->NumberOfChannels();
}
@@ -122,16 +122,26 @@ TEST_F(TestDeviceInputTrack, DeviceInputConsumerTrack) {
TestAudioDataListener(uint32_t aChannelCount, bool aIsVoice)
: mChannelCount(aChannelCount), mIsVoice(aIsVoice) {}
// Graph thread APIs: AudioDataListenerInterface implementations.
- uint32_t RequestedInputChannelCount(MediaTrackGraph* aGraph) override {
+ uint32_t RequestedInputChannelCount(
+ MediaTrackGraph* aGraph) const override {
aGraph->AssertOnGraphThread();
return mChannelCount;
}
+ cubeb_input_processing_params RequestedInputProcessingParams(
+ MediaTrackGraph*) const override {
+ return CUBEB_INPUT_PROCESSING_PARAM_NONE;
+ }
bool IsVoiceInput(MediaTrackGraph* aGraph) const override {
return mIsVoice;
};
void DeviceChanged(MediaTrackGraph* aGraph) override { /* Ignored */
}
void Disconnect(MediaTrackGraph* aGraph) override{/* Ignored */};
+ void NotifySetRequestedInputProcessingParamsResult(
+ MediaTrackGraph* aGraph, cubeb_input_processing_params aRequestedParams,
+ const Result<cubeb_input_processing_params, int>& aResult) override {
+ /* Ignored */
+ }
private:
~TestAudioDataListener() = default;
diff --git a/dom/media/gtest/TestMP4Demuxer.cpp b/dom/media/gtest/TestMP4Demuxer.cpp
index 43dfdf19a4..1a1bde8035 100644
--- a/dom/media/gtest/TestMP4Demuxer.cpp
+++ b/dom/media/gtest/TestMP4Demuxer.cpp
@@ -56,7 +56,7 @@ class MP4DemuxerBinding {
}
RefPtr<GenericPromise> CheckTrackKeyFrame(MediaTrackDemuxer* aTrackDemuxer) {
- MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+ MOZ_RELEASE_ASSERT(mTaskQueue->IsCurrentThreadIn());
RefPtr<MediaTrackDemuxer> track = aTrackDemuxer;
RefPtr<MP4DemuxerBinding> binding = this;
@@ -97,7 +97,7 @@ class MP4DemuxerBinding {
}
RefPtr<GenericPromise> CheckTrackSamples(MediaTrackDemuxer* aTrackDemuxer) {
- MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+ MOZ_RELEASE_ASSERT(mTaskQueue->IsCurrentThreadIn());
RefPtr<MediaTrackDemuxer> track = aTrackDemuxer;
RefPtr<MP4DemuxerBinding> binding = this;
diff --git a/dom/media/gtest/TestMediaDataEncoder.cpp b/dom/media/gtest/TestMediaDataEncoder.cpp
index 39c92fb19c..fefedcce43 100644
--- a/dom/media/gtest/TestMediaDataEncoder.cpp
+++ b/dom/media/gtest/TestMediaDataEncoder.cpp
@@ -193,7 +193,7 @@ static already_AddRefed<MediaDataEncoder> CreateH264Encoder(
}
void WaitForShutdown(const RefPtr<MediaDataEncoder>& aEncoder) {
- MOZ_ASSERT(aEncoder);
+ MOZ_RELEASE_ASSERT(aEncoder);
Maybe<bool> result;
// media::Await() supports exclusive promises only, but ShutdownPromise is
diff --git a/dom/media/gtest/TestWebMWriter.cpp b/dom/media/gtest/TestWebMWriter.cpp
index 837ee6a2c6..5384fd7c99 100644
--- a/dom/media/gtest/TestWebMWriter.cpp
+++ b/dom/media/gtest/TestWebMWriter.cpp
@@ -223,7 +223,7 @@ struct WebMioData {
};
static int webm_read(void* aBuffer, size_t aLength, void* aUserData) {
- NS_ASSERTION(aUserData, "aUserData must point to a valid WebMioData");
+ MOZ_RELEASE_ASSERT(aUserData, "aUserData must point to a valid WebMioData");
WebMioData* ioData = static_cast<WebMioData*>(aUserData);
// Check the read length.
@@ -247,7 +247,7 @@ static int webm_read(void* aBuffer, size_t aLength, void* aUserData) {
}
static int webm_seek(int64_t aOffset, int aWhence, void* aUserData) {
- NS_ASSERTION(aUserData, "aUserData must point to a valid WebMioData");
+ MOZ_RELEASE_ASSERT(aUserData, "aUserData must point to a valid WebMioData");
WebMioData* ioData = static_cast<WebMioData*>(aUserData);
if (Abs(aOffset) > ioData->data.Length()) {
@@ -281,7 +281,7 @@ static int webm_seek(int64_t aOffset, int aWhence, void* aUserData) {
}
static int64_t webm_tell(void* aUserData) {
- NS_ASSERTION(aUserData, "aUserData must point to a valid WebMioData");
+ MOZ_RELEASE_ASSERT(aUserData, "aUserData must point to a valid WebMioData");
WebMioData* ioData = static_cast<WebMioData*>(aUserData);
return ioData->offset.isValid() ? ioData->offset.value() : -1;
}
diff --git a/dom/media/ipc/MFCDMChild.cpp b/dom/media/ipc/MFCDMChild.cpp
index 2ba2bdaf4e..9df86b82f4 100644
--- a/dom/media/ipc/MFCDMChild.cpp
+++ b/dom/media/ipc/MFCDMChild.cpp
@@ -7,6 +7,7 @@
#include "mozilla/EMEUtils.h"
#include "mozilla/KeySystemConfig.h"
#include "mozilla/RefPtr.h"
+#include "mozilla/StaticString.h"
#include "mozilla/WMFCDMProxyCallback.h"
#include "nsString.h"
#include "RemoteDecoderManagerChild.h"
@@ -44,7 +45,7 @@ namespace mozilla {
#define INVOKE_ASYNC(method, promiseId, param1) \
do { \
- auto callsite = __func__; \
+ StaticString callsite = __func__; \
using ParamType = std::remove_reference<decltype(param1)>::type; \
mManagerThread->Dispatch(NS_NewRunnableFunction( \
callsite, [self = RefPtr{this}, callsite, promiseId, \
@@ -56,7 +57,7 @@ namespace mozilla {
#define INVOKE_ASYNC2(method, promiseId, param1, param2) \
do { \
- auto callsite = __func__; \
+ StaticString callsite = __func__; \
using ParamType1 = std::remove_reference<decltype(param1)>::type; \
using ParamType2 = std::remove_reference<decltype(param2)>::type; \
mManagerThread->Dispatch(NS_NewRunnableFunction( \
@@ -188,7 +189,7 @@ void MFCDMChild::AssertSendable() {
template <typename PromiseType>
already_AddRefed<PromiseType> MFCDMChild::InvokeAsync(
- std::function<void()>&& aCall, const char* aCallerName,
+ std::function<void()>&& aCall, StaticString aCallerName,
MozPromiseHolder<PromiseType>& aPromise) {
AssertSendable();
diff --git a/dom/media/ipc/MFCDMChild.h b/dom/media/ipc/MFCDMChild.h
index 3396b0c790..ec766cab24 100644
--- a/dom/media/ipc/MFCDMChild.h
+++ b/dom/media/ipc/MFCDMChild.h
@@ -30,7 +30,7 @@ class MFCDMChild final : public PMFCDMChild {
template <typename PromiseType>
already_AddRefed<PromiseType> InvokeAsync(
- std::function<void()>&& aCall, const char* aCallerName,
+ std::function<void()>&& aCall, StaticString aCallerName,
MozPromiseHolder<PromiseType>& aPromise);
using InitPromise = MozPromise<MFCDMInitIPDL, nsresult, true>;
diff --git a/dom/media/ipc/RDDChild.cpp b/dom/media/ipc/RDDChild.cpp
index fb2e14bb4f..6180ec7391 100644
--- a/dom/media/ipc/RDDChild.cpp
+++ b/dom/media/ipc/RDDChild.cpp
@@ -5,6 +5,8 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "RDDChild.h"
+#include "TelemetryProbesReporter.h"
+#include "VideoUtils.h"
#include "mozilla/FOGIPC.h"
#include "mozilla/RDDProcessManager.h"
#include "mozilla/dom/ContentParent.h"
@@ -142,6 +144,13 @@ mozilla::ipc::IPCResult RDDChild::RecvGetModulesTrust(
mozilla::ipc::IPCResult RDDChild::RecvUpdateMediaCodecsSupported(
const media::MediaCodecsSupported& aSupported) {
+#if defined(XP_MACOSX) || defined(XP_LINUX)
+ // We report this on GPUChild on Windows and Android
+ if (ContainHardwareCodecsSupported(aSupported)) {
+ mozilla::TelemetryProbesReporter::ReportDeviceMediaCodecSupported(
+ aSupported);
+ }
+#endif
dom::ContentParent::BroadcastMediaCodecsSupportedUpdate(
RemoteDecodeIn::RddProcess, aSupported);
return IPC_OK();
diff --git a/dom/media/ipc/RDDParent.cpp b/dom/media/ipc/RDDParent.cpp
index 8892e8fbbe..4b6c1372ce 100644
--- a/dom/media/ipc/RDDParent.cpp
+++ b/dom/media/ipc/RDDParent.cpp
@@ -52,6 +52,10 @@
# include "mozilla/SandboxTestingChild.h"
#endif
+#if defined(XP_MACOSX) || defined(XP_LINUX)
+# include "VideoUtils.h"
+#endif
+
namespace mozilla {
using namespace ipc;
@@ -159,18 +163,6 @@ mozilla::ipc::IPCResult RDDParent::RecvInit(
}
IPCResult RDDParent::RecvUpdateVar(const GfxVarUpdate& aUpdate) {
-#if defined(XP_WIN)
- auto scopeExit = MakeScopeExit(
- [couldUseHWDecoder = gfx::gfxVars::CanUseHardwareVideoDecoding()] {
- if (couldUseHWDecoder != gfx::gfxVars::CanUseHardwareVideoDecoding()) {
- // The capabilities of the system may have changed, force a refresh by
- // re-initializing the WMF PDM.
- WMFDecoderModule::Init();
- Unused << RDDParent::GetSingleton()->SendUpdateMediaCodecsSupported(
- PDMFactory::Supported(true /* force refresh */));
- }
- });
-#endif
gfxVars::ApplyUpdate(aUpdate);
return IPC_OK();
}
diff --git a/dom/media/ipc/RemoteImageHolder.h b/dom/media/ipc/RemoteImageHolder.h
index 981e24d150..36deab1ef1 100644
--- a/dom/media/ipc/RemoteImageHolder.h
+++ b/dom/media/ipc/RemoteImageHolder.h
@@ -58,12 +58,12 @@ class RemoteImageHolder final {
gfx::ColorRange mColorRange = {};
};
- template <>
- struct ipc::IPDLParamTraits<RemoteImageHolder> {
- static void Write(IPC::MessageWriter* aWriter, IProtocol* aActor,
- RemoteImageHolder&& aParam);
- static bool Read(IPC::MessageReader* aReader, IProtocol* aActor,
- RemoteImageHolder* aResult);
+template <>
+struct ipc::IPDLParamTraits<RemoteImageHolder> {
+ static void Write(IPC::MessageWriter* aWriter, IProtocol* aActor,
+ RemoteImageHolder&& aParam);
+ static bool Read(IPC::MessageReader* aReader, IProtocol* aActor,
+ RemoteImageHolder* aResult);
};
} // namespace mozilla
diff --git a/dom/media/mediacapabilities/KeyValueStorage.cpp b/dom/media/mediacapabilities/KeyValueStorage.cpp
index f0ac0aad7d..a2e007c598 100644
--- a/dom/media/mediacapabilities/KeyValueStorage.cpp
+++ b/dom/media/mediacapabilities/KeyValueStorage.cpp
@@ -94,7 +94,7 @@ class VoidCallback final : public nsIKeyValueVoidCallback {
mResultPromise.Reject(NS_ERROR_FAILURE, __func__);
return NS_OK;
}
- RefPtr<GenericPromise> Ensure(const char* aMethodName) {
+ RefPtr<GenericPromise> Ensure(StaticString aMethodName) {
return mResultPromise.Ensure(aMethodName);
}
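
For context on the recurring const char* -> StaticString migration in this
commit: StaticString binds only to string literals, so the stored pointer has
static storage duration and can be held by a promise without copying. A
minimal sketch under that assumption (all names below are invented for
illustration; this is not the mfbt implementation):

    #include <cstddef>
    #include <cstdio>

    // A StaticString-like wrapper: the template constructor accepts only
    // arrays of const char (i.e. string literals), so keeping the raw
    // pointer past the call is safe.
    class StaticStringSketch {
     public:
      template <size_t N>
      constexpr StaticStringSketch(const char (&aLiteral)[N]) : mStr(aLiteral) {}
      constexpr const char* get() const { return mStr; }

     private:
      const char* mStr;
    };

    void LogEnsure(StaticStringSketch aMethodName) {
      std::printf("Ensure() called from %s\n", aMethodName.get());
    }

    int main() {
      LogEnsure("KeyValueStorage::Put");  // literals only; a runtime char*
                                          // would fail to bind here.
      return 0;
    }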
diff --git a/dom/media/mediacontrol/ContentMediaController.cpp b/dom/media/mediacontrol/ContentMediaController.cpp
index e1fe574d9b..0c3bbbecdc 100644
--- a/dom/media/mediacontrol/ContentMediaController.cpp
+++ b/dom/media/mediacontrol/ContentMediaController.cpp
@@ -304,6 +304,37 @@ void ContentMediaAgent::UpdatePositionState(
}
}
+void ContentMediaAgent::UpdateGuessedPositionState(
+ uint64_t aBrowsingContextId, const nsID& aMediaId,
+ const Maybe<PositionState>& aState) {
+ RefPtr<BrowsingContext> bc = GetBrowsingContextForAgent(aBrowsingContextId);
+ if (!bc || bc->IsDiscarded()) {
+ return;
+ }
+
+ if (aState) {
+ LOG("Update guessed position state for BC %" PRId64
+ " media id %s (duration=%f, playbackRate=%f, position=%f)",
+ bc->Id(), aMediaId.ToString().get(), aState->mDuration,
+ aState->mPlaybackRate, aState->mLastReportedPlaybackPosition);
+ } else {
+ LOG("Clear guessed position state for BC %" PRId64 " media id %s", bc->Id(),
+ aMediaId.ToString().get());
+ }
+
+ if (XRE_IsContentProcess()) {
+ ContentChild* contentChild = ContentChild::GetSingleton();
+ Unused << contentChild->SendNotifyGuessedPositionStateChanged(bc, aMediaId,
+ aState);
+ return;
+ }
+ // This would only happen when we disable e10s.
+ if (RefPtr<IMediaInfoUpdater> updater =
+ bc->Canonical()->GetMediaController()) {
+ updater->UpdateGuessedPositionState(bc->Id(), aMediaId, aState);
+ }
+}
+
ContentMediaController::ContentMediaController(uint64_t aId) {
LOG("Create content media controller for BC %" PRId64, aId);
}
diff --git a/dom/media/mediacontrol/ContentMediaController.h b/dom/media/mediacontrol/ContentMediaController.h
index a58be24b9d..236b3b254d 100644
--- a/dom/media/mediacontrol/ContentMediaController.h
+++ b/dom/media/mediacontrol/ContentMediaController.h
@@ -67,6 +67,9 @@ class ContentMediaAgent : public IMediaInfoUpdater {
bool aIsInFullScreen) override;
void UpdatePositionState(uint64_t aBrowsingContextId,
const Maybe<PositionState>& aState) override;
+ void UpdateGuessedPositionState(uint64_t aBrowsingContextId,
+ const nsID& aMediaId,
+ const Maybe<PositionState>& aState) override;
// Use these methods to register/unregister `ContentMediaControlKeyReceiver`
// in order to listen to media control key events.
diff --git a/dom/media/mediacontrol/MediaControlKeyManager.cpp b/dom/media/mediacontrol/MediaControlKeyManager.cpp
index b40d3af91e..92e2679bdd 100644
--- a/dom/media/mediacontrol/MediaControlKeyManager.cpp
+++ b/dom/media/mediacontrol/MediaControlKeyManager.cpp
@@ -107,6 +107,7 @@ void MediaControlKeyManager::StopMonitoringControlKeys() {
nullptr);
obs->NotifyObservers(nullptr, "media-displayed-metadata-changed",
nullptr);
+ obs->NotifyObservers(nullptr, "media-position-state-changed", nullptr);
}
}
}
@@ -197,6 +198,12 @@ void MediaControlKeyManager::SetPositionState(
if (mEventSource && mEventSource->IsOpened()) {
mEventSource->SetPositionState(aState);
}
+
+ if (StaticPrefs::media_mediacontrol_testingevents_enabled()) {
+ if (nsCOMPtr<nsIObserverService> obs = services::GetObserverService()) {
+ obs->NotifyObservers(nullptr, "media-position-state-changed", nullptr);
+ }
+ }
}
void MediaControlKeyManager::OnPreferenceChange() {
diff --git a/dom/media/mediacontrol/MediaControlService.cpp b/dom/media/mediacontrol/MediaControlService.cpp
index f45ab4253d..c64749f556 100644
--- a/dom/media/mediacontrol/MediaControlService.cpp
+++ b/dom/media/mediacontrol/MediaControlService.cpp
@@ -482,6 +482,7 @@ void MediaControlService::ControllerManager::UpdateMainControllerInternal(
mSource->SetPlaybackState(mMainController->PlaybackState());
mSource->SetMediaMetadata(mMainController->GetCurrentMediaMetadata());
mSource->SetSupportedMediaKeys(mMainController->GetSupportedMediaKeys());
+ mSource->SetPositionState(mMainController->GetCurrentPositionState());
ConnectMainControllerEvents();
}
diff --git a/dom/media/mediacontrol/MediaPlaybackStatus.cpp b/dom/media/mediacontrol/MediaPlaybackStatus.cpp
index 80dedf8599..434d6dbd7e 100644
--- a/dom/media/mediacontrol/MediaPlaybackStatus.cpp
+++ b/dom/media/mediacontrol/MediaPlaybackStatus.cpp
@@ -71,6 +71,23 @@ void MediaPlaybackStatus::UpdateMediaAudibleState(uint64_t aContextId,
}
}
+void MediaPlaybackStatus::UpdateGuessedPositionState(
+ uint64_t aContextId, const nsID& aElementId,
+ const Maybe<PositionState>& aState) {
+ MOZ_ASSERT(NS_IsMainThread());
+ if (aState) {
+ LOG("Update guessed position state for context %" PRIu64
+ " element %s (duration=%f, playbackRate=%f, position=%f)",
+ aContextId, aElementId.ToString().get(), aState->mDuration,
+ aState->mPlaybackRate, aState->mLastReportedPlaybackPosition);
+ } else {
+ LOG("Clear guessed position state for context %" PRIu64 " element %s",
+ aContextId, aElementId.ToString().get());
+ }
+ ContextMediaInfo& info = GetNotNullContextInfo(aContextId);
+ info.UpdateGuessedPositionState(aElementId, aState);
+}
+
bool MediaPlaybackStatus::IsPlaying() const {
MOZ_ASSERT(NS_IsMainThread());
return std::any_of(mContextInfoMap.Values().cbegin(),
@@ -92,6 +109,35 @@ bool MediaPlaybackStatus::IsAnyMediaBeingControlled() const {
[](const auto& info) { return info->IsAnyMediaBeingControlled(); });
}
+Maybe<PositionState> MediaPlaybackStatus::GuessedMediaPositionState(
+ Maybe<uint64_t> aPreferredContextId) const {
+ auto contextId = aPreferredContextId;
+ if (!contextId) {
+ contextId = mOwningAudioFocusContextId;
+ }
+
+  // Either the preferred or the focused context.
+ if (contextId) {
+ auto entry = mContextInfoMap.Lookup(*contextId);
+ if (!entry) {
+ return Nothing();
+ }
+ LOG("Using guessed position state from preferred/focused BC %" PRId64,
+ *contextId);
+ return entry.Data()->GuessedPositionState();
+ }
+
+  // Look for the first position state.
+ for (const auto& context : mContextInfoMap.Values()) {
+ auto state = context->GuessedPositionState();
+ if (state) {
+ LOG("Using guessed position state from BC %" PRId64, context->Id());
+ return state;
+ }
+ }
+ return Nothing();
+}
+
MediaPlaybackStatus::ContextMediaInfo&
MediaPlaybackStatus::GetNotNullContextInfo(uint64_t aContextId) {
MOZ_ASSERT(NS_IsMainThread());
@@ -139,4 +185,22 @@ bool MediaPlaybackStatus::IsContextOwningAudioFocus(uint64_t aContextId) const {
: false;
}
+Maybe<PositionState>
+MediaPlaybackStatus::ContextMediaInfo::GuessedPositionState() const {
+ if (mGuessedPositionStateMap.Count() != 1) {
+ LOG("Count is %d", mGuessedPositionStateMap.Count());
+ return Nothing();
+ }
+ return Some(mGuessedPositionStateMap.begin()->GetData());
+}
+
+void MediaPlaybackStatus::ContextMediaInfo::UpdateGuessedPositionState(
+ const nsID& aElementId, const Maybe<PositionState>& aState) {
+ if (aState) {
+ mGuessedPositionStateMap.InsertOrUpdate(aElementId, *aState);
+ } else {
+ mGuessedPositionStateMap.Remove(aElementId);
+ }
+}
+
} // namespace mozilla::dom
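
The single-entry rule in ContextMediaInfo::GuessedPositionState() above is
deliberately conservative: a context only reports a guessed state when exactly
one of its media elements has one, since with several elements there is no
principled way to pick. A standalone sketch of the same rule, using plain STL
types in place of the Gecko ones:

    #include <map>
    #include <optional>

    struct PositionStateSketch {
      double mDuration = 0.0;
      double mPlaybackRate = 1.0;
      double mLastReportedPlaybackPosition = 0.0;
    };

    // Mirrors ContextMediaInfo::GuessedPositionState(): zero entries or more
    // than one entry yield no state; only an unambiguous single entry is
    // reported.
    std::optional<PositionStateSketch> GuessedState(
        const std::map<int, PositionStateSketch>& aPerElement) {
      if (aPerElement.size() != 1) {
        return std::nullopt;
      }
      return aPerElement.begin()->second;
    }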
diff --git a/dom/media/mediacontrol/MediaPlaybackStatus.h b/dom/media/mediacontrol/MediaPlaybackStatus.h
index da597e4dfa..f9ac25f73d 100644
--- a/dom/media/mediacontrol/MediaPlaybackStatus.h
+++ b/dom/media/mediacontrol/MediaPlaybackStatus.h
@@ -7,9 +7,11 @@
#include "mozilla/Maybe.h"
#include "mozilla/RefPtr.h"
+#include "mozilla/dom/MediaSession.h"
#include "nsISupportsImpl.h"
#include "nsTArray.h"
#include "nsTHashMap.h"
+#include "nsID.h"
namespace mozilla::dom {
@@ -63,10 +65,14 @@ class MediaPlaybackStatus final {
public:
void UpdateMediaPlaybackState(uint64_t aContextId, MediaPlaybackState aState);
void UpdateMediaAudibleState(uint64_t aContextId, MediaAudibleState aState);
+ void UpdateGuessedPositionState(uint64_t aContextId, const nsID& aElementId,
+ const Maybe<PositionState>& aState);
bool IsPlaying() const;
bool IsAudible() const;
bool IsAnyMediaBeingControlled() const;
+ Maybe<PositionState> GuessedMediaPositionState(
+ Maybe<uint64_t> aPreferredContextId) const;
Maybe<uint64_t> GetAudioFocusOwnerContextId() const;
@@ -121,6 +127,10 @@ class MediaPlaybackStatus final {
bool IsAnyMediaBeingControlled() const { return mControlledMediaNum > 0; }
uint64_t Id() const { return mContextId; }
+ Maybe<PositionState> GuessedPositionState() const;
+ void UpdateGuessedPositionState(const nsID& aElementId,
+ const Maybe<PositionState>& aState);
+
private:
/**
* The possible value for those three numbers should follow this rule,
@@ -130,6 +140,12 @@ class MediaPlaybackStatus final {
uint32_t mAudibleMediaNum = 0;
uint32_t mPlayingMediaNum = 0;
uint64_t mContextId = 0;
+
+ /**
+ * Contains the guessed position state of all media elements in this
+ * browsing context identified by their ID.
+ */
+ nsTHashMap<nsID, PositionState> mGuessedPositionStateMap;
};
ContextMediaInfo& GetNotNullContextInfo(uint64_t aContextId);
diff --git a/dom/media/mediacontrol/MediaStatusManager.cpp b/dom/media/mediacontrol/MediaStatusManager.cpp
index 633ae19a44..6e86dbf2eb 100644
--- a/dom/media/mediacontrol/MediaStatusManager.cpp
+++ b/dom/media/mediacontrol/MediaStatusManager.cpp
@@ -380,6 +380,29 @@ void MediaStatusManager::UpdatePositionState(
mPositionStateChangedEvent.Notify(aState);
}
+void MediaStatusManager::UpdateGuessedPositionState(
+ uint64_t aBrowsingContextId, const nsID& aMediaId,
+ const Maybe<PositionState>& aGuessedState) {
+ mPlaybackStatusDelegate.UpdateGuessedPositionState(aBrowsingContextId,
+ aMediaId, aGuessedState);
+
+ // The position state comes from a non-active media session and
+ // there is another one active (with some metadata).
+ if (mActiveMediaSessionContextId &&
+ *mActiveMediaSessionContextId != aBrowsingContextId) {
+ return;
+ }
+
+ // media session is declared for the updated session, but there's no active
+ // session - it will get emitted once the session becomes active
+ if (mMediaSessionInfoMap.Contains(aBrowsingContextId) &&
+ !mActiveMediaSessionContextId) {
+ return;
+ }
+
+ mPositionStateChangedEvent.Notify(GetCurrentPositionState());
+}
+
void MediaStatusManager::NotifySupportedKeysChangedIfNeeded(
uint64_t aBrowsingContextId) {
// Only the active media session's supported actions would be shown in virtual
@@ -431,11 +454,13 @@ MediaMetadataBase MediaStatusManager::GetCurrentMediaMetadata() const {
Maybe<PositionState> MediaStatusManager::GetCurrentPositionState() const {
if (mActiveMediaSessionContextId) {
auto info = mMediaSessionInfoMap.Lookup(*mActiveMediaSessionContextId);
- if (info) {
+ if (info && info->mPositionState) {
return info->mPositionState;
}
}
- return Nothing();
+
+ return mPlaybackStatusDelegate.GuessedMediaPositionState(
+ mActiveMediaSessionContextId);
}
void MediaStatusManager::FillMissingTitleAndArtworkIfNeeded(
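
The reworked GetCurrentPositionState() above encodes a simple precedence: an
explicitly set media-session position state wins, and the guessed state
derived from playing elements is only a fallback. Condensed into a standalone
sketch (types invented for the example):

    #include <optional>

    struct StateSketch {
      double mDuration = 0.0;
    };

    // Explicit media-session state takes precedence; the guessed state is
    // only consulted when the session has none.
    std::optional<StateSketch> CurrentPositionState(
        const std::optional<StateSketch>& aActiveSessionState,
        const std::optional<StateSketch>& aGuessedState) {
      return aActiveSessionState ? aActiveSessionState : aGuessedState;
    }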
diff --git a/dom/media/mediacontrol/MediaStatusManager.h b/dom/media/mediacontrol/MediaStatusManager.h
index a4216c8453..45f3ccccc5 100644
--- a/dom/media/mediacontrol/MediaStatusManager.h
+++ b/dom/media/mediacontrol/MediaStatusManager.h
@@ -120,6 +120,12 @@ class IMediaInfoUpdater {
// Use this method when media session update its position state.
virtual void UpdatePositionState(uint64_t aBrowsingContextId,
const Maybe<PositionState>& aState) = 0;
+
+  // Use this method when a controlled media element updates its guessed
+  // position state within a given browsing context.
+ virtual void UpdateGuessedPositionState(
+ uint64_t aBrowsingContextId, const nsID& aMediaId,
+ const Maybe<PositionState>& aGuessedState) = 0;
};
/**
@@ -165,12 +171,19 @@ class MediaStatusManager : public IMediaInfoUpdater {
MediaSessionAction aAction) override;
void UpdatePositionState(uint64_t aBrowsingContextId,
const Maybe<PositionState>& aState) override;
+ void UpdateGuessedPositionState(
+ uint64_t aBrowsingContextId, const nsID& aMediaId,
+ const Maybe<PositionState>& aGuessedState) override;
// Return active media session's metadata if active media session exists and
// it has already set its metadata. Otherwise, return default media metadata
// which is based on website's title and favicon.
MediaMetadataBase GetCurrentMediaMetadata() const;
+  // Return the active media session's position state, falling back to the
+  // guessed position state of controlled media; Nothing if neither exists.
+ Maybe<PositionState> GetCurrentPositionState() const;
+
bool IsMediaAudible() const;
bool IsMediaPlaying() const;
bool IsAnyMediaBeingControlled() const;
@@ -247,10 +260,6 @@ class MediaStatusManager : public IMediaInfoUpdater {
// media session doesn't exist, return 'None' instead.
MediaSessionPlaybackState GetCurrentDeclaredPlaybackState() const;
- // Return the active media session's position state. If the active media
- // session doesn't exist or doesn't have any state, Nothing is returned.
- Maybe<PositionState> GetCurrentPositionState() const;
-
// This state can match to the `guessed playback state` in the spec [1], it
// indicates if we have any media element playing within the tab which this
// controller belongs to. But currently we only take media elements into
diff --git a/dom/media/mediacontrol/tests/browser/browser_media_control_position_state.js b/dom/media/mediacontrol/tests/browser/browser_media_control_position_state.js
index 6074e2ee16..75f65eb34b 100644
--- a/dom/media/mediacontrol/tests/browser/browser_media_control_position_state.js
+++ b/dom/media/mediacontrol/tests/browser/browser_media_control_position_state.js
@@ -4,6 +4,7 @@ const IFRAME_URL =
"https://example.com/browser/dom/media/mediacontrol/tests/browser/file_iframe_media.html";
const testVideoId = "video";
+const videoDuration = 5.589333;
add_task(async function setupTestingPref() {
await SpecialPowers.pushPrefEnv({
@@ -18,9 +19,15 @@ add_task(async function setupTestingPref() {
add_task(async function testSetPositionState() {
info(`open media page`);
const tab = await createLoadedTabWrapper(PAGE_URL);
+ logPositionStateChangeEvents(tab);
+
+ info(`apply initial position state`);
+ await applyPositionState(tab, { duration: 10 });
info(`start media`);
+ const initialPositionState = isNextPositionState(tab, { duration: 10 });
await playMedia(tab, testVideoId);
+ await initialPositionState;
info(`set duration only`);
await setPositionState(tab, {
@@ -47,9 +54,15 @@ add_task(async function testSetPositionState() {
add_task(async function testSetPositionStateFromInactiveMediaSession() {
info(`open media page`);
const tab = await createLoadedTabWrapper(PAGE_URL);
+ logPositionStateChangeEvents(tab);
+
+ info(`apply initial position state`);
+ await applyPositionState(tab, { duration: 10 });
info(`start media`);
+ const initialPositionState = isNextPositionState(tab, { duration: 10 });
await playMedia(tab, testVideoId);
+ await initialPositionState;
info(
`add an event listener to measure how many times the position state changes`
@@ -82,48 +95,193 @@ add_task(async function testSetPositionStateFromInactiveMediaSession() {
});
/**
- * The following are helper functions.
+ *
+ * @param {boolean} withMetadata
+ * Specifies if the tab should set metadata for the playing video
*/
-async function setPositionState(tab, positionState) {
+async function testGuessedPositionState(withMetadata) {
+ info(`open media page`);
+ const tab = await createLoadedTabWrapper(PAGE_URL);
+ logPositionStateChangeEvents(tab);
+
+ if (withMetadata) {
+ info(`set media metadata`);
+ await setMediaMetadata(tab, { title: "A Video" });
+ }
+
+ info(`start media`);
+ await emitsPositionState(() => playMedia(tab, testVideoId), tab, {
+ duration: videoDuration,
+ position: 0,
+ playbackRate: 1.0,
+ });
+
+ info(`set playback rate to 2x`);
+ await emitsPositionState(() => setPlaybackRate(tab, testVideoId, 2.0), tab, {
+ duration: videoDuration,
+    position: null, // ignored
+ playbackRate: 2.0,
+ });
+
+ info(`seek to 1s`);
+ await emitsPositionState(() => setCurrentTime(tab, testVideoId, 1.0), tab, {
+ duration: videoDuration,
+ position: 1.0,
+ playbackRate: 2.0,
+ });
+
+ let positionChangedNum = 0;
const controller = tab.linkedBrowser.browsingContext.mediaController;
- const positionStateChanged = new Promise(r => {
- controller.addEventListener(
- "positionstatechange",
- event => {
- const { duration, playbackRate, position } = positionState;
- // duration is mandatory.
- is(
- event.duration,
- duration,
- `expected duration ${event.duration} is equal to ${duration}`
- );
-
- // Playback rate is optional, if it's not present, default should be 1.0
- if (playbackRate) {
- is(
- event.playbackRate,
- playbackRate,
- `expected playbackRate ${event.playbackRate} is equal to ${playbackRate}`
- );
- } else {
- is(event.playbackRate, 1.0, `expected default playbackRate is 1.0`);
- }
-
- // Position state is optional, if it's not present, default should be 0.0
- if (position) {
- is(
- event.position,
- position,
- `expected position ${event.position} is equal to ${position}`
- );
- } else {
- is(event.position, 0.0, `expected default position is 0.0`);
- }
- r();
- },
- { once: true }
- );
+ controller.onpositionstatechange = () => positionChangedNum++;
+
+ info(`pause media`);
+ // shouldn't generate an event
+ await pauseMedia(tab, testVideoId);
+
+ info(`seek to 2s`);
+ await emitsPositionState(() => setCurrentTime(tab, testVideoId, 2.0), tab, {
+ duration: videoDuration,
+ position: 2.0,
+ playbackRate: 2.0,
+ });
+
+ info(`start media`);
+ await emitsPositionState(() => playMedia(tab, testVideoId), tab, {
+ duration: videoDuration,
+ position: 2.0,
+ playbackRate: 2.0,
});
+
+ is(
+ positionChangedNum,
+ 2,
+    `We should only receive two position state changes, because pausing has no effect`
+ );
+
+ info(`remove tab`);
+ await tab.close();
+}
+
+add_task(async function testGuessedPositionStateWithMetadata() {
+  await testGuessedPositionState(true);
+});
+
+add_task(async function testGuessedPositionStateWithoutMetadata() {
+  await testGuessedPositionState(false);
+});
+
+/**
+ * @typedef {{
+ * duration: number,
+ * playbackRate?: number | null,
+ * position?: number | null,
+ * }} ExpectedPositionState
+ */
+
+/**
+ * Checks if the next received position state matches the expected one.
+ *
+ * @param {tab} tab
+ * The tab that contains the media
+ * @param {ExpectedPositionState} positionState
+ * The expected position state. `duration` is mandatory. `playbackRate`
+ * and `position` are optional. If they're `null`, they're ignored,
+ * otherwise if they're not present or undefined, they're expected to
+ * be the default value.
+ * @returns {Promise}
+ * Resolves when the event has been received
+ */
+async function isNextPositionState(tab, positionState) {
+ const got = await nextPositionState(tab);
+ isPositionState(got, positionState);
+}
+
+/**
+ * Waits for the next position state and returns it
+ *
+ * @param {tab} tab The tab to receive position state from
+ * @returns {Promise<MediaPositionState>} The emitted position state
+ */
+function nextPositionState(tab) {
+ const controller = tab.linkedBrowser.browsingContext.mediaController;
+ return new Promise(r => {
+ controller.addEventListener("positionstatechange", r, { once: true });
+ });
+}
+
+/**
+ * @param {MediaPositionState} got
+ * The received position state
+ * @param {ExpectedPositionState} expected
+ * The expected position state. `duration` is mandatory. `playbackRate`
+ * and `position` are optional. If they're `null`, they're ignored,
+ * otherwise if they're not present or undefined, they're expected to
+ * be the default value.
+ */
+function isPositionState(got, expected) {
+ const { duration, playbackRate, position } = expected;
+ // duration is mandatory.
+ isFuzzyEq(got.duration, duration, "duration");
+
+ // Playback rate is optional, if it's not present, default should be 1.0
+ if (typeof playbackRate === "number") {
+ isFuzzyEq(got.playbackRate, playbackRate, "playbackRate");
+ } else if (playbackRate !== null) {
+ is(got.playbackRate, 1.0, `expected default playbackRate is 1.0`);
+ }
+
+ // Position is optional, if it's not present, default should be 0.0
+ if (typeof position === "number") {
+ isFuzzyEq(got.position, position, "position");
+ } else if (position !== null) {
+ is(got.position, 0.0, `expected default position is 0.0`);
+ }
+}
+
+/**
+ * Checks if two numbers are equal to one decimal place
+ *
+ * @param {number} got
+ * The value received while testing
+ * @param {number} expected
+ * The expected value
+ * @param {string} role
+ * The role of the check (used for formatting)
+ */
+function isFuzzyEq(got, expected, role) {
+ expected = expected.toFixed(1);
+ got = got.toFixed(1);
+ is(got, expected, `expected ${role} ${got} to equal ${expected}`);
+}
+
+/**
+ * Test if `cb` emits a position state event.
+ *
+ * @param {() => (void | Promise<void>)} cb
+ * A callback that is expected to generate a position state event
+ * @param {tab} tab
+ * The tab that contains the media
+ * @param {ExpectedPositionState} positionState
+ * The expected position state to be generated.
+ */
+async function emitsPositionState(cb, tab, positionState) {
+ const positionStateChanged = isNextPositionState(tab, positionState);
+ await cb();
+ await positionStateChanged;
+}
+
+/**
+ * The following are helper functions.
+ */
+async function setPositionState(tab, positionState) {
+ await emitsPositionState(
+ () => applyPositionState(tab, positionState),
+ tab,
+ positionState
+ );
+}
+
+async function applyPositionState(tab, positionState) {
await SpecialPowers.spawn(
tab.linkedBrowser,
[positionState],
@@ -131,7 +289,12 @@ async function setPositionState(tab, positionState) {
content.navigator.mediaSession.setPositionState(positionState);
}
);
- await positionStateChanged;
+}
+
+async function setMediaMetadata(tab, metadata) {
+ await SpecialPowers.spawn(tab.linkedBrowser, [metadata], data => {
+ content.navigator.mediaSession.metadata = new content.MediaMetadata(data);
+ });
}
async function setPositionStateOnInactiveMediaSession(tab) {
diff --git a/dom/media/mediacontrol/tests/browser/head.js b/dom/media/mediacontrol/tests/browser/head.js
index cac96c0bff..7c6a1e37e4 100644
--- a/dom/media/mediacontrol/tests/browser/head.js
+++ b/dom/media/mediacontrol/tests/browser/head.js
@@ -195,6 +195,58 @@ function checkOrWaitUntilMediaStartedPlaying(tab, elementId) {
}
/**
+ * Set the playback rate on a media element.
+ *
+ * @param {tab} tab
+ * The tab that contains the media which we would check
+ * @param {string} elementId
+ * The element Id of the media which we would check
+ * @param {number} rate
+ * The playback rate to set
+ * @return {Promise}
+ * Resolve when the playback rate has been set
+ */
+function setPlaybackRate(tab, elementId, rate) {
+ return SpecialPowers.spawn(
+ tab.linkedBrowser,
+ [elementId, rate],
+ (Id, rate) => {
+ const video = content.document.getElementById(Id);
+ if (!video) {
+ ok(false, `can't get the media element!`);
+ }
+ video.playbackRate = rate;
+ }
+ );
+}
+
+/**
+ * Set the time on a media element.
+ *
+ * @param {tab} tab
+ * The tab that contains the media which we would check
+ * @param {string} elementId
+ * The element Id of the media which we would check
+ * @param {number} currentTime
+ * The time to set
+ * @return {Promise}
+ * Resolve when the time has been set
+ */
+function setCurrentTime(tab, elementId, currentTime) {
+ return SpecialPowers.spawn(
+ tab.linkedBrowser,
+ [elementId, currentTime],
+ (Id, currentTime) => {
+ const video = content.document.getElementById(Id);
+ if (!video) {
+ ok(false, `can't get the media element!`);
+ }
+ video.currentTime = currentTime;
+ }
+ );
+}
+
+/**
* Returns a promise that resolves when the specific media stops playing.
*
* @param {tab} tab
@@ -390,6 +442,18 @@ function waitUntilMediaControllerAmountChanged() {
}
/**
+ * Wait until the position state that would be displayed on the virtual control
+ * interface changes. We observe this by listening for the
+ * `media-position-state-changed` notification.
+ *
+ * @return {Promise}
+ * Resolve when observing `media-position-state-changed`
+ */
+function waitUntilPositionStateChanged() {
+ return BrowserUtils.promiseObserved("media-position-state-changed");
+}
+
+/**
 * Check if the media controller from the given tab is active. If not, return a
 * promise that resolves when the controller becomes active.
*/
@@ -400,3 +464,20 @@ async function checkOrWaitUntilControllerBecomeActive(tab) {
}
await new Promise(r => (controller.onactivated = r));
}
+
+/**
+ * Logs all `positionstatechange` events in a tab.
+ */
+function logPositionStateChangeEvents(tab) {
+ tab.linkedBrowser.browsingContext.mediaController.addEventListener(
+ "positionstatechange",
+ event =>
+ info(
+ `got position state: ${JSON.stringify({
+ duration: event.duration,
+ playbackRate: event.playbackRate,
+ position: event.position,
+ })}`
+ )
+ );
+}
diff --git a/dom/media/mediasource/MediaSource.cpp b/dom/media/mediasource/MediaSource.cpp
index 94e9904262..8f4cd0c514 100644
--- a/dom/media/mediasource/MediaSource.cpp
+++ b/dom/media/mediasource/MediaSource.cpp
@@ -191,8 +191,7 @@ void MediaSource::IsTypeSupported(const nsAString& aType,
return;
}
if (mimeType == MEDIAMIMETYPE("audio/webm")) {
- if (!(StaticPrefs::media_mediasource_webm_enabled() ||
- StaticPrefs::media_mediasource_webm_audio_enabled())) {
+ if (!StaticPrefs::media_mediasource_webm_enabled()) {
// Don't leak information about the fact that it's pref-disabled; just act
// like we can't play it. Or should this throw "Unknown type"?
return aRv.ThrowNotSupportedError("Can't play type");
diff --git a/dom/media/mediasource/MediaSourceDemuxer.cpp b/dom/media/mediasource/MediaSourceDemuxer.cpp
index 6df15cb2d4..b846beb403 100644
--- a/dom/media/mediasource/MediaSourceDemuxer.cpp
+++ b/dom/media/mediasource/MediaSourceDemuxer.cpp
@@ -271,7 +271,7 @@ MediaSourceTrackDemuxer::MediaSourceTrackDemuxer(MediaSourceDemuxer* aParent,
: mParent(aParent),
mTaskQueue(mParent->GetTaskQueue()),
mType(aType),
- mMutex("MediaSourceTrackDemuxer"),
+ mMutex("MediaSourceTrackDemuxer", this),
mManager(aManager),
mReset(true),
mPreRoll(TimeUnit::FromMicroseconds(
@@ -316,6 +316,7 @@ void MediaSourceTrackDemuxer::Reset() {
RefPtr<MediaSourceTrackDemuxer> self = this;
nsCOMPtr<nsIRunnable> task =
NS_NewRunnableFunction("MediaSourceTrackDemuxer::Reset", [self]() {
+ self->mMutex.AssertOnWritingThread();
self->mNextSample.reset();
self->mReset = true;
if (!self->mManager) {
@@ -324,7 +325,7 @@ void MediaSourceTrackDemuxer::Reset() {
MOZ_ASSERT(self->OnTaskQueue());
self->mManager->Seek(self->mType, TimeUnit::Zero(), TimeUnit::Zero());
{
- MutexAutoLock mon(self->mMutex);
+ MutexSingleWriterAutoLockOnThread(lock, self->mMutex);
self->mNextRandomAccessPoint =
self->mManager->GetNextRandomAccessPoint(
self->mType, MediaSourceDemuxer::EOS_FUZZ);
@@ -336,7 +337,7 @@ void MediaSourceTrackDemuxer::Reset() {
}
nsresult MediaSourceTrackDemuxer::GetNextRandomAccessPoint(TimeUnit* aTime) {
- MutexAutoLock mon(mMutex);
+ MutexSingleWriterAutoLock mon(mMutex);
*aTime = mNextRandomAccessPoint;
return NS_OK;
}
@@ -350,7 +351,7 @@ MediaSourceTrackDemuxer::SkipToNextRandomAccessPoint(
}
media::TimeIntervals MediaSourceTrackDemuxer::GetBuffered() {
- MutexAutoLock mon(mMutex);
+ MutexSingleWriterAutoLock mon(mMutex);
if (!mManager) {
return media::TimeIntervals();
}
@@ -371,6 +372,7 @@ void MediaSourceTrackDemuxer::BreakCycles() {
RefPtr<MediaSourceTrackDemuxer::SeekPromise> MediaSourceTrackDemuxer::DoSeek(
const TimeUnit& aTime) {
+ mMutex.AssertOnWritingThread();
if (!mManager) {
return SeekPromise::CreateAndReject(
MediaResult(NS_ERROR_DOM_MEDIA_CANCELED,
@@ -426,7 +428,7 @@ RefPtr<MediaSourceTrackDemuxer::SeekPromise> MediaSourceTrackDemuxer::DoSeek(
}
mReset = false;
{
- MutexAutoLock mon(mMutex);
+ MutexSingleWriterAutoLockOnThread(lock, mMutex);
mNextRandomAccessPoint =
mManager->GetNextRandomAccessPoint(mType, MediaSourceDemuxer::EOS_FUZZ);
}
@@ -435,6 +437,7 @@ RefPtr<MediaSourceTrackDemuxer::SeekPromise> MediaSourceTrackDemuxer::DoSeek(
RefPtr<MediaSourceTrackDemuxer::SamplesPromise>
MediaSourceTrackDemuxer::DoGetSamples(int32_t aNumSamples) {
+ mMutex.AssertOnWritingThread();
if (!mManager) {
return SamplesPromise::CreateAndReject(
MediaResult(NS_ERROR_DOM_MEDIA_CANCELED,
@@ -487,7 +490,7 @@ MediaSourceTrackDemuxer::DoGetSamples(int32_t aNumSamples) {
RefPtr<SamplesHolder> samples = new SamplesHolder;
samples->AppendSample(sample);
{
- MutexAutoLock mon(mMutex); // spurious warning will be given
+ MutexSingleWriterAutoLockOnThread(lock, mMutex);
// Diagnostic asserts for bug 1810396
MOZ_DIAGNOSTIC_ASSERT(sample, "Invalid sample pointer found!");
MOZ_DIAGNOSTIC_ASSERT(sample->HasValidTime(), "Invalid sample time found!");
@@ -505,6 +508,7 @@ MediaSourceTrackDemuxer::DoGetSamples(int32_t aNumSamples) {
RefPtr<MediaSourceTrackDemuxer::SkipAccessPointPromise>
MediaSourceTrackDemuxer::DoSkipToNextRandomAccessPoint(
const TimeUnit& aTimeThreadshold) {
+ mMutex.AssertOnWritingThread();
if (!mManager) {
return SkipAccessPointPromise::CreateAndReject(
SkipFailureHolder(MediaResult(NS_ERROR_DOM_MEDIA_CANCELED,
@@ -534,13 +538,13 @@ MediaSourceTrackDemuxer::DoSkipToNextRandomAccessPoint(
}
bool MediaSourceTrackDemuxer::HasManager(TrackBuffersManager* aManager) const {
- MOZ_ASSERT(OnTaskQueue());
+ mMutex.AssertOnWritingThread();
return mManager == aManager;
}
void MediaSourceTrackDemuxer::DetachManager() {
MOZ_ASSERT(OnTaskQueue());
- MutexAutoLock mon(mMutex);
+ MutexSingleWriterAutoLock mon(mMutex);
mManager = nullptr;
}
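
MediaSourceTrackDemuxer now follows the single-writer mutex discipline: all
writes happen on the task queue, so the writing thread may read guarded
members without locking (checked by AssertOnWritingThread()), while other
threads must take the lock. A simplified sketch of that discipline with
standard primitives (the idea only, not the Gecko MutexSingleWriter API):

    #include <cassert>
    #include <mutex>
    #include <thread>

    // One thread owns all writes, so it can read members without locking;
    // other threads must lock to read. The real MutexSingleWriter also feeds
    // clang thread-safety analysis via MOZ_GUARDED_BY.
    class SingleWriterSketch {
     public:
      explicit SingleWriterSketch(std::thread::id aWriter) : mWriter(aWriter) {}

      void AssertOnWritingThread() const {
        assert(std::this_thread::get_id() == mWriter);
      }

      void Write(int aValue) {  // writer thread only
        AssertOnWritingThread();
        std::lock_guard<std::mutex> lock(mMutex);  // still locks: readers exist
        mValue = aValue;
      }

      int ReadFromAnyThread() const {  // any thread: must lock
        std::lock_guard<std::mutex> lock(mMutex);
        return mValue;
      }

      int ReadFromWriter() const {  // writer thread: lock-free read is safe
        AssertOnWritingThread();
        return mValue;
      }

     private:
      std::thread::id mWriter;
      mutable std::mutex mMutex;
      int mValue = 0;
    };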
diff --git a/dom/media/mediasource/MediaSourceDemuxer.h b/dom/media/mediasource/MediaSourceDemuxer.h
index 177aae769b..fa25878af9 100644
--- a/dom/media/mediasource/MediaSourceDemuxer.h
+++ b/dom/media/mediasource/MediaSourceDemuxer.h
@@ -101,13 +101,16 @@ class MediaSourceDemuxer : public MediaDataDemuxer,
class MediaSourceTrackDemuxer
: public MediaTrackDemuxer,
- public DecoderDoctorLifeLogger<MediaSourceTrackDemuxer> {
+ public DecoderDoctorLifeLogger<MediaSourceTrackDemuxer>,
+ public SingleWriterLockOwner {
public:
MediaSourceTrackDemuxer(MediaSourceDemuxer* aParent,
TrackInfo::TrackType aType,
TrackBuffersManager* aManager)
MOZ_REQUIRES(aParent->mMutex);
+ bool OnWritingThread() const override { return OnTaskQueue(); }
+
UniquePtr<TrackInfo> GetInfo() const override;
RefPtr<SeekPromise> Seek(const media::TimeUnit& aTime) override;
@@ -146,12 +149,12 @@ class MediaSourceTrackDemuxer
TrackInfo::TrackType mType;
// Mutex protecting members below accessed from multiple threads.
- Mutex mMutex MOZ_UNANNOTATED;
- media::TimeUnit mNextRandomAccessPoint;
+ MutexSingleWriter mMutex;
+ media::TimeUnit mNextRandomAccessPoint MOZ_GUARDED_BY(mMutex);
// Would be accessed in MFR's demuxer proxy task queue and TaskQueue, and
// only be set on the TaskQueue. It can be accessed while on TaskQueue without
// the need for the lock.
- RefPtr<TrackBuffersManager> mManager;
+ RefPtr<TrackBuffersManager> mManager MOZ_GUARDED_BY(mMutex);
// Only accessed on TaskQueue
Maybe<RefPtr<MediaRawData>> mNextSample;
diff --git a/dom/media/metrics.yaml b/dom/media/metrics.yaml
index 58e525174b..3f735b0273 100644
--- a/dom/media/metrics.yaml
+++ b/dom/media/metrics.yaml
@@ -166,3 +166,27 @@ media.playback:
description: True if the first frame is decoded by a hardware decoder.
type: boolean
expires: never
+ device_hardware_decoder_support:
+ type: labeled_boolean
+ description:
+    The results of hardware decoder support for different video codecs. True
+    means the codec can be decoded by hardware on the user's device.
+ metadata:
+ tags:
+ - 'Core :: Audio/Video: Playback'
+ bugs:
+ - https://bugzilla.mozilla.org/show_bug.cgi?id=1892516
+ data_reviews:
+ - https://bugzilla.mozilla.org/show_bug.cgi?id=1892516#c4
+ data_sensitivity:
+ - technical
+ notification_emails:
+ - media-alerts@mozilla.com
+ expires: never
+ labels:
+ - h264
+ - vp8
+ - vp9
+ - av1
+ - hevc
+ telemetry_mirror: MEDIA_DEVICE_HARDWARE_DECODING_SUPPORT
diff --git a/dom/media/moz.build b/dom/media/moz.build
index ac62e9b67e..7f256387c7 100644
--- a/dom/media/moz.build
+++ b/dom/media/moz.build
@@ -157,7 +157,7 @@ EXPORTS += [
"FileBlockCache.h",
"ForwardedInputTrack.h",
"FrameStatistics.h",
- "ImageToI420.h",
+ "ImageConversion.h",
"Intervals.h",
"MediaCache.h",
"MediaContainerType.h",
@@ -190,7 +190,6 @@ EXPORTS += [
"MediaTrackList.h",
"MediaTrackListener.h",
"MemoryBlockCache.h",
- "MPSCQueue.h",
"nsIDocumentActivity.h",
"PrincipalChangeObserver.h",
"PrincipalHandle.h",
@@ -237,6 +236,7 @@ EXPORTS.mozilla.dom += [
"MediaDevices.h",
"MediaStreamError.h",
"MediaStreamTrack.h",
+ "MPSCQueue.h",
"VideoPlaybackQuality.h",
"VideoStreamTrack.h",
"VideoTrack.h",
@@ -282,7 +282,7 @@ UNIFIED_SOURCES += [
"GetUserMediaRequest.cpp",
"GraphDriver.cpp",
"GraphRunner.cpp",
- "ImageToI420.cpp",
+ "ImageConversion.cpp",
"MediaCache.cpp",
"MediaContainerType.cpp",
"MediaDecoder.cpp",
diff --git a/dom/media/ogg/OggDemuxer.cpp b/dom/media/ogg/OggDemuxer.cpp
index 3f14887617..db9477cf1c 100644
--- a/dom/media/ogg/OggDemuxer.cpp
+++ b/dom/media/ogg/OggDemuxer.cpp
@@ -2050,11 +2050,7 @@ nsresult OggDemuxer::SeekBisection(TrackInfo::TrackType aType,
interval = 0;
break;
}
-
backsteps = std::min(backsteps + 1, maxBackStep);
- // We reset mustBackoff. If we still need to backoff further, it will
- // be set to true again.
- mustBackoff = false;
} else {
backsteps = 0;
}
diff --git a/dom/media/platforms/EncoderConfig.cpp b/dom/media/platforms/EncoderConfig.cpp
index ed780b947c..2c32e4c2ff 100644
--- a/dom/media/platforms/EncoderConfig.cpp
+++ b/dom/media/platforms/EncoderConfig.cpp
@@ -7,6 +7,7 @@
#include "EncoderConfig.h"
#include "MP4Decoder.h"
#include "VPXDecoder.h"
+#include "mozilla/dom/BindingUtils.h"
namespace mozilla {
@@ -24,4 +25,45 @@ CodecType EncoderConfig::CodecTypeForMime(const nsACString& aMimeType) {
return CodecType::Unknown;
}
+const char* CodecTypeStrings[] = {
+ "BeginVideo", "H264", "VP8", "VP9", "EndVideo", "Opus", "Vorbis",
+ "Flac", "AAC", "PCM", "G722", "EndAudio", "Unknown"};
+
+nsCString EncoderConfig::ToString() const {
+ nsCString rv;
+ rv.Append(CodecTypeStrings[UnderlyingValue(mCodec)]);
+ rv.AppendLiteral(mBitrateMode == BitrateMode::Constant ? " (CBR)" : " (VBR)");
+ rv.AppendPrintf("%" PRIu32 "bps", mBitrate);
+ if (mUsage == Usage::Realtime) {
+ rv.AppendLiteral(", realtime");
+ } else {
+ rv.AppendLiteral(", record");
+ }
+ if (mCodec > CodecType::_BeginVideo_ && mCodec < CodecType::_EndVideo_) {
+ rv.AppendPrintf(" [%dx%d]", mSize.Width(), mSize.Height());
+ if (mHardwarePreference == HardwarePreference::RequireHardware) {
+ rv.AppendLiteral(", hw required");
+ } else if (mHardwarePreference == HardwarePreference::RequireSoftware) {
+ rv.AppendLiteral(", sw required");
+ } else {
+ rv.AppendLiteral(", hw: no preference");
+ }
+ rv.AppendPrintf(" format: %s", GetEnumString(mPixelFormat).get());
+ rv.AppendPrintf(" format (source): %s",
+ GetEnumString(mSourcePixelFormat).get());
+ if (mScalabilityMode == ScalabilityMode::L1T2) {
+ rv.AppendLiteral(" (L1T2)");
+ } else if (mScalabilityMode == ScalabilityMode::L1T3) {
+ rv.AppendLiteral(" (L1T2)");
+ }
+ rv.AppendPrintf(", fps: %" PRIu8, mFramerate);
+ rv.AppendPrintf(", kf interval: %zu", mKeyframeInterval);
+ } else {
+ rv.AppendPrintf(", ch: %" PRIu32 ", %" PRIu32 "Hz", mNumberOfChannels,
+ mSampleRate);
+ }
+ rv.AppendPrintf("(w/%s codec specific)", mCodecSpecific ? "" : "o");
+ return rv;
+}
+
} // namespace mozilla
diff --git a/dom/media/platforms/EncoderConfig.h b/dom/media/platforms/EncoderConfig.h
index e0da1709d6..15241b71c1 100644
--- a/dom/media/platforms/EncoderConfig.h
+++ b/dom/media/platforms/EncoderConfig.h
@@ -159,6 +159,8 @@ class EncoderConfig final {
static CodecType CodecTypeForMime(const nsACString& aMimeType);
+ nsCString ToString() const;
+
bool IsVideo() const {
return mCodec > CodecType::_BeginVideo_ && mCodec < CodecType::_EndVideo_;
}
diff --git a/dom/media/platforms/MediaCodecsSupport.cpp b/dom/media/platforms/MediaCodecsSupport.cpp
index 13c10ab389..1386e87a6c 100644
--- a/dom/media/platforms/MediaCodecsSupport.cpp
+++ b/dom/media/platforms/MediaCodecsSupport.cpp
@@ -195,15 +195,12 @@ CodecDefinition MCSInfo::GetCodecDefinition(const MediaCodec& aCodec) {
}
MediaCodecsSupport MCSInfo::GetMediaCodecsSupportEnum(
- const MediaCodec& aCodec, const DecodeSupportSet& aSupport) {
- if (aSupport.isEmpty()) {
- return MediaCodecsSupport{};
- }
+ const MediaCodec& aCodec, const DecodeSupport& aSupport) {
const CodecDefinition cd = GetCodecDefinition(aCodec);
- if (aSupport.contains(DecodeSupport::SoftwareDecode)) {
+ if (aSupport == DecodeSupport::SoftwareDecode) {
return cd.swDecodeSupport;
}
- if (aSupport.contains(DecodeSupport::HardwareDecode)) {
+ if (aSupport == DecodeSupport::HardwareDecode) {
return cd.hwDecodeSupport;
}
return MediaCodecsSupport::SENTINEL;
diff --git a/dom/media/platforms/MediaCodecsSupport.h b/dom/media/platforms/MediaCodecsSupport.h
index ead4426259..5176b4ecb2 100644
--- a/dom/media/platforms/MediaCodecsSupport.h
+++ b/dom/media/platforms/MediaCodecsSupport.h
@@ -183,7 +183,7 @@ class MCSInfo final {
// Returns a MediaCodecsSupport enum corresponding to the provided
// codec type and decode support level requested.
static MediaCodecsSupport GetMediaCodecsSupportEnum(
- const MediaCodec& aCodec, const DecodeSupportSet& aSupport);
+ const MediaCodec& aCodec, const DecodeSupport& aSupport);
// Returns true if SW/HW decode enum for a given codec is present in the args.
static bool SupportsSoftwareDecode(
diff --git a/dom/media/platforms/PDMFactory.cpp b/dom/media/platforms/PDMFactory.cpp
index 00f46385e2..f640ee4506 100644
--- a/dom/media/platforms/PDMFactory.cpp
+++ b/dom/media/platforms/PDMFactory.cpp
@@ -27,6 +27,7 @@
#include "mozilla/RemoteDecoderManagerChild.h"
#include "mozilla/RemoteDecoderModule.h"
#include "mozilla/SharedThreadPool.h"
+#include "mozilla/StaticMutex.h"
#include "mozilla/StaticPrefs_media.h"
#include "mozilla/SyncRunnable.h"
#include "mozilla/TaskQueue.h"
@@ -474,7 +475,7 @@ DecodeSupportSet PDMFactory::Supports(
void PDMFactory::CreatePDMs() {
if (StaticPrefs::media_use_blank_decoder()) {
- CreateAndStartupPDM<BlankDecoderModule>();
+ StartupPDM(BlankDecoderModule::Create());
// The Blank PDM SupportsMimeType reports true for all codecs; the creation
    // of its decoder is infallible. As such it will be used for all media, and
    // we can stop creating more PDMs at this point.
@@ -500,7 +501,7 @@ void PDMFactory::CreatePDMs() {
void PDMFactory::CreateGpuPDMs() {
#ifdef XP_WIN
if (StaticPrefs::media_wmf_enabled()) {
- CreateAndStartupPDM<WMFDecoderModule>();
+ StartupPDM(WMFDecoderModule::Create());
}
#endif
}
@@ -529,12 +530,12 @@ void PDMFactory::CreateRddPDMs() {
#ifdef XP_WIN
if (StaticPrefs::media_wmf_enabled() &&
StaticPrefs::media_rdd_wmf_enabled()) {
- CreateAndStartupPDM<WMFDecoderModule>();
+ StartupPDM(WMFDecoderModule::Create());
}
#endif
#ifdef MOZ_APPLEMEDIA
if (StaticPrefs::media_rdd_applemedia_enabled()) {
- CreateAndStartupPDM<AppleDecoderModule>();
+ StartupPDM(AppleDecoderModule::Create());
}
#endif
StartupPDM(FFVPXRuntimeLinker::CreateDecoder());
@@ -546,7 +547,8 @@ void PDMFactory::CreateRddPDMs() {
FFmpegRuntimeLinker::LinkStatusCode());
}
#endif
- CreateAndStartupPDM<AgnosticDecoderModule>();
+ StartupPDM(AgnosticDecoderModule::Create(),
+ StaticPrefs::media_prefer_non_ffvpx());
}
void PDMFactory::CreateUtilityPDMs() {
@@ -555,13 +557,13 @@ void PDMFactory::CreateUtilityPDMs() {
if (StaticPrefs::media_wmf_enabled() &&
StaticPrefs::media_utility_wmf_enabled() &&
aKind == ipc::SandboxingKind::UTILITY_AUDIO_DECODING_WMF) {
- CreateAndStartupPDM<WMFDecoderModule>();
+ StartupPDM(WMFDecoderModule::Create());
}
#endif
#ifdef MOZ_APPLEMEDIA
if (StaticPrefs::media_utility_applemedia_enabled() &&
aKind == ipc::SandboxingKind::UTILITY_AUDIO_DECODING_APPLE_MEDIA) {
- CreateAndStartupPDM<AppleDecoderModule>();
+ StartupPDM(AppleDecoderModule::Create());
}
#endif
if (aKind == ipc::SandboxingKind::GENERIC_UTILITY) {
@@ -582,12 +584,13 @@ void PDMFactory::CreateUtilityPDMs() {
StaticPrefs::media_android_media_codec_preferred());
}
#endif
- CreateAndStartupPDM<AgnosticDecoderModule>();
+ StartupPDM(AgnosticDecoderModule::Create(),
+ StaticPrefs::media_prefer_non_ffvpx());
}
#ifdef MOZ_WMF_MEDIA_ENGINE
if (aKind == ipc::SandboxingKind::MF_MEDIA_ENGINE_CDM) {
if (StaticPrefs::media_wmf_media_engine_enabled()) {
- CreateAndStartupPDM<MFMediaEngineDecoderModule>();
+ StartupPDM(MFMediaEngineDecoderModule::Create());
}
}
#endif
@@ -595,31 +598,30 @@ void PDMFactory::CreateUtilityPDMs() {
void PDMFactory::CreateContentPDMs() {
if (StaticPrefs::media_gpu_process_decoder()) {
- CreateAndStartupPDM<RemoteDecoderModule>(RemoteDecodeIn::GpuProcess);
+ StartupPDM(RemoteDecoderModule::Create(RemoteDecodeIn::GpuProcess));
}
if (StaticPrefs::media_rdd_process_enabled()) {
- CreateAndStartupPDM<RemoteDecoderModule>(RemoteDecodeIn::RddProcess);
+ StartupPDM(RemoteDecoderModule::Create(RemoteDecodeIn::RddProcess));
}
if (StaticPrefs::media_utility_process_enabled()) {
#ifdef MOZ_APPLEMEDIA
- CreateAndStartupPDM<RemoteDecoderModule>(
- RemoteDecodeIn::UtilityProcess_AppleMedia);
+ StartupPDM(
+ RemoteDecoderModule::Create(RemoteDecodeIn::UtilityProcess_AppleMedia));
#endif
#ifdef XP_WIN
- CreateAndStartupPDM<RemoteDecoderModule>(
- RemoteDecodeIn::UtilityProcess_WMF);
+ StartupPDM(RemoteDecoderModule::Create(RemoteDecodeIn::UtilityProcess_WMF));
#endif
// WMF and AppleMedia should be created before Generic because the order
    // affects which decoder module is chosen first.
- CreateAndStartupPDM<RemoteDecoderModule>(
- RemoteDecodeIn::UtilityProcess_Generic);
+ StartupPDM(
+ RemoteDecoderModule::Create(RemoteDecodeIn::UtilityProcess_Generic));
}
#ifdef MOZ_WMF_MEDIA_ENGINE
if (StaticPrefs::media_wmf_media_engine_enabled()) {
- CreateAndStartupPDM<RemoteDecoderModule>(
- RemoteDecodeIn::UtilityProcess_MFMediaEngineCDM);
+ StartupPDM(RemoteDecoderModule::Create(
+ RemoteDecodeIn::UtilityProcess_MFMediaEngineCDM));
}
#endif
@@ -631,7 +633,7 @@ void PDMFactory::CreateContentPDMs() {
# ifdef MOZ_WMF
if (!StaticPrefs::media_rdd_process_enabled() ||
!StaticPrefs::media_rdd_wmf_enabled()) {
- if (!CreateAndStartupPDM<WMFDecoderModule>()) {
+ if (!StartupPDM(WMFDecoderModule::Create())) {
mFailureFlags += DecoderDoctorDiagnostics::Flags::WMFFailedToLoad;
}
}
@@ -642,11 +644,11 @@ void PDMFactory::CreateContentPDMs() {
#endif
#ifdef MOZ_APPLEMEDIA
- CreateAndStartupPDM<AppleDecoderModule>();
+ StartupPDM(AppleDecoderModule::Create());
#endif
#ifdef MOZ_OMX
if (StaticPrefs::media_omx_enabled()) {
- CreateAndStartupPDM<OmxDecoderModule>();
+ StartupPDM(OmxDecoderModule::Create());
}
#endif
StartupPDM(FFVPXRuntimeLinker::CreateDecoder());
@@ -658,7 +660,8 @@ void PDMFactory::CreateContentPDMs() {
}
#endif
- CreateAndStartupPDM<AgnosticDecoderModule>();
+ StartupPDM(AgnosticDecoderModule::Create(),
+ StaticPrefs::media_prefer_non_ffvpx());
#if !defined(MOZ_WIDGET_ANDROID) // Still required for video?
}
#endif // !defined(MOZ_WIDGET_ANDROID)
@@ -681,7 +684,7 @@ void PDMFactory::CreateContentPDMs() {
void PDMFactory::CreateDefaultPDMs() {
#ifdef XP_WIN
if (StaticPrefs::media_wmf_enabled()) {
- if (!CreateAndStartupPDM<WMFDecoderModule>()) {
+ if (!StartupPDM(WMFDecoderModule::Create())) {
mFailureFlags += DecoderDoctorDiagnostics::Flags::WMFFailedToLoad;
}
} else if (StaticPrefs::media_decoder_doctor_wmf_disabled_is_failure()) {
@@ -690,11 +693,11 @@ void PDMFactory::CreateDefaultPDMs() {
#endif
#ifdef MOZ_APPLEMEDIA
- CreateAndStartupPDM<AppleDecoderModule>();
+ StartupPDM(AppleDecoderModule::Create());
#endif
#ifdef MOZ_OMX
if (StaticPrefs::media_omx_enabled()) {
- CreateAndStartupPDM<OmxDecoderModule>();
+ StartupPDM(OmxDecoderModule::Create());
}
#endif
StartupPDM(FFVPXRuntimeLinker::CreateDecoder());
@@ -712,7 +715,8 @@ void PDMFactory::CreateDefaultPDMs() {
}
#endif
- CreateAndStartupPDM<AgnosticDecoderModule>();
+ StartupPDM(AgnosticDecoderModule::Create(),
+ StaticPrefs::media_prefer_non_ffvpx());
if (StaticPrefs::media_gmp_decoder_enabled() &&
!StartupPDM(GMPDecoderModule::Create(),
@@ -783,9 +787,11 @@ void PDMFactory::SetCDMProxy(CDMProxy* aProxy) {
mEMEPDM = MakeRefPtr<EMEDecoderModule>(aProxy, m);
}
+StaticMutex sSupportedMutex;
+
/* static */
media::MediaCodecsSupported PDMFactory::Supported(bool aForceRefresh) {
- MOZ_ASSERT(NS_IsMainThread());
+ StaticMutexAutoLock lock(sSupportedMutex);
static auto calculate = []() {
auto pdm = MakeRefPtr<PDMFactory>();
diff --git a/dom/media/platforms/PDMFactory.h b/dom/media/platforms/PDMFactory.h
index c56c11c506..9a4d4ff6b9 100644
--- a/dom/media/platforms/PDMFactory.h
+++ b/dom/media/platforms/PDMFactory.h
@@ -79,11 +79,6 @@ class PDMFactory final {
void CreateContentPDMs();
void CreateDefaultPDMs();
- template <typename DECODER_MODULE, typename... ARGS>
- bool CreateAndStartupPDM(ARGS&&... aArgs) {
- return StartupPDM(DECODER_MODULE::Create(std::forward<ARGS>(aArgs)...));
- }
-
// Startup the provided PDM and add it to our list if successful.
bool StartupPDM(already_AddRefed<PlatformDecoderModule> aPDM,
bool aInsertAtBeginning = false);
diff --git a/dom/media/platforms/SimpleMap.h b/dom/media/platforms/SimpleMap.h
index c26bff1e9a..635ba6f085 100644
--- a/dom/media/platforms/SimpleMap.h
+++ b/dom/media/platforms/SimpleMap.h
@@ -5,49 +5,101 @@
#ifndef mozilla_SimpleMap_h
#define mozilla_SimpleMap_h
+#include <utility>
+
+#include "mozilla/Maybe.h"
#include "mozilla/Mutex.h"
#include "nsTArray.h"
-#include <utility>
-
namespace mozilla {
-template <typename T>
+struct ThreadSafePolicy {
+ struct PolicyLock {
+ explicit PolicyLock(const char* aName) : mMutex(aName) {}
+ Mutex mMutex MOZ_UNANNOTATED;
+ };
+ PolicyLock& mPolicyLock;
+ explicit ThreadSafePolicy(PolicyLock& aPolicyLock)
+ : mPolicyLock(aPolicyLock) {
+ mPolicyLock.mMutex.Lock();
+ }
+ ~ThreadSafePolicy() { mPolicyLock.mMutex.Unlock(); }
+};
+
+struct NoOpPolicy {
+ struct PolicyLock {
+ explicit PolicyLock(const char*) {}
+ };
+ explicit NoOpPolicy(PolicyLock&) {}
+ ~NoOpPolicy() = default;
+};
+
+// A map employing an array instead of a hash table to optimize performance,
+// particularly beneficial when the number of expected items in the map is
+// small.
+template <typename K, typename V, typename Policy = NoOpPolicy>
class SimpleMap {
- public:
- typedef std::pair<int64_t, T> Element;
+ using ElementType = std::pair<K, V>;
+ using MapType = AutoTArray<ElementType, 16>;
- SimpleMap() : mMutex("SimpleMap") {}
+ public:
+ SimpleMap() : mLock("SimpleMap"){};
+ // Check if aKey is in the map.
+ bool Contains(const K& aKey) {
+ struct Comparator {
+ bool Equals(const ElementType& aElement, const K& aKey) const {
+ return aElement.first == aKey;
+ }
+ };
+ Policy guard(mLock);
+ return mMap.Contains(aKey, Comparator());
+ }
// Insert Key and Value pair at the end of our map.
- void Insert(int64_t aKey, const T& aValue) {
- MutexAutoLock lock(mMutex);
+ void Insert(const K& aKey, const V& aValue) {
+ Policy guard(mLock);
mMap.AppendElement(std::make_pair(aKey, aValue));
}
// Sets aValue matching aKey and remove it from the map if found.
// The element returned is the first one found.
// Returns true if found, false otherwise.
- bool Find(int64_t aKey, T& aValue) {
- MutexAutoLock lock(mMutex);
+ bool Find(const K& aKey, V& aValue) {
+ if (Maybe<V> v = Take(aKey)) {
+ aValue = v.extract();
+ return true;
+ }
+ return false;
+ }
+ // Take the value matching aKey and remove it from the map if found.
+ Maybe<V> Take(const K& aKey) {
+ Policy guard(mLock);
for (uint32_t i = 0; i < mMap.Length(); i++) {
- Element& element = mMap[i];
+ ElementType& element = mMap[i];
if (element.first == aKey) {
- aValue = element.second;
+ Maybe<V> value = Some(element.second);
mMap.RemoveElementAt(i);
- return true;
+ return value;
}
}
- return false;
+ return Nothing();
}
// Remove all elements of the map.
void Clear() {
- MutexAutoLock lock(mMutex);
+ Policy guard(mLock);
mMap.Clear();
}
+ // Iterate through all elements of the map and call the function F.
+ template <typename F>
+ void ForEach(F&& aCallback) {
+ Policy guard(mLock);
+ for (const auto& element : mMap) {
+ aCallback(element.first, element.second);
+ }
+ }
private:
- Mutex mMutex MOZ_UNANNOTATED; // To protect mMap.
- AutoTArray<Element, 16> mMap;
+ typename Policy::PolicyLock mLock;
+ MapType mMap;
};
} // namespace mozilla
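
With locking now a template policy, SimpleMap users pick thread safety per
call site instead of paying for a Mutex everywhere. A short usage sketch
against the class as defined above (the key/value types are arbitrary):

    // NoOpPolicy (the default) compiles the lock away; ThreadSafePolicy wraps
    // each operation in a Mutex.
    using LocalMap = SimpleMap<int64_t, int64_t>;                     // unlocked
    using SharedMap = SimpleMap<int64_t, int64_t, ThreadSafePolicy>;  // locked

    void SimpleMapDemo() {
      SharedMap map;
      map.Insert(42, 1000);
      if (map.Contains(42)) {
        if (Maybe<int64_t> v = map.Take(42)) {
          // *v == 1000; Take() also removed the entry.
        }
      }
      map.ForEach([](const int64_t& aKey, const int64_t& aValue) {
        // Not reached: the map is empty after Take().
      });
    }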
diff --git a/dom/media/platforms/agnostic/AOMDecoder.cpp b/dom/media/platforms/agnostic/AOMDecoder.cpp
index cb7f784848..284c209d51 100644
--- a/dom/media/platforms/agnostic/AOMDecoder.cpp
+++ b/dom/media/platforms/agnostic/AOMDecoder.cpp
@@ -284,6 +284,8 @@ RefPtr<MediaDataDecoder::DecodePromise> AOMDecoder::ProcessDecode(
aStage.SetYUVColorSpace(b.mYUVColorSpace);
aStage.SetColorRange(b.mColorRange);
aStage.SetColorDepth(b.mColorDepth);
+ aStage.SetStartTimeAndEndTime(v->mTime.ToMicroseconds(),
+ v->GetEndTime().ToMicroseconds());
});
results.AppendElement(std::move(v));
}
diff --git a/dom/media/platforms/agnostic/DAV1DDecoder.cpp b/dom/media/platforms/agnostic/DAV1DDecoder.cpp
index e93ceb27a1..e7339dd8a9 100644
--- a/dom/media/platforms/agnostic/DAV1DDecoder.cpp
+++ b/dom/media/platforms/agnostic/DAV1DDecoder.cpp
@@ -352,6 +352,8 @@ Result<already_AddRefed<VideoData>, MediaResult> DAV1DDecoder::ConstructImage(
aStage.SetYUVColorSpace(b.mYUVColorSpace);
aStage.SetColorRange(b.mColorRange);
aStage.SetColorDepth(b.mColorDepth);
+ aStage.SetStartTimeAndEndTime(aPicture.m.timestamp,
+ aPicture.m.timestamp + aPicture.m.duration);
});
return VideoData::CreateAndCopyData(
diff --git a/dom/media/platforms/agnostic/TheoraDecoder.cpp b/dom/media/platforms/agnostic/TheoraDecoder.cpp
index d60093a204..30dde14697 100644
--- a/dom/media/platforms/agnostic/TheoraDecoder.cpp
+++ b/dom/media/platforms/agnostic/TheoraDecoder.cpp
@@ -236,6 +236,8 @@ RefPtr<MediaDataDecoder::DecodePromise> TheoraDecoder::ProcessDecode(
aStage.SetYUVColorSpace(b.mYUVColorSpace);
aStage.SetColorRange(b.mColorRange);
aStage.SetColorDepth(b.mColorDepth);
+ aStage.SetStartTimeAndEndTime(v->mTime.ToMicroseconds(),
+ v->GetEndTime().ToMicroseconds());
});
});
diff --git a/dom/media/platforms/agnostic/VPXDecoder.cpp b/dom/media/platforms/agnostic/VPXDecoder.cpp
index 1b07606bd5..637c6a6452 100644
--- a/dom/media/platforms/agnostic/VPXDecoder.cpp
+++ b/dom/media/platforms/agnostic/VPXDecoder.cpp
@@ -272,6 +272,8 @@ RefPtr<MediaDataDecoder::DecodePromise> VPXDecoder::ProcessDecode(
aStage.SetYUVColorSpace(b.mYUVColorSpace);
aStage.SetColorRange(b.mColorRange);
aStage.SetColorDepth(b.mColorDepth);
+ aStage.SetStartTimeAndEndTime(v->mTime.ToMicroseconds(),
+ v->GetEndTime().ToMicroseconds());
});
});
diff --git a/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.cpp b/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.cpp
index b964036a4a..06b5f7c476 100644
--- a/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.cpp
+++ b/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.cpp
@@ -114,15 +114,17 @@ void GMPVideoDecoder::Decoded(GMPVideoi420Frame* aDecodedFrame) {
RefPtr<VideoData> v = r.unwrap();
MOZ_ASSERT(v);
- mPerformanceRecorder.Record(static_cast<int64_t>(decodedFrame->Timestamp()),
- [&](DecodeStage& aStage) {
- aStage.SetImageFormat(DecodeStage::YUV420P);
- aStage.SetResolution(decodedFrame->Width(),
- decodedFrame->Height());
- aStage.SetYUVColorSpace(b.mYUVColorSpace);
- aStage.SetColorDepth(b.mColorDepth);
- aStage.SetColorRange(b.mColorRange);
- });
+ mPerformanceRecorder.Record(
+ static_cast<int64_t>(decodedFrame->Timestamp()),
+ [&](DecodeStage& aStage) {
+ aStage.SetImageFormat(DecodeStage::YUV420P);
+ aStage.SetResolution(decodedFrame->Width(), decodedFrame->Height());
+ aStage.SetYUVColorSpace(b.mYUVColorSpace);
+ aStage.SetColorDepth(b.mColorDepth);
+ aStage.SetColorRange(b.mColorRange);
+ aStage.SetStartTimeAndEndTime(v->mTime.ToMicroseconds(),
+ v->GetEndTime().ToMicroseconds());
+ });
if (mReorderFrames) {
mReorderQueue.Push(std::move(v));
@@ -130,11 +132,11 @@ void GMPVideoDecoder::Decoded(GMPVideoi420Frame* aDecodedFrame) {
mUnorderedData.AppendElement(std::move(v));
}
- if (mSamples.IsEmpty()) {
- // If we have no remaining samples in the table, then we have processed
- // all outstanding decode requests.
- ProcessReorderQueue(mDecodePromise, __func__);
- }
+ if (mSamples.IsEmpty()) {
+ // If we have no remaining samples in the table, then we have processed
+ // all outstanding decode requests.
+ ProcessReorderQueue(mDecodePromise, __func__);
+ }
}
void GMPVideoDecoder::ReceivedDecodedReferenceFrame(const uint64_t aPictureId) {
@@ -201,7 +203,7 @@ void GMPVideoDecoder::Terminated() {
}
void GMPVideoDecoder::ProcessReorderQueue(
- MozPromiseHolder<DecodePromise>& aPromise, const char* aMethodName) {
+ MozPromiseHolder<DecodePromise>& aPromise, StaticString aMethodName) {
if (aPromise.IsEmpty()) {
return;
}
diff --git a/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.h b/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.h
index 1f0f59c685..6f831e5fbc 100644
--- a/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.h
+++ b/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.h
@@ -72,7 +72,7 @@ class GMPVideoDecoder final : public MediaDataDecoder,
virtual GMPUniquePtr<GMPVideoEncodedFrame> CreateFrame(MediaRawData* aSample);
virtual const VideoInfo& GetConfig() const;
void ProcessReorderQueue(MozPromiseHolder<DecodePromise>& aPromise,
- const char* aMethodName);
+ StaticString aMethodName);
private:
~GMPVideoDecoder() = default;
diff --git a/dom/media/platforms/android/AndroidDecoderModule.cpp b/dom/media/platforms/android/AndroidDecoderModule.cpp
index fff8669a74..21d0ede270 100644
--- a/dom/media/platforms/android/AndroidDecoderModule.cpp
+++ b/dom/media/platforms/android/AndroidDecoderModule.cpp
@@ -64,21 +64,28 @@ AndroidDecoderModule::AndroidDecoderModule(CDMProxy* aProxy) {
mProxy = static_cast<MediaDrmCDMProxy*>(aProxy);
}
-StaticAutoPtr<nsTArray<nsCString>> AndroidDecoderModule::sSupportedSwMimeTypes;
-StaticAutoPtr<nsTArray<nsCString>> AndroidDecoderModule::sSupportedHwMimeTypes;
-StaticAutoPtr<MediaCodecsSupported> AndroidDecoderModule::sSupportedCodecs;
+/* static */ bool AndroidDecoderModule::AreSupportedMimeTypesReady() {
+ StaticMutexAutoLock lock(sMutex);
+ return sSupportedSwMimeTypes && sSupportedHwMimeTypes;
+}
+
+/* static */ bool AndroidDecoderModule::IsSupportedCodecsReady() {
+ StaticMutexAutoLock lock(sMutex);
+ return sSupportedCodecs;
+}
/* static */
media::MediaCodecsSupported AndroidDecoderModule::GetSupportedCodecs() {
- if (!sSupportedSwMimeTypes || !sSupportedHwMimeTypes || !sSupportedCodecs) {
+ if (!AreSupportedMimeTypesReady() || !IsSupportedCodecsReady()) {
SetSupportedMimeTypes();
}
+ StaticMutexAutoLock lock(sMutex);
return *sSupportedCodecs;
}
DecodeSupportSet AndroidDecoderModule::SupportsMimeType(
const nsACString& aMimeType) {
- if (!sSupportedSwMimeTypes) {
+ if (!AreSupportedMimeTypesReady()) {
SetSupportedMimeTypes();
}
@@ -135,13 +142,16 @@ DecodeSupportSet AndroidDecoderModule::SupportsMimeType(
// If a codec has no special handling or can't be determined from the
// MIME type string, check if the MIME type string itself is supported.
- if (sSupportedHwMimeTypes &&
- sSupportedHwMimeTypes->Contains(TranslateMimeType(aMimeType))) {
- return DecodeSupport::HardwareDecode;
- }
- if (sSupportedSwMimeTypes &&
- sSupportedSwMimeTypes->Contains(TranslateMimeType(aMimeType))) {
- return DecodeSupport::SoftwareDecode;
+ {
+ StaticMutexAutoLock lock(sMutex);
+ if (sSupportedHwMimeTypes &&
+ sSupportedHwMimeTypes->Contains(TranslateMimeType(aMimeType))) {
+ return DecodeSupport::HardwareDecode;
+ }
+ if (sSupportedSwMimeTypes &&
+ sSupportedSwMimeTypes->Contains(TranslateMimeType(aMimeType))) {
+ return DecodeSupport::SoftwareDecode;
+ }
}
return media::DecodeSupportSet{};
}
@@ -179,24 +189,45 @@ void AndroidDecoderModule::SetSupportedMimeTypes() {
// Inbound MIME types prefixed with SW/HW need to be processed
void AndroidDecoderModule::SetSupportedMimeTypes(
nsTArray<nsCString>&& aSupportedTypes) {
+ StaticMutexAutoLock lock(sMutex);
// Return if support is already cached
if (sSupportedSwMimeTypes && sSupportedHwMimeTypes && sSupportedCodecs) {
return;
}
if (!sSupportedSwMimeTypes) {
sSupportedSwMimeTypes = new nsTArray<nsCString>;
- ClearOnShutdown(&sSupportedSwMimeTypes);
+ if (NS_IsMainThread()) {
+ ClearOnShutdown(&sSupportedSwMimeTypes);
+ } else {
+ Unused << NS_DispatchToMainThread(NS_NewRunnableFunction(__func__, []() {
+ StaticMutexAutoLock lock(sMutex);
+ ClearOnShutdown(&sSupportedSwMimeTypes);
+ }));
+ }
}
if (!sSupportedHwMimeTypes) {
sSupportedHwMimeTypes = new nsTArray<nsCString>;
- ClearOnShutdown(&sSupportedHwMimeTypes);
+ if (NS_IsMainThread()) {
+ ClearOnShutdown(&sSupportedHwMimeTypes);
+ } else {
+ Unused << NS_DispatchToMainThread(NS_NewRunnableFunction(__func__, []() {
+ StaticMutexAutoLock lock(sMutex);
+ ClearOnShutdown(&sSupportedHwMimeTypes);
+ }));
+ }
}
if (!sSupportedCodecs) {
sSupportedCodecs = new MediaCodecsSupported();
- ClearOnShutdown(&sSupportedCodecs);
+ if (NS_IsMainThread()) {
+ ClearOnShutdown(&sSupportedCodecs);
+ } else {
+ Unused << NS_DispatchToMainThread(NS_NewRunnableFunction(__func__, []() {
+ StaticMutexAutoLock lock(sMutex);
+ ClearOnShutdown(&sSupportedCodecs);
+ }));
+ }
}
- DecodeSupportSet support;
// Process each MIME type string
for (const auto& s : aSupportedTypes) {
// Verify MIME type string present
@@ -212,12 +243,13 @@ void AndroidDecoderModule::SetSupportedMimeTypes(
// Extract SW/HW support prefix
const auto caps = Substring(s, 0, 2);
+ DecodeSupport support{};
if (caps == "SW"_ns) {
sSupportedSwMimeTypes->AppendElement(mimeType);
- support += DecodeSupport::SoftwareDecode;
+ support = DecodeSupport::SoftwareDecode;
} else if (caps == "HW"_ns) {
sSupportedHwMimeTypes->AppendElement(mimeType);
- support += DecodeSupport::HardwareDecode;
+ support = DecodeSupport::HardwareDecode;
} else {
SLOG("Error parsing acceleration info from JNI codec string %s",
s.Data());
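The lazy-initialization change above repeats one pattern three times: take the StaticMutex, allocate the static container if needed, then register ClearOnShutdown — which must run on the main thread — dispatching a runnable when called from elsewhere. A condensed sketch of that pattern, with an invented helper name (ClearOnShutdownAnyThread is not a real Gecko API):

    // Hypothetical helper; assumes ClearOnShutdown() is main-thread-only.
    template <typename T>
    void ClearOnShutdownAnyThread(StaticAutoPtr<T>* aPtr, StaticMutex& aMutex) {
      if (NS_IsMainThread()) {
        ClearOnShutdown(aPtr);
        return;
      }
      Unused << NS_DispatchToMainThread(
          NS_NewRunnableFunction(__func__, [aPtr, &aMutex]() {
            // Re-take the lock: the pointer is MOZ_GUARDED_BY the mutex.
            StaticMutexAutoLock lock(aMutex);
            ClearOnShutdown(aPtr);
          }));
    }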
diff --git a/dom/media/platforms/android/AndroidDecoderModule.h b/dom/media/platforms/android/AndroidDecoderModule.h
index 37a0f08588..5550e123f3 100644
--- a/dom/media/platforms/android/AndroidDecoderModule.h
+++ b/dom/media/platforms/android/AndroidDecoderModule.h
@@ -54,16 +54,25 @@ class AndroidDecoderModule : public PlatformDecoderModule {
private:
explicit AndroidDecoderModule(CDMProxy* aProxy = nullptr);
virtual ~AndroidDecoderModule() = default;
+
+ static bool AreSupportedMimeTypesReady();
+ static bool IsSupportedCodecsReady();
+
RefPtr<MediaDrmCDMProxy> mProxy;
// SW compatible MIME type strings
- static StaticAutoPtr<nsTArray<nsCString>> sSupportedSwMimeTypes;
+ static inline StaticAutoPtr<nsTArray<nsCString>> sSupportedSwMimeTypes
+ MOZ_GUARDED_BY(sMutex);
// HW compatible MIME type strings
- static StaticAutoPtr<nsTArray<nsCString>> sSupportedHwMimeTypes;
+ static inline StaticAutoPtr<nsTArray<nsCString>> sSupportedHwMimeTypes
+ MOZ_GUARDED_BY(sMutex);
// EnumSet containing SW/HW codec support information parsed from
// MIME type strings. If a specific codec could not be determined
// it will not be included in this EnumSet. All supported MIME type strings
// are still stored in sSupportedSwMimeTypes and sSupportedHwMimeTypes.
- static StaticAutoPtr<media::MediaCodecsSupported> sSupportedCodecs;
+ static inline StaticAutoPtr<media::MediaCodecsSupported> sSupportedCodecs
+ MOZ_GUARDED_BY(sMutex);
+
+ static inline StaticMutex sMutex;
};
extern LazyLogModule sAndroidDecoderModuleLog;
diff --git a/dom/media/platforms/android/AndroidEncoderModule.cpp b/dom/media/platforms/android/AndroidEncoderModule.cpp
index 15b23330e2..23c76cba5f 100644
--- a/dom/media/platforms/android/AndroidEncoderModule.cpp
+++ b/dom/media/platforms/android/AndroidEncoderModule.cpp
@@ -29,6 +29,9 @@ bool AndroidEncoderModule::Supports(const EncoderConfig& aConfig) const {
if (!CanLikelyEncode(aConfig)) {
return false;
}
+ if (aConfig.mScalabilityMode != ScalabilityMode::None) {
+ return false;
+ }
return SupportsCodec(aConfig.mCodec);
}
diff --git a/dom/media/platforms/android/RemoteDataDecoder.cpp b/dom/media/platforms/android/RemoteDataDecoder.cpp
index f0fbc7a77c..260b70abdb 100644
--- a/dom/media/platforms/android/RemoteDataDecoder.cpp
+++ b/dom/media/platforms/android/RemoteDataDecoder.cpp
@@ -527,6 +527,8 @@ class RemoteVideoDecoder final : public RemoteDataDecoder {
});
aStage.SetResolution(v->mImage->GetSize().Width(),
v->mImage->GetSize().Height());
+ aStage.SetStartTimeAndEndTime(v->mTime.ToMicroseconds(),
+ v->GetEndTime().ToMicroseconds());
});
RemoteDataDecoder::UpdateOutputStatus(std::move(v));
@@ -574,7 +576,7 @@ class RemoteVideoDecoder final : public RemoteDataDecoder {
bool mIsHardwareAccelerated = false;
  // Accessed on mThread and the reader's thread. SimpleMap is
  // thread-safe, so this is fine.
- SimpleMap<InputInfo> mInputInfos;
+ SimpleMap<int64_t, InputInfo, ThreadSafePolicy> mInputInfos;
// Only accessed on mThread.
Maybe<TimeUnit> mSeekTarget;
Maybe<TimeUnit> mLatestOutputTime;
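The SimpleMap change above (repeated in the FFmpeg encoder/decoder headers below) reflects the map growing explicit key and locking-policy template parameters. A rough stand-alone analogue of a policy-based map, with invented names — this is not Gecko's SimpleMap implementation:

    #include <mutex>
    #include <utility>
    #include <vector>

    struct NoLockPolicy {
      struct AutoLock {};
      AutoLock Lock() { return {}; }
    };

    class ThreadSafePolicySketch {
     public:
      std::unique_lock<std::mutex> Lock() {
        return std::unique_lock<std::mutex>(mMutex);
      }
     private:
      std::mutex mMutex;
    };

    template <typename K, typename V, typename Policy = NoLockPolicy>
    class SimpleMapSketch {
     public:
      void Insert(const K& aKey, const V& aValue) {
        auto lock = mPolicy.Lock();
        mEntries.emplace_back(aKey, aValue);
      }
      // Removes and returns the value for aKey, if present.
      bool Take(const K& aKey, V& aOut) {
        auto lock = mPolicy.Lock();
        for (auto it = mEntries.begin(); it != mEntries.end(); ++it) {
          if (it->first == aKey) {
            aOut = std::move(it->second);
            mEntries.erase(it);
            return true;
          }
        }
        return false;
      }
     private:
      Policy mPolicy;
      std::vector<std::pair<K, V>> mEntries;
    };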
diff --git a/dom/media/platforms/apple/AppleEncoderModule.cpp b/dom/media/platforms/apple/AppleEncoderModule.cpp
index e18d9a05c4..6fa4a53ab5 100644
--- a/dom/media/platforms/apple/AppleEncoderModule.cpp
+++ b/dom/media/platforms/apple/AppleEncoderModule.cpp
@@ -27,6 +27,9 @@ bool AppleEncoderModule::Supports(const EncoderConfig& aConfig) const {
if (!CanLikelyEncode(aConfig)) {
return false;
}
+ if (aConfig.mScalabilityMode != ScalabilityMode::None) {
+ return false;
+ }
return aConfig.mCodec == CodecType::H264;
}
diff --git a/dom/media/platforms/apple/AppleVTDecoder.cpp b/dom/media/platforms/apple/AppleVTDecoder.cpp
index 6a70ed19d5..17d462c8c9 100644
--- a/dom/media/platforms/apple/AppleVTDecoder.cpp
+++ b/dom/media/platforms/apple/AppleVTDecoder.cpp
@@ -575,6 +575,8 @@ void AppleVTDecoder::OutputFrame(CVPixelBufferRef aImage,
aStage.SetColorDepth(mColorDepth);
aStage.SetYUVColorSpace(mColorSpace);
aStage.SetColorRange(mColorRange);
+ aStage.SetStartTimeAndEndTime(data->mTime.ToMicroseconds(),
+ data->GetEndTime().ToMicroseconds());
});
// Frames come out in DTS order but we need to output them
diff --git a/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp b/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp
index 1e8e488e25..381cbf71a8 100644
--- a/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp
@@ -5,6 +5,7 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "FFmpegAudioDecoder.h"
+#include "FFmpegUtils.h"
#include "AudioSampleFormat.h"
#include "FFmpegLog.h"
#include "TimeUnits.h"
@@ -250,7 +251,7 @@ MediaResult FFmpegAudioDecoder<LIBAV_VER>::PostProcessOutput(
aSample->mDuration.ToString().get(),
mLib->av_get_sample_fmt_name(mFrame->format));
- uint32_t numChannels = mCodecContext->channels;
+ uint32_t numChannels = ChannelCount(mCodecContext);
uint32_t samplingRate = mCodecContext->sample_rate;
if (!numChannels) {
numChannels = mAudioInfo.mChannels;
@@ -284,7 +285,7 @@ MediaResult FFmpegAudioDecoder<LIBAV_VER>::PostProcessOutput(
RefPtr<AudioData> data =
new AudioData(aSample->mOffset, pts, std::move(audio), numChannels,
- samplingRate, mCodecContext->channel_layout);
+ samplingRate, numChannels);
MOZ_ASSERT(duration == data->mDuration, "must be equal");
aResults.AppendElement(std::move(data));
@@ -395,16 +396,23 @@ MediaResult FFmpegAudioDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample,
DecodedData& aResults) {
MOZ_ASSERT(mTaskQueue->IsOnCurrentThread());
PROCESS_DECODE_LOG(aSample);
- AVPacket packet;
- mLib->av_init_packet(&packet);
+ AVPacket* packet;
+#if LIBAVCODEC_VERSION_MAJOR >= 61
+ packet = mLib->av_packet_alloc();
+ auto freePacket = MakeScopeExit([&] { mLib->av_packet_free(&packet); });
+#else
+ AVPacket packet_mem;
+ packet = &packet_mem;
+ mLib->av_init_packet(packet);
+#endif
FFMPEG_LOG("FFmpegAudioDecoder::DoDecode: %d bytes, [%s,%s] (Duration: %s)",
aSize, aSample->mTime.ToString().get(),
aSample->GetEndTime().ToString().get(),
aSample->mDuration.ToString().get());
- packet.data = const_cast<uint8_t*>(aData);
- packet.size = aSize;
+ packet->data = const_cast<uint8_t*>(aData);
+ packet->size = aSize;
if (aGotFrame) {
*aGotFrame = false;
@@ -418,8 +426,9 @@ MediaResult FFmpegAudioDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample,
}
bool decoded = false;
- auto rv = DecodeUsingFFmpeg(&packet, decoded, aSample, aResults, aGotFrame);
+ auto rv = DecodeUsingFFmpeg(packet, decoded, aSample, aResults, aGotFrame);
NS_ENSURE_SUCCESS(rv, rv);
+
return NS_OK;
}
diff --git a/dom/media/platforms/ffmpeg/FFmpegAudioEncoder.cpp b/dom/media/platforms/ffmpeg/FFmpegAudioEncoder.cpp
index 28db667732..284d1067a9 100644
--- a/dom/media/platforms/ffmpeg/FFmpegAudioEncoder.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegAudioEncoder.cpp
@@ -101,12 +101,13 @@ nsresult FFmpegAudioEncoder<LIBAV_VER>::InitSpecific() {
// And now the audio-specific part
mCodecContext->sample_rate = AssertedCast<int>(mConfig.mSampleRate);
- mCodecContext->channels = AssertedCast<int>(mConfig.mNumberOfChannels);
#if LIBAVCODEC_VERSION_MAJOR >= 60
  // Gecko's ordering intentionally matches ffmpeg's ordering
mLib->av_channel_layout_default(&mCodecContext->ch_layout,
- AssertedCast<int>(mCodecContext->channels));
+ AssertedCast<int>(mConfig.mNumberOfChannels));
+#else
+ mCodecContext->channels = AssertedCast<int>(mConfig.mNumberOfChannels);
#endif
switch (mConfig.mCodec) {
@@ -206,7 +207,7 @@ FFmpegAudioEncoder<LIBAV_VER>::EncodeOnePacket(Span<float> aSamples,
// packets smaller than the packet size are allowed when draining.
MOZ_ASSERT(AssertedCast<int>(frameCount) <= mCodecContext->frame_size);
- mFrame->channels = AssertedCast<int>(mConfig.mNumberOfChannels);
+ ChannelCount(mFrame) = AssertedCast<int>(mConfig.mNumberOfChannels);
# if LIBAVCODEC_VERSION_MAJOR >= 60
int rv = mLib->av_channel_layout_copy(&mFrame->ch_layout,
@@ -229,10 +230,10 @@ FFmpegAudioEncoder<LIBAV_VER>::EncodeOnePacket(Span<float> aSamples,
AVRational{.num = 1, .den = static_cast<int>(mConfig.mSampleRate)};
# endif
mFrame->pts = aPts.ToTicksAtRate(mConfig.mSampleRate);
- mFrame->pkt_duration = frameCount;
# if LIBAVCODEC_VERSION_MAJOR >= 60
mFrame->duration = frameCount;
# else
+ mFrame->pkt_duration = frameCount;
// Save duration in the time_base unit.
mDurationMap.Insert(mFrame->pts, mFrame->pkt_duration);
# endif
@@ -258,7 +259,7 @@ FFmpegAudioEncoder<LIBAV_VER>::EncodeOnePacket(Span<float> aSamples,
MOZ_ASSERT(mCodecContext->sample_fmt == AV_SAMPLE_FMT_FLTP);
for (uint32_t i = 0; i < mConfig.mNumberOfChannels; i++) {
DeinterleaveAndConvertBuffer(aSamples.data(), mFrame->nb_samples,
- mFrame->channels, mFrame->data);
+ mConfig.mNumberOfChannels, mFrame->data);
}
}
diff --git a/dom/media/platforms/ffmpeg/FFmpegDataDecoder.cpp b/dom/media/platforms/ffmpeg/FFmpegDataDecoder.cpp
index 30422987cf..e86ff63dba 100644
--- a/dom/media/platforms/ffmpeg/FFmpegDataDecoder.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegDataDecoder.cpp
@@ -231,11 +231,22 @@ FFmpegDataDecoder<LIBAV_VER>::ProcessDrain() {
empty->mTimecode = mLastInputDts;
bool gotFrame = false;
DecodedData results;
- // When draining the FFmpeg decoder will return either a single frame at a
- // time until gotFrame is set to false; or return a block of frames with
- // NS_ERROR_DOM_MEDIA_END_OF_STREAM
- while (NS_SUCCEEDED(DoDecode(empty, &gotFrame, results)) && gotFrame) {
- }
+ // When draining the underlying FFmpeg decoder without encountering any
+ // problems, DoDecode will either return a single frame at a time until
+ // gotFrame is set to false, or it will return a block of frames with
+ // NS_ERROR_DOM_MEDIA_END_OF_STREAM (EOS). However, if any issue arises, such
+ // as pending data in the pipeline being corrupt or invalid, non-EOS errors
+ // like NS_ERROR_DOM_MEDIA_DECODE_ERR will be returned and must be handled
+ // accordingly.
+ do {
+ MediaResult r = DoDecode(empty, &gotFrame, results);
+ if (NS_FAILED(r)) {
+ if (r.Code() == NS_ERROR_DOM_MEDIA_END_OF_STREAM) {
+ break;
+ }
+ return DecodePromise::CreateAndReject(r, __func__);
+ }
+ } while (gotFrame);
return DecodePromise::CreateAndResolve(std::move(results), __func__);
}
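For context, the drain behavior the new loop handles comes from FFmpeg's send/receive API: sending a null packet puts the decoder into drain mode, after which receive_frame yields buffered frames until AVERROR_EOF; any other negative value is a genuine error (which DoDecode maps to a non-EOS MediaResult, as the avcodec_send_packet change further down shows). A minimal sketch against the plain FFmpeg API:

    #include <libavcodec/avcodec.h>

    // Returns 0 once fully drained, or a negative AVERROR on real failure.
    int DrainAll(AVCodecContext* aCtx, AVFrame* aFrame) {
      int err = avcodec_send_packet(aCtx, nullptr);  // enter drain mode
      if (err < 0) {
        return err;
      }
      while ((err = avcodec_receive_frame(aCtx, aFrame)) >= 0) {
        // ... hand the frame to the caller ...
        av_frame_unref(aFrame);
      }
      return err == AVERROR_EOF ? 0 : err;
    }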
diff --git a/dom/media/platforms/ffmpeg/FFmpegDataEncoder.h b/dom/media/platforms/ffmpeg/FFmpegDataEncoder.h
index de80ed36ca..c9a4585913 100644
--- a/dom/media/platforms/ffmpeg/FFmpegDataEncoder.h
+++ b/dom/media/platforms/ffmpeg/FFmpegDataEncoder.h
@@ -28,7 +28,7 @@ class FFmpegDataEncoder : public MediaDataEncoder {};
template <>
class FFmpegDataEncoder<LIBAV_VER> : public MediaDataEncoder {
- using DurationMap = SimpleMap<int64_t>;
+ using DurationMap = SimpleMap<int64_t, int64_t, ThreadSafePolicy>;
public:
FFmpegDataEncoder(const FFmpegLibWrapper* aLib, AVCodecID aCodecID,
diff --git a/dom/media/platforms/ffmpeg/FFmpegEncoderModule.cpp b/dom/media/platforms/ffmpeg/FFmpegEncoderModule.cpp
index b6e734268d..cb507a0810 100644
--- a/dom/media/platforms/ffmpeg/FFmpegEncoderModule.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegEncoderModule.cpp
@@ -20,6 +20,13 @@ bool FFmpegEncoderModule<V>::Supports(const EncoderConfig& aConfig) const {
if (!CanLikelyEncode(aConfig)) {
return false;
}
+ // We only support L1T2 and L1T3 ScalabilityMode in VP8 and VP9 encoders via
+ // libvpx for now.
+  if (aConfig.mScalabilityMode != ScalabilityMode::None) {
+ if (aConfig.mCodec != CodecType::VP8 && aConfig.mCodec != CodecType::VP9) {
+ return false;
+ }
+ }
return SupportsCodec(aConfig.mCodec) != AV_CODEC_ID_NONE;
}
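In the WebRTC SVC notation used here, L1T2 and L1T3 mean one spatial layer with two or three temporal layers respectively. A small sketch of what the mode implies, assuming the enum exposes exactly None/L1T2/L1T3 (the helper name is invented):

    // Hypothetical helper for illustration.
    static int TemporalLayerCount(ScalabilityMode aMode) {
      switch (aMode) {
        case ScalabilityMode::None:
          return 1;
        case ScalabilityMode::L1T2:
          return 2;
        case ScalabilityMode::L1T3:
          return 3;
      }
      return 1;
    }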
diff --git a/dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp b/dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp
index 5fd6102a34..8557a1eb19 100644
--- a/dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp
@@ -69,6 +69,7 @@ FFmpegLibWrapper::LinkResult FFmpegLibWrapper::Link() {
AV_FUNC_58 = 1 << 5,
AV_FUNC_59 = 1 << 6,
AV_FUNC_60 = 1 << 7,
+    AV_FUNC_61 = 1 << 8,
AV_FUNC_AVUTIL_53 = AV_FUNC_53 | AV_FUNC_AVUTIL_MASK,
AV_FUNC_AVUTIL_54 = AV_FUNC_54 | AV_FUNC_AVUTIL_MASK,
AV_FUNC_AVUTIL_55 = AV_FUNC_55 | AV_FUNC_AVUTIL_MASK,
@@ -77,8 +78,10 @@ FFmpegLibWrapper::LinkResult FFmpegLibWrapper::Link() {
AV_FUNC_AVUTIL_58 = AV_FUNC_58 | AV_FUNC_AVUTIL_MASK,
AV_FUNC_AVUTIL_59 = AV_FUNC_59 | AV_FUNC_AVUTIL_MASK,
AV_FUNC_AVUTIL_60 = AV_FUNC_60 | AV_FUNC_AVUTIL_MASK,
+ AV_FUNC_AVUTIL_61 = AV_FUNC_61 | AV_FUNC_AVUTIL_MASK,
AV_FUNC_AVCODEC_ALL = AV_FUNC_53 | AV_FUNC_54 | AV_FUNC_55 | AV_FUNC_56 |
- AV_FUNC_57 | AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60,
+ AV_FUNC_57 | AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 |
+ AV_FUNC_61,
AV_FUNC_AVUTIL_ALL = AV_FUNC_AVCODEC_ALL | AV_FUNC_AVUTIL_MASK
};
@@ -107,6 +110,9 @@ FFmpegLibWrapper::LinkResult FFmpegLibWrapper::Link() {
case 60:
version = AV_FUNC_60;
break;
+ case 61:
+ version = AV_FUNC_61;
+ break;
default:
FFMPEGV_LOG("Unknown avcodec version: %d", macro);
Unlink();
@@ -153,14 +159,17 @@ FFmpegLibWrapper::LinkResult FFmpegLibWrapper::Link() {
AV_FUNC(avcodec_decode_video2, AV_FUNC_53 | AV_FUNC_54 | AV_FUNC_55 |
AV_FUNC_56 | AV_FUNC_57 | AV_FUNC_58)
AV_FUNC(avcodec_find_decoder, AV_FUNC_AVCODEC_ALL)
- AV_FUNC(avcodec_find_decoder_by_name, AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60)
+ AV_FUNC(avcodec_find_decoder_by_name,
+ AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61)
AV_FUNC(avcodec_find_encoder, AV_FUNC_AVCODEC_ALL)
- AV_FUNC(avcodec_find_encoder_by_name, AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60)
+ AV_FUNC(avcodec_find_encoder_by_name,
+ AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61)
AV_FUNC(avcodec_flush_buffers, AV_FUNC_AVCODEC_ALL)
AV_FUNC(avcodec_open2, AV_FUNC_AVCODEC_ALL)
AV_FUNC(avcodec_register_all, AV_FUNC_53 | AV_FUNC_54 | AV_FUNC_55 |
AV_FUNC_56 | AV_FUNC_57 | AV_FUNC_58)
- AV_FUNC(av_init_packet, AV_FUNC_AVCODEC_ALL)
+ AV_FUNC(av_init_packet, (AV_FUNC_55 | AV_FUNC_56 | AV_FUNC_57 | AV_FUNC_58 |
+ AV_FUNC_59 | AV_FUNC_60))
AV_FUNC(av_parser_init, AV_FUNC_AVCODEC_ALL)
AV_FUNC(av_parser_close, AV_FUNC_AVCODEC_ALL)
AV_FUNC(av_parser_parse2, AV_FUNC_AVCODEC_ALL)
@@ -168,53 +177,68 @@ FFmpegLibWrapper::LinkResult FFmpegLibWrapper::Link() {
AV_FUNC(avcodec_alloc_frame, (AV_FUNC_53 | AV_FUNC_54))
AV_FUNC(avcodec_get_frame_defaults, (AV_FUNC_53 | AV_FUNC_54))
AV_FUNC(avcodec_free_frame, AV_FUNC_54)
- AV_FUNC(avcodec_send_packet, AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60)
- AV_FUNC(avcodec_receive_packet, AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60)
- AV_FUNC(avcodec_send_frame, AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60)
- AV_FUNC(avcodec_receive_frame, AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60)
- AV_FUNC(avcodec_default_get_buffer2, (AV_FUNC_55 | AV_FUNC_56 | AV_FUNC_57 |
- AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60))
- AV_FUNC(av_packet_alloc, (AV_FUNC_57 | AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60))
- AV_FUNC(av_packet_unref, (AV_FUNC_57 | AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60))
- AV_FUNC(av_packet_free, (AV_FUNC_57 | AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60))
+ AV_FUNC(avcodec_send_packet,
+ AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61)
+ AV_FUNC(avcodec_receive_packet,
+ AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61)
+ AV_FUNC(avcodec_send_frame, AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61)
+ AV_FUNC(avcodec_receive_frame,
+ AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61)
+ AV_FUNC(avcodec_default_get_buffer2,
+ (AV_FUNC_55 | AV_FUNC_56 | AV_FUNC_57 | AV_FUNC_58 | AV_FUNC_59 |
+ AV_FUNC_60 | AV_FUNC_61))
+ AV_FUNC(av_packet_alloc,
+ (AV_FUNC_57 | AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61))
+ AV_FUNC(av_packet_unref,
+ (AV_FUNC_57 | AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61))
+ AV_FUNC(av_packet_free,
+ (AV_FUNC_57 | AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61))
AV_FUNC(avcodec_descriptor_get, AV_FUNC_AVCODEC_ALL)
AV_FUNC(av_log_set_level, AV_FUNC_AVUTIL_ALL)
AV_FUNC(av_malloc, AV_FUNC_AVUTIL_ALL)
AV_FUNC(av_freep, AV_FUNC_AVUTIL_ALL)
AV_FUNC(av_frame_alloc,
(AV_FUNC_AVUTIL_55 | AV_FUNC_AVUTIL_56 | AV_FUNC_AVUTIL_57 |
- AV_FUNC_AVUTIL_58 | AV_FUNC_AVUTIL_59 | AV_FUNC_AVUTIL_60))
+ AV_FUNC_AVUTIL_58 | AV_FUNC_AVUTIL_59 | AV_FUNC_AVUTIL_60 |
+ AV_FUNC_AVUTIL_61))
AV_FUNC(av_frame_free,
(AV_FUNC_AVUTIL_55 | AV_FUNC_AVUTIL_56 | AV_FUNC_AVUTIL_57 |
- AV_FUNC_AVUTIL_58 | AV_FUNC_AVUTIL_59 | AV_FUNC_AVUTIL_60))
+ AV_FUNC_AVUTIL_58 | AV_FUNC_AVUTIL_59 | AV_FUNC_AVUTIL_60 |
+ AV_FUNC_AVUTIL_61))
AV_FUNC(av_frame_unref,
(AV_FUNC_AVUTIL_55 | AV_FUNC_AVUTIL_56 | AV_FUNC_AVUTIL_57 |
- AV_FUNC_AVUTIL_58 | AV_FUNC_AVUTIL_59 | AV_FUNC_AVUTIL_60))
+ AV_FUNC_AVUTIL_58 | AV_FUNC_AVUTIL_59 | AV_FUNC_AVUTIL_60 |
+ AV_FUNC_AVUTIL_61))
AV_FUNC(av_frame_get_buffer,
(AV_FUNC_AVUTIL_55 | AV_FUNC_AVUTIL_56 | AV_FUNC_AVUTIL_57 |
- AV_FUNC_AVUTIL_58 | AV_FUNC_AVUTIL_59 | AV_FUNC_AVUTIL_60))
+ AV_FUNC_AVUTIL_58 | AV_FUNC_AVUTIL_59 | AV_FUNC_AVUTIL_60 |
+ AV_FUNC_AVUTIL_61))
AV_FUNC(av_frame_make_writable,
(AV_FUNC_AVUTIL_55 | AV_FUNC_AVUTIL_56 | AV_FUNC_AVUTIL_57 |
- AV_FUNC_AVUTIL_58 | AV_FUNC_AVUTIL_59 | AV_FUNC_AVUTIL_60))
+ AV_FUNC_AVUTIL_58 | AV_FUNC_AVUTIL_59 | AV_FUNC_AVUTIL_60 |
+ AV_FUNC_AVUTIL_61))
AV_FUNC(av_image_check_size, AV_FUNC_AVUTIL_ALL)
AV_FUNC(av_image_get_buffer_size, AV_FUNC_AVUTIL_ALL)
- AV_FUNC_OPTION(av_channel_layout_default, AV_FUNC_AVUTIL_60)
- AV_FUNC_OPTION(av_channel_layout_from_mask, AV_FUNC_AVUTIL_60)
- AV_FUNC_OPTION(av_channel_layout_copy, AV_FUNC_AVUTIL_60)
+ AV_FUNC_OPTION(av_channel_layout_default,
+ AV_FUNC_AVUTIL_60 | AV_FUNC_AVUTIL_61)
+ AV_FUNC_OPTION(av_channel_layout_from_mask,
+ AV_FUNC_AVUTIL_60 | AV_FUNC_AVUTIL_61)
+ AV_FUNC_OPTION(av_channel_layout_copy, AV_FUNC_AVUTIL_60 | AV_FUNC_AVUTIL_61)
AV_FUNC_OPTION(av_buffer_get_opaque,
(AV_FUNC_AVUTIL_56 | AV_FUNC_AVUTIL_57 | AV_FUNC_AVUTIL_58 |
- AV_FUNC_AVUTIL_59 | AV_FUNC_AVUTIL_60))
- AV_FUNC(av_buffer_create,
- (AV_FUNC_AVUTIL_55 | AV_FUNC_AVUTIL_56 | AV_FUNC_AVUTIL_57 |
- AV_FUNC_AVUTIL_58 | AV_FUNC_AVUTIL_59 | AV_FUNC_AVUTIL_60))
+ AV_FUNC_AVUTIL_59 | AV_FUNC_AVUTIL_60 | AV_FUNC_AVUTIL_61))
+ AV_FUNC(
+ av_buffer_create,
+ (AV_FUNC_AVUTIL_55 | AV_FUNC_AVUTIL_56 | AV_FUNC_AVUTIL_57 |
+       AV_FUNC_AVUTIL_58 | AV_FUNC_AVUTIL_59 | AV_FUNC_AVUTIL_60 |
+       AV_FUNC_AVUTIL_61))
AV_FUNC_OPTION(av_frame_get_colorspace,
AV_FUNC_AVUTIL_55 | AV_FUNC_AVUTIL_56 | AV_FUNC_AVUTIL_57 |
AV_FUNC_AVUTIL_58)
AV_FUNC_OPTION(av_frame_get_color_range,
AV_FUNC_AVUTIL_55 | AV_FUNC_AVUTIL_56 | AV_FUNC_AVUTIL_57 |
AV_FUNC_AVUTIL_58)
- AV_FUNC(av_strerror,
- AV_FUNC_AVUTIL_58 | AV_FUNC_AVUTIL_59 | AV_FUNC_AVUTIL_60)
+ AV_FUNC(av_strerror, AV_FUNC_AVUTIL_58 | AV_FUNC_AVUTIL_59 |
+ AV_FUNC_AVUTIL_60 | AV_FUNC_AVUTIL_61)
AV_FUNC(av_get_sample_fmt_name, AV_FUNC_AVUTIL_ALL)
AV_FUNC(av_dict_set, AV_FUNC_AVUTIL_ALL)
AV_FUNC(av_dict_free, AV_FUNC_AVUTIL_ALL)
@@ -224,35 +248,38 @@ FFmpegLibWrapper::LinkResult FFmpegLibWrapper::Link() {
#ifdef MOZ_WIDGET_GTK
AV_FUNC_OPTION_SILENT(avcodec_get_hw_config,
- AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60)
- AV_FUNC_OPTION_SILENT(av_codec_iterate, AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60)
+ AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61)
+ AV_FUNC_OPTION_SILENT(av_codec_iterate,
+ AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61)
AV_FUNC_OPTION_SILENT(av_codec_is_decoder,
- AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60)
+ AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61)
AV_FUNC_OPTION_SILENT(av_hwdevice_ctx_init,
- AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60)
+ AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61)
AV_FUNC_OPTION_SILENT(av_hwdevice_ctx_alloc,
- AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60)
+ AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61)
AV_FUNC_OPTION_SILENT(av_hwdevice_hwconfig_alloc,
- AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60)
+ AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61)
AV_FUNC_OPTION_SILENT(av_hwdevice_get_hwframe_constraints,
- AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60)
+ AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61)
AV_FUNC_OPTION_SILENT(av_hwframe_constraints_free,
- AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60)
- AV_FUNC_OPTION_SILENT(av_buffer_ref,
- AV_FUNC_AVUTIL_58 | AV_FUNC_59 | AV_FUNC_60)
- AV_FUNC_OPTION_SILENT(av_buffer_unref,
- AV_FUNC_AVUTIL_58 | AV_FUNC_59 | AV_FUNC_60)
+ AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61)
+ AV_FUNC_OPTION_SILENT(av_buffer_ref, AV_FUNC_AVUTIL_58 | AV_FUNC_AVUTIL_59 |
+ AV_FUNC_AVUTIL_60 |
+ AV_FUNC_AVUTIL_61)
+ AV_FUNC_OPTION_SILENT(av_buffer_unref, AV_FUNC_AVUTIL_58 | AV_FUNC_AVUTIL_59 |
+ AV_FUNC_AVUTIL_60 |
+ AV_FUNC_AVUTIL_61)
AV_FUNC_OPTION_SILENT(av_hwframe_transfer_get_formats,
- AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60)
+ AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61)
AV_FUNC_OPTION_SILENT(av_hwdevice_ctx_create_derived,
- AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60)
+ AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61)
AV_FUNC_OPTION_SILENT(av_hwframe_ctx_alloc,
- AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60)
- AV_FUNC_OPTION_SILENT(avcodec_get_name,
- AV_FUNC_57 | AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60)
- AV_FUNC_OPTION_SILENT(av_get_pix_fmt_string, AV_FUNC_AVUTIL_58 |
- AV_FUNC_AVUTIL_59 |
- AV_FUNC_AVUTIL_60)
+ AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61)
+ AV_FUNC_OPTION_SILENT(avcodec_get_name, AV_FUNC_57 | AV_FUNC_58 | AV_FUNC_59 |
+ AV_FUNC_60 | AV_FUNC_61)
+ AV_FUNC_OPTION_SILENT(av_get_pix_fmt_string,
+ AV_FUNC_AVUTIL_58 | AV_FUNC_AVUTIL_59 |
+ AV_FUNC_AVUTIL_60 | AV_FUNC_AVUTIL_61)
#endif
AV_FUNC_OPTION(av_tx_init, AV_FUNC_AVUTIL_ALL)
diff --git a/dom/media/platforms/ffmpeg/FFmpegLibWrapper.h b/dom/media/platforms/ffmpeg/FFmpegLibWrapper.h
index 226b4fc8cb..d3b1be90f3 100644
--- a/dom/media/platforms/ffmpeg/FFmpegLibWrapper.h
+++ b/dom/media/platforms/ffmpeg/FFmpegLibWrapper.h
@@ -138,10 +138,12 @@ struct MOZ_ONLY_USED_TO_AVOID_STATIC_CONSTRUCTORS FFmpegLibWrapper {
int flags);
// libavcodec >= v57
- AVPacket* (*av_packet_alloc)(void);
void (*av_packet_unref)(AVPacket* pkt);
void (*av_packet_free)(AVPacket** pkt);
+  // libavcodec >= v57; mandatory from v61, where av_init_packet is gone
+ AVPacket* (*av_packet_alloc)();
+
// libavcodec v58 and later only
int (*avcodec_send_packet)(AVCodecContext* avctx, const AVPacket* avpkt);
int (*avcodec_receive_packet)(AVCodecContext* avctx, AVPacket* avpkt);
diff --git a/dom/media/platforms/ffmpeg/FFmpegRuntimeLinker.cpp b/dom/media/platforms/ffmpeg/FFmpegRuntimeLinker.cpp
index 2019a859e4..81eb2c0441 100644
--- a/dom/media/platforms/ffmpeg/FFmpegRuntimeLinker.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegRuntimeLinker.cpp
@@ -33,6 +33,7 @@ static FFmpegLibWrapper sLibAV;
static const char* sLibs[] = {
// clang-format off
#if defined(XP_DARWIN)
+ "libavcodec.61.dylib",
"libavcodec.60.dylib",
"libavcodec.59.dylib",
"libavcodec.58.dylib",
@@ -45,6 +46,7 @@ static const char* sLibs[] = {
"libavcodec.so", // OpenBSD hardly controls the major/minor library version
                  // of ffmpeg and updates it regularly on ABI/API changes
#else
+ "libavcodec.so.61",
"libavcodec.so.60",
"libavcodec.so.59",
"libavcodec.so.58",
@@ -174,6 +176,9 @@ already_AddRefed<PlatformDecoderModule> FFmpegRuntimeLinker::CreateDecoder() {
case 60:
module = FFmpegDecoderModule<60>::Create(&sLibAV);
break;
+ case 61:
+ module = FFmpegDecoderModule<61>::Create(&sLibAV);
+ break;
default:
module = nullptr;
}
@@ -209,6 +214,9 @@ already_AddRefed<PlatformEncoderModule> FFmpegRuntimeLinker::CreateEncoder() {
case 60:
module = FFmpegEncoderModule<60>::Create(&sLibAV);
break;
+ case 61:
+ module = FFmpegEncoderModule<61>::Create(&sLibAV);
+ break;
default:
module = nullptr;
}
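The runtime linker dispatches on the avcodec major version it actually loaded, so adding FFmpeg 7 support means one more case per factory plus the new library names above. Assuming the wrapper resolved avcodec_version via dlsym, the major version is recovered from the packed version integer, roughly:

    // Sketch: avcodec_version() packs major/minor/micro as 0xMMmmuu.
    unsigned v = sLibAV.avcodec_version();  // e.g. 61.x.y for FFmpeg 7
    unsigned major = (v >> 16) & 0xff;      // what the switch above tests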
diff --git a/dom/media/platforms/ffmpeg/FFmpegUtils.h b/dom/media/platforms/ffmpeg/FFmpegUtils.h
index fe588ed14c..bdbb184cf2 100644
--- a/dom/media/platforms/ffmpeg/FFmpegUtils.h
+++ b/dom/media/platforms/ffmpeg/FFmpegUtils.h
@@ -51,6 +51,36 @@ inline bool IsVideoCodec(AVCodecID aCodecID) {
}
}
+// Access the correct location for the channel count, based on ffmpeg version.
+template <typename T>
+inline int& ChannelCount(T* aObject) {
+#if LIBAVCODEC_VERSION_MAJOR <= 59
+ return aObject->channels;
+#else
+ return aObject->ch_layout.nb_channels;
+#endif
+}
+
+// Access the correct location for the duration, based on ffmpeg version.
+template <typename T>
+inline int64_t& Duration(T* aObject) {
+#if LIBAVCODEC_VERSION_MAJOR < 61
+ return aObject->pkt_duration;
+#else
+ return aObject->duration;
+#endif
+}
+
+// Access the correct location for the duration, based on ffmpeg version.
+template <typename T>
+inline const int64_t& Duration(const T* aObject) {
+#if LIBAVCODEC_VERSION_MAJOR < 61
+ return aObject->pkt_duration;
+#else
+ return aObject->duration;
+#endif
+}
+
} // namespace mozilla
#endif // DOM_MEDIA_PLATFORMS_FFMPEG_FFMPEGUTILS_H_
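These accessors are what let call sites stay version-agnostic; both forms already appear in this patch. Usage, as seen in the audio decoder and video encoder hunks:

    uint32_t numChannels = ChannelCount(mCodecContext);      // channels vs. ch_layout.nb_channels
    Duration(mFrame) = aSample->mDuration.ToMicroseconds();  // pkt_duration vs. duration

Returning a reference makes the helpers usable on both sides of an assignment, which is why the encoder can write through Duration(mFrame).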
diff --git a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
index 3fe46938fd..e116ca594f 100644
--- a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
@@ -7,6 +7,7 @@
#include "FFmpegVideoDecoder.h"
#include "FFmpegLog.h"
+#include "FFmpegUtils.h"
#include "ImageContainer.h"
#include "MP4Decoder.h"
#include "MediaInfo.h"
@@ -45,6 +46,7 @@
# define AV_PIX_FMT_YUV444P PIX_FMT_YUV444P
# define AV_PIX_FMT_YUV444P10LE PIX_FMT_YUV444P10LE
# define AV_PIX_FMT_GBRP PIX_FMT_GBRP
+# define AV_PIX_FMT_GBRP10LE PIX_FMT_GBRP10LE
# define AV_PIX_FMT_NONE PIX_FMT_NONE
# define AV_PIX_FMT_VAAPI_VLD PIX_FMT_VAAPI_VLD
#endif
@@ -136,6 +138,9 @@ static AVPixelFormat ChoosePixelFormat(AVCodecContext* aCodecContext,
case AV_PIX_FMT_GBRP:
FFMPEGV_LOG("Requesting pixel format GBRP.");
return AV_PIX_FMT_GBRP;
+ case AV_PIX_FMT_GBRP10LE:
+ FFMPEGV_LOG("Requesting pixel format GBRP10LE.");
+ return AV_PIX_FMT_GBRP10LE;
default:
break;
}
@@ -209,7 +214,7 @@ template <>
class VAAPIDisplayHolder<LIBAV_VER> {
public:
VAAPIDisplayHolder(FFmpegLibWrapper* aLib, VADisplay aDisplay, int aDRMFd)
- : mLib(aLib), mDisplay(aDisplay), mDRMFd(aDRMFd){};
+ : mLib(aLib), mDisplay(aDisplay), mDRMFd(aDRMFd) {};
~VAAPIDisplayHolder() {
mLib->vaTerminate(mDisplay);
close(mDRMFd);
@@ -612,6 +617,7 @@ static gfx::ColorDepth GetColorDepth(const AVPixelFormat& aFormat) {
case AV_PIX_FMT_YUV420P10LE:
case AV_PIX_FMT_YUV422P10LE:
case AV_PIX_FMT_YUV444P10LE:
+ case AV_PIX_FMT_GBRP10LE:
return gfx::ColorDepth::COLOR_10;
#if LIBAVCODEC_VERSION_MAJOR >= 57
case AV_PIX_FMT_YUV420P12LE:
@@ -629,7 +635,7 @@ static gfx::ColorDepth GetColorDepth(const AVPixelFormat& aFormat) {
}
static bool IsYUVFormat(const AVPixelFormat& aFormat) {
- return aFormat != AV_PIX_FMT_GBRP;
+ return aFormat != AV_PIX_FMT_GBRP && aFormat != AV_PIX_FMT_GBRP10LE;
}
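For context: GBRP and GBRP10LE are planar RGB formats, not YUV, so both are excluded from IsYUVFormat(); all three planes are full resolution, and the 10LE variant stores 10-bit little-endian samples, which is why GetColorDepth() maps it to COLOR_10 and CreateImage() below sizes planes 1 and 2 at the full frame width and height.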
static gfx::YUVColorSpace TransferAVColorSpaceToColorSpace(
@@ -871,7 +877,9 @@ int FFmpegVideoDecoder<LIBAV_VER>::GetVideoBuffer(
aFrame->height = aCodecContext->coded_height;
aFrame->format = aCodecContext->pix_fmt;
aFrame->extended_data = aFrame->data;
+# if LIBAVCODEC_VERSION_MAJOR < 61
aFrame->reordered_opaque = aCodecContext->reordered_opaque;
+# endif
MOZ_ASSERT(aFrame->data[0] && aFrame->data[1] && aFrame->data[2]);
// This will hold a reference to image, and the reference would be dropped
@@ -991,12 +999,7 @@ void FFmpegVideoDecoder<LIBAV_VER>::DecodeStats::UpdateDecodeTimes(
float decodeTime = (now - mDecodeStart).ToMilliseconds();
mDecodeStart = now;
- if (aFrame->pkt_duration <= 0) {
- FFMPEGV_LOG("Incorrect frame duration, skipping decode stats.");
- return;
- }
-
- float frameDuration = aFrame->pkt_duration / 1000.0f;
+ const float frameDuration = Duration(aFrame) / 1000.0f;
mDecodedFrames++;
mAverageFrameDuration =
@@ -1044,19 +1047,27 @@ MediaResult FFmpegVideoDecoder<LIBAV_VER>::DoDecode(
MediaRawData* aSample, uint8_t* aData, int aSize, bool* aGotFrame,
MediaDataDecoder::DecodedData& aResults) {
MOZ_ASSERT(mTaskQueue->IsOnCurrentThread());
- AVPacket packet;
- mLib->av_init_packet(&packet);
+ AVPacket* packet;
+
+#if LIBAVCODEC_VERSION_MAJOR >= 61
+ packet = mLib->av_packet_alloc();
+ auto raii = MakeScopeExit([&]() { mLib->av_packet_free(&packet); });
+#else
+ AVPacket packet_mem;
+ packet = &packet_mem;
+ mLib->av_init_packet(packet);
+#endif
#if LIBAVCODEC_VERSION_MAJOR >= 58
mDecodeStats.DecodeStart();
#endif
- packet.data = aData;
- packet.size = aSize;
- packet.dts = aSample->mTimecode.ToMicroseconds();
- packet.pts = aSample->mTime.ToMicroseconds();
- packet.flags = aSample->mKeyframe ? AV_PKT_FLAG_KEY : 0;
- packet.pos = aSample->mOffset;
+ packet->data = aData;
+ packet->size = aSize;
+ packet->dts = aSample->mTimecode.ToMicroseconds();
+ packet->pts = aSample->mTime.ToMicroseconds();
+ packet->flags = aSample->mKeyframe ? AV_PKT_FLAG_KEY : 0;
+ packet->pos = aSample->mOffset;
mTrackingId.apply([&](const auto& aId) {
MediaInfoFlag flag = MediaInfoFlag::None;
@@ -1087,14 +1098,14 @@ MediaResult FFmpegVideoDecoder<LIBAV_VER>::DoDecode(
break;
}
mPerformanceRecorder.Start(
- packet.dts,
+ packet->dts,
nsPrintfCString("FFmpegVideoDecoder(%d)", LIBAVCODEC_VERSION_MAJOR),
aId, flag);
});
#if LIBAVCODEC_VERSION_MAJOR >= 58
- packet.duration = aSample->mDuration.ToMicroseconds();
- int res = mLib->avcodec_send_packet(mCodecContext, &packet);
+ packet->duration = aSample->mDuration.ToMicroseconds();
+ int res = mLib->avcodec_send_packet(mCodecContext, packet);
if (res < 0) {
    // In theory, avcodec_send_packet could return -EAGAIN should its internal
// buffers be full. In practice this can't happen as we only feed one frame
@@ -1102,7 +1113,9 @@ MediaResult FFmpegVideoDecoder<LIBAV_VER>::DoDecode(
char errStr[AV_ERROR_MAX_STRING_SIZE];
mLib->av_strerror(res, errStr, AV_ERROR_MAX_STRING_SIZE);
FFMPEG_LOG("avcodec_send_packet error: %s", errStr);
- return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ return MediaResult(res == int(AVERROR_EOF)
+ ? NS_ERROR_DOM_MEDIA_END_OF_STREAM
+ : NS_ERROR_DOM_MEDIA_DECODE_ERR,
RESULT_DETAIL("avcodec_send_packet error: %s", errStr));
}
if (aGotFrame) {
@@ -1154,10 +1167,10 @@ MediaResult FFmpegVideoDecoder<LIBAV_VER>::DoDecode(
}
if (mUsingV4L2) {
rv = CreateImageV4L2(mFrame->pkt_pos, GetFramePts(mFrame),
- mFrame->pkt_duration, aResults);
+ Duration(mFrame), aResults);
} else {
rv = CreateImageVAAPI(mFrame->pkt_pos, GetFramePts(mFrame),
- mFrame->pkt_duration, aResults);
+ Duration(mFrame), aResults);
}
// If VA-API/V4L2 playback failed, just quit. Decoder is going to be
@@ -1171,8 +1184,8 @@ MediaResult FFmpegVideoDecoder<LIBAV_VER>::DoDecode(
} else
# endif
{
- rv = CreateImage(mFrame->pkt_pos, GetFramePts(mFrame),
- mFrame->pkt_duration, aResults);
+ rv = CreateImage(mFrame->pkt_pos, GetFramePts(mFrame), Duration(mFrame),
+ aResults);
}
if (NS_FAILED(rv)) {
return rv;
@@ -1202,6 +1215,7 @@ MediaResult FFmpegVideoDecoder<LIBAV_VER>::DoDecode(
# endif
return Some(DecodeStage::YUV444P);
case AV_PIX_FMT_GBRP:
+ case AV_PIX_FMT_GBRP10LE:
return Some(DecodeStage::GBRP);
case AV_PIX_FMT_VAAPI_VLD:
return Some(DecodeStage::VAAPI_SURFACE);
@@ -1213,6 +1227,8 @@ MediaResult FFmpegVideoDecoder<LIBAV_VER>::DoDecode(
aStage.SetColorDepth(GetColorDepth(mCodecContext->pix_fmt));
aStage.SetYUVColorSpace(GetFrameColorSpace());
aStage.SetColorRange(GetFrameColorRange());
+ aStage.SetStartTimeAndEndTime(aSample->mTime.ToMicroseconds(),
+ aSample->GetEndTime().ToMicroseconds());
});
if (aGotFrame) {
*aGotFrame = true;
@@ -1237,14 +1253,14 @@ MediaResult FFmpegVideoDecoder<LIBAV_VER>::DoDecode(
int decoded;
int bytesConsumed =
- mLib->avcodec_decode_video2(mCodecContext, mFrame, &decoded, &packet);
+ mLib->avcodec_decode_video2(mCodecContext, mFrame, &decoded, packet);
FFMPEG_LOG(
"DoDecodeFrame:decode_video: rv=%d decoded=%d "
"(Input: pts(%" PRId64 ") dts(%" PRId64 ") Output: pts(%" PRId64
") "
"opaque(%" PRId64 ") pts(%" PRId64 ") pkt_dts(%" PRId64 "))",
- bytesConsumed, decoded, packet.pts, packet.dts, mFrame->pts,
+ bytesConsumed, decoded, packet->pts, packet->dts, mFrame->pts,
mFrame->reordered_opaque, mFrame->pts, mFrame->pkt_dts);
if (bytesConsumed < 0) {
@@ -1306,6 +1322,7 @@ MediaResult FFmpegVideoDecoder<LIBAV_VER>::DoDecode(
# endif
return Some(DecodeStage::YUV444P);
case AV_PIX_FMT_GBRP:
+ case AV_PIX_FMT_GBRP10LE:
return Some(DecodeStage::GBRP);
default:
return Nothing();
@@ -1315,6 +1332,8 @@ MediaResult FFmpegVideoDecoder<LIBAV_VER>::DoDecode(
aStage.SetColorDepth(GetColorDepth(mCodecContext->pix_fmt));
aStage.SetYUVColorSpace(GetFrameColorSpace());
aStage.SetColorRange(GetFrameColorRange());
+ aStage.SetStartTimeAndEndTime(aSample->mTime.ToMicroseconds(),
+ aSample->GetEndTime().ToMicroseconds());
});
});
@@ -1372,8 +1391,8 @@ MediaResult FFmpegVideoDecoder<LIBAV_VER>::CreateImage(
int64_t aOffset, int64_t aPts, int64_t aDuration,
MediaDataDecoder::DecodedData& aResults) const {
FFMPEG_LOG("Got one frame output with pts=%" PRId64 " dts=%" PRId64
- " duration=%" PRId64 " opaque=%" PRId64,
- aPts, mFrame->pkt_dts, aDuration, mCodecContext->reordered_opaque);
+ " duration=%" PRId64,
+ aPts, mFrame->pkt_dts, aDuration);
VideoData::YCbCrBuffer b;
b.mPlanes[0].mData = mFrame->data[0];
@@ -1392,14 +1411,16 @@ MediaResult FFmpegVideoDecoder<LIBAV_VER>::CreateImage(
b.mPlanes[0].mHeight = mFrame->height;
if (mCodecContext->pix_fmt == AV_PIX_FMT_YUV444P ||
mCodecContext->pix_fmt == AV_PIX_FMT_YUV444P10LE ||
- mCodecContext->pix_fmt == AV_PIX_FMT_GBRP
+ mCodecContext->pix_fmt == AV_PIX_FMT_GBRP ||
+ mCodecContext->pix_fmt == AV_PIX_FMT_GBRP10LE
#if LIBAVCODEC_VERSION_MAJOR >= 57
|| mCodecContext->pix_fmt == AV_PIX_FMT_YUV444P12LE
#endif
) {
b.mPlanes[1].mWidth = b.mPlanes[2].mWidth = mFrame->width;
b.mPlanes[1].mHeight = b.mPlanes[2].mHeight = mFrame->height;
- if (mCodecContext->pix_fmt == AV_PIX_FMT_YUV444P10LE) {
+ if (mCodecContext->pix_fmt == AV_PIX_FMT_YUV444P10LE ||
+ mCodecContext->pix_fmt == AV_PIX_FMT_GBRP10LE) {
b.mColorDepth = gfx::ColorDepth::COLOR_10;
}
#if LIBAVCODEC_VERSION_MAJOR >= 57
@@ -1501,8 +1522,8 @@ MediaResult FFmpegVideoDecoder<LIBAV_VER>::CreateImageVAAPI(
int64_t aOffset, int64_t aPts, int64_t aDuration,
MediaDataDecoder::DecodedData& aResults) {
FFMPEG_LOG("VA-API Got one frame output with pts=%" PRId64 " dts=%" PRId64
- " duration=%" PRId64 " opaque=%" PRId64,
- aPts, mFrame->pkt_dts, aDuration, mCodecContext->reordered_opaque);
+ " duration=%" PRId64,
+ aPts, mFrame->pkt_dts, aDuration);
VADRMPRIMESurfaceDescriptor vaDesc;
if (!GetVAAPISurfaceDescriptor(&vaDesc)) {
@@ -1547,8 +1568,8 @@ MediaResult FFmpegVideoDecoder<LIBAV_VER>::CreateImageV4L2(
int64_t aOffset, int64_t aPts, int64_t aDuration,
MediaDataDecoder::DecodedData& aResults) {
FFMPEG_LOG("V4L2 Got one frame output with pts=%" PRId64 " dts=%" PRId64
- " duration=%" PRId64 " opaque=%" PRId64,
- aPts, mFrame->pkt_dts, aDuration, mCodecContext->reordered_opaque);
+ " duration=%" PRId64,
+ aPts, mFrame->pkt_dts, aDuration);
AVDRMFrameDescriptor* desc = (AVDRMFrameDescriptor*)mFrame->data[0];
if (!desc) {
@@ -1673,8 +1694,7 @@ static const struct {
VAProfile va_profile;
char name[100];
} vaapi_profile_map[] = {
-# define MAP(c, v, n) \
- { AV_CODEC_ID_##c, VAProfile##v, n }
+# define MAP(c, v, n) {AV_CODEC_ID_##c, VAProfile##v, n}
MAP(H264, H264ConstrainedBaseline, "H264ConstrainedBaseline"),
MAP(H264, H264Main, "H264Main"),
MAP(H264, H264High, "H264High"),
diff --git a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h
index fda38069ba..2d6771a7b1 100644
--- a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h
+++ b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h
@@ -43,7 +43,7 @@ class FFmpegVideoDecoder<LIBAV_VER>
typedef mozilla::layers::Image Image;
typedef mozilla::layers::ImageContainer ImageContainer;
typedef mozilla::layers::KnowsCompositor KnowsCompositor;
- typedef SimpleMap<int64_t> DurationMap;
+ typedef SimpleMap<int64_t, int64_t, ThreadSafePolicy> DurationMap;
public:
FFmpegVideoDecoder(FFmpegLibWrapper* aLib, const VideoInfo& aConfig,
@@ -205,6 +205,7 @@ class FFmpegVideoDecoder<LIBAV_VER>
const bool mLowLatency;
const Maybe<TrackingId> mTrackingId;
PerformanceRecorderMulti<DecodeStage> mPerformanceRecorder;
+ PerformanceRecorderMulti<DecodeStage> mPerformanceRecorder2;
// True if we're allocating shmem for ffmpeg decode buffer.
Maybe<Atomic<bool>> mIsUsingShmemBufferForDecode;
diff --git a/dom/media/platforms/ffmpeg/FFmpegVideoEncoder.cpp b/dom/media/platforms/ffmpeg/FFmpegVideoEncoder.cpp
index 9d1dbcf80f..83b0f98c5b 100644
--- a/dom/media/platforms/ffmpeg/FFmpegVideoEncoder.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegVideoEncoder.cpp
@@ -15,7 +15,7 @@
#include "libavutil/pixfmt.h"
#include "mozilla/dom/ImageUtils.h"
#include "nsPrintfCString.h"
-#include "ImageToI420.h"
+#include "ImageConversion.h"
#include "libyuv.h"
#include "FFmpegRuntimeLinker.h"
@@ -510,7 +510,7 @@ Result<MediaDataEncoder::EncodedData, nsresult> FFmpegVideoEncoder<
// Save duration in the time_base unit.
mDurationMap.Insert(mFrame->pts, aSample->mDuration.ToMicroseconds());
# endif
- mFrame->pkt_duration = aSample->mDuration.ToMicroseconds();
+ Duration(mFrame) = aSample->mDuration.ToMicroseconds();
// Now send the AVFrame to ffmpeg for encoding, same code for audio and video.
return FFmpegDataEncoder<LIBAV_VER>::EncodeWithModernAPIs();
diff --git a/dom/media/platforms/ffmpeg/FFmpegVideoEncoder.h b/dom/media/platforms/ffmpeg/FFmpegVideoEncoder.h
index 0ee5f52aec..2c4b20c441 100644
--- a/dom/media/platforms/ffmpeg/FFmpegVideoEncoder.h
+++ b/dom/media/platforms/ffmpeg/FFmpegVideoEncoder.h
@@ -22,7 +22,7 @@ class FFmpegVideoEncoder : public MediaDataEncoder {};
template <>
class FFmpegVideoEncoder<LIBAV_VER> : public FFmpegDataEncoder<LIBAV_VER> {
- using DurationMap = SimpleMap<int64_t>;
+ using DurationMap = SimpleMap<int64_t, int64_t, ThreadSafePolicy>;
public:
FFmpegVideoEncoder(const FFmpegLibWrapper* aLib, AVCodecID aCodecID,
diff --git a/dom/media/platforms/ffmpeg/ffmpeg61/include/COPYING.LGPLv2.1 b/dom/media/platforms/ffmpeg/ffmpeg61/include/COPYING.LGPLv2.1
new file mode 100644
index 0000000000..00b4fedfe7
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg61/include/COPYING.LGPLv2.1
@@ -0,0 +1,504 @@
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 2.1, February 1999
+
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL. It also counts
+ as the successor of the GNU Library Public License, version 2, hence
+ the version number 2.1.]
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+ This license, the Lesser General Public License, applies to some
+specially designated software packages--typically libraries--of the
+Free Software Foundation and other authors who decide to use it. You
+can use it too, but we suggest you first think carefully about whether
+this license or the ordinary General Public License is the better
+strategy to use in any particular case, based on the explanations below.
+
+ When we speak of free software, we are referring to freedom of use,
+not price. Our General Public Licenses are designed to make sure that
+you have the freedom to distribute copies of free software (and charge
+for this service if you wish); that you receive source code or can get
+it if you want it; that you can change the software and use pieces of
+it in new free programs; and that you are informed that you can do
+these things.
+
+ To protect your rights, we need to make restrictions that forbid
+distributors to deny you these rights or to ask you to surrender these
+rights. These restrictions translate to certain responsibilities for
+you if you distribute copies of the library or if you modify it.
+
+ For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you. You must make sure that they, too, receive or can get the source
+code. If you link other code with the library, you must provide
+complete object files to the recipients, so that they can relink them
+with the library after making changes to the library and recompiling
+it. And you must show them these terms so they know their rights.
+
+ We protect your rights with a two-step method: (1) we copyright the
+library, and (2) we offer you this license, which gives you legal
+permission to copy, distribute and/or modify the library.
+
+ To protect each distributor, we want to make it very clear that
+there is no warranty for the free library. Also, if the library is
+modified by someone else and passed on, the recipients should know
+that what they have is not the original version, so that the original
+author's reputation will not be affected by problems that might be
+introduced by others.
+
+ Finally, software patents pose a constant threat to the existence of
+any free program. We wish to make sure that a company cannot
+effectively restrict the users of a free program by obtaining a
+restrictive license from a patent holder. Therefore, we insist that
+any patent license obtained for a version of the library must be
+consistent with the full freedom of use specified in this license.
+
+ Most GNU software, including some libraries, is covered by the
+ordinary GNU General Public License. This license, the GNU Lesser
+General Public License, applies to certain designated libraries, and
+is quite different from the ordinary General Public License. We use
+this license for certain libraries in order to permit linking those
+libraries into non-free programs.
+
+ When a program is linked with a library, whether statically or using
+a shared library, the combination of the two is legally speaking a
+combined work, a derivative of the original library. The ordinary
+General Public License therefore permits such linking only if the
+entire combination fits its criteria of freedom. The Lesser General
+Public License permits more lax criteria for linking other code with
+the library.
+
+ We call this license the "Lesser" General Public License because it
+does Less to protect the user's freedom than the ordinary General
+Public License. It also provides other free software developers Less
+of an advantage over competing non-free programs. These disadvantages
+are the reason we use the ordinary General Public License for many
+libraries. However, the Lesser license provides advantages in certain
+special circumstances.
+
+ For example, on rare occasions, there may be a special need to
+encourage the widest possible use of a certain library, so that it becomes
+a de-facto standard. To achieve this, non-free programs must be
+allowed to use the library. A more frequent case is that a free
+library does the same job as widely used non-free libraries. In this
+case, there is little to gain by limiting the free library to free
+software only, so we use the Lesser General Public License.
+
+ In other cases, permission to use a particular library in non-free
+programs enables a greater number of people to use a large body of
+free software. For example, permission to use the GNU C Library in
+non-free programs enables many more people to use the whole GNU
+operating system, as well as its variant, the GNU/Linux operating
+system.
+
+ Although the Lesser General Public License is Less protective of the
+users' freedom, it does ensure that the user of a program that is
+linked with the Library has the freedom and the wherewithal to run
+that program using a modified version of the Library.
+
+ The precise terms and conditions for copying, distribution and
+modification follow. Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library". The
+former contains code derived from the library, whereas the latter must
+be combined with the library in order to run.
+
+ GNU LESSER GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License Agreement applies to any software library or other
+program which contains a notice placed by the copyright holder or
+other authorized party saying it may be distributed under the terms of
+this Lesser General Public License (also called "this License").
+Each licensee is addressed as "you".
+
+ A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+ The "Library", below, refers to any such software library or work
+which has been distributed under these terms. A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language. (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+ "Source code" for a work means the preferred form of the work for
+making modifications to it. For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+ Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it). Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+
+ 1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+ You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+ 2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) The modified work must itself be a software library.
+
+ b) You must cause the files modified to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ c) You must cause the whole of the work to be licensed at no
+ charge to all third parties under the terms of this License.
+
+ d) If a facility in the modified Library refers to a function or a
+ table of data to be supplied by an application program that uses
+ the facility, other than as an argument passed when the facility
+ is invoked, then you must make a good faith effort to ensure that,
+ in the event an application does not supply such function or
+ table, the facility still operates, and performs whatever part of
+ its purpose remains meaningful.
+
+ (For example, a function in a library to compute square roots has
+ a purpose that is entirely well-defined independent of the
+ application. Therefore, Subsection 2d requires that any
+ application-supplied function or table used by this function must
+ be optional: if the application does not supply it, the square
+ root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library. To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License. (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.) Do not make any other change in
+these notices.
+
+ Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+ This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+ 4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+ If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library". Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+ However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library". The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+ When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library. The
+threshold for this to be true is not precisely defined by law.
+
+ If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work. (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+ Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+ 6. As an exception to the Sections above, you may also combine or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+ You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License. You must supply a copy of this License. If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License. Also, you must do one
+of these things:
+
+ a) Accompany the work with the complete corresponding
+ machine-readable source code for the Library including whatever
+ changes were used in the work (which must be distributed under
+ Sections 1 and 2 above); and, if the work is an executable linked
+ with the Library, with the complete machine-readable "work that
+ uses the Library", as object code and/or source code, so that the
+ user can modify the Library and then relink to produce a modified
+ executable containing the modified Library. (It is understood
+ that the user who changes the contents of definitions files in the
+ Library will not necessarily be able to recompile the application
+ to use the modified definitions.)
+
+ b) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (1) uses at run time a
+ copy of the library already present on the user's computer system,
+ rather than copying library functions into the executable, and (2)
+ will operate properly with a modified version of the library, if
+ the user installs one, as long as the modified version is
+ interface-compatible with the version that the work was made with.
+
+ c) Accompany the work with a written offer, valid for at
+ least three years, to give the same user the materials
+ specified in Subsection 6a, above, for a charge no more
+ than the cost of performing this distribution.
+
+ d) If distribution of the work is made by offering access to copy
+ from a designated place, offer equivalent access to copy the above
+ specified materials from the same place.
+
+ e) Verify that the user has already received a copy of these
+ materials or that you have already sent this user a copy.
+
+ For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it. However, as a special exception,
+the materials to be distributed need not include anything that is
+normally distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+ It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system. Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+ 7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+ a) Accompany the combined library with a copy of the same work
+ based on the Library, uncombined with any other library
+ facilities. This must be distributed under the terms of the
+ Sections above.
+
+ b) Give prominent notice with the combined library of the fact
+ that part of it is a work based on the Library, and explaining
+ where to find the accompanying uncombined form of the same work.
+
+ 8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License. Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License. However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+ 9. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Library or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+ 10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties with
+this License.
+
+ 11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all. For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded. In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+ 13. The Free Software Foundation may publish revised and/or new
+versions of the Lesser General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation. If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+ 14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission. For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this. Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+ NO WARRANTY
+
+ 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Libraries
+
+ If you develop a new library, and you want it to be of the greatest
+possible use to the public, we recommend making it free software that
+everyone can redistribute and change. You can do so by permitting
+redistribution under these terms (or, alternatively, under the terms of the
+ordinary General Public License).
+
+ To apply these terms, attach the following notices to the library. It is
+safest to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least the
+"copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the library's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+Also add information on how to contact you by electronic and paper mail.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the library, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the
+ library `Frob' (a library for tweaking knobs) written by James Random Hacker.
+
+ <signature of Ty Coon>, 1 April 1990
+ Ty Coon, President of Vice
+
+That's all there is to it!
+
+
diff --git a/dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/avcodec.h b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/avcodec.h
new file mode 100644
index 0000000000..5216bff1f8
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/avcodec.h
@@ -0,0 +1,3121 @@
+/*
+ * copyright (c) 2001 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_AVCODEC_H
+#define AVCODEC_AVCODEC_H
+
+/**
+ * @file
+ * @ingroup libavc
+ * Libavcodec external API header
+ */
+
+#include "libavutil/samplefmt.h"
+#include "libavutil/attributes.h"
+#include "libavutil/avutil.h"
+#include "libavutil/buffer.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/dict.h"
+#include "libavutil/frame.h"
+#include "libavutil/log.h"
+#include "libavutil/pixfmt.h"
+#include "libavutil/rational.h"
+
+#include "codec.h"
+#include "codec_id.h"
+#include "defs.h"
+#include "packet.h"
+#include "version_major.h"
+#ifndef HAVE_AV_CONFIG_H
+/* When included as part of the ffmpeg build, only include the major version
+ * to avoid unnecessary rebuilds. When included externally, keep including
+ * the full version information. */
+# include "version.h"
+
+# include "codec_desc.h"
+# include "codec_par.h"
+#endif
+
+struct AVCodecParameters;
+
+/**
+ * @defgroup libavc libavcodec
+ * Encoding/Decoding Library
+ *
+ * @{
+ *
+ * @defgroup lavc_decoding Decoding
+ * @{
+ * @}
+ *
+ * @defgroup lavc_encoding Encoding
+ * @{
+ * @}
+ *
+ * @defgroup lavc_codec Codecs
+ * @{
+ * @defgroup lavc_codec_native Native Codecs
+ * @{
+ * @}
+ * @defgroup lavc_codec_wrappers External library wrappers
+ * @{
+ * @}
+ * @defgroup lavc_codec_hwaccel Hardware Accelerators bridge
+ * @{
+ * @}
+ * @}
+ * @defgroup lavc_internal Internal
+ * @{
+ * @}
+ * @}
+ */
+
+/**
+ * @ingroup libavc
+ * @defgroup lavc_encdec send/receive encoding and decoding API overview
+ * @{
+ *
+ * The avcodec_send_packet()/avcodec_receive_frame()/avcodec_send_frame()/
+ * avcodec_receive_packet() functions provide an encode/decode API, which
+ * decouples input and output.
+ *
+ * The API is very similar for encoding/decoding and audio/video, and works as
+ * follows:
+ * - Set up and open the AVCodecContext as usual.
+ * - Send valid input:
+ * - For decoding, call avcodec_send_packet() to give the decoder raw
+ * compressed data in an AVPacket.
+ * - For encoding, call avcodec_send_frame() to give the encoder an AVFrame
+ * containing uncompressed audio or video.
+ *
+ * In both cases, it is recommended that AVPackets and AVFrames are
+ * refcounted, or libavcodec might have to copy the input data. (libavformat
+ * always returns refcounted AVPackets, and av_frame_get_buffer() allocates
+ * refcounted AVFrames.)
+ * - Receive output in a loop. Periodically call one of the avcodec_receive_*()
+ * functions and process their output:
+ * - For decoding, call avcodec_receive_frame(). On success, it will return
+ * an AVFrame containing uncompressed audio or video data.
+ * - For encoding, call avcodec_receive_packet(). On success, it will return
+ * an AVPacket with a compressed frame.
+ *
+ * Repeat this call until it returns AVERROR(EAGAIN) or an error. The
+ * AVERROR(EAGAIN) return value means that new input data is required to
+ * return new output. In this case, continue with sending input. For each
+ * input frame/packet, the codec will typically return 1 output frame/packet,
+ * but it can also be 0 or more than 1.
+ *
+ * At the beginning of decoding or encoding, the codec might accept multiple
+ * input frames/packets without returning a frame, until its internal buffers
+ * are filled. This situation is handled transparently if you follow the steps
+ * outlined above.
+ *
+ * In theory, sending input can result in EAGAIN - this should happen only if
+ * not all output was received. You can use this to structure alternative decode
+ * or encode loops other than the one suggested above. For example, you could
+ * try sending new input on each iteration, and try to receive output if that
+ * returns EAGAIN.
+ *
+ * End-of-stream situations require "flushing" (aka draining) the codec,
+ * as the codec might buffer multiple frames or packets internally for
+ * performance or out of necessity (consider B-frames).
+ * This is handled as follows:
+ * - Instead of valid input, send NULL to the avcodec_send_packet() (decoding)
+ * or avcodec_send_frame() (encoding) functions. This will enter draining
+ * mode.
+ * - Call avcodec_receive_frame() (decoding) or avcodec_receive_packet()
+ * (encoding) in a loop until AVERROR_EOF is returned. The functions will
+ * not return AVERROR(EAGAIN), unless you forgot to enter draining mode.
+ * - Before decoding can be resumed again, the codec has to be reset with
+ * avcodec_flush_buffers().
+ *
+ * Using the API as outlined above is highly recommended. But it is also
+ * possible to call functions outside of this rigid schema. For example, you can
+ * call avcodec_send_packet() repeatedly without calling
+ * avcodec_receive_frame(). In this case, avcodec_send_packet() will succeed
+ * until the codec's internal buffer has been filled up (which is typically of
+ * size 1 per output frame, after initial input), and then reject input with
+ * AVERROR(EAGAIN). Once it starts rejecting input, you have no choice but to
+ * read at least some output.
+ *
+ * Not all codecs will follow a rigid and predictable dataflow; the only
+ * guarantee is that an AVERROR(EAGAIN) return value on a send/receive call on
+ * one end implies that a receive/send call on the other end will succeed, or
+ * at least will not fail with AVERROR(EAGAIN). In general, no codec will
+ * permit unlimited buffering of input or output.
+ *
+ * A codec is not allowed to return AVERROR(EAGAIN) for both sending and
+ * receiving. This would be an invalid state, which could put the codec user
+ * into an endless loop. The API has no concept of time either: it cannot happen
+ * that trying to do avcodec_send_packet() results in AVERROR(EAGAIN), but a
+ * repeated call 1 second later accepts the packet (with no other receive/flush
+ * API calls involved). The API is a strict state machine, and the passage of
+ * time is not supposed to influence it. Some timing-dependent behavior might
+ * still be deemed acceptable in certain cases. But it must never result in both
+ * send/receive returning EAGAIN at the same time at any point. It must also
+ * absolutely be avoided that the current state is "unstable" and can
+ * "flip-flop" between the send/receive APIs allowing progress. For example,
+ * it's not allowed that the codec randomly decides that it actually wants to
+ * consume a packet now instead of returning a frame, after it just returned
+ * AVERROR(EAGAIN) on an avcodec_send_packet() call.
+ * @}
+ */
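+
+/*
+ * Editor's note: a minimal sketch of the decode loop described above, not
+ * part of the upstream header. Allocation and error handling are trimmed;
+ * dec_ctx, pkt and frame are assumed to be an opened AVCodecContext, an
+ * AVPacket holding input data (or NULL to start draining), and an allocated
+ * AVFrame. process_frame() is a hypothetical caller-provided consumer.
+ *
+ * @code
+ * int ret = avcodec_send_packet(dec_ctx, pkt);
+ * if (ret < 0)
+ *     return ret;                    // input rejected, e.g. AVERROR(EAGAIN)
+ * for (;;) {
+ *     ret = avcodec_receive_frame(dec_ctx, frame);
+ *     if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
+ *         break;                     // needs more input, or fully drained
+ *     if (ret < 0)
+ *         return ret;                // a real decoding error
+ *     process_frame(frame);
+ *     av_frame_unref(frame);
+ * }
+ * @endcode
+ */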
+
+/**
+ * @defgroup lavc_core Core functions/structures.
+ * @ingroup libavc
+ *
+ * Basic definitions, functions for querying libavcodec capabilities,
+ * allocating core structures, etc.
+ * @{
+ */
+
+#if FF_API_BUFFER_MIN_SIZE
+/**
+ * @ingroup lavc_encoding
+ * minimum encoding buffer size
+ * Used to avoid some checks during header writing.
+ * @deprecated Unused: avcodec_receive_packet() does not work
+ * with preallocated packet buffers.
+ */
+# define AV_INPUT_BUFFER_MIN_SIZE 16384
+#endif
+
+/**
+ * @ingroup lavc_encoding
+ */
+typedef struct RcOverride {
+ int start_frame;
+ int end_frame;
+ int qscale; // If this is 0 then quality_factor will be used instead.
+ float quality_factor;
+} RcOverride;
+
+/* encoding support
+ These flags can be passed in AVCodecContext.flags before initialization.
+ Note: Not everything is supported yet.
+*/
+
+/**
+ * Allow decoders to produce frames with data planes that are not aligned
+ * to CPU requirements (e.g. due to cropping).
+ */
+#define AV_CODEC_FLAG_UNALIGNED (1 << 0)
+/**
+ * Use fixed qscale.
+ */
+#define AV_CODEC_FLAG_QSCALE (1 << 1)
+/**
+ * 4 MV per MB allowed / advanced prediction for H.263.
+ */
+#define AV_CODEC_FLAG_4MV (1 << 2)
+/**
+ * Output even those frames that might be corrupted.
+ */
+#define AV_CODEC_FLAG_OUTPUT_CORRUPT (1 << 3)
+/**
+ * Use qpel MC.
+ */
+#define AV_CODEC_FLAG_QPEL (1 << 4)
+#if FF_API_DROPCHANGED
+/**
+ * Don't output frames whose parameters differ from first
+ * decoded frame in stream.
+ *
+ * @deprecated callers should implement this functionality in their own code
+ */
+# define AV_CODEC_FLAG_DROPCHANGED (1 << 5)
+#endif
+/**
+ * Request the encoder to output reconstructed frames, i.e.\ frames that would
+ * be produced by decoding the encoded bitstream. These frames may be retrieved
+ * by calling avcodec_receive_frame() immediately after a successful call to
+ * avcodec_receive_packet().
+ *
+ * Should only be used with encoders flagged with the
+ * @ref AV_CODEC_CAP_ENCODER_RECON_FRAME capability.
+ *
+ * @note
+ * Each reconstructed frame returned by the encoder corresponds to the last
+ * encoded packet, i.e. the frames are returned in coded order rather than
+ * presentation order.
+ *
+ * @note
+ * Frame parameters (like pixel format or dimensions) do not have to match the
+ * AVCodecContext values. Make sure to use the values from the returned frame.
+ */
+#define AV_CODEC_FLAG_RECON_FRAME (1 << 6)
+/**
+ * @par decoding
+ * Request the decoder to propagate each packet's AVPacket.opaque and
+ * AVPacket.opaque_ref to its corresponding output AVFrame.
+ *
+ * @par encoding:
+ * Request the encoder to propagate each frame's AVFrame.opaque and
+ * AVFrame.opaque_ref values to its corresponding output AVPacket.
+ *
+ * @par
+ * May only be set on encoders that have the
+ * @ref AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE capability flag.
+ *
+ * @note
+ * While in typical cases one input frame produces exactly one output packet
+ * (perhaps after a delay), in general the mapping of frames to packets is
+ * M-to-N, so
+ * - Any number of input frames may be associated with any given output packet.
+ * This includes zero - e.g. some encoders may output packets that carry only
+ * metadata about the whole stream.
+ * - A given input frame may be associated with any number of output packets.
+ * Again this includes zero - e.g. some encoders may drop frames under certain
+ * conditions.
+ * .
+ * This implies that when using this flag, the caller must NOT assume that
+ * - a given input frame's opaques will necessarily appear on some output
+ * packet;
+ * - every output packet will have some non-NULL opaque value.
+ * .
+ * When an output packet contains multiple frames, the opaque values will be
+ * taken from the first of those.
+ *
+ * @note
+ * The converse holds for decoders, with frames and packets switched.
+ */
+#define AV_CODEC_FLAG_COPY_OPAQUE (1 << 7)
+/**
+ * Signal to the encoder that the values of AVFrame.duration are valid and
+ * should be used (typically for transferring them to output packets).
+ *
+ * If this flag is not set, frame durations are ignored.
+ */
+#define AV_CODEC_FLAG_FRAME_DURATION (1 << 8)
+/**
+ * Use internal 2pass ratecontrol in first pass mode.
+ */
+#define AV_CODEC_FLAG_PASS1 (1 << 9)
+/**
+ * Use internal 2pass ratecontrol in second pass mode.
+ */
+#define AV_CODEC_FLAG_PASS2 (1 << 10)
+/**
+ * loop filter.
+ */
+#define AV_CODEC_FLAG_LOOP_FILTER (1 << 11)
+/**
+ * Only decode/encode grayscale.
+ */
+#define AV_CODEC_FLAG_GRAY (1 << 13)
+/**
+ * error[?] variables will be set during encoding.
+ */
+#define AV_CODEC_FLAG_PSNR (1 << 15)
+/**
+ * Use interlaced DCT.
+ */
+#define AV_CODEC_FLAG_INTERLACED_DCT (1 << 18)
+/**
+ * Force low delay.
+ */
+#define AV_CODEC_FLAG_LOW_DELAY (1 << 19)
+/**
+ * Place global headers in extradata instead of every keyframe.
+ */
+#define AV_CODEC_FLAG_GLOBAL_HEADER (1 << 22)
+/**
+ * Use only bitexact stuff (except (I)DCT).
+ */
+#define AV_CODEC_FLAG_BITEXACT (1 << 23)
+/* Fx : Flag for H.263+ extra options */
+/**
+ * H.263 advanced intra coding / MPEG-4 AC prediction
+ */
+#define AV_CODEC_FLAG_AC_PRED (1 << 24)
+/**
+ * interlaced motion estimation
+ */
+#define AV_CODEC_FLAG_INTERLACED_ME (1 << 29)
+#define AV_CODEC_FLAG_CLOSED_GOP (1U << 31)
+
+/**
+ * Allow non spec compliant speedup tricks.
+ */
+#define AV_CODEC_FLAG2_FAST (1 << 0)
+/**
+ * Skip bitstream encoding.
+ */
+#define AV_CODEC_FLAG2_NO_OUTPUT (1 << 2)
+/**
+ * Place global headers at every keyframe instead of in extradata.
+ */
+#define AV_CODEC_FLAG2_LOCAL_HEADER (1 << 3)
+
+/**
+ * Input bitstream might be truncated at packet boundaries
+ * instead of only at frame boundaries.
+ */
+#define AV_CODEC_FLAG2_CHUNKS (1 << 15)
+/**
+ * Discard cropping information from SPS.
+ */
+#define AV_CODEC_FLAG2_IGNORE_CROP (1 << 16)
+
+/**
+ * Show all frames before the first keyframe
+ */
+#define AV_CODEC_FLAG2_SHOW_ALL (1 << 22)
+/**
+ * Export motion vectors through frame side data
+ */
+#define AV_CODEC_FLAG2_EXPORT_MVS (1 << 28)
+/**
+ * Do not skip samples and export skip information as frame side data
+ */
+#define AV_CODEC_FLAG2_SKIP_MANUAL (1 << 29)
+/**
+ * Do not reset ASS ReadOrder field on flush (subtitles decoding)
+ */
+#define AV_CODEC_FLAG2_RO_FLUSH_NOOP (1 << 30)
+/**
+ * Generate/parse ICC profiles on encode/decode, as appropriate for the type of
+ * file. No effect on codecs which cannot contain embedded ICC profiles, or
+ * when compiled without support for lcms2.
+ */
+#define AV_CODEC_FLAG2_ICC_PROFILES (1U << 31)
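+
+/*
+ * Editor's note: a hedged usage sketch, not upstream documentation. The
+ * AV_CODEC_FLAG_* and AV_CODEC_FLAG2_* values above are OR-ed into
+ * AVCodecContext.flags / flags2 before avcodec_open2(); enc_ctx is an
+ * assumed, already-allocated encoder context.
+ *
+ * @code
+ * enc_ctx->flags  |= AV_CODEC_FLAG_GLOBAL_HEADER; // extradata for MP4/MKV
+ * enc_ctx->flags2 |= AV_CODEC_FLAG2_FAST;         // allow speedup tricks
+ * @endcode
+ */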
+
+/* Exported side data.
+ These flags can be passed in AVCodecContext.export_side_data before
+ initialization.
+*/
+/**
+ * Export motion vectors through frame side data
+ */
+#define AV_CODEC_EXPORT_DATA_MVS (1 << 0)
+/**
+ * Export encoder Producer Reference Time through packet side data
+ */
+#define AV_CODEC_EXPORT_DATA_PRFT (1 << 1)
+/**
+ * Decoding only.
+ * Export the AVVideoEncParams structure through frame side data.
+ */
+#define AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS (1 << 2)
+/**
+ * Decoding only.
+ * Do not apply film grain, export it instead.
+ */
+#define AV_CODEC_EXPORT_DATA_FILM_GRAIN (1 << 3)
+
+/**
+ * The decoder will keep a reference to the frame and may reuse it later.
+ */
+#define AV_GET_BUFFER_FLAG_REF (1 << 0)
+
+/**
+ * The encoder will keep a reference to the packet and may reuse it later.
+ */
+#define AV_GET_ENCODE_BUFFER_FLAG_REF (1 << 0)
+
+/**
+ * main external API structure.
+ * New fields can be added to the end with minor version bumps.
+ * Removal, reordering and changes to existing fields require a major
+ * version bump.
+ * You can use AVOptions (av_opt* / av_set/get*()) to access these fields from
+ * user applications. The name string for AVOptions options matches the
+ * associated command line parameter name and can be found in
+ * libavcodec/options_table.h The AVOption/command line parameter names differ
+ * in some cases from the C structure field names for historic reasons or
+ * brevity. sizeof(AVCodecContext) must not be used outside libav*.
+ */
+typedef struct AVCodecContext {
+ /**
+ * information on struct for av_log
+ * - set by avcodec_alloc_context3
+ */
+ const AVClass* av_class;
+ int log_level_offset;
+
+ enum AVMediaType codec_type; /* see AVMEDIA_TYPE_xxx */
+ const struct AVCodec* codec;
+ enum AVCodecID codec_id; /* see AV_CODEC_ID_xxx */
+
+ /**
+ * fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
+ * This is used to work around some encoder bugs.
+ * A demuxer should set this to what is stored in the field used to identify
+ * the codec. If there are multiple such fields in a container then the
+ * demuxer should choose the one which maximizes the information about the
+ * used codec. If the codec tag field in a container is larger than 32 bits
+ * then the demuxer should remap the longer ID to 32 bits with a table or
+ * other structure. Alternatively a new extra_codec_tag + size could be added
+ * but for this a clear advantage must be demonstrated first.
+ * - encoding: Set by user; if not set, the default based on codec_id will be
+ * used.
+ * - decoding: Set by user, will be converted to uppercase by libavcodec
+ * during init.
+ */
+ unsigned int codec_tag;
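+
+ /*
+ * Editor's note: an illustrative sketch, not upstream text. A demuxer
+ * seeing an ISO-BMFF sample entry of type "avc1" might record it as:
+ *   s->codec_tag = MKTAG('a', 'v', 'c', '1'); // MKTAG() is from libavutil
+ */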
+
+ void* priv_data;
+
+ /**
+ * Private context used for internal data.
+ *
+ * Unlike priv_data, this is not codec-specific. It is used in general
+ * libavcodec functions.
+ */
+ struct AVCodecInternal* internal;
+
+ /**
+ * Private data of the user, can be used to carry app specific stuff.
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ void* opaque;
+
+ /**
+ * the average bitrate
+ * - encoding: Set by user; unused for constant quantizer encoding.
+ * - decoding: Set by user, may be overwritten by libavcodec
+ * if this info is available in the stream
+ */
+ int64_t bit_rate;
+
+ /**
+ * AV_CODEC_FLAG_*.
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int flags;
+
+ /**
+ * AV_CODEC_FLAG2_*
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int flags2;
+
+ /**
+ * some codecs need / can use extradata like Huffman tables.
+ * MJPEG: Huffman tables
+ * rv10: additional flags
+ * MPEG-4: global headers (they can be in the bitstream or here)
+ * The allocated memory should be AV_INPUT_BUFFER_PADDING_SIZE bytes larger
+ * than extradata_size to avoid problems if it is read with the bitstream
+ * reader. The bytewise contents of extradata must not depend on the
+ * architecture or CPU endianness. Must be allocated with the av_malloc()
+ * family of functions.
+ * - encoding: Set/allocated/freed by libavcodec.
+ * - decoding: Set/allocated/freed by user.
+ */
+ uint8_t* extradata;
+ int extradata_size;
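+
+ /*
+ * Editor's note: a hedged allocation sketch for the decoding case, where
+ * the caller owns extradata. The padding requirement above translates to:
+ *   s->extradata = av_mallocz(size + AV_INPUT_BUFFER_PADDING_SIZE);
+ *   if (!s->extradata)
+ *       return AVERROR(ENOMEM);
+ *   memcpy(s->extradata, src, size);
+ *   s->extradata_size = size;
+ */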
+
+ /**
+ * This is the fundamental unit of time (in seconds) in terms
+ * of which frame timestamps are represented. For fixed-fps content,
+ * timebase should be 1/framerate and timestamp increments should be
+ * identically 1.
+ * This often, but not always, is the inverse of the frame rate or field rate
+ * for video. 1/time_base is not the average frame rate if the frame rate is
+ * not constant.
+ *
+ * Like containers, elementary streams can also store timestamps; 1/time_base
+ * is the unit in which these timestamps are specified.
+ * As example of such codec time base see ISO/IEC 14496-2:2001(E)
+ * vop_time_increment_resolution and fixed_vop_rate
+ * (fixed_vop_rate == 0 implies that it is different from the framerate)
+ *
+ * - encoding: MUST be set by user.
+ * - decoding: unused.
+ */
+ AVRational time_base;
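+
+ /*
+ * Editor's note: a sketch for fixed-fps content at 30 fps (values are
+ * illustrative; enc_ctx is an assumed encoder context):
+ *   enc_ctx->time_base = (AVRational){1, 30};
+ *   enc_ctx->framerate = (AVRational){30, 1};
+ */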
+
+ /**
+ * Timebase in which pkt_dts/pts and AVPacket.dts/pts are expressed.
+ * - encoding: unused.
+ * - decoding: set by user.
+ */
+ AVRational pkt_timebase;
+
+ /**
+ * - decoding: For codecs that store a framerate value in the compressed
+ * bitstream, the decoder may export it here. { 0, 1} when
+ * unknown.
+ * - encoding: May be used to signal the framerate of CFR content to an
+ * encoder.
+ */
+ AVRational framerate;
+
+#if FF_API_TICKS_PER_FRAME
+ /**
+ * For some codecs, the time base is closer to the field rate than the frame
+ * rate. Most notably, H.264 and MPEG-2 specify time_base as half of frame
+ * duration if no telecine is used ...
+ *
+ * Set to time_base ticks per frame. Default 1, e.g., H.264/MPEG-2 set it
+ * to 2.
+ *
+ * @deprecated
+ * - decoding: Use AVCodecDescriptor.props & AV_CODEC_PROP_FIELDS
+ * - encoding: Set AVCodecContext.framerate instead
+ *
+ */
+ attribute_deprecated int ticks_per_frame;
+#endif
+
+ /**
+ * Codec delay.
+ *
+ * Encoding: Number of frames delay there will be from the encoder input to
+ * the decoder output. (we assume the decoder matches the spec)
+ * Decoding: Number of frames delay in addition to what a standard decoder
+ * as specified in the spec would produce.
+ *
+ * Video:
+ * Number of frames the decoded output will be delayed relative to the
+ * encoded input.
+ *
+ * Audio:
+ * For encoding, this field is unused (see initial_padding).
+ *
+ * For decoding, this is the number of samples the decoder needs to
+ * output before the decoder's output is valid. When seeking, you should
+ * start decoding this many samples prior to your desired seek point.
+ *
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by libavcodec.
+ */
+ int delay;
+
+ /* video only */
+ /**
+ * picture width / height.
+ *
+ * @note Those fields may not match the values of the last
+ * AVFrame output by avcodec_receive_frame() due to frame
+ * reordering.
+ *
+ * - encoding: MUST be set by user.
+ * - decoding: May be set by the user before opening the decoder if known e.g.
+ * from the container. Some decoders will require the dimensions
+ * to be set by the caller. During decoding, the decoder may
+ * overwrite those values as required while parsing the data.
+ */
+ int width, height;
+
+ /**
+ * Bitstream width / height, may be different from width/height e.g. when
+ * the decoded frame is cropped before being output or lowres is enabled.
+ *
+ * @note Those fields may not match the values of the last
+ * AVFrame output by avcodec_receive_frame() due to frame
+ * reordering.
+ *
+ * - encoding: unused
+ * - decoding: May be set by the user before opening the decoder if known
+ * e.g. from the container. During decoding, the decoder may
+ * overwrite those values as required while parsing the data.
+ */
+ int coded_width, coded_height;
+
+ /**
+ * sample aspect ratio (0 if unknown)
+ * That is the width of a pixel divided by the height of the pixel.
+ * Numerator and denominator must be relatively prime and smaller than 256 for
+ * some video standards.
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ AVRational sample_aspect_ratio;
+
+ /**
+ * Pixel format, see AV_PIX_FMT_xxx.
+ * May be set by the demuxer if known from headers.
+ * May be overridden by the decoder if it knows better.
+ *
+ * @note This field may not match the value of the last
+ * AVFrame output by avcodec_receive_frame() due to frame
+ * reordering.
+ *
+ * - encoding: Set by user.
+ * - decoding: Set by user if known, overridden by libavcodec while
+ * parsing the data.
+ */
+ enum AVPixelFormat pix_fmt;
+
+ /**
+ * Nominal unaccelerated pixel format, see AV_PIX_FMT_xxx.
+ * - encoding: unused.
+ * - decoding: Set by libavcodec before calling get_format()
+ */
+ enum AVPixelFormat sw_pix_fmt;
+
+ /**
+ * Chromaticity coordinates of the source primaries.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVColorPrimaries color_primaries;
+
+ /**
+ * Color Transfer Characteristic.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVColorTransferCharacteristic color_trc;
+
+ /**
+ * YUV colorspace type.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVColorSpace colorspace;
+
+ /**
+ * MPEG vs JPEG YUV range.
+ * - encoding: Set by user to override the default output color range value,
+ * If not specified, libavcodec sets the color range depending on the
+ * output format.
+ * - decoding: Set by libavcodec, can be set by the user to propagate the
+ * color range to components reading from the decoder context.
+ */
+ enum AVColorRange color_range;
+
+ /**
+ * This defines the location of chroma samples.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVChromaLocation chroma_sample_location;
+
+ /** Field order
+ * - encoding: set by libavcodec
+ * - decoding: Set by user.
+ */
+ enum AVFieldOrder field_order;
+
+ /**
+ * number of reference frames
+ * - encoding: Set by user.
+ * - decoding: Set by lavc.
+ */
+ int refs;
+
+ /**
+ * Size of the frame reordering buffer in the decoder.
+ * For MPEG-2 it is 1 IPB or 0 low delay IP.
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by libavcodec.
+ */
+ int has_b_frames;
+
+ /**
+ * slice flags
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int slice_flags;
+#define SLICE_FLAG_CODED_ORDER \
+ 0x0001 ///< draw_horiz_band() is called in coded order instead of display
+#define SLICE_FLAG_ALLOW_FIELD \
+ 0x0002 ///< allow draw_horiz_band() with field slices (MPEG-2 field pics)
+#define SLICE_FLAG_ALLOW_PLANE \
+ 0x0004 ///< allow draw_horiz_band() with 1 component at a time (SVQ1)
+
+ /**
+ * If non-NULL, 'draw_horiz_band' is called by the libavcodec
+ * decoder to draw a horizontal band. It improves cache usage. Not
+ * all codecs can do that. You must check the codec capabilities
+ * beforehand.
+ * When multithreading is used, it may be called from multiple threads
+ * at the same time; threads might draw different parts of the same AVFrame,
+ * or multiple AVFrames, and there is no guarantee that slices will be drawn
+ * in order.
+ * The function is also used by hardware acceleration APIs.
+ * It is called at least once during frame decoding to pass
+ * the data needed for hardware render.
+ * In that mode instead of pixel data, AVFrame points to
+ * a structure specific to the acceleration API. The application
+ * reads the structure and can change some fields to indicate progress
+ * or mark state.
+ * - encoding: unused
+ * - decoding: Set by user.
+ * @param height the height of the slice
+ * @param y the y position of the slice
+ * @param type 1->top field, 2->bottom field, 3->frame
+ * @param offset offset into the AVFrame.data from which the slice should be
+ * read
+ */
+ void (*draw_horiz_band)(struct AVCodecContext* s, const AVFrame* src,
+ int offset[AV_NUM_DATA_POINTERS], int y, int type,
+ int height);
+
+ /**
+ * Callback to negotiate the pixel format. Decoding only, may be set by the
+ * caller before avcodec_open2().
+ *
+ * Called by some decoders to select the pixel format that will be used for
+ * the output frames. This is mainly used to set up hardware acceleration,
+ * then the provided format list contains the corresponding hwaccel pixel
+ * formats alongside the "software" one. The software pixel format may also
+ * be retrieved from \ref sw_pix_fmt.
+ *
+ * This callback will be called when the coded frame properties (such as
+ * resolution, pixel format, etc.) change and more than one output format is
+ * supported for those new properties. If a hardware pixel format is chosen
+ * and initialization for it fails, the callback may be called again
+ * immediately.
+ *
+ * This callback may be called from different threads if the decoder is
+ * multi-threaded, but not from more than one thread simultaneously.
+ *
+ * @param fmt list of formats which may be used in the current
+ * configuration, terminated by AV_PIX_FMT_NONE.
+ * @warning Behavior is undefined if the callback returns a value other
+ * than one of the formats in fmt or AV_PIX_FMT_NONE.
+ * @return the chosen format or AV_PIX_FMT_NONE
+ */
+ enum AVPixelFormat (*get_format)(struct AVCodecContext* s,
+ const enum AVPixelFormat* fmt);
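+
+ /*
+ * Editor's note: a minimal get_format() sketch, assuming VAAPI as the
+ * desired hwaccel; not upstream text. It scans the offered list and falls
+ * back to the first offered format when the target is absent.
+ *
+ * @code
+ * static enum AVPixelFormat pick_format(struct AVCodecContext* s,
+ *                                       const enum AVPixelFormat* fmt) {
+ *   for (int i = 0; fmt[i] != AV_PIX_FMT_NONE; i++)
+ *     if (fmt[i] == AV_PIX_FMT_VAAPI)  // assumed hwaccel target
+ *       return fmt[i];
+ *   return fmt[0];  // fall back to the first offered format
+ * }
+ * @endcode
+ */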
+
+ /**
+ * maximum number of B-frames between non-B-frames
+ * Note: The output will be delayed by max_b_frames+1 relative to the input.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int max_b_frames;
+
+ /**
+ * qscale factor between IP and B-frames
+ * If > 0 then the last P-frame quantizer will be used (q=
+ * lastp_q*factor+offset). If < 0 then normal ratecontrol will be done (q=
+ * -normal_q*factor+offset).
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float b_quant_factor;
+
+ /**
+ * qscale offset between IP and B-frames
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float b_quant_offset;
+
+ /**
+ * qscale factor between P- and I-frames
+ * If > 0 then the last P-frame quantizer will be used (q = lastp_q * factor +
+ * offset). If < 0 then normal ratecontrol will be done (q=
+ * -normal_q*factor+offset).
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float i_quant_factor;
+
+ /**
+ * qscale offset between P and I-frames
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float i_quant_offset;
+
+ /**
+ * luminance masking (0-> disabled)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float lumi_masking;
+
+ /**
+ * temporal complexity masking (0-> disabled)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float temporal_cplx_masking;
+
+ /**
+ * spatial complexity masking (0-> disabled)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float spatial_cplx_masking;
+
+ /**
+ * p block masking (0-> disabled)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float p_masking;
+
+ /**
+ * darkness masking (0-> disabled)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float dark_masking;
+
+ /**
+ * noise vs. sse weight for the nsse comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int nsse_weight;
+
+ /**
+ * motion estimation comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_cmp;
+ /**
+ * subpixel motion estimation comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_sub_cmp;
+ /**
+ * macroblock comparison function (not supported yet)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mb_cmp;
+ /**
+ * interlaced DCT comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int ildct_cmp;
+#define FF_CMP_SAD 0
+#define FF_CMP_SSE 1
+#define FF_CMP_SATD 2
+#define FF_CMP_DCT 3
+#define FF_CMP_PSNR 4
+#define FF_CMP_BIT 5
+#define FF_CMP_RD 6
+#define FF_CMP_ZERO 7
+#define FF_CMP_VSAD 8
+#define FF_CMP_VSSE 9
+#define FF_CMP_NSSE 10
+#define FF_CMP_W53 11
+#define FF_CMP_W97 12
+#define FF_CMP_DCTMAX 13
+#define FF_CMP_DCT264 14
+#define FF_CMP_MEDIAN_SAD 15
+#define FF_CMP_CHROMA 256
+
+ /**
+ * ME diamond size & shape
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int dia_size;
+
+ /**
+ * amount of previous MV predictors (2a+1 x 2a+1 square)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int last_predictor_count;
+
+ /**
+ * motion estimation prepass comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_pre_cmp;
+
+ /**
+ * ME prepass diamond size & shape
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int pre_dia_size;
+
+ /**
+ * subpel ME quality
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_subpel_quality;
+
+ /**
+ * maximum motion estimation search range in subpel units
+ * If 0 then no limit.
+ *
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_range;
+
+ /**
+ * macroblock decision mode
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mb_decision;
+#define FF_MB_DECISION_SIMPLE 0 ///< uses mb_cmp
+#define FF_MB_DECISION_BITS 1 ///< chooses the one which needs the fewest bits
+#define FF_MB_DECISION_RD 2 ///< rate distortion
+
+ /**
+ * custom intra quantization matrix
+ * Must be allocated with the av_malloc() family of functions, and will be
+ * freed in avcodec_free_context().
+ * - encoding: Set/allocated by user, freed by libavcodec. Can be NULL.
+ * - decoding: Set/allocated/freed by libavcodec.
+ */
+ uint16_t* intra_matrix;
+
+ /**
+ * custom inter quantization matrix
+ * Must be allocated with the av_malloc() family of functions, and will be
+ * freed in avcodec_free_context().
+ * - encoding: Set/allocated by user, freed by libavcodec. Can be NULL.
+ * - decoding: Set/allocated/freed by libavcodec.
+ */
+ uint16_t* inter_matrix;
+
+ /**
+ * custom intra quantization matrix
+ * - encoding: Set by user, can be NULL.
+ * - decoding: unused.
+ */
+ uint16_t* chroma_intra_matrix;
+
+ /**
+ * precision of the intra DC coefficient - 8
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec
+ */
+ int intra_dc_precision;
+
+ /**
+ * minimum MB Lagrange multiplier
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mb_lmin;
+
+ /**
+ * maximum MB Lagrange multiplier
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mb_lmax;
+
+ /**
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int bidir_refine;
+
+ /**
+ * minimum GOP size
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int keyint_min;
+
+ /**
+ * the number of pictures in a group of pictures, or 0 for intra_only
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int gop_size;
+
+ /**
+ * Note: Value depends upon the compare function used for fullpel ME.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mv0_threshold;
+
+ /**
+ * Number of slices.
+ * Indicates number of picture subdivisions. Used for parallelized
+ * decoding.
+ * - encoding: Set by user
+ * - decoding: unused
+ */
+ int slices;
+
+ /* audio only */
+ int sample_rate; ///< samples per second
+
+ /**
+ * audio sample format
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ enum AVSampleFormat sample_fmt; ///< sample format
+
+ /**
+ * Audio channel layout.
+ * - encoding: must be set by the caller, to one of AVCodec.ch_layouts.
+ * - decoding: may be set by the caller if known e.g. from the container.
+ * The decoder can then override during decoding as needed.
+ */
+ AVChannelLayout ch_layout;
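+
+ /*
+ * Editor's note: a stereo encoder-setup sketch (illustrative values,
+ * error checks omitted; enc_ctx is an assumed context):
+ *   av_channel_layout_default(&enc_ctx->ch_layout, 2);
+ *   enc_ctx->sample_rate = 48000;
+ *   enc_ctx->sample_fmt  = AV_SAMPLE_FMT_FLTP;
+ */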
+
+ /* The following data should not be initialized. */
+ /**
+ * Number of samples per channel in an audio frame.
+ *
+ * - encoding: set by libavcodec in avcodec_open2(). Each submitted frame
+ * except the last must contain exactly frame_size samples per channel.
+ * May be 0 when the codec has AV_CODEC_CAP_VARIABLE_FRAME_SIZE set, in
+ * which case the frame size is not restricted.
+ * - decoding: may be set by some decoders to indicate constant frame size
+ */
+ int frame_size;
+
+ /**
+ * number of bytes per packet if constant and known or 0
+ * Used by some WAV based audio codecs.
+ */
+ int block_align;
+
+ /**
+ * Audio cutoff bandwidth (0 means "automatic")
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int cutoff;
+
+ /**
+ * Type of service that the audio stream conveys.
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ enum AVAudioServiceType audio_service_type;
+
+ /**
+ * desired sample format
+ * - encoding: Not used.
+ * - decoding: Set by user.
+ * Decoder will decode to this format if it can.
+ */
+ enum AVSampleFormat request_sample_fmt;
+
+ /**
+ * Audio only. The number of "priming" samples (padding) inserted by the
+ * encoder at the beginning of the audio. I.e. this number of leading
+ * decoded samples must be discarded by the caller to get the original audio
+ * without leading padding.
+ *
+ * - decoding: unused
+ * - encoding: Set by libavcodec. The timestamps on the output packets are
+ * adjusted by the encoder so that they always refer to the
+ * first sample of the data actually contained in the packet,
+ * including any added padding. E.g. if the timebase is
+ * 1/samplerate and the timestamp of the first input sample is
+ * 0, the timestamp of the first output packet will be
+ * -initial_padding.
+ */
+ int initial_padding;
+
+ /**
+ * Audio only. The amount of padding (in samples) appended by the encoder to
+ * the end of the audio. I.e. this number of decoded samples must be
+ * discarded by the caller from the end of the stream to get the original
+ * audio without any trailing padding.
+ *
+ * - decoding: unused
+ * - encoding: unused
+ */
+ int trailing_padding;
+
+ /**
+ * Number of samples to skip after a discontinuity
+ * - decoding: unused
+ * - encoding: set by libavcodec
+ */
+ int seek_preroll;
+
+ /**
+ * This callback is called at the beginning of each frame to get data
+ * buffer(s) for it. There may be one contiguous buffer for all the data or
+ * there may be a buffer for each data plane, or anything in between. What
+ * this means is, you may set however many entries in buf[] you feel
+ * necessary. Each buffer must be reference-counted using the AVBuffer API
+ * (see description of buf[] below).
+ *
+ * The following fields will be set in the frame before this callback is
+ * called:
+ * - format
+ * - width, height (video only)
+ * - sample_rate, channel_layout, nb_samples (audio only)
+ * Their values may differ from the corresponding values in
+ * AVCodecContext. This callback must use the frame values, not the codec
+ * context values, to calculate the required buffer size.
+ *
+ * This callback must fill the following fields in the frame:
+ * - data[]
+ * - linesize[]
+ * - extended_data:
+ * * if the data is planar audio with more than 8 channels, then this
+ * callback must allocate and fill extended_data to contain all pointers
+ * to all data planes. data[] must hold as many pointers as it can.
+ * extended_data must be allocated with av_malloc() and will be freed in
+ * av_frame_unref().
+ * * otherwise extended_data must point to data
+ * - buf[] must contain one or more pointers to AVBufferRef structures. Each
+ * of the frame's data and extended_data pointers must be contained in these.
+ * That is, one AVBufferRef for each allocated chunk of memory, not
+ * necessarily one AVBufferRef per data[] entry. See: av_buffer_create(),
+ * av_buffer_alloc(), and av_buffer_ref().
+ * - extended_buf and nb_extended_buf must be allocated with av_malloc() by
+ * this callback and filled with the extra buffers if there are more
+ * buffers than buf[] can hold. extended_buf will be freed in
+ * av_frame_unref().
+ *
+ * If AV_CODEC_CAP_DR1 is not set then get_buffer2() must call
+ * avcodec_default_get_buffer2() instead of providing buffers allocated by
+ * some other means.
+ *
+ * Each data plane must be aligned to the maximum required by the target
+ * CPU.
+ *
+ * @see avcodec_default_get_buffer2()
+ *
+ * Video:
+ *
+ * If AV_GET_BUFFER_FLAG_REF is set in flags then the frame may be reused
+ * (read and/or written to if it is writable) later by libavcodec.
+ *
+ * avcodec_align_dimensions2() should be used to find the required width and
+ * height, as they normally need to be rounded up to the next multiple of 16.
+ *
+ * Some decoders do not support linesizes changing between frames.
+ *
+ * If frame multithreading is used, this callback may be called from a
+ * different thread, but not from more than one at once. Does not need to be
+ * reentrant.
+ *
+ * @see avcodec_align_dimensions2()
+ *
+ * Audio:
+ *
+ * Decoders request a buffer of a particular size by setting
+ * AVFrame.nb_samples prior to calling get_buffer2(). The decoder may,
+ * however, utilize only part of the buffer by setting AVFrame.nb_samples
+ * to a smaller value in the output frame.
+ *
+ * As a convenience, av_samples_get_buffer_size() and
+ * av_samples_fill_arrays() in libavutil may be used by custom get_buffer2()
+ * functions to find the required data size and to fill data pointers and
+ * linesize. In AVFrame.linesize, only linesize[0] may be set for audio
+ * since all planes must be the same size.
+ *
+ * @see av_samples_get_buffer_size(), av_samples_fill_arrays()
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec, user can override.
+ */
+ int (*get_buffer2)(struct AVCodecContext* s, AVFrame* frame, int flags);
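+
+ /*
+ * Editor's note: a hedged sketch of the minimal safe pattern; a real
+ * pool-backed allocator would fill data[], linesize[] and buf[] itself
+ * before returning 0.
+ *
+ * @code
+ * static int my_get_buffer2(struct AVCodecContext* s, AVFrame* frame,
+ *                           int flags) {
+ *   if (!(s->codec->capabilities & AV_CODEC_CAP_DR1))
+ *     return avcodec_default_get_buffer2(s, frame, flags);
+ *   // ... custom allocation would go here; defer to the default for now.
+ *   return avcodec_default_get_buffer2(s, frame, flags);
+ * }
+ * @endcode
+ */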
+
+ /* - encoding parameters */
+ /**
+ * number of bits the bitstream is allowed to diverge from the reference.
+ * The reference can be CBR (for CBR pass1) or VBR (for pass2).
+ * - encoding: Set by user; unused for constant quantizer encoding.
+ * - decoding: unused
+ */
+ int bit_rate_tolerance;
+
+ /**
+ * Global quality for codecs which cannot change it per frame.
+ * This should be proportional to MPEG-1/2/4 qscale.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int global_quality;
+
+ /**
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int compression_level;
+#define FF_COMPRESSION_DEFAULT -1
+
+ float qcompress; ///< amount of qscale change between easy & hard scenes
+ ///< (0.0-1.0)
+ float qblur; ///< amount of qscale smoothing over time (0.0-1.0)
+
+ /**
+ * minimum quantizer
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int qmin;
+
+ /**
+ * maximum quantizer
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int qmax;
+
+ /**
+ * maximum quantizer difference between frames
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int max_qdiff;
+
+ /**
+ * decoder bitstream buffer size
+ * - encoding: Set by user.
+ * - decoding: May be set by libavcodec.
+ */
+ int rc_buffer_size;
+
+ /**
+ * ratecontrol override, see RcOverride
+ * - encoding: Allocated/set/freed by user.
+ * - decoding: unused
+ */
+ int rc_override_count;
+ RcOverride* rc_override;
+
+ /**
+ * maximum bitrate
+ * - encoding: Set by user.
+ * - decoding: Set by user, may be overwritten by libavcodec.
+ */
+ int64_t rc_max_rate;
+
+ /**
+ * minimum bitrate
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int64_t rc_min_rate;
+
+ /**
+ * Ratecontrol attempt to use, at maximum, <value> of what can be used without
+ * an underflow.
+ * - encoding: Set by user.
+ * - decoding: unused.
+ */
+ float rc_max_available_vbv_use;
+
+ /**
+ * Ratecontrol attempt to use, at least, <value> times the amount needed to
+ * prevent a vbv overflow.
+ * - encoding: Set by user.
+ * - decoding: unused.
+ */
+ float rc_min_vbv_overflow_use;
+
+ /**
+ * Number of bits which should be loaded into the rc buffer before decoding
+ * starts.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int rc_initial_buffer_occupancy;
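+
+ /*
+ * Editor's note: a hedged constant-bitrate-style sketch tying the rc_*
+ * fields together (numbers are illustrative only):
+ *   enc_ctx->bit_rate       = 4000000;
+ *   enc_ctx->rc_max_rate    = 4000000;
+ *   enc_ctx->rc_min_rate    = 4000000;
+ *   enc_ctx->rc_buffer_size = 8000000;
+ */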
+
+ /**
+ * trellis RD quantization
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int trellis;
+
+ /**
+ * pass1 encoding statistics output buffer
+ * - encoding: Set by libavcodec.
+ * - decoding: unused
+ */
+ char* stats_out;
+
+ /**
+ * pass2 encoding statistics input buffer
+ * Concatenated stuff from stats_out of pass1 should be placed here.
+ * - encoding: Allocated/set/freed by user.
+ * - decoding: unused
+ */
+ char* stats_in;
+
+ /**
+ * Work around bugs in encoders which sometimes cannot be detected
+ * automatically.
+ * - encoding: Set by user
+ * - decoding: Set by user
+ */
+ int workaround_bugs;
+#define FF_BUG_AUTODETECT 1 ///< autodetection
+#define FF_BUG_XVID_ILACE 4
+#define FF_BUG_UMP4 8
+#define FF_BUG_NO_PADDING 16
+#define FF_BUG_AMV 32
+#define FF_BUG_QPEL_CHROMA 64
+#define FF_BUG_STD_QPEL 128
+#define FF_BUG_QPEL_CHROMA2 256
+#define FF_BUG_DIRECT_BLOCKSIZE 512
+#define FF_BUG_EDGE 1024
+#define FF_BUG_HPEL_CHROMA 2048
+#define FF_BUG_DC_CLIP 4096
+#define FF_BUG_MS \
+ 8192 ///< Work around various bugs in Microsoft's broken decoders.
+#define FF_BUG_TRUNCATED 16384
+#define FF_BUG_IEDGE 32768
+
+ /**
+ * strictly follow the standard (MPEG-4, ...).
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ * Setting this to STRICT or higher means the encoder and decoder will
+ * generally do stupid things, whereas setting it to unofficial or lower
+ * will mean the encoder might produce output that is not supported by all
+ * spec-compliant decoders. Decoders don't differentiate between normal,
+ * unofficial and experimental (that is, they always try to decode things
+ * when they can) unless they are explicitly asked to behave stupidly
+ * (=strictly conform to the specs).
+ * This may only be set to one of the FF_COMPLIANCE_* values in defs.h.
+ */
+ int strict_std_compliance;
+
+ /**
+ * error concealment flags
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int error_concealment;
+#define FF_EC_GUESS_MVS 1
+#define FF_EC_DEBLOCK 2
+#define FF_EC_FAVOR_INTER 256
+
+ /**
+ * debug
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int debug;
+#define FF_DEBUG_PICT_INFO 1
+#define FF_DEBUG_RC 2
+#define FF_DEBUG_BITSTREAM 4
+#define FF_DEBUG_MB_TYPE 8
+#define FF_DEBUG_QP 16
+#define FF_DEBUG_DCT_COEFF 0x00000040
+#define FF_DEBUG_SKIP 0x00000080
+#define FF_DEBUG_STARTCODE 0x00000100
+#define FF_DEBUG_ER 0x00000400
+#define FF_DEBUG_MMCO 0x00000800
+#define FF_DEBUG_BUGS 0x00001000
+#define FF_DEBUG_BUFFERS 0x00008000
+#define FF_DEBUG_THREADS 0x00010000
+#define FF_DEBUG_GREEN_MD 0x00800000
+#define FF_DEBUG_NOMC 0x01000000
+
+ /**
+ * Error recognition; may misdetect some more or less valid parts as errors.
+ * This is a bitfield of the AV_EF_* values defined in defs.h.
+ *
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int err_recognition;
+
+ /**
+ * Hardware accelerator in use
+ * - encoding: unused.
+ * - decoding: Set by libavcodec
+ */
+ const struct AVHWAccel* hwaccel;
+
+ /**
+ * Legacy hardware accelerator context.
+ *
+ * For some hardware acceleration methods, the caller may use this field to
+ * signal hwaccel-specific data to the codec. The struct pointed to by this
+ * pointer is hwaccel-dependent and defined in the respective header. Please
+ * refer to the FFmpeg HW accelerator documentation to know how to fill
+ * this.
+ *
+ * In most cases this field is optional - the necessary information may also
+ * be provided to libavcodec through @ref hw_frames_ctx or @ref
+ * hw_device_ctx (see avcodec_get_hw_config()). However, in some cases it
+ * may be the only method of signalling some (optional) information.
+ *
+ * The struct and its contents are owned by the caller.
+ *
+ * - encoding: May be set by the caller before avcodec_open2(). Must remain
+ * valid until avcodec_free_context().
+ * - decoding: May be set by the caller in the get_format() callback.
+ * Must remain valid until the next get_format() call,
+ * or avcodec_free_context() (whichever comes first).
+ */
+ void* hwaccel_context;
+
+ /**
+ * A reference to the AVHWFramesContext describing the input (for encoding)
+ * or output (decoding) frames. The reference is set by the caller and
+ * afterwards owned (and freed) by libavcodec - it should never be read by
+ * the caller after being set.
+ *
+ * - decoding: This field should be set by the caller from the get_format()
+ * callback. The previous reference (if any) will always be
+ * unreffed by libavcodec before the get_format() call.
+ *
+ * If the default get_buffer2() is used with a hwaccel pixel
+ * format, then this AVHWFramesContext will be used for
+ * allocating the frame buffers.
+ *
+ * - encoding: For hardware encoders configured to use a hwaccel pixel
+ * format, this field should be set by the caller to a reference
+ * to the AVHWFramesContext describing input frames.
+ * AVHWFramesContext.format must be equal to
+ * AVCodecContext.pix_fmt.
+ *
+ * This field should be set before avcodec_open2() is called.
+ */
+ AVBufferRef* hw_frames_ctx;
+
+ /**
+ * A reference to the AVHWDeviceContext describing the device which will
+ * be used by a hardware encoder/decoder. The reference is set by the
+ * caller and afterwards owned (and freed) by libavcodec.
+ *
+ * This should be used if either the codec device does not require
+ * hardware frames or any that are used are to be allocated internally by
+ * libavcodec. If the user wishes to supply any of the frames used as
+ * encoder input or decoder output then hw_frames_ctx should be used
+ * instead. When hw_frames_ctx is set in get_format() for a decoder, this
+ * field will be ignored while decoding the associated stream segment, but
+ * may again be used on a following one after another get_format() call.
+ *
+ * For both encoders and decoders this field should be set before
+ * avcodec_open2() is called and must not be written to thereafter.
+ *
+ * Note that some decoders may require this field to be set initially in
+ * order to support hw_frames_ctx at all - in that case, all frames
+ * contexts used must be created on the same device.
+ */
+ AVBufferRef* hw_device_ctx;
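+
+ /*
+ * A minimal usage sketch (assuming an allocated context avctx; the VAAPI
+ * device type is illustrative): create a device reference before
+ * avcodec_open2() and fall back to software decoding on failure.
+ * @code
+ * if (av_hwdevice_ctx_create(&avctx->hw_device_ctx,
+ *                            AV_HWDEVICE_TYPE_VAAPI, NULL, NULL, 0) < 0) {
+ *   // hardware device unavailable; continue with software decoding
+ * }
+ * @endcode
+ */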
+
+ /**
+ * Bit set of AV_HWACCEL_FLAG_* flags, which affect hardware accelerated
+ * decoding (if active).
+ * - encoding: unused
+ * - decoding: Set by user (either before avcodec_open2(), or in the
+ * AVCodecContext.get_format callback)
+ */
+ int hwaccel_flags;
+
+ /**
+ * Video decoding only. Sets the number of extra hardware frames which
+ * the decoder will allocate for use by the caller. This must be set
+ * before avcodec_open2() is called.
+ *
+ * Some hardware decoders require all frames that they will use for
+ * output to be defined in advance before decoding starts. For such
+ * decoders, the hardware frame pool must therefore be of a fixed size.
+ * The extra frames set here are on top of any number that the decoder
+ * needs internally in order to operate normally (for example, frames
+ * used as reference pictures).
+ */
+ int extra_hw_frames;
+
+ /**
+ * error
+ * - encoding: Set by libavcodec if flags & AV_CODEC_FLAG_PSNR.
+ * - decoding: unused
+ */
+ uint64_t error[AV_NUM_DATA_POINTERS];
+
+ /**
+ * DCT algorithm, see FF_DCT_* below
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int dct_algo;
+#define FF_DCT_AUTO 0
+#define FF_DCT_FASTINT 1
+#define FF_DCT_INT 2
+#define FF_DCT_MMX 3
+#define FF_DCT_ALTIVEC 5
+#define FF_DCT_FAAN 6
+
+ /**
+ * IDCT algorithm, see FF_IDCT_* below.
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int idct_algo;
+#define FF_IDCT_AUTO 0
+#define FF_IDCT_INT 1
+#define FF_IDCT_SIMPLE 2
+#define FF_IDCT_SIMPLEMMX 3
+#define FF_IDCT_ARM 7
+#define FF_IDCT_ALTIVEC 8
+#define FF_IDCT_SIMPLEARM 10
+#define FF_IDCT_XVID 14
+#define FF_IDCT_SIMPLEARMV5TE 16
+#define FF_IDCT_SIMPLEARMV6 17
+#define FF_IDCT_FAAN 20
+#define FF_IDCT_SIMPLENEON 22
+#define FF_IDCT_SIMPLEAUTO 128
+
+ /**
+ * bits per sample/pixel from the demuxer (needed for huffyuv).
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by user.
+ */
+ int bits_per_coded_sample;
+
+ /**
+ * Bits per sample/pixel of internal libavcodec pixel/sample format.
+ * - encoding: set by user.
+ * - decoding: set by libavcodec.
+ */
+ int bits_per_raw_sample;
+
+ /**
+ * thread count
+ * is used to decide how many independent tasks should be passed to execute()
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int thread_count;
+
+ /**
+ * Which multithreading methods to use.
+ * Use of FF_THREAD_FRAME will increase decoding delay by one frame per
+ * thread, so clients which cannot provide future frames should not use it.
+ *
+ * - encoding: Set by user, otherwise the default is used.
+ * - decoding: Set by user, otherwise the default is used.
+ */
+ int thread_type;
+#define FF_THREAD_FRAME 1 ///< Decode more than one frame at once
+#define FF_THREAD_SLICE \
+ 2 ///< Decode more than one part of a single frame at once
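+
+ /*
+ * A minimal usage sketch (assuming an allocated context avctx): let
+ * libavcodec pick the thread count and allow both methods; a client that
+ * cannot tolerate the extra frame of delay would omit FF_THREAD_FRAME.
+ * @code
+ * avctx->thread_count = 0;  // 0 selects the number of threads automatically
+ * avctx->thread_type = FF_THREAD_FRAME | FF_THREAD_SLICE;
+ * @endcode
+ */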
+
+ /**
+ * Which multithreading methods are in use by the codec.
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by libavcodec.
+ */
+ int active_thread_type;
+
+ /**
+ * The codec may call this to execute several independent things.
+ * It will return only after finishing all tasks.
+ * The user may replace this with some multithreaded implementation;
+ * the default implementation will execute the parts serially.
+ * @param count the number of things to execute
+ * - encoding: Set by libavcodec, user can override.
+ * - decoding: Set by libavcodec, user can override.
+ */
+ int (*execute)(struct AVCodecContext* c,
+ int (*func)(struct AVCodecContext* c2, void* arg), void* arg2,
+ int* ret, int count, int size);
+
+ /**
+ * The codec may call this to execute several independent things.
+ * It will return only after finishing all tasks.
+ * The user may replace this with some multithreaded implementation;
+ * the default implementation will execute the parts serially.
+ * @param c context passed also to func
+ * @param count the number of things to execute
+ * @param arg2 argument passed unchanged to func
+ * @param ret return values of executed functions, must have space for "count"
+ * values. May be NULL.
+ * @param func function that will be called count times, with jobnr from 0 to
+ * count-1. threadnr will be in the range 0 to c->thread_count-1 (which is
+ * less than MAX_THREADS), such that no two instances of func executing at
+ * the same time have the same threadnr.
+ * @return always 0 currently, but code should handle a future improvement
+ * where, if any call to func returns < 0, no further calls to func are made
+ * and < 0 is returned.
+ * - encoding: Set by libavcodec, user can override.
+ * - decoding: Set by libavcodec, user can override.
+ */
+ int (*execute2)(struct AVCodecContext* c,
+ int (*func)(struct AVCodecContext* c2, void* arg, int jobnr,
+ int threadnr),
+ void* arg2, int* ret, int count);
+
+ /**
+ * profile
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ * See the AV_PROFILE_* defines in defs.h.
+ */
+ int profile;
+#if FF_API_FF_PROFILE_LEVEL
+ /** @deprecated The following defines are deprecated; use AV_PROFILE_*
+ * in defs.h instead. */
+# define FF_PROFILE_UNKNOWN -99
+# define FF_PROFILE_RESERVED -100
+
+# define FF_PROFILE_AAC_MAIN 0
+# define FF_PROFILE_AAC_LOW 1
+# define FF_PROFILE_AAC_SSR 2
+# define FF_PROFILE_AAC_LTP 3
+# define FF_PROFILE_AAC_HE 4
+# define FF_PROFILE_AAC_HE_V2 28
+# define FF_PROFILE_AAC_LD 22
+# define FF_PROFILE_AAC_ELD 38
+# define FF_PROFILE_MPEG2_AAC_LOW 128
+# define FF_PROFILE_MPEG2_AAC_HE 131
+
+# define FF_PROFILE_DNXHD 0
+# define FF_PROFILE_DNXHR_LB 1
+# define FF_PROFILE_DNXHR_SQ 2
+# define FF_PROFILE_DNXHR_HQ 3
+# define FF_PROFILE_DNXHR_HQX 4
+# define FF_PROFILE_DNXHR_444 5
+
+# define FF_PROFILE_DTS 20
+# define FF_PROFILE_DTS_ES 30
+# define FF_PROFILE_DTS_96_24 40
+# define FF_PROFILE_DTS_HD_HRA 50
+# define FF_PROFILE_DTS_HD_MA 60
+# define FF_PROFILE_DTS_EXPRESS 70
+# define FF_PROFILE_DTS_HD_MA_X 61
+# define FF_PROFILE_DTS_HD_MA_X_IMAX 62
+
+# define FF_PROFILE_EAC3_DDP_ATMOS 30
+
+# define FF_PROFILE_TRUEHD_ATMOS 30
+
+# define FF_PROFILE_MPEG2_422 0
+# define FF_PROFILE_MPEG2_HIGH 1
+# define FF_PROFILE_MPEG2_SS 2
+# define FF_PROFILE_MPEG2_SNR_SCALABLE 3
+# define FF_PROFILE_MPEG2_MAIN 4
+# define FF_PROFILE_MPEG2_SIMPLE 5
+
+# define FF_PROFILE_H264_CONSTRAINED (1 << 9) // 8+1; constraint_set1_flag
+# define FF_PROFILE_H264_INTRA (1 << 11) // 8+3; constraint_set3_flag
+
+# define FF_PROFILE_H264_BASELINE 66
+# define FF_PROFILE_H264_CONSTRAINED_BASELINE \
+ (66 | FF_PROFILE_H264_CONSTRAINED)
+# define FF_PROFILE_H264_MAIN 77
+# define FF_PROFILE_H264_EXTENDED 88
+# define FF_PROFILE_H264_HIGH 100
+# define FF_PROFILE_H264_HIGH_10 110
+# define FF_PROFILE_H264_HIGH_10_INTRA (110 | FF_PROFILE_H264_INTRA)
+# define FF_PROFILE_H264_MULTIVIEW_HIGH 118
+# define FF_PROFILE_H264_HIGH_422 122
+# define FF_PROFILE_H264_HIGH_422_INTRA (122 | FF_PROFILE_H264_INTRA)
+# define FF_PROFILE_H264_STEREO_HIGH 128
+# define FF_PROFILE_H264_HIGH_444 144
+# define FF_PROFILE_H264_HIGH_444_PREDICTIVE 244
+# define FF_PROFILE_H264_HIGH_444_INTRA (244 | FF_PROFILE_H264_INTRA)
+# define FF_PROFILE_H264_CAVLC_444 44
+
+# define FF_PROFILE_VC1_SIMPLE 0
+# define FF_PROFILE_VC1_MAIN 1
+# define FF_PROFILE_VC1_COMPLEX 2
+# define FF_PROFILE_VC1_ADVANCED 3
+
+# define FF_PROFILE_MPEG4_SIMPLE 0
+# define FF_PROFILE_MPEG4_SIMPLE_SCALABLE 1
+# define FF_PROFILE_MPEG4_CORE 2
+# define FF_PROFILE_MPEG4_MAIN 3
+# define FF_PROFILE_MPEG4_N_BIT 4
+# define FF_PROFILE_MPEG4_SCALABLE_TEXTURE 5
+# define FF_PROFILE_MPEG4_SIMPLE_FACE_ANIMATION 6
+# define FF_PROFILE_MPEG4_BASIC_ANIMATED_TEXTURE 7
+# define FF_PROFILE_MPEG4_HYBRID 8
+# define FF_PROFILE_MPEG4_ADVANCED_REAL_TIME 9
+# define FF_PROFILE_MPEG4_CORE_SCALABLE 10
+# define FF_PROFILE_MPEG4_ADVANCED_CODING 11
+# define FF_PROFILE_MPEG4_ADVANCED_CORE 12
+# define FF_PROFILE_MPEG4_ADVANCED_SCALABLE_TEXTURE 13
+# define FF_PROFILE_MPEG4_SIMPLE_STUDIO 14
+# define FF_PROFILE_MPEG4_ADVANCED_SIMPLE 15
+
+# define FF_PROFILE_JPEG2000_CSTREAM_RESTRICTION_0 1
+# define FF_PROFILE_JPEG2000_CSTREAM_RESTRICTION_1 2
+# define FF_PROFILE_JPEG2000_CSTREAM_NO_RESTRICTION 32768
+# define FF_PROFILE_JPEG2000_DCINEMA_2K 3
+# define FF_PROFILE_JPEG2000_DCINEMA_4K 4
+
+# define FF_PROFILE_VP9_0 0
+# define FF_PROFILE_VP9_1 1
+# define FF_PROFILE_VP9_2 2
+# define FF_PROFILE_VP9_3 3
+
+# define FF_PROFILE_HEVC_MAIN 1
+# define FF_PROFILE_HEVC_MAIN_10 2
+# define FF_PROFILE_HEVC_MAIN_STILL_PICTURE 3
+# define FF_PROFILE_HEVC_REXT 4
+# define FF_PROFILE_HEVC_SCC 9
+
+# define FF_PROFILE_VVC_MAIN_10 1
+# define FF_PROFILE_VVC_MAIN_10_444 33
+
+# define FF_PROFILE_AV1_MAIN 0
+# define FF_PROFILE_AV1_HIGH 1
+# define FF_PROFILE_AV1_PROFESSIONAL 2
+
+# define FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT 0xc0
+# define FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT 0xc1
+# define FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT 0xc2
+# define FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS 0xc3
+# define FF_PROFILE_MJPEG_JPEG_LS 0xf7
+
+# define FF_PROFILE_SBC_MSBC 1
+
+# define FF_PROFILE_PRORES_PROXY 0
+# define FF_PROFILE_PRORES_LT 1
+# define FF_PROFILE_PRORES_STANDARD 2
+# define FF_PROFILE_PRORES_HQ 3
+# define FF_PROFILE_PRORES_4444 4
+# define FF_PROFILE_PRORES_XQ 5
+
+# define FF_PROFILE_ARIB_PROFILE_A 0
+# define FF_PROFILE_ARIB_PROFILE_C 1
+
+# define FF_PROFILE_KLVA_SYNC 0
+# define FF_PROFILE_KLVA_ASYNC 1
+
+# define FF_PROFILE_EVC_BASELINE 0
+# define FF_PROFILE_EVC_MAIN 1
+#endif
+
+ /**
+ * Encoding level descriptor.
+ * - encoding: Set by user; corresponds to a specific level defined by the
+ * codec, usually matching the profile level. If not specified, it is
+ * set to FF_LEVEL_UNKNOWN.
+ * - decoding: Set by libavcodec.
+ * See AV_LEVEL_* in defs.h.
+ */
+ int level;
+#if FF_API_FF_PROFILE_LEVEL
+ /** @deprecated The following define is deprecated; use AV_LEVEL_UNKNOWN
+ * in defs.h instead. */
+# define FF_LEVEL_UNKNOWN -99
+#endif
+
+ /**
+ * Properties of the stream that gets decoded
+ * - encoding: unused
+ * - decoding: set by libavcodec
+ */
+ unsigned properties;
+#define FF_CODEC_PROPERTY_LOSSLESS 0x00000001
+#define FF_CODEC_PROPERTY_CLOSED_CAPTIONS 0x00000002
+#define FF_CODEC_PROPERTY_FILM_GRAIN 0x00000004
+
+ /**
+ * Skip loop filtering for selected frames.
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ enum AVDiscard skip_loop_filter;
+
+ /**
+ * Skip IDCT/dequantization for selected frames.
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ enum AVDiscard skip_idct;
+
+ /**
+ * Skip decoding for selected frames.
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ enum AVDiscard skip_frame;
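+
+ /*
+ * A minimal usage sketch (assuming an allocated decoder context avctx):
+ * skip all non-reference frames, e.g. for fast preview decoding.
+ * @code
+ * avctx->skip_frame = AVDISCARD_NONREF;
+ * @endcode
+ */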
+
+ /**
+ * Skip processing alpha if supported by codec.
+ * Note that if the format uses pre-multiplied alpha (common with VP6,
+ * and recommended due to better video quality/compression)
+ * the image will look as if alpha-blended onto a black background.
+ * However, for formats that do not use pre-multiplied alpha
+ * there might be serious artefacts (though e.g. libswscale currently
+ * assumes pre-multiplied alpha anyway).
+ *
+ * - decoding: set by user
+ * - encoding: unused
+ */
+ int skip_alpha;
+
+ /**
+ * Number of macroblock rows at the top which are skipped.
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int skip_top;
+
+ /**
+ * Number of macroblock rows at the bottom which are skipped.
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int skip_bottom;
+
+ /**
+ * low resolution decoding, 1 -> 1/2 size, 2 -> 1/4 size
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int lowres;
+
+ /**
+ * AVCodecDescriptor
+ * - encoding: unused.
+ * - decoding: set by libavcodec.
+ */
+ const struct AVCodecDescriptor* codec_descriptor;
+
+ /**
+ * Character encoding of the input subtitles file.
+ * - decoding: set by user
+ * - encoding: unused
+ */
+ char* sub_charenc;
+
+ /**
+ * Subtitles character encoding mode. Formats or codecs might be adjusting
+ * this setting (if they are doing the conversion themselves for instance).
+ * - decoding: set by libavcodec
+ * - encoding: unused
+ */
+ int sub_charenc_mode;
+#define FF_SUB_CHARENC_MODE_DO_NOTHING \
+ -1 ///< do nothing (demuxer outputs a stream supposed to be already in UTF-8,
+ ///< or the codec is bitmap for instance)
+#define FF_SUB_CHARENC_MODE_AUTOMATIC \
+ 0 ///< libavcodec will select the mode itself
+#define FF_SUB_CHARENC_MODE_PRE_DECODER \
+ 1 ///< the AVPacket data needs to be recoded to UTF-8 before being fed to the
+ ///< decoder, requires iconv
+#define FF_SUB_CHARENC_MODE_IGNORE \
+ 2 ///< neither convert the subtitles, nor check them for valid UTF-8
+
+ /**
+ * Header containing style information for text subtitles.
+ * For SUBTITLE_ASS subtitle type, it should contain the whole ASS
+ * [Script Info] and [V4+ Styles] section, plus the [Events] line and
+ * the Format line following. It shouldn't include any Dialogue line.
+ * - encoding: Set/allocated/freed by user (before avcodec_open2())
+ * - decoding: Set/allocated/freed by libavcodec (by avcodec_open2())
+ */
+ int subtitle_header_size;
+ uint8_t* subtitle_header;
+
+ /**
+ * dump format separator.
+ * can be ", " or "\n " or anything else
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ uint8_t* dump_separator;
+
+ /**
+ * ',' separated list of allowed decoders.
+ * If NULL then all are allowed
+ * - encoding: unused
+ * - decoding: set by user
+ */
+ char* codec_whitelist;
+
+ /**
+ * Additional data associated with the entire coded stream.
+ *
+ * - decoding: may be set by user before calling avcodec_open2().
+ * - encoding: may be set by libavcodec after avcodec_open2().
+ */
+ AVPacketSideData* coded_side_data;
+ int nb_coded_side_data;
+
+ /**
+ * Bit set of AV_CODEC_EXPORT_DATA_* flags, which affects the kind of
+ * metadata exported in frame, packet, or coded stream side data by
+ * decoders and encoders.
+ *
+ * - decoding: set by user
+ * - encoding: set by user
+ */
+ int export_side_data;
+
+ /**
+ * The number of pixels per image to maximally accept.
+ *
+ * - decoding: set by user
+ * - encoding: set by user
+ */
+ int64_t max_pixels;
+
+ /**
+ * Video decoding only. Certain video codecs support cropping, meaning that
+ * only a sub-rectangle of the decoded frame is intended for display. This
+ * option controls how cropping is handled by libavcodec.
+ *
+ * When set to 1 (the default), libavcodec will apply cropping internally.
+ * I.e. it will modify the output frame width/height fields and offset the
+ * data pointers (only by as much as possible while preserving alignment, or
+ * by the full amount if the AV_CODEC_FLAG_UNALIGNED flag is set) so that
+ * the frames output by the decoder refer only to the cropped area. The
+ * crop_* fields of the output frames will be zero.
+ *
+ * When set to 0, the width/height fields of the output frames will be set
+ * to the coded dimensions and the crop_* fields will describe the cropping
+ * rectangle. Applying the cropping is left to the caller.
+ *
+ * @warning When hardware acceleration with opaque output frames is used,
+ * libavcodec is unable to apply cropping from the top/left border.
+ *
+ * @note when this option is set to zero, the width/height fields of the
+ * AVCodecContext and output AVFrames have different meanings. The codec
+ * context fields store display dimensions (with the coded dimensions in
+ * coded_width/height), while the frame fields store the coded dimensions
+ * (with the display dimensions being determined by the crop_* fields).
+ */
+ int apply_cropping;
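+
+ /*
+ * A sketch of caller-side cropping (assuming apply_cropping was set to 0
+ * and frame is a decoded AVFrame*): the crop_* fields can be applied with
+ * libavutil's av_frame_apply_cropping().
+ * @code
+ * if (av_frame_apply_cropping(frame, 0) < 0) {
+ *   // cropping failed; frame still has the coded dimensions
+ * }
+ * @endcode
+ */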
+
+ /**
+ * The percentage of damaged samples to discard a frame.
+ *
+ * - decoding: set by user
+ * - encoding: unused
+ */
+ int discard_damaged_percentage;
+
+ /**
+ * The number of samples per frame to maximally accept.
+ *
+ * - decoding: set by user
+ * - encoding: set by user
+ */
+ int64_t max_samples;
+
+ /**
+ * This callback is called at the beginning of each packet to get a data
+ * buffer for it.
+ *
+ * The following field will be set in the packet before this callback is
+ * called:
+ * - size
+ * This callback must use the above value to calculate the required buffer
+ * size, which must be padded by at least AV_INPUT_BUFFER_PADDING_SIZE bytes.
+ *
+ * In some specific cases, the encoder may not use the entire buffer allocated
+ * by this callback. This will be reflected in the size value in the packet
+ * once returned by avcodec_receive_packet().
+ *
+ * This callback must fill the following fields in the packet:
+ * - data: alignment requirements for AVPacket apply, if any. Some
+ * architectures and encoders may benefit from having aligned data.
+ * - buf: must contain a pointer to an AVBufferRef structure. The packet's
+ * data pointer must be contained in it. See: av_buffer_create(),
+ * av_buffer_alloc(), and av_buffer_ref().
+ *
+ * If AV_CODEC_CAP_DR1 is not set then get_encode_buffer() must call
+ * avcodec_default_get_encode_buffer() instead of providing a buffer allocated
+ * by some other means.
+ *
+ * The flags field may contain a combination of AV_GET_ENCODE_BUFFER_FLAG_
+ * flags. They may be used for example to hint what use the buffer may get
+ * after being created. Implementations of this callback may ignore flags they
+ * don't understand. If AV_GET_ENCODE_BUFFER_FLAG_REF is set in flags then the
+ * packet may be reused (read and/or written to if it is writable) later by
+ * libavcodec.
+ *
+ * This callback must be thread-safe, as when frame threading is used, it may
+ * be called from multiple threads simultaneously.
+ *
+ * @see avcodec_default_get_encode_buffer()
+ *
+ * - encoding: Set by libavcodec, user can override.
+ * - decoding: unused
+ */
+ int (*get_encode_buffer)(struct AVCodecContext* s, AVPacket* pkt, int flags);
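+
+ /*
+ * A minimal sketch of a custom implementation (my_get_encode_buffer is a
+ * hypothetical name): it defers to the default allocator; a real
+ * implementation could hand out packets from its own buffer pool instead.
+ * @code
+ * static int my_get_encode_buffer(AVCodecContext* s, AVPacket* pkt,
+ *                                 int flags) {
+ *   return avcodec_default_get_encode_buffer(s, pkt, flags);
+ * }
+ * @endcode
+ */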
+
+ /**
+ * Frame counter, set by libavcodec.
+ *
+ * - decoding: total number of frames returned from the decoder so far.
+ * - encoding: total number of frames passed to the encoder so far.
+ *
+ * @note the counter is not incremented if encoding/decoding resulted in
+ * an error.
+ */
+ int64_t frame_num;
+
+ /**
+ * Decoding only. May be set by the caller before avcodec_open2() to an
+ * av_malloc()'ed array (or via AVOptions). Owned and freed by the decoder
+ * afterwards.
+ *
+ * Side data attached to decoded frames may come from several sources:
+ * 1. coded_side_data, which the decoder will, for certain types, translate
+ * from packet-type to frame-type side data and attach to frames;
+ * 2. side data attached to an AVPacket sent for decoding (same
+ * considerations as above);
+ * 3. extracted from the coded bytestream.
+ * The first two cases are supplied by the caller and typically come from a
+ * container.
+ *
+ * This array configures decoder behaviour in cases when side data of the
+ * same type is present both in the coded bytestream and in the
+ * user-supplied side data (items 1. and 2. above). In all cases, at most
+ * one instance of each side data type will be attached to output frames. By
+ * default it will be the bytestream side data. Adding an
+ * AVPacketSideDataType value to this array will flip the preference for
+ * this type, thus making the decoder prefer user-supplied side data over
+ * bytestream. In case side data of the same type is present both in
+ * coded_side_data and attached to a packet, the packet instance always has
+ * priority.
+ *
+ * The array may also contain a single -1, in which case the preference is
+ * switched for all side data types.
+ */
+ int* side_data_prefer_packet;
+ /**
+ * Number of entries in side_data_prefer_packet.
+ */
+ unsigned nb_side_data_prefer_packet;
+
+ /**
+ * Array containing static side data, such as HDR10 CLL / MDCV structures.
+ * Side data entries should be allocated by usage of helpers defined in
+ * libavutil/frame.h.
+ *
+ * - encoding: may be set by user before calling avcodec_open2() for
+ * encoder configuration. Afterwards owned and freed by the
+ * encoder.
+ * - decoding: unused
+ */
+ AVFrameSideData** decoded_side_data;
+ int nb_decoded_side_data;
+} AVCodecContext;
+
+/**
+ * @defgroup lavc_hwaccel AVHWAccel
+ *
+ * @note Nothing in this structure should be accessed by the user. At some
+ * point in the future it will not be externally visible at all.
+ *
+ * @{
+ */
+typedef struct AVHWAccel {
+ /**
+ * Name of the hardware accelerated codec.
+ * The name is globally unique among encoders and among decoders (but an
+ * encoder and a decoder can share the same name).
+ */
+ const char* name;
+
+ /**
+ * Type of codec implemented by the hardware accelerator.
+ *
+ * See AVMEDIA_TYPE_xxx
+ */
+ enum AVMediaType type;
+
+ /**
+ * Codec implemented by the hardware accelerator.
+ *
+ * See AV_CODEC_ID_xxx
+ */
+ enum AVCodecID id;
+
+ /**
+ * Supported pixel format.
+ *
+ * Only hardware accelerated formats are supported here.
+ */
+ enum AVPixelFormat pix_fmt;
+
+ /**
+ * Hardware accelerated codec capabilities.
+ * see AV_HWACCEL_CODEC_CAP_*
+ */
+ int capabilities;
+} AVHWAccel;
+
+/**
+ * HWAccel is experimental and is thus avoided in favor of non-experimental
+ * codecs.
+ */
+#define AV_HWACCEL_CODEC_CAP_EXPERIMENTAL 0x0200
+
+/**
+ * Hardware acceleration should be used for decoding even if the codec level
+ * used is unknown or higher than the maximum supported level reported by the
+ * hardware driver.
+ *
+ * It's generally a good idea to pass this flag unless you have a specific
+ * reason not to, as hardware tends to under-report supported levels.
+ */
+#define AV_HWACCEL_FLAG_IGNORE_LEVEL (1 << 0)
+
+/**
+ * Hardware acceleration can output YUV pixel formats with a different chroma
+ * sampling than 4:2:0 and/or other than 8 bits per component.
+ */
+#define AV_HWACCEL_FLAG_ALLOW_HIGH_DEPTH (1 << 1)
+
+/**
+ * Hardware acceleration should still be attempted for decoding when the
+ * codec profile does not match the reported capabilities of the hardware.
+ *
+ * For example, this can be used to try to decode baseline profile H.264
+ * streams in hardware - it will often succeed, because many streams marked
+ * as baseline profile actually conform to constrained baseline profile.
+ *
+ * @warning If the stream is actually not supported then the behaviour is
+ * undefined, and may include returning entirely incorrect output
+ * while indicating success.
+ */
+#define AV_HWACCEL_FLAG_ALLOW_PROFILE_MISMATCH (1 << 2)
+
+/**
+ * Some hardware decoders (namely nvdec) can either output direct decoder
+ * surfaces, or make an on-device copy and return said copy.
+ * There is a hard limit on how many decoder surfaces there can be, and it
+ * cannot be accurately guessed ahead of time.
+ * For some processing chains, this can be okay, but others will run into the
+ * limit and in turn produce very confusing errors that require fine tuning of
+ * more or less obscure options by the user, or in extreme cases cannot be
+ * resolved at all without inserting an avfilter that forces a copy.
+ *
+ * Thus, the hwaccel will by default make a copy for safety and resilience.
+ * If a user really wants to minimize the amount of copies, they can set this
+ * flag and ensure their processing chain does not exhaust the surface pool.
+ */
+#define AV_HWACCEL_FLAG_UNSAFE_OUTPUT (1 << 3)
+
+/**
+ * @}
+ */
+
+enum AVSubtitleType {
+ SUBTITLE_NONE,
+
+ SUBTITLE_BITMAP, ///< A bitmap, pict will be set
+
+ /**
+ * Plain text, the text field must be set by the decoder and is
+ * authoritative. ass and pict fields may contain approximations.
+ */
+ SUBTITLE_TEXT,
+
+ /**
+ * Formatted text, the ass field must be set by the decoder and is
+ * authoritative. pict and text fields may contain approximations.
+ */
+ SUBTITLE_ASS,
+};
+
+#define AV_SUBTITLE_FLAG_FORCED 0x00000001
+
+typedef struct AVSubtitleRect {
+ int x; ///< top left corner of pict, undefined when pict is not set
+ int y; ///< top left corner of pict, undefined when pict is not set
+ int w; ///< width of pict, undefined when pict is not set
+ int h; ///< height of pict, undefined when pict is not set
+ int nb_colors; ///< number of colors in pict, undefined when pict is not set
+
+ /**
+ * data+linesize for the bitmap of this subtitle.
+ * Can be set for text/ass as well once they are rendered.
+ */
+ uint8_t* data[4];
+ int linesize[4];
+
+ int flags;
+ enum AVSubtitleType type;
+
+ char* text; ///< 0 terminated plain UTF-8 text
+
+ /**
+ * 0 terminated ASS/SSA compatible event line.
+ * The presentation of this is unaffected by the other values in this
+ * struct.
+ */
+ char* ass;
+} AVSubtitleRect;
+
+typedef struct AVSubtitle {
+ uint16_t format; /* 0 = graphics */
+ uint32_t start_display_time; /* relative to packet pts, in ms */
+ uint32_t end_display_time; /* relative to packet pts, in ms */
+ unsigned num_rects;
+ AVSubtitleRect** rects;
+ int64_t pts; ///< Same as packet pts, in AV_TIME_BASE
+} AVSubtitle;
+
+/**
+ * Return the LIBAVCODEC_VERSION_INT constant.
+ */
+unsigned avcodec_version(void);
+
+/**
+ * Return the libavcodec build-time configuration.
+ */
+const char* avcodec_configuration(void);
+
+/**
+ * Return the libavcodec license.
+ */
+const char* avcodec_license(void);
+
+/**
+ * Allocate an AVCodecContext and set its fields to default values. The
+ * resulting struct should be freed with avcodec_free_context().
+ *
+ * @param codec if non-NULL, allocate private data and initialize defaults
+ * for the given codec. It is illegal to then call avcodec_open2()
+ * with a different codec.
+ * If NULL, then the codec-specific defaults won't be initialized,
+ * which may result in suboptimal default settings (this is
+ * important mainly for encoders, e.g. libx264).
+ *
+ * @return An AVCodecContext filled with default values or NULL on failure.
+ */
+AVCodecContext* avcodec_alloc_context3(const AVCodec* codec);
+
+/**
+ * Free the codec context and everything associated with it and write NULL to
+ * the provided pointer.
+ */
+void avcodec_free_context(AVCodecContext** avctx);
+
+/**
+ * Get the AVClass for AVCodecContext. It can be used in combination with
+ * AV_OPT_SEARCH_FAKE_OBJ for examining options.
+ *
+ * @see av_opt_find().
+ */
+const AVClass* avcodec_get_class(void);
+
+/**
+ * Get the AVClass for AVSubtitleRect. It can be used in combination with
+ * AV_OPT_SEARCH_FAKE_OBJ for examining options.
+ *
+ * @see av_opt_find().
+ */
+const AVClass* avcodec_get_subtitle_rect_class(void);
+
+/**
+ * Fill the parameters struct based on the values from the supplied codec
+ * context. Any allocated fields in par are freed and replaced with duplicates
+ * of the corresponding fields in codec.
+ *
+ * @return >= 0 on success, a negative AVERROR code on failure
+ */
+int avcodec_parameters_from_context(struct AVCodecParameters* par,
+ const AVCodecContext* codec);
+
+/**
+ * Fill the codec context based on the values from the supplied codec
+ * parameters. Any allocated fields in codec that have a corresponding field in
+ * par are freed and replaced with duplicates of the corresponding field in par.
+ * Fields in codec that do not have a counterpart in par are not touched.
+ *
+ * @return >= 0 on success, a negative AVERROR code on failure.
+ */
+int avcodec_parameters_to_context(AVCodecContext* codec,
+ const struct AVCodecParameters* par);
+
+/**
+ * Initialize the AVCodecContext to use the given AVCodec. Prior to using this
+ * function the context has to be allocated with avcodec_alloc_context3().
+ *
+ * The functions avcodec_find_decoder_by_name(), avcodec_find_encoder_by_name(),
+ * avcodec_find_decoder() and avcodec_find_encoder() provide an easy way for
+ * retrieving a codec.
+ *
+ * Depending on the codec, you might need to set options in the codec context
+ * also for decoding (e.g. width, height, or the pixel or audio sample format in
+ * the case the information is not available in the bitstream, as when decoding
+ * raw audio or video).
+ *
+ * Options in the codec context can be set either by setting them in the options
+ * AVDictionary, or by setting the values in the context itself, directly or by
+ * using the av_opt_set() API before calling this function.
+ *
+ * Example:
+ * @code
+ * av_dict_set(&opts, "b", "2.5M", 0);
+ * codec = avcodec_find_decoder(AV_CODEC_ID_H264);
+ * if (!codec)
+ * exit(1);
+ *
+ * context = avcodec_alloc_context3(codec);
+ *
+ * if (avcodec_open2(context, codec, opts) < 0)
+ * exit(1);
+ * @endcode
+ *
+ * In the case AVCodecParameters are available (e.g. when demuxing a stream
+ * using libavformat, and accessing the AVStream contained in the demuxer), the
+ * codec parameters can be copied to the codec context using
+ * avcodec_parameters_to_context(), as in the following example:
+ *
+ * @code
+ * AVStream *stream = ...;
+ * context = avcodec_alloc_context3(codec);
+ * if (avcodec_parameters_to_context(context, stream->codecpar) < 0)
+ * exit(1);
+ * if (avcodec_open2(context, codec, NULL) < 0)
+ * exit(1);
+ * @endcode
+ *
+ * @note Always call this function before using decoding routines (such as
+ * @ref avcodec_receive_frame()).
+ *
+ * @param avctx The context to initialize.
+ * @param codec The codec to open this context for. If a non-NULL codec has been
+ * previously passed to avcodec_alloc_context3()
+ * for this context, then this parameter MUST be either NULL or
+ * equal to the previously passed codec.
+ * @param options A dictionary filled with AVCodecContext and codec-private
+ * options, which are set on top of the options already set in
+ * avctx, can be NULL. On return this object will be filled with
+ * options that were not found in the avctx codec context.
+ *
+ * @return zero on success, a negative value on error
+ * @see avcodec_alloc_context3(), avcodec_find_decoder(),
+ * avcodec_find_encoder(), av_dict_set(), av_opt_set(), av_opt_find(),
+ * avcodec_parameters_to_context()
+ */
+int avcodec_open2(AVCodecContext* avctx, const AVCodec* codec,
+ AVDictionary** options);
+
+#if FF_API_AVCODEC_CLOSE
+/**
+ * Close a given AVCodecContext and free all the data associated with it
+ * (but not the AVCodecContext itself).
+ *
+ * Calling this function on an AVCodecContext that hasn't been opened will free
+ * the codec-specific data allocated in avcodec_alloc_context3() with a non-NULL
+ * codec. Subsequent calls will do nothing.
+ *
+ * @deprecated Do not use this function. Use avcodec_free_context() to destroy a
+ * codec context (either open or closed). Opening and closing a codec context
+ * multiple times is not supported anymore -- use multiple codec contexts
+ * instead.
+ */
+attribute_deprecated int avcodec_close(AVCodecContext* avctx);
+#endif
+
+/**
+ * Free all allocated data in the given subtitle struct.
+ *
+ * @param sub AVSubtitle to free.
+ */
+void avsubtitle_free(AVSubtitle* sub);
+
+/**
+ * @}
+ */
+
+/**
+ * @addtogroup lavc_decoding
+ * @{
+ */
+
+/**
+ * The default callback for AVCodecContext.get_buffer2(). It is made public so
+ * it can be called by custom get_buffer2() implementations for decoders without
+ * AV_CODEC_CAP_DR1 set.
+ */
+int avcodec_default_get_buffer2(AVCodecContext* s, AVFrame* frame, int flags);
+
+/**
+ * The default callback for AVCodecContext.get_encode_buffer(). It is made
+ * public so it can be called by custom get_encode_buffer() implementations for
+ * encoders without AV_CODEC_CAP_DR1 set.
+ */
+int avcodec_default_get_encode_buffer(AVCodecContext* s, AVPacket* pkt,
+ int flags);
+
+/**
+ * Modify width and height values so that they will result in a memory
+ * buffer that is acceptable for the codec if you do not use any horizontal
+ * padding.
+ *
+ * May only be used if a codec with AV_CODEC_CAP_DR1 has been opened.
+ */
+void avcodec_align_dimensions(AVCodecContext* s, int* width, int* height);
+
+/**
+ * Modify width and height values so that they will result in a memory
+ * buffer that is acceptable for the codec if you also ensure that all
+ * line sizes are a multiple of the respective linesize_align[i].
+ *
+ * May only be used if a codec with AV_CODEC_CAP_DR1 has been opened.
+ */
+void avcodec_align_dimensions2(AVCodecContext* s, int* width, int* height,
+ int linesize_align[AV_NUM_DATA_POINTERS]);
+
+/**
+ * Decode a subtitle message.
+ * Return a negative value on error, otherwise return the number of bytes used.
+ * If no subtitle could be decompressed, got_sub_ptr is zero.
+ * Otherwise, the subtitle is stored in *sub.
+ * Note that AV_CODEC_CAP_DR1 is not available for subtitle codecs. This is for
+ * simplicity, because the performance difference is expected to be negligible
+ * and reusing a get_buffer written for video codecs would probably perform
+ * badly due to a potentially very different allocation pattern.
+ *
+ * Some decoders (those marked with AV_CODEC_CAP_DELAY) have a delay between
+ * input and output. This means that for some packets they will not immediately
+ * produce decoded output and need to be flushed at the end of decoding to get
+ * all the decoded data. Flushing is done by calling this function with packets
+ * with avpkt->data set to NULL and avpkt->size set to 0 until it stops
+ * returning subtitles. It is safe to flush even those decoders that are not
+ * marked with AV_CODEC_CAP_DELAY; in that case, no subtitles will be returned.
+ *
+ * @note The AVCodecContext MUST have been opened with @ref avcodec_open2()
+ * before packets may be fed to the decoder.
+ *
+ * @param avctx the codec context
+ * @param[out] sub The preallocated AVSubtitle in which the decoded subtitle
+ * will be stored, must be freed with avsubtitle_free if *got_sub_ptr is set.
+ * @param[in,out] got_sub_ptr Zero if no subtitle could be decompressed;
+ * otherwise, it is nonzero.
+ * @param[in] avpkt The input AVPacket containing the input buffer.
+ */
+int avcodec_decode_subtitle2(AVCodecContext* avctx, AVSubtitle* sub,
+ int* got_sub_ptr, const AVPacket* avpkt);
+
+/**
+ * Supply raw packet data as input to a decoder.
+ *
+ * Internally, this call will copy relevant AVCodecContext fields, which can
+ * influence decoding per-packet, and apply them when the packet is actually
+ * decoded. (For example AVCodecContext.skip_frame, which might direct the
+ * decoder to drop the frame contained by the packet sent with this function.)
+ *
+ * @warning The input buffer, avpkt->data must be AV_INPUT_BUFFER_PADDING_SIZE
+ * larger than the actual read bytes because some optimized bitstream
+ * readers read 32 or 64 bits at once and could read over the end.
+ *
+ * @note The AVCodecContext MUST have been opened with @ref avcodec_open2()
+ * before packets may be fed to the decoder.
+ *
+ * @param avctx codec context
+ * @param[in] avpkt The input AVPacket. Usually, this will be a single video
+ * frame, or several complete audio frames.
+ * Ownership of the packet remains with the caller, and the
+ * decoder will not write to the packet. The decoder may create
+ * a reference to the packet data (or copy it if the packet is
+ * not reference-counted).
+ * Unlike with older APIs, the packet is always fully consumed,
+ * and if it contains multiple frames (e.g. some audio codecs),
+ * will require you to call avcodec_receive_frame() multiple
+ * times afterwards before you can send a new packet.
+ * It can be NULL (or an AVPacket with data set to NULL and
+ * size set to 0); in this case, it is considered a flush
+ * packet, which signals the end of the stream. Sending the
+ * first flush packet will return success. Subsequent ones are
+ * unnecessary and will return AVERROR_EOF. If the decoder
+ * still has frames buffered, it will return them after sending
+ * a flush packet.
+ *
+ * @retval 0 success
+ * @retval AVERROR(EAGAIN) input is not accepted in the current state - user
+ * must read output with avcodec_receive_frame() (once
+ * all output is read, the packet should be resent,
+ * and the call will not fail with EAGAIN).
+ * @retval AVERROR_EOF the decoder has been flushed, and no new packets
+ * can be sent to it (also returned if more than 1 flush packet is sent)
+ * @retval AVERROR(EINVAL) codec not opened, it is an encoder, or requires
+ * flush
+ * @retval AVERROR(ENOMEM) failed to add packet to internal queue, or similar
+ * @retval "another negative error code" legitimate decoding errors
+ */
+int avcodec_send_packet(AVCodecContext* avctx, const AVPacket* avpkt);
+
+/**
+ * Return decoded output data from a decoder or encoder (when the
+ * @ref AV_CODEC_FLAG_RECON_FRAME flag is used).
+ *
+ * @param avctx codec context
+ * @param frame This will be set to a reference-counted video or audio
+ * frame (depending on the decoder type) allocated by the
+ * codec. Note that the function will always call
+ * av_frame_unref(frame) before doing anything else.
+ *
+ * @retval 0 success, a frame was returned
+ * @retval AVERROR(EAGAIN) output is not available in this state - user must
+ * try to send new input
+ * @retval AVERROR_EOF the codec has been fully flushed, and there will be
+ * no more output frames
+ * @retval AVERROR(EINVAL) codec not opened, or it is an encoder without the
+ * @ref AV_CODEC_FLAG_RECON_FRAME flag enabled
+ * @retval "other negative error code" legitimate decoding errors
+ */
+int avcodec_receive_frame(AVCodecContext* avctx, AVFrame* frame);
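+
+/*
+ * A minimal send/receive decoding loop (a sketch, assuming an opened decoder
+ * context avctx and an allocated AVFrame* frame; handle_frame() is a
+ * hypothetical consumer):
+ * @code
+ * int ret = avcodec_send_packet(avctx, pkt);  // pkt == NULL flushes
+ * if (ret < 0)
+ *   return ret;
+ * while ((ret = avcodec_receive_frame(avctx, frame)) >= 0)
+ *   handle_frame(frame);  // hypothetical consumer of decoded frames
+ * if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
+ *   return ret;  // a genuine decoding error
+ * @endcode
+ */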
+
+/**
+ * Supply a raw video or audio frame to the encoder. Use
+ * avcodec_receive_packet() to retrieve buffered output packets.
+ *
+ * @param avctx codec context
+ * @param[in] frame AVFrame containing the raw audio or video frame to be
+ * encoded. Ownership of the frame remains with the caller, and the encoder will
+ * not write to the frame. The encoder may create a reference to the frame data
+ * (or copy it if the frame is not reference-counted). It can be NULL, in which
+ * case it is considered a flush packet. This signals the end of the stream. If
+ * the encoder still has packets buffered, it will return them after this call.
+ * Once flushing mode has been entered, additional flush packets are ignored,
+ * and sending frames will return AVERROR_EOF.
+ *
+ * For audio:
+ * If AV_CODEC_CAP_VARIABLE_FRAME_SIZE is set, then each frame
+ * can have any number of samples.
+ * If it is not set, frame->nb_samples must be equal to
+ * avctx->frame_size for all frames except the last.
+ * The final frame may be smaller than avctx->frame_size.
+ * @retval 0 success
+ * @retval AVERROR(EAGAIN) input is not accepted in the current state - user
+ * must read output with avcodec_receive_packet() (once all output is read, the
+ * packet should be resent, and the call will not fail with EAGAIN).
+ * @retval AVERROR_EOF the encoder has been flushed, and no new frames can
+ * be sent to it
+ * @retval AVERROR(EINVAL) codec not opened, it is a decoder, or requires
+ * flush
+ * @retval AVERROR(ENOMEM) failed to add packet to internal queue, or similar
+ * @retval "another negative error code" legitimate encoding errors
+ */
+int avcodec_send_frame(AVCodecContext* avctx, const AVFrame* frame);
+
+/**
+ * Read encoded data from the encoder.
+ *
+ * @param avctx codec context
+ * @param avpkt This will be set to a reference-counted packet allocated by the
+ * encoder. Note that the function will always call
+ * av_packet_unref(avpkt) before doing anything else.
+ * @retval 0 success
+ * @retval AVERROR(EAGAIN) output is not available in the current state - user
+ * must try to send input
+ * @retval AVERROR_EOF the encoder has been fully flushed, and there will be
+ * no more output packets
+ * @retval AVERROR(EINVAL) codec not opened, or it is a decoder
+ * @retval "another negative error code" legitimate encoding errors
+ */
+int avcodec_receive_packet(AVCodecContext* avctx, AVPacket* avpkt);
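+
+/*
+ * The matching encode loop (a sketch, assuming an opened encoder context
+ * avctx and an allocated AVPacket* pkt; write_packet() is a hypothetical
+ * consumer):
+ * @code
+ * int ret = avcodec_send_frame(avctx, frame);  // frame == NULL flushes
+ * if (ret < 0)
+ *   return ret;
+ * while ((ret = avcodec_receive_packet(avctx, pkt)) >= 0) {
+ *   write_packet(pkt);  // hypothetical consumer of encoded packets
+ *   av_packet_unref(pkt);
+ * }
+ * if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
+ *   return ret;  // a genuine encoding error
+ * @endcode
+ */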
+
+/**
+ * Create and return a AVHWFramesContext with values adequate for hardware
+ * decoding. This is meant to get called from the get_format callback, and is
+ * a helper for preparing a AVHWFramesContext for AVCodecContext.hw_frames_ctx.
+ * This API is for decoding with certain hardware acceleration modes/APIs only.
+ *
+ * The returned AVHWFramesContext is not initialized. The caller must do this
+ * with av_hwframe_ctx_init().
+ *
+ * Calling this function is not a requirement, but makes it simpler to avoid
+ * codec or hardware API specific details when manually allocating frames.
+ *
+ * Alternatively, an API user can set AVCodecContext.hw_device_ctx,
+ * which sets up AVCodecContext.hw_frames_ctx fully automatically, and makes
+ * it unnecessary to call this function or to care about
+ * AVHWFramesContext initialization at all.
+ *
+ * There are a number of requirements for calling this function:
+ *
+ * - It must be called from get_format with the same avctx parameter that was
+ * passed to get_format. Calling it outside of get_format is not allowed, and
+ * can trigger undefined behavior.
+ * - The function is not always supported (see description of return values).
+ * Even if this function returns successfully, hwaccel initialization could
+ * fail later. (The degree to which implementations check whether the stream
+ * is actually supported varies. Some do this check only after the user's
+ * get_format callback returns.)
+ * - The hw_pix_fmt must be one of the choices suggested by get_format. If the
+ * user decides to use a AVHWFramesContext prepared with this API function,
+ * the user must return the same hw_pix_fmt from get_format.
+ * - The device_ref passed to this function must support the given hw_pix_fmt.
+ * - After calling this API function, it is the user's responsibility to
+ * initialize the AVHWFramesContext (returned by the out_frames_ref
+ * parameter), and to set AVCodecContext.hw_frames_ctx to it. If done, this must
+ * be done before returning from get_format (this is implied by the normal
+ * AVCodecContext.hw_frames_ctx API rules).
+ * - The AVHWFramesContext parameters may change every time get_format is
+ * called. Also, AVCodecContext.hw_frames_ctx is reset before get_format. So
+ * you are inherently required to go through this process again on every
+ * get_format call.
+ * - It is perfectly possible to call this function without actually using
+ * the resulting AVHWFramesContext. One use-case might be trying to reuse a
+ * previously initialized AVHWFramesContext, and calling this API function
+ * only to test whether the required frame parameters have changed.
+ * - Fields that use dynamically allocated values of any kind must not be set
+ * by the user unless setting them is explicitly allowed by the documentation.
+ * If the user sets AVHWFramesContext.free and AVHWFramesContext.user_opaque,
+ * the new free callback must call the potentially set previous free callback.
+ * This API call may set any dynamically allocated fields, including the free
+ * callback.
+ *
+ * The function will set at least the following fields on AVHWFramesContext
+ * (potentially more, depending on hwaccel API):
+ *
+ * - All fields set by av_hwframe_ctx_alloc().
+ * - Set the format field to hw_pix_fmt.
+ * - Set the sw_format field to the most suited and most versatile format. (An
+ * implication is that this will prefer generic formats over opaque formats
+ * with arbitrary restrictions, if possible.)
+ * - Set the width/height fields to the coded frame size, rounded up to the
+ * API-specific minimum alignment.
+ * - Only _if_ the hwaccel requires a pre-allocated pool: set the
+ * initial_pool_size field to the maximum number of reference surfaces possible
+ * with the codec, plus 1 surface for the user to work with (meaning the user
+ * can safely reference at most 1 decoded surface at a time), plus additional
+ * buffering introduced by frame threading. If the hwaccel does not require
+ * pre-allocation, the field is left to 0, and the decoder will allocate new
+ * surfaces on demand during decoding.
+ * - Possibly AVHWFramesContext.hwctx fields, depending on the underlying
+ * hardware API.
+ *
+ * Essentially, out_frames_ref returns the same as av_hwframe_ctx_alloc(), but
+ * with basic frame parameters set.
+ *
+ * The function is stateless, and does not change the AVCodecContext or the
+ * device_ref AVHWDeviceContext.
+ *
+ * @param avctx The context which is currently calling get_format, and which
+ * implicitly contains all state needed for filling the returned
+ * AVHWFramesContext properly.
+ * @param device_ref A reference to the AVHWDeviceContext describing the device
+ * which will be used by the hardware decoder.
+ * @param hw_pix_fmt The hwaccel format you are going to return from get_format.
+ * @param out_frames_ref On success, set to a reference to an _uninitialized_
+ * AVHWFramesContext, created from the given device_ref.
+ * Fields will be set to values required for decoding.
+ * Not changed if an error is returned.
+ * @return zero on success, a negative value on error. The following error codes
+ * have special semantics:
+ * AVERROR(ENOENT): the decoder does not support this functionality. Setup
+ * is always manual, or it is a decoder which does not
+ * support setting AVCodecContext.hw_frames_ctx at all,
+ * or it is a software format.
+ * AVERROR(EINVAL): it is known that hardware decoding is not supported for
+ * this configuration, or the device_ref is not supported
+ * for the hwaccel referenced by hw_pix_fmt.
+ */
+int avcodec_get_hw_frames_parameters(AVCodecContext* avctx,
+ AVBufferRef* device_ref,
+ enum AVPixelFormat hw_pix_fmt,
+ AVBufferRef** out_frames_ref);
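+
+/*
+ * A sketch of use from within a get_format() callback (assuming hw_pix_fmt
+ * is one of the offered hwaccel formats and device_ref was prepared by the
+ * caller):
+ * @code
+ * AVBufferRef* frames_ref = NULL;
+ * int ret = avcodec_get_hw_frames_parameters(avctx, device_ref, hw_pix_fmt,
+ *                                            &frames_ref);
+ * if (ret >= 0)
+ *   ret = av_hwframe_ctx_init(frames_ref);
+ * if (ret < 0) {
+ *   av_buffer_unref(&frames_ref);
+ *   return AV_PIX_FMT_NONE;  // or fall back to a software format
+ * }
+ * avctx->hw_frames_ctx = frames_ref;  // ownership passes to libavcodec
+ * return hw_pix_fmt;
+ * @endcode
+ */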
+
+/**
+ * @defgroup lavc_parsing Frame parsing
+ * @{
+ */
+
+enum AVPictureStructure {
+ AV_PICTURE_STRUCTURE_UNKNOWN, ///< unknown
+ AV_PICTURE_STRUCTURE_TOP_FIELD, ///< coded as top field
+ AV_PICTURE_STRUCTURE_BOTTOM_FIELD, ///< coded as bottom field
+ AV_PICTURE_STRUCTURE_FRAME, ///< coded as frame
+};
+
+typedef struct AVCodecParserContext {
+ void* priv_data;
+ const struct AVCodecParser* parser;
+ int64_t frame_offset; /* offset of the current frame */
+ int64_t cur_offset; /* current offset
+ (incremented by each av_parser_parse()) */
+ int64_t next_frame_offset; /* offset of the next frame */
+ /* video info */
+ int pict_type; /* XXX: Put it back in AVCodecContext. */
+ /**
+ * This field is used for proper frame duration computation in lavf.
+ * It signals how much longer the frame duration of the current frame
+ * is compared to the normal frame duration.
+ *
+ * frame_duration = (1 + repeat_pict) * time_base
+ *
+ * It is used by codecs like H.264 to display telecined material.
+ */
+ int repeat_pict; /* XXX: Put it back in AVCodecContext. */
+ int64_t pts; /* pts of the current frame */
+ int64_t dts; /* dts of the current frame */
+
+ /* private data */
+ int64_t last_pts;
+ int64_t last_dts;
+ int fetch_timestamp;
+
+#define AV_PARSER_PTS_NB 4
+ int cur_frame_start_index;
+ int64_t cur_frame_offset[AV_PARSER_PTS_NB];
+ int64_t cur_frame_pts[AV_PARSER_PTS_NB];
+ int64_t cur_frame_dts[AV_PARSER_PTS_NB];
+
+ int flags;
+#define PARSER_FLAG_COMPLETE_FRAMES 0x0001
+#define PARSER_FLAG_ONCE 0x0002
+/// Set if the parser has a valid file offset
+#define PARSER_FLAG_FETCHED_OFFSET 0x0004
+#define PARSER_FLAG_USE_CODEC_TS 0x1000
+
+ int64_t offset; ///< byte offset from the start of the starting packet
+ int64_t cur_frame_end[AV_PARSER_PTS_NB];
+
+ /**
+ * Set by parser to 1 for key frames and 0 for non-key frames.
+ * It is initialized to -1, so if the parser doesn't set this flag,
+ * old-style fallback using AV_PICTURE_TYPE_I picture type as key frames
+ * will be used.
+ */
+ int key_frame;
+
+ // Timestamp generation support:
+ /**
+ * Synchronization point for start of timestamp generation.
+ *
+ * Set to >0 for sync point, 0 for no sync point and <0 for undefined
+ * (default).
+ *
+ * For example, this corresponds to the presence of an H.264 buffering
+ * period SEI message.
+ */
+ int dts_sync_point;
+
+ /**
+ * Offset of the current timestamp against last timestamp sync point in
+ * units of AVCodecContext.time_base.
+ *
+ * Set to INT_MIN when dts_sync_point is unused. Otherwise, it must
+ * contain a valid timestamp offset.
+ *
+ * Note that the timestamp of a sync point usually has a nonzero
+ * dts_ref_dts_delta, which refers to the previous sync point. The offset of
+ * the frame following a timestamp sync point will usually be 1.
+ *
+ * For example, this corresponds to H.264 cpb_removal_delay.
+ */
+ int dts_ref_dts_delta;
+
+ /**
+ * Presentation delay of current frame in units of AVCodecContext.time_base.
+ *
+ * Set to INT_MIN when dts_sync_point is unused. Otherwise, it must
+ * contain a valid non-negative timestamp delta (the presentation time of a
+ * frame must not lie in the past).
+ *
+ * This delay represents the difference between decoding and presentation
+ * time of the frame.
+ *
+ * For example, this corresponds to H.264 dpb_output_delay.
+ */
+ int pts_dts_delta;
+
+ /**
+ * Position of the packet in file.
+ *
+ * Analogous to cur_frame_pts/dts.
+ */
+ int64_t cur_frame_pos[AV_PARSER_PTS_NB];
+
+ /**
+ * Byte position of currently parsed frame in stream.
+ */
+ int64_t pos;
+
+ /**
+ * Previous frame byte position.
+ */
+ int64_t last_pos;
+
+ /**
+ * Duration of the current frame.
+ * For audio, this is in units of 1 / AVCodecContext.sample_rate.
+ * For all other types, this is in units of AVCodecContext.time_base.
+ */
+ int duration;
+
+ enum AVFieldOrder field_order;
+
+ /**
+ * Indicate whether a picture is coded as a frame, top field or bottom field.
+ *
+ * For example, H.264 field_pic_flag equal to 0 corresponds to
+ * AV_PICTURE_STRUCTURE_FRAME. An H.264 picture with field_pic_flag
+ * equal to 1 and bottom_field_flag equal to 0 corresponds to
+ * AV_PICTURE_STRUCTURE_TOP_FIELD.
+ */
+ enum AVPictureStructure picture_structure;
+
+ /**
+ * Picture number incremented in presentation or output order.
+ * This field may be reinitialized at the first picture of a new sequence.
+ *
+ * For example, this corresponds to H.264 PicOrderCnt.
+ */
+ int output_picture_number;
+
+ /**
+ * Dimensions of the decoded video intended for presentation.
+ */
+ int width;
+ int height;
+
+ /**
+ * Dimensions of the coded video.
+ */
+ int coded_width;
+ int coded_height;
+
+ /**
+ * The format of the coded data, corresponding to enum AVPixelFormat for
+ * video and enum AVSampleFormat for audio.
+ *
+ * Note that a decoder can have considerable freedom in how exactly it
+ * decodes the data, so the format reported here might be different from the
+ * one returned by a decoder.
+ */
+ int format;
+} AVCodecParserContext;
+
+typedef struct AVCodecParser {
+ int codec_ids[7]; /* several codec IDs are permitted */
+ int priv_data_size;
+ int (*parser_init)(AVCodecParserContext* s);
+ /* This callback never returns an error; a negative value means that
+ * the frame start was in a previous packet. */
+ int (*parser_parse)(AVCodecParserContext* s, AVCodecContext* avctx,
+ const uint8_t** poutbuf, int* poutbuf_size,
+ const uint8_t* buf, int buf_size);
+ void (*parser_close)(AVCodecParserContext* s);
+ int (*split)(AVCodecContext* avctx, const uint8_t* buf, int buf_size);
+} AVCodecParser;
+
+/**
+ * Iterate over all registered codec parsers.
+ *
+ * @param opaque a pointer where libavcodec will store the iteration state. Must
+ * point to NULL to start the iteration.
+ *
+ * @return the next registered codec parser or NULL when the iteration is
+ * finished
+ */
+const AVCodecParser* av_parser_iterate(void** opaque);
+
+AVCodecParserContext* av_parser_init(int codec_id);
+
+/**
+ * Parse a packet.
+ *
+ * @param s parser context.
+ * @param avctx codec context.
+ * @param poutbuf set to pointer to parsed buffer or NULL if not yet
+ finished.
+ * @param poutbuf_size set to size of parsed buffer or zero if not yet
+ finished.
+ * @param buf input buffer.
+ * @param buf_size buffer size in bytes without the padding. I.e. the full
+ buffer size is assumed to be buf_size + AV_INPUT_BUFFER_PADDING_SIZE. To signal
+ EOF, this should be 0 (so that the last frame can be output).
+ * @param pts input presentation timestamp.
+ * @param dts input decoding timestamp.
+ * @param pos input byte position in stream.
+ * @return the number of bytes of the input bitstream used.
+ *
+ * Example:
+ * @code
+ * while (in_len) {
+ *   len = av_parser_parse2(myparser, avctx, &data, &size,
+ *                          in_data, in_len,
+ *                          pts, dts, pos);
+ *   in_data += len;
+ *   in_len -= len;
+ *
+ *   if (size)
+ *     decode_frame(data, size);
+ * }
+ * @endcode
+ */
+int av_parser_parse2(AVCodecParserContext* s, AVCodecContext* avctx,
+ uint8_t** poutbuf, int* poutbuf_size, const uint8_t* buf,
+ int buf_size, int64_t pts, int64_t dts, int64_t pos);
+
+void av_parser_close(AVCodecParserContext* s);
+
+/**
+ * @}
+ * @}
+ */
+
+/**
+ * @addtogroup lavc_encoding
+ * @{
+ */
+
+int avcodec_encode_subtitle(AVCodecContext* avctx, uint8_t* buf, int buf_size,
+ const AVSubtitle* sub);
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup lavc_misc Utility functions
+ * @ingroup libavc
+ *
+ * Miscellaneous utility functions related to both encoding and decoding
+ * (or neither).
+ * @{
+ */
+
+/**
+ * @defgroup lavc_misc_pixfmt Pixel formats
+ *
+ * Functions for working with pixel formats.
+ * @{
+ */
+
+/**
+ * Return a value representing the fourCC code associated with the
+ * pixel format pix_fmt, or 0 if no associated fourCC code can be
+ * found.
+ */
+unsigned int avcodec_pix_fmt_to_codec_tag(enum AVPixelFormat pix_fmt);
+
+/**
+ * Find the best pixel format to convert to given a certain source pixel
+ * format. When converting from one pixel format to another, information loss
+ * may occur. For example, when converting from RGB24 to GRAY, the color
+ * information will be lost. Similarly, other losses occur when converting from
+ * some formats to other formats. avcodec_find_best_pix_fmt_of_list() selects
+ * which of the given pixel formats should be used to suffer the least amount
+ * of loss. The pixel formats from which it chooses one are determined by the
+ * pix_fmt_list parameter.
+ *
+ *
+ * @param[in] pix_fmt_list AV_PIX_FMT_NONE terminated array of pixel formats to
+ * choose from
+ * @param[in] src_pix_fmt source pixel format
+ * @param[in] has_alpha Whether the source pixel format alpha channel is used.
+ * @param[out] loss_ptr Combination of flags informing you what kind of losses
+ * will occur.
+ * @return The best pixel format to convert to or -1 if none was found.
+ */
+enum AVPixelFormat avcodec_find_best_pix_fmt_of_list(
+ const enum AVPixelFormat* pix_fmt_list, enum AVPixelFormat src_pix_fmt,
+ int has_alpha, int* loss_ptr);
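+
+/*
+ * A minimal sketch: choose between two candidate output formats for an RGBA
+ * source whose alpha channel is in use.
+ * @code
+ * static const enum AVPixelFormat candidates[] = {
+ *     AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_NONE};
+ * int loss = 0;
+ * enum AVPixelFormat best = avcodec_find_best_pix_fmt_of_list(
+ *     candidates, AV_PIX_FMT_RGBA, 1, &loss);
+ * @endcode
+ */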
+
+enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext* s,
+ const enum AVPixelFormat* fmt);
+
+/**
+ * @}
+ */
+
+void avcodec_string(char* buf, int buf_size, AVCodecContext* enc, int encode);
+
+int avcodec_default_execute(AVCodecContext* c,
+ int (*func)(AVCodecContext* c2, void* arg2),
+ void* arg, int* ret, int count, int size);
+int avcodec_default_execute2(AVCodecContext* c,
+ int (*func)(AVCodecContext* c2, void* arg2, int,
+ int),
+ void* arg, int* ret, int count);
+// FIXME func typedef
+
+/**
+ * Fill AVFrame audio data and linesize pointers.
+ *
+ * The buffer buf must be a preallocated buffer with a size big enough
+ * to contain the specified number of samples. The filled AVFrame data
+ * pointers will point to this buffer.
+ *
+ * AVFrame extended_data channel pointers are allocated if necessary for
+ * planar audio.
+ *
+ * @param frame the AVFrame
+ * frame->nb_samples must be set prior to calling the
+ * function. This function fills in frame->data,
+ * frame->extended_data, frame->linesize[0].
+ * @param nb_channels channel count
+ * @param sample_fmt sample format
+ * @param buf buffer to use for frame data
+ * @param buf_size size of buffer
+ * @param align plane size sample alignment (0 = default)
+ * @return >=0 on success, negative error code on failure
+ * @todo return the size in bytes required to store the samples in
+ * case of success, at the next libavutil bump
+ */
+int avcodec_fill_audio_frame(AVFrame* frame, int nb_channels,
+ enum AVSampleFormat sample_fmt, const uint8_t* buf,
+ int buf_size, int align);
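+
+/*
+ * A minimal sketch (assuming buf holds buf_size bytes of interleaved 16-bit
+ * stereo samples):
+ * @code
+ * AVFrame* frame = av_frame_alloc();
+ * frame->nb_samples = buf_size / (2 * sizeof(int16_t));  // 2 channels
+ * int ret = avcodec_fill_audio_frame(frame, 2, AV_SAMPLE_FMT_S16,
+ *                                    buf, buf_size, 0);
+ * @endcode
+ */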
+
+/**
+ * Reset the internal codec state / flush internal buffers. Should be called
+ * e.g. when seeking or when switching to a different stream.
+ *
+ * @note for decoders, this function just releases any references the decoder
+ * might keep internally, but the caller's references remain valid.
+ *
+ * @note for encoders, this function will only do something if the encoder
+ * declares support for AV_CODEC_CAP_ENCODER_FLUSH. When called, the encoder
+ * will drain any remaining packets, and can then be re-used for a different
+ * stream (as opposed to sending a null frame which will leave the encoder
+ * in a permanent EOF state after draining). This can be desirable if the
+ * cost of tearing down and replacing the encoder instance is high.
+ */
+void avcodec_flush_buffers(AVCodecContext* avctx);
+
+/**
+ * Return audio frame duration.
+ *
+ * @param avctx codec context
+ * @param frame_bytes size of the frame, or 0 if unknown
+ * @return frame duration, in samples, if known; 0 if it cannot be
+ * determined.
+ */
+int av_get_audio_frame_duration(AVCodecContext* avctx, int frame_bytes);
+
+/* memory */
+
+/**
+ * Same behaviour as av_fast_malloc(), but the buffer has an additional
+ * AV_INPUT_BUFFER_PADDING_SIZE bytes at the end which will always be 0.
+ *
+ * In addition, the whole buffer will be 0-initialized initially and after
+ * resizes, so that no uninitialized data will ever appear.
+ */
+void av_fast_padded_malloc(void* ptr, unsigned int* size, size_t min_size);
+
+/**
+ * Same behaviour as av_fast_padded_malloc(), except that the buffer will
+ * always be 0-initialized after the call.
+ */
+void av_fast_padded_mallocz(void* ptr, unsigned int* size, size_t min_size);
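+
+/* Editor's note: an illustrative sketch, not part of the upstream header.
+ * It grows a reused, padded scratch buffer to fit an incoming packet; the
+ * ctx->buf and ctx->buf_size fields are hypothetical names for the example.
+ *
+ * @code
+ *   // ctx->buf / ctx->buf_size persist across calls and start as NULL / 0.
+ *   av_fast_padded_malloc(&ctx->buf, &ctx->buf_size, pkt->size);
+ *   if (!ctx->buf)
+ *     return AVERROR(ENOMEM);
+ *   memcpy(ctx->buf, pkt->data, pkt->size);  // padding stays zeroed
+ * @endcode
+ */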
+
+/**
+ * @return a positive value if s is open (i.e. avcodec_open2() was called on it
+ * with no corresponding avcodec_close()), 0 otherwise.
+ */
+int avcodec_is_open(AVCodecContext* s);
+
+/**
+ * @}
+ */
+
+#endif /* AVCODEC_AVCODEC_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/avdct.h b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/avdct.h
new file mode 100644
index 0000000000..9edf4c187e
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/avdct.h
@@ -0,0 +1,85 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_AVDCT_H
+#define AVCODEC_AVDCT_H
+
+#include "libavutil/opt.h"
+
+/**
+ * AVDCT context.
+ * @note function pointers can be NULL if the specific features have been
+ * disabled at build time.
+ */
+typedef struct AVDCT {
+ const AVClass* av_class;
+
+ void (*idct)(int16_t* block /* align 16 */);
+
+ /**
+ * IDCT input permutation.
+ * Several optimized IDCTs need a permutated input (relative to the
+ * normal order of the reference IDCT).
+ * This permutation must be performed before the idct_put/add.
+ * Note, normally this can be merged with the zigzag/alternate scan<br>
+ * An example to avoid confusion:
+ * - (->decode coeffs -> zigzag reorder -> dequant -> reference IDCT -> ...)
+ * - (x -> reference DCT -> reference IDCT -> x)
+ * - (x -> reference DCT -> simple_mmx_perm = idct_permutation
+ * -> simple_idct_mmx -> x)
+ * - (-> decode coeffs -> zigzag reorder -> simple_mmx_perm -> dequant
+ * -> simple_idct_mmx -> ...)
+ */
+ uint8_t idct_permutation[64];
+
+ void (*fdct)(int16_t* block /* align 16 */);
+
+ /**
+ * DCT algorithm.
+   * Must use AVOptions to set this field.
+ */
+ int dct_algo;
+
+ /**
+ * IDCT algorithm.
+   * Must use AVOptions to set this field.
+ */
+ int idct_algo;
+
+ void (*get_pixels)(int16_t* block /* align 16 */,
+ const uint8_t* pixels /* align 8 */, ptrdiff_t line_size);
+
+ int bits_per_sample;
+
+ void (*get_pixels_unaligned)(int16_t* block /* align 16 */,
+ const uint8_t* pixels, ptrdiff_t line_size);
+} AVDCT;
+
+/**
+ * Allocates an AVDCT context.
+ * This needs to be initialized with avcodec_dct_init() after optionally
+ * configuring it with AVOptions.
+ *
+ * To free it, use av_free().
+ */
+AVDCT* avcodec_dct_alloc(void);
+int avcodec_dct_init(AVDCT*);
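+
+/* Editor's note: an illustrative sketch, not part of the upstream header.
+ * Typical lifecycle: allocate, optionally configure via AVOptions, then
+ * init. The "idct"/"simple" option name and value are assumptions; consult
+ * avcodec_dct_get_class() for the authoritative option list. block is
+ * assumed to be a 16-byte-aligned int16_t[64].
+ *
+ * @code
+ *   AVDCT* dct = avcodec_dct_alloc();
+ *   if (!dct)
+ *     return AVERROR(ENOMEM);
+ *   av_opt_set(dct, "idct", "simple", 0);  // assumed option name/value
+ *   if (avcodec_dct_init(dct) < 0) {
+ *     av_free(dct);
+ *     return AVERROR(EINVAL);
+ *   }
+ *   if (dct->fdct)       // may be NULL if disabled at build time
+ *     dct->fdct(block);
+ *   av_free(dct);
+ * @endcode
+ */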
+
+const AVClass* avcodec_dct_get_class(void);
+
+#endif /* AVCODEC_AVDCT_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/bsf.h b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/bsf.h
new file mode 100644
index 0000000000..044a0597bf
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/bsf.h
@@ -0,0 +1,335 @@
+/*
+ * Bitstream filters public API
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_BSF_H
+#define AVCODEC_BSF_H
+
+#include "libavutil/dict.h"
+#include "libavutil/log.h"
+#include "libavutil/rational.h"
+
+#include "codec_id.h"
+#include "codec_par.h"
+#include "packet.h"
+
+/**
+ * @defgroup lavc_bsf Bitstream filters
+ * @ingroup libavc
+ *
+ * Bitstream filters transform encoded media data without decoding it. This
+ * allows e.g. manipulating various header values. Bitstream filters operate on
+ * @ref AVPacket "AVPackets".
+ *
+ * The bitstream filtering API is centered around two structures:
+ * AVBitStreamFilter and AVBSFContext. The former represents a bitstream filter
+ * in abstract, the latter a specific filtering process. Obtain an
+ * AVBitStreamFilter using av_bsf_get_by_name() or av_bsf_iterate(), then pass
+ * it to av_bsf_alloc() to create an AVBSFContext. Fill in the user-settable
+ * AVBSFContext fields, as described in its documentation, then call
+ * av_bsf_init() to prepare the filter context for use.
+ *
+ * Submit packets for filtering using av_bsf_send_packet(), obtain filtered
+ * results with av_bsf_receive_packet(). When no more input packets will be
+ * sent, submit a NULL AVPacket to signal the end of the stream to the filter.
+ * av_bsf_receive_packet() will then return trailing packets, if any are
+ * produced by the filter.
+ *
+ * Finally, free the filter context with av_bsf_free().
+ * @{
+ */
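+
+/* Editor's note: an illustrative sketch of the workflow above, not part of
+ * the upstream header. "h264_mp4toannexb" is just an example filter name;
+ * in_par and in_time_base stand for the caller's input stream parameters
+ * and timebase.
+ *
+ * @code
+ *   const AVBitStreamFilter* f = av_bsf_get_by_name("h264_mp4toannexb");
+ *   AVBSFContext* ctx = NULL;
+ *   if (!f || av_bsf_alloc(f, &ctx) < 0)
+ *     return AVERROR(EINVAL);
+ *   avcodec_parameters_copy(ctx->par_in, in_par);
+ *   ctx->time_base_in = in_time_base;
+ *   if (av_bsf_init(ctx) < 0) {
+ *     av_bsf_free(&ctx);
+ *     return AVERROR(EINVAL);
+ *   }
+ *   // ... send/receive packets (see av_bsf_send_packet() below) ...
+ *   av_bsf_free(&ctx);
+ * @endcode
+ */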
+
+/**
+ * The bitstream filter state.
+ *
+ * This struct must be allocated with av_bsf_alloc() and freed with
+ * av_bsf_free().
+ *
+ * The fields in the struct will only be changed (by the caller or by the
+ * filter) as described in their documentation, and are to be considered
+ * immutable otherwise.
+ */
+typedef struct AVBSFContext {
+ /**
+ * A class for logging and AVOptions
+ */
+ const AVClass* av_class;
+
+ /**
+ * The bitstream filter this context is an instance of.
+ */
+ const struct AVBitStreamFilter* filter;
+
+ /**
+ * Opaque filter-specific private data. If filter->priv_class is non-NULL,
+ * this is an AVOptions-enabled struct.
+ */
+ void* priv_data;
+
+ /**
+ * Parameters of the input stream. This field is allocated in
+ * av_bsf_alloc(), it needs to be filled by the caller before
+ * av_bsf_init().
+ */
+ AVCodecParameters* par_in;
+
+ /**
+ * Parameters of the output stream. This field is allocated in
+ * av_bsf_alloc(), it is set by the filter in av_bsf_init().
+ */
+ AVCodecParameters* par_out;
+
+ /**
+ * The timebase used for the timestamps of the input packets. Set by the
+ * caller before av_bsf_init().
+ */
+ AVRational time_base_in;
+
+ /**
+ * The timebase used for the timestamps of the output packets. Set by the
+ * filter in av_bsf_init().
+ */
+ AVRational time_base_out;
+} AVBSFContext;
+
+typedef struct AVBitStreamFilter {
+ const char* name;
+
+ /**
+ * A list of codec ids supported by the filter, terminated by
+ * AV_CODEC_ID_NONE.
+   * May be NULL, in which case the bitstream filter works with any codec ID.
+ */
+ const enum AVCodecID* codec_ids;
+
+ /**
+ * A class for the private data, used to declare bitstream filter private
+ * AVOptions. This field is NULL for bitstream filters that do not declare
+ * any options.
+ *
+ * If this field is non-NULL, the first member of the filter private data
+ * must be a pointer to AVClass, which will be set by libavcodec generic
+ * code to this class.
+ */
+ const AVClass* priv_class;
+} AVBitStreamFilter;
+
+/**
+ * @return a bitstream filter with the specified name or NULL if no such
+ * bitstream filter exists.
+ */
+const AVBitStreamFilter* av_bsf_get_by_name(const char* name);
+
+/**
+ * Iterate over all registered bitstream filters.
+ *
+ * @param opaque a pointer where libavcodec will store the iteration state. Must
+ * point to NULL to start the iteration.
+ *
+ * @return the next registered bitstream filter or NULL when the iteration is
+ * finished
+ */
+const AVBitStreamFilter* av_bsf_iterate(void** opaque);
+
+/**
+ * Allocate a context for a given bitstream filter. The caller must fill in the
+ * context parameters as described in the documentation and then call
+ * av_bsf_init() before sending any data to the filter.
+ *
+ * @param filter the filter for which to allocate an instance.
+ * @param[out] ctx a pointer into which the pointer to the newly-allocated
+ * context will be written. It must be freed with av_bsf_free() after the
+ * filtering is done.
+ *
+ * @return 0 on success, a negative AVERROR code on failure
+ */
+int av_bsf_alloc(const AVBitStreamFilter* filter, AVBSFContext** ctx);
+
+/**
+ * Prepare the filter for use, after all the parameters and options have been
+ * set.
+ *
+ * @param ctx a AVBSFContext previously allocated with av_bsf_alloc()
+ */
+int av_bsf_init(AVBSFContext* ctx);
+
+/**
+ * Submit a packet for filtering.
+ *
+ * After sending each packet, the filter must be completely drained by calling
+ * av_bsf_receive_packet() repeatedly until it returns AVERROR(EAGAIN) or
+ * AVERROR_EOF.
+ *
+ * @param ctx an initialized AVBSFContext
+ * @param pkt the packet to filter. The bitstream filter will take ownership of
+ * the packet and reset the contents of pkt. pkt is not touched if an error
+ * occurs. If pkt is empty (i.e. NULL, or pkt->data is NULL and
+ * pkt->side_data_elems zero), it signals the end of the stream (i.e. no more
+ * non-empty packets will be sent; sending more empty packets does nothing) and
+ * will cause the filter to output any packets it may have buffered internally.
+ *
+ * @return
+ * - 0 on success.
+ * - AVERROR(EAGAIN) if packets need to be retrieved from the filter (using
+ * av_bsf_receive_packet()) before new input can be consumed.
+ * - Another negative AVERROR value if an error occurs.
+ */
+int av_bsf_send_packet(AVBSFContext* ctx, AVPacket* pkt);
+
+/**
+ * Retrieve a filtered packet.
+ *
+ * @param ctx an initialized AVBSFContext
+ * @param[out] pkt this struct will be filled with the contents of the filtered
+ * packet. It is owned by the caller and must be freed using
+ * av_packet_unref() when it is no longer needed.
+ * This parameter should be "clean" (i.e. freshly allocated
+ * with av_packet_alloc() or unreffed with av_packet_unref())
+ * when this function is called. If this function returns
+ * successfully, the contents of pkt will be completely
+ * overwritten by the returned data. On failure, pkt is not
+ * touched.
+ *
+ * @return
+ * - 0 on success.
+ * - AVERROR(EAGAIN) if more packets need to be sent to the filter (using
+ * av_bsf_send_packet()) to get more output.
+ * - AVERROR_EOF if there will be no further output from the filter.
+ * - Another negative AVERROR value if an error occurs.
+ *
+ * @note one input packet may result in several output packets, so after sending
+ * a packet with av_bsf_send_packet(), this function needs to be called
+ * repeatedly until it stops returning 0. It is also possible for a filter to
+ * output fewer packets than were sent to it, so this function may return
+ * AVERROR(EAGAIN) immediately after a successful av_bsf_send_packet() call.
+ */
+int av_bsf_receive_packet(AVBSFContext* ctx, AVPacket* pkt);
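+
+/* Editor's note: an illustrative sketch of the send/receive contract, not
+ * part of the upstream header. ctx is an initialized AVBSFContext; in is
+ * the next input packet (or NULL at EOF); out is an allocated AVPacket.
+ *
+ * @code
+ *   int ret = av_bsf_send_packet(ctx, in);
+ *   if (ret < 0 && ret != AVERROR(EAGAIN))
+ *     return ret;
+ *   // On AVERROR(EAGAIN), in was not consumed; resend it after draining.
+ *   while ((ret = av_bsf_receive_packet(ctx, out)) >= 0) {
+ *     // use the filtered packet, then release it for the next iteration
+ *     av_packet_unref(out);
+ *   }
+ *   if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
+ *     return ret;  // real error
+ * @endcode
+ */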
+
+/**
+ * Reset the internal bitstream filter state. Should be called e.g. when
+ * seeking.
+ */
+void av_bsf_flush(AVBSFContext* ctx);
+
+/**
+ * Free a bitstream filter context and everything associated with it; write NULL
+ * into the supplied pointer.
+ */
+void av_bsf_free(AVBSFContext** ctx);
+
+/**
+ * Get the AVClass for AVBSFContext. It can be used in combination with
+ * AV_OPT_SEARCH_FAKE_OBJ for examining options.
+ *
+ * @see av_opt_find().
+ */
+const AVClass* av_bsf_get_class(void);
+
+/**
+ * Structure for chain/list of bitstream filters.
+ * Empty list can be allocated by av_bsf_list_alloc().
+ */
+typedef struct AVBSFList AVBSFList;
+
+/**
+ * Allocate empty list of bitstream filters.
+ * The list must be later freed by av_bsf_list_free()
+ * or finalized by av_bsf_list_finalize().
+ *
+ * @return Pointer to @ref AVBSFList on success, NULL in case of failure
+ */
+AVBSFList* av_bsf_list_alloc(void);
+
+/**
+ * Free list of bitstream filters.
+ *
+ * @param lst Pointer to pointer returned by av_bsf_list_alloc()
+ */
+void av_bsf_list_free(AVBSFList** lst);
+
+/**
+ * Append bitstream filter to the list of bitstream filters.
+ *
+ * @param lst List to append to
+ * @param bsf Filter context to be appended
+ *
+ * @return >=0 on success, negative AVERROR in case of failure
+ */
+int av_bsf_list_append(AVBSFList* lst, AVBSFContext* bsf);
+
+/**
+ * Construct a new bitstream filter context given its name and options,
+ * and append it to the list of bitstream filters.
+ *
+ * @param lst List to append to
+ * @param bsf_name Name of the bitstream filter
+ * @param options Options for the bitstream filter, can be set to NULL
+ *
+ * @return >=0 on success, negative AVERROR in case of failure
+ */
+int av_bsf_list_append2(AVBSFList* lst, const char* bsf_name,
+ AVDictionary** options);
+/**
+ * Finalize list of bitstream filters.
+ *
+ * This function will transform @ref AVBSFList to single @ref AVBSFContext,
+ * so the whole chain of bitstream filters can be treated as single filter
+ * freshly allocated by av_bsf_alloc().
+ * If the call is successful, the @ref AVBSFList structure is freed and lst
+ * will be set to NULL. In case of failure, the caller is responsible for
+ * freeing the structure with av_bsf_list_free().
+ *
+ * @param lst Filter list structure to be transformed
+ * @param[out] bsf Pointer to be set to newly created @ref AVBSFContext
+ * structure representing the chain of bitstream filters
+ *
+ * @return >=0 on success, negative AVERROR in case of failure
+ */
+int av_bsf_list_finalize(AVBSFList** lst, AVBSFContext** bsf);
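+
+/* Editor's note: an illustrative sketch, not part of the upstream header.
+ * Building a two-filter chain by name and collapsing it into one context;
+ * the filter names are example values.
+ *
+ * @code
+ *   AVBSFList* lst = av_bsf_list_alloc();
+ *   AVBSFContext* chain = NULL;
+ *   if (av_bsf_list_append2(lst, "h264_mp4toannexb", NULL) < 0 ||
+ *       av_bsf_list_append2(lst, "h264_metadata", NULL) < 0 ||
+ *       av_bsf_list_finalize(&lst, &chain) < 0) {
+ *     av_bsf_list_free(&lst);  // safe: no-op if finalize already freed it
+ *     return AVERROR(EINVAL);
+ *   }
+ *   // chain now behaves like a single AVBSFContext from av_bsf_alloc()
+ * @endcode
+ */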
+
+/**
+ * Parse string describing list of bitstream filters and create single
+ * @ref AVBSFContext describing the whole chain of bitstream filters.
+ * Resulting @ref AVBSFContext can be treated as any other @ref AVBSFContext
+ * freshly allocated by av_bsf_alloc().
+ *
+ * @param str String describing chain of bitstream filters in format
+ * `bsf1[=opt1=val1:opt2=val2][,bsf2]`
+ * @param[out] bsf Pointer to be set to newly created @ref AVBSFContext
+ * structure representing the chain of bitstream filters
+ *
+ * @return >=0 on success, negative AVERROR in case of failure
+ */
+int av_bsf_list_parse_str(const char* str, AVBSFContext** bsf);
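+
+/* Editor's note: an illustrative sketch, not part of the upstream header.
+ * The same two-filter chain as above, built from its string form; the
+ * filter names are example values.
+ *
+ * @code
+ *   AVBSFContext* chain = NULL;
+ *   int ret = av_bsf_list_parse_str("h264_mp4toannexb,h264_metadata",
+ *                                   &chain);
+ * @endcode
+ */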
+
+/**
+ * Get null/pass-through bitstream filter.
+ *
+ * @param[out] bsf Pointer to be set to new instance of pass-through bitstream
+ * filter
+ *
+ * @return 0 on success, a negative AVERROR code on failure
+ */
+int av_bsf_get_null_filter(AVBSFContext** bsf);
+
+/**
+ * @}
+ */
+
+#endif // AVCODEC_BSF_H
diff --git a/dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/codec.h b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/codec.h
new file mode 100644
index 0000000000..7163d91d96
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/codec.h
@@ -0,0 +1,382 @@
+/*
+ * AVCodec public API
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_CODEC_H
+#define AVCODEC_CODEC_H
+
+#include <stdint.h>
+
+#include "libavutil/avutil.h"
+#include "libavutil/hwcontext.h"
+#include "libavutil/log.h"
+#include "libavutil/pixfmt.h"
+#include "libavutil/rational.h"
+#include "libavutil/samplefmt.h"
+
+#include "libavcodec/codec_id.h"
+#include "libavcodec/version_major.h"
+
+/**
+ * @addtogroup lavc_core
+ * @{
+ */
+
+/**
+ * Decoder can use draw_horiz_band callback.
+ */
+#define AV_CODEC_CAP_DRAW_HORIZ_BAND (1 << 0)
+/**
+ * Codec uses get_buffer() or get_encode_buffer() for allocating buffers and
+ * supports custom allocators.
+ * If not set, it might not use get_buffer() or get_encode_buffer() at all, or
+ * use operations that assume the buffer was allocated by
+ * avcodec_default_get_buffer2 or avcodec_default_get_encode_buffer.
+ */
+#define AV_CODEC_CAP_DR1 (1 << 1)
+/**
+ * Encoder or decoder requires flushing with NULL input at the end in order to
+ * give the complete and correct output.
+ *
+ * NOTE: If this flag is not set, the codec is guaranteed to never be fed
+ * NULL data. The user can still send NULL data to the public encode
+ * or decode function, but libavcodec will not pass it along to the codec
+ * unless this flag is set.
+ *
+ * Decoders:
+ * The decoder has a non-zero delay and needs to be fed with avpkt->data=NULL,
+ * avpkt->size=0 at the end to get the delayed data until the decoder no longer
+ * returns frames.
+ *
+ * Encoders:
+ * The encoder needs to be fed with NULL data at the end of encoding until the
+ * encoder no longer returns data.
+ *
+ * NOTE: For encoders implementing the AVCodec.encode2() function, setting this
+ * flag also means that the encoder must set the pts and duration for
+ * each output packet. If this flag is not set, the pts and duration will
+ * be determined by libavcodec from the input frame.
+ */
+#define AV_CODEC_CAP_DELAY (1 << 5)
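+
+/* Editor's note: an illustrative sketch, not part of the upstream header.
+ * Draining a delayed decoder with the avcodec_send_packet() /
+ * avcodec_receive_frame() API at end of stream; dec and frame are assumed
+ * to be an opened AVCodecContext and an allocated AVFrame.
+ *
+ * @code
+ *   avcodec_send_packet(dec, NULL);  // NULL packet enters draining mode
+ *   while (avcodec_receive_frame(dec, frame) >= 0) {
+ *     // consume the delayed frame, then reuse it
+ *     av_frame_unref(frame);
+ *   }
+ * @endcode
+ */
+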
+/**
+ * Codec can be fed a final frame with a smaller size.
+ * This can be used to prevent truncation of the last audio samples.
+ */
+#define AV_CODEC_CAP_SMALL_LAST_FRAME (1 << 6)
+
+#if FF_API_SUBFRAMES
+/**
+ * Codec can output multiple frames per AVPacket.
+ * Normally demuxers return one frame at a time; demuxers which do not do
+ * this are connected to a parser to split what they return into proper
+ * frames. This flag is reserved for the very rare category of codecs which
+ * have a bitstream that cannot be split into frames without time-consuming
+ * operations like full decoding. Demuxers carrying such bitstreams thus
+ * may return multiple frames in a packet. This has many disadvantages,
+ * like prohibiting stream copy in many cases, and thus it should only be
+ * considered as a last resort.
+ */
+# define AV_CODEC_CAP_SUBFRAMES (1 << 8)
+#endif
+
+/**
+ * Codec is experimental and is thus avoided in favor of non-experimental
+ * encoders.
+ */
+#define AV_CODEC_CAP_EXPERIMENTAL (1 << 9)
+/**
+ * Codec should fill in the channel configuration and sample rate instead of
+ * the container.
+ */
+#define AV_CODEC_CAP_CHANNEL_CONF (1 << 10)
+/**
+ * Codec supports frame-level multithreading.
+ */
+#define AV_CODEC_CAP_FRAME_THREADS (1 << 12)
+/**
+ * Codec supports slice-based (or partition-based) multithreading.
+ */
+#define AV_CODEC_CAP_SLICE_THREADS (1 << 13)
+/**
+ * Codec supports changed parameters at any point.
+ */
+#define AV_CODEC_CAP_PARAM_CHANGE (1 << 14)
+/**
+ * Codec supports multithreading through a method other than slice- or
+ * frame-level multithreading. Typically this marks wrappers around
+ * multithreading-capable external libraries.
+ */
+#define AV_CODEC_CAP_OTHER_THREADS (1 << 15)
+/**
+ * Audio encoder supports receiving a different number of samples in each call.
+ */
+#define AV_CODEC_CAP_VARIABLE_FRAME_SIZE (1 << 16)
+/**
+ * Decoder is not a preferred choice for probing.
+ * It could, for example, be a hardware decoder that is expensive to spin
+ * up, or it could simply not provide much useful information about the
+ * stream.
+ * A decoder marked with this flag should only be used as a last-resort
+ * choice for probing.
+ */
+#define AV_CODEC_CAP_AVOID_PROBING (1 << 17)
+
+/**
+ * Codec is backed by a hardware implementation. Typically used to
+ * identify a non-hwaccel hardware decoder. For information about hwaccels, use
+ * avcodec_get_hw_config() instead.
+ */
+#define AV_CODEC_CAP_HARDWARE (1 << 18)
+
+/**
+ * Codec is potentially backed by a hardware implementation, but not
+ * necessarily. This is used instead of AV_CODEC_CAP_HARDWARE, if the
+ * implementation provides some sort of internal fallback.
+ */
+#define AV_CODEC_CAP_HYBRID (1 << 19)
+
+/**
+ * This encoder can reorder user opaque values from input AVFrames and return
+ * them with corresponding output packets.
+ * @see AV_CODEC_FLAG_COPY_OPAQUE
+ */
+#define AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE (1 << 20)
+
+/**
+ * This encoder can be flushed using avcodec_flush_buffers(). If this flag is
+ * not set, the encoder must be closed and reopened to ensure that no frames
+ * remain pending.
+ */
+#define AV_CODEC_CAP_ENCODER_FLUSH (1 << 21)
+
+/**
+ * The encoder is able to output reconstructed frame data, i.e. raw frames that
+ * would be produced by decoding the encoded bitstream.
+ *
+ * Reconstructed frame output is enabled by the AV_CODEC_FLAG_RECON_FRAME flag.
+ */
+#define AV_CODEC_CAP_ENCODER_RECON_FRAME (1 << 22)
+
+/**
+ * AVProfile.
+ */
+typedef struct AVProfile {
+ int profile;
+ const char* name; ///< short name for the profile
+} AVProfile;
+
+/**
+ * AVCodec.
+ */
+typedef struct AVCodec {
+ /**
+ * Name of the codec implementation.
+ * The name is globally unique among encoders and among decoders (but an
+ * encoder and a decoder can share the same name).
+ * This is the primary way to find a codec from the user perspective.
+ */
+ const char* name;
+ /**
+   * Descriptive name for the codec, meant to be more human-readable than
+   * name.
+ * You should use the NULL_IF_CONFIG_SMALL() macro to define it.
+ */
+ const char* long_name;
+ enum AVMediaType type;
+ enum AVCodecID id;
+ /**
+ * Codec capabilities.
+ * see AV_CODEC_CAP_*
+ */
+ int capabilities;
+ uint8_t max_lowres; ///< maximum value for lowres supported by the decoder
+ const AVRational*
+ supported_framerates; ///< array of supported framerates, or NULL if any,
+ ///< array is terminated by {0,0}
+ const enum AVPixelFormat*
+ pix_fmts; ///< array of supported pixel formats, or NULL if unknown,
+ ///< array is terminated by -1
+ const int*
+ supported_samplerates; ///< array of supported audio samplerates, or NULL
+ ///< if unknown, array is terminated by 0
+ const enum AVSampleFormat*
+ sample_fmts; ///< array of supported sample formats, or NULL if unknown,
+ ///< array is terminated by -1
+ const AVClass* priv_class; ///< AVClass for the private context
+ const AVProfile*
+ profiles; ///< array of recognized profiles, or NULL if unknown, array is
+ ///< terminated by {AV_PROFILE_UNKNOWN}
+
+ /**
+ * Group name of the codec implementation.
+ * This is a short symbolic name of the wrapper backing this codec. A
+ * wrapper uses some kind of external implementation for the codec, such
+ * as an external library, or a codec implementation provided by the OS or
+ * the hardware.
+ * If this field is NULL, this is a builtin, libavcodec native codec.
+ * If non-NULL, this will be the suffix in AVCodec.name in most cases
+ * (usually AVCodec.name will be of the form "<codec_name>_<wrapper_name>").
+ */
+ const char* wrapper_name;
+
+ /**
+ * Array of supported channel layouts, terminated with a zeroed layout.
+ */
+ const AVChannelLayout* ch_layouts;
+} AVCodec;
+
+/**
+ * Iterate over all registered codecs.
+ *
+ * @param opaque a pointer where libavcodec will store the iteration state. Must
+ * point to NULL to start the iteration.
+ *
+ * @return the next registered codec or NULL when the iteration is
+ * finished
+ */
+const AVCodec* av_codec_iterate(void** opaque);
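+
+/* Editor's note: an illustrative sketch, not part of the upstream header.
+ * Listing the names of all registered encoders.
+ *
+ * @code
+ *   void* it = NULL;
+ *   const AVCodec* c;
+ *   while ((c = av_codec_iterate(&it))) {
+ *     if (av_codec_is_encoder(c))
+ *       printf("%s\n", c->name);  // requires <stdio.h>
+ *   }
+ * @endcode
+ */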
+
+/**
+ * Find a registered decoder with a matching codec ID.
+ *
+ * @param id AVCodecID of the requested decoder
+ * @return A decoder if one was found, NULL otherwise.
+ */
+const AVCodec* avcodec_find_decoder(enum AVCodecID id);
+
+/**
+ * Find a registered decoder with the specified name.
+ *
+ * @param name name of the requested decoder
+ * @return A decoder if one was found, NULL otherwise.
+ */
+const AVCodec* avcodec_find_decoder_by_name(const char* name);
+
+/**
+ * Find a registered encoder with a matching codec ID.
+ *
+ * @param id AVCodecID of the requested encoder
+ * @return An encoder if one was found, NULL otherwise.
+ */
+const AVCodec* avcodec_find_encoder(enum AVCodecID id);
+
+/**
+ * Find a registered encoder with the specified name.
+ *
+ * @param name name of the requested encoder
+ * @return An encoder if one was found, NULL otherwise.
+ */
+const AVCodec* avcodec_find_encoder_by_name(const char* name);
+/**
+ * @return a non-zero number if codec is an encoder, zero otherwise
+ */
+int av_codec_is_encoder(const AVCodec* codec);
+
+/**
+ * @return a non-zero number if codec is a decoder, zero otherwise
+ */
+int av_codec_is_decoder(const AVCodec* codec);
+
+/**
+ * Return a name for the specified profile, if available.
+ *
+ * @param codec the codec that is searched for the given profile
+ * @param profile the profile value for which a name is requested
+ * @return A name for the profile if found, NULL otherwise.
+ */
+const char* av_get_profile_name(const AVCodec* codec, int profile);
+
+enum {
+ /**
+ * The codec supports this format via the hw_device_ctx interface.
+ *
+ * When selecting this format, AVCodecContext.hw_device_ctx should
+ * have been set to a device of the specified type before calling
+ * avcodec_open2().
+ */
+ AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX = 0x01,
+ /**
+ * The codec supports this format via the hw_frames_ctx interface.
+ *
+ * When selecting this format for a decoder,
+ * AVCodecContext.hw_frames_ctx should be set to a suitable frames
+ * context inside the get_format() callback. The frames context
+ * must have been created on a device of the specified type.
+ *
+ * When selecting this format for an encoder,
+ * AVCodecContext.hw_frames_ctx should be set to the context which
+ * will be used for the input frames before calling avcodec_open2().
+ */
+ AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX = 0x02,
+ /**
+ * The codec supports this format by some internal method.
+ *
+ * This format can be selected without any additional configuration -
+ * no device or frames context is required.
+ */
+ AV_CODEC_HW_CONFIG_METHOD_INTERNAL = 0x04,
+ /**
+ * The codec supports this format by some ad-hoc method.
+ *
+ * Additional settings and/or function calls are required. See the
+ * codec-specific documentation for details. (Methods requiring
+ * this sort of configuration are deprecated and others should be
+ * used in preference.)
+ */
+ AV_CODEC_HW_CONFIG_METHOD_AD_HOC = 0x08,
+};
+
+typedef struct AVCodecHWConfig {
+ /**
+ * For decoders, a hardware pixel format which that decoder may be
+ * able to decode to if suitable hardware is available.
+ *
+ * For encoders, a pixel format which the encoder may be able to
+ * accept. If set to AV_PIX_FMT_NONE, this applies to all pixel
+ * formats supported by the codec.
+ */
+ enum AVPixelFormat pix_fmt;
+ /**
+ * Bit set of AV_CODEC_HW_CONFIG_METHOD_* flags, describing the possible
+ * setup methods which can be used with this configuration.
+ */
+ int methods;
+ /**
+ * The device type associated with the configuration.
+ *
+ * Must be set for AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX and
+ * AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX, otherwise unused.
+ */
+ enum AVHWDeviceType device_type;
+} AVCodecHWConfig;
+
+/**
+ * Retrieve supported hardware configurations for a codec.
+ *
+ * Values of index from zero to some maximum return the indexed configuration
+ * descriptor; all other values return NULL. If the codec does not support
+ * any hardware configurations then it will always return NULL.
+ */
+const AVCodecHWConfig* avcodec_get_hw_config(const AVCodec* codec, int index);
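+
+/* Editor's note: an illustrative sketch, not part of the upstream header.
+ * Probing a decoder for VAAPI support via the hw_device_ctx method; the
+ * device type is an example choice.
+ *
+ * @code
+ *   static enum AVPixelFormat find_vaapi_fmt(const AVCodec* codec) {
+ *     for (int i = 0;; i++) {
+ *       const AVCodecHWConfig* cfg = avcodec_get_hw_config(codec, i);
+ *       if (!cfg)
+ *         return AV_PIX_FMT_NONE;  // no matching configuration
+ *       if ((cfg->methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX) &&
+ *           cfg->device_type == AV_HWDEVICE_TYPE_VAAPI)
+ *         return cfg->pix_fmt;  // e.g. AV_PIX_FMT_VAAPI
+ *     }
+ *   }
+ * @endcode
+ */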
+
+/**
+ * @}
+ */
+
+#endif /* AVCODEC_CODEC_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/codec_desc.h b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/codec_desc.h
new file mode 100644
index 0000000000..a8d424ea7d
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/codec_desc.h
@@ -0,0 +1,134 @@
+/*
+ * Codec descriptors public API
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_CODEC_DESC_H
+#define AVCODEC_CODEC_DESC_H
+
+#include "libavutil/avutil.h"
+
+#include "codec_id.h"
+
+/**
+ * @addtogroup lavc_core
+ * @{
+ */
+
+/**
+ * This struct describes the properties of a single codec described by an
+ * AVCodecID.
+ * @see avcodec_descriptor_get()
+ */
+typedef struct AVCodecDescriptor {
+ enum AVCodecID id;
+ enum AVMediaType type;
+ /**
+ * Name of the codec described by this descriptor. It is non-empty and
+ * unique for each codec descriptor. It should contain alphanumeric
+ * characters and '_' only.
+ */
+ const char* name;
+ /**
+ * A more descriptive name for this codec. May be NULL.
+ */
+ const char* long_name;
+ /**
+ * Codec properties, a combination of AV_CODEC_PROP_* flags.
+ */
+ int props;
+ /**
+ * MIME type(s) associated with the codec.
+ * May be NULL; if not, a NULL-terminated array of MIME types.
+ * The first item is always non-NULL and is the preferred MIME type.
+ */
+ const char* const* mime_types;
+ /**
+ * If non-NULL, an array of profiles recognized for this codec.
+ * Terminated with AV_PROFILE_UNKNOWN.
+ */
+ const struct AVProfile* profiles;
+} AVCodecDescriptor;
+
+/**
+ * Codec uses only intra compression.
+ * Video and audio codecs only.
+ */
+#define AV_CODEC_PROP_INTRA_ONLY (1 << 0)
+/**
+ * Codec supports lossy compression. Audio and video codecs only.
+ * @note a codec may support both lossy and lossless
+ * compression modes
+ */
+#define AV_CODEC_PROP_LOSSY (1 << 1)
+/**
+ * Codec supports lossless compression. Audio and video codecs only.
+ */
+#define AV_CODEC_PROP_LOSSLESS (1 << 2)
+/**
+ * Codec supports frame reordering. That is, the coded order (the order in which
+ * the encoded packets are output by the encoders / stored / input to the
+ * decoders) may be different from the presentation order of the corresponding
+ * frames.
+ *
+ * For codecs that do not have this property set, PTS and DTS should always be
+ * equal.
+ */
+#define AV_CODEC_PROP_REORDER (1 << 3)
+
+/**
+ * Video codec supports separate coding of fields in interlaced frames.
+ */
+#define AV_CODEC_PROP_FIELDS (1 << 4)
+
+/**
+ * Subtitle codec is bitmap-based.
+ * Decoded AVSubtitle data can be read from the AVSubtitleRect->pict field.
+ */
+#define AV_CODEC_PROP_BITMAP_SUB (1 << 16)
+/**
+ * Subtitle codec is text based.
+ * Decoded AVSubtitle data can be read from the AVSubtitleRect->ass field.
+ */
+#define AV_CODEC_PROP_TEXT_SUB (1 << 17)
+
+/**
+ * @return descriptor for given codec ID or NULL if no descriptor exists.
+ */
+const AVCodecDescriptor* avcodec_descriptor_get(enum AVCodecID id);
+
+/**
+ * Iterate over all codec descriptors known to libavcodec.
+ *
+ * @param prev previous descriptor. NULL to get the first descriptor.
+ *
+ * @return next descriptor or NULL after the last descriptor
+ */
+const AVCodecDescriptor* avcodec_descriptor_next(const AVCodecDescriptor* prev);
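+
+/* Editor's note: an illustrative sketch, not part of the upstream header.
+ * Walking all descriptors and printing the lossless ones.
+ *
+ * @code
+ *   const AVCodecDescriptor* d = NULL;
+ *   while ((d = avcodec_descriptor_next(d))) {
+ *     if (d->props & AV_CODEC_PROP_LOSSLESS)
+ *       printf("%s\n", d->name);  // requires <stdio.h>
+ *   }
+ * @endcode
+ */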
+
+/**
+ * @return codec descriptor with the given name or NULL if no such descriptor
+ * exists.
+ */
+const AVCodecDescriptor* avcodec_descriptor_get_by_name(const char* name);
+
+/**
+ * @}
+ */
+
+#endif // AVCODEC_CODEC_DESC_H
diff --git a/dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/codec_id.h b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/codec_id.h
new file mode 100644
index 0000000000..edeb281ff4
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/codec_id.h
@@ -0,0 +1,676 @@
+/*
+ * Codec IDs
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_CODEC_ID_H
+#define AVCODEC_CODEC_ID_H
+
+#include "libavutil/avutil.h"
+#include "libavutil/samplefmt.h"
+
+#include "version_major.h"
+
+/**
+ * @addtogroup lavc_core
+ * @{
+ */
+
+/**
+ * Identify the syntax and semantics of the bitstream.
+ * The principle is roughly:
+ * Two decoders with the same ID can decode the same streams.
+ * Two encoders with the same ID can encode compatible streams.
+ * There may be slight deviations from the principle due to implementation
+ * details.
+ *
+ * If you add a codec ID to this list, add it so that
+ * 1. no value of an existing codec ID changes (that would break ABI),
+ * 2. it is as close as possible to similar codecs
+ *
+ * After adding new codec IDs, do not forget to add an entry to the codec
+ * descriptor list and bump libavcodec minor version.
+ */
+enum AVCodecID {
+ AV_CODEC_ID_NONE,
+
+ /* video codecs */
+ AV_CODEC_ID_MPEG1VIDEO,
+ AV_CODEC_ID_MPEG2VIDEO, ///< preferred ID for MPEG-1/2 video decoding
+ AV_CODEC_ID_H261,
+ AV_CODEC_ID_H263,
+ AV_CODEC_ID_RV10,
+ AV_CODEC_ID_RV20,
+ AV_CODEC_ID_MJPEG,
+ AV_CODEC_ID_MJPEGB,
+ AV_CODEC_ID_LJPEG,
+ AV_CODEC_ID_SP5X,
+ AV_CODEC_ID_JPEGLS,
+ AV_CODEC_ID_MPEG4,
+ AV_CODEC_ID_RAWVIDEO,
+ AV_CODEC_ID_MSMPEG4V1,
+ AV_CODEC_ID_MSMPEG4V2,
+ AV_CODEC_ID_MSMPEG4V3,
+ AV_CODEC_ID_WMV1,
+ AV_CODEC_ID_WMV2,
+ AV_CODEC_ID_H263P,
+ AV_CODEC_ID_H263I,
+ AV_CODEC_ID_FLV1,
+ AV_CODEC_ID_SVQ1,
+ AV_CODEC_ID_SVQ3,
+ AV_CODEC_ID_DVVIDEO,
+ AV_CODEC_ID_HUFFYUV,
+ AV_CODEC_ID_CYUV,
+ AV_CODEC_ID_H264,
+ AV_CODEC_ID_INDEO3,
+ AV_CODEC_ID_VP3,
+ AV_CODEC_ID_THEORA,
+ AV_CODEC_ID_ASV1,
+ AV_CODEC_ID_ASV2,
+ AV_CODEC_ID_FFV1,
+ AV_CODEC_ID_4XM,
+ AV_CODEC_ID_VCR1,
+ AV_CODEC_ID_CLJR,
+ AV_CODEC_ID_MDEC,
+ AV_CODEC_ID_ROQ,
+ AV_CODEC_ID_INTERPLAY_VIDEO,
+ AV_CODEC_ID_XAN_WC3,
+ AV_CODEC_ID_XAN_WC4,
+ AV_CODEC_ID_RPZA,
+ AV_CODEC_ID_CINEPAK,
+ AV_CODEC_ID_WS_VQA,
+ AV_CODEC_ID_MSRLE,
+ AV_CODEC_ID_MSVIDEO1,
+ AV_CODEC_ID_IDCIN,
+ AV_CODEC_ID_8BPS,
+ AV_CODEC_ID_SMC,
+ AV_CODEC_ID_FLIC,
+ AV_CODEC_ID_TRUEMOTION1,
+ AV_CODEC_ID_VMDVIDEO,
+ AV_CODEC_ID_MSZH,
+ AV_CODEC_ID_ZLIB,
+ AV_CODEC_ID_QTRLE,
+ AV_CODEC_ID_TSCC,
+ AV_CODEC_ID_ULTI,
+ AV_CODEC_ID_QDRAW,
+ AV_CODEC_ID_VIXL,
+ AV_CODEC_ID_QPEG,
+ AV_CODEC_ID_PNG,
+ AV_CODEC_ID_PPM,
+ AV_CODEC_ID_PBM,
+ AV_CODEC_ID_PGM,
+ AV_CODEC_ID_PGMYUV,
+ AV_CODEC_ID_PAM,
+ AV_CODEC_ID_FFVHUFF,
+ AV_CODEC_ID_RV30,
+ AV_CODEC_ID_RV40,
+ AV_CODEC_ID_VC1,
+ AV_CODEC_ID_WMV3,
+ AV_CODEC_ID_LOCO,
+ AV_CODEC_ID_WNV1,
+ AV_CODEC_ID_AASC,
+ AV_CODEC_ID_INDEO2,
+ AV_CODEC_ID_FRAPS,
+ AV_CODEC_ID_TRUEMOTION2,
+ AV_CODEC_ID_BMP,
+ AV_CODEC_ID_CSCD,
+ AV_CODEC_ID_MMVIDEO,
+ AV_CODEC_ID_ZMBV,
+ AV_CODEC_ID_AVS,
+ AV_CODEC_ID_SMACKVIDEO,
+ AV_CODEC_ID_NUV,
+ AV_CODEC_ID_KMVC,
+ AV_CODEC_ID_FLASHSV,
+ AV_CODEC_ID_CAVS,
+ AV_CODEC_ID_JPEG2000,
+ AV_CODEC_ID_VMNC,
+ AV_CODEC_ID_VP5,
+ AV_CODEC_ID_VP6,
+ AV_CODEC_ID_VP6F,
+ AV_CODEC_ID_TARGA,
+ AV_CODEC_ID_DSICINVIDEO,
+ AV_CODEC_ID_TIERTEXSEQVIDEO,
+ AV_CODEC_ID_TIFF,
+ AV_CODEC_ID_GIF,
+ AV_CODEC_ID_DXA,
+ AV_CODEC_ID_DNXHD,
+ AV_CODEC_ID_THP,
+ AV_CODEC_ID_SGI,
+ AV_CODEC_ID_C93,
+ AV_CODEC_ID_BETHSOFTVID,
+ AV_CODEC_ID_PTX,
+ AV_CODEC_ID_TXD,
+ AV_CODEC_ID_VP6A,
+ AV_CODEC_ID_AMV,
+ AV_CODEC_ID_VB,
+ AV_CODEC_ID_PCX,
+ AV_CODEC_ID_SUNRAST,
+ AV_CODEC_ID_INDEO4,
+ AV_CODEC_ID_INDEO5,
+ AV_CODEC_ID_MIMIC,
+ AV_CODEC_ID_RL2,
+ AV_CODEC_ID_ESCAPE124,
+ AV_CODEC_ID_DIRAC,
+ AV_CODEC_ID_BFI,
+ AV_CODEC_ID_CMV,
+ AV_CODEC_ID_MOTIONPIXELS,
+ AV_CODEC_ID_TGV,
+ AV_CODEC_ID_TGQ,
+ AV_CODEC_ID_TQI,
+ AV_CODEC_ID_AURA,
+ AV_CODEC_ID_AURA2,
+ AV_CODEC_ID_V210X,
+ AV_CODEC_ID_TMV,
+ AV_CODEC_ID_V210,
+ AV_CODEC_ID_DPX,
+ AV_CODEC_ID_MAD,
+ AV_CODEC_ID_FRWU,
+ AV_CODEC_ID_FLASHSV2,
+ AV_CODEC_ID_CDGRAPHICS,
+ AV_CODEC_ID_R210,
+ AV_CODEC_ID_ANM,
+ AV_CODEC_ID_BINKVIDEO,
+ AV_CODEC_ID_IFF_ILBM,
+#define AV_CODEC_ID_IFF_BYTERUN1 AV_CODEC_ID_IFF_ILBM
+ AV_CODEC_ID_KGV1,
+ AV_CODEC_ID_YOP,
+ AV_CODEC_ID_VP8,
+ AV_CODEC_ID_PICTOR,
+ AV_CODEC_ID_ANSI,
+ AV_CODEC_ID_A64_MULTI,
+ AV_CODEC_ID_A64_MULTI5,
+ AV_CODEC_ID_R10K,
+ AV_CODEC_ID_MXPEG,
+ AV_CODEC_ID_LAGARITH,
+ AV_CODEC_ID_PRORES,
+ AV_CODEC_ID_JV,
+ AV_CODEC_ID_DFA,
+ AV_CODEC_ID_WMV3IMAGE,
+ AV_CODEC_ID_VC1IMAGE,
+ AV_CODEC_ID_UTVIDEO,
+ AV_CODEC_ID_BMV_VIDEO,
+ AV_CODEC_ID_VBLE,
+ AV_CODEC_ID_DXTORY,
+ AV_CODEC_ID_V410,
+ AV_CODEC_ID_XWD,
+ AV_CODEC_ID_CDXL,
+ AV_CODEC_ID_XBM,
+ AV_CODEC_ID_ZEROCODEC,
+ AV_CODEC_ID_MSS1,
+ AV_CODEC_ID_MSA1,
+ AV_CODEC_ID_TSCC2,
+ AV_CODEC_ID_MTS2,
+ AV_CODEC_ID_CLLC,
+ AV_CODEC_ID_MSS2,
+ AV_CODEC_ID_VP9,
+ AV_CODEC_ID_AIC,
+ AV_CODEC_ID_ESCAPE130,
+ AV_CODEC_ID_G2M,
+ AV_CODEC_ID_WEBP,
+ AV_CODEC_ID_HNM4_VIDEO,
+ AV_CODEC_ID_HEVC,
+#define AV_CODEC_ID_H265 AV_CODEC_ID_HEVC
+ AV_CODEC_ID_FIC,
+ AV_CODEC_ID_ALIAS_PIX,
+ AV_CODEC_ID_BRENDER_PIX,
+ AV_CODEC_ID_PAF_VIDEO,
+ AV_CODEC_ID_EXR,
+ AV_CODEC_ID_VP7,
+ AV_CODEC_ID_SANM,
+ AV_CODEC_ID_SGIRLE,
+ AV_CODEC_ID_MVC1,
+ AV_CODEC_ID_MVC2,
+ AV_CODEC_ID_HQX,
+ AV_CODEC_ID_TDSC,
+ AV_CODEC_ID_HQ_HQA,
+ AV_CODEC_ID_HAP,
+ AV_CODEC_ID_DDS,
+ AV_CODEC_ID_DXV,
+ AV_CODEC_ID_SCREENPRESSO,
+ AV_CODEC_ID_RSCC,
+ AV_CODEC_ID_AVS2,
+ AV_CODEC_ID_PGX,
+ AV_CODEC_ID_AVS3,
+ AV_CODEC_ID_MSP2,
+ AV_CODEC_ID_VVC,
+#define AV_CODEC_ID_H266 AV_CODEC_ID_VVC
+ AV_CODEC_ID_Y41P,
+ AV_CODEC_ID_AVRP,
+ AV_CODEC_ID_012V,
+ AV_CODEC_ID_AVUI,
+ AV_CODEC_ID_TARGA_Y216,
+ AV_CODEC_ID_V308,
+ AV_CODEC_ID_V408,
+ AV_CODEC_ID_YUV4,
+ AV_CODEC_ID_AVRN,
+ AV_CODEC_ID_CPIA,
+ AV_CODEC_ID_XFACE,
+ AV_CODEC_ID_SNOW,
+ AV_CODEC_ID_SMVJPEG,
+ AV_CODEC_ID_APNG,
+ AV_CODEC_ID_DAALA,
+ AV_CODEC_ID_CFHD,
+ AV_CODEC_ID_TRUEMOTION2RT,
+ AV_CODEC_ID_M101,
+ AV_CODEC_ID_MAGICYUV,
+ AV_CODEC_ID_SHEERVIDEO,
+ AV_CODEC_ID_YLC,
+ AV_CODEC_ID_PSD,
+ AV_CODEC_ID_PIXLET,
+ AV_CODEC_ID_SPEEDHQ,
+ AV_CODEC_ID_FMVC,
+ AV_CODEC_ID_SCPR,
+ AV_CODEC_ID_CLEARVIDEO,
+ AV_CODEC_ID_XPM,
+ AV_CODEC_ID_AV1,
+ AV_CODEC_ID_BITPACKED,
+ AV_CODEC_ID_MSCC,
+ AV_CODEC_ID_SRGC,
+ AV_CODEC_ID_SVG,
+ AV_CODEC_ID_GDV,
+ AV_CODEC_ID_FITS,
+ AV_CODEC_ID_IMM4,
+ AV_CODEC_ID_PROSUMER,
+ AV_CODEC_ID_MWSC,
+ AV_CODEC_ID_WCMV,
+ AV_CODEC_ID_RASC,
+ AV_CODEC_ID_HYMT,
+ AV_CODEC_ID_ARBC,
+ AV_CODEC_ID_AGM,
+ AV_CODEC_ID_LSCR,
+ AV_CODEC_ID_VP4,
+ AV_CODEC_ID_IMM5,
+ AV_CODEC_ID_MVDV,
+ AV_CODEC_ID_MVHA,
+ AV_CODEC_ID_CDTOONS,
+ AV_CODEC_ID_MV30,
+ AV_CODEC_ID_NOTCHLC,
+ AV_CODEC_ID_PFM,
+ AV_CODEC_ID_MOBICLIP,
+ AV_CODEC_ID_PHOTOCD,
+ AV_CODEC_ID_IPU,
+ AV_CODEC_ID_ARGO,
+ AV_CODEC_ID_CRI,
+ AV_CODEC_ID_SIMBIOSIS_IMX,
+ AV_CODEC_ID_SGA_VIDEO,
+ AV_CODEC_ID_GEM,
+ AV_CODEC_ID_VBN,
+ AV_CODEC_ID_JPEGXL,
+ AV_CODEC_ID_QOI,
+ AV_CODEC_ID_PHM,
+ AV_CODEC_ID_RADIANCE_HDR,
+ AV_CODEC_ID_WBMP,
+ AV_CODEC_ID_MEDIA100,
+ AV_CODEC_ID_VQC,
+ AV_CODEC_ID_PDV,
+ AV_CODEC_ID_EVC,
+ AV_CODEC_ID_RTV1,
+ AV_CODEC_ID_VMIX,
+ AV_CODEC_ID_LEAD,
+
+ /* various PCM "codecs" */
+ AV_CODEC_ID_FIRST_AUDIO =
+ 0x10000, ///< A dummy id pointing at the start of audio codecs
+ AV_CODEC_ID_PCM_S16LE = 0x10000,
+ AV_CODEC_ID_PCM_S16BE,
+ AV_CODEC_ID_PCM_U16LE,
+ AV_CODEC_ID_PCM_U16BE,
+ AV_CODEC_ID_PCM_S8,
+ AV_CODEC_ID_PCM_U8,
+ AV_CODEC_ID_PCM_MULAW,
+ AV_CODEC_ID_PCM_ALAW,
+ AV_CODEC_ID_PCM_S32LE,
+ AV_CODEC_ID_PCM_S32BE,
+ AV_CODEC_ID_PCM_U32LE,
+ AV_CODEC_ID_PCM_U32BE,
+ AV_CODEC_ID_PCM_S24LE,
+ AV_CODEC_ID_PCM_S24BE,
+ AV_CODEC_ID_PCM_U24LE,
+ AV_CODEC_ID_PCM_U24BE,
+ AV_CODEC_ID_PCM_S24DAUD,
+ AV_CODEC_ID_PCM_ZORK,
+ AV_CODEC_ID_PCM_S16LE_PLANAR,
+ AV_CODEC_ID_PCM_DVD,
+ AV_CODEC_ID_PCM_F32BE,
+ AV_CODEC_ID_PCM_F32LE,
+ AV_CODEC_ID_PCM_F64BE,
+ AV_CODEC_ID_PCM_F64LE,
+ AV_CODEC_ID_PCM_BLURAY,
+ AV_CODEC_ID_PCM_LXF,
+ AV_CODEC_ID_S302M,
+ AV_CODEC_ID_PCM_S8_PLANAR,
+ AV_CODEC_ID_PCM_S24LE_PLANAR,
+ AV_CODEC_ID_PCM_S32LE_PLANAR,
+ AV_CODEC_ID_PCM_S16BE_PLANAR,
+ AV_CODEC_ID_PCM_S64LE,
+ AV_CODEC_ID_PCM_S64BE,
+ AV_CODEC_ID_PCM_F16LE,
+ AV_CODEC_ID_PCM_F24LE,
+ AV_CODEC_ID_PCM_VIDC,
+ AV_CODEC_ID_PCM_SGA,
+
+ /* various ADPCM codecs */
+ AV_CODEC_ID_ADPCM_IMA_QT = 0x11000,
+ AV_CODEC_ID_ADPCM_IMA_WAV,
+ AV_CODEC_ID_ADPCM_IMA_DK3,
+ AV_CODEC_ID_ADPCM_IMA_DK4,
+ AV_CODEC_ID_ADPCM_IMA_WS,
+ AV_CODEC_ID_ADPCM_IMA_SMJPEG,
+ AV_CODEC_ID_ADPCM_MS,
+ AV_CODEC_ID_ADPCM_4XM,
+ AV_CODEC_ID_ADPCM_XA,
+ AV_CODEC_ID_ADPCM_ADX,
+ AV_CODEC_ID_ADPCM_EA,
+ AV_CODEC_ID_ADPCM_G726,
+ AV_CODEC_ID_ADPCM_CT,
+ AV_CODEC_ID_ADPCM_SWF,
+ AV_CODEC_ID_ADPCM_YAMAHA,
+ AV_CODEC_ID_ADPCM_SBPRO_4,
+ AV_CODEC_ID_ADPCM_SBPRO_3,
+ AV_CODEC_ID_ADPCM_SBPRO_2,
+ AV_CODEC_ID_ADPCM_THP,
+ AV_CODEC_ID_ADPCM_IMA_AMV,
+ AV_CODEC_ID_ADPCM_EA_R1,
+ AV_CODEC_ID_ADPCM_EA_R3,
+ AV_CODEC_ID_ADPCM_EA_R2,
+ AV_CODEC_ID_ADPCM_IMA_EA_SEAD,
+ AV_CODEC_ID_ADPCM_IMA_EA_EACS,
+ AV_CODEC_ID_ADPCM_EA_XAS,
+ AV_CODEC_ID_ADPCM_EA_MAXIS_XA,
+ AV_CODEC_ID_ADPCM_IMA_ISS,
+ AV_CODEC_ID_ADPCM_G722,
+ AV_CODEC_ID_ADPCM_IMA_APC,
+ AV_CODEC_ID_ADPCM_VIMA,
+ AV_CODEC_ID_ADPCM_AFC,
+ AV_CODEC_ID_ADPCM_IMA_OKI,
+ AV_CODEC_ID_ADPCM_DTK,
+ AV_CODEC_ID_ADPCM_IMA_RAD,
+ AV_CODEC_ID_ADPCM_G726LE,
+ AV_CODEC_ID_ADPCM_THP_LE,
+ AV_CODEC_ID_ADPCM_PSX,
+ AV_CODEC_ID_ADPCM_AICA,
+ AV_CODEC_ID_ADPCM_IMA_DAT4,
+ AV_CODEC_ID_ADPCM_MTAF,
+ AV_CODEC_ID_ADPCM_AGM,
+ AV_CODEC_ID_ADPCM_ARGO,
+ AV_CODEC_ID_ADPCM_IMA_SSI,
+ AV_CODEC_ID_ADPCM_ZORK,
+ AV_CODEC_ID_ADPCM_IMA_APM,
+ AV_CODEC_ID_ADPCM_IMA_ALP,
+ AV_CODEC_ID_ADPCM_IMA_MTF,
+ AV_CODEC_ID_ADPCM_IMA_CUNNING,
+ AV_CODEC_ID_ADPCM_IMA_MOFLEX,
+ AV_CODEC_ID_ADPCM_IMA_ACORN,
+ AV_CODEC_ID_ADPCM_XMD,
+
+ /* AMR */
+ AV_CODEC_ID_AMR_NB = 0x12000,
+ AV_CODEC_ID_AMR_WB,
+
+ /* RealAudio codecs*/
+ AV_CODEC_ID_RA_144 = 0x13000,
+ AV_CODEC_ID_RA_288,
+
+ /* various DPCM codecs */
+ AV_CODEC_ID_ROQ_DPCM = 0x14000,
+ AV_CODEC_ID_INTERPLAY_DPCM,
+ AV_CODEC_ID_XAN_DPCM,
+ AV_CODEC_ID_SOL_DPCM,
+ AV_CODEC_ID_SDX2_DPCM,
+ AV_CODEC_ID_GREMLIN_DPCM,
+ AV_CODEC_ID_DERF_DPCM,
+ AV_CODEC_ID_WADY_DPCM,
+ AV_CODEC_ID_CBD2_DPCM,
+
+ /* audio codecs */
+ AV_CODEC_ID_MP2 = 0x15000,
+ AV_CODEC_ID_MP3, ///< preferred ID for decoding MPEG audio layer 1, 2 or 3
+ AV_CODEC_ID_AAC,
+ AV_CODEC_ID_AC3,
+ AV_CODEC_ID_DTS,
+ AV_CODEC_ID_VORBIS,
+ AV_CODEC_ID_DVAUDIO,
+ AV_CODEC_ID_WMAV1,
+ AV_CODEC_ID_WMAV2,
+ AV_CODEC_ID_MACE3,
+ AV_CODEC_ID_MACE6,
+ AV_CODEC_ID_VMDAUDIO,
+ AV_CODEC_ID_FLAC,
+ AV_CODEC_ID_MP3ADU,
+ AV_CODEC_ID_MP3ON4,
+ AV_CODEC_ID_SHORTEN,
+ AV_CODEC_ID_ALAC,
+ AV_CODEC_ID_WESTWOOD_SND1,
+ AV_CODEC_ID_GSM, ///< as in Berlin toast format
+ AV_CODEC_ID_QDM2,
+ AV_CODEC_ID_COOK,
+ AV_CODEC_ID_TRUESPEECH,
+ AV_CODEC_ID_TTA,
+ AV_CODEC_ID_SMACKAUDIO,
+ AV_CODEC_ID_QCELP,
+ AV_CODEC_ID_WAVPACK,
+ AV_CODEC_ID_DSICINAUDIO,
+ AV_CODEC_ID_IMC,
+ AV_CODEC_ID_MUSEPACK7,
+ AV_CODEC_ID_MLP,
+ AV_CODEC_ID_GSM_MS, /* as found in WAV */
+ AV_CODEC_ID_ATRAC3,
+ AV_CODEC_ID_APE,
+ AV_CODEC_ID_NELLYMOSER,
+ AV_CODEC_ID_MUSEPACK8,
+ AV_CODEC_ID_SPEEX,
+ AV_CODEC_ID_WMAVOICE,
+ AV_CODEC_ID_WMAPRO,
+ AV_CODEC_ID_WMALOSSLESS,
+ AV_CODEC_ID_ATRAC3P,
+ AV_CODEC_ID_EAC3,
+ AV_CODEC_ID_SIPR,
+ AV_CODEC_ID_MP1,
+ AV_CODEC_ID_TWINVQ,
+ AV_CODEC_ID_TRUEHD,
+ AV_CODEC_ID_MP4ALS,
+ AV_CODEC_ID_ATRAC1,
+ AV_CODEC_ID_BINKAUDIO_RDFT,
+ AV_CODEC_ID_BINKAUDIO_DCT,
+ AV_CODEC_ID_AAC_LATM,
+ AV_CODEC_ID_QDMC,
+ AV_CODEC_ID_CELT,
+ AV_CODEC_ID_G723_1,
+ AV_CODEC_ID_G729,
+ AV_CODEC_ID_8SVX_EXP,
+ AV_CODEC_ID_8SVX_FIB,
+ AV_CODEC_ID_BMV_AUDIO,
+ AV_CODEC_ID_RALF,
+ AV_CODEC_ID_IAC,
+ AV_CODEC_ID_ILBC,
+ AV_CODEC_ID_OPUS,
+ AV_CODEC_ID_COMFORT_NOISE,
+ AV_CODEC_ID_TAK,
+ AV_CODEC_ID_METASOUND,
+ AV_CODEC_ID_PAF_AUDIO,
+ AV_CODEC_ID_ON2AVC,
+ AV_CODEC_ID_DSS_SP,
+ AV_CODEC_ID_CODEC2,
+ AV_CODEC_ID_FFWAVESYNTH,
+ AV_CODEC_ID_SONIC,
+ AV_CODEC_ID_SONIC_LS,
+ AV_CODEC_ID_EVRC,
+ AV_CODEC_ID_SMV,
+ AV_CODEC_ID_DSD_LSBF,
+ AV_CODEC_ID_DSD_MSBF,
+ AV_CODEC_ID_DSD_LSBF_PLANAR,
+ AV_CODEC_ID_DSD_MSBF_PLANAR,
+ AV_CODEC_ID_4GV,
+ AV_CODEC_ID_INTERPLAY_ACM,
+ AV_CODEC_ID_XMA1,
+ AV_CODEC_ID_XMA2,
+ AV_CODEC_ID_DST,
+ AV_CODEC_ID_ATRAC3AL,
+ AV_CODEC_ID_ATRAC3PAL,
+ AV_CODEC_ID_DOLBY_E,
+ AV_CODEC_ID_APTX,
+ AV_CODEC_ID_APTX_HD,
+ AV_CODEC_ID_SBC,
+ AV_CODEC_ID_ATRAC9,
+ AV_CODEC_ID_HCOM,
+ AV_CODEC_ID_ACELP_KELVIN,
+ AV_CODEC_ID_MPEGH_3D_AUDIO,
+ AV_CODEC_ID_SIREN,
+ AV_CODEC_ID_HCA,
+ AV_CODEC_ID_FASTAUDIO,
+ AV_CODEC_ID_MSNSIREN,
+ AV_CODEC_ID_DFPWM,
+ AV_CODEC_ID_BONK,
+ AV_CODEC_ID_MISC4,
+ AV_CODEC_ID_APAC,
+ AV_CODEC_ID_FTR,
+ AV_CODEC_ID_WAVARC,
+ AV_CODEC_ID_RKA,
+ AV_CODEC_ID_AC4,
+ AV_CODEC_ID_OSQ,
+ AV_CODEC_ID_QOA,
+ AV_CODEC_ID_LC3,
+
+ /* subtitle codecs */
+ AV_CODEC_ID_FIRST_SUBTITLE =
+ 0x17000, ///< A dummy ID pointing at the start of subtitle codecs.
+ AV_CODEC_ID_DVD_SUBTITLE = 0x17000,
+ AV_CODEC_ID_DVB_SUBTITLE,
+ AV_CODEC_ID_TEXT, ///< raw UTF-8 text
+ AV_CODEC_ID_XSUB,
+ AV_CODEC_ID_SSA,
+ AV_CODEC_ID_MOV_TEXT,
+ AV_CODEC_ID_HDMV_PGS_SUBTITLE,
+ AV_CODEC_ID_DVB_TELETEXT,
+ AV_CODEC_ID_SRT,
+ AV_CODEC_ID_MICRODVD,
+ AV_CODEC_ID_EIA_608,
+ AV_CODEC_ID_JACOSUB,
+ AV_CODEC_ID_SAMI,
+ AV_CODEC_ID_REALTEXT,
+ AV_CODEC_ID_STL,
+ AV_CODEC_ID_SUBVIEWER1,
+ AV_CODEC_ID_SUBVIEWER,
+ AV_CODEC_ID_SUBRIP,
+ AV_CODEC_ID_WEBVTT,
+ AV_CODEC_ID_MPL2,
+ AV_CODEC_ID_VPLAYER,
+ AV_CODEC_ID_PJS,
+ AV_CODEC_ID_ASS,
+ AV_CODEC_ID_HDMV_TEXT_SUBTITLE,
+ AV_CODEC_ID_TTML,
+ AV_CODEC_ID_ARIB_CAPTION,
+
+ /* other specific kind of codecs (generally used for attachments) */
+ AV_CODEC_ID_FIRST_UNKNOWN =
+ 0x18000, ///< A dummy ID pointing at the start of various fake codecs.
+ AV_CODEC_ID_TTF = 0x18000,
+
+ AV_CODEC_ID_SCTE_35, ///< Contain timestamp estimated through PCR of program
+ ///< stream.
+ AV_CODEC_ID_EPG,
+ AV_CODEC_ID_BINTEXT,
+ AV_CODEC_ID_XBIN,
+ AV_CODEC_ID_IDF,
+ AV_CODEC_ID_OTF,
+ AV_CODEC_ID_SMPTE_KLV,
+ AV_CODEC_ID_DVD_NAV,
+ AV_CODEC_ID_TIMED_ID3,
+ AV_CODEC_ID_BIN_DATA,
+ AV_CODEC_ID_SMPTE_2038,
+
+ AV_CODEC_ID_PROBE =
+ 0x19000, ///< codec_id is not known (like AV_CODEC_ID_NONE) but lavf
+ ///< should attempt to identify it
+
+ AV_CODEC_ID_MPEG2TS = 0x20000, /**< _FAKE_ codec to indicate a raw MPEG-2 TS
+ * stream (only used by libavformat) */
+ AV_CODEC_ID_MPEG4SYSTEMS =
+ 0x20001, /**< _FAKE_ codec to indicate a MPEG-4 Systems
+ * stream (only used by libavformat) */
+ AV_CODEC_ID_FFMETADATA = 0x21000, ///< Dummy codec for streams containing
+ ///< only metadata information.
+ AV_CODEC_ID_WRAPPED_AVFRAME =
+ 0x21001, ///< Passthrough codec, AVFrames wrapped in AVPacket
+ /**
+ * Dummy null video codec, useful mainly for development and debugging.
+   * Null encoders/decoders discard all input and never return any output.
+ */
+ AV_CODEC_ID_VNULL,
+ /**
+ * Dummy null audio codec, useful mainly for development and debugging.
+   * Null encoders/decoders discard all input and never return any output.
+ */
+ AV_CODEC_ID_ANULL,
+};
+
+/**
+ * Get the type of the given codec.
+ */
+enum AVMediaType avcodec_get_type(enum AVCodecID codec_id);
+
+/**
+ * Get the name of a codec.
+ * @return a static string identifying the codec; never NULL
+ */
+const char* avcodec_get_name(enum AVCodecID id);
+
+/**
+ * Return codec bits per sample.
+ *
+ * @param[in] codec_id the codec
+ * @return Number of bits per sample or zero if unknown for the given codec.
+ */
+int av_get_bits_per_sample(enum AVCodecID codec_id);
+
+/**
+ * Return codec bits per sample.
+ * Only return non-zero if the bits per sample is exactly correct, not an
+ * approximation.
+ *
+ * @param[in] codec_id the codec
+ * @return Number of bits per sample or zero if unknown for the given codec.
+ */
+int av_get_exact_bits_per_sample(enum AVCodecID codec_id);
+
+/**
+ * Return a name for the specified profile, if available.
+ *
+ * @param codec_id the ID of the codec to which the requested profile belongs
+ * @param profile the profile value for which a name is requested
+ * @return A name for the profile if found, NULL otherwise.
+ *
+ * @note unlike av_get_profile_name(), which searches a list of profiles
+ * supported by a specific decoder or encoder implementation, this
+ * function searches the list of profiles from the AVCodecDescriptor
+ */
+const char* avcodec_profile_name(enum AVCodecID codec_id, int profile);
+
+/**
+ * Return the PCM codec associated with a sample format.
+ * @param be endianness, 0 for little, 1 for big,
+ * -1 (or anything else) for native
+ * @return AV_CODEC_ID_PCM_* or AV_CODEC_ID_NONE
+ */
+enum AVCodecID av_get_pcm_codec(enum AVSampleFormat fmt, int be);
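+
+/* Editor's note: an illustrative sketch, not part of the upstream header.
+ * Mapping a sample format plus endianness to its PCM codec ID.
+ *
+ * @code
+ *   enum AVCodecID le = av_get_pcm_codec(AV_SAMPLE_FMT_S16, 0);
+ *   enum AVCodecID be = av_get_pcm_codec(AV_SAMPLE_FMT_S16, 1);
+ *   // le == AV_CODEC_ID_PCM_S16LE, be == AV_CODEC_ID_PCM_S16BE
+ * @endcode
+ */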
+
+/**
+ * @}
+ */
+
+#endif // AVCODEC_CODEC_ID_H
diff --git a/dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/codec_par.h b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/codec_par.h
new file mode 100644
index 0000000000..a99b976bcb
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/codec_par.h
@@ -0,0 +1,250 @@
+/*
+ * Codec parameters public API
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_CODEC_PAR_H
+#define AVCODEC_CODEC_PAR_H
+
+#include <stdint.h>
+
+#include "libavutil/avutil.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/rational.h"
+#include "libavutil/pixfmt.h"
+
+#include "codec_id.h"
+#include "defs.h"
+#include "packet.h"
+
+/**
+ * @addtogroup lavc_core
+ * @{
+ */
+
+/**
+ * This struct describes the properties of an encoded stream.
+ *
+ * sizeof(AVCodecParameters) is not a part of the public ABI, this struct must
+ * be allocated with avcodec_parameters_alloc() and freed with
+ * avcodec_parameters_free().
+ */
+typedef struct AVCodecParameters {
+ /**
+ * General type of the encoded data.
+ */
+ enum AVMediaType codec_type;
+ /**
+ * Specific type of the encoded data (the codec used).
+ */
+ enum AVCodecID codec_id;
+ /**
+ * Additional information about the codec (corresponds to the AVI FOURCC).
+ */
+ uint32_t codec_tag;
+
+ /**
+ * Extra binary data needed for initializing the decoder, codec-dependent.
+ *
+ * Must be allocated with av_malloc() and will be freed by
+ * avcodec_parameters_free(). The allocated size of extradata must be at
+ * least extradata_size + AV_INPUT_BUFFER_PADDING_SIZE, with the padding
+ * bytes zeroed.
+ */
+ uint8_t* extradata;
+ /**
+ * Size of the extradata content in bytes.
+ */
+ int extradata_size;
+
+ /**
+ * Additional data associated with the entire stream.
+ *
+ * Should be allocated with av_packet_side_data_new() or
+ * av_packet_side_data_add(), and will be freed by avcodec_parameters_free().
+ */
+ AVPacketSideData* coded_side_data;
+
+ /**
+   * Number of entries in @ref coded_side_data.
+ */
+ int nb_coded_side_data;
+
+ /**
+ * - video: the pixel format, the value corresponds to enum AVPixelFormat.
+ * - audio: the sample format, the value corresponds to enum AVSampleFormat.
+ */
+ int format;
+
+ /**
+ * The average bitrate of the encoded data (in bits per second).
+ */
+ int64_t bit_rate;
+
+ /**
+   * The number of bits per sample in the codewords.
+   *
+   * This is basically the bitrate per sample. It is mandatory for a number
+   * of formats to actually decode them. It is the number of bits for one
+   * sample in the actual coded bitstream.
+   *
+   * This could be, for example, 4 for ADPCM.
+   * For PCM formats this matches bits_per_raw_sample.
+   * Can be 0.
+ */
+ int bits_per_coded_sample;
+
+ /**
+ * This is the number of valid bits in each output sample. If the
+ * sample format has more bits, the least significant bits are additional
+ * padding bits, which are always 0. Use right shifts to reduce the sample
+ * to its actual size. For example, audio formats with 24 bit samples will
+ * have bits_per_raw_sample set to 24, and format set to AV_SAMPLE_FMT_S32.
+   * To get the original sample, use "(int32_t)sample >> 8".
+   *
+   * For ADPCM this might be 12 or 16 or similar.
+   * Can be 0.
+ */
+ int bits_per_raw_sample;
+
+ /**
+ * Codec-specific bitstream restrictions that the stream conforms to.
+ */
+ int profile;
+ int level;
+
+ /**
+ * Video only. The dimensions of the video frame in pixels.
+ */
+ int width;
+ int height;
+
+ /**
+ * Video only. The aspect ratio (width / height) which a single pixel
+ * should have when displayed.
+ *
+ * When the aspect ratio is unknown / undefined, the numerator should be
+ * set to 0 (the denominator may have any value).
+ */
+ AVRational sample_aspect_ratio;
+
+ /**
+ * Video only. Number of frames per second, for streams with constant frame
+ * durations. Should be set to { 0, 1 } when some frames have differing
+ * durations or if the value is not known.
+ *
+   * @note This field corresponds to values that are stored in codec-level
+ * headers and is typically overridden by container/transport-layer
+ * timestamps, when available. It should thus be used only as a last resort,
+ * when no higher-level timing information is available.
+ */
+ AVRational framerate;
+
+ /**
+ * Video only. The order of the fields in interlaced video.
+ */
+ enum AVFieldOrder field_order;
+
+ /**
+ * Video only. Additional colorspace characteristics.
+ */
+ enum AVColorRange color_range;
+ enum AVColorPrimaries color_primaries;
+ enum AVColorTransferCharacteristic color_trc;
+ enum AVColorSpace color_space;
+ enum AVChromaLocation chroma_location;
+
+ /**
+ * Video only. Number of delayed frames.
+ */
+ int video_delay;
+
+ /**
+ * Audio only. The channel layout and number of channels.
+ */
+ AVChannelLayout ch_layout;
+ /**
+ * Audio only. The number of audio samples per second.
+ */
+ int sample_rate;
+ /**
+ * Audio only. The number of bytes per coded audio frame, required by some
+ * formats.
+ *
+ * Corresponds to nBlockAlign in WAVEFORMATEX.
+ */
+ int block_align;
+ /**
+ * Audio only. Audio frame size, if known. Required by some formats to be
+ * static.
+ */
+ int frame_size;
+
+ /**
+ * Audio only. The amount of padding (in samples) inserted by the encoder at
+ * the beginning of the audio. I.e. this number of leading decoded samples
+ * must be discarded by the caller to get the original audio without leading
+ * padding.
+ */
+ int initial_padding;
+ /**
+ * Audio only. The amount of padding (in samples) appended by the encoder to
+ * the end of the audio. I.e. this number of decoded samples must be
+ * discarded by the caller from the end of the stream to get the original
+ * audio without any trailing padding.
+ */
+ int trailing_padding;
+ /**
+ * Audio only. Number of samples to skip after a discontinuity.
+ */
+ int seek_preroll;
+} AVCodecParameters;
+
+/**
+ * Allocate a new AVCodecParameters and set its fields to default values
+ * (unknown/invalid/0). The returned struct must be freed with
+ * avcodec_parameters_free().
+ */
+AVCodecParameters* avcodec_parameters_alloc(void);
+
+/**
+ * Free an AVCodecParameters instance and everything associated with it and
+ * write NULL to the supplied pointer.
+ */
+void avcodec_parameters_free(AVCodecParameters** par);
+
+/**
+ * Copy the contents of src to dst. Any allocated fields in dst are freed and
+ * replaced with newly allocated duplicates of the corresponding fields in src.
+ *
+ * @return >= 0 on success, a negative AVERROR code on failure.
+ */
+int avcodec_parameters_copy(AVCodecParameters* dst,
+ const AVCodecParameters* src);
+
+/**
+ * This function is the same as av_get_audio_frame_duration(), except it works
+ * with AVCodecParameters instead of an AVCodecContext.
+ */
+int av_get_audio_frame_duration2(AVCodecParameters* par, int frame_bytes);
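+
+/*
+ * Illustrative lifecycle (a sketch; 'stream' stands for an AVStream
+ * obtained from a demuxer):
+ *
+ *   AVCodecParameters *par = avcodec_parameters_alloc();
+ *   if (par && avcodec_parameters_copy(par, stream->codecpar) >= 0) {
+ *     // inspect par->codec_type, par->width, par->sample_rate, ...
+ *   }
+ *   avcodec_parameters_free(&par);  // also sets par back to NULL
+ */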
+
+/**
+ * @}
+ */
+
+#endif // AVCODEC_CODEC_PAR_H
diff --git a/dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/defs.h b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/defs.h
new file mode 100644
index 0000000000..63df946e16
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/defs.h
@@ -0,0 +1,344 @@
+/*
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_DEFS_H
+#define AVCODEC_DEFS_H
+
+/**
+ * @file
+ * @ingroup libavc
+ * Misc types and constants that do not belong anywhere else.
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+
+/**
+ * @ingroup lavc_decoding
+ * Required number of additionally allocated bytes at the end of the input
+ * bitstream for decoding. This is mainly needed because some optimized
+ * bitstream readers read 32 or 64 bits at once and could read over the end.<br>
+ * Note: If the first 23 bits of the additional bytes are not 0, then damaged
+ * MPEG bitstreams could cause overread and segfault.
+ */
+#define AV_INPUT_BUFFER_PADDING_SIZE 64
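+
+/*
+ * Illustrative allocation (a sketch): over-allocate coded input by the
+ * padding and zero the tail so optimized readers never touch
+ * uninitialized memory.
+ *
+ *   uint8_t *buf = av_malloc(input_size + AV_INPUT_BUFFER_PADDING_SIZE);
+ *   if (buf)
+ *     memset(buf + input_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
+ */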
+
+/**
+ * Verify checksums embedded in the bitstream (could be of either encoded or
+ * decoded data, depending on the format) and print an error message on
+ * mismatch. If AV_EF_EXPLODE is also set, a mismatching checksum will result in
+ * the decoder/demuxer returning an error.
+ */
+#define AV_EF_CRCCHECK (1 << 0)
+#define AV_EF_BITSTREAM (1 << 1) ///< detect bitstream specification deviations
+#define AV_EF_BUFFER (1 << 2) ///< detect improper bitstream length
+#define AV_EF_EXPLODE (1 << 3) ///< abort decoding on minor error detection
+
+#define AV_EF_IGNORE_ERR (1 << 15) ///< ignore errors and continue
+#define AV_EF_CAREFUL \
+ (1 << 16) ///< consider things that violate the spec, are fast to calculate
+ ///< and have not been seen in the wild as errors
+#define AV_EF_COMPLIANT \
+ (1 << 17) ///< consider all spec non compliances as errors
+#define AV_EF_AGGRESSIVE \
+ (1 << 18) ///< consider things that a sane encoder/muxer should not do as an
+ ///< error
+
+#define FF_COMPLIANCE_VERY_STRICT \
+ 2 ///< Strictly conform to an older, more strict version of the spec or
+ ///< reference software.
+#define FF_COMPLIANCE_STRICT \
+ 1 ///< Strictly conform to all the things in the spec no matter what
+ ///< consequences.
+#define FF_COMPLIANCE_NORMAL 0
+#define FF_COMPLIANCE_UNOFFICIAL -1 ///< Allow unofficial extensions
+#define FF_COMPLIANCE_EXPERIMENTAL \
+ -2 ///< Allow nonstandardized experimental things.
+
+#define AV_PROFILE_UNKNOWN -99
+#define AV_PROFILE_RESERVED -100
+
+#define AV_PROFILE_AAC_MAIN 0
+#define AV_PROFILE_AAC_LOW 1
+#define AV_PROFILE_AAC_SSR 2
+#define AV_PROFILE_AAC_LTP 3
+#define AV_PROFILE_AAC_HE 4
+#define AV_PROFILE_AAC_HE_V2 28
+#define AV_PROFILE_AAC_LD 22
+#define AV_PROFILE_AAC_ELD 38
+#define AV_PROFILE_MPEG2_AAC_LOW 128
+#define AV_PROFILE_MPEG2_AAC_HE 131
+
+#define AV_PROFILE_DNXHD 0
+#define AV_PROFILE_DNXHR_LB 1
+#define AV_PROFILE_DNXHR_SQ 2
+#define AV_PROFILE_DNXHR_HQ 3
+#define AV_PROFILE_DNXHR_HQX 4
+#define AV_PROFILE_DNXHR_444 5
+
+#define AV_PROFILE_DTS 20
+#define AV_PROFILE_DTS_ES 30
+#define AV_PROFILE_DTS_96_24 40
+#define AV_PROFILE_DTS_HD_HRA 50
+#define AV_PROFILE_DTS_HD_MA 60
+#define AV_PROFILE_DTS_EXPRESS 70
+#define AV_PROFILE_DTS_HD_MA_X 61
+#define AV_PROFILE_DTS_HD_MA_X_IMAX 62
+
+#define AV_PROFILE_EAC3_DDP_ATMOS 30
+
+#define AV_PROFILE_TRUEHD_ATMOS 30
+
+#define AV_PROFILE_MPEG2_422 0
+#define AV_PROFILE_MPEG2_HIGH 1
+#define AV_PROFILE_MPEG2_SS 2
+#define AV_PROFILE_MPEG2_SNR_SCALABLE 3
+#define AV_PROFILE_MPEG2_MAIN 4
+#define AV_PROFILE_MPEG2_SIMPLE 5
+
+#define AV_PROFILE_H264_CONSTRAINED (1 << 9) // 8+1; constraint_set1_flag
+#define AV_PROFILE_H264_INTRA (1 << 11) // 8+3; constraint_set3_flag
+
+#define AV_PROFILE_H264_BASELINE 66
+#define AV_PROFILE_H264_CONSTRAINED_BASELINE (66 | AV_PROFILE_H264_CONSTRAINED)
+#define AV_PROFILE_H264_MAIN 77
+#define AV_PROFILE_H264_EXTENDED 88
+#define AV_PROFILE_H264_HIGH 100
+#define AV_PROFILE_H264_HIGH_10 110
+#define AV_PROFILE_H264_HIGH_10_INTRA (110 | AV_PROFILE_H264_INTRA)
+#define AV_PROFILE_H264_MULTIVIEW_HIGH 118
+#define AV_PROFILE_H264_HIGH_422 122
+#define AV_PROFILE_H264_HIGH_422_INTRA (122 | AV_PROFILE_H264_INTRA)
+#define AV_PROFILE_H264_STEREO_HIGH 128
+#define AV_PROFILE_H264_HIGH_444 144
+#define AV_PROFILE_H264_HIGH_444_PREDICTIVE 244
+#define AV_PROFILE_H264_HIGH_444_INTRA (244 | AV_PROFILE_H264_INTRA)
+#define AV_PROFILE_H264_CAVLC_444 44
+
+#define AV_PROFILE_VC1_SIMPLE 0
+#define AV_PROFILE_VC1_MAIN 1
+#define AV_PROFILE_VC1_COMPLEX 2
+#define AV_PROFILE_VC1_ADVANCED 3
+
+#define AV_PROFILE_MPEG4_SIMPLE 0
+#define AV_PROFILE_MPEG4_SIMPLE_SCALABLE 1
+#define AV_PROFILE_MPEG4_CORE 2
+#define AV_PROFILE_MPEG4_MAIN 3
+#define AV_PROFILE_MPEG4_N_BIT 4
+#define AV_PROFILE_MPEG4_SCALABLE_TEXTURE 5
+#define AV_PROFILE_MPEG4_SIMPLE_FACE_ANIMATION 6
+#define AV_PROFILE_MPEG4_BASIC_ANIMATED_TEXTURE 7
+#define AV_PROFILE_MPEG4_HYBRID 8
+#define AV_PROFILE_MPEG4_ADVANCED_REAL_TIME 9
+#define AV_PROFILE_MPEG4_CORE_SCALABLE 10
+#define AV_PROFILE_MPEG4_ADVANCED_CODING 11
+#define AV_PROFILE_MPEG4_ADVANCED_CORE 12
+#define AV_PROFILE_MPEG4_ADVANCED_SCALABLE_TEXTURE 13
+#define AV_PROFILE_MPEG4_SIMPLE_STUDIO 14
+#define AV_PROFILE_MPEG4_ADVANCED_SIMPLE 15
+
+#define AV_PROFILE_JPEG2000_CSTREAM_RESTRICTION_0 1
+#define AV_PROFILE_JPEG2000_CSTREAM_RESTRICTION_1 2
+#define AV_PROFILE_JPEG2000_CSTREAM_NO_RESTRICTION 32768
+#define AV_PROFILE_JPEG2000_DCINEMA_2K 3
+#define AV_PROFILE_JPEG2000_DCINEMA_4K 4
+
+#define AV_PROFILE_VP9_0 0
+#define AV_PROFILE_VP9_1 1
+#define AV_PROFILE_VP9_2 2
+#define AV_PROFILE_VP9_3 3
+
+#define AV_PROFILE_HEVC_MAIN 1
+#define AV_PROFILE_HEVC_MAIN_10 2
+#define AV_PROFILE_HEVC_MAIN_STILL_PICTURE 3
+#define AV_PROFILE_HEVC_REXT 4
+#define AV_PROFILE_HEVC_SCC 9
+
+#define AV_PROFILE_VVC_MAIN_10 1
+#define AV_PROFILE_VVC_MAIN_10_444 33
+
+#define AV_PROFILE_AV1_MAIN 0
+#define AV_PROFILE_AV1_HIGH 1
+#define AV_PROFILE_AV1_PROFESSIONAL 2
+
+#define AV_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT 0xc0
+#define AV_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT 0xc1
+#define AV_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT 0xc2
+#define AV_PROFILE_MJPEG_HUFFMAN_LOSSLESS 0xc3
+#define AV_PROFILE_MJPEG_JPEG_LS 0xf7
+
+#define AV_PROFILE_SBC_MSBC 1
+
+#define AV_PROFILE_PRORES_PROXY 0
+#define AV_PROFILE_PRORES_LT 1
+#define AV_PROFILE_PRORES_STANDARD 2
+#define AV_PROFILE_PRORES_HQ 3
+#define AV_PROFILE_PRORES_4444 4
+#define AV_PROFILE_PRORES_XQ 5
+
+#define AV_PROFILE_ARIB_PROFILE_A 0
+#define AV_PROFILE_ARIB_PROFILE_C 1
+
+#define AV_PROFILE_KLVA_SYNC 0
+#define AV_PROFILE_KLVA_ASYNC 1
+
+#define AV_PROFILE_EVC_BASELINE 0
+#define AV_PROFILE_EVC_MAIN 1
+
+#define AV_LEVEL_UNKNOWN -99
+
+enum AVFieldOrder {
+ AV_FIELD_UNKNOWN,
+ AV_FIELD_PROGRESSIVE,
+ AV_FIELD_TT, ///< Top coded_first, top displayed first
+ AV_FIELD_BB, ///< Bottom coded first, bottom displayed first
+ AV_FIELD_TB, ///< Top coded first, bottom displayed first
+ AV_FIELD_BT, ///< Bottom coded first, top displayed first
+};
+
+/**
+ * @ingroup lavc_decoding
+ */
+enum AVDiscard {
+ /* We leave some space between them for extensions (drop some
+ * keyframes for intra-only or drop just some bidir frames). */
+ AVDISCARD_NONE = -16, ///< discard nothing
+ AVDISCARD_DEFAULT =
+ 0, ///< discard useless packets like 0 size packets in avi
+ AVDISCARD_NONREF = 8, ///< discard all non reference
+ AVDISCARD_BIDIR = 16, ///< discard all bidirectional frames
+ AVDISCARD_NONINTRA = 24, ///< discard all non intra frames
+ AVDISCARD_NONKEY = 32, ///< discard all frames except keyframes
+ AVDISCARD_ALL = 48, ///< discard all
+};
+
+enum AVAudioServiceType {
+ AV_AUDIO_SERVICE_TYPE_MAIN = 0,
+ AV_AUDIO_SERVICE_TYPE_EFFECTS = 1,
+ AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED = 2,
+ AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED = 3,
+ AV_AUDIO_SERVICE_TYPE_DIALOGUE = 4,
+ AV_AUDIO_SERVICE_TYPE_COMMENTARY = 5,
+ AV_AUDIO_SERVICE_TYPE_EMERGENCY = 6,
+ AV_AUDIO_SERVICE_TYPE_VOICE_OVER = 7,
+ AV_AUDIO_SERVICE_TYPE_KARAOKE = 8,
+ AV_AUDIO_SERVICE_TYPE_NB, ///< Not part of ABI
+};
+
+/**
+ * Pan Scan area.
+ * This specifies the area which should be displayed.
+ * Note there may be multiple such areas for one frame.
+ */
+typedef struct AVPanScan {
+ /**
+ * id
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int id;
+
+ /**
+ * width and height in 1/16 pel
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int width;
+ int height;
+
+ /**
+ * position of the top left corner in 1/16 pel for up to 3 fields/frames
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int16_t position[3][2];
+} AVPanScan;
+
+/**
+ * This structure describes the bitrate properties of an encoded bitstream. It
+ * roughly corresponds to a subset of the VBV parameters for MPEG-2 or HRD
+ * parameters for H.264/HEVC.
+ */
+typedef struct AVCPBProperties {
+ /**
+ * Maximum bitrate of the stream, in bits per second.
+ * Zero if unknown or unspecified.
+ */
+ int64_t max_bitrate;
+ /**
+ * Minimum bitrate of the stream, in bits per second.
+ * Zero if unknown or unspecified.
+ */
+ int64_t min_bitrate;
+ /**
+ * Average bitrate of the stream, in bits per second.
+ * Zero if unknown or unspecified.
+ */
+ int64_t avg_bitrate;
+
+ /**
+ * The size of the buffer to which the ratecontrol is applied, in bits.
+ * Zero if unknown or unspecified.
+ */
+ int64_t buffer_size;
+
+ /**
+ * The delay between the time the packet this structure is associated with
+ * is received and the time when it should be decoded, in periods of a 27MHz
+ * clock.
+ *
+ * UINT64_MAX when unknown or unspecified.
+ */
+ uint64_t vbv_delay;
+} AVCPBProperties;
+
+/**
+ * Allocate a CPB properties structure and initialize its fields to default
+ * values.
+ *
+ * @param size if non-NULL, the size of the allocated struct will be written
+ * here. This is useful for embedding it in side data.
+ *
+ * @return the newly allocated struct or NULL on failure
+ */
+AVCPBProperties* av_cpb_properties_alloc(size_t* size);
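+
+/*
+ * Illustrative use (a sketch): allocate the struct and record its size
+ * for embedding as AV_PKT_DATA_CPB_PROPERTIES side data.
+ *
+ *   size_t size;
+ *   AVCPBProperties *props = av_cpb_properties_alloc(&size);
+ *   if (props) {
+ *     props->max_bitrate = 4000000;  // example value
+ *     // attach 'props' ('size' bytes) as stream or packet side data
+ *   }
+ */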
+
+/**
+ * This structure supplies correlation between a packet timestamp and a wall
+ * clock production time. The definition follows the Producer Reference Time
+ * ('prft') as defined in ISO/IEC 14496-12.
+ */
+typedef struct AVProducerReferenceTime {
+ /**
+ * A UTC timestamp, in microseconds, since Unix epoch (e.g., av_gettime()).
+ */
+ int64_t wallclock;
+ int flags;
+} AVProducerReferenceTime;
+
+/**
+ * Encode extradata length to a buffer. Used by xiph codecs.
+ *
+ * @param s buffer to write to; must be at least (v/255+1) bytes long
+ * @param v size of extradata in bytes
+ * @return number of bytes written to the buffer.
+ */
+unsigned int av_xiphlacing(unsigned char* s, unsigned int v);
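+
+/*
+ * Illustrative use (a sketch): lacing a 1000-byte extradata block.
+ *
+ *   unsigned char lace[1000 / 255 + 1];
+ *   unsigned int n = av_xiphlacing(lace, 1000);
+ *   // n == 4; lace holds { 255, 255, 255, 235 } (3 * 255 + 235 == 1000)
+ */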
+
+#endif // AVCODEC_DEFS_H
diff --git a/dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/packet.h b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/packet.h
new file mode 100644
index 0000000000..58fde480c9
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/packet.h
@@ -0,0 +1,871 @@
+/*
+ * AVPacket public API
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_PACKET_H
+#define AVCODEC_PACKET_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "libavutil/attributes.h"
+#include "libavutil/buffer.h"
+#include "libavutil/dict.h"
+#include "libavutil/rational.h"
+#include "libavutil/version.h"
+
+#include "libavcodec/version_major.h"
+
+/**
+ * @defgroup lavc_packet_side_data AVPacketSideData
+ *
+ * Types and functions for working with AVPacketSideData.
+ * @{
+ */
+enum AVPacketSideDataType {
+ /**
+ * An AV_PKT_DATA_PALETTE side data packet contains exactly AVPALETTE_SIZE
+ * bytes worth of palette. This side data signals that a new palette is
+ * present.
+ */
+ AV_PKT_DATA_PALETTE,
+
+ /**
+ * The AV_PKT_DATA_NEW_EXTRADATA is used to notify the codec or the format
+ * that the extradata buffer was changed and the receiving side should
+ * act upon it appropriately. The new extradata is embedded in the side
+ * data buffer and should be immediately used for processing the current
+ * frame or packet.
+ */
+ AV_PKT_DATA_NEW_EXTRADATA,
+
+ /**
+ * An AV_PKT_DATA_PARAM_CHANGE side data packet is laid out as follows:
+ * @code
+ * u32le param_flags
+ * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT)
+ * s32le channel_count
+ * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT)
+ * u64le channel_layout
+ * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE)
+ * s32le sample_rate
+ * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS)
+ * s32le width
+ * s32le height
+ * @endcode
+ */
+ AV_PKT_DATA_PARAM_CHANGE,
+
+ /**
+ * An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of
+ * structures with info about macroblocks relevant to splitting the
+ * packet into smaller packets on macroblock edges (e.g. as for RFC 2190).
+ * That is, it does not necessarily contain info about all macroblocks,
+ * as long as the distance between macroblocks in the info is smaller
+ * than the target payload size.
+ * Each MB info structure is 12 bytes, and is laid out as follows:
+ * @code
+ * u32le bit offset from the start of the packet
+ * u8 current quantizer at the start of the macroblock
+ * u8 GOB number
+ * u16le macroblock address within the GOB
+ * u8 horizontal MV predictor
+ * u8 vertical MV predictor
+ * u8 horizontal MV predictor for block number 3
+ * u8 vertical MV predictor for block number 3
+ * @endcode
+ */
+ AV_PKT_DATA_H263_MB_INFO,
+
+ /**
+ * This side data should be associated with an audio stream and contains
+ * ReplayGain information in form of the AVReplayGain struct.
+ */
+ AV_PKT_DATA_REPLAYGAIN,
+
+ /**
+ * This side data contains a 3x3 transformation matrix describing an affine
+ * transformation that needs to be applied to the decoded video frames for
+ * correct presentation.
+ *
+ * See libavutil/display.h for a detailed description of the data.
+ */
+ AV_PKT_DATA_DISPLAYMATRIX,
+
+ /**
+ * This side data should be associated with a video stream and contains
+ * Stereoscopic 3D information in form of the AVStereo3D struct.
+ */
+ AV_PKT_DATA_STEREO3D,
+
+ /**
+ * This side data should be associated with an audio stream and corresponds
+ * to enum AVAudioServiceType.
+ */
+ AV_PKT_DATA_AUDIO_SERVICE_TYPE,
+
+ /**
+ * This side data contains quality related information from the encoder.
+ * @code
+ * u32le quality factor of the compressed frame. Allowed range is between
+ *       1 (good) and FF_LAMBDA_MAX (bad).
+ * u8    picture type
+ * u8    error count
+ * u16   reserved
+ * u64le[error count] sum of squared differences between encoder in and output
+ * @endcode
+ */
+ AV_PKT_DATA_QUALITY_STATS,
+
+ /**
+ * This side data contains an integer value representing the stream index
+ * of a "fallback" track. A fallback track indicates an alternate
+ * track to use when the current track cannot be decoded for some reason,
+ * e.g. no decoder available for the codec.
+ */
+ AV_PKT_DATA_FALLBACK_TRACK,
+
+ /**
+ * This side data corresponds to the AVCPBProperties struct.
+ */
+ AV_PKT_DATA_CPB_PROPERTIES,
+
+ /**
+ * Recommends skipping the specified number of samples.
+ * @code
+ * u32le number of samples to skip from start of this packet
+ * u32le number of samples to skip from end of this packet
+ * u8 reason for start skip
+ * u8 reason for end skip (0=padding silence, 1=convergence)
+ * @endcode
+ */
+ AV_PKT_DATA_SKIP_SAMPLES,
+
+ /**
+ * An AV_PKT_DATA_JP_DUALMONO side data packet indicates that
+ * the packet may contain "dual mono" audio specific to Japanese DTV
+ * and, if so, recommends that only the selected channel be used.
+ * @code
+ * u8 selected channels (0=main/left, 1=sub/right, 2=both)
+ * @endcode
+ */
+ AV_PKT_DATA_JP_DUALMONO,
+
+ /**
+ * A list of zero terminated key/value strings. There is no end marker for
+ * the list, so it is required to rely on the side data size to stop.
+ */
+ AV_PKT_DATA_STRINGS_METADATA,
+
+ /**
+ * Subtitle event position
+ * @code
+ * u32le x1
+ * u32le y1
+ * u32le x2
+ * u32le y2
+ * @endcode
+ */
+ AV_PKT_DATA_SUBTITLE_POSITION,
+
+ /**
+ * Data found in BlockAdditional element of matroska container. There is
+ * no end marker for the data, so it is required to rely on the side data
+ * size to recognize the end. 8 byte id (as found in BlockAddId) followed
+ * by data.
+ */
+ AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL,
+
+ /**
+ * The optional first identifier line of a WebVTT cue.
+ */
+ AV_PKT_DATA_WEBVTT_IDENTIFIER,
+
+ /**
+ * The optional settings (rendering instructions) that immediately
+ * follow the timestamp specifier of a WebVTT cue.
+ */
+ AV_PKT_DATA_WEBVTT_SETTINGS,
+
+ /**
+ * A list of zero terminated key/value strings. There is no end marker for
+ * the list, so it is required to rely on the side data size to stop. This
+ * side data includes updated metadata which appeared in the stream.
+ */
+ AV_PKT_DATA_METADATA_UPDATE,
+
+ /**
+ * MPEGTS stream ID as uint8_t; this is required to pass the stream ID
+ * information from the demuxer to the corresponding muxer.
+ */
+ AV_PKT_DATA_MPEGTS_STREAM_ID,
+
+ /**
+ * Mastering display metadata (based on SMPTE-2086:2014). This metadata
+ * should be associated with a video stream and contains data in the form
+ * of the AVMasteringDisplayMetadata struct.
+ */
+ AV_PKT_DATA_MASTERING_DISPLAY_METADATA,
+
+ /**
+ * This side data should be associated with a video stream and corresponds
+ * to the AVSphericalMapping structure.
+ */
+ AV_PKT_DATA_SPHERICAL,
+
+ /**
+ * Content light level (based on CTA-861.3). This metadata should be
+ * associated with a video stream and contains data in the form of the
+ * AVContentLightMetadata struct.
+ */
+ AV_PKT_DATA_CONTENT_LIGHT_LEVEL,
+
+ /**
+ * ATSC A53 Part 4 Closed Captions. This metadata should be associated with
+ * a video stream. A53 CC bitstream is stored as uint8_t in
+ * AVPacketSideData.data. The number of bytes of CC data is
+ * AVPacketSideData.size.
+ */
+ AV_PKT_DATA_A53_CC,
+
+ /**
+ * This side data is encryption initialization data.
+ * The format is not part of ABI, use av_encryption_init_info_* methods to
+ * access.
+ */
+ AV_PKT_DATA_ENCRYPTION_INIT_INFO,
+
+ /**
+ * This side data contains encryption info for how to decrypt the packet.
+ * The format is not part of ABI, use av_encryption_info_* methods to access.
+ */
+ AV_PKT_DATA_ENCRYPTION_INFO,
+
+ /**
+ * Active Format Description data consisting of a single byte as specified
+ * in ETSI TS 101 154 using AVActiveFormatDescription enum.
+ */
+ AV_PKT_DATA_AFD,
+
+ /**
+ * Producer Reference Time data corresponding to the AVProducerReferenceTime
+ * struct, usually exported by some encoders (on demand through the prft flag
+ * set in the AVCodecContext export_side_data field).
+ */
+ AV_PKT_DATA_PRFT,
+
+ /**
+ * ICC profile data consisting of an opaque octet buffer following the
+ * format described by ISO 15076-1.
+ */
+ AV_PKT_DATA_ICC_PROFILE,
+
+ /**
+ * DOVI configuration
+ * ref:
+ * dolby-vision-bitstreams-within-the-iso-base-media-file-format-v2.1.2,
+ * section 2.2
+ * dolby-vision-bitstreams-in-mpeg-2-transport-stream-multiplex-v1.2,
+ * section 3.3 Tags are stored in struct AVDOVIDecoderConfigurationRecord.
+ */
+ AV_PKT_DATA_DOVI_CONF,
+
+ /**
+ * Timecode which conforms to SMPTE ST 12-1:2014. The data is an array of 4
+ * uint32_t where the first uint32_t describes how many (1-3) of the other
+ * timecodes are used. The timecode format is described in the documentation
+ * of av_timecode_get_smpte_from_framenum() function in libavutil/timecode.h.
+ */
+ AV_PKT_DATA_S12M_TIMECODE,
+
+ /**
+ * HDR10+ dynamic metadata associated with a video frame. The metadata is in
+ * the form of the AVDynamicHDRPlus struct and contains
+ * information for color volume transform - application 4 of
+ * SMPTE 2094-40:2016 standard.
+ */
+ AV_PKT_DATA_DYNAMIC_HDR10_PLUS,
+
+ /**
+ * IAMF Mix Gain Parameter Data associated with the audio frame. This metadata
+ * is in the form of the AVIAMFParamDefinition struct and contains information
+ * defined in sections 3.6.1 and 3.8.1 of the Immersive Audio Model and
+ * Formats standard.
+ */
+ AV_PKT_DATA_IAMF_MIX_GAIN_PARAM,
+
+ /**
+ * IAMF Demixing Info Parameter Data associated with the audio frame. This
+ * metadata is in the form of the AVIAMFParamDefinition struct and contains
+ * information defined in sections 3.6.1 and 3.8.2 of the Immersive Audio
+ * Model and Formats standard.
+ */
+ AV_PKT_DATA_IAMF_DEMIXING_INFO_PARAM,
+
+ /**
+ * IAMF Recon Gain Info Parameter Data associated with the audio frame. This
+ * metadata is in the form of the AVIAMFParamDefinition struct and contains
+ * information defined in sections 3.6.1 and 3.8.3 of the Immersive Audio
+ * Model and Formats standard.
+ */
+ AV_PKT_DATA_IAMF_RECON_GAIN_INFO_PARAM,
+
+ /**
+ * Ambient viewing environment metadata, as defined by H.274. This metadata
+ * should be associated with a video stream and contains data in the form
+ * of the AVAmbientViewingEnvironment struct.
+ */
+ AV_PKT_DATA_AMBIENT_VIEWING_ENVIRONMENT,
+
+ /**
+ * The number of side data types.
+ * This is not part of the public API/ABI in the sense that it may
+ * change when new side data types are added.
+ * This must stay the last enum value.
+ * If its value becomes huge, some code using it
+ * needs to be updated as it assumes it to be smaller than other limits.
+ */
+ AV_PKT_DATA_NB
+};
+
+#if FF_API_QUALITY_FACTOR
+# define AV_PKT_DATA_QUALITY_FACTOR AV_PKT_DATA_QUALITY_STATS // DEPRECATED
+#endif
+
+/**
+ * This structure stores auxiliary information for decoding, presenting, or
+ * otherwise processing the coded stream. It is typically exported by demuxers
+ * and encoders and can be fed to decoders and muxers either on a per-packet
+ * basis, or as global side data (applying to the entire coded stream).
+ *
+ * Global side data is handled as follows:
+ * - During demuxing, it may be exported through
+ * @ref AVStream.codecpar.side_data "AVStream's codec parameters", which can
+ * then be passed as input to decoders through the
+ * @ref AVCodecContext.coded_side_data "decoder context's side data", for
+ * initialization.
+ * - For muxing, it can be fed through @ref AVStream.codecpar.side_data
+ * "AVStream's codec parameters", typically the output of encoders through
+ * the @ref AVCodecContext.coded_side_data "encoder context's side data", for
+ * initialization.
+ *
+ * Packet specific side data is handled as follows:
+ * - During demuxing, it may be exported through @ref AVPacket.side_data
+ * "AVPacket's side data", which can then be passed as input to decoders.
+ * - For muxing, it can be fed through @ref AVPacket.side_data "AVPacket's
+ * side data", typically the output of encoders.
+ *
+ * Different modules may accept or export different types of side data
+ * depending on media type and codec. Refer to @ref AVPacketSideDataType for a
+ * list of defined types and where they may be found or used.
+ */
+typedef struct AVPacketSideData {
+ uint8_t* data;
+ size_t size;
+ enum AVPacketSideDataType type;
+} AVPacketSideData;
+
+/**
+ * Allocate a new packet side data.
+ *
+ * @param sd pointer to an array of side data to which the side data should
+ * be added. *sd may be NULL, in which case the array will be
+ * initialized.
+ * @param nb_sd pointer to an integer containing the number of entries in
+ * the array. The integer value will be increased by 1 on success.
+ * @param type side data type
+ * @param size desired side data size
+ * @param flags currently unused. Must be zero
+ *
+ * @return pointer to freshly allocated side data on success, or NULL otherwise.
+ */
+AVPacketSideData* av_packet_side_data_new(AVPacketSideData** psd, int* pnb_sd,
+ enum AVPacketSideDataType type,
+ size_t size, int flags);
+
+/**
+ * Wrap existing data as packet side data.
+ *
+ * @param sd pointer to an array of side data to which the side data should
+ * be added. *sd may be NULL, in which case the array will be
+ * initialized
+ * @param nb_sd pointer to an integer containing the number of entries in
+ * the array. The integer value will be increased by 1 on success.
+ * @param type side data type
+ * @param data a data array. It must be allocated with the av_malloc() family
+ * of functions. The ownership of the data is transferred to the
+ * side data array on success
+ * @param size size of the data array
+ * @param flags currently unused. Must be zero
+ *
+ * @return pointer to freshly allocated side data on success, or NULL otherwise
+ * On failure, the side data array is unchanged and the data remains
+ * owned by the caller.
+ */
+AVPacketSideData* av_packet_side_data_add(AVPacketSideData** sd, int* nb_sd,
+ enum AVPacketSideDataType type,
+ void* data, size_t size, int flags);
+
+/**
+ * Get side information from a side data array.
+ *
+ * @param sd the array from which the side data should be fetched
+ * @param nb_sd value containing the number of entries in the array.
+ * @param type desired side information type
+ *
+ * @return pointer to side data if present or NULL otherwise
+ */
+const AVPacketSideData* av_packet_side_data_get(const AVPacketSideData* sd,
+ int nb_sd,
+ enum AVPacketSideDataType type);
+
+/**
+ * Remove side data of the given type from a side data array.
+ *
+ * @param sd the array from which the side data should be removed
+ * @param nb_sd pointer to an integer containing the number of entries in
+ * the array. Will be reduced by the amount of entries removed
+ * upon return
+ * @param type side information type
+ */
+void av_packet_side_data_remove(AVPacketSideData* sd, int* nb_sd,
+ enum AVPacketSideDataType type);
+
+/**
+ * Convenience function to free all the side data stored in an array, and
+ * the array itself.
+ *
+ * @param sd pointer to array of side data to free. Will be set to NULL
+ * upon return.
+ * @param nb_sd pointer to an integer containing the number of entries in
+ * the array. Will be set to 0 upon return.
+ */
+void av_packet_side_data_free(AVPacketSideData** sd, int* nb_sd);
+
+const char* av_packet_side_data_name(enum AVPacketSideDataType type);
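+
+/*
+ * Illustrative side-data array round trip (a sketch; the 32-byte size is
+ * arbitrary):
+ *
+ *   AVPacketSideData *sd = NULL;
+ *   int nb_sd = 0;
+ *   AVPacketSideData *entry = av_packet_side_data_new(
+ *       &sd, &nb_sd, AV_PKT_DATA_NEW_EXTRADATA, 32, 0);
+ *   if (entry) {
+ *     // fill entry->data (32 bytes)
+ *   }
+ *   const AVPacketSideData *found = av_packet_side_data_get(
+ *       sd, nb_sd, AV_PKT_DATA_NEW_EXTRADATA);
+ *   av_packet_side_data_free(&sd, &nb_sd);  // sd == NULL, nb_sd == 0
+ */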
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup lavc_packet AVPacket
+ *
+ * Types and functions for working with AVPacket.
+ * @{
+ */
+
+/**
+ * This structure stores compressed data. It is typically exported by demuxers
+ * and then passed as input to decoders, or received as output from encoders and
+ * then passed to muxers.
+ *
+ * For video, it should typically contain one compressed frame. For audio it may
+ * contain several compressed frames. Encoders are allowed to output empty
+ * packets, with no compressed data, containing only side data
+ * (e.g. to update some stream parameters at the end of encoding).
+ *
+ * The semantics of data ownership depends on the buf field.
+ * If it is set, the packet data is dynamically allocated and is
+ * valid indefinitely until a call to av_packet_unref() reduces the
+ * reference count to 0.
+ *
+ * If the buf field is not set av_packet_ref() would make a copy instead
+ * of increasing the reference count.
+ *
+ * The side data is always allocated with av_malloc(), copied by
+ * av_packet_ref() and freed by av_packet_unref().
+ *
+ * sizeof(AVPacket) being a part of the public ABI is deprecated. Once
+ * av_init_packet() is removed, new packets will only be able to be allocated
+ * with av_packet_alloc(), and new fields may be added to the end of the struct
+ * with a minor bump.
+ *
+ * @see av_packet_alloc
+ * @see av_packet_ref
+ * @see av_packet_unref
+ */
+typedef struct AVPacket {
+ /**
+ * A reference to the reference-counted buffer where the packet data is
+ * stored.
+ * May be NULL, then the packet data is not reference-counted.
+ */
+ AVBufferRef* buf;
+ /**
+ * Presentation timestamp in AVStream->time_base units; the time at which
+ * the decompressed packet will be presented to the user.
+ * Can be AV_NOPTS_VALUE if it is not stored in the file.
+ * pts MUST be greater than or equal to dts, as presentation cannot happen before
+ * decompression, unless one wants to view hex dumps. Some formats misuse
+ * the terms dts and pts/cts to mean something different. Such timestamps
+ * must be converted to true pts/dts before they are stored in AVPacket.
+ */
+ int64_t pts;
+ /**
+ * Decompression timestamp in AVStream->time_base units; the time at which
+ * the packet is decompressed.
+ * Can be AV_NOPTS_VALUE if it is not stored in the file.
+ */
+ int64_t dts;
+ uint8_t* data;
+ int size;
+ int stream_index;
+ /**
+ * A combination of AV_PKT_FLAG values
+ */
+ int flags;
+ /**
+ * Additional packet data that can be provided by the container.
+ * Packet can contain several types of side information.
+ */
+ AVPacketSideData* side_data;
+ int side_data_elems;
+
+ /**
+ * Duration of this packet in AVStream->time_base units, 0 if unknown.
+ * Equals next_pts - this_pts in presentation order.
+ */
+ int64_t duration;
+
+ int64_t pos; ///< byte position in stream, -1 if unknown
+
+ /**
+ * for some private data of the user
+ */
+ void* opaque;
+
+ /**
+ * AVBufferRef for free use by the API user. FFmpeg will never check the
+ * contents of the buffer ref. FFmpeg calls av_buffer_unref() on it when
+ * the packet is unreferenced. av_packet_copy_props() calls create a new
+ * reference with av_buffer_ref() for the target packet's opaque_ref field.
+ *
+ * This is unrelated to the opaque field, although it serves a similar
+ * purpose.
+ */
+ AVBufferRef* opaque_ref;
+
+ /**
+ * Time base of the packet's timestamps.
+ * In the future, this field may be set on packets output by encoders or
+ * demuxers, but its value will be by default ignored on input to decoders
+ * or muxers.
+ */
+ AVRational time_base;
+} AVPacket;
+
+#if FF_API_INIT_PACKET
+attribute_deprecated typedef struct AVPacketList {
+ AVPacket pkt;
+ struct AVPacketList* next;
+} AVPacketList;
+#endif
+
+#define AV_PKT_FLAG_KEY 0x0001 ///< The packet contains a keyframe
+#define AV_PKT_FLAG_CORRUPT 0x0002 ///< The packet content is corrupted
+/**
+ * Flag is used to discard packets which are required to maintain valid
+ * decoder state but are not required for output and should be dropped
+ * after decoding.
+ **/
+#define AV_PKT_FLAG_DISCARD 0x0004
+/**
+ * The packet comes from a trusted source.
+ *
+ * Otherwise-unsafe constructs such as arbitrary pointers to data
+ * outside the packet may be followed.
+ */
+#define AV_PKT_FLAG_TRUSTED 0x0008
+/**
+ * Flag is used to indicate packets that contain frames that can
+ * be discarded by the decoder. I.e. Non-reference frames.
+ */
+#define AV_PKT_FLAG_DISPOSABLE 0x0010
+
+enum AVSideDataParamChangeFlags {
+ AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE = 0x0004,
+ AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS = 0x0008,
+};
+
+/**
+ * Allocate an AVPacket and set its fields to default values. The resulting
+ * struct must be freed using av_packet_free().
+ *
+ * @return An AVPacket filled with default values or NULL on failure.
+ *
+ * @note this only allocates the AVPacket itself, not the data buffers. Those
+ * must be allocated through other means such as av_new_packet.
+ *
+ * @see av_new_packet
+ */
+AVPacket* av_packet_alloc(void);
+
+/**
+ * Create a new packet that references the same data as src.
+ *
+ * This is a shortcut for av_packet_alloc()+av_packet_ref().
+ *
+ * @return newly created AVPacket on success, NULL on error.
+ *
+ * @see av_packet_alloc
+ * @see av_packet_ref
+ */
+AVPacket* av_packet_clone(const AVPacket* src);
+
+/**
+ * Free the packet, if the packet is reference counted, it will be
+ * unreferenced first.
+ *
+ * @param pkt packet to be freed. The pointer will be set to NULL.
+ * @note passing NULL is a no-op.
+ */
+void av_packet_free(AVPacket** pkt);
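+
+/*
+ * Illustrative packet lifecycle (a sketch; av_new_packet() is declared
+ * further down in this header):
+ *
+ *   AVPacket *pkt = av_packet_alloc();
+ *   if (pkt && av_new_packet(pkt, 1024) == 0) {
+ *     // fill pkt->data, set pts/dts/stream_index, hand to a muxer, ...
+ *     av_packet_unref(pkt);  // drop the payload, keep the struct
+ *   }
+ *   av_packet_free(&pkt);    // frees the struct, sets pkt to NULL
+ */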
+
+#if FF_API_INIT_PACKET
+/**
+ * Initialize optional fields of a packet with default values.
+ *
+ * Note, this does not touch the data and size members, which have to be
+ * initialized separately.
+ *
+ * @param pkt packet
+ *
+ * @see av_packet_alloc
+ * @see av_packet_unref
+ *
+ * @deprecated This function is deprecated. Once it's removed,
+ sizeof(AVPacket) will not be a part of the ABI anymore.
+ */
+attribute_deprecated void av_init_packet(AVPacket* pkt);
+#endif
+
+/**
+ * Allocate the payload of a packet and initialize its fields with
+ * default values.
+ *
+ * @param pkt packet
+ * @param size wanted payload size
+ * @return 0 if OK, AVERROR_xxx otherwise
+ */
+int av_new_packet(AVPacket* pkt, int size);
+
+/**
+ * Reduce packet size, correctly zeroing padding
+ *
+ * @param pkt packet
+ * @param size new size
+ */
+void av_shrink_packet(AVPacket* pkt, int size);
+
+/**
+ * Increase packet size, correctly zeroing padding
+ *
+ * @param pkt packet
+ * @param grow_by number of bytes by which to increase the size of the packet
+ */
+int av_grow_packet(AVPacket* pkt, int grow_by);
+
+/**
+ * Initialize a reference-counted packet from av_malloc()ed data.
+ *
+ * @param pkt packet to be initialized. This function will set the data, size,
+ * and buf fields, all others are left untouched.
+ * @param data Data allocated by av_malloc() to be used as packet data. If this
+ * function returns successfully, the data is owned by the underlying
+ * AVBuffer. The caller may not access the data through other means.
+ * @param size size of data in bytes, without the padding. I.e. the full buffer
+ * size is assumed to be size + AV_INPUT_BUFFER_PADDING_SIZE.
+ *
+ * @return 0 on success, a negative AVERROR on error
+ */
+int av_packet_from_data(AVPacket* pkt, uint8_t* data, int size);
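+
+/*
+ * Illustrative use (a sketch; error handling abbreviated). Note the
+ * mandatory decode padding on the allocation:
+ *
+ *   int size = 4096;
+ *   uint8_t *data = av_malloc(size + AV_INPUT_BUFFER_PADDING_SIZE);
+ *   if (data && av_packet_from_data(pkt, data, size) < 0)
+ *     av_free(data);  // ownership transfers only on success
+ */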
+
+/**
+ * Allocate new side information for a packet.
+ *
+ * @param pkt packet
+ * @param type side information type
+ * @param size side information size
+ * @return pointer to freshly allocated data, or NULL otherwise
+ */
+uint8_t* av_packet_new_side_data(AVPacket* pkt, enum AVPacketSideDataType type,
+ size_t size);
+
+/**
+ * Wrap an existing array as a packet side data.
+ *
+ * @param pkt packet
+ * @param type side information type
+ * @param data the side data array. It must be allocated with the av_malloc()
+ * family of functions. The ownership of the data is transferred to
+ * pkt.
+ * @param size side information size
+ * @return a non-negative number on success, a negative AVERROR code on
+ * failure. On failure, the packet is unchanged and the data remains
+ * owned by the caller.
+ */
+int av_packet_add_side_data(AVPacket* pkt, enum AVPacketSideDataType type,
+ uint8_t* data, size_t size);
+
+/**
+ * Shrink the already allocated side data buffer
+ *
+ * @param pkt packet
+ * @param type side information type
+ * @param size new side information size
+ * @return 0 on success, < 0 on failure
+ */
+int av_packet_shrink_side_data(AVPacket* pkt, enum AVPacketSideDataType type,
+ size_t size);
+
+/**
+ * Get side information from packet.
+ *
+ * @param pkt packet
+ * @param type desired side information type
+ * @param size If supplied, *size will be set to the size of the side data
+ * or to zero if the desired side data is not present.
+ * @return pointer to data if present or NULL otherwise
+ */
+uint8_t* av_packet_get_side_data(const AVPacket* pkt,
+ enum AVPacketSideDataType type, size_t* size);
+
+/**
+ * Pack a dictionary for use in side_data.
+ *
+ * @param dict The dictionary to pack.
+ * @param size pointer to store the size of the returned data
+ * @return pointer to data if successful, NULL otherwise
+ */
+uint8_t* av_packet_pack_dictionary(AVDictionary* dict, size_t* size);
+/**
+ * Unpack a dictionary from side_data.
+ *
+ * @param data data from side_data
+ * @param size size of the data
+ * @param dict the metadata storage dictionary
+ * @return 0 on success, < 0 on failure
+ */
+int av_packet_unpack_dictionary(const uint8_t* data, size_t size,
+ AVDictionary** dict);
+
+/**
+ * Convenience function to free all the side data stored.
+ * All the other fields stay untouched.
+ *
+ * @param pkt packet
+ */
+void av_packet_free_side_data(AVPacket* pkt);
+
+/**
+ * Setup a new reference to the data described by a given packet
+ *
+ * If src is reference-counted, setup dst as a new reference to the
+ * buffer in src. Otherwise allocate a new buffer in dst and copy the
+ * data from src into it.
+ *
+ * All the other fields are copied from src.
+ *
+ * @see av_packet_unref
+ *
+ * @param dst Destination packet. Will be completely overwritten.
+ * @param src Source packet
+ *
+ * @return 0 on success, a negative AVERROR on error. On error, dst
+ * will be blank (as if returned by av_packet_alloc()).
+ */
+int av_packet_ref(AVPacket* dst, const AVPacket* src);
+
+/**
+ * Wipe the packet.
+ *
+ * Unreference the buffer referenced by the packet and reset the
+ * remaining packet fields to their default values.
+ *
+ * @param pkt The packet to be unreferenced.
+ */
+void av_packet_unref(AVPacket* pkt);
+
+/**
+ * Move every field in src to dst and reset src.
+ *
+ * @see av_packet_unref
+ *
+ * @param src Source packet, will be reset
+ * @param dst Destination packet
+ */
+void av_packet_move_ref(AVPacket* dst, AVPacket* src);
+
+/**
+ * Copy only "properties" fields from src to dst.
+ *
+ * Properties for the purpose of this function are all the fields
+ * beside those related to the packet data (buf, data, size)
+ *
+ * @param dst Destination packet
+ * @param src Source packet
+ *
+ * @return 0 on success, AVERROR on failure.
+ */
+int av_packet_copy_props(AVPacket* dst, const AVPacket* src);
+
+/**
+ * Ensure the data described by a given packet is reference counted.
+ *
+ * @note This function does not ensure that the reference will be writable.
+ * Use av_packet_make_writable instead for that purpose.
+ *
+ * @see av_packet_ref
+ * @see av_packet_make_writable
+ *
+ * @param pkt packet whose data should be made reference counted.
+ *
+ * @return 0 on success, a negative AVERROR on error. On failure, the
+ * packet is unchanged.
+ */
+int av_packet_make_refcounted(AVPacket* pkt);
+
+/**
+ * Create a writable reference for the data described by a given packet,
+ * avoiding data copy if possible.
+ *
+ * @param pkt Packet whose data should be made writable.
+ *
+ * @return 0 on success, a negative AVERROR on failure. On failure, the
+ * packet is unchanged.
+ */
+int av_packet_make_writable(AVPacket* pkt);
+
+/**
+ * Convert valid timing fields (timestamps / durations) in a packet from one
+ * timebase to another. Timestamps with unknown values (AV_NOPTS_VALUE) will be
+ * ignored.
+ *
+ * @param pkt packet on which the conversion will be performed
+ * @param tb_src source timebase, in which the timing fields in pkt are
+ * expressed
+ * @param tb_dst destination timebase, to which the timing fields will be
+ * converted
+ */
+void av_packet_rescale_ts(AVPacket* pkt, AVRational tb_src, AVRational tb_dst);
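+
+/*
+ * Illustrative use (a sketch): convert 90 kHz MPEG-TS timestamps to
+ * milliseconds.
+ *
+ *   av_packet_rescale_ts(pkt, (AVRational){1, 90000}, (AVRational){1, 1000});
+ */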
+
+/**
+ * @}
+ */
+
+#endif // AVCODEC_PACKET_H
diff --git a/dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/vdpau.h b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/vdpau.h
new file mode 100644
index 0000000000..4f0d956ce7
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/vdpau.h
@@ -0,0 +1,168 @@
+/*
+ * The Video Decode and Presentation API for UNIX (VDPAU) is used for
+ * hardware-accelerated decoding of MPEG-1/2, H.264 and VC-1.
+ *
+ * Copyright (C) 2008 NVIDIA
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VDPAU_H
+#define AVCODEC_VDPAU_H
+
+/**
+ * @file
+ * @ingroup lavc_codec_hwaccel_vdpau
+ * Public libavcodec VDPAU header.
+ */
+
+/**
+ * @defgroup lavc_codec_hwaccel_vdpau VDPAU Decoder and Renderer
+ * @ingroup lavc_codec_hwaccel
+ *
+ * VDPAU hardware acceleration has two modules
+ * - VDPAU decoding
+ * - VDPAU presentation
+ *
+ * The VDPAU decoding module parses all headers using FFmpeg
+ * parsing mechanisms and uses VDPAU for the actual decoding.
+ *
+ * As per the current implementation, the actual decoding
+ * and rendering (API calls) are done as part of the VDPAU
+ * presentation (vo_vdpau.c) module.
+ *
+ * @{
+ */
+
+#include <vdpau/vdpau.h>
+
+#include "libavutil/avconfig.h"
+#include "libavutil/attributes.h"
+
+#include "avcodec.h"
+
+struct AVCodecContext;
+struct AVFrame;
+
+typedef int (*AVVDPAU_Render2)(struct AVCodecContext*, struct AVFrame*,
+ const VdpPictureInfo*, uint32_t,
+ const VdpBitstreamBuffer*);
+
+/**
+ * This structure is used to share data between the libavcodec library and
+ * the client video application.
+ * This structure will be allocated and stored in AVCodecContext.hwaccel_context
+ * by av_vdpau_bind_context(). Members can be set by the user once
+ * during initialization or through each AVCodecContext.get_buffer()
+ * function call. In any case, they must be valid prior to calling
+ * decoding functions.
+ *
+ * The size of this structure is not a part of the public ABI and must not
+ * be used outside of libavcodec.
+ */
+typedef struct AVVDPAUContext {
+ /**
+ * VDPAU decoder handle
+ *
+ * Set by user.
+ */
+ VdpDecoder decoder;
+
+ /**
+ * VDPAU decoder render callback
+ *
+ * Set by the user.
+ */
+ VdpDecoderRender* render;
+
+ AVVDPAU_Render2 render2;
+} AVVDPAUContext;
+
+#if FF_API_VDPAU_ALLOC_GET_SET
+/**
+ * @brief allocation function for AVVDPAUContext
+ *
+ * Allows extending the struct without breaking API/ABI
+ * @deprecated use av_vdpau_bind_context() instead
+ */
+attribute_deprecated AVVDPAUContext* av_alloc_vdpaucontext(void);
+
+/**
+ * @deprecated render2 is public and can be accessed directly
+ */
+attribute_deprecated AVVDPAU_Render2
+av_vdpau_hwaccel_get_render2(const AVVDPAUContext*);
+/**
+ * @deprecated render2 is public and can be accessed directly
+ */
+attribute_deprecated void av_vdpau_hwaccel_set_render2(AVVDPAUContext*,
+ AVVDPAU_Render2);
+#endif
+
+/**
+ * Associate a VDPAU device with a codec context for hardware acceleration.
+ * This function is meant to be called from the get_format() codec callback,
+ * or earlier. It can also be called after avcodec_flush_buffers() to change
+ * the underlying VDPAU device mid-stream (e.g. to recover from non-transparent
+ * display preemption).
+ *
+ * @note get_format() must return AV_PIX_FMT_VDPAU if this function completes
+ * successfully.
+ *
+ * @param avctx decoding context whose get_format() callback is invoked
+ * @param device VDPAU device handle to use for hardware acceleration
+ * @param get_proc_address VDPAU device driver
+ * @param flags zero or more OR'd AV_HWACCEL_FLAG_* flags
+ *
+ * @return 0 on success, an AVERROR code on failure.
+ */
+int av_vdpau_bind_context(AVCodecContext* avctx, VdpDevice device,
+ VdpGetProcAddress* get_proc_address, unsigned flags);
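+
+/*
+ * Illustrative get_format() callback (a sketch; 'device' and
+ * 'get_proc_address' are assumed to come from the application's VDPAU
+ * device setup, e.g. vdp_device_create_x11()):
+ *
+ *   static enum AVPixelFormat get_format(AVCodecContext *avctx,
+ *                                        const enum AVPixelFormat *fmts) {
+ *     if (av_vdpau_bind_context(avctx, device, get_proc_address, 0) < 0)
+ *       return AV_PIX_FMT_NONE;
+ *     return AV_PIX_FMT_VDPAU;
+ *   }
+ */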
+
+/**
+ * Gets the parameters to create an adequate VDPAU video surface for the codec
+ * context using VDPAU hardware decoding acceleration.
+ *
+ * @note Behavior is undefined if the context was not successfully bound to a
+ * VDPAU device using av_vdpau_bind_context().
+ *
+ * @param avctx the codec context being used for decoding the stream
+ * @param type storage space for the VDPAU video surface chroma type
+ * (or NULL to ignore)
+ * @param width storage space for the VDPAU video surface pixel width
+ * (or NULL to ignore)
+ * @param height storage space for the VDPAU video surface pixel height
+ * (or NULL to ignore)
+ *
+ * @return 0 on success, a negative AVERROR code on failure.
+ */
+int av_vdpau_get_surface_parameters(AVCodecContext* avctx, VdpChromaType* type,
+ uint32_t* width, uint32_t* height);
+
+#if FF_API_VDPAU_ALLOC_GET_SET
+/**
+ * Allocate an AVVDPAUContext.
+ *
+ * @return Newly-allocated AVVDPAUContext or NULL on failure.
+ * @deprecated use av_vdpau_bind_context() instead
+ */
+attribute_deprecated AVVDPAUContext* av_vdpau_alloc_context(void);
+#endif
+
+/** @} */
+
+#endif /* AVCODEC_VDPAU_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/version.h b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/version.h
new file mode 100644
index 0000000000..8d91b7db19
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/version.h
@@ -0,0 +1,45 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VERSION_H
+#define AVCODEC_VERSION_H
+
+/**
+ * @file
+ * @ingroup libavc
+ * Libavcodec version macros.
+ */
+
+#include "libavutil/version.h"
+
+#include "version_major.h"
+
+#define LIBAVCODEC_VERSION_MINOR 5
+#define LIBAVCODEC_VERSION_MICRO 101
+
+#define LIBAVCODEC_VERSION_INT \
+ AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, LIBAVCODEC_VERSION_MINOR, \
+ LIBAVCODEC_VERSION_MICRO)
+#define LIBAVCODEC_VERSION \
+ AV_VERSION(LIBAVCODEC_VERSION_MAJOR, LIBAVCODEC_VERSION_MINOR, \
+ LIBAVCODEC_VERSION_MICRO)
+#define LIBAVCODEC_BUILD LIBAVCODEC_VERSION_INT
+
+#define LIBAVCODEC_IDENT "Lavc" AV_STRINGIFY(LIBAVCODEC_VERSION)
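+
+/*
+ * Illustrative compile-time version gate (a sketch):
+ *
+ *   #if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(61, 5, 101)
+ *   // rely on APIs available in Lavc >= 61.5.101
+ *   #endif
+ */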
+
+#endif /* AVCODEC_VERSION_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/version_major.h b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/version_major.h
new file mode 100644
index 0000000000..f987a3dd04
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavcodec/version_major.h
@@ -0,0 +1,52 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VERSION_MAJOR_H
+#define AVCODEC_VERSION_MAJOR_H
+
+/**
+ * @file
+ * @ingroup libavc
+ * Libavcodec version macros.
+ */
+
+#define LIBAVCODEC_VERSION_MAJOR 61
+
+/**
+ * FF_API_* defines may be placed below to indicate public API that will be
+ * dropped at a future version bump. The defines themselves are not part of
+ * the public API and may change, break or disappear at any time.
+ *
+ * @note When bumping the major version it is recommended to manually
+ * disable each FF_API_* in its own commit instead of disabling them all
+ * at once through the bump. This improves the git bisect-ability of the change.
+ */
+
+#define FF_API_INIT_PACKET (LIBAVCODEC_VERSION_MAJOR < 62)
+#define FF_API_SUBFRAMES (LIBAVCODEC_VERSION_MAJOR < 62)
+#define FF_API_TICKS_PER_FRAME (LIBAVCODEC_VERSION_MAJOR < 62)
+#define FF_API_DROPCHANGED (LIBAVCODEC_VERSION_MAJOR < 62)
+
+#define FF_API_AVFFT (LIBAVCODEC_VERSION_MAJOR < 62)
+#define FF_API_FF_PROFILE_LEVEL (LIBAVCODEC_VERSION_MAJOR < 62)
+#define FF_API_AVCODEC_CLOSE (LIBAVCODEC_VERSION_MAJOR < 62)
+#define FF_API_BUFFER_MIN_SIZE (LIBAVCODEC_VERSION_MAJOR < 62)
+#define FF_API_VDPAU_ALLOC_GET_SET (LIBAVCODEC_VERSION_MAJOR < 62)
+#define FF_API_QUALITY_FACTOR (LIBAVCODEC_VERSION_MAJOR < 62)
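+
+/*
+ * Illustrative use of an FF_API_* gate in caller code (a sketch):
+ *
+ *   #if FF_API_INIT_PACKET
+ *   av_init_packet(&pkt);     // deprecated path, removed at major 62
+ *   #else
+ *   pkt = av_packet_alloc();  // only the replacement API remains
+ *   #endif
+ */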
+
+#endif /* AVCODEC_VERSION_MAJOR_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/attributes.h b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/attributes.h
new file mode 100644
index 0000000000..774d1fe916
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/attributes.h
@@ -0,0 +1,173 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Macro definitions for various function/variable attributes
+ */
+
+#ifndef AVUTIL_ATTRIBUTES_H
+#define AVUTIL_ATTRIBUTES_H
+
+#ifdef __GNUC__
+# define AV_GCC_VERSION_AT_LEAST(x, y) \
+ (__GNUC__ > (x) || __GNUC__ == (x) && __GNUC_MINOR__ >= (y))
+# define AV_GCC_VERSION_AT_MOST(x, y) \
+ (__GNUC__ < (x) || __GNUC__ == (x) && __GNUC_MINOR__ <= (y))
+#else
+# define AV_GCC_VERSION_AT_LEAST(x, y) 0
+# define AV_GCC_VERSION_AT_MOST(x, y) 0
+#endif
+
+#ifdef __has_builtin
+# define AV_HAS_BUILTIN(x) __has_builtin(x)
+#else
+# define AV_HAS_BUILTIN(x) 0
+#endif
+
+#ifndef av_always_inline
+# if AV_GCC_VERSION_AT_LEAST(3, 1)
+# define av_always_inline __attribute__((always_inline)) inline
+# elif defined(_MSC_VER)
+# define av_always_inline __forceinline
+# else
+# define av_always_inline inline
+# endif
+#endif
+
+#ifndef av_extern_inline
+# if defined(__ICL) && __ICL >= 1210 || defined(__GNUC_STDC_INLINE__)
+# define av_extern_inline extern inline
+# else
+# define av_extern_inline inline
+# endif
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(3, 4)
+# define av_warn_unused_result __attribute__((warn_unused_result))
+#else
+# define av_warn_unused_result
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(3, 1)
+# define av_noinline __attribute__((noinline))
+#elif defined(_MSC_VER)
+# define av_noinline __declspec(noinline)
+#else
+# define av_noinline
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(3, 1) || defined(__clang__)
+# define av_pure __attribute__((pure))
+#else
+# define av_pure
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(2, 6) || defined(__clang__)
+# define av_const __attribute__((const))
+#else
+# define av_const
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(4, 3) || defined(__clang__)
+# define av_cold __attribute__((cold))
+#else
+# define av_cold
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(4, 1) && !defined(__llvm__)
+# define av_flatten __attribute__((flatten))
+#else
+# define av_flatten
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(3, 1)
+# define attribute_deprecated __attribute__((deprecated))
+#elif defined(_MSC_VER)
+# define attribute_deprecated __declspec(deprecated)
+#else
+# define attribute_deprecated
+#endif
+
+/**
+ * Disable warnings about deprecated features
+ * This is useful for sections of code kept for backward compatibility and
+ * scheduled for removal.
+ */
+#ifndef AV_NOWARN_DEPRECATED
+# if AV_GCC_VERSION_AT_LEAST(4, 6) || defined(__clang__)
+# define AV_NOWARN_DEPRECATED(code) \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") \
+ code _Pragma("GCC diagnostic pop")
+# elif defined(_MSC_VER)
+# define AV_NOWARN_DEPRECATED(code) \
+ __pragma(warning(push)) __pragma(warning(disable : 4996)) code; \
+ __pragma(warning(pop))
+# else
+# define AV_NOWARN_DEPRECATED(code) code
+# endif
+#endif
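+
+/*
+ * Illustrative use (a sketch): silence the deprecation warning for one
+ * legacy call without disabling it globally.
+ *
+ *   AV_NOWARN_DEPRECATED(av_init_packet(&pkt);)
+ */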
+
+#if defined(__GNUC__) || defined(__clang__)
+# define av_unused __attribute__((unused))
+#else
+# define av_unused
+#endif
+
+/**
+ * Mark a variable as used and prevent the compiler from optimizing it
+ * away. This is useful for variables accessed only from inline
+ * assembler without the compiler being aware.
+ */
+#if AV_GCC_VERSION_AT_LEAST(3, 1) || defined(__clang__)
+# define av_used __attribute__((used))
+#else
+# define av_used
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(3, 3) || defined(__clang__)
+# define av_alias __attribute__((may_alias))
+#else
+# define av_alias
+#endif
+
+#if (defined(__GNUC__) || defined(__clang__)) && !defined(__INTEL_COMPILER)
+# define av_uninit(x) x = x
+#else
+# define av_uninit(x) x
+#endif
+
+#if defined(__GNUC__) || defined(__clang__)
+# define av_builtin_constant_p __builtin_constant_p
+# define av_printf_format(fmtpos, attrpos) \
+ __attribute__((__format__(__printf__, fmtpos, attrpos)))
+#else
+# define av_builtin_constant_p(x) 0
+# define av_printf_format(fmtpos, attrpos)
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(2, 5) || defined(__clang__)
+# define av_noreturn __attribute__((noreturn))
+#else
+# define av_noreturn
+#endif
+
+#endif /* AVUTIL_ATTRIBUTES_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/avconfig.h b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/avconfig.h
new file mode 100644
index 0000000000..c289fbb551
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/avconfig.h
@@ -0,0 +1,6 @@
+/* Generated by ffmpeg configure */
+#ifndef AVUTIL_AVCONFIG_H
+#define AVUTIL_AVCONFIG_H
+#define AV_HAVE_BIGENDIAN 0
+#define AV_HAVE_FAST_UNALIGNED 1
+#endif /* AVUTIL_AVCONFIG_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/avutil.h b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/avutil.h
new file mode 100644
index 0000000000..480a64e852
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/avutil.h
@@ -0,0 +1,363 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_AVUTIL_H
+#define AVUTIL_AVUTIL_H
+
+/**
+ * @file
+ * @ingroup lavu
+ * Convenience header that includes @ref lavu "libavutil"'s core.
+ */
+
+/**
+ * @mainpage
+ *
+ * @section ffmpeg_intro Introduction
+ *
+ * This document describes the usage of the different libraries
+ * provided by FFmpeg.
+ *
+ * @li @ref libavc "libavcodec" encoding/decoding library
+ * @li @ref lavfi "libavfilter" graph-based frame editing library
+ * @li @ref libavf "libavformat" I/O and muxing/demuxing library
+ * @li @ref lavd "libavdevice" special devices muxing/demuxing library
+ * @li @ref lavu "libavutil" common utility library
+ * @li @ref lswr "libswresample" audio resampling, format conversion and mixing
+ * @li @ref lpp "libpostproc" post processing library
+ * @li @ref libsws "libswscale" color conversion and scaling library
+ *
+ * @section ffmpeg_versioning Versioning and compatibility
+ *
+ * Each of the FFmpeg libraries contains a version.h header, which defines a
+ * major, minor and micro version number with the
+ * <em>LIBRARYNAME_VERSION_{MAJOR,MINOR,MICRO}</em> macros. The major version
+ * number is incremented with backward incompatible changes - e.g. removing
+ * parts of the public API, reordering public struct members, etc. The minor
+ * version number is incremented for backward compatible API changes or major
+ * new features - e.g. adding a new public function or a new decoder. The micro
+ * version number is incremented for smaller changes that a calling program
+ * might still want to check for - e.g. changing behavior in a previously
+ * unspecified situation.
+ *
+ * FFmpeg guarantees backward API and ABI compatibility for each library as long
+ * as its major version number is unchanged. This means that no public symbols
+ * will be removed or renamed. Types and names of the public struct members and
+ * values of public macros and enums will remain the same (unless they were
+ * explicitly declared as not part of the public API). Documented behavior will
+ * not change.
+ *
+ * In other words, any correct program that works with a given FFmpeg snapshot
+ * should work just as well without any changes with any later snapshot with the
+ * same major versions. This applies both to rebuilding the program against
+ * new FFmpeg versions and to replacing the dynamic FFmpeg libraries that a
+ * program links against.
+ *
+ * However, new public symbols may be added and new members may be appended to
+ * public structs whose size is not part of public ABI (most public structs in
+ * FFmpeg). New macros and enum values may be added. Behavior in undocumented
+ * situations may change slightly (and be documented). All those are accompanied
+ * by an entry in doc/APIchanges and incrementing either the minor or micro
+ * version number.
+ */
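+
+/*
+ * Compile-time version-check sketch (illustrative; the version numbers below
+ * are placeholders, not a real feature threshold):
+ *
+ *   #include <libavutil/version.h>
+ *   #if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(58, 0, 100)
+ *   // rely on APIs documented for that libavutil release
+ *   #endif
+ */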
+
+/**
+ * @defgroup lavu libavutil
+ * Common code shared across all FFmpeg libraries.
+ *
+ * @note
+ * libavutil is designed to be modular. In most cases, in order to use the
+ * functions provided by one component of libavutil you must explicitly include
+ * the specific header containing that feature. If you are only using
+ * media-related components, you could simply include libavutil/avutil.h, which
+ * brings in most of the "core" components.
+ *
+ * @{
+ *
+ * @defgroup lavu_crypto Crypto and Hashing
+ *
+ * @{
+ * @}
+ *
+ * @defgroup lavu_math Mathematics
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_string String Manipulation
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_mem Memory Management
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_data Data Structures
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_video Video related
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_audio Audio related
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_error Error Codes
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_log Logging Facility
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_misc Other
+ *
+ * @{
+ *
+ * @defgroup preproc_misc Preprocessor String Macros
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup version_utils Library Version Macros
+ *
+ * @{
+ *
+ * @}
+ */
+
+/**
+ * @addtogroup lavu_ver
+ * @{
+ */
+
+/**
+ * Return the LIBAVUTIL_VERSION_INT constant.
+ */
+unsigned avutil_version(void);
+
+/**
+ * Return an informative version string. This usually is the actual release
+ * version number or a git commit description. This string has no fixed format
+ * and can change any time. It should never be parsed by code.
+ */
+const char* av_version_info(void);
+
+/**
+ * Return the libavutil build-time configuration.
+ */
+const char* avutil_configuration(void);
+
+/**
+ * Return the libavutil license.
+ */
+const char* avutil_license(void);
+
+/**
+ * @}
+ */
+
+/**
+ * @addtogroup lavu_media Media Type
+ * @brief Media Type
+ */
+
+enum AVMediaType {
+ AVMEDIA_TYPE_UNKNOWN = -1, ///< Usually treated as AVMEDIA_TYPE_DATA
+ AVMEDIA_TYPE_VIDEO,
+ AVMEDIA_TYPE_AUDIO,
+ AVMEDIA_TYPE_DATA, ///< Opaque data information usually continuous
+ AVMEDIA_TYPE_SUBTITLE,
+ AVMEDIA_TYPE_ATTACHMENT, ///< Opaque data information usually sparse
+ AVMEDIA_TYPE_NB
+};
+
+/**
+ * Return a string describing the media_type enum, NULL if media_type
+ * is unknown.
+ */
+const char* av_get_media_type_string(enum AVMediaType media_type);
+
+/**
+ * @defgroup lavu_const Constants
+ * @{
+ *
+ * @defgroup lavu_enc Encoding specific
+ *
+ * @note those definitions should move to avcodec
+ * @{
+ */
+
+#define FF_LAMBDA_SHIFT 7
+#define FF_LAMBDA_SCALE (1 << FF_LAMBDA_SHIFT)
+#define FF_QP2LAMBDA 118 ///< factor to convert from H.263 QP to lambda
+#define FF_LAMBDA_MAX (256 * 128 - 1)
+
+#define FF_QUALITY_SCALE FF_LAMBDA_SCALE // FIXME maybe remove
+
+/**
+ * @}
+ * @defgroup lavu_time Timestamp specific
+ *
+ * FFmpeg internal timebase and timestamp definitions
+ *
+ * @{
+ */
+
+/**
+ * @brief Undefined timestamp value
+ *
+ * Usually reported by demuxers that work on containers that do not provide
+ * either pts or dts.
+ */
+
+#define AV_NOPTS_VALUE ((int64_t)UINT64_C(0x8000000000000000))
+
+/**
+ * Internal time base represented as integer
+ */
+
+#define AV_TIME_BASE 1000000
+
+/**
+ * Internal time base represented as fractional value
+ */
+
+#ifdef __cplusplus
+/* ISO C++ forbids compound-literals. */
+# define AV_TIME_BASE_Q av_make_q(1, AV_TIME_BASE)
+#else
+# define AV_TIME_BASE_Q \
+ (AVRational) { 1, AV_TIME_BASE }
+#endif
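+
+/*
+ * Rescaling sketch (illustrative): convert a timestamp expressed in the
+ * internal AV_TIME_BASE_Q time base into another time base `tb' using
+ * av_rescale_q() from mathematics.h (included further below):
+ *
+ *   int64_t ts_in_tb = av_rescale_q(ts_in_us, AV_TIME_BASE_Q, tb);
+ */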
+
+/**
+ * @}
+ * @}
+ * @defgroup lavu_picture Image related
+ *
+ * AVPicture types, pixel formats and basic image planes manipulation.
+ *
+ * @{
+ */
+
+enum AVPictureType {
+ AV_PICTURE_TYPE_NONE = 0, ///< Undefined
+ AV_PICTURE_TYPE_I, ///< Intra
+ AV_PICTURE_TYPE_P, ///< Predicted
+ AV_PICTURE_TYPE_B, ///< Bi-dir predicted
+ AV_PICTURE_TYPE_S, ///< S(GMC)-VOP MPEG-4
+ AV_PICTURE_TYPE_SI, ///< Switching Intra
+ AV_PICTURE_TYPE_SP, ///< Switching Predicted
+ AV_PICTURE_TYPE_BI, ///< BI type
+};
+
+/**
+ * Return a single letter to describe the given picture type
+ * pict_type.
+ *
+ * @param[in] pict_type the picture type
+ * @return a single character representing the picture type,
+ *         '?' if pict_type is unknown
+ */
+char av_get_picture_type_char(enum AVPictureType pict_type);
+
+/**
+ * @}
+ */
+
+#include "common.h"
+#include "rational.h"
+#include "version.h"
+#include "macros.h"
+#include "mathematics.h"
+#include "log.h"
+#include "pixfmt.h"
+
+/**
+ * Return the default pointer x in case p is NULL.
+ */
+static inline void* av_x_if_null(const void* p, const void* x) {
+ return (void*)(intptr_t)(p ? p : x);
+}
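+
+/*
+ * Usage sketch (illustrative; `user_name' is a hypothetical, possibly-NULL
+ * pointer): fall back to a default when a pointer may be NULL.
+ *
+ *   const char *name = av_x_if_null(user_name, "unknown");
+ */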
+
+/**
+ * Compute the length of an integer list.
+ *
+ * @param elsize size in bytes of each list element (only 1, 2, 4 or 8)
+ * @param term list terminator (usually 0 or -1)
+ * @param list pointer to the list
+ * @return length of the list, in elements, not counting the terminator
+ */
+unsigned av_int_list_length_for_size(unsigned elsize, const void* list,
+ uint64_t term) av_pure;
+
+/**
+ * Compute the length of an integer list.
+ *
+ * @param term list terminator (usually 0 or -1)
+ * @param list pointer to the list
+ * @return length of the list, in elements, not counting the terminator
+ */
+#define av_int_list_length(list, term) \
+ av_int_list_length_for_size(sizeof(*(list)), list, term)
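+
+/*
+ * Usage sketch (illustrative): count the entries of a -1 terminated list.
+ *
+ *   static const int fmts[] = { 3, 7, 42, -1 };
+ *   unsigned n = av_int_list_length(fmts, -1);  // n == 3
+ */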
+
+/**
+ * Return the fractional representation of the internal time base.
+ */
+AVRational av_get_time_base_q(void);
+
+#define AV_FOURCC_MAX_STRING_SIZE 32
+
+#define av_fourcc2str(fourcc) \
+ av_fourcc_make_string((char[AV_FOURCC_MAX_STRING_SIZE]){0}, fourcc)
+
+/**
+ * Fill the provided buffer with a string containing a FourCC (four-character
+ * code) representation.
+ *
+ * @param buf a buffer with size in bytes of at least
+ * AV_FOURCC_MAX_STRING_SIZE
+ * @param fourcc the fourcc to represent
+ * @return the buffer in input
+ */
+char* av_fourcc_make_string(char* buf, uint32_t fourcc);
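+
+/*
+ * Usage sketch (illustrative; MKTAG comes from macros.h):
+ *
+ *   char buf[AV_FOURCC_MAX_STRING_SIZE];
+ *   av_fourcc_make_string(buf, MKTAG('a', 'v', 'c', '1'));  // buf == "avc1"
+ */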
+
+/**
+ * @}
+ * @}
+ */
+
+#endif /* AVUTIL_AVUTIL_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/buffer.h b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/buffer.h
new file mode 100644
index 0000000000..372de093f9
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/buffer.h
@@ -0,0 +1,324 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * @ingroup lavu_buffer
+ * refcounted data buffer API
+ */
+
+#ifndef AVUTIL_BUFFER_H
+#define AVUTIL_BUFFER_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+/**
+ * @defgroup lavu_buffer AVBuffer
+ * @ingroup lavu_data
+ *
+ * @{
+ * AVBuffer is an API for reference-counted data buffers.
+ *
+ * There are two core objects in this API -- AVBuffer and AVBufferRef. AVBuffer
+ * represents the data buffer itself; it is opaque and not meant to be accessed
+ * by the caller directly, but only through AVBufferRef. However, the caller may
+ * e.g. compare two AVBuffer pointers to check whether two different references
+ * are describing the same data buffer. AVBufferRef represents a single
+ * reference to an AVBuffer and it is the object that may be manipulated by the
+ * caller directly.
+ *
+ * There are two functions provided for creating a new AVBuffer with a single
+ * reference -- av_buffer_alloc() to just allocate a new buffer, and
+ * av_buffer_create() to wrap an existing array in an AVBuffer. From an existing
+ * reference, additional references may be created with av_buffer_ref().
+ * Use av_buffer_unref() to free a reference (this will automatically free the
+ * data once all the references are freed).
+ *
+ * The convention throughout this API and the rest of FFmpeg is such that the
+ * buffer is considered writable if there exists only one reference to it (and
+ * it has not been marked as read-only). The av_buffer_is_writable() function is
+ * provided to check whether this is true and av_buffer_make_writable() will
+ * automatically create a new writable buffer when necessary.
+ * Of course nothing prevents the calling code from violating this convention,
+ * however that is safe only when all the existing references are under its
+ * control.
+ *
+ * @note Referencing and unreferencing the buffers is thread-safe and thus
+ * may be done from multiple threads simultaneously without any need for
+ * additional locking.
+ *
+ * @note Two different references to the same buffer can point to different
+ * parts of the buffer (i.e. their AVBufferRef.data will not be equal).
+ */
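+
+/*
+ * Lifecycle sketch (illustrative), using only the functions declared in this
+ * header:
+ *
+ *   AVBufferRef *buf = av_buffer_alloc(1024);  // one reference, writable
+ *   AVBufferRef *ref = av_buffer_ref(buf);     // second reference
+ *   // av_buffer_is_writable(buf) now returns 0: two references exist
+ *   av_buffer_unref(&ref);                     // drop the second reference
+ *   // av_buffer_is_writable(buf) returns 1 again
+ *   av_buffer_unref(&buf);                     // last reference: data freed
+ */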
+
+/**
+ * A reference counted buffer type. It is opaque and is meant to be used through
+ * references (AVBufferRef).
+ */
+typedef struct AVBuffer AVBuffer;
+
+/**
+ * A reference to a data buffer.
+ *
+ * The size of this struct is not a part of the public ABI and it is not meant
+ * to be allocated directly.
+ */
+typedef struct AVBufferRef {
+ AVBuffer* buffer;
+
+ /**
+ * The data buffer. It is considered writable if and only if
+ * this is the only reference to the buffer, in which case
+ * av_buffer_is_writable() returns 1.
+ */
+ uint8_t* data;
+ /**
+ * Size of data in bytes.
+ */
+ size_t size;
+} AVBufferRef;
+
+/**
+ * Allocate an AVBuffer of the given size using av_malloc().
+ *
+ * @return an AVBufferRef of given size or NULL when out of memory
+ */
+AVBufferRef* av_buffer_alloc(size_t size);
+
+/**
+ * Same as av_buffer_alloc(), except the returned buffer will be initialized
+ * to zero.
+ */
+AVBufferRef* av_buffer_allocz(size_t size);
+
+/**
+ * Always treat the buffer as read-only, even when it has only one
+ * reference.
+ */
+#define AV_BUFFER_FLAG_READONLY (1 << 0)
+
+/**
+ * Create an AVBuffer from an existing array.
+ *
+ * If this function is successful, data is owned by the AVBuffer. The caller may
+ * only access data through the returned AVBufferRef and references derived from
+ * it.
+ * If this function fails, data is left untouched.
+ * @param data data array
+ * @param size size of data in bytes
+ * @param free a callback for freeing this buffer's data
+ * @param opaque an opaque parameter, retrievable with av_buffer_get_opaque()
+ *               and passed to the free callback
+ * @param flags a combination of AV_BUFFER_FLAG_*
+ *
+ * @return an AVBufferRef referring to data on success, NULL on failure.
+ */
+AVBufferRef* av_buffer_create(uint8_t* data, size_t size,
+ void (*free)(void* opaque, uint8_t* data),
+ void* opaque, int flags);
+
+/**
+ * Default free callback, which calls av_free() on the buffer data.
+ * This function is meant to be passed to av_buffer_create(), not called
+ * directly.
+ */
+void av_buffer_default_free(void* opaque, uint8_t* data);
+
+/**
+ * Create a new reference to an AVBuffer.
+ *
+ * @return a new AVBufferRef referring to the same AVBuffer as buf or NULL on
+ * failure.
+ */
+AVBufferRef* av_buffer_ref(const AVBufferRef* buf);
+
+/**
+ * Free a given reference and automatically free the buffer if there are no more
+ * references to it.
+ *
+ * @param buf the reference to be freed. The pointer is set to NULL on return.
+ */
+void av_buffer_unref(AVBufferRef** buf);
+
+/**
+ * @return 1 if the caller may write to the data referred to by buf (which is
+ * true if and only if buf is the only reference to the underlying AVBuffer).
+ * Return 0 otherwise.
+ * A positive answer is valid until av_buffer_ref() is called on buf.
+ */
+int av_buffer_is_writable(const AVBufferRef* buf);
+
+/**
+ * @return the opaque parameter set by av_buffer_create.
+ */
+void* av_buffer_get_opaque(const AVBufferRef* buf);
+
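+/**
+ * @return the number of references held to the underlying buffer.
+ */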
+int av_buffer_get_ref_count(const AVBufferRef* buf);
+
+/**
+ * Create a writable reference from a given buffer reference, avoiding data copy
+ * if possible.
+ *
+ * @param buf buffer reference to make writable. On success, buf is either left
+ * untouched, or it is unreferenced and a new writable AVBufferRef is
+ * written in its place. On failure, buf is left untouched.
+ * @return 0 on success, a negative AVERROR on failure.
+ */
+int av_buffer_make_writable(AVBufferRef** buf);
+
+/**
+ * Reallocate a given buffer.
+ *
+ * @param buf a buffer reference to reallocate. On success, buf will be
+ * unreferenced and a new reference with the required size will be
+ * written in its place. On failure buf will be left untouched. *buf
+ * may be NULL, then a new buffer is allocated.
+ * @param size required new buffer size.
+ * @return 0 on success, a negative AVERROR on failure.
+ *
+ * @note the buffer is actually reallocated with av_realloc() only if it was
+ * initially allocated through av_buffer_realloc(NULL) and there is only one
+ * reference to it (i.e. the one passed to this function). In all other cases
+ * a new buffer is allocated and the data is copied.
+ */
+int av_buffer_realloc(AVBufferRef** buf, size_t size);
+
+/**
+ * Ensure dst refers to the same data as src.
+ *
+ * When *dst is already equivalent to src, do nothing. Otherwise unreference dst
+ * and replace it with a new reference to src.
+ *
+ * @param dst Pointer to either a valid buffer reference or NULL. On success,
+ * this will point to a buffer reference equivalent to src. On
+ * failure, dst will be left untouched.
+ * @param src A buffer reference to replace dst with. May be NULL, then this
+ * function is equivalent to av_buffer_unref(dst).
+ * @return 0 on success
+ * AVERROR(ENOMEM) on memory allocation failure.
+ */
+int av_buffer_replace(AVBufferRef** dst, const AVBufferRef* src);
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup lavu_bufferpool AVBufferPool
+ * @ingroup lavu_data
+ *
+ * @{
+ * AVBufferPool is an API for a lock-free thread-safe pool of AVBuffers.
+ *
+ * Frequently allocating and freeing large buffers may be slow. AVBufferPool is
+ * meant to solve this in cases when the caller needs a set of buffers of the
+ * same size (the most obvious use case being buffers for raw video or audio
+ * frames).
+ *
+ * At the beginning, the user must call av_buffer_pool_init() to create the
+ * buffer pool. Then whenever a buffer is needed, call av_buffer_pool_get() to
+ * get a reference to a new buffer, similar to av_buffer_alloc(). This new
+ * reference works in all aspects the same way as the one created by
+ * av_buffer_alloc(). However, when the last reference to this buffer is
+ * unreferenced, it is returned to the pool instead of being freed and will be
+ * reused for subsequent av_buffer_pool_get() calls.
+ *
+ * When the caller is done with the pool and no longer needs to allocate any new
+ * buffers, av_buffer_pool_uninit() must be called to mark the pool as freeable.
+ * Once all the buffers are released, it will automatically be freed.
+ *
+ * Allocating and releasing buffers with this API is thread-safe as long as
+ * either the default alloc callback is used, or the user-supplied one is
+ * thread-safe.
+ */
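+
+/*
+ * Pool usage sketch (illustrative):
+ *
+ *   AVBufferPool *pool = av_buffer_pool_init(4096, NULL);
+ *   AVBufferRef *buf = av_buffer_pool_get(pool);
+ *   // ... use buf->data ...
+ *   av_buffer_unref(&buf);        // returns the buffer to the pool
+ *   av_buffer_pool_uninit(&pool); // freed once all buffers are released
+ */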
+
+/**
+ * The buffer pool. This structure is opaque and not meant to be accessed
+ * directly. It is allocated with av_buffer_pool_init() and freed with
+ * av_buffer_pool_uninit().
+ */
+typedef struct AVBufferPool AVBufferPool;
+
+/**
+ * Allocate and initialize a buffer pool.
+ *
+ * @param size size of each buffer in this pool
+ * @param alloc a function that will be used to allocate new buffers when the
+ * pool is empty. May be NULL, then the default allocator will be used
+ * (av_buffer_alloc()).
+ * @return newly created buffer pool on success, NULL on error.
+ */
+AVBufferPool* av_buffer_pool_init(size_t size,
+ AVBufferRef* (*alloc)(size_t size));
+
+/**
+ * Allocate and initialize a buffer pool with a more complex allocator.
+ *
+ * @param size size of each buffer in this pool
+ * @param opaque arbitrary user data used by the allocator
+ * @param alloc a function that will be used to allocate new buffers when the
+ * pool is empty. May be NULL, then the default allocator will be
+ * used (av_buffer_alloc()).
+ * @param pool_free a function that will be called immediately before the pool
+ * is freed. I.e. after av_buffer_pool_uninit() is called
+ * by the caller and all the frames are returned to the pool
+ * and freed. It is intended to uninitialize the user opaque
+ * data. May be NULL.
+ * @return newly created buffer pool on success, NULL on error.
+ */
+AVBufferPool* av_buffer_pool_init2(size_t size, void* opaque,
+ AVBufferRef* (*alloc)(void* opaque,
+ size_t size),
+ void (*pool_free)(void* opaque));
+
+/**
+ * Mark the pool as being available for freeing. It will actually be freed only
+ * once all the allocated buffers associated with the pool are released. Thus it
+ * is safe to call this function while some of the allocated buffers are still
+ * in use.
+ *
+ * @param pool pointer to the pool to be freed. It will be set to NULL.
+ */
+void av_buffer_pool_uninit(AVBufferPool** pool);
+
+/**
+ * Allocate a new AVBuffer, reusing an old buffer from the pool when available.
+ * This function may be called simultaneously from multiple threads.
+ *
+ * @return a reference to the new buffer on success, NULL on error.
+ */
+AVBufferRef* av_buffer_pool_get(AVBufferPool* pool);
+
+/**
+ * Query the original opaque parameter of an allocated buffer in the pool.
+ *
+ * @param ref a buffer reference to a buffer returned by av_buffer_pool_get.
+ * @return the opaque parameter set by the buffer allocator function of the
+ * buffer pool.
+ *
+ * @note the opaque parameter of ref is used by the buffer pool implementation,
+ * therefore you have to use this function to access the original opaque
+ * parameter of an allocated buffer.
+ */
+void* av_buffer_pool_buffer_get_opaque(const AVBufferRef* ref);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_BUFFER_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/channel_layout.h b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/channel_layout.h
new file mode 100644
index 0000000000..df0abca669
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/channel_layout.h
@@ -0,0 +1,804 @@
+/*
+ * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2008 Peter Ross
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_CHANNEL_LAYOUT_H
+#define AVUTIL_CHANNEL_LAYOUT_H
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#include "version.h"
+#include "attributes.h"
+
+/**
+ * @file
+ * @ingroup lavu_audio_channels
+ * Public libavutil channel layout APIs header.
+ */
+
+/**
+ * @defgroup lavu_audio_channels Audio channels
+ * @ingroup lavu_audio
+ *
+ * Audio channel layout utility functions
+ *
+ * @{
+ */
+
+enum AVChannel {
+  AV_CHAN_NONE = -1,  ///< Invalid channel index
+ AV_CHAN_FRONT_LEFT,
+ AV_CHAN_FRONT_RIGHT,
+ AV_CHAN_FRONT_CENTER,
+ AV_CHAN_LOW_FREQUENCY,
+ AV_CHAN_BACK_LEFT,
+ AV_CHAN_BACK_RIGHT,
+ AV_CHAN_FRONT_LEFT_OF_CENTER,
+ AV_CHAN_FRONT_RIGHT_OF_CENTER,
+ AV_CHAN_BACK_CENTER,
+ AV_CHAN_SIDE_LEFT,
+ AV_CHAN_SIDE_RIGHT,
+ AV_CHAN_TOP_CENTER,
+ AV_CHAN_TOP_FRONT_LEFT,
+ AV_CHAN_TOP_FRONT_CENTER,
+ AV_CHAN_TOP_FRONT_RIGHT,
+ AV_CHAN_TOP_BACK_LEFT,
+ AV_CHAN_TOP_BACK_CENTER,
+ AV_CHAN_TOP_BACK_RIGHT,
+ /** Stereo downmix. */
+ AV_CHAN_STEREO_LEFT = 29,
+ /** See above. */
+ AV_CHAN_STEREO_RIGHT,
+ AV_CHAN_WIDE_LEFT,
+ AV_CHAN_WIDE_RIGHT,
+ AV_CHAN_SURROUND_DIRECT_LEFT,
+ AV_CHAN_SURROUND_DIRECT_RIGHT,
+ AV_CHAN_LOW_FREQUENCY_2,
+ AV_CHAN_TOP_SIDE_LEFT,
+ AV_CHAN_TOP_SIDE_RIGHT,
+ AV_CHAN_BOTTOM_FRONT_CENTER,
+ AV_CHAN_BOTTOM_FRONT_LEFT,
+ AV_CHAN_BOTTOM_FRONT_RIGHT,
+
+  /** Channel is empty and can be safely skipped. */
+ AV_CHAN_UNUSED = 0x200,
+
+ /** Channel contains data, but its position is unknown. */
+ AV_CHAN_UNKNOWN = 0x300,
+
+ /**
+ * Range of channels between AV_CHAN_AMBISONIC_BASE and
+ * AV_CHAN_AMBISONIC_END represent Ambisonic components using the ACN system.
+ *
+ * Given a channel id `<i>` between AV_CHAN_AMBISONIC_BASE and
+ * AV_CHAN_AMBISONIC_END (inclusive), the ACN index of the channel `<n>` is
+ * `<n> = <i> - AV_CHAN_AMBISONIC_BASE`.
+ *
+ * @note these values are only used for AV_CHANNEL_ORDER_CUSTOM channel
+ * orderings, the AV_CHANNEL_ORDER_AMBISONIC ordering orders the channels
+ * implicitly by their position in the stream.
+ */
+ AV_CHAN_AMBISONIC_BASE = 0x400,
+ // leave space for 1024 ids, which correspond to maximum order-32 harmonics,
+ // which should be enough for the foreseeable use cases
+ AV_CHAN_AMBISONIC_END = 0x7ff,
+};
+
+enum AVChannelOrder {
+ /**
+ * Only the channel count is specified, without any further information
+ * about the channel order.
+ */
+ AV_CHANNEL_ORDER_UNSPEC,
+ /**
+ * The native channel order, i.e. the channels are in the same order in
+ * which they are defined in the AVChannel enum. This supports up to 63
+ * different channels.
+ */
+ AV_CHANNEL_ORDER_NATIVE,
+ /**
+ * The channel order does not correspond to any other predefined order and
+ * is stored as an explicit map. For example, this could be used to support
+ * layouts with 64 or more channels, or with empty/skipped (AV_CHAN_UNUSED)
+ * channels at arbitrary positions.
+ */
+ AV_CHANNEL_ORDER_CUSTOM,
+ /**
+ * The audio is represented as the decomposition of the sound field into
+ * spherical harmonics. Each channel corresponds to a single expansion
+ * component. Channels are ordered according to ACN (Ambisonic Channel
+ * Number).
+ *
+ * The channel with the index n in the stream contains the spherical
+ * harmonic of degree l and order m given by
+ * @code{.unparsed}
+ * l = floor(sqrt(n)),
+ * m = n - l * (l + 1).
+ * @endcode
+ *
+ * Conversely given a spherical harmonic of degree l and order m, the
+ * corresponding channel index n is given by
+ * @code{.unparsed}
+ * n = l * (l + 1) + m.
+ * @endcode
+ *
+ * Normalization is assumed to be SN3D (Schmidt Semi-Normalization)
+   * as defined in the AmbiX format specification, section 2.1.
+ */
+ AV_CHANNEL_ORDER_AMBISONIC,
+ /**
+ * Number of channel orders, not part of ABI/API
+ */
+ FF_CHANNEL_ORDER_NB
+};
+
+/**
+ * @defgroup channel_masks Audio channel masks
+ *
+ * A channel layout is a 64-bit integer with a bit set for every channel.
+ * The number of bits set must be equal to the number of channels.
+ * The value 0 means that the channel layout is not known.
+ * @note this data structure is not powerful enough to handle channel
+ * combinations that have the same channel multiple times, such as
+ * dual-mono.
+ *
+ * @{
+ */
+#define AV_CH_FRONT_LEFT (1ULL << AV_CHAN_FRONT_LEFT)
+#define AV_CH_FRONT_RIGHT (1ULL << AV_CHAN_FRONT_RIGHT)
+#define AV_CH_FRONT_CENTER (1ULL << AV_CHAN_FRONT_CENTER)
+#define AV_CH_LOW_FREQUENCY (1ULL << AV_CHAN_LOW_FREQUENCY)
+#define AV_CH_BACK_LEFT (1ULL << AV_CHAN_BACK_LEFT)
+#define AV_CH_BACK_RIGHT (1ULL << AV_CHAN_BACK_RIGHT)
+#define AV_CH_FRONT_LEFT_OF_CENTER (1ULL << AV_CHAN_FRONT_LEFT_OF_CENTER)
+#define AV_CH_FRONT_RIGHT_OF_CENTER (1ULL << AV_CHAN_FRONT_RIGHT_OF_CENTER)
+#define AV_CH_BACK_CENTER (1ULL << AV_CHAN_BACK_CENTER)
+#define AV_CH_SIDE_LEFT (1ULL << AV_CHAN_SIDE_LEFT)
+#define AV_CH_SIDE_RIGHT (1ULL << AV_CHAN_SIDE_RIGHT)
+#define AV_CH_TOP_CENTER (1ULL << AV_CHAN_TOP_CENTER)
+#define AV_CH_TOP_FRONT_LEFT (1ULL << AV_CHAN_TOP_FRONT_LEFT)
+#define AV_CH_TOP_FRONT_CENTER (1ULL << AV_CHAN_TOP_FRONT_CENTER)
+#define AV_CH_TOP_FRONT_RIGHT (1ULL << AV_CHAN_TOP_FRONT_RIGHT)
+#define AV_CH_TOP_BACK_LEFT (1ULL << AV_CHAN_TOP_BACK_LEFT)
+#define AV_CH_TOP_BACK_CENTER (1ULL << AV_CHAN_TOP_BACK_CENTER)
+#define AV_CH_TOP_BACK_RIGHT (1ULL << AV_CHAN_TOP_BACK_RIGHT)
+#define AV_CH_STEREO_LEFT (1ULL << AV_CHAN_STEREO_LEFT)
+#define AV_CH_STEREO_RIGHT (1ULL << AV_CHAN_STEREO_RIGHT)
+#define AV_CH_WIDE_LEFT (1ULL << AV_CHAN_WIDE_LEFT)
+#define AV_CH_WIDE_RIGHT (1ULL << AV_CHAN_WIDE_RIGHT)
+#define AV_CH_SURROUND_DIRECT_LEFT (1ULL << AV_CHAN_SURROUND_DIRECT_LEFT)
+#define AV_CH_SURROUND_DIRECT_RIGHT (1ULL << AV_CHAN_SURROUND_DIRECT_RIGHT)
+#define AV_CH_LOW_FREQUENCY_2 (1ULL << AV_CHAN_LOW_FREQUENCY_2)
+#define AV_CH_TOP_SIDE_LEFT (1ULL << AV_CHAN_TOP_SIDE_LEFT)
+#define AV_CH_TOP_SIDE_RIGHT (1ULL << AV_CHAN_TOP_SIDE_RIGHT)
+#define AV_CH_BOTTOM_FRONT_CENTER (1ULL << AV_CHAN_BOTTOM_FRONT_CENTER)
+#define AV_CH_BOTTOM_FRONT_LEFT (1ULL << AV_CHAN_BOTTOM_FRONT_LEFT)
+#define AV_CH_BOTTOM_FRONT_RIGHT (1ULL << AV_CHAN_BOTTOM_FRONT_RIGHT)
+
+/**
+ * @}
+ * @defgroup channel_mask_c Audio channel layouts
+ * @{
+ * */
+#define AV_CH_LAYOUT_MONO (AV_CH_FRONT_CENTER)
+#define AV_CH_LAYOUT_STEREO (AV_CH_FRONT_LEFT | AV_CH_FRONT_RIGHT)
+#define AV_CH_LAYOUT_2POINT1 (AV_CH_LAYOUT_STEREO | AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_2_1 (AV_CH_LAYOUT_STEREO | AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_SURROUND (AV_CH_LAYOUT_STEREO | AV_CH_FRONT_CENTER)
+#define AV_CH_LAYOUT_3POINT1 (AV_CH_LAYOUT_SURROUND | AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_4POINT0 (AV_CH_LAYOUT_SURROUND | AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_4POINT1 (AV_CH_LAYOUT_4POINT0 | AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_2_2 \
+ (AV_CH_LAYOUT_STEREO | AV_CH_SIDE_LEFT | AV_CH_SIDE_RIGHT)
+#define AV_CH_LAYOUT_QUAD \
+ (AV_CH_LAYOUT_STEREO | AV_CH_BACK_LEFT | AV_CH_BACK_RIGHT)
+#define AV_CH_LAYOUT_5POINT0 \
+ (AV_CH_LAYOUT_SURROUND | AV_CH_SIDE_LEFT | AV_CH_SIDE_RIGHT)
+#define AV_CH_LAYOUT_5POINT1 (AV_CH_LAYOUT_5POINT0 | AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_5POINT0_BACK \
+ (AV_CH_LAYOUT_SURROUND | AV_CH_BACK_LEFT | AV_CH_BACK_RIGHT)
+#define AV_CH_LAYOUT_5POINT1_BACK \
+ (AV_CH_LAYOUT_5POINT0_BACK | AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_6POINT0 (AV_CH_LAYOUT_5POINT0 | AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_6POINT0_FRONT \
+ (AV_CH_LAYOUT_2_2 | AV_CH_FRONT_LEFT_OF_CENTER | AV_CH_FRONT_RIGHT_OF_CENTER)
+#define AV_CH_LAYOUT_HEXAGONAL (AV_CH_LAYOUT_5POINT0_BACK | AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_3POINT1POINT2 \
+ (AV_CH_LAYOUT_3POINT1 | AV_CH_TOP_FRONT_LEFT | AV_CH_TOP_FRONT_RIGHT)
+#define AV_CH_LAYOUT_6POINT1 (AV_CH_LAYOUT_5POINT1 | AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_6POINT1_BACK \
+ (AV_CH_LAYOUT_5POINT1_BACK | AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_6POINT1_FRONT \
+ (AV_CH_LAYOUT_6POINT0_FRONT | AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_7POINT0 \
+ (AV_CH_LAYOUT_5POINT0 | AV_CH_BACK_LEFT | AV_CH_BACK_RIGHT)
+#define AV_CH_LAYOUT_7POINT0_FRONT \
+ (AV_CH_LAYOUT_5POINT0 | AV_CH_FRONT_LEFT_OF_CENTER | \
+ AV_CH_FRONT_RIGHT_OF_CENTER)
+#define AV_CH_LAYOUT_7POINT1 \
+ (AV_CH_LAYOUT_5POINT1 | AV_CH_BACK_LEFT | AV_CH_BACK_RIGHT)
+#define AV_CH_LAYOUT_7POINT1_WIDE \
+ (AV_CH_LAYOUT_5POINT1 | AV_CH_FRONT_LEFT_OF_CENTER | \
+ AV_CH_FRONT_RIGHT_OF_CENTER)
+#define AV_CH_LAYOUT_7POINT1_WIDE_BACK \
+ (AV_CH_LAYOUT_5POINT1_BACK | AV_CH_FRONT_LEFT_OF_CENTER | \
+ AV_CH_FRONT_RIGHT_OF_CENTER)
+#define AV_CH_LAYOUT_5POINT1POINT2_BACK \
+ (AV_CH_LAYOUT_5POINT1_BACK | AV_CH_TOP_FRONT_LEFT | AV_CH_TOP_FRONT_RIGHT)
+#define AV_CH_LAYOUT_OCTAGONAL \
+ (AV_CH_LAYOUT_5POINT0 | AV_CH_BACK_LEFT | AV_CH_BACK_CENTER | \
+ AV_CH_BACK_RIGHT)
+#define AV_CH_LAYOUT_CUBE \
+ (AV_CH_LAYOUT_QUAD | AV_CH_TOP_FRONT_LEFT | AV_CH_TOP_FRONT_RIGHT | \
+ AV_CH_TOP_BACK_LEFT | AV_CH_TOP_BACK_RIGHT)
+#define AV_CH_LAYOUT_5POINT1POINT4_BACK \
+ (AV_CH_LAYOUT_5POINT1POINT2_BACK | AV_CH_TOP_BACK_LEFT | AV_CH_TOP_BACK_RIGHT)
+#define AV_CH_LAYOUT_7POINT1POINT2 \
+ (AV_CH_LAYOUT_7POINT1 | AV_CH_TOP_FRONT_LEFT | AV_CH_TOP_FRONT_RIGHT)
+#define AV_CH_LAYOUT_7POINT1POINT4_BACK \
+ (AV_CH_LAYOUT_7POINT1POINT2 | AV_CH_TOP_BACK_LEFT | AV_CH_TOP_BACK_RIGHT)
+#define AV_CH_LAYOUT_7POINT2POINT3 \
+ (AV_CH_LAYOUT_7POINT1POINT2 | AV_CH_TOP_BACK_CENTER | AV_CH_LOW_FREQUENCY_2)
+#define AV_CH_LAYOUT_9POINT1POINT4_BACK \
+ (AV_CH_LAYOUT_7POINT1POINT4_BACK | AV_CH_FRONT_LEFT_OF_CENTER | \
+ AV_CH_FRONT_RIGHT_OF_CENTER)
+#define AV_CH_LAYOUT_HEXADECAGONAL \
+ (AV_CH_LAYOUT_OCTAGONAL | AV_CH_WIDE_LEFT | AV_CH_WIDE_RIGHT | \
+ AV_CH_TOP_BACK_LEFT | AV_CH_TOP_BACK_RIGHT | AV_CH_TOP_BACK_CENTER | \
+ AV_CH_TOP_FRONT_CENTER | AV_CH_TOP_FRONT_LEFT | AV_CH_TOP_FRONT_RIGHT)
+#define AV_CH_LAYOUT_STEREO_DOWNMIX (AV_CH_STEREO_LEFT | AV_CH_STEREO_RIGHT)
+#define AV_CH_LAYOUT_22POINT2 \
+ (AV_CH_LAYOUT_7POINT1POINT4_BACK | AV_CH_FRONT_LEFT_OF_CENTER | \
+ AV_CH_FRONT_RIGHT_OF_CENTER | AV_CH_BACK_CENTER | AV_CH_LOW_FREQUENCY_2 | \
+ AV_CH_TOP_FRONT_CENTER | AV_CH_TOP_CENTER | AV_CH_TOP_SIDE_LEFT | \
+ AV_CH_TOP_SIDE_RIGHT | AV_CH_TOP_BACK_CENTER | AV_CH_BOTTOM_FRONT_CENTER | \
+ AV_CH_BOTTOM_FRONT_LEFT | AV_CH_BOTTOM_FRONT_RIGHT)
+
+#define AV_CH_LAYOUT_7POINT1_TOP_BACK AV_CH_LAYOUT_5POINT1POINT2_BACK
+
+enum AVMatrixEncoding {
+ AV_MATRIX_ENCODING_NONE,
+ AV_MATRIX_ENCODING_DOLBY,
+ AV_MATRIX_ENCODING_DPLII,
+ AV_MATRIX_ENCODING_DPLIIX,
+ AV_MATRIX_ENCODING_DPLIIZ,
+ AV_MATRIX_ENCODING_DOLBYEX,
+ AV_MATRIX_ENCODING_DOLBYHEADPHONE,
+ AV_MATRIX_ENCODING_NB
+};
+
+/**
+ * @}
+ */
+
+/**
+ * An AVChannelCustom defines a single channel within a custom order layout
+ *
+ * Unlike most structures in FFmpeg, sizeof(AVChannelCustom) is a part of the
+ * public ABI.
+ *
+ * No new fields may be added to it without a major version bump.
+ */
+typedef struct AVChannelCustom {
+ enum AVChannel id;
+ char name[16];
+ void* opaque;
+} AVChannelCustom;
+
+/**
+ * An AVChannelLayout holds information about the channel layout of audio data.
+ *
+ * A channel layout here is defined as a set of channels ordered in a specific
+ * way (unless the channel order is AV_CHANNEL_ORDER_UNSPEC, in which case an
+ * AVChannelLayout carries only the channel count).
+ * All orders may be treated as if they were AV_CHANNEL_ORDER_UNSPEC by
+ * ignoring everything but the channel count, as long as
+ * av_channel_layout_check() considers them valid.
+ *
+ * Unlike most structures in FFmpeg, sizeof(AVChannelLayout) is a part of the
+ * public ABI and may be used by the caller. E.g. it may be allocated on stack
+ * or embedded in caller-defined structs.
+ *
+ * AVChannelLayout can be initialized as follows:
+ * - default initialization with {0}, followed by setting all used fields
+ * correctly;
+ * - by assigning one of the predefined AV_CHANNEL_LAYOUT_* initializers;
+ * - with a constructor function, such as av_channel_layout_default(),
+ * av_channel_layout_from_mask() or av_channel_layout_from_string().
+ *
+ * The channel layout must be uninitialized with av_channel_layout_uninit()
+ * (see the usage sketch after the struct definition below).
+ *
+ * Copying an AVChannelLayout via assignment is forbidden;
+ * av_channel_layout_copy() must be used instead (and its return value should
+ * be checked).
+ *
+ * No new fields may be added to it without a major version bump, except for
+ * new elements of the union fitting in sizeof(uint64_t).
+ */
+typedef struct AVChannelLayout {
+ /**
+ * Channel order used in this layout.
+ * This is a mandatory field.
+ */
+ enum AVChannelOrder order;
+
+ /**
+ * Number of channels in this layout. Mandatory field.
+ */
+ int nb_channels;
+
+ /**
+ * Details about which channels are present in this layout.
+ * For AV_CHANNEL_ORDER_UNSPEC, this field is undefined and must not be
+ * used.
+ */
+ union {
+ /**
+ * This member must be used for AV_CHANNEL_ORDER_NATIVE, and may be used
+ * for AV_CHANNEL_ORDER_AMBISONIC to signal non-diegetic channels.
+ * It is a bitmask, where the position of each set bit means that the
+ * AVChannel with the corresponding value is present.
+ *
+ * I.e. when (mask & (1 << AV_CHAN_FOO)) is non-zero, then AV_CHAN_FOO
+ * is present in the layout. Otherwise it is not present.
+ *
+ * @note when a channel layout using a bitmask is constructed or
+ * modified manually (i.e. not using any of the av_channel_layout_*
+ * functions), the code doing it must ensure that the number of set bits
+ * is equal to nb_channels.
+ */
+ uint64_t mask;
+ /**
+ * This member must be used when the channel order is
+ * AV_CHANNEL_ORDER_CUSTOM. It is a nb_channels-sized array, with each
+ * element signalling the presence of the AVChannel with the
+ * corresponding value in map[i].id.
+ *
+ * I.e. when map[i].id is equal to AV_CHAN_FOO, then AV_CH_FOO is the
+ * i-th channel in the audio data.
+ *
+ * When map[i].id is in the range between AV_CHAN_AMBISONIC_BASE and
+ * AV_CHAN_AMBISONIC_END (inclusive), the channel contains an ambisonic
+ * component with ACN index (as defined above)
+ * n = map[i].id - AV_CHAN_AMBISONIC_BASE.
+ *
+ * map[i].name may be filled with a 0-terminated string, in which case
+ * it will be used for the purpose of identifying the channel with the
+     * convenience functions below. Otherwise it must be zeroed.
+ */
+ AVChannelCustom* map;
+ } u;
+
+ /**
+ * For some private data of the user.
+ */
+ void* opaque;
+} AVChannelLayout;
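+
+/*
+ * Initialization sketch (illustrative), as referenced above:
+ *
+ *   AVChannelLayout layout = { 0 };
+ *   if (av_channel_layout_from_mask(&layout, AV_CH_LAYOUT_5POINT1) == 0) {
+ *     // layout.order == AV_CHANNEL_ORDER_NATIVE, layout.nb_channels == 6
+ *     av_channel_layout_uninit(&layout);
+ *   }
+ */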
+
+/**
+ * Macro to define native channel layouts
+ *
+ * @note This doesn't use designated initializers for compatibility with C++ 17
+ * and older.
+ */
+#define AV_CHANNEL_LAYOUT_MASK(nb, m) \
+ { /* .order */ \
+ AV_CHANNEL_ORDER_NATIVE, /* .nb_channels */ (nb), /* .u.mask */ {m}, \
+ /* .opaque */ NULL \
+ }
+
+/**
+ * @name Common pre-defined channel layouts
+ * @{
+ */
+#define AV_CHANNEL_LAYOUT_MONO AV_CHANNEL_LAYOUT_MASK(1, AV_CH_LAYOUT_MONO)
+#define AV_CHANNEL_LAYOUT_STEREO AV_CHANNEL_LAYOUT_MASK(2, AV_CH_LAYOUT_STEREO)
+#define AV_CHANNEL_LAYOUT_2POINT1 \
+ AV_CHANNEL_LAYOUT_MASK(3, AV_CH_LAYOUT_2POINT1)
+#define AV_CHANNEL_LAYOUT_2_1 AV_CHANNEL_LAYOUT_MASK(3, AV_CH_LAYOUT_2_1)
+#define AV_CHANNEL_LAYOUT_SURROUND \
+ AV_CHANNEL_LAYOUT_MASK(3, AV_CH_LAYOUT_SURROUND)
+#define AV_CHANNEL_LAYOUT_3POINT1 \
+ AV_CHANNEL_LAYOUT_MASK(4, AV_CH_LAYOUT_3POINT1)
+#define AV_CHANNEL_LAYOUT_4POINT0 \
+ AV_CHANNEL_LAYOUT_MASK(4, AV_CH_LAYOUT_4POINT0)
+#define AV_CHANNEL_LAYOUT_4POINT1 \
+ AV_CHANNEL_LAYOUT_MASK(5, AV_CH_LAYOUT_4POINT1)
+#define AV_CHANNEL_LAYOUT_2_2 AV_CHANNEL_LAYOUT_MASK(4, AV_CH_LAYOUT_2_2)
+#define AV_CHANNEL_LAYOUT_QUAD AV_CHANNEL_LAYOUT_MASK(4, AV_CH_LAYOUT_QUAD)
+#define AV_CHANNEL_LAYOUT_5POINT0 \
+ AV_CHANNEL_LAYOUT_MASK(5, AV_CH_LAYOUT_5POINT0)
+#define AV_CHANNEL_LAYOUT_5POINT1 \
+ AV_CHANNEL_LAYOUT_MASK(6, AV_CH_LAYOUT_5POINT1)
+#define AV_CHANNEL_LAYOUT_5POINT0_BACK \
+ AV_CHANNEL_LAYOUT_MASK(5, AV_CH_LAYOUT_5POINT0_BACK)
+#define AV_CHANNEL_LAYOUT_5POINT1_BACK \
+ AV_CHANNEL_LAYOUT_MASK(6, AV_CH_LAYOUT_5POINT1_BACK)
+#define AV_CHANNEL_LAYOUT_6POINT0 \
+ AV_CHANNEL_LAYOUT_MASK(6, AV_CH_LAYOUT_6POINT0)
+#define AV_CHANNEL_LAYOUT_6POINT0_FRONT \
+ AV_CHANNEL_LAYOUT_MASK(6, AV_CH_LAYOUT_6POINT0_FRONT)
+#define AV_CHANNEL_LAYOUT_3POINT1POINT2 \
+ AV_CHANNEL_LAYOUT_MASK(6, AV_CH_LAYOUT_3POINT1POINT2)
+#define AV_CHANNEL_LAYOUT_HEXAGONAL \
+ AV_CHANNEL_LAYOUT_MASK(6, AV_CH_LAYOUT_HEXAGONAL)
+#define AV_CHANNEL_LAYOUT_6POINT1 \
+ AV_CHANNEL_LAYOUT_MASK(7, AV_CH_LAYOUT_6POINT1)
+#define AV_CHANNEL_LAYOUT_6POINT1_BACK \
+ AV_CHANNEL_LAYOUT_MASK(7, AV_CH_LAYOUT_6POINT1_BACK)
+#define AV_CHANNEL_LAYOUT_6POINT1_FRONT \
+ AV_CHANNEL_LAYOUT_MASK(7, AV_CH_LAYOUT_6POINT1_FRONT)
+#define AV_CHANNEL_LAYOUT_7POINT0 \
+ AV_CHANNEL_LAYOUT_MASK(7, AV_CH_LAYOUT_7POINT0)
+#define AV_CHANNEL_LAYOUT_7POINT0_FRONT \
+ AV_CHANNEL_LAYOUT_MASK(7, AV_CH_LAYOUT_7POINT0_FRONT)
+#define AV_CHANNEL_LAYOUT_7POINT1 \
+ AV_CHANNEL_LAYOUT_MASK(8, AV_CH_LAYOUT_7POINT1)
+#define AV_CHANNEL_LAYOUT_7POINT1_WIDE \
+ AV_CHANNEL_LAYOUT_MASK(8, AV_CH_LAYOUT_7POINT1_WIDE)
+#define AV_CHANNEL_LAYOUT_7POINT1_WIDE_BACK \
+ AV_CHANNEL_LAYOUT_MASK(8, AV_CH_LAYOUT_7POINT1_WIDE_BACK)
+#define AV_CHANNEL_LAYOUT_5POINT1POINT2_BACK \
+ AV_CHANNEL_LAYOUT_MASK(8, AV_CH_LAYOUT_5POINT1POINT2_BACK)
+#define AV_CHANNEL_LAYOUT_OCTAGONAL \
+ AV_CHANNEL_LAYOUT_MASK(8, AV_CH_LAYOUT_OCTAGONAL)
+#define AV_CHANNEL_LAYOUT_CUBE AV_CHANNEL_LAYOUT_MASK(8, AV_CH_LAYOUT_CUBE)
+#define AV_CHANNEL_LAYOUT_5POINT1POINT4_BACK \
+ AV_CHANNEL_LAYOUT_MASK(10, AV_CH_LAYOUT_5POINT1POINT4_BACK)
+#define AV_CHANNEL_LAYOUT_7POINT1POINT2 \
+ AV_CHANNEL_LAYOUT_MASK(10, AV_CH_LAYOUT_7POINT1POINT2)
+#define AV_CHANNEL_LAYOUT_7POINT1POINT4_BACK \
+ AV_CHANNEL_LAYOUT_MASK(12, AV_CH_LAYOUT_7POINT1POINT4_BACK)
+#define AV_CHANNEL_LAYOUT_7POINT2POINT3 \
+ AV_CHANNEL_LAYOUT_MASK(12, AV_CH_LAYOUT_7POINT2POINT3)
+#define AV_CHANNEL_LAYOUT_9POINT1POINT4_BACK \
+ AV_CHANNEL_LAYOUT_MASK(14, AV_CH_LAYOUT_9POINT1POINT4_BACK)
+#define AV_CHANNEL_LAYOUT_HEXADECAGONAL \
+ AV_CHANNEL_LAYOUT_MASK(16, AV_CH_LAYOUT_HEXADECAGONAL)
+#define AV_CHANNEL_LAYOUT_STEREO_DOWNMIX \
+ AV_CHANNEL_LAYOUT_MASK(2, AV_CH_LAYOUT_STEREO_DOWNMIX)
+#define AV_CHANNEL_LAYOUT_22POINT2 \
+ AV_CHANNEL_LAYOUT_MASK(24, AV_CH_LAYOUT_22POINT2)
+
+#define AV_CHANNEL_LAYOUT_7POINT1_TOP_BACK AV_CHANNEL_LAYOUT_5POINT1POINT2_BACK
+
+#define AV_CHANNEL_LAYOUT_AMBISONIC_FIRST_ORDER \
+ { /* .order */ \
+ AV_CHANNEL_ORDER_AMBISONIC, /* .nb_channels */ 4, /* .u.mask */ {0}, \
+ /* .opaque */ NULL \
+ }
+/** @} */
+
+struct AVBPrint;
+
+/**
+ * Get a human readable string in an abbreviated form describing a given
+ * channel. This is the inverse function of @ref av_channel_from_string().
+ *
+ * @param buf pre-allocated buffer where to put the generated string
+ * @param buf_size size in bytes of the buffer.
+ * @param channel the AVChannel whose name to get
+ * @return amount of bytes needed to hold the output string, or a negative
+ * AVERROR on failure. If the returned value is bigger than buf_size, then the
+ * string was truncated.
+ */
+int av_channel_name(char* buf, size_t buf_size, enum AVChannel channel);
+
+/**
+ * bprint variant of av_channel_name().
+ *
+ * @note the string will be appended to the bprint buffer.
+ */
+void av_channel_name_bprint(struct AVBPrint* bp, enum AVChannel channel_id);
+
+/**
+ * Get a human readable string describing a given channel.
+ *
+ * @param buf pre-allocated buffer where to put the generated string
+ * @param buf_size size in bytes of the buffer.
+ * @param channel the AVChannel whose description to get
+ * @return amount of bytes needed to hold the output string, or a negative
+ * AVERROR on failure. If the returned value is bigger than buf_size, then the
+ * string was truncated.
+ */
+int av_channel_description(char* buf, size_t buf_size, enum AVChannel channel);
+
+/**
+ * bprint variant of av_channel_description().
+ *
+ * @note the string will be appended to the bprint buffer.
+ */
+void av_channel_description_bprint(struct AVBPrint* bp,
+ enum AVChannel channel_id);
+
+/**
+ * This is the inverse function of @ref av_channel_name().
+ *
+ * @return the channel with the given name
+ * AV_CHAN_NONE when name does not identify a known channel
+ */
+enum AVChannel av_channel_from_string(const char* name);
+
+/**
+ * Initialize a custom channel layout with the specified number of channels.
+ * The channel map will be allocated and the designation of all channels will
+ * be set to AV_CHAN_UNKNOWN.
+ *
+ * This is only a convenience helper function, a custom channel layout can also
+ * be constructed without using this.
+ *
+ * @param channel_layout the layout structure to be initialized
+ * @param nb_channels the number of channels
+ *
+ * @return 0 on success
+ * AVERROR(EINVAL) if the number of channels <= 0
+ * AVERROR(ENOMEM) if the channel map could not be allocated
+ */
+int av_channel_layout_custom_init(AVChannelLayout* channel_layout,
+ int nb_channels);
+
+/**
+ * Initialize a native channel layout from a bitmask indicating which channels
+ * are present.
+ *
+ * @param channel_layout the layout structure to be initialized
+ * @param mask bitmask describing the channel layout
+ *
+ * @return 0 on success
+ * AVERROR(EINVAL) for invalid mask values
+ */
+int av_channel_layout_from_mask(AVChannelLayout* channel_layout, uint64_t mask);
+
+/**
+ * Initialize a channel layout from a given string description.
+ * The input string can be represented by:
+ * - the formal channel layout name (returned by av_channel_layout_describe())
+ * - single or multiple channel names (returned by av_channel_name(), eg. "FL",
+ * or concatenated with "+", each optionally containing a custom name after
+ * a "@", eg. "FL@Left+FR@Right+LFE")
+ * - a decimal or hexadecimal value of a native channel layout (eg. "4" or
+ * "0x4")
+ * - the number of channels with default layout (eg. "4c")
+ * - the number of unordered channels (eg. "4C" or "4 channels")
+ * - the ambisonic order followed by optional non-diegetic channels (eg.
+ * "ambisonic 2+stereo")
+ * On error, the channel layout will remain uninitialized, but not necessarily
+ * untouched.
+ *
+ * @param channel_layout uninitialized channel layout for the result
+ * @param str string describing the channel layout
+ * @return 0 on success parsing the channel layout
+ * AVERROR(EINVAL) if an invalid channel layout string was provided
+ * AVERROR(ENOMEM) if there was not enough memory
+ */
+int av_channel_layout_from_string(AVChannelLayout* channel_layout,
+ const char* str);
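+
+/*
+ * Parsing sketch (illustrative; "stereo" is one of the standard layout
+ * names):
+ *
+ *   AVChannelLayout ch = { 0 };
+ *   if (av_channel_layout_from_string(&ch, "stereo") == 0) {
+ *     // ch.nb_channels == 2
+ *     av_channel_layout_uninit(&ch);
+ *   }
+ */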
+
+/**
+ * Get the default channel layout for a given number of channels.
+ *
+ * @param ch_layout the layout structure to be initialized
+ * @param nb_channels number of channels
+ */
+void av_channel_layout_default(AVChannelLayout* ch_layout, int nb_channels);
+
+/**
+ * Iterate over all standard channel layouts.
+ *
+ * @param opaque a pointer where libavutil will store the iteration state. Must
+ * point to NULL to start the iteration.
+ *
+ * @return the standard channel layout or NULL when the iteration is
+ * finished
+ */
+const AVChannelLayout* av_channel_layout_standard(void** opaque);
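+
+/*
+ * Iteration sketch (illustrative):
+ *
+ *   void *iter = NULL;
+ *   const AVChannelLayout *std;
+ *   while ((std = av_channel_layout_standard(&iter))) {
+ *     // inspect std->nb_channels, std->u.mask, ...
+ *   }
+ */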
+
+/**
+ * Free any allocated data in the channel layout and reset the channel
+ * count to 0.
+ *
+ * @param channel_layout the layout structure to be uninitialized
+ */
+void av_channel_layout_uninit(AVChannelLayout* channel_layout);
+
+/**
+ * Make a copy of a channel layout. This differs from just assigning src to dst
+ * in that it allocates and copies the map for AV_CHANNEL_ORDER_CUSTOM.
+ *
+ * @note the destination channel_layout will always be uninitialized before
+ * the copy.
+ *
+ * @param dst destination channel layout
+ * @param src source channel layout
+ * @return 0 on success, a negative AVERROR on error.
+ */
+int av_channel_layout_copy(AVChannelLayout* dst, const AVChannelLayout* src);
+
+/**
+ * Get a human-readable string describing the channel layout properties.
+ * The string will be in the same format that is accepted by
+ * @ref av_channel_layout_from_string(), allowing the same channel layout
+ * (except for opaque pointers) to be rebuilt.
+ *
+ * @param channel_layout channel layout to be described
+ * @param buf pre-allocated buffer where to put the generated string
+ * @param buf_size size in bytes of the buffer.
+ * @return amount of bytes needed to hold the output string, or a negative
+ * AVERROR on failure. If the returned value is bigger than buf_size, then the
+ * string was truncated.
+ */
+int av_channel_layout_describe(const AVChannelLayout* channel_layout, char* buf,
+ size_t buf_size);
+
+/**
+ * bprint variant of av_channel_layout_describe().
+ *
+ * @note the string will be appended to the bprint buffer.
+ * @return 0 on success, or a negative AVERROR value on failure.
+ */
+int av_channel_layout_describe_bprint(const AVChannelLayout* channel_layout,
+ struct AVBPrint* bp);
+
+/**
+ * Get the channel with the given index in a channel layout.
+ *
+ * @param channel_layout input channel layout
+ * @param idx index of the channel
+ * @return channel with the index idx in channel_layout on success or
+ * AV_CHAN_NONE on failure (if idx is not valid or the channel order is
+ * unspecified)
+ */
+enum AVChannel av_channel_layout_channel_from_index(
+ const AVChannelLayout* channel_layout, unsigned int idx);
+
+/**
+ * Get the index of a given channel in a channel layout. In case multiple
+ * channels are found, only the first match will be returned.
+ *
+ * @param channel_layout input channel layout
+ * @param channel the channel whose index to obtain
+ * @return index of channel in channel_layout on success or a negative number if
+ * channel is not present in channel_layout.
+ */
+int av_channel_layout_index_from_channel(const AVChannelLayout* channel_layout,
+ enum AVChannel channel);
+
+/**
+ * Get the index in a channel layout of a channel described by the given string.
+ * In case multiple channels are found, only the first match will be returned.
+ *
+ * This function accepts channel names in the same format as
+ * @ref av_channel_from_string().
+ *
+ * @param channel_layout input channel layout
+ * @param name string describing the channel whose index to obtain
+ * @return a channel index described by the given string, or a negative AVERROR
+ * value.
+ */
+int av_channel_layout_index_from_string(const AVChannelLayout* channel_layout,
+ const char* name);
+
+/**
+ * Get a channel described by the given string.
+ *
+ * This function accepts channel names in the same format as
+ * @ref av_channel_from_string().
+ *
+ * @param channel_layout input channel layout
+ * @param name string describing the channel to obtain
+ * @return a channel described by the given string in channel_layout on success
+ * or AV_CHAN_NONE on failure (if the string is not valid or the channel
+ * order is unspecified)
+ */
+enum AVChannel av_channel_layout_channel_from_string(
+ const AVChannelLayout* channel_layout, const char* name);
+
+/**
+ * Find out what channels from a given set are present in a channel layout,
+ * without regard for their positions.
+ *
+ * @param channel_layout input channel layout
+ * @param mask a combination of AV_CH_* representing a set of channels
+ * @return a bitfield representing all the channels from mask that are present
+ * in channel_layout
+ */
+uint64_t av_channel_layout_subset(const AVChannelLayout* channel_layout,
+ uint64_t mask);
+
+/**
+ * Check whether a channel layout is valid, i.e. can possibly describe audio
+ * data.
+ *
+ * @param channel_layout input channel layout
+ * @return 1 if channel_layout is valid, 0 otherwise.
+ */
+int av_channel_layout_check(const AVChannelLayout* channel_layout);
+
+/**
+ * Check whether two channel layouts are semantically the same, i.e. the same
+ * channels are present on the same positions in both.
+ *
+ * If one of the channel layouts is AV_CHANNEL_ORDER_UNSPEC, while the other is
+ * not, they are considered to be unequal. If both are AV_CHANNEL_ORDER_UNSPEC,
+ * they are considered equal iff the channel counts are the same in both.
+ *
+ * @param chl input channel layout
+ * @param chl1 input channel layout
+ * @return 0 if chl and chl1 are equal, 1 if they are not equal. A negative
+ * AVERROR code if one or both are invalid.
+ */
+int av_channel_layout_compare(const AVChannelLayout* chl,
+ const AVChannelLayout* chl1);
+
+/**
+ * The conversion must be lossless.
+ */
+#define AV_CHANNEL_LAYOUT_RETYPE_FLAG_LOSSLESS (1 << 0)
+
+/**
+ * The specified retype target order is ignored and the simplest possible
+ * (canonical) order is used for which the input layout can be losslessly
+ * represented.
+ */
+#define AV_CHANNEL_LAYOUT_RETYPE_FLAG_CANONICAL (1 << 1)
+
+/**
+ * Change the AVChannelOrder of a channel layout.
+ *
+ * Change of AVChannelOrder can be either lossless or lossy. In case of a
+ * lossless conversion all the channel designations and the associated channel
+ * names (if any) are kept. On a lossy conversion the channel names and channel
+ * designations might be lost depending on the capabilities of the desired
+ * AVChannelOrder. Note that some conversions are simply not possible in which
+ * case this function returns AVERROR(ENOSYS).
+ *
+ * The following conversions are supported:
+ *
+ * Any -> Custom : Always possible, always lossless.
+ * Any -> Unspecified: Always possible, lossless if channel designations
+ * are all unknown and channel names are not used, lossy otherwise.
+ * Custom -> Ambisonic : Possible if it contains ambisonic channels with
+ * optional non-diegetic channels in the end. Lossy if the channels have
+ * custom names, lossless otherwise.
+ * Custom -> Native : Possible if it contains native channels in native
+ * order. Lossy if the channels have custom names, lossless otherwise.
+ *
+ * On error this function keeps the original channel layout untouched.
+ *
+ * @param channel_layout channel layout which will be changed
+ * @param order the desired channel layout order
+ * @param flags a combination of AV_CHANNEL_LAYOUT_RETYPE_FLAG_* constants
+ * @return 0 if the conversion was successful and lossless or if the channel
+ * layout was already in the desired order
+ * >0 if the conversion was successful but lossy
+ * AVERROR(ENOSYS) if the conversion was not possible (or would be
+ * lossy and AV_CHANNEL_LAYOUT_RETYPE_FLAG_LOSSLESS was specified)
+ * AVERROR(EINVAL), AVERROR(ENOMEM) on error
+ */
+int av_channel_layout_retype(AVChannelLayout* channel_layout,
+ enum AVChannelOrder order, int flags);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_CHANNEL_LAYOUT_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/common.h b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/common.h
new file mode 100644
index 0000000000..fa8398889a
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/common.h
@@ -0,0 +1,587 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * common internal and external API header
+ */
+
+#ifndef AVUTIL_COMMON_H
+#define AVUTIL_COMMON_H
+
+#if defined(__cplusplus) && !defined(__STDC_CONSTANT_MACROS) && \
+ !defined(UINT64_C)
+# error missing -D__STDC_CONSTANT_MACROS / #define __STDC_CONSTANT_MACROS
+#endif
+
+#include <errno.h>
+#include <inttypes.h>
+#include <limits.h>
+#include <math.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "attributes.h"
+#include "error.h"
+#include "macros.h"
+
+#ifdef HAVE_AV_CONFIG_H
+# include "config.h"
+# include "intmath.h"
+# include "internal.h"
+#else
+# include "mem.h"
+#endif /* HAVE_AV_CONFIG_H */
+
+// rounded division & shift
+#define RSHIFT(a, b) \
+ ((a) > 0 ? ((a) + ((1 << (b)) >> 1)) >> (b) \
+ : ((a) + ((1 << (b)) >> 1) - 1) >> (b))
+/* assume b>0 */
+#define ROUNDED_DIV(a, b) \
+ (((a) >= 0 ? (a) + ((b) >> 1) : (a) - ((b) >> 1)) / (b))
+/* Fast a/(1<<b) rounded toward +inf. Assume a>=0 and b>=0 */
+#define AV_CEIL_RSHIFT(a, b) \
+ (!av_builtin_constant_p(b) ? -((-(a)) >> (b)) : ((a) + (1 << (b)) - 1) >> (b))
+/* Backwards compat. */
+#define FF_CEIL_RSHIFT AV_CEIL_RSHIFT
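+/* Example: AV_CEIL_RSHIFT(5, 1) == 3 (i.e. ceil(5 / 2)), while 5 >> 1 == 2. */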
+
+#define FFUDIV(a, b) (((a) > 0 ? (a) : (a) - (b) + 1) / (b))
+#define FFUMOD(a, b) ((a) - (b) * FFUDIV(a, b))
+
+/**
+ * Absolute value. Note: INT_MIN / INT64_MIN result in undefined behavior,
+ * as they are not representable as absolute values of their type. This is
+ * the same as with the standard *abs() functions.
+ * @see FFNABS()
+ */
+#define FFABS(a) ((a) >= 0 ? (a) : (-(a)))
+#define FFSIGN(a) ((a) > 0 ? 1 : -1)
+
+/**
+ * Negative absolute value.
+ * This works for all integers of all types.
+ * As with many macros, this evaluates its argument twice; the argument thus
+ * must not have a side effect, i.e. FFNABS(x++) has undefined behavior.
+ */
+#define FFNABS(a) ((a) <= 0 ? (a) : (-(a)))
+
+/**
+ * Unsigned Absolute value.
+ * This takes the absolute value of a signed int and returns it as an
+ * unsigned. This also works with INT_MIN, which would otherwise not be
+ * representable. As with many macros, this evaluates its argument twice.
+ */
+#define FFABSU(a) ((a) <= 0 ? -(unsigned)(a) : (unsigned)(a))
+#define FFABS64U(a) ((a) <= 0 ? -(uint64_t)(a) : (uint64_t)(a))
+
+/* misc math functions */
+
+#ifndef av_ceil_log2
+# define av_ceil_log2 av_ceil_log2_c
+#endif
+#ifndef av_clip
+# define av_clip av_clip_c
+#endif
+#ifndef av_clip64
+# define av_clip64 av_clip64_c
+#endif
+#ifndef av_clip_uint8
+# define av_clip_uint8 av_clip_uint8_c
+#endif
+#ifndef av_clip_int8
+# define av_clip_int8 av_clip_int8_c
+#endif
+#ifndef av_clip_uint16
+# define av_clip_uint16 av_clip_uint16_c
+#endif
+#ifndef av_clip_int16
+# define av_clip_int16 av_clip_int16_c
+#endif
+#ifndef av_clipl_int32
+# define av_clipl_int32 av_clipl_int32_c
+#endif
+#ifndef av_clip_intp2
+# define av_clip_intp2 av_clip_intp2_c
+#endif
+#ifndef av_clip_uintp2
+# define av_clip_uintp2 av_clip_uintp2_c
+#endif
+#ifndef av_mod_uintp2
+# define av_mod_uintp2 av_mod_uintp2_c
+#endif
+#ifndef av_sat_add32
+# define av_sat_add32 av_sat_add32_c
+#endif
+#ifndef av_sat_dadd32
+# define av_sat_dadd32 av_sat_dadd32_c
+#endif
+#ifndef av_sat_sub32
+# define av_sat_sub32 av_sat_sub32_c
+#endif
+#ifndef av_sat_dsub32
+# define av_sat_dsub32 av_sat_dsub32_c
+#endif
+#ifndef av_sat_add64
+# define av_sat_add64 av_sat_add64_c
+#endif
+#ifndef av_sat_sub64
+# define av_sat_sub64 av_sat_sub64_c
+#endif
+#ifndef av_clipf
+# define av_clipf av_clipf_c
+#endif
+#ifndef av_clipd
+# define av_clipd av_clipd_c
+#endif
+#ifndef av_popcount
+# define av_popcount av_popcount_c
+#endif
+#ifndef av_popcount64
+# define av_popcount64 av_popcount64_c
+#endif
+#ifndef av_parity
+# define av_parity av_parity_c
+#endif
+
+#ifndef av_log2
+av_const int av_log2(unsigned v);
+#endif
+
+#ifndef av_log2_16bit
+av_const int av_log2_16bit(unsigned v);
+#endif
+
+/**
+ * Clip a signed integer value into the amin-amax range.
+ * @param a value to clip
+ * @param amin minimum value of the clip range
+ * @param amax maximum value of the clip range
+ * @return clipped value
+ */
+static av_always_inline av_const int av_clip_c(int a, int amin, int amax) {
+#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
+ if (amin > amax) abort();
+#endif
+ if (a < amin)
+ return amin;
+ else if (a > amax)
+ return amax;
+ else
+ return a;
+}
+
+/**
+ * Clip a signed 64-bit integer value into the amin-amax range.
+ * @param a value to clip
+ * @param amin minimum value of the clip range
+ * @param amax maximum value of the clip range
+ * @return clipped value
+ */
+static av_always_inline av_const int64_t av_clip64_c(int64_t a, int64_t amin,
+ int64_t amax) {
+#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
+ if (amin > amax) abort();
+#endif
+ if (a < amin)
+ return amin;
+ else if (a > amax)
+ return amax;
+ else
+ return a;
+}
+
+/**
+ * Clip a signed integer value into the 0-255 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const uint8_t av_clip_uint8_c(int a) {
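+  /* Branchless saturation: if a has any bit set outside 0..255, (~a) >> 31
+   * yields 0 for negative a and -1 (i.e. 255 after truncation to uint8_t)
+   * for a > 255. */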
+ if (a & (~0xFF))
+ return (~a) >> 31;
+ else
+ return a;
+}
+
+/**
+ * Clip a signed integer value into the -128,127 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const int8_t av_clip_int8_c(int a) {
+ if ((a + 0x80U) & ~0xFF)
+ return (a >> 31) ^ 0x7F;
+ else
+ return a;
+}
+
+/**
+ * Clip a signed integer value into the 0-65535 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const uint16_t av_clip_uint16_c(int a) {
+ if (a & (~0xFFFF))
+ return (~a) >> 31;
+ else
+ return a;
+}
+
+/**
+ * Clip a signed integer value into the -32768,32767 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const int16_t av_clip_int16_c(int a) {
+ if ((a + 0x8000U) & ~0xFFFF)
+ return (a >> 31) ^ 0x7FFF;
+ else
+ return a;
+}
+
+/**
+ * Clip a signed 64-bit integer value into the -2147483648,2147483647 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const int32_t av_clipl_int32_c(int64_t a) {
+ if ((a + 0x80000000u) & ~UINT64_C(0xFFFFFFFF))
+ return (int32_t)((a >> 63) ^ 0x7FFFFFFF);
+ else
+ return (int32_t)a;
+}
+
+/**
+ * Clip a signed integer into the -(2^p),(2^p-1) range.
+ * @param a value to clip
+ * @param p bit position to clip at
+ * @return clipped value
+ */
+static av_always_inline av_const int av_clip_intp2_c(int a, int p) {
+ if (((unsigned)a + (1 << p)) & ~((2 << p) - 1))
+ return (a >> 31) ^ ((1 << p) - 1);
+ else
+ return a;
+}
+
+/**
+ * Clip a signed integer to an unsigned power of two range.
+ * @param a value to clip
+ * @param p bit position to clip at
+ * @return clipped value
+ */
+static av_always_inline av_const unsigned av_clip_uintp2_c(int a, int p) {
+ if (a & ~((1 << p) - 1))
+ return (~a) >> 31 & ((1 << p) - 1);
+ else
+ return a;
+}
+
+/**
+ * Clear high bits from an unsigned integer, starting at the specified bit
+ * position.
+ * @param a value to clip
+ * @param p bit position to clip at
+ * @return clipped value
+ */
+static av_always_inline av_const unsigned av_mod_uintp2_c(unsigned a,
+ unsigned p) {
+ return a & ((1U << p) - 1);
+}
+
+/**
+ * Add two signed 32-bit values with saturation.
+ *
+ * @param a one value
+ * @param b another value
+ * @return sum with signed saturation
+ */
+static av_always_inline int av_sat_add32_c(int a, int b) {
+ return av_clipl_int32((int64_t)a + b);
+}
+
+/**
+ * Add a doubled value to another value with saturation at both stages.
+ *
+ * @param a first value
+ * @param b value doubled and added to a
+ * @return sum sat(a + sat(2*b)) with signed saturation
+ */
+static av_always_inline int av_sat_dadd32_c(int a, int b) {
+ return av_sat_add32(a, av_sat_add32(b, b));
+}
+
+/**
+ * Subtract two signed 32-bit values with saturation.
+ *
+ * @param a one value
+ * @param b another value
+ * @return difference with signed saturation
+ */
+static av_always_inline int av_sat_sub32_c(int a, int b) {
+ return av_clipl_int32((int64_t)a - b);
+}
+
+/**
+ * Subtract a doubled value from another value with saturation at both stages.
+ *
+ * @param a first value
+ * @param b value doubled and subtracted from a
+ * @return difference sat(a - sat(2*b)) with signed saturation
+ */
+static av_always_inline int av_sat_dsub32_c(int a, int b) {
+ return av_sat_sub32(a, av_sat_add32(b, b));
+}
+
+/**
+ * Add two signed 64-bit values with saturation.
+ *
+ * @param a one value
+ * @param b another value
+ * @return sum with signed saturation
+ */
+static av_always_inline int64_t av_sat_add64_c(int64_t a, int64_t b) {
+#if (!defined(__INTEL_COMPILER) && AV_GCC_VERSION_AT_LEAST(5, 1)) || \
+ AV_HAS_BUILTIN(__builtin_add_overflow)
+ int64_t tmp;
+ return !__builtin_add_overflow(a, b, &tmp)
+ ? tmp
+ : (tmp < 0 ? INT64_MAX : INT64_MIN);
+#else
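+  /* Fallback without compiler builtins: compute the wrapped sum, then detect
+   * signed overflow. Overflow occurred iff a and b share a sign that s does
+   * not; in that case both (a ^ b) and (~s ^ b) have a clear sign bit, so
+   * their OR is non-negative. INT64_MAX ^ (b >> 63) then saturates to
+   * INT64_MAX for b >= 0 and to INT64_MIN for b < 0. */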
+ int64_t s = a + (uint64_t)b;
+ if ((int64_t)(a ^ b | ~s ^ b) >= 0) return INT64_MAX ^ (b >> 63);
+ return s;
+#endif
+}
+
+/**
+ * Subtract two signed 64-bit values with saturation.
+ *
+ * @param a one value
+ * @param b another value
+ * @return difference with signed saturation
+ */
+static av_always_inline int64_t av_sat_sub64_c(int64_t a, int64_t b) {
+#if (!defined(__INTEL_COMPILER) && AV_GCC_VERSION_AT_LEAST(5, 1)) || \
+ AV_HAS_BUILTIN(__builtin_sub_overflow)
+ int64_t tmp;
+ return !__builtin_sub_overflow(a, b, &tmp)
+ ? tmp
+ : (tmp < 0 ? INT64_MAX : INT64_MIN);
+#else
+ if (b <= 0 && a >= INT64_MAX + b) return INT64_MAX;
+ if (b >= 0 && a <= INT64_MIN + b) return INT64_MIN;
+ return a - b;
+#endif
+}
+
+/**
+ * Clip a float value into the amin-amax range.
+ * If a is NaN or -inf, amin will be returned.
+ * If a is +inf, amax will be returned.
+ * @param a value to clip
+ * @param amin minimum value of the clip range
+ * @param amax maximum value of the clip range
+ * @return clipped value
+ */
+static av_always_inline av_const float av_clipf_c(float a, float amin,
+ float amax) {
+#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
+ if (amin > amax) abort();
+#endif
+ return FFMIN(FFMAX(a, amin), amax);
+}
+
+/**
+ * Clip a double value into the amin-amax range.
+ * If a is NaN or -inf, amin will be returned.
+ * If a is +inf, amax will be returned.
+ * @param a value to clip
+ * @param amin minimum value of the clip range
+ * @param amax maximum value of the clip range
+ * @return clipped value
+ */
+static av_always_inline av_const double av_clipd_c(double a, double amin,
+ double amax) {
+#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
+ if (amin > amax) abort();
+#endif
+ return FFMIN(FFMAX(a, amin), amax);
+}
+
+/** Compute ceil(log2(x)).
+ * @param x value used to compute ceil(log2(x))
+ * @return computed ceiling of log2(x)
+ */
+static av_always_inline av_const int av_ceil_log2_c(int x) {
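+  // For x >= 2, av_log2(2 * (x - 1)) == floor(log2(2x - 2)) == ceil(log2(x));
+  // av_log2(0) is 0, which also covers x == 1.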
+ return av_log2((x - 1U) << 1);
+}
+
+/**
+ * Count number of bits set to one in x
+ * @param x value to count bits of
+ * @return the number of bits set to one in x
+ */
+static av_always_inline av_const int av_popcount_c(uint32_t x) {
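+  // SWAR popcount: accumulate bit counts in 2-bit, then 4-bit, then 8-bit
+  // lanes, and finally fold the byte sums together.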
+ x -= (x >> 1) & 0x55555555;
+ x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
+ x = (x + (x >> 4)) & 0x0F0F0F0F;
+ x += x >> 8;
+ return (x + (x >> 16)) & 0x3F;
+}
+
+/**
+ * Count number of bits set to one in x
+ * @param x value to count bits of
+ * @return the number of bits set to one in x
+ */
+static av_always_inline av_const int av_popcount64_c(uint64_t x) {
+ return av_popcount((uint32_t)x) + av_popcount((uint32_t)(x >> 32));
+}
+
+static av_always_inline av_const int av_parity_c(uint32_t v) {
+ return av_popcount(v) & 1;
+}
+
+/**
+ * Convert a UTF-8 character (up to 4 bytes) to its 32-bit UCS-4 encoded form.
+ *
+ * @param val Output value, must be an lvalue of type uint32_t.
+ * @param GET_BYTE Expression reading one byte from the input.
+ * Evaluated up to 7 times (4 for the currently
+ * assigned Unicode range). With a memory buffer
+ * input, this could be *ptr++, or if you want to make sure
+ * that *ptr stops at the end of a NULL terminated string then
+ * *ptr ? *ptr++ : 0
+ * @param ERROR Expression to be evaluated on invalid input,
+ * typically a goto statement.
+ *
+ * @warning ERROR should not contain a loop control statement which
+ * could interact with the internal while loop, and should force an
+ * exit from the macro code (e.g. through a goto or a return) in order
+ * to prevent undefined results.
+ */
+#define GET_UTF8(val, GET_BYTE, ERROR) \
+ val = (GET_BYTE); \
+ { \
+ uint32_t top = (val & 128) >> 1; \
+ if ((val & 0xc0) == 0x80 || val >= 0xFE) { \
+ ERROR \
+ } \
+ while (val & top) { \
+ unsigned int tmp = (GET_BYTE)-128; \
+ if (tmp >> 6) { \
+ ERROR \
+ } \
+ val = (val << 6) + tmp; \
+ top <<= 5; \
+ } \
+ val &= (top << 1) - 1; \
+ }
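+
+/**
+ * Usage sketch for GET_UTF8 (illustrative only; `buf` and the `fail` label
+ * are assumptions, not part of this header):
+ * @code
+ * const uint8_t *p = buf;
+ * uint32_t cp;
+ * GET_UTF8(cp, *p++, goto fail;)
+ * @endcode
+ */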
+
+/**
+ * Convert a UTF-16 character (2 or 4 bytes) to its 32-bit UCS-4 encoded form.
+ *
+ * @param val Output value, must be an lvalue of type uint32_t.
+ * @param GET_16BIT Expression returning two bytes of UTF-16 data converted
+ * to native byte order. Evaluated one or two times.
+ * @param ERROR Expression to be evaluated on invalid input,
+ * typically a goto statement.
+ */
+#define GET_UTF16(val, GET_16BIT, ERROR) \
+ val = (GET_16BIT); \
+ { \
+ unsigned int hi = val - 0xD800; \
+ if (hi < 0x800) { \
+ val = (GET_16BIT)-0xDC00; \
+ if (val > 0x3FFU || hi > 0x3FFU) { \
+ ERROR \
+ } \
+ val += (hi << 10) + 0x10000; \
+ } \
+ }
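+
+/**
+ * Usage sketch for GET_UTF16 (illustrative only; `wbuf` holds native-order
+ * UTF-16 units and `fail` is an assumed label):
+ * @code
+ * const uint16_t *w = wbuf;
+ * uint32_t cp;
+ * GET_UTF16(cp, *w++, goto fail;)
+ * @endcode
+ */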
+
+/**
+ * @def PUT_UTF8(val, tmp, PUT_BYTE)
+ * Convert a 32-bit Unicode character to its UTF-8 encoded form (up to 4 bytes
+ * long).
+ * @param val is an input-only argument and should be of type uint32_t. It holds
+ * a UCS-4 encoded Unicode character that is to be converted to UTF-8. If
+ * val is given as a function it is executed only once.
+ * @param tmp is a temporary variable and should be of type uint8_t. It
+ * represents an intermediate value during conversion that is to be
+ * output by PUT_BYTE.
+ * @param PUT_BYTE writes the converted UTF-8 bytes to any proper destination.
+ * It could be a function or a statement, and uses tmp as the input byte.
+ * For example, PUT_BYTE could be "*output++ = tmp;". PUT_BYTE will be
+ * executed up to 4 times for values in the valid UTF-8 range and up to
+ * 7 times in the general case, depending on the length of the converted
+ * Unicode character.
+ */
+#define PUT_UTF8(val, tmp, PUT_BYTE) \
+ { \
+ int bytes, shift; \
+ uint32_t in = val; \
+ if (in < 0x80) { \
+ tmp = in; \
+ PUT_BYTE \
+ } else { \
+ bytes = (av_log2(in) + 4) / 5; \
+ shift = (bytes - 1) * 6; \
+ tmp = (256 - (256 >> bytes)) | (in >> shift); \
+ PUT_BYTE \
+ while (shift >= 6) { \
+ shift -= 6; \
+ tmp = 0x80 | ((in >> shift) & 0x3f); \
+ PUT_BYTE \
+ } \
+ } \
+ }
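+
+/**
+ * Usage sketch for PUT_UTF8 (illustrative only; `out` is an assumption):
+ * @code
+ * uint8_t out[4], *q = out, tmp;
+ * PUT_UTF8(0x20AC, tmp, *q++ = tmp;)  // U+20AC -> 0xE2 0x82 0xAC
+ * @endcode
+ */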
+
+/**
+ * @def PUT_UTF16(val, tmp, PUT_16BIT)
+ * Convert a 32-bit Unicode character to its UTF-16 encoded form (2 or 4 bytes).
+ * @param val is an input-only argument and should be of type uint32_t. It holds
+ * a UCS-4 encoded Unicode character that is to be converted to UTF-16. If
+ * val is given as a function it is executed only once.
+ * @param tmp is a temporary variable and should be of type uint16_t. It
+ * represents an intermediate value during conversion that is to be
+ * output by PUT_16BIT.
+ * @param PUT_16BIT writes the converted UTF-16 data to any proper destination
+ * in desired endianness. It could be a function or a statement, and uses tmp
+ * as the input code unit. For example, PUT_16BIT could be "*output++ = tmp;".
+ * PUT_16BIT will be executed 1 or 2 times depending on the input character.
+ */
+#define PUT_UTF16(val, tmp, PUT_16BIT) \
+ { \
+ uint32_t in = val; \
+ if (in < 0x10000) { \
+ tmp = in; \
+ PUT_16BIT \
+ } else { \
+ tmp = 0xD800 | ((in - 0x10000) >> 10); \
+ PUT_16BIT \
+ tmp = 0xDC00 | ((in - 0x10000) & 0x3FF); \
+ PUT_16BIT \
+ } \
+ }
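+
+/**
+ * Usage sketch for PUT_UTF16 (illustrative only; `out` is an assumption):
+ * @code
+ * uint16_t out[2], *q = out, tmp;
+ * PUT_UTF16(0x1F600, tmp, *q++ = tmp;)  // U+1F600 -> surrogates 0xD83D 0xDE00
+ * @endcode
+ */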
+
+#endif /* AVUTIL_COMMON_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/cpu.h b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/cpu.h
new file mode 100644
index 0000000000..3eb950fdd0
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/cpu.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_CPU_H
+#define AVUTIL_CPU_H
+
+#include <stddef.h>
+
+#define AV_CPU_FLAG_FORCE 0x80000000 /* force usage of selected flags (OR) */
+
+/* lower 16 bits - CPU features */
+#define AV_CPU_FLAG_MMX 0x0001 ///< standard MMX
+#define AV_CPU_FLAG_MMXEXT 0x0002 ///< SSE integer functions or AMD MMX ext
+#define AV_CPU_FLAG_MMX2 0x0002 ///< SSE integer functions or AMD MMX ext
+#define AV_CPU_FLAG_3DNOW 0x0004 ///< AMD 3DNOW
+#define AV_CPU_FLAG_SSE 0x0008 ///< SSE functions
+#define AV_CPU_FLAG_SSE2 0x0010 ///< PIV SSE2 functions
+#define AV_CPU_FLAG_SSE2SLOW \
+ 0x40000000 ///< SSE2 supported, but usually not faster
+ ///< than regular MMX/SSE (e.g. Core1)
+#define AV_CPU_FLAG_3DNOWEXT 0x0020 ///< AMD 3DNowExt
+#define AV_CPU_FLAG_SSE3 0x0040 ///< Prescott SSE3 functions
+#define AV_CPU_FLAG_SSE3SLOW \
+ 0x20000000 ///< SSE3 supported, but usually not faster
+ ///< than regular MMX/SSE (e.g. Core1)
+#define AV_CPU_FLAG_SSSE3 0x0080 ///< Conroe SSSE3 functions
+#define AV_CPU_FLAG_SSSE3SLOW \
+ 0x4000000 ///< SSSE3 supported, but usually not faster
+#define AV_CPU_FLAG_ATOM \
+ 0x10000000 ///< Atom processor, some SSSE3 instructions are slower
+#define AV_CPU_FLAG_SSE4 0x0100 ///< Penryn SSE4.1 functions
+#define AV_CPU_FLAG_SSE42 0x0200 ///< Nehalem SSE4.2 functions
+#define AV_CPU_FLAG_AESNI 0x80000 ///< Advanced Encryption Standard functions
+#define AV_CPU_FLAG_AVX \
+ 0x4000 ///< AVX functions: requires OS support even if YMM registers aren't
+ ///< used
+#define AV_CPU_FLAG_AVXSLOW \
+ 0x8000000 ///< AVX supported, but slow when using YMM registers (e.g.
+ ///< Bulldozer)
+#define AV_CPU_FLAG_XOP 0x0400 ///< Bulldozer XOP functions
+#define AV_CPU_FLAG_FMA4 0x0800 ///< Bulldozer FMA4 functions
+#define AV_CPU_FLAG_CMOV 0x1000 ///< supports cmov instruction
+#define AV_CPU_FLAG_AVX2 \
+ 0x8000 ///< AVX2 functions: requires OS support even if YMM registers aren't
+ ///< used
+#define AV_CPU_FLAG_FMA3 0x10000 ///< Haswell FMA3 functions
+#define AV_CPU_FLAG_BMI1 0x20000 ///< Bit Manipulation Instruction Set 1
+#define AV_CPU_FLAG_BMI2 0x40000 ///< Bit Manipulation Instruction Set 2
+#define AV_CPU_FLAG_AVX512 \
+ 0x100000 ///< AVX-512 functions: requires OS support even if YMM/ZMM
+ ///< registers aren't used
+#define AV_CPU_FLAG_AVX512ICL \
+ 0x200000 ///< F/CD/BW/DQ/VL/VNNI/IFMA/VBMI/VBMI2/VPOPCNTDQ/BITALG/GFNI/VAES/VPCLMULQDQ
+#define AV_CPU_FLAG_SLOW_GATHER 0x2000000 ///< CPU has slow gathers.
+
+#define AV_CPU_FLAG_ALTIVEC 0x0001 ///< standard
+#define AV_CPU_FLAG_VSX 0x0002 ///< ISA 2.06
+#define AV_CPU_FLAG_POWER8 0x0004 ///< ISA 2.07
+
+#define AV_CPU_FLAG_ARMV5TE (1 << 0)
+#define AV_CPU_FLAG_ARMV6 (1 << 1)
+#define AV_CPU_FLAG_ARMV6T2 (1 << 2)
+#define AV_CPU_FLAG_VFP (1 << 3)
+#define AV_CPU_FLAG_VFPV3 (1 << 4)
+#define AV_CPU_FLAG_NEON (1 << 5)
+#define AV_CPU_FLAG_ARMV8 (1 << 6)
+#define AV_CPU_FLAG_VFP_VM \
+ (1 << 7) ///< VFPv2 vector mode, deprecated in ARMv7-A and unavailable in
+ ///< various CPUs implementations
+#define AV_CPU_FLAG_DOTPROD (1 << 8)
+#define AV_CPU_FLAG_I8MM (1 << 9)
+#define AV_CPU_FLAG_SETEND (1 << 16)
+
+#define AV_CPU_FLAG_MMI (1 << 0)
+#define AV_CPU_FLAG_MSA (1 << 1)
+
+// Loongarch SIMD extension.
+#define AV_CPU_FLAG_LSX (1 << 0)
+#define AV_CPU_FLAG_LASX (1 << 1)
+
+// RISC-V extensions
+#define AV_CPU_FLAG_RVI (1 << 0) ///< I (full GPR bank)
+#define AV_CPU_FLAG_RVF (1 << 1) ///< F (single precision FP)
+#define AV_CPU_FLAG_RVD (1 << 2) ///< D (double precision FP)
+#define AV_CPU_FLAG_RVV_I32 (1 << 3)   ///< Vectors of 8/16/32-bit ints
+#define AV_CPU_FLAG_RVV_F32 (1 << 4)   ///< Vectors of floats
+#define AV_CPU_FLAG_RVV_I64 (1 << 5)   ///< Vectors of 64-bit ints
+#define AV_CPU_FLAG_RVV_F64 (1 << 6)   ///< Vectors of doubles
+#define AV_CPU_FLAG_RVB_BASIC (1 << 7) ///< Basic bit-manipulations
+#define AV_CPU_FLAG_RVB_ADDR (1 << 8) ///< Address bit-manipulations
+
+/**
+ * Return the flags which specify extensions supported by the CPU.
+ * The returned value is affected by av_force_cpu_flags() if that was used
+ * before. So av_get_cpu_flags() can easily be used in an application to
+ * detect the enabled cpu flags.
+ */
+int av_get_cpu_flags(void);
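+
+/**
+ * Usage sketch (illustrative; the optimized code path is an assumption):
+ * @code
+ * if (av_get_cpu_flags() & AV_CPU_FLAG_AVX2) {
+ *   // take an AVX2-optimized code path
+ * }
+ * @endcode
+ */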
+
+/**
+ * Disables cpu detection and forces the specified flags.
+ * -1 is a special case that disables forcing of specific flags.
+ */
+void av_force_cpu_flags(int flags);
+
+/**
+ * Parse CPU caps from a string and update the given AV_CPU_* flags based on
+ * that.
+ *
+ * @return negative on error.
+ */
+int av_parse_cpu_caps(unsigned* flags, const char* s);
+
+/**
+ * @return the number of logical CPU cores present.
+ */
+int av_cpu_count(void);
+
+/**
+ * Overrides cpu count detection and forces the specified count.
+ * Count < 1 disables forcing of a specific count.
+ */
+void av_cpu_force_count(int count);
+
+/**
+ * Get the maximum data alignment that may be required by FFmpeg.
+ *
+ * Note that this is affected by the build configuration and the CPU flags mask,
+ * so e.g. if the CPU supports AVX, but libavutil has been built with
+ * --disable-avx or the AV_CPU_FLAG_AVX flag has been disabled through
+ * av_set_cpu_flags_mask(), then this function will behave as if AVX is not
+ * present.
+ */
+size_t av_cpu_max_align(void);
+
+#endif /* AVUTIL_CPU_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/dict.h b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/dict.h
new file mode 100644
index 0000000000..967a1c8041
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/dict.h
@@ -0,0 +1,259 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Public dictionary API.
+ * @deprecated
+ * AVDictionary is provided for compatibility with libav. It is inefficient in
+ * both implementation and API; it does not scale and is extremely slow with
+ * large dictionaries.
+ * It is recommended that new code use our tree container from tree.c/h where
+ * applicable, which uses AVL trees to achieve O(log n) performance.
+ */
+
+#ifndef AVUTIL_DICT_H
+#define AVUTIL_DICT_H
+
+#include <stdint.h>
+
+/**
+ * @addtogroup lavu_dict AVDictionary
+ * @ingroup lavu_data
+ *
+ * @brief Simple key:value store
+ *
+ * @{
+ * Dictionaries are used for storing key-value pairs.
+ *
+ * - To **create an AVDictionary**, simply pass an address of a NULL
+ * pointer to av_dict_set(). NULL can be used as an empty dictionary
+ * wherever a pointer to an AVDictionary is required.
+ * - To **insert an entry**, use av_dict_set().
+ * - Use av_dict_get() to **retrieve an entry**.
+ * - To **iterate over all entries**, use av_dict_iterate().
+ * - In order to **free the dictionary and all its contents**, use
+ av_dict_free().
+ *
+ @code
+ AVDictionary *d = NULL; // "create" an empty dictionary
+ AVDictionaryEntry *t = NULL;
+
+ av_dict_set(&d, "foo", "bar", 0); // add an entry
+
+ char *k = av_strdup("key"); // if your strings are already allocated,
+ char *v = av_strdup("value"); // you can avoid copying them like this
+ av_dict_set(&d, k, v, AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL);
+
+ while ((t = av_dict_iterate(d, t))) {
+ <....> // iterate over all entries in d
+ }
+ av_dict_free(&d);
+ @endcode
+ */
+
+/**
+ * @name AVDictionary Flags
+ * Flags that influence behavior of the matching of keys or insertion to the
+ * dictionary.
+ * @{
+ */
+#define AV_DICT_MATCH_CASE \
+ 1 /**< Only get an entry with exact-case key match. Only relevant in \
+ av_dict_get(). */
+#define AV_DICT_IGNORE_SUFFIX \
+ 2 /**< Return first entry in a dictionary whose first part corresponds to \
+ the search key, ignoring the suffix of the found key string. Only \
+ relevant in av_dict_get(). */
+#define AV_DICT_DONT_STRDUP_KEY \
+ 4 /**< Take ownership of a key that's been \
+ allocated with av_malloc() or another memory allocation function. */
+#define AV_DICT_DONT_STRDUP_VAL \
+ 8 /**< Take ownership of a value that's been \
+ allocated with av_malloc() or another memory allocation function. */
+#define AV_DICT_DONT_OVERWRITE 16 /**< Don't overwrite existing entries. */
+#define AV_DICT_APPEND \
+ 32 /**< If the entry already exists, append to it. Note that no \
+ delimiter is added, the strings are simply concatenated. */
+#define AV_DICT_MULTIKEY \
+  64 /**< Allow storing several equal keys in the dictionary */
+/**
+ * @}
+ */
+
+typedef struct AVDictionaryEntry {
+ char* key;
+ char* value;
+} AVDictionaryEntry;
+
+typedef struct AVDictionary AVDictionary;
+
+/**
+ * Get a dictionary entry with matching key.
+ *
+ * The returned entry key or value must not be changed, or it will
+ * cause undefined behavior.
+ *
+ * @param prev Set to the previous matching element to find the next.
+ * If set to NULL the first matching element is returned.
+ * @param key Matching key
+ * @param flags A collection of AV_DICT_* flags controlling how the
+ * entry is retrieved
+ *
+ * @return Found entry or NULL in case no matching entry was found in the
+ * dictionary
+ */
+AVDictionaryEntry* av_dict_get(const AVDictionary* m, const char* key,
+ const AVDictionaryEntry* prev, int flags);
+
+/**
+ * Iterate over a dictionary
+ *
+ * Iterates through all entries in the dictionary.
+ *
+ * @warning The returned AVDictionaryEntry key/value must not be changed.
+ *
+ * @warning As av_dict_set() invalidates all previous entries returned
+ * by this function, it must not be called while iterating over the dict.
+ *
+ * Typical usage:
+ * @code
+ * const AVDictionaryEntry *e = NULL;
+ * while ((e = av_dict_iterate(m, e))) {
+ * // ...
+ * }
+ * @endcode
+ *
+ * @param m The dictionary to iterate over
+ * @param prev Pointer to the previous AVDictionaryEntry, NULL initially
+ *
+ * @retval AVDictionaryEntry* The next element in the dictionary
+ * @retval NULL No more elements in the dictionary
+ */
+const AVDictionaryEntry* av_dict_iterate(const AVDictionary* m,
+ const AVDictionaryEntry* prev);
+
+/**
+ * Get number of entries in dictionary.
+ *
+ * @param m dictionary
+ * @return number of entries in dictionary
+ */
+int av_dict_count(const AVDictionary* m);
+
+/**
+ * Set the given entry in *pm, overwriting an existing entry.
+ *
+ * Note: If AV_DICT_DONT_STRDUP_KEY or AV_DICT_DONT_STRDUP_VAL is set,
+ * these arguments will be freed on error.
+ *
+ * @warning Adding a new entry to a dictionary invalidates all existing entries
+ * previously returned with av_dict_get() or av_dict_iterate().
+ *
+ * @param pm Pointer to a pointer to a dictionary struct. If *pm is NULL
+ * a dictionary struct is allocated and put in *pm.
+ * @param key Entry key to add to *pm (will either be av_strduped or added
+ * as a new key depending on flags)
+ * @param value Entry value to add to *pm (will be av_strduped or added as a
+ * new key depending on flags). Passing a NULL value will cause an existing
+ * entry to be deleted.
+ *
+ * @return >= 0 on success otherwise an error code <0
+ */
+int av_dict_set(AVDictionary** pm, const char* key, const char* value,
+ int flags);
+
+/**
+ * Convenience wrapper for av_dict_set() that converts the value to a string
+ * and stores it.
+ *
+ * Note: If ::AV_DICT_DONT_STRDUP_KEY is set, key will be freed on error.
+ */
+int av_dict_set_int(AVDictionary** pm, const char* key, int64_t value,
+ int flags);
+
+/**
+ * Parse the key/value pairs list and add the parsed entries to a dictionary.
+ *
+ * In case of failure, all the successfully set entries are stored in
+ * *pm. You may need to manually free the created dictionary.
+ *
+ * @param key_val_sep A 0-terminated list of characters used to separate
+ * key from value
+ * @param pairs_sep A 0-terminated list of characters used to separate
+ * two pairs from each other
+ * @param flags Flags to use when adding to the dictionary.
+ * ::AV_DICT_DONT_STRDUP_KEY and ::AV_DICT_DONT_STRDUP_VAL
+ * are ignored since the key/value tokens will always
+ * be duplicated.
+ *
+ * @return 0 on success, negative AVERROR code on failure
+ */
+int av_dict_parse_string(AVDictionary** pm, const char* str,
+ const char* key_val_sep, const char* pairs_sep,
+ int flags);
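+
+/**
+ * Usage sketch (illustrative): parse "a=1:b=2" with '=' and ':' separators.
+ * @code
+ * AVDictionary *opts = NULL;
+ * if (av_dict_parse_string(&opts, "a=1:b=2", "=", ":", 0) < 0)
+ *   av_dict_free(&opts);  // free any successfully set entries on failure
+ * @endcode
+ */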
+
+/**
+ * Copy entries from one AVDictionary struct into another.
+ *
+ * @note Metadata is read using the ::AV_DICT_IGNORE_SUFFIX flag
+ *
+ * @param dst Pointer to a pointer to an AVDictionary struct to copy into. If
+ * *dst is NULL, this function will allocate a struct for you and put it in *dst
+ * @param src Pointer to the source AVDictionary struct to copy items from.
+ * @param flags Flags to use when setting entries in *dst
+ *
+ * @return 0 on success, negative AVERROR code on failure. If dst was allocated
+ * by this function, callers should free the associated memory.
+ */
+int av_dict_copy(AVDictionary** dst, const AVDictionary* src, int flags);
+
+/**
+ * Free all the memory allocated for an AVDictionary struct
+ * and all keys and values.
+ */
+void av_dict_free(AVDictionary** m);
+
+/**
+ * Get dictionary entries as a string.
+ *
+ * Create a string containing dictionary's entries.
+ * Such string may be passed back to av_dict_parse_string().
+ * @note String is escaped with backslashes ('\').
+ *
+ * @warning Separators can be neither '\\' nor '\0', and the two separators
+ * must differ from each other.
+ *
+ * @param[in] m The dictionary
+ * @param[out] buffer Pointer to buffer that will be allocated with
+ * string containing entries. Buffer must be freed by the caller when it is no
+ * longer needed.
+ * @param[in] key_val_sep Character used to separate key from value
+ * @param[in] pairs_sep Character used to separate two pairs from each
+ * other
+ *
+ * @return >= 0 on success, negative on error
+ */
+int av_dict_get_string(const AVDictionary* m, char** buffer,
+ const char key_val_sep, const char pairs_sep);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_DICT_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/error.h b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/error.h
new file mode 100644
index 0000000000..74af5b1534
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/error.h
@@ -0,0 +1,158 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * error code definitions
+ */
+
+#ifndef AVUTIL_ERROR_H
+#define AVUTIL_ERROR_H
+
+#include <errno.h>
+#include <stddef.h>
+
+#include "macros.h"
+
+/**
+ * @addtogroup lavu_error
+ *
+ * @{
+ */
+
+/* error handling */
+#if EDOM > 0
+# define AVERROR(e) \
+ (-(e)) ///< Returns a negative error code from a POSIX error code, to
+ ///< return from library functions.
+# define AVUNERROR(e) \
+ (-(e)) ///< Returns a POSIX error code from a library function error return
+ ///< value.
+#else
+/* Some platforms have E* and errno already negated. */
+# define AVERROR(e) (e)
+# define AVUNERROR(e) (e)
+#endif
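+
+/**
+ * Usage sketch: AVERROR() maps a POSIX error code to the library's negative
+ * convention, and AVUNERROR() reverses the mapping.
+ * @code
+ * int ret = AVERROR(ENOMEM);  // negative library-style error code
+ * int err = AVUNERROR(ret);   // back to the POSIX value, here ENOMEM
+ * @endcode
+ */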
+
+#define FFERRTAG(a, b, c, d) (-(int)MKTAG(a, b, c, d))
+
+#define AVERROR_BSF_NOT_FOUND \
+ FFERRTAG(0xF8, 'B', 'S', 'F') ///< Bitstream filter not found
+#define AVERROR_BUG \
+ FFERRTAG('B', 'U', 'G', '!') ///< Internal bug, also see AVERROR_BUG2
+#define AVERROR_BUFFER_TOO_SMALL \
+ FFERRTAG('B', 'U', 'F', 'S') ///< Buffer too small
+#define AVERROR_DECODER_NOT_FOUND \
+ FFERRTAG(0xF8, 'D', 'E', 'C') ///< Decoder not found
+#define AVERROR_DEMUXER_NOT_FOUND \
+ FFERRTAG(0xF8, 'D', 'E', 'M') ///< Demuxer not found
+#define AVERROR_ENCODER_NOT_FOUND \
+ FFERRTAG(0xF8, 'E', 'N', 'C') ///< Encoder not found
+#define AVERROR_EOF FFERRTAG('E', 'O', 'F', ' ') ///< End of file
+#define AVERROR_EXIT \
+ FFERRTAG('E', 'X', 'I', 'T') ///< Immediate exit was requested; the called
+ ///< function should not be restarted
+#define AVERROR_EXTERNAL \
+ FFERRTAG('E', 'X', 'T', ' ') ///< Generic error in an external library
+#define AVERROR_FILTER_NOT_FOUND \
+ FFERRTAG(0xF8, 'F', 'I', 'L') ///< Filter not found
+#define AVERROR_INVALIDDATA \
+ FFERRTAG('I', 'N', 'D', 'A') ///< Invalid data found when processing input
+#define AVERROR_MUXER_NOT_FOUND \
+ FFERRTAG(0xF8, 'M', 'U', 'X') ///< Muxer not found
+#define AVERROR_OPTION_NOT_FOUND \
+ FFERRTAG(0xF8, 'O', 'P', 'T') ///< Option not found
+#define AVERROR_PATCHWELCOME \
+ FFERRTAG('P', 'A', 'W', \
+ 'E') ///< Not yet implemented in FFmpeg, patches welcome
+#define AVERROR_PROTOCOL_NOT_FOUND \
+ FFERRTAG(0xF8, 'P', 'R', 'O') ///< Protocol not found
+
+#define AVERROR_STREAM_NOT_FOUND \
+ FFERRTAG(0xF8, 'S', 'T', 'R') ///< Stream not found
+/**
+ * This is semantically identical to AVERROR_BUG;
+ * it was introduced in Libav after our AVERROR_BUG and with a modified
+ * value.
+ */
+#define AVERROR_BUG2 FFERRTAG('B', 'U', 'G', ' ')
+#define AVERROR_UNKNOWN \
+ FFERRTAG('U', 'N', 'K', \
+ 'N') ///< Unknown error, typically from an external library
+#define AVERROR_EXPERIMENTAL \
+ (-0x2bb2afa8) ///< Requested feature is flagged experimental. Set
+ ///< strict_std_compliance if you really want to use it.
+#define AVERROR_INPUT_CHANGED \
+ (-0x636e6701) ///< Input changed between calls. Reconfiguration is required.
+ ///< (can be OR-ed with AVERROR_OUTPUT_CHANGED)
+#define AVERROR_OUTPUT_CHANGED \
+ (-0x636e6702) ///< Output changed between calls. Reconfiguration is required.
+ ///< (can be OR-ed with AVERROR_INPUT_CHANGED)
+/* HTTP & RTSP errors */
+#define AVERROR_HTTP_BAD_REQUEST FFERRTAG(0xF8, '4', '0', '0')
+#define AVERROR_HTTP_UNAUTHORIZED FFERRTAG(0xF8, '4', '0', '1')
+#define AVERROR_HTTP_FORBIDDEN FFERRTAG(0xF8, '4', '0', '3')
+#define AVERROR_HTTP_NOT_FOUND FFERRTAG(0xF8, '4', '0', '4')
+#define AVERROR_HTTP_OTHER_4XX FFERRTAG(0xF8, '4', 'X', 'X')
+#define AVERROR_HTTP_SERVER_ERROR FFERRTAG(0xF8, '5', 'X', 'X')
+
+#define AV_ERROR_MAX_STRING_SIZE 64
+
+/**
+ * Put a description of the AVERROR code errnum in errbuf.
+ * In case of failure the global variable errno is set to indicate the
+ * error. Even in case of failure av_strerror() will print a generic
+ * error message indicating the errnum provided to errbuf.
+ *
+ * @param errnum error code to describe
+ * @param errbuf buffer to which description is written
+ * @param errbuf_size the size in bytes of errbuf
+ * @return 0 on success, a negative value if a description for errnum
+ * cannot be found
+ */
+int av_strerror(int errnum, char* errbuf, size_t errbuf_size);
+
+/**
+ * Fill the provided buffer with a string containing an error string
+ * corresponding to the AVERROR code errnum.
+ *
+ * @param errbuf a buffer
+ * @param errbuf_size size in bytes of errbuf
+ * @param errnum error code to describe
+ * @return the buffer in input, filled with the error description
+ * @see av_strerror()
+ */
+static inline char* av_make_error_string(char* errbuf, size_t errbuf_size,
+ int errnum) {
+ av_strerror(errnum, errbuf, errbuf_size);
+ return errbuf;
+}
+
+/**
+ * Convenience macro, the return value should be used only directly in
+ * function arguments but never stand-alone.
+ */
+#define av_err2str(errnum) \
+ av_make_error_string((char[AV_ERROR_MAX_STRING_SIZE]){0}, \
+ AV_ERROR_MAX_STRING_SIZE, errnum)
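+
+/**
+ * Usage sketch (illustrative): log a readable message for an AVERROR code.
+ * @code
+ * int ret = AVERROR(EINVAL);
+ * fprintf(stderr, "operation failed: %s\n", av_err2str(ret));
+ * @endcode
+ * Note: av_err2str() relies on a C compound literal; C++ callers typically
+ * use av_make_error_string() with a buffer of their own instead.
+ */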
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_ERROR_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/frame.h b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/frame.h
new file mode 100644
index 0000000000..82abfb925d
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/frame.h
@@ -0,0 +1,1112 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * @ingroup lavu_frame
+ * reference-counted frame API
+ */
+
+#ifndef AVUTIL_FRAME_H
+#define AVUTIL_FRAME_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "avutil.h"
+#include "buffer.h"
+#include "channel_layout.h"
+#include "dict.h"
+#include "rational.h"
+#include "samplefmt.h"
+#include "pixfmt.h"
+#include "version.h"
+
+/**
+ * @defgroup lavu_frame AVFrame
+ * @ingroup lavu_data
+ *
+ * @{
+ * AVFrame is an abstraction for reference-counted raw multimedia data.
+ */
+
+enum AVFrameSideDataType {
+ /**
+ * The data is the AVPanScan struct defined in libavcodec.
+ */
+ AV_FRAME_DATA_PANSCAN,
+ /**
+ * ATSC A53 Part 4 Closed Captions.
+ * A53 CC bitstream is stored as uint8_t in AVFrameSideData.data.
+ * The number of bytes of CC data is AVFrameSideData.size.
+ */
+ AV_FRAME_DATA_A53_CC,
+ /**
+ * Stereoscopic 3d metadata.
+ * The data is the AVStereo3D struct defined in libavutil/stereo3d.h.
+ */
+ AV_FRAME_DATA_STEREO3D,
+ /**
+ * The data is the AVMatrixEncoding enum defined in
+ * libavutil/channel_layout.h.
+ */
+ AV_FRAME_DATA_MATRIXENCODING,
+ /**
+ * Metadata relevant to a downmix procedure.
+ * The data is the AVDownmixInfo struct defined in libavutil/downmix_info.h.
+ */
+ AV_FRAME_DATA_DOWNMIX_INFO,
+ /**
+ * ReplayGain information in the form of the AVReplayGain struct.
+ */
+ AV_FRAME_DATA_REPLAYGAIN,
+ /**
+ * This side data contains a 3x3 transformation matrix describing an affine
+ * transformation that needs to be applied to the frame for correct
+ * presentation.
+ *
+ * See libavutil/display.h for a detailed description of the data.
+ */
+ AV_FRAME_DATA_DISPLAYMATRIX,
+ /**
+ * Active Format Description data consisting of a single byte as specified
+ * in ETSI TS 101 154 using AVActiveFormatDescription enum.
+ */
+ AV_FRAME_DATA_AFD,
+ /**
+ * Motion vectors exported by some codecs (on demand through the export_mvs
+ * flag set in the libavcodec AVCodecContext flags2 option).
+ * The data is the AVMotionVector struct defined in
+ * libavutil/motion_vector.h.
+ */
+ AV_FRAME_DATA_MOTION_VECTORS,
+ /**
+   * Recommends skipping the specified number of samples. This is exported
+ * only if the "skip_manual" AVOption is set in libavcodec.
+ * This has the same format as AV_PKT_DATA_SKIP_SAMPLES.
+ * @code
+ * u32le number of samples to skip from start of this packet
+ * u32le number of samples to skip from end of this packet
+ * u8 reason for start skip
+ * u8 reason for end skip (0=padding silence, 1=convergence)
+ * @endcode
+ */
+ AV_FRAME_DATA_SKIP_SAMPLES,
+ /**
+ * This side data must be associated with an audio frame and corresponds to
+ * enum AVAudioServiceType defined in avcodec.h.
+ */
+ AV_FRAME_DATA_AUDIO_SERVICE_TYPE,
+ /**
+ * Mastering display metadata associated with a video frame. The payload is
+ * an AVMasteringDisplayMetadata type and contains information about the
+ * mastering display color volume.
+ */
+ AV_FRAME_DATA_MASTERING_DISPLAY_METADATA,
+ /**
+   * The GOP timecode in 25-bit timecode format. Data format is 64-bit integer.
+ * This is set on the first frame of a GOP that has a temporal reference of 0.
+ */
+ AV_FRAME_DATA_GOP_TIMECODE,
+
+ /**
+ * The data represents the AVSphericalMapping structure defined in
+ * libavutil/spherical.h.
+ */
+ AV_FRAME_DATA_SPHERICAL,
+
+ /**
+ * Content light level (based on CTA-861.3). This payload contains data in
+ * the form of the AVContentLightMetadata struct.
+ */
+ AV_FRAME_DATA_CONTENT_LIGHT_LEVEL,
+
+ /**
+ * The data contains an ICC profile as an opaque octet buffer following the
+ * format described by ISO 15076-1 with an optional name defined in the
+ * metadata key entry "name".
+ */
+ AV_FRAME_DATA_ICC_PROFILE,
+
+ /**
+ * Timecode which conforms to SMPTE ST 12-1. The data is an array of 4
+ * uint32_t where the first uint32_t describes how many (1-3) of the other
+ * timecodes are used. The timecode format is described in the documentation
+ * of av_timecode_get_smpte_from_framenum() function in libavutil/timecode.h.
+ */
+ AV_FRAME_DATA_S12M_TIMECODE,
+
+ /**
+ * HDR dynamic metadata associated with a video frame. The payload is
+ * an AVDynamicHDRPlus type and contains information for color
+ * volume transform - application 4 of SMPTE 2094-40:2016 standard.
+ */
+ AV_FRAME_DATA_DYNAMIC_HDR_PLUS,
+
+ /**
+   * Regions of Interest: the data is an array of AVRegionOfInterest type; the
+   * number of array elements is implied by AVFrameSideData.size /
+ * AVRegionOfInterest.self_size.
+ */
+ AV_FRAME_DATA_REGIONS_OF_INTEREST,
+
+ /**
+ * Encoding parameters for a video frame, as described by AVVideoEncParams.
+ */
+ AV_FRAME_DATA_VIDEO_ENC_PARAMS,
+
+ /**
+ * User data unregistered metadata associated with a video frame.
+ * This is the H.26[45] UDU SEI message, and shouldn't be used for any other
+   * purpose. The data is stored as uint8_t in AVFrameSideData.data, which is 16
+ * bytes of uuid_iso_iec_11578 followed by AVFrameSideData.size - 16 bytes of
+ * user_data_payload_byte.
+ */
+ AV_FRAME_DATA_SEI_UNREGISTERED,
+
+ /**
+ * Film grain parameters for a frame, described by AVFilmGrainParams.
+ * Must be present for every frame which should have film grain applied.
+ *
+ * May be present multiple times, for example when there are multiple
+ * alternative parameter sets for different video signal characteristics.
+ * The user should select the most appropriate set for the application.
+ */
+ AV_FRAME_DATA_FILM_GRAIN_PARAMS,
+
+ /**
+ * Bounding boxes for object detection and classification,
+ * as described by AVDetectionBBoxHeader.
+ */
+ AV_FRAME_DATA_DETECTION_BBOXES,
+
+ /**
+ * Dolby Vision RPU raw data, suitable for passing to x265
+ * or other libraries. Array of uint8_t, with NAL emulation
+ * bytes intact.
+ */
+ AV_FRAME_DATA_DOVI_RPU_BUFFER,
+
+ /**
+ * Parsed Dolby Vision metadata, suitable for passing to a software
+ * implementation. The payload is the AVDOVIMetadata struct defined in
+ * libavutil/dovi_meta.h.
+ */
+ AV_FRAME_DATA_DOVI_METADATA,
+
+ /**
+ * HDR Vivid dynamic metadata associated with a video frame. The payload is
+ * an AVDynamicHDRVivid type and contains information for color
+ * volume transform - CUVA 005.1-2021.
+ */
+ AV_FRAME_DATA_DYNAMIC_HDR_VIVID,
+
+ /**
+ * Ambient viewing environment metadata, as defined by H.274.
+ */
+ AV_FRAME_DATA_AMBIENT_VIEWING_ENVIRONMENT,
+
+ /**
+ * Provide encoder-specific hinting information about changed/unchanged
+ * portions of a frame. It can be used to pass information about which
+ * macroblocks can be skipped because they didn't change from the
+ * corresponding ones in the previous frame. This could be useful for
+ * applications which know this information in advance to speed up
+ * encoding.
+ */
+ AV_FRAME_DATA_VIDEO_HINT,
+};
+
+enum AVActiveFormatDescription {
+ AV_AFD_SAME = 8,
+ AV_AFD_4_3 = 9,
+ AV_AFD_16_9 = 10,
+ AV_AFD_14_9 = 11,
+ AV_AFD_4_3_SP_14_9 = 13,
+ AV_AFD_16_9_SP_14_9 = 14,
+ AV_AFD_SP_4_3 = 15,
+};
+
+/**
+ * Structure to hold side data for an AVFrame.
+ *
+ * sizeof(AVFrameSideData) is not a part of the public ABI, so new fields may be
+ * added to the end with a minor bump.
+ */
+typedef struct AVFrameSideData {
+ enum AVFrameSideDataType type;
+ uint8_t* data;
+ size_t size;
+ AVDictionary* metadata;
+ AVBufferRef* buf;
+} AVFrameSideData;
+
+enum AVSideDataProps {
+ /**
+ * The side data type can be used in stream-global structures.
+ * Side data types without this property are only meaningful on per-frame
+ * basis.
+ */
+ AV_SIDE_DATA_PROP_GLOBAL = (1 << 0),
+
+ /**
+ * Multiple instances of this side data type can be meaningfully present in
+ * a single side data array.
+ */
+ AV_SIDE_DATA_PROP_MULTI = (1 << 1),
+};
+
+/**
+ * This struct describes the properties of a side data type. Its instance
+ * corresponding to a given type can be obtained from av_frame_side_data_desc().
+ */
+typedef struct AVSideDataDescriptor {
+ /**
+ * Human-readable side data description.
+ */
+ const char* name;
+
+ /**
+ * Side data property flags, a combination of AVSideDataProps values.
+ */
+ unsigned props;
+} AVSideDataDescriptor;
+
+/**
+ * Structure describing a single Region Of Interest.
+ *
+ * When multiple regions are defined in a single side-data block, they
+ * should be ordered from most to least important - some encoders are only
+ * capable of supporting a limited number of distinct regions, so will have
+ * to truncate the list.
+ *
+ * When overlapping regions are defined, the first region containing a given
+ * area of the frame applies.
+ */
+typedef struct AVRegionOfInterest {
+ /**
+ * Must be set to the size of this data structure (that is,
+ * sizeof(AVRegionOfInterest)).
+ */
+ uint32_t self_size;
+ /**
+ * Distance in pixels from the top edge of the frame to the top and
+ * bottom edges and from the left edge of the frame to the left and
+ * right edges of the rectangle defining this region of interest.
+ *
+ * The constraints on a region are encoder dependent, so the region
+ * actually affected may be slightly larger for alignment or other
+ * reasons.
+ */
+ int top;
+ int bottom;
+ int left;
+ int right;
+ /**
+ * Quantisation offset.
+ *
+ * Must be in the range -1 to +1. A value of zero indicates no quality
+ * change. A negative value asks for better quality (less quantisation),
+ * while a positive value asks for worse quality (greater quantisation).
+ *
+ * The range is calibrated so that the extreme values indicate the
+ * largest possible offset - if the rest of the frame is encoded with the
+ * worst possible quality, an offset of -1 indicates that this region
+ * should be encoded with the best possible quality anyway. Intermediate
+ * values are then interpolated in some codec-dependent way.
+ *
+ * For example, in 10-bit H.264 the quantisation parameter varies between
+ * -12 and 51. A typical qoffset value of -1/10 therefore indicates that
+ * this region should be encoded with a QP around one-tenth of the full
+ * range better than the rest of the frame. So, if most of the frame
+ * were to be encoded with a QP of around 30, this region would get a QP
+ * of around 24 (an offset of approximately -1/10 * (51 - -12) = -6.3).
+ * An extreme value of -1 would indicate that this region should be
+ * encoded with the best possible quality regardless of the treatment of
+ * the rest of the frame - that is, should be encoded at a QP of -12.
+ */
+ AVRational qoffset;
+} AVRegionOfInterest;
+
+/**
+ * This structure describes decoded (raw) audio or video data.
+ *
+ * AVFrame must be allocated using av_frame_alloc(). Note that this only
+ * allocates the AVFrame itself, the buffers for the data must be managed
+ * through other means (see below).
+ * AVFrame must be freed with av_frame_free().
+ *
+ * AVFrame is typically allocated once and then reused multiple times to hold
+ * different data (e.g. a single AVFrame to hold frames received from a
+ * decoder). In such a case, av_frame_unref() will free any references held by
+ * the frame and reset it to its original clean state before it
+ * is reused again.
+ *
+ * The data described by an AVFrame is usually reference counted through the
+ * AVBuffer API. The underlying buffer references are stored in AVFrame.buf /
+ * AVFrame.extended_buf. An AVFrame is considered to be reference counted if at
+ * least one reference is set, i.e. if AVFrame.buf[0] != NULL. In such a case,
+ * every single data plane must be contained in one of the buffers in
+ * AVFrame.buf or AVFrame.extended_buf.
+ * There may be a single buffer for all the data, or one separate buffer for
+ * each plane, or anything in between.
+ *
+ * sizeof(AVFrame) is not a part of the public ABI, so new fields may be added
+ * to the end with a minor bump.
+ *
+ * Fields can be accessed through AVOptions; the name string used matches the
+ * C structure field name for fields accessible through AVOptions. The AVClass
+ * for AVFrame can be obtained from avcodec_get_frame_class().
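+ *
+ * A typical decode-loop lifecycle (sketch; decode_into() stands in for
+ * whatever fills the frame and is not a real API):
+ * @code
+ * AVFrame *frame = av_frame_alloc();
+ * while (decode_into(frame) >= 0) {
+ *   // ... read frame->data / frame->linesize ...
+ *   av_frame_unref(frame);  // drop references, keep the AVFrame for reuse
+ * }
+ * av_frame_free(&frame);
+ * @endcode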
+ */
+typedef struct AVFrame {
+#define AV_NUM_DATA_POINTERS 8
+ /**
+ * pointer to the picture/channel planes.
+ * This might be different from the first allocated byte. For video,
+ * it could even point to the end of the image data.
+ *
+ * All pointers in data and extended_data must point into one of the
+ * AVBufferRef in buf or extended_buf.
+ *
+   * Some decoders access areas outside 0,0 - width,height; please
+   * see avcodec_align_dimensions2(). Some filters and swscale can read
+   * up to 16 bytes beyond the planes; if these filters are to be used,
+ * then 16 extra bytes must be allocated.
+ *
+ * NOTE: Pointers not needed by the format MUST be set to NULL.
+ *
+ * @attention In case of video, the data[] pointers can point to the
+ * end of image data in order to reverse line order, when used in
+ * combination with negative values in the linesize[] array.
+ */
+ uint8_t* data[AV_NUM_DATA_POINTERS];
+
+ /**
+ * For video, a positive or negative value, which is typically indicating
+ * the size in bytes of each picture line, but it can also be:
+ * - the negative byte size of lines for vertical flipping
+   *   (with data[n] pointing to the end of the data)
+ * - a positive or negative multiple of the byte size as for accessing
+ * even and odd fields of a frame (possibly flipped)
+ *
+ * For audio, only linesize[0] may be set. For planar audio, each channel
+ * plane must be the same size.
+ *
+   * For video, the linesizes should be multiples of the CPU's alignment
+   * preference, which is 16 or 32 for modern desktop CPUs.
+   * Some code requires such alignment, other code can be slower without
+   * correct alignment, and for yet other code it makes no difference.
+ *
+ * @note The linesize may be larger than the size of usable data -- there
+ * may be extra padding present for performance reasons.
+ *
+ * @attention In case of video, line size values can be negative to achieve
+ * a vertically inverted iteration over image lines.
+ */
+ int linesize[AV_NUM_DATA_POINTERS];
+
+ /**
+ * pointers to the data planes/channels.
+ *
+ * For video, this should simply point to data[].
+ *
+ * For planar audio, each channel has a separate data pointer, and
+ * linesize[0] contains the size of each channel buffer.
+ * For packed audio, there is just one data pointer, and linesize[0]
+ * contains the total size of the buffer for all channels.
+ *
+ * Note: Both data and extended_data should always be set in a valid frame,
+   * but for planar audio with more channels than can fit in data,
+ * extended_data must be used in order to access all channels.
+ */
+ uint8_t** extended_data;
+
+ /**
+ * @name Video dimensions
+ * Video frames only. The coded dimensions (in pixels) of the video frame,
+ * i.e. the size of the rectangle that contains some well-defined values.
+ *
+ * @note The part of the frame intended for display/presentation is further
+ * restricted by the @ref cropping "Cropping rectangle".
+ * @{
+ */
+ int width, height;
+ /**
+ * @}
+ */
+
+ /**
+ * number of audio samples (per channel) described by this frame
+ */
+ int nb_samples;
+
+ /**
+   * format of the frame, -1 if unknown or unset.
+   * (Values correspond to enum AVPixelFormat for video frames,
+   * enum AVSampleFormat for audio.)
+ */
+ int format;
+
+#if FF_API_FRAME_KEY
+ /**
+ * 1 -> keyframe, 0-> not
+ *
+ * @deprecated Use AV_FRAME_FLAG_KEY instead
+ */
+ attribute_deprecated int key_frame;
+#endif
+
+ /**
+ * Picture type of the frame.
+ */
+ enum AVPictureType pict_type;
+
+ /**
+ * Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
+ */
+ AVRational sample_aspect_ratio;
+
+ /**
+ * Presentation timestamp in time_base units (time when frame should be shown
+ * to user).
+ */
+ int64_t pts;
+
+ /**
+ * DTS copied from the AVPacket that triggered returning this frame. (if frame
+ * threading isn't used) This is also the Presentation time of this AVFrame
+ * calculated from only AVPacket.dts values without pts values.
+ */
+ int64_t pkt_dts;
+
+ /**
+ * Time base for the timestamps in this frame.
+ * In the future, this field may be set on frames output by decoders or
+ * filters, but its value will be by default ignored on input to encoders
+ * or filters.
+ */
+ AVRational time_base;
+
+ /**
+ * quality (between 1 (good) and FF_LAMBDA_MAX (bad))
+ */
+ int quality;
+
+ /**
+ * Frame owner's private data.
+ *
+ * This field may be set by the code that allocates/owns the frame data.
+ * It is then not touched by any library functions, except:
+ * - it is copied to other references by av_frame_copy_props() (and hence by
+ * av_frame_ref());
+ * - it is set to NULL when the frame is cleared by av_frame_unref()
+ * - on the caller's explicit request. E.g. libavcodec encoders/decoders
+ * will copy this field to/from @ref AVPacket "AVPackets" if the caller sets
+ * @ref AV_CODEC_FLAG_COPY_OPAQUE.
+ *
+ * @see opaque_ref the reference-counted analogue
+ */
+ void* opaque;
+
+ /**
+ * Number of fields in this frame which should be repeated, i.e. the total
+ * duration of this frame should be repeat_pict + 2 normal field durations.
+ *
+ * For interlaced frames this field may be set to 1, which signals that this
+ * frame should be presented as 3 fields: beginning with the first field (as
+ * determined by AV_FRAME_FLAG_TOP_FIELD_FIRST being set or not), followed
+ * by the second field, and then the first field again.
+ *
+ * For progressive frames this field may be set to a multiple of 2, which
+ * signals that this frame's duration should be (repeat_pict + 2) / 2
+ * normal frame durations.
+ *
+ * @note This field is computed from MPEG2 repeat_first_field flag and its
+ * associated flags, H.264 pic_struct from picture timing SEI, and
+ * their analogues in other codecs. Typically it should only be used when
+ * higher-layer timing information is not available.
+ */
+ int repeat_pict;
+
+#if FF_API_INTERLACED_FRAME
+ /**
+ * The content of the picture is interlaced.
+ *
+ * @deprecated Use AV_FRAME_FLAG_INTERLACED instead
+ */
+ attribute_deprecated int interlaced_frame;
+
+ /**
+ * If the content is interlaced, is top field displayed first.
+ *
+ * @deprecated Use AV_FRAME_FLAG_TOP_FIELD_FIRST instead
+ */
+ attribute_deprecated int top_field_first;
+#endif
+
+#if FF_API_PALETTE_HAS_CHANGED
+ /**
+ * Tell user application that palette has changed from previous frame.
+ */
+ attribute_deprecated int palette_has_changed;
+#endif
+
+ /**
+ * Sample rate of the audio data.
+ */
+ int sample_rate;
+
+ /**
+ * AVBuffer references backing the data for this frame. All the pointers in
+ * data and extended_data must point inside one of the buffers in buf or
+ * extended_buf. This array must be filled contiguously -- if buf[i] is
+ * non-NULL then buf[j] must also be non-NULL for all j < i.
+ *
+ * There may be at most one AVBuffer per data plane, so for video this array
+ * always contains all the references. For planar audio with more than
+ * AV_NUM_DATA_POINTERS channels, there may be more buffers than can fit in
+ * this array. Then the extra AVBufferRef pointers are stored in the
+ * extended_buf array.
+ */
+ AVBufferRef* buf[AV_NUM_DATA_POINTERS];
+
+ /**
+ * For planar audio which requires more than AV_NUM_DATA_POINTERS
+ * AVBufferRef pointers, this array will hold all the references which
+ * cannot fit into AVFrame.buf.
+ *
+ * Note that this is different from AVFrame.extended_data, which always
+ * contains all the pointers. This array only contains the extra pointers,
+ * which cannot fit into AVFrame.buf.
+ *
+ * This array is always allocated using av_malloc() by whoever constructs
+ * the frame. It is freed in av_frame_unref().
+ */
+ AVBufferRef** extended_buf;
+ /**
+ * Number of elements in extended_buf.
+ */
+ int nb_extended_buf;
+
+ AVFrameSideData** side_data;
+ int nb_side_data;
+
+/**
+ * @defgroup lavu_frame_flags AV_FRAME_FLAGS
+ * @ingroup lavu_frame
+ * Flags describing additional frame properties.
+ *
+ * @{
+ */
+
+/**
+ * The frame data may be corrupted, e.g. due to decoding errors.
+ */
+#define AV_FRAME_FLAG_CORRUPT (1 << 0)
+/**
+ * A flag to mark frames that are keyframes.
+ */
+#define AV_FRAME_FLAG_KEY (1 << 1)
+/**
+ * A flag to mark the frames which need to be decoded, but shouldn't be output.
+ */
+#define AV_FRAME_FLAG_DISCARD (1 << 2)
+/**
+ * A flag to mark frames whose content is interlaced.
+ */
+#define AV_FRAME_FLAG_INTERLACED (1 << 3)
+/**
+ * A flag to mark frames where the top field is displayed first if the content
+ * is interlaced.
+ */
+#define AV_FRAME_FLAG_TOP_FIELD_FIRST (1 << 4)
+ /**
+ * @}
+ */
+
+ /**
+ * Frame flags, a combination of @ref lavu_frame_flags
+ */
+ int flags;
+
+ /**
+ * MPEG vs JPEG YUV range.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVColorRange color_range;
+
+ enum AVColorPrimaries color_primaries;
+
+ enum AVColorTransferCharacteristic color_trc;
+
+ /**
+ * YUV colorspace type.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVColorSpace colorspace;
+
+ enum AVChromaLocation chroma_location;
+
+ /**
+ * frame timestamp estimated using various heuristics, in stream time base
+ * - encoding: unused
+ * - decoding: set by libavcodec, read by user.
+ */
+ int64_t best_effort_timestamp;
+
+#if FF_API_FRAME_PKT
+ /**
+ * reordered pos from the last AVPacket that has been input into the decoder
+ * - encoding: unused
+ * - decoding: Read by user.
+ * @deprecated use AV_CODEC_FLAG_COPY_OPAQUE to pass through arbitrary user
+ * data from packets to frames
+ */
+ attribute_deprecated int64_t pkt_pos;
+#endif
+
+ /**
+ * metadata.
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ AVDictionary* metadata;
+
+ /**
+ * decode error flags of the frame, set to a combination of
+ * FF_DECODE_ERROR_xxx flags if the decoder produced a frame, but there
+ * were errors during the decoding.
+ * - encoding: unused
+ * - decoding: set by libavcodec, read by user.
+ */
+ int decode_error_flags;
+#define FF_DECODE_ERROR_INVALID_BITSTREAM 1
+#define FF_DECODE_ERROR_MISSING_REFERENCE 2
+#define FF_DECODE_ERROR_CONCEALMENT_ACTIVE 4
+#define FF_DECODE_ERROR_DECODE_SLICES 8
+
+#if FF_API_FRAME_PKT
+ /**
+ * size of the corresponding packet containing the compressed
+ * frame.
+ * It is set to a negative value if unknown.
+ * - encoding: unused
+ * - decoding: set by libavcodec, read by user.
+ * @deprecated use AV_CODEC_FLAG_COPY_OPAQUE to pass through arbitrary user
+ * data from packets to frames
+ */
+ attribute_deprecated int pkt_size;
+#endif
+
+ /**
+ * For hwaccel-format frames, this should be a reference to the
+ * AVHWFramesContext describing the frame.
+ */
+ AVBufferRef* hw_frames_ctx;
+
+ /**
+ * Frame owner's private data.
+ *
+ * This field may be set by the code that allocates/owns the frame data.
+ * It is then not touched by any library functions, except:
+ * - a new reference to the underlying buffer is propagated by
+ * av_frame_copy_props() (and hence by av_frame_ref());
+ * - it is unreferenced in av_frame_unref();
+ * - on the caller's explicit request. E.g. libavcodec encoders/decoders
+ * will propagate a new reference to/from @ref AVPacket "AVPackets" if the
+ * caller sets @ref AV_CODEC_FLAG_COPY_OPAQUE.
+ *
+ * @see opaque the plain pointer analogue
+ */
+ AVBufferRef* opaque_ref;
+
+ /**
+ * @anchor cropping
+ * @name Cropping
+   * Video frames only. The number of pixels to discard from the
+ * top/bottom/left/right border of the frame to obtain the sub-rectangle of
+ * the frame intended for presentation.
+ * @{
+ */
+ size_t crop_top;
+ size_t crop_bottom;
+ size_t crop_left;
+ size_t crop_right;
+ /**
+ * @}
+ */
+
+ /**
+ * AVBufferRef for internal use by a single libav* library.
+ * Must not be used to transfer data between libraries.
+ * Has to be NULL when ownership of the frame leaves the respective library.
+ *
+ * Code outside the FFmpeg libs should never check or change the contents of
+ * the buffer ref.
+ *
+ * FFmpeg calls av_buffer_unref() on it when the frame is unreferenced.
+ * av_frame_copy_props() calls create a new reference with av_buffer_ref()
+ * for the target frame's private_ref field.
+ */
+ AVBufferRef* private_ref;
+
+ /**
+ * Channel layout of the audio data.
+ */
+ AVChannelLayout ch_layout;
+
+ /**
+ * Duration of the frame, in the same units as pts. 0 if unknown.
+ */
+ int64_t duration;
+} AVFrame;
+
+/**
+ * Allocate an AVFrame and set its fields to default values. The resulting
+ * struct must be freed using av_frame_free().
+ *
+ * @return An AVFrame filled with default values or NULL on failure.
+ *
+ * @note this only allocates the AVFrame itself, not the data buffers. Those
+ * must be allocated through other means, e.g. with av_frame_get_buffer() or
+ * manually.
+ */
+AVFrame* av_frame_alloc(void);
+
+/**
+ * Free the frame and any dynamically allocated objects in it,
+ * e.g. extended_data. If the frame is reference counted, it will be
+ * unreferenced first.
+ *
+ * @param frame frame to be freed. The pointer will be set to NULL.
+ */
+void av_frame_free(AVFrame** frame);
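+
+/*
+ * A minimal lifecycle sketch (illustrative only, not upstream documentation;
+ * error handling abbreviated):
+ * @code
+ * AVFrame* frame = av_frame_alloc();
+ * if (!frame)
+ *     return AVERROR(ENOMEM);
+ * // ... fill and use the frame ...
+ * av_frame_free(&frame);  // unrefs any buffers; frame is set to NULL
+ * @endcode
+ */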
+
+/**
+ * Set up a new reference to the data described by the source frame.
+ *
+ * Copy frame properties from src to dst and create a new reference for each
+ * AVBufferRef from src.
+ *
+ * If src is not reference counted, new buffers are allocated and the data is
+ * copied.
+ *
+ * @warning: dst MUST have been either unreferenced with av_frame_unref(dst),
+ * or newly allocated with av_frame_alloc() before calling this
+ * function, or undefined behavior will occur.
+ *
+ * @return 0 on success, a negative AVERROR on error
+ */
+int av_frame_ref(AVFrame* dst, const AVFrame* src);
+
+/**
+ * Ensure the destination frame refers to the same data described by the source
+ * frame, either by creating a new reference for each AVBufferRef from src if
+ * they differ from those in dst, by allocating new buffers and copying data if
+ * src is not reference counted, or by unreferencing it if src is empty.
+ *
+ * Frame properties on dst will be replaced by those from src.
+ *
+ * @return 0 on success, a negative AVERROR on error. On error, dst is
+ * unreferenced.
+ */
+int av_frame_replace(AVFrame* dst, const AVFrame* src);
+
+/**
+ * Create a new frame that references the same data as src.
+ *
+ * This is a shortcut for av_frame_alloc()+av_frame_ref().
+ *
+ * @return newly created AVFrame on success, NULL on error.
+ */
+AVFrame* av_frame_clone(const AVFrame* src);
+
+/**
+ * Unreference all the buffers referenced by frame and reset the frame fields.
+ */
+void av_frame_unref(AVFrame* frame);
+
+/**
+ * Move everything contained in src to dst and reset src.
+ *
+ * @warning: dst is not unreferenced, but directly overwritten without reading
+ * or deallocating its contents. Call av_frame_unref(dst) manually
+ * before calling this function to ensure that no memory is leaked.
+ */
+void av_frame_move_ref(AVFrame* dst, AVFrame* src);
+
+/**
+ * Allocate new buffer(s) for audio or video data.
+ *
+ * The following fields must be set on frame before calling this function:
+ * - format (pixel format for video, sample format for audio)
+ * - width and height for video
+ * - nb_samples and ch_layout for audio
+ *
+ * This function will fill AVFrame.data and AVFrame.buf arrays and, if
+ * necessary, allocate and fill AVFrame.extended_data and AVFrame.extended_buf.
+ * For planar formats, one buffer will be allocated for each plane.
+ *
+ * @warning: if frame already has been allocated, calling this function will
+ * leak memory. In addition, undefined behavior can occur in certain
+ * cases.
+ *
+ * @param frame frame in which to store the new buffers.
+ * @param align Required buffer size alignment. If equal to 0, alignment will be
+ * chosen automatically for the current CPU. It is highly
+ * recommended to pass 0 here unless you know what you are doing.
+ *
+ * @return 0 on success, a negative AVERROR on error.
+ */
+int av_frame_get_buffer(AVFrame* frame, int align);
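+
+/*
+ * Sketch of allocating video buffers for a fresh frame (the pixel format and
+ * dimensions below are arbitrary examples, not requirements):
+ * @code
+ * AVFrame* frame = av_frame_alloc();
+ * if (frame) {
+ *     frame->format = AV_PIX_FMT_YUV420P;
+ *     frame->width  = 640;
+ *     frame->height = 480;
+ *     if (av_frame_get_buffer(frame, 0) < 0)  // 0: alignment chosen for CPU
+ *         av_frame_free(&frame);
+ * }
+ * @endcode
+ */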
+
+/**
+ * Check if the frame data is writable.
+ *
+ * @return A positive value if the frame data is writable (which is true if and
+ * only if each of the underlying buffers has only one reference, namely the one
+ * stored in this frame). Return 0 otherwise.
+ *
+ * If 1 is returned the answer is valid until av_buffer_ref() is called on any
+ * of the underlying AVBufferRefs (e.g. through av_frame_ref() or directly).
+ *
+ * @see av_frame_make_writable(), av_buffer_is_writable()
+ */
+int av_frame_is_writable(AVFrame* frame);
+
+/**
+ * Ensure that the frame data is writable, avoiding data copy if possible.
+ *
+ * Do nothing if the frame is writable, allocate new buffers and copy the data
+ * if it is not. Non-refcounted frames behave as non-writable, i.e. a copy
+ * is always made.
+ *
+ * @return 0 on success, a negative AVERROR on error.
+ *
+ * @see av_frame_is_writable(), av_buffer_is_writable(),
+ * av_buffer_make_writable()
+ */
+int av_frame_make_writable(AVFrame* frame);
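+
+/*
+ * Sketch: ensuring exclusive ownership before modifying data in place
+ * (assumes frame is a valid, refcounted video frame):
+ * @code
+ * if (av_frame_make_writable(frame) < 0)
+ *     return AVERROR(ENOMEM);  // a private copy could not be made
+ * frame->data[0][0] = 0;       // safe: no other reference shares the data
+ * @endcode
+ */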
+
+/**
+ * Copy the frame data from src to dst.
+ *
+ * This function does not allocate anything; dst must already be initialized
+ * and allocated with the same parameters as src.
+ *
+ * This function only copies the frame data (i.e. the contents of the data /
+ * extended data arrays), not any other properties.
+ *
+ * @return >= 0 on success, a negative AVERROR on error.
+ */
+int av_frame_copy(AVFrame* dst, const AVFrame* src);
+
+/**
+ * Copy only "metadata" fields from src to dst.
+ *
+ * Metadata for the purpose of this function are those fields that do not affect
+ * the data layout in the buffers. E.g. pts, sample rate (for audio) or sample
+ * aspect ratio (for video), but not width/height or channel layout.
+ * Side data is also copied.
+ */
+int av_frame_copy_props(AVFrame* dst, const AVFrame* src);
+
+/**
+ * Get the buffer reference a given data plane is stored in.
+ *
+ * @param frame the frame to get the plane's buffer from
+ * @param plane index of the data plane of interest in frame->extended_data.
+ *
+ * @return the buffer reference that contains the plane or NULL if the input
+ * frame is not valid.
+ */
+AVBufferRef* av_frame_get_plane_buffer(const AVFrame* frame, int plane);
+
+/**
+ * Add a new side data to a frame.
+ *
+ * @param frame a frame to which the side data should be added
+ * @param type type of the added side data
+ * @param size size of the side data
+ *
+ * @return newly added side data on success, NULL on error
+ */
+AVFrameSideData* av_frame_new_side_data(AVFrame* frame,
+ enum AVFrameSideDataType type,
+ size_t size);
+
+/**
+ * Add a new side data to a frame from an existing AVBufferRef
+ *
+ * @param frame a frame to which the side data should be added
+ * @param type the type of the added side data
+ * @param buf an AVBufferRef to add as side data. The ownership of
+ * the reference is transferred to the frame.
+ *
+ * @return newly added side data on success, NULL on error. On failure
+ * the frame is unchanged and the AVBufferRef remains owned by
+ * the caller.
+ */
+AVFrameSideData* av_frame_new_side_data_from_buf(AVFrame* frame,
+ enum AVFrameSideDataType type,
+ AVBufferRef* buf);
+
+/**
+ * @return a pointer to the side data of a given type on success, NULL if there
+ * is no side data with such type in this frame.
+ */
+AVFrameSideData* av_frame_get_side_data(const AVFrame* frame,
+ enum AVFrameSideDataType type);
+
+/**
+ * Remove and free all side data instances of the given type.
+ */
+void av_frame_remove_side_data(AVFrame* frame, enum AVFrameSideDataType type);
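+
+/*
+ * Sketch of attaching and querying side data; the display-matrix type and
+ * its 9 * sizeof(int32_t) payload are used purely as an example:
+ * @code
+ * AVFrameSideData* sd = av_frame_new_side_data(
+ *     frame, AV_FRAME_DATA_DISPLAYMATRIX, 9 * sizeof(int32_t));
+ * if (sd)
+ *     memset(sd->data, 0, sd->size);  // fill the payload
+ * sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX);
+ * @endcode
+ */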
+
+/**
+ * Flags for frame cropping.
+ */
+enum {
+ /**
+ * Apply the maximum possible cropping, even if it requires setting the
+ * AVFrame.data[] entries to unaligned pointers. Passing unaligned data
+ * to FFmpeg API is generally not allowed, and causes undefined behavior
+ * (such as crashes). You can pass unaligned data only to FFmpeg APIs that
+ * are explicitly documented to accept it. Use this flag only if you
+ * absolutely know what you are doing.
+ */
+ AV_FRAME_CROP_UNALIGNED = 1 << 0,
+};
+
+/**
+ * Crop the given video AVFrame according to its crop_left/crop_top/crop_right/
+ * crop_bottom fields. If cropping is successful, the function will adjust the
+ * data pointers and the width/height fields, and set the crop fields to 0.
+ *
+ * In all cases, the cropping boundaries will be rounded to the inherent
+ * alignment of the pixel format. In some cases, such as for opaque hwaccel
+ * formats, the left/top cropping is ignored. The crop fields are set to 0 even
+ * if the cropping was rounded or ignored.
+ *
+ * @param frame the frame which should be cropped
+ * @param flags Some combination of AV_FRAME_CROP_* flags, or 0.
+ *
+ * @return >= 0 on success, a negative AVERROR on error. If the cropping fields
+ * were invalid, AVERROR(ERANGE) is returned, and nothing is changed.
+ */
+int av_frame_apply_cropping(AVFrame* frame, int flags);
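+
+/*
+ * Sketch: applying decoder-provided cropping after receiving a frame, typical
+ * when the coded size exceeds the display size:
+ * @code
+ * if (av_frame_apply_cropping(frame, 0) >= 0) {
+ *     // frame->width/height now describe the presentation rectangle and the
+ *     // crop_* fields have been reset to 0
+ * }
+ * @endcode
+ */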
+
+/**
+ * @return a string identifying the side data type
+ */
+const char* av_frame_side_data_name(enum AVFrameSideDataType type);
+
+/**
+ * @return side data descriptor corresponding to a given side data type, NULL
+ * when not available.
+ */
+const AVSideDataDescriptor* av_frame_side_data_desc(
+ enum AVFrameSideDataType type);
+
+/**
+ * Free all side data entries and their contents, then zero out the values
+ * that the pointers point to.
+ *
+ * @param sd pointer to array of side data to free. Will be set to NULL
+ * upon return.
+ * @param nb_sd pointer to an integer containing the number of entries in
+ * the array. Will be set to 0 upon return.
+ */
+void av_frame_side_data_free(AVFrameSideData*** sd, int* nb_sd);
+
+#define AV_FRAME_SIDE_DATA_FLAG_UNIQUE (1 << 0)
+
+/**
+ * Add new side data entry to an array.
+ *
+ * @param sd pointer to array of side data to which to add another entry,
+ * or to NULL in order to start a new array.
+ * @param nb_sd pointer to an integer containing the number of entries in
+ * the array.
+ * @param type type of the added side data
+ * @param size size of the side data
+ * @param flags Some combination of AV_FRAME_SIDE_DATA_FLAG_* flags, or 0.
+ *
+ * @return newly added side data on success, NULL on error. In case of
+ * AV_FRAME_SIDE_DATA_FLAG_UNIQUE being set, entries of matching
+ * AVFrameSideDataType will be removed before the addition is
+ * attempted.
+ */
+AVFrameSideData* av_frame_side_data_new(AVFrameSideData*** sd, int* nb_sd,
+ enum AVFrameSideDataType type,
+ size_t size, unsigned int flags);
+
+/**
+ * Add a new side data entry to an array based on existing side data, taking
+ * a reference towards the contained AVBufferRef.
+ *
+ * @param sd pointer to array of side data to which to add another entry,
+ * or to NULL in order to start a new array.
+ * @param nb_sd pointer to an integer containing the number of entries in
+ * the array.
+ * @param src side data to be cloned, with a new reference utilized
+ * for the buffer.
+ * @param flags Some combination of AV_FRAME_SIDE_DATA_FLAG_* flags, or 0.
+ *
+ * @return negative error code on failure, >=0 on success. In case of
+ * AV_FRAME_SIDE_DATA_FLAG_UNIQUE being set, entries of matching
+ * AVFrameSideDataType will be removed before the addition is
+ * attempted.
+ */
+int av_frame_side_data_clone(AVFrameSideData*** sd, int* nb_sd,
+ const AVFrameSideData* src, unsigned int flags);
+
+/**
+ * Get a side data entry of a specific type from an array.
+ *
+ * @param sd array of side data.
+ * @param nb_sd integer containing the number of entries in the array.
+ * @param type type of side data to be queried
+ *
+ * @return a pointer to the side data of a given type on success, NULL if there
+ * is no side data with such type in this set.
+ */
+const AVFrameSideData* av_frame_side_data_get_c(
+ const AVFrameSideData* const* sd, const int nb_sd,
+ enum AVFrameSideDataType type);
+
+/**
+ * Wrapper around av_frame_side_data_get_c() to work around the limitation
+ * that for any type T the conversion from T * const * to const T * const *
+ * is not performed automatically in C.
+ * @see av_frame_side_data_get_c()
+ */
+static inline const AVFrameSideData* av_frame_side_data_get(
+ AVFrameSideData* const* sd, const int nb_sd,
+ enum AVFrameSideDataType type) {
+ return av_frame_side_data_get_c((const AVFrameSideData* const*)sd, nb_sd,
+ type);
+}
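+
+/*
+ * Sketch of the array-based side data API, independent of any AVFrame (the
+ * side data type and size are arbitrary examples):
+ * @code
+ * AVFrameSideData** sd = NULL;
+ * int nb_sd = 0;
+ * av_frame_side_data_new(&sd, &nb_sd, AV_FRAME_DATA_DISPLAYMATRIX,
+ *                        9 * sizeof(int32_t), 0);
+ * // ... av_frame_side_data_get(sd, nb_sd, AV_FRAME_DATA_DISPLAYMATRIX) ...
+ * av_frame_side_data_free(&sd, &nb_sd);  // sd becomes NULL, nb_sd becomes 0
+ * @endcode
+ */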
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_FRAME_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/hwcontext.h b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/hwcontext.h
new file mode 100644
index 0000000000..f2c5426d5f
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/hwcontext.h
@@ -0,0 +1,594 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_HWCONTEXT_H
+#define AVUTIL_HWCONTEXT_H
+
+#include "buffer.h"
+#include "frame.h"
+#include "log.h"
+#include "pixfmt.h"
+
+enum AVHWDeviceType {
+ AV_HWDEVICE_TYPE_NONE,
+ AV_HWDEVICE_TYPE_VDPAU,
+ AV_HWDEVICE_TYPE_CUDA,
+ AV_HWDEVICE_TYPE_VAAPI,
+ AV_HWDEVICE_TYPE_DXVA2,
+ AV_HWDEVICE_TYPE_QSV,
+ AV_HWDEVICE_TYPE_VIDEOTOOLBOX,
+ AV_HWDEVICE_TYPE_D3D11VA,
+ AV_HWDEVICE_TYPE_DRM,
+ AV_HWDEVICE_TYPE_OPENCL,
+ AV_HWDEVICE_TYPE_MEDIACODEC,
+ AV_HWDEVICE_TYPE_VULKAN,
+ AV_HWDEVICE_TYPE_D3D12VA,
+};
+
+/**
+ * This struct aggregates all the (hardware/vendor-specific) "high-level" state,
+ * i.e. state that is not tied to a concrete processing configuration.
+ * E.g., in an API that supports hardware-accelerated encoding and decoding,
+ * this struct will (if possible) wrap the state that is common to both encoding
+ * and decoding and from which specific instances of encoders or decoders can be
+ * derived.
+ *
+ * This struct is reference-counted with the AVBuffer mechanism. The
+ * av_hwdevice_ctx_alloc() constructor yields a reference, whose data field
+ * points to the actual AVHWDeviceContext. Further objects derived from
+ * AVHWDeviceContext (such as AVHWFramesContext, describing a frame pool with
+ * specific properties) will hold an internal reference to it. After all the
+ * references are released, the AVHWDeviceContext itself will be freed,
+ * optionally invoking a user-specified callback for uninitializing the hardware
+ * state.
+ */
+typedef struct AVHWDeviceContext {
+ /**
+ * A class for logging. Set by av_hwdevice_ctx_alloc().
+ */
+ const AVClass* av_class;
+
+ /**
+ * This field identifies the underlying API used for hardware access.
+ *
+ * This field is set when this struct is allocated and never changed
+ * afterwards.
+ */
+ enum AVHWDeviceType type;
+
+ /**
+ * The format-specific data, allocated and freed by libavutil along with
+ * this context.
+ *
+ * Should be cast by the user to the format-specific context defined in the
+ * corresponding header (hwcontext_*.h) and filled as described in the
+ * documentation before calling av_hwdevice_ctx_init().
+ *
+ * After calling av_hwdevice_ctx_init() this struct should not be modified
+ * by the caller.
+ */
+ void* hwctx;
+
+ /**
+ * This field may be set by the caller before calling av_hwdevice_ctx_init().
+ *
+ * If non-NULL, this callback will be called when the last reference to
+ * this context is unreferenced, immediately before it is freed.
+ *
+   * @note when other objects (e.g. an AVHWFramesContext) are derived from this
+ * struct, this callback will be invoked after all such child objects
+ * are fully uninitialized and their respective destructors invoked.
+ */
+ void (*free)(struct AVHWDeviceContext* ctx);
+
+ /**
+ * Arbitrary user data, to be used e.g. by the free() callback.
+ */
+ void* user_opaque;
+} AVHWDeviceContext;
+
+/**
+ * This struct describes a set or pool of "hardware" frames (i.e. those with
+ * data not located in normal system memory). All the frames in the pool are
+ * assumed to be allocated in the same way and interchangeable.
+ *
+ * This struct is reference-counted with the AVBuffer mechanism and tied to a
+ * given AVHWDeviceContext instance. The av_hwframe_ctx_alloc() constructor
+ * yields a reference, whose data field points to the actual AVHWFramesContext
+ * struct.
+ */
+typedef struct AVHWFramesContext {
+ /**
+ * A class for logging.
+ */
+ const AVClass* av_class;
+
+ /**
+ * A reference to the parent AVHWDeviceContext. This reference is owned and
+ * managed by the enclosing AVHWFramesContext, but the caller may derive
+ * additional references from it.
+ */
+ AVBufferRef* device_ref;
+
+ /**
+ * The parent AVHWDeviceContext. This is simply a pointer to
+ * device_ref->data provided for convenience.
+ *
+ * Set by libavutil in av_hwframe_ctx_init().
+ */
+ AVHWDeviceContext* device_ctx;
+
+ /**
+ * The format-specific data, allocated and freed automatically along with
+ * this context.
+ *
+ * The user shall ignore this field if the corresponding format-specific
+ * header (hwcontext_*.h) does not define a context to be used as
+ * AVHWFramesContext.hwctx.
+ *
+ * Otherwise, it should be cast by the user to said context and filled
+ * as described in the documentation before calling av_hwframe_ctx_init().
+ *
+ * After any frames using this context are created, the contents of this
+ * struct should not be modified by the caller.
+ */
+ void* hwctx;
+
+ /**
+ * This field may be set by the caller before calling av_hwframe_ctx_init().
+ *
+ * If non-NULL, this callback will be called when the last reference to
+ * this context is unreferenced, immediately before it is freed.
+ */
+ void (*free)(struct AVHWFramesContext* ctx);
+
+ /**
+ * Arbitrary user data, to be used e.g. by the free() callback.
+ */
+ void* user_opaque;
+
+ /**
+ * A pool from which the frames are allocated by av_hwframe_get_buffer().
+ * This field may be set by the caller before calling av_hwframe_ctx_init().
+ * The buffers returned by calling av_buffer_pool_get() on this pool must
+ * have the properties described in the documentation in the corresponding hw
+ * type's header (hwcontext_*.h). The pool will be freed strictly before
+ * this struct's free() callback is invoked.
+ *
+   * This field may be NULL, in which case libavutil will attempt to allocate
+   * a pool internally. Note that certain device types enforce pools allocated
+   * at a fixed size (frame count), which cannot be extended dynamically. In
+   * such a case, initial_pool_size must be set appropriately.
+ */
+ AVBufferPool* pool;
+
+ /**
+ * Initial size of the frame pool. If a device type does not support
+ * dynamically resizing the pool, then this is also the maximum pool size.
+ *
+ * May be set by the caller before calling av_hwframe_ctx_init(). Must be
+ * set if pool is NULL and the device type does not support dynamic pools.
+ */
+ int initial_pool_size;
+
+ /**
+ * The pixel format identifying the underlying HW surface type.
+ *
+ * Must be a hwaccel format, i.e. the corresponding descriptor must have the
+ * AV_PIX_FMT_FLAG_HWACCEL flag set.
+ *
+ * Must be set by the user before calling av_hwframe_ctx_init().
+ */
+ enum AVPixelFormat format;
+
+ /**
+ * The pixel format identifying the actual data layout of the hardware
+ * frames.
+ *
+ * Must be set by the caller before calling av_hwframe_ctx_init().
+ *
+ * @note when the underlying API does not provide the exact data layout, but
+ * only the colorspace/bit depth, this field should be set to the fully
+ * planar version of that format (e.g. for 8-bit 420 YUV it should be
+ * AV_PIX_FMT_YUV420P, not AV_PIX_FMT_NV12 or anything else).
+ */
+ enum AVPixelFormat sw_format;
+
+ /**
+ * The allocated dimensions of the frames in this pool.
+ *
+ * Must be set by the user before calling av_hwframe_ctx_init().
+ */
+ int width, height;
+} AVHWFramesContext;
+
+/**
+ * Look up an AVHWDeviceType by name.
+ *
+ * @param name String name of the device type (case-insensitive).
+ * @return The type from enum AVHWDeviceType, or AV_HWDEVICE_TYPE_NONE if
+ * not found.
+ */
+enum AVHWDeviceType av_hwdevice_find_type_by_name(const char* name);
+
+/**
+ * Get the string name of an AVHWDeviceType.
+ *
+ * @param type Type from enum AVHWDeviceType.
+ * @return Pointer to a static string containing the name, or NULL if the type
+ * is not valid.
+ */
+const char* av_hwdevice_get_type_name(enum AVHWDeviceType type);
+
+/**
+ * Iterate over supported device types.
+ *
+ * @param prev AV_HWDEVICE_TYPE_NONE initially, then the previous type
+ * returned by this function in subsequent iterations.
+ * @return The next usable device type from enum AVHWDeviceType, or
+ * AV_HWDEVICE_TYPE_NONE if there are no more.
+ */
+enum AVHWDeviceType av_hwdevice_iterate_types(enum AVHWDeviceType prev);
+
+/**
+ * Allocate an AVHWDeviceContext for a given hardware type.
+ *
+ * @param type the type of the hardware device to allocate.
+ * @return a reference to the newly created AVHWDeviceContext on success or NULL
+ * on failure.
+ */
+AVBufferRef* av_hwdevice_ctx_alloc(enum AVHWDeviceType type);
+
+/**
+ * Finalize the device context before use. This function must be called after
+ * the context is filled with all the required information and before it is
+ * used in any way.
+ *
+ * @param ref a reference to the AVHWDeviceContext
+ * @return 0 on success, a negative AVERROR code on failure
+ */
+int av_hwdevice_ctx_init(AVBufferRef* ref);
+
+/**
+ * Open a device of the specified type and create an AVHWDeviceContext for it.
+ *
+ * This is a convenience function intended to cover the simple cases. Callers
+ * who need to fine-tune device creation/management should open the device
+ * manually and then wrap it in an AVHWDeviceContext using
+ * av_hwdevice_ctx_alloc()/av_hwdevice_ctx_init().
+ *
+ * The returned context is already initialized and ready for use, the caller
+ * should not call av_hwdevice_ctx_init() on it. The user_opaque/free fields of
+ * the created AVHWDeviceContext are set by this function and should not be
+ * touched by the caller.
+ *
+ * @param device_ctx On success, a reference to the newly-created device context
+ * will be written here. The reference is owned by the caller
+ * and must be released with av_buffer_unref() when no longer
+ * needed. On failure, NULL will be written to this pointer.
+ * @param type The type of the device to create.
+ * @param device A type-specific string identifying the device to open.
+ * @param opts A dictionary of additional (type-specific) options to use in
+ * opening the device. The dictionary remains owned by the caller.
+ * @param flags currently unused
+ *
+ * @return 0 on success, a negative AVERROR code on failure.
+ */
+int av_hwdevice_ctx_create(AVBufferRef** device_ctx, enum AVHWDeviceType type,
+ const char* device, AVDictionary* opts, int flags);
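+
+/*
+ * Sketch of one-call device creation; the VAAPI type and render-node path
+ * are platform-specific assumptions:
+ * @code
+ * AVBufferRef* device = NULL;
+ * int err = av_hwdevice_ctx_create(&device, AV_HWDEVICE_TYPE_VAAPI,
+ *                                  "/dev/dri/renderD128", NULL, 0);
+ * if (err >= 0)
+ *     av_buffer_unref(&device);  // release once no longer needed
+ * @endcode
+ */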
+
+/**
+ * Create a new device of the specified type from an existing device.
+ *
+ * If the source device is a device of the target type or was originally
+ * derived from such a device (possibly through one or more intermediate
+ * devices of other types), then this will return a reference to the
+ * existing device of the same type as is requested.
+ *
+ * Otherwise, it will attempt to derive a new device from the given source
+ * device. If direct derivation to the new type is not implemented, it will
+ * attempt the same derivation from each ancestor of the source device in
+ * turn looking for an implemented derivation method.
+ *
+ * @param dst_ctx On success, a reference to the newly-created
+ * AVHWDeviceContext.
+ * @param type The type of the new device to create.
+ * @param src_ctx A reference to an existing AVHWDeviceContext which will be
+ * used to create the new device.
+ * @param flags Currently unused; should be set to zero.
+ * @return Zero on success, a negative AVERROR code on failure.
+ */
+int av_hwdevice_ctx_create_derived(AVBufferRef** dst_ctx,
+ enum AVHWDeviceType type,
+ AVBufferRef* src_ctx, int flags);
+
+/**
+ * Create a new device of the specified type from an existing device.
+ *
+ * This function performs the same action as av_hwdevice_ctx_create_derived();
+ * in addition, it can set options for the newly derived device.
+ *
+ * @param dst_ctx On success, a reference to the newly-created
+ * AVHWDeviceContext.
+ * @param type The type of the new device to create.
+ * @param src_ctx A reference to an existing AVHWDeviceContext which will be
+ * used to create the new device.
+ * @param options Options for the new device to create, same format as in
+ * av_hwdevice_ctx_create.
+ * @param flags Currently unused; should be set to zero.
+ * @return Zero on success, a negative AVERROR code on failure.
+ */
+int av_hwdevice_ctx_create_derived_opts(AVBufferRef** dst_ctx,
+ enum AVHWDeviceType type,
+ AVBufferRef* src_ctx,
+ AVDictionary* options, int flags);
+
+/**
+ * Allocate an AVHWFramesContext tied to a given device context.
+ *
+ * @param device_ctx a reference to a AVHWDeviceContext. This function will make
+ * a new reference for internal use, the one passed to the
+ * function remains owned by the caller.
+ * @return a reference to the newly created AVHWFramesContext on success or NULL
+ * on failure.
+ */
+AVBufferRef* av_hwframe_ctx_alloc(AVBufferRef* device_ctx);
+
+/**
+ * Finalize the context before use. This function must be called after the
+ * context is filled with all the required information and before it is attached
+ * to any frames.
+ *
+ * @param ref a reference to the AVHWFramesContext
+ * @return 0 on success, a negative AVERROR code on failure
+ */
+int av_hwframe_ctx_init(AVBufferRef* ref);
+
+/**
+ * Allocate a new frame attached to the given AVHWFramesContext.
+ *
+ * @param hwframe_ctx a reference to an AVHWFramesContext
+ * @param frame an empty (freshly allocated or unreffed) frame to be filled with
+ * newly allocated buffers.
+ * @param flags currently unused, should be set to zero
+ * @return 0 on success, a negative AVERROR code on failure
+ */
+int av_hwframe_get_buffer(AVBufferRef* hwframe_ctx, AVFrame* frame, int flags);
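+
+/*
+ * Sketch: configuring a frame pool on an existing device reference and
+ * drawing one frame from it (formats, dimensions and pool size are
+ * illustrative; error handling elided):
+ * @code
+ * AVBufferRef* frames_ref = av_hwframe_ctx_alloc(device);
+ * AVHWFramesContext* fc = (AVHWFramesContext*)frames_ref->data;
+ * fc->format    = AV_PIX_FMT_VAAPI;  // hwaccel surface format
+ * fc->sw_format = AV_PIX_FMT_NV12;   // underlying data layout
+ * fc->width     = 1920;
+ * fc->height    = 1080;
+ * fc->initial_pool_size = 20;
+ * if (av_hwframe_ctx_init(frames_ref) >= 0)
+ *     av_hwframe_get_buffer(frames_ref, frame, 0);
+ * @endcode
+ */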
+
+/**
+ * Copy data to or from a hw surface. At least one of dst/src must have an
+ * AVHWFramesContext attached.
+ *
+ * If src has an AVHWFramesContext attached, then the format of dst (if set)
+ * must use one of the formats returned by av_hwframe_transfer_get_formats(src,
+ * AV_HWFRAME_TRANSFER_DIRECTION_FROM).
+ * If dst has an AVHWFramesContext attached, then the format of src must use one
+ * of the formats returned by av_hwframe_transfer_get_formats(dst,
+ * AV_HWFRAME_TRANSFER_DIRECTION_TO)
+ *
+ * dst may be "clean" (i.e. with data/buf pointers unset), in which case the
+ * data buffers will be allocated by this function using av_frame_get_buffer().
+ * If dst->format is set, then this format will be used, otherwise (when
+ * dst->format is AV_PIX_FMT_NONE) the first acceptable format will be chosen.
+ *
+ * The two frames must have matching allocated dimensions (i.e. equal to
+ * AVHWFramesContext.width/height), since not all device types support
+ * transferring a sub-rectangle of the whole surface. The display dimensions
+ * (i.e. AVFrame.width/height) may be smaller than the allocated dimensions, but
+ * also have to be equal for both frames. When the display dimensions are
+ * smaller than the allocated dimensions, the content of the padding in the
+ * destination frame is unspecified.
+ *
+ * @param dst the destination frame. dst is not touched on failure.
+ * @param src the source frame.
+ * @param flags currently unused, should be set to zero
+ * @return 0 on success, a negative AVERROR error code on failure.
+ */
+int av_hwframe_transfer_data(AVFrame* dst, const AVFrame* src, int flags);
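+
+/*
+ * Sketch: downloading a hardware frame into system memory. sw_frame is
+ * freshly allocated, so its format is AV_PIX_FMT_NONE and the first
+ * acceptable transfer format is chosen automatically:
+ * @code
+ * AVFrame* sw_frame = av_frame_alloc();
+ * if (av_hwframe_transfer_data(sw_frame, hw_frame, 0) < 0)
+ *     av_frame_free(&sw_frame);  // transfer failed; sw_frame was not touched
+ * @endcode
+ */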
+
+enum AVHWFrameTransferDirection {
+ /**
+ * Transfer the data from the queried hw frame.
+ */
+ AV_HWFRAME_TRANSFER_DIRECTION_FROM,
+
+ /**
+ * Transfer the data to the queried hw frame.
+ */
+ AV_HWFRAME_TRANSFER_DIRECTION_TO,
+};
+
+/**
+ * Get a list of possible source or target formats usable in
+ * av_hwframe_transfer_data().
+ *
+ * @param hwframe_ctx the frame context to obtain the information for
+ * @param dir the direction of the transfer
+ * @param formats the pointer where the output format list will be written.
+ * The list is terminated with AV_PIX_FMT_NONE and must be freed
+ * by the caller when no longer needed using av_free().
+ * If this function returns successfully, the format list will
+ * have at least one item (not counting the terminator).
+ * On failure, the contents of this pointer are unspecified.
+ * @param flags currently unused, should be set to zero
+ * @return 0 on success, a negative AVERROR code on failure.
+ */
+int av_hwframe_transfer_get_formats(AVBufferRef* hwframe_ctx,
+ enum AVHWFrameTransferDirection dir,
+ enum AVPixelFormat** formats, int flags);
+
+/**
+ * This struct describes the constraints on hardware frames attached to
+ * a given device with a hardware-specific configuration. This is returned
+ * by av_hwdevice_get_hwframe_constraints() and must be freed by
+ * av_hwframe_constraints_free() after use.
+ */
+typedef struct AVHWFramesConstraints {
+ /**
+ * A list of possible values for format in the hw_frames_ctx,
+ * terminated by AV_PIX_FMT_NONE. This member will always be filled.
+ */
+ enum AVPixelFormat* valid_hw_formats;
+
+ /**
+ * A list of possible values for sw_format in the hw_frames_ctx,
+ * terminated by AV_PIX_FMT_NONE. Can be NULL if this information is
+ * not known.
+ */
+ enum AVPixelFormat* valid_sw_formats;
+
+ /**
+ * The minimum size of frames in this hw_frames_ctx.
+ * (Zero if not known.)
+ */
+ int min_width;
+ int min_height;
+
+ /**
+ * The maximum size of frames in this hw_frames_ctx.
+ * (INT_MAX if not known / no limit.)
+ */
+ int max_width;
+ int max_height;
+} AVHWFramesConstraints;
+
+/**
+ * Allocate a HW-specific configuration structure for a given HW device.
+ * After use, the user must free all members as required by the specific
+ * hardware structure being used, then free the structure itself with
+ * av_free().
+ *
+ * @param device_ctx a reference to the associated AVHWDeviceContext.
+ * @return The newly created HW-specific configuration structure on
+ * success or NULL on failure.
+ */
+void* av_hwdevice_hwconfig_alloc(AVBufferRef* device_ctx);
+
+/**
+ * Get the constraints on HW frames given a device and the HW-specific
+ * configuration to be used with that device. If no HW-specific
+ * configuration is provided, returns the maximum possible capabilities
+ * of the device.
+ *
+ * @param ref a reference to the associated AVHWDeviceContext.
+ * @param hwconfig a filled HW-specific configuration structure, or NULL
+ * to return the maximum possible capabilities of the device.
+ * @return AVHWFramesConstraints structure describing the constraints
+ * on the device, or NULL if not available.
+ */
+AVHWFramesConstraints* av_hwdevice_get_hwframe_constraints(
+ AVBufferRef* ref, const void* hwconfig);
+
+/**
+ * Free an AVHWFramesConstraints structure.
+ *
+ * @param constraints The (filled or unfilled) AVHWFramesConstraints structure.
+ */
+void av_hwframe_constraints_free(AVHWFramesConstraints** constraints);
+
+/**
+ * Flags to apply to frame mappings.
+ */
+enum {
+ /**
+ * The mapping must be readable.
+ */
+ AV_HWFRAME_MAP_READ = 1 << 0,
+ /**
+ * The mapping must be writeable.
+ */
+ AV_HWFRAME_MAP_WRITE = 1 << 1,
+ /**
+ * The mapped frame will be overwritten completely in subsequent
+ * operations, so the current frame data need not be loaded. Any values
+ * which are not overwritten are unspecified.
+ */
+ AV_HWFRAME_MAP_OVERWRITE = 1 << 2,
+ /**
+ * The mapping must be direct. That is, there must not be any copying in
+ * the map or unmap steps. Note that performance of direct mappings may
+ * be much lower than normal memory.
+ */
+ AV_HWFRAME_MAP_DIRECT = 1 << 3,
+};
+
+/**
+ * Map a hardware frame.
+ *
+ * This has a number of different possible effects, depending on the format
+ * and origin of the src and dst frames. On input, src should be a usable
+ * frame with valid buffers and dst should be blank (typically as just created
+ * by av_frame_alloc()). src should have an associated hwframe context, and
+ * dst may optionally have a format and associated hwframe context.
+ *
+ * If src was created by mapping a frame from the hwframe context of dst,
+ * then this function undoes the mapping - dst is replaced by a reference to
+ * the frame that src was originally mapped from.
+ *
+ * If both src and dst have an associated hwframe context, then this function
+ * attempts to map the src frame from its hardware context to that of dst and
+ * then fill dst with appropriate data to be usable there. This will only be
+ * possible if the hwframe contexts and associated devices are compatible -
+ * given compatible devices, av_hwframe_ctx_create_derived() can be used to
+ * create a hwframe context for dst in which mapping should be possible.
+ *
+ * If src has a hwframe context but dst does not, then the src frame is
+ * mapped to normal memory and should thereafter be usable as a normal frame.
+ * If the format is set on dst, then the mapping will attempt to create dst
+ * with that format and fail if it is not possible. If format is unset (is
+ * AV_PIX_FMT_NONE) then dst will be mapped with whatever the most appropriate
+ * format to use is (probably the sw_format of the src hwframe context).
+ *
+ * A return value of AVERROR(ENOSYS) indicates that the mapping is not
+ * possible with the given arguments and hwframe setup, while other return
+ * values indicate that it failed somehow.
+ *
+ * On failure, the destination frame will be left blank, except for the
+ * hw_frames_ctx/format fields that may have been set by the caller - those will
+ * be preserved as they were.
+ *
+ * @param dst Destination frame, to contain the mapping.
+ * @param src Source frame, to be mapped.
+ * @param flags Some combination of AV_HWFRAME_MAP_* flags.
+ * @return Zero on success, negative AVERROR code on failure.
+ */
+int av_hwframe_map(AVFrame* dst, const AVFrame* src, int flags);
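+
+/*
+ * Sketch: read-only mapping of a hardware frame, with a copy-based fallback
+ * for devices that do not implement direct mapping:
+ * @code
+ * AVFrame* mapped = av_frame_alloc();
+ * int err = av_hwframe_map(mapped, hw_frame, AV_HWFRAME_MAP_READ);
+ * if (err == AVERROR(ENOSYS))
+ *     err = av_hwframe_transfer_data(mapped, hw_frame, 0);  // copy instead
+ * @endcode
+ */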
+
+/**
+ * Create and initialise an AVHWFramesContext as a mapping of another existing
+ * AVHWFramesContext on a different device.
+ *
+ * av_hwframe_ctx_init() should not be called after this.
+ *
+ * @param derived_frame_ctx On success, a reference to the newly created
+ * AVHWFramesContext.
+ * @param format The AVPixelFormat for the derived context.
+ * @param derived_device_ctx A reference to the device to create the new
+ * AVHWFramesContext on.
+ * @param source_frame_ctx A reference to an existing AVHWFramesContext
+ * which will be mapped to the derived context.
+ * @param flags Some combination of AV_HWFRAME_MAP_* flags, defining the
+ * mapping parameters to apply to frames which are allocated
+ * in the derived device.
+ * @return Zero on success, negative AVERROR code on failure.
+ */
+int av_hwframe_ctx_create_derived(AVBufferRef** derived_frame_ctx,
+ enum AVPixelFormat format,
+ AVBufferRef* derived_device_ctx,
+ AVBufferRef* source_frame_ctx, int flags);
+
+#endif /* AVUTIL_HWCONTEXT_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/hwcontext_drm.h b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/hwcontext_drm.h
new file mode 100644
index 0000000000..8d8a651b48
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/hwcontext_drm.h
@@ -0,0 +1,169 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_HWCONTEXT_DRM_H
+#define AVUTIL_HWCONTEXT_DRM_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+/**
+ * @file
+ * API-specific header for AV_HWDEVICE_TYPE_DRM.
+ *
+ * Internal frame allocation is not currently supported - all frames
+ * must be allocated by the user. Thus AVHWFramesContext is always
+ * NULL, though this may change if support for frame allocation is
+ * added in future.
+ */
+
+enum {
+ /**
+ * The maximum number of layers/planes in a DRM frame.
+ */
+ AV_DRM_MAX_PLANES = 4
+};
+
+/**
+ * DRM object descriptor.
+ *
+ * Describes a single DRM object, addressing it as a PRIME file
+ * descriptor.
+ */
+typedef struct AVDRMObjectDescriptor {
+ /**
+ * DRM PRIME fd for the object.
+ */
+ int fd;
+ /**
+ * Total size of the object.
+ *
+   * (This includes any parts which do not contain image data.)
+ */
+ size_t size;
+ /**
+ * Format modifier applied to the object (DRM_FORMAT_MOD_*).
+ *
+ * If the format modifier is unknown then this should be set to
+ * DRM_FORMAT_MOD_INVALID.
+ */
+ uint64_t format_modifier;
+} AVDRMObjectDescriptor;
+
+/**
+ * DRM plane descriptor.
+ *
+ * Describes a single plane of a layer, which is contained within
+ * a single object.
+ */
+typedef struct AVDRMPlaneDescriptor {
+ /**
+ * Index of the object containing this plane in the objects
+ * array of the enclosing frame descriptor.
+ */
+ int object_index;
+ /**
+ * Offset within that object of this plane.
+ */
+ ptrdiff_t offset;
+ /**
+ * Pitch (linesize) of this plane.
+ */
+ ptrdiff_t pitch;
+} AVDRMPlaneDescriptor;
+
+/**
+ * DRM layer descriptor.
+ *
+ * Describes a single layer within a frame. This has the structure
+ * defined by its format, and will contain one or more planes.
+ */
+typedef struct AVDRMLayerDescriptor {
+ /**
+ * Format of the layer (DRM_FORMAT_*).
+ */
+ uint32_t format;
+ /**
+ * Number of planes in the layer.
+ *
+ * This must match the number of planes required by format.
+ */
+ int nb_planes;
+ /**
+ * Array of planes in this layer.
+ */
+ AVDRMPlaneDescriptor planes[AV_DRM_MAX_PLANES];
+} AVDRMLayerDescriptor;
+
+/**
+ * DRM frame descriptor.
+ *
+ * This is used as the data pointer for AV_PIX_FMT_DRM_PRIME frames.
+ * It is also used by user-allocated frame pools - allocating in
+ * AVHWFramesContext.pool must return AVBufferRefs which contain
+ * an object of this type.
+ *
+ * The fields of this structure should be set such it can be
+ * imported directly by EGL using the EGL_EXT_image_dma_buf_import
+ * and EGL_EXT_image_dma_buf_import_modifiers extensions.
+ * (Note that the exact layout of a particular format may vary between
+ * platforms - we only specify that the same platform should be able
+ * to import it.)
+ *
+ * The total number of planes must not exceed AV_DRM_MAX_PLANES, and
+ * the order of the planes by increasing layer index followed by
+ * increasing plane index must be the same as the order which would
+ * be used for the data pointers in the equivalent software format.
+ */
+typedef struct AVDRMFrameDescriptor {
+ /**
+ * Number of DRM objects making up this frame.
+ */
+ int nb_objects;
+ /**
+ * Array of objects making up the frame.
+ */
+ AVDRMObjectDescriptor objects[AV_DRM_MAX_PLANES];
+ /**
+ * Number of layers in the frame.
+ */
+ int nb_layers;
+ /**
+ * Array of layers in the frame.
+ */
+ AVDRMLayerDescriptor layers[AV_DRM_MAX_PLANES];
+} AVDRMFrameDescriptor;
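+
+/*
+ * Sketch of describing a single-object NV12 frame: one dmabuf fd carries a
+ * luma plane followed by an interleaved chroma plane. dmabuf_fd, total_size,
+ * pitch and height are assumed inputs, and the DRM_FORMAT_* constants come
+ * from <drm_fourcc.h>:
+ * @code
+ * AVDRMFrameDescriptor desc = {
+ *     .nb_objects = 1,
+ *     .objects[0] = { .fd = dmabuf_fd, .size = total_size,
+ *                     .format_modifier = DRM_FORMAT_MOD_INVALID },
+ *     .nb_layers  = 1,
+ *     .layers[0]  = { .format = DRM_FORMAT_NV12, .nb_planes = 2,
+ *                     .planes = { { 0, 0, pitch },
+ *                                 { 0, pitch * height, pitch } } },
+ * };
+ * @endcode
+ */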
+
+/**
+ * DRM device.
+ *
+ * Allocated as AVHWDeviceContext.hwctx.
+ */
+typedef struct AVDRMDeviceContext {
+ /**
+ * File descriptor of DRM device.
+ *
+ * This is used as the device to create frames on, and may also be
+ * used in some derivation and mapping operations.
+ *
+ * If no device is required, set to -1.
+ */
+ int fd;
+} AVDRMDeviceContext;
+
+#endif /* AVUTIL_HWCONTEXT_DRM_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/hwcontext_vaapi.h b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/hwcontext_vaapi.h
new file mode 100644
index 0000000000..058b5f110d
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/hwcontext_vaapi.h
@@ -0,0 +1,117 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_HWCONTEXT_VAAPI_H
+#define AVUTIL_HWCONTEXT_VAAPI_H
+
+#include <va/va.h>
+
+/**
+ * @file
+ * API-specific header for AV_HWDEVICE_TYPE_VAAPI.
+ *
+ * Dynamic frame pools are supported, but note that any pool used as a render
+ * target is required to be of fixed size in order to be usable as an
+ * argument to vaCreateContext().
+ *
+ * For user-allocated pools, AVHWFramesContext.pool must return AVBufferRefs
+ * with the data pointer set to a VASurfaceID.
+ */
+
+enum {
+ /**
+ * The quirks field has been set by the user and should not be detected
+ * automatically by av_hwdevice_ctx_init().
+ */
+ AV_VAAPI_DRIVER_QUIRK_USER_SET = (1 << 0),
+ /**
+ * The driver does not destroy parameter buffers when they are used by
+ * vaRenderPicture(). Additional code will be required to destroy them
+ * separately afterwards.
+ */
+ AV_VAAPI_DRIVER_QUIRK_RENDER_PARAM_BUFFERS = (1 << 1),
+
+ /**
+ * The driver does not support the VASurfaceAttribMemoryType attribute,
+ * so the surface allocation code will not try to use it.
+ */
+ AV_VAAPI_DRIVER_QUIRK_ATTRIB_MEMTYPE = (1 << 2),
+
+ /**
+ * The driver does not support surface attributes at all.
+ * The surface allocation code will never pass them to surface allocation,
+ * and the results of the vaQuerySurfaceAttributes() call will be faked.
+ */
+ AV_VAAPI_DRIVER_QUIRK_SURFACE_ATTRIBUTES = (1 << 3),
+};
+
+/**
+ * VAAPI connection details.
+ *
+ * Allocated as AVHWDeviceContext.hwctx
+ */
+typedef struct AVVAAPIDeviceContext {
+ /**
+ * The VADisplay handle, to be filled by the user.
+ */
+ VADisplay display;
+ /**
+ * Driver quirks to apply - this is filled by av_hwdevice_ctx_init(),
+ * with reference to a table of known drivers, unless the
+ * AV_VAAPI_DRIVER_QUIRK_USER_SET bit is already present. The user
+ * may need to refer to this field when performing any later
+ * operations using VAAPI with the same VADisplay.
+ */
+ unsigned int driver_quirks;
+} AVVAAPIDeviceContext;
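+
+/*
+ * Sketch: wrapping an externally obtained VADisplay (e.g. from
+ * vaGetDisplayDRM()) in a device context:
+ * @code
+ * AVBufferRef* ref = av_hwdevice_ctx_alloc(AV_HWDEVICE_TYPE_VAAPI);
+ * AVHWDeviceContext* ctx = (AVHWDeviceContext*)ref->data;
+ * ((AVVAAPIDeviceContext*)ctx->hwctx)->display = display;
+ * av_hwdevice_ctx_init(ref);  // fills driver_quirks unless USER_SET is set
+ * @endcode
+ */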
+
+/**
+ * VAAPI-specific data associated with a frame pool.
+ *
+ * Allocated as AVHWFramesContext.hwctx.
+ */
+typedef struct AVVAAPIFramesContext {
+ /**
+ * Set by the user to apply surface attributes to all surfaces in
+   * the frame pool. If NULL, default settings are used.
+ */
+ VASurfaceAttrib* attributes;
+ int nb_attributes;
+ /**
+ * The surfaces IDs of all surfaces in the pool after creation.
+ * Only valid if AVHWFramesContext.initial_pool_size was positive.
+ * These are intended to be used as the render_targets arguments to
+ * vaCreateContext().
+ */
+ VASurfaceID* surface_ids;
+ int nb_surfaces;
+} AVVAAPIFramesContext;
+
+/**
+ * VAAPI hardware pipeline configuration details.
+ *
+ * Allocated with av_hwdevice_hwconfig_alloc().
+ */
+typedef struct AVVAAPIHWConfig {
+ /**
+ * ID of a VAAPI pipeline configuration.
+ */
+ VAConfigID config_id;
+} AVVAAPIHWConfig;
+
+#endif /* AVUTIL_HWCONTEXT_VAAPI_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/intfloat.h b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/intfloat.h
new file mode 100644
index 0000000000..f373c97796
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/intfloat.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2011 Mans Rullgard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_INTFLOAT_H
+#define AVUTIL_INTFLOAT_H
+
+#include <stdint.h>
+#include "attributes.h"
+
+union av_intfloat32 {
+ uint32_t i;
+ float f;
+};
+
+union av_intfloat64 {
+ uint64_t i;
+ double f;
+};
+
+/**
+ * Reinterpret a 32-bit integer as a float.
+ */
+static av_always_inline float av_int2float(uint32_t i) {
+ union av_intfloat32 v;
+ v.i = i;
+ return v.f;
+}
+
+/**
+ * Reinterpret a float as a 32-bit integer.
+ */
+static av_always_inline uint32_t av_float2int(float f) {
+ union av_intfloat32 v;
+ v.f = f;
+ return v.i;
+}
+
+/**
+ * Reinterpret a 64-bit integer as a double.
+ */
+static av_always_inline double av_int2double(uint64_t i) {
+ union av_intfloat64 v;
+ v.i = i;
+ return v.f;
+}
+
+/**
+ * Reinterpret a double as a 64-bit integer.
+ */
+static av_always_inline uint64_t av_double2int(double f) {
+ union av_intfloat64 v;
+ v.f = f;
+ return v.i;
+}
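+
+/*
+ * These conversions are exact bit reinterpretations, so round trips are
+ * lossless; for example, 1.0f corresponds to the IEEE 754 pattern 0x3F800000:
+ * @code
+ * uint32_t bits = av_float2int(1.0f);  // 0x3F800000
+ * float f = av_int2float(bits);        // exactly 1.0f again
+ * @endcode
+ */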
+
+#endif /* AVUTIL_INTFLOAT_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/log.h b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/log.h
new file mode 100644
index 0000000000..e1f2af7b18
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/log.h
@@ -0,0 +1,388 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_LOG_H
+#define AVUTIL_LOG_H
+
+#include <stdarg.h>
+#include "attributes.h"
+#include "version.h"
+
+typedef enum {
+ AV_CLASS_CATEGORY_NA = 0,
+ AV_CLASS_CATEGORY_INPUT,
+ AV_CLASS_CATEGORY_OUTPUT,
+ AV_CLASS_CATEGORY_MUXER,
+ AV_CLASS_CATEGORY_DEMUXER,
+ AV_CLASS_CATEGORY_ENCODER,
+ AV_CLASS_CATEGORY_DECODER,
+ AV_CLASS_CATEGORY_FILTER,
+ AV_CLASS_CATEGORY_BITSTREAM_FILTER,
+ AV_CLASS_CATEGORY_SWSCALER,
+ AV_CLASS_CATEGORY_SWRESAMPLER,
+ AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT = 40,
+ AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
+ AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT,
+ AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT,
+ AV_CLASS_CATEGORY_DEVICE_OUTPUT,
+ AV_CLASS_CATEGORY_DEVICE_INPUT,
+ AV_CLASS_CATEGORY_NB ///< not part of ABI/API
+} AVClassCategory;
+
+#define AV_IS_INPUT_DEVICE(category) \
+ (((category) == AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT) || \
+ ((category) == AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT) || \
+ ((category) == AV_CLASS_CATEGORY_DEVICE_INPUT))
+
+#define AV_IS_OUTPUT_DEVICE(category) \
+ (((category) == AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT) || \
+ ((category) == AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT) || \
+ ((category) == AV_CLASS_CATEGORY_DEVICE_OUTPUT))
+
+struct AVOptionRanges;
+
+/**
+ * Describe the class of an AVClass context structure. That is an
+ * arbitrary struct of which the first field is a pointer to an
+ * AVClass struct (e.g. AVCodecContext, AVFormatContext etc.).
+ */
+typedef struct AVClass {
+ /**
+ * The name of the class; usually it is the same name as the
+ * context structure type to which the AVClass is associated.
+ */
+ const char* class_name;
+
+ /**
+ * A pointer to a function which returns the name of a context
+ * instance ctx associated with the class.
+ */
+ const char* (*item_name)(void* ctx);
+
+ /**
+ * a pointer to the first option specified in the class if any or NULL
+ *
+ * @see av_set_default_options()
+ */
+ const struct AVOption* option;
+
+ /**
+ * LIBAVUTIL_VERSION with which this structure was created.
+ * This is used to allow fields to be added without requiring major
+ * version bumps everywhere.
+ */
+
+ int version;
+
+ /**
+ * Offset in the structure where log_level_offset is stored.
+ * 0 means there is no such variable
+ */
+ int log_level_offset_offset;
+
+ /**
+ * Offset in the structure where a pointer to the parent context for
+ * logging is stored. For example a decoder could pass its AVCodecContext
+ * to eval as such a parent context, which an av_log() implementation
+ * could then leverage to display the parent context.
+   * An offset of 0 means there is no such field.
+ */
+ int parent_log_context_offset;
+
+ /**
+   * Category used for visualization (like color).
+ * This is only set if the category is equal for all objects using this class.
+ * available since version (51 << 16 | 56 << 8 | 100)
+ */
+ AVClassCategory category;
+
+ /**
+ * Callback to return the category.
+ * available since version (51 << 16 | 59 << 8 | 100)
+ */
+ AVClassCategory (*get_category)(void* ctx);
+
+ /**
+ * Callback to return the supported/allowed ranges.
+ * available since version (52.12)
+ */
+ int (*query_ranges)(struct AVOptionRanges**, void* obj, const char* key,
+ int flags);
+
+ /**
+ * Return next AVOptions-enabled child or NULL
+ */
+ void* (*child_next)(void* obj, void* prev);
+
+ /**
+ * Iterate over the AVClasses corresponding to potential AVOptions-enabled
+ * children.
+ *
+ * @param iter pointer to opaque iteration state. The caller must initialize
+ * *iter to NULL before the first call.
+ * @return AVClass for the next AVOptions-enabled child or NULL if there are
+ * no more such children.
+ *
+ * @note The difference between child_next and this is that child_next
+ * iterates over _already existing_ objects, while child_class_iterate
+ * iterates over _all possible_ children.
+ */
+ const struct AVClass* (*child_class_iterate)(void** iter);
+} AVClass;
+
+/**
+ * @addtogroup lavu_log
+ *
+ * @{
+ *
+ * @defgroup lavu_log_constants Logging Constants
+ *
+ * @{
+ */
+
+/**
+ * Print no output.
+ */
+#define AV_LOG_QUIET -8
+
+/**
+ * Something went really wrong and we will crash now.
+ */
+#define AV_LOG_PANIC 0
+
+/**
+ * Something went wrong and recovery is not possible.
+ * For example, no header was found for a format which depends
+ * on headers or an illegal combination of parameters is used.
+ */
+#define AV_LOG_FATAL 8
+
+/**
+ * Something went wrong and cannot losslessly be recovered.
+ * However, not all future data is affected.
+ */
+#define AV_LOG_ERROR 16
+
+/**
+ * Something somehow does not look correct. This may or may not
+ * lead to problems. An example would be the use of '-vstrict -2'.
+ */
+#define AV_LOG_WARNING 24
+
+/**
+ * Standard information.
+ */
+#define AV_LOG_INFO 32
+
+/**
+ * Detailed information.
+ */
+#define AV_LOG_VERBOSE 40
+
+/**
+ * Stuff which is only useful for libav* developers.
+ */
+#define AV_LOG_DEBUG 48
+
+/**
+ * Extremely verbose debugging, useful for libav* development.
+ */
+#define AV_LOG_TRACE 56
+
+#define AV_LOG_MAX_OFFSET (AV_LOG_TRACE - AV_LOG_QUIET)
+
+/**
+ * @}
+ */
+
+/**
+ * Sets additional colors for extended debugging sessions.
+ * @code
+ av_log(ctx, AV_LOG_DEBUG|AV_LOG_C(134), "Message in purple\n");
+ @endcode
+ * Requires 256-color terminal support. Use outside of debugging sessions is
+ * not recommended.
+ */
+#define AV_LOG_C(x) ((x) << 8)
+
+/**
+ * Send the specified message to the log if the level is less than or equal
+ * to the current av_log_level. By default, all logging messages are sent to
+ * stderr. This behavior can be altered by setting a different logging callback
+ * function.
+ * @see av_log_set_callback
+ *
+ * @param avcl A pointer to an arbitrary struct of which the first field is a
+ * pointer to an AVClass struct, or NULL for a general log message.
+ * @param level The importance level of the message expressed using a @ref
+ * lavu_log_constants "Logging Constant".
+ * @param fmt The format string (printf-compatible) that specifies how
+ * subsequent arguments are converted to output.
+ */
+void av_log(void* avcl, int level, const char* fmt, ...) av_printf_format(3, 4);
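+
+/*
+ * A minimal usage sketch (illustrative; `avctx` and `nb_frames` are assumed
+ * caller-side names): pass a context whose first field is an AVClass
+ * pointer, or NULL for a general message.
+ *
+ * @code{.c}
+ * av_log(NULL, AV_LOG_INFO, "frames decoded: %d\n", nb_frames);
+ * av_log(avctx, AV_LOG_WARNING, "unexpected packet size\n");
+ * @endcode
+ */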
+
+/**
+ * Send the specified message to the log once with the initial_level and then
+ * with the subsequent_level. By default, all logging messages are sent to
+ * stderr. This behavior can be altered by setting a different logging callback
+ * function.
+ * @see av_log
+ *
+ * @param avcl A pointer to an arbitrary struct of which the first field is a
+ * pointer to an AVClass struct, or NULL for a general log message.
+ * @param initial_level importance level of the message expressed using a @ref
+ * lavu_log_constants "Logging Constant" for the first occurrence.
+ * @param subsequent_level importance level of the message expressed using a
+ * @ref lavu_log_constants "Logging Constant" after the first occurrence.
+ * @param fmt The format string (printf-compatible) that specifies how
+ * subsequent arguments are converted to output.
+ * @param state a variable to keep track of whether the message has already
+ * been printed; this must be initialized to 0 before the first use.
+ * The same state must not be accessed by two threads simultaneously.
+ */
+void av_log_once(void* avcl, int initial_level, int subsequent_level,
+ int* state, const char* fmt, ...) av_printf_format(5, 6);
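+
+/*
+ * Illustrative sketch: emit the first occurrence at WARNING and subsequent
+ * occurrences at DEBUG, keeping the required state in a static int.
+ *
+ * @code{.c}
+ * static int warned = 0;
+ * av_log_once(NULL, AV_LOG_WARNING, AV_LOG_DEBUG, &warned,
+ *             "deprecated option used\n");
+ * @endcode
+ */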
+
+/**
+ * Send the specified message to the log if the level is less than or equal
+ * to the current av_log_level. By default, all logging messages are sent to
+ * stderr. This behavior can be altered by setting a different logging callback
+ * function.
+ * @see av_log_set_callback
+ *
+ * @param avcl A pointer to an arbitrary struct of which the first field is a
+ * pointer to an AVClass struct.
+ * @param level The importance level of the message expressed using a @ref
+ * lavu_log_constants "Logging Constant".
+ * @param fmt The format string (printf-compatible) that specifies how
+ * subsequent arguments are converted to output.
+ * @param vl The arguments referenced by the format string.
+ */
+void av_vlog(void* avcl, int level, const char* fmt, va_list vl);
+
+/**
+ * Get the current log level
+ *
+ * @see lavu_log_constants
+ *
+ * @return Current log level
+ */
+int av_log_get_level(void);
+
+/**
+ * Set the log level
+ *
+ * @see lavu_log_constants
+ *
+ * @param level Logging level
+ */
+void av_log_set_level(int level);
+
+/**
+ * Set the logging callback
+ *
+ * @note The callback must be thread safe, even if the application does not use
+ * threads itself as some codecs are multithreaded.
+ *
+ * @see av_log_default_callback
+ *
+ * @param callback A logging function with a compatible signature.
+ */
+void av_log_set_callback(void (*callback)(void*, int, const char*, va_list));
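+
+/*
+ * Illustrative sketch of a custom callback that forwards to the default
+ * handler; as noted above, it must be thread safe.
+ *
+ * @code{.c}
+ * static void my_log_cb(void *avcl, int level, const char *fmt, va_list vl) {
+ *   if (level > av_log_get_level())
+ *     return;  // skip messages more verbose than the configured level
+ *   av_log_default_callback(avcl, level, fmt, vl);
+ * }
+ *
+ * av_log_set_callback(my_log_cb);
+ * @endcode
+ */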
+
+/**
+ * Default logging callback
+ *
+ * It prints the message to stderr, optionally colorizing it.
+ *
+ * @param avcl A pointer to an arbitrary struct of which the first field is a
+ * pointer to an AVClass struct.
+ * @param level The importance level of the message expressed using a @ref
+ * lavu_log_constants "Logging Constant".
+ * @param fmt The format string (printf-compatible) that specifies how
+ * subsequent arguments are converted to output.
+ * @param vl The arguments referenced by the format string.
+ */
+void av_log_default_callback(void* avcl, int level, const char* fmt,
+ va_list vl);
+
+/**
+ * Return the context name
+ *
+ * @param ctx The AVClass context
+ *
+ * @return The AVClass class_name
+ */
+const char* av_default_item_name(void* ctx);
+AVClassCategory av_default_get_category(void* ptr);
+
+/**
+ * Format a line of log the same way as the default callback.
+ * @param line buffer to receive the formatted line
+ * @param line_size size of the buffer
+ * @param print_prefix used to store whether the prefix must be printed;
+ * must point to a persistent integer initially set to 1
+ */
+void av_log_format_line(void* ptr, int level, const char* fmt, va_list vl,
+ char* line, int line_size, int* print_prefix);
+
+/**
+ * Format a line of log the same way as the default callback.
+ * @param line buffer to receive the formatted line;
+ * may be NULL if line_size is 0
+ * @param line_size size of the buffer; at most line_size-1 characters will
+ * be written to the buffer, plus one null terminator
+ * @param print_prefix used to store whether the prefix must be printed;
+ * must point to a persistent integer initially set to 1
+ * @return Returns a negative value if an error occurred, otherwise returns
+ * the number of characters that would have been written for a
+ * sufficiently large buffer, not including the terminating null
+ * character. If the return value is not less than line_size, it means
+ * that the log message was truncated to fit the buffer.
+ */
+int av_log_format_line2(void* ptr, int level, const char* fmt, va_list vl,
+ char* line, int line_size, int* print_prefix);
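+
+/*
+ * Illustrative single-threaded sketch: format a message into a fixed buffer
+ * from inside a logging callback and detect truncation via the return value.
+ *
+ * @code{.c}
+ * static void buffered_cb(void *avcl, int level, const char *fmt, va_list vl) {
+ *   static int print_prefix = 1;  // persistent, as required above
+ *   char line[1024];
+ *   int n = av_log_format_line2(avcl, level, fmt, vl, line, sizeof(line),
+ *                               &print_prefix);
+ *   if (n >= (int)sizeof(line)) {
+ *     // the message was truncated to fit the buffer
+ *   }
+ *   // hand `line` to the application's own logging sink here
+ * }
+ * @endcode
+ */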
+
+/**
+ * Skip repeated messages. This requires the user app to use av_log() instead
+ * of (f)printf, as the two would otherwise interfere and, with some bad luck,
+ * lead to "Last message repeated x times" messages below (f)printf messages.
+ * Also, to receive the final "last repeated" line, if any, the user app must
+ * call av_log(NULL, AV_LOG_QUIET, "%s", ""); at the end.
+ */
+#define AV_LOG_SKIP_REPEATED 1
+
+/**
+ * Include the log severity in messages originating from codecs.
+ *
+ * Results in messages such as:
+ * [rawvideo @ 0xDEADBEEF] [error] encode did not produce valid pts
+ */
+#define AV_LOG_PRINT_LEVEL 2
+
+void av_log_set_flags(int arg);
+int av_log_get_flags(void);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_LOG_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/macros.h b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/macros.h
new file mode 100644
index 0000000000..1578d1a345
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/macros.h
@@ -0,0 +1,87 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * @ingroup lavu
+ * Utility Preprocessor macros
+ */
+
+#ifndef AVUTIL_MACROS_H
+#define AVUTIL_MACROS_H
+
+#include "libavutil/avconfig.h"
+
+#if AV_HAVE_BIGENDIAN
+# define AV_NE(be, le) (be)
+#else
+# define AV_NE(be, le) (le)
+#endif
+
+/**
+ * Comparator.
+ * For two numerical expressions x and y, gives 1 if x > y, -1 if x < y, and 0
+ * if x == y. This is useful for instance in a qsort comparator callback.
+ * Furthermore, compilers are able to optimize this to branchless code, and
+ * there is no risk of overflow with signed types.
+ * As with many macros, this evaluates its arguments multiple times; they
+ * thus must not have side effects.
+ */
+#define FFDIFFSIGN(x, y) (((x) > (y)) - ((x) < (y)))
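+
+/*
+ * Illustrative sketch: FFDIFFSIGN as a qsort comparator, as suggested above;
+ * it avoids the overflow risk of the classic `*a - *b` idiom.
+ *
+ * @code{.c}
+ * static int cmp_int(const void *a, const void *b) {
+ *   return FFDIFFSIGN(*(const int *)a, *(const int *)b);
+ * }
+ * @endcode
+ */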
+
+#define FFMAX(a, b) ((a) > (b) ? (a) : (b))
+#define FFMAX3(a, b, c) FFMAX(FFMAX(a, b), c)
+#define FFMIN(a, b) ((a) > (b) ? (b) : (a))
+#define FFMIN3(a, b, c) FFMIN(FFMIN(a, b), c)
+
+#define FFSWAP(type, a, b) \
+ do { \
+ type SWAP_tmp = b; \
+ b = a; \
+ a = SWAP_tmp; \
+ } while (0)
+#define FF_ARRAY_ELEMS(a) (sizeof(a) / sizeof((a)[0]))
+
+#define MKTAG(a, b, c, d) \
+ ((a) | ((b) << 8) | ((c) << 16) | ((unsigned)(d) << 24))
+#define MKBETAG(a, b, c, d) \
+ ((d) | ((c) << 8) | ((b) << 16) | ((unsigned)(a) << 24))
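+
+/*
+ * Illustrative sketch: MKTAG places its first argument in the least
+ * significant byte, MKBETAG in the most significant byte.
+ *
+ * @code{.c}
+ * uint32_t tag = MKTAG('a', 'v', 'c', '1');  // 'a' is the low byte
+ * @endcode
+ */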
+
+/**
+ * @addtogroup preproc_misc Preprocessor String Macros
+ *
+ * String manipulation macros
+ *
+ * @{
+ */
+
+#define AV_STRINGIFY(s) AV_TOSTRING(s)
+#define AV_TOSTRING(s) #s
+
+#define AV_GLUE(a, b) a##b
+#define AV_JOIN(a, b) AV_GLUE(a, b)
+
+/**
+ * @}
+ */
+
+#define AV_PRAGMA(s) _Pragma(#s)
+
+#define FFALIGN(x, a) (((x) + (a)-1) & ~((a)-1))
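+
+/*
+ * Illustrative sketch: FFALIGN rounds up to a power-of-two boundary, e.g.
+ * FFALIGN(100, 32) == 128; a common use is padding line sizes.
+ */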
+
+#endif /* AVUTIL_MACROS_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/mathematics.h b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/mathematics.h
new file mode 100644
index 0000000000..cab0d080d2
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/mathematics.h
@@ -0,0 +1,305 @@
+/*
+ * copyright (c) 2005-2012 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * @addtogroup lavu_math
+ * Mathematical utilities for working with timestamp and time base.
+ */
+
+#ifndef AVUTIL_MATHEMATICS_H
+#define AVUTIL_MATHEMATICS_H
+
+#include <stdint.h>
+#include <math.h>
+#include "attributes.h"
+#include "rational.h"
+#include "intfloat.h"
+
+#ifndef M_E
+# define M_E 2.7182818284590452354 /* e */
+#endif
+#ifndef M_Ef
+# define M_Ef 2.7182818284590452354f /* e */
+#endif
+#ifndef M_LN2
+# define M_LN2 0.69314718055994530942 /* log_e 2 */
+#endif
+#ifndef M_LN2f
+# define M_LN2f 0.69314718055994530942f /* log_e 2 */
+#endif
+#ifndef M_LN10
+# define M_LN10 2.30258509299404568402 /* log_e 10 */
+#endif
+#ifndef M_LN10f
+# define M_LN10f 2.30258509299404568402f /* log_e 10 */
+#endif
+#ifndef M_LOG2_10
+# define M_LOG2_10 3.32192809488736234787 /* log_2 10 */
+#endif
+#ifndef M_LOG2_10f
+# define M_LOG2_10f 3.32192809488736234787f /* log_2 10 */
+#endif
+#ifndef M_PHI
+# define M_PHI 1.61803398874989484820 /* phi / golden ratio */
+#endif
+#ifndef M_PHIf
+# define M_PHIf 1.61803398874989484820f /* phi / golden ratio */
+#endif
+#ifndef M_PI
+# define M_PI 3.14159265358979323846 /* pi */
+#endif
+#ifndef M_PIf
+# define M_PIf 3.14159265358979323846f /* pi */
+#endif
+#ifndef M_PI_2
+# define M_PI_2 1.57079632679489661923 /* pi/2 */
+#endif
+#ifndef M_PI_2f
+# define M_PI_2f 1.57079632679489661923f /* pi/2 */
+#endif
+#ifndef M_PI_4
+# define M_PI_4 0.78539816339744830962 /* pi/4 */
+#endif
+#ifndef M_PI_4f
+# define M_PI_4f 0.78539816339744830962f /* pi/4 */
+#endif
+#ifndef M_1_PI
+# define M_1_PI 0.31830988618379067154 /* 1/pi */
+#endif
+#ifndef M_1_PIf
+# define M_1_PIf 0.31830988618379067154f /* 1/pi */
+#endif
+#ifndef M_2_PI
+# define M_2_PI 0.63661977236758134308 /* 2/pi */
+#endif
+#ifndef M_2_PIf
+# define M_2_PIf 0.63661977236758134308f /* 2/pi */
+#endif
+#ifndef M_2_SQRTPI
+# define M_2_SQRTPI 1.12837916709551257390 /* 2/sqrt(pi) */
+#endif
+#ifndef M_2_SQRTPIf
+# define M_2_SQRTPIf 1.12837916709551257390f /* 2/sqrt(pi) */
+#endif
+#ifndef M_SQRT1_2
+# define M_SQRT1_2 0.70710678118654752440 /* 1/sqrt(2) */
+#endif
+#ifndef M_SQRT1_2f
+# define M_SQRT1_2f 0.70710678118654752440f /* 1/sqrt(2) */
+#endif
+#ifndef M_SQRT2
+# define M_SQRT2 1.41421356237309504880 /* sqrt(2) */
+#endif
+#ifndef M_SQRT2f
+# define M_SQRT2f 1.41421356237309504880f /* sqrt(2) */
+#endif
+#ifndef NAN
+# define NAN av_int2float(0x7fc00000)
+#endif
+#ifndef INFINITY
+# define INFINITY av_int2float(0x7f800000)
+#endif
+
+/**
+ * @addtogroup lavu_math
+ *
+ * @{
+ */
+
+/**
+ * Rounding methods.
+ */
+enum AVRounding {
+ AV_ROUND_ZERO = 0, ///< Round toward zero.
+ AV_ROUND_INF = 1, ///< Round away from zero.
+ AV_ROUND_DOWN = 2, ///< Round toward -infinity.
+ AV_ROUND_UP = 3, ///< Round toward +infinity.
+ AV_ROUND_NEAR_INF =
+ 5, ///< Round to nearest and halfway cases away from zero.
+ /**
+ * Flag telling rescaling functions to pass `INT64_MIN`/`MAX` through
+ * unchanged, avoiding special cases for #AV_NOPTS_VALUE.
+ *
+ * Unlike other values of the enumeration AVRounding, this value is a
+ * bitmask that must be used in conjunction with another value of the
+ * enumeration through a bitwise OR, in order to set behavior for normal
+ * cases.
+ *
+ * @code{.c}
+ * av_rescale_rnd(3, 1, 2, AV_ROUND_UP | AV_ROUND_PASS_MINMAX);
+ * // Rescaling 3:
+ * // Calculating 3 * 1 / 2
+ * // 3 / 2 is rounded up to 2
+ * // => 2
+ *
+ * av_rescale_rnd(AV_NOPTS_VALUE, 1, 2, AV_ROUND_UP | AV_ROUND_PASS_MINMAX);
+ * // Rescaling AV_NOPTS_VALUE:
+ * // AV_NOPTS_VALUE == INT64_MIN
+ * // AV_NOPTS_VALUE is passed through
+ * // => AV_NOPTS_VALUE
+ * @endcode
+ */
+ AV_ROUND_PASS_MINMAX = 8192,
+};
+
+/**
+ * Compute the greatest common divisor of two integer operands.
+ *
+ * @param a Operand
+ * @param b Operand
+ * @return GCD of a and b up to sign; if a >= 0 and b >= 0, return value is >=
+ * 0; if a == 0 and b == 0, returns 0.
+ */
+int64_t av_const av_gcd(int64_t a, int64_t b);
+
+/**
+ * Rescale a 64-bit integer with rounding to nearest.
+ *
+ * The operation is mathematically equivalent to `a * b / c`, but writing that
+ * directly can overflow.
+ *
+ * This function is equivalent to av_rescale_rnd() with #AV_ROUND_NEAR_INF.
+ *
+ * @see av_rescale_rnd(), av_rescale_q(), av_rescale_q_rnd()
+ */
+int64_t av_rescale(int64_t a, int64_t b, int64_t c) av_const;
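+
+/*
+ * Illustrative sketch (`nb_samples` is an assumed caller-side name): convert
+ * a 48 kHz sample count into 90 kHz ticks without overflowing in a * b.
+ *
+ * @code{.c}
+ * int64_t ticks = av_rescale(nb_samples, 90000, 48000);
+ * @endcode
+ */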
+
+/**
+ * Rescale a 64-bit integer with specified rounding.
+ *
+ * The operation is mathematically equivalent to `a * b / c`, but writing that
+ * directly can overflow, and does not support different rounding methods.
+ * If the result is not representable then INT64_MIN is returned.
+ *
+ * @see av_rescale(), av_rescale_q(), av_rescale_q_rnd()
+ */
+int64_t av_rescale_rnd(int64_t a, int64_t b, int64_t c,
+ enum AVRounding rnd) av_const;
+
+/**
+ * Rescale a 64-bit integer by 2 rational numbers.
+ *
+ * The operation is mathematically equivalent to `a * bq / cq`.
+ *
+ * This function is equivalent to av_rescale_q_rnd() with #AV_ROUND_NEAR_INF.
+ *
+ * @see av_rescale(), av_rescale_rnd(), av_rescale_q_rnd()
+ */
+int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq) av_const;
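+
+/*
+ * Illustrative sketch: move a timestamp from a 1/48000 time base into the
+ * 1/90000 MPEG time base.
+ *
+ * @code{.c}
+ * int64_t pts90k = av_rescale_q(pts, (AVRational){1, 48000},
+ *                               (AVRational){1, 90000});
+ * @endcode
+ */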
+
+/**
+ * Rescale a 64-bit integer by 2 rational numbers with specified rounding.
+ *
+ * The operation is mathematically equivalent to `a * bq / cq`.
+ *
+ * @see av_rescale(), av_rescale_rnd(), av_rescale_q()
+ */
+int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq,
+ enum AVRounding rnd) av_const;
+
+/**
+ * Compare two timestamps each in its own time base.
+ *
+ * @return One of the following values:
+ * - -1 if `ts_a` is before `ts_b`
+ * - 1 if `ts_a` is after `ts_b`
+ * - 0 if they represent the same position
+ *
+ * @warning
+ * The result of the function is undefined if one of the timestamps is outside
+ * the `int64_t` range when represented in the other's timebase.
+ */
+int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b);
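+
+/*
+ * Illustrative sketch (`a_pts`/`v_pts` are assumed caller-side names):
+ * compare an audio and a video timestamp that live in different time bases.
+ *
+ * @code{.c}
+ * if (av_compare_ts(a_pts, (AVRational){1, 48000},
+ *                   v_pts, (AVRational){1, 25}) < 0) {
+ *   // the audio timestamp comes first
+ * }
+ * @endcode
+ */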
+
+/**
+ * Compare the remainders of two integer operands divided by a common divisor.
+ *
+ * In other words, compare the least significant `log2(mod)` bits of integers
+ * `a` and `b`.
+ *
+ * @code{.c}
+ * av_compare_mod(0x11, 0x02, 0x10) < 0 // since 0x11 % 0x10 (0x1) < 0x02 %
+ * 0x10 (0x2) av_compare_mod(0x11, 0x02, 0x20) > 0 // since 0x11 % 0x20 (0x11)
+ * > 0x02 % 0x20 (0x02)
+ * @endcode
+ *
+ * @param a Operand
+ * @param b Operand
+ * @param mod Divisor; must be a power of 2
+ * @return
+ * - a negative value if `a % mod < b % mod`
+ * - a positive value if `a % mod > b % mod`
+ * - zero if `a % mod == b % mod`
+ */
+int64_t av_compare_mod(uint64_t a, uint64_t b, uint64_t mod);
+
+/**
+ * Rescale a timestamp while preserving known durations.
+ *
+ * This function is designed to be called per audio packet to scale the input
+ * timestamp to a different time base. Compared to a simple av_rescale_q()
+ * call, this function is robust against possible inconsistent frame durations.
+ *
+ * The `last` parameter is a state variable that must be preserved for all
+ * subsequent calls for the same stream. For the first call, `*last` should be
+ * initialized to #AV_NOPTS_VALUE.
+ *
+ * @param[in] in_tb Input time base
+ * @param[in] in_ts Input timestamp
+ * @param[in] fs_tb Duration time base; typically this is finer-grained
+ * (greater) than `in_tb` and `out_tb`
+ * @param[in] duration Duration till the next call to this function (i.e.
+ * duration of the current packet/frame)
+ * @param[in,out] last Pointer to a timestamp expressed in terms of
+ * `fs_tb`, acting as a state variable
+ * @param[in] out_tb Output timebase
+ * @return Timestamp expressed in terms of `out_tb`
+ *
+ * @note In the context of this function, "duration" is in terms of samples,
+ * not seconds.
+ */
+int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb,
+ int duration, int64_t* last, AVRational out_tb);
+
+/**
+ * Add a value to a timestamp.
+ *
+ * This function guarantees that when the same value is repeatedly added,
+ * no accumulation of rounding errors occurs.
+ *
+ * @param[in] ts Input timestamp
+ * @param[in] ts_tb Input timestamp time base
+ * @param[in] inc Value to be added
+ * @param[in] inc_tb Time base of `inc`
+ */
+int64_t av_add_stable(AVRational ts_tb, int64_t ts, AVRational inc_tb,
+ int64_t inc);
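+
+/*
+ * Illustrative sketch (`ts` and `ts_tb` are assumed caller-side state):
+ * advance a timestamp by one 30 fps frame per call; rounding errors do not
+ * accumulate across repeated additions.
+ *
+ * @code{.c}
+ * ts = av_add_stable(ts_tb, ts, (AVRational){1, 30}, 1);
+ * @endcode
+ */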
+
+/**
+ * Zeroth-order modified Bessel function of the first kind.
+ */
+double av_bessel_i0(double x);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_MATHEMATICS_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/mem.h b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/mem.h
new file mode 100644
index 0000000000..738c6443bf
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/mem.h
@@ -0,0 +1,611 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * @ingroup lavu_mem
+ * Memory handling functions
+ */
+
+#ifndef AVUTIL_MEM_H
+#define AVUTIL_MEM_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "attributes.h"
+
+/**
+ * @addtogroup lavu_mem
+ * Utilities for manipulating memory.
+ *
+ * FFmpeg has several applications of memory that are not required of a typical
+ * program. For example, the computing-heavy components like video decoding and
+ * encoding can be sped up significantly through the use of aligned memory.
+ *
+ * However, for each of FFmpeg's applications of memory, there might not be a
+ * recognized or standardized API for that specific use. Memory alignment, for
+ * instance, varies wildly depending on operating systems, architectures, and
+ * compilers. Hence, this component of @ref libavutil is created to make
+ * dealing with memory consistently possible on all platforms.
+ *
+ * @{
+ */
+
+/**
+ * @defgroup lavu_mem_attrs Function Attributes
+ * Function attributes applicable to memory handling functions.
+ *
+ * These function attributes can help compilers emit more useful warnings, or
+ * generate better code.
+ * @{
+ */
+
+/**
+ * @def av_malloc_attrib
+ * Function attribute denoting a malloc-like function.
+ *
+ * @see <a
+ * href="https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-g_t_0040code_007bmalloc_007d-function-attribute-3251">Function
+ * attribute `malloc` in GCC's documentation</a>
+ */
+
+#if AV_GCC_VERSION_AT_LEAST(3, 1)
+# define av_malloc_attrib __attribute__((__malloc__))
+#else
+# define av_malloc_attrib
+#endif
+
+/**
+ * @def av_alloc_size(...)
+ * Function attribute used on a function that allocates memory, whose size is
+ * given by the specified parameter(s).
+ *
+ * @code{.c}
+ * void *av_malloc(size_t size) av_alloc_size(1);
+ * void *av_calloc(size_t nmemb, size_t size) av_alloc_size(1, 2);
+ * @endcode
+ *
+ * @param ... One or two parameter indexes, separated by a comma
+ *
+ * @see <a
+ * href="https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-g_t_0040code_007balloc_005fsize_007d-function-attribute-3220">Function
+ * attribute `alloc_size` in GCC's documentation</a>
+ */
+
+#if AV_GCC_VERSION_AT_LEAST(4, 3)
+# define av_alloc_size(...) __attribute__((alloc_size(__VA_ARGS__)))
+#else
+# define av_alloc_size(...)
+#endif
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup lavu_mem_funcs Heap Management
+ * Functions responsible for allocating, freeing, and copying memory.
+ *
+ * All memory allocation functions have a built-in upper limit of `INT_MAX`
+ * bytes. This may be changed with av_max_alloc(), although exercise extreme
+ * caution when doing so.
+ *
+ * @{
+ */
+
+/**
+ * Allocate a memory block with alignment suitable for all memory accesses
+ * (including vectors if available on the CPU).
+ *
+ * @param size Size in bytes for the memory block to be allocated
+ * @return Pointer to the allocated block, or `NULL` if the block cannot
+ * be allocated
+ * @see av_mallocz()
+ */
+void* av_malloc(size_t size) av_malloc_attrib av_alloc_size(1);
+
+/**
+ * Allocate a memory block with alignment suitable for all memory accesses
+ * (including vectors if available on the CPU) and zero all the bytes of the
+ * block.
+ *
+ * @param size Size in bytes for the memory block to be allocated
+ * @return Pointer to the allocated block, or `NULL` if it cannot be allocated
+ * @see av_malloc()
+ */
+void* av_mallocz(size_t size) av_malloc_attrib av_alloc_size(1);
+
+/**
+ * Allocate a memory block for an array with av_malloc().
+ *
+ * The allocated memory will have size `size * nmemb` bytes.
+ *
+ * @param nmemb Number of elements
+ * @param size Size of a single element
+ * @return Pointer to the allocated block, or `NULL` if the block cannot
+ * be allocated
+ * @see av_malloc()
+ */
+av_alloc_size(1, 2) void* av_malloc_array(size_t nmemb, size_t size);
+
+/**
+ * Allocate a memory block for an array with av_mallocz().
+ *
+ * The allocated memory will have size `size * nmemb` bytes.
+ *
+ * @param nmemb Number of elements
+ * @param size Size of the single element
+ * @return Pointer to the allocated block, or `NULL` if the block cannot
+ * be allocated
+ *
+ * @see av_mallocz()
+ * @see av_malloc_array()
+ */
+void* av_calloc(size_t nmemb, size_t size) av_malloc_attrib av_alloc_size(1, 2);
+
+/**
+ * Allocate, reallocate, or free a block of memory.
+ *
+ * If `ptr` is `NULL` and `size` > 0, allocate a new block. Otherwise, expand or
+ * shrink that block of memory according to `size`.
+ *
+ * @param ptr Pointer to a memory block already allocated with
+ * av_realloc() or `NULL`
+ * @param size Size in bytes of the memory block to be allocated or
+ * reallocated
+ *
+ * @return Pointer to a newly-reallocated block or `NULL` if the block
+ * cannot be reallocated
+ *
+ * @warning Unlike av_malloc(), the returned pointer is not guaranteed to be
+ * correctly aligned. The returned pointer must still be freed,
+ * even if size is zero.
+ * @see av_fast_realloc()
+ * @see av_reallocp()
+ */
+void* av_realloc(void* ptr, size_t size) av_alloc_size(2);
+
+/**
+ * Allocate, reallocate, or free a block of memory through a pointer to a
+ * pointer.
+ *
+ * If `*ptr` is `NULL` and `size` > 0, allocate a new block. If `size` is
+ * zero, free the memory block pointed to by `*ptr`. Otherwise, expand or
+ * shrink that block of memory according to `size`.
+ *
+ * @param[in,out] ptr Pointer to a pointer to a memory block already allocated
+ * with av_realloc(), or a pointer to `NULL`. The pointer
+ * is updated on success, or freed on failure.
+ * @param[in] size Size in bytes for the memory block to be allocated or
+ * reallocated
+ *
+ * @return Zero on success, an AVERROR error code on failure
+ *
+ * @warning Unlike av_malloc(), the allocated memory is not guaranteed to be
+ * correctly aligned.
+ */
+av_warn_unused_result int av_reallocp(void* ptr, size_t size);
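+
+/*
+ * Illustrative sketch (`new_size` is an assumed caller-side value): grow a
+ * buffer through a pointer to a pointer; on failure the old buffer has
+ * already been freed and the pointer is NULL.
+ *
+ * @code{.c}
+ * uint8_t *buf = NULL;
+ * int ret = av_reallocp(&buf, new_size);
+ * if (ret < 0)
+ *   return ret;  // buf was freed and set to NULL
+ * @endcode
+ */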
+
+/**
+ * Allocate, reallocate, or free a block of memory.
+ *
+ * This function does the same thing as av_realloc(), except:
+ * - It takes two size arguments and allocates `nelem * elsize` bytes,
+ * after checking the result of the multiplication for integer overflow.
+ * - It frees the input block in case of failure, thus avoiding the memory
+ * leak with the classic
+ * @code{.c}
+ * buf = realloc(buf);
+ * if (!buf)
+ * return -1;
+ * @endcode
+ * pattern.
+ */
+void* av_realloc_f(void* ptr, size_t nelem, size_t elsize);
+
+/**
+ * Allocate, reallocate, or free an array.
+ *
+ * If `ptr` is `NULL` and `nmemb` > 0, allocate a new block.
+ *
+ * @param ptr Pointer to a memory block already allocated with
+ * av_realloc() or `NULL`
+ * @param nmemb Number of elements in the array
+ * @param size Size of the single element of the array
+ *
+ * @return Pointer to a newly-reallocated block or NULL if the block
+ * cannot be reallocated
+ *
+ * @warning Unlike av_malloc(), the allocated memory is not guaranteed to be
+ * correctly aligned. The returned pointer must still be freed,
+ * even if nmemb is zero.
+ * @see av_reallocp_array()
+ */
+av_alloc_size(2, 3) void* av_realloc_array(void* ptr, size_t nmemb,
+ size_t size);
+
+/**
+ * Allocate or reallocate an array through a pointer to a pointer.
+ *
+ * If `*ptr` is `NULL` and `nmemb` > 0, allocate a new block.
+ *
+ * @param[in,out] ptr Pointer to a pointer to a memory block already
+ * allocated with av_realloc(), or a pointer to `NULL`.
+ * The pointer is updated on success, or freed on failure.
+ * @param[in] nmemb Number of elements
+ * @param[in] size Size of the single element
+ *
+ * @return Zero on success, an AVERROR error code on failure
+ *
+ * @warning Unlike av_malloc(), the allocated memory is not guaranteed to be
+ * correctly aligned. *ptr must still be freed, even if nmemb is zero.
+ */
+int av_reallocp_array(void* ptr, size_t nmemb, size_t size);
+
+/**
+ * Reallocate the given buffer if it is not large enough, otherwise do nothing.
+ *
+ * If the given buffer is `NULL`, then a new uninitialized buffer is allocated.
+ *
+ * If the given buffer is not large enough, and reallocation fails, `NULL` is
+ * returned and `*size` is set to 0, but the original buffer is not changed or
+ * freed.
+ *
+ * A typical use pattern follows:
+ *
+ * @code{.c}
+ * uint8_t *buf = ...;
+ * uint8_t *new_buf = av_fast_realloc(buf, &current_size, size_needed);
+ * if (!new_buf) {
+ * // Allocation failed; clean up original buffer
+ * av_freep(&buf);
+ * return AVERROR(ENOMEM);
+ * }
+ * @endcode
+ *
+ * @param[in,out] ptr Already allocated buffer, or `NULL`
+ * @param[in,out] size Pointer to the size of buffer `ptr`. `*size` is
+ * updated to the new allocated size, in particular 0
+ * in case of failure.
+ * @param[in] min_size Desired minimal size of buffer `ptr`
+ * @return `ptr` if the buffer is large enough, a pointer to newly reallocated
+ * buffer if the buffer was not large enough, or `NULL` in case of
+ * error
+ * @see av_realloc()
+ * @see av_fast_malloc()
+ */
+void* av_fast_realloc(void* ptr, unsigned int* size, size_t min_size);
+
+/**
+ * Allocate a buffer, reusing the given one if large enough.
+ *
+ * Contrary to av_fast_realloc(), the current buffer contents might not be
+ * preserved, and on error the old buffer is freed; thus no special handling
+ * to avoid memory leaks is necessary.
+ *
+ * `*ptr` is allowed to be `NULL`, in which case allocation always happens if
+ * `min_size` is greater than 0.
+ *
+ * @code{.c}
+ * uint8_t *buf = ...;
+ * av_fast_malloc(&buf, &current_size, size_needed);
+ * if (!buf) {
+ * // Allocation failed; buf already freed
+ * return AVERROR(ENOMEM);
+ * }
+ * @endcode
+ *
+ * @param[in,out] ptr Pointer to pointer to an already allocated buffer.
+ * `*ptr` will be overwritten with pointer to new
+ * buffer on success or `NULL` on failure
+ * @param[in,out] size Pointer to the size of buffer `*ptr`. `*size` is
+ * updated to the new allocated size, in particular 0
+ * in case of failure.
+ * @param[in] min_size Desired minimal size of buffer `*ptr`
+ * @see av_realloc()
+ * @see av_fast_mallocz()
+ */
+void av_fast_malloc(void* ptr, unsigned int* size, size_t min_size);
+
+/**
+ * Allocate and clear a buffer, reusing the given one if large enough.
+ *
+ * Like av_fast_malloc(), but all newly allocated space is initially cleared.
+ * Reused buffer is not cleared.
+ *
+ * `*ptr` is allowed to be `NULL`, in which case allocation always happens if
+ * `min_size` is greater than 0.
+ *
+ * @param[in,out] ptr Pointer to pointer to an already allocated buffer.
+ * `*ptr` will be overwritten with pointer to new
+ * buffer on success or `NULL` on failure
+ * @param[in,out] size Pointer to the size of buffer `*ptr`. `*size` is
+ * updated to the new allocated size, in particular 0
+ * in case of failure.
+ * @param[in] min_size Desired minimal size of buffer `*ptr`
+ * @see av_fast_malloc()
+ */
+void av_fast_mallocz(void* ptr, unsigned int* size, size_t min_size);
+
+/**
+ * Free a memory block which has been allocated with a function of av_malloc()
+ * or av_realloc() family.
+ *
+ * @param ptr Pointer to the memory block which should be freed.
+ *
+ * @note `ptr = NULL` is explicitly allowed.
+ * @note It is recommended that you use av_freep() instead, to prevent leaving
+ * behind dangling pointers.
+ * @see av_freep()
+ */
+void av_free(void* ptr);
+
+/**
+ * Free a memory block which has been allocated with a function of av_malloc()
+ * or av_realloc() family, and set the pointer pointing to it to `NULL`.
+ *
+ * @code{.c}
+ * uint8_t *buf = av_malloc(16);
+ * av_free(buf);
+ * // buf now contains a dangling pointer to freed memory, and accidental
+ * // dereference of buf will result in a use-after-free, which may be a
+ * // security risk.
+ *
+ * uint8_t *buf = av_malloc(16);
+ * av_freep(&buf);
+ * // buf is now NULL, and accidental dereference will only result in a
+ * // NULL-pointer dereference.
+ * @endcode
+ *
+ * @param ptr Pointer to the pointer to the memory block which should be freed
+ * @note `*ptr = NULL` is safe and leads to no action.
+ * @see av_free()
+ */
+void av_freep(void* ptr);
+
+/**
+ * Duplicate a string.
+ *
+ * @param s String to be duplicated
+ * @return Pointer to a newly-allocated string containing a
+ * copy of `s` or `NULL` if the string cannot be allocated
+ * @see av_strndup()
+ */
+char* av_strdup(const char* s) av_malloc_attrib;
+
+/**
+ * Duplicate a substring of a string.
+ *
+ * @param s String to be duplicated
+ * @param len Maximum length of the resulting string (not counting the
+ * terminating byte)
+ * @return Pointer to a newly-allocated string containing a
+ * substring of `s` or `NULL` if the string cannot be allocated
+ */
+char* av_strndup(const char* s, size_t len) av_malloc_attrib;
+
+/**
+ * Duplicate a buffer with av_malloc().
+ *
+ * @param p Buffer to be duplicated
+ * @param size Size in bytes of the buffer copied
+ * @return Pointer to a newly allocated buffer containing a
+ * copy of `p` or `NULL` if the buffer cannot be allocated
+ */
+void* av_memdup(const void* p, size_t size);
+
+/**
+ * Overlapping memcpy() implementation.
+ *
+ * @param dst Destination buffer
+ * @param back Number of bytes back to start copying (i.e. the initial size of
+ * the overlapping window); must be > 0
+ * @param cnt Number of bytes to copy; must be >= 0
+ *
+ * @note `cnt > back` is valid, this will copy the bytes we just copied,
+ * thus creating a repeating pattern with a period length of `back`.
+ */
+void av_memcpy_backptr(uint8_t* dst, int back, int cnt);
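+
+/*
+ * Illustrative sketch of the `cnt > back` case noted above: with back == 1
+ * the single preceding byte is replicated, like a run-length fill.
+ *
+ * @code{.c}
+ * uint8_t buf[9] = { 'x' };
+ * av_memcpy_backptr(buf + 1, 1, 8);  // all 9 bytes of buf are now 'x'
+ * @endcode
+ */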
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup lavu_mem_dynarray Dynamic Array
+ *
+ * Utilities to make an array grow when needed.
+ *
+ * Sometimes, the programmer would want to have an array that can grow when
+ * needed. The libavutil dynamic array utilities fill that need.
+ *
+ * libavutil supports two systems of appending elements onto a dynamically
+ * allocated array, the first one storing the pointer to the value in the
+ * array, and the second storing the value directly. In both systems, the
+ * caller is responsible for maintaining a variable containing the length of
+ * the array, as well as freeing of the array after use.
+ *
+ * The first system stores pointers to values in a block of dynamically
+ * allocated memory. Since only pointers are stored, the function does not need
+ * to know the size of the type. Both av_dynarray_add() and
+ * av_dynarray_add_nofree() implement this system.
+ *
+ * @code
+ * type **array = NULL; //< an array of pointers to values
+ * int nb = 0; //< a variable to keep track of the length of the array
+ *
+ * type to_be_added = ...;
+ * type to_be_added2 = ...;
+ *
+ * av_dynarray_add(&array, &nb, &to_be_added);
+ * if (nb == 0)
+ * return AVERROR(ENOMEM);
+ *
+ * av_dynarray_add(&array, &nb, &to_be_added2);
+ * if (nb == 0)
+ * return AVERROR(ENOMEM);
+ *
+ * // Now:
+ * // nb == 2
+ * // &to_be_added == array[0]
+ * // &to_be_added2 == array[1]
+ *
+ * av_freep(&array);
+ * @endcode
+ *
+ * The second system stores the value directly in a block of memory. As a
+ * result, the function has to know the size of the type. av_dynarray2_add()
+ * implements this mechanism.
+ *
+ * @code
+ * type *array = NULL; //< an array of values
+ * int nb = 0; //< a variable to keep track of the length of the array
+ *
+ * type to_be_added = ...;
+ * type to_be_added2 = ...;
+ *
+ * type *addr = av_dynarray2_add((void **)&array, &nb, sizeof(*array), NULL);
+ * if (!addr)
+ * return AVERROR(ENOMEM);
+ * memcpy(addr, &to_be_added, sizeof(to_be_added));
+ *
+ * // Shortcut of the above.
+ * type *addr = av_dynarray2_add((void **)&array, &nb, sizeof(*array),
+ * (const void *)&to_be_added2);
+ * if (!addr)
+ * return AVERROR(ENOMEM);
+ *
+ * // Now:
+ * // nb == 2
+ * // to_be_added == array[0]
+ * // to_be_added2 == array[1]
+ *
+ * av_freep(&array);
+ * @endcode
+ *
+ * @{
+ */
+
+/**
+ * Add the pointer to an element to a dynamic array.
+ *
+ * The array to grow is supposed to be an array of pointers to
+ * structures, and the element to add must be a pointer to an already
+ * allocated structure.
+ *
+ * The array is reallocated when its size reaches powers of 2.
+ * Therefore, the amortized cost of adding an element is constant.
+ *
+ * In case of success, the pointer to the array is updated in order to
+ * point to the new grown array, and the number pointed to by `nb_ptr`
+ * is incremented.
+ * In case of failure, the array is freed, `*tab_ptr` is set to `NULL` and
+ * `*nb_ptr` is set to 0.
+ *
+ * @param[in,out] tab_ptr Pointer to the array to grow
+ * @param[in,out] nb_ptr Pointer to the number of elements in the array
+ * @param[in] elem Element to add
+ * @see av_dynarray_add_nofree(), av_dynarray2_add()
+ */
+void av_dynarray_add(void* tab_ptr, int* nb_ptr, void* elem);
+
+/**
+ * Add an element to a dynamic array.
+ *
+ * This function has the same functionality as av_dynarray_add(),
+ * but it does not free memory on failure. It returns an error code
+ * instead and leaves the current buffer untouched.
+ *
+ * @return >=0 on success, negative otherwise
+ * @see av_dynarray_add(), av_dynarray2_add()
+ */
+av_warn_unused_result int av_dynarray_add_nofree(void* tab_ptr, int* nb_ptr,
+ void* elem);
+
+/**
+ * Add an element of size `elem_size` to a dynamic array.
+ *
+ * The array is reallocated when its number of elements reaches powers of 2.
+ * Therefore, the amortized cost of adding an element is constant.
+ *
+ * In case of success, the pointer to the array is updated in order to
+ * point to the new grown array, and the number pointed to by `nb_ptr`
+ * is incremented.
+ * In case of failure, the array is freed, `*tab_ptr` is set to `NULL` and
+ * `*nb_ptr` is set to 0.
+ *
+ * @param[in,out] tab_ptr Pointer to the array to grow
+ * @param[in,out] nb_ptr Pointer to the number of elements in the array
+ * @param[in] elem_size Size in bytes of an element in the array
+ * @param[in] elem_data Pointer to the data of the element to add. If
+ * `NULL`, the space of the newly added element is
+ * allocated but left uninitialized.
+ *
+ * @return Pointer to the data of the element to copy in the newly allocated
+ * space
+ * @see av_dynarray_add(), av_dynarray_add_nofree()
+ */
+void* av_dynarray2_add(void** tab_ptr, int* nb_ptr, size_t elem_size,
+ const uint8_t* elem_data);
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup lavu_mem_misc Miscellaneous Functions
+ *
+ * Other functions related to memory allocation.
+ *
+ * @{
+ */
+
+/**
+ * Multiply two `size_t` values checking for overflow.
+ *
+ * @param[in] a Operand of multiplication
+ * @param[in] b Operand of multiplication
+ * @param[out] r Pointer to the result of the operation
+ * @return 0 on success, AVERROR(EINVAL) on overflow
+ */
+int av_size_mult(size_t a, size_t b, size_t* r);
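+
+/*
+ * Illustrative sketch (`nmemb` and `elem_size` are assumed caller-side
+ * values): validate an array size before allocating it.
+ *
+ * @code{.c}
+ * size_t total;
+ * int ret = av_size_mult(nmemb, elem_size, &total);
+ * if (ret < 0)
+ *   return ret;  // the multiplication would overflow
+ * uint8_t *p = av_malloc(total);
+ * @endcode
+ */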
+
+/**
+ * Set the maximum size that may be allocated in one block.
+ *
+ * The value specified with this function is effective for all libavutil's @ref
+ * lavu_mem_funcs "heap management functions."
+ *
+ * By default, the max value is defined as `INT_MAX`.
+ *
+ * @param max Value to be set as the new maximum size
+ *
+ * @warning Exercise extreme caution when using this function. Don't touch
+ * this if you do not understand the full consequence of doing so.
+ */
+void av_max_alloc(size_t max);
+
+/**
+ * @}
+ * @}
+ */
+
+#endif /* AVUTIL_MEM_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/pixfmt.h b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/pixfmt.h
new file mode 100644
index 0000000000..5a33471a05
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/pixfmt.h
@@ -0,0 +1,920 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_PIXFMT_H
+#define AVUTIL_PIXFMT_H
+
+/**
+ * @file
+ * pixel format definitions
+ */
+
+#include "libavutil/avconfig.h"
+#include "version.h"
+
+#define AVPALETTE_SIZE 1024
+#define AVPALETTE_COUNT 256
+
+/**
+ * Maximum number of planes in any pixel format.
+ * This should be used when a maximum is needed, but code should not
+ * be written to require a maximum for no good reason.
+ */
+#define AV_VIDEO_MAX_PLANES 4
+
+/**
+ * Pixel format.
+ *
+ * @note
+ * AV_PIX_FMT_RGB32 is handled in an endian-specific manner. An RGBA
+ * color is put together as:
+ * (A << 24) | (R << 16) | (G << 8) | B
+ * This is stored as BGRA on little-endian CPU architectures and ARGB on
+ * big-endian CPUs.
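+ *
+ * For instance (illustrative), on a little-endian machine the four bytes of
+ * an AV_PIX_FMT_RGB32 pixel with A=0xFF, R=0x11, G=0x22, B=0x33 appear in
+ * memory as B, G, R, A:
+ * @code{.c}
+ * uint32_t px = (0xFFu << 24) | (0x11 << 16) | (0x22 << 8) | 0x33;
+ * // memory layout, lowest address first: 0x33 0x22 0x11 0xFF
+ * @endcode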
+ *
+ * @note
+ * If the resolution is not a multiple of the chroma subsampling factor
+ * then the chroma plane resolution must be rounded up.
+ *
+ * @par
+ * When the pixel format is palettized RGB32 (AV_PIX_FMT_PAL8), the palettized
+ * image data is stored in AVFrame.data[0]. The palette is transported in
+ * AVFrame.data[1], is 1024 bytes long (256 4-byte entries) and is
+ * formatted the same as in AV_PIX_FMT_RGB32 described above (i.e., it is
+ * also endian-specific). Note also that the individual RGB32 palette
+ * components stored in AVFrame.data[1] should be in the range 0..255.
+ * This is important as many custom PAL8 video codecs that were designed
+ * to run on the IBM VGA graphics adapter use 6-bit palette components.
+ *
+ * @par
+ * For all the 8 bits per pixel formats, an RGB32 palette is in data[1] like
+ * for pal8. This palette is filled in automatically by the function
+ * allocating the picture.
+ */
+enum AVPixelFormat {
+ AV_PIX_FMT_NONE = -1,
+ AV_PIX_FMT_YUV420P, ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y
+ ///< samples)
+ AV_PIX_FMT_YUYV422, ///< packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
+ AV_PIX_FMT_RGB24, ///< packed RGB 8:8:8, 24bpp, RGBRGB...
+ AV_PIX_FMT_BGR24, ///< packed RGB 8:8:8, 24bpp, BGRBGR...
+ AV_PIX_FMT_YUV422P, ///< planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y
+ ///< samples)
+ AV_PIX_FMT_YUV444P, ///< planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y
+ ///< samples)
+ AV_PIX_FMT_YUV410P, ///< planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y
+ ///< samples)
+ AV_PIX_FMT_YUV411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y
+ ///< samples)
+ AV_PIX_FMT_GRAY8, ///< Y , 8bpp
+ AV_PIX_FMT_MONOWHITE, ///< Y , 1bpp, 0 is white, 1 is black,
+ ///< in each byte pixels are ordered from the
+ ///< msb to the lsb
+ AV_PIX_FMT_MONOBLACK, ///< Y , 1bpp, 0 is black, 1 is white,
+ ///< in each byte pixels are ordered from the
+ ///< msb to the lsb
+ AV_PIX_FMT_PAL8, ///< 8 bits with AV_PIX_FMT_RGB32 palette
+ AV_PIX_FMT_YUVJ420P, ///< planar YUV 4:2:0, 12bpp, full scale (JPEG),
+ ///< deprecated in favor of AV_PIX_FMT_YUV420P and
+ ///< setting color_range
+ AV_PIX_FMT_YUVJ422P, ///< planar YUV 4:2:2, 16bpp, full scale (JPEG),
+ ///< deprecated in favor of AV_PIX_FMT_YUV422P and
+ ///< setting color_range
+ AV_PIX_FMT_YUVJ444P, ///< planar YUV 4:4:4, 24bpp, full scale (JPEG),
+ ///< deprecated in favor of AV_PIX_FMT_YUV444P and
+ ///< setting color_range
+ AV_PIX_FMT_UYVY422, ///< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
+ AV_PIX_FMT_UYYVYY411, ///< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3
+ AV_PIX_FMT_BGR8, ///< packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
+ AV_PIX_FMT_BGR4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb),
+ ///< a byte contains two pixels, the first pixel in the byte
+ ///< is the one composed by the 4 msb bits
+ AV_PIX_FMT_BGR4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb)
+ AV_PIX_FMT_RGB8, ///< packed RGB 3:3:2, 8bpp, (msb)3R 3G 2B(lsb)
+ AV_PIX_FMT_RGB4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1R 2G 1B(lsb),
+ ///< a byte contains two pixels, the first pixel in the byte
+ ///< is the one composed by the 4 msb bits
+ AV_PIX_FMT_RGB4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb)
+ AV_PIX_FMT_NV12, ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for
+ ///< the UV components, which are interleaved (first byte U
+ ///< and the following byte V)
+ AV_PIX_FMT_NV21, ///< as above, but U and V bytes are swapped
+
+ AV_PIX_FMT_ARGB, ///< packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
+ AV_PIX_FMT_RGBA, ///< packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
+ AV_PIX_FMT_ABGR, ///< packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
+ AV_PIX_FMT_BGRA, ///< packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
+
+ AV_PIX_FMT_GRAY16BE, ///< Y , 16bpp, big-endian
+ AV_PIX_FMT_GRAY16LE, ///< Y , 16bpp, little-endian
+ AV_PIX_FMT_YUV440P, ///< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y
+ ///< samples)
+ AV_PIX_FMT_YUVJ440P, ///< planar YUV 4:4:0 full scale (JPEG), deprecated in
+ ///< favor of AV_PIX_FMT_YUV440P and setting color_range
+ AV_PIX_FMT_YUVA420P, ///< planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2
+ ///< Y & A samples)
+ AV_PIX_FMT_RGB48BE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the
+ ///< 2-byte value for each R/G/B component is stored as
+ ///< big-endian
+ AV_PIX_FMT_RGB48LE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the
+ ///< 2-byte value for each R/G/B component is stored as
+ ///< little-endian
+
+ AV_PIX_FMT_RGB565BE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb),
+ ///< big-endian
+ AV_PIX_FMT_RGB565LE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb),
+ ///< little-endian
+ AV_PIX_FMT_RGB555BE, ///< packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb),
+ ///< big-endian , X=unused/undefined
+ AV_PIX_FMT_RGB555LE, ///< packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb),
+ ///< little-endian, X=unused/undefined
+
+ AV_PIX_FMT_BGR565BE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb),
+ ///< big-endian
+ AV_PIX_FMT_BGR565LE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb),
+ ///< little-endian
+ AV_PIX_FMT_BGR555BE, ///< packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb),
+ ///< big-endian , X=unused/undefined
+ AV_PIX_FMT_BGR555LE, ///< packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb),
+ ///< little-endian, X=unused/undefined
+
+ /**
+ * Hardware acceleration through VA-API, data[3] contains a
+ * VASurfaceID.
+ */
+ AV_PIX_FMT_VAAPI,
+
+ AV_PIX_FMT_YUV420P16LE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per
+ ///< 2x2 Y samples), little-endian
+ AV_PIX_FMT_YUV420P16BE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per
+ ///< 2x2 Y samples), big-endian
+ AV_PIX_FMT_YUV422P16LE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per
+ ///< 2x1 Y samples), little-endian
+ AV_PIX_FMT_YUV422P16BE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per
+ ///< 2x1 Y samples), big-endian
+ AV_PIX_FMT_YUV444P16LE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per
+ ///< 1x1 Y samples), little-endian
+ AV_PIX_FMT_YUV444P16BE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per
+ ///< 1x1 Y samples), big-endian
+ AV_PIX_FMT_DXVA2_VLD, ///< HW decoding through DXVA2, Picture.data[3]
+ ///< contains a LPDIRECT3DSURFACE9 pointer
+
+ AV_PIX_FMT_RGB444LE, ///< packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb),
+ ///< little-endian, X=unused/undefined
+ AV_PIX_FMT_RGB444BE, ///< packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb),
+ ///< big-endian, X=unused/undefined
+ AV_PIX_FMT_BGR444LE, ///< packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb),
+ ///< little-endian, X=unused/undefined
+ AV_PIX_FMT_BGR444BE, ///< packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb),
+ ///< big-endian, X=unused/undefined
+ AV_PIX_FMT_YA8, ///< 8 bits gray, 8 bits alpha
+
+ AV_PIX_FMT_Y400A = AV_PIX_FMT_YA8, ///< alias for AV_PIX_FMT_YA8
+ AV_PIX_FMT_GRAY8A = AV_PIX_FMT_YA8, ///< alias for AV_PIX_FMT_YA8
+
+ AV_PIX_FMT_BGR48BE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the
+ ///< 2-byte value for each R/G/B component is stored as
+ ///< big-endian
+ AV_PIX_FMT_BGR48LE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the
+ ///< 2-byte value for each R/G/B component is stored as
+ ///< little-endian
+
+ /**
+ * The following 12 formats have the disadvantage of needing 1 format for each
+ * bit depth. Notice that each 9/10-bit sample is stored in 16 bits with
+ * extra padding. If you want to support multiple bit depths, then using
+ * AV_PIX_FMT_YUV420P16* with the bpp stored separately is better.
+ */
+ AV_PIX_FMT_YUV420P9BE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per
+ ///< 2x2 Y samples), big-endian
+ AV_PIX_FMT_YUV420P9LE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per
+ ///< 2x2 Y samples), little-endian
+ AV_PIX_FMT_YUV420P10BE, ///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per
+ ///< 2x2 Y samples), big-endian
+ AV_PIX_FMT_YUV420P10LE, ///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per
+ ///< 2x2 Y samples), little-endian
+ AV_PIX_FMT_YUV422P10BE, ///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per
+ ///< 2x1 Y samples), big-endian
+ AV_PIX_FMT_YUV422P10LE, ///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per
+ ///< 2x1 Y samples), little-endian
+ AV_PIX_FMT_YUV444P9BE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per
+ ///< 1x1 Y samples), big-endian
+ AV_PIX_FMT_YUV444P9LE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per
+ ///< 1x1 Y samples), little-endian
+ AV_PIX_FMT_YUV444P10BE, ///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per
+ ///< 1x1 Y samples), big-endian
+ AV_PIX_FMT_YUV444P10LE, ///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per
+ ///< 1x1 Y samples), little-endian
+ AV_PIX_FMT_YUV422P9BE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per
+ ///< 2x1 Y samples), big-endian
+ AV_PIX_FMT_YUV422P9LE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per
+ ///< 2x1 Y samples), little-endian
+ AV_PIX_FMT_GBRP, ///< planar GBR 4:4:4 24bpp
+ AV_PIX_FMT_GBR24P = AV_PIX_FMT_GBRP, // alias for #AV_PIX_FMT_GBRP
+ AV_PIX_FMT_GBRP9BE, ///< planar GBR 4:4:4 27bpp, big-endian
+ AV_PIX_FMT_GBRP9LE, ///< planar GBR 4:4:4 27bpp, little-endian
+ AV_PIX_FMT_GBRP10BE, ///< planar GBR 4:4:4 30bpp, big-endian
+ AV_PIX_FMT_GBRP10LE, ///< planar GBR 4:4:4 30bpp, little-endian
+ AV_PIX_FMT_GBRP16BE, ///< planar GBR 4:4:4 48bpp, big-endian
+ AV_PIX_FMT_GBRP16LE, ///< planar GBR 4:4:4 48bpp, little-endian
+ AV_PIX_FMT_YUVA422P, ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y
+ ///< & A samples)
+ AV_PIX_FMT_YUVA444P, ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y
+ ///< & A samples)
+ AV_PIX_FMT_YUVA420P9BE, ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per
+ ///< 2x2 Y & A samples), big-endian
+ AV_PIX_FMT_YUVA420P9LE, ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per
+ ///< 2x2 Y & A samples), little-endian
+ AV_PIX_FMT_YUVA422P9BE, ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per
+ ///< 2x1 Y & A samples), big-endian
+ AV_PIX_FMT_YUVA422P9LE, ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per
+ ///< 2x1 Y & A samples), little-endian
+ AV_PIX_FMT_YUVA444P9BE, ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per
+ ///< 1x1 Y & A samples), big-endian
+ AV_PIX_FMT_YUVA444P9LE, ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per
+ ///< 1x1 Y & A samples), little-endian
+ AV_PIX_FMT_YUVA420P10BE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per
+ ///< 2x2 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA420P10LE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per
+ ///< 2x2 Y & A samples, little-endian)
+ AV_PIX_FMT_YUVA422P10BE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per
+ ///< 2x1 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA422P10LE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per
+ ///< 2x1 Y & A samples, little-endian)
+ AV_PIX_FMT_YUVA444P10BE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per
+ ///< 1x1 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA444P10LE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per
+ ///< 1x1 Y & A samples, little-endian)
+ AV_PIX_FMT_YUVA420P16BE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per
+ ///< 2x2 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA420P16LE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per
+ ///< 2x2 Y & A samples, little-endian)
+ AV_PIX_FMT_YUVA422P16BE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per
+ ///< 2x1 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA422P16LE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per
+ ///< 2x1 Y & A samples, little-endian)
+ AV_PIX_FMT_YUVA444P16BE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per
+ ///< 1x1 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA444P16LE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per
+ ///< 1x1 Y & A samples, little-endian)
+
+ AV_PIX_FMT_VDPAU, ///< HW acceleration through VDPAU, Picture.data[3]
+ ///< contains a VdpVideoSurface
+
+ AV_PIX_FMT_XYZ12LE, ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb),
+ ///< the 2-byte value for each X/Y/Z is stored as
+ ///< little-endian, the 4 lower bits are set to 0
+ AV_PIX_FMT_XYZ12BE, ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb),
+ ///< the 2-byte value for each X/Y/Z is stored as
+ ///< big-endian, the 4 lower bits are set to 0
+ AV_PIX_FMT_NV16, ///< interleaved chroma YUV 4:2:2, 16bpp, (1 Cr & Cb sample
+ ///< per 2x1 Y samples)
+ AV_PIX_FMT_NV20LE, ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb
+ ///< sample per 2x1 Y samples), little-endian
+ AV_PIX_FMT_NV20BE, ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb
+ ///< sample per 2x1 Y samples), big-endian
+
+ AV_PIX_FMT_RGBA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A,
+ ///< the 2-byte value for each R/G/B/A component is
+ ///< stored as big-endian
+ AV_PIX_FMT_RGBA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A,
+ ///< the 2-byte value for each R/G/B/A component is
+ ///< stored as little-endian
+ AV_PIX_FMT_BGRA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A,
+ ///< the 2-byte value for each R/G/B/A component is
+ ///< stored as big-endian
+ AV_PIX_FMT_BGRA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A,
+ ///< the 2-byte value for each R/G/B/A component is
+ ///< stored as little-endian
+
+ AV_PIX_FMT_YVYU422, ///< packed YUV 4:2:2, 16bpp, Y0 Cr Y1 Cb
+
+ AV_PIX_FMT_YA16BE, ///< 16 bits gray, 16 bits alpha (big-endian)
+ AV_PIX_FMT_YA16LE, ///< 16 bits gray, 16 bits alpha (little-endian)
+
+ AV_PIX_FMT_GBRAP, ///< planar GBRA 4:4:4:4 32bpp
+ AV_PIX_FMT_GBRAP16BE, ///< planar GBRA 4:4:4:4 64bpp, big-endian
+ AV_PIX_FMT_GBRAP16LE, ///< planar GBRA 4:4:4:4 64bpp, little-endian
+ /**
+ * HW acceleration through QSV, data[3] contains a pointer to the
+ * mfxFrameSurface1 structure.
+ *
+ * Before FFmpeg 5.0:
+ * mfxFrameSurface1.Data.MemId contains a pointer when importing
+ * the following frames as QSV frames:
+ *
+ * VAAPI:
+ * mfxFrameSurface1.Data.MemId contains a pointer to VASurfaceID
+ *
+ * DXVA2:
+ * mfxFrameSurface1.Data.MemId contains a pointer to IDirect3DSurface9
+ *
+ * FFmpeg 5.0 and above:
+ * mfxFrameSurface1.Data.MemId contains a pointer to the mfxHDLPair
+ * structure when importing the following frames as QSV frames:
+ *
+ * VAAPI:
+ * mfxHDLPair.first contains a VASurfaceID pointer.
+ * mfxHDLPair.second is always MFX_INFINITE.
+ *
+ * DXVA2:
+ * mfxHDLPair.first contains IDirect3DSurface9 pointer.
+ * mfxHDLPair.second is always MFX_INFINITE.
+ *
+ * D3D11:
+ * mfxHDLPair.first contains a ID3D11Texture2D pointer.
+ * mfxHDLPair.second contains the texture array index of the frame if the
+ * ID3D11Texture2D is an array texture, or always MFX_INFINITE if it is a
+ * normal texture.
+ */
+ AV_PIX_FMT_QSV,
+ /**
+ * HW acceleration through MMAL, data[3] contains a pointer to the
+ * MMAL_BUFFER_HEADER_T structure.
+ */
+ AV_PIX_FMT_MMAL,
+
+ AV_PIX_FMT_D3D11VA_VLD, ///< HW decoding through Direct3D11 via old API,
+ ///< Picture.data[3] contains a
+ ///< ID3D11VideoDecoderOutputView pointer
+
+ /**
+ * HW acceleration through CUDA. data[i] contains CUdeviceptr pointers
+ * exactly as for system memory frames.
+ */
+ AV_PIX_FMT_CUDA,
+
+ AV_PIX_FMT_0RGB, ///< packed RGB 8:8:8, 32bpp, XRGBXRGB... X=unused/undefined
+ AV_PIX_FMT_RGB0, ///< packed RGB 8:8:8, 32bpp, RGBXRGBX... X=unused/undefined
+ AV_PIX_FMT_0BGR, ///< packed BGR 8:8:8, 32bpp, XBGRXBGR... X=unused/undefined
+ AV_PIX_FMT_BGR0, ///< packed BGR 8:8:8, 32bpp, BGRXBGRX... X=unused/undefined
+
+ AV_PIX_FMT_YUV420P12BE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per
+ ///< 2x2 Y samples), big-endian
+ AV_PIX_FMT_YUV420P12LE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per
+ ///< 2x2 Y samples), little-endian
+ AV_PIX_FMT_YUV420P14BE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per
+ ///< 2x2 Y samples), big-endian
+ AV_PIX_FMT_YUV420P14LE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per
+ ///< 2x2 Y samples), little-endian
+ AV_PIX_FMT_YUV422P12BE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per
+ ///< 2x1 Y samples), big-endian
+ AV_PIX_FMT_YUV422P12LE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per
+ ///< 2x1 Y samples), little-endian
+ AV_PIX_FMT_YUV422P14BE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per
+ ///< 2x1 Y samples), big-endian
+ AV_PIX_FMT_YUV422P14LE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per
+ ///< 2x1 Y samples), little-endian
+ AV_PIX_FMT_YUV444P12BE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per
+ ///< 1x1 Y samples), big-endian
+ AV_PIX_FMT_YUV444P12LE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per
+ ///< 1x1 Y samples), little-endian
+ AV_PIX_FMT_YUV444P14BE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per
+ ///< 1x1 Y samples), big-endian
+ AV_PIX_FMT_YUV444P14LE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per
+ ///< 1x1 Y samples), little-endian
+ AV_PIX_FMT_GBRP12BE, ///< planar GBR 4:4:4 36bpp, big-endian
+ AV_PIX_FMT_GBRP12LE, ///< planar GBR 4:4:4 36bpp, little-endian
+ AV_PIX_FMT_GBRP14BE, ///< planar GBR 4:4:4 42bpp, big-endian
+ AV_PIX_FMT_GBRP14LE, ///< planar GBR 4:4:4 42bpp, little-endian
+ AV_PIX_FMT_YUVJ411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1
+ ///< Y samples) full scale (JPEG), deprecated in favor
+ ///< of AV_PIX_FMT_YUV411P and setting color_range
+
+ AV_PIX_FMT_BAYER_BGGR8, ///< bayer, BGBG..(odd line), GRGR..(even line),
+ ///< 8-bit samples
+ AV_PIX_FMT_BAYER_RGGB8, ///< bayer, RGRG..(odd line), GBGB..(even line),
+ ///< 8-bit samples
+ AV_PIX_FMT_BAYER_GBRG8, ///< bayer, GBGB..(odd line), RGRG..(even line),
+ ///< 8-bit samples
+ AV_PIX_FMT_BAYER_GRBG8, ///< bayer, GRGR..(odd line), BGBG..(even line),
+ ///< 8-bit samples
+ AV_PIX_FMT_BAYER_BGGR16LE, ///< bayer, BGBG..(odd line), GRGR..(even line),
+ ///< 16-bit samples, little-endian
+ AV_PIX_FMT_BAYER_BGGR16BE, ///< bayer, BGBG..(odd line), GRGR..(even line),
+ ///< 16-bit samples, big-endian
+ AV_PIX_FMT_BAYER_RGGB16LE, ///< bayer, RGRG..(odd line), GBGB..(even line),
+ ///< 16-bit samples, little-endian
+ AV_PIX_FMT_BAYER_RGGB16BE, ///< bayer, RGRG..(odd line), GBGB..(even line),
+ ///< 16-bit samples, big-endian
+ AV_PIX_FMT_BAYER_GBRG16LE, ///< bayer, GBGB..(odd line), RGRG..(even line),
+ ///< 16-bit samples, little-endian
+ AV_PIX_FMT_BAYER_GBRG16BE, ///< bayer, GBGB..(odd line), RGRG..(even line),
+ ///< 16-bit samples, big-endian
+ AV_PIX_FMT_BAYER_GRBG16LE, ///< bayer, GRGR..(odd line), BGBG..(even line),
+ ///< 16-bit samples, little-endian
+ AV_PIX_FMT_BAYER_GRBG16BE, ///< bayer, GRGR..(odd line), BGBG..(even line),
+ ///< 16-bit samples, big-endian
+
+ AV_PIX_FMT_YUV440P10LE, ///< planar YUV 4:4:0,20bpp, (1 Cr & Cb sample per
+ ///< 1x2 Y samples), little-endian
+ AV_PIX_FMT_YUV440P10BE, ///< planar YUV 4:4:0,20bpp, (1 Cr & Cb sample per
+ ///< 1x2 Y samples), big-endian
+ AV_PIX_FMT_YUV440P12LE, ///< planar YUV 4:4:0,24bpp, (1 Cr & Cb sample per
+ ///< 1x2 Y samples), little-endian
+ AV_PIX_FMT_YUV440P12BE, ///< planar YUV 4:4:0,24bpp, (1 Cr & Cb sample per
+ ///< 1x2 Y samples), big-endian
+ AV_PIX_FMT_AYUV64LE, ///< packed AYUV 4:4:4,64bpp (1 Cr & Cb sample per 1x1 Y
+ ///< & A samples), little-endian
+ AV_PIX_FMT_AYUV64BE, ///< packed AYUV 4:4:4,64bpp (1 Cr & Cb sample per 1x1 Y
+ ///< & A samples), big-endian
+
+ AV_PIX_FMT_VIDEOTOOLBOX, ///< hardware decoding through Videotoolbox
+
+ AV_PIX_FMT_P010LE, ///< like NV12, with 10bpp per component, data in the high
+ ///< bits, zeros in the low bits, little-endian
+ AV_PIX_FMT_P010BE, ///< like NV12, with 10bpp per component, data in the high
+ ///< bits, zeros in the low bits, big-endian
+
+ AV_PIX_FMT_GBRAP12BE, ///< planar GBR 4:4:4:4 48bpp, big-endian
+ AV_PIX_FMT_GBRAP12LE, ///< planar GBR 4:4:4:4 48bpp, little-endian
+
+ AV_PIX_FMT_GBRAP10BE, ///< planar GBR 4:4:4:4 40bpp, big-endian
+ AV_PIX_FMT_GBRAP10LE, ///< planar GBR 4:4:4:4 40bpp, little-endian
+
+ AV_PIX_FMT_MEDIACODEC, ///< hardware decoding through MediaCodec
+
+ AV_PIX_FMT_GRAY12BE, ///< Y , 12bpp, big-endian
+ AV_PIX_FMT_GRAY12LE, ///< Y , 12bpp, little-endian
+ AV_PIX_FMT_GRAY10BE, ///< Y , 10bpp, big-endian
+ AV_PIX_FMT_GRAY10LE, ///< Y , 10bpp, little-endian
+
+ AV_PIX_FMT_P016LE, ///< like NV12, with 16bpp per component, little-endian
+ AV_PIX_FMT_P016BE, ///< like NV12, with 16bpp per component, big-endian
+
+ /**
+ * Hardware surfaces for Direct3D11.
+ *
+ * This is preferred over the legacy AV_PIX_FMT_D3D11VA_VLD. The new D3D11
+ * hwaccel API and filtering support AV_PIX_FMT_D3D11 only.
+ *
+ * data[0] contains a ID3D11Texture2D pointer, and data[1] contains the
+ * texture array index of the frame as intptr_t if the ID3D11Texture2D is
+ * an array texture (or always 0 if it's a normal texture).
+ */
+ AV_PIX_FMT_D3D11,
+
+ AV_PIX_FMT_GRAY9BE, ///< Y , 9bpp, big-endian
+ AV_PIX_FMT_GRAY9LE, ///< Y , 9bpp, little-endian
+
+ AV_PIX_FMT_GBRPF32BE, ///< IEEE-754 single precision planar GBR 4:4:4, 96bpp,
+ ///< big-endian
+ AV_PIX_FMT_GBRPF32LE, ///< IEEE-754 single precision planar GBR 4:4:4, 96bpp,
+ ///< little-endian
+ AV_PIX_FMT_GBRAPF32BE, ///< IEEE-754 single precision planar GBRA 4:4:4:4,
+ ///< 128bpp, big-endian
+ AV_PIX_FMT_GBRAPF32LE, ///< IEEE-754 single precision planar GBRA 4:4:4:4,
+ ///< 128bpp, little-endian
+
+ /**
+ * DRM-managed buffers exposed through PRIME buffer sharing.
+ *
+ * data[0] points to an AVDRMFrameDescriptor.
+ */
+ AV_PIX_FMT_DRM_PRIME,
+ /**
+ * Hardware surfaces for OpenCL.
+ *
+ * data[i] contain 2D image objects (typed in C as cl_mem, used
+ * in OpenCL as image2d_t) for each plane of the surface.
+ */
+ AV_PIX_FMT_OPENCL,
+
+ AV_PIX_FMT_GRAY14BE, ///< Y , 14bpp, big-endian
+ AV_PIX_FMT_GRAY14LE, ///< Y , 14bpp, little-endian
+
+ AV_PIX_FMT_GRAYF32BE, ///< IEEE-754 single precision Y, 32bpp, big-endian
+ AV_PIX_FMT_GRAYF32LE, ///< IEEE-754 single precision Y, 32bpp, little-endian
+
+ AV_PIX_FMT_YUVA422P12BE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per
+ ///< 2x1 Y samples), 12b alpha, big-endian
+ AV_PIX_FMT_YUVA422P12LE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per
+ ///< 2x1 Y samples), 12b alpha, little-endian
+ AV_PIX_FMT_YUVA444P12BE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per
+ ///< 1x1 Y samples), 12b alpha, big-endian
+ AV_PIX_FMT_YUVA444P12LE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per
+ ///< 1x1 Y samples), 12b alpha, little-endian
+
+ AV_PIX_FMT_NV24, ///< planar YUV 4:4:4, 24bpp, 1 plane for Y and 1 plane for
+ ///< the UV components, which are interleaved (first byte U
+ ///< and the following byte V)
+ AV_PIX_FMT_NV42, ///< as above, but U and V bytes are swapped
+
+ /**
+ * Vulkan hardware images.
+ *
+ * data[0] points to an AVVkFrame
+ */
+ AV_PIX_FMT_VULKAN,
+
+ AV_PIX_FMT_Y210BE, ///< packed YUV 4:2:2 like YUYV422, 20bpp, data in the
+ ///< high bits, big-endian
+ AV_PIX_FMT_Y210LE, ///< packed YUV 4:2:2 like YUYV422, 20bpp, data in the
+ ///< high bits, little-endian
+
+ AV_PIX_FMT_X2RGB10LE, ///< packed RGB 10:10:10, 30bpp, (msb)2X 10R 10G
+ ///< 10B(lsb), little-endian, X=unused/undefined
+ AV_PIX_FMT_X2RGB10BE, ///< packed RGB 10:10:10, 30bpp, (msb)2X 10R 10G
+ ///< 10B(lsb), big-endian, X=unused/undefined
+ AV_PIX_FMT_X2BGR10LE, ///< packed BGR 10:10:10, 30bpp, (msb)2X 10B 10G
+ ///< 10R(lsb), little-endian, X=unused/undefined
+ AV_PIX_FMT_X2BGR10BE, ///< packed BGR 10:10:10, 30bpp, (msb)2X 10B 10G
+ ///< 10R(lsb), big-endian, X=unused/undefined
+
+ AV_PIX_FMT_P210BE, ///< interleaved chroma YUV 4:2:2, 20bpp, data in the high
+ ///< bits, big-endian
+ AV_PIX_FMT_P210LE, ///< interleaved chroma YUV 4:2:2, 20bpp, data in the high
+ ///< bits, little-endian
+
+ AV_PIX_FMT_P410BE, ///< interleaved chroma YUV 4:4:4, 30bpp, data in the high
+ ///< bits, big-endian
+ AV_PIX_FMT_P410LE, ///< interleaved chroma YUV 4:4:4, 30bpp, data in the high
+ ///< bits, little-endian
+
+ AV_PIX_FMT_P216BE, ///< interleaved chroma YUV 4:2:2, 32bpp, big-endian
+ AV_PIX_FMT_P216LE, ///< interleaved chroma YUV 4:2:2, 32bpp, little-endian
+
+ AV_PIX_FMT_P416BE, ///< interleaved chroma YUV 4:4:4, 48bpp, big-endian
+ AV_PIX_FMT_P416LE, ///< interleaved chroma YUV 4:4:4, 48bpp, little-endian
+
+ AV_PIX_FMT_VUYA, ///< packed VUYA 4:4:4, 32bpp, VUYAVUYA...
+
+ AV_PIX_FMT_RGBAF16BE, ///< IEEE-754 half precision packed RGBA 16:16:16:16,
+ ///< 64bpp, RGBARGBA..., big-endian
+ AV_PIX_FMT_RGBAF16LE, ///< IEEE-754 half precision packed RGBA 16:16:16:16,
+ ///< 64bpp, RGBARGBA..., little-endian
+
+ AV_PIX_FMT_VUYX, ///< packed VUYX 4:4:4, 32bpp, Variant of VUYA where alpha
+ ///< channel is left undefined
+
+ AV_PIX_FMT_P012LE, ///< like NV12, with 12bpp per component, data in the high
+ ///< bits, zeros in the low bits, little-endian
+ AV_PIX_FMT_P012BE, ///< like NV12, with 12bpp per component, data in the high
+ ///< bits, zeros in the low bits, big-endian
+
+ AV_PIX_FMT_Y212BE, ///< packed YUV 4:2:2 like YUYV422, 24bpp, data in the
+ ///< high bits, zeros in the low bits, big-endian
+ AV_PIX_FMT_Y212LE, ///< packed YUV 4:2:2 like YUYV422, 24bpp, data in the
+ ///< high bits, zeros in the low bits, little-endian
+
+ AV_PIX_FMT_XV30BE, ///< packed XVYU 4:4:4, 32bpp, (msb)2X 10V 10Y 10U(lsb),
+ ///< big-endian, variant of Y410 where alpha channel is
+ ///< left undefined
+ AV_PIX_FMT_XV30LE, ///< packed XVYU 4:4:4, 32bpp, (msb)2X 10V 10Y 10U(lsb),
+ ///< little-endian, variant of Y410 where alpha channel is
+ ///< left undefined
+
+ AV_PIX_FMT_XV36BE, ///< packed XVYU 4:4:4, 48bpp, data in the high bits,
+ ///< zeros in the low bits, big-endian, variant of Y412
+ ///< where alpha channel is left undefined
+ AV_PIX_FMT_XV36LE, ///< packed XVYU 4:4:4, 48bpp, data in the high bits,
+ ///< zeros in the low bits, little-endian, variant of Y412
+ ///< where alpha channel is left undefined
+
+ AV_PIX_FMT_RGBF32BE, ///< IEEE-754 single precision packed RGB 32:32:32,
+ ///< 96bpp, RGBRGB..., big-endian
+ AV_PIX_FMT_RGBF32LE, ///< IEEE-754 single precision packed RGB 32:32:32,
+ ///< 96bpp, RGBRGB..., little-endian
+
+ AV_PIX_FMT_RGBAF32BE, ///< IEEE-754 single precision packed RGBA 32:32:32:32,
+ ///< 128bpp, RGBARGBA..., big-endian
+ AV_PIX_FMT_RGBAF32LE, ///< IEEE-754 single precision packed RGBA 32:32:32:32,
+ ///< 128bpp, RGBARGBA..., little-endian
+
+ AV_PIX_FMT_P212BE, ///< interleaved chroma YUV 4:2:2, 24bpp, data in the high
+ ///< bits, big-endian
+ AV_PIX_FMT_P212LE, ///< interleaved chroma YUV 4:2:2, 24bpp, data in the high
+ ///< bits, little-endian
+
+ AV_PIX_FMT_P412BE, ///< interleaved chroma YUV 4:4:4, 36bpp, data in the high
+ ///< bits, big-endian
+ AV_PIX_FMT_P412LE, ///< interleaved chroma YUV 4:4:4, 36bpp, data in the high
+ ///< bits, little-endian
+
+ AV_PIX_FMT_GBRAP14BE, ///< planar GBR 4:4:4:4 56bpp, big-endian
+ AV_PIX_FMT_GBRAP14LE, ///< planar GBR 4:4:4:4 56bpp, little-endian
+
+ /**
+ * Hardware surfaces for Direct3D 12.
+ *
+ * data[0] points to an AVD3D12VAFrame
+ */
+ AV_PIX_FMT_D3D12,
+
+ AV_PIX_FMT_NB ///< number of pixel formats, DO NOT USE THIS if you want to
+ ///< link with shared libav* because the number of formats
+ ///< might differ between versions
+};
+
+#if AV_HAVE_BIGENDIAN
+# define AV_PIX_FMT_NE(be, le) AV_PIX_FMT_##be
+#else
+# define AV_PIX_FMT_NE(be, le) AV_PIX_FMT_##le
+#endif
+
+#define AV_PIX_FMT_RGB32 AV_PIX_FMT_NE(ARGB, BGRA)
+#define AV_PIX_FMT_RGB32_1 AV_PIX_FMT_NE(RGBA, ABGR)
+#define AV_PIX_FMT_BGR32 AV_PIX_FMT_NE(ABGR, RGBA)
+#define AV_PIX_FMT_BGR32_1 AV_PIX_FMT_NE(BGRA, ARGB)
+#define AV_PIX_FMT_0RGB32 AV_PIX_FMT_NE(0RGB, BGR0)
+#define AV_PIX_FMT_0BGR32 AV_PIX_FMT_NE(0BGR, RGB0)
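+
+/* Editorial sketch, not part of the upstream header: the AV_PIX_FMT_NE()
+ * aliases above let code name "an RGBA value packed into one native 32-bit
+ * word" without an endianness #ifdef. AV_PIX_FMT_RGB32 packs a pixel as
+ * (A << 24) | (R << 16) | (G << 8) | B in a native word, which is BGRA
+ * byte order on little-endian hosts and ARGB on big-endian hosts. */
+static inline enum AVPixelFormat example_native_rgb32(void) {
+  /* Resolves to AV_PIX_FMT_BGRA or AV_PIX_FMT_ARGB depending on the host. */
+  return AV_PIX_FMT_RGB32;
+}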
+
+#define AV_PIX_FMT_GRAY9 AV_PIX_FMT_NE(GRAY9BE, GRAY9LE)
+#define AV_PIX_FMT_GRAY10 AV_PIX_FMT_NE(GRAY10BE, GRAY10LE)
+#define AV_PIX_FMT_GRAY12 AV_PIX_FMT_NE(GRAY12BE, GRAY12LE)
+#define AV_PIX_FMT_GRAY14 AV_PIX_FMT_NE(GRAY14BE, GRAY14LE)
+#define AV_PIX_FMT_GRAY16 AV_PIX_FMT_NE(GRAY16BE, GRAY16LE)
+#define AV_PIX_FMT_YA16 AV_PIX_FMT_NE(YA16BE, YA16LE)
+#define AV_PIX_FMT_RGB48 AV_PIX_FMT_NE(RGB48BE, RGB48LE)
+#define AV_PIX_FMT_RGB565 AV_PIX_FMT_NE(RGB565BE, RGB565LE)
+#define AV_PIX_FMT_RGB555 AV_PIX_FMT_NE(RGB555BE, RGB555LE)
+#define AV_PIX_FMT_RGB444 AV_PIX_FMT_NE(RGB444BE, RGB444LE)
+#define AV_PIX_FMT_RGBA64 AV_PIX_FMT_NE(RGBA64BE, RGBA64LE)
+#define AV_PIX_FMT_BGR48 AV_PIX_FMT_NE(BGR48BE, BGR48LE)
+#define AV_PIX_FMT_BGR565 AV_PIX_FMT_NE(BGR565BE, BGR565LE)
+#define AV_PIX_FMT_BGR555 AV_PIX_FMT_NE(BGR555BE, BGR555LE)
+#define AV_PIX_FMT_BGR444 AV_PIX_FMT_NE(BGR444BE, BGR444LE)
+#define AV_PIX_FMT_BGRA64 AV_PIX_FMT_NE(BGRA64BE, BGRA64LE)
+
+#define AV_PIX_FMT_YUV420P9 AV_PIX_FMT_NE(YUV420P9BE, YUV420P9LE)
+#define AV_PIX_FMT_YUV422P9 AV_PIX_FMT_NE(YUV422P9BE, YUV422P9LE)
+#define AV_PIX_FMT_YUV444P9 AV_PIX_FMT_NE(YUV444P9BE, YUV444P9LE)
+#define AV_PIX_FMT_YUV420P10 AV_PIX_FMT_NE(YUV420P10BE, YUV420P10LE)
+#define AV_PIX_FMT_YUV422P10 AV_PIX_FMT_NE(YUV422P10BE, YUV422P10LE)
+#define AV_PIX_FMT_YUV440P10 AV_PIX_FMT_NE(YUV440P10BE, YUV440P10LE)
+#define AV_PIX_FMT_YUV444P10 AV_PIX_FMT_NE(YUV444P10BE, YUV444P10LE)
+#define AV_PIX_FMT_YUV420P12 AV_PIX_FMT_NE(YUV420P12BE, YUV420P12LE)
+#define AV_PIX_FMT_YUV422P12 AV_PIX_FMT_NE(YUV422P12BE, YUV422P12LE)
+#define AV_PIX_FMT_YUV440P12 AV_PIX_FMT_NE(YUV440P12BE, YUV440P12LE)
+#define AV_PIX_FMT_YUV444P12 AV_PIX_FMT_NE(YUV444P12BE, YUV444P12LE)
+#define AV_PIX_FMT_YUV420P14 AV_PIX_FMT_NE(YUV420P14BE, YUV420P14LE)
+#define AV_PIX_FMT_YUV422P14 AV_PIX_FMT_NE(YUV422P14BE, YUV422P14LE)
+#define AV_PIX_FMT_YUV444P14 AV_PIX_FMT_NE(YUV444P14BE, YUV444P14LE)
+#define AV_PIX_FMT_YUV420P16 AV_PIX_FMT_NE(YUV420P16BE, YUV420P16LE)
+#define AV_PIX_FMT_YUV422P16 AV_PIX_FMT_NE(YUV422P16BE, YUV422P16LE)
+#define AV_PIX_FMT_YUV444P16 AV_PIX_FMT_NE(YUV444P16BE, YUV444P16LE)
+
+#define AV_PIX_FMT_GBRP9 AV_PIX_FMT_NE(GBRP9BE, GBRP9LE)
+#define AV_PIX_FMT_GBRP10 AV_PIX_FMT_NE(GBRP10BE, GBRP10LE)
+#define AV_PIX_FMT_GBRP12 AV_PIX_FMT_NE(GBRP12BE, GBRP12LE)
+#define AV_PIX_FMT_GBRP14 AV_PIX_FMT_NE(GBRP14BE, GBRP14LE)
+#define AV_PIX_FMT_GBRP16 AV_PIX_FMT_NE(GBRP16BE, GBRP16LE)
+#define AV_PIX_FMT_GBRAP10 AV_PIX_FMT_NE(GBRAP10BE, GBRAP10LE)
+#define AV_PIX_FMT_GBRAP12 AV_PIX_FMT_NE(GBRAP12BE, GBRAP12LE)
+#define AV_PIX_FMT_GBRAP14 AV_PIX_FMT_NE(GBRAP14BE, GBRAP14LE)
+#define AV_PIX_FMT_GBRAP16 AV_PIX_FMT_NE(GBRAP16BE, GBRAP16LE)
+
+#define AV_PIX_FMT_BAYER_BGGR16 AV_PIX_FMT_NE(BAYER_BGGR16BE, BAYER_BGGR16LE)
+#define AV_PIX_FMT_BAYER_RGGB16 AV_PIX_FMT_NE(BAYER_RGGB16BE, BAYER_RGGB16LE)
+#define AV_PIX_FMT_BAYER_GBRG16 AV_PIX_FMT_NE(BAYER_GBRG16BE, BAYER_GBRG16LE)
+#define AV_PIX_FMT_BAYER_GRBG16 AV_PIX_FMT_NE(BAYER_GRBG16BE, BAYER_GRBG16LE)
+
+#define AV_PIX_FMT_GBRPF32 AV_PIX_FMT_NE(GBRPF32BE, GBRPF32LE)
+#define AV_PIX_FMT_GBRAPF32 AV_PIX_FMT_NE(GBRAPF32BE, GBRAPF32LE)
+
+#define AV_PIX_FMT_GRAYF32 AV_PIX_FMT_NE(GRAYF32BE, GRAYF32LE)
+
+#define AV_PIX_FMT_YUVA420P9 AV_PIX_FMT_NE(YUVA420P9BE, YUVA420P9LE)
+#define AV_PIX_FMT_YUVA422P9 AV_PIX_FMT_NE(YUVA422P9BE, YUVA422P9LE)
+#define AV_PIX_FMT_YUVA444P9 AV_PIX_FMT_NE(YUVA444P9BE, YUVA444P9LE)
+#define AV_PIX_FMT_YUVA420P10 AV_PIX_FMT_NE(YUVA420P10BE, YUVA420P10LE)
+#define AV_PIX_FMT_YUVA422P10 AV_PIX_FMT_NE(YUVA422P10BE, YUVA422P10LE)
+#define AV_PIX_FMT_YUVA444P10 AV_PIX_FMT_NE(YUVA444P10BE, YUVA444P10LE)
+#define AV_PIX_FMT_YUVA422P12 AV_PIX_FMT_NE(YUVA422P12BE, YUVA422P12LE)
+#define AV_PIX_FMT_YUVA444P12 AV_PIX_FMT_NE(YUVA444P12BE, YUVA444P12LE)
+#define AV_PIX_FMT_YUVA420P16 AV_PIX_FMT_NE(YUVA420P16BE, YUVA420P16LE)
+#define AV_PIX_FMT_YUVA422P16 AV_PIX_FMT_NE(YUVA422P16BE, YUVA422P16LE)
+#define AV_PIX_FMT_YUVA444P16 AV_PIX_FMT_NE(YUVA444P16BE, YUVA444P16LE)
+
+#define AV_PIX_FMT_XYZ12 AV_PIX_FMT_NE(XYZ12BE, XYZ12LE)
+#define AV_PIX_FMT_NV20 AV_PIX_FMT_NE(NV20BE, NV20LE)
+#define AV_PIX_FMT_AYUV64 AV_PIX_FMT_NE(AYUV64BE, AYUV64LE)
+#define AV_PIX_FMT_P010 AV_PIX_FMT_NE(P010BE, P010LE)
+#define AV_PIX_FMT_P012 AV_PIX_FMT_NE(P012BE, P012LE)
+#define AV_PIX_FMT_P016 AV_PIX_FMT_NE(P016BE, P016LE)
+
+#define AV_PIX_FMT_Y210 AV_PIX_FMT_NE(Y210BE, Y210LE)
+#define AV_PIX_FMT_Y212 AV_PIX_FMT_NE(Y212BE, Y212LE)
+#define AV_PIX_FMT_XV30 AV_PIX_FMT_NE(XV30BE, XV30LE)
+#define AV_PIX_FMT_XV36 AV_PIX_FMT_NE(XV36BE, XV36LE)
+#define AV_PIX_FMT_X2RGB10 AV_PIX_FMT_NE(X2RGB10BE, X2RGB10LE)
+#define AV_PIX_FMT_X2BGR10 AV_PIX_FMT_NE(X2BGR10BE, X2BGR10LE)
+
+#define AV_PIX_FMT_P210 AV_PIX_FMT_NE(P210BE, P210LE)
+#define AV_PIX_FMT_P410 AV_PIX_FMT_NE(P410BE, P410LE)
+#define AV_PIX_FMT_P212 AV_PIX_FMT_NE(P212BE, P212LE)
+#define AV_PIX_FMT_P412 AV_PIX_FMT_NE(P412BE, P412LE)
+#define AV_PIX_FMT_P216 AV_PIX_FMT_NE(P216BE, P216LE)
+#define AV_PIX_FMT_P416 AV_PIX_FMT_NE(P416BE, P416LE)
+
+#define AV_PIX_FMT_RGBAF16 AV_PIX_FMT_NE(RGBAF16BE, RGBAF16LE)
+
+#define AV_PIX_FMT_RGBF32 AV_PIX_FMT_NE(RGBF32BE, RGBF32LE)
+#define AV_PIX_FMT_RGBAF32 AV_PIX_FMT_NE(RGBAF32BE, RGBAF32LE)
+
+/**
+ * Chromaticity coordinates of the source primaries.
+ * These values match the ones defined by ISO/IEC 23091-2_2019 subclause 8.1 and
+ * ITU-T H.273.
+ */
+enum AVColorPrimaries {
+ AVCOL_PRI_RESERVED0 = 0,
+ AVCOL_PRI_BT709 =
+ 1, ///< also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP 177 Annex B
+ AVCOL_PRI_UNSPECIFIED = 2,
+ AVCOL_PRI_RESERVED = 3,
+ AVCOL_PRI_BT470M =
+ 4, ///< also FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
+
+ AVCOL_PRI_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R
+ ///< BT1700 625 PAL & SECAM
+ AVCOL_PRI_SMPTE170M =
+ 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
+ AVCOL_PRI_SMPTE240M =
+ 7, ///< identical to above, also called "SMPTE C" even though it uses D65
+ AVCOL_PRI_FILM = 8, ///< colour filters using Illuminant C
+ AVCOL_PRI_BT2020 = 9, ///< ITU-R BT2020
+ AVCOL_PRI_SMPTE428 = 10, ///< SMPTE ST 428-1 (CIE 1931 XYZ)
+ AVCOL_PRI_SMPTEST428_1 = AVCOL_PRI_SMPTE428,
+ AVCOL_PRI_SMPTE431 = 11, ///< SMPTE ST 431-2 (2011) / DCI P3
+ AVCOL_PRI_SMPTE432 = 12, ///< SMPTE ST 432-1 (2010) / P3 D65 / Display P3
+ AVCOL_PRI_EBU3213 = 22, ///< EBU Tech. 3213-E (nothing there) / one of JEDEC
+ ///< P22 group phosphors
+ AVCOL_PRI_JEDEC_P22 = AVCOL_PRI_EBU3213,
+ AVCOL_PRI_NB ///< Not part of ABI
+};
+
+/**
+ * Color Transfer Characteristic.
+ * These values match the ones defined by ISO/IEC 23091-2_2019 subclause 8.2.
+ */
+enum AVColorTransferCharacteristic {
+ AVCOL_TRC_RESERVED0 = 0,
+ AVCOL_TRC_BT709 = 1, ///< also ITU-R BT1361
+ AVCOL_TRC_UNSPECIFIED = 2,
+ AVCOL_TRC_RESERVED = 3,
+ AVCOL_TRC_GAMMA22 = 4, ///< also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM
+ AVCOL_TRC_GAMMA28 = 5, ///< also ITU-R BT470BG
+ AVCOL_TRC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 or 625 / ITU-R BT1358
+ ///< 525 or 625 / ITU-R BT1700 NTSC
+ AVCOL_TRC_SMPTE240M = 7,
+ AVCOL_TRC_LINEAR = 8, ///< "Linear transfer characteristics"
+ AVCOL_TRC_LOG = 9, ///< "Logarithmic transfer characteristic (100:1 range)"
+ AVCOL_TRC_LOG_SQRT =
+ 10, ///< "Logarithmic transfer characteristic (100 * Sqrt(10) : 1 range)"
+ AVCOL_TRC_IEC61966_2_4 = 11, ///< IEC 61966-2-4
+ AVCOL_TRC_BT1361_ECG = 12, ///< ITU-R BT1361 Extended Colour Gamut
+ AVCOL_TRC_IEC61966_2_1 = 13, ///< IEC 61966-2-1 (sRGB or sYCC)
+ AVCOL_TRC_BT2020_10 = 14, ///< ITU-R BT2020 for 10-bit system
+ AVCOL_TRC_BT2020_12 = 15, ///< ITU-R BT2020 for 12-bit system
+ AVCOL_TRC_SMPTE2084 =
+ 16, ///< SMPTE ST 2084 for 10-, 12-, 14- and 16-bit systems
+ AVCOL_TRC_SMPTEST2084 = AVCOL_TRC_SMPTE2084,
+ AVCOL_TRC_SMPTE428 = 17, ///< SMPTE ST 428-1
+ AVCOL_TRC_SMPTEST428_1 = AVCOL_TRC_SMPTE428,
+ AVCOL_TRC_ARIB_STD_B67 = 18, ///< ARIB STD-B67, known as "Hybrid log-gamma"
+ AVCOL_TRC_NB ///< Not part of ABI
+};
+
+/**
+ * YUV colorspace type.
+ * These values match the ones defined by ISO/IEC 23091-2_2019 subclause 8.3.
+ */
+enum AVColorSpace {
+ AVCOL_SPC_RGB = 0, ///< order of coefficients is actually GBR, also IEC
+ ///< 61966-2-1 (sRGB), YZX and ST 428-1
+ AVCOL_SPC_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 /
+ ///< derived in SMPTE RP 177 Annex B
+ AVCOL_SPC_UNSPECIFIED = 2,
+ AVCOL_SPC_RESERVED =
+ 3, ///< reserved for future use by ITU-T and ISO/IEC just like 15-255 are
+ AVCOL_SPC_FCC =
+ 4, ///< FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
+ AVCOL_SPC_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R
+ ///< BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
+ AVCOL_SPC_SMPTE170M =
+ 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC /
+ ///< functionally identical to above
+ AVCOL_SPC_SMPTE240M =
+ 7, ///< derived from 170M primaries and D65 white point, 170M is derived
+ ///< from BT470 System M's primaries
+ AVCOL_SPC_YCGCO =
+ 8, ///< used by Dirac / VC-2 and H.264 FRext, see ITU-T SG16
+ AVCOL_SPC_YCOCG = AVCOL_SPC_YCGCO,
+ AVCOL_SPC_BT2020_NCL = 9, ///< ITU-R BT2020 non-constant luminance system
+ AVCOL_SPC_BT2020_CL = 10, ///< ITU-R BT2020 constant luminance system
+ AVCOL_SPC_SMPTE2085 = 11, ///< SMPTE 2085, Y'D'zD'x
+ AVCOL_SPC_CHROMA_DERIVED_NCL =
+ 12, ///< Chromaticity-derived non-constant luminance system
+ AVCOL_SPC_CHROMA_DERIVED_CL =
+ 13, ///< Chromaticity-derived constant luminance system
+ AVCOL_SPC_ICTCP = 14, ///< ITU-R BT.2100-0, ICtCp
+ AVCOL_SPC_IPT_C2 = 15, ///< SMPTE ST 2128, IPT-C2
+ AVCOL_SPC_YCGCO_RE = 16, ///< YCgCo-R, even addition of bits
+ AVCOL_SPC_YCGCO_RO = 17, ///< YCgCo-R, odd addition of bits
+ AVCOL_SPC_NB ///< Not part of ABI
+};
+
+/**
+ * Visual content value range.
+ *
+ * These values are based on definitions that can be found in multiple
+ * specifications, such as ITU-R BT.709 (3.4 - Quantization of RGB, luminance
+ * and colour-difference signals), ITU-R BT.2020 (Table 5 - Digital
+ * Representation) as well as ITU-R BT.2100 (Table 9 - Digital 10- and 12-bit
+ * integer representation). At the time of writing, the BT.2100 one is
+ * recommended, as it also defines the full range representation.
+ *
+ * Common definitions:
+ * - For RGB and luma planes such as Y in YCbCr and I in ICtCp,
+ * 'E' is the original value in range of 0.0 to 1.0.
+ * - For chroma planes such as Cb,Cr and Ct,Cp, 'E' is the original
+ * value in range of -0.5 to 0.5.
+ * - 'n' is the output bit depth.
+ * - For additional definitions such as rounding and clipping to valid n
+ * bit unsigned integer range, please refer to BT.2100 (Table 9).
+ */
+enum AVColorRange {
+ AVCOL_RANGE_UNSPECIFIED = 0,
+
+ /**
+ * Narrow or limited range content.
+ *
+ * - For luma planes:
+ *
+ * (219 * E + 16) * 2^(n-8)
+ *
+ * F.ex. the range of 16-235 for 8 bits
+ *
+ * - For chroma planes:
+ *
+ * (224 * E + 128) * 2^(n-8)
+ *
+ * F.ex. the range of 16-240 for 8 bits
+ */
+ AVCOL_RANGE_MPEG = 1,
+
+ /**
+ * Full range content.
+ *
+ * - For RGB and luma planes:
+ *
+ * (2^n - 1) * E
+ *
+ * F.ex. the range of 0-255 for 8 bits
+ *
+ * - For chroma planes:
+ *
+ * (2^n - 1) * E + 2^(n - 1)
+ *
+ * F.ex. the range of 1-255 for 8 bits
+ */
+ AVCOL_RANGE_JPEG = 2,
+ AVCOL_RANGE_NB ///< Not part of ABI
+};
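+
+/* Editorial sketch, not part of the upstream header: expanding the
+ * AVCOL_RANGE_MPEG luma formula (219 * E + 16) * 2^(n-8) at E = 0.0 and
+ * E = 1.0 gives the familiar limited-range code values. */
+static inline void example_limited_luma_range(int n, int* lo, int* hi) {
+  *lo = 16 << (n - 8);         /* E = 0.0:  16 for n=8,  64 for n=10 */
+  *hi = (219 + 16) << (n - 8); /* E = 1.0: 235 for n=8, 940 for n=10 */
+}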
+
+/**
+ * Location of chroma samples.
+ *
+ * Illustration showing the location of the first (top left) chroma sample of
+ * the image, the left shows only luma, the right shows the location of the
+ * chroma sample, the 2 could be imagined to overlay each other but are drawn
+ * separately due to limitations of ASCII
+ *
+ *                 1st 2nd          1st 2nd   horizontal luma sample positions
+ *                  v   v            v   v
+ *                 ______           ______
+ * 1st luma line > |X   X ...       |3 4 X ...   X are luma samples,
+ *                 |                |1 2         1-6 are possible chroma positions
+ * 2nd luma line > |X   X ...       |5 6 X ...   0 is undefined/unknown position
+ */
+enum AVChromaLocation {
+ AVCHROMA_LOC_UNSPECIFIED = 0,
+ AVCHROMA_LOC_LEFT = 1, ///< MPEG-2/4 4:2:0, H.264 default for 4:2:0
+ AVCHROMA_LOC_CENTER = 2, ///< MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0
+ AVCHROMA_LOC_TOPLEFT =
+ 3, ///< ITU-R 601, SMPTE 274M 296M S314M(DV 4:1:1), mpeg2 4:2:2
+ AVCHROMA_LOC_TOP = 4,
+ AVCHROMA_LOC_BOTTOMLEFT = 5,
+ AVCHROMA_LOC_BOTTOM = 6,
+ AVCHROMA_LOC_NB ///< Not part of ABI
+};
+
+#endif /* AVUTIL_PIXFMT_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/rational.h b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/rational.h
new file mode 100644
index 0000000000..13a3c41deb
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/rational.h
@@ -0,0 +1,226 @@
+/*
+ * rational numbers
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * @ingroup lavu_math_rational
+ * Utilities for rational number calculation.
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#ifndef AVUTIL_RATIONAL_H
+#define AVUTIL_RATIONAL_H
+
+#include <stdint.h>
+#include <limits.h>
+#include "attributes.h"
+
+/**
+ * @defgroup lavu_math_rational AVRational
+ * @ingroup lavu_math
+ * Rational number calculation.
+ *
+ * While rational numbers can be expressed as floating-point numbers, the
+ * conversion process is lossy, and so are floating-point operations. On the
+ * other hand, the nature of FFmpeg demands highly accurate calculation of
+ * timestamps. This set of rational number utilities serves as a generic
+ * interface for manipulating rational numbers as pairs of numerators and
+ * denominators.
+ *
+ * Many of the functions that operate on AVRationals have the suffix `_q`, in
+ * reference to the mathematical symbol "ℚ" (Q), which denotes the set of all
+ * rational numbers.
+ *
+ * @{
+ */
+
+/**
+ * Rational number (pair of numerator and denominator).
+ */
+typedef struct AVRational {
+ int num; ///< Numerator
+ int den; ///< Denominator
+} AVRational;
+
+/**
+ * Create an AVRational.
+ *
+ * Useful for compilers that do not support compound literals.
+ *
+ * @note The return value is not reduced.
+ * @see av_reduce()
+ */
+static inline AVRational av_make_q(int num, int den) {
+ AVRational r = {num, den};
+ return r;
+}
+
+/**
+ * Compare two rationals.
+ *
+ * @param a First rational
+ * @param b Second rational
+ *
+ * @return One of the following values:
+ * - 0 if `a == b`
+ * - 1 if `a > b`
+ * - -1 if `a < b`
+ * - `INT_MIN` if one of the values is of the form `0 / 0`
+ */
+static inline int av_cmp_q(AVRational a, AVRational b) {
+ const int64_t tmp = a.num * (int64_t)b.den - b.num * (int64_t)a.den;
+
+ if (tmp)
+ return (int)((tmp ^ a.den ^ b.den) >> 63) | 1;
+ else if (b.den && a.den)
+ return 0;
+ else if (a.num && b.num)
+ return (a.num >> 31) - (b.num >> 31);
+ else
+ return INT_MIN;
+}
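+
+/* Editorial sketch, not part of the upstream header: comparing rationals
+ * exactly, without a lossy detour through floating point. */
+static inline void example_cmp_q(void) {
+  int cmp = av_cmp_q(av_make_q(1, 2), av_make_q(2, 3)); /* -1: 1/2 < 2/3 */
+  int bad = av_cmp_q(av_make_q(0, 0), av_make_q(1, 2)); /* INT_MIN: 0/0 */
+  (void)cmp;
+  (void)bad;
+}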
+
+/**
+ * Convert an AVRational to a `double`.
+ * @param a AVRational to convert
+ * @return `a` in floating-point form
+ * @see av_d2q()
+ */
+static inline double av_q2d(AVRational a) { return a.num / (double)a.den; }
+
+/**
+ * Reduce a fraction.
+ *
+ * This is useful for framerate calculations.
+ *
+ * @param[out] dst_num Destination numerator
+ * @param[out] dst_den Destination denominator
+ * @param[in] num Source numerator
+ * @param[in] den Source denominator
+ * @param[in] max Maximum allowed values for `dst_num` & `dst_den`
+ * @return 1 if the operation is exact, 0 otherwise
+ */
+int av_reduce(int* dst_num, int* dst_den, int64_t num, int64_t den,
+ int64_t max);
+
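+/* Editorial sketch, not part of the upstream header: reducing a rational to
+ * lowest terms, e.g. normalizing an NTSC frame rate. */
+static inline void example_reduce(void) {
+  int num, den;
+  int exact = av_reduce(&num, &den, 60000, 2002, INT_MAX);
+  /* num == 30000, den == 1001, exact == 1 (nothing was lost) */
+  (void)num;
+  (void)den;
+  (void)exact;
+}
+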
+/**
+ * Multiply two rationals.
+ * @param b First rational
+ * @param c Second rational
+ * @return b*c
+ */
+AVRational av_mul_q(AVRational b, AVRational c) av_const;
+
+/**
+ * Divide one rational by another.
+ * @param b First rational
+ * @param c Second rational
+ * @return b/c
+ */
+AVRational av_div_q(AVRational b, AVRational c) av_const;
+
+/**
+ * Add two rationals.
+ * @param b First rational
+ * @param c Second rational
+ * @return b+c
+ */
+AVRational av_add_q(AVRational b, AVRational c) av_const;
+
+/**
+ * Subtract one rational from another.
+ * @param b First rational
+ * @param c Second rational
+ * @return b-c
+ */
+AVRational av_sub_q(AVRational b, AVRational c) av_const;
+
+/**
+ * Invert a rational.
+ * @param q value
+ * @return 1 / q
+ */
+static av_always_inline AVRational av_inv_q(AVRational q) {
+ AVRational r = {q.den, q.num};
+ return r;
+}
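+
+/* Editorial sketch, not part of the upstream header: converting a timestamp
+ * between two time bases with exact rational arithmetic. Production code
+ * would typically use av_rescale_q() instead, assumed here to live in
+ * libavutil/mathematics.h, outside this header. */
+static inline int64_t example_rescale_ms(int64_t pts90k) {
+  AVRational src_tb = av_make_q(1, 90000); /* 90 kHz MPEG-TS clock */
+  AVRational dst_tb = av_make_q(1, 1000);  /* milliseconds */
+  AVRational f = av_div_q(src_tb, dst_tb); /* reduces to {1, 90} */
+  return pts90k * f.num / f.den;           /* 90000 ticks -> 1000 ms */
+}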
+
+/**
+ * Convert a double precision floating point number to a rational.
+ *
+ * In case of infinity, the returned value is expressed as `{1, 0}` or
+ * `{-1, 0}` depending on the sign.
+ *
+ * In general, rational numbers with |num| <= 1<<26 && |den| <= 1<<26
+ * can be recovered exactly from their double representation
+ * (no exceptions were found within one billion random values tested).
+ *
+ * @param d `double` to convert
+ * @param max Maximum allowed numerator and denominator
+ * @return `d` in AVRational form
+ * @see av_q2d()
+ */
+AVRational av_d2q(double d, int max) av_const;
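+
+/* Editorial sketch, not part of the upstream header: round-tripping a value
+ * through av_d2q()/av_q2d(). 0.5 is exactly representable as a double, so
+ * the recovered rational is exactly {1, 2}. */
+static inline void example_d2q(void) {
+  AVRational half = av_d2q(0.5, INT_MAX); /* {1, 2} */
+  double back = av_q2d(half);             /* 0.5 */
+  (void)back;
+}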
+
+/**
+ * Find which of the two rationals is closer to another rational.
+ *
+ * @param q Rational to be compared against
+ * @param q1 Rational to be tested
+ * @param q2 Rational to be tested
+ * @return One of the following values:
+ * - 1 if `q1` is nearer to `q` than `q2`
+ * - -1 if `q2` is nearer to `q` than `q1`
+ * - 0 if they have the same distance
+ */
+int av_nearer_q(AVRational q, AVRational q1, AVRational q2);
+
+/**
+ * Find the value in a list of rationals nearest a given reference rational.
+ *
+ * @param q Reference rational
+ * @param q_list Array of rationals terminated by `{0, 0}`
+ * @return Index of the nearest value found in the array
+ */
+int av_find_nearest_q_idx(AVRational q, const AVRational* q_list);
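+
+/* Editorial sketch, not part of the upstream header: snapping a measured
+ * frame rate to the nearest entry of a {0, 0}-terminated candidate list. */
+static inline int example_nearest_rate(void) {
+  static const AVRational rates[] = {
+      {24, 1}, {25, 1}, {30000, 1001}, {30, 1}, {0, 0} /* terminator */};
+  /* 29.97 is nearer to 30000/1001 than to 30/1, so this returns 2. */
+  return av_find_nearest_q_idx(av_make_q(2997, 100), rates);
+}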
+
+/**
+ * Convert an AVRational to a IEEE 32-bit `float` expressed in fixed-point
+ * format.
+ *
+ * @param q Rational to be converted
+ * @return Equivalent floating-point value, expressed as an unsigned 32-bit
+ * integer.
+ * @note The returned value is platform-independent.
+ */
+uint32_t av_q2intfloat(AVRational q);
+
+/**
+ * Return the best rational so that a and b are multiples of it.
+ * If the resulting denominator is larger than max_den, return def.
+ */
+AVRational av_gcd_q(AVRational a, AVRational b, int max_den, AVRational def);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_RATIONAL_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/samplefmt.h b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/samplefmt.h
new file mode 100644
index 0000000000..6ba22b0b79
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/samplefmt.h
@@ -0,0 +1,275 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_SAMPLEFMT_H
+#define AVUTIL_SAMPLEFMT_H
+
+#include <stdint.h>
+
+/**
+ * @addtogroup lavu_audio
+ * @{
+ *
+ * @defgroup lavu_sampfmts Audio sample formats
+ *
+ * Audio sample format enumeration and related convenience functions.
+ * @{
+ */
+
+/**
+ * Audio sample formats
+ *
+ * - The data described by the sample format is always in native-endian order.
+ * Sample values can be expressed by native C types, hence the lack of a
+ * signed 24-bit sample format even though it is a common raw audio data format.
+ *
+ * - The floating-point formats are based on full volume being in the range
+ * [-1.0, 1.0]. Any values outside this range are beyond full volume level.
+ *
+ * - The data layout as used in av_samples_fill_arrays() and elsewhere in FFmpeg
+ * (such as AVFrame in libavcodec) is as follows:
+ *
+ * @par
+ * For planar sample formats, each audio channel is in a separate data plane,
+ * and linesize is the buffer size, in bytes, for a single plane. All data
+ * planes must be the same size. For packed sample formats, only the first data
+ * plane is used, and samples for each channel are interleaved. In this case,
+ * linesize is the buffer size, in bytes, for that one plane.
+ *
+ */
+enum AVSampleFormat {
+ AV_SAMPLE_FMT_NONE = -1,
+ AV_SAMPLE_FMT_U8, ///< unsigned 8 bits
+ AV_SAMPLE_FMT_S16, ///< signed 16 bits
+ AV_SAMPLE_FMT_S32, ///< signed 32 bits
+ AV_SAMPLE_FMT_FLT, ///< float
+ AV_SAMPLE_FMT_DBL, ///< double
+
+ AV_SAMPLE_FMT_U8P, ///< unsigned 8 bits, planar
+ AV_SAMPLE_FMT_S16P, ///< signed 16 bits, planar
+ AV_SAMPLE_FMT_S32P, ///< signed 32 bits, planar
+ AV_SAMPLE_FMT_FLTP, ///< float, planar
+ AV_SAMPLE_FMT_DBLP, ///< double, planar
+ AV_SAMPLE_FMT_S64, ///< signed 64 bits
+ AV_SAMPLE_FMT_S64P, ///< signed 64 bits, planar
+
+ AV_SAMPLE_FMT_NB ///< Number of sample formats. DO NOT USE if linking
+ ///< dynamically
+};
+
+/**
+ * Return the name of sample_fmt, or NULL if sample_fmt is not
+ * recognized.
+ */
+const char* av_get_sample_fmt_name(enum AVSampleFormat sample_fmt);
+
+/**
+ * Return a sample format corresponding to name, or AV_SAMPLE_FMT_NONE
+ * on error.
+ */
+enum AVSampleFormat av_get_sample_fmt(const char* name);
+
+/**
+ * Return the planar<->packed alternative form of the given sample format, or
+ * AV_SAMPLE_FMT_NONE on error. If the passed sample_fmt is already in the
+ * requested planar/packed format, the format returned is the same as the
+ * input.
+ */
+enum AVSampleFormat av_get_alt_sample_fmt(enum AVSampleFormat sample_fmt,
+ int planar);
+
+/**
+ * Get the packed alternative form of the given sample format.
+ *
+ * If the passed sample_fmt is already in packed format, the format returned is
+ * the same as the input.
+ *
+ * @return the packed alternative form of the given sample format or
+ AV_SAMPLE_FMT_NONE on error.
+ */
+enum AVSampleFormat av_get_packed_sample_fmt(enum AVSampleFormat sample_fmt);
+
+/**
+ * Get the planar alternative form of the given sample format.
+ *
+ * If the passed sample_fmt is already in planar format, the format returned is
+ * the same as the input.
+ *
+ * @return the planar alternative form of the given sample format or
+ AV_SAMPLE_FMT_NONE on error.
+ */
+enum AVSampleFormat av_get_planar_sample_fmt(enum AVSampleFormat sample_fmt);
+
+/**
+ * Generate a string corresponding to the sample format with
+ * sample_fmt, or a header if sample_fmt is negative.
+ *
+ * @param buf the buffer where to write the string
+ * @param buf_size the size of buf
+ * @param sample_fmt the number of the sample format to print the
+ * corresponding info string, or a negative value to print the
+ * corresponding header.
+ * @return the pointer to the filled buffer or NULL if sample_fmt is
+ * unknown or in case of other errors
+ */
+char* av_get_sample_fmt_string(char* buf, int buf_size,
+ enum AVSampleFormat sample_fmt);
+
+/**
+ * Return number of bytes per sample.
+ *
+ * @param sample_fmt the sample format
+ * @return number of bytes per sample or zero if unknown for the given
+ * sample format
+ */
+int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt);
+
+/**
+ * Check if the sample format is planar.
+ *
+ * @param sample_fmt the sample format to inspect
+ * @return 1 if the sample format is planar, 0 if it is interleaved
+ */
+int av_sample_fmt_is_planar(enum AVSampleFormat sample_fmt);
+
+/**
+ * Get the required buffer size for the given audio parameters.
+ *
+ * @param[out] linesize calculated linesize, may be NULL
+ * @param nb_channels the number of channels
+ * @param nb_samples the number of samples in a single channel
+ * @param sample_fmt the sample format
+ * @param align buffer size alignment (0 = default, 1 = no alignment)
+ * @return required buffer size, or negative error code on failure
+ */
+int av_samples_get_buffer_size(int* linesize, int nb_channels, int nb_samples,
+ enum AVSampleFormat sample_fmt, int align);
+
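+/* Editorial sketch, not part of the upstream header: sizing a buffer for
+ * stereo planar float audio. AV_SAMPLE_FMT_FLTP has 4-byte samples and is
+ * planar, so each of the 2 planes needs 1024 * 4 = 4096 bytes and the total
+ * comes to 8192 bytes with the default alignment. */
+static inline int example_buffer_size(void) {
+  int linesize = 0; /* receives the per-plane size, 4096 here */
+  return av_samples_get_buffer_size(&linesize, 2, 1024, AV_SAMPLE_FMT_FLTP,
+                                    0 /* default alignment */);
+}
+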
+/**
+ * @}
+ *
+ * @defgroup lavu_sampmanip Samples manipulation
+ *
+ * Functions that manipulate audio samples
+ * @{
+ */
+
+/**
+ * Fill plane data pointers and linesize for samples with sample
+ * format sample_fmt.
+ *
+ * The audio_data array is filled with the pointers to the samples data planes:
+ * for planar, set the start point of each channel's data within the buffer,
+ * for packed, set the start point of the entire buffer only.
+ *
+ * The value pointed to by linesize is set to the aligned size of each
+ * channel's data buffer for planar layout, or to the aligned size of the
+ * buffer for all channels for packed layout.
+ *
+ * The buffer in buf must be big enough to contain all the samples
+ * (use av_samples_get_buffer_size() to compute its minimum size),
+ * otherwise the audio_data pointers will point to invalid data.
+ *
+ * @see enum AVSampleFormat
+ * The documentation for AVSampleFormat describes the data layout.
+ *
+ * @param[out] audio_data array to be filled with the pointer for each channel
+ * @param[out] linesize calculated linesize, may be NULL
+ * @param buf the pointer to a buffer containing the samples
+ * @param nb_channels the number of channels
+ * @param nb_samples the number of samples in a single channel
+ * @param sample_fmt the sample format
+ * @param align buffer size alignment (0 = default, 1 = no alignment)
+ * @return minimum size in bytes required for the buffer on
+ * success, or a negative error code on failure
+ */
+int av_samples_fill_arrays(uint8_t** audio_data, int* linesize,
+ const uint8_t* buf, int nb_channels, int nb_samples,
+ enum AVSampleFormat sample_fmt, int align);
+
+/**
+ * Allocate a samples buffer for nb_samples samples, and fill data pointers and
+ * linesize accordingly.
+ * The allocated samples buffer can be freed by using av_freep(&audio_data[0]).
+ * Allocated data will be initialized to silence.
+ *
+ * @see enum AVSampleFormat
+ * The documentation for AVSampleFormat describes the data layout.
+ *
+ * @param[out] audio_data array to be filled with the pointer for each channel
+ * @param[out] linesize aligned size for audio buffer(s), may be NULL
+ * @param nb_channels number of audio channels
+ * @param nb_samples number of samples per channel
+ * @param sample_fmt the sample format
+ * @param align buffer size alignment (0 = default, 1 = no alignment)
+ * @return >=0 on success or a negative error code on failure
+ * @todo return the size of the allocated buffer in case of success at the next
+ * bump
+ * @see av_samples_fill_arrays()
+ * @see av_samples_alloc_array_and_samples()
+ */
+int av_samples_alloc(uint8_t** audio_data, int* linesize, int nb_channels,
+ int nb_samples, enum AVSampleFormat sample_fmt, int align);
+
+/**
+ * Allocate a data pointers array, samples buffer for nb_samples
+ * samples, and fill data pointers and linesize accordingly.
+ *
+ * This is the same as av_samples_alloc(), but also allocates the data
+ * pointers array.
+ *
+ * @see av_samples_alloc()
+ */
+int av_samples_alloc_array_and_samples(uint8_t*** audio_data, int* linesize,
+ int nb_channels, int nb_samples,
+ enum AVSampleFormat sample_fmt,
+ int align);
+
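+/* Editorial sketch, not part of the upstream header: allocating and
+ * releasing an interleaved audio buffer. av_freep() is assumed to come
+ * from libavutil/mem.h, outside this header. */
+void av_freep(void* ptr);
+static inline int example_alloc_samples(void) {
+  uint8_t** data = 0;
+  int linesize = 0;
+  int ret = av_samples_alloc_array_and_samples(&data, &linesize, 2, 1024,
+                                               AV_SAMPLE_FMT_S16, 0);
+  if (ret < 0) return ret;
+  /* AV_SAMPLE_FMT_S16 is packed, so both channels are interleaved in
+   * data[0]; ret is the total buffer size in bytes (4096 here). */
+  av_freep(&data[0]); /* frees the sample buffer */
+  av_freep(&data);    /* frees the pointer array allocated with it */
+  return 0;
+}
+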
+/**
+ * Copy samples from src to dst.
+ *
+ * @param dst destination array of pointers to data planes
+ * @param src source array of pointers to data planes
+ * @param dst_offset offset in samples at which the data will be written to dst
+ * @param src_offset offset in samples at which the data will be read from src
+ * @param nb_samples number of samples to be copied
+ * @param nb_channels number of audio channels
+ * @param sample_fmt audio sample format
+ */
+int av_samples_copy(uint8_t* const* dst, uint8_t* const* src, int dst_offset,
+ int src_offset, int nb_samples, int nb_channels,
+ enum AVSampleFormat sample_fmt);
+
+/**
+ * Fill an audio buffer with silence.
+ *
+ * @param audio_data array of pointers to data planes
+ * @param offset offset in samples at which to start filling
+ * @param nb_samples number of samples to fill
+ * @param nb_channels number of audio channels
+ * @param sample_fmt audio sample format
+ */
+int av_samples_set_silence(uint8_t* const* audio_data, int offset,
+ int nb_samples, int nb_channels,
+ enum AVSampleFormat sample_fmt);
+
+/**
+ * @}
+ * @}
+ */
+#endif /* AVUTIL_SAMPLEFMT_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/version.h b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/version.h
new file mode 100644
index 0000000000..8826d0da6c
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg61/include/libavutil/version.h
@@ -0,0 +1,121 @@
+/*
+ * copyright (c) 2003 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * @ingroup lavu
+ * Libavutil version macros
+ */
+
+#ifndef AVUTIL_VERSION_H
+#define AVUTIL_VERSION_H
+
+#include "macros.h"
+
+/**
+ * @addtogroup version_utils
+ *
+ * Useful to check and match library version in order to maintain
+ * backward compatibility.
+ *
+ * The FFmpeg libraries follow a versioning scheme very similar to
+ * Semantic Versioning (http://semver.org/).
+ * The difference is that the component called PATCH is called MICRO in FFmpeg,
+ * and its value is reset to 100 instead of 0 to keep it above or equal to 100.
+ * Also, we do not increase MICRO for every bugfix or change in git master.
+ *
+ * Prior to FFmpeg 3.2, point releases did not change any lib version number,
+ * to avoid aliasing different git master checkouts.
+ * Starting with FFmpeg 3.2, the released library versions will occupy
+ * a separate MAJOR.MINOR that is not used on the master development branch.
+ * That is, if we branch a release off master 55.10.123, we will bump to
+ * 55.11.100 for the release and master will continue at 55.12.100 after it.
+ * Each new point release will then bump the MICRO, improving the usefulness
+ * of the lib versions.
+ *
+ * @{
+ */
+
+#define AV_VERSION_INT(a, b, c) ((a) << 16 | (b) << 8 | (c))
+#define AV_VERSION_DOT(a, b, c) a##.##b##.##c
+#define AV_VERSION(a, b, c) AV_VERSION_DOT(a, b, c)
+
+/**
+ * Extract version components from the full ::AV_VERSION_INT int as returned
+ * by functions like ::avformat_version() and ::avcodec_version()
+ */
+#define AV_VERSION_MAJOR(a) ((a) >> 16)
+#define AV_VERSION_MINOR(a) (((a) & 0x00FF00) >> 8)
+#define AV_VERSION_MICRO(a) ((a) & 0xFF)
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup lavu_ver Version and Build diagnostics
+ *
+ * Macros and functions useful to check at compile time and at runtime
+ * which version of libavutil is in use.
+ *
+ * @{
+ */
+
+#define LIBAVUTIL_VERSION_MAJOR 59
+#define LIBAVUTIL_VERSION_MINOR 13
+#define LIBAVUTIL_VERSION_MICRO 100
+
+#define LIBAVUTIL_VERSION_INT \
+ AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, LIBAVUTIL_VERSION_MINOR, \
+ LIBAVUTIL_VERSION_MICRO)
+#define LIBAVUTIL_VERSION \
+ AV_VERSION(LIBAVUTIL_VERSION_MAJOR, LIBAVUTIL_VERSION_MINOR, \
+ LIBAVUTIL_VERSION_MICRO)
+#define LIBAVUTIL_BUILD LIBAVUTIL_VERSION_INT
+
+#define LIBAVUTIL_IDENT "Lavu" AV_STRINGIFY(LIBAVUTIL_VERSION)
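+
+/* Editorial sketch, not part of the upstream header: comparing the version
+ * the code was built against with the library loaded at runtime.
+ * avutil_version() is assumed from libavutil/avutil.h, outside this file. */
+unsigned avutil_version(void);
+static inline int example_version_compatible(void) {
+  unsigned runtime = avutil_version();
+  /* Same MAJOR means ABI compatibility; MINOR only grows within a MAJOR. */
+  return AV_VERSION_MAJOR(runtime) == LIBAVUTIL_VERSION_MAJOR &&
+         (int)AV_VERSION_MINOR(runtime) >= LIBAVUTIL_VERSION_MINOR;
+}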
+
+/**
+ * @defgroup lavu_depr_guards Deprecation Guards
+ * FF_API_* defines may be placed below to indicate public API that will be
+ * dropped at a future version bump. The defines themselves are not part of
+ * the public API and may change, break or disappear at any time.
+ *
+ * @note When bumping the major version it is recommended to manually
+ * disable each FF_API_* in its own commit instead of disabling them all
+ * at once through the bump. This improves the git bisect-ability of the change.
+ *
+ * @{
+ */
+
+#define FF_API_HDR_VIVID_THREE_SPLINE (LIBAVUTIL_VERSION_MAJOR < 60)
+#define FF_API_FRAME_PKT (LIBAVUTIL_VERSION_MAJOR < 60)
+#define FF_API_INTERLACED_FRAME (LIBAVUTIL_VERSION_MAJOR < 60)
+#define FF_API_FRAME_KEY (LIBAVUTIL_VERSION_MAJOR < 60)
+#define FF_API_PALETTE_HAS_CHANGED (LIBAVUTIL_VERSION_MAJOR < 60)
+#define FF_API_VULKAN_CONTIGUOUS_MEMORY (LIBAVUTIL_VERSION_MAJOR < 60)
+#define FF_API_H274_FILM_GRAIN_VCS (LIBAVUTIL_VERSION_MAJOR < 60)
+
+/**
+ * @}
+ * @}
+ */
+
+#endif /* AVUTIL_VERSION_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg61/moz.build b/dom/media/platforms/ffmpeg/ffmpeg61/moz.build
new file mode 100644
index 0000000000..d598ae0017
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg61/moz.build
@@ -0,0 +1,47 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+UNIFIED_SOURCES += [
+ "../FFmpegAudioDecoder.cpp",
+ "../FFmpegAudioEncoder.cpp",
+ "../FFmpegDataDecoder.cpp",
+ "../FFmpegDataEncoder.cpp",
+ "../FFmpegDecoderModule.cpp",
+ "../FFmpegEncoderModule.cpp",
+ "../FFmpegVideoDecoder.cpp",
+ "../FFmpegVideoEncoder.cpp",
+]
+LOCAL_INCLUDES += [
+ "..",
+ "/media/mozva",
+ "include",
+]
+
+if CONFIG["CC_TYPE"] in ("clang", "gcc"):
+ CXXFLAGS += ["-Wno-deprecated-declarations"]
+if CONFIG["CC_TYPE"] == "clang":
+ CXXFLAGS += [
+ "-Wno-unknown-attributes",
+ ]
+if CONFIG["CC_TYPE"] == "gcc":
+ CXXFLAGS += [
+ "-Wno-attributes",
+ ]
+if CONFIG["MOZ_WIDGET_TOOLKIT"] == "gtk":
+ CXXFLAGS += CONFIG["MOZ_GTK3_CFLAGS"]
+if CONFIG["MOZ_ENABLE_VAAPI"] or CONFIG["MOZ_ENABLE_V4L2"]:
+ UNIFIED_SOURCES += ["../FFmpegVideoFramePool.cpp"]
+ LOCAL_INCLUDES += ["/third_party/drm/drm/include/libdrm/"]
+ USE_LIBS += ["mozva"]
+ DEFINES["MOZ_USE_HWDECODE"] = 1
+
+include("/ipc/chromium/chromium-config.mozbuild")
+
+LOCAL_INCLUDES += [
+ "/media/libyuv/libyuv/include",
+]
+
+FINAL_LIBRARY = "xul"
diff --git a/dom/media/platforms/ffmpeg/ffvpx/FFVPXRuntimeLinker.cpp b/dom/media/platforms/ffmpeg/ffvpx/FFVPXRuntimeLinker.cpp
index dfc8244f1d..c0a6e01f98 100644
--- a/dom/media/platforms/ffmpeg/ffvpx/FFVPXRuntimeLinker.cpp
+++ b/dom/media/platforms/ffmpeg/ffvpx/FFVPXRuntimeLinker.cpp
@@ -7,14 +7,10 @@
#include "FFVPXRuntimeLinker.h"
#include "FFmpegLibWrapper.h"
#include "FFmpegLog.h"
-#include "BinaryPath.h"
#include "mozilla/FileUtils.h"
#include "nsLocalFile.h"
-#include "prmem.h"
+#include "nsXPCOMPrivate.h"
#include "prlink.h"
-#ifdef XP_WIN
-# include <windows.h>
-#endif
namespace mozilla {
@@ -84,29 +80,37 @@ bool FFVPXRuntimeLinker::Init() {
sFFVPXLib.LinkVAAPILibs();
#endif
- nsCOMPtr<nsIFile> libFile;
- if (NS_FAILED(mozilla::BinaryPath::GetFile(getter_AddRefs(libFile)))) {
+#ifdef XP_WIN
+ PathString path =
+ GetLibraryFilePathname(LXUL_DLL, (PRFuncPtr)&FFVPXRuntimeLinker::Init);
+#else
+ PathString path =
+ GetLibraryFilePathname(XUL_DLL, (PRFuncPtr)&FFVPXRuntimeLinker::Init);
+#endif
+ if (path.IsEmpty()) {
+ return false;
+ }
+ nsCOMPtr<nsIFile> libFile = new nsLocalFile(path);
+ if (libFile->NativePath().IsEmpty()) {
return false;
}
-#ifdef XP_DARWIN
- if (!XRE_IsParentProcess() &&
- (XRE_GetChildProcBinPathType(XRE_GetProcessType()) ==
- BinPathType::PluginContainer)) {
- // On macOS, PluginContainer processes have their binary in a
- // plugin-container.app/Content/MacOS/ directory.
- nsCOMPtr<nsIFile> parentDir1, parentDir2;
- if (NS_FAILED(libFile->GetParent(getter_AddRefs(parentDir1)))) {
- return false;
- }
- if (NS_FAILED(parentDir1->GetParent(getter_AddRefs(parentDir2)))) {
- return false;
- }
- if (NS_FAILED(parentDir2->GetParent(getter_AddRefs(libFile)))) {
+ if (getenv("MOZ_RUN_GTEST")
+#ifdef FUZZING
+ || getenv("FUZZER")
+#endif
+ ) {
+ // The condition above is the same as in
+ // xpcom/glue/standalone/nsXPCOMGlue.cpp. This means we can't reach here
+ // without the gtest libxul being loaded. In turn, that means the path to
+ // libxul leads to a subdirectory of where the libmozav* libraries are, so
+ // we get the parent.
+ nsCOMPtr<nsIFile> parent;
+ if (NS_FAILED(libFile->GetParent(getter_AddRefs(parent)))) {
return false;
}
+ libFile = parent;
}
-#endif
if (NS_FAILED(libFile->SetNativeLeafName(MOZ_DLL_PREFIX
"mozavutil" MOZ_DLL_SUFFIX ""_ns))) {
diff --git a/dom/media/platforms/ffmpeg/ffvpx/moz.build b/dom/media/platforms/ffmpeg/ffvpx/moz.build
index bc72b6d1a7..a9236b25eb 100644
--- a/dom/media/platforms/ffmpeg/ffvpx/moz.build
+++ b/dom/media/platforms/ffmpeg/ffvpx/moz.build
@@ -25,7 +25,7 @@ SOURCES += [
]
LOCAL_INCLUDES += [
"..",
- "../ffmpeg60/include",
+ "../ffmpeg61/include",
"/media/mozva",
]
diff --git a/dom/media/platforms/ffmpeg/moz.build b/dom/media/platforms/ffmpeg/moz.build
index ac78eee289..ce7c06b9a6 100644
--- a/dom/media/platforms/ffmpeg/moz.build
+++ b/dom/media/platforms/ffmpeg/moz.build
@@ -16,6 +16,7 @@ DIRS += [
"ffmpeg58",
"ffmpeg59",
"ffmpeg60",
+ "ffmpeg61",
]
UNIFIED_SOURCES += ["FFmpegRuntimeLinker.cpp"]
diff --git a/dom/media/platforms/omx/OmxDataDecoder.cpp b/dom/media/platforms/omx/OmxDataDecoder.cpp
index e830f77dd2..6c40ca4910 100644
--- a/dom/media/platforms/omx/OmxDataDecoder.cpp
+++ b/dom/media/platforms/omx/OmxDataDecoder.cpp
@@ -521,14 +521,14 @@ nsTArray<RefPtr<OmxPromiseLayer::BufferData>>* OmxDataDecoder::GetBuffers(
return &mOutPortBuffers;
}
-void OmxDataDecoder::ResolveInitPromise(const char* aMethodName) {
+void OmxDataDecoder::ResolveInitPromise(StaticString aMethodName) {
MOZ_ASSERT(mOmxTaskQueue->IsCurrentThreadIn());
- LOG("called from %s", aMethodName);
+ LOG("called from %s", aMethodName.get());
mInitPromise.ResolveIfExists(mTrackInfo->GetType(), aMethodName);
}
void OmxDataDecoder::RejectInitPromise(MediaResult aError,
- const char* aMethodName) {
+ StaticString aMethodName) {
MOZ_ASSERT(mOmxTaskQueue->IsCurrentThreadIn());
mInitPromise.RejectIfExists(aError, aMethodName);
}
diff --git a/dom/media/platforms/omx/OmxDataDecoder.h b/dom/media/platforms/omx/OmxDataDecoder.h
index 69c388ecee..a40aafea36 100644
--- a/dom/media/platforms/omx/OmxDataDecoder.h
+++ b/dom/media/platforms/omx/OmxDataDecoder.h
@@ -88,9 +88,9 @@ class OmxDataDecoder final : public MediaDataDecoder,
protected:
void InitializationTask();
- void ResolveInitPromise(const char* aMethodName);
+ void ResolveInitPromise(StaticString aMethodName);
- void RejectInitPromise(MediaResult aError, const char* aMethodName);
+ void RejectInitPromise(MediaResult aError, StaticString aMethodName);
void OmxStateRunner();
diff --git a/dom/media/platforms/wmf/MFTEncoder.cpp b/dom/media/platforms/wmf/MFTEncoder.cpp
index 410da2733c..424ba7055b 100644
--- a/dom/media/platforms/wmf/MFTEncoder.cpp
+++ b/dom/media/platforms/wmf/MFTEncoder.cpp
@@ -10,6 +10,7 @@
#include "mozilla/StaticPrefs_media.h"
#include "mozilla/mscom/Utils.h"
#include "WMFUtils.h"
+#include <comdef.h>
// Missing from MinGW.
#ifndef CODECAPI_AVEncAdaptiveMode
@@ -231,6 +232,7 @@ HRESULT MFTEncoder::Create(const GUID& aSubtype) {
RefPtr<IMFActivate> factory = CreateFactory(aSubtype);
if (!factory) {
+ MFT_ENC_LOGE("CreateFactory error");
return E_FAIL;
}
@@ -238,12 +240,18 @@ HRESULT MFTEncoder::Create(const GUID& aSubtype) {
RefPtr<IMFTransform> encoder;
HRESULT hr = factory->ActivateObject(
IID_PPV_ARGS(static_cast<IMFTransform**>(getter_AddRefs(encoder))));
- NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+ if (FAILED(hr)) {
+ _com_error error(hr);
+ MFT_ENC_LOGE("MFTEncoder::Create: error = 0x%lX, %ls", hr,
+ error.ErrorMessage());
+ return hr;
+ }
RefPtr<ICodecAPI> config;
// Avoid IID_PPV_ARGS() here for MingGW fails to declare UUID for ICodecAPI.
hr = encoder->QueryInterface(IID_ICodecAPI, getter_AddRefs(config));
if (FAILED(hr)) {
+ MFT_ENC_LOGE("QueryInterface IID_ICodecAPI error");
encoder = nullptr;
factory->ShutdownObject();
return hr;
@@ -276,7 +284,12 @@ MFTEncoder::SetMediaTypes(IMFMediaType* aInputType, IMFMediaType* aOutputType) {
MOZ_ASSERT(aInputType && aOutputType);
AsyncMFTResult asyncMFT = AttemptEnableAsync();
- NS_ENSURE_TRUE(asyncMFT.isOk(), asyncMFT.unwrapErr());
+ if (asyncMFT.isErr()) {
+ HRESULT hr = asyncMFT.inspectErr();
+ _com_error error(hr);
+ MFT_ENC_LOGE("AttemptEnableAsync error: %ls", error.ErrorMessage());
+ return asyncMFT.inspectErr();
+ }
HRESULT hr = GetStreamIDs();
NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
@@ -325,6 +338,7 @@ MFTEncoder::AsyncMFTResult MFTEncoder::AttemptEnableAsync() {
IMFAttributes* pAttributes = nullptr;
HRESULT hr = mEncoder->GetAttributes(&pAttributes);
if (FAILED(hr)) {
+ MFT_ENC_LOGE("Encoder->GetAttribute error");
return AsyncMFTResult(hr);
}
@@ -337,6 +351,10 @@ MFTEncoder::AsyncMFTResult MFTEncoder::AttemptEnableAsync() {
}
pAttributes->Release();
+ if (FAILED(hr)) {
+ MFT_ENC_LOGE("Setting async unlock");
+ }
+
return SUCCEEDED(hr) ? AsyncMFTResult(async) : AsyncMFTResult(hr);
}
diff --git a/dom/media/platforms/wmf/WMF.h b/dom/media/platforms/wmf/WMF.h
index 740442ceda..86afcb8e5c 100644
--- a/dom/media/platforms/wmf/WMF.h
+++ b/dom/media/platforms/wmf/WMF.h
@@ -23,6 +23,7 @@
#include <codecapi.h>
#include "mozilla/Atomics.h"
+#include "mozilla/AppShutdown.h"
#include "mozilla/ClearOnShutdown.h"
#include "mozilla/StaticMutex.h"
#include "nsThreadUtils.h"
@@ -74,7 +75,8 @@ class MediaFoundationInitializer final {
if (sIsShutdown) {
return false;
}
- return Get()->mHasInitialized;
+ auto* rv = Get();
+ return rv ? rv->mHasInitialized : false;
}
private:
@@ -82,17 +84,36 @@ class MediaFoundationInitializer final {
{
StaticMutexAutoLock lock(sCreateMutex);
if (!sInitializer) {
+ // Already in shutdown.
+ if (AppShutdown::GetCurrentShutdownPhase() !=
+ ShutdownPhase::NotInShutdown) {
+ sIsShutdown = true;
+ return nullptr;
+ }
sInitializer.reset(new MediaFoundationInitializer());
- GetMainThreadSerialEventTarget()->Dispatch(
- NS_NewRunnableFunction("MediaFoundationInitializer::Get", [&] {
- // Need to run this before MTA thread gets destroyed.
- RunOnShutdown(
- [&] {
- sInitializer.reset();
- sIsShutdown = true;
- },
- ShutdownPhase::XPCOMShutdown);
- }));
+ auto shutdownCleanUp = [&] {
+ if (AppShutdown::GetCurrentShutdownPhase() !=
+ ShutdownPhase::NotInShutdown) {
+ sInitializer.reset();
+ sIsShutdown = true;
+ return;
+ }
+        // MFShutdown needs to run on the MTA thread, which is destroyed
+        // on XPCOMShutdownThreads, so we need to run cleanup before that
+ // phase.
+ RunOnShutdown(
+ [&]() {
+ sInitializer.reset();
+ sIsShutdown = true;
+ },
+ ShutdownPhase::XPCOMShutdown);
+ };
+ if (NS_IsMainThread()) {
+ shutdownCleanUp();
+ } else {
+ GetMainThreadSerialEventTarget()->Dispatch(NS_NewRunnableFunction(
+ "MediaFoundationInitializer::Get", shutdownCleanUp));
+ }
}
}
return sInitializer.get();
diff --git a/dom/media/platforms/wmf/WMFDataEncoderUtils.cpp b/dom/media/platforms/wmf/WMFDataEncoderUtils.cpp
new file mode 100644
index 0000000000..3bab3ccb46
--- /dev/null
+++ b/dom/media/platforms/wmf/WMFDataEncoderUtils.cpp
@@ -0,0 +1,221 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "WMFDataEncoderUtils.h"
+
+#include "EncoderConfig.h"
+#include "MFTEncoder.h"
+#include "MediaData.h"
+#include "mozilla/Logging.h"
+
+namespace mozilla {
+
+#define WMF_ENC_LOG(arg, ...) \
+ MOZ_LOG(mozilla::sPEMLog, mozilla::LogLevel::Error, \
+ ("WMFDataEncoderUtils::%s: " arg, __func__, ##__VA_ARGS__))
+
+GUID CodecToSubtype(CodecType aCodec) {
+ switch (aCodec) {
+ case CodecType::H264:
+ return MFVideoFormat_H264;
+ case CodecType::VP8:
+ return MFVideoFormat_VP80;
+ case CodecType::VP9:
+ return MFVideoFormat_VP90;
+ default:
+ return GUID_NULL;
+ }
+}
+
+bool CanCreateWMFEncoder(CodecType aCodec) {
+ bool canCreate = false;
+ mscom::EnsureMTA([&]() {
+ if (!wmf::MediaFoundationInitializer::HasInitialized()) {
+ return;
+ }
+ // Try HW encoder first.
+ auto enc = MakeRefPtr<MFTEncoder>(false /* HW not allowed */);
+ canCreate = SUCCEEDED(enc->Create(CodecToSubtype(aCodec)));
+ if (!canCreate) {
+ // Try SW encoder.
+ enc = MakeRefPtr<MFTEncoder>(true /* HW not allowed */);
+ canCreate = SUCCEEDED(enc->Create(CodecToSubtype(aCodec)));
+ }
+ });
+ return canCreate;
+}
+
+static already_AddRefed<MediaByteBuffer> ParseH264Parameters(
+ nsTArray<uint8_t>& aHeader, const bool aAsAnnexB) {
+ size_t length = aHeader.Length();
+ auto annexB = MakeRefPtr<MediaByteBuffer>(length);
+ PodCopy(annexB->Elements(), aHeader.Elements(), length);
+ annexB->SetLength(length);
+ if (aAsAnnexB) {
+ return annexB.forget();
+ }
+
+ // Convert to avcC.
+ nsTArray<AnnexB::NALEntry> paramSets;
+ AnnexB::ParseNALEntries(
+ Span<const uint8_t>(annexB->Elements(), annexB->Length()), paramSets);
+
+ auto avcc = MakeRefPtr<MediaByteBuffer>();
+ AnnexB::NALEntry& sps = paramSets.ElementAt(0);
+ AnnexB::NALEntry& pps = paramSets.ElementAt(1);
+ const uint8_t* spsPtr = annexB->Elements() + sps.mOffset;
+ H264::WriteExtraData(
+ avcc, spsPtr[1], spsPtr[2], spsPtr[3],
+ Span<const uint8_t>(spsPtr, sps.mSize),
+ Span<const uint8_t>(annexB->Elements() + pps.mOffset, pps.mSize));
+ return avcc.forget();
+}
+
+static uint32_t GetProfile(H264_PROFILE aProfileLevel) {
+ switch (aProfileLevel) {
+ case H264_PROFILE_BASE:
+ return eAVEncH264VProfile_Base;
+ case H264_PROFILE_MAIN:
+ return eAVEncH264VProfile_Main;
+ case H264_PROFILE_HIGH:
+ return eAVEncH264VProfile_High;
+ default:
+ return eAVEncH264VProfile_unknown;
+ }
+}
+
+already_AddRefed<IMFMediaType> CreateInputType(EncoderConfig& aConfig) {
+ RefPtr<IMFMediaType> type;
+ HRESULT hr = wmf::MFCreateMediaType(getter_AddRefs(type));
+ if (FAILED(hr)) {
+ WMF_ENC_LOG("MFCreateMediaType (input) error: %lx", hr);
+ return nullptr;
+ }
+ hr = type->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
+ if (FAILED(hr)) {
+ WMF_ENC_LOG("Create input type: SetGUID (major type) error: %lx", hr);
+ return nullptr;
+ }
+ // Always NV12 input
+ hr = type->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_NV12);
+ if (FAILED(hr)) {
+ WMF_ENC_LOG("Create input type: SetGUID (subtype) error: %lx", hr);
+ return nullptr;
+ }
+ hr = type->SetUINT32(MF_MT_INTERLACE_MODE, MFVideoInterlace_Progressive);
+ if (FAILED(hr)) {
+ WMF_ENC_LOG("Create input type: interlace mode (input) error: %lx", hr);
+ return nullptr;
+ }
+  // WMF requires a framerate to initialize properly. Provide something
+  // reasonable if none is provided.
+ if (!aConfig.mFramerate) {
+ aConfig.mFramerate = 30;
+ }
+ if (aConfig.mFramerate) {
+ hr = MFSetAttributeRatio(type, MF_MT_FRAME_RATE, aConfig.mFramerate, 1);
+ if (FAILED(hr)) {
+ WMF_ENC_LOG("Create input type: frame rate (input) error: %lx", hr);
+ return nullptr;
+ }
+ }
+ hr = MFSetAttributeSize(type, MF_MT_FRAME_SIZE, aConfig.mSize.width,
+ aConfig.mSize.height);
+ if (FAILED(hr)) {
+ WMF_ENC_LOG("Create input type: frame size (input) error: %lx", hr);
+ return nullptr;
+ }
+ return type.forget();
+}
+
+already_AddRefed<IMFMediaType> CreateOutputType(EncoderConfig& aConfig) {
+ RefPtr<IMFMediaType> type;
+ HRESULT hr = wmf::MFCreateMediaType(getter_AddRefs(type));
+ if (FAILED(hr)) {
+ WMF_ENC_LOG("MFCreateMediaType (output) error: %lx", hr);
+ return nullptr;
+ }
+ hr = type->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
+ if (FAILED(hr)) {
+ WMF_ENC_LOG("Create output type: set major type error: %lx", hr);
+ return nullptr;
+ }
+ hr = type->SetGUID(MF_MT_SUBTYPE, CodecToSubtype(aConfig.mCodec));
+ if (FAILED(hr)) {
+ WMF_ENC_LOG("Create output type: set subtype error: %lx", hr);
+ return nullptr;
+ }
+  // A bitrate needs to be set here; attempt to make an educated guess if
+  // none is provided. This could be done per codec to have nicer defaults.
+ size_t longDimension = std::max(aConfig.mSize.width, aConfig.mSize.height);
+ if (!aConfig.mBitrate) {
+ if (longDimension < 720) {
+ aConfig.mBitrate = 2000000;
+ } else if (longDimension < 1080) {
+ aConfig.mBitrate = 4000000;
+ } else {
+ aConfig.mBitrate = 8000000;
+ }
+ }
+  // There is no way to choose between variable and constant bitrate here.
+ hr = type->SetUINT32(MF_MT_AVG_BITRATE, aConfig.mBitrate);
+ if (FAILED(hr)) {
+ WMF_ENC_LOG("Create output type: set bitrate error: %lx", hr);
+ return nullptr;
+ }
+ hr = type->SetUINT32(MF_MT_INTERLACE_MODE, MFVideoInterlace_Progressive);
+ if (FAILED(hr)) {
+    WMF_ENC_LOG("Create output type: set interlace mode error: %lx", hr);
+ return nullptr;
+ }
+  // A positive framerate must always be present here; see the input config part.
+ MOZ_ASSERT(aConfig.mFramerate);
+ if (aConfig.mFramerate) {
+ hr = MFSetAttributeRatio(type, MF_MT_FRAME_RATE, aConfig.mFramerate, 1);
+ if (FAILED(hr)) {
+ WMF_ENC_LOG("Create output type set frame rate error: %lx", hr);
+ return nullptr;
+ }
+ }
+ // Required
+ hr = MFSetAttributeSize(type, MF_MT_FRAME_SIZE, aConfig.mSize.width,
+ aConfig.mSize.height);
+ if (FAILED(hr)) {
+ WMF_ENC_LOG("Create output type set frame size error: %lx", hr);
+ return nullptr;
+ }
+
+ if (aConfig.mCodecSpecific) {
+ if (aConfig.mCodecSpecific->is<H264Specific>()) {
+ MOZ_ASSERT(aConfig.mCodec == CodecType::H264);
+      hr = type->SetUINT32(
+          MF_MT_MPEG2_PROFILE,
+          GetProfile(aConfig.mCodecSpecific->as<H264Specific>().mProfile));
+      if (FAILED(hr)) {
+ WMF_ENC_LOG("Create output type set profile error: %lx", hr);
+ return nullptr;
+ }
+ }
+ }
+
+ return type.forget();
+}
+
+HRESULT SetMediaTypes(RefPtr<MFTEncoder>& aEncoder, EncoderConfig& aConfig) {
+ RefPtr<IMFMediaType> inputType = CreateInputType(aConfig);
+ if (!inputType) {
+ return E_FAIL;
+ }
+
+ RefPtr<IMFMediaType> outputType = CreateOutputType(aConfig);
+ if (!outputType) {
+ return E_FAIL;
+ }
+
+ return aEncoder->SetMediaTypes(inputType, outputType);
+}
+
+} // namespace mozilla
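
Taken together, the helpers in this new file compose the way WMFMediaDataEncoder::InitMFTEncoder uses them further down. A rough sketch of the call sequence (Gecko-internal types assumed, retry and mode-setting details elided):

    // Sketch: probing and configuring an MFT encoder with these helpers.
    HRESULT ConfigureEncoder(EncoderConfig& aConfig,
                             RefPtr<MFTEncoder>& aOutEncoder) {
      if (!CanCreateWMFEncoder(aConfig.mCodec)) {
        return E_FAIL;  // no usable HW or SW MFT for this codec
      }
      auto encoder = MakeRefPtr<MFTEncoder>(false /* aHWNotAllowed */);
      HRESULT hr = encoder->Create(CodecToSubtype(aConfig.mCodec));
      if (FAILED(hr)) {
        return hr;
      }
      hr = SetMediaTypes(encoder, aConfig);
      if (FAILED(hr)) {
        return hr;
      }
      aOutEncoder = std::move(encoder);
      return S_OK;  // encoder now accepts NV12 input
    }
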
diff --git a/dom/media/platforms/wmf/WMFDataEncoderUtils.h b/dom/media/platforms/wmf/WMFDataEncoderUtils.h
index 19f04e768f..0bb4e00086 100644
--- a/dom/media/platforms/wmf/WMFDataEncoderUtils.h
+++ b/dom/media/platforms/wmf/WMFDataEncoderUtils.h
@@ -2,13 +2,16 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-#include "WMFMediaDataEncoder.h"
-
+#ifndef WMFDATAENCODERUTILS_H_
+#define WMFDATAENCODERUTILS_H_
+#include <mfapi.h>
+#include "EncoderConfig.h"
#include "AnnexB.h"
#include "H264.h"
#include "libyuv.h"
#include "mozilla/Logging.h"
#include "mozilla/mscom/EnsureMTA.h"
+#include "WMF.h"
#define WMF_ENC_LOGD(arg, ...) \
MOZ_LOG( \
@@ -21,133 +24,24 @@
namespace mozilla {
-extern LazyLogModule sPEMLog;
-
-static const GUID CodecToSubtype(CodecType aCodec) {
- switch (aCodec) {
- case CodecType::H264:
- return MFVideoFormat_H264;
- case CodecType::VP8:
- return MFVideoFormat_VP80;
- case CodecType::VP9:
- return MFVideoFormat_VP90;
- default:
- return GUID_NULL;
- }
-}
-
-bool CanCreateWMFEncoder(CodecType aCodec) {
- bool canCreate = false;
- mscom::EnsureMTA([&]() {
- if (!wmf::MediaFoundationInitializer::HasInitialized()) {
- return;
- }
- // Try HW encoder first.
- auto enc = MakeRefPtr<MFTEncoder>(false /* HW not allowed */);
- canCreate = SUCCEEDED(enc->Create(CodecToSubtype(aCodec)));
- if (!canCreate) {
- // Try SW encoder.
- enc = MakeRefPtr<MFTEncoder>(true /* HW not allowed */);
- canCreate = SUCCEEDED(enc->Create(CodecToSubtype(aCodec)));
- }
- });
- return canCreate;
-}
-
-static already_AddRefed<MediaByteBuffer> ParseH264Parameters(
- nsTArray<uint8_t>& aHeader, const bool aAsAnnexB) {
- size_t length = aHeader.Length();
- auto annexB = MakeRefPtr<MediaByteBuffer>(length);
- PodCopy(annexB->Elements(), aHeader.Elements(), length);
- annexB->SetLength(length);
- if (aAsAnnexB) {
- return annexB.forget();
- }
+class MFTEncoder;
- // Convert to avcC.
- nsTArray<AnnexB::NALEntry> paramSets;
- AnnexB::ParseNALEntries(
- Span<const uint8_t>(annexB->Elements(), annexB->Length()), paramSets);
-
- auto avcc = MakeRefPtr<MediaByteBuffer>();
- AnnexB::NALEntry& sps = paramSets.ElementAt(0);
- AnnexB::NALEntry& pps = paramSets.ElementAt(1);
- const uint8_t* spsPtr = annexB->Elements() + sps.mOffset;
- H264::WriteExtraData(
- avcc, spsPtr[1], spsPtr[2], spsPtr[3],
- Span<const uint8_t>(spsPtr, sps.mSize),
- Span<const uint8_t>(annexB->Elements() + pps.mOffset, pps.mSize));
- return avcc.forget();
-}
-
-static uint32_t GetProfile(H264_PROFILE aProfileLevel) {
- switch (aProfileLevel) {
- case H264_PROFILE_BASE:
- return eAVEncH264VProfile_Base;
- case H264_PROFILE_MAIN:
- return eAVEncH264VProfile_Main;
- default:
- return eAVEncH264VProfile_unknown;
- }
-}
+extern LazyLogModule sPEMLog;
-already_AddRefed<IMFMediaType> CreateInputType(EncoderConfig& aConfig) {
- RefPtr<IMFMediaType> type;
- return SUCCEEDED(wmf::MFCreateMediaType(getter_AddRefs(type))) &&
- SUCCEEDED(
- type->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video)) &&
- SUCCEEDED(type->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_NV12)) &&
- SUCCEEDED(type->SetUINT32(MF_MT_INTERLACE_MODE,
- MFVideoInterlace_Progressive)) &&
- SUCCEEDED(MFSetAttributeRatio(type, MF_MT_FRAME_RATE,
- aConfig.mFramerate, 1)) &&
- SUCCEEDED(MFSetAttributeSize(type, MF_MT_FRAME_SIZE,
- aConfig.mSize.width,
- aConfig.mSize.height))
- ? type.forget()
- : nullptr;
-}
+GUID CodecToSubtype(CodecType aCodec);
-already_AddRefed<IMFMediaType> CreateOutputType(EncoderConfig& aConfig) {
- RefPtr<IMFMediaType> type;
- if (FAILED(wmf::MFCreateMediaType(getter_AddRefs(type))) ||
- FAILED(type->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video)) ||
- FAILED(type->SetGUID(MF_MT_SUBTYPE, CodecToSubtype(aConfig.mCodec))) ||
- FAILED(type->SetUINT32(MF_MT_AVG_BITRATE, aConfig.mBitrate)) ||
- FAILED(type->SetUINT32(MF_MT_INTERLACE_MODE,
- MFVideoInterlace_Progressive)) ||
- FAILED(
- MFSetAttributeRatio(type, MF_MT_FRAME_RATE, aConfig.mFramerate, 1)) ||
- FAILED(MFSetAttributeSize(type, MF_MT_FRAME_SIZE, aConfig.mSize.width,
- aConfig.mSize.height))) {
- return nullptr;
- }
- if (aConfig.mCodecSpecific) {
- if (aConfig.mCodecSpecific->is<H264Specific>()) {
- if (FAILED(type->SetUINT32(
- MF_MT_MPEG2_PROFILE,
- GetProfile(
- aConfig.mCodecSpecific->as<H264Specific>().mProfile)))) {
- return nullptr;
- }
- }
- }
+bool CanCreateWMFEncoder(CodecType aCodec);
- return type.forget();
-}
+already_AddRefed<MediaByteBuffer> ParseH264Parameters(
+ nsTArray<uint8_t>& aHeader, const bool aAsAnnexB);
+uint32_t GetProfile(H264_PROFILE aProfileLevel);
-HRESULT SetMediaTypes(RefPtr<MFTEncoder>& aEncoder, EncoderConfig& aConfig) {
- RefPtr<IMFMediaType> inputType = CreateInputType(aConfig);
- if (!inputType) {
- return E_FAIL;
- }
+already_AddRefed<IMFMediaType> CreateInputType(EncoderConfig& aConfig);
- RefPtr<IMFMediaType> outputType = CreateOutputType(aConfig);
- if (!outputType) {
- return E_FAIL;
- }
+already_AddRefed<IMFMediaType> CreateOutputType(EncoderConfig& aConfig);
- return aEncoder->SetMediaTypes(inputType, outputType);
-}
+HRESULT SetMediaTypes(RefPtr<MFTEncoder>& aEncoder, EncoderConfig& aConfig);
} // namespace mozilla
+
+#endif // WMFDATAENCODERUTILS_H_
diff --git a/dom/media/platforms/wmf/WMFDecoderModule.cpp b/dom/media/platforms/wmf/WMFDecoderModule.cpp
index b3aae1e750..79556b061b 100644
--- a/dom/media/platforms/wmf/WMFDecoderModule.cpp
+++ b/dom/media/platforms/wmf/WMFDecoderModule.cpp
@@ -85,13 +85,9 @@ static bool IsRemoteAcceleratedCompositor(
ident.mParentProcessType == GeckoProcessType_GPU;
}
-static Atomic<bool> sSupportedTypesInitialized(false);
-static EnumSet<WMFStreamType> sSupportedTypes;
-static EnumSet<WMFStreamType> sLackOfExtensionTypes;
-
/* static */
void WMFDecoderModule::Init(Config aConfig) {
- MOZ_DIAGNOSTIC_ASSERT(NS_IsMainThread());
+  // TODO: add an assertion to prevent this from running on the main thread.
if (XRE_IsContentProcess()) {
// If we're in the content process and the UseGPUDecoder pref is set, it
// means that we've given up on the GPU process (it's been crashing) so we
@@ -134,6 +130,7 @@ void WMFDecoderModule::Init(Config aConfig) {
sDXVAEnabled = sDXVAEnabled && hwVideo;
mozilla::mscom::EnsureMTA([&]() {
+ StaticMutexAutoLock lock(sMutex);
// Store the supported MFT decoders.
sSupportedTypes.clear();
sLackOfExtensionTypes.clear();
@@ -163,7 +160,10 @@ void WMFDecoderModule::Init(Config aConfig) {
}
});
- sSupportedTypesInitialized = true;
+ {
+ StaticMutexAutoLock lock(sMutex);
+ sSupportedTypesInitialized = true;
+ }
WmfDecoderModuleMarkerAndLog("WMFInit Result",
"WMFDecoderModule::Init finishing");
@@ -270,15 +270,13 @@ HRESULT WMFDecoderModule::CreateMFTDecoder(const WMFStreamType& aType,
/* static */
bool WMFDecoderModule::CanCreateMFTDecoder(const WMFStreamType& aType) {
MOZ_ASSERT(WMFStreamType::Unknown < aType && aType < WMFStreamType::SENTINEL);
- if (!sSupportedTypesInitialized) {
- if (NS_IsMainThread()) {
- Init();
- } else {
- nsCOMPtr<nsIRunnable> runnable =
- NS_NewRunnableFunction("WMFDecoderModule::Init", [&]() { Init(); });
- SyncRunnable::DispatchToThread(GetMainThreadSerialEventTarget(),
- runnable);
- }
+ bool hasInitialized = false;
+ {
+ StaticMutexAutoLock lock(sMutex);
+ hasInitialized = sSupportedTypesInitialized;
+ }
+ if (!hasInitialized) {
+ Init();
}
// Check prefs here rather than CreateMFTDecoder so that prefs aren't baked
@@ -324,7 +322,7 @@ bool WMFDecoderModule::CanCreateMFTDecoder(const WMFStreamType& aType) {
break;
}
}
-
+ StaticMutexAutoLock lock(sMutex);
return sSupportedTypes.contains(aType);
}
@@ -380,6 +378,7 @@ media::DecodeSupportSet WMFDecoderModule::Supports(
return media::DecodeSupport::SoftwareDecode;
}
}
+ StaticMutexAutoLock lock(sMutex);
return sLackOfExtensionTypes.contains(type)
? media::DecodeSupport::UnsureDueToLackOfExtension
: media::DecodeSupportSet{};
@@ -486,6 +485,9 @@ bool WMFDecoderModule::IsHEVCSupported() {
return sForceEnableHEVC || StaticPrefs::media_wmf_hevc_enabled() == 1;
}
+/* static */
+void WMFDecoderModule::DisableForceEnableHEVC() { sForceEnableHEVC = false; }
+
} // namespace mozilla
#undef WFM_DECODER_MODULE_STATUS_MARKER
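
The net effect in WMFDecoderModule.cpp is that Init() may now run on any thread, with every read or write of the supported-type sets funneled through sMutex. A self-contained sketch of the locking shape (std types standing in for StaticMutex and EnumSet; the MFT probing itself is elided):

    #include <mutex>
    #include <set>

    std::mutex gMutex;
    bool gInitialized = false;   // sSupportedTypesInitialized
    std::set<int> gSupported;    // sSupportedTypes

    void Init() {
      std::lock_guard<std::mutex> lock(gMutex);
      gSupported.clear();
      gSupported.insert(0);      // would probe each MFT decoder here
      gInitialized = true;
    }

    bool CanCreate(int aType) {
      bool initialized;
      {
        std::lock_guard<std::mutex> lock(gMutex);
        initialized = gInitialized;
      }
      if (!initialized) {
        Init();  // benign race: two callers may both probe; results agree
      }
      std::lock_guard<std::mutex> lock(gMutex);
      return gSupported.count(aType) > 0;
    }
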
diff --git a/dom/media/platforms/wmf/WMFDecoderModule.h b/dom/media/platforms/wmf/WMFDecoderModule.h
index 3b130fd657..6debdc5836 100644
--- a/dom/media/platforms/wmf/WMFDecoderModule.h
+++ b/dom/media/platforms/wmf/WMFDecoderModule.h
@@ -10,6 +10,7 @@
# include "PlatformDecoderModule.h"
# include "WMF.h"
# include "WMFUtils.h"
+# include "mozilla/Atomics.h"
namespace mozilla {
@@ -43,7 +44,9 @@ class WMFDecoderModule : public PlatformDecoderModule {
ForceEnableHEVC,
};
- // Called on main thread.
+  // Can be called on any thread, but avoid calling this on the main thread
+  // because the initialization takes a long time and we don't want to block
+  // the main thread.
static void Init(Config aConfig = Config::None);
// Called from any thread, must call init first
@@ -53,16 +56,24 @@ class WMFDecoderModule : public PlatformDecoderModule {
RefPtr<MFTDecoder>& aDecoder);
static bool CanCreateMFTDecoder(const WMFStreamType& aType);
+ static void DisableForceEnableHEVC();
+
private:
  // This is used for the GPU process only, where we can't set the preference
  // directly (it can only be set in the parent process), so we need a way to
  // force-enable HEVC in order to report the support information via telemetry.
- static inline bool sForceEnableHEVC = false;
+ static inline Atomic<bool> sForceEnableHEVC{false};
static bool IsHEVCSupported();
WMFDecoderModule() = default;
virtual ~WMFDecoderModule() = default;
+
+ static inline StaticMutex sMutex;
+ static inline bool sSupportedTypesInitialized MOZ_GUARDED_BY(sMutex) = false;
+ static inline EnumSet<WMFStreamType> sSupportedTypes MOZ_GUARDED_BY(sMutex);
+ static inline EnumSet<WMFStreamType> sLackOfExtensionTypes
+ MOZ_GUARDED_BY(sMutex);
};
} // namespace mozilla
diff --git a/dom/media/platforms/wmf/WMFEncoderModule.cpp b/dom/media/platforms/wmf/WMFEncoderModule.cpp
index 7b5af9bf50..9f44ce0c5e 100644
--- a/dom/media/platforms/wmf/WMFEncoderModule.cpp
+++ b/dom/media/platforms/wmf/WMFEncoderModule.cpp
@@ -26,6 +26,9 @@ bool WMFEncoderModule::Supports(const EncoderConfig& aConfig) const {
if (aConfig.IsAudio()) {
return false;
}
+ if (aConfig.mScalabilityMode != ScalabilityMode::None) {
+ return false;
+ }
return SupportsCodec(aConfig.mCodec);
}
diff --git a/dom/media/platforms/wmf/WMFMediaDataEncoder.cpp b/dom/media/platforms/wmf/WMFMediaDataEncoder.cpp
new file mode 100644
index 0000000000..fcacedbd05
--- /dev/null
+++ b/dom/media/platforms/wmf/WMFMediaDataEncoder.cpp
@@ -0,0 +1,399 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "WMFMediaDataEncoder.h"
+
+#include "ImageContainer.h"
+#include "ImageConversion.h"
+#include "MFTEncoder.h"
+#include "PlatformEncoderModule.h"
+#include "TimeUnits.h"
+#include "WMFDataEncoderUtils.h"
+#include "WMFUtils.h"
+#include <comdef.h>
+#include "mozilla/WindowsProcessMitigations.h"
+
+namespace mozilla {
+
+using InitPromise = MediaDataEncoder::InitPromise;
+using EncodePromise = MediaDataEncoder::EncodePromise;
+using ReconfigurationPromise = MediaDataEncoder::ReconfigurationPromise;
+
+WMFMediaDataEncoder::WMFMediaDataEncoder(const EncoderConfig& aConfig,
+ const RefPtr<TaskQueue>& aTaskQueue)
+ : mConfig(aConfig),
+ mTaskQueue(aTaskQueue),
+ mHardwareNotAllowed(aConfig.mHardwarePreference ==
+ HardwarePreference::RequireSoftware ||
+ IsWin32kLockedDown()) {
+  WMF_ENC_LOGE("WMFMediaDataEncoder ctor: %s (hw not allowed: %s)",
+ aConfig.ToString().get(), mHardwareNotAllowed ? "yes" : "no");
+ MOZ_ASSERT(mTaskQueue);
+}
+
+RefPtr<InitPromise> WMFMediaDataEncoder::Init() {
+ return InvokeAsync(mTaskQueue, this, __func__,
+ &WMFMediaDataEncoder::ProcessInit);
+}
+RefPtr<EncodePromise> WMFMediaDataEncoder::Encode(const MediaData* aSample) {
+ MOZ_ASSERT(aSample);
+
+ RefPtr<const VideoData> sample(aSample->As<const VideoData>());
+
+ return InvokeAsync<RefPtr<const VideoData>>(
+ mTaskQueue, this, __func__, &WMFMediaDataEncoder::ProcessEncode,
+ std::move(sample));
+}
+RefPtr<EncodePromise> WMFMediaDataEncoder::Drain() {
+ return InvokeAsync(mTaskQueue, __func__,
+ [self = RefPtr<WMFMediaDataEncoder>(this)]() {
+ nsTArray<RefPtr<IMFSample>> outputs;
+ return SUCCEEDED(self->mEncoder->Drain(outputs))
+ ? self->ProcessOutputSamples(outputs)
+ : EncodePromise::CreateAndReject(
+ NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ });
+}
+RefPtr<ShutdownPromise> WMFMediaDataEncoder::Shutdown() {
+ return InvokeAsync(mTaskQueue, __func__,
+ [self = RefPtr<WMFMediaDataEncoder>(this)]() {
+ if (self->mEncoder) {
+ self->mEncoder->Destroy();
+ self->mEncoder = nullptr;
+ }
+ return ShutdownPromise::CreateAndResolve(true, __func__);
+ });
+}
+RefPtr<GenericPromise> WMFMediaDataEncoder::SetBitrate(uint32_t aBitsPerSec) {
+ return InvokeAsync(
+ mTaskQueue, __func__,
+ [self = RefPtr<WMFMediaDataEncoder>(this), aBitsPerSec]() {
+ MOZ_ASSERT(self->mEncoder);
+ return SUCCEEDED(self->mEncoder->SetBitrate(aBitsPerSec))
+ ? GenericPromise::CreateAndResolve(true, __func__)
+ : GenericPromise::CreateAndReject(
+ NS_ERROR_DOM_MEDIA_NOT_SUPPORTED_ERR, __func__);
+ });
+}
+
+RefPtr<ReconfigurationPromise> WMFMediaDataEncoder::Reconfigure(
+ const RefPtr<const EncoderConfigurationChangeList>& aConfigurationChanges) {
+ // General reconfiguration interface not implemented right now
+ return MediaDataEncoder::ReconfigurationPromise::CreateAndReject(
+ NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+};
+
+nsCString WMFMediaDataEncoder::GetDescriptionName() const {
+ return MFTEncoder::GetFriendlyName(CodecToSubtype(mConfig.mCodec));
+}
+
+RefPtr<InitPromise> WMFMediaDataEncoder::ProcessInit() {
+ AssertOnTaskQueue();
+
+ MOZ_ASSERT(!mEncoder,
+ "Should not initialize encoder again without shutting down");
+
+ if (!wmf::MediaFoundationInitializer::HasInitialized()) {
+ return InitPromise::CreateAndReject(
+ MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ RESULT_DETAIL("Can't create the MFT encoder.")),
+ __func__);
+ }
+
+ RefPtr<MFTEncoder> encoder = new MFTEncoder(mHardwareNotAllowed);
+ HRESULT hr;
+ mscom::EnsureMTA([&]() { hr = InitMFTEncoder(encoder); });
+
+ if (FAILED(hr)) {
+ _com_error error(hr);
+ WMF_ENC_LOGE("init MFTEncoder: error = 0x%lX, %ls", hr,
+ error.ErrorMessage());
+ return InitPromise::CreateAndReject(
+ MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ RESULT_DETAIL("Can't create the MFT encoder.")),
+ __func__);
+ }
+
+ mEncoder = std::move(encoder);
+ FillConfigData();
+ return InitPromise::CreateAndResolve(TrackInfo::TrackType::kVideoTrack,
+ __func__);
+}
+
+HRESULT WMFMediaDataEncoder::InitMFTEncoder(RefPtr<MFTEncoder>& aEncoder) {
+ HRESULT hr = aEncoder->Create(CodecToSubtype(mConfig.mCodec));
+ if (FAILED(hr)) {
+ _com_error error(hr);
+ WMF_ENC_LOGE("MFTEncoder::Create: error = 0x%lX, %ls", hr,
+ error.ErrorMessage());
+ return hr;
+ }
+
+ hr = SetMediaTypes(aEncoder, mConfig);
+ if (FAILED(hr)) {
+ _com_error error(hr);
+ WMF_ENC_LOGE("MFTEncoder::SetMediaType: error = 0x%lX, %ls", hr,
+ error.ErrorMessage());
+ return hr;
+ }
+
+ hr = aEncoder->SetModes(mConfig.mBitrate);
+ if (FAILED(hr)) {
+ _com_error error(hr);
+ WMF_ENC_LOGE("MFTEncoder::SetMode: error = 0x%lX, %ls", hr,
+ error.ErrorMessage());
+ return hr;
+ }
+
+ return S_OK;
+}
+
+void WMFMediaDataEncoder::FillConfigData() {
+ nsTArray<UINT8> header;
+ NS_ENSURE_TRUE_VOID(SUCCEEDED(mEncoder->GetMPEGSequenceHeader(header)));
+
+ mConfigData =
+ header.Length() > 0
+ ? ParseH264Parameters(header, mConfig.mUsage == Usage::Realtime)
+ : nullptr;
+}
+
+RefPtr<EncodePromise> WMFMediaDataEncoder::ProcessEncode(
+ RefPtr<const VideoData>&& aSample) {
+ AssertOnTaskQueue();
+ MOZ_ASSERT(mEncoder);
+ MOZ_ASSERT(aSample);
+
+ RefPtr<IMFSample> nv12 = ConvertToNV12InputSample(std::move(aSample));
+ if (!nv12 || FAILED(mEncoder->PushInput(std::move(nv12)))) {
+ WMF_ENC_LOGE("failed to process input sample");
+ return EncodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ RESULT_DETAIL("Failed to process input.")),
+ __func__);
+ }
+
+ nsTArray<RefPtr<IMFSample>> outputs;
+ HRESULT hr = mEncoder->TakeOutput(outputs);
+ if (hr == MF_E_TRANSFORM_STREAM_CHANGE) {
+ FillConfigData();
+ } else if (FAILED(hr)) {
+ WMF_ENC_LOGE("failed to process output");
+ return EncodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ RESULT_DETAIL("Failed to process output.")),
+ __func__);
+ }
+
+ return ProcessOutputSamples(outputs);
+}
+
+already_AddRefed<IMFSample> WMFMediaDataEncoder::ConvertToNV12InputSample(
+ RefPtr<const VideoData>&& aData) {
+ AssertOnTaskQueue();
+ MOZ_ASSERT(mEncoder);
+
+ struct NV12Info {
+ int32_t mYStride = 0;
+ int32_t mUVStride = 0;
+ size_t mYLength = 0;
+ size_t mBufferLength = 0;
+ } info;
+
+ if (const layers::PlanarYCbCrImage* image =
+ aData->mImage->AsPlanarYCbCrImage()) {
+ // Assume this is I420. If it's not, the whole process fails in
+ // ConvertToNV12 below.
+ const layers::PlanarYCbCrData* yuv = image->GetData();
+ info.mYStride = yuv->mYStride;
+ info.mUVStride = yuv->mCbCrStride * 2;
+ info.mYLength = info.mYStride * yuv->YDataSize().height;
+ info.mBufferLength =
+ info.mYLength + (info.mUVStride * yuv->CbCrDataSize().height);
+ } else {
+ info.mYStride = aData->mImage->GetSize().width;
+ info.mUVStride = info.mYStride;
+
+ const int32_t yHeight = aData->mImage->GetSize().height;
+ const int32_t uvHeight = yHeight / 2;
+
+ CheckedInt<size_t> yLength(info.mYStride);
+ yLength *= yHeight;
+ if (!yLength.isValid()) {
+ WMF_ENC_LOGE("yLength overflows");
+ return nullptr;
+ }
+ info.mYLength = yLength.value();
+
+ CheckedInt<size_t> uvLength(info.mUVStride);
+ uvLength *= uvHeight;
+ if (!uvLength.isValid()) {
+ WMF_ENC_LOGE("uvLength overflows");
+ return nullptr;
+ }
+
+ CheckedInt<size_t> length(yLength);
+ length += uvLength;
+ if (!length.isValid()) {
+ WMF_ENC_LOGE("length overflows");
+ return nullptr;
+ }
+ info.mBufferLength = length.value();
+ }
+
+ RefPtr<IMFSample> input;
+ HRESULT hr = mEncoder->CreateInputSample(&input, info.mBufferLength);
+ if (FAILED(hr)) {
+ _com_error error(hr);
+ WMF_ENC_LOGE("CreateInputSample: error = 0x%lX, %ls", hr,
+ error.ErrorMessage());
+ return nullptr;
+ }
+
+ RefPtr<IMFMediaBuffer> buffer;
+ hr = input->GetBufferByIndex(0, getter_AddRefs(buffer));
+ if (FAILED(hr)) {
+ _com_error error(hr);
+ WMF_ENC_LOGE("GetBufferByIndex: error = 0x%lX, %ls", hr,
+ error.ErrorMessage());
+ return nullptr;
+ }
+
+ hr = buffer->SetCurrentLength(info.mBufferLength);
+ if (FAILED(hr)) {
+ _com_error error(hr);
+ WMF_ENC_LOGE("SetCurrentLength: error = 0x%lX, %ls", hr,
+ error.ErrorMessage());
+ return nullptr;
+ }
+
+ LockBuffer lockBuffer(buffer);
+ hr = lockBuffer.Result();
+ if (FAILED(hr)) {
+ _com_error error(hr);
+ WMF_ENC_LOGE("LockBuffer: error = 0x%lX, %ls", hr, error.ErrorMessage());
+ return nullptr;
+ }
+
+ nsresult rv =
+ ConvertToNV12(aData->mImage, lockBuffer.Data(), info.mYStride,
+ lockBuffer.Data() + info.mYLength, info.mUVStride);
+ if (NS_FAILED(rv)) {
+ WMF_ENC_LOGE("Failed to convert to NV12");
+ return nullptr;
+ }
+
+ hr = input->SetSampleTime(UsecsToHNs(aData->mTime.ToMicroseconds()));
+ if (FAILED(hr)) {
+ _com_error error(hr);
+ WMF_ENC_LOGE("SetSampleTime: error = 0x%lX, %ls", hr, error.ErrorMessage());
+ return nullptr;
+ }
+
+ hr = input->SetSampleDuration(UsecsToHNs(aData->mDuration.ToMicroseconds()));
+ if (FAILED(hr)) {
+ _com_error error(hr);
+ WMF_ENC_LOGE("SetSampleDuration: error = 0x%lX, %ls", hr,
+ error.ErrorMessage());
+ return nullptr;
+ }
+
+ return input.forget();
+}
+
+RefPtr<EncodePromise> WMFMediaDataEncoder::ProcessOutputSamples(
+ nsTArray<RefPtr<IMFSample>>& aSamples) {
+ EncodedData frames;
+ for (auto sample : aSamples) {
+ RefPtr<MediaRawData> frame = IMFSampleToMediaData(sample);
+ if (frame) {
+ frames.AppendElement(std::move(frame));
+ } else {
+ WMF_ENC_LOGE("failed to convert output frame");
+ }
+ }
+ aSamples.Clear();
+ return EncodePromise::CreateAndResolve(std::move(frames), __func__);
+}
+
+already_AddRefed<MediaRawData> WMFMediaDataEncoder::IMFSampleToMediaData(
+ RefPtr<IMFSample>& aSample) {
+ AssertOnTaskQueue();
+ MOZ_ASSERT(aSample);
+
+ RefPtr<IMFMediaBuffer> buffer;
+ HRESULT hr = aSample->GetBufferByIndex(0, getter_AddRefs(buffer));
+ NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
+
+ LockBuffer lockBuffer(buffer);
+ NS_ENSURE_TRUE(SUCCEEDED(lockBuffer.Result()), nullptr);
+
+ LONGLONG time = 0;
+ hr = aSample->GetSampleTime(&time);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
+
+ LONGLONG duration = 0;
+ hr = aSample->GetSampleDuration(&duration);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
+
+ bool isKeyframe =
+ MFGetAttributeUINT32(aSample, MFSampleExtension_CleanPoint, false);
+
+ auto frame = MakeRefPtr<MediaRawData>();
+ if (!WriteFrameData(frame, lockBuffer, isKeyframe)) {
+ return nullptr;
+ }
+
+ frame->mTime = media::TimeUnit::FromMicroseconds(HNsToUsecs(time));
+ frame->mDuration = media::TimeUnit::FromMicroseconds(HNsToUsecs(duration));
+ frame->mKeyframe = isKeyframe;
+
+ return frame.forget();
+}
+
+bool WMFMediaDataEncoder::WriteFrameData(RefPtr<MediaRawData>& aDest,
+ LockBuffer& aSrc, bool aIsKeyframe) {
+ if (mConfig.mCodec == CodecType::H264) {
+ size_t prependLength = 0;
+ RefPtr<MediaByteBuffer> avccHeader;
+ if (aIsKeyframe && mConfigData) {
+ if (mConfig.mUsage == Usage::Realtime) {
+ prependLength = mConfigData->Length();
+ } else {
+ avccHeader = mConfigData;
+ }
+ }
+
+ UniquePtr<MediaRawDataWriter> writer(aDest->CreateWriter());
+ if (!writer->SetSize(prependLength + aSrc.Length())) {
+      WMF_ENC_LOGE("failed to allocate output buffer");
+ return false;
+ }
+
+ if (prependLength > 0) {
+ PodCopy(writer->Data(), mConfigData->Elements(), prependLength);
+ }
+ PodCopy(writer->Data() + prependLength, aSrc.Data(), aSrc.Length());
+
+ if (mConfig.mUsage != Usage::Realtime &&
+ !AnnexB::ConvertSampleToAVCC(aDest, avccHeader)) {
+      WMF_ENC_LOGE("failed to convert annex-b sample to AVCC");
+ return false;
+ }
+
+ return true;
+ }
+ UniquePtr<MediaRawDataWriter> writer(aDest->CreateWriter());
+ if (!writer->SetSize(aSrc.Length())) {
+    WMF_ENC_LOGE("failed to allocate output buffer");
+ return false;
+ }
+
+ PodCopy(writer->Data(), aSrc.Data(), aSrc.Length());
+ return true;
+}
+
+} // namespace mozilla
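
One detail worth calling out from ConvertToNV12InputSample: the input sample buffer is sized for NV12, i.e. a full-resolution Y plane followed by an interleaved Cb/Cr plane at half vertical resolution. A standalone sketch of that sizing (the real code guards the packed-fallback arithmetic with CheckedInt):

    #include <cstddef>
    #include <cstdint>

    // NV12 layout: Y plane (yStride x height) followed by interleaved UV rows
    // (uvStride x height/2). For the non-planar fallback path above,
    // yStride == width and uvStride == yStride.
    size_t Nv12BufferLength(int32_t aYStride, int32_t aUVStride,
                            int32_t aHeight) {
      const size_t yLength = static_cast<size_t>(aYStride) * aHeight;
      const size_t uvLength = static_cast<size_t>(aUVStride) * (aHeight / 2);
      return yLength + uvLength;
    }
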
diff --git a/dom/media/platforms/wmf/WMFMediaDataEncoder.h b/dom/media/platforms/wmf/WMFMediaDataEncoder.h
index 31a63c8347..db1077e699 100644
--- a/dom/media/platforms/wmf/WMFMediaDataEncoder.h
+++ b/dom/media/platforms/wmf/WMFMediaDataEncoder.h
@@ -7,84 +7,30 @@
#ifndef WMFMediaDataEncoder_h_
#define WMFMediaDataEncoder_h_
-#include "ImageContainer.h"
#include "MFTEncoder.h"
#include "PlatformEncoderModule.h"
-#include "TimeUnits.h"
#include "WMFDataEncoderUtils.h"
#include "WMFUtils.h"
+#include <comdef.h>
+#include "mozilla/WindowsProcessMitigations.h"
namespace mozilla {
class WMFMediaDataEncoder final : public MediaDataEncoder {
public:
WMFMediaDataEncoder(const EncoderConfig& aConfig,
- const RefPtr<TaskQueue>& aTaskQueue)
- : mConfig(aConfig),
- mTaskQueue(aTaskQueue),
- mHardwareNotAllowed(aConfig.mHardwarePreference ==
- HardwarePreference::RequireSoftware ||
- aConfig.mHardwarePreference ==
- HardwarePreference::None) {
- MOZ_ASSERT(mTaskQueue);
- }
+ const RefPtr<TaskQueue>& aTaskQueue);
- RefPtr<InitPromise> Init() override {
- return InvokeAsync(mTaskQueue, this, __func__,
- &WMFMediaDataEncoder::ProcessInit);
- }
- RefPtr<EncodePromise> Encode(const MediaData* aSample) override {
- MOZ_ASSERT(aSample);
-
- RefPtr<const VideoData> sample(aSample->As<const VideoData>());
-
- return InvokeAsync<RefPtr<const VideoData>>(
- mTaskQueue, this, __func__, &WMFMediaDataEncoder::ProcessEncode,
- std::move(sample));
- }
- RefPtr<EncodePromise> Drain() override {
- return InvokeAsync(
- mTaskQueue, __func__, [self = RefPtr<WMFMediaDataEncoder>(this)]() {
- nsTArray<RefPtr<IMFSample>> outputs;
- return SUCCEEDED(self->mEncoder->Drain(outputs))
- ? self->ProcessOutputSamples(outputs)
- : EncodePromise::CreateAndReject(
- NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
- });
- }
- RefPtr<ShutdownPromise> Shutdown() override {
- return InvokeAsync(
- mTaskQueue, __func__, [self = RefPtr<WMFMediaDataEncoder>(this)]() {
- if (self->mEncoder) {
- self->mEncoder->Destroy();
- self->mEncoder = nullptr;
- }
- return ShutdownPromise::CreateAndResolve(true, __func__);
- });
- }
- RefPtr<GenericPromise> SetBitrate(uint32_t aBitsPerSec) override {
- return InvokeAsync(
- mTaskQueue, __func__,
- [self = RefPtr<WMFMediaDataEncoder>(this), aBitsPerSec]() {
- MOZ_ASSERT(self->mEncoder);
- return SUCCEEDED(self->mEncoder->SetBitrate(aBitsPerSec))
- ? GenericPromise::CreateAndResolve(true, __func__)
- : GenericPromise::CreateAndReject(
- NS_ERROR_DOM_MEDIA_NOT_SUPPORTED_ERR, __func__);
- });
- }
+ RefPtr<InitPromise> Init() override;
+ RefPtr<EncodePromise> Encode(const MediaData* aSample) override;
+ RefPtr<EncodePromise> Drain() override;
+ RefPtr<ShutdownPromise> Shutdown() override;
+ RefPtr<GenericPromise> SetBitrate(uint32_t aBitsPerSec) override;
RefPtr<ReconfigurationPromise> Reconfigure(
const RefPtr<const EncoderConfigurationChangeList>& aConfigurationChanges)
- override {
- // General reconfiguration interface not implemented right now
- return MediaDataEncoder::ReconfigurationPromise::CreateAndReject(
- NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
- };
-
- nsCString GetDescriptionName() const override {
- return MFTEncoder::GetFriendlyName(CodecToSubtype(mConfig.mCodec));
- }
+ override;
+ nsCString GetDescriptionName() const override;
private:
// Automatically lock/unlock IMFMediaBuffer.
@@ -107,233 +53,29 @@ class WMFMediaDataEncoder final : public MediaDataEncoder {
private:
RefPtr<IMFMediaBuffer> mBuffer;
- BYTE* mBytes;
- DWORD mCapacity;
- DWORD mLength;
- HRESULT mResult;
+ BYTE* mBytes{};
+ DWORD mCapacity{};
+ DWORD mLength{};
+ HRESULT mResult{};
};
- RefPtr<InitPromise> ProcessInit() {
- AssertOnTaskQueue();
-
- MOZ_ASSERT(!mEncoder,
- "Should not initialize encoder again without shutting down");
-
- if (!wmf::MediaFoundationInitializer::HasInitialized()) {
- return InitPromise::CreateAndReject(
- MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
- RESULT_DETAIL("Can't create the MFT encoder.")),
- __func__);
- }
-
- RefPtr<MFTEncoder> encoder = new MFTEncoder(mHardwareNotAllowed);
- HRESULT hr;
- mscom::EnsureMTA([&]() { hr = InitMFTEncoder(encoder); });
-
- if (FAILED(hr)) {
- WMF_ENC_LOGE("init MFTEncoder: error = 0x%lX", hr);
- return InitPromise::CreateAndReject(
- MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
- RESULT_DETAIL("Can't create the MFT encoder.")),
- __func__);
- }
-
- mEncoder = std::move(encoder);
- FillConfigData();
- return InitPromise::CreateAndResolve(TrackInfo::TrackType::kVideoTrack,
- __func__);
- }
-
- HRESULT InitMFTEncoder(RefPtr<MFTEncoder>& aEncoder) {
- HRESULT hr = aEncoder->Create(CodecToSubtype(mConfig.mCodec));
- NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
-
- hr = SetMediaTypes(aEncoder, mConfig);
- NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
-
- hr = aEncoder->SetModes(mConfig.mBitrate);
- NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+ RefPtr<InitPromise> ProcessInit();
- return S_OK;
- }
+ HRESULT InitMFTEncoder(RefPtr<MFTEncoder>& aEncoder);
+ void FillConfigData();
- void FillConfigData() {
- nsTArray<UINT8> header;
- NS_ENSURE_TRUE_VOID(SUCCEEDED(mEncoder->GetMPEGSequenceHeader(header)));
-
- mConfigData =
- header.Length() > 0
- ? ParseH264Parameters(header, mConfig.mUsage == Usage::Realtime)
- : nullptr;
- }
-
- RefPtr<EncodePromise> ProcessEncode(RefPtr<const VideoData>&& aSample) {
- AssertOnTaskQueue();
- MOZ_ASSERT(mEncoder);
- MOZ_ASSERT(aSample);
-
- RefPtr<IMFSample> nv12 = ConvertToNV12InputSample(std::move(aSample));
- if (!nv12 || FAILED(mEncoder->PushInput(std::move(nv12)))) {
- WMF_ENC_LOGE("failed to process input sample");
- return EncodePromise::CreateAndReject(
- MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
- RESULT_DETAIL("Failed to process input.")),
- __func__);
- }
-
- nsTArray<RefPtr<IMFSample>> outputs;
- HRESULT hr = mEncoder->TakeOutput(outputs);
- if (hr == MF_E_TRANSFORM_STREAM_CHANGE) {
- FillConfigData();
- } else if (FAILED(hr)) {
- WMF_ENC_LOGE("failed to process output");
- return EncodePromise::CreateAndReject(
- MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
- RESULT_DETAIL("Failed to process output.")),
- __func__);
- }
-
- return ProcessOutputSamples(outputs);
- }
+ RefPtr<EncodePromise> ProcessEncode(RefPtr<const VideoData>&& aSample);
already_AddRefed<IMFSample> ConvertToNV12InputSample(
- RefPtr<const VideoData>&& aData) {
- AssertOnTaskQueue();
- MOZ_ASSERT(mEncoder);
-
- const layers::PlanarYCbCrImage* image = aData->mImage->AsPlanarYCbCrImage();
- // TODO: Take care non planar Y-Cb-Cr image (Bug 1881647).
- NS_ENSURE_TRUE(image, nullptr);
-
- const layers::PlanarYCbCrData* yuv = image->GetData();
- auto ySize = yuv->YDataSize();
- auto cbcrSize = yuv->CbCrDataSize();
- size_t yLength = yuv->mYStride * ySize.height;
- size_t length = yLength + (yuv->mCbCrStride * cbcrSize.height * 2);
-
- RefPtr<IMFSample> input;
- HRESULT hr = mEncoder->CreateInputSample(&input, length);
- NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
-
- RefPtr<IMFMediaBuffer> buffer;
- hr = input->GetBufferByIndex(0, getter_AddRefs(buffer));
- NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
-
- hr = buffer->SetCurrentLength(length);
- NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
-
- LockBuffer lockBuffer(buffer);
- NS_ENSURE_TRUE(SUCCEEDED(lockBuffer.Result()), nullptr);
-
- // TODO: Take care non I420 image (Bug 1881647).
- bool ok = libyuv::I420ToNV12(
- yuv->mYChannel, yuv->mYStride, yuv->mCbChannel,
- yuv->mCbCrStride, yuv->mCrChannel, yuv->mCbCrStride,
- lockBuffer.Data(), yuv->mYStride, lockBuffer.Data() + yLength,
- yuv->mCbCrStride * 2, ySize.width, ySize.height) == 0;
- NS_ENSURE_TRUE(ok, nullptr);
-
- hr = input->SetSampleTime(UsecsToHNs(aData->mTime.ToMicroseconds()));
- NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
-
- hr =
- input->SetSampleDuration(UsecsToHNs(aData->mDuration.ToMicroseconds()));
- NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
-
- return input.forget();
- }
+ RefPtr<const VideoData>&& aData);
RefPtr<EncodePromise> ProcessOutputSamples(
- nsTArray<RefPtr<IMFSample>>& aSamples) {
- EncodedData frames;
- for (auto sample : aSamples) {
- RefPtr<MediaRawData> frame = IMFSampleToMediaData(sample);
- if (frame) {
- frames.AppendElement(std::move(frame));
- } else {
- WMF_ENC_LOGE("failed to convert output frame");
- }
- }
- aSamples.Clear();
- return EncodePromise::CreateAndResolve(std::move(frames), __func__);
- }
-
+ nsTArray<RefPtr<IMFSample>>& aSamples);
already_AddRefed<MediaRawData> IMFSampleToMediaData(
- RefPtr<IMFSample>& aSample) {
- AssertOnTaskQueue();
- MOZ_ASSERT(aSample);
-
- RefPtr<IMFMediaBuffer> buffer;
- HRESULT hr = aSample->GetBufferByIndex(0, getter_AddRefs(buffer));
- NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
-
- LockBuffer lockBuffer(buffer);
- NS_ENSURE_TRUE(SUCCEEDED(lockBuffer.Result()), nullptr);
-
- LONGLONG time = 0;
- hr = aSample->GetSampleTime(&time);
- NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
-
- LONGLONG duration = 0;
- hr = aSample->GetSampleDuration(&duration);
- NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
-
- bool isKeyframe =
- MFGetAttributeUINT32(aSample, MFSampleExtension_CleanPoint, false);
-
- auto frame = MakeRefPtr<MediaRawData>();
- if (!WriteFrameData(frame, lockBuffer, isKeyframe)) {
- return nullptr;
- }
-
- frame->mTime = media::TimeUnit::FromMicroseconds(HNsToUsecs(time));
- frame->mDuration = media::TimeUnit::FromMicroseconds(HNsToUsecs(duration));
- frame->mKeyframe = isKeyframe;
-
- return frame.forget();
- }
+ RefPtr<IMFSample>& aSample);
bool WriteFrameData(RefPtr<MediaRawData>& aDest, LockBuffer& aSrc,
- bool aIsKeyframe) {
- if (mConfig.mCodec == CodecType::H264) {
- size_t prependLength = 0;
- RefPtr<MediaByteBuffer> avccHeader;
- if (aIsKeyframe && mConfigData) {
- if (mConfig.mUsage == Usage::Realtime) {
- prependLength = mConfigData->Length();
- } else {
- avccHeader = mConfigData;
- }
- }
-
- UniquePtr<MediaRawDataWriter> writer(aDest->CreateWriter());
- if (!writer->SetSize(prependLength + aSrc.Length())) {
- WMF_ENC_LOGE("fail to allocate output buffer");
- return false;
- }
-
- if (prependLength > 0) {
- PodCopy(writer->Data(), mConfigData->Elements(), prependLength);
- }
- PodCopy(writer->Data() + prependLength, aSrc.Data(), aSrc.Length());
-
- if (mConfig.mUsage != Usage::Realtime &&
- !AnnexB::ConvertSampleToAVCC(aDest, avccHeader)) {
- WMF_ENC_LOGE("fail to convert annex-b sample to AVCC");
- return false;
- }
-
- return true;
- }
- UniquePtr<MediaRawDataWriter> writer(aDest->CreateWriter());
- if (!writer->SetSize(aSrc.Length())) {
- WMF_ENC_LOGE("fail to allocate output buffer");
- return false;
- }
-
- PodCopy(writer->Data(), aSrc.Data(), aSrc.Length());
- return true;
- }
+ bool aIsKeyframe);
void AssertOnTaskQueue() { MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn()); }
diff --git a/dom/media/platforms/wmf/WMFVideoMFTManager.cpp b/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
index 65480c4a01..2344de94d9 100644
--- a/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
+++ b/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
@@ -711,6 +711,8 @@ WMFVideoMFTManager::CreateBasicVideoFrame(IMFSample* aSample,
aStage.SetImageFormat(DecodeStage::P016);
}
aStage.SetResolution(videoWidth, videoHeight);
+ aStage.SetStartTimeAndEndTime(v->mTime.ToMicroseconds(),
+ v->GetEndTime().ToMicroseconds());
});
v.forget(aOutVideoData);
@@ -753,7 +755,6 @@ WMFVideoMFTManager::CreateD3DVideoFrame(IMFSample* aSample,
TimeUnit::FromMicroseconds(-1));
NS_ENSURE_TRUE(v, E_FAIL);
- v.forget(aOutVideoData);
mPerformanceRecorder.Record(pts.ToMicroseconds(), [&](DecodeStage& aStage) {
aStage.SetColorDepth(mVideoInfo.mColorDepth);
@@ -771,8 +772,11 @@ WMFVideoMFTManager::CreateD3DVideoFrame(IMFSample* aSample,
aStage.SetImageFormat(DecodeStage::P016);
}
aStage.SetResolution(size.width, size.height);
+ aStage.SetStartTimeAndEndTime(v->mTime.ToMicroseconds(),
+ v->GetEndTime().ToMicroseconds());
});
+ v.forget(aOutVideoData);
return S_OK;
}
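
The reordering of v.forget(aOutVideoData) in CreateD3DVideoFrame is a correctness fix, not a tidy-up: RefPtr::forget() transfers the reference and nulls the source, so the SetStartTimeAndEndTime call added inside the Record() lambda would otherwise dereference a null v. In miniature (record() and MakeVideoData() are hypothetical stand-ins):

    RefPtr<VideoData> v = MakeVideoData();   // hypothetical source
    v.forget(aOutVideoData);                 // v is now nullptr
    record([&] { return v->mTime; });        // use-after-forget: null deref

    // Fixed ordering: read v inside the lambda first, hand off last.
    record([&] { return v->mTime; });
    v.forget(aOutVideoData);
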
diff --git a/dom/media/platforms/wmf/moz.build b/dom/media/platforms/wmf/moz.build
index 9e0f3aa94a..13f754af29 100644
--- a/dom/media/platforms/wmf/moz.build
+++ b/dom/media/platforms/wmf/moz.build
@@ -57,9 +57,11 @@ UNIFIED_SOURCES += [
"MFTDecoder.cpp",
"MFTEncoder.cpp",
"WMFAudioMFTManager.cpp",
+ "WMFDataEncoderUtils.cpp",
"WMFDecoderModule.cpp",
"WMFEncoderModule.cpp",
"WMFMediaDataDecoder.cpp",
+ "WMFMediaDataEncoder.cpp",
"WMFVideoMFTManager.cpp",
]
diff --git a/dom/media/test/complete_length_worker.js b/dom/media/test/complete_length_worker.js
new file mode 100644
index 0000000000..ceda63fdd5
--- /dev/null
+++ b/dom/media/test/complete_length_worker.js
@@ -0,0 +1,80 @@
+"use strict";
+
+let client;
+function is(got, expected, name) {
+ client.postMessage({ type: "is", got, expected, name });
+}
+
+self.onactivate = e =>
+ e.waitUntil(
+ (async () => {
+ await self.clients.claim();
+ const allClients = await self.clients.matchAll();
+ client = allClients[0];
+ is(allClients.length, 1, "allClients.length");
+ })()
+ );
+
+let expected_start = 0;
+let response_data = [
+ // One Array element for each response in order:
+ {
+ complete_length: "*",
+ body: "O",
+ },
+ {
+ complete_length: "3",
+ body: "g",
+ },
+ {
+ // Extend length to test that the remainder is fetched.
+ complete_length: "6",
+ body: "g",
+ },
+ {
+ // Reduce length to test that no more is fetched.
+ complete_length: "4",
+ body: "S",
+ },
+];
+
+self.onfetch = e => {
+ if (!e.request.url.endsWith("/media-resource")) {
+ return; // fall back to network fetch
+ }
+ is(
+ response_data.length >= 1,
+ true,
+ `response_data.length (${response_data.length}) > 0`
+ );
+ const { complete_length, body } = response_data.shift();
+ const range = e.request.headers.get("Range");
+ const match = range.match(/^bytes=(\d+)-/);
+ is(Array.isArray(match), true, `Array.isArray(match) for ${range}`);
+ const first = parseInt(match[1]);
+ is(first, expected_start, "first");
+ const last = first + body.length - 1; // inclusive
+ expected_start = last + 1;
+ const init = {
+ status: 206, // Partial Content
+ headers: {
+ "Accept-Ranges": "bytes",
+ "Content-Type": "audio/ogg",
+ "Content-Range": `bytes ${first}-${last}/${complete_length}`,
+ "Content-Length": body.length,
+ },
+ };
+ e.respondWith(new Response(body, init));
+};
+
+self.onmessage = e => {
+ switch (e.data.type) {
+ case "got error event":
+ // Check that all expected requests were received.
+ is(response_data.length, 0, "missing fetch count");
+ client.postMessage({ type: "done" });
+ return;
+ default:
+ is(e.data.type, "__KNOWN__", "e.data.type");
+ }
+};
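
For reference, the four synthetic 206 responses the worker returns advertise these Content-Range values (each body is a single byte, so the ranges advance one byte at a time, per the response_data table above):

    Content-Range: bytes 0-0/*    complete length initially unknown
    Content-Range: bytes 1-1/3    complete length becomes known
    Content-Range: bytes 2-2/6    length extended; remainder should be fetched
    Content-Range: bytes 3-3/4    length reduced; no further fetch expected
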
diff --git a/dom/media/test/mochitest.toml b/dom/media/test/mochitest.toml
index 99bd1c41c8..2490bef305 100644
--- a/dom/media/test/mochitest.toml
+++ b/dom/media/test/mochitest.toml
@@ -444,6 +444,7 @@ support-files = [
"chained-audio-video.ogg^headers^",
"chromeHelper.js",
"cloneElementVisually_helpers.js",
+ "complete_length_worker.js",
"contentType.sjs",
"detodos.opus",
"detodos.opus^headers^",
@@ -767,6 +768,9 @@ tags = "cloneelementvisually"
["test_clone_media_element.html"]
skip-if = ["os == 'android'"] # bug 1108558, android(bug 1232305)
+["test_complete_length.html"]
+scheme = "https"
+
["test_fastSeek-forwards.html"]
["test_fastSeek.html"]
diff --git a/dom/media/test/rdd_process_xpcom/RddProcessTest.cpp b/dom/media/test/rdd_process_xpcom/RddProcessTest.cpp
index fad7d6ee2e..c3e61e3f11 100644
--- a/dom/media/test/rdd_process_xpcom/RddProcessTest.cpp
+++ b/dom/media/test/rdd_process_xpcom/RddProcessTest.cpp
@@ -49,8 +49,7 @@ RddProcessTest::TestTelemetryProbes(JSContext* aCx,
promise->MaybeResolve((int32_t)rddProc->RDDProcessPid());
},
[promise](nsresult aError) {
- MOZ_ASSERT_UNREACHABLE("RddProcessTest; failure to get RDD child");
- promise->MaybeReject(aError);
+ MOZ_CRASH("RddProcessTest; failure to get RDD child");
});
promise.forget(aOutPromise);
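
Note the change in failure semantics here: MOZ_ASSERT_UNREACHABLE compiles to nothing in non-debug builds, so release automation would previously just reject the promise and carry on, whereas MOZ_CRASH aborts in every build type, making a missing RDD child impossible to overlook.

    // Gecko assertion macros, by build type:
    //   MOZ_ASSERT_UNREACHABLE("msg")  -> debug-only assert; no-op in release
    //   MOZ_CRASH("msg")               -> deliberate crash in all builds
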
diff --git a/dom/media/test/reftest/reftest.list b/dom/media/test/reftest/reftest.list
index bd4cb2d030..8d39975d5a 100644
--- a/dom/media/test/reftest/reftest.list
+++ b/dom/media/test/reftest/reftest.list
@@ -11,5 +11,5 @@ skip-if(Android) fuzzy(0-31,0-573249) fuzzy-if(appleSilicon,0-37,0-543189) == im
skip-if(Android) fuzzy(0-84,0-774213) fails-if(useDrawSnapshot) == uneven_frame_duration_video.html uneven_frame_duration_video-ref.html # Skip on Windows 7 as the resolution of the video is too high for test machines and will fail in the decoder.
# Set media.dormant-on-pause-timeout-ms to avoid decoders becoming dormant and busting test, skip on android as test is too noisy and unstable
skip-if(Android) pref(media.dormant-on-pause-timeout-ms,-1) fuzzy(0-20,0-500) == frame_order_mp4.html frame_order_mp4-ref.html
-skip-if(Android) fuzzy(0-30,0-270000) == incorrect_display_in_bytestream_vp8.html incorrect_display_in_bytestream_vp8-ref.html
+skip-if(Android) fuzzy(0-31,0-270000) == incorrect_display_in_bytestream_vp8.html incorrect_display_in_bytestream_vp8-ref.html
skip-if(Android) fuzzy(0-22,0-381481) == incorrect_display_in_bytestream_vp9.html incorrect_display_in_bytestream_vp9-ref.html
diff --git a/dom/media/test/test_complete_length.html b/dom/media/test/test_complete_length.html
new file mode 100644
index 0000000000..576b00dac2
--- /dev/null
+++ b/dom/media/test/test_complete_length.html
@@ -0,0 +1,49 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <meta charset="utf-8">
+ <title>Test different complete-length fields of Content-Range headers</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script src="/tests/dom/serviceworkers/test/utils.js"></script>
+ <link rel="stylesheet" href="/tests/SimpleTest/test.css"/>
+</head>
+<script>
+"use strict";
+
+let oncomplete;
+navigator.serviceWorker.addEventListener("message", e => {
+ switch (e.data.type) {
+ case "is":
+ is(e.data.got, e.data.expected, e.data.name);
+ break;
+ case "done":
+ oncomplete();
+ break;
+ default:
+ record(false, "unknown e.data.type", e.data.type);
+ }
+});
+
+add_task(async () => {
+ // Unregister any previous ServiceWorkerRegistrations that may not have been
+ // removed before a page reload.
+ await unregisterAll();
+ const registration =
+ await registerAndWaitForActive("complete_length_worker.js");
+ SimpleTest.registerCleanupFunction(() => registration.unregister());
+
+ const audio = new Audio("media-resource");
+ audio.preload = "metadata";
+ // An error event is generated because the resource is incomplete.
+ const error_promise = new Promise(r => audio.onerror = r);
+ await error_promise;
+ is(audio.error.code, MediaError.MEDIA_ERR_SRC_NOT_SUPPORTED, "error.code");
+ is(audio.error.message, "NS_ERROR_DOM_MEDIA_METADATA_ERR (0x806e0006)",
+ "error.message");
+
+ // Tell the ServiceWorker that media-resource requests have completed.
+ navigator.serviceWorker.controller.postMessage({type: "got error event"});
+ await new Promise(r => oncomplete = r);
+});
+</script>
+</html>
diff --git a/dom/media/tests/crashtests/crashtests.list b/dom/media/tests/crashtests/crashtests.list
index fd4ed80607..96c296a9fa 100644
--- a/dom/media/tests/crashtests/crashtests.list
+++ b/dom/media/tests/crashtests/crashtests.list
@@ -31,8 +31,8 @@ load 1511130.html
load 1510848.html
load 1516292.html
load 1576938.html
-skip-if(Android) pref(media.getusermedia.audiocapture.enabled,true) load 1573536.html
-skip-if(!Android) pref(media.getusermedia.audiocapture.enabled,true) pref(media.navigator.permission.device,false) load 1573536.html # media.navigator.permission.device is mobile-only, so other platforms fail to set it (Bug 1350948)
+skip-if(Android) pref(media.getusermedia.audio.capture.enabled,true) load 1573536.html
+skip-if(!Android) pref(media.getusermedia.audio.capture.enabled,true) pref(media.navigator.permission.device,false) load 1573536.html # media.navigator.permission.device is mobile-only, so other platforms fail to set it (Bug 1350948)
load 1594136.html
load 1749308.html
load 1764915.html
diff --git a/dom/media/utils/PerformanceRecorder.cpp b/dom/media/utils/PerformanceRecorder.cpp
index 3dc2c24a5d..dda76e0d99 100644
--- a/dom/media/utils/PerformanceRecorder.cpp
+++ b/dom/media/utils/PerformanceRecorder.cpp
@@ -245,6 +245,19 @@ ProfilerString8View PlaybackStage::Name() const {
return *mName;
}
+void PlaybackStage::AddMarker(MarkerOptions&& aOption) {
+ if (mStartAndEndTimeUs) {
+ auto& pair = *mStartAndEndTimeUs;
+ profiler_add_marker(Name(), Category(),
+ std::forward<MarkerOptions&&>(aOption),
+ geckoprofiler::markers::MediaSampleMarker{}, pair.first,
+ pair.second, 1 /* queue length */);
+ } else {
+ profiler_add_marker(Name(), Category(),
+ std::forward<MarkerOptions&&>(aOption));
+ }
+}
+
ProfilerString8View CaptureStage::Name() const {
if (!mName) {
auto imageTypeToStr = [](ImageType aType) -> const char* {
@@ -307,4 +320,17 @@ ProfilerString8View DecodeStage::Name() const {
return *mName;
}
+void DecodeStage::AddMarker(MarkerOptions&& aOption) {
+ if (mStartAndEndTimeUs) {
+ auto& pair = *mStartAndEndTimeUs;
+ profiler_add_marker(Name(), Category(),
+ std::forward<MarkerOptions&&>(aOption),
+ geckoprofiler::markers::MediaSampleMarker{}, pair.first,
+ pair.second, 1 /* queue length */);
+ } else {
+ profiler_add_marker(Name(), Category(),
+ std::forward<MarkerOptions&&>(aOption));
+ }
+}
+
} // namespace mozilla
diff --git a/dom/media/utils/PerformanceRecorder.h b/dom/media/utils/PerformanceRecorder.h
index e423c3fb5d..95fdab90ba 100644
--- a/dom/media/utils/PerformanceRecorder.h
+++ b/dom/media/utils/PerformanceRecorder.h
@@ -8,11 +8,13 @@
#define mozilla_PerformanceRecorder_h
#include <type_traits>
+#include <utility>
#include "mozilla/Attributes.h"
#include "mozilla/BaseProfilerMarkersPrerequisites.h"
#include "mozilla/Maybe.h"
#include "mozilla/Mutex.h"
+#include "mozilla/ProfilerMarkerTypes.h"
#include "mozilla/TimeStamp.h"
#include "mozilla/TypedEnumBits.h"
#include "nsStringFwd.h"
@@ -118,7 +120,19 @@ enum class MediaStage : uint8_t {
CopyDecodedVideo,
};
-class PlaybackStage {
+class StageBase {
+ public:
+ virtual void AddMarker(MarkerOptions&& aOption) {
+ profiler_add_marker(Name(), Category(),
+ std::forward<MarkerOptions&&>(aOption));
+ }
+
+ protected:
+ virtual ProfilerString8View Name() const = 0;
+ virtual const MarkerCategory& Category() const = 0;
+};
+
+class PlaybackStage : public StageBase {
public:
explicit PlaybackStage(MediaStage aStage, int32_t aHeight = 0,
MediaInfoFlag aFlag = MediaInfoFlag::None)
@@ -126,20 +140,28 @@ class PlaybackStage {
MOZ_ASSERT(aStage != MediaStage::Invalid);
}
- ProfilerString8View Name() const;
- const MarkerCategory& Category() const {
+ ProfilerString8View Name() const override;
+ const MarkerCategory& Category() const override {
return baseprofiler::category::MEDIA_PLAYBACK;
}
+ void AddMarker(MarkerOptions&& aOption) override;
+
+ void SetStartTimeAndEndTime(uint64_t aStartTime, uint64_t aEndTime) {
+ mStartAndEndTimeUs =
+ Some(std::pair<uint64_t, uint64_t>{aStartTime, aEndTime});
+ }
MediaStage mStage;
int32_t mHeight;
MediaInfoFlag mFlag;
+ Maybe<std::pair<uint64_t, uint64_t>> mStartAndEndTimeUs;
+
private:
mutable Maybe<nsCString> mName;
};
-class CaptureStage {
+class CaptureStage : public StageBase {
public:
enum class ImageType : uint8_t {
Unknown,
@@ -160,8 +182,8 @@ class CaptureStage {
mHeight(aHeight),
mImageType(aImageType) {}
- ProfilerString8View Name() const;
- const MarkerCategory& Category() const {
+ ProfilerString8View Name() const override;
+ const MarkerCategory& Category() const override {
return baseprofiler::category::MEDIA_RT;
}
@@ -175,7 +197,7 @@ class CaptureStage {
mutable Maybe<nsCString> mName;
};
-class CopyVideoStage {
+class CopyVideoStage : public StageBase {
public:
CopyVideoStage(nsCString aSource, TrackingId aTrackingId, int32_t aWidth,
int32_t aHeight)
@@ -184,8 +206,8 @@ class CopyVideoStage {
mWidth(aWidth),
mHeight(aHeight) {}
- ProfilerString8View Name() const;
- const MarkerCategory& Category() const {
+ ProfilerString8View Name() const override;
+ const MarkerCategory& Category() const override {
return baseprofiler::category::MEDIA_RT;
}
@@ -201,7 +223,7 @@ class CopyVideoStage {
mutable Maybe<nsCString> mName;
};
-class DecodeStage {
+class DecodeStage : public StageBase {
public:
enum ImageFormat : uint8_t {
YUV420P,
@@ -223,8 +245,8 @@ class DecodeStage {
: mSource(std::move(aSource)),
mTrackingId(std::move(aTrackingId)),
mFlag(aFlag) {}
- ProfilerString8View Name() const;
- const MarkerCategory& Category() const {
+ ProfilerString8View Name() const override;
+ const MarkerCategory& Category() const override {
return baseprofiler::category::MEDIA_PLAYBACK;
}
@@ -242,6 +264,11 @@ class DecodeStage {
void SetColorDepth(gfx::ColorDepth aColorDepth) {
mColorDepth = Some(aColorDepth);
}
+ void SetStartTimeAndEndTime(uint64_t aStartTime, uint64_t aEndTime) {
+ mStartAndEndTimeUs =
+ Some(std::pair<uint64_t, uint64_t>{aStartTime, aEndTime});
+ }
+ void AddMarker(MarkerOptions&& aOption) override;
// The name of the source that performs this stage.
nsCString mSource;
@@ -256,6 +283,7 @@ class DecodeStage {
Maybe<gfx::ColorRange> mColorRange;
Maybe<gfx::ColorDepth> mColorDepth;
mutable Maybe<nsCString> mName;
+ Maybe<std::pair<uint64_t, uint64_t>> mStartAndEndTimeUs;
};
class PerformanceRecorderBase {
@@ -325,9 +353,7 @@ class PerformanceRecorderImpl : public PerformanceRecorderBase {
MOZ_ASSERT(elapsedTimeUs >= 0, "Elapsed time can't be less than 0!");
aStageMutator(stage);
AUTO_PROFILER_STATS(PROFILER_MARKER_UNTYPED);
- profiler_add_marker(
- stage.Name(), stage.Category(),
- MarkerOptions(MarkerTiming::Interval(startTime, now)));
+ stage.AddMarker(MarkerOptions(MarkerTiming::Interval(startTime, now)));
}
return static_cast<float>(elapsedTimeUs);
}
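
The call sites in WMFVideoMFTManager.cpp earlier in this patch show how the new fields are fed; once the start/end pair is set, the AddMarker override emits a MediaSampleMarker spanning the sample instead of an untyped interval marker. Sketched from that call site:

    mPerformanceRecorder.Record(pts.ToMicroseconds(), [&](DecodeStage& aStage) {
      aStage.SetResolution(size.width, size.height);
      aStage.SetStartTimeAndEndTime(v->mTime.ToMicroseconds(),
                                    v->GetEndTime().ToMicroseconds());
    });
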
diff --git a/dom/media/utils/TelemetryProbesReporter.cpp b/dom/media/utils/TelemetryProbesReporter.cpp
index 377cee9abc..e702ae14c5 100644
--- a/dom/media/utils/TelemetryProbesReporter.cpp
+++ b/dom/media/utils/TelemetryProbesReporter.cpp
@@ -7,6 +7,7 @@
#include <cmath>
#include "FrameStatistics.h"
+#include "MediaCodecsSupport.h"
#include "VideoUtils.h"
#include "mozilla/EMEUtils.h"
#include "mozilla/Logging.h"
@@ -791,5 +792,32 @@ double TelemetryProbesReporter::GetAudiblePlayTimeInSeconds() const {
return GetTotalAudioPlayTimeInSeconds() - GetInaudiblePlayTimeInSeconds();
}
+/* static */
+void TelemetryProbesReporter::ReportDeviceMediaCodecSupported(
+ const media::MediaCodecsSupported& aSupported) {
+ static bool sReported = false;
+ if (sReported) {
+ return;
+ }
+ MOZ_ASSERT(ContainHardwareCodecsSupported(aSupported));
+ sReported = true;
+
+ glean::media_playback::device_hardware_decoder_support.Get("h264"_ns).Set(
+ aSupported.contains(
+ mozilla::media::MediaCodecsSupport::H264HardwareDecode));
+ glean::media_playback::device_hardware_decoder_support.Get("vp8"_ns).Set(
+ aSupported.contains(
+ mozilla::media::MediaCodecsSupport::VP8HardwareDecode));
+ glean::media_playback::device_hardware_decoder_support.Get("vp9"_ns).Set(
+ aSupported.contains(
+ mozilla::media::MediaCodecsSupport::VP9HardwareDecode));
+ glean::media_playback::device_hardware_decoder_support.Get("av1"_ns).Set(
+ aSupported.contains(
+ mozilla::media::MediaCodecsSupport::AV1HardwareDecode));
+ glean::media_playback::device_hardware_decoder_support.Get("hevc"_ns).Set(
+ aSupported.contains(
+ mozilla::media::MediaCodecsSupport::HEVCHardwareDecode));
+}
+
#undef LOG
} // namespace mozilla
diff --git a/dom/media/utils/TelemetryProbesReporter.h b/dom/media/utils/TelemetryProbesReporter.h
index 43e05dcadd..2f0e7f1b44 100644
--- a/dom/media/utils/TelemetryProbesReporter.h
+++ b/dom/media/utils/TelemetryProbesReporter.h
@@ -5,6 +5,7 @@
#ifndef DOM_TelemetryProbesReporter_H_
#define DOM_TelemetryProbesReporter_H_
+#include "MediaCodecsSupport.h"
#include "MediaInfo.h"
#include "mozilla/Maybe.h"
#include "mozilla/AwakeTimeStamp.h"
@@ -56,6 +57,9 @@ class TelemetryProbesReporter final {
using AudibleState = dom::AudioChannelService::AudibleState;
+ static void ReportDeviceMediaCodecSupported(
+ const media::MediaCodecsSupported& aSupported);
+
// State transitions
void OnPlay(Visibility aVisibility, MediaContent aContent, bool aIsMuted);
void OnPause(Visibility aVisibility);
diff --git a/dom/media/webaudio/FFTBlock.cpp b/dom/media/webaudio/FFTBlock.cpp
index 79fb934a00..eeeaf1061e 100644
--- a/dom/media/webaudio/FFTBlock.cpp
+++ b/dom/media/webaudio/FFTBlock.cpp
@@ -51,7 +51,8 @@ FFTBlock* FFTBlock::CreateInterpolatedBlock(const FFTBlock& block0,
const FFTBlock& block1,
double interp) {
uint32_t fftSize = block0.FFTSize();
- FFTBlock* newBlock = new FFTBlock(fftSize, 1.0f / AssertedCast<float>(fftSize));
+ FFTBlock* newBlock =
+ new FFTBlock(fftSize, 1.0f / AssertedCast<float>(fftSize));
newBlock->InterpolateFrequencyComponents(block0, block1, interp);
diff --git a/dom/media/webcodecs/DecoderAgent.cpp b/dom/media/webcodecs/DecoderAgent.cpp
index 095852c01d..f7a539fa18 100644
--- a/dom/media/webcodecs/DecoderAgent.cpp
+++ b/dom/media/webcodecs/DecoderAgent.cpp
@@ -96,16 +96,18 @@ RefPtr<DecoderAgent::ConfigurePromise> DecoderAgent::Configure(
auto params = CreateDecoderParams{
*mInfo,
CreateDecoderParams::OptionSet(
- CreateDecoderParams::Option::LowLatency,
aPreferSoftwareDecoder
? CreateDecoderParams::Option::HardwareDecoderNotAllowed
: CreateDecoderParams::Option::Default),
mInfo->GetType(), mImageContainer, knowsCompositor};
+ if (aLowLatency) {
+ params.mOptions += CreateDecoderParams::Option::LowLatency;
+ }
LOG("DecoderAgent #%d (%p) is creating a decoder - PreferSW: %s, "
- "low-latency: %syes",
+ "low-latency: %s",
mId, this, aPreferSoftwareDecoder ? "yes" : "no",
- aLowLatency ? "" : "forcibly ");
+ aLowLatency ? "yes" : "no");
RefPtr<ConfigurePromise> p = mConfigurePromise.Ensure(__func__);
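
The configure change builds the decoder options incrementally: the base OptionSet carries only the hardware preference, and LowLatency is OR-ed in afterwards when the caller asked for it, instead of being unconditionally present as before. A self-contained sketch of that bitflag option-set idiom (the enum and OptionSet below are simplified stand-ins, not the Gecko CreateDecoderParams types):

    #include <cstdint>

    enum class Option : uint32_t {
      Default = 0,
      HardwareDecoderNotAllowed = 1 << 0,
      LowLatency = 1 << 1,
    };

    struct OptionSet {
      uint32_t bits = 0;
      OptionSet& operator+=(Option aOpt) {
        bits |= static_cast<uint32_t>(aOpt);
        return *this;
      }
      bool contains(Option aOpt) const {
        return bits & static_cast<uint32_t>(aOpt);
      }
    };

    OptionSet MakeOptions(bool aPreferSoftware, bool aLowLatency) {
      OptionSet opts;
      if (aPreferSoftware) {
        opts += Option::HardwareDecoderNotAllowed;  // the base set above
      }
      if (aLowLatency) {
        opts += Option::LowLatency;  // added conditionally, as in the patch
      }
      return opts;
    }
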
diff --git a/dom/media/webcodecs/DecoderTemplate.cpp b/dom/media/webcodecs/DecoderTemplate.cpp
index 2fc2471a24..896f83b352 100644
--- a/dom/media/webcodecs/DecoderTemplate.cpp
+++ b/dom/media/webcodecs/DecoderTemplate.cpp
@@ -85,28 +85,26 @@ DecoderTemplate<DecoderType>::ConfigureMessage::Create(
template <typename DecoderType>
DecoderTemplate<DecoderType>::DecodeMessage::DecodeMessage(
- Id aId, ConfigId aConfigId, UniquePtr<InputTypeInternal>&& aData)
+ SeqId aSeqId, ConfigId aConfigId, UniquePtr<InputTypeInternal>&& aData)
: ControlMessage(
- nsPrintfCString("decode #%zu (config #%d)", aId, aConfigId)),
- mId(aId),
+ nsPrintfCString("decode #%zu (config #%d)", aSeqId, aConfigId)),
+ mSeqId(aSeqId),
mData(std::move(aData)) {}
-template <typename DecoderType>
-DecoderTemplate<DecoderType>::FlushMessage::FlushMessage(Id aId,
- ConfigId aConfigId,
- Promise* aPromise)
- : ControlMessage(
- nsPrintfCString("flush #%zu (config #%d)", aId, aConfigId)),
- mId(aId),
- mPromise(aPromise) {}
+static int64_t GenerateUniqueId() {
+  // This needs to be atomic since it can run on either the main thread or a
+  // worker thread.
+ static std::atomic<int64_t> sNextId = 0;
+ return ++sNextId;
+}
template <typename DecoderType>
-void DecoderTemplate<DecoderType>::FlushMessage::RejectPromiseIfAny(
- const nsresult& aReason) {
- if (mPromise) {
- mPromise->MaybeReject(aReason);
- }
-}
+DecoderTemplate<DecoderType>::FlushMessage::FlushMessage(SeqId aSeqId,
+ ConfigId aConfigId)
+ : ControlMessage(
+ nsPrintfCString("flush #%zu (config #%d)", aSeqId, aConfigId)),
+ mSeqId(aSeqId),
+ mUniqueId(GenerateUniqueId()) {}
/*
* Below are DecoderTemplate implementation
@@ -221,10 +219,16 @@ already_AddRefed<Promise> DecoderTemplate<DecoderType>::Flush(
mKeyChunkRequired = true;
- mControlMessageQueue.emplace(UniquePtr<ControlMessage>(
- new FlushMessage(++mFlushCounter, mLatestConfigureId, p)));
- LOG("%s %p enqueues %s", DecoderType::Name.get(), this,
- mControlMessageQueue.back()->ToString().get());
+ auto msg = UniquePtr<ControlMessage>(
+ new FlushMessage(++mFlushCounter, mLatestConfigureId));
+ const auto flushPromiseId = msg->AsFlushMessage()->mUniqueId;
+ MOZ_ASSERT(!mPendingFlushPromises.Contains(flushPromiseId));
+ mPendingFlushPromises.Insert(flushPromiseId, p);
+
+ mControlMessageQueue.emplace(std::move(msg));
+
+ LOG("%s %p enqueues %s, with unique id %" PRId64, DecoderType::Name.get(),
+ this, mControlMessageQueue.back()->ToString().get(), flushPromiseId);
ProcessControlMessageQueue();
return p.forget();
}
@@ -264,7 +268,7 @@ Result<Ok, nsresult> DecoderTemplate<DecoderType>::ResetInternal(
mDecodeCounter = 0;
mFlushCounter = 0;
- CancelPendingControlMessages(aResult);
+ CancelPendingControlMessagesAndFlushPromises(aResult);
DestroyDecoderAgentIfAny();
if (mDecodeQueueSize > 0) {
@@ -390,7 +394,7 @@ void DecoderTemplate<DecoderType>::ProcessControlMessageQueue() {
}
template <typename DecoderType>
-void DecoderTemplate<DecoderType>::CancelPendingControlMessages(
+void DecoderTemplate<DecoderType>::CancelPendingControlMessagesAndFlushPromises(
const nsresult& aResult) {
AssertIsOnOwningThread();
@@ -399,11 +403,6 @@ void DecoderTemplate<DecoderType>::CancelPendingControlMessages(
LOG("%s %p cancels current %s", DecoderType::Name.get(), this,
mProcessingMessage->ToString().get());
mProcessingMessage->Cancel();
-
- if (FlushMessage* flush = mProcessingMessage->AsFlushMessage()) {
- flush->RejectPromiseIfAny(aResult);
- }
-
mProcessingMessage.reset();
}
@@ -411,14 +410,18 @@ void DecoderTemplate<DecoderType>::CancelPendingControlMessages(
while (!mControlMessageQueue.empty()) {
LOG("%s %p cancels pending %s", DecoderType::Name.get(), this,
mControlMessageQueue.front()->ToString().get());
-
MOZ_ASSERT(!mControlMessageQueue.front()->IsProcessing());
- if (FlushMessage* flush = mControlMessageQueue.front()->AsFlushMessage()) {
- flush->RejectPromiseIfAny(aResult);
- }
-
mControlMessageQueue.pop();
}
+
+ // If there are pending flush promises, reject them.
+ mPendingFlushPromises.ForEach(
+ [&](const int64_t& id, const RefPtr<Promise>& p) {
+ LOG("%s %p, reject the promise for flush %" PRId64 " (unique id)",
+ DecoderType::Name.get(), this, id);
+ p->MaybeReject(aResult);
+ });
+ mPendingFlushPromises.Clear();
}
template <typename DecoderType>
@@ -565,7 +568,6 @@ MessageProcessedResult DecoderTemplate<DecoderType>::ProcessDecodeMessage(
mProcessingMessage.reset();
QueueATask("Error during decode",
[self = RefPtr{this}]() MOZ_CAN_RUN_SCRIPT_BOUNDARY {
- MOZ_ASSERT(self->mState != CodecState::Closed);
self->CloseInternal(NS_ERROR_DOM_ENCODING_NOT_SUPPORTED_ERR);
});
return MessageProcessedResult::Processed;
@@ -696,6 +698,8 @@ MessageProcessedResult DecoderTemplate<DecoderType>::ProcessFlushMessage(
msg->Complete();
+ const auto flushPromiseId = msg->mUniqueId;
+
        // If flush failed, it means the decoder failed to decode the data
        // sent before, so we treat it like a decode error. We reject
        // the promise first and then queue a task to close
@@ -705,14 +709,15 @@ MessageProcessedResult DecoderTemplate<DecoderType>::ProcessFlushMessage(
LOGE("%s %p, DecoderAgent #%d failed to flush: %s",
DecoderType::Name.get(), self.get(), id,
error.Description().get());
- RefPtr<Promise> promise = msg->TakePromise();
// Reject with an EncodingError instead of the error we got
// above.
self->QueueATask(
"Error during flush runnable",
- [self = RefPtr{this}, promise]() MOZ_CAN_RUN_SCRIPT_BOUNDARY {
- promise->MaybeReject(
- NS_ERROR_DOM_ENCODING_NOT_SUPPORTED_ERR);
+ [self = RefPtr{this}]() MOZ_CAN_RUN_SCRIPT_BOUNDARY {
+ // If Reset() was invoked before this task executes, the
+ // promise in mPendingFlushPromises is handled there.
+ // Otherwise, the promise is going to be rejected by
+ // CloseInternal() below.
self->mProcessingMessage.reset();
MOZ_ASSERT(self->mState != CodecState::Closed);
self->CloseInternal(
@@ -733,14 +738,23 @@ MessageProcessedResult DecoderTemplate<DecoderType>::ProcessFlushMessage(
msgStr.get());
}
- RefPtr<Promise> promise = msg->TakePromise();
self->QueueATask(
"Flush: output decoding data task",
- [self = RefPtr{self}, promise, data = std::move(data)]()
- MOZ_CAN_RUN_SCRIPT_BOUNDARY {
- self->OutputDecodedData(std::move(data));
- promise->MaybeResolveWithUndefined();
- });
+ [self = RefPtr{self}, data = std::move(data),
+ flushPromiseId]() MOZ_CAN_RUN_SCRIPT_BOUNDARY {
+ self->OutputDecodedData(std::move(data));
+ // If Reset() was invoked before this task executes, or
+ // during the output callback above in the execution of this
+ // task, the promise in mPendingFlushPromises is handled
+ // there. Otherwise, the promise is resolved here.
+ if (Maybe<RefPtr<Promise>> p =
+ self->mPendingFlushPromises.Take(flushPromiseId)) {
+ LOG("%s %p, resolving the promise for flush %" PRId64
+ " (unique id)",
+ DecoderType::Name.get(), self.get(), flushPromiseId);
+ p.value()->MaybeResolveWithUndefined();
+ }
+ });
self->mProcessingMessage.reset();
self->ProcessControlMessageQueue();
})
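
The scheme introduced here decouples flush promises from their control messages: every flush gets a process-unique id from an atomic counter, the promise is parked in a map under that id, and whichever side reaches it first (the completion task or Reset) takes it out and settles it exactly once. A standalone sketch of that pattern under those assumptions, with std::function standing in for the DOM Promise:

    #include <atomic>
    #include <cstdint>
    #include <functional>
    #include <unordered_map>

    using Settle = std::function<void(bool /* resolved */)>;

    static int64_t GenerateUniqueId() {
      static std::atomic<int64_t> sNextId{0};  // main or worker thread
      return ++sNextId;
    }

    struct PendingFlushes {
      std::unordered_map<int64_t, Settle> mMap;

      int64_t Add(Settle aSettle) {
        int64_t id = GenerateUniqueId();
        mMap.emplace(id, std::move(aSettle));
        return id;
      }
      // Settles at most once: a missing entry means the other side already
      // handled the promise, so this is a harmless no-op.
      void Resolve(int64_t aId) {
        if (auto it = mMap.find(aId); it != mMap.end()) {
          it->second(true);
          mMap.erase(it);
        }
      }
      // What Reset()/Close() does: reject everything still pending.
      void RejectAll() {
        for (auto& entry : mMap) {
          entry.second(false);
        }
        mMap.clear();
      }
    };
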
diff --git a/dom/media/webcodecs/DecoderTemplate.h b/dom/media/webcodecs/DecoderTemplate.h
index fe0cb5baee..69d4e2f03d 100644
--- a/dom/media/webcodecs/DecoderTemplate.h
+++ b/dom/media/webcodecs/DecoderTemplate.h
@@ -9,6 +9,8 @@
#include <queue>
+#include "SimpleMap.h"
+#include "WebCodecsUtils.h"
#include "mozilla/DOMEventTargetHelper.h"
#include "mozilla/DecoderAgent.h"
#include "mozilla/MozPromise.h"
@@ -18,7 +20,6 @@
#include "mozilla/dom/WorkerRef.h"
#include "mozilla/media/MediaUtils.h"
#include "nsStringFwd.h"
-#include "WebCodecsUtils.h"
namespace mozilla {
@@ -76,7 +77,8 @@ class DecoderTemplate : public DOMEventTargetHelper {
const ConfigTypeInternal& Config() { return *mConfig; }
UniquePtr<ConfigTypeInternal> TakeConfig() { return std::move(mConfig); }
- const Id mId; // A unique id shown in log.
+ // The id of a configure request.
+ const Id mId;
private:
ConfigureMessage(Id aId, UniquePtr<ConfigTypeInternal>&& aConfig);
@@ -88,16 +90,18 @@ class DecoderTemplate : public DOMEventTargetHelper {
: public ControlMessage,
public MessageRequestHolder<DecoderAgent::DecodePromise> {
public:
- using Id = size_t;
+ using SeqId = size_t;
using ConfigId = typename Self::ConfigureMessage::Id;
- DecodeMessage(Id aId, ConfigId aConfigId,
+ DecodeMessage(SeqId aSeqId, ConfigId aConfigId,
UniquePtr<InputTypeInternal>&& aData);
~DecodeMessage() = default;
virtual void Cancel() override { Disconnect(); }
virtual bool IsProcessing() override { return Exists(); };
virtual DecodeMessage* AsDecodeMessage() override { return this; }
- const Id mId; // A unique id shown in log.
+ // The sequence id of a decode request associated with a specific
+ // configuration.
+ const SeqId mSeqId;
UniquePtr<InputTypeInternal> mData;
};
@@ -105,20 +109,18 @@ class DecoderTemplate : public DOMEventTargetHelper {
: public ControlMessage,
public MessageRequestHolder<DecoderAgent::DecodePromise> {
public:
- using Id = size_t;
+ using SeqId = size_t;
using ConfigId = typename Self::ConfigureMessage::Id;
- FlushMessage(Id aId, ConfigId aConfigId, Promise* aPromise);
+ FlushMessage(SeqId aSeqId, ConfigId aConfigId);
~FlushMessage() = default;
virtual void Cancel() override { Disconnect(); }
virtual bool IsProcessing() override { return Exists(); };
virtual FlushMessage* AsFlushMessage() override { return this; }
- already_AddRefed<Promise> TakePromise() { return mPromise.forget(); }
- void RejectPromiseIfAny(const nsresult& aReason);
-
- const Id mId; // A unique id shown in log.
- private:
- RefPtr<Promise> mPromise;
+ // The sequence id of a flush request associated with a specific
+ // configuration.
+ const SeqId mSeqId;
+ const int64_t mUniqueId;
};
protected:
@@ -176,7 +178,7 @@ class DecoderTemplate : public DOMEventTargetHelper {
nsresult FireEvent(nsAtom* aTypeWithOn, const nsAString& aEventType);
void ProcessControlMessageQueue();
- void CancelPendingControlMessages(const nsresult& aResult);
+ void CancelPendingControlMessagesAndFlushPromises(const nsresult& aResult);
// Queue a task to the control thread. This is to be used when a task needs to
// perform multiple steps.
@@ -209,6 +211,11 @@ class DecoderTemplate : public DOMEventTargetHelper {
std::queue<UniquePtr<ControlMessage>> mControlMessageQueue;
UniquePtr<ControlMessage> mProcessingMessage;
+  // When a flush request is initiated, a promise is created and stored in
+  // mPendingFlushPromises until it is settled, either in the task delivering
+  // the flush result or in Reset() if that runs before the task.
+ SimpleMap<int64_t, RefPtr<Promise>> mPendingFlushPromises;
+
uint32_t mDecodeQueueSize;
bool mDequeueEventScheduled;
@@ -216,10 +223,10 @@ class DecoderTemplate : public DOMEventTargetHelper {
// DecoderAgent's Id.
uint32_t mLatestConfigureId;
  // Tracking how much decode data has been enqueued; this number will be
- // used as the DecodeMessage's Id.
+ // used as the DecodeMessage's sequence Id.
size_t mDecodeCounter;
  // Tracking how many flush requests have been enqueued; this number will be
- // used as the FlushMessage's Id.
+ // used as the FlushMessage's sequence Id.
size_t mFlushCounter;
// DecoderAgent will be created every time "configure" is being processed, and
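
From its use in this patch, the relevant SimpleMap surface is Contains/Insert/Take/ForEach/Clear, with Take returning a Maybe so an already-settled flush is a cheap no-op. A rough standalone equivalent inferred from that usage (not the actual SimpleMap.h declaration; std::optional stands in for Maybe):

    #include <functional>
    #include <map>
    #include <optional>

    template <typename K, typename V>
    class SimpleMapish {
     public:
      bool Contains(const K& aKey) const { return mMap.count(aKey) != 0; }
      void Insert(const K& aKey, V aValue) {
        mMap.emplace(aKey, std::move(aValue));
      }
      // Empty optional when the key is absent, so callers can treat
      // "already taken elsewhere" as a no-op.
      std::optional<V> Take(const K& aKey) {
        auto it = mMap.find(aKey);
        if (it == mMap.end()) {
          return std::nullopt;
        }
        V value = std::move(it->second);
        mMap.erase(it);
        return value;
      }
      void ForEach(const std::function<void(const K&, const V&)>& aFn) const {
        for (const auto& [key, value] : mMap) {
          aFn(key, value);
        }
      }
      void Clear() { mMap.clear(); }

     private:
      std::map<K, V> mMap;
    };
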
diff --git a/dom/media/webcodecs/EncoderTemplate.cpp b/dom/media/webcodecs/EncoderTemplate.cpp
index 34edfae822..2f70380519 100644
--- a/dom/media/webcodecs/EncoderTemplate.cpp
+++ b/dom/media/webcodecs/EncoderTemplate.cpp
@@ -71,16 +71,8 @@ EncoderTemplate<EncoderType>::EncodeMessage::EncodeMessage(
template <typename EncoderType>
EncoderTemplate<EncoderType>::FlushMessage::FlushMessage(
- WebCodecsId aConfigureId, Promise* aPromise)
- : ControlMessage(aConfigureId), mPromise(aPromise) {}
-
-template <typename EncoderType>
-void EncoderTemplate<EncoderType>::FlushMessage::RejectPromiseIfAny(
- const nsresult& aReason) {
- if (mPromise) {
- mPromise->MaybeReject(aReason);
- }
-}
+ WebCodecsId aConfigureId)
+ : ControlMessage(aConfigureId) {}
/*
* Below are EncoderTemplate implementation
@@ -215,7 +207,13 @@ already_AddRefed<Promise> EncoderTemplate<EncoderType>::Flush(
return p.forget();
}
- mControlMessageQueue.push(MakeRefPtr<FlushMessage>(mLatestConfigureId, p));
+ auto msg = MakeRefPtr<FlushMessage>(mLatestConfigureId);
+ const auto flushPromiseId = static_cast<int64_t>(msg->mMessageId);
+ MOZ_ASSERT(!mPendingFlushPromises.Contains(flushPromiseId));
+ mPendingFlushPromises.Insert(flushPromiseId, p);
+
+ mControlMessageQueue.emplace(std::move(msg));
+
LOG("%s %p enqueues %s", EncoderType::Name.get(), this,
mControlMessageQueue.back()->ToString().get());
ProcessControlMessageQueue();
@@ -259,7 +257,7 @@ Result<Ok, nsresult> EncoderTemplate<EncoderType>::ResetInternal(
mEncodeCounter = 0;
mFlushCounter = 0;
- CancelPendingControlMessages(aResult);
+ CancelPendingControlMessagesAndFlushPromises(aResult);
DestroyEncoderAgentIfAny();
if (mEncodeQueueSize > 0) {
@@ -403,8 +401,8 @@ void EncoderTemplate<VideoEncoderTraits>::OutputEncodedVideoData(
metadata.mDecoderConfig.Construct(std::move(decoderConfig));
mOutputNewDecoderConfig = false;
- LOGE("New config passed to output callback: %s",
- decoderConfigInternal.ToString().get());
+ LOG("New config passed to output callback: %s",
+ decoderConfigInternal.ToString().get());
}
nsAutoCString metadataInfo;
@@ -462,7 +460,7 @@ void EncoderTemplate<AudioEncoderTraits>::OutputEncodedAudioData(
this->EncoderConfigToDecoderConfig(GetParentObject(), data,
*mActiveConfig);
- // Convert VideoDecoderConfigInternal to VideoDecoderConfig
+ // Convert AudioDecoderConfigInternal to AudioDecoderConfig
RootedDictionary<AudioDecoderConfig> decoderConfig(cx);
decoderConfig.mCodec = decoderConfigInternal.mCodec;
decoderConfig.mNumberOfChannels = decoderConfigInternal.mNumberOfChannels;
@@ -473,8 +471,8 @@ void EncoderTemplate<AudioEncoderTraits>::OutputEncodedAudioData(
metadata.mDecoderConfig.Construct(std::move(decoderConfig));
mOutputNewDecoderConfig = false;
- LOGE("New config passed to output callback: %s",
- decoderConfigInternal.ToString().get());
+ LOG("New config passed to output callback: %s",
+ decoderConfigInternal.ToString().get());
}
nsAutoCString metadataInfo;
@@ -578,7 +576,7 @@ void EncoderTemplate<EncoderType>::ProcessControlMessageQueue() {
}
template <typename EncoderType>
-void EncoderTemplate<EncoderType>::CancelPendingControlMessages(
+void EncoderTemplate<EncoderType>::CancelPendingControlMessagesAndFlushPromises(
const nsresult& aResult) {
AssertIsOnOwningThread();
@@ -587,11 +585,6 @@ void EncoderTemplate<EncoderType>::CancelPendingControlMessages(
LOG("%s %p cancels current %s", EncoderType::Name.get(), this,
mProcessingMessage->ToString().get());
mProcessingMessage->Cancel();
-
- if (RefPtr<FlushMessage> flush = mProcessingMessage->AsFlushMessage()) {
- flush->RejectPromiseIfAny(aResult);
- }
-
mProcessingMessage = nullptr;
}
@@ -601,13 +594,17 @@ void EncoderTemplate<EncoderType>::CancelPendingControlMessages(
mControlMessageQueue.front()->ToString().get());
MOZ_ASSERT(!mControlMessageQueue.front()->IsProcessing());
- if (RefPtr<FlushMessage> flush =
- mControlMessageQueue.front()->AsFlushMessage()) {
- flush->RejectPromiseIfAny(aResult);
- }
-
mControlMessageQueue.pop();
}
+
+ // If there are pending flush promises, reject them.
+ mPendingFlushPromises.ForEach(
+ [&](const int64_t& id, const RefPtr<Promise>& p) {
+ LOG("%s %p, reject the promise for flush %" PRId64,
+ EncoderType::Name.get(), this, id);
+ p->MaybeReject(aResult);
+ });
+ mPendingFlushPromises.Clear();
}
template <typename EncoderType>
@@ -1020,78 +1017,88 @@ MessageProcessedResult EncoderTemplate<EncoderType>::ProcessFlushMessage(
}
mAgent->Drain()
- ->Then(
- GetCurrentSerialEventTarget(), __func__,
- [self = RefPtr{this}, id = mAgent->mId, aMessage,
- this](EncoderAgent::EncodePromise::ResolveOrRejectValue&& aResult) {
- MOZ_ASSERT(self->mProcessingMessage);
- MOZ_ASSERT(self->mProcessingMessage->AsFlushMessage());
- MOZ_ASSERT(self->mState == CodecState::Configured);
- MOZ_ASSERT(self->mAgent);
- MOZ_ASSERT(id == self->mAgent->mId);
- MOZ_ASSERT(self->mActiveConfig);
+ ->Then(GetCurrentSerialEventTarget(), __func__,
+ [self = RefPtr{this}, id = mAgent->mId, aMessage, this](
+ EncoderAgent::EncodePromise::ResolveOrRejectValue&& aResult) {
+ MOZ_ASSERT(self->mProcessingMessage);
+ MOZ_ASSERT(self->mProcessingMessage->AsFlushMessage());
+ MOZ_ASSERT(self->mState == CodecState::Configured);
+ MOZ_ASSERT(self->mAgent);
+ MOZ_ASSERT(id == self->mAgent->mId);
+ MOZ_ASSERT(self->mActiveConfig);
- LOG("%s %p, EncoderAgent #%zu %s has been %s",
- EncoderType::Name.get(), self.get(), id,
- aMessage->ToString().get(),
- aResult.IsResolve() ? "resolved" : "rejected");
+ LOG("%s %p, EncoderAgent #%zu %s has been %s",
+ EncoderType::Name.get(), self.get(), id,
+ aMessage->ToString().get(),
+ aResult.IsResolve() ? "resolved" : "rejected");
- nsCString msgStr = aMessage->ToString();
+ nsCString msgStr = aMessage->ToString();
- aMessage->Complete();
+ aMessage->Complete();
- // If flush failed, it means encoder fails to encode the data
- // sent before, so we treat it like an encode error. We reject
- // the promise first and then queue a task to close VideoEncoder
- // with an EncodingError.
- if (aResult.IsReject()) {
- const MediaResult& error = aResult.RejectValue();
- LOGE("%s %p, EncoderAgent #%zu failed to flush: %s",
- EncoderType::Name.get(), self.get(), id,
- error.Description().get());
- RefPtr<Promise> promise = aMessage->TakePromise();
- // Reject with an EncodingError instead of the error we got
- // above.
- self->QueueATask(
- "Error during flush runnable",
- [self = RefPtr{this}, promise]() MOZ_CAN_RUN_SCRIPT_BOUNDARY {
- promise->MaybeReject(
- NS_ERROR_DOM_ENCODING_NOT_SUPPORTED_ERR);
- self->mProcessingMessage = nullptr;
- MOZ_ASSERT(self->mState != CodecState::Closed);
- self->CloseInternal(
- NS_ERROR_DOM_ENCODING_NOT_SUPPORTED_ERR);
- });
- return;
- }
+               // If flush failed, it means the encoder failed to encode the
+               // data sent before, so we treat it like an encode error and
+               // queue a task to close the VideoEncoder with an
+               // EncodingError, which rejects the pending promise.
+ if (aResult.IsReject()) {
+ const MediaResult& error = aResult.RejectValue();
+ LOGE("%s %p, EncoderAgent #%zu failed to flush: %s",
+ EncoderType::Name.get(), self.get(), id,
+ error.Description().get());
+ // Reject with an EncodingError instead of the error we got
+ // above.
+ self->QueueATask(
+ "Error during flush runnable",
+ [self = RefPtr{this}]() MOZ_CAN_RUN_SCRIPT_BOUNDARY {
+ // If Reset() was invoked before this task executes, the
+ // promise in mPendingFlushPromises is handled there.
+ // Otherwise, the promise is going to be rejected by
+ // CloseInternal() below.
+ self->mProcessingMessage = nullptr;
+ MOZ_ASSERT(self->mState != CodecState::Closed);
+ self->CloseInternal(
+ NS_ERROR_DOM_ENCODING_NOT_SUPPORTED_ERR);
+ });
+ return;
+ }
- // If flush succeeded, schedule to output encoded data first
- // and then resolve the promise, then keep processing the
- // control messages.
- MOZ_ASSERT(aResult.IsResolve());
- nsTArray<RefPtr<MediaRawData>> data =
- std::move(aResult.ResolveValue());
-
- if (data.IsEmpty()) {
- LOG("%s %p gets no data for %s", EncoderType::Name.get(),
- self.get(), msgStr.get());
- } else {
- LOG("%s %p, schedule %zu encoded data output for %s",
- EncoderType::Name.get(), self.get(), data.Length(),
- msgStr.get());
- }
+ // If flush succeeded, schedule to output encoded data first
+ // and then resolve the promise, then keep processing the
+ // control messages.
+ MOZ_ASSERT(aResult.IsResolve());
+ nsTArray<RefPtr<MediaRawData>> data =
+ std::move(aResult.ResolveValue());
- RefPtr<Promise> promise = aMessage->TakePromise();
- self->QueueATask(
- "Flush: output encoded data task",
- [self = RefPtr{self}, promise, data = std::move(data)]()
- MOZ_CAN_RUN_SCRIPT_BOUNDARY {
- self->OutputEncodedData(std::move(data));
- promise->MaybeResolveWithUndefined();
- });
- self->mProcessingMessage = nullptr;
- self->ProcessControlMessageQueue();
- })
+ if (data.IsEmpty()) {
+ LOG("%s %p gets no data for %s", EncoderType::Name.get(),
+ self.get(), msgStr.get());
+ } else {
+ LOG("%s %p, schedule %zu encoded data output for %s",
+ EncoderType::Name.get(), self.get(), data.Length(),
+ msgStr.get());
+ }
+
+ const auto flushPromiseId =
+ static_cast<int64_t>(aMessage->mMessageId);
+ self->QueueATask(
+ "Flush: output encoded data task",
+ [self = RefPtr{self}, data = std::move(data),
+ flushPromiseId]() MOZ_CAN_RUN_SCRIPT_BOUNDARY {
+ self->OutputEncodedData(std::move(data));
+ // If Reset() was invoked before this task executes, or
+ // during the output callback above in the execution of
+ // this task, the promise in mPendingFlushPromises is
+ // handled there. Otherwise, the promise is resolved here.
+ if (Maybe<RefPtr<Promise>> p =
+ self->mPendingFlushPromises.Take(flushPromiseId)) {
+ LOG("%s %p, resolving the promise for flush %" PRId64,
+ EncoderType::Name.get(), self.get(), flushPromiseId);
+ p.value()->MaybeResolveWithUndefined();
+ }
+ });
+ self->mProcessingMessage = nullptr;
+ self->ProcessControlMessageQueue();
+ })
->Track(aMessage->Request());
return MessageProcessedResult::Processed;
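
The encoder side uses the same idempotent-settle race as the decoder: the completion task resolves only if Take() still finds the promise, and if Reset() ran first the map is already empty and the task does nothing. A compact runnable sketch of that race, with strings standing in for promises:

    #include <cstdint>
    #include <cstdio>
    #include <map>
    #include <optional>

    std::map<int64_t, const char*> gPending;  // flush id -> promise stand-in

    std::optional<const char*> Take(int64_t aId) {
      auto it = gPending.find(aId);
      if (it == gPending.end()) {
        return std::nullopt;
      }
      const char* promise = it->second;
      gPending.erase(it);
      return promise;
    }

    void CompletionTask(int64_t aId) {  // runs after Drain() resolves
      if (auto p = Take(aId)) {
        std::printf("resolve %s\n", *p);
      }
      // else: Reset() already rejected it; nothing left to do.
    }

    void Reset() {  // rejects everything still pending
      for (auto& entry : gPending) {
        std::printf("reject %s\n", entry.second);
      }
      gPending.clear();
    }

    int main() {
      gPending[1] = "flush #1";
      Reset();            // wins the race
      CompletionTask(1);  // safe no-op: the promise was already settled
    }
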
diff --git a/dom/media/webcodecs/EncoderTemplate.h b/dom/media/webcodecs/EncoderTemplate.h
index bc65edca46..ecadf68681 100644
--- a/dom/media/webcodecs/EncoderTemplate.h
+++ b/dom/media/webcodecs/EncoderTemplate.h
@@ -11,14 +11,15 @@
#include "EncoderAgent.h"
#include "MediaData.h"
+#include "SimpleMap.h"
#include "WebCodecsUtils.h"
#include "mozilla/DOMEventTargetHelper.h"
#include "mozilla/MozPromise.h"
#include "mozilla/RefPtr.h"
#include "mozilla/Result.h"
#include "mozilla/UniquePtr.h"
-#include "mozilla/dom/VideoEncoderBinding.h"
#include "mozilla/dom/AudioEncoderBinding.h"
+#include "mozilla/dom/VideoEncoderBinding.h"
#include "mozilla/dom/WorkerRef.h"
#include "mozilla/media/MediaUtils.h"
#include "nsStringFwd.h"
@@ -116,12 +117,10 @@ class EncoderTemplate : public DOMEventTargetHelper {
: public ControlMessage,
public MessageRequestHolder<EncoderAgent::EncodePromise> {
public:
- FlushMessage(WebCodecsId aConfigureId, Promise* aPromise);
+ explicit FlushMessage(WebCodecsId aConfigureId);
virtual void Cancel() override { Disconnect(); }
virtual bool IsProcessing() override { return Exists(); };
virtual RefPtr<FlushMessage> AsFlushMessage() override { return this; }
- already_AddRefed<Promise> TakePromise() { return mPromise.forget(); }
- void RejectPromiseIfAny(const nsresult& aReason);
nsCString ToString() const override {
nsCString rv;
@@ -129,9 +128,6 @@ class EncoderTemplate : public DOMEventTargetHelper {
this->mMessageId);
return rv;
}
-
- private:
- RefPtr<Promise> mPromise;
};
protected:
@@ -207,7 +203,7 @@ class EncoderTemplate : public DOMEventTargetHelper {
const nsresult& aResult);
void ProcessControlMessageQueue();
- void CancelPendingControlMessages(const nsresult& aResult);
+ void CancelPendingControlMessagesAndFlushPromises(const nsresult& aResult);
template <typename Func>
void QueueATask(const char* aName, Func&& aSteps);
@@ -236,6 +232,11 @@ class EncoderTemplate : public DOMEventTargetHelper {
std::queue<RefPtr<ControlMessage>> mControlMessageQueue;
RefPtr<ControlMessage> mProcessingMessage;
+  // When a flush request is initiated, a promise is created and stored in
+  // mPendingFlushPromises until it is settled, either in the task delivering
+  // the flush result or in Reset() if that runs before the task.
+ SimpleMap<int64_t, RefPtr<Promise>> mPendingFlushPromises;
+
uint32_t mEncodeQueueSize;
bool mDequeueEventScheduled;
diff --git a/dom/media/webcodecs/VideoEncoder.cpp b/dom/media/webcodecs/VideoEncoder.cpp
index 5407e917b6..4ce74fa0cb 100644
--- a/dom/media/webcodecs/VideoEncoder.cpp
+++ b/dom/media/webcodecs/VideoEncoder.cpp
@@ -343,25 +343,16 @@ static bool CanEncode(const RefPtr<VideoEncoderConfigInternal>& aConfig) {
if (!IsSupportedVideoCodec(parsedCodecString)) {
return false;
}
-
- // TODO (bug 1872879, bug 1872880): Support this on Windows and Mac.
if (aConfig->mScalabilityMode.isSome()) {
- // We only support L1T2 and L1T3 ScalabilityMode in VP8 and VP9 encoders on
- // Linux.
- bool supported = IsOnLinux() && (IsVP8CodecString(parsedCodecString) ||
- IsVP9CodecString(parsedCodecString))
- ? aConfig->mScalabilityMode->EqualsLiteral("L1T2") ||
- aConfig->mScalabilityMode->EqualsLiteral("L1T3")
- : false;
-
- if (!supported) {
+ // Check if ScalabilityMode string is valid.
+ if (!aConfig->mScalabilityMode->EqualsLiteral("L1T2") &&
+ !aConfig->mScalabilityMode->EqualsLiteral("L1T3")) {
LOGE("Scalability mode %s not supported for codec: %s",
NS_ConvertUTF16toUTF8(aConfig->mScalabilityMode.value()).get(),
NS_ConvertUTF16toUTF8(parsedCodecString).get());
return false;
}
}
-
return EncoderSupport::Supports(aConfig);
}
diff --git a/dom/media/webcodecs/crashtests/1889831.html b/dom/media/webcodecs/crashtests/1889831.html
new file mode 100644
index 0000000000..e88a028d16
--- /dev/null
+++ b/dom/media/webcodecs/crashtests/1889831.html
@@ -0,0 +1,21 @@
+<!DOCTYPE html>
+<script>
+document.addEventListener("DOMContentLoaded", async () => {
+ const decoder = new VideoDecoder({
+ 'output': (e) => {},
+ 'error': (e) => {},
+ });
+ decoder.configure({
+ codec: 'vp8',
+ codedWidth: 320,
+ codedHeight: 240,
+ visibleRect: {x: 0, y: 0, width: 320, height: 240},
+ displayWidth: 320,
+ displayHeight: 240,
+ });
+ decoder.decode(new EncodedVideoChunk(
+ {type: 'key', timestamp: 0, data: new ArrayBuffer(0)}));
+ decoder.decode(new EncodedVideoChunk(
+ {type: 'key', timestamp: 1, data: new ArrayBuffer(0)}));
+})
+</script>
diff --git a/dom/media/webcodecs/crashtests/crashtests.list b/dom/media/webcodecs/crashtests/crashtests.list
index 16fbd90ff5..9d9a453a42 100644
--- a/dom/media/webcodecs/crashtests/crashtests.list
+++ b/dom/media/webcodecs/crashtests/crashtests.list
@@ -3,4 +3,4 @@ skip-if(Android) pref(dom.media.webcodecs.enabled,true) load 1848460.html
skip-if(Android) pref(dom.media.webcodecs.enabled,true) load 1849271.html
skip-if(Android) pref(dom.media.webcodecs.enabled,true) load 1864475.html
skip-if(Android) pref(dom.media.webcodecs.enabled,true) load 1881079.html
-
+skip-if(Android) pref(dom.media.webcodecs.enabled,true) load 1889831.html
diff --git a/dom/media/webrtc/MediaEnginePrefs.h b/dom/media/webrtc/MediaEnginePrefs.h
index de5daf0ad9..e3eff7eba5 100644
--- a/dom/media/webrtc/MediaEnginePrefs.h
+++ b/dom/media/webrtc/MediaEnginePrefs.h
@@ -28,6 +28,7 @@ class MediaEnginePrefs {
mHeight(0),
mFPS(0),
mFreq(0),
+ mUsePlatformProcessing(false),
mAecOn(false),
mUseAecMobile(false),
mAgcOn(false),
@@ -44,6 +45,7 @@ class MediaEnginePrefs {
int32_t mHeight;
int32_t mFPS;
int32_t mFreq; // for test tones (fake:true)
+ bool mUsePlatformProcessing;
bool mAecOn;
bool mUseAecMobile;
bool mAgcOn;
diff --git a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
index 220dcf3bd8..c072717e00 100644
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -148,7 +148,7 @@ nsresult MediaEngineWebRTCMicrophoneSource::Reconfigure(
}
AudioProcessing::Config AudioInputProcessing::ConfigForPrefs(
- const MediaEnginePrefs& aPrefs) {
+ const MediaEnginePrefs& aPrefs) const {
AudioProcessing::Config config;
config.pipeline.multi_channel_render = true;
@@ -207,6 +207,19 @@ AudioProcessing::Config AudioInputProcessing::ConfigForPrefs(
config.high_pass_filter.enabled = aPrefs.mHPFOn;
+ if (mPlatformProcessingSetParams &
+ CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION) {
+ config.echo_canceller.enabled = false;
+ }
+ if (mPlatformProcessingSetParams &
+ CUBEB_INPUT_PROCESSING_PARAM_AUTOMATIC_GAIN_CONTROL) {
+ config.gain_controller1.enabled = config.gain_controller2.enabled = false;
+ }
+ if (mPlatformProcessingSetParams &
+ CUBEB_INPUT_PROCESSING_PARAM_NOISE_SUPPRESSION) {
+ config.noise_suppression.enabled = false;
+ }
+
return config;
}
@@ -412,11 +425,45 @@ void AudioInputProcessing::Disconnect(MediaTrackGraph* aGraph) {
aGraph->AssertOnGraphThread();
}
+void AudioInputProcessing::NotifySetRequestedInputProcessingParamsResult(
+ MediaTrackGraph* aGraph, cubeb_input_processing_params aRequestedParams,
+ const Result<cubeb_input_processing_params, int>& aResult) {
+ aGraph->AssertOnGraphThread();
+ if (aRequestedParams != RequestedInputProcessingParams(aGraph)) {
+ // This is a result from an old request, wait for a more recent one.
+ return;
+ }
+ if (aResult.isOk()) {
+ if (mPlatformProcessingSetParams == aResult.inspect()) {
+ // No change.
+ return;
+ }
+ mPlatformProcessingSetError = Nothing();
+ mPlatformProcessingSetParams = aResult.inspect();
+ LOG("AudioInputProcessing %p platform processing params are now %s.", this,
+ CubebUtils::ProcessingParamsToString(mPlatformProcessingSetParams)
+ .get());
+ } else {
+ mPlatformProcessingSetError = Some(aResult.inspectErr());
+ mPlatformProcessingSetParams = CUBEB_INPUT_PROCESSING_PARAM_NONE;
+ LOG("AudioInputProcessing %p platform processing params failed to apply. "
+ "Applying input processing config in libwebrtc.",
+ this);
+ }
+ ApplySettingsInternal(aGraph, mSettings);
+}
+
bool AudioInputProcessing::IsPassThrough(MediaTrackGraph* aGraph) const {
aGraph->AssertOnGraphThread();
// The high-pass filter is not taken into account when activating the
// pass through, since it's not controllable from content.
- return !(mSettings.mAecOn || mSettings.mAgcOn || mSettings.mNoiseOn);
+ auto config = AppliedConfig(aGraph);
+ auto aec = [](const auto& config) { return config.echo_canceller.enabled; };
+ auto agc = [](const auto& config) {
+ return config.gain_controller1.enabled || config.gain_controller2.enabled;
+ };
+ auto ns = [](const auto& config) { return config.noise_suppression.enabled; };
+ return !(aec(config) || agc(config) || ns(config));
}
void AudioInputProcessing::PassThroughChanged(MediaTrackGraph* aGraph) {
@@ -438,7 +485,7 @@ void AudioInputProcessing::PassThroughChanged(MediaTrackGraph* aGraph) {
}
}
-uint32_t AudioInputProcessing::GetRequestedInputChannelCount() {
+uint32_t AudioInputProcessing::GetRequestedInputChannelCount() const {
return mSettings.mChannels;
}
@@ -668,6 +715,10 @@ void AudioInputProcessing::ProcessOutputData(AudioProcessingTrack* aTrack,
return;
}
+ if (aChunk.mDuration == 0) {
+ return;
+ }
+
TrackRate sampleRate = aTrack->mSampleRate;
uint32_t framesPerPacket = GetPacketSize(sampleRate); // in frames
// Downmix from aChannels to MAX_CHANNELS if needed.
@@ -868,6 +919,7 @@ void AudioInputProcessing::PacketizeAndProcess(AudioProcessingTrack* aTrack,
!(mPacketCount % 50)) {
AudioProcessingStats stats = mAudioProcessing->GetStatistics();
char msg[1024];
+ msg[0] = '\0';
size_t offset = 0;
#define AddIfValue(format, member) \
if (stats.member.has_value()) { \
@@ -962,14 +1014,61 @@ void AudioInputProcessing::DeviceChanged(MediaTrackGraph* aGraph) {
aGraph, aGraph->CurrentDriver(), this);
}
+cubeb_input_processing_params
+AudioInputProcessing::RequestedInputProcessingParams(
+ MediaTrackGraph* aGraph) const {
+ aGraph->AssertOnGraphThread();
+ if (!mPlatformProcessingEnabled) {
+ return CUBEB_INPUT_PROCESSING_PARAM_NONE;
+ }
+ if (mPlatformProcessingSetError) {
+ return CUBEB_INPUT_PROCESSING_PARAM_NONE;
+ }
+ cubeb_input_processing_params params = CUBEB_INPUT_PROCESSING_PARAM_NONE;
+ if (mSettings.mAecOn) {
+ params |= CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION;
+ }
+ if (mSettings.mAgcOn) {
+ params |= CUBEB_INPUT_PROCESSING_PARAM_AUTOMATIC_GAIN_CONTROL;
+ }
+ if (mSettings.mNoiseOn) {
+ params |= CUBEB_INPUT_PROCESSING_PARAM_NOISE_SUPPRESSION;
+ }
+ return params;
+}
+
void AudioInputProcessing::ApplySettings(MediaTrackGraph* aGraph,
CubebUtils::AudioDeviceID aDeviceID,
const MediaEnginePrefs& aSettings) {
TRACE("AudioInputProcessing::ApplySettings");
aGraph->AssertOnGraphThread();
+ // CUBEB_ERROR_NOT_SUPPORTED means the backend does not support platform
+ // processing. In that case, leave the error in place so we don't request
+ // processing anew.
+ if (mPlatformProcessingSetError.valueOr(CUBEB_OK) !=
+ CUBEB_ERROR_NOT_SUPPORTED) {
+ mPlatformProcessingSetError = Nothing();
+ }
+
// Read previous state from mSettings.
uint32_t oldChannelCount = GetRequestedInputChannelCount();
+
+ ApplySettingsInternal(aGraph, aSettings);
+
+ if (oldChannelCount != GetRequestedInputChannelCount()) {
+ RequestedInputChannelCountChanged(aGraph, aDeviceID);
+ }
+}
+
+void AudioInputProcessing::ApplySettingsInternal(
+ MediaTrackGraph* aGraph, const MediaEnginePrefs& aSettings) {
+ TRACE("AudioInputProcessing::ApplySettingsInternal");
+ aGraph->AssertOnGraphThread();
+
+ mPlatformProcessingEnabled = aSettings.mUsePlatformProcessing;
+
+ // Read previous state from the applied config.
bool wasPassThrough = IsPassThrough(aGraph);
mSettings = aSettings;
@@ -977,14 +1076,20 @@ void AudioInputProcessing::ApplySettings(MediaTrackGraph* aGraph,
mAudioProcessing->ApplyConfig(ConfigForPrefs(aSettings));
}
- if (oldChannelCount != GetRequestedInputChannelCount()) {
- RequestedInputChannelCountChanged(aGraph, aDeviceID);
- }
if (wasPassThrough != IsPassThrough(aGraph)) {
PassThroughChanged(aGraph);
}
}
+webrtc::AudioProcessing::Config AudioInputProcessing::AppliedConfig(
+ MediaTrackGraph* aGraph) const {
+ aGraph->AssertOnGraphThread();
+ if (mAudioProcessing) {
+ return mAudioProcessing->GetConfig();
+ }
+ return ConfigForPrefs(mSettings);
+}
+
void AudioInputProcessing::End() {
mEnded = true;
mSegment.Clear();
@@ -1078,7 +1183,6 @@ void AudioInputProcessing::EnsureAudioProcessing(AudioProcessingTrack* aTrack) {
void AudioInputProcessing::ResetAudioProcessing(MediaTrackGraph* aGraph) {
aGraph->AssertOnGraphThread();
MOZ_ASSERT(IsPassThrough(aGraph) || !mEnabled);
- MOZ_ASSERT(mPacketizerInput);
LOG_FRAME(
"(Graph %p, Driver %p) AudioInputProcessing %p Resetting audio "
@@ -1091,9 +1195,10 @@ void AudioInputProcessing::ResetAudioProcessing(MediaTrackGraph* aGraph) {
mAudioProcessing->Initialize();
}
- MOZ_ASSERT(static_cast<uint32_t>(mSegment.GetDuration()) +
- mPacketizerInput->FramesAvailable() ==
- mPacketizerInput->mPacketSize);
+ MOZ_ASSERT_IF(mPacketizerInput,
+ static_cast<uint32_t>(mSegment.GetDuration()) +
+ mPacketizerInput->FramesAvailable() ==
+ mPacketizerInput->mPacketSize);
  // It's ok to clear all the internal buffers here since we won't use mSegment
// in pass-through mode or when audio processing is disabled.
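
The platform-processing handshake added here has two halves: prefs are translated into a cubeb params bitmask to request from the platform, and once a non-stale result arrives, whatever the platform actually took on is switched off in libwebrtc to avoid double-processing. A standalone sketch of both halves, with local flag definitions standing in for the cubeb constants (values illustrative):

    #include <cstdint>

    using input_processing_params = uint32_t;
    constexpr input_processing_params PARAM_NONE = 0;
    constexpr input_processing_params PARAM_ECHO_CANCELLATION = 1 << 0;
    constexpr input_processing_params PARAM_NOISE_SUPPRESSION = 1 << 1;
    constexpr input_processing_params PARAM_AUTOMATIC_GAIN_CONTROL = 1 << 2;

    struct Prefs {
      bool mAecOn = false;
      bool mAgcOn = false;
      bool mNoiseOn = false;
    };
    struct ApmConfig {  // simplified stand-in for AudioProcessing::Config
      bool aec = false;
      bool agc = false;
      bool ns = false;
    };

    // Half one: prefs -> requested platform params, as in
    // RequestedInputProcessingParams() above.
    input_processing_params RequestedParams(const Prefs& aPrefs) {
      input_processing_params params = PARAM_NONE;
      if (aPrefs.mAecOn) params |= PARAM_ECHO_CANCELLATION;
      if (aPrefs.mAgcOn) params |= PARAM_AUTOMATIC_GAIN_CONTROL;
      if (aPrefs.mNoiseOn) params |= PARAM_NOISE_SUPPRESSION;
      return params;
    }

    // Half two: disable in software whatever the platform applied,
    // mirroring the ConfigForPrefs() adjustments above.
    ApmConfig AdjustForPlatform(ApmConfig aConfig,
                                input_processing_params aSetParams) {
      if (aSetParams & PARAM_ECHO_CANCELLATION) aConfig.aec = false;
      if (aSetParams & PARAM_AUTOMATIC_GAIN_CONTROL) aConfig.agc = false;
      if (aSetParams & PARAM_NOISE_SUPPRESSION) aConfig.ns = false;
      return aConfig;
    }
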
diff --git a/dom/media/webrtc/MediaEngineWebRTCAudio.h b/dom/media/webrtc/MediaEngineWebRTCAudio.h
index 6b1fbf0089..705d33fc38 100644
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.h
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.h
@@ -117,7 +117,8 @@ class AudioInputProcessing : public AudioDataListener {
// If we're passing data directly without AEC or any other process, this
    // means that all voice-processing has been disabled intentionally. In this
// case, consider that the device is not used for voice input.
- return !IsPassThrough(aGraph);
+ return !IsPassThrough(aGraph) ||
+ mPlatformProcessingSetParams != CUBEB_INPUT_PROCESSING_PARAM_NONE;
}
void Start(MediaTrackGraph* aGraph);
@@ -125,18 +126,27 @@ class AudioInputProcessing : public AudioDataListener {
void DeviceChanged(MediaTrackGraph* aGraph) override;
- uint32_t RequestedInputChannelCount(MediaTrackGraph*) override {
+ uint32_t RequestedInputChannelCount(MediaTrackGraph*) const override {
return GetRequestedInputChannelCount();
}
+ cubeb_input_processing_params RequestedInputProcessingParams(
+ MediaTrackGraph* aGraph) const override;
+
void Disconnect(MediaTrackGraph* aGraph) override;
+ void NotifySetRequestedInputProcessingParamsResult(
+ MediaTrackGraph* aGraph, cubeb_input_processing_params aRequestedParams,
+ const Result<cubeb_input_processing_params, int>& aResult) override;
+
void PacketizeAndProcess(AudioProcessingTrack* aTrack,
const AudioSegment& aSegment);
- uint32_t GetRequestedInputChannelCount();
+ uint32_t GetRequestedInputChannelCount() const;
+
// This is true when all processing is disabled, in which case we can skip
- // packetization, resampling and other processing passes.
+ // packetization, resampling and other processing passes. Processing may still
+ // be applied by the platform on the underlying input track.
bool IsPassThrough(MediaTrackGraph* aGraph) const;
  // This allows changing the APM options, enabling or disabling processing
@@ -146,6 +156,9 @@ class AudioInputProcessing : public AudioDataListener {
CubebUtils::AudioDeviceID aDeviceID,
const MediaEnginePrefs& aSettings);
+ // The config currently applied to the audio processing module.
+ webrtc::AudioProcessing::Config AppliedConfig(MediaTrackGraph* aGraph) const;
+
void End();
TrackTime NumBufferedFrames(MediaTrackGraph* aGraph) const;
@@ -163,13 +176,15 @@ class AudioInputProcessing : public AudioDataListener {
private:
~AudioInputProcessing() = default;
webrtc::AudioProcessing::Config ConfigForPrefs(
- const MediaEnginePrefs& aPrefs);
+ const MediaEnginePrefs& aPrefs) const;
void PassThroughChanged(MediaTrackGraph* aGraph);
void RequestedInputChannelCountChanged(MediaTrackGraph* aGraph,
CubebUtils::AudioDeviceID aDeviceId);
void EnsurePacketizer(AudioProcessingTrack* aTrack);
void EnsureAudioProcessing(AudioProcessingTrack* aTrack);
void ResetAudioProcessing(MediaTrackGraph* aGraph);
+ void ApplySettingsInternal(MediaTrackGraph* aGraph,
+ const MediaEnginePrefs& aSettings);
PrincipalHandle GetCheckedPrincipal(const AudioSegment& aSegment);
  // This implements the processing algorithm to apply to the input (e.g. a
  // microphone). If all algorithms are disabled, this class is not used. This
@@ -186,6 +201,17 @@ class AudioInputProcessing : public AudioDataListener {
// The current settings from about:config preferences and content-provided
// constraints.
MediaEnginePrefs mSettings;
+ // When false, RequestedInputProcessingParams() returns no params, resulting
+  // in processing being disabled in the platform.
+ bool mPlatformProcessingEnabled = false;
+ // The latest error notified to us through
+ // NotifySetRequestedInputProcessingParamsResult, or Nothing if the latest
+ // request was successful, or if a request is pending a result.
+ Maybe<int> mPlatformProcessingSetError;
+ // The processing params currently applied in the platform. This allows
+ // adapting the AudioProcessingConfig accordingly.
+ cubeb_input_processing_params mPlatformProcessingSetParams =
+ CUBEB_INPUT_PROCESSING_PARAM_NONE;
// Buffer for up to one 10ms packet of planar mixed audio output for the
// reverse-stream (speaker data) of mAudioProcessing AEC.
// Length is packet size * channel count, regardless of how many frames are
diff --git a/dom/media/webrtc/jsapi/RTCTransformEventRunnable.cpp b/dom/media/webrtc/jsapi/RTCTransformEventRunnable.cpp
index 6f41baf80f..16e3c2fd28 100644
--- a/dom/media/webrtc/jsapi/RTCTransformEventRunnable.cpp
+++ b/dom/media/webrtc/jsapi/RTCTransformEventRunnable.cpp
@@ -57,8 +57,8 @@ already_AddRefed<Event> RTCTransformEventRunnable::BuildEvent(
// Set transformer.[[writable]] to writable.
RefPtr<RTCRtpScriptTransformer> transformer =
new RTCRtpScriptTransformer(aGlobal);
- nsresult nrv =
- transformer->Init(aCx, aTransformerOptions, mWorkerPrivate, mProxy);
+ nsresult nrv = transformer->Init(aCx, aTransformerOptions,
+ GetCurrentThreadWorkerPrivate(), mProxy);
if (NS_WARN_IF(NS_FAILED(nrv))) {
// TODO: Error handling. Currently unspecified.
return nullptr;
diff --git a/dom/media/webrtc/libwebrtcglue/VideoConduit.cpp b/dom/media/webrtc/libwebrtcglue/VideoConduit.cpp
index 5862237711..e863934ebc 100644
--- a/dom/media/webrtc/libwebrtcglue/VideoConduit.cpp
+++ b/dom/media/webrtc/libwebrtcglue/VideoConduit.cpp
@@ -750,8 +750,20 @@ void WebrtcVideoConduit::OnControlConfigChange() {
// TODO this is for webrtc-priority, but needs plumbing bits
mEncoderConfig.bitrate_priority = 1.0;
+ // Populate simulcast_layers with their config (not dimensions or
+  // dimensions-derived properties, as those are only known once a frame
+  // to be sent is known).
+ mEncoderConfig.simulcast_layers.clear();
+ for (size_t idx = 0; idx < streamCount; ++idx) {
+ webrtc::VideoStream video_stream;
+ auto& encoding = codecConfig->mEncodings[idx];
+ video_stream.active = encoding.active;
+ mEncoderConfig.simulcast_layers.push_back(video_stream);
+ }
+
// Expected max number of encodings
- mEncoderConfig.number_of_streams = streamCount;
+ mEncoderConfig.number_of_streams =
+ mEncoderConfig.simulcast_layers.size();
// libwebrtc disables this by default.
mSendStreamConfig.suspend_below_min_bitrate = false;
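
With this change, per-layer config (currently just the active flag) flows from the conduit into VideoEncoderConfig::simulcast_layers, number_of_streams is derived from that vector, and VideoStreamFactory can assert the two stay in sync. A minimal sketch of that producer/consumer contract with simplified local types (not the libwebrtc declarations):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    struct VideoStream { bool active = false; };  // simplified
    struct EncoderConfig {
      std::vector<VideoStream> simulcast_layers;
      size_t number_of_streams = 0;
    };

    // Producer (OnControlConfigChange): one pre-populated layer per encoding;
    // dimension-derived fields are filled in later, per frame.
    EncoderConfig BuildConfig(const std::vector<bool>& aEncodingsActive) {
      EncoderConfig cfg;
      for (bool active : aEncodingsActive) {
        VideoStream vs;
        vs.active = active;
        cfg.simulcast_layers.push_back(vs);
      }
      cfg.number_of_streams = cfg.simulcast_layers.size();
      return cfg;
    }

    // Consumer (CreateEncoderStreams): start each stream from its
    // pre-populated layer instead of a default-constructed one.
    std::vector<VideoStream> CreateStreams(const EncoderConfig& aCfg,
                                           size_t aCount) {
      assert(aCount <= aCfg.simulcast_layers.size());
      std::vector<VideoStream> streams;
      for (size_t i = 0; i < aCount; ++i) {
        streams.push_back(aCfg.simulcast_layers[i]);
      }
      return streams;
    }
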
diff --git a/dom/media/webrtc/libwebrtcglue/VideoStreamFactory.cpp b/dom/media/webrtc/libwebrtcglue/VideoStreamFactory.cpp
index 0ead26a453..d3047f4fca 100644
--- a/dom/media/webrtc/libwebrtcglue/VideoStreamFactory.cpp
+++ b/dom/media/webrtc/libwebrtcglue/VideoStreamFactory.cpp
@@ -150,6 +150,7 @@ std::vector<webrtc::VideoStream> VideoStreamFactory::CreateEncoderStreams(
: aConfig.number_of_streams;
MOZ_RELEASE_ASSERT(streamCount >= 1, "Should request at least one stream");
+ MOZ_RELEASE_ASSERT(streamCount <= aConfig.simulcast_layers.size());
std::vector<webrtc::VideoStream> streams;
streams.reserve(streamCount);
@@ -160,10 +161,10 @@ std::vector<webrtc::VideoStream> VideoStreamFactory::CreateEncoderStreams(
}
for (size_t idx = 0; idx < streamCount; ++idx) {
- webrtc::VideoStream video_stream;
+ webrtc::VideoStream video_stream = aConfig.simulcast_layers[idx];
auto& encoding = mCodecConfig.mEncodings[idx];
- video_stream.active = encoding.active;
MOZ_ASSERT(encoding.constraints.scaleDownBy >= 1.0);
+ MOZ_ASSERT(video_stream.active == encoding.active);
gfx::IntSize newSize(0, 0);
diff --git a/dom/media/webrtc/metrics.yaml b/dom/media/webrtc/metrics.yaml
index aea5cf17fb..dfa8c120f1 100644
--- a/dom/media/webrtc/metrics.yaml
+++ b/dom/media/webrtc/metrics.yaml
@@ -404,3 +404,84 @@ codec.stats:
notification_emails:
- webrtc-telemetry-alerts@mozilla.com
expires: 132
+
+webrtcdtls:
+ protocol_version:
+ type: labeled_counter
+ description: >
+ The version of DTLS used for each webrtc connection. Can be 1.0, 1.2, or 1.3 (there is no 1.1 version of DTLS)
+ bugs:
+ - https://bugzilla.mozilla.org/show_bug.cgi?id=1884140
+ data_reviews:
+ - https://bugzilla.mozilla.org/show_bug.cgi?id=1884140
+ data_sensitivity:
+ - technical
+ notification_emails:
+ - webrtc-telemetry-alerts@mozilla.com
+ expires: 135
+
+ cipher:
+ type: labeled_counter
+ description: >
+ The CipherSuite used for each webrtc DTLS connection, as a string
+    representation of the CipherSuite's ID in 4 hex digits (e.g.,
+ TLS_DHE_RSA_WITH_AES_128_CBC_SHA would be "0x0033")
+ bugs:
+ - https://bugzilla.mozilla.org/show_bug.cgi?id=1884140
+ data_reviews:
+ - https://bugzilla.mozilla.org/show_bug.cgi?id=1884140
+ data_sensitivity:
+ - technical
+ notification_emails:
+ - webrtc-telemetry-alerts@mozilla.com
+ expires: 135
+
+ srtp_cipher:
+ type: labeled_counter
+ description: >
+ The SRTPProtectionProfile (see RFC 5764) used for each webrtc SRTP
+ connection, as a string representation of the SRTPProtectionProfile's ID
+    in 4 hex digits (e.g., SRTP_AES128_CM_HMAC_SHA1_80 would be "0x0001")
+ bugs:
+ - https://bugzilla.mozilla.org/show_bug.cgi?id=1884140
+ data_reviews:
+ - https://bugzilla.mozilla.org/show_bug.cgi?id=1884140
+ data_sensitivity:
+ - technical
+ notification_emails:
+ - webrtc-telemetry-alerts@mozilla.com
+ expires: 135
+
+ client_handshake_result:
+ type: labeled_counter
+ description: >
+ The result of each webrtc client DTLS handshake as a string containing
+      either the name of the error code (e.g., SSL_ERROR_BAD_CERTIFICATE),
+ SUCCESS for successful handshakes, ALPN_FAILURE when ALPN negotiation
+ fails, or CERT_FAILURE when cert validation fails.
+ bugs:
+ - https://bugzilla.mozilla.org/show_bug.cgi?id=1884140
+ data_reviews:
+ - https://bugzilla.mozilla.org/show_bug.cgi?id=1884140
+ data_sensitivity:
+ - technical
+ notification_emails:
+ - webrtc-telemetry-alerts@mozilla.com
+ expires: 135
+
+ server_handshake_result:
+ type: labeled_counter
+ description: >
+ The result of each webrtc server DTLS handshake, as the name of the error
+      code (e.g., SSL_ERROR_BAD_CERTIFICATE), the empty string for successful
+ handshakes, ALPN_FAILURE when ALPN negotiation fails, or CERT_FAILURE when
+ cert validation fails.
+ bugs:
+ - https://bugzilla.mozilla.org/show_bug.cgi?id=1884140
+ data_reviews:
+ - https://bugzilla.mozilla.org/show_bug.cgi?id=1884140
+ data_sensitivity:
+ - technical
+ notification_emails:
+ - webrtc-telemetry-alerts@mozilla.com
+ expires: 135
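
Each of these metrics is a labeled_counter: one independent monotonic count per label ("1.3", "0x1301", "SUCCESS", and so on). In Gecko they are presumably recorded through the generated glean API, by analogy with the decoder-support calls earlier in this patch (something like glean::webrtcdtls::protocol_version.Get("1.3"_ns).Add(1); an assumption, not a quoted call site). A standalone model of the data structure itself:

    #include <cstdint>
    #include <map>
    #include <string>

    // Toy model of a glean labeled_counter: each label owns its own
    // monotonically increasing count.
    class LabeledCounter {
     public:
      void Add(const std::string& aLabel, uint32_t aAmount = 1) {
        mCounts[aLabel] += aAmount;
      }
      uint32_t Get(const std::string& aLabel) const {
        auto it = mCounts.find(aLabel);
        return it == mCounts.end() ? 0 : it->second;
      }

     private:
      std::map<std::string, uint32_t> mCounts;
    };

    // e.g. on DTLS handshake completion:
    //   gProtocolVersion.Add("1.3");
    //   gCipher.Add("0x1301");
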
diff --git a/dom/media/webrtc/sdp/rsdparsa_capi/src/lib.rs b/dom/media/webrtc/sdp/rsdparsa_capi/src/lib.rs
index 20a13900a2..1286a5d338 100644
--- a/dom/media/webrtc/sdp/rsdparsa_capi/src/lib.rs
+++ b/dom/media/webrtc/sdp/rsdparsa_capi/src/lib.rs
@@ -99,7 +99,7 @@ pub unsafe extern "C" fn sdp_free_session(sdp_ptr: *mut SdpSession) {
pub unsafe extern "C" fn sdp_new_reference(session: *mut SdpSession) -> *const SdpSession {
let original = Rc::from_raw(session);
let ret = Rc::into_raw(Rc::clone(&original));
- Rc::into_raw(original); // So the original reference doesn't get dropped
+ std::mem::forget(original); // So the original reference doesn't get dropped
ret
}
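
The Rust fix replaces Rc::into_raw(original) with std::mem::forget(original): both stop the strong count from being decremented when original leaves scope, but forget states the intent directly instead of minting a second raw pointer that looks like it needs freeing. The same hand-out-a-new-strong-reference pattern, sketched in C++ for a hypothetical refcounted C-ABI handle (Session and these functions are illustrative, not the sdp bindings):

    #include <atomic>

    struct Session {
      std::atomic<int> refs{1};
    };

    // Equivalent of Rc::clone + mem::forget: bump the count for the raw
    // reference handed out, without disturbing the caller's reference.
    const Session* session_new_reference(Session* aSession) {
      aSession->refs.fetch_add(1, std::memory_order_relaxed);
      return aSession;
    }

    void session_free(Session* aSession) {
      if (aSession->refs.fetch_sub(1, std::memory_order_acq_rel) == 1) {
        delete aSession;
      }
    }
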
diff --git a/dom/media/webrtc/sdp/rsdparsa_capi/src/types.rs b/dom/media/webrtc/sdp/rsdparsa_capi/src/types.rs
index 2522c8333d..7b85a173fb 100644
--- a/dom/media/webrtc/sdp/rsdparsa_capi/src/types.rs
+++ b/dom/media/webrtc/sdp/rsdparsa_capi/src/types.rs
@@ -3,7 +3,6 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use libc::size_t;
-use std::boxed::Box;
use std::convert::TryInto;
use std::error::Error;
use std::ffi::CStr;
diff --git a/dom/media/webrtc/tests/mochitests/head.js b/dom/media/webrtc/tests/mochitests/head.js
index 1fd559217a..cc0992b469 100644
--- a/dom/media/webrtc/tests/mochitests/head.js
+++ b/dom/media/webrtc/tests/mochitests/head.js
@@ -413,7 +413,7 @@ function setupEnvironment() {
// If either fake audio or video is desired we enable fake streams.
// If loopback devices are set they will be chosen instead of fakes in gecko.
["media.navigator.streams.fake", WANT_FAKE_AUDIO || WANT_FAKE_VIDEO],
- ["media.getusermedia.audiocapture.enabled", true],
+ ["media.getusermedia.audio.capture.enabled", true],
["media.getusermedia.screensharing.enabled", true],
["media.getusermedia.window.focus_source.enabled", false],
["media.recorder.audio_node.enabled", true],
diff --git a/dom/media/webrtc/tests/mochitests/iceTestUtils.js b/dom/media/webrtc/tests/mochitests/iceTestUtils.js
index 9e76e3f7df..23237f563b 100644
--- a/dom/media/webrtc/tests/mochitests/iceTestUtils.js
+++ b/dom/media/webrtc/tests/mochitests/iceTestUtils.js
@@ -75,6 +75,18 @@ async function iceConnected(pc) {
});
}
+async function dtlsConnected(pc) {
+ return new Promise((resolve, reject) => {
+ pc.addEventListener("connectionstatechange", () => {
+ if (["connected", "completed"].includes(pc.connectionState)) {
+ resolve();
+ } else if (pc.connectionState == "failed") {
+ reject(new Error(`Connection failed`));
+ }
+ });
+ });
+}
+
// Set up trickle, but does not wait for it to complete. Can be used by itself
// in cases where we do not expect any new candidates, but want to still set up
// the signal handling in case new candidates _do_ show up.
@@ -87,7 +99,8 @@ async function connect(
answerer,
timeout,
context,
- noTrickleWait = false
+ noTrickleWait = false,
+ waitForDtls = false
) {
const trickle1 = trickleIce(offerer, answerer);
const trickle2 = trickleIce(answerer, offerer);
@@ -110,8 +123,12 @@ async function connect(
}
};
+ const connectionPromises = waitForDtls
+ ? [dtlsConnected(offerer), dtlsConnected(answerer)]
+ : [iceConnected(offerer), iceConnected(answerer)];
+
await Promise.race([
- Promise.all([iceConnected(offerer), iceConnected(answerer)]),
+ Promise.all(connectionPromises),
throwOnTimeout(timeout, context),
]);
} finally {
diff --git a/dom/media/webrtc/tests/mochitests/test_peerConnection_glean.html b/dom/media/webrtc/tests/mochitests/test_peerConnection_glean.html
index 1faf464566..a382949823 100644
--- a/dom/media/webrtc/tests/mochitests/test_peerConnection_glean.html
+++ b/dom/media/webrtc/tests/mochitests/test_peerConnection_glean.html
@@ -4,6 +4,7 @@
<head>
<script type="application/javascript" src="pc.js"></script>
<script type="application/javascript" src="sdpUtils.js"></script>
+ <script type="application/javascript" src="iceTestUtils.js"></script>
</head>
<body>
@@ -580,6 +581,187 @@
ok(preferredVideoCodec == 6, "checkLoggingMultipleTransceivers glean should show preferred video codec VP8 " + preferredVideoCodec);
},
+ async function checkDtlsHandshakeSuccess() {
+ const pc1 = new RTCPeerConnection();
+ const pc2 = new RTCPeerConnection();
+ await gleanResetTestValues();
+ let client_successes = await GleanTest.webrtcdtls.clientHandshakeResult.SUCCESS.testGetValue() || 0;
+ let server_successes = await GleanTest.webrtcdtls.serverHandshakeResult.SUCCESS.testGetValue() || 0;
+ let cipher_count = await GleanTest.webrtcdtls.cipher["0x1301"].testGetValue() || 0;
+ let srtp_cipher_count = await GleanTest.webrtcdtls.srtpCipher["0x0007"].testGetValue() || 0;
+ is(client_successes, 0);
+ is(server_successes, 0);
+ is(cipher_count, 0);
+ is(srtp_cipher_count, 0);
+
+ const stream = await navigator.mediaDevices.getUserMedia({ video: true });
+ pc1.addTrack(stream.getTracks()[0]);
+
+ await connect(pc1, pc2, 32000, "DTLS connected", true, true);
+
+ client_successes = await GleanTest.webrtcdtls.clientHandshakeResult.SUCCESS.testGetValue() || 0;
+ server_successes = await GleanTest.webrtcdtls.serverHandshakeResult.SUCCESS.testGetValue() || 0;
+ cipher_count = await GleanTest.webrtcdtls.cipher["0x1301"].testGetValue() || 0;
+ srtp_cipher_count = await GleanTest.webrtcdtls.srtpCipher["0x0007"].testGetValue() || 0;
+ is(client_successes, 1);
+ is(server_successes, 1);
+ is(cipher_count, 2);
+ is(srtp_cipher_count, 2);
+ },
+
+ async function checkDtlsCipherPrefs() {
+ await withPrefs([["security.tls13.aes_128_gcm_sha256", false],
+ ["security.tls13.aes_256_gcm_sha384", false],
+ ["security.tls13.chacha20_poly1305_sha256", true]],
+ async () => {
+ const pc1 = new RTCPeerConnection();
+ const pc2 = new RTCPeerConnection();
+ await gleanResetTestValues();
+ let cipher_count = await GleanTest.webrtcdtls.cipher["0x1303"].testGetValue() || 0;
+ is(cipher_count, 0);
+
+ const stream = await navigator.mediaDevices.getUserMedia({ video: true });
+ pc1.addTrack(stream.getTracks()[0]);
+
+ await connect(pc1, pc2, 32000, "DTLS connected", true, true);
+
+ cipher_count = await GleanTest.webrtcdtls.cipher["0x1303"].testGetValue() || 0;
+ is(cipher_count, 2);
+ });
+ },
+
+ async function checkDtlsHandshakeFailure() {
+ // We don't have many failures we can induce here, but messing up the
+ // fingerprint is one way.
+ const offerer = new RTCPeerConnection();
+ const answerer = new RTCPeerConnection();
+ await gleanResetTestValues();
+ let client_failures = await GleanTest.webrtcdtls.clientHandshakeResult.SSL_ERROR_BAD_CERTIFICATE.testGetValue() || 0;
+ let server_failures = await GleanTest.webrtcdtls.serverHandshakeResult.SSL_ERROR_BAD_CERT_ALERT.testGetValue() || 0;
+ is(client_failures, 0);
+ is(server_failures, 0);
+
+ const stream = await navigator.mediaDevices.getUserMedia({ video: true });
+ offerer.addTrack(stream.getTracks()[0]);
+
+ trickleIce(offerer, answerer);
+ trickleIce(answerer, offerer);
+ await offerer.setLocalDescription();
+ let badSdp = offerer.localDescription.sdp;
+ // Tweak the last digit in the fingerprint sent to the answerer. Answerer
+ // (which will be the DTLS client) will get an SSL_ERROR_BAD_CERTIFICATE
+ // error, and the offerer (which will be the DTLS server) will get an
+ // SSL_ERROR_BAD_CERT_ALERT.
+ const lastDigit = badSdp.match(/a=fingerprint:.*([0-9A-F])$/m)[1];
+ const newLastDigit = lastDigit == '0' ? '1' : '0';
+ badSdp = badSdp.replace(/(a=fingerprint:.*)[0-9A-F]$/m, "$1" + newLastDigit);
+ info(badSdp);
+ await answerer.setRemoteDescription({sdp: badSdp, type: "offer"});
+ await answerer.setLocalDescription();
+ await offerer.setRemoteDescription(answerer.localDescription);
+
+      const timeout = 32000;
+      const throwOnTimeout = async () => {
+        await wait(timeout);
+        throw new Error(
+          `ICE did not complete within ${timeout} ms`);
+      };
+
+ const connectionPromises = [connectionStateReached(offerer, "failed"),
+ connectionStateReached(answerer, "failed")];
+
+ await Promise.race([
+ Promise.all(connectionPromises),
+ throwOnTimeout()
+ ]);
+
+ client_failures = await GleanTest.webrtcdtls.clientHandshakeResult.SSL_ERROR_BAD_CERTIFICATE.testGetValue() || 0;
+ server_failures = await GleanTest.webrtcdtls.serverHandshakeResult.SSL_ERROR_BAD_CERT_ALERT.testGetValue() || 0;
+ is(client_failures, 1);
+ is(server_failures, 1);
+ },
+
+ async function checkDtlsVersion1_3() {
+ // 1.3 should be the default
+ const pc1 = new RTCPeerConnection();
+ const pc2 = new RTCPeerConnection();
+ await gleanResetTestValues();
+ let count1_0 = await GleanTest.webrtcdtls.protocolVersion["1.0"].testGetValue() || 0;
+ let count1_2 = await GleanTest.webrtcdtls.protocolVersion["1.2"].testGetValue() || 0;
+ let count1_3 = await GleanTest.webrtcdtls.protocolVersion["1.3"].testGetValue() || 0;
+ is(count1_0, 0);
+ is(count1_2, 0);
+ is(count1_3, 0);
+
+ const stream = await navigator.mediaDevices.getUserMedia({ video: true });
+ pc1.addTrack(stream.getTracks()[0]);
+
+ await connect(pc1, pc2, 32000, "DTLS connected", true, true);
+
+ count1_0 = await GleanTest.webrtcdtls.protocolVersion["1.0"].testGetValue() || 0;
+ count1_2 = await GleanTest.webrtcdtls.protocolVersion["1.2"].testGetValue() || 0;
+ count1_3 = await GleanTest.webrtcdtls.protocolVersion["1.3"].testGetValue() || 0;
+ is(count1_0, 0);
+ is(count1_2, 0);
+ is(count1_3, 2);
+ },
+
+ async function checkDtlsVersion1_2() {
+ // Make 1.2 the default
+ await withPrefs([["media.peerconnection.dtls.version.max", 771]],
+ async () => {
+ const pc1 = new RTCPeerConnection();
+ const pc2 = new RTCPeerConnection();
+ await gleanResetTestValues();
+ let count1_0 = await GleanTest.webrtcdtls.protocolVersion["1.0"].testGetValue() || 0;
+ let count1_2 = await GleanTest.webrtcdtls.protocolVersion["1.2"].testGetValue() || 0;
+ let count1_3 = await GleanTest.webrtcdtls.protocolVersion["1.3"].testGetValue() || 0;
+ is(count1_0, 0);
+ is(count1_2, 0);
+ is(count1_3, 0);
+
+ const stream = await navigator.mediaDevices.getUserMedia({ video: true });
+ pc1.addTrack(stream.getTracks()[0]);
+
+ await connect(pc1, pc2, 32000, "DTLS connected", true, true);
+
+ count1_0 = await GleanTest.webrtcdtls.protocolVersion["1.0"].testGetValue() || 0;
+ count1_2 = await GleanTest.webrtcdtls.protocolVersion["1.2"].testGetValue() || 0;
+ count1_3 = await GleanTest.webrtcdtls.protocolVersion["1.3"].testGetValue() || 0;
+ is(count1_0, 0);
+ is(count1_2, 2);
+ is(count1_3, 0);
+ });
+ },
+
+ async function checkDtlsVersion1_0() {
+ // Make 1.0 the default
+ await withPrefs([["media.peerconnection.dtls.version.max", 770],
+ ["media.peerconnection.dtls.version.min", 770]],
+ async () => {
+ const pc1 = new RTCPeerConnection();
+ const pc2 = new RTCPeerConnection();
+ await gleanResetTestValues();
+ let count1_0 = await GleanTest.webrtcdtls.protocolVersion["1.0"].testGetValue() || 0;
+ let count1_2 = await GleanTest.webrtcdtls.protocolVersion["1.2"].testGetValue() || 0;
+ let count1_3 = await GleanTest.webrtcdtls.protocolVersion["1.3"].testGetValue() || 0;
+ is(count1_0, 0);
+ is(count1_2, 0);
+ is(count1_3, 0);
+
+ const stream = await navigator.mediaDevices.getUserMedia({ video: true });
+ pc1.addTrack(stream.getTracks()[0]);
+
+ await connect(pc1, pc2, 32000, "DTLS connected", true, true);
+
+ count1_0 = await GleanTest.webrtcdtls.protocolVersion["1.0"].testGetValue() || 0;
+ count1_2 = await GleanTest.webrtcdtls.protocolVersion["1.2"].testGetValue() || 0;
+ count1_3 = await GleanTest.webrtcdtls.protocolVersion["1.3"].testGetValue() || 0;
+ is(count1_0, 2);
+ is(count1_2, 0);
+ is(count1_3, 0);
+ });
+ },
+
];
runNetworkTest(async () => {
diff --git a/dom/media/webrtc/third_party_build/default_config_env b/dom/media/webrtc/third_party_build/default_config_env
index be3c5ba7c1..0fef4d3192 100644
--- a/dom/media/webrtc/third_party_build/default_config_env
+++ b/dom/media/webrtc/third_party_build/default_config_env
@@ -5,41 +5,41 @@
export MOZ_LIBWEBRTC_SRC=$STATE_DIR/moz-libwebrtc
# The previous fast-forward bug number is used for some error messaging.
-export MOZ_PRIOR_FASTFORWARD_BUG="1876843"
+export MOZ_PRIOR_FASTFORWARD_BUG="1883116"
# Fast-forwarding each Chromium version of libwebrtc should be done
# under a separate bugzilla bug. This bug number is used when crafting
# the commit summary as each upstream commit is vendored into the
# mercurial repository. The bug used for the v106 fast-forward was
# 1800920.
-export MOZ_FASTFORWARD_BUG="1883116"
+export MOZ_FASTFORWARD_BUG="1888181"
# MOZ_NEXT_LIBWEBRTC_MILESTONE and MOZ_NEXT_FIREFOX_REL_TARGET are
# not used during fast-forward processing, but facilitate generating this
# default config. To generate a default config for the next update, run
# bash dom/media/webrtc/third_party_build/update_default_config_env.sh
-export MOZ_NEXT_LIBWEBRTC_MILESTONE=122
-export MOZ_NEXT_FIREFOX_REL_TARGET=126
+export MOZ_NEXT_LIBWEBRTC_MILESTONE=123
+export MOZ_NEXT_FIREFOX_REL_TARGET=127
# For Chromium release branches, see:
# https://chromiumdash.appspot.com/branches
-# Chromium's v121 release branch was 6167. This is used to pre-stack
+# Chromium's v122 release branch was 6261. This is used to pre-stack
# the previous release branch's commits onto the appropriate base commit
# (the first common commit between trunk and the release branch).
-export MOZ_PRIOR_UPSTREAM_BRANCH_HEAD_NUM="6167"
+export MOZ_PRIOR_UPSTREAM_BRANCH_HEAD_NUM="6261"
-# New target release branch for v122 is branch-heads/6261. This is used
+# New target release branch for v123 is branch-heads/6312. This is used
# to calculate the next upstream commit.
-export MOZ_TARGET_UPSTREAM_BRANCH_HEAD="branch-heads/6261"
+export MOZ_TARGET_UPSTREAM_BRANCH_HEAD="branch-heads/6312"
# For local development 'mozpatches' is fine for a branch name, but when
# pushing the patch stack to github, it should be named something like
-# 'moz-mods-chr122-for-rel126'.
+# 'moz-mods-chr123-for-rel127'.
export MOZ_LIBWEBRTC_BRANCH="mozpatches"
# After elm has been merged to mozilla-central, the patch stack in
# moz-libwebrtc should be pushed to github. The script
# push_official_branch.sh uses this branch name when pushing to the
# public repo.
-export MOZ_LIBWEBRTC_OFFICIAL_BRANCH="moz-mods-chr122-for-rel126"
+export MOZ_LIBWEBRTC_OFFICIAL_BRANCH="moz-mods-chr123-for-rel127"
diff --git a/dom/media/webrtc/third_party_build/elm_rebase.sh b/dom/media/webrtc/third_party_build/elm_rebase.sh
index 0dbf93d3ce..734fcacc40 100644
--- a/dom/media/webrtc/third_party_build/elm_rebase.sh
+++ b/dom/media/webrtc/third_party_build/elm_rebase.sh
@@ -54,15 +54,32 @@ be as simple as running the following commands:
COMMIT_LIST_FILE=$TMP_DIR/rebase-commit-list.txt
export HGPLAIN=1
+if [ "x$MOZ_TOP_FF" = "x" ]; then
+ MOZ_TOP_FF=""
+fi
+if [ "x$MOZ_BOTTOM_FF" = "x" ]; then
+ MOZ_BOTTOM_FF=""
+fi
+if [ "x$STOP_FOR_REORDER" = "x" ]; then
+ STOP_FOR_REORDER=""
+fi
+
# After this point:
# * eE: All commands should succeed.
+# * u: All variables should be defined before use.
# * o pipefail: All stages of all pipes should succeed.
-set -eEo pipefail
+set -eEuo pipefail
if [ -f $STATE_DIR/rebase_resume_state ]; then
source $STATE_DIR/rebase_resume_state
else
+ # on first run, verify the patch-stack is sane so that the guidance
+ # printed at the end is accurate about changes in
+ # third_party/libwebrtc between the old central we're currently
+ # based on and the new central we're rebasing onto.
+ bash dom/media/webrtc/third_party_build/verify_vendoring.sh
+
if [ "x" == "x$MOZ_TOP_FF" ]; then
MOZ_TOP_FF=`hg log -r . -T"{node|short}"`
@@ -119,12 +136,6 @@ That command looks like:
fi
ERROR_HELP=""
- # After this point:
- # * eE: All commands should succeed.
- # * u: All variables should be defined before use.
- # * o pipefail: All stages of all pipes should succeed.
- set -eEuo pipefail
-
MOZ_NEW_CENTRAL=`hg log -r central -T"{node|short}"`
echo "bottom of fast-foward tree is $MOZ_BOTTOM_FF"
diff --git a/dom/media/webrtc/third_party_build/fetch_github_repo.py b/dom/media/webrtc/third_party_build/fetch_github_repo.py
index 8caa55d5c5..1031eb528b 100644
--- a/dom/media/webrtc/third_party_build/fetch_github_repo.py
+++ b/dom/media/webrtc/third_party_build/fetch_github_repo.py
@@ -67,12 +67,16 @@ def fetch_repo(github_path, clone_protocol, force_fetch, tar_path):
"git remote add upstream https://webrtc.googlesource.com/src", github_path
)
run_git("git fetch upstream", github_path)
- run_git("git merge upstream/master", github_path)
else:
print(
"Upstream remote (https://webrtc.googlesource.com/src) already configured"
)
+ # for sanity, ensure we're on master
+ run_git("git checkout master", github_path)
+ # make sure we successfully fetched upstream
+ run_git("git merge upstream/master", github_path)
+
# setup upstream branch-heads
stdout_lines = run_git(
"git config --local --get-all remote.upstream.fetch", github_path
@@ -87,9 +91,12 @@ def fetch_repo(github_path, clone_protocol, force_fetch, tar_path):
else:
print("Upstream remote branch-heads already configured")
+ # verify that a (quite old) branch-head exists
+ run_git("git show branch-heads/5059", github_path)
+
# prevent changing line endings when moving things out of the git repo
# (and into hg for instance)
- run_git("git config --local core.autocrlf false")
+ run_git("git config --local core.autocrlf false", github_path)
# do a sanity fetch in case this was not a freshly cloned copy of the
# repo, meaning it may not have all the mozilla branches present.
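With this change the helper always checks out master and merges upstream/master (previously the merge only ran on first clone), and it probes an old branch-head to catch a repo that is missing the branch-heads refspec. A sketch of the equivalent git sequence, using the configured checkout path:

git -C "$MOZ_LIBWEBRTC_SRC" checkout master         # don't operate on a stray branch
git -C "$MOZ_LIBWEBRTC_SRC" merge upstream/master   # also confirms the earlier fetch landed
git -C "$MOZ_LIBWEBRTC_SRC" show branch-heads/5059  # fails loudly if branch-heads are absent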
diff --git a/dom/media/webrtc/third_party_build/loop-ff.sh b/dom/media/webrtc/third_party_build/loop-ff.sh
index 73ad22822c..9836a8e687 100644
--- a/dom/media/webrtc/third_party_build/loop-ff.sh
+++ b/dom/media/webrtc/third_party_build/loop-ff.sh
@@ -99,9 +99,9 @@ To verify vendoring, run:
When verify_vendoring.sh is successful, please run the following command
in bash:
- (source $SCRIPT_DIR/use_config_env.sh ;
- ./mach python $SCRIPT_DIR/save_patch_stack.py \
- --repo-path $MOZ_LIBWEBRTC_SRC \
+ (source $SCRIPT_DIR/use_config_env.sh ; \\
+ ./mach python $SCRIPT_DIR/save_patch_stack.py \\
+ --repo-path $MOZ_LIBWEBRTC_SRC \\
--target-branch-head $MOZ_TARGET_UPSTREAM_BRANCH_HEAD )
You may resume running this script with the following command:
diff --git a/dom/media/webrtc/third_party_build/prep_repo.sh b/dom/media/webrtc/third_party_build/prep_repo.sh
index 8cd9ff6816..b1a04748b7 100644
--- a/dom/media/webrtc/third_party_build/prep_repo.sh
+++ b/dom/media/webrtc/third_party_build/prep_repo.sh
@@ -13,9 +13,30 @@ trap 'show_error_msg $LINENO' ERR
source dom/media/webrtc/third_party_build/use_config_env.sh
export HGPLAIN=1
+if [ "x$ALLOW_RERUN" = "x" ]; then
+ ALLOW_RERUN="0"
+fi
+
echo "MOZ_LIBWEBRTC_SRC: $MOZ_LIBWEBRTC_SRC"
echo "MOZ_LIBWEBRTC_BRANCH: $MOZ_LIBWEBRTC_BRANCH"
echo "MOZ_FASTFORWARD_BUG: $MOZ_FASTFORWARD_BUG"
+echo "ALLOW_RERUN: $ALLOW_RERUN"
+
+ERROR_HELP=$"
+A copy of moz-libwebrtc already exists at $MOZ_LIBWEBRTC_SRC
+While this script is not technically idempotent, it will try to proceed.
+However, the safest way forward is to start fresh by running:
+ rm -rf $STATE_DIR && \\
+ bash $0
+
+If you are sure you want to reuse the existing directory, run:
+ ALLOW_RERUN=1 bash $0
+"
+if [ -d $MOZ_LIBWEBRTC_SRC ] && [ "x$ALLOW_RERUN" != "x1" ]; then
+ echo "$ERROR_HELP"
+ exit 1
+fi
+ERROR_HELP=""
# After this point:
# * eE: All commands should succeed.
@@ -66,8 +87,25 @@ rm -f *.patch
# create a new work branch and "export" a new patch stack to rebase
# find the common commit between our upstream release branch and trunk
-CHERRY_PICK_BASE=`git merge-base branch-heads/$MOZ_PRIOR_UPSTREAM_BRANCH_HEAD_NUM master`
-echo "common commit: $CHERRY_PICK_BASE"
+PREVIOUS_RELEASE_BRANCH_BASE=`git merge-base branch-heads/$MOZ_PRIOR_UPSTREAM_BRANCH_HEAD_NUM master`
+echo "PREVIOUS_RELEASE_BRANCH_BASE: $PREVIOUS_RELEASE_BRANCH_BASE"
+
+NEXT_RELEASE_BRANCH_BASE=`git merge-base $MOZ_TARGET_UPSTREAM_BRANCH_HEAD master`
+echo "NEXT_RELEASE_BRANCH_BASE: $NEXT_RELEASE_BRANCH_BASE"
+
+ERROR_HELP=$"
+The previous release branch base ($PREVIOUS_RELEASE_BRANCH_BASE)
+and the next release branch base ($NEXT_RELEASE_BRANCH_BASE) are the
+same and should not be. This indicates a problem in the git repo at
+$MOZ_LIBWEBRTC_SRC.
+At the least, it likely means that 'master' is older than the two
+release branches. Investigation is necessary.
+"
+if [ "x$PREVIOUS_RELEASE_BRANCH_BASE" == "x$NEXT_RELEASE_BRANCH_BASE" ]; then
+ echo "$ERROR_HELP"
+ exit 1
+fi
+ERROR_HELP=""
# find the last upstream commit used by the previous update, so we don't
# accidentally grab release branch commits that were added after we started
@@ -85,9 +123,9 @@ commands will allow the process to continue:
git checkout $MOZ_LIBWEBRTC_BRANCH && \\
git checkout -b $MOZ_LIBWEBRTC_BRANCH-old && \\
git branch -D $MOZ_LIBWEBRTC_BRANCH ) && \\
- bash $0
+ ALLOW_RERUN=1 bash $0
"
-git branch $MOZ_LIBWEBRTC_BRANCH $CHERRY_PICK_BASE
+git branch $MOZ_LIBWEBRTC_BRANCH $PREVIOUS_RELEASE_BRANCH_BASE
ERROR_HELP=""
git checkout $MOZ_LIBWEBRTC_BRANCH
@@ -95,7 +133,7 @@ git checkout $MOZ_LIBWEBRTC_BRANCH
rm -f $TMP_DIR/*.patch $TMP_DIR/*.patch.bak
# grab the patches for all the commits in chrome's release branch for libwebrtc
-git format-patch -o $TMP_DIR -k $CHERRY_PICK_BASE..$LAST_UPSTREAM_COMMIT_SHA
+git format-patch -o $TMP_DIR -k $PREVIOUS_RELEASE_BRANCH_BASE..$LAST_UPSTREAM_COMMIT_SHA
# tweak the release branch commit summaries to show they were cherry picked
sed -i.bak -e "/^Subject: / s/^Subject: /Subject: (cherry-pick-branch-heads\/$MOZ_PRIOR_UPSTREAM_BRANCH_HEAD_NUM) /" $TMP_DIR/*.patch
git am $TMP_DIR/*.patch # applies to branch mozpatches
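The rename from CHERRY_PICK_BASE to PREVIOUS_RELEASE_BRANCH_BASE comes with a second merge-base check: if the previous and next fork points coincide, the local master is stale. With this cycle's branch numbers from default_config_env, the two queries look like:

git merge-base branch-heads/6261 master   # PREVIOUS_RELEASE_BRANCH_BASE
git merge-base branch-heads/6312 master   # NEXT_RELEASE_BRANCH_BASE
# identical output means master predates both branch points; fetch upstream and retry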
diff --git a/dom/media/webrtc/third_party_build/verify_vendoring.sh b/dom/media/webrtc/third_party_build/verify_vendoring.sh
index 008ab0d5db..6152e6d003 100644
--- a/dom/media/webrtc/third_party_build/verify_vendoring.sh
+++ b/dom/media/webrtc/third_party_build/verify_vendoring.sh
@@ -13,6 +13,12 @@ trap 'show_error_msg $LINENO' ERR
source dom/media/webrtc/third_party_build/use_config_env.sh
export HGPLAIN=1
+# After this point:
+# * eE: All commands should succeed.
+# * u: All variables should be defined before use.
+# * o pipefail: All stages of all pipes should succeed.
+set -eEuo pipefail
+
echo "MOZ_LIBWEBRTC_SRC: $MOZ_LIBWEBRTC_SRC"
echo "MOZ_LIBWEBRTC_BRANCH: $MOZ_LIBWEBRTC_BRANCH"
echo "MOZ_FASTFORWARD_BUG: $MOZ_FASTFORWARD_BUG"
@@ -30,27 +36,18 @@ LAST_PATCHSTACK_UPDATE_COMMIT_SHA=`echo $LAST_PATCHSTACK_UPDATE_COMMIT \
echo "LAST_PATCHSTACK_UPDATE_COMMIT_SHA: $LAST_PATCHSTACK_UPDATE_COMMIT_SHA"
# grab the oldest, non "Vendor from libwebrtc" line
-OLDEST_CANDIDATE_COMMIT=`hg log --template "{node|short} {desc|firstline}\n" \
- -r $LAST_PATCHSTACK_UPDATE_COMMIT_SHA::. \
- | grep -v "Vendor libwebrtc from" | head -1`
-echo "OLDEST_CANDIDATE_COMMIT: $OLDEST_CANDIDATE_COMMIT"
-
-OLDEST_CANDIDATE_SHA=`echo $OLDEST_CANDIDATE_COMMIT \
- | awk '{ print $1; }'`
-echo "OLDEST_CANDIDATE_SHA: $OLDEST_CANDIDATE_SHA"
+CANDIDATE_COMMITS=`hg log --template "{node|short} {desc|firstline}\n" \
+ -r "children($LAST_PATCHSTACK_UPDATE_COMMIT_SHA)::. - desc('re:(Vendor libwebrtc)')" \
+ --include "third_party/libwebrtc/" | awk 'BEGIN { ORS=" " }; { print $1; }'`
+echo "CANDIDATE_COMMITS:"
+echo "$CANDIDATE_COMMITS"
EXTRACT_COMMIT_RANGE="{start-commit-sha}::."
-if [ "x$CURRENT_SHA" != "x$OLDEST_CANDIDATE_SHA" ]; then
- EXTRACT_COMMIT_RANGE="$OLDEST_CANDIDATE_SHA::."
+if [ "x$CANDIDATE_COMMITS" != "x" ]; then
+ EXTRACT_COMMIT_RANGE="$CANDIDATE_COMMITS"
echo "EXTRACT_COMMIT_RANGE: $EXTRACT_COMMIT_RANGE"
fi
-# After this point:
-# * eE: All commands should succeed.
-# * u: All variables should be defined before use.
-# * o pipefail: All stages of all pipes should succeed.
-set -eEuo pipefail
-
./mach python $SCRIPT_DIR/vendor-libwebrtc.py \
--from-local $MOZ_LIBWEBRTC_SRC \
--commit $MOZ_LIBWEBRTC_BRANCH \
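The single-commit OLDEST_CANDIDATE lookup is replaced by one revset that collects every non-vendoring commit touching third_party/libwebrtc since the last patch-stack update. Roughly, with a placeholder sha:

hg log --template "{node|short} {desc|firstline}\n" \
  -r "children(0a1b2c3d)::. - desc('re:(Vendor libwebrtc)')" \
  --include "third_party/libwebrtc/"
# children(SHA)::. spans everything after the last update up to the working parent;
# the desc() subtraction drops the "Vendor libwebrtc" commits from that range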
diff --git a/dom/media/webrtc/transport/test/moz.build b/dom/media/webrtc/transport/test/moz.build
index a2b0bc2bc2..3213525abd 100644
--- a/dom/media/webrtc/transport/test/moz.build
+++ b/dom/media/webrtc/transport/test/moz.build
@@ -7,35 +7,39 @@
include("/ipc/chromium/chromium-config.mozbuild")
if CONFIG["OS_TARGET"] != "WINNT":
- if CONFIG["OS_TARGET"] != "Android":
- SOURCES += [
- "ice_unittest.cpp",
- ]
-
SOURCES += [
"buffered_stun_socket_unittest.cpp",
"multi_tcp_socket_unittest.cpp",
- "nrappkit_unittest.cpp",
"proxy_tunnel_socket_unittest.cpp",
- "rlogconnector_unittest.cpp",
"runnable_utils_unittest.cpp",
"simpletokenbucket_unittest.cpp",
- "sockettransportservice_unittest.cpp",
"stunserver.cpp",
"test_nr_socket_ice_unittest.cpp",
"test_nr_socket_unittest.cpp",
"TestSyncRunnable.cpp",
- "transport_unittests.cpp",
"turn_unittest.cpp",
- "webrtcproxychannel_unittest.cpp",
]
- if CONFIG["MOZ_SCTP"]:
+ # Bug 1894419 - Various failures under TSAN
+ if not CONFIG["MOZ_TSAN"]:
+ if CONFIG["OS_TARGET"] != "Android":
+ SOURCES += [
+ "ice_unittest.cpp",
+ ]
+
+ if CONFIG["MOZ_SCTP"]:
+ SOURCES += [
+ "sctp_unittest.cpp",
+ ]
+
SOURCES += [
- "sctp_unittest.cpp",
+ "nrappkit_unittest.cpp",
+ "rlogconnector_unittest.cpp",
+ "sockettransportservice_unittest.cpp",
+ "transport_unittests.cpp",
+ "webrtcproxychannel_unittest.cpp",
]
-
for var in ("HAVE_STRDUP", "NR_SOCKET_IS_VOID_PTR", "SCTP_DEBUG"):
DEFINES[var] = True
diff --git a/dom/media/webrtc/transport/test_nr_socket.cpp b/dom/media/webrtc/transport/test_nr_socket.cpp
index 1a6f226c42..1bf95adc88 100644
--- a/dom/media/webrtc/transport/test_nr_socket.cpp
+++ b/dom/media/webrtc/transport/test_nr_socket.cpp
@@ -141,8 +141,7 @@ TestNat::NatBehavior TestNat::ToNatBehavior(const std::string& type) {
return TestNat::PORT_DEPENDENT;
}
- MOZ_ASSERT(false, "Invalid NAT behavior");
- return TestNat::ENDPOINT_INDEPENDENT;
+ MOZ_CRASH("Invalid NAT behavior");
}
bool TestNat::has_port_mappings() const {
@@ -202,8 +201,8 @@ TestNrSocket::~TestNrSocket() { nat_->erase_socket(this); }
RefPtr<NrSocketBase> TestNrSocket::create_external_socket(
const nr_transport_addr& dest_addr) const {
- MOZ_ASSERT(nat_->enabled_);
- MOZ_ASSERT(!nat_->is_an_internal_tuple(dest_addr));
+ MOZ_RELEASE_ASSERT(nat_->enabled_);
+ MOZ_RELEASE_ASSERT(!nat_->is_an_internal_tuple(dest_addr));
int r;
nr_transport_addr nat_external_addr;
@@ -261,7 +260,7 @@ void TestNrSocket::close() {
}
int TestNrSocket::listen(int backlog) {
- MOZ_ASSERT(internal_socket_->my_addr().protocol == IPPROTO_TCP);
+ MOZ_RELEASE_ASSERT(internal_socket_->my_addr().protocol == IPPROTO_TCP);
r_log(LOG_GENERIC, LOG_DEBUG, "TestNrSocket %p %s listening", this,
internal_socket_->my_addr().as_string);
@@ -269,7 +268,7 @@ int TestNrSocket::listen(int backlog) {
}
int TestNrSocket::accept(nr_transport_addr* addrp, nr_socket** sockp) {
- MOZ_ASSERT(internal_socket_->my_addr().protocol == IPPROTO_TCP);
+ MOZ_RELEASE_ASSERT(internal_socket_->my_addr().protocol == IPPROTO_TCP);
int r = internal_socket_->accept(addrp, sockp);
if (r) {
return r;
@@ -296,7 +295,7 @@ void TestNrSocket::process_delayed_cb(NR_SOCKET s, int how, void* cb_arg) {
int TestNrSocket::sendto(const void* msg, size_t len, int flags,
const nr_transport_addr* to) {
- MOZ_ASSERT(internal_socket_->my_addr().protocol != IPPROTO_TCP);
+ MOZ_RELEASE_ASSERT(internal_socket_->my_addr().protocol != IPPROTO_TCP);
r_log(LOG_GENERIC, LOG_DEBUG, "TestNrSocket %p %s %s", this, __FUNCTION__,
to->as_string);
@@ -347,10 +346,7 @@ int TestNrSocket::sendto(const void* msg, size_t len, int flags,
external_socket = similar_port_mapping->external_socket_;
} else {
external_socket = create_external_socket(*to);
- if (!external_socket) {
- MOZ_ASSERT(false);
- return R_INTERNAL;
- }
+ MOZ_RELEASE_ASSERT(external_socket);
}
port_mapping = create_port_mapping(*to, external_socket);
@@ -371,7 +367,7 @@ int TestNrSocket::sendto(const void* msg, size_t len, int flags,
int TestNrSocket::recvfrom(void* buf, size_t maxlen, size_t* len, int flags,
nr_transport_addr* from) {
- MOZ_ASSERT(internal_socket_->my_addr().protocol != IPPROTO_TCP);
+ MOZ_RELEASE_ASSERT(internal_socket_->my_addr().protocol != IPPROTO_TCP);
if (!read_buffer_.empty()) {
UdpPacket& packet = read_buffer_.front();
@@ -441,8 +437,8 @@ bool TestNrSocket::allow_ingress(const nr_transport_addr& to,
const nr_transport_addr& from,
PortMapping** port_mapping_used) const {
// This is only called for traffic arriving at a port mapping
- MOZ_ASSERT(nat_->enabled_);
- MOZ_ASSERT(!nat_->is_an_internal_tuple(from));
+ MOZ_RELEASE_ASSERT(nat_->enabled_);
+ MOZ_RELEASE_ASSERT(!nat_->is_an_internal_tuple(from));
// Find the port mapping (if any) that this packet landed on
*port_mapping_used = nullptr;
@@ -603,7 +599,7 @@ int TestNrSocket::write(const void* msg, size_t len, size_t* written) {
return R_INTERNAL;
}
// This is TCP only
- MOZ_ASSERT(port_mappings_.size() == 1);
+ MOZ_RELEASE_ASSERT(port_mappings_.size() == 1);
r_log(LOG_GENERIC, LOG_DEBUG, "PortMapping %s -> %s writing",
port_mappings_.front()->external_socket_->my_addr().as_string,
port_mappings_.front()->remote_address_.as_string);
@@ -641,7 +637,7 @@ int TestNrSocket::read(void* buf, size_t maxlen, size_t* len) {
if (port_mappings_.empty()) {
r = internal_socket_->read(buf, maxlen, len);
} else {
- MOZ_ASSERT(port_mappings_.size() == 1);
+ MOZ_RELEASE_ASSERT(port_mappings_.size() == 1);
r = port_mappings_.front()->external_socket_->read(buf, maxlen, len);
if (!r && nat_->refresh_on_ingress_) {
port_mappings_.front()->last_used_ = PR_IntervalNow();
@@ -722,7 +718,7 @@ int TestNrSocket::async_wait(int how, NR_async_cb cb, void* cb_arg,
if (internal_socket_->my_addr().protocol == IPPROTO_TCP) {
// For a TCP connection through a simulated NAT, these signals are
// just passed through.
- MOZ_ASSERT(port_mappings_.size() == 1);
+ MOZ_RELEASE_ASSERT(port_mappings_.size() == 1);
return port_mappings_.front()->async_wait(
how, port_mapping_tcp_passthrough_callback, this, function, line);
@@ -834,7 +830,7 @@ void TestNrSocket::on_socket_readable(NrSocketBase* real_socket) {
}
void TestNrSocket::fire_readable_callback() {
- MOZ_ASSERT(poll_flags() & PR_POLL_READ);
+ MOZ_RELEASE_ASSERT(poll_flags() & PR_POLL_READ);
r_log(LOG_GENERIC, LOG_DEBUG, "TestNrSocket %p %s ready for read", this,
internal_socket_->my_addr().as_string);
fire_callback(NR_ASYNC_WAIT_READ);
@@ -849,7 +845,7 @@ void TestNrSocket::port_mapping_writeable_callback(void* ext_sock_v, int how,
}
void TestNrSocket::write_to_port_mapping(NrSocketBase* external_socket) {
- MOZ_ASSERT(internal_socket_->my_addr().protocol != IPPROTO_TCP);
+ MOZ_RELEASE_ASSERT(internal_socket_->my_addr().protocol != IPPROTO_TCP);
int r = 0;
for (PortMapping* port_mapping : port_mappings_) {
@@ -935,7 +931,7 @@ TestNrSocket::PortMapping::PortMapping(
}
int TestNrSocket::PortMapping::send_from_queue() {
- MOZ_ASSERT(remote_address_.protocol != IPPROTO_TCP);
+ MOZ_RELEASE_ASSERT(remote_address_.protocol != IPPROTO_TCP);
int r = 0;
while (!send_queue_.empty()) {
@@ -967,7 +963,7 @@ int TestNrSocket::PortMapping::send_from_queue() {
int TestNrSocket::PortMapping::sendto(const void* msg, size_t len,
const nr_transport_addr& to) {
- MOZ_ASSERT(remote_address_.protocol != IPPROTO_TCP);
+ MOZ_RELEASE_ASSERT(remote_address_.protocol != IPPROTO_TCP);
r_log(LOG_GENERIC, LOG_DEBUG, "PortMapping %s -> %s sending to %s",
external_socket_->my_addr().as_string, remote_address_.as_string,
to.as_string);
diff --git a/dom/media/webrtc/transport/third_party/nICEr/src/stun/addrs.c b/dom/media/webrtc/transport/third_party/nICEr/src/stun/addrs.c
index 362b7d828e..51f72f4179 100644
--- a/dom/media/webrtc/transport/third_party/nICEr/src/stun/addrs.c
+++ b/dom/media/webrtc/transport/third_party/nICEr/src/stun/addrs.c
@@ -62,82 +62,125 @@ nr_stun_is_duplicate_addr(nr_local_addr addrs[], int count, nr_local_addr *addr)
return 0;
}
+static int
+nr_stun_filter_find_first_addr_with_ifname(nr_local_addr addrs[], int count, const char *ifname) {
+ for (int i = 0; i < count; ++i) {
+ if (!strncmp(addrs[i].addr.ifname, ifname, sizeof(addrs[i].addr.ifname))) {
+ return i;
+ }
+ }
+ return count;
+}
+
+static int
+nr_stun_filter_addrs_for_ifname(nr_local_addr src[], const int src_begin, const int src_end, nr_local_addr dest[], int *dest_index, int remove_loopback, int remove_link_local) {
+ int r, _status;
+ /* We prefer temp ipv6 for their privacy properties. If we cannot get
+ * that, we prefer ipv6 that are not based on mac address. */
+ int filter_mac_ipv6 = 0;
+ int filter_teredo_ipv6 = 0;
+ int filter_non_temp_ipv6 = 0;
+
+ const char* ifname = src[src_begin].addr.ifname;
+
+ /* Figure out what we want to filter */
+ for (int i = src_begin; i < src_end; ++i) {
+ if (strncmp(ifname, src[i].addr.ifname, sizeof(src[i].addr.ifname))) {
+ /* Ignore addrs from other interfaces */
+ continue;
+ }
+
+ if (src[i].addr.ip_version == NR_IPV6) {
+ if (nr_transport_addr_is_teredo(&src[i].addr)) {
+ src[i].interface.type |= NR_INTERFACE_TYPE_TEREDO;
+ /* Prefer teredo over mac-based address. Probably will never see
+ * both. */
+ filter_mac_ipv6 = 1;
+ } else {
+ filter_teredo_ipv6 = 1;
+ }
+
+ if (!nr_transport_addr_is_mac_based(&src[i].addr)) {
+ filter_mac_ipv6 = 1;
+ }
+
+ if (src[i].flags & NR_ADDR_FLAG_TEMPORARY) {
+ filter_non_temp_ipv6 = 1;
+ }
+ }
+ }
+
+ /* Perform the filtering */
+ for (int i = src_begin; i < src_end; ++i) {
+ if (strncmp(ifname, src[i].addr.ifname, sizeof(src[i].addr.ifname))) {
+ /* Ignore addrs from other interfaces */
+ continue;
+ }
+
+ if (nr_stun_is_duplicate_addr(dest, *dest_index, &src[i])) {
+ /* skip src[i], it's a duplicate */
+ }
+ else if (remove_loopback && nr_transport_addr_is_loopback(&src[i].addr)) {
+ /* skip src[i], it's a loopback */
+ }
+ else if (remove_link_local &&
+ nr_transport_addr_is_link_local(&src[i].addr)) {
+ /* skip src[i], it's a link-local address */
+ }
+ else if (filter_mac_ipv6 &&
+ nr_transport_addr_is_mac_based(&src[i].addr)) {
+ /* skip src[i], it's MAC based */
+ }
+ else if (filter_teredo_ipv6 &&
+ nr_transport_addr_is_teredo(&src[i].addr)) {
+ /* skip src[i], it's a Teredo address */
+ }
+ else if (filter_non_temp_ipv6 &&
+ (src[i].addr.ip_version == NR_IPV6) &&
+ !(src[i].flags & NR_ADDR_FLAG_TEMPORARY)) {
+ /* skip src[i], it's a non-temporary ipv6, and we have a temporary */
+ }
+ else {
+ /* otherwise, copy it to the destination array */
+ if ((r=nr_local_addr_copy(&dest[*dest_index], &src[i])))
+ ABORT(r);
+ ++(*dest_index);
+ }
+ }
+
+ _status = 0;
+abort:
+ return _status;
+}
+
int
nr_stun_filter_addrs(nr_local_addr addrs[], int remove_loopback, int remove_link_local, int *count)
{
int r, _status;
nr_local_addr *tmp = 0;
- int i;
- int n;
- /* We prefer temp ipv6 for their privacy properties. If we cannot get
- * that, we prefer ipv6 that are not based on mac address. */
- int filter_mac_ipv6 = 0;
- int filter_teredo_ipv6 = 0;
- int filter_non_temp_ipv6 = 0;
+ int dest_index = 0;
tmp = RMALLOC(*count * sizeof(*tmp));
if (!tmp)
ABORT(R_NO_MEMORY);
- for (i = 0; i < *count; ++i) {
- if (addrs[i].addr.ip_version == NR_IPV6) {
- if (nr_transport_addr_is_teredo(&addrs[i].addr)) {
- addrs[i].interface.type |= NR_INTERFACE_TYPE_TEREDO;
- /* Prefer teredo over mac-based address. Probably will never see
- * both. */
- filter_mac_ipv6 = 1;
- } else {
- filter_teredo_ipv6 = 1;
- }
-
- if (!nr_transport_addr_is_mac_based(&addrs[i].addr)) {
- filter_mac_ipv6 = 1;
- }
-
- if (addrs[i].flags & NR_ADDR_FLAG_TEMPORARY) {
- filter_non_temp_ipv6 = 1;
+ for (int i = 0; i < *count; ++i) {
+ if (i == nr_stun_filter_find_first_addr_with_ifname(addrs, *count, addrs[i].addr.ifname)) {
+ /* This is the first address associated with this interface.
+ * Filter for this interface once, now. */
+ if ((r = nr_stun_filter_addrs_for_ifname(addrs, i, *count, tmp, &dest_index, remove_loopback, remove_link_local))) {
+ ABORT(r);
}
}
}
- n = 0;
- for (i = 0; i < *count; ++i) {
- if (nr_stun_is_duplicate_addr(tmp, n, &addrs[i])) {
- /* skip addrs[i], it's a duplicate */
- }
- else if (remove_loopback && nr_transport_addr_is_loopback(&addrs[i].addr)) {
- /* skip addrs[i], it's a loopback */
- }
- else if (remove_link_local &&
- nr_transport_addr_is_link_local(&addrs[i].addr)) {
- /* skip addrs[i], it's a link-local address */
- }
- else if (filter_mac_ipv6 &&
- nr_transport_addr_is_mac_based(&addrs[i].addr)) {
- /* skip addrs[i], it's MAC based */
- }
- else if (filter_teredo_ipv6 &&
- nr_transport_addr_is_teredo(&addrs[i].addr)) {
- /* skip addrs[i], it's a Teredo address */
- }
- else if (filter_non_temp_ipv6 &&
- (addrs[i].addr.ip_version == NR_IPV6) &&
- !(addrs[i].flags & NR_ADDR_FLAG_TEMPORARY)) {
- /* skip addrs[i], it's a non-temporary ipv6, and we have a temporary */
- }
- else {
- /* otherwise, copy it to the temporary array */
- if ((r=nr_local_addr_copy(&tmp[n], &addrs[i])))
- ABORT(r);
- ++n;
- }
- }
+ /* Clear the entire array out before copying back */
+ memset(addrs, 0, *count * sizeof(*addrs));
- *count = n;
+ *count = dest_index;
- memset(addrs, 0, *count * sizeof(*addrs));
/* copy temporary array into passed in/out array */
- for (i = 0; i < *count; ++i) {
+ for (int i = 0; i < *count; ++i) {
if ((r=nr_local_addr_copy(&addrs[i], &tmp[i])))
ABORT(r);
}
diff --git a/dom/media/webrtc/transport/third_party/nrappkit/src/log/r_log.c b/dom/media/webrtc/transport/third_party/nrappkit/src/log/r_log.c
index 09bb24749f..bb47cda879 100644
--- a/dom/media/webrtc/transport/third_party/nrappkit/src/log/r_log.c
+++ b/dom/media/webrtc/transport/third_party/nrappkit/src/log/r_log.c
@@ -375,22 +375,10 @@ int r_vlog(int facility,int level,const char *format,va_list ap)
int stderr_vlog(int facility,int level,const char *format,va_list ap)
{
-#if 0 /* remove time stamping, for now */
- char cbuf[30];
- time_t tt;
-
- tt=time(0);
-
- ctime_r(&tt,cbuf);
- cbuf[strlen(cbuf)-1]=0;
-
- fprintf(stderr,"%s: ",cbuf);
-#endif
-
vfprintf(stderr,format,ap);
fprintf(stderr,"\n");
return(0);
- }
+ }
int syslog_vlog(int facility,int level,const char *format,va_list ap)
{
@@ -525,7 +513,7 @@ int r_logging(int facility, int level)
static int r_log_get_default_level(void)
{
- char *log;
+ char *log = 0;
int _status;
log=getenv("R_LOG_LEVEL");
@@ -546,7 +534,7 @@ static int r_log_get_default_level(void)
static int r_log_get_destinations(int usereg)
{
- char *log;
+ char *log = 0;
int i;
int r,_status;
@@ -627,7 +615,7 @@ int r_log_init()
int _r_log_init(int use_reg)
{
#ifndef WIN32
- char *log;
+ char *log = 0;
#endif
if(r_log_initted==0) {
diff --git a/dom/media/webrtc/transport/third_party/nrappkit/src/registry/registry.c b/dom/media/webrtc/transport/third_party/nrappkit/src/registry/registry.c
index 709b1c3fb7..3134ad1536 100644
--- a/dom/media/webrtc/transport/third_party/nrappkit/src/registry/registry.c
+++ b/dom/media/webrtc/transport/third_party/nrappkit/src/registry/registry.c
@@ -563,7 +563,7 @@ NR_reg_make_registry(NR_registry parent, char *child, NR_registry out)
int r, _status;
size_t plen;
size_t clen;
- char *c;
+ char *c = 0;
size_t i;
if ((r=nr_reg_is_valid(parent)))
diff --git a/dom/media/webrtc/transport/third_party/nrappkit/src/registry/registry_local.c b/dom/media/webrtc/transport/third_party/nrappkit/src/registry/registry_local.c
index ed6e19aaa0..e577b7d4e5 100644
--- a/dom/media/webrtc/transport/third_party/nrappkit/src/registry/registry_local.c
+++ b/dom/media/webrtc/transport/third_party/nrappkit/src/registry/registry_local.c
@@ -134,19 +134,9 @@ static int nr_reg_get_array(char *name, unsigned char type, UCHAR *out, size_t s
static int nr_reg_set(char *name, int type, void *data);
static int nr_reg_set_array(char *name, unsigned char type, UCHAR *data, size_t length);
static int nr_reg_set_parent_registries(char *name);
-
-/* make these static OLD_REGISTRY */
-#if 0
-static int nr_reg_fetch_node(char *name, unsigned char type, nr_registry_node **node, int *free_node);
-static char *nr_reg_alloc_node_data(char *name, nr_registry_node *node, int *freeit);
-#else
int nr_reg_fetch_node(char *name, unsigned char type, nr_registry_node **node, int *free_node);
char *nr_reg_alloc_node_data(char *name, nr_registry_node *node, int *freeit);
-#endif
static int nr_reg_rfree(void *ptr);
-#if 0 /* Unused currently */
-static int nr_reg_noop(void *ptr);
-#endif
static int nr_reg_compute_length(char *name, nr_registry_node *node, size_t *length);
char *nr_reg_action_name(int action);
@@ -155,10 +145,6 @@ char *nr_reg_action_name(int action);
* nr_array_registry_node */
static r_assoc *nr_registry = 0;
-#if 0 /* Unused currently */
-static nr_array_registry_node nr_top_level_node;
-#endif
-
typedef struct nr_reg_find_children_arg_ {
size_t size;
NR_registry *children;
@@ -178,7 +164,7 @@ nr_reg_local_iter(NR_registry prefix, int (*action)(void *ptr, r_assoc_iterator
{
int r, _status;
r_assoc_iterator iter;
- char *name;
+ char *name = 0;
int namel;
nr_registry_node *node;
int prefixl;
@@ -246,7 +232,7 @@ nr_reg_local_find_children(void *ptr, r_assoc_iterator *iter, char *prefix, char
{
int _status;
int prefixl = strlen(prefix);
- char *dot;
+ char *dot = 0;
nr_reg_find_children_arg *arg = (void*)ptr;
assert(sizeof(*(arg->children)) == sizeof(NR_registry));
@@ -275,7 +261,7 @@ int
nr_reg_local_count_children(void *ptr, r_assoc_iterator *iter, char *prefix, char *name, nr_registry_node *node)
{
int prefixl = strlen(prefix);
- char *dot;
+ char *dot = 0;
/* only count children */
if (name[prefixl] == '.') {
@@ -296,7 +282,7 @@ nr_reg_local_dump_print(void *ptr, r_assoc_iterator *iter, char *prefix, char *n
{
int _status;
int freeit = 0;
- char *data;
+ char *data = 0;
/* only print leaf nodes */
if (node->type != NR_REG_TYPE_REGISTRY) {
@@ -315,14 +301,6 @@ nr_reg_local_dump_print(void *ptr, r_assoc_iterator *iter, char *prefix, char *n
}
-#if 0 /* Unused currently */
-int
-nr_reg_noop(void *ptr)
-{
- return 0;
-}
-#endif
-
int
nr_reg_rfree(void *ptr)
{
@@ -750,7 +728,7 @@ nr_reg_set_parent_registries(char *name)
{
int r, _status;
char *parent = 0;
- char *dot;
+ char *dot = 0;
if ((parent = r_strdup(name)) == 0)
ABORT(R_NO_MEMORY);
@@ -955,7 +933,7 @@ nr_reg_local_get_type(NR_registry name, NR_registry_type type)
{
int r, _status;
nr_registry_node *node = 0;
- char *str;
+ char *str = 0;
if ((r=nr_reg_is_valid(name)))
ABORT(r);
@@ -1044,7 +1022,7 @@ int
nr_reg_local_get_child_count(NR_registry parent, size_t *count)
{
int r, _status;
- nr_registry_node *ignore1;
+ nr_registry_node *ignore1 = 0;
int ignore2;
diff --git a/dom/media/webrtc/transport/third_party/nrappkit/src/registry/registrycb.c b/dom/media/webrtc/transport/third_party/nrappkit/src/registry/registrycb.c
index 4b326a1ee2..bd3570cefc 100644
--- a/dom/media/webrtc/transport/third_party/nrappkit/src/registry/registrycb.c
+++ b/dom/media/webrtc/transport/third_party/nrappkit/src/registry/registrycb.c
@@ -118,9 +118,9 @@ int
nr_reg_register_callback(NR_registry name, char action, void (*cb)(void *cb_arg, char action, NR_registry name), void *cb_arg)
{
int r, _status;
- r_assoc *assoc;
+ r_assoc *assoc = 0;
int create_assoc = 0;
- nr_reg_cb_info *info;
+ nr_reg_cb_info *info = 0;
int create_info = 0;
unsigned char cb_id[SIZEOF_CB_ID];
@@ -191,7 +191,7 @@ int
nr_reg_unregister_callback(char *name, char action, void (*cb)(void *cb_arg, char action, NR_registry name))
{
int r, _status;
- r_assoc *assoc;
+ r_assoc *assoc = 0;
int size;
unsigned char cb_id[SIZEOF_CB_ID];
@@ -283,12 +283,12 @@ int
nr_reg_raise_event_recurse(char *name, char *tmp, int action)
{
int r, _status;
- r_assoc *assoc;
+ r_assoc *assoc = 0;
nr_reg_cb_info *info;
r_assoc_iterator iter;
- char *key;
+ char *key = 0;
int keyl;
- char *c;
+ char *c = 0;
int free_tmp = 0;
int count;
diff --git a/dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_assoc.c b/dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_assoc.c
index 25b3827d50..eb8cb0b061 100644
--- a/dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_assoc.c
+++ b/dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_assoc.c
@@ -150,7 +150,7 @@ int r_assoc_create(assocp,hash_func,bits)
int r_assoc_destroy(assocp)
r_assoc **assocp;
{
- r_assoc *assoc;
+ r_assoc *assoc = 0;
int i;
if(!assocp || !*assocp)
@@ -169,7 +169,7 @@ int r_assoc_destroy(assocp)
static int destroy_assoc_chain(chain)
r_assoc_el *chain;
{
- r_assoc_el *nxt;
+ r_assoc_el *nxt = 0;
while(chain){
nxt=chain->next;
@@ -190,7 +190,7 @@ static int copy_assoc_chain(knewp,old)
r_assoc_el **knewp;
r_assoc_el *old;
{
- r_assoc_el *knew=0,*ptr,*tmp;
+ r_assoc_el *knew = 0, *ptr = 0, *tmp = 0;
int r,_status;
ptr=0; /* Pacify GCC's uninitialized warning.
@@ -245,7 +245,7 @@ static int r_assoc_fetch_bucket(assoc,key,len,bucketp)
r_assoc_el **bucketp;
{
UINT4 hash_value;
- r_assoc_el *bucket;
+ r_assoc_el *bucket = 0;
hash_value=assoc->hash_func(key,len,assoc->bits);
@@ -265,7 +265,7 @@ int r_assoc_fetch(assoc,key,len,datap)
int len;
void **datap;
{
- r_assoc_el *bucket;
+ r_assoc_el *bucket = 0;
int r;
if(r=r_assoc_fetch_bucket(assoc,key,len,&bucket)){
@@ -287,7 +287,7 @@ int r_assoc_insert(assoc,key,len,data,copy,destroy,how)
int (*destroy)(void *ptr);
int how;
{
- r_assoc_el *bucket,*new_bucket=0;
+ r_assoc_el *bucket = 0, *new_bucket = 0;
int r,_status;
if(r=r_assoc_fetch_bucket(assoc,key,len,&bucket)){
@@ -340,7 +340,7 @@ int r_assoc_delete(assoc,key,len)
int len;
{
int r;
- r_assoc_el *bucket;
+ r_assoc_el *bucket = 0;
UINT4 hash_value;
if(r=r_assoc_fetch_bucket(assoc,key,len,&bucket)){
@@ -377,7 +377,7 @@ int r_assoc_copy(knewp,old)
r_assoc *old;
{
int r,_status,i;
- r_assoc *knew;
+ r_assoc *knew = 0;
if(!(knew=(r_assoc *)RCALLOC(sizeof(r_assoc))))
ABORT(R_NO_MEMORY);
@@ -441,7 +441,7 @@ int r_assoc_iter(iter,key,keyl,val)
void **val;
{
int i;
- r_assoc_el *ret;
+ r_assoc_el *ret = 0;
if(!iter->next)
return(R_EOD);
diff --git a/dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_crc32.c b/dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_crc32.c
index 38d3e4da38..6def127dfd 100644
--- a/dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_crc32.c
+++ b/dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_crc32.c
@@ -157,7 +157,7 @@ r_crc32(buf, dlen, cval)
u_int32_t *cval;
{
u_int32_t crc = ~0;
- char *p ;
+ char *p = 0;
int i;
u_int32_t crc32_total = 0 ;
diff --git a/dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_data.c b/dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_data.c
index dfb7af2d5c..23df74fb8b 100644
--- a/dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_data.c
+++ b/dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_data.c
@@ -183,7 +183,7 @@ int r_data_destroy(dp)
int r_data_destroy_v(v)
void *v;
{
- Data *d;
+ Data *d = 0;
if(!v)
return(0);
@@ -199,7 +199,7 @@ int r_data_destroy_v(v)
int r_data_destroy_vp(v)
void **v;
{
- Data *d;
+ Data *d = 0;
if(!v || !*v)
return(0);
diff --git a/dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_list.c b/dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_list.c
index 4e71d67030..527d39b43a 100644
--- a/dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_list.c
+++ b/dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_list.c
@@ -117,8 +117,8 @@ int r_list_create(listp)
int r_list_destroy(listp)
r_list **listp;
{
- r_list *list;
- r_list_el *el;
+ r_list *list = 0;
+ r_list_el *el = 0;
if(!listp || !*listp)
return(0);
@@ -147,7 +147,7 @@ int r_list_copy(outp,in)
r_list *in;
{
r_list *out=0;
- r_list_el *el,*el2,*last=0;
+ r_list_el *el = 0, *el2 = 0, *last = 0;
int r, _status;
if(!in){
diff --git a/dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_memory.c b/dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_memory.c
index 53846fc019..3cfcc916d4 100644
--- a/dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_memory.c
+++ b/dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_memory.c
@@ -66,7 +66,7 @@ void *r_malloc(type,size)
size_t size;
{
size_t total;
- r_malloc_chunk *chunk;
+ r_malloc_chunk *chunk = 0;
total=size+sizeof(r_malloc_chunk);
@@ -90,7 +90,7 @@ void *r_calloc(type,number,size)
size_t number;
size_t size;
{
- void *ret;
+ void *ret = 0;
size_t total;
total=number*size;
@@ -106,7 +106,7 @@ void *r_calloc(type,number,size)
void r_free(ptr)
void *ptr;
{
- r_malloc_chunk *chunk;
+ r_malloc_chunk *chunk = 0;
if(!ptr) return;
@@ -125,7 +125,7 @@ void *r_realloc(ptr,size)
void *ptr;
size_t size;
{
- r_malloc_chunk *chunk,*nchunk;
+ r_malloc_chunk *chunk = 0, *nchunk = 0;
size_t total;
if(!ptr) return(r_malloc(255,size));
@@ -154,7 +154,7 @@ char *r_strdup(str)
const char *str;
{
int len;
- char *nstr;
+ char *nstr = 0;
if(!str)
return(0);
diff --git a/dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_replace.c b/dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_replace.c
index 8916b884cc..4dc8feb878 100644
--- a/dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_replace.c
+++ b/dom/media/webrtc/transport/third_party/nrappkit/src/util/libekr/r_replace.c
@@ -88,7 +88,7 @@ char *strdup(str)
char *str;
{
int len=strlen(str);
- char *n;
+ char *n = 0;
if(!(n=(char *)malloc(len+1)))
return(0);
diff --git a/dom/media/webrtc/transport/third_party/nrappkit/src/util/p_buf.c b/dom/media/webrtc/transport/third_party/nrappkit/src/util/p_buf.c
index 459baecdda..6ada8420ed 100644
--- a/dom/media/webrtc/transport/third_party/nrappkit/src/util/p_buf.c
+++ b/dom/media/webrtc/transport/third_party/nrappkit/src/util/p_buf.c
@@ -72,7 +72,7 @@ int nr_p_buf_ctx_create(size,ctxp)
int nr_p_buf_ctx_destroy(ctxp)
nr_p_buf_ctx **ctxp;
{
- nr_p_buf_ctx *ctx;
+ nr_p_buf_ctx *ctx = 0;
if(!ctxp || !*ctxp)
return(0);
@@ -133,7 +133,7 @@ int nr_p_buf_free_chain(ctx,head)
nr_p_buf_ctx *ctx;
nr_p_buf_head *head;
{
- nr_p_buf *n1,*n2;
+ nr_p_buf *n1 = 0, *n2 = 0;
n1=STAILQ_FIRST(head);
while(n1){
@@ -155,7 +155,7 @@ int nr_p_buf_write_to_chain(ctx,chain,data,len)
UINT4 len;
{
int r,_status;
- nr_p_buf *buf;
+ nr_p_buf *buf = 0;
buf=STAILQ_LAST(chain,nr_p_buf_,entry);
while(len){
@@ -186,7 +186,7 @@ int nr_p_buf_write_to_chain(ctx,chain,data,len)
static int nr_p_buf_destroy_chain(head)
nr_p_buf_head *head;
{
- nr_p_buf *n1,*n2;
+ nr_p_buf *n1 = 0, *n2 = 0;
n1=STAILQ_FIRST(head);
while(n1){
diff --git a/dom/media/webrtc/transport/third_party/nrappkit/src/util/util.c b/dom/media/webrtc/transport/third_party/nrappkit/src/util/util.c
index 79b14a8967..51f75832b1 100644
--- a/dom/media/webrtc/transport/third_party/nrappkit/src/util/util.c
+++ b/dom/media/webrtc/transport/third_party/nrappkit/src/util/util.c
@@ -79,38 +79,6 @@ int nr_get_filename(base,name,namep)
return(_status);
}
-#if 0
-int read_RSA_private_key(base,name,keyp)
- char *base;
- char *name;
- RSA **keyp;
- {
- char *keyfile=0;
- BIO *bio=0;
- FILE *fp=0;
- RSA *rsa=0;
- int r,_status;
-
- /* Load the keyfile */
- if(r=get_filename(base,name,&keyfile))
- ABORT(r);
- if(!(fp=fopen(keyfile,"r")))
- ABORT(R_NOT_FOUND);
- if(!(bio=BIO_new(BIO_s_file())))
- ABORT(R_NO_MEMORY);
- BIO_set_fp(bio,fp,BIO_NOCLOSE);
-
- if(!(rsa=PEM_read_bio_RSAPrivateKey(bio,0,0,0)))
- ABORT(R_NOT_FOUND);
-
- *keyp=rsa;
- _status=0;
- abort:
- return(_status);
- }
-#endif
-
-
void nr_errprintf_log(const char *format,...)
{
va_list ap;
@@ -296,55 +264,6 @@ int nr_sha1_file(char *filename,UCHAR *out)
// TODO
#else
-#if 0
-
-#include <fts.h>
-
-int nr_rm_tree(char *path)
- {
- FTS *fts=0;
- FTSENT *p;
- int failed=0;
- int _status;
- char *argv[2];
-
- argv[0]=path;
- argv[1]=0;
-
- if(!(fts=fts_open(argv,0,NULL))){
- r_log_e(LOG_COMMON,LOG_ERR,"Couldn't open directory %s",path);
- ABORT(R_FAILED);
- }
-
- while(p=fts_read(fts)){
- switch(p->fts_info){
- case FTS_D:
- break;
- case FTS_DOT:
- break;
- case FTS_ERR:
- r_log_e(LOG_COMMON,LOG_ERR,"Problem reading %s",p->fts_path);
- break;
- default:
- r_log(LOG_COMMON,LOG_DEBUG,"Removing %s",p->fts_path);
- errno=0;
- if(remove(p->fts_path)){
- r_log_e(LOG_COMMON,LOG_ERR,"Problem removing %s",p->fts_path);
- failed=1;
- }
- }
- }
-
- if(failed)
- ABORT(R_FAILED);
-
- _status=0;
- abort:
- if(fts) fts_close(fts);
- return(_status);
- }
-#endif
-
int nr_write_pid_file(char *pid_filename)
{
FILE *fp;
@@ -625,7 +544,7 @@ inet_ntop6(const unsigned char *src, char *dst, size_t size)
* to use pointer overlays. All the world's not a VAX.
*/
char tmp[sizeof "ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255"];
- char *tp, *ep;
+ char *tp = 0, *ep = 0;
struct { int base, len; } best, cur;
unsigned int words[NS_IN6ADDRSZ / NS_INT16SZ];
int i;
diff --git a/dom/media/webrtc/transport/third_party/nrappkit/src/util/util.h b/dom/media/webrtc/transport/third_party/nrappkit/src/util/util.h
index d7861659cd..26c692fbbe 100644
--- a/dom/media/webrtc/transport/third_party/nrappkit/src/util/util.h
+++ b/dom/media/webrtc/transport/third_party/nrappkit/src/util/util.h
@@ -43,11 +43,6 @@
#include "registry.h"
int nr_get_filename(char *base,char *name, char **namep);
-#if 0
-#include <openssl/ssl.h>
-
-int read_RSA_private_key(char *base, char *name,RSA **keyp);
-#endif
void nr_errprintf_log(const char *fmt,...);
void nr_errprintf_log2(void *ignore, const char *fmt,...);
extern int nr_util_default_log_facility;
diff --git a/dom/media/webrtc/transport/transportlayerdtls.cpp b/dom/media/webrtc/transport/transportlayerdtls.cpp
index 4ab8aaa029..1279726bce 100644
--- a/dom/media/webrtc/transport/transportlayerdtls.cpp
+++ b/dom/media/webrtc/transport/transportlayerdtls.cpp
@@ -9,12 +9,14 @@
#include "transportlayerdtls.h"
#include <algorithm>
+#include <iomanip>
#include <queue>
#include <sstream>
#include "dtlsidentity.h"
#include "keyhi.h"
#include "logging.h"
+#include "mozilla/glean/GleanMetrics.h"
#include "mozilla/Telemetry.h"
#include "mozilla/UniquePtr.h"
#include "mozilla/Unused.h"
@@ -889,6 +891,7 @@ void TransportLayerDtls::Handshake() {
if (!cert_ok_) {
MOZ_MTLOG(ML_ERROR, LAYER_INFO << "Certificate check never occurred");
TL_SET_STATE(TS_ERROR);
+ RecordHandshakeCompletionTelemetry("CERT_FAILURE");
return;
}
if (!CheckAlpn()) {
@@ -897,11 +900,13 @@ void TransportLayerDtls::Handshake() {
// (assuming the close_notify isn't dropped).
ssl_fd_ = nullptr;
TL_SET_STATE(TS_ERROR);
+ RecordHandshakeCompletionTelemetry("ALPN_FAILURE");
return;
}
TL_SET_STATE(TS_OPEN);
+ RecordHandshakeCompletionTelemetry("SUCCESS");
RecordTlsTelemetry();
timer_ = nullptr;
} else {
@@ -932,6 +937,7 @@ void TransportLayerDtls::Handshake() {
MOZ_MTLOG(ML_ERROR, LAYER_INFO << "DTLS handshake error " << err << " ("
<< err_msg << ")");
TL_SET_STATE(TS_ERROR);
+ RecordHandshakeCompletionTelemetry(err_msg);
break;
}
}
@@ -1468,6 +1474,17 @@ void TransportLayerDtls::TimerCallback(nsITimer* timer, void* arg) {
dtls->Handshake();
}
+void TransportLayerDtls::RecordHandshakeCompletionTelemetry(
+ const char* aResult) {
+ if (role_ == CLIENT) {
+ mozilla::glean::webrtcdtls::client_handshake_result.Get(nsCString(aResult))
+ .Add(1);
+ } else {
+ mozilla::glean::webrtcdtls::server_handshake_result.Get(nsCString(aResult))
+ .Add(1);
+ }
+}
+
void TransportLayerDtls::RecordTlsTelemetry() {
MOZ_ASSERT(state_ == TS_OPEN);
SSLChannelInfo info;
@@ -1478,54 +1495,29 @@ void TransportLayerDtls::RecordTlsTelemetry() {
return;
}
- uint16_t telemetry_cipher = 0;
-
- switch (info.cipherSuite) {
- /* Old DHE ciphers: candidates for removal, see bug 1227519 */
- case TLS_DHE_RSA_WITH_AES_128_CBC_SHA:
- telemetry_cipher = 1;
- break;
- case TLS_DHE_RSA_WITH_AES_256_CBC_SHA:
- telemetry_cipher = 2;
- break;
- /* Current ciphers */
- case TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA:
- telemetry_cipher = 3;
- break;
- case TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA:
- telemetry_cipher = 4;
- break;
- case TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA:
- telemetry_cipher = 5;
- break;
- case TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:
- telemetry_cipher = 6;
- break;
- case TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256:
- telemetry_cipher = 7;
+ switch (info.protocolVersion) {
+ case SSL_LIBRARY_VERSION_TLS_1_1:
+ mozilla::glean::webrtcdtls::protocol_version.Get("1.0"_ns).Add(1);
break;
- case TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256:
- telemetry_cipher = 8;
+ case SSL_LIBRARY_VERSION_TLS_1_2:
+ mozilla::glean::webrtcdtls::protocol_version.Get("1.2"_ns).Add(1);
break;
- case TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256:
- telemetry_cipher = 9;
- break;
- case TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256:
- telemetry_cipher = 10;
- break;
- /* TLS 1.3 ciphers */
- case TLS_AES_128_GCM_SHA256:
- telemetry_cipher = 11;
- break;
- case TLS_CHACHA20_POLY1305_SHA256:
- telemetry_cipher = 12;
- break;
- case TLS_AES_256_GCM_SHA384:
- telemetry_cipher = 13;
+ case SSL_LIBRARY_VERSION_TLS_1_3:
+ mozilla::glean::webrtcdtls::protocol_version.Get("1.3"_ns).Add(1);
break;
+ default:
+ MOZ_CRASH("Unknown SSL version");
}
- Telemetry::Accumulate(Telemetry::WEBRTC_DTLS_CIPHER, telemetry_cipher);
+ {
+ std::ostringstream oss;
+ // Record TLS cipher-suite ID as a string (e.g.,
+ // TLS_DHE_RSA_WITH_AES_128_CBC_SHA is 0x0033)
+ oss << "0x" << std::setfill('0') << std::setw(4) << std::hex
+ << info.cipherSuite;
+ mozilla::glean::webrtcdtls::cipher.Get(nsCString(oss.str().c_str())).Add(1);
+ MOZ_MTLOG(ML_DEBUG, "cipher: " << oss.str());
+ }
uint16_t cipher;
nsresult rv = GetSrtpCipher(&cipher);
@@ -1535,24 +1527,15 @@ void TransportLayerDtls::RecordTlsTelemetry() {
return;
}
- auto cipher_label = mozilla::Telemetry::LABELS_WEBRTC_SRTP_CIPHER::Unknown;
-
- switch (cipher) {
- case kDtlsSrtpAes128CmHmacSha1_80:
- cipher_label = Telemetry::LABELS_WEBRTC_SRTP_CIPHER::Aes128CmHmacSha1_80;
- break;
- case kDtlsSrtpAes128CmHmacSha1_32:
- cipher_label = Telemetry::LABELS_WEBRTC_SRTP_CIPHER::Aes128CmHmacSha1_32;
- break;
- case kDtlsSrtpAeadAes128Gcm:
- cipher_label = Telemetry::LABELS_WEBRTC_SRTP_CIPHER::AeadAes128Gcm;
- break;
- case kDtlsSrtpAeadAes256Gcm:
- cipher_label = Telemetry::LABELS_WEBRTC_SRTP_CIPHER::AeadAes256Gcm;
- break;
+ {
+ std::ostringstream oss;
+ // Record SRTP cipher-suite ID as a string (e.g.,
+ // SRTP_AES128_CM_HMAC_SHA1_80 is 0x0001)
+ oss << "0x" << std::setfill('0') << std::setw(4) << std::hex << cipher;
+ mozilla::glean::webrtcdtls::srtp_cipher.Get(nsCString(oss.str().c_str()))
+ .Add(1);
+ MOZ_MTLOG(ML_DEBUG, "srtp cipher: " << oss.str());
}
-
- Telemetry::AccumulateCategorical(cipher_label);
}
} // namespace mozilla
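The old enumerated telemetry buckets are replaced by labeled Glean counters keyed on the raw suite ID, so newly negotiated ciphers need no code changes to be recorded. The label format is the zero-padded, lowercase hex the ostringstream produces; a quick shell check of that format, using the suite IDs from the comments above:

printf '0x%04x\n' $((0x0033))   # -> 0x0033 (TLS_DHE_RSA_WITH_AES_128_CBC_SHA)
printf '0x%04x\n' $((0x0001))   # -> 0x0001 (SRTP_AES128_CM_HMAC_SHA1_80)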
diff --git a/dom/media/webrtc/transport/transportlayerdtls.h b/dom/media/webrtc/transport/transportlayerdtls.h
index d68a6e77d5..e0370a04b2 100644
--- a/dom/media/webrtc/transport/transportlayerdtls.h
+++ b/dom/media/webrtc/transport/transportlayerdtls.h
@@ -144,7 +144,7 @@ class TransportLayerDtls final : public TransportLayer {
SECStatus CheckDigest(const DtlsDigest& digest,
UniqueCERTCertificate& cert) const;
- void RecordHandshakeCompletionTelemetry(TransportLayer::State endState);
+ void RecordHandshakeCompletionTelemetry(const char* aResult);
void RecordTlsTelemetry();
static PRBool WriteSrtpXtn(PRFileDesc* fd, SSLHandshakeType message,
diff --git a/dom/media/webvtt/vtt.sys.mjs b/dom/media/webvtt/vtt.sys.mjs
index 9e8071c427..af400fbbfe 100644
--- a/dom/media/webvtt/vtt.sys.mjs
+++ b/dom/media/webvtt/vtt.sys.mjs
@@ -351,15 +351,44 @@ function parseContent(window, input, mode) {
return consume(m[1] ? m[1] : m[2]);
}
- // Unescape a string 's'.
- function unescape1(e) {
- return ESCAPE[e];
- }
- function unescape(s) {
- let m;
- while ((m = s.match(/&(amp|lt|gt|lrm|rlm|nbsp);/))) {
- s = s.replace(m[0], unescape1);
- }
+ const unescapeHelper = window.document.createElement("div");
+ function unescapeEntities(s) {
+ let match;
+
+ // Decimal numeric character reference
+ s = s.replace(/&#(\d+);?/g, (candidate, number) => {
+ try {
+ const codepoint = parseInt(number);
+ return String.fromCodePoint(codepoint);
+ } catch (_) {
+ return candidate;
+ }
+ });
+
+ // Hexadecimal numeric character reference
+ s = s.replace(/&#x([\dA-Fa-f]+);?/g, (candidate, number) => {
+ try {
+ const codepoint = parseInt(number, 16);
+ return String.fromCodePoint(codepoint);
+ } catch (_) {
+ return candidate;
+ }
+ });
+
+ // Named character references
+ s = s.replace(/&\w[\w\d]*;?/g, candidate => {
+ // The list of entities is huge, so we use innerHTML instead.
+ // We should probably use setHTML instead once that is available (bug 1650370).
+ // Ideally we would be able to use a faster/simpler variant of setHTML (bug 1731215).
+ unescapeHelper.innerHTML = candidate;
+ const unescaped = unescapeHelper.innerText;
+ if (unescaped == candidate) { // not a valid entity
+ return candidate;
+ }
+ return unescaped;
+ });
+ unescapeHelper.innerHTML = "";
+
return s;
}
@@ -432,12 +461,21 @@ function parseContent(window, input, mode) {
while ((t = nextToken()) !== null) {
if (t[0] === '<') {
if (t[1] === "/") {
+ const endTag = t.slice(2, -1);
+ const stackEnd = tagStack.at(-1);
+
// If the closing tag matches, move back up to the parent node.
- if (tagStack.length &&
- tagStack[tagStack.length - 1] === t.substr(2).replace(">", "")) {
+ if (stackEnd == endTag) {
tagStack.pop();
current = current.parentNode;
+
+ // If the closing tag is <ruby> and we're at an <rt>, move back up to
+ // the <ruby>'s parent node.
+ } else if (endTag == "ruby" && current.nodeName == "RT") {
+ tagStack.pop();
+ current = current.parentNode.parentNode;
}
+
// Otherwise just ignore the end tag.
continue;
}
@@ -477,7 +515,7 @@ function parseContent(window, input, mode) {
}
// Text nodes are leaf nodes.
- current.appendChild(window.document.createTextNode(unescape(t)));
+ current.appendChild(window.document.createTextNode(unescapeEntities(t)));
}
return root;