summaryrefslogtreecommitdiffstats
path: root/dom/media/platforms/agnostic
diff options
context:
space:
mode:
Diffstat (limited to 'dom/media/platforms/agnostic')
-rw-r--r--dom/media/platforms/agnostic/AOMDecoder.cpp1066
-rw-r--r--dom/media/platforms/agnostic/AOMDecoder.h287
-rw-r--r--dom/media/platforms/agnostic/AgnosticDecoderModule.cpp218
-rw-r--r--dom/media/platforms/agnostic/AgnosticDecoderModule.h39
-rw-r--r--dom/media/platforms/agnostic/BlankDecoderModule.cpp144
-rw-r--r--dom/media/platforms/agnostic/BlankDecoderModule.h68
-rw-r--r--dom/media/platforms/agnostic/DAV1DDecoder.cpp382
-rw-r--r--dom/media/platforms/agnostic/DAV1DDecoder.h68
-rw-r--r--dom/media/platforms/agnostic/DummyMediaDataDecoder.cpp80
-rw-r--r--dom/media/platforms/agnostic/DummyMediaDataDecoder.h68
-rw-r--r--dom/media/platforms/agnostic/NullDecoderModule.cpp57
-rw-r--r--dom/media/platforms/agnostic/OpusDecoder.cpp380
-rw-r--r--dom/media/platforms/agnostic/OpusDecoder.h70
-rw-r--r--dom/media/platforms/agnostic/TheoraDecoder.cpp267
-rw-r--r--dom/media/platforms/agnostic/TheoraDecoder.h64
-rw-r--r--dom/media/platforms/agnostic/VPXDecoder.cpp676
-rw-r--r--dom/media/platforms/agnostic/VPXDecoder.h208
-rw-r--r--dom/media/platforms/agnostic/VorbisDecoder.cpp364
-rw-r--r--dom/media/platforms/agnostic/VorbisDecoder.h66
-rw-r--r--dom/media/platforms/agnostic/WAVDecoder.cpp162
-rw-r--r--dom/media/platforms/agnostic/WAVDecoder.h44
-rw-r--r--dom/media/platforms/agnostic/bytestreams/Adts.cpp94
-rw-r--r--dom/media/platforms/agnostic/bytestreams/Adts.h22
-rw-r--r--dom/media/platforms/agnostic/bytestreams/AnnexB.cpp364
-rw-r--r--dom/media/platforms/agnostic/bytestreams/AnnexB.h66
-rw-r--r--dom/media/platforms/agnostic/bytestreams/H264.cpp1356
-rw-r--r--dom/media/platforms/agnostic/bytestreams/H264.h525
-rw-r--r--dom/media/platforms/agnostic/bytestreams/gtest/TestAnnexB.cpp144
-rw-r--r--dom/media/platforms/agnostic/bytestreams/gtest/moz.build11
-rw-r--r--dom/media/platforms/agnostic/bytestreams/moz.build35
-rw-r--r--dom/media/platforms/agnostic/eme/ChromiumCDMVideoDecoder.cpp156
-rw-r--r--dom/media/platforms/agnostic/eme/ChromiumCDMVideoDecoder.h54
-rw-r--r--dom/media/platforms/agnostic/eme/DecryptThroughputLimit.h103
-rw-r--r--dom/media/platforms/agnostic/eme/EMEDecoderModule.cpp479
-rw-r--r--dom/media/platforms/agnostic/eme/EMEDecoderModule.h79
-rw-r--r--dom/media/platforms/agnostic/eme/SamplesWaitingForKey.cpp79
-rw-r--r--dom/media/platforms/agnostic/eme/SamplesWaitingForKey.h68
-rw-r--r--dom/media/platforms/agnostic/eme/moz.build22
-rw-r--r--dom/media/platforms/agnostic/gmp/GMPDecoderModule.cpp94
-rw-r--r--dom/media/platforms/agnostic/gmp/GMPDecoderModule.h58
-rw-r--r--dom/media/platforms/agnostic/gmp/GMPVideoDecoder.cpp489
-rw-r--r--dom/media/platforms/agnostic/gmp/GMPVideoDecoder.h129
-rw-r--r--dom/media/platforms/agnostic/gmp/moz.build24
43 files changed, 9229 insertions, 0 deletions
diff --git a/dom/media/platforms/agnostic/AOMDecoder.cpp b/dom/media/platforms/agnostic/AOMDecoder.cpp
new file mode 100644
index 0000000000..d6b0576cd2
--- /dev/null
+++ b/dom/media/platforms/agnostic/AOMDecoder.cpp
@@ -0,0 +1,1066 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "AOMDecoder.h"
+
+#include <algorithm>
+
+#include "BitWriter.h"
+#include "BitReader.h"
+#include "ImageContainer.h"
+#include "MediaResult.h"
+#include "TimeUnits.h"
+#include "aom/aom_image.h"
+#include "aom/aomdx.h"
+#include "gfx2DGlue.h"
+#include "gfxUtils.h"
+#include "mozilla/PodOperations.h"
+#include "mozilla/SyncRunnable.h"
+#include "mozilla/TaskQueue.h"
+#include "nsError.h"
+#include "nsThreadUtils.h"
+#include "prsystem.h"
+#include "VideoUtils.h"
+
+#undef LOG
+#define LOG(arg, ...) \
+ DDMOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, "::%s: " arg, __func__, \
+ ##__VA_ARGS__)
+#define LOG_RESULT(code, message, ...) \
+ DDMOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, "::%s: %s (code %d) " message, \
+ __func__, aom_codec_err_to_string(code), (int)code, ##__VA_ARGS__)
+#define LOGEX_RESULT(_this, code, message, ...) \
+ DDMOZ_LOGEX(_this, sPDMLog, mozilla::LogLevel::Debug, \
+ "::%s: %s (code %d) " message, __func__, \
+ aom_codec_err_to_string(code), (int)code, ##__VA_ARGS__)
+#define LOG_STATIC_RESULT(code, message, ...) \
+ MOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, \
+ ("AOMDecoder::%s: %s (code %d) " message, __func__, \
+ aom_codec_err_to_string(code), (int)code, ##__VA_ARGS__))
+
+#define ASSERT_BYTE_ALIGNED(bitIO) MOZ_ASSERT((bitIO).BitCount() % 8 == 0)
+
+namespace mozilla {
+
+using namespace gfx;
+using namespace layers;
+using gfx::CICP::ColourPrimaries;
+using gfx::CICP::MatrixCoefficients;
+using gfx::CICP::TransferCharacteristics;
+
+static MediaResult InitContext(AOMDecoder& aAOMDecoder, aom_codec_ctx_t* aCtx,
+ const VideoInfo& aInfo) {
+ aom_codec_iface_t* dx = aom_codec_av1_dx();
+ if (!dx) {
+ return MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ RESULT_DETAIL("Couldn't get AV1 decoder interface."));
+ }
+
+ size_t decode_threads = 2;
+ if (aInfo.mDisplay.width >= 2048) {
+ decode_threads = 8;
+ } else if (aInfo.mDisplay.width >= 1024) {
+ decode_threads = 4;
+ }
+ decode_threads = std::min(decode_threads, GetNumberOfProcessors());
+
+ aom_codec_dec_cfg_t config;
+ PodZero(&config);
+ config.threads = static_cast<unsigned int>(decode_threads);
+ config.w = config.h = 0; // set after decode
+ config.allow_lowbitdepth = true;
+
+ aom_codec_flags_t flags = 0;
+
+ auto res = aom_codec_dec_init(aCtx, dx, &config, flags);
+ if (res != AOM_CODEC_OK) {
+ LOGEX_RESULT(&aAOMDecoder, res, "Codec initialization failed, res=%d",
+ int(res));
+ return MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ RESULT_DETAIL("AOM error initializing AV1 decoder: %s",
+ aom_codec_err_to_string(res)));
+ }
+ return NS_OK;
+}
+
// Constructs the decoder on the caller's thread; the libaom context itself is
// only initialized later in Init(). The codec struct is zeroed here so that a
// Shutdown() arriving before Init() never reads uninitialized memory.
AOMDecoder::AOMDecoder(const CreateDecoderParams& aParams)
    : mImageContainer(aParams.mImageContainer),
      mTaskQueue(TaskQueue::Create(
          GetMediaThreadPool(MediaThreadType::PLATFORM_DECODER), "AOMDecoder")),
      mInfo(aParams.VideoConfig()),
      mTrackingId(aParams.mTrackingId) {
  PodZero(&mCodec);
}

AOMDecoder::~AOMDecoder() = default;
+
+RefPtr<ShutdownPromise> AOMDecoder::Shutdown() {
+ RefPtr<AOMDecoder> self = this;
+ return InvokeAsync(mTaskQueue, __func__, [self]() {
+ auto res = aom_codec_destroy(&self->mCodec);
+ if (res != AOM_CODEC_OK) {
+ LOGEX_RESULT(self.get(), res, "aom_codec_destroy");
+ }
+ return self->mTaskQueue->BeginShutdown();
+ });
+}
+
+RefPtr<MediaDataDecoder::InitPromise> AOMDecoder::Init() {
+ MediaResult rv = InitContext(*this, &mCodec, mInfo);
+ if (NS_FAILED(rv)) {
+ return AOMDecoder::InitPromise::CreateAndReject(rv, __func__);
+ }
+ return AOMDecoder::InitPromise::CreateAndResolve(TrackInfo::kVideoTrack,
+ __func__);
+}
+
+RefPtr<MediaDataDecoder::FlushPromise> AOMDecoder::Flush() {
+ return InvokeAsync(mTaskQueue, __func__, [this, self = RefPtr(this)]() {
+ mPerformanceRecorder.Record(std::numeric_limits<int64_t>::max());
+ return FlushPromise::CreateAndResolve(true, __func__);
+ });
+}
+
// UniquePtr dtor wrapper for aom_image_t.
// Lets UniquePtr<aom_image_t, AomImageFree> release libaom-allocated images
// through aom_img_free.
struct AomImageFree {
  void operator()(aom_image_t* img) { aom_img_free(img); }
};
+
+RefPtr<MediaDataDecoder::DecodePromise> AOMDecoder::ProcessDecode(
+ MediaRawData* aSample) {
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+
+#if defined(DEBUG)
+ NS_ASSERTION(
+ IsKeyframe(*aSample) == aSample->mKeyframe,
+ "AOM Decode Keyframe error sample->mKeyframe and si.si_kf out of sync");
+#endif
+
+ MediaInfoFlag flag = MediaInfoFlag::None;
+ flag |= (aSample->mKeyframe ? MediaInfoFlag::KeyFrame
+ : MediaInfoFlag::NonKeyFrame);
+ flag |= MediaInfoFlag::SoftwareDecoding;
+ flag |= MediaInfoFlag::VIDEO_AV1;
+
+ mTrackingId.apply([&](const auto& aId) {
+ mPerformanceRecorder.Start(aSample->mTimecode.ToMicroseconds(),
+ "AOMDecoder"_ns, aId, flag);
+ });
+
+ if (aom_codec_err_t r = aom_codec_decode(&mCodec, aSample->Data(),
+ aSample->Size(), nullptr)) {
+ LOG_RESULT(r, "Decode error!");
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("AOM error decoding AV1 sample: %s",
+ aom_codec_err_to_string(r))),
+ __func__);
+ }
+
+ aom_codec_iter_t iter = nullptr;
+ aom_image_t* img;
+ UniquePtr<aom_image_t, AomImageFree> img8;
+ DecodedData results;
+
+ while ((img = aom_codec_get_frame(&mCodec, &iter))) {
+ NS_ASSERTION(
+ img->fmt == AOM_IMG_FMT_I420 || img->fmt == AOM_IMG_FMT_I42016 ||
+ img->fmt == AOM_IMG_FMT_I444 || img->fmt == AOM_IMG_FMT_I44416,
+ "AV1 image format not I420 or I444");
+
+ // Chroma shifts are rounded down as per the decoding examples in the SDK
+ VideoData::YCbCrBuffer b;
+ b.mPlanes[0].mData = img->planes[0];
+ b.mPlanes[0].mStride = img->stride[0];
+ b.mPlanes[0].mHeight = img->d_h;
+ b.mPlanes[0].mWidth = img->d_w;
+ b.mPlanes[0].mSkip = 0;
+
+ b.mPlanes[1].mData = img->planes[1];
+ b.mPlanes[1].mStride = img->stride[1];
+ b.mPlanes[1].mSkip = 0;
+
+ b.mPlanes[2].mData = img->planes[2];
+ b.mPlanes[2].mStride = img->stride[2];
+ b.mPlanes[2].mSkip = 0;
+
+ if (img->fmt == AOM_IMG_FMT_I420 || img->fmt == AOM_IMG_FMT_I42016) {
+ b.mChromaSubsampling = gfx::ChromaSubsampling::HALF_WIDTH_AND_HEIGHT;
+
+ b.mPlanes[1].mHeight = (img->d_h + 1) >> img->y_chroma_shift;
+ b.mPlanes[1].mWidth = (img->d_w + 1) >> img->x_chroma_shift;
+
+ b.mPlanes[2].mHeight = (img->d_h + 1) >> img->y_chroma_shift;
+ b.mPlanes[2].mWidth = (img->d_w + 1) >> img->x_chroma_shift;
+ } else if (img->fmt == AOM_IMG_FMT_I444 || img->fmt == AOM_IMG_FMT_I44416) {
+ b.mPlanes[1].mHeight = img->d_h;
+ b.mPlanes[1].mWidth = img->d_w;
+
+ b.mPlanes[2].mHeight = img->d_h;
+ b.mPlanes[2].mWidth = img->d_w;
+ } else {
+ LOG("AOM Unknown image format");
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("AOM Unknown image format")),
+ __func__);
+ }
+
+ if (img->bit_depth == 10) {
+ b.mColorDepth = ColorDepth::COLOR_10;
+ } else if (img->bit_depth == 12) {
+ b.mColorDepth = ColorDepth::COLOR_12;
+ }
+
+ switch (img->mc) {
+ case AOM_CICP_MC_BT_601:
+ b.mYUVColorSpace = YUVColorSpace::BT601;
+ break;
+ case AOM_CICP_MC_BT_2020_NCL:
+ case AOM_CICP_MC_BT_2020_CL:
+ b.mYUVColorSpace = YUVColorSpace::BT2020;
+ break;
+ case AOM_CICP_MC_BT_709:
+ b.mYUVColorSpace = YUVColorSpace::BT709;
+ break;
+ default:
+ b.mYUVColorSpace = DefaultColorSpace({img->d_w, img->d_h});
+ break;
+ }
+ b.mColorRange = img->range == AOM_CR_FULL_RANGE ? ColorRange::FULL
+ : ColorRange::LIMITED;
+
+ switch (img->cp) {
+ case AOM_CICP_CP_BT_709:
+ b.mColorPrimaries = ColorSpace2::BT709;
+ break;
+ case AOM_CICP_CP_BT_2020:
+ b.mColorPrimaries = ColorSpace2::BT2020;
+ break;
+ default:
+ b.mColorPrimaries = ColorSpace2::BT709;
+ break;
+ }
+
+ RefPtr<VideoData> v;
+ v = VideoData::CreateAndCopyData(
+ mInfo, mImageContainer, aSample->mOffset, aSample->mTime,
+ aSample->mDuration, b, aSample->mKeyframe, aSample->mTimecode,
+ mInfo.ScaledImageRect(img->d_w, img->d_h), nullptr);
+
+ if (!v) {
+ LOG("Image allocation error source %ux%u display %ux%u picture %ux%u",
+ img->d_w, img->d_h, mInfo.mDisplay.width, mInfo.mDisplay.height,
+ mInfo.mImage.width, mInfo.mImage.height);
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__), __func__);
+ }
+ mPerformanceRecorder.Record(
+ aSample->mTimecode.ToMicroseconds(), [&](DecodeStage& aStage) {
+ aStage.SetResolution(mInfo.mImage.width, mInfo.mImage.height);
+ auto format = [&]() -> Maybe<DecodeStage::ImageFormat> {
+ switch (img->fmt) {
+ case AOM_IMG_FMT_I420:
+ case AOM_IMG_FMT_I42016:
+ return Some(DecodeStage::YUV420P);
+ case AOM_IMG_FMT_I444:
+ case AOM_IMG_FMT_I44416:
+ return Some(DecodeStage::YUV444P);
+ default:
+ return Nothing();
+ }
+ }();
+ format.apply([&](auto& aFmt) { aStage.SetImageFormat(aFmt); });
+ aStage.SetYUVColorSpace(b.mYUVColorSpace);
+ aStage.SetColorRange(b.mColorRange);
+ aStage.SetColorDepth(b.mColorDepth);
+ });
+ results.AppendElement(std::move(v));
+ }
+ return DecodePromise::CreateAndResolve(std::move(results), __func__);
+}
+
+RefPtr<MediaDataDecoder::DecodePromise> AOMDecoder::Decode(
+ MediaRawData* aSample) {
+ return InvokeAsync<MediaRawData*>(mTaskQueue, this, __func__,
+ &AOMDecoder::ProcessDecode, aSample);
+}
+
+RefPtr<MediaDataDecoder::DecodePromise> AOMDecoder::Drain() {
+ return InvokeAsync(mTaskQueue, __func__, [] {
+ return DecodePromise::CreateAndResolve(DecodedData(), __func__);
+ });
+}
+
+/* static */
+bool AOMDecoder::IsAV1(const nsACString& aMimeType) {
+ return aMimeType.EqualsLiteral("video/av1");
+}
+
+/* static */
+bool AOMDecoder::IsKeyframe(Span<const uint8_t> aBuffer) {
+ aom_codec_stream_info_t info;
+ PodZero(&info);
+
+ auto res = aom_codec_peek_stream_info(aom_codec_av1_dx(), aBuffer.Elements(),
+ aBuffer.Length(), &info);
+ if (res != AOM_CODEC_OK) {
+ LOG_STATIC_RESULT(
+ res, "couldn't get keyframe flag with aom_codec_peek_stream_info");
+ return false;
+ }
+
+ return bool(info.is_kf);
+}
+
/* static */
// Recovers the coded frame size by peeking the stream info from aBuffer.
// On peek failure the error is only logged; the zero-initialized info is
// still read, so callers get a 0x0 size rather than an error.
gfx::IntSize AOMDecoder::GetFrameSize(Span<const uint8_t> aBuffer) {
  aom_codec_stream_info_t info;
  PodZero(&info);

  auto res = aom_codec_peek_stream_info(aom_codec_av1_dx(), aBuffer.Elements(),
                                        aBuffer.Length(), &info);
  if (res != AOM_CODEC_OK) {
    LOG_STATIC_RESULT(
        res, "couldn't get frame size with aom_codec_peek_stream_info");
  }

  return gfx::IntSize(info.w, info.h);
}
+
/* static */
// Returns a lazy iterator over the OBUs contained in aData; each OBU is
// parsed on demand by OBUIterator::UpdateNext().
AOMDecoder::OBUIterator AOMDecoder::ReadOBUs(const Span<const uint8_t>& aData) {
  return OBUIterator(aData);
}
+
+void AOMDecoder::OBUIterator::UpdateNext() {
+ // If mGoNext is not set, we don't need to load a new OBU.
+ if (!mGoNext) {
+ return;
+ }
+ // Check if we've reached the end of the data. Allow mGoNext to stay true so
+ // that HasNext() will return false.
+ if (mPosition >= mData.Length()) {
+ return;
+ }
+ mGoNext = false;
+
+ // If retrieving the next OBU fails, reset the current OBU and set the
+ // position past the end of the data so that HasNext() returns false.
+ auto resetExit = MakeScopeExit([&]() {
+ mCurrent = OBUInfo();
+ mPosition = mData.Length();
+ });
+
+ auto subspan = mData.Subspan(mPosition, mData.Length() - mPosition);
+ BitReader br(subspan.Elements(), subspan.Length() * 8);
+ OBUInfo temp;
+
+ // AV1 spec available at:
+ // https://aomediacodec.github.io/av1-spec/
+ // or https://aomediacodec.github.io/av1-spec/av1-spec.pdf
+
+ // begin open_bitstream_unit( )
+ // https://aomediacodec.github.io/av1-spec/#general-obu-syntax
+
+ // begin obu_header( )
+ // https://aomediacodec.github.io/av1-spec/#obu-header-syntax
+ br.ReadBit(); // obu_forbidden_bit
+ temp.mType = static_cast<OBUType>(br.ReadBits(4));
+ if (!temp.IsValid()) {
+ // Non-fatal error, unknown OBUs can be skipped as long as the size field
+ // is properly specified.
+ NS_WARNING(nsPrintfCString("Encountered unknown OBU type (%" PRIu8
+ ", OBU may be invalid",
+ static_cast<uint8_t>(temp.mType))
+ .get());
+ }
+ temp.mExtensionFlag = br.ReadBit();
+ bool hasSizeField = br.ReadBit();
+ br.ReadBit(); // obu_reserved_1bit
+
+ // begin obu_extension_header( ) (5.3.3)
+ if (temp.mExtensionFlag) {
+ if (br.BitsLeft() < 8) {
+ mResult = MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ "Not enough bits left for an OBU extension header");
+ return;
+ }
+ br.ReadBits(3); // temporal_id
+ br.ReadBits(2); // spatial_id
+ br.ReadBits(3); // extension_header_reserved_3bits
+ }
+ // end obu_extension_header( )
+ // end obu_header( )
+
+ // Get the size of the remaining OBU data attached to the header in
+ // bytes.
+ size_t size;
+ if (hasSizeField) {
+ if (br.BitsLeft() < 8) {
+ mResult = MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ "Not enough bits left for an OBU size field");
+ return;
+ }
+ CheckedUint32 checkedSize = br.ReadULEB128().toChecked<uint32_t>();
+ // Spec requires that the value ULEB128 reads is (1 << 32) - 1 or below.
+ // See leb128(): https://aomediacodec.github.io/av1-spec/#leb128
+ if (!checkedSize.isValid()) {
+ mResult =
+ MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR, "OBU size was too large");
+ return;
+ }
+ size = checkedSize.value();
+ } else {
+ // This case should rarely happen in practice. To support the Annex B
+ // format in the specification, we would have to parse every header type
+ // to skip over them, but this allows us to at least iterate once to
+ // retrieve the first OBU in the data.
+ size = mData.Length() - 1 - temp.mExtensionFlag;
+ }
+
+ if (br.BitsLeft() / 8 < size) {
+ mResult = MediaResult(
+ NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ nsPrintfCString("Size specified by the OBU header (%zu) is more "
+ "than the actual remaining OBU data (%zu)",
+ size, br.BitsLeft() / 8)
+ .get());
+ return;
+ }
+
+ ASSERT_BYTE_ALIGNED(br);
+
+ size_t bytes = br.BitCount() / 8;
+ temp.mContents = mData.Subspan(mPosition + bytes, size);
+ mCurrent = temp;
+ // end open_bitstream_unit( )
+
+ mPosition += bytes + size;
+ resetExit.release();
+ mResult = NS_OK;
+}
+
/* static */
// Wraps aContents in a minimal OBU of the given type: a one-byte header with
// no extension and obu_has_size_field set, followed by the ULEB128-encoded
// payload length and the payload bytes themselves.
already_AddRefed<MediaByteBuffer> AOMDecoder::CreateOBU(
    const OBUType aType, const Span<const uint8_t>& aContents) {
  RefPtr<MediaByteBuffer> buffer = new MediaByteBuffer();

  BitWriter bw(buffer);
  bw.WriteBits(0, 1);  // obu_forbidden_bit
  bw.WriteBits(static_cast<uint8_t>(aType), 4);
  bw.WriteBit(false);  // obu_extension_flag
  bw.WriteBit(true);   // obu_has_size_field
  bw.WriteBits(0, 1);  // obu_reserved_1bit
  ASSERT_BYTE_ALIGNED(bw);
  bw.WriteULEB128(aContents.Length());
  ASSERT_BYTE_ALIGNED(bw);

  // The payload is appended raw; it is already byte-aligned.
  buffer->AppendElements(aContents.Elements(), aContents.Length());
  return buffer.forget();
}
+
/* static */
// Parses the LAST sequence header OBU found in aSample into aDestInfo.
// Returns NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA when the temporal unit carries
// no sequence header, a decode error when OBU iteration or trailing-bit
// validation fails, and NS_OK on success. aDestInfo is only written on
// success.
MediaResult AOMDecoder::ReadSequenceHeaderInfo(
    const Span<const uint8_t>& aSample, AV1SequenceInfo& aDestInfo) {
  // We need to get the last sequence header OBU, the specification does not
  // limit a temporal unit to one sequence header.
  OBUIterator iter = ReadOBUs(aSample);
  OBUInfo seqOBU;

  while (true) {
    if (!iter.HasNext()) {
      // Pass along the error from parsing the OBU.
      MediaResult result = iter.GetResult();
      if (result.Code() != NS_OK) {
        return result;
      }
      break;
    }
    OBUInfo obu = iter.Next();
    if (obu.mType == OBUType::SequenceHeader) {
      seqOBU = obu;
    }
  }

  if (seqOBU.mType != OBUType::SequenceHeader) {
    return NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA;
  }

  // Sequence header syntax is specified here:
  // https://aomediacodec.github.io/av1-spec/#sequence-header-obu-syntax
  // Section 5.5: Sequence header OBU syntax

  // See also Section 6.4: Sequence header OBU semantics
  // https://aomediacodec.github.io/av1-spec/#sequence-header-obu-semantics
  // This section defines all the fields used in the sequence header.
  BitReader br(seqOBU.mContents.Elements(), seqOBU.mContents.Length() * 8);
  AV1SequenceInfo tempInfo;

  // begin sequence_header_obu( )
  // https://aomediacodec.github.io/av1-spec/#general-sequence-header-obu-syntax
  tempInfo.mProfile = br.ReadBits(3);
  const bool stillPicture = br.ReadBit();
  const bool reducedStillPicture = br.ReadBit();
  // The spec only allows reduced_still_picture_header when still_picture is
  // also set.
  if (!stillPicture && reducedStillPicture) {
    return MediaResult(
        NS_ERROR_DOM_MEDIA_DECODE_ERR,
        "reduced_still_picture is true while still_picture is false");
  }

  if (reducedStillPicture) {
    // Reduced header implies exactly one operating point with fixed values.
    OperatingPoint op;
    op.mLayers = 0;
    op.mLevel = br.ReadBits(5);  // seq_level_idx[0]
    op.mTier = 0;
    tempInfo.mOperatingPoints.SetCapacity(1);
    tempInfo.mOperatingPoints.AppendElement(op);
  } else {
    bool decoderModelInfoPresent;
    uint8_t operatingPointCountMinusOne;

    if (br.ReadBit()) {  // timing_info_present_flag
      // begin timing_info( )
      // https://aomediacodec.github.io/av1-spec/#timing-info-syntax
      br.ReadBits(32);     // num_units_in_display_tick
      br.ReadBits(32);     // time_scale
      if (br.ReadBit()) {  // equal_picture_interval
        br.ReadUE();       // num_ticks_per_picture_minus_1
      }
      // end timing_info( )

      decoderModelInfoPresent = br.ReadBit();
      if (decoderModelInfoPresent) {
        // begin decoder_model_info( )
        // https://aomediacodec.github.io/av1-spec/#decoder-model-info-syntax
        br.ReadBits(5);   // buffer_delay_length_minus_1
        br.ReadBits(32);  // num_units_in_decoding_tick
        br.ReadBits(5);   // buffer_removal_time_length_minus_1
        br.ReadBits(5);   // frame_presentation_time_length_minus_1
        // end decoder_model_info( )
      }
    } else {
      decoderModelInfoPresent = false;
    }

    bool initialDisplayDelayPresent = br.ReadBit();
    operatingPointCountMinusOne = br.ReadBits(5);
    tempInfo.mOperatingPoints.SetCapacity(operatingPointCountMinusOne + 1);
    for (uint8_t i = 0; i <= operatingPointCountMinusOne; i++) {
      OperatingPoint op;
      op.mLayers = br.ReadBits(12);  // operating_point_idc[ i ]
      op.mLevel = br.ReadBits(5);    // seq_level_idx[ i ]
      // seq_tier is only coded for levels above 7 (4.1).
      op.mTier = op.mLevel > 7 ? br.ReadBits(1) : 0;
      if (decoderModelInfoPresent) {
        br.ReadBit();  // decoder_model_present_for_this_op[ i ]
      }
      if (initialDisplayDelayPresent) {
        if (br.ReadBit()) {  // initial_display_delay_present_for_this_op[ i ]
          br.ReadBits(4);
        }
      }
      tempInfo.mOperatingPoints.AppendElement(op);
    }
  }

  // Frame dimensions are coded as (bit-width - 1) then (value - 1).
  uint8_t frameWidthBits = br.ReadBits(4) + 1;
  uint8_t frameHeightBits = br.ReadBits(4) + 1;
  uint32_t maxWidth = br.ReadBits(frameWidthBits) + 1;
  uint32_t maxHeight = br.ReadBits(frameHeightBits) + 1;
  tempInfo.mImage = gfx::IntSize(maxWidth, maxHeight);

  if (!reducedStillPicture) {
    if (br.ReadBit()) {  // frame_id_numbers_present_flag
      br.ReadBits(4);    // delta_frame_id_length_minus_2
      br.ReadBits(3);    // additional_frame_id_length_minus_1
    }
  }

  br.ReadBit();  // use_128x128_superblock
  br.ReadBit();  // enable_filter_intra
  br.ReadBit();  // enable_intra_edge_filter

  if (reducedStillPicture) {
    // enable_interintra_compound = 0
    // enable_masked_compound = 0
    // enable_warped_motion = 0
    // enable_dual_filter = 0
    // enable_order_hint = 0
    // enable_jnt_comp = 0
    // enable_ref_frame_mvs = 0
    // seq_force_screen_content_tools = SELECT_SCREEN_CONTENT_TOOLS
    // seq_force_integer_mv = SELECT_INTEGER_MV
    // OrderHintBits = 0
  } else {
    br.ReadBit();  // enable_interintra_compound
    br.ReadBit();  // enable_masked_compound
    br.ReadBit();  // enable_warped_motion
    br.ReadBit();  // enable_dual_filter

    const bool enableOrderHint = br.ReadBit();

    if (enableOrderHint) {
      br.ReadBit();  // enable_jnt_comp
      br.ReadBit();  // enable_ref_frame_mvs
    }

    uint8_t forceScreenContentTools;

    if (br.ReadBit()) {  // seq_choose_screen_content_tools
      forceScreenContentTools = 2;  // SELECT_SCREEN_CONTENT_TOOLS
    } else {
      forceScreenContentTools = br.ReadBits(1);
    }

    if (forceScreenContentTools > 0) {
      if (!br.ReadBit()) {  // seq_choose_integer_mv
        br.ReadBit();       // seq_force_integer_mv
      }
    }

    if (enableOrderHint) {
      br.ReadBits(3);  // order_hint_bits_minus_1
    }
  }

  br.ReadBit();  // enable_superres
  br.ReadBit();  // enable_cdef
  br.ReadBit();  // enable_restoration

  // begin color_config( )
  // https://aomediacodec.github.io/av1-spec/#color-config-syntax
  const bool highBitDepth = br.ReadBit();
  if (tempInfo.mProfile == 2 && highBitDepth) {
    const bool twelveBit = br.ReadBit();
    tempInfo.mBitDepth = twelveBit ? 12 : 10;
  } else {
    tempInfo.mBitDepth = highBitDepth ? 10 : 8;
  }

  // Profile 1 (High) never codes mono_chrome; it is always color.
  tempInfo.mMonochrome = tempInfo.mProfile == 1 ? false : br.ReadBit();

  VideoColorSpace* colors = &tempInfo.mColorSpace;

  if (br.ReadBit()) {  // color_description_present_flag
    colors->mPrimaries = static_cast<ColourPrimaries>(br.ReadBits(8));
    colors->mTransfer = static_cast<TransferCharacteristics>(br.ReadBits(8));
    colors->mMatrix = static_cast<MatrixCoefficients>(br.ReadBits(8));
  } else {
    colors->mPrimaries = ColourPrimaries::CP_UNSPECIFIED;
    colors->mTransfer = TransferCharacteristics::TC_UNSPECIFIED;
    colors->mMatrix = MatrixCoefficients::MC_UNSPECIFIED;
  }

  if (tempInfo.mMonochrome) {
    colors->mRange = br.ReadBit() ? ColorRange::FULL : ColorRange::LIMITED;
    tempInfo.mSubsamplingX = true;
    tempInfo.mSubsamplingY = true;
    tempInfo.mChromaSamplePosition = ChromaSamplePosition::Unknown;
  } else if (colors->mPrimaries == ColourPrimaries::CP_BT709 &&
             colors->mTransfer == TransferCharacteristics::TC_SRGB &&
             colors->mMatrix == MatrixCoefficients::MC_IDENTITY) {
    // sRGB implies full range and 4:4:4 per the spec.
    colors->mRange = ColorRange::FULL;
    tempInfo.mSubsamplingX = false;
    tempInfo.mSubsamplingY = false;
  } else {
    colors->mRange = br.ReadBit() ? ColorRange::FULL : ColorRange::LIMITED;
    switch (tempInfo.mProfile) {
      case 0:
        tempInfo.mSubsamplingX = true;
        tempInfo.mSubsamplingY = true;
        break;
      case 1:
        tempInfo.mSubsamplingX = false;
        tempInfo.mSubsamplingY = false;
        break;
      case 2:
        if (tempInfo.mBitDepth == 12) {
          tempInfo.mSubsamplingX = br.ReadBit();
          tempInfo.mSubsamplingY =
              tempInfo.mSubsamplingX ? br.ReadBit() : false;
        } else {
          tempInfo.mSubsamplingX = true;
          tempInfo.mSubsamplingY = false;
        }
        break;
    }
    // chroma_sample_position is only coded for 4:2:0 streams.
    tempInfo.mChromaSamplePosition =
        tempInfo.mSubsamplingX && tempInfo.mSubsamplingY
            ? static_cast<ChromaSamplePosition>(br.ReadBits(2))
            : ChromaSamplePosition::Unknown;
  }

  br.ReadBit();  // separate_uv_delta_q
  // end color_config( )

  br.ReadBit();  // film_grain_params_present
  // end sequence_header_obu( )

  // begin trailing_bits( )
  // https://aomediacodec.github.io/av1-spec/#trailing-bits-syntax
  if (br.BitsLeft() > 8) {
    NS_WARNING(
        "AV1 sequence header finished reading with more than "
        "a byte of aligning bits, may indicate an error");
  }
  // Ensure that data is read correctly by checking trailing bits.
  bool correct = br.ReadBit();
  correct &= br.ReadBits(br.BitsLeft() % 8) == 0;
  while (br.BitsLeft() > 0) {
    correct &= br.ReadBits(8) == 0;
  }
  if (!correct) {
    return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                       "AV1 sequence header was corrupted");
  }
  // end trailing_bits( )

  aDestInfo = tempInfo;
  return NS_OK;
}
+
+/* static */
+already_AddRefed<MediaByteBuffer> AOMDecoder::CreateSequenceHeader(
+ const AV1SequenceInfo& aInfo, nsresult& aResult) {
+ aResult = NS_ERROR_FAILURE;
+
+ RefPtr<MediaByteBuffer> seqHdrBuffer = new MediaByteBuffer();
+ BitWriter bw(seqHdrBuffer);
+
+ // See 5.5.1: General sequence header OBU syntax
+ // https://aomediacodec.github.io/av1-spec/#general-sequence-header-obu-syntax
+ bw.WriteBits(aInfo.mProfile, 3);
+ bw.WriteBit(false); // still_picture
+ bw.WriteBit(false); // reduced_still_picture_header
+
+ bw.WriteBit(false); // timing_info_present_flag
+ // if ( timing_info_present_flag ) {...}
+ bw.WriteBit(false); // initial_display_delay_present_flag
+
+ size_t opCount = aInfo.mOperatingPoints.Length();
+ bw.WriteBits(opCount - 1, 5); // operating_points_cnt_minus_1
+ for (size_t i = 0; i < opCount; i++) {
+ OperatingPoint op = aInfo.mOperatingPoints[i];
+ bw.WriteBits(op.mLayers, 12); // operating_point_idc[ i ]
+ bw.WriteBits(op.mLevel, 5);
+ if (op.mLevel > 7) {
+ bw.WriteBits(op.mTier, 1);
+ } else {
+ // seq_tier[ i ] = 0
+ if (op.mTier != 0) {
+ NS_WARNING("Operating points cannot specify tier for levels under 8.");
+ return nullptr;
+ }
+ }
+ // if ( decoder_model_info_present_flag ) {...}
+ // else
+ // decoder_model_info_present_for_this_op[ i ] = 0
+ // if ( initial_display_delay_present_flag ) {...}
+ }
+
+ if (!aInfo.mImage.IsEmpty() <= 0) {
+ NS_WARNING("Sequence header requires a valid image size");
+ return nullptr;
+ }
+ auto getBits = [](int32_t value) {
+ uint8_t bit = 0;
+ do {
+ value >>= 1;
+ bit++;
+ } while (value > 0);
+ return bit;
+ };
+ uint8_t bitsW = getBits(aInfo.mImage.Width());
+ uint8_t bitsH = getBits(aInfo.mImage.Height());
+ bw.WriteBits(bitsW - 1, 4);
+ bw.WriteBits(bitsH - 1, 4);
+ bw.WriteBits(aInfo.mImage.Width() - 1, bitsW);
+ bw.WriteBits(aInfo.mImage.Height() - 1, bitsH);
+
+ // if ( !reduced_still_picture_header )
+ bw.WriteBit(false); // frame_id_numbers_present_flag
+ // if ( frame_id_numbers_present_flag ) {...}
+ // end if ( !reduced_still_picture_header )
+
+ // Values below are derived from a 1080p YouTube AV1 stream.
+ // The values are unused currently for determining the usable
+ // decoder, and are only included to allow successful validation
+ // of the generated sequence header.
+
+ bw.WriteBit(true); // use_128x128_superblock
+ bw.WriteBit(true); // enable_filter_intra
+ bw.WriteBit(true); // enable_intra_edge_filter
+
+ // if ( !reduced_still_picture_header)
+ bw.WriteBit(false); // enable_interintra_compound
+ bw.WriteBit(true); // enable_masked_compound
+ bw.WriteBit(true); // enable_warped_motion
+ bw.WriteBit(false); // enable_dual_filter
+
+ bw.WriteBit(true); // enable_order_hint
+ // if ( enable_order_hint )
+ bw.WriteBit(false); // enable_jnt_comp
+ bw.WriteBit(true); // enable_ref_frame_mvs
+ // end if ( enable_order_hint )
+
+ bw.WriteBit(true); // seq_choose_screen_content_tools
+ // if ( seq_choose_screen_content_tools )
+ // seq_force_screen_content_tools = SELECT_SCREEN_CONTENT_TOOLS (2)
+ // else
+ // seq_force_screen_content_tools = f(1)
+
+ // if ( seq_force_screen_content_tools > 0 )
+ bw.WriteBit(true); // seq_choose_integer_mv
+ // if ( !seq_choose_integer_mv ) {...}
+ // end if ( seq_force_screen_content_tools > 0 )
+
+ // if ( enable_order_hint )
+ bw.WriteBits(6, 3); // order_hint_bits_minus_1
+ // end if ( enable_order_hint )
+ // end if ( !reduced_still_picture_header )
+
+ bw.WriteBit(false); // enable_superres
+ bw.WriteBit(false); // enable_cdef
+ bw.WriteBit(true); // enable_restoration
+
+ // Begin color_config( )
+ // https://aomediacodec.github.io/av1-spec/#color-config-syntax
+ bool highBitDepth = aInfo.mBitDepth >= 10;
+ bw.WriteBit(highBitDepth);
+
+ if (aInfo.mBitDepth == 12 && aInfo.mProfile != 2) {
+ NS_WARNING("Profile must be 2 for 12-bit");
+ return nullptr;
+ }
+ if (aInfo.mProfile == 2 && highBitDepth) {
+ bw.WriteBit(aInfo.mBitDepth == 12); // twelve_bit
+ }
+
+ if (aInfo.mMonochrome && aInfo.mProfile == 1) {
+ NS_WARNING("Profile 1 does not support monochrome");
+ return nullptr;
+ }
+ if (aInfo.mProfile != 1) {
+ bw.WriteBit(aInfo.mMonochrome);
+ }
+
+ const VideoColorSpace colors = aInfo.mColorSpace;
+ bool colorsPresent =
+ colors.mPrimaries != ColourPrimaries::CP_UNSPECIFIED ||
+ colors.mTransfer != TransferCharacteristics::TC_UNSPECIFIED ||
+ colors.mMatrix != MatrixCoefficients::MC_UNSPECIFIED;
+ bw.WriteBit(colorsPresent);
+
+ if (colorsPresent) {
+ bw.WriteBits(static_cast<uint8_t>(colors.mPrimaries), 8);
+ bw.WriteBits(static_cast<uint8_t>(colors.mTransfer), 8);
+ bw.WriteBits(static_cast<uint8_t>(colors.mMatrix), 8);
+ }
+
+ if (aInfo.mMonochrome) {
+ if (!aInfo.mSubsamplingX || !aInfo.mSubsamplingY) {
+ NS_WARNING("Monochrome requires 4:0:0 subsampling");
+ return nullptr;
+ }
+ if (aInfo.mChromaSamplePosition != ChromaSamplePosition::Unknown) {
+ NS_WARNING(
+ "Cannot specify chroma sample position on monochrome sequence");
+ return nullptr;
+ }
+ bw.WriteBit(colors.mRange == ColorRange::FULL);
+ } else if (colors.mPrimaries == ColourPrimaries::CP_BT709 &&
+ colors.mTransfer == TransferCharacteristics::TC_SRGB &&
+ colors.mMatrix == MatrixCoefficients::MC_IDENTITY) {
+ if (aInfo.mSubsamplingX || aInfo.mSubsamplingY ||
+ colors.mRange != ColorRange::FULL ||
+ aInfo.mChromaSamplePosition != ChromaSamplePosition::Unknown) {
+ NS_WARNING("sRGB requires 4:4:4 subsampling with full color range");
+ return nullptr;
+ }
+ } else {
+ bw.WriteBit(colors.mRange == ColorRange::FULL);
+ switch (aInfo.mProfile) {
+ case 0:
+ if (!aInfo.mSubsamplingX || !aInfo.mSubsamplingY) {
+ NS_WARNING("Main Profile requires 4:2:0 subsampling");
+ return nullptr;
+ }
+ break;
+ case 1:
+ if (aInfo.mSubsamplingX || aInfo.mSubsamplingY) {
+ NS_WARNING("High Profile requires 4:4:4 subsampling");
+ return nullptr;
+ }
+ break;
+ case 2:
+ if (aInfo.mBitDepth == 12) {
+ bw.WriteBit(aInfo.mSubsamplingX);
+ if (aInfo.mSubsamplingX) {
+ bw.WriteBit(aInfo.mSubsamplingY);
+ }
+ } else {
+ if (!aInfo.mSubsamplingX || aInfo.mSubsamplingY) {
+ NS_WARNING(
+ "Professional Profile < 12-bit requires 4:2:2 subsampling");
+ return nullptr;
+ }
+ }
+ break;
+ }
+
+ if (aInfo.mSubsamplingX && aInfo.mSubsamplingY) {
+ bw.WriteBits(static_cast<uint8_t>(aInfo.mChromaSamplePosition), 2);
+ } else {
+ if (aInfo.mChromaSamplePosition != ChromaSamplePosition::Unknown) {
+ NS_WARNING("Only 4:2:0 subsampling can specify chroma position");
+ return nullptr;
+ }
+ }
+ }
+
+ bw.WriteBit(false); // separate_uv_delta_q
+ // end color_config( )
+
+ bw.WriteBit(true); // film_grain_params_present
+
+ // trailing_bits( )
+ // https://aomediacodec.github.io/av1-spec/#trailing-bits-syntax
+ size_t numTrailingBits = 8 - (bw.BitCount() % 8);
+ bw.WriteBit(true);
+ bw.WriteBits(0, numTrailingBits - 1);
+ ASSERT_BYTE_ALIGNED(bw);
+
+ Span<const uint8_t> seqHdr(seqHdrBuffer->Elements(), seqHdrBuffer->Length());
+ aResult = NS_OK;
+ return CreateOBU(OBUType::SequenceHeader, seqHdr);
+}
+
+/* static */
+void AOMDecoder::TryReadAV1CBox(const MediaByteBuffer* aBox,
+ AV1SequenceInfo& aDestInfo,
+ MediaResult& aSeqHdrResult) {
+ // See av1C specification:
+ // https://aomediacodec.github.io/av1-isobmff/#av1codecconfigurationbox-section
+ BitReader br(aBox);
+
+ br.ReadBits(8); // marker, version
+
+ aDestInfo.mProfile = br.ReadBits(3);
+
+ OperatingPoint op;
+ op.mLevel = br.ReadBits(5);
+ op.mTier = br.ReadBits(1);
+ aDestInfo.mOperatingPoints.AppendElement(op);
+
+ bool highBitDepth = br.ReadBit();
+ bool twelveBit = br.ReadBit();
+ aDestInfo.mBitDepth = highBitDepth ? twelveBit ? 12 : 10 : 8;
+
+ aDestInfo.mMonochrome = br.ReadBit();
+ aDestInfo.mSubsamplingX = br.ReadBit();
+ aDestInfo.mSubsamplingY = br.ReadBit();
+ aDestInfo.mChromaSamplePosition =
+ static_cast<ChromaSamplePosition>(br.ReadBits(2));
+
+ br.ReadBits(3); // reserved
+ br.ReadBit(); // initial_presentation_delay_present
+ br.ReadBits(4); // initial_presentation_delay_minus_one or reserved
+
+ ASSERT_BYTE_ALIGNED(br);
+
+ size_t skipBytes = br.BitCount() / 8;
+ Span<const uint8_t> obus(aBox->Elements() + skipBytes,
+ aBox->Length() - skipBytes);
+
+ // Minimum possible OBU header size
+ if (obus.Length() < 1) {
+ aSeqHdrResult = NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA;
+ return;
+ }
+
+ // If present, the sequence header will be redundant to some values, but any
+ // values stored in it should be treated as more accurate than av1C.
+ aSeqHdrResult = ReadSequenceHeaderInfo(obus, aDestInfo);
+}
+
+/* static */
+void AOMDecoder::WriteAV1CBox(const AV1SequenceInfo& aInfo,
+ MediaByteBuffer* aDestBox, bool& aHasSeqHdr) {
+ aHasSeqHdr = false;
+
+ BitWriter bw(aDestBox);
+
+ bw.WriteBit(true); // marker
+ bw.WriteBits(1, 7); // version
+
+ bw.WriteBits(aInfo.mProfile, 3);
+
+ MOZ_DIAGNOSTIC_ASSERT(aInfo.mOperatingPoints.Length() > 0);
+ bw.WriteBits(aInfo.mOperatingPoints[0].mLevel, 5);
+ bw.WriteBits(aInfo.mOperatingPoints[0].mTier, 1);
+
+ bw.WriteBit(aInfo.mBitDepth >= 10); // high_bitdepth
+ bw.WriteBit(aInfo.mBitDepth == 12); // twelve_bit
+
+ bw.WriteBit(aInfo.mMonochrome);
+ bw.WriteBit(aInfo.mSubsamplingX);
+ bw.WriteBit(aInfo.mSubsamplingY);
+ bw.WriteBits(static_cast<uint8_t>(aInfo.mChromaSamplePosition), 2);
+
+ bw.WriteBits(0, 3); // reserved
+ bw.WriteBit(false); // initial_presentation_delay_present
+ bw.WriteBits(0, 4); // initial_presentation_delay_minus_one or reserved
+
+ ASSERT_BYTE_ALIGNED(bw);
+
+ nsresult rv;
+ RefPtr<MediaByteBuffer> seqHdrBuffer = CreateSequenceHeader(aInfo, rv);
+
+ if (NS_SUCCEEDED(rv)) {
+ aDestBox->AppendElements(seqHdrBuffer->Elements(), seqHdrBuffer->Length());
+ aHasSeqHdr = true;
+ }
+}
+
+/* static */
+Maybe<AOMDecoder::AV1SequenceInfo> AOMDecoder::CreateSequenceInfoFromCodecs(
+ const nsAString& aCodec) {
+ AV1SequenceInfo info;
+ OperatingPoint op;
+ uint8_t chromaSamplePosition;
+ if (!ExtractAV1CodecDetails(aCodec, info.mProfile, op.mLevel, op.mTier,
+ info.mBitDepth, info.mMonochrome,
+ info.mSubsamplingX, info.mSubsamplingY,
+ chromaSamplePosition, info.mColorSpace)) {
+ return Nothing();
+ }
+ info.mOperatingPoints.AppendElement(op);
+ info.mChromaSamplePosition =
+ static_cast<ChromaSamplePosition>(chromaSamplePosition);
+ return Some(info);
+}
+
+/* static */
+bool AOMDecoder::SetVideoInfo(VideoInfo* aDestInfo, const nsAString& aCodec) {
+ Maybe<AV1SequenceInfo> info = CreateSequenceInfoFromCodecs(aCodec);
+ if (info.isNothing()) {
+ return false;
+ }
+
+ if (!aDestInfo->mImage.IsEmpty()) {
+ info->mImage = aDestInfo->mImage;
+ }
+
+ RefPtr<MediaByteBuffer> extraData = new MediaByteBuffer();
+ bool hasSeqHdr;
+ WriteAV1CBox(info.value(), extraData, hasSeqHdr);
+ aDestInfo->mExtraData = extraData;
+ return true;
+}
+
+} // namespace mozilla
+#undef LOG
+#undef ASSERT_BYTE_ALIGNED
diff --git a/dom/media/platforms/agnostic/AOMDecoder.h b/dom/media/platforms/agnostic/AOMDecoder.h
new file mode 100644
index 0000000000..2d3d7761fa
--- /dev/null
+++ b/dom/media/platforms/agnostic/AOMDecoder.h
@@ -0,0 +1,287 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+#if !defined(AOMDecoder_h_)
+# define AOMDecoder_h_
+
+# include <stdint.h>
+
+# include "PerformanceRecorder.h"
+# include "PlatformDecoderModule.h"
+# include "aom/aom_decoder.h"
+# include "mozilla/Span.h"
+# include "VideoUtils.h"
+
+namespace mozilla {
+
+DDLoggedTypeDeclNameAndBase(AOMDecoder, MediaDataDecoder);
+
+class AOMDecoder final : public MediaDataDecoder,
+ public DecoderDoctorLifeLogger<AOMDecoder> {
+ public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AOMDecoder, final);
+
+ explicit AOMDecoder(const CreateDecoderParams& aParams);
+
+ RefPtr<InitPromise> Init() override;
+ RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
+ RefPtr<DecodePromise> Drain() override;
+ RefPtr<FlushPromise> Flush() override;
+ RefPtr<ShutdownPromise> Shutdown() override;
+ nsCString GetDescriptionName() const override {
+ return "av1 libaom video decoder"_ns;
+ }
+ nsCString GetCodecName() const override { return "av1"_ns; }
+
+ // Return true if aMimeType is a one of the strings used
+ // by our demuxers to identify AV1 streams.
+ static bool IsAV1(const nsACString& aMimeType);
+
+ // Return true if a sample is a keyframe.
+ static bool IsKeyframe(Span<const uint8_t> aBuffer);
+
+ // Return the frame dimensions for a sample.
+ static gfx::IntSize GetFrameSize(Span<const uint8_t> aBuffer);
+
+ // obu_type defined at:
+ // https://aomediacodec.github.io/av1-spec/av1-spec.pdf#page=123
+ enum class OBUType : uint8_t {
+ Reserved = 0,
+ SequenceHeader = 1,
+ TemporalDelimiter = 2,
+ FrameHeader = 3,
+ TileGroup = 4,
+ Metadata = 5,
+ Frame = 6,
+ RedundantFrameHeader = 7,
+ TileList = 8,
+ Padding = 15
+ };
+
+ struct OBUInfo {
+ OBUType mType = OBUType::Reserved;
+ bool mExtensionFlag = false;
+ Span<const uint8_t> mContents;
+
+ bool IsValid() const {
+ switch (mType) {
+ case OBUType::SequenceHeader:
+ case OBUType::TemporalDelimiter:
+ case OBUType::FrameHeader:
+ case OBUType::TileGroup:
+ case OBUType::Metadata:
+ case OBUType::Frame:
+ case OBUType::RedundantFrameHeader:
+ case OBUType::TileList:
+ case OBUType::Padding:
+ return true;
+ default:
+ return false;
+ }
+ }
+ };
+
+ struct OBUIterator {
+ public:
+ explicit OBUIterator(const Span<const uint8_t>& aData)
+ : mData(aData), mPosition(0), mGoNext(true), mResult(NS_OK) {}
+ bool HasNext() {
+ UpdateNext();
+ return !mGoNext;
+ }
+ OBUInfo Next() {
+ UpdateNext();
+ mGoNext = true;
+ return mCurrent;
+ }
+ MediaResult GetResult() const { return mResult; }
+
+ private:
+ const Span<const uint8_t>& mData;
+ size_t mPosition;
+ OBUInfo mCurrent;
+ bool mGoNext;
+ MediaResult mResult;
+
+ // Used to fill mCurrent with the next OBU in the iterator.
+ // mGoNext must be set to false if the next OBU is retrieved,
+ // otherwise it will be true so that HasNext() returns false.
+ // When an invalid OBU is read, the iterator will finish and
+ // mCurrent will be reset to default OBUInfo().
+ void UpdateNext();
+ };
+
+ // Create an iterator to parse Open Bitstream Units from a buffer.
+ static OBUIterator ReadOBUs(const Span<const uint8_t>& aData);
+ // Writes an Open Bitstream Unit header type and the contained subheader.
+ // Extension flag is set to 0 and size field is always present.
+ static already_AddRefed<MediaByteBuffer> CreateOBU(
+ const OBUType aType, const Span<const uint8_t>& aContents);
+
+ // chroma_sample_position defined at:
+ // https://aomediacodec.github.io/av1-spec/av1-spec.pdf#page=131
+ enum class ChromaSamplePosition : uint8_t {
+ Unknown = 0,
+ Vertical = 1,
+ Colocated = 2,
+ Reserved = 3
+ };
+
+ struct OperatingPoint {
+ // https://aomediacodec.github.io/av1-spec/av1-spec.pdf#page=125
+ // operating_point_idc[ i ]: A set of bitwise flags determining
+ // the temporal and spatial layers to decode.
+ // A value of 0 indicates that scalability is not being used.
+ uint16_t mLayers = 0;
+ // https://aomediacodec.github.io/av1-spec/av1-spec.pdf#page=650
+ // See A.3: Levels for a definition of the available levels.
+ uint8_t mLevel = 0;
+ // https://aomediacodec.github.io/av1-spec/av1-spec.pdf#page=126
+ // seq_tier[ i ]: The tier for the selected operating point.
+ uint8_t mTier = 0;
+
+ bool operator==(const OperatingPoint& aOther) const {
+ return mLayers == aOther.mLayers && mLevel == aOther.mLevel &&
+ mTier == aOther.mTier;
+ }
+ bool operator!=(const OperatingPoint& aOther) const {
+ return !(*this == aOther);
+ }
+ };
+
+ struct AV1SequenceInfo {
+ AV1SequenceInfo() = default;
+
+ AV1SequenceInfo(const AV1SequenceInfo& aOther) { *this = aOther; }
+
+ // Profiles, levels and tiers defined at:
+ // https://aomediacodec.github.io/av1-spec/av1-spec.pdf#page=650
+ uint8_t mProfile = 0;
+
+ // choose_operating_point( ) defines that the operating points are
+ // specified in order of preference by the encoder. Higher operating
+ // points indices in the header will allow a tradeoff of quality for
+ // performance, dropping some data from the decoding process.
+ // Normally we are only interested in the first operating point.
+ // See: https://aomediacodec.github.io/av1-spec/av1-spec.pdf#page=126
+ nsTArray<OperatingPoint> mOperatingPoints = nsTArray<OperatingPoint>(1);
+
+ gfx::IntSize mImage = {0, 0};
+
+ // Color configs explained at:
+ // https://aomediacodec.github.io/av1-spec/av1-spec.pdf#page=129
+ uint8_t mBitDepth = 8;
+ bool mMonochrome = false;
+ bool mSubsamplingX = true;
+ bool mSubsamplingY = true;
+ ChromaSamplePosition mChromaSamplePosition = ChromaSamplePosition::Unknown;
+
+ VideoColorSpace mColorSpace;
+
+ gfx::ColorDepth ColorDepth() const {
+ return gfx::ColorDepthForBitDepth(mBitDepth);
+ }
+
+ bool operator==(const AV1SequenceInfo& aOther) const {
+ if (mProfile != aOther.mProfile || mImage != aOther.mImage ||
+ mBitDepth != aOther.mBitDepth || mMonochrome != aOther.mMonochrome ||
+ mSubsamplingX != aOther.mSubsamplingX ||
+ mSubsamplingY != aOther.mSubsamplingY ||
+ mChromaSamplePosition != aOther.mChromaSamplePosition ||
+ mColorSpace != aOther.mColorSpace) {
+ return false;
+ }
+
+ size_t opCount = mOperatingPoints.Length();
+ if (opCount != aOther.mOperatingPoints.Length()) {
+ return false;
+ }
+ for (size_t i = 0; i < opCount; i++) {
+ if (mOperatingPoints[i] != aOther.mOperatingPoints[i]) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+ bool operator!=(const AV1SequenceInfo& aOther) const {
+ return !(*this == aOther);
+ }
+ AV1SequenceInfo& operator=(const AV1SequenceInfo& aOther) {
+ mProfile = aOther.mProfile;
+
+ size_t opCount = aOther.mOperatingPoints.Length();
+ mOperatingPoints.ClearAndRetainStorage();
+ mOperatingPoints.SetCapacity(opCount);
+ for (size_t i = 0; i < opCount; i++) {
+ mOperatingPoints.AppendElement(aOther.mOperatingPoints[i]);
+ }
+
+ mImage = aOther.mImage;
+ mBitDepth = aOther.mBitDepth;
+ mMonochrome = aOther.mMonochrome;
+ mSubsamplingX = aOther.mSubsamplingX;
+ mSubsamplingY = aOther.mSubsamplingY;
+ mChromaSamplePosition = aOther.mChromaSamplePosition;
+ mColorSpace = aOther.mColorSpace;
+ return *this;
+ }
+ };
+
+ // Get a sequence header's info from a sample.
+ // Returns a MediaResult with codes:
+ // NS_OK: Sequence header was successfully found and read.
+ // NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA: Sequence header was not present.
+ // Other errors will indicate that the data was corrupt.
+ static MediaResult ReadSequenceHeaderInfo(const Span<const uint8_t>& aSample,
+ AV1SequenceInfo& aDestInfo);
+ // Writes a sequence header OBU to the buffer.
+ static already_AddRefed<MediaByteBuffer> CreateSequenceHeader(
+ const AV1SequenceInfo& aInfo, nsresult& aResult);
+
+ // Reads the raw data of an ISOBMFF-compatible av1 configuration box (av1C),
+ // including any included sequence header.
+ static void TryReadAV1CBox(const MediaByteBuffer* aBox,
+ AV1SequenceInfo& aDestInfo,
+ MediaResult& aSeqHdrResult);
+ // Reads the raw data of an ISOBMFF-compatible av1 configuration box (av1C),
+ // including any included sequence header.
+ // This function should only be called for av1C boxes made by WriteAV1CBox, as
+ // it will assert that the box and its contained OBUs are not corrupted.
+ static void ReadAV1CBox(const MediaByteBuffer* aBox,
+ AV1SequenceInfo& aDestInfo, bool& aHadSeqHdr) {
+ MediaResult seqHdrResult;
+ TryReadAV1CBox(aBox, aDestInfo, seqHdrResult);
+ nsresult code = seqHdrResult.Code();
+ MOZ_ASSERT(code == NS_OK || code == NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA);
+ aHadSeqHdr = code == NS_OK;
+ }
+ // Writes an ISOBMFF-compatible av1 configuration box (av1C) to the buffer.
+ static void WriteAV1CBox(const AV1SequenceInfo& aInfo,
+ MediaByteBuffer* aDestBox, bool& aHasSeqHdr);
+
+ // Create sequence info from a MIME codecs string.
+ static Maybe<AV1SequenceInfo> CreateSequenceInfoFromCodecs(
+ const nsAString& aCodec);
+ static bool SetVideoInfo(VideoInfo* aDestInfo, const nsAString& aCodec);
+
+ private:
+ ~AOMDecoder();
+ RefPtr<DecodePromise> ProcessDecode(MediaRawData* aSample);
+
+ const RefPtr<layers::ImageContainer> mImageContainer;
+ const RefPtr<TaskQueue> mTaskQueue;
+
+ // AOM decoder state
+ aom_codec_ctx_t mCodec;
+
+ const VideoInfo mInfo;
+ const Maybe<TrackingId> mTrackingId;
+ PerformanceRecorderMulti<DecodeStage> mPerformanceRecorder;
+};
+
+} // namespace mozilla
+
+#endif // AOMDecoder_h_
diff --git a/dom/media/platforms/agnostic/AgnosticDecoderModule.cpp b/dom/media/platforms/agnostic/AgnosticDecoderModule.cpp
new file mode 100644
index 0000000000..22723a6a04
--- /dev/null
+++ b/dom/media/platforms/agnostic/AgnosticDecoderModule.cpp
@@ -0,0 +1,218 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "AgnosticDecoderModule.h"
+
+#include "OpusDecoder.h"
+#include "TheoraDecoder.h"
+#include "VPXDecoder.h"
+#include "VorbisDecoder.h"
+#include "WAVDecoder.h"
+#include "mozilla/Logging.h"
+#include "mozilla/StaticPrefs_media.h"
+#include "VideoUtils.h"
+
+#ifdef MOZ_AV1
+# include "AOMDecoder.h"
+# include "DAV1DDecoder.h"
+#endif
+
+namespace mozilla {
+
+enum class DecoderType {
+#ifdef MOZ_AV1
+ AV1,
+#endif
+ Opus,
+ Theora,
+ Vorbis,
+ VPX,
+ Wave,
+};
+
+static bool IsAvailableInDefault(DecoderType type) {
+ switch (type) {
+#ifdef MOZ_AV1
+ case DecoderType::AV1:
+ return StaticPrefs::media_av1_enabled();
+#endif
+ case DecoderType::Opus:
+ case DecoderType::Theora:
+ case DecoderType::Vorbis:
+ case DecoderType::VPX:
+ case DecoderType::Wave:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool IsAvailableInRdd(DecoderType type) {
+ switch (type) {
+#ifdef MOZ_AV1
+ case DecoderType::AV1:
+ return StaticPrefs::media_av1_enabled();
+#endif
+ case DecoderType::Opus:
+ return StaticPrefs::media_rdd_opus_enabled();
+ case DecoderType::Theora:
+ return StaticPrefs::media_rdd_theora_enabled();
+ case DecoderType::Vorbis:
+#if defined(__MINGW32__)
+ // If this is a MinGW build we need to force AgnosticDecoderModule to
+ // handle the decision to support Vorbis decoding (instead of
+ // RDD/RemoteDecoderModule) because of Bug 1597408 (Vorbis decoding on
+ // RDD causing sandboxing failure on MinGW-clang). Typically this
+ // would be dealt with using defines in StaticPrefList.yaml, but we
+ // must handle it here because of Bug 1598426 (the __MINGW32__ define
+ // isn't supported in StaticPrefList.yaml).
+ return false;
+#else
+ return StaticPrefs::media_rdd_vorbis_enabled();
+#endif
+ case DecoderType::VPX:
+ return StaticPrefs::media_rdd_vpx_enabled();
+ case DecoderType::Wave:
+ return StaticPrefs::media_rdd_wav_enabled();
+ default:
+ return false;
+ }
+}
+
+static bool IsAvailableInUtility(DecoderType type) {
+ switch (type) {
+ case DecoderType::Opus:
+ return StaticPrefs::media_utility_opus_enabled();
+ case DecoderType::Vorbis:
+#if defined(__MINGW32__)
+ // If this is a MinGW build we need to force AgnosticDecoderModule to
+ // handle the decision to support Vorbis decoding (instead of
+ // Utility/RemoteDecoderModule) because of Bug 1597408 (Vorbis decoding on
+ // Utility causing sandboxing failure on MinGW-clang). Typically this
+ // would be dealt with using defines in StaticPrefList.yaml, but we
+ // must handle it here because of Bug 1598426 (the __MINGW32__ define
+ // isn't supported in StaticPrefList.yaml).
+ return false;
+#else
+ return StaticPrefs::media_utility_vorbis_enabled();
+#endif
+ case DecoderType::Wave:
+ return StaticPrefs::media_utility_wav_enabled();
+ case DecoderType::Theora: // Video codecs, dont take care of them
+ case DecoderType::VPX:
+ default:
+ return false;
+ }
+}
+
+// Checks if decoder is available in the current process
+static bool IsAvailable(DecoderType type) {
+ return XRE_IsRDDProcess() ? IsAvailableInRdd(type)
+ : XRE_IsUtilityProcess() ? IsAvailableInUtility(type)
+ : IsAvailableInDefault(type);
+}
+
+media::DecodeSupportSet AgnosticDecoderModule::SupportsMimeType(
+ const nsACString& aMimeType, DecoderDoctorDiagnostics* aDiagnostics) const {
+ UniquePtr<TrackInfo> trackInfo = CreateTrackInfoWithMIMEType(aMimeType);
+ if (!trackInfo) {
+ return media::DecodeSupport::Unsupported;
+ }
+ return Supports(SupportDecoderParams(*trackInfo), aDiagnostics);
+}
+
+media::DecodeSupportSet AgnosticDecoderModule::Supports(
+ const SupportDecoderParams& aParams,
+ DecoderDoctorDiagnostics* aDiagnostics) const {
+ // This should only be supported by MFMediaEngineDecoderModule.
+ if (aParams.mMediaEngineId) {
+ return media::DecodeSupport::Unsupported;
+ }
+
+ const auto& trackInfo = aParams.mConfig;
+ const nsACString& mimeType = trackInfo.mMimeType;
+
+ bool supports =
+#ifdef MOZ_AV1
+ // We remove support for decoding AV1 here if RDD is enabled so that
+ // decoding on the content process doesn't accidentally happen in case
+ // something goes wrong with launching the RDD process.
+ (AOMDecoder::IsAV1(mimeType) && IsAvailable(DecoderType::AV1)) ||
+#endif
+ (VPXDecoder::IsVPX(mimeType) && IsAvailable(DecoderType::VPX)) ||
+ (TheoraDecoder::IsTheora(mimeType) && IsAvailable(DecoderType::Theora)) ||
+ (VorbisDataDecoder::IsVorbis(mimeType) &&
+ IsAvailable(DecoderType::Vorbis)) ||
+ (WaveDataDecoder::IsWave(mimeType) && IsAvailable(DecoderType::Wave)) ||
+ (OpusDataDecoder::IsOpus(mimeType) && IsAvailable(DecoderType::Opus));
+ MOZ_LOG(sPDMLog, LogLevel::Debug,
+ ("Agnostic decoder %s requested type '%s'",
+ supports ? "supports" : "rejects", mimeType.BeginReading()));
+ if (supports) {
+ return media::DecodeSupport::SoftwareDecode;
+ }
+ return media::DecodeSupport::Unsupported;
+}
+
+already_AddRefed<MediaDataDecoder> AgnosticDecoderModule::CreateVideoDecoder(
+ const CreateDecoderParams& aParams) {
+ if (Supports(SupportDecoderParams(aParams), nullptr /* diagnostic */) ==
+ media::DecodeSupport::Unsupported) {
+ return nullptr;
+ }
+ RefPtr<MediaDataDecoder> m;
+
+ if (VPXDecoder::IsVPX(aParams.mConfig.mMimeType)) {
+ m = new VPXDecoder(aParams);
+ }
+#ifdef MOZ_AV1
+ // We remove support for decoding AV1 here if RDD is enabled so that
+ // decoding on the content process doesn't accidentally happen in case
+ // something goes wrong with launching the RDD process.
+ if (StaticPrefs::media_av1_enabled() &&
+ (!StaticPrefs::media_rdd_process_enabled() || XRE_IsRDDProcess()) &&
+ AOMDecoder::IsAV1(aParams.mConfig.mMimeType)) {
+ if (StaticPrefs::media_av1_use_dav1d()) {
+ m = new DAV1DDecoder(aParams);
+ } else {
+ m = new AOMDecoder(aParams);
+ }
+ }
+#endif
+ else if (TheoraDecoder::IsTheora(aParams.mConfig.mMimeType)) {
+ m = new TheoraDecoder(aParams);
+ }
+
+ return m.forget();
+}
+
+already_AddRefed<MediaDataDecoder> AgnosticDecoderModule::CreateAudioDecoder(
+ const CreateDecoderParams& aParams) {
+ if (Supports(SupportDecoderParams(aParams), nullptr /* diagnostic */) ==
+ media::DecodeSupport::Unsupported) {
+ return nullptr;
+ }
+ RefPtr<MediaDataDecoder> m;
+
+ const TrackInfo& config = aParams.mConfig;
+ if (VorbisDataDecoder::IsVorbis(config.mMimeType)) {
+ m = new VorbisDataDecoder(aParams);
+ } else if (OpusDataDecoder::IsOpus(config.mMimeType)) {
+ m = new OpusDataDecoder(aParams);
+ } else if (WaveDataDecoder::IsWave(config.mMimeType)) {
+ m = new WaveDataDecoder(aParams);
+ }
+
+ return m.forget();
+}
+
+/* static */
+already_AddRefed<PlatformDecoderModule> AgnosticDecoderModule::Create() {
+ RefPtr<PlatformDecoderModule> pdm = new AgnosticDecoderModule();
+ return pdm.forget();
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/agnostic/AgnosticDecoderModule.h b/dom/media/platforms/agnostic/AgnosticDecoderModule.h
new file mode 100644
index 0000000000..bac5f4bf42
--- /dev/null
+++ b/dom/media/platforms/agnostic/AgnosticDecoderModule.h
@@ -0,0 +1,39 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(AgnosticDecoderModule_h_)
+# define AgnosticDecoderModule_h_
+
+# include "PlatformDecoderModule.h"
+
+namespace mozilla {
+
+class AgnosticDecoderModule : public PlatformDecoderModule {
+ public:
+ static already_AddRefed<PlatformDecoderModule> Create();
+
+ media::DecodeSupportSet SupportsMimeType(
+ const nsACString& aMimeType,
+ DecoderDoctorDiagnostics* aDiagnostics) const override;
+ media::DecodeSupportSet Supports(
+ const SupportDecoderParams& aParams,
+ DecoderDoctorDiagnostics* aDiagnostics) const override;
+
+ protected:
+ AgnosticDecoderModule() = default;
+ virtual ~AgnosticDecoderModule() = default;
+ // Decode thread.
+ already_AddRefed<MediaDataDecoder> CreateVideoDecoder(
+ const CreateDecoderParams& aParams) override;
+
+ // Decode thread.
+ already_AddRefed<MediaDataDecoder> CreateAudioDecoder(
+ const CreateDecoderParams& aParams) override;
+};
+
+} // namespace mozilla
+
+#endif /* AgnosticDecoderModule_h_ */
diff --git a/dom/media/platforms/agnostic/BlankDecoderModule.cpp b/dom/media/platforms/agnostic/BlankDecoderModule.cpp
new file mode 100644
index 0000000000..fba07338dc
--- /dev/null
+++ b/dom/media/platforms/agnostic/BlankDecoderModule.cpp
@@ -0,0 +1,144 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "BlankDecoderModule.h"
+
+#include "mozilla/CheckedInt.h"
+#include "mozilla/UniquePtrExtensions.h"
+#include "mozilla/RefPtr.h"
+#include "mozilla/gfx/Rect.h"
+#include "mozilla/gfx/Point.h"
+#include "ImageContainer.h"
+#include "MediaData.h"
+#include "MediaInfo.h"
+#include "VideoUtils.h"
+
+namespace mozilla {
+
+BlankVideoDataCreator::BlankVideoDataCreator(
+ uint32_t aFrameWidth, uint32_t aFrameHeight,
+ layers::ImageContainer* aImageContainer)
+ : mFrameWidth(aFrameWidth),
+ mFrameHeight(aFrameHeight),
+ mImageContainer(aImageContainer) {
+ mInfo.mDisplay = gfx::IntSize(mFrameWidth, mFrameHeight);
+ mPicture = gfx::IntRect(0, 0, mFrameWidth, mFrameHeight);
+}
+
+already_AddRefed<MediaData> BlankVideoDataCreator::Create(
+ MediaRawData* aSample) {
+ // Create a fake YUV buffer in a 420 format. That is, an 8bpp Y plane,
+ // with a U and V plane that are half the size of the Y plane, i.e 8 bit,
+ // 2x2 subsampled. Have the data pointer of each frame point to the
+ // first plane, they'll always be zero'd memory anyway.
+ const CheckedUint32 size = CheckedUint32(mFrameWidth) * mFrameHeight;
+ if (!size.isValid()) {
+ // Overflow happened.
+ return nullptr;
+ }
+ auto frame = MakeUniqueFallible<uint8_t[]>(size.value());
+ if (!frame) {
+ return nullptr;
+ }
+ memset(frame.get(), 0, mFrameWidth * mFrameHeight);
+ VideoData::YCbCrBuffer buffer;
+
+ // Y plane.
+ buffer.mPlanes[0].mData = frame.get();
+ buffer.mPlanes[0].mStride = mFrameWidth;
+ buffer.mPlanes[0].mHeight = mFrameHeight;
+ buffer.mPlanes[0].mWidth = mFrameWidth;
+ buffer.mPlanes[0].mSkip = 0;
+
+ // Cb plane.
+ buffer.mPlanes[1].mData = frame.get();
+ buffer.mPlanes[1].mStride = (mFrameWidth + 1) / 2;
+ buffer.mPlanes[1].mHeight = (mFrameHeight + 1) / 2;
+ buffer.mPlanes[1].mWidth = (mFrameWidth + 1) / 2;
+ buffer.mPlanes[1].mSkip = 0;
+
+ // Cr plane.
+ buffer.mPlanes[2].mData = frame.get();
+ buffer.mPlanes[2].mStride = (mFrameWidth + 1) / 2;
+ buffer.mPlanes[2].mHeight = (mFrameHeight + 1) / 2;
+ buffer.mPlanes[2].mWidth = (mFrameWidth + 1) / 2;
+ buffer.mPlanes[2].mSkip = 0;
+
+ buffer.mChromaSubsampling = gfx::ChromaSubsampling::HALF_WIDTH_AND_HEIGHT;
+ buffer.mYUVColorSpace = gfx::YUVColorSpace::BT601;
+ buffer.mColorPrimaries = gfx::ColorSpace2::BT709;
+
+ return VideoData::CreateAndCopyData(mInfo, mImageContainer, aSample->mOffset,
+ aSample->mTime, aSample->mDuration,
+ buffer, aSample->mKeyframe,
+ aSample->mTime, mPicture, nullptr);
+}
+
+BlankAudioDataCreator::BlankAudioDataCreator(uint32_t aChannelCount,
+ uint32_t aSampleRate)
+ : mFrameSum(0), mChannelCount(aChannelCount), mSampleRate(aSampleRate) {}
+
+already_AddRefed<MediaData> BlankAudioDataCreator::Create(
+ MediaRawData* aSample) {
+ // Convert duration to frames. We add 1 to duration to account for
+ // rounding errors, so we get a consistent tone.
+ CheckedInt64 frames =
+ UsecsToFrames(aSample->mDuration.ToMicroseconds() + 1, mSampleRate);
+ if (!frames.isValid() || !mChannelCount || !mSampleRate ||
+ frames.value() > (UINT32_MAX / mChannelCount)) {
+ return nullptr;
+ }
+ AlignedAudioBuffer samples(frames.value() * mChannelCount);
+ if (!samples) {
+ return nullptr;
+ }
+ // Fill the sound buffer with an A4 tone.
+ static const float pi = 3.14159265f;
+ static const float noteHz = 440.0f;
+ for (int i = 0; i < frames.value(); i++) {
+ float f = sin(2 * pi * noteHz * mFrameSum / mSampleRate);
+ for (unsigned c = 0; c < mChannelCount; c++) {
+ samples[i * mChannelCount + c] = AudioDataValue(f);
+ }
+ mFrameSum++;
+ }
+ RefPtr<AudioData> data(new AudioData(aSample->mOffset, aSample->mTime,
+ std::move(samples), mChannelCount,
+ mSampleRate));
+ return data.forget();
+}
+
+already_AddRefed<MediaDataDecoder> BlankDecoderModule::CreateVideoDecoder(
+ const CreateDecoderParams& aParams) {
+ const VideoInfo& config = aParams.VideoConfig();
+ UniquePtr<DummyDataCreator> creator = MakeUnique<BlankVideoDataCreator>(
+ config.mDisplay.width, config.mDisplay.height, aParams.mImageContainer);
+ RefPtr<MediaDataDecoder> decoder = new DummyMediaDataDecoder(
+ std::move(creator), "blank media data decoder"_ns, aParams);
+ return decoder.forget();
+}
+
+already_AddRefed<MediaDataDecoder> BlankDecoderModule::CreateAudioDecoder(
+ const CreateDecoderParams& aParams) {
+ const AudioInfo& config = aParams.AudioConfig();
+ UniquePtr<DummyDataCreator> creator =
+ MakeUnique<BlankAudioDataCreator>(config.mChannels, config.mRate);
+ RefPtr<MediaDataDecoder> decoder = new DummyMediaDataDecoder(
+ std::move(creator), "blank media data decoder"_ns, aParams);
+ return decoder.forget();
+}
+
+media::DecodeSupportSet BlankDecoderModule::SupportsMimeType(
+ const nsACString& aMimeType, DecoderDoctorDiagnostics* aDiagnostics) const {
+ return media::DecodeSupport::SoftwareDecode;
+}
+
+/* static */
+already_AddRefed<PlatformDecoderModule> BlankDecoderModule::Create() {
+ return MakeAndAddRef<BlankDecoderModule>();
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/agnostic/BlankDecoderModule.h b/dom/media/platforms/agnostic/BlankDecoderModule.h
new file mode 100644
index 0000000000..65e5d4479e
--- /dev/null
+++ b/dom/media/platforms/agnostic/BlankDecoderModule.h
@@ -0,0 +1,68 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+#if !defined(BlankDecoderModule_h_)
+# define BlankDecoderModule_h_
+
+# include "DummyMediaDataDecoder.h"
+# include "PlatformDecoderModule.h"
+
+namespace mozilla {
+
+namespace layers {
+class ImageContainer;
+}
+
+class MediaData;
+class MediaRawData;
+
+class BlankVideoDataCreator : public DummyDataCreator {
+ public:
+ BlankVideoDataCreator(uint32_t aFrameWidth, uint32_t aFrameHeight,
+ layers::ImageContainer* aImageContainer);
+
+ already_AddRefed<MediaData> Create(MediaRawData* aSample) override;
+
+ private:
+ VideoInfo mInfo;
+ gfx::IntRect mPicture;
+ uint32_t mFrameWidth;
+ uint32_t mFrameHeight;
+ RefPtr<layers::ImageContainer> mImageContainer;
+};
+
+class BlankAudioDataCreator : public DummyDataCreator {
+ public:
+ BlankAudioDataCreator(uint32_t aChannelCount, uint32_t aSampleRate);
+
+ already_AddRefed<MediaData> Create(MediaRawData* aSample) override;
+
+ private:
+ int64_t mFrameSum;
+ uint32_t mChannelCount;
+ uint32_t mSampleRate;
+};
+
+class BlankDecoderModule : public PlatformDecoderModule {
+ template <typename T, typename... Args>
+ friend already_AddRefed<T> MakeAndAddRef(Args&&...);
+
+ public:
+ static already_AddRefed<PlatformDecoderModule> Create();
+
+ already_AddRefed<MediaDataDecoder> CreateVideoDecoder(
+ const CreateDecoderParams& aParams) override;
+
+ already_AddRefed<MediaDataDecoder> CreateAudioDecoder(
+ const CreateDecoderParams& aParams) override;
+
+ media::DecodeSupportSet SupportsMimeType(
+ const nsACString& aMimeType,
+ DecoderDoctorDiagnostics* aDiagnostics) const override;
+};
+
+} // namespace mozilla
+
+#endif /* BlankDecoderModule_h_ */
diff --git a/dom/media/platforms/agnostic/DAV1DDecoder.cpp b/dom/media/platforms/agnostic/DAV1DDecoder.cpp
new file mode 100644
index 0000000000..7e4af2bd88
--- /dev/null
+++ b/dom/media/platforms/agnostic/DAV1DDecoder.cpp
@@ -0,0 +1,382 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "DAV1DDecoder.h"
+
+#include "gfxUtils.h"
+#include "ImageContainer.h"
+#include "mozilla/StaticPrefs_media.h"
+#include "mozilla/TaskQueue.h"
+#include "nsThreadUtils.h"
+#include "PerformanceRecorder.h"
+#include "VideoUtils.h"
+
+#undef LOG
+#define LOG(arg, ...) \
+ DDMOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, "::%s: " arg, __func__, \
+ ##__VA_ARGS__)
+
+namespace mozilla {
+
+// Picks a dav1d thread count from the coded height: tile threads scaled to
+// the typical tile layout for that resolution, times 2 frame threads.
+// The physical-core cap is applied by the caller (Init).
+static int GetDecodingThreadCount(uint32_t aCodedHeight) {
+  /**
+   * Based on the result we print out from the dav1decoder [1], the
+   * following information shows the number of tiles for AV1 videos served on
+   * Youtube. Each Tile can be decoded in parallel, so we would like to make
+   * sure we at least use enough threads to match the number of tiles.
+   *
+   * ----------------------------
+   * | resolution row col total |
+   * |  480p       2   1    2   |
+   * |  720p       2   2    4   |
+   * |  1080p      4   2    8   |
+   * |  1440p      4   2    8   |
+   * |  2160p      8   4    32  |
+   * ----------------------------
+   *
+   * Besides the tile thread count, the frame thread count also needs to be
+   * considered. As we didn't find anything about what the best number is for
+   * the count of frame thread, just simply use 2 for parallel jobs, which
+   * is similar with Chromium's implementation. They uses 3 frame threads for
+   * 720p+ but less tile threads, so we will still use more total threads. In
+   * addition, their data is measured on 2019, our data should be closer to the
+   * current real world situation.
+   * [1]
+   * https://searchfox.org/mozilla-central/rev/2f5ed7b7244172d46f538051250b14fb4d8f1a5f/third_party/dav1d/src/decode.c#2940
+   */
+  int tileThreads = 2, frameThreads = 2;
+  if (aCodedHeight >= 2160) {
+    tileThreads = 32;
+  } else if (aCodedHeight >= 1080) {
+    tileThreads = 8;
+  } else if (aCodedHeight >= 720) {
+    tileThreads = 4;
+  }
+  return tileThreads * frameThreads;
+}
+
+// Captures the video config and a dedicated TaskQueue; all decoding work is
+// serialized on mTaskQueue. No dav1d state is created until Init().
+DAV1DDecoder::DAV1DDecoder(const CreateDecoderParams& aParams)
+    : mInfo(aParams.VideoConfig()),
+      mTaskQueue(TaskQueue::Create(
+          GetMediaThreadPool(MediaThreadType::PLATFORM_DECODER),
+          "Dav1dDecoder")),
+      mImageContainer(aParams.mImageContainer),
+      mImageAllocator(aParams.mKnowsCompositor),
+      mTrackingId(aParams.mTrackingId) {}
+
+// mContext is torn down in Shutdown() (dav1d_close), not here.
+DAV1DDecoder::~DAV1DDecoder() = default;
+
+// Opens the dav1d context. Thread count: legacy width-based heuristic by
+// default, height/tile-based heuristic behind a pref, capped by physical
+// cores; a force-thread-count pref overrides everything.
+RefPtr<MediaDataDecoder::InitPromise> DAV1DDecoder::Init() {
+  Dav1dSettings settings;
+  dav1d_default_settings(&settings);
+  size_t decoder_threads = 2;
+  if (mInfo.mDisplay.width >= 2048) {
+    decoder_threads = 8;
+  } else if (mInfo.mDisplay.width >= 1024) {
+    decoder_threads = 4;
+  }
+  if (StaticPrefs::media_av1_new_thread_count_strategy()) {
+    decoder_threads = GetDecodingThreadCount(mInfo.mImage.Height());
+  }
+  // Still need to consider the amount of physical cores in order to achieve
+  // best performance.
+  settings.n_threads =
+      static_cast<int>(std::min(decoder_threads, GetNumberOfProcessors()));
+  if (int32_t count = StaticPrefs::media_av1_force_thread_count(); count > 0) {
+    settings.n_threads = count;
+  }
+
+  int res = dav1d_open(&mContext, &settings);
+  if (res < 0) {
+    return DAV1DDecoder::InitPromise::CreateAndReject(
+        MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+                    RESULT_DETAIL("Couldn't get dAV1d decoder interface.")),
+        __func__);
+  }
+  return DAV1DDecoder::InitPromise::CreateAndResolve(TrackInfo::kVideoTrack,
+                                                     __func__);
+}
+
+// Trampolines the sample onto mTaskQueue; the real work is InvokeDecode.
+RefPtr<MediaDataDecoder::DecodePromise> DAV1DDecoder::Decode(
+    MediaRawData* aSample) {
+  return InvokeAsync<MediaRawData*>(mTaskQueue, this, __func__,
+                                    &DAV1DDecoder::InvokeDecode, aSample);
+}
+
+// C-style free callback handed to dav1d_data_wrap; user_data is the
+// DAV1DDecoder that owns the buffer table. May run on a dav1d thread.
+void ReleaseDataBuffer_s(const uint8_t* buf, void* user_data) {
+  MOZ_ASSERT(user_data);
+  MOZ_ASSERT(buf);
+  DAV1DDecoder* d = static_cast<DAV1DDecoder*>(user_data);
+  d->ReleaseDataBuffer(buf);
+}
+
+// Drops the MediaRawData keeping `buf` alive. mDecodingBuffers is only ever
+// touched on mTaskQueue, so off-queue callers dispatch the removal.
+void DAV1DDecoder::ReleaseDataBuffer(const uint8_t* buf) {
+  // The release callback may be called on a different thread defined by the
+  // third party dav1d execution. In that case post a task into TaskQueue to
+  // ensure that mDecodingBuffers is only ever accessed on the TaskQueue.
+  RefPtr<DAV1DDecoder> self = this;
+  auto releaseBuffer = [self, buf] {
+    MOZ_ASSERT(self->mTaskQueue->IsCurrentThreadIn());
+    DebugOnly<bool> found = self->mDecodingBuffers.Remove(buf);
+    MOZ_ASSERT(found);
+  };
+
+  if (mTaskQueue->IsCurrentThreadIn()) {
+    releaseBuffer();
+  } else {
+    nsresult rv = mTaskQueue->Dispatch(NS_NewRunnableFunction(
+        "DAV1DDecoder::ReleaseDataBuffer", std::move(releaseBuffer)));
+    MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
+    Unused << rv;
+  }
+}
+
+// Task-queue half of Decode(): wraps the sample into a Dav1dData (zero-copy,
+// kept alive via mDecodingBuffers until dav1d's release callback fires),
+// feeds it to dav1d, and drains any pictures that become available.
+RefPtr<MediaDataDecoder::DecodePromise> DAV1DDecoder::InvokeDecode(
+    MediaRawData* aSample) {
+  MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+  MOZ_ASSERT(aSample);
+
+  MediaInfoFlag flag = MediaInfoFlag::None;
+  flag |= (aSample->mKeyframe ? MediaInfoFlag::KeyFrame
+                              : MediaInfoFlag::NonKeyFrame);
+  flag |= MediaInfoFlag::SoftwareDecoding;
+  flag |= MediaInfoFlag::VIDEO_AV1;
+  mTrackingId.apply([&](const auto& aId) {
+    mPerformanceRecorder.Start(aSample->mTimecode.ToMicroseconds(),
+                               "DAV1DDecoder"_ns, aId, flag);
+  });
+
+  // Add the buffer to the hashtable in order to increase
+  // the ref counter and keep it alive. When dav1d does not
+  // need it any more it will call its release callback. Remove
+  // the buffer, in there, to reduce the ref counter and eventually
+  // free it. We need a hashtable and not an array because the
+  // release callbacks are not coming in the same order that the
+  // buffers have been added in the decoder (threading ordering
+  // inside decoder)
+  mDecodingBuffers.InsertOrUpdate(aSample->Data(), RefPtr{aSample});
+  Dav1dData data;
+  int res = dav1d_data_wrap(&data, aSample->Data(), aSample->Size(),
+                            ReleaseDataBuffer_s, this);
+  // NOTE(review): data.m is written before res is checked below — presumably
+  // harmless because dav1d zero-initializes data on failure, but confirm.
+  data.m.timestamp = aSample->mTimecode.ToMicroseconds();
+  data.m.duration = aSample->mDuration.ToMicroseconds();
+  data.m.offset = aSample->mOffset;
+
+  if (res < 0) {
+    // NOTE(review): on wrap failure dav1d never invokes the release callback,
+    // so the entry just inserted into mDecodingBuffers stays until shutdown.
+    LOG("Create decoder data error.");
+    return DecodePromise::CreateAndReject(
+        MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__), __func__);
+  }
+  DecodedData results;
+  do {
+    res = dav1d_send_data(mContext, &data);
+    if (res < 0 && res != -EAGAIN) {
+      LOG("Decode error: %d", res);
+      return DecodePromise::CreateAndReject(
+          MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR, __func__), __func__);
+    }
+    // Always consume the whole buffer on success.
+    // At this point only -EAGAIN error is expected.
+    MOZ_ASSERT((res == 0 && !data.sz) ||
+               (res == -EAGAIN && data.sz == aSample->Size()));
+
+    MediaResult rs(NS_OK);
+    res = GetPicture(results, rs);
+    if (res < 0) {
+      if (res == -EAGAIN) {
+        // No frames ready to return. This is not an
+        // error, in some circumstances, we need to
+        // feed it with a certain amount of frames
+        // before we get a picture.
+        continue;
+      }
+      return DecodePromise::CreateAndReject(rs, __func__);
+    }
+  } while (data.sz > 0);
+
+  return DecodePromise::CreateAndResolve(std::move(results), __func__);
+}
+
+// Pulls one picture out of dav1d and appends it (converted to VideoData) to
+// aData. Returns 0 on success or no-op, a negative dav1d error (-EAGAIN
+// meaning "no picture ready yet") on failure, with aResult describing it.
+int DAV1DDecoder::GetPicture(DecodedData& aData, MediaResult& aResult) {
+  // RAII wrapper so the Dav1dPicture is unref'd on every exit path.
+  class Dav1dPictureWrapper {
+   public:
+    Dav1dPicture* operator&() { return &p; }
+    const Dav1dPicture& operator*() const { return p; }
+    ~Dav1dPictureWrapper() { dav1d_picture_unref(&p); }
+
+   private:
+    Dav1dPicture p = Dav1dPicture();
+  };
+  Dav1dPictureWrapper picture;
+
+  int res = dav1d_get_picture(mContext, &picture);
+  if (res < 0) {
+    LOG("Decode error: %d", res);
+    aResult = MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR, __func__);
+    return res;
+  }
+
+  // Monochrome (I400) frames are dropped without producing output.
+  if ((*picture).p.layout == DAV1D_PIXEL_LAYOUT_I400) {
+    return 0;
+  }
+
+  RefPtr<VideoData> v = ConstructImage(*picture);
+  if (!v) {
+    LOG("Image allocation error: %ux%u"
+        " display %ux%u picture %ux%u",
+        (*picture).p.w, (*picture).p.h, mInfo.mDisplay.width,
+        mInfo.mDisplay.height, mInfo.mImage.width, mInfo.mImage.height);
+    aResult = MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
+    return -1;
+  }
+  aData.AppendElement(std::move(v));
+  return 0;
+}
+
+/* static */
+// Maps the bitstream's CICP matrix/primaries to a gfx YUV color space.
+Maybe<gfx::YUVColorSpace> DAV1DDecoder::GetColorSpace(
+    const Dav1dPicture& aPicture, LazyLogModule& aLogger) {
+  // When returning Nothing(), the caller chooses the appropriate default.
+  if (!aPicture.seq_hdr || !aPicture.seq_hdr->color_description_present) {
+    return Nothing();
+  }
+
+  return gfxUtils::CicpToColorSpace(
+      static_cast<gfx::CICP::MatrixCoefficients>(aPicture.seq_hdr->mtrx),
+      static_cast<gfx::CICP::ColourPrimaries>(aPicture.seq_hdr->pri), aLogger);
+}
+
+/* static */
+// Maps the bitstream's CICP colour primaries to a gfx ColorSpace2 value.
+Maybe<gfx::ColorSpace2> DAV1DDecoder::GetColorPrimaries(
+    const Dav1dPicture& aPicture, LazyLogModule& aLogger) {
+  // When returning Nothing(), the caller chooses the appropriate default.
+  if (!aPicture.seq_hdr || !aPicture.seq_hdr->color_description_present) {
+    return Nothing();
+  }
+
+  return gfxUtils::CicpToColorPrimaries(
+      static_cast<gfx::CICP::ColourPrimaries>(aPicture.seq_hdr->pri), aLogger);
+}
+
+// Builds a VideoData by copying the dav1d picture's Y/Cb/Cr planes into a
+// YCbCrBuffer, carrying over depth, color space/range and chroma subsampling.
+// Returns null if the copy/allocation fails.
+already_AddRefed<VideoData> DAV1DDecoder::ConstructImage(
+    const Dav1dPicture& aPicture) {
+  VideoData::YCbCrBuffer b;
+  if (aPicture.p.bpc == 10) {
+    b.mColorDepth = gfx::ColorDepth::COLOR_10;
+  } else if (aPicture.p.bpc == 12) {
+    b.mColorDepth = gfx::ColorDepth::COLOR_12;
+  } else {
+    b.mColorDepth = gfx::ColorDepth::COLOR_8;
+  }
+
+  b.mYUVColorSpace =
+      DAV1DDecoder::GetColorSpace(aPicture, sPDMLog)
+          .valueOr(DefaultColorSpace({aPicture.p.w, aPicture.p.h}));
+  b.mColorPrimaries = DAV1DDecoder::GetColorPrimaries(aPicture, sPDMLog)
+                          .valueOr(gfx::ColorSpace2::BT709);
+  // NOTE(review): seq_hdr is dereferenced unconditionally here, while
+  // GetColorSpace above null-checks it — confirm dav1d guarantees a non-null
+  // seq_hdr on every returned picture.
+  b.mColorRange = aPicture.seq_hdr->color_range ? gfx::ColorRange::FULL
+                                                : gfx::ColorRange::LIMITED;
+
+  b.mPlanes[0].mData = static_cast<uint8_t*>(aPicture.data[0]);
+  b.mPlanes[0].mStride = aPicture.stride[0];
+  b.mPlanes[0].mHeight = aPicture.p.h;
+  b.mPlanes[0].mWidth = aPicture.p.w;
+  b.mPlanes[0].mSkip = 0;
+
+  // dav1d exposes a single stride (stride[1]) shared by both chroma planes.
+  b.mPlanes[1].mData = static_cast<uint8_t*>(aPicture.data[1]);
+  b.mPlanes[1].mStride = aPicture.stride[1];
+  b.mPlanes[1].mSkip = 0;
+
+  b.mPlanes[2].mData = static_cast<uint8_t*>(aPicture.data[2]);
+  b.mPlanes[2].mStride = aPicture.stride[1];
+  b.mPlanes[2].mSkip = 0;
+
+  // https://code.videolan.org/videolan/dav1d/blob/master/tools/output/yuv.c#L67
+  const int ss_ver = aPicture.p.layout == DAV1D_PIXEL_LAYOUT_I420;
+  const int ss_hor = aPicture.p.layout != DAV1D_PIXEL_LAYOUT_I444;
+
+  b.mPlanes[1].mHeight = (aPicture.p.h + ss_ver) >> ss_ver;
+  b.mPlanes[1].mWidth = (aPicture.p.w + ss_hor) >> ss_hor;
+
+  b.mPlanes[2].mHeight = (aPicture.p.h + ss_ver) >> ss_ver;
+  b.mPlanes[2].mWidth = (aPicture.p.w + ss_hor) >> ss_hor;
+
+  if (ss_ver) {
+    b.mChromaSubsampling = gfx::ChromaSubsampling::HALF_WIDTH_AND_HEIGHT;
+  } else if (ss_hor) {
+    b.mChromaSubsampling = gfx::ChromaSubsampling::HALF_WIDTH;
+  }
+
+  // Timestamp, duration and offset used here are wrong.
+  // We need to take those values from the decoder. Latest
+  // dav1d version allows for that.
+  media::TimeUnit timecode =
+      media::TimeUnit::FromMicroseconds(aPicture.m.timestamp);
+  media::TimeUnit duration =
+      media::TimeUnit::FromMicroseconds(aPicture.m.duration);
+  int64_t offset = aPicture.m.offset;
+  bool keyframe = aPicture.frame_hdr->frame_type == DAV1D_FRAME_TYPE_KEY;
+
+  mPerformanceRecorder.Record(aPicture.m.timestamp, [&](DecodeStage& aStage) {
+    aStage.SetResolution(aPicture.p.w, aPicture.p.h);
+    auto format = [&]() -> Maybe<DecodeStage::ImageFormat> {
+      switch (aPicture.p.layout) {
+        case DAV1D_PIXEL_LAYOUT_I420:
+          return Some(DecodeStage::YUV420P);
+        case DAV1D_PIXEL_LAYOUT_I422:
+          return Some(DecodeStage::YUV422P);
+        case DAV1D_PIXEL_LAYOUT_I444:
+          return Some(DecodeStage::YUV444P);
+        default:
+          return Nothing();
+      }
+    }();
+    format.apply([&](auto& aFmt) { aStage.SetImageFormat(aFmt); });
+    aStage.SetYUVColorSpace(b.mYUVColorSpace);
+    aStage.SetColorRange(b.mColorRange);
+    aStage.SetColorDepth(b.mColorDepth);
+  });
+
+  return VideoData::CreateAndCopyData(
+      mInfo, mImageContainer, offset, timecode, duration, b, keyframe, timecode,
+      mInfo.ScaledImageRect(aPicture.p.w, aPicture.p.h), mImageAllocator);
+}
+
+// Drains all remaining pictures buffered inside dav1d; resolves with them
+// once dav1d reports -EAGAIN (nothing left).
+RefPtr<MediaDataDecoder::DecodePromise> DAV1DDecoder::Drain() {
+  RefPtr<DAV1DDecoder> self = this;
+  return InvokeAsync(mTaskQueue, __func__, [self, this] {
+    int res = 0;
+    DecodedData results;
+    do {
+      MediaResult rs(NS_OK);
+      res = GetPicture(results, rs);
+      if (res < 0 && res != -EAGAIN) {
+        return DecodePromise::CreateAndReject(rs, __func__);
+      }
+    } while (res != -EAGAIN);
+    return DecodePromise::CreateAndResolve(std::move(results), __func__);
+  });
+}
+
+// Discards dav1d's in-flight state (seek/flush) and closes out any pending
+// performance-recorder samples.
+RefPtr<MediaDataDecoder::FlushPromise> DAV1DDecoder::Flush() {
+  RefPtr<DAV1DDecoder> self = this;
+  return InvokeAsync(mTaskQueue, __func__, [this, self]() {
+    dav1d_flush(self->mContext);
+    mPerformanceRecorder.Record(std::numeric_limits<int64_t>::max());
+    return FlushPromise::CreateAndResolve(true, __func__);
+  });
+}
+
+// Closes the dav1d context on the task queue, then shuts the queue down.
+RefPtr<ShutdownPromise> DAV1DDecoder::Shutdown() {
+  RefPtr<DAV1DDecoder> self = this;
+  return InvokeAsync(mTaskQueue, __func__, [self]() {
+    dav1d_close(&self->mContext);
+    return self->mTaskQueue->BeginShutdown();
+  });
+}
+
+} // namespace mozilla
+#undef LOG
diff --git a/dom/media/platforms/agnostic/DAV1DDecoder.h b/dom/media/platforms/agnostic/DAV1DDecoder.h
new file mode 100644
index 0000000000..3a7f4d4ee4
--- /dev/null
+++ b/dom/media/platforms/agnostic/DAV1DDecoder.h
@@ -0,0 +1,68 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+#if !defined(DAV1DDecoder_h_)
+# define DAV1DDecoder_h_
+
+# include "PerformanceRecorder.h"
+# include "PlatformDecoderModule.h"
+# include "dav1d/dav1d.h"
+# include "nsRefPtrHashtable.h"
+
+namespace mozilla {
+
+DDLoggedTypeDeclNameAndBase(DAV1DDecoder, MediaDataDecoder);
+
+typedef nsRefPtrHashtable<nsPtrHashKey<const uint8_t>, MediaRawData>
+ MediaRawDataHashtable;
+
+// Software AV1 decoder backed by libdav1d. All decoding runs on a private
+// TaskQueue; input buffers are wrapped zero-copy and kept alive in
+// mDecodingBuffers until dav1d releases them.
+class DAV1DDecoder final : public MediaDataDecoder,
+                           public DecoderDoctorLifeLogger<DAV1DDecoder> {
+ public:
+  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(DAV1DDecoder, final);
+
+  explicit DAV1DDecoder(const CreateDecoderParams& aParams);
+
+  RefPtr<InitPromise> Init() override;
+  RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
+  RefPtr<DecodePromise> Drain() override;
+  RefPtr<FlushPromise> Flush() override;
+  RefPtr<ShutdownPromise> Shutdown() override;
+  nsCString GetDescriptionName() const override {
+    return "av1 libdav1d video decoder"_ns;
+  }
+  nsCString GetCodecName() const override { return "av1"_ns; }
+
+  // Called (possibly from a dav1d thread) when dav1d is done with `buf`.
+  void ReleaseDataBuffer(const uint8_t* buf);
+
+  static Maybe<gfx::YUVColorSpace> GetColorSpace(const Dav1dPicture& aPicture,
+                                                 LazyLogModule& aLogger);
+
+  static Maybe<gfx::ColorSpace2> GetColorPrimaries(const Dav1dPicture& aPicture,
+                                                   LazyLogModule& aLogger);
+
+ private:
+  virtual ~DAV1DDecoder();
+  RefPtr<DecodePromise> InvokeDecode(MediaRawData* aSample);
+  int GetPicture(DecodedData& aData, MediaResult& aResult);
+  already_AddRefed<VideoData> ConstructImage(const Dav1dPicture& aPicture);
+
+  Dav1dContext* mContext = nullptr;
+
+  const VideoInfo mInfo;
+  const RefPtr<TaskQueue> mTaskQueue;
+  const RefPtr<layers::ImageContainer> mImageContainer;
+  const RefPtr<layers::KnowsCompositor> mImageAllocator;
+  const Maybe<TrackingId> mTrackingId;
+  PerformanceRecorderMulti<DecodeStage> mPerformanceRecorder;
+
+  // Keep the buffers alive until dav1d
+  // does not need them any more.
+  MediaRawDataHashtable mDecodingBuffers;
+};
+
+} // namespace mozilla
+
+#endif // DAV1DDecoder_h_
diff --git a/dom/media/platforms/agnostic/DummyMediaDataDecoder.cpp b/dom/media/platforms/agnostic/DummyMediaDataDecoder.cpp
new file mode 100644
index 0000000000..172c7f3dd9
--- /dev/null
+++ b/dom/media/platforms/agnostic/DummyMediaDataDecoder.cpp
@@ -0,0 +1,80 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "DummyMediaDataDecoder.h"
+#include "AnnexB.h"
+#include "H264.h"
+#include "MP4Decoder.h"
+
+namespace mozilla {
+
+// Out-of-line to anchor the vtable in this translation unit.
+DummyDataCreator::~DummyDataCreator() = default;
+
+// For H264, size the reorder queue from the SPS-derived max ref frames
+// (falling back to the spec maximum of 16 when no SPS is available);
+// non-H264 content needs no reordering (mMaxRefFrames == 0).
+DummyMediaDataDecoder::DummyMediaDataDecoder(
+    UniquePtr<DummyDataCreator>&& aCreator, const nsACString& aDescription,
+    const CreateDecoderParams& aParams)
+    : mCreator(std::move(aCreator)),
+      mIsH264(MP4Decoder::IsH264(aParams.mConfig.mMimeType)),
+      mMaxRefFrames(mIsH264 ? H264::HasSPS(aParams.VideoConfig().mExtraData)
+                                  ? H264::ComputeMaxRefFrames(
+                                        aParams.VideoConfig().mExtraData)
+                                  : 16
+                            : 0),
+      mType(aParams.mConfig.GetType()),
+      mDescription(aDescription) {}
+
+// Nothing to initialize; resolve immediately with the track type.
+RefPtr<MediaDataDecoder::InitPromise> DummyMediaDataDecoder::Init() {
+  return InitPromise::CreateAndResolve(mType, __func__);
+}
+
+// No resources to release; resolve immediately.
+RefPtr<ShutdownPromise> DummyMediaDataDecoder::Shutdown() {
+  return ShutdownPromise::CreateAndResolve(true, __func__);
+}
+
+// Synthesizes a dummy output for the sample via mCreator, holding frames in
+// a PTS reorder queue until more than mMaxRefFrames are queued.
+RefPtr<MediaDataDecoder::DecodePromise> DummyMediaDataDecoder::Decode(
+    MediaRawData* aSample) {
+  RefPtr<MediaData> data = mCreator->Create(aSample);
+
+  if (!data) {
+    return DecodePromise::CreateAndReject(NS_ERROR_OUT_OF_MEMORY, __func__);
+  }
+
+  // Frames come out in DTS order but we need to output them in PTS order.
+  mReorderQueue.Push(std::move(data));
+
+  if (mReorderQueue.Length() > mMaxRefFrames) {
+    return DecodePromise::CreateAndResolve(DecodedData{mReorderQueue.Pop()},
+                                           __func__);
+  }
+  return DecodePromise::CreateAndResolve(DecodedData(), __func__);
+}
+
+// Flushes every queued frame out in (PTS) queue order.
+RefPtr<MediaDataDecoder::DecodePromise> DummyMediaDataDecoder::Drain() {
+  DecodedData samples;
+  while (!mReorderQueue.IsEmpty()) {
+    samples.AppendElement(mReorderQueue.Pop());
+  }
+  return DecodePromise::CreateAndResolve(std::move(samples), __func__);
+}
+
+// Drops all queued frames (seek/flush).
+RefPtr<MediaDataDecoder::FlushPromise> DummyMediaDataDecoder::Flush() {
+  mReorderQueue.Clear();
+  return FlushPromise::CreateAndResolve(true, __func__);
+}
+
+// Note: returns a fixed name, not the mDescription passed at construction.
+nsCString DummyMediaDataDecoder::GetDescriptionName() const {
+  return "blank media data decoder"_ns;
+}
+
+nsCString DummyMediaDataDecoder::GetCodecName() const { return "unknown"_ns; }
+
+// H264 input must be converted to AVCC before being fed to Decode().
+MediaDataDecoder::ConversionRequired DummyMediaDataDecoder::NeedsConversion()
+    const {
+  return mIsH264 ? ConversionRequired::kNeedAVCC
+                 : ConversionRequired::kNeedNone;
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/agnostic/DummyMediaDataDecoder.h b/dom/media/platforms/agnostic/DummyMediaDataDecoder.h
new file mode 100644
index 0000000000..562d289bd9
--- /dev/null
+++ b/dom/media/platforms/agnostic/DummyMediaDataDecoder.h
@@ -0,0 +1,68 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(DummyMediaDataDecoder_h_)
+# define DummyMediaDataDecoder_h_
+
+# include "MediaInfo.h"
+# include "mozilla/UniquePtr.h"
+# include "PlatformDecoderModule.h"
+# include "ReorderQueue.h"
+
+namespace mozilla {
+
+class MediaRawData;
+
+// Strategy interface: implementations synthesize a MediaData for a sample
+// (blank video, silent audio, null frame, ...).
+class DummyDataCreator {
+ public:
+  virtual ~DummyDataCreator();
+  virtual already_AddRefed<MediaData> Create(MediaRawData* aSample) = 0;
+};
+
+DDLoggedTypeDeclNameAndBase(DummyMediaDataDecoder, MediaDataDecoder);
+
+// Decoder that uses a passed in object's Create function to create Null
+// MediaData objects.
+// Decoder that uses a passed in object's Create function to create Null
+// MediaData objects. Maintains a PTS reorder queue sized by the stream's
+// max reference frames (H264 only).
+class DummyMediaDataDecoder final
+    : public MediaDataDecoder,
+      public DecoderDoctorLifeLogger<DummyMediaDataDecoder> {
+ public:
+  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(DummyMediaDataDecoder, final);
+
+  DummyMediaDataDecoder(UniquePtr<DummyDataCreator>&& aCreator,
+                        const nsACString& aDescription,
+                        const CreateDecoderParams& aParams);
+
+  RefPtr<InitPromise> Init() override;
+
+  RefPtr<ShutdownPromise> Shutdown() override;
+
+  RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
+
+  RefPtr<DecodePromise> Drain() override;
+
+  RefPtr<FlushPromise> Flush() override;
+
+  nsCString GetDescriptionName() const override;
+
+  nsCString GetCodecName() const override;
+
+  ConversionRequired NeedsConversion() const override;
+
+ private:
+  ~DummyMediaDataDecoder() = default;
+
+  UniquePtr<DummyDataCreator> mCreator;
+  const bool mIsH264;
+  const uint32_t mMaxRefFrames;
+  ReorderQueue mReorderQueue;
+  TrackInfo::TrackType mType;
+  nsCString mDescription;
+};
+
+} // namespace mozilla
+
+#endif // !defined(DummyMediaDataDecoder_h_)
diff --git a/dom/media/platforms/agnostic/NullDecoderModule.cpp b/dom/media/platforms/agnostic/NullDecoderModule.cpp
new file mode 100644
index 0000000000..2893a4af67
--- /dev/null
+++ b/dom/media/platforms/agnostic/NullDecoderModule.cpp
@@ -0,0 +1,57 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "DummyMediaDataDecoder.h"
+#include "ImageContainer.h"
+
+namespace mozilla {
+
+// DummyDataCreator that emits VideoData wrapping an empty YCbCr image.
+class NullVideoDataCreator : public DummyDataCreator {
+ public:
+  NullVideoDataCreator() = default;
+
+  already_AddRefed<MediaData> Create(MediaRawData* aSample) override {
+    // Create a dummy VideoData with an empty image. This gives us something to
+    // send to media streams if necessary.
+    RefPtr<layers::PlanarYCbCrImage> image =
+        new layers::RecyclingPlanarYCbCrImage(new layers::BufferRecycleBin());
+    return VideoData::CreateFromImage(gfx::IntSize(), aSample->mOffset,
+                                      aSample->mTime, aSample->mDuration, image,
+                                      aSample->mKeyframe, aSample->mTimecode);
+  }
+};
+
+// PDM producing video "decoders" that output empty frames. Video only:
+// audio creation is asserted unreachable.
+class NullDecoderModule : public PlatformDecoderModule {
+ public:
+  // Decode thread.
+  already_AddRefed<MediaDataDecoder> CreateVideoDecoder(
+      const CreateDecoderParams& aParams) override {
+    UniquePtr<DummyDataCreator> creator = MakeUnique<NullVideoDataCreator>();
+    RefPtr<MediaDataDecoder> decoder = new DummyMediaDataDecoder(
+        std::move(creator), "null media data decoder"_ns, aParams);
+    return decoder.forget();
+  }
+
+  // Decode thread.
+  already_AddRefed<MediaDataDecoder> CreateAudioDecoder(
+      const CreateDecoderParams& aParams) override {
+    MOZ_ASSERT(false, "Audio decoders are unsupported.");
+    return nullptr;
+  }
+
+  // Claims software support for any MIME type; callers gate actual use.
+  media::DecodeSupportSet SupportsMimeType(
+      const nsACString& aMimeType,
+      DecoderDoctorDiagnostics* aDiagnostics) const override {
+    return media::DecodeSupport::SoftwareDecode;
+  }
+};
+
+// Factory entry point; NullDecoderModule itself is file-local.
+already_AddRefed<PlatformDecoderModule> CreateNullDecoderModule() {
+  RefPtr<PlatformDecoderModule> pdm = new NullDecoderModule();
+  return pdm.forget();
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/agnostic/OpusDecoder.cpp b/dom/media/platforms/agnostic/OpusDecoder.cpp
new file mode 100644
index 0000000000..715fa848dc
--- /dev/null
+++ b/dom/media/platforms/agnostic/OpusDecoder.cpp
@@ -0,0 +1,380 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "OpusDecoder.h"
+
+#include <inttypes.h> // For PRId64
+
+#include "OpusParser.h"
+#include "TimeUnits.h"
+#include "VideoUtils.h"
+#include "VorbisDecoder.h" // For VorbisLayout
+#include "VorbisUtils.h"
+#include "mozilla/EndianUtils.h"
+#include "mozilla/PodOperations.h"
+#include "mozilla/SyncRunnable.h"
+#include "opus/opus.h"
+extern "C" {
+#include "opus/opus_multistream.h"
+}
+
+#define OPUS_DEBUG(arg, ...) \
+ DDMOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, "::%s: " arg, __func__, \
+ ##__VA_ARGS__)
+
+namespace mozilla {
+
+// Captures the audio config; the opus decoder itself is created in Init()
+// after the header has been parsed.
+OpusDataDecoder::OpusDataDecoder(const CreateDecoderParams& aParams)
+    : mInfo(aParams.AudioConfig()),
+      mOpusDecoder(nullptr),
+      mSkip(0),
+      mDecodedHeader(false),
+      mPaddingDiscarded(false),
+      mFrames(0),
+      mChannelMap(AudioConfig::ChannelLayout::UNKNOWN_MAP),
+      mDefaultPlaybackDeviceMono(aParams.mOptions.contains(
+          CreateDecoderParams::Option::DefaultPlaybackDeviceMono)) {}
+
+// Frees the multistream decoder if Init() ever created one.
+OpusDataDecoder::~OpusDataDecoder() {
+  if (mOpusDecoder) {
+    opus_multistream_decoder_destroy(mOpusDecoder);
+    mOpusDecoder = nullptr;
+  }
+}
+
+// Nothing async to tear down; the opus context is freed by the destructor.
+RefPtr<ShutdownPromise> OpusDataDecoder::Shutdown() {
+  // mThread may not be set if Init hasn't been called first.
+  MOZ_ASSERT(!mThread || mThread->IsOnCurrentThread());
+  return ShutdownPromise::CreateAndResolve(true, __func__);
+}
+
+// Parses the Opus header blob from the codec-specific data, creates the
+// multistream decoder with the derived channel mapping, configures phase
+// inversion for mono playback, and sanity-checks container metadata
+// (pre-skip/CodecDelay, rate, channels) against the parsed header.
+RefPtr<MediaDataDecoder::InitPromise> OpusDataDecoder::Init() {
+  mThread = GetCurrentSerialEventTarget();
+  if (!mInfo.mCodecSpecificConfig.is<OpusCodecSpecificData>()) {
+    MOZ_ASSERT_UNREACHABLE();
+    OPUS_DEBUG("Opus decoder got non-opus codec specific data");
+    return InitPromise::CreateAndReject(
+        MediaResult(
+            NS_ERROR_DOM_MEDIA_FATAL_ERR,
+            RESULT_DETAIL("Opus decoder got non-opus codec specific data!")),
+        __func__);
+  }
+  const OpusCodecSpecificData opusCodecSpecificData =
+      mInfo.mCodecSpecificConfig.as<OpusCodecSpecificData>();
+  RefPtr<MediaByteBuffer> opusHeaderBlob =
+      opusCodecSpecificData.mHeadersBinaryBlob;
+  size_t length = opusHeaderBlob->Length();
+  uint8_t* p = opusHeaderBlob->Elements();
+  if (NS_FAILED(DecodeHeader(p, length))) {
+    OPUS_DEBUG("Error decoding header!");
+    return InitPromise::CreateAndReject(
+        MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+                    RESULT_DETAIL("Error decoding header!")),
+        __func__);
+  }
+
+  MOZ_ASSERT(mMappingTable.Length() >= uint32_t(mOpusParser->mChannels));
+  // r receives the opus error code; it is re-checked at the bottom so a
+  // non-null decoder with a bad status still rejects.
+  int r;
+  mOpusDecoder = opus_multistream_decoder_create(
+      mOpusParser->mRate, mOpusParser->mChannels, mOpusParser->mStreams,
+      mOpusParser->mCoupledStreams, mMappingTable.Elements(), &r);
+
+  if (!mOpusDecoder) {
+    OPUS_DEBUG("Error creating decoder!");
+    return InitPromise::CreateAndReject(
+        MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+                    RESULT_DETAIL("Error creating decoder!")),
+        __func__);
+  }
+
+  // Opus has a special feature for stereo coding where it represent wide
+  // stereo channels by 180-degree out of phase. This improves quality, but
+  // needs to be disabled when the output is downmixed to mono. Playback number
+  // of channels are set in AudioSink, using the same method
+  // `DecideAudioPlaybackChannels()`, and triggers downmix if needed.
+  if (mDefaultPlaybackDeviceMono || DecideAudioPlaybackChannels(mInfo) == 1) {
+    opus_multistream_decoder_ctl(mOpusDecoder,
+                                 OPUS_SET_PHASE_INVERSION_DISABLED(1));
+  }
+
+  mSkip = mOpusParser->mPreSkip;
+  mPaddingDiscarded = false;
+
+  if (opusCodecSpecificData.mContainerCodecDelayMicroSeconds !=
+      FramesToUsecs(mOpusParser->mPreSkip, mOpusParser->mRate).value()) {
+    NS_WARNING(
+        "Invalid Opus header: container CodecDelay and Opus pre-skip do not "
+        "match!");
+  }
+  OPUS_DEBUG("Opus preskip in extradata: %" PRId64 "us",
+             opusCodecSpecificData.mContainerCodecDelayMicroSeconds);
+
+  if (mInfo.mRate != (uint32_t)mOpusParser->mRate) {
+    NS_WARNING("Invalid Opus header: container and codec rate do not match!");
+  }
+  if (mInfo.mChannels != (uint32_t)mOpusParser->mChannels) {
+    NS_WARNING(
+        "Invalid Opus header: container and codec channels do not match!");
+  }
+
+  return r == OPUS_OK
+             ? InitPromise::CreateAndResolve(TrackInfo::kAudioTrack, __func__)
+             : InitPromise::CreateAndReject(
+                   MediaResult(
+                       NS_ERROR_DOM_MEDIA_FATAL_ERR,
+                       RESULT_DETAIL(
+                           "could not create opus multistream decoder!")),
+                   __func__);
+}
+
+// Parses the Opus identification header and builds mMappingTable, the
+// channel mapping handed to opus_multistream_decoder_create: when the
+// Vorbis-style layout is valid and channel mapping family 1 is used, the
+// mapping is permuted to SMPTE order; otherwise channels pass through as-is.
+// Must be called exactly once, before the decoder is created.
+nsresult OpusDataDecoder::DecodeHeader(const unsigned char* aData,
+                                       size_t aLength) {
+  MOZ_ASSERT(!mOpusParser);
+  MOZ_ASSERT(!mOpusDecoder);
+  MOZ_ASSERT(!mDecodedHeader);
+  mDecodedHeader = true;
+
+  mOpusParser = MakeUnique<OpusParser>();
+  if (!mOpusParser->DecodeHeader(const_cast<unsigned char*>(aData), aLength)) {
+    return NS_ERROR_FAILURE;
+  }
+  int channels = mOpusParser->mChannels;
+
+  mMappingTable.SetLength(channels);
+  AudioConfig::ChannelLayout vorbisLayout(
+      channels, VorbisDataDecoder::VorbisLayout(channels));
+  if (vorbisLayout.IsValid()) {
+    mChannelMap = vorbisLayout.Map();
+
+    AudioConfig::ChannelLayout smpteLayout(
+        AudioConfig::ChannelLayout::SMPTEDefault(vorbisLayout));
+
+    AutoTArray<uint8_t, 8> map;
+    map.SetLength(channels);
+    if (mOpusParser->mChannelMapping == 1 &&
+        vorbisLayout.MappingTable(smpteLayout, &map)) {
+      for (int i = 0; i < channels; i++) {
+        mMappingTable[i] = mOpusParser->mMappingTable[map[i]];
+      }
+    } else {
+      // Use Opus set channel mapping and return channels as-is.
+      PodCopy(mMappingTable.Elements(), mOpusParser->mMappingTable, channels);
+    }
+  } else {
+    // Create a dummy mapping table so that channel ordering stay the same
+    // during decoding.
+    for (int i = 0; i < channels; i++) {
+      mMappingTable[i] = i;
+    }
+  }
+
+  return NS_OK;
+}
+
+RefPtr<MediaDataDecoder::DecodePromise> OpusDataDecoder::Decode(
+ MediaRawData* aSample) {
+ MOZ_ASSERT(mThread->IsOnCurrentThread());
+ PROCESS_DECODE_LOG(aSample);
+ uint32_t channels = mOpusParser->mChannels;
+
+ if (mPaddingDiscarded) {
+ // Discard padding should be used only on the final packet, so
+ // decoding after a padding discard is invalid.
+ OPUS_DEBUG("Opus error, discard padding on interstitial packet");
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ RESULT_DETAIL("Discard padding on interstitial packet")),
+ __func__);
+ }
+
+ if (!mLastFrameTime ||
+ mLastFrameTime.ref() != aSample->mTime.ToMicroseconds()) {
+ // We are starting a new block.
+ mFrames = 0;
+ mLastFrameTime = Some(aSample->mTime.ToMicroseconds());
+ }
+
+ // Maximum value is 63*2880, so there's no chance of overflow.
+ int frames_number =
+ opus_packet_get_nb_frames(aSample->Data(), aSample->Size());
+ if (frames_number <= 0) {
+ OPUS_DEBUG("Invalid packet header: r=%d length=%zu", frames_number,
+ aSample->Size());
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("Invalid packet header: r=%d length=%u",
+ frames_number, uint32_t(aSample->Size()))),
+ __func__);
+ }
+
+ int samples = opus_packet_get_samples_per_frame(
+ aSample->Data(), opus_int32(mOpusParser->mRate));
+
+ // A valid Opus packet must be between 2.5 and 120 ms long (48kHz).
+ CheckedInt32 totalFrames =
+ CheckedInt32(frames_number) * CheckedInt32(samples);
+ if (!totalFrames.isValid()) {
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("Frames count overflow")),
+ __func__);
+ }
+
+ int frames = totalFrames.value();
+ if (frames < 120 || frames > 5760) {
+ OPUS_DEBUG("Invalid packet frames: %d", frames);
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("Invalid packet frames:%d", frames)),
+ __func__);
+ }
+
+ AlignedAudioBuffer buffer(frames * channels);
+ if (!buffer) {
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__), __func__);
+ }
+
+ // Decode to the appropriate sample type.
+#ifdef MOZ_SAMPLE_TYPE_FLOAT32
+ int ret = opus_multistream_decode_float(mOpusDecoder, aSample->Data(),
+ aSample->Size(), buffer.get(), frames,
+ false);
+#else
+ int ret =
+ opus_multistream_decode(mOpusDecoder, aSample->Data(), aSample->Size(),
+ buffer.get(), frames, false);
+#endif
+ if (ret < 0) {
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("Opus decoding error:%d", ret)),
+ __func__);
+ }
+ NS_ASSERTION(ret == frames, "Opus decoded too few audio samples");
+ auto startTime = aSample->mTime;
+
+ OPUS_DEBUG("Decoding frames: [%lf, %lf]", aSample->mTime.ToSeconds(),
+ aSample->GetEndTime().ToSeconds());
+
+ // Trim the initial frames while the decoder is settling.
+ if (mSkip > 0) {
+ int32_t skipFrames = std::min<int32_t>(mSkip, frames);
+ int32_t keepFrames = frames - skipFrames;
+ OPUS_DEBUG("Opus decoder trimming %d of %d frames", skipFrames, frames);
+ PodMove(buffer.get(), buffer.get() + skipFrames * channels,
+ keepFrames * channels);
+ startTime = startTime + media::TimeUnit(skipFrames, mOpusParser->mRate);
+ frames = keepFrames;
+ mSkip -= skipFrames;
+ aSample->mTime += media::TimeUnit(skipFrames, 48000);
+ aSample->mDuration -= media::TimeUnit(skipFrames, 48000);
+ OPUS_DEBUG("Adjusted frame after trimming pre-roll: [%lf, %lf]",
+ aSample->mTime.ToSeconds(), aSample->GetEndTime().ToSeconds());
+ }
+
+ if (aSample->mDiscardPadding > 0) {
+ OPUS_DEBUG("Opus decoder discarding %u of %d frames",
+ aSample->mDiscardPadding, frames);
+ // Padding discard is only supposed to happen on the final packet.
+ // Record the discard so we can return an error if another packet is
+ // decoded.
+ if (aSample->mDiscardPadding > uint32_t(frames)) {
+ // Discarding more than the entire packet is invalid.
+ OPUS_DEBUG("Opus error, discard padding larger than packet");
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ RESULT_DETAIL("Discard padding larger than packet")),
+ __func__);
+ }
+
+ mPaddingDiscarded = true;
+ frames = frames - aSample->mDiscardPadding;
+ }
+
+ // Apply the header gain if one was specified.
+#ifdef MOZ_SAMPLE_TYPE_FLOAT32
+ if (mOpusParser->mGain != 1.0f) {
+ float gain = mOpusParser->mGain;
+ uint32_t samples = frames * channels;
+ for (uint32_t i = 0; i < samples; i++) {
+ buffer[i] *= gain;
+ }
+ }
+#else
+ if (mOpusParser->mGain_Q16 != 65536) {
+ int64_t gain_Q16 = mOpusParser->mGain_Q16;
+ uint32_t samples = frames * channels;
+ for (uint32_t i = 0; i < samples; i++) {
+ int32_t val = static_cast<int32_t>((gain_Q16 * buffer[i] + 32768) >> 16);
+ buffer[i] = static_cast<AudioDataValue>(MOZ_CLIP_TO_15(val));
+ }
+ }
+#endif
+
+ auto duration = media::TimeUnit(frames, mOpusParser->mRate);
+ if (!duration.IsValid()) {
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
+ RESULT_DETAIL("Overflow converting WebM audio duration")),
+ __func__);
+ }
+ auto time = startTime -
+ media::TimeUnit(mOpusParser->mPreSkip, mOpusParser->mRate) +
+ media::TimeUnit(mFrames, mOpusParser->mRate);
+ if (!time.IsValid()) {
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
+ RESULT_DETAIL("Overflow shifting tstamp by codec delay")),
+ __func__);
+ };
+
+ mFrames += frames;
+ mTotalFrames += frames;
+
+ OPUS_DEBUG("Total frames so far: %" PRId64, mTotalFrames);
+
+ if (!frames) {
+ return DecodePromise::CreateAndResolve(DecodedData(), __func__);
+ }
+
+ // Trim extra allocated frames.
+ buffer.SetLength(frames * channels);
+
+ return DecodePromise::CreateAndResolve(
+ DecodedData{new AudioData(aSample->mOffset, time, std::move(buffer),
+ mOpusParser->mChannels, mOpusParser->mRate,
+ mChannelMap)},
+ __func__);
+}
+
+RefPtr<MediaDataDecoder::DecodePromise> OpusDataDecoder::Drain() {
+ MOZ_ASSERT(mThread->IsOnCurrentThread());
+ return DecodePromise::CreateAndResolve(DecodedData(), __func__);
+}
+
+RefPtr<MediaDataDecoder::FlushPromise> OpusDataDecoder::Flush() {
+ MOZ_ASSERT(mThread->IsOnCurrentThread());
+ if (!mOpusDecoder) {
+ return FlushPromise::CreateAndResolve(true, __func__);
+ }
+
+ MOZ_ASSERT(mOpusDecoder);
+ // Reset the decoder.
+ opus_multistream_decoder_ctl(mOpusDecoder, OPUS_RESET_STATE);
+ mSkip = mOpusParser->mPreSkip;
+ mPaddingDiscarded = false;
+ mLastFrameTime.reset();
+ return FlushPromise::CreateAndResolve(true, __func__);
+}
+
/* static */
// Returns true only for the canonical "audio/opus" MIME type.
bool OpusDataDecoder::IsOpus(const nsACString& aMimeType) {
  return aMimeType.EqualsLiteral("audio/opus");
}
+
+} // namespace mozilla
+#undef OPUS_DEBUG
diff --git a/dom/media/platforms/agnostic/OpusDecoder.h b/dom/media/platforms/agnostic/OpusDecoder.h
new file mode 100644
index 0000000000..3df3471292
--- /dev/null
+++ b/dom/media/platforms/agnostic/OpusDecoder.h
@@ -0,0 +1,70 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+#if !defined(OpusDecoder_h_)
+# define OpusDecoder_h_
+
+# include "PlatformDecoderModule.h"
+
+# include "mozilla/Maybe.h"
+# include "nsTArray.h"
+
+struct OpusMSDecoder;
+
+namespace mozilla {
+
+class OpusParser;
+
+DDLoggedTypeDeclNameAndBase(OpusDataDecoder, MediaDataDecoder);
+
// Software Opus audio decoder wrapping libopus' multistream API.
// All operations are expected to run on the serial event target mThread.
class OpusDataDecoder final : public MediaDataDecoder,
                              public DecoderDoctorLifeLogger<OpusDataDecoder> {
 public:
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(OpusDataDecoder, final);

  explicit OpusDataDecoder(const CreateDecoderParams& aParams);

  RefPtr<InitPromise> Init() override;
  RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
  RefPtr<DecodePromise> Drain() override;
  RefPtr<FlushPromise> Flush() override;
  RefPtr<ShutdownPromise> Shutdown() override;
  nsCString GetDescriptionName() const override {
    return "opus audio decoder"_ns;
  }
  nsCString GetCodecName() const override { return "opus"_ns; }

  // Return true if mimetype is Opus
  static bool IsOpus(const nsACString& aMimeType);

 private:
  ~OpusDataDecoder();

  // Parses the Opus identification header out of aData; fails if it cannot
  // be interpreted as a valid header.
  nsresult DecodeHeader(const unsigned char* aData, size_t aLength);

  // Immutable description of the audio track this decoder was created for.
  const AudioInfo mInfo;
  // Serial event target all decoder operations run on.
  nsCOMPtr<nsISerialEventTarget> mThread;

  // Opus decoder state
  UniquePtr<OpusParser> mOpusParser;
  OpusMSDecoder* mOpusDecoder;

  uint16_t mSkip;  // Samples left to trim before playback.
  // Whether the Opus header has been parsed yet.
  bool mDecodedHeader;

  // Opus padding should only be discarded on the final packet. Once this
  // is set to true, if the reader attempts to decode any further packets it
  // will raise an error so we can indicate that the file is invalid.
  bool mPaddingDiscarded;
  // Running count of decoded frames, used to offset output timestamps.
  int64_t mFrames;
  // Lifetime total of decoded frames (logging/diagnostics).
  int64_t mTotalFrames = 0;
  // Start time of the most recently decoded packet, in microseconds; used to
  // detect discontinuities in the input stream.
  Maybe<int64_t> mLastFrameTime;
  // Opus channel mapping table (inline storage for up to 8 entries).
  AutoTArray<uint8_t, 8> mMappingTable;
  AudioConfig::ChannelLayout::ChannelMap mChannelMap;
  // NOTE(review): presumably selects mono downmix for the default playback
  // device — confirm where this is consulted.
  bool mDefaultPlaybackDeviceMono;
};
+
+} // namespace mozilla
+#endif
diff --git a/dom/media/platforms/agnostic/TheoraDecoder.cpp b/dom/media/platforms/agnostic/TheoraDecoder.cpp
new file mode 100644
index 0000000000..468eda9014
--- /dev/null
+++ b/dom/media/platforms/agnostic/TheoraDecoder.cpp
@@ -0,0 +1,267 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "TheoraDecoder.h"
+
+#include <algorithm>
+
+#include "ImageContainer.h"
+#include "TimeUnits.h"
+#include "XiphExtradata.h"
+#include "gfx2DGlue.h"
+#include "mozilla/PodOperations.h"
+#include "mozilla/TaskQueue.h"
+#include "nsError.h"
+#include "PerformanceRecorder.h"
+#include "VideoUtils.h"
+
+#undef LOG
+#define LOG(arg, ...) \
+ DDMOZ_LOG(gMediaDecoderLog, mozilla::LogLevel::Debug, "::%s: " arg, \
+ __func__, ##__VA_ARGS__)
+
+namespace mozilla {
+
+using namespace gfx;
+using namespace layers;
+
+extern LazyLogModule gMediaDecoderLog;
+
+ogg_packet InitTheoraPacket(const unsigned char* aData, size_t aLength,
+ bool aBOS, bool aEOS, int64_t aGranulepos,
+ int64_t aPacketNo) {
+ ogg_packet packet;
+ packet.packet = const_cast<unsigned char*>(aData);
+ packet.bytes = aLength;
+ packet.b_o_s = aBOS;
+ packet.e_o_s = aEOS;
+ packet.granulepos = aGranulepos;
+ packet.packetno = aPacketNo;
+ return packet;
+}
+
// Constructor only captures parameters and creates the decode task queue;
// header parsing and context allocation happen later in Init().
TheoraDecoder::TheoraDecoder(const CreateDecoderParams& aParams)
    : mImageAllocator(aParams.mKnowsCompositor),
      mImageContainer(aParams.mImageContainer),
      mTaskQueue(TaskQueue::Create(
          GetMediaThreadPool(MediaThreadType::PLATFORM_DECODER),
          "TheoraDecoder")),
      mTheoraInfo{},
      mTheoraComment{},
      mTheoraSetupInfo(nullptr),
      mTheoraDecoderContext(nullptr),
      mPacketCount(0),
      mInfo(aParams.VideoConfig()),
      mTrackingId(aParams.mTrackingId) {
  MOZ_COUNT_CTOR(TheoraDecoder);
}
+
// Releases the libtheora setup/comment/info structures. The decode context
// itself is freed in Shutdown() on the task queue, not here.
TheoraDecoder::~TheoraDecoder() {
  MOZ_COUNT_DTOR(TheoraDecoder);
  th_setup_free(mTheoraSetupInfo);
  th_comment_clear(&mTheoraComment);
  th_info_clear(&mTheoraInfo);
}
+
+RefPtr<ShutdownPromise> TheoraDecoder::Shutdown() {
+ RefPtr<TheoraDecoder> self = this;
+ return InvokeAsync(mTaskQueue, __func__, [self, this]() {
+ if (mTheoraDecoderContext) {
+ th_decode_free(mTheoraDecoderContext);
+ mTheoraDecoderContext = nullptr;
+ }
+ return mTaskQueue->BeginShutdown();
+ });
+}
+
+RefPtr<MediaDataDecoder::InitPromise> TheoraDecoder::Init() {
+ th_comment_init(&mTheoraComment);
+ th_info_init(&mTheoraInfo);
+
+ nsTArray<unsigned char*> headers;
+ nsTArray<size_t> headerLens;
+ if (!XiphExtradataToHeaders(headers, headerLens,
+ mInfo.mCodecSpecificConfig->Elements(),
+ mInfo.mCodecSpecificConfig->Length())) {
+ return InitPromise::CreateAndReject(
+ MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ RESULT_DETAIL("Could not get theora header.")),
+ __func__);
+ }
+ for (size_t i = 0; i < headers.Length(); i++) {
+ if (NS_FAILED(DoDecodeHeader(headers[i], headerLens[i]))) {
+ return InitPromise::CreateAndReject(
+ MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ RESULT_DETAIL("Could not decode theora header.")),
+ __func__);
+ }
+ }
+ if (mPacketCount != 3) {
+ return InitPromise::CreateAndReject(
+ MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ RESULT_DETAIL("Packet count is wrong.")),
+ __func__);
+ }
+
+ mTheoraDecoderContext = th_decode_alloc(&mTheoraInfo, mTheoraSetupInfo);
+ if (mTheoraDecoderContext) {
+ return InitPromise::CreateAndResolve(TrackInfo::kVideoTrack, __func__);
+ } else {
+ return InitPromise::CreateAndReject(
+ MediaResult(NS_ERROR_OUT_OF_MEMORY,
+ RESULT_DETAIL("Could not allocate theora decoder.")),
+ __func__);
+ }
+}
+
+RefPtr<MediaDataDecoder::FlushPromise> TheoraDecoder::Flush() {
+ return InvokeAsync(mTaskQueue, __func__, []() {
+ return FlushPromise::CreateAndResolve(true, __func__);
+ });
+}
+
+nsresult TheoraDecoder::DoDecodeHeader(const unsigned char* aData,
+ size_t aLength) {
+ bool bos = mPacketCount == 0;
+ ogg_packet pkt =
+ InitTheoraPacket(aData, aLength, bos, false, 0, mPacketCount++);
+
+ int r = th_decode_headerin(&mTheoraInfo, &mTheoraComment, &mTheoraSetupInfo,
+ &pkt);
+ return r > 0 ? NS_OK : NS_ERROR_FAILURE;
+}
+
+RefPtr<MediaDataDecoder::DecodePromise> TheoraDecoder::ProcessDecode(
+ MediaRawData* aSample) {
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+
+ MediaInfoFlag flag = MediaInfoFlag::None;
+ flag |= (aSample->mKeyframe ? MediaInfoFlag::KeyFrame
+ : MediaInfoFlag::NonKeyFrame);
+ flag |= MediaInfoFlag::SoftwareDecoding;
+ flag |= MediaInfoFlag::VIDEO_THEORA;
+ Maybe<PerformanceRecorder<DecodeStage>> rec =
+ mTrackingId.map([&](const auto& aId) {
+ return PerformanceRecorder<DecodeStage>("TheoraDecoder"_ns, aId, flag);
+ });
+
+ const unsigned char* aData = aSample->Data();
+ size_t aLength = aSample->Size();
+
+ bool bos = mPacketCount == 0;
+ ogg_packet pkt =
+ InitTheoraPacket(aData, aLength, bos, false,
+ aSample->mTimecode.ToMicroseconds(), mPacketCount++);
+
+ int ret = th_decode_packetin(mTheoraDecoderContext, &pkt, nullptr);
+ if (ret == 0 || ret == TH_DUPFRAME) {
+ th_ycbcr_buffer ycbcr;
+ th_decode_ycbcr_out(mTheoraDecoderContext, ycbcr);
+
+ int hdec = !(mTheoraInfo.pixel_fmt & 1);
+ int vdec = !(mTheoraInfo.pixel_fmt & 2);
+
+ VideoData::YCbCrBuffer b;
+ b.mPlanes[0].mData = ycbcr[0].data;
+ b.mPlanes[0].mStride = ycbcr[0].stride;
+ b.mPlanes[0].mHeight = mTheoraInfo.frame_height;
+ b.mPlanes[0].mWidth = mTheoraInfo.frame_width;
+ b.mPlanes[0].mSkip = 0;
+
+ b.mPlanes[1].mData = ycbcr[1].data;
+ b.mPlanes[1].mStride = ycbcr[1].stride;
+ b.mPlanes[1].mHeight = mTheoraInfo.frame_height >> vdec;
+ b.mPlanes[1].mWidth = mTheoraInfo.frame_width >> hdec;
+ b.mPlanes[1].mSkip = 0;
+
+ b.mPlanes[2].mData = ycbcr[2].data;
+ b.mPlanes[2].mStride = ycbcr[2].stride;
+ b.mPlanes[2].mHeight = mTheoraInfo.frame_height >> vdec;
+ b.mPlanes[2].mWidth = mTheoraInfo.frame_width >> hdec;
+ b.mPlanes[2].mSkip = 0;
+
+ if (vdec) {
+ b.mChromaSubsampling = gfx::ChromaSubsampling::HALF_WIDTH_AND_HEIGHT;
+ } else if (hdec) {
+ b.mChromaSubsampling = gfx::ChromaSubsampling::HALF_WIDTH;
+ }
+
+ b.mYUVColorSpace =
+ DefaultColorSpace({mTheoraInfo.frame_width, mTheoraInfo.frame_height});
+
+ IntRect pictureArea(mTheoraInfo.pic_x, mTheoraInfo.pic_y,
+ mTheoraInfo.pic_width, mTheoraInfo.pic_height);
+
+ VideoInfo info;
+ info.mDisplay = mInfo.mDisplay;
+ RefPtr<VideoData> v = VideoData::CreateAndCopyData(
+ info, mImageContainer, aSample->mOffset, aSample->mTime,
+ aSample->mDuration, b, aSample->mKeyframe, aSample->mTimecode,
+ mInfo.ScaledImageRect(mTheoraInfo.frame_width,
+ mTheoraInfo.frame_height),
+ mImageAllocator);
+ if (!v) {
+ LOG("Image allocation error source %ux%u display %ux%u picture %ux%u",
+ mTheoraInfo.frame_width, mTheoraInfo.frame_height,
+ mInfo.mDisplay.width, mInfo.mDisplay.height, mInfo.mImage.width,
+ mInfo.mImage.height);
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_OUT_OF_MEMORY,
+ RESULT_DETAIL("Insufficient memory")),
+ __func__);
+ }
+
+ rec.apply([&](auto& aRec) {
+ aRec.Record([&](DecodeStage& aStage) {
+ aStage.SetResolution(static_cast<int>(mTheoraInfo.frame_width),
+ static_cast<int>(mTheoraInfo.frame_height));
+ auto format = [&]() -> Maybe<DecodeStage::ImageFormat> {
+ switch (mTheoraInfo.pixel_fmt) {
+ case TH_PF_420:
+ return Some(DecodeStage::YUV420P);
+ case TH_PF_422:
+ return Some(DecodeStage::YUV422P);
+ case TH_PF_444:
+ return Some(DecodeStage::YUV444P);
+ default:
+ return Nothing();
+ }
+ }();
+ format.apply([&](auto& aFmt) { aStage.SetImageFormat(aFmt); });
+ aStage.SetYUVColorSpace(b.mYUVColorSpace);
+ aStage.SetColorRange(b.mColorRange);
+ aStage.SetColorDepth(b.mColorDepth);
+ });
+ });
+
+ return DecodePromise::CreateAndResolve(DecodedData{v}, __func__);
+ }
+ LOG("Theora Decode error: %d", ret);
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("Theora decode error:%d", ret)),
+ __func__);
+}
+
+RefPtr<MediaDataDecoder::DecodePromise> TheoraDecoder::Decode(
+ MediaRawData* aSample) {
+ return InvokeAsync<MediaRawData*>(mTaskQueue, this, __func__,
+ &TheoraDecoder::ProcessDecode, aSample);
+}
+
+RefPtr<MediaDataDecoder::DecodePromise> TheoraDecoder::Drain() {
+ return InvokeAsync(mTaskQueue, __func__, [] {
+ return DecodePromise::CreateAndResolve(DecodedData(), __func__);
+ });
+}
+
/* static */
// Returns true only for the "video/theora" MIME type.
bool TheoraDecoder::IsTheora(const nsACString& aMimeType) {
  return aMimeType.EqualsLiteral("video/theora");
}
+
+} // namespace mozilla
+#undef LOG
diff --git a/dom/media/platforms/agnostic/TheoraDecoder.h b/dom/media/platforms/agnostic/TheoraDecoder.h
new file mode 100644
index 0000000000..23e994b667
--- /dev/null
+++ b/dom/media/platforms/agnostic/TheoraDecoder.h
@@ -0,0 +1,64 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+#if !defined(TheoraDecoder_h_)
+# define TheoraDecoder_h_
+
+# include <stdint.h>
+
+# include "PlatformDecoderModule.h"
+# include "ogg/ogg.h"
+# include "theora/theoradec.h"
+
+namespace mozilla {
+
+DDLoggedTypeDeclNameAndBase(TheoraDecoder, MediaDataDecoder);
+
// Software Theora video decoder wrapping libtheora. Decoding runs on a
// dedicated task queue created in the constructor.
class TheoraDecoder final : public MediaDataDecoder,
                            public DecoderDoctorLifeLogger<TheoraDecoder> {
 public:
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(TheoraDecoder, final);

  explicit TheoraDecoder(const CreateDecoderParams& aParams);

  RefPtr<InitPromise> Init() override;
  RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
  RefPtr<DecodePromise> Drain() override;
  RefPtr<FlushPromise> Flush() override;
  RefPtr<ShutdownPromise> Shutdown() override;

  // Return true if mimetype is a Theora codec
  static bool IsTheora(const nsACString& aMimeType);

  nsCString GetDescriptionName() const override {
    return "theora video decoder"_ns;
  }

  nsCString GetCodecName() const override { return "theora"_ns; }

 private:
  ~TheoraDecoder();
  // Feed one Xiph header packet to libtheora during Init().
  nsresult DoDecodeHeader(const unsigned char* aData, size_t aLength);

  // Decode one packet on the task queue.
  RefPtr<DecodePromise> ProcessDecode(MediaRawData* aSample);

  const RefPtr<layers::KnowsCompositor> mImageAllocator;
  const RefPtr<layers::ImageContainer> mImageContainer;
  const RefPtr<TaskQueue> mTaskQueue;

  // Theora header & decoder state
  th_info mTheoraInfo;
  th_comment mTheoraComment;
  th_setup_info* mTheoraSetupInfo;
  th_dec_ctx* mTheoraDecoderContext;
  // Ogg packet ordinal; after Init() this must be exactly 3 (the three
  // header packets).
  int mPacketCount;

  // Immutable description of the video track.
  const VideoInfo mInfo;
  // Optional id used to record per-decode performance data.
  const Maybe<TrackingId> mTrackingId;
};
+
+} // namespace mozilla
+
+#endif
diff --git a/dom/media/platforms/agnostic/VPXDecoder.cpp b/dom/media/platforms/agnostic/VPXDecoder.cpp
new file mode 100644
index 0000000000..0c4d8234f5
--- /dev/null
+++ b/dom/media/platforms/agnostic/VPXDecoder.cpp
@@ -0,0 +1,676 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "VPXDecoder.h"
+
+#include <algorithm>
+
+#include "BitReader.h"
+#include "BitWriter.h"
+#include "ImageContainer.h"
+#include "TimeUnits.h"
+#include "gfx2DGlue.h"
+#include "gfxUtils.h"
+#include "mozilla/PodOperations.h"
+#include "mozilla/SyncRunnable.h"
+#include "mozilla/TaskQueue.h"
+#include "mozilla/Unused.h"
+#include "nsError.h"
+#include "PerformanceRecorder.h"
+#include "prsystem.h"
+#include "VideoUtils.h"
+#include "vpx/vpx_image.h"
+
+#undef LOG
+#define LOG(arg, ...) \
+ DDMOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, "::%s: " arg, __func__, \
+ ##__VA_ARGS__)
+
+namespace mozilla {
+
+using namespace gfx;
+using namespace layers;
+
+static VPXDecoder::Codec MimeTypeToCodec(const nsACString& aMimeType) {
+ if (aMimeType.EqualsLiteral("video/vp8")) {
+ return VPXDecoder::Codec::VP8;
+ } else if (aMimeType.EqualsLiteral("video/vp9")) {
+ return VPXDecoder::Codec::VP9;
+ }
+ return VPXDecoder::Codec::Unknown;
+}
+
+static nsresult InitContext(vpx_codec_ctx_t* aCtx, const VideoInfo& aInfo,
+ const VPXDecoder::Codec aCodec, bool aLowLatency) {
+ int decode_threads = 2;
+
+ vpx_codec_iface_t* dx = nullptr;
+ if (aCodec == VPXDecoder::Codec::VP8) {
+ dx = vpx_codec_vp8_dx();
+ } else if (aCodec == VPXDecoder::Codec::VP9) {
+ dx = vpx_codec_vp9_dx();
+ if (aInfo.mDisplay.width >= 2048) {
+ decode_threads = 8;
+ } else if (aInfo.mDisplay.width >= 1024) {
+ decode_threads = 4;
+ }
+ }
+ decode_threads = std::min(decode_threads, PR_GetNumberOfProcessors());
+
+ vpx_codec_dec_cfg_t config;
+ config.threads = aLowLatency ? 1 : decode_threads;
+ config.w = config.h = 0; // set after decode
+
+ if (!dx || vpx_codec_dec_init(aCtx, dx, &config, 0)) {
+ return NS_ERROR_FAILURE;
+ }
+ return NS_OK;
+}
+
// Constructor captures parameters, derives the codec from the MIME type and
// zeroes both codec contexts; the contexts are initialized in Init().
VPXDecoder::VPXDecoder(const CreateDecoderParams& aParams)
    : mImageContainer(aParams.mImageContainer),
      mImageAllocator(aParams.mKnowsCompositor),
      mTaskQueue(TaskQueue::Create(
          GetMediaThreadPool(MediaThreadType::PLATFORM_DECODER), "VPXDecoder")),
      mInfo(aParams.VideoConfig()),
      mCodec(MimeTypeToCodec(aParams.VideoConfig().mMimeType)),
      mLowLatency(
          aParams.mOptions.contains(CreateDecoderParams::Option::LowLatency)),
      mTrackingId(aParams.mTrackingId) {
  MOZ_COUNT_CTOR(VPXDecoder);
  PodZero(&mVPX);
  PodZero(&mVPXAlpha);
}
+
// The codec contexts are destroyed in Shutdown(); the destructor only does
// leak-check accounting.
VPXDecoder::~VPXDecoder() { MOZ_COUNT_DTOR(VPXDecoder); }
+
+RefPtr<ShutdownPromise> VPXDecoder::Shutdown() {
+ RefPtr<VPXDecoder> self = this;
+ return InvokeAsync(mTaskQueue, __func__, [self]() {
+ vpx_codec_destroy(&self->mVPX);
+ vpx_codec_destroy(&self->mVPXAlpha);
+ return self->mTaskQueue->BeginShutdown();
+ });
+}
+
+RefPtr<MediaDataDecoder::InitPromise> VPXDecoder::Init() {
+ if (NS_FAILED(InitContext(&mVPX, mInfo, mCodec, mLowLatency))) {
+ return VPXDecoder::InitPromise::CreateAndReject(
+ NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ }
+ if (mInfo.HasAlpha()) {
+ if (NS_FAILED(InitContext(&mVPXAlpha, mInfo, mCodec, mLowLatency))) {
+ return VPXDecoder::InitPromise::CreateAndReject(
+ NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ }
+ }
+ return VPXDecoder::InitPromise::CreateAndResolve(TrackInfo::kVideoTrack,
+ __func__);
+}
+
+RefPtr<MediaDataDecoder::FlushPromise> VPXDecoder::Flush() {
+ return InvokeAsync(mTaskQueue, __func__, []() {
+ return FlushPromise::CreateAndResolve(true, __func__);
+ });
+}
+
+RefPtr<MediaDataDecoder::DecodePromise> VPXDecoder::ProcessDecode(
+ MediaRawData* aSample) {
+ MOZ_ASSERT(mTaskQueue->IsOnCurrentThread());
+
+ MediaInfoFlag flag = MediaInfoFlag::None;
+ flag |= (aSample->mKeyframe ? MediaInfoFlag::KeyFrame
+ : MediaInfoFlag::NonKeyFrame);
+ flag |= MediaInfoFlag::SoftwareDecoding;
+ switch (mCodec) {
+ case Codec::VP8:
+ flag |= MediaInfoFlag::VIDEO_VP8;
+ break;
+ case Codec::VP9:
+ flag |= MediaInfoFlag::VIDEO_VP9;
+ break;
+ default:
+ break;
+ }
+ flag |= MediaInfoFlag::VIDEO_THEORA;
+ auto rec = mTrackingId.map([&](const auto& aId) {
+ return PerformanceRecorder<DecodeStage>("VPXDecoder"_ns, aId, flag);
+ });
+
+ if (vpx_codec_err_t r = vpx_codec_decode(&mVPX, aSample->Data(),
+ aSample->Size(), nullptr, 0)) {
+ LOG("VPX Decode error: %s", vpx_codec_err_to_string(r));
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("VPX error: %s", vpx_codec_err_to_string(r))),
+ __func__);
+ }
+
+ vpx_codec_iter_t iter = nullptr;
+ vpx_image_t* img;
+ vpx_image_t* img_alpha = nullptr;
+ bool alpha_decoded = false;
+ DecodedData results;
+
+ while ((img = vpx_codec_get_frame(&mVPX, &iter))) {
+ NS_ASSERTION(img->fmt == VPX_IMG_FMT_I420 || img->fmt == VPX_IMG_FMT_I444,
+ "WebM image format not I420 or I444");
+ NS_ASSERTION(!alpha_decoded,
+ "Multiple frames per packet that contains alpha");
+
+ if (aSample->AlphaSize() > 0) {
+ if (!alpha_decoded) {
+ MediaResult rv = DecodeAlpha(&img_alpha, aSample);
+ if (NS_FAILED(rv)) {
+ return DecodePromise::CreateAndReject(rv, __func__);
+ }
+ alpha_decoded = true;
+ }
+ }
+ // Chroma shifts are rounded down as per the decoding examples in the SDK
+ VideoData::YCbCrBuffer b;
+ b.mPlanes[0].mData = img->planes[0];
+ b.mPlanes[0].mStride = img->stride[0];
+ b.mPlanes[0].mHeight = img->d_h;
+ b.mPlanes[0].mWidth = img->d_w;
+ b.mPlanes[0].mSkip = 0;
+
+ b.mPlanes[1].mData = img->planes[1];
+ b.mPlanes[1].mStride = img->stride[1];
+ b.mPlanes[1].mSkip = 0;
+
+ b.mPlanes[2].mData = img->planes[2];
+ b.mPlanes[2].mStride = img->stride[2];
+ b.mPlanes[2].mSkip = 0;
+
+ if (img->fmt == VPX_IMG_FMT_I420) {
+ b.mChromaSubsampling = gfx::ChromaSubsampling::HALF_WIDTH_AND_HEIGHT;
+
+ b.mPlanes[1].mHeight = (img->d_h + 1) >> img->y_chroma_shift;
+ b.mPlanes[1].mWidth = (img->d_w + 1) >> img->x_chroma_shift;
+
+ b.mPlanes[2].mHeight = (img->d_h + 1) >> img->y_chroma_shift;
+ b.mPlanes[2].mWidth = (img->d_w + 1) >> img->x_chroma_shift;
+ } else if (img->fmt == VPX_IMG_FMT_I444) {
+ b.mPlanes[1].mHeight = img->d_h;
+ b.mPlanes[1].mWidth = img->d_w;
+
+ b.mPlanes[2].mHeight = img->d_h;
+ b.mPlanes[2].mWidth = img->d_w;
+ } else {
+ LOG("VPX Unknown image format");
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("VPX Unknown image format")),
+ __func__);
+ }
+ b.mYUVColorSpace = [&]() {
+ switch (img->cs) {
+ case VPX_CS_BT_601:
+ case VPX_CS_SMPTE_170:
+ case VPX_CS_SMPTE_240:
+ return gfx::YUVColorSpace::BT601;
+ case VPX_CS_BT_709:
+ return gfx::YUVColorSpace::BT709;
+ case VPX_CS_BT_2020:
+ return gfx::YUVColorSpace::BT2020;
+ default:
+ return DefaultColorSpace({img->d_w, img->d_h});
+ }
+ }();
+ b.mColorRange = img->range == VPX_CR_FULL_RANGE ? gfx::ColorRange::FULL
+ : gfx::ColorRange::LIMITED;
+
+ RefPtr<VideoData> v;
+ if (!img_alpha) {
+ v = VideoData::CreateAndCopyData(
+ mInfo, mImageContainer, aSample->mOffset, aSample->mTime,
+ aSample->mDuration, b, aSample->mKeyframe, aSample->mTimecode,
+ mInfo.ScaledImageRect(img->d_w, img->d_h), mImageAllocator);
+ } else {
+ VideoData::YCbCrBuffer::Plane alpha_plane;
+ alpha_plane.mData = img_alpha->planes[0];
+ alpha_plane.mStride = img_alpha->stride[0];
+ alpha_plane.mHeight = img_alpha->d_h;
+ alpha_plane.mWidth = img_alpha->d_w;
+ alpha_plane.mSkip = 0;
+ v = VideoData::CreateAndCopyData(
+ mInfo, mImageContainer, aSample->mOffset, aSample->mTime,
+ aSample->mDuration, b, alpha_plane, aSample->mKeyframe,
+ aSample->mTimecode, mInfo.ScaledImageRect(img->d_w, img->d_h));
+ }
+
+ if (!v) {
+ LOG("Image allocation error source %ux%u display %ux%u picture %ux%u",
+ img->d_w, img->d_h, mInfo.mDisplay.width, mInfo.mDisplay.height,
+ mInfo.mImage.width, mInfo.mImage.height);
+ return DecodePromise::CreateAndReject(
+ MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__), __func__);
+ }
+
+ rec.apply([&](auto& aRec) {
+ return aRec.Record([&](DecodeStage& aStage) {
+ aStage.SetResolution(static_cast<int>(img->d_w),
+ static_cast<int>(img->d_h));
+ auto format = [&]() -> Maybe<DecodeStage::ImageFormat> {
+ switch (img->fmt) {
+ case VPX_IMG_FMT_I420:
+ return Some(DecodeStage::YUV420P);
+ case VPX_IMG_FMT_I444:
+ return Some(DecodeStage::YUV444P);
+ default:
+ return Nothing();
+ }
+ }();
+ format.apply([&](auto& aFmt) { aStage.SetImageFormat(aFmt); });
+ aStage.SetYUVColorSpace(b.mYUVColorSpace);
+ aStage.SetColorRange(b.mColorRange);
+ aStage.SetColorDepth(b.mColorDepth);
+ });
+ });
+
+ results.AppendElement(std::move(v));
+ }
+ return DecodePromise::CreateAndResolve(std::move(results), __func__);
+}
+
+RefPtr<MediaDataDecoder::DecodePromise> VPXDecoder::Decode(
+ MediaRawData* aSample) {
+ return InvokeAsync<MediaRawData*>(mTaskQueue, this, __func__,
+ &VPXDecoder::ProcessDecode, aSample);
+}
+
+RefPtr<MediaDataDecoder::DecodePromise> VPXDecoder::Drain() {
+ return InvokeAsync(mTaskQueue, __func__, [] {
+ return DecodePromise::CreateAndResolve(DecodedData(), __func__);
+ });
+}
+
// Decode the sample's alpha side-channel with the secondary codec context.
// On success *aImgAlpha points at the decoded alpha frame.
MediaResult VPXDecoder::DecodeAlpha(vpx_image_t** aImgAlpha,
                                    const MediaRawData* aSample) {
  vpx_codec_err_t r = vpx_codec_decode(&mVPXAlpha, aSample->AlphaData(),
                                       aSample->AlphaSize(), nullptr, 0);
  if (r) {
    LOG("VPX decode alpha error: %s", vpx_codec_err_to_string(r));
    return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                       RESULT_DETAIL("VPX decode alpha error: %s",
                                     vpx_codec_err_to_string(r)));
  }

  vpx_codec_iter_t iter = nullptr;

  // NOTE(review): if libvpx produces no frame this is nullptr and the
  // assertion below dereferences it in debug builds — confirm callers
  // guarantee one alpha frame per alpha packet.
  *aImgAlpha = vpx_codec_get_frame(&mVPXAlpha, &iter);
  NS_ASSERTION((*aImgAlpha)->fmt == VPX_IMG_FMT_I420 ||
                   (*aImgAlpha)->fmt == VPX_IMG_FMT_I444,
               "WebM image format not I420 or I444");

  return NS_OK;
}
+
+nsCString VPXDecoder::GetCodecName() const {
+ switch (mCodec) {
+ case Codec::VP8:
+ return "vp8"_ns;
+ case Codec::VP9:
+ return "vp9"_ns;
+ default:
+ return "unknown"_ns;
+ }
+}
+
+/* static */
+bool VPXDecoder::IsVPX(const nsACString& aMimeType, uint8_t aCodecMask) {
+ return ((aCodecMask & VPXDecoder::VP8) &&
+ aMimeType.EqualsLiteral("video/vp8")) ||
+ ((aCodecMask & VPXDecoder::VP9) &&
+ aMimeType.EqualsLiteral("video/vp9"));
+}
+
/* static */
// Convenience wrapper: true only for the "video/vp8" MIME type.
bool VPXDecoder::IsVP8(const nsACString& aMimeType) {
  return IsVPX(aMimeType, VPXDecoder::VP8);
}
+
/* static */
// Convenience wrapper: true only for the "video/vp9" MIME type.
bool VPXDecoder::IsVP9(const nsACString& aMimeType) {
  return IsVPX(aMimeType, VPXDecoder::VP9);
}
+
+/* static */
+bool VPXDecoder::IsKeyframe(Span<const uint8_t> aBuffer, Codec aCodec) {
+ VPXStreamInfo info;
+ return GetStreamInfo(aBuffer, info, aCodec) && info.mKeyFrame;
+}
+
+/* static */
+gfx::IntSize VPXDecoder::GetFrameSize(Span<const uint8_t> aBuffer,
+ Codec aCodec) {
+ VPXStreamInfo info;
+ if (!GetStreamInfo(aBuffer, info, aCodec)) {
+ return gfx::IntSize();
+ }
+ return info.mImage;
+}
+
+/* static */
+gfx::IntSize VPXDecoder::GetDisplaySize(Span<const uint8_t> aBuffer,
+ Codec aCodec) {
+ VPXStreamInfo info;
+ if (!GetStreamInfo(aBuffer, info, aCodec)) {
+ return gfx::IntSize();
+ }
+ return info.mDisplay;
+}
+
+/* static */
+int VPXDecoder::GetVP9Profile(Span<const uint8_t> aBuffer) {
+ VPXStreamInfo info;
+ if (!GetStreamInfo(aBuffer, info, Codec::VP9)) {
+ return -1;
+ }
+ return info.mProfile;
+}
+
/* static */
// Parse the uncompressed portion of a VP8 or VP9 frame header into aInfo.
// Returns false when the buffer cannot be a valid frame for aCodec; may
// return true with only mKeyFrame populated (VP8 interframes, VP9
// show_existing_frame) since the other fields are not present there.
bool VPXDecoder::GetStreamInfo(Span<const uint8_t> aBuffer,
                               VPXDecoder::VPXStreamInfo& aInfo, Codec aCodec) {
  if (aBuffer.IsEmpty()) {
    // Can't be good.
    return false;
  }

  aInfo = VPXStreamInfo();

  if (aCodec == Codec::VP8) {
    aInfo.mKeyFrame = (aBuffer[0] & 1) ==
                      0;  // frame type (0 for key frames, 1 for interframes)
    if (!aInfo.mKeyFrame) {
      // We can't retrieve the required information from interframes.
      return true;
    }
    if (aBuffer.Length() < 10) {
      return false;
    }
    uint8_t version = (aBuffer[0] >> 1) & 0x7;
    if (version > 3) {
      return false;
    }
    // VP8 keyframes carry a fixed 3-byte start code at offset 3.
    uint8_t start_code_byte_0 = aBuffer[3];
    uint8_t start_code_byte_1 = aBuffer[4];
    uint8_t start_code_byte_2 = aBuffer[5];
    if (start_code_byte_0 != 0x9d || start_code_byte_1 != 0x01 ||
        start_code_byte_2 != 0x2a) {
      return false;
    }
    // 14-bit little-endian width/height follow the start code.
    uint16_t width = (aBuffer[6] | aBuffer[7] << 8) & 0x3fff;
    uint16_t height = (aBuffer[8] | aBuffer[9] << 8) & 0x3fff;

    // aspect ratio isn't found in the VP8 frame header.
    aInfo.mImage = gfx::IntSize(width, height);
    aInfo.mDisplayAndImageDifferent = false;
    aInfo.mDisplay = aInfo.mImage;
    return true;
  }

  // VP9: bit-exact parse of the uncompressed header.
  BitReader br(aBuffer.Elements(), aBuffer.Length() * 8);
  uint32_t frameMarker = br.ReadBits(2);  // frame_marker
  if (frameMarker != 2) {
    // That's not a valid vp9 header.
    return false;
  }
  uint32_t profile = br.ReadBits(1);  // profile_low_bit
  profile |= br.ReadBits(1) << 1;     // profile_high_bit
  if (profile == 3) {
    profile += br.ReadBits(1);  // reserved_zero
    if (profile > 3) {
      // reserved_zero wasn't zero.
      return false;
    }
  }

  aInfo.mProfile = profile;

  bool show_existing_frame = br.ReadBits(1);
  if (show_existing_frame) {
    if (profile == 3 && aBuffer.Length() < 2) {
      return false;
    }
    Unused << br.ReadBits(3);  // frame_to_show_map_idx
    return true;
  }

  if (aBuffer.Length() < 10) {
    // Header too small.
    return false;
  }

  aInfo.mKeyFrame = !br.ReadBits(1);
  bool show_frame = br.ReadBits(1);
  bool error_resilient_mode = br.ReadBits(1);

  // frame_sync_code(): three fixed bytes 0x49 0x83 0x42.
  auto frame_sync_code = [&]() -> bool {
    uint8_t frame_sync_byte_1 = br.ReadBits(8);
    uint8_t frame_sync_byte_2 = br.ReadBits(8);
    uint8_t frame_sync_byte_3 = br.ReadBits(8);
    return frame_sync_byte_1 == 0x49 && frame_sync_byte_2 == 0x83 &&
           frame_sync_byte_3 == 0x42;
  };

  // color_config(): bit depth, color space, range and subsampling.
  auto color_config = [&]() -> bool {
    aInfo.mBitDepth = 8;
    if (profile >= 2) {
      bool ten_or_twelve_bit = br.ReadBits(1);
      aInfo.mBitDepth = ten_or_twelve_bit ? 12 : 10;
    }
    aInfo.mColorSpace = br.ReadBits(3);
    if (aInfo.mColorSpace != 7 /* CS_RGB */) {
      aInfo.mFullRange = br.ReadBits(1);
      if (profile == 1 || profile == 3) {
        aInfo.mSubSampling_x = br.ReadBits(1);
        aInfo.mSubSampling_y = br.ReadBits(1);
        if (br.ReadBits(1)) {  // reserved_zero
          return false;
        };
      } else {
        aInfo.mSubSampling_x = true;
        aInfo.mSubSampling_y = true;
      }
    } else {
      aInfo.mFullRange = true;
      if (profile == 1 || profile == 3) {
        aInfo.mSubSampling_x = false;
        aInfo.mSubSampling_y = false;
        if (br.ReadBits(1)) {  // reserved_zero
          return false;
        };
      } else {
        // sRGB color space is only available with VP9 profile 1.
        return false;
      }
    }
    return true;
  };

  // frame_size(): coded width/height, stored minus one.
  auto frame_size = [&]() {
    int32_t width = static_cast<int32_t>(br.ReadBits(16)) + 1;
    int32_t height = static_cast<int32_t>(br.ReadBits(16)) + 1;
    aInfo.mImage = gfx::IntSize(width, height);
  };

  // render_size(): optional distinct display size.
  auto render_size = [&]() {
    // render_and_frame_size_different
    aInfo.mDisplayAndImageDifferent = br.ReadBits(1);
    if (aInfo.mDisplayAndImageDifferent) {
      int32_t width = static_cast<int32_t>(br.ReadBits(16)) + 1;
      int32_t height = static_cast<int32_t>(br.ReadBits(16)) + 1;
      aInfo.mDisplay = gfx::IntSize(width, height);
    } else {
      aInfo.mDisplay = aInfo.mImage;
    }
  };

  if (aInfo.mKeyFrame) {
    if (!frame_sync_code()) {
      return false;
    }
    if (!color_config()) {
      return false;
    }
    frame_size();
    render_size();
  } else {
    // Non-keyframes only carry size/color info when intra-only.
    bool intra_only = show_frame ? false : br.ReadBit();
    if (!error_resilient_mode) {
      Unused << br.ReadBits(2);  // reset_frame_context
    }
    if (intra_only) {
      if (!frame_sync_code()) {
        return false;
      }
      if (profile > 0) {
        if (!color_config()) {
          return false;
        }
      } else {
        // Profile 0 intra-only frames imply BT.601 4:2:0 at 8 bits.
        aInfo.mColorSpace = 1;  // CS_BT_601
        aInfo.mSubSampling_x = true;
        aInfo.mSubSampling_y = true;
        aInfo.mBitDepth = 8;
      }
      Unused << br.ReadBits(8);  // refresh_frame_flags
      frame_size();
      render_size();
    }
  }
  return true;
}
+
+// Ref: "VP Codec ISO Media File Format Binding, v1.0, 2017-03-31"
+// <https://www.webmproject.org/vp9/mp4/>
+//
+// class VPCodecConfigurationBox extends FullBox('vpcC', version = 1, 0)
+// {
+// VPCodecConfigurationRecord() vpcConfig;
+// }
+//
+// aligned (8) class VPCodecConfigurationRecord {
+// unsigned int (8) profile;
+// unsigned int (8) level;
+// unsigned int (4) bitDepth;
+// unsigned int (3) chromaSubsampling;
+// unsigned int (1) videoFullRangeFlag;
+// unsigned int (8) colourPrimaries;
+// unsigned int (8) transferCharacteristics;
+// unsigned int (8) matrixCoefficients;
+//   unsigned int (16) codecInitializationDataSize;
+//   unsigned int (8)[] codecInitializationData;
+// }
+
+/* static */
+// Serialize aInfo as the payload of a 'vpcC' VPCodecConfigurationBox
+// (version 1, flags 0) into aDestBox, following the field layout quoted
+// in the spec excerpt above. Bits are appended to aDestBox via BitWriter.
+void VPXDecoder::GetVPCCBox(MediaByteBuffer* aDestBox,
+                            const VPXStreamInfo& aInfo) {
+  BitWriter writer(aDestBox);
+
+  // Collapse the two boolean subsampling flags into the 3-bit
+  // chromaSubsampling enumeration used by the box.
+  int chroma = [&]() {
+    if (aInfo.mSubSampling_x && aInfo.mSubSampling_y) {
+      return 1;  // 420 Colocated;
+    }
+    if (aInfo.mSubSampling_x && !aInfo.mSubSampling_y) {
+      return 2;  // 422
+    }
+    if (!aInfo.mSubSampling_x && !aInfo.mSubSampling_y) {
+      return 3;  // 444
+    }
+    // This indicates 4:4:0 subsampling, which is not expressable in the
+    // 'vpcC' box. Default to 4:2:0.
+    return 1;
+  }();
+
+  writer.WriteU8(1);       // version
+  writer.WriteBits(0, 24); // flags
+
+  writer.WriteU8(aInfo.mProfile); // profile
+  writer.WriteU8(10);             // level set it to 1.0
+
+  writer.WriteBits(aInfo.mBitDepth, 4); // bitdepth
+  writer.WriteBits(chroma, 3);          // chroma
+  writer.WriteBit(aInfo.mFullRange);    // full/restricted range
+
+  // See VPXDecoder::VPXStreamInfo enums
+  writer.WriteU8(aInfo.mColorPrimaries);   // color primaries
+  writer.WriteU8(aInfo.mTransferFunction); // transfer characteristics
+  writer.WriteU8(2); // matrix coefficients: unspecified
+
+  writer.WriteBits(0,
+                   16); // codecIntializationDataSize (must be 0 for VP8/VP9)
+}
+
+/* static */
+// Populate aDestInfo's colour metadata and 'vpcC' extradata from a VPX
+// codec string (parsed by ExtractVPXCodecDetails). Returns false — with
+// aDestInfo untouched — when the codec string cannot be parsed.
+bool VPXDecoder::SetVideoInfo(VideoInfo* aDestInfo, const nsAString& aCodec) {
+  VPXDecoder::VPXStreamInfo info;
+  uint8_t level = 0;
+  uint8_t chroma = 1;  // default: 4:2:0 colocated (see SetChroma)
+  VideoColorSpace colorSpace;
+  if (!ExtractVPXCodecDetails(aCodec, info.mProfile, level, info.mBitDepth,
+                              chroma, colorSpace)) {
+    return false;
+  }
+
+  aDestInfo->mColorPrimaries =
+      gfxUtils::CicpToColorPrimaries(colorSpace.mPrimaries, sPDMLog);
+  aDestInfo->mTransferFunction =
+      gfxUtils::CicpToTransferFunction(colorSpace.mTransfer);
+  aDestInfo->mColorDepth = gfx::ColorDepthForBitDepth(info.mBitDepth);
+  VPXDecoder::SetChroma(info, chroma);
+  info.mFullRange = colorSpace.mRange == ColorRange::FULL;
+  // Re-encode the parsed parameters as a 'vpcC' box so downstream
+  // consumers receive them as extradata.
+  RefPtr<MediaByteBuffer> extraData = new MediaByteBuffer();
+  VPXDecoder::GetVPCCBox(extraData, info);
+  aDestInfo->mExtraData = extraData;
+  return true;
+}
+
+/* static */
+// Translate the 'vpcC' chromaSubsampling enumeration into the pair of
+// subsampling flags carried by VPXStreamInfo. Values outside the known
+// enumeration (> 3) leave aDestInfo untouched.
+void VPXDecoder::SetChroma(VPXStreamInfo& aDestInfo, uint8_t chroma) {
+  if (chroma <= 1) {
+    // 0: 4:2:0 vertical, 1: 4:2:0 colocated.
+    aDestInfo.mSubSampling_x = true;
+    aDestInfo.mSubSampling_y = true;
+  } else if (chroma == 2) {
+    // 4:2:2
+    aDestInfo.mSubSampling_x = true;
+    aDestInfo.mSubSampling_y = false;
+  } else if (chroma == 3) {
+    // 4:4:4
+    aDestInfo.mSubSampling_x = false;
+    aDestInfo.mSubSampling_y = false;
+  }
+}
+
+/* static */
+// Parse a 'vpcC' VPCodecConfigurationBox payload (version 1) from aBox and
+// populate aDestInfo with the profile, bit depth, chroma subsampling,
+// range and colour metadata it carries.
+void VPXDecoder::ReadVPCCBox(VPXStreamInfo& aDestInfo, MediaByteBuffer* aBox) {
+  BitReader reader(aBox);
+
+  reader.ReadBits(8);   // version
+  reader.ReadBits(24);  // flags
+  aDestInfo.mProfile = reader.ReadBits(8);
+  reader.ReadBits(8);  // level
+
+  aDestInfo.mBitDepth = reader.ReadBits(4);
+  SetChroma(aDestInfo, reader.ReadBits(3));
+  aDestInfo.mFullRange = reader.ReadBit();
+
+  aDestInfo.mColorPrimaries = reader.ReadBits(8);    // color primaries
+  aDestInfo.mTransferFunction = reader.ReadBits(8);  // transfer characteristics
+  reader.ReadBits(8);  // matrix coefficients
+
+  // codecInitializationDataSize (must be 0 for VP8/VP9). Read it outside
+  // the assertion so the bit reader ends in the same state in all build
+  // types; MOZ_ASSERT (and any expression inside it) is compiled out in
+  // release builds.
+  const uint32_t initDataSize = reader.ReadBits(16);
+  MOZ_ASSERT(initDataSize == 0);
+  Unused << initDataSize;
+}
+
+} // namespace mozilla
+#undef LOG
diff --git a/dom/media/platforms/agnostic/VPXDecoder.h b/dom/media/platforms/agnostic/VPXDecoder.h
new file mode 100644
index 0000000000..e5fe80128f
--- /dev/null
+++ b/dom/media/platforms/agnostic/VPXDecoder.h
@@ -0,0 +1,208 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+#if !defined(VPXDecoder_h_)
+# define VPXDecoder_h_
+
+# include <stdint.h>
+
+# include "PlatformDecoderModule.h"
+# include "mozilla/Span.h"
+# include "mozilla/gfx/Types.h"
+# include "vpx/vp8dx.h"
+# include "vpx/vpx_codec.h"
+# include "vpx/vpx_decoder.h"
+
+namespace mozilla {
+
+DDLoggedTypeDeclNameAndBase(VPXDecoder, MediaDataDecoder);
+
+// Video decoder backed by libvpx (vp8dx interface), handling both VP8 and
+// VP9, with optional alpha-plane decoding via a second codec context.
+class VPXDecoder final : public MediaDataDecoder,
+                         public DecoderDoctorLifeLogger<VPXDecoder> {
+ public:
+  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(VPXDecoder, final);
+
+  explicit VPXDecoder(const CreateDecoderParams& aParams);
+
+  RefPtr<InitPromise> Init() override;
+  RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
+  RefPtr<DecodePromise> Drain() override;
+  RefPtr<FlushPromise> Flush() override;
+  RefPtr<ShutdownPromise> Shutdown() override;
+  nsCString GetDescriptionName() const override {
+    return "libvpx video decoder"_ns;
+  }
+  nsCString GetCodecName() const override;
+
+  // Bitmask of supported codecs; combinable in aCodecMask below.
+  enum Codec : uint8_t {
+    VP8 = 1 << 0,
+    VP9 = 1 << 1,
+    Unknown = 1 << 7,
+  };
+
+  // Return true if aMimeType is a one of the strings used by our demuxers to
+  // identify VPX of the specified type. Does not parse general content type
+  // strings, i.e. white space matters.
+  static bool IsVPX(const nsACString& aMimeType,
+                    uint8_t aCodecMask = VP8 | VP9);
+  static bool IsVP8(const nsACString& aMimeType);
+  static bool IsVP9(const nsACString& aMimeType);
+
+  // Return true if a sample is a keyframe for the specified codec.
+  static bool IsKeyframe(Span<const uint8_t> aBuffer, Codec aCodec);
+
+  // Return the frame dimensions for a sample for the specified codec.
+  static gfx::IntSize GetFrameSize(Span<const uint8_t> aBuffer, Codec aCodec);
+  // Return the display dimensions for a sample for the specified codec.
+  static gfx::IntSize GetDisplaySize(Span<const uint8_t> aBuffer, Codec aCodec);
+
+  // Return the VP9 profile as per https://www.webmproject.org/vp9/profiles/
+  // Return negative value if error.
+  static int GetVP9Profile(Span<const uint8_t> aBuffer);
+
+  // Stream parameters parsed from an uncompressed frame header or from a
+  // 'vpcC' configuration box.
+  struct VPXStreamInfo {
+    gfx::IntSize mImage;  // Coded frame size.
+    bool mDisplayAndImageDifferent = false;
+    gfx::IntSize mDisplay;  // Intended display size.
+    bool mKeyFrame = false;
+
+    uint8_t mProfile = 0;
+    uint8_t mBitDepth = 8;
+    /*
+    0 CS_UNKNOWN Unknown (in this case the color space must be signaled outside
+    the VP9 bitstream).
+    1 CS_BT_601 Rec. ITU-R BT.601-7
+    2 CS_BT_709 Rec. ITU-R BT.709-6
+    3 CS_SMPTE_170 SMPTE-170
+    4 CS_SMPTE_240 SMPTE-240
+    5 CS_BT_2020 Rec. ITU-R BT.2020-2
+    6 CS_RESERVED Reserved
+    7 CS_RGB sRGB (IEC 61966-2-1)
+    */
+    int mColorSpace = 1;  // CS_BT_601
+
+    // Map the VP9 colour-space code above onto our YUV colour space enum;
+    // unknown/reserved/sRGB codes fall back to the default.
+    gfx::YUVColorSpace ColorSpace() const {
+      switch (mColorSpace) {
+        case 1:
+        case 3:
+        case 4:
+          return gfx::YUVColorSpace::BT601;
+        case 2:
+          return gfx::YUVColorSpace::BT709;
+        case 5:
+          return gfx::YUVColorSpace::BT2020;
+        default:
+          return gfx::YUVColorSpace::Default;
+      }
+    }
+
+    uint8_t mColorPrimaries = gfx::CICP::ColourPrimaries::CP_UNSPECIFIED;
+    // CICP colour primaries reduced to the two spaces we composite in;
+    // everything that is not BT.2020 is treated as BT.709.
+    gfx::ColorSpace2 ColorPrimaries() const {
+      switch (mColorPrimaries) {
+        case gfx::CICP::ColourPrimaries::CP_BT709:
+          return gfx::ColorSpace2::BT709;
+        case gfx::CICP::ColourPrimaries::CP_UNSPECIFIED:
+          return gfx::ColorSpace2::BT709;
+        case gfx::CICP::ColourPrimaries::CP_BT2020:
+          return gfx::ColorSpace2::BT2020;
+        default:
+          return gfx::ColorSpace2::BT709;
+      }
+    }
+
+    uint8_t mTransferFunction =
+        gfx::CICP::TransferCharacteristics::TC_UNSPECIFIED;
+    // CICP transfer characteristics mapped to our enum; unknown values fall
+    // back to BT.709.
+    gfx::TransferFunction TransferFunction() const {
+      switch (mTransferFunction) {
+        case gfx::CICP::TransferCharacteristics::TC_BT709:
+          return gfx::TransferFunction::BT709;
+        case gfx::CICP::TransferCharacteristics::TC_SRGB:
+          return gfx::TransferFunction::SRGB;
+        case gfx::CICP::TransferCharacteristics::TC_SMPTE2084:
+          return gfx::TransferFunction::PQ;
+        case gfx::CICP::TransferCharacteristics::TC_HLG:
+          return gfx::TransferFunction::HLG;
+        default:
+          return gfx::TransferFunction::BT709;
+      }
+    }
+
+    /*
+    mFullRange == false then:
+      For BitDepth equals 8:
+        Y is between 16 and 235 inclusive.
+        U and V are between 16 and 240 inclusive.
+      For BitDepth equals 10:
+        Y is between 64 and 940 inclusive.
+        U and V are between 64 and 960 inclusive.
+      For BitDepth equals 12:
+        Y is between 256 and 3760.
+        U and V are between 256 and 3840 inclusive.
+    mFullRange == true then:
+      No restriction on Y, U, V values.
+    */
+    bool mFullRange = false;
+
+    gfx::ColorRange ColorRange() const {
+      return mFullRange ? gfx::ColorRange::FULL : gfx::ColorRange::LIMITED;
+    }
+
+    /*
+    Sub-sampling, used only for non sRGB colorspace.
+    subsampling_x subsampling_y Description
+        0             0         YUV 4:4:4
+        0             1         YUV 4:4:0
+        1             0         YUV 4:2:2
+        1             1         YUV 4:2:0
+    */
+    bool mSubSampling_x = true;
+    bool mSubSampling_y = true;
+
+    // True when aOther describes the same coded stream configuration (size,
+    // profile, depth, subsampling, colour space, range).
+    bool IsCompatible(const VPXStreamInfo& aOther) const {
+      return mImage == aOther.mImage && mProfile == aOther.mProfile &&
+             mBitDepth == aOther.mBitDepth &&
+             mSubSampling_x == aOther.mSubSampling_x &&
+             mSubSampling_y == aOther.mSubSampling_y &&
+             mColorSpace == aOther.mColorSpace &&
+             mFullRange == aOther.mFullRange;
+    }
+  };
+
+  static bool GetStreamInfo(Span<const uint8_t> aBuffer, VPXStreamInfo& aInfo,
+                            Codec aCodec);
+
+  static void GetVPCCBox(MediaByteBuffer* aDestBox, const VPXStreamInfo& aInfo);
+  // Set extradata for a VP8/VP9 track, returning false if the codec was
+  // invalid.
+  static bool SetVideoInfo(VideoInfo* aDestInfo, const nsAString& aCodec);
+
+  static void SetChroma(VPXStreamInfo& aDestInfo, uint8_t chroma);
+  static void ReadVPCCBox(VPXStreamInfo& aDestInfo, MediaByteBuffer* aBox);
+
+ private:
+  ~VPXDecoder();
+  RefPtr<DecodePromise> ProcessDecode(MediaRawData* aSample);
+  MediaResult DecodeAlpha(vpx_image_t** aImgAlpha, const MediaRawData* aSample);
+
+  const RefPtr<layers::ImageContainer> mImageContainer;
+  RefPtr<layers::KnowsCompositor> mImageAllocator;
+  const RefPtr<TaskQueue> mTaskQueue;
+
+  // VPx decoder state
+  vpx_codec_ctx_t mVPX;
+
+  // VPx alpha decoder state
+  vpx_codec_ctx_t mVPXAlpha;
+
+  const VideoInfo mInfo;
+
+  const Codec mCodec;
+  const bool mLowLatency;
+  const Maybe<TrackingId> mTrackingId;
+};
+
+} // namespace mozilla
+
+#endif
diff --git a/dom/media/platforms/agnostic/VorbisDecoder.cpp b/dom/media/platforms/agnostic/VorbisDecoder.cpp
new file mode 100644
index 0000000000..01c0e8dbe5
--- /dev/null
+++ b/dom/media/platforms/agnostic/VorbisDecoder.cpp
@@ -0,0 +1,364 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "VorbisDecoder.h"
+
+#include "VideoUtils.h"
+#include "VorbisUtils.h"
+#include "XiphExtradata.h"
+#include "mozilla/Logging.h"
+#include "mozilla/PodOperations.h"
+#include "mozilla/SyncRunnable.h"
+
+#undef LOG
+#define LOG(type, msg) MOZ_LOG(sPDMLog, type, msg)
+
+namespace mozilla {
+
+// Build an ogg_packet describing aLength bytes at aData. libvorbis only
+// reads the payload, but ogg_packet stores a non-const pointer, hence the
+// const_cast; no ownership is transferred.
+ogg_packet InitVorbisPacket(const unsigned char* aData, size_t aLength,
+                            bool aBOS, bool aEOS, int64_t aGranulepos,
+                            int64_t aPacketNo) {
+  ogg_packet pkt;
+  pkt.packet = const_cast<unsigned char*>(aData);
+  pkt.bytes = aLength;
+  pkt.b_o_s = aBOS;
+  pkt.e_o_s = aEOS;
+  pkt.granulepos = aGranulepos;
+  pkt.packetno = aPacketNo;
+  return pkt;
+}
+
+// Capture the track's AudioInfo; the libvorbis structures are only set up
+// later, in Init().
+VorbisDataDecoder::VorbisDataDecoder(const CreateDecoderParams& aParams)
+    : mInfo(aParams.AudioConfig()), mPacketCount(0), mFrames(0) {
+  // Zero these member vars to avoid crashes in Vorbis clear functions when
+  // destructor is called before |Init|.
+  PodZero(&mVorbisBlock);
+  PodZero(&mVorbisDsp);
+  PodZero(&mVorbisInfo);
+  PodZero(&mVorbisComment);
+}
+
+// Release all libvorbis state. The constructor zeroes these structs, so the
+// clear calls are safe even when Init() was never run.
+VorbisDataDecoder::~VorbisDataDecoder() {
+  vorbis_block_clear(&mVorbisBlock);
+  vorbis_dsp_clear(&mVorbisDsp);
+  vorbis_info_clear(&mVorbisInfo);
+  vorbis_comment_clear(&mVorbisComment);
+}
+
+// Nothing asynchronous to tear down; resolve immediately (cleanup happens
+// in the destructor).
+RefPtr<ShutdownPromise> VorbisDataDecoder::Shutdown() {
+  // mThread may not be set if Init hasn't been called first.
+  MOZ_ASSERT(!mThread || mThread->IsOnCurrentThread());
+  return ShutdownPromise::CreateAndResolve(true, __func__);
+}
+
+// Extract the three Vorbis header packets from the codec-specific
+// extradata, feed them to libvorbis, initialize the synthesis state and
+// validate the channel layout. Rejects the promise with
+// NS_ERROR_DOM_MEDIA_FATAL_ERR on any failure.
+RefPtr<MediaDataDecoder::InitPromise> VorbisDataDecoder::Init() {
+  mThread = GetCurrentSerialEventTarget();
+  vorbis_info_init(&mVorbisInfo);
+  vorbis_comment_init(&mVorbisComment);
+  PodZero(&mVorbisDsp);
+  PodZero(&mVorbisBlock);
+
+  AutoTArray<unsigned char*, 4> headers;
+  AutoTArray<size_t, 4> headerLens;
+  MOZ_ASSERT(mInfo.mCodecSpecificConfig.is<VorbisCodecSpecificData>(),
+             "Vorbis decoder should get vorbis codec specific data");
+  RefPtr<MediaByteBuffer> vorbisHeaderBlob =
+      GetAudioCodecSpecificBlob(mInfo.mCodecSpecificConfig);
+  // Split the Xiph-laced extradata blob into its individual header packets.
+  if (!XiphExtradataToHeaders(headers, headerLens, vorbisHeaderBlob->Elements(),
+                              vorbisHeaderBlob->Length())) {
+    LOG(LogLevel::Warning, ("VorbisDecoder: could not get vorbis header"));
+    return InitPromise::CreateAndReject(
+        MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+                    RESULT_DETAIL("Could not get vorbis header.")),
+        __func__);
+  }
+  for (size_t i = 0; i < headers.Length(); i++) {
+    if (NS_FAILED(DecodeHeader(headers[i], headerLens[i]))) {
+      LOG(LogLevel::Warning,
+          ("VorbisDecoder: could not decode vorbis header"));
+      return InitPromise::CreateAndReject(
+          MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+                      RESULT_DETAIL("Could not decode vorbis header.")),
+          __func__);
+    }
+  }
+
+  MOZ_ASSERT(mPacketCount == 3);
+
+  int r = vorbis_synthesis_init(&mVorbisDsp, &mVorbisInfo);
+  if (r) {
+    LOG(LogLevel::Warning, ("VorbisDecoder: could not init vorbis decoder"));
+    return InitPromise::CreateAndReject(
+        MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+                    RESULT_DETAIL("Synthesis init fail.")),
+        __func__);
+  }
+
+  r = vorbis_block_init(&mVorbisDsp, &mVorbisBlock);
+  if (r) {
+    LOG(LogLevel::Warning, ("VorbisDecoder: could not init vorbis block"));
+    return InitPromise::CreateAndReject(
+        MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+                    RESULT_DETAIL("Block init fail.")),
+        __func__);
+  }
+
+  // A rate/channel mismatch between container and codec is suspicious but
+  // not fatal; log and carry on with the codec's values.
+  if (mInfo.mRate != (uint32_t)mVorbisDsp.vi->rate) {
+    LOG(LogLevel::Warning, ("VorbisDecoder: Invalid Vorbis header: container "
+                            "and codec rate do not match!"));
+  }
+  if (mInfo.mChannels != (uint32_t)mVorbisDsp.vi->channels) {
+    LOG(LogLevel::Warning, ("VorbisDecoder: Invalid Vorbis header: container "
+                            "and codec channels do not match!"));
+  }
+
+  AudioConfig::ChannelLayout layout(mVorbisDsp.vi->channels);
+  if (!layout.IsValid()) {
+    LOG(LogLevel::Warning,
+        ("VorbisDecoder: Invalid Vorbis header: invalid channel layout!"));
+    return InitPromise::CreateAndReject(
+        MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+                    RESULT_DETAIL("Invalid audio layout.")),
+        __func__);
+  }
+
+  return InitPromise::CreateAndResolve(TrackInfo::kAudioTrack, __func__);
+}
+
+// Feed one of the three Vorbis header packets to libvorbis. The first
+// packet (mPacketCount == 0) is flagged beginning-of-stream. Returns
+// NS_ERROR_FAILURE when libvorbis rejects the packet.
+nsresult VorbisDataDecoder::DecodeHeader(const unsigned char* aData,
+                                         size_t aLength) {
+  bool bos = mPacketCount == 0;
+  ogg_packet pkt =
+      InitVorbisPacket(aData, aLength, bos, false, 0, mPacketCount++);
+  MOZ_ASSERT(mPacketCount <= 3);
+
+  int r = vorbis_synthesis_headerin(&mVorbisInfo, &mVorbisComment, &pkt);
+  return r == 0 ? NS_OK : NS_ERROR_FAILURE;
+}
+
+// Decode one compressed Vorbis packet into zero or more AudioData chunks.
+// Consecutive packets sharing aSample->mTime belong to the same block;
+// mFrames counts the frames already emitted for that timestamp so each
+// chunk's start time advances monotonically.
+RefPtr<MediaDataDecoder::DecodePromise> VorbisDataDecoder::Decode(
+    MediaRawData* aSample) {
+  MOZ_ASSERT(mThread->IsOnCurrentThread());
+  PROCESS_DECODE_LOG(aSample);
+
+  const unsigned char* aData = aSample->Data();
+  size_t aLength = aSample->Size();
+  int64_t aOffset = aSample->mOffset;
+
+  MOZ_ASSERT(mPacketCount >= 3);
+
+  if (!mLastFrameTime ||
+      mLastFrameTime.ref() != aSample->mTime.ToMicroseconds()) {
+    // We are starting a new block.
+    mFrames = 0;
+    mLastFrameTime = Some(aSample->mTime.ToMicroseconds());
+  }
+
+  ogg_packet pkt =
+      InitVorbisPacket(aData, aLength, false, aSample->mEOS,
+                       aSample->mTimecode.ToMicroseconds(), mPacketCount++);
+
+  int err = vorbis_synthesis(&mVorbisBlock, &pkt);
+  if (err) {
+    // Log before rejecting; these LOG calls used to sit after the return
+    // statements and were unreachable.
+    LOG(LogLevel::Warning, ("vorbis_synthesis returned an error"));
+    return DecodePromise::CreateAndReject(
+        MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+                    RESULT_DETAIL("vorbis_synthesis:%d", err)),
+        __func__);
+  }
+
+  err = vorbis_synthesis_blockin(&mVorbisDsp, &mVorbisBlock);
+  if (err) {
+    LOG(LogLevel::Warning, ("vorbis_synthesis_blockin returned an error"));
+    return DecodePromise::CreateAndReject(
+        MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+                    RESULT_DETAIL("vorbis_synthesis_blockin:%d", err)),
+        __func__);
+  }
+
+  VorbisPCMValue** pcm = 0;
+  int32_t frames = vorbis_synthesis_pcmout(&mVorbisDsp, &pcm);
+  if (frames == 0) {
+    return DecodePromise::CreateAndResolve(DecodedData(), __func__);
+  }
+
+  DecodedData results;
+  while (frames > 0) {
+    uint32_t channels = mVorbisDsp.vi->channels;
+    uint32_t rate = mVorbisDsp.vi->rate;
+    AlignedAudioBuffer buffer(frames * channels);
+    if (!buffer) {
+      LOG(LogLevel::Warning, ("VorbisDecoder: cannot allocate buffer"));
+      return DecodePromise::CreateAndReject(
+          MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__), __func__);
+    }
+    // Interleave libvorbis' planar per-channel output into a packed buffer.
+    for (uint32_t j = 0; j < channels; ++j) {
+      VorbisPCMValue* channel = pcm[j];
+      for (uint32_t i = 0; i < uint32_t(frames); ++i) {
+        buffer[i * channels + j] = MOZ_CONVERT_VORBIS_SAMPLE(channel[i]);
+      }
+    }
+
+    auto duration = media::TimeUnit(frames, rate);
+    if (!duration.IsValid()) {
+      LOG(LogLevel::Warning, ("VorbisDecoder: invalid packet duration"));
+      return DecodePromise::CreateAndReject(
+          MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
+                      RESULT_DETAIL("Overflow converting audio duration")),
+          __func__);
+    }
+    auto total_duration = media::TimeUnit(mFrames, rate);
+    if (!total_duration.IsValid()) {
+      LOG(LogLevel::Warning, ("VorbisDecoder: invalid total duration"));
+      return DecodePromise::CreateAndReject(
+          MediaResult(
+              NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
+              RESULT_DETAIL("Overflow converting audio total_duration")),
+          __func__);
+    }
+
+    auto time = total_duration + aSample->mTime;
+    if (!time.IsValid()) {
+      LOG(LogLevel::Warning, ("VorbisDecoder: invalid sample time"));
+      return DecodePromise::CreateAndReject(
+          MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
+                      RESULT_DETAIL(
+                          "Overflow adding total_duration and aSample->mTime")),
+          __func__);
+    }
+
+    // Lazily build a converter that remaps the Vorbis channel order to
+    // SMPTE order; it works in place on the buffer.
+    if (!mAudioConverter) {
+      const AudioConfig::ChannelLayout layout =
+          AudioConfig::ChannelLayout(channels, VorbisLayout(channels));
+      AudioConfig in(layout, channels, rate);
+      AudioConfig out(AudioConfig::ChannelLayout::SMPTEDefault(layout),
+                      channels, rate);
+      mAudioConverter = MakeUnique<AudioConverter>(in, out);
+    }
+    MOZ_ASSERT(mAudioConverter->CanWorkInPlace());
+    AudioSampleBuffer data(std::move(buffer));
+    data = mAudioConverter->Process(std::move(data));
+
+    RefPtr<AudioData> audio =
+        new AudioData(aOffset, time, data.Forget(), channels, rate,
+                      mAudioConverter->OutputConfig().Layout().Map());
+    MOZ_DIAGNOSTIC_ASSERT(duration == audio->mDuration, "must be equal");
+    results.AppendElement(std::move(audio));
+    mFrames += frames;
+    // Tell libvorbis how many frames we consumed, then ask for more.
+    err = vorbis_synthesis_read(&mVorbisDsp, frames);
+    if (err) {
+      LOG(LogLevel::Warning, ("VorbisDecoder: vorbis_synthesis_read"));
+      return DecodePromise::CreateAndReject(
+          MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+                      RESULT_DETAIL("vorbis_synthesis_read:%d", err)),
+          __func__);
+    }
+
+    frames = vorbis_synthesis_pcmout(&mVorbisDsp, &pcm);
+  }
+  return DecodePromise::CreateAndResolve(std::move(results), __func__);
+}
+
+// Vorbis produces no delayed output; draining yields nothing extra.
+RefPtr<MediaDataDecoder::DecodePromise> VorbisDataDecoder::Drain() {
+  MOZ_ASSERT(mThread->IsOnCurrentThread());
+  return DecodePromise::CreateAndResolve(DecodedData(), __func__);
+}
+
+// Reset the synthesis state ahead of a seek and forget the current block
+// timestamp so the next Decode() starts a fresh block.
+RefPtr<MediaDataDecoder::FlushPromise> VorbisDataDecoder::Flush() {
+  MOZ_ASSERT(mThread->IsOnCurrentThread());
+  // Ignore failed results from vorbis_synthesis_restart. They
+  // aren't fatal and it fails when ResetDecode is called at a
+  // time when no vorbis data has been read.
+  vorbis_synthesis_restart(&mVorbisDsp);
+  mLastFrameTime.reset();
+  return FlushPromise::CreateAndResolve(true, __func__);
+}
+
+/* static */
+// Exact (whitespace-sensitive) MIME type check for Vorbis audio.
+bool VorbisDataDecoder::IsVorbis(const nsACString& aMimeType) {
+  return aMimeType.EqualsLiteral("audio/vorbis");
+}
+
+/* static */
+// Map a channel count (1-8) onto the canonical Vorbis speaker order;
+// returns nullptr for counts outside that range. The returned arrays have
+// static storage duration and must not be freed by the caller.
+const AudioConfig::Channel* VorbisDataDecoder::VorbisLayout(
+    uint32_t aChannels) {
+  // From https://www.xiph.org/vorbis/doc/Vorbis_I_spec.html
+  // Section 4.3.9.
+  typedef AudioConfig::Channel Channel;
+
+  switch (aChannels) {
+    case 1:  // the stream is monophonic
+    {
+      static const Channel config[] = {AudioConfig::CHANNEL_FRONT_CENTER};
+      return config;
+    }
+    case 2:  // the stream is stereo. channel order: left, right
+    {
+      static const Channel config[] = {AudioConfig::CHANNEL_FRONT_LEFT,
+                                       AudioConfig::CHANNEL_FRONT_RIGHT};
+      return config;
+    }
+    case 3:  // the stream is a 1d-surround encoding. channel order: left,
+             // center, right
+    {
+      static const Channel config[] = {AudioConfig::CHANNEL_FRONT_LEFT,
+                                       AudioConfig::CHANNEL_FRONT_CENTER,
+                                       AudioConfig::CHANNEL_FRONT_RIGHT};
+      return config;
+    }
+    case 4:  // the stream is quadraphonic surround. channel order: front left,
+             // front right, rear left, rear right
+    {
+      static const Channel config[] = {
+          AudioConfig::CHANNEL_FRONT_LEFT, AudioConfig::CHANNEL_FRONT_RIGHT,
+          AudioConfig::CHANNEL_BACK_LEFT, AudioConfig::CHANNEL_BACK_RIGHT};
+      return config;
+    }
+    case 5:  // the stream is five-channel surround. channel order: front left,
+             // center, front right, rear left, rear right
+    {
+      static const Channel config[] = {
+          AudioConfig::CHANNEL_FRONT_LEFT, AudioConfig::CHANNEL_FRONT_CENTER,
+          AudioConfig::CHANNEL_FRONT_RIGHT, AudioConfig::CHANNEL_BACK_LEFT,
+          AudioConfig::CHANNEL_BACK_RIGHT};
+      return config;
+    }
+    case 6:  // the stream is 5.1 surround. channel order: front left, center,
+             // front right, rear left, rear right, LFE
+    {
+      static const Channel config[] = {
+          AudioConfig::CHANNEL_FRONT_LEFT, AudioConfig::CHANNEL_FRONT_CENTER,
+          AudioConfig::CHANNEL_FRONT_RIGHT, AudioConfig::CHANNEL_BACK_LEFT,
+          AudioConfig::CHANNEL_BACK_RIGHT, AudioConfig::CHANNEL_LFE};
+      return config;
+    }
+    case 7:  // surround. channel order: front left, center, front right, side
+             // left, side right, rear center, LFE
+    {
+      static const Channel config[] = {
+          AudioConfig::CHANNEL_FRONT_LEFT, AudioConfig::CHANNEL_FRONT_CENTER,
+          AudioConfig::CHANNEL_FRONT_RIGHT, AudioConfig::CHANNEL_SIDE_LEFT,
+          AudioConfig::CHANNEL_SIDE_RIGHT, AudioConfig::CHANNEL_BACK_CENTER,
+          AudioConfig::CHANNEL_LFE};
+      return config;
+    }
+    case 8:  // the stream is 7.1 surround. channel order: front left, center,
+             // front right, side left, side right, rear left, rear right, LFE
+    {
+      static const Channel config[] = {
+          AudioConfig::CHANNEL_FRONT_LEFT, AudioConfig::CHANNEL_FRONT_CENTER,
+          AudioConfig::CHANNEL_FRONT_RIGHT, AudioConfig::CHANNEL_SIDE_LEFT,
+          AudioConfig::CHANNEL_SIDE_RIGHT, AudioConfig::CHANNEL_BACK_LEFT,
+          AudioConfig::CHANNEL_BACK_RIGHT, AudioConfig::CHANNEL_LFE};
+      return config;
+    }
+    default:
+      return nullptr;
+  }
+}
+
+} // namespace mozilla
+#undef LOG
diff --git a/dom/media/platforms/agnostic/VorbisDecoder.h b/dom/media/platforms/agnostic/VorbisDecoder.h
new file mode 100644
index 0000000000..ce61ccdcad
--- /dev/null
+++ b/dom/media/platforms/agnostic/VorbisDecoder.h
@@ -0,0 +1,66 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+#if !defined(VorbisDecoder_h_)
+# define VorbisDecoder_h_
+
+# include "AudioConverter.h"
+# include "PlatformDecoderModule.h"
+# include "mozilla/Maybe.h"
+
+# ifdef MOZ_TREMOR
+# include "tremor/ivorbiscodec.h"
+# else
+# include "vorbis/codec.h"
+# endif
+
+namespace mozilla {
+
+DDLoggedTypeDeclNameAndBase(VorbisDataDecoder, MediaDataDecoder);
+
+// Audio decoder wrapping libvorbis (or Tremor's fixed-point implementation
+// when MOZ_TREMOR is defined). All entry points assert they run on the
+// serial event target captured in Init().
+class VorbisDataDecoder final
+    : public MediaDataDecoder,
+      public DecoderDoctorLifeLogger<VorbisDataDecoder> {
+ public:
+  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(VorbisDataDecoder, final);
+
+  explicit VorbisDataDecoder(const CreateDecoderParams& aParams);
+
+  RefPtr<InitPromise> Init() override;
+  RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
+  RefPtr<DecodePromise> Drain() override;
+  RefPtr<FlushPromise> Flush() override;
+  RefPtr<ShutdownPromise> Shutdown() override;
+  nsCString GetDescriptionName() const override {
+    return "vorbis audio decoder"_ns;
+  }
+  nsCString GetCodecName() const override { return "vorbis"_ns; }
+
+  // Return true if mimetype is Vorbis
+  static bool IsVorbis(const nsACString& aMimeType);
+  static const AudioConfig::Channel* VorbisLayout(uint32_t aChannels);
+
+ private:
+  ~VorbisDataDecoder();
+
+  // Feed one of the three Vorbis header packets to libvorbis.
+  nsresult DecodeHeader(const unsigned char* aData, size_t aLength);
+
+  const AudioInfo mInfo;
+  // Serial event target captured in Init(); used to assert thread affinity.
+  nsCOMPtr<nsISerialEventTarget> mThread;
+
+  // Vorbis decoder state
+  vorbis_info mVorbisInfo;
+  vorbis_comment mVorbisComment;
+  vorbis_dsp_state mVorbisDsp;
+  vorbis_block mVorbisBlock;
+
+  int64_t mPacketCount;  // Packets fed to libvorbis, headers included.
+  int64_t mFrames;       // Frames emitted for the current timestamp block.
+  Maybe<int64_t> mLastFrameTime;  // Timestamp (us) of the current block.
+  UniquePtr<AudioConverter> mAudioConverter;  // Vorbis -> SMPTE order.
+};
+
+} // namespace mozilla
+#endif
diff --git a/dom/media/platforms/agnostic/WAVDecoder.cpp b/dom/media/platforms/agnostic/WAVDecoder.cpp
new file mode 100644
index 0000000000..e8dc0dc38d
--- /dev/null
+++ b/dom/media/platforms/agnostic/WAVDecoder.cpp
@@ -0,0 +1,162 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "WAVDecoder.h"
+
+#include "AudioSampleFormat.h"
+#include "BufferReader.h"
+#include "VideoUtils.h"
+#include "mozilla/Casting.h"
+#include "mozilla/SyncRunnable.h"
+
+namespace mozilla {
+
+// Expand one 8-bit G.711 A-law sample to 16-bit linear PCM.
+// NOTE(review): G.711 defines the sign bit (0x80) as '1' for positive
+// values; this maps 0x80 to a negative result, i.e. inverted polarity
+// relative to the CCITT reference decoder — confirm this is intentional
+// (a polarity flip is inaudible, but worth documenting).
+int16_t DecodeALawSample(uint8_t aValue) {
+  aValue = aValue ^ 0x55;  // Undo A-law's even-bit inversion.
+  int8_t sign = (aValue & 0x80) ? -1 : 1;
+  uint8_t exponent = (aValue & 0x70) >> 4;  // Segment number, 0-7.
+  uint8_t mantissa = aValue & 0x0F;
+  int16_t sample = mantissa << 4;
+  switch (exponent) {
+    case 0:
+      sample += 8;  // Half a quantization step.
+      break;
+    case 1:
+      sample += 0x108;
+      break;
+    default:
+      // Segments 2-7: add the segment offset, then scale up.
+      sample += 0x108;
+      sample <<= exponent - 1;
+  }
+  return sign * sample;
+}
+
+// Expand one 8-bit G.711 mu-law sample to 16-bit linear PCM.
+int16_t DecodeULawSample(uint8_t aValue) {
+  aValue = aValue ^ 0xFF;  // mu-law bytes are stored bit-complemented.
+  int8_t sign = (aValue & 0x80) ? -1 : 1;
+  uint8_t exponent = (aValue & 0x70) >> 4;  // Segment number, 0-7.
+  uint8_t mantissa = aValue & 0x0F;
+  // Closed form of the segmented mu-law expansion, scaled toward 16 bits.
+  int16_t sample = (33 + 2 * mantissa) * (2 << (exponent + 1)) - 33;
+  return sign * sample;
+}
+
+// Stateless apart from the track description; nothing else to initialize.
+WaveDataDecoder::WaveDataDecoder(const CreateDecoderParams& aParams)
+    : mInfo(aParams.AudioConfig()) {}
+
+// No resources to release; resolve immediately.
+RefPtr<ShutdownPromise> WaveDataDecoder::Shutdown() {
+  // mThread may not be set if Init hasn't been called first.
+  MOZ_ASSERT(!mThread || mThread->IsOnCurrentThread());
+  return ShutdownPromise::CreateAndResolve(true, __func__);
+}
+
+// Capture the calling event target for later thread-affinity assertions;
+// WAV needs no codec setup.
+RefPtr<MediaDataDecoder::InitPromise> WaveDataDecoder::Init() {
+  mThread = GetCurrentSerialEventTarget();
+  return InitPromise::CreateAndResolve(TrackInfo::kAudioTrack, __func__);
+}
+
+// Convert one chunk of raw WAV payload into a single AudioData. The WAVE
+// format tag travels in mInfo.mProfile: 3 = IEEE float, 6 = A-law,
+// 7 = mu-law; anything else is treated as integer PCM of mInfo.mBitDepth
+// (8/16/24) bits, little-endian, channel-interleaved.
+// NOTE(review): assumes the demuxer guarantees non-zero mBitDepth and
+// mChannels (both divide the frame count below) — confirm at the caller.
+RefPtr<MediaDataDecoder::DecodePromise> WaveDataDecoder::Decode(
+    MediaRawData* aSample) {
+  MOZ_ASSERT(mThread->IsOnCurrentThread());
+  size_t aLength = aSample->Size();
+  BufferReader aReader(aSample->Data(), aLength);
+  int64_t aOffset = aSample->mOffset;
+
+  // Number of complete frames present in this chunk.
+  int32_t frames = aLength * 8 / mInfo.mBitDepth / mInfo.mChannels;
+
+  AlignedAudioBuffer buffer(frames * mInfo.mChannels);
+  if (!buffer) {
+    return DecodePromise::CreateAndReject(
+        MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__), __func__);
+  }
+  // Decode frame by frame, channel by channel; any short read rejects the
+  // whole chunk with the reader's error.
+  for (int i = 0; i < frames; ++i) {
+    for (unsigned int j = 0; j < mInfo.mChannels; ++j) {
+      if (mInfo.mProfile == 3) {  // IEEE Float Data
+        auto res = aReader.ReadLEU32();
+        if (res.isErr()) {
+          return DecodePromise::CreateAndReject(
+              MediaResult(res.unwrapErr(), __func__), __func__);
+        }
+        float sample = BitwiseCast<float>(res.unwrap());
+        buffer[i * mInfo.mChannels + j] =
+            FloatToAudioSample<AudioDataValue>(sample);
+      } else if (mInfo.mProfile == 6) {  // ALAW Data
+        auto res = aReader.ReadU8();
+        if (res.isErr()) {
+          return DecodePromise::CreateAndReject(
+              MediaResult(res.unwrapErr(), __func__), __func__);
+        }
+        int16_t decoded = DecodeALawSample(res.unwrap());
+        buffer[i * mInfo.mChannels + j] =
+            IntegerToAudioSample<AudioDataValue>(decoded);
+      } else if (mInfo.mProfile == 7) {  // ULAW Data
+        auto res = aReader.ReadU8();
+        if (res.isErr()) {
+          return DecodePromise::CreateAndReject(
+              MediaResult(res.unwrapErr(), __func__), __func__);
+        }
+        int16_t decoded = DecodeULawSample(res.unwrap());
+        buffer[i * mInfo.mChannels + j] =
+            IntegerToAudioSample<AudioDataValue>(decoded);
+      } else {  // PCM Data
+        if (mInfo.mBitDepth == 8) {
+          auto res = aReader.ReadU8();
+          if (res.isErr()) {
+            return DecodePromise::CreateAndReject(
+                MediaResult(res.unwrapErr(), __func__), __func__);
+          }
+          buffer[i * mInfo.mChannels + j] =
+              UInt8bitToAudioSample<AudioDataValue>(res.unwrap());
+        } else if (mInfo.mBitDepth == 16) {
+          auto res = aReader.ReadLE16();
+          if (res.isErr()) {
+            return DecodePromise::CreateAndReject(
+                MediaResult(res.unwrapErr(), __func__), __func__);
+          }
+          buffer[i * mInfo.mChannels + j] =
+              IntegerToAudioSample<AudioDataValue>(res.unwrap());
+        } else if (mInfo.mBitDepth == 24) {
+          auto res = aReader.ReadLE24();
+          if (res.isErr()) {
+            return DecodePromise::CreateAndReject(
+                MediaResult(res.unwrapErr(), __func__), __func__);
+          }
+          buffer[i * mInfo.mChannels + j] =
+              Int24bitToAudioSample<AudioDataValue>(res.unwrap());
+        }
+      }
+    }
+  }
+
+  return DecodePromise::CreateAndResolve(
+      DecodedData{new AudioData(aOffset, aSample->mTime, std::move(buffer),
+                                mInfo.mChannels, mInfo.mRate)},
+      __func__);
+}
+
+// WAV decoding is stateless; there is never any buffered output to drain.
+RefPtr<MediaDataDecoder::DecodePromise> WaveDataDecoder::Drain() {
+  MOZ_ASSERT(mThread->IsOnCurrentThread());
+  return DecodePromise::CreateAndResolve(DecodedData(), __func__);
+}
+
+// Stateless decoder: nothing to reset on seek.
+RefPtr<MediaDataDecoder::FlushPromise> WaveDataDecoder::Flush() {
+  MOZ_ASSERT(mThread->IsOnCurrentThread());
+  return FlushPromise::CreateAndResolve(true, __func__);
+}
+
+/* static */
+// Exact (whitespace-sensitive) MIME type check for the WAV variants our
+// demuxers and WebAudio produce.
+bool WaveDataDecoder::IsWave(const nsACString& aMimeType) {
+  // Some WebAudio uses "audio/x-wav",
+  // WAVdemuxer uses "audio/wave; codecs=aNum".
+  return aMimeType.EqualsLiteral("audio/x-wav") ||
+         aMimeType.EqualsLiteral("audio/wave; codecs=1") ||
+         aMimeType.EqualsLiteral("audio/wave; codecs=3") ||
+         aMimeType.EqualsLiteral("audio/wave; codecs=6") ||
+         aMimeType.EqualsLiteral("audio/wave; codecs=7") ||
+         aMimeType.EqualsLiteral("audio/wave; codecs=65534");
+}
+
+} // namespace mozilla
+#undef LOG
diff --git a/dom/media/platforms/agnostic/WAVDecoder.h b/dom/media/platforms/agnostic/WAVDecoder.h
new file mode 100644
index 0000000000..8e3b614bd9
--- /dev/null
+++ b/dom/media/platforms/agnostic/WAVDecoder.h
@@ -0,0 +1,44 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(WaveDecoder_h_)
+# define WaveDecoder_h_
+
+# include "PlatformDecoderModule.h"
+
+namespace mozilla {
+
+DDLoggedTypeDeclNameAndBase(WaveDataDecoder, MediaDataDecoder);
+
+// Software decoder for uncompressed PCM coming out of the WAV demuxer;
+// "decoding" is just converting the raw integer samples into the internal
+// AudioDataValue representation.
+class WaveDataDecoder final : public MediaDataDecoder,
+ public DecoderDoctorLifeLogger<WaveDataDecoder> {
+ public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(WaveDataDecoder, final);
+
+ explicit WaveDataDecoder(const CreateDecoderParams& aParams);
+
+ // Return true if mimetype is Wave
+ static bool IsWave(const nsACString& aMimeType);
+
+ RefPtr<InitPromise> Init() override;
+ RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
+ RefPtr<DecodePromise> Drain() override;
+ RefPtr<FlushPromise> Flush() override;
+ RefPtr<ShutdownPromise> Shutdown() override;
+ nsCString GetDescriptionName() const override {
+ return "wave audio decoder"_ns;
+ }
+ nsCString GetCodecName() const override { return "wave"_ns; }
+
+ private:
+ ~WaveDataDecoder() = default;
+
+ // Stream parameters (channels, rate, bit depth) taken from aParams.
+ const AudioInfo mInfo;
+ // Serial event target all entry points must run on (asserted in each
+ // method). NOTE(review): presumably captured during Init() — confirm.
+ nsCOMPtr<nsISerialEventTarget> mThread;
+};
+
+} // namespace mozilla
+#endif
diff --git a/dom/media/platforms/agnostic/bytestreams/Adts.cpp b/dom/media/platforms/agnostic/bytestreams/Adts.cpp
new file mode 100644
index 0000000000..5f31904d9c
--- /dev/null
+++ b/dom/media/platforms/agnostic/bytestreams/Adts.cpp
@@ -0,0 +1,94 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "Adts.h"
+#include "MediaData.h"
+#include "mozilla/Array.h"
+#include "mozilla/ArrayUtils.h"
+
+namespace mozilla {
+
+static const int kADTSHeaderSize = 7;
+
+// Map a sample rate to its MPEG-4 Audio samplingFrequencyIndex
+// (ISO/IEC 14496-3). Returns -1 when the rate is below the lowest table
+// entry (7350 Hz). Rates between two entries map to the first (highest)
+// table rate that is <= the requested rate.
+int8_t Adts::GetFrequencyIndex(uint32_t aSamplesPerSecond) {
+ static const uint32_t freq_lookup[] = {96000, 88200, 64000, 48000, 44100,
+ 32000, 24000, 22050, 16000, 12000,
+ 11025, 8000, 7350, 0};
+
+ int8_t i = 0;
+ while (freq_lookup[i] && aSamplesPerSecond < freq_lookup[i]) {
+ i++;
+ }
+
+ // Hit the 0 sentinel: no table entry is small enough.
+ if (!freq_lookup[i]) {
+ return -1;
+ }
+
+ return i;
+}
+
+// Prepend a 7-byte ADTS header (protection_absent, i.e. no CRC) to a raw
+// AAC frame in place. Fails when the framed size cannot fit the 13-bit
+// length field or when any parameter is out of range. For encrypted
+// samples the clear (plain) subsample range is grown to cover the header.
+bool Adts::ConvertSample(uint16_t aChannelCount, int8_t aFrequencyIndex,
+ int8_t aProfile, MediaRawData* aSample) {
+ size_t newSize = aSample->Size() + kADTSHeaderSize;
+
+ // ADTS header uses 13 bits for packet size.
+ if (newSize >= (1 << 13) || aChannelCount > 15 || aFrequencyIndex < 0 ||
+ aProfile < 1 || aProfile > 4) {
+ return false;
+ }
+
+ Array<uint8_t, kADTSHeaderSize> header;
+ // Syncword 0xFFF, MPEG-4, layer 0, protection_absent = 1 (no CRC).
+ header[0] = 0xff;
+ header[1] = 0xf1;
+ // Profile (2 bits), frequency index (4 bits), top bit of channel config.
+ header[2] =
+ ((aProfile - 1) << 6) + (aFrequencyIndex << 2) + (aChannelCount >> 2);
+ // Remaining channel bits, then the 13-bit frame length spread across
+ // header[3..5]; the tail bits encode buffer fullness 0x7FF (VBR) and a
+ // frame count of 1.
+ header[3] = ((aChannelCount & 0x3) << 6) + (newSize >> 11);
+ header[4] = (newSize & 0x7ff) >> 3;
+ header[5] = ((newSize & 7) << 5) + 0x1f;
+ header[6] = 0xfc;
+
+ UniquePtr<MediaRawDataWriter> writer(aSample->CreateWriter());
+ if (!writer->Prepend(&header[0], ArrayLength(header))) {
+ return false;
+ }
+
+ if (aSample->mCrypto.IsEncrypted()) {
+ if (aSample->mCrypto.mPlainSizes.Length() == 0) {
+ // No subsample layout yet: the header is clear, the rest encrypted.
+ // (aSample->Size() already includes the prepended header here.)
+ writer->mCrypto.mPlainSizes.AppendElement(kADTSHeaderSize);
+ writer->mCrypto.mEncryptedSizes.AppendElement(aSample->Size() -
+ kADTSHeaderSize);
+ } else {
+ // Grow the first clear subsample to absorb the header.
+ writer->mCrypto.mPlainSizes[0] += kADTSHeaderSize;
+ }
+ }
+
+ return true;
+}
+
+// Strip the ADTS header added by ConvertSample(), restoring the raw AAC
+// frame. Returns false when the data does not start with the exact byte
+// pattern ConvertSample() writes. The first clear subsample size is
+// shrunk back accordingly for encrypted samples.
+bool Adts::RevertSample(MediaRawData* aSample) {
+ if (aSample->Size() < kADTSHeaderSize) {
+ return false;
+ }
+
+ {
+ const uint8_t* header = aSample->Data();
+ if (header[0] != 0xff || header[1] != 0xf1 || header[6] != 0xfc) {
+ // Not ADTS.
+ return false;
+ }
+ }
+
+ UniquePtr<MediaRawDataWriter> writer(aSample->CreateWriter());
+ writer->PopFront(kADTSHeaderSize);
+
+ if (aSample->mCrypto.IsEncrypted()) {
+ if (aSample->mCrypto.mPlainSizes.Length() > 0 &&
+ writer->mCrypto.mPlainSizes[0] >= kADTSHeaderSize) {
+ writer->mCrypto.mPlainSizes[0] -= kADTSHeaderSize;
+ }
+ }
+
+ return true;
+}
+} // namespace mozilla
diff --git a/dom/media/platforms/agnostic/bytestreams/Adts.h b/dom/media/platforms/agnostic/bytestreams/Adts.h
new file mode 100644
index 0000000000..c2b6b558b6
--- /dev/null
+++ b/dom/media/platforms/agnostic/bytestreams/Adts.h
@@ -0,0 +1,22 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef ADTS_H_
+#define ADTS_H_
+
+#include <stdint.h>
+
+namespace mozilla {
+class MediaRawData;
+
+// Helpers to convert raw AAC samples to and from the ADTS framing some
+// platform decoders require.
+class Adts {
+ public:
+ // Map a sample rate to its MPEG-4 samplingFrequencyIndex, or -1.
+ static int8_t GetFrequencyIndex(uint32_t aSamplesPerSecond);
+ // Prepend an ADTS header in place; false on out-of-range parameters.
+ static bool ConvertSample(uint16_t aChannelCount, int8_t aFrequencyIndex,
+ int8_t aProfile, mozilla::MediaRawData* aSample);
+ // Strip a previously added ADTS header; false when none is present.
+ static bool RevertSample(MediaRawData* aSample);
+};
+} // namespace mozilla
+
+#endif
diff --git a/dom/media/platforms/agnostic/bytestreams/AnnexB.cpp b/dom/media/platforms/agnostic/bytestreams/AnnexB.cpp
new file mode 100644
index 0000000000..07e9c2dde8
--- /dev/null
+++ b/dom/media/platforms/agnostic/bytestreams/AnnexB.cpp
@@ -0,0 +1,364 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/ArrayUtils.h"
+#include "mozilla/EndianUtils.h"
+#include "mozilla/ResultExtensions.h"
+#include "mozilla/Unused.h"
+#include "AnnexB.h"
+#include "BufferReader.h"
+#include "ByteWriter.h"
+#include "MediaData.h"
+
+namespace mozilla {
+
+static const uint8_t kAnnexBDelimiter[] = {0, 0, 0, 1};
+
+// Convert an AVCC sample (length-prefixed NAL units) in place to Annex B
+// (0x00000001 start-code delimited). Samples with 1/2/3-byte length
+// prefixes are first normalised to 4 bytes. Non-AVCC samples are left
+// untouched. When aAddSPS is set, SPS/PPS from the extradata are
+// prepended to keyframes.
+Result<Ok, nsresult> AnnexB::ConvertSampleToAnnexB(
+ mozilla::MediaRawData* aSample, bool aAddSPS) {
+ MOZ_ASSERT(aSample);
+
+ if (!IsAVCC(aSample)) {
+ return Ok();
+ }
+ MOZ_ASSERT(aSample->Data());
+
+ MOZ_TRY(ConvertSampleTo4BytesAVCC(aSample));
+
+ if (aSample->Size() < 4) {
+ // Nothing to do, it's corrupted anyway.
+ return Ok();
+ }
+
+ BufferReader reader(aSample->Data(), aSample->Size());
+
+ nsTArray<uint8_t> tmp;
+ ByteWriter<BigEndian> writer(tmp);
+
+ // Rewrite each length-prefixed NAL as start-code + payload.
+ while (reader.Remaining() >= 4) {
+ uint32_t nalLen;
+ MOZ_TRY_VAR(nalLen, reader.ReadU32());
+ const uint8_t* p = reader.Read(nalLen);
+
+ if (!writer.Write(kAnnexBDelimiter, ArrayLength(kAnnexBDelimiter))) {
+ return Err(NS_ERROR_OUT_OF_MEMORY);
+ }
+ if (!p) {
+ // Length field overruns the buffer; keep what we have so far.
+ break;
+ }
+ if (!writer.Write(p, nalLen)) {
+ return Err(NS_ERROR_OUT_OF_MEMORY);
+ }
+ }
+
+ UniquePtr<MediaRawDataWriter> samplewriter(aSample->CreateWriter());
+
+ if (!samplewriter->Replace(tmp.Elements(), tmp.Length())) {
+ return Err(NS_ERROR_OUT_OF_MEMORY);
+ }
+
+ // Prepend the Annex B NAL with SPS and PPS tables to keyframes.
+ if (aAddSPS && aSample->mKeyframe) {
+ RefPtr<MediaByteBuffer> annexB =
+ ConvertExtraDataToAnnexB(aSample->mExtraData);
+ if (!samplewriter->Prepend(annexB->Elements(), annexB->Length())) {
+ return Err(NS_ERROR_OUT_OF_MEMORY);
+ }
+
+ // Prepending the NAL with SPS/PPS will mess up the encryption subsample
+ // offsets. So we need to account for the extra bytes by increasing
+ // the length of the first clear data subsample. Otherwise decryption
+ // will fail.
+ if (aSample->mCrypto.IsEncrypted()) {
+ if (aSample->mCrypto.mPlainSizes.Length() == 0) {
+ CheckedUint32 plainSize{annexB->Length()};
+ CheckedUint32 encryptedSize{samplewriter->Size()};
+ encryptedSize -= annexB->Length();
+ samplewriter->mCrypto.mPlainSizes.AppendElement(plainSize.value());
+ samplewriter->mCrypto.mEncryptedSizes.AppendElement(
+ encryptedSize.value());
+ } else {
+ CheckedUint32 newSize{samplewriter->mCrypto.mPlainSizes[0]};
+ newSize += annexB->Length();
+ samplewriter->mCrypto.mPlainSizes[0] = newSize.value();
+ }
+ }
+ }
+
+ return Ok();
+}
+
+// Parse avcC extradata and emit its SPS and PPS sets as an Annex B byte
+// stream (start-code delimited). Returns an empty buffer when the
+// extradata is absent, too short, or not version 1.
+already_AddRefed<mozilla::MediaByteBuffer> AnnexB::ConvertExtraDataToAnnexB(
+ const mozilla::MediaByteBuffer* aExtraData) {
+ // AVCC 6 byte header looks like:
+ // +------+------+------+------+------+------+------+------+
+ // [0] | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
+ // +------+------+------+------+------+------+------+------+
+ // [1] | profile |
+ // +------+------+------+------+------+------+------+------+
+ // [2] | compatibility |
+ // +------+------+------+------+------+------+------+------+
+ // [3] | level |
+ // +------+------+------+------+------+------+------+------+
+ // [4] | unused | nalLenSiz-1 |
+ // +------+------+------+------+------+------+------+------+
+ // [5] | unused | numSps |
+ // +------+------+------+------+------+------+------+------+
+
+ RefPtr<mozilla::MediaByteBuffer> annexB = new mozilla::MediaByteBuffer;
+
+ BufferReader reader(*aExtraData);
+ const uint8_t* ptr = reader.Read(5);
+ if (ptr && ptr[0] == 1) {
+ // Append SPS then PPS. numSps occupies the low 5 bits of byte [5];
+ // the PPS count byte is used as-is.
+ Unused << reader.ReadU8().map(
+ [&](uint8_t x) { return ConvertSPSOrPPS(reader, x & 31, annexB); });
+ Unused << reader.ReadU8().map(
+ [&](uint8_t x) { return ConvertSPSOrPPS(reader, x, annexB); });
+ // MP4Box adds extra bytes that we ignore. I don't know what they do.
+ }
+
+ return annexB.forget();
+}
+
+// Read aCount 16-bit-length-prefixed parameter sets (SPS or PPS) from
+// aReader and append each to aAnnexB preceded by a 4-byte start code.
+Result<mozilla::Ok, nsresult> AnnexB::ConvertSPSOrPPS(
+ BufferReader& aReader, uint8_t aCount, mozilla::MediaByteBuffer* aAnnexB) {
+ for (int i = 0; i < aCount; i++) {
+ uint16_t length;
+ MOZ_TRY_VAR(length, aReader.ReadU16());
+
+ const uint8_t* ptr = aReader.Read(length);
+ if (!ptr) {
+ return Err(NS_ERROR_FAILURE);
+ }
+ aAnnexB->AppendElements(kAnnexBDelimiter, ArrayLength(kAnnexBDelimiter));
+ aAnnexB->AppendElements(ptr, length);
+ }
+ return Ok();
+}
+
+// Advance aBr so it is positioned on the next 0x000001 start-code pattern.
+// On failure the reader is rewound to its original offset.
+static Result<Ok, nsresult> FindStartCodeInternal(BufferReader& aBr) {
+ size_t offset = aBr.Offset();
+
+ // Scan byte-wise until the reader reaches word alignment.
+ for (uint32_t i = 0; i < aBr.Align() && aBr.Remaining() >= 3; i++) {
+ auto res = aBr.PeekU24();
+ if (res.isOk() && (res.unwrap() == 0x000001)) {
+ return Ok();
+ }
+ mozilla::Unused << aBr.Read(1);
+ }
+
+ // Word-at-a-time scan. (x - 0x01010101) & ~x & 0x80808080 is non-zero
+ // iff at least one byte of x is zero — a prerequisite for a start code —
+ // so most words are rejected with a single test.
+ while (aBr.Remaining() >= 6) {
+ uint32_t x32;
+ MOZ_TRY_VAR(x32, aBr.PeekU32());
+ if ((x32 - 0x01010101) & (~x32) & 0x80808080) {
+ if ((x32 >> 8) == 0x000001) {
+ return Ok();
+ }
+ if (x32 == 0x000001) {
+ mozilla::Unused << aBr.Read(1);
+ return Ok();
+ }
+ if ((x32 & 0xff) == 0) {
+ // A start code may straddle this word and the next. Reads of p[4]
+ // and p[5] go past the single peeked byte but stay in bounds
+ // because Remaining() >= 6 here.
+ const uint8_t* p = aBr.Peek(1);
+ if ((x32 & 0xff00) == 0 && p[4] == 1) {
+ mozilla::Unused << aBr.Read(2);
+ return Ok();
+ }
+ if (p[4] == 0 && p[5] == 1) {
+ mozilla::Unused << aBr.Read(3);
+ return Ok();
+ }
+ }
+ }
+ mozilla::Unused << aBr.Read(4);
+ }
+
+ // Fewer than 6 bytes left: finish with a byte-wise scan.
+ while (aBr.Remaining() >= 3) {
+ uint32_t data;
+ MOZ_TRY_VAR(data, aBr.PeekU24());
+ if (data == 0x000001) {
+ return Ok();
+ }
+ mozilla::Unused << aBr.Read(1);
+ }
+
+ // No start code were found; Go back to the beginning.
+ mozilla::Unused << aBr.Seek(offset);
+ return Err(NS_ERROR_FAILURE);
+}
+
+// Find the next start code and report its size in aStartSize (3, or 4 when
+// an extra zero byte precedes the 0x000001 pattern). On success the reader
+// is left positioned just after the start code.
+static Result<Ok, nsresult> FindStartCode(BufferReader& aBr,
+ size_t& aStartSize) {
+ if (FindStartCodeInternal(aBr).isErr()) {
+ aStartSize = 0;
+ return Err(NS_ERROR_FAILURE);
+ }
+
+ aStartSize = 3;
+ if (aBr.Offset()) {
+ // Check if it's 4-bytes start code
+ aBr.Rewind(1);
+ uint8_t data;
+ MOZ_TRY_VAR(data, aBr.ReadU8());
+ if (data == 0) {
+ aStartSize = 4;
+ }
+ }
+ // Skip over the 3-byte 0x000001 pattern itself.
+ mozilla::Unused << aBr.Read(3);
+ return Ok();
+}
+
+/* static */
+// Split an Annex B byte stream into (offset, size) entries, one per NAL
+// payload (offsets point past each start code). Any trailing data after
+// the last start code is recorded as a final entry.
+void AnnexB::ParseNALEntries(const Span<const uint8_t>& aSpan,
+ nsTArray<AnnexB::NALEntry>& aEntries) {
+ BufferReader reader(aSpan.data(), aSpan.Length());
+ size_t startSize;
+ auto rv = FindStartCode(reader, startSize);
+ size_t startOffset = reader.Offset();
+ if (rv.isOk()) {
+ while (FindStartCode(reader, startSize).isOk()) {
+ int64_t offset = reader.Offset();
+ int64_t sizeNAL = offset - startOffset - startSize;
+ aEntries.AppendElement(AnnexB::NALEntry(startOffset, sizeNAL));
+ // Re-position just past the recorded NAL for the next iteration.
+ reader.Seek(startOffset);
+ reader.Read(sizeNAL + startSize);
+ startOffset = offset;
+ }
+ }
+ int64_t sizeNAL = reader.Remaining();
+ if (sizeNAL) {
+ aEntries.AppendElement(AnnexB::NALEntry(startOffset, sizeNAL));
+ }
+}
+
+// Rewrite the Annex B stream in aBr as 4-byte length-prefixed (AVCC) NAL
+// units into aBw. Data after the final start code is emitted as the last
+// NAL.
+static Result<mozilla::Ok, nsresult> ParseNALUnits(ByteWriter<BigEndian>& aBw,
+ BufferReader& aBr) {
+ size_t startSize;
+
+ auto rv = FindStartCode(aBr, startSize);
+ if (rv.isOk()) {
+ size_t startOffset = aBr.Offset();
+ while (FindStartCode(aBr, startSize).isOk()) {
+ size_t offset = aBr.Offset();
+ size_t sizeNAL = offset - startOffset - startSize;
+ aBr.Seek(startOffset);
+ if (!aBw.WriteU32(sizeNAL) || !aBw.Write(aBr.Read(sizeNAL), sizeNAL)) {
+ return Err(NS_ERROR_OUT_OF_MEMORY);
+ }
+ aBr.Read(startSize);
+ startOffset = offset;
+ }
+ }
+ size_t sizeNAL = aBr.Remaining();
+ if (sizeNAL) {
+ if (!aBw.WriteU32(sizeNAL) || !aBw.Write(aBr.Read(sizeNAL), sizeNAL)) {
+ return Err(NS_ERROR_OUT_OF_MEMORY);
+ }
+ }
+ return Ok();
+}
+
+// Convert a sample to 4-byte AVCC. Annex B input is rewritten to
+// length-prefixed NALs; AVCC input is only normalised to 4-byte lengths.
+// If no avcC header is supplied, a minimal fake extradata (High profile,
+// level 4.0, zero SPS/PPS) is attached so the sample still parses as AVCC.
+bool AnnexB::ConvertSampleToAVCC(mozilla::MediaRawData* aSample,
+ const RefPtr<MediaByteBuffer>& aAVCCHeader) {
+ if (IsAVCC(aSample)) {
+ return ConvertSampleTo4BytesAVCC(aSample).isOk();
+ }
+ if (!IsAnnexB(aSample)) {
+ // Not AnnexB, nothing to convert.
+ return true;
+ }
+
+ nsTArray<uint8_t> nalu;
+ ByteWriter<BigEndian> writer(nalu);
+ BufferReader reader(aSample->Data(), aSample->Size());
+
+ if (ParseNALUnits(writer, reader).isErr()) {
+ return false;
+ }
+ UniquePtr<MediaRawDataWriter> samplewriter(aSample->CreateWriter());
+ if (!samplewriter->Replace(nalu.Elements(), nalu.Length())) {
+ return false;
+ }
+
+ if (aAVCCHeader) {
+ aSample->mExtraData = aAVCCHeader;
+ return true;
+ }
+
+ // Create the AVCC header.
+ auto extradata = MakeRefPtr<mozilla::MediaByteBuffer>();
+ static const uint8_t kFakeExtraData[] = {
+ 1 /* version */,
+ 0x64 /* profile (High) */,
+ 0 /* profile compat (0) */,
+ 40 /* level (40) */,
+ 0xfc | 3 /* nal size - 1 */,
+ 0xe0 /* num SPS (0) */,
+ 0 /* num PPS (0) */
+ };
+ // XXX(Bug 1631371) Check if this should use a fallible operation as it
+ // pretended earlier.
+ extradata->AppendElements(kFakeExtraData, ArrayLength(kFakeExtraData));
+ aSample->mExtraData = std::move(extradata);
+ return true;
+}
+
+// Normalise an AVCC sample to 4-byte NAL length prefixes. The current
+// prefix width comes from the extradata's lengthSizeMinusOne field
+// (byte 4, low 2 bits). A truncated trailing NAL is silently dropped.
+Result<mozilla::Ok, nsresult> AnnexB::ConvertSampleTo4BytesAVCC(
+ mozilla::MediaRawData* aSample) {
+ MOZ_ASSERT(IsAVCC(aSample));
+
+ int nalLenSize = ((*aSample->mExtraData)[4] & 3) + 1;
+
+ if (nalLenSize == 4) {
+ return Ok();
+ }
+ nsTArray<uint8_t> dest;
+ ByteWriter<BigEndian> writer(dest);
+ BufferReader reader(aSample->Data(), aSample->Size());
+ while (reader.Remaining() > nalLenSize) {
+ uint32_t nalLen;
+ // nalLenSize is 1, 2 or 3 here (4 returned early above), so exactly
+ // one case always assigns nalLen.
+ switch (nalLenSize) {
+ case 1:
+ MOZ_TRY_VAR(nalLen, reader.ReadU8());
+ break;
+ case 2:
+ MOZ_TRY_VAR(nalLen, reader.ReadU16());
+ break;
+ case 3:
+ MOZ_TRY_VAR(nalLen, reader.ReadU24());
+ break;
+ }
+
+ MOZ_ASSERT(nalLenSize != 4);
+
+ const uint8_t* p = reader.Read(nalLen);
+ if (!p) {
+ return Ok();
+ }
+ if (!writer.WriteU32(nalLen) || !writer.Write(p, nalLen)) {
+ return Err(NS_ERROR_OUT_OF_MEMORY);
+ }
+ }
+ UniquePtr<MediaRawDataWriter> samplewriter(aSample->CreateWriter());
+ if (!samplewriter->Replace(dest.Elements(), dest.Length())) {
+ return Err(NS_ERROR_OUT_OF_MEMORY);
+ }
+ return Ok();
+}
+
+// A sample counts as AVCC when it carries avcC extradata (version byte 1,
+// at least the 7-byte fixed header) and has enough payload to matter.
+bool AnnexB::IsAVCC(const mozilla::MediaRawData* aSample) {
+ return aSample->Size() >= 3 && aSample->mExtraData &&
+ aSample->mExtraData->Length() >= 7 && (*aSample->mExtraData)[0] == 1;
+}
+
+// Annex B data starts with a 4-byte (0x00000001) or 3-byte (0x000001)
+// start code.
+bool AnnexB::IsAnnexB(const mozilla::MediaRawData* aSample) {
+ if (aSample->Size() < 4) {
+ return false;
+ }
+ uint32_t header = mozilla::BigEndian::readUint32(aSample->Data());
+ return header == 0x00000001 || (header >> 8) == 0x000001;
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/agnostic/bytestreams/AnnexB.h b/dom/media/platforms/agnostic/bytestreams/AnnexB.h
new file mode 100644
index 0000000000..dbb8a7c3e1
--- /dev/null
+++ b/dom/media/platforms/agnostic/bytestreams/AnnexB.h
@@ -0,0 +1,66 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef DOM_MEDIA_PLATFORMS_AGNOSTIC_BYTESTREAMS_ANNEX_B_H_
+#define DOM_MEDIA_PLATFORMS_AGNOSTIC_BYTESTREAMS_ANNEX_B_H_
+
+#include "ErrorList.h"
+#include "mozilla/RefPtr.h"
+#include "mozilla/Result.h"
+
+template <class>
+class nsTArray;
+
+namespace mozilla {
+class BufferReader;
+class MediaRawData;
+class MediaByteBuffer;
+
+// Conversions between the two common H.264 bytestream framings: Annex B
+// (start-code delimited) and AVCC (length-prefixed, avcC extradata).
+class AnnexB {
+ public:
+ struct NALEntry {
+ NALEntry(int64_t aOffset, int64_t aSize) : mOffset(aOffset), mSize(aSize) {
+ MOZ_ASSERT(mOffset >= 0);
+ MOZ_ASSERT(mSize >= 0);
+ }
+ // They should be non-negative, so we use int64_t to assert their value when
+ // assigning value to them.
+ int64_t mOffset;
+ int64_t mSize;
+ };
+ // All conversions assume size of NAL length field is 4 bytes.
+ // Convert a sample from AVCC format to Annex B.
+ static mozilla::Result<mozilla::Ok, nsresult> ConvertSampleToAnnexB(
+ mozilla::MediaRawData* aSample, bool aAddSPS = true);
+ // Convert a sample from Annex B to AVCC.
+ // An AVCC extradata buffer must not already be set on the sample.
+ static bool ConvertSampleToAVCC(
+ mozilla::MediaRawData* aSample,
+ const RefPtr<mozilla::MediaByteBuffer>& aAVCCHeader = nullptr);
+ // Normalise an AVCC sample to 4-byte NAL length prefixes.
+ static mozilla::Result<mozilla::Ok, nsresult> ConvertSampleTo4BytesAVCC(
+ mozilla::MediaRawData* aSample);
+
+ // Parse an AVCC extradata and construct the Annex B sample header.
+ static already_AddRefed<mozilla::MediaByteBuffer> ConvertExtraDataToAnnexB(
+ const mozilla::MediaByteBuffer* aExtraData);
+ // Returns true if format is AVCC and sample has valid extradata.
+ static bool IsAVCC(const mozilla::MediaRawData* aSample);
+ // Returns true if format is AnnexB.
+ static bool IsAnnexB(const mozilla::MediaRawData* aSample);
+
+ // Parse NAL entries from the bytes stream to know the offset and the size of
+ // each NAL in the bytes stream.
+ static void ParseNALEntries(const Span<const uint8_t>& aSpan,
+ nsTArray<AnnexB::NALEntry>& aEntries);
+
+ private:
+ // AVCC box parser helper.
+ static mozilla::Result<mozilla::Ok, nsresult> ConvertSPSOrPPS(
+ mozilla::BufferReader& aReader, uint8_t aCount,
+ mozilla::MediaByteBuffer* aAnnexB);
+};
+
+} // namespace mozilla
+
+#endif // DOM_MEDIA_PLATFORMS_AGNOSTIC_BYTESTREAMS_ANNEX_B_H_
diff --git a/dom/media/platforms/agnostic/bytestreams/H264.cpp b/dom/media/platforms/agnostic/bytestreams/H264.cpp
new file mode 100644
index 0000000000..4dc33e1763
--- /dev/null
+++ b/dom/media/platforms/agnostic/bytestreams/H264.cpp
@@ -0,0 +1,1356 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "H264.h"
+#include <cmath>
+#include <limits>
+#include "AnnexB.h"
+#include "BitReader.h"
+#include "BitWriter.h"
+#include "BufferReader.h"
+#include "ByteWriter.h"
+#include "mozilla/ArrayUtils.h"
+#include "mozilla/PodOperations.h"
+#include "mozilla/ResultExtensions.h"
+
+// Read a signed (READSE) or unsigned (READUE) Exp-Golomb value into
+// aDest.var, making the enclosing function return false when the value is
+// outside [min, max] (resp. [0, max]). Both expect a BitReader named `br`
+// and a destination struct named `aDest` in scope.
+#define READSE(var, min, max) \
+ { \
+ int32_t val = br.ReadSE(); \
+ if (val < min || val > max) { \
+ return false; \
+ } \
+ aDest.var = val; \
+ }
+
+#define READUE(var, max) \
+ { \
+ uint32_t uval = br.ReadUE(); \
+ if (uval > max) { \
+ return false; \
+ } \
+ aDest.var = uval; \
+ }
+
+namespace mozilla {
+
+// Default scaling lists (per spec).
+// ITU H264:
+// Table 7-2 – Assignment of mnemonic names to scaling list indices and
+// specification of fall-back rule
+// (The list contents below are the spec's default 4x4/8x8 intra and inter
+// matrices, used when a stream signals "use default" or as fall-back.)
+static const uint8_t Default_4x4_Intra[16] = {6, 13, 13, 20, 20, 20, 28, 28,
+ 28, 28, 32, 32, 32, 37, 37, 42};
+
+static const uint8_t Default_4x4_Inter[16] = {10, 14, 14, 20, 20, 20, 24, 24,
+ 24, 24, 27, 27, 27, 30, 30, 34};
+
+static const uint8_t Default_8x8_Intra[64] = {
+ 6, 10, 10, 13, 11, 13, 16, 16, 16, 16, 18, 18, 18, 18, 18, 23,
+ 23, 23, 23, 23, 23, 25, 25, 25, 25, 25, 25, 25, 27, 27, 27, 27,
+ 27, 27, 27, 27, 29, 29, 29, 29, 29, 29, 29, 31, 31, 31, 31, 31,
+ 31, 33, 33, 33, 33, 33, 36, 36, 36, 36, 38, 38, 38, 40, 40, 42};
+
+static const uint8_t Default_8x8_Inter[64] = {
+ 9, 13, 13, 15, 13, 15, 17, 17, 17, 17, 19, 19, 19, 19, 19, 21,
+ 21, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 24, 24, 24, 24,
+ 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 27, 27, 27, 27, 27,
+ 27, 28, 28, 28, 28, 28, 30, 30, 30, 30, 32, 32, 32, 33, 33, 35};
+
+namespace detail {
+// Parse one scaling_list() syntax element (ITU-T H.264 7.3.2.1.1.1).
+// If the present flag is clear, the fall-back list (when provided) is
+// copied instead. If the first delta drives nextScale to zero, the spec's
+// default list is used. Otherwise entries are decoded incrementally, with
+// nextScale == 0 meaning "repeat the last scale".
+static void scaling_list(BitReader& aBr, uint8_t* aScalingList,
+ int aSizeOfScalingList, const uint8_t* aDefaultList,
+ const uint8_t* aFallbackList) {
+ int32_t lastScale = 8;
+ int32_t nextScale = 8;
+ int32_t deltaScale;
+
+ // (pic|seq)_scaling_list_present_flag[i]
+ if (!aBr.ReadBit()) {
+ if (aFallbackList) {
+ memcpy(aScalingList, aFallbackList, aSizeOfScalingList);
+ }
+ return;
+ }
+
+ for (int i = 0; i < aSizeOfScalingList; i++) {
+ if (nextScale != 0) {
+ deltaScale = aBr.ReadSE();
+ nextScale = (lastScale + deltaScale + 256) % 256;
+ if (!i && !nextScale) {
+ memcpy(aScalingList, aDefaultList, aSizeOfScalingList);
+ return;
+ }
+ }
+ aScalingList[i] = (nextScale == 0) ? lastScale : nextScale;
+ lastScale = aScalingList[i];
+ }
+}
+} // namespace detail.
+
+// Size-deducing wrappers over detail::scaling_list. The first form copies
+// aFallbackList when the list is absent from the bitstream; the second has
+// no fall-back (the destination is left untouched when absent).
+template <size_t N>
+static void scaling_list(BitReader& aBr, uint8_t (&aScalingList)[N],
+ const uint8_t (&aDefaultList)[N],
+ const uint8_t (&aFallbackList)[N]) {
+ detail::scaling_list(aBr, aScalingList, N, aDefaultList, aFallbackList);
+}
+
+template <size_t N>
+static void scaling_list(BitReader& aBr, uint8_t (&aScalingList)[N],
+ const uint8_t (&aDefaultList)[N]) {
+ detail::scaling_list(aBr, aScalingList, N, aDefaultList, nullptr);
+}
+
+// Zero-fill the whole struct (padding included — operator== relies on
+// this for its memcmp) and then apply the spec's defaults for fields that
+// have non-zero "not present" values. Scaling matrices default to flat 16.
+SPSData::SPSData() {
+ PodZero(this);
+ // Default values when they aren't defined as per ITU-T H.264 (2014/02).
+ chroma_format_idc = 1;
+ video_format = 5;
+ colour_primaries = 2;
+ transfer_characteristics = 2;
+ sample_ratio = 1.0;
+ memset(scaling_matrix4x4, 16, sizeof(scaling_matrix4x4));
+ memset(scaling_matrix8x8, 16, sizeof(scaling_matrix8x8));
+}
+
+// Equality holds only between two successfully parsed SPS. The bytewise
+// compare is safe because the constructor zero-fills the entire struct
+// (including padding) via PodZero.
+bool SPSData::operator==(const SPSData& aOther) const {
+ return this->valid && aOther.valid && !memcmp(this, &aOther, sizeof(SPSData));
+}
+
+bool SPSData::operator!=(const SPSData& aOther) const {
+ return !(operator==(aOther));
+}
+
+// Described in ISO 23001-8:2016. Enumerator values are the on-wire CICP
+// code points carried in the SPS VUI colour description.
+// Table 2 (colour primaries)
+enum class PrimaryID : uint8_t {
+ INVALID = 0,
+ BT709 = 1,
+ UNSPECIFIED = 2,
+ BT470M = 4,
+ BT470BG = 5,
+ SMPTE170M = 6,
+ SMPTE240M = 7,
+ FILM = 8,
+ BT2020 = 9,
+ SMPTEST428_1 = 10,
+ SMPTEST431_2 = 11,
+ SMPTEST432_1 = 12,
+ EBU_3213_E = 22
+};
+
+// Table 3 (transfer characteristics)
+enum class TransferID : uint8_t {
+ INVALID = 0,
+ BT709 = 1,
+ UNSPECIFIED = 2,
+ GAMMA22 = 4,
+ GAMMA28 = 5,
+ SMPTE170M = 6,
+ SMPTE240M = 7,
+ LINEAR = 8,
+ LOG = 9,
+ LOG_SQRT = 10,
+ IEC61966_2_4 = 11,
+ BT1361_ECG = 12,
+ IEC61966_2_1 = 13,
+ BT2020_10 = 14,
+ BT2020_12 = 15,
+ SMPTEST2084 = 16,
+ SMPTEST428_1 = 17,
+
+ // Not yet standardized
+ ARIB_STD_B67 = 18, // AKA hybrid-log gamma, HLG.
+};
+
+// Table 4 (matrix coefficients)
+enum class MatrixID : uint8_t {
+ RGB = 0,
+ BT709 = 1,
+ UNSPECIFIED = 2,
+ FCC = 4,
+ BT470BG = 5,
+ SMPTE170M = 6,
+ SMPTE240M = 7,
+ YCOCG = 8,
+ BT2020_NCL = 9,
+ BT2020_CL = 10,
+ YDZDX = 11,
+ INVALID = 255,
+};
+
+// Map raw VUI code points onto the enums above, rejecting reserved or
+// out-of-range values as INVALID.
+static PrimaryID GetPrimaryID(int aPrimary) {
+ if (aPrimary < 1 || aPrimary > 22 || aPrimary == 3) {
+ return PrimaryID::INVALID;
+ }
+ // 13..21 are reserved; 22 (EBU 3213-E) is the next valid value.
+ if (aPrimary > 12 && aPrimary < 22) {
+ return PrimaryID::INVALID;
+ }
+ return static_cast<PrimaryID>(aPrimary);
+}
+
+static TransferID GetTransferID(int aTransfer) {
+ if (aTransfer < 1 || aTransfer > 18 || aTransfer == 3) {
+ return TransferID::INVALID;
+ }
+ return static_cast<TransferID>(aTransfer);
+}
+
+static MatrixID GetMatrixID(int aMatrix) {
+ if (aMatrix < 0 || aMatrix > 11 || aMatrix == 3) {
+ return MatrixID::INVALID;
+ }
+ return static_cast<MatrixID>(aMatrix);
+}
+
+// Guess the YUV colour space from the VUI colour description. Each of
+// primaries / transfer / matrix casts a vote for BT601, BT709 or BT2020;
+// the highest-valued remaining guess wins, with BT709 as the fallback
+// when nothing voted.
+gfx::YUVColorSpace SPSData::ColorSpace() const {
+ // Bitfield, note that guesses with higher values take precedence over
+ // guesses with lower values.
+ enum Guess {
+ GUESS_BT601 = 1 << 0,
+ GUESS_BT709 = 1 << 1,
+ GUESS_BT2020 = 1 << 2,
+ };
+
+ uint32_t guess = 0;
+
+ switch (GetPrimaryID(colour_primaries)) {
+ case PrimaryID::BT709:
+ guess |= GUESS_BT709;
+ break;
+ case PrimaryID::BT470M:
+ case PrimaryID::BT470BG:
+ case PrimaryID::SMPTE170M:
+ case PrimaryID::SMPTE240M:
+ guess |= GUESS_BT601;
+ break;
+ case PrimaryID::BT2020:
+ guess |= GUESS_BT2020;
+ break;
+ case PrimaryID::FILM:
+ case PrimaryID::SMPTEST428_1:
+ case PrimaryID::SMPTEST431_2:
+ case PrimaryID::SMPTEST432_1:
+ case PrimaryID::EBU_3213_E:
+ case PrimaryID::INVALID:
+ case PrimaryID::UNSPECIFIED:
+ break;
+ }
+
+ switch (GetTransferID(transfer_characteristics)) {
+ case TransferID::BT709:
+ guess |= GUESS_BT709;
+ break;
+ case TransferID::GAMMA22:
+ case TransferID::GAMMA28:
+ case TransferID::SMPTE170M:
+ case TransferID::SMPTE240M:
+ guess |= GUESS_BT601;
+ break;
+ case TransferID::BT2020_10:
+ case TransferID::BT2020_12:
+ guess |= GUESS_BT2020;
+ break;
+ case TransferID::LINEAR:
+ case TransferID::LOG:
+ case TransferID::LOG_SQRT:
+ case TransferID::IEC61966_2_4:
+ case TransferID::BT1361_ECG:
+ case TransferID::IEC61966_2_1:
+ case TransferID::SMPTEST2084:
+ case TransferID::SMPTEST428_1:
+ case TransferID::ARIB_STD_B67:
+ case TransferID::INVALID:
+ case TransferID::UNSPECIFIED:
+ break;
+ }
+
+ switch (GetMatrixID(matrix_coefficients)) {
+ case MatrixID::BT709:
+ guess |= GUESS_BT709;
+ break;
+ case MatrixID::BT470BG:
+ case MatrixID::SMPTE170M:
+ case MatrixID::SMPTE240M:
+ guess |= GUESS_BT601;
+ break;
+ case MatrixID::BT2020_NCL:
+ case MatrixID::BT2020_CL:
+ guess |= GUESS_BT2020;
+ break;
+ case MatrixID::RGB:
+ case MatrixID::FCC:
+ case MatrixID::YCOCG:
+ case MatrixID::YDZDX:
+ case MatrixID::INVALID:
+ case MatrixID::UNSPECIFIED:
+ break;
+ }
+
+ // Removes lowest bit until only a single bit remains.
+ while (guess & (guess - 1)) {
+ guess &= guess - 1;
+ }
+ if (!guess) {
+ // Default to BT709 rather than the legacy BT601 when nothing voted.
+ guess = GUESS_BT709;
+ }
+
+ switch (guess) {
+ case GUESS_BT601:
+ return gfx::YUVColorSpace::BT601;
+ case GUESS_BT709:
+ return gfx::YUVColorSpace::BT709;
+ case GUESS_BT2020:
+ return gfx::YUVColorSpace::BT2020;
+ default:
+ MOZ_CRASH("not possible to get here but makes compiler happy");
+ }
+}
+
+// Luma bit depth: only 8, 10 and 12 bits (bit_depth_luma_minus8 of 0, 2,
+// 4) are recognised; anything else falls back to 8 bits.
+gfx::ColorDepth SPSData::ColorDepth() const {
+ if (bit_depth_luma_minus8 != 0 && bit_depth_luma_minus8 != 2 &&
+ bit_depth_luma_minus8 != 4) {
+ // We don't know what that is, just assume 8 bits to prevent decoding
+ // regressions if we ever encounter those.
+ return gfx::ColorDepth::COLOR_8;
+ }
+ return gfx::ColorDepthForBitDepth(bit_depth_luma_minus8 + 8);
+}
+
+// SPSNAL and SPSNALIterator do not own their data.
+// Wraps one SPS NAL unit: decodes the RBSP on construction and compares
+// two SPS either semantically (parsed SPSData) or, when parsing fails,
+// bit-for-bit.
+class SPSNAL {
+ public:
+ SPSNAL(const uint8_t* aPtr, size_t aLength) {
+ MOZ_ASSERT(aPtr);
+
+ // Reject anything that is not an SPS NAL (type 7).
+ if (aLength == 0 || (*aPtr & 0x1f) != H264_NAL_SPS) {
+ return;
+ }
+ mDecodedNAL = H264::DecodeNALUnit(aPtr, aLength);
+ if (mDecodedNAL) {
+ mLength = BitReader::GetBitLength(mDecodedNAL);
+ }
+ }
+
+ SPSNAL() = default;
+
+ bool IsValid() const { return mDecodedNAL; }
+
+ bool operator==(const SPSNAL& aOther) const {
+ if (!mDecodedNAL || !aOther.mDecodedNAL) {
+ return false;
+ }
+
+ SPSData decodedSPS1;
+ SPSData decodedSPS2;
+ if (!GetSPSData(decodedSPS1) || !aOther.GetSPSData(decodedSPS2)) {
+ // Couldn't decode one SPS, perform a binary comparison
+ if (mLength != aOther.mLength) {
+ return false;
+ }
+ MOZ_ASSERT(mLength / 8 <= mDecodedNAL->Length());
+
+ // Compare whole bytes first...
+ if (memcmp(mDecodedNAL->Elements(), aOther.mDecodedNAL->Elements(),
+ mLength / 8)) {
+ return false;
+ }
+
+ // ...then the trailing (mLength % 8) bits one at a time.
+ uint32_t remaining = mLength - (mLength & ~7);
+
+ BitReader b1(mDecodedNAL->Elements() + mLength / 8, remaining);
+ BitReader b2(aOther.mDecodedNAL->Elements() + mLength / 8, remaining);
+ for (uint32_t i = 0; i < remaining; i++) {
+ if (b1.ReadBit() != b2.ReadBit()) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ return decodedSPS1 == decodedSPS2;
+ }
+
+ bool operator!=(const SPSNAL& aOther) const { return !(operator==(aOther)); }
+
+ bool GetSPSData(SPSData& aDest) const {
+ return H264::DecodeSPS(mDecodedNAL, aDest);
+ }
+
+ private:
+ // Decoded RBSP (emulation-prevention bytes removed); null when invalid.
+ RefPtr<mozilla::MediaByteBuffer> mDecodedNAL;
+ // Length of the decoded NAL in bits.
+ uint32_t mLength = 0;
+};
+
+// Iterates over the SPS NALs stored in avcC extradata. Construction skips
+// the 5-byte fixed header and reads numOfSequenceParameterSets (low 5 bits
+// of byte 5); each entry is a 16-bit-length-prefixed NAL.
+class SPSNALIterator {
+ public:
+ explicit SPSNALIterator(const mozilla::MediaByteBuffer* aExtraData)
+ : mExtraDataPtr(aExtraData->Elements()), mReader(aExtraData) {
+ if (!mReader.Read(5)) {
+ return;
+ }
+
+ auto res = mReader.ReadU8();
+ mNumSPS = res.isOk() ? res.unwrap() & 0x1f : 0;
+ if (mNumSPS == 0) {
+ return;
+ }
+ mValid = true;
+ }
+
+ SPSNALIterator& operator++() {
+ if (mEOS || !mValid) {
+ return *this;
+ }
+ if (--mNumSPS == 0) {
+ mEOS = true;
+ }
+ // Skip over the current length-prefixed SPS entry.
+ auto res = mReader.ReadU16();
+ uint16_t length = res.isOk() ? res.unwrap() : 0;
+ if (length == 0 || !mReader.Read(length)) {
+ mEOS = true;
+ }
+ return *this;
+ }
+
+ explicit operator bool() const { return mValid && !mEOS; }
+
+ // Dereference without advancing mReader: re-read the current entry via a
+ // fresh reader positioned at the current offset.
+ SPSNAL operator*() const {
+ MOZ_ASSERT(bool(*this));
+ BufferReader reader(mExtraDataPtr + mReader.Offset(), mReader.Remaining());
+
+ auto res = reader.ReadU16();
+ uint16_t length = res.isOk() ? res.unwrap() : 0;
+ const uint8_t* ptr = reader.Read(length);
+ if (!ptr || !length) {
+ return SPSNAL();
+ }
+ return SPSNAL(ptr, length);
+ }
+
+ private:
+ const uint8_t* mExtraDataPtr;
+ BufferReader mReader;
+ bool mValid = false;
+ bool mEOS = false;
+ uint8_t mNumSPS = 0;
+};
+
+// Strip the NAL unit header and the emulation-prevention 0x03 bytes from a
+// NAL, returning the raw RBSP payload, or nullptr on malformed/short
+// input.
+/* static */ already_AddRefed<mozilla::MediaByteBuffer> H264::DecodeNALUnit(
+ const uint8_t* aNAL, size_t aLength) {
+ MOZ_ASSERT(aNAL);
+
+ if (aLength < 4) {
+ return nullptr;
+ }
+
+ RefPtr<mozilla::MediaByteBuffer> rbsp = new mozilla::MediaByteBuffer;
+ BufferReader reader(aNAL, aLength);
+ auto res = reader.ReadU8();
+ if (res.isErr()) {
+ return nullptr;
+ }
+ uint8_t nal_unit_type = res.unwrap() & 0x1f;
+ // NAL units carrying SVC/MVC/3D extension headers have 2 or 3 extra
+ // header bytes, selected by the flag bit peeked below.
+ uint32_t nalUnitHeaderBytes = 1;
+ if (nal_unit_type == H264_NAL_PREFIX || nal_unit_type == H264_NAL_SLICE_EXT ||
+ nal_unit_type == H264_NAL_SLICE_EXT_DVC) {
+ bool svc_extension_flag = false;
+ bool avc_3d_extension_flag = false;
+ if (nal_unit_type != H264_NAL_SLICE_EXT_DVC) {
+ res = reader.PeekU8();
+ if (res.isErr()) {
+ return nullptr;
+ }
+ svc_extension_flag = res.unwrap() & 0x80;
+ } else {
+ res = reader.PeekU8();
+ if (res.isErr()) {
+ return nullptr;
+ }
+ avc_3d_extension_flag = res.unwrap() & 0x80;
+ }
+ if (svc_extension_flag) {
+ nalUnitHeaderBytes += 3;
+ } else if (avc_3d_extension_flag) {
+ nalUnitHeaderBytes += 2;
+ } else {
+ nalUnitHeaderBytes += 3;
+ }
+ }
+ if (!reader.Read(nalUnitHeaderBytes - 1)) {
+ return nullptr;
+ }
+ // Copy the payload, dropping each 0x03 byte that follows two zero bytes
+ // (emulation prevention).
+ uint32_t lastbytes = 0xffff;
+ while (reader.Remaining()) {
+ auto res = reader.ReadU8();
+ if (res.isErr()) {
+ return nullptr;
+ }
+ uint8_t byte = res.unwrap();
+ if ((lastbytes & 0xffff) == 0 && byte == 0x03) {
+ // reset last two bytes, to detect the 0x000003 sequence again.
+ lastbytes = 0xffff;
+ } else {
+ rbsp->AppendElement(byte);
+ }
+ lastbytes = (lastbytes << 8) | byte;
+ }
+ return rbsp.forget();
+}
+
+// The reverse of DecodeNALUnit. To allow the distinction between Annex B (that
+// uses 0x000001 as marker) and AVCC, the pattern 0x00 0x00 0x0n (where n is
+// between 0 and 3) can't be found in the bytestream. A 0x03 byte is inserted
+// after the second 0. Eg. 0x00 0x00 0x00 becomes 0x00 0x00 0x03 0x00
+/* static */ already_AddRefed<mozilla::MediaByteBuffer> H264::EncodeNALUnit(
+ const uint8_t* aNAL, size_t aLength) {
+ MOZ_ASSERT(aNAL);
+ RefPtr<MediaByteBuffer> rbsp = new MediaByteBuffer();
+ BufferReader reader(aNAL, aLength);
+
+ // The first two bytes are copied verbatim: no 0x00 0x00 0x0n pattern can
+ // complete before the third byte.
+ auto res = reader.ReadU8();
+ if (res.isErr()) {
+ return rbsp.forget();
+ }
+ rbsp->AppendElement(res.unwrap());
+
+ res = reader.ReadU8();
+ if (res.isErr()) {
+ return rbsp.forget();
+ }
+ rbsp->AppendElement(res.unwrap());
+
+ while ((res = reader.ReadU8()).isOk()) {
+ uint8_t val = res.unwrap();
+ // Emit an emulation-prevention 0x03 before any byte <= 0x03 that would
+ // otherwise follow two zero bytes.
+ if (val <= 0x03 && rbsp->ElementAt(rbsp->Length() - 2) == 0 &&
+ rbsp->ElementAt(rbsp->Length() - 1) == 0) {
+ rbsp->AppendElement(0x03);
+ }
+ rbsp->AppendElement(val);
+ }
+ return rbsp.forget();
+}
+
+// Sanitise a floating-point dimension before converting to int32_t:
+// returns 0 for NaN, values <= 1, and values too large to be a sane
+// picture dimension (> INT32_MAX / 2).
+static int32_t ConditionDimension(float aValue) {
+ // This will exclude NaNs and too-big values.
+ if (aValue > 1.0 && aValue <= float(INT32_MAX) / 2) {
+ return int32_t(aValue);
+ }
+ return 0;
+}
+
/* static */
bool H264::DecodeSPS(const mozilla::MediaByteBuffer* aSPS, SPSData& aDest) {
  if (!aSPS) {
    return false;
  }
  // Parse seq_parameter_set_data as per ITU-T H.264 7.3.2.1.1. aSPS must be
  // in RBSP form (emulation prevention bytes already removed).
  BitReader br(aSPS, BitReader::GetBitLength(aSPS));

  aDest.profile_idc = br.ReadBits(8);
  aDest.constraint_set0_flag = br.ReadBit();
  aDest.constraint_set1_flag = br.ReadBit();
  aDest.constraint_set2_flag = br.ReadBit();
  aDest.constraint_set3_flag = br.ReadBit();
  aDest.constraint_set4_flag = br.ReadBit();
  aDest.constraint_set5_flag = br.ReadBit();
  br.ReadBits(2);  // reserved_zero_2bits
  aDest.level_idc = br.ReadBits(8);
  READUE(seq_parameter_set_id, MAX_SPS_COUNT - 1);

  // High profile and its variants carry explicit chroma format, bit depth and
  // scaling matrix fields.
  if (aDest.profile_idc == 100 || aDest.profile_idc == 110 ||
      aDest.profile_idc == 122 || aDest.profile_idc == 244 ||
      aDest.profile_idc == 44 || aDest.profile_idc == 83 ||
      aDest.profile_idc == 86 || aDest.profile_idc == 118 ||
      aDest.profile_idc == 128 || aDest.profile_idc == 138 ||
      aDest.profile_idc == 139 || aDest.profile_idc == 134) {
    READUE(chroma_format_idc, 3);
    if (aDest.chroma_format_idc == 3) {
      aDest.separate_colour_plane_flag = br.ReadBit();
    }
    READUE(bit_depth_luma_minus8, 6);
    READUE(bit_depth_chroma_minus8, 6);
    br.ReadBit();  // qpprime_y_zero_transform_bypass_flag
    aDest.seq_scaling_matrix_present_flag = br.ReadBit();
    if (aDest.seq_scaling_matrix_present_flag) {
      // Six 4x4 lists, then two (or six for 4:4:4) 8x8 lists. Each list falls
      // back to the previous list of the same kind, or the spec default.
      scaling_list(br, aDest.scaling_matrix4x4[0], Default_4x4_Intra,
                   Default_4x4_Intra);
      scaling_list(br, aDest.scaling_matrix4x4[1], Default_4x4_Intra,
                   aDest.scaling_matrix4x4[0]);
      scaling_list(br, aDest.scaling_matrix4x4[2], Default_4x4_Intra,
                   aDest.scaling_matrix4x4[1]);
      scaling_list(br, aDest.scaling_matrix4x4[3], Default_4x4_Inter,
                   Default_4x4_Inter);
      scaling_list(br, aDest.scaling_matrix4x4[4], Default_4x4_Inter,
                   aDest.scaling_matrix4x4[3]);
      scaling_list(br, aDest.scaling_matrix4x4[5], Default_4x4_Inter,
                   aDest.scaling_matrix4x4[4]);

      scaling_list(br, aDest.scaling_matrix8x8[0], Default_8x8_Intra,
                   Default_8x8_Intra);
      scaling_list(br, aDest.scaling_matrix8x8[1], Default_8x8_Inter,
                   Default_8x8_Inter);
      if (aDest.chroma_format_idc == 3) {
        scaling_list(br, aDest.scaling_matrix8x8[2], Default_8x8_Intra,
                     aDest.scaling_matrix8x8[0]);
        scaling_list(br, aDest.scaling_matrix8x8[3], Default_8x8_Inter,
                     aDest.scaling_matrix8x8[1]);
        scaling_list(br, aDest.scaling_matrix8x8[4], Default_8x8_Intra,
                     aDest.scaling_matrix8x8[2]);
        scaling_list(br, aDest.scaling_matrix8x8[5], Default_8x8_Inter,
                     aDest.scaling_matrix8x8[3]);
      }
    }
  } else if (aDest.profile_idc == 183) {
    // CAVLC 4:4:4 Intra profile implies 4:0:0.
    aDest.chroma_format_idc = 0;
  } else {
    // default value if chroma_format_idc isn't set.
    aDest.chroma_format_idc = 1;
  }
  READUE(log2_max_frame_num, 12);
  aDest.log2_max_frame_num += 4;  // store log2(MaxFrameNum), not the minus4 form
  READUE(pic_order_cnt_type, 2);
  if (aDest.pic_order_cnt_type == 0) {
    READUE(log2_max_pic_order_cnt_lsb, 12);
    aDest.log2_max_pic_order_cnt_lsb += 4;
  } else if (aDest.pic_order_cnt_type == 1) {
    aDest.delta_pic_order_always_zero_flag = br.ReadBit();
    // NOTE(review): the spec range is -2^31..2^31-1; the bounds below look
    // like a transcription of "-231..231" — kept as-is to preserve behavior.
    READSE(offset_for_non_ref_pic, -231, 230);
    READSE(offset_for_top_to_bottom_field, -231, 230);
    uint32_t num_ref_frames_in_pic_order_cnt_cycle = br.ReadUE();
    for (uint32_t i = 0; i < num_ref_frames_in_pic_order_cnt_cycle; i++) {
      br.ReadSE();  // offset_for_ref_frame[i]
    }
  }
  aDest.max_num_ref_frames = br.ReadUE();
  aDest.gaps_in_frame_num_allowed_flag = br.ReadBit();
  // Stored as counts (minus1 fields already incremented).
  aDest.pic_width_in_mbs = br.ReadUE() + 1;
  aDest.pic_height_in_map_units = br.ReadUE() + 1;
  aDest.frame_mbs_only_flag = br.ReadBit();
  if (!aDest.frame_mbs_only_flag) {
    // Interlaced: map units are fields, so the frame is twice as tall.
    aDest.pic_height_in_map_units *= 2;
    aDest.mb_adaptive_frame_field_flag = br.ReadBit();
  }
  aDest.direct_8x8_inference_flag = br.ReadBit();
  aDest.frame_cropping_flag = br.ReadBit();
  if (aDest.frame_cropping_flag) {
    aDest.frame_crop_left_offset = br.ReadUE();
    aDest.frame_crop_right_offset = br.ReadUE();
    aDest.frame_crop_top_offset = br.ReadUE();
    aDest.frame_crop_bottom_offset = br.ReadUE();
  }

  aDest.sample_ratio = 1.0f;
  aDest.vui_parameters_present_flag = br.ReadBit();
  if (aDest.vui_parameters_present_flag) {
    if (!vui_parameters(br, aDest)) {
      return false;
    }
  }

  // Calculate common values.

  uint8_t ChromaArrayType =
      aDest.separate_colour_plane_flag ? 0 : aDest.chroma_format_idc;
  // Calculate width.
  uint32_t CropUnitX = 1;
  uint32_t SubWidthC = aDest.chroma_format_idc == 3 ? 1 : 2;
  if (ChromaArrayType != 0) {
    CropUnitX = SubWidthC;
  }

  // Calculate Height
  uint32_t CropUnitY = 2 - aDest.frame_mbs_only_flag;
  uint32_t SubHeightC = aDest.chroma_format_idc <= 1 ? 2 : 1;
  if (ChromaArrayType != 0) {
    CropUnitY *= SubHeightC;
  }

  // Crop offsets are only applied when they can neither overflow the
  // multiplication nor crop the picture away entirely.
  uint32_t width = aDest.pic_width_in_mbs * 16;
  uint32_t height = aDest.pic_height_in_map_units * 16;
  if (aDest.frame_crop_left_offset <=
          std::numeric_limits<int32_t>::max() / 4 / CropUnitX &&
      aDest.frame_crop_right_offset <=
          std::numeric_limits<int32_t>::max() / 4 / CropUnitX &&
      aDest.frame_crop_top_offset <=
          std::numeric_limits<int32_t>::max() / 4 / CropUnitY &&
      aDest.frame_crop_bottom_offset <=
          std::numeric_limits<int32_t>::max() / 4 / CropUnitY &&
      (aDest.frame_crop_left_offset + aDest.frame_crop_right_offset) *
              CropUnitX <
          width &&
      (aDest.frame_crop_top_offset + aDest.frame_crop_bottom_offset) *
              CropUnitY <
          height) {
    aDest.crop_left = aDest.frame_crop_left_offset * CropUnitX;
    aDest.crop_right = aDest.frame_crop_right_offset * CropUnitX;
    aDest.crop_top = aDest.frame_crop_top_offset * CropUnitY;
    aDest.crop_bottom = aDest.frame_crop_bottom_offset * CropUnitY;
  } else {
    // Nonsensical value, ignore them.
    aDest.crop_left = aDest.crop_right = aDest.crop_top = aDest.crop_bottom = 0;
  }

  aDest.pic_width = width - aDest.crop_left - aDest.crop_right;
  aDest.pic_height = height - aDest.crop_top - aDest.crop_bottom;

  aDest.interlaced = !aDest.frame_mbs_only_flag;

  // Determine display size by applying the sample aspect ratio to the
  // decoded size.
  if (aDest.sample_ratio > 1.0) {
    // Increase the intrinsic width
    aDest.display_width =
        ConditionDimension(aDest.pic_width * aDest.sample_ratio);
    aDest.display_height = aDest.pic_height;
  } else {
    // Increase the intrinsic height
    aDest.display_width = aDest.pic_width;
    aDest.display_height =
        ConditionDimension(aDest.pic_height / aDest.sample_ratio);
  }

  aDest.valid = true;

  return true;
}
+
+/* static */
+bool H264::vui_parameters(BitReader& aBr, SPSData& aDest) {
+ aDest.aspect_ratio_info_present_flag = aBr.ReadBit();
+ if (aDest.aspect_ratio_info_present_flag) {
+ aDest.aspect_ratio_idc = aBr.ReadBits(8);
+ aDest.sar_width = aDest.sar_height = 0;
+
+ // From E.2.1 VUI parameters semantics (ITU-T H.264 02/2014)
+ switch (aDest.aspect_ratio_idc) {
+ case 0:
+ // Unspecified
+ break;
+ case 1:
+ /*
+ 1:1
+ 7680x4320 16:9 frame without horizontal overscan
+ 3840x2160 16:9 frame without horizontal overscan
+ 1280x720 16:9 frame without horizontal overscan
+ 1920x1080 16:9 frame without horizontal overscan (cropped from
+ 1920x1088) 640x480 4:3 frame without horizontal overscan
+ */
+ aDest.sample_ratio = 1.0f;
+ break;
+ case 2:
+ /*
+ 12:11
+ 720x576 4:3 frame with horizontal overscan
+ 352x288 4:3 frame without horizontal overscan
+ */
+ aDest.sample_ratio = 12.0 / 11.0;
+ break;
+ case 3:
+ /*
+ 10:11
+ 720x480 4:3 frame with horizontal overscan
+ 352x240 4:3 frame without horizontal overscan
+ */
+ aDest.sample_ratio = 10.0 / 11.0;
+ break;
+ case 4:
+ /*
+ 16:11
+ 720x576 16:9 frame with horizontal overscan
+ 528x576 4:3 frame without horizontal overscan
+ */
+ aDest.sample_ratio = 16.0 / 11.0;
+ break;
+ case 5:
+ /*
+ 40:33
+ 720x480 16:9 frame with horizontal overscan
+ 528x480 4:3 frame without horizontal overscan
+ */
+ aDest.sample_ratio = 40.0 / 33.0;
+ break;
+ case 6:
+ /*
+ 24:11
+ 352x576 4:3 frame without horizontal overscan
+ 480x576 16:9 frame with horizontal overscan
+ */
+ aDest.sample_ratio = 24.0 / 11.0;
+ break;
+ case 7:
+ /*
+ 20:11
+ 352x480 4:3 frame without horizontal overscan
+ 480x480 16:9 frame with horizontal overscan
+ */
+ aDest.sample_ratio = 20.0 / 11.0;
+ break;
+ case 8:
+ /*
+ 32:11
+ 352x576 16:9 frame without horizontal overscan
+ */
+ aDest.sample_ratio = 32.0 / 11.0;
+ break;
+ case 9:
+ /*
+ 80:33
+ 352x480 16:9 frame without horizontal overscan
+ */
+ aDest.sample_ratio = 80.0 / 33.0;
+ break;
+ case 10:
+ /*
+ 18:11
+ 480x576 4:3 frame with horizontal overscan
+ */
+ aDest.sample_ratio = 18.0 / 11.0;
+ break;
+ case 11:
+ /*
+ 15:11
+ 480x480 4:3 frame with horizontal overscan
+ */
+ aDest.sample_ratio = 15.0 / 11.0;
+ break;
+ case 12:
+ /*
+ 64:33
+ 528x576 16:9 frame with horizontal overscan
+ */
+ aDest.sample_ratio = 64.0 / 33.0;
+ break;
+ case 13:
+ /*
+ 160:99
+ 528x480 16:9 frame without horizontal overscan
+ */
+ aDest.sample_ratio = 160.0 / 99.0;
+ break;
+ case 14:
+ /*
+ 4:3
+ 1440x1080 16:9 frame without horizontal overscan
+ */
+ aDest.sample_ratio = 4.0 / 3.0;
+ break;
+ case 15:
+ /*
+ 3:2
+ 1280x1080 16:9 frame without horizontal overscan
+ */
+ aDest.sample_ratio = 3.2 / 2.0;
+ break;
+ case 16:
+ /*
+ 2:1
+ 960x1080 16:9 frame without horizontal overscan
+ */
+ aDest.sample_ratio = 2.0 / 1.0;
+ break;
+ case 255:
+ /* Extended_SAR */
+ aDest.sar_width = aBr.ReadBits(16);
+ aDest.sar_height = aBr.ReadBits(16);
+ if (aDest.sar_width && aDest.sar_height) {
+ aDest.sample_ratio = float(aDest.sar_width) / float(aDest.sar_height);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (aBr.ReadBit()) { // overscan_info_present_flag
+ aDest.overscan_appropriate_flag = aBr.ReadBit();
+ }
+
+ if (aBr.ReadBit()) { // video_signal_type_present_flag
+ aDest.video_format = aBr.ReadBits(3);
+ aDest.video_full_range_flag = aBr.ReadBit();
+ aDest.colour_description_present_flag = aBr.ReadBit();
+ if (aDest.colour_description_present_flag) {
+ aDest.colour_primaries = aBr.ReadBits(8);
+ aDest.transfer_characteristics = aBr.ReadBits(8);
+ aDest.matrix_coefficients = aBr.ReadBits(8);
+ }
+ }
+
+ aDest.chroma_loc_info_present_flag = aBr.ReadBit();
+ if (aDest.chroma_loc_info_present_flag) {
+ BitReader& br = aBr; // so that macro READUE works
+ READUE(chroma_sample_loc_type_top_field, 5);
+ READUE(chroma_sample_loc_type_bottom_field, 5);
+ }
+
+ bool timing_info_present_flag = aBr.ReadBit();
+ if (timing_info_present_flag) {
+ aBr.ReadBits(32); // num_units_in_tick
+ aBr.ReadBits(32); // time_scale
+ aBr.ReadBit(); // fixed_frame_rate_flag
+ }
+ return true;
+}
+
+/* static */
+bool H264::DecodeSPSFromExtraData(const mozilla::MediaByteBuffer* aExtraData,
+ SPSData& aDest) {
+ SPSNALIterator it(aExtraData);
+ if (!it) {
+ return false;
+ }
+ return (*it).GetSPSData(aDest);
+}
+
+/* static */
+bool H264::EnsureSPSIsSane(SPSData& aSPS) {
+ bool valid = true;
+ static const float default_aspect = 4.0f / 3.0f;
+ if (aSPS.sample_ratio <= 0.0f || aSPS.sample_ratio > 6.0f) {
+ if (aSPS.pic_width && aSPS.pic_height) {
+ aSPS.sample_ratio = (float)aSPS.pic_width / (float)aSPS.pic_height;
+ } else {
+ aSPS.sample_ratio = default_aspect;
+ }
+ aSPS.display_width = aSPS.pic_width;
+ aSPS.display_height = aSPS.pic_height;
+ valid = false;
+ }
+ if (aSPS.max_num_ref_frames > 16) {
+ aSPS.max_num_ref_frames = 16;
+ valid = false;
+ }
+ return valid;
+}
+
+/* static */
+uint32_t H264::ComputeMaxRefFrames(const mozilla::MediaByteBuffer* aExtraData) {
+ uint32_t maxRefFrames = 4;
+ // Retrieve video dimensions from H264 SPS NAL.
+ SPSData spsdata;
+ if (DecodeSPSFromExtraData(aExtraData, spsdata)) {
+ // max_num_ref_frames determines the size of the sliding window
+ // we need to queue that many frames in order to guarantee proper
+ // pts frames ordering. Use a minimum of 4 to ensure proper playback of
+ // non compliant videos.
+ maxRefFrames =
+ std::min(std::max(maxRefFrames, spsdata.max_num_ref_frames + 1), 16u);
+ }
+ return maxRefFrames;
+}
+
/* static */ H264::FrameType H264::GetFrameType(
    const mozilla::MediaRawData* aSample) {
  if (!AnnexB::IsAVCC(aSample)) {
    // We must have a valid AVCC frame with extradata.
    return FrameType::INVALID;
  }
  MOZ_ASSERT(aSample->Data());

  // Byte 4 of the AVCC extradata: the low 2 bits hold lengthSizeMinusOne.
  int nalLenSize = ((*aSample->mExtraData)[4] & 3) + 1;

  BufferReader reader(aSample->Data(), aSample->Size());

  // Walk the length-prefixed NAL units and return I_FRAME as soon as one NAL
  // proves the sample is a keyframe / recovery point.
  while (reader.Remaining() >= nalLenSize) {
    uint32_t nalLen = 0;
    switch (nalLenSize) {
      case 1:
        nalLen = reader.ReadU8().unwrapOr(0);
        break;
      case 2:
        nalLen = reader.ReadU16().unwrapOr(0);
        break;
      case 3:
        nalLen = reader.ReadU24().unwrapOr(0);
        break;
      case 4:
        nalLen = reader.ReadU32().unwrapOr(0);
        break;
    }
    if (!nalLen) {
      // Zero-length (or unreadable) NAL: skip to the next length prefix.
      continue;
    }
    const uint8_t* p = reader.Read(nalLen);
    if (!p) {
      return FrameType::INVALID;
    }
    // The low 5 bits of the first NAL byte are nal_unit_type.
    int8_t nalType = *p & 0x1f;
    if (nalType == H264_NAL_IDR_SLICE) {
      // IDR NAL.
      return FrameType::I_FRAME;
    } else if (nalType == H264_NAL_SEI) {
      // A recovery-point SEI also marks a usable random access point.
      RefPtr<mozilla::MediaByteBuffer> decodedNAL = DecodeNALUnit(p, nalLen);
      SEIRecoveryData data;
      if (DecodeRecoverySEI(decodedNAL, data)) {
        return FrameType::I_FRAME;
      }
    } else if (nalType == H264_NAL_SLICE) {
      // Non-IDR slice: inspect slice_type for an I or SI slice.
      RefPtr<mozilla::MediaByteBuffer> decodedNAL = DecodeNALUnit(p, nalLen);
      if (DecodeISlice(decodedNAL)) {
        return FrameType::I_FRAME;
      }
    }
  }

  return FrameType::OTHER;
}
+
/* static */ already_AddRefed<mozilla::MediaByteBuffer> H264::ExtractExtraData(
    const mozilla::MediaRawData* aSample) {
  MOZ_ASSERT(AnnexB::IsAVCC(aSample));

  // Scan an AVCC sample for in-band SPS/PPS NALs and rebuild an
  // AVCDecoderConfigurationRecord from them. Returns an empty buffer when no
  // usable SPS is found, nullptr on invalid/OOM conditions.
  RefPtr<mozilla::MediaByteBuffer> extradata = new mozilla::MediaByteBuffer;

  // SPS content
  nsTArray<uint8_t> sps;
  ByteWriter<BigEndian> spsw(sps);
  int numSps = 0;
  // PPS content
  nsTArray<uint8_t> pps;
  ByteWriter<BigEndian> ppsw(pps);
  int numPps = 0;

  // Byte 4 of the AVCC extradata: the low 2 bits hold lengthSizeMinusOne.
  int nalLenSize = ((*aSample->mExtraData)[4] & 3) + 1;

  size_t sampleSize = aSample->Size();
  if (aSample->mCrypto.IsEncrypted()) {
    // The content is encrypted, we can only parse the non-encrypted data.
    MOZ_ASSERT(aSample->mCrypto.mPlainSizes.Length() > 0);
    if (aSample->mCrypto.mPlainSizes.Length() == 0 ||
        aSample->mCrypto.mPlainSizes[0] > sampleSize) {
      // This is invalid content.
      return nullptr;
    }
    sampleSize = aSample->mCrypto.mPlainSizes[0];
  }

  BufferReader reader(aSample->Data(), sampleSize);

  // Decoded SPS, indexed by seq_parameter_set_id, used for duplicate
  // detection.
  nsTArray<SPSData> SPSTable;
  // If we encounter SPS with the same id but different content, we will stop
  // attempting to detect duplicates.
  bool checkDuplicate = true;

  // Find SPS and PPS NALUs in AVCC data
  while (reader.Remaining() > nalLenSize) {
    uint32_t nalLen = 0;
    switch (nalLenSize) {
      case 1:
        Unused << reader.ReadU8().map(
            [&](uint8_t x) mutable { return nalLen = x; });
        break;
      case 2:
        Unused << reader.ReadU16().map(
            [&](uint16_t x) mutable { return nalLen = x; });
        break;
      case 3:
        Unused << reader.ReadU24().map(
            [&](uint32_t x) mutable { return nalLen = x; });
        break;
      case 4:
        Unused << reader.ReadU32().map(
            [&](uint32_t x) mutable { return nalLen = x; });
        break;
    }
    const uint8_t* p = reader.Read(nalLen);
    if (!p) {
      // The read failed, but we may already have some SPS + PPS data so
      // break out of reading and process what we have, if any.
      break;
    }
    // The low 5 bits of the first NAL byte are nal_unit_type.
    uint8_t nalType = *p & 0x1f;

    if (nalType == H264_NAL_SPS) {
      RefPtr<mozilla::MediaByteBuffer> sps = DecodeNALUnit(p, nalLen);
      SPSData data;
      if (!DecodeSPS(sps, data)) {
        // Invalid SPS, ignore.
        continue;
      }
      uint8_t spsId = data.seq_parameter_set_id;
      if (spsId >= SPSTable.Length()) {
        if (!SPSTable.SetLength(spsId + 1, fallible)) {
          // OOM.
          return nullptr;
        }
      }
      if (checkDuplicate && SPSTable[spsId].valid && SPSTable[spsId] == data) {
        // Duplicate ignore.
        continue;
      }
      if (SPSTable[spsId].valid) {
        // We already have detected a SPS with this Id. Just to be safe we
        // disable SPS duplicate detection.
        checkDuplicate = false;
      } else {
        SPSTable[spsId] = data;
      }
      numSps++;
      // Store the raw (still escaped) NAL with a 16-bit length prefix.
      if (!spsw.WriteU16(nalLen) || !spsw.Write(p, nalLen)) {
        return extradata.forget();
      }
    } else if (nalType == H264_NAL_PPS) {
      numPps++;
      if (!ppsw.WriteU16(nalLen) || !ppsw.Write(p, nalLen)) {
        return extradata.forget();
      }
    }
  }

  // We ignore PPS data if we didn't find a SPS as we would be unable to
  // decode it anyway.
  numPps = numSps ? numPps : 0;

  // Assemble the AVCDecoderConfigurationRecord; sps[3..5] are the
  // profile / profile-compat / level bytes of the first stored SPS payload
  // (after its 2-byte length prefix and NAL header byte).
  if (numSps && sps.Length() > 5) {
    extradata->AppendElement(1);  // version
    extradata->AppendElement(sps[3]);  // profile
    extradata->AppendElement(sps[4]);  // profile compat
    extradata->AppendElement(sps[5]);  // level
    extradata->AppendElement(0xfc | 3);  // nal size - 1
    extradata->AppendElement(0xe0 | numSps);
    extradata->AppendElements(sps.Elements(), sps.Length());
    extradata->AppendElement(numPps);
    if (numPps) {
      extradata->AppendElements(pps.Elements(), pps.Length());
    }
  }

  return extradata.forget();
}
+
+/* static */
+bool H264::HasSPS(const mozilla::MediaByteBuffer* aExtraData) {
+ return NumSPS(aExtraData) > 0;
+}
+
+/* static */
+uint8_t H264::NumSPS(const mozilla::MediaByteBuffer* aExtraData) {
+ if (!aExtraData || aExtraData->IsEmpty()) {
+ return 0;
+ }
+
+ BufferReader reader(aExtraData);
+ if (!reader.Read(5)) {
+ return 0;
+ }
+ auto res = reader.ReadU8();
+ if (res.isErr()) {
+ return 0;
+ }
+ return res.unwrap() & 0x1f;
+}
+
+/* static */
+bool H264::CompareExtraData(const mozilla::MediaByteBuffer* aExtraData1,
+ const mozilla::MediaByteBuffer* aExtraData2) {
+ if (aExtraData1 == aExtraData2) {
+ return true;
+ }
+ uint8_t numSPS = NumSPS(aExtraData1);
+ if (numSPS == 0 || numSPS != NumSPS(aExtraData2)) {
+ return false;
+ }
+
+ // We only compare if the SPS are the same as the various H264 decoders can
+ // deal with in-band change of PPS.
+
+ SPSNALIterator it1(aExtraData1);
+ SPSNALIterator it2(aExtraData2);
+
+ while (it1 && it2) {
+ if (*it1 != *it2) {
+ return false;
+ }
+ ++it1;
+ ++it2;
+ }
+ return true;
+}
+
// Read a SEI variable-length integer as per 7.3.2.3.1 (ff_byte* last_byte
// coding): each 0xFF byte adds 255, and the first non-0xFF byte terminates
// the value. Propagates any read error from the BufferReader.
static inline Result<Ok, nsresult> ReadSEIInt(BufferReader& aBr,
                                              uint32_t& aOutput) {
  uint8_t tmpByte;

  aOutput = 0;
  MOZ_TRY_VAR(tmpByte, aBr.ReadU8());
  while (tmpByte == 0xFF) {
    aOutput += 255;
    MOZ_TRY_VAR(tmpByte, aBr.ReadU8());
  }
  aOutput += tmpByte;  // this is the last byte
  return Ok();
}
+
+/* static */
+bool H264::DecodeISlice(const mozilla::MediaByteBuffer* aSlice) {
+ if (!aSlice) {
+ return false;
+ }
+
+ // According to ITU-T Rec H.264 Table 7.3.3, read the slice type from
+ // slice_header, and the slice type 2 and 7 are representing I slice.
+ BitReader br(aSlice);
+ // Skip `first_mb_in_slice`
+ br.ReadUE();
+ // The value of slice type can go from 0 to 9, but the value between 5 to
+ // 9 are actually equal to 0 to 4.
+ const uint32_t sliceType = br.ReadUE() % 5;
+ return sliceType == SLICE_TYPES::I_SLICE || sliceType == SI_SLICE;
+}
+
/* static */
bool H264::DecodeRecoverySEI(const mozilla::MediaByteBuffer* aSEI,
                             SEIRecoveryData& aDest) {
  if (!aSEI) {
    return false;
  }
  // sei_rbsp() as per 7.3.2.3 Supplemental enhancement information RBSP syntax
  BufferReader br(aSEI);

  do {
    // sei_message() as per
    // 7.3.2.3.1 Supplemental enhancement information message syntax
    uint32_t payloadType = 0;
    if (ReadSEIInt(br, payloadType).isErr()) {
      return false;
    }

    uint32_t payloadSize = 0;
    if (ReadSEIInt(br, payloadSize).isErr()) {
      return false;
    }

    // sei_payload(payloadType, payloadSize) as per
    // D.1 SEI payload syntax.
    const uint8_t* p = br.Read(payloadSize);
    if (!p) {
      return false;
    }
    if (payloadType == 6) {  // SEI_RECOVERY_POINT
      if (payloadSize == 0) {
        // Invalid content, ignore.
        continue;
      }
      // D.1.7 Recovery point SEI message syntax
      // Note: this inner BitReader intentionally shadows the outer
      // BufferReader `br`; it only reads within this payload.
      BitReader br(p, payloadSize * 8);
      aDest.recovery_frame_cnt = br.ReadUE();
      aDest.exact_match_flag = br.ReadBit();
      aDest.broken_link_flag = br.ReadBit();
      aDest.changing_slice_group_idc = br.ReadBits(2);
      return true;
    }
  } while (br.PeekU8().isOk() &&
           br.PeekU8().unwrap() !=
               0x80);  // more_rbsp_data() msg[offset] != 0x80
  // ignore the trailing bits rbsp_trailing_bits();
  return false;
}
+
/*static */ already_AddRefed<mozilla::MediaByteBuffer> H264::CreateExtraData(
    uint8_t aProfile, uint8_t aConstraints, uint8_t aLevel,
    const gfx::IntSize& aSize) {
  // Synthesize AVCC extradata for the given profile/constraints/level and
  // size by rewriting a template SPS of a 144p video: profile, level and
  // dimensions are replaced, the rest (including its VUI) is copied verbatim.
  const uint8_t originSPS[] = {0x4d, 0x40, 0x0c, 0xe8, 0x80, 0x80, 0x9d,
                               0x80, 0xb5, 0x01, 0x01, 0x01, 0x40, 0x00,
                               0x00, 0x00, 0x40, 0x00, 0x00, 0x0f, 0x03,
                               0xc5, 0x0a, 0x44, 0x80};

  RefPtr<MediaByteBuffer> extraData = new MediaByteBuffer();
  extraData->AppendElements(originSPS, sizeof(originSPS));
  BitReader br(extraData, BitReader::GetBitLength(extraData));

  RefPtr<MediaByteBuffer> sps = new MediaByteBuffer();
  BitWriter bw(sps);

  br.ReadBits(8);  // Skip original profile_idc
  bw.WriteU8(aProfile);
  br.ReadBits(8);  // Skip original constraint flags && reserved_zero_2bits
  aConstraints =
      aConstraints & ~0x3;  // Ensure reserved_zero_2bits are set to 0
  bw.WriteBits(aConstraints, 8);
  br.ReadBits(8);  // Skip original level_idc
  bw.WriteU8(aLevel);
  bw.WriteUE(br.ReadUE());  // seq_parameter_set_id (0 stored on 1 bit)

  // High-profile variants must carry explicit chroma/bit-depth fields, which
  // the template (a Main profile SPS) does not contain.
  if (aProfile == 100 || aProfile == 110 || aProfile == 122 ||
      aProfile == 244 || aProfile == 44 || aProfile == 83 || aProfile == 86 ||
      aProfile == 118 || aProfile == 128 || aProfile == 138 ||
      aProfile == 139 || aProfile == 134) {
    bw.WriteUE(1);  // chroma_format_idc -> always set to 4:2:0 chroma format
    bw.WriteUE(0);  // bit_depth_luma_minus8 -> always 8 bits here
    bw.WriteUE(0);  // bit_depth_chroma_minus8 -> always 8 bits here
    bw.WriteBit(false);  // qpprime_y_zero_transform_bypass_flag
    bw.WriteBit(false);  // seq_scaling_matrix_present_flag
  }

  bw.WriteBits(br.ReadBits(11),
               11);  // log2_max_frame_num to gaps_in_frame_num_allowed_flag

  // skip over original exp-golomb encoded width/height
  br.ReadUE();  // skip width
  br.ReadUE();  // skip height
  // Round the requested size up to whole 16-pixel macroblocks; the excess is
  // removed again via the cropping offsets below.
  uint32_t width = aSize.width;
  uint32_t widthNeeded = width % 16 != 0 ? (width / 16 + 1) * 16 : width;
  uint32_t height = aSize.height;
  uint32_t heightNeeded = height % 16 != 0 ? (height / 16 + 1) * 16 : height;
  bw.WriteUE(widthNeeded / 16 - 1);
  bw.WriteUE(heightNeeded / 16 - 1);
  bw.WriteBit(br.ReadBit());  // write frame_mbs_only_flag
  bw.WriteBit(br.ReadBit());  // write direct_8x8_inference_flag;
  if (widthNeeded != width || heightNeeded != height) {
    // Write cropping values
    bw.WriteBit(true);  // frame_cropping_flag = 1
    bw.WriteUE(0);      // frame_crop_left_offset
    bw.WriteUE((widthNeeded - width) / 2);  // frame_crop_right_offset
    bw.WriteUE(0);                          // frame_crop_top_offset
    bw.WriteUE((heightNeeded - height) / 2);  // frame_crop_bottom_offset
  } else {
    bw.WriteBit(false);  // frame_cropping_flag = 0
  }
  br.ReadBit();  // skip the template's frame_cropping_flag;
  // Write the remainder of the original sps (vui_parameters which sets an
  // aspect ratio of 1.0)
  while (br.BitsLeft()) {
    bw.WriteBit(br.ReadBit());
  }
  bw.CloseWithRbspTrailing();

  RefPtr<MediaByteBuffer> encodedSPS =
      EncodeNALUnit(sps->Elements(), sps->Length());
  extraData->Clear();

  // PPS bytes, presumably a minimal PPS generated alongside originSPS —
  // TODO(review): confirm it matches the rewritten SPS for all profiles.
  const uint8_t PPS[] = {0xeb, 0xef, 0x20};

  WriteExtraData(
      extraData, aProfile, aConstraints, aLevel,
      Span<const uint8_t>(encodedSPS->Elements(), encodedSPS->Length()),
      Span<const uint8_t>(PPS, sizeof(PPS)));

  return extraData.forget();
}
+
// Build an AVCDecoderConfigurationRecord from one raw SPS and one raw PPS,
// each wrapped with a synthesized NAL header byte and a 16-bit big-endian
// length prefix.
void H264::WriteExtraData(MediaByteBuffer* aDestExtraData,
                          const uint8_t aProfile, const uint8_t aConstraints,
                          const uint8_t aLevel, const Span<const uint8_t> aSPS,
                          const Span<const uint8_t> aPPS) {
  aDestExtraData->AppendElement(1);  // configurationVersion
  aDestExtraData->AppendElement(aProfile);
  aDestExtraData->AppendElement(aConstraints);
  aDestExtraData->AppendElement(aLevel);
  // NOTE(review): the spec sets the 6 reserved bits to 1 here (0xfc | 3, as
  // ExtractExtraData does); readers masking with & 3 are unaffected — confirm.
  aDestExtraData->AppendElement(3);  // nalLENSize-1
  aDestExtraData->AppendElement(1);  // numSPS
  uint8_t c[2];
  // Length covers the NAL header byte plus the payload.
  mozilla::BigEndian::writeUint16(&c[0], aSPS.Length() + 1);
  aDestExtraData->AppendElements(c, 2);
  // NAL header: forbidden_zero_bit=0, nal_ref_idc=3, nal_unit_type=SPS.
  aDestExtraData->AppendElement((0x00 << 7) | (0x3 << 5) | H264_NAL_SPS);
  aDestExtraData->AppendElements(aSPS.Elements(), aSPS.Length());

  aDestExtraData->AppendElement(1);  // numPPS
  mozilla::BigEndian::writeUint16(&c[0], aPPS.Length() + 1);
  aDestExtraData->AppendElements(c, 2);
  // NAL header: forbidden_zero_bit=0, nal_ref_idc=3, nal_unit_type=PPS.
  aDestExtraData->AppendElement((0x00 << 7) | (0x3 << 5) | H264_NAL_PPS);
  aDestExtraData->AppendElements(aPPS.Elements(), aPPS.Length());
}
+
+#undef READUE
+#undef READSE
+
+} // namespace mozilla
diff --git a/dom/media/platforms/agnostic/bytestreams/H264.h b/dom/media/platforms/agnostic/bytestreams/H264.h
new file mode 100644
index 0000000000..37c0e5bc14
--- /dev/null
+++ b/dom/media/platforms/agnostic/bytestreams/H264.h
@@ -0,0 +1,525 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef MP4_DEMUXER_H264_H_
+#define MP4_DEMUXER_H264_H_
+
+#include "DecoderData.h"
+#include "mozilla/gfx/Types.h"
+
+namespace mozilla {
+class BitReader;
+
+// Spec 7.4.2.1
+#define MAX_SPS_COUNT 32
+#define MAX_PPS_COUNT 256
+
// NAL unit types (nal_unit_type), as per ITU-T H.264 Table 7-1.
enum NAL_TYPES {
  H264_NAL_SLICE = 1,      // Coded slice of a non-IDR picture
  H264_NAL_DPA = 2,        // Coded slice data partition A
  H264_NAL_DPB = 3,        // Coded slice data partition B
  H264_NAL_DPC = 4,        // Coded slice data partition C
  H264_NAL_IDR_SLICE = 5,  // Coded slice of an IDR picture
  H264_NAL_SEI = 6,        // Supplemental enhancement information
  H264_NAL_SPS = 7,        // Sequence parameter set
  H264_NAL_PPS = 8,        // Picture parameter set
  H264_NAL_AUD = 9,        // Access unit delimiter
  H264_NAL_END_SEQUENCE = 10,
  H264_NAL_END_STREAM = 11,
  H264_NAL_FILLER_DATA = 12,
  H264_NAL_SPS_EXT = 13,
  H264_NAL_PREFIX = 14,
  H264_NAL_AUXILIARY_SLICE = 19,
  H264_NAL_SLICE_EXT = 20,
  H264_NAL_SLICE_EXT_DVC = 21,
};
+
// Slice types according to ITU-T Rec H.264 (2017/04) Table 7-6. Encoded
// slice_type values 5..9 are equivalent to (value - 5); callers normalize
// with "% 5" before comparing against these.
enum SLICE_TYPES {
  P_SLICE = 0,
  B_SLICE = 1,
  I_SLICE = 2,
  SP_SLICE = 3,
  SI_SLICE = 4,
};
+
+struct SPSData {
+ bool operator==(const SPSData& aOther) const;
+ bool operator!=(const SPSData& aOther) const;
+
+ gfx::YUVColorSpace ColorSpace() const;
+ gfx::ColorDepth ColorDepth() const;
+
+ bool valid;
+
+ /* Decoded Members */
+ /*
+ pic_width is the decoded width according to:
+ pic_width = ((pic_width_in_mbs_minus1 + 1) * 16)
+ - (frame_crop_left_offset + frame_crop_right_offset) * 2
+ */
+ uint32_t pic_width;
+ /*
+ pic_height is the decoded height according to:
+ pic_height = (2 - frame_mbs_only_flag) * ((pic_height_in_map_units_minus1 +
+ 1) * 16)
+ - (frame_crop_top_offset + frame_crop_bottom_offset) * 2
+ */
+ uint32_t pic_height;
+
+ bool interlaced;
+
+ /*
+ Displayed size.
+ display_width and display_height are adjusted according to the display
+ sample aspect ratio.
+ */
+ uint32_t display_width;
+ uint32_t display_height;
+
+ float sample_ratio;
+
+ uint32_t crop_left;
+ uint32_t crop_right;
+ uint32_t crop_top;
+ uint32_t crop_bottom;
+
+ /*
+ H264 decoding parameters according to ITU-T H.264 (T-REC-H.264-201402-I/en)
+ http://www.itu.int/rec/T-REC-H.264-201402-I/en
+ */
+
+ bool constraint_set0_flag;
+ bool constraint_set1_flag;
+ bool constraint_set2_flag;
+ bool constraint_set3_flag;
+ bool constraint_set4_flag;
+ bool constraint_set5_flag;
+
+ /*
+ profile_idc and level_idc indicate the profile and level to which the coded
+ video sequence conforms when the SVC sequence parameter set is the active
+ SVC sequence parameter set.
+ */
+ uint8_t profile_idc;
+ uint8_t level_idc;
+
+ /*
+ seq_parameter_set_id identifies the sequence parameter set that is referred
+ to by the picture parameter set. The value of seq_parameter_set_id shall be
+ in the range of 0 to 31, inclusive.
+ */
+ uint8_t seq_parameter_set_id;
+
+ /*
+ chroma_format_idc specifies the chroma sampling relative to the luma
+ sampling as specified in clause 6.2. The value of chroma_format_idc shall be
+ in the range of 0 to 3, inclusive. When chroma_format_idc is not present,
+ it shall be inferred to be equal to 1 (4:2:0 chroma format).
+ When profile_idc is equal to 183, chroma_format_idc shall be equal to 0
+ (4:0:0 chroma format).
+ */
+ uint8_t chroma_format_idc;
+
+ /*
+ bit_depth_luma_minus8 specifies the bit depth of the samples of the luma
+ array and the value of the luma quantisation parameter range offset
+ QpBdOffset Y , as specified by
+ BitDepth Y = 8 + bit_depth_luma_minus8 (7-3)
+ QpBdOffset Y = 6 * bit_depth_luma_minus8 (7-4)
+ When bit_depth_luma_minus8 is not present, it shall be inferred to be equal
+ to 0. bit_depth_luma_minus8 shall be in the range of 0 to 6, inclusive.
+ */
+ uint8_t bit_depth_luma_minus8;
+
+ /*
+ bit_depth_chroma_minus8 specifies the bit depth of the samples of the chroma
+ arrays and the value of the chroma quantisation parameter range offset
+ QpBdOffset C , as specified by
+ BitDepth C = 8 + bit_depth_chroma_minus8 (7-5)
+ QpBdOffset C = 6 * bit_depth_chroma_minus8 (7-6)
+ When bit_depth_chroma_minus8 is not present, it shall be inferred to be
+ equal to 0. bit_depth_chroma_minus8 shall be in the range of 0 to 6,
+ inclusive.
+ */
+ uint8_t bit_depth_chroma_minus8;
+
+ /*
+ separate_colour_plane_flag equal to 1 specifies that the three colour
+ components of the 4:4:4 chroma format are coded separately.
+ separate_colour_plane_flag equal to 0 specifies that the colour components
+ are not coded separately. When separate_colour_plane_flag is not present,
+ it shall be inferred to be equal to 0. When separate_colour_plane_flag is
+ equal to 1, the primary coded picture consists of three separate components,
+ each of which consists of coded samples of one colour plane (Y, Cb or Cr)
+ that each use the monochrome coding syntax. In this case, each colour plane
+ is associated with a specific colour_plane_id value.
+ */
+ bool separate_colour_plane_flag;
+
+ /*
+ seq_scaling_matrix_present_flag equal to 1 specifies that the flags
+ seq_scaling_list_present_flag[ i ] for i = 0..7 or
+ i = 0..11 are present. seq_scaling_matrix_present_flag equal to 0 specifies
+ that these flags are not present and the sequence-level scaling list
+ specified by Flat_4x4_16 shall be inferred for i = 0..5 and the
+ sequence-level scaling list specified by Flat_8x8_16 shall be inferred for
+ i = 6..11. When seq_scaling_matrix_present_flag is not present, it shall be
+ inferred to be equal to 0.
+ */
+ bool seq_scaling_matrix_present_flag;
+
+ /*
+ log2_max_frame_num_minus4 specifies the value of the variable
+ MaxFrameNum that is used in frame_num related derivations as
+ follows:
+
+    MaxFrameNum = 2^( log2_max_frame_num_minus4 + 4 ). The value of
+    log2_max_frame_num_minus4 shall be in the range of 0 to 12, inclusive.
+ */
+ uint8_t log2_max_frame_num;
+
+ /*
+ pic_order_cnt_type specifies the method to decode picture order
+ count (as specified in subclause 8.2.1). The value of
+ pic_order_cnt_type shall be in the range of 0 to 2, inclusive.
+ */
+ uint8_t pic_order_cnt_type;
+
+ /*
+ log2_max_pic_order_cnt_lsb_minus4 specifies the value of the
+ variable MaxPicOrderCntLsb that is used in the decoding
+ process for picture order count as specified in subclause
+ 8.2.1 as follows:
+
+    MaxPicOrderCntLsb = 2^( log2_max_pic_order_cnt_lsb_minus4 + 4 )
+
+ The value of log2_max_pic_order_cnt_lsb_minus4 shall be in
+ the range of 0 to 12, inclusive.
+ */
+ uint8_t log2_max_pic_order_cnt_lsb;
+
+ /*
+ delta_pic_order_always_zero_flag equal to 1 specifies that
+ delta_pic_order_cnt[ 0 ] and delta_pic_order_cnt[ 1 ] are
+ not present in the slice headers of the sequence and shall
+ be inferred to be equal to 0.
+ */
+ bool delta_pic_order_always_zero_flag;
+
+ /*
+ offset_for_non_ref_pic is used to calculate the picture
+ order count of a non-reference picture as specified in
+ 8.2.1. The value of offset_for_non_ref_pic shall be in the
+    range of -2^31 to 2^31 - 1, inclusive.
+ */
+ int8_t offset_for_non_ref_pic;
+
+ /*
+ offset_for_top_to_bottom_field is used to calculate the
+ picture order count of a bottom field as specified in
+ subclause 8.2.1. The value of offset_for_top_to_bottom_field
+    shall be in the range of -2^31 to 2^31 - 1, inclusive.
+ */
+ int8_t offset_for_top_to_bottom_field;
+
+ /*
+ max_num_ref_frames specifies the maximum number of short-term and
+ long-term reference frames, complementary reference field pairs,
+ and non-paired reference fields that may be used by the decoding
+ process for inter prediction of any picture in the
+ sequence. max_num_ref_frames also determines the size of the sliding
+ window operation as specified in subclause 8.2.5.3. The value of
+ max_num_ref_frames shall be in the range of 0 to MaxDpbFrames (as
+ specified in subclause A.3.1 or A.3.2), inclusive.
+ */
+ uint32_t max_num_ref_frames;
+
+ /*
+ gaps_in_frame_num_value_allowed_flag specifies the allowed
+ values of frame_num as specified in subclause 7.4.3 and the
+ decoding process in case of an inferred gap between values of
+ frame_num as specified in subclause 8.2.5.2.
+ */
+ bool gaps_in_frame_num_allowed_flag;
+
+ /*
+ pic_width_in_mbs_minus1 plus 1 specifies the width of each
+ decoded picture in units of macroblocks. 16 macroblocks in a row
+ */
+ uint32_t pic_width_in_mbs;
+
+ /*
+ pic_height_in_map_units_minus1 plus 1 specifies the height in
+ slice group map units of a decoded frame or field. 16
+ macroblocks in each column.
+ */
+ uint32_t pic_height_in_map_units;
+
+ /*
+ frame_mbs_only_flag equal to 0 specifies that coded pictures of
+ the coded video sequence may either be coded fields or coded
+ frames. frame_mbs_only_flag equal to 1 specifies that every
+ coded picture of the coded video sequence is a coded frame
+ containing only frame macroblocks.
+ */
+ bool frame_mbs_only_flag;
+
+ /*
+ mb_adaptive_frame_field_flag equal to 0 specifies no
+ switching between frame and field macroblocks within a
+ picture. mb_adaptive_frame_field_flag equal to 1 specifies
+ the possible use of switching between frame and field
+ macroblocks within frames. When mb_adaptive_frame_field_flag
+ is not present, it shall be inferred to be equal to 0.
+ */
+ bool mb_adaptive_frame_field_flag;
+
+ /*
+ direct_8x8_inference_flag specifies the method used in the derivation
+ process for luma motion vectors for B_Skip, B_Direct_16x16 and B_Direct_8x8
+ as specified in clause 8.4.1.2. When frame_mbs_only_flag is equal to 0,
+ direct_8x8_inference_flag shall be equal to 1.
+ */
+ bool direct_8x8_inference_flag;
+
+ /*
+ frame_cropping_flag equal to 1 specifies that the frame cropping
+ offset parameters follow next in the sequence parameter
+ set. frame_cropping_flag equal to 0 specifies that the frame
+ cropping offset parameters are not present.
+ */
+ bool frame_cropping_flag;
+ uint32_t frame_crop_left_offset;
+ uint32_t frame_crop_right_offset;
+ uint32_t frame_crop_top_offset;
+ uint32_t frame_crop_bottom_offset;
+
+ // VUI Parameters
+
+ /*
+ vui_parameters_present_flag equal to 1 specifies that the
+ vui_parameters( ) syntax structure as specified in Annex E is
+ present. vui_parameters_present_flag equal to 0 specifies that
+ the vui_parameters( ) syntax structure as specified in Annex E
+ is not present.
+ */
+ bool vui_parameters_present_flag;
+
+ /*
+ aspect_ratio_info_present_flag equal to 1 specifies that
+ aspect_ratio_idc is present. aspect_ratio_info_present_flag
+ equal to 0 specifies that aspect_ratio_idc is not present.
+ */
+ bool aspect_ratio_info_present_flag;
+
+ /*
+ aspect_ratio_idc specifies the value of the sample aspect
+ ratio of the luma samples. Table E-1 shows the meaning of
+ the code. When aspect_ratio_idc indicates Extended_SAR, the
+ sample aspect ratio is represented by sar_width and
+ sar_height. When the aspect_ratio_idc syntax element is not
+ present, aspect_ratio_idc value shall be inferred to be
+ equal to 0.
+ */
+ uint8_t aspect_ratio_idc;
+ uint32_t sar_width;
+ uint32_t sar_height;
+
+ /*
+ video_signal_type_present_flag equal to 1 specifies that video_format,
+ video_full_range_flag and colour_description_present_flag are present.
+ video_signal_type_present_flag equal to 0, specify that video_format,
+ video_full_range_flag and colour_description_present_flag are not present.
+ */
+ bool video_signal_type_present_flag;
+
+ /*
+   overscan_info_present_flag equal to 1 specifies that the
+ overscan_appropriate_flag is present. When overscan_info_present_flag is
+ equal to 0 or is not present, the preferred display method for the video
+ signal is unspecified (Unspecified).
+ */
+ bool overscan_info_present_flag;
+ /*
+ overscan_appropriate_flag equal to 1 indicates that the cropped decoded
+ pictures output are suitable for display using overscan.
+ overscan_appropriate_flag equal to 0 indicates that the cropped decoded
+ pictures output contain visually important information in the entire region
+ out to the edges of the cropping rectangle of the picture
+ */
+ bool overscan_appropriate_flag;
+
+ /*
+ video_format indicates the representation of the pictures as specified in
+ Table E-2, before being coded in accordance with this
+ Recommendation | International Standard. When the video_format syntax
+ element is not present, video_format value shall be inferred to be equal
+ to 5. (Unspecified video format)
+ */
+ uint8_t video_format;
+
+ /*
+ video_full_range_flag indicates the black level and range of the luma and
+ chroma signals as derived from E′Y, E′PB, and E′PR or E′R, E′G, and E′B
+ real-valued component signals.
+ When the video_full_range_flag syntax element is not present, the value of
+ video_full_range_flag shall be inferred to be equal to 0.
+ */
+ bool video_full_range_flag;
+
+ /*
+   colour_description_present_flag equal to 1 specifies that colour_primaries,
+ transfer_characteristics and matrix_coefficients are present.
+ colour_description_present_flag equal to 0 specifies that colour_primaries,
+ transfer_characteristics and matrix_coefficients are not present.
+ */
+ bool colour_description_present_flag;
+
+ /*
+ colour_primaries indicates the chromaticity coordinates of the source
+ primaries as specified in Table E-3 in terms of the CIE 1931 definition of
+ x and y as specified by ISO 11664-1.
+ When the colour_primaries syntax element is not present, the value of
+ colour_primaries shall be inferred to be equal to 2 (the chromaticity is
+ unspecified or is determined by the application).
+ */
+ uint8_t colour_primaries;
+
+ /*
+ transfer_characteristics indicates the opto-electronic transfer
+ characteristic of the source picture as specified in Table E-4 as a function
+ of a linear optical intensity input Lc with a nominal real-valued range of 0
+ to 1.
+ When the transfer_characteristics syntax element is not present, the value
+ of transfer_characteristics shall be inferred to be equal to 2
+ (the transfer characteristics are unspecified or are determined by the
+ application).
+ */
+ uint8_t transfer_characteristics;
+
+ uint8_t matrix_coefficients;
+ bool chroma_loc_info_present_flag;
+ /*
+ The value of chroma_sample_loc_type_top_field and
+ chroma_sample_loc_type_bottom_field shall be in the range of 0 to 5,
+ inclusive
+ */
+ uint8_t chroma_sample_loc_type_top_field;
+ uint8_t chroma_sample_loc_type_bottom_field;
+
+ bool scaling_matrix_present;
+ uint8_t scaling_matrix4x4[6][16];
+ uint8_t scaling_matrix8x8[6][64];
+
+ SPSData();
+};
+
+// Values parsed from an H.264 recovery point SEI message. The per-field
+// comments below quote the ITU-T H.264 specification; all fields are
+// value-initialized so a default-constructed instance is well defined.
+struct SEIRecoveryData {
+  /*
+   recovery_frame_cnt specifies the recovery point of output pictures in output
+   order. All decoded pictures in output order are indicated to be correct or
+   approximately correct in content starting at the output order position of
+   the reference picture having the frame_num equal to the frame_num of the VCL
+   NAL units for the current access unit incremented by recovery_frame_cnt in
+   modulo MaxFrameNum arithmetic. recovery_frame_cnt shall be in the range of 0
+   to MaxFrameNum − 1, inclusive.
+  */
+  uint32_t recovery_frame_cnt = 0;
+  /*
+   exact_match_flag indicates whether decoded pictures at and subsequent to the
+   specified recovery point in output order derived by starting the decoding
+   process at the access unit associated with the recovery point SEI message
+   shall be an exact match to the pictures that would be produced by starting
+   the decoding process at the location of a previous IDR access unit in the
+   NAL unit stream. The value 0 indicates that the match need not be exact and
+   the value 1 indicates that the match shall be exact.
+  */
+  bool exact_match_flag = false;
+  /*
+   broken_link_flag indicates the presence or absence of a broken link in the
+   NAL unit stream at the location of the recovery point SEI message */
+  bool broken_link_flag = false;
+  /*
+   changing_slice_group_idc equal to 0 indicates that decoded pictures are
+   correct or approximately correct in content at and subsequent to the
+   recovery point in output order when all macroblocks of the primary coded
+   pictures are decoded within the changing slice group period
+  */
+  uint8_t changing_slice_group_idc = 0;
+};
+
+// Static helpers for parsing and generating H.264 (AVC) bitstream and AVCC
+// extradata structures: SPS decoding, extradata extraction/creation/compare
+// and frame classification. All methods are stateless.
+class H264 {
+ public:
+  /* Check if out of band extradata contains a SPS NAL */
+  static bool HasSPS(const mozilla::MediaByteBuffer* aExtraData);
+
+  // Extract SPS and PPS NALs from aSample by looking into each NALs.
+  // aSample must be in AVCC format.
+  static already_AddRefed<mozilla::MediaByteBuffer> ExtractExtraData(
+      const mozilla::MediaRawData* aSample);
+
+  // Return true if both extradata are equal.
+  static bool CompareExtraData(const mozilla::MediaByteBuffer* aExtraData1,
+                               const mozilla::MediaByteBuffer* aExtraData2);
+
+  // Ensure that the SPS data makes sense. Returns true if the SPS data was
+  // sane and false otherwise; when false, the content will have been
+  // adjusted accordingly.
+  static bool EnsureSPSIsSane(SPSData& aSPS);
+
+  // Decode the SPS found in the out of band extradata and fill aDest.
+  static bool DecodeSPSFromExtraData(const mozilla::MediaByteBuffer* aExtraData,
+                                     SPSData& aDest);
+
+  /* Decode SPS NAL RBSP and fill SPSData structure */
+  static bool DecodeSPS(const mozilla::MediaByteBuffer* aSPS, SPSData& aDest);
+
+  // If the given aExtraData is valid, return the aExtraData.max_num_ref_frames
+  // clamped to be in the range of [4, 16]; otherwise return 4.
+  static uint32_t ComputeMaxRefFrames(
+      const mozilla::MediaByteBuffer* aExtraData);
+
+  enum class FrameType {
+    I_FRAME,
+    OTHER,
+    INVALID,
+  };
+
+  // Returns the frame type. Returns I_FRAME if the sample is an IDR
+  // (Instantaneous Decoding Refresh) Picture.
+  static FrameType GetFrameType(const mozilla::MediaRawData* aSample);
+
+  // Create a dummy extradata, useful to create a decoder and test the
+  // capabilities of the decoder.
+  static already_AddRefed<mozilla::MediaByteBuffer> CreateExtraData(
+      uint8_t aProfile, uint8_t aConstraints, uint8_t aLevel,
+      const gfx::IntSize& aSize);
+
+  // Build AVCC-style extradata into aDestExtraData from the supplied
+  // profile/constraints/level bytes and raw SPS/PPS NAL payloads.
+  static void WriteExtraData(mozilla::MediaByteBuffer* aDestExtraData,
+                             const uint8_t aProfile, const uint8_t aConstraints,
+                             const uint8_t aLevel,
+                             const Span<const uint8_t> aSPS,
+                             const Span<const uint8_t> aPPS);
+
+ private:
+  friend class SPSNAL;
+  /* Extract RAW BYTE SEQUENCE PAYLOAD from NAL content.
+     Returns nullptr if invalid content.
+     This is compliant to ITU H.264 7.3.1 Syntax in tabular form NAL unit syntax
+   */
+  static already_AddRefed<mozilla::MediaByteBuffer> DecodeNALUnit(
+      const uint8_t* aNAL, size_t aLength);
+  // Inverse of DecodeNALUnit: re-apply emulation prevention to raw bytes.
+  static already_AddRefed<mozilla::MediaByteBuffer> EncodeNALUnit(
+      const uint8_t* aNAL, size_t aLength);
+  // Parse the VUI parameters section of a SPS into aDest.
+  static bool vui_parameters(mozilla::BitReader& aBr, SPSData& aDest);
+  // Read HRD parameters, all data is ignored.
+  static void hrd_parameters(mozilla::BitReader& aBr);
+  // Number of SPS entries recorded in the AVCC extradata.
+  static uint8_t NumSPS(const mozilla::MediaByteBuffer* aExtraData);
+  // Decode SEI payload and return true if the SEI NAL indicates a recovery
+  // point.
+  static bool DecodeRecoverySEI(const mozilla::MediaByteBuffer* aSEI,
+                                SEIRecoveryData& aDest);
+  // Decode NAL Slice payload and return true if its slice type is I slice or
+  // SI slice.
+  static bool DecodeISlice(const mozilla::MediaByteBuffer* aSlice);
+};
+
+} // namespace mozilla
+
+#endif // MP4_DEMUXER_H264_H_
diff --git a/dom/media/platforms/agnostic/bytestreams/gtest/TestAnnexB.cpp b/dom/media/platforms/agnostic/bytestreams/gtest/TestAnnexB.cpp
new file mode 100644
index 0000000000..abc4b2ae8d
--- /dev/null
+++ b/dom/media/platforms/agnostic/bytestreams/gtest/TestAnnexB.cpp
@@ -0,0 +1,144 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
+
+#include "gtest/gtest.h"
+
+#include "AnnexB.h"
+#include "ByteWriter.h"
+#include "H264.h"
+
+namespace mozilla {
+
+// Create AVCC style extra data (the contents on an AVCC box). Note
+// NALLengthSize will be 4 so AVCC samples need to set their data up
+// accordingly. See H264::CreateExtraData for the layout of the buffer.
+static already_AddRefed<MediaByteBuffer> GetExtraData() {
+  // Extra data with
+  // - baseline profile(0x42 == 66).
+  // - constraint flags 0 and 1 set(0xc0) -- normal for baseline profile.
+  // - level 4.0 (0x28 == 40).
+  // - 1280 * 720 resolution.
+  return H264::CreateExtraData(0x42, 0xc0, 0x28, {1280, 720});
+}
+
+// Create an AVCC style sample with requested size in bytes. This sample is
+// setup to contain a single NAL (in practice samples can contain many). The
+// sample sets its NAL size to aSampleSize - 4 and stores that size in the first
+// 4 bytes. Aside from the NAL size at the start, the data is uninitialized
+// (beware)! aSampleSize is a uint32_t as samples larger than can be expressed
+// by a uint32_t are not to spec. Returns nullptr (after flagging a test
+// failure) if aSampleSize cannot hold the 4 byte NAL length prefix.
+static already_AddRefed<MediaRawData> GetAvccSample(uint32_t aSampleSize) {
+  if (aSampleSize < 4) {
+    // Stop tests asking for insane samples. Bail out here: otherwise
+    // `aSampleSize - 4` below underflows to a huge uint32_t and we would
+    // attempt a near-4GiB allocation.
+    EXPECT_FALSE(true) << "Samples should be requested with sane sizes";
+    return nullptr;
+  }
+  nsTArray<uint8_t> sampleData;
+
+  // Write the NAL size.
+  ByteWriter<BigEndian> writer(sampleData);
+  EXPECT_TRUE(writer.WriteU32(aSampleSize - 4));
+
+  // Write the 'NAL'. Beware, this data is uninitialized.
+  sampleData.AppendElements(static_cast<size_t>(aSampleSize) - 4);
+  RefPtr<MediaRawData> rawData =
+      new MediaRawData{sampleData.Elements(), sampleData.Length()};
+  EXPECT_NE(rawData->Data(), nullptr);
+
+  // Set extra data.
+  rawData->mExtraData = GetExtraData();
+  return rawData.forget();
+}
+
+// Test that conversion from AVCC to AnnexB works as expected.
+TEST(AnnexB, AnnexBConversion)
+{
+  RefPtr<MediaRawData> rawData{GetAvccSample(128)};
+
+  {
+    // Test conversion of data when not adding SPS works as expected.
+    RefPtr<MediaRawData> rawDataClone = rawData->Clone();
+    Result<Ok, nsresult> result =
+        AnnexB::ConvertSampleToAnnexB(rawDataClone, /* aAddSps */ false);
+    EXPECT_TRUE(result.isOk()) << "Conversion should succeed";
+    EXPECT_EQ(rawDataClone->Size(), rawData->Size())
+        << "AnnexB sample should be the same size as the AVCC sample -- the 4 "
+           "byte NAL length data (AVCC) is replaced with 4 bytes of NAL "
+           "separator (AnnexB)";
+    EXPECT_TRUE(AnnexB::IsAnnexB(rawDataClone))
+        << "The sample should be AnnexB following conversion";
+  }
+
+  {
+    // Test that the SPS data is not added if the frame is not a keyframe.
+    RefPtr<MediaRawData> rawDataClone = rawData->Clone();
+    rawDataClone->mKeyframe =
+        false;  // false is the default, but let's be sure.
+    Result<Ok, nsresult> result =
+        AnnexB::ConvertSampleToAnnexB(rawDataClone, /* aAddSps */ true);
+    EXPECT_TRUE(result.isOk()) << "Conversion should succeed";
+    EXPECT_EQ(rawDataClone->Size(), rawData->Size())
+        << "AnnexB sample should be the same size as the AVCC sample -- the 4 "
+           "byte NAL length data (AVCC) is replaced with 4 bytes of NAL "
+           "separator (AnnexB) and SPS data is not added as the frame is not a "
+           "keyframe";
+    EXPECT_TRUE(AnnexB::IsAnnexB(rawDataClone))
+        << "The sample should be AnnexB following conversion";
+  }
+
+  {
+    // Test that the SPS data is added to keyframes.
+    RefPtr<MediaRawData> rawDataClone = rawData->Clone();
+    rawDataClone->mKeyframe = true;
+    Result<Ok, nsresult> result =
+        AnnexB::ConvertSampleToAnnexB(rawDataClone, /* aAddSps */ true);
+    EXPECT_TRUE(result.isOk()) << "Conversion should succeed";
+    EXPECT_GT(rawDataClone->Size(), rawData->Size())
+        << "AnnexB sample should be larger than the AVCC sample because we've "
+           "added SPS data";
+    EXPECT_TRUE(AnnexB::IsAnnexB(rawDataClone))
+        << "The sample should be AnnexB following conversion";
+    // We could verify the SPS and PPS data we add, but we don't have great
+    // tooling to do so. Consider doing so in future.
+  }
+
+  {
+    // Test conversion involving subsample encryption doesn't overflow values.
+    const uint32_t sampleSize = UINT16_MAX * 2;
+    RefPtr<MediaRawData> rawCryptoData{GetAvccSample(sampleSize)};
+    // Need to be a keyframe to test prepending SPS + PPS to sample.
+    rawCryptoData->mKeyframe = true;
+    UniquePtr<MediaRawDataWriter> rawDataWriter = rawCryptoData->CreateWriter();
+
+    rawDataWriter->mCrypto.mCryptoScheme = CryptoScheme::Cenc;
+
+    // We want to check that the clear size doesn't overflow during conversion.
+    // This size originates in a uint16_t, but since it can grow during AnnexB
+    // conversion we cover it here.
+    const uint16_t clearSize = UINT16_MAX - 10;
+    // Set a clear size very close to uint16_t max value.
+    rawDataWriter->mCrypto.mPlainSizes.AppendElement(clearSize);
+    rawDataWriter->mCrypto.mEncryptedSizes.AppendElement(sampleSize -
+                                                         clearSize);
+
+    RefPtr<MediaRawData> rawCryptoDataClone = rawCryptoData->Clone();
+    Result<Ok, nsresult> result =
+        AnnexB::ConvertSampleToAnnexB(rawCryptoDataClone, /* aAddSps */ true);
+    EXPECT_TRUE(result.isOk()) << "Conversion should succeed";
+    EXPECT_GT(rawCryptoDataClone->Size(), rawCryptoData->Size())
+        << "AnnexB sample should be larger than the AVCC sample because we've "
+           "added SPS data";
+    EXPECT_GT(rawCryptoDataClone->mCrypto.mPlainSizes[0],
+              rawCryptoData->mCrypto.mPlainSizes[0])
+        << "Conversion should have increased clear data sizes without overflow";
+    EXPECT_EQ(rawCryptoDataClone->mCrypto.mEncryptedSizes[0],
+              rawCryptoData->mCrypto.mEncryptedSizes[0])
+        << "Conversion should not affect encrypted sizes";
+    EXPECT_TRUE(AnnexB::IsAnnexB(rawCryptoDataClone))
+        << "The sample should be AnnexB following conversion";
+  }
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/agnostic/bytestreams/gtest/moz.build b/dom/media/platforms/agnostic/bytestreams/gtest/moz.build
new file mode 100644
index 0000000000..be0bb9b3ae
--- /dev/null
+++ b/dom/media/platforms/agnostic/bytestreams/gtest/moz.build
@@ -0,0 +1,11 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# Tests are built into the gtest variant of libxul.
+UNIFIED_SOURCES += [
+    "TestAnnexB.cpp",
+]
+
+FINAL_LIBRARY = "xul-gtest"
diff --git a/dom/media/platforms/agnostic/bytestreams/moz.build b/dom/media/platforms/agnostic/bytestreams/moz.build
new file mode 100644
index 0000000000..225cb427f8
--- /dev/null
+++ b/dom/media/platforms/agnostic/bytestreams/moz.build
@@ -0,0 +1,35 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+with Files("**"):
+    BUG_COMPONENT = ("Core", "Audio/Video: Playback")
+
+# gtest-only unit tests live in their own subdirectory.
+TEST_DIRS += [
+    "gtest",
+]
+
+# Headers made available to the rest of the tree.
+EXPORTS += [
+    "Adts.h",
+    "AnnexB.h",
+    "H264.h",
+]
+
+UNIFIED_SOURCES += [
+    "Adts.cpp",
+    "AnnexB.cpp",
+    "H264.cpp",
+]
+
+# Pulls in headers from dom/media/mp4.
+LOCAL_INCLUDES += [
+    "../../../mp4/",
+]
+
+FINAL_LIBRARY = "xul"
+
+# Suppress warnings for now.
+CXXFLAGS += [
+    "-Wno-sign-compare",
+]
diff --git a/dom/media/platforms/agnostic/eme/ChromiumCDMVideoDecoder.cpp b/dom/media/platforms/agnostic/eme/ChromiumCDMVideoDecoder.cpp
new file mode 100644
index 0000000000..e71632e6d3
--- /dev/null
+++ b/dom/media/platforms/agnostic/eme/ChromiumCDMVideoDecoder.cpp
@@ -0,0 +1,156 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "ChromiumCDMVideoDecoder.h"
+#include "ChromiumCDMProxy.h"
+#include "content_decryption_module.h"
+#include "GMPService.h"
+#include "GMPVideoDecoder.h"
+#include "MP4Decoder.h"
+#include "VPXDecoder.h"
+
+namespace mozilla {
+
+ChromiumCDMVideoDecoder::ChromiumCDMVideoDecoder(
+    const GMPVideoDecoderParams& aParams, CDMProxy* aCDMProxy)
+    // GetCDMParent() may return null here if the CDM has already been shut
+    // down; Init() and Shutdown() explicitly handle a null mCDMParent.
+    : mCDMParent(aCDMProxy->AsChromiumCDMProxy()->GetCDMParent()),
+      mConfig(aParams.mConfig),
+      mCrashHelper(aParams.mCrashHelper),
+      mGMPThread(GetGMPThread()),
+      mImageContainer(aParams.mImageContainer),
+      mKnowsCompositor(aParams.mKnowsCompositor) {}
+
+ChromiumCDMVideoDecoder::~ChromiumCDMVideoDecoder() = default;
+
+// Map an H.264 profile_idc value (the caller reads it from byte 1 of the
+// AVCC extradata) onto the CDM's VideoCodecProfile enumeration. Values not
+// listed map to kUnknownVideoCodecProfile.
+static uint32_t ToCDMH264Profile(uint8_t aProfile) {
+  switch (aProfile) {
+    case 66:
+      return cdm::VideoCodecProfile::kH264ProfileBaseline;
+    case 77:
+      return cdm::VideoCodecProfile::kH264ProfileMain;
+    case 88:
+      return cdm::VideoCodecProfile::kH264ProfileExtended;
+    case 100:
+      return cdm::VideoCodecProfile::kH264ProfileHigh;
+    case 110:
+      return cdm::VideoCodecProfile::kH264ProfileHigh10;
+    case 122:
+      return cdm::VideoCodecProfile::kH264ProfileHigh422;
+    case 144:
+      return cdm::VideoCodecProfile::kH264ProfileHigh444Predictive;
+  }
+  return cdm::VideoCodecProfile::kUnknownVideoCodecProfile;
+}
+
+RefPtr<MediaDataDecoder::InitPromise> ChromiumCDMVideoDecoder::Init() {
+  if (!mCDMParent) {
+    // Must have failed to get the CDMParent from the ChromiumCDMProxy
+    // in our constructor; the MediaKeys must have shut down the CDM
+    // before we had a chance to start up the decoder.
+    return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+  }
+
+  // Translate our VideoInfo into the CDM-side decoder configuration.
+  gmp::CDMVideoDecoderConfig config;
+  if (MP4Decoder::IsH264(mConfig.mMimeType)) {
+    config.mCodec() = cdm::VideoCodec::kCodecH264;
+    // The profile is read from byte 1 of the AVCC extradata.
+    config.mProfile() =
+        ToCDMH264Profile(mConfig.mExtraData->SafeElementAt(1, 0));
+    config.mExtraData() = mConfig.mExtraData->Clone();
+    // H.264 samples are converted to AnnexB before reaching the CDM; this
+    // flag drives NeedsConversion().
+    mConvertToAnnexB = true;
+  } else if (VPXDecoder::IsVP8(mConfig.mMimeType)) {
+    config.mCodec() = cdm::VideoCodec::kCodecVp8;
+    config.mProfile() = cdm::VideoCodecProfile::kProfileNotNeeded;
+  } else if (VPXDecoder::IsVP9(mConfig.mMimeType)) {
+    config.mCodec() = cdm::VideoCodec::kCodecVp9;
+    config.mProfile() = cdm::VideoCodecProfile::kProfileNotNeeded;
+  } else {
+    // Unsupported codec.
+    return MediaDataDecoder::InitPromise::CreateAndReject(
+        NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+  }
+  config.mImageWidth() = mConfig.mImage.width;
+  config.mImageHeight() = mConfig.mImage.height;
+  // Default to unencrypted; override below from the configured crypto scheme.
+  config.mEncryptionScheme() = cdm::EncryptionScheme::kUnencrypted;
+  switch (mConfig.mCrypto.mCryptoScheme) {
+    case CryptoScheme::None:
+      break;
+    case CryptoScheme::Cenc:
+      config.mEncryptionScheme() = cdm::EncryptionScheme::kCenc;
+      break;
+    case CryptoScheme::Cbcs:
+      config.mEncryptionScheme() = cdm::EncryptionScheme::kCbcs;
+      break;
+    default:
+      MOZ_ASSERT_UNREACHABLE("Should not have unrecognized encryption type");
+      break;
+  }
+
+  // Perform the actual initialization on the GMP thread; everything the
+  // lambda needs is captured by value / strong reference.
+  RefPtr<gmp::ChromiumCDMParent> cdm = mCDMParent;
+  VideoInfo info = mConfig;
+  RefPtr<layers::ImageContainer> imageContainer = mImageContainer;
+  RefPtr<layers::KnowsCompositor> knowsCompositor = mKnowsCompositor;
+  return InvokeAsync(mGMPThread, __func__,
+                     [cdm, config, info, imageContainer, knowsCompositor]() {
+                       return cdm->InitializeVideoDecoder(
+                           config, info, imageContainer, knowsCompositor);
+                     });
+}
+
+// Human-readable name identifying this decoder implementation.
+nsCString ChromiumCDMVideoDecoder::GetDescriptionName() const {
+  return "chromium cdm video decoder"_ns;
+}
+
+nsCString ChromiumCDMVideoDecoder::GetCodecName() const {
+  // Map the configured MIME type onto a short codec name; anything we don't
+  // recognize reports as "unknown".
+  if (MP4Decoder::IsH264(mConfig.mMimeType)) {
+    return "h264"_ns;
+  } else if (VPXDecoder::IsVP8(mConfig.mMimeType)) {
+    return "vp8"_ns;
+  } else if (VPXDecoder::IsVP9(mConfig.mMimeType)) {
+    return "vp9"_ns;
+  }
+  return "unknown"_ns;
+}
+
+// H.264 input must be converted to AnnexB (mConvertToAnnexB is set in
+// Init()); other codecs pass samples through unchanged.
+MediaDataDecoder::ConversionRequired ChromiumCDMVideoDecoder::NeedsConversion()
+    const {
+  return mConvertToAnnexB ? ConversionRequired::kNeedAnnexB
+                          : ConversionRequired::kNeedNone;
+}
+
+RefPtr<MediaDataDecoder::DecodePromise> ChromiumCDMVideoDecoder::Decode(
+    MediaRawData* aSample) {
+  // Keep strong references to the CDM and sample for the duration of the
+  // asynchronous call on the GMP thread.
+  RefPtr<gmp::ChromiumCDMParent> cdm = mCDMParent;
+  RefPtr<MediaRawData> sample = aSample;
+  return InvokeAsync(mGMPThread, __func__, [cdm, sample]() {
+    return cdm->DecryptAndDecodeFrame(sample);
+  });
+}
+
+RefPtr<MediaDataDecoder::FlushPromise> ChromiumCDMVideoDecoder::Flush() {
+  MOZ_ASSERT(mCDMParent);
+  // Hop to the GMP thread, keeping the CDM alive via a strong reference.
+  RefPtr<gmp::ChromiumCDMParent> cdm = mCDMParent;
+  return InvokeAsync(mGMPThread, __func__,
+                     [cdm]() { return cdm->FlushVideoDecoder(); });
+}
+
+RefPtr<MediaDataDecoder::DecodePromise> ChromiumCDMVideoDecoder::Drain() {
+  MOZ_ASSERT(mCDMParent);
+  // Hop to the GMP thread, keeping the CDM alive via a strong reference.
+  RefPtr<gmp::ChromiumCDMParent> cdm = mCDMParent;
+  return InvokeAsync(mGMPThread, __func__, [cdm]() { return cdm->Drain(); });
+}
+
+RefPtr<ShutdownPromise> ChromiumCDMVideoDecoder::Shutdown() {
+  if (!mCDMParent) {
+    // Must have failed to get the CDMParent from the ChromiumCDMProxy
+    // in our constructor; the MediaKeys must have shut down the CDM
+    // before we had a chance to start up the decoder.
+    return ShutdownPromise::CreateAndResolve(true, __func__);
+  }
+  // Shut the CDM-side decoder down on the GMP thread.
+  RefPtr<gmp::ChromiumCDMParent> cdm = mCDMParent;
+  return InvokeAsync(mGMPThread, __func__,
+                     [cdm]() { return cdm->ShutdownVideoDecoder(); });
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/agnostic/eme/ChromiumCDMVideoDecoder.h b/dom/media/platforms/agnostic/eme/ChromiumCDMVideoDecoder.h
new file mode 100644
index 0000000000..c177bf2e48
--- /dev/null
+++ b/dom/media/platforms/agnostic/eme/ChromiumCDMVideoDecoder.h
@@ -0,0 +1,54 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef ChromiumCDMVideoDecoder_h_
+#define ChromiumCDMVideoDecoder_h_
+
+#include "ChromiumCDMParent.h"
+#include "PlatformDecoderModule.h"
+#include "mozilla/layers/KnowsCompositor.h"
+
+namespace mozilla {
+
+class CDMProxy;
+struct GMPVideoDecoderParams;
+
+DDLoggedTypeDeclNameAndBase(ChromiumCDMVideoDecoder, MediaDataDecoder);
+
+// MediaDataDecoder that forwards all decode/decrypt operations to a
+// gmp::ChromiumCDMParent, dispatching each call to the GMP thread.
+class ChromiumCDMVideoDecoder final
+    : public MediaDataDecoder,
+      public DecoderDoctorLifeLogger<ChromiumCDMVideoDecoder> {
+ public:
+  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(ChromiumCDMVideoDecoder, final);
+
+  ChromiumCDMVideoDecoder(const GMPVideoDecoderParams& aParams,
+                          CDMProxy* aCDMProxy);
+
+  RefPtr<InitPromise> Init() override;
+  RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
+  RefPtr<FlushPromise> Flush() override;
+  RefPtr<DecodePromise> Drain() override;
+  RefPtr<ShutdownPromise> Shutdown() override;
+  nsCString GetDescriptionName() const override;
+  nsCString GetCodecName() const override;
+  ConversionRequired NeedsConversion() const override;
+
+ private:
+  ~ChromiumCDMVideoDecoder();
+
+  // May be null if the CDM was shut down before this decoder was created;
+  // Init()/Shutdown() handle that case.
+  RefPtr<gmp::ChromiumCDMParent> mCDMParent;
+  const VideoInfo mConfig;
+  RefPtr<GMPCrashHelper> mCrashHelper;
+  // Thread on which all CDM calls are performed.
+  nsCOMPtr<nsISerialEventTarget> mGMPThread;
+  RefPtr<layers::ImageContainer> mImageContainer;
+  RefPtr<layers::KnowsCompositor> mKnowsCompositor;
+  MozPromiseHolder<InitPromise> mInitPromise;
+  // Set during Init() for H.264 so NeedsConversion() requests AnnexB.
+  bool mConvertToAnnexB = false;
+};
+
+} // namespace mozilla
+
+#endif // ChromiumCDMVideoDecoder_h_
diff --git a/dom/media/platforms/agnostic/eme/DecryptThroughputLimit.h b/dom/media/platforms/agnostic/eme/DecryptThroughputLimit.h
new file mode 100644
index 0000000000..bafb387f83
--- /dev/null
+++ b/dom/media/platforms/agnostic/eme/DecryptThroughputLimit.h
@@ -0,0 +1,103 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef DecryptThroughputLimit_h
+#define DecryptThroughputLimit_h
+
+#include <deque>
+
+#include "MediaTimer.h"
+#include "PlatformDecoderModule.h"
+
+namespace mozilla {
+
+// We throttle our decrypt so that we don't decrypt more than a certain
+// duration of samples per second. This is to work around bugs in the
+// Widevine CDM. See bugs 1338924, 1342822, 1718223.
+class DecryptThroughputLimit {
+ public:
+  // aTargetThread: thread on which delayed resolutions are scheduled.
+  // aMaxThroughputMs: decrypt budget, expressed in milliseconds of media,
+  // allowed within the sliding measurement window used by Throttle().
+  explicit DecryptThroughputLimit(nsISerialEventTarget* aTargetThread,
+                                  uint32_t aMaxThroughputMs)
+      : mThrottleScheduler(aTargetThread),
+        mMaxThroughput(aMaxThroughputMs / 1000.0) {}
+
+  typedef MozPromise<RefPtr<MediaRawData>, MediaResult, true> ThrottlePromise;
+
+  // Resolves promise after a delay if necessary in order to reduce the
+  // throughput of samples sent through the CDM for decryption.
+  RefPtr<ThrottlePromise> Throttle(MediaRawData* aSample) {
+    // We should only have one decrypt request being processed at once.
+    MOZ_RELEASE_ASSERT(!mThrottleScheduler.IsScheduled());
+
+    // Sliding wall-clock window over which throughput is measured, and the
+    // media-duration budget allowed inside it.
+    const TimeDuration WindowSize = TimeDuration::FromSeconds(0.1);
+    const TimeDuration MaxThroughput =
+        TimeDuration::FromSeconds(mMaxThroughput);
+
+    // Forget decrypts that happened before the start of our window.
+    const TimeStamp now = TimeStamp::Now();
+    while (!mDecrypts.empty() &&
+           mDecrypts.front().mTimestamp < now - WindowSize) {
+      mDecrypts.pop_front();
+    }
+
+    // How much time duration of the media would we have decrypted inside the
+    // time window if we did decrypt this block?
+    TimeDuration sampleDuration = aSample->mDuration.ToTimeDuration();
+    TimeDuration durationDecrypted = sampleDuration;
+    for (const DecryptedJob& job : mDecrypts) {
+      durationDecrypted += job.mSampleDuration;
+    }
+
+    if (durationDecrypted < MaxThroughput) {
+      // If we decrypted a sample of this duration, we would *not* have
+      // decrypted more than our threshold for max throughput, over the
+      // preceding wall time window. So we're safe to proceed with this
+      // decrypt.
+      mDecrypts.push_back(DecryptedJob({now, sampleDuration}));
+      return ThrottlePromise::CreateAndResolve(aSample, __func__);
+    }
+
+    // Otherwise, we need to delay until decrypting won't exceed our
+    // throughput threshold.
+
+    RefPtr<ThrottlePromise> p = mPromiseHolder.Ensure(__func__);
+
+    TimeDuration delay = durationDecrypted - MaxThroughput;
+    TimeStamp target = now + delay;
+    RefPtr<MediaRawData> sample(aSample);
+    mThrottleScheduler.Ensure(
+        target,
+        [this, sample, sampleDuration]() {
+          mThrottleScheduler.CompleteRequest();
+          mDecrypts.push_back(DecryptedJob({TimeStamp::Now(), sampleDuration}));
+          mPromiseHolder.Resolve(sample, __func__);
+        },
+        // The scheduler is not expected to fail; assert if it does.
+        []() { MOZ_DIAGNOSTIC_ASSERT(false); });
+
+    return p;
+  }
+
+  // Cancel any pending delayed decrypt; the outstanding ThrottlePromise (if
+  // any) is rejected with NS_ERROR_DOM_MEDIA_CANCELED.
+  void Flush() {
+    mThrottleScheduler.Reset();
+    mPromiseHolder.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+  }
+
+ private:
+  // Schedules the delayed resolution of throttled decrypt requests.
+  DelayedScheduler mThrottleScheduler;
+  MozPromiseHolder<ThrottlePromise> mPromiseHolder;
+
+  // Media-duration budget (in seconds) compared against the decrypts
+  // recorded within the measurement window.
+  double mMaxThroughput;
+
+  // One record per recent decrypt: when it happened and how much media
+  // duration it covered.
+  struct DecryptedJob {
+    TimeStamp mTimestamp;
+    TimeDuration mSampleDuration;
+  };
+  std::deque<DecryptedJob> mDecrypts;
+};
+
+} // namespace mozilla
+
+#endif // DecryptThroughputLimit_h
diff --git a/dom/media/platforms/agnostic/eme/EMEDecoderModule.cpp b/dom/media/platforms/agnostic/eme/EMEDecoderModule.cpp
new file mode 100644
index 0000000000..d4477bd6cd
--- /dev/null
+++ b/dom/media/platforms/agnostic/eme/EMEDecoderModule.cpp
@@ -0,0 +1,479 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "EMEDecoderModule.h"
+
+#include <inttypes.h>
+
+#include "Adts.h"
+#include "BlankDecoderModule.h"
+#include "ChromiumCDMVideoDecoder.h"
+#include "DecryptThroughputLimit.h"
+#include "GMPDecoderModule.h"
+#include "GMPService.h"
+#include "GMPVideoDecoder.h"
+#include "MP4Decoder.h"
+#include "MediaInfo.h"
+#include "PDMFactory.h"
+#include "mozilla/CDMProxy.h"
+#include "mozilla/EMEUtils.h"
+#include "mozilla/StaticPrefs_media.h"
+#include "mozilla/UniquePtr.h"
+#include "mozilla/Unused.h"
+#include "nsClassHashtable.h"
+#include "nsServiceManagerUtils.h"
+
+namespace mozilla {
+
+typedef MozPromiseRequestHolder<DecryptPromise> DecryptPromiseRequestHolder;
+
+DDLoggedTypeDeclNameAndBase(EMEDecryptor, MediaDataDecoder);
+
+// Converts encrypted AAC samples to/from ADTS framing for the CDM, which
+// expects encrypted AAC in ADTS format (see bug 1433344, referenced where
+// this converter is created below).
+class ADTSSampleConverter {
+ public:
+  explicit ADTSSampleConverter(const AudioInfo& aInfo)
+      : mNumChannels(aInfo.mChannels)
+  // Note: we set profile to 2 if we encounter an extended profile (which
+  // set mProfile to 0 and then set mExtendedProfile) such as HE-AACv2
+  // (profile 5). These can then pass through conversion to ADTS and back.
+  // This is done as ADTS only has 2 bits for profile, and the transform
+  // subtracts one from the value. We check if the profile supplied is > 4
+  // for safety. 2 is used as a fallback value, though it seems the CDM
+  // doesn't care what is set.
+        ,
+        mProfile(aInfo.mProfile < 1 || aInfo.mProfile > 4 ? 2 : aInfo.mProfile),
+        mFrequencyIndex(Adts::GetFrequencyIndex(aInfo.mRate)) {
+    EME_LOG("ADTSSampleConverter(): aInfo.mProfile=%" PRIi8
+            " aInfo.mExtendedProfile=%" PRIi8,
+            aInfo.mProfile, aInfo.mExtendedProfile);
+    if (aInfo.mProfile < 1 || aInfo.mProfile > 4) {
+      EME_LOG(
+          "ADTSSampleConverter(): Profile not in [1, 4]! Samples will have "
+          "their profile set to 2!");
+    }
+  }
+  // Prepend an ADTS header to the (encrypted) sample in place.
+  bool Convert(MediaRawData* aSample) const {
+    return Adts::ConvertSample(mNumChannels, mFrequencyIndex, mProfile,
+                               aSample);
+  }
+  // Strip the ADTS header again after decryption.
+  bool Revert(MediaRawData* aSample) const {
+    return Adts::RevertSample(aSample);
+  }
+
+ private:
+  const uint32_t mNumChannels;
+  const uint8_t mProfile;
+  const uint8_t mFrequencyIndex;
+};
+
+// Decrypts encrypted samples via the CDMProxy, then forwards the clear
+// samples to a wrapped MediaDataDecoder. Per-sample pipeline:
+// wait-for-usable-key -> throughput throttle -> (optional ADTS convert) ->
+// CDM decrypt -> (optional ADTS revert) -> decode.
+class EMEDecryptor final : public MediaDataDecoder,
+                           public DecoderDoctorLifeLogger<EMEDecryptor> {
+ public:
+  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(EMEDecryptor, final);
+
+  EMEDecryptor(MediaDataDecoder* aDecoder, CDMProxy* aProxy,
+               TrackInfo::TrackType aType,
+               const std::function<MediaEventProducer<TrackInfo::TrackType>*()>&
+                   aOnWaitingForKey,
+               UniquePtr<ADTSSampleConverter> aConverter = nullptr)
+      : mDecoder(aDecoder),
+        mProxy(aProxy),
+        mSamplesWaitingForKey(
+            new SamplesWaitingForKey(mProxy, aType, aOnWaitingForKey)),
+        mADTSSampleConverter(std::move(aConverter)),
+        mIsShutdown(false) {
+    DDLINKCHILD("decoder", mDecoder.get());
+  }
+
+  // Binds this decryptor to the current serial event target and sets up the
+  // throughput limiter from the media.eme.max-throughput-ms pref.
+  RefPtr<InitPromise> Init() override {
+    MOZ_ASSERT(!mIsShutdown);
+    mThread = GetCurrentSerialEventTarget();
+    uint32_t maxThroughputMs = StaticPrefs::media_eme_max_throughput_ms();
+    EME_LOG("EME max-throughput-ms=%" PRIu32, maxThroughputMs);
+    mThroughputLimiter.emplace(mThread, maxThroughputMs);
+
+    return mDecoder->Init();
+  }
+
+  // Entry point: waits for a usable key, then hands the sample to
+  // ThrottleDecode(). Only one sample may be in flight at a time.
+  RefPtr<DecodePromise> Decode(MediaRawData* aSample) override {
+    MOZ_ASSERT(mThread->IsOnCurrentThread());
+    MOZ_RELEASE_ASSERT(mDecrypts.Count() == 0,
+                       "Can only process one sample at a time");
+    RefPtr<DecodePromise> p = mDecodePromise.Ensure(__func__);
+
+    RefPtr<EMEDecryptor> self = this;
+    mSamplesWaitingForKey->WaitIfKeyNotUsable(aSample)
+        ->Then(
+            mThread, __func__,
+            [self](const RefPtr<MediaRawData>& aSample) {
+              self->mKeyRequest.Complete();
+              self->ThrottleDecode(aSample);
+            },
+            [self]() { self->mKeyRequest.Complete(); })
+        ->Track(mKeyRequest);
+    return p;
+  }
+
+  // Applies the decrypt-throughput limiter before attempting the decrypt.
+  void ThrottleDecode(MediaRawData* aSample) {
+    MOZ_ASSERT(mThread->IsOnCurrentThread());
+
+    RefPtr<EMEDecryptor> self = this;
+    mThroughputLimiter->Throttle(aSample)
+        ->Then(
+            mThread, __func__,
+            [self](RefPtr<MediaRawData> aSample) {
+              self->mThrottleRequest.Complete();
+              self->AttemptDecode(aSample);
+            },
+            [self]() { self->mThrottleRequest.Complete(); })
+        ->Track(mThrottleRequest);
+  }
+
+  // Converts AAC to ADTS if needed and sends the sample to the CDM for
+  // decryption; the result arrives in Decrypted().
+  void AttemptDecode(MediaRawData* aSample) {
+    MOZ_ASSERT(mThread->IsOnCurrentThread());
+    if (mIsShutdown) {
+      NS_WARNING("EME encrypted sample arrived after shutdown");
+      mDecodePromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+      return;
+    }
+
+    if (mADTSSampleConverter && !mADTSSampleConverter->Convert(aSample)) {
+      mDecodePromise.RejectIfExists(
+          MediaResult(
+              NS_ERROR_DOM_MEDIA_FATAL_ERR,
+              RESULT_DETAIL("Failed to convert encrypted AAC sample to ADTS")),
+          __func__);
+      return;
+    }
+
+    const auto& decrypt = mDecrypts.InsertOrUpdate(
+        aSample, MakeUnique<DecryptPromiseRequestHolder>());
+    mProxy->Decrypt(aSample)
+        ->Then(mThread, __func__, this, &EMEDecryptor::Decrypted,
+               &EMEDecryptor::Decrypted)
+        ->Track(*decrypt);
+  }
+
+  // Handles the CDM's decrypt result: reverts ADTS framing, retries on
+  // NoKeyErr, rejects on other errors, otherwise clears the sample's crypto
+  // metadata and forwards it to the wrapped decoder.
+  void Decrypted(const DecryptResult& aDecrypted) {
+    MOZ_ASSERT(mThread->IsOnCurrentThread());
+    MOZ_ASSERT(aDecrypted.mSample);
+
+    UniquePtr<DecryptPromiseRequestHolder> holder;
+    mDecrypts.Remove(aDecrypted.mSample, &holder);
+    if (holder) {
+      holder->Complete();
+    } else {
+      // Decryption is not in the list of decrypt operations waiting
+      // for a result. It must have been flushed or drained. Ignore result.
+      return;
+    }
+
+    if (mADTSSampleConverter &&
+        !mADTSSampleConverter->Revert(aDecrypted.mSample)) {
+      mDecodePromise.RejectIfExists(
+          MediaResult(
+              NS_ERROR_DOM_MEDIA_FATAL_ERR,
+              RESULT_DETAIL("Failed to revert decrypted ADTS sample to AAC")),
+          __func__);
+      return;
+    }
+
+    if (mIsShutdown) {
+      NS_WARNING("EME decrypted sample arrived after shutdown");
+      return;
+    }
+
+    if (aDecrypted.mStatus == eme::NoKeyErr) {
+      // Key became unusable after we sent the sample to CDM to decrypt.
+      // Call Decode() again, so that the sample is enqueued for decryption
+      // if the key becomes usable again.
+      AttemptDecode(aDecrypted.mSample);
+    } else if (aDecrypted.mStatus != eme::Ok) {
+      mDecodePromise.RejectIfExists(
+          MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+                      RESULT_DETAIL("decrypted.mStatus=%u",
+                                    uint32_t(aDecrypted.mStatus))),
+          __func__);
+    } else {
+      MOZ_ASSERT(!mIsShutdown);
+      // The sample is no longer encrypted, so clear its crypto metadata.
+      UniquePtr<MediaRawDataWriter> writer(aDecrypted.mSample->CreateWriter());
+      writer->mCrypto = CryptoSample();
+      RefPtr<EMEDecryptor> self = this;
+      mDecoder->Decode(aDecrypted.mSample)
+          ->Then(mThread, __func__,
+                 [self](DecodePromise::ResolveOrRejectValue&& aValue) {
+                   self->mDecodeRequest.Complete();
+                   self->mDecodePromise.ResolveOrReject(std::move(aValue),
+                                                        __func__);
+                 })
+          ->Track(mDecodeRequest);
+    }
+  }
+
+  // Disconnects all in-flight requests, cancels pending decrypts/throttles,
+  // then flushes the wrapped decoder and the waiting-for-key queue.
+  RefPtr<FlushPromise> Flush() override {
+    MOZ_ASSERT(mThread->IsOnCurrentThread());
+    MOZ_ASSERT(!mIsShutdown);
+    mKeyRequest.DisconnectIfExists();
+    mThrottleRequest.DisconnectIfExists();
+    mDecodeRequest.DisconnectIfExists();
+    mDecodePromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+    mThroughputLimiter->Flush();
+    for (auto iter = mDecrypts.Iter(); !iter.Done(); iter.Next()) {
+      auto holder = iter.UserData();
+      holder->DisconnectIfExists();
+      iter.Remove();
+    }
+    RefPtr<SamplesWaitingForKey> k = mSamplesWaitingForKey;
+    return mDecoder->Flush()->Then(mThread, __func__, [k]() {
+      k->Flush();
+      return FlushPromise::CreateAndResolve(true, __func__);
+    });
+  }
+
+  // Drops any pending decrypts and drains the wrapped decoder. Callers must
+  // have waited for the outstanding decode to complete first (asserted).
+  RefPtr<DecodePromise> Drain() override {
+    MOZ_ASSERT(mThread->IsOnCurrentThread());
+    MOZ_ASSERT(!mIsShutdown);
+    MOZ_ASSERT(mDecodePromise.IsEmpty() && !mDecodeRequest.Exists(),
+               "Must wait for decoding to complete");
+    for (auto iter = mDecrypts.Iter(); !iter.Done(); iter.Next()) {
+      auto holder = iter.UserData();
+      holder->DisconnectIfExists();
+      iter.Remove();
+    }
+    return mDecoder->Drain();
+  }
+
+  RefPtr<ShutdownPromise> Shutdown() override {
+    // mThread may not be set if Init hasn't been called first.
+    MOZ_ASSERT(!mThread || mThread->IsOnCurrentThread());
+    MOZ_ASSERT(!mIsShutdown);
+    mIsShutdown = true;
+    mSamplesWaitingForKey->BreakCycles();
+    mSamplesWaitingForKey = nullptr;
+    RefPtr<MediaDataDecoder> decoder = std::move(mDecoder);
+    mProxy = nullptr;
+    return decoder->Shutdown();
+  }
+
+  nsCString GetDescriptionName() const override {
+    return mDecoder->GetDescriptionName();
+  }
+
+  nsCString GetCodecName() const override { return mDecoder->GetCodecName(); }
+
+  ConversionRequired NeedsConversion() const override {
+    return mDecoder->NeedsConversion();
+  }
+
+ private:
+  ~EMEDecryptor() = default;
+
+  RefPtr<MediaDataDecoder> mDecoder;
+  nsCOMPtr<nsISerialEventTarget> mThread;
+  RefPtr<CDMProxy> mProxy;
+  // Outstanding CDM decrypt requests keyed by sample. At most one entry at a
+  // time (see the release assert in Decode()).
+  nsClassHashtable<nsRefPtrHashKey<MediaRawData>, DecryptPromiseRequestHolder>
+      mDecrypts;
+  RefPtr<SamplesWaitingForKey> mSamplesWaitingForKey;
+  MozPromiseRequestHolder<SamplesWaitingForKey::WaitForKeyPromise> mKeyRequest;
+  Maybe<DecryptThroughputLimit> mThroughputLimiter;
+  MozPromiseRequestHolder<DecryptThroughputLimit::ThrottlePromise>
+      mThrottleRequest;
+  MozPromiseHolder<DecodePromise> mDecodePromise;
+  MozPromiseRequestHolder<DecodePromise> mDecodeRequest;
+  UniquePtr<ADTSSampleConverter> mADTSSampleConverter;
+  bool mIsShutdown;
+};
+
+// Constructor used when the proxied decoder runs on an explicit thread
+// (e.g. the GMP thread for CDM video decoding).
+EMEMediaDataDecoderProxy::EMEMediaDataDecoderProxy(
+    const CreateDecoderParams& aParams,
+    already_AddRefed<MediaDataDecoder> aProxyDecoder,
+    already_AddRefed<nsISerialEventTarget> aProxyThread, CDMProxy* aProxy)
+    : MediaDataDecoderProxy(std::move(aProxyDecoder), std::move(aProxyThread)),
+      mThread(GetCurrentSerialEventTarget()),
+      mSamplesWaitingForKey(new SamplesWaitingForKey(
+          aProxy, aParams.mType, aParams.mOnWaitingForKeyEvent)),
+      mProxy(aProxy) {}
+
+// Constructor used when the proxied decoder runs on the current serial
+// event target (no separate proxy thread supplied).
+EMEMediaDataDecoderProxy::EMEMediaDataDecoderProxy(
+    const CreateDecoderParams& aParams,
+    already_AddRefed<MediaDataDecoder> aProxyDecoder, CDMProxy* aProxy)
+    : MediaDataDecoderProxy(std::move(aProxyDecoder),
+                            do_AddRef(GetCurrentSerialEventTarget())),
+      mThread(GetCurrentSerialEventTarget()),
+      mSamplesWaitingForKey(new SamplesWaitingForKey(
+          aProxy, aParams.mType, aParams.mOnWaitingForKeyEvent)),
+      mProxy(aProxy) {}
+
+// Waits until the sample's key is usable, then forwards the (still
+// encrypted) sample to the proxied decoder — here the CDM itself decrypts
+// and decodes. The wait-for-key reject path is unreachable by design
+// (MOZ_CRASH below).
+RefPtr<MediaDataDecoder::DecodePromise> EMEMediaDataDecoderProxy::Decode(
+    MediaRawData* aSample) {
+  RefPtr<EMEMediaDataDecoderProxy> self = this;
+  RefPtr<MediaRawData> sample = aSample;
+  return InvokeAsync(mThread, __func__, [self, this, sample]() {
+    RefPtr<DecodePromise> p = mDecodePromise.Ensure(__func__);
+    mSamplesWaitingForKey->WaitIfKeyNotUsable(sample)
+        ->Then(
+            mThread, __func__,
+            [self, this](RefPtr<MediaRawData> aSample) {
+              mKeyRequest.Complete();
+
+              MediaDataDecoderProxy::Decode(aSample)
+                  ->Then(mThread, __func__,
+                         [self,
+                          this](DecodePromise::ResolveOrRejectValue&& aValue) {
+                           mDecodeRequest.Complete();
+                           mDecodePromise.ResolveOrReject(std::move(aValue),
+                                                          __func__);
+                         })
+                  ->Track(mDecodeRequest);
+            },
+            [self]() {
+              self->mKeyRequest.Complete();
+              MOZ_CRASH("Should never get here");
+            })
+        ->Track(mKeyRequest);
+
+    return p;
+  });
+}
+
+// Disconnects pending key/decode requests, cancels the outstanding decode
+// promise, then flushes the proxied decoder.
+RefPtr<MediaDataDecoder::FlushPromise> EMEMediaDataDecoderProxy::Flush() {
+  RefPtr<EMEMediaDataDecoderProxy> self = this;
+  return InvokeAsync(mThread, __func__, [self, this]() {
+    mKeyRequest.DisconnectIfExists();
+    mDecodeRequest.DisconnectIfExists();
+    mDecodePromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+    return MediaDataDecoderProxy::Flush();
+  });
+}
+
+// Breaks the reference cycle through SamplesWaitingForKey and drops the
+// CDMProxy before shutting down the proxied decoder.
+RefPtr<ShutdownPromise> EMEMediaDataDecoderProxy::Shutdown() {
+  RefPtr<EMEMediaDataDecoderProxy> self = this;
+  return InvokeAsync(mThread, __func__, [self, this]() {
+    mSamplesWaitingForKey->BreakCycles();
+    mSamplesWaitingForKey = nullptr;
+    mProxy = nullptr;
+    return MediaDataDecoderProxy::Shutdown();
+  });
+}
+
+// aPDM supplies the underlying (non-EME) decoders; it is null when the CDM
+// itself has decoding capability (see the member comment in the header).
+EMEDecoderModule::EMEDecoderModule(CDMProxy* aProxy, PDMFactory* aPDM)
+    : mProxy(aProxy), mPDM(aPDM) {}
+
+EMEDecoderModule::~EMEDecoderModule() = default;
+
+// Builds an EMEMediaDataDecoderProxy that runs a ChromiumCDMVideoDecoder on
+// the GMP thread. Returns nullptr if the GMP service or its thread is
+// unavailable.
+static already_AddRefed<MediaDataDecoderProxy> CreateDecoderWrapper(
+    CDMProxy* aProxy, const CreateDecoderParams& aParams) {
+  RefPtr<gmp::GeckoMediaPluginService> s(
+      gmp::GeckoMediaPluginService::GetGeckoMediaPluginService());
+  if (!s) {
+    return nullptr;
+  }
+  nsCOMPtr<nsISerialEventTarget> thread(s->GetGMPThread());
+  if (!thread) {
+    return nullptr;
+  }
+  RefPtr<MediaDataDecoderProxy> decoder(
+      new EMEMediaDataDecoderProxy(aParams,
+                                   do_AddRef(new ChromiumCDMVideoDecoder(
+                                       GMPVideoDecoderParams(aParams), aProxy)),
+                                   thread.forget(), aProxy));
+  return decoder.forget();
+}
+
+// Creates a decoder for encrypted content. Video: blank decoder if the
+// media.eme.video.blank pref is set; a CDM-backed proxy if the GMP supports
+// the mime type; otherwise a platform decoder wrapped in an EMEDecryptor.
+// Audio: blank decoder pref aside, always a platform decoder wrapped in an
+// EMEDecryptor, with an ADTS converter for AAC.
+RefPtr<EMEDecoderModule::CreateDecoderPromise>
+EMEDecoderModule::AsyncCreateDecoder(const CreateDecoderParams& aParams) {
+  MOZ_ASSERT(aParams.mConfig.mCrypto.IsEncrypted());
+  MOZ_ASSERT(mPDM);
+
+  if (aParams.mConfig.IsVideo()) {
+    if (StaticPrefs::media_eme_video_blank()) {
+      EME_LOG(
+          "EMEDecoderModule::CreateVideoDecoder() creating a blank decoder.");
+      RefPtr<PlatformDecoderModule> m(BlankDecoderModule::Create());
+      RefPtr<MediaDataDecoder> decoder = m->CreateVideoDecoder(aParams);
+      return EMEDecoderModule::CreateDecoderPromise::CreateAndResolve(decoder,
+                                                                      __func__);
+    }
+
+    if (SupportsMimeType(aParams.mConfig.mMimeType, nullptr) !=
+        media::DecodeSupport::Unsupported) {
+      // GMP decodes. Assume that means it can decrypt too.
+      return EMEDecoderModule::CreateDecoderPromise::CreateAndResolve(
+          CreateDecoderWrapper(mProxy, aParams), __func__);
+    }
+
+    RefPtr<EMEDecoderModule::CreateDecoderPromise> p =
+        mPDM->CreateDecoder(aParams)->Then(
+            GetCurrentSerialEventTarget(), __func__,
+            [self = RefPtr{this},
+             params = CreateDecoderParamsForAsync(aParams)](
+                RefPtr<MediaDataDecoder>&& aDecoder) {
+              RefPtr<MediaDataDecoder> emeDecoder(
+                  new EMEDecryptor(aDecoder, self->mProxy, params.mType,
+                                   params.mOnWaitingForKeyEvent));
+              return EMEDecoderModule::CreateDecoderPromise::CreateAndResolve(
+                  emeDecoder, __func__);
+            },
+            [](const MediaResult& aError) {
+              return EMEDecoderModule::CreateDecoderPromise::CreateAndReject(
+                  aError, __func__);
+            });
+    return p;
+  }
+
+  MOZ_ASSERT(aParams.mConfig.IsAudio());
+
+  // We don't support using the GMP to decode audio.
+  MOZ_ASSERT(SupportsMimeType(aParams.mConfig.mMimeType, nullptr) ==
+             media::DecodeSupport::Unsupported);
+  MOZ_ASSERT(mPDM);
+
+  if (StaticPrefs::media_eme_audio_blank()) {
+    EME_LOG("EMEDecoderModule::CreateAudioDecoder() creating a blank decoder.");
+    RefPtr<PlatformDecoderModule> m(BlankDecoderModule::Create());
+    RefPtr<MediaDataDecoder> decoder = m->CreateAudioDecoder(aParams);
+    return EMEDecoderModule::CreateDecoderPromise::CreateAndResolve(decoder,
+                                                                    __func__);
+  }
+
+  UniquePtr<ADTSSampleConverter> converter = nullptr;
+  if (MP4Decoder::IsAAC(aParams.mConfig.mMimeType)) {
+    // The CDM expects encrypted AAC to be in ADTS format.
+    // See bug 1433344.
+    converter = MakeUnique<ADTSSampleConverter>(aParams.AudioConfig());
+  }
+
+  RefPtr<EMEDecoderModule::CreateDecoderPromise> p =
+      mPDM->CreateDecoder(aParams)->Then(
+          GetCurrentSerialEventTarget(), __func__,
+          [self = RefPtr{this}, params = CreateDecoderParamsForAsync(aParams),
+           converter = std::move(converter)](
+              RefPtr<MediaDataDecoder>&& aDecoder) mutable {
+            RefPtr<MediaDataDecoder> emeDecoder(new EMEDecryptor(
+                aDecoder, self->mProxy, params.mType,
+                params.mOnWaitingForKeyEvent, std::move(converter)));
+            return EMEDecoderModule::CreateDecoderPromise::CreateAndResolve(
+                emeDecoder, __func__);
+          },
+          [](const MediaResult& aError) {
+            return EMEDecoderModule::CreateDecoderPromise::CreateAndReject(
+                aError, __func__);
+          });
+  return p;
+}
+
+// Delegates to the GMP module, keyed on this proxy's key system.
+// aDiagnostics is unused here.
+media::DecodeSupportSet EMEDecoderModule::SupportsMimeType(
+    const nsACString& aMimeType, DecoderDoctorDiagnostics* aDiagnostics) const {
+  Maybe<nsCString> keySystem;
+  keySystem.emplace(NS_ConvertUTF16toUTF8(mProxy->KeySystem()));
+  return GMPDecoderModule::SupportsMimeType(
+      aMimeType, nsLiteralCString(CHROMIUM_CDM_API), keySystem);
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/agnostic/eme/EMEDecoderModule.h b/dom/media/platforms/agnostic/eme/EMEDecoderModule.h
new file mode 100644
index 0000000000..06fea6e650
--- /dev/null
+++ b/dom/media/platforms/agnostic/eme/EMEDecoderModule.h
@@ -0,0 +1,79 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(EMEDecoderModule_h_)
+# define EMEDecoderModule_h_
+
+# include "MediaDataDecoderProxy.h"
+# include "PlatformDecoderModule.h"
+# include "SamplesWaitingForKey.h"
+
+namespace mozilla {
+
+class CDMProxy;
+class PDMFactory;
+
+// PlatformDecoderModule that produces decoders for EME (encrypted) content,
+// pairing a CDMProxy with decoders from an underlying PDMFactory.
+class EMEDecoderModule : public PlatformDecoderModule {
+ public:
+  EMEDecoderModule(CDMProxy* aProxy, PDMFactory* aPDM);
+
+ protected:
+  RefPtr<CreateDecoderPromise> AsyncCreateDecoder(
+      const CreateDecoderParams& aParams) override;
+
+  // Decode thread. Creation goes through AsyncCreateDecoder only.
+  already_AddRefed<MediaDataDecoder> CreateVideoDecoder(
+      const CreateDecoderParams& aParams) override {
+    MOZ_CRASH("Not used");
+  }
+
+  // Decode thread. Creation goes through AsyncCreateDecoder only.
+  already_AddRefed<MediaDataDecoder> CreateAudioDecoder(
+      const CreateDecoderParams& aParams) override {
+    MOZ_CRASH("Not used");
+  }
+
+  media::DecodeSupportSet SupportsMimeType(
+      const nsACString& aMimeType,
+      DecoderDoctorDiagnostics* aDiagnostics) const override;
+
+ private:
+  virtual ~EMEDecoderModule();
+  RefPtr<CDMProxy> mProxy;
+  // Will be null if CDM has decoding capability.
+  RefPtr<PDMFactory> mPDM;
+};
+
+DDLoggedTypeDeclNameAndBase(EMEMediaDataDecoderProxy, MediaDataDecoderProxy);
+
+// MediaDataDecoderProxy that waits for usable keys before forwarding
+// encrypted samples to a CDM-backed decoder (the CDM decrypts and decodes).
+class EMEMediaDataDecoderProxy
+    : public MediaDataDecoderProxy,
+      public DecoderDoctorLifeLogger<EMEMediaDataDecoderProxy> {
+ public:
+  EMEMediaDataDecoderProxy(const CreateDecoderParams& aParams,
+                           already_AddRefed<MediaDataDecoder> aProxyDecoder,
+                           already_AddRefed<nsISerialEventTarget> aProxyThread,
+                           CDMProxy* aProxy);
+  EMEMediaDataDecoderProxy(const CreateDecoderParams& aParams,
+                           already_AddRefed<MediaDataDecoder> aProxyDecoder,
+                           CDMProxy* aProxy);
+
+  RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
+  RefPtr<FlushPromise> Flush() override;
+  RefPtr<ShutdownPromise> Shutdown() override;
+
+ private:
+  nsCOMPtr<nsISerialEventTarget> mThread;
+  RefPtr<SamplesWaitingForKey> mSamplesWaitingForKey;
+  MozPromiseRequestHolder<SamplesWaitingForKey::WaitForKeyPromise> mKeyRequest;
+  MozPromiseHolder<DecodePromise> mDecodePromise;
+  MozPromiseRequestHolder<DecodePromise> mDecodeRequest;
+  RefPtr<CDMProxy> mProxy;
+};
+
+} // namespace mozilla
+
+#endif // EMEDecoderModule_h_
diff --git a/dom/media/platforms/agnostic/eme/SamplesWaitingForKey.cpp b/dom/media/platforms/agnostic/eme/SamplesWaitingForKey.cpp
new file mode 100644
index 0000000000..23d79ce56a
--- /dev/null
+++ b/dom/media/platforms/agnostic/eme/SamplesWaitingForKey.cpp
@@ -0,0 +1,79 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "SamplesWaitingForKey.h"
+
+#include "MediaData.h"
+#include "MediaEventSource.h"
+#include "mozilla/CDMCaps.h"
+#include "mozilla/CDMProxy.h"
+#include "mozilla/TaskQueue.h"
+
+namespace mozilla {
+
+// aOnWaitingForKeyEvent returns the producer used to notify listeners that
+// playback is blocked waiting on a key (may return null).
+SamplesWaitingForKey::SamplesWaitingForKey(
+    CDMProxy* aProxy, TrackInfo::TrackType aType,
+    const std::function<MediaEventProducer<TrackInfo::TrackType>*()>&
+        aOnWaitingForKeyEvent)
+    : mMutex("SamplesWaitingForKey"),
+      mProxy(aProxy),
+      mType(aType),
+      mOnWaitingForKeyEvent(aOnWaitingForKeyEvent) {}
+
+// Rejects any still-pending wait-for-key promises on destruction.
+SamplesWaitingForKey::~SamplesWaitingForKey() { Flush(); }
+
+// Resolves immediately if the sample is clear, the proxy is gone, or the
+// sample's key is already usable; otherwise queues the sample and resolves
+// it later from NotifyUsable().
+RefPtr<SamplesWaitingForKey::WaitForKeyPromise>
+SamplesWaitingForKey::WaitIfKeyNotUsable(MediaRawData* aSample) {
+  if (!aSample || !aSample->mCrypto.IsEncrypted() || !mProxy) {
+    return WaitForKeyPromise::CreateAndResolve(aSample, __func__);
+  }
+  // NOTE(review): the CDM capabilities lock (`caps`) is held for the rest of
+  // this function, including the mOnWaitingForKeyEvent notify — presumably
+  // intentional so the key can't become usable in between; confirm.
+  auto caps = mProxy->Capabilites().Lock();
+  const auto& keyid = aSample->mCrypto.mKeyId;
+  if (caps->IsKeyUsable(keyid)) {
+    return WaitForKeyPromise::CreateAndResolve(aSample, __func__);
+  }
+  SampleEntry entry;
+  entry.mSample = aSample;
+  RefPtr<WaitForKeyPromise> p = entry.mPromise.Ensure(__func__);
+  {
+    MutexAutoLock lock(mMutex);
+    mSamples.AppendElement(std::move(entry));
+  }
+  if (mOnWaitingForKeyEvent && mOnWaitingForKeyEvent()) {
+    mOnWaitingForKeyEvent()->Notify(mType);
+  }
+  caps->NotifyWhenKeyIdUsable(aSample->mCrypto.mKeyId, this);
+  return p;
+}
+
+// Resolves and removes every queued sample whose key id matches aKeyId.
+void SamplesWaitingForKey::NotifyUsable(const CencKeyId& aKeyId) {
+  MutexAutoLock lock(mMutex);
+  size_t i = 0;
+  while (i < mSamples.Length()) {
+    auto& entry = mSamples[i];
+    if (aKeyId == entry.mSample->mCrypto.mKeyId) {
+      entry.mPromise.Resolve(entry.mSample, __func__);
+      // Removal shifts later entries down, so don't advance i here.
+      mSamples.RemoveElementAt(i);
+    } else {
+      i++;
+    }
+  }
+}
+
+// Rejects all pending wait-for-key promises and empties the queue.
+void SamplesWaitingForKey::Flush() {
+  MutexAutoLock lock(mMutex);
+  for (auto& sample : mSamples) {
+    sample.mPromise.Reject(true, __func__);
+  }
+  mSamples.Clear();
+}
+
+// Drops the CDMProxy reference to break the ownership cycle at shutdown.
+void SamplesWaitingForKey::BreakCycles() {
+  MutexAutoLock lock(mMutex);
+  mProxy = nullptr;
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/agnostic/eme/SamplesWaitingForKey.h b/dom/media/platforms/agnostic/eme/SamplesWaitingForKey.h
new file mode 100644
index 0000000000..06d72e3aae
--- /dev/null
+++ b/dom/media/platforms/agnostic/eme/SamplesWaitingForKey.h
@@ -0,0 +1,68 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef SamplesWaitingForKey_h_
+#define SamplesWaitingForKey_h_
+
+#include <functional>
+
+#include "MediaInfo.h"
+#include "mozilla/MozPromise.h"
+#include "mozilla/Mutex.h"
+#include "mozilla/RefPtr.h"
+
+namespace mozilla {
+
+typedef nsTArray<uint8_t> CencKeyId;
+
+class CDMProxy;
+template <typename... Es>
+class MediaEventProducer;
+class MediaRawData;
+
+// Encapsulates the task of waiting for the CDMProxy to have the necessary
+// keys to decrypt a given sample.
+class SamplesWaitingForKey {
+ public:
+  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(SamplesWaitingForKey)
+
+  typedef MozPromise<RefPtr<MediaRawData>, bool, /* IsExclusive = */ true>
+      WaitForKeyPromise;
+
+  SamplesWaitingForKey(
+      CDMProxy* aProxy, TrackInfo::TrackType aType,
+      const std::function<MediaEventProducer<TrackInfo::TrackType>*()>&
+          aOnWaitingForKeyEvent);
+
+  // Returns a promise that will be resolved if or when a key for decoding the
+  // sample becomes usable.
+  RefPtr<WaitForKeyPromise> WaitIfKeyNotUsable(MediaRawData* aSample);
+
+  // Called (by the CDM capabilities) when aKeyId becomes usable; resolves
+  // matching queued samples.
+  void NotifyUsable(const CencKeyId& aKeyId);
+
+  // Rejects all pending promises and clears the queue.
+  void Flush();
+
+  // Drops the CDMProxy reference to break the ownership cycle at shutdown.
+  void BreakCycles();
+
+ protected:
+  ~SamplesWaitingForKey();
+
+ private:
+  // Guards mProxy and mSamples; callable from multiple threads.
+  Mutex mMutex MOZ_UNANNOTATED;
+  RefPtr<CDMProxy> mProxy;
+  struct SampleEntry {
+    RefPtr<MediaRawData> mSample;
+    MozPromiseHolder<WaitForKeyPromise> mPromise;
+  };
+  nsTArray<SampleEntry> mSamples;
+  const TrackInfo::TrackType mType;
+  const std::function<MediaEventProducer<TrackInfo::TrackType>*()>
+      mOnWaitingForKeyEvent;
+};
+
+} // namespace mozilla
+
+#endif // SamplesWaitingForKey_h_
diff --git a/dom/media/platforms/agnostic/eme/moz.build b/dom/media/platforms/agnostic/eme/moz.build
new file mode 100644
index 0000000000..34f0007b3b
--- /dev/null
+++ b/dom/media/platforms/agnostic/eme/moz.build
@@ -0,0 +1,22 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# Build definition for the EME (Encrypted Media Extensions) decoder module.
+EXPORTS += [
+    "ChromiumCDMVideoDecoder.h",
+    "DecryptThroughputLimit.h",
+    "EMEDecoderModule.h",
+    "SamplesWaitingForKey.h",
+]
+
+UNIFIED_SOURCES += [
+    "ChromiumCDMVideoDecoder.cpp",
+    "EMEDecoderModule.cpp",
+    "SamplesWaitingForKey.cpp",
+]
+
+include("/ipc/chromium/chromium-config.mozbuild")
+
+# Linked into libxul like the rest of dom/media.
+FINAL_LIBRARY = "xul"
diff --git a/dom/media/platforms/agnostic/gmp/GMPDecoderModule.cpp b/dom/media/platforms/agnostic/gmp/GMPDecoderModule.cpp
new file mode 100644
index 0000000000..a389c9ad0b
--- /dev/null
+++ b/dom/media/platforms/agnostic/gmp/GMPDecoderModule.cpp
@@ -0,0 +1,94 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "GMPDecoderModule.h"
+
+#include "DecoderDoctorDiagnostics.h"
+#include "GMPService.h"
+#include "GMPUtils.h"
+#include "GMPVideoDecoder.h"
+#include "MP4Decoder.h"
+#include "MediaDataDecoderProxy.h"
+#include "VPXDecoder.h"
+#include "VideoUtils.h"
+#include "gmp-video-decode.h"
+#include "mozilla/StaticMutex.h"
+#include "nsServiceManagerUtils.h"
+#ifdef XP_WIN
+# include "WMFDecoderModule.h"
+#endif
+
+namespace mozilla {
+
+// Wraps a GMPVideoDecoder in a MediaDataDecoderProxy running on the GMP
+// thread. Returns nullptr if the GMP service or its thread is unavailable.
+static already_AddRefed<MediaDataDecoderProxy> CreateDecoderWrapper(
+    GMPVideoDecoderParams&& aParams) {
+  RefPtr<gmp::GeckoMediaPluginService> s(
+      gmp::GeckoMediaPluginService::GetGeckoMediaPluginService());
+  if (!s) {
+    return nullptr;
+  }
+  nsCOMPtr<nsISerialEventTarget> thread(s->GetGMPThread());
+  if (!thread) {
+    return nullptr;
+  }
+
+  RefPtr<MediaDataDecoderProxy> decoder(new MediaDataDecoderProxy(
+      do_AddRef(new GMPVideoDecoder(std::move(aParams))), thread.forget()));
+  return decoder.forget();
+}
+
+// Only H.264, VP8 and VP9 are decodable via GMP plugins.
+already_AddRefed<MediaDataDecoder> GMPDecoderModule::CreateVideoDecoder(
+    const CreateDecoderParams& aParams) {
+  if (!MP4Decoder::IsH264(aParams.mConfig.mMimeType) &&
+      !VPXDecoder::IsVP8(aParams.mConfig.mMimeType) &&
+      !VPXDecoder::IsVP9(aParams.mConfig.mMimeType)) {
+    return nullptr;
+  }
+
+  return CreateDecoderWrapper(GMPVideoDecoderParams(aParams));
+}
+
+// GMP plugins are not used for audio decoding; always returns nullptr.
+already_AddRefed<MediaDataDecoder> GMPDecoderModule::CreateAudioDecoder(
+    const CreateDecoderParams& aParams) {
+  return nullptr;
+}
+
+/* static */
+// Maps the mime type to a GMP capability tag (plus an optional key-system
+// tag for EME plugins) and asks the GMP service whether a matching plugin
+// exists for aApi.
+media::DecodeSupportSet GMPDecoderModule::SupportsMimeType(
+    const nsACString& aMimeType, const nsACString& aApi,
+    const Maybe<nsCString>& aKeySystem) {
+  AutoTArray<nsCString, 2> tags;
+  if (MP4Decoder::IsH264(aMimeType)) {
+    tags.AppendElement("h264"_ns);
+  } else if (VPXDecoder::IsVP9(aMimeType)) {
+    tags.AppendElement("vp9"_ns);
+  } else if (VPXDecoder::IsVP8(aMimeType)) {
+    tags.AppendElement("vp8"_ns);
+  } else {
+    return media::DecodeSupport::Unsupported;
+  }
+
+  // Optional tag for EME GMP plugins.
+  if (aKeySystem) {
+    tags.AppendElement(*aKeySystem);
+  }
+
+  // GMP plugins are always software based.
+  return HaveGMPFor(aApi, tags) ? media::DecodeSupport::SoftwareDecode
+                                : media::DecodeSupport::Unsupported;
+}
+
+// PDM interface overload: queries the "decode-video" API with no key system.
+// aDiagnostics is unused here.
+media::DecodeSupportSet GMPDecoderModule::SupportsMimeType(
+    const nsACString& aMimeType, DecoderDoctorDiagnostics* aDiagnostics) const {
+  return SupportsMimeType(aMimeType, "decode-video"_ns, Nothing());
+}
+
+/* static */
+// Factory entry point; the constructor is private (see header).
+already_AddRefed<PlatformDecoderModule> GMPDecoderModule::Create() {
+  return MakeAndAddRef<GMPDecoderModule>();
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/agnostic/gmp/GMPDecoderModule.h b/dom/media/platforms/agnostic/gmp/GMPDecoderModule.h
new file mode 100644
index 0000000000..1a131dc154
--- /dev/null
+++ b/dom/media/platforms/agnostic/gmp/GMPDecoderModule.h
@@ -0,0 +1,58 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(GMPDecoderModule_h_)
+# define GMPDecoderModule_h_
+
+# include "PlatformDecoderModule.h"
+# include "mozilla/Maybe.h"
+
+// The special NodeId we use when doing unencrypted decoding using the GMP's
+// decoder. This ensures that each GMP MediaDataDecoder we create doesn't
+// require spinning up a new process, but instead we run all instances of
+// GMP decoders in the one process, to reduce overhead.
+//
+// Note: GMP storage is isolated by NodeId, and non persistent for this
+// special NodeId, and the only way a GMP can communicate with the outside
+// world is through the EME GMP APIs, and we never run EME with this NodeID
+// (because NodeIds are random strings which can't contain the '-' character),
+// so there's no way a malicious GMP can harvest, store, and then report any
+// privacy sensitive data about what users are watching.
+# define SHARED_GMP_DECODING_NODE_ID "gmp-shared-decoding"_ns
+
+namespace mozilla {
+
+// PlatformDecoderModule backed by Gecko Media Plugins (GMP); video-only
+// (H.264/VP8/VP9), software decode.
+class GMPDecoderModule : public PlatformDecoderModule {
+  // Grant MakeAndAddRef access to the private constructor.
+  template <typename T, typename... Args>
+  friend already_AddRefed<T> MakeAndAddRef(Args&&...);
+
+ public:
+  static already_AddRefed<PlatformDecoderModule> Create();
+
+  // Decode thread.
+  already_AddRefed<MediaDataDecoder> CreateVideoDecoder(
+      const CreateDecoderParams& aParams) override;
+
+  // Decode thread. Always returns nullptr; GMP does not decode audio.
+  already_AddRefed<MediaDataDecoder> CreateAudioDecoder(
+      const CreateDecoderParams& aParams) override;
+
+  media::DecodeSupportSet SupportsMimeType(
+      const nsACString& aMimeType,
+      DecoderDoctorDiagnostics* aDiagnostics) const override;
+
+  // Queries plugin availability for a specific GMP API and optional
+  // EME key system.
+  static media::DecodeSupportSet SupportsMimeType(
+      const nsACString& aMimeType, const nsACString& aApi,
+      const Maybe<nsCString>& aKeySystem);
+
+ private:
+  GMPDecoderModule() = default;
+  virtual ~GMPDecoderModule() = default;
+};
+
+} // namespace mozilla
+
+#endif // GMPDecoderModule_h_
diff --git a/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.cpp b/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.cpp
new file mode 100644
index 0000000000..47798fafb0
--- /dev/null
+++ b/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.cpp
@@ -0,0 +1,489 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "GMPVideoDecoder.h"
+#include "GMPDecoderModule.h"
+#include "GMPVideoHost.h"
+#include "GMPLog.h"
+#include "MediaData.h"
+#include "mozilla/EndianUtils.h"
+#include "mozilla/StaticPrefs_media.h"
+#include "nsServiceManagerUtils.h"
+#include "AnnexB.h"
+#include "H264.h"
+#include "MP4Decoder.h"
+#include "prsystem.h"
+#include "VPXDecoder.h"
+#include "VideoUtils.h"
+
+namespace mozilla {
+
+#if defined(DEBUG)
+// Debug-only helper: true when the current thread is the GMP service thread.
+// Used solely in MOZ_ASSERTs; compiled out of release builds.
+static bool IsOnGMPThread() {
+  nsCOMPtr<mozIGeckoMediaPluginService> mps =
+      do_GetService("@mozilla.org/gecko-media-plugin-service;1");
+  MOZ_ASSERT(mps);
+
+  nsCOMPtr<nsIThread> gmpThread;
+  nsresult rv = mps->GetThread(getter_AddRefs(gmpThread));
+  MOZ_ASSERT(NS_SUCCEEDED(rv) && gmpThread);
+  return gmpThread->IsOnCurrentThread();
+}
+#endif
+
+// Snapshot the subset of CreateDecoderParams that GMPVideoDecoder needs.
+// Note this holds references/raw pointers into aParams' objects; the params
+// struct is MOZ_STACK_CLASS and must not outlive them.
+GMPVideoDecoderParams::GMPVideoDecoderParams(const CreateDecoderParams& aParams)
+    : mConfig(aParams.VideoConfig()),
+      mImageContainer(aParams.mImageContainer),
+      mCrashHelper(aParams.mCrashHelper),
+      mKnowsCompositor(aParams.mKnowsCompositor),
+      mTrackingId(aParams.mTrackingId) {}
+
+// Short codec label derived from the configured MIME type, for telemetry /
+// diagnostics. Only the codecs a GMP can decode are distinguished.
+nsCString GMPVideoDecoder::GetCodecName() const {
+  if (MP4Decoder::IsH264(mConfig.mMimeType)) {
+    return "h264"_ns;
+  } else if (VPXDecoder::IsVP8(mConfig.mMimeType)) {
+    return "vp8"_ns;
+  } else if (VPXDecoder::IsVP9(mConfig.mMimeType)) {
+    return "vp9"_ns;
+  }
+  return "unknown"_ns;
+}
+
+// GMPVideoDecoderCallbackProxy: one decoded frame delivered by the plugin.
+// Runs on the GMP thread. Copies the plugin-owned YUV planes into a
+// VideoData, queues it (reordered or not), and resolves the pending decode
+// promise once all outstanding samples have been accounted for.
+void GMPVideoDecoder::Decoded(GMPVideoi420Frame* aDecodedFrame) {
+  GMP_LOG_DEBUG("GMPVideoDecoder::Decoded");
+
+  // Take ownership so the frame is destroyed on every exit path.
+  GMPUniquePtr<GMPVideoi420Frame> decodedFrame(aDecodedFrame);
+  MOZ_ASSERT(IsOnGMPThread());
+
+  // Wrap the plugin's plane buffers; the data is copied below by
+  // CreateAndCopyData, so this view only needs to outlive that call.
+  VideoData::YCbCrBuffer b;
+  for (int i = 0; i < kGMPNumOfPlanes; ++i) {
+    b.mPlanes[i].mData = decodedFrame->Buffer(GMPPlaneType(i));
+    b.mPlanes[i].mStride = decodedFrame->Stride(GMPPlaneType(i));
+    if (i == kGMPYPlane) {
+      b.mPlanes[i].mWidth = decodedFrame->Width();
+      b.mPlanes[i].mHeight = decodedFrame->Height();
+    } else {
+      // 4:2:0 chroma planes: half size in each dimension, rounded up.
+      b.mPlanes[i].mWidth = (decodedFrame->Width() + 1) / 2;
+      b.mPlanes[i].mHeight = (decodedFrame->Height() + 1) / 2;
+    }
+    b.mPlanes[i].mSkip = 0;
+  }
+
+  b.mChromaSubsampling = gfx::ChromaSubsampling::HALF_WIDTH_AND_HEIGHT;
+  b.mYUVColorSpace =
+      DefaultColorSpace({decodedFrame->Width(), decodedFrame->Height()});
+
+  // Match this output frame to the metadata stashed by Decode(), keyed on
+  // the input timestamp, and remove the entry; an empty table means no
+  // decode requests remain outstanding.
+  UniquePtr<SampleMetadata> sampleData;
+  if (auto entryHandle = mSamples.Lookup(decodedFrame->Timestamp())) {
+    sampleData = std::move(entryHandle.Data());
+    entryHandle.Remove();
+  } else {
+    // Can happen e.g. when a seek re-requested a frame that was already
+    // outstanding; drop the duplicate output (see comment in Decode()).
+    GMP_LOG_DEBUG(
+        "GMPVideoDecoder::Decoded(this=%p) missing sample metadata for "
+        "time %" PRIu64,
+        this, decodedFrame->Timestamp());
+    if (mSamples.IsEmpty()) {
+      // If we have no remaining samples in the table, then we have processed
+      // all outstanding decode requests.
+      ProcessReorderQueue(mDecodePromise, __func__);
+    }
+    return;
+  }
+
+  MOZ_ASSERT(sampleData);
+
+  gfx::IntRect pictureRegion(0, 0, decodedFrame->Width(),
+                             decodedFrame->Height());
+  // FromMicroseconds(-1) is the "unknown next-keyframe time" sentinel.
+  RefPtr<VideoData> v = VideoData::CreateAndCopyData(
+      mConfig, mImageContainer, sampleData->mOffset,
+      media::TimeUnit::FromMicroseconds(decodedFrame->UpdatedTimestamp()),
+      media::TimeUnit::FromMicroseconds(decodedFrame->Duration()), b,
+      sampleData->mKeyframe, media::TimeUnit::FromMicroseconds(-1),
+      pictureRegion, mKnowsCompositor);
+  RefPtr<GMPVideoDecoder> self = this;
+  if (v) {
+    mPerformanceRecorder.Record(static_cast<int64_t>(decodedFrame->Timestamp()),
+                                [&](DecodeStage& aStage) {
+                                  aStage.SetImageFormat(DecodeStage::YUV420P);
+                                  aStage.SetResolution(decodedFrame->Width(),
+                                                       decodedFrame->Height());
+                                  aStage.SetYUVColorSpace(b.mYUVColorSpace);
+                                  aStage.SetColorDepth(b.mColorDepth);
+                                  aStage.SetColorRange(b.mColorRange);
+                                });
+
+    if (mReorderFrames) {
+      mReorderQueue.Push(std::move(v));
+    } else {
+      mUnorderedData.AppendElement(std::move(v));
+    }
+
+    if (mSamples.IsEmpty()) {
+      // If we have no remaining samples in the table, then we have processed
+      // all outstanding decode requests.
+      ProcessReorderQueue(mDecodePromise, __func__);
+    }
+  } else {
+    // Allocation failed: discard all pending output and fail the decode.
+    mReorderQueue.Clear();
+    mUnorderedData.Clear();
+    mSamples.Clear();
+    mDecodePromise.RejectIfExists(
+        MediaResult(NS_ERROR_OUT_OF_MEMORY,
+                    RESULT_DETAIL("CallBack::CreateAndCopyData")),
+        __func__);
+  }
+}
+
+// GMPVideoDecoderCallbackProxy notification; intentionally a no-op here
+// beyond logging (this decoder does not track reference frames by id).
+void GMPVideoDecoder::ReceivedDecodedReferenceFrame(const uint64_t aPictureId) {
+  GMP_LOG_DEBUG("GMPVideoDecoder::ReceivedDecodedReferenceFrame");
+  MOZ_ASSERT(IsOnGMPThread());
+}
+
+// GMPVideoDecoderCallbackProxy notification; intentionally log-only.
+void GMPVideoDecoder::ReceivedDecodedFrame(const uint64_t aPictureId) {
+  GMP_LOG_DEBUG("GMPVideoDecoder::ReceivedDecodedFrame");
+  MOZ_ASSERT(IsOnGMPThread());
+}
+
+// The plugin has consumed all queued input. Any samples still in the table
+// will never produce output now, so drop their metadata and resolve the
+// pending decode promise with whatever frames we have.
+void GMPVideoDecoder::InputDataExhausted() {
+  GMP_LOG_DEBUG("GMPVideoDecoder::InputDataExhausted");
+  MOZ_ASSERT(IsOnGMPThread());
+  mSamples.Clear();
+  ProcessReorderQueue(mDecodePromise, __func__);
+}
+
+// The plugin finished draining: flush every remaining queued frame to the
+// drain promise (unlike ProcessReorderQueue, no frames are held back).
+void GMPVideoDecoder::DrainComplete() {
+  GMP_LOG_DEBUG("GMPVideoDecoder::DrainComplete");
+  MOZ_ASSERT(IsOnGMPThread());
+  mSamples.Clear();
+
+  if (mDrainPromise.IsEmpty()) {
+    // Drain was cancelled (e.g. by Shutdown/Error) before completion.
+    return;
+  }
+
+  DecodedData results;
+  if (mReorderFrames) {
+    results.SetCapacity(mReorderQueue.Length());
+    while (!mReorderQueue.IsEmpty()) {
+      results.AppendElement(mReorderQueue.Pop());
+    }
+  } else {
+    results = std::move(mUnorderedData);
+  }
+
+  mDrainPromise.Resolve(std::move(results), __func__);
+}
+
+// The plugin finished resetting (our Flush()). Close out any in-flight
+// performance recording and resolve the flush promise.
+void GMPVideoDecoder::ResetComplete() {
+  GMP_LOG_DEBUG("GMPVideoDecoder::ResetComplete");
+  MOZ_ASSERT(IsOnGMPThread());
+  // max() records/ends all outstanding stages at once.
+  mPerformanceRecorder.Record(std::numeric_limits<int64_t>::max());
+  mFlushPromise.ResolveIfExists(true, __func__);
+}
+
+// Fatal plugin error: map GMPDecodeErr to a decode error and everything
+// else to a fatal error, then reject every pending promise.
+void GMPVideoDecoder::Error(GMPErr aErr) {
+  GMP_LOG_DEBUG("GMPVideoDecoder::Error");
+  MOZ_ASSERT(IsOnGMPThread());
+  auto error = MediaResult(aErr == GMPDecodeErr ? NS_ERROR_DOM_MEDIA_DECODE_ERR
+                                                : NS_ERROR_DOM_MEDIA_FATAL_ERR,
+                           RESULT_DETAIL("GMPErr:%x", aErr));
+  mDecodePromise.RejectIfExists(error, __func__);
+  mDrainPromise.RejectIfExists(error, __func__);
+  mFlushPromise.RejectIfExists(error, __func__);
+}
+
+// The GMP process went away (crash/shutdown); treat it as an abort error.
+void GMPVideoDecoder::Terminated() {
+  GMP_LOG_DEBUG("GMPVideoDecoder::Terminated");
+  MOZ_ASSERT(IsOnGMPThread());
+  Error(GMPErr::GMPAbortedErr);
+}
+
+// Resolve aPromise with the frames that are safe to release. When
+// reordering, keep the last mMaxRefFrames frames queued (they may still be
+// reordered by later output); otherwise hand over everything accumulated.
+// No-op if the promise is not pending.
+void GMPVideoDecoder::ProcessReorderQueue(
+    MozPromiseHolder<DecodePromise>& aPromise, const char* aMethodName) {
+  if (aPromise.IsEmpty()) {
+    return;
+  }
+
+  if (!mReorderFrames) {
+    aPromise.Resolve(std::move(mUnorderedData), aMethodName);
+    return;
+  }
+
+  DecodedData results;
+  size_t availableFrames = mReorderQueue.Length();
+  if (availableFrames > mMaxRefFrames) {
+    // Release everything beyond the reorder window.
+    size_t resolvedFrames = availableFrames - mMaxRefFrames;
+    results.SetCapacity(resolvedFrames);
+    do {
+      results.AppendElement(mReorderQueue.Pop());
+    } while (--resolvedFrames > 0);
+  }
+
+  // May resolve with an empty array; callers then feed more input.
+  aPromise.Resolve(std::move(results), aMethodName);
+}
+
+// mGMP/mHost stay null until GMPInitDone(); batching and reordering
+// behavior are controlled by media.gmp.decoder.* prefs, latched here.
+GMPVideoDecoder::GMPVideoDecoder(const GMPVideoDecoderParams& aParams)
+    : mConfig(aParams.mConfig),
+      mGMP(nullptr),
+      mHost(nullptr),
+      mConvertNALUnitLengths(false),
+      mCrashHelper(aParams.mCrashHelper),
+      mImageContainer(aParams.mImageContainer),
+      mKnowsCompositor(aParams.mKnowsCompositor),
+      mTrackingId(aParams.mTrackingId),
+      mCanDecodeBatch(StaticPrefs::media_gmp_decoder_decode_batch()),
+      mReorderFrames(StaticPrefs::media_gmp_decoder_reorder_frames()) {}
+
+// Fill aTags with the codec tag the GMP service uses to pick a plugin
+// capable of decoding this stream. Unsupported MIME types add no tag
+// (plugin lookup will then fail in Init()).
+void GMPVideoDecoder::InitTags(nsTArray<nsCString>& aTags) {
+  if (MP4Decoder::IsH264(mConfig.mMimeType)) {
+    aTags.AppendElement("h264"_ns);
+  } else if (VPXDecoder::IsVP8(mConfig.mMimeType)) {
+    aTags.AppendElement("vp8"_ns);
+  } else if (VPXDecoder::IsVP9(mConfig.mMimeType)) {
+    aTags.AppendElement("vp9"_ns);
+  }
+}
+
+// Shared node id: all unencrypted GMP decoders run in one plugin process.
+nsCString GMPVideoDecoder::GetNodeId() { return SHARED_GMP_DECODING_NODE_ID; }
+
+// Copy aSample into a plugin-allocated GMPVideoEncodedFrame, applying the
+// NAL-length fixup OpenH264 expects (see GMPInitDone). Returns null on
+// allocation failure.
+GMPUniquePtr<GMPVideoEncodedFrame> GMPVideoDecoder::CreateFrame(
+    MediaRawData* aSample) {
+  GMPVideoFrame* ftmp = nullptr;
+  GMPErr err = mHost->CreateFrame(kGMPEncodedVideoFrame, &ftmp);
+  if (GMP_FAILED(err)) {
+    return nullptr;
+  }
+
+  GMPUniquePtr<GMPVideoEncodedFrame> frame(
+      static_cast<GMPVideoEncodedFrame*>(ftmp));
+  err = frame->CreateEmptyFrame(aSample->Size());
+  if (GMP_FAILED(err)) {
+    return nullptr;
+  }
+
+  memcpy(frame->Buffer(), aSample->Data(), frame->Size());
+
+  // Convert 4-byte NAL unit lengths to host-endian 4-byte buffer lengths to
+  // suit the GMP API. This rewrites the length fields in place: each
+  // big-endian NAL length becomes a host-endian length that also counts the
+  // 4-byte length field itself.
+  if (mConvertNALUnitLengths) {
+    const int kNALLengthSize = 4;
+    uint8_t* buf = frame->Buffer();
+    while (buf < frame->Buffer() + frame->Size() - kNALLengthSize) {
+      uint32_t length = BigEndian::readUint32(buf) + kNALLengthSize;
+      *reinterpret_cast<uint32_t*>(buf) = length;
+      buf += length;
+    }
+  }
+
+  frame->SetBufferType(GMP_BufferLength32);
+
+  frame->SetEncodedWidth(mConfig.mDisplay.width);
+  frame->SetEncodedHeight(mConfig.mDisplay.height);
+  // Microsecond presentation time doubles as the key for mSamples lookups.
+  frame->SetTimeStamp(aSample->mTime.ToMicroseconds());
+  frame->SetCompleteFrame(true);
+  frame->SetDuration(aSample->mDuration.ToMicroseconds());
+  frame->SetFrameType(aSample->mKeyframe ? kGMPKeyFrame : kGMPDeltaFrame);
+
+  return frame;
+}
+
+// Accessor for subclasses (e.g. EME decoders) that need the track config.
+const VideoInfo& GMPVideoDecoder::GetConfig() const { return mConfig; }
+
+// Completion callback for the async GetGMPVideoDecoder() request issued in
+// Init(). Configures the plugin for our codec and resolves/rejects
+// mInitPromise. aGMP is null if no suitable plugin was found.
+void GMPVideoDecoder::GMPInitDone(GMPVideoDecoderProxy* aGMP,
+                                  GMPVideoHost* aHost) {
+  MOZ_ASSERT(IsOnGMPThread());
+
+  if (!aGMP) {
+    mInitPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+    return;
+  }
+  MOZ_ASSERT(aHost);
+
+  if (mInitPromise.IsEmpty()) {
+    // GMP must have been shutdown while we were waiting for Init operation
+    // to complete.
+    aGMP->Close();
+    return;
+  }
+
+  bool isOpenH264 = aGMP->GetPluginType() == GMPPluginType::OpenH264;
+
+  GMPVideoCodec codec;
+  memset(&codec, 0, sizeof(codec));
+
+  codec.mGMPApiVersion = kGMPVersion34;
+  nsTArray<uint8_t> codecSpecific;
+  if (MP4Decoder::IsH264(mConfig.mMimeType)) {
+    codec.mCodecType = kGMPVideoCodecH264;
+    codecSpecific.AppendElement(0);  // mPacketizationMode.
+    // Pass the AVCC extradata (SPS/PPS) after the packetization-mode byte.
+    codecSpecific.AppendElements(mConfig.mExtraData->Elements(),
+                                 mConfig.mExtraData->Length());
+    // OpenH264 expects pseudo-AVCC, but others must be passed
+    // AnnexB for H264.
+    mConvertToAnnexB = !isOpenH264;
+    // Sets the reorder window used by ProcessReorderQueue().
+    mMaxRefFrames = H264::ComputeMaxRefFrames(mConfig.mExtraData);
+  } else if (VPXDecoder::IsVP8(mConfig.mMimeType)) {
+    codec.mCodecType = kGMPVideoCodecVP8;
+  } else if (VPXDecoder::IsVP9(mConfig.mMimeType)) {
+    codec.mCodecType = kGMPVideoCodecVP9;
+  } else {
+    // Unrecognized mime type
+    aGMP->Close();
+    mInitPromise.Reject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+    return;
+  }
+  codec.mWidth = mConfig.mImage.width;
+  codec.mHeight = mConfig.mImage.height;
+  codec.mUseThreadedDecode = StaticPrefs::media_gmp_decoder_multithreaded();
+  codec.mLogLevel = GetGMPLibraryLogLevel();
+
+  nsresult rv =
+      aGMP->InitDecode(codec, codecSpecific, this, PR_GetNumberOfProcessors());
+  if (NS_FAILED(rv)) {
+    aGMP->Close();
+    mInitPromise.Reject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+    return;
+  }
+
+  mGMP = aGMP;
+  mHost = aHost;
+
+  // GMP implementations have interpreted the meaning of GMP_BufferLength32
+  // differently. The OpenH264 GMP expects GMP_BufferLength32 to behave as
+  // specified in the GMP API, where each buffer is prefixed by a 32-bit
+  // host-endian buffer length that includes the size of the buffer length
+  // field. Other existing GMPs currently expect GMP_BufferLength32 (when
+  // combined with kGMPVideoCodecH264) to mean "like AVCC but restricted to
+  // 4-byte NAL lengths" (i.e. buffer lengths are specified in big-endian
+  // and do not include the length of the buffer length field).
+  mConvertNALUnitLengths = isOpenH264;
+
+  mInitPromise.Resolve(TrackInfo::kVideoTrack, __func__);
+}
+
+// Kick off asynchronous plugin acquisition; GMPInitDone() settles the
+// returned promise once the service has (or fails to find) a decoder.
+RefPtr<MediaDataDecoder::InitPromise> GMPVideoDecoder::Init() {
+  MOZ_ASSERT(IsOnGMPThread());
+
+  mMPS = do_GetService("@mozilla.org/gecko-media-plugin-service;1");
+  MOZ_ASSERT(mMPS);
+
+  RefPtr<InitPromise> promise(mInitPromise.Ensure(__func__));
+
+  nsTArray<nsCString> tags;
+  InitTags(tags);
+  // The callback holds a strong ref to |this| until GMPInitDone runs.
+  UniquePtr<GetGMPVideoDecoderCallback> callback(new GMPInitDoneCallback(this));
+  if (NS_FAILED(mMPS->GetGMPVideoDecoder(mCrashHelper, &tags, GetNodeId(),
+                                         std::move(callback)))) {
+    mInitPromise.Reject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+  }
+
+  return promise;
+}
+
+// Submit one compressed sample to the plugin. The returned promise is
+// resolved later from Decoded()/InputDataExhausted() (possibly with several
+// frames, or none while the reorder window fills).
+RefPtr<MediaDataDecoder::DecodePromise> GMPVideoDecoder::Decode(
+    MediaRawData* aSample) {
+  MOZ_ASSERT(IsOnGMPThread());
+
+  RefPtr<MediaRawData> sample(aSample);
+  if (!mGMP) {
+    return DecodePromise::CreateAndReject(
+        MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+                    RESULT_DETAIL("mGMP not initialized")),
+        __func__);
+  }
+
+  if (mTrackingId) {
+    // Profiler/telemetry markers for this decode request.
+    MediaInfoFlag flag = MediaInfoFlag::None;
+    flag |= (aSample->mKeyframe ? MediaInfoFlag::KeyFrame
+                                : MediaInfoFlag::NonKeyFrame);
+    if (mGMP->GetPluginType() == GMPPluginType::OpenH264) {
+      flag |= MediaInfoFlag::SoftwareDecoding;
+    }
+    if (MP4Decoder::IsH264(mConfig.mMimeType)) {
+      flag |= MediaInfoFlag::VIDEO_H264;
+    } else if (VPXDecoder::IsVP8(mConfig.mMimeType)) {
+      flag |= MediaInfoFlag::VIDEO_VP8;
+    } else if (VPXDecoder::IsVP9(mConfig.mMimeType)) {
+      flag |= MediaInfoFlag::VIDEO_VP9;
+    }
+    mPerformanceRecorder.Start(aSample->mTime.ToMicroseconds(),
+                               "GMPVideoDecoder"_ns, *mTrackingId, flag);
+  }
+
+  GMPUniquePtr<GMPVideoEncodedFrame> frame = CreateFrame(sample);
+  if (!frame) {
+    return DecodePromise::CreateAndReject(
+        MediaResult(NS_ERROR_OUT_OF_MEMORY,
+                    RESULT_DETAIL("CreateFrame returned null")),
+        __func__);
+  }
+
+  // Capture the timestamp before |frame| is moved into Decode().
+  uint64_t frameTimestamp = frame->TimeStamp();
+  RefPtr<DecodePromise> p = mDecodePromise.Ensure(__func__);
+  nsTArray<uint8_t> info;  // No codec specific per-frame info to pass.
+  nsresult rv = mGMP->Decode(std::move(frame), false, info, 0);
+  if (NS_FAILED(rv)) {
+    // NOTE(review): the promise is rejected here, yet metadata is still
+    // inserted below, leaving a stale entry until the next Flush/Drain —
+    // presumably harmless, but worth confirming.
+    mDecodePromise.Reject(MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+                                      RESULT_DETAIL("mGMP->Decode:%" PRIx32,
+                                                    static_cast<uint32_t>(rv))),
+                          __func__);
+  }
+
+  // If we have multiple outstanding frames, we need to track which offset
+  // belongs to which frame. During seek, it is possible to get the same frame
+  // requested twice, if the old frame is still outstanding. We will simply drop
+  // the extra decoded frame and request more input if the last outstanding.
+  mSamples.WithEntryHandle(frameTimestamp, [&](auto entryHandle) {
+    auto sampleData = MakeUnique<SampleMetadata>(sample);
+    entryHandle.InsertOrUpdate(std::move(sampleData));
+  });
+
+  return p;
+}
+
+// Cancel outstanding work and reset the plugin. The promise is resolved by
+// ResetComplete() when the plugin acknowledges, or immediately if the reset
+// could not even be issued.
+RefPtr<MediaDataDecoder::FlushPromise> GMPVideoDecoder::Flush() {
+  MOZ_ASSERT(IsOnGMPThread());
+
+  mDecodePromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+  mDrainPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+
+  RefPtr<FlushPromise> p = mFlushPromise.Ensure(__func__);
+  if (!mGMP || NS_FAILED(mGMP->Reset())) {
+    // Abort the flush.
+    mPerformanceRecorder.Record(std::numeric_limits<int64_t>::max());
+    mFlushPromise.Resolve(true, __func__);
+  }
+  return p;
+}
+
+// Ask the plugin to emit all remaining frames. DrainComplete() resolves the
+// promise with them; if the drain cannot be issued, resolve empty now.
+RefPtr<MediaDataDecoder::DecodePromise> GMPVideoDecoder::Drain() {
+  MOZ_ASSERT(IsOnGMPThread());
+
+  MOZ_ASSERT(mDecodePromise.IsEmpty(), "Must wait for decoding to complete");
+
+  RefPtr<DecodePromise> p = mDrainPromise.Ensure(__func__);
+  if (!mGMP || NS_FAILED(mGMP->Drain())) {
+    mDrainPromise.Resolve(DecodedData(), __func__);
+  }
+
+  return p;
+}
+
+// Tear down: cancel pending init/flush, close the plugin connection. Close()
+// also unblocks any flush/drain waiting on plugin callbacks.
+RefPtr<ShutdownPromise> GMPVideoDecoder::Shutdown() {
+  mInitPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+  mFlushPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+
+  // Note that this *may* be called from the proxy thread also.
+  // TODO: If that's the case, then this code is racy.
+  if (!mGMP) {
+    return ShutdownPromise::CreateAndResolve(true, __func__);
+  }
+  // Note this unblocks flush and drain operations waiting for callbacks.
+  mGMP->Close();
+  mGMP = nullptr;
+  return ShutdownPromise::CreateAndResolve(true, __func__);
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.h b/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.h
new file mode 100644
index 0000000000..1f0f59c685
--- /dev/null
+++ b/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.h
@@ -0,0 +1,129 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/layers/KnowsCompositor.h"
+#if !defined(GMPVideoDecoder_h_)
+# define GMPVideoDecoder_h_
+
+# include "GMPVideoDecoderProxy.h"
+# include "ImageContainer.h"
+# include "MediaDataDecoderProxy.h"
+# include "MediaInfo.h"
+# include "PerformanceRecorder.h"
+# include "PlatformDecoderModule.h"
+# include "ReorderQueue.h"
+# include "mozIGeckoMediaPluginService.h"
+# include "nsClassHashtable.h"
+
+namespace mozilla {
+
+// Stack-only bundle of the CreateDecoderParams fields GMPVideoDecoder uses.
+// Holds non-owning references; must not outlive the originating params.
+struct MOZ_STACK_CLASS GMPVideoDecoderParams {
+  explicit GMPVideoDecoderParams(const CreateDecoderParams& aParams);
+
+  const VideoInfo& mConfig;
+  layers::ImageContainer* mImageContainer;
+  GMPCrashHelper* mCrashHelper;
+  layers::KnowsCompositor* mKnowsCompositor;
+  const Maybe<TrackingId> mTrackingId;
+};
+
+DDLoggedTypeDeclNameAndBase(GMPVideoDecoder, MediaDataDecoder);
+
+// MediaDataDecoder that proxies video decoding to a Gecko Media Plugin.
+// All operations and plugin callbacks run on the GMP thread.
+class GMPVideoDecoder final : public MediaDataDecoder,
+                              public GMPVideoDecoderCallbackProxy,
+                              public DecoderDoctorLifeLogger<GMPVideoDecoder> {
+ public:
+  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(GMPVideoDecoder, final);
+
+  explicit GMPVideoDecoder(const GMPVideoDecoderParams& aParams);
+
+  RefPtr<InitPromise> Init() override;
+  RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
+  RefPtr<DecodePromise> Drain() override;
+  RefPtr<FlushPromise> Flush() override;
+  RefPtr<ShutdownPromise> Shutdown() override;
+  nsCString GetDescriptionName() const override {
+    return "gmp video decoder"_ns;
+  }
+  nsCString GetCodecName() const override;
+  // H264 input must be AnnexB for most GMPs, AVCC for OpenH264; decided in
+  // GMPInitDone().
+  ConversionRequired NeedsConversion() const override {
+    return mConvertToAnnexB ? ConversionRequired::kNeedAnnexB
+                            : ConversionRequired::kNeedAVCC;
+  }
+  bool CanDecodeBatch() const override { return mCanDecodeBatch; }
+
+  // GMPVideoDecoderCallbackProxy
+  // All those methods are called on the GMP thread.
+  void Decoded(GMPVideoi420Frame* aDecodedFrame) override;
+  void ReceivedDecodedReferenceFrame(const uint64_t aPictureId) override;
+  void ReceivedDecodedFrame(const uint64_t aPictureId) override;
+  void InputDataExhausted() override;
+  void DrainComplete() override;
+  void ResetComplete() override;
+  void Error(GMPErr aErr) override;
+  void Terminated() override;
+
+ protected:
+  // Virtual hooks overridable by subclasses (e.g. EME decoders).
+  virtual void InitTags(nsTArray<nsCString>& aTags);
+  virtual nsCString GetNodeId();
+  virtual GMPUniquePtr<GMPVideoEncodedFrame> CreateFrame(MediaRawData* aSample);
+  virtual const VideoInfo& GetConfig() const;
+  void ProcessReorderQueue(MozPromiseHolder<DecodePromise>& aPromise,
+                           const char* aMethodName);
+
+ private:
+  ~GMPVideoDecoder() = default;
+
+  // Adapter passed to the GMP service; forwards plugin acquisition results
+  // to GMPInitDone() while keeping the decoder alive.
+  class GMPInitDoneCallback : public GetGMPVideoDecoderCallback {
+   public:
+    explicit GMPInitDoneCallback(GMPVideoDecoder* aDecoder)
+        : mDecoder(aDecoder) {}
+
+    void Done(GMPVideoDecoderProxy* aGMP, GMPVideoHost* aHost) override {
+      mDecoder->GMPInitDone(aGMP, aHost);
+    }
+
+   private:
+    RefPtr<GMPVideoDecoder> mDecoder;
+  };
+  void GMPInitDone(GMPVideoDecoderProxy* aGMP, GMPVideoHost* aHost);
+
+  const VideoInfo mConfig;
+  nsCOMPtr<mozIGeckoMediaPluginService> mMPS;
+  // Non-owning; nulled in Shutdown(). Lifetime managed by the GMP service.
+  GMPVideoDecoderProxy* mGMP;
+  GMPVideoHost* mHost;
+  // True for OpenH264: rewrite NAL length fields in CreateFrame().
+  bool mConvertNALUnitLengths;
+  MozPromiseHolder<InitPromise> mInitPromise;
+  RefPtr<GMPCrashHelper> mCrashHelper;
+
+  // Per-input bookkeeping needed to build the output VideoData; keyed by
+  // input timestamp in mSamples.
+  struct SampleMetadata {
+    explicit SampleMetadata(MediaRawData* aSample)
+        : mOffset(aSample->mOffset), mKeyframe(aSample->mKeyframe) {}
+    int64_t mOffset;
+    bool mKeyframe;
+  };
+
+  nsClassHashtable<nsUint64HashKey, SampleMetadata> mSamples;
+  RefPtr<layers::ImageContainer> mImageContainer;
+  RefPtr<layers::KnowsCompositor> mKnowsCompositor;
+  PerformanceRecorderMulti<DecodeStage> mPerformanceRecorder;
+  const Maybe<TrackingId> mTrackingId;
+
+  // Reorder window size (H264 only; 0 otherwise) and the two output queues —
+  // which one is used depends on mReorderFrames.
+  uint32_t mMaxRefFrames = 0;
+  ReorderQueue mReorderQueue;
+  DecodedData mUnorderedData;
+
+  MozPromiseHolder<DecodePromise> mDecodePromise;
+  MozPromiseHolder<DecodePromise> mDrainPromise;
+  MozPromiseHolder<FlushPromise> mFlushPromise;
+  bool mConvertToAnnexB = false;
+  bool mCanDecodeBatch = false;
+  bool mReorderFrames = true;
+};
+
+} // namespace mozilla
+
+#endif // GMPVideoDecoder_h_
diff --git a/dom/media/platforms/agnostic/gmp/moz.build b/dom/media/platforms/agnostic/gmp/moz.build
new file mode 100644
index 0000000000..8e49f4a0e8
--- /dev/null
+++ b/dom/media/platforms/agnostic/gmp/moz.build
@@ -0,0 +1,24 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+LOCAL_INCLUDES += [
+    "/dom/media/gmp",  # for GMPLog.h,
+]
+
+# Headers consumed by other media platform code (e.g. PDMFactory).
+EXPORTS += [
+    "GMPDecoderModule.h",
+    "GMPVideoDecoder.h",
+]
+
+UNIFIED_SOURCES += [
+    "GMPDecoderModule.cpp",
+    "GMPVideoDecoder.cpp",
+]
+
+# GMPVideoEncodedFrameImpl.h needs IPC
+include("/ipc/chromium/chromium-config.mozbuild")
+
+FINAL_LIBRARY = "xul"