summaryrefslogtreecommitdiffstats
path: root/dom/media/platforms/wrappers
diff options
context:
space:
mode:
Diffstat (limited to 'dom/media/platforms/wrappers')
-rw-r--r--dom/media/platforms/wrappers/AudioTrimmer.cpp214
-rw-r--r--dom/media/platforms/wrappers/AudioTrimmer.h50
-rw-r--r--dom/media/platforms/wrappers/MediaChangeMonitor.cpp951
-rw-r--r--dom/media/platforms/wrappers/MediaChangeMonitor.h127
-rw-r--r--dom/media/platforms/wrappers/MediaDataDecoderProxy.cpp137
-rw-r--r--dom/media/platforms/wrappers/MediaDataDecoderProxy.h57
6 files changed, 1536 insertions, 0 deletions
diff --git a/dom/media/platforms/wrappers/AudioTrimmer.cpp b/dom/media/platforms/wrappers/AudioTrimmer.cpp
new file mode 100644
index 0000000000..fa37132314
--- /dev/null
+++ b/dom/media/platforms/wrappers/AudioTrimmer.cpp
@@ -0,0 +1,214 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "AudioTrimmer.h"
+
+#define LOG(arg, ...) \
+ DDMOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, "::%s: " arg, __func__, \
+ ##__VA_ARGS__)
+
+#define LOGV(arg, ...) \
+ DDMOZ_LOG(sPDMLog, mozilla::LogLevel::Verbose, "::%s: " arg, __func__, \
+ ##__VA_ARGS__)
+
+namespace mozilla {
+
+using media::TimeInterval;
+using media::TimeUnit;
+
+RefPtr<MediaDataDecoder::InitPromise> AudioTrimmer::Init() {
+  // Remember the serial event target we were initialized on; every later
+  // entry point asserts that it runs on this same thread.
+  mThread = GetCurrentSerialEventTarget();
+  RefPtr<InitPromise> initPromise = mDecoder->Init();
+  return initPromise;
+}
+
+RefPtr<MediaDataDecoder::DecodePromise> AudioTrimmer::Decode(
+    MediaRawData* aSample) {
+  MOZ_ASSERT(mThread->IsOnCurrentThread(),
+             "We're not on the thread we were first initialized on");
+  // Queue this sample's trimming interval (if any) before handing it to the
+  // wrapped decoder; HandleDecodedResult consumes queued intervals in order
+  // when the decoded frames come back.
+  RefPtr<MediaRawData> sample = aSample;
+  PrepareTrimmers(sample);
+  RefPtr<AudioTrimmer> self = this;
+  RefPtr<DecodePromise> p = mDecoder->Decode(sample)->Then(
+      GetCurrentSerialEventTarget(), __func__,
+      [self, sample](DecodePromise::ResolveOrRejectValue&& aValue) {
+        // sample is only passed along for logging purposes.
+        return self->HandleDecodedResult(std::move(aValue), sample);
+      });
+  return p;
+}
+
+RefPtr<MediaDataDecoder::FlushPromise> AudioTrimmer::Flush() {
+  MOZ_ASSERT(mThread->IsOnCurrentThread(),
+             "We're not on the thread we were first initialized on");
+  // Flush the wrapped decoder, then forget any queued trimming intervals:
+  // the samples they were recorded for will never be returned now.
+  RefPtr<FlushPromise> flushPromise = mDecoder->Flush();
+  mTrimmers.Clear();
+  return flushPromise;
+}
+
+RefPtr<MediaDataDecoder::DecodePromise> AudioTrimmer::Drain() {
+  MOZ_ASSERT(mThread->IsOnCurrentThread(),
+             "We're not on the thread we were first initialized on");
+  LOG("Draining");
+  RefPtr<DecodePromise> p = mDecoder->Drain()->Then(
+      GetCurrentSerialEventTarget(), __func__,
+      [self = RefPtr{this}](DecodePromise::ResolveOrRejectValue&& aValue) {
+        // Drained frames still go through trimming; nullptr because there is
+        // no single input sample to log against.
+        return self->HandleDecodedResult(std::move(aValue), nullptr);
+      });
+  return p;
+}
+
+RefPtr<ShutdownPromise> AudioTrimmer::Shutdown() {
+  // Init() may never have run, in which case mThread is still null and the
+  // thread assertion is skipped.
+  MOZ_ASSERT(!mThread || mThread->IsOnCurrentThread());
+  RefPtr<ShutdownPromise> shutdownPromise = mDecoder->Shutdown();
+  return shutdownPromise;
+}
+
+nsCString AudioTrimmer::GetDescriptionName() const {
+  // The trimmer is transparent: report the wrapped decoder's description.
+  nsCString description = mDecoder->GetDescriptionName();
+  return description;
+}
+
+bool AudioTrimmer::IsHardwareAccelerated(nsACString& aFailureReason) const {
+  // Defer entirely to the wrapped decoder, including the failure-reason
+  // out-parameter.
+  const bool accelerated = mDecoder->IsHardwareAccelerated(aFailureReason);
+  return accelerated;
+}
+
+void AudioTrimmer::SetSeekThreshold(const media::TimeUnit& aTime) {
+  // Pure pass-through; trimming state is unaffected by the seek threshold.
+  mDecoder->SetSeekThreshold(aTime);
+}
+
+bool AudioTrimmer::SupportDecoderRecycling() const {
+  // Forward the inner decoder's capability unchanged.
+  const bool recyclable = mDecoder->SupportDecoderRecycling();
+  return recyclable;
+}
+
+MediaDataDecoder::ConversionRequired AudioTrimmer::NeedsConversion() const {
+  // The wrapped decoder decides which bitstream format it requires.
+  const ConversionRequired conversion = mDecoder->NeedsConversion();
+  return conversion;
+}
+
+RefPtr<MediaDataDecoder::DecodePromise> AudioTrimmer::HandleDecodedResult(
+    DecodePromise::ResolveOrRejectValue&& aValue, MediaRawData* aRaw) {
+  MOZ_ASSERT(mThread->IsOnCurrentThread(),
+             "We're not on the thread we were first initialized on");
+  // Propagate decode errors untouched.
+  if (aValue.IsReject()) {
+    return DecodePromise::CreateAndReject(std::move(aValue.RejectValue()),
+                                          __func__);
+  }
+  // aRaw is only used for logging; it is null when called from Drain() or
+  // DecodeBatch().
+  int64_t rawStart = aRaw ? aRaw->mTime.ToMicroseconds() : 0;
+  int64_t rawEnd = aRaw ? aRaw->GetEndTime().ToMicroseconds() : 0;
+  MediaDataDecoder::DecodedData results = std::move(aValue.ResolveValue());
+  if (results.IsEmpty()) {
+    // No samples returned, we assume this is due to the latency of the
+    // decoder and that the related decoded sample will be returned during
+    // the next call to Decode().
+    LOG("No sample returned for sample[%" PRId64 ",%" PRId64 "]", rawStart,
+        rawEnd);
+  }
+  // Walk the decoded frames, consuming one queued trimmer per frame. The
+  // index only advances when a frame is kept; dropped frames are erased in
+  // place, so the next frame slides into slot i.
+  for (uint32_t i = 0; i < results.Length();) {
+    const RefPtr<MediaData>& data = results[i];
+    MOZ_ASSERT(data->mType == MediaData::Type::AUDIO_DATA);
+    TimeInterval sampleInterval(data->mTime, data->GetEndTime());
+    if (mTrimmers.IsEmpty()) {
+      // mTrimmers being empty can only occur if the decoder returned more
+      // frames than we pushed in. We can't handle this case, abort trimming.
+      LOG("sample[%" PRId64 ",%" PRId64 "] (decoded[%" PRId64 ",%" PRId64
+          "] no trimming information",
+          rawStart, rawEnd, sampleInterval.mStart.ToMicroseconds(),
+          sampleInterval.mEnd.ToMicroseconds());
+      i++;
+      continue;
+    }
+
+    // Pop the interval queued by PrepareTrimmers for this frame.
+    Maybe<TimeInterval> trimmer = mTrimmers[0];
+    mTrimmers.RemoveElementAt(0);
+    if (!trimmer) {
+      // Those frames didn't need trimming.
+      LOGV("sample[%" PRId64 ",%" PRId64 "] (decoded[%" PRId64 ",%" PRId64
+           "] no trimming needed",
+           rawStart, rawEnd, sampleInterval.mStart.ToMicroseconds(),
+           sampleInterval.mEnd.ToMicroseconds());
+      i++;
+      continue;
+    }
+    if (!trimmer->Intersects(sampleInterval)) {
+      LOG("sample[%" PRId64 ",%" PRId64 "] (decoded[%" PRId64 ",%" PRId64
+          "] would be empty after trimming, dropping it",
+          rawStart, rawEnd, sampleInterval.mStart.ToMicroseconds(),
+          sampleInterval.mEnd.ToMicroseconds());
+      results.RemoveElementAt(i);
+      continue;
+    }
+    LOG("Trimming sample[%" PRId64 ",%" PRId64 "] to [%" PRId64 ",%" PRId64
+        "] (raw "
+        "was:[%" PRId64 ",%" PRId64 "])",
+        sampleInterval.mStart.ToMicroseconds(),
+        sampleInterval.mEnd.ToMicroseconds(), trimmer->mStart.ToMicroseconds(),
+        trimmer->mEnd.ToMicroseconds(), rawStart, rawEnd);
+
+    // Clamp the trim window to the frame's actual extent before applying it.
+    TimeInterval trim({std::max(trimmer->mStart, sampleInterval.mStart),
+                       std::min(trimmer->mEnd, sampleInterval.mEnd)});
+    AudioData* sample = static_cast<AudioData*>(data.get());
+    bool ok = sample->SetTrimWindow(trim);
+    NS_ASSERTION(ok, "Trimming of audio sample failed");
+    Unused << ok;
+    if (sample->Frames() == 0) {
+      LOG("sample[%" PRId64 ",%" PRId64
+          "] is empty after trimming, dropping it",
+          rawStart, rawEnd);
+      results.RemoveElementAt(i);
+      continue;
+    }
+    i++;
+  }
+  return DecodePromise::CreateAndResolve(std::move(results), __func__);
+}
+
+RefPtr<MediaDataDecoder::DecodePromise> AudioTrimmer::DecodeBatch(
+    nsTArray<RefPtr<MediaRawData>>&& aSamples) {
+  MOZ_ASSERT(mThread->IsOnCurrentThread(),
+             "We're not on the thread we were first initialized on");
+  LOG("DecodeBatch");
+
+  // Queue one trimmer entry per compressed sample, in submission order.
+  for (auto&& sample : aSamples) {
+    PrepareTrimmers(sample);
+  }
+  RefPtr<DecodePromise> p =
+      mDecoder->DecodeBatch(std::move(aSamples))
+          ->Then(GetCurrentSerialEventTarget(), __func__,
+                 [self = RefPtr{this}](
+                     DecodePromise::ResolveOrRejectValue&& aValue) {
+                   // If the decoder returned fewer samples than what we fed
+                   // it, we assume that this is due to the decoder's decoding
+                   // delay: all decoded frames have been shifted by n =
+                   // compressedSamples.Length() - decodedSamples.Length() and
+                   // the first n compressed samples returned nothing.
+                   return self->HandleDecodedResult(std::move(aValue), nullptr);
+                 });
+  return p;
+}
+
+void AudioTrimmer::PrepareTrimmers(MediaRawData* aRaw) {
+  // A compressed sample indicates that it needs to be trimmed after decoding
+  // by having its mOriginalPresentationWindow member set; in which case
+  // mOriginalPresentationWindow contains the original time and duration of
+  // the frame set by the demuxer and mTime and mDuration set to what it
+  // should be after trimming.
+  if (aRaw->mOriginalPresentationWindow) {
+    LOG("sample[%" PRId64 ",%" PRId64 "] has trimming info ([%" PRId64
+        ",%" PRId64 "]",
+        aRaw->mOriginalPresentationWindow->mStart.ToMicroseconds(),
+        aRaw->mOriginalPresentationWindow->mEnd.ToMicroseconds(),
+        aRaw->mTime.ToMicroseconds(), aRaw->GetEndTime().ToMicroseconds());
+    // Queue the post-trim window, then restore the sample to its original
+    // (untrimmed) window so the decoder sees the full frame.
+    mTrimmers.AppendElement(
+        Some(TimeInterval(aRaw->mTime, aRaw->GetEndTime())));
+    aRaw->mTime = aRaw->mOriginalPresentationWindow->mStart;
+    aRaw->mDuration = aRaw->mOriginalPresentationWindow->Length();
+  } else {
+    LOGV("sample[%" PRId64 ",%" PRId64 "] no trimming information",
+         aRaw->mTime.ToMicroseconds(), aRaw->GetEndTime().ToMicroseconds());
+    mTrimmers.AppendElement(Nothing());
+  }
+}
+
+} // namespace mozilla
+
+#undef LOG
diff --git a/dom/media/platforms/wrappers/AudioTrimmer.h b/dom/media/platforms/wrappers/AudioTrimmer.h
new file mode 100644
index 0000000000..dd7ac1a57b
--- /dev/null
+++ b/dom/media/platforms/wrappers/AudioTrimmer.h
@@ -0,0 +1,50 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(AudioTrimmer_h_)
+# define AudioTrimmer_h_
+
+# include "PlatformDecoderModule.h"
+# include "mozilla/Mutex.h"
+
+namespace mozilla {
+
+DDLoggedTypeDeclNameAndBase(AudioTrimmer, MediaDataDecoder);
+
+// MediaDataDecoder wrapper that trims decoded audio frames to the
+// presentation window carried by the compressed samples
+// (mOriginalPresentationWindow) — see PrepareTrimmers/HandleDecodedResult in
+// AudioTrimmer.cpp.
+class AudioTrimmer : public MediaDataDecoder {
+ public:
+  // NOTE(review): aParams is currently unused; presumably kept for signature
+  // symmetry with the other wrapper decoders — confirm before removing.
+  AudioTrimmer(already_AddRefed<MediaDataDecoder> aDecoder,
+               const CreateDecoderParams& aParams)
+      : mDecoder(aDecoder) {}
+
+  RefPtr<InitPromise> Init() override;
+  RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
+  bool CanDecodeBatch() const override { return mDecoder->CanDecodeBatch(); }
+  RefPtr<DecodePromise> DecodeBatch(
+      nsTArray<RefPtr<MediaRawData>>&& aSamples) override;
+  RefPtr<DecodePromise> Drain() override;
+  RefPtr<FlushPromise> Flush() override;
+  RefPtr<ShutdownPromise> Shutdown() override;
+  nsCString GetDescriptionName() const override;
+  bool IsHardwareAccelerated(nsACString& aFailureReason) const override;
+  void SetSeekThreshold(const media::TimeUnit& aTime) override;
+  bool SupportDecoderRecycling() const override;
+  ConversionRequired NeedsConversion() const override;
+
+ private:
+  // Apply trimming information on decoded data. aRaw can be null as it's only
+  // used for logging purposes.
+  RefPtr<DecodePromise> HandleDecodedResult(
+      DecodePromise::ResolveOrRejectValue&& aValue, MediaRawData* aRaw);
+  // Record the trimming window (if any) of a compressed sample before it is
+  // sent to mDecoder.
+  void PrepareTrimmers(MediaRawData* aRaw);
+  // The wrapped decoder every call is forwarded to.
+  const RefPtr<MediaDataDecoder> mDecoder;
+  // Serial event target captured in Init(); used for thread assertions.
+  nsCOMPtr<nsISerialEventTarget> mThread;
+  // One entry per in-flight compressed sample: Some(interval) when the
+  // corresponding decoded frames must be trimmed, Nothing() otherwise.
+  AutoTArray<Maybe<media::TimeInterval>, 2> mTrimmers;
+};
+
+} // namespace mozilla
+
+#endif // AudioTrimmer_h_
diff --git a/dom/media/platforms/wrappers/MediaChangeMonitor.cpp b/dom/media/platforms/wrappers/MediaChangeMonitor.cpp
new file mode 100644
index 0000000000..17387204ed
--- /dev/null
+++ b/dom/media/platforms/wrappers/MediaChangeMonitor.cpp
@@ -0,0 +1,951 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "MediaChangeMonitor.h"
+
+#include "AnnexB.h"
+#include "H264.h"
+#include "GeckoProfiler.h"
+#include "ImageContainer.h"
+#include "MP4Decoder.h"
+#include "MediaInfo.h"
+#include "PDMFactory.h"
+#include "VPXDecoder.h"
+#ifdef MOZ_AV1
+# include "AOMDecoder.h"
+#endif
+#include "gfxUtils.h"
+#include "mozilla/ProfilerMarkers.h"
+#include "mozilla/StaticPrefs_media.h"
+#include "mozilla/TaskQueue.h"
+
+namespace mozilla {
+
+extern LazyLogModule gMediaDecoderLog;
+
+#define LOG(x, ...) \
+ MOZ_LOG(gMediaDecoderLog, LogLevel::Debug, (x, ##__VA_ARGS__))
+
+// H264ChangeMonitor is used to ensure that only AVCC or AnnexB is fed to the
+// underlying MediaDataDecoder. The H264ChangeMonitor allows playback of content
+// where the SPS NAL may not be provided in the init segment (e.g. AVC3 or Annex
+// B) H264ChangeMonitor will monitor the input data, and will delay creation of
+// the MediaDataDecoder until a SPS and PPS NALs have been extracted.
+
+class H264ChangeMonitor : public MediaChangeMonitor::CodecChangeMonitor {
+ public:
+  explicit H264ChangeMonitor(const VideoInfo& aInfo, bool aFullParsing)
+      : mCurrentConfig(aInfo), mFullParsing(aFullParsing) {
+    if (CanBeInstantiated()) {
+      UpdateConfigFromExtraData(aInfo.mExtraData);
+    }
+  }
+
+  bool CanBeInstantiated() const override {
+    // A decoder can only be created once an SPS has been seen (out of band in
+    // the init segment, or in band in a sample).
+    return H264::HasSPS(mCurrentConfig.mExtraData);
+  }
+
+  MediaResult CheckForChange(MediaRawData* aSample) override {
+    // To be usable we need to convert the sample to 4 bytes NAL size AVCC.
+    if (!AnnexB::ConvertSampleToAVCC(aSample)) {
+      // We need AVCC content to be able to later parse the SPS.
+      // This is a no-op if the data is already AVCC.
+      return MediaResult(NS_ERROR_OUT_OF_MEMORY,
+                         RESULT_DETAIL("ConvertSampleToAVCC"));
+    }
+
+    if (!AnnexB::IsAVCC(aSample)) {
+      return MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+                         RESULT_DETAIL("Invalid H264 content"));
+    }
+
+    // Only pay for in-band SPS/PPS extraction when it can matter: on
+    // keyframes, until a first SPS has been seen, or when full parsing was
+    // requested.
+    RefPtr<MediaByteBuffer> extra_data =
+        aSample->mKeyframe || !mGotSPS || mFullParsing
+            ? H264::ExtractExtraData(aSample)
+            : nullptr;
+
+    if (!H264::HasSPS(extra_data) && !H264::HasSPS(mCurrentConfig.mExtraData)) {
+      // We don't have inband data and the original config didn't contain a SPS.
+      // We can't decode this content.
+      return NS_ERROR_NOT_INITIALIZED;
+    }
+
+    mGotSPS = true;
+
+    if (!H264::HasSPS(extra_data)) {
+      // This sample doesn't contain inband SPS/PPS
+      // We now check if the out of band one has changed.
+      // This scenario can currently only occur on Android with devices that can
+      // recycle a decoder.
+      bool hasOutOfBandExtraData = H264::HasSPS(aSample->mExtraData);
+      if (!hasOutOfBandExtraData || !mPreviousExtraData ||
+          H264::CompareExtraData(aSample->mExtraData, mPreviousExtraData)) {
+        if (hasOutOfBandExtraData && !mPreviousExtraData) {
+          // We are decoding the first sample, store the out of band sample's
+          // extradata so that we can check for future change.
+          mPreviousExtraData = aSample->mExtraData;
+        }
+        return NS_OK;
+      }
+      extra_data = aSample->mExtraData;
+    } else if (H264::CompareExtraData(extra_data, mCurrentConfig.mExtraData)) {
+      // In-band SPS/PPS identical to the current config: no change.
+      return NS_OK;
+    }
+
+    // Store the sample's extradata so we don't trigger a false positive
+    // with the out of band test on the next sample.
+    mPreviousExtraData = aSample->mExtraData;
+    UpdateConfigFromExtraData(extra_data);
+
+    PROFILER_MARKER_TEXT("H264 Stream Change", MEDIA_PLAYBACK, {},
+                         "H264ChangeMonitor::CheckForChange has detected a "
+                         "change in the stream and will request a new decoder");
+    return NS_ERROR_DOM_MEDIA_NEED_NEW_DECODER;
+  }
+
+  const TrackInfo& Config() const override { return mCurrentConfig; }
+
+  MediaResult PrepareSample(MediaDataDecoder::ConversionRequired aConversion,
+                            MediaRawData* aSample,
+                            bool aNeedKeyFrame) override {
+    MOZ_DIAGNOSTIC_ASSERT(
+        aConversion == MediaDataDecoder::ConversionRequired::kNeedAnnexB ||
+            aConversion == MediaDataDecoder::ConversionRequired::kNeedAVCC,
+        "Conversion must be either AVCC or AnnexB");
+
+    // Stamp the sample with the latest config before (optionally) converting
+    // it to the bitstream format the decoder asked for.
+    aSample->mExtraData = mCurrentConfig.mExtraData;
+    aSample->mTrackInfo = mTrackInfo;
+
+    if (aConversion == MediaDataDecoder::ConversionRequired::kNeedAnnexB) {
+      auto res = AnnexB::ConvertSampleToAnnexB(aSample, aNeedKeyFrame);
+      if (res.isErr()) {
+        return MediaResult(res.unwrapErr(),
+                           RESULT_DETAIL("ConvertSampleToAnnexB"));
+      }
+    }
+
+    return NS_OK;
+  }
+
+ private:
+  // Parses the SPS in aExtraData (when sane) into mCurrentConfig and bumps
+  // the stream id.
+  void UpdateConfigFromExtraData(MediaByteBuffer* aExtraData) {
+    SPSData spsdata;
+    if (H264::DecodeSPSFromExtraData(aExtraData, spsdata) &&
+        spsdata.pic_width > 0 && spsdata.pic_height > 0) {
+      H264::EnsureSPSIsSane(spsdata);
+      mCurrentConfig.mImage.width = spsdata.pic_width;
+      mCurrentConfig.mImage.height = spsdata.pic_height;
+      mCurrentConfig.mDisplay.width = spsdata.display_width;
+      mCurrentConfig.mDisplay.height = spsdata.display_height;
+      mCurrentConfig.mColorDepth = spsdata.ColorDepth();
+      mCurrentConfig.mColorSpace = Some(spsdata.ColorSpace());
+      // spsdata.colour_primaries has the same values as
+      // gfx::CICP::ColourPrimaries.
+      mCurrentConfig.mColorPrimaries = gfxUtils::CicpToColorPrimaries(
+          static_cast<gfx::CICP::ColourPrimaries>(spsdata.colour_primaries),
+          gMediaDecoderLog);
+      // spsdata.transfer_characteristics has the same values as
+      // gfx::CICP::TransferCharacteristics.
+      mCurrentConfig.mTransferFunction = gfxUtils::CicpToTransferFunction(
+          static_cast<gfx::CICP::TransferCharacteristics>(
+              spsdata.transfer_characteristics));
+      mCurrentConfig.mColorRange = spsdata.video_full_range_flag
+                                       ? gfx::ColorRange::FULL
+                                       : gfx::ColorRange::LIMITED;
+    }
+    mCurrentConfig.mExtraData = aExtraData;
+    mTrackInfo = new TrackInfoSharedPtr(mCurrentConfig, mStreamID++);
+  }
+
+  // Most recent config, updated each time a new SPS is parsed.
+  VideoInfo mCurrentConfig;
+  // Monotonic id distinguishing configs across stream changes.
+  uint32_t mStreamID = 0;
+  const bool mFullParsing;
+  // True once any SPS (in band or out of band) has been observed.
+  bool mGotSPS = false;
+  RefPtr<TrackInfoSharedPtr> mTrackInfo;
+  // Last out-of-band extradata seen; used to detect out-of-band changes.
+  RefPtr<MediaByteBuffer> mPreviousExtraData;
+};
+
+// Gets the pixel aspect ratio from the decoded video size and the rendered
+// size.
+// Gets the pixel aspect ratio from the decoded video size and the rendered
+// size.
+inline double GetPixelAspectRatio(const gfx::IntSize& aImage,
+                                  const gfx::IntSize& aDisplay) {
+  const double widthRatio =
+      static_cast<double>(aDisplay.Width()) / aImage.Width();
+  const double heightRatio =
+      static_cast<double>(aDisplay.Height()) / aImage.Height();
+  return widthRatio / heightRatio;
+}
+
+// Returns the render size based on the PAR and the new image size.
+// Returns the render size based on the PAR and the new image size: only the
+// width is scaled, the height is kept as-is.
+inline gfx::IntSize ApplyPixelAspectRatio(double aPixelAspectRatio,
+                                          const gfx::IntSize& aImage) {
+  const int32_t scaledWidth =
+      static_cast<int32_t>(aImage.Width() * aPixelAspectRatio);
+  return gfx::IntSize(scaledWidth, aImage.Height());
+}
+
+class VPXChangeMonitor : public MediaChangeMonitor::CodecChangeMonitor {
+ public:
+  explicit VPXChangeMonitor(const VideoInfo& aInfo)
+      : mCurrentConfig(aInfo),
+        mCodec(VPXDecoder::IsVP8(aInfo.mMimeType) ? VPXDecoder::Codec::VP8
+                                                  : VPXDecoder::Codec::VP9),
+        mPixelAspectRatio(GetPixelAspectRatio(aInfo.mImage, aInfo.mDisplay)) {
+    mTrackInfo = new TrackInfoSharedPtr(mCurrentConfig, mStreamID++);
+
+    if (mCurrentConfig.mExtraData && !mCurrentConfig.mExtraData->IsEmpty()) {
+      // If we're passed VP codec configuration, store it so that we can
+      // instantiate the decoder on init.
+      VPXDecoder::VPXStreamInfo vpxInfo;
+      vpxInfo.mImage = mCurrentConfig.mImage;
+      vpxInfo.mDisplay = mCurrentConfig.mDisplay;
+      VPXDecoder::ReadVPCCBox(vpxInfo, mCurrentConfig.mExtraData);
+      mInfo = Some(vpxInfo);
+    }
+  }
+
+  bool CanBeInstantiated() const override {
+    // We want to see at least one sample before we create a decoder so that we
+    // can create the vpcC content on mCurrentConfig.mExtraData.
+    return mCodec == VPXDecoder::Codec::VP8 || mInfo ||
+           mCurrentConfig.mCrypto.IsEncrypted();
+  }
+
+  MediaResult CheckForChange(MediaRawData* aSample) override {
+    // Don't look at encrypted content.
+    if (aSample->mCrypto.IsEncrypted()) {
+      return NS_OK;
+    }
+    auto dataSpan = Span<const uint8_t>(aSample->Data(), aSample->Size());
+
+    // We don't trust the keyframe flag as set on the MediaRawData.
+    VPXDecoder::VPXStreamInfo info;
+    if (!VPXDecoder::GetStreamInfo(dataSpan, info, mCodec)) {
+      return NS_ERROR_DOM_MEDIA_DECODE_ERR;
+    }
+    // For both VP8 and VP9, we only look for resolution changes
+    // on keyframes. Other resolution changes are invalid.
+    if (!info.mKeyFrame) {
+      return NS_OK;
+    }
+
+    nsresult rv = NS_OK;
+    if (mInfo) {
+      if (mInfo.ref().IsCompatible(info)) {
+        // Same parameters as before: nothing to do.
+        return rv;
+      }
+      // We can't properly determine the image rect once we've had a resolution
+      // change.
+      mCurrentConfig.ResetImageRect();
+      PROFILER_MARKER_TEXT(
+          "VPX Stream Change", MEDIA_PLAYBACK, {},
+          "VPXChangeMonitor::CheckForChange has detected a change in the "
+          "stream and will request a new decoder");
+      rv = NS_ERROR_DOM_MEDIA_NEED_NEW_DECODER;
+    } else if (mCurrentConfig.mImage != info.mImage ||
+               mCurrentConfig.mDisplay != info.mDisplay) {
+      // We can't properly determine the image rect if we're changing
+      // resolution based on sample information.
+      mCurrentConfig.ResetImageRect();
+      PROFILER_MARKER_TEXT("VPX Stream Init Discrepancy", MEDIA_PLAYBACK, {},
+                           "VPXChangeMonitor::CheckForChange has detected a "
+                           "discrepancy between initialization data and stream "
+                           "content and will request a new decoder");
+      rv = NS_ERROR_DOM_MEDIA_NEED_NEW_DECODER;
+    }
+
+    LOG("Detect inband %s resolution changes, image (%" PRId32 ",%" PRId32
+        ")->(%" PRId32 ",%" PRId32 "), display (%" PRId32 ",%" PRId32
+        ")->(%" PRId32 ",%" PRId32 " %s)",
+        mCodec == VPXDecoder::Codec::VP9 ? "VP9" : "VP8",
+        mCurrentConfig.mImage.Width(), mCurrentConfig.mImage.Height(),
+        info.mImage.Width(), info.mImage.Height(),
+        mCurrentConfig.mDisplay.Width(), mCurrentConfig.mDisplay.Height(),
+        info.mDisplay.Width(), info.mDisplay.Height(),
+        info.mDisplayAndImageDifferent ? "specified" : "unspecified");
+
+    // Adopt the new stream parameters and rebuild the derived config.
+    mInfo = Some(info);
+    mCurrentConfig.mImage = info.mImage;
+    if (info.mDisplayAndImageDifferent) {
+      // If the flag to change the display size is set in the sequence, we
+      // set our original values to begin rescaling according to the new values.
+      mCurrentConfig.mDisplay = info.mDisplay;
+      mPixelAspectRatio = GetPixelAspectRatio(info.mImage, info.mDisplay);
+    } else {
+      mCurrentConfig.mDisplay =
+          ApplyPixelAspectRatio(mPixelAspectRatio, info.mImage);
+    }
+
+    mCurrentConfig.mColorDepth = gfx::ColorDepthForBitDepth(info.mBitDepth);
+    mCurrentConfig.mColorSpace = Some(info.ColorSpace());
+    // VPX bitstream doesn't specify color primaries.
+
+    // We don't update the transfer function here, because VPX bitstream
+    // doesn't specify the transfer function. Instead, we keep the transfer
+    // function (if any) that was set in mCurrentConfig when we were created.
+    // If a video changes colorspaces away from BT2020, we won't clear
+    // mTransferFunction, in case the video changes back to BT2020 and we
+    // need the value again.
+
+    mCurrentConfig.mColorRange = info.ColorRange();
+    if (mCodec == VPXDecoder::Codec::VP9) {
+      // Regenerate the vpcC box in the extradata from the new stream info.
+      mCurrentConfig.mExtraData->ClearAndRetainStorage();
+      VPXDecoder::GetVPCCBox(mCurrentConfig.mExtraData, info);
+    }
+    mTrackInfo = new TrackInfoSharedPtr(mCurrentConfig, mStreamID++);
+
+    return rv;
+  }
+
+  const TrackInfo& Config() const override { return mCurrentConfig; }
+
+  MediaResult PrepareSample(MediaDataDecoder::ConversionRequired aConversion,
+                            MediaRawData* aSample,
+                            bool aNeedKeyFrame) override {
+    // No bitstream conversion needed for VPX; just stamp the current config.
+    aSample->mTrackInfo = mTrackInfo;
+
+    return NS_OK;
+  }
+
+ private:
+  // Most recent config, updated on keyframes carrying new parameters.
+  VideoInfo mCurrentConfig;
+  const VPXDecoder::Codec mCodec;
+  // Last stream info seen (from the vpcC box or a keyframe); Nothing() until
+  // either is available.
+  Maybe<VPXDecoder::VPXStreamInfo> mInfo;
+  // Monotonic id distinguishing configs across stream changes.
+  uint32_t mStreamID = 0;
+  RefPtr<TrackInfoSharedPtr> mTrackInfo;
+  double mPixelAspectRatio;
+};
+
+#ifdef MOZ_AV1
+class AV1ChangeMonitor : public MediaChangeMonitor::CodecChangeMonitor {
+ public:
+  explicit AV1ChangeMonitor(const VideoInfo& aInfo)
+      : mCurrentConfig(aInfo),
+        mPixelAspectRatio(GetPixelAspectRatio(aInfo.mImage, aInfo.mDisplay)) {
+    mTrackInfo = new TrackInfoSharedPtr(mCurrentConfig, mStreamID++);
+
+    if (mCurrentConfig.mExtraData && !mCurrentConfig.mExtraData->IsEmpty()) {
+      // If we're passed AV1 codec configuration, store it so that we can
+      // instantiate a decoder in MediaChangeMonitor::Create.
+      AOMDecoder::AV1SequenceInfo seqInfo;
+      MediaResult seqHdrResult;
+      AOMDecoder::TryReadAV1CBox(mCurrentConfig.mExtraData, seqInfo,
+                                 seqHdrResult);
+      // If the av1C box doesn't include a sequence header specifying image
+      // size, keep the one provided by VideoInfo.
+      if (seqHdrResult.Code() != NS_OK) {
+        seqInfo.mImage = mCurrentConfig.mImage;
+      }
+
+      UpdateConfig(seqInfo);
+    }
+  }
+
+  bool CanBeInstantiated() const override {
+    // We want to have enough codec configuration to determine whether hardware
+    // decoding can be used before creating a decoder. The av1C box or a
+    // sequence header from a sample will contain this information.
+    return mInfo || mCurrentConfig.mCrypto.IsEncrypted();
+  }
+
+  // Folds aInfo into mCurrentConfig (colors, image/display size) and rewrites
+  // the av1C box in the extradata.
+  void UpdateConfig(const AOMDecoder::AV1SequenceInfo& aInfo) {
+    mInfo = Some(aInfo);
+    mCurrentConfig.mColorDepth = gfx::ColorDepthForBitDepth(aInfo.mBitDepth);
+    mCurrentConfig.mColorSpace = gfxUtils::CicpToColorSpace(
+        aInfo.mColorSpace.mMatrix, aInfo.mColorSpace.mPrimaries,
+        gMediaDecoderLog);
+    mCurrentConfig.mColorPrimaries = gfxUtils::CicpToColorPrimaries(
+        aInfo.mColorSpace.mPrimaries, gMediaDecoderLog);
+    mCurrentConfig.mTransferFunction =
+        gfxUtils::CicpToTransferFunction(aInfo.mColorSpace.mTransfer);
+    mCurrentConfig.mColorRange = aInfo.mColorSpace.mRange;
+
+    if (mCurrentConfig.mImage != mInfo->mImage) {
+      gfx::IntSize newDisplay =
+          ApplyPixelAspectRatio(mPixelAspectRatio, aInfo.mImage);
+      LOG("AV1ChangeMonitor detected a resolution change in-band, image "
+          "(%" PRIu32 ",%" PRIu32 ")->(%" PRIu32 ",%" PRIu32
+          "), display (%" PRIu32 ",%" PRIu32 ")->(%" PRIu32 ",%" PRIu32
+          " from PAR)",
+          mCurrentConfig.mImage.Width(), mCurrentConfig.mImage.Height(),
+          aInfo.mImage.Width(), aInfo.mImage.Height(),
+          mCurrentConfig.mDisplay.Width(), mCurrentConfig.mDisplay.Height(),
+          newDisplay.Width(), newDisplay.Height());
+      mCurrentConfig.mImage = aInfo.mImage;
+      mCurrentConfig.mDisplay = newDisplay;
+      mCurrentConfig.ResetImageRect();
+    }
+
+    bool wroteSequenceHeader = false;
+    // Our headers should all be around the same size.
+    mCurrentConfig.mExtraData->ClearAndRetainStorage();
+    AOMDecoder::WriteAV1CBox(aInfo, mCurrentConfig.mExtraData.get(),
+                             wroteSequenceHeader);
+    // The header should always be written if ReadSequenceHeaderInfo
+    // succeeded.
+    MOZ_ASSERT(wroteSequenceHeader);
+  }
+
+  MediaResult CheckForChange(MediaRawData* aSample) override {
+    // Don't look at encrypted content.
+    if (aSample->mCrypto.IsEncrypted()) {
+      return NS_OK;
+    }
+    auto dataSpan = Span<const uint8_t>(aSample->Data(), aSample->Size());
+
+    // We don't trust the keyframe flag as set on the MediaRawData.
+    AOMDecoder::AV1SequenceInfo info;
+    MediaResult seqHdrResult =
+        AOMDecoder::ReadSequenceHeaderInfo(dataSpan, info);
+    nsresult seqHdrCode = seqHdrResult.Code();
+    if (seqHdrCode == NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA) {
+      // No sequence header in this sample: nothing to compare against.
+      return NS_OK;
+    }
+    if (seqHdrCode != NS_OK) {
+      LOG("AV1ChangeMonitor::CheckForChange read a corrupted sample: %s",
+          seqHdrResult.Description().get());
+      return seqHdrResult;
+    }
+
+    nsresult rv = NS_OK;
+    if (mInfo.isSome() &&
+        (mInfo->mProfile != info.mProfile ||
+         mInfo->ColorDepth() != info.ColorDepth() ||
+         mInfo->mMonochrome != info.mMonochrome ||
+         mInfo->mSubsamplingX != info.mSubsamplingX ||
+         mInfo->mSubsamplingY != info.mSubsamplingY ||
+         mInfo->mChromaSamplePosition != info.mChromaSamplePosition ||
+         mInfo->mImage != info.mImage)) {
+      PROFILER_MARKER_TEXT(
+          "AV1 Stream Change", MEDIA_PLAYBACK, {},
+          "AV1ChangeMonitor::CheckForChange has detected a change in a "
+          "stream and will request a new decoder");
+      LOG("AV1ChangeMonitor detected a change and requests a new decoder");
+      rv = NS_ERROR_DOM_MEDIA_NEED_NEW_DECODER;
+    }
+
+    UpdateConfig(info);
+
+    if (rv == NS_ERROR_DOM_MEDIA_NEED_NEW_DECODER) {
+      mTrackInfo = new TrackInfoSharedPtr(mCurrentConfig, mStreamID++);
+    }
+    return rv;
+  }
+
+  const TrackInfo& Config() const override { return mCurrentConfig; }
+
+  MediaResult PrepareSample(MediaDataDecoder::ConversionRequired aConversion,
+                            MediaRawData* aSample,
+                            bool aNeedKeyFrame) override {
+    // No bitstream conversion needed for AV1; just stamp the current config.
+    aSample->mTrackInfo = mTrackInfo;
+    return NS_OK;
+  }
+
+ private:
+  // Most recent config, rebuilt whenever a sequence header is parsed.
+  VideoInfo mCurrentConfig;
+  // Last sequence info seen (from the av1C box or a sample's header).
+  Maybe<AOMDecoder::AV1SequenceInfo> mInfo;
+  // Monotonic id distinguishing configs across stream changes.
+  uint32_t mStreamID = 0;
+  RefPtr<TrackInfoSharedPtr> mTrackInfo;
+  double mPixelAspectRatio;
+};
+#endif
+
+// aDecoder may be null: Create() only supplies one once the change monitor
+// has enough configuration to instantiate it (see CanBeInstantiated()).
+MediaChangeMonitor::MediaChangeMonitor(
+    PDMFactory* aPDMFactory,
+    UniquePtr<CodecChangeMonitor>&& aCodecChangeMonitor,
+    MediaDataDecoder* aDecoder, const CreateDecoderParams& aParams)
+    : mChangeMonitor(std::move(aCodecChangeMonitor)),
+      mPDMFactory(aPDMFactory),
+      mCurrentConfig(aParams.VideoConfig()),
+      mDecoder(aDecoder),
+      mParams(aParams) {}
+
+/* static */
+/* static */
+RefPtr<PlatformDecoderModule::CreateDecoderPromise> MediaChangeMonitor::Create(
+    PDMFactory* aPDMFactory, const CreateDecoderParams& aParams) {
+  // Pick the codec-specific monitor from the mime type; H264 is the fallback
+  // and the only other codec this wrapper supports.
+  UniquePtr<CodecChangeMonitor> changeMonitor;
+  const VideoInfo& currentConfig = aParams.VideoConfig();
+  if (VPXDecoder::IsVPX(currentConfig.mMimeType)) {
+    changeMonitor = MakeUnique<VPXChangeMonitor>(currentConfig);
+#ifdef MOZ_AV1
+  } else if (AOMDecoder::IsAV1(currentConfig.mMimeType)) {
+    changeMonitor = MakeUnique<AV1ChangeMonitor>(currentConfig);
+#endif
+  } else {
+    MOZ_ASSERT(MP4Decoder::IsH264(currentConfig.mMimeType));
+    changeMonitor = MakeUnique<H264ChangeMonitor>(
+        currentConfig, aParams.mOptions.contains(
+                           CreateDecoderParams::Option::FullH264Parsing));
+  }
+
+  // The change monitor may have an updated track config. E.g. the h264 monitor
+  // may update the config after parsing extra data in the VideoInfo. Create a
+  // new set of params with the updated track info from our monitor and the
+  // other params for aParams and use that going forward.
+  const CreateDecoderParams updatedParams{changeMonitor->Config(), aParams};
+
+  RefPtr<MediaChangeMonitor> instance = new MediaChangeMonitor(
+      aPDMFactory, std::move(changeMonitor), nullptr, updatedParams);
+
+  if (instance->mChangeMonitor->CanBeInstantiated()) {
+    RefPtr<PlatformDecoderModule::CreateDecoderPromise> p =
+        instance->CreateDecoder()->Then(
+            GetCurrentSerialEventTarget(), __func__,
+            [instance = RefPtr{instance}] {
+              return PlatformDecoderModule::CreateDecoderPromise::
+                  CreateAndResolve(instance, __func__);
+            },
+            [](const MediaResult& aError) {
+              return PlatformDecoderModule::CreateDecoderPromise::
+                  CreateAndReject(aError, __func__);
+            });
+    return p;
+  }
+
+  // Not enough init data yet to create the inner decoder: resolve with a
+  // decoder-less monitor; the decoder is created later once a sample supplies
+  // the missing configuration (see Decode()'s NS_ERROR_NOT_INITIALIZED path).
+  return PlatformDecoderModule::CreateDecoderPromise::CreateAndResolve(
+      instance, __func__);
+}
+
+// Defaulted out of line — presumably so member types only forward-declared in
+// the header (e.g. the UniquePtr'd CodecChangeMonitor) are complete at the
+// point of destruction; NOTE(review): confirm against MediaChangeMonitor.h.
+MediaChangeMonitor::~MediaChangeMonitor() = default;
+
+RefPtr<MediaDataDecoder::InitPromise> MediaChangeMonitor::Init() {
+  mThread = GetCurrentSerialEventTarget();
+  if (mDecoder) {
+    RefPtr<InitPromise> p = mInitPromise.Ensure(__func__);
+    // |self| keeps this object alive for the duration of the inner Init();
+    // |this| is captured separately for member access in the lambda.
+    RefPtr<MediaChangeMonitor> self = this;
+    mDecoder->Init()
+        ->Then(GetCurrentSerialEventTarget(), __func__,
+               [self, this](InitPromise::ResolveOrRejectValue&& aValue) {
+                 mInitPromiseRequest.Complete();
+                 if (aValue.IsResolve()) {
+                   // Cache decoder properties queried on every sample.
+                   mDecoderInitialized = true;
+                   mConversionRequired = Some(mDecoder->NeedsConversion());
+                   mCanRecycleDecoder = Some(CanRecycleDecoder());
+                 }
+                 return mInitPromise.ResolveOrRejectIfExists(std::move(aValue),
+                                                             __func__);
+               })
+        ->Track(mInitPromiseRequest);
+    return p;
+  }
+
+  // We haven't been able to initialize a decoder due to missing
+  // extradata.
+  return MediaDataDecoder::InitPromise::CreateAndResolve(TrackType::kVideoTrack,
+                                                         __func__);
+}
+
+RefPtr<MediaDataDecoder::DecodePromise> MediaChangeMonitor::Decode(
+    MediaRawData* aSample) {
+  AssertOnThread();
+  MOZ_RELEASE_ASSERT(mFlushPromise.IsEmpty(),
+                     "Flush operation didn't complete");
+
+  MOZ_RELEASE_ASSERT(
+      !mDecodePromiseRequest.Exists() && !mInitPromiseRequest.Exists(),
+      "Can't request a new decode until previous one completed");
+
+  // May mutate aSample (e.g. AVCC conversion) and/or swap in a new decoder.
+  MediaResult rv = CheckForChange(aSample);
+
+  if (rv == NS_ERROR_NOT_INITIALIZED) {
+    // We are missing the required init data to create the decoder.
+    if (mParams.mOptions.contains(
+            CreateDecoderParams::Option::ErrorIfNoInitializationData)) {
+      // This frame can't be decoded and should be treated as an error.
+      return DecodePromise::CreateAndReject(rv, __func__);
+    }
+    // Swallow the frame, and await delivery of init data.
+    return DecodePromise::CreateAndResolve(DecodedData(), __func__);
+  }
+  if (rv == NS_ERROR_DOM_MEDIA_INITIALIZING_DECODER) {
+    // The decoder is pending initialization. The pending decode is resolved
+    // elsewhere once initialization completes.
+    RefPtr<DecodePromise> p = mDecodePromise.Ensure(__func__);
+    return p;
+  }
+
+  if (NS_FAILED(rv)) {
+    return DecodePromise::CreateAndReject(rv, __func__);
+  }
+
+  // While a keyframe is required (Flush() sets mNeedKeyframe), silently drop
+  // non-keyframes: they can't be decoded without a reference frame.
+  if (mNeedKeyframe && !aSample->mKeyframe) {
+    return DecodePromise::CreateAndResolve(DecodedData(), __func__);
+  }
+
+  // Stamp the sample with the current track info and convert its bitstream
+  // format if the decoder requires it.
+  rv = mChangeMonitor->PrepareSample(*mConversionRequired, aSample,
+                                     mNeedKeyframe);
+  if (NS_FAILED(rv)) {
+    return DecodePromise::CreateAndReject(rv, __func__);
+  }
+
+  mNeedKeyframe = false;
+
+  return mDecoder->Decode(aSample);
+}
+
+// Flush the wrapper. If a decoder-replacement sequence (drain/flush/shutdown/
+// create/init) is in flight, the flush is deferred via mFlushPromise and
+// resolved or rejected by whichever step of that sequence completes next.
+RefPtr<MediaDataDecoder::FlushPromise> MediaChangeMonitor::Flush() {
+  AssertOnThread();
+  mDecodePromiseRequest.DisconnectIfExists();
+  mDecodePromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+  mNeedKeyframe = true;
+  mPendingFrames.Clear();
+
+  MOZ_RELEASE_ASSERT(mFlushPromise.IsEmpty(), "Previous flush didn't complete");
+
+  /*
+    When we detect a change of content in the byte stream, we first drain the
+    current decoder (1), flush (2), shut it down (3) create a new decoder (4)
+    and initialize it (5). It is possible for MediaChangeMonitor::Flush to be
+    called during any of those times. If during (1):
+      - mDrainRequest will not be empty.
+      - The old decoder can still be used, with the current extradata as
+        stored in mCurrentConfig.mExtraData.
+
+    If during (2):
+      - mFlushRequest will not be empty.
+      - The old decoder can still be used, with the current extradata as
+        stored in mCurrentConfig.mExtraData.
+
+    If during (3):
+      - mShutdownRequest won't be empty.
+      - mDecoder is empty.
+      - The old decoder is no longer referenced by the MediaChangeMonitor.
+
+    If during (4):
+      - mDecoderRequest won't be empty.
+      - mDecoder is not set. Steps will continue to (5) to set and initialize it
+
+    If during (5):
+      - mInitPromiseRequest won't be empty.
+      - mDecoder is set but not usable yet.
+  */
+
+  if (mDrainRequest.Exists() || mFlushRequest.Exists() ||
+      mShutdownRequest.Exists() || mDecoderRequest.Exists() ||
+      mInitPromiseRequest.Exists()) {
+    // We let the current decoder complete and will resume after.
+    RefPtr<FlushPromise> p = mFlushPromise.Ensure(__func__);
+    return p;
+  }
+  if (mDecoder && mDecoderInitialized) {
+    return mDecoder->Flush();
+  }
+  return FlushPromise::CreateAndResolve(true, __func__);
+}
+
+// Drain the underlying decoder, or resolve with no data if none exists.
+// Decoding will resume on the next keyframe.
+RefPtr<MediaDataDecoder::DecodePromise> MediaChangeMonitor::Drain() {
+  AssertOnThread();
+  MOZ_RELEASE_ASSERT(!mDrainRequest.Exists());
+  mNeedKeyframe = true;
+  if (mDecoder) {
+    return mDecoder->Drain();
+  }
+  return DecodePromise::CreateAndResolve(DecodedData(), __func__);
+}
+
+// Cancel all in-flight operations, then shut the underlying decoder down.
+RefPtr<ShutdownPromise> MediaChangeMonitor::Shutdown() {
+  AssertOnThread();
+  // Disconnect pending requests and reject outstanding promises so callers
+  // are unblocked before the decoder goes away.
+  mInitPromiseRequest.DisconnectIfExists();
+  mInitPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+  mDecodePromiseRequest.DisconnectIfExists();
+  mDecodePromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+  mDrainRequest.DisconnectIfExists();
+  mFlushRequest.DisconnectIfExists();
+  mFlushPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+  mShutdownRequest.DisconnectIfExists();
+
+  if (mShutdownPromise) {
+    // We have a shutdown in progress, return that promise instead as we can't
+    // shutdown a decoder twice.
+    RefPtr<ShutdownPromise> p = std::move(mShutdownPromise);
+    return p;
+  }
+  return ShutdownDecoder();
+}
+
+// Release and shut down the wrapped decoder (if any), resolving immediately
+// otherwise. mDecoder is cleared before Shutdown() runs on it.
+RefPtr<ShutdownPromise> MediaChangeMonitor::ShutdownDecoder() {
+  AssertOnThread();
+  mConversionRequired.reset();
+  if (mDecoder) {
+    RefPtr<MediaDataDecoder> decoder = std::move(mDecoder);
+    return decoder->Shutdown();
+  }
+  return ShutdownPromise::CreateAndResolve(true, __func__);
+}
+
+// Forward to the wrapped decoder when available; otherwise report a
+// platform-dependent default.
+bool MediaChangeMonitor::IsHardwareAccelerated(
+    nsACString& aFailureReason) const {
+  if (mDecoder) {
+    return mDecoder->IsHardwareAccelerated(aFailureReason);
+  }
+#ifdef MOZ_APPLEMEDIA
+  // With no decoder created yet, assume H264 is hardware accelerated on mac
+  // for now, so that MediaCapabilities reports that playback will be smooth.
+  return true;
+#else
+  return MediaDataDecoder::IsHardwareAccelerated(aFailureReason);
+#endif
+}
+
+// Forward the seek threshold to the wrapped decoder, falling back to the base
+// implementation while no decoder exists.
+void MediaChangeMonitor::SetSeekThreshold(const media::TimeUnit& aTime) {
+  if (mDecoder) {
+    mDecoder->SetSeekThreshold(aTime);
+  } else {
+    MediaDataDecoder::SetSeekThreshold(aTime);
+  }
+}
+
+// Asynchronously create a new underlying decoder from the monitor's current
+// config. The decoder is stored in mDecoder on resolution but is not yet
+// initialized (mDecoderInitialized is reset here).
+RefPtr<MediaChangeMonitor::CreateDecoderPromise>
+MediaChangeMonitor::CreateDecoder() {
+  mCurrentConfig = *mChangeMonitor->Config().GetAsVideoInfo();
+  RefPtr<CreateDecoderPromise> p =
+      mPDMFactory
+          ->CreateDecoder(
+              {mCurrentConfig, mParams, CreateDecoderParams::NoWrapper(true)})
+          ->Then(
+              GetCurrentSerialEventTarget(), __func__,
+              [self = RefPtr{this}, this](RefPtr<MediaDataDecoder>&& aDecoder) {
+                mDecoder = std::move(aDecoder);
+                DDLINKCHILD("decoder", mDecoder.get());
+                return CreateDecoderPromise::CreateAndResolve(true, __func__);
+              },
+              [self = RefPtr{this}](const MediaResult& aError) {
+                return CreateDecoderPromise::CreateAndReject(aError, __func__);
+              });
+
+  mDecoderInitialized = false;
+  mNeedKeyframe = true;
+
+  return p;
+}
+
+// Create and initialize a new decoder for aSample. Returns
+// NS_ERROR_DOM_MEDIA_INITIALIZING_DECODER on success to indicate the work
+// continues asynchronously (aSample is decoded via DecodeFirstSample once
+// init completes), NS_ERROR_NOT_INITIALIZED if the required out-of-band data
+// hasn't been seen yet, or a failure code.
+MediaResult MediaChangeMonitor::CreateDecoderAndInit(MediaRawData* aSample) {
+  MOZ_ASSERT(mThread && mThread->IsOnCurrentThread());
+
+  MediaResult rv = mChangeMonitor->CheckForChange(aSample);
+  if (!NS_SUCCEEDED(rv) && rv != NS_ERROR_DOM_MEDIA_NEED_NEW_DECODER) {
+    return rv;
+  }
+
+  if (!mChangeMonitor->CanBeInstantiated()) {
+    // Nothing found yet, will try again later.
+    return NS_ERROR_NOT_INITIALIZED;
+  }
+
+  CreateDecoder()
+      ->Then(
+          GetCurrentSerialEventTarget(), __func__,
+          [self = RefPtr{this}, this, sample = RefPtr{aSample}] {
+            mDecoderRequest.Complete();
+            mDecoder->Init()
+                ->Then(
+                    GetCurrentSerialEventTarget(), __func__,
+                    [self, sample, this](const TrackType aTrackType) {
+                      mInitPromiseRequest.Complete();
+                      mDecoderInitialized = true;
+                      mConversionRequired = Some(mDecoder->NeedsConversion());
+                      mCanRecycleDecoder = Some(CanRecycleDecoder());
+
+                      if (!mFlushPromise.IsEmpty()) {
+                        // A Flush is pending, abort the current operation.
+                        mFlushPromise.Resolve(true, __func__);
+                        return;
+                      }
+
+                      // Feed the sample that triggered decoder creation.
+                      DecodeFirstSample(sample);
+                    },
+                    [self, this](const MediaResult& aError) {
+                      mInitPromiseRequest.Complete();
+
+                      if (!mFlushPromise.IsEmpty()) {
+                        // A Flush is pending, abort the current operation.
+                        mFlushPromise.Reject(aError, __func__);
+                        return;
+                      }
+
+                      mDecodePromise.Reject(
+                          MediaResult(
+                              aError.Code(),
+                              RESULT_DETAIL("Unable to initialize decoder")),
+                          __func__);
+                    })
+                ->Track(mInitPromiseRequest);
+          },
+          [self = RefPtr{this}, this](const MediaResult& aError) {
+            mDecoderRequest.Complete();
+            if (!mFlushPromise.IsEmpty()) {
+              // A Flush is pending, abort the current operation.
+              mFlushPromise.Reject(aError, __func__);
+              return;
+            }
+            mDecodePromise.Reject(
+                MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+                            RESULT_DETAIL("Unable to create decoder")),
+                __func__);
+          })
+      ->Track(mDecoderRequest);
+  return NS_ERROR_DOM_MEDIA_INITIALIZING_DECODER;
+}
+
+// True when the pref allows decoder recycling and the current decoder
+// supports it; in that case a content change reuses the decoder instead of
+// recreating it.
+bool MediaChangeMonitor::CanRecycleDecoder() const {
+  MOZ_ASSERT(mDecoder);
+  return StaticPrefs::media_decoder_recycle_enabled() &&
+         mDecoder->SupportDecoderRecycling();
+}
+
+// Decode the sample that was pending while the new decoder was being created.
+// Resolves mDecodePromise with any frames accumulated in mPendingFrames plus
+// the result of this decode.
+void MediaChangeMonitor::DecodeFirstSample(MediaRawData* aSample) {
+  // We feed all the data to AnnexB decoder as a non-keyframe could contain
+  // the SPS/PPS when used with WebRTC and this data is needed by the decoder.
+  if (mNeedKeyframe && !aSample->mKeyframe &&
+      *mConversionRequired != ConversionRequired::kNeedAnnexB) {
+    mDecodePromise.Resolve(std::move(mPendingFrames), __func__);
+    mPendingFrames = DecodedData();
+    return;
+  }
+
+  MediaResult rv = mChangeMonitor->PrepareSample(*mConversionRequired, aSample,
+                                                 mNeedKeyframe);
+
+  if (NS_FAILED(rv)) {
+    mDecodePromise.Reject(rv, __func__);
+    return;
+  }
+
+  if (aSample->mKeyframe) {
+    mNeedKeyframe = false;
+  }
+
+  RefPtr<MediaChangeMonitor> self = this;
+  mDecoder->Decode(aSample)
+      ->Then(
+          GetCurrentSerialEventTarget(), __func__,
+          [self, this](MediaDataDecoder::DecodedData&& aResults) {
+            mDecodePromiseRequest.Complete();
+            mPendingFrames.AppendElements(std::move(aResults));
+            mDecodePromise.Resolve(std::move(mPendingFrames), __func__);
+            mPendingFrames = DecodedData();
+          },
+          [self, this](const MediaResult& aError) {
+            mDecodePromiseRequest.Complete();
+            mDecodePromise.Reject(aError, __func__);
+          })
+      ->Track(mDecodePromiseRequest);
+}
+
+// Inspect aSample for a content change. Returns NS_OK when the current
+// decoder can be used as-is (or recycled), a failure code on error, or
+// NS_ERROR_DOM_MEDIA_INITIALIZING_DECODER when an async decoder replacement
+// has been kicked off.
+MediaResult MediaChangeMonitor::CheckForChange(MediaRawData* aSample) {
+  if (!mDecoder) {
+    return CreateDecoderAndInit(aSample);
+  }
+
+  MediaResult rv = mChangeMonitor->CheckForChange(aSample);
+
+  if (NS_SUCCEEDED(rv) || rv != NS_ERROR_DOM_MEDIA_NEED_NEW_DECODER) {
+    return rv;
+  }
+
+  if (*mCanRecycleDecoder) {
+    // Do not recreate the decoder, reuse it.
+    mNeedKeyframe = true;
+    return NS_OK;
+  }
+
+  // The content has changed, signal to drain the current decoder and once done
+  // create a new one.
+  DrainThenFlushDecoder(aSample);
+  return NS_ERROR_DOM_MEDIA_INITIALIZING_DECODER;
+}
+
+// Step (1) of decoder replacement: drain the old decoder, accumulating its
+// remaining output in mPendingFrames, then proceed to flush (step 2).
+// aPendingSample is carried through the chain and decoded by the new decoder.
+void MediaChangeMonitor::DrainThenFlushDecoder(MediaRawData* aPendingSample) {
+  AssertOnThread();
+  MOZ_ASSERT(mDecoderInitialized);
+  RefPtr<MediaRawData> sample = aPendingSample;
+  RefPtr<MediaChangeMonitor> self = this;
+  mDecoder->Drain()
+      ->Then(
+          GetCurrentSerialEventTarget(), __func__,
+          [self, sample, this](MediaDataDecoder::DecodedData&& aResults) {
+            mDrainRequest.Complete();
+            if (!mFlushPromise.IsEmpty()) {
+              // A Flush is pending, abort the current operation.
+              mFlushPromise.Resolve(true, __func__);
+              return;
+            }
+            if (aResults.Length() > 0) {
+              // Keep draining until the decoder has no more output.
+              mPendingFrames.AppendElements(std::move(aResults));
+              DrainThenFlushDecoder(sample);
+              return;
+            }
+            // We've completed the draining operation, we can now flush the
+            // decoder.
+            FlushThenShutdownDecoder(sample);
+          },
+          [self, this](const MediaResult& aError) {
+            mDrainRequest.Complete();
+            if (!mFlushPromise.IsEmpty()) {
+              // A Flush is pending, abort the current operation.
+              mFlushPromise.Reject(aError, __func__);
+              return;
+            }
+            mDecodePromise.Reject(aError, __func__);
+          })
+      ->Track(mDrainRequest);
+}
+
+// Steps (2) and (3) of decoder replacement: flush the old decoder, shut it
+// down, then hand aPendingSample to CreateDecoderAndInit (steps 4-5). A
+// pending Flush() aborts the sequence at each interception point.
+void MediaChangeMonitor::FlushThenShutdownDecoder(
+    MediaRawData* aPendingSample) {
+  AssertOnThread();
+  MOZ_ASSERT(mDecoderInitialized);
+  RefPtr<MediaRawData> sample = aPendingSample;
+  RefPtr<MediaChangeMonitor> self = this;
+  mDecoder->Flush()
+      ->Then(
+          GetCurrentSerialEventTarget(), __func__,
+          [self, sample, this]() {
+            mFlushRequest.Complete();
+
+            if (!mFlushPromise.IsEmpty()) {
+              // A Flush is pending, abort the current operation.
+              mFlushPromise.Resolve(true, __func__);
+              return;
+            }
+
+            // Keep the shutdown promise so a concurrent Shutdown() can chain
+            // onto it rather than shutting the decoder down twice.
+            mShutdownPromise = ShutdownDecoder();
+            mShutdownPromise
+                ->Then(
+                    GetCurrentSerialEventTarget(), __func__,
+                    [self, sample, this]() {
+                      mShutdownRequest.Complete();
+                      mShutdownPromise = nullptr;
+
+                      if (!mFlushPromise.IsEmpty()) {
+                        // A Flush is pending, abort the current
+                        // operation.
+                        mFlushPromise.Resolve(true, __func__);
+                        return;
+                      }
+
+                      MediaResult rv = CreateDecoderAndInit(sample);
+                      if (rv == NS_ERROR_DOM_MEDIA_INITIALIZING_DECODER) {
+                        // All good so far, will continue later.
+                        return;
+                      }
+                      MOZ_ASSERT(NS_FAILED(rv));
+                      mDecodePromise.Reject(rv, __func__);
+                      return;
+                    },
+                    // ShutdownPromise is exclusive and never rejected.
+                    [] { MOZ_CRASH("Can't reach here"); })
+                ->Track(mShutdownRequest);
+          },
+          [self, this](const MediaResult& aError) {
+            mFlushRequest.Complete();
+            if (!mFlushPromise.IsEmpty()) {
+              // A Flush is pending, abort the current operation.
+              mFlushPromise.Reject(aError, __func__);
+              return;
+            }
+            mDecodePromise.Reject(aError, __func__);
+          })
+      ->Track(mFlushRequest);
+}
+
+#undef LOG
+
+} // namespace mozilla
diff --git a/dom/media/platforms/wrappers/MediaChangeMonitor.h b/dom/media/platforms/wrappers/MediaChangeMonitor.h
new file mode 100644
index 0000000000..d77b1ca7eb
--- /dev/null
+++ b/dom/media/platforms/wrappers/MediaChangeMonitor.h
@@ -0,0 +1,127 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_H264Converter_h
+#define mozilla_H264Converter_h
+
+#include "PDMFactory.h"
+#include "PlatformDecoderModule.h"
+#include "mozilla/Atomics.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/UniquePtr.h"
+
+namespace mozilla {
+
+DDLoggedTypeDeclNameAndBase(MediaChangeMonitor, MediaDataDecoder);
+
+// MediaChangeMonitor is a MediaDataDecoder wrapper used to ensure that
+// only one type of content is fed to the underlying MediaDataDecoder.
+// The MediaChangeMonitor allows playback of content where some out of band
+// extra data (such as SPS NAL for H264 content) may not be provided in the
+// init segment (e.g. AVC3 or Annex B). MediaChangeMonitor will monitor the
+// input data, and will delay creation of the MediaDataDecoder until such out
+// of band data has been extracted, should the underlying decoder require it.
+
+class MediaChangeMonitor : public MediaDataDecoder,
+                           public DecoderDoctorLifeLogger<MediaChangeMonitor> {
+ public:
+  static RefPtr<PlatformDecoderModule::CreateDecoderPromise> Create(
+      PDMFactory* aPDMFactory, const CreateDecoderParams& aParams);
+
+  RefPtr<InitPromise> Init() override;
+  RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
+  RefPtr<DecodePromise> Drain() override;
+  RefPtr<FlushPromise> Flush() override;
+  RefPtr<ShutdownPromise> Shutdown() override;
+  bool IsHardwareAccelerated(nsACString& aFailureReason) const override;
+  nsCString GetDescriptionName() const override {
+    if (mDecoder) {
+      return mDecoder->GetDescriptionName();
+    }
+    return "MediaChangeMonitor decoder (pending)"_ns;
+  }
+  void SetSeekThreshold(const media::TimeUnit& aTime) override;
+  bool SupportDecoderRecycling() const override {
+    if (mDecoder) {
+      return mDecoder->SupportDecoderRecycling();
+    }
+    return false;
+  }
+
+  ConversionRequired NeedsConversion() const override {
+    if (mDecoder) {
+      return mDecoder->NeedsConversion();
+    }
+    // Default so no conversion is performed.
+    return ConversionRequired::kNeedNone;
+  }
+
+  // Codec-specific strategy that detects stream configuration changes and
+  // converts samples to the format the underlying decoder expects.
+  class CodecChangeMonitor {
+   public:
+    virtual bool CanBeInstantiated() const = 0;
+    virtual MediaResult CheckForChange(MediaRawData* aSample) = 0;
+    virtual const TrackInfo& Config() const = 0;
+    virtual MediaResult PrepareSample(
+        MediaDataDecoder::ConversionRequired aConversion, MediaRawData* aSample,
+        bool aNeedKeyFrame) = 0;
+    virtual ~CodecChangeMonitor() = default;
+  };
+
+ private:
+  MediaChangeMonitor(PDMFactory* aPDMFactory,
+                     UniquePtr<CodecChangeMonitor>&& aCodecChangeMonitor,
+                     MediaDataDecoder* aDecoder,
+                     const CreateDecoderParams& aParams);
+  virtual ~MediaChangeMonitor();
+
+  void AssertOnThread() const {
+    // mThread may not be set if Init hasn't been called first.
+    MOZ_ASSERT(!mThread || mThread->IsOnCurrentThread());
+  }
+
+  bool CanRecycleDecoder() const;
+
+  typedef MozPromise<bool, MediaResult, true /* exclusive */>
+      CreateDecoderPromise;
+  // Will create the required MediaDataDecoder if need AVCC and we have a SPS
+  // NAL. Returns NS_ERROR_FAILURE if error is permanent and can't be recovered
+  // and will set mError accordingly.
+  RefPtr<CreateDecoderPromise> CreateDecoder();
+  MediaResult CreateDecoderAndInit(MediaRawData* aSample);
+  MediaResult CheckForChange(MediaRawData* aSample);
+
+  void DecodeFirstSample(MediaRawData* aSample);
+  // Steps of the decoder-replacement sequence: drain, flush, shutdown, then
+  // recreate via CreateDecoderAndInit.
+  void DrainThenFlushDecoder(MediaRawData* aPendingSample);
+  void FlushThenShutdownDecoder(MediaRawData* aPendingSample);
+  RefPtr<ShutdownPromise> ShutdownDecoder();
+
+  UniquePtr<CodecChangeMonitor> mChangeMonitor;
+  RefPtr<PDMFactory> mPDMFactory;
+  VideoInfo mCurrentConfig;
+  // Thread Init() was called on; all operations are asserted to run on it.
+  nsCOMPtr<nsISerialEventTarget> mThread;
+  RefPtr<MediaDataDecoder> mDecoder;
+  MozPromiseRequestHolder<CreateDecoderPromise> mDecoderRequest;
+  MozPromiseRequestHolder<InitPromise> mInitPromiseRequest;
+  MozPromiseHolder<InitPromise> mInitPromise;
+  MozPromiseRequestHolder<DecodePromise> mDecodePromiseRequest;
+  MozPromiseHolder<DecodePromise> mDecodePromise;
+  MozPromiseRequestHolder<FlushPromise> mFlushRequest;
+  // Frames drained from the old decoder, delivered with the first output of
+  // the replacement decoder.
+  MediaDataDecoder::DecodedData mPendingFrames;
+  MozPromiseRequestHolder<DecodePromise> mDrainRequest;
+  MozPromiseRequestHolder<ShutdownPromise> mShutdownRequest;
+  RefPtr<ShutdownPromise> mShutdownPromise;
+  // Set when Flush() is called while a decoder replacement is in flight.
+  MozPromiseHolder<FlushPromise> mFlushPromise;
+
+  bool mNeedKeyframe = true;
+  Maybe<bool> mCanRecycleDecoder;
+  Maybe<MediaDataDecoder::ConversionRequired> mConversionRequired;
+  bool mDecoderInitialized = false;
+  const CreateDecoderParamsForAsync mParams;
+};
+
+} // namespace mozilla
+
+#endif // mozilla_H264Converter_h
diff --git a/dom/media/platforms/wrappers/MediaDataDecoderProxy.cpp b/dom/media/platforms/wrappers/MediaDataDecoderProxy.cpp
new file mode 100644
index 0000000000..6bddb97e65
--- /dev/null
+++ b/dom/media/platforms/wrappers/MediaDataDecoderProxy.cpp
@@ -0,0 +1,137 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "MediaDataDecoderProxy.h"
+
+namespace mozilla {
+
+// Initialize the proxied decoder, marshalling the call to the proxy thread
+// when one is set and we're not already on it.
+RefPtr<MediaDataDecoder::InitPromise> MediaDataDecoderProxy::Init() {
+  MOZ_ASSERT(!mIsShutdown);
+
+  if (!mProxyThread || mProxyThread->IsOnCurrentThread()) {
+    return mProxyDecoder->Init();
+  }
+  return InvokeAsync(mProxyThread, __func__, [self = RefPtr{this}] {
+    return self->mProxyDecoder->Init();
+  });
+}
+
+// Decode on the proxy thread (or inline if none / already there). The sample
+// is kept alive by the lambda capture for the duration of the dispatch.
+RefPtr<MediaDataDecoder::DecodePromise> MediaDataDecoderProxy::Decode(
+    MediaRawData* aSample) {
+  MOZ_ASSERT(!mIsShutdown);
+
+  if (!mProxyThread || mProxyThread->IsOnCurrentThread()) {
+    return mProxyDecoder->Decode(aSample);
+  }
+  RefPtr<MediaRawData> sample = aSample;
+  return InvokeAsync(mProxyThread, __func__, [self = RefPtr{this}, sample] {
+    return self->mProxyDecoder->Decode(sample);
+  });
+}
+
+// Direct forward; no thread marshalling needed for this query.
+bool MediaDataDecoderProxy::CanDecodeBatch() const {
+  return mProxyDecoder->CanDecodeBatch();
+}
+
+// Batch decode on the proxy thread; the samples array is moved into the
+// lambda to avoid copying.
+RefPtr<MediaDataDecoder::DecodePromise> MediaDataDecoderProxy::DecodeBatch(
+    nsTArray<RefPtr<MediaRawData>>&& aSamples) {
+  MOZ_ASSERT(!mIsShutdown);
+  if (!mProxyThread || mProxyThread->IsOnCurrentThread()) {
+    return mProxyDecoder->DecodeBatch(std::move(aSamples));
+  }
+  return InvokeAsync(
+      mProxyThread, __func__,
+      [self = RefPtr{this}, samples = std::move(aSamples)]() mutable {
+        return self->mProxyDecoder->DecodeBatch(std::move(samples));
+      });
+}
+
+// Flush the proxied decoder, marshalled to the proxy thread when needed.
+RefPtr<MediaDataDecoder::FlushPromise> MediaDataDecoderProxy::Flush() {
+  MOZ_ASSERT(!mIsShutdown);
+
+  if (!mProxyThread || mProxyThread->IsOnCurrentThread()) {
+    return mProxyDecoder->Flush();
+  }
+  return InvokeAsync(mProxyThread, __func__, [self = RefPtr{this}] {
+    return self->mProxyDecoder->Flush();
+  });
+}
+
+// Drain the proxied decoder, marshalled to the proxy thread when needed.
+RefPtr<MediaDataDecoder::DecodePromise> MediaDataDecoderProxy::Drain() {
+  MOZ_ASSERT(!mIsShutdown);
+
+  if (!mProxyThread || mProxyThread->IsOnCurrentThread()) {
+    return mProxyDecoder->Drain();
+  }
+  return InvokeAsync(mProxyThread, __func__, [self = RefPtr{this}] {
+    return self->mProxyDecoder->Drain();
+  });
+}
+
+// Shut down the proxied decoder. Must only be called once (debug-asserted
+// via mIsShutdown).
+RefPtr<ShutdownPromise> MediaDataDecoderProxy::Shutdown() {
+  MOZ_ASSERT(!mIsShutdown);
+
+#if defined(DEBUG)
+  mIsShutdown = true;
+#endif
+
+  if (!mProxyThread || mProxyThread->IsOnCurrentThread()) {
+    return mProxyDecoder->Shutdown();
+  }
+  // We chain another promise to ensure that the proxied decoder gets destructed
+  // on the proxy thread.
+  return InvokeAsync(mProxyThread, __func__, [self = RefPtr{this}] {
+    RefPtr<ShutdownPromise> p = self->mProxyDecoder->Shutdown()->Then(
+        self->mProxyThread, __func__,
+        [self](const ShutdownPromise::ResolveOrRejectValue& aResult) {
+          // Drop the last reference on the proxy thread.
+          self->mProxyDecoder = nullptr;
+          return ShutdownPromise::CreateAndResolveOrReject(aResult, __func__);
+        });
+    return p;
+  });
+}
+
+// Direct forward of the proxied decoder's description.
+nsCString MediaDataDecoderProxy::GetDescriptionName() const {
+  MOZ_ASSERT(!mIsShutdown);
+
+  return mProxyDecoder->GetDescriptionName();
+}
+
+// Direct forward of the proxied decoder's hardware-acceleration status.
+bool MediaDataDecoderProxy::IsHardwareAccelerated(
+    nsACString& aFailureReason) const {
+  MOZ_ASSERT(!mIsShutdown);
+
+  return mProxyDecoder->IsHardwareAccelerated(aFailureReason);
+}
+
+// Fire-and-forget: dispatches the threshold to the proxy thread; aTime is
+// copied since the runnable may outlive the caller's reference.
+void MediaDataDecoderProxy::SetSeekThreshold(const media::TimeUnit& aTime) {
+  MOZ_ASSERT(!mIsShutdown);
+
+  if (!mProxyThread || mProxyThread->IsOnCurrentThread()) {
+    mProxyDecoder->SetSeekThreshold(aTime);
+    return;
+  }
+  media::TimeUnit time = aTime;
+  mProxyThread->Dispatch(NS_NewRunnableFunction(
+      "MediaDataDecoderProxy::SetSeekThreshold", [self = RefPtr{this}, time] {
+        self->mProxyDecoder->SetSeekThreshold(time);
+      }));
+}
+
+// Direct forward of the proxied decoder's recycling capability.
+bool MediaDataDecoderProxy::SupportDecoderRecycling() const {
+  MOZ_ASSERT(!mIsShutdown);
+
+  return mProxyDecoder->SupportDecoderRecycling();
+}
+
+// Direct forward of the proxied decoder's sample-conversion requirement.
+MediaDataDecoder::ConversionRequired MediaDataDecoderProxy::NeedsConversion()
+    const {
+  MOZ_ASSERT(!mIsShutdown);
+
+  return mProxyDecoder->NeedsConversion();
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/wrappers/MediaDataDecoderProxy.h b/dom/media/platforms/wrappers/MediaDataDecoderProxy.h
new file mode 100644
index 0000000000..f69cb810b8
--- /dev/null
+++ b/dom/media/platforms/wrappers/MediaDataDecoderProxy.h
@@ -0,0 +1,57 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(MediaDataDecoderProxy_h_)
+# define MediaDataDecoderProxy_h_
+
+# include "PlatformDecoderModule.h"
+# include "mozilla/Atomics.h"
+# include "mozilla/RefPtr.h"
+# include "nsThreadUtils.h"
+# include "nscore.h"
+
+namespace mozilla {
+
+DDLoggedTypeDeclNameAndBase(MediaDataDecoderProxy, MediaDataDecoder);
+
+// MediaDataDecoder wrapper that forwards every call to an inner decoder,
+// marshalling onto a dedicated serial event target (the "proxy thread") when
+// one is provided. With a null proxy thread, all calls forward directly.
+class MediaDataDecoderProxy
+    : public MediaDataDecoder,
+      public DecoderDoctorLifeLogger<MediaDataDecoderProxy> {
+ public:
+  explicit MediaDataDecoderProxy(
+      already_AddRefed<MediaDataDecoder> aProxyDecoder,
+      already_AddRefed<nsISerialEventTarget> aProxyThread = nullptr)
+      : mProxyDecoder(aProxyDecoder), mProxyThread(aProxyThread) {
+    DDLINKCHILD("proxy decoder", mProxyDecoder.get());
+  }
+
+  RefPtr<InitPromise> Init() override;
+  RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
+  bool CanDecodeBatch() const override;
+  RefPtr<DecodePromise> DecodeBatch(
+      nsTArray<RefPtr<MediaRawData>>&& aSamples) override;
+  RefPtr<DecodePromise> Drain() override;
+  RefPtr<FlushPromise> Flush() override;
+  RefPtr<ShutdownPromise> Shutdown() override;
+  bool IsHardwareAccelerated(nsACString& aFailureReason) const override;
+  nsCString GetDescriptionName() const override;
+  void SetSeekThreshold(const media::TimeUnit& aTime) override;
+  bool SupportDecoderRecycling() const override;
+  ConversionRequired NeedsConversion() const override;
+
+ private:
+  // Set on construction and clear on the proxy thread if set.
+  RefPtr<MediaDataDecoder> mProxyDecoder;
+  const nsCOMPtr<nsISerialEventTarget> mProxyThread;
+
+# if defined(DEBUG)
+  // Debug-only guard: set by Shutdown() to catch use-after-shutdown.
+  Atomic<bool> mIsShutdown = Atomic<bool>(false);
+# endif
+};
+
+} // namespace mozilla
+
+#endif // MediaDataDecoderProxy_h_