/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "AudioContext.h"

#include "blink/PeriodicWave.h"

#include "mozilla/ErrorResult.h"
#include "mozilla/NotNull.h"
#include "mozilla/OwningNonNull.h"
#include "mozilla/RefPtr.h"
#include "mozilla/Preferences.h"
#include "mozilla/StaticPrefs_media.h"

#include "mozilla/dom/AnalyserNode.h"
#include "mozilla/dom/AnalyserNodeBinding.h"
#include "mozilla/dom/AudioBufferSourceNodeBinding.h"
#include "mozilla/dom/AudioContextBinding.h"
#include "mozilla/dom/BaseAudioContextBinding.h"
#include "mozilla/dom/BiquadFilterNodeBinding.h"
#include "mozilla/dom/ChannelMergerNodeBinding.h"
#include "mozilla/dom/ChannelSplitterNodeBinding.h"
#include "mozilla/dom/ConvolverNodeBinding.h"
#include "mozilla/dom/DelayNodeBinding.h"
#include "mozilla/dom/DynamicsCompressorNodeBinding.h"
#include "mozilla/dom/GainNodeBinding.h"
#include "mozilla/dom/IIRFilterNodeBinding.h"
#include "mozilla/dom/HTMLMediaElement.h"
#include "mozilla/dom/MediaElementAudioSourceNodeBinding.h"
#include "mozilla/dom/MediaStreamAudioSourceNodeBinding.h"
#include "mozilla/dom/MediaStreamTrackAudioSourceNodeBinding.h"
#include "mozilla/dom/OfflineAudioContextBinding.h"
#include "mozilla/dom/OscillatorNodeBinding.h"
#include "mozilla/dom/PannerNodeBinding.h"
#include "mozilla/dom/PeriodicWaveBinding.h"
#include "mozilla/dom/Performance.h"
#include "mozilla/dom/Promise.h"
#include "mozilla/dom/StereoPannerNodeBinding.h"
#include "mozilla/dom/WaveShaperNodeBinding.h"
#include "mozilla/dom/Worklet.h"

#include "AudioBuffer.h"
#include "AudioBufferSourceNode.h"
#include "AudioChannelService.h"
#include "AudioDestinationNode.h"
#include "AudioListener.h"
#include "AudioNodeTrack.h"
#include "AudioStream.h"
#include "AudioWorkletImpl.h"
#include "AutoplayPolicy.h"
#include "BiquadFilterNode.h"
#include "ChannelMergerNode.h"
#include "ChannelSplitterNode.h"
#include "ConstantSourceNode.h"
#include "ConvolverNode.h"
#include "DelayNode.h"
#include "DynamicsCompressorNode.h"
#include "GainNode.h"
#include "IIRFilterNode.h"
#include "js/ArrayBuffer.h"  // JS::StealArrayBufferContents
#include "MediaElementAudioSourceNode.h"
#include "MediaStreamAudioDestinationNode.h"
#include "MediaStreamAudioSourceNode.h"
#include "MediaTrackGraph.h"
#include "MediaStreamTrackAudioSourceNode.h"
#include "nsContentUtils.h"
#include "nsIScriptError.h"
#include "nsNetCID.h"
#include "nsNetUtil.h"
#include "nsPIDOMWindow.h"
#include "nsPrintfCString.h"
#include "nsRFPService.h"
#include "OscillatorNode.h"
#include "PannerNode.h"
#include "PeriodicWave.h"
#include "ScriptProcessorNode.h"
#include "StereoPannerNode.h"
#include "WaveShaperNode.h"

extern mozilla::LazyLogModule gAutoplayPermissionLog;

#define AUTOPLAY_LOG(msg, ...) \
  MOZ_LOG(gAutoplayPermissionLog, LogLevel::Debug, (msg, ##__VA_ARGS__))

using std::move;

namespace mozilla::dom {

// 0 is a special value that MediaTracks use to denote they are not part of an
// AudioContext.
static dom::AudioContext::AudioContextId gAudioContextId = 1;

NS_IMPL_CYCLE_COLLECTION_CLASS(AudioContext)

NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(AudioContext)
  // The destination node and AudioContext form a cycle and so the destination
  // track will be destroyed. mWorklet must be shut down before the track is
  // destroyed. Do this before clearing mWorklet.
  tmp->ShutdownWorklet();
  NS_IMPL_CYCLE_COLLECTION_UNLINK(mDestination)
  NS_IMPL_CYCLE_COLLECTION_UNLINK(mListener)
  NS_IMPL_CYCLE_COLLECTION_UNLINK(mWorklet)
  NS_IMPL_CYCLE_COLLECTION_UNLINK(mPromiseGripArray)
  NS_IMPL_CYCLE_COLLECTION_UNLINK(mPendingResumePromises)
  if (tmp->mSuspendCalled || !tmp->mIsStarted) {
    NS_IMPL_CYCLE_COLLECTION_UNLINK(mActiveNodes)
  }
  // mDecodeJobs owns the WebAudioDecodeJob objects whose lifetime is managed
  // explicitly.
  // mAllNodes is an array of weak pointers, ignore it here.
  // mBasicWaveFormCache cannot participate in cycles, ignore it here.

  // Remove the weak reference on the global window as the context is not
  // usable without mDestination.
  tmp->DisconnectFromWindow();
NS_IMPL_CYCLE_COLLECTION_UNLINK_END_INHERITED(DOMEventTargetHelper)

NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(AudioContext,
                                                  DOMEventTargetHelper)
  NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mDestination)
  NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mListener)
  NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mWorklet)
  NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mPromiseGripArray)
  NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mPendingResumePromises)
  if (tmp->mSuspendCalled || !tmp->mIsStarted) {
    NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mActiveNodes)
  }
  // mDecodeJobs owns the WebAudioDecodeJob objects whose lifetime is managed
  // explicitly.
  // mAllNodes is an array of weak pointers, ignore it here.
  // mBasicWaveFormCache cannot participate in cycles, ignore it here.
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END

NS_IMPL_ADDREF_INHERITED(AudioContext, DOMEventTargetHelper)
NS_IMPL_RELEASE_INHERITED(AudioContext, DOMEventTargetHelper)

NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(AudioContext)
  NS_INTERFACE_MAP_ENTRY(nsIMemoryReporter)
NS_INTERFACE_MAP_END_INHERITING(DOMEventTargetHelper)

static float GetSampleRateForAudioContext(bool aIsOffline, float aSampleRate) {
  if (aIsOffline || aSampleRate != 0.0) {
    return aSampleRate;
  } else {
    return static_cast<float>(CubebUtils::PreferredSampleRate());
  }
}

AudioContext::AudioContext(nsPIDOMWindowInner* aWindow, bool aIsOffline,
                           uint32_t aNumberOfChannels, uint32_t aLength,
                           float aSampleRate)
    : DOMEventTargetHelper(aWindow),
      mId(gAudioContextId++),
      mSampleRate(GetSampleRateForAudioContext(aIsOffline, aSampleRate)),
      mAudioContextState(AudioContextState::Suspended),
      mNumberOfChannels(aNumberOfChannels),
      mIsOffline(aIsOffline),
      mIsStarted(!aIsOffline),
      mIsShutDown(false),
      mCloseCalled(false),
      // Realtime contexts start with suspended tracks until an
      // AudioCallbackDriver is running.
      mSuspendCalled(!aIsOffline),
      mIsDisconnecting(false),
      mWasAllowedToStart(true),
      mSuspendedByContent(false),
      mWasEverAllowedToStart(false),
      mWasEverBlockedToStart(false),
      mWouldBeAllowedToStart(true) {
  bool mute = aWindow->AddAudioContext(this);

  // Note: AudioDestinationNode needs an AudioContext that must already be
  // bound to the window.
  const bool allowedToStart = AutoplayPolicy::IsAllowedToPlay(*this);
  mDestination =
      new AudioDestinationNode(this, aIsOffline, aNumberOfChannels, aLength);
  mDestination->Init();

  // If an AudioContext is not allowed to start, postpone its state transition
  // from `suspended` to `running` until the site explicitly calls
  // AudioContext.resume() or AudioScheduledSourceNode.start().
  if (!allowedToStart) {
    MOZ_ASSERT(!mIsOffline);
    AUTOPLAY_LOG("AudioContext %p is not allowed to start", this);
    ReportBlocked();
  } else if (!mIsOffline) {
    ResumeInternal(AudioContextOperationFlags::SendStateChange);
  }

  // The context can't be muted until it has a destination.
  if (mute) {
    Mute();
  }

  UpdateAutoplayAssumptionStatus();

  FFTBlock::MainThreadInit();
}
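
// Non-normative sketch of how the gating above is reached from content
// (this is the standard Web Audio JS API, not code from this file): a page
// that runs `new AudioContext()` before any user gesture gets a context
// stuck in the "suspended" state plus a blocked-autoplay report, and a
// later, gesture-initiated `ctx.resume()` re-enters this class through
// AudioContext::Resume() and ResumeInternal() once
// AutoplayPolicy::IsAllowedToPlay() returns true.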

void AudioContext::StartBlockedAudioContextIfAllowed() {
  MOZ_ASSERT(NS_IsMainThread());
  MaybeUpdateAutoplayTelemetry();
  // Only try to start the AudioContext if it was not already allowed to
  // start.
  if (mWasAllowedToStart) {
    return;
  }

  const bool isAllowedToPlay = AutoplayPolicy::IsAllowedToPlay(*this);
  AUTOPLAY_LOG("Trying to start AudioContext %p, IsAllowedToPlay=%d", this,
               isAllowedToPlay);

  // Only start the AudioContext if content has not explicitly suspended it;
  // a context suspended by content must wait for a content-initiated
  // resume() even once autoplay becomes allowed.
  if (isAllowedToPlay && !mSuspendedByContent) {
    ResumeInternal(AudioContextOperationFlags::SendStateChange);
  } else {
    ReportBlocked();
  }
}

void AudioContext::DisconnectFromWindow() {
  nsPIDOMWindowInner* window = GetOwner();
  if (window) {
    window->RemoveAudioContext(this);
  }
}

AudioContext::~AudioContext() {
  DisconnectFromWindow();
  UnregisterWeakMemoryReporter(this);
}

JSObject* AudioContext::WrapObject(JSContext* aCx,
                                   JS::Handle<JSObject*> aGivenProto) {
  if (mIsOffline) {
    return OfflineAudioContext_Binding::Wrap(aCx, this, aGivenProto);
  } else {
    return AudioContext_Binding::Wrap(aCx, this, aGivenProto);
  }
}

static bool CheckFullyActive(nsPIDOMWindowInner* aWindow, ErrorResult& aRv) {
  if (!aWindow->IsFullyActive()) {
    aRv.ThrowInvalidStateError("The document is not fully active.");
    return false;
  }
  return true;
}

/* static */
already_AddRefed<AudioContext> AudioContext::Constructor(
    const GlobalObject& aGlobal, const AudioContextOptions& aOptions,
    ErrorResult& aRv) {
  nsCOMPtr<nsPIDOMWindowInner> window =
      do_QueryInterface(aGlobal.GetAsSupports());
  if (!window) {
    aRv.Throw(NS_ERROR_FAILURE);
    return nullptr;
  }

  /**
   * If the current settings object’s responsible document is NOT fully
   * active, throw an InvalidStateError and abort these steps.
   */
  if (!CheckFullyActive(window, aRv)) {
    return nullptr;
  }

  if (aOptions.mSampleRate.WasPassed() &&
      (aOptions.mSampleRate.Value() < WebAudioUtils::MinSampleRate ||
       aOptions.mSampleRate.Value() > WebAudioUtils::MaxSampleRate)) {
    aRv.ThrowNotSupportedError(nsPrintfCString(
        "Sample rate %g is not in the range [%u, %u]",
        aOptions.mSampleRate.Value(), WebAudioUtils::MinSampleRate,
        WebAudioUtils::MaxSampleRate));
    return nullptr;
  }
  float sampleRate = aOptions.mSampleRate.WasPassed()
                         ? aOptions.mSampleRate.Value()
                         : MediaTrackGraph::REQUEST_DEFAULT_SAMPLE_RATE;

  RefPtr<AudioContext> object =
      new AudioContext(window, false, 2, 0, sampleRate);

  RegisterWeakMemoryReporter(object);

  return object.forget();
}

/* static */
already_AddRefed<AudioContext> AudioContext::Constructor(
    const GlobalObject& aGlobal, const OfflineAudioContextOptions& aOptions,
    ErrorResult& aRv) {
  return Constructor(aGlobal, aOptions.mNumberOfChannels, aOptions.mLength,
                     aOptions.mSampleRate, aRv);
}

/* static */
already_AddRefed<AudioContext> AudioContext::Constructor(
    const GlobalObject& aGlobal, uint32_t aNumberOfChannels, uint32_t aLength,
    float aSampleRate, ErrorResult& aRv) {
  nsCOMPtr<nsPIDOMWindowInner> window =
      do_QueryInterface(aGlobal.GetAsSupports());
  if (!window) {
    aRv.Throw(NS_ERROR_FAILURE);
    return nullptr;
  }

  /**
   * If the current settings object’s responsible document is NOT fully
   * active, throw an InvalidStateError and abort these steps.
   */
  if (!CheckFullyActive(window, aRv)) {
    return nullptr;
  }

  if (aNumberOfChannels == 0 ||
      aNumberOfChannels > WebAudioUtils::MaxChannelCount) {
    aRv.ThrowNotSupportedError(
        nsPrintfCString("%u is not a valid channel count", aNumberOfChannels));
    return nullptr;
  }

  if (aLength == 0) {
    aRv.ThrowNotSupportedError("Length must be nonzero");
    return nullptr;
  }

  if (aSampleRate < WebAudioUtils::MinSampleRate ||
      aSampleRate > WebAudioUtils::MaxSampleRate) {
    // The DOM binding protects us against infinity and NaN
    aRv.ThrowNotSupportedError(nsPrintfCString(
        "Sample rate %g is not in the range [%u, %u]", aSampleRate,
        WebAudioUtils::MinSampleRate, WebAudioUtils::MaxSampleRate));
    return nullptr;
  }

  RefPtr<AudioContext> object =
      new AudioContext(window, true, aNumberOfChannels, aLength, aSampleRate);

  RegisterWeakMemoryReporter(object);

  return object.forget();
}

already_AddRefed<AudioBufferSourceNode> AudioContext::CreateBufferSource() {
  return AudioBufferSourceNode::Create(nullptr, *this,
                                       AudioBufferSourceOptions());
}

already_AddRefed<ConstantSourceNode> AudioContext::CreateConstantSource() {
  RefPtr<ConstantSourceNode> constantSourceNode = new ConstantSourceNode(this);
  return constantSourceNode.forget();
}

already_AddRefed<AudioBuffer> AudioContext::CreateBuffer(
    uint32_t aNumberOfChannels, uint32_t aLength, float aSampleRate,
    ErrorResult& aRv) {
  if (!aNumberOfChannels) {
    aRv.ThrowNotSupportedError("Number of channels must be nonzero");
    return nullptr;
  }

  return AudioBuffer::Create(GetOwner(), aNumberOfChannels, aLength,
                             aSampleRate, aRv);
}

namespace {

bool IsValidBufferSize(uint32_t aBufferSize) {
  switch (aBufferSize) {
    case 0:  // let the implementation choose the buffer size
    case 256:
    case 512:
    case 1024:
    case 2048:
    case 4096:
    case 8192:
    case 16384:
      return true;
    default:
      return false;
  }
}

}  // namespace
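
// Note: the sizes accepted above are exactly the powers of two from 256
// (2^8) through 16384 (2^14), matching the createScriptProcessor() spec
// text, plus 0 as "let the implementation choose".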

already_AddRefed<MediaStreamAudioDestinationNode>
AudioContext::CreateMediaStreamDestination(ErrorResult& aRv) {
  return MediaStreamAudioDestinationNode::Create(*this, AudioNodeOptions(),
                                                 aRv);
}

already_AddRefed<ScriptProcessorNode> AudioContext::CreateScriptProcessor(
    uint32_t aBufferSize, uint32_t aNumberOfInputChannels,
    uint32_t aNumberOfOutputChannels, ErrorResult& aRv) {
  if (aNumberOfInputChannels == 0 && aNumberOfOutputChannels == 0) {
    aRv.ThrowIndexSizeError(
        "At least one of numberOfInputChannels and numberOfOutputChannels "
        "must be nonzero");
    return nullptr;
  }

  if (aNumberOfInputChannels > WebAudioUtils::MaxChannelCount) {
    aRv.ThrowIndexSizeError(nsPrintfCString(
        "%u is not a valid number of input channels", aNumberOfInputChannels));
    return nullptr;
  }

  if (aNumberOfOutputChannels > WebAudioUtils::MaxChannelCount) {
    aRv.ThrowIndexSizeError(
        nsPrintfCString("%u is not a valid number of output channels",
                        aNumberOfOutputChannels));
    return nullptr;
  }

  if (!IsValidBufferSize(aBufferSize)) {
    aRv.ThrowIndexSizeError(
        nsPrintfCString("%u is not a valid bufferSize", aBufferSize));
    return nullptr;
  }

  RefPtr<ScriptProcessorNode> scriptProcessor = new ScriptProcessorNode(
      this, aBufferSize, aNumberOfInputChannels, aNumberOfOutputChannels);
  return scriptProcessor.forget();
}

already_AddRefed<AnalyserNode> AudioContext::CreateAnalyser(ErrorResult& aRv) {
  return AnalyserNode::Create(*this, AnalyserOptions(), aRv);
}

already_AddRefed<StereoPannerNode> AudioContext::CreateStereoPanner(
    ErrorResult& aRv) {
  return StereoPannerNode::Create(*this, StereoPannerOptions(), aRv);
}

already_AddRefed<MediaElementAudioSourceNode>
AudioContext::CreateMediaElementSource(HTMLMediaElement& aMediaElement,
                                       ErrorResult& aRv) {
  MediaElementAudioSourceOptions options;
  options.mMediaElement = aMediaElement;
  return MediaElementAudioSourceNode::Create(*this, options, aRv);
}

already_AddRefed<MediaStreamAudioSourceNode>
AudioContext::CreateMediaStreamSource(DOMMediaStream& aMediaStream,
                                      ErrorResult& aRv) {
  MediaStreamAudioSourceOptions options;
  options.mMediaStream = aMediaStream;
  return MediaStreamAudioSourceNode::Create(*this, options, aRv);
}

already_AddRefed<MediaStreamTrackAudioSourceNode>
AudioContext::CreateMediaStreamTrackSource(MediaStreamTrack& aMediaStreamTrack,
                                           ErrorResult& aRv) {
  MediaStreamTrackAudioSourceOptions options;
  options.mMediaStreamTrack = aMediaStreamTrack;
  return MediaStreamTrackAudioSourceNode::Create(*this, options, aRv);
}

already_AddRefed<GainNode> AudioContext::CreateGain(ErrorResult& aRv) {
  return GainNode::Create(*this, GainOptions(), aRv);
}

already_AddRefed<WaveShaperNode> AudioContext::CreateWaveShaper(
    ErrorResult& aRv) {
  return WaveShaperNode::Create(*this, WaveShaperOptions(), aRv);
}

already_AddRefed<DelayNode> AudioContext::CreateDelay(double aMaxDelayTime,
                                                      ErrorResult& aRv) {
  DelayOptions options;
  options.mMaxDelayTime = aMaxDelayTime;
  return DelayNode::Create(*this, options, aRv);
}

already_AddRefed<PannerNode> AudioContext::CreatePanner(ErrorResult& aRv) {
  return PannerNode::Create(*this, PannerOptions(), aRv);
}

already_AddRefed<ConvolverNode> AudioContext::CreateConvolver(
    ErrorResult& aRv) {
  return ConvolverNode::Create(nullptr, *this, ConvolverOptions(), aRv);
}

already_AddRefed<ChannelSplitterNode> AudioContext::CreateChannelSplitter(
    uint32_t aNumberOfOutputs, ErrorResult& aRv) {
  ChannelSplitterOptions options;
  options.mNumberOfOutputs = aNumberOfOutputs;
  return ChannelSplitterNode::Create(*this, options, aRv);
}

already_AddRefed<ChannelMergerNode> AudioContext::CreateChannelMerger(
    uint32_t aNumberOfInputs, ErrorResult& aRv) {
  ChannelMergerOptions options;
  options.mNumberOfInputs = aNumberOfInputs;
  return ChannelMergerNode::Create(*this, options, aRv);
}

already_AddRefed<DynamicsCompressorNode>
AudioContext::CreateDynamicsCompressor(ErrorResult& aRv) {
  return DynamicsCompressorNode::Create(*this, DynamicsCompressorOptions(),
                                        aRv);
}

already_AddRefed<BiquadFilterNode> AudioContext::CreateBiquadFilter(
    ErrorResult& aRv) {
  return BiquadFilterNode::Create(*this, BiquadFilterOptions(), aRv);
}

already_AddRefed<IIRFilterNode> AudioContext::CreateIIRFilter(
    const Sequence<double>& aFeedforward, const Sequence<double>& aFeedback,
    mozilla::ErrorResult& aRv) {
  IIRFilterOptions options;
  options.mFeedforward = aFeedforward;
  options.mFeedback = aFeedback;
  return IIRFilterNode::Create(*this, options, aRv);
}

already_AddRefed<OscillatorNode> AudioContext::CreateOscillator(
    ErrorResult& aRv) {
  return OscillatorNode::Create(*this, OscillatorOptions(), aRv);
}

already_AddRefed<PeriodicWave> AudioContext::CreatePeriodicWave(
    const Float32Array& aRealData, const Float32Array& aImagData,
    const PeriodicWaveConstraints& aConstraints, ErrorResult& aRv) {
  aRealData.ComputeState();
  aImagData.ComputeState();

  RefPtr<PeriodicWave> periodicWave = new PeriodicWave(
      this, aRealData.Data(), aRealData.Length(), aImagData.Data(),
      aImagData.Length(), aConstraints.mDisableNormalization, aRv);
  if (aRv.Failed()) {
    return nullptr;
  }
  return periodicWave.forget();
}

AudioListener* AudioContext::Listener() {
  if (!mListener) {
    mListener = new AudioListener(this);
  }
  return mListener;
}

double AudioContext::OutputLatency() {
  if (mIsShutDown) {
    return 0.0;
  }
  // When reduceFingerprinting is enabled, return a latency figure that is
  // fixed, but plausible for the platform.
  double latency_s = 0.0;
  if (StaticPrefs::privacy_resistFingerprinting()) {
#ifdef XP_MACOSX
    latency_s = 512. / mSampleRate;
#elif MOZ_WIDGET_ANDROID
    latency_s = 0.020;
#elif XP_WIN
    latency_s = 0.04;
#else  // Catchall for other OSes, including Linux.
    latency_s = 0.025;
#endif
  } else {
    return Graph()->AudioOutputLatency();
  }
  return latency_s;
}
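
// Worked figures for the fixed-latency branch above, derived from the
// constants in OutputLatency(): on macOS a 44.1 kHz context reports
// 512 / 44100 ≈ 11.6 ms (≈ 10.7 ms at 48 kHz); Android and Windows report
// flat 20 ms and 40 ms respectively, and other platforms 25 ms.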

void AudioContext::GetOutputTimestamp(AudioTimestamp& aTimeStamp) {
  if (!Destination()) {
    aTimeStamp.mContextTime.Construct(0.0);
    aTimeStamp.mPerformanceTime.Construct(0.0);
    return;
  }

  // The currentTime currently being output is the currentTime minus the audio
  // output latency. The resolution of CurrentTime() is already reduced.
  aTimeStamp.mContextTime.Construct(
      std::max(0.0, CurrentTime() - OutputLatency()));
  nsPIDOMWindowInner* parent = GetParentObject();
  Performance* perf = parent ? parent->GetPerformance() : nullptr;
  if (perf) {
    // perf->Now() already has reduced resolution here, no need to do it
    // again.
    aTimeStamp.mPerformanceTime.Construct(
        std::max(0., perf->Now() - (OutputLatency() * 1000.)));
  } else {
    aTimeStamp.mPerformanceTime.Construct(0.0);
  }
}

Worklet* AudioContext::GetAudioWorklet(ErrorResult& aRv) {
  if (!mWorklet) {
    mWorklet = AudioWorkletImpl::CreateWorklet(this, aRv);
  }
  return mWorklet;
}

bool AudioContext::IsRunning() const {
  return mAudioContextState == AudioContextState::Running;
}

already_AddRefed<Promise> AudioContext::CreatePromise(ErrorResult& aRv) {
  // Get the relevant global for the promise from the wrapper cache because
  // DOMEventTargetHelper::GetOwner() returns null if the document is
  // unloaded. We know the wrapper exists because it is being used for |this|
  // from JS.
  // See https://github.com/heycam/webidl/issues/932 for why the relevant
  // global is used instead of the current global.
  nsCOMPtr<nsIGlobalObject> global = xpc::NativeGlobal(GetWrapper());
  RefPtr<Promise> promise = Promise::Create(global, aRv);
  if (aRv.Failed()) {
    return nullptr;
  }

  /**
   * If this's relevant global object's associated Document is not fully
   * active then return a promise rejected with "InvalidStateError"
   * DOMException.
   */
  nsCOMPtr<nsPIDOMWindowInner> window = do_QueryInterface(global);
  if (!window->IsFullyActive()) {
    promise->MaybeRejectWithInvalidStateError(
        "The document is not fully active.");
  }

  return promise.forget();
}

already_AddRefed<Promise> AudioContext::DecodeAudioData(
    const ArrayBuffer& aBuffer,
    const Optional<OwningNonNull<DecodeSuccessCallback>>& aSuccessCallback,
    const Optional<OwningNonNull<DecodeErrorCallback>>& aFailureCallback,
    ErrorResult& aRv) {
  AutoJSAPI jsapi;
  jsapi.Init();
  JSContext* cx = jsapi.cx();

  // CheckedUnwrapStatic is OK, since we know we have an ArrayBuffer.
  JS::Rooted<JSObject*> obj(cx, js::CheckedUnwrapStatic(aBuffer.Obj()));
  if (!obj) {
    aRv.ThrowSecurityError("Can't get audio data from cross-origin object");
    return nullptr;
  }

  RefPtr<Promise> promise = CreatePromise(aRv);
  if (aRv.Failed() || promise->State() == Promise::PromiseState::Rejected) {
    return promise.forget();
  }

  JSAutoRealm ar(cx, obj);

  aBuffer.ComputeState();

  if (!aBuffer.Data()) {
    // Throw if the buffer is detached
    aRv.ThrowTypeError("Buffer argument can't be a detached buffer");
    return nullptr;
  }

  // Detach the array buffer
  size_t length = aBuffer.Length();

  uint8_t* data = static_cast<uint8_t*>(JS::StealArrayBufferContents(cx, obj));

  // Sniff the content of the media.
  // Failed type sniffing will be handled by AsyncDecodeWebAudio.
  nsAutoCString contentType;
  NS_SniffContent(NS_DATA_SNIFFER_CATEGORY, nullptr, data, length,
                  contentType);

  RefPtr<DecodeErrorCallback> failureCallback;
  RefPtr<DecodeSuccessCallback> successCallback;
  if (aFailureCallback.WasPassed()) {
    failureCallback = &aFailureCallback.Value();
  }
  if (aSuccessCallback.WasPassed()) {
    successCallback = &aSuccessCallback.Value();
  }

  UniquePtr<WebAudioDecodeJob> job(
      new WebAudioDecodeJob(this, promise, successCallback, failureCallback));
  AsyncDecodeWebAudio(contentType.get(), data, length, *job);
  // Transfer the ownership to mDecodeJobs
  mDecodeJobs.AppendElement(move(job));

  return promise.forget();
}
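
// Ownership note for DecodeAudioData() above: JS::StealArrayBufferContents()
// detaches the caller's ArrayBuffer and hands its contents to the decode
// job, which mDecodeJobs then owns until RemoveFromDecodeQueue() runs. That
// is also why the detached-buffer TypeError check happens before the steal;
// afterwards the buffer would always look detached.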

void AudioContext::RemoveFromDecodeQueue(WebAudioDecodeJob* aDecodeJob) {
  // Since UniquePtr doesn't provide an operator== which allows you to compare
  // against raw pointers, we need to iterate manually.
  for (uint32_t i = 0; i < mDecodeJobs.Length(); ++i) {
    if (mDecodeJobs[i].get() == aDecodeJob) {
      mDecodeJobs.RemoveElementAt(i);
      break;
    }
  }
}

void AudioContext::RegisterActiveNode(AudioNode* aNode) {
  if (!mCloseCalled) {
    mActiveNodes.PutEntry(aNode);
  }
}

void AudioContext::UnregisterActiveNode(AudioNode* aNode) {
  mActiveNodes.RemoveEntry(aNode);
}

uint32_t AudioContext::MaxChannelCount() const {
  if (StaticPrefs::privacy_resistFingerprinting()) {
    return 2;
  }
  return std::min(
      WebAudioUtils::MaxChannelCount,
      mIsOffline ? mNumberOfChannels : CubebUtils::MaxNumberOfChannels());
}

uint32_t AudioContext::ActiveNodeCount() const { return mActiveNodes.Count(); }

MediaTrackGraph* AudioContext::Graph() const {
  return Destination()->Track()->Graph();
}

AudioNodeTrack* AudioContext::DestinationTrack() const {
  if (Destination()) {
    return Destination()->Track();
  }
  return nullptr;
}

void AudioContext::ShutdownWorklet() {
  if (mWorklet) {
    mWorklet->Impl()->NotifyWorkletFinished();
  }
}

double AudioContext::CurrentTime() {
  mozilla::MediaTrack* track = Destination()->Track();

  double rawTime = track->TrackTimeToSeconds(track->GetCurrentTime());

  // CurrentTime increments in intervals of 128/sampleRate. If the Timer
  // Precision Reduction is smaller than this interval, the jittered time
  // can always be reversed to the raw step of the interval. In that case
  // we can simply return the un-reduced time, and avoid breaking tests.
  // We have to convert each variable into a common magnitude; we choose ms.
  if ((128 / mSampleRate) * 1000.0 >
      nsRFPService::TimerResolution() / 1000.0) {
    return rawTime;
  }

  MOZ_ASSERT(GetParentObject()->AsGlobal());
  // The value of a MediaTrack's CurrentTime will always advance forward; it
  // will never reset (even if one rewinds a video.) Therefore we can use a
  // single Random Seed initialized at the same time as the object.
  return nsRFPService::ReduceTimePrecisionAsSecs(
      rawTime, GetRandomTimelineSeed(), /* aIsSystemPrincipal */ false,
      GetParentObject()->AsGlobal()->CrossOriginIsolated());
}
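
// Worked example for the early-return test in CurrentTime() above (assuming
// a common 48 kHz device rate): one render quantum is 128 / 48000 s, i.e.
// about 2.67 ms. When the timer-precision reduction step is finer than that,
// the jitter could be undone by rounding to the quantum grid anyway, so the
// raw time is returned; otherwise the RFP-reduced value is used.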

nsISerialEventTarget* AudioContext::GetMainThread() const {
  if (nsPIDOMWindowInner* window = GetParentObject()) {
    return window->AsGlobal()->EventTargetFor(TaskCategory::Other);
  }
  return GetCurrentSerialEventTarget();
}

void AudioContext::DisconnectFromOwner() {
  mIsDisconnecting = true;
  OnWindowDestroy();
  DOMEventTargetHelper::DisconnectFromOwner();
}

void AudioContext::OnWindowDestroy() {
  // Avoid re-sending the telemetry data.
  if (!mIsShutDown) {
    MaybeUpdateAutoplayTelemetryWhenShutdown();
  }
  mIsShutDown = true;

  CloseInternal(nullptr, AudioContextOperationFlags::None);

  // We don't want to touch promises if the global is going away soon.
  if (!mIsDisconnecting) {
    for (auto p : mPromiseGripArray) {
      p->MaybeRejectWithInvalidStateError("Navigated away from page");
    }
    mPromiseGripArray.Clear();

    for (const auto& p : mPendingResumePromises) {
      p->MaybeRejectWithInvalidStateError("Navigated away from page");
    }
    mPendingResumePromises.Clear();
  }

  // On process shutdown, the MTG thread shuts down before the destination
  // track is destroyed, but AudioWorklet needs to release objects on the MTG
  // thread. AudioContext::Shutdown() is invoked on processing the
  // PBrowser::Destroy() message before xpcom shutdown begins.
  ShutdownWorklet();

  if (mDestination) {
    // We can destroy the MediaTrackGraph at this point.
    // Although there may be other clients using the graph, this graph is
    // used only for clients in the same window and this window is going
    // away. This will also interrupt any worklet script still running on the
    // graph thread.
    Graph()->ForceShutDown();
    // AudioDestinationNodes on rendering offline contexts have a
    // self-reference which needs removal.
    if (mIsOffline) {
      mDestination->OfflineShutdown();
    }
  }
}

/* This runnable allows the "statechange" event to be fired. */
class OnStateChangeTask final : public Runnable {
 public:
  explicit OnStateChangeTask(AudioContext* aAudioContext)
      : Runnable("dom::OnStateChangeTask"), mAudioContext(aAudioContext) {}

  NS_IMETHODIMP
  Run() override {
    nsPIDOMWindowInner* parent = mAudioContext->GetParentObject();
    if (!parent) {
      return NS_ERROR_FAILURE;
    }

    Document* doc = parent->GetExtantDoc();
    if (!doc) {
      return NS_ERROR_FAILURE;
    }

    return nsContentUtils::DispatchTrustedEvent(
        doc, static_cast<DOMEventTargetHelper*>(mAudioContext),
        u"statechange"_ns, CanBubble::eNo, Cancelable::eNo);
  }

 private:
  RefPtr<AudioContext> mAudioContext;
};

void AudioContext::Dispatch(already_AddRefed<nsIRunnable>&& aRunnable) {
  MOZ_ASSERT(NS_IsMainThread());
  nsCOMPtr<nsIGlobalObject> parentObject =
      do_QueryInterface(GetParentObject());
  // It can happen that this runnable took a long time to reach the main
  // thread, and the global is not valid anymore.
  if (parentObject) {
    parentObject->AbstractMainThreadFor(TaskCategory::Other)
        ->Dispatch(move(aRunnable));
  } else {
    RefPtr<nsIRunnable> runnable(aRunnable);
    runnable = nullptr;
  }
}
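
// State-machine note for OnStateChanged() below: the only states are
// "suspended", "running" and "closed", and "closed" is terminal, hence the
// hard assertion on any transition attempted out of Closed. A "statechange"
// event task is queued only when the state actually changes.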

void AudioContext::OnStateChanged(void* aPromise,
                                  AudioContextState aNewState) {
  MOZ_ASSERT(NS_IsMainThread());

  if (mAudioContextState == AudioContextState::Closed) {
    fprintf(stderr,
            "Invalid transition: mAudioContextState: %d -> aNewState %d\n",
            static_cast<int>(mAudioContextState),
            static_cast<int>(aNewState));
    MOZ_ASSERT(false);
  }

  if (aPromise) {
    Promise* promise = reinterpret_cast<Promise*>(aPromise);
    // It is possible for the promise to have been removed from
    // mPromiseGripArray if the cycle collector has severed our connections.
    // DO NOT dereference the promise pointer in that case since it may point
    // to already freed memory.
    if (mPromiseGripArray.Contains(promise)) {
      promise->MaybeResolveWithUndefined();
      DebugOnly<bool> rv = mPromiseGripArray.RemoveElement(promise);
      MOZ_ASSERT(rv, "Promise wasn't in the grip array?");
    }
  }

  // Resolve all pending promises once the audio context has been allowed to
  // start.
  if (aNewState == AudioContextState::Running) {
    for (const auto& p : mPendingResumePromises) {
      p->MaybeResolveWithUndefined();
    }
    mPendingResumePromises.Clear();
  }

  if (mAudioContextState != aNewState) {
    RefPtr<OnStateChangeTask> task = new OnStateChangeTask(this);
    Dispatch(task.forget());
  }

  mAudioContextState = aNewState;
  Destination()->NotifyAudioContextStateChanged();
}

nsTArray<RefPtr<mozilla::MediaTrack>> AudioContext::GetAllTracks() const {
  nsTArray<RefPtr<mozilla::MediaTrack>> tracks;
  for (auto iter = mAllNodes.ConstIter(); !iter.Done(); iter.Next()) {
    AudioNode* node = iter.Get()->GetKey();
    mozilla::MediaTrack* t = node->GetTrack();
    if (t) {
      tracks.AppendElement(t);
    }
    // Add the tracks of the AudioParams.
    const nsTArray<RefPtr<AudioParam>>& audioParams = node->GetAudioParams();
    if (!audioParams.IsEmpty()) {
      for (auto& param : audioParams) {
        t = param->GetTrack();
        if (t && !tracks.Contains(t)) {
          tracks.AppendElement(t);
        }
      }
    }
  }
  return tracks;
}

already_AddRefed<Promise> AudioContext::Suspend(ErrorResult& aRv) {
  RefPtr<Promise> promise = CreatePromise(aRv);
  if (aRv.Failed() || promise->State() == Promise::PromiseState::Rejected) {
    return promise.forget();
  }

  if (mIsOffline) {
    // XXXbz This is not reachable, since we don't implement this
    // method on OfflineAudioContext at all!
    promise->MaybeRejectWithNotSupportedError(
        "Can't suspend OfflineAudioContext yet");
    return promise.forget();
  }

  if (mCloseCalled) {
    promise->MaybeRejectWithInvalidStateError(
        "Can't suspend if the control thread state is \"closed\"");
    return promise.forget();
  }

  mSuspendedByContent = true;
  mPromiseGripArray.AppendElement(promise);
  SuspendInternal(promise, AudioContextOperationFlags::SendStateChange);
  return promise.forget();
}

void AudioContext::SuspendFromChrome() {
  if (mIsOffline || mIsShutDown) {
    return;
  }
  SuspendInternal(nullptr, Preferences::GetBool("dom.audiocontext.testing")
                               ? AudioContextOperationFlags::SendStateChange
                               : AudioContextOperationFlags::None);
}

void AudioContext::SuspendInternal(void* aPromise,
                                   AudioContextOperationFlags aFlags) {
  MOZ_ASSERT(NS_IsMainThread());
  MOZ_ASSERT(!mIsOffline);
  Destination()->Suspend();

  nsTArray<RefPtr<mozilla::MediaTrack>> tracks;
  // If mSuspendCalled is true then we already suspended all our tracks,
  // so don't suspend them again (since suspend(); suspend(); resume() should
  // cancel both suspends). But we still need to do ApplyAudioContextOperation
  // to ensure our new promise is resolved.
  if (!mSuspendCalled) {
    tracks = GetAllTracks();
  }
  auto promise = Graph()->ApplyAudioContextOperation(
      DestinationTrack(), move(tracks), AudioContextOperation::Suspend);
  if (aFlags & AudioContextOperationFlags::SendStateChange) {
    promise->Then(
        GetMainThread(), "AudioContext::OnStateChanged",
        [self = RefPtr<AudioContext>(this),
         aPromise](AudioContextState aNewState) {
          self->OnStateChanged(aPromise, aNewState);
        },
        [] { MOZ_CRASH("Unexpected rejection"); });
  }

  mSuspendCalled = true;
}
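
// Illustrative call sequence for the mSuspendCalled logic above (a sketch,
// not code from this file):
//   suspend() -> SuspendInternal()  mSuspendCalled: false -> true (tracks
//                                   suspended)
//   suspend() -> SuspendInternal()  tracks already suspended; promise only
//   resume()  -> ResumeInternal()   mSuspendCalled: true -> false (tracks
//                                   resumed)
// A single resume() therefore cancels any number of stacked suspend() calls,
// while each call still gets its own ApplyAudioContextOperation() round-trip
// so that its promise resolves.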

void AudioContext::ResumeFromChrome() {
  if (mIsOffline || mIsShutDown) {
    return;
  }
  ResumeInternal(Preferences::GetBool("dom.audiocontext.testing")
                     ? AudioContextOperationFlags::SendStateChange
                     : AudioContextOperationFlags::None);
}

already_AddRefed<Promise> AudioContext::Resume(ErrorResult& aRv) {
  RefPtr<Promise> promise = CreatePromise(aRv);
  if (aRv.Failed() || promise->State() == Promise::PromiseState::Rejected) {
    return promise.forget();
  }

  if (mIsOffline) {
    promise->MaybeRejectWithNotSupportedError(
        "Can't resume OfflineAudioContext");
    return promise.forget();
  }

  if (mCloseCalled) {
    promise->MaybeRejectWithInvalidStateError(
        "Can't resume if the control thread state is \"closed\"");
    return promise.forget();
  }

  mSuspendedByContent = false;
  mPendingResumePromises.AppendElement(promise);

  const bool isAllowedToPlay = AutoplayPolicy::IsAllowedToPlay(*this);
  AUTOPLAY_LOG("Trying to resume AudioContext %p, IsAllowedToPlay=%d", this,
               isAllowedToPlay);
  if (isAllowedToPlay) {
    ResumeInternal(AudioContextOperationFlags::SendStateChange);
  } else {
    ReportBlocked();
  }

  MaybeUpdateAutoplayTelemetry();

  return promise.forget();
}

void AudioContext::ResumeInternal(AudioContextOperationFlags aFlags) {
  MOZ_ASSERT(!mIsOffline);
  AUTOPLAY_LOG("Allow to resume AudioContext %p", this);
  mWasAllowedToStart = true;

  Destination()->Resume();

  nsTArray<RefPtr<mozilla::MediaTrack>> tracks;
  // If mSuspendCalled is false then we already resumed all our tracks,
  // so don't resume them again (since suspend(); resume(); resume() should
  // be OK). But we still need to do ApplyAudioContextOperation
  // to ensure our new promise is resolved.
  if (mSuspendCalled) {
    tracks = GetAllTracks();
  }
  auto promise = Graph()->ApplyAudioContextOperation(
      DestinationTrack(), move(tracks), AudioContextOperation::Resume);
  if (aFlags & AudioContextOperationFlags::SendStateChange) {
    promise->Then(
        GetMainThread(), "AudioContext::OnStateChanged",
        [self = RefPtr<AudioContext>(this)](AudioContextState aNewState) {
          self->OnStateChanged(nullptr, aNewState);
        },
        [] {});  // Promise may be rejected after graph shutdown.
  }
  mSuspendCalled = false;
}

void AudioContext::UpdateAutoplayAssumptionStatus() {
  if (AutoplayPolicyTelemetryUtils::WouldBeAllowedToPlayIfAutoplayDisabled(
          *this)) {
    mWasEverAllowedToStart |= true;
    mWouldBeAllowedToStart = true;
  } else {
    mWasEverBlockedToStart |= true;
    mWouldBeAllowedToStart = false;
  }
}

void AudioContext::MaybeUpdateAutoplayTelemetry() {
  // Exclude offline AudioContexts because they are always allowed to start.
  if (mIsOffline) {
    return;
  }

  if (AutoplayPolicyTelemetryUtils::WouldBeAllowedToPlayIfAutoplayDisabled(
          *this) &&
      !mWouldBeAllowedToStart) {
    AccumulateCategorical(
        mozilla::Telemetry::LABELS_WEB_AUDIO_AUTOPLAY::AllowedAfterBlocked);
  }
  UpdateAutoplayAssumptionStatus();
}

void AudioContext::MaybeUpdateAutoplayTelemetryWhenShutdown() {
  // Exclude offline AudioContexts because they are always allowed to start.
  if (mIsOffline) {
    return;
  }

  if (mWasEverAllowedToStart && !mWasEverBlockedToStart) {
    AccumulateCategorical(
        mozilla::Telemetry::LABELS_WEB_AUDIO_AUTOPLAY::NeverBlocked);
  } else if (!mWasEverAllowedToStart && mWasEverBlockedToStart) {
    AccumulateCategorical(
        mozilla::Telemetry::LABELS_WEB_AUDIO_AUTOPLAY::NeverAllowed);
  }
}

void AudioContext::ReportBlocked() {
  ReportToConsole(nsIScriptError::warningFlag,
                  "BlockAutoplayWebAudioStartError");
  mWasAllowedToStart = false;

  if (!StaticPrefs::media_autoplay_block_event_enabled()) {
    return;
  }

  RefPtr<AudioContext> self = this;
  RefPtr<nsIRunnable> r =
      NS_NewRunnableFunction("AudioContext::AutoplayBlocked", [self]() {
        nsPIDOMWindowInner* parent = self->GetParentObject();
        if (!parent) {
          return;
        }

        Document* doc = parent->GetExtantDoc();
        if (!doc) {
          return;
        }

        AUTOPLAY_LOG("Dispatch `blocked` event for AudioContext %p",
                     self.get());
        nsContentUtils::DispatchTrustedEvent(
            doc, static_cast<DOMEventTargetHelper*>(self), u"blocked"_ns,
            CanBubble::eNo, Cancelable::eNo);
      });
  Dispatch(r.forget());
}

already_AddRefed<Promise> AudioContext::Close(ErrorResult& aRv) {
  RefPtr<Promise> promise = CreatePromise(aRv);
  if (aRv.Failed() || promise->State() == Promise::PromiseState::Rejected) {
    return promise.forget();
  }

  if (mIsOffline) {
    // XXXbz This is not reachable, since we don't implement this
    // method on OfflineAudioContext at all!
    promise->MaybeRejectWithNotSupportedError(
        "Can't close OfflineAudioContext yet");
    return promise.forget();
  }

  if (mCloseCalled) {
    promise->MaybeRejectWithInvalidStateError(
        "Can't close an AudioContext twice");
    return promise.forget();
  }

  mPromiseGripArray.AppendElement(promise);

  CloseInternal(promise, AudioContextOperationFlags::SendStateChange);

  return promise.forget();
}

void AudioContext::OfflineClose() {
  CloseInternal(nullptr, AudioContextOperationFlags::None);
}

void AudioContext::CloseInternal(void* aPromise,
                                 AudioContextOperationFlags aFlags) {
  // This can be called when freeing a document, and the tracks are dead at
  // this point, so we need extra null-checks.
  AudioNodeTrack* ds = DestinationTrack();
  if (ds && !mIsOffline) {
    Destination()->Close();

    nsTArray<RefPtr<mozilla::MediaTrack>> tracks;
    // If mSuspendCalled or mCloseCalled are true then we already suspended
    // all our tracks, so don't suspend them again. But we still need to do
    // ApplyAudioContextOperation to ensure our new promise is resolved.
    if (!mSuspendCalled && !mCloseCalled) {
      tracks = GetAllTracks();
    }
    auto promise = Graph()->ApplyAudioContextOperation(
        ds, move(tracks), AudioContextOperation::Close);
    if (aFlags & AudioContextOperationFlags::SendStateChange) {
      promise->Then(
          GetMainThread(), "AudioContext::OnStateChanged",
          [self = RefPtr<AudioContext>(this),
           aPromise](AudioContextState aNewState) {
            self->OnStateChanged(aPromise, aNewState);
          },
          [] {});  // Promise may be rejected after graph shutdown.
    }
  }
  mCloseCalled = true;
  // Release references to active nodes.
  // Active AudioNodes don't unregister in destructors, at which point the
  // Node is already unregistered.
  mActiveNodes.Clear();
}
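
// Lifecycle note: close() is one-way. Once CloseInternal() has set
// mCloseCalled, Suspend() and Resume() reject with InvalidStateError,
// RegisterActiveNode() stops retaining nodes, and a second close() rejects,
// consistent with the "closed" control-thread state the rejections above
// describe.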

void AudioContext::RegisterNode(AudioNode* aNode) {
  MOZ_ASSERT(!mAllNodes.Contains(aNode));
  mAllNodes.PutEntry(aNode);
}

void AudioContext::UnregisterNode(AudioNode* aNode) {
  MOZ_ASSERT(mAllNodes.Contains(aNode));
  mAllNodes.RemoveeEntry(aNode);
}

already_AddRefed<Promise> AudioContext::StartRendering(ErrorResult& aRv) {
  MOZ_ASSERT(mIsOffline, "This should only be called on OfflineAudioContext");

  RefPtr<Promise> promise = CreatePromise(aRv);
  if (aRv.Failed() || promise->State() == Promise::PromiseState::Rejected) {
    return promise.forget();
  }
  if (mIsStarted) {
    aRv.ThrowInvalidStateError("Rendering already started");
    return nullptr;
  }

  mIsStarted = true;
  mDestination->StartRendering(promise);

  OnStateChanged(nullptr, AudioContextState::Running);

  return promise.forget();
}

unsigned long AudioContext::Length() {
  MOZ_ASSERT(mIsOffline);
  return mDestination->Length();
}

void AudioContext::Mute() const {
  MOZ_ASSERT(!mIsOffline);
  if (mDestination) {
    mDestination->Mute();
  }
}

void AudioContext::Unmute() const {
  MOZ_ASSERT(!mIsOffline);
  if (mDestination) {
    mDestination->Unmute();
  }
}

void AudioContext::SetParamMapForWorkletName(
    const nsAString& aName, AudioParamDescriptorMap* aParamMap) {
  MOZ_ASSERT(!mWorkletParamDescriptors.GetValue(aName));
  Unused << mWorkletParamDescriptors.Put(aName, move(*aParamMap), fallible);
}

size_t AudioContext::SizeOfIncludingThis(
    mozilla::MallocSizeOf aMallocSizeOf) const {
  // AudioNodes are tracked separately because we do not want the AudioContext
  // to track all of the AudioNodes it creates, so we wouldn't be able to
  // traverse them from here.
  size_t amount = aMallocSizeOf(this);
  if (mListener) {
    amount += mListener->SizeOfIncludingThis(aMallocSizeOf);
  }
  amount += mDecodeJobs.ShallowSizeOfExcludingThis(aMallocSizeOf);
  for (uint32_t i = 0; i < mDecodeJobs.Length(); ++i) {
    amount += mDecodeJobs[i]->SizeOfIncludingThis(aMallocSizeOf);
  }
  amount += mActiveNodes.ShallowSizeOfExcludingThis(aMallocSizeOf);
  return amount;
}

NS_IMETHODIMP
AudioContext::CollectReports(nsIHandleReportCallback* aHandleReport,
                             nsISupports* aData, bool aAnonymize) {
  const nsLiteralCString nodeDescription(
      "Memory used by AudioNode DOM objects (Web Audio).");
  for (auto iter = mAllNodes.ConstIter(); !iter.Done(); iter.Next()) {
    AudioNode* node = iter.Get()->GetKey();
    int64_t amount = node->SizeOfIncludingThis(MallocSizeOf);
    nsPrintfCString domNodePath("explicit/webaudio/audio-node/%s/dom-nodes",
                                node->NodeType());
    aHandleReport->Callback(""_ns, domNodePath, KIND_HEAP, UNITS_BYTES,
                            amount, nodeDescription, aData);
  }

  int64_t amount = SizeOfIncludingThis(MallocSizeOf);
  MOZ_COLLECT_REPORT("explicit/webaudio/audiocontext", KIND_HEAP, UNITS_BYTES,
                     amount,
                     "Memory used by AudioContext objects (Web Audio).");

  return NS_OK;
}

BasicWaveFormCache* AudioContext::GetBasicWaveFormCache() {
  MOZ_ASSERT(NS_IsMainThread());
  if (!mBasicWaveFormCache) {
    mBasicWaveFormCache = new BasicWaveFormCache(SampleRate());
  }
  return mBasicWaveFormCache;
}
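
// about:memory note: CollectReports() above reports each node's DOM-side
// memory under explicit/webaudio/audio-node/<NodeType()>/dom-nodes, while
// the context's own accounting from SizeOfIncludingThis() (listener, decode
// jobs, active-node table) appears as a single
// explicit/webaudio/audiocontext entry.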

void AudioContext::ReportToConsole(uint32_t aErrorFlags,
                                   const char* aMsg) const {
  MOZ_ASSERT(aMsg);
  Document* doc =
      GetParentObject() ? GetParentObject()->GetExtantDoc() : nullptr;
  nsContentUtils::ReportToConsole(aErrorFlags, "Media"_ns, doc,
                                  nsContentUtils::eDOM_PROPERTIES, aMsg);
}

BasicWaveFormCache::BasicWaveFormCache(uint32_t aSampleRate)
    : mSampleRate(aSampleRate) {
  MOZ_ASSERT(NS_IsMainThread());
}

BasicWaveFormCache::~BasicWaveFormCache() = default;

WebCore::PeriodicWave* BasicWaveFormCache::GetBasicWaveForm(
    OscillatorType aType) {
  MOZ_ASSERT(!NS_IsMainThread());
  if (aType == OscillatorType::Sawtooth) {
    if (!mSawtooth) {
      mSawtooth = WebCore::PeriodicWave::createSawtooth(mSampleRate);
    }
    return mSawtooth;
  }
  if (aType == OscillatorType::Square) {
    if (!mSquare) {
      mSquare = WebCore::PeriodicWave::createSquare(mSampleRate);
    }
    return mSquare;
  }
  if (aType == OscillatorType::Triangle) {
    if (!mTriangle) {
      mTriangle = WebCore::PeriodicWave::createTriangle(mSampleRate);
    }
    return mTriangle;
  }
  MOZ_ASSERT(false, "Not reached");
  return nullptr;
}

}  // namespace mozilla::dom