From 6bf0a5cb5034a7e684dcc3500e841785237ce2dd Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sun, 7 Apr 2024 19:32:43 +0200
Subject: Adding upstream version 1:115.7.0.

Signed-off-by: Daniel Baumann
---
 dom/media/webaudio/AudioNodeExternalInputTrack.cpp | 225 +++++++++++++++++++++
 1 file changed, 225 insertions(+)
 create mode 100644 dom/media/webaudio/AudioNodeExternalInputTrack.cpp

diff --git a/dom/media/webaudio/AudioNodeExternalInputTrack.cpp b/dom/media/webaudio/AudioNodeExternalInputTrack.cpp
new file mode 100644
index 0000000000..2142752d39
--- /dev/null
+++ b/dom/media/webaudio/AudioNodeExternalInputTrack.cpp
@@ -0,0 +1,225 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "AlignedTArray.h"
+#include "AlignmentUtils.h"
+#include "AudioNodeEngine.h"
+#include "AudioNodeExternalInputTrack.h"
+#include "AudioChannelFormat.h"
+#include "mozilla/dom/MediaStreamAudioSourceNode.h"
+
+using namespace mozilla::dom;
+
+namespace mozilla {
+
+AudioNodeExternalInputTrack::AudioNodeExternalInputTrack(
+    AudioNodeEngine* aEngine, TrackRate aSampleRate)
+    : AudioNodeTrack(aEngine, NO_TRACK_FLAGS, aSampleRate) {
+  MOZ_COUNT_CTOR(AudioNodeExternalInputTrack);
+}
+
+AudioNodeExternalInputTrack::~AudioNodeExternalInputTrack() {
+  MOZ_COUNT_DTOR(AudioNodeExternalInputTrack);
+}
+
+/* static */
+already_AddRefed<AudioNodeExternalInputTrack>
+AudioNodeExternalInputTrack::Create(MediaTrackGraph* aGraph,
+                                    AudioNodeEngine* aEngine) {
+  AudioContext* ctx = aEngine->NodeMainThread()->Context();
+  MOZ_ASSERT(NS_IsMainThread());
+  MOZ_ASSERT(aGraph == ctx->Graph());
+
+  RefPtr<AudioNodeExternalInputTrack> track =
+      new AudioNodeExternalInputTrack(aEngine, aGraph->GraphRate());
+  track->mSuspendedCount += ctx->ShouldSuspendNewTrack();
+  aGraph->AddTrack(track);
+  return track.forget();
+}
+
+/**
+ * Copies the data in aInput to aOffsetInBlock within aBlock.
+ * aBlock must have been allocated with AllocateInputBlock and have a channel
+ * count that's a superset of the channels in aInput.
+ */
+template <typename T>
+static void CopyChunkToBlock(AudioChunk& aInput, AudioBlock* aBlock,
+                             uint32_t aOffsetInBlock) {
+  uint32_t blockChannels = aBlock->ChannelCount();
+  AutoTArray<const T*, GUESS_AUDIO_CHANNELS> channels;
+  if (aInput.IsNull()) {
+    channels.SetLength(blockChannels);
+    PodZero(channels.Elements(), blockChannels);
+  } else {
+    const nsTArray<const T*>& inputChannels = aInput.ChannelData<T>();
+    channels.SetLength(inputChannels.Length());
+    PodCopy(channels.Elements(), inputChannels.Elements(), channels.Length());
+    if (channels.Length() != blockChannels) {
+      // We only need to upmix here because aBlock's channel count has been
+      // chosen to be a superset of the channel count of every chunk.
+      AudioChannelsUpMix(&channels, blockChannels, static_cast<T*>(nullptr));
+    }
+  }
+
+  for (uint32_t c = 0; c < blockChannels; ++c) {
+    float* outputData = aBlock->ChannelFloatsForWrite(c) + aOffsetInBlock;
+    if (channels[c]) {
+      ConvertAudioSamplesWithScale(channels[c], outputData,
+                                   aInput.GetDuration(), aInput.mVolume);
+    } else {
+      PodZero(outputData, aInput.GetDuration());
+    }
+  }
+}
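+
+/* Editorial sketch (not part of the upstream file): how CopyChunkToBlock is
+ * meant to be driven. The caller sizes the block to a channel-count superset
+ * up front, then appends each chunk at a running offset; `segment` here is a
+ * hypothetical AudioSegment holding exactly WEBAUDIO_BLOCK_SIZE frames of
+ * float-format chunks:
+ *
+ *   AudioBlock block;
+ *   block.AllocateChannels(2);  // superset of every chunk's channel count
+ *   uint32_t offset = 0;
+ *   for (AudioSegment::ChunkIterator ci(segment); !ci.IsEnded(); ci.Next()) {
+ *     CopyChunkToBlock<float>(*ci, &block, offset);
+ *     offset += ci->GetDuration();
+ *   }
+ *
+ * Each call converts samples to float, applies the chunk's mVolume, and
+ * upmixes to the block's channel count, which is exactly the slow path of
+ * ConvertSegmentToAudioBlock below.
+ */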
+/**
+ * Converts the data in aSegment to a single chunk aBlock. aSegment must have
+ * duration WEBAUDIO_BLOCK_SIZE. aFallbackChannelCount is a superset of the
+ * channels in every chunk of aSegment. aBlock must be float format or null.
+ */
+static void ConvertSegmentToAudioBlock(AudioSegment* aSegment,
+                                       AudioBlock* aBlock,
+                                       int32_t aFallbackChannelCount) {
+  NS_ASSERTION(aSegment->GetDuration() == WEBAUDIO_BLOCK_SIZE,
+               "Bad segment duration");
+
+  {
+    AudioSegment::ChunkIterator ci(*aSegment);
+    NS_ASSERTION(!ci.IsEnded(), "Should be at least one chunk!");
+    if (ci->GetDuration() == WEBAUDIO_BLOCK_SIZE &&
+        (ci->IsNull() || ci->mBufferFormat == AUDIO_FORMAT_FLOAT32)) {
+      bool aligned = true;
+      for (size_t i = 0; i < ci->mChannelData.Length(); ++i) {
+        if (!IS_ALIGNED16(ci->mChannelData[i])) {
+          aligned = false;
+          break;
+        }
+      }
+
+      // Return this chunk directly to avoid copying data.
+      if (aligned) {
+        *aBlock = *ci;
+        return;
+      }
+    }
+  }
+
+  aBlock->AllocateChannels(aFallbackChannelCount);
+
+  uint32_t duration = 0;
+  for (AudioSegment::ChunkIterator ci(*aSegment); !ci.IsEnded(); ci.Next()) {
+    switch (ci->mBufferFormat) {
+      case AUDIO_FORMAT_S16: {
+        CopyChunkToBlock<int16_t>(*ci, aBlock, duration);
+        break;
+      }
+      case AUDIO_FORMAT_FLOAT32: {
+        CopyChunkToBlock<float>(*ci, aBlock, duration);
+        break;
+      }
+      case AUDIO_FORMAT_SILENCE: {
+        // The actual type of the sample does not matter here, but we still
+        // need to send some audio to the graph.
+        CopyChunkToBlock<float>(*ci, aBlock, duration);
+        break;
+      }
+    }
+    duration += ci->GetDuration();
+  }
+}
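+
+/* Editorial sketch (not part of the upstream file): the zero-copy fast path
+ * above only fires when the segment consists of a single chunk that already
+ * spans the full WEBAUDIO_BLOCK_SIZE, is null or float32, and has 16-byte
+ * aligned channel buffers; anything else falls through to the copying loop.
+ * A hypothetical caller, where `segment` and `channelSuperset` come from the
+ * surrounding code:
+ *
+ *   AudioBlock block;
+ *   ConvertSegmentToAudioBlock(&segment, &block, channelSuperset);
+ *   // Fast path: block now shares the chunk's buffers, no samples copied.
+ *   // Slow path: block owns freshly converted float data with
+ *   // channelSuperset channels.
+ */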
+
+void AudioNodeExternalInputTrack::ProcessInput(GraphTime aFrom, GraphTime aTo,
+                                               uint32_t aFlags) {
+  // According to spec, number of outputs is always 1.
+  MOZ_ASSERT(mLastChunks.Length() == 1);
+
+  // GC stuff can result in our input track being destroyed before this
+  // track. Handle that.
+  if (!IsEnabled() || mInputs.IsEmpty() || mPassThrough) {
+    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
+    return;
+  }
+
+  MOZ_ASSERT(mInputs.Length() == 1);
+
+  MediaTrack* source = mInputs[0]->GetSource();
+  AutoTArray<AudioSegment, 1> audioSegments;
+  uint32_t inputChannels = 0;
+
+  MOZ_ASSERT(source->GetData()->GetType() == MediaSegment::AUDIO,
+             "AudioNodeExternalInputTrack shouldn't have a video input");
+
+  const AudioSegment& inputSegment =
+      *mInputs[0]->GetSource()->GetData<AudioSegment>();
+  if (!inputSegment.IsNull()) {
+    AudioSegment& segment = *audioSegments.AppendElement();
+    GraphTime next;
+    for (GraphTime t = aFrom; t < aTo; t = next) {
+      MediaInputPort::InputInterval interval =
+          MediaInputPort::GetNextInputInterval(mInputs[0], t);
+      interval.mEnd = std::min(interval.mEnd, aTo);
+      if (interval.mStart >= interval.mEnd) {
+        break;
+      }
+      next = interval.mEnd;
+
+      // We know this track does not block during the processing interval ---
+      // we're not finished, we don't underrun, and we're not suspended.
+      TrackTime outputStart = GraphTimeToTrackTime(interval.mStart);
+      TrackTime outputEnd = GraphTimeToTrackTime(interval.mEnd);
+      TrackTime ticks = outputEnd - outputStart;
+
+      if (interval.mInputIsBlocked) {
+        segment.AppendNullData(ticks);
+      } else {
+        // The input track is not blocked in this interval, so no need to
+        // call GraphTimeToTrackTimeWithBlocking.
+        TrackTime inputStart =
+            std::min(inputSegment.GetDuration(),
+                     source->GraphTimeToTrackTime(interval.mStart));
+        TrackTime inputEnd =
+            std::min(inputSegment.GetDuration(),
+                     source->GraphTimeToTrackTime(interval.mEnd));
+
+        segment.AppendSlice(inputSegment, inputStart, inputEnd);
+        // Pad if we're looking past the end of the track
+        segment.AppendNullData(ticks - (inputEnd - inputStart));
+      }
+    }
+
+    for (AudioSegment::ChunkIterator iter(segment); !iter.IsEnded();
+         iter.Next()) {
+      inputChannels =
+          GetAudioChannelsSuperset(inputChannels, iter->ChannelCount());
+    }
+  }
+
+  uint32_t accumulateIndex = 0;
+  if (inputChannels) {
+    DownmixBufferType downmixBuffer;
+    ASSERT_ALIGNED16(downmixBuffer.Elements());
+    for (auto& audioSegment : audioSegments) {
+      AudioBlock tmpChunk;
+      ConvertSegmentToAudioBlock(&audioSegment, &tmpChunk, inputChannels);
+      if (!tmpChunk.IsNull()) {
+        if (accumulateIndex == 0) {
+          mLastChunks[0].AllocateChannels(inputChannels);
+        }
+        AccumulateInputChunk(accumulateIndex, tmpChunk, &mLastChunks[0],
+                             &downmixBuffer);
+        accumulateIndex++;
+      }
+    }
+  }
+  if (accumulateIndex == 0) {
+    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
+  }
+}
+
+bool AudioNodeExternalInputTrack::IsEnabled() {
+  return ((MediaStreamAudioSourceNodeEngine*)Engine())->IsEnabled();
+}
+
+} // namespace mozilla
--
cgit v1.2.3
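
Editorial note (not part of the patch): AudioNodeExternalInputTrack is the
graph-side track behind dom::MediaStreamAudioSourceNode. A minimal sketch of
the main-thread wiring, with hypothetical names (engine, inputTrack) and
assuming the Gecko-internal APIs referenced in this file:

    // Create the track on the AudioContext's graph (hypothetical setup):
    RefPtr<AudioNodeExternalInputTrack> track =
        AudioNodeExternalInputTrack::Create(context->Graph(), engine);

    // Connect the external MediaStreamTrack's MediaTrack as the single
    // input; ProcessInput() asserts mInputs.Length() == 1.
    RefPtr<MediaInputPort> port = track->AllocateInputPort(inputTrack);

Once connected, each graph iteration walks the input intervals between aFrom
and aTo, slices or null-pads the source's AudioSegment, and mixes the result
into mLastChunks[0] at the channel superset computed for that block.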