author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-15 03:34:42 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-15 03:34:42 +0000
commit    da4c7e7ed675c3bf405668739c3012d140856109 (patch)
tree      cdd868dba063fecba609a1d819de271f0d51b23e /dom/media/webrtc/MediaEngineWebRTCAudio.cpp
parent    Adding upstream version 125.0.3. (diff)
Adding upstream version 126.0. (upstream/126.0)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'dom/media/webrtc/MediaEngineWebRTCAudio.cpp')
-rw-r--r--  dom/media/webrtc/MediaEngineWebRTCAudio.cpp  279
1 file changed, 160 insertions(+), 119 deletions(-)
diff --git a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
index 9d778d411d..220dcf3bd8 100644
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -20,6 +20,7 @@
#include "mozilla/Sprintf.h"
#include "mozilla/Logging.h"
+#include "api/audio/echo_canceller3_factory.h"
#include "common_audio/include/audio_util.h"
#include "modules/audio_processing/include/audio_processing.h"
@@ -146,22 +147,17 @@ nsresult MediaEngineWebRTCMicrophoneSource::Reconfigure(
return NS_OK;
}
-void MediaEngineWebRTCMicrophoneSource::ApplySettings(
+AudioProcessing::Config AudioInputProcessing::ConfigForPrefs(
const MediaEnginePrefs& aPrefs) {
- AssertIsOnOwningThread();
-
- TRACE("ApplySettings");
- MOZ_ASSERT(
- mTrack,
- "ApplySetting is to be called only after SetTrack has been called");
+ AudioProcessing::Config config;
- mAudioProcessingConfig.pipeline.multi_channel_render = true;
- mAudioProcessingConfig.pipeline.multi_channel_capture = true;
+ config.pipeline.multi_channel_render = true;
+ config.pipeline.multi_channel_capture = true;
- mAudioProcessingConfig.echo_canceller.enabled = aPrefs.mAecOn;
- mAudioProcessingConfig.echo_canceller.mobile_mode = aPrefs.mUseAecMobile;
+ config.echo_canceller.enabled = aPrefs.mAecOn;
+ config.echo_canceller.mobile_mode = aPrefs.mUseAecMobile;
- if ((mAudioProcessingConfig.gain_controller1.enabled =
+ if ((config.gain_controller1.enabled =
aPrefs.mAgcOn && !aPrefs.mAgc2Forced)) {
auto mode = static_cast<AudioProcessing::Config::GainController1::Mode>(
aPrefs.mAgc);
@@ -169,7 +165,7 @@ void MediaEngineWebRTCMicrophoneSource::ApplySettings(
mode != AudioProcessing::Config::GainController1::kAdaptiveDigital &&
mode != AudioProcessing::Config::GainController1::kFixedDigital) {
LOG_ERROR("AudioInputProcessing %p Attempt to set invalid AGC mode %d",
- mInputProcessing.get(), static_cast<int>(mode));
+ this, static_cast<int>(mode));
mode = AudioProcessing::Config::GainController1::kAdaptiveDigital;
}
#if defined(WEBRTC_IOS) || defined(ATA) || defined(WEBRTC_ANDROID)
@@ -177,20 +173,20 @@ void MediaEngineWebRTCMicrophoneSource::ApplySettings(
LOG_ERROR(
"AudioInputProcessing %p Invalid AGC mode kAdaptiveAnalog on "
"mobile",
- mInputProcessing.get());
+ this);
MOZ_ASSERT_UNREACHABLE(
"Bad pref set in all.js or in about:config"
" for the auto gain, on mobile.");
mode = AudioProcessing::Config::GainController1::kFixedDigital;
}
#endif
- mAudioProcessingConfig.gain_controller1.mode = mode;
+ config.gain_controller1.mode = mode;
}
- mAudioProcessingConfig.gain_controller2.enabled =
- mAudioProcessingConfig.gain_controller2.adaptive_digital.enabled =
+ config.gain_controller2.enabled =
+ config.gain_controller2.adaptive_digital.enabled =
aPrefs.mAgcOn && aPrefs.mAgc2Forced;
- if ((mAudioProcessingConfig.noise_suppression.enabled = aPrefs.mNoiseOn)) {
+ if ((config.noise_suppression.enabled = aPrefs.mNoiseOn)) {
auto level = static_cast<AudioProcessing::Config::NoiseSuppression::Level>(
aPrefs.mNoise);
if (level != AudioProcessing::Config::NoiseSuppression::kLow &&
@@ -200,49 +196,44 @@ void MediaEngineWebRTCMicrophoneSource::ApplySettings(
LOG_ERROR(
"AudioInputProcessing %p Attempt to set invalid noise suppression "
"level %d",
- mInputProcessing.get(), static_cast<int>(level));
+ this, static_cast<int>(level));
level = AudioProcessing::Config::NoiseSuppression::kModerate;
}
- mAudioProcessingConfig.noise_suppression.level = level;
+ config.noise_suppression.level = level;
}
- mAudioProcessingConfig.transient_suppression.enabled = aPrefs.mTransientOn;
+ config.transient_suppression.enabled = aPrefs.mTransientOn;
+
+ config.high_pass_filter.enabled = aPrefs.mHPFOn;
- mAudioProcessingConfig.high_pass_filter.enabled = aPrefs.mHPFOn;
+ return config;
+}
+
+void MediaEngineWebRTCMicrophoneSource::ApplySettings(
+ const MediaEnginePrefs& aPrefs) {
+ AssertIsOnOwningThread();
+
+ TRACE("ApplySettings");
+ MOZ_ASSERT(
+ mTrack,
+ "ApplySetting is to be called only after SetTrack has been called");
RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
CubebUtils::AudioDeviceID deviceID = mDeviceInfo->DeviceID();
NS_DispatchToMainThread(NS_NewRunnableFunction(
- __func__, [this, that, deviceID, track = mTrack, prefs = aPrefs,
- audioProcessingConfig = mAudioProcessingConfig] {
+ __func__, [this, that, deviceID, track = mTrack, prefs = aPrefs] {
mSettings->mEchoCancellation.Value() = prefs.mAecOn;
mSettings->mAutoGainControl.Value() = prefs.mAgcOn;
mSettings->mNoiseSuppression.Value() = prefs.mNoiseOn;
mSettings->mChannelCount.Value() = prefs.mChannels;
- // The high-pass filter is not taken into account when activating the
- // pass through, since it's not controllable from content.
- bool passThrough = !(prefs.mAecOn || prefs.mAgcOn || prefs.mNoiseOn);
-
if (track->IsDestroyed()) {
return;
}
track->QueueControlMessageWithNoShutdown(
- [track, deviceID, inputProcessing = mInputProcessing,
- audioProcessingConfig, passThrough,
- requestedInputChannelCount = prefs.mChannels] {
- inputProcessing->ApplyConfig(track->Graph(),
- audioProcessingConfig);
- {
- TRACE("SetRequestedInputChannelCount");
- inputProcessing->SetRequestedInputChannelCount(
- track->Graph(), deviceID, requestedInputChannelCount);
- }
- {
- TRACE("SetPassThrough");
- inputProcessing->SetPassThrough(track->Graph(), passThrough);
- }
+ [track, deviceID, prefs, inputProcessing = mInputProcessing] {
+ inputProcessing->ApplySettings(track->Graph(), deviceID, prefs);
});
}));
}
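
The reworked ApplySettings above no longer ships a prebuilt AudioProcessing::Config plus derived flags across threads; it captures the raw MediaEnginePrefs by value and lets the graph thread interpret them. A minimal standalone sketch of that handoff pattern, using hypothetical Prefs/ControlQueue stand-ins rather than the real Gecko types:

    #include <cstdio>
    #include <functional>
    #include <queue>
    #include <utility>

    struct Prefs {  // stand-in for MediaEnginePrefs
      bool aecOn = true;
      bool agcOn = false;
      bool noiseOn = true;
      int channels = 2;
    };

    struct ControlQueue {  // stand-in for QueueControlMessageWithNoShutdown
      std::queue<std::function<void()>> mMessages;
      void Queue(std::function<void()> aMsg) { mMessages.push(std::move(aMsg)); }
      void RunAll() {
        while (!mMessages.empty()) {
          mMessages.front()();
          mMessages.pop();
        }
      }
    };

    int main() {
      ControlQueue graphThread;
      Prefs prefs;  // snapshot taken on the owning thread
      // Capture the whole settings struct by value; all interpretation
      // (config building, pass-through, channel count) happens later.
      graphThread.Queue([prefs] {
        bool passThrough = !(prefs.aecOn || prefs.agcOn || prefs.noiseOn);
        std::printf("channels=%d passThrough=%d\n", prefs.channels, passThrough);
      });
      graphThread.RunAll();  // stands in for the graph thread draining messages
      return 0;
    }
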
@@ -408,57 +399,51 @@ void MediaEngineWebRTCMicrophoneSource::GetSettings(
}
AudioInputProcessing::AudioInputProcessing(uint32_t aMaxChannelCount)
- : mAudioProcessing(AudioProcessingBuilder().Create().release()),
- mRequestedInputChannelCount(aMaxChannelCount),
- mSkipProcessing(false),
- mInputDownmixBuffer(MAX_SAMPLING_FREQ * MAX_CHANNELS / 100),
+ : mInputDownmixBuffer(MAX_SAMPLING_FREQ * MAX_CHANNELS / 100),
mEnabled(false),
mEnded(false),
- mPacketCount(0) {}
+ mPacketCount(0) {
+ mSettings.mChannels = static_cast<int32_t>(std::min<uint32_t>(
+ std::numeric_limits<int32_t>::max(), aMaxChannelCount));
+}
void AudioInputProcessing::Disconnect(MediaTrackGraph* aGraph) {
// This method is just for asserts.
aGraph->AssertOnGraphThread();
}
-bool AudioInputProcessing::PassThrough(MediaTrackGraph* aGraph) const {
+bool AudioInputProcessing::IsPassThrough(MediaTrackGraph* aGraph) const {
aGraph->AssertOnGraphThread();
- return mSkipProcessing;
+ // The high-pass filter is not taken into account when activating the
+ // pass through, since it's not controllable from content.
+ return !(mSettings.mAecOn || mSettings.mAgcOn || mSettings.mNoiseOn);
}
-void AudioInputProcessing::SetPassThrough(MediaTrackGraph* aGraph,
- bool aPassThrough) {
+void AudioInputProcessing::PassThroughChanged(MediaTrackGraph* aGraph) {
aGraph->AssertOnGraphThread();
- if (aPassThrough == mSkipProcessing) {
- return;
- }
- mSkipProcessing = aPassThrough;
-
if (!mEnabled) {
MOZ_ASSERT(!mPacketizerInput);
return;
}
- if (aPassThrough) {
- // Turn on pass-through
+ if (IsPassThrough(aGraph)) {
+ // Switching to pass-through. Clear state so that it doesn't affect any
+ // future processing, if re-enabled.
ResetAudioProcessing(aGraph);
} else {
- // Turn off pass-through
+ // No longer pass-through. Processing will not use old state.
+ // Packetizer setup is deferred until needed.
MOZ_ASSERT(!mPacketizerInput);
- EnsureAudioProcessing(aGraph, mRequestedInputChannelCount);
}
}
uint32_t AudioInputProcessing::GetRequestedInputChannelCount() {
- return mRequestedInputChannelCount;
+ return mSettings.mChannels;
}
-void AudioInputProcessing::SetRequestedInputChannelCount(
- MediaTrackGraph* aGraph, CubebUtils::AudioDeviceID aDeviceId,
- uint32_t aRequestedInputChannelCount) {
- mRequestedInputChannelCount = aRequestedInputChannelCount;
-
+void AudioInputProcessing::RequestedInputChannelCountChanged(
+ MediaTrackGraph* aGraph, CubebUtils::AudioDeviceID aDeviceId) {
aGraph->ReevaluateInputDevice(aDeviceId);
}
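
Two behaviours from the hunk above, shown in isolation: the constructor clamps the unsigned channel count into the signed MediaEnginePrefs::mChannels field, and pass-through is now derived on demand from the stored settings, ignoring the high-pass filter since content cannot control it. The names below are illustrative, not the Gecko ones:

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <limits>

    // Clamp an unsigned channel count into a signed settings field.
    static int32_t ClampChannels(uint32_t aMaxChannelCount) {
      return static_cast<int32_t>(std::min<uint32_t>(
          std::numeric_limits<int32_t>::max(), aMaxChannelCount));
    }

    // Pass-through is a pure function of the stored settings; the high-pass
    // filter is deliberately left out of the decision.
    static bool IsPassThrough(bool aAecOn, bool aAgcOn, bool aNoiseOn) {
      return !(aAecOn || aAgcOn || aNoiseOn);
    }

    int main() {
      assert(ClampChannels(2) == 2);
      assert(ClampChannels(UINT32_MAX) == std::numeric_limits<int32_t>::max());
      assert(IsPassThrough(false, false, false));
      assert(!IsPassThrough(true, false, false));
      return 0;
    }
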
@@ -470,12 +455,7 @@ void AudioInputProcessing::Start(MediaTrackGraph* aGraph) {
}
mEnabled = true;
- if (mSkipProcessing) {
- return;
- }
-
MOZ_ASSERT(!mPacketizerInput);
- EnsureAudioProcessing(aGraph, mRequestedInputChannelCount);
}
void AudioInputProcessing::Stop(MediaTrackGraph* aGraph) {
@@ -487,7 +467,7 @@ void AudioInputProcessing::Stop(MediaTrackGraph* aGraph) {
mEnabled = false;
- if (mSkipProcessing) {
+ if (IsPassThrough(aGraph)) {
return;
}
@@ -605,10 +585,11 @@ void AudioInputProcessing::Stop(MediaTrackGraph* aGraph) {
//
// The D(N) frames of data are just forwarded from input to output without any
// processing
-void AudioInputProcessing::Process(MediaTrackGraph* aGraph, GraphTime aFrom,
- GraphTime aTo, AudioSegment* aInput,
+void AudioInputProcessing::Process(AudioProcessingTrack* aTrack,
+ GraphTime aFrom, GraphTime aTo,
+ AudioSegment* aInput,
AudioSegment* aOutput) {
- aGraph->AssertOnGraphThread();
+ aTrack->AssertOnGraphThread();
MOZ_ASSERT(aFrom <= aTo);
MOZ_ASSERT(!mEnded);
@@ -617,10 +598,11 @@ void AudioInputProcessing::Process(MediaTrackGraph* aGraph, GraphTime aFrom,
return;
}
+ MediaTrackGraph* graph = aTrack->Graph();
if (!mEnabled) {
LOG_FRAME("(Graph %p, Driver %p) AudioInputProcessing %p Filling %" PRId64
" frames of silence to output (disabled)",
- aGraph, aGraph->CurrentDriver(), this, need);
+ graph, graph->CurrentDriver(), this, need);
aOutput->AppendNullData(need);
return;
}
@@ -628,22 +610,20 @@ void AudioInputProcessing::Process(MediaTrackGraph* aGraph, GraphTime aFrom,
MOZ_ASSERT(aInput->GetDuration() == need,
"Wrong data length from input port source");
- if (PassThrough(aGraph)) {
+ if (IsPassThrough(graph)) {
LOG_FRAME(
"(Graph %p, Driver %p) AudioInputProcessing %p Forwarding %" PRId64
" frames of input data to output directly (PassThrough)",
- aGraph, aGraph->CurrentDriver(), this, aInput->GetDuration());
+ graph, graph->CurrentDriver(), this, aInput->GetDuration());
aOutput->AppendSegment(aInput);
return;
}
- // SetPassThrough(false) must be called before reaching here.
- MOZ_ASSERT(mPacketizerInput);
- // If mRequestedInputChannelCount is updated, create a new packetizer. No
- // need to change the pre-buffering since the rate is always the same. The
- // frames left in the packetizer would be replaced by null data and then
- // transferred to mSegment.
- EnsureAudioProcessing(aGraph, mRequestedInputChannelCount);
+ // If the requested input channel count is updated, create a new
+ // packetizer. No need to change the pre-buffering since the rate is always
+ // the same. The frames left in the packetizer would be replaced by null
+ // data and then transferred to mSegment.
+ EnsurePacketizer(aTrack);
// Preconditions of the audio-processing logic.
MOZ_ASSERT(static_cast<uint32_t>(mSegment.GetDuration()) +
@@ -655,10 +635,10 @@ void AudioInputProcessing::Process(MediaTrackGraph* aGraph, GraphTime aFrom,
MOZ_ASSERT(mSegment.GetDuration() >= 1);
MOZ_ASSERT(mSegment.GetDuration() <= mPacketizerInput->mPacketSize);
- PacketizeAndProcess(aGraph, *aInput);
+ PacketizeAndProcess(aTrack, *aInput);
LOG_FRAME("(Graph %p, Driver %p) AudioInputProcessing %p Buffer has %" PRId64
" frames of data now, after packetizing and processing",
- aGraph, aGraph->CurrentDriver(), this, mSegment.GetDuration());
+ graph, graph->CurrentDriver(), this, mSegment.GetDuration());
// By setting pre-buffering to the number of frames of one packet, and
// because the maximum number of frames stuck in the packetizer before
@@ -669,8 +649,7 @@ void AudioInputProcessing::Process(MediaTrackGraph* aGraph, GraphTime aFrom,
mSegment.RemoveLeading(need);
LOG_FRAME("(Graph %p, Driver %p) AudioInputProcessing %p moving %" PRId64
" frames of data to output, leaving %" PRId64 " frames in buffer",
- aGraph, aGraph->CurrentDriver(), this, need,
- mSegment.GetDuration());
+ graph, graph->CurrentDriver(), this, need, mSegment.GetDuration());
// Postconditions of the audio-processing logic.
MOZ_ASSERT(static_cast<uint32_t>(mSegment.GetDuration()) +
@@ -680,16 +659,16 @@ void AudioInputProcessing::Process(MediaTrackGraph* aGraph, GraphTime aFrom,
MOZ_ASSERT(mSegment.GetDuration() <= mPacketizerInput->mPacketSize);
}
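
The pre-buffering argument in Process() above can be checked with a few lines of arithmetic: once one packet of silence has been pre-buffered, the processed buffer plus the frames still sitting in the packetizer always add up to exactly one packet, so any block the graph requests can be served. A sketch assuming a hypothetical 48 kHz rate with 10 ms packets (the real sizes come from GetPacketSize and the track's sample rate):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t packet = 480;  // 10 ms at an assumed 48 kHz rate
      uint32_t segment = packet;    // one packet of pre-buffered silence
      uint32_t packetizer = 0;      // frames waiting inside the packetizer
      // Feed blocks of various sizes, as graph iterations would.
      for (uint32_t need = 1; need <= 2 * packet; ++need) {
        packetizer += need;              // input lands in the packetizer
        while (packetizer >= packet) {   // full packets are processed...
          packetizer -= packet;
          segment += packet;             // ...and appended to the buffer
        }
        assert(segment >= need);         // the output block can always be served
        segment -= need;                 // move `need` frames to the output
        assert(segment + packetizer == packet);  // invariant asserted in Process()
      }
      return 0;
    }
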
-void AudioInputProcessing::ProcessOutputData(MediaTrackGraph* aGraph,
+void AudioInputProcessing::ProcessOutputData(AudioProcessingTrack* aTrack,
const AudioChunk& aChunk) {
MOZ_ASSERT(aChunk.ChannelCount() > 0);
- aGraph->AssertOnGraphThread();
+ aTrack->AssertOnGraphThread();
- if (!mEnabled || PassThrough(aGraph)) {
+ if (!mEnabled || IsPassThrough(aTrack->Graph())) {
return;
}
- TrackRate sampleRate = aGraph->GraphRate();
+ TrackRate sampleRate = aTrack->mSampleRate;
uint32_t framesPerPacket = GetPacketSize(sampleRate); // in frames
// Downmix from aChannels to MAX_CHANNELS if needed.
uint32_t channelCount =
@@ -727,6 +706,7 @@ void AudioInputProcessing::ProcessOutputData(MediaTrackGraph* aGraph,
if (mOutputBufferFrameCount == framesPerPacket) {
// Have a complete packet. Analyze it.
+ EnsureAudioProcessing(aTrack);
for (uint32_t channel = 0; channel < channelCount; channel++) {
channelPtrs[channel] = &mOutputBuffer[channel * framesPerPacket];
}
@@ -743,14 +723,15 @@ void AudioInputProcessing::ProcessOutputData(MediaTrackGraph* aGraph,
}
// Only called if we're not in passthrough mode
-void AudioInputProcessing::PacketizeAndProcess(MediaTrackGraph* aGraph,
+void AudioInputProcessing::PacketizeAndProcess(AudioProcessingTrack* aTrack,
const AudioSegment& aSegment) {
- MOZ_ASSERT(!PassThrough(aGraph),
+ MediaTrackGraph* graph = aTrack->Graph();
+ MOZ_ASSERT(!IsPassThrough(graph),
"This should be bypassed when in PassThrough mode.");
MOZ_ASSERT(mEnabled);
MOZ_ASSERT(mPacketizerInput);
MOZ_ASSERT(mPacketizerInput->mPacketSize ==
- GetPacketSize(aGraph->GraphRate()));
+ GetPacketSize(aTrack->mSampleRate));
// Calculate number of the pending frames in mChunksInPacketizer.
auto pendingFrames = [&]() {
@@ -792,7 +773,7 @@ void AudioInputProcessing::PacketizeAndProcess(MediaTrackGraph* aGraph,
LOG_FRAME(
"(Graph %p, Driver %p) AudioInputProcessing %p Packetizing %zu frames. "
"Packetizer has %u frames (enough for %u packets) now",
- aGraph, aGraph->CurrentDriver(), this, frameCount,
+ graph, graph->CurrentDriver(), this, frameCount,
mPacketizerInput->FramesAvailable(),
mPacketizerInput->PacketsAvailable());
@@ -850,9 +831,10 @@ void AudioInputProcessing::PacketizeAndProcess(MediaTrackGraph* aGraph,
deinterleavedPacketizedInputDataChannelPointers.Elements());
}
- StreamConfig inputConfig(aGraph->GraphRate(), channelCountInput);
+ StreamConfig inputConfig(aTrack->mSampleRate, channelCountInput);
StreamConfig outputConfig = inputConfig;
+ EnsureAudioProcessing(aTrack);
// Bug 1404965: Get the right delay here, it saves some work down the line.
mAudioProcessing->set_stream_delay_ms(0);
@@ -958,7 +940,7 @@ void AudioInputProcessing::PacketizeAndProcess(MediaTrackGraph* aGraph,
"(Graph %p, Driver %p) AudioInputProcessing %p Appending %u frames of "
"packetized audio, leaving %u frames in packetizer (%" PRId64
" frames in mChunksInPacketizer)",
- aGraph, aGraph->CurrentDriver(), this, mPacketizerInput->mPacketSize,
+ graph, graph->CurrentDriver(), this, mPacketizerInput->mPacketSize,
mPacketizerInput->FramesAvailable(), pendingFrames());
// Postcondition of the Principal-labelling logic.
@@ -971,17 +953,36 @@ void AudioInputProcessing::DeviceChanged(MediaTrackGraph* aGraph) {
aGraph->AssertOnGraphThread();
// Reset some processing
- mAudioProcessing->Initialize();
+ if (mAudioProcessing) {
+ mAudioProcessing->Initialize();
+ }
LOG_FRAME(
"(Graph %p, Driver %p) AudioInputProcessing %p Reinitializing audio "
"processing",
aGraph, aGraph->CurrentDriver(), this);
}
-void AudioInputProcessing::ApplyConfig(MediaTrackGraph* aGraph,
- const AudioProcessing::Config& aConfig) {
+void AudioInputProcessing::ApplySettings(MediaTrackGraph* aGraph,
+ CubebUtils::AudioDeviceID aDeviceID,
+ const MediaEnginePrefs& aSettings) {
+ TRACE("AudioInputProcessing::ApplySettings");
aGraph->AssertOnGraphThread();
- mAudioProcessing->ApplyConfig(aConfig);
+
+ // Read previous state from mSettings.
+ uint32_t oldChannelCount = GetRequestedInputChannelCount();
+ bool wasPassThrough = IsPassThrough(aGraph);
+
+ mSettings = aSettings;
+ if (mAudioProcessing) {
+ mAudioProcessing->ApplyConfig(ConfigForPrefs(aSettings));
+ }
+
+ if (oldChannelCount != GetRequestedInputChannelCount()) {
+ RequestedInputChannelCountChanged(aGraph, aDeviceID);
+ }
+ if (wasPassThrough != IsPassThrough(aGraph)) {
+ PassThroughChanged(aGraph);
+ }
}
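
The new graph-thread ApplySettings follows a "snapshot derived state, update, compare, notify" shape: the requested channel count and the pass-through flag are read before mSettings is overwritten, and the corresponding change handlers run only when those derived values actually change. A compact illustration with hypothetical types:

    #include <cstdio>

    struct Settings {  // stand-in for MediaEnginePrefs
      int channels = 1;
      bool aecOn = false;
      bool agcOn = false;
      bool noiseOn = false;
    };

    struct Processor {
      Settings mSettings;
      int RequestedChannels() const { return mSettings.channels; }
      bool IsPassThrough() const {
        return !(mSettings.aecOn || mSettings.agcOn || mSettings.noiseOn);
      }
      void ApplySettings(const Settings& aNew) {
        // Snapshot the derived values before overwriting the settings.
        int oldChannels = RequestedChannels();
        bool wasPassThrough = IsPassThrough();
        mSettings = aNew;
        if (oldChannels != RequestedChannels()) {
          std::puts("channel count changed: re-evaluate the input device");
        }
        if (wasPassThrough != IsPassThrough()) {
          std::puts("pass-through changed: reset or defer processing state");
        }
      }
    };

    int main() {
      Processor p;
      Settings s;
      s.channels = 2;
      s.aecOn = true;
      p.ApplySettings(s);  // both notifications fire for this change
      return 0;
    }
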
void AudioInputProcessing::End() {
@@ -995,14 +996,15 @@ TrackTime AudioInputProcessing::NumBufferedFrames(
return mSegment.GetDuration();
}
-void AudioInputProcessing::EnsureAudioProcessing(MediaTrackGraph* aGraph,
- uint32_t aChannels) {
- aGraph->AssertOnGraphThread();
- MOZ_ASSERT(aChannels > 0);
+void AudioInputProcessing::EnsurePacketizer(AudioProcessingTrack* aTrack) {
+ aTrack->AssertOnGraphThread();
MOZ_ASSERT(mEnabled);
- MOZ_ASSERT(!mSkipProcessing);
+ MediaTrackGraph* graph = aTrack->Graph();
+ MOZ_ASSERT(!IsPassThrough(graph));
- if (mPacketizerInput && mPacketizerInput->mChannels == aChannels) {
+ uint32_t channelCount = GetRequestedInputChannelCount();
+ MOZ_ASSERT(channelCount > 0);
+ if (mPacketizerInput && mPacketizerInput->mChannels == channelCount) {
return;
}
@@ -1010,7 +1012,7 @@ void AudioInputProcessing::EnsureAudioProcessing(MediaTrackGraph* aGraph,
// need to change pre-buffering since the packet size is the same as the old
// one, since the rate is a constant.
MOZ_ASSERT_IF(mPacketizerInput, mPacketizerInput->mPacketSize ==
- GetPacketSize(aGraph->GraphRate()));
+ GetPacketSize(aTrack->mSampleRate));
bool needPreBuffering = !mPacketizerInput;
if (mPacketizerInput) {
const TrackTime numBufferedFrames =
@@ -1020,24 +1022,62 @@ void AudioInputProcessing::EnsureAudioProcessing(MediaTrackGraph* aGraph,
mChunksInPacketizer.clear();
}
- mPacketizerInput.emplace(GetPacketSize(aGraph->GraphRate()), aChannels);
+ mPacketizerInput.emplace(GetPacketSize(aTrack->mSampleRate), channelCount);
if (needPreBuffering) {
LOG_FRAME(
"(Graph %p, Driver %p) AudioInputProcessing %p: Adding %u frames of "
"silence as pre-buffering",
- aGraph, aGraph->CurrentDriver(), this, mPacketizerInput->mPacketSize);
+ graph, graph->CurrentDriver(), this, mPacketizerInput->mPacketSize);
AudioSegment buffering;
buffering.AppendNullData(
static_cast<TrackTime>(mPacketizerInput->mPacketSize));
- PacketizeAndProcess(aGraph, buffering);
+ PacketizeAndProcess(aTrack, buffering);
+ }
+}
+
+void AudioInputProcessing::EnsureAudioProcessing(AudioProcessingTrack* aTrack) {
+ aTrack->AssertOnGraphThread();
+
+ MediaTrackGraph* graph = aTrack->Graph();
+ // If the AEC might need to deal with drift then inform it of this and it
+ // will be less conservative about echo suppression. This can lead to some
+ // suppression of non-echo signal, so do this only when drift is expected.
+ // https://bugs.chromium.org/p/webrtc/issues/detail?id=11985#c2
+ bool haveAECAndDrift = mSettings.mAecOn;
+ if (haveAECAndDrift) {
+ if (mSettings.mExpectDrift < 0) {
+ haveAECAndDrift =
+ graph->OutputForAECMightDrift() ||
+ aTrack->GetDeviceInputTrackGraphThread()->AsNonNativeInputTrack();
+ } else {
+ haveAECAndDrift = mSettings.mExpectDrift > 0;
+ }
+ }
+ if (!mAudioProcessing || haveAECAndDrift != mHadAECAndDrift) {
+ TRACE("AudioProcessing creation");
+ LOG("Track %p AudioInputProcessing %p creating AudioProcessing. "
+ "aec+drift: %s",
+ aTrack, this, haveAECAndDrift ? "Y" : "N");
+ mHadAECAndDrift = haveAECAndDrift;
+ AudioProcessingBuilder builder;
+ builder.SetConfig(ConfigForPrefs(mSettings));
+ if (haveAECAndDrift) {
+ // Setting an EchoControlFactory always enables AEC, overriding
+ // Config::echo_canceller.enabled, so do this only when AEC is enabled.
+ EchoCanceller3Config aec3Config;
+ aec3Config.echo_removal_control.has_clock_drift = true;
+ builder.SetEchoControlFactory(
+ std::make_unique<EchoCanceller3Factory>(aec3Config));
+ }
+ mAudioProcessing.reset(builder.Create().release());
}
}
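
EnsureAudioProcessing above creates the webrtc::AudioProcessing instance lazily and rebuilds it whenever the "AEC with drift" requirement flips, because an EchoControlFactory can only be installed at build time and installing one force-enables echo cancellation. A condensed sketch of just the build step, using the upstream webrtc API calls shown in the hunk (header paths assumed from upstream webrtc):

    #include <memory>

    #include "api/audio/echo_canceller3_config.h"
    #include "api/audio/echo_canceller3_factory.h"
    #include "modules/audio_processing/include/audio_processing.h"

    // Build an AudioProcessing whose AEC3 is told to expect clock drift.
    // The factory is only installed when AEC is wanted, since setting it
    // overrides Config::echo_canceller.enabled.
    auto BuildAudioProcessing(const webrtc::AudioProcessing::Config& aConfig,
                              bool aAecWithDrift) {
      webrtc::AudioProcessingBuilder builder;
      builder.SetConfig(aConfig);
      if (aAecWithDrift) {
        webrtc::EchoCanceller3Config aec3Config;
        aec3Config.echo_removal_control.has_clock_drift = true;
        builder.SetEchoControlFactory(
            std::make_unique<webrtc::EchoCanceller3Factory>(aec3Config));
      }
      return builder.Create();  // ref-counted AudioProcessing in current webrtc
    }
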
void AudioInputProcessing::ResetAudioProcessing(MediaTrackGraph* aGraph) {
aGraph->AssertOnGraphThread();
- MOZ_ASSERT(mSkipProcessing || !mEnabled);
+ MOZ_ASSERT(IsPassThrough(aGraph) || !mEnabled);
MOZ_ASSERT(mPacketizerInput);
LOG_FRAME(
@@ -1047,7 +1087,9 @@ void AudioInputProcessing::ResetAudioProcessing(MediaTrackGraph* aGraph) {
// Reset AudioProcessing so that if we resume processing in the future it
// doesn't depend on old state.
- mAudioProcessing->Initialize();
+ if (mAudioProcessing) {
+ mAudioProcessing->Initialize();
+ }
MOZ_ASSERT(static_cast<uint32_t>(mSegment.GetDuration()) +
mPacketizerInput->FramesAvailable() ==
@@ -1124,9 +1166,8 @@ void AudioProcessingTrack::ProcessInput(GraphTime aFrom, GraphTime aTo,
} else {
MOZ_ASSERT(mInputs.Length() == 1);
AudioSegment data;
- DeviceInputConsumerTrack::GetInputSourceData(data, mInputs[0], aFrom,
- aTo);
- mInputProcessing->Process(Graph(), aFrom, aTo, &data,
+ DeviceInputConsumerTrack::GetInputSourceData(data, aFrom, aTo);
+ mInputProcessing->Process(this, aFrom, aTo, &data,
GetData<AudioSegment>());
}
MOZ_ASSERT(TrackTimeToGraphTime(GetEnd()) == aTo);
@@ -1142,7 +1183,7 @@ void AudioProcessingTrack::NotifyOutputData(MediaTrackGraph* aGraph,
MOZ_ASSERT(mGraph == aGraph, "Cannot feed audio output to another graph");
AssertOnGraphThread();
if (mInputProcessing) {
- mInputProcessing->ProcessOutputData(aGraph, aChunk);
+ mInputProcessing->ProcessOutputData(this, aChunk);
}
}