author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-12 05:35:29 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-12 05:35:29 +0000
commit     59203c63bb777a3bacec32fb8830fba33540e809 (patch)
tree       58298e711c0ff0575818c30485b44a2f21bf28a0  /dom/media/webrtc/MediaEngineWebRTCAudio.cpp
parent     Adding upstream version 126.0.1. (diff)
download   firefox-59203c63bb777a3bacec32fb8830fba33540e809.tar.xz
           firefox-59203c63bb777a3bacec32fb8830fba33540e809.zip
Adding upstream version 127.0. (upstream/127.0)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'dom/media/webrtc/MediaEngineWebRTCAudio.cpp')
-rw-r--r--  dom/media/webrtc/MediaEngineWebRTCAudio.cpp  125
1 file changed, 115 insertions, 10 deletions
diff --git a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
index 220dcf3bd8..c072717e00 100644
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -148,7 +148,7 @@ nsresult MediaEngineWebRTCMicrophoneSource::Reconfigure(
 }
 
 AudioProcessing::Config AudioInputProcessing::ConfigForPrefs(
-    const MediaEnginePrefs& aPrefs) {
+    const MediaEnginePrefs& aPrefs) const {
   AudioProcessing::Config config;
 
   config.pipeline.multi_channel_render = true;
@@ -207,6 +207,19 @@ AudioProcessing::Config AudioInputProcessing::ConfigForPrefs(
 
   config.high_pass_filter.enabled = aPrefs.mHPFOn;
 
+  if (mPlatformProcessingSetParams &
+      CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION) {
+    config.echo_canceller.enabled = false;
+  }
+  if (mPlatformProcessingSetParams &
+      CUBEB_INPUT_PROCESSING_PARAM_AUTOMATIC_GAIN_CONTROL) {
+    config.gain_controller1.enabled = config.gain_controller2.enabled = false;
+  }
+  if (mPlatformProcessingSetParams &
+      CUBEB_INPUT_PROCESSING_PARAM_NOISE_SUPPRESSION) {
+    config.noise_suppression.enabled = false;
+  }
+
   return config;
 }
 
@@ -412,11 +425,45 @@ void AudioInputProcessing::Disconnect(MediaTrackGraph* aGraph) {
   aGraph->AssertOnGraphThread();
 }
 
+void AudioInputProcessing::NotifySetRequestedInputProcessingParamsResult(
+    MediaTrackGraph* aGraph, cubeb_input_processing_params aRequestedParams,
+    const Result<cubeb_input_processing_params, int>& aResult) {
+  aGraph->AssertOnGraphThread();
+  if (aRequestedParams != RequestedInputProcessingParams(aGraph)) {
+    // This is a result from an old request, wait for a more recent one.
+    return;
+  }
+  if (aResult.isOk()) {
+    if (mPlatformProcessingSetParams == aResult.inspect()) {
+      // No change.
+      return;
+    }
+    mPlatformProcessingSetError = Nothing();
+    mPlatformProcessingSetParams = aResult.inspect();
+    LOG("AudioInputProcessing %p platform processing params are now %s.", this,
+        CubebUtils::ProcessingParamsToString(mPlatformProcessingSetParams)
+            .get());
+  } else {
+    mPlatformProcessingSetError = Some(aResult.inspectErr());
+    mPlatformProcessingSetParams = CUBEB_INPUT_PROCESSING_PARAM_NONE;
+    LOG("AudioInputProcessing %p platform processing params failed to apply. "
+        "Applying input processing config in libwebrtc.",
+        this);
+  }
+  ApplySettingsInternal(aGraph, mSettings);
+}
+
 bool AudioInputProcessing::IsPassThrough(MediaTrackGraph* aGraph) const {
   aGraph->AssertOnGraphThread();
   // The high-pass filter is not taken into account when activating the
   // pass through, since it's not controllable from content.
-  return !(mSettings.mAecOn || mSettings.mAgcOn || mSettings.mNoiseOn);
+  auto config = AppliedConfig(aGraph);
+  auto aec = [](const auto& config) { return config.echo_canceller.enabled; };
+  auto agc = [](const auto& config) {
+    return config.gain_controller1.enabled || config.gain_controller2.enabled;
+  };
+  auto ns = [](const auto& config) { return config.noise_suppression.enabled; };
+  return !(aec(config) || agc(config) || ns(config));
 }
 
 void AudioInputProcessing::PassThroughChanged(MediaTrackGraph* aGraph) {
@@ -438,7 +485,7 @@ void AudioInputProcessing::PassThroughChanged(MediaTrackGraph* aGraph) {
   }
 }
 
-uint32_t AudioInputProcessing::GetRequestedInputChannelCount() {
+uint32_t AudioInputProcessing::GetRequestedInputChannelCount() const {
   return mSettings.mChannels;
 }
 
@@ -668,6 +715,10 @@ void AudioInputProcessing::ProcessOutputData(AudioProcessingTrack* aTrack,
     return;
   }
 
+  if (aChunk.mDuration == 0) {
+    return;
+  }
+
   TrackRate sampleRate = aTrack->mSampleRate;
   uint32_t framesPerPacket = GetPacketSize(sampleRate);  // in frames
   // Downmix from aChannels to MAX_CHANNELS if needed.
@@ -868,6 +919,7 @@ void AudioInputProcessing::PacketizeAndProcess(AudioProcessingTrack* aTrack,
       !(mPacketCount % 50)) {
     AudioProcessingStats stats = mAudioProcessing->GetStatistics();
     char msg[1024];
+    msg[0] = '\0';
     size_t offset = 0;
 #define AddIfValue(format, member)       \
   if (stats.member.has_value()) {        \
@@ -962,14 +1014,61 @@ void AudioInputProcessing::DeviceChanged(MediaTrackGraph* aGraph) {
       aGraph, aGraph->CurrentDriver(), this);
 }
 
+cubeb_input_processing_params
+AudioInputProcessing::RequestedInputProcessingParams(
+    MediaTrackGraph* aGraph) const {
+  aGraph->AssertOnGraphThread();
+  if (!mPlatformProcessingEnabled) {
+    return CUBEB_INPUT_PROCESSING_PARAM_NONE;
+  }
+  if (mPlatformProcessingSetError) {
+    return CUBEB_INPUT_PROCESSING_PARAM_NONE;
+  }
+  cubeb_input_processing_params params = CUBEB_INPUT_PROCESSING_PARAM_NONE;
+  if (mSettings.mAecOn) {
+    params |= CUBEB_INPUT_PROCESSING_PARAM_ECHO_CANCELLATION;
+  }
+  if (mSettings.mAgcOn) {
+    params |= CUBEB_INPUT_PROCESSING_PARAM_AUTOMATIC_GAIN_CONTROL;
+  }
+  if (mSettings.mNoiseOn) {
+    params |= CUBEB_INPUT_PROCESSING_PARAM_NOISE_SUPPRESSION;
+  }
+  return params;
+}
+
 void AudioInputProcessing::ApplySettings(MediaTrackGraph* aGraph,
                                          CubebUtils::AudioDeviceID aDeviceID,
                                          const MediaEnginePrefs& aSettings) {
   TRACE("AudioInputProcessing::ApplySettings");
   aGraph->AssertOnGraphThread();
 
+  // CUBEB_ERROR_NOT_SUPPORTED means the backend does not support platform
+  // processing. In that case, leave the error in place so we don't request
+  // processing anew.
+  if (mPlatformProcessingSetError.valueOr(CUBEB_OK) !=
+      CUBEB_ERROR_NOT_SUPPORTED) {
+    mPlatformProcessingSetError = Nothing();
+  }
+
   // Read previous state from mSettings.
   uint32_t oldChannelCount = GetRequestedInputChannelCount();
+
+  ApplySettingsInternal(aGraph, aSettings);
+
+  if (oldChannelCount != GetRequestedInputChannelCount()) {
+    RequestedInputChannelCountChanged(aGraph, aDeviceID);
+  }
+}
+
+void AudioInputProcessing::ApplySettingsInternal(
+    MediaTrackGraph* aGraph, const MediaEnginePrefs& aSettings) {
+  TRACE("AudioInputProcessing::ApplySettingsInternal");
+  aGraph->AssertOnGraphThread();
+
+  mPlatformProcessingEnabled = aSettings.mUsePlatformProcessing;
+
+  // Read previous state from the applied config.
   bool wasPassThrough = IsPassThrough(aGraph);
 
   mSettings = aSettings;
@@ -977,14 +1076,20 @@ void AudioInputProcessing::ApplySettings(MediaTrackGraph* aGraph,
   if (mAudioProcessing) {
     mAudioProcessing->ApplyConfig(ConfigForPrefs(aSettings));
   }
-  if (oldChannelCount != GetRequestedInputChannelCount()) {
-    RequestedInputChannelCountChanged(aGraph, aDeviceID);
-  }
   if (wasPassThrough != IsPassThrough(aGraph)) {
     PassThroughChanged(aGraph);
   }
 }
 
+webrtc::AudioProcessing::Config AudioInputProcessing::AppliedConfig(
+    MediaTrackGraph* aGraph) const {
+  aGraph->AssertOnGraphThread();
+  if (mAudioProcessing) {
+    return mAudioProcessing->GetConfig();
+  }
+  return ConfigForPrefs(mSettings);
+}
+
 void AudioInputProcessing::End() {
   mEnded = true;
   mSegment.Clear();
@@ -1078,7 +1183,6 @@ void AudioInputProcessing::EnsureAudioProcessing(AudioProcessingTrack* aTrack) {
 void AudioInputProcessing::ResetAudioProcessing(MediaTrackGraph* aGraph) {
   aGraph->AssertOnGraphThread();
   MOZ_ASSERT(IsPassThrough(aGraph) || !mEnabled);
-  MOZ_ASSERT(mPacketizerInput);
 
   LOG_FRAME(
       "(Graph %p, Driver %p) AudioInputProcessing %p Resetting audio "
@@ -1091,9 +1195,10 @@ void AudioInputProcessing::ResetAudioProcessing(MediaTrackGraph* aGraph) {
     mAudioProcessing->Initialize();
   }
 
-  MOZ_ASSERT(static_cast<uint32_t>(mSegment.GetDuration()) +
-                 mPacketizerInput->FramesAvailable() ==
-             mPacketizerInput->mPacketSize);
+  MOZ_ASSERT_IF(mPacketizerInput,
+                static_cast<uint32_t>(mSegment.GetDuration()) +
+                        mPacketizerInput->FramesAvailable() ==
+                    mPacketizerInput->mPacketSize);
 
   // It's ok to clear all the internal buffer here since we won't use mSegment
   // in pass-through mode or when audio processing is disabled.
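For orientation, the pattern this patch introduces is: AudioInputProcessing asks the cubeb backend to handle AEC/AGC/NS in the platform (RequestedInputProcessingParams), whatever the backend confirms is masked out of the libwebrtc AudioProcessing config (ConfigForPrefs), and a failure reported through NotifySetRequestedInputProcessingParamsResult falls back to doing everything in libwebrtc. Below is a minimal, self-contained C++ sketch of that decision flow only; the Prefs/Processor types, member names and the enum are illustrative stand-ins, not the Firefox or cubeb API.

#include <cstdint>
#include <cstdio>
#include <optional>

// Illustrative stand-ins for cubeb_input_processing_params bits.
enum PlatformParams : uint32_t {
  kNone = 0,
  kEchoCancellation = 1u << 0,
  kAutomaticGainControl = 1u << 1,
  kNoiseSuppression = 1u << 2,
};

// Illustrative stand-in for the relevant MediaEnginePrefs flags.
struct Prefs {
  bool aec = true;
  bool agc = true;
  bool ns = true;
};

struct Processor {
  std::optional<int> platformError;  // set once the platform rejected a request
  uint32_t platformParams = kNone;   // params the platform actually applied

  // Simplified mirror of RequestedInputProcessingParams(): request nothing
  // once an error was recorded, otherwise translate prefs into a bitmask.
  uint32_t Requested(const Prefs& p) const {
    if (platformError) {
      return kNone;
    }
    uint32_t params = kNone;
    if (p.aec) params |= kEchoCancellation;
    if (p.agc) params |= kAutomaticGainControl;
    if (p.ns) params |= kNoiseSuppression;
    return params;
  }

  // Simplified mirror of ConfigForPrefs(): disable in software whatever the
  // platform already handles, so the same effect never runs twice.
  void ApplySoftwareConfig(const Prefs& p) const {
    bool swAec = p.aec && !(platformParams & kEchoCancellation);
    bool swAgc = p.agc && !(platformParams & kAutomaticGainControl);
    bool swNs = p.ns && !(platformParams & kNoiseSuppression);
    std::printf("software: aec=%d agc=%d ns=%d\n", swAec, swAgc, swNs);
  }

  // Simplified mirror of NotifySetRequestedInputProcessingParamsResult():
  // on success record what the platform applied; on failure clear it,
  // remember the error, and re-apply the software config.
  void NotifyResult(const Prefs& p, bool ok, uint32_t applied, int error) {
    if (ok) {
      platformError.reset();
      platformParams = applied;
    } else {
      platformError = error;
      platformParams = kNone;  // everything falls back to software
    }
    ApplySoftwareConfig(p);
  }
};

int main() {
  Prefs prefs;
  Processor proc;
  std::printf("requested=0x%x\n", proc.Requested(prefs));
  // Platform accepted AEC only: AGC and NS stay in software.
  proc.NotifyResult(prefs, /*ok=*/true, kEchoCancellation, 0);
  // Platform later fails: everything is handled in software again.
  proc.NotifyResult(prefs, /*ok=*/false, kNone, /*error=*/-1);
  return 0;
}

Keeping the platform-applied bits as a mask means the effective software config can always be recomputed from the prefs plus that mask, which is why the failure path in the patch can simply re-run the config step (ApplySettingsInternal) instead of tracking any extra state.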