path: root/dom/media/webrtc
author    Daniel Baumann <daniel.baumann@progress-linux.org> 2024-05-15 03:34:42 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-05-15 03:34:42 +0000
commit    da4c7e7ed675c3bf405668739c3012d140856109 (patch)
tree      cdd868dba063fecba609a1d819de271f0d51b23e /dom/media/webrtc
parent    Adding upstream version 125.0.3. (diff)
download  firefox-da4c7e7ed675c3bf405668739c3012d140856109.tar.xz
          firefox-da4c7e7ed675c3bf405668739c3012d140856109.zip
Adding upstream version 126.0. (upstream/126.0)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'dom/media/webrtc')
-rw-r--r-- dom/media/webrtc/MediaEnginePrefs.h                                |   2
-rw-r--r-- dom/media/webrtc/MediaEngineWebRTCAudio.cpp                        | 279
-rw-r--r-- dom/media/webrtc/MediaEngineWebRTCAudio.h                          |  57
-rw-r--r-- dom/media/webrtc/jsapi/PeerConnectionCtx.cpp                       |   5
-rw-r--r-- dom/media/webrtc/jsapi/RTCRtpScriptTransform.cpp                   |  16
-rw-r--r-- dom/media/webrtc/jsapi/RTCRtpScriptTransformer.cpp                 |   2
-rw-r--r-- dom/media/webrtc/jsapi/RTCRtpScriptTransformer.h                   |   2
-rw-r--r-- dom/media/webrtc/jsapi/WebrtcGlobalInformation.cpp                 |   2
-rw-r--r-- dom/media/webrtc/libwebrtcglue/AudioConduit.cpp                    |   2
-rw-r--r-- dom/media/webrtc/libwebrtcglue/VideoConduit.cpp                    |   2
-rw-r--r-- dom/media/webrtc/libwebrtcglue/WebrtcCallWrapper.cpp               |  17
-rw-r--r-- dom/media/webrtc/libwebrtcglue/WebrtcGmpVideoCodec.h               |   2
-rw-r--r-- dom/media/webrtc/libwebrtcglue/WebrtcMediaDataEncoderCodec.cpp     |  20
-rw-r--r-- dom/media/webrtc/libwebrtcglue/WebrtcMediaDataEncoderCodec.h       |   2
-rw-r--r-- dom/media/webrtc/tests/mochitests/test_peerConnection_toJSON.html  |   9
-rw-r--r-- dom/media/webrtc/third_party_build/default_config_env             |  20
-rw-r--r-- dom/media/webrtc/third_party_build/elm_rebase.sh                   |  15
-rw-r--r-- dom/media/webrtc/third_party_build/fetch_github_repo.py            |   4
-rw-r--r-- dom/media/webrtc/third_party_build/vendor-libwebrtc.py             |   1
-rw-r--r-- dom/media/webrtc/transport/test/ice_unittest.cpp                   |  19
20 files changed, 281 insertions, 197 deletions
diff --git a/dom/media/webrtc/MediaEnginePrefs.h b/dom/media/webrtc/MediaEnginePrefs.h
index cedb7f457c..de5daf0ad9 100644
--- a/dom/media/webrtc/MediaEnginePrefs.h
+++ b/dom/media/webrtc/MediaEnginePrefs.h
@@ -35,6 +35,7 @@ class MediaEnginePrefs {
mNoiseOn(false),
mTransientOn(false),
mAgc2Forced(false),
+ mExpectDrift(-1), // auto
mAgc(0),
mNoise(0),
mChannels(0) {}
@@ -50,6 +51,7 @@ class MediaEnginePrefs {
bool mNoiseOn;
bool mTransientOn;
bool mAgc2Forced;
+ int32_t mExpectDrift;
int32_t mAgc;
int32_t mNoise;
int32_t mChannels;
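
Note that the new mExpectDrift pref is a tri-state int32_t rather than a bool: -1 (the default set above) means auto-detect, 0 forces drift handling off, and a positive value forces it on. A minimal sketch of that interpretation, with hypothetical names (the real consumer is EnsureAudioProcessing() further down in this patch):

    #include <cstdint>

    // Hypothetical helper: negative = auto-detect, 0 = never, positive = always.
    bool ExpectDrift(int32_t aExpectDriftPref, bool aAutoDetectedDrift) {
      if (aExpectDriftPref < 0) {
        return aAutoDetectedDrift;  // -1: defer to runtime detection
      }
      return aExpectDriftPref > 0;
    }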
diff --git a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
index 9d778d411d..220dcf3bd8 100644
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -20,6 +20,7 @@
#include "mozilla/Sprintf.h"
#include "mozilla/Logging.h"
+#include "api/audio/echo_canceller3_factory.h"
#include "common_audio/include/audio_util.h"
#include "modules/audio_processing/include/audio_processing.h"
@@ -146,22 +147,17 @@ nsresult MediaEngineWebRTCMicrophoneSource::Reconfigure(
return NS_OK;
}
-void MediaEngineWebRTCMicrophoneSource::ApplySettings(
+AudioProcessing::Config AudioInputProcessing::ConfigForPrefs(
const MediaEnginePrefs& aPrefs) {
- AssertIsOnOwningThread();
-
- TRACE("ApplySettings");
- MOZ_ASSERT(
- mTrack,
- "ApplySetting is to be called only after SetTrack has been called");
+ AudioProcessing::Config config;
- mAudioProcessingConfig.pipeline.multi_channel_render = true;
- mAudioProcessingConfig.pipeline.multi_channel_capture = true;
+ config.pipeline.multi_channel_render = true;
+ config.pipeline.multi_channel_capture = true;
- mAudioProcessingConfig.echo_canceller.enabled = aPrefs.mAecOn;
- mAudioProcessingConfig.echo_canceller.mobile_mode = aPrefs.mUseAecMobile;
+ config.echo_canceller.enabled = aPrefs.mAecOn;
+ config.echo_canceller.mobile_mode = aPrefs.mUseAecMobile;
- if ((mAudioProcessingConfig.gain_controller1.enabled =
+ if ((config.gain_controller1.enabled =
aPrefs.mAgcOn && !aPrefs.mAgc2Forced)) {
auto mode = static_cast<AudioProcessing::Config::GainController1::Mode>(
aPrefs.mAgc);
@@ -169,7 +165,7 @@ void MediaEngineWebRTCMicrophoneSource::ApplySettings(
mode != AudioProcessing::Config::GainController1::kAdaptiveDigital &&
mode != AudioProcessing::Config::GainController1::kFixedDigital) {
LOG_ERROR("AudioInputProcessing %p Attempt to set invalid AGC mode %d",
- mInputProcessing.get(), static_cast<int>(mode));
+ this, static_cast<int>(mode));
mode = AudioProcessing::Config::GainController1::kAdaptiveDigital;
}
#if defined(WEBRTC_IOS) || defined(ATA) || defined(WEBRTC_ANDROID)
@@ -177,20 +173,20 @@ void MediaEngineWebRTCMicrophoneSource::ApplySettings(
LOG_ERROR(
"AudioInputProcessing %p Invalid AGC mode kAdaptiveAnalog on "
"mobile",
- mInputProcessing.get());
+ this);
MOZ_ASSERT_UNREACHABLE(
"Bad pref set in all.js or in about:config"
" for the auto gain, on mobile.");
mode = AudioProcessing::Config::GainController1::kFixedDigital;
}
#endif
- mAudioProcessingConfig.gain_controller1.mode = mode;
+ config.gain_controller1.mode = mode;
}
- mAudioProcessingConfig.gain_controller2.enabled =
- mAudioProcessingConfig.gain_controller2.adaptive_digital.enabled =
+ config.gain_controller2.enabled =
+ config.gain_controller2.adaptive_digital.enabled =
aPrefs.mAgcOn && aPrefs.mAgc2Forced;
- if ((mAudioProcessingConfig.noise_suppression.enabled = aPrefs.mNoiseOn)) {
+ if ((config.noise_suppression.enabled = aPrefs.mNoiseOn)) {
auto level = static_cast<AudioProcessing::Config::NoiseSuppression::Level>(
aPrefs.mNoise);
if (level != AudioProcessing::Config::NoiseSuppression::kLow &&
@@ -200,49 +196,44 @@ void MediaEngineWebRTCMicrophoneSource::ApplySettings(
LOG_ERROR(
"AudioInputProcessing %p Attempt to set invalid noise suppression "
"level %d",
- mInputProcessing.get(), static_cast<int>(level));
+ this, static_cast<int>(level));
level = AudioProcessing::Config::NoiseSuppression::kModerate;
}
- mAudioProcessingConfig.noise_suppression.level = level;
+ config.noise_suppression.level = level;
}
- mAudioProcessingConfig.transient_suppression.enabled = aPrefs.mTransientOn;
+ config.transient_suppression.enabled = aPrefs.mTransientOn;
+
+ config.high_pass_filter.enabled = aPrefs.mHPFOn;
- mAudioProcessingConfig.high_pass_filter.enabled = aPrefs.mHPFOn;
+ return config;
+}
+
+void MediaEngineWebRTCMicrophoneSource::ApplySettings(
+ const MediaEnginePrefs& aPrefs) {
+ AssertIsOnOwningThread();
+
+ TRACE("ApplySettings");
+ MOZ_ASSERT(
+ mTrack,
+ "ApplySetting is to be called only after SetTrack has been called");
RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
CubebUtils::AudioDeviceID deviceID = mDeviceInfo->DeviceID();
NS_DispatchToMainThread(NS_NewRunnableFunction(
- __func__, [this, that, deviceID, track = mTrack, prefs = aPrefs,
- audioProcessingConfig = mAudioProcessingConfig] {
+ __func__, [this, that, deviceID, track = mTrack, prefs = aPrefs] {
mSettings->mEchoCancellation.Value() = prefs.mAecOn;
mSettings->mAutoGainControl.Value() = prefs.mAgcOn;
mSettings->mNoiseSuppression.Value() = prefs.mNoiseOn;
mSettings->mChannelCount.Value() = prefs.mChannels;
- // The high-pass filter is not taken into account when activating the
- // pass through, since it's not controllable from content.
- bool passThrough = !(prefs.mAecOn || prefs.mAgcOn || prefs.mNoiseOn);
-
if (track->IsDestroyed()) {
return;
}
track->QueueControlMessageWithNoShutdown(
- [track, deviceID, inputProcessing = mInputProcessing,
- audioProcessingConfig, passThrough,
- requestedInputChannelCount = prefs.mChannels] {
- inputProcessing->ApplyConfig(track->Graph(),
- audioProcessingConfig);
- {
- TRACE("SetRequestedInputChannelCount");
- inputProcessing->SetRequestedInputChannelCount(
- track->Graph(), deviceID, requestedInputChannelCount);
- }
- {
- TRACE("SetPassThrough");
- inputProcessing->SetPassThrough(track->Graph(), passThrough);
- }
+ [track, deviceID, prefs, inputProcessing = mInputProcessing] {
+ inputProcessing->ApplySettings(track->Graph(), deviceID, prefs);
});
}));
}
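
The interleaved -/+ lines above make the resulting two-hop dispatch hard to read. Distilled, the new flow copies the prefs by value to the main thread, and from there into a graph-thread control message (identifiers as in the patch; surrounding members assumed):

    NS_DispatchToMainThread(NS_NewRunnableFunction(
        __func__, [this, that, deviceID, track = mTrack, prefs = aPrefs] {
          // Main thread: update mSettings, then hand off to the graph.
          if (track->IsDestroyed()) {
            return;
          }
          track->QueueControlMessageWithNoShutdown(
              [track, deviceID, prefs, inputProcessing = mInputProcessing] {
                // Graph thread: one call replaces ApplyConfig +
                // SetRequestedInputChannelCount + SetPassThrough.
                inputProcessing->ApplySettings(track->Graph(), deviceID, prefs);
              });
        }));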
@@ -408,57 +399,51 @@ void MediaEngineWebRTCMicrophoneSource::GetSettings(
}
AudioInputProcessing::AudioInputProcessing(uint32_t aMaxChannelCount)
- : mAudioProcessing(AudioProcessingBuilder().Create().release()),
- mRequestedInputChannelCount(aMaxChannelCount),
- mSkipProcessing(false),
- mInputDownmixBuffer(MAX_SAMPLING_FREQ * MAX_CHANNELS / 100),
+ : mInputDownmixBuffer(MAX_SAMPLING_FREQ * MAX_CHANNELS / 100),
mEnabled(false),
mEnded(false),
- mPacketCount(0) {}
+ mPacketCount(0) {
+ mSettings.mChannels = static_cast<int32_t>(std::min<uint32_t>(
+ std::numeric_limits<int32_t>::max(), aMaxChannelCount));
+}
void AudioInputProcessing::Disconnect(MediaTrackGraph* aGraph) {
// This method is just for asserts.
aGraph->AssertOnGraphThread();
}
-bool AudioInputProcessing::PassThrough(MediaTrackGraph* aGraph) const {
+bool AudioInputProcessing::IsPassThrough(MediaTrackGraph* aGraph) const {
aGraph->AssertOnGraphThread();
- return mSkipProcessing;
+ // The high-pass filter is not taken into account when activating the
+ // pass through, since it's not controllable from content.
+ return !(mSettings.mAecOn || mSettings.mAgcOn || mSettings.mNoiseOn);
}
-void AudioInputProcessing::SetPassThrough(MediaTrackGraph* aGraph,
- bool aPassThrough) {
+void AudioInputProcessing::PassThroughChanged(MediaTrackGraph* aGraph) {
aGraph->AssertOnGraphThread();
- if (aPassThrough == mSkipProcessing) {
- return;
- }
- mSkipProcessing = aPassThrough;
-
if (!mEnabled) {
MOZ_ASSERT(!mPacketizerInput);
return;
}
- if (aPassThrough) {
- // Turn on pass-through
+ if (IsPassThrough(aGraph)) {
+ // Switching to pass-through. Clear state so that it doesn't affect any
+ // future processing, if re-enabled.
ResetAudioProcessing(aGraph);
} else {
- // Turn off pass-through
+ // No longer pass-through. Processing will not use old state.
+ // Packetizer setup is deferred until needed.
MOZ_ASSERT(!mPacketizerInput);
- EnsureAudioProcessing(aGraph, mRequestedInputChannelCount);
}
}
uint32_t AudioInputProcessing::GetRequestedInputChannelCount() {
- return mRequestedInputChannelCount;
+ return mSettings.mChannels;
}
-void AudioInputProcessing::SetRequestedInputChannelCount(
- MediaTrackGraph* aGraph, CubebUtils::AudioDeviceID aDeviceId,
- uint32_t aRequestedInputChannelCount) {
- mRequestedInputChannelCount = aRequestedInputChannelCount;
-
+void AudioInputProcessing::RequestedInputChannelCountChanged(
+ MediaTrackGraph* aGraph, CubebUtils::AudioDeviceID aDeviceId) {
aGraph->ReevaluateInputDevice(aDeviceId);
}
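
The constructor change stores the channel count in mSettings.mChannels, which is int32_t, so the uint32_t argument is clamped before narrowing. As a standalone sketch of that conversion:

    #include <algorithm>
    #include <cstdint>
    #include <limits>

    // Narrow a uint32_t count to int32_t without overflow: values above
    // INT32_MAX saturate instead of wrapping negative.
    int32_t ClampToInt32(uint32_t aCount) {
      return static_cast<int32_t>(
          std::min<uint32_t>(std::numeric_limits<int32_t>::max(), aCount));
    }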
@@ -470,12 +455,7 @@ void AudioInputProcessing::Start(MediaTrackGraph* aGraph) {
}
mEnabled = true;
- if (mSkipProcessing) {
- return;
- }
-
MOZ_ASSERT(!mPacketizerInput);
- EnsureAudioProcessing(aGraph, mRequestedInputChannelCount);
}
void AudioInputProcessing::Stop(MediaTrackGraph* aGraph) {
@@ -487,7 +467,7 @@ void AudioInputProcessing::Stop(MediaTrackGraph* aGraph) {
mEnabled = false;
- if (mSkipProcessing) {
+ if (IsPassThrough(aGraph)) {
return;
}
@@ -605,10 +585,11 @@ void AudioInputProcessing::Stop(MediaTrackGraph* aGraph) {
//
// The D(N) frames of data are just forwarded from input to output without any
// processing
-void AudioInputProcessing::Process(MediaTrackGraph* aGraph, GraphTime aFrom,
- GraphTime aTo, AudioSegment* aInput,
+void AudioInputProcessing::Process(AudioProcessingTrack* aTrack,
+ GraphTime aFrom, GraphTime aTo,
+ AudioSegment* aInput,
AudioSegment* aOutput) {
- aGraph->AssertOnGraphThread();
+ aTrack->AssertOnGraphThread();
MOZ_ASSERT(aFrom <= aTo);
MOZ_ASSERT(!mEnded);
@@ -617,10 +598,11 @@ void AudioInputProcessing::Process(MediaTrackGraph* aGraph, GraphTime aFrom,
return;
}
+ MediaTrackGraph* graph = aTrack->Graph();
if (!mEnabled) {
LOG_FRAME("(Graph %p, Driver %p) AudioInputProcessing %p Filling %" PRId64
" frames of silence to output (disabled)",
- aGraph, aGraph->CurrentDriver(), this, need);
+ graph, graph->CurrentDriver(), this, need);
aOutput->AppendNullData(need);
return;
}
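
The packet-size bookkeeping in the packetization code below leans on webrtc::AudioProcessing consuming fixed 10 ms chunks. GetPacketSize() is not shown in this diff; assuming it follows that convention, it reduces to frames-per-10ms:

    #include <cstdint>

    // Assumed shape of GetPacketSize(): one 10 ms packet at the given rate,
    // e.g. 480 frames at 48 kHz. (Not part of this patch; for illustration.)
    constexpr uint32_t GetPacketSize(uint32_t aSampleRateHz) {
      return aSampleRateHz / 100;
    }
    static_assert(GetPacketSize(48000) == 480, "10 ms at 48 kHz");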
@@ -628,22 +610,20 @@ void AudioInputProcessing::Process(MediaTrackGraph* aGraph, GraphTime aFrom,
MOZ_ASSERT(aInput->GetDuration() == need,
"Wrong data length from input port source");
- if (PassThrough(aGraph)) {
+ if (IsPassThrough(graph)) {
LOG_FRAME(
"(Graph %p, Driver %p) AudioInputProcessing %p Forwarding %" PRId64
" frames of input data to output directly (PassThrough)",
- aGraph, aGraph->CurrentDriver(), this, aInput->GetDuration());
+ graph, graph->CurrentDriver(), this, aInput->GetDuration());
aOutput->AppendSegment(aInput);
return;
}
- // SetPassThrough(false) must be called before reaching here.
- MOZ_ASSERT(mPacketizerInput);
- // If mRequestedInputChannelCount is updated, create a new packetizer. No
- // need to change the pre-buffering since the rate is always the same. The
- // frames left in the packetizer would be replaced by null data and then
- // transferred to mSegment.
- EnsureAudioProcessing(aGraph, mRequestedInputChannelCount);
+ // If the requested input channel count is updated, create a new
+ // packetizer. No need to change the pre-buffering since the rate is always
+ // the same. The frames left in the packetizer would be replaced by null
+ // data and then transferred to mSegment.
+ EnsurePacketizer(aTrack);
// Preconditions of the audio-processing logic.
MOZ_ASSERT(static_cast<uint32_t>(mSegment.GetDuration()) +
@@ -655,10 +635,10 @@ void AudioInputProcessing::Process(MediaTrackGraph* aGraph, GraphTime aFrom,
MOZ_ASSERT(mSegment.GetDuration() >= 1);
MOZ_ASSERT(mSegment.GetDuration() <= mPacketizerInput->mPacketSize);
- PacketizeAndProcess(aGraph, *aInput);
+ PacketizeAndProcess(aTrack, *aInput);
LOG_FRAME("(Graph %p, Driver %p) AudioInputProcessing %p Buffer has %" PRId64
" frames of data now, after packetizing and processing",
- aGraph, aGraph->CurrentDriver(), this, mSegment.GetDuration());
+ graph, graph->CurrentDriver(), this, mSegment.GetDuration());
// By setting pre-buffering to the number of frames of one packet, and
// because the maximum number of frames stuck in the packetizer before
@@ -669,8 +649,7 @@ void AudioInputProcessing::Process(MediaTrackGraph* aGraph, GraphTime aFrom,
mSegment.RemoveLeading(need);
LOG_FRAME("(Graph %p, Driver %p) AudioInputProcessing %p moving %" PRId64
" frames of data to output, leaving %" PRId64 " frames in buffer",
- aGraph, aGraph->CurrentDriver(), this, need,
- mSegment.GetDuration());
+ graph, graph->CurrentDriver(), this, need, mSegment.GetDuration());
// Postconditions of the audio-processing logic.
MOZ_ASSERT(static_cast<uint32_t>(mSegment.GetDuration()) +
@@ -680,16 +659,16 @@ void AudioInputProcessing::Process(MediaTrackGraph* aGraph, GraphTime aFrom,
MOZ_ASSERT(mSegment.GetDuration() <= mPacketizerInput->mPacketSize);
}
-void AudioInputProcessing::ProcessOutputData(MediaTrackGraph* aGraph,
+void AudioInputProcessing::ProcessOutputData(AudioProcessingTrack* aTrack,
const AudioChunk& aChunk) {
MOZ_ASSERT(aChunk.ChannelCount() > 0);
- aGraph->AssertOnGraphThread();
+ aTrack->AssertOnGraphThread();
- if (!mEnabled || PassThrough(aGraph)) {
+ if (!mEnabled || IsPassThrough(aTrack->Graph())) {
return;
}
- TrackRate sampleRate = aGraph->GraphRate();
+ TrackRate sampleRate = aTrack->mSampleRate;
uint32_t framesPerPacket = GetPacketSize(sampleRate); // in frames
// Downmix from aChannels to MAX_CHANNELS if needed.
uint32_t channelCount =
@@ -727,6 +706,7 @@ void AudioInputProcessing::ProcessOutputData(MediaTrackGraph* aGraph,
if (mOutputBufferFrameCount == framesPerPacket) {
// Have a complete packet. Analyze it.
+ EnsureAudioProcessing(aTrack);
for (uint32_t channel = 0; channel < channelCount; channel++) {
channelPtrs[channel] = &mOutputBuffer[channel * framesPerPacket];
}
@@ -743,14 +723,15 @@ void AudioInputProcessing::ProcessOutputData(MediaTrackGraph* aGraph,
}
// Only called if we're not in passthrough mode
-void AudioInputProcessing::PacketizeAndProcess(MediaTrackGraph* aGraph,
+void AudioInputProcessing::PacketizeAndProcess(AudioProcessingTrack* aTrack,
const AudioSegment& aSegment) {
- MOZ_ASSERT(!PassThrough(aGraph),
+ MediaTrackGraph* graph = aTrack->Graph();
+ MOZ_ASSERT(!IsPassThrough(graph),
"This should be bypassed when in PassThrough mode.");
MOZ_ASSERT(mEnabled);
MOZ_ASSERT(mPacketizerInput);
MOZ_ASSERT(mPacketizerInput->mPacketSize ==
- GetPacketSize(aGraph->GraphRate()));
+ GetPacketSize(aTrack->mSampleRate));
// Calculate number of the pending frames in mChunksInPacketizer.
auto pendingFrames = [&]() {
@@ -792,7 +773,7 @@ void AudioInputProcessing::PacketizeAndProcess(MediaTrackGraph* aGraph,
LOG_FRAME(
"(Graph %p, Driver %p) AudioInputProcessing %p Packetizing %zu frames. "
"Packetizer has %u frames (enough for %u packets) now",
- aGraph, aGraph->CurrentDriver(), this, frameCount,
+ graph, graph->CurrentDriver(), this, frameCount,
mPacketizerInput->FramesAvailable(),
mPacketizerInput->PacketsAvailable());
@@ -850,9 +831,10 @@ void AudioInputProcessing::PacketizeAndProcess(MediaTrackGraph* aGraph,
deinterleavedPacketizedInputDataChannelPointers.Elements());
}
- StreamConfig inputConfig(aGraph->GraphRate(), channelCountInput);
+ StreamConfig inputConfig(aTrack->mSampleRate, channelCountInput);
StreamConfig outputConfig = inputConfig;
+ EnsureAudioProcessing(aTrack);
// Bug 1404965: Get the right delay here, it saves some work down the line.
mAudioProcessing->set_stream_delay_ms(0);
@@ -958,7 +940,7 @@ void AudioInputProcessing::PacketizeAndProcess(MediaTrackGraph* aGraph,
"(Graph %p, Driver %p) AudioInputProcessing %p Appending %u frames of "
"packetized audio, leaving %u frames in packetizer (%" PRId64
" frames in mChunksInPacketizer)",
- aGraph, aGraph->CurrentDriver(), this, mPacketizerInput->mPacketSize,
+ graph, graph->CurrentDriver(), this, mPacketizerInput->mPacketSize,
mPacketizerInput->FramesAvailable(), pendingFrames());
// Postcondition of the Principal-labelling logic.
@@ -971,17 +953,36 @@ void AudioInputProcessing::DeviceChanged(MediaTrackGraph* aGraph) {
aGraph->AssertOnGraphThread();
// Reset some processing
- mAudioProcessing->Initialize();
+ if (mAudioProcessing) {
+ mAudioProcessing->Initialize();
+ }
LOG_FRAME(
"(Graph %p, Driver %p) AudioInputProcessing %p Reinitializing audio "
"processing",
aGraph, aGraph->CurrentDriver(), this);
}
-void AudioInputProcessing::ApplyConfig(MediaTrackGraph* aGraph,
- const AudioProcessing::Config& aConfig) {
+void AudioInputProcessing::ApplySettings(MediaTrackGraph* aGraph,
+ CubebUtils::AudioDeviceID aDeviceID,
+ const MediaEnginePrefs& aSettings) {
+ TRACE("AudioInputProcessing::ApplySettings");
aGraph->AssertOnGraphThread();
- mAudioProcessing->ApplyConfig(aConfig);
+
+ // Read previous state from mSettings.
+ uint32_t oldChannelCount = GetRequestedInputChannelCount();
+ bool wasPassThrough = IsPassThrough(aGraph);
+
+ mSettings = aSettings;
+ if (mAudioProcessing) {
+ mAudioProcessing->ApplyConfig(ConfigForPrefs(aSettings));
+ }
+
+ if (oldChannelCount != GetRequestedInputChannelCount()) {
+ RequestedInputChannelCountChanged(aGraph, aDeviceID);
+ }
+ if (wasPassThrough != IsPassThrough(aGraph)) {
+ PassThroughChanged(aGraph);
+ }
}
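
ApplySettings() is a compare-then-notify update: derived state (channel count, pass-through) is snapshotted before mSettings is overwritten, and the *Changed handlers fire only on real transitions. A self-contained sketch of the pattern with illustrative names:

    #include <cstdint>

    struct Prefs { bool mAecOn = false; int32_t mChannels = 1; };

    class Applier {
     public:
      // Mirrors ApplySettings(): commit first, then notify on transitions.
      void Apply(const Prefs& aNew) {
        const bool wasPassThrough = IsPassThrough();
        const int32_t oldChannels = mPrefs.mChannels;
        mPrefs = aNew;
        if (oldChannels != mPrefs.mChannels) {
          OnChannelCountChanged();  // e.g. ReevaluateInputDevice()
        }
        if (wasPassThrough != IsPassThrough()) {
          OnPassThroughChanged();   // e.g. reset processing state
        }
      }

     private:
      bool IsPassThrough() const { return !mPrefs.mAecOn; }
      void OnChannelCountChanged() {}
      void OnPassThroughChanged() {}
      Prefs mPrefs;
    };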
void AudioInputProcessing::End() {
@@ -995,14 +996,15 @@ TrackTime AudioInputProcessing::NumBufferedFrames(
return mSegment.GetDuration();
}
-void AudioInputProcessing::EnsureAudioProcessing(MediaTrackGraph* aGraph,
- uint32_t aChannels) {
- aGraph->AssertOnGraphThread();
- MOZ_ASSERT(aChannels > 0);
+void AudioInputProcessing::EnsurePacketizer(AudioProcessingTrack* aTrack) {
+ aTrack->AssertOnGraphThread();
MOZ_ASSERT(mEnabled);
- MOZ_ASSERT(!mSkipProcessing);
+ MediaTrackGraph* graph = aTrack->Graph();
+ MOZ_ASSERT(!IsPassThrough(graph));
- if (mPacketizerInput && mPacketizerInput->mChannels == aChannels) {
+ uint32_t channelCount = GetRequestedInputChannelCount();
+ MOZ_ASSERT(channelCount > 0);
+ if (mPacketizerInput && mPacketizerInput->mChannels == channelCount) {
return;
}
@@ -1010,7 +1012,7 @@ void AudioInputProcessing::EnsureAudioProcessing(MediaTrackGraph* aGraph,
// need to change pre-buffering since the packet size is the same as the old
// one, since the rate is a constant.
MOZ_ASSERT_IF(mPacketizerInput, mPacketizerInput->mPacketSize ==
- GetPacketSize(aGraph->GraphRate()));
+ GetPacketSize(aTrack->mSampleRate));
bool needPreBuffering = !mPacketizerInput;
if (mPacketizerInput) {
const TrackTime numBufferedFrames =
@@ -1020,24 +1022,62 @@ void AudioInputProcessing::EnsureAudioProcessing(MediaTrackGraph* aGraph,
mChunksInPacketizer.clear();
}
- mPacketizerInput.emplace(GetPacketSize(aGraph->GraphRate()), aChannels);
+ mPacketizerInput.emplace(GetPacketSize(aTrack->mSampleRate), channelCount);
if (needPreBuffering) {
LOG_FRAME(
"(Graph %p, Driver %p) AudioInputProcessing %p: Adding %u frames of "
"silence as pre-buffering",
- aGraph, aGraph->CurrentDriver(), this, mPacketizerInput->mPacketSize);
+ graph, graph->CurrentDriver(), this, mPacketizerInput->mPacketSize);
AudioSegment buffering;
buffering.AppendNullData(
static_cast<TrackTime>(mPacketizerInput->mPacketSize));
- PacketizeAndProcess(aGraph, buffering);
+ PacketizeAndProcess(aTrack, buffering);
+ }
+}
+
+void AudioInputProcessing::EnsureAudioProcessing(AudioProcessingTrack* aTrack) {
+ aTrack->AssertOnGraphThread();
+
+ MediaTrackGraph* graph = aTrack->Graph();
+ // If the AEC might need to deal with drift then inform it of this and it
+ // will be less conservative about echo suppression. This can lead to some
+ // suppression of non-echo signal, so do this only when drift is expected.
+ // https://bugs.chromium.org/p/webrtc/issues/detail?id=11985#c2
+ bool haveAECAndDrift = mSettings.mAecOn;
+ if (haveAECAndDrift) {
+ if (mSettings.mExpectDrift < 0) {
+ haveAECAndDrift =
+ graph->OutputForAECMightDrift() ||
+ aTrack->GetDeviceInputTrackGraphThread()->AsNonNativeInputTrack();
+ } else {
+ haveAECAndDrift = mSettings.mExpectDrift > 0;
+ }
+ }
+ if (!mAudioProcessing || haveAECAndDrift != mHadAECAndDrift) {
+ TRACE("AudioProcessing creation");
+ LOG("Track %p AudioInputProcessing %p creating AudioProcessing. "
+ "aec+drift: %s",
+ aTrack, this, haveAECAndDrift ? "Y" : "N");
+ mHadAECAndDrift = haveAECAndDrift;
+ AudioProcessingBuilder builder;
+ builder.SetConfig(ConfigForPrefs(mSettings));
+ if (haveAECAndDrift) {
+ // Setting an EchoControlFactory always enables AEC, overriding
+ // Config::echo_canceller.enabled, so do this only when AEC is enabled.
+ EchoCanceller3Config aec3Config;
+ aec3Config.echo_removal_control.has_clock_drift = true;
+ builder.SetEchoControlFactory(
+ std::make_unique<EchoCanceller3Factory>(aec3Config));
+ }
+ mAudioProcessing.reset(builder.Create().release());
}
}
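
This is the core of the drift handling: when, and only when, AEC is on and drift is expected, the AudioProcessing instance is rebuilt through an EchoCanceller3Factory whose config advertises clock drift. A condensed sketch of that construction, assuming the libwebrtc APIs this patch includes:

    #include <memory>
    #include "api/audio/echo_canceller3_config.h"
    #include "api/audio/echo_canceller3_factory.h"
    #include "api/scoped_refptr.h"
    #include "modules/audio_processing/include/audio_processing.h"

    rtc::scoped_refptr<webrtc::AudioProcessing> BuildApm(
        const webrtc::AudioProcessing::Config& aConfig, bool aAecWithDrift) {
      webrtc::AudioProcessingBuilder builder;
      builder.SetConfig(aConfig);
      if (aAecWithDrift) {
        // An EchoControlFactory force-enables AEC, overriding
        // Config::echo_canceller.enabled, so set it only when AEC is on.
        webrtc::EchoCanceller3Config aec3Config;
        aec3Config.echo_removal_control.has_clock_drift = true;
        builder.SetEchoControlFactory(
            std::make_unique<webrtc::EchoCanceller3Factory>(aec3Config));
      }
      return builder.Create();
    }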
void AudioInputProcessing::ResetAudioProcessing(MediaTrackGraph* aGraph) {
aGraph->AssertOnGraphThread();
- MOZ_ASSERT(mSkipProcessing || !mEnabled);
+ MOZ_ASSERT(IsPassThrough(aGraph) || !mEnabled);
MOZ_ASSERT(mPacketizerInput);
LOG_FRAME(
@@ -1047,7 +1087,9 @@ void AudioInputProcessing::ResetAudioProcessing(MediaTrackGraph* aGraph) {
// Reset AudioProcessing so that if we resume processing in the future it
// doesn't depend on old state.
- mAudioProcessing->Initialize();
+ if (mAudioProcessing) {
+ mAudioProcessing->Initialize();
+ }
MOZ_ASSERT(static_cast<uint32_t>(mSegment.GetDuration()) +
mPacketizerInput->FramesAvailable() ==
@@ -1124,9 +1166,8 @@ void AudioProcessingTrack::ProcessInput(GraphTime aFrom, GraphTime aTo,
} else {
MOZ_ASSERT(mInputs.Length() == 1);
AudioSegment data;
- DeviceInputConsumerTrack::GetInputSourceData(data, mInputs[0], aFrom,
- aTo);
- mInputProcessing->Process(Graph(), aFrom, aTo, &data,
+ DeviceInputConsumerTrack::GetInputSourceData(data, aFrom, aTo);
+ mInputProcessing->Process(this, aFrom, aTo, &data,
GetData<AudioSegment>());
}
MOZ_ASSERT(TrackTimeToGraphTime(GetEnd()) == aTo);
@@ -1142,7 +1183,7 @@ void AudioProcessingTrack::NotifyOutputData(MediaTrackGraph* aGraph,
MOZ_ASSERT(mGraph == aGraph, "Cannot feed audio output to another graph");
AssertOnGraphThread();
if (mInputProcessing) {
- mInputProcessing->ProcessOutputData(aGraph, aChunk);
+ mInputProcessing->ProcessOutputData(this, aChunk);
}
}
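
NotifyOutputData() here is the far-end (reverse) side of echo cancellation: speaker audio flows into ProcessOutputData() so the AEC can subtract it from the microphone capture. Conceptually, per 10 ms packet (not Gecko code; plain libwebrtc APM usage):

    #include "modules/audio_processing/include/audio_processing.h"

    // aSpeaker/aMic each hold one 10 ms planar float packet per channel.
    void TenMsTick(webrtc::AudioProcessing* aApm, float* const* aSpeaker,
                   float* const* aMic) {
      webrtc::StreamConfig stereo48k(48000, 2);
      aApm->ProcessReverseStream(aSpeaker, stereo48k, stereo48k, aSpeaker);
      aApm->set_stream_delay_ms(0);  // see bug 1404965 above: real delay TBD
      aApm->ProcessStream(aMic, stereo48k, stereo48k, aMic);
    }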
diff --git a/dom/media/webrtc/MediaEngineWebRTCAudio.h b/dom/media/webrtc/MediaEngineWebRTCAudio.h
index e71b5ef826..6b1fbf0089 100644
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.h
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.h
@@ -91,8 +91,7 @@ class MediaEngineWebRTCMicrophoneSource : public MediaEngineSource {
// Current state of the resource for this source.
MediaEngineSourceState mState;
- // The current preferences that will be forwarded to mAudioProcessingConfig
- // below.
+ // The current preferences that will be forwarded to mInputProcessing below.
MediaEnginePrefs mCurrentPrefs;
// The AudioProcessingTrack used to interface with the MediaTrackGraph. Set in
@@ -101,10 +100,6 @@ class MediaEngineWebRTCMicrophoneSource : public MediaEngineSource {
// See note at the top of this class.
RefPtr<AudioInputProcessing> mInputProcessing;
-
- // Copy of the config currently applied to AudioProcessing through
- // mInputProcessing.
- webrtc::AudioProcessing::Config mAudioProcessingConfig;
};
// This class is created on the MediaManager thread, and then exclusively used
@@ -113,15 +108,16 @@ class MediaEngineWebRTCMicrophoneSource : public MediaEngineSource {
class AudioInputProcessing : public AudioDataListener {
public:
explicit AudioInputProcessing(uint32_t aMaxChannelCount);
- void Process(MediaTrackGraph* aGraph, GraphTime aFrom, GraphTime aTo,
+ void Process(AudioProcessingTrack* aTrack, GraphTime aFrom, GraphTime aTo,
AudioSegment* aInput, AudioSegment* aOutput);
- void ProcessOutputData(MediaTrackGraph* aGraph, const AudioChunk& aChunk);
+ void ProcessOutputData(AudioProcessingTrack* aTrack,
+ const AudioChunk& aChunk);
bool IsVoiceInput(MediaTrackGraph* aGraph) const override {
// If we're passing data directly without AEC or any other process, this
// means that all voice-processing has been disabled intentionally. In this
// case, consider that the device is not used for voice input.
- return !PassThrough(aGraph);
+ return !IsPassThrough(aGraph);
}
void Start(MediaTrackGraph* aGraph);
@@ -135,23 +131,20 @@ class AudioInputProcessing : public AudioDataListener {
void Disconnect(MediaTrackGraph* aGraph) override;
- void PacketizeAndProcess(MediaTrackGraph* aGraph,
+ void PacketizeAndProcess(AudioProcessingTrack* aTrack,
const AudioSegment& aSegment);
- void SetPassThrough(MediaTrackGraph* aGraph, bool aPassThrough);
uint32_t GetRequestedInputChannelCount();
- void SetRequestedInputChannelCount(MediaTrackGraph* aGraph,
- CubebUtils::AudioDeviceID aDeviceId,
- uint32_t aRequestedInputChannelCount);
- // This is true when all processing is disabled, we can skip
+ // This is true when all processing is disabled, in which case we can skip
// packetization, resampling and other processing passes.
- bool PassThrough(MediaTrackGraph* aGraph) const;
+ bool IsPassThrough(MediaTrackGraph* aGraph) const;
// This allows changing the APM options, enabling or disabling processing
- // steps. The config gets applied the next time we're about to process input
+ // steps. The settings get applied the next time we're about to process input
// data.
- void ApplyConfig(MediaTrackGraph* aGraph,
- const webrtc::AudioProcessing::Config& aConfig);
+ void ApplySettings(MediaTrackGraph* aGraph,
+ CubebUtils::AudioDeviceID aDeviceID,
+ const MediaEnginePrefs& aSettings);
void End();
@@ -164,9 +157,18 @@ class AudioInputProcessing : public AudioDataListener {
bool IsEnded() const { return mEnded; }
+ // For testing:
+ bool HadAECAndDrift() const { return mHadAECAndDrift; }
+
private:
~AudioInputProcessing() = default;
- void EnsureAudioProcessing(MediaTrackGraph* aGraph, uint32_t aChannels);
+ webrtc::AudioProcessing::Config ConfigForPrefs(
+ const MediaEnginePrefs& aPrefs);
+ void PassThroughChanged(MediaTrackGraph* aGraph);
+ void RequestedInputChannelCountChanged(MediaTrackGraph* aGraph,
+ CubebUtils::AudioDeviceID aDeviceId);
+ void EnsurePacketizer(AudioProcessingTrack* aTrack);
+ void EnsureAudioProcessing(AudioProcessingTrack* aTrack);
void ResetAudioProcessing(MediaTrackGraph* aGraph);
PrincipalHandle GetCheckedPrincipal(const AudioSegment& aSegment);
// This implements the processing algorithm to apply to the input (e.g. a
@@ -174,17 +176,16 @@ class AudioInputProcessing : public AudioDataListener {
// class only accepts audio chunks of 10ms. It has two inputs and one output:
// it is fed the speaker data and the microphone data. It outputs processed
// input data.
- const UniquePtr<webrtc::AudioProcessing> mAudioProcessing;
+ UniquePtr<webrtc::AudioProcessing> mAudioProcessing;
+ // Whether mAudioProcessing was created for AEC with clock drift.
+ // Meaningful only when mAudioProcessing is non-null.
+ bool mHadAECAndDrift = false;
// Packetizer to be able to feed 10ms packets to the input side of
// mAudioProcessing. Not used if the processing is bypassed.
Maybe<AudioPacketizer<AudioDataValue, float>> mPacketizerInput;
- // The number of channels asked for by content, after clamping to the range of
- // legal channel count for this particular device.
- uint32_t mRequestedInputChannelCount;
- // mSkipProcessing is true if none of the processing passes are enabled,
- // because of prefs or constraints. This allows simply copying the audio into
- // the MTG, skipping resampling and the whole webrtc.org code.
- bool mSkipProcessing;
+ // The current settings from about:config preferences and content-provided
+ // constraints.
+ MediaEnginePrefs mSettings;
// Buffer for up to one 10ms packet of planar mixed audio output for the
// reverse-stream (speaker data) of mAudioProcessing AEC.
// Length is packet size * channel count, regardless of how many frames are
diff --git a/dom/media/webrtc/jsapi/PeerConnectionCtx.cpp b/dom/media/webrtc/jsapi/PeerConnectionCtx.cpp
index d293fa0be6..b9b9ab8fc5 100644
--- a/dom/media/webrtc/jsapi/PeerConnectionCtx.cpp
+++ b/dom/media/webrtc/jsapi/PeerConnectionCtx.cpp
@@ -129,11 +129,12 @@ class DummyAudioProcessing : public AudioProcessing {
}
void set_stream_key_pressed(bool) override { MOZ_CRASH("Unexpected call"); }
bool CreateAndAttachAecDump(absl::string_view, int64_t,
- rtc::TaskQueue*) override {
+ absl::Nonnull<TaskQueueBase*>) override {
MOZ_CRASH("Unexpected call");
return false;
}
- bool CreateAndAttachAecDump(FILE*, int64_t, rtc::TaskQueue*) override {
+ bool CreateAndAttachAecDump(FILE*, int64_t,
+ absl::Nonnull<TaskQueueBase*>) override {
MOZ_CRASH("Unexpected call");
return false;
}
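
The AecDump signature change tracks upstream: rtc::TaskQueue* becomes absl::Nonnull<webrtc::TaskQueueBase*>, so any AudioProcessing subclass must update both overloads in lockstep or the override declarations fail to compile. Sketch of the adjusted pair (stubbed like DummyAudioProcessing above; the remaining pure-virtual methods are elided):

    #include <cstdint>
    #include <cstdio>
    #include "absl/base/nullability.h"
    #include "absl/strings/string_view.h"
    #include "api/task_queue/task_queue_base.h"
    #include "modules/audio_processing/include/audio_processing.h"

    class NoopApm : public webrtc::AudioProcessing {
      bool CreateAndAttachAecDump(
          absl::string_view aFileName, int64_t aMaxLogSizeBytes,
          absl::Nonnull<webrtc::TaskQueueBase*> aWorkerQueue) override {
        return false;  // AEC dumps unsupported in this sketch
      }
      bool CreateAndAttachAecDump(
          FILE* aHandle, int64_t aMaxLogSizeBytes,
          absl::Nonnull<webrtc::TaskQueueBase*> aWorkerQueue) override {
        return false;
      }
      // ... other AudioProcessing pure-virtual methods omitted ...
    };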
diff --git a/dom/media/webrtc/jsapi/RTCRtpScriptTransform.cpp b/dom/media/webrtc/jsapi/RTCRtpScriptTransform.cpp
index 43f34c456f..8fa0bade00 100644
--- a/dom/media/webrtc/jsapi/RTCRtpScriptTransform.cpp
+++ b/dom/media/webrtc/jsapi/RTCRtpScriptTransform.cpp
@@ -47,6 +47,22 @@ already_AddRefed<RTCRtpScriptTransform> RTCRtpScriptTransform::Constructor(
aRv.Throw(NS_ERROR_FAILURE);
return nullptr;
}
+
+ // The spec currently fails to describe what to do when the worker is closing
+ // or closed; the following placeholder text can be found in the spec at:
+ // https://w3c.github.io/webrtc-encoded-transform/#dom-rtcrtpscripttransform-rtcrtpscripttransform
+ //
+ // > FIXME: Describe error handling (worker closing flag true at
+ // > RTCRtpScriptTransform creation time. And worker being terminated while
+ // > transform is processing data).
+ //
+ // Because our worker runnables do not like to be pointed at a nonexistent
+ // worker, we throw in this case.
+ if (!aWorker.IsEligibleForMessaging()) {
+ aRv.Throw(NS_ERROR_FAILURE);
+ return nullptr;
+ }
+
auto newTransform = MakeRefPtr<RTCRtpScriptTransform>(ownerWindow);
RefPtr<RTCTransformEventRunnable> runnable =
new RTCTransformEventRunnable(aWorker, &newTransform->GetProxy());
diff --git a/dom/media/webrtc/jsapi/RTCRtpScriptTransformer.cpp b/dom/media/webrtc/jsapi/RTCRtpScriptTransformer.cpp
index 126020a94f..f2fbd6d637 100644
--- a/dom/media/webrtc/jsapi/RTCRtpScriptTransformer.cpp
+++ b/dom/media/webrtc/jsapi/RTCRtpScriptTransformer.cpp
@@ -148,7 +148,7 @@ WritableStreamRTCFrameSink::WritableStreamRTCFrameSink(
WritableStreamRTCFrameSink::~WritableStreamRTCFrameSink() = default;
-already_AddRefed<Promise> WritableStreamRTCFrameSink::WriteCallback(
+already_AddRefed<Promise> WritableStreamRTCFrameSink::WriteCallbackImpl(
JSContext* aCx, JS::Handle<JS::Value> aChunk,
WritableStreamDefaultController& aController, ErrorResult& aError) {
// Spec does not say to do this right now. Might be a spec bug, needs
diff --git a/dom/media/webrtc/jsapi/RTCRtpScriptTransformer.h b/dom/media/webrtc/jsapi/RTCRtpScriptTransformer.h
index 6d61ac3cd5..7a22612254 100644
--- a/dom/media/webrtc/jsapi/RTCRtpScriptTransformer.h
+++ b/dom/media/webrtc/jsapi/RTCRtpScriptTransformer.h
@@ -87,7 +87,7 @@ class WritableStreamRTCFrameSink final
NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(WritableStreamRTCFrameSink,
UnderlyingSinkAlgorithmsWrapper)
- already_AddRefed<Promise> WriteCallback(
+ already_AddRefed<Promise> WriteCallbackImpl(
JSContext* aCx, JS::Handle<JS::Value> aChunk,
WritableStreamDefaultController& aController,
ErrorResult& aError) override;
diff --git a/dom/media/webrtc/jsapi/WebrtcGlobalInformation.cpp b/dom/media/webrtc/jsapi/WebrtcGlobalInformation.cpp
index 4e4bf9ab93..eabf7ee335 100644
--- a/dom/media/webrtc/jsapi/WebrtcGlobalInformation.cpp
+++ b/dom/media/webrtc/jsapi/WebrtcGlobalInformation.cpp
@@ -234,7 +234,7 @@ void WebrtcGlobalInformation::GetStatsHistorySince(
auto statsAfter = aAfter.WasPassed() ? Some(aAfter.Value()) : Nothing();
auto sdpAfter = aSdpAfter.WasPassed() ? Some(aSdpAfter.Value()) : Nothing();
- WebrtcGlobalStatsHistory::GetHistory(pcIdFilter).apply([&](auto& hist) {
+ WebrtcGlobalStatsHistory::GetHistory(pcIdFilter).apply([&](const auto& hist) {
if (!history.mReports.AppendElements(hist->Since(statsAfter), fallible)) {
mozalloc_handle_oom(0);
}
diff --git a/dom/media/webrtc/libwebrtcglue/AudioConduit.cpp b/dom/media/webrtc/libwebrtcglue/AudioConduit.cpp
index 49f049cd21..91ad0d848c 100644
--- a/dom/media/webrtc/libwebrtcglue/AudioConduit.cpp
+++ b/dom/media/webrtc/libwebrtcglue/AudioConduit.cpp
@@ -907,7 +907,7 @@ RtpExtList WebrtcAudioConduit::FilterExtensions(LocalDirection aDirection,
webrtc::SdpAudioFormat WebrtcAudioConduit::CodecConfigToLibwebrtcFormat(
const AudioCodecConfig& aConfig) {
- webrtc::SdpAudioFormat::Parameters parameters;
+ webrtc::CodecParameterMap parameters;
if (aConfig.mName == kOpusCodecName) {
if (aConfig.mChannels == 2) {
parameters[kCodecParamStereo] = kParamValueTrue;
diff --git a/dom/media/webrtc/libwebrtcglue/VideoConduit.cpp b/dom/media/webrtc/libwebrtcglue/VideoConduit.cpp
index 73e59f5ee2..5862237711 100644
--- a/dom/media/webrtc/libwebrtcglue/VideoConduit.cpp
+++ b/dom/media/webrtc/libwebrtcglue/VideoConduit.cpp
@@ -190,7 +190,7 @@ webrtc::VideoCodecType SupportedCodecType(webrtc::VideoCodecType aType) {
rtc::scoped_refptr<webrtc::VideoEncoderConfig::EncoderSpecificSettings>
ConfigureVideoEncoderSettings(const VideoCodecConfig& aConfig,
const WebrtcVideoConduit* aConduit,
- webrtc::SdpVideoFormat::Parameters& aParameters) {
+ webrtc::CodecParameterMap& aParameters) {
bool is_screencast =
aConduit->CodecMode() == webrtc::VideoCodecMode::kScreensharing;
// No automatic resizing when using simulcast or screencast.
diff --git a/dom/media/webrtc/libwebrtcglue/WebrtcCallWrapper.cpp b/dom/media/webrtc/libwebrtcglue/WebrtcCallWrapper.cpp
index 824f1cf6eb..b03c1772c4 100644
--- a/dom/media/webrtc/libwebrtcglue/WebrtcCallWrapper.cpp
+++ b/dom/media/webrtc/libwebrtcglue/WebrtcCallWrapper.cpp
@@ -11,6 +11,8 @@
#include "TaskQueueWrapper.h"
// libwebrtc includes
+#include "api/environment/environment.h"
+#include "api/environment/environment_factory.h"
#include "call/rtp_transport_controller_send_factory.h"
namespace mozilla {
@@ -28,17 +30,16 @@ namespace mozilla {
std::move(eventLog), std::move(taskQueueFactory), aTimestampMaker,
std::move(aShutdownTicket));
+ webrtc::Environment env = CreateEnvironment(
+ wrapper->mEventLog.get(), wrapper->mClock.GetRealTimeClockRaw(),
+ wrapper->mTaskQueueFactory.get(), aSharedState->mTrials.get());
+
wrapper->mCallThread->Dispatch(
- NS_NewRunnableFunction(__func__, [wrapper, aSharedState] {
- webrtc::CallConfig config(wrapper->mEventLog.get());
+ NS_NewRunnableFunction(__func__, [wrapper, aSharedState, env] {
+ webrtc::CallConfig config(env, nullptr);
config.audio_state =
webrtc::AudioState::Create(aSharedState->mAudioStateConfig);
- config.task_queue_factory = wrapper->mTaskQueueFactory.get();
- config.trials = aSharedState->mTrials.get();
- wrapper->SetCall(WrapUnique(webrtc::Call::Create(
- config, &wrapper->mClock,
- webrtc::RtpTransportControllerSendFactory().Create(
- config.ExtractTransportConfig(), &wrapper->mClock)).release()));
+ wrapper->SetCall(WrapUnique(webrtc::Call::Create(config).release()));
}));
return wrapper;
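
The CallConfig rewrite follows libwebrtc's new webrtc::Environment: event log, clock, task-queue factory and field trials are bundled once via CreateEnvironment() instead of being set as individual config fields. Shape of the construction, with the wrapper's member types assumed:

    #include "api/environment/environment.h"
    #include "api/environment/environment_factory.h"
    #include "call/call_config.h"

    webrtc::CallConfig MakeCallConfig(
        webrtc::RtcEventLog* aEventLog, webrtc::Clock* aClock,
        webrtc::TaskQueueFactory* aTaskQueueFactory,
        const webrtc::FieldTrialsView* aTrials) {
      webrtc::Environment env = webrtc::CreateEnvironment(
          aEventLog, aClock, aTaskQueueFactory, aTrials);
      // Second argument (network task queue) stays null, as in the patch.
      return webrtc::CallConfig(env, nullptr);
    }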
diff --git a/dom/media/webrtc/libwebrtcglue/WebrtcGmpVideoCodec.h b/dom/media/webrtc/libwebrtcglue/WebrtcGmpVideoCodec.h
index 865f9afff0..b8ee44c6b3 100644
--- a/dom/media/webrtc/libwebrtcglue/WebrtcGmpVideoCodec.h
+++ b/dom/media/webrtc/libwebrtcglue/WebrtcGmpVideoCodec.h
@@ -289,7 +289,7 @@ class WebrtcGmpVideoEncoder : public GMPVideoEncoderCallbackProxy,
GMPVideoHost* mHost;
GMPVideoCodec mCodecParams;
uint32_t mMaxPayloadSize;
- const webrtc::SdpVideoFormat::Parameters mFormatParams;
+ const webrtc::CodecParameterMap mFormatParams;
webrtc::CodecSpecificInfo mCodecSpecificInfo;
webrtc::H264BitstreamParser mH264BitstreamParser;
// Protects mCallback
diff --git a/dom/media/webrtc/libwebrtcglue/WebrtcMediaDataEncoderCodec.cpp b/dom/media/webrtc/libwebrtcglue/WebrtcMediaDataEncoderCodec.cpp
index 844542cd0d..f5240ffa22 100644
--- a/dom/media/webrtc/libwebrtcglue/WebrtcMediaDataEncoderCodec.cpp
+++ b/dom/media/webrtc/libwebrtcglue/WebrtcMediaDataEncoderCodec.cpp
@@ -75,7 +75,7 @@ static const char* PacketModeStr(const webrtc::CodecSpecificInfo& aInfo) {
}
static std::pair<H264_PROFILE, H264_LEVEL> ConvertProfileLevel(
- const webrtc::SdpVideoFormat::Parameters& aParameters) {
+ const webrtc::CodecParameterMap& aParameters) {
const absl::optional<webrtc::H264ProfileLevelId> profileLevel =
webrtc::ParseSdpForH264ProfileLevelId(aParameters);
@@ -143,9 +143,9 @@ WebrtcMediaDataEncoder::~WebrtcMediaDataEncoder() {
}
}
-static void InitCodecSpecficInfo(
- webrtc::CodecSpecificInfo& aInfo, const webrtc::VideoCodec* aCodecSettings,
- const webrtc::SdpVideoFormat::Parameters& aParameters) {
+static void InitCodecSpecficInfo(webrtc::CodecSpecificInfo& aInfo,
+ const webrtc::VideoCodec* aCodecSettings,
+ const webrtc::CodecParameterMap& aParameters) {
MOZ_ASSERT(aCodecSettings);
aInfo.codecType = aCodecSettings->codecType;
@@ -290,13 +290,11 @@ already_AddRefed<MediaDataEncoder> WebrtcMediaDataEncoder::CreateEncoder(
MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Unsupported codec type");
}
EncoderConfig config(
- type, {aCodecSettings->width, aCodecSettings->height},
- MediaDataEncoder::Usage::Realtime, MediaDataEncoder::PixelFormat::YUV420P,
- MediaDataEncoder::PixelFormat::YUV420P, aCodecSettings->maxFramerate,
- keyframeInterval, mBitrateAdjuster.GetTargetBitrateBps(),
- MediaDataEncoder::BitrateMode::Variable,
- MediaDataEncoder::HardwarePreference::None,
- MediaDataEncoder::ScalabilityMode::None, specific);
+ type, {aCodecSettings->width, aCodecSettings->height}, Usage::Realtime,
+ dom::ImageBitmapFormat::YUV420P, dom::ImageBitmapFormat::YUV420P,
+ aCodecSettings->maxFramerate, keyframeInterval,
+ mBitrateAdjuster.GetTargetBitrateBps(), BitrateMode::Variable,
+ HardwarePreference::None, ScalabilityMode::None, specific);
return mFactory->CreateEncoder(config, mTaskQueue);
}
diff --git a/dom/media/webrtc/libwebrtcglue/WebrtcMediaDataEncoderCodec.h b/dom/media/webrtc/libwebrtcglue/WebrtcMediaDataEncoderCodec.h
index 9d750e85b2..0c2070f6a9 100644
--- a/dom/media/webrtc/libwebrtcglue/WebrtcMediaDataEncoderCodec.h
+++ b/dom/media/webrtc/libwebrtcglue/WebrtcMediaDataEncoderCodec.h
@@ -65,7 +65,7 @@ class WebrtcMediaDataEncoder : public RefCountedWebrtcVideoEncoder {
MediaResult mError = NS_OK;
VideoInfo mInfo;
- webrtc::SdpVideoFormat::Parameters mFormatParams;
+ webrtc::CodecParameterMap mFormatParams;
webrtc::CodecSpecificInfo mCodecSpecific;
webrtc::BitrateAdjuster mBitrateAdjuster;
uint32_t mMaxFrameRate = {0};
diff --git a/dom/media/webrtc/tests/mochitests/test_peerConnection_toJSON.html b/dom/media/webrtc/tests/mochitests/test_peerConnection_toJSON.html
index 96c2c42b78..1f3662d9fc 100644
--- a/dom/media/webrtc/tests/mochitests/test_peerConnection_toJSON.html
+++ b/dom/media/webrtc/tests/mochitests/test_peerConnection_toJSON.html
@@ -28,10 +28,11 @@
sdpMid: "test",
sdpMLineIndex: 3 });
jsonCopy = JSON.parse(JSON.stringify(rtcIceCandidate));
- for (key in rtcIceCandidate) {
- if (typeof(rtcIceCandidate[key]) == "function") continue;
- is(rtcIceCandidate[key], jsonCopy[key], "key " + key + " should match.");
- }
+ is(jsonCopy.candidate, "dummy");
+ is(jsonCopy.sdpMid, "test");
+ is(jsonCopy.sdpMLineIndex, 3);
+ is(jsonCopy.usernameFragment, rtcIceCandidate.usernameFragment);
+ is(Object.keys(jsonCopy).length, 4, "JSON limited to those four members.");
});
</script>
</pre>
diff --git a/dom/media/webrtc/third_party_build/default_config_env b/dom/media/webrtc/third_party_build/default_config_env
index 7013520a30..be3c5ba7c1 100644
--- a/dom/media/webrtc/third_party_build/default_config_env
+++ b/dom/media/webrtc/third_party_build/default_config_env
@@ -5,41 +5,41 @@
export MOZ_LIBWEBRTC_SRC=$STATE_DIR/moz-libwebrtc
# The previous fast-forward bug number is used for some error messaging.
-export MOZ_PRIOR_FASTFORWARD_BUG="1871981"
+export MOZ_PRIOR_FASTFORWARD_BUG="1876843"
# Fast-forwarding each Chromium version of libwebrtc should be done
# under a separate bugzilla bug. This bug number is used when crafting
# the commit summary as each upstream commit is vendored into the
# mercurial repository. The bug used for the v106 fast-forward was
# 1800920.
-export MOZ_FASTFORWARD_BUG="1876843"
+export MOZ_FASTFORWARD_BUG="1883116"
# MOZ_NEXT_LIBWEBRTC_MILESTONE and MOZ_NEXT_FIREFOX_REL_TARGET are
# not used during fast-forward processing, but facilitate generating this
# default config. To generate an default config for the next update, run
# bash dom/media/webrtc/third_party_build/update_default_config_env.sh
-export MOZ_NEXT_LIBWEBRTC_MILESTONE=121
-export MOZ_NEXT_FIREFOX_REL_TARGET=125
+export MOZ_NEXT_LIBWEBRTC_MILESTONE=122
+export MOZ_NEXT_FIREFOX_REL_TARGET=126
# For Chromium release branches, see:
# https://chromiumdash.appspot.com/branches
-# Chromium's v120 release branch was 6099. This is used to pre-stack
+# Chromium's v121 release branch was 6167. This is used to pre-stack
# the previous release branch's commits onto the appropriate base commit
# (the first common commit between trunk and the release branch).
-export MOZ_PRIOR_UPSTREAM_BRANCH_HEAD_NUM="6099"
+export MOZ_PRIOR_UPSTREAM_BRANCH_HEAD_NUM="6167"
-# New target release branch for v121 is branch-heads/6167. This is used
+# New target release branch for v122 is branch-heads/6261. This is used
# to calculate the next upstream commit.
-export MOZ_TARGET_UPSTREAM_BRANCH_HEAD="branch-heads/6167"
+export MOZ_TARGET_UPSTREAM_BRANCH_HEAD="branch-heads/6261"
# For local development 'mozpatches' is fine for a branch name, but when
# pushing the patch stack to github, it should be named something like
-# 'moz-mods-chr121-for-rel125'.
+# 'moz-mods-chr122-for-rel126'.
export MOZ_LIBWEBRTC_BRANCH="mozpatches"
# After elm has been merged to mozilla-central, the patch stack in
# moz-libwebrtc should be pushed to github. The script
# push_official_branch.sh uses this branch name when pushing to the
# public repo.
-export MOZ_LIBWEBRTC_OFFICIAL_BRANCH="moz-mods-chr121-for-rel125"
+export MOZ_LIBWEBRTC_OFFICIAL_BRANCH="moz-mods-chr122-for-rel126"
diff --git a/dom/media/webrtc/third_party_build/elm_rebase.sh b/dom/media/webrtc/third_party_build/elm_rebase.sh
index ba0028b7a4..0dbf93d3ce 100644
--- a/dom/media/webrtc/third_party_build/elm_rebase.sh
+++ b/dom/media/webrtc/third_party_build/elm_rebase.sh
@@ -153,6 +153,15 @@ export MOZ_BOOKMARK=$MOZ_BOOKMARK
" > $STATE_DIR/rebase_resume_state
fi # if [ -f $STATE_DIR/rebase_resume_state ]; then ; else
+if [ "x$STOP_FOR_REORDER" = "x1" ]; then
+ echo ""
+ echo "Stopping after generating commit list ($COMMIT_LIST_FILE) to"
+ echo "allow tweaking commit ordering. Re-running $0 will resume the"
+ echo "rebase processing. To stop processing during the rebase,"
+ echo "insert a line with only 'STOP'."
+ exit
+fi
+
# grab all commits
COMMITS=`cat $COMMIT_LIST_FILE | awk '{print $1;}'`
@@ -171,6 +180,12 @@ for commit in $COMMITS; do
ed -s $COMMIT_LIST_FILE <<< $'1d\nw\nq'
}
+ if [ "$FULL_COMMIT_LINE" == "STOP" ]; then
+ echo "Stopping for history editing. Re-run $0 to resume."
+ remove_commit
+ exit
+ fi
+
IS_BUILD_COMMIT=`hg log -T '{desc|firstline}' -r $commit \
| grep "file updates" | wc -l | tr -d " " || true`
echo "IS_BUILD_COMMIT: $IS_BUILD_COMMIT"
diff --git a/dom/media/webrtc/third_party_build/fetch_github_repo.py b/dom/media/webrtc/third_party_build/fetch_github_repo.py
index b9d10e0b6c..8caa55d5c5 100644
--- a/dom/media/webrtc/third_party_build/fetch_github_repo.py
+++ b/dom/media/webrtc/third_party_build/fetch_github_repo.py
@@ -87,6 +87,10 @@ def fetch_repo(github_path, clone_protocol, force_fetch, tar_path):
else:
print("Upstream remote branch-heads already configured")
+ # prevent changing line endings when moving things out of the git repo
+ # (and into hg for instance)
+ run_git("git config --local core.autocrlf false")
+
# do a sanity fetch in case this was not a freshly cloned copy of the
# repo, meaning it may not have all the mozilla branches present.
run_git("git fetch --all", github_path)
diff --git a/dom/media/webrtc/third_party_build/vendor-libwebrtc.py b/dom/media/webrtc/third_party_build/vendor-libwebrtc.py
index d820d8c006..1c44fbd749 100644
--- a/dom/media/webrtc/third_party_build/vendor-libwebrtc.py
+++ b/dom/media/webrtc/third_party_build/vendor-libwebrtc.py
@@ -27,7 +27,6 @@ def get_excluded_files():
".clang-format",
".git-blame-ignore-revs",
".gitignore",
- ".vpython",
"CODE_OF_CONDUCT.md",
"ENG_REVIEW_OWNERS",
"PRESUBMIT.py",
diff --git a/dom/media/webrtc/transport/test/ice_unittest.cpp b/dom/media/webrtc/transport/test/ice_unittest.cpp
index 50febb3cdd..7df379e1c4 100644
--- a/dom/media/webrtc/transport/test/ice_unittest.cpp
+++ b/dom/media/webrtc/transport/test/ice_unittest.cpp
@@ -58,9 +58,9 @@ using namespace mozilla;
static unsigned int kDefaultTimeout = 7000;
-// TODO(nils@mozilla.com): This should get replaced with some non-external
-// solution like discussed in bug 860775.
-const std::string kDefaultStunServerHostname((char*)"stun.l.google.com");
+// TODO: It would be nice to have a test STUN/TURN server that can run with
+// gtest.
+const std::string kDefaultStunServerHostname((char*)"");
const std::string kBogusStunServerHostname(
(char*)"stun-server-nonexistent.invalid");
const uint16_t kDefaultStunServerPort = 19305;
@@ -1628,12 +1628,17 @@ class WebRtcIceConnectTest : public StunTest {
peer->SetMappingType(mapping_type_);
peer->SetBlockUdp(block_udp_);
} else if (setup_stun_servers) {
- std::vector<NrIceStunServer> stun_servers;
+ if (stun_server_address_.empty()) {
+ InitTestStunServer();
+ peer->UseTestStunServer();
+ } else {
+ std::vector<NrIceStunServer> stun_servers;
- stun_servers.push_back(*NrIceStunServer::Create(
- stun_server_address_, kDefaultStunServerPort, kNrIceTransportUdp));
+ stun_servers.push_back(*NrIceStunServer::Create(
+ stun_server_address_, kDefaultStunServerPort, kNrIceTransportUdp));
- peer->SetStunServers(stun_servers);
+ peer->SetStunServers(stun_servers);
+ }
}
}
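
The interleaved churn in this last hunk obscures the result: when no external STUN hostname is configured (now the default, since kDefaultStunServerHostname is empty), the test spins up the in-process STUN server instead of touching the network. Reconstructed from the hunk above, the resulting branch reads:

    } else if (setup_stun_servers) {
      if (stun_server_address_.empty()) {
        InitTestStunServer();        // hermetic, in-process fallback
        peer->UseTestStunServer();
      } else {
        std::vector<NrIceStunServer> stun_servers;
        stun_servers.push_back(*NrIceStunServer::Create(
            stun_server_address_, kDefaultStunServerPort, kNrIceTransportUdp));
        peer->SetStunServers(stun_servers);
      }
    }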