Diffstat (limited to 'dom/media/webrtc/MediaEngineWebRTCAudio.h')
-rw-r--r--  dom/media/webrtc/MediaEngineWebRTCAudio.h  57
1 file changed, 29 insertions(+), 28 deletions(-)
diff --git a/dom/media/webrtc/MediaEngineWebRTCAudio.h b/dom/media/webrtc/MediaEngineWebRTCAudio.h
index e71b5ef826..6b1fbf0089 100644
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.h
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.h
@@ -91,8 +91,7 @@ class MediaEngineWebRTCMicrophoneSource : public MediaEngineSource {
// Current state of the resource for this source.
MediaEngineSourceState mState;
- // The current preferences that will be forwarded to mAudioProcessingConfig
- // below.
+ // The current preferences that will be forwarded to mInputProcessing below.
MediaEnginePrefs mCurrentPrefs;
// The AudioProcessingTrack used to interface with the MediaTrackGraph. Set in
@@ -101,10 +100,6 @@ class MediaEngineWebRTCMicrophoneSource : public MediaEngineSource {
// See note at the top of this class.
RefPtr<AudioInputProcessing> mInputProcessing;
-
- // Copy of the config currently applied to AudioProcessing through
- // mInputProcessing.
- webrtc::AudioProcessing::Config mAudioProcessingConfig;
};
// This class is created on the MediaManager thread, and then exclusively used
@@ -113,15 +108,16 @@ class MediaEngineWebRTCMicrophoneSource : public MediaEngineSource {
class AudioInputProcessing : public AudioDataListener {
public:
explicit AudioInputProcessing(uint32_t aMaxChannelCount);
- void Process(MediaTrackGraph* aGraph, GraphTime aFrom, GraphTime aTo,
+ void Process(AudioProcessingTrack* aTrack, GraphTime aFrom, GraphTime aTo,
AudioSegment* aInput, AudioSegment* aOutput);
- void ProcessOutputData(MediaTrackGraph* aGraph, const AudioChunk& aChunk);
+ void ProcessOutputData(AudioProcessingTrack* aTrack,
+ const AudioChunk& aChunk);
bool IsVoiceInput(MediaTrackGraph* aGraph) const override {
// If we're passing data directly without AEC or any other processing, this
// means that all voice-processing has been disabled intentionally. In this
// case, consider that the device is not used for voice input.
- return !PassThrough(aGraph);
+ return !IsPassThrough(aGraph);
}
void Start(MediaTrackGraph* aGraph);
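
A note on the renamed predicate: IsPassThrough() is true when every processing step is disabled, so input can skip packetization and the APM entirely. A minimal sketch of such a check, assuming MediaEnginePrefs flags named mAecOn, mAgcOn and mNoiseOn (the real field set may be wider):

    // Hypothetical helper, not this patch's code: pass-through holds when
    // no processing step (AEC, AGC, noise suppression) is requested.
    static bool AllProcessingOff(const MediaEnginePrefs& aPrefs) {
      return !aPrefs.mAecOn && !aPrefs.mAgcOn && !aPrefs.mNoiseOn;
    }
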
@@ -135,23 +131,20 @@ class AudioInputProcessing : public AudioDataListener {
void Disconnect(MediaTrackGraph* aGraph) override;
- void PacketizeAndProcess(MediaTrackGraph* aGraph,
+ void PacketizeAndProcess(AudioProcessingTrack* aTrack,
const AudioSegment& aSegment);
- void SetPassThrough(MediaTrackGraph* aGraph, bool aPassThrough);
uint32_t GetRequestedInputChannelCount();
- void SetRequestedInputChannelCount(MediaTrackGraph* aGraph,
- CubebUtils::AudioDeviceID aDeviceId,
- uint32_t aRequestedInputChannelCount);
- // This is true when all processing is disabled, we can skip
+ // This is true when all processing is disabled, in which case we can skip
// packetization, resampling and other processing passes.
- bool PassThrough(MediaTrackGraph* aGraph) const;
+ bool IsPassThrough(MediaTrackGraph* aGraph) const;
// This allows changing the APM options, enabling or disabling processing
- // steps. The config gets applied the next time we're about to process input
+ // steps. The settings get applied the next time we're about to process input
// data.
- void ApplyConfig(MediaTrackGraph* aGraph,
- const webrtc::AudioProcessing::Config& aConfig);
+ void ApplySettings(MediaTrackGraph* aGraph,
+ CubebUtils::AudioDeviceID aDeviceID,
+ const MediaEnginePrefs& aSettings);
void End();
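
The comment above describes deferred application: ApplySettings() only records the device and the new settings; they take effect the next time input is processed. A sketch of that pattern, assuming mSettings is the stored copy (the real method also has to react to channel-count and pass-through changes):

    void AudioInputProcessing::ApplySettings(MediaTrackGraph* aGraph,
                                             CubebUtils::AudioDeviceID aDeviceID,
                                             const MediaEnginePrefs& aSettings) {
      // Sketch only: remember the settings; the next processing pass derives
      // a fresh webrtc::AudioProcessing::Config from them via ConfigForPrefs().
      mSettings = aSettings;
    }
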
@@ -164,9 +157,18 @@ class AudioInputProcessing : public AudioDataListener {
bool IsEnded() const { return mEnded; }
+ // For testing:
+ bool HadAECAndDrift() const { return mHadAECAndDrift; }
+
private:
~AudioInputProcessing() = default;
- void EnsureAudioProcessing(MediaTrackGraph* aGraph, uint32_t aChannels);
+ webrtc::AudioProcessing::Config ConfigForPrefs(
+ const MediaEnginePrefs& aPrefs);
+ void PassThroughChanged(MediaTrackGraph* aGraph);
+ void RequestedInputChannelCountChanged(MediaTrackGraph* aGraph,
+ CubebUtils::AudioDeviceID aDeviceId);
+ void EnsurePacketizer(AudioProcessingTrack* aTrack);
+ void EnsureAudioProcessing(AudioProcessingTrack* aTrack);
void ResetAudioProcessing(MediaTrackGraph* aGraph);
PrincipalHandle GetCheckedPrincipal(const AudioSegment& aSegment);
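
ConfigForPrefs() replaces the removed mAudioProcessingConfig copy: the APM config is now derived on demand from the stored prefs. A sketch of what that mapping can look like; the webrtc::AudioProcessing::Config fields below are upstream names, but the exact mapping used by this patch is an assumption:

    webrtc::AudioProcessing::Config AudioInputProcessing::ConfigForPrefs(
        const MediaEnginePrefs& aPrefs) {
      webrtc::AudioProcessing::Config config;
      // Sketch: enable each APM stage from its corresponding pref.
      config.echo_canceller.enabled = aPrefs.mAecOn;
      config.gain_controller1.enabled = aPrefs.mAgcOn;
      config.noise_suppression.enabled = aPrefs.mNoiseOn;
      config.high_pass_filter.enabled = aPrefs.mHPFOn;
      return config;
    }
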
// This implements the processing algorithm to apply to the input (e.g. a
@@ -174,17 +176,16 @@ class AudioInputProcessing : public AudioDataListener {
// class only accepts audio chunks of 10ms. It has two inputs and one output:
// it is fed the speaker data and the microphone data. It outputs processed
// input data.
- const UniquePtr<webrtc::AudioProcessing> mAudioProcessing;
+ UniquePtr<webrtc::AudioProcessing> mAudioProcessing;
+ // Whether mAudioProcessing was created for AEC with clock drift.
+ // Meaningful only when mAudioProcessing is non-null.
+ bool mHadAECAndDrift = false;
// Packetizer to be able to feed 10ms packets to the input side of
// mAudioProcessing. Not used if the processing is bypassed.
Maybe<AudioPacketizer<AudioDataValue, float>> mPacketizerInput;
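
Since webrtc::AudioProcessing only accepts 10 ms chunks, the packetizer's packet size is the graph rate divided by 100. For example, at a 48 kHz graph rate:

    // 10 ms of audio at the graph rate: 48000 / 100 = 480 frames per packet;
    // a planar stereo packet buffer then holds 480 * 2 = 960 samples.
    uint32_t framesPerPacket = aRate / 100;  // aRate: graph sample rate
    uint32_t bufferSamples = framesPerPacket * channelCount;
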
- // The number of channels asked for by content, after clamping to the range of
- // legal channel count for this particular device.
- uint32_t mRequestedInputChannelCount;
- // mSkipProcessing is true if none of the processing passes are enabled,
- // because of prefs or constraints. This allows simply copying the audio into
- // the MTG, skipping resampling and the whole webrtc.org code.
- bool mSkipProcessing;
+ // The current settings from about:config preferences and content-provided
+ // constraints.
+ MediaEnginePrefs mSettings;
// Buffer for up to one 10ms packet of planar mixed audio output for the
// reverse-stream (speaker data) of mAudioProcessing AEC.
// Length is packet size * channel count, regardless of how many frames are