author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 00:47:55 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 00:47:55 +0000
commit    26a029d407be480d791972afb5975cf62c9360a6
tree      f435a8308119effd964b339f76abb83a57c29483  /third_party/libwebrtc/modules/audio_device/linux
parent    Initial commit.
Adding upstream version 124.0.1. (upstream/124.0.1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/libwebrtc/modules/audio_device/linux')
-rw-r--r--  third_party/libwebrtc/modules/audio_device/linux/alsasymboltable_linux.cc           |   40
-rw-r--r--  third_party/libwebrtc/modules/audio_device/linux/alsasymboltable_linux.h            |  148
-rw-r--r--  third_party/libwebrtc/modules/audio_device/linux/audio_device_alsa_linux.cc         | 1636
-rw-r--r--  third_party/libwebrtc/modules/audio_device/linux/audio_device_alsa_linux.h          |  208
-rw-r--r--  third_party/libwebrtc/modules/audio_device/linux/audio_device_pulse_linux.cc        | 2286
-rw-r--r--  third_party/libwebrtc/modules/audio_device/linux/audio_device_pulse_linux.h         |  349
-rw-r--r--  third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_alsa_linux.cc  |  979
-rw-r--r--  third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_alsa_linux.h   |   71
-rw-r--r--  third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.cc |  844
-rw-r--r--  third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.h  |  114
-rw-r--r--  third_party/libwebrtc/modules/audio_device/linux/latebindingsymboltable_linux.cc    |  106
-rw-r--r--  third_party/libwebrtc/modules/audio_device/linux/latebindingsymboltable_linux.h     |  168
-rw-r--r--  third_party/libwebrtc/modules/audio_device/linux/pulseaudiosymboltable_linux.cc     |   41
-rw-r--r--  third_party/libwebrtc/modules/audio_device/linux/pulseaudiosymboltable_linux.h      |  106
14 files changed, 7096 insertions, 0 deletions
diff --git a/third_party/libwebrtc/modules/audio_device/linux/alsasymboltable_linux.cc b/third_party/libwebrtc/modules/audio_device/linux/alsasymboltable_linux.cc
new file mode 100644
index 0000000000..5dfb91d6f4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/linux/alsasymboltable_linux.cc
@@ -0,0 +1,40 @@
+/*
+ * libjingle
+ * Copyright 2004--2010, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "modules/audio_device/linux/alsasymboltable_linux.h"
+
+namespace webrtc {
+namespace adm_linux_alsa {
+
+LATE_BINDING_SYMBOL_TABLE_DEFINE_BEGIN(AlsaSymbolTable, "libasound.so.2")
+#define X(sym) LATE_BINDING_SYMBOL_TABLE_DEFINE_ENTRY(AlsaSymbolTable, sym)
+ALSA_SYMBOLS_LIST
+#undef X
+LATE_BINDING_SYMBOL_TABLE_DEFINE_END(AlsaSymbolTable)
+
+} // namespace adm_linux_alsa
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/linux/alsasymboltable_linux.h b/third_party/libwebrtc/modules/audio_device/linux/alsasymboltable_linux.h
new file mode 100644
index 0000000000..c9970b02bc
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/linux/alsasymboltable_linux.h
@@ -0,0 +1,148 @@
+/*
+ * libjingle
+ * Copyright 2004--2010, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef AUDIO_DEVICE_ALSASYMBOLTABLE_LINUX_H_
+#define AUDIO_DEVICE_ALSASYMBOLTABLE_LINUX_H_
+
+#include "modules/audio_device/linux/latebindingsymboltable_linux.h"
+
+namespace webrtc {
+namespace adm_linux_alsa {
+
+// The ALSA symbols we need, as an X-Macro list.
+// This list must contain precisely every libasound function that is used by
+// this ALSA audio device module.
+#define ALSA_SYMBOLS_LIST \
+ X(snd_device_name_free_hint) \
+ X(snd_device_name_get_hint) \
+ X(snd_device_name_hint) \
+ X(snd_pcm_avail_update) \
+ X(snd_pcm_close) \
+ X(snd_pcm_delay) \
+ X(snd_pcm_drop) \
+ X(snd_pcm_open) \
+ X(snd_pcm_prepare) \
+ X(snd_pcm_readi) \
+ X(snd_pcm_recover) \
+ X(snd_pcm_resume) \
+ X(snd_pcm_reset) \
+ X(snd_pcm_state) \
+ X(snd_pcm_set_params) \
+ X(snd_pcm_get_params) \
+ X(snd_pcm_start) \
+ X(snd_pcm_stream) \
+ X(snd_pcm_frames_to_bytes) \
+ X(snd_pcm_bytes_to_frames) \
+ X(snd_pcm_wait) \
+ X(snd_pcm_writei) \
+ X(snd_pcm_info_get_class) \
+ X(snd_pcm_info_get_subdevices_avail) \
+ X(snd_pcm_info_get_subdevice_name) \
+ X(snd_pcm_info_set_subdevice) \
+ X(snd_pcm_info_get_id) \
+ X(snd_pcm_info_set_device) \
+ X(snd_pcm_info_set_stream) \
+ X(snd_pcm_info_get_name) \
+ X(snd_pcm_info_get_subdevices_count) \
+ X(snd_pcm_info_sizeof) \
+ X(snd_pcm_hw_params) \
+ X(snd_pcm_hw_params_malloc) \
+ X(snd_pcm_hw_params_free) \
+ X(snd_pcm_hw_params_any) \
+ X(snd_pcm_hw_params_set_access) \
+ X(snd_pcm_hw_params_set_format) \
+ X(snd_pcm_hw_params_set_channels) \
+ X(snd_pcm_hw_params_set_rate_near) \
+ X(snd_pcm_hw_params_set_buffer_size_near) \
+ X(snd_card_next) \
+ X(snd_card_get_name) \
+ X(snd_config_update) \
+ X(snd_config_copy) \
+ X(snd_config_get_id) \
+ X(snd_ctl_open) \
+ X(snd_ctl_close) \
+ X(snd_ctl_card_info) \
+ X(snd_ctl_card_info_sizeof) \
+ X(snd_ctl_card_info_get_id) \
+ X(snd_ctl_card_info_get_name) \
+ X(snd_ctl_pcm_next_device) \
+ X(snd_ctl_pcm_info) \
+ X(snd_mixer_load) \
+ X(snd_mixer_free) \
+ X(snd_mixer_detach) \
+ X(snd_mixer_close) \
+ X(snd_mixer_open) \
+ X(snd_mixer_attach) \
+ X(snd_mixer_first_elem) \
+ X(snd_mixer_elem_next) \
+ X(snd_mixer_selem_get_name) \
+ X(snd_mixer_selem_is_active) \
+ X(snd_mixer_selem_register) \
+ X(snd_mixer_selem_set_playback_volume_all) \
+ X(snd_mixer_selem_get_playback_volume) \
+ X(snd_mixer_selem_has_playback_volume) \
+ X(snd_mixer_selem_get_playback_volume_range) \
+ X(snd_mixer_selem_has_playback_switch) \
+ X(snd_mixer_selem_get_playback_switch) \
+ X(snd_mixer_selem_set_playback_switch_all) \
+ X(snd_mixer_selem_has_capture_switch) \
+ X(snd_mixer_selem_get_capture_switch) \
+ X(snd_mixer_selem_set_capture_switch_all) \
+ X(snd_mixer_selem_has_capture_volume) \
+ X(snd_mixer_selem_set_capture_volume_all) \
+ X(snd_mixer_selem_get_capture_volume) \
+ X(snd_mixer_selem_get_capture_volume_range) \
+ X(snd_dlopen) \
+ X(snd_dlclose) \
+ X(snd_config) \
+ X(snd_config_search) \
+ X(snd_config_get_string) \
+ X(snd_config_search_definition) \
+ X(snd_config_get_type) \
+ X(snd_config_delete) \
+ X(snd_config_iterator_entry) \
+ X(snd_config_iterator_first) \
+ X(snd_config_iterator_next) \
+ X(snd_config_iterator_end) \
+ X(snd_config_delete_compound_members) \
+ X(snd_config_get_integer) \
+ X(snd_config_get_bool) \
+ X(snd_dlsym) \
+ X(snd_strerror) \
+ X(snd_lib_error) \
+ X(snd_lib_error_set_handler)
+
+LATE_BINDING_SYMBOL_TABLE_DECLARE_BEGIN(AlsaSymbolTable)
+#define X(sym) LATE_BINDING_SYMBOL_TABLE_DECLARE_ENTRY(AlsaSymbolTable, sym)
+ALSA_SYMBOLS_LIST
+#undef X
+LATE_BINDING_SYMBOL_TABLE_DECLARE_END(AlsaSymbolTable)
+
+} // namespace adm_linux_alsa
+} // namespace webrtc
+
+#endif // AUDIO_DEVICE_ALSASYMBOLTABLE_LINUX_H_
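The pair of files above builds AlsaSymbolTable with an X-macro: ALSA_SYMBOLS_LIST names every required libasound entry point exactly once, and the LATE_BINDING_SYMBOL_TABLE_DECLARE_*/DEFINE_* macros (from latebindingsymboltable_linux.h) expand that single list into both the table's declaration and its definition, so the list cannot drift out of sync with the generated code. The standalone sketch below only illustrates the X-macro technique itself; DEMO_SYMBOLS_LIST and the kDemo* names are made up for this sketch and are not part of the WebRTC sources.

  #include <cstdio>

  // A hypothetical X-macro list: each X(sym) names one entry exactly once.
  #define DEMO_SYMBOLS_LIST \
    X(snd_pcm_open)         \
    X(snd_pcm_close)

  // Expansion 1: one enum index per listed symbol.
  enum DemoSymbolIndex {
  #define X(sym) kDemo_##sym,
    DEMO_SYMBOLS_LIST
  #undef X
    kDemoSymbolCount
  };

  // Expansion 2: a parallel array of the symbol names as strings.
  static const char* const kDemoSymbolNames[] = {
  #define X(sym) #sym,
      DEMO_SYMBOLS_LIST
  #undef X
  };

  int main() {
    // Both expansions stay in sync because they come from the same list.
    for (int i = 0; i < kDemoSymbolCount; ++i)
      std::printf("slot %d -> %s\n", i, kDemoSymbolNames[i]);
    return 0;
  }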
diff --git a/third_party/libwebrtc/modules/audio_device/linux/audio_device_alsa_linux.cc b/third_party/libwebrtc/modules/audio_device/linux/audio_device_alsa_linux.cc
new file mode 100644
index 0000000000..1e0ac8be28
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/linux/audio_device_alsa_linux.cc
@@ -0,0 +1,1636 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/linux/audio_device_alsa_linux.h"
+
+#include "modules/audio_device/audio_device_config.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/system/arch.h"
+#include "system_wrappers/include/sleep.h"
+
+WebRTCAlsaSymbolTable* GetAlsaSymbolTable() {
+ static WebRTCAlsaSymbolTable* alsa_symbol_table = new WebRTCAlsaSymbolTable();
+ return alsa_symbol_table;
+}
+
+// Accesses ALSA functions through our late-binding symbol table instead of
+// directly. This way we don't have to link to libasound, which means our binary
+// will work on systems that don't have it.
+#define LATE(sym) \
+ LATESYM_GET(webrtc::adm_linux_alsa::AlsaSymbolTable, GetAlsaSymbolTable(), \
+ sym)
+
+// Redefine these here to be able to do late-binding
+#undef snd_ctl_card_info_alloca
+#define snd_ctl_card_info_alloca(ptr) \
+ do { \
+ *ptr = (snd_ctl_card_info_t*)__builtin_alloca( \
+ LATE(snd_ctl_card_info_sizeof)()); \
+ memset(*ptr, 0, LATE(snd_ctl_card_info_sizeof)()); \
+ } while (0)
+
+#undef snd_pcm_info_alloca
+#define snd_pcm_info_alloca(pInfo) \
+ do { \
+ *pInfo = (snd_pcm_info_t*)__builtin_alloca(LATE(snd_pcm_info_sizeof)()); \
+ memset(*pInfo, 0, LATE(snd_pcm_info_sizeof)()); \
+ } while (0)
+
+// snd_lib_error_handler_t
+void WebrtcAlsaErrorHandler(const char* file,
+ int line,
+ const char* function,
+ int err,
+ const char* fmt,
+ ...) {}
+
+namespace webrtc {
+static const unsigned int ALSA_PLAYOUT_FREQ = 48000;
+static const unsigned int ALSA_PLAYOUT_CH = 2;
+static const unsigned int ALSA_PLAYOUT_LATENCY = 40 * 1000; // in us
+static const unsigned int ALSA_CAPTURE_FREQ = 48000;
+static const unsigned int ALSA_CAPTURE_CH = 2;
+static const unsigned int ALSA_CAPTURE_LATENCY = 40 * 1000; // in us
+static const unsigned int ALSA_CAPTURE_WAIT_TIMEOUT = 5; // in ms
+
+#define FUNC_GET_NUM_OF_DEVICE 0
+#define FUNC_GET_DEVICE_NAME 1
+#define FUNC_GET_DEVICE_NAME_FOR_AN_ENUM 2
+
+AudioDeviceLinuxALSA::AudioDeviceLinuxALSA()
+ : _ptrAudioBuffer(NULL),
+ _inputDeviceIndex(0),
+ _outputDeviceIndex(0),
+ _inputDeviceIsSpecified(false),
+ _outputDeviceIsSpecified(false),
+ _handleRecord(NULL),
+ _handlePlayout(NULL),
+ _recordingBuffersizeInFrame(0),
+ _recordingPeriodSizeInFrame(0),
+ _playoutBufferSizeInFrame(0),
+ _playoutPeriodSizeInFrame(0),
+ _recordingBufferSizeIn10MS(0),
+ _playoutBufferSizeIn10MS(0),
+ _recordingFramesIn10MS(0),
+ _playoutFramesIn10MS(0),
+ _recordingFreq(ALSA_CAPTURE_FREQ),
+ _playoutFreq(ALSA_PLAYOUT_FREQ),
+ _recChannels(ALSA_CAPTURE_CH),
+ _playChannels(ALSA_PLAYOUT_CH),
+ _recordingBuffer(NULL),
+ _playoutBuffer(NULL),
+ _recordingFramesLeft(0),
+ _playoutFramesLeft(0),
+ _initialized(false),
+ _recording(false),
+ _playing(false),
+ _recIsInitialized(false),
+ _playIsInitialized(false),
+ _recordingDelay(0),
+ _playoutDelay(0) {
+ memset(_oldKeyState, 0, sizeof(_oldKeyState));
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << " created";
+}
+
+// ----------------------------------------------------------------------------
+// AudioDeviceLinuxALSA - dtor
+// ----------------------------------------------------------------------------
+
+AudioDeviceLinuxALSA::~AudioDeviceLinuxALSA() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << " destroyed";
+
+ Terminate();
+
+ // Clean up the recording buffer and playout buffer.
+ if (_recordingBuffer) {
+ delete[] _recordingBuffer;
+ _recordingBuffer = NULL;
+ }
+ if (_playoutBuffer) {
+ delete[] _playoutBuffer;
+ _playoutBuffer = NULL;
+ }
+}
+
+void AudioDeviceLinuxALSA::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
+ MutexLock lock(&mutex_);
+
+ _ptrAudioBuffer = audioBuffer;
+
+ // Inform the AudioBuffer about default settings for this implementation.
+ // Set all values to zero here since the actual settings will be done by
+ // InitPlayout and InitRecording later.
+ _ptrAudioBuffer->SetRecordingSampleRate(0);
+ _ptrAudioBuffer->SetPlayoutSampleRate(0);
+ _ptrAudioBuffer->SetRecordingChannels(0);
+ _ptrAudioBuffer->SetPlayoutChannels(0);
+}
+
+int32_t AudioDeviceLinuxALSA::ActiveAudioLayer(
+ AudioDeviceModule::AudioLayer& audioLayer) const {
+ audioLayer = AudioDeviceModule::kLinuxAlsaAudio;
+ return 0;
+}
+
+AudioDeviceGeneric::InitStatus AudioDeviceLinuxALSA::Init() {
+ MutexLock lock(&mutex_);
+
+ // Load libasound
+ if (!GetAlsaSymbolTable()->Load()) {
+ // Alsa is not installed on this system
+ RTC_LOG(LS_ERROR) << "failed to load symbol table";
+ return InitStatus::OTHER_ERROR;
+ }
+
+ if (_initialized) {
+ return InitStatus::OK;
+ }
+#if defined(WEBRTC_USE_X11)
+ // Get X display handle for typing detection
+ _XDisplay = XOpenDisplay(NULL);
+ if (!_XDisplay) {
+ RTC_LOG(LS_WARNING)
+ << "failed to open X display, typing detection will not work";
+ }
+#endif
+
+ _initialized = true;
+
+ return InitStatus::OK;
+}
+
+int32_t AudioDeviceLinuxALSA::Terminate() {
+ if (!_initialized) {
+ return 0;
+ }
+
+ MutexLock lock(&mutex_);
+
+ _mixerManager.Close();
+
+ // RECORDING
+ mutex_.Unlock();
+ _ptrThreadRec.Finalize();
+
+ // PLAYOUT
+ _ptrThreadPlay.Finalize();
+ mutex_.Lock();
+
+#if defined(WEBRTC_USE_X11)
+ if (_XDisplay) {
+ XCloseDisplay(_XDisplay);
+ _XDisplay = NULL;
+ }
+#endif
+ _initialized = false;
+ _outputDeviceIsSpecified = false;
+ _inputDeviceIsSpecified = false;
+
+ return 0;
+}
+
+bool AudioDeviceLinuxALSA::Initialized() const {
+ return (_initialized);
+}
+
+int32_t AudioDeviceLinuxALSA::InitSpeaker() {
+ MutexLock lock(&mutex_);
+ return InitSpeakerLocked();
+}
+
+int32_t AudioDeviceLinuxALSA::InitSpeakerLocked() {
+ if (_playing) {
+ return -1;
+ }
+
+ char devName[kAdmMaxDeviceNameSize] = {0};
+ GetDevicesInfo(2, true, _outputDeviceIndex, devName, kAdmMaxDeviceNameSize);
+ return _mixerManager.OpenSpeaker(devName);
+}
+
+int32_t AudioDeviceLinuxALSA::InitMicrophone() {
+ MutexLock lock(&mutex_);
+ return InitMicrophoneLocked();
+}
+
+int32_t AudioDeviceLinuxALSA::InitMicrophoneLocked() {
+ if (_recording) {
+ return -1;
+ }
+
+ char devName[kAdmMaxDeviceNameSize] = {0};
+ GetDevicesInfo(2, false, _inputDeviceIndex, devName, kAdmMaxDeviceNameSize);
+ return _mixerManager.OpenMicrophone(devName);
+}
+
+bool AudioDeviceLinuxALSA::SpeakerIsInitialized() const {
+ return (_mixerManager.SpeakerIsInitialized());
+}
+
+bool AudioDeviceLinuxALSA::MicrophoneIsInitialized() const {
+ return (_mixerManager.MicrophoneIsInitialized());
+}
+
+int32_t AudioDeviceLinuxALSA::SpeakerVolumeIsAvailable(bool& available) {
+ bool wasInitialized = _mixerManager.SpeakerIsInitialized();
+
+ // Make an attempt to open up the
+ // output mixer corresponding to the currently selected output device.
+ if (!wasInitialized && InitSpeaker() == -1) {
+ // If we end up here it means that the selected speaker has no volume
+ // control.
+ available = false;
+ return 0;
+ }
+
+ // Given that InitSpeaker was successful, we know that a volume control
+ // exists
+ available = true;
+
+ // Close the initialized output mixer
+ if (!wasInitialized) {
+ _mixerManager.CloseSpeaker();
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::SetSpeakerVolume(uint32_t volume) {
+ return (_mixerManager.SetSpeakerVolume(volume));
+}
+
+int32_t AudioDeviceLinuxALSA::SpeakerVolume(uint32_t& volume) const {
+ uint32_t level(0);
+
+ if (_mixerManager.SpeakerVolume(level) == -1) {
+ return -1;
+ }
+
+ volume = level;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::MaxSpeakerVolume(uint32_t& maxVolume) const {
+ uint32_t maxVol(0);
+
+ if (_mixerManager.MaxSpeakerVolume(maxVol) == -1) {
+ return -1;
+ }
+
+ maxVolume = maxVol;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::MinSpeakerVolume(uint32_t& minVolume) const {
+ uint32_t minVol(0);
+
+ if (_mixerManager.MinSpeakerVolume(minVol) == -1) {
+ return -1;
+ }
+
+ minVolume = minVol;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::SpeakerMuteIsAvailable(bool& available) {
+ bool isAvailable(false);
+ bool wasInitialized = _mixerManager.SpeakerIsInitialized();
+
+ // Make an attempt to open up the
+ // output mixer corresponding to the currently selected output device.
+ //
+ if (!wasInitialized && InitSpeaker() == -1) {
+ // If we end up here it means that the selected speaker has no volume
+ // control, hence it is safe to state that there is no mute control
+ // already at this stage.
+ available = false;
+ return 0;
+ }
+
+ // Check if the selected speaker has a mute control
+ _mixerManager.SpeakerMuteIsAvailable(isAvailable);
+
+ available = isAvailable;
+
+ // Close the initialized output mixer
+ if (!wasInitialized) {
+ _mixerManager.CloseSpeaker();
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::SetSpeakerMute(bool enable) {
+ return (_mixerManager.SetSpeakerMute(enable));
+}
+
+int32_t AudioDeviceLinuxALSA::SpeakerMute(bool& enabled) const {
+ bool muted(0);
+
+ if (_mixerManager.SpeakerMute(muted) == -1) {
+ return -1;
+ }
+
+ enabled = muted;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::MicrophoneMuteIsAvailable(bool& available) {
+ bool isAvailable(false);
+ bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
+
+ // Make an attempt to open up the
+ // input mixer corresponding to the currently selected input device.
+ //
+ if (!wasInitialized && InitMicrophone() == -1) {
+ // If we end up here it means that the selected microphone has no volume
+ // control, hence it is safe to state that there is no mute control
+ // already at this stage.
+ available = false;
+ return 0;
+ }
+
+ // Check if the selected microphone has a mute control
+ //
+ _mixerManager.MicrophoneMuteIsAvailable(isAvailable);
+ available = isAvailable;
+
+ // Close the initialized input mixer
+ //
+ if (!wasInitialized) {
+ _mixerManager.CloseMicrophone();
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::SetMicrophoneMute(bool enable) {
+ return (_mixerManager.SetMicrophoneMute(enable));
+}
+
+// ----------------------------------------------------------------------------
+// MicrophoneMute
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceLinuxALSA::MicrophoneMute(bool& enabled) const {
+ bool muted(0);
+
+ if (_mixerManager.MicrophoneMute(muted) == -1) {
+ return -1;
+ }
+
+ enabled = muted;
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::StereoRecordingIsAvailable(bool& available) {
+ MutexLock lock(&mutex_);
+
+ // If we already have initialized in stereo it's obviously available
+ if (_recIsInitialized && (2 == _recChannels)) {
+ available = true;
+ return 0;
+ }
+
+ // Save rec states and the number of rec channels
+ bool recIsInitialized = _recIsInitialized;
+ bool recording = _recording;
+ int recChannels = _recChannels;
+
+ available = false;
+
+ // Stop/uninitialize recording if initialized (and possibly started)
+ if (_recIsInitialized) {
+ StopRecordingLocked();
+ }
+
+ // Try init in stereo;
+ _recChannels = 2;
+ if (InitRecordingLocked() == 0) {
+ available = true;
+ }
+
+ // Stop/uninitialize recording
+ StopRecordingLocked();
+
+ // Recover previous states
+ _recChannels = recChannels;
+ if (recIsInitialized) {
+ InitRecordingLocked();
+ }
+ if (recording) {
+ StartRecording();
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::SetStereoRecording(bool enable) {
+ if (enable)
+ _recChannels = 2;
+ else
+ _recChannels = 1;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::StereoRecording(bool& enabled) const {
+ if (_recChannels == 2)
+ enabled = true;
+ else
+ enabled = false;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::StereoPlayoutIsAvailable(bool& available) {
+ MutexLock lock(&mutex_);
+
+ // If we already have initialized in stereo it's obviously available
+ if (_playIsInitialized && (2 == _playChannels)) {
+ available = true;
+ return 0;
+ }
+
+  // Save playout state and the number of playout channels
+ bool playIsInitialized = _playIsInitialized;
+ bool playing = _playing;
+ int playChannels = _playChannels;
+
+ available = false;
+
+  // Stop/uninitialize playout if initialized (and possibly started)
+ if (_playIsInitialized) {
+ StopPlayoutLocked();
+ }
+
+ // Try init in stereo;
+ _playChannels = 2;
+ if (InitPlayoutLocked() == 0) {
+ available = true;
+ }
+
+  // Stop/uninitialize playout
+ StopPlayoutLocked();
+
+ // Recover previous states
+ _playChannels = playChannels;
+ if (playIsInitialized) {
+ InitPlayoutLocked();
+ }
+ if (playing) {
+ StartPlayout();
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::SetStereoPlayout(bool enable) {
+ if (enable)
+ _playChannels = 2;
+ else
+ _playChannels = 1;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::StereoPlayout(bool& enabled) const {
+ if (_playChannels == 2)
+ enabled = true;
+ else
+ enabled = false;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::MicrophoneVolumeIsAvailable(bool& available) {
+ bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
+
+ // Make an attempt to open up the
+  // input mixer corresponding to the currently selected input device.
+ if (!wasInitialized && InitMicrophone() == -1) {
+ // If we end up here it means that the selected microphone has no volume
+ // control.
+ available = false;
+ return 0;
+ }
+
+ // Given that InitMicrophone was successful, we know that a volume control
+ // exists
+ available = true;
+
+ // Close the initialized input mixer
+ if (!wasInitialized) {
+ _mixerManager.CloseMicrophone();
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::SetMicrophoneVolume(uint32_t volume) {
+ return (_mixerManager.SetMicrophoneVolume(volume));
+}
+
+int32_t AudioDeviceLinuxALSA::MicrophoneVolume(uint32_t& volume) const {
+ uint32_t level(0);
+
+ if (_mixerManager.MicrophoneVolume(level) == -1) {
+    RTC_LOG(LS_WARNING) << "failed to retrieve current microphone level";
+ return -1;
+ }
+
+ volume = level;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::MaxMicrophoneVolume(uint32_t& maxVolume) const {
+ uint32_t maxVol(0);
+
+ if (_mixerManager.MaxMicrophoneVolume(maxVol) == -1) {
+ return -1;
+ }
+
+ maxVolume = maxVol;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::MinMicrophoneVolume(uint32_t& minVolume) const {
+ uint32_t minVol(0);
+
+ if (_mixerManager.MinMicrophoneVolume(minVol) == -1) {
+ return -1;
+ }
+
+ minVolume = minVol;
+
+ return 0;
+}
+
+int16_t AudioDeviceLinuxALSA::PlayoutDevices() {
+ return (int16_t)GetDevicesInfo(0, true);
+}
+
+int32_t AudioDeviceLinuxALSA::SetPlayoutDevice(uint16_t index) {
+ if (_playIsInitialized) {
+ return -1;
+ }
+
+ int32_t nDevices = GetDevicesInfo(0, true);
+ RTC_LOG(LS_VERBOSE) << "number of available audio output devices is "
+ << nDevices;
+
+ if (index > (nDevices - 1)) {
+ RTC_LOG(LS_ERROR) << "device index is out of range [0," << (nDevices - 1)
+ << "]";
+ return -1;
+ }
+
+ _outputDeviceIndex = index;
+ _outputDeviceIsSpecified = true;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::SetPlayoutDevice(
+ AudioDeviceModule::WindowsDeviceType /*device*/) {
+ RTC_LOG(LS_ERROR) << "WindowsDeviceType not supported";
+ return -1;
+}
+
+int32_t AudioDeviceLinuxALSA::PlayoutDeviceName(
+ uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) {
+ const uint16_t nDevices(PlayoutDevices());
+
+ if ((index > (nDevices - 1)) || (name == NULL)) {
+ return -1;
+ }
+
+ memset(name, 0, kAdmMaxDeviceNameSize);
+
+ if (guid != NULL) {
+ memset(guid, 0, kAdmMaxGuidSize);
+ }
+
+ return GetDevicesInfo(1, true, index, name, kAdmMaxDeviceNameSize);
+}
+
+int32_t AudioDeviceLinuxALSA::RecordingDeviceName(
+ uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) {
+ const uint16_t nDevices(RecordingDevices());
+
+ if ((index > (nDevices - 1)) || (name == NULL)) {
+ return -1;
+ }
+
+ memset(name, 0, kAdmMaxDeviceNameSize);
+
+ if (guid != NULL) {
+ memset(guid, 0, kAdmMaxGuidSize);
+ }
+
+ return GetDevicesInfo(1, false, index, name, kAdmMaxDeviceNameSize);
+}
+
+int16_t AudioDeviceLinuxALSA::RecordingDevices() {
+ return (int16_t)GetDevicesInfo(0, false);
+}
+
+int32_t AudioDeviceLinuxALSA::SetRecordingDevice(uint16_t index) {
+ if (_recIsInitialized) {
+ return -1;
+ }
+
+ int32_t nDevices = GetDevicesInfo(0, false);
+  RTC_LOG(LS_VERBOSE) << "number of available audio input devices is "
+ << nDevices;
+
+ if (index > (nDevices - 1)) {
+ RTC_LOG(LS_ERROR) << "device index is out of range [0," << (nDevices - 1)
+ << "]";
+ return -1;
+ }
+
+ _inputDeviceIndex = index;
+ _inputDeviceIsSpecified = true;
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// SetRecordingDevice II (II)
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceLinuxALSA::SetRecordingDevice(
+ AudioDeviceModule::WindowsDeviceType /*device*/) {
+ RTC_LOG(LS_ERROR) << "WindowsDeviceType not supported";
+ return -1;
+}
+
+int32_t AudioDeviceLinuxALSA::PlayoutIsAvailable(bool& available) {
+ available = false;
+
+ // Try to initialize the playout side with mono
+  // Assumes that the user sets the number of channels after calling this function.
+ _playChannels = 1;
+ int32_t res = InitPlayout();
+
+ // Cancel effect of initialization
+ StopPlayout();
+
+ if (res != -1) {
+ available = true;
+ } else {
+ // It may be possible to play out in stereo
+ res = StereoPlayoutIsAvailable(available);
+ if (available) {
+ // Then set channels to 2 so InitPlayout doesn't fail
+ _playChannels = 2;
+ }
+ }
+
+ return res;
+}
+
+int32_t AudioDeviceLinuxALSA::RecordingIsAvailable(bool& available) {
+ available = false;
+
+ // Try to initialize the recording side with mono
+  // Assumes that the user sets the number of channels after calling this function.
+ _recChannels = 1;
+ int32_t res = InitRecording();
+
+ // Cancel effect of initialization
+ StopRecording();
+
+ if (res != -1) {
+ available = true;
+ } else {
+ // It may be possible to record in stereo
+ res = StereoRecordingIsAvailable(available);
+ if (available) {
+      // Then set channels to 2 so InitRecording doesn't fail
+ _recChannels = 2;
+ }
+ }
+
+ return res;
+}
+
+int32_t AudioDeviceLinuxALSA::InitPlayout() {
+ MutexLock lock(&mutex_);
+ return InitPlayoutLocked();
+}
+
+int32_t AudioDeviceLinuxALSA::InitPlayoutLocked() {
+ int errVal = 0;
+
+ if (_playing) {
+ return -1;
+ }
+
+ if (!_outputDeviceIsSpecified) {
+ return -1;
+ }
+
+ if (_playIsInitialized) {
+ return 0;
+ }
+ // Initialize the speaker (devices might have been added or removed)
+ if (InitSpeakerLocked() == -1) {
+ RTC_LOG(LS_WARNING) << "InitSpeaker() failed";
+ }
+
+ // Start by closing any existing wave-output devices
+ //
+ if (_handlePlayout != NULL) {
+    errVal = LATE(snd_pcm_close)(_handlePlayout);
+ _handlePlayout = NULL;
+ _playIsInitialized = false;
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "Error closing current playout sound device, error: "
+ << LATE(snd_strerror)(errVal);
+ }
+ }
+
+ // Open PCM device for playout
+ char deviceName[kAdmMaxDeviceNameSize] = {0};
+ GetDevicesInfo(2, true, _outputDeviceIndex, deviceName,
+ kAdmMaxDeviceNameSize);
+
+ RTC_LOG(LS_VERBOSE) << "InitPlayout open (" << deviceName << ")";
+
+ errVal = LATE(snd_pcm_open)(&_handlePlayout, deviceName,
+ SND_PCM_STREAM_PLAYBACK, SND_PCM_NONBLOCK);
+
+ if (errVal == -EBUSY) // Device busy - try some more!
+ {
+ for (int i = 0; i < 5; i++) {
+ SleepMs(1000);
+ errVal = LATE(snd_pcm_open)(&_handlePlayout, deviceName,
+ SND_PCM_STREAM_PLAYBACK, SND_PCM_NONBLOCK);
+ if (errVal == 0) {
+ break;
+ }
+ }
+ }
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "unable to open playback device: "
+ << LATE(snd_strerror)(errVal) << " (" << errVal << ")";
+ _handlePlayout = NULL;
+ return -1;
+ }
+
+ _playoutFramesIn10MS = _playoutFreq / 100;
+ if ((errVal = LATE(snd_pcm_set_params)(
+ _handlePlayout,
+#if defined(WEBRTC_ARCH_BIG_ENDIAN)
+ SND_PCM_FORMAT_S16_BE,
+#else
+ SND_PCM_FORMAT_S16_LE, // format
+#endif
+ SND_PCM_ACCESS_RW_INTERLEAVED, // access
+ _playChannels, // channels
+ _playoutFreq, // rate
+ 1, // soft_resample
+                 ALSA_PLAYOUT_LATENCY  // requested overall latency in us
+                                       // (40 ms, see ALSA_PLAYOUT_LATENCY)
+                 )) < 0) {
+ _playoutFramesIn10MS = 0;
+ RTC_LOG(LS_ERROR) << "unable to set playback device: "
+ << LATE(snd_strerror)(errVal) << " (" << errVal << ")";
+ ErrorRecovery(errVal, _handlePlayout);
+ errVal = LATE(snd_pcm_close)(_handlePlayout);
+ _handlePlayout = NULL;
+ return -1;
+ }
+
+ errVal = LATE(snd_pcm_get_params)(_handlePlayout, &_playoutBufferSizeInFrame,
+ &_playoutPeriodSizeInFrame);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "snd_pcm_get_params: " << LATE(snd_strerror)(errVal)
+ << " (" << errVal << ")";
+ _playoutBufferSizeInFrame = 0;
+ _playoutPeriodSizeInFrame = 0;
+ } else {
+ RTC_LOG(LS_VERBOSE) << "playout snd_pcm_get_params buffer_size:"
+ << _playoutBufferSizeInFrame
+ << " period_size :" << _playoutPeriodSizeInFrame;
+ }
+
+ if (_ptrAudioBuffer) {
+ // Update webrtc audio buffer with the selected parameters
+ _ptrAudioBuffer->SetPlayoutSampleRate(_playoutFreq);
+ _ptrAudioBuffer->SetPlayoutChannels(_playChannels);
+ }
+
+ // Set play buffer size
+ _playoutBufferSizeIn10MS =
+ LATE(snd_pcm_frames_to_bytes)(_handlePlayout, _playoutFramesIn10MS);
+
+  // Init variables used for play
+
+ if (_handlePlayout != NULL) {
+ _playIsInitialized = true;
+ return 0;
+ } else {
+ return -1;
+ }
+}
+
+int32_t AudioDeviceLinuxALSA::InitRecording() {
+ MutexLock lock(&mutex_);
+ return InitRecordingLocked();
+}
+
+int32_t AudioDeviceLinuxALSA::InitRecordingLocked() {
+ int errVal = 0;
+
+ if (_recording) {
+ return -1;
+ }
+
+ if (!_inputDeviceIsSpecified) {
+ return -1;
+ }
+
+ if (_recIsInitialized) {
+ return 0;
+ }
+
+ // Initialize the microphone (devices might have been added or removed)
+ if (InitMicrophoneLocked() == -1) {
+ RTC_LOG(LS_WARNING) << "InitMicrophone() failed";
+ }
+
+ // Start by closing any existing pcm-input devices
+ //
+ if (_handleRecord != NULL) {
+ int errVal = LATE(snd_pcm_close)(_handleRecord);
+ _handleRecord = NULL;
+ _recIsInitialized = false;
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR)
+ << "Error closing current recording sound device, error: "
+ << LATE(snd_strerror)(errVal);
+ }
+ }
+
+ // Open PCM device for recording
+ // The corresponding settings for playout are made after the record settings
+ char deviceName[kAdmMaxDeviceNameSize] = {0};
+ GetDevicesInfo(2, false, _inputDeviceIndex, deviceName,
+ kAdmMaxDeviceNameSize);
+
+ RTC_LOG(LS_VERBOSE) << "InitRecording open (" << deviceName << ")";
+ errVal = LATE(snd_pcm_open)(&_handleRecord, deviceName,
+ SND_PCM_STREAM_CAPTURE, SND_PCM_NONBLOCK);
+
+ // Available modes: 0 = blocking, SND_PCM_NONBLOCK, SND_PCM_ASYNC
+ if (errVal == -EBUSY) // Device busy - try some more!
+ {
+ for (int i = 0; i < 5; i++) {
+ SleepMs(1000);
+ errVal = LATE(snd_pcm_open)(&_handleRecord, deviceName,
+ SND_PCM_STREAM_CAPTURE, SND_PCM_NONBLOCK);
+ if (errVal == 0) {
+ break;
+ }
+ }
+ }
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "unable to open record device: "
+ << LATE(snd_strerror)(errVal);
+ _handleRecord = NULL;
+ return -1;
+ }
+
+ _recordingFramesIn10MS = _recordingFreq / 100;
+ if ((errVal =
+ LATE(snd_pcm_set_params)(_handleRecord,
+#if defined(WEBRTC_ARCH_BIG_ENDIAN)
+ SND_PCM_FORMAT_S16_BE, // format
+#else
+ SND_PCM_FORMAT_S16_LE, // format
+#endif
+ SND_PCM_ACCESS_RW_INTERLEAVED, // access
+ _recChannels, // channels
+ _recordingFreq, // rate
+ 1, // soft_resample
+ ALSA_CAPTURE_LATENCY // latency in us
+ )) < 0) {
+ // Fall back to another mode then.
+ if (_recChannels == 1)
+ _recChannels = 2;
+ else
+ _recChannels = 1;
+
+ if ((errVal =
+ LATE(snd_pcm_set_params)(_handleRecord,
+#if defined(WEBRTC_ARCH_BIG_ENDIAN)
+ SND_PCM_FORMAT_S16_BE, // format
+#else
+ SND_PCM_FORMAT_S16_LE, // format
+#endif
+ SND_PCM_ACCESS_RW_INTERLEAVED, // access
+ _recChannels, // channels
+ _recordingFreq, // rate
+ 1, // soft_resample
+ ALSA_CAPTURE_LATENCY // latency in us
+ )) < 0) {
+ _recordingFramesIn10MS = 0;
+ RTC_LOG(LS_ERROR) << "unable to set record settings: "
+ << LATE(snd_strerror)(errVal) << " (" << errVal << ")";
+ ErrorRecovery(errVal, _handleRecord);
+ errVal = LATE(snd_pcm_close)(_handleRecord);
+ _handleRecord = NULL;
+ return -1;
+ }
+ }
+
+ errVal = LATE(snd_pcm_get_params)(_handleRecord, &_recordingBuffersizeInFrame,
+ &_recordingPeriodSizeInFrame);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "snd_pcm_get_params " << LATE(snd_strerror)(errVal)
+ << " (" << errVal << ")";
+ _recordingBuffersizeInFrame = 0;
+ _recordingPeriodSizeInFrame = 0;
+ } else {
+ RTC_LOG(LS_VERBOSE) << "capture snd_pcm_get_params, buffer_size:"
+ << _recordingBuffersizeInFrame
+ << ", period_size:" << _recordingPeriodSizeInFrame;
+ }
+
+ if (_ptrAudioBuffer) {
+ // Update webrtc audio buffer with the selected parameters
+ _ptrAudioBuffer->SetRecordingSampleRate(_recordingFreq);
+ _ptrAudioBuffer->SetRecordingChannels(_recChannels);
+ }
+
+ // Set rec buffer size and create buffer
+ _recordingBufferSizeIn10MS =
+ LATE(snd_pcm_frames_to_bytes)(_handleRecord, _recordingFramesIn10MS);
+
+ if (_handleRecord != NULL) {
+ // Mark recording side as initialized
+ _recIsInitialized = true;
+ return 0;
+ } else {
+ return -1;
+ }
+}
+
+int32_t AudioDeviceLinuxALSA::StartRecording() {
+ if (!_recIsInitialized) {
+ return -1;
+ }
+
+ if (_recording) {
+ return 0;
+ }
+
+ _recording = true;
+
+ int errVal = 0;
+ _recordingFramesLeft = _recordingFramesIn10MS;
+
+ // Make sure we only create the buffer once.
+ if (!_recordingBuffer)
+ _recordingBuffer = new int8_t[_recordingBufferSizeIn10MS];
+ if (!_recordingBuffer) {
+ RTC_LOG(LS_ERROR) << "failed to alloc recording buffer";
+ _recording = false;
+ return -1;
+ }
+ // RECORDING
+ _ptrThreadRec = rtc::PlatformThread::SpawnJoinable(
+ [this] {
+ while (RecThreadProcess()) {
+ }
+ },
+ "webrtc_audio_module_capture_thread",
+ rtc::ThreadAttributes().SetPriority(rtc::ThreadPriority::kRealtime));
+
+ errVal = LATE(snd_pcm_prepare)(_handleRecord);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "capture snd_pcm_prepare failed ("
+ << LATE(snd_strerror)(errVal) << ")\n";
+    // Just log the error here; if the device could not be prepared, the
+    // snd_pcm_start() call below will fail and we return -1 there.
+ }
+
+ errVal = LATE(snd_pcm_start)(_handleRecord);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "capture snd_pcm_start err: "
+ << LATE(snd_strerror)(errVal);
+ errVal = LATE(snd_pcm_start)(_handleRecord);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "capture snd_pcm_start 2nd try err: "
+ << LATE(snd_strerror)(errVal);
+ StopRecording();
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::StopRecording() {
+ MutexLock lock(&mutex_);
+ return StopRecordingLocked();
+}
+
+int32_t AudioDeviceLinuxALSA::StopRecordingLocked() {
+ if (!_recIsInitialized) {
+ return 0;
+ }
+
+ if (_handleRecord == NULL) {
+ return -1;
+ }
+
+ // Make sure we don't start recording (it's asynchronous).
+ _recIsInitialized = false;
+ _recording = false;
+
+ _ptrThreadRec.Finalize();
+
+ _recordingFramesLeft = 0;
+ if (_recordingBuffer) {
+ delete[] _recordingBuffer;
+ _recordingBuffer = NULL;
+ }
+
+ // Stop and close pcm recording device.
+ int errVal = LATE(snd_pcm_drop)(_handleRecord);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "Error stop recording: " << LATE(snd_strerror)(errVal);
+ return -1;
+ }
+
+ errVal = LATE(snd_pcm_close)(_handleRecord);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "Error closing record sound device, error: "
+ << LATE(snd_strerror)(errVal);
+ return -1;
+ }
+
+ // Check if we have muted and unmute if so.
+ bool muteEnabled = false;
+ MicrophoneMute(muteEnabled);
+ if (muteEnabled) {
+ SetMicrophoneMute(false);
+ }
+
+ // set the pcm input handle to NULL
+ _handleRecord = NULL;
+ return 0;
+}
+
+bool AudioDeviceLinuxALSA::RecordingIsInitialized() const {
+ return (_recIsInitialized);
+}
+
+bool AudioDeviceLinuxALSA::Recording() const {
+ return (_recording);
+}
+
+bool AudioDeviceLinuxALSA::PlayoutIsInitialized() const {
+ return (_playIsInitialized);
+}
+
+int32_t AudioDeviceLinuxALSA::StartPlayout() {
+ if (!_playIsInitialized) {
+ return -1;
+ }
+
+ if (_playing) {
+ return 0;
+ }
+
+ _playing = true;
+
+ _playoutFramesLeft = 0;
+ if (!_playoutBuffer)
+ _playoutBuffer = new int8_t[_playoutBufferSizeIn10MS];
+ if (!_playoutBuffer) {
+ RTC_LOG(LS_ERROR) << "failed to alloc playout buf";
+ _playing = false;
+ return -1;
+ }
+
+ // PLAYOUT
+ _ptrThreadPlay = rtc::PlatformThread::SpawnJoinable(
+ [this] {
+ while (PlayThreadProcess()) {
+ }
+ },
+ "webrtc_audio_module_play_thread",
+ rtc::ThreadAttributes().SetPriority(rtc::ThreadPriority::kRealtime));
+
+ int errVal = LATE(snd_pcm_prepare)(_handlePlayout);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "playout snd_pcm_prepare failed ("
+ << LATE(snd_strerror)(errVal) << ")\n";
+    // Just log the error here; if the device could not be prepared, the
+    // playout thread will hit a write error and attempt recovery.
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::StopPlayout() {
+ MutexLock lock(&mutex_);
+ return StopPlayoutLocked();
+}
+
+int32_t AudioDeviceLinuxALSA::StopPlayoutLocked() {
+ if (!_playIsInitialized) {
+ return 0;
+ }
+
+ if (_handlePlayout == NULL) {
+ return -1;
+ }
+
+ _playing = false;
+
+ // stop playout thread first
+ _ptrThreadPlay.Finalize();
+
+ _playoutFramesLeft = 0;
+ delete[] _playoutBuffer;
+ _playoutBuffer = NULL;
+
+ // stop and close pcm playout device
+ int errVal = LATE(snd_pcm_drop)(_handlePlayout);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "Error stop playing: " << LATE(snd_strerror)(errVal);
+ }
+
+ errVal = LATE(snd_pcm_close)(_handlePlayout);
+ if (errVal < 0)
+ RTC_LOG(LS_ERROR) << "Error closing playout sound device, error: "
+ << LATE(snd_strerror)(errVal);
+
+ // set the pcm input handle to NULL
+ _playIsInitialized = false;
+ _handlePlayout = NULL;
+ RTC_LOG(LS_VERBOSE) << "handle_playout is now set to NULL";
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::PlayoutDelay(uint16_t& delayMS) const {
+ delayMS = (uint16_t)_playoutDelay * 1000 / _playoutFreq;
+ return 0;
+}
+
+bool AudioDeviceLinuxALSA::Playing() const {
+ return (_playing);
+}
+
+// ============================================================================
+// Private Methods
+// ============================================================================
+
+int32_t AudioDeviceLinuxALSA::GetDevicesInfo(const int32_t function,
+ const bool playback,
+ const int32_t enumDeviceNo,
+ char* enumDeviceName,
+ const int32_t ednLen) const {
+ // Device enumeration based on libjingle implementation
+ // by Tristan Schmelcher at Google Inc.
+
+ const char* type = playback ? "Output" : "Input";
+ // dmix and dsnoop are only for playback and capture, respectively, but ALSA
+ // stupidly includes them in both lists.
+ const char* ignorePrefix = playback ? "dsnoop:" : "dmix:";
+ // (ALSA lists many more "devices" of questionable interest, but we show them
+ // just in case the weird devices may actually be desirable for some
+ // users/systems.)
+
+ int err;
+ int enumCount(0);
+ bool keepSearching(true);
+
+ // From Chromium issue 95797
+ // Loop through the sound cards to get Alsa device hints.
+  // Don't use snd_device_name_hint(-1,..) since there is an access violation
+ // inside this ALSA API with libasound.so.2.0.0.
+ int card = -1;
+ while (!(LATE(snd_card_next)(&card)) && (card >= 0) && keepSearching) {
+ void** hints;
+ err = LATE(snd_device_name_hint)(card, "pcm", &hints);
+ if (err != 0) {
+ RTC_LOG(LS_ERROR) << "GetDevicesInfo - device name hint error: "
+ << LATE(snd_strerror)(err);
+ return -1;
+ }
+
+ enumCount++; // default is 0
+ if ((function == FUNC_GET_DEVICE_NAME ||
+ function == FUNC_GET_DEVICE_NAME_FOR_AN_ENUM) &&
+ enumDeviceNo == 0) {
+ strcpy(enumDeviceName, "default");
+
+ err = LATE(snd_device_name_free_hint)(hints);
+ if (err != 0) {
+ RTC_LOG(LS_ERROR) << "GetDevicesInfo - device name free hint error: "
+ << LATE(snd_strerror)(err);
+ }
+
+ return 0;
+ }
+
+ for (void** list = hints; *list != NULL; ++list) {
+ char* actualType = LATE(snd_device_name_get_hint)(*list, "IOID");
+ if (actualType) { // NULL means it's both.
+ bool wrongType = (strcmp(actualType, type) != 0);
+ free(actualType);
+ if (wrongType) {
+ // Wrong type of device (i.e., input vs. output).
+ continue;
+ }
+ }
+
+ char* name = LATE(snd_device_name_get_hint)(*list, "NAME");
+ if (!name) {
+ RTC_LOG(LS_ERROR) << "Device has no name";
+ // Skip it.
+ continue;
+ }
+
+ // Now check if we actually want to show this device.
+ if (strcmp(name, "default") != 0 && strcmp(name, "null") != 0 &&
+ strcmp(name, "pulse") != 0 &&
+ strncmp(name, ignorePrefix, strlen(ignorePrefix)) != 0) {
+ // Yes, we do.
+ char* desc = LATE(snd_device_name_get_hint)(*list, "DESC");
+ if (!desc) {
+ // Virtual devices don't necessarily have descriptions.
+ // Use their names instead.
+ desc = name;
+ }
+
+ if (FUNC_GET_NUM_OF_DEVICE == function) {
+ RTC_LOG(LS_VERBOSE) << "Enum device " << enumCount << " - " << name;
+ }
+ if ((FUNC_GET_DEVICE_NAME == function) && (enumDeviceNo == enumCount)) {
+ // We have found the enum device, copy the name to buffer.
+ strncpy(enumDeviceName, desc, ednLen);
+ enumDeviceName[ednLen - 1] = '\0';
+ keepSearching = false;
+ // Replace '\n' with '-'.
+ char* pret = strchr(enumDeviceName, '\n' /*0xa*/); // LF
+ if (pret)
+ *pret = '-';
+ }
+ if ((FUNC_GET_DEVICE_NAME_FOR_AN_ENUM == function) &&
+ (enumDeviceNo == enumCount)) {
+ // We have found the enum device, copy the name to buffer.
+ strncpy(enumDeviceName, name, ednLen);
+ enumDeviceName[ednLen - 1] = '\0';
+ keepSearching = false;
+ }
+
+ if (keepSearching)
+ ++enumCount;
+
+ if (desc != name)
+ free(desc);
+ }
+
+ free(name);
+
+ if (!keepSearching)
+ break;
+ }
+
+ err = LATE(snd_device_name_free_hint)(hints);
+ if (err != 0) {
+ RTC_LOG(LS_ERROR) << "GetDevicesInfo - device name free hint error: "
+ << LATE(snd_strerror)(err);
+ // Continue and return true anyway, since we did get the whole list.
+ }
+ }
+
+ if (FUNC_GET_NUM_OF_DEVICE == function) {
+ if (enumCount == 1) // only default?
+ enumCount = 0;
+ return enumCount; // Normal return point for function 0
+ }
+
+ if (keepSearching) {
+ // If we get here for function 1 and 2, we didn't find the specified
+ // enum device.
+ RTC_LOG(LS_ERROR)
+ << "GetDevicesInfo - Could not find device name or numbers";
+ return -1;
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::InputSanityCheckAfterUnlockedPeriod() const {
+ if (_handleRecord == NULL) {
+ RTC_LOG(LS_ERROR) << "input state has been modified during unlocked period";
+ return -1;
+ }
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::OutputSanityCheckAfterUnlockedPeriod() const {
+ if (_handlePlayout == NULL) {
+ RTC_LOG(LS_ERROR)
+ << "output state has been modified during unlocked period";
+ return -1;
+ }
+ return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::ErrorRecovery(int32_t error,
+ snd_pcm_t* deviceHandle) {
+ int st = LATE(snd_pcm_state)(deviceHandle);
+ RTC_LOG(LS_VERBOSE) << "Trying to recover from "
+ << ((LATE(snd_pcm_stream)(deviceHandle) ==
+ SND_PCM_STREAM_CAPTURE)
+ ? "capture"
+ : "playout")
+ << " error: " << LATE(snd_strerror)(error) << " ("
+ << error << ") (state " << st << ")";
+
+ // It is recommended to use snd_pcm_recover for all errors. If that function
+ // cannot handle the error, the input error code will be returned, otherwise
+ // 0 is returned. From snd_pcm_recover API doc: "This functions handles
+ // -EINTR (4) (interrupted system call), -EPIPE (32) (playout overrun or
+ // capture underrun) and -ESTRPIPE (86) (stream is suspended) error codes
+ // trying to prepare given stream for next I/O."
+
+ /** Open */
+ // SND_PCM_STATE_OPEN = 0,
+ /** Setup installed */
+ // SND_PCM_STATE_SETUP,
+ /** Ready to start */
+ // SND_PCM_STATE_PREPARED,
+ /** Running */
+ // SND_PCM_STATE_RUNNING,
+ /** Stopped: underrun (playback) or overrun (capture) detected */
+ // SND_PCM_STATE_XRUN,= 4
+ /** Draining: running (playback) or stopped (capture) */
+ // SND_PCM_STATE_DRAINING,
+ /** Paused */
+ // SND_PCM_STATE_PAUSED,
+ /** Hardware is suspended */
+ // SND_PCM_STATE_SUSPENDED,
+  /** Hardware is disconnected */
+ // SND_PCM_STATE_DISCONNECTED,
+ // SND_PCM_STATE_LAST = SND_PCM_STATE_DISCONNECTED
+
+ // snd_pcm_recover isn't available in older alsa, e.g. on the FC4 machine
+ // in Sthlm lab.
+
+ int res = LATE(snd_pcm_recover)(deviceHandle, error, 1);
+ if (0 == res) {
+ RTC_LOG(LS_VERBOSE) << "Recovery - snd_pcm_recover OK";
+
+ if ((error == -EPIPE || error == -ESTRPIPE) && // Buf underrun/overrun.
+ _recording &&
+ LATE(snd_pcm_stream)(deviceHandle) == SND_PCM_STREAM_CAPTURE) {
+ // For capture streams we also have to repeat the explicit start()
+ // to get data flowing again.
+ int err = LATE(snd_pcm_start)(deviceHandle);
+ if (err != 0) {
+ RTC_LOG(LS_ERROR) << "Recovery - snd_pcm_start error: " << err;
+ return -1;
+ }
+ }
+
+ if ((error == -EPIPE || error == -ESTRPIPE) && // Buf underrun/overrun.
+ _playing &&
+ LATE(snd_pcm_stream)(deviceHandle) == SND_PCM_STREAM_PLAYBACK) {
+      // Also restart the playback stream explicitly here so that data starts
+      // flowing again after recovery.
+ int err = LATE(snd_pcm_start)(deviceHandle);
+ if (err != 0) {
+ RTC_LOG(LS_ERROR) << "Recovery - snd_pcm_start error: "
+ << LATE(snd_strerror)(err);
+ return -1;
+ }
+ }
+
+ return -EPIPE == error ? 1 : 0;
+ } else {
+ RTC_LOG(LS_ERROR) << "Unrecoverable alsa stream error: " << res;
+ }
+
+ return res;
+}
+
+// ============================================================================
+// Thread Methods
+// ============================================================================
+
+bool AudioDeviceLinuxALSA::PlayThreadProcess() {
+ if (!_playing)
+ return false;
+
+ int err;
+ snd_pcm_sframes_t frames;
+ snd_pcm_sframes_t avail_frames;
+
+ Lock();
+ // return a positive number of frames ready otherwise a negative error code
+ avail_frames = LATE(snd_pcm_avail_update)(_handlePlayout);
+ if (avail_frames < 0) {
+ RTC_LOG(LS_ERROR) << "playout snd_pcm_avail_update error: "
+ << LATE(snd_strerror)(avail_frames);
+ ErrorRecovery(avail_frames, _handlePlayout);
+ UnLock();
+ return true;
+ } else if (avail_frames == 0) {
+ UnLock();
+
+    // maximum time in milliseconds to wait, a negative value means infinity
+ err = LATE(snd_pcm_wait)(_handlePlayout, 2);
+    if (err == 0) {  // timeout occurred
+ RTC_LOG(LS_VERBOSE) << "playout snd_pcm_wait timeout";
+ }
+
+ return true;
+ }
+
+ if (_playoutFramesLeft <= 0) {
+ UnLock();
+ _ptrAudioBuffer->RequestPlayoutData(_playoutFramesIn10MS);
+ Lock();
+
+ _playoutFramesLeft = _ptrAudioBuffer->GetPlayoutData(_playoutBuffer);
+ RTC_DCHECK_EQ(_playoutFramesLeft, _playoutFramesIn10MS);
+ }
+
+ if (static_cast<uint32_t>(avail_frames) > _playoutFramesLeft)
+ avail_frames = _playoutFramesLeft;
+
+ int size = LATE(snd_pcm_frames_to_bytes)(_handlePlayout, _playoutFramesLeft);
+ frames = LATE(snd_pcm_writei)(
+ _handlePlayout, &_playoutBuffer[_playoutBufferSizeIn10MS - size],
+ avail_frames);
+
+ if (frames < 0) {
+ RTC_LOG(LS_VERBOSE) << "playout snd_pcm_writei error: "
+ << LATE(snd_strerror)(frames);
+ _playoutFramesLeft = 0;
+ ErrorRecovery(frames, _handlePlayout);
+ UnLock();
+ return true;
+ } else {
+ RTC_DCHECK_EQ(frames, avail_frames);
+ _playoutFramesLeft -= frames;
+ }
+
+ UnLock();
+ return true;
+}
+
+bool AudioDeviceLinuxALSA::RecThreadProcess() {
+ if (!_recording)
+ return false;
+
+ int err;
+ snd_pcm_sframes_t frames;
+ snd_pcm_sframes_t avail_frames;
+ int8_t buffer[_recordingBufferSizeIn10MS];
+
+ Lock();
+
+ // return a positive number of frames ready otherwise a negative error code
+ avail_frames = LATE(snd_pcm_avail_update)(_handleRecord);
+ if (avail_frames < 0) {
+ RTC_LOG(LS_ERROR) << "capture snd_pcm_avail_update error: "
+ << LATE(snd_strerror)(avail_frames);
+ ErrorRecovery(avail_frames, _handleRecord);
+ UnLock();
+ return true;
+ } else if (avail_frames == 0) { // no frame is available now
+ UnLock();
+
+ // maximum time in milliseconds to wait, a negative value means infinity
+ err = LATE(snd_pcm_wait)(_handleRecord, ALSA_CAPTURE_WAIT_TIMEOUT);
+    if (err == 0)  // timeout occurred
+ RTC_LOG(LS_VERBOSE) << "capture snd_pcm_wait timeout";
+
+ return true;
+ }
+
+ if (static_cast<uint32_t>(avail_frames) > _recordingFramesLeft)
+ avail_frames = _recordingFramesLeft;
+
+ frames = LATE(snd_pcm_readi)(_handleRecord, buffer,
+                               avail_frames);  // frames to read
+ if (frames < 0) {
+ RTC_LOG(LS_ERROR) << "capture snd_pcm_readi error: "
+ << LATE(snd_strerror)(frames);
+ ErrorRecovery(frames, _handleRecord);
+ UnLock();
+ return true;
+ } else if (frames > 0) {
+ RTC_DCHECK_EQ(frames, avail_frames);
+
+ int left_size =
+ LATE(snd_pcm_frames_to_bytes)(_handleRecord, _recordingFramesLeft);
+ int size = LATE(snd_pcm_frames_to_bytes)(_handleRecord, frames);
+
+ memcpy(&_recordingBuffer[_recordingBufferSizeIn10MS - left_size], buffer,
+ size);
+ _recordingFramesLeft -= frames;
+
+ if (!_recordingFramesLeft) { // buf is full
+ _recordingFramesLeft = _recordingFramesIn10MS;
+
+ // store the recorded buffer (no action will be taken if the
+ // #recorded samples is not a full buffer)
+ _ptrAudioBuffer->SetRecordedBuffer(_recordingBuffer,
+ _recordingFramesIn10MS);
+
+ // calculate delay
+ _playoutDelay = 0;
+ _recordingDelay = 0;
+ if (_handlePlayout) {
+ err = LATE(snd_pcm_delay)(_handlePlayout,
+ &_playoutDelay); // returned delay in frames
+ if (err < 0) {
+ // TODO(xians): Shall we call ErrorRecovery() here?
+ _playoutDelay = 0;
+ RTC_LOG(LS_ERROR)
+ << "playout snd_pcm_delay: " << LATE(snd_strerror)(err);
+ }
+ }
+
+ err = LATE(snd_pcm_delay)(_handleRecord,
+ &_recordingDelay); // returned delay in frames
+ if (err < 0) {
+ // TODO(xians): Shall we call ErrorRecovery() here?
+ _recordingDelay = 0;
+ RTC_LOG(LS_ERROR) << "capture snd_pcm_delay: "
+ << LATE(snd_strerror)(err);
+ }
+
+ // TODO(xians): Shall we add 10ms buffer delay to the record delay?
+ _ptrAudioBuffer->SetVQEData(_playoutDelay * 1000 / _playoutFreq,
+ _recordingDelay * 1000 / _recordingFreq);
+
+ _ptrAudioBuffer->SetTypingStatus(KeyPressed());
+
+ // Deliver recorded samples at specified sample rate, mic level etc.
+ // to the observer using callback.
+ UnLock();
+ _ptrAudioBuffer->DeliverRecordedData();
+ Lock();
+ }
+ }
+
+ UnLock();
+ return true;
+}
+
+bool AudioDeviceLinuxALSA::KeyPressed() const {
+#if defined(WEBRTC_USE_X11)
+ char szKey[32];
+ unsigned int i = 0;
+ char state = 0;
+
+ if (!_XDisplay)
+ return false;
+
+ // Check key map status
+ XQueryKeymap(_XDisplay, szKey);
+
+ // A bit change in keymap means a key is pressed
+ for (i = 0; i < sizeof(szKey); i++)
+ state |= (szKey[i] ^ _oldKeyState[i]) & szKey[i];
+
+ // Save old state
+ memcpy((char*)_oldKeyState, (char*)szKey, sizeof(_oldKeyState));
+ return (state != 0);
+#else
+ return false;
+#endif
+}
+} // namespace webrtc
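Every libasound call in the file above goes through the LATE() macro, which resolves the function from the lazily loaded AlsaSymbolTable instead of calling libasound directly, so the binary does not link against libasound and still runs on systems where ALSA is absent. The net effect is the classic dlopen()/dlsym() pattern; the minimal sketch below shows that idea with plain dlfcn calls, as an illustration only, not the actual LateBindingSymbolTable/LATESYM_GET machinery.

  #include <dlfcn.h>

  #include <cstdio>

  int main() {
    // Load libasound at runtime; failure just means ALSA is unavailable.
    void* lib = dlopen("libasound.so.2", RTLD_NOW);
    if (!lib) {
      std::fprintf(stderr, "ALSA not available: %s\n", dlerror());
      return 0;  // Degrade gracefully instead of failing to start.
    }
    // snd_strerror has the signature: const char* snd_strerror(int errnum).
    using SndStrerrorFn = const char* (*)(int);
    auto snd_strerror_fn =
        reinterpret_cast<SndStrerrorFn>(dlsym(lib, "snd_strerror"));
    if (snd_strerror_fn)
      std::printf("snd_strerror(0) -> %s\n", snd_strerror_fn(0));
    dlclose(lib);
    return 0;
  }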
diff --git a/third_party/libwebrtc/modules/audio_device/linux/audio_device_alsa_linux.h b/third_party/libwebrtc/modules/audio_device/linux/audio_device_alsa_linux.h
new file mode 100644
index 0000000000..23e21d3ce9
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/linux/audio_device_alsa_linux.h
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_DEVICE_AUDIO_DEVICE_ALSA_LINUX_H_
+#define AUDIO_DEVICE_AUDIO_DEVICE_ALSA_LINUX_H_
+
+#include <memory>
+
+#include "modules/audio_device/audio_device_generic.h"
+#include "modules/audio_device/linux/audio_mixer_manager_alsa_linux.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/synchronization/mutex.h"
+
+#if defined(WEBRTC_USE_X11)
+#include <X11/Xlib.h>
+#endif
+#include <alsa/asoundlib.h>
+#include <sys/ioctl.h>
+#include <sys/soundcard.h>
+
+typedef webrtc::adm_linux_alsa::AlsaSymbolTable WebRTCAlsaSymbolTable;
+WebRTCAlsaSymbolTable* GetAlsaSymbolTable();
+
+namespace webrtc {
+
+class AudioDeviceLinuxALSA : public AudioDeviceGeneric {
+ public:
+ AudioDeviceLinuxALSA();
+ virtual ~AudioDeviceLinuxALSA();
+
+ // Retrieve the currently utilized audio layer
+ int32_t ActiveAudioLayer(
+ AudioDeviceModule::AudioLayer& audioLayer) const override;
+
+  // Main initialization and termination
+ InitStatus Init() RTC_LOCKS_EXCLUDED(mutex_) override;
+ int32_t Terminate() RTC_LOCKS_EXCLUDED(mutex_) override;
+ bool Initialized() const override;
+
+ // Device enumeration
+ int16_t PlayoutDevices() override;
+ int16_t RecordingDevices() override;
+ int32_t PlayoutDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) override;
+ int32_t RecordingDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) override;
+
+ // Device selection
+ int32_t SetPlayoutDevice(uint16_t index) override;
+ int32_t SetPlayoutDevice(
+ AudioDeviceModule::WindowsDeviceType device) override;
+ int32_t SetRecordingDevice(uint16_t index) override;
+ int32_t SetRecordingDevice(
+ AudioDeviceModule::WindowsDeviceType device) override;
+
+ // Audio transport initialization
+ int32_t PlayoutIsAvailable(bool& available) override;
+ int32_t InitPlayout() RTC_LOCKS_EXCLUDED(mutex_) override;
+ bool PlayoutIsInitialized() const override;
+ int32_t RecordingIsAvailable(bool& available) override;
+ int32_t InitRecording() RTC_LOCKS_EXCLUDED(mutex_) override;
+ bool RecordingIsInitialized() const override;
+
+ // Audio transport control
+ int32_t StartPlayout() override;
+ int32_t StopPlayout() RTC_LOCKS_EXCLUDED(mutex_) override;
+ bool Playing() const override;
+ int32_t StartRecording() override;
+ int32_t StopRecording() RTC_LOCKS_EXCLUDED(mutex_) override;
+ bool Recording() const override;
+
+ // Audio mixer initialization
+ int32_t InitSpeaker() RTC_LOCKS_EXCLUDED(mutex_) override;
+ bool SpeakerIsInitialized() const override;
+ int32_t InitMicrophone() RTC_LOCKS_EXCLUDED(mutex_) override;
+ bool MicrophoneIsInitialized() const override;
+
+ // Speaker volume controls
+ int32_t SpeakerVolumeIsAvailable(bool& available) override;
+ int32_t SetSpeakerVolume(uint32_t volume) override;
+ int32_t SpeakerVolume(uint32_t& volume) const override;
+ int32_t MaxSpeakerVolume(uint32_t& maxVolume) const override;
+ int32_t MinSpeakerVolume(uint32_t& minVolume) const override;
+
+ // Microphone volume controls
+ int32_t MicrophoneVolumeIsAvailable(bool& available) override;
+ int32_t SetMicrophoneVolume(uint32_t volume) override;
+ int32_t MicrophoneVolume(uint32_t& volume) const override;
+ int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const override;
+ int32_t MinMicrophoneVolume(uint32_t& minVolume) const override;
+
+ // Speaker mute control
+ int32_t SpeakerMuteIsAvailable(bool& available) override;
+ int32_t SetSpeakerMute(bool enable) override;
+ int32_t SpeakerMute(bool& enabled) const override;
+
+ // Microphone mute control
+ int32_t MicrophoneMuteIsAvailable(bool& available) override;
+ int32_t SetMicrophoneMute(bool enable) override;
+ int32_t MicrophoneMute(bool& enabled) const override;
+
+ // Stereo support
+ int32_t StereoPlayoutIsAvailable(bool& available)
+ RTC_LOCKS_EXCLUDED(mutex_) override;
+ int32_t SetStereoPlayout(bool enable) override;
+ int32_t StereoPlayout(bool& enabled) const override;
+ int32_t StereoRecordingIsAvailable(bool& available)
+ RTC_LOCKS_EXCLUDED(mutex_) override;
+ int32_t SetStereoRecording(bool enable) override;
+ int32_t StereoRecording(bool& enabled) const override;
+
+ // Delay information and control
+ int32_t PlayoutDelay(uint16_t& delayMS) const override;
+
+ void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer)
+ RTC_LOCKS_EXCLUDED(mutex_) override;
+
+ private:
+ int32_t InitRecordingLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ int32_t StopRecordingLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ int32_t StopPlayoutLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ int32_t InitPlayoutLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ int32_t InitSpeakerLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ int32_t InitMicrophoneLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ int32_t GetDevicesInfo(int32_t function,
+ bool playback,
+ int32_t enumDeviceNo = 0,
+ char* enumDeviceName = NULL,
+ int32_t ednLen = 0) const;
+ int32_t ErrorRecovery(int32_t error, snd_pcm_t* deviceHandle);
+
+ bool KeyPressed() const;
+
+ void Lock() RTC_EXCLUSIVE_LOCK_FUNCTION(mutex_) { mutex_.Lock(); }
+ void UnLock() RTC_UNLOCK_FUNCTION(mutex_) { mutex_.Unlock(); }
+
+ inline int32_t InputSanityCheckAfterUnlockedPeriod() const;
+ inline int32_t OutputSanityCheckAfterUnlockedPeriod() const;
+
+ static void RecThreadFunc(void*);
+ static void PlayThreadFunc(void*);
+ bool RecThreadProcess();
+ bool PlayThreadProcess();
+
+ AudioDeviceBuffer* _ptrAudioBuffer;
+
+ Mutex mutex_;
+
+ rtc::PlatformThread _ptrThreadRec;
+ rtc::PlatformThread _ptrThreadPlay;
+
+ AudioMixerManagerLinuxALSA _mixerManager;
+
+ uint16_t _inputDeviceIndex;
+ uint16_t _outputDeviceIndex;
+ bool _inputDeviceIsSpecified;
+ bool _outputDeviceIsSpecified;
+
+ snd_pcm_t* _handleRecord;
+ snd_pcm_t* _handlePlayout;
+
+ snd_pcm_uframes_t _recordingBuffersizeInFrame;
+ snd_pcm_uframes_t _recordingPeriodSizeInFrame;
+ snd_pcm_uframes_t _playoutBufferSizeInFrame;
+ snd_pcm_uframes_t _playoutPeriodSizeInFrame;
+
+ ssize_t _recordingBufferSizeIn10MS;
+ ssize_t _playoutBufferSizeIn10MS;
+ uint32_t _recordingFramesIn10MS;
+ uint32_t _playoutFramesIn10MS;
+
+ uint32_t _recordingFreq;
+ uint32_t _playoutFreq;
+ uint8_t _recChannels;
+ uint8_t _playChannels;
+
+  int8_t* _recordingBuffer;  // in bytes
+  int8_t* _playoutBuffer;    // in bytes
+ uint32_t _recordingFramesLeft;
+ uint32_t _playoutFramesLeft;
+
+ bool _initialized;
+ bool _recording;
+ bool _playing;
+ bool _recIsInitialized;
+ bool _playIsInitialized;
+
+ snd_pcm_sframes_t _recordingDelay;
+ snd_pcm_sframes_t _playoutDelay;
+
+ char _oldKeyState[32];
+#if defined(WEBRTC_USE_X11)
+ Display* _XDisplay;
+#endif
+};
+
+} // namespace webrtc
+
+#endif  // AUDIO_DEVICE_AUDIO_DEVICE_ALSA_LINUX_H_
diff --git a/third_party/libwebrtc/modules/audio_device/linux/audio_device_pulse_linux.cc b/third_party/libwebrtc/modules/audio_device/linux/audio_device_pulse_linux.cc
new file mode 100644
index 0000000000..90cd58c497
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/linux/audio_device_pulse_linux.cc
@@ -0,0 +1,2286 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/linux/audio_device_pulse_linux.h"
+
+#include <string.h>
+
+#include "modules/audio_device/linux/latebindingsymboltable_linux.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/platform_thread.h"
+
+WebRTCPulseSymbolTable* GetPulseSymbolTable() {
+ static WebRTCPulseSymbolTable* pulse_symbol_table =
+ new WebRTCPulseSymbolTable();
+ return pulse_symbol_table;
+}
+
+// Accesses Pulse functions through our late-binding symbol table instead of
+// directly. This way we don't have to link to libpulse, which means our binary
+// will work on systems that don't have it.
+#define LATE(sym) \
+ LATESYM_GET(webrtc::adm_linux_pulse::PulseAudioSymbolTable, \
+ GetPulseSymbolTable(), sym)
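+
+// Usage sketch (editorial illustration, not part of the upstream change): a
+// libpulse call made through the macro looks like an ordinary call, e.g.
+//
+//   pa_operation* op =
+//       LATE(pa_context_get_server_info)(_paContext, PaServerInfoCallback, this);
+//
+// except that the function pointer is resolved through the lazily loaded
+// symbol table, so such calls only work after GetPulseSymbolTable()->Load()
+// has succeeded (see InitPulseAudio() below).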
+
+namespace webrtc {
+
+AudioDeviceLinuxPulse::AudioDeviceLinuxPulse()
+ : _ptrAudioBuffer(NULL),
+ _inputDeviceIndex(0),
+ _outputDeviceIndex(0),
+ _inputDeviceIsSpecified(false),
+ _outputDeviceIsSpecified(false),
+ sample_rate_hz_(0),
+ _recChannels(1),
+ _playChannels(1),
+ _initialized(false),
+ _recording(false),
+ _playing(false),
+ _recIsInitialized(false),
+ _playIsInitialized(false),
+ _startRec(false),
+ _startPlay(false),
+ update_speaker_volume_at_startup_(false),
+ quit_(false),
+ _sndCardPlayDelay(0),
+ _writeErrors(0),
+ _deviceIndex(-1),
+ _numPlayDevices(0),
+ _numRecDevices(0),
+ _playDeviceName(NULL),
+ _recDeviceName(NULL),
+ _playDisplayDeviceName(NULL),
+ _recDisplayDeviceName(NULL),
+ _playBuffer(NULL),
+ _playbackBufferSize(0),
+ _playbackBufferUnused(0),
+ _tempBufferSpace(0),
+ _recBuffer(NULL),
+ _recordBufferSize(0),
+ _recordBufferUsed(0),
+ _tempSampleData(NULL),
+ _tempSampleDataSize(0),
+ _configuredLatencyPlay(0),
+ _configuredLatencyRec(0),
+ _paDeviceIndex(-1),
+ _paStateChanged(false),
+ _paMainloop(NULL),
+ _paMainloopApi(NULL),
+ _paContext(NULL),
+ _recStream(NULL),
+ _playStream(NULL),
+ _recStreamFlags(0),
+ _playStreamFlags(0) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << " created";
+
+ memset(_paServerVersion, 0, sizeof(_paServerVersion));
+ memset(&_playBufferAttr, 0, sizeof(_playBufferAttr));
+ memset(&_recBufferAttr, 0, sizeof(_recBufferAttr));
+ memset(_oldKeyState, 0, sizeof(_oldKeyState));
+}
+
+AudioDeviceLinuxPulse::~AudioDeviceLinuxPulse() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << " destroyed";
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ Terminate();
+
+ if (_recBuffer) {
+ delete[] _recBuffer;
+ _recBuffer = NULL;
+ }
+ if (_playBuffer) {
+ delete[] _playBuffer;
+ _playBuffer = NULL;
+ }
+ if (_playDeviceName) {
+ delete[] _playDeviceName;
+ _playDeviceName = NULL;
+ }
+ if (_recDeviceName) {
+ delete[] _recDeviceName;
+ _recDeviceName = NULL;
+ }
+}
+
+void AudioDeviceLinuxPulse::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+
+ _ptrAudioBuffer = audioBuffer;
+
+ // Inform the AudioBuffer about default settings for this implementation.
+ // Set all values to zero here since the actual settings will be done by
+ // InitPlayout and InitRecording later.
+ _ptrAudioBuffer->SetRecordingSampleRate(0);
+ _ptrAudioBuffer->SetPlayoutSampleRate(0);
+ _ptrAudioBuffer->SetRecordingChannels(0);
+ _ptrAudioBuffer->SetPlayoutChannels(0);
+}
+
+// ----------------------------------------------------------------------------
+// ActiveAudioLayer
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceLinuxPulse::ActiveAudioLayer(
+ AudioDeviceModule::AudioLayer& audioLayer) const {
+ audioLayer = AudioDeviceModule::kLinuxPulseAudio;
+ return 0;
+}
+
+AudioDeviceGeneric::InitStatus AudioDeviceLinuxPulse::Init() {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (_initialized) {
+ return InitStatus::OK;
+ }
+
+ // Initialize PulseAudio
+ if (InitPulseAudio() < 0) {
+ RTC_LOG(LS_ERROR) << "failed to initialize PulseAudio";
+ if (TerminatePulseAudio() < 0) {
+ RTC_LOG(LS_ERROR) << "failed to terminate PulseAudio";
+ }
+ return InitStatus::OTHER_ERROR;
+ }
+
+#if defined(WEBRTC_USE_X11)
+ // Get X display handle for typing detection
+ _XDisplay = XOpenDisplay(NULL);
+ if (!_XDisplay) {
+ RTC_LOG(LS_WARNING)
+ << "failed to open X display, typing detection will not work";
+ }
+#endif
+
+ // RECORDING
+ const auto attributes =
+ rtc::ThreadAttributes().SetPriority(rtc::ThreadPriority::kRealtime);
+ _ptrThreadRec = rtc::PlatformThread::SpawnJoinable(
+ [this] {
+ while (RecThreadProcess()) {
+ }
+ },
+ "webrtc_audio_module_rec_thread", attributes);
+
+ // PLAYOUT
+ _ptrThreadPlay = rtc::PlatformThread::SpawnJoinable(
+ [this] {
+ while (PlayThreadProcess()) {
+ }
+ },
+ "webrtc_audio_module_play_thread", attributes);
+ _initialized = true;
+
+ return InitStatus::OK;
+}
+
+int32_t AudioDeviceLinuxPulse::Terminate() {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (!_initialized) {
+ return 0;
+ }
+ {
+ MutexLock lock(&mutex_);
+ quit_ = true;
+ }
+ _mixerManager.Close();
+
+ // RECORDING
+ _timeEventRec.Set();
+ _ptrThreadRec.Finalize();
+
+ // PLAYOUT
+ _timeEventPlay.Set();
+ _ptrThreadPlay.Finalize();
+
+ // Terminate PulseAudio
+ if (TerminatePulseAudio() < 0) {
+ RTC_LOG(LS_ERROR) << "failed to terminate PulseAudio";
+ return -1;
+ }
+
+#if defined(WEBRTC_USE_X11)
+ if (_XDisplay) {
+ XCloseDisplay(_XDisplay);
+ _XDisplay = NULL;
+ }
+#endif
+
+ _initialized = false;
+ _outputDeviceIsSpecified = false;
+ _inputDeviceIsSpecified = false;
+
+ return 0;
+}
+
+bool AudioDeviceLinuxPulse::Initialized() const {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ return (_initialized);
+}
+
+int32_t AudioDeviceLinuxPulse::InitSpeaker() {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+
+ if (_playing) {
+ return -1;
+ }
+
+ if (!_outputDeviceIsSpecified) {
+ return -1;
+ }
+
+ // check if default device
+ if (_outputDeviceIndex == 0) {
+ uint16_t deviceIndex = 0;
+ GetDefaultDeviceInfo(false, NULL, deviceIndex);
+ _paDeviceIndex = deviceIndex;
+ } else {
+ // get the PA device index from
+ // the callback
+ _deviceIndex = _outputDeviceIndex;
+
+ // get playout devices
+ PlayoutDevices();
+ }
+
+ // the callback has now set the _paDeviceIndex to
+ // the PulseAudio index of the device
+ if (_mixerManager.OpenSpeaker(_paDeviceIndex) == -1) {
+ return -1;
+ }
+
+ // clear _deviceIndex
+ _deviceIndex = -1;
+ _paDeviceIndex = -1;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::InitMicrophone() {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (_recording) {
+ return -1;
+ }
+
+ if (!_inputDeviceIsSpecified) {
+ return -1;
+ }
+
+ // Check if default device
+ if (_inputDeviceIndex == 0) {
+ uint16_t deviceIndex = 0;
+ GetDefaultDeviceInfo(true, NULL, deviceIndex);
+ _paDeviceIndex = deviceIndex;
+ } else {
+ // Get the PA device index from
+ // the callback
+ _deviceIndex = _inputDeviceIndex;
+
+ // get recording devices
+ RecordingDevices();
+ }
+
+ // The callback has now set the _paDeviceIndex to
+ // the PulseAudio index of the device
+ if (_mixerManager.OpenMicrophone(_paDeviceIndex) == -1) {
+ return -1;
+ }
+
+ // Clear _deviceIndex
+ _deviceIndex = -1;
+ _paDeviceIndex = -1;
+
+ return 0;
+}
+
+bool AudioDeviceLinuxPulse::SpeakerIsInitialized() const {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ return (_mixerManager.SpeakerIsInitialized());
+}
+
+bool AudioDeviceLinuxPulse::MicrophoneIsInitialized() const {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ return (_mixerManager.MicrophoneIsInitialized());
+}
+
+int32_t AudioDeviceLinuxPulse::SpeakerVolumeIsAvailable(bool& available) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ bool wasInitialized = _mixerManager.SpeakerIsInitialized();
+
+ // Make an attempt to open up the
+ // output mixer corresponding to the currently selected output device.
+ if (!wasInitialized && InitSpeaker() == -1) {
+ // If we end up here it means that the selected speaker has no volume
+ // control.
+ available = false;
+ return 0;
+ }
+
+ // Given that InitSpeaker was successful, we know volume control exists.
+ available = true;
+
+ // Close the initialized output mixer
+ if (!wasInitialized) {
+ _mixerManager.CloseSpeaker();
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::SetSpeakerVolume(uint32_t volume) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (!_playing) {
+    // Volume was set while not playing; remember to apply it when playout
+    // starts.
+ update_speaker_volume_at_startup_ = true;
+ }
+ return (_mixerManager.SetSpeakerVolume(volume));
+}
+
+int32_t AudioDeviceLinuxPulse::SpeakerVolume(uint32_t& volume) const {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ uint32_t level(0);
+
+ if (_mixerManager.SpeakerVolume(level) == -1) {
+ return -1;
+ }
+
+ volume = level;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::MaxSpeakerVolume(uint32_t& maxVolume) const {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ uint32_t maxVol(0);
+
+ if (_mixerManager.MaxSpeakerVolume(maxVol) == -1) {
+ return -1;
+ }
+
+ maxVolume = maxVol;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::MinSpeakerVolume(uint32_t& minVolume) const {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ uint32_t minVol(0);
+
+ if (_mixerManager.MinSpeakerVolume(minVol) == -1) {
+ return -1;
+ }
+
+ minVolume = minVol;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::SpeakerMuteIsAvailable(bool& available) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ bool isAvailable(false);
+ bool wasInitialized = _mixerManager.SpeakerIsInitialized();
+
+ // Make an attempt to open up the
+ // output mixer corresponding to the currently selected output device.
+ //
+ if (!wasInitialized && InitSpeaker() == -1) {
+ // If we end up here it means that the selected speaker has no volume
+ // control, hence it is safe to state that there is no mute control
+ // already at this stage.
+ available = false;
+ return 0;
+ }
+
+ // Check if the selected speaker has a mute control
+ _mixerManager.SpeakerMuteIsAvailable(isAvailable);
+
+ available = isAvailable;
+
+ // Close the initialized output mixer
+ if (!wasInitialized) {
+ _mixerManager.CloseSpeaker();
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::SetSpeakerMute(bool enable) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ return (_mixerManager.SetSpeakerMute(enable));
+}
+
+int32_t AudioDeviceLinuxPulse::SpeakerMute(bool& enabled) const {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+  bool muted(false);
+ if (_mixerManager.SpeakerMute(muted) == -1) {
+ return -1;
+ }
+
+ enabled = muted;
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::MicrophoneMuteIsAvailable(bool& available) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ bool isAvailable(false);
+ bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
+
+ // Make an attempt to open up the
+ // input mixer corresponding to the currently selected input device.
+ //
+ if (!wasInitialized && InitMicrophone() == -1) {
+ // If we end up here it means that the selected microphone has no
+ // volume control, hence it is safe to state that there is no
+    // mute control already at this stage.
+ available = false;
+ return 0;
+ }
+
+ // Check if the selected microphone has a mute control
+ //
+ _mixerManager.MicrophoneMuteIsAvailable(isAvailable);
+ available = isAvailable;
+
+ // Close the initialized input mixer
+ //
+ if (!wasInitialized) {
+ _mixerManager.CloseMicrophone();
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::SetMicrophoneMute(bool enable) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ return (_mixerManager.SetMicrophoneMute(enable));
+}
+
+int32_t AudioDeviceLinuxPulse::MicrophoneMute(bool& enabled) const {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+  bool muted(false);
+ if (_mixerManager.MicrophoneMute(muted) == -1) {
+ return -1;
+ }
+
+ enabled = muted;
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::StereoRecordingIsAvailable(bool& available) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (_recChannels == 2 && _recording) {
+ available = true;
+ return 0;
+ }
+
+ available = false;
+ bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
+ int error = 0;
+
+ if (!wasInitialized && InitMicrophone() == -1) {
+ // Cannot open the specified device
+ available = false;
+ return 0;
+ }
+
+ // Check if the selected microphone can record stereo.
+ bool isAvailable(false);
+ error = _mixerManager.StereoRecordingIsAvailable(isAvailable);
+ if (!error)
+ available = isAvailable;
+
+ // Close the initialized input mixer
+ if (!wasInitialized) {
+ _mixerManager.CloseMicrophone();
+ }
+
+ return error;
+}
+
+int32_t AudioDeviceLinuxPulse::SetStereoRecording(bool enable) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (enable)
+ _recChannels = 2;
+ else
+ _recChannels = 1;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::StereoRecording(bool& enabled) const {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (_recChannels == 2)
+ enabled = true;
+ else
+ enabled = false;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::StereoPlayoutIsAvailable(bool& available) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (_playChannels == 2 && _playing) {
+ available = true;
+ return 0;
+ }
+
+ available = false;
+ bool wasInitialized = _mixerManager.SpeakerIsInitialized();
+ int error = 0;
+
+ if (!wasInitialized && InitSpeaker() == -1) {
+ // Cannot open the specified device.
+ return -1;
+ }
+
+ // Check if the selected speaker can play stereo.
+ bool isAvailable(false);
+ error = _mixerManager.StereoPlayoutIsAvailable(isAvailable);
+ if (!error)
+ available = isAvailable;
+
+  // Close the initialized output mixer
+ if (!wasInitialized) {
+ _mixerManager.CloseSpeaker();
+ }
+
+ return error;
+}
+
+int32_t AudioDeviceLinuxPulse::SetStereoPlayout(bool enable) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (enable)
+ _playChannels = 2;
+ else
+ _playChannels = 1;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::StereoPlayout(bool& enabled) const {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (_playChannels == 2)
+ enabled = true;
+ else
+ enabled = false;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::MicrophoneVolumeIsAvailable(bool& available) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
+
+ // Make an attempt to open up the
+  // input mixer corresponding to the currently selected input device.
+ if (!wasInitialized && InitMicrophone() == -1) {
+ // If we end up here it means that the selected microphone has no
+ // volume control.
+ available = false;
+ return 0;
+ }
+
+ // Given that InitMicrophone was successful, we know that a volume control
+ // exists.
+ available = true;
+
+ // Close the initialized input mixer
+ if (!wasInitialized) {
+ _mixerManager.CloseMicrophone();
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::SetMicrophoneVolume(uint32_t volume) {
+ return (_mixerManager.SetMicrophoneVolume(volume));
+}
+
+int32_t AudioDeviceLinuxPulse::MicrophoneVolume(uint32_t& volume) const {
+ uint32_t level(0);
+
+ if (_mixerManager.MicrophoneVolume(level) == -1) {
+ RTC_LOG(LS_WARNING) << "failed to retrieve current microphone level";
+ return -1;
+ }
+
+ volume = level;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::MaxMicrophoneVolume(uint32_t& maxVolume) const {
+ uint32_t maxVol(0);
+
+ if (_mixerManager.MaxMicrophoneVolume(maxVol) == -1) {
+ return -1;
+ }
+
+ maxVolume = maxVol;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::MinMicrophoneVolume(uint32_t& minVolume) const {
+ uint32_t minVol(0);
+
+ if (_mixerManager.MinMicrophoneVolume(minVol) == -1) {
+ return -1;
+ }
+
+ minVolume = minVol;
+
+ return 0;
+}
+
+int16_t AudioDeviceLinuxPulse::PlayoutDevices() {
+ PaLock();
+
+ pa_operation* paOperation = NULL;
+ _numPlayDevices = 1; // init to 1 to account for "default"
+
+ // get the whole list of devices and update _numPlayDevices
+ paOperation =
+ LATE(pa_context_get_sink_info_list)(_paContext, PaSinkInfoCallback, this);
+
+ WaitForOperationCompletion(paOperation);
+
+ PaUnLock();
+
+ return _numPlayDevices;
+}
+
+int32_t AudioDeviceLinuxPulse::SetPlayoutDevice(uint16_t index) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (_playIsInitialized) {
+ return -1;
+ }
+
+ const uint16_t nDevices = PlayoutDevices();
+
+  RTC_LOG(LS_VERBOSE) << "number of available output devices is " << nDevices;
+
+ if (index > (nDevices - 1)) {
+ RTC_LOG(LS_ERROR) << "device index is out of range [0," << (nDevices - 1)
+ << "]";
+ return -1;
+ }
+
+ _outputDeviceIndex = index;
+ _outputDeviceIsSpecified = true;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::SetPlayoutDevice(
+ AudioDeviceModule::WindowsDeviceType /*device*/) {
+ RTC_LOG(LS_ERROR) << "WindowsDeviceType not supported";
+ return -1;
+}
+
+int32_t AudioDeviceLinuxPulse::PlayoutDeviceName(
+ uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ const uint16_t nDevices = PlayoutDevices();
+
+ if ((index > (nDevices - 1)) || (name == NULL)) {
+ return -1;
+ }
+
+ memset(name, 0, kAdmMaxDeviceNameSize);
+
+ if (guid != NULL) {
+ memset(guid, 0, kAdmMaxGuidSize);
+ }
+
+ // Check if default device
+ if (index == 0) {
+ uint16_t deviceIndex = 0;
+ return GetDefaultDeviceInfo(false, name, deviceIndex);
+ }
+
+ // Tell the callback that we want
+  // the name for this device
+ _playDisplayDeviceName = name;
+ _deviceIndex = index;
+
+ // get playout devices
+ PlayoutDevices();
+
+ // clear device name and index
+ _playDisplayDeviceName = NULL;
+ _deviceIndex = -1;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::RecordingDeviceName(
+ uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ const uint16_t nDevices(RecordingDevices());
+
+ if ((index > (nDevices - 1)) || (name == NULL)) {
+ return -1;
+ }
+
+ memset(name, 0, kAdmMaxDeviceNameSize);
+
+ if (guid != NULL) {
+ memset(guid, 0, kAdmMaxGuidSize);
+ }
+
+ // Check if default device
+ if (index == 0) {
+ uint16_t deviceIndex = 0;
+ return GetDefaultDeviceInfo(true, name, deviceIndex);
+ }
+
+ // Tell the callback that we want
+ // the name for this device
+ _recDisplayDeviceName = name;
+ _deviceIndex = index;
+
+ // Get recording devices
+ RecordingDevices();
+
+ // Clear device name and index
+ _recDisplayDeviceName = NULL;
+ _deviceIndex = -1;
+
+ return 0;
+}
+
+int16_t AudioDeviceLinuxPulse::RecordingDevices() {
+ PaLock();
+
+ pa_operation* paOperation = NULL;
+ _numRecDevices = 1; // Init to 1 to account for "default"
+
+ // Get the whole list of devices and update _numRecDevices
+ paOperation = LATE(pa_context_get_source_info_list)(
+ _paContext, PaSourceInfoCallback, this);
+
+ WaitForOperationCompletion(paOperation);
+
+ PaUnLock();
+
+ return _numRecDevices;
+}
+
+int32_t AudioDeviceLinuxPulse::SetRecordingDevice(uint16_t index) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (_recIsInitialized) {
+ return -1;
+ }
+
+ const uint16_t nDevices(RecordingDevices());
+
+  RTC_LOG(LS_VERBOSE) << "number of available input devices is " << nDevices;
+
+ if (index > (nDevices - 1)) {
+ RTC_LOG(LS_ERROR) << "device index is out of range [0," << (nDevices - 1)
+ << "]";
+ return -1;
+ }
+
+ _inputDeviceIndex = index;
+ _inputDeviceIsSpecified = true;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::SetRecordingDevice(
+ AudioDeviceModule::WindowsDeviceType /*device*/) {
+ RTC_LOG(LS_ERROR) << "WindowsDeviceType not supported";
+ return -1;
+}
+
+int32_t AudioDeviceLinuxPulse::PlayoutIsAvailable(bool& available) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ available = false;
+
+ // Try to initialize the playout side
+ int32_t res = InitPlayout();
+
+ // Cancel effect of initialization
+ StopPlayout();
+
+ if (res != -1) {
+ available = true;
+ }
+
+ return res;
+}
+
+int32_t AudioDeviceLinuxPulse::RecordingIsAvailable(bool& available) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ available = false;
+
+  // Try to initialize the recording side
+ int32_t res = InitRecording();
+
+ // Cancel effect of initialization
+ StopRecording();
+
+ if (res != -1) {
+ available = true;
+ }
+
+ return res;
+}
+
+int32_t AudioDeviceLinuxPulse::InitPlayout() {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+
+ if (_playing) {
+ return -1;
+ }
+
+ if (!_outputDeviceIsSpecified) {
+ return -1;
+ }
+
+ if (_playIsInitialized) {
+ return 0;
+ }
+
+ // Initialize the speaker (devices might have been added or removed)
+ if (InitSpeaker() == -1) {
+ RTC_LOG(LS_WARNING) << "InitSpeaker() failed";
+ }
+
+ // Set the play sample specification
+ pa_sample_spec playSampleSpec;
+ playSampleSpec.channels = _playChannels;
+ playSampleSpec.format = PA_SAMPLE_S16LE;
+ playSampleSpec.rate = sample_rate_hz_;
+
+ // Create a new play stream
+ {
+ MutexLock lock(&mutex_);
+ _playStream =
+ LATE(pa_stream_new)(_paContext, "playStream", &playSampleSpec, NULL);
+ }
+
+ if (!_playStream) {
+ RTC_LOG(LS_ERROR) << "failed to create play stream, err="
+ << LATE(pa_context_errno)(_paContext);
+ return -1;
+ }
+
+ // Provide the playStream to the mixer
+ _mixerManager.SetPlayStream(_playStream);
+
+ if (_ptrAudioBuffer) {
+ // Update audio buffer with the selected parameters
+ _ptrAudioBuffer->SetPlayoutSampleRate(sample_rate_hz_);
+ _ptrAudioBuffer->SetPlayoutChannels((uint8_t)_playChannels);
+ }
+
+ RTC_LOG(LS_VERBOSE) << "stream state "
+ << LATE(pa_stream_get_state)(_playStream);
+
+ // Set stream flags
+ _playStreamFlags = (pa_stream_flags_t)(PA_STREAM_AUTO_TIMING_UPDATE |
+ PA_STREAM_INTERPOLATE_TIMING);
+
+ if (_configuredLatencyPlay != WEBRTC_PA_NO_LATENCY_REQUIREMENTS) {
+ // If configuring a specific latency then we want to specify
+ // PA_STREAM_ADJUST_LATENCY to make the server adjust parameters
+ // automatically to reach that target latency. However, that flag
+ // doesn't exist in Ubuntu 8.04 and many people still use that,
+ // so we have to check the protocol version of libpulse.
+ if (LATE(pa_context_get_protocol_version)(_paContext) >=
+ WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION) {
+ _playStreamFlags |= PA_STREAM_ADJUST_LATENCY;
+ }
+
+ const pa_sample_spec* spec = LATE(pa_stream_get_sample_spec)(_playStream);
+ if (!spec) {
+ RTC_LOG(LS_ERROR) << "pa_stream_get_sample_spec()";
+ return -1;
+ }
+
+ size_t bytesPerSec = LATE(pa_bytes_per_second)(spec);
+ uint32_t latency = bytesPerSec * WEBRTC_PA_PLAYBACK_LATENCY_MINIMUM_MSECS /
+ WEBRTC_PA_MSECS_PER_SEC;
+
+ // Set the play buffer attributes
+ _playBufferAttr.maxlength = latency; // num bytes stored in the buffer
+ _playBufferAttr.tlength = latency; // target fill level of play buffer
+ // minimum free num bytes before server request more data
+ _playBufferAttr.minreq = latency / WEBRTC_PA_PLAYBACK_REQUEST_FACTOR;
+ // prebuffer tlength before starting playout
+ _playBufferAttr.prebuf = _playBufferAttr.tlength - _playBufferAttr.minreq;
+
+ _configuredLatencyPlay = latency;
+ }
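+
+  // Illustration (editorial, with assumed example values): for S16LE stereo
+  // at 48 kHz, pa_bytes_per_second() yields 48000 * 2 bytes * 2 channels =
+  // 192000 bytes/s, so a 20 ms minimum playback latency would map to
+  // 192000 * 20 / 1000 = 3840 bytes for maxlength/tlength, with minreq and
+  // prebuf derived from tlength as above.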
+
+  // One 10 ms block in bytes: (sample rate / 100) samples * 2 bytes per
+  // sample * number of channels.
+ _playbackBufferSize = sample_rate_hz_ / 100 * 2 * _playChannels;
+ _playbackBufferUnused = _playbackBufferSize;
+ _playBuffer = new int8_t[_playbackBufferSize];
+
+ // Enable underflow callback
+ LATE(pa_stream_set_underflow_callback)
+ (_playStream, PaStreamUnderflowCallback, this);
+
+ // Set the state callback function for the stream
+ LATE(pa_stream_set_state_callback)(_playStream, PaStreamStateCallback, this);
+
+ // Mark playout side as initialized
+ {
+ MutexLock lock(&mutex_);
+ _playIsInitialized = true;
+ _sndCardPlayDelay = 0;
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::InitRecording() {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+
+ if (_recording) {
+ return -1;
+ }
+
+ if (!_inputDeviceIsSpecified) {
+ return -1;
+ }
+
+ if (_recIsInitialized) {
+ return 0;
+ }
+
+ // Initialize the microphone (devices might have been added or removed)
+ if (InitMicrophone() == -1) {
+ RTC_LOG(LS_WARNING) << "InitMicrophone() failed";
+ }
+
+ // Set the rec sample specification
+ pa_sample_spec recSampleSpec;
+ recSampleSpec.channels = _recChannels;
+ recSampleSpec.format = PA_SAMPLE_S16LE;
+ recSampleSpec.rate = sample_rate_hz_;
+
+ // Create a new rec stream
+ _recStream =
+ LATE(pa_stream_new)(_paContext, "recStream", &recSampleSpec, NULL);
+ if (!_recStream) {
+ RTC_LOG(LS_ERROR) << "failed to create rec stream, err="
+ << LATE(pa_context_errno)(_paContext);
+ return -1;
+ }
+
+ // Provide the recStream to the mixer
+ _mixerManager.SetRecStream(_recStream);
+
+ if (_ptrAudioBuffer) {
+ // Update audio buffer with the selected parameters
+ _ptrAudioBuffer->SetRecordingSampleRate(sample_rate_hz_);
+ _ptrAudioBuffer->SetRecordingChannels((uint8_t)_recChannels);
+ }
+
+ if (_configuredLatencyRec != WEBRTC_PA_NO_LATENCY_REQUIREMENTS) {
+ _recStreamFlags = (pa_stream_flags_t)(PA_STREAM_AUTO_TIMING_UPDATE |
+ PA_STREAM_INTERPOLATE_TIMING);
+
+ // If configuring a specific latency then we want to specify
+ // PA_STREAM_ADJUST_LATENCY to make the server adjust parameters
+ // automatically to reach that target latency. However, that flag
+ // doesn't exist in Ubuntu 8.04 and many people still use that,
+ // so we have to check the protocol version of libpulse.
+ if (LATE(pa_context_get_protocol_version)(_paContext) >=
+ WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION) {
+ _recStreamFlags |= PA_STREAM_ADJUST_LATENCY;
+ }
+
+ const pa_sample_spec* spec = LATE(pa_stream_get_sample_spec)(_recStream);
+ if (!spec) {
+ RTC_LOG(LS_ERROR) << "pa_stream_get_sample_spec(rec)";
+ return -1;
+ }
+
+ size_t bytesPerSec = LATE(pa_bytes_per_second)(spec);
+ uint32_t latency = bytesPerSec * WEBRTC_PA_LOW_CAPTURE_LATENCY_MSECS /
+ WEBRTC_PA_MSECS_PER_SEC;
+
+ // Set the rec buffer attributes
+ // Note: fragsize specifies a maximum transfer size, not a minimum, so
+ // it is not possible to force a high latency setting, only a low one.
+ _recBufferAttr.fragsize = latency; // size of fragment
+ _recBufferAttr.maxlength =
+ latency + bytesPerSec * WEBRTC_PA_CAPTURE_BUFFER_EXTRA_MSECS /
+ WEBRTC_PA_MSECS_PER_SEC;
+
+ _configuredLatencyRec = latency;
+ }
+
+ _recordBufferSize = sample_rate_hz_ / 100 * 2 * _recChannels;
+ _recordBufferUsed = 0;
+ _recBuffer = new int8_t[_recordBufferSize];
+
+ // Enable overflow callback
+ LATE(pa_stream_set_overflow_callback)
+ (_recStream, PaStreamOverflowCallback, this);
+
+ // Set the state callback function for the stream
+ LATE(pa_stream_set_state_callback)(_recStream, PaStreamStateCallback, this);
+
+ // Mark recording side as initialized
+ _recIsInitialized = true;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::StartRecording() {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (!_recIsInitialized) {
+ return -1;
+ }
+
+ if (_recording) {
+ return 0;
+ }
+
+ // Set state to ensure that the recording starts from the audio thread.
+ _startRec = true;
+
+ // The audio thread will signal when recording has started.
+ _timeEventRec.Set();
+ if (!_recStartEvent.Wait(TimeDelta::Seconds(10))) {
+ {
+ MutexLock lock(&mutex_);
+ _startRec = false;
+ }
+ StopRecording();
+ RTC_LOG(LS_ERROR) << "failed to activate recording";
+ return -1;
+ }
+
+ {
+ MutexLock lock(&mutex_);
+ if (_recording) {
+ // The recording state is set by the audio thread after recording
+ // has started.
+ } else {
+ RTC_LOG(LS_ERROR) << "failed to activate recording";
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::StopRecording() {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ MutexLock lock(&mutex_);
+
+ if (!_recIsInitialized) {
+ return 0;
+ }
+
+ if (_recStream == NULL) {
+ return -1;
+ }
+
+ _recIsInitialized = false;
+ _recording = false;
+
+ RTC_LOG(LS_VERBOSE) << "stopping recording";
+
+ // Stop Recording
+ PaLock();
+
+ DisableReadCallback();
+ LATE(pa_stream_set_overflow_callback)(_recStream, NULL, NULL);
+
+ // Unset this here so that we don't get a TERMINATED callback
+ LATE(pa_stream_set_state_callback)(_recStream, NULL, NULL);
+
+ if (LATE(pa_stream_get_state)(_recStream) != PA_STREAM_UNCONNECTED) {
+ // Disconnect the stream
+ if (LATE(pa_stream_disconnect)(_recStream) != PA_OK) {
+ RTC_LOG(LS_ERROR) << "failed to disconnect rec stream, err="
+ << LATE(pa_context_errno)(_paContext);
+ PaUnLock();
+ return -1;
+ }
+
+ RTC_LOG(LS_VERBOSE) << "disconnected recording";
+ }
+
+ LATE(pa_stream_unref)(_recStream);
+ _recStream = NULL;
+
+ PaUnLock();
+
+  // Clear the rec stream in the mixer (_recStream is NULL at this point).
+ _mixerManager.SetRecStream(_recStream);
+
+ if (_recBuffer) {
+ delete[] _recBuffer;
+ _recBuffer = NULL;
+ }
+
+ return 0;
+}
+
+bool AudioDeviceLinuxPulse::RecordingIsInitialized() const {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ return (_recIsInitialized);
+}
+
+bool AudioDeviceLinuxPulse::Recording() const {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ return (_recording);
+}
+
+bool AudioDeviceLinuxPulse::PlayoutIsInitialized() const {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ return (_playIsInitialized);
+}
+
+int32_t AudioDeviceLinuxPulse::StartPlayout() {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+
+ if (!_playIsInitialized) {
+ return -1;
+ }
+
+ if (_playing) {
+ return 0;
+ }
+
+ // Set state to ensure that playout starts from the audio thread.
+ {
+ MutexLock lock(&mutex_);
+ _startPlay = true;
+ }
+
+  // Both `_startPlay` and `_playing` need protection since they are also
+ // accessed on the playout thread.
+
+ // The audio thread will signal when playout has started.
+ _timeEventPlay.Set();
+ if (!_playStartEvent.Wait(TimeDelta::Seconds(10))) {
+ {
+ MutexLock lock(&mutex_);
+ _startPlay = false;
+ }
+ StopPlayout();
+ RTC_LOG(LS_ERROR) << "failed to activate playout";
+ return -1;
+ }
+
+ {
+ MutexLock lock(&mutex_);
+ if (_playing) {
+ // The playing state is set by the audio thread after playout
+ // has started.
+ } else {
+ RTC_LOG(LS_ERROR) << "failed to activate playing";
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::StopPlayout() {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ MutexLock lock(&mutex_);
+
+ if (!_playIsInitialized) {
+ return 0;
+ }
+
+ if (_playStream == NULL) {
+ return -1;
+ }
+
+ _playIsInitialized = false;
+ _playing = false;
+ _sndCardPlayDelay = 0;
+
+ RTC_LOG(LS_VERBOSE) << "stopping playback";
+
+ // Stop Playout
+ PaLock();
+
+ DisableWriteCallback();
+ LATE(pa_stream_set_underflow_callback)(_playStream, NULL, NULL);
+
+ // Unset this here so that we don't get a TERMINATED callback
+ LATE(pa_stream_set_state_callback)(_playStream, NULL, NULL);
+
+ if (LATE(pa_stream_get_state)(_playStream) != PA_STREAM_UNCONNECTED) {
+ // Disconnect the stream
+ if (LATE(pa_stream_disconnect)(_playStream) != PA_OK) {
+ RTC_LOG(LS_ERROR) << "failed to disconnect play stream, err="
+ << LATE(pa_context_errno)(_paContext);
+ PaUnLock();
+ return -1;
+ }
+
+ RTC_LOG(LS_VERBOSE) << "disconnected playback";
+ }
+
+ LATE(pa_stream_unref)(_playStream);
+ _playStream = NULL;
+
+ PaUnLock();
+
+  // Clear the play stream in the mixer (_playStream is NULL at this point).
+ _mixerManager.SetPlayStream(_playStream);
+
+ if (_playBuffer) {
+ delete[] _playBuffer;
+ _playBuffer = NULL;
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::PlayoutDelay(uint16_t& delayMS) const {
+ MutexLock lock(&mutex_);
+ delayMS = (uint16_t)_sndCardPlayDelay;
+ return 0;
+}
+
+bool AudioDeviceLinuxPulse::Playing() const {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ return (_playing);
+}
+
+// ============================================================================
+// Private Methods
+// ============================================================================
+
+void AudioDeviceLinuxPulse::PaContextStateCallback(pa_context* c, void* pThis) {
+ static_cast<AudioDeviceLinuxPulse*>(pThis)->PaContextStateCallbackHandler(c);
+}
+
+// ----------------------------------------------------------------------------
+// PaSinkInfoCallback
+// ----------------------------------------------------------------------------
+
+void AudioDeviceLinuxPulse::PaSinkInfoCallback(pa_context* /*c*/,
+ const pa_sink_info* i,
+ int eol,
+ void* pThis) {
+ static_cast<AudioDeviceLinuxPulse*>(pThis)->PaSinkInfoCallbackHandler(i, eol);
+}
+
+void AudioDeviceLinuxPulse::PaSourceInfoCallback(pa_context* /*c*/,
+ const pa_source_info* i,
+ int eol,
+ void* pThis) {
+ static_cast<AudioDeviceLinuxPulse*>(pThis)->PaSourceInfoCallbackHandler(i,
+ eol);
+}
+
+void AudioDeviceLinuxPulse::PaServerInfoCallback(pa_context* /*c*/,
+ const pa_server_info* i,
+ void* pThis) {
+ static_cast<AudioDeviceLinuxPulse*>(pThis)->PaServerInfoCallbackHandler(i);
+}
+
+void AudioDeviceLinuxPulse::PaStreamStateCallback(pa_stream* p, void* pThis) {
+ static_cast<AudioDeviceLinuxPulse*>(pThis)->PaStreamStateCallbackHandler(p);
+}
+
+void AudioDeviceLinuxPulse::PaContextStateCallbackHandler(pa_context* c) {
+ RTC_LOG(LS_VERBOSE) << "context state cb";
+
+ pa_context_state_t state = LATE(pa_context_get_state)(c);
+ switch (state) {
+ case PA_CONTEXT_UNCONNECTED:
+ RTC_LOG(LS_VERBOSE) << "unconnected";
+ break;
+ case PA_CONTEXT_CONNECTING:
+ case PA_CONTEXT_AUTHORIZING:
+ case PA_CONTEXT_SETTING_NAME:
+ RTC_LOG(LS_VERBOSE) << "no state";
+ break;
+ case PA_CONTEXT_FAILED:
+ case PA_CONTEXT_TERMINATED:
+ RTC_LOG(LS_VERBOSE) << "failed";
+ _paStateChanged = true;
+ LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
+ break;
+ case PA_CONTEXT_READY:
+ RTC_LOG(LS_VERBOSE) << "ready";
+ _paStateChanged = true;
+ LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
+ break;
+ }
+}
+
+void AudioDeviceLinuxPulse::PaSinkInfoCallbackHandler(const pa_sink_info* i,
+ int eol) {
+ if (eol) {
+ // Signal that we are done
+ LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
+ return;
+ }
+
+ if (_numPlayDevices == _deviceIndex) {
+ // Convert the device index to the one of the sink
+ _paDeviceIndex = i->index;
+
+ if (_playDeviceName) {
+ // Copy the sink name
+ strncpy(_playDeviceName, i->name, kAdmMaxDeviceNameSize);
+ _playDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
+ }
+ if (_playDisplayDeviceName) {
+ // Copy the sink display name
+ strncpy(_playDisplayDeviceName, i->description, kAdmMaxDeviceNameSize);
+ _playDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
+ }
+ }
+
+ _numPlayDevices++;
+}
+
+void AudioDeviceLinuxPulse::PaSourceInfoCallbackHandler(const pa_source_info* i,
+ int eol) {
+ if (eol) {
+ // Signal that we are done
+ LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
+ return;
+ }
+
+ // We don't want to list output devices
+ if (i->monitor_of_sink == PA_INVALID_INDEX) {
+ if (_numRecDevices == _deviceIndex) {
+ // Convert the device index to the one of the source
+ _paDeviceIndex = i->index;
+
+ if (_recDeviceName) {
+ // copy the source name
+ strncpy(_recDeviceName, i->name, kAdmMaxDeviceNameSize);
+ _recDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
+ }
+ if (_recDisplayDeviceName) {
+ // Copy the source display name
+ strncpy(_recDisplayDeviceName, i->description, kAdmMaxDeviceNameSize);
+ _recDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
+ }
+ }
+
+ _numRecDevices++;
+ }
+}
+
+void AudioDeviceLinuxPulse::PaServerInfoCallbackHandler(
+ const pa_server_info* i) {
+ // Use PA native sampling rate
+ sample_rate_hz_ = i->sample_spec.rate;
+
+ // Copy the PA server version
+ strncpy(_paServerVersion, i->server_version, 31);
+ _paServerVersion[31] = '\0';
+
+ if (_recDisplayDeviceName) {
+ // Copy the source name
+ strncpy(_recDisplayDeviceName, i->default_source_name,
+ kAdmMaxDeviceNameSize);
+ _recDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
+ }
+
+ if (_playDisplayDeviceName) {
+ // Copy the sink name
+ strncpy(_playDisplayDeviceName, i->default_sink_name,
+ kAdmMaxDeviceNameSize);
+ _playDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
+ }
+
+ LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
+}
+
+void AudioDeviceLinuxPulse::PaStreamStateCallbackHandler(pa_stream* p) {
+ RTC_LOG(LS_VERBOSE) << "stream state cb";
+
+ pa_stream_state_t state = LATE(pa_stream_get_state)(p);
+ switch (state) {
+ case PA_STREAM_UNCONNECTED:
+ RTC_LOG(LS_VERBOSE) << "unconnected";
+ break;
+ case PA_STREAM_CREATING:
+ RTC_LOG(LS_VERBOSE) << "creating";
+ break;
+ case PA_STREAM_FAILED:
+ case PA_STREAM_TERMINATED:
+ RTC_LOG(LS_VERBOSE) << "failed";
+ break;
+ case PA_STREAM_READY:
+ RTC_LOG(LS_VERBOSE) << "ready";
+ break;
+ }
+
+ LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
+}
+
+int32_t AudioDeviceLinuxPulse::CheckPulseAudioVersion() {
+ PaLock();
+
+ pa_operation* paOperation = NULL;
+
+ // get the server info and update deviceName
+ paOperation =
+ LATE(pa_context_get_server_info)(_paContext, PaServerInfoCallback, this);
+
+ WaitForOperationCompletion(paOperation);
+
+ PaUnLock();
+
+ RTC_LOG(LS_VERBOSE) << "checking PulseAudio version: " << _paServerVersion;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::InitSamplingFrequency() {
+ PaLock();
+
+ pa_operation* paOperation = NULL;
+
+ // Get the server info and update sample_rate_hz_
+ paOperation =
+ LATE(pa_context_get_server_info)(_paContext, PaServerInfoCallback, this);
+
+ WaitForOperationCompletion(paOperation);
+
+ PaUnLock();
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::GetDefaultDeviceInfo(bool recDevice,
+ char* name,
+ uint16_t& index) {
+ char tmpName[kAdmMaxDeviceNameSize] = {0};
+ // subtract length of "default: "
+ uint16_t nameLen = kAdmMaxDeviceNameSize - 9;
+ char* pName = NULL;
+
+ if (name) {
+ // Add "default: "
+ strcpy(name, "default: ");
+ pName = &name[9];
+ }
+
+ // Tell the callback that we want
+ // the name for this device
+ if (recDevice) {
+ _recDisplayDeviceName = tmpName;
+ } else {
+ _playDisplayDeviceName = tmpName;
+ }
+
+ // Set members
+ _paDeviceIndex = -1;
+ _deviceIndex = 0;
+ _numPlayDevices = 0;
+ _numRecDevices = 0;
+
+ PaLock();
+
+ pa_operation* paOperation = NULL;
+
+ // Get the server info and update deviceName
+ paOperation =
+ LATE(pa_context_get_server_info)(_paContext, PaServerInfoCallback, this);
+
+ WaitForOperationCompletion(paOperation);
+
+ // Get the device index
+ if (recDevice) {
+ paOperation = LATE(pa_context_get_source_info_by_name)(
+ _paContext, (char*)tmpName, PaSourceInfoCallback, this);
+ } else {
+ paOperation = LATE(pa_context_get_sink_info_by_name)(
+ _paContext, (char*)tmpName, PaSinkInfoCallback, this);
+ }
+
+ WaitForOperationCompletion(paOperation);
+
+ PaUnLock();
+
+ // Set the index
+ index = _paDeviceIndex;
+
+ if (name) {
+ // Copy to name string
+ strncpy(pName, tmpName, nameLen);
+ }
+
+ // Clear members
+ _playDisplayDeviceName = NULL;
+ _recDisplayDeviceName = NULL;
+ _paDeviceIndex = -1;
+ _deviceIndex = -1;
+ _numPlayDevices = 0;
+ _numRecDevices = 0;
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::InitPulseAudio() {
+ int retVal = 0;
+
+ // Load libpulse
+ if (!GetPulseSymbolTable()->Load()) {
+ // Most likely the Pulse library and sound server are not installed on
+ // this system
+ RTC_LOG(LS_ERROR) << "failed to load symbol table";
+ return -1;
+ }
+
+ // Create a mainloop API and connection to the default server
+ // the mainloop is the internal asynchronous API event loop
+ if (_paMainloop) {
+ RTC_LOG(LS_ERROR) << "PA mainloop has already existed";
+ return -1;
+ }
+ _paMainloop = LATE(pa_threaded_mainloop_new)();
+ if (!_paMainloop) {
+ RTC_LOG(LS_ERROR) << "could not create mainloop";
+ return -1;
+ }
+
+ // Start the threaded main loop
+ retVal = LATE(pa_threaded_mainloop_start)(_paMainloop);
+ if (retVal != PA_OK) {
+ RTC_LOG(LS_ERROR) << "failed to start main loop, error=" << retVal;
+ return -1;
+ }
+
+ RTC_LOG(LS_VERBOSE) << "mainloop running!";
+
+ PaLock();
+
+ _paMainloopApi = LATE(pa_threaded_mainloop_get_api)(_paMainloop);
+ if (!_paMainloopApi) {
+ RTC_LOG(LS_ERROR) << "could not create mainloop API";
+ PaUnLock();
+ return -1;
+ }
+
+ // Create a new PulseAudio context
+ if (_paContext) {
+ RTC_LOG(LS_ERROR) << "PA context has already existed";
+ PaUnLock();
+ return -1;
+ }
+ _paContext = LATE(pa_context_new)(_paMainloopApi, "WEBRTC VoiceEngine");
+
+ if (!_paContext) {
+ RTC_LOG(LS_ERROR) << "could not create context";
+ PaUnLock();
+ return -1;
+ }
+
+ // Set state callback function
+ LATE(pa_context_set_state_callback)(_paContext, PaContextStateCallback, this);
+
+ // Connect the context to a server (default)
+ _paStateChanged = false;
+ retVal =
+ LATE(pa_context_connect)(_paContext, NULL, PA_CONTEXT_NOAUTOSPAWN, NULL);
+
+ if (retVal != PA_OK) {
+ RTC_LOG(LS_ERROR) << "failed to connect context, error=" << retVal;
+ PaUnLock();
+ return -1;
+ }
+
+ // Wait for state change
+ while (!_paStateChanged) {
+ LATE(pa_threaded_mainloop_wait)(_paMainloop);
+ }
+
+ // Now check to see what final state we reached.
+ pa_context_state_t state = LATE(pa_context_get_state)(_paContext);
+
+ if (state != PA_CONTEXT_READY) {
+ if (state == PA_CONTEXT_FAILED) {
+ RTC_LOG(LS_ERROR) << "failed to connect to PulseAudio sound server";
+ } else if (state == PA_CONTEXT_TERMINATED) {
+ RTC_LOG(LS_ERROR) << "PulseAudio connection terminated early";
+ } else {
+ // Shouldn't happen, because we only signal on one of those three
+ // states
+ RTC_LOG(LS_ERROR) << "unknown problem connecting to PulseAudio";
+ }
+ PaUnLock();
+ return -1;
+ }
+
+ PaUnLock();
+
+ // Give the objects to the mixer manager
+ _mixerManager.SetPulseAudioObjects(_paMainloop, _paContext);
+
+ // Check the version
+ if (CheckPulseAudioVersion() < 0) {
+ RTC_LOG(LS_ERROR) << "PulseAudio version " << _paServerVersion
+ << " not supported";
+ return -1;
+ }
+
+ // Initialize sampling frequency
+ if (InitSamplingFrequency() < 0 || sample_rate_hz_ == 0) {
+ RTC_LOG(LS_ERROR) << "failed to initialize sampling frequency, set to "
+ << sample_rate_hz_ << " Hz";
+ return -1;
+ }
+
+ return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::TerminatePulseAudio() {
+  // Do nothing if the mainloop doesn't exist; this most likely means that
+  // GetPulseSymbolTable()->Load() failed.
+ if (!_paMainloop) {
+ return 0;
+ }
+
+ PaLock();
+
+ // Disconnect the context
+ if (_paContext) {
+ LATE(pa_context_disconnect)(_paContext);
+ }
+
+ // Unreference the context
+ if (_paContext) {
+ LATE(pa_context_unref)(_paContext);
+ }
+
+ PaUnLock();
+ _paContext = NULL;
+
+ // Stop the threaded main loop
+ if (_paMainloop) {
+ LATE(pa_threaded_mainloop_stop)(_paMainloop);
+ }
+
+ // Free the mainloop
+ if (_paMainloop) {
+ LATE(pa_threaded_mainloop_free)(_paMainloop);
+ }
+
+ _paMainloop = NULL;
+
+ RTC_LOG(LS_VERBOSE) << "PulseAudio terminated";
+
+ return 0;
+}
+
+void AudioDeviceLinuxPulse::PaLock() {
+ LATE(pa_threaded_mainloop_lock)(_paMainloop);
+}
+
+void AudioDeviceLinuxPulse::PaUnLock() {
+ LATE(pa_threaded_mainloop_unlock)(_paMainloop);
+}
+
+void AudioDeviceLinuxPulse::WaitForOperationCompletion(
+ pa_operation* paOperation) const {
+ if (!paOperation) {
+ RTC_LOG(LS_ERROR) << "paOperation NULL in WaitForOperationCompletion";
+ return;
+ }
+
+ while (LATE(pa_operation_get_state)(paOperation) == PA_OPERATION_RUNNING) {
+ LATE(pa_threaded_mainloop_wait)(_paMainloop);
+ }
+
+ LATE(pa_operation_unref)(paOperation);
+}
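+
+// Editorial note: this wait/signal pairing is the threaded-mainloop pattern
+// used throughout this file. A typical request (see PlayoutDevices()) is
+//
+//   PaLock();
+//   pa_operation* op = LATE(pa_context_get_sink_info_list)(
+//       _paContext, PaSinkInfoCallback, this);
+//   WaitForOperationCompletion(op);
+//   PaUnLock();
+//
+// which blocks in pa_threaded_mainloop_wait() until the corresponding info
+// callback handler calls pa_threaded_mainloop_signal(), as the handlers
+// above do when eol is reached or the requested info has been copied.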
+
+// ============================================================================
+// Thread Methods
+// ============================================================================
+
+void AudioDeviceLinuxPulse::EnableWriteCallback() {
+ if (LATE(pa_stream_get_state)(_playStream) == PA_STREAM_READY) {
+ // May already have available space. Must check.
+ _tempBufferSpace = LATE(pa_stream_writable_size)(_playStream);
+ if (_tempBufferSpace > 0) {
+ // Yup, there is already space available, so if we register a
+ // write callback then it will not receive any event. So dispatch
+      // one ourselves instead.
+ _timeEventPlay.Set();
+ return;
+ }
+ }
+
+ LATE(pa_stream_set_write_callback)(_playStream, &PaStreamWriteCallback, this);
+}
+
+void AudioDeviceLinuxPulse::DisableWriteCallback() {
+ LATE(pa_stream_set_write_callback)(_playStream, NULL, NULL);
+}
+
+void AudioDeviceLinuxPulse::PaStreamWriteCallback(pa_stream* /*unused*/,
+ size_t buffer_space,
+ void* pThis) {
+ static_cast<AudioDeviceLinuxPulse*>(pThis)->PaStreamWriteCallbackHandler(
+ buffer_space);
+}
+
+void AudioDeviceLinuxPulse::PaStreamWriteCallbackHandler(size_t bufferSpace) {
+ _tempBufferSpace = bufferSpace;
+
+ // Since we write the data asynchronously on a different thread, we have
+ // to temporarily disable the write callback or else Pulse will call it
+ // continuously until we write the data. We re-enable it below.
+ DisableWriteCallback();
+ _timeEventPlay.Set();
+}
+
+void AudioDeviceLinuxPulse::PaStreamUnderflowCallback(pa_stream* /*unused*/,
+ void* pThis) {
+ static_cast<AudioDeviceLinuxPulse*>(pThis)
+ ->PaStreamUnderflowCallbackHandler();
+}
+
+void AudioDeviceLinuxPulse::PaStreamUnderflowCallbackHandler() {
+ RTC_LOG(LS_WARNING) << "Playout underflow";
+
+ if (_configuredLatencyPlay == WEBRTC_PA_NO_LATENCY_REQUIREMENTS) {
+ // We didn't configure a pa_buffer_attr before, so switching to
+ // one now would be questionable.
+ return;
+ }
+
+ // Otherwise reconfigure the stream with a higher target latency.
+
+ const pa_sample_spec* spec = LATE(pa_stream_get_sample_spec)(_playStream);
+ if (!spec) {
+ RTC_LOG(LS_ERROR) << "pa_stream_get_sample_spec()";
+ return;
+ }
+
+ size_t bytesPerSec = LATE(pa_bytes_per_second)(spec);
+ uint32_t newLatency =
+ _configuredLatencyPlay + bytesPerSec *
+ WEBRTC_PA_PLAYBACK_LATENCY_INCREMENT_MSECS /
+ WEBRTC_PA_MSECS_PER_SEC;
+
+ // Set the play buffer attributes
+ _playBufferAttr.maxlength = newLatency;
+ _playBufferAttr.tlength = newLatency;
+ _playBufferAttr.minreq = newLatency / WEBRTC_PA_PLAYBACK_REQUEST_FACTOR;
+ _playBufferAttr.prebuf = _playBufferAttr.tlength - _playBufferAttr.minreq;
+
+ pa_operation* op = LATE(pa_stream_set_buffer_attr)(
+ _playStream, &_playBufferAttr, NULL, NULL);
+ if (!op) {
+ RTC_LOG(LS_ERROR) << "pa_stream_set_buffer_attr()";
+ return;
+ }
+
+ // Don't need to wait for this to complete.
+ LATE(pa_operation_unref)(op);
+
+ // Save the new latency in case we underflow again.
+ _configuredLatencyPlay = newLatency;
+}
+
+void AudioDeviceLinuxPulse::EnableReadCallback() {
+ LATE(pa_stream_set_read_callback)(_recStream, &PaStreamReadCallback, this);
+}
+
+void AudioDeviceLinuxPulse::DisableReadCallback() {
+ LATE(pa_stream_set_read_callback)(_recStream, NULL, NULL);
+}
+
+void AudioDeviceLinuxPulse::PaStreamReadCallback(pa_stream* /*unused1*/,
+ size_t /*unused2*/,
+ void* pThis) {
+ static_cast<AudioDeviceLinuxPulse*>(pThis)->PaStreamReadCallbackHandler();
+}
+
+void AudioDeviceLinuxPulse::PaStreamReadCallbackHandler() {
+ // We get the data pointer and size now in order to save one Lock/Unlock
+ // in the worker thread.
+ if (LATE(pa_stream_peek)(_recStream, &_tempSampleData,
+ &_tempSampleDataSize) != 0) {
+ RTC_LOG(LS_ERROR) << "Can't read data!";
+ return;
+ }
+
+ // Since we consume the data asynchronously on a different thread, we have
+ // to temporarily disable the read callback or else Pulse will call it
+ // continuously until we consume the data. We re-enable it below.
+ DisableReadCallback();
+ _timeEventRec.Set();
+}
+
+void AudioDeviceLinuxPulse::PaStreamOverflowCallback(pa_stream* /*unused*/,
+ void* pThis) {
+ static_cast<AudioDeviceLinuxPulse*>(pThis)->PaStreamOverflowCallbackHandler();
+}
+
+void AudioDeviceLinuxPulse::PaStreamOverflowCallbackHandler() {
+ RTC_LOG(LS_WARNING) << "Recording overflow";
+}
+
+int32_t AudioDeviceLinuxPulse::LatencyUsecs(pa_stream* stream) {
+ if (!WEBRTC_PA_REPORT_LATENCY) {
+ return 0;
+ }
+
+ if (!stream) {
+ return 0;
+ }
+
+ pa_usec_t latency;
+ int negative;
+ if (LATE(pa_stream_get_latency)(stream, &latency, &negative) != 0) {
+ RTC_LOG(LS_ERROR) << "Can't query latency";
+ // We'd rather continue playout/capture with an incorrect delay than
+ // stop it altogether, so return a valid value.
+ return 0;
+ }
+
+ if (negative) {
+ RTC_LOG(LS_VERBOSE)
+ << "warning: pa_stream_get_latency reported negative delay";
+
+ // The delay can be negative for monitoring streams if the captured
+ // samples haven't been played yet. In such a case, "latency"
+ // contains the magnitude, so we must negate it to get the real value.
+ int32_t tmpLatency = (int32_t)-latency;
+ if (tmpLatency < 0) {
+ // Make sure that we don't use a negative delay.
+ tmpLatency = 0;
+ }
+
+ return tmpLatency;
+ } else {
+ return (int32_t)latency;
+ }
+}
+
+int32_t AudioDeviceLinuxPulse::ReadRecordedData(const void* bufferData,
+ size_t bufferSize)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_) {
+ size_t size = bufferSize;
+ uint32_t numRecSamples = _recordBufferSize / (2 * _recChannels);
+
+ // Account for the peeked data and the used data.
+ uint32_t recDelay =
+ (uint32_t)((LatencyUsecs(_recStream) / 1000) +
+ 10 * ((size + _recordBufferUsed) / _recordBufferSize));
+
+ if (_playStream) {
+ // Get the playout delay.
+ _sndCardPlayDelay = (uint32_t)(LatencyUsecs(_playStream) / 1000);
+ }
+
+ if (_recordBufferUsed > 0) {
+ // Have to copy to the buffer until it is full.
+ size_t copy = _recordBufferSize - _recordBufferUsed;
+ if (size < copy) {
+ copy = size;
+ }
+
+ memcpy(&_recBuffer[_recordBufferUsed], bufferData, copy);
+ _recordBufferUsed += copy;
+ bufferData = static_cast<const char*>(bufferData) + copy;
+ size -= copy;
+
+ if (_recordBufferUsed != _recordBufferSize) {
+ // Not enough data yet to pass to VoE.
+ return 0;
+ }
+
+ // Provide data to VoiceEngine.
+ if (ProcessRecordedData(_recBuffer, numRecSamples, recDelay) == -1) {
+ // We have stopped recording.
+ return -1;
+ }
+
+ _recordBufferUsed = 0;
+ }
+
+ // Now process full 10ms sample sets directly from the input.
+ while (size >= _recordBufferSize) {
+ // Provide data to VoiceEngine.
+ if (ProcessRecordedData(static_cast<int8_t*>(const_cast<void*>(bufferData)),
+ numRecSamples, recDelay) == -1) {
+ // We have stopped recording.
+ return -1;
+ }
+
+ bufferData = static_cast<const char*>(bufferData) + _recordBufferSize;
+ size -= _recordBufferSize;
+
+ // We have consumed 10ms of data.
+ recDelay -= 10;
+ }
+
+ // Now save any leftovers for later.
+ if (size > 0) {
+ memcpy(_recBuffer, bufferData, size);
+ _recordBufferUsed = size;
+ }
+
+ return 0;
+}
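+
+// Sizing note (editorial illustration): _recordBufferSize holds one 10 ms
+// block, i.e. (sample_rate_hz_ / 100) samples * 2 bytes * _recChannels. For
+// mono capture at 48 kHz that is 480 * 2 * 1 = 960 bytes, and numRecSamples
+// above is 960 / (2 * 1) = 480 samples per delivery to the AudioDeviceBuffer.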
+
+int32_t AudioDeviceLinuxPulse::ProcessRecordedData(int8_t* bufferData,
+ uint32_t bufferSizeInSamples,
+ uint32_t recDelay)
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_) {
+ _ptrAudioBuffer->SetRecordedBuffer(bufferData, bufferSizeInSamples);
+
+ // TODO(andrew): this is a temporary hack, to avoid non-causal far- and
+ // near-end signals at the AEC for PulseAudio. I think the system delay is
+ // being correctly calculated here, but for legacy reasons we add +10 ms
+ // to the value in the AEC. The real fix will be part of a larger
+ // investigation into managing system delay in the AEC.
+ if (recDelay > 10)
+ recDelay -= 10;
+ else
+ recDelay = 0;
+ _ptrAudioBuffer->SetVQEData(_sndCardPlayDelay, recDelay);
+ _ptrAudioBuffer->SetTypingStatus(KeyPressed());
+ // Deliver recorded samples at specified sample rate,
+ // mic level etc. to the observer using callback.
+ UnLock();
+ _ptrAudioBuffer->DeliverRecordedData();
+ Lock();
+
+ // We have been unlocked - check the flag again.
+ if (!_recording) {
+ return -1;
+ }
+
+ return 0;
+}
+
+bool AudioDeviceLinuxPulse::PlayThreadProcess() {
+ if (!_timeEventPlay.Wait(TimeDelta::Seconds(1))) {
+ return true;
+ }
+
+ MutexLock lock(&mutex_);
+
+ if (quit_) {
+ return false;
+ }
+
+ if (_startPlay) {
+ RTC_LOG(LS_VERBOSE) << "_startPlay true, performing initial actions";
+
+ _startPlay = false;
+ _playDeviceName = NULL;
+
+ // Set if not default device
+ if (_outputDeviceIndex > 0) {
+ // Get the playout device name
+ _playDeviceName = new char[kAdmMaxDeviceNameSize];
+ _deviceIndex = _outputDeviceIndex;
+ PlayoutDevices();
+ }
+
+ // Start muted only supported on 0.9.11 and up
+ if (LATE(pa_context_get_protocol_version)(_paContext) >=
+ WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION) {
+ // Get the currently saved speaker mute status
+ // and set the initial mute status accordingly
+ bool enabled(false);
+ _mixerManager.SpeakerMute(enabled);
+ if (enabled) {
+ _playStreamFlags |= PA_STREAM_START_MUTED;
+ }
+ }
+
+ // Get the currently saved speaker volume
+ uint32_t volume = 0;
+ if (update_speaker_volume_at_startup_)
+ _mixerManager.SpeakerVolume(volume);
+
+ PaLock();
+
+ // NULL gives PA the choice of startup volume.
+ pa_cvolume* ptr_cvolume = NULL;
+ if (update_speaker_volume_at_startup_) {
+ pa_cvolume cVolumes;
+ ptr_cvolume = &cVolumes;
+
+ // Set the same volume for all channels
+ const pa_sample_spec* spec = LATE(pa_stream_get_sample_spec)(_playStream);
+ LATE(pa_cvolume_set)(&cVolumes, spec->channels, volume);
+ update_speaker_volume_at_startup_ = false;
+ }
+
+ // Connect the stream to a sink
+ if (LATE(pa_stream_connect_playback)(
+ _playStream, _playDeviceName, &_playBufferAttr,
+ (pa_stream_flags_t)_playStreamFlags, ptr_cvolume, NULL) != PA_OK) {
+ RTC_LOG(LS_ERROR) << "failed to connect play stream, err="
+ << LATE(pa_context_errno)(_paContext);
+ }
+
+ RTC_LOG(LS_VERBOSE) << "play stream connected";
+
+ // Wait for state change
+ while (LATE(pa_stream_get_state)(_playStream) != PA_STREAM_READY) {
+ LATE(pa_threaded_mainloop_wait)(_paMainloop);
+ }
+
+ RTC_LOG(LS_VERBOSE) << "play stream ready";
+
+ // We can now handle write callbacks
+ EnableWriteCallback();
+
+ PaUnLock();
+
+ // Clear device name
+ if (_playDeviceName) {
+ delete[] _playDeviceName;
+ _playDeviceName = NULL;
+ }
+
+ _playing = true;
+ _playStartEvent.Set();
+
+ return true;
+ }
+
+ if (_playing) {
+ if (!_recording) {
+ // Update the playout delay
+ _sndCardPlayDelay = (uint32_t)(LatencyUsecs(_playStream) / 1000);
+ }
+
+ if (_playbackBufferUnused < _playbackBufferSize) {
+ size_t write = _playbackBufferSize - _playbackBufferUnused;
+ if (_tempBufferSpace < write) {
+ write = _tempBufferSpace;
+ }
+
+ PaLock();
+ if (LATE(pa_stream_write)(
+ _playStream, (void*)&_playBuffer[_playbackBufferUnused], write,
+ NULL, (int64_t)0, PA_SEEK_RELATIVE) != PA_OK) {
+ _writeErrors++;
+ if (_writeErrors > 10) {
+ RTC_LOG(LS_ERROR) << "Playout error: _writeErrors=" << _writeErrors
+ << ", error=" << LATE(pa_context_errno)(_paContext);
+ _writeErrors = 0;
+ }
+ }
+ PaUnLock();
+
+ _playbackBufferUnused += write;
+ _tempBufferSpace -= write;
+ }
+
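+    // The buffer holds 2-byte samples, so dividing by 2 * _playChannels below
+    // converts the byte count into samples per channel.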
+ uint32_t numPlaySamples = _playbackBufferSize / (2 * _playChannels);
+ // Might have been reduced to zero by the above.
+ if (_tempBufferSpace > 0) {
+ // Ask for new PCM data to be played out using the
+      // AudioDeviceBuffer. Ensure that this callback is executed
+ // without taking the audio-thread lock.
+ UnLock();
+ RTC_LOG(LS_VERBOSE) << "requesting data";
+ uint32_t nSamples = _ptrAudioBuffer->RequestPlayoutData(numPlaySamples);
+ Lock();
+
+ // We have been unlocked - check the flag again.
+ if (!_playing) {
+ return true;
+ }
+
+ nSamples = _ptrAudioBuffer->GetPlayoutData(_playBuffer);
+ if (nSamples != numPlaySamples) {
+ RTC_LOG(LS_ERROR) << "invalid number of output samples(" << nSamples
+ << ")";
+ }
+
+ size_t write = _playbackBufferSize;
+ if (_tempBufferSpace < write) {
+ write = _tempBufferSpace;
+ }
+
+ RTC_LOG(LS_VERBOSE) << "will write";
+ PaLock();
+ if (LATE(pa_stream_write)(_playStream, (void*)&_playBuffer[0], write,
+ NULL, (int64_t)0, PA_SEEK_RELATIVE) != PA_OK) {
+ _writeErrors++;
+ if (_writeErrors > 10) {
+ RTC_LOG(LS_ERROR) << "Playout error: _writeErrors=" << _writeErrors
+ << ", error=" << LATE(pa_context_errno)(_paContext);
+ _writeErrors = 0;
+ }
+ }
+ PaUnLock();
+
+ _playbackBufferUnused = write;
+ }
+
+ _tempBufferSpace = 0;
+ PaLock();
+ EnableWriteCallback();
+ PaUnLock();
+
+ } // _playing
+
+ return true;
+}
+
+bool AudioDeviceLinuxPulse::RecThreadProcess() {
+ if (!_timeEventRec.Wait(TimeDelta::Seconds(1))) {
+ return true;
+ }
+
+ MutexLock lock(&mutex_);
+ if (quit_) {
+ return false;
+ }
+ if (_startRec) {
+ RTC_LOG(LS_VERBOSE) << "_startRec true, performing initial actions";
+
+ _recDeviceName = NULL;
+
+ // Set if not default device
+ if (_inputDeviceIndex > 0) {
+ // Get the recording device name
+ _recDeviceName = new char[kAdmMaxDeviceNameSize];
+ _deviceIndex = _inputDeviceIndex;
+ RecordingDevices();
+ }
+
+ PaLock();
+
+ RTC_LOG(LS_VERBOSE) << "connecting stream";
+
+ // Connect the stream to a source
+ if (LATE(pa_stream_connect_record)(
+ _recStream, _recDeviceName, &_recBufferAttr,
+ (pa_stream_flags_t)_recStreamFlags) != PA_OK) {
+ RTC_LOG(LS_ERROR) << "failed to connect rec stream, err="
+ << LATE(pa_context_errno)(_paContext);
+ }
+
+ RTC_LOG(LS_VERBOSE) << "connected";
+
+ // Wait for state change
+ while (LATE(pa_stream_get_state)(_recStream) != PA_STREAM_READY) {
+ LATE(pa_threaded_mainloop_wait)(_paMainloop);
+ }
+
+ RTC_LOG(LS_VERBOSE) << "done";
+
+ // We can now handle read callbacks
+ EnableReadCallback();
+
+ PaUnLock();
+
+ // Clear device name
+ if (_recDeviceName) {
+ delete[] _recDeviceName;
+ _recDeviceName = NULL;
+ }
+
+ _startRec = false;
+ _recording = true;
+ _recStartEvent.Set();
+
+ return true;
+ }
+
+ if (_recording) {
+ // Read data and provide it to VoiceEngine
+ if (ReadRecordedData(_tempSampleData, _tempSampleDataSize) == -1) {
+ return true;
+ }
+
+ _tempSampleData = NULL;
+ _tempSampleDataSize = 0;
+
+ PaLock();
+ while (true) {
+ // Ack the last thing we read
+ if (LATE(pa_stream_drop)(_recStream) != 0) {
+ RTC_LOG(LS_WARNING)
+ << "failed to drop, err=" << LATE(pa_context_errno)(_paContext);
+ }
+
+ if (LATE(pa_stream_readable_size)(_recStream) <= 0) {
+ // Then that was all the data
+ break;
+ }
+
+ // Else more data.
+ const void* sampleData;
+ size_t sampleDataSize;
+
+ if (LATE(pa_stream_peek)(_recStream, &sampleData, &sampleDataSize) != 0) {
+ RTC_LOG(LS_ERROR) << "RECORD_ERROR, error = "
+ << LATE(pa_context_errno)(_paContext);
+ break;
+ }
+
+ // Drop lock for sigslot dispatch, which could take a while.
+ PaUnLock();
+ // Read data and provide it to VoiceEngine
+ if (ReadRecordedData(sampleData, sampleDataSize) == -1) {
+ return true;
+ }
+ PaLock();
+
+ // Return to top of loop for the ack and the check for more data.
+ }
+
+ EnableReadCallback();
+ PaUnLock();
+
+ } // _recording
+
+ return true;
+}
+
+bool AudioDeviceLinuxPulse::KeyPressed() const {
+#if defined(WEBRTC_USE_X11)
+ char szKey[32];
+ unsigned int i = 0;
+ char state = 0;
+
+ if (!_XDisplay)
+ return false;
+
+ // Check key map status
+ XQueryKeymap(_XDisplay, szKey);
+
+ // A bit change in keymap means a key is pressed
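+  // (XOR against the previous keymap isolates the changed bits; ANDing with
+  // the new state keeps only keys that went from released to pressed, so key
+  // releases alone do not count as a press.)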
+ for (i = 0; i < sizeof(szKey); i++)
+ state |= (szKey[i] ^ _oldKeyState[i]) & szKey[i];
+
+ // Save old state
+ memcpy((char*)_oldKeyState, (char*)szKey, sizeof(_oldKeyState));
+ return (state != 0);
+#else
+ return false;
+#endif
+}
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/linux/audio_device_pulse_linux.h b/third_party/libwebrtc/modules/audio_device/linux/audio_device_pulse_linux.h
new file mode 100644
index 0000000000..0cf89ef011
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/linux/audio_device_pulse_linux.h
@@ -0,0 +1,349 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_DEVICE_AUDIO_DEVICE_PULSE_LINUX_H_
+#define AUDIO_DEVICE_AUDIO_DEVICE_PULSE_LINUX_H_
+
+#include <memory>
+
+#include "api/sequence_checker.h"
+#include "modules/audio_device/audio_device_buffer.h"
+#include "modules/audio_device/audio_device_generic.h"
+#include "modules/audio_device/include/audio_device.h"
+#include "modules/audio_device/include/audio_device_defines.h"
+#include "modules/audio_device/linux/audio_mixer_manager_pulse_linux.h"
+#include "modules/audio_device/linux/pulseaudiosymboltable_linux.h"
+#include "rtc_base/event.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+
+#if defined(WEBRTC_USE_X11)
+#include <X11/Xlib.h>
+#endif
+
+#include <pulse/pulseaudio.h>
+#include <stddef.h>
+#include <stdint.h>
+
+// We define this flag if it's missing from our headers, because we want to be
+// able to compile against old headers but still use PA_STREAM_ADJUST_LATENCY
+// if run against a recent version of the library.
+#ifndef PA_STREAM_ADJUST_LATENCY
+#define PA_STREAM_ADJUST_LATENCY 0x2000U
+#endif
+#ifndef PA_STREAM_START_MUTED
+#define PA_STREAM_START_MUTED 0x1000U
+#endif
+
+// Set this constant to 0 to disable latency reading
+const uint32_t WEBRTC_PA_REPORT_LATENCY = 1;
+
+// Constants from implementation by Tristan Schmelcher [tschmelcher@google.com]
+
+// First PulseAudio protocol version that supports PA_STREAM_ADJUST_LATENCY.
+const uint32_t WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION = 13;
+
+// Some timing constants for optimal operation. See
+// https://tango.0pointer.de/pipermail/pulseaudio-discuss/2008-January/001170.html
+// for a good explanation of some of the factors that go into this.
+
+// Playback.
+
+// For playback, there is a round-trip delay to fill the server-side playback
+// buffer, so setting the latency too low is a buffer underflow risk. We will
+// automatically increase the latency if a buffer underflow does occur, but we
+// also enforce a sane minimum at start-up time. Anything lower would be
+// virtually guaranteed to underflow at least once, so there's no point in
+// allowing lower latencies.
+const uint32_t WEBRTC_PA_PLAYBACK_LATENCY_MINIMUM_MSECS = 20;
+
+// Every time a playback stream underflows, we will reconfigure it with target
+// latency that is greater by this amount.
+const uint32_t WEBRTC_PA_PLAYBACK_LATENCY_INCREMENT_MSECS = 20;
+
+// We also need to configure a suitable request size. Too small and we'd burn
+// CPU from the overhead of transferring small amounts of data at once. Too large
+// and the amount of data remaining in the buffer right before refilling it
+// would be a buffer underflow risk. We set it to half of the buffer size.
+const uint32_t WEBRTC_PA_PLAYBACK_REQUEST_FACTOR = 2;
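+// Rough illustration (assuming 48 kHz stereo 16-bit playout, which this header
+// does not mandate): a 20 ms target is roughly 48000 * 2 channels * 2 bytes *
+// 0.020 = 3840 bytes of buffer, giving a request size of 3840 / 2 = 1920 bytes.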
+
+// Capture.
+
+// For capture, low latency is not a buffer overflow risk, but it makes us burn
+// CPU from the overhead of transferring small amounts of data at once, so we set
+// a recommended value that we use for the kLowLatency constant (but if the user
+// explicitly requests something lower then we will honour it).
+// 1ms takes about 6-7% CPU. 5ms takes about 5%. 10ms takes about 4.x%.
+const uint32_t WEBRTC_PA_LOW_CAPTURE_LATENCY_MSECS = 10;
+
+// There is a round-trip delay to ack the data to the server, so the
+// server-side buffer needs extra space to prevent buffer overflow. 20ms is
+// sufficient, but there is no penalty to making it bigger, so we make it huge.
+// (750ms is libpulse's default value for the _total_ buffer size in the
+// kNoLatencyRequirements case.)
+const uint32_t WEBRTC_PA_CAPTURE_BUFFER_EXTRA_MSECS = 750;
+
+const uint32_t WEBRTC_PA_MSECS_PER_SEC = 1000;
+
+// Init _configuredLatencyRec/Play to this value to disable latency requirements
+const int32_t WEBRTC_PA_NO_LATENCY_REQUIREMENTS = -1;
+
+// Set this const to 1 to account for peeked and used data in latency
+// calculation
+const uint32_t WEBRTC_PA_CAPTURE_BUFFER_LATENCY_ADJUSTMENT = 0;
+
+typedef webrtc::adm_linux_pulse::PulseAudioSymbolTable WebRTCPulseSymbolTable;
+WebRTCPulseSymbolTable* GetPulseSymbolTable();
+
+namespace webrtc {
+
+class AudioDeviceLinuxPulse : public AudioDeviceGeneric {
+ public:
+ AudioDeviceLinuxPulse();
+ virtual ~AudioDeviceLinuxPulse();
+
+ // Retrieve the currently utilized audio layer
+ int32_t ActiveAudioLayer(
+ AudioDeviceModule::AudioLayer& audioLayer) const override;
+
+  // Main initialization and termination
+ InitStatus Init() override;
+ int32_t Terminate() RTC_LOCKS_EXCLUDED(mutex_) override;
+ bool Initialized() const override;
+
+ // Device enumeration
+ int16_t PlayoutDevices() override;
+ int16_t RecordingDevices() override;
+ int32_t PlayoutDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) override;
+ int32_t RecordingDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) override;
+
+ // Device selection
+ int32_t SetPlayoutDevice(uint16_t index) override;
+ int32_t SetPlayoutDevice(
+ AudioDeviceModule::WindowsDeviceType device) override;
+ int32_t SetRecordingDevice(uint16_t index) override;
+ int32_t SetRecordingDevice(
+ AudioDeviceModule::WindowsDeviceType device) override;
+
+ // Audio transport initialization
+ int32_t PlayoutIsAvailable(bool& available) override;
+ int32_t InitPlayout() RTC_LOCKS_EXCLUDED(mutex_) override;
+ bool PlayoutIsInitialized() const override;
+ int32_t RecordingIsAvailable(bool& available) override;
+ int32_t InitRecording() override;
+ bool RecordingIsInitialized() const override;
+
+ // Audio transport control
+ int32_t StartPlayout() RTC_LOCKS_EXCLUDED(mutex_) override;
+ int32_t StopPlayout() RTC_LOCKS_EXCLUDED(mutex_) override;
+ bool Playing() const override;
+ int32_t StartRecording() RTC_LOCKS_EXCLUDED(mutex_) override;
+ int32_t StopRecording() RTC_LOCKS_EXCLUDED(mutex_) override;
+ bool Recording() const override;
+
+ // Audio mixer initialization
+ int32_t InitSpeaker() override;
+ bool SpeakerIsInitialized() const override;
+ int32_t InitMicrophone() override;
+ bool MicrophoneIsInitialized() const override;
+
+ // Speaker volume controls
+ int32_t SpeakerVolumeIsAvailable(bool& available) override;
+ int32_t SetSpeakerVolume(uint32_t volume) override;
+ int32_t SpeakerVolume(uint32_t& volume) const override;
+ int32_t MaxSpeakerVolume(uint32_t& maxVolume) const override;
+ int32_t MinSpeakerVolume(uint32_t& minVolume) const override;
+
+ // Microphone volume controls
+ int32_t MicrophoneVolumeIsAvailable(bool& available) override;
+ int32_t SetMicrophoneVolume(uint32_t volume) override;
+ int32_t MicrophoneVolume(uint32_t& volume) const override;
+ int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const override;
+ int32_t MinMicrophoneVolume(uint32_t& minVolume) const override;
+
+ // Speaker mute control
+ int32_t SpeakerMuteIsAvailable(bool& available) override;
+ int32_t SetSpeakerMute(bool enable) override;
+ int32_t SpeakerMute(bool& enabled) const override;
+
+ // Microphone mute control
+ int32_t MicrophoneMuteIsAvailable(bool& available) override;
+ int32_t SetMicrophoneMute(bool enable) override;
+ int32_t MicrophoneMute(bool& enabled) const override;
+
+ // Stereo support
+ int32_t StereoPlayoutIsAvailable(bool& available) override;
+ int32_t SetStereoPlayout(bool enable) override;
+ int32_t StereoPlayout(bool& enabled) const override;
+ int32_t StereoRecordingIsAvailable(bool& available) override;
+ int32_t SetStereoRecording(bool enable) override;
+ int32_t StereoRecording(bool& enabled) const override;
+
+ // Delay information and control
+ int32_t PlayoutDelay(uint16_t& delayMS) const
+ RTC_LOCKS_EXCLUDED(mutex_) override;
+
+ void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override;
+
+ private:
+ void Lock() RTC_EXCLUSIVE_LOCK_FUNCTION(mutex_) { mutex_.Lock(); }
+ void UnLock() RTC_UNLOCK_FUNCTION(mutex_) { mutex_.Unlock(); }
+ void WaitForOperationCompletion(pa_operation* paOperation) const;
+ void WaitForSuccess(pa_operation* paOperation) const;
+
+ bool KeyPressed() const;
+
+ static void PaContextStateCallback(pa_context* c, void* pThis);
+ static void PaSinkInfoCallback(pa_context* c,
+ const pa_sink_info* i,
+ int eol,
+ void* pThis);
+ static void PaSourceInfoCallback(pa_context* c,
+ const pa_source_info* i,
+ int eol,
+ void* pThis);
+ static void PaServerInfoCallback(pa_context* c,
+ const pa_server_info* i,
+ void* pThis);
+ static void PaStreamStateCallback(pa_stream* p, void* pThis);
+ void PaContextStateCallbackHandler(pa_context* c);
+ void PaSinkInfoCallbackHandler(const pa_sink_info* i, int eol);
+ void PaSourceInfoCallbackHandler(const pa_source_info* i, int eol);
+ void PaServerInfoCallbackHandler(const pa_server_info* i);
+ void PaStreamStateCallbackHandler(pa_stream* p);
+
+ void EnableWriteCallback();
+ void DisableWriteCallback();
+ static void PaStreamWriteCallback(pa_stream* unused,
+ size_t buffer_space,
+ void* pThis);
+ void PaStreamWriteCallbackHandler(size_t buffer_space);
+ static void PaStreamUnderflowCallback(pa_stream* unused, void* pThis);
+ void PaStreamUnderflowCallbackHandler();
+ void EnableReadCallback();
+ void DisableReadCallback();
+ static void PaStreamReadCallback(pa_stream* unused1,
+ size_t unused2,
+ void* pThis);
+ void PaStreamReadCallbackHandler();
+ static void PaStreamOverflowCallback(pa_stream* unused, void* pThis);
+ void PaStreamOverflowCallbackHandler();
+ int32_t LatencyUsecs(pa_stream* stream);
+ int32_t ReadRecordedData(const void* bufferData, size_t bufferSize);
+ int32_t ProcessRecordedData(int8_t* bufferData,
+ uint32_t bufferSizeInSamples,
+ uint32_t recDelay);
+
+ int32_t CheckPulseAudioVersion();
+ int32_t InitSamplingFrequency();
+ int32_t GetDefaultDeviceInfo(bool recDevice, char* name, uint16_t& index);
+ int32_t InitPulseAudio();
+ int32_t TerminatePulseAudio();
+
+ void PaLock();
+ void PaUnLock();
+
+ static void RecThreadFunc(void*);
+ static void PlayThreadFunc(void*);
+ bool RecThreadProcess() RTC_LOCKS_EXCLUDED(mutex_);
+ bool PlayThreadProcess() RTC_LOCKS_EXCLUDED(mutex_);
+
+ AudioDeviceBuffer* _ptrAudioBuffer;
+
+ mutable Mutex mutex_;
+ rtc::Event _timeEventRec;
+ rtc::Event _timeEventPlay;
+ rtc::Event _recStartEvent;
+ rtc::Event _playStartEvent;
+
+ rtc::PlatformThread _ptrThreadPlay;
+ rtc::PlatformThread _ptrThreadRec;
+
+ AudioMixerManagerLinuxPulse _mixerManager;
+
+ uint16_t _inputDeviceIndex;
+ uint16_t _outputDeviceIndex;
+ bool _inputDeviceIsSpecified;
+ bool _outputDeviceIsSpecified;
+
+ int sample_rate_hz_;
+ uint8_t _recChannels;
+ uint8_t _playChannels;
+
+ // Stores thread ID in constructor.
+  // We can then use RTC_DCHECK_RUN_ON(&thread_checker_) to ensure that
+ // other methods are called from the same thread.
+ // Currently only does RTC_DCHECK(thread_checker_.IsCurrent()).
+ SequenceChecker thread_checker_;
+
+ bool _initialized;
+ bool _recording;
+ bool _playing;
+ bool _recIsInitialized;
+ bool _playIsInitialized;
+ bool _startRec;
+ bool _startPlay;
+ bool update_speaker_volume_at_startup_;
+ bool quit_ RTC_GUARDED_BY(&mutex_);
+
+ uint32_t _sndCardPlayDelay RTC_GUARDED_BY(&mutex_);
+
+ int32_t _writeErrors;
+
+ uint16_t _deviceIndex;
+ int16_t _numPlayDevices;
+ int16_t _numRecDevices;
+ char* _playDeviceName;
+ char* _recDeviceName;
+ char* _playDisplayDeviceName;
+ char* _recDisplayDeviceName;
+ char _paServerVersion[32];
+
+ int8_t* _playBuffer;
+ size_t _playbackBufferSize;
+ size_t _playbackBufferUnused;
+ size_t _tempBufferSpace;
+ int8_t* _recBuffer;
+ size_t _recordBufferSize;
+ size_t _recordBufferUsed;
+ const void* _tempSampleData;
+ size_t _tempSampleDataSize;
+ int32_t _configuredLatencyPlay;
+ int32_t _configuredLatencyRec;
+
+ // PulseAudio
+ uint16_t _paDeviceIndex;
+ bool _paStateChanged;
+
+ pa_threaded_mainloop* _paMainloop;
+ pa_mainloop_api* _paMainloopApi;
+ pa_context* _paContext;
+
+ pa_stream* _recStream;
+ pa_stream* _playStream;
+ uint32_t _recStreamFlags;
+ uint32_t _playStreamFlags;
+ pa_buffer_attr _playBufferAttr;
+ pa_buffer_attr _recBufferAttr;
+
+ char _oldKeyState[32];
+#if defined(WEBRTC_USE_X11)
+ Display* _XDisplay;
+#endif
+};
+
+} // namespace webrtc
+
+#endif  // AUDIO_DEVICE_AUDIO_DEVICE_PULSE_LINUX_H_
diff --git a/third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_alsa_linux.cc b/third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_alsa_linux.cc
new file mode 100644
index 0000000000..e7e7033173
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_alsa_linux.cc
@@ -0,0 +1,979 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/linux/audio_mixer_manager_alsa_linux.h"
+
+#include "modules/audio_device/linux/audio_device_alsa_linux.h"
+#include "rtc_base/logging.h"
+
+// Accesses ALSA functions through our late-binding symbol table instead of
+// directly. This way we don't have to link to libasound, which means our binary
+// will work on systems that don't have it.
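+// For example, LATE(snd_mixer_open)(&_outputMixerHandle, 0) below resolves
+// snd_mixer_open through the symbol table at run time before calling it.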
+#define LATE(sym) \
+ LATESYM_GET(webrtc::adm_linux_alsa::AlsaSymbolTable, GetAlsaSymbolTable(), \
+ sym)
+
+namespace webrtc {
+
+AudioMixerManagerLinuxALSA::AudioMixerManagerLinuxALSA()
+ : _outputMixerHandle(NULL),
+ _inputMixerHandle(NULL),
+ _outputMixerElement(NULL),
+ _inputMixerElement(NULL) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << " created";
+
+ memset(_outputMixerStr, 0, kAdmMaxDeviceNameSize);
+ memset(_inputMixerStr, 0, kAdmMaxDeviceNameSize);
+}
+
+AudioMixerManagerLinuxALSA::~AudioMixerManagerLinuxALSA() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << " destroyed";
+ Close();
+}
+
+// ============================================================================
+// PUBLIC METHODS
+// ============================================================================
+
+int32_t AudioMixerManagerLinuxALSA::Close() {
+ RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+ MutexLock lock(&mutex_);
+
+ CloseSpeakerLocked();
+ CloseMicrophoneLocked();
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxALSA::CloseSpeaker() {
+ MutexLock lock(&mutex_);
+ return CloseSpeakerLocked();
+}
+
+int32_t AudioMixerManagerLinuxALSA::CloseSpeakerLocked() {
+ RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+ int errVal = 0;
+
+ if (_outputMixerHandle != NULL) {
+ RTC_LOG(LS_VERBOSE) << "Closing playout mixer";
+ LATE(snd_mixer_free)(_outputMixerHandle);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "Error freeing playout mixer: "
+ << LATE(snd_strerror)(errVal);
+ }
+ errVal = LATE(snd_mixer_detach)(_outputMixerHandle, _outputMixerStr);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "Error detaching playout mixer: "
+ << LATE(snd_strerror)(errVal);
+ }
+ errVal = LATE(snd_mixer_close)(_outputMixerHandle);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "Error snd_mixer_close(handleMixer) errVal="
+ << errVal;
+ }
+ _outputMixerHandle = NULL;
+ _outputMixerElement = NULL;
+ }
+ memset(_outputMixerStr, 0, kAdmMaxDeviceNameSize);
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxALSA::CloseMicrophone() {
+ MutexLock lock(&mutex_);
+ return CloseMicrophoneLocked();
+}
+
+int32_t AudioMixerManagerLinuxALSA::CloseMicrophoneLocked() {
+ RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+ int errVal = 0;
+
+ if (_inputMixerHandle != NULL) {
+ RTC_LOG(LS_VERBOSE) << "Closing record mixer";
+
+ LATE(snd_mixer_free)(_inputMixerHandle);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "Error freeing record mixer: "
+ << LATE(snd_strerror)(errVal);
+ }
+ RTC_LOG(LS_VERBOSE) << "Closing record mixer 2";
+
+ errVal = LATE(snd_mixer_detach)(_inputMixerHandle, _inputMixerStr);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "Error detaching record mixer: "
+ << LATE(snd_strerror)(errVal);
+ }
+ RTC_LOG(LS_VERBOSE) << "Closing record mixer 3";
+
+ errVal = LATE(snd_mixer_close)(_inputMixerHandle);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "Error snd_mixer_close(handleMixer) errVal="
+ << errVal;
+ }
+
+ RTC_LOG(LS_VERBOSE) << "Closing record mixer 4";
+ _inputMixerHandle = NULL;
+ _inputMixerElement = NULL;
+ }
+ memset(_inputMixerStr, 0, kAdmMaxDeviceNameSize);
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxALSA::OpenSpeaker(char* deviceName) {
+ RTC_LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::OpenSpeaker(name="
+ << deviceName << ")";
+
+ MutexLock lock(&mutex_);
+
+ int errVal = 0;
+
+ // Close any existing output mixer handle
+ //
+ if (_outputMixerHandle != NULL) {
+ RTC_LOG(LS_VERBOSE) << "Closing playout mixer";
+
+ LATE(snd_mixer_free)(_outputMixerHandle);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "Error freeing playout mixer: "
+ << LATE(snd_strerror)(errVal);
+ }
+ errVal = LATE(snd_mixer_detach)(_outputMixerHandle, _outputMixerStr);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "Error detaching playout mixer: "
+ << LATE(snd_strerror)(errVal);
+ }
+ errVal = LATE(snd_mixer_close)(_outputMixerHandle);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "Error snd_mixer_close(handleMixer) errVal="
+ << errVal;
+ }
+ }
+ _outputMixerHandle = NULL;
+ _outputMixerElement = NULL;
+
+ errVal = LATE(snd_mixer_open)(&_outputMixerHandle, 0);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "snd_mixer_open(&_outputMixerHandle, 0) - error";
+ return -1;
+ }
+
+ char controlName[kAdmMaxDeviceNameSize] = {0};
+ GetControlName(controlName, deviceName);
+
+ RTC_LOG(LS_VERBOSE) << "snd_mixer_attach(_outputMixerHandle, " << controlName
+ << ")";
+
+ errVal = LATE(snd_mixer_attach)(_outputMixerHandle, controlName);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "snd_mixer_attach(_outputMixerHandle, " << controlName
+ << ") error: " << LATE(snd_strerror)(errVal);
+ _outputMixerHandle = NULL;
+ return -1;
+ }
+ strcpy(_outputMixerStr, controlName);
+
+ errVal = LATE(snd_mixer_selem_register)(_outputMixerHandle, NULL, NULL);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR)
+ << "snd_mixer_selem_register(_outputMixerHandle, NULL, NULL), "
+ "error: "
+ << LATE(snd_strerror)(errVal);
+ _outputMixerHandle = NULL;
+ return -1;
+ }
+
+ // Load and find the proper mixer element
+ if (LoadSpeakerMixerElement() < 0) {
+ return -1;
+ }
+
+ if (_outputMixerHandle != NULL) {
+ RTC_LOG(LS_VERBOSE) << "the output mixer device is now open ("
+ << _outputMixerHandle << ")";
+ }
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxALSA::OpenMicrophone(char* deviceName) {
+ RTC_LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::OpenMicrophone(name="
+ << deviceName << ")";
+
+ MutexLock lock(&mutex_);
+
+ int errVal = 0;
+
+ // Close any existing input mixer handle
+ //
+ if (_inputMixerHandle != NULL) {
+ RTC_LOG(LS_VERBOSE) << "Closing record mixer";
+
+ LATE(snd_mixer_free)(_inputMixerHandle);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "Error freeing record mixer: "
+ << LATE(snd_strerror)(errVal);
+ }
+ RTC_LOG(LS_VERBOSE) << "Closing record mixer";
+
+ errVal = LATE(snd_mixer_detach)(_inputMixerHandle, _inputMixerStr);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "Error detaching record mixer: "
+ << LATE(snd_strerror)(errVal);
+ }
+ RTC_LOG(LS_VERBOSE) << "Closing record mixer";
+
+ errVal = LATE(snd_mixer_close)(_inputMixerHandle);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "Error snd_mixer_close(handleMixer) errVal="
+ << errVal;
+ }
+ RTC_LOG(LS_VERBOSE) << "Closing record mixer";
+ }
+ _inputMixerHandle = NULL;
+ _inputMixerElement = NULL;
+
+ errVal = LATE(snd_mixer_open)(&_inputMixerHandle, 0);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "snd_mixer_open(&_inputMixerHandle, 0) - error";
+ return -1;
+ }
+
+ char controlName[kAdmMaxDeviceNameSize] = {0};
+ GetControlName(controlName, deviceName);
+
+ RTC_LOG(LS_VERBOSE) << "snd_mixer_attach(_inputMixerHandle, " << controlName
+ << ")";
+
+ errVal = LATE(snd_mixer_attach)(_inputMixerHandle, controlName);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "snd_mixer_attach(_inputMixerHandle, " << controlName
+ << ") error: " << LATE(snd_strerror)(errVal);
+
+ _inputMixerHandle = NULL;
+ return -1;
+ }
+ strcpy(_inputMixerStr, controlName);
+
+ errVal = LATE(snd_mixer_selem_register)(_inputMixerHandle, NULL, NULL);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR)
+ << "snd_mixer_selem_register(_inputMixerHandle, NULL, NULL), "
+ "error: "
+ << LATE(snd_strerror)(errVal);
+
+ _inputMixerHandle = NULL;
+ return -1;
+ }
+ // Load and find the proper mixer element
+ if (LoadMicMixerElement() < 0) {
+ return -1;
+ }
+
+ if (_inputMixerHandle != NULL) {
+ RTC_LOG(LS_VERBOSE) << "the input mixer device is now open ("
+ << _inputMixerHandle << ")";
+ }
+
+ return 0;
+}
+
+bool AudioMixerManagerLinuxALSA::SpeakerIsInitialized() const {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+
+ return (_outputMixerHandle != NULL);
+}
+
+bool AudioMixerManagerLinuxALSA::MicrophoneIsInitialized() const {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+
+ return (_inputMixerHandle != NULL);
+}
+
+int32_t AudioMixerManagerLinuxALSA::SetSpeakerVolume(uint32_t volume) {
+ RTC_LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::SetSpeakerVolume(volume="
+ << volume << ")";
+
+ MutexLock lock(&mutex_);
+
+ if (_outputMixerElement == NULL) {
+    RTC_LOG(LS_WARNING) << "no available output mixer element exists";
+ return -1;
+ }
+
+ int errVal = LATE(snd_mixer_selem_set_playback_volume_all)(
+ _outputMixerElement, volume);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "Error changing master volume: "
+ << LATE(snd_strerror)(errVal);
+ return -1;
+ }
+
+ return (0);
+}
+
+int32_t AudioMixerManagerLinuxALSA::SpeakerVolume(uint32_t& volume) const {
+ if (_outputMixerElement == NULL) {
+    RTC_LOG(LS_WARNING) << "no available output mixer element exists";
+ return -1;
+ }
+
+ long int vol(0);
+
+ int errVal = LATE(snd_mixer_selem_get_playback_volume)(
+ _outputMixerElement, (snd_mixer_selem_channel_id_t)0, &vol);
+ if (errVal < 0) {
+    RTC_LOG(LS_ERROR) << "Error getting output volume: "
+ << LATE(snd_strerror)(errVal);
+ return -1;
+ }
+ RTC_LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::SpeakerVolume() => vol="
+ << vol;
+
+ volume = static_cast<uint32_t>(vol);
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxALSA::MaxSpeakerVolume(
+ uint32_t& maxVolume) const {
+ if (_outputMixerElement == NULL) {
+    RTC_LOG(LS_WARNING) << "no available output mixer element exists";
+ return -1;
+ }
+
+ long int minVol(0);
+ long int maxVol(0);
+
+ int errVal = LATE(snd_mixer_selem_get_playback_volume_range)(
+ _outputMixerElement, &minVol, &maxVol);
+
+ RTC_LOG(LS_VERBOSE) << "Playout hardware volume range, min: " << minVol
+ << ", max: " << maxVol;
+
+ if (maxVol <= minVol) {
+ RTC_LOG(LS_ERROR) << "Error getting get_playback_volume_range: "
+ << LATE(snd_strerror)(errVal);
+ }
+
+ maxVolume = static_cast<uint32_t>(maxVol);
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxALSA::MinSpeakerVolume(
+ uint32_t& minVolume) const {
+ if (_outputMixerElement == NULL) {
+    RTC_LOG(LS_WARNING) << "no available output mixer element exists";
+ return -1;
+ }
+
+ long int minVol(0);
+ long int maxVol(0);
+
+ int errVal = LATE(snd_mixer_selem_get_playback_volume_range)(
+ _outputMixerElement, &minVol, &maxVol);
+
+ RTC_LOG(LS_VERBOSE) << "Playout hardware volume range, min: " << minVol
+ << ", max: " << maxVol;
+
+ if (maxVol <= minVol) {
+ RTC_LOG(LS_ERROR) << "Error getting get_playback_volume_range: "
+ << LATE(snd_strerror)(errVal);
+ }
+
+ minVolume = static_cast<uint32_t>(minVol);
+
+ return 0;
+}
+
+// TL: Have done testing with these but they don't seem reliable and
+// they were therefore not added
+/*
+ // ----------------------------------------------------------------------------
+ // SetMaxSpeakerVolume
+ // ----------------------------------------------------------------------------
+
+ int32_t AudioMixerManagerLinuxALSA::SetMaxSpeakerVolume(
+ uint32_t maxVolume)
+ {
+
+ if (_outputMixerElement == NULL)
+ {
+ RTC_LOG(LS_WARNING) << "no available output mixer element exists";
+ return -1;
+ }
+
+ long int minVol(0);
+ long int maxVol(0);
+
+ int errVal = snd_mixer_selem_get_playback_volume_range(
+ _outputMixerElement, &minVol, &maxVol);
+ if ((maxVol <= minVol) || (errVal != 0))
+ {
+ RTC_LOG(LS_WARNING) << "Error getting playback volume range: "
+ << snd_strerror(errVal);
+ }
+
+ maxVol = maxVolume;
+ errVal = snd_mixer_selem_set_playback_volume_range(
+ _outputMixerElement, minVol, maxVol);
+ RTC_LOG(LS_VERBOSE) << "Playout hardware volume range, min: " << minVol
+ << ", max: " << maxVol;
+ if (errVal != 0)
+ {
+ RTC_LOG(LS_ERROR) << "Error setting playback volume range: "
+ << snd_strerror(errVal);
+ return -1;
+ }
+
+ return 0;
+ }
+
+ // ----------------------------------------------------------------------------
+ // SetMinSpeakerVolume
+ // ----------------------------------------------------------------------------
+
+ int32_t AudioMixerManagerLinuxALSA::SetMinSpeakerVolume(
+ uint32_t minVolume)
+ {
+
+ if (_outputMixerElement == NULL)
+ {
+ RTC_LOG(LS_WARNING) << "no available output mixer element exists";
+ return -1;
+ }
+
+ long int minVol(0);
+ long int maxVol(0);
+
+ int errVal = snd_mixer_selem_get_playback_volume_range(
+ _outputMixerElement, &minVol, &maxVol);
+ if ((maxVol <= minVol) || (errVal != 0))
+ {
+ RTC_LOG(LS_WARNING) << "Error getting playback volume range: "
+ << snd_strerror(errVal);
+ }
+
+ minVol = minVolume;
+ errVal = snd_mixer_selem_set_playback_volume_range(
+ _outputMixerElement, minVol, maxVol);
+ RTC_LOG(LS_VERBOSE) << "Playout hardware volume range, min: " << minVol
+ << ", max: " << maxVol;
+ if (errVal != 0)
+ {
+ RTC_LOG(LS_ERROR) << "Error setting playback volume range: "
+ << snd_strerror(errVal);
+ return -1;
+ }
+
+ return 0;
+ }
+ */
+
+int32_t AudioMixerManagerLinuxALSA::SpeakerVolumeIsAvailable(bool& available) {
+ if (_outputMixerElement == NULL) {
+    RTC_LOG(LS_WARNING) << "no available output mixer element exists";
+ return -1;
+ }
+
+ available = LATE(snd_mixer_selem_has_playback_volume)(_outputMixerElement);
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxALSA::SpeakerMuteIsAvailable(bool& available) {
+ if (_outputMixerElement == NULL) {
+    RTC_LOG(LS_WARNING) << "no available output mixer element exists";
+ return -1;
+ }
+
+ available = LATE(snd_mixer_selem_has_playback_switch)(_outputMixerElement);
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxALSA::SetSpeakerMute(bool enable) {
+ RTC_LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::SetSpeakerMute(enable="
+ << enable << ")";
+
+ MutexLock lock(&mutex_);
+
+ if (_outputMixerElement == NULL) {
+    RTC_LOG(LS_WARNING) << "no available output mixer element exists";
+ return -1;
+ }
+
+ // Ensure that the selected speaker destination has a valid mute control.
+ bool available(false);
+ SpeakerMuteIsAvailable(available);
+ if (!available) {
+ RTC_LOG(LS_WARNING) << "it is not possible to mute the speaker";
+ return -1;
+ }
+
+ // Note value = 0 (off) means muted
+ int errVal = LATE(snd_mixer_selem_set_playback_switch_all)(
+ _outputMixerElement, !enable);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "Error setting playback switch: "
+ << LATE(snd_strerror)(errVal);
+ return -1;
+ }
+
+ return (0);
+}
+
+int32_t AudioMixerManagerLinuxALSA::SpeakerMute(bool& enabled) const {
+ if (_outputMixerElement == NULL) {
+    RTC_LOG(LS_WARNING) << "no available output mixer exists";
+ return -1;
+ }
+
+ // Ensure that the selected speaker destination has a valid mute control.
+ bool available =
+ LATE(snd_mixer_selem_has_playback_switch)(_outputMixerElement);
+ if (!available) {
+ RTC_LOG(LS_WARNING) << "it is not possible to mute the speaker";
+ return -1;
+ }
+
+ int value(false);
+
+ // Retrieve one boolean control value for a specified mute-control
+ //
+ int errVal = LATE(snd_mixer_selem_get_playback_switch)(
+ _outputMixerElement, (snd_mixer_selem_channel_id_t)0, &value);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "Error getting playback switch: "
+ << LATE(snd_strerror)(errVal);
+ return -1;
+ }
+
+ // Note value = 0 (off) means muted
+ enabled = (bool)!value;
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxALSA::MicrophoneMuteIsAvailable(bool& available) {
+ if (_inputMixerElement == NULL) {
+    RTC_LOG(LS_WARNING) << "no available input mixer element exists";
+ return -1;
+ }
+
+ available = LATE(snd_mixer_selem_has_capture_switch)(_inputMixerElement);
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxALSA::SetMicrophoneMute(bool enable) {
+ RTC_LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::SetMicrophoneMute(enable="
+ << enable << ")";
+
+ MutexLock lock(&mutex_);
+
+ if (_inputMixerElement == NULL) {
+    RTC_LOG(LS_WARNING) << "no available input mixer element exists";
+ return -1;
+ }
+
+ // Ensure that the selected microphone destination has a valid mute control.
+ bool available(false);
+ MicrophoneMuteIsAvailable(available);
+ if (!available) {
+ RTC_LOG(LS_WARNING) << "it is not possible to mute the microphone";
+ return -1;
+ }
+
+ // Note value = 0 (off) means muted
+ int errVal =
+ LATE(snd_mixer_selem_set_capture_switch_all)(_inputMixerElement, !enable);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "Error setting capture switch: "
+ << LATE(snd_strerror)(errVal);
+ return -1;
+ }
+
+ return (0);
+}
+
+int32_t AudioMixerManagerLinuxALSA::MicrophoneMute(bool& enabled) const {
+ if (_inputMixerElement == NULL) {
+    RTC_LOG(LS_WARNING) << "no available input mixer exists";
+ return -1;
+ }
+
+ // Ensure that the selected microphone destination has a valid mute control.
+ bool available = LATE(snd_mixer_selem_has_capture_switch)(_inputMixerElement);
+ if (!available) {
+ RTC_LOG(LS_WARNING) << "it is not possible to mute the microphone";
+ return -1;
+ }
+
+ int value(false);
+
+ // Retrieve one boolean control value for a specified mute-control
+ //
+ int errVal = LATE(snd_mixer_selem_get_capture_switch)(
+ _inputMixerElement, (snd_mixer_selem_channel_id_t)0, &value);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "Error getting capture switch: "
+ << LATE(snd_strerror)(errVal);
+ return -1;
+ }
+
+ // Note value = 0 (off) means muted
+ enabled = (bool)!value;
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxALSA::MicrophoneVolumeIsAvailable(
+ bool& available) {
+ if (_inputMixerElement == NULL) {
+    RTC_LOG(LS_WARNING) << "no available input mixer element exists";
+ return -1;
+ }
+
+ available = LATE(snd_mixer_selem_has_capture_volume)(_inputMixerElement);
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxALSA::SetMicrophoneVolume(uint32_t volume) {
+ RTC_LOG(LS_VERBOSE)
+ << "AudioMixerManagerLinuxALSA::SetMicrophoneVolume(volume=" << volume
+ << ")";
+
+ MutexLock lock(&mutex_);
+
+ if (_inputMixerElement == NULL) {
+    RTC_LOG(LS_WARNING) << "no available input mixer element exists";
+ return -1;
+ }
+
+ int errVal =
+ LATE(snd_mixer_selem_set_capture_volume_all)(_inputMixerElement, volume);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "Error changing microphone volume: "
+ << LATE(snd_strerror)(errVal);
+ return -1;
+ }
+
+ return (0);
+}
+
+// TL: Have done testing with these but they don't seem reliable and
+// they were therefore not added
+/*
+ // ----------------------------------------------------------------------------
+ // SetMaxMicrophoneVolume
+ // ----------------------------------------------------------------------------
+
+ int32_t AudioMixerManagerLinuxALSA::SetMaxMicrophoneVolume(
+ uint32_t maxVolume)
+ {
+
+ if (_inputMixerElement == NULL)
+ {
+ RTC_LOG(LS_WARNING) << "no available output mixer element exists";
+ return -1;
+ }
+
+ long int minVol(0);
+ long int maxVol(0);
+
+ int errVal = snd_mixer_selem_get_capture_volume_range(_inputMixerElement,
+ &minVol, &maxVol);
+ if ((maxVol <= minVol) || (errVal != 0))
+ {
+ RTC_LOG(LS_WARNING) << "Error getting capture volume range: "
+ << snd_strerror(errVal);
+ }
+
+ maxVol = (long int)maxVolume;
+ printf("min %d max %d", minVol, maxVol);
+ errVal = snd_mixer_selem_set_capture_volume_range(_inputMixerElement,
+ minVol, maxVol);
+ RTC_LOG(LS_VERBOSE) << "Capture hardware volume range, min: " << minVol
+ << ", max: " << maxVol;
+ if (errVal != 0)
+ {
+ RTC_LOG(LS_ERROR) << "Error setting capture volume range: "
+ << snd_strerror(errVal);
+ return -1;
+ }
+
+ return 0;
+ }
+
+ // ----------------------------------------------------------------------------
+ // SetMinMicrophoneVolume
+ // ----------------------------------------------------------------------------
+
+ int32_t AudioMixerManagerLinuxALSA::SetMinMicrophoneVolume(
+ uint32_t minVolume)
+ {
+
+ if (_inputMixerElement == NULL)
+ {
+ RTC_LOG(LS_WARNING) << "no available output mixer element exists";
+ return -1;
+ }
+
+ long int minVol(0);
+ long int maxVol(0);
+
+ int errVal = snd_mixer_selem_get_capture_volume_range(
+ _inputMixerElement, &minVol, &maxVol);
+ if (maxVol <= minVol)
+ {
+ //maxVol = 255;
+ RTC_LOG(LS_WARNING) << "Error getting capture volume range: "
+ << snd_strerror(errVal);
+ }
+
+ printf("min %d max %d", minVol, maxVol);
+ minVol = (long int)minVolume;
+ errVal = snd_mixer_selem_set_capture_volume_range(
+ _inputMixerElement, minVol, maxVol);
+ RTC_LOG(LS_VERBOSE) << "Capture hardware volume range, min: " << minVol
+ << ", max: " << maxVol;
+ if (errVal != 0)
+ {
+ RTC_LOG(LS_ERROR) << "Error setting capture volume range: "
+ << snd_strerror(errVal);
+ return -1;
+ }
+
+ return 0;
+ }
+ */
+
+int32_t AudioMixerManagerLinuxALSA::MicrophoneVolume(uint32_t& volume) const {
+ if (_inputMixerElement == NULL) {
+    RTC_LOG(LS_WARNING) << "no available input mixer element exists";
+ return -1;
+ }
+
+ long int vol(0);
+
+ int errVal = LATE(snd_mixer_selem_get_capture_volume)(
+ _inputMixerElement, (snd_mixer_selem_channel_id_t)0, &vol);
+ if (errVal < 0) {
+    RTC_LOG(LS_ERROR) << "Error getting input volume: "
+ << LATE(snd_strerror)(errVal);
+ return -1;
+ }
+ RTC_LOG(LS_VERBOSE)
+ << "AudioMixerManagerLinuxALSA::MicrophoneVolume() => vol=" << vol;
+
+ volume = static_cast<uint32_t>(vol);
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxALSA::MaxMicrophoneVolume(
+ uint32_t& maxVolume) const {
+ if (_inputMixerElement == NULL) {
+    RTC_LOG(LS_WARNING) << "no available input mixer element exists";
+ return -1;
+ }
+
+ long int minVol(0);
+ long int maxVol(0);
+
+ // check if we have mic volume at all
+ if (!LATE(snd_mixer_selem_has_capture_volume)(_inputMixerElement)) {
+ RTC_LOG(LS_ERROR) << "No microphone volume available";
+ return -1;
+ }
+
+ int errVal = LATE(snd_mixer_selem_get_capture_volume_range)(
+ _inputMixerElement, &minVol, &maxVol);
+
+ RTC_LOG(LS_VERBOSE) << "Microphone hardware volume range, min: " << minVol
+ << ", max: " << maxVol;
+ if (maxVol <= minVol) {
+ RTC_LOG(LS_ERROR) << "Error getting microphone volume range: "
+ << LATE(snd_strerror)(errVal);
+ }
+
+ maxVolume = static_cast<uint32_t>(maxVol);
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxALSA::MinMicrophoneVolume(
+ uint32_t& minVolume) const {
+ if (_inputMixerElement == NULL) {
+    RTC_LOG(LS_WARNING) << "no available input mixer element exists";
+ return -1;
+ }
+
+ long int minVol(0);
+ long int maxVol(0);
+
+ int errVal = LATE(snd_mixer_selem_get_capture_volume_range)(
+ _inputMixerElement, &minVol, &maxVol);
+
+ RTC_LOG(LS_VERBOSE) << "Microphone hardware volume range, min: " << minVol
+ << ", max: " << maxVol;
+ if (maxVol <= minVol) {
+ RTC_LOG(LS_ERROR) << "Error getting microphone volume range: "
+ << LATE(snd_strerror)(errVal);
+ }
+
+ minVolume = static_cast<uint32_t>(minVol);
+
+ return 0;
+}
+
+// ============================================================================
+// Private Methods
+// ============================================================================
+
+int32_t AudioMixerManagerLinuxALSA::LoadMicMixerElement() const {
+ int errVal = LATE(snd_mixer_load)(_inputMixerHandle);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "snd_mixer_load(_inputMixerHandle), error: "
+ << LATE(snd_strerror)(errVal);
+ _inputMixerHandle = NULL;
+ return -1;
+ }
+
+ snd_mixer_elem_t* elem = NULL;
+ snd_mixer_elem_t* micElem = NULL;
+ unsigned mixerIdx = 0;
+ const char* selemName = NULL;
+
+ // Find and store handles to the right mixer elements
+ for (elem = LATE(snd_mixer_first_elem)(_inputMixerHandle); elem;
+ elem = LATE(snd_mixer_elem_next)(elem), mixerIdx++) {
+ if (LATE(snd_mixer_selem_is_active)(elem)) {
+ selemName = LATE(snd_mixer_selem_get_name)(elem);
+ if (strcmp(selemName, "Capture") == 0) // "Capture", "Mic"
+ {
+ _inputMixerElement = elem;
+ RTC_LOG(LS_VERBOSE) << "Capture element set";
+ } else if (strcmp(selemName, "Mic") == 0) {
+ micElem = elem;
+ RTC_LOG(LS_VERBOSE) << "Mic element found";
+ }
+ }
+
+ if (_inputMixerElement) {
+ // Use the first Capture element that is found
+ // The second one may not work
+ break;
+ }
+ }
+
+ if (_inputMixerElement == NULL) {
+ // We didn't find a Capture handle, use Mic.
+ if (micElem != NULL) {
+ _inputMixerElement = micElem;
+ RTC_LOG(LS_VERBOSE) << "Using Mic as capture volume.";
+ } else {
+ _inputMixerElement = NULL;
+ RTC_LOG(LS_ERROR) << "Could not find capture volume on the mixer.";
+
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxALSA::LoadSpeakerMixerElement() const {
+ int errVal = LATE(snd_mixer_load)(_outputMixerHandle);
+ if (errVal < 0) {
+ RTC_LOG(LS_ERROR) << "snd_mixer_load(_outputMixerHandle), error: "
+ << LATE(snd_strerror)(errVal);
+ _outputMixerHandle = NULL;
+ return -1;
+ }
+
+ snd_mixer_elem_t* elem = NULL;
+ snd_mixer_elem_t* masterElem = NULL;
+ snd_mixer_elem_t* speakerElem = NULL;
+ unsigned mixerIdx = 0;
+ const char* selemName = NULL;
+
+ // Find and store handles to the right mixer elements
+ for (elem = LATE(snd_mixer_first_elem)(_outputMixerHandle); elem;
+ elem = LATE(snd_mixer_elem_next)(elem), mixerIdx++) {
+ if (LATE(snd_mixer_selem_is_active)(elem)) {
+ selemName = LATE(snd_mixer_selem_get_name)(elem);
+ RTC_LOG(LS_VERBOSE) << "snd_mixer_selem_get_name " << mixerIdx << ": "
+ << selemName << " =" << elem;
+
+ // "Master", "PCM", "Wave", "Master Mono", "PC Speaker", "PCM", "Wave"
+ if (strcmp(selemName, "PCM") == 0) {
+ _outputMixerElement = elem;
+ RTC_LOG(LS_VERBOSE) << "PCM element set";
+ } else if (strcmp(selemName, "Master") == 0) {
+ masterElem = elem;
+ RTC_LOG(LS_VERBOSE) << "Master element found";
+ } else if (strcmp(selemName, "Speaker") == 0) {
+ speakerElem = elem;
+ RTC_LOG(LS_VERBOSE) << "Speaker element found";
+ }
+ }
+
+ if (_outputMixerElement) {
+ // We have found the element we want
+ break;
+ }
+ }
+
+ // If we didn't find a PCM Handle, use Master or Speaker
+ if (_outputMixerElement == NULL) {
+ if (masterElem != NULL) {
+ _outputMixerElement = masterElem;
+ RTC_LOG(LS_VERBOSE) << "Using Master as output volume.";
+ } else if (speakerElem != NULL) {
+ _outputMixerElement = speakerElem;
+ RTC_LOG(LS_VERBOSE) << "Using Speaker as output volume.";
+ } else {
+ _outputMixerElement = NULL;
+ RTC_LOG(LS_ERROR) << "Could not find output volume in the mixer.";
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+void AudioMixerManagerLinuxALSA::GetControlName(char* controlName,
+ char* deviceName) const {
+ // Example
+ // deviceName: "front:CARD=Intel,DEV=0"
+ // controlName: "hw:CARD=Intel"
+ char* pos1 = strchr(deviceName, ':');
+ char* pos2 = strchr(deviceName, ',');
+ if (!pos2) {
+ // Can also be default:CARD=Intel
+ pos2 = &deviceName[strlen(deviceName)];
+ }
+ if (pos1 && pos2) {
+ strcpy(controlName, "hw");
+ int nChar = (int)(pos2 - pos1);
+ strncpy(&controlName[2], pos1, nChar);
+ controlName[2 + nChar] = '\0';
+ } else {
+ strcpy(controlName, deviceName);
+ }
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_alsa_linux.h b/third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_alsa_linux.h
new file mode 100644
index 0000000000..d98287822d
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_alsa_linux.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_DEVICE_AUDIO_MIXER_MANAGER_ALSA_LINUX_H_
+#define AUDIO_DEVICE_AUDIO_MIXER_MANAGER_ALSA_LINUX_H_
+
+#include <alsa/asoundlib.h>
+
+#include "modules/audio_device/include/audio_device.h"
+#include "modules/audio_device/linux/alsasymboltable_linux.h"
+#include "rtc_base/synchronization/mutex.h"
+
+namespace webrtc {
+
+class AudioMixerManagerLinuxALSA {
+ public:
+ int32_t OpenSpeaker(char* deviceName) RTC_LOCKS_EXCLUDED(mutex_);
+ int32_t OpenMicrophone(char* deviceName) RTC_LOCKS_EXCLUDED(mutex_);
+ int32_t SetSpeakerVolume(uint32_t volume) RTC_LOCKS_EXCLUDED(mutex_);
+ int32_t SpeakerVolume(uint32_t& volume) const;
+ int32_t MaxSpeakerVolume(uint32_t& maxVolume) const;
+ int32_t MinSpeakerVolume(uint32_t& minVolume) const;
+ int32_t SpeakerVolumeIsAvailable(bool& available);
+ int32_t SpeakerMuteIsAvailable(bool& available);
+ int32_t SetSpeakerMute(bool enable) RTC_LOCKS_EXCLUDED(mutex_);
+ int32_t SpeakerMute(bool& enabled) const;
+ int32_t MicrophoneMuteIsAvailable(bool& available);
+ int32_t SetMicrophoneMute(bool enable) RTC_LOCKS_EXCLUDED(mutex_);
+ int32_t MicrophoneMute(bool& enabled) const;
+ int32_t MicrophoneVolumeIsAvailable(bool& available);
+ int32_t SetMicrophoneVolume(uint32_t volume) RTC_LOCKS_EXCLUDED(mutex_);
+ int32_t MicrophoneVolume(uint32_t& volume) const;
+ int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const;
+ int32_t MinMicrophoneVolume(uint32_t& minVolume) const;
+ int32_t Close() RTC_LOCKS_EXCLUDED(mutex_);
+ int32_t CloseSpeaker() RTC_LOCKS_EXCLUDED(mutex_);
+ int32_t CloseMicrophone() RTC_LOCKS_EXCLUDED(mutex_);
+ bool SpeakerIsInitialized() const;
+ bool MicrophoneIsInitialized() const;
+
+ public:
+ AudioMixerManagerLinuxALSA();
+ ~AudioMixerManagerLinuxALSA();
+
+ private:
+ int32_t CloseSpeakerLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ int32_t CloseMicrophoneLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ int32_t LoadMicMixerElement() const;
+ int32_t LoadSpeakerMixerElement() const;
+ void GetControlName(char* controlName, char* deviceName) const;
+
+ private:
+ Mutex mutex_;
+ mutable snd_mixer_t* _outputMixerHandle;
+ char _outputMixerStr[kAdmMaxDeviceNameSize];
+ mutable snd_mixer_t* _inputMixerHandle;
+ char _inputMixerStr[kAdmMaxDeviceNameSize];
+ mutable snd_mixer_elem_t* _outputMixerElement;
+ mutable snd_mixer_elem_t* _inputMixerElement;
+};
+
+} // namespace webrtc
+
+#endif  // AUDIO_DEVICE_AUDIO_MIXER_MANAGER_ALSA_LINUX_H_
diff --git a/third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.cc b/third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.cc
new file mode 100644
index 0000000000..91beee3c87
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.cc
@@ -0,0 +1,844 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/linux/audio_mixer_manager_pulse_linux.h"
+
+#include <stddef.h>
+
+#include "modules/audio_device/linux/audio_device_pulse_linux.h"
+#include "modules/audio_device/linux/latebindingsymboltable_linux.h"
+#include "modules/audio_device/linux/pulseaudiosymboltable_linux.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+// Accesses Pulse functions through our late-binding symbol table instead of
+// directly. This way we don't have to link to libpulse, which means our binary
+// will work on systems that don't have it.
+#define LATE(sym) \
+ LATESYM_GET(webrtc::adm_linux_pulse::PulseAudioSymbolTable, \
+ GetPulseSymbolTable(), sym)
+
+namespace webrtc {
+
+class AutoPulseLock {
+ public:
+ explicit AutoPulseLock(pa_threaded_mainloop* pa_mainloop)
+ : pa_mainloop_(pa_mainloop) {
+ LATE(pa_threaded_mainloop_lock)(pa_mainloop_);
+ }
+
+ ~AutoPulseLock() { LATE(pa_threaded_mainloop_unlock)(pa_mainloop_); }
+
+ private:
+ pa_threaded_mainloop* const pa_mainloop_;
+};
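+// RAII counterpart to PaLock()/PaUnLock() in AudioDeviceLinuxPulse: an
+// AutoPulseLock on the stack holds the threaded-mainloop lock for the
+// enclosing scope, e.g. AutoPulseLock auto_lock(_paMainloop); in the methods
+// below.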
+
+AudioMixerManagerLinuxPulse::AudioMixerManagerLinuxPulse()
+ : _paOutputDeviceIndex(-1),
+ _paInputDeviceIndex(-1),
+ _paPlayStream(NULL),
+ _paRecStream(NULL),
+ _paMainloop(NULL),
+ _paContext(NULL),
+ _paVolume(0),
+ _paMute(0),
+ _paVolSteps(0),
+ _paSpeakerMute(false),
+ _paSpeakerVolume(PA_VOLUME_NORM),
+ _paChannels(0),
+ _paObjectsSet(false) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << " created";
+}
+
+AudioMixerManagerLinuxPulse::~AudioMixerManagerLinuxPulse() {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << " destroyed";
+
+ Close();
+}
+
+// ===========================================================================
+// PUBLIC METHODS
+// ===========================================================================
+
+int32_t AudioMixerManagerLinuxPulse::SetPulseAudioObjects(
+ pa_threaded_mainloop* mainloop,
+ pa_context* context) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+ if (!mainloop || !context) {
+ RTC_LOG(LS_ERROR) << "could not set PulseAudio objects for mixer";
+ return -1;
+ }
+
+ _paMainloop = mainloop;
+ _paContext = context;
+ _paObjectsSet = true;
+
+  RTC_LOG(LS_VERBOSE) << "the PulseAudio objects for the mixer have been set";
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::Close() {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+ CloseSpeaker();
+ CloseMicrophone();
+
+ _paMainloop = NULL;
+ _paContext = NULL;
+ _paObjectsSet = false;
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::CloseSpeaker() {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+ // Reset the index to -1
+ _paOutputDeviceIndex = -1;
+ _paPlayStream = NULL;
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::CloseMicrophone() {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+ // Reset the index to -1
+ _paInputDeviceIndex = -1;
+ _paRecStream = NULL;
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::SetPlayStream(pa_stream* playStream) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_LOG(LS_VERBOSE)
+ << "AudioMixerManagerLinuxPulse::SetPlayStream(playStream)";
+
+ _paPlayStream = playStream;
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::SetRecStream(pa_stream* recStream) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::SetRecStream(recStream)";
+
+ _paRecStream = recStream;
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::OpenSpeaker(uint16_t deviceIndex) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::OpenSpeaker(deviceIndex="
+ << deviceIndex << ")";
+
+ // No point in opening the speaker
+ // if PA objects have not been set
+ if (!_paObjectsSet) {
+    RTC_LOG(LS_ERROR) << "PulseAudio objects have not been set";
+ return -1;
+ }
+
+ // Set the index for the PulseAudio
+ // output device to control
+ _paOutputDeviceIndex = deviceIndex;
+
+ RTC_LOG(LS_VERBOSE) << "the output mixer device is now open";
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::OpenMicrophone(uint16_t deviceIndex) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_LOG(LS_VERBOSE)
+ << "AudioMixerManagerLinuxPulse::OpenMicrophone(deviceIndex="
+ << deviceIndex << ")";
+
+ // No point in opening the microphone
+ // if PA objects have not been set
+ if (!_paObjectsSet) {
+ RTC_LOG(LS_ERROR) << "PulseAudio objects have not been set";
+ return -1;
+ }
+
+ // Set the index for the PulseAudio
+ // input device to control
+ _paInputDeviceIndex = deviceIndex;
+
+ RTC_LOG(LS_VERBOSE) << "the input mixer device is now open";
+
+ return 0;
+}
+
+bool AudioMixerManagerLinuxPulse::SpeakerIsInitialized() const {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+
+ return (_paOutputDeviceIndex != -1);
+}
+
+bool AudioMixerManagerLinuxPulse::MicrophoneIsInitialized() const {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+
+ return (_paInputDeviceIndex != -1);
+}
+
+int32_t AudioMixerManagerLinuxPulse::SetSpeakerVolume(uint32_t volume) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::SetSpeakerVolume(volume="
+ << volume << ")";
+
+ if (_paOutputDeviceIndex == -1) {
+ RTC_LOG(LS_WARNING) << "output device index has not been set";
+ return -1;
+ }
+
+ bool setFailed(false);
+
+ if (_paPlayStream &&
+ (LATE(pa_stream_get_state)(_paPlayStream) != PA_STREAM_UNCONNECTED)) {
+ // We can only really set the volume if we have a connected stream
+ AutoPulseLock auto_lock(_paMainloop);
+
+ // Get the number of channels from the sample specification
+ const pa_sample_spec* spec = LATE(pa_stream_get_sample_spec)(_paPlayStream);
+ if (!spec) {
+ RTC_LOG(LS_ERROR) << "could not get sample specification";
+ return -1;
+ }
+
+ // Set the same volume for all channels
+ pa_cvolume cVolumes;
+ LATE(pa_cvolume_set)(&cVolumes, spec->channels, volume);
+
+ pa_operation* paOperation = NULL;
+ paOperation = LATE(pa_context_set_sink_input_volume)(
+ _paContext, LATE(pa_stream_get_index)(_paPlayStream), &cVolumes,
+ PaSetVolumeCallback, NULL);
+ if (!paOperation) {
+ setFailed = true;
+ }
+
+ // Don't need to wait for the completion
+ LATE(pa_operation_unref)(paOperation);
+ } else {
+ // We have not created a stream or it's not connected to the sink
+ // Save the volume to be set at connection
+ _paSpeakerVolume = volume;
+ }
+
+ if (setFailed) {
+ RTC_LOG(LS_WARNING) << "could not set speaker volume, error="
+ << LATE(pa_context_errno)(_paContext);
+
+ return -1;
+ }
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::SpeakerVolume(uint32_t& volume) const {
+ if (_paOutputDeviceIndex == -1) {
+ RTC_LOG(LS_WARNING) << "output device index has not been set";
+ return -1;
+ }
+
+ if (_paPlayStream &&
+ (LATE(pa_stream_get_state)(_paPlayStream) != PA_STREAM_UNCONNECTED)) {
+ // We can only get the volume if we have a connected stream
+ if (!GetSinkInputInfo())
+ return -1;
+
+ AutoPulseLock auto_lock(_paMainloop);
+ volume = static_cast<uint32_t>(_paVolume);
+ } else {
+ AutoPulseLock auto_lock(_paMainloop);
+ volume = _paSpeakerVolume;
+ }
+
+ RTC_LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::SpeakerVolume() => vol="
+ << volume;
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::MaxSpeakerVolume(
+ uint32_t& maxVolume) const {
+ if (_paOutputDeviceIndex == -1) {
+ RTC_LOG(LS_WARNING) << "output device index has not been set";
+ return -1;
+ }
+
+  // PA_VOLUME_NORM corresponds to 100% (0 dB). PA allows amplification
+  // above that (up to PA_VOLUME_MAX), but we report PA_VOLUME_NORM as the
+  // maximum volume here.
+ maxVolume = static_cast<uint32_t>(PA_VOLUME_NORM);
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::MinSpeakerVolume(
+ uint32_t& minVolume) const {
+ if (_paOutputDeviceIndex == -1) {
+ RTC_LOG(LS_WARNING) << "output device index has not been set";
+ return -1;
+ }
+
+ minVolume = static_cast<uint32_t>(PA_VOLUME_MUTED);
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::SpeakerVolumeIsAvailable(bool& available) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (_paOutputDeviceIndex == -1) {
+ RTC_LOG(LS_WARNING) << "output device index has not been set";
+ return -1;
+ }
+
+  // Always available in PulseAudio
+ available = true;
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::SpeakerMuteIsAvailable(bool& available) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (_paOutputDeviceIndex == -1) {
+ RTC_LOG(LS_WARNING) << "output device index has not been set";
+ return -1;
+ }
+
+  // Always available in PulseAudio
+ available = true;
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::SetSpeakerMute(bool enable) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::SetSpeakerMute(enable="
+ << enable << ")";
+
+ if (_paOutputDeviceIndex == -1) {
+ RTC_LOG(LS_WARNING) << "output device index has not been set";
+ return -1;
+ }
+
+ bool setFailed(false);
+
+ if (_paPlayStream &&
+ (LATE(pa_stream_get_state)(_paPlayStream) != PA_STREAM_UNCONNECTED)) {
+ // We can only really mute if we have a connected stream
+ AutoPulseLock auto_lock(_paMainloop);
+
+ pa_operation* paOperation = NULL;
+ paOperation = LATE(pa_context_set_sink_input_mute)(
+ _paContext, LATE(pa_stream_get_index)(_paPlayStream), (int)enable,
+ PaSetVolumeCallback, NULL);
+ if (!paOperation) {
+ setFailed = true;
+ }
+
+ // Don't need to wait for the completion
+ LATE(pa_operation_unref)(paOperation);
+ } else {
+ // We have not created a stream or it's not connected to the sink
+ // Save the mute status to be set at connection
+ _paSpeakerMute = enable;
+ }
+
+ if (setFailed) {
+ RTC_LOG(LS_WARNING) << "could not mute speaker, error="
+ << LATE(pa_context_errno)(_paContext);
+ return -1;
+ }
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::SpeakerMute(bool& enabled) const {
+ if (_paOutputDeviceIndex == -1) {
+ RTC_LOG(LS_WARNING) << "output device index has not been set";
+ return -1;
+ }
+
+ if (_paPlayStream &&
+ (LATE(pa_stream_get_state)(_paPlayStream) != PA_STREAM_UNCONNECTED)) {
+ // We can only get the mute status if we have a connected stream
+ if (!GetSinkInputInfo())
+ return -1;
+
+ enabled = static_cast<bool>(_paMute);
+ } else {
+ enabled = _paSpeakerMute;
+ }
+ RTC_LOG(LS_VERBOSE)
+ << "AudioMixerManagerLinuxPulse::SpeakerMute() => enabled=" << enabled;
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::StereoPlayoutIsAvailable(bool& available) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (_paOutputDeviceIndex == -1) {
+ RTC_LOG(LS_WARNING) << "output device index has not been set";
+ return -1;
+ }
+
+ uint32_t deviceIndex = (uint32_t)_paOutputDeviceIndex;
+
+ {
+ AutoPulseLock auto_lock(_paMainloop);
+
+ // Get the actual stream device index if we have a connected stream
+ // The device used by the stream can be changed
+ // during the call
+ if (_paPlayStream &&
+ (LATE(pa_stream_get_state)(_paPlayStream) != PA_STREAM_UNCONNECTED)) {
+ deviceIndex = LATE(pa_stream_get_device_index)(_paPlayStream);
+ }
+ }
+
+ if (!GetSinkInfoByIndex(deviceIndex))
+ return -1;
+
+  available = (_paChannels == 2);
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::StereoRecordingIsAvailable(
+ bool& available) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (_paInputDeviceIndex == -1) {
+ RTC_LOG(LS_WARNING) << "input device index has not been set";
+ return -1;
+ }
+
+ uint32_t deviceIndex = (uint32_t)_paInputDeviceIndex;
+
+ AutoPulseLock auto_lock(_paMainloop);
+
+ // Get the actual stream device index if we have a connected stream
+ // The device used by the stream can be changed
+ // during the call
+ if (_paRecStream &&
+ (LATE(pa_stream_get_state)(_paRecStream) != PA_STREAM_UNCONNECTED)) {
+ deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream);
+ }
+
+ pa_operation* paOperation = NULL;
+
+ // Get info for this source
+ // We want to know if the actual device can record in stereo
+ paOperation = LATE(pa_context_get_source_info_by_index)(
+ _paContext, deviceIndex, PaSourceInfoCallback, (void*)this);
+
+ WaitForOperationCompletion(paOperation);
+
+  available = (_paChannels == 2);
+
+ RTC_LOG(LS_VERBOSE)
+ << "AudioMixerManagerLinuxPulse::StereoRecordingIsAvailable()"
+ " => available="
+ << available;
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::MicrophoneMuteIsAvailable(
+ bool& available) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (_paInputDeviceIndex == -1) {
+ RTC_LOG(LS_WARNING) << "input device index has not been set";
+ return -1;
+ }
+
+  // Always available in PulseAudio
+ available = true;
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::SetMicrophoneMute(bool enable) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ RTC_LOG(LS_VERBOSE)
+ << "AudioMixerManagerLinuxPulse::SetMicrophoneMute(enable=" << enable
+ << ")";
+
+ if (_paInputDeviceIndex == -1) {
+ RTC_LOG(LS_WARNING) << "input device index has not been set";
+ return -1;
+ }
+
+ bool setFailed(false);
+ pa_operation* paOperation = NULL;
+
+ uint32_t deviceIndex = (uint32_t)_paInputDeviceIndex;
+
+ AutoPulseLock auto_lock(_paMainloop);
+
+ // Get the actual stream device index if we have a connected stream
+ // The device used by the stream can be changed
+ // during the call
+ if (_paRecStream &&
+ (LATE(pa_stream_get_state)(_paRecStream) != PA_STREAM_UNCONNECTED)) {
+ deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream);
+ }
+
+ // Set mute switch for the source
+ paOperation = LATE(pa_context_set_source_mute_by_index)(
+ _paContext, deviceIndex, enable, PaSetVolumeCallback, NULL);
+
+ if (!paOperation) {
+ setFailed = true;
+ }
+
+ // Don't need to wait for this to complete.
+ LATE(pa_operation_unref)(paOperation);
+
+ if (setFailed) {
+ RTC_LOG(LS_WARNING) << "could not mute microphone, error="
+ << LATE(pa_context_errno)(_paContext);
+ return -1;
+ }
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::MicrophoneMute(bool& enabled) const {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (_paInputDeviceIndex == -1) {
+ RTC_LOG(LS_WARNING) << "input device index has not been set";
+ return -1;
+ }
+
+ uint32_t deviceIndex = (uint32_t)_paInputDeviceIndex;
+
+ {
+ AutoPulseLock auto_lock(_paMainloop);
+ // Get the actual stream device index if we have a connected stream
+ // The device used by the stream can be changed
+ // during the call
+ if (_paRecStream &&
+ (LATE(pa_stream_get_state)(_paRecStream) != PA_STREAM_UNCONNECTED)) {
+ deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream);
+ }
+ }
+
+ if (!GetSourceInfoByIndex(deviceIndex))
+ return -1;
+
+ enabled = static_cast<bool>(_paMute);
+
+ RTC_LOG(LS_VERBOSE)
+ << "AudioMixerManagerLinuxPulse::MicrophoneMute() => enabled=" << enabled;
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::MicrophoneVolumeIsAvailable(
+ bool& available) {
+ RTC_DCHECK(thread_checker_.IsCurrent());
+ if (_paInputDeviceIndex == -1) {
+ RTC_LOG(LS_WARNING) << "input device index has not been set";
+ return -1;
+ }
+
+  // Always available in PulseAudio
+ available = true;
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::SetMicrophoneVolume(uint32_t volume) {
+ RTC_LOG(LS_VERBOSE)
+ << "AudioMixerManagerLinuxPulse::SetMicrophoneVolume(volume=" << volume
+ << ")";
+
+ if (_paInputDeviceIndex == -1) {
+ RTC_LOG(LS_WARNING) << "input device index has not been set";
+ return -1;
+ }
+
+ // Unlike output streams, input streams have no concept of a stream
+ // volume, only a device volume. So we have to change the volume of the
+ // device itself.
+
+ // The device may have a different number of channels than the stream and
+ // their mapping may be different, so we don't want to use the channel
+ // count from our sample spec. We could use PA_CHANNELS_MAX to cover our
+ // bases, and the server allows that even if the device's channel count
+ // is lower, but some buggy PA clients don't like that (the pavucontrol
+ // on Hardy dies in an assert if the channel count is different). So
+ // instead we look up the actual number of channels that the device has.
+ AutoPulseLock auto_lock(_paMainloop);
+ uint32_t deviceIndex = (uint32_t)_paInputDeviceIndex;
+
+ // Get the actual stream device index if we have a connected stream
+ // The device used by the stream can be changed
+ // during the call
+ if (_paRecStream &&
+ (LATE(pa_stream_get_state)(_paRecStream) != PA_STREAM_UNCONNECTED)) {
+ deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream);
+ }
+
+ bool setFailed(false);
+ pa_operation* paOperation = NULL;
+
+ // Get the number of channels for this source
+ paOperation = LATE(pa_context_get_source_info_by_index)(
+ _paContext, deviceIndex, PaSourceInfoCallback, (void*)this);
+
+ WaitForOperationCompletion(paOperation);
+
+ uint8_t channels = _paChannels;
+ pa_cvolume cVolumes;
+ LATE(pa_cvolume_set)(&cVolumes, channels, volume);
+
+ // Set the volume for the source
+ paOperation = LATE(pa_context_set_source_volume_by_index)(
+ _paContext, deviceIndex, &cVolumes, PaSetVolumeCallback, NULL);
+
+ if (!paOperation) {
+ setFailed = true;
+ }
+
+ // Don't need to wait for this to complete.
+ LATE(pa_operation_unref)(paOperation);
+
+ if (setFailed) {
+ RTC_LOG(LS_WARNING) << "could not set microphone volume, error="
+ << LATE(pa_context_errno)(_paContext);
+ return -1;
+ }
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::MicrophoneVolume(uint32_t& volume) const {
+ if (_paInputDeviceIndex == -1) {
+ RTC_LOG(LS_WARNING) << "input device index has not been set";
+ return -1;
+ }
+
+ uint32_t deviceIndex = (uint32_t)_paInputDeviceIndex;
+
+ {
+ AutoPulseLock auto_lock(_paMainloop);
+ // Get the actual stream device index if we have a connected stream.
+ // The device used by the stream can be changed during the call.
+ if (_paRecStream &&
+ (LATE(pa_stream_get_state)(_paRecStream) != PA_STREAM_UNCONNECTED)) {
+ deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream);
+ }
+ }
+
+ if (!GetSourceInfoByIndex(deviceIndex))
+ return -1;
+
+ {
+ AutoPulseLock auto_lock(_paMainloop);
+ volume = static_cast<uint32_t>(_paVolume);
+ }
+
+ RTC_LOG(LS_VERBOSE)
+ << "AudioMixerManagerLinuxPulse::MicrophoneVolume() => vol=" << volume;
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::MaxMicrophoneVolume(
+ uint32_t& maxVolume) const {
+ if (_paInputDeviceIndex == -1) {
+ RTC_LOG(LS_WARNING) << "input device index has not been set";
+ return -1;
+ }
+
+  // PA_VOLUME_NORM corresponds to 100% (0 dB). PA allows amplification
+  // above that (up to PA_VOLUME_MAX), but that does not work well for all
+  // sound cards, so we report PA_VOLUME_NORM as the maximum volume here.
+ maxVolume = static_cast<uint32_t>(PA_VOLUME_NORM);
+
+ return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::MinMicrophoneVolume(
+ uint32_t& minVolume) const {
+ if (_paInputDeviceIndex == -1) {
+ RTC_LOG(LS_WARNING) << "input device index has not been set";
+ return -1;
+ }
+
+ minVolume = static_cast<uint32_t>(PA_VOLUME_MUTED);
+
+ return 0;
+}
+
+// ===========================================================================
+// Private Methods
+// ===========================================================================
+
+void AudioMixerManagerLinuxPulse::PaSinkInfoCallback(pa_context* /*c*/,
+ const pa_sink_info* i,
+ int eol,
+ void* pThis) {
+ static_cast<AudioMixerManagerLinuxPulse*>(pThis)->PaSinkInfoCallbackHandler(
+ i, eol);
+}
+
+void AudioMixerManagerLinuxPulse::PaSinkInputInfoCallback(
+ pa_context* /*c*/,
+ const pa_sink_input_info* i,
+ int eol,
+ void* pThis) {
+ static_cast<AudioMixerManagerLinuxPulse*>(pThis)
+ ->PaSinkInputInfoCallbackHandler(i, eol);
+}
+
+void AudioMixerManagerLinuxPulse::PaSourceInfoCallback(pa_context* /*c*/,
+ const pa_source_info* i,
+ int eol,
+ void* pThis) {
+ static_cast<AudioMixerManagerLinuxPulse*>(pThis)->PaSourceInfoCallbackHandler(
+ i, eol);
+}
+
+void AudioMixerManagerLinuxPulse::PaSetVolumeCallback(pa_context* /*c*/,
+ int success,
+ void* /*pThis*/) {
+ if (!success) {
+ RTC_LOG(LS_ERROR) << "failed to set volume";
+ }
+}
+
+void AudioMixerManagerLinuxPulse::PaSinkInfoCallbackHandler(
+ const pa_sink_info* i,
+ int eol) {
+ if (eol) {
+ // Signal that we are done
+ LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
+ return;
+ }
+
+ _paChannels = i->channel_map.channels; // Get number of channels
+ pa_volume_t paVolume = PA_VOLUME_MUTED; // Minimum possible value.
+ for (int j = 0; j < _paChannels; ++j) {
+ if (paVolume < i->volume.values[j]) {
+ paVolume = i->volume.values[j];
+ }
+ }
+ _paVolume = paVolume; // get the max volume for any channel
+ _paMute = i->mute; // get mute status
+
+ // supported since PA 0.9.15
+ //_paVolSteps = i->n_volume_steps; // get the number of volume steps
+ // default value is PA_VOLUME_NORM+1
+ _paVolSteps = PA_VOLUME_NORM + 1;
+}
+
+void AudioMixerManagerLinuxPulse::PaSinkInputInfoCallbackHandler(
+ const pa_sink_input_info* i,
+ int eol) {
+ if (eol) {
+ // Signal that we are done
+ LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
+ return;
+ }
+
+ _paChannels = i->channel_map.channels; // Get number of channels
+ pa_volume_t paVolume = PA_VOLUME_MUTED; // Minimum possible value.
+ for (int j = 0; j < _paChannels; ++j) {
+ if (paVolume < i->volume.values[j]) {
+ paVolume = i->volume.values[j];
+ }
+ }
+ _paVolume = paVolume; // Get the max volume for any channel
+ _paMute = i->mute; // Get mute status
+}
+
+void AudioMixerManagerLinuxPulse::PaSourceInfoCallbackHandler(
+ const pa_source_info* i,
+ int eol) {
+ if (eol) {
+ // Signal that we are done
+ LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
+ return;
+ }
+
+ _paChannels = i->channel_map.channels; // Get number of channels
+ pa_volume_t paVolume = PA_VOLUME_MUTED; // Minimum possible value.
+ for (int j = 0; j < _paChannels; ++j) {
+ if (paVolume < i->volume.values[j]) {
+ paVolume = i->volume.values[j];
+ }
+ }
+ _paVolume = paVolume; // Get the max volume for any channel
+ _paMute = i->mute; // Get mute status
+
+ // supported since PA 0.9.15
+ //_paVolSteps = i->n_volume_steps; // Get the number of volume steps
+ // default value is PA_VOLUME_NORM+1
+ _paVolSteps = PA_VOLUME_NORM + 1;
+}
+
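+// Blocks until `paOperation` has completed and then releases the reference.
+// The caller must hold the mainloop lock (in this file, via AutoPulseLock):
+// the Pa*InfoCallbackHandler functions above call
+// pa_threaded_mainloop_signal() once the final (eol) record arrives, which
+// wakes this wait.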
+void AudioMixerManagerLinuxPulse::WaitForOperationCompletion(
+ pa_operation* paOperation) const {
+ while (LATE(pa_operation_get_state)(paOperation) == PA_OPERATION_RUNNING) {
+ LATE(pa_threaded_mainloop_wait)(_paMainloop);
+ }
+
+ LATE(pa_operation_unref)(paOperation);
+}
+
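+// The three helpers below issue a blocking query to the PulseAudio daemon and
+// leave the results in the mutable _paChannels/_paVolume/_paMute members via
+// the corresponding Pa*InfoCallbackHandler.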
+bool AudioMixerManagerLinuxPulse::GetSinkInputInfo() const {
+ pa_operation* paOperation = NULL;
+
+ AutoPulseLock auto_lock(_paMainloop);
+ // Get info for this stream (sink input).
+ paOperation = LATE(pa_context_get_sink_input_info)(
+ _paContext, LATE(pa_stream_get_index)(_paPlayStream),
+ PaSinkInputInfoCallback, (void*)this);
+
+ WaitForOperationCompletion(paOperation);
+ return true;
+}
+
+bool AudioMixerManagerLinuxPulse::GetSinkInfoByIndex(int device_index) const {
+ pa_operation* paOperation = NULL;
+
+ AutoPulseLock auto_lock(_paMainloop);
+ paOperation = LATE(pa_context_get_sink_info_by_index)(
+ _paContext, device_index, PaSinkInfoCallback, (void*)this);
+
+ WaitForOperationCompletion(paOperation);
+ return true;
+}
+
+bool AudioMixerManagerLinuxPulse::GetSourceInfoByIndex(int device_index) const {
+ pa_operation* paOperation = NULL;
+
+ AutoPulseLock auto_lock(_paMainloop);
+ paOperation = LATE(pa_context_get_source_info_by_index)(
+ _paContext, device_index, PaSourceInfoCallback, (void*)this);
+
+ WaitForOperationCompletion(paOperation);
+ return true;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.h b/third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.h
new file mode 100644
index 0000000000..546440c4a6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_DEVICE_AUDIO_MIXER_MANAGER_PULSE_LINUX_H_
+#define AUDIO_DEVICE_AUDIO_MIXER_MANAGER_PULSE_LINUX_H_
+
+#include <pulse/pulseaudio.h>
+#include <stdint.h>
+
+#include "api/sequence_checker.h"
+
+#ifndef UINT32_MAX
+#define UINT32_MAX ((uint32_t)-1)
+#endif
+
+namespace webrtc {
+
+class AudioMixerManagerLinuxPulse {
+ public:
+ int32_t SetPlayStream(pa_stream* playStream);
+ int32_t SetRecStream(pa_stream* recStream);
+ int32_t OpenSpeaker(uint16_t deviceIndex);
+ int32_t OpenMicrophone(uint16_t deviceIndex);
+ int32_t SetSpeakerVolume(uint32_t volume);
+ int32_t SpeakerVolume(uint32_t& volume) const;
+ int32_t MaxSpeakerVolume(uint32_t& maxVolume) const;
+ int32_t MinSpeakerVolume(uint32_t& minVolume) const;
+ int32_t SpeakerVolumeIsAvailable(bool& available);
+ int32_t SpeakerMuteIsAvailable(bool& available);
+ int32_t SetSpeakerMute(bool enable);
+ int32_t StereoPlayoutIsAvailable(bool& available);
+ int32_t StereoRecordingIsAvailable(bool& available);
+ int32_t SpeakerMute(bool& enabled) const;
+ int32_t MicrophoneMuteIsAvailable(bool& available);
+ int32_t SetMicrophoneMute(bool enable);
+ int32_t MicrophoneMute(bool& enabled) const;
+ int32_t MicrophoneVolumeIsAvailable(bool& available);
+ int32_t SetMicrophoneVolume(uint32_t volume);
+ int32_t MicrophoneVolume(uint32_t& volume) const;
+ int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const;
+ int32_t MinMicrophoneVolume(uint32_t& minVolume) const;
+ int32_t SetPulseAudioObjects(pa_threaded_mainloop* mainloop,
+ pa_context* context);
+ int32_t Close();
+ int32_t CloseSpeaker();
+ int32_t CloseMicrophone();
+ bool SpeakerIsInitialized() const;
+ bool MicrophoneIsInitialized() const;
+
+ public:
+ AudioMixerManagerLinuxPulse();
+ ~AudioMixerManagerLinuxPulse();
+
+ private:
+ static void PaSinkInfoCallback(pa_context* c,
+ const pa_sink_info* i,
+ int eol,
+ void* pThis);
+ static void PaSinkInputInfoCallback(pa_context* c,
+ const pa_sink_input_info* i,
+ int eol,
+ void* pThis);
+ static void PaSourceInfoCallback(pa_context* c,
+ const pa_source_info* i,
+ int eol,
+ void* pThis);
+ static void PaSetVolumeCallback(pa_context* /*c*/,
+ int success,
+ void* /*pThis*/);
+ void PaSinkInfoCallbackHandler(const pa_sink_info* i, int eol);
+ void PaSinkInputInfoCallbackHandler(const pa_sink_input_info* i, int eol);
+ void PaSourceInfoCallbackHandler(const pa_source_info* i, int eol);
+
+ void WaitForOperationCompletion(pa_operation* paOperation) const;
+
+ bool GetSinkInputInfo() const;
+ bool GetSinkInfoByIndex(int device_index) const;
+ bool GetSourceInfoByIndex(int device_index) const;
+
+ private:
+ int16_t _paOutputDeviceIndex;
+ int16_t _paInputDeviceIndex;
+
+ pa_stream* _paPlayStream;
+ pa_stream* _paRecStream;
+
+ pa_threaded_mainloop* _paMainloop;
+ pa_context* _paContext;
+
+ mutable uint32_t _paVolume;
+ mutable uint32_t _paMute;
+ mutable uint32_t _paVolSteps;
+ bool _paSpeakerMute;
+ mutable uint32_t _paSpeakerVolume;
+ mutable uint8_t _paChannels;
+ bool _paObjectsSet;
+
+  // Stores the ID of the constructing thread.
+  // We could then use RTC_DCHECK_RUN_ON(&thread_checker_) to ensure that
+  // other methods are called from the same thread.
+  // Currently the methods only do RTC_DCHECK(thread_checker_.IsCurrent()).
+ SequenceChecker thread_checker_;
+};
+
+} // namespace webrtc
+
+#endif  // AUDIO_DEVICE_AUDIO_MIXER_MANAGER_PULSE_LINUX_H_
diff --git a/third_party/libwebrtc/modules/audio_device/linux/latebindingsymboltable_linux.cc b/third_party/libwebrtc/modules/audio_device/linux/latebindingsymboltable_linux.cc
new file mode 100644
index 0000000000..751edafd8b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/linux/latebindingsymboltable_linux.cc
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2010 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/linux/latebindingsymboltable_linux.h"
+
+#include "absl/strings/string_view.h"
+#include "rtc_base/logging.h"
+
+#ifdef WEBRTC_LINUX
+#include <dlfcn.h>
+#endif
+
+namespace webrtc {
+namespace adm_linux {
+
+inline static const char* GetDllError() {
+#ifdef WEBRTC_LINUX
+ char* err = dlerror();
+ if (err) {
+ return err;
+ } else {
+ return "No error";
+ }
+#else
+#error Not implemented
+#endif
+}
+
+DllHandle InternalLoadDll(absl::string_view dll_name) {
+#ifdef WEBRTC_LINUX
+ DllHandle handle = dlopen(std::string(dll_name).c_str(), RTLD_NOW);
+#else
+#error Not implemented
+#endif
+ if (handle == kInvalidDllHandle) {
+ RTC_LOG(LS_WARNING) << "Can't load " << dll_name << " : " << GetDllError();
+ }
+ return handle;
+}
+
+void InternalUnloadDll(DllHandle handle) {
+#ifdef WEBRTC_LINUX
+// TODO(pbos): Remove this dlclose() exclusion when leaks and suppressions from
+// here are gone (or AddressSanitizer can display them properly).
+//
+// Skip dlclose() on AddressSanitizer as leaks including this module in the
+// stack trace gets displayed as <unknown module> instead of the actual library
+// -> it can not be suppressed.
+// https://code.google.com/p/address-sanitizer/issues/detail?id=89
+#if !defined(ADDRESS_SANITIZER)
+ if (dlclose(handle) != 0) {
+ RTC_LOG(LS_ERROR) << GetDllError();
+ }
+#endif // !defined(ADDRESS_SANITIZER)
+#else
+#error Not implemented
+#endif
+}
+
+static bool LoadSymbol(DllHandle handle,
+ absl::string_view symbol_name,
+ void** symbol) {
+#ifdef WEBRTC_LINUX
+ *symbol = dlsym(handle, std::string(symbol_name).c_str());
+ char* err = dlerror();
+ if (err) {
+ RTC_LOG(LS_ERROR) << "Error loading symbol " << symbol_name << " : " << err;
+ return false;
+ } else if (!*symbol) {
+ RTC_LOG(LS_ERROR) << "Symbol " << symbol_name << " is NULL";
+ return false;
+ }
+ return true;
+#else
+#error Not implemented
+#endif
+}
+
+// This routine MUST assign SOME value for every symbol, even if that value is
+// NULL, or else some symbols may be left with uninitialized data that the
+// caller may later interpret as a valid address.
+bool InternalLoadSymbols(DllHandle handle,
+ int num_symbols,
+ const char* const symbol_names[],
+ void* symbols[]) {
+#ifdef WEBRTC_LINUX
+ // Clear any old errors.
+ dlerror();
+#endif
+ for (int i = 0; i < num_symbols; ++i) {
+ if (!LoadSymbol(handle, symbol_names[i], &symbols[i])) {
+ return false;
+ }
+ }
+ return true;
+}
+
+} // namespace adm_linux
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/linux/latebindingsymboltable_linux.h b/third_party/libwebrtc/modules/audio_device/linux/latebindingsymboltable_linux.h
new file mode 100644
index 0000000000..00f3c5a449
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/linux/latebindingsymboltable_linux.h
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2010 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_DEVICE_LATEBINDINGSYMBOLTABLE_LINUX_H_
+#define AUDIO_DEVICE_LATEBINDINGSYMBOLTABLE_LINUX_H_
+
+#include <stddef.h> // for NULL
+#include <string.h>
+
+#include "absl/strings/string_view.h"
+#include "rtc_base/checks.h"
+
+// This file provides macros for creating "symbol table" classes to simplify
+// the dynamic loading of symbols from DLLs. Currently the implementation only
+// supports Linux and pure C symbols.
+// See pulseaudiosymboltable_linux.(h|cc) in this directory for an example.
+
+namespace webrtc {
+namespace adm_linux {
+
+#ifdef WEBRTC_LINUX
+typedef void* DllHandle;
+
+const DllHandle kInvalidDllHandle = NULL;
+#else
+#error Not implemented
+#endif
+
+// These are helpers for use only by the class below.
+DllHandle InternalLoadDll(absl::string_view);
+
+void InternalUnloadDll(DllHandle handle);
+
+bool InternalLoadSymbols(DllHandle handle,
+ int num_symbols,
+ const char* const symbol_names[],
+ void* symbols[]);
+
+template <int SYMBOL_TABLE_SIZE,
+ const char kDllName[],
+ const char* const kSymbolNames[]>
+class LateBindingSymbolTable {
+ public:
+ LateBindingSymbolTable()
+ : handle_(kInvalidDllHandle), undefined_symbols_(false) {
+ memset(symbols_, 0, sizeof(symbols_));
+ }
+
+ ~LateBindingSymbolTable() { Unload(); }
+
+ LateBindingSymbolTable(const LateBindingSymbolTable&) = delete;
+  LateBindingSymbolTable& operator=(const LateBindingSymbolTable&) = delete;
+
+ static int NumSymbols() { return SYMBOL_TABLE_SIZE; }
+
+ // We do not use this, but we offer it for theoretical convenience.
+ static const char* GetSymbolName(int index) {
+ RTC_DCHECK_LT(index, NumSymbols());
+ return kSymbolNames[index];
+ }
+
+ bool IsLoaded() const { return handle_ != kInvalidDllHandle; }
+
+ // Loads the DLL and the symbol table. Returns true iff the DLL and symbol
+ // table loaded successfully.
+ bool Load() {
+ if (IsLoaded()) {
+ return true;
+ }
+ if (undefined_symbols_) {
+ // We do not attempt to load again because repeated attempts are not
+ // likely to succeed and DLL loading is costly.
+ return false;
+ }
+ handle_ = InternalLoadDll(kDllName);
+ if (!IsLoaded()) {
+ return false;
+ }
+ if (!InternalLoadSymbols(handle_, NumSymbols(), kSymbolNames, symbols_)) {
+ undefined_symbols_ = true;
+ Unload();
+ return false;
+ }
+ return true;
+ }
+
+ void Unload() {
+ if (!IsLoaded()) {
+ return;
+ }
+ InternalUnloadDll(handle_);
+ handle_ = kInvalidDllHandle;
+ memset(symbols_, 0, sizeof(symbols_));
+ }
+
+ // Retrieves the given symbol. NOTE: Recommended to use LATESYM_GET below
+ // instead of this.
+ void* GetSymbol(int index) const {
+ RTC_DCHECK(IsLoaded());
+ RTC_DCHECK_LT(index, NumSymbols());
+ return symbols_[index];
+ }
+
+ private:
+ DllHandle handle_;
+ bool undefined_symbols_;
+ void* symbols_[SYMBOL_TABLE_SIZE];
+};
+
+// This macro must be invoked in a header to declare a symbol table class.
+#define LATE_BINDING_SYMBOL_TABLE_DECLARE_BEGIN(ClassName) enum {
+// This macro must be invoked in the header declaration once for each symbol
+// (recommended to use an X-Macro to avoid duplication).
+// This macro defines an enumerator named after the symbol, which yields a
+// compile-time mapping from symbol names to their indices in the symbol
+// table class.
+#define LATE_BINDING_SYMBOL_TABLE_DECLARE_ENTRY(ClassName, sym) \
+ ClassName##_SYMBOL_TABLE_INDEX_##sym,
+
+// This macro completes the header declaration.
+#define LATE_BINDING_SYMBOL_TABLE_DECLARE_END(ClassName) \
+ ClassName##_SYMBOL_TABLE_SIZE \
+ } \
+ ; \
+ \
+ extern const char ClassName##_kDllName[]; \
+ extern const char* const \
+ ClassName##_kSymbolNames[ClassName##_SYMBOL_TABLE_SIZE]; \
+ \
+ typedef ::webrtc::adm_linux::LateBindingSymbolTable< \
+ ClassName##_SYMBOL_TABLE_SIZE, ClassName##_kDllName, \
+ ClassName##_kSymbolNames> \
+ ClassName;
+
+// This macro must be invoked in a .cc file to define a previously-declared
+// symbol table class.
+#define LATE_BINDING_SYMBOL_TABLE_DEFINE_BEGIN(ClassName, dllName) \
+ const char ClassName##_kDllName[] = dllName; \
+ const char* const ClassName##_kSymbolNames[ClassName##_SYMBOL_TABLE_SIZE] = {
+// This macro must be invoked in the .cc definition once for each symbol
+// (recommended to use an X-Macro to avoid duplication).
+// This would have to use the mangled name if we were to ever support C++
+// symbols.
+#define LATE_BINDING_SYMBOL_TABLE_DEFINE_ENTRY(ClassName, sym) #sym,
+
+#define LATE_BINDING_SYMBOL_TABLE_DEFINE_END(ClassName) \
+ } \
+ ;
+
+// Index of a given symbol in the given symbol table class.
+#define LATESYM_INDEXOF(ClassName, sym) (ClassName##_SYMBOL_TABLE_INDEX_##sym)
+
+// Returns a reference to the given late-binded symbol, with the correct type.
+#define LATESYM_GET(ClassName, inst, sym) \
+ (*reinterpret_cast<__typeof__(&sym)>( \
+ (inst)->GetSymbol(LATESYM_INDEXOF(ClassName, sym))))
+
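+// Illustrative sketch only (nothing below is defined by this header): how the
+// macros above fit together for a hypothetical library "libfoo.so.0" that
+// exports foo_init(). FooSymbolTable and foo_init are made-up names; the real
+// usage in this directory is pulseaudiosymboltable_linux.(h|cc).
+//
+//   // In a header (foo_init must already be declared, e.g. by the library's
+//   // own header, so that __typeof__(&foo_init) in LATESYM_GET resolves):
+//   LATE_BINDING_SYMBOL_TABLE_DECLARE_BEGIN(FooSymbolTable)
+//   LATE_BINDING_SYMBOL_TABLE_DECLARE_ENTRY(FooSymbolTable, foo_init)
+//   LATE_BINDING_SYMBOL_TABLE_DECLARE_END(FooSymbolTable)
+//
+//   // In the matching .cc file:
+//   LATE_BINDING_SYMBOL_TABLE_DEFINE_BEGIN(FooSymbolTable, "libfoo.so.0")
+//   LATE_BINDING_SYMBOL_TABLE_DEFINE_ENTRY(FooSymbolTable, foo_init)
+//   LATE_BINDING_SYMBOL_TABLE_DEFINE_END(FooSymbolTable)
+//
+//   // At a call site:
+//   FooSymbolTable foo_symbols;
+//   if (foo_symbols.Load()) {
+//     LATESYM_GET(FooSymbolTable, &foo_symbols, foo_init)();
+//   }
+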
+} // namespace adm_linux
+} // namespace webrtc
+
+#endif  // AUDIO_DEVICE_LATEBINDINGSYMBOLTABLE_LINUX_H_
diff --git a/third_party/libwebrtc/modules/audio_device/linux/pulseaudiosymboltable_linux.cc b/third_party/libwebrtc/modules/audio_device/linux/pulseaudiosymboltable_linux.cc
new file mode 100644
index 0000000000..e0759e6ca3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/linux/pulseaudiosymboltable_linux.cc
@@ -0,0 +1,41 @@
+/*
+ * libjingle
+ * Copyright 2004--2010, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "modules/audio_device/linux/pulseaudiosymboltable_linux.h"
+
+namespace webrtc {
+namespace adm_linux_pulse {
+
+LATE_BINDING_SYMBOL_TABLE_DEFINE_BEGIN(PulseAudioSymbolTable, "libpulse.so.0")
+#define X(sym) \
+ LATE_BINDING_SYMBOL_TABLE_DEFINE_ENTRY(PulseAudioSymbolTable, sym)
+PULSE_AUDIO_SYMBOLS_LIST
+#undef X
+LATE_BINDING_SYMBOL_TABLE_DEFINE_END(PulseAudioSymbolTable)
+
+} // namespace adm_linux_pulse
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/linux/pulseaudiosymboltable_linux.h b/third_party/libwebrtc/modules/audio_device/linux/pulseaudiosymboltable_linux.h
new file mode 100644
index 0000000000..2f6a9510d8
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/linux/pulseaudiosymboltable_linux.h
@@ -0,0 +1,106 @@
+/*
+ * libjingle
+ * Copyright 2004--2010, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef AUDIO_DEVICE_PULSEAUDIOSYMBOLTABLE_LINUX_H_
+#define AUDIO_DEVICE_PULSEAUDIOSYMBOLTABLE_LINUX_H_
+
+#include "modules/audio_device/linux/latebindingsymboltable_linux.h"
+
+namespace webrtc {
+namespace adm_linux_pulse {
+
+// The PulseAudio symbols we need, as an X-Macro list.
+// This list must contain precisely every libpulse function that is used in
+// the ADM Linux PulseAudio device and mixer classes.
+#define PULSE_AUDIO_SYMBOLS_LIST \
+ X(pa_bytes_per_second) \
+ X(pa_context_connect) \
+ X(pa_context_disconnect) \
+ X(pa_context_errno) \
+ X(pa_context_get_protocol_version) \
+ X(pa_context_get_server_info) \
+ X(pa_context_get_sink_info_list) \
+ X(pa_context_get_sink_info_by_index) \
+ X(pa_context_get_sink_info_by_name) \
+ X(pa_context_get_sink_input_info) \
+ X(pa_context_get_source_info_by_index) \
+ X(pa_context_get_source_info_by_name) \
+ X(pa_context_get_source_info_list) \
+ X(pa_context_get_state) \
+ X(pa_context_new) \
+ X(pa_context_set_sink_input_volume) \
+ X(pa_context_set_sink_input_mute) \
+ X(pa_context_set_source_volume_by_index) \
+ X(pa_context_set_source_mute_by_index) \
+ X(pa_context_set_state_callback) \
+ X(pa_context_unref) \
+ X(pa_cvolume_set) \
+ X(pa_operation_get_state) \
+ X(pa_operation_unref) \
+ X(pa_stream_connect_playback) \
+ X(pa_stream_connect_record) \
+ X(pa_stream_disconnect) \
+ X(pa_stream_drop) \
+ X(pa_stream_get_device_index) \
+ X(pa_stream_get_index) \
+ X(pa_stream_get_latency) \
+ X(pa_stream_get_sample_spec) \
+ X(pa_stream_get_state) \
+ X(pa_stream_new) \
+ X(pa_stream_peek) \
+ X(pa_stream_readable_size) \
+ X(pa_stream_set_buffer_attr) \
+ X(pa_stream_set_overflow_callback) \
+ X(pa_stream_set_read_callback) \
+ X(pa_stream_set_state_callback) \
+ X(pa_stream_set_underflow_callback) \
+ X(pa_stream_set_write_callback) \
+ X(pa_stream_unref) \
+ X(pa_stream_writable_size) \
+ X(pa_stream_write) \
+ X(pa_strerror) \
+ X(pa_threaded_mainloop_free) \
+ X(pa_threaded_mainloop_get_api) \
+ X(pa_threaded_mainloop_lock) \
+ X(pa_threaded_mainloop_new) \
+ X(pa_threaded_mainloop_signal) \
+ X(pa_threaded_mainloop_start) \
+ X(pa_threaded_mainloop_stop) \
+ X(pa_threaded_mainloop_unlock) \
+ X(pa_threaded_mainloop_wait)
+
+LATE_BINDING_SYMBOL_TABLE_DECLARE_BEGIN(PulseAudioSymbolTable)
+#define X(sym) \
+ LATE_BINDING_SYMBOL_TABLE_DECLARE_ENTRY(PulseAudioSymbolTable, sym)
+PULSE_AUDIO_SYMBOLS_LIST
+#undef X
+LATE_BINDING_SYMBOL_TABLE_DECLARE_END(PulseAudioSymbolTable)
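+
+// Typical use (a sketch, not something defined in this header): the PulseAudio
+// ADM sources wrap LATESYM_GET behind a short LATE() macro and call the
+// dynamically loaded functions through it, along these lines (the name of the
+// symbol-table instance accessor is illustrative only):
+//
+//   #define LATE(sym) \
+//     LATESYM_GET(webrtc::adm_linux_pulse::PulseAudioSymbolTable, \
+//                 &pulse_symbol_table, sym)
+//
+//   if (LATE(pa_context_get_state)(context) == PA_CONTEXT_READY) { ... }
+//
+// where pulse_symbol_table is a PulseAudioSymbolTable that has been Load()ed.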
+
+} // namespace adm_linux_pulse
+} // namespace webrtc
+
+#endif // AUDIO_DEVICE_PULSEAUDIOSYMBOLTABLE_LINUX_H_