Diffstat (limited to 'third_party/libwebrtc/modules/audio_device/win')
-rw-r--r--  third_party/libwebrtc/modules/audio_device/win/audio_device_core_win.cc          | 4178
-rw-r--r--  third_party/libwebrtc/modules/audio_device/win/audio_device_core_win.h           |  299
-rw-r--r--  third_party/libwebrtc/modules/audio_device/win/audio_device_module_win.cc        |  522
-rw-r--r--  third_party/libwebrtc/modules/audio_device/win/audio_device_module_win.h         |   87
-rw-r--r--  third_party/libwebrtc/modules/audio_device/win/core_audio_base_win.cc            |  948
-rw-r--r--  third_party/libwebrtc/modules/audio_device/win/core_audio_base_win.h             |  203
-rw-r--r--  third_party/libwebrtc/modules/audio_device/win/core_audio_input_win.cc           |  453
-rw-r--r--  third_party/libwebrtc/modules/audio_device/win/core_audio_input_win.h            |   73
-rw-r--r--  third_party/libwebrtc/modules/audio_device/win/core_audio_output_win.cc          |  422
-rw-r--r--  third_party/libwebrtc/modules/audio_device/win/core_audio_output_win.h           |   72
-rw-r--r--  third_party/libwebrtc/modules/audio_device/win/core_audio_utility_win.cc         | 1529
-rw-r--r--  third_party/libwebrtc/modules/audio_device/win/core_audio_utility_win.h          |  560
-rw-r--r--  third_party/libwebrtc/modules/audio_device/win/core_audio_utility_win_unittest.cc |  877
13 files changed, 10223 insertions, 0 deletions
diff --git a/third_party/libwebrtc/modules/audio_device/win/audio_device_core_win.cc b/third_party/libwebrtc/modules/audio_device/win/audio_device_core_win.cc
new file mode 100644
index 0000000000..aa8b6a9ebe
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/win/audio_device_core_win.cc
@@ -0,0 +1,4178 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#pragma warning(disable : 4995) // name was marked as #pragma deprecated
+
+#if (_MSC_VER >= 1310) && (_MSC_VER < 1400)
+// Reports the major and minor versions of the compiler.
+// For example, 1310 for Microsoft Visual C++ .NET 2003. 1310 represents version
+// 13 and a 1.0 point release. The Visual C++ 2005 compiler version is 1400.
+// Type cl /? at the command line to see the major and minor versions of your
+// compiler along with the build number.
+#pragma message(">> INFO: Windows Core Audio is not supported in VS 2003")
+#endif
+
+#include "modules/audio_device/audio_device_config.h"
+
+#ifdef WEBRTC_WINDOWS_CORE_AUDIO_BUILD
+
+// clang-format off
+// To get Windows includes in the right order, this must come before the Windows
+// includes below.
+#include "modules/audio_device/win/audio_device_core_win.h"
+// clang-format on
+
+#include <comdef.h>
+#include <dmo.h>
+#include <functiondiscoverykeys_devpkey.h>
+#include <mmsystem.h>
+#include <string.h>
+#include <strsafe.h>
+#include <uuids.h>
+#include <windows.h>
+
+#include <iomanip>
+
+#include "api/make_ref_counted.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/string_utils.h"
+#include "rtc_base/thread_annotations.h"
+#include "system_wrappers/include/sleep.h"
+
+// Macro that calls a COM method returning HRESULT value.
+#define EXIT_ON_ERROR(hres) \
+ do { \
+ if (FAILED(hres)) \
+ goto Exit; \
+ } while (0)
+
+// Macro that jumps to the Next label on a COM error.
+#define CONTINUE_ON_ERROR(hres) \
+ do { \
+ if (FAILED(hres)) \
+ goto Next; \
+ } while (0)
+
+// Macro that releases a COM object if not NULL.
+#define SAFE_RELEASE(p) \
+ do { \
+ if ((p)) { \
+ (p)->Release(); \
+ (p) = NULL; \
+ } \
+ } while (0)
+
+#define ROUND(x) ((x) >= 0 ? (int)((x) + 0.5) : (int)((x)-0.5))
+
+// REFERENCE_TIME time units per millisecond
+#define REFTIMES_PER_MILLISEC 10000
+
+typedef struct tagTHREADNAME_INFO {
+ DWORD dwType; // must be 0x1000
+ LPCSTR szName; // pointer to name (in user addr space)
+ DWORD dwThreadID; // thread ID (-1=caller thread)
+ DWORD dwFlags; // reserved for future use, must be zero
+} THREADNAME_INFO;
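+
+// Editor's sketch (not part of the original change): THREADNAME_INFO is
+// consumed by the Visual Studio debugger through the well-known exception
+// code 0x406D1388. A minimal, illustrative use of the struct follows; the
+// helper name SetDebuggerThreadName is hypothetical.
+#if 0
+static void SetDebuggerThreadName(DWORD dwThreadID, LPCSTR szThreadName) {
+  THREADNAME_INFO info;
+  info.dwType = 0x1000;
+  info.szName = szThreadName;
+  info.dwThreadID = dwThreadID;
+  info.dwFlags = 0;
+  __try {
+    RaiseException(0x406D1388, 0, sizeof(info) / sizeof(ULONG_PTR),
+                   reinterpret_cast<ULONG_PTR*>(&info));
+  } __except (EXCEPTION_EXECUTE_HANDLER) {
+    // The debugger (if attached) consumes the exception; nothing to do.
+  }
+}
+#endif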
+
+namespace webrtc {
+namespace {
+
+enum { COM_THREADING_MODEL = COINIT_MULTITHREADED };
+
+enum { kAecCaptureStreamIndex = 0, kAecRenderStreamIndex = 1 };
+
+// An implementation of IMediaBuffer, as required for
+// IMediaObject::ProcessOutput(). After consuming data provided by
+// ProcessOutput(), call SetLength() to update the buffer availability.
+//
+// Example implementation:
+// http://msdn.microsoft.com/en-us/library/dd376684(v=vs.85).aspx
+class MediaBufferImpl final : public IMediaBuffer {
+ public:
+ explicit MediaBufferImpl(DWORD maxLength)
+ : _data(new BYTE[maxLength]),
+ _length(0),
+ _maxLength(maxLength),
+ _refCount(0) {}
+
+ // IMediaBuffer methods.
+ STDMETHOD(GetBufferAndLength(BYTE** ppBuffer, DWORD* pcbLength)) {
+ if (!ppBuffer || !pcbLength) {
+ return E_POINTER;
+ }
+
+ *ppBuffer = _data;
+ *pcbLength = _length;
+
+ return S_OK;
+ }
+
+ STDMETHOD(GetMaxLength(DWORD* pcbMaxLength)) {
+ if (!pcbMaxLength) {
+ return E_POINTER;
+ }
+
+ *pcbMaxLength = _maxLength;
+ return S_OK;
+ }
+
+ STDMETHOD(SetLength(DWORD cbLength)) {
+ if (cbLength > _maxLength) {
+ return E_INVALIDARG;
+ }
+
+ _length = cbLength;
+ return S_OK;
+ }
+
+ // IUnknown methods.
+ STDMETHOD_(ULONG, AddRef()) { return InterlockedIncrement(&_refCount); }
+
+ STDMETHOD(QueryInterface(REFIID riid, void** ppv)) {
+ if (!ppv) {
+ return E_POINTER;
+ } else if (riid != IID_IMediaBuffer && riid != IID_IUnknown) {
+ return E_NOINTERFACE;
+ }
+
+ *ppv = static_cast<IMediaBuffer*>(this);
+ AddRef();
+ return S_OK;
+ }
+
+ STDMETHOD_(ULONG, Release()) {
+ LONG refCount = InterlockedDecrement(&_refCount);
+ if (refCount == 0) {
+ delete this;
+ }
+
+ return refCount;
+ }
+
+ private:
+ ~MediaBufferImpl() { delete[] _data; }
+
+ BYTE* _data;
+ DWORD _length;
+ const DWORD _maxLength;
+ LONG _refCount;
+};
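+
+// Editor's sketch (illustrative only): how a buffer such as MediaBufferImpl
+// is typically passed to IMediaObject::ProcessOutput(). The DMO fills the
+// buffer and reports the byte count via IMediaBuffer; calling SetLength(0)
+// afterwards marks the buffer as consumed so it can be reused.
+#if 0
+void DrainDmoOnce(IMediaObject* dmo, IMediaBuffer* media_buffer) {
+  DMO_OUTPUT_DATA_BUFFER dmo_buffer = {};
+  dmo_buffer.pBuffer = media_buffer;
+  DWORD dw_status = 0;
+  HRESULT hr = dmo->ProcessOutput(0, 1, &dmo_buffer, &dw_status);
+  if (SUCCEEDED(hr)) {
+    BYTE* data = NULL;
+    DWORD length = 0;
+    media_buffer->GetBufferAndLength(&data, &length);
+    // ... consume `length` bytes starting at `data` ...
+    media_buffer->SetLength(0);  // Buffer is available again.
+  }
+}
+#endif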
+} // namespace
+
+// ============================================================================
+// Static Methods
+// ============================================================================
+
+// ----------------------------------------------------------------------------
+// CoreAudioIsSupported
+// ----------------------------------------------------------------------------
+
+bool AudioDeviceWindowsCore::CoreAudioIsSupported() {
+ RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+ bool MMDeviceIsAvailable(false);
+ bool coreAudioIsSupported(false);
+
+ HRESULT hr(S_OK);
+ wchar_t buf[MAXERRORLENGTH];
+ wchar_t errorText[MAXERRORLENGTH];
+
+ // 1) Check if Windows version is Vista SP1 or later.
+ //
+ // CoreAudio is only available on Vista SP1 and later.
+ //
+ OSVERSIONINFOEX osvi;
+ DWORDLONG dwlConditionMask = 0;
+ int op = VER_LESS_EQUAL;
+
+ // Initialize the OSVERSIONINFOEX structure.
+ ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
+ osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
+ osvi.dwMajorVersion = 6;
+ osvi.dwMinorVersion = 0;
+ osvi.wServicePackMajor = 0;
+ osvi.wServicePackMinor = 0;
+ osvi.wProductType = VER_NT_WORKSTATION;
+
+ // Initialize the condition mask.
+ VER_SET_CONDITION(dwlConditionMask, VER_MAJORVERSION, op);
+ VER_SET_CONDITION(dwlConditionMask, VER_MINORVERSION, op);
+ VER_SET_CONDITION(dwlConditionMask, VER_SERVICEPACKMAJOR, op);
+ VER_SET_CONDITION(dwlConditionMask, VER_SERVICEPACKMINOR, op);
+ VER_SET_CONDITION(dwlConditionMask, VER_PRODUCT_TYPE, VER_EQUAL);
+
+ DWORD dwTypeMask = VER_MAJORVERSION | VER_MINORVERSION |
+ VER_SERVICEPACKMAJOR | VER_SERVICEPACKMINOR |
+ VER_PRODUCT_TYPE;
+
+ // Perform the test.
+ BOOL isVistaRTMorXP = VerifyVersionInfo(&osvi, dwTypeMask, dwlConditionMask);
+ if (isVistaRTMorXP != 0) {
+ RTC_LOG(LS_VERBOSE)
+ << "*** Windows Core Audio is only supported on Vista SP1 or later";
+ return false;
+ }
+
+ // 2) Initializes the COM library for use by the calling thread.
+
+ // The COM init wrapper sets the thread's concurrency model to MTA,
+ // and creates a new apartment for the thread if one is required. The
+ // wrapper also ensures that each call to CoInitializeEx is balanced
+ // by a corresponding call to CoUninitialize.
+ //
+ ScopedCOMInitializer comInit(ScopedCOMInitializer::kMTA);
+ if (!comInit.Succeeded()) {
+ // Things will work even if an STA thread is calling this method but we
+ // want to ensure that MTA is used and therefore return false here.
+ return false;
+ }
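+
+  // Editor's sketch (illustrative only): the RAII shape of a COM-init
+  // wrapper like the one used above. Every successful CoInitializeEx must
+  // be balanced by CoUninitialize on the same thread. This is a hedged
+  // outline, not WebRTC's actual ScopedCOMInitializer.
+#if 0
+  class ScopedComInitSketch {
+   public:
+    ScopedComInitSketch() : hr_(CoInitializeEx(NULL, COINIT_MULTITHREADED)) {}
+    ~ScopedComInitSketch() {
+      if (SUCCEEDED(hr_))
+        CoUninitialize();
+    }
+    bool Succeeded() const { return SUCCEEDED(hr_); }
+
+   private:
+    const HRESULT hr_;
+  };
+#endif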
+
+ // 3) Check if the MMDevice API is available.
+ //
+ // The Windows Multimedia Device (MMDevice) API enables audio clients to
+ // discover audio endpoint devices, determine their capabilities, and create
+ // driver instances for those devices.
+ // Header file Mmdeviceapi.h defines the interfaces in the MMDevice API.
+ // The MMDevice API consists of several interfaces. The first of these is the
+ // IMMDeviceEnumerator interface. To access the interfaces in the MMDevice
+ // API, a client obtains a reference to the IMMDeviceEnumerator interface of a
+ // device-enumerator object by calling the CoCreateInstance function.
+ //
+ // Through the IMMDeviceEnumerator interface, the client can obtain references
+ // to the other interfaces in the MMDevice API. The MMDevice API implements
+ // the following interfaces:
+ //
+ // IMMDevice Represents an audio device.
+ // IMMDeviceCollection Represents a collection of audio devices.
+ // IMMDeviceEnumerator Provides methods for enumerating audio devices.
+ // IMMEndpoint Represents an audio endpoint device.
+ //
+ IMMDeviceEnumerator* pIMMD(NULL);
+ const CLSID CLSID_MMDeviceEnumerator = __uuidof(MMDeviceEnumerator);
+ const IID IID_IMMDeviceEnumerator = __uuidof(IMMDeviceEnumerator);
+
+ hr = CoCreateInstance(
+ CLSID_MMDeviceEnumerator, // GUID value of MMDeviceEnumerator coclass
+ NULL, CLSCTX_ALL,
+ IID_IMMDeviceEnumerator, // GUID value of the IMMDeviceEnumerator
+ // interface
+ (void**)&pIMMD);
+
+ if (FAILED(hr)) {
+ RTC_LOG(LS_ERROR) << "AudioDeviceWindowsCore::CoreAudioIsSupported()"
+ " Failed to create the required COM object (hr="
+ << hr << ")";
+ RTC_LOG(LS_VERBOSE) << "AudioDeviceWindowsCore::CoreAudioIsSupported()"
+ " CoCreateInstance(MMDeviceEnumerator) failed (hr="
+ << hr << ")";
+
+ const DWORD dwFlags =
+ FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS;
+ const DWORD dwLangID = MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US);
+
+ // Gets the system's human readable message string for this HRESULT.
+    // All error messages are in English by default.
+ DWORD messageLength = ::FormatMessageW(dwFlags, 0, hr, dwLangID, errorText,
+ MAXERRORLENGTH, NULL);
+
+ RTC_DCHECK_LE(messageLength, MAXERRORLENGTH);
+
+    // Trims trailing white space (FormatMessage() leaves a trailing CR-LF).
+ for (; messageLength && ::isspace(errorText[messageLength - 1]);
+ --messageLength) {
+ errorText[messageLength - 1] = '\0';
+ }
+
+ StringCchPrintfW(buf, MAXERRORLENGTH, L"Error details: ");
+ StringCchCatW(buf, MAXERRORLENGTH, errorText);
+ RTC_LOG(LS_VERBOSE) << buf;
+ } else {
+ MMDeviceIsAvailable = true;
+ RTC_LOG(LS_VERBOSE)
+ << "AudioDeviceWindowsCore::CoreAudioIsSupported()"
+ " CoCreateInstance(MMDeviceEnumerator) succeeded (hr="
+ << hr << ")";
+ SAFE_RELEASE(pIMMD);
+ }
+
+ // 4) Verify that we can create and initialize our Core Audio class.
+ //
+ if (MMDeviceIsAvailable) {
+ coreAudioIsSupported = false;
+
+ AudioDeviceWindowsCore* p = new (std::nothrow) AudioDeviceWindowsCore();
+ if (p == NULL) {
+ return false;
+ }
+
+ int ok(0);
+
+ if (p->Init() != InitStatus::OK) {
+ ok |= -1;
+ }
+
+ ok |= p->Terminate();
+
+ if (ok == 0) {
+ coreAudioIsSupported = true;
+ }
+
+ delete p;
+ }
+
+ if (coreAudioIsSupported) {
+ RTC_LOG(LS_VERBOSE) << "*** Windows Core Audio is supported ***";
+ } else {
+ RTC_LOG(LS_VERBOSE) << "*** Windows Core Audio is NOT supported";
+ }
+
+ return (coreAudioIsSupported);
+}
+
+// ============================================================================
+// Construction & Destruction
+// ============================================================================
+
+// ----------------------------------------------------------------------------
+// AudioDeviceWindowsCore() - ctor
+// ----------------------------------------------------------------------------
+
+AudioDeviceWindowsCore::AudioDeviceWindowsCore()
+ : _avrtLibrary(nullptr),
+ _winSupportAvrt(false),
+ _comInit(ScopedCOMInitializer::kMTA),
+ _ptrAudioBuffer(nullptr),
+ _ptrEnumerator(nullptr),
+ _ptrRenderCollection(nullptr),
+ _ptrCaptureCollection(nullptr),
+ _ptrDeviceOut(nullptr),
+ _ptrDeviceIn(nullptr),
+ _ptrClientOut(nullptr),
+ _ptrClientIn(nullptr),
+ _ptrRenderClient(nullptr),
+ _ptrCaptureClient(nullptr),
+ _ptrCaptureVolume(nullptr),
+ _ptrRenderSimpleVolume(nullptr),
+ _dmo(nullptr),
+ _mediaBuffer(nullptr),
+ _builtInAecEnabled(false),
+ _hRenderSamplesReadyEvent(nullptr),
+ _hPlayThread(nullptr),
+ _hRenderStartedEvent(nullptr),
+ _hShutdownRenderEvent(nullptr),
+ _hCaptureSamplesReadyEvent(nullptr),
+ _hRecThread(nullptr),
+ _hCaptureStartedEvent(nullptr),
+ _hShutdownCaptureEvent(nullptr),
+ _hMmTask(nullptr),
+ _playAudioFrameSize(0),
+ _playSampleRate(0),
+ _playBlockSize(0),
+ _playChannels(2),
+ _sndCardPlayDelay(0),
+ _writtenSamples(0),
+ _readSamples(0),
+ _recAudioFrameSize(0),
+ _recSampleRate(0),
+ _recBlockSize(0),
+ _recChannels(2),
+ _initialized(false),
+ _recording(false),
+ _playing(false),
+ _recIsInitialized(false),
+ _playIsInitialized(false),
+ _speakerIsInitialized(false),
+ _microphoneIsInitialized(false),
+ _usingInputDeviceIndex(false),
+ _usingOutputDeviceIndex(false),
+ _inputDevice(AudioDeviceModule::kDefaultCommunicationDevice),
+ _outputDevice(AudioDeviceModule::kDefaultCommunicationDevice),
+ _inputDeviceIndex(0),
+ _outputDeviceIndex(0) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << " created";
+ RTC_DCHECK(_comInit.Succeeded());
+
+ // Try to load the Avrt DLL
+ if (!_avrtLibrary) {
+ // Get handle to the Avrt DLL module.
+ _avrtLibrary = LoadLibrary(TEXT("Avrt.dll"));
+ if (_avrtLibrary) {
+      // Handle is valid (should only happen on Windows Vista or later).
+ // Try to get the function addresses.
+ RTC_LOG(LS_VERBOSE) << "AudioDeviceWindowsCore::AudioDeviceWindowsCore()"
+ " The Avrt DLL module is now loaded";
+
+ _PAvRevertMmThreadCharacteristics =
+ (PAvRevertMmThreadCharacteristics)GetProcAddress(
+ _avrtLibrary, "AvRevertMmThreadCharacteristics");
+ _PAvSetMmThreadCharacteristicsA =
+ (PAvSetMmThreadCharacteristicsA)GetProcAddress(
+ _avrtLibrary, "AvSetMmThreadCharacteristicsA");
+ _PAvSetMmThreadPriority = (PAvSetMmThreadPriority)GetProcAddress(
+ _avrtLibrary, "AvSetMmThreadPriority");
+
+ if (_PAvRevertMmThreadCharacteristics &&
+ _PAvSetMmThreadCharacteristicsA && _PAvSetMmThreadPriority) {
+ RTC_LOG(LS_VERBOSE)
+ << "AudioDeviceWindowsCore::AudioDeviceWindowsCore()"
+ " AvRevertMmThreadCharacteristics() is OK";
+ RTC_LOG(LS_VERBOSE)
+ << "AudioDeviceWindowsCore::AudioDeviceWindowsCore()"
+ " AvSetMmThreadCharacteristicsA() is OK";
+ RTC_LOG(LS_VERBOSE)
+ << "AudioDeviceWindowsCore::AudioDeviceWindowsCore()"
+ " AvSetMmThreadPriority() is OK";
+ _winSupportAvrt = true;
+ }
+ }
+ }
+
+ // Create our samples ready events - we want auto reset events that start in
+ // the not-signaled state. The state of an auto-reset event object remains
+ // signaled until a single waiting thread is released, at which time the
+ // system automatically sets the state to nonsignaled. If no threads are
+ // waiting, the event object's state remains signaled. (Except for
+ // _hShutdownCaptureEvent, which is used to shutdown multiple threads).
+ _hRenderSamplesReadyEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
+ _hCaptureSamplesReadyEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
+ _hShutdownRenderEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
+ _hShutdownCaptureEvent = CreateEvent(NULL, TRUE, FALSE, NULL);
+ _hRenderStartedEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
+ _hCaptureStartedEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
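+
+  // Editor's sketch (illustrative only): why _hShutdownCaptureEvent is
+  // created as manual-reset (second argument TRUE). A manual-reset event
+  // stays signaled until explicitly reset, so a single SetEvent() releases
+  // every thread blocked in a wait like the one below; an auto-reset event
+  // would wake only one waiter.
+#if 0
+  DWORD WaitLoopSketch(HANDLE shutdown, HANDLE samples_ready) {
+    HANDLE handles[2] = {shutdown, samples_ready};
+    while (true) {
+      DWORD wait = WaitForMultipleObjects(2, handles, FALSE, INFINITE);
+      if (wait == WAIT_OBJECT_0)  // shutdown: all waiting threads see this
+        return 0;
+      if (wait == WAIT_OBJECT_0 + 1) {
+        // ... process one period of audio samples ...
+      }
+    }
+  }
+#endif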
+
+ _perfCounterFreq.QuadPart = 1;
+ _perfCounterFactor = 0.0;
+
+ // list of number of channels to use on recording side
+ _recChannelsPrioList[0] = 2; // stereo is prio 1
+ _recChannelsPrioList[1] = 1; // mono is prio 2
+ _recChannelsPrioList[2] = 4; // quad is prio 3
+
+ // list of number of channels to use on playout side
+ _playChannelsPrioList[0] = 2; // stereo is prio 1
+ _playChannelsPrioList[1] = 1; // mono is prio 2
+
+ HRESULT hr;
+
+ // We know that this API will work since it has already been verified in
+ // CoreAudioIsSupported, hence no need to check for errors here as well.
+
+  // Retrieve the IMMDeviceEnumerator API (should load the MMDevAPI.dll)
+ // TODO(henrika): we should probably move this allocation to Init() instead
+ // and deallocate in Terminate() to make the implementation more symmetric.
+ CoCreateInstance(__uuidof(MMDeviceEnumerator), NULL, CLSCTX_ALL,
+ __uuidof(IMMDeviceEnumerator),
+ reinterpret_cast<void**>(&_ptrEnumerator));
+ RTC_DCHECK(_ptrEnumerator);
+
+ // DMO initialization for built-in WASAPI AEC.
+ {
+ IMediaObject* ptrDMO = NULL;
+ hr = CoCreateInstance(CLSID_CWMAudioAEC, NULL, CLSCTX_INPROC_SERVER,
+ IID_IMediaObject, reinterpret_cast<void**>(&ptrDMO));
+ if (FAILED(hr) || ptrDMO == NULL) {
+ // Since we check that _dmo is non-NULL in EnableBuiltInAEC(), the
+ // feature is prevented from being enabled.
+ _builtInAecEnabled = false;
+ _TraceCOMError(hr);
+ }
+ _dmo = ptrDMO;
+ SAFE_RELEASE(ptrDMO);
+ }
+}
+
+// ----------------------------------------------------------------------------
+// AudioDeviceWindowsCore() - dtor
+// ----------------------------------------------------------------------------
+
+AudioDeviceWindowsCore::~AudioDeviceWindowsCore() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << " destroyed";
+
+ Terminate();
+
+ // The IMMDeviceEnumerator is created during construction. Must release
+ // it here and not in Terminate() since we don't recreate it in Init().
+ SAFE_RELEASE(_ptrEnumerator);
+
+ _ptrAudioBuffer = NULL;
+
+ if (NULL != _hRenderSamplesReadyEvent) {
+ CloseHandle(_hRenderSamplesReadyEvent);
+ _hRenderSamplesReadyEvent = NULL;
+ }
+
+ if (NULL != _hCaptureSamplesReadyEvent) {
+ CloseHandle(_hCaptureSamplesReadyEvent);
+ _hCaptureSamplesReadyEvent = NULL;
+ }
+
+ if (NULL != _hRenderStartedEvent) {
+ CloseHandle(_hRenderStartedEvent);
+ _hRenderStartedEvent = NULL;
+ }
+
+ if (NULL != _hCaptureStartedEvent) {
+ CloseHandle(_hCaptureStartedEvent);
+ _hCaptureStartedEvent = NULL;
+ }
+
+ if (NULL != _hShutdownRenderEvent) {
+ CloseHandle(_hShutdownRenderEvent);
+ _hShutdownRenderEvent = NULL;
+ }
+
+ if (NULL != _hShutdownCaptureEvent) {
+ CloseHandle(_hShutdownCaptureEvent);
+ _hShutdownCaptureEvent = NULL;
+ }
+
+ if (_avrtLibrary) {
+ BOOL freeOK = FreeLibrary(_avrtLibrary);
+ if (!freeOK) {
+ RTC_LOG(LS_WARNING)
+ << "AudioDeviceWindowsCore::~AudioDeviceWindowsCore()"
+ " failed to free the loaded Avrt DLL module correctly";
+ } else {
+ RTC_LOG(LS_WARNING) << "AudioDeviceWindowsCore::~AudioDeviceWindowsCore()"
+ " the Avrt DLL module is now unloaded";
+ }
+ }
+}
+
+// ============================================================================
+// API
+// ============================================================================
+
+// ----------------------------------------------------------------------------
+// AttachAudioBuffer
+// ----------------------------------------------------------------------------
+
+void AudioDeviceWindowsCore::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
+ _ptrAudioBuffer = audioBuffer;
+
+ // Inform the AudioBuffer about default settings for this implementation.
+ // Set all values to zero here since the actual settings will be done by
+ // InitPlayout and InitRecording later.
+ _ptrAudioBuffer->SetRecordingSampleRate(0);
+ _ptrAudioBuffer->SetPlayoutSampleRate(0);
+ _ptrAudioBuffer->SetRecordingChannels(0);
+ _ptrAudioBuffer->SetPlayoutChannels(0);
+}
+
+// ----------------------------------------------------------------------------
+// ActiveAudioLayer
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::ActiveAudioLayer(
+ AudioDeviceModule::AudioLayer& audioLayer) const {
+ audioLayer = AudioDeviceModule::kWindowsCoreAudio;
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// Init
+// ----------------------------------------------------------------------------
+
+AudioDeviceGeneric::InitStatus AudioDeviceWindowsCore::Init() {
+ MutexLock lock(&mutex_);
+
+ if (_initialized) {
+ return InitStatus::OK;
+ }
+
+ // Enumerate all audio rendering and capturing endpoint devices.
+  // Note that some of these cannot be selected by the user.
+ // The complete collection is for internal use only.
+ _EnumerateEndpointDevicesAll(eRender);
+ _EnumerateEndpointDevicesAll(eCapture);
+
+ _initialized = true;
+
+ return InitStatus::OK;
+}
+
+// ----------------------------------------------------------------------------
+// Terminate
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::Terminate() {
+ MutexLock lock(&mutex_);
+
+ if (!_initialized) {
+ return 0;
+ }
+
+ _initialized = false;
+ _speakerIsInitialized = false;
+ _microphoneIsInitialized = false;
+ _playing = false;
+ _recording = false;
+
+ SAFE_RELEASE(_ptrRenderCollection);
+ SAFE_RELEASE(_ptrCaptureCollection);
+ SAFE_RELEASE(_ptrDeviceOut);
+ SAFE_RELEASE(_ptrDeviceIn);
+ SAFE_RELEASE(_ptrClientOut);
+ SAFE_RELEASE(_ptrClientIn);
+ SAFE_RELEASE(_ptrRenderClient);
+ SAFE_RELEASE(_ptrCaptureClient);
+ SAFE_RELEASE(_ptrCaptureVolume);
+ SAFE_RELEASE(_ptrRenderSimpleVolume);
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// Initialized
+// ----------------------------------------------------------------------------
+
+bool AudioDeviceWindowsCore::Initialized() const {
+ return (_initialized);
+}
+
+// ----------------------------------------------------------------------------
+// InitSpeaker
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::InitSpeaker() {
+ MutexLock lock(&mutex_);
+ return InitSpeakerLocked();
+}
+
+int32_t AudioDeviceWindowsCore::InitSpeakerLocked() {
+ if (_playing) {
+ return -1;
+ }
+
+ if (_ptrDeviceOut == NULL) {
+ return -1;
+ }
+
+ if (_usingOutputDeviceIndex) {
+ int16_t nDevices = PlayoutDevicesLocked();
+ if (_outputDeviceIndex > (nDevices - 1)) {
+ RTC_LOG(LS_ERROR) << "current device selection is invalid => unable to"
+ " initialize";
+ return -1;
+ }
+ }
+
+ int32_t ret(0);
+
+ SAFE_RELEASE(_ptrDeviceOut);
+ if (_usingOutputDeviceIndex) {
+ // Refresh the selected rendering endpoint device using current index
+ ret = _GetListDevice(eRender, _outputDeviceIndex, &_ptrDeviceOut);
+ } else {
+ ERole role;
+ (_outputDevice == AudioDeviceModule::kDefaultDevice)
+ ? role = eConsole
+ : role = eCommunications;
+ // Refresh the selected rendering endpoint device using role
+ ret = _GetDefaultDevice(eRender, role, &_ptrDeviceOut);
+ }
+
+ if (ret != 0 || (_ptrDeviceOut == NULL)) {
+ RTC_LOG(LS_ERROR) << "failed to initialize the rendering enpoint device";
+ SAFE_RELEASE(_ptrDeviceOut);
+ return -1;
+ }
+
+ IAudioSessionManager* pManager = NULL;
+ ret = _ptrDeviceOut->Activate(__uuidof(IAudioSessionManager), CLSCTX_ALL,
+ NULL, (void**)&pManager);
+ if (ret != 0 || pManager == NULL) {
+ RTC_LOG(LS_ERROR) << "failed to initialize the render manager";
+ SAFE_RELEASE(pManager);
+ return -1;
+ }
+
+ SAFE_RELEASE(_ptrRenderSimpleVolume);
+ ret = pManager->GetSimpleAudioVolume(NULL, FALSE, &_ptrRenderSimpleVolume);
+ if (ret != 0 || _ptrRenderSimpleVolume == NULL) {
+ RTC_LOG(LS_ERROR) << "failed to initialize the render simple volume";
+ SAFE_RELEASE(pManager);
+ SAFE_RELEASE(_ptrRenderSimpleVolume);
+ return -1;
+ }
+ SAFE_RELEASE(pManager);
+
+ _speakerIsInitialized = true;
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// InitMicrophone
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::InitMicrophone() {
+ MutexLock lock(&mutex_);
+ return InitMicrophoneLocked();
+}
+
+int32_t AudioDeviceWindowsCore::InitMicrophoneLocked() {
+ if (_recording) {
+ return -1;
+ }
+
+ if (_ptrDeviceIn == NULL) {
+ return -1;
+ }
+
+ if (_usingInputDeviceIndex) {
+ int16_t nDevices = RecordingDevicesLocked();
+ if (_inputDeviceIndex > (nDevices - 1)) {
+ RTC_LOG(LS_ERROR) << "current device selection is invalid => unable to"
+ " initialize";
+ return -1;
+ }
+ }
+
+ int32_t ret(0);
+
+ SAFE_RELEASE(_ptrDeviceIn);
+ if (_usingInputDeviceIndex) {
+ // Refresh the selected capture endpoint device using current index
+ ret = _GetListDevice(eCapture, _inputDeviceIndex, &_ptrDeviceIn);
+ } else {
+ ERole role;
+ (_inputDevice == AudioDeviceModule::kDefaultDevice)
+ ? role = eConsole
+ : role = eCommunications;
+ // Refresh the selected capture endpoint device using role
+ ret = _GetDefaultDevice(eCapture, role, &_ptrDeviceIn);
+ }
+
+ if (ret != 0 || (_ptrDeviceIn == NULL)) {
+ RTC_LOG(LS_ERROR) << "failed to initialize the capturing enpoint device";
+ SAFE_RELEASE(_ptrDeviceIn);
+ return -1;
+ }
+
+ SAFE_RELEASE(_ptrCaptureVolume);
+ ret = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,
+ reinterpret_cast<void**>(&_ptrCaptureVolume));
+ if (ret != 0 || _ptrCaptureVolume == NULL) {
+ RTC_LOG(LS_ERROR) << "failed to initialize the capture volume";
+ SAFE_RELEASE(_ptrCaptureVolume);
+ return -1;
+ }
+
+ _microphoneIsInitialized = true;
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// SpeakerIsInitialized
+// ----------------------------------------------------------------------------
+
+bool AudioDeviceWindowsCore::SpeakerIsInitialized() const {
+ return (_speakerIsInitialized);
+}
+
+// ----------------------------------------------------------------------------
+// MicrophoneIsInitialized
+// ----------------------------------------------------------------------------
+
+bool AudioDeviceWindowsCore::MicrophoneIsInitialized() const {
+ return (_microphoneIsInitialized);
+}
+
+// ----------------------------------------------------------------------------
+// SpeakerVolumeIsAvailable
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::SpeakerVolumeIsAvailable(bool& available) {
+ MutexLock lock(&mutex_);
+
+ if (_ptrDeviceOut == NULL) {
+ return -1;
+ }
+
+ HRESULT hr = S_OK;
+ IAudioSessionManager* pManager = NULL;
+ ISimpleAudioVolume* pVolume = NULL;
+
+ hr = _ptrDeviceOut->Activate(__uuidof(IAudioSessionManager), CLSCTX_ALL, NULL,
+ (void**)&pManager);
+ EXIT_ON_ERROR(hr);
+
+ hr = pManager->GetSimpleAudioVolume(NULL, FALSE, &pVolume);
+ EXIT_ON_ERROR(hr);
+
+ float volume(0.0f);
+ hr = pVolume->GetMasterVolume(&volume);
+  if (FAILED(hr)) {
+    available = false;
+  } else {
+    available = true;
+  }
+
+ SAFE_RELEASE(pManager);
+ SAFE_RELEASE(pVolume);
+
+ return 0;
+
+Exit:
+ _TraceCOMError(hr);
+ SAFE_RELEASE(pManager);
+ SAFE_RELEASE(pVolume);
+ return -1;
+}
+
+// ----------------------------------------------------------------------------
+// SetSpeakerVolume
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::SetSpeakerVolume(uint32_t volume) {
+ {
+ MutexLock lock(&mutex_);
+
+ if (!_speakerIsInitialized) {
+ return -1;
+ }
+
+ if (_ptrDeviceOut == NULL) {
+ return -1;
+ }
+ }
+
+ if (volume < (uint32_t)MIN_CORE_SPEAKER_VOLUME ||
+ volume > (uint32_t)MAX_CORE_SPEAKER_VOLUME) {
+ return -1;
+ }
+
+ HRESULT hr = S_OK;
+
+ // scale input volume to valid range (0.0 to 1.0)
+ const float fLevel = (float)volume / MAX_CORE_SPEAKER_VOLUME;
+ volume_mutex_.Lock();
+ hr = _ptrRenderSimpleVolume->SetMasterVolume(fLevel, NULL);
+ volume_mutex_.Unlock();
+ EXIT_ON_ERROR(hr);
+
+ return 0;
+
+Exit:
+ _TraceCOMError(hr);
+ return -1;
+}
+
+// ----------------------------------------------------------------------------
+// SpeakerVolume
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::SpeakerVolume(uint32_t& volume) const {
+ {
+ MutexLock lock(&mutex_);
+
+ if (!_speakerIsInitialized) {
+ return -1;
+ }
+
+ if (_ptrDeviceOut == NULL) {
+ return -1;
+ }
+ }
+
+ HRESULT hr = S_OK;
+ float fLevel(0.0f);
+
+ volume_mutex_.Lock();
+ hr = _ptrRenderSimpleVolume->GetMasterVolume(&fLevel);
+ volume_mutex_.Unlock();
+ EXIT_ON_ERROR(hr);
+
+ // scale input volume range [0.0,1.0] to valid output range
+ volume = static_cast<uint32_t>(fLevel * MAX_CORE_SPEAKER_VOLUME);
+
+ return 0;
+
+Exit:
+ _TraceCOMError(hr);
+ return -1;
+}
+
+// ----------------------------------------------------------------------------
+// MaxSpeakerVolume
+//
+// The internal range for Core Audio is 0.0 to 1.0, where 0.0 indicates
+// silence and 1.0 indicates full volume (no attenuation).
+// We add our own (webrtc-internal) max level to match the Wave API and
+// how it is used today in VoE.
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::MaxSpeakerVolume(uint32_t& maxVolume) const {
+ if (!_speakerIsInitialized) {
+ return -1;
+ }
+
+ maxVolume = static_cast<uint32_t>(MAX_CORE_SPEAKER_VOLUME);
+
+ return 0;
+}
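+
+// Editor's note (illustrative, assuming the header's conventional value
+// MAX_CORE_SPEAKER_VOLUME == 255): a Core Audio master level of 0.5f maps to
+// static_cast<uint32_t>(0.5f * 255) == 127 in SpeakerVolume(), and
+// SetSpeakerVolume(127) maps back to 127 / 255.0f, i.e. roughly 0.498f.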
+
+// ----------------------------------------------------------------------------
+// MinSpeakerVolume
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::MinSpeakerVolume(uint32_t& minVolume) const {
+ if (!_speakerIsInitialized) {
+ return -1;
+ }
+
+ minVolume = static_cast<uint32_t>(MIN_CORE_SPEAKER_VOLUME);
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// SpeakerMuteIsAvailable
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::SpeakerMuteIsAvailable(bool& available) {
+ MutexLock lock(&mutex_);
+
+ if (_ptrDeviceOut == NULL) {
+ return -1;
+ }
+
+ HRESULT hr = S_OK;
+ IAudioEndpointVolume* pVolume = NULL;
+
+ // Query the speaker system mute state.
+ hr = _ptrDeviceOut->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,
+ reinterpret_cast<void**>(&pVolume));
+ EXIT_ON_ERROR(hr);
+
+ BOOL mute;
+ hr = pVolume->GetMute(&mute);
+ if (FAILED(hr))
+ available = false;
+ else
+ available = true;
+
+ SAFE_RELEASE(pVolume);
+
+ return 0;
+
+Exit:
+ _TraceCOMError(hr);
+ SAFE_RELEASE(pVolume);
+ return -1;
+}
+
+// ----------------------------------------------------------------------------
+// SetSpeakerMute
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::SetSpeakerMute(bool enable) {
+ MutexLock lock(&mutex_);
+
+ if (!_speakerIsInitialized) {
+ return -1;
+ }
+
+ if (_ptrDeviceOut == NULL) {
+ return -1;
+ }
+
+ HRESULT hr = S_OK;
+ IAudioEndpointVolume* pVolume = NULL;
+
+ // Set the speaker system mute state.
+ hr = _ptrDeviceOut->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,
+ reinterpret_cast<void**>(&pVolume));
+ EXIT_ON_ERROR(hr);
+
+ const BOOL mute(enable);
+ hr = pVolume->SetMute(mute, NULL);
+ EXIT_ON_ERROR(hr);
+
+ SAFE_RELEASE(pVolume);
+
+ return 0;
+
+Exit:
+ _TraceCOMError(hr);
+ SAFE_RELEASE(pVolume);
+ return -1;
+}
+
+// ----------------------------------------------------------------------------
+// SpeakerMute
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::SpeakerMute(bool& enabled) const {
+ if (!_speakerIsInitialized) {
+ return -1;
+ }
+
+ if (_ptrDeviceOut == NULL) {
+ return -1;
+ }
+
+ HRESULT hr = S_OK;
+ IAudioEndpointVolume* pVolume = NULL;
+
+ // Query the speaker system mute state.
+ hr = _ptrDeviceOut->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,
+ reinterpret_cast<void**>(&pVolume));
+ EXIT_ON_ERROR(hr);
+
+ BOOL mute;
+ hr = pVolume->GetMute(&mute);
+ EXIT_ON_ERROR(hr);
+
+ enabled = (mute == TRUE) ? true : false;
+
+ SAFE_RELEASE(pVolume);
+
+ return 0;
+
+Exit:
+ _TraceCOMError(hr);
+ SAFE_RELEASE(pVolume);
+ return -1;
+}
+
+// ----------------------------------------------------------------------------
+// MicrophoneMuteIsAvailable
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::MicrophoneMuteIsAvailable(bool& available) {
+ MutexLock lock(&mutex_);
+
+ if (_ptrDeviceIn == NULL) {
+ return -1;
+ }
+
+ HRESULT hr = S_OK;
+ IAudioEndpointVolume* pVolume = NULL;
+
+ // Query the microphone system mute state.
+ hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,
+ reinterpret_cast<void**>(&pVolume));
+ EXIT_ON_ERROR(hr);
+
+ BOOL mute;
+ hr = pVolume->GetMute(&mute);
+ if (FAILED(hr))
+ available = false;
+ else
+ available = true;
+
+ SAFE_RELEASE(pVolume);
+ return 0;
+
+Exit:
+ _TraceCOMError(hr);
+ SAFE_RELEASE(pVolume);
+ return -1;
+}
+
+// ----------------------------------------------------------------------------
+// SetMicrophoneMute
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::SetMicrophoneMute(bool enable) {
+ if (!_microphoneIsInitialized) {
+ return -1;
+ }
+
+ if (_ptrDeviceIn == NULL) {
+ return -1;
+ }
+
+ HRESULT hr = S_OK;
+ IAudioEndpointVolume* pVolume = NULL;
+
+ // Set the microphone system mute state.
+ hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,
+ reinterpret_cast<void**>(&pVolume));
+ EXIT_ON_ERROR(hr);
+
+ const BOOL mute(enable);
+ hr = pVolume->SetMute(mute, NULL);
+ EXIT_ON_ERROR(hr);
+
+ SAFE_RELEASE(pVolume);
+ return 0;
+
+Exit:
+ _TraceCOMError(hr);
+ SAFE_RELEASE(pVolume);
+ return -1;
+}
+
+// ----------------------------------------------------------------------------
+// MicrophoneMute
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::MicrophoneMute(bool& enabled) const {
+ if (!_microphoneIsInitialized) {
+ return -1;
+ }
+
+ HRESULT hr = S_OK;
+ IAudioEndpointVolume* pVolume = NULL;
+
+ // Query the microphone system mute state.
+ hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,
+ reinterpret_cast<void**>(&pVolume));
+ EXIT_ON_ERROR(hr);
+
+ BOOL mute;
+ hr = pVolume->GetMute(&mute);
+ EXIT_ON_ERROR(hr);
+
+ enabled = (mute == TRUE) ? true : false;
+
+ SAFE_RELEASE(pVolume);
+ return 0;
+
+Exit:
+ _TraceCOMError(hr);
+ SAFE_RELEASE(pVolume);
+ return -1;
+}
+
+// ----------------------------------------------------------------------------
+// StereoRecordingIsAvailable
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::StereoRecordingIsAvailable(bool& available) {
+ available = true;
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// SetStereoRecording
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::SetStereoRecording(bool enable) {
+ MutexLock lock(&mutex_);
+
+ if (enable) {
+ _recChannelsPrioList[0] = 2; // try stereo first
+ _recChannelsPrioList[1] = 1;
+ _recChannels = 2;
+ } else {
+ _recChannelsPrioList[0] = 1; // try mono first
+ _recChannelsPrioList[1] = 2;
+ _recChannels = 1;
+ }
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// StereoRecording
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::StereoRecording(bool& enabled) const {
+ if (_recChannels == 2)
+ enabled = true;
+ else
+ enabled = false;
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// StereoPlayoutIsAvailable
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::StereoPlayoutIsAvailable(bool& available) {
+ available = true;
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// SetStereoPlayout
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::SetStereoPlayout(bool enable) {
+ MutexLock lock(&mutex_);
+
+ if (enable) {
+ _playChannelsPrioList[0] = 2; // try stereo first
+ _playChannelsPrioList[1] = 1;
+ _playChannels = 2;
+ } else {
+ _playChannelsPrioList[0] = 1; // try mono first
+ _playChannelsPrioList[1] = 2;
+ _playChannels = 1;
+ }
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// StereoPlayout
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::StereoPlayout(bool& enabled) const {
+ if (_playChannels == 2)
+ enabled = true;
+ else
+ enabled = false;
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// MicrophoneVolumeIsAvailable
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::MicrophoneVolumeIsAvailable(bool& available) {
+ MutexLock lock(&mutex_);
+
+ if (_ptrDeviceIn == NULL) {
+ return -1;
+ }
+
+ HRESULT hr = S_OK;
+ IAudioEndpointVolume* pVolume = NULL;
+
+ hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,
+ reinterpret_cast<void**>(&pVolume));
+ EXIT_ON_ERROR(hr);
+
+ float volume(0.0f);
+ hr = pVolume->GetMasterVolumeLevelScalar(&volume);
+  if (FAILED(hr)) {
+    available = false;
+  } else {
+    available = true;
+  }
+
+ SAFE_RELEASE(pVolume);
+ return 0;
+
+Exit:
+ _TraceCOMError(hr);
+ SAFE_RELEASE(pVolume);
+ return -1;
+}
+
+// ----------------------------------------------------------------------------
+// SetMicrophoneVolume
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::SetMicrophoneVolume(uint32_t volume) {
+ RTC_LOG(LS_VERBOSE) << "AudioDeviceWindowsCore::SetMicrophoneVolume(volume="
+ << volume << ")";
+
+ {
+ MutexLock lock(&mutex_);
+
+ if (!_microphoneIsInitialized) {
+ return -1;
+ }
+
+ if (_ptrDeviceIn == NULL) {
+ return -1;
+ }
+ }
+
+ if (volume < static_cast<uint32_t>(MIN_CORE_MICROPHONE_VOLUME) ||
+ volume > static_cast<uint32_t>(MAX_CORE_MICROPHONE_VOLUME)) {
+ return -1;
+ }
+
+ HRESULT hr = S_OK;
+ // scale input volume to valid range (0.0 to 1.0)
+ const float fLevel = static_cast<float>(volume) / MAX_CORE_MICROPHONE_VOLUME;
+ volume_mutex_.Lock();
+  hr = _ptrCaptureVolume->SetMasterVolumeLevelScalar(fLevel, NULL);
+ volume_mutex_.Unlock();
+ EXIT_ON_ERROR(hr);
+
+ return 0;
+
+Exit:
+ _TraceCOMError(hr);
+ return -1;
+}
+
+// ----------------------------------------------------------------------------
+// MicrophoneVolume
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::MicrophoneVolume(uint32_t& volume) const {
+ {
+ MutexLock lock(&mutex_);
+
+ if (!_microphoneIsInitialized) {
+ return -1;
+ }
+
+ if (_ptrDeviceIn == NULL) {
+ return -1;
+ }
+ }
+
+ HRESULT hr = S_OK;
+ float fLevel(0.0f);
+ volume = 0;
+ volume_mutex_.Lock();
+ hr = _ptrCaptureVolume->GetMasterVolumeLevelScalar(&fLevel);
+ volume_mutex_.Unlock();
+ EXIT_ON_ERROR(hr);
+
+ // scale input volume range [0.0,1.0] to valid output range
+ volume = static_cast<uint32_t>(fLevel * MAX_CORE_MICROPHONE_VOLUME);
+
+ return 0;
+
+Exit:
+ _TraceCOMError(hr);
+ return -1;
+}
+
+// ----------------------------------------------------------------------------
+// MaxMicrophoneVolume
+//
+// The internal range for Core Audio is 0.0 to 1.0, where 0.0 indicates
+// silence and 1.0 indicates full volume (no attenuation).
+// We add our own (webrtc-internal) max level to match the Wave API and
+// how it is used today in VoE.
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::MaxMicrophoneVolume(uint32_t& maxVolume) const {
+ RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+ if (!_microphoneIsInitialized) {
+ return -1;
+ }
+
+ maxVolume = static_cast<uint32_t>(MAX_CORE_MICROPHONE_VOLUME);
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// MinMicrophoneVolume
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::MinMicrophoneVolume(uint32_t& minVolume) const {
+ if (!_microphoneIsInitialized) {
+ return -1;
+ }
+
+ minVolume = static_cast<uint32_t>(MIN_CORE_MICROPHONE_VOLUME);
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// PlayoutDevices
+// ----------------------------------------------------------------------------
+int16_t AudioDeviceWindowsCore::PlayoutDevices() {
+ MutexLock lock(&mutex_);
+ return PlayoutDevicesLocked();
+}
+
+int16_t AudioDeviceWindowsCore::PlayoutDevicesLocked() {
+ if (_RefreshDeviceList(eRender) != -1) {
+ return (_DeviceListCount(eRender));
+ }
+
+ return -1;
+}
+
+// ----------------------------------------------------------------------------
+// SetPlayoutDevice I (II)
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::SetPlayoutDevice(uint16_t index) {
+ if (_playIsInitialized) {
+ return -1;
+ }
+
+ // Get current number of available rendering endpoint devices and refresh the
+ // rendering collection.
+ UINT nDevices = PlayoutDevices();
+
+ if (index < 0 || index > (nDevices - 1)) {
+ RTC_LOG(LS_ERROR) << "device index is out of range [0," << (nDevices - 1)
+ << "]";
+ return -1;
+ }
+
+ MutexLock lock(&mutex_);
+
+ HRESULT hr(S_OK);
+
+ RTC_DCHECK(_ptrRenderCollection);
+
+ // Select an endpoint rendering device given the specified index
+ SAFE_RELEASE(_ptrDeviceOut);
+ hr = _ptrRenderCollection->Item(index, &_ptrDeviceOut);
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ SAFE_RELEASE(_ptrDeviceOut);
+ return -1;
+ }
+
+ WCHAR szDeviceName[MAX_PATH];
+ const int bufferLen = sizeof(szDeviceName) / sizeof(szDeviceName)[0];
+
+ // Get the endpoint device's friendly-name
+ if (_GetDeviceName(_ptrDeviceOut, szDeviceName, bufferLen) == 0) {
+ RTC_LOG(LS_VERBOSE) << "friendly name: \"" << szDeviceName << "\"";
+ }
+
+ _usingOutputDeviceIndex = true;
+ _outputDeviceIndex = index;
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// SetPlayoutDevice II (II)
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::SetPlayoutDevice(
+ AudioDeviceModule::WindowsDeviceType device) {
+ if (_playIsInitialized) {
+ return -1;
+ }
+
+ ERole role(eCommunications);
+
+ if (device == AudioDeviceModule::kDefaultDevice) {
+ role = eConsole;
+ } else if (device == AudioDeviceModule::kDefaultCommunicationDevice) {
+ role = eCommunications;
+ }
+
+ MutexLock lock(&mutex_);
+
+ // Refresh the list of rendering endpoint devices
+ _RefreshDeviceList(eRender);
+
+ HRESULT hr(S_OK);
+
+ RTC_DCHECK(_ptrEnumerator);
+
+ // Select an endpoint rendering device given the specified role
+ SAFE_RELEASE(_ptrDeviceOut);
+ hr = _ptrEnumerator->GetDefaultAudioEndpoint(eRender, role, &_ptrDeviceOut);
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ SAFE_RELEASE(_ptrDeviceOut);
+ return -1;
+ }
+
+ WCHAR szDeviceName[MAX_PATH];
+ const int bufferLen = sizeof(szDeviceName) / sizeof(szDeviceName)[0];
+
+ // Get the endpoint device's friendly-name
+ if (_GetDeviceName(_ptrDeviceOut, szDeviceName, bufferLen) == 0) {
+ RTC_LOG(LS_VERBOSE) << "friendly name: \"" << szDeviceName << "\"";
+ }
+
+ _usingOutputDeviceIndex = false;
+ _outputDevice = device;
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// PlayoutDeviceName
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::PlayoutDeviceName(
+ uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) {
+ bool defaultCommunicationDevice(false);
+ const int16_t nDevices(PlayoutDevices()); // also updates the list of devices
+
+ // Special fix for the case when the user selects '-1' as index (<=> Default
+ // Communication Device)
+ if (index == (uint16_t)(-1)) {
+ defaultCommunicationDevice = true;
+ index = 0;
+ RTC_LOG(LS_VERBOSE) << "Default Communication endpoint device will be used";
+ }
+
+ if ((index > (nDevices - 1)) || (name == NULL)) {
+ return -1;
+ }
+
+ memset(name, 0, kAdmMaxDeviceNameSize);
+
+ if (guid != NULL) {
+ memset(guid, 0, kAdmMaxGuidSize);
+ }
+
+ MutexLock lock(&mutex_);
+
+ int32_t ret(-1);
+ WCHAR szDeviceName[MAX_PATH];
+ const int bufferLen = sizeof(szDeviceName) / sizeof(szDeviceName)[0];
+
+ // Get the endpoint device's friendly-name
+ if (defaultCommunicationDevice) {
+ ret = _GetDefaultDeviceName(eRender, eCommunications, szDeviceName,
+ bufferLen);
+ } else {
+ ret = _GetListDeviceName(eRender, index, szDeviceName, bufferLen);
+ }
+
+ if (ret == 0) {
+ // Convert the endpoint device's friendly-name to UTF-8
+ if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, name,
+ kAdmMaxDeviceNameSize, NULL, NULL) == 0) {
+ RTC_LOG(LS_ERROR)
+ << "WideCharToMultiByte(CP_UTF8) failed with error code "
+ << GetLastError();
+ }
+ }
+
+ // Get the endpoint ID string (uniquely identifies the device among all audio
+ // endpoint devices)
+ if (defaultCommunicationDevice) {
+ ret =
+ _GetDefaultDeviceID(eRender, eCommunications, szDeviceName, bufferLen);
+ } else {
+ ret = _GetListDeviceID(eRender, index, szDeviceName, bufferLen);
+ }
+
+ if (guid != NULL && ret == 0) {
+ // Convert the endpoint device's ID string to UTF-8
+ if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, guid, kAdmMaxGuidSize,
+ NULL, NULL) == 0) {
+ RTC_LOG(LS_ERROR)
+ << "WideCharToMultiByte(CP_UTF8) failed with error code "
+ << GetLastError();
+ }
+ }
+
+ return ret;
+}
+
+// ----------------------------------------------------------------------------
+// RecordingDeviceName
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::RecordingDeviceName(
+ uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) {
+ bool defaultCommunicationDevice(false);
+ const int16_t nDevices(
+ RecordingDevices()); // also updates the list of devices
+
+ // Special fix for the case when the user selects '-1' as index (<=> Default
+ // Communication Device)
+ if (index == (uint16_t)(-1)) {
+ defaultCommunicationDevice = true;
+ index = 0;
+ RTC_LOG(LS_VERBOSE) << "Default Communication endpoint device will be used";
+ }
+
+ if ((index > (nDevices - 1)) || (name == NULL)) {
+ return -1;
+ }
+
+ memset(name, 0, kAdmMaxDeviceNameSize);
+
+ if (guid != NULL) {
+ memset(guid, 0, kAdmMaxGuidSize);
+ }
+
+ MutexLock lock(&mutex_);
+
+ int32_t ret(-1);
+ WCHAR szDeviceName[MAX_PATH];
+ const int bufferLen = sizeof(szDeviceName) / sizeof(szDeviceName)[0];
+
+ // Get the endpoint device's friendly-name
+ if (defaultCommunicationDevice) {
+ ret = _GetDefaultDeviceName(eCapture, eCommunications, szDeviceName,
+ bufferLen);
+ } else {
+ ret = _GetListDeviceName(eCapture, index, szDeviceName, bufferLen);
+ }
+
+ if (ret == 0) {
+ // Convert the endpoint device's friendly-name to UTF-8
+ if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, name,
+ kAdmMaxDeviceNameSize, NULL, NULL) == 0) {
+ RTC_LOG(LS_ERROR)
+ << "WideCharToMultiByte(CP_UTF8) failed with error code "
+ << GetLastError();
+ }
+ }
+
+ // Get the endpoint ID string (uniquely identifies the device among all audio
+ // endpoint devices)
+ if (defaultCommunicationDevice) {
+ ret =
+ _GetDefaultDeviceID(eCapture, eCommunications, szDeviceName, bufferLen);
+ } else {
+ ret = _GetListDeviceID(eCapture, index, szDeviceName, bufferLen);
+ }
+
+ if (guid != NULL && ret == 0) {
+ // Convert the endpoint device's ID string to UTF-8
+ if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, guid, kAdmMaxGuidSize,
+ NULL, NULL) == 0) {
+ RTC_LOG(LS_ERROR)
+ << "WideCharToMultiByte(CP_UTF8) failed with error code "
+ << GetLastError();
+ }
+ }
+
+ return ret;
+}
+
+// ----------------------------------------------------------------------------
+// RecordingDevices
+// ----------------------------------------------------------------------------
+
+int16_t AudioDeviceWindowsCore::RecordingDevices() {
+ MutexLock lock(&mutex_);
+ return RecordingDevicesLocked();
+}
+
+int16_t AudioDeviceWindowsCore::RecordingDevicesLocked() {
+ if (_RefreshDeviceList(eCapture) != -1) {
+ return (_DeviceListCount(eCapture));
+ }
+
+ return -1;
+}
+
+// ----------------------------------------------------------------------------
+// SetRecordingDevice I (II)
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::SetRecordingDevice(uint16_t index) {
+ if (_recIsInitialized) {
+ return -1;
+ }
+
+ // Get current number of available capture endpoint devices and refresh the
+ // capture collection.
+ UINT nDevices = RecordingDevices();
+
+ if (index < 0 || index > (nDevices - 1)) {
+ RTC_LOG(LS_ERROR) << "device index is out of range [0," << (nDevices - 1)
+ << "]";
+ return -1;
+ }
+
+ MutexLock lock(&mutex_);
+
+ HRESULT hr(S_OK);
+
+ RTC_DCHECK(_ptrCaptureCollection);
+
+ // Select an endpoint capture device given the specified index
+ SAFE_RELEASE(_ptrDeviceIn);
+ hr = _ptrCaptureCollection->Item(index, &_ptrDeviceIn);
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ SAFE_RELEASE(_ptrDeviceIn);
+ return -1;
+ }
+
+ WCHAR szDeviceName[MAX_PATH];
+ const int bufferLen = sizeof(szDeviceName) / sizeof(szDeviceName)[0];
+
+ // Get the endpoint device's friendly-name
+ if (_GetDeviceName(_ptrDeviceIn, szDeviceName, bufferLen) == 0) {
+ RTC_LOG(LS_VERBOSE) << "friendly name: \"" << szDeviceName << "\"";
+ }
+
+ _usingInputDeviceIndex = true;
+ _inputDeviceIndex = index;
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// SetRecordingDevice II (II)
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::SetRecordingDevice(
+ AudioDeviceModule::WindowsDeviceType device) {
+ if (_recIsInitialized) {
+ return -1;
+ }
+
+ ERole role(eCommunications);
+
+ if (device == AudioDeviceModule::kDefaultDevice) {
+ role = eConsole;
+ } else if (device == AudioDeviceModule::kDefaultCommunicationDevice) {
+ role = eCommunications;
+ }
+
+ MutexLock lock(&mutex_);
+
+ // Refresh the list of capture endpoint devices
+ _RefreshDeviceList(eCapture);
+
+ HRESULT hr(S_OK);
+
+ RTC_DCHECK(_ptrEnumerator);
+
+ // Select an endpoint capture device given the specified role
+ SAFE_RELEASE(_ptrDeviceIn);
+ hr = _ptrEnumerator->GetDefaultAudioEndpoint(eCapture, role, &_ptrDeviceIn);
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ SAFE_RELEASE(_ptrDeviceIn);
+ return -1;
+ }
+
+ WCHAR szDeviceName[MAX_PATH];
+ const int bufferLen = sizeof(szDeviceName) / sizeof(szDeviceName)[0];
+
+ // Get the endpoint device's friendly-name
+ if (_GetDeviceName(_ptrDeviceIn, szDeviceName, bufferLen) == 0) {
+ RTC_LOG(LS_VERBOSE) << "friendly name: \"" << szDeviceName << "\"";
+ }
+
+ _usingInputDeviceIndex = false;
+ _inputDevice = device;
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// PlayoutIsAvailable
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::PlayoutIsAvailable(bool& available) {
+ available = false;
+
+ // Try to initialize the playout side
+ int32_t res = InitPlayout();
+
+ // Cancel effect of initialization
+ StopPlayout();
+
+ if (res != -1) {
+ available = true;
+ }
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// RecordingIsAvailable
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::RecordingIsAvailable(bool& available) {
+ available = false;
+
+ // Try to initialize the recording side
+ int32_t res = InitRecording();
+
+ // Cancel effect of initialization
+ StopRecording();
+
+ if (res != -1) {
+ available = true;
+ }
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// InitPlayout
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::InitPlayout() {
+ MutexLock lock(&mutex_);
+
+ if (_playing) {
+ return -1;
+ }
+
+ if (_playIsInitialized) {
+ return 0;
+ }
+
+ if (_ptrDeviceOut == NULL) {
+ return -1;
+ }
+
+ // Initialize the speaker (devices might have been added or removed)
+ if (InitSpeakerLocked() == -1) {
+ RTC_LOG(LS_WARNING) << "InitSpeaker() failed";
+ }
+
+ // Ensure that the updated rendering endpoint device is valid
+ if (_ptrDeviceOut == NULL) {
+ return -1;
+ }
+
+ if (_builtInAecEnabled && _recIsInitialized) {
+ // Ensure the correct render device is configured in case
+ // InitRecording() was called before InitPlayout().
+ if (SetDMOProperties() == -1) {
+ return -1;
+ }
+ }
+
+ HRESULT hr = S_OK;
+ WAVEFORMATEX* pWfxOut = NULL;
+ WAVEFORMATEX Wfx = WAVEFORMATEX();
+ WAVEFORMATEX* pWfxClosestMatch = NULL;
+
+ // Create COM object with IAudioClient interface.
+ SAFE_RELEASE(_ptrClientOut);
+ hr = _ptrDeviceOut->Activate(__uuidof(IAudioClient), CLSCTX_ALL, NULL,
+ (void**)&_ptrClientOut);
+ EXIT_ON_ERROR(hr);
+
+ // Retrieve the stream format that the audio engine uses for its internal
+ // processing (mixing) of shared-mode streams.
+ hr = _ptrClientOut->GetMixFormat(&pWfxOut);
+ if (SUCCEEDED(hr)) {
+ RTC_LOG(LS_VERBOSE) << "Audio Engine's current rendering mix format:";
+ // format type
+ RTC_LOG(LS_VERBOSE) << "wFormatTag : 0x"
+ << rtc::ToHex(pWfxOut->wFormatTag) << " ("
+ << pWfxOut->wFormatTag << ")";
+ // number of channels (i.e. mono, stereo...)
+ RTC_LOG(LS_VERBOSE) << "nChannels : " << pWfxOut->nChannels;
+ // sample rate
+ RTC_LOG(LS_VERBOSE) << "nSamplesPerSec : " << pWfxOut->nSamplesPerSec;
+ // for buffer estimation
+ RTC_LOG(LS_VERBOSE) << "nAvgBytesPerSec: " << pWfxOut->nAvgBytesPerSec;
+ // block size of data
+ RTC_LOG(LS_VERBOSE) << "nBlockAlign : " << pWfxOut->nBlockAlign;
+ // number of bits per sample of mono data
+ RTC_LOG(LS_VERBOSE) << "wBitsPerSample : " << pWfxOut->wBitsPerSample;
+ RTC_LOG(LS_VERBOSE) << "cbSize : " << pWfxOut->cbSize;
+ }
+
+ // Set wave format
+ Wfx.wFormatTag = WAVE_FORMAT_PCM;
+ Wfx.wBitsPerSample = 16;
+ Wfx.cbSize = 0;
+
+ const int freqs[] = {48000, 44100, 16000, 96000, 32000, 8000};
+ hr = S_FALSE;
+
+ // Iterate over frequencies and channels, in order of priority
+ for (unsigned int freq = 0; freq < sizeof(freqs) / sizeof(freqs[0]); freq++) {
+ for (unsigned int chan = 0; chan < sizeof(_playChannelsPrioList) /
+ sizeof(_playChannelsPrioList[0]);
+ chan++) {
+ Wfx.nChannels = _playChannelsPrioList[chan];
+ Wfx.nSamplesPerSec = freqs[freq];
+ Wfx.nBlockAlign = Wfx.nChannels * Wfx.wBitsPerSample / 8;
+ Wfx.nAvgBytesPerSec = Wfx.nSamplesPerSec * Wfx.nBlockAlign;
+ // If the method succeeds and the audio endpoint device supports the
+ // specified stream format, it returns S_OK. If the method succeeds and
+ // provides a closest match to the specified format, it returns S_FALSE.
+ hr = _ptrClientOut->IsFormatSupported(AUDCLNT_SHAREMODE_SHARED, &Wfx,
+ &pWfxClosestMatch);
+ if (hr == S_OK) {
+ break;
+ } else {
+ if (pWfxClosestMatch) {
+ RTC_LOG(LS_INFO) << "nChannels=" << Wfx.nChannels
+ << ", nSamplesPerSec=" << Wfx.nSamplesPerSec
+ << " is not supported. Closest match: "
+ "nChannels="
+ << pWfxClosestMatch->nChannels << ", nSamplesPerSec="
+ << pWfxClosestMatch->nSamplesPerSec;
+ CoTaskMemFree(pWfxClosestMatch);
+ pWfxClosestMatch = NULL;
+ } else {
+ RTC_LOG(LS_INFO) << "nChannels=" << Wfx.nChannels
+ << ", nSamplesPerSec=" << Wfx.nSamplesPerSec
+ << " is not supported. No closest match.";
+ }
+ }
+ }
+ if (hr == S_OK)
+ break;
+ }
+
+ // TODO(andrew): what happens in the event of failure in the above loop?
+ // Is _ptrClientOut->Initialize expected to fail?
+ // Same in InitRecording().
+ if (hr == S_OK) {
+ _playAudioFrameSize = Wfx.nBlockAlign;
+    // Block size is the number of samples per channel in 10 ms.
+ _playBlockSize = Wfx.nSamplesPerSec / 100;
+ _playSampleRate = Wfx.nSamplesPerSec;
+    _devicePlaySampleRate =
+        Wfx.nSamplesPerSec;  // The rate the endpoint device actually runs at.
+ _devicePlayBlockSize = Wfx.nSamplesPerSec / 100;
+ _playChannels = Wfx.nChannels;
+
+ RTC_LOG(LS_VERBOSE) << "VoE selected this rendering format:";
+ RTC_LOG(LS_VERBOSE) << "wFormatTag : 0x"
+ << rtc::ToHex(Wfx.wFormatTag) << " (" << Wfx.wFormatTag
+ << ")";
+ RTC_LOG(LS_VERBOSE) << "nChannels : " << Wfx.nChannels;
+ RTC_LOG(LS_VERBOSE) << "nSamplesPerSec : " << Wfx.nSamplesPerSec;
+ RTC_LOG(LS_VERBOSE) << "nAvgBytesPerSec : " << Wfx.nAvgBytesPerSec;
+ RTC_LOG(LS_VERBOSE) << "nBlockAlign : " << Wfx.nBlockAlign;
+ RTC_LOG(LS_VERBOSE) << "wBitsPerSample : " << Wfx.wBitsPerSample;
+ RTC_LOG(LS_VERBOSE) << "cbSize : " << Wfx.cbSize;
+ RTC_LOG(LS_VERBOSE) << "Additional settings:";
+ RTC_LOG(LS_VERBOSE) << "_playAudioFrameSize: " << _playAudioFrameSize;
+ RTC_LOG(LS_VERBOSE) << "_playBlockSize : " << _playBlockSize;
+ RTC_LOG(LS_VERBOSE) << "_playChannels : " << _playChannels;
+ }
+
+ // Create a rendering stream.
+ //
+ // ****************************************************************************
+ // For a shared-mode stream that uses event-driven buffering, the caller must
+ // set both hnsPeriodicity and hnsBufferDuration to 0. The Initialize method
+ // determines how large a buffer to allocate based on the scheduling period
+ // of the audio engine. Although the client's buffer processing thread is
+ // event driven, the basic buffer management process, as described previously,
+ // is unaltered.
+ // Each time the thread awakens, it should call
+ // IAudioClient::GetCurrentPadding to determine how much data to write to a
+ // rendering buffer or read from a capture buffer. In contrast to the two
+ // buffers that the Initialize method allocates for an exclusive-mode stream
+ // that uses event-driven buffering, a shared-mode stream requires a single
+ // buffer.
+ // ****************************************************************************
+ //
+ REFERENCE_TIME hnsBufferDuration =
+ 0; // ask for minimum buffer size (default)
+ if (_devicePlaySampleRate == 44100) {
+ // Ask for a larger buffer size (30ms) when using 44.1kHz as render rate.
+ // There seems to be a larger risk of underruns for 44.1 compared
+ // with the default rate (48kHz). When using default, we set the requested
+ // buffer duration to 0, which sets the buffer to the minimum size
+ // required by the engine thread. The actual buffer size can then be
+ // read by GetBufferSize() and it is 20ms on most machines.
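+ // 30 ms expressed in 100-nanosecond REFERENCE_TIME units: 30 * 10000.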
+ hnsBufferDuration = 30 * 10000;
+ }
+ hr = _ptrClientOut->Initialize(
+ AUDCLNT_SHAREMODE_SHARED, // share Audio Engine with other applications
+ AUDCLNT_STREAMFLAGS_EVENTCALLBACK, // processing of the audio buffer by
+ // the client will be event driven
+ hnsBufferDuration, // requested buffer capacity as a time value (in
+ // 100-nanosecond units)
+ 0, // periodicity
+ &Wfx, // selected wave format
+ NULL); // session GUID
+
+ if (FAILED(hr)) {
+ RTC_LOG(LS_ERROR) << "IAudioClient::Initialize() failed:";
+ }
+ EXIT_ON_ERROR(hr);
+
+ if (_ptrAudioBuffer) {
+ // Update the audio buffer with the selected parameters
+ _ptrAudioBuffer->SetPlayoutSampleRate(_playSampleRate);
+ _ptrAudioBuffer->SetPlayoutChannels((uint8_t)_playChannels);
+ } else {
+ // We can enter this state during CoreAudioIsSupported() when no
+ // AudioDeviceImplementation has been created, hence the AudioDeviceBuffer
+ // does not exist. It is OK to end up here since we don't initiate any media
+ // in CoreAudioIsSupported().
+ RTC_LOG(LS_VERBOSE)
+ << "AudioDeviceBuffer must be attached before streaming can start";
+ }
+
+ // Get the actual size of the shared (endpoint) buffer.
+ // Typical value is 960 audio frames <=> 20ms @ 48kHz sample rate.
+ UINT bufferFrameCount(0);
+ hr = _ptrClientOut->GetBufferSize(&bufferFrameCount);
+ if (SUCCEEDED(hr)) {
+ RTC_LOG(LS_VERBOSE) << "IAudioClient::GetBufferSize() => "
+ << bufferFrameCount << " (<=> "
+ << bufferFrameCount * _playAudioFrameSize << " bytes)";
+ }
+
+ // Set the event handle that the system signals when an audio buffer is ready
+ // to be processed by the client.
+ hr = _ptrClientOut->SetEventHandle(_hRenderSamplesReadyEvent);
+ EXIT_ON_ERROR(hr);
+
+ // Get an IAudioRenderClient interface.
+ SAFE_RELEASE(_ptrRenderClient);
+ hr = _ptrClientOut->GetService(__uuidof(IAudioRenderClient),
+ (void**)&_ptrRenderClient);
+ EXIT_ON_ERROR(hr);
+
+ // Mark playout side as initialized
+ _playIsInitialized = true;
+
+ CoTaskMemFree(pWfxOut);
+ CoTaskMemFree(pWfxClosestMatch);
+
+ RTC_LOG(LS_VERBOSE) << "render side is now initialized";
+ return 0;
+
+Exit:
+ _TraceCOMError(hr);
+ CoTaskMemFree(pWfxOut);
+ CoTaskMemFree(pWfxClosestMatch);
+ SAFE_RELEASE(_ptrClientOut);
+ SAFE_RELEASE(_ptrRenderClient);
+ return -1;
+}
+
+// Capture initialization when the built-in AEC DirectX Media Object (DMO) is
+// used. Called from InitRecording(), most of which is skipped over. The DMO
+// handles device initialization itself.
+// Reference: http://msdn.microsoft.com/en-us/library/ff819492(v=vs.85).aspx
+int32_t AudioDeviceWindowsCore::InitRecordingDMO() {
+ RTC_DCHECK(_builtInAecEnabled);
+ RTC_DCHECK(_dmo);
+
+ if (SetDMOProperties() == -1) {
+ return -1;
+ }
+
+ DMO_MEDIA_TYPE mt = {};
+ HRESULT hr = MoInitMediaType(&mt, sizeof(WAVEFORMATEX));
+ if (FAILED(hr)) {
+ MoFreeMediaType(&mt);
+ _TraceCOMError(hr);
+ return -1;
+ }
+ mt.majortype = MEDIATYPE_Audio;
+ mt.subtype = MEDIASUBTYPE_PCM;
+ mt.formattype = FORMAT_WaveFormatEx;
+
+ // Supported formats
+ // nChannels: 1 (in AEC-only mode)
+ // nSamplesPerSec: 8000, 11025, 16000, 22050
+ // wBitsPerSample: 16
+ WAVEFORMATEX* ptrWav = reinterpret_cast<WAVEFORMATEX*>(mt.pbFormat);
+ ptrWav->wFormatTag = WAVE_FORMAT_PCM;
+ ptrWav->nChannels = 1;
+ // 16000 is the highest we can support with our resampler.
+ ptrWav->nSamplesPerSec = 16000;
+ ptrWav->nAvgBytesPerSec = 32000;
+ ptrWav->nBlockAlign = 2;
+ ptrWav->wBitsPerSample = 16;
+ ptrWav->cbSize = 0;
+
+ // Set the VoE format equal to the AEC output format.
+ _recAudioFrameSize = ptrWav->nBlockAlign;
+ _recSampleRate = ptrWav->nSamplesPerSec;
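+ // One 10 ms block: 160 samples at the 16 kHz rate selected above.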
+ _recBlockSize = ptrWav->nSamplesPerSec / 100;
+ _recChannels = ptrWav->nChannels;
+
+ // Set the DMO output format parameters.
+ hr = _dmo->SetOutputType(kAecCaptureStreamIndex, &mt, 0);
+ MoFreeMediaType(&mt);
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ return -1;
+ }
+
+ if (_ptrAudioBuffer) {
+ _ptrAudioBuffer->SetRecordingSampleRate(_recSampleRate);
+ _ptrAudioBuffer->SetRecordingChannels(_recChannels);
+ } else {
+ // Refer to InitRecording() for comments.
+ RTC_LOG(LS_VERBOSE)
+ << "AudioDeviceBuffer must be attached before streaming can start";
+ }
+
+ _mediaBuffer = rtc::make_ref_counted<MediaBufferImpl>(_recBlockSize *
+ _recAudioFrameSize);
+
+ // Optional, but if called, must be after media types are set.
+ hr = _dmo->AllocateStreamingResources();
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ return -1;
+ }
+
+ _recIsInitialized = true;
+ RTC_LOG(LS_VERBOSE) << "Capture side is now initialized";
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// InitRecording
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::InitRecording() {
+ MutexLock lock(&mutex_);
+
+ if (_recording) {
+ return -1;
+ }
+
+ if (_recIsInitialized) {
+ return 0;
+ }
+
+ if (QueryPerformanceFrequency(&_perfCounterFreq) == 0) {
+ return -1;
+ }
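+ // Factor that converts QPC ticks into 100-nanosecond units
+ // (10^7 units per second).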
+ _perfCounterFactor = 10000000.0 / (double)_perfCounterFreq.QuadPart;
+
+ if (_ptrDeviceIn == NULL) {
+ return -1;
+ }
+
+ // Initialize the microphone (devices might have been added or removed)
+ if (InitMicrophoneLocked() == -1) {
+ RTC_LOG(LS_WARNING) << "InitMicrophone() failed";
+ }
+
+ // Ensure that the updated capturing endpoint device is valid
+ if (_ptrDeviceIn == NULL) {
+ return -1;
+ }
+
+ if (_builtInAecEnabled) {
+ // The DMO will configure the capture device.
+ return InitRecordingDMO();
+ }
+
+ HRESULT hr = S_OK;
+ WAVEFORMATEX* pWfxIn = NULL;
+ WAVEFORMATEXTENSIBLE Wfx = WAVEFORMATEXTENSIBLE();
+ WAVEFORMATEX* pWfxClosestMatch = NULL;
+
+ // Create COM object with IAudioClient interface.
+ SAFE_RELEASE(_ptrClientIn);
+ hr = _ptrDeviceIn->Activate(__uuidof(IAudioClient), CLSCTX_ALL, NULL,
+ (void**)&_ptrClientIn);
+ EXIT_ON_ERROR(hr);
+
+ // Retrieve the stream format that the audio engine uses for its internal
+ // processing (mixing) of shared-mode streams.
+ hr = _ptrClientIn->GetMixFormat(&pWfxIn);
+ if (SUCCEEDED(hr)) {
+ RTC_LOG(LS_VERBOSE) << "Audio Engine's current capturing mix format:";
+ // format type
+ RTC_LOG(LS_VERBOSE) << "wFormatTag : 0x"
+ << rtc::ToHex(pWfxIn->wFormatTag) << " ("
+ << pWfxIn->wFormatTag << ")";
+ // number of channels (i.e. mono, stereo...)
+ RTC_LOG(LS_VERBOSE) << "nChannels : " << pWfxIn->nChannels;
+ // sample rate
+ RTC_LOG(LS_VERBOSE) << "nSamplesPerSec : " << pWfxIn->nSamplesPerSec;
+ // for buffer estimation
+ RTC_LOG(LS_VERBOSE) << "nAvgBytesPerSec: " << pWfxIn->nAvgBytesPerSec;
+ // block size of data
+ RTC_LOG(LS_VERBOSE) << "nBlockAlign : " << pWfxIn->nBlockAlign;
+ // number of bits per sample of mono data
+ RTC_LOG(LS_VERBOSE) << "wBitsPerSample : " << pWfxIn->wBitsPerSample;
+ RTC_LOG(LS_VERBOSE) << "cbSize : " << pWfxIn->cbSize;
+ }
+
+ // Set wave format
+ Wfx.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
+ Wfx.Format.wBitsPerSample = 16;
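+ // 22 extra bytes: sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX).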
+ Wfx.Format.cbSize = 22;
+ Wfx.dwChannelMask = 0;
+ Wfx.Samples.wValidBitsPerSample = Wfx.Format.wBitsPerSample;
+ Wfx.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
+
+ const int freqs[6] = {48000, 44100, 16000, 96000, 32000, 8000};
+ hr = S_FALSE;
+
+ // Iterate over frequencies and channels, in order of priority
+ for (unsigned int freq = 0; freq < sizeof(freqs) / sizeof(freqs[0]); freq++) {
+ for (unsigned int chan = 0;
+ chan < sizeof(_recChannelsPrioList) / sizeof(_recChannelsPrioList[0]);
+ chan++) {
+ Wfx.Format.nChannels = _recChannelsPrioList[chan];
+ Wfx.Format.nSamplesPerSec = freqs[freq];
+ Wfx.Format.nBlockAlign =
+ Wfx.Format.nChannels * Wfx.Format.wBitsPerSample / 8;
+ Wfx.Format.nAvgBytesPerSec =
+ Wfx.Format.nSamplesPerSec * Wfx.Format.nBlockAlign;
+ // If the method succeeds and the audio endpoint device supports the
+ // specified stream format, it returns S_OK. If the method succeeds and
+ // provides a closest match to the specified format, it returns S_FALSE.
+ hr = _ptrClientIn->IsFormatSupported(
+ AUDCLNT_SHAREMODE_SHARED, (WAVEFORMATEX*)&Wfx, &pWfxClosestMatch);
+ if (hr == S_OK) {
+ break;
+ } else {
+ if (pWfxClosestMatch) {
+ RTC_LOG(LS_INFO) << "nChannels=" << Wfx.Format.nChannels
+ << ", nSamplesPerSec=" << Wfx.Format.nSamplesPerSec
+ << " is not supported. Closest match: "
+ "nChannels="
+ << pWfxClosestMatch->nChannels << ", nSamplesPerSec="
+ << pWfxClosestMatch->nSamplesPerSec;
+ CoTaskMemFree(pWfxClosestMatch);
+ pWfxClosestMatch = NULL;
+ } else {
+ RTC_LOG(LS_INFO) << "nChannels=" << Wfx.Format.nChannels
+ << ", nSamplesPerSec=" << Wfx.Format.nSamplesPerSec
+ << " is not supported. No closest match.";
+ }
+ }
+ }
+ if (hr == S_OK)
+ break;
+ }
+
+ if (hr == S_OK) {
+ _recAudioFrameSize = Wfx.Format.nBlockAlign;
+ _recSampleRate = Wfx.Format.nSamplesPerSec;
+ _recBlockSize = Wfx.Format.nSamplesPerSec / 100;
+ _recChannels = Wfx.Format.nChannels;
+
+ RTC_LOG(LS_VERBOSE) << "VoE selected this capturing format:";
+ RTC_LOG(LS_VERBOSE) << "wFormatTag : 0x"
+ << rtc::ToHex(Wfx.Format.wFormatTag) << " ("
+ << Wfx.Format.wFormatTag << ")";
+ RTC_LOG(LS_VERBOSE) << "nChannels : " << Wfx.Format.nChannels;
+ RTC_LOG(LS_VERBOSE) << "nSamplesPerSec : " << Wfx.Format.nSamplesPerSec;
+ RTC_LOG(LS_VERBOSE) << "nAvgBytesPerSec : " << Wfx.Format.nAvgBytesPerSec;
+ RTC_LOG(LS_VERBOSE) << "nBlockAlign : " << Wfx.Format.nBlockAlign;
+ RTC_LOG(LS_VERBOSE) << "wBitsPerSample : " << Wfx.Format.wBitsPerSample;
+ RTC_LOG(LS_VERBOSE) << "cbSize : " << Wfx.Format.cbSize;
+ RTC_LOG(LS_VERBOSE) << "Additional settings:";
+ RTC_LOG(LS_VERBOSE) << "_recAudioFrameSize: " << _recAudioFrameSize;
+ RTC_LOG(LS_VERBOSE) << "_recBlockSize : " << _recBlockSize;
+ RTC_LOG(LS_VERBOSE) << "_recChannels : " << _recChannels;
+ }
+
+ // Create a capturing stream.
+ hr = _ptrClientIn->Initialize(
+ AUDCLNT_SHAREMODE_SHARED, // share Audio Engine with other applications
+ AUDCLNT_STREAMFLAGS_EVENTCALLBACK | // processing of the audio buffer by
+ // the client will be event driven
+ AUDCLNT_STREAMFLAGS_NOPERSIST, // volume and mute settings for an
+ // audio session will not persist
+ // across system restarts
+ 0, // required for event-driven shared mode
+ 0, // periodicity
+ (WAVEFORMATEX*)&Wfx, // selected wave format
+ NULL); // session GUID
+
+ if (hr != S_OK) {
+ RTC_LOG(LS_ERROR) << "IAudioClient::Initialize() failed:";
+ }
+ EXIT_ON_ERROR(hr);
+
+ if (_ptrAudioBuffer) {
+ // Update the audio buffer with the selected parameters
+ _ptrAudioBuffer->SetRecordingSampleRate(_recSampleRate);
+ _ptrAudioBuffer->SetRecordingChannels((uint8_t)_recChannels);
+ } else {
+ // We can enter this state during CoreAudioIsSupported() when no
+ // AudioDeviceImplementation has been created, hence the AudioDeviceBuffer
+ // does not exist. It is OK to end up here since we don't initiate any media
+ // in CoreAudioIsSupported().
+ RTC_LOG(LS_VERBOSE)
+ << "AudioDeviceBuffer must be attached before streaming can start";
+ }
+
+ // Get the actual size of the shared (endpoint) buffer.
+ // Typical value is 960 audio frames <=> 20ms @ 48kHz sample rate.
+ UINT bufferFrameCount(0);
+ hr = _ptrClientIn->GetBufferSize(&bufferFrameCount);
+ if (SUCCEEDED(hr)) {
+ RTC_LOG(LS_VERBOSE) << "IAudioClient::GetBufferSize() => "
+ << bufferFrameCount << " (<=> "
+ << bufferFrameCount * _recAudioFrameSize << " bytes)";
+ }
+
+ // Set the event handle that the system signals when an audio buffer is ready
+ // to be processed by the client.
+ hr = _ptrClientIn->SetEventHandle(_hCaptureSamplesReadyEvent);
+ EXIT_ON_ERROR(hr);
+
+ // Get an IAudioCaptureClient interface.
+ SAFE_RELEASE(_ptrCaptureClient);
+ hr = _ptrClientIn->GetService(__uuidof(IAudioCaptureClient),
+ (void**)&_ptrCaptureClient);
+ EXIT_ON_ERROR(hr);
+
+ // Mark capture side as initialized
+ _recIsInitialized = true;
+
+ CoTaskMemFree(pWfxIn);
+ CoTaskMemFree(pWfxClosestMatch);
+
+ RTC_LOG(LS_VERBOSE) << "capture side is now initialized";
+ return 0;
+
+Exit:
+ _TraceCOMError(hr);
+ CoTaskMemFree(pWfxIn);
+ CoTaskMemFree(pWfxClosestMatch);
+ SAFE_RELEASE(_ptrClientIn);
+ SAFE_RELEASE(_ptrCaptureClient);
+ return -1;
+}
+
+// ----------------------------------------------------------------------------
+// StartRecording
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::StartRecording() {
+ if (!_recIsInitialized) {
+ return -1;
+ }
+
+ if (_hRecThread != NULL) {
+ return 0;
+ }
+
+ if (_recording) {
+ return 0;
+ }
+
+ {
+ MutexLock lockScoped(&mutex_);
+
+ // Create thread which will drive the capturing
+ LPTHREAD_START_ROUTINE lpStartAddress = WSAPICaptureThread;
+ if (_builtInAecEnabled) {
+ // Redirect to the DMO polling method.
+ lpStartAddress = WSAPICaptureThreadPollDMO;
+
+ if (!_playing) {
+ // The DMO won't provide us captured output data unless we
+ // give it render data to process.
+ RTC_LOG(LS_ERROR)
+ << "Playout must be started before recording when using"
+ " the built-in AEC";
+ return -1;
+ }
+ }
+
+ RTC_DCHECK(_hRecThread == NULL);
+ _hRecThread = CreateThread(NULL, 0, lpStartAddress, this, 0, NULL);
+ if (_hRecThread == NULL) {
+ RTC_LOG(LS_ERROR) << "failed to create the recording thread";
+ return -1;
+ }
+
+ // Set thread priority to highest possible
+ SetThreadPriority(_hRecThread, THREAD_PRIORITY_TIME_CRITICAL);
+ } // lockScoped
+
+ DWORD ret = WaitForSingleObject(_hCaptureStartedEvent, 1000);
+ if (ret != WAIT_OBJECT_0) {
+ RTC_LOG(LS_VERBOSE) << "capturing did not start up properly";
+ return -1;
+ }
+ RTC_LOG(LS_VERBOSE) << "capture audio stream has now started...";
+
+ _recording = true;
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// StopRecording
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::StopRecording() {
+ int32_t err = 0;
+
+ if (!_recIsInitialized) {
+ return 0;
+ }
+
+ _Lock();
+
+ if (_hRecThread == NULL) {
+ RTC_LOG(LS_VERBOSE)
+ << "no capturing stream is active => close down WASAPI only";
+ SAFE_RELEASE(_ptrClientIn);
+ SAFE_RELEASE(_ptrCaptureClient);
+ _recIsInitialized = false;
+ _recording = false;
+ _UnLock();
+ return 0;
+ }
+
+ // Stop the driving thread...
+ RTC_LOG(LS_VERBOSE) << "closing down the webrtc_core_audio_capture_thread...";
+ // Manual-reset event; it will remain signalled to stop all capture threads.
+ SetEvent(_hShutdownCaptureEvent);
+
+ _UnLock();
+ DWORD ret = WaitForSingleObject(_hRecThread, 2000);
+ if (ret != WAIT_OBJECT_0) {
+ RTC_LOG(LS_ERROR)
+ << "failed to close down webrtc_core_audio_capture_thread";
+ err = -1;
+ } else {
+ RTC_LOG(LS_VERBOSE) << "webrtc_core_audio_capture_thread is now closed";
+ }
+ _Lock();
+
+ ResetEvent(_hShutdownCaptureEvent); // Must be manually reset.
+ // Ensure that the thread has released these interfaces properly.
+ RTC_DCHECK(err == -1 || _ptrClientIn == NULL);
+ RTC_DCHECK(err == -1 || _ptrCaptureClient == NULL);
+
+ _recIsInitialized = false;
+ _recording = false;
+
+ // These will create thread leaks in the event of an error,
+ // but we can at least resume the call.
+ CloseHandle(_hRecThread);
+ _hRecThread = NULL;
+
+ if (_builtInAecEnabled) {
+ RTC_DCHECK(_dmo);
+ // This is necessary. Otherwise the DMO can generate garbage render
+ // audio even after rendering has stopped.
+ HRESULT hr = _dmo->FreeStreamingResources();
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ err = -1;
+ }
+ }
+
+ _UnLock();
+
+ return err;
+}
+
+// ----------------------------------------------------------------------------
+// RecordingIsInitialized
+// ----------------------------------------------------------------------------
+
+bool AudioDeviceWindowsCore::RecordingIsInitialized() const {
+ return (_recIsInitialized);
+}
+
+// ----------------------------------------------------------------------------
+// Recording
+// ----------------------------------------------------------------------------
+
+bool AudioDeviceWindowsCore::Recording() const {
+ return (_recording);
+}
+
+// ----------------------------------------------------------------------------
+// PlayoutIsInitialized
+// ----------------------------------------------------------------------------
+
+bool AudioDeviceWindowsCore::PlayoutIsInitialized() const {
+ return (_playIsInitialized);
+}
+
+// ----------------------------------------------------------------------------
+// StartPlayout
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::StartPlayout() {
+ if (!_playIsInitialized) {
+ return -1;
+ }
+
+ if (_hPlayThread != NULL) {
+ return 0;
+ }
+
+ if (_playing) {
+ return 0;
+ }
+
+ {
+ MutexLock lockScoped(&mutex_);
+
+ // Create thread which will drive the rendering.
+ RTC_DCHECK(_hPlayThread == NULL);
+ _hPlayThread = CreateThread(NULL, 0, WSAPIRenderThread, this, 0, NULL);
+ if (_hPlayThread == NULL) {
+ RTC_LOG(LS_ERROR) << "failed to create the playout thread";
+ return -1;
+ }
+
+ // Set thread priority to highest possible.
+ SetThreadPriority(_hPlayThread, THREAD_PRIORITY_TIME_CRITICAL);
+ } // lockScoped
+
+ DWORD ret = WaitForSingleObject(_hRenderStartedEvent, 1000);
+ if (ret != WAIT_OBJECT_0) {
+ RTC_LOG(LS_VERBOSE) << "rendering did not start up properly";
+ return -1;
+ }
+
+ _playing = true;
+ RTC_LOG(LS_VERBOSE) << "rendering audio stream has now started...";
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// StopPlayout
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::StopPlayout() {
+ if (!_playIsInitialized) {
+ return 0;
+ }
+
+ {
+ MutexLock lockScoped(&mutex_);
+
+ if (_hPlayThread == NULL) {
+ RTC_LOG(LS_VERBOSE)
+ << "no rendering stream is active => close down WASAPI only";
+ SAFE_RELEASE(_ptrClientOut);
+ SAFE_RELEASE(_ptrRenderClient);
+ _playIsInitialized = false;
+ _playing = false;
+ return 0;
+ }
+
+ // stop the driving thread...
+ RTC_LOG(LS_VERBOSE)
+ << "closing down the webrtc_core_audio_render_thread...";
+ SetEvent(_hShutdownRenderEvent);
+ } // lockScoped
+
+ DWORD ret = WaitForSingleObject(_hPlayThread, 2000);
+ if (ret != WAIT_OBJECT_0) {
+ // the thread did not stop as it should
+ RTC_LOG(LS_ERROR) << "failed to close down webrtc_core_audio_render_thread";
+ CloseHandle(_hPlayThread);
+ _hPlayThread = NULL;
+ _playIsInitialized = false;
+ _playing = false;
+ return -1;
+ }
+
+ {
+ MutexLock lockScoped(&mutex_);
+ RTC_LOG(LS_VERBOSE) << "webrtc_core_audio_render_thread is now closed";
+
+ // Reset this event manually each time we are done with it. If the
+ // render thread exited before StopPlayout() was called, a stale event
+ // could otherwise be caught by a new render thread within the same VoE
+ // instance.
+ ResetEvent(_hShutdownRenderEvent);
+
+ SAFE_RELEASE(_ptrClientOut);
+ SAFE_RELEASE(_ptrRenderClient);
+
+ _playIsInitialized = false;
+ _playing = false;
+
+ CloseHandle(_hPlayThread);
+ _hPlayThread = NULL;
+
+ if (_builtInAecEnabled && _recording) {
+ // The DMO won't provide us captured output data unless we
+ // give it render data to process.
+ //
+ // We still permit the playout to shutdown, and trace a warning.
+ // Otherwise, VoE can get into a state which will never permit
+ // playout to stop properly.
+ RTC_LOG(LS_WARNING)
+ << "Recording should be stopped before playout when using the"
+ " built-in AEC";
+ }
+
+ // Reset the playout delay value.
+ _sndCardPlayDelay = 0;
+ } // critScoped
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// PlayoutDelay
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::PlayoutDelay(uint16_t& delayMS) const {
+ MutexLock lockScoped(&mutex_);
+ delayMS = static_cast<uint16_t>(_sndCardPlayDelay);
+ return 0;
+}
+
+bool AudioDeviceWindowsCore::BuiltInAECIsAvailable() const {
+ return _dmo != nullptr;
+}
+
+// ----------------------------------------------------------------------------
+// Playing
+// ----------------------------------------------------------------------------
+
+bool AudioDeviceWindowsCore::Playing() const {
+ return (_playing);
+}
+
+// ============================================================================
+// Private Methods
+// ============================================================================
+
+// ----------------------------------------------------------------------------
+// [static] WSAPIRenderThread
+// ----------------------------------------------------------------------------
+
+DWORD WINAPI AudioDeviceWindowsCore::WSAPIRenderThread(LPVOID context) {
+ return reinterpret_cast<AudioDeviceWindowsCore*>(context)->DoRenderThread();
+}
+
+// ----------------------------------------------------------------------------
+// [static] WSAPICaptureThread
+// ----------------------------------------------------------------------------
+
+DWORD WINAPI AudioDeviceWindowsCore::WSAPICaptureThread(LPVOID context) {
+ return reinterpret_cast<AudioDeviceWindowsCore*>(context)->DoCaptureThread();
+}
+
+DWORD WINAPI AudioDeviceWindowsCore::WSAPICaptureThreadPollDMO(LPVOID context) {
+ return reinterpret_cast<AudioDeviceWindowsCore*>(context)
+ ->DoCaptureThreadPollDMO();
+}
+
+// ----------------------------------------------------------------------------
+// DoRenderThread
+// ----------------------------------------------------------------------------
+
+DWORD AudioDeviceWindowsCore::DoRenderThread() {
+ bool keepPlaying = true;
+ HANDLE waitArray[2] = {_hShutdownRenderEvent, _hRenderSamplesReadyEvent};
+ HRESULT hr = S_OK;
+ HANDLE hMmTask = NULL;
+
+ // Initialize COM as MTA in this thread.
+ ScopedCOMInitializer comInit(ScopedCOMInitializer::kMTA);
+ if (!comInit.Succeeded()) {
+ RTC_LOG(LS_ERROR) << "failed to initialize COM in render thread";
+ return 1;
+ }
+
+ rtc::SetCurrentThreadName("webrtc_core_audio_render_thread");
+
+ // Use Multimedia Class Scheduler Service (MMCSS) to boost the thread
+ // priority.
+ //
+ if (_winSupportAvrt) {
+ DWORD taskIndex(0);
+ hMmTask = _PAvSetMmThreadCharacteristicsA("Pro Audio", &taskIndex);
+ if (hMmTask) {
+ if (FALSE == _PAvSetMmThreadPriority(hMmTask, AVRT_PRIORITY_CRITICAL)) {
+ RTC_LOG(LS_WARNING) << "failed to boost play-thread using MMCSS";
+ }
+ RTC_LOG(LS_VERBOSE)
+ << "render thread is now registered with MMCSS (taskIndex="
+ << taskIndex << ")";
+ } else {
+ RTC_LOG(LS_WARNING) << "failed to enable MMCSS on render thread (err="
+ << GetLastError() << ")";
+ _TraceCOMError(GetLastError());
+ }
+ }
+
+ _Lock();
+
+ IAudioClock* clock = NULL;
+
+ // Get size of rendering buffer (length is expressed as the number of audio
+ // frames the buffer can hold). This value is fixed during the rendering
+ // session.
+ //
+ UINT32 bufferLength = 0;
+ hr = _ptrClientOut->GetBufferSize(&bufferLength);
+ EXIT_ON_ERROR(hr);
+ RTC_LOG(LS_VERBOSE) << "[REND] size of buffer : " << bufferLength;
+
+ // Get maximum latency for the current stream (will not change for the
+ // lifetime of the IAudioClient object).
+ //
+ REFERENCE_TIME latency;
+ _ptrClientOut->GetStreamLatency(&latency);
+ RTC_LOG(LS_VERBOSE) << "[REND] max stream latency : " << (DWORD)latency
+ << " (" << (double)(latency / 10000.0) << " ms)";
+
+ // Get the length of the periodic interval separating successive processing
+ // passes by the audio engine on the data in the endpoint buffer.
+ //
+ // The period between processing passes by the audio engine is fixed for a
+ // particular audio endpoint device and represents the smallest processing
+ // quantum for the audio engine. This period plus the stream latency between
+ // the buffer and endpoint device represents the minimum possible latency that
+ // an audio application can achieve. Typical value: 100000 <=> 0.01 sec =
+ // 10ms.
+ //
+ REFERENCE_TIME devPeriod = 0;
+ REFERENCE_TIME devPeriodMin = 0;
+ _ptrClientOut->GetDevicePeriod(&devPeriod, &devPeriodMin);
+ RTC_LOG(LS_VERBOSE) << "[REND] device period : " << (DWORD)devPeriod
+ << " (" << (double)(devPeriod / 10000.0) << " ms)";
+
+ // Derive initial rendering delay.
+ // Example: 10*(960/480) + 15 = 20 + 15 = 35ms
+ //
+ int playout_delay = 10 * (bufferLength / _playBlockSize) +
+ (int)((latency + devPeriod) / 10000);
+ _sndCardPlayDelay = playout_delay;
+ _writtenSamples = 0;
+ RTC_LOG(LS_VERBOSE) << "[REND] initial delay : " << playout_delay;
+
+ double endpointBufferSizeMS =
+ 10.0 * ((double)bufferLength / (double)_devicePlayBlockSize);
+ RTC_LOG(LS_VERBOSE) << "[REND] endpointBufferSizeMS : "
+ << endpointBufferSizeMS;
+
+ // Before starting the stream, fill the rendering buffer with silence.
+ //
+ BYTE* pData = NULL;
+ hr = _ptrRenderClient->GetBuffer(bufferLength, &pData);
+ EXIT_ON_ERROR(hr);
+
+ hr =
+ _ptrRenderClient->ReleaseBuffer(bufferLength, AUDCLNT_BUFFERFLAGS_SILENT);
+ EXIT_ON_ERROR(hr);
+
+ _writtenSamples += bufferLength;
+
+ hr = _ptrClientOut->GetService(__uuidof(IAudioClock), (void**)&clock);
+ if (FAILED(hr)) {
+ RTC_LOG(LS_WARNING)
+ << "failed to get IAudioClock interface from the IAudioClient";
+ }
+
+ // Start up the rendering audio stream.
+ hr = _ptrClientOut->Start();
+ EXIT_ON_ERROR(hr);
+
+ _UnLock();
+
+ // Set the event that tells the calling thread to set the playing state
+ // to true.
+ //
+ SetEvent(_hRenderStartedEvent);
+
+ // >> ------------------ THREAD LOOP ------------------
+
+ while (keepPlaying) {
+ // Wait for a render notification event or a shutdown event
+ DWORD waitResult = WaitForMultipleObjects(2, waitArray, FALSE, 500);
+ switch (waitResult) {
+ case WAIT_OBJECT_0 + 0: // _hShutdownRenderEvent
+ keepPlaying = false;
+ break;
+ case WAIT_OBJECT_0 + 1: // _hRenderSamplesReadyEvent
+ break;
+ case WAIT_TIMEOUT: // timeout notification
+ RTC_LOG(LS_WARNING) << "render event timed out after 0.5 seconds";
+ goto Exit;
+ default: // unexpected error
+ RTC_LOG(LS_WARNING) << "unknown wait termination on render side";
+ goto Exit;
+ }
+
+ while (keepPlaying) {
+ _Lock();
+
+ // Sanity check to ensure that essential states are not modified
+ // during the unlocked period.
+ if (_ptrRenderClient == NULL || _ptrClientOut == NULL) {
+ _UnLock();
+ RTC_LOG(LS_ERROR)
+ << "output state has been modified during unlocked period";
+ goto Exit;
+ }
+
+ // Get the number of frames of padding (queued up to play) in the endpoint
+ // buffer.
+ UINT32 padding = 0;
+ hr = _ptrClientOut->GetCurrentPadding(&padding);
+ EXIT_ON_ERROR(hr);
+
+ // Derive the amount of available space in the output buffer
+ uint32_t framesAvailable = bufferLength - padding;
+
+ // Do we have 10 ms available in the render buffer?
+ if (framesAvailable < _playBlockSize) {
+ // Not enough space in render buffer to store next render packet.
+ _UnLock();
+ break;
+ }
+
+ // Write n*10ms buffers to the render buffer
+ const uint32_t n10msBuffers = (framesAvailable / _playBlockSize);
+ for (uint32_t n = 0; n < n10msBuffers; n++) {
+ // Get pointer (i.e., grab the buffer) to next space in the shared
+ // render buffer.
+ hr = _ptrRenderClient->GetBuffer(_playBlockSize, &pData);
+ EXIT_ON_ERROR(hr);
+
+ if (_ptrAudioBuffer) {
+ // Request data to be played out (#bytes =
+ // _playBlockSize*_audioFrameSize)
+ _UnLock();
+ int32_t nSamples =
+ _ptrAudioBuffer->RequestPlayoutData(_playBlockSize);
+ _Lock();
+
+ if (nSamples == -1) {
+ _UnLock();
+ RTC_LOG(LS_ERROR) << "failed to read data from render client";
+ goto Exit;
+ }
+
+ // Sanity check to ensure that essential states are not modified
+ // during the unlocked period
+ if (_ptrRenderClient == NULL || _ptrClientOut == NULL) {
+ _UnLock();
+ RTC_LOG(LS_ERROR)
+ << "output state has been modified during unlocked"
+ " period";
+ goto Exit;
+ }
+ if (nSamples != static_cast<int32_t>(_playBlockSize)) {
+ RTC_LOG(LS_WARNING)
+ << "nSamples(" << nSamples << ") != _playBlockSize"
+ << _playBlockSize << ")";
+ }
+
+ // Get the actual (stored) data
+ nSamples = _ptrAudioBuffer->GetPlayoutData((int8_t*)pData);
+ }
+
+ DWORD dwFlags(0);
+ hr = _ptrRenderClient->ReleaseBuffer(_playBlockSize, dwFlags);
+ // See http://msdn.microsoft.com/en-us/library/dd316605(VS.85).aspx
+ // for more details regarding AUDCLNT_E_DEVICE_INVALIDATED.
+ EXIT_ON_ERROR(hr);
+
+ _writtenSamples += _playBlockSize;
+ }
+
+ // Check the current delay on the playout side.
+ if (clock) {
+ UINT64 pos = 0;
+ UINT64 freq = 1;
+ clock->GetPosition(&pos, NULL);
+ clock->GetFrequency(&freq);
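+ // Delay is the audio written but not yet played:
+ // (writtenSamples / sampleRate - position / frequency) * 1000 ms.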
+ playout_delay = ROUND((double(_writtenSamples) / _devicePlaySampleRate -
+ double(pos) / freq) *
+ 1000.0);
+ _sndCardPlayDelay = playout_delay;
+ }
+
+ _UnLock();
+ }
+ }
+
+ // ------------------ THREAD LOOP ------------------ <<
+
+ SleepMs(static_cast<DWORD>(endpointBufferSizeMS + 0.5));
+ hr = _ptrClientOut->Stop();
+
+Exit:
+ SAFE_RELEASE(clock);
+
+ if (FAILED(hr)) {
+ _ptrClientOut->Stop();
+ _UnLock();
+ _TraceCOMError(hr);
+ }
+
+ if (_winSupportAvrt) {
+ if (NULL != hMmTask) {
+ _PAvRevertMmThreadCharacteristics(hMmTask);
+ }
+ }
+
+ _Lock();
+
+ if (keepPlaying) {
+ if (_ptrClientOut != NULL) {
+ hr = _ptrClientOut->Stop();
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ }
+ hr = _ptrClientOut->Reset();
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ }
+ }
+ RTC_LOG(LS_ERROR)
+ << "Playout error: rendering thread has ended pre-maturely";
+ } else {
+ RTC_LOG(LS_VERBOSE) << "_Rendering thread is now terminated properly";
+ }
+
+ _UnLock();
+
+ return (DWORD)hr;
+}
+
+DWORD AudioDeviceWindowsCore::InitCaptureThreadPriority() {
+ _hMmTask = NULL;
+
+ rtc::SetCurrentThreadName("webrtc_core_audio_capture_thread");
+
+ // Use Multimedia Class Scheduler Service (MMCSS) to boost the thread
+ // priority.
+ if (_winSupportAvrt) {
+ DWORD taskIndex(0);
+ _hMmTask = _PAvSetMmThreadCharacteristicsA("Pro Audio", &taskIndex);
+ if (_hMmTask) {
+ if (!_PAvSetMmThreadPriority(_hMmTask, AVRT_PRIORITY_CRITICAL)) {
+ RTC_LOG(LS_WARNING) << "failed to boost rec-thread using MMCSS";
+ }
+ RTC_LOG(LS_VERBOSE)
+ << "capture thread is now registered with MMCSS (taskIndex="
+ << taskIndex << ")";
+ } else {
+ RTC_LOG(LS_WARNING) << "failed to enable MMCSS on capture thread (err="
+ << GetLastError() << ")";
+ _TraceCOMError(GetLastError());
+ }
+ }
+
+ return S_OK;
+}
+
+void AudioDeviceWindowsCore::RevertCaptureThreadPriority() {
+ if (_winSupportAvrt) {
+ if (NULL != _hMmTask) {
+ _PAvRevertMmThreadCharacteristics(_hMmTask);
+ }
+ }
+
+ _hMmTask = NULL;
+}
+
+DWORD AudioDeviceWindowsCore::DoCaptureThreadPollDMO() {
+ RTC_DCHECK(_mediaBuffer);
+ bool keepRecording = true;
+
+ // Initialize COM as MTA in this thread.
+ ScopedCOMInitializer comInit(ScopedCOMInitializer::kMTA);
+ if (!comInit.Succeeded()) {
+ RTC_LOG(LS_ERROR) << "failed to initialize COM in polling DMO thread";
+ return 1;
+ }
+
+ HRESULT hr = InitCaptureThreadPriority();
+ if (FAILED(hr)) {
+ return hr;
+ }
+
+ // Set the event that tells the calling thread to set the recording
+ // state to true.
+ SetEvent(_hCaptureStartedEvent);
+
+ // >> ---------------------------- THREAD LOOP ----------------------------
+ while (keepRecording) {
+ // Poll the DMO every 5 ms.
+ // (The same interval used in the Wave implementation.)
+ DWORD waitResult = WaitForSingleObject(_hShutdownCaptureEvent, 5);
+ switch (waitResult) {
+ case WAIT_OBJECT_0: // _hShutdownCaptureEvent
+ keepRecording = false;
+ break;
+ case WAIT_TIMEOUT: // timeout notification
+ break;
+ default: // unexpected error
+ RTC_LOG(LS_WARNING) << "Unknown wait termination on capture side";
+ hr = -1; // To signal an error callback.
+ keepRecording = false;
+ break;
+ }
+
+ while (keepRecording) {
+ MutexLock lockScoped(&mutex_);
+
+ DWORD dwStatus = 0;
+ {
+ DMO_OUTPUT_DATA_BUFFER dmoBuffer = {0};
+ dmoBuffer.pBuffer = _mediaBuffer.get();
+ dmoBuffer.pBuffer->AddRef();
+
+ // Poll the DMO for AEC-processed capture data. The DMO will
+ // copy available data to `dmoBuffer`, and should only return
+ // 10 ms frames. The `dwStatus` out-parameter of ProcessOutput
+ // should be ignored; the per-buffer status is read below.
+ hr = _dmo->ProcessOutput(0, 1, &dmoBuffer, &dwStatus);
+ SAFE_RELEASE(dmoBuffer.pBuffer);
+ dwStatus = dmoBuffer.dwStatus;
+ }
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ keepRecording = false;
+ RTC_DCHECK_NOTREACHED();
+ break;
+ }
+
+ ULONG bytesProduced = 0;
+ BYTE* data;
+ // Get a pointer to the data buffer. This should be valid until
+ // the next call to ProcessOutput.
+ hr = _mediaBuffer->GetBufferAndLength(&data, &bytesProduced);
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ keepRecording = false;
+ RTC_DCHECK_NOTREACHED();
+ break;
+ }
+
+ if (bytesProduced > 0) {
+ const int kSamplesProduced = bytesProduced / _recAudioFrameSize;
+ // TODO(andrew): verify that this is always satisfied. It might
+ // be that ProcessOutput will try to return more than 10 ms if
+ // we fail to call it frequently enough.
+ RTC_DCHECK_EQ(kSamplesProduced, static_cast<int>(_recBlockSize));
+ RTC_DCHECK_EQ(sizeof(BYTE), sizeof(int8_t));
+ _ptrAudioBuffer->SetRecordedBuffer(reinterpret_cast<int8_t*>(data),
+ kSamplesProduced);
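+ // Zero delays are passed here; presumably the delay estimates are
+ // not needed since the DMO has already applied AEC to this data.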
+ _ptrAudioBuffer->SetVQEData(0, 0);
+
+ _UnLock(); // Release lock while making the callback.
+ _ptrAudioBuffer->DeliverRecordedData();
+ _Lock();
+ }
+
+ // Reset length to indicate buffer availability.
+ hr = _mediaBuffer->SetLength(0);
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ keepRecording = false;
+ RTC_DCHECK_NOTREACHED();
+ break;
+ }
+
+ if (!(dwStatus & DMO_OUTPUT_DATA_BUFFERF_INCOMPLETE)) {
+ // The DMO cannot currently produce more data. This is the
+ // normal case; otherwise it means the DMO had more than 10 ms
+ // of data available and ProcessOutput should be called again.
+ break;
+ }
+ }
+ }
+ // ---------------------------- THREAD LOOP ---------------------------- <<
+
+ RevertCaptureThreadPriority();
+
+ if (FAILED(hr)) {
+ RTC_LOG(LS_ERROR)
+ << "Recording error: capturing thread has ended prematurely";
+ } else {
+ RTC_LOG(LS_VERBOSE) << "Capturing thread is now terminated properly";
+ }
+
+ return hr;
+}
+
+// ----------------------------------------------------------------------------
+// DoCaptureThread
+// ----------------------------------------------------------------------------
+
+DWORD AudioDeviceWindowsCore::DoCaptureThread() {
+ bool keepRecording = true;
+ HANDLE waitArray[2] = {_hShutdownCaptureEvent, _hCaptureSamplesReadyEvent};
+ HRESULT hr = S_OK;
+
+ LARGE_INTEGER t1;
+
+ BYTE* syncBuffer = NULL;
+ UINT32 syncBufIndex = 0;
+
+ _readSamples = 0;
+
+ // Initialize COM as MTA in this thread.
+ ScopedCOMInitializer comInit(ScopedCOMInitializer::kMTA);
+ if (!comInit.Succeeded()) {
+ RTC_LOG(LS_ERROR) << "failed to initialize COM in capture thread";
+ return 1;
+ }
+
+ hr = InitCaptureThreadPriority();
+ if (FAILED(hr)) {
+ return hr;
+ }
+
+ _Lock();
+
+ // Get size of capturing buffer (length is expressed as the number of audio
+ // frames the buffer can hold). This value is fixed during the capturing
+ // session.
+ //
+ UINT32 bufferLength = 0;
+ if (_ptrClientIn == NULL) {
+ RTC_LOG(LS_ERROR)
+ << "input state has been modified before capture loop starts.";
+ _UnLock();
+ return 1;
+ }
+ hr = _ptrClientIn->GetBufferSize(&bufferLength);
+ EXIT_ON_ERROR(hr);
+ RTC_LOG(LS_VERBOSE) << "[CAPT] size of buffer : " << bufferLength;
+
+ // Allocate memory for sync buffer.
+ // It is used to compensate between the native 44.1 kHz rate and the
+ // internal 44.0 kHz rate, and for cases when the capture buffer is
+ // larger than 10 ms.
+ //
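+ // Twice the endpoint buffer size, so a full buffer of new frames can be
+ // appended after any remainder that has not yet been delivered in 10 ms
+ // blocks.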
+ const UINT32 syncBufferSize = 2 * (bufferLength * _recAudioFrameSize);
+ syncBuffer = new BYTE[syncBufferSize];
+ if (syncBuffer == NULL) {
+ return (DWORD)E_POINTER;
+ }
+ RTC_LOG(LS_VERBOSE) << "[CAPT] size of sync buffer : " << syncBufferSize
+ << " [bytes]";
+
+ // Get maximum latency for the current stream (will not change for the
+ // lifetime of the IAudioClient object).
+ //
+ REFERENCE_TIME latency;
+ _ptrClientIn->GetStreamLatency(&latency);
+ RTC_LOG(LS_VERBOSE) << "[CAPT] max stream latency : " << (DWORD)latency
+ << " (" << (double)(latency / 10000.0) << " ms)";
+
+ // Get the length of the periodic interval separating successive processing
+ // passes by the audio engine on the data in the endpoint buffer.
+ //
+ REFERENCE_TIME devPeriod = 0;
+ REFERENCE_TIME devPeriodMin = 0;
+ _ptrClientIn->GetDevicePeriod(&devPeriod, &devPeriodMin);
+ RTC_LOG(LS_VERBOSE) << "[CAPT] device period : " << (DWORD)devPeriod
+ << " (" << (double)(devPeriod / 10000.0) << " ms)";
+
+ double extraDelayMS = (double)((latency + devPeriod) / 10000.0);
+ RTC_LOG(LS_VERBOSE) << "[CAPT] extraDelayMS : " << extraDelayMS;
+
+ double endpointBufferSizeMS =
+ 10.0 * ((double)bufferLength / (double)_recBlockSize);
+ RTC_LOG(LS_VERBOSE) << "[CAPT] endpointBufferSizeMS : "
+ << endpointBufferSizeMS;
+
+ // Start up the capturing stream.
+ //
+ hr = _ptrClientIn->Start();
+ EXIT_ON_ERROR(hr);
+
+ _UnLock();
+
+ // Set the event that tells the calling thread to set the recording
+ // state to true.
+ //
+ SetEvent(_hCaptureStartedEvent);
+
+ // >> ---------------------------- THREAD LOOP ----------------------------
+
+ while (keepRecording) {
+ // Wait for a capture notification event or a shutdown event
+ DWORD waitResult = WaitForMultipleObjects(2, waitArray, FALSE, 500);
+ switch (waitResult) {
+ case WAIT_OBJECT_0 + 0: // _hShutdownCaptureEvent
+ keepRecording = false;
+ break;
+ case WAIT_OBJECT_0 + 1: // _hCaptureSamplesReadyEvent
+ break;
+ case WAIT_TIMEOUT: // timeout notification
+ RTC_LOG(LS_WARNING) << "capture event timed out after 0.5 seconds";
+ goto Exit;
+ default: // unexpected error
+ RTC_LOG(LS_WARNING) << "unknown wait termination on capture side";
+ goto Exit;
+ }
+
+ while (keepRecording) {
+ BYTE* pData = 0;
+ UINT32 framesAvailable = 0;
+ DWORD flags = 0;
+ UINT64 recTime = 0;
+ UINT64 recPos = 0;
+
+ _Lock();
+
+ // Sanity check to ensure that essential states are not modified
+ // during the unlocked period.
+ if (_ptrCaptureClient == NULL || _ptrClientIn == NULL) {
+ _UnLock();
+ RTC_LOG(LS_ERROR)
+ << "input state has been modified during unlocked period";
+ goto Exit;
+ }
+
+ // Find out how much capture data is available
+ //
+ hr = _ptrCaptureClient->GetBuffer(
+ &pData, // packet that is ready to be read by the client
+ &framesAvailable, // #frames in the captured packet (can be zero)
+ &flags, // buffer-status flags
+ &recPos, // device position of first audio frame in data packet
+ &recTime); // value of performance counter at the time of recording
+ // the first audio frame
+
+ if (SUCCEEDED(hr)) {
+ if (AUDCLNT_S_BUFFER_EMPTY == hr) {
+ // Buffer was empty => start waiting for a new capture notification
+ // event
+ _UnLock();
+ break;
+ }
+
+ if (flags & AUDCLNT_BUFFERFLAGS_SILENT) {
+ // Treat all of the data in the packet as silence and ignore the
+ // actual data values.
+ RTC_LOG(LS_WARNING) << "AUDCLNT_BUFFERFLAGS_SILENT";
+ pData = NULL;
+ }
+
+ RTC_DCHECK_NE(framesAvailable, 0);
+
+ if (pData) {
+ CopyMemory(&syncBuffer[syncBufIndex * _recAudioFrameSize], pData,
+ framesAvailable * _recAudioFrameSize);
+ } else {
+ ZeroMemory(&syncBuffer[syncBufIndex * _recAudioFrameSize],
+ framesAvailable * _recAudioFrameSize);
+ }
+ RTC_DCHECK_GE(syncBufferSize, (syncBufIndex * _recAudioFrameSize) +
+ framesAvailable * _recAudioFrameSize);
+
+ // Release the capture buffer
+ //
+ hr = _ptrCaptureClient->ReleaseBuffer(framesAvailable);
+ EXIT_ON_ERROR(hr);
+
+ _readSamples += framesAvailable;
+ syncBufIndex += framesAvailable;
+
+ QueryPerformanceCounter(&t1);
+
+ // Get the current recording and playout delay.
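+ // The capture delay is (now - recTime) converted from 100-ns units to
+ // ms, plus the audio currently held in the sync buffer (10 ms per
+ // block), minus the 10 ms block about to be delivered.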
+ uint32_t sndCardRecDelay =
+ (uint32_t)(((((UINT64)t1.QuadPart * _perfCounterFactor) - recTime) /
+ 10000) +
+ (10 * syncBufIndex) / _recBlockSize - 10);
+ uint32_t sndCardPlayDelay = static_cast<uint32_t>(_sndCardPlayDelay);
+
+ while (syncBufIndex >= _recBlockSize) {
+ if (_ptrAudioBuffer) {
+ _ptrAudioBuffer->SetRecordedBuffer((const int8_t*)syncBuffer,
+ _recBlockSize);
+ _ptrAudioBuffer->SetVQEData(sndCardPlayDelay, sndCardRecDelay);
+
+ _ptrAudioBuffer->SetTypingStatus(KeyPressed());
+
+ _UnLock(); // release lock while making the callback
+ _ptrAudioBuffer->DeliverRecordedData();
+ _Lock(); // restore the lock
+
+ // Sanity check to ensure that essential states are not modified
+ // during the unlocked period
+ if (_ptrCaptureClient == NULL || _ptrClientIn == NULL) {
+ _UnLock();
+ RTC_LOG(LS_ERROR) << "input state has been modified during"
+ " unlocked period";
+ goto Exit;
+ }
+ }
+
+ // Store remaining data that could not be delivered as a 10 ms segment.
+ MoveMemory(&syncBuffer[0],
+ &syncBuffer[_recBlockSize * _recAudioFrameSize],
+ (syncBufIndex - _recBlockSize) * _recAudioFrameSize);
+ syncBufIndex -= _recBlockSize;
+ sndCardRecDelay -= 10;
+ }
+ } else {
+ // If GetBuffer returns AUDCLNT_E_BUFFER_ERROR, the thread consuming the
+ // audio samples must wait for the next processing pass. The client
+ // might benefit from keeping a count of the failed GetBuffer calls. If
+ // GetBuffer returns this error repeatedly, the client can start a new
+ // processing loop after shutting down the current client by calling
+ // IAudioClient::Stop, IAudioClient::Reset, and releasing the audio
+ // client.
+ RTC_LOG(LS_ERROR) << "IAudioCaptureClient::GetBuffer returned"
+ " AUDCLNT_E_BUFFER_ERROR, hr = 0x"
+ << rtc::ToHex(hr);
+ goto Exit;
+ }
+
+ _UnLock();
+ }
+ }
+
+ // ---------------------------- THREAD LOOP ---------------------------- <<
+
+ if (_ptrClientIn) {
+ hr = _ptrClientIn->Stop();
+ }
+
+Exit:
+ if (FAILED(hr)) {
+ _ptrClientIn->Stop();
+ _UnLock();
+ _TraceCOMError(hr);
+ }
+
+ RevertCaptureThreadPriority();
+
+ _Lock();
+
+ if (keepRecording) {
+ if (_ptrClientIn != NULL) {
+ hr = _ptrClientIn->Stop();
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ }
+ hr = _ptrClientIn->Reset();
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ }
+ }
+
+ RTC_LOG(LS_ERROR)
+ << "Recording error: capturing thread has ended pre-maturely";
+ } else {
+ RTC_LOG(LS_VERBOSE) << "_Capturing thread is now terminated properly";
+ }
+
+ SAFE_RELEASE(_ptrClientIn);
+ SAFE_RELEASE(_ptrCaptureClient);
+
+ _UnLock();
+
+ if (syncBuffer) {
+ delete[] syncBuffer;
+ }
+
+ return (DWORD)hr;
+}
+
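+ // Enables or disables the built-in AEC DMO. Must be called before
+ // InitRecording(); when enabled, playout must also be started before
+ // recording (see StartRecording()).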
+int32_t AudioDeviceWindowsCore::EnableBuiltInAEC(bool enable) {
+ if (_recIsInitialized) {
+ RTC_LOG(LS_ERROR)
+ << "Attempt to set Windows AEC with recording already initialized";
+ return -1;
+ }
+
+ if (_dmo == NULL) {
+ RTC_LOG(LS_ERROR)
+ << "Built-in AEC DMO was not initialized properly at create time";
+ return -1;
+ }
+
+ _builtInAecEnabled = enable;
+ return 0;
+}
+
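+ // The render and capture threads lock and unlock mutex_ across callback
+ // boundaries, a pattern the static thread-safety analysis cannot follow,
+ // so these wrappers disable it.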
+void AudioDeviceWindowsCore::_Lock() RTC_NO_THREAD_SAFETY_ANALYSIS {
+ mutex_.Lock();
+}
+
+void AudioDeviceWindowsCore::_UnLock() RTC_NO_THREAD_SAFETY_ANALYSIS {
+ mutex_.Unlock();
+}
+
+int AudioDeviceWindowsCore::SetDMOProperties() {
+ HRESULT hr = S_OK;
+ RTC_DCHECK(_dmo);
+
+ rtc::scoped_refptr<IPropertyStore> ps;
+ {
+ IPropertyStore* ptrPS = NULL;
+ hr = _dmo->QueryInterface(IID_IPropertyStore,
+ reinterpret_cast<void**>(&ptrPS));
+ if (FAILED(hr) || ptrPS == NULL) {
+ _TraceCOMError(hr);
+ return -1;
+ }
+ ps = ptrPS;
+ SAFE_RELEASE(ptrPS);
+ }
+
+ // Set the AEC system mode.
+ // SINGLE_CHANNEL_AEC - AEC processing only.
+ if (SetVtI4Property(ps.get(), MFPKEY_WMAAECMA_SYSTEM_MODE,
+ SINGLE_CHANNEL_AEC)) {
+ return -1;
+ }
+
+ // Set the AEC source mode.
+ // VARIANT_TRUE - Source mode (we poll the AEC for captured data).
+ if (SetBoolProperty(ps.get(), MFPKEY_WMAAECMA_DMO_SOURCE_MODE,
+ VARIANT_TRUE) == -1) {
+ return -1;
+ }
+
+ // Enable the feature mode.
+ // This lets us override all the default processing settings below.
+ if (SetBoolProperty(ps.get(), MFPKEY_WMAAECMA_FEATURE_MODE, VARIANT_TRUE) ==
+ -1) {
+ return -1;
+ }
+
+ // Disable analog AGC (default enabled).
+ if (SetBoolProperty(ps.get(), MFPKEY_WMAAECMA_MIC_GAIN_BOUNDER,
+ VARIANT_FALSE) == -1) {
+ return -1;
+ }
+
+ // Disable noise suppression (default enabled).
+ // 0 - Disabled, 1 - Enabled
+ if (SetVtI4Property(ps.get(), MFPKEY_WMAAECMA_FEATR_NS, 0) == -1) {
+ return -1;
+ }
+
+ // Relevant parameters to leave at default settings:
+ // MFPKEY_WMAAECMA_FEATR_AGC - Digital AGC (disabled).
+ // MFPKEY_WMAAECMA_FEATR_CENTER_CLIP - AEC center clipping (enabled).
+ // MFPKEY_WMAAECMA_FEATR_ECHO_LENGTH - Filter length (256 ms).
+ // TODO(andrew): investigate decreasing the length to 128 ms.
+ // MFPKEY_WMAAECMA_FEATR_FRAME_SIZE - Frame size (0).
+ // 0 is automatic; defaults to 160 samples (or 10 ms frames at the
+ // selected 16 kHz) as long as mic array processing is disabled.
+ // MFPKEY_WMAAECMA_FEATR_NOISE_FILL - Comfort noise (enabled).
+ // MFPKEY_WMAAECMA_FEATR_VAD - VAD (disabled).
+
+ // Set the devices selected by VoE. If using a default device, we need to
+ // search for the device index.
+ int inDevIndex = _inputDeviceIndex;
+ int outDevIndex = _outputDeviceIndex;
+ if (!_usingInputDeviceIndex) {
+ ERole role = eCommunications;
+ if (_inputDevice == AudioDeviceModule::kDefaultDevice) {
+ role = eConsole;
+ }
+
+ if (_GetDefaultDeviceIndex(eCapture, role, &inDevIndex) == -1) {
+ return -1;
+ }
+ }
+
+ if (!_usingOutputDeviceIndex) {
+ ERole role = eCommunications;
+ if (_outputDevice == AudioDeviceModule::kDefaultDevice) {
+ role = eConsole;
+ }
+
+ if (_GetDefaultDeviceIndex(eRender, role, &outDevIndex) == -1) {
+ return -1;
+ }
+ }
+
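+ // MFPKEY_WMAAECMA_DEVICE_INDEXES packs the render device index in the
+ // upper 16 bits and the capture device index in the lower 16 bits.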
+ DWORD devIndex = static_cast<uint32_t>(outDevIndex << 16) +
+ static_cast<uint32_t>(0x0000ffff & inDevIndex);
+ RTC_LOG(LS_VERBOSE) << "Capture device index: " << inDevIndex
+ << ", render device index: " << outDevIndex;
+ if (SetVtI4Property(ps.get(), MFPKEY_WMAAECMA_DEVICE_INDEXES, devIndex) ==
+ -1) {
+ return -1;
+ }
+
+ return 0;
+}
+
+int AudioDeviceWindowsCore::SetBoolProperty(IPropertyStore* ptrPS,
+ REFPROPERTYKEY key,
+ VARIANT_BOOL value) {
+ PROPVARIANT pv;
+ PropVariantInit(&pv);
+ pv.vt = VT_BOOL;
+ pv.boolVal = value;
+ HRESULT hr = ptrPS->SetValue(key, pv);
+ PropVariantClear(&pv);
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ return -1;
+ }
+ return 0;
+}
+
+int AudioDeviceWindowsCore::SetVtI4Property(IPropertyStore* ptrPS,
+ REFPROPERTYKEY key,
+ LONG value) {
+ PROPVARIANT pv;
+ PropVariantInit(&pv);
+ pv.vt = VT_I4;
+ pv.lVal = value;
+ HRESULT hr = ptrPS->SetValue(key, pv);
+ PropVariantClear(&pv);
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ return -1;
+ }
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// _RefreshDeviceList
+//
+// Creates a new list of endpoint rendering or capture devices after
+// deleting any previously created (and possibly out-of-date) list of
+// such devices.
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::_RefreshDeviceList(EDataFlow dir) {
+ RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+ HRESULT hr = S_OK;
+ IMMDeviceCollection* pCollection = NULL;
+
+ RTC_DCHECK(dir == eRender || dir == eCapture);
+ RTC_DCHECK(_ptrEnumerator);
+
+ // Create a fresh list of devices using the specified direction
+ hr = _ptrEnumerator->EnumAudioEndpoints(dir, DEVICE_STATE_ACTIVE,
+ &pCollection);
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ SAFE_RELEASE(pCollection);
+ return -1;
+ }
+
+ if (dir == eRender) {
+ SAFE_RELEASE(_ptrRenderCollection);
+ _ptrRenderCollection = pCollection;
+ } else {
+ SAFE_RELEASE(_ptrCaptureCollection);
+ _ptrCaptureCollection = pCollection;
+ }
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// _DeviceListCount
+//
+// Gets a count of the endpoint rendering or capture devices in the
+// current list of such devices.
+// ----------------------------------------------------------------------------
+
+int16_t AudioDeviceWindowsCore::_DeviceListCount(EDataFlow dir) {
+ RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+ HRESULT hr = S_OK;
+ UINT count = 0;
+
+ RTC_DCHECK(eRender == dir || eCapture == dir);
+
+ if (eRender == dir && NULL != _ptrRenderCollection) {
+ hr = _ptrRenderCollection->GetCount(&count);
+ } else if (NULL != _ptrCaptureCollection) {
+ hr = _ptrCaptureCollection->GetCount(&count);
+ }
+
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ return -1;
+ }
+
+ return static_cast<int16_t>(count);
+}
+
+// ----------------------------------------------------------------------------
+// _GetListDeviceName
+//
+// Gets the friendly name of an endpoint rendering or capture device
+// from the current list of such devices. The caller uses an index
+// into the list to identify the device.
+//
+// Uses: _ptrRenderCollection or _ptrCaptureCollection which is updated
+// in _RefreshDeviceList().
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::_GetListDeviceName(EDataFlow dir,
+ int index,
+ LPWSTR szBuffer,
+ int bufferLen) {
+ RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+ HRESULT hr = S_OK;
+ IMMDevice* pDevice = NULL;
+
+ RTC_DCHECK(dir == eRender || dir == eCapture);
+
+ if (eRender == dir && NULL != _ptrRenderCollection) {
+ hr = _ptrRenderCollection->Item(index, &pDevice);
+ } else if (NULL != _ptrCaptureCollection) {
+ hr = _ptrCaptureCollection->Item(index, &pDevice);
+ }
+
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ SAFE_RELEASE(pDevice);
+ return -1;
+ }
+
+ int32_t res = _GetDeviceName(pDevice, szBuffer, bufferLen);
+ SAFE_RELEASE(pDevice);
+ return res;
+}
+
+// ----------------------------------------------------------------------------
+// _GetDefaultDeviceName
+//
+// Gets the friendly name of an endpoint rendering or capture device
+// given a specified device role.
+//
+// Uses: _ptrEnumerator
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::_GetDefaultDeviceName(EDataFlow dir,
+ ERole role,
+ LPWSTR szBuffer,
+ int bufferLen) {
+ RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+ HRESULT hr = S_OK;
+ IMMDevice* pDevice = NULL;
+
+ RTC_DCHECK(dir == eRender || dir == eCapture);
+ RTC_DCHECK(role == eConsole || role == eCommunications);
+ RTC_DCHECK(_ptrEnumerator);
+
+ hr = _ptrEnumerator->GetDefaultAudioEndpoint(dir, role, &pDevice);
+
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ SAFE_RELEASE(pDevice);
+ return -1;
+ }
+
+ int32_t res = _GetDeviceName(pDevice, szBuffer, bufferLen);
+ SAFE_RELEASE(pDevice);
+ return res;
+}
+
+// ----------------------------------------------------------------------------
+// _GetListDeviceID
+//
+// Gets the unique ID string of an endpoint rendering or capture device
+// from the current list of such devices. The caller uses an index
+// into the list to identify the device.
+//
+// Uses: _ptrRenderCollection or _ptrCaptureCollection which is updated
+// in _RefreshDeviceList().
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::_GetListDeviceID(EDataFlow dir,
+ int index,
+ LPWSTR szBuffer,
+ int bufferLen) {
+ RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+ HRESULT hr = S_OK;
+ IMMDevice* pDevice = NULL;
+
+ RTC_DCHECK(dir == eRender || dir == eCapture);
+
+ if (eRender == dir && NULL != _ptrRenderCollection) {
+ hr = _ptrRenderCollection->Item(index, &pDevice);
+ } else if (NULL != _ptrCaptureCollection) {
+ hr = _ptrCaptureCollection->Item(index, &pDevice);
+ }
+
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ SAFE_RELEASE(pDevice);
+ return -1;
+ }
+
+ int32_t res = _GetDeviceID(pDevice, szBuffer, bufferLen);
+ SAFE_RELEASE(pDevice);
+ return res;
+}
+
+// ----------------------------------------------------------------------------
+// _GetDefaultDeviceID
+//
+ // Gets the unique device ID of an endpoint rendering or capture device
+// given a specified device role.
+//
+// Uses: _ptrEnumerator
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::_GetDefaultDeviceID(EDataFlow dir,
+ ERole role,
+ LPWSTR szBuffer,
+ int bufferLen) {
+ RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+ HRESULT hr = S_OK;
+ IMMDevice* pDevice = NULL;
+
+ RTC_DCHECK(dir == eRender || dir == eCapture);
+ RTC_DCHECK(role == eConsole || role == eCommunications);
+ RTC_DCHECK(_ptrEnumerator);
+
+ hr = _ptrEnumerator->GetDefaultAudioEndpoint(dir, role, &pDevice);
+
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ SAFE_RELEASE(pDevice);
+ return -1;
+ }
+
+ int32_t res = _GetDeviceID(pDevice, szBuffer, bufferLen);
+ SAFE_RELEASE(pDevice);
+ return res;
+}
+
+int32_t AudioDeviceWindowsCore::_GetDefaultDeviceIndex(EDataFlow dir,
+ ERole role,
+ int* index) {
+ RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+ HRESULT hr = S_OK;
+ WCHAR szDefaultDeviceID[MAX_PATH] = {0};
+ WCHAR szDeviceID[MAX_PATH] = {0};
+
+ const size_t kDeviceIDLength = sizeof(szDeviceID) / sizeof(szDeviceID[0]);
+ RTC_DCHECK_EQ(kDeviceIDLength,
+ sizeof(szDefaultDeviceID) / sizeof(szDefaultDeviceID[0]));
+
+ if (_GetDefaultDeviceID(dir, role, szDefaultDeviceID, kDeviceIDLength) ==
+ -1) {
+ return -1;
+ }
+
+ IMMDeviceCollection* collection = _ptrCaptureCollection;
+ if (dir == eRender) {
+ collection = _ptrRenderCollection;
+ }
+
+ if (!collection) {
+ RTC_LOG(LS_ERROR) << "Device collection not valid";
+ return -1;
+ }
+
+ UINT count = 0;
+ hr = collection->GetCount(&count);
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ return -1;
+ }
+
+ *index = -1;
+ for (UINT i = 0; i < count; i++) {
+ memset(szDeviceID, 0, sizeof(szDeviceID));
+ rtc::scoped_refptr<IMMDevice> device;
+ {
+ IMMDevice* ptrDevice = NULL;
+ hr = collection->Item(i, &ptrDevice);
+ if (FAILED(hr) || ptrDevice == NULL) {
+ _TraceCOMError(hr);
+ return -1;
+ }
+ device = ptrDevice;
+ SAFE_RELEASE(ptrDevice);
+ }
+
+ if (_GetDeviceID(device.get(), szDeviceID, kDeviceIDLength) == -1) {
+ return -1;
+ }
+
+ if (wcsncmp(szDefaultDeviceID, szDeviceID, kDeviceIDLength) == 0) {
+ // Found a match.
+ *index = i;
+ break;
+ }
+ }
+
+ if (*index == -1) {
+ RTC_LOG(LS_ERROR) << "Unable to find collection index for default device";
+ return -1;
+ }
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// _GetDeviceName
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::_GetDeviceName(IMMDevice* pDevice,
+ LPWSTR pszBuffer,
+ int bufferLen) {
+ RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+ static const WCHAR szDefault[] = L"<Device not available>";
+
+ HRESULT hr = E_FAIL;
+ IPropertyStore* pProps = NULL;
+ PROPVARIANT varName;
+
+ RTC_DCHECK(pszBuffer);
+ RTC_DCHECK_GT(bufferLen, 0);
+
+ if (pDevice != NULL) {
+ hr = pDevice->OpenPropertyStore(STGM_READ, &pProps);
+ if (FAILED(hr)) {
+ RTC_LOG(LS_ERROR) << "IMMDevice::OpenPropertyStore failed, hr = 0x"
+ << rtc::ToHex(hr);
+ }
+ }
+
+ // Initialize container for property value.
+ PropVariantInit(&varName);
+
+ if (SUCCEEDED(hr)) {
+ // Get the endpoint device's friendly-name property.
+ hr = pProps->GetValue(PKEY_Device_FriendlyName, &varName);
+ if (FAILED(hr)) {
+ RTC_LOG(LS_ERROR) << "IPropertyStore::GetValue failed, hr = 0x"
+ << rtc::ToHex(hr);
+ }
+ }
+
+ if ((SUCCEEDED(hr)) && (VT_EMPTY == varName.vt)) {
+ hr = E_FAIL;
+ RTC_LOG(LS_ERROR) << "IPropertyStore::GetValue returned no value,"
+ " hr = 0x"
+ << rtc::ToHex(hr);
+ }
+
+ if ((SUCCEEDED(hr)) && (VT_LPWSTR != varName.vt)) {
+ // The returned value is not a wide null terminated string.
+ hr = E_UNEXPECTED;
+ RTC_LOG(LS_ERROR) << "IPropertyStore::GetValue returned unexpected"
+ " type, hr = 0x"
+ << rtc::ToHex(hr);
+ }
+
+ if (SUCCEEDED(hr) && (varName.pwszVal != NULL)) {
+    // Copy the valid device name to the provided output buffer.
+ wcsncpy_s(pszBuffer, bufferLen, varName.pwszVal, _TRUNCATE);
+ } else {
+ // Failed to find the device name.
+ wcsncpy_s(pszBuffer, bufferLen, szDefault, _TRUNCATE);
+ }
+
+ PropVariantClear(&varName);
+ SAFE_RELEASE(pProps);
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// _GetDeviceID
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::_GetDeviceID(IMMDevice* pDevice,
+ LPWSTR pszBuffer,
+ int bufferLen) {
+ RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+ static const WCHAR szDefault[] = L"<Device not available>";
+
+ HRESULT hr = E_FAIL;
+ LPWSTR pwszID = NULL;
+
+ RTC_DCHECK(pszBuffer);
+ RTC_DCHECK_GT(bufferLen, 0);
+
+ if (pDevice != NULL) {
+ hr = pDevice->GetId(&pwszID);
+ }
+
+ if (hr == S_OK) {
+ // Found the device ID.
+ wcsncpy_s(pszBuffer, bufferLen, pwszID, _TRUNCATE);
+ } else {
+ // Failed to find the device ID.
+ wcsncpy_s(pszBuffer, bufferLen, szDefault, _TRUNCATE);
+ }
+
+ CoTaskMemFree(pwszID);
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// _GetDefaultDevice
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::_GetDefaultDevice(EDataFlow dir,
+ ERole role,
+ IMMDevice** ppDevice) {
+ RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+ HRESULT hr(S_OK);
+
+ RTC_DCHECK(_ptrEnumerator);
+
+ hr = _ptrEnumerator->GetDefaultAudioEndpoint(dir, role, ppDevice);
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ return -1;
+ }
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// _GetListDevice
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::_GetListDevice(EDataFlow dir,
+ int index,
+ IMMDevice** ppDevice) {
+ HRESULT hr(S_OK);
+
+ RTC_DCHECK(_ptrEnumerator);
+
+ IMMDeviceCollection* pCollection = NULL;
+
+ hr = _ptrEnumerator->EnumAudioEndpoints(
+ dir,
+ DEVICE_STATE_ACTIVE, // only active endpoints are OK
+ &pCollection);
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ SAFE_RELEASE(pCollection);
+ return -1;
+ }
+
+ hr = pCollection->Item(index, ppDevice);
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ SAFE_RELEASE(pCollection);
+ return -1;
+ }
+
+ SAFE_RELEASE(pCollection);
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// _EnumerateEndpointDevicesAll
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::_EnumerateEndpointDevicesAll(
+ EDataFlow dataFlow) const {
+ RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+ RTC_DCHECK(_ptrEnumerator);
+
+ HRESULT hr = S_OK;
+ IMMDeviceCollection* pCollection = NULL;
+ IMMDevice* pEndpoint = NULL;
+ IPropertyStore* pProps = NULL;
+ IAudioEndpointVolume* pEndpointVolume = NULL;
+ LPWSTR pwszID = NULL;
+
+ // Generate a collection of audio endpoint devices in the system.
+ // Get states for *all* endpoint devices.
+ // Output: IMMDeviceCollection interface.
+ hr = _ptrEnumerator->EnumAudioEndpoints(
+ dataFlow, // data-flow direction (input parameter)
+ DEVICE_STATE_ACTIVE | DEVICE_STATE_DISABLED | DEVICE_STATE_UNPLUGGED,
+ &pCollection); // release interface when done
+
+ EXIT_ON_ERROR(hr);
+
+ // use the IMMDeviceCollection interface...
+
+ UINT count = 0;
+
+ // Retrieve a count of the devices in the device collection.
+ hr = pCollection->GetCount(&count);
+ EXIT_ON_ERROR(hr);
+ if (dataFlow == eRender)
+ RTC_LOG(LS_VERBOSE) << "#rendering endpoint devices (counting all): "
+ << count;
+ else if (dataFlow == eCapture)
+ RTC_LOG(LS_VERBOSE) << "#capturing endpoint devices (counting all): "
+ << count;
+
+  if (count == 0) {
+    SAFE_RELEASE(pCollection);
+    return 0;
+  }
+
+ // Each loop prints the name of an endpoint device.
+ for (ULONG i = 0; i < count; i++) {
+ RTC_LOG(LS_VERBOSE) << "Endpoint " << i << ":";
+
+ // Get pointer to endpoint number i.
+ // Output: IMMDevice interface.
+ hr = pCollection->Item(i, &pEndpoint);
+ CONTINUE_ON_ERROR(hr);
+
+ // use the IMMDevice interface of the specified endpoint device...
+
+ // Get the endpoint ID string (uniquely identifies the device among all
+ // audio endpoint devices)
+ hr = pEndpoint->GetId(&pwszID);
+ CONTINUE_ON_ERROR(hr);
+ RTC_LOG(LS_VERBOSE) << "ID string : " << pwszID;
+
+ // Retrieve an interface to the device's property store.
+ // Output: IPropertyStore interface.
+ hr = pEndpoint->OpenPropertyStore(STGM_READ, &pProps);
+ CONTINUE_ON_ERROR(hr);
+
+ // use the IPropertyStore interface...
+
+ PROPVARIANT varName;
+ // Initialize container for property value.
+ PropVariantInit(&varName);
+
+ // Get the endpoint's friendly-name property.
+ // Example: "Speakers (Realtek High Definition Audio)"
+ hr = pProps->GetValue(PKEY_Device_FriendlyName, &varName);
+ CONTINUE_ON_ERROR(hr);
+ RTC_LOG(LS_VERBOSE) << "friendly name: \"" << varName.pwszVal << "\"";
+
+ // Get the endpoint's current device state
+ DWORD dwState;
+ hr = pEndpoint->GetState(&dwState);
+ CONTINUE_ON_ERROR(hr);
+ if (dwState & DEVICE_STATE_ACTIVE)
+ RTC_LOG(LS_VERBOSE) << "state (0x" << rtc::ToHex(dwState)
+ << ") : *ACTIVE*";
+ if (dwState & DEVICE_STATE_DISABLED)
+ RTC_LOG(LS_VERBOSE) << "state (0x" << rtc::ToHex(dwState)
+ << ") : DISABLED";
+ if (dwState & DEVICE_STATE_NOTPRESENT)
+ RTC_LOG(LS_VERBOSE) << "state (0x" << rtc::ToHex(dwState)
+ << ") : NOTPRESENT";
+ if (dwState & DEVICE_STATE_UNPLUGGED)
+ RTC_LOG(LS_VERBOSE) << "state (0x" << rtc::ToHex(dwState)
+ << ") : UNPLUGGED";
+
+ // Check the hardware volume capabilities.
+ DWORD dwHwSupportMask = 0;
+ hr = pEndpoint->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,
+ (void**)&pEndpointVolume);
+ CONTINUE_ON_ERROR(hr);
+ hr = pEndpointVolume->QueryHardwareSupport(&dwHwSupportMask);
+ CONTINUE_ON_ERROR(hr);
+ if (dwHwSupportMask & ENDPOINT_HARDWARE_SUPPORT_VOLUME)
+ // The audio endpoint device supports a hardware volume control
+ RTC_LOG(LS_VERBOSE) << "hwmask (0x" << rtc::ToHex(dwHwSupportMask)
+ << ") : HARDWARE_SUPPORT_VOLUME";
+ if (dwHwSupportMask & ENDPOINT_HARDWARE_SUPPORT_MUTE)
+ // The audio endpoint device supports a hardware mute control
+ RTC_LOG(LS_VERBOSE) << "hwmask (0x" << rtc::ToHex(dwHwSupportMask)
+ << ") : HARDWARE_SUPPORT_MUTE";
+ if (dwHwSupportMask & ENDPOINT_HARDWARE_SUPPORT_METER)
+ // The audio endpoint device supports a hardware peak meter
+ RTC_LOG(LS_VERBOSE) << "hwmask (0x" << rtc::ToHex(dwHwSupportMask)
+ << ") : HARDWARE_SUPPORT_METER";
+
+ // Check the channel count (#channels in the audio stream that enters or
+ // leaves the audio endpoint device)
+ UINT nChannelCount(0);
+ hr = pEndpointVolume->GetChannelCount(&nChannelCount);
+ CONTINUE_ON_ERROR(hr);
+ RTC_LOG(LS_VERBOSE) << "#channels : " << nChannelCount;
+
+ if (dwHwSupportMask & ENDPOINT_HARDWARE_SUPPORT_VOLUME) {
+ // Get the volume range.
+ float fLevelMinDB(0.0);
+ float fLevelMaxDB(0.0);
+ float fVolumeIncrementDB(0.0);
+ hr = pEndpointVolume->GetVolumeRange(&fLevelMinDB, &fLevelMaxDB,
+ &fVolumeIncrementDB);
+ CONTINUE_ON_ERROR(hr);
+ RTC_LOG(LS_VERBOSE) << "volume range : " << fLevelMinDB << " (min), "
+ << fLevelMaxDB << " (max), " << fVolumeIncrementDB
+ << " (inc) [dB]";
+
+ // The volume range from vmin = fLevelMinDB to vmax = fLevelMaxDB is
+ // divided into n uniform intervals of size vinc = fVolumeIncrementDB,
+      // where n = (vmax - vmin) / vinc. The values vmin, vmax, and vinc are
+ // measured in decibels. The client can set the volume level to one of n +
+ // 1 discrete values in the range from vmin to vmax.
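+      // Example: with fLevelMinDB = -65.25, fLevelMaxDB = 0.0 and
+      // fVolumeIncrementDB = 0.75 (a common endpoint configuration),
+      // n = (0.0 - (-65.25)) / 0.75 = 87 intervals.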
+ int n = (int)((fLevelMaxDB - fLevelMinDB) / fVolumeIncrementDB);
+ RTC_LOG(LS_VERBOSE) << "#intervals : " << n;
+
+ // Get information about the current step in the volume range.
+ // This method represents the volume level of the audio stream that enters
+ // or leaves the audio endpoint device as an index or "step" in a range of
+ // discrete volume levels. Output value nStepCount is the number of steps
+ // in the range. Output value nStep is the step index of the current
+ // volume level. If the number of steps is n = nStepCount, then step index
+      // nStep can assume values from 0 (minimum volume) to n - 1 (maximum
+ // volume).
+ UINT nStep(0);
+ UINT nStepCount(0);
+ hr = pEndpointVolume->GetVolumeStepInfo(&nStep, &nStepCount);
+ CONTINUE_ON_ERROR(hr);
+ RTC_LOG(LS_VERBOSE) << "volume steps : " << nStep << " (nStep), "
+ << nStepCount << " (nStepCount)";
+ }
+ Next:
+ if (FAILED(hr)) {
+ RTC_LOG(LS_VERBOSE) << "Error when logging device information";
+ }
+ CoTaskMemFree(pwszID);
+ pwszID = NULL;
+ PropVariantClear(&varName);
+ SAFE_RELEASE(pProps);
+ SAFE_RELEASE(pEndpoint);
+ SAFE_RELEASE(pEndpointVolume);
+ }
+ SAFE_RELEASE(pCollection);
+ return 0;
+
+Exit:
+ _TraceCOMError(hr);
+ CoTaskMemFree(pwszID);
+ pwszID = NULL;
+ SAFE_RELEASE(pCollection);
+ SAFE_RELEASE(pEndpoint);
+ SAFE_RELEASE(pEndpointVolume);
+ SAFE_RELEASE(pProps);
+ return -1;
+}
+
+// ----------------------------------------------------------------------------
+// _TraceCOMError
+// ----------------------------------------------------------------------------
+
+void AudioDeviceWindowsCore::_TraceCOMError(HRESULT hr) const {
+ wchar_t buf[MAXERRORLENGTH];
+ wchar_t errorText[MAXERRORLENGTH];
+
+ const DWORD dwFlags =
+ FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS;
+ const DWORD dwLangID = MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US);
+
+  // Gets the system's human-readable message string for this HRESULT.
+  // All error messages are in English by default.
+ DWORD messageLength = ::FormatMessageW(dwFlags, 0, hr, dwLangID, errorText,
+ MAXERRORLENGTH, NULL);
+
+ RTC_DCHECK_LE(messageLength, MAXERRORLENGTH);
+
+  // Trims trailing white space (FormatMessage() leaves a trailing CR/LF).
+ for (; messageLength && ::isspace(errorText[messageLength - 1]);
+ --messageLength) {
+ errorText[messageLength - 1] = '\0';
+ }
+
+ RTC_LOG(LS_ERROR) << "Core Audio method failed (hr=" << hr << ")";
+ StringCchPrintfW(buf, MAXERRORLENGTH, L"Error details: ");
+ StringCchCatW(buf, MAXERRORLENGTH, errorText);
+ RTC_LOG(LS_ERROR) << rtc::ToUtf8(buf);
+}
+
+bool AudioDeviceWindowsCore::KeyPressed() const {
+ int key_down = 0;
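+  // GetAsyncKeyState() sets the least significant bit if the key was pressed
+  // after the previous call, so OR-ing that bit across the
+  // VK_SPACE..VK_NUMLOCK range detects any recent key press.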
+ for (int key = VK_SPACE; key < VK_NUMLOCK; key++) {
+ short res = GetAsyncKeyState(key);
+ key_down |= res & 0x1; // Get the LSB
+ }
+ return (key_down > 0);
+}
+} // namespace webrtc
+
+#endif // WEBRTC_WINDOWS_CORE_AUDIO_BUILD
diff --git a/third_party/libwebrtc/modules/audio_device/win/audio_device_core_win.h b/third_party/libwebrtc/modules/audio_device/win/audio_device_core_win.h
new file mode 100644
index 0000000000..380effb449
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/win/audio_device_core_win.h
@@ -0,0 +1,299 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_WIN_AUDIO_DEVICE_CORE_WIN_H_
+#define MODULES_AUDIO_DEVICE_WIN_AUDIO_DEVICE_CORE_WIN_H_
+
+#if (_MSC_VER >= 1400) // only include for VS 2005 and higher
+
+#include <wmcodecdsp.h> // CLSID_CWMAudioAEC
+// (must be before audioclient.h)
+
+#include <audioclient.h> // WASAPI
+#include <audiopolicy.h>
+#include <avrt.h> // Avrt
+#include <endpointvolume.h>
+#include <mediaobj.h> // IMediaObject
+#include <mmdeviceapi.h> // MMDevice
+
+#include "api/scoped_refptr.h"
+#include "modules/audio_device/audio_device_generic.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/win/scoped_com_initializer.h"
+#include "rtc_base/win32.h"
+
+// Use Multimedia Class Scheduler Service (MMCSS) to boost the thread priority
+#pragma comment(lib, "avrt.lib")
+// AVRT function pointers
+typedef BOOL(WINAPI* PAvRevertMmThreadCharacteristics)(HANDLE);
+typedef HANDLE(WINAPI* PAvSetMmThreadCharacteristicsA)(LPCSTR, LPDWORD);
+typedef BOOL(WINAPI* PAvSetMmThreadPriority)(HANDLE, AVRT_PRIORITY);
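+// These entry points are typically resolved at runtime (LoadLibrary of
+// avrt.dll followed by GetProcAddress) so the code also runs on systems
+// without MMCSS; see `_avrtLibrary` and `_winSupportAvrt` below.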
+
+namespace webrtc {
+
+const float MAX_CORE_SPEAKER_VOLUME = 255.0f;
+const float MIN_CORE_SPEAKER_VOLUME = 0.0f;
+const float MAX_CORE_MICROPHONE_VOLUME = 255.0f;
+const float MIN_CORE_MICROPHONE_VOLUME = 0.0f;
+const uint16_t CORE_SPEAKER_VOLUME_STEP_SIZE = 1;
+const uint16_t CORE_MICROPHONE_VOLUME_STEP_SIZE = 1;
+
+class AudioDeviceWindowsCore : public AudioDeviceGeneric {
+ public:
+ AudioDeviceWindowsCore();
+ ~AudioDeviceWindowsCore();
+
+ static bool CoreAudioIsSupported();
+
+ // Retrieve the currently utilized audio layer
+ virtual int32_t ActiveAudioLayer(
+ AudioDeviceModule::AudioLayer& audioLayer) const;
+
+ // Main initializaton and termination
+ virtual InitStatus Init() RTC_LOCKS_EXCLUDED(mutex_);
+ virtual int32_t Terminate() RTC_LOCKS_EXCLUDED(mutex_);
+ virtual bool Initialized() const;
+
+ // Device enumeration
+ virtual int16_t PlayoutDevices() RTC_LOCKS_EXCLUDED(mutex_);
+ virtual int16_t RecordingDevices() RTC_LOCKS_EXCLUDED(mutex_);
+ virtual int32_t PlayoutDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize])
+ RTC_LOCKS_EXCLUDED(mutex_);
+ virtual int32_t RecordingDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize])
+ RTC_LOCKS_EXCLUDED(mutex_);
+
+ // Device selection
+ virtual int32_t SetPlayoutDevice(uint16_t index) RTC_LOCKS_EXCLUDED(mutex_);
+ virtual int32_t SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType device);
+ virtual int32_t SetRecordingDevice(uint16_t index) RTC_LOCKS_EXCLUDED(mutex_);
+ virtual int32_t SetRecordingDevice(
+ AudioDeviceModule::WindowsDeviceType device) RTC_LOCKS_EXCLUDED(mutex_);
+
+ // Audio transport initialization
+ virtual int32_t PlayoutIsAvailable(bool& available);
+ virtual int32_t InitPlayout() RTC_LOCKS_EXCLUDED(mutex_);
+ virtual bool PlayoutIsInitialized() const;
+ virtual int32_t RecordingIsAvailable(bool& available);
+ virtual int32_t InitRecording() RTC_LOCKS_EXCLUDED(mutex_);
+ virtual bool RecordingIsInitialized() const;
+
+ // Audio transport control
+ virtual int32_t StartPlayout() RTC_LOCKS_EXCLUDED(mutex_);
+ virtual int32_t StopPlayout() RTC_LOCKS_EXCLUDED(mutex_);
+ virtual bool Playing() const;
+ virtual int32_t StartRecording() RTC_LOCKS_EXCLUDED(mutex_);
+ virtual int32_t StopRecording();
+ virtual bool Recording() const;
+
+ // Audio mixer initialization
+ virtual int32_t InitSpeaker() RTC_LOCKS_EXCLUDED(mutex_);
+ virtual bool SpeakerIsInitialized() const;
+ virtual int32_t InitMicrophone() RTC_LOCKS_EXCLUDED(mutex_);
+ virtual bool MicrophoneIsInitialized() const;
+
+ // Speaker volume controls
+ virtual int32_t SpeakerVolumeIsAvailable(bool& available)
+ RTC_LOCKS_EXCLUDED(mutex_);
+ virtual int32_t SetSpeakerVolume(uint32_t volume) RTC_LOCKS_EXCLUDED(mutex_);
+ virtual int32_t SpeakerVolume(uint32_t& volume) const
+ RTC_LOCKS_EXCLUDED(mutex_);
+ virtual int32_t MaxSpeakerVolume(uint32_t& maxVolume) const;
+ virtual int32_t MinSpeakerVolume(uint32_t& minVolume) const;
+
+ // Microphone volume controls
+ virtual int32_t MicrophoneVolumeIsAvailable(bool& available)
+ RTC_LOCKS_EXCLUDED(mutex_);
+ virtual int32_t SetMicrophoneVolume(uint32_t volume)
+ RTC_LOCKS_EXCLUDED(mutex_, volume_mutex_);
+ virtual int32_t MicrophoneVolume(uint32_t& volume) const
+ RTC_LOCKS_EXCLUDED(mutex_, volume_mutex_);
+ virtual int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const;
+ virtual int32_t MinMicrophoneVolume(uint32_t& minVolume) const;
+
+ // Speaker mute control
+ virtual int32_t SpeakerMuteIsAvailable(bool& available)
+ RTC_LOCKS_EXCLUDED(mutex_);
+ virtual int32_t SetSpeakerMute(bool enable) RTC_LOCKS_EXCLUDED(mutex_);
+ virtual int32_t SpeakerMute(bool& enabled) const;
+
+ // Microphone mute control
+ virtual int32_t MicrophoneMuteIsAvailable(bool& available)
+ RTC_LOCKS_EXCLUDED(mutex_);
+ virtual int32_t SetMicrophoneMute(bool enable);
+ virtual int32_t MicrophoneMute(bool& enabled) const;
+
+ // Stereo support
+ virtual int32_t StereoPlayoutIsAvailable(bool& available);
+ virtual int32_t SetStereoPlayout(bool enable) RTC_LOCKS_EXCLUDED(mutex_);
+ virtual int32_t StereoPlayout(bool& enabled) const;
+ virtual int32_t StereoRecordingIsAvailable(bool& available);
+ virtual int32_t SetStereoRecording(bool enable) RTC_LOCKS_EXCLUDED(mutex_);
+ virtual int32_t StereoRecording(bool& enabled) const
+ RTC_LOCKS_EXCLUDED(mutex_);
+
+ // Delay information and control
+ virtual int32_t PlayoutDelay(uint16_t& delayMS) const
+ RTC_LOCKS_EXCLUDED(mutex_);
+
+ virtual bool BuiltInAECIsAvailable() const;
+
+ virtual int32_t EnableBuiltInAEC(bool enable);
+
+ public:
+ virtual void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
+
+ private:
+ bool KeyPressed() const;
+
+ private: // avrt function pointers
+ PAvRevertMmThreadCharacteristics _PAvRevertMmThreadCharacteristics;
+ PAvSetMmThreadCharacteristicsA _PAvSetMmThreadCharacteristicsA;
+ PAvSetMmThreadPriority _PAvSetMmThreadPriority;
+ HMODULE _avrtLibrary;
+ bool _winSupportAvrt;
+
+ private: // thread functions
+ int32_t InitSpeakerLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ int32_t InitMicrophoneLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ int16_t PlayoutDevicesLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ int16_t RecordingDevicesLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ DWORD InitCaptureThreadPriority();
+ void RevertCaptureThreadPriority();
+ static DWORD WINAPI WSAPICaptureThread(LPVOID context);
+ DWORD DoCaptureThread();
+
+ static DWORD WINAPI WSAPICaptureThreadPollDMO(LPVOID context);
+ DWORD DoCaptureThreadPollDMO() RTC_LOCKS_EXCLUDED(mutex_);
+
+ static DWORD WINAPI WSAPIRenderThread(LPVOID context);
+ DWORD DoRenderThread();
+
+ void _Lock();
+ void _UnLock();
+
+ int SetDMOProperties();
+
+ int SetBoolProperty(IPropertyStore* ptrPS,
+ REFPROPERTYKEY key,
+ VARIANT_BOOL value);
+
+ int SetVtI4Property(IPropertyStore* ptrPS, REFPROPERTYKEY key, LONG value);
+
+ int32_t _EnumerateEndpointDevicesAll(EDataFlow dataFlow) const;
+ void _TraceCOMError(HRESULT hr) const;
+
+ int32_t _RefreshDeviceList(EDataFlow dir);
+ int16_t _DeviceListCount(EDataFlow dir);
+ int32_t _GetDefaultDeviceName(EDataFlow dir,
+ ERole role,
+ LPWSTR szBuffer,
+ int bufferLen);
+ int32_t _GetListDeviceName(EDataFlow dir,
+ int index,
+ LPWSTR szBuffer,
+ int bufferLen);
+ int32_t _GetDeviceName(IMMDevice* pDevice, LPWSTR pszBuffer, int bufferLen);
+ int32_t _GetListDeviceID(EDataFlow dir,
+ int index,
+ LPWSTR szBuffer,
+ int bufferLen);
+ int32_t _GetDefaultDeviceID(EDataFlow dir,
+ ERole role,
+ LPWSTR szBuffer,
+ int bufferLen);
+ int32_t _GetDefaultDeviceIndex(EDataFlow dir, ERole role, int* index);
+ int32_t _GetDeviceID(IMMDevice* pDevice, LPWSTR pszBuffer, int bufferLen);
+ int32_t _GetDefaultDevice(EDataFlow dir, ERole role, IMMDevice** ppDevice);
+ int32_t _GetListDevice(EDataFlow dir, int index, IMMDevice** ppDevice);
+
+ int32_t InitRecordingDMO();
+
+ ScopedCOMInitializer _comInit;
+ AudioDeviceBuffer* _ptrAudioBuffer;
+ mutable Mutex mutex_;
+ mutable Mutex volume_mutex_ RTC_ACQUIRED_AFTER(mutex_);
+
+ IMMDeviceEnumerator* _ptrEnumerator;
+ IMMDeviceCollection* _ptrRenderCollection;
+ IMMDeviceCollection* _ptrCaptureCollection;
+ IMMDevice* _ptrDeviceOut;
+ IMMDevice* _ptrDeviceIn;
+
+ IAudioClient* _ptrClientOut;
+ IAudioClient* _ptrClientIn;
+ IAudioRenderClient* _ptrRenderClient;
+ IAudioCaptureClient* _ptrCaptureClient;
+ IAudioEndpointVolume* _ptrCaptureVolume;
+ ISimpleAudioVolume* _ptrRenderSimpleVolume;
+
+ // DirectX Media Object (DMO) for the built-in AEC.
+ rtc::scoped_refptr<IMediaObject> _dmo;
+ rtc::scoped_refptr<IMediaBuffer> _mediaBuffer;
+ bool _builtInAecEnabled;
+
+ HANDLE _hRenderSamplesReadyEvent;
+ HANDLE _hPlayThread;
+ HANDLE _hRenderStartedEvent;
+ HANDLE _hShutdownRenderEvent;
+
+ HANDLE _hCaptureSamplesReadyEvent;
+ HANDLE _hRecThread;
+ HANDLE _hCaptureStartedEvent;
+ HANDLE _hShutdownCaptureEvent;
+
+ HANDLE _hMmTask;
+
+ UINT _playAudioFrameSize;
+ uint32_t _playSampleRate;
+ uint32_t _devicePlaySampleRate;
+ uint32_t _playBlockSize;
+ uint32_t _devicePlayBlockSize;
+ uint32_t _playChannels;
+ uint32_t _sndCardPlayDelay;
+ UINT64 _writtenSamples;
+ UINT64 _readSamples;
+
+ UINT _recAudioFrameSize;
+ uint32_t _recSampleRate;
+ uint32_t _recBlockSize;
+ uint32_t _recChannels;
+
+ uint16_t _recChannelsPrioList[3];
+ uint16_t _playChannelsPrioList[2];
+
+ LARGE_INTEGER _perfCounterFreq;
+ double _perfCounterFactor;
+
+ private:
+ bool _initialized;
+ bool _recording;
+ bool _playing;
+ bool _recIsInitialized;
+ bool _playIsInitialized;
+ bool _speakerIsInitialized;
+ bool _microphoneIsInitialized;
+
+ bool _usingInputDeviceIndex;
+ bool _usingOutputDeviceIndex;
+ AudioDeviceModule::WindowsDeviceType _inputDevice;
+ AudioDeviceModule::WindowsDeviceType _outputDevice;
+ uint16_t _inputDeviceIndex;
+ uint16_t _outputDeviceIndex;
+};
+
+}  // namespace webrtc
+
+#endif  // #if (_MSC_VER >= 1400)
+
+#endif // MODULES_AUDIO_DEVICE_WIN_AUDIO_DEVICE_CORE_WIN_H_
diff --git a/third_party/libwebrtc/modules/audio_device/win/audio_device_module_win.cc b/third_party/libwebrtc/modules/audio_device/win/audio_device_module_win.cc
new file mode 100644
index 0000000000..a36c40735e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/win/audio_device_module_win.cc
@@ -0,0 +1,522 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/win/audio_device_module_win.h"
+
+#include <memory>
+#include <utility>
+
+#include "api/make_ref_counted.h"
+#include "api/sequence_checker.h"
+#include "modules/audio_device/audio_device_buffer.h"
+#include "modules/audio_device/include/audio_device.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/string_utils.h"
+
+namespace webrtc {
+namespace webrtc_win {
+namespace {
+
+#define RETURN_IF_OUTPUT_RESTARTS(...) \
+ do { \
+ if (output_->Restarting()) { \
+ return __VA_ARGS__; \
+ } \
+ } while (0)
+
+#define RETURN_IF_INPUT_RESTARTS(...) \
+ do { \
+ if (input_->Restarting()) { \
+ return __VA_ARGS__; \
+ } \
+ } while (0)
+
+#define RETURN_IF_OUTPUT_IS_INITIALIZED(...) \
+ do { \
+ if (output_->PlayoutIsInitialized()) { \
+ return __VA_ARGS__; \
+ } \
+ } while (0)
+
+#define RETURN_IF_INPUT_IS_INITIALIZED(...) \
+ do { \
+ if (input_->RecordingIsInitialized()) { \
+ return __VA_ARGS__; \
+ } \
+ } while (0)
+
+#define RETURN_IF_OUTPUT_IS_ACTIVE(...) \
+ do { \
+ if (output_->Playing()) { \
+ return __VA_ARGS__; \
+ } \
+ } while (0)
+
+#define RETURN_IF_INPUT_IS_ACTIVE(...) \
+ do { \
+ if (input_->Recording()) { \
+ return __VA_ARGS__; \
+ } \
+ } while (0)
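+
+// Example expansion: RETURN_IF_OUTPUT_RESTARTS(0) becomes
+//   do { if (output_->Restarting()) { return 0; } } while (0);
+// i.e. the method early-returns the given value while a restart is in flight.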
+
+// This class combines a generic instance of an AudioInput and a generic
+// instance of an AudioOutput to create an AudioDeviceModule. This is mostly
+// done by delegating to the audio input/output with some glue code. This class
+// also directly implements some of the AudioDeviceModule methods with dummy
+// implementations.
+//
+// An instance must be created, destroyed and used on one and the same thread,
+// i.e., all public methods must also be called on the same thread. A thread
+// checker will RTC_DCHECK if any method is called on an invalid thread.
+// TODO(henrika): is thread checking needed in AudioInput and AudioOutput?
+class WindowsAudioDeviceModule : public AudioDeviceModuleForTest {
+ public:
+ enum class InitStatus {
+ OK = 0,
+ PLAYOUT_ERROR = 1,
+ RECORDING_ERROR = 2,
+ OTHER_ERROR = 3,
+ NUM_STATUSES = 4
+ };
+
+ WindowsAudioDeviceModule(std::unique_ptr<AudioInput> audio_input,
+ std::unique_ptr<AudioOutput> audio_output,
+ TaskQueueFactory* task_queue_factory)
+ : input_(std::move(audio_input)),
+ output_(std::move(audio_output)),
+ task_queue_factory_(task_queue_factory) {
+ RTC_CHECK(input_);
+ RTC_CHECK(output_);
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ }
+
+ ~WindowsAudioDeviceModule() override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ Terminate();
+ }
+
+ WindowsAudioDeviceModule(const WindowsAudioDeviceModule&) = delete;
+ WindowsAudioDeviceModule& operator=(const WindowsAudioDeviceModule&) = delete;
+
+ int32_t ActiveAudioLayer(
+ AudioDeviceModule::AudioLayer* audioLayer) const override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ // TODO(henrika): it might be possible to remove this unique signature.
+ *audioLayer = AudioDeviceModule::kWindowsCoreAudio2;
+ return 0;
+ }
+
+ int32_t RegisterAudioCallback(AudioTransport* audioCallback) override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK(audio_device_buffer_);
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return audio_device_buffer_->RegisterAudioCallback(audioCallback);
+ }
+
+ int32_t Init() override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RETURN_IF_OUTPUT_RESTARTS(0);
+ RETURN_IF_INPUT_RESTARTS(0);
+ if (initialized_) {
+ return 0;
+ }
+ audio_device_buffer_ =
+ std::make_unique<AudioDeviceBuffer>(task_queue_factory_);
+ AttachAudioBuffer();
+ InitStatus status;
+ if (output_->Init() != 0) {
+ status = InitStatus::PLAYOUT_ERROR;
+ } else if (input_->Init() != 0) {
+ output_->Terminate();
+ status = InitStatus::RECORDING_ERROR;
+ } else {
+ initialized_ = true;
+ status = InitStatus::OK;
+ }
+ if (status != InitStatus::OK) {
+ RTC_LOG(LS_ERROR) << "Audio device initialization failed";
+ return -1;
+ }
+ return 0;
+ }
+
+ int32_t Terminate() override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RETURN_IF_OUTPUT_RESTARTS(0);
+ RETURN_IF_INPUT_RESTARTS(0);
+ if (!initialized_)
+ return 0;
+ int32_t err = input_->Terminate();
+ err |= output_->Terminate();
+ initialized_ = false;
+ RTC_DCHECK_EQ(err, 0);
+ return err;
+ }
+
+ bool Initialized() const override {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return initialized_;
+ }
+
+ int16_t PlayoutDevices() override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RETURN_IF_OUTPUT_RESTARTS(0);
+ return output_->NumDevices();
+ }
+
+ int16_t RecordingDevices() override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RETURN_IF_INPUT_RESTARTS(0);
+ return input_->NumDevices();
+ }
+
+ int32_t PlayoutDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RETURN_IF_OUTPUT_RESTARTS(0);
+ std::string name_str, guid_str;
+ int ret = -1;
+ if (guid != nullptr) {
+ ret = output_->DeviceName(index, &name_str, &guid_str);
+ rtc::strcpyn(guid, kAdmMaxGuidSize, guid_str.c_str());
+ } else {
+ ret = output_->DeviceName(index, &name_str, nullptr);
+ }
+ rtc::strcpyn(name, kAdmMaxDeviceNameSize, name_str.c_str());
+ return ret;
+ }
+ int32_t RecordingDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RETURN_IF_INPUT_RESTARTS(0);
+ std::string name_str, guid_str;
+ int ret = -1;
+ if (guid != nullptr) {
+ ret = input_->DeviceName(index, &name_str, &guid_str);
+ rtc::strcpyn(guid, kAdmMaxGuidSize, guid_str.c_str());
+ } else {
+ ret = input_->DeviceName(index, &name_str, nullptr);
+ }
+ rtc::strcpyn(name, kAdmMaxDeviceNameSize, name_str.c_str());
+ return ret;
+ }
+
+ int32_t SetPlayoutDevice(uint16_t index) override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RETURN_IF_OUTPUT_RESTARTS(0);
+ return output_->SetDevice(index);
+ }
+
+ int32_t SetPlayoutDevice(
+ AudioDeviceModule::WindowsDeviceType device) override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RETURN_IF_OUTPUT_RESTARTS(0);
+ return output_->SetDevice(device);
+ }
+ int32_t SetRecordingDevice(uint16_t index) override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return input_->SetDevice(index);
+ }
+
+ int32_t SetRecordingDevice(
+ AudioDeviceModule::WindowsDeviceType device) override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return input_->SetDevice(device);
+ }
+
+ int32_t PlayoutIsAvailable(bool* available) override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ *available = true;
+ return 0;
+ }
+
+ int32_t InitPlayout() override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RETURN_IF_OUTPUT_RESTARTS(0);
+ RETURN_IF_OUTPUT_IS_INITIALIZED(0);
+ return output_->InitPlayout();
+ }
+
+ bool PlayoutIsInitialized() const override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RETURN_IF_OUTPUT_RESTARTS(true);
+ return output_->PlayoutIsInitialized();
+ }
+
+ int32_t RecordingIsAvailable(bool* available) override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ *available = true;
+ return 0;
+ }
+
+ int32_t InitRecording() override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RETURN_IF_INPUT_RESTARTS(0);
+ RETURN_IF_INPUT_IS_INITIALIZED(0);
+ return input_->InitRecording();
+ }
+
+ bool RecordingIsInitialized() const override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RETURN_IF_INPUT_RESTARTS(true);
+ return input_->RecordingIsInitialized();
+ }
+
+ int32_t StartPlayout() override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RETURN_IF_OUTPUT_RESTARTS(0);
+ RETURN_IF_OUTPUT_IS_ACTIVE(0);
+ return output_->StartPlayout();
+ }
+
+ int32_t StopPlayout() override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RETURN_IF_OUTPUT_RESTARTS(-1);
+ return output_->StopPlayout();
+ }
+
+ bool Playing() const override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RETURN_IF_OUTPUT_RESTARTS(true);
+ return output_->Playing();
+ }
+
+ int32_t StartRecording() override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RETURN_IF_INPUT_RESTARTS(0);
+ RETURN_IF_INPUT_IS_ACTIVE(0);
+ return input_->StartRecording();
+ }
+
+ int32_t StopRecording() override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RETURN_IF_INPUT_RESTARTS(-1);
+ return input_->StopRecording();
+ }
+
+ bool Recording() const override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RETURN_IF_INPUT_RESTARTS(true);
+ return input_->Recording();
+ }
+
+ int32_t InitSpeaker() override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RTC_DLOG(LS_WARNING) << "This method has no effect";
+ return initialized_ ? 0 : -1;
+ }
+
+ bool SpeakerIsInitialized() const override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RTC_DLOG(LS_WARNING) << "This method has no effect";
+ return initialized_;
+ }
+
+ int32_t InitMicrophone() override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RTC_DLOG(LS_WARNING) << "This method has no effect";
+ return initialized_ ? 0 : -1;
+ }
+
+ bool MicrophoneIsInitialized() const override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RTC_DLOG(LS_WARNING) << "This method has no effect";
+ return initialized_;
+ }
+
+ int32_t SpeakerVolumeIsAvailable(bool* available) override {
+ // TODO(henrika): improve support.
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ *available = false;
+ return 0;
+ }
+
+ int32_t SetSpeakerVolume(uint32_t volume) override { return 0; }
+ int32_t SpeakerVolume(uint32_t* volume) const override { return 0; }
+ int32_t MaxSpeakerVolume(uint32_t* maxVolume) const override { return 0; }
+ int32_t MinSpeakerVolume(uint32_t* minVolume) const override { return 0; }
+
+ int32_t MicrophoneVolumeIsAvailable(bool* available) override {
+ // TODO(henrika): improve support.
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ *available = false;
+ return 0;
+ }
+
+ int32_t SetMicrophoneVolume(uint32_t volume) override { return 0; }
+ int32_t MicrophoneVolume(uint32_t* volume) const override { return 0; }
+ int32_t MaxMicrophoneVolume(uint32_t* maxVolume) const override { return 0; }
+ int32_t MinMicrophoneVolume(uint32_t* minVolume) const override { return 0; }
+
+ int32_t SpeakerMuteIsAvailable(bool* available) override { return 0; }
+ int32_t SetSpeakerMute(bool enable) override { return 0; }
+ int32_t SpeakerMute(bool* enabled) const override { return 0; }
+
+ int32_t MicrophoneMuteIsAvailable(bool* available) override { return 0; }
+ int32_t SetMicrophoneMute(bool enable) override { return 0; }
+ int32_t MicrophoneMute(bool* enabled) const override { return 0; }
+
+ int32_t StereoPlayoutIsAvailable(bool* available) const override {
+ // TODO(henrika): improve support.
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ *available = true;
+ return 0;
+ }
+
+ int32_t SetStereoPlayout(bool enable) override {
+ // TODO(henrika): improve support.
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return 0;
+ }
+
+ int32_t StereoPlayout(bool* enabled) const override {
+ // TODO(henrika): improve support.
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ *enabled = true;
+ return 0;
+ }
+
+ int32_t StereoRecordingIsAvailable(bool* available) const override {
+ // TODO(henrika): improve support.
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ *available = true;
+ return 0;
+ }
+
+ int32_t SetStereoRecording(bool enable) override {
+ // TODO(henrika): improve support.
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return 0;
+ }
+
+ int32_t StereoRecording(bool* enabled) const override {
+ // TODO(henrika): improve support.
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ *enabled = true;
+ return 0;
+ }
+
+ int32_t PlayoutDelay(uint16_t* delayMS) const override { return 0; }
+
+ bool BuiltInAECIsAvailable() const override { return false; }
+ bool BuiltInAGCIsAvailable() const override { return false; }
+ bool BuiltInNSIsAvailable() const override { return false; }
+
+ int32_t EnableBuiltInAEC(bool enable) override { return 0; }
+ int32_t EnableBuiltInAGC(bool enable) override { return 0; }
+ int32_t EnableBuiltInNS(bool enable) override { return 0; }
+
+ int32_t AttachAudioBuffer() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ output_->AttachAudioBuffer(audio_device_buffer_.get());
+ input_->AttachAudioBuffer(audio_device_buffer_.get());
+ return 0;
+ }
+
+ int RestartPlayoutInternally() override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RETURN_IF_OUTPUT_RESTARTS(0);
+ return output_->RestartPlayout();
+ }
+
+ int RestartRecordingInternally() override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return input_->RestartRecording();
+ }
+
+ int SetPlayoutSampleRate(uint32_t sample_rate) override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return output_->SetSampleRate(sample_rate);
+ }
+
+ int SetRecordingSampleRate(uint32_t sample_rate) override {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return input_->SetSampleRate(sample_rate);
+ }
+
+ private:
+ // Ensures that the class is used on the same thread as it is constructed
+ // and destroyed on.
+ SequenceChecker thread_checker_;
+
+ // Implements the AudioInput interface and deals with audio capturing parts.
+ const std::unique_ptr<AudioInput> input_;
+
+ // Implements the AudioOutput interface and deals with audio rendering parts.
+ const std::unique_ptr<AudioOutput> output_;
+
+ TaskQueueFactory* const task_queue_factory_;
+
+ // The AudioDeviceBuffer (ADB) instance is needed for sending/receiving audio
+ // to/from the WebRTC layer. Created and owned by this object. Used by
+ // both `input_` and `output_` but they use orthogonal parts of the ADB.
+ std::unique_ptr<AudioDeviceBuffer> audio_device_buffer_;
+
+ // Set to true after a successful call to Init(). Cleared by Terminate().
+ bool initialized_ = false;
+};
+
+} // namespace
+
+rtc::scoped_refptr<AudioDeviceModuleForTest>
+CreateWindowsCoreAudioAudioDeviceModuleFromInputAndOutput(
+ std::unique_ptr<AudioInput> audio_input,
+ std::unique_ptr<AudioOutput> audio_output,
+ TaskQueueFactory* task_queue_factory) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ return rtc::make_ref_counted<WindowsAudioDeviceModule>(
+ std::move(audio_input), std::move(audio_output), task_queue_factory);
+}
+
+} // namespace webrtc_win
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/win/audio_device_module_win.h b/third_party/libwebrtc/modules/audio_device/win/audio_device_module_win.h
new file mode 100644
index 0000000000..1ed0b25620
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/win/audio_device_module_win.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_WIN_AUDIO_DEVICE_MODULE_WIN_H_
+#define MODULES_AUDIO_DEVICE_WIN_AUDIO_DEVICE_MODULE_WIN_H_
+
+#include <memory>
+#include <string>
+
+#include "api/scoped_refptr.h"
+#include "api/task_queue/task_queue_factory.h"
+#include "modules/audio_device/include/audio_device.h"
+
+namespace webrtc {
+
+class AudioDeviceBuffer;
+
+namespace webrtc_win {
+
+// This interface represents the main input-related parts of the complete
+// AudioDeviceModule interface.
+class AudioInput {
+ public:
+ virtual ~AudioInput() {}
+
+ virtual int Init() = 0;
+ virtual int Terminate() = 0;
+ virtual int NumDevices() const = 0;
+ virtual int SetDevice(int index) = 0;
+ virtual int SetDevice(AudioDeviceModule::WindowsDeviceType device) = 0;
+ virtual int DeviceName(int index, std::string* name, std::string* guid) = 0;
+ virtual void AttachAudioBuffer(AudioDeviceBuffer* audio_buffer) = 0;
+ virtual bool RecordingIsInitialized() const = 0;
+ virtual int InitRecording() = 0;
+ virtual int StartRecording() = 0;
+ virtual int StopRecording() = 0;
+ virtual bool Recording() = 0;
+ virtual int VolumeIsAvailable(bool* available) = 0;
+ virtual int RestartRecording() = 0;
+ virtual bool Restarting() const = 0;
+ virtual int SetSampleRate(uint32_t sample_rate) = 0;
+};
+
+// This interface represents the main output-related parts of the complete
+// AudioDeviceModule interface.
+class AudioOutput {
+ public:
+ virtual ~AudioOutput() {}
+
+ virtual int Init() = 0;
+ virtual int Terminate() = 0;
+ virtual int NumDevices() const = 0;
+ virtual int SetDevice(int index) = 0;
+ virtual int SetDevice(AudioDeviceModule::WindowsDeviceType device) = 0;
+ virtual int DeviceName(int index, std::string* name, std::string* guid) = 0;
+ virtual void AttachAudioBuffer(AudioDeviceBuffer* audio_buffer) = 0;
+ virtual bool PlayoutIsInitialized() const = 0;
+ virtual int InitPlayout() = 0;
+ virtual int StartPlayout() = 0;
+ virtual int StopPlayout() = 0;
+ virtual bool Playing() = 0;
+ virtual int VolumeIsAvailable(bool* available) = 0;
+ virtual int RestartPlayout() = 0;
+ virtual bool Restarting() const = 0;
+ virtual int SetSampleRate(uint32_t sample_rate) = 0;
+};
+
+// Combines an AudioInput and an AudioOutput implementation to build an
+// AudioDeviceModule. Hides most parts of the full ADM interface.
+rtc::scoped_refptr<AudioDeviceModuleForTest>
+CreateWindowsCoreAudioAudioDeviceModuleFromInputAndOutput(
+ std::unique_ptr<AudioInput> audio_input,
+ std::unique_ptr<AudioOutput> audio_output,
+ TaskQueueFactory* task_queue_factory);
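+
+// A minimal usage sketch (illustrative only; the CoreAudioInput and
+// CoreAudioOutput classes and their `automatic_restart` constructor flag are
+// assumed from the sibling core_audio_input_win.h and core_audio_output_win.h
+// headers; check those for the exact signatures):
+//
+//   auto factory = webrtc::CreateDefaultTaskQueueFactory();
+//   auto adm = CreateWindowsCoreAudioAudioDeviceModuleFromInputAndOutput(
+//       std::make_unique<CoreAudioInput>(/*automatic_restart=*/true),
+//       std::make_unique<CoreAudioOutput>(/*automatic_restart=*/true),
+//       factory.get());
+//   adm->Init();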
+
+} // namespace webrtc_win
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_DEVICE_WIN_AUDIO_DEVICE_MODULE_WIN_H_
diff --git a/third_party/libwebrtc/modules/audio_device/win/core_audio_base_win.cc b/third_party/libwebrtc/modules/audio_device/win/core_audio_base_win.cc
new file mode 100644
index 0000000000..dc8526b625
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/win/core_audio_base_win.cc
@@ -0,0 +1,948 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/win/core_audio_base_win.h"
+
+#include <memory>
+#include <string>
+
+#include "absl/strings/string_view.h"
+#include "modules/audio_device/audio_device_buffer.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/time_utils.h"
+#include "rtc_base/win/scoped_com_initializer.h"
+#include "rtc_base/win/windows_version.h"
+
+using Microsoft::WRL::ComPtr;
+
+namespace webrtc {
+namespace webrtc_win {
+namespace {
+
+// Even if the device supports low latency and even if IAudioClient3 can be
+// used (requires Win10 or higher), we currently disable any attempts to
+// initialize the client for low-latency.
+// TODO(henrika): more research is needed before we can enable low-latency.
+const bool kEnableLowLatencyIfSupported = false;
+
+// Each unit of reference time is 100 nanoseconds, hence
+// `kReferenceTimesPerSecond` corresponds to one second.
+// TODO(henrika): possibly add usage in Init().
+// const REFERENCE_TIME kReferenceTimesPerSecond = 10000000;
+
+enum DefaultDeviceType {
+ kUndefined = -1,
+ kDefault = 0,
+ kDefaultCommunications = 1,
+ kDefaultDeviceTypeMaxCount = kDefaultCommunications + 1,
+};
+
+const char* DirectionToString(CoreAudioBase::Direction direction) {
+ switch (direction) {
+ case CoreAudioBase::Direction::kOutput:
+ return "Output";
+ case CoreAudioBase::Direction::kInput:
+ return "Input";
+ default:
+ return "Unkown";
+ }
+}
+
+const char* RoleToString(const ERole role) {
+ switch (role) {
+ case eConsole:
+ return "Console";
+ case eMultimedia:
+ return "Multimedia";
+ case eCommunications:
+ return "Communications";
+ default:
+ return "Unsupported";
+ }
+}
+
+std::string IndexToString(int index) {
+ std::string ss = std::to_string(index);
+ switch (index) {
+ case kDefault:
+ ss += " (Default)";
+ break;
+ case kDefaultCommunications:
+ ss += " (Communications)";
+ break;
+ default:
+ break;
+ }
+ return ss;
+}
+
+const char* SessionStateToString(AudioSessionState state) {
+ switch (state) {
+ case AudioSessionStateActive:
+ return "Active";
+ case AudioSessionStateInactive:
+ return "Inactive";
+ case AudioSessionStateExpired:
+ return "Expired";
+ default:
+ return "Invalid";
+ }
+}
+
+const char* SessionDisconnectReasonToString(
+ AudioSessionDisconnectReason reason) {
+ switch (reason) {
+ case DisconnectReasonDeviceRemoval:
+ return "DeviceRemoval";
+ case DisconnectReasonServerShutdown:
+ return "ServerShutdown";
+ case DisconnectReasonFormatChanged:
+ return "FormatChanged";
+ case DisconnectReasonSessionLogoff:
+ return "SessionLogoff";
+ case DisconnectReasonSessionDisconnected:
+ return "Disconnected";
+ case DisconnectReasonExclusiveModeOverride:
+ return "ExclusiveModeOverride";
+ default:
+ return "Invalid";
+ }
+}
+
+// Returns true if the selected audio device supports low latency, i.e, if it
+// is possible to initialize the engine using periods less than the default
+// period (10ms).
+bool IsLowLatencySupported(IAudioClient3* client3,
+ const WAVEFORMATEXTENSIBLE* format,
+ uint32_t* min_period_in_frames) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+
+ // Get the range of periodicities supported by the engine for the specified
+ // stream format.
+ uint32_t default_period = 0;
+ uint32_t fundamental_period = 0;
+ uint32_t min_period = 0;
+ uint32_t max_period = 0;
+ if (FAILED(core_audio_utility::GetSharedModeEnginePeriod(
+ client3, format, &default_period, &fundamental_period, &min_period,
+ &max_period))) {
+ return false;
+ }
+
+ // Low latency is supported if the shortest allowed period is less than the
+ // default engine period.
+ // TODO(henrika): verify that this assumption is correct.
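+  // Example: an HDAudio driver typically reports a minimum period of 128
+  // frames against a default of 480 frames at 48 kHz, i.e. ~2.7 ms vs 10 ms.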
+ const bool low_latency = min_period < default_period;
+ RTC_LOG(LS_INFO) << "low_latency: " << low_latency;
+ *min_period_in_frames = low_latency ? min_period : 0;
+ return low_latency;
+}
+
+} // namespace
+
+CoreAudioBase::CoreAudioBase(Direction direction,
+ bool automatic_restart,
+ OnDataCallback data_callback,
+ OnErrorCallback error_callback)
+ : format_(),
+ direction_(direction),
+ automatic_restart_(automatic_restart),
+ on_data_callback_(data_callback),
+ on_error_callback_(error_callback),
+ device_index_(kUndefined),
+ is_restarting_(false) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << "[" << DirectionToString(direction)
+ << "]";
+ RTC_DLOG(LS_INFO) << "Automatic restart: " << automatic_restart;
+ RTC_DLOG(LS_INFO) << "Windows version: " << rtc::rtc_win::GetVersion();
+
+ // Create the event which the audio engine will signal each time a buffer
+ // becomes ready to be processed by the client.
+ audio_samples_event_.Set(CreateEvent(nullptr, false, false, nullptr));
+ RTC_DCHECK(audio_samples_event_.IsValid());
+
+ // Event to be set in Stop() when rendering/capturing shall stop.
+ stop_event_.Set(CreateEvent(nullptr, false, false, nullptr));
+ RTC_DCHECK(stop_event_.IsValid());
+
+ // Event to be set when it has been detected that an active device has been
+ // invalidated or the stream format has changed.
+ restart_event_.Set(CreateEvent(nullptr, false, false, nullptr));
+ RTC_DCHECK(restart_event_.IsValid());
+}
+
+CoreAudioBase::~CoreAudioBase() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_EQ(ref_count_, 1);
+}
+
+EDataFlow CoreAudioBase::GetDataFlow() const {
+ return direction_ == CoreAudioBase::Direction::kOutput ? eRender : eCapture;
+}
+
+bool CoreAudioBase::IsRestarting() const {
+ return is_restarting_;
+}
+
+int64_t CoreAudioBase::TimeSinceStart() const {
+ return rtc::TimeSince(start_time_);
+}
+
+int CoreAudioBase::NumberOfActiveDevices() const {
+ return core_audio_utility::NumberOfActiveDevices(GetDataFlow());
+}
+
+int CoreAudioBase::NumberOfEnumeratedDevices() const {
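+  // The enumerated list prepends the two DefaultDeviceType entries, e.g. two
+  // active devices yield four enumerated ones:
+  // {0: default, 1: default communications, 2: device #0, 3: device #1}.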
+ const int num_active = NumberOfActiveDevices();
+ return num_active > 0 ? num_active + kDefaultDeviceTypeMaxCount : 0;
+}
+
+void CoreAudioBase::ReleaseCOMObjects() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ // ComPtr::Reset() sets the ComPtr to nullptr releasing any previous
+ // reference.
+ if (audio_client_) {
+ audio_client_.Reset();
+ }
+ if (audio_clock_.Get()) {
+ audio_clock_.Reset();
+ }
+ if (audio_session_control_.Get()) {
+ audio_session_control_.Reset();
+ }
+}
+
+bool CoreAudioBase::IsDefaultDevice(int index) const {
+ return index == kDefault;
+}
+
+bool CoreAudioBase::IsDefaultCommunicationsDevice(int index) const {
+ return index == kDefaultCommunications;
+}
+
+bool CoreAudioBase::IsDefaultDeviceId(absl::string_view device_id) const {
+ // Returns true if `device_id` corresponds to the id of the default
+ // device. Note that, if only one device is available (or if the user has not
+  // explicitly set a default device), `device_id` will also match
+ // IsDefaultCommunicationsDeviceId().
+ return (IsInput() &&
+ (device_id == core_audio_utility::GetDefaultInputDeviceID())) ||
+ (IsOutput() &&
+ (device_id == core_audio_utility::GetDefaultOutputDeviceID()));
+}
+
+bool CoreAudioBase::IsDefaultCommunicationsDeviceId(
+ absl::string_view device_id) const {
+ // Returns true if `device_id` corresponds to the id of the default
+ // communication device. Note that, if only one device is available (or if
+ // the user has not explicitly set a communication device), `device_id` will
+  // also match IsDefaultDeviceId().
+ return (IsInput() &&
+ (device_id ==
+ core_audio_utility::GetCommunicationsInputDeviceID())) ||
+ (IsOutput() &&
+ (device_id == core_audio_utility::GetCommunicationsOutputDeviceID()));
+}
+
+bool CoreAudioBase::IsInput() const {
+ return direction_ == CoreAudioBase::Direction::kInput;
+}
+
+bool CoreAudioBase::IsOutput() const {
+ return direction_ == CoreAudioBase::Direction::kOutput;
+}
+
+std::string CoreAudioBase::GetDeviceID(int index) const {
+ if (index >= NumberOfEnumeratedDevices()) {
+ RTC_LOG(LS_ERROR) << "Invalid device index";
+ return std::string();
+ }
+
+ std::string device_id;
+ if (IsDefaultDevice(index)) {
+ device_id = IsInput() ? core_audio_utility::GetDefaultInputDeviceID()
+ : core_audio_utility::GetDefaultOutputDeviceID();
+ } else if (IsDefaultCommunicationsDevice(index)) {
+ device_id = IsInput()
+ ? core_audio_utility::GetCommunicationsInputDeviceID()
+ : core_audio_utility::GetCommunicationsOutputDeviceID();
+ } else {
+ AudioDeviceNames device_names;
+ bool ok = IsInput()
+ ? core_audio_utility::GetInputDeviceNames(&device_names)
+ : core_audio_utility::GetOutputDeviceNames(&device_names);
+ if (ok) {
+ device_id = device_names[index].unique_id;
+ }
+ }
+ return device_id;
+}
+
+int CoreAudioBase::SetDevice(int index) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << "[" << DirectionToString(direction())
+ << "]: index=" << IndexToString(index);
+ if (initialized_) {
+ return -1;
+ }
+
+ std::string device_id = GetDeviceID(index);
+ RTC_DLOG(LS_INFO) << "index=" << IndexToString(index)
+ << " => device_id: " << device_id;
+ device_index_ = index;
+ device_id_ = device_id;
+
+ return device_id_.empty() ? -1 : 0;
+}
+
+int CoreAudioBase::DeviceName(int index,
+ std::string* name,
+ std::string* guid) const {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << "[" << DirectionToString(direction())
+ << "]: index=" << IndexToString(index);
+ if (index > NumberOfEnumeratedDevices() - 1) {
+ RTC_LOG(LS_ERROR) << "Invalid device index";
+ return -1;
+ }
+
+ AudioDeviceNames device_names;
+ bool ok = IsInput() ? core_audio_utility::GetInputDeviceNames(&device_names)
+ : core_audio_utility::GetOutputDeviceNames(&device_names);
+  // Validate the index one extra time in case the size of the generated list
+ // did not match NumberOfEnumeratedDevices().
+ if (!ok || static_cast<int>(device_names.size()) <= index) {
+ RTC_LOG(LS_ERROR) << "Failed to get the device name";
+ return -1;
+ }
+
+ *name = device_names[index].device_name;
+ RTC_DLOG(LS_INFO) << "name: " << *name;
+ if (guid != nullptr) {
+ *guid = device_names[index].unique_id;
+ RTC_DLOG(LS_INFO) << "guid: " << *guid;
+ }
+ return 0;
+}
+
+bool CoreAudioBase::Init() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << "[" << DirectionToString(direction())
+ << "]";
+ RTC_DCHECK_GE(device_index_, 0);
+ RTC_DCHECK(!device_id_.empty());
+ RTC_DCHECK(audio_device_buffer_);
+ RTC_DCHECK(!audio_client_);
+ RTC_DCHECK(!audio_session_control_.Get());
+
+ // Use an existing combination of `device_index_` and `device_id_` to set
+ // parameters which are required to create an audio client. It is up to the
+ // parent class to set `device_index_` and `device_id_`.
+ std::string device_id = AudioDeviceName::kDefaultDeviceId;
+ ERole role = ERole();
+ if (IsDefaultDevice(device_index_)) {
+ role = eConsole;
+ } else if (IsDefaultCommunicationsDevice(device_index_)) {
+ role = eCommunications;
+ } else {
+ device_id = device_id_;
+ }
+ RTC_LOG(LS_INFO) << "Unique device identifier: device_id=" << device_id
+ << ", role=" << RoleToString(role);
+
+ // Create an IAudioClient interface which enables us to create and initialize
+ // an audio stream between an audio application and the audio engine.
+ ComPtr<IAudioClient> audio_client;
+ if (core_audio_utility::GetAudioClientVersion() == 3) {
+ RTC_DLOG(LS_INFO) << "Using IAudioClient3";
+ audio_client =
+ core_audio_utility::CreateClient3(device_id, GetDataFlow(), role);
+ } else if (core_audio_utility::GetAudioClientVersion() == 2) {
+ RTC_DLOG(LS_INFO) << "Using IAudioClient2";
+ audio_client =
+ core_audio_utility::CreateClient2(device_id, GetDataFlow(), role);
+ } else {
+ RTC_DLOG(LS_INFO) << "Using IAudioClient";
+ audio_client =
+ core_audio_utility::CreateClient(device_id, GetDataFlow(), role);
+ }
+ if (!audio_client) {
+ return false;
+ }
+
+ // Set extra client properties before initialization if the audio client
+ // supports it.
+ // TODO(henrika): evaluate effect(s) of making these changes. Also, perhaps
+ // these types of settings belongs to the client and not the utility parts.
+ if (core_audio_utility::GetAudioClientVersion() >= 2) {
+ if (FAILED(core_audio_utility::SetClientProperties(
+ static_cast<IAudioClient2*>(audio_client.Get())))) {
+ return false;
+ }
+ }
+
+ // Retrieve preferred audio input or output parameters for the given client
+ // and the specified client properties. Override the preferred rate if sample
+ // rate has been defined by the user. Rate conversion will be performed by
+ // the audio engine to match the client if needed.
+ AudioParameters params;
+ HRESULT res = sample_rate_ ? core_audio_utility::GetPreferredAudioParameters(
+ audio_client.Get(), &params, *sample_rate_)
+ : core_audio_utility::GetPreferredAudioParameters(
+ audio_client.Get(), &params);
+ if (FAILED(res)) {
+ return false;
+ }
+
+ // Define the output WAVEFORMATEXTENSIBLE format in `format_`.
+ WAVEFORMATEX* format = &format_.Format;
+ format->wFormatTag = WAVE_FORMAT_EXTENSIBLE;
+ // Check the preferred channel configuration and request implicit channel
+ // upmixing (audio engine extends from 2 to N channels internally) if the
+ // preferred number of channels is larger than two; i.e., initialize the
+ // stream in stereo even if the preferred configuration is multi-channel.
+ if (params.channels() <= 2) {
+ format->nChannels = rtc::dchecked_cast<WORD>(params.channels());
+ } else {
+ // TODO(henrika): ensure that this approach works on different multi-channel
+ // devices. Verified on:
+ // - Corsair VOID PRO Surround USB Adapter (supports 7.1)
+ RTC_LOG(LS_WARNING)
+ << "Using channel upmixing in WASAPI audio engine (2 => "
+ << params.channels() << ")";
+ format->nChannels = 2;
+ }
+ format->nSamplesPerSec = params.sample_rate();
+ format->wBitsPerSample = rtc::dchecked_cast<WORD>(params.bits_per_sample());
+ format->nBlockAlign = (format->wBitsPerSample / 8) * format->nChannels;
+ format->nAvgBytesPerSec = format->nSamplesPerSec * format->nBlockAlign;
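+  // Example: 16-bit stereo at 48 kHz gives nBlockAlign = (16 / 8) * 2 = 4
+  // bytes and nAvgBytesPerSec = 48000 * 4 = 192000.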
+ format->cbSize = sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX);
+ // Add the parts which are unique for the WAVE_FORMAT_EXTENSIBLE structure.
+ format_.Samples.wValidBitsPerSample =
+ rtc::dchecked_cast<WORD>(params.bits_per_sample());
+ format_.dwChannelMask =
+ format->nChannels == 1 ? KSAUDIO_SPEAKER_MONO : KSAUDIO_SPEAKER_STEREO;
+ format_.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
+ RTC_DLOG(LS_INFO) << core_audio_utility::WaveFormatToString(&format_);
+
+ // Verify that the format is supported but exclude the test if the default
+ // sample rate has been overridden. If so, the WASAPI audio engine will do
+ // any necessary conversions between the client format we have given it and
+ // the playback mix format or recording split format.
+ if (!sample_rate_) {
+ if (!core_audio_utility::IsFormatSupported(
+ audio_client.Get(), AUDCLNT_SHAREMODE_SHARED, &format_)) {
+ return false;
+ }
+ }
+
+ // Check if low-latency is supported and use special initialization if it is.
+ // Low-latency initialization requires these things:
+ // - IAudioClient3 (>= Win10)
+ // - HDAudio driver
+ // - kEnableLowLatencyIfSupported changed from false (default) to true.
+ // TODO(henrika): IsLowLatencySupported() returns AUDCLNT_E_UNSUPPORTED_FORMAT
+ // when `sample_rate_.has_value()` returns true if rate conversion is
+ // actually required (i.e., client asks for other than the default rate).
+ bool low_latency_support = false;
+ uint32_t min_period_in_frames = 0;
+ if (kEnableLowLatencyIfSupported &&
+ core_audio_utility::GetAudioClientVersion() >= 3) {
+ low_latency_support =
+ IsLowLatencySupported(static_cast<IAudioClient3*>(audio_client.Get()),
+ &format_, &min_period_in_frames);
+ }
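+  // IsLowLatencySupported() is assumed to derive `min_period_in_frames` from
+  // IAudioClient3::GetSharedModeEnginePeriod(), roughly as in this sketch:
+  //
+  //   UINT32 default_period = 0, fundamental_period = 0;
+  //   UINT32 min_period = 0, max_period = 0;
+  //   client3->GetSharedModeEnginePeriod(
+  //       reinterpret_cast<const WAVEFORMATEX*>(&format_), &default_period,
+  //       &fundamental_period, &min_period, &max_period);
+  //
+  // On an HDAudio driver at 48 kHz, `min_period` is typically 128 frames
+  // (about 2.67 ms) compared to the default period of 480 frames (10 ms).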
+
+ if (low_latency_support) {
+ RTC_DCHECK_GE(core_audio_utility::GetAudioClientVersion(), 3);
+ // Use IAudioClient3::InitializeSharedAudioStream() API to initialize a
+ // low-latency event-driven client. Request the smallest possible
+ // periodicity.
+ // TODO(henrika): evaluate this scheme in terms of CPU etc.
+ if (FAILED(core_audio_utility::SharedModeInitializeLowLatency(
+ static_cast<IAudioClient3*>(audio_client.Get()), &format_,
+ audio_samples_event_, min_period_in_frames,
+ sample_rate_.has_value(), &endpoint_buffer_size_frames_))) {
+ return false;
+ }
+ } else {
+ // Initialize the audio stream between the client and the device in shared
+ // mode using event-driven buffer handling. Also, using 0 as requested
+ // buffer size results in a default (minimum) endpoint buffer size.
+ // TODO(henrika): possibly increase `requested_buffer_size` to add
+ // robustness.
+ const REFERENCE_TIME requested_buffer_size = 0;
+ if (FAILED(core_audio_utility::SharedModeInitialize(
+ audio_client.Get(), &format_, audio_samples_event_,
+ requested_buffer_size, sample_rate_.has_value(),
+ &endpoint_buffer_size_frames_))) {
+ return false;
+ }
+ }
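+  // In the non-low-latency path, SharedModeInitialize() is expected to wrap
+  // the standard event-driven setup, roughly:
+  //
+  //   audio_client->Initialize(AUDCLNT_SHAREMODE_SHARED,
+  //                            AUDCLNT_STREAMFLAGS_EVENTCALLBACK | ...,
+  //                            requested_buffer_size, 0, &format_.Format,
+  //                            nullptr);
+  //   audio_client->SetEventHandle(audio_samples_event_.Get());
+  //   audio_client->GetBufferSize(&endpoint_buffer_size_frames_);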
+
+ // Check device period and the preferred buffer size and log a warning if
+ // WebRTC's buffer size is not an even divisor of the preferred buffer size
+ // in Core Audio.
+ // TODO(henrika): sort out if a non-perfect match really is an issue.
+ // TODO(henrika): compare with IAudioClient3::GetSharedModeEnginePeriod().
+ REFERENCE_TIME device_period;
+ if (FAILED(core_audio_utility::GetDevicePeriod(
+ audio_client.Get(), AUDCLNT_SHAREMODE_SHARED, &device_period))) {
+ return false;
+ }
+ const double device_period_in_seconds =
+ static_cast<double>(
+ core_audio_utility::ReferenceTimeToTimeDelta(device_period).ms()) /
+ 1000.0L;
+ const int preferred_frames_per_buffer =
+ static_cast<int>(params.sample_rate() * device_period_in_seconds + 0.5);
+ RTC_DLOG(LS_INFO) << "preferred_frames_per_buffer: "
+ << preferred_frames_per_buffer;
+ if (preferred_frames_per_buffer % params.frames_per_buffer()) {
+ RTC_LOG(LS_WARNING) << "Buffer size of " << params.frames_per_buffer()
+ << " is not an even divisor of "
+ << preferred_frames_per_buffer;
+ }
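+  // Example: with the common 10 ms shared-mode device period,
+  // `device_period` equals 100000 (in 100-ns units), so at 48 kHz
+  // preferred_frames_per_buffer becomes 480; WebRTC's 10 ms buffer of 480
+  // frames then divides it evenly and no warning is logged.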
+
+  // Create an AudioSessionControl interface given the initialized client.
+  // The IAudioSessionControl interface enables a client to configure the
+  // control parameters for an audio session and to monitor events in the
+  // session.
+ ComPtr<IAudioSessionControl> audio_session_control =
+ core_audio_utility::CreateAudioSessionControl(audio_client.Get());
+ if (!audio_session_control.Get()) {
+ return false;
+ }
+
+ // The Sndvol program displays volume and mute controls for sessions that
+ // are in the active and inactive states.
+ AudioSessionState state;
+ if (FAILED(audio_session_control->GetState(&state))) {
+ return false;
+ }
+ RTC_DLOG(LS_INFO) << "audio session state: " << SessionStateToString(state);
+ RTC_DCHECK_EQ(state, AudioSessionStateInactive);
+
+ // Register the client to receive notifications of session events, including
+ // changes in the stream state.
+ if (FAILED(audio_session_control->RegisterAudioSessionNotification(this))) {
+ return false;
+ }
+
+ // Store valid COM interfaces.
+ audio_client_ = audio_client;
+ audio_session_control_ = audio_session_control;
+
+ return true;
+}
+
+bool CoreAudioBase::Start() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << "[" << DirectionToString(direction())
+ << "]";
+ if (IsRestarting()) {
+ // Audio thread should be alive during internal restart since the restart
+ // callback is triggered on that thread and it also makes the restart
+ // sequence less complex.
+ RTC_DCHECK(!audio_thread_.empty());
+ }
+
+ // Start an audio thread but only if one does not already exist (which is the
+ // case during restart).
+ if (audio_thread_.empty()) {
+ const absl::string_view name =
+ IsInput() ? "wasapi_capture_thread" : "wasapi_render_thread";
+ audio_thread_ = rtc::PlatformThread::SpawnJoinable(
+ [this] { ThreadRun(); }, name,
+ rtc::ThreadAttributes().SetPriority(rtc::ThreadPriority::kRealtime));
+ RTC_DLOG(LS_INFO) << "Started thread with name: " << name
+ << " and handle: " << *audio_thread_.GetHandle();
+ }
+
+ // Start streaming data between the endpoint buffer and the audio engine.
+ _com_error error = audio_client_->Start();
+ if (FAILED(error.Error())) {
+ StopThread();
+ RTC_LOG(LS_ERROR) << "IAudioClient::Start failed: "
+ << core_audio_utility::ErrorToString(error);
+ return false;
+ }
+
+ start_time_ = rtc::TimeMillis();
+ num_data_callbacks_ = 0;
+
+ return true;
+}
+
+bool CoreAudioBase::Stop() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << "[" << DirectionToString(direction())
+ << "]";
+ RTC_DLOG(LS_INFO) << "total activity time: " << TimeSinceStart();
+
+ // Stop audio streaming.
+ _com_error error = audio_client_->Stop();
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioClient::Stop failed: "
+ << core_audio_utility::ErrorToString(error);
+ }
+ // Stop and destroy the audio thread but only when a restart attempt is not
+ // ongoing.
+ if (!IsRestarting()) {
+ StopThread();
+ }
+
+ // Flush all pending data and reset the audio clock stream position to 0.
+ error = audio_client_->Reset();
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioClient::Reset failed: "
+ << core_audio_utility::ErrorToString(error);
+ }
+
+ if (IsOutput()) {
+ // Extra safety check to ensure that the buffers are cleared.
+ // If the buffers are not cleared correctly, the next call to Start()
+ // would fail with AUDCLNT_E_BUFFER_ERROR at
+ // IAudioRenderClient::GetBuffer().
+ UINT32 num_queued_frames = 0;
+ audio_client_->GetCurrentPadding(&num_queued_frames);
+ RTC_DCHECK_EQ(0u, num_queued_frames);
+ }
+
+ // Delete the previous registration by the client to receive notifications
+ // about audio session events.
+ RTC_DLOG(LS_INFO) << "audio session state: "
+ << SessionStateToString(GetAudioSessionState());
+ error = audio_session_control_->UnregisterAudioSessionNotification(this);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR)
+ << "IAudioSessionControl::UnregisterAudioSessionNotification failed: "
+ << core_audio_utility::ErrorToString(error);
+ }
+
+ // To ensure that the restart process is as simple as possible, the audio
+ // thread is not destroyed during restart attempts triggered by internal
+ // error callbacks.
+ if (!IsRestarting()) {
+ thread_checker_audio_.Detach();
+ }
+
+ // Release all allocated COM interfaces to allow for a restart without
+ // intermediate destruction.
+ ReleaseCOMObjects();
+
+ return true;
+}
+
+bool CoreAudioBase::IsVolumeControlAvailable(bool* available) const {
+ // A valid IAudioClient is required to access the ISimpleAudioVolume interface
+ // properly. It is possible to use IAudioSessionManager::GetSimpleAudioVolume
+ // as well but we use the audio client here to ensure that the initialized
+ // audio session is visible under group box labeled "Applications" in
+ // Sndvol.exe.
+ if (!audio_client_) {
+ return false;
+ }
+
+ // Try to create an ISimpleAudioVolume instance.
+ ComPtr<ISimpleAudioVolume> audio_volume =
+ core_audio_utility::CreateSimpleAudioVolume(audio_client_.Get());
+ if (!audio_volume.Get()) {
+ RTC_DLOG(LS_ERROR) << "Volume control is not supported";
+ return false;
+ }
+
+ // Try to use the valid volume control.
+ float volume = 0.0;
+ _com_error error = audio_volume->GetMasterVolume(&volume);
+  if (error.Error() != S_OK) {
+    RTC_LOG(LS_ERROR) << "ISimpleAudioVolume::GetMasterVolume failed: "
+                      << core_audio_utility::ErrorToString(error);
+    *available = false;
+    return false;
+  }
+  RTC_DLOG(LS_INFO) << "master volume for output audio session: " << volume;
+
+  *available = true;
+  return true;
+}
+
+// Internal test method which can be used in tests to emulate a restart signal.
+// It simply sets the same event which is normally triggered by session and
+// device notifications. Hence, the emulated restart sequence covers most parts
+// of a real sequence except the actual device switch.
+bool CoreAudioBase::Restart() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << "[" << DirectionToString(direction())
+ << "]";
+ if (!automatic_restart()) {
+ return false;
+ }
+ is_restarting_ = true;
+ SetEvent(restart_event_.Get());
+ return true;
+}
+
+void CoreAudioBase::StopThread() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK(!IsRestarting());
+ if (!audio_thread_.empty()) {
+ RTC_DLOG(LS_INFO) << "Sets stop_event...";
+ SetEvent(stop_event_.Get());
+ RTC_DLOG(LS_INFO) << "PlatformThread::Finalize...";
+ audio_thread_.Finalize();
+
+ // Ensure that we don't quit the main thread loop immediately next
+ // time Start() is called.
+ ResetEvent(stop_event_.Get());
+ ResetEvent(restart_event_.Get());
+ }
+}
+
+bool CoreAudioBase::HandleRestartEvent() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << "[" << DirectionToString(direction())
+ << "]";
+ RTC_DCHECK_RUN_ON(&thread_checker_audio_);
+ RTC_DCHECK(!audio_thread_.empty());
+ RTC_DCHECK(IsRestarting());
+ // Let each client (input and/or output) take care of its own restart
+ // sequence since each side might need unique actions.
+ // TODO(henrika): revisit and investigate if one common base implementation
+// is possible.
+ bool restart_ok = on_error_callback_(ErrorType::kStreamDisconnected);
+ is_restarting_ = false;
+ return restart_ok;
+}
+
+bool CoreAudioBase::SwitchDeviceIfNeeded() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << "[" << DirectionToString(direction())
+ << "]";
+ RTC_DCHECK_RUN_ON(&thread_checker_audio_);
+ RTC_DCHECK(IsRestarting());
+
+ RTC_DLOG(LS_INFO) << "device_index=" << device_index_
+ << " => device_id: " << device_id_;
+
+ // Ensure that at least one device exists and can be utilized. The most
+ // probable cause for ending up here is that a device has been removed.
+ if (core_audio_utility::NumberOfActiveDevices(IsInput() ? eCapture
+ : eRender) < 1) {
+ RTC_DLOG(LS_ERROR) << "All devices are disabled or removed";
+ return false;
+ }
+
+ // Get the unique device ID for the index which is currently used. It seems
+ // safe to assume that if the ID is the same as the existing device ID, then
+ // the device configuration is the same as before.
+ std::string device_id = GetDeviceID(device_index_);
+ if (device_id != device_id_) {
+ RTC_LOG(LS_WARNING)
+ << "Device configuration has changed => changing device selection...";
+ // TODO(henrika): depending on the current state and how we got here, we
+ // must select a new device here.
+ if (SetDevice(kDefault) == -1) {
+ RTC_LOG(LS_WARNING) << "Failed to set new audio device";
+ return false;
+ }
+ } else {
+ RTC_LOG(LS_INFO)
+ << "Device configuration has not changed => keeping selected device";
+ }
+ return true;
+}
+
+AudioSessionState CoreAudioBase::GetAudioSessionState() const {
+ AudioSessionState state = AudioSessionStateInactive;
+ RTC_DCHECK(audio_session_control_.Get());
+ _com_error error = audio_session_control_->GetState(&state);
+ if (FAILED(error.Error())) {
+ RTC_DLOG(LS_ERROR) << "IAudioSessionControl::GetState failed: "
+ << core_audio_utility::ErrorToString(error);
+ }
+ return state;
+}
+
+// TODO(henrika): only used for debugging purposes currently.
+ULONG CoreAudioBase::AddRef() {
+ ULONG new_ref = InterlockedIncrement(&ref_count_);
+ // RTC_DLOG(LS_INFO) << "__AddRef => " << new_ref;
+ return new_ref;
+}
+
+// TODO(henrika): does not call delete this.
+ULONG CoreAudioBase::Release() {
+ ULONG new_ref = InterlockedDecrement(&ref_count_);
+ // RTC_DLOG(LS_INFO) << "__Release => " << new_ref;
+ return new_ref;
+}
+
+// TODO(henrika): can probably be replaced by "return S_OK" only.
+HRESULT CoreAudioBase::QueryInterface(REFIID iid, void** object) {
+ if (object == nullptr) {
+ return E_POINTER;
+ }
+ if (iid == IID_IUnknown || iid == __uuidof(IAudioSessionEvents)) {
+ *object = static_cast<IAudioSessionEvents*>(this);
+ return S_OK;
+ }
+ *object = nullptr;
+ return E_NOINTERFACE;
+}
+
+// IAudioSessionEvents::OnStateChanged.
+HRESULT CoreAudioBase::OnStateChanged(AudioSessionState new_state) {
+ RTC_DLOG(LS_INFO) << "___" << __FUNCTION__ << "["
+ << DirectionToString(direction())
+ << "] new_state: " << SessionStateToString(new_state);
+ return S_OK;
+}
+
+// When a session is disconnected because of a device removal or format change
+// event, we want to inform the audio thread about the lost audio session and
+// trigger an attempt to restart audio using a new (default) device.
+// This method is called on separate threads owned by the session manager and
+// it can happen that the same type of callback is called more than once for the
+// same event.
+HRESULT CoreAudioBase::OnSessionDisconnected(
+ AudioSessionDisconnectReason disconnect_reason) {
+ RTC_DLOG(LS_INFO) << "___" << __FUNCTION__ << "["
+ << DirectionToString(direction()) << "] reason: "
+ << SessionDisconnectReasonToString(disconnect_reason);
+ // Ignore changes in the audio session (don't try to restart) if the user
+ // has explicitly asked for this type of ADM during construction.
+ if (!automatic_restart()) {
+ RTC_DLOG(LS_WARNING) << "___Automatic restart is disabled";
+ return S_OK;
+ }
+
+ if (IsRestarting()) {
+ RTC_DLOG(LS_WARNING) << "___Ignoring since restart is already active";
+ return S_OK;
+ }
+
+ // By default, automatic restart is enabled and the restart event will be set
+ // below if the device was removed or the format was changed.
+ if (disconnect_reason == DisconnectReasonDeviceRemoval ||
+ disconnect_reason == DisconnectReasonFormatChanged) {
+ is_restarting_ = true;
+ SetEvent(restart_event_.Get());
+ }
+ return S_OK;
+}
+
+// IAudioSessionEvents::OnDisplayNameChanged
+HRESULT CoreAudioBase::OnDisplayNameChanged(LPCWSTR new_display_name,
+ LPCGUID event_context) {
+ return S_OK;
+}
+
+// IAudioSessionEvents::OnIconPathChanged
+HRESULT CoreAudioBase::OnIconPathChanged(LPCWSTR new_icon_path,
+ LPCGUID event_context) {
+ return S_OK;
+}
+
+// IAudioSessionEvents::OnSimpleVolumeChanged
+HRESULT CoreAudioBase::OnSimpleVolumeChanged(float new_simple_volume,
+ BOOL new_mute,
+ LPCGUID event_context) {
+ return S_OK;
+}
+
+// IAudioSessionEvents::OnChannelVolumeChanged
+HRESULT CoreAudioBase::OnChannelVolumeChanged(DWORD channel_count,
+ float new_channel_volumes[],
+ DWORD changed_channel,
+ LPCGUID event_context) {
+ return S_OK;
+}
+
+// IAudioSessionEvents::OnGroupingParamChanged
+HRESULT CoreAudioBase::OnGroupingParamChanged(LPCGUID new_grouping_param,
+ LPCGUID event_context) {
+ return S_OK;
+}
+
+void CoreAudioBase::ThreadRun() {
+ if (!core_audio_utility::IsMMCSSSupported()) {
+ RTC_LOG(LS_ERROR) << "MMCSS is not supported";
+ return;
+ }
+ RTC_DLOG(LS_INFO) << "[" << DirectionToString(direction())
+ << "] ThreadRun starts...";
+ // TODO(henrika): difference between "Pro Audio" and "Audio"?
+ ScopedMMCSSRegistration mmcss_registration(L"Pro Audio");
+ ScopedCOMInitializer com_initializer(ScopedCOMInitializer::kMTA);
+ RTC_DCHECK(mmcss_registration.Succeeded());
+ RTC_DCHECK(com_initializer.Succeeded());
+ RTC_DCHECK(stop_event_.IsValid());
+ RTC_DCHECK(audio_samples_event_.IsValid());
+
+ bool streaming = true;
+ bool error = false;
+ HANDLE wait_array[] = {stop_event_.Get(), restart_event_.Get(),
+ audio_samples_event_.Get()};
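+  // Note that WaitForMultipleObjects() returns the signaled handle with the
+  // lowest index when several are set, so the ordering above gives stop and
+  // restart events priority over pending audio-sample events.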
+
+ // The device frequency is the frequency generated by the hardware clock in
+ // the audio device. The GetFrequency() method reports a constant frequency.
+ UINT64 device_frequency = 0;
+ _com_error result(S_FALSE);
+ if (audio_clock_) {
+ RTC_DCHECK(IsOutput());
+ result = audio_clock_->GetFrequency(&device_frequency);
+ if (FAILED(result.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioClock::GetFrequency failed: "
+ << core_audio_utility::ErrorToString(result);
+ }
+ }
+
+ // Keep streaming audio until the stop event or the stream-switch event
+ // is signaled. An error event can also break the main thread loop.
+ while (streaming && !error) {
+ // Wait for a close-down event, stream-switch event or a new render event.
+ DWORD wait_result = WaitForMultipleObjects(arraysize(wait_array),
+ wait_array, false, INFINITE);
+ switch (wait_result) {
+ case WAIT_OBJECT_0 + 0:
+ // `stop_event_` has been set.
+ streaming = false;
+ break;
+ case WAIT_OBJECT_0 + 1:
+ // `restart_event_` has been set.
+ error = !HandleRestartEvent();
+ break;
+ case WAIT_OBJECT_0 + 2:
+ // `audio_samples_event_` has been set.
+ error = !on_data_callback_(device_frequency);
+ break;
+ default:
+ error = true;
+ break;
+ }
+ }
+
+ if (streaming && error) {
+ RTC_LOG(LS_ERROR) << "[" << DirectionToString(direction())
+ << "] WASAPI streaming failed.";
+ // Stop audio streaming since something has gone wrong in our main thread
+    // loop. Note that we are still in a "started" state, hence a Stop() call
+ // is required to join the thread properly.
+ result = audio_client_->Stop();
+ if (FAILED(result.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioClient::Stop failed: "
+ << core_audio_utility::ErrorToString(result);
+ }
+
+ // TODO(henrika): notify clients that something has gone wrong and that
+ // this stream should be destroyed instead of reused in the future.
+ }
+
+ RTC_DLOG(LS_INFO) << "[" << DirectionToString(direction())
+ << "] ...ThreadRun stops";
+}
+
+} // namespace webrtc_win
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/win/core_audio_base_win.h b/third_party/libwebrtc/modules/audio_device/win/core_audio_base_win.h
new file mode 100644
index 0000000000..6c1357e059
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/win/core_audio_base_win.h
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_BASE_WIN_H_
+#define MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_BASE_WIN_H_
+
+#include <atomic>
+#include <functional>
+#include <memory>
+#include <string>
+
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "api/sequence_checker.h"
+#include "modules/audio_device/win/core_audio_utility_win.h"
+#include "rtc_base/platform_thread.h"
+
+namespace webrtc {
+
+class AudioDeviceBuffer;
+class FineAudioBuffer;
+
+namespace webrtc_win {
+
+// Serves as base class for CoreAudioInput and CoreAudioOutput and supports
+// device handling and audio streaming where the direction (input or output)
+// is set at construction by the parent.
+// The IAudioSessionEvents interface provides notifications of session-related
+// events such as changes in the volume level, display name, and session state.
+// This class does not use the default ref-counting memory management method
+// provided by IUnknown: calling CoreAudioBase::Release() will not delete the
+// object. The client will receive notification from the session manager on
+// a separate thread owned and controlled by the manager.
+// TODO(henrika): investigate if CoreAudioBase should implement
+// IMMNotificationClient as well (might improve support for device changes).
+class CoreAudioBase : public IAudioSessionEvents {
+ public:
+ enum class Direction {
+ kInput,
+ kOutput,
+ };
+
+ // TODO(henrika): add more error types.
+ enum class ErrorType {
+ kStreamDisconnected,
+ };
+
+ template <typename T>
+ auto as_integer(T const value) -> typename std::underlying_type<T>::type {
+ return static_cast<typename std::underlying_type<T>::type>(value);
+ }
+
+ // Callback definition for notifications of new audio data. For input clients,
+ // it means that "new audio data has now been captured", and for output
+ // clients, "the output layer now needs new audio data".
+ typedef std::function<bool(uint64_t device_frequency)> OnDataCallback;
+
+ // Callback definition for notifications of run-time error messages. It can
+ // be called e.g. when an active audio device is removed and an audio stream
+ // is disconnected (`error` is then set to kStreamDisconnected). Both input
+// and output clients implement OnErrorCallback() and will trigger an
+ // internal restart sequence for kStreamDisconnected.
+ // This method is currently always called on the audio thread.
+ // TODO(henrika): add support for more error types.
+ typedef std::function<bool(ErrorType error)> OnErrorCallback;
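+  // Derived classes forward these callbacks to their own handlers using
+  // lambdas, as in this sketch based on the input implementation:
+  //
+  //   CoreAudioInput::CoreAudioInput(bool automatic_restart)
+  //       : CoreAudioBase(
+  //             Direction::kInput, automatic_restart,
+  //             [this](uint64_t freq) { return OnDataCallback(freq); },
+  //             [this](ErrorType err) { return OnErrorCallback(err); }) {}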
+
+ void ThreadRun();
+
+ CoreAudioBase(const CoreAudioBase&) = delete;
+ CoreAudioBase& operator=(const CoreAudioBase&) = delete;
+
+ protected:
+ explicit CoreAudioBase(Direction direction,
+ bool automatic_restart,
+ OnDataCallback data_callback,
+ OnErrorCallback error_callback);
+ ~CoreAudioBase();
+
+ std::string GetDeviceID(int index) const;
+ int SetDevice(int index);
+ int DeviceName(int index, std::string* name, std::string* guid) const;
+
+ // Checks if the current device ID is no longer in use (e.g. due to a
+ // disconnected stream), and if so, switches device to the default audio
+ // device. Called on the audio thread during restart attempts.
+ bool SwitchDeviceIfNeeded();
+
+ bool Init();
+ bool Start();
+ bool Stop();
+ bool IsVolumeControlAvailable(bool* available) const;
+ bool Restart();
+
+ Direction direction() const { return direction_; }
+ bool automatic_restart() const { return automatic_restart_; }
+
+ // Releases all allocated COM resources in the base class.
+ void ReleaseCOMObjects();
+
+ // Returns number of active devices given the specified `direction_` set
+ // by the parent (input or output).
+ int NumberOfActiveDevices() const;
+
+ // Returns total number of enumerated audio devices which is the sum of all
+ // active devices plus two extra (one default and one default
+ // communications). The value in `direction_` determines if capture or
+ // render devices are counted.
+ int NumberOfEnumeratedDevices() const;
+
+ bool IsInput() const;
+ bool IsOutput() const;
+ bool IsDefaultDevice(int index) const;
+ bool IsDefaultCommunicationsDevice(int index) const;
+ bool IsDefaultDeviceId(absl::string_view device_id) const;
+ bool IsDefaultCommunicationsDeviceId(absl::string_view device_id) const;
+ EDataFlow GetDataFlow() const;
+ bool IsRestarting() const;
+ int64_t TimeSinceStart() const;
+
+ // TODO(henrika): is the existing thread checker in WindowsAudioDeviceModule
+ // sufficient? As is, we have one top-level protection and then a second
+ // level here. In addition, calls to Init(), Start() and Stop() are not
+ // included to allow for support of internal restart (where these methods are
+ // called on the audio thread).
+ SequenceChecker thread_checker_;
+ SequenceChecker thread_checker_audio_;
+ AudioDeviceBuffer* audio_device_buffer_ = nullptr;
+ bool initialized_ = false;
+ WAVEFORMATEXTENSIBLE format_ = {};
+ uint32_t endpoint_buffer_size_frames_ = 0;
+ Microsoft::WRL::ComPtr<IAudioClock> audio_clock_;
+ Microsoft::WRL::ComPtr<IAudioClient> audio_client_;
+ bool is_active_ = false;
+ int64_t num_data_callbacks_ = 0;
+ int latency_ms_ = 0;
+ absl::optional<uint32_t> sample_rate_;
+
+ private:
+ const Direction direction_;
+ const bool automatic_restart_;
+ const OnDataCallback on_data_callback_;
+ const OnErrorCallback on_error_callback_;
+ ScopedHandle audio_samples_event_;
+ ScopedHandle stop_event_;
+ ScopedHandle restart_event_;
+ int64_t start_time_ = 0;
+ std::string device_id_;
+ int device_index_ = -1;
+ // Used by the IAudioSessionEvents implementations. Currently only utilized
+ // for debugging purposes.
+ LONG ref_count_ = 1;
+  // Set when the restart process starts and cleared when it completes
+  // successfully. Accessed atomically.
+ std::atomic<bool> is_restarting_;
+ rtc::PlatformThread audio_thread_;
+ Microsoft::WRL::ComPtr<IAudioSessionControl> audio_session_control_;
+
+ void StopThread();
+ AudioSessionState GetAudioSessionState() const;
+
+ // Called on the audio thread when a restart event has been set.
+ // It will then trigger calls to the installed error callbacks with error
+ // type set to kStreamDisconnected.
+ bool HandleRestartEvent();
+
+ // IUnknown (required by IAudioSessionEvents and IMMNotificationClient).
+ ULONG __stdcall AddRef() override;
+ ULONG __stdcall Release() override;
+ HRESULT __stdcall QueryInterface(REFIID iid, void** object) override;
+
+ // IAudioSessionEvents implementation.
+ // These methods are called on separate threads owned by the session manager.
+ // More than one thread can be involved depending on the type of callback
+ // and audio session.
+ HRESULT __stdcall OnStateChanged(AudioSessionState new_state) override;
+ HRESULT __stdcall OnSessionDisconnected(
+ AudioSessionDisconnectReason disconnect_reason) override;
+ HRESULT __stdcall OnDisplayNameChanged(LPCWSTR new_display_name,
+ LPCGUID event_context) override;
+ HRESULT __stdcall OnIconPathChanged(LPCWSTR new_icon_path,
+ LPCGUID event_context) override;
+ HRESULT __stdcall OnSimpleVolumeChanged(float new_simple_volume,
+ BOOL new_mute,
+ LPCGUID event_context) override;
+ HRESULT __stdcall OnChannelVolumeChanged(DWORD channel_count,
+ float new_channel_volumes[],
+ DWORD changed_channel,
+ LPCGUID event_context) override;
+ HRESULT __stdcall OnGroupingParamChanged(LPCGUID new_grouping_param,
+ LPCGUID event_context) override;
+};
+
+} // namespace webrtc_win
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_BASE_WIN_H_
diff --git a/third_party/libwebrtc/modules/audio_device/win/core_audio_input_win.cc b/third_party/libwebrtc/modules/audio_device/win/core_audio_input_win.cc
new file mode 100644
index 0000000000..17790dafc4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/win/core_audio_input_win.cc
@@ -0,0 +1,453 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/win/core_audio_input_win.h"
+
+#include <memory>
+
+#include "modules/audio_device/audio_device_buffer.h"
+#include "modules/audio_device/fine_audio_buffer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+using Microsoft::WRL::ComPtr;
+
+namespace webrtc {
+namespace webrtc_win {
+
+enum AudioDeviceMessageType : uint32_t {
+ kMessageInputStreamDisconnected,
+};
+
+CoreAudioInput::CoreAudioInput(bool automatic_restart)
+ : CoreAudioBase(
+ CoreAudioBase::Direction::kInput,
+ automatic_restart,
+ [this](uint64_t freq) { return OnDataCallback(freq); },
+ [this](ErrorType err) { return OnErrorCallback(err); }) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ thread_checker_audio_.Detach();
+}
+
+CoreAudioInput::~CoreAudioInput() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+}
+
+int CoreAudioInput::Init() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return 0;
+}
+
+int CoreAudioInput::Terminate() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ StopRecording();
+ return 0;
+}
+
+int CoreAudioInput::NumDevices() const {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return core_audio_utility::NumberOfActiveDevices(eCapture);
+}
+
+int CoreAudioInput::SetDevice(int index) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << index;
+ RTC_DCHECK_GE(index, 0);
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return CoreAudioBase::SetDevice(index);
+}
+
+int CoreAudioInput::SetDevice(AudioDeviceModule::WindowsDeviceType device) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << ": "
+ << ((device == AudioDeviceModule::kDefaultDevice)
+ ? "Default"
+ : "DefaultCommunication");
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return SetDevice((device == AudioDeviceModule::kDefaultDevice) ? 0 : 1);
+}
+
+int CoreAudioInput::DeviceName(int index,
+ std::string* name,
+ std::string* guid) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << index;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RTC_DCHECK(name);
+ return CoreAudioBase::DeviceName(index, name, guid);
+}
+
+void CoreAudioInput::AttachAudioBuffer(AudioDeviceBuffer* audio_buffer) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ audio_device_buffer_ = audio_buffer;
+}
+
+bool CoreAudioInput::RecordingIsInitialized() const {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << initialized_;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return initialized_;
+}
+
+int CoreAudioInput::InitRecording() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK(!initialized_);
+ RTC_DCHECK(!Recording());
+ RTC_DCHECK(!audio_capture_client_);
+
+  // Creates an IAudioClient instance and stores the valid interface pointer
+  // in `audio_client_`; the highest supported version (IAudioClient3,
+  // IAudioClient2 or IAudioClient) is selected at creation. The base class
+  // will use optimal input parameters and do an event driven shared mode
+  // initialization. The utilized format will be stored in `format_` and can
+  // be used for configuration and allocation of audio buffers.
+ if (!CoreAudioBase::Init()) {
+ return -1;
+ }
+ RTC_DCHECK(audio_client_);
+
+ // Configure the recording side of the audio device buffer using `format_`
+ // after a trivial sanity check of the format structure.
+ RTC_DCHECK(audio_device_buffer_);
+ WAVEFORMATEX* format = &format_.Format;
+ RTC_DCHECK_EQ(format->wFormatTag, WAVE_FORMAT_EXTENSIBLE);
+ audio_device_buffer_->SetRecordingSampleRate(format->nSamplesPerSec);
+ audio_device_buffer_->SetRecordingChannels(format->nChannels);
+
+ // Create a modified audio buffer class which allows us to supply any number
+ // of samples (and not only multiple of 10ms) to match the optimal buffer
+ // size per callback used by Core Audio.
+ // TODO(henrika): can we share one FineAudioBuffer with the output side?
+ fine_audio_buffer_ = std::make_unique<FineAudioBuffer>(audio_device_buffer_);
+
+ // Create an IAudioCaptureClient for an initialized IAudioClient.
+ // The IAudioCaptureClient interface enables a client to read input data from
+ // a capture endpoint buffer.
+ ComPtr<IAudioCaptureClient> audio_capture_client =
+ core_audio_utility::CreateCaptureClient(audio_client_.Get());
+ if (!audio_capture_client) {
+ return -1;
+ }
+
+ // Query performance frequency.
+ LARGE_INTEGER ticks_per_sec = {};
+ qpc_to_100ns_.reset();
+ if (::QueryPerformanceFrequency(&ticks_per_sec)) {
+ double qpc_ticks_per_second =
+ rtc::dchecked_cast<double>(ticks_per_sec.QuadPart);
+ qpc_to_100ns_ = 10000000.0 / qpc_ticks_per_second;
+ }
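+  // On most modern Windows systems the performance counter runs at 10 MHz,
+  // in which case `qpc_to_100ns_` is exactly 1.0 and a raw QPC value already
+  // counts 100-ns units.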
+
+ // Store valid COM interfaces.
+ audio_capture_client_ = audio_capture_client;
+
+ initialized_ = true;
+ return 0;
+}
+
+int CoreAudioInput::StartRecording() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK(!Recording());
+ RTC_DCHECK(fine_audio_buffer_);
+ RTC_DCHECK(audio_device_buffer_);
+ if (!initialized_) {
+ RTC_DLOG(LS_WARNING)
+ << "Recording can not start since InitRecording must succeed first";
+ return 0;
+ }
+
+ fine_audio_buffer_->ResetRecord();
+ if (!IsRestarting()) {
+ audio_device_buffer_->StartRecording();
+ }
+
+ if (!Start()) {
+ return -1;
+ }
+
+ is_active_ = true;
+ return 0;
+}
+
+int CoreAudioInput::StopRecording() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ if (!initialized_) {
+ return 0;
+ }
+
+ // Release resources allocated in InitRecording() and then return if this
+ // method is called without any active input audio.
+ if (!Recording()) {
+ RTC_DLOG(LS_WARNING) << "No input stream is active";
+ ReleaseCOMObjects();
+ initialized_ = false;
+ return 0;
+ }
+
+ if (!Stop()) {
+ RTC_LOG(LS_ERROR) << "StopRecording failed";
+ return -1;
+ }
+
+ if (!IsRestarting()) {
+ RTC_DCHECK(audio_device_buffer_);
+ audio_device_buffer_->StopRecording();
+ }
+
+ // Release all allocated resources to allow for a restart without
+ // intermediate destruction.
+ ReleaseCOMObjects();
+ qpc_to_100ns_.reset();
+
+ initialized_ = false;
+ is_active_ = false;
+ return 0;
+}
+
+bool CoreAudioInput::Recording() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << is_active_;
+ return is_active_;
+}
+
+// TODO(henrika): finalize support of audio session volume control. As is, we
+// are not compatible with the old ADM implementation since it allows accessing
+// the volume control with any active audio output stream.
+int CoreAudioInput::VolumeIsAvailable(bool* available) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return IsVolumeControlAvailable(available) ? 0 : -1;
+}
+
+// Triggers the restart sequence. Only used for testing purposes to emulate
+// a real event where e.g. an active input device is removed.
+int CoreAudioInput::RestartRecording() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ if (!Recording()) {
+ return 0;
+ }
+
+ if (!Restart()) {
+ RTC_LOG(LS_ERROR) << "RestartRecording failed";
+ return -1;
+ }
+ return 0;
+}
+
+bool CoreAudioInput::Restarting() const {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return IsRestarting();
+}
+
+int CoreAudioInput::SetSampleRate(uint32_t sample_rate) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ sample_rate_ = sample_rate;
+ return 0;
+}
+
+void CoreAudioInput::ReleaseCOMObjects() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ CoreAudioBase::ReleaseCOMObjects();
+ if (audio_capture_client_.Get()) {
+ audio_capture_client_.Reset();
+ }
+}
+
+bool CoreAudioInput::OnDataCallback(uint64_t device_frequency) {
+ RTC_DCHECK_RUN_ON(&thread_checker_audio_);
+
+ if (!initialized_ || !is_active_) {
+    // This state is examined concurrently by multiple threads, so the check
+    // is inherently racy, but we should still be defensive and not use
+    // audio_capture_client_ if we know it is not there.
+ return false;
+ }
+ if (num_data_callbacks_ == 0) {
+ RTC_LOG(LS_INFO) << "--- Input audio stream is alive ---";
+ }
+ UINT32 num_frames_in_next_packet = 0;
+ _com_error error =
+ audio_capture_client_->GetNextPacketSize(&num_frames_in_next_packet);
+ if (error.Error() == AUDCLNT_E_DEVICE_INVALIDATED) {
+    // Avoid breaking the thread loop implicitly by returning false; return
+    // true instead for AUDCLNT_E_DEVICE_INVALIDATED even though it is a valid
+    // error code. We will use notifications about device changes instead to
+    // stop data callbacks and attempt to restart streaming.
+ RTC_DLOG(LS_ERROR) << "AUDCLNT_E_DEVICE_INVALIDATED";
+ return true;
+ }
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioCaptureClient::GetNextPacketSize failed: "
+ << core_audio_utility::ErrorToString(error);
+ return false;
+ }
+
+ // Drain the WASAPI capture buffer fully if audio has been recorded.
+ while (num_frames_in_next_packet > 0) {
+ uint8_t* audio_data;
+ UINT32 num_frames_to_read = 0;
+ DWORD flags = 0;
+ UINT64 device_position_frames = 0;
+ UINT64 capture_time_100ns = 0;
+ error = audio_capture_client_->GetBuffer(&audio_data, &num_frames_to_read,
+ &flags, &device_position_frames,
+ &capture_time_100ns);
+ if (error.Error() == AUDCLNT_S_BUFFER_EMPTY) {
+ // The call succeeded but no capture data is available to be read.
+      // Return and start waiting for a new capture event.
+ RTC_DCHECK_EQ(num_frames_to_read, 0u);
+ return true;
+ }
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioCaptureClient::GetBuffer failed: "
+ << core_audio_utility::ErrorToString(error);
+ return false;
+ }
+
+    // Update the input delay estimate, but only about once per second (every
+    // 100th callback at the typical 10 ms packet cadence) to save resources.
+    // The estimate is usually stable.
+ if (num_data_callbacks_ % 100 == 0) {
+ absl::optional<int> opt_record_delay_ms;
+ // TODO(henrika): note that FineAudioBuffer adds latency as well.
+ opt_record_delay_ms = EstimateLatencyMillis(capture_time_100ns);
+ if (opt_record_delay_ms) {
+ latency_ms_ = *opt_record_delay_ms;
+ } else {
+ RTC_DLOG(LS_WARNING) << "Input latency is set to fixed value";
+ latency_ms_ = 20;
+ }
+ }
+ if (num_data_callbacks_ % 500 == 0) {
+ RTC_DLOG(LS_INFO) << "latency: " << latency_ms_;
+ }
+
+ // The data in the packet is not correlated with the previous packet's
+ // device position; possibly due to a stream state transition or timing
+ // glitch. The behavior of the AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY flag
+ // is undefined on the application's first call to GetBuffer after Start.
+ if (device_position_frames != 0 &&
+ flags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY) {
+ RTC_DLOG(LS_WARNING) << "AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY";
+ }
+ // The time at which the device's stream position was recorded is uncertain.
+ // Thus, the client might be unable to accurately set a time stamp for the
+ // current data packet.
+ if (flags & AUDCLNT_BUFFERFLAGS_TIMESTAMP_ERROR) {
+ RTC_DLOG(LS_WARNING) << "AUDCLNT_BUFFERFLAGS_TIMESTAMP_ERROR";
+ }
+
+ // Treat all of the data in the packet as silence and ignore the actual
+ // data values when AUDCLNT_BUFFERFLAGS_SILENT is set.
+ if (flags & AUDCLNT_BUFFERFLAGS_SILENT) {
+ rtc::ExplicitZeroMemory(audio_data,
+ format_.Format.nBlockAlign * num_frames_to_read);
+ RTC_DLOG(LS_WARNING) << "Captured audio is replaced by silence";
+ } else {
+ // Copy recorded audio in `audio_data` to the WebRTC sink using the
+ // FineAudioBuffer object.
+      fine_audio_buffer_->DeliverRecordedData(
+          rtc::MakeArrayView(reinterpret_cast<const int16_t*>(audio_data),
+                             format_.Format.nChannels * num_frames_to_read),
+          latency_ms_);
+ }
+
+ error = audio_capture_client_->ReleaseBuffer(num_frames_to_read);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioCaptureClient::ReleaseBuffer failed: "
+ << core_audio_utility::ErrorToString(error);
+ return false;
+ }
+
+ error =
+ audio_capture_client_->GetNextPacketSize(&num_frames_in_next_packet);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioCaptureClient::GetNextPacketSize failed: "
+ << core_audio_utility::ErrorToString(error);
+ return false;
+ }
+ }
+ ++num_data_callbacks_;
+ return true;
+}
+
+bool CoreAudioInput::OnErrorCallback(ErrorType error) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << as_integer(error);
+ RTC_DCHECK_RUN_ON(&thread_checker_audio_);
+ if (error == CoreAudioBase::ErrorType::kStreamDisconnected) {
+ HandleStreamDisconnected();
+ } else {
+ RTC_DLOG(LS_WARNING) << "Unsupported error type";
+ }
+ return true;
+}
+
+absl::optional<int> CoreAudioInput::EstimateLatencyMillis(
+ uint64_t capture_time_100ns) {
+ if (!qpc_to_100ns_) {
+ return absl::nullopt;
+ }
+ // Input parameter `capture_time_100ns` contains the performance counter at
+ // the time that the audio endpoint device recorded the device position of
+ // the first audio frame in the data packet converted into 100ns units.
+ // We derive a delay estimate by:
+ // - sampling the current performance counter (qpc_now_raw),
+ // - converting it into 100ns time units (now_time_100ns), and
+ // - subtracting `capture_time_100ns` from now_time_100ns.
+ LARGE_INTEGER perf_counter_now = {};
+ if (!::QueryPerformanceCounter(&perf_counter_now)) {
+ return absl::nullopt;
+ }
+ uint64_t qpc_now_raw = perf_counter_now.QuadPart;
+ uint64_t now_time_100ns = qpc_now_raw * (*qpc_to_100ns_);
+ webrtc::TimeDelta delay_us = webrtc::TimeDelta::Micros(
+ 0.1 * (now_time_100ns - capture_time_100ns) + 0.5);
+ return delay_us.ms();
+}
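+// Example: with a 10 MHz performance counter (`qpc_to_100ns_` == 1.0), a
+// difference of 200000 units (100 ns each) between now_time_100ns and
+// `capture_time_100ns` yields 0.1 * 200000 = 20000 us, i.e. 20 ms of input
+// latency.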
+
+// Called from OnErrorCallback() when error type is kStreamDisconnected.
+// Note that this method is called on the audio thread and the internal restart
+// sequence is also executed on that same thread. The audio thread is therefore
+// not stopped during restart. Such a scheme also makes the restart process less
+// complex.
+// Note that none of the called methods are thread checked since they can also
+// be called on the main thread. Thread checkers are instead added on one layer
+// above (in audio_device_module.cc) which ensures that the public API is thread
+// safe.
+// TODO(henrika): add more details.
+bool CoreAudioInput::HandleStreamDisconnected() {
+ RTC_DLOG(LS_INFO) << "<<<--- " << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_audio_);
+ RTC_DCHECK(automatic_restart());
+
+ if (StopRecording() != 0) {
+ return false;
+ }
+
+ if (!SwitchDeviceIfNeeded()) {
+ return false;
+ }
+
+ if (InitRecording() != 0) {
+ return false;
+ }
+ if (StartRecording() != 0) {
+ return false;
+ }
+
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << " --->>>";
+ return true;
+}
+
+} // namespace webrtc_win
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/win/core_audio_input_win.h b/third_party/libwebrtc/modules/audio_device/win/core_audio_input_win.h
new file mode 100644
index 0000000000..be290f9f4e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/win/core_audio_input_win.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_INPUT_WIN_H_
+#define MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_INPUT_WIN_H_
+
+#include <memory>
+#include <string>
+
+#include "absl/types/optional.h"
+#include "modules/audio_device/win/audio_device_module_win.h"
+#include "modules/audio_device/win/core_audio_base_win.h"
+
+namespace webrtc {
+
+class AudioDeviceBuffer;
+class FineAudioBuffer;
+
+namespace webrtc_win {
+
+// Windows specific AudioInput implementation using a CoreAudioBase class where
+// an input direction is set at construction. Supports capture device handling
+// and streaming of captured audio to a WebRTC client.
+class CoreAudioInput final : public CoreAudioBase, public AudioInput {
+ public:
+ CoreAudioInput(bool automatic_restart);
+ ~CoreAudioInput() override;
+
+ // AudioInput implementation.
+ int Init() override;
+ int Terminate() override;
+ int NumDevices() const override;
+ int SetDevice(int index) override;
+ int SetDevice(AudioDeviceModule::WindowsDeviceType device) override;
+ int DeviceName(int index, std::string* name, std::string* guid) override;
+ void AttachAudioBuffer(AudioDeviceBuffer* audio_buffer) override;
+ bool RecordingIsInitialized() const override;
+ int InitRecording() override;
+ int StartRecording() override;
+ int StopRecording() override;
+ bool Recording() override;
+ int VolumeIsAvailable(bool* available) override;
+ int RestartRecording() override;
+ bool Restarting() const override;
+ int SetSampleRate(uint32_t sample_rate) override;
+
+ CoreAudioInput(const CoreAudioInput&) = delete;
+ CoreAudioInput& operator=(const CoreAudioInput&) = delete;
+
+ private:
+ void ReleaseCOMObjects();
+ bool OnDataCallback(uint64_t device_frequency);
+ bool OnErrorCallback(ErrorType error);
+ absl::optional<int> EstimateLatencyMillis(uint64_t capture_time_100ns);
+ bool HandleStreamDisconnected();
+
+ std::unique_ptr<FineAudioBuffer> fine_audio_buffer_;
+ Microsoft::WRL::ComPtr<IAudioCaptureClient> audio_capture_client_;
+ absl::optional<double> qpc_to_100ns_;
+};
+
+} // namespace webrtc_win
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_INPUT_WIN_H_
diff --git a/third_party/libwebrtc/modules/audio_device/win/core_audio_output_win.cc b/third_party/libwebrtc/modules/audio_device/win/core_audio_output_win.cc
new file mode 100644
index 0000000000..c92fedf0e9
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/win/core_audio_output_win.cc
@@ -0,0 +1,422 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/win/core_audio_output_win.h"
+
+#include <memory>
+
+#include "modules/audio_device/audio_device_buffer.h"
+#include "modules/audio_device/fine_audio_buffer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/time_utils.h"
+
+using Microsoft::WRL::ComPtr;
+
+namespace webrtc {
+namespace webrtc_win {
+
+CoreAudioOutput::CoreAudioOutput(bool automatic_restart)
+ : CoreAudioBase(
+ CoreAudioBase::Direction::kOutput,
+ automatic_restart,
+ [this](uint64_t freq) { return OnDataCallback(freq); },
+ [this](ErrorType err) { return OnErrorCallback(err); }) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ thread_checker_audio_.Detach();
+}
+
+CoreAudioOutput::~CoreAudioOutput() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ Terminate();
+}
+
+int CoreAudioOutput::Init() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return 0;
+}
+
+int CoreAudioOutput::Terminate() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ StopPlayout();
+ return 0;
+}
+
+int CoreAudioOutput::NumDevices() const {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return core_audio_utility::NumberOfActiveDevices(eRender);
+}
+
+int CoreAudioOutput::SetDevice(int index) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << index;
+ RTC_DCHECK_GE(index, 0);
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return CoreAudioBase::SetDevice(index);
+}
+
+int CoreAudioOutput::SetDevice(AudioDeviceModule::WindowsDeviceType device) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << ": "
+ << ((device == AudioDeviceModule::kDefaultDevice)
+ ? "Default"
+ : "DefaultCommunication");
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return SetDevice((device == AudioDeviceModule::kDefaultDevice) ? 0 : 1);
+}
+
+int CoreAudioOutput::DeviceName(int index,
+ std::string* name,
+ std::string* guid) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << index;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RTC_DCHECK(name);
+ return CoreAudioBase::DeviceName(index, name, guid);
+}
+
+void CoreAudioOutput::AttachAudioBuffer(AudioDeviceBuffer* audio_buffer) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ audio_device_buffer_ = audio_buffer;
+}
+
+bool CoreAudioOutput::PlayoutIsInitialized() const {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return initialized_;
+}
+
+int CoreAudioOutput::InitPlayout() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << IsRestarting();
+ RTC_DCHECK(!initialized_);
+ RTC_DCHECK(!Playing());
+ RTC_DCHECK(!audio_render_client_);
+
+  // Creates an IAudioClient instance and stores the valid interface pointer
+  // in `audio_client_`; the highest supported version (IAudioClient3,
+  // IAudioClient2 or IAudioClient) is selected at creation. The base class
+  // will use optimal output parameters and do an event driven shared mode
+  // initialization. The utilized format will be stored in `format_` and can
+  // be used for configuration and allocation of audio buffers.
+ if (!CoreAudioBase::Init()) {
+ return -1;
+ }
+ RTC_DCHECK(audio_client_);
+
+ // Configure the playout side of the audio device buffer using `format_`
+ // after a trivial sanity check of the format structure.
+ RTC_DCHECK(audio_device_buffer_);
+ WAVEFORMATEX* format = &format_.Format;
+ RTC_DCHECK_EQ(format->wFormatTag, WAVE_FORMAT_EXTENSIBLE);
+ audio_device_buffer_->SetPlayoutSampleRate(format->nSamplesPerSec);
+ audio_device_buffer_->SetPlayoutChannels(format->nChannels);
+
+ // Create a modified audio buffer class which allows us to ask for any number
+ // of samples (and not only multiple of 10ms) to match the optimal
+ // buffer size per callback used by Core Audio.
+ // TODO(henrika): can we share one FineAudioBuffer with the input side?
+ fine_audio_buffer_ = std::make_unique<FineAudioBuffer>(audio_device_buffer_);
+
+ // Create an IAudioRenderClient for an initialized IAudioClient.
+ // The IAudioRenderClient interface enables us to write output data to
+ // a rendering endpoint buffer.
+ ComPtr<IAudioRenderClient> audio_render_client =
+ core_audio_utility::CreateRenderClient(audio_client_.Get());
+ if (!audio_render_client.Get()) {
+ return -1;
+ }
+
+ ComPtr<IAudioClock> audio_clock =
+ core_audio_utility::CreateAudioClock(audio_client_.Get());
+ if (!audio_clock.Get()) {
+ return -1;
+ }
+
+ // Store valid COM interfaces.
+ audio_render_client_ = audio_render_client;
+ audio_clock_ = audio_clock;
+
+ initialized_ = true;
+ return 0;
+}
+
+int CoreAudioOutput::StartPlayout() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << IsRestarting();
+ RTC_DCHECK(!Playing());
+ RTC_DCHECK(fine_audio_buffer_);
+ RTC_DCHECK(audio_device_buffer_);
+  if (!initialized_) {
+    RTC_DLOG(LS_WARNING)
+        << "Playout cannot start since InitPlayout must succeed first";
+    return 0;
+  }
+
+ fine_audio_buffer_->ResetPlayout();
+ if (!IsRestarting()) {
+ audio_device_buffer_->StartPlayout();
+ }
+
+ if (!core_audio_utility::FillRenderEndpointBufferWithSilence(
+ audio_client_.Get(), audio_render_client_.Get())) {
+ RTC_LOG(LS_WARNING) << "Failed to prepare output endpoint with silence";
+ }
+
+ num_frames_written_ = endpoint_buffer_size_frames_;
+
+ if (!Start()) {
+ return -1;
+ }
+
+ is_active_ = true;
+ return 0;
+}
+
+int CoreAudioOutput::StopPlayout() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << IsRestarting();
+ if (!initialized_) {
+ return 0;
+ }
+
+ // Release resources allocated in InitPlayout() and then return if this
+ // method is called without any active output audio.
+ if (!Playing()) {
+ RTC_DLOG(LS_WARNING) << "No output stream is active";
+ ReleaseCOMObjects();
+ initialized_ = false;
+ return 0;
+ }
+
+ if (!Stop()) {
+ RTC_LOG(LS_ERROR) << "StopPlayout failed";
+ return -1;
+ }
+
+ if (!IsRestarting()) {
+ RTC_DCHECK(audio_device_buffer_);
+ audio_device_buffer_->StopPlayout();
+ }
+
+ // Release all allocated resources to allow for a restart without
+ // intermediate destruction.
+ ReleaseCOMObjects();
+
+ initialized_ = false;
+ is_active_ = false;
+ return 0;
+}
+
+bool CoreAudioOutput::Playing() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << is_active_;
+ return is_active_;
+}
+
+// TODO(henrika): finalize support of audio session volume control. As is, we
+// are not compatible with the old ADM implementation since it allows accessing
+// the volume control with any active audio output stream.
+int CoreAudioOutput::VolumeIsAvailable(bool* available) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return IsVolumeControlAvailable(available) ? 0 : -1;
+}
+
+// Triggers the restart sequence. Only used for testing purposes to emulate
+// a real event where e.g. an active output device is removed.
+int CoreAudioOutput::RestartPlayout() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ if (!Playing()) {
+ return 0;
+ }
+ if (!Restart()) {
+ RTC_LOG(LS_ERROR) << "RestartPlayout failed";
+ return -1;
+ }
+ return 0;
+}
+
+bool CoreAudioOutput::Restarting() const {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return IsRestarting();
+}
+
+int CoreAudioOutput::SetSampleRate(uint32_t sample_rate) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ sample_rate_ = sample_rate;
+ return 0;
+}
+
+void CoreAudioOutput::ReleaseCOMObjects() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ CoreAudioBase::ReleaseCOMObjects();
+ if (audio_render_client_.Get()) {
+ audio_render_client_.Reset();
+ }
+}
+
+bool CoreAudioOutput::OnErrorCallback(ErrorType error) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << as_integer(error);
+ RTC_DCHECK_RUN_ON(&thread_checker_audio_);
+ if (!initialized_ || !Playing()) {
+ return true;
+ }
+
+ if (error == CoreAudioBase::ErrorType::kStreamDisconnected) {
+ HandleStreamDisconnected();
+ } else {
+ RTC_DLOG(LS_WARNING) << "Unsupported error type";
+ }
+ return true;
+}
+
+bool CoreAudioOutput::OnDataCallback(uint64_t device_frequency) {
+ RTC_DCHECK_RUN_ON(&thread_checker_audio_);
+ if (num_data_callbacks_ == 0) {
+ RTC_LOG(LS_INFO) << "--- Output audio stream is alive ---";
+ }
+ // Get the padding value which indicates the amount of valid unread data that
+ // the endpoint buffer currently contains.
+ UINT32 num_unread_frames = 0;
+ _com_error error = audio_client_->GetCurrentPadding(&num_unread_frames);
+ if (error.Error() == AUDCLNT_E_DEVICE_INVALIDATED) {
+    // Avoid breaking the thread loop implicitly by returning false; return
+    // true instead for AUDCLNT_E_DEVICE_INVALIDATED even though it is a valid
+    // error code. We will use notifications about device changes instead to
+    // stop data callbacks and attempt to restart streaming.
+ RTC_DLOG(LS_ERROR) << "AUDCLNT_E_DEVICE_INVALIDATED";
+ return true;
+ }
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioClient::GetCurrentPadding failed: "
+ << core_audio_utility::ErrorToString(error);
+ return false;
+ }
+
+ // Contains how much new data we can write to the buffer without the risk of
+ // overwriting previously written data that the audio engine has not yet read
+ // from the buffer. I.e., it is the maximum buffer size we can request when
+ // calling IAudioRenderClient::GetBuffer().
+ UINT32 num_requested_frames =
+ endpoint_buffer_size_frames_ - num_unread_frames;
+ if (num_requested_frames == 0) {
+ RTC_DLOG(LS_WARNING)
+ << "Audio thread is signaled but no new audio samples are needed";
+ return true;
+ }
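+  // Example: with a 480-frame endpoint buffer (10 ms at 48 kHz) and 96
+  // unread frames still in it, up to 384 new frames can be requested from
+  // IAudioRenderClient::GetBuffer() below.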
+
+ // Request all available space in the rendering endpoint buffer into which the
+ // client can later write an audio packet.
+ uint8_t* audio_data;
+ error = audio_render_client_->GetBuffer(num_requested_frames, &audio_data);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioRenderClient::GetBuffer failed: "
+ << core_audio_utility::ErrorToString(error);
+ return false;
+ }
+
+  // Update the output delay estimate, but only about once per second (every
+  // 100th callback at the typical 10 ms buffer cadence) to save resources.
+  // The estimate is usually stable.
+ if (num_data_callbacks_ % 100 == 0) {
+ // TODO(henrika): note that FineAudioBuffer adds latency as well.
+ latency_ms_ = EstimateOutputLatencyMillis(device_frequency);
+ if (num_data_callbacks_ % 500 == 0) {
+ RTC_DLOG(LS_INFO) << "latency: " << latency_ms_;
+ }
+ }
+
+ // Get audio data from WebRTC and write it to the allocated buffer in
+ // `audio_data`. The playout latency is not updated for each callback.
+ fine_audio_buffer_->GetPlayoutData(
+ rtc::MakeArrayView(reinterpret_cast<int16_t*>(audio_data),
+ num_requested_frames * format_.Format.nChannels),
+ latency_ms_);
+
+ // Release the buffer space acquired in IAudioRenderClient::GetBuffer.
+ error = audio_render_client_->ReleaseBuffer(num_requested_frames, 0);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioRenderClient::ReleaseBuffer failed: "
+ << core_audio_utility::ErrorToString(error);
+ return false;
+ }
+
+ num_frames_written_ += num_requested_frames;
+ ++num_data_callbacks_;
+
+ return true;
+}
+
+// TODO(henrika): IAudioClock2::GetDevicePosition could perhaps be used here
+// instead. Tried it once, but it crashed for capture devices.
+int CoreAudioOutput::EstimateOutputLatencyMillis(uint64_t device_frequency) {
+ UINT64 position = 0;
+ UINT64 qpc_position = 0;
+ int delay_ms = 0;
+ // Get the device position through output parameter `position`. This is the
+ // stream position of the sample that is currently playing through the
+ // speakers.
+ _com_error error = audio_clock_->GetPosition(&position, &qpc_position);
+ if (error.Error() == S_OK) {
+ // Number of frames already played out through the speaker.
+ const uint64_t num_played_out_frames =
+ format_.Format.nSamplesPerSec * position / device_frequency;
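+    // Illustrative example: with device_frequency = 10000000 (a 100 ns
+    // clock) and nSamplesPerSec = 48000, position = 5000000 maps to
+    // 48000 * 5000000 / 10000000 = 24000 played-out frames.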
+
+ // Number of frames that have been written to the buffer but not yet
+ // played out corresponding to the estimated latency measured in number
+ // of audio frames.
+ const uint64_t delay_frames = num_frames_written_ - num_played_out_frames;
+
+ // Convert latency in number of frames into milliseconds.
+ webrtc::TimeDelta delay =
+ webrtc::TimeDelta::Micros(delay_frames * rtc::kNumMicrosecsPerSec /
+ format_.Format.nSamplesPerSec);
+ delay_ms = delay.ms();
+ }
+ return delay_ms;
+}
+
+// Called from OnErrorCallback() when error type is kStreamDisconnected.
+// Note that this method is called on the audio thread and the internal restart
+// sequence is also executed on that same thread. The audio thread is therefore
+// not stopped during restart. Such a scheme also makes the restart process less
+// complex.
+// Note that none of the called methods are thread checked since they can also
+// be called on the main thread. Thread checkers are instead added one layer
+// above (in audio_device_module.cc), which ensures that the public API is
+// thread safe.
+// TODO(henrika): add more details.
+bool CoreAudioOutput::HandleStreamDisconnected() {
+ RTC_DLOG(LS_INFO) << "<<<--- " << __FUNCTION__;
+ RTC_DCHECK_RUN_ON(&thread_checker_audio_);
+ RTC_DCHECK(automatic_restart());
+
+ if (StopPlayout() != 0) {
+ return false;
+ }
+
+ if (!SwitchDeviceIfNeeded()) {
+ return false;
+ }
+
+ if (InitPlayout() != 0) {
+ return false;
+ }
+ if (StartPlayout() != 0) {
+ return false;
+ }
+
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << " --->>>";
+ return true;
+}
+
+} // namespace webrtc_win
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/win/core_audio_output_win.h b/third_party/libwebrtc/modules/audio_device/win/core_audio_output_win.h
new file mode 100644
index 0000000000..5a547498a3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/win/core_audio_output_win.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_OUTPUT_WIN_H_
+#define MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_OUTPUT_WIN_H_
+
+#include <memory>
+#include <string>
+
+#include "modules/audio_device/win/audio_device_module_win.h"
+#include "modules/audio_device/win/core_audio_base_win.h"
+
+namespace webrtc {
+
+class AudioDeviceBuffer;
+class FineAudioBuffer;
+
+namespace webrtc_win {
+
+// Windows-specific AudioOutput implementation built on the CoreAudioBase
+// class, with the direction set to output at construction. Supports render
+// device handling and streaming of decoded audio from a WebRTC client to the
+// native audio layer.
+class CoreAudioOutput final : public CoreAudioBase, public AudioOutput {
+ public:
+ CoreAudioOutput(bool automatic_restart);
+ ~CoreAudioOutput() override;
+
+ // AudioOutput implementation.
+ int Init() override;
+ int Terminate() override;
+ int NumDevices() const override;
+ int SetDevice(int index) override;
+ int SetDevice(AudioDeviceModule::WindowsDeviceType device) override;
+ int DeviceName(int index, std::string* name, std::string* guid) override;
+ void AttachAudioBuffer(AudioDeviceBuffer* audio_buffer) override;
+ bool PlayoutIsInitialized() const override;
+ int InitPlayout() override;
+ int StartPlayout() override;
+ int StopPlayout() override;
+ bool Playing() override;
+ int VolumeIsAvailable(bool* available) override;
+ int RestartPlayout() override;
+ bool Restarting() const override;
+ int SetSampleRate(uint32_t sample_rate) override;
+
+ CoreAudioOutput(const CoreAudioOutput&) = delete;
+ CoreAudioOutput& operator=(const CoreAudioOutput&) = delete;
+
+ private:
+ void ReleaseCOMObjects();
+ bool OnDataCallback(uint64_t device_frequency);
+ bool OnErrorCallback(ErrorType error);
+ int EstimateOutputLatencyMillis(uint64_t device_frequency);
+ bool HandleStreamDisconnected();
+
+ std::unique_ptr<FineAudioBuffer> fine_audio_buffer_;
+ Microsoft::WRL::ComPtr<IAudioRenderClient> audio_render_client_;
+ uint64_t num_frames_written_ = 0;
+};
+
+} // namespace webrtc_win
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_OUTPUT_WIN_H_
diff --git a/third_party/libwebrtc/modules/audio_device/win/core_audio_utility_win.cc b/third_party/libwebrtc/modules/audio_device/win/core_audio_utility_win.cc
new file mode 100644
index 0000000000..e4e2864db5
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/win/core_audio_utility_win.cc
@@ -0,0 +1,1529 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/win/core_audio_utility_win.h"
+
+#include <functiondiscoverykeys_devpkey.h>
+#include <stdio.h>
+#include <tchar.h>
+
+#include <iomanip>
+#include <string>
+#include <utility>
+
+#include "absl/strings/string_view.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/platform_thread_types.h"
+#include "rtc_base/string_utils.h"
+#include "rtc_base/strings/string_builder.h"
+#include "rtc_base/win/windows_version.h"
+
+using Microsoft::WRL::ComPtr;
+using webrtc::AudioDeviceName;
+using webrtc::AudioParameters;
+
+namespace webrtc {
+namespace webrtc_win {
+namespace {
+
+using core_audio_utility::ErrorToString;
+
+// Converts from channel mask to list of included channels.
+// Each audio data format contains channels for one or more of the positions
+// listed below. The number of channels simply equals the number of nonzero
+// flag bits in the `channel_mask`. The relative positions of the channels
+// within each block of audio data always follow the same relative ordering
+// as the flag bits in the table below. For example, if `channel_mask` contains
+// the value 0x00000033, the format defines four audio channels that are
+// assigned for playback to the front-left, front-right, back-left,
+// and back-right speakers, respectively. The channel data should be interleaved
+// in that order within each block.
+std::string ChannelMaskToString(DWORD channel_mask) {
+ std::string ss;
+ int n = 0;
+ if (channel_mask & SPEAKER_FRONT_LEFT) {
+ ss += "FRONT_LEFT | ";
+ ++n;
+ }
+ if (channel_mask & SPEAKER_FRONT_RIGHT) {
+ ss += "FRONT_RIGHT | ";
+ ++n;
+ }
+ if (channel_mask & SPEAKER_FRONT_CENTER) {
+ ss += "FRONT_CENTER | ";
+ ++n;
+ }
+ if (channel_mask & SPEAKER_LOW_FREQUENCY) {
+ ss += "LOW_FREQUENCY | ";
+ ++n;
+ }
+ if (channel_mask & SPEAKER_BACK_LEFT) {
+ ss += "BACK_LEFT | ";
+ ++n;
+ }
+ if (channel_mask & SPEAKER_BACK_RIGHT) {
+ ss += "BACK_RIGHT | ";
+ ++n;
+ }
+ if (channel_mask & SPEAKER_FRONT_LEFT_OF_CENTER) {
+ ss += "FRONT_LEFT_OF_CENTER | ";
+ ++n;
+ }
+ if (channel_mask & SPEAKER_FRONT_RIGHT_OF_CENTER) {
+ ss += "RIGHT_OF_CENTER | ";
+ ++n;
+ }
+ if (channel_mask & SPEAKER_BACK_CENTER) {
+ ss += "BACK_CENTER | ";
+ ++n;
+ }
+ if (channel_mask & SPEAKER_SIDE_LEFT) {
+ ss += "SIDE_LEFT | ";
+ ++n;
+ }
+ if (channel_mask & SPEAKER_SIDE_RIGHT) {
+ ss += "SIDE_RIGHT | ";
+ ++n;
+ }
+ if (channel_mask & SPEAKER_TOP_CENTER) {
+ ss += "TOP_CENTER | ";
+ ++n;
+ }
+ if (channel_mask & SPEAKER_TOP_FRONT_LEFT) {
+ ss += "TOP_FRONT_LEFT | ";
+ ++n;
+ }
+ if (channel_mask & SPEAKER_TOP_FRONT_CENTER) {
+ ss += "TOP_FRONT_CENTER | ";
+ ++n;
+ }
+ if (channel_mask & SPEAKER_TOP_FRONT_RIGHT) {
+ ss += "TOP_FRONT_RIGHT | ";
+ ++n;
+ }
+ if (channel_mask & SPEAKER_TOP_BACK_LEFT) {
+ ss += "TOP_BACK_LEFT | ";
+ ++n;
+ }
+ if (channel_mask & SPEAKER_TOP_BACK_CENTER) {
+ ss += "TOP_BACK_CENTER | ";
+ ++n;
+ }
+ if (channel_mask & SPEAKER_TOP_BACK_RIGHT) {
+ ss += "TOP_BACK_RIGHT | ";
+ ++n;
+ }
+
+ if (!ss.empty()) {
+ // Delete last appended " | " substring.
+ ss.erase(ss.end() - 3, ss.end());
+ }
+ ss += " (";
+ ss += std::to_string(n);
+ ss += ")";
+ return ss;
+}
+
+#if !defined(KSAUDIO_SPEAKER_1POINT1)
+// These values are only defined in ksmedia.h after a certain SDK version. To
+// build cleanly for older Windows versions, this defines the ones that are
+// missing.
+#define KSAUDIO_SPEAKER_1POINT1 (SPEAKER_FRONT_CENTER | SPEAKER_LOW_FREQUENCY)
+#define KSAUDIO_SPEAKER_2POINT1 \
+ (SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_LOW_FREQUENCY)
+#define KSAUDIO_SPEAKER_3POINT0 \
+ (SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER)
+#define KSAUDIO_SPEAKER_3POINT1 \
+ (SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | \
+ SPEAKER_LOW_FREQUENCY)
+#define KSAUDIO_SPEAKER_5POINT0 \
+ (SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | \
+ SPEAKER_SIDE_LEFT | SPEAKER_SIDE_RIGHT)
+#define KSAUDIO_SPEAKER_7POINT0 \
+ (SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | \
+ SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT | SPEAKER_SIDE_LEFT | \
+ SPEAKER_SIDE_RIGHT)
+#endif
+
+#if !defined(AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY)
+#define AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY 0x08000000
+#define AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM 0x80000000
+#endif
+
+// Converts the most common format tags defined in mmreg.h into string
+// equivalents. Mainly intended for log messages.
+const char* WaveFormatTagToString(WORD format_tag) {
+ switch (format_tag) {
+ case WAVE_FORMAT_UNKNOWN:
+ return "WAVE_FORMAT_UNKNOWN";
+ case WAVE_FORMAT_PCM:
+ return "WAVE_FORMAT_PCM";
+ case WAVE_FORMAT_IEEE_FLOAT:
+ return "WAVE_FORMAT_IEEE_FLOAT";
+ case WAVE_FORMAT_EXTENSIBLE:
+ return "WAVE_FORMAT_EXTENSIBLE";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+const char* RoleToString(const ERole role) {
+ switch (role) {
+ case eConsole:
+ return "Console";
+ case eMultimedia:
+ return "Multimedia";
+ case eCommunications:
+ return "Communications";
+ default:
+ return "Unsupported";
+ }
+}
+
+const char* FlowToString(const EDataFlow flow) {
+ switch (flow) {
+ case eRender:
+ return "Render";
+ case eCapture:
+ return "Capture";
+ case eAll:
+ return "Render or Capture";
+ default:
+ return "Unsupported";
+ }
+}
+
+bool LoadAudiosesDll() {
+ static const wchar_t* const kAudiosesDLL =
+ L"%WINDIR%\\system32\\audioses.dll";
+ wchar_t path[MAX_PATH] = {0};
+ ExpandEnvironmentStringsW(kAudiosesDLL, path, arraysize(path));
+ RTC_DLOG(LS_INFO) << rtc::ToUtf8(path);
+ return (LoadLibraryExW(path, nullptr, LOAD_WITH_ALTERED_SEARCH_PATH) !=
+ nullptr);
+}
+
+bool LoadAvrtDll() {
+ static const wchar_t* const kAvrtDLL = L"%WINDIR%\\system32\\Avrt.dll";
+ wchar_t path[MAX_PATH] = {0};
+ ExpandEnvironmentStringsW(kAvrtDLL, path, arraysize(path));
+ RTC_DLOG(LS_INFO) << rtc::ToUtf8(path);
+ return (LoadLibraryExW(path, nullptr, LOAD_WITH_ALTERED_SEARCH_PATH) !=
+ nullptr);
+}
+
+ComPtr<IMMDeviceEnumerator> CreateDeviceEnumeratorInternal(
+ bool allow_reinitialize) {
+ ComPtr<IMMDeviceEnumerator> device_enumerator;
+ _com_error error =
+ ::CoCreateInstance(__uuidof(MMDeviceEnumerator), nullptr, CLSCTX_ALL,
+ IID_PPV_ARGS(&device_enumerator));
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "CoCreateInstance failed: " << ErrorToString(error);
+ }
+
+ if (error.Error() == CO_E_NOTINITIALIZED && allow_reinitialize) {
+ RTC_LOG(LS_ERROR) << "CoCreateInstance failed with CO_E_NOTINITIALIZED";
+ // We have seen crashes which indicates that this method can in fact
+ // fail with CO_E_NOTINITIALIZED in combination with certain 3rd party
+ // modules. Calling CoInitializeEx() is an attempt to resolve the reported
+ // issues. See http://crbug.com/378465 for details.
+ error = CoInitializeEx(nullptr, COINIT_MULTITHREADED);
+ if (FAILED(error.Error())) {
+ error = ::CoCreateInstance(__uuidof(MMDeviceEnumerator), nullptr,
+ CLSCTX_ALL, IID_PPV_ARGS(&device_enumerator));
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "CoCreateInstance failed: "
+ << ErrorToString(error);
+ }
+ }
+ }
+ return device_enumerator;
+}
+
+bool IsSupportedInternal() {
+ // The Core Audio APIs are implemented in the user-mode system components
+ // Audioses.dll and Mmdevapi.dll. Dependency Walker shows that it is
+ // enough to verify possibility to load the Audioses DLL since it depends
+ // on Mmdevapi.dll. See http://crbug.com/166397 why this extra step is
+ // required to guarantee Core Audio support.
+ if (!LoadAudiosesDll())
+ return false;
+
+  // Being able to load the Audioses.dll does not seem to be sufficient for
+  // all devices to guarantee Core Audio support. To be certain, we also verify
+  // that it is possible to create the IMMDeviceEnumerator interface. If this
+  // works as well, we should be home free.
+ ComPtr<IMMDeviceEnumerator> device_enumerator =
+ CreateDeviceEnumeratorInternal(false);
+ if (!device_enumerator) {
+ RTC_LOG(LS_ERROR)
+ << "Failed to create Core Audio device enumerator on thread with ID "
+ << rtc::CurrentThreadId();
+ return false;
+ }
+
+ return true;
+}
+
+bool IsDeviceActive(IMMDevice* device) {
+ DWORD state = DEVICE_STATE_DISABLED;
+ return SUCCEEDED(device->GetState(&state)) && (state & DEVICE_STATE_ACTIVE);
+}
+
+// Retrieve an audio device specified by `device_id` or a default device
+// specified by data-flow direction and role if `device_id` is default.
+ComPtr<IMMDevice> CreateDeviceInternal(absl::string_view device_id,
+ EDataFlow data_flow,
+ ERole role) {
+ RTC_DLOG(LS_INFO) << "CreateDeviceInternal: "
+ "id="
+ << device_id << ", flow=" << FlowToString(data_flow)
+ << ", role=" << RoleToString(role);
+ ComPtr<IMMDevice> audio_endpoint_device;
+
+ // Create the IMMDeviceEnumerator interface.
+ ComPtr<IMMDeviceEnumerator> device_enum(CreateDeviceEnumeratorInternal(true));
+ if (!device_enum.Get())
+ return audio_endpoint_device;
+
+ _com_error error(S_FALSE);
+ if (device_id == AudioDeviceName::kDefaultDeviceId) {
+ // Get the default audio endpoint for the specified data-flow direction and
+ // role. Note that, if only a single rendering or capture device is
+ // available, the system always assigns all three rendering or capture roles
+ // to that device. If the method fails to find a rendering or capture device
+ // for the specified role, this means that no rendering or capture device is
+ // available at all. If no device is available, the method sets the output
+ // pointer to NULL and returns ERROR_NOT_FOUND.
+ error = device_enum->GetDefaultAudioEndpoint(
+ data_flow, role, audio_endpoint_device.GetAddressOf());
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR)
+ << "IMMDeviceEnumerator::GetDefaultAudioEndpoint failed: "
+ << ErrorToString(error);
+ }
+ } else {
+ // Ask for an audio endpoint device that is identified by an endpoint ID
+ // string.
+ error = device_enum->GetDevice(rtc::ToUtf16(device_id).c_str(),
+ audio_endpoint_device.GetAddressOf());
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IMMDeviceEnumerator::GetDevice failed: "
+ << ErrorToString(error);
+ }
+ }
+
+ // Verify that the audio endpoint device is active, i.e., that the audio
+ // adapter that connects to the endpoint device is present and enabled.
+ if (SUCCEEDED(error.Error()) && audio_endpoint_device.Get() &&
+ !IsDeviceActive(audio_endpoint_device.Get())) {
+ RTC_LOG(LS_WARNING) << "Selected endpoint device is not active";
+ audio_endpoint_device.Reset();
+ }
+
+ return audio_endpoint_device;
+}
+
+std::string GetDeviceIdInternal(IMMDevice* device) {
+ // Retrieve unique name of endpoint device.
+ // Example: "{0.0.1.00000000}.{8db6020f-18e3-4f25-b6f5-7726c9122574}".
+ LPWSTR device_id;
+ if (SUCCEEDED(device->GetId(&device_id))) {
+ std::string device_id_utf8 = rtc::ToUtf8(device_id, wcslen(device_id));
+ CoTaskMemFree(device_id);
+ return device_id_utf8;
+ } else {
+ return std::string();
+ }
+}
+
+std::string GetDeviceFriendlyNameInternal(IMMDevice* device) {
+ // Retrieve user-friendly name of endpoint device.
+ // Example: "Microphone (Realtek High Definition Audio)".
+ ComPtr<IPropertyStore> properties;
+ HRESULT hr = device->OpenPropertyStore(STGM_READ, properties.GetAddressOf());
+ if (FAILED(hr))
+ return std::string();
+
+ ScopedPropVariant friendly_name_pv;
+ hr = properties->GetValue(PKEY_Device_FriendlyName,
+ friendly_name_pv.Receive());
+ if (FAILED(hr))
+ return std::string();
+
+ if (friendly_name_pv.get().vt == VT_LPWSTR &&
+ friendly_name_pv.get().pwszVal) {
+ return rtc::ToUtf8(friendly_name_pv.get().pwszVal,
+ wcslen(friendly_name_pv.get().pwszVal));
+ } else {
+ return std::string();
+ }
+}
+
+ComPtr<IAudioSessionManager2> CreateSessionManager2Internal(
+ IMMDevice* audio_device) {
+ if (!audio_device)
+ return ComPtr<IAudioSessionManager2>();
+
+ ComPtr<IAudioSessionManager2> audio_session_manager;
+ _com_error error =
+ audio_device->Activate(__uuidof(IAudioSessionManager2), CLSCTX_ALL,
+ nullptr, &audio_session_manager);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IMMDevice::Activate(IAudioSessionManager2) failed: "
+ << ErrorToString(error);
+ }
+ return audio_session_manager;
+}
+
+ComPtr<IAudioSessionEnumerator> CreateSessionEnumeratorInternal(
+ IMMDevice* audio_device) {
+ if (!audio_device) {
+ return ComPtr<IAudioSessionEnumerator>();
+ }
+
+ ComPtr<IAudioSessionEnumerator> audio_session_enumerator;
+ ComPtr<IAudioSessionManager2> audio_session_manager =
+ CreateSessionManager2Internal(audio_device);
+ if (!audio_session_manager.Get()) {
+ return audio_session_enumerator;
+ }
+ _com_error error =
+ audio_session_manager->GetSessionEnumerator(&audio_session_enumerator);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR)
+ << "IAudioSessionEnumerator::IAudioSessionEnumerator failed: "
+ << ErrorToString(error);
+ return ComPtr<IAudioSessionEnumerator>();
+ }
+ return audio_session_enumerator;
+}
+
+// Creates and activates an IAudioClient COM object given the selected
+// endpoint device.
+ComPtr<IAudioClient> CreateClientInternal(IMMDevice* audio_device) {
+ if (!audio_device)
+ return ComPtr<IAudioClient>();
+
+ ComPtr<IAudioClient> audio_client;
+ _com_error error = audio_device->Activate(__uuidof(IAudioClient), CLSCTX_ALL,
+ nullptr, &audio_client);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IMMDevice::Activate(IAudioClient) failed: "
+ << ErrorToString(error);
+ }
+ return audio_client;
+}
+
+ComPtr<IAudioClient2> CreateClient2Internal(IMMDevice* audio_device) {
+ if (!audio_device)
+ return ComPtr<IAudioClient2>();
+
+ ComPtr<IAudioClient2> audio_client;
+ _com_error error = audio_device->Activate(__uuidof(IAudioClient2), CLSCTX_ALL,
+ nullptr, &audio_client);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IMMDevice::Activate(IAudioClient2) failed: "
+ << ErrorToString(error);
+ }
+ return audio_client;
+}
+
+ComPtr<IAudioClient3> CreateClient3Internal(IMMDevice* audio_device) {
+ if (!audio_device)
+ return ComPtr<IAudioClient3>();
+
+ ComPtr<IAudioClient3> audio_client;
+ _com_error error = audio_device->Activate(__uuidof(IAudioClient3), CLSCTX_ALL,
+ nullptr, &audio_client);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IMMDevice::Activate(IAudioClient3) failed: "
+ << ErrorToString(error);
+ }
+ return audio_client;
+}
+
+ComPtr<IMMDeviceCollection> CreateCollectionInternal(EDataFlow data_flow) {
+ ComPtr<IMMDeviceEnumerator> device_enumerator(
+ CreateDeviceEnumeratorInternal(true));
+ if (!device_enumerator) {
+ return ComPtr<IMMDeviceCollection>();
+ }
+
+ // Generate a collection of active (present and not disabled) audio endpoint
+ // devices for the specified data-flow direction.
+ // This method will succeed even if all devices are disabled.
+ ComPtr<IMMDeviceCollection> collection;
+ _com_error error = device_enumerator->EnumAudioEndpoints(
+ data_flow, DEVICE_STATE_ACTIVE, collection.GetAddressOf());
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IMMDeviceCollection::EnumAudioEndpoints failed: "
+ << ErrorToString(error);
+ }
+ return collection;
+}
+
+bool GetDeviceNamesInternal(EDataFlow data_flow,
+ webrtc::AudioDeviceNames* device_names) {
+ RTC_DLOG(LS_INFO) << "GetDeviceNamesInternal: flow="
+ << FlowToString(data_flow);
+
+ // Generate a collection of active audio endpoint devices for the specified
+ // direction.
+ ComPtr<IMMDeviceCollection> collection = CreateCollectionInternal(data_flow);
+ if (!collection.Get()) {
+ RTC_LOG(LS_ERROR) << "Failed to create a collection of active devices";
+ return false;
+ }
+
+ // Retrieve the number of active (present, not disabled and plugged in) audio
+ // devices for the specified direction.
+ UINT number_of_active_devices = 0;
+ _com_error error = collection->GetCount(&number_of_active_devices);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IMMDeviceCollection::GetCount failed: "
+ << ErrorToString(error);
+ return false;
+ }
+
+ if (number_of_active_devices == 0) {
+ RTC_DLOG(LS_WARNING) << "Found no active devices";
+ return false;
+ }
+
+ // Loop over all active devices and add friendly name and unique id to the
+ // `device_names` queue. For now, devices are added at indexes 0, 1, ..., N-1
+ // but they will be moved to 2,3,..., N+1 at the next stage when default and
+ // default communication devices are added at index 0 and 1.
+ ComPtr<IMMDevice> audio_device;
+ for (UINT i = 0; i < number_of_active_devices; ++i) {
+    // Retrieve a pointer to the specified item in the device collection. Use
+    // ReleaseAndGetAddressOf() so that a device held from a previous loop
+    // iteration is released before its pointer is overwritten.
+    error = collection->Item(i, audio_device.ReleaseAndGetAddressOf());
+ if (FAILED(error.Error())) {
+ // Skip this item and try to get the next item instead; will result in an
+ // incomplete list of devices.
+ RTC_LOG(LS_WARNING) << "IMMDeviceCollection::Item failed: "
+ << ErrorToString(error);
+ continue;
+ }
+ if (!audio_device.Get()) {
+ RTC_LOG(LS_WARNING) << "Invalid audio device";
+ continue;
+ }
+
+ // Retrieve the complete device name for the given audio device endpoint.
+ AudioDeviceName device_name(
+ GetDeviceFriendlyNameInternal(audio_device.Get()),
+ GetDeviceIdInternal(audio_device.Get()));
+ // Add combination of user-friendly and unique name to the output list.
+ device_names->push_back(device_name);
+ }
+
+  // Log a warning if the list of devices is not complete, but keep trying to
+  // add the default and default communications devices at the front.
+ if (device_names->size() != number_of_active_devices) {
+ RTC_DLOG(LS_WARNING)
+ << "List of device names does not contain all active devices";
+ }
+
+ // Avoid adding default and default communication devices if no active device
+ // could be added to the queue. We might as well break here and return false
+ // since no active devices were identified.
+ if (device_names->empty()) {
+ RTC_DLOG(LS_ERROR) << "List of active devices is empty";
+ return false;
+ }
+
+ // Prepend the queue with two more elements: one for the default device and
+ // one for the default communication device (can correspond to the same unique
+ // id if only one active device exists). The first element (index 0) is the
+ // default device and the second element (index 1) is the default
+ // communication device.
+ ERole role[] = {eCommunications, eConsole};
+ ComPtr<IMMDevice> default_device;
+ AudioDeviceName default_device_name;
+ for (size_t i = 0; i < arraysize(role); ++i) {
+ default_device = CreateDeviceInternal(AudioDeviceName::kDefaultDeviceId,
+ data_flow, role[i]);
+ if (!default_device.Get()) {
+ // Add empty strings to device name if the device could not be created.
+ RTC_DLOG(LS_WARNING) << "Failed to add device with role: "
+ << RoleToString(role[i]);
+ default_device_name.device_name = std::string();
+ default_device_name.unique_id = std::string();
+ } else {
+ // Populate the device name with friendly name and unique id.
+ std::string device_name;
+ device_name += (role[i] == eConsole ? "Default - " : "Communication - ");
+ device_name += GetDeviceFriendlyNameInternal(default_device.Get());
+ std::string unique_id = GetDeviceIdInternal(default_device.Get());
+ default_device_name.device_name = std::move(device_name);
+ default_device_name.unique_id = std::move(unique_id);
+ }
+
+ // Add combination of user-friendly and unique name to the output queue.
+ // The last element (<=> eConsole) will be at the front of the queue, hence
+ // at index 0. Empty strings will be added for cases where no default
+ // devices were found.
+ device_names->push_front(default_device_name);
+ }
+
+  // Example of log output when only one device is active. Note that the queue
+  // contains two extra elements at index 0 (Default) and 1 (Communication) to
+  // allow selection of a device by role instead of id. All elements correspond
+  // to the same unique id.
+ // [0] friendly name: Default - Headset Microphone (2- Arctis 7 Chat)
+ // [0] unique id : {0.0.1.00000000}.{ff9eed76-196e-467a-b295-26986e69451c}
+ // [1] friendly name: Communication - Headset Microphone (2- Arctis 7 Chat)
+ // [1] unique id : {0.0.1.00000000}.{ff9eed76-196e-467a-b295-26986e69451c}
+ // [2] friendly name: Headset Microphone (2- Arctis 7 Chat)
+ // [2] unique id : {0.0.1.00000000}.{ff9eed76-196e-467a-b295-26986e69451c}
+ for (size_t i = 0; i < device_names->size(); ++i) {
+ RTC_DLOG(LS_INFO) << "[" << i
+ << "] friendly name: " << (*device_names)[i].device_name;
+ RTC_DLOG(LS_INFO) << "[" << i
+ << "] unique id : " << (*device_names)[i].unique_id;
+ }
+
+ return true;
+}
+
+HRESULT GetPreferredAudioParametersInternal(IAudioClient* client,
+ AudioParameters* params,
+ int fixed_sample_rate) {
+ WAVEFORMATPCMEX mix_format;
+ HRESULT hr = core_audio_utility::GetSharedModeMixFormat(client, &mix_format);
+ if (FAILED(hr))
+ return hr;
+
+ REFERENCE_TIME default_period = 0;
+ hr = core_audio_utility::GetDevicePeriod(client, AUDCLNT_SHAREMODE_SHARED,
+ &default_period);
+ if (FAILED(hr))
+ return hr;
+
+ int sample_rate = mix_format.Format.nSamplesPerSec;
+ // Override default sample rate if `fixed_sample_rate` is set and different
+ // from the default rate.
+ if (fixed_sample_rate > 0 && fixed_sample_rate != sample_rate) {
+ RTC_DLOG(LS_INFO) << "Using fixed sample rate instead of the preferred: "
+ << sample_rate << " is replaced by " << fixed_sample_rate;
+ sample_rate = fixed_sample_rate;
+ }
+ // TODO(henrika): utilize full mix_format.Format.wBitsPerSample.
+ // const size_t bits_per_sample = AudioParameters::kBitsPerSample;
+ // TODO(henrika): improve channel layout support.
+ const size_t channels = mix_format.Format.nChannels;
+
+ // Use the native device period to derive the smallest possible buffer size
+ // in shared mode.
+ double device_period_in_seconds =
+ static_cast<double>(
+ core_audio_utility::ReferenceTimeToTimeDelta(default_period).ms()) /
+ 1000.0L;
+ const size_t frames_per_buffer =
+ static_cast<size_t>(sample_rate * device_period_in_seconds + 0.5);
+
+ AudioParameters audio_params(sample_rate, channels, frames_per_buffer);
+ *params = audio_params;
+ RTC_DLOG(LS_INFO) << audio_params.ToString();
+
+ return hr;
+}
+
+} // namespace
+
+namespace core_audio_utility {
+
+// core_audio_utility::WaveFormatWrapper implementation.
+WAVEFORMATEXTENSIBLE* WaveFormatWrapper::GetExtensible() const {
+ RTC_CHECK(IsExtensible());
+ return reinterpret_cast<WAVEFORMATEXTENSIBLE*>(ptr_);
+}
+
+bool WaveFormatWrapper::IsExtensible() const {
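+  // The 22 extra bytes correspond to the fields that WAVEFORMATEXTENSIBLE
+  // appends to WAVEFORMATEX: Samples (2 bytes), dwChannelMask (4 bytes) and
+  // the SubFormat GUID (16 bytes).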
+ return ptr_->wFormatTag == WAVE_FORMAT_EXTENSIBLE && ptr_->cbSize >= 22;
+}
+
+bool WaveFormatWrapper::IsPcm() const {
+ return IsExtensible() ? GetExtensible()->SubFormat == KSDATAFORMAT_SUBTYPE_PCM
+ : ptr_->wFormatTag == WAVE_FORMAT_PCM;
+}
+
+bool WaveFormatWrapper::IsFloat() const {
+ return IsExtensible()
+ ? GetExtensible()->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT
+ : ptr_->wFormatTag == WAVE_FORMAT_IEEE_FLOAT;
+}
+
+size_t WaveFormatWrapper::size() const {
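+  // Total size in bytes: the base WAVEFORMATEX header plus the cbSize
+  // extension bytes that follow it.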
+ return sizeof(*ptr_) + ptr_->cbSize;
+}
+
+bool IsSupported() {
+ RTC_DLOG(LS_INFO) << "IsSupported";
+ static bool g_is_supported = IsSupportedInternal();
+ return g_is_supported;
+}
+
+bool IsMMCSSSupported() {
+ RTC_DLOG(LS_INFO) << "IsMMCSSSupported";
+ return LoadAvrtDll();
+}
+
+int NumberOfActiveDevices(EDataFlow data_flow) {
+ // Generate a collection of active audio endpoint devices for the specified
+ // data-flow direction.
+ ComPtr<IMMDeviceCollection> collection = CreateCollectionInternal(data_flow);
+ if (!collection.Get()) {
+ return 0;
+ }
+
+ // Retrieve the number of active audio devices for the specified direction.
+ UINT number_of_active_devices = 0;
+ collection->GetCount(&number_of_active_devices);
+ std::string str;
+ if (data_flow == eCapture) {
+ str = "Number of capture devices: ";
+ } else if (data_flow == eRender) {
+ str = "Number of render devices: ";
+ } else if (data_flow == eAll) {
+ str = "Total number of devices: ";
+ }
+ RTC_DLOG(LS_INFO) << str << number_of_active_devices;
+ return static_cast<int>(number_of_active_devices);
+}
+
+uint32_t GetAudioClientVersion() {
+ uint32_t version = 1;
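+  // IAudioClient3 was added in Windows 10 and IAudioClient2 in Windows 8;
+  // earlier versions only support IAudioClient.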
+ if (rtc::rtc_win::GetVersion() >= rtc::rtc_win::VERSION_WIN10) {
+ version = 3;
+ } else if (rtc::rtc_win::GetVersion() >= rtc::rtc_win::VERSION_WIN8) {
+ version = 2;
+ }
+ return version;
+}
+
+ComPtr<IMMDeviceEnumerator> CreateDeviceEnumerator() {
+ RTC_DLOG(LS_INFO) << "CreateDeviceEnumerator";
+ return CreateDeviceEnumeratorInternal(true);
+}
+
+std::string GetDefaultInputDeviceID() {
+ RTC_DLOG(LS_INFO) << "GetDefaultInputDeviceID";
+ ComPtr<IMMDevice> device(
+ CreateDevice(AudioDeviceName::kDefaultDeviceId, eCapture, eConsole));
+ return device.Get() ? GetDeviceIdInternal(device.Get()) : std::string();
+}
+
+std::string GetDefaultOutputDeviceID() {
+ RTC_DLOG(LS_INFO) << "GetDefaultOutputDeviceID";
+ ComPtr<IMMDevice> device(
+ CreateDevice(AudioDeviceName::kDefaultDeviceId, eRender, eConsole));
+ return device.Get() ? GetDeviceIdInternal(device.Get()) : std::string();
+}
+
+std::string GetCommunicationsInputDeviceID() {
+ RTC_DLOG(LS_INFO) << "GetCommunicationsInputDeviceID";
+ ComPtr<IMMDevice> device(CreateDevice(AudioDeviceName::kDefaultDeviceId,
+ eCapture, eCommunications));
+ return device.Get() ? GetDeviceIdInternal(device.Get()) : std::string();
+}
+
+std::string GetCommunicationsOutputDeviceID() {
+ RTC_DLOG(LS_INFO) << "GetCommunicationsOutputDeviceID";
+ ComPtr<IMMDevice> device(CreateDevice(AudioDeviceName::kDefaultDeviceId,
+ eRender, eCommunications));
+ return device.Get() ? GetDeviceIdInternal(device.Get()) : std::string();
+}
+
+ComPtr<IMMDevice> CreateDevice(absl::string_view device_id,
+ EDataFlow data_flow,
+ ERole role) {
+ RTC_DLOG(LS_INFO) << "CreateDevice";
+ return CreateDeviceInternal(device_id, data_flow, role);
+}
+
+AudioDeviceName GetDeviceName(IMMDevice* device) {
+ RTC_DLOG(LS_INFO) << "GetDeviceName";
+ RTC_DCHECK(device);
+ AudioDeviceName device_name(GetDeviceFriendlyNameInternal(device),
+ GetDeviceIdInternal(device));
+ RTC_DLOG(LS_INFO) << "friendly name: " << device_name.device_name;
+ RTC_DLOG(LS_INFO) << "unique id : " << device_name.unique_id;
+ return device_name;
+}
+
+std::string GetFriendlyName(absl::string_view device_id,
+ EDataFlow data_flow,
+ ERole role) {
+ RTC_DLOG(LS_INFO) << "GetFriendlyName";
+ ComPtr<IMMDevice> audio_device = CreateDevice(device_id, data_flow, role);
+ if (!audio_device.Get())
+ return std::string();
+
+ AudioDeviceName device_name = GetDeviceName(audio_device.Get());
+ return device_name.device_name;
+}
+
+EDataFlow GetDataFlow(IMMDevice* device) {
+ RTC_DLOG(LS_INFO) << "GetDataFlow";
+ RTC_DCHECK(device);
+ ComPtr<IMMEndpoint> endpoint;
+ _com_error error = device->QueryInterface(endpoint.GetAddressOf());
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IMMDevice::QueryInterface failed: "
+ << ErrorToString(error);
+ return eAll;
+ }
+
+ EDataFlow data_flow;
+ error = endpoint->GetDataFlow(&data_flow);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IMMEndpoint::GetDataFlow failed: "
+ << ErrorToString(error);
+ return eAll;
+ }
+ return data_flow;
+}
+
+bool GetInputDeviceNames(webrtc::AudioDeviceNames* device_names) {
+ RTC_DLOG(LS_INFO) << "GetInputDeviceNames";
+ RTC_DCHECK(device_names);
+ RTC_DCHECK(device_names->empty());
+ return GetDeviceNamesInternal(eCapture, device_names);
+}
+
+bool GetOutputDeviceNames(webrtc::AudioDeviceNames* device_names) {
+ RTC_DLOG(LS_INFO) << "GetOutputDeviceNames";
+ RTC_DCHECK(device_names);
+ RTC_DCHECK(device_names->empty());
+ return GetDeviceNamesInternal(eRender, device_names);
+}
+
+ComPtr<IAudioSessionManager2> CreateSessionManager2(IMMDevice* device) {
+ RTC_DLOG(LS_INFO) << "CreateSessionManager2";
+ return CreateSessionManager2Internal(device);
+}
+
+Microsoft::WRL::ComPtr<IAudioSessionEnumerator> CreateSessionEnumerator(
+ IMMDevice* device) {
+ RTC_DLOG(LS_INFO) << "CreateSessionEnumerator";
+ return CreateSessionEnumeratorInternal(device);
+}
+
+int NumberOfActiveSessions(IMMDevice* device) {
+ RTC_DLOG(LS_INFO) << "NumberOfActiveSessions";
+ ComPtr<IAudioSessionEnumerator> session_enumerator =
+ CreateSessionEnumerator(device);
+
+ // Iterate over all audio sessions for the given device.
+ int session_count = 0;
+ _com_error error = session_enumerator->GetCount(&session_count);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioSessionEnumerator::GetCount failed: "
+ << ErrorToString(error);
+ return 0;
+ }
+ RTC_DLOG(LS_INFO) << "Total number of audio sessions: " << session_count;
+
+ int num_active = 0;
+ for (int session = 0; session < session_count; session++) {
+ // Acquire the session control interface.
+ ComPtr<IAudioSessionControl> session_control;
+ error = session_enumerator->GetSession(session, &session_control);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioSessionEnumerator::GetSession failed: "
+ << ErrorToString(error);
+ return 0;
+ }
+
+ // Log the display name of the audio session for debugging purposes.
+ LPWSTR display_name;
+ if (SUCCEEDED(session_control->GetDisplayName(&display_name))) {
+ RTC_DLOG(LS_INFO) << "display name: "
+ << rtc::ToUtf8(display_name, wcslen(display_name));
+ CoTaskMemFree(display_name);
+ }
+
+ // Get the current state and check if the state is active or not.
+ AudioSessionState state;
+ error = session_control->GetState(&state);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioSessionControl::GetState failed: "
+ << ErrorToString(error);
+ return 0;
+ }
+ if (state == AudioSessionStateActive) {
+ ++num_active;
+ }
+ }
+
+ RTC_DLOG(LS_INFO) << "Number of active audio sessions: " << num_active;
+ return num_active;
+}
+
+ComPtr<IAudioClient> CreateClient(absl::string_view device_id,
+ EDataFlow data_flow,
+ ERole role) {
+ RTC_DLOG(LS_INFO) << "CreateClient";
+ ComPtr<IMMDevice> device(CreateDevice(device_id, data_flow, role));
+ return CreateClientInternal(device.Get());
+}
+
+ComPtr<IAudioClient2> CreateClient2(absl::string_view device_id,
+ EDataFlow data_flow,
+ ERole role) {
+ RTC_DLOG(LS_INFO) << "CreateClient2";
+ ComPtr<IMMDevice> device(CreateDevice(device_id, data_flow, role));
+ return CreateClient2Internal(device.Get());
+}
+
+ComPtr<IAudioClient3> CreateClient3(absl::string_view device_id,
+ EDataFlow data_flow,
+ ERole role) {
+ RTC_DLOG(LS_INFO) << "CreateClient3";
+ ComPtr<IMMDevice> device(CreateDevice(device_id, data_flow, role));
+ return CreateClient3Internal(device.Get());
+}
+
+HRESULT SetClientProperties(IAudioClient2* client) {
+ RTC_DLOG(LS_INFO) << "SetClientProperties";
+ RTC_DCHECK(client);
+ if (GetAudioClientVersion() < 2) {
+ RTC_LOG(LS_WARNING) << "Requires IAudioClient2 or higher";
+ return AUDCLNT_E_UNSUPPORTED_FORMAT;
+ }
+ AudioClientProperties props = {0};
+ props.cbSize = sizeof(AudioClientProperties);
+ // Real-time VoIP communication.
+ // TODO(henrika): other categories?
+ props.eCategory = AudioCategory_Communications;
+ // Hardware-offloaded audio processing allows the main audio processing tasks
+ // to be performed outside the computer's main CPU. Check support and log the
+ // result but hard-code `bIsOffload` to FALSE for now.
+ // TODO(henrika): evaluate hardware-offloading. Might complicate usage of
+ // IAudioClient::GetMixFormat().
+ BOOL supports_offload = FALSE;
+ _com_error error =
+ client->IsOffloadCapable(props.eCategory, &supports_offload);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioClient2::IsOffloadCapable failed: "
+ << ErrorToString(error);
+ }
+ RTC_DLOG(LS_INFO) << "supports_offload: " << supports_offload;
+ props.bIsOffload = false;
+#if (NTDDI_VERSION < NTDDI_WINBLUE)
+ RTC_DLOG(LS_INFO) << "options: Not supported in this build";
+#else
+ // TODO(henrika): pros and cons compared with AUDCLNT_STREAMOPTIONS_NONE?
+ props.Options |= AUDCLNT_STREAMOPTIONS_NONE;
+ // Requires System.Devices.AudioDevice.RawProcessingSupported.
+ // The application can choose to *always ignore* the OEM AEC/AGC by setting
+ // the AUDCLNT_STREAMOPTIONS_RAW flag in the call to SetClientProperties.
+ // This flag will preserve the user experience aspect of Communications
+ // streams, but will not insert any OEM provided communications specific
+ // processing in the audio signal path.
+ // props.Options |= AUDCLNT_STREAMOPTIONS_RAW;
+
+ // If it is important to avoid resampling in the audio engine, set this flag.
+ // AUDCLNT_STREAMOPTIONS_MATCH_FORMAT (or anything in IAudioClient3) is not
+ // an appropriate interface to use for communications scenarios.
+ // This interface is mainly meant for pro audio scenarios.
+ // props.Options |= AUDCLNT_STREAMOPTIONS_MATCH_FORMAT;
+ RTC_DLOG(LS_INFO) << "options: 0x" << rtc::ToHex(props.Options);
+#endif
+ error = client->SetClientProperties(&props);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioClient2::SetClientProperties failed: "
+ << ErrorToString(error);
+ }
+ return error.Error();
+}
+
+HRESULT GetBufferSizeLimits(IAudioClient2* client,
+ const WAVEFORMATEXTENSIBLE* format,
+ REFERENCE_TIME* min_buffer_duration,
+ REFERENCE_TIME* max_buffer_duration) {
+ RTC_DLOG(LS_INFO) << "GetBufferSizeLimits";
+ RTC_DCHECK(client);
+ if (GetAudioClientVersion() < 2) {
+ RTC_LOG(LS_WARNING) << "Requires IAudioClient2 or higher";
+ return AUDCLNT_E_UNSUPPORTED_FORMAT;
+ }
+ REFERENCE_TIME min_duration = 0;
+ REFERENCE_TIME max_duration = 0;
+ _com_error error =
+ client->GetBufferSizeLimits(reinterpret_cast<const WAVEFORMATEX*>(format),
+ TRUE, &min_duration, &max_duration);
+ if (error.Error() == AUDCLNT_E_OFFLOAD_MODE_ONLY) {
+ // This API seems to be supported in off-load mode only but it is not
+ // documented as a valid error code. Making a special note about it here.
+ RTC_LOG(LS_ERROR) << "IAudioClient2::GetBufferSizeLimits failed: "
+ "AUDCLNT_E_OFFLOAD_MODE_ONLY";
+ } else if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioClient2::GetBufferSizeLimits failed: "
+ << ErrorToString(error);
+ } else {
+ *min_buffer_duration = min_duration;
+ *max_buffer_duration = max_duration;
+ RTC_DLOG(LS_INFO) << "min_buffer_duration: " << min_buffer_duration;
+ RTC_DLOG(LS_INFO) << "max_buffer_duration: " << max_buffer_duration;
+ }
+ return error.Error();
+}
+
+HRESULT GetSharedModeMixFormat(IAudioClient* client,
+ WAVEFORMATEXTENSIBLE* format) {
+ RTC_DLOG(LS_INFO) << "GetSharedModeMixFormat";
+ RTC_DCHECK(client);
+
+ // The GetMixFormat method retrieves the stream format that the audio engine
+ // uses for its internal processing of shared-mode streams. The method
+ // allocates the storage for the structure and this memory will be released
+ // when `mix_format` goes out of scope. The GetMixFormat method retrieves a
+ // format descriptor that is in the form of a WAVEFORMATEXTENSIBLE structure
+ // instead of a standalone WAVEFORMATEX structure. The method outputs a
+ // pointer to the WAVEFORMATEX structure that is embedded at the start of
+ // this WAVEFORMATEXTENSIBLE structure.
+  // Note that crbug/803056 indicates that some devices can return a format
+  // where only the WAVEFORMATEX part is initialized, and we must be able to
+  // account for that.
+ ScopedCoMem<WAVEFORMATEXTENSIBLE> mix_format;
+ _com_error error =
+ client->GetMixFormat(reinterpret_cast<WAVEFORMATEX**>(&mix_format));
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioClient::GetMixFormat failed: "
+ << ErrorToString(error);
+ return error.Error();
+ }
+
+ // Use a wave format wrapper to make things simpler.
+ WaveFormatWrapper wrapped_format(mix_format.Get());
+
+ // Verify that the reported format can be mixed by the audio engine in
+ // shared mode.
+ if (!wrapped_format.IsPcm() && !wrapped_format.IsFloat()) {
+ RTC_DLOG(LS_ERROR)
+ << "Only pure PCM or float audio streams can be mixed in shared mode";
+ return AUDCLNT_E_UNSUPPORTED_FORMAT;
+ }
+
+ // Log a warning for the rare case where `mix_format` only contains a
+ // stand-alone WAVEFORMATEX structure but don't return.
+ if (!wrapped_format.IsExtensible()) {
+ RTC_DLOG(LS_WARNING)
+ << "The returned format contains no extended information. "
+ "The size is "
+ << wrapped_format.size() << " bytes.";
+ }
+
+  // Copy the correct number of bytes into `*format`, taking into account
+  // whether the returned structure is correctly extended or not.
+ RTC_CHECK_LE(wrapped_format.size(), sizeof(WAVEFORMATEXTENSIBLE));
+ memcpy(format, wrapped_format.get(), wrapped_format.size());
+ RTC_DLOG(LS_INFO) << WaveFormatToString(format);
+
+ return error.Error();
+}
+
+bool IsFormatSupported(IAudioClient* client,
+ AUDCLNT_SHAREMODE share_mode,
+ const WAVEFORMATEXTENSIBLE* format) {
+ RTC_DLOG(LS_INFO) << "IsFormatSupported";
+ RTC_DCHECK(client);
+ ScopedCoMem<WAVEFORMATEX> closest_match;
+ // This method provides a way for a client to determine, before calling
+ // IAudioClient::Initialize, whether the audio engine supports a particular
+ // stream format or not. In shared mode, the audio engine always supports
+ // the mix format (see GetSharedModeMixFormat).
+ // TODO(henrika): verify support for exclusive mode as well?
+ _com_error error = client->IsFormatSupported(
+ share_mode, reinterpret_cast<const WAVEFORMATEX*>(format),
+ &closest_match);
+ RTC_LOG(LS_INFO) << WaveFormatToString(
+ const_cast<WAVEFORMATEXTENSIBLE*>(format));
+ if ((error.Error() == S_OK) && (closest_match == nullptr)) {
+ RTC_DLOG(LS_INFO)
+ << "The audio endpoint device supports the specified stream format";
+ } else if ((error.Error() == S_FALSE) && (closest_match != nullptr)) {
+ // Call succeeded with a closest match to the specified format. This log can
+ // only be triggered for shared mode.
+ RTC_LOG(LS_WARNING)
+ << "Exact format is not supported, but a closest match exists";
+ RTC_LOG(LS_INFO) << WaveFormatToString(closest_match.Get());
+ } else if ((error.Error() == AUDCLNT_E_UNSUPPORTED_FORMAT) &&
+ (closest_match == nullptr)) {
+ // The audio engine does not support the caller-specified format or any
+ // similar format.
+ RTC_DLOG(LS_INFO) << "The audio endpoint device does not support the "
+ "specified stream format";
+ } else {
+ RTC_LOG(LS_ERROR) << "IAudioClient::IsFormatSupported failed: "
+ << ErrorToString(error);
+ }
+
+ return (error.Error() == S_OK);
+}
+
+HRESULT GetDevicePeriod(IAudioClient* client,
+ AUDCLNT_SHAREMODE share_mode,
+ REFERENCE_TIME* device_period) {
+ RTC_DLOG(LS_INFO) << "GetDevicePeriod";
+ RTC_DCHECK(client);
+ // The `default_period` parameter specifies the default scheduling period
+ // for a shared-mode stream. The `minimum_period` parameter specifies the
+ // minimum scheduling period for an exclusive-mode stream.
+ // The time is expressed in 100-nanosecond units.
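+  // For example, a typical 10 ms period corresponds to 100000 units.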
+ REFERENCE_TIME default_period = 0;
+ REFERENCE_TIME minimum_period = 0;
+ _com_error error = client->GetDevicePeriod(&default_period, &minimum_period);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioClient::GetDevicePeriod failed: "
+ << ErrorToString(error);
+ return error.Error();
+ }
+
+ *device_period = (share_mode == AUDCLNT_SHAREMODE_SHARED) ? default_period
+ : minimum_period;
+ RTC_LOG(LS_INFO) << "device_period: "
+ << ReferenceTimeToTimeDelta(*device_period).ms() << " [ms]";
+ RTC_LOG(LS_INFO) << "minimum_period: "
+ << ReferenceTimeToTimeDelta(minimum_period).ms() << " [ms]";
+ return error.Error();
+}
+
+HRESULT GetSharedModeEnginePeriod(IAudioClient3* client3,
+ const WAVEFORMATEXTENSIBLE* format,
+ uint32_t* default_period_in_frames,
+ uint32_t* fundamental_period_in_frames,
+ uint32_t* min_period_in_frames,
+ uint32_t* max_period_in_frames) {
+ RTC_DLOG(LS_INFO) << "GetSharedModeEnginePeriod";
+ RTC_DCHECK(client3);
+
+ UINT32 default_period = 0;
+ UINT32 fundamental_period = 0;
+ UINT32 min_period = 0;
+ UINT32 max_period = 0;
+ _com_error error = client3->GetSharedModeEnginePeriod(
+ reinterpret_cast<const WAVEFORMATEX*>(format), &default_period,
+ &fundamental_period, &min_period, &max_period);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioClient3::GetSharedModeEnginePeriod failed: "
+ << ErrorToString(error);
+ return error.Error();
+ }
+
+ WAVEFORMATEX format_ex = format->Format;
+  // Use DWORD to match nSamplesPerSec; a WORD would truncate sample rates
+  // above 65535 Hz (e.g., 96 kHz).
+  const DWORD sample_rate = format_ex.nSamplesPerSec;
+ RTC_LOG(LS_INFO) << "default_period_in_frames: " << default_period << " ("
+ << FramesToMilliseconds(default_period, sample_rate)
+ << " ms)";
+ RTC_LOG(LS_INFO) << "fundamental_period_in_frames: " << fundamental_period
+ << " ("
+ << FramesToMilliseconds(fundamental_period, sample_rate)
+ << " ms)";
+ RTC_LOG(LS_INFO) << "min_period_in_frames: " << min_period << " ("
+ << FramesToMilliseconds(min_period, sample_rate) << " ms)";
+ RTC_LOG(LS_INFO) << "max_period_in_frames: " << max_period << " ("
+ << FramesToMilliseconds(max_period, sample_rate) << " ms)";
+ *default_period_in_frames = default_period;
+ *fundamental_period_in_frames = fundamental_period;
+ *min_period_in_frames = min_period;
+ *max_period_in_frames = max_period;
+ return error.Error();
+}
+
+HRESULT GetPreferredAudioParameters(IAudioClient* client,
+ AudioParameters* params) {
+ RTC_DLOG(LS_INFO) << "GetPreferredAudioParameters";
+ RTC_DCHECK(client);
+ return GetPreferredAudioParametersInternal(client, params, -1);
+}
+
+HRESULT GetPreferredAudioParameters(IAudioClient* client,
+ webrtc::AudioParameters* params,
+ uint32_t sample_rate) {
+ RTC_DLOG(LS_INFO) << "GetPreferredAudioParameters: " << sample_rate;
+ RTC_DCHECK(client);
+ return GetPreferredAudioParametersInternal(client, params, sample_rate);
+}
+
+HRESULT SharedModeInitialize(IAudioClient* client,
+ const WAVEFORMATEXTENSIBLE* format,
+ HANDLE event_handle,
+ REFERENCE_TIME buffer_duration,
+ bool auto_convert_pcm,
+ uint32_t* endpoint_buffer_size) {
+ RTC_DLOG(LS_INFO) << "SharedModeInitialize: buffer_duration="
+ << buffer_duration
+ << ", auto_convert_pcm=" << auto_convert_pcm;
+ RTC_DCHECK(client);
+ RTC_DCHECK_GE(buffer_duration, 0);
+ if (buffer_duration != 0) {
+ RTC_DLOG(LS_WARNING) << "Non-default buffer size is used";
+ }
+ if (auto_convert_pcm) {
+ RTC_DLOG(LS_WARNING) << "Sample rate converter can be utilized";
+ }
+ // The AUDCLNT_STREAMFLAGS_NOPERSIST flag disables persistence of the volume
+ // and mute settings for a session that contains rendering streams.
+ // By default, the volume level and muting state for a rendering session are
+ // persistent across system restarts. The volume level and muting state for a
+ // capture session are never persistent.
+ DWORD stream_flags = AUDCLNT_STREAMFLAGS_NOPERSIST;
+
+ // Enable event-driven streaming if a valid event handle is provided.
+ // After the stream starts, the audio engine will signal the event handle
+ // to notify the client each time a buffer becomes ready to process.
+ // Event-driven buffering is supported for both rendering and capturing.
+ // Both shared-mode and exclusive-mode streams can use event-driven buffering.
+ bool use_event =
+ (event_handle != nullptr && event_handle != INVALID_HANDLE_VALUE);
+ if (use_event) {
+ stream_flags |= AUDCLNT_STREAMFLAGS_EVENTCALLBACK;
+ RTC_DLOG(LS_INFO) << "The stream is initialized to be event driven";
+ }
+
+ // Check if sample-rate conversion is requested.
+ if (auto_convert_pcm) {
+ // Add channel matrixer (not utilized here) and rate converter to convert
+ // from our (the client's) format to the audio engine mix format.
+ // Currently only supported for testing, i.e., not possible to enable using
+ // public APIs.
+ RTC_DLOG(LS_INFO) << "The stream is initialized to support rate conversion";
+ stream_flags |= AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM;
+ stream_flags |= AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY;
+ }
+ RTC_DLOG(LS_INFO) << "stream_flags: 0x" << rtc::ToHex(stream_flags);
+
+  // Initialize the shared-mode client for minimal delay if `buffer_duration`
+  // is 0, or for a higher (more robust) delay if `buffer_duration` is larger
+  // than 0. The actual size is given by IAudioClient::GetBufferSize().
+ _com_error error = client->Initialize(
+ AUDCLNT_SHAREMODE_SHARED, stream_flags, buffer_duration, 0,
+ reinterpret_cast<const WAVEFORMATEX*>(format), nullptr);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioClient::Initialize failed: "
+ << ErrorToString(error);
+ return error.Error();
+ }
+
+ // If a stream is initialized to be event driven and in shared mode, the
+ // associated application must also obtain a handle by making a call to
+ // IAudioClient::SetEventHandle.
+ if (use_event) {
+ error = client->SetEventHandle(event_handle);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioClient::SetEventHandle failed: "
+ << ErrorToString(error);
+ return error.Error();
+ }
+ }
+
+ UINT32 buffer_size_in_frames = 0;
+ // Retrieves the size (maximum capacity) of the endpoint buffer. The size is
+ // expressed as the number of audio frames the buffer can hold.
+ // For rendering clients, the buffer length determines the maximum amount of
+ // rendering data that the application can write to the endpoint buffer
+ // during a single processing pass. For capture clients, the buffer length
+ // determines the maximum amount of capture data that the audio engine can
+ // read from the endpoint buffer during a single processing pass.
+ error = client->GetBufferSize(&buffer_size_in_frames);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioClient::GetBufferSize failed: "
+ << ErrorToString(error);
+ return error.Error();
+ }
+
+ *endpoint_buffer_size = buffer_size_in_frames;
+ RTC_DLOG(LS_INFO) << "endpoint buffer size: " << buffer_size_in_frames
+ << " [audio frames]";
+ const double size_in_ms = static_cast<double>(buffer_size_in_frames) /
+ (format->Format.nSamplesPerSec / 1000.0);
+ RTC_DLOG(LS_INFO) << "endpoint buffer size: "
+ << static_cast<int>(size_in_ms + 0.5) << " [ms]";
+ RTC_DLOG(LS_INFO) << "bytes per audio frame: " << format->Format.nBlockAlign;
+ RTC_DLOG(LS_INFO) << "endpoint buffer size: "
+ << buffer_size_in_frames * format->Format.nChannels *
+ (format->Format.wBitsPerSample / 8)
+ << " [bytes]";
+
+ // TODO(henrika): utilize when delay measurements are added.
+ REFERENCE_TIME latency = 0;
+ error = client->GetStreamLatency(&latency);
+ RTC_DLOG(LS_INFO) << "stream latency: "
+ << ReferenceTimeToTimeDelta(latency).ms() << " [ms]";
+ return error.Error();
+}
+
+HRESULT SharedModeInitializeLowLatency(IAudioClient3* client,
+ const WAVEFORMATEXTENSIBLE* format,
+ HANDLE event_handle,
+ uint32_t period_in_frames,
+ bool auto_convert_pcm,
+ uint32_t* endpoint_buffer_size) {
+ RTC_DLOG(LS_INFO) << "SharedModeInitializeLowLatency: period_in_frames="
+ << period_in_frames
+ << ", auto_convert_pcm=" << auto_convert_pcm;
+ RTC_DCHECK(client);
+ RTC_DCHECK_GT(period_in_frames, 0);
+ if (auto_convert_pcm) {
+ RTC_DLOG(LS_WARNING) << "Sample rate converter is enabled";
+ }
+
+ // Define stream flags.
+ DWORD stream_flags = AUDCLNT_STREAMFLAGS_NOPERSIST;
+ bool use_event =
+ (event_handle != nullptr && event_handle != INVALID_HANDLE_VALUE);
+ if (use_event) {
+ stream_flags |= AUDCLNT_STREAMFLAGS_EVENTCALLBACK;
+ RTC_DLOG(LS_INFO) << "The stream is initialized to be event driven";
+ }
+ if (auto_convert_pcm) {
+ stream_flags |= AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM;
+ stream_flags |= AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY;
+ }
+ RTC_DLOG(LS_INFO) << "stream_flags: 0x" << rtc::ToHex(stream_flags);
+
+ // Initialize the shared mode client for lowest possible latency.
+ // It is assumed that GetSharedModeEnginePeriod() has been used to query the
+ // smallest possible engine period and that it is given by `period_in_frames`.
+ _com_error error = client->InitializeSharedAudioStream(
+ stream_flags, period_in_frames,
+ reinterpret_cast<const WAVEFORMATEX*>(format), nullptr);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioClient3::InitializeSharedAudioStream failed: "
+ << ErrorToString(error);
+ return error.Error();
+ }
+
+ // Set the event handle.
+ if (use_event) {
+ error = client->SetEventHandle(event_handle);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioClient::SetEventHandle failed: "
+ << ErrorToString(error);
+ return error.Error();
+ }
+ }
+
+ UINT32 buffer_size_in_frames = 0;
+ // Retrieve the size (maximum capacity) of the endpoint buffer.
+ error = client->GetBufferSize(&buffer_size_in_frames);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioClient::GetBufferSize failed: "
+ << ErrorToString(error);
+ return error.Error();
+ }
+
+ *endpoint_buffer_size = buffer_size_in_frames;
+ RTC_DLOG(LS_INFO) << "endpoint buffer size: " << buffer_size_in_frames
+ << " [audio frames]";
+ const double size_in_ms = static_cast<double>(buffer_size_in_frames) /
+ (format->Format.nSamplesPerSec / 1000.0);
+ RTC_DLOG(LS_INFO) << "endpoint buffer size: "
+ << static_cast<int>(size_in_ms + 0.5) << " [ms]";
+ RTC_DLOG(LS_INFO) << "bytes per audio frame: " << format->Format.nBlockAlign;
+ RTC_DLOG(LS_INFO) << "endpoint buffer size: "
+ << buffer_size_in_frames * format->Format.nChannels *
+ (format->Format.wBitsPerSample / 8)
+ << " [bytes]";
+
+ // TODO(henrika): utilize when delay measurements are added.
+ REFERENCE_TIME latency = 0;
+ error = client->GetStreamLatency(&latency);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_WARNING) << "IAudioClient::GetStreamLatency failed: "
+ << ErrorToString(error);
+ } else {
+ RTC_DLOG(LS_INFO) << "stream latency: "
+ << ReferenceTimeToTimeDelta(latency).ms() << " [ms]";
+ }
+ return error.Error();
+}
+
+ComPtr<IAudioRenderClient> CreateRenderClient(IAudioClient* client) {
+ RTC_DLOG(LS_INFO) << "CreateRenderClient";
+ RTC_DCHECK(client);
+ // Get access to the IAudioRenderClient interface. This interface
+ // enables us to write output data to a rendering endpoint buffer.
+ ComPtr<IAudioRenderClient> audio_render_client;
+ _com_error error = client->GetService(IID_PPV_ARGS(&audio_render_client));
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR)
+ << "IAudioClient::GetService(IID_IAudioRenderClient) failed: "
+ << ErrorToString(error);
+ return ComPtr<IAudioRenderClient>();
+ }
+ return audio_render_client;
+}
+
+ComPtr<IAudioCaptureClient> CreateCaptureClient(IAudioClient* client) {
+ RTC_DLOG(LS_INFO) << "CreateCaptureClient";
+ RTC_DCHECK(client);
+ // Get access to the IAudioCaptureClient interface. This interface
+ // enables us to read input data from a capturing endpoint buffer.
+ ComPtr<IAudioCaptureClient> audio_capture_client;
+ _com_error error = client->GetService(IID_PPV_ARGS(&audio_capture_client));
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR)
+ << "IAudioClient::GetService(IID_IAudioCaptureClient) failed: "
+ << ErrorToString(error);
+ return ComPtr<IAudioCaptureClient>();
+ }
+ return audio_capture_client;
+}
+
+ComPtr<IAudioClock> CreateAudioClock(IAudioClient* client) {
+ RTC_DLOG(LS_INFO) << "CreateAudioClock";
+ RTC_DCHECK(client);
+ // Get access to the IAudioClock interface. This interface enables us to
+ // monitor a stream's data rate and the current position in the stream.
+ ComPtr<IAudioClock> audio_clock;
+ _com_error error = client->GetService(IID_PPV_ARGS(&audio_clock));
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioClient::GetService(IID_IAudioClock) failed: "
+ << ErrorToString(error);
+ return ComPtr<IAudioClock>();
+ }
+ return audio_clock;
+}
+
+ComPtr<IAudioSessionControl> CreateAudioSessionControl(IAudioClient* client) {
+ RTC_DLOG(LS_INFO) << "CreateAudioSessionControl";
+ RTC_DCHECK(client);
+ ComPtr<IAudioSessionControl> audio_session_control;
+ _com_error error = client->GetService(IID_PPV_ARGS(&audio_session_control));
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioClient::GetService(IID_IAudioControl) failed: "
+ << ErrorToString(error);
+ return ComPtr<IAudioSessionControl>();
+ }
+ return audio_session_control;
+}
+
+ComPtr<ISimpleAudioVolume> CreateSimpleAudioVolume(IAudioClient* client) {
+ RTC_DLOG(LS_INFO) << "CreateSimpleAudioVolume";
+ RTC_DCHECK(client);
+ // Get access to the ISimpleAudioVolume interface. This interface enables a
+ // client to control the master volume level of an audio session.
+ ComPtr<ISimpleAudioVolume> simple_audio_volume;
+ _com_error error = client->GetService(IID_PPV_ARGS(&simple_audio_volume));
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR)
+ << "IAudioClient::GetService(IID_ISimpleAudioVolume) failed: "
+ << ErrorToString(error);
+ return ComPtr<ISimpleAudioVolume>();
+ }
+ return simple_audio_volume;
+}
+
+bool FillRenderEndpointBufferWithSilence(IAudioClient* client,
+ IAudioRenderClient* render_client) {
+ RTC_DLOG(LS_INFO) << "FillRenderEndpointBufferWithSilence";
+ RTC_DCHECK(client);
+ RTC_DCHECK(render_client);
+ UINT32 endpoint_buffer_size = 0;
+ _com_error error = client->GetBufferSize(&endpoint_buffer_size);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioClient::GetBufferSize failed: "
+ << ErrorToString(error);
+ return false;
+ }
+
+ UINT32 num_queued_frames = 0;
+ // Get number of audio frames that are queued up to play in the endpoint
+ // buffer.
+ error = client->GetCurrentPadding(&num_queued_frames);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioClient::GetCurrentPadding failed: "
+ << ErrorToString(error);
+ return false;
+ }
+ RTC_DLOG(LS_INFO) << "num_queued_frames: " << num_queued_frames;
+
+ BYTE* data = nullptr;
+ int num_frames_to_fill = endpoint_buffer_size - num_queued_frames;
+ RTC_DLOG(LS_INFO) << "num_frames_to_fill: " << num_frames_to_fill;
+ error = render_client->GetBuffer(num_frames_to_fill, &data);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioRenderClient::GetBuffer failed: "
+ << ErrorToString(error);
+ return false;
+ }
+
+ // Using the AUDCLNT_BUFFERFLAGS_SILENT flag eliminates the need to
+ // explicitly write silence data to the rendering buffer.
+ error = render_client->ReleaseBuffer(num_frames_to_fill,
+ AUDCLNT_BUFFERFLAGS_SILENT);
+ if (FAILED(error.Error())) {
+ RTC_LOG(LS_ERROR) << "IAudioRenderClient::ReleaseBuffer failed: "
+ << ErrorToString(error);
+ return false;
+ }
+
+ return true;
+}
+
+std::string WaveFormatToString(const WaveFormatWrapper format) {
+ char ss_buf[1024];
+ rtc::SimpleStringBuilder ss(ss_buf);
+ // Start with the WAVEFORMATEX part (which always exists).
+ ss.AppendFormat("wFormatTag: %s (0x%X)",
+ WaveFormatTagToString(format->wFormatTag),
+ format->wFormatTag);
+ ss.AppendFormat(", nChannels: %d", format->nChannels);
+ ss.AppendFormat(", nSamplesPerSec: %d", format->nSamplesPerSec);
+ ss.AppendFormat(", nAvgBytesPerSec: %d", format->nAvgBytesPerSec);
+ ss.AppendFormat(", nBlockAlign: %d", format->nBlockAlign);
+ ss.AppendFormat(", wBitsPerSample: %d", format->wBitsPerSample);
+ ss.AppendFormat(", cbSize: %d", format->cbSize);
+ if (!format.IsExtensible())
+ return ss.str();
+
+ // Append the WAVEFORMATEXTENSIBLE part (which we know exists).
+ ss.AppendFormat(
+ " [+] wValidBitsPerSample: %d, dwChannelMask: %s",
+ format.GetExtensible()->Samples.wValidBitsPerSample,
+ ChannelMaskToString(format.GetExtensible()->dwChannelMask).c_str());
+ if (format.IsPcm()) {
+ ss.AppendFormat("%s", ", SubFormat: KSDATAFORMAT_SUBTYPE_PCM");
+ } else if (format.IsFloat()) {
+ ss.AppendFormat("%s", ", SubFormat: KSDATAFORMAT_SUBTYPE_IEEE_FLOAT");
+ } else {
+ ss.AppendFormat("%s", ", SubFormat: NOT_SUPPORTED");
+ }
+ return ss.str();
+}
+
+webrtc::TimeDelta ReferenceTimeToTimeDelta(REFERENCE_TIME time) {
+ // Each unit of reference time is 100 nanoseconds <=> 0.1 microsecond.
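+  // Example: 100000 REFERENCE_TIME units <=> 10000 [us] <=> 10 [ms].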
+ return webrtc::TimeDelta::Micros(0.1 * time + 0.5);
+}
+
+double FramesToMilliseconds(uint32_t num_frames, uint16_t sample_rate) {
+ // Convert the current period in frames into milliseconds.
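+  // Example: 480 frames at 48000 Hz <=> 480 / 48.0 = 10 [ms].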
+ return static_cast<double>(num_frames) / (sample_rate / 1000.0);
+}
+
+std::string ErrorToString(const _com_error& error) {
+ char ss_buf[1024];
+ rtc::SimpleStringBuilder ss(ss_buf);
+ ss.AppendFormat("(HRESULT: 0x%08X)", error.Error());
+ return ss.str();
+}
+
+} // namespace core_audio_utility
+} // namespace webrtc_win
+} // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/win/core_audio_utility_win.h b/third_party/libwebrtc/modules/audio_device/win/core_audio_utility_win.h
new file mode 100644
index 0000000000..454e60bf31
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/win/core_audio_utility_win.h
@@ -0,0 +1,560 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_UTILITY_WIN_H_
+#define MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_UTILITY_WIN_H_
+
+#include <audioclient.h>
+#include <audiopolicy.h>
+#include <avrt.h>
+#include <comdef.h>
+#include <mmdeviceapi.h>
+#include <objbase.h>
+#include <propidl.h>
+#include <wrl/client.h>
+
+#include <string>
+
+#include "absl/strings/string_view.h"
+#include "api/units/time_delta.h"
+#include "modules/audio_device/audio_device_name.h"
+#include "modules/audio_device/include/audio_device_defines.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/string_utils.h"
+
+#pragma comment(lib, "Avrt.lib")
+
+namespace webrtc {
+namespace webrtc_win {
+
+// Utility class which registers a thread with MMCSS in the constructor and
+// deregisters MMCSS in the destructor. The task name is given by `task_name`.
+// The Multimedia Class Scheduler service (MMCSS) enables multimedia
+// applications to ensure that their time-sensitive processing receives
+// prioritized access to CPU resources without denying CPU resources to
+// lower-priority applications.
+class ScopedMMCSSRegistration {
+ public:
+ const char* PriorityClassToString(DWORD priority_class) {
+ switch (priority_class) {
+ case ABOVE_NORMAL_PRIORITY_CLASS:
+ return "ABOVE_NORMAL";
+ case BELOW_NORMAL_PRIORITY_CLASS:
+ return "BELOW_NORMAL";
+ case HIGH_PRIORITY_CLASS:
+ return "HIGH";
+ case IDLE_PRIORITY_CLASS:
+ return "IDLE";
+ case NORMAL_PRIORITY_CLASS:
+ return "NORMAL";
+ case REALTIME_PRIORITY_CLASS:
+ return "REALTIME";
+ default:
+ return "INVALID";
+ }
+ }
+
+ const char* PriorityToString(int priority) {
+ switch (priority) {
+ case THREAD_PRIORITY_ABOVE_NORMAL:
+ return "ABOVE_NORMAL";
+ case THREAD_PRIORITY_BELOW_NORMAL:
+ return "BELOW_NORMAL";
+ case THREAD_PRIORITY_HIGHEST:
+ return "HIGHEST";
+ case THREAD_PRIORITY_IDLE:
+ return "IDLE";
+ case THREAD_PRIORITY_LOWEST:
+ return "LOWEST";
+ case THREAD_PRIORITY_NORMAL:
+ return "NORMAL";
+ case THREAD_PRIORITY_TIME_CRITICAL:
+ return "TIME_CRITICAL";
+ default:
+ // Can happen in combination with REALTIME_PRIORITY_CLASS.
+ return "INVALID";
+ }
+ }
+
+ explicit ScopedMMCSSRegistration(const wchar_t* task_name) {
+ RTC_DLOG(LS_INFO) << "ScopedMMCSSRegistration: " << rtc::ToUtf8(task_name);
+ // Register the calling thread with MMCSS for the supplied `task_name`.
+ DWORD mmcss_task_index = 0;
+ mmcss_handle_ = AvSetMmThreadCharacteristicsW(task_name, &mmcss_task_index);
+ if (mmcss_handle_ == nullptr) {
+ RTC_LOG(LS_ERROR) << "Failed to enable MMCSS on this thread: "
+ << GetLastError();
+ } else {
+ const DWORD priority_class = GetPriorityClass(GetCurrentProcess());
+ const int priority = GetThreadPriority(GetCurrentThread());
+ RTC_DLOG(LS_INFO) << "priority class: "
+ << PriorityClassToString(priority_class) << "("
+ << priority_class << ")";
+ RTC_DLOG(LS_INFO) << "priority: " << PriorityToString(priority) << "("
+ << priority << ")";
+ }
+ }
+
+ ~ScopedMMCSSRegistration() {
+ if (Succeeded()) {
+ // Deregister with MMCSS.
+ RTC_DLOG(LS_INFO) << "~ScopedMMCSSRegistration";
+ AvRevertMmThreadCharacteristics(mmcss_handle_);
+ }
+ }
+
+ ScopedMMCSSRegistration(const ScopedMMCSSRegistration&) = delete;
+ ScopedMMCSSRegistration& operator=(const ScopedMMCSSRegistration&) = delete;
+
+ bool Succeeded() const { return mmcss_handle_ != nullptr; }
+
+ private:
+ HANDLE mmcss_handle_ = nullptr;
+};
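+
+// Usage sketch (illustrative only): registers the calling audio thread for
+// the lifetime of a scope. "Pro Audio" is one of the task names that Windows
+// predefines for MMCSS.
+//
+//   ScopedMMCSSRegistration mmcss(L"Pro Audio");
+//   if (mmcss.Succeeded()) {
+//     // Time-sensitive processing on this thread now has boosted priority.
+//   }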
+
+// A PROPVARIANT that is automatically initialized and cleared upon respective
+// construction and destruction of this class.
+class ScopedPropVariant {
+ public:
+ ScopedPropVariant() { PropVariantInit(&pv_); }
+
+ ~ScopedPropVariant() { Reset(); }
+
+ ScopedPropVariant(const ScopedPropVariant&) = delete;
+ ScopedPropVariant& operator=(const ScopedPropVariant&) = delete;
+ bool operator==(const ScopedPropVariant&) const = delete;
+ bool operator!=(const ScopedPropVariant&) const = delete;
+
+ // Returns a pointer to the underlying PROPVARIANT for use as an out param in
+ // a function call.
+ PROPVARIANT* Receive() {
+ RTC_DCHECK_EQ(pv_.vt, VT_EMPTY);
+ return &pv_;
+ }
+
+ // Clears the instance to prepare it for re-use (e.g., via Receive).
+ void Reset() {
+ if (pv_.vt != VT_EMPTY) {
+ HRESULT result = PropVariantClear(&pv_);
+ RTC_DCHECK_EQ(result, S_OK);
+ }
+ }
+
+ const PROPVARIANT& get() const { return pv_; }
+ const PROPVARIANT* ptr() const { return &pv_; }
+
+ private:
+ PROPVARIANT pv_;
+};
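+
+// Usage sketch (illustrative only; assumes that `property_store` is a valid
+// IPropertyStore pointer):
+//
+//   ScopedPropVariant friendly_name;
+//   if (SUCCEEDED(property_store->GetValue(PKEY_Device_FriendlyName,
+//                                          friendly_name.Receive()))) {
+//     // friendly_name.get().pwszVal holds the name (VT_LPWSTR).
+//   }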
+
+// Simple scoped memory releaser class for COM allocated memory.
+template <typename T>
+class ScopedCoMem {
+ public:
+ ScopedCoMem() : mem_ptr_(nullptr) {}
+
+ ~ScopedCoMem() { Reset(nullptr); }
+
+ ScopedCoMem(const ScopedCoMem&) = delete;
+ ScopedCoMem& operator=(const ScopedCoMem&) = delete;
+
+ T** operator&() { // NOLINT
+ RTC_DCHECK(mem_ptr_ == nullptr); // To catch memory leaks.
+ return &mem_ptr_;
+ }
+
+ operator T*() { return mem_ptr_; }
+
+ T* operator->() {
+ RTC_DCHECK(mem_ptr_ != nullptr);
+ return mem_ptr_;
+ }
+
+ const T* operator->() const {
+ RTC_DCHECK(mem_ptr_ != nullptr);
+ return mem_ptr_;
+ }
+
+ explicit operator bool() const { return mem_ptr_; }
+
+ friend bool operator==(const ScopedCoMem& lhs, std::nullptr_t) {
+ return lhs.Get() == nullptr;
+ }
+
+ friend bool operator==(std::nullptr_t, const ScopedCoMem& rhs) {
+ return rhs.Get() == nullptr;
+ }
+
+ friend bool operator!=(const ScopedCoMem& lhs, std::nullptr_t) {
+ return lhs.Get() != nullptr;
+ }
+
+ friend bool operator!=(std::nullptr_t, const ScopedCoMem& rhs) {
+ return rhs.Get() != nullptr;
+ }
+
+ void Reset(T* ptr) {
+ if (mem_ptr_)
+ CoTaskMemFree(mem_ptr_);
+ mem_ptr_ = ptr;
+ }
+
+ T* Get() const { return mem_ptr_; }
+
+ private:
+ T* mem_ptr_;
+};
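+
+// Usage sketch (illustrative only; assumes that `device` is a valid
+// IMMDevice pointer):
+//
+//   ScopedCoMem<WCHAR> device_id;
+//   if (SUCCEEDED(device->GetId(&device_id))) {
+//     // `device_id` is released with CoTaskMemFree() at end of scope.
+//   }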
+
+// A HANDLE that is automatically initialized and closed upon respective
+// construction and destruction of this class.
+class ScopedHandle {
+ public:
+ ScopedHandle() : handle_(nullptr) {}
+ explicit ScopedHandle(HANDLE h) : handle_(nullptr) { Set(h); }
+
+ ~ScopedHandle() { Close(); }
+
+ ScopedHandle& operator=(const ScopedHandle&) = delete;
+ bool operator==(const ScopedHandle&) const = delete;
+ bool operator!=(const ScopedHandle&) const = delete;
+
+ // Use this instead of comparing to INVALID_HANDLE_VALUE.
+ bool IsValid() const { return handle_ != nullptr; }
+
+ void Set(HANDLE new_handle) {
+ Close();
+ // Windows is inconsistent about invalid handles.
+ // See https://blogs.msdn.microsoft.com/oldnewthing/20040302-00/?p=40443
+ // for details.
+ if (new_handle != INVALID_HANDLE_VALUE) {
+ handle_ = new_handle;
+ }
+ }
+
+ HANDLE Get() const { return handle_; }
+
+ operator HANDLE() const { return handle_; }
+
+ void Close() {
+ if (handle_) {
+ if (!::CloseHandle(handle_)) {
+ RTC_DCHECK_NOTREACHED();
+ }
+ handle_ = nullptr;
+ }
+ }
+
+ private:
+ HANDLE handle_;
+};
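+
+// Usage sketch (illustrative only):
+//
+//   ScopedHandle audio_samples_event(
+//       ::CreateEvent(nullptr, false, false, nullptr));
+//   if (audio_samples_event.IsValid()) {
+//     // The handle is closed automatically when the scope ends.
+//   }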
+
+// Utility methods for the Core Audio API on Windows.
+// Always ensure that Core Audio is supported before using these methods.
+// Use webrtc_win::core_audio_utility::IsSupported() for this purpose.
+// Also, all methods must be called on a valid COM thread. This can be done
+// by using the ScopedCOMInitializer helper class.
+// These methods are based on media::CoreAudioUtil in Chrome.
+namespace core_audio_utility {
+
+// Helper class which automates casting between WAVEFORMATEX and
+// WAVEFORMATEXTENSIBLE raw pointers using implicit constructors and
+// operator overloading. Note that no memory is allocated by this utility
+// structure. It only serves as a handle (or wrapper) for the structure
+// provided to it at construction.
+class WaveFormatWrapper {
+ public:
+ WaveFormatWrapper(WAVEFORMATEXTENSIBLE* p)
+ : ptr_(reinterpret_cast<WAVEFORMATEX*>(p)) {}
+ WaveFormatWrapper(WAVEFORMATEX* p) : ptr_(p) {}
+ ~WaveFormatWrapper() = default;
+
+ operator WAVEFORMATEX*() const { return ptr_; }
+ WAVEFORMATEX* operator->() const { return ptr_; }
+ WAVEFORMATEX* get() const { return ptr_; }
+ WAVEFORMATEXTENSIBLE* GetExtensible() const;
+
+ bool IsExtensible() const;
+ bool IsPcm() const;
+ bool IsFloat() const;
+ size_t size() const;
+
+ private:
+ WAVEFORMATEX* ptr_;
+};
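+
+// Usage sketch (illustrative only): wraps an existing structure so that its
+// actual type and size can be queried without explicit casts at the call
+// site.
+//
+//   WAVEFORMATEXTENSIBLE format = {};
+//   WaveFormatWrapper wrapped_format(&format);
+//   if (wrapped_format.IsExtensible() && wrapped_format.IsFloat()) {
+//     // Floating-point samples; see Samples.wValidBitsPerSample.
+//   }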
+
+// Returns true if Windows Core Audio is supported.
+// Always verify that this method returns true before using any of the
+// other methods in this class.
+bool IsSupported();
+
+// Returns true if Multimedia Class Scheduler service (MMCSS) is supported.
+// The MMCSS enables multimedia applications to ensure that their time-sensitive
+// processing receives prioritized access to CPU resources without denying CPU
+// resources to lower-priority applications.
+bool IsMMCSSSupported();
+
+// The MMDevice API lets clients discover the audio endpoint devices in the
+// system and determine which devices are suitable for the application to use.
+// Header file Mmdeviceapi.h defines the interfaces in the MMDevice API.
+
+// Number of active audio devices in the specified data flow direction.
+// Set `data_flow` to eAll to retrieve the total number of active audio
+// devices.
+int NumberOfActiveDevices(EDataFlow data_flow);
+
+// Returns 1, 2, or 3 depending on what version of IAudioClient the platform
+// supports.
+// Example: IAudioClient2 is supported on Windows 8 and higher => 2 is returned.
+uint32_t GetAudioClientVersion();
+
+// Creates an IMMDeviceEnumerator interface which provides methods for
+// enumerating audio endpoint devices.
+// TODO(henrika): IMMDeviceEnumerator::RegisterEndpointNotificationCallback.
+Microsoft::WRL::ComPtr<IMMDeviceEnumerator> CreateDeviceEnumerator();
+
+// These functions return the unique device id of the default or
+// communications input/output device, or an empty string if no such device
+// exists or if the device has been disabled.
+std::string GetDefaultInputDeviceID();
+std::string GetDefaultOutputDeviceID();
+std::string GetCommunicationsInputDeviceID();
+std::string GetCommunicationsOutputDeviceID();
+
+// Creates an IMMDevice interface corresponding to the unique device id in
+// `device_id`, or by data-flow direction and role if `device_id` is set to
+// AudioDeviceName::kDefaultDeviceId.
+Microsoft::WRL::ComPtr<IMMDevice> CreateDevice(absl::string_view device_id,
+ EDataFlow data_flow,
+ ERole role);
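+
+// Example (illustrative only): create the default render device for the
+// console role.
+//
+//   Microsoft::WRL::ComPtr<IMMDevice> device = CreateDevice(
+//       AudioDeviceName::kDefaultDeviceId, eRender, eConsole);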
+
+// Returns the unique ID and user-friendly name of a given endpoint device.
+// Example: "{0.0.1.00000000}.{8db6020f-18e3-4f25-b6f5-7726c9122574}", and
+// "Microphone (Realtek High Definition Audio)".
+webrtc::AudioDeviceName GetDeviceName(IMMDevice* device);
+
+// Gets the user-friendly name of the endpoint device which is represented
+// by a unique id in `device_id`, or by data-flow direction and role if
+// `device_id` is set to AudioDeviceName::kDefaultDeviceId.
+std::string GetFriendlyName(absl::string_view device_id,
+ EDataFlow data_flow,
+ ERole role);
+
+// Query if the audio device is a rendering device or a capture device.
+EDataFlow GetDataFlow(IMMDevice* device);
+
+// Enumerates all input devices and adds the names (friendly name and unique
+// device id) to the list in `device_names`.
+bool GetInputDeviceNames(webrtc::AudioDeviceNames* device_names);
+
+// Enumerates all output devices and adds the names (friendly name and unique
+// device id) to the list in `device_names`.
+bool GetOutputDeviceNames(webrtc::AudioDeviceNames* device_names);
+
+// The Windows Audio Session API (WASAPI) enables client applications to
+// manage the flow of audio data between the application and an audio endpoint
+// device. Header files Audioclient.h and Audiopolicy.h define the WASAPI
+// interfaces.
+
+// Creates an IAudioSessionManager2 interface for the specified `device`.
+// This interface provides access to, e.g., the IAudioSessionEnumerator.
+Microsoft::WRL::ComPtr<IAudioSessionManager2> CreateSessionManager2(
+ IMMDevice* device);
+
+// Creates an IAudioSessionEnumerator interface for the specified `device`.
+// The client can use the interface to enumerate audio sessions on the audio
+// device.
+Microsoft::WRL::ComPtr<IAudioSessionEnumerator> CreateSessionEnumerator(
+ IMMDevice* device);
+
+// Number of active audio sessions for the given `device`. Expired or inactive
+// sessions are not included.
+int NumberOfActiveSessions(IMMDevice* device);
+
+// Creates an IAudioClient instance for a specific device or the default
+// device specified by data-flow direction and role.
+Microsoft::WRL::ComPtr<IAudioClient> CreateClient(absl::string_view device_id,
+ EDataFlow data_flow,
+ ERole role);
+Microsoft::WRL::ComPtr<IAudioClient2> CreateClient2(absl::string_view device_id,
+ EDataFlow data_flow,
+ ERole role);
+Microsoft::WRL::ComPtr<IAudioClient3> CreateClient3(absl::string_view device_id,
+ EDataFlow data_flow,
+ ERole role);
+
+// Sets the AudioCategory_Communications category. Should be called before
+// GetSharedModeMixFormat() and IsFormatSupported(). The `client` argument must
+// be an IAudioClient2 or IAudioClient3 interface pointer, hence only supported
+// on Windows 8 and above.
+// TODO(henrika): evaluate effect (if any).
+HRESULT SetClientProperties(IAudioClient2* client);
+
+// Returns the buffer size limits of the hardware audio engine in
+// 100-nanosecond units given a specified `format`. Does not require prior
+// audio stream initialization. The `client` argument must be an IAudioClient2
+// or IAudioClient3 interface pointer, hence only supported on Windows 8 and
+// above.
+// TODO(henrika): always fails with AUDCLNT_E_OFFLOAD_MODE_ONLY.
+HRESULT GetBufferSizeLimits(IAudioClient2* client,
+ const WAVEFORMATEXTENSIBLE* format,
+ REFERENCE_TIME* min_buffer_duration,
+ REFERENCE_TIME* max_buffer_duration);
+
+// Get the mix format that the audio engine uses internally for processing
+// of shared-mode streams. The client can call this method before calling
+// IAudioClient::Initialize. When creating a shared-mode stream for an audio
+// endpoint device, the Initialize method always accepts the stream format
+// obtained by this method.
+HRESULT GetSharedModeMixFormat(IAudioClient* client,
+ WAVEFORMATEXTENSIBLE* format);
+
+// Returns true if the specified `client` supports the format in `format`
+// for the given `share_mode` (shared or exclusive). The client can call this
+// method before calling IAudioClient::Initialize.
+bool IsFormatSupported(IAudioClient* client,
+ AUDCLNT_SHAREMODE share_mode,
+ const WAVEFORMATEXTENSIBLE* format);
+
+// For a shared-mode stream, the audio engine periodically processes the
+// data in the endpoint buffer at the period obtained in `device_period`.
+// For an exclusive mode stream, `device_period` corresponds to the minimum
+// time interval between successive processing by the endpoint device.
+// This period plus the stream latency between the buffer and endpoint device
+// represents the minimum possible latency that an audio application can
+// achieve. The time in `device_period` is expressed in 100-nanosecond units.
+HRESULT GetDevicePeriod(IAudioClient* client,
+ AUDCLNT_SHAREMODE share_mode,
+ REFERENCE_TIME* device_period);
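+
+// Usage sketch (illustrative only; assumes that `client` holds a valid
+// IAudioClient pointer):
+//
+//   REFERENCE_TIME device_period = 0;
+//   if (SUCCEEDED(GetDevicePeriod(client.Get(), AUDCLNT_SHAREMODE_SHARED,
+//                                 &device_period))) {
+//     int64_t period_ms = ReferenceTimeToTimeDelta(device_period).ms();
+//   }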
+
+// Returns the range of periodicities supported by the engine for the specified
+// stream `format`. The periodicity of the engine is the rate at which the
+// engine wakes an event-driven audio client to transfer audio data to or from
+// the engine. Can be used for low-latency support on some devices.
+// The `client` argument must be an IAudioClient3 interface pointer, hence only
+// supported on Windows 10 and above.
+HRESULT GetSharedModeEnginePeriod(IAudioClient3* client3,
+ const WAVEFORMATEXTENSIBLE* format,
+ uint32_t* default_period_in_frames,
+ uint32_t* fundamental_period_in_frames,
+ uint32_t* min_period_in_frames,
+ uint32_t* max_period_in_frames);
+
+// Get the preferred audio parameters for the given `client` corresponding to
+// the stream format that the audio engine uses for its internal processing of
+// shared-mode streams. The acquired values should only be used for
+// shared-mode streams since there are no preferred settings for an
+// exclusive-mode stream.
+HRESULT GetPreferredAudioParameters(IAudioClient* client,
+ webrtc::AudioParameters* params);
+// As above but override the preferred sample rate and use `sample_rate`
+// instead. Intended mainly for testing purposes and in combination with rate
+// conversion.
+HRESULT GetPreferredAudioParameters(IAudioClient* client,
+ webrtc::AudioParameters* params,
+ uint32_t sample_rate);
+
+// After activating an IAudioClient interface on an audio endpoint device,
+// the client must initialize it once, and only once, to initialize the audio
+// stream between the client and the device. In shared mode, the client
+// connects indirectly through the audio engine which does the mixing.
+// If a valid event is provided in `event_handle`, the client will be
+// initialized for event-driven buffer handling. If `event_handle` is set to
+// nullptr, event-driven buffer handling is not utilized. To achieve the
+// minimum stream latency between the client application and audio endpoint
+// device, set `buffer_duration` to 0. A client has the option of requesting a
+// buffer size that is larger than what is strictly necessary to make timing
+// glitches rare or nonexistent. Increasing the buffer size does not necessarily
+// increase the stream latency. Each unit of reference time is 100 nanoseconds.
+// The `auto_convert_pcm` parameter can be used for testing purposes to ensure
+// that the sample rate of the client side does not have to match the audio
+// engine mix format. If `auto_convert_pcm` is set to true, a rate converter
+// will be inserted to convert between the sample rate in `format` and the
+// preferred rate given by GetPreferredAudioParameters().
+// The output parameter `endpoint_buffer_size` contains the size of the
+// endpoint buffer and it is expressed as the number of audio frames the
+// buffer can hold.
+HRESULT SharedModeInitialize(IAudioClient* client,
+ const WAVEFORMATEXTENSIBLE* format,
+ HANDLE event_handle,
+ REFERENCE_TIME buffer_duration,
+ bool auto_convert_pcm,
+ uint32_t* endpoint_buffer_size);
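+
+// Usage sketch (illustrative only; assumes valid `client` and `format`
+// variables): event-driven initialization with the minimum possible buffer
+// duration and no built-in rate conversion.
+//
+//   ScopedHandle event_handle(::CreateEvent(nullptr, false, false, nullptr));
+//   uint32_t endpoint_buffer_size = 0;
+//   HRESULT hr = SharedModeInitialize(client.Get(), &format, event_handle, 0,
+//                                     false, &endpoint_buffer_size);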
+
+// Works as SharedModeInitialize() but adds support for using smaller engine
+// periods than the default period.
+// The `client` argument must be an IAudioClient3 interface pointer, hence only
+// supported on Windows 10 and above.
+// TODO(henrika): can probably be merged into SharedModeInitialize() to avoid
+// duplicating code. Keeping as separate method for now until decided if we
+// need low-latency support.
+HRESULT SharedModeInitializeLowLatency(IAudioClient3* client,
+ const WAVEFORMATEXTENSIBLE* format,
+ HANDLE event_handle,
+ uint32_t period_in_frames,
+ bool auto_convert_pcm,
+ uint32_t* endpoint_buffer_size);
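+
+// Usage sketch (illustrative only; assumes valid `client3`, `format` and
+// `event_handle` variables): query the supported engine periods and
+// initialize using the smallest one.
+//
+//   uint32_t default_period = 0, fundamental_period = 0;
+//   uint32_t min_period = 0, max_period = 0;
+//   GetSharedModeEnginePeriod(client3.Get(), &format, &default_period,
+//                             &fundamental_period, &min_period, &max_period);
+//   uint32_t endpoint_buffer_size = 0;
+//   SharedModeInitializeLowLatency(client3.Get(), &format, event_handle,
+//                                  min_period, false, &endpoint_buffer_size);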
+
+// Creates an IAudioRenderClient client for an existing IAudioClient given by
+// `client`. The IAudioRenderClient interface enables a client to write
+// output data to a rendering endpoint buffer. The methods in this interface
+// manage the movement of data packets that contain audio-rendering data.
+Microsoft::WRL::ComPtr<IAudioRenderClient> CreateRenderClient(
+ IAudioClient* client);
+
+// Creates an IAudioCaptureClient client for an existing IAudioClient given by
+// `client`. The IAudioCaptureClient interface enables a client to read
+// input data from a capture endpoint buffer. The methods in this interface
+// manage the movement of data packets that contain capture data.
+Microsoft::WRL::ComPtr<IAudioCaptureClient> CreateCaptureClient(
+ IAudioClient* client);
+
+// Creates an IAudioClock interface for an existing IAudioClient given by
+// `client`. The IAudioClock interface enables a client to monitor a stream's
+// data rate and the current position in the stream.
+Microsoft::WRL::ComPtr<IAudioClock> CreateAudioClock(IAudioClient* client);
+
+// Creates an IAudioSessionControl interface for an existing IAudioClient
+// given by `client`. The IAudioSessionControl interface enables a client to
+// configure the control parameters for an audio session and to monitor
+// events in the session.
+Microsoft::WRL::ComPtr<IAudioSessionControl> CreateAudioSessionControl(
+ IAudioClient* client);
+
+// Creates an ISimpleAudioVolume interface for an existing IAudioClient given by
+// `client`. This interface enables a client to control the master volume level
+// of an active audio session.
+Microsoft::WRL::ComPtr<ISimpleAudioVolume> CreateSimpleAudioVolume(
+ IAudioClient* client);
+
+// Fills up the endpoint rendering buffer with silence for an existing
+// IAudioClient given by `client` and a corresponding IAudioRenderClient
+// given by `render_client`.
+bool FillRenderEndpointBufferWithSilence(IAudioClient* client,
+ IAudioRenderClient* render_client);
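+
+// Usage sketch (illustrative only): pre-filling the buffer with silence
+// before calling IAudioClient::Start() reduces the risk of an initial
+// glitch.
+//
+//   if (FillRenderEndpointBufferWithSilence(client.Get(),
+//                                           render_client.Get())) {
+//     client->Start();
+//   }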
+
+// Returns a string describing all fields of the format structure in
+// `format`. Also supports extended versions (WAVEFORMATEXTENSIBLE).
+std::string WaveFormatToString(WaveFormatWrapper format);
+
+// Converts Windows internal REFERENCE_TIME (100 nanosecond units) into
+// generic webrtc::TimeDelta which then can be converted to any time unit.
+webrtc::TimeDelta ReferenceTimeToTimeDelta(REFERENCE_TIME time);
+
+// Converts size expressed in number of audio frames, `num_frames`, into
+// milliseconds given a specified `sample_rate`.
+double FramesToMilliseconds(uint32_t num_frames, uint16_t sample_rate);
+
+// Converts a COM error into a human-readable string.
+std::string ErrorToString(const _com_error& error);
+
+} // namespace core_audio_utility
+} // namespace webrtc_win
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_UTILITY_WIN_H_
diff --git a/third_party/libwebrtc/modules/audio_device/win/core_audio_utility_win_unittest.cc b/third_party/libwebrtc/modules/audio_device/win/core_audio_utility_win_unittest.cc
new file mode 100644
index 0000000000..fc4a610eef
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/win/core_audio_utility_win_unittest.cc
@@ -0,0 +1,877 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/win/core_audio_utility_win.h"
+
+#include "rtc_base/arraysize.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/win/scoped_com_initializer.h"
+#include "rtc_base/win/windows_version.h"
+#include "test/gtest.h"
+
+using Microsoft::WRL::ComPtr;
+using webrtc::AudioDeviceName;
+
+namespace webrtc {
+namespace webrtc_win {
+namespace {
+
+#define ABORT_TEST_IF_NOT(requirements_satisfied) \
+ do { \
+ bool fail = false; \
+ if (ShouldAbortTest(requirements_satisfied, #requirements_satisfied, \
+ &fail)) { \
+ if (fail) \
+ FAIL(); \
+ else \
+ return; \
+ } \
+ } while (false)
+
+bool ShouldAbortTest(bool requirements_satisfied,
+ const char* requirements_expression,
+ bool* should_fail) {
+ if (!requirements_satisfied) {
+ RTC_LOG(LS_ERROR) << "Requirement(s) not satisfied ("
+ << requirements_expression << ")";
+ // TODO(henrika): improve hard-coded condition to determine if test should
+ // fail or be ignored. Could use e.g. a command-line argument here to
+ // determine if the test should fail or not.
+ *should_fail = false;
+ return true;
+ }
+ *should_fail = false;
+ return false;
+}
+
+} // namespace
+
+// CoreAudioUtilityWinTest test fixture.
+class CoreAudioUtilityWinTest : public ::testing::Test {
+ protected:
+ CoreAudioUtilityWinTest() : com_init_(ScopedCOMInitializer::kMTA) {
+    // We must initialize the COM library on a thread before calling any of
+    // the library functions. All COM functions will otherwise return
+    // CO_E_NOTINITIALIZED.
+ EXPECT_TRUE(com_init_.Succeeded());
+
+ // Configure logging.
+ rtc::LogMessage::LogToDebug(rtc::LS_INFO);
+ rtc::LogMessage::LogTimestamps();
+ rtc::LogMessage::LogThreads();
+ }
+
+ virtual ~CoreAudioUtilityWinTest() {}
+
+ bool DevicesAvailable() {
+ return core_audio_utility::IsSupported() &&
+ core_audio_utility::NumberOfActiveDevices(eCapture) > 0 &&
+ core_audio_utility::NumberOfActiveDevices(eRender) > 0;
+ }
+
+ private:
+ ScopedCOMInitializer com_init_;
+};
+
+TEST_F(CoreAudioUtilityWinTest, WaveFormatWrapper) {
+ // Use default constructor for WAVEFORMATEX and verify its size.
+ WAVEFORMATEX format = {};
+ core_audio_utility::WaveFormatWrapper wave_format(&format);
+ EXPECT_FALSE(wave_format.IsExtensible());
+ EXPECT_EQ(wave_format.size(), sizeof(WAVEFORMATEX));
+ EXPECT_EQ(wave_format->cbSize, 0);
+
+ // Ensure that the stand-alone WAVEFORMATEX structure has a valid format tag
+ // and that all accessors work.
+ format.wFormatTag = WAVE_FORMAT_PCM;
+ EXPECT_FALSE(wave_format.IsExtensible());
+ EXPECT_EQ(wave_format.size(), sizeof(WAVEFORMATEX));
+ EXPECT_EQ(wave_format.get()->wFormatTag, WAVE_FORMAT_PCM);
+ EXPECT_EQ(wave_format->wFormatTag, WAVE_FORMAT_PCM);
+
+  // Next, ensure that the size is valid. A stand-alone structure is not
+  // extended.
+ EXPECT_EQ(wave_format.size(), sizeof(WAVEFORMATEX));
+
+ // Verify format types for the stand-alone version.
+ EXPECT_TRUE(wave_format.IsPcm());
+ EXPECT_FALSE(wave_format.IsFloat());
+ format.wFormatTag = WAVE_FORMAT_IEEE_FLOAT;
+ EXPECT_TRUE(wave_format.IsFloat());
+}
+
+TEST_F(CoreAudioUtilityWinTest, WaveFormatWrapperExtended) {
+ // Use default constructor for WAVEFORMATEXTENSIBLE and verify that it
+  // results in the same size as for WAVEFORMATEX even though the size of
+  // `format_ex` equals the size of WAVEFORMATEXTENSIBLE.
+ WAVEFORMATEXTENSIBLE format_ex = {};
+ core_audio_utility::WaveFormatWrapper wave_format_ex(&format_ex);
+ EXPECT_FALSE(wave_format_ex.IsExtensible());
+ EXPECT_EQ(wave_format_ex.size(), sizeof(WAVEFORMATEX));
+ EXPECT_EQ(wave_format_ex->cbSize, 0);
+
+ // Ensure that the extended structure has a valid format tag and that all
+ // accessors work.
+ format_ex.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
+ EXPECT_FALSE(wave_format_ex.IsExtensible());
+ EXPECT_EQ(wave_format_ex.size(), sizeof(WAVEFORMATEX));
+ EXPECT_EQ(wave_format_ex->wFormatTag, WAVE_FORMAT_EXTENSIBLE);
+ EXPECT_EQ(wave_format_ex.get()->wFormatTag, WAVE_FORMAT_EXTENSIBLE);
+
+ // Next, ensure that the size is valid (sum of stand-alone and extended).
+ // Now the structure qualifies as extended.
+ format_ex.Format.cbSize = sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX);
+ EXPECT_TRUE(wave_format_ex.IsExtensible());
+ EXPECT_EQ(wave_format_ex.size(), sizeof(WAVEFORMATEXTENSIBLE));
+ EXPECT_TRUE(wave_format_ex.GetExtensible());
+ EXPECT_EQ(wave_format_ex.GetExtensible()->Format.wFormatTag,
+ WAVE_FORMAT_EXTENSIBLE);
+
+ // Verify format types for the extended version.
+ EXPECT_FALSE(wave_format_ex.IsPcm());
+ format_ex.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
+ EXPECT_TRUE(wave_format_ex.IsPcm());
+ EXPECT_FALSE(wave_format_ex.IsFloat());
+ format_ex.SubFormat = KSDATAFORMAT_SUBTYPE_IEEE_FLOAT;
+ EXPECT_TRUE(wave_format_ex.IsFloat());
+}
+
+TEST_F(CoreAudioUtilityWinTest, NumberOfActiveDevices) {
+ ABORT_TEST_IF_NOT(DevicesAvailable());
+ int render_devices = core_audio_utility::NumberOfActiveDevices(eRender);
+ EXPECT_GT(render_devices, 0);
+ int capture_devices = core_audio_utility::NumberOfActiveDevices(eCapture);
+ EXPECT_GT(capture_devices, 0);
+ int total_devices = core_audio_utility::NumberOfActiveDevices(eAll);
+ EXPECT_EQ(total_devices, render_devices + capture_devices);
+}
+
+TEST_F(CoreAudioUtilityWinTest, GetAudioClientVersion) {
+ uint32_t client_version = core_audio_utility::GetAudioClientVersion();
+ EXPECT_GE(client_version, 1u);
+ EXPECT_LE(client_version, 3u);
+}
+
+TEST_F(CoreAudioUtilityWinTest, CreateDeviceEnumerator) {
+ ABORT_TEST_IF_NOT(DevicesAvailable());
+ ComPtr<IMMDeviceEnumerator> enumerator =
+ core_audio_utility::CreateDeviceEnumerator();
+ EXPECT_TRUE(enumerator.Get());
+}
+
+TEST_F(CoreAudioUtilityWinTest, GetDefaultInputDeviceID) {
+ ABORT_TEST_IF_NOT(DevicesAvailable());
+ std::string default_device_id = core_audio_utility::GetDefaultInputDeviceID();
+ EXPECT_FALSE(default_device_id.empty());
+}
+
+TEST_F(CoreAudioUtilityWinTest, GetDefaultOutputDeviceID) {
+ ABORT_TEST_IF_NOT(DevicesAvailable());
+ std::string default_device_id =
+ core_audio_utility::GetDefaultOutputDeviceID();
+ EXPECT_FALSE(default_device_id.empty());
+}
+
+TEST_F(CoreAudioUtilityWinTest, GetCommunicationsInputDeviceID) {
+ ABORT_TEST_IF_NOT(DevicesAvailable());
+ std::string default_device_id =
+ core_audio_utility::GetCommunicationsInputDeviceID();
+ EXPECT_FALSE(default_device_id.empty());
+}
+
+TEST_F(CoreAudioUtilityWinTest, GetCommunicationsOutputDeviceID) {
+ ABORT_TEST_IF_NOT(DevicesAvailable());
+ std::string default_device_id =
+ core_audio_utility::GetCommunicationsOutputDeviceID();
+ EXPECT_FALSE(default_device_id.empty());
+}
+
+TEST_F(CoreAudioUtilityWinTest, CreateDefaultDevice) {
+ ABORT_TEST_IF_NOT(DevicesAvailable());
+
+ struct {
+ EDataFlow flow;
+ ERole role;
+ } data[] = {{eRender, eConsole}, {eRender, eCommunications},
+ {eRender, eMultimedia}, {eCapture, eConsole},
+ {eCapture, eCommunications}, {eCapture, eMultimedia}};
+
+ // Create default devices for all flow/role combinations above.
+ ComPtr<IMMDevice> audio_device;
+ for (size_t i = 0; i < arraysize(data); ++i) {
+ audio_device = core_audio_utility::CreateDevice(
+ AudioDeviceName::kDefaultDeviceId, data[i].flow, data[i].role);
+ EXPECT_TRUE(audio_device.Get());
+ EXPECT_EQ(data[i].flow,
+ core_audio_utility::GetDataFlow(audio_device.Get()));
+ }
+
+ // Only eRender and eCapture are allowed as flow parameter.
+ audio_device = core_audio_utility::CreateDevice(
+ AudioDeviceName::kDefaultDeviceId, eAll, eConsole);
+ EXPECT_FALSE(audio_device.Get());
+}
+
+TEST_F(CoreAudioUtilityWinTest, CreateDevice) {
+ ABORT_TEST_IF_NOT(DevicesAvailable());
+
+ // Get name and ID of default device used for playback.
+ ComPtr<IMMDevice> default_render_device = core_audio_utility::CreateDevice(
+ AudioDeviceName::kDefaultDeviceId, eRender, eConsole);
+ AudioDeviceName default_render_name =
+ core_audio_utility::GetDeviceName(default_render_device.Get());
+ EXPECT_TRUE(default_render_name.IsValid());
+
+ // Use the unique ID as input to CreateDevice() and create a corresponding
+ // IMMDevice. The data-flow direction and role parameters are ignored for
+ // this scenario.
+ ComPtr<IMMDevice> audio_device = core_audio_utility::CreateDevice(
+ default_render_name.unique_id, EDataFlow(), ERole());
+ EXPECT_TRUE(audio_device.Get());
+
+  // Verify that the two IMMDevice interfaces represent the same endpoint
+ // by comparing their unique IDs.
+ AudioDeviceName device_name =
+ core_audio_utility::GetDeviceName(audio_device.Get());
+ EXPECT_EQ(default_render_name.unique_id, device_name.unique_id);
+}
+
+TEST_F(CoreAudioUtilityWinTest, GetDefaultDeviceName) {
+ ABORT_TEST_IF_NOT(DevicesAvailable());
+
+ struct {
+ EDataFlow flow;
+ ERole role;
+ } data[] = {{eRender, eConsole},
+ {eRender, eCommunications},
+ {eCapture, eConsole},
+ {eCapture, eCommunications}};
+
+ // Get name and ID of default devices for all flow/role combinations above.
+ ComPtr<IMMDevice> audio_device;
+ AudioDeviceName device_name;
+ for (size_t i = 0; i < arraysize(data); ++i) {
+ audio_device = core_audio_utility::CreateDevice(
+ AudioDeviceName::kDefaultDeviceId, data[i].flow, data[i].role);
+ device_name = core_audio_utility::GetDeviceName(audio_device.Get());
+ EXPECT_TRUE(device_name.IsValid());
+ }
+}
+
+TEST_F(CoreAudioUtilityWinTest, GetFriendlyName) {
+ ABORT_TEST_IF_NOT(DevicesAvailable());
+
+ // Get name and ID of default device used for recording.
+ ComPtr<IMMDevice> audio_device = core_audio_utility::CreateDevice(
+ AudioDeviceName::kDefaultDeviceId, eCapture, eConsole);
+ AudioDeviceName device_name =
+ core_audio_utility::GetDeviceName(audio_device.Get());
+ EXPECT_TRUE(device_name.IsValid());
+
+ // Use unique ID as input to GetFriendlyName() and compare the result
+ // with the already obtained friendly name for the default capture device.
+ std::string friendly_name = core_audio_utility::GetFriendlyName(
+ device_name.unique_id, eCapture, eConsole);
+ EXPECT_EQ(friendly_name, device_name.device_name);
+
+ // Same test as above but for playback.
+ audio_device = core_audio_utility::CreateDevice(
+ AudioDeviceName::kDefaultDeviceId, eRender, eConsole);
+ device_name = core_audio_utility::GetDeviceName(audio_device.Get());
+ friendly_name = core_audio_utility::GetFriendlyName(device_name.unique_id,
+ eRender, eConsole);
+ EXPECT_EQ(friendly_name, device_name.device_name);
+}
+
+TEST_F(CoreAudioUtilityWinTest, GetInputDeviceNames) {
+ ABORT_TEST_IF_NOT(DevicesAvailable());
+
+ webrtc::AudioDeviceNames device_names;
+ EXPECT_TRUE(core_audio_utility::GetInputDeviceNames(&device_names));
+  // The number of elements in the list should be two more than the number of
+  // active devices since we always add the default and default-communications
+  // devices at index 0 and 1.
+ EXPECT_EQ(static_cast<int>(device_names.size()),
+ 2 + core_audio_utility::NumberOfActiveDevices(eCapture));
+}
+
+TEST_F(CoreAudioUtilityWinTest, GetOutputDeviceNames) {
+ ABORT_TEST_IF_NOT(DevicesAvailable());
+
+ webrtc::AudioDeviceNames device_names;
+ EXPECT_TRUE(core_audio_utility::GetOutputDeviceNames(&device_names));
+  // The number of elements in the list should be two more than the number of
+  // active devices since we always add the default and default-communications
+  // devices at index 0 and 1.
+ EXPECT_EQ(static_cast<int>(device_names.size()),
+ 2 + core_audio_utility::NumberOfActiveDevices(eRender));
+}
+
+TEST_F(CoreAudioUtilityWinTest, CreateSessionManager2) {
+ ABORT_TEST_IF_NOT(DevicesAvailable() &&
+ rtc::rtc_win::GetVersion() >= rtc::rtc_win::VERSION_WIN7);
+
+ EDataFlow data_flow[] = {eRender, eCapture};
+
+ // Obtain reference to an IAudioSessionManager2 interface for a default audio
+ // endpoint device specified by two different data flows and the `eConsole`
+ // role.
+ for (size_t i = 0; i < arraysize(data_flow); ++i) {
+ ComPtr<IMMDevice> device(core_audio_utility::CreateDevice(
+ AudioDeviceName::kDefaultDeviceId, data_flow[i], eConsole));
+ EXPECT_TRUE(device.Get());
+ ComPtr<IAudioSessionManager2> session_manager =
+ core_audio_utility::CreateSessionManager2(device.Get());
+ EXPECT_TRUE(session_manager.Get());
+ }
+}
+
+TEST_F(CoreAudioUtilityWinTest, CreateSessionEnumerator) {
+ ABORT_TEST_IF_NOT(DevicesAvailable() &&
+ rtc::rtc_win::GetVersion() >= rtc::rtc_win::VERSION_WIN7);
+
+ EDataFlow data_flow[] = {eRender, eCapture};
+
+ // Obtain reference to an IAudioSessionEnumerator interface for a default
+ // audio endpoint device specified by two different data flows and the
+ // `eConsole` role.
+ for (size_t i = 0; i < arraysize(data_flow); ++i) {
+ ComPtr<IMMDevice> device(core_audio_utility::CreateDevice(
+ AudioDeviceName::kDefaultDeviceId, data_flow[i], eConsole));
+ EXPECT_TRUE(device.Get());
+ ComPtr<IAudioSessionEnumerator> session_enumerator =
+ core_audio_utility::CreateSessionEnumerator(device.Get());
+ EXPECT_TRUE(session_enumerator.Get());
+
+ // Perform a sanity test of the interface by asking for the total number
+    // of audio sessions that are open on the audio device. Note that we do
+    // not check whether the sessions are active.
+ int session_count = 0;
+ EXPECT_TRUE(SUCCEEDED(session_enumerator->GetCount(&session_count)));
+ EXPECT_GE(session_count, 0);
+ }
+}
+
+TEST_F(CoreAudioUtilityWinTest, NumberOfActiveSessions) {
+ ABORT_TEST_IF_NOT(DevicesAvailable() &&
+ rtc::rtc_win::GetVersion() >= rtc::rtc_win::VERSION_WIN7);
+
+ EDataFlow data_flow[] = {eRender, eCapture};
+
+  // Count the number of active audio sessions for a default audio endpoint
+  // device specified by two different data flows and the `eConsole` role.
+ // Ensure that the number of active audio sessions is less than or equal to
+ // the total number of audio sessions on that same device.
+ for (size_t i = 0; i < arraysize(data_flow); ++i) {
+ // Create an audio endpoint device.
+ ComPtr<IMMDevice> device(core_audio_utility::CreateDevice(
+ AudioDeviceName::kDefaultDeviceId, data_flow[i], eConsole));
+ EXPECT_TRUE(device.Get());
+
+ // Ask for total number of audio sessions on the created device.
+ ComPtr<IAudioSessionEnumerator> session_enumerator =
+ core_audio_utility::CreateSessionEnumerator(device.Get());
+ EXPECT_TRUE(session_enumerator.Get());
+ int total_session_count = 0;
+ EXPECT_TRUE(SUCCEEDED(session_enumerator->GetCount(&total_session_count)));
+ EXPECT_GE(total_session_count, 0);
+
+ // Use NumberOfActiveSessions and get number of active audio sessions.
+ int active_session_count =
+ core_audio_utility::NumberOfActiveSessions(device.Get());
+ EXPECT_LE(active_session_count, total_session_count);
+ }
+}
+
+TEST_F(CoreAudioUtilityWinTest, CreateClient) {
+ ABORT_TEST_IF_NOT(DevicesAvailable());
+
+ EDataFlow data_flow[] = {eRender, eCapture};
+
+ // Obtain reference to an IAudioClient interface for a default audio endpoint
+ // device specified by two different data flows and the `eConsole` role.
+ for (size_t i = 0; i < arraysize(data_flow); ++i) {
+ ComPtr<IAudioClient> client = core_audio_utility::CreateClient(
+ AudioDeviceName::kDefaultDeviceId, data_flow[i], eConsole);
+ EXPECT_TRUE(client.Get());
+ }
+}
+
+TEST_F(CoreAudioUtilityWinTest, CreateClient2) {
+ ABORT_TEST_IF_NOT(DevicesAvailable() &&
+ core_audio_utility::GetAudioClientVersion() >= 2);
+
+ EDataFlow data_flow[] = {eRender, eCapture};
+
+ // Obtain reference to an IAudioClient2 interface for a default audio endpoint
+ // device specified by two different data flows and the `eConsole` role.
+ for (size_t i = 0; i < arraysize(data_flow); ++i) {
+ ComPtr<IAudioClient2> client2 = core_audio_utility::CreateClient2(
+ AudioDeviceName::kDefaultDeviceId, data_flow[i], eConsole);
+ EXPECT_TRUE(client2.Get());
+ }
+}
+
+TEST_F(CoreAudioUtilityWinTest, CreateClient3) {
+ ABORT_TEST_IF_NOT(DevicesAvailable() &&
+ core_audio_utility::GetAudioClientVersion() >= 3);
+
+ EDataFlow data_flow[] = {eRender, eCapture};
+
+ // Obtain reference to an IAudioClient3 interface for a default audio endpoint
+ // device specified by two different data flows and the `eConsole` role.
+ for (size_t i = 0; i < arraysize(data_flow); ++i) {
+ ComPtr<IAudioClient3> client3 = core_audio_utility::CreateClient3(
+ AudioDeviceName::kDefaultDeviceId, data_flow[i], eConsole);
+ EXPECT_TRUE(client3.Get());
+ }
+}
+
+TEST_F(CoreAudioUtilityWinTest, SetClientProperties) {
+ ABORT_TEST_IF_NOT(DevicesAvailable() &&
+ core_audio_utility::GetAudioClientVersion() >= 2);
+
+ ComPtr<IAudioClient2> client2 = core_audio_utility::CreateClient2(
+ AudioDeviceName::kDefaultDeviceId, eRender, eConsole);
+ EXPECT_TRUE(client2.Get());
+ EXPECT_TRUE(
+ SUCCEEDED(core_audio_utility::SetClientProperties(client2.Get())));
+
+ ComPtr<IAudioClient3> client3 = core_audio_utility::CreateClient3(
+ AudioDeviceName::kDefaultDeviceId, eRender, eConsole);
+ EXPECT_TRUE(client3.Get());
+ EXPECT_TRUE(
+ SUCCEEDED(core_audio_utility::SetClientProperties(client3.Get())));
+}
+
+TEST_F(CoreAudioUtilityWinTest, GetSharedModeEnginePeriod) {
+ ABORT_TEST_IF_NOT(DevicesAvailable() &&
+ core_audio_utility::GetAudioClientVersion() >= 3);
+
+ ComPtr<IAudioClient3> client3 = core_audio_utility::CreateClient3(
+ AudioDeviceName::kDefaultDeviceId, eRender, eConsole);
+ EXPECT_TRUE(client3.Get());
+
+ WAVEFORMATPCMEX format;
+ EXPECT_TRUE(SUCCEEDED(
+ core_audio_utility::GetSharedModeMixFormat(client3.Get(), &format)));
+
+ uint32_t default_period = 0;
+ uint32_t fundamental_period = 0;
+ uint32_t min_period = 0;
+ uint32_t max_period = 0;
+ EXPECT_TRUE(SUCCEEDED(core_audio_utility::GetSharedModeEnginePeriod(
+ client3.Get(), &format, &default_period, &fundamental_period, &min_period,
+ &max_period)));
+}
+
+// TODO(henrika): figure out why usage of this API always reports
+// AUDCLNT_E_OFFLOAD_MODE_ONLY.
+TEST_F(CoreAudioUtilityWinTest, DISABLED_GetBufferSizeLimits) {
+ ABORT_TEST_IF_NOT(DevicesAvailable() &&
+ core_audio_utility::GetAudioClientVersion() >= 2);
+
+ ComPtr<IAudioClient2> client2 = core_audio_utility::CreateClient2(
+ AudioDeviceName::kDefaultDeviceId, eRender, eConsole);
+ EXPECT_TRUE(client2.Get());
+
+ WAVEFORMATPCMEX format;
+ EXPECT_TRUE(SUCCEEDED(
+ core_audio_utility::GetSharedModeMixFormat(client2.Get(), &format)));
+
+ REFERENCE_TIME min_buffer_duration = 0;
+ REFERENCE_TIME max_buffer_duration = 0;
+ EXPECT_TRUE(SUCCEEDED(core_audio_utility::GetBufferSizeLimits(
+ client2.Get(), &format, &min_buffer_duration, &max_buffer_duration)));
+}
+
+TEST_F(CoreAudioUtilityWinTest, GetSharedModeMixFormat) {
+ ABORT_TEST_IF_NOT(DevicesAvailable());
+
+ ComPtr<IAudioClient> client = core_audio_utility::CreateClient(
+ AudioDeviceName::kDefaultDeviceId, eRender, eConsole);
+ EXPECT_TRUE(client.Get());
+
+ // Perform a simple sanity test of the acquired format structure.
+ WAVEFORMATEXTENSIBLE format;
+ EXPECT_TRUE(SUCCEEDED(
+ core_audio_utility::GetSharedModeMixFormat(client.Get(), &format)));
+ core_audio_utility::WaveFormatWrapper wformat(&format);
+ EXPECT_GE(wformat->nChannels, 1);
+ EXPECT_GE(wformat->nSamplesPerSec, 8000u);
+ EXPECT_GE(wformat->wBitsPerSample, 16);
+ if (wformat.IsExtensible()) {
+ EXPECT_EQ(wformat->wFormatTag, WAVE_FORMAT_EXTENSIBLE);
+ EXPECT_GE(wformat->cbSize, 22);
+ EXPECT_GE(wformat.GetExtensible()->Samples.wValidBitsPerSample, 16);
+ } else {
+ EXPECT_EQ(wformat->cbSize, 0);
+ }
+}
+
+TEST_F(CoreAudioUtilityWinTest, IsFormatSupported) {
+ ABORT_TEST_IF_NOT(DevicesAvailable());
+
+ // Create a default render client.
+ ComPtr<IAudioClient> client = core_audio_utility::CreateClient(
+ AudioDeviceName::kDefaultDeviceId, eRender, eConsole);
+ EXPECT_TRUE(client.Get());
+
+ // Get the default, shared mode, mixing format.
+ WAVEFORMATEXTENSIBLE format;
+ EXPECT_TRUE(SUCCEEDED(
+ core_audio_utility::GetSharedModeMixFormat(client.Get(), &format)));
+
+ // In shared mode, the audio engine always supports the mix format.
+ EXPECT_TRUE(core_audio_utility::IsFormatSupported(
+ client.Get(), AUDCLNT_SHAREMODE_SHARED, &format));
+
+ // Use an invalid format and verify that it is not supported.
+ format.Format.nSamplesPerSec += 1;
+ EXPECT_FALSE(core_audio_utility::IsFormatSupported(
+ client.Get(), AUDCLNT_SHAREMODE_SHARED, &format));
+}
+
+TEST_F(CoreAudioUtilityWinTest, GetDevicePeriod) {
+ ABORT_TEST_IF_NOT(DevicesAvailable());
+
+ EDataFlow data_flow[] = {eRender, eCapture};
+
+ // Verify that the device periods are valid for the default render and
+ // capture devices.
+ ComPtr<IAudioClient> client;
+ for (size_t i = 0; i < arraysize(data_flow); ++i) {
+ REFERENCE_TIME shared_time_period = 0;
+ REFERENCE_TIME exclusive_time_period = 0;
+ client = core_audio_utility::CreateClient(AudioDeviceName::kDefaultDeviceId,
+ data_flow[i], eConsole);
+ EXPECT_TRUE(client.Get());
+ EXPECT_TRUE(SUCCEEDED(core_audio_utility::GetDevicePeriod(
+ client.Get(), AUDCLNT_SHAREMODE_SHARED, &shared_time_period)));
+ EXPECT_GT(shared_time_period, 0);
+ EXPECT_TRUE(SUCCEEDED(core_audio_utility::GetDevicePeriod(
+ client.Get(), AUDCLNT_SHAREMODE_EXCLUSIVE, &exclusive_time_period)));
+ EXPECT_GT(exclusive_time_period, 0);
+ EXPECT_LE(exclusive_time_period, shared_time_period);
+ }
+}
+
+TEST_F(CoreAudioUtilityWinTest, GetPreferredAudioParameters) {
+ ABORT_TEST_IF_NOT(DevicesAvailable());
+
+ struct {
+ EDataFlow flow;
+ ERole role;
+ } data[] = {{eRender, eConsole},
+ {eRender, eCommunications},
+ {eCapture, eConsole},
+ {eCapture, eCommunications}};
+
+ // Verify that the preferred audio parameters are OK for all flow/role
+ // combinations above.
+ ComPtr<IAudioClient> client;
+ webrtc::AudioParameters params;
+ for (size_t i = 0; i < arraysize(data); ++i) {
+ client = core_audio_utility::CreateClient(AudioDeviceName::kDefaultDeviceId,
+ data[i].flow, data[i].role);
+ EXPECT_TRUE(client.Get());
+ EXPECT_TRUE(SUCCEEDED(core_audio_utility::GetPreferredAudioParameters(
+ client.Get(), &params)));
+ EXPECT_TRUE(params.is_valid());
+ EXPECT_TRUE(params.is_complete());
+ }
+}
+
+TEST_F(CoreAudioUtilityWinTest, SharedModeInitialize) {
+ ABORT_TEST_IF_NOT(DevicesAvailable());
+
+ ComPtr<IAudioClient> client;
+ client = core_audio_utility::CreateClient(AudioDeviceName::kDefaultDeviceId,
+ eRender, eConsole);
+ EXPECT_TRUE(client.Get());
+
+ WAVEFORMATPCMEX format;
+ EXPECT_TRUE(SUCCEEDED(
+ core_audio_utility::GetSharedModeMixFormat(client.Get(), &format)));
+
+ // Perform a shared-mode initialization without event-driven buffer handling.
+ uint32_t endpoint_buffer_size = 0;
+ HRESULT hr = core_audio_utility::SharedModeInitialize(
+ client.Get(), &format, nullptr, 0, false, &endpoint_buffer_size);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_GT(endpoint_buffer_size, 0u);
+
+  // It is only possible to initialize a client once.
+ hr = core_audio_utility::SharedModeInitialize(
+ client.Get(), &format, nullptr, 0, false, &endpoint_buffer_size);
+ EXPECT_FALSE(SUCCEEDED(hr));
+ EXPECT_EQ(hr, AUDCLNT_E_ALREADY_INITIALIZED);
+
+ // Verify that it is possible to reinitialize the client after releasing it
+ // and then creating a new client.
+ client = core_audio_utility::CreateClient(AudioDeviceName::kDefaultDeviceId,
+ eRender, eConsole);
+ EXPECT_TRUE(client.Get());
+ hr = core_audio_utility::SharedModeInitialize(
+ client.Get(), &format, nullptr, 0, false, &endpoint_buffer_size);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_GT(endpoint_buffer_size, 0u);
+
+  // Use an unsupported format and verify that initialization fails.
+ // A simple way to emulate an invalid format is to use the shared-mode
+ // mixing format and modify the preferred sample rate.
+ client = core_audio_utility::CreateClient(AudioDeviceName::kDefaultDeviceId,
+ eRender, eConsole);
+ EXPECT_TRUE(client.Get());
+ format.Format.nSamplesPerSec = format.Format.nSamplesPerSec + 1;
+ EXPECT_FALSE(core_audio_utility::IsFormatSupported(
+ client.Get(), AUDCLNT_SHAREMODE_SHARED, &format));
+ hr = core_audio_utility::SharedModeInitialize(
+ client.Get(), &format, nullptr, 0, false, &endpoint_buffer_size);
+ EXPECT_TRUE(FAILED(hr));
+ EXPECT_EQ(hr, E_INVALIDARG);
+
+ // Finally, perform a shared-mode initialization using event-driven buffer
+ // handling. The event handle will be signaled when an audio buffer is ready
+ // to be processed by the client (not verified here). The event handle should
+ // be in the non-signaled state.
+ ScopedHandle event_handle(::CreateEvent(nullptr, TRUE, FALSE, nullptr));
+ client = core_audio_utility::CreateClient(AudioDeviceName::kDefaultDeviceId,
+ eRender, eConsole);
+ EXPECT_TRUE(client.Get());
+ EXPECT_TRUE(SUCCEEDED(
+ core_audio_utility::GetSharedModeMixFormat(client.Get(), &format)));
+ EXPECT_TRUE(core_audio_utility::IsFormatSupported(
+ client.Get(), AUDCLNT_SHAREMODE_SHARED, &format));
+ hr = core_audio_utility::SharedModeInitialize(
+ client.Get(), &format, event_handle, 0, false, &endpoint_buffer_size);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_GT(endpoint_buffer_size, 0u);
+
+ // TODO(henrika): possibly add test for signature which overrides the default
+ // sample rate.
+}
+
+TEST_F(CoreAudioUtilityWinTest, CreateRenderAndCaptureClients) {
+ ABORT_TEST_IF_NOT(DevicesAvailable());
+
+ EDataFlow data_flow[] = {eRender, eCapture};
+
+ WAVEFORMATPCMEX format;
+ uint32_t endpoint_buffer_size = 0;
+
+ for (size_t i = 0; i < arraysize(data_flow); ++i) {
+ ComPtr<IAudioClient> client;
+ ComPtr<IAudioRenderClient> render_client;
+ ComPtr<IAudioCaptureClient> capture_client;
+
+ // Create a default client for the given data-flow direction.
+ client = core_audio_utility::CreateClient(AudioDeviceName::kDefaultDeviceId,
+ data_flow[i], eConsole);
+ EXPECT_TRUE(client.Get());
+ EXPECT_TRUE(SUCCEEDED(
+ core_audio_utility::GetSharedModeMixFormat(client.Get(), &format)));
+ if (data_flow[i] == eRender) {
+      // It is not possible to create a render client using an uninitialized
+ // client interface.
+ render_client = core_audio_utility::CreateRenderClient(client.Get());
+ EXPECT_FALSE(render_client.Get());
+
+ // Do a proper initialization and verify that it works this time.
+ core_audio_utility::SharedModeInitialize(client.Get(), &format, nullptr,
+ 0, false, &endpoint_buffer_size);
+ render_client = core_audio_utility::CreateRenderClient(client.Get());
+ EXPECT_TRUE(render_client.Get());
+ EXPECT_GT(endpoint_buffer_size, 0u);
+ } else if (data_flow[i] == eCapture) {
+      // It is not possible to create a capture client using an uninitialized
+ // client interface.
+ capture_client = core_audio_utility::CreateCaptureClient(client.Get());
+ EXPECT_FALSE(capture_client.Get());
+
+ // Do a proper initialization and verify that it works this time.
+ core_audio_utility::SharedModeInitialize(client.Get(), &format, nullptr,
+ 0, false, &endpoint_buffer_size);
+ capture_client = core_audio_utility::CreateCaptureClient(client.Get());
+ EXPECT_TRUE(capture_client.Get());
+ EXPECT_GT(endpoint_buffer_size, 0u);
+ }
+ }
+}
+
+TEST_F(CoreAudioUtilityWinTest, CreateAudioClock) {
+ ABORT_TEST_IF_NOT(DevicesAvailable());
+
+ EDataFlow data_flow[] = {eRender, eCapture};
+
+ WAVEFORMATPCMEX format;
+ uint32_t endpoint_buffer_size = 0;
+
+ for (size_t i = 0; i < arraysize(data_flow); ++i) {
+ ComPtr<IAudioClient> client;
+ ComPtr<IAudioClock> audio_clock;
+
+ // Create a default client for the given data-flow direction.
+ client = core_audio_utility::CreateClient(AudioDeviceName::kDefaultDeviceId,
+ data_flow[i], eConsole);
+ EXPECT_TRUE(client.Get());
+ EXPECT_TRUE(SUCCEEDED(
+ core_audio_utility::GetSharedModeMixFormat(client.Get(), &format)));
+
+    // It is not possible to create an audio clock using an uninitialized
+    // client interface.
+ audio_clock = core_audio_utility::CreateAudioClock(client.Get());
+ EXPECT_FALSE(audio_clock.Get());
+
+ // Do a proper initialization and verify that it works this time.
+ core_audio_utility::SharedModeInitialize(client.Get(), &format, nullptr, 0,
+ false, &endpoint_buffer_size);
+ audio_clock = core_audio_utility::CreateAudioClock(client.Get());
+ EXPECT_TRUE(audio_clock.Get());
+ EXPECT_GT(endpoint_buffer_size, 0u);
+
+ // Use the audio clock and verify that querying the device frequency works.
+ UINT64 frequency = 0;
+ EXPECT_TRUE(SUCCEEDED(audio_clock->GetFrequency(&frequency)));
+ EXPECT_GT(frequency, 0u);
+ }
+}
+
+TEST_F(CoreAudioUtilityWinTest, CreateAudioSessionControl) {
+ ABORT_TEST_IF_NOT(DevicesAvailable());
+
+ EDataFlow data_flow[] = {eRender, eCapture};
+
+ WAVEFORMATPCMEX format;
+ uint32_t endpoint_buffer_size = 0;
+
+ for (size_t i = 0; i < arraysize(data_flow); ++i) {
+ ComPtr<IAudioClient> client;
+ ComPtr<IAudioSessionControl> audio_session_control;
+
+ // Create a default client for the given data-flow direction.
+ client = core_audio_utility::CreateClient(AudioDeviceName::kDefaultDeviceId,
+ data_flow[i], eConsole);
+ EXPECT_TRUE(client.Get());
+ EXPECT_TRUE(SUCCEEDED(
+ core_audio_utility::GetSharedModeMixFormat(client.Get(), &format)));
+
+ // It is not possible to create an audio session control using an
+    // uninitialized client interface.
+ audio_session_control =
+ core_audio_utility::CreateAudioSessionControl(client.Get());
+ EXPECT_FALSE(audio_session_control.Get());
+
+ // Do a proper initialization and verify that it works this time.
+ core_audio_utility::SharedModeInitialize(client.Get(), &format, nullptr, 0,
+ false, &endpoint_buffer_size);
+ audio_session_control =
+ core_audio_utility::CreateAudioSessionControl(client.Get());
+ EXPECT_TRUE(audio_session_control.Get());
+ EXPECT_GT(endpoint_buffer_size, 0u);
+
+ // Use the audio session control and verify that the session state can be
+ // queried. When a client opens a session by assigning the first stream to
+ // the session (by calling the IAudioClient::Initialize method), the initial
+ // session state is inactive. The session state changes from inactive to
+ // active when a stream in the session begins running (because the client
+ // has called the IAudioClient::Start method).
+ AudioSessionState state;
+ EXPECT_TRUE(SUCCEEDED(audio_session_control->GetState(&state)));
+ EXPECT_EQ(state, AudioSessionStateInactive);
+ }
+}
+
+TEST_F(CoreAudioUtilityWinTest, CreateSimpleAudioVolume) {
+ ABORT_TEST_IF_NOT(DevicesAvailable());
+
+ EDataFlow data_flow[] = {eRender, eCapture};
+
+ WAVEFORMATPCMEX format;
+ uint32_t endpoint_buffer_size = 0;
+
+ for (size_t i = 0; i < arraysize(data_flow); ++i) {
+ ComPtr<IAudioClient> client;
+ ComPtr<ISimpleAudioVolume> simple_audio_volume;
+
+ // Create a default client for the given data-flow direction.
+ client = core_audio_utility::CreateClient(AudioDeviceName::kDefaultDeviceId,
+ data_flow[i], eConsole);
+ EXPECT_TRUE(client.Get());
+ EXPECT_TRUE(SUCCEEDED(
+ core_audio_utility::GetSharedModeMixFormat(client.Get(), &format)));
+
+ // It is not possible to create an audio volume using an uninitialized
+ // client interface.
+ simple_audio_volume =
+ core_audio_utility::CreateSimpleAudioVolume(client.Get());
+ EXPECT_FALSE(simple_audio_volume.Get());
+
+ // Do a proper initialization and verify that it works this time.
+ core_audio_utility::SharedModeInitialize(client.Get(), &format, nullptr, 0,
+ false, &endpoint_buffer_size);
+ simple_audio_volume =
+ core_audio_utility::CreateSimpleAudioVolume(client.Get());
+ EXPECT_TRUE(simple_audio_volume.Get());
+ EXPECT_GT(endpoint_buffer_size, 0u);
+
+ // Use the audio volume interface and validate that it works. The volume
+    // level should be a value in the range 0.0 to 1.0 at the first call.
+ float volume = 0.0;
+ EXPECT_TRUE(SUCCEEDED(simple_audio_volume->GetMasterVolume(&volume)));
+ EXPECT_GE(volume, 0.0);
+ EXPECT_LE(volume, 1.0);
+
+ // Next, set a new volume and verify that the setter does its job.
+ const float target_volume = 0.5;
+ EXPECT_TRUE(SUCCEEDED(
+ simple_audio_volume->SetMasterVolume(target_volume, nullptr)));
+ EXPECT_TRUE(SUCCEEDED(simple_audio_volume->GetMasterVolume(&volume)));
+ EXPECT_EQ(volume, target_volume);
+ }
+}
+
+TEST_F(CoreAudioUtilityWinTest, FillRenderEndpointBufferWithSilence) {
+ ABORT_TEST_IF_NOT(DevicesAvailable());
+
+ // Create default clients using the default mixing format for shared mode.
+ ComPtr<IAudioClient> client(core_audio_utility::CreateClient(
+ AudioDeviceName::kDefaultDeviceId, eRender, eConsole));
+ EXPECT_TRUE(client.Get());
+
+ WAVEFORMATPCMEX format;
+ uint32_t endpoint_buffer_size = 0;
+ EXPECT_TRUE(SUCCEEDED(
+ core_audio_utility::GetSharedModeMixFormat(client.Get(), &format)));
+ core_audio_utility::SharedModeInitialize(client.Get(), &format, nullptr, 0,
+ false, &endpoint_buffer_size);
+ EXPECT_GT(endpoint_buffer_size, 0u);
+
+ ComPtr<IAudioRenderClient> render_client(
+ core_audio_utility::CreateRenderClient(client.Get()));
+ EXPECT_TRUE(render_client.Get());
+
+ // The endpoint audio buffer should not be filled up by default after being
+ // created.
+ UINT32 num_queued_frames = 0;
+ client->GetCurrentPadding(&num_queued_frames);
+ EXPECT_EQ(num_queued_frames, 0u);
+
+ // Fill it up with zeros and verify that the buffer is full.
+ // It is not possible to verify that the actual data consists of zeros
+ // since we can't access data that has already been sent to the endpoint
+ // buffer.
+ EXPECT_TRUE(core_audio_utility::FillRenderEndpointBufferWithSilence(
+ client.Get(), render_client.Get()));
+ client->GetCurrentPadding(&num_queued_frames);
+ EXPECT_EQ(num_queued_frames, endpoint_buffer_size);
+}
+
+} // namespace webrtc_win
+} // namespace webrtc