From 26a029d407be480d791972afb5975cf62c9360a6 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Fri, 19 Apr 2024 02:47:55 +0200 Subject: Adding upstream version 124.0.1. Signed-off-by: Daniel Baumann --- .../libwebrtc/modules/audio_device/BUILD.gn | 535 +++ third_party/libwebrtc/modules/audio_device/DEPS | 14 + third_party/libwebrtc/modules/audio_device/OWNERS | 2 + .../modules/audio_device/audio_device_buffer.cc | 539 +++ .../modules/audio_device/audio_device_buffer.h | 253 ++ .../modules/audio_device/audio_device_config.h | 30 + .../audio_device/audio_device_data_observer.cc | 373 ++ .../modules/audio_device/audio_device_generic.cc | 66 + .../modules/audio_device/audio_device_generic.h | 145 + .../modules/audio_device/audio_device_gn/moz.build | 205 + .../modules/audio_device/audio_device_impl.cc | 909 +++++ .../modules/audio_device/audio_device_impl.h | 180 + .../modules/audio_device/audio_device_name.cc | 27 + .../modules/audio_device/audio_device_name.h | 50 + .../modules/audio_device/audio_device_unittest.cc | 1241 ++++++ .../audio_device/dummy/audio_device_dummy.cc | 226 ++ .../audio_device/dummy/audio_device_dummy.h | 117 + .../audio_device/dummy/file_audio_device.cc | 508 +++ .../modules/audio_device/dummy/file_audio_device.h | 163 + .../dummy/file_audio_device_factory.cc | 62 + .../audio_device/dummy/file_audio_device_factory.h | 44 + .../modules/audio_device/fine_audio_buffer.cc | 130 + .../modules/audio_device/fine_audio_buffer.h | 94 + .../audio_device/fine_audio_buffer_unittest.cc | 158 + .../audio_device/g3doc/audio_device_module.md | 171 + .../modules/audio_device/include/audio_device.h | 194 + .../include/audio_device_data_observer.h | 72 + .../audio_device/include/audio_device_default.h | 132 + .../audio_device/include/audio_device_defines.h | 177 + .../audio_device/include/audio_device_factory.cc | 53 + .../audio_device/include/audio_device_factory.h | 59 + .../audio_device/include/fake_audio_device.h | 33 + .../audio_device/include/mock_audio_device.h | 156 + .../audio_device/include/mock_audio_transport.h | 81 + .../audio_device/include/test_audio_device.cc | 540 +++ .../audio_device/include/test_audio_device.h | 155 + .../include/test_audio_device_unittest.cc | 528 +++ .../audio_device/linux/alsasymboltable_linux.cc | 40 + .../audio_device/linux/alsasymboltable_linux.h | 148 + .../audio_device/linux/audio_device_alsa_linux.cc | 1636 ++++++++ .../audio_device/linux/audio_device_alsa_linux.h | 208 + .../audio_device/linux/audio_device_pulse_linux.cc | 2286 +++++++++++ .../audio_device/linux/audio_device_pulse_linux.h | 349 ++ .../linux/audio_mixer_manager_alsa_linux.cc | 979 +++++ .../linux/audio_mixer_manager_alsa_linux.h | 71 + .../linux/audio_mixer_manager_pulse_linux.cc | 844 ++++ .../linux/audio_mixer_manager_pulse_linux.h | 114 + .../linux/latebindingsymboltable_linux.cc | 106 + .../linux/latebindingsymboltable_linux.h | 168 + .../linux/pulseaudiosymboltable_linux.cc | 41 + .../linux/pulseaudiosymboltable_linux.h | 106 + .../modules/audio_device/mac/audio_device_mac.cc | 2500 ++++++++++++ .../modules/audio_device/mac/audio_device_mac.h | 350 ++ .../audio_device/mac/audio_mixer_manager_mac.cc | 924 +++++ .../audio_device/mac/audio_mixer_manager_mac.h | 73 + .../audio_device/mock_audio_device_buffer.h | 35 + .../modules/audio_device/test_audio_device_impl.cc | 211 + .../modules/audio_device/test_audio_device_impl.h | 198 + .../audio_device/test_audio_device_impl_test.cc | 275 ++ .../audio_device/win/audio_device_core_win.cc | 4178 ++++++++++++++++++++ 
.../audio_device/win/audio_device_core_win.h | 299 ++ .../audio_device/win/audio_device_module_win.cc | 522 +++ .../audio_device/win/audio_device_module_win.h | 87 + .../audio_device/win/core_audio_base_win.cc | 948 +++++ .../modules/audio_device/win/core_audio_base_win.h | 203 + .../audio_device/win/core_audio_input_win.cc | 453 +++ .../audio_device/win/core_audio_input_win.h | 73 + .../audio_device/win/core_audio_output_win.cc | 422 ++ .../audio_device/win/core_audio_output_win.h | 72 + .../audio_device/win/core_audio_utility_win.cc | 1529 +++++++ .../audio_device/win/core_audio_utility_win.h | 560 +++ .../win/core_audio_utility_win_unittest.cc | 877 ++++ 72 files changed, 30307 insertions(+) create mode 100644 third_party/libwebrtc/modules/audio_device/BUILD.gn create mode 100644 third_party/libwebrtc/modules/audio_device/DEPS create mode 100644 third_party/libwebrtc/modules/audio_device/OWNERS create mode 100644 third_party/libwebrtc/modules/audio_device/audio_device_buffer.cc create mode 100644 third_party/libwebrtc/modules/audio_device/audio_device_buffer.h create mode 100644 third_party/libwebrtc/modules/audio_device/audio_device_config.h create mode 100644 third_party/libwebrtc/modules/audio_device/audio_device_data_observer.cc create mode 100644 third_party/libwebrtc/modules/audio_device/audio_device_generic.cc create mode 100644 third_party/libwebrtc/modules/audio_device/audio_device_generic.h create mode 100644 third_party/libwebrtc/modules/audio_device/audio_device_gn/moz.build create mode 100644 third_party/libwebrtc/modules/audio_device/audio_device_impl.cc create mode 100644 third_party/libwebrtc/modules/audio_device/audio_device_impl.h create mode 100644 third_party/libwebrtc/modules/audio_device/audio_device_name.cc create mode 100644 third_party/libwebrtc/modules/audio_device/audio_device_name.h create mode 100644 third_party/libwebrtc/modules/audio_device/audio_device_unittest.cc create mode 100644 third_party/libwebrtc/modules/audio_device/dummy/audio_device_dummy.cc create mode 100644 third_party/libwebrtc/modules/audio_device/dummy/audio_device_dummy.h create mode 100644 third_party/libwebrtc/modules/audio_device/dummy/file_audio_device.cc create mode 100644 third_party/libwebrtc/modules/audio_device/dummy/file_audio_device.h create mode 100644 third_party/libwebrtc/modules/audio_device/dummy/file_audio_device_factory.cc create mode 100644 third_party/libwebrtc/modules/audio_device/dummy/file_audio_device_factory.h create mode 100644 third_party/libwebrtc/modules/audio_device/fine_audio_buffer.cc create mode 100644 third_party/libwebrtc/modules/audio_device/fine_audio_buffer.h create mode 100644 third_party/libwebrtc/modules/audio_device/fine_audio_buffer_unittest.cc create mode 100644 third_party/libwebrtc/modules/audio_device/g3doc/audio_device_module.md create mode 100644 third_party/libwebrtc/modules/audio_device/include/audio_device.h create mode 100644 third_party/libwebrtc/modules/audio_device/include/audio_device_data_observer.h create mode 100644 third_party/libwebrtc/modules/audio_device/include/audio_device_default.h create mode 100644 third_party/libwebrtc/modules/audio_device/include/audio_device_defines.h create mode 100644 third_party/libwebrtc/modules/audio_device/include/audio_device_factory.cc create mode 100644 third_party/libwebrtc/modules/audio_device/include/audio_device_factory.h create mode 100644 third_party/libwebrtc/modules/audio_device/include/fake_audio_device.h create mode 100644 
third_party/libwebrtc/modules/audio_device/include/mock_audio_device.h create mode 100644 third_party/libwebrtc/modules/audio_device/include/mock_audio_transport.h create mode 100644 third_party/libwebrtc/modules/audio_device/include/test_audio_device.cc create mode 100644 third_party/libwebrtc/modules/audio_device/include/test_audio_device.h create mode 100644 third_party/libwebrtc/modules/audio_device/include/test_audio_device_unittest.cc create mode 100644 third_party/libwebrtc/modules/audio_device/linux/alsasymboltable_linux.cc create mode 100644 third_party/libwebrtc/modules/audio_device/linux/alsasymboltable_linux.h create mode 100644 third_party/libwebrtc/modules/audio_device/linux/audio_device_alsa_linux.cc create mode 100644 third_party/libwebrtc/modules/audio_device/linux/audio_device_alsa_linux.h create mode 100644 third_party/libwebrtc/modules/audio_device/linux/audio_device_pulse_linux.cc create mode 100644 third_party/libwebrtc/modules/audio_device/linux/audio_device_pulse_linux.h create mode 100644 third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_alsa_linux.cc create mode 100644 third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_alsa_linux.h create mode 100644 third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.cc create mode 100644 third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.h create mode 100644 third_party/libwebrtc/modules/audio_device/linux/latebindingsymboltable_linux.cc create mode 100644 third_party/libwebrtc/modules/audio_device/linux/latebindingsymboltable_linux.h create mode 100644 third_party/libwebrtc/modules/audio_device/linux/pulseaudiosymboltable_linux.cc create mode 100644 third_party/libwebrtc/modules/audio_device/linux/pulseaudiosymboltable_linux.h create mode 100644 third_party/libwebrtc/modules/audio_device/mac/audio_device_mac.cc create mode 100644 third_party/libwebrtc/modules/audio_device/mac/audio_device_mac.h create mode 100644 third_party/libwebrtc/modules/audio_device/mac/audio_mixer_manager_mac.cc create mode 100644 third_party/libwebrtc/modules/audio_device/mac/audio_mixer_manager_mac.h create mode 100644 third_party/libwebrtc/modules/audio_device/mock_audio_device_buffer.h create mode 100644 third_party/libwebrtc/modules/audio_device/test_audio_device_impl.cc create mode 100644 third_party/libwebrtc/modules/audio_device/test_audio_device_impl.h create mode 100644 third_party/libwebrtc/modules/audio_device/test_audio_device_impl_test.cc create mode 100644 third_party/libwebrtc/modules/audio_device/win/audio_device_core_win.cc create mode 100644 third_party/libwebrtc/modules/audio_device/win/audio_device_core_win.h create mode 100644 third_party/libwebrtc/modules/audio_device/win/audio_device_module_win.cc create mode 100644 third_party/libwebrtc/modules/audio_device/win/audio_device_module_win.h create mode 100644 third_party/libwebrtc/modules/audio_device/win/core_audio_base_win.cc create mode 100644 third_party/libwebrtc/modules/audio_device/win/core_audio_base_win.h create mode 100644 third_party/libwebrtc/modules/audio_device/win/core_audio_input_win.cc create mode 100644 third_party/libwebrtc/modules/audio_device/win/core_audio_input_win.h create mode 100644 third_party/libwebrtc/modules/audio_device/win/core_audio_output_win.cc create mode 100644 third_party/libwebrtc/modules/audio_device/win/core_audio_output_win.h create mode 100644 third_party/libwebrtc/modules/audio_device/win/core_audio_utility_win.cc create mode 100644 
third_party/libwebrtc/modules/audio_device/win/core_audio_utility_win.h create mode 100644 third_party/libwebrtc/modules/audio_device/win/core_audio_utility_win_unittest.cc (limited to 'third_party/libwebrtc/modules/audio_device') diff --git a/third_party/libwebrtc/modules/audio_device/BUILD.gn b/third_party/libwebrtc/modules/audio_device/BUILD.gn new file mode 100644 index 0000000000..4726f93279 --- /dev/null +++ b/third_party/libwebrtc/modules/audio_device/BUILD.gn @@ -0,0 +1,535 @@ +# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. +# +# Use of this source code is governed by a BSD-style license +# that can be found in the LICENSE file in the root of the source +# tree. An additional intellectual property rights grant can be found +# in the file PATENTS. All contributing project authors may +# be found in the AUTHORS file in the root of the source tree. + +import("../../webrtc.gni") + +if (is_android) { + import("//build/config/android/config.gni") + import("//build/config/android/rules.gni") +} + +config("audio_device_warnings_config") { + if (is_win && is_clang) { + cflags = [ + # Disable warnings failing when compiling with Clang on Windows. + # https://bugs.chromium.org/p/webrtc/issues/detail?id=5366 + "-Wno-microsoft-goto", + ] + } +} + +rtc_source_set("audio_device_default") { + visibility = [ "*" ] + sources = [ "include/audio_device_default.h" ] + deps = [ ":audio_device_api" ] +} + +rtc_source_set("audio_device") { +if (!build_with_mozilla) { # See Bug 1820869. + visibility = [ "*" ] + public_deps += [ # no-presubmit-check TODO(webrtc:8603) + ":audio_device_api", + + # Deprecated. + # TODO(webrtc:7452): Remove this public dep. audio_device_impl should + # be depended on directly if needed. + ":audio_device_impl", + ] +} +} + +rtc_source_set("audio_device_api") { + visibility = [ "*" ] + sources = [ + "include/audio_device.h", + "include/audio_device_defines.h", + ] + deps = [ + "../../api:scoped_refptr", + "../../api/task_queue", + "../../rtc_base:checks", + "../../rtc_base:refcount", + "../../rtc_base:stringutils", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] +} + +rtc_library("audio_device_config") { + sources = [ "audio_device_config.h" ] +} + +rtc_library("audio_device_buffer") { +if (!build_with_mozilla) { # See Bug 1820869. 
+ sources = [ + "audio_device_buffer.cc", + "audio_device_buffer.h", + "fine_audio_buffer.cc", + "fine_audio_buffer.h", + ] + deps = [ + ":audio_device_api", + "../../api:array_view", + "../../api:sequence_checker", + "../../api/task_queue", + "../../common_audio:common_audio_c", + "../../rtc_base:buffer", + "../../rtc_base:checks", + "../../rtc_base:event_tracer", + "../../rtc_base:logging", + "../../rtc_base:macromagic", + "../../rtc_base:rtc_task_queue", + "../../rtc_base:safe_conversions", + "../../rtc_base:timestamp_aligner", + "../../rtc_base:timeutils", + "../../rtc_base/synchronization:mutex", + "../../system_wrappers", + "../../system_wrappers:metrics", + ] +} +} + +rtc_library("audio_device_generic") { + sources = [ + "audio_device_generic.cc", + "audio_device_generic.h", + ] + deps = [ + ":audio_device_api", + ":audio_device_buffer", + "../../rtc_base:logging", + ] +} + +rtc_library("audio_device_name") { + sources = [ + "audio_device_name.cc", + "audio_device_name.h", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings" ] +} + +rtc_source_set("windows_core_audio_utility") { + if (is_win && !build_with_chromium) { + sources = [ + "win/core_audio_utility_win.cc", + "win/core_audio_utility_win.h", + ] + + deps = [ + ":audio_device_api", + ":audio_device_name", + "../../api/units:time_delta", + "../../rtc_base:checks", + "../../rtc_base:logging", + "../../rtc_base:macromagic", + "../../rtc_base:platform_thread_types", + "../../rtc_base:stringutils", + "../../rtc_base/win:windows_version", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings:strings" ] + + libs = [ "oleaut32.lib" ] + } +} + +# An ADM with a dedicated factory method which does not depend on the +# audio_device_impl target. The goal is to use this new structure and +# gradually phase out the old design. +# TODO(henrika): currently only supported on Windows. 
+rtc_source_set("audio_device_module_from_input_and_output") { + visibility = [ "*" ] + if (is_win && !build_with_chromium) { + sources = [ + "include/audio_device_factory.cc", + "include/audio_device_factory.h", + ] + sources += [ + "win/audio_device_module_win.cc", + "win/audio_device_module_win.h", + "win/core_audio_base_win.cc", + "win/core_audio_base_win.h", + "win/core_audio_input_win.cc", + "win/core_audio_input_win.h", + "win/core_audio_output_win.cc", + "win/core_audio_output_win.h", + ] + + deps = [ + ":audio_device_api", + ":audio_device_buffer", + ":windows_core_audio_utility", + "../../api:make_ref_counted", + "../../api:scoped_refptr", + "../../api:sequence_checker", + "../../api/task_queue", + "../../rtc_base:checks", + "../../rtc_base:logging", + "../../rtc_base:macromagic", + "../../rtc_base:platform_thread", + "../../rtc_base:safe_conversions", + "../../rtc_base:stringutils", + "../../rtc_base:timeutils", + "../../rtc_base/win:scoped_com_initializer", + "../../rtc_base/win:windows_version", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/strings:strings", + "//third_party/abseil-cpp/absl/types:optional", + ] + } +} + +if (!build_with_chromium) { + rtc_library("test_audio_device_module") { + visibility = [ "*" ] + sources = [ + "include/test_audio_device.cc", + "include/test_audio_device.h", + "test_audio_device_impl.cc", + "test_audio_device_impl.h", + ] + deps = [ + ":audio_device_api", + ":audio_device_buffer", + ":audio_device_default", + ":audio_device_generic", + ":audio_device_impl", + "../../api:array_view", + "../../api:make_ref_counted", + "../../api:scoped_refptr", + "../../api/task_queue", + "../../api/units:time_delta", + "../../common_audio", + "../../rtc_base:buffer", + "../../rtc_base:checks", + "../../rtc_base:logging", + "../../rtc_base:macromagic", + "../../rtc_base:platform_thread", + "../../rtc_base:random", + "../../rtc_base:rtc_event", + "../../rtc_base:rtc_task_queue", + "../../rtc_base:safe_conversions", + "../../rtc_base:timeutils", + "../../rtc_base/synchronization:mutex", + "../../rtc_base/task_utils:repeating_task", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/strings", + "//third_party/abseil-cpp/absl/types:optional", + ] + } +} + +rtc_library("audio_device_dummy") { + visibility = [ "*" ] + sources = [ + "dummy/audio_device_dummy.cc", + "dummy/audio_device_dummy.h", + ] + deps = [ + ":audio_device_api", + ":audio_device_buffer", + ":audio_device_generic", + ] +} + +if (!build_with_chromium) { + rtc_library("file_audio_device") { + visibility = [ "*" ] + sources = [ + "dummy/file_audio_device.cc", + "dummy/file_audio_device.h", + "dummy/file_audio_device_factory.cc", + "dummy/file_audio_device_factory.h", + ] + defines = [] + if (rtc_use_dummy_audio_file_devices) { + defines += [ "WEBRTC_DUMMY_FILE_DEVICES" ] + } + deps = [ + ":audio_device_generic", + "../../rtc_base:checks", + "../../rtc_base:logging", + "../../rtc_base:platform_thread", + "../../rtc_base:stringutils", + "../../rtc_base:timeutils", + "../../rtc_base/synchronization:mutex", + "../../rtc_base/system:file_wrapper", + "../../system_wrappers", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings" ] + } +} + +# Contains default implementations of webrtc::AudioDeviceModule for Windows, +# Linux, Mac, iOS and Android. +rtc_library("audio_device_impl") { +if (!build_with_mozilla) { # See Bug 1820869. 
+ visibility = [ "*" ] + deps = [ + ":audio_device_api", + ":audio_device_buffer", + ":audio_device_config", + ":audio_device_default", + ":audio_device_dummy", + ":audio_device_generic", + "../../api:array_view", + "../../api:make_ref_counted", + "../../api:refcountedbase", + "../../api:scoped_refptr", + "../../api:sequence_checker", + "../../api/task_queue", + "../../api/units:time_delta", + "../../common_audio", + "../../common_audio:common_audio_c", + "../../rtc_base:buffer", + "../../rtc_base:checks", + "../../rtc_base:logging", + "../../rtc_base:macromagic", + "../../rtc_base:platform_thread", + "../../rtc_base:random", + "../../rtc_base:rtc_event", + "../../rtc_base:rtc_task_queue", + "../../rtc_base:safe_conversions", + "../../rtc_base:stringutils", + "../../rtc_base:timeutils", + "../../rtc_base/synchronization:mutex", + "../../rtc_base/system:arch", + "../../rtc_base/system:file_wrapper", + "../../rtc_base/task_utils:repeating_task", + "../../system_wrappers", + "../../system_wrappers:field_trial", + "../../system_wrappers:metrics", + "../utility", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/base:core_headers", + "//third_party/abseil-cpp/absl/strings:strings", + "//third_party/abseil-cpp/absl/types:optional", + ] + if (rtc_include_internal_audio_device && is_ios) { + deps += [ "../../sdk:audio_device" ] + } + + sources = [ "include/fake_audio_device.h" ] + + if (build_with_mozilla) { + sources -= [ + "include/test_audio_device.cc", + "include/test_audio_device.h", + ] + } + + defines = [] + cflags = [] + if (rtc_audio_device_plays_sinus_tone) { + defines += [ "AUDIO_DEVICE_PLAYS_SINUS_TONE" ] + } + if (rtc_enable_android_aaudio) { + defines += [ "WEBRTC_AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO" ] + } + if (rtc_include_internal_audio_device) { + sources += [ + "audio_device_data_observer.cc", + "audio_device_impl.cc", + "audio_device_impl.h", + "include/audio_device_data_observer.h", + ] + if (is_android) { + deps += [ "../../sdk/android:native_api_audio_device_module" ] + + if (build_with_mozilla) { + include_dirs += [ + "/config/external/nspr", + "/nsprpub/lib/ds", + "/nsprpub/pr/include", + ] + } + } + if (rtc_use_dummy_audio_file_devices) { + defines += [ "WEBRTC_DUMMY_FILE_DEVICES" ] + } else { + if (is_linux || is_chromeos) { + sources += [ + "linux/alsasymboltable_linux.cc", + "linux/alsasymboltable_linux.h", + "linux/audio_device_alsa_linux.cc", + "linux/audio_device_alsa_linux.h", + "linux/audio_mixer_manager_alsa_linux.cc", + "linux/audio_mixer_manager_alsa_linux.h", + "linux/latebindingsymboltable_linux.cc", + "linux/latebindingsymboltable_linux.h", + ] + defines += [ "WEBRTC_ENABLE_LINUX_ALSA" ] + libs = [ "dl" ] + if (rtc_use_x11) { + libs += [ "X11" ] + defines += [ "WEBRTC_USE_X11" ] + } + if (rtc_include_pulse_audio) { + defines += [ "WEBRTC_ENABLE_LINUX_PULSE" ] + } + sources += [ + "linux/audio_device_pulse_linux.cc", + "linux/audio_device_pulse_linux.h", + "linux/audio_mixer_manager_pulse_linux.cc", + "linux/audio_mixer_manager_pulse_linux.h", + "linux/pulseaudiosymboltable_linux.cc", + "linux/pulseaudiosymboltable_linux.h", + ] + } + if (is_mac) { + sources += [ + "mac/audio_device_mac.cc", + "mac/audio_device_mac.h", + "mac/audio_mixer_manager_mac.cc", + "mac/audio_mixer_manager_mac.h", + ] + deps += [ + ":audio_device_impl_frameworks", + "../third_party/portaudio:mac_portaudio", + ] + } + if (is_win) { + sources += [ + "win/audio_device_core_win.cc", + "win/audio_device_core_win.h", + ] + libs = [ + # Required for the built-in WASAPI AEC. 
+ "dmoguids.lib", + "wmcodecdspuuid.lib", + "amstrmid.lib", + "msdmo.lib", + "oleaut32.lib", + ] + deps += [ + "../../rtc_base:win32", + "../../rtc_base/win:scoped_com_initializer", + ] + } + configs += [ ":audio_device_warnings_config" ] + } + } else { + defines = [ "WEBRTC_DUMMY_AUDIO_BUILD" ] + } + + if (!build_with_chromium) { + deps += [ ":file_audio_device" ] + + # TODO(titovartem): remove after downstream is fixed + sources += [ "dummy/file_audio_device_factory.h" ] + } +} +} + +if (is_mac) { + rtc_source_set("audio_device_impl_frameworks") { + visibility = [ ":*" ] + frameworks = [ + # Needed for CoreGraphics: + "ApplicationServices.framework", + + "AudioToolbox.framework", + "CoreAudio.framework", + + # Needed for CGEventSourceKeyState in audio_device_mac.cc: + "CoreGraphics.framework", + ] + } +} + +if (!build_with_mozilla) { # See Bug 1820869. +rtc_source_set("mock_audio_device") { + visibility = [ "*" ] + testonly = true + sources = [ + "include/mock_audio_device.h", + "include/mock_audio_transport.h", + "mock_audio_device_buffer.h", + ] + deps = [ + ":audio_device", + ":audio_device_buffer", + ":audio_device_impl", + "../../api:make_ref_counted", + "../../test:test_support", + ] +} +} + +# See Bug 1820869 for !build_with_mozilla. +if (rtc_include_tests && !build_with_chromium && !build_with_mozilla) { + rtc_library("audio_device_unittests") { + testonly = true + + sources = [ + "fine_audio_buffer_unittest.cc", + "include/test_audio_device_unittest.cc", + "test_audio_device_impl_test.cc", + ] + deps = [ + ":audio_device", + ":audio_device_buffer", + ":audio_device_generic", + ":audio_device_impl", + ":mock_audio_device", + ":test_audio_device_module", + "../../api:array_view", + "../../api:scoped_refptr", + "../../api:sequence_checker", + "../../api/task_queue", + "../../api/task_queue:default_task_queue_factory", + "../../api/units:time_delta", + "../../api/units:timestamp", + "../../common_audio", + "../../rtc_base:buffer", + "../../rtc_base:checks", + "../../rtc_base:ignore_wundef", + "../../rtc_base:logging", + "../../rtc_base:macromagic", + "../../rtc_base:race_checker", + "../../rtc_base:rtc_event", + "../../rtc_base:safe_conversions", + "../../rtc_base:timeutils", + "../../rtc_base/synchronization:mutex", + "../../system_wrappers", + "../../test:fileutils", + "../../test:test_support", + "../../test/time_controller", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/strings", + "//third_party/abseil-cpp/absl/types:optional", + ] + if (is_linux || is_chromeos || is_mac || is_win) { + sources += [ "audio_device_unittest.cc" ] + } + if (is_win) { + sources += [ "win/core_audio_utility_win_unittest.cc" ] + deps += [ + ":audio_device_module_from_input_and_output", + ":windows_core_audio_utility", + "../../rtc_base/win:scoped_com_initializer", + "../../rtc_base/win:windows_version", + ] + } + if (is_android) { + deps += [ + "../../sdk/android:internal_jni", + "../../sdk/android:libjingle_peerconnection_java", + "../../sdk/android:native_api_jni", + "../../sdk/android:native_test_jni_onload", + "../utility", + ] + } + if (!rtc_include_internal_audio_device) { + defines = [ "WEBRTC_DUMMY_AUDIO_BUILD" ] + } + } +} diff --git a/third_party/libwebrtc/modules/audio_device/DEPS b/third_party/libwebrtc/modules/audio_device/DEPS new file mode 100644 index 0000000000..b0571deb0e --- /dev/null +++ b/third_party/libwebrtc/modules/audio_device/DEPS @@ -0,0 +1,14 @@ +include_rules = [ + "+common_audio", + "+system_wrappers", +] + +specific_include_rules = { + "ensure_initialized\.cc": [ 
+ "+sdk/android", + ], + "audio_device_impl\.cc": [ + "+sdk/objc", + "+sdk/android", + ], +} diff --git a/third_party/libwebrtc/modules/audio_device/OWNERS b/third_party/libwebrtc/modules/audio_device/OWNERS new file mode 100644 index 0000000000..22d03d552b --- /dev/null +++ b/third_party/libwebrtc/modules/audio_device/OWNERS @@ -0,0 +1,2 @@ +henrika@webrtc.org +tkchin@webrtc.org diff --git a/third_party/libwebrtc/modules/audio_device/audio_device_buffer.cc b/third_party/libwebrtc/modules/audio_device/audio_device_buffer.cc new file mode 100644 index 0000000000..f1bd8e823b --- /dev/null +++ b/third_party/libwebrtc/modules/audio_device/audio_device_buffer.cc @@ -0,0 +1,539 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/audio_device/audio_device_buffer.h" + +#include + +#include +#include +#include + +#include "common_audio/signal_processing/include/signal_processing_library.h" +#include "rtc_base/checks.h" +#include "rtc_base/logging.h" +#include "rtc_base/time_utils.h" +#include "rtc_base/trace_event.h" +#include "system_wrappers/include/metrics.h" + +namespace webrtc { + +static const char kTimerQueueName[] = "AudioDeviceBufferTimer"; + +// Time between two sucessive calls to LogStats(). +static const size_t kTimerIntervalInSeconds = 10; +static const size_t kTimerIntervalInMilliseconds = + kTimerIntervalInSeconds * rtc::kNumMillisecsPerSec; +// Min time required to qualify an audio session as a "call". If playout or +// recording has been active for less than this time we will not store any +// logs or UMA stats but instead consider the call as too short. 
+static const size_t kMinValidCallTimeTimeInSeconds = 10;
+static const size_t kMinValidCallTimeTimeInMilliseconds =
+    kMinValidCallTimeTimeInSeconds * rtc::kNumMillisecsPerSec;
+#ifdef AUDIO_DEVICE_PLAYS_SINUS_TONE
+static const double k2Pi = 6.28318530717959;
+#endif
+
+AudioDeviceBuffer::AudioDeviceBuffer(TaskQueueFactory* task_queue_factory,
+                                     bool create_detached)
+    : task_queue_(task_queue_factory->CreateTaskQueue(
+          kTimerQueueName,
+          TaskQueueFactory::Priority::NORMAL)),
+      audio_transport_cb_(nullptr),
+      rec_sample_rate_(0),
+      play_sample_rate_(0),
+      rec_channels_(0),
+      play_channels_(0),
+      playing_(false),
+      recording_(false),
+      typing_status_(false),
+      play_delay_ms_(0),
+      rec_delay_ms_(0),
+      num_stat_reports_(0),
+      last_timer_task_time_(0),
+      rec_stat_count_(0),
+      play_stat_count_(0),
+      play_start_time_(0),
+      only_silence_recorded_(true),
+      log_stats_(false) {
+  RTC_LOG(LS_INFO) << "AudioDeviceBuffer::ctor";
+#ifdef AUDIO_DEVICE_PLAYS_SINUS_TONE
+  phase_ = 0.0;
+  RTC_LOG(LS_WARNING) << "AUDIO_DEVICE_PLAYS_SINUS_TONE is defined!";
+#endif
+  if (create_detached) {
+    main_thread_checker_.Detach();
+  }
+}
+
+AudioDeviceBuffer::~AudioDeviceBuffer() {
+  RTC_DCHECK_RUN_ON(&main_thread_checker_);
+  RTC_DCHECK(!playing_);
+  RTC_DCHECK(!recording_);
+  RTC_LOG(LS_INFO) << "AudioDeviceBuffer::~dtor";
+}
+
+int32_t AudioDeviceBuffer::RegisterAudioCallback(
+    AudioTransport* audio_callback) {
+  RTC_DCHECK_RUN_ON(&main_thread_checker_);
+  RTC_DLOG(LS_INFO) << __FUNCTION__;
+  if (playing_ || recording_) {
+    RTC_LOG(LS_ERROR) << "Failed to set audio transport since media was active";
+    return -1;
+  }
+  audio_transport_cb_ = audio_callback;
+  return 0;
+}
+
+void AudioDeviceBuffer::StartPlayout() {
+  RTC_DCHECK_RUN_ON(&main_thread_checker_);
+  // TODO(henrika): allow for usage of DCHECK(!playing_) here instead. Today the
+  // ADM allows calling Start(), Start() by ignoring the second call but it
+  // makes more sense to only allow one call.
+  if (playing_) {
+    return;
+  }
+  RTC_DLOG(LS_INFO) << __FUNCTION__;
+  // Clear members tracking playout stats and do it on the task queue.
+  task_queue_.PostTask([this] { ResetPlayStats(); });
+  // Start a periodic timer based on task queue if not already done by the
+  // recording side.
+  if (!recording_) {
+    StartPeriodicLogging();
+  }
+  const int64_t now_time = rtc::TimeMillis();
+  // Clear members that are only touched on the main (creating) thread.
+  play_start_time_ = now_time;
+  playing_ = true;
+}
+
+void AudioDeviceBuffer::StartRecording() {
+  RTC_DCHECK_RUN_ON(&main_thread_checker_);
+  if (recording_) {
+    return;
+  }
+  RTC_DLOG(LS_INFO) << __FUNCTION__;
+  // Clear members tracking recording stats and do it on the task queue.
+  task_queue_.PostTask([this] { ResetRecStats(); });
+  // Start a periodic timer based on task queue if not already done by the
+  // playout side.
+  if (!playing_) {
+    StartPeriodicLogging();
+  }
+  // Clear members that will be touched on the main (creating) thread.
+  rec_start_time_ = rtc::TimeMillis();
+  recording_ = true;
+  // And finally a member which can be modified on the native audio thread.
+  // It is safe to do so since we know by design that the owning ADM has not
+  // yet started the native audio recording.
+  only_silence_recorded_ = true;
+}
+
+void AudioDeviceBuffer::StopPlayout() {
+  RTC_DCHECK_RUN_ON(&main_thread_checker_);
+  if (!playing_) {
+    return;
+  }
+  RTC_DLOG(LS_INFO) << __FUNCTION__;
+  playing_ = false;
+  // Stop periodic logging if no more media is active.
+  if (!recording_) {
+    StopPeriodicLogging();
+  }
+  RTC_LOG(LS_INFO) << "total playout time: "
+                   << rtc::TimeSince(play_start_time_);
+}
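The start/stop methods above also gate the periodic LogStats() timer on whether any media is still active, and RegisterAudioCallback() deliberately fails once playout or recording has begun. A minimal sketch of that ordering contract, assuming the default task-queue factory and the MockAudioTransport test helper added elsewhere in this patch (the sketch function itself is illustrative, not part of the file):

```cpp
#include <memory>

#include "api/task_queue/default_task_queue_factory.h"
#include "modules/audio_device/audio_device_buffer.h"
#include "modules/audio_device/include/mock_audio_transport.h"

void AudioDeviceBufferLifecycleSketch() {
  std::unique_ptr<webrtc::TaskQueueFactory> factory =
      webrtc::CreateDefaultTaskQueueFactory();
  webrtc::AudioDeviceBuffer adb(factory.get());
  webrtc::test::MockAudioTransport transport;
  // Must happen while no media is active; returns -1 otherwise.
  adb.RegisterAudioCallback(&transport);
  adb.SetPlayoutSampleRate(48000);
  adb.SetPlayoutChannels(2);
  adb.StartPlayout();  // Starts the periodic LogStats() timer.
  adb.StopPlayout();   // Recording is inactive, so the timer stops too.
}
```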
+
+void AudioDeviceBuffer::StopRecording() {
+  RTC_DCHECK_RUN_ON(&main_thread_checker_);
+  if (!recording_) {
+    return;
+  }
+  RTC_DLOG(LS_INFO) << __FUNCTION__;
+  recording_ = false;
+  // Stop periodic logging if no more media is active.
+  if (!playing_) {
+    StopPeriodicLogging();
+  }
+  // Add UMA histogram to keep track of the case when only zeros have been
+  // recorded. Measurements (max of absolute level) are taken twice per second,
+  // which means that if e.g. 10 seconds of audio has been recorded, a total of
+  // 20 level estimates must all be identical to zero to trigger the histogram.
+  // `only_silence_recorded_` can only be cleared on the native audio thread
+  // that drives audio capture but we know by design that the audio has stopped
+  // when this method is called, hence there should not be any conflicts. Also,
+  // the fact that `only_silence_recorded_` can be affected during the complete
+  // call makes chances of conflicts with potentially one last callback very
+  // small.
+  const size_t time_since_start = rtc::TimeSince(rec_start_time_);
+  if (time_since_start > kMinValidCallTimeTimeInMilliseconds) {
+    const int only_zeros = static_cast<int>(only_silence_recorded_);
+    RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.RecordedOnlyZeros", only_zeros);
+    RTC_LOG(LS_INFO) << "HISTOGRAM(WebRTC.Audio.RecordedOnlyZeros): "
+                     << only_zeros;
+  }
+  RTC_LOG(LS_INFO) << "total recording time: " << time_since_start;
+}
+
+int32_t AudioDeviceBuffer::SetRecordingSampleRate(uint32_t fsHz) {
+  RTC_LOG(LS_INFO) << "SetRecordingSampleRate(" << fsHz << ")";
+  rec_sample_rate_ = fsHz;
+  return 0;
+}
+
+int32_t AudioDeviceBuffer::SetPlayoutSampleRate(uint32_t fsHz) {
+  RTC_LOG(LS_INFO) << "SetPlayoutSampleRate(" << fsHz << ")";
+  play_sample_rate_ = fsHz;
+  return 0;
+}
+
+uint32_t AudioDeviceBuffer::RecordingSampleRate() const {
+  return rec_sample_rate_;
+}
+
+uint32_t AudioDeviceBuffer::PlayoutSampleRate() const {
+  return play_sample_rate_;
+}
+
+int32_t AudioDeviceBuffer::SetRecordingChannels(size_t channels) {
+  RTC_LOG(LS_INFO) << "SetRecordingChannels(" << channels << ")";
+  rec_channels_ = channels;
+  return 0;
+}
+
+int32_t AudioDeviceBuffer::SetPlayoutChannels(size_t channels) {
+  RTC_LOG(LS_INFO) << "SetPlayoutChannels(" << channels << ")";
+  play_channels_ = channels;
+  return 0;
+}
+
+size_t AudioDeviceBuffer::RecordingChannels() const {
+  return rec_channels_;
+}
+
+size_t AudioDeviceBuffer::PlayoutChannels() const {
+  return play_channels_;
+}
+
+int32_t AudioDeviceBuffer::SetTypingStatus(bool typing_status) {
+  typing_status_ = typing_status;
+  return 0;
+}
+
+void AudioDeviceBuffer::SetVQEData(int play_delay_ms, int rec_delay_ms) {
+  play_delay_ms_ = play_delay_ms;
+  rec_delay_ms_ = rec_delay_ms;
+}
+
+int32_t AudioDeviceBuffer::SetRecordedBuffer(const void* audio_buffer,
+                                             size_t samples_per_channel) {
+  return SetRecordedBuffer(audio_buffer, samples_per_channel, absl::nullopt);
+}
+
+int32_t AudioDeviceBuffer::SetRecordedBuffer(
+    const void* audio_buffer,
+    size_t samples_per_channel,
+    absl::optional<int64_t> capture_timestamp_ns) {
+  // Copy the complete input buffer to the local buffer.
+  const size_t old_size = rec_buffer_.size();
+  rec_buffer_.SetData(static_cast<const int16_t*>(audio_buffer),
+                      rec_channels_ * samples_per_channel);
+  // Keep track of the size of the recording buffer. Only updated when the
+  // size changes, which is a rare event.
+  if (old_size != rec_buffer_.size()) {
+    RTC_LOG(LS_INFO) << "Size of recording buffer: " << rec_buffer_.size();
+  }
+
+  if (capture_timestamp_ns) {
+    int64_t align_offsync_estimation_time = rtc::TimeMicros();
+    if (align_offsync_estimation_time -
+            rtc::TimestampAligner::kMinFrameIntervalUs >
+        align_offsync_estimation_time_) {
+      align_offsync_estimation_time_ = align_offsync_estimation_time;
+      capture_timestamp_ns_ =
+          rtc::kNumNanosecsPerMicrosec *
+          timestamp_aligner_.TranslateTimestamp(
+              *capture_timestamp_ns / rtc::kNumNanosecsPerMicrosec,
+              align_offsync_estimation_time);
+    } else {
+      // The timestamp aligner is designed to prevent timestamps that are too
+      // similar, and produces warnings if it is called too often. We do not
+      // care about that here, so we use this workaround: if we would otherwise
+      // call the aligner again within a millisecond, we instead call the
+      // overload that does not update the clock offset estimation. This gets
+      // us timestamps without generating warnings, but could produce two
+      // timestamps within the same millisecond.
+      capture_timestamp_ns_ =
+          rtc::kNumNanosecsPerMicrosec *
+          timestamp_aligner_.TranslateTimestamp(*capture_timestamp_ns /
+                                                rtc::kNumNanosecsPerMicrosec);
+    }
+  }
+  // Derive a new level value twice per second and check if it is non-zero.
+  int16_t max_abs = 0;
+  RTC_DCHECK_LT(rec_stat_count_, 50);
+  if (++rec_stat_count_ >= 50) {
+    // Returns the largest absolute value in a signed 16-bit vector.
+    max_abs = WebRtcSpl_MaxAbsValueW16(rec_buffer_.data(), rec_buffer_.size());
+    rec_stat_count_ = 0;
+    // Set `only_silence_recorded_` to false as soon as at least one detection
+    // of a non-zero audio packet is found. It can only be restored to true
+    // again by restarting the call.
+    if (max_abs > 0) {
+      only_silence_recorded_ = false;
+    }
+  }
+  // Update recording stats which is used as base for periodic logging of the
+  // audio input state.
+  UpdateRecStats(max_abs, samples_per_channel);
+  return 0;
+}
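The rate-limited timestamp alignment inside SetRecordedBuffer() above can be read as a small standalone pattern: translate every capture timestamp into the rtc::TimeMicros() clock, but refresh the clock-offset estimate at most once per rtc::TimestampAligner::kMinFrameIntervalUs. A distilled sketch (the free function and its parameter names are illustrative, not part of the file):

```cpp
#include <cstdint>

#include "rtc_base/time_utils.h"
#include "rtc_base/timestamp_aligner.h"

// Returns `capture_time_ns` translated to the rtc::TimeMicros() clock, in ns.
int64_t AlignCaptureTimestamp(rtc::TimestampAligner& aligner,
                              int64_t capture_time_ns,
                              int64_t& last_estimation_time_us) {
  const int64_t now_us = rtc::TimeMicros();
  const int64_t capture_us = capture_time_ns / rtc::kNumNanosecsPerMicrosec;
  int64_t aligned_us;
  if (now_us - rtc::TimestampAligner::kMinFrameIntervalUs >
      last_estimation_time_us) {
    last_estimation_time_us = now_us;
    // Updates the clock-offset estimate as a side effect.
    aligned_us = aligner.TranslateTimestamp(capture_us, now_us);
  } else {
    // Reuses the previous estimate; avoids the aligner's rate warnings.
    aligned_us = aligner.TranslateTimestamp(capture_us);
  }
  return aligned_us * rtc::kNumNanosecsPerMicrosec;
}
```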
+
+int32_t AudioDeviceBuffer::DeliverRecordedData() {
+  if (!audio_transport_cb_) {
+    RTC_LOG(LS_WARNING) << "Invalid audio transport";
+    return 0;
+  }
+  const size_t frames = rec_buffer_.size() / rec_channels_;
+  const size_t bytes_per_frame = rec_channels_ * sizeof(int16_t);
+  uint32_t new_mic_level_dummy = 0;
+  uint32_t total_delay_ms = play_delay_ms_ + rec_delay_ms_;
+  int32_t res = audio_transport_cb_->RecordedDataIsAvailable(
+      rec_buffer_.data(), frames, bytes_per_frame, rec_channels_,
+      rec_sample_rate_, total_delay_ms, 0, 0, typing_status_,
+      new_mic_level_dummy, capture_timestamp_ns_);
+  if (res == -1) {
+    RTC_LOG(LS_ERROR) << "RecordedDataIsAvailable() failed";
+  }
+  return 0;
+}
+
+int32_t AudioDeviceBuffer::RequestPlayoutData(size_t samples_per_channel) {
+  TRACE_EVENT1("webrtc", "AudioDeviceBuffer::RequestPlayoutData",
+               "samples_per_channel", samples_per_channel);
+
+  // The consumer can change the requested size on the fly and we therefore
+  // resize the buffer accordingly. Also takes place at the first call to this
+  // method.
+  const size_t total_samples = play_channels_ * samples_per_channel;
+  if (play_buffer_.size() != total_samples) {
+    play_buffer_.SetSize(total_samples);
+    RTC_LOG(LS_INFO) << "Size of playout buffer: " << play_buffer_.size();
+  }
+
+  size_t num_samples_out(0);
+  // It is currently supported to start playout without a valid audio
+  // transport object. Leads to warning and silence.
+  if (!audio_transport_cb_) {
+    RTC_LOG(LS_WARNING) << "Invalid audio transport";
+    return 0;
+  }
+
+  // Retrieve new 16-bit PCM audio data using the audio transport instance.
+  int64_t elapsed_time_ms = -1;
+  int64_t ntp_time_ms = -1;
+  const size_t bytes_per_frame = play_channels_ * sizeof(int16_t);
+  uint32_t res = audio_transport_cb_->NeedMorePlayData(
+      samples_per_channel, bytes_per_frame, play_channels_, play_sample_rate_,
+      play_buffer_.data(), num_samples_out, &elapsed_time_ms, &ntp_time_ms);
+  if (res != 0) {
+    RTC_LOG(LS_ERROR) << "NeedMorePlayData() failed";
+  }
+
+  // Derive a new level value twice per second.
+  int16_t max_abs = 0;
+  RTC_DCHECK_LT(play_stat_count_, 50);
+  if (++play_stat_count_ >= 50) {
+    // Returns the largest absolute value in a signed 16-bit vector.
+    max_abs =
+        WebRtcSpl_MaxAbsValueW16(play_buffer_.data(), play_buffer_.size());
+    play_stat_count_ = 0;
+  }
+  // Update playout stats which is used as base for periodic logging of the
+  // audio output state.
+  UpdatePlayStats(max_abs, num_samples_out / play_channels_);
+  return static_cast<int32_t>(num_samples_out / play_channels_);
+}
+
+int32_t AudioDeviceBuffer::GetPlayoutData(void* audio_buffer) {
+  RTC_DCHECK_GT(play_buffer_.size(), 0);
+#ifdef AUDIO_DEVICE_PLAYS_SINUS_TONE
+  const double phase_increment =
+      k2Pi * 440.0 / static_cast<double>(play_sample_rate_);
+  int16_t* destination_r = reinterpret_cast<int16_t*>(audio_buffer);
+  if (play_channels_ == 1) {
+    for (size_t i = 0; i < play_buffer_.size(); ++i) {
+      destination_r[i] = static_cast<int16_t>((sin(phase_) * (1 << 14)));
+      phase_ += phase_increment;
+    }
+  } else if (play_channels_ == 2) {
+    for (size_t i = 0; i < play_buffer_.size() / 2; ++i) {
+      destination_r[2 * i] = destination_r[2 * i + 1] =
+          static_cast<int16_t>((sin(phase_) * (1 << 14)));
+      phase_ += phase_increment;
+    }
+  }
+#else
+  memcpy(audio_buffer, play_buffer_.data(),
+         play_buffer_.size() * sizeof(int16_t));
+#endif
+  // Return samples per channel or number of frames.
+  return static_cast<int32_t>(play_buffer_.size() / play_channels_);
+}
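For reference, the AUDIO_DEVICE_PLAYS_SINUS_TONE branch of GetPlayoutData() above is a textbook phase-accumulator oscillator: a 440 Hz sine at amplitude 1 << 14 (about -6 dBFS), with the phase carried across calls so the waveform stays continuous. The same logic as a standalone helper (helper name is hypothetical; stereo only):

```cpp
#include <cmath>
#include <cstdint>
#include <vector>

// Fills an interleaved stereo int16 buffer with a 440 Hz tone, advancing
// `phase` so consecutive calls produce a continuous waveform.
void FillStereoSine(std::vector<int16_t>& interleaved,
                    int sample_rate_hz,
                    double& phase) {
  const double k2Pi = 6.28318530717959;
  const double phase_increment = k2Pi * 440.0 / sample_rate_hz;
  for (size_t i = 0; i + 1 < interleaved.size(); i += 2) {
    const int16_t s = static_cast<int16_t>(std::sin(phase) * (1 << 14));
    interleaved[i] = interleaved[i + 1] = s;
    phase += phase_increment;
  }
}
```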
+
+void AudioDeviceBuffer::StartPeriodicLogging() {
+  task_queue_.PostTask([this] { LogStats(AudioDeviceBuffer::LOG_START); });
+}
+
+void AudioDeviceBuffer::StopPeriodicLogging() {
+  task_queue_.PostTask([this] { LogStats(AudioDeviceBuffer::LOG_STOP); });
+}
+
+void AudioDeviceBuffer::LogStats(LogState state) {
+  RTC_DCHECK_RUN_ON(&task_queue_);
+  int64_t now_time = rtc::TimeMillis();
+
+  if (state == AudioDeviceBuffer::LOG_START) {
+    // Reset counters at start. We will not add any logging in this state but
+    // the timer will be started by posting a new (delayed) task.
+    num_stat_reports_ = 0;
+    last_timer_task_time_ = now_time;
+    log_stats_ = true;
+  } else if (state == AudioDeviceBuffer::LOG_STOP) {
+    // Stop logging and posting new tasks.
+    log_stats_ = false;
+  } else if (state == AudioDeviceBuffer::LOG_ACTIVE) {
+    // Keep logging unless logging was disabled while task was posted.
+  }
+
+  // Avoid adding more logs since we are in STOP mode.
+  if (!log_stats_) {
+    return;
+  }
+
+  int64_t next_callback_time = now_time + kTimerIntervalInMilliseconds;
+  int64_t time_since_last = rtc::TimeDiff(now_time, last_timer_task_time_);
+  last_timer_task_time_ = now_time;
+
+  Stats stats;
+  {
+    MutexLock lock(&lock_);
+    stats = stats_;
+    stats_.max_rec_level = 0;
+    stats_.max_play_level = 0;
+  }
+
+  // Cache current sample rates from atomic members.
+  const uint32_t rec_sample_rate = rec_sample_rate_;
+  const uint32_t play_sample_rate = play_sample_rate_;
+
+  // Log the latest statistics but skip the first two rounds just after state
+  // was set to LOG_START to ensure that we have at least one full stable
+  // 10-second interval for sample-rate estimation. Hence, the first printed
+  // log will be after ~20 seconds.
+  if (++num_stat_reports_ > 2 &&
+      static_cast<size_t>(time_since_last) > kTimerIntervalInMilliseconds / 2) {
+    uint32_t diff_samples = stats.rec_samples - last_stats_.rec_samples;
+    float rate = diff_samples / (static_cast<float>(time_since_last) / 1000.0);
+    uint32_t abs_diff_rate_in_percent = 0;
+    if (rec_sample_rate > 0 && rate > 0) {
+      abs_diff_rate_in_percent = static_cast<uint32_t>(
+          0.5f +
+          ((100.0f * std::abs(rate - rec_sample_rate)) / rec_sample_rate));
+      RTC_HISTOGRAM_PERCENTAGE("WebRTC.Audio.RecordSampleRateOffsetInPercent",
+                               abs_diff_rate_in_percent);
+      RTC_LOG(LS_INFO) << "[REC : " << time_since_last << "msec, "
+                       << rec_sample_rate / 1000 << "kHz] callbacks: "
+                       << stats.rec_callbacks - last_stats_.rec_callbacks
+                       << ", "
+                          "samples: "
+                       << diff_samples
+                       << ", "
+                          "rate: "
+                       << static_cast<int>(rate + 0.5)
+                       << ", "
+                          "rate diff: "
+                       << abs_diff_rate_in_percent
+                       << "%, "
+                          "level: "
+                       << stats.max_rec_level;
+    }
+
+    diff_samples = stats.play_samples - last_stats_.play_samples;
+    rate = diff_samples / (static_cast<float>(time_since_last) / 1000.0);
+    abs_diff_rate_in_percent = 0;
+    if (play_sample_rate > 0 && rate > 0) {
+      abs_diff_rate_in_percent = static_cast<uint32_t>(
+          0.5f +
+          ((100.0f * std::abs(rate - play_sample_rate)) / play_sample_rate));
+      RTC_HISTOGRAM_PERCENTAGE("WebRTC.Audio.PlayoutSampleRateOffsetInPercent",
+                               abs_diff_rate_in_percent);
+      RTC_LOG(LS_INFO) << "[PLAY: " << time_since_last << "msec, "
+                       << play_sample_rate / 1000 << "kHz] callbacks: "
+                       << stats.play_callbacks - last_stats_.play_callbacks
+                       << ", "
+                          "samples: "
+                       << diff_samples
+                       << ", "
+                          "rate: "
+                       << static_cast<int>(rate + 0.5)
+                       << ", "
+                          "rate diff: "
+                       << abs_diff_rate_in_percent
+                       << "%, "
+                          "level: "
+                       << stats.max_play_level;
+    }
+  }
+  last_stats_ = stats;
+
+  int64_t time_to_wait_ms = next_callback_time - rtc::TimeMillis();
+  RTC_DCHECK_GT(time_to_wait_ms, 0) << "Invalid timer interval";
+
+  // Keep posting new (delayed) tasks until state is changed to LOG_STOP.
+  task_queue_.PostDelayedTask(
+      [this] { AudioDeviceBuffer::LogStats(AudioDeviceBuffer::LOG_ACTIVE); },
+      TimeDelta::Millis(time_to_wait_ms));
+}
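The per-direction log lines above report how far the measured callback rate deviates from the nominal sample rate, rounded to whole percent. The same computation pulled out for clarity (the helper name is illustrative):

```cpp
#include <cmath>
#include <cstdint>

// Mirrors the abs_diff_rate_in_percent computation in LogStats() above.
uint32_t RateOffsetInPercent(float measured_rate_hz, uint32_t nominal_rate_hz) {
  return static_cast<uint32_t>(
      0.5f +
      (100.0f * std::abs(measured_rate_hz - nominal_rate_hz)) /
          nominal_rate_hz);
}

// Example: 479520 samples over a 10000 ms window give 47952 Hz; against a
// 48000 Hz nominal rate the offset is 0.1%, which rounds to 0.
```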
+
+void AudioDeviceBuffer::ResetRecStats() {
+  RTC_DCHECK_RUN_ON(&task_queue_);
+  last_stats_.ResetRecStats();
+  MutexLock lock(&lock_);
+  stats_.ResetRecStats();
+}
+
+void AudioDeviceBuffer::ResetPlayStats() {
+  RTC_DCHECK_RUN_ON(&task_queue_);
+  last_stats_.ResetPlayStats();
+  MutexLock lock(&lock_);
+  stats_.ResetPlayStats();
+}
+
+void AudioDeviceBuffer::UpdateRecStats(int16_t max_abs,
+                                       size_t samples_per_channel) {
+  MutexLock lock(&lock_);
+  ++stats_.rec_callbacks;
+  stats_.rec_samples += samples_per_channel;
+  if (max_abs > stats_.max_rec_level) {
+    stats_.max_rec_level = max_abs;
+  }
+}
+
+void AudioDeviceBuffer::UpdatePlayStats(int16_t max_abs,
+                                        size_t samples_per_channel) {
+  MutexLock lock(&lock_);
+  ++stats_.play_callbacks;
+  stats_.play_samples += samples_per_channel;
+  if (max_abs > stats_.max_play_level) {
+    stats_.max_play_level = max_abs;
+  }
+}
+
+}  // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/audio_device_buffer.h b/third_party/libwebrtc/modules/audio_device/audio_device_buffer.h
new file mode 100644
index 0000000000..1260a24c61
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/audio_device_buffer.h
@@ -0,0 +1,253 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_AUDIO_DEVICE_BUFFER_H_
+#define MODULES_AUDIO_DEVICE_AUDIO_DEVICE_BUFFER_H_
+
+#include <stdint.h>
+#include <string.h>
+
+#include <atomic>
+
+#include "api/sequence_checker.h"
+#include "api/task_queue/task_queue_factory.h"
+#include "modules/audio_device/include/audio_device_defines.h"
+#include "rtc_base/buffer.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/task_queue.h"
+#include "rtc_base/thread_annotations.h"
+#include "rtc_base/timestamp_aligner.h"
+
+namespace webrtc {
+
+// Delta times between two successive playout callbacks are limited to this
+// value before being added to an internal array.
+const size_t kMaxDeltaTimeInMs = 500;
+// TODO(henrika): remove when no longer used by external client.
+const size_t kMaxBufferSizeBytes = 3840;  // 10ms in stereo @ 96kHz
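The kMaxBufferSizeBytes constant above is easy to sanity-check: 10 ms at 96 kHz is 960 frames, and interleaved stereo int16 needs two samples of two bytes per frame. As a compile-time check (illustrative, not part of the file):

```cpp
#include <cstdint>

// 10 ms @ 96 kHz = 960 frames; stereo int16 = 2 channels * 2 bytes per frame.
static_assert(960 * 2 * sizeof(int16_t) == 3840,
              "kMaxBufferSizeBytes covers 10 ms of stereo audio at 96 kHz");
```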
+
+class AudioDeviceBuffer {
+ public:
+  enum LogState {
+    LOG_START = 0,
+    LOG_STOP,
+    LOG_ACTIVE,
+  };
+
+  struct Stats {
+    void ResetRecStats() {
+      rec_callbacks = 0;
+      rec_samples = 0;
+      max_rec_level = 0;
+    }
+
+    void ResetPlayStats() {
+      play_callbacks = 0;
+      play_samples = 0;
+      max_play_level = 0;
+    }
+
+    // Total number of recording callbacks where the source provides 10ms audio
+    // data each time.
+    uint64_t rec_callbacks = 0;
+
+    // Total number of playback callbacks where the sink asks for 10ms audio
+    // data each time.
+    uint64_t play_callbacks = 0;
+
+    // Total number of recorded audio samples.
+    uint64_t rec_samples = 0;
+
+    // Total number of played audio samples.
+    uint64_t play_samples = 0;
+
+    // Contains max level (max(abs(x))) of recorded audio packets over the last
+    // 10 seconds where a new measurement is done twice per second. The level
+    // is reset to zero at each call to LogStats().
+    int16_t max_rec_level = 0;
+
+    // Contains max level of played audio packets over the last 10 seconds
+    // where a new measurement is done twice per second.
+    int16_t max_play_level = 0;
+  };
+
+  // If `create_detached` is true, the created buffer can be used on another
+  // thread compared to the one on which it was created. It's useful for
+  // testing.
+  explicit AudioDeviceBuffer(TaskQueueFactory* task_queue_factory,
+                             bool create_detached = false);
+  virtual ~AudioDeviceBuffer();
+
+  int32_t RegisterAudioCallback(AudioTransport* audio_callback);
+
+  void StartPlayout();
+  void StartRecording();
+  void StopPlayout();
+  void StopRecording();
+
+  int32_t SetRecordingSampleRate(uint32_t fsHz);
+  int32_t SetPlayoutSampleRate(uint32_t fsHz);
+  uint32_t RecordingSampleRate() const;
+  uint32_t PlayoutSampleRate() const;
+
+  int32_t SetRecordingChannels(size_t channels);
+  int32_t SetPlayoutChannels(size_t channels);
+  size_t RecordingChannels() const;
+  size_t PlayoutChannels() const;
+
+  // TODO(bugs.webrtc.org/13621) Deprecate this function
+  virtual int32_t SetRecordedBuffer(const void* audio_buffer,
+                                    size_t samples_per_channel);
+
+  virtual int32_t SetRecordedBuffer(
+      const void* audio_buffer,
+      size_t samples_per_channel,
+      absl::optional<int64_t> capture_timestamp_ns);
+  virtual void SetVQEData(int play_delay_ms, int rec_delay_ms);
+  virtual int32_t DeliverRecordedData();
+  uint32_t NewMicLevel() const;
+
+  virtual int32_t RequestPlayoutData(size_t samples_per_channel);
+  virtual int32_t GetPlayoutData(void* audio_buffer);
+
+  int32_t SetTypingStatus(bool typing_status);
+
+ private:
+  // Starts/stops periodic logging of audio stats.
+  void StartPeriodicLogging();
+  void StopPeriodicLogging();
+
+  // Called periodically on the internal thread created by the TaskQueue.
+  // Updates some stats but does it on the task queue to ensure that access of
+  // members is serialized hence avoiding usage of locks.
+  // state = LOG_START => members are initialized and the timer starts.
+  // state = LOG_STOP => no logs are printed and the timer stops.
+  // state = LOG_ACTIVE => logs are printed and the timer is kept alive.
+  void LogStats(LogState state);
+
+  // Updates counters in each play/record callback. These counters are later
+  // (periodically) read by LogStats() using a lock.
+  void UpdateRecStats(int16_t max_abs, size_t samples_per_channel);
+  void UpdatePlayStats(int16_t max_abs, size_t samples_per_channel);
+
+  // Clears all members tracking stats for recording and playout.
+  // These methods both run on the task queue.
+  void ResetRecStats();
+  void ResetPlayStats();
+
+  // This object lives on the main (creating) thread and most methods are
+  // called on that same thread. When audio has started some methods will be
+  // called on either a native audio thread for playout or a native thread for
+  // recording. Some members are not annotated since they are "protected by
+  // design" and adding e.g. a race checker can cause failures for very few
+  // edge cases and it is IMHO not worth the risk to use them in this class.
+  // TODO(henrika): see if it is possible to refactor and annotate all members.
+
+  // Main thread on which this object is created.
+  SequenceChecker main_thread_checker_;
+
+  Mutex lock_;
+
+  // Task queue used to invoke LogStats() periodically. Tasks are executed on a
+  // worker thread but it does not necessarily have to be the same thread for
+  // each task.
+  rtc::TaskQueue task_queue_;
+
+  // Raw pointer to AudioTransport instance. Supplied to RegisterAudioCallback()
+  // and it must outlive this object. It is not possible to change this member
+  // while any media is active. It is possible to start media without calling
+  // RegisterAudioCallback() but that will lead to ignored audio callbacks in
+  // both directions where native audio will be active but no audio samples
+  // will be transported.
+  AudioTransport* audio_transport_cb_;
+
+  // Sample rate in Hertz. Accessed atomically.
+  std::atomic<uint32_t> rec_sample_rate_;
+  std::atomic<uint32_t> play_sample_rate_;
+
+  // Number of audio channels. Accessed atomically.
+  std::atomic<size_t> rec_channels_;
+  std::atomic<size_t> play_channels_;
+
+  // Keeps track of if playout/recording are active or not. A combination
+  // of these states are used to determine when to start and stop the timer.
+  // Only used on the creating thread and not used to control any media flow.
+  bool playing_ RTC_GUARDED_BY(main_thread_checker_);
+  bool recording_ RTC_GUARDED_BY(main_thread_checker_);
+
+  // Buffer used for audio samples to be played out. Size can be changed
+  // dynamically. The 16-bit samples are interleaved, hence the size is
+  // proportional to the number of channels.
+  rtc::BufferT<int16_t> play_buffer_;
+
+  // Byte buffer used for recorded audio samples. Size can be changed
+  // dynamically.
+  rtc::BufferT<int16_t> rec_buffer_;
+
+  // Contains true if a key-press has been detected.
+  bool typing_status_;
+
+  // Delay values used by the AEC.
+  int play_delay_ms_;
+  int rec_delay_ms_;
+
+  // Capture timestamp.
+  absl::optional<int64_t> capture_timestamp_ns_;
+  // The last time the TimestampAligner was used to estimate the clock offset
+  // between the system clock and the capture time from audio.
+  // This is used to prevent estimating the clock offset too often.
+  absl::optional<int64_t> align_offsync_estimation_time_;
+
+  // Counts number of times LogStats() has been called.
+  size_t num_stat_reports_ RTC_GUARDED_BY(task_queue_);
+
+  // Time stamp of last timer task (drives logging).
+  int64_t last_timer_task_time_ RTC_GUARDED_BY(task_queue_);
+
+  // Counts number of audio callbacks modulo 50 to create a signal when
+  // a new storage of audio stats shall be done.
+  int16_t rec_stat_count_;
+  int16_t play_stat_count_;
+
+  // Time stamps of when playout and recording starts.
+  int64_t play_start_time_ RTC_GUARDED_BY(main_thread_checker_);
+  int64_t rec_start_time_ RTC_GUARDED_BY(main_thread_checker_);
+
+  // Contains counters for playout and recording statistics.
+  Stats stats_ RTC_GUARDED_BY(lock_);
+
+  // Stores current stats at each timer task. Used to calculate differences
+  // between two successive timer events.
+  Stats last_stats_ RTC_GUARDED_BY(task_queue_);
+
+  // Set to true at construction and modified to false as soon as one audio-
+  // level estimate larger than zero is detected.
+  bool only_silence_recorded_;
+
+  // Set to true when logging of audio stats is enabled for the first time in
+  // StartPeriodicLogging() and set to false by StopPeriodicLogging().
+  // Setting this member to false prevents (possibly invalid) log messages from
+  // being printed in the LogStats() task.
+  bool log_stats_ RTC_GUARDED_BY(task_queue_);
+
+  // Used for converting capture timestamps (received from AudioRecordThread
+  // via AudioRecordJni::DataIsRecorded) to the RTC clock.
+  rtc::TimestampAligner timestamp_aligner_;
+
+// Should *never* be defined in production builds. Only used for testing.
+// When defined, the output signal will be replaced by a sinus tone at 440Hz.
+#ifdef AUDIO_DEVICE_PLAYS_SINUS_TONE
+  double phase_;
+#endif
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_DEVICE_AUDIO_DEVICE_BUFFER_H_
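The `create_detached` constructor flag documented above relies on a standard SequenceChecker idiom: detaching at construction lets whichever thread first calls into the object claim it, after which RTC_DCHECK_RUN_ON() enforces single-threaded access. A minimal illustration (the class is hypothetical; assumes api/sequence_checker.h):

```cpp
#include "api/sequence_checker.h"
#include "rtc_base/thread_annotations.h"

class DetachableExample {
 public:
  explicit DetachableExample(bool create_detached) {
    if (create_detached) {
      // The first RTC_DCHECK_RUN_ON() below re-binds the checker to the
      // calling thread, mirroring AudioDeviceBuffer's constructor.
      checker_.Detach();
    }
  }
  void Use() {
    RTC_DCHECK_RUN_ON(&checker_);
    ++counter_;
  }

 private:
  webrtc::SequenceChecker checker_;
  int counter_ RTC_GUARDED_BY(checker_) = 0;
};
```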
diff --git a/third_party/libwebrtc/modules/audio_device/audio_device_config.h b/third_party/libwebrtc/modules/audio_device/audio_device_config.h
new file mode 100644
index 0000000000..fa51747b67
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/audio_device_config.h
@@ -0,0 +1,30 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_DEVICE_AUDIO_DEVICE_CONFIG_H_
+#define AUDIO_DEVICE_AUDIO_DEVICE_CONFIG_H_
+
+// Enumerators
+//
+enum { GET_MIC_VOLUME_INTERVAL_MS = 1000 };
+
+// Platform specifics
+//
+#if defined(_WIN32)
+#if (_MSC_VER >= 1400)
+#if !defined(WEBRTC_DUMMY_FILE_DEVICES)
+// Windows Core Audio is the default audio layer in Windows.
+// Only supported for VS 2005 and higher.
+#define WEBRTC_WINDOWS_CORE_AUDIO_BUILD
+#endif
+#endif
+#endif
+
+#endif  // AUDIO_DEVICE_AUDIO_DEVICE_CONFIG_H_
diff --git a/third_party/libwebrtc/modules/audio_device/audio_device_data_observer.cc b/third_party/libwebrtc/modules/audio_device/audio_device_data_observer.cc
new file mode 100644
index 0000000000..0524830327
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/audio_device_data_observer.cc
@@ -0,0 +1,373 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/include/audio_device_data_observer.h"
+
+#include "api/make_ref_counted.h"
+#include "modules/audio_device/include/audio_device_defines.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+namespace {
+
+// A wrapper over AudioDeviceModule that registers itself as AudioTransport
+// callback and redirects the PCM data to AudioDeviceDataObserver callback.
+class ADMWrapper : public AudioDeviceModule, public AudioTransport {
+ public:
+  ADMWrapper(rtc::scoped_refptr<AudioDeviceModule> impl,
+             AudioDeviceDataObserver* legacy_observer,
+             std::unique_ptr<AudioDeviceDataObserver> observer)
+      : impl_(impl),
+        legacy_observer_(legacy_observer),
+        observer_(std::move(observer)) {
+    is_valid_ = impl_.get() != nullptr;
+  }
+  ADMWrapper(AudioLayer audio_layer,
+             TaskQueueFactory* task_queue_factory,
+             AudioDeviceDataObserver* legacy_observer,
+             std::unique_ptr<AudioDeviceDataObserver> observer)
+      : ADMWrapper(AudioDeviceModule::Create(audio_layer, task_queue_factory),
+                   legacy_observer,
+                   std::move(observer)) {}
+  ~ADMWrapper() override {
+    audio_transport_ = nullptr;
+    observer_ = nullptr;
+  }
+
+  // Make sure we have a valid ADM before returning it to user.
+ bool IsValid() { return is_valid_; } + + int32_t RecordedDataIsAvailable(const void* audioSamples, + size_t nSamples, + size_t nBytesPerSample, + size_t nChannels, + uint32_t samples_per_sec, + uint32_t total_delay_ms, + int32_t clockDrift, + uint32_t currentMicLevel, + bool keyPressed, + uint32_t& newMicLevel) override { + return RecordedDataIsAvailable( + audioSamples, nSamples, nBytesPerSample, nChannels, samples_per_sec, + total_delay_ms, clockDrift, currentMicLevel, keyPressed, newMicLevel, + /*capture_timestamp_ns=*/absl::nullopt); + } + + // AudioTransport methods overrides. + int32_t RecordedDataIsAvailable( + const void* audioSamples, + size_t nSamples, + size_t nBytesPerSample, + size_t nChannels, + uint32_t samples_per_sec, + uint32_t total_delay_ms, + int32_t clockDrift, + uint32_t currentMicLevel, + bool keyPressed, + uint32_t& newMicLevel, + absl::optional capture_timestamp_ns) override { + int32_t res = 0; + // Capture PCM data of locally captured audio. + if (observer_) { + observer_->OnCaptureData(audioSamples, nSamples, nBytesPerSample, + nChannels, samples_per_sec); + } + + // Send to the actual audio transport. + if (audio_transport_) { + res = audio_transport_->RecordedDataIsAvailable( + audioSamples, nSamples, nBytesPerSample, nChannels, samples_per_sec, + total_delay_ms, clockDrift, currentMicLevel, keyPressed, newMicLevel, + capture_timestamp_ns); + } + + return res; + } + + int32_t NeedMorePlayData(const size_t nSamples, + const size_t nBytesPerSample, + const size_t nChannels, + const uint32_t samples_per_sec, + void* audioSamples, + size_t& nSamplesOut, + int64_t* elapsed_time_ms, + int64_t* ntp_time_ms) override { + int32_t res = 0; + // Set out parameters to safe values to be sure not to return corrupted + // data. + nSamplesOut = 0; + *elapsed_time_ms = -1; + *ntp_time_ms = -1; + // Request data from audio transport. + if (audio_transport_) { + res = audio_transport_->NeedMorePlayData( + nSamples, nBytesPerSample, nChannels, samples_per_sec, audioSamples, + nSamplesOut, elapsed_time_ms, ntp_time_ms); + } + + // Capture rendered data. + if (observer_) { + observer_->OnRenderData(audioSamples, nSamples, nBytesPerSample, + nChannels, samples_per_sec); + } + + return res; + } + + void PullRenderData(int bits_per_sample, + int sample_rate, + size_t number_of_channels, + size_t number_of_frames, + void* audio_data, + int64_t* elapsed_time_ms, + int64_t* ntp_time_ms) override { + RTC_DCHECK_NOTREACHED(); + } + + // Override AudioDeviceModule's RegisterAudioCallback method to remember the + // actual audio transport (e.g.: voice engine). + int32_t RegisterAudioCallback(AudioTransport* audio_callback) override { + // Remember the audio callback to forward PCM data + audio_transport_ = audio_callback; + return 0; + } + + // AudioDeviceModule pass through method overrides. + int32_t ActiveAudioLayer(AudioLayer* audio_layer) const override { + return impl_->ActiveAudioLayer(audio_layer); + } + int32_t Init() override { + int res = impl_->Init(); + if (res != 0) { + return res; + } + // Register self as the audio transport callback for underlying ADM impl. 
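+    // After this call the callback chain becomes:
+    //   platform audio -> ADMWrapper -> audio_transport_ (the real client),
+    // which is what lets the observer tap the PCM on its way through. In
+    // the capture direction the flow implemented above is roughly:
+    //
+    //   ADMWrapper::RecordedDataIsAvailable(...)            // called by impl_
+    //     -> observer_->OnCaptureData(...)                  // observer tap
+    //     -> audio_transport_->RecordedDataIsAvailable(...) // forward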
+ impl_->RegisterAudioCallback(this); + return res; + } + int32_t Terminate() override { return impl_->Terminate(); } + bool Initialized() const override { return impl_->Initialized(); } + int16_t PlayoutDevices() override { return impl_->PlayoutDevices(); } + int16_t RecordingDevices() override { return impl_->RecordingDevices(); } + int32_t PlayoutDeviceName(uint16_t index, + char name[kAdmMaxDeviceNameSize], + char guid[kAdmMaxGuidSize]) override { + return impl_->PlayoutDeviceName(index, name, guid); + } + int32_t RecordingDeviceName(uint16_t index, + char name[kAdmMaxDeviceNameSize], + char guid[kAdmMaxGuidSize]) override { + return impl_->RecordingDeviceName(index, name, guid); + } + int32_t SetPlayoutDevice(uint16_t index) override { + return impl_->SetPlayoutDevice(index); + } + int32_t SetPlayoutDevice(WindowsDeviceType device) override { + return impl_->SetPlayoutDevice(device); + } + int32_t SetRecordingDevice(uint16_t index) override { + return impl_->SetRecordingDevice(index); + } + int32_t SetRecordingDevice(WindowsDeviceType device) override { + return impl_->SetRecordingDevice(device); + } + int32_t PlayoutIsAvailable(bool* available) override { + return impl_->PlayoutIsAvailable(available); + } + int32_t InitPlayout() override { return impl_->InitPlayout(); } + bool PlayoutIsInitialized() const override { + return impl_->PlayoutIsInitialized(); + } + int32_t RecordingIsAvailable(bool* available) override { + return impl_->RecordingIsAvailable(available); + } + int32_t InitRecording() override { return impl_->InitRecording(); } + bool RecordingIsInitialized() const override { + return impl_->RecordingIsInitialized(); + } + int32_t StartPlayout() override { return impl_->StartPlayout(); } + int32_t StopPlayout() override { return impl_->StopPlayout(); } + bool Playing() const override { return impl_->Playing(); } + int32_t StartRecording() override { return impl_->StartRecording(); } + int32_t StopRecording() override { return impl_->StopRecording(); } + bool Recording() const override { return impl_->Recording(); } + int32_t InitSpeaker() override { return impl_->InitSpeaker(); } + bool SpeakerIsInitialized() const override { + return impl_->SpeakerIsInitialized(); + } + int32_t InitMicrophone() override { return impl_->InitMicrophone(); } + bool MicrophoneIsInitialized() const override { + return impl_->MicrophoneIsInitialized(); + } + int32_t SpeakerVolumeIsAvailable(bool* available) override { + return impl_->SpeakerVolumeIsAvailable(available); + } + int32_t SetSpeakerVolume(uint32_t volume) override { + return impl_->SetSpeakerVolume(volume); + } + int32_t SpeakerVolume(uint32_t* volume) const override { + return impl_->SpeakerVolume(volume); + } + int32_t MaxSpeakerVolume(uint32_t* max_volume) const override { + return impl_->MaxSpeakerVolume(max_volume); + } + int32_t MinSpeakerVolume(uint32_t* min_volume) const override { + return impl_->MinSpeakerVolume(min_volume); + } + int32_t MicrophoneVolumeIsAvailable(bool* available) override { + return impl_->MicrophoneVolumeIsAvailable(available); + } + int32_t SetMicrophoneVolume(uint32_t volume) override { + return impl_->SetMicrophoneVolume(volume); + } + int32_t MicrophoneVolume(uint32_t* volume) const override { + return impl_->MicrophoneVolume(volume); + } + int32_t MaxMicrophoneVolume(uint32_t* max_volume) const override { + return impl_->MaxMicrophoneVolume(max_volume); + } + int32_t MinMicrophoneVolume(uint32_t* min_volume) const override { + return impl_->MinMicrophoneVolume(min_volume); + } + int32_t 
SpeakerMuteIsAvailable(bool* available) override { + return impl_->SpeakerMuteIsAvailable(available); + } + int32_t SetSpeakerMute(bool enable) override { + return impl_->SetSpeakerMute(enable); + } + int32_t SpeakerMute(bool* enabled) const override { + return impl_->SpeakerMute(enabled); + } + int32_t MicrophoneMuteIsAvailable(bool* available) override { + return impl_->MicrophoneMuteIsAvailable(available); + } + int32_t SetMicrophoneMute(bool enable) override { + return impl_->SetMicrophoneMute(enable); + } + int32_t MicrophoneMute(bool* enabled) const override { + return impl_->MicrophoneMute(enabled); + } + int32_t StereoPlayoutIsAvailable(bool* available) const override { + return impl_->StereoPlayoutIsAvailable(available); + } + int32_t SetStereoPlayout(bool enable) override { + return impl_->SetStereoPlayout(enable); + } + int32_t StereoPlayout(bool* enabled) const override { + return impl_->StereoPlayout(enabled); + } + int32_t StereoRecordingIsAvailable(bool* available) const override { + return impl_->StereoRecordingIsAvailable(available); + } + int32_t SetStereoRecording(bool enable) override { + return impl_->SetStereoRecording(enable); + } + int32_t StereoRecording(bool* enabled) const override { + return impl_->StereoRecording(enabled); + } + int32_t PlayoutDelay(uint16_t* delay_ms) const override { + return impl_->PlayoutDelay(delay_ms); + } + bool BuiltInAECIsAvailable() const override { + return impl_->BuiltInAECIsAvailable(); + } + bool BuiltInAGCIsAvailable() const override { + return impl_->BuiltInAGCIsAvailable(); + } + bool BuiltInNSIsAvailable() const override { + return impl_->BuiltInNSIsAvailable(); + } + int32_t EnableBuiltInAEC(bool enable) override { + return impl_->EnableBuiltInAEC(enable); + } + int32_t EnableBuiltInAGC(bool enable) override { + return impl_->EnableBuiltInAGC(enable); + } + int32_t EnableBuiltInNS(bool enable) override { + return impl_->EnableBuiltInNS(enable); + } + int32_t GetPlayoutUnderrunCount() const override { + return impl_->GetPlayoutUnderrunCount(); + } +// Only supported on iOS. 
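+  // The CreateAudioDeviceWithDataObserver() factories defined after this
+  // class are the intended entry point for obtaining the wrapper. A usage
+  // sketch, assuming a user-written observer type (`PcmLogger` is
+  // hypothetical):
+  //
+  //   class PcmLogger : public AudioDeviceDataObserver { ... };
+  //   auto adm = CreateAudioDeviceWithDataObserver(
+  //       AudioDeviceModule::kPlatformDefaultAudio, task_queue_factory,
+  //       std::make_unique<PcmLogger>());
+  //
+  // The parameter getters below are only compiled in on iOS: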
+#if defined(WEBRTC_IOS) + int GetPlayoutAudioParameters(AudioParameters* params) const override { + return impl_->GetPlayoutAudioParameters(params); + } + int GetRecordAudioParameters(AudioParameters* params) const override { + return impl_->GetRecordAudioParameters(params); + } +#endif // WEBRTC_IOS + + protected: + rtc::scoped_refptr impl_; + AudioDeviceDataObserver* legacy_observer_ = nullptr; + std::unique_ptr observer_; + AudioTransport* audio_transport_ = nullptr; + bool is_valid_ = false; +}; + +} // namespace + +rtc::scoped_refptr CreateAudioDeviceWithDataObserver( + rtc::scoped_refptr impl, + std::unique_ptr observer) { + auto audio_device = rtc::make_ref_counted(impl, observer.get(), + std::move(observer)); + + if (!audio_device->IsValid()) { + return nullptr; + } + + return audio_device; +} + +rtc::scoped_refptr CreateAudioDeviceWithDataObserver( + rtc::scoped_refptr impl, + AudioDeviceDataObserver* legacy_observer) { + auto audio_device = + rtc::make_ref_counted(impl, legacy_observer, nullptr); + + if (!audio_device->IsValid()) { + return nullptr; + } + + return audio_device; +} + +rtc::scoped_refptr CreateAudioDeviceWithDataObserver( + AudioDeviceModule::AudioLayer audio_layer, + TaskQueueFactory* task_queue_factory, + std::unique_ptr observer) { + auto audio_device = rtc::make_ref_counted( + audio_layer, task_queue_factory, observer.get(), std::move(observer)); + + if (!audio_device->IsValid()) { + return nullptr; + } + + return audio_device; +} + +rtc::scoped_refptr CreateAudioDeviceWithDataObserver( + AudioDeviceModule::AudioLayer audio_layer, + TaskQueueFactory* task_queue_factory, + AudioDeviceDataObserver* legacy_observer) { + auto audio_device = rtc::make_ref_counted( + audio_layer, task_queue_factory, legacy_observer, nullptr); + + if (!audio_device->IsValid()) { + return nullptr; + } + + return audio_device; +} +} // namespace webrtc diff --git a/third_party/libwebrtc/modules/audio_device/audio_device_generic.cc b/third_party/libwebrtc/modules/audio_device/audio_device_generic.cc new file mode 100644 index 0000000000..7b8cfd1734 --- /dev/null +++ b/third_party/libwebrtc/modules/audio_device/audio_device_generic.cc @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "modules/audio_device/audio_device_generic.h" + +#include "rtc_base/logging.h" + +namespace webrtc { + +bool AudioDeviceGeneric::BuiltInAECIsAvailable() const { + RTC_LOG_F(LS_ERROR) << "Not supported on this platform"; + return false; +} + +int32_t AudioDeviceGeneric::EnableBuiltInAEC(bool enable) { + RTC_LOG_F(LS_ERROR) << "Not supported on this platform"; + return -1; +} + +bool AudioDeviceGeneric::BuiltInAGCIsAvailable() const { + RTC_LOG_F(LS_ERROR) << "Not supported on this platform"; + return false; +} + +int32_t AudioDeviceGeneric::EnableBuiltInAGC(bool enable) { + RTC_LOG_F(LS_ERROR) << "Not supported on this platform"; + return -1; +} + +bool AudioDeviceGeneric::BuiltInNSIsAvailable() const { + RTC_LOG_F(LS_ERROR) << "Not supported on this platform"; + return false; +} + +int32_t AudioDeviceGeneric::EnableBuiltInNS(bool enable) { + RTC_LOG_F(LS_ERROR) << "Not supported on this platform"; + return -1; +} + +int32_t AudioDeviceGeneric::GetPlayoutUnderrunCount() const { + RTC_LOG_F(LS_ERROR) << "Not supported on this platform"; + return -1; +} + +#if defined(WEBRTC_IOS) +int AudioDeviceGeneric::GetPlayoutAudioParameters( + AudioParameters* params) const { + RTC_LOG_F(LS_ERROR) << "Not supported on this platform"; + return -1; +} + +int AudioDeviceGeneric::GetRecordAudioParameters( + AudioParameters* params) const { + RTC_LOG_F(LS_ERROR) << "Not supported on this platform"; + return -1; +} +#endif // WEBRTC_IOS + +} // namespace webrtc diff --git a/third_party/libwebrtc/modules/audio_device/audio_device_generic.h b/third_party/libwebrtc/modules/audio_device/audio_device_generic.h new file mode 100644 index 0000000000..41e24eb3b0 --- /dev/null +++ b/third_party/libwebrtc/modules/audio_device/audio_device_generic.h @@ -0,0 +1,145 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef AUDIO_DEVICE_AUDIO_DEVICE_GENERIC_H_ +#define AUDIO_DEVICE_AUDIO_DEVICE_GENERIC_H_ + +#include + +#include "modules/audio_device/audio_device_buffer.h" +#include "modules/audio_device/include/audio_device.h" +#include "modules/audio_device/include/audio_device_defines.h" + +namespace webrtc { + +class AudioDeviceGeneric { + public: + // For use with UMA logging. 
Must be kept in sync with histograms.xml in
+  // Chrome, located at
+  // https://cs.chromium.org/chromium/src/tools/metrics/histograms/histograms.xml
+  enum class InitStatus {
+    OK = 0,
+    PLAYOUT_ERROR = 1,
+    RECORDING_ERROR = 2,
+    OTHER_ERROR = 3,
+    NUM_STATUSES = 4
+  };
+  // Retrieve the currently utilized audio layer
+  virtual int32_t ActiveAudioLayer(
+      AudioDeviceModule::AudioLayer& audioLayer) const = 0;
+
+  // Main initialization and termination
+  virtual InitStatus Init() = 0;
+  virtual int32_t Terminate() = 0;
+  virtual bool Initialized() const = 0;
+
+  // Device enumeration
+  virtual int16_t PlayoutDevices() = 0;
+  virtual int16_t RecordingDevices() = 0;
+  virtual int32_t PlayoutDeviceName(uint16_t index,
+                                    char name[kAdmMaxDeviceNameSize],
+                                    char guid[kAdmMaxGuidSize]) = 0;
+  virtual int32_t RecordingDeviceName(uint16_t index,
+                                      char name[kAdmMaxDeviceNameSize],
+                                      char guid[kAdmMaxGuidSize]) = 0;
+
+  // Device selection
+  virtual int32_t SetPlayoutDevice(uint16_t index) = 0;
+  virtual int32_t SetPlayoutDevice(
+      AudioDeviceModule::WindowsDeviceType device) = 0;
+  virtual int32_t SetRecordingDevice(uint16_t index) = 0;
+  virtual int32_t SetRecordingDevice(
+      AudioDeviceModule::WindowsDeviceType device) = 0;
+
+  // Audio transport initialization
+  virtual int32_t PlayoutIsAvailable(bool& available) = 0;
+  virtual int32_t InitPlayout() = 0;
+  virtual bool PlayoutIsInitialized() const = 0;
+  virtual int32_t RecordingIsAvailable(bool& available) = 0;
+  virtual int32_t InitRecording() = 0;
+  virtual bool RecordingIsInitialized() const = 0;
+
+  // Audio transport control
+  virtual int32_t StartPlayout() = 0;
+  virtual int32_t StopPlayout() = 0;
+  virtual bool Playing() const = 0;
+  virtual int32_t StartRecording() = 0;
+  virtual int32_t StopRecording() = 0;
+  virtual bool Recording() const = 0;
+
+  // Audio mixer initialization
+  virtual int32_t InitSpeaker() = 0;
+  virtual bool SpeakerIsInitialized() const = 0;
+  virtual int32_t InitMicrophone() = 0;
+  virtual bool MicrophoneIsInitialized() const = 0;
+
+  // Speaker volume controls
+  virtual int32_t SpeakerVolumeIsAvailable(bool& available) = 0;
+  virtual int32_t SetSpeakerVolume(uint32_t volume) = 0;
+  virtual int32_t SpeakerVolume(uint32_t& volume) const = 0;
+  virtual int32_t MaxSpeakerVolume(uint32_t& maxVolume) const = 0;
+  virtual int32_t MinSpeakerVolume(uint32_t& minVolume) const = 0;
+
+  // Microphone volume controls
+  virtual int32_t MicrophoneVolumeIsAvailable(bool& available) = 0;
+  virtual int32_t SetMicrophoneVolume(uint32_t volume) = 0;
+  virtual int32_t MicrophoneVolume(uint32_t& volume) const = 0;
+  virtual int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const = 0;
+  virtual int32_t MinMicrophoneVolume(uint32_t& minVolume) const = 0;
+
+  // Speaker mute control
+  virtual int32_t SpeakerMuteIsAvailable(bool& available) = 0;
+  virtual int32_t SetSpeakerMute(bool enable) = 0;
+  virtual int32_t SpeakerMute(bool& enabled) const = 0;
+
+  // Microphone mute control
+  virtual int32_t MicrophoneMuteIsAvailable(bool& available) = 0;
+  virtual int32_t SetMicrophoneMute(bool enable) = 0;
+  virtual int32_t MicrophoneMute(bool& enabled) const = 0;
+
+  // Stereo support
+  virtual int32_t StereoPlayoutIsAvailable(bool& available) = 0;
+  virtual int32_t SetStereoPlayout(bool enable) = 0;
+  virtual int32_t StereoPlayout(bool& enabled) const = 0;
+  virtual int32_t StereoRecordingIsAvailable(bool& available) = 0;
+  virtual int32_t SetStereoRecording(bool enable) = 0;
+  virtual int32_t StereoRecording(bool& enabled) const = 0;
+
+  //
Delay information and control + virtual int32_t PlayoutDelay(uint16_t& delayMS) const = 0; + + // Android only + virtual bool BuiltInAECIsAvailable() const; + virtual bool BuiltInAGCIsAvailable() const; + virtual bool BuiltInNSIsAvailable() const; + + // Windows Core Audio and Android only. + virtual int32_t EnableBuiltInAEC(bool enable); + virtual int32_t EnableBuiltInAGC(bool enable); + virtual int32_t EnableBuiltInNS(bool enable); + + // Play underrun count. + virtual int32_t GetPlayoutUnderrunCount() const; + +// iOS only. +// TODO(henrika): add Android support. +#if defined(WEBRTC_IOS) + virtual int GetPlayoutAudioParameters(AudioParameters* params) const; + virtual int GetRecordAudioParameters(AudioParameters* params) const; +#endif // WEBRTC_IOS + + virtual void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) = 0; + + virtual ~AudioDeviceGeneric() {} +}; + +} // namespace webrtc + +#endif // AUDIO_DEVICE_AUDIO_DEVICE_GENERIC_H_ diff --git a/third_party/libwebrtc/modules/audio_device/audio_device_gn/moz.build b/third_party/libwebrtc/modules/audio_device/audio_device_gn/moz.build new file mode 100644 index 0000000000..df00e056c6 --- /dev/null +++ b/third_party/libwebrtc/modules/audio_device/audio_device_gn/moz.build @@ -0,0 +1,205 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + + + ### This moz.build was AUTOMATICALLY GENERATED from a GN config, ### + ### DO NOT edit it by hand. ### + +COMPILE_FLAGS["OS_INCLUDES"] = [] +AllowCompilerWarnings() + +DEFINES["ABSL_ALLOCATOR_NOTHROW"] = "1" +DEFINES["RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY"] = True +DEFINES["RTC_ENABLE_VP9"] = True +DEFINES["WEBRTC_ENABLE_PROTOBUF"] = "0" +DEFINES["WEBRTC_LIBRARY_IMPL"] = True +DEFINES["WEBRTC_MOZILLA_BUILD"] = True +DEFINES["WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS"] = "0" +DEFINES["WEBRTC_STRICT_FIELD_TRIALS"] = "0" + +FINAL_LIBRARY = "webrtc" + + +LOCAL_INCLUDES += [ + "!/ipc/ipdl/_ipdlheaders", + "!/third_party/libwebrtc/gen", + "/ipc/chromium/src", + "/third_party/libwebrtc/", + "/third_party/libwebrtc/third_party/abseil-cpp/", + "/tools/profiler/public" +] + +if not CONFIG["MOZ_DEBUG"]: + + DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "0" + DEFINES["NDEBUG"] = True + DEFINES["NVALGRIND"] = True + +if CONFIG["MOZ_DEBUG"] == "1": + + DEFINES["DYNAMIC_ANNOTATIONS_ENABLED"] = "1" + +if CONFIG["OS_TARGET"] == "Android": + + DEFINES["ANDROID"] = True + DEFINES["ANDROID_NDK_VERSION_ROLL"] = "r22_1" + DEFINES["HAVE_SYS_UIO_H"] = True + DEFINES["WEBRTC_ANDROID"] = True + DEFINES["WEBRTC_ANDROID_OPENSLES"] = True + DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True + DEFINES["WEBRTC_LINUX"] = True + DEFINES["WEBRTC_POSIX"] = True + DEFINES["_GNU_SOURCE"] = True + DEFINES["__STDC_CONSTANT_MACROS"] = True + DEFINES["__STDC_FORMAT_MACROS"] = True + +if CONFIG["OS_TARGET"] == "Darwin": + + DEFINES["WEBRTC_MAC"] = True + DEFINES["WEBRTC_POSIX"] = True + DEFINES["_LIBCPP_HAS_NO_ALIGNED_ALLOCATION"] = True + DEFINES["__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES"] = "0" + DEFINES["__STDC_CONSTANT_MACROS"] = True + DEFINES["__STDC_FORMAT_MACROS"] = True + +if CONFIG["OS_TARGET"] == "Linux": + + DEFINES["USE_AURA"] = "1" + DEFINES["USE_GLIB"] = "1" + DEFINES["USE_NSS_CERTS"] = "1" + DEFINES["USE_OZONE"] = "1" + DEFINES["USE_UDEV"] = True + DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True + DEFINES["WEBRTC_LINUX"] = True + DEFINES["WEBRTC_POSIX"] = True + 
DEFINES["_FILE_OFFSET_BITS"] = "64" + DEFINES["_LARGEFILE64_SOURCE"] = True + DEFINES["_LARGEFILE_SOURCE"] = True + DEFINES["__STDC_CONSTANT_MACROS"] = True + DEFINES["__STDC_FORMAT_MACROS"] = True + +if CONFIG["OS_TARGET"] == "OpenBSD": + + DEFINES["USE_GLIB"] = "1" + DEFINES["USE_OZONE"] = "1" + DEFINES["USE_X11"] = "1" + DEFINES["WEBRTC_BSD"] = True + DEFINES["WEBRTC_ENABLE_LIBEVENT"] = True + DEFINES["WEBRTC_POSIX"] = True + DEFINES["_FILE_OFFSET_BITS"] = "64" + DEFINES["_LARGEFILE64_SOURCE"] = True + DEFINES["_LARGEFILE_SOURCE"] = True + DEFINES["__STDC_CONSTANT_MACROS"] = True + DEFINES["__STDC_FORMAT_MACROS"] = True + +if CONFIG["OS_TARGET"] == "WINNT": + + DEFINES["CERT_CHAIN_PARA_HAS_EXTRA_FIELDS"] = True + DEFINES["NOMINMAX"] = True + DEFINES["NTDDI_VERSION"] = "0x0A000000" + DEFINES["PSAPI_VERSION"] = "2" + DEFINES["RTC_ENABLE_WIN_WGC"] = True + DEFINES["UNICODE"] = True + DEFINES["USE_AURA"] = "1" + DEFINES["WEBRTC_WIN"] = True + DEFINES["WIN32"] = True + DEFINES["WIN32_LEAN_AND_MEAN"] = True + DEFINES["WINAPI_FAMILY"] = "WINAPI_FAMILY_DESKTOP_APP" + DEFINES["WINVER"] = "0x0A00" + DEFINES["_ATL_NO_OPENGL"] = True + DEFINES["_CRT_RAND_S"] = True + DEFINES["_CRT_SECURE_NO_DEPRECATE"] = True + DEFINES["_ENABLE_EXTENDED_ALIGNED_STORAGE"] = True + DEFINES["_HAS_EXCEPTIONS"] = "0" + DEFINES["_HAS_NODISCARD"] = True + DEFINES["_SCL_SECURE_NO_DEPRECATE"] = True + DEFINES["_SECURE_ATL"] = True + DEFINES["_UNICODE"] = True + DEFINES["_WIN32_WINNT"] = "0x0A00" + DEFINES["_WINDOWS"] = True + DEFINES["__STD_C"] = True + +if CONFIG["TARGET_CPU"] == "aarch64": + + DEFINES["WEBRTC_ARCH_ARM64"] = True + DEFINES["WEBRTC_HAS_NEON"] = True + +if CONFIG["TARGET_CPU"] == "arm": + + DEFINES["WEBRTC_ARCH_ARM"] = True + DEFINES["WEBRTC_ARCH_ARM_V7"] = True + DEFINES["WEBRTC_HAS_NEON"] = True + +if CONFIG["TARGET_CPU"] == "mips32": + + DEFINES["MIPS32_LE"] = True + DEFINES["MIPS_FPU_LE"] = True + DEFINES["_GNU_SOURCE"] = True + +if CONFIG["TARGET_CPU"] == "mips64": + + DEFINES["_GNU_SOURCE"] = True + +if CONFIG["TARGET_CPU"] == "x86": + + DEFINES["WEBRTC_ENABLE_AVX2"] = True + +if CONFIG["TARGET_CPU"] == "x86_64": + + DEFINES["WEBRTC_ENABLE_AVX2"] = True + +if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Android": + + DEFINES["_DEBUG"] = True + +if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Darwin": + + DEFINES["_DEBUG"] = True + +if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "Linux": + + DEFINES["_DEBUG"] = True + +if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "OpenBSD": + + DEFINES["_DEBUG"] = True + +if CONFIG["MOZ_DEBUG"] == "1" and CONFIG["OS_TARGET"] == "WINNT": + + DEFINES["_HAS_ITERATOR_DEBUGGING"] = "0" + +if CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux": + + DEFINES["USE_X11"] = "1" + +if CONFIG["OS_TARGET"] == "Android" and CONFIG["TARGET_CPU"] == "arm": + + OS_LIBS += [ + "android_support", + "unwind" + ] + +if CONFIG["OS_TARGET"] == "Android" and CONFIG["TARGET_CPU"] == "x86": + + OS_LIBS += [ + "android_support" + ] + +if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "aarch64": + + DEFINES["_GNU_SOURCE"] = True + +if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "arm": + + DEFINES["_GNU_SOURCE"] = True + +if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "x86": + + DEFINES["_GNU_SOURCE"] = True + +if CONFIG["OS_TARGET"] == "Linux" and CONFIG["TARGET_CPU"] == "x86_64": + + DEFINES["_GNU_SOURCE"] = True + +Library("audio_device_gn") diff --git 
a/third_party/libwebrtc/modules/audio_device/audio_device_impl.cc b/third_party/libwebrtc/modules/audio_device/audio_device_impl.cc new file mode 100644 index 0000000000..80ed928933 --- /dev/null +++ b/third_party/libwebrtc/modules/audio_device/audio_device_impl.cc @@ -0,0 +1,909 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/audio_device/audio_device_impl.h" + +#include + +#include "api/make_ref_counted.h" +#include "api/scoped_refptr.h" +#include "modules/audio_device/audio_device_config.h" // IWYU pragma: keep +#include "modules/audio_device/audio_device_generic.h" +#include "rtc_base/checks.h" +#include "rtc_base/logging.h" +#include "system_wrappers/include/metrics.h" + +#if defined(_WIN32) +#if defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD) +#include "modules/audio_device/win/audio_device_core_win.h" +#endif +#elif defined(WEBRTC_ANDROID) +#include +#include "sdk/android/native_api/audio_device_module/audio_device_android.h" +#elif defined(WEBRTC_LINUX) +#if defined(WEBRTC_ENABLE_LINUX_ALSA) +#include "modules/audio_device/linux/audio_device_alsa_linux.h" +#endif +#if defined(WEBRTC_ENABLE_LINUX_PULSE) +#include "modules/audio_device/linux/audio_device_pulse_linux.h" +#endif +#elif defined(WEBRTC_IOS) +#include "sdk/objc/native/src/audio/audio_device_ios.h" +#elif defined(WEBRTC_MAC) +#include "modules/audio_device/mac/audio_device_mac.h" +#endif +#if defined(WEBRTC_DUMMY_FILE_DEVICES) +#include "modules/audio_device/dummy/file_audio_device.h" +#include "modules/audio_device/dummy/file_audio_device_factory.h" +#endif +#include "modules/audio_device/dummy/audio_device_dummy.h" + +#define CHECKinitialized_() \ + { \ + if (!initialized_) { \ + return -1; \ + } \ + } + +#define CHECKinitialized__BOOL() \ + { \ + if (!initialized_) { \ + return false; \ + } \ + } + +namespace webrtc { + +rtc::scoped_refptr AudioDeviceModule::Create( + AudioLayer audio_layer, + TaskQueueFactory* task_queue_factory) { + RTC_DLOG(LS_INFO) << __FUNCTION__; + return AudioDeviceModule::CreateForTest(audio_layer, task_queue_factory); +} + +// static +rtc::scoped_refptr AudioDeviceModule::CreateForTest( + AudioLayer audio_layer, + TaskQueueFactory* task_queue_factory) { + RTC_DLOG(LS_INFO) << __FUNCTION__; + + // The "AudioDeviceModule::kWindowsCoreAudio2" audio layer has its own + // dedicated factory method which should be used instead. + if (audio_layer == AudioDeviceModule::kWindowsCoreAudio2) { + RTC_LOG(LS_ERROR) << "Use the CreateWindowsCoreAudioAudioDeviceModule() " + "factory method instead for this option."; + return nullptr; + } else if (audio_layer == AudioDeviceModule::kAndroidJavaAudio || + audio_layer == AudioDeviceModule::kAndroidOpenSLESAudio || + audio_layer == AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio || + audio_layer == kAndroidAAudioAudio || + audio_layer == kAndroidJavaInputAndAAudioOutputAudio) { + RTC_LOG(LS_ERROR) << "Use the CreateAndroidAudioDeviceModule() " + "factory method instead for this option."; + return nullptr; + } + + // Create the generic reference counted (platform independent) implementation. 
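+  // The three steps below (platform check, platform-specific creation,
+  // buffer attachment) are hidden from callers, who only see something like
+  // the following sketch (hypothetical variable names):
+  //
+  //   auto adm = AudioDeviceModule::Create(
+  //       AudioDeviceModule::kPlatformDefaultAudio, task_queue_factory);
+  //   if (adm && adm->Init() == 0) {
+  //     adm->InitPlayout();
+  //     adm->StartPlayout();
+  //   }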
+ auto audio_device = rtc::make_ref_counted( + audio_layer, task_queue_factory); + + // Ensure that the current platform is supported. + if (audio_device->CheckPlatform() == -1) { + return nullptr; + } + + // Create the platform-dependent implementation. + if (audio_device->CreatePlatformSpecificObjects() == -1) { + return nullptr; + } + + // Ensure that the generic audio buffer can communicate with the platform + // specific parts. + if (audio_device->AttachAudioBuffer() == -1) { + return nullptr; + } + + return audio_device; +} + +AudioDeviceModuleImpl::AudioDeviceModuleImpl( + AudioLayer audio_layer, + TaskQueueFactory* task_queue_factory) + : audio_layer_(audio_layer), audio_device_buffer_(task_queue_factory) { + RTC_DLOG(LS_INFO) << __FUNCTION__; +} + +AudioDeviceModuleImpl::AudioDeviceModuleImpl( + AudioLayer audio_layer, + std::unique_ptr audio_device, + TaskQueueFactory* task_queue_factory, + bool create_detached) + : audio_layer_(audio_layer), + audio_device_buffer_(task_queue_factory, create_detached), + audio_device_(std::move(audio_device)) { + RTC_DLOG(LS_INFO) << __FUNCTION__; +} + +int32_t AudioDeviceModuleImpl::CheckPlatform() { + RTC_DLOG(LS_INFO) << __FUNCTION__; + // Ensure that the current platform is supported + PlatformType platform(kPlatformNotSupported); +#if defined(_WIN32) + platform = kPlatformWin32; + RTC_LOG(LS_INFO) << "current platform is Win32"; +#elif defined(WEBRTC_ANDROID) + platform = kPlatformAndroid; + RTC_LOG(LS_INFO) << "current platform is Android"; +#elif defined(WEBRTC_LINUX) + platform = kPlatformLinux; + RTC_LOG(LS_INFO) << "current platform is Linux"; +#elif defined(WEBRTC_IOS) + platform = kPlatformIOS; + RTC_LOG(LS_INFO) << "current platform is IOS"; +#elif defined(WEBRTC_MAC) + platform = kPlatformMac; + RTC_LOG(LS_INFO) << "current platform is Mac"; +#elif defined(WEBRTC_FUCHSIA) + platform = kPlatformFuchsia; + RTC_LOG(LS_INFO) << "current platform is Fuchsia"; +#endif + if (platform == kPlatformNotSupported) { + RTC_LOG(LS_ERROR) + << "current platform is not supported => this module will self " + "destruct!"; + return -1; + } + platform_type_ = platform; + return 0; +} + +int32_t AudioDeviceModuleImpl::CreatePlatformSpecificObjects() { + RTC_LOG(LS_INFO) << __FUNCTION__; + if (audio_device_ != nullptr) { + RTC_LOG(LS_INFO) << "Reusing provided audio device"; + return 0; + } +// Dummy ADM implementations if build flags are set. +#if defined(WEBRTC_DUMMY_AUDIO_BUILD) + audio_device_.reset(new AudioDeviceDummy()); + RTC_LOG(LS_INFO) << "Dummy Audio APIs will be utilized"; +#elif defined(WEBRTC_DUMMY_FILE_DEVICES) + audio_device_.reset(FileAudioDeviceFactory::CreateFileAudioDevice()); + if (audio_device_) { + RTC_LOG(LS_INFO) << "Will use file-playing dummy device."; + } else { + // Create a dummy device instead. + audio_device_.reset(new AudioDeviceDummy()); + RTC_LOG(LS_INFO) << "Dummy Audio APIs will be utilized"; + } + +// Real (non-dummy) ADM implementations. +#else + AudioLayer audio_layer(PlatformAudioLayer()); +// Windows ADM implementation. +#if defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD) + if ((audio_layer == kWindowsCoreAudio) || + (audio_layer == kPlatformDefaultAudio)) { + RTC_LOG(LS_INFO) << "Attempting to use the Windows Core Audio APIs..."; + if (AudioDeviceWindowsCore::CoreAudioIsSupported()) { + audio_device_.reset(new AudioDeviceWindowsCore()); + RTC_LOG(LS_INFO) << "Windows Core Audio APIs will be utilized"; + } + } +#endif // defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD) + +// Linux ADM implementation. 
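+// A client can bypass the default selection below by requesting a specific
+// layer up front, e.g. (sketch; `tqf` is a hypothetical TaskQueueFactory*):
+//
+//   AudioDeviceModule::Create(AudioDeviceModule::kLinuxAlsaAudio, tqf);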
+// Note that, WEBRTC_ENABLE_LINUX_ALSA is always defined by default when +// WEBRTC_LINUX is defined. WEBRTC_ENABLE_LINUX_PULSE depends on the +// 'rtc_include_pulse_audio' build flag. +// TODO(bugs.webrtc.org/9127): improve support and make it more clear that +// PulseAudio is the default selection. +#if !defined(WEBRTC_ANDROID) && defined(WEBRTC_LINUX) +#if !defined(WEBRTC_ENABLE_LINUX_PULSE) + // Build flag 'rtc_include_pulse_audio' is set to false. In this mode: + // - kPlatformDefaultAudio => ALSA, and + // - kLinuxAlsaAudio => ALSA, and + // - kLinuxPulseAudio => Invalid selection. + RTC_LOG(LS_WARNING) << "PulseAudio is disabled using build flag."; + if ((audio_layer == kLinuxAlsaAudio) || + (audio_layer == kPlatformDefaultAudio)) { + audio_device_.reset(new AudioDeviceLinuxALSA()); + RTC_LOG(LS_INFO) << "Linux ALSA APIs will be utilized."; + } +#else + // Build flag 'rtc_include_pulse_audio' is set to true (default). In this + // mode: + // - kPlatformDefaultAudio => PulseAudio, and + // - kLinuxPulseAudio => PulseAudio, and + // - kLinuxAlsaAudio => ALSA (supported but not default). + RTC_LOG(LS_INFO) << "PulseAudio support is enabled."; + if ((audio_layer == kLinuxPulseAudio) || + (audio_layer == kPlatformDefaultAudio)) { + // Linux PulseAudio implementation is default. + audio_device_.reset(new AudioDeviceLinuxPulse()); + RTC_LOG(LS_INFO) << "Linux PulseAudio APIs will be utilized"; + } else if (audio_layer == kLinuxAlsaAudio) { + audio_device_.reset(new AudioDeviceLinuxALSA()); + RTC_LOG(LS_WARNING) << "Linux ALSA APIs will be utilized."; + } +#endif // #if !defined(WEBRTC_ENABLE_LINUX_PULSE) +#endif // #if defined(WEBRTC_LINUX) + +// iOS ADM implementation. +#if defined(WEBRTC_IOS) + if (audio_layer == kPlatformDefaultAudio) { + audio_device_.reset( + new ios_adm::AudioDeviceIOS(/*bypass_voice_processing=*/false)); + RTC_LOG(LS_INFO) << "iPhone Audio APIs will be utilized."; + } +// END #if defined(WEBRTC_IOS) + +// Mac OS X ADM implementation. +#elif defined(WEBRTC_MAC) + if (audio_layer == kPlatformDefaultAudio) { + audio_device_.reset(new AudioDeviceMac()); + RTC_LOG(LS_INFO) << "Mac OS X Audio APIs will be utilized."; + } +#endif // WEBRTC_MAC + + // Dummy ADM implementation. 
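+  // Requesting kDummyAudio explicitly yields an ADM whose calls succeed but
+  // which moves no audio, e.g. (sketch; `tqf` is a hypothetical
+  // TaskQueueFactory*):
+  //
+  //   AudioDeviceModule::Create(AudioDeviceModule::kDummyAudio, tqf);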
+ if (audio_layer == kDummyAudio) { + audio_device_.reset(new AudioDeviceDummy()); + RTC_LOG(LS_INFO) << "Dummy Audio APIs will be utilized."; + } +#endif // if defined(WEBRTC_DUMMY_AUDIO_BUILD) + + if (!audio_device_) { + RTC_LOG(LS_ERROR) + << "Failed to create the platform specific ADM implementation."; + return -1; + } + return 0; +} + +int32_t AudioDeviceModuleImpl::AttachAudioBuffer() { + RTC_LOG(LS_INFO) << __FUNCTION__; + audio_device_->AttachAudioBuffer(&audio_device_buffer_); + return 0; +} + +AudioDeviceModuleImpl::~AudioDeviceModuleImpl() { + RTC_LOG(LS_INFO) << __FUNCTION__; +} + +int32_t AudioDeviceModuleImpl::ActiveAudioLayer(AudioLayer* audioLayer) const { + RTC_LOG(LS_INFO) << __FUNCTION__; + AudioLayer activeAudio; + if (audio_device_->ActiveAudioLayer(activeAudio) == -1) { + return -1; + } + *audioLayer = activeAudio; + return 0; +} + +int32_t AudioDeviceModuleImpl::Init() { + RTC_LOG(LS_INFO) << __FUNCTION__; + if (initialized_) + return 0; + RTC_CHECK(audio_device_); + AudioDeviceGeneric::InitStatus status = audio_device_->Init(); + RTC_HISTOGRAM_ENUMERATION( + "WebRTC.Audio.InitializationResult", static_cast(status), + static_cast(AudioDeviceGeneric::InitStatus::NUM_STATUSES)); + if (status != AudioDeviceGeneric::InitStatus::OK) { + RTC_LOG(LS_ERROR) << "Audio device initialization failed."; + return -1; + } + initialized_ = true; + return 0; +} + +int32_t AudioDeviceModuleImpl::Terminate() { + RTC_LOG(LS_INFO) << __FUNCTION__; + if (!initialized_) + return 0; + if (audio_device_->Terminate() == -1) { + return -1; + } + initialized_ = false; + return 0; +} + +bool AudioDeviceModuleImpl::Initialized() const { + RTC_LOG(LS_INFO) << __FUNCTION__ << ": " << initialized_; + return initialized_; +} + +int32_t AudioDeviceModuleImpl::InitSpeaker() { + RTC_LOG(LS_INFO) << __FUNCTION__; + CHECKinitialized_(); + return audio_device_->InitSpeaker(); +} + +int32_t AudioDeviceModuleImpl::InitMicrophone() { + RTC_LOG(LS_INFO) << __FUNCTION__; + CHECKinitialized_(); + return audio_device_->InitMicrophone(); +} + +int32_t AudioDeviceModuleImpl::SpeakerVolumeIsAvailable(bool* available) { + RTC_LOG(LS_INFO) << __FUNCTION__; + CHECKinitialized_(); + bool isAvailable = false; + if (audio_device_->SpeakerVolumeIsAvailable(isAvailable) == -1) { + return -1; + } + *available = isAvailable; + RTC_LOG(LS_INFO) << "output: " << isAvailable; + return 0; +} + +int32_t AudioDeviceModuleImpl::SetSpeakerVolume(uint32_t volume) { + RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << volume << ")"; + CHECKinitialized_(); + return audio_device_->SetSpeakerVolume(volume); +} + +int32_t AudioDeviceModuleImpl::SpeakerVolume(uint32_t* volume) const { + RTC_LOG(LS_INFO) << __FUNCTION__; + CHECKinitialized_(); + uint32_t level = 0; + if (audio_device_->SpeakerVolume(level) == -1) { + return -1; + } + *volume = level; + RTC_LOG(LS_INFO) << "output: " << *volume; + return 0; +} + +bool AudioDeviceModuleImpl::SpeakerIsInitialized() const { + RTC_LOG(LS_INFO) << __FUNCTION__; + CHECKinitialized__BOOL(); + bool isInitialized = audio_device_->SpeakerIsInitialized(); + RTC_LOG(LS_INFO) << "output: " << isInitialized; + return isInitialized; +} + +bool AudioDeviceModuleImpl::MicrophoneIsInitialized() const { + RTC_LOG(LS_INFO) << __FUNCTION__; + CHECKinitialized__BOOL(); + bool isInitialized = audio_device_->MicrophoneIsInitialized(); + RTC_LOG(LS_INFO) << "output: " << isInitialized; + return isInitialized; +} + +int32_t AudioDeviceModuleImpl::MaxSpeakerVolume(uint32_t* maxVolume) const { + CHECKinitialized_(); + uint32_t 
maxVol = 0; + if (audio_device_->MaxSpeakerVolume(maxVol) == -1) { + return -1; + } + *maxVolume = maxVol; + return 0; +} + +int32_t AudioDeviceModuleImpl::MinSpeakerVolume(uint32_t* minVolume) const { + CHECKinitialized_(); + uint32_t minVol = 0; + if (audio_device_->MinSpeakerVolume(minVol) == -1) { + return -1; + } + *minVolume = minVol; + return 0; +} + +int32_t AudioDeviceModuleImpl::SpeakerMuteIsAvailable(bool* available) { + RTC_LOG(LS_INFO) << __FUNCTION__; + CHECKinitialized_(); + bool isAvailable = false; + if (audio_device_->SpeakerMuteIsAvailable(isAvailable) == -1) { + return -1; + } + *available = isAvailable; + RTC_LOG(LS_INFO) << "output: " << isAvailable; + return 0; +} + +int32_t AudioDeviceModuleImpl::SetSpeakerMute(bool enable) { + RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")"; + CHECKinitialized_(); + return audio_device_->SetSpeakerMute(enable); +} + +int32_t AudioDeviceModuleImpl::SpeakerMute(bool* enabled) const { + RTC_LOG(LS_INFO) << __FUNCTION__; + CHECKinitialized_(); + bool muted = false; + if (audio_device_->SpeakerMute(muted) == -1) { + return -1; + } + *enabled = muted; + RTC_LOG(LS_INFO) << "output: " << muted; + return 0; +} + +int32_t AudioDeviceModuleImpl::MicrophoneMuteIsAvailable(bool* available) { + RTC_LOG(LS_INFO) << __FUNCTION__; + CHECKinitialized_(); + bool isAvailable = false; + if (audio_device_->MicrophoneMuteIsAvailable(isAvailable) == -1) { + return -1; + } + *available = isAvailable; + RTC_LOG(LS_INFO) << "output: " << isAvailable; + return 0; +} + +int32_t AudioDeviceModuleImpl::SetMicrophoneMute(bool enable) { + RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")"; + CHECKinitialized_(); + return (audio_device_->SetMicrophoneMute(enable)); +} + +int32_t AudioDeviceModuleImpl::MicrophoneMute(bool* enabled) const { + RTC_LOG(LS_INFO) << __FUNCTION__; + CHECKinitialized_(); + bool muted = false; + if (audio_device_->MicrophoneMute(muted) == -1) { + return -1; + } + *enabled = muted; + RTC_LOG(LS_INFO) << "output: " << muted; + return 0; +} + +int32_t AudioDeviceModuleImpl::MicrophoneVolumeIsAvailable(bool* available) { + RTC_LOG(LS_INFO) << __FUNCTION__; + CHECKinitialized_(); + bool isAvailable = false; + if (audio_device_->MicrophoneVolumeIsAvailable(isAvailable) == -1) { + return -1; + } + *available = isAvailable; + RTC_LOG(LS_INFO) << "output: " << isAvailable; + return 0; +} + +int32_t AudioDeviceModuleImpl::SetMicrophoneVolume(uint32_t volume) { + RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << volume << ")"; + CHECKinitialized_(); + return (audio_device_->SetMicrophoneVolume(volume)); +} + +int32_t AudioDeviceModuleImpl::MicrophoneVolume(uint32_t* volume) const { + RTC_LOG(LS_INFO) << __FUNCTION__; + CHECKinitialized_(); + uint32_t level = 0; + if (audio_device_->MicrophoneVolume(level) == -1) { + return -1; + } + *volume = level; + RTC_LOG(LS_INFO) << "output: " << *volume; + return 0; +} + +int32_t AudioDeviceModuleImpl::StereoRecordingIsAvailable( + bool* available) const { + RTC_LOG(LS_INFO) << __FUNCTION__; + CHECKinitialized_(); + bool isAvailable = false; + if (audio_device_->StereoRecordingIsAvailable(isAvailable) == -1) { + return -1; + } + *available = isAvailable; + RTC_LOG(LS_INFO) << "output: " << isAvailable; + return 0; +} + +int32_t AudioDeviceModuleImpl::SetStereoRecording(bool enable) { + RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")"; + CHECKinitialized_(); + if (audio_device_->RecordingIsInitialized()) { + RTC_LOG(LS_ERROR) + << "unable to set stereo mode after recording is initialized"; + 
return -1; + } + if (audio_device_->SetStereoRecording(enable) == -1) { + if (enable) { + RTC_LOG(LS_WARNING) << "failed to enable stereo recording"; + } + return -1; + } + int8_t nChannels(1); + if (enable) { + nChannels = 2; + } + audio_device_buffer_.SetRecordingChannels(nChannels); + return 0; +} + +int32_t AudioDeviceModuleImpl::StereoRecording(bool* enabled) const { + RTC_LOG(LS_INFO) << __FUNCTION__; + CHECKinitialized_(); + bool stereo = false; + if (audio_device_->StereoRecording(stereo) == -1) { + return -1; + } + *enabled = stereo; + RTC_LOG(LS_INFO) << "output: " << stereo; + return 0; +} + +int32_t AudioDeviceModuleImpl::StereoPlayoutIsAvailable(bool* available) const { + RTC_LOG(LS_INFO) << __FUNCTION__; + CHECKinitialized_(); + bool isAvailable = false; + if (audio_device_->StereoPlayoutIsAvailable(isAvailable) == -1) { + return -1; + } + *available = isAvailable; + RTC_LOG(LS_INFO) << "output: " << isAvailable; + return 0; +} + +int32_t AudioDeviceModuleImpl::SetStereoPlayout(bool enable) { + RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")"; + CHECKinitialized_(); + if (audio_device_->PlayoutIsInitialized()) { + RTC_LOG(LS_ERROR) + << "unable to set stereo mode while playing side is initialized"; + return -1; + } + if (audio_device_->SetStereoPlayout(enable)) { + RTC_LOG(LS_WARNING) << "stereo playout is not supported"; + return -1; + } + int8_t nChannels(1); + if (enable) { + nChannels = 2; + } + audio_device_buffer_.SetPlayoutChannels(nChannels); + return 0; +} + +int32_t AudioDeviceModuleImpl::StereoPlayout(bool* enabled) const { + RTC_LOG(LS_INFO) << __FUNCTION__; + CHECKinitialized_(); + bool stereo = false; + if (audio_device_->StereoPlayout(stereo) == -1) { + return -1; + } + *enabled = stereo; + RTC_LOG(LS_INFO) << "output: " << stereo; + return 0; +} + +int32_t AudioDeviceModuleImpl::PlayoutIsAvailable(bool* available) { + RTC_LOG(LS_INFO) << __FUNCTION__; + CHECKinitialized_(); + bool isAvailable = false; + if (audio_device_->PlayoutIsAvailable(isAvailable) == -1) { + return -1; + } + *available = isAvailable; + RTC_LOG(LS_INFO) << "output: " << isAvailable; + return 0; +} + +int32_t AudioDeviceModuleImpl::RecordingIsAvailable(bool* available) { + RTC_LOG(LS_INFO) << __FUNCTION__; + CHECKinitialized_(); + bool isAvailable = false; + if (audio_device_->RecordingIsAvailable(isAvailable) == -1) { + return -1; + } + *available = isAvailable; + RTC_LOG(LS_INFO) << "output: " << isAvailable; + return 0; +} + +int32_t AudioDeviceModuleImpl::MaxMicrophoneVolume(uint32_t* maxVolume) const { + CHECKinitialized_(); + uint32_t maxVol(0); + if (audio_device_->MaxMicrophoneVolume(maxVol) == -1) { + return -1; + } + *maxVolume = maxVol; + return 0; +} + +int32_t AudioDeviceModuleImpl::MinMicrophoneVolume(uint32_t* minVolume) const { + CHECKinitialized_(); + uint32_t minVol(0); + if (audio_device_->MinMicrophoneVolume(minVol) == -1) { + return -1; + } + *minVolume = minVol; + return 0; +} + +int16_t AudioDeviceModuleImpl::PlayoutDevices() { + RTC_LOG(LS_INFO) << __FUNCTION__; + CHECKinitialized_(); + uint16_t nPlayoutDevices = audio_device_->PlayoutDevices(); + RTC_LOG(LS_INFO) << "output: " << nPlayoutDevices; + return (int16_t)(nPlayoutDevices); +} + +int32_t AudioDeviceModuleImpl::SetPlayoutDevice(uint16_t index) { + RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << index << ")"; + CHECKinitialized_(); + return audio_device_->SetPlayoutDevice(index); +} + +int32_t AudioDeviceModuleImpl::SetPlayoutDevice(WindowsDeviceType device) { + RTC_LOG(LS_INFO) << __FUNCTION__; + 
CHECKinitialized_(); + return audio_device_->SetPlayoutDevice(device); +} + +int32_t AudioDeviceModuleImpl::PlayoutDeviceName( + uint16_t index, + char name[kAdmMaxDeviceNameSize], + char guid[kAdmMaxGuidSize]) { + RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << index << ", ...)"; + CHECKinitialized_(); + if (name == NULL) { + return -1; + } + if (audio_device_->PlayoutDeviceName(index, name, guid) == -1) { + return -1; + } + if (name != NULL) { + RTC_LOG(LS_INFO) << "output: name = " << name; + } + if (guid != NULL) { + RTC_LOG(LS_INFO) << "output: guid = " << guid; + } + return 0; +} + +int32_t AudioDeviceModuleImpl::RecordingDeviceName( + uint16_t index, + char name[kAdmMaxDeviceNameSize], + char guid[kAdmMaxGuidSize]) { + RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << index << ", ...)"; + CHECKinitialized_(); + if (name == NULL) { + return -1; + } + if (audio_device_->RecordingDeviceName(index, name, guid) == -1) { + return -1; + } + if (name != NULL) { + RTC_LOG(LS_INFO) << "output: name = " << name; + } + if (guid != NULL) { + RTC_LOG(LS_INFO) << "output: guid = " << guid; + } + return 0; +} + +int16_t AudioDeviceModuleImpl::RecordingDevices() { + RTC_LOG(LS_INFO) << __FUNCTION__; + CHECKinitialized_(); + uint16_t nRecordingDevices = audio_device_->RecordingDevices(); + RTC_LOG(LS_INFO) << "output: " << nRecordingDevices; + return (int16_t)nRecordingDevices; +} + +int32_t AudioDeviceModuleImpl::SetRecordingDevice(uint16_t index) { + RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << index << ")"; + CHECKinitialized_(); + return audio_device_->SetRecordingDevice(index); +} + +int32_t AudioDeviceModuleImpl::SetRecordingDevice(WindowsDeviceType device) { + RTC_LOG(LS_INFO) << __FUNCTION__; + CHECKinitialized_(); + return audio_device_->SetRecordingDevice(device); +} + +int32_t AudioDeviceModuleImpl::InitPlayout() { + RTC_LOG(LS_INFO) << __FUNCTION__; + CHECKinitialized_(); + if (PlayoutIsInitialized()) { + return 0; + } + int32_t result = audio_device_->InitPlayout(); + RTC_LOG(LS_INFO) << "output: " << result; + RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.InitPlayoutSuccess", + static_cast(result == 0)); + return result; +} + +int32_t AudioDeviceModuleImpl::InitRecording() { + RTC_LOG(LS_INFO) << __FUNCTION__; + CHECKinitialized_(); + if (RecordingIsInitialized()) { + return 0; + } + int32_t result = audio_device_->InitRecording(); + RTC_LOG(LS_INFO) << "output: " << result; + RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.InitRecordingSuccess", + static_cast(result == 0)); + return result; +} + +bool AudioDeviceModuleImpl::PlayoutIsInitialized() const { + RTC_LOG(LS_INFO) << __FUNCTION__; + CHECKinitialized__BOOL(); + return audio_device_->PlayoutIsInitialized(); +} + +bool AudioDeviceModuleImpl::RecordingIsInitialized() const { + RTC_LOG(LS_INFO) << __FUNCTION__; + CHECKinitialized__BOOL(); + return audio_device_->RecordingIsInitialized(); +} + +int32_t AudioDeviceModuleImpl::StartPlayout() { + RTC_LOG(LS_INFO) << __FUNCTION__; + CHECKinitialized_(); + if (Playing()) { + return 0; + } + audio_device_buffer_.StartPlayout(); + int32_t result = audio_device_->StartPlayout(); + RTC_LOG(LS_INFO) << "output: " << result; + RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StartPlayoutSuccess", + static_cast(result == 0)); + return result; +} + +int32_t AudioDeviceModuleImpl::StopPlayout() { + RTC_LOG(LS_INFO) << __FUNCTION__; + CHECKinitialized_(); + int32_t result = audio_device_->StopPlayout(); + audio_device_buffer_.StopPlayout(); + RTC_LOG(LS_INFO) << "output: " << result; + RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StopPlayoutSuccess", + 
static_cast(result == 0)); + return result; +} + +bool AudioDeviceModuleImpl::Playing() const { + RTC_LOG(LS_INFO) << __FUNCTION__; + CHECKinitialized__BOOL(); + return audio_device_->Playing(); +} + +int32_t AudioDeviceModuleImpl::StartRecording() { + RTC_LOG(LS_INFO) << __FUNCTION__; + CHECKinitialized_(); + if (Recording()) { + return 0; + } + audio_device_buffer_.StartRecording(); + int32_t result = audio_device_->StartRecording(); + RTC_LOG(LS_INFO) << "output: " << result; + RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StartRecordingSuccess", + static_cast(result == 0)); + return result; +} + +int32_t AudioDeviceModuleImpl::StopRecording() { + RTC_LOG(LS_INFO) << __FUNCTION__; + CHECKinitialized_(); + int32_t result = audio_device_->StopRecording(); + audio_device_buffer_.StopRecording(); + RTC_LOG(LS_INFO) << "output: " << result; + RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StopRecordingSuccess", + static_cast(result == 0)); + return result; +} + +bool AudioDeviceModuleImpl::Recording() const { + RTC_LOG(LS_INFO) << __FUNCTION__; + CHECKinitialized__BOOL(); + return audio_device_->Recording(); +} + +int32_t AudioDeviceModuleImpl::RegisterAudioCallback( + AudioTransport* audioCallback) { + RTC_LOG(LS_INFO) << __FUNCTION__; + return audio_device_buffer_.RegisterAudioCallback(audioCallback); +} + +int32_t AudioDeviceModuleImpl::PlayoutDelay(uint16_t* delayMS) const { + CHECKinitialized_(); + uint16_t delay = 0; + if (audio_device_->PlayoutDelay(delay) == -1) { + RTC_LOG(LS_ERROR) << "failed to retrieve the playout delay"; + return -1; + } + *delayMS = delay; + return 0; +} + +bool AudioDeviceModuleImpl::BuiltInAECIsAvailable() const { + RTC_LOG(LS_INFO) << __FUNCTION__; + CHECKinitialized__BOOL(); + bool isAvailable = audio_device_->BuiltInAECIsAvailable(); + RTC_LOG(LS_INFO) << "output: " << isAvailable; + return isAvailable; +} + +int32_t AudioDeviceModuleImpl::EnableBuiltInAEC(bool enable) { + RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")"; + CHECKinitialized_(); + int32_t ok = audio_device_->EnableBuiltInAEC(enable); + RTC_LOG(LS_INFO) << "output: " << ok; + return ok; +} + +bool AudioDeviceModuleImpl::BuiltInAGCIsAvailable() const { + RTC_LOG(LS_INFO) << __FUNCTION__; + CHECKinitialized__BOOL(); + bool isAvailable = audio_device_->BuiltInAGCIsAvailable(); + RTC_LOG(LS_INFO) << "output: " << isAvailable; + return isAvailable; +} + +int32_t AudioDeviceModuleImpl::EnableBuiltInAGC(bool enable) { + RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")"; + CHECKinitialized_(); + int32_t ok = audio_device_->EnableBuiltInAGC(enable); + RTC_LOG(LS_INFO) << "output: " << ok; + return ok; +} + +bool AudioDeviceModuleImpl::BuiltInNSIsAvailable() const { + RTC_LOG(LS_INFO) << __FUNCTION__; + CHECKinitialized__BOOL(); + bool isAvailable = audio_device_->BuiltInNSIsAvailable(); + RTC_LOG(LS_INFO) << "output: " << isAvailable; + return isAvailable; +} + +int32_t AudioDeviceModuleImpl::EnableBuiltInNS(bool enable) { + RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")"; + CHECKinitialized_(); + int32_t ok = audio_device_->EnableBuiltInNS(enable); + RTC_LOG(LS_INFO) << "output: " << ok; + return ok; +} + +int32_t AudioDeviceModuleImpl::GetPlayoutUnderrunCount() const { + CHECKinitialized_(); + int32_t underrunCount = audio_device_->GetPlayoutUnderrunCount(); + return underrunCount; +} + +#if defined(WEBRTC_IOS) +int AudioDeviceModuleImpl::GetPlayoutAudioParameters( + AudioParameters* params) const { + RTC_LOG(LS_INFO) << __FUNCTION__; + int r = audio_device_->GetPlayoutAudioParameters(params); + 
RTC_LOG(LS_INFO) << "output: " << r; + return r; +} + +int AudioDeviceModuleImpl::GetRecordAudioParameters( + AudioParameters* params) const { + RTC_LOG(LS_INFO) << __FUNCTION__; + int r = audio_device_->GetRecordAudioParameters(params); + RTC_LOG(LS_INFO) << "output: " << r; + return r; +} +#endif // WEBRTC_IOS + +AudioDeviceModuleImpl::PlatformType AudioDeviceModuleImpl::Platform() const { + RTC_LOG(LS_INFO) << __FUNCTION__; + return platform_type_; +} + +AudioDeviceModule::AudioLayer AudioDeviceModuleImpl::PlatformAudioLayer() + const { + RTC_LOG(LS_INFO) << __FUNCTION__; + return audio_layer_; +} + +} // namespace webrtc diff --git a/third_party/libwebrtc/modules/audio_device/audio_device_impl.h b/third_party/libwebrtc/modules/audio_device/audio_device_impl.h new file mode 100644 index 0000000000..46d91a46c8 --- /dev/null +++ b/third_party/libwebrtc/modules/audio_device/audio_device_impl.h @@ -0,0 +1,180 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef MODULES_AUDIO_DEVICE_AUDIO_DEVICE_IMPL_H_ +#define MODULES_AUDIO_DEVICE_AUDIO_DEVICE_IMPL_H_ + +#if defined(WEBRTC_INCLUDE_INTERNAL_AUDIO_DEVICE) + +#include + +#include + +#include "api/task_queue/task_queue_factory.h" +#include "modules/audio_device/audio_device_buffer.h" +#include "modules/audio_device/include/audio_device.h" + +namespace webrtc { + +class AudioDeviceGeneric; + +class AudioDeviceModuleImpl : public AudioDeviceModuleForTest { + public: + enum PlatformType { + kPlatformNotSupported = 0, + kPlatformWin32 = 1, + kPlatformWinCe = 2, + kPlatformLinux = 3, + kPlatformMac = 4, + kPlatformAndroid = 5, + kPlatformIOS = 6, + // Fuchsia isn't fully supported, as there is no implementation for + // AudioDeviceGeneric which will be created for Fuchsia, so + // `CreatePlatformSpecificObjects()` call will fail unless usable + // implementation will be provided by the user. + kPlatformFuchsia = 7, + }; + + int32_t CheckPlatform(); + int32_t CreatePlatformSpecificObjects(); + int32_t AttachAudioBuffer(); + + AudioDeviceModuleImpl(AudioLayer audio_layer, + TaskQueueFactory* task_queue_factory); + // If `create_detached` is true, created ADM can be used on another thread + // compared to the one on which it was created. It's useful for testing. 
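+  // A test sketch for this constructor, assuming a test-only
+  // AudioDeviceGeneric subclass (`FakeAudioDeviceGeneric` is hypothetical):
+  //
+  //   auto adm = rtc::make_ref_counted<AudioDeviceModuleImpl>(
+  //       AudioDeviceModule::kDummyAudio,
+  //       std::make_unique<FakeAudioDeviceGeneric>(),
+  //       task_queue_factory, /*create_detached=*/true);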
+  AudioDeviceModuleImpl(AudioLayer audio_layer,
+                        std::unique_ptr<AudioDeviceGeneric> audio_device,
+                        TaskQueueFactory* task_queue_factory,
+                        bool create_detached);
+  ~AudioDeviceModuleImpl() override;
+
+  // Retrieve the currently utilized audio layer
+  int32_t ActiveAudioLayer(AudioLayer* audioLayer) const override;
+
+  // Full-duplex transportation of PCM audio
+  int32_t RegisterAudioCallback(AudioTransport* audioCallback) override;
+
+  // Main initialization and termination
+  int32_t Init() override;
+  int32_t Terminate() override;
+  bool Initialized() const override;
+
+  // Device enumeration
+  int16_t PlayoutDevices() override;
+  int16_t RecordingDevices() override;
+  int32_t PlayoutDeviceName(uint16_t index,
+                            char name[kAdmMaxDeviceNameSize],
+                            char guid[kAdmMaxGuidSize]) override;
+  int32_t RecordingDeviceName(uint16_t index,
+                              char name[kAdmMaxDeviceNameSize],
+                              char guid[kAdmMaxGuidSize]) override;
+
+  // Device selection
+  int32_t SetPlayoutDevice(uint16_t index) override;
+  int32_t SetPlayoutDevice(WindowsDeviceType device) override;
+  int32_t SetRecordingDevice(uint16_t index) override;
+  int32_t SetRecordingDevice(WindowsDeviceType device) override;
+
+  // Audio transport initialization
+  int32_t PlayoutIsAvailable(bool* available) override;
+  int32_t InitPlayout() override;
+  bool PlayoutIsInitialized() const override;
+  int32_t RecordingIsAvailable(bool* available) override;
+  int32_t InitRecording() override;
+  bool RecordingIsInitialized() const override;
+
+  // Audio transport control
+  int32_t StartPlayout() override;
+  int32_t StopPlayout() override;
+  bool Playing() const override;
+  int32_t StartRecording() override;
+  int32_t StopRecording() override;
+  bool Recording() const override;
+
+  // Audio mixer initialization
+  int32_t InitSpeaker() override;
+  bool SpeakerIsInitialized() const override;
+  int32_t InitMicrophone() override;
+  bool MicrophoneIsInitialized() const override;
+
+  // Speaker volume controls
+  int32_t SpeakerVolumeIsAvailable(bool* available) override;
+  int32_t SetSpeakerVolume(uint32_t volume) override;
+  int32_t SpeakerVolume(uint32_t* volume) const override;
+  int32_t MaxSpeakerVolume(uint32_t* maxVolume) const override;
+  int32_t MinSpeakerVolume(uint32_t* minVolume) const override;
+
+  // Microphone volume controls
+  int32_t MicrophoneVolumeIsAvailable(bool* available) override;
+  int32_t SetMicrophoneVolume(uint32_t volume) override;
+  int32_t MicrophoneVolume(uint32_t* volume) const override;
+  int32_t MaxMicrophoneVolume(uint32_t* maxVolume) const override;
+  int32_t MinMicrophoneVolume(uint32_t* minVolume) const override;
+
+  // Speaker mute control
+  int32_t SpeakerMuteIsAvailable(bool* available) override;
+  int32_t SetSpeakerMute(bool enable) override;
+  int32_t SpeakerMute(bool* enabled) const override;
+
+  // Microphone mute control
+  int32_t MicrophoneMuteIsAvailable(bool* available) override;
+  int32_t SetMicrophoneMute(bool enable) override;
+  int32_t MicrophoneMute(bool* enabled) const override;
+
+  // Stereo support
+  int32_t StereoPlayoutIsAvailable(bool* available) const override;
+  int32_t SetStereoPlayout(bool enable) override;
+  int32_t StereoPlayout(bool* enabled) const override;
+  int32_t StereoRecordingIsAvailable(bool* available) const override;
+  int32_t SetStereoRecording(bool enable) override;
+  int32_t StereoRecording(bool* enabled) const override;
+
+  // Delay information and control
+  int32_t PlayoutDelay(uint16_t* delayMS) const override;
+
+  bool BuiltInAECIsAvailable() const override;
+  int32_t EnableBuiltInAEC(bool enable)
+  bool BuiltInAGCIsAvailable() const override;
+  int32_t EnableBuiltInAGC(bool enable) override;
+  bool BuiltInNSIsAvailable() const override;
+  int32_t EnableBuiltInNS(bool enable) override;
+
+  // Play underrun count.
+  int32_t GetPlayoutUnderrunCount() const override;
+
+#if defined(WEBRTC_IOS)
+  int GetPlayoutAudioParameters(AudioParameters* params) const override;
+  int GetRecordAudioParameters(AudioParameters* params) const override;
+#endif  // WEBRTC_IOS
+
+  AudioDeviceBuffer* GetAudioDeviceBuffer() { return &audio_device_buffer_; }
+
+  int RestartPlayoutInternally() override { return -1; }
+  int RestartRecordingInternally() override { return -1; }
+  int SetPlayoutSampleRate(uint32_t sample_rate) override { return -1; }
+  int SetRecordingSampleRate(uint32_t sample_rate) override { return -1; }
+
+ private:
+  PlatformType Platform() const;
+  AudioLayer PlatformAudioLayer() const;
+
+  AudioLayer audio_layer_;
+  PlatformType platform_type_ = kPlatformNotSupported;
+  bool initialized_ = false;
+  AudioDeviceBuffer audio_device_buffer_;
+  std::unique_ptr<AudioDeviceGeneric> audio_device_;
+};
+
+}  // namespace webrtc
+
+#endif  // defined(WEBRTC_INCLUDE_INTERNAL_AUDIO_DEVICE)
+
+#endif  // MODULES_AUDIO_DEVICE_AUDIO_DEVICE_IMPL_H_
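Taken together, the interface above is driven in a fixed order. A usage sketch (hypothetical caller, error handling elided; `my_transport` stands for some AudioTransport implementation such as the mock used in the unit tests later in this patch):

  std::unique_ptr<TaskQueueFactory> tqf = CreateDefaultTaskQueueFactory();
  rtc::scoped_refptr<AudioDeviceModule> adm = AudioDeviceModule::Create(
      AudioDeviceModule::kPlatformDefaultAudio, tqf.get());
  adm->Init();                                // main initialization
  adm->RegisterAudioCallback(&my_transport);  // full-duplex PCM transport
  adm->SetPlayoutDevice(0);                   // pick a device by index
  adm->InitPlayout();
  adm->StartPlayout();                        // NeedMorePlayData() starts firing
  adm->StopPlayout();
  adm->Terminate();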
diff --git a/third_party/libwebrtc/modules/audio_device/audio_device_name.cc b/third_party/libwebrtc/modules/audio_device/audio_device_name.cc
new file mode 100644
index 0000000000..5318496768
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/audio_device_name.cc
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/audio_device_name.h"
+
+#include "absl/strings/string_view.h"
+
+namespace webrtc {
+
+const char AudioDeviceName::kDefaultDeviceId[] = "default";
+
+AudioDeviceName::AudioDeviceName(absl::string_view device_name,
+                                 absl::string_view unique_id)
+    : device_name(device_name), unique_id(unique_id) {}
+
+bool AudioDeviceName::IsValid() {
+  return !device_name.empty() && !unique_id.empty();
+}
+
+}  // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/audio_device_name.h b/third_party/libwebrtc/modules/audio_device/audio_device_name.h
new file mode 100644
index 0000000000..db37852e9a
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/audio_device_name.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_AUDIO_DEVICE_NAME_H_
+#define MODULES_AUDIO_DEVICE_AUDIO_DEVICE_NAME_H_
+
+#include <deque>
+#include <string>
+
+#include "absl/strings/string_view.h"
+
+namespace webrtc {
+
+struct AudioDeviceName {
+  // Represents a default device. Note that on Windows there are two different
+  // types of default devices (Default and Default Communication). They can
+  // either be two different physical devices or two different roles for one
+  // single device. Hence, this id must be combined with a "role parameter" on
+  // Windows to uniquely identify a default device.
+  static const char kDefaultDeviceId[];
+
+  AudioDeviceName() = default;
+  AudioDeviceName(absl::string_view device_name, absl::string_view unique_id);
+
+  ~AudioDeviceName() = default;
+
+  // Support copy and move.
+  AudioDeviceName(const AudioDeviceName& other) = default;
+  AudioDeviceName(AudioDeviceName&&) = default;
+  AudioDeviceName& operator=(const AudioDeviceName&) = default;
+  AudioDeviceName& operator=(AudioDeviceName&&) = default;
+
+  bool IsValid();
+
+  std::string device_name;  // Friendly name of the device.
+  std::string unique_id;    // Unique identifier for the device.
+};
+
+typedef std::deque<AudioDeviceName> AudioDeviceNames;
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_DEVICE_AUDIO_DEVICE_NAME_H_
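A small usage illustration of the struct and typedef above (the device strings are made up for the example):

  AudioDeviceNames names;  // std::deque<AudioDeviceName>
  names.push_back(AudioDeviceName("Built-in Speakers", "unique-id-0"));
  // The shared "default" id; on Windows it must be paired with a role.
  names.push_back(AudioDeviceName(AudioDeviceName::kDefaultDeviceId,
                                  AudioDeviceName::kDefaultDeviceId));
  for (AudioDeviceName& name : names) {
    RTC_DCHECK(name.IsValid());  // both fields non-empty
  }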
diff --git a/third_party/libwebrtc/modules/audio_device/audio_device_unittest.cc b/third_party/libwebrtc/modules/audio_device/audio_device_unittest.cc
new file mode 100644
index 0000000000..e03c11655b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/audio_device_unittest.cc
@@ -0,0 +1,1241 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/include/audio_device.h"
+
+#include <algorithm>
+#include <cstring>
+#include <list>
+#include <memory>
+#include <numeric>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/scoped_refptr.h"
+#include "api/sequence_checker.h"
+#include "api/task_queue/default_task_queue_factory.h"
+#include "api/task_queue/task_queue_factory.h"
+#include "modules/audio_device/audio_device_impl.h"
+#include "modules/audio_device/include/mock_audio_transport.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/buffer.h"
+#include "rtc_base/event.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/race_checker.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+#include "rtc_base/time_utils.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#ifdef WEBRTC_WIN
+#include "modules/audio_device/include/audio_device_factory.h"
+#include "modules/audio_device/win/core_audio_utility_win.h"
+#include "rtc_base/win/scoped_com_initializer.h"
+#endif  // WEBRTC_WIN
+
+using ::testing::_;
+using ::testing::AtLeast;
+using ::testing::Ge;
+using ::testing::Invoke;
+using ::testing::Mock;
+using ::testing::NiceMock;
+using ::testing::NotNull;
+
+namespace webrtc {
+namespace {
+
+// Using a #define for AUDIO_DEVICE since we will call *different* versions of
+// the ADM functions, depending on the ID type.
+#if defined(WEBRTC_WIN)
+#define AUDIO_DEVICE_ID (AudioDeviceModule::WindowsDeviceType::kDefaultDevice)
+#else
+#define AUDIO_DEVICE_ID (0u)
+#endif  // defined(WEBRTC_WIN)
+
+// #define ENABLE_DEBUG_PRINTF
+#ifdef ENABLE_DEBUG_PRINTF
+#define PRINTD(...) fprintf(stderr, __VA_ARGS__);
+#else
+#define PRINTD(...) ((void)0)
+#endif
+#define PRINT(...) fprintf(stderr, __VA_ARGS__);
+
+// Don't run these tests if audio-related requirements are not met.
+#define SKIP_TEST_IF_NOT(requirements_satisfied)          \
+  do {                                                    \
+    if (!requirements_satisfied) {                        \
+      GTEST_SKIP() << "Skipped. No audio device found.";  \
+    }                                                     \
+  } while (false)
+
+// Number of callbacks (input or output) the test waits for before we set
+// an event indicating that the test was OK.
+static constexpr size_t kNumCallbacks = 10;
+// Max amount of time we wait for an event to be set while counting callbacks.
+static constexpr TimeDelta kTestTimeOut = TimeDelta::Seconds(10);
+// Average number of audio callbacks per second assuming 10ms packet size.
+static constexpr size_t kNumCallbacksPerSecond = 100;
+// Run the full-duplex test during this time (unit is in seconds).
+static constexpr TimeDelta kFullDuplexTime = TimeDelta::Seconds(5);
+// Length of round-trip latency measurements. The number of detected impulses
+// shall be kImpulseFrequencyInHz * kMeasureLatencyTime - 1 since the
+// last transmitted pulse is not used.
+static constexpr TimeDelta kMeasureLatencyTime = TimeDelta::Seconds(10);
+// Sets the number of impulses per second in the latency test.
+static constexpr size_t kImpulseFrequencyInHz = 1;
+// Utilized in round-trip latency measurements to avoid capturing noise samples.
+static constexpr int kImpulseThreshold = 1000;
+
+enum class TransportType {
+  kInvalid,
+  kPlay,
+  kRecord,
+  kPlayAndRecord,
+};
+
+// Interface for processing the audio stream. Real implementations can e.g.
+// run audio in loopback, read audio from a file or perform latency
+// measurements.
+class AudioStream {
+ public:
+  virtual void Write(rtc::ArrayView<const int16_t> source) = 0;
+  virtual void Read(rtc::ArrayView<int16_t> destination) = 0;
+
+  virtual ~AudioStream() = default;
+};
+
+// Converts an index corresponding to a position within a 10ms buffer into a
+// delay value in milliseconds.
+// Example: index=240, frames_per_10ms_buffer=480 => 5ms as output.
+int IndexToMilliseconds(size_t index, size_t frames_per_10ms_buffer) {
+  return rtc::checked_cast<int>(
+      10.0 * (static_cast<double>(index) / frames_per_10ms_buffer) + 0.5);
+}
+
+}  // namespace
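A few spot checks of the rounding behavior (not part of the original file), using 480 frames per 10 ms buffer, i.e. 48 kHz:

  EXPECT_EQ(0, IndexToMilliseconds(0, 480));    // 10 * 0.0 + 0.5 -> 0 ms
  EXPECT_EQ(5, IndexToMilliseconds(240, 480));  // 10 * 0.5 + 0.5 -> 5 ms
  EXPECT_EQ(10, IndexToMilliseconds(479, 480)); // ~9.98 + 0.5    -> 10 ms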
+// Simple first in first out (FIFO) class that wraps a list of 16-bit audio
+// buffers of fixed size and allows Write and Read operations. The idea is to
+// store recorded audio buffers (using Write) and then read (using Read) these
+// stored buffers with as short delay as possible when the audio layer needs
+// data to play out. The number of buffers in the FIFO will stabilize under
+// normal conditions since there will be a balance between Write and Read calls.
+// The container is a std::list container and access is protected with a lock
+// since both sides (playout and recording) are driven by their own threads.
+// Note that we know by design that the size of the audio buffer will not
+// change over time and that both sides will in most cases use the same size.
+class FifoAudioStream : public AudioStream {
+ public:
+  void Write(rtc::ArrayView<const int16_t> source) override {
+    RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
+    const size_t size = [&] {
+      MutexLock lock(&lock_);
+      fifo_.push_back(Buffer16(source.data(), source.size()));
+      return fifo_.size();
+    }();
+    if (size > max_size_) {
+      max_size_ = size;
+    }
+    // Add marker once per second to signal that audio is active.
+    if (write_count_++ % 100 == 0) {
+      PRINTD(".");
+    }
+    written_elements_ += size;
+  }
+
+  void Read(rtc::ArrayView<int16_t> destination) override {
+    MutexLock lock(&lock_);
+    if (fifo_.empty()) {
+      std::fill(destination.begin(), destination.end(), 0);
+    } else {
+      const Buffer16& buffer = fifo_.front();
+      if (buffer.size() == destination.size()) {
+        // Default case where input and output use the same sample rate and
+        // channel configuration. No conversion is needed.
+        std::copy(buffer.begin(), buffer.end(), destination.begin());
+      } else if (destination.size() == 2 * buffer.size()) {
+        // Recorded input signal in `buffer` is in mono. Do channel upmix to
+        // match stereo output (1 -> 2).
+        for (size_t i = 0; i < buffer.size(); ++i) {
+          destination[2 * i] = buffer[i];
+          destination[2 * i + 1] = buffer[i];
+        }
+      } else if (buffer.size() == 2 * destination.size()) {
+        // Recorded input signal in `buffer` is in stereo. Do channel downmix
+        // to match mono output (2 -> 1).
+        for (size_t i = 0; i < destination.size(); ++i) {
+          destination[i] =
+              (static_cast<int32_t>(buffer[2 * i]) + buffer[2 * i + 1]) / 2;
+        }
+      } else {
+        RTC_DCHECK_NOTREACHED() << "Required conversion is not supported";
+      }
+      fifo_.pop_front();
+    }
+  }
+
+  size_t size() const {
+    MutexLock lock(&lock_);
+    return fifo_.size();
+  }
+
+  size_t max_size() const {
+    RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
+    return max_size_;
+  }
+
+  size_t average_size() const {
+    RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
+    return 0.5 + static_cast<float>(written_elements_ / write_count_);
+  }
+
+  using Buffer16 = rtc::BufferT<int16_t>;
+
+  mutable Mutex lock_;
+  rtc::RaceChecker race_checker_;
+
+  std::list<Buffer16> fifo_ RTC_GUARDED_BY(lock_);
+  size_t write_count_ RTC_GUARDED_BY(race_checker_) = 0;
+  size_t max_size_ RTC_GUARDED_BY(race_checker_) = 0;
+  size_t written_elements_ RTC_GUARDED_BY(race_checker_) = 0;
+};
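To make the three Read() branches concrete, a hypothetical exercise of the FIFO where a mono capture feeds a stereo playout:

  FifoAudioStream fifo;
  int16_t mono[480];               // one 10 ms mono buffer at 48 kHz
  std::fill_n(mono, 480, 1000);
  fifo.Write(mono);                // stored as one 480-sample Buffer16
  int16_t stereo[960];             // one 10 ms stereo buffer
  fifo.Read(stereo);               // upmix branch: every L == R == 1000
  // The reverse (stereo write, mono read) hits the downmix branch, where
  // destination[i] = (left + right) / 2.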
+// Inserts periodic impulses and measures the latency between the time of
+// transmission and time of receiving the same impulse.
+class LatencyAudioStream : public AudioStream {
+ public:
+  LatencyAudioStream() {
+    // Delay thread checkers from being initialized until first callback from
+    // respective thread.
+    read_thread_checker_.Detach();
+    write_thread_checker_.Detach();
+  }
+
+  // Insert periodic impulses in first two samples of `destination`.
+  void Read(rtc::ArrayView<int16_t> destination) override {
+    RTC_DCHECK_RUN_ON(&read_thread_checker_);
+    if (read_count_ == 0) {
+      PRINT("[");
+    }
+    read_count_++;
+    std::fill(destination.begin(), destination.end(), 0);
+    if (read_count_ % (kNumCallbacksPerSecond / kImpulseFrequencyInHz) == 0) {
+      PRINT(".");
+      {
+        MutexLock lock(&lock_);
+        if (!pulse_time_) {
+          pulse_time_ = rtc::TimeMillis();
+        }
+      }
+      constexpr int16_t impulse = std::numeric_limits<int16_t>::max();
+      std::fill_n(destination.begin(), 2, impulse);
+    }
+  }
+
+  // Detect received impulses in `source`, derive time between transmission and
+  // detection and add the calculated delay to list of latencies.
+  void Write(rtc::ArrayView<const int16_t> source) override {
+    RTC_DCHECK_RUN_ON(&write_thread_checker_);
+    RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
+    MutexLock lock(&lock_);
+    write_count_++;
+    if (!pulse_time_) {
+      // Avoid detection of new impulse response until a new impulse has
+      // been transmitted (sets `pulse_time_` to value larger than zero).
+      return;
+    }
+    // Find index (element position in vector) of the max element.
+    const size_t index_of_max =
+        std::max_element(source.begin(), source.end()) - source.begin();
+    // Derive time between transmitted pulse and received pulse if the level
+    // is high enough (removes noise).
+    const size_t max = source[index_of_max];
+    if (max > kImpulseThreshold) {
+      PRINTD("(%zu, %zu)", max, index_of_max);
+      int64_t now_time = rtc::TimeMillis();
+      int extra_delay = IndexToMilliseconds(index_of_max, source.size());
+      PRINTD("[%d]", rtc::checked_cast<int>(now_time - pulse_time_));
+      PRINTD("[%d]", extra_delay);
+      // Total latency is the difference between transmit time and detection
+      // time plus the extra delay within the buffer in which we detected the
+      // received impulse. It is transmitted at sample 0 but can be received
+      // at sample N where N > 0. The term `extra_delay` accounts for N and it
+      // is a value between 0 and 10ms.
+      latencies_.push_back(now_time - *pulse_time_ + extra_delay);
+      pulse_time_.reset();
+    } else {
+      PRINTD("-");
+    }
+  }
+
+  size_t num_latency_values() const {
+    RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
+    return latencies_.size();
+  }
+
+  int min_latency() const {
+    RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
+    if (latencies_.empty())
+      return 0;
+    return *std::min_element(latencies_.begin(), latencies_.end());
+  }
+
+  int max_latency() const {
+    RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
+    if (latencies_.empty())
+      return 0;
+    return *std::max_element(latencies_.begin(), latencies_.end());
+  }
+
+  int average_latency() const {
+    RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
+    if (latencies_.empty())
+      return 0;
+    return 0.5 + static_cast<double>(std::accumulate(latencies_.begin(),
+                                                     latencies_.end(), 0)) /
+                     latencies_.size();
+  }
+
+  void PrintResults() const {
+    RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
+    PRINT("] ");
+    for (auto it = latencies_.begin(); it != latencies_.end(); ++it) {
+      PRINTD("%d ", *it);
+    }
+    PRINT("\n");
+    PRINT("[..........] [min, max, avg]=[%d, %d, %d] ms\n", min_latency(),
+          max_latency(), average_latency());
+  }
+
+  Mutex lock_;
+  rtc::RaceChecker race_checker_;
+  SequenceChecker read_thread_checker_;
+  SequenceChecker write_thread_checker_;
+
+  absl::optional<int64_t> pulse_time_ RTC_GUARDED_BY(lock_);
+  std::vector<int> latencies_ RTC_GUARDED_BY(race_checker_);
+  size_t read_count_ RTC_GUARDED_BY(read_thread_checker_) = 0;
+  size_t write_count_ RTC_GUARDED_BY(write_thread_checker_) = 0;
+};
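A worked example of the latency bookkeeping above: suppose an impulse is written to the output at t = 1000 ms and shows up at sample index 96 of a 480-frame (10 ms) input buffer whose Write() runs at t = 1150 ms:

  // extra_delay = IndexToMilliseconds(96, 480) = 10 * 0.2 + 0.5 -> 2 ms
  // latency     = (1150 - 1000) + 2 = 152 ms (pushed onto latencies_)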
+// Mocks the AudioTransport object and proxies actions for the two callbacks
+// (RecordedDataIsAvailable and NeedMorePlayData) to different implementations
+// of AudioStreamInterface.
+class MockAudioTransport : public test::MockAudioTransport {
+ public:
+  explicit MockAudioTransport(TransportType type) : type_(type) {}
+  ~MockAudioTransport() {}
+
+  // Set default actions of the mock object. We are delegating to fake
+  // implementations where the number of callbacks is counted and an event
+  // is set after a certain number of callbacks. Audio parameters are also
+  // checked.
+  void HandleCallbacks(rtc::Event* event,
+                       AudioStream* audio_stream,
+                       int num_callbacks) {
+    event_ = event;
+    audio_stream_ = audio_stream;
+    num_callbacks_ = num_callbacks;
+    if (play_mode()) {
+      ON_CALL(*this, NeedMorePlayData(_, _, _, _, _, _, _, _))
+          .WillByDefault(
+              Invoke(this, &MockAudioTransport::RealNeedMorePlayData));
+    }
+    if (rec_mode()) {
+      ON_CALL(*this, RecordedDataIsAvailable(_, _, _, _, _, _, _, _, _, _))
+          .WillByDefault(
+              Invoke(this, &MockAudioTransport::RealRecordedDataIsAvailable));
+    }
+  }
+
+  // Special constructor used in manual tests where the user wants to run audio
+  // until e.g. a keyboard key is pressed. The event flag is set to nullptr by
+  // default since it is up to the user to stop the test. See e.g.
+  // DISABLED_RunPlayoutAndRecordingInFullDuplexAndWaitForEnterKey().
+  void HandleCallbacks(AudioStream* audio_stream) {
+    HandleCallbacks(nullptr, audio_stream, 0);
+  }
+
+  int32_t RealRecordedDataIsAvailable(const void* audio_buffer,
+                                      const size_t samples_per_channel,
+                                      const size_t bytes_per_frame,
+                                      const size_t channels,
+                                      const uint32_t sample_rate,
+                                      const uint32_t total_delay_ms,
+                                      const int32_t clock_drift,
+                                      const uint32_t current_mic_level,
+                                      const bool typing_status,
+                                      uint32_t& new_mic_level) {
+    EXPECT_TRUE(rec_mode()) << "No test is expecting these callbacks.";
+    // Store audio parameters once in the first callback. For all other
+    // callbacks, verify that the provided audio parameters are maintained and
+    // that each callback corresponds to 10ms for any given sample rate.
+    if (!record_parameters_.is_complete()) {
+      record_parameters_.reset(sample_rate, channels, samples_per_channel);
+    } else {
+      EXPECT_EQ(samples_per_channel, record_parameters_.frames_per_buffer());
+      EXPECT_EQ(bytes_per_frame, record_parameters_.GetBytesPerFrame());
+      EXPECT_EQ(channels, record_parameters_.channels());
+      EXPECT_EQ(static_cast<int>(sample_rate),
+                record_parameters_.sample_rate());
+      EXPECT_EQ(samples_per_channel,
+                record_parameters_.frames_per_10ms_buffer());
+    }
+    {
+      MutexLock lock(&lock_);
+      rec_count_++;
+    }
+    // Write audio data to audio stream object if one has been injected.
+    if (audio_stream_) {
+      audio_stream_->Write(
+          rtc::MakeArrayView(static_cast<const int16_t*>(audio_buffer),
+                             samples_per_channel * channels));
+    }
+    // Signal the event after given amount of callbacks.
+    if (event_ && ReceivedEnoughCallbacks()) {
+      event_->Set();
+    }
+    return 0;
+  }
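The parameter expectations above all follow from the 10 ms convention: for sample rate R and C channels of 16-bit PCM, every callback carries R / 100 frames. For 48 kHz stereo that works out to:

  constexpr uint32_t sample_rate = 48000;
  constexpr size_t channels = 2;
  constexpr size_t frames_per_10ms = sample_rate / 100;           // 480 frames
  constexpr size_t bytes_per_frame = channels * sizeof(int16_t);  // 4 bytes
  constexpr size_t samples_per_cb = frames_per_10ms * channels;   // 960 samples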
+  int32_t RealNeedMorePlayData(const size_t samples_per_channel,
+                               const size_t bytes_per_frame,
+                               const size_t channels,
+                               const uint32_t sample_rate,
+                               void* audio_buffer,
+                               size_t& samples_out,
+                               int64_t* elapsed_time_ms,
+                               int64_t* ntp_time_ms) {
+    EXPECT_TRUE(play_mode()) << "No test is expecting these callbacks.";
+    // Store audio parameters once in the first callback. For all other
+    // callbacks, verify that the provided audio parameters are maintained and
+    // that each callback corresponds to 10ms for any given sample rate.
+    if (!playout_parameters_.is_complete()) {
+      playout_parameters_.reset(sample_rate, channels, samples_per_channel);
+    } else {
+      EXPECT_EQ(samples_per_channel, playout_parameters_.frames_per_buffer());
+      EXPECT_EQ(bytes_per_frame, playout_parameters_.GetBytesPerFrame());
+      EXPECT_EQ(channels, playout_parameters_.channels());
+      EXPECT_EQ(static_cast<int>(sample_rate),
+                playout_parameters_.sample_rate());
+      EXPECT_EQ(samples_per_channel,
+                playout_parameters_.frames_per_10ms_buffer());
+    }
+    {
+      MutexLock lock(&lock_);
+      play_count_++;
+    }
+    samples_out = samples_per_channel * channels;
+    // Read audio data from audio stream object if one has been injected.
+    if (audio_stream_) {
+      audio_stream_->Read(rtc::MakeArrayView(
+          static_cast<int16_t*>(audio_buffer), samples_per_channel * channels));
+    } else {
+      // Fill the audio buffer with zeros to avoid disturbing audio.
+      const size_t num_bytes = samples_per_channel * bytes_per_frame;
+      std::memset(audio_buffer, 0, num_bytes);
+    }
+    // Signal the event after given amount of callbacks.
+    if (event_ && ReceivedEnoughCallbacks()) {
+      event_->Set();
+    }
+    return 0;
+  }
+
+  bool ReceivedEnoughCallbacks() {
+    bool recording_done = false;
+    if (rec_mode()) {
+      MutexLock lock(&lock_);
+      recording_done = rec_count_ >= num_callbacks_;
+    } else {
+      recording_done = true;
+    }
+    bool playout_done = false;
+    if (play_mode()) {
+      MutexLock lock(&lock_);
+      playout_done = play_count_ >= num_callbacks_;
+    } else {
+      playout_done = true;
+    }
+    return recording_done && playout_done;
+  }
+
+  bool play_mode() const {
+    return type_ == TransportType::kPlay ||
+           type_ == TransportType::kPlayAndRecord;
+  }
+
+  bool rec_mode() const {
+    return type_ == TransportType::kRecord ||
+           type_ == TransportType::kPlayAndRecord;
+  }
+
+  void ResetCallbackCounters() {
+    MutexLock lock(&lock_);
+    if (play_mode()) {
+      play_count_ = 0;
+    }
+    if (rec_mode()) {
+      rec_count_ = 0;
+    }
+  }
+
+ private:
+  Mutex lock_;
+  TransportType type_ = TransportType::kInvalid;
+  rtc::Event* event_ = nullptr;
+  AudioStream* audio_stream_ = nullptr;
+  size_t num_callbacks_ = 0;
+  size_t play_count_ RTC_GUARDED_BY(lock_) = 0;
+  size_t rec_count_ RTC_GUARDED_BY(lock_) = 0;
+  AudioParameters playout_parameters_;
+  AudioParameters record_parameters_;
+};
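Putting the mock to work looks like this in the tests that follow (sketch; `adm` stands for the audio device module under test):

  rtc::Event event;
  FifoAudioStream stream;
  MockAudioTransport mock(TransportType::kPlayAndRecord);
  mock.HandleCallbacks(&event, &stream, kNumCallbacks);
  EXPECT_EQ(0, adm->RegisterAudioCallback(&mock));
  // ...start playout/recording..., then block until the mock has seen enough
  // callbacks in both directions (or the timeout expires):
  event.Wait(kTestTimeOut);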
+// AudioDeviceTest test fixture.
+
+// bugs.webrtc.org/9808
+// Both the tests and the code under test are very old, unstaffed and not
+// part of the WebRTC stack.
+// Here, sanitizers make the tests hang without providing a useful report.
+// So we are just disabling them, without intention to re-enable them.
+#if defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
+    defined(THREAD_SANITIZER) || defined(UNDEFINED_SANITIZER)
+#define MAYBE_AudioDeviceTest DISABLED_AudioDeviceTest
+#else
+#define MAYBE_AudioDeviceTest AudioDeviceTest
+#endif
+
+class MAYBE_AudioDeviceTest
+    : public ::testing::TestWithParam<AudioDeviceModule::AudioLayer> {
+ protected:
+  MAYBE_AudioDeviceTest()
+      : audio_layer_(GetParam()),
+        task_queue_factory_(CreateDefaultTaskQueueFactory()) {
+    rtc::LogMessage::LogToDebug(rtc::LS_INFO);
+    // Add extra logging fields here if needed for debugging.
+    rtc::LogMessage::LogTimestamps();
+    rtc::LogMessage::LogThreads();
+    audio_device_ = CreateAudioDevice();
+    EXPECT_NE(audio_device_.get(), nullptr);
+    AudioDeviceModule::AudioLayer audio_layer;
+    int got_platform_audio_layer =
+        audio_device_->ActiveAudioLayer(&audio_layer);
+    // First, ensure that a valid audio layer can be activated.
+    if (got_platform_audio_layer != 0) {
+      requirements_satisfied_ = false;
+    }
+    // Next, verify that the ADM can be initialized.
+    if (requirements_satisfied_) {
+      requirements_satisfied_ = (audio_device_->Init() == 0);
+    }
+    // Finally, ensure that at least one valid device exists in each direction.
+    if (requirements_satisfied_) {
+      const int16_t num_playout_devices = audio_device_->PlayoutDevices();
+      const int16_t num_record_devices = audio_device_->RecordingDevices();
+      requirements_satisfied_ =
+          num_playout_devices > 0 && num_record_devices > 0;
+    }
+    if (requirements_satisfied_) {
+      EXPECT_EQ(0, audio_device_->SetPlayoutDevice(AUDIO_DEVICE_ID));
+      EXPECT_EQ(0, audio_device_->InitSpeaker());
+      EXPECT_EQ(0, audio_device_->StereoPlayoutIsAvailable(&stereo_playout_));
+      EXPECT_EQ(0, audio_device_->SetStereoPlayout(stereo_playout_));
+      EXPECT_EQ(0, audio_device_->SetRecordingDevice(AUDIO_DEVICE_ID));
+      EXPECT_EQ(0, audio_device_->InitMicrophone());
+      // Avoid asking for input stereo support and always record in mono
+      // since asking can cause issues in combination with remote desktop.
+      // See https://bugs.chromium.org/p/webrtc/issues/detail?id=7397 for
+      // details.
+      EXPECT_EQ(0, audio_device_->SetStereoRecording(false));
+    }
+  }
+
+  // This is needed by all tests using MockAudioTransport,
+  // since there is no way to unregister it.
+  // Without Terminate(), audio_device would still access
+  // the destructed mock via "webrtc_audio_module_rec_thread".
+  // An alternative would be for the mock to outlive audio_device.
+  void PreTearDown() { EXPECT_EQ(0, audio_device_->Terminate()); }
+
+  virtual ~MAYBE_AudioDeviceTest() {
+    if (audio_device_) {
+      EXPECT_EQ(0, audio_device_->Terminate());
+    }
+  }
+
+  bool requirements_satisfied() const { return requirements_satisfied_; }
+  rtc::Event* event() { return &event_; }
+  AudioDeviceModule::AudioLayer audio_layer() const { return audio_layer_; }
+
+  // AudioDeviceModuleForTest extends the default ADM interface with some extra
+  // test methods. Intended for usage in tests only and requires a unique
+  // factory method. See CreateAudioDevice() for details.
+  const rtc::scoped_refptr<AudioDeviceModuleForTest>& audio_device() const {
+    return audio_device_;
+  }
+
+  rtc::scoped_refptr<AudioDeviceModuleForTest> CreateAudioDevice() {
+    // Use the default factory for kPlatformDefaultAudio and a special factory
+    // CreateWindowsCoreAudioAudioDeviceModuleForTest() for kWindowsCoreAudio2.
+    // The value of `audio_layer_` is set at construction by GetParam() and two
+    // different layers are tested on Windows only.
+    if (audio_layer_ == AudioDeviceModule::kPlatformDefaultAudio) {
+      return AudioDeviceModule::CreateForTest(audio_layer_,
+                                              task_queue_factory_.get());
+    } else if (audio_layer_ == AudioDeviceModule::kWindowsCoreAudio2) {
+#ifdef WEBRTC_WIN
+      // We must initialize the COM library on a thread before calling any of
+      // the library functions. All COM functions in the ADM will return
+      // CO_E_NOTINITIALIZED otherwise.
+      com_initializer_ =
+          std::make_unique<ScopedCOMInitializer>(ScopedCOMInitializer::kMTA);
+      EXPECT_TRUE(com_initializer_->Succeeded());
+      EXPECT_TRUE(webrtc_win::core_audio_utility::IsSupported());
+      EXPECT_TRUE(webrtc_win::core_audio_utility::IsMMCSSSupported());
+      return CreateWindowsCoreAudioAudioDeviceModuleForTest(
+          task_queue_factory_.get(), true);
+#else
+      return nullptr;
+#endif
+    } else {
+      return nullptr;
+    }
+  }
+
+  void StartPlayout() {
+    EXPECT_FALSE(audio_device()->Playing());
+    EXPECT_EQ(0, audio_device()->InitPlayout());
+    EXPECT_TRUE(audio_device()->PlayoutIsInitialized());
+    EXPECT_EQ(0, audio_device()->StartPlayout());
+    EXPECT_TRUE(audio_device()->Playing());
+  }
+
+  void StopPlayout() {
+    EXPECT_EQ(0, audio_device()->StopPlayout());
+    EXPECT_FALSE(audio_device()->Playing());
+    EXPECT_FALSE(audio_device()->PlayoutIsInitialized());
+  }
+
+  void StartRecording() {
+    EXPECT_FALSE(audio_device()->Recording());
+    EXPECT_EQ(0, audio_device()->InitRecording());
+    EXPECT_TRUE(audio_device()->RecordingIsInitialized());
+    EXPECT_EQ(0, audio_device()->StartRecording());
+    EXPECT_TRUE(audio_device()->Recording());
+  }
+
+  void StopRecording() {
+    EXPECT_EQ(0, audio_device()->StopRecording());
+    EXPECT_FALSE(audio_device()->Recording());
+    EXPECT_FALSE(audio_device()->RecordingIsInitialized());
+  }
+
+  bool NewWindowsAudioDeviceModuleIsUsed() {
+#ifdef WEBRTC_WIN
+    AudioDeviceModule::AudioLayer audio_layer;
+    EXPECT_EQ(0, audio_device()->ActiveAudioLayer(&audio_layer));
+    if (audio_layer == AudioDeviceModule::kWindowsCoreAudio2) {
+      // Default device is always added as first element in the list and the
+      // default communication device as the second element. Hence, the list
+      // contains two extra elements in this case.
+      return true;
+    }
+#endif
+    return false;
+  }
+
+ private:
+#ifdef WEBRTC_WIN
+  // Windows Core Audio based ADM needs to run on a COM initialized thread.
+  std::unique_ptr<ScopedCOMInitializer> com_initializer_;
+#endif
+  AudioDeviceModule::AudioLayer audio_layer_;
+  std::unique_ptr<TaskQueueFactory> task_queue_factory_;
+  bool requirements_satisfied_ = true;
+  rtc::Event event_;
+  rtc::scoped_refptr<AudioDeviceModuleForTest> audio_device_;
+  bool stereo_playout_ = false;
+};
+
+// Instead of using the test fixture, verify that the different factory methods
+// work as intended.
+TEST(MAYBE_AudioDeviceTestWin, ConstructDestructWithFactory) {
+  std::unique_ptr<TaskQueueFactory> task_queue_factory =
+      CreateDefaultTaskQueueFactory();
+  rtc::scoped_refptr<AudioDeviceModule> audio_device;
+  // The default factory should work for all platforms when a default ADM is
+  // requested.
+  audio_device = AudioDeviceModule::Create(
+      AudioDeviceModule::kPlatformDefaultAudio, task_queue_factory.get());
+  EXPECT_TRUE(audio_device);
+  audio_device = nullptr;
+#ifdef WEBRTC_WIN
+  // For Windows, the old factory method creates an ADM where the platform-
+  // specific parts are implemented by an AudioDeviceGeneric object. Verify
+  // that the old factory can't be used in combination with the latest audio
+  // layer AudioDeviceModule::kWindowsCoreAudio2.
+  audio_device = AudioDeviceModule::Create(
+      AudioDeviceModule::kWindowsCoreAudio2, task_queue_factory.get());
+  EXPECT_FALSE(audio_device);
+  audio_device = nullptr;
+  // Instead, ensure that the new dedicated factory method called
+  // CreateWindowsCoreAudioAudioDeviceModule() can be used on Windows and that
+  // it sets the audio layer to kWindowsCoreAudio2 implicitly. Note that the
+  // new ADM for Windows must be created on a COM thread.
+ ScopedCOMInitializer com_initializer(ScopedCOMInitializer::kMTA); + EXPECT_TRUE(com_initializer.Succeeded()); + audio_device = + CreateWindowsCoreAudioAudioDeviceModule(task_queue_factory.get()); + EXPECT_TRUE(audio_device); + AudioDeviceModule::AudioLayer audio_layer; + EXPECT_EQ(0, audio_device->ActiveAudioLayer(&audio_layer)); + EXPECT_EQ(audio_layer, AudioDeviceModule::kWindowsCoreAudio2); +#endif +} + +// Uses the test fixture to create, initialize and destruct the ADM. +TEST_P(MAYBE_AudioDeviceTest, ConstructDestructDefault) {} + +TEST_P(MAYBE_AudioDeviceTest, InitTerminate) { + SKIP_TEST_IF_NOT(requirements_satisfied()); + // Initialization is part of the test fixture. + EXPECT_TRUE(audio_device()->Initialized()); + EXPECT_EQ(0, audio_device()->Terminate()); + EXPECT_FALSE(audio_device()->Initialized()); +} + +// Enumerate all available and active output devices. +TEST_P(MAYBE_AudioDeviceTest, PlayoutDeviceNames) { + SKIP_TEST_IF_NOT(requirements_satisfied()); + char device_name[kAdmMaxDeviceNameSize]; + char unique_id[kAdmMaxGuidSize]; + int num_devices = audio_device()->PlayoutDevices(); + if (NewWindowsAudioDeviceModuleIsUsed()) { + num_devices += 2; + } + EXPECT_GT(num_devices, 0); + for (int i = 0; i < num_devices; ++i) { + EXPECT_EQ(0, audio_device()->PlayoutDeviceName(i, device_name, unique_id)); + } + EXPECT_EQ(-1, audio_device()->PlayoutDeviceName(num_devices, device_name, + unique_id)); +} + +// Enumerate all available and active input devices. +TEST_P(MAYBE_AudioDeviceTest, RecordingDeviceNames) { + SKIP_TEST_IF_NOT(requirements_satisfied()); + char device_name[kAdmMaxDeviceNameSize]; + char unique_id[kAdmMaxGuidSize]; + int num_devices = audio_device()->RecordingDevices(); + if (NewWindowsAudioDeviceModuleIsUsed()) { + num_devices += 2; + } + EXPECT_GT(num_devices, 0); + for (int i = 0; i < num_devices; ++i) { + EXPECT_EQ(0, + audio_device()->RecordingDeviceName(i, device_name, unique_id)); + } + EXPECT_EQ(-1, audio_device()->RecordingDeviceName(num_devices, device_name, + unique_id)); +} + +// Counts number of active output devices and ensure that all can be selected. +TEST_P(MAYBE_AudioDeviceTest, SetPlayoutDevice) { + SKIP_TEST_IF_NOT(requirements_satisfied()); + int num_devices = audio_device()->PlayoutDevices(); + if (NewWindowsAudioDeviceModuleIsUsed()) { + num_devices += 2; + } + EXPECT_GT(num_devices, 0); + // Verify that all available playout devices can be set (not enabled yet). + for (int i = 0; i < num_devices; ++i) { + EXPECT_EQ(0, audio_device()->SetPlayoutDevice(i)); + } + EXPECT_EQ(-1, audio_device()->SetPlayoutDevice(num_devices)); +#ifdef WEBRTC_WIN + // On Windows, verify the alternative method where the user can select device + // by role. + EXPECT_EQ( + 0, audio_device()->SetPlayoutDevice(AudioDeviceModule::kDefaultDevice)); + EXPECT_EQ(0, audio_device()->SetPlayoutDevice( + AudioDeviceModule::kDefaultCommunicationDevice)); +#endif +} + +// Counts number of active input devices and ensure that all can be selected. +TEST_P(MAYBE_AudioDeviceTest, SetRecordingDevice) { + SKIP_TEST_IF_NOT(requirements_satisfied()); + int num_devices = audio_device()->RecordingDevices(); + if (NewWindowsAudioDeviceModuleIsUsed()) { + num_devices += 2; + } + EXPECT_GT(num_devices, 0); + // Verify that all available recording devices can be set (not enabled yet). 
+ for (int i = 0; i < num_devices; ++i) { + EXPECT_EQ(0, audio_device()->SetRecordingDevice(i)); + } + EXPECT_EQ(-1, audio_device()->SetRecordingDevice(num_devices)); +#ifdef WEBRTC_WIN + // On Windows, verify the alternative method where the user can select device + // by role. + EXPECT_EQ( + 0, audio_device()->SetRecordingDevice(AudioDeviceModule::kDefaultDevice)); + EXPECT_EQ(0, audio_device()->SetRecordingDevice( + AudioDeviceModule::kDefaultCommunicationDevice)); +#endif +} + +// Tests Start/Stop playout without any registered audio callback. +TEST_P(MAYBE_AudioDeviceTest, StartStopPlayout) { + SKIP_TEST_IF_NOT(requirements_satisfied()); + StartPlayout(); + StopPlayout(); +} + +// Tests Start/Stop recording without any registered audio callback. +TEST_P(MAYBE_AudioDeviceTest, StartStopRecording) { + SKIP_TEST_IF_NOT(requirements_satisfied()); + StartRecording(); + StopRecording(); +} + +// Tests Start/Stop playout for all available input devices to ensure that +// the selected device can be created and used as intended. +TEST_P(MAYBE_AudioDeviceTest, StartStopPlayoutWithRealDevice) { + SKIP_TEST_IF_NOT(requirements_satisfied()); + int num_devices = audio_device()->PlayoutDevices(); + if (NewWindowsAudioDeviceModuleIsUsed()) { + num_devices += 2; + } + EXPECT_GT(num_devices, 0); + // Verify that all available playout devices can be set and used. + for (int i = 0; i < num_devices; ++i) { + EXPECT_EQ(0, audio_device()->SetPlayoutDevice(i)); + StartPlayout(); + StopPlayout(); + } +#ifdef WEBRTC_WIN + AudioDeviceModule::WindowsDeviceType device_role[] = { + AudioDeviceModule::kDefaultDevice, + AudioDeviceModule::kDefaultCommunicationDevice}; + for (size_t i = 0; i < arraysize(device_role); ++i) { + EXPECT_EQ(0, audio_device()->SetPlayoutDevice(device_role[i])); + StartPlayout(); + StopPlayout(); + } +#endif +} + +// Tests Start/Stop recording for all available input devices to ensure that +// the selected device can be created and used as intended. +TEST_P(MAYBE_AudioDeviceTest, StartStopRecordingWithRealDevice) { + SKIP_TEST_IF_NOT(requirements_satisfied()); + int num_devices = audio_device()->RecordingDevices(); + if (NewWindowsAudioDeviceModuleIsUsed()) { + num_devices += 2; + } + EXPECT_GT(num_devices, 0); + // Verify that all available recording devices can be set and used. + for (int i = 0; i < num_devices; ++i) { + EXPECT_EQ(0, audio_device()->SetRecordingDevice(i)); + StartRecording(); + StopRecording(); + } +#ifdef WEBRTC_WIN + AudioDeviceModule::WindowsDeviceType device_role[] = { + AudioDeviceModule::kDefaultDevice, + AudioDeviceModule::kDefaultCommunicationDevice}; + for (size_t i = 0; i < arraysize(device_role); ++i) { + EXPECT_EQ(0, audio_device()->SetRecordingDevice(device_role[i])); + StartRecording(); + StopRecording(); + } +#endif +} + +// Tests Init/Stop/Init recording without any registered audio callback. +// See https://bugs.chromium.org/p/webrtc/issues/detail?id=8041 for details +// on why this test is useful. +TEST_P(MAYBE_AudioDeviceTest, InitStopInitRecording) { + SKIP_TEST_IF_NOT(requirements_satisfied()); + EXPECT_EQ(0, audio_device()->InitRecording()); + EXPECT_TRUE(audio_device()->RecordingIsInitialized()); + StopRecording(); + EXPECT_EQ(0, audio_device()->InitRecording()); + StopRecording(); +} + +// Verify that additional attempts to initialize or start recording while +// already being active works. Additional calls should just be ignored. 
+TEST_P(MAYBE_AudioDeviceTest, StartInitRecording) {
+  SKIP_TEST_IF_NOT(requirements_satisfied());
+  StartRecording();
+  // An additional attempt to initialize at this stage should be ignored.
+  EXPECT_EQ(0, audio_device()->InitRecording());
+  // Same for additional request to start recording while already active.
+  EXPECT_EQ(0, audio_device()->StartRecording());
+  StopRecording();
+}
+
+// Verify that additional attempts to initialize or start playout while
+// already active work. Additional calls should just be ignored.
+TEST_P(MAYBE_AudioDeviceTest, StartInitPlayout) {
+  SKIP_TEST_IF_NOT(requirements_satisfied());
+  StartPlayout();
+  // An additional attempt to initialize at this stage should be ignored.
+  EXPECT_EQ(0, audio_device()->InitPlayout());
+  // Same for additional request to start playout while already active.
+  EXPECT_EQ(0, audio_device()->StartPlayout());
+  StopPlayout();
+}
+
+// Tests Init/Stop/Init recording while playout is active.
+TEST_P(MAYBE_AudioDeviceTest, InitStopInitRecordingWhilePlaying) {
+  SKIP_TEST_IF_NOT(requirements_satisfied());
+  StartPlayout();
+  EXPECT_EQ(0, audio_device()->InitRecording());
+  EXPECT_TRUE(audio_device()->RecordingIsInitialized());
+  StopRecording();
+  EXPECT_EQ(0, audio_device()->InitRecording());
+  StopRecording();
+  StopPlayout();
+}
+
+// Tests Init/Stop/Init playout without any registered audio callback.
+TEST_P(MAYBE_AudioDeviceTest, InitStopInitPlayout) {
+  SKIP_TEST_IF_NOT(requirements_satisfied());
+  EXPECT_EQ(0, audio_device()->InitPlayout());
+  EXPECT_TRUE(audio_device()->PlayoutIsInitialized());
+  StopPlayout();
+  EXPECT_EQ(0, audio_device()->InitPlayout());
+  StopPlayout();
+}
+
+// Tests Init/Stop/Init playout while recording is active.
+TEST_P(MAYBE_AudioDeviceTest, InitStopInitPlayoutWhileRecording) {
+  SKIP_TEST_IF_NOT(requirements_satisfied());
+  StartRecording();
+  EXPECT_EQ(0, audio_device()->InitPlayout());
+  EXPECT_TRUE(audio_device()->PlayoutIsInitialized());
+  StopPlayout();
+  EXPECT_EQ(0, audio_device()->InitPlayout());
+  StopPlayout();
+  StopRecording();
+}
+
+// TODO(henrika): restart without intermediate destruction is currently only
+// supported on Windows.
+#ifdef WEBRTC_WIN
+// Tests Start/Stop playout followed by a second session (emulates a restart
+// triggered by a user using public APIs).
+TEST_P(MAYBE_AudioDeviceTest, StartStopPlayoutWithExternalRestart) {
+  SKIP_TEST_IF_NOT(requirements_satisfied());
+  StartPlayout();
+  StopPlayout();
+  // Restart playout without destroying the ADM in between. Ensures that we
+  // support: Init(), Start(), Stop(), Init(), Start(), Stop().
+  StartPlayout();
+  StopPlayout();
+}
+
+// Tests Start/Stop recording followed by a second session (emulates a restart
+// triggered by a user using public APIs).
+TEST_P(MAYBE_AudioDeviceTest, StartStopRecordingWithExternalRestart) {
+  SKIP_TEST_IF_NOT(requirements_satisfied());
+  StartRecording();
+  StopRecording();
+  // Restart recording without destroying the ADM in between. Ensures that we
+  // support: Init(), Start(), Stop(), Init(), Start(), Stop().
+  StartRecording();
+  StopRecording();
+}
+
+// Tests Start/Stop playout followed by a second session (emulates a restart
+// triggered by an internal callback e.g. corresponding to a device switch).
+// Note that internal restart is only supported in combination with the latest
+// Windows ADM.
+TEST_P(MAYBE_AudioDeviceTest, StartStopPlayoutWithInternalRestart) { + SKIP_TEST_IF_NOT(requirements_satisfied()); + if (audio_layer() != AudioDeviceModule::kWindowsCoreAudio2) { + return; + } + MockAudioTransport mock(TransportType::kPlay); + mock.HandleCallbacks(event(), nullptr, kNumCallbacks); + EXPECT_CALL(mock, NeedMorePlayData(_, _, _, _, NotNull(), _, _, _)) + .Times(AtLeast(kNumCallbacks)); + EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock)); + StartPlayout(); + event()->Wait(kTestTimeOut); + EXPECT_TRUE(audio_device()->Playing()); + // Restart playout but without stopping the internal audio thread. + // This procedure uses a non-public test API and it emulates what happens + // inside the ADM when e.g. a device is removed. + EXPECT_EQ(0, audio_device()->RestartPlayoutInternally()); + + // Run basic tests of public APIs while a restart attempt is active. + // These calls should now be very thin and not trigger any new actions. + EXPECT_EQ(-1, audio_device()->StopPlayout()); + EXPECT_TRUE(audio_device()->Playing()); + EXPECT_TRUE(audio_device()->PlayoutIsInitialized()); + EXPECT_EQ(0, audio_device()->InitPlayout()); + EXPECT_EQ(0, audio_device()->StartPlayout()); + + // Wait until audio has restarted and a new sequence of audio callbacks + // becomes active. + // TODO(henrika): is it possible to verify that the internal state transition + // is Stop->Init->Start? + ASSERT_TRUE(Mock::VerifyAndClearExpectations(&mock)); + mock.ResetCallbackCounters(); + EXPECT_CALL(mock, NeedMorePlayData(_, _, _, _, NotNull(), _, _, _)) + .Times(AtLeast(kNumCallbacks)); + event()->Wait(kTestTimeOut); + EXPECT_TRUE(audio_device()->Playing()); + // Stop playout and the audio thread after successful internal restart. + StopPlayout(); + PreTearDown(); +} + +// Tests Start/Stop recording followed by a second session (emulates a restart +// triggered by an internal callback e.g. corresponding to a device switch). +// Note that, internal restart is only supported in combination with the latest +// Windows ADM. +TEST_P(MAYBE_AudioDeviceTest, StartStopRecordingWithInternalRestart) { + SKIP_TEST_IF_NOT(requirements_satisfied()); + if (audio_layer() != AudioDeviceModule::kWindowsCoreAudio2) { + return; + } + MockAudioTransport mock(TransportType::kRecord); + mock.HandleCallbacks(event(), nullptr, kNumCallbacks); + EXPECT_CALL(mock, RecordedDataIsAvailable(NotNull(), _, _, _, _, Ge(0u), 0, _, + false, _, _)) + .Times(AtLeast(kNumCallbacks)); + EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock)); + StartRecording(); + event()->Wait(kTestTimeOut); + EXPECT_TRUE(audio_device()->Recording()); + // Restart recording but without stopping the internal audio thread. + // This procedure uses a non-public test API and it emulates what happens + // inside the ADM when e.g. a device is removed. + EXPECT_EQ(0, audio_device()->RestartRecordingInternally()); + + // Run basic tests of public APIs while a restart attempt is active. + // These calls should now be very thin and not trigger any new actions. + EXPECT_EQ(-1, audio_device()->StopRecording()); + EXPECT_TRUE(audio_device()->Recording()); + EXPECT_TRUE(audio_device()->RecordingIsInitialized()); + EXPECT_EQ(0, audio_device()->InitRecording()); + EXPECT_EQ(0, audio_device()->StartRecording()); + + // Wait until audio has restarted and a new sequence of audio callbacks + // becomes active. + // TODO(henrika): is it possible to verify that the internal state transition + // is Stop->Init->Start? 
+  ASSERT_TRUE(Mock::VerifyAndClearExpectations(&mock));
+  mock.ResetCallbackCounters();
+  EXPECT_CALL(mock, RecordedDataIsAvailable(NotNull(), _, _, _, _, Ge(0u), 0, _,
+                                            false, _, _))
+      .Times(AtLeast(kNumCallbacks));
+  event()->Wait(kTestTimeOut);
+  EXPECT_TRUE(audio_device()->Recording());
+  // Stop recording and the audio thread after successful internal restart.
+  StopRecording();
+  PreTearDown();
+}
+#endif  // #ifdef WEBRTC_WIN
+
+// Start playout and verify that the native audio layer starts asking for real
+// audio samples to play out using the NeedMorePlayData() callback.
+// Note that we can't add expectations on audio parameters in EXPECT_CALL
+// since parameters are not provided in each callback. We therefore test and
+// verify the parameters in the fake audio transport implementation instead.
+TEST_P(MAYBE_AudioDeviceTest, StartPlayoutVerifyCallbacks) {
+  SKIP_TEST_IF_NOT(requirements_satisfied());
+  MockAudioTransport mock(TransportType::kPlay);
+  mock.HandleCallbacks(event(), nullptr, kNumCallbacks);
+  EXPECT_CALL(mock, NeedMorePlayData(_, _, _, _, NotNull(), _, _, _))
+      .Times(AtLeast(kNumCallbacks));
+  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+  StartPlayout();
+  event()->Wait(kTestTimeOut);
+  StopPlayout();
+  PreTearDown();
+}
+
+// Don't run these tests in combination with sanitizers.
+// They are already flaky *without* sanitizers.
+// Sanitizers seem to increase flakiness (which brings noise),
+// without reporting anything.
+// TODO(webrtc:10867): Re-enable when flakiness fixed.
+#if defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
+    defined(THREAD_SANITIZER)
+#define MAYBE_StartRecordingVerifyCallbacks \
+  DISABLED_StartRecordingVerifyCallbacks
+#define MAYBE_StartPlayoutAndRecordingVerifyCallbacks \
+  DISABLED_StartPlayoutAndRecordingVerifyCallbacks
+#else
+#define MAYBE_StartRecordingVerifyCallbacks StartRecordingVerifyCallbacks
+#define MAYBE_StartPlayoutAndRecordingVerifyCallbacks \
+  StartPlayoutAndRecordingVerifyCallbacks
+#endif
+
+// Start recording and verify that the native audio layer starts providing real
+// audio samples using the RecordedDataIsAvailable() callback.
+TEST_P(MAYBE_AudioDeviceTest, MAYBE_StartRecordingVerifyCallbacks) {
+  SKIP_TEST_IF_NOT(requirements_satisfied());
+  MockAudioTransport mock(TransportType::kRecord);
+  mock.HandleCallbacks(event(), nullptr, kNumCallbacks);
+  EXPECT_CALL(mock, RecordedDataIsAvailable(NotNull(), _, _, _, _, Ge(0u), 0, _,
+                                            false, _, _))
+      .Times(AtLeast(kNumCallbacks));
+  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+  StartRecording();
+  event()->Wait(kTestTimeOut);
+  StopRecording();
+  PreTearDown();
+}
+
+// Start playout and recording (full-duplex audio) and verify that audio is
+// active in both directions.
+TEST_P(MAYBE_AudioDeviceTest, MAYBE_StartPlayoutAndRecordingVerifyCallbacks) {
+  SKIP_TEST_IF_NOT(requirements_satisfied());
+  MockAudioTransport mock(TransportType::kPlayAndRecord);
+  mock.HandleCallbacks(event(), nullptr, kNumCallbacks);
+  EXPECT_CALL(mock, NeedMorePlayData(_, _, _, _, NotNull(), _, _, _))
+      .Times(AtLeast(kNumCallbacks));
+  EXPECT_CALL(mock, RecordedDataIsAvailable(NotNull(), _, _, _, _, Ge(0u), 0, _,
+                                            false, _, _))
+      .Times(AtLeast(kNumCallbacks));
+  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+  StartPlayout();
+  StartRecording();
+  event()->Wait(kTestTimeOut);
+  StopRecording();
+  StopPlayout();
+  PreTearDown();
+}
+
+// Start playout and recording and store recorded data in an intermediate FIFO
+// buffer from which the playout side then reads its samples in the same order
+// as they were stored. Under ideal circumstances, a callback sequence would
+// look like: ...+-+-+-+-+-+-+-..., where '+' means 'packet recorded' and '-'
+// means 'packet played'. Under such conditions, the FIFO would contain max 1,
+// with an average somewhere in (0,1) depending on how long the packets are
+// buffered. However, under more realistic conditions, the size
+// of the FIFO will vary more due to an unbalance between the two sides.
+// This test tries to verify that the device maintains a balanced callback-
+// sequence by running in loopback for a few seconds while measuring the size
+// (max and average) of the FIFO. The size of the FIFO is increased by the
+// recording side and decreased by the playout side.
+TEST_P(MAYBE_AudioDeviceTest, RunPlayoutAndRecordingInFullDuplex) {
+  SKIP_TEST_IF_NOT(requirements_satisfied());
+  NiceMock<MockAudioTransport> mock(TransportType::kPlayAndRecord);
+  FifoAudioStream audio_stream;
+  mock.HandleCallbacks(event(), &audio_stream,
+                       kFullDuplexTime.seconds() * kNumCallbacksPerSecond);
+  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+  // Run both sides using the same channel configuration to avoid conversions
+  // between mono/stereo while running in full duplex mode. Also, some devices
+  // (mainly on Windows) do not support mono.
+  EXPECT_EQ(0, audio_device()->SetStereoPlayout(true));
+  EXPECT_EQ(0, audio_device()->SetStereoRecording(true));
+  // Mute speakers to prevent howling.
+  EXPECT_EQ(0, audio_device()->SetSpeakerVolume(0));
+  StartPlayout();
+  StartRecording();
+  event()->Wait(std::max(kTestTimeOut, kFullDuplexTime));
+  StopRecording();
+  StopPlayout();
+  PreTearDown();
+}
+
+// Runs audio in full duplex until user hits Enter. Intended as a manual test
+// to ensure that the audio quality is good and that real device switches work
+// as intended.
+TEST_P(MAYBE_AudioDeviceTest,
+       DISABLED_RunPlayoutAndRecordingInFullDuplexAndWaitForEnterKey) {
+  SKIP_TEST_IF_NOT(requirements_satisfied());
+  if (audio_layer() != AudioDeviceModule::kWindowsCoreAudio2) {
+    return;
+  }
+  NiceMock<MockAudioTransport> mock(TransportType::kPlayAndRecord);
+  FifoAudioStream audio_stream;
+  mock.HandleCallbacks(&audio_stream);
+  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+  EXPECT_EQ(0, audio_device()->SetStereoPlayout(true));
+  EXPECT_EQ(0, audio_device()->SetStereoRecording(true));
+  // Ensure that the sample rate for both directions is identical so that we
+  // always can listen to our own voice. Will lead to rate conversion (and
+  // higher latency) if the native sample rate is not 48kHz.
+  EXPECT_EQ(0, audio_device()->SetPlayoutSampleRate(48000));
+  EXPECT_EQ(0, audio_device()->SetRecordingSampleRate(48000));
+  StartPlayout();
+  StartRecording();
+  do {
+    PRINT("Loopback audio is active at 48kHz. Press Enter to stop.\n");
+  } while (getchar() != '\n');
+  StopRecording();
+  StopPlayout();
+  PreTearDown();
+}
+
+// Measures loopback latency and reports the min, max and average values for
+// a full duplex audio session.
+// The latency is measured like so:
+// - Insert impulses periodically on the output side.
+// - Detect the impulses on the input side.
+// - Measure the time difference between the transmit time and receive time.
+// - Store time differences in a vector and calculate min, max and average.
+// This test needs the '--gtest_also_run_disabled_tests' flag to run and also
+// some sort of audio feedback loop. E.g. a headset where the mic is placed
+// close to the speaker to ensure highest possible echo. It is also recommended
+// to run the test at highest possible output volume.
+TEST_P(MAYBE_AudioDeviceTest, DISABLED_MeasureLoopbackLatency) {
+  SKIP_TEST_IF_NOT(requirements_satisfied());
+  NiceMock<MockAudioTransport> mock(TransportType::kPlayAndRecord);
+  LatencyAudioStream audio_stream;
+  mock.HandleCallbacks(event(), &audio_stream,
+                       kMeasureLatencyTime.seconds() * kNumCallbacksPerSecond);
+  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+  EXPECT_EQ(0, audio_device()->SetStereoPlayout(true));
+  EXPECT_EQ(0, audio_device()->SetStereoRecording(true));
+  StartPlayout();
+  StartRecording();
+  event()->Wait(std::max(kTestTimeOut, kMeasureLatencyTime));
+  StopRecording();
+  StopPlayout();
+  // Avoid concurrent access to audio_stream.
+  PreTearDown();
+  // Verify that a sufficient number of transmitted impulses are detected.
+  EXPECT_GE(audio_stream.num_latency_values(),
+            static_cast<size_t>(
+                kImpulseFrequencyInHz * kMeasureLatencyTime.seconds() - 2));
+  // Print out min, max and average delay values for debugging purposes.
+  audio_stream.PrintResults();
+}
+
+#ifdef WEBRTC_WIN
+// Test two different audio layers (or rather two different Core Audio
+// implementations) for Windows.
+INSTANTIATE_TEST_SUITE_P(
+    AudioLayerWin,
+    MAYBE_AudioDeviceTest,
+    ::testing::Values(AudioDeviceModule::kPlatformDefaultAudio,
+                      AudioDeviceModule::kWindowsCoreAudio2));
+#else
+// For all platforms but Windows, only test the default audio layer.
+INSTANTIATE_TEST_SUITE_P(
+    AudioLayer,
+    MAYBE_AudioDeviceTest,
+    ::testing::Values(AudioDeviceModule::kPlatformDefaultAudio));
+#endif
+
+}  // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/dummy/audio_device_dummy.cc b/third_party/libwebrtc/modules/audio_device/dummy/audio_device_dummy.cc
new file mode 100644
index 0000000000..b8fd837038
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/dummy/audio_device_dummy.cc
@@ -0,0 +1,226 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */ + +#include "modules/audio_device/dummy/audio_device_dummy.h" + +namespace webrtc { + +int32_t AudioDeviceDummy::ActiveAudioLayer( + AudioDeviceModule::AudioLayer& audioLayer) const { + return -1; +} + +AudioDeviceGeneric::InitStatus AudioDeviceDummy::Init() { + return InitStatus::OK; +} + +int32_t AudioDeviceDummy::Terminate() { + return 0; +} + +bool AudioDeviceDummy::Initialized() const { + return true; +} + +int16_t AudioDeviceDummy::PlayoutDevices() { + return -1; +} + +int16_t AudioDeviceDummy::RecordingDevices() { + return -1; +} + +int32_t AudioDeviceDummy::PlayoutDeviceName(uint16_t index, + char name[kAdmMaxDeviceNameSize], + char guid[kAdmMaxGuidSize]) { + return -1; +} + +int32_t AudioDeviceDummy::RecordingDeviceName(uint16_t index, + char name[kAdmMaxDeviceNameSize], + char guid[kAdmMaxGuidSize]) { + return -1; +} + +int32_t AudioDeviceDummy::SetPlayoutDevice(uint16_t index) { + return -1; +} + +int32_t AudioDeviceDummy::SetPlayoutDevice( + AudioDeviceModule::WindowsDeviceType device) { + return -1; +} + +int32_t AudioDeviceDummy::SetRecordingDevice(uint16_t index) { + return -1; +} + +int32_t AudioDeviceDummy::SetRecordingDevice( + AudioDeviceModule::WindowsDeviceType device) { + return -1; +} + +int32_t AudioDeviceDummy::PlayoutIsAvailable(bool& available) { + return -1; +} + +int32_t AudioDeviceDummy::InitPlayout() { + return -1; +} + +bool AudioDeviceDummy::PlayoutIsInitialized() const { + return false; +} + +int32_t AudioDeviceDummy::RecordingIsAvailable(bool& available) { + return -1; +} + +int32_t AudioDeviceDummy::InitRecording() { + return -1; +} + +bool AudioDeviceDummy::RecordingIsInitialized() const { + return false; +} + +int32_t AudioDeviceDummy::StartPlayout() { + return -1; +} + +int32_t AudioDeviceDummy::StopPlayout() { + return 0; +} + +bool AudioDeviceDummy::Playing() const { + return false; +} + +int32_t AudioDeviceDummy::StartRecording() { + return -1; +} + +int32_t AudioDeviceDummy::StopRecording() { + return 0; +} + +bool AudioDeviceDummy::Recording() const { + return false; +} + +int32_t AudioDeviceDummy::InitSpeaker() { + return -1; +} + +bool AudioDeviceDummy::SpeakerIsInitialized() const { + return false; +} + +int32_t AudioDeviceDummy::InitMicrophone() { + return -1; +} + +bool AudioDeviceDummy::MicrophoneIsInitialized() const { + return false; +} + +int32_t AudioDeviceDummy::SpeakerVolumeIsAvailable(bool& available) { + return -1; +} + +int32_t AudioDeviceDummy::SetSpeakerVolume(uint32_t volume) { + return -1; +} + +int32_t AudioDeviceDummy::SpeakerVolume(uint32_t& volume) const { + return -1; +} + +int32_t AudioDeviceDummy::MaxSpeakerVolume(uint32_t& maxVolume) const { + return -1; +} + +int32_t AudioDeviceDummy::MinSpeakerVolume(uint32_t& minVolume) const { + return -1; +} + +int32_t AudioDeviceDummy::MicrophoneVolumeIsAvailable(bool& available) { + return -1; +} + +int32_t AudioDeviceDummy::SetMicrophoneVolume(uint32_t volume) { + return -1; +} + +int32_t AudioDeviceDummy::MicrophoneVolume(uint32_t& volume) const { + return -1; +} + +int32_t AudioDeviceDummy::MaxMicrophoneVolume(uint32_t& maxVolume) const { + return -1; +} + +int32_t AudioDeviceDummy::MinMicrophoneVolume(uint32_t& minVolume) const { + return -1; +} + +int32_t AudioDeviceDummy::SpeakerMuteIsAvailable(bool& available) { + return -1; +} + +int32_t AudioDeviceDummy::SetSpeakerMute(bool enable) { + return -1; +} + +int32_t AudioDeviceDummy::SpeakerMute(bool& enabled) const { + return -1; +} + +int32_t AudioDeviceDummy::MicrophoneMuteIsAvailable(bool& available) { + return -1; +} 
+
+int32_t AudioDeviceDummy::SetMicrophoneMute(bool enable) {
+  return -1;
+}
+
+int32_t AudioDeviceDummy::MicrophoneMute(bool& enabled) const {
+  return -1;
+}
+
+int32_t AudioDeviceDummy::StereoPlayoutIsAvailable(bool& available) {
+  return -1;
+}
+
+int32_t AudioDeviceDummy::SetStereoPlayout(bool enable) {
+  return -1;
+}
+
+int32_t AudioDeviceDummy::StereoPlayout(bool& enabled) const {
+  return -1;
+}
+
+int32_t AudioDeviceDummy::StereoRecordingIsAvailable(bool& available) {
+  return -1;
+}
+
+int32_t AudioDeviceDummy::SetStereoRecording(bool enable) {
+  return -1;
+}
+
+int32_t AudioDeviceDummy::StereoRecording(bool& enabled) const {
+  return -1;
+}
+
+int32_t AudioDeviceDummy::PlayoutDelay(uint16_t& delayMS) const {
+  return -1;
+}
+
+void AudioDeviceDummy::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {}
+}  // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/dummy/audio_device_dummy.h b/third_party/libwebrtc/modules/audio_device/dummy/audio_device_dummy.h
new file mode 100644
index 0000000000..2a2541098e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/dummy/audio_device_dummy.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_DEVICE_AUDIO_DEVICE_DUMMY_H_
+#define AUDIO_DEVICE_AUDIO_DEVICE_DUMMY_H_
+
+#include <stdint.h>
+
+#include "modules/audio_device/audio_device_buffer.h"
+#include "modules/audio_device/audio_device_generic.h"
+#include "modules/audio_device/include/audio_device.h"
+#include "modules/audio_device/include/audio_device_defines.h"
+
+namespace webrtc {
+
+class AudioDeviceDummy : public AudioDeviceGeneric {
+ public:
+  AudioDeviceDummy() {}
+  virtual ~AudioDeviceDummy() {}
+
+  // Retrieve the currently utilized audio layer
+  int32_t ActiveAudioLayer(
+      AudioDeviceModule::AudioLayer& audioLayer) const override;
+
+  // Main initialization and termination
+  InitStatus Init() override;
+  int32_t Terminate() override;
+  bool Initialized() const override;
+
+  // Device enumeration
+  int16_t PlayoutDevices() override;
+  int16_t RecordingDevices() override;
+  int32_t PlayoutDeviceName(uint16_t index,
+                            char name[kAdmMaxDeviceNameSize],
+                            char guid[kAdmMaxGuidSize]) override;
+  int32_t RecordingDeviceName(uint16_t index,
+                              char name[kAdmMaxDeviceNameSize],
+                              char guid[kAdmMaxGuidSize]) override;
+
+  // Device selection
+  int32_t SetPlayoutDevice(uint16_t index) override;
+  int32_t SetPlayoutDevice(
+      AudioDeviceModule::WindowsDeviceType device) override;
+  int32_t SetRecordingDevice(uint16_t index) override;
+  int32_t SetRecordingDevice(
+      AudioDeviceModule::WindowsDeviceType device) override;
+
+  // Audio transport initialization
+  int32_t PlayoutIsAvailable(bool& available) override;
+  int32_t InitPlayout() override;
+  bool PlayoutIsInitialized() const override;
+  int32_t RecordingIsAvailable(bool& available) override;
+  int32_t InitRecording() override;
+  bool RecordingIsInitialized() const override;
+
+  // Audio transport control
+  int32_t StartPlayout() override;
+  int32_t StopPlayout() override;
+  bool Playing() const override;
+  int32_t StartRecording() override;
+  int32_t StopRecording() override;
+  bool Recording() const override;
+
+  // Audio mixer initialization
mixer initialization + int32_t InitSpeaker() override; + bool SpeakerIsInitialized() const override; + int32_t InitMicrophone() override; + bool MicrophoneIsInitialized() const override; + + // Speaker volume controls + int32_t SpeakerVolumeIsAvailable(bool& available) override; + int32_t SetSpeakerVolume(uint32_t volume) override; + int32_t SpeakerVolume(uint32_t& volume) const override; + int32_t MaxSpeakerVolume(uint32_t& maxVolume) const override; + int32_t MinSpeakerVolume(uint32_t& minVolume) const override; + + // Microphone volume controls + int32_t MicrophoneVolumeIsAvailable(bool& available) override; + int32_t SetMicrophoneVolume(uint32_t volume) override; + int32_t MicrophoneVolume(uint32_t& volume) const override; + int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const override; + int32_t MinMicrophoneVolume(uint32_t& minVolume) const override; + + // Speaker mute control + int32_t SpeakerMuteIsAvailable(bool& available) override; + int32_t SetSpeakerMute(bool enable) override; + int32_t SpeakerMute(bool& enabled) const override; + + // Microphone mute control + int32_t MicrophoneMuteIsAvailable(bool& available) override; + int32_t SetMicrophoneMute(bool enable) override; + int32_t MicrophoneMute(bool& enabled) const override; + + // Stereo support + int32_t StereoPlayoutIsAvailable(bool& available) override; + int32_t SetStereoPlayout(bool enable) override; + int32_t StereoPlayout(bool& enabled) const override; + int32_t StereoRecordingIsAvailable(bool& available) override; + int32_t SetStereoRecording(bool enable) override; + int32_t StereoRecording(bool& enabled) const override; + + // Delay information and control + int32_t PlayoutDelay(uint16_t& delayMS) const override; + + void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override; +}; + +} // namespace webrtc + +#endif // AUDIO_DEVICE_AUDIO_DEVICE_DUMMY_H_ diff --git a/third_party/libwebrtc/modules/audio_device/dummy/file_audio_device.cc b/third_party/libwebrtc/modules/audio_device/dummy/file_audio_device.cc new file mode 100644 index 0000000000..8c10ae4186 --- /dev/null +++ b/third_party/libwebrtc/modules/audio_device/dummy/file_audio_device.cc @@ -0,0 +1,508 @@ +/* + * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "modules/audio_device/dummy/file_audio_device.h" + +#include + +#include "absl/strings/string_view.h" +#include "rtc_base/checks.h" +#include "rtc_base/logging.h" +#include "rtc_base/platform_thread.h" +#include "rtc_base/time_utils.h" +#include "system_wrappers/include/sleep.h" + +namespace webrtc { + +const int kRecordingFixedSampleRate = 48000; +const size_t kRecordingNumChannels = 2; +const int kPlayoutFixedSampleRate = 48000; +const size_t kPlayoutNumChannels = 2; +const size_t kPlayoutBufferSize = + kPlayoutFixedSampleRate / 100 * kPlayoutNumChannels * 2; +const size_t kRecordingBufferSize = + kRecordingFixedSampleRate / 100 * kRecordingNumChannels * 2; + +FileAudioDevice::FileAudioDevice(absl::string_view inputFilename, + absl::string_view outputFilename) + : _ptrAudioBuffer(NULL), + _recordingBuffer(NULL), + _playoutBuffer(NULL), + _recordingFramesLeft(0), + _playoutFramesLeft(0), + _recordingBufferSizeIn10MS(0), + _recordingFramesIn10MS(0), + _playoutFramesIn10MS(0), + _playing(false), + _recording(false), + _lastCallPlayoutMillis(0), + _lastCallRecordMillis(0), + _outputFilename(outputFilename), + _inputFilename(inputFilename) {} + +FileAudioDevice::~FileAudioDevice() {} + +int32_t FileAudioDevice::ActiveAudioLayer( + AudioDeviceModule::AudioLayer& audioLayer) const { + return -1; +} + +AudioDeviceGeneric::InitStatus FileAudioDevice::Init() { + return InitStatus::OK; +} + +int32_t FileAudioDevice::Terminate() { + return 0; +} + +bool FileAudioDevice::Initialized() const { + return true; +} + +int16_t FileAudioDevice::PlayoutDevices() { + return 1; +} + +int16_t FileAudioDevice::RecordingDevices() { + return 1; +} + +int32_t FileAudioDevice::PlayoutDeviceName(uint16_t index, + char name[kAdmMaxDeviceNameSize], + char guid[kAdmMaxGuidSize]) { + const char* kName = "dummy_device"; + const char* kGuid = "dummy_device_unique_id"; + if (index < 1) { + memset(name, 0, kAdmMaxDeviceNameSize); + memset(guid, 0, kAdmMaxGuidSize); + memcpy(name, kName, strlen(kName)); + memcpy(guid, kGuid, strlen(guid)); + return 0; + } + return -1; +} + +int32_t FileAudioDevice::RecordingDeviceName(uint16_t index, + char name[kAdmMaxDeviceNameSize], + char guid[kAdmMaxGuidSize]) { + const char* kName = "dummy_device"; + const char* kGuid = "dummy_device_unique_id"; + if (index < 1) { + memset(name, 0, kAdmMaxDeviceNameSize); + memset(guid, 0, kAdmMaxGuidSize); + memcpy(name, kName, strlen(kName)); + memcpy(guid, kGuid, strlen(guid)); + return 0; + } + return -1; +} + +int32_t FileAudioDevice::SetPlayoutDevice(uint16_t index) { + if (index == 0) { + _playout_index = index; + return 0; + } + return -1; +} + +int32_t FileAudioDevice::SetPlayoutDevice( + AudioDeviceModule::WindowsDeviceType device) { + return -1; +} + +int32_t FileAudioDevice::SetRecordingDevice(uint16_t index) { + if (index == 0) { + _record_index = index; + return _record_index; + } + return -1; +} + +int32_t FileAudioDevice::SetRecordingDevice( + AudioDeviceModule::WindowsDeviceType device) { + return -1; +} + +int32_t FileAudioDevice::PlayoutIsAvailable(bool& available) { + if (_playout_index == 0) { + available = true; + return _playout_index; + } + available = false; + return -1; +} + +int32_t FileAudioDevice::InitPlayout() { + MutexLock lock(&mutex_); + + if (_playing) { + return -1; + } + + _playoutFramesIn10MS = static_cast(kPlayoutFixedSampleRate / 100); + + if (_ptrAudioBuffer) { + // Update webrtc audio buffer with the selected parameters + _ptrAudioBuffer->SetPlayoutSampleRate(kPlayoutFixedSampleRate); + 
_ptrAudioBuffer->SetPlayoutChannels(kPlayoutNumChannels); + } + return 0; +} + +bool FileAudioDevice::PlayoutIsInitialized() const { + return _playoutFramesIn10MS != 0; +} + +int32_t FileAudioDevice::RecordingIsAvailable(bool& available) { + if (_record_index == 0) { + available = true; + return _record_index; + } + available = false; + return -1; +} + +int32_t FileAudioDevice::InitRecording() { + MutexLock lock(&mutex_); + + if (_recording) { + return -1; + } + + _recordingFramesIn10MS = static_cast(kRecordingFixedSampleRate / 100); + + if (_ptrAudioBuffer) { + _ptrAudioBuffer->SetRecordingSampleRate(kRecordingFixedSampleRate); + _ptrAudioBuffer->SetRecordingChannels(kRecordingNumChannels); + } + return 0; +} + +bool FileAudioDevice::RecordingIsInitialized() const { + return _recordingFramesIn10MS != 0; +} + +int32_t FileAudioDevice::StartPlayout() { + if (_playing) { + return 0; + } + + _playing = true; + _playoutFramesLeft = 0; + + if (!_playoutBuffer) { + _playoutBuffer = new int8_t[kPlayoutBufferSize]; + } + if (!_playoutBuffer) { + _playing = false; + return -1; + } + + // PLAYOUT + if (!_outputFilename.empty()) { + _outputFile = FileWrapper::OpenWriteOnly(_outputFilename); + if (!_outputFile.is_open()) { + RTC_LOG(LS_ERROR) << "Failed to open playout file: " << _outputFilename; + _playing = false; + delete[] _playoutBuffer; + _playoutBuffer = NULL; + return -1; + } + } + + _ptrThreadPlay = rtc::PlatformThread::SpawnJoinable( + [this] { + while (PlayThreadProcess()) { + } + }, + "webrtc_audio_module_play_thread", + rtc::ThreadAttributes().SetPriority(rtc::ThreadPriority::kRealtime)); + + RTC_LOG(LS_INFO) << "Started playout capture to output file: " + << _outputFilename; + return 0; +} + +int32_t FileAudioDevice::StopPlayout() { + { + MutexLock lock(&mutex_); + _playing = false; + } + + // stop playout thread first + if (!_ptrThreadPlay.empty()) + _ptrThreadPlay.Finalize(); + + MutexLock lock(&mutex_); + + _playoutFramesLeft = 0; + delete[] _playoutBuffer; + _playoutBuffer = NULL; + _outputFile.Close(); + + RTC_LOG(LS_INFO) << "Stopped playout capture to output file: " + << _outputFilename; + return 0; +} + +bool FileAudioDevice::Playing() const { + return _playing; +} + +int32_t FileAudioDevice::StartRecording() { + _recording = true; + + // Make sure we only create the buffer once. 
+ _recordingBufferSizeIn10MS = + _recordingFramesIn10MS * kRecordingNumChannels * 2; + if (!_recordingBuffer) { + _recordingBuffer = new int8_t[_recordingBufferSizeIn10MS]; + } + + if (!_inputFilename.empty()) { + _inputFile = FileWrapper::OpenReadOnly(_inputFilename); + if (!_inputFile.is_open()) { + RTC_LOG(LS_ERROR) << "Failed to open audio input file: " + << _inputFilename; + _recording = false; + delete[] _recordingBuffer; + _recordingBuffer = NULL; + return -1; + } + } + + _ptrThreadRec = rtc::PlatformThread::SpawnJoinable( + [this] { + while (RecThreadProcess()) { + } + }, + "webrtc_audio_module_capture_thread", + rtc::ThreadAttributes().SetPriority(rtc::ThreadPriority::kRealtime)); + + RTC_LOG(LS_INFO) << "Started recording from input file: " << _inputFilename; + + return 0; +} + +int32_t FileAudioDevice::StopRecording() { + { + MutexLock lock(&mutex_); + _recording = false; + } + + if (!_ptrThreadRec.empty()) + _ptrThreadRec.Finalize(); + + MutexLock lock(&mutex_); + _recordingFramesLeft = 0; + if (_recordingBuffer) { + delete[] _recordingBuffer; + _recordingBuffer = NULL; + } + _inputFile.Close(); + + RTC_LOG(LS_INFO) << "Stopped recording from input file: " << _inputFilename; + return 0; +} + +bool FileAudioDevice::Recording() const { + return _recording; +} + +int32_t FileAudioDevice::InitSpeaker() { + return -1; +} + +bool FileAudioDevice::SpeakerIsInitialized() const { + return false; +} + +int32_t FileAudioDevice::InitMicrophone() { + return 0; +} + +bool FileAudioDevice::MicrophoneIsInitialized() const { + return true; +} + +int32_t FileAudioDevice::SpeakerVolumeIsAvailable(bool& available) { + return -1; +} + +int32_t FileAudioDevice::SetSpeakerVolume(uint32_t volume) { + return -1; +} + +int32_t FileAudioDevice::SpeakerVolume(uint32_t& volume) const { + return -1; +} + +int32_t FileAudioDevice::MaxSpeakerVolume(uint32_t& maxVolume) const { + return -1; +} + +int32_t FileAudioDevice::MinSpeakerVolume(uint32_t& minVolume) const { + return -1; +} + +int32_t FileAudioDevice::MicrophoneVolumeIsAvailable(bool& available) { + return -1; +} + +int32_t FileAudioDevice::SetMicrophoneVolume(uint32_t volume) { + return -1; +} + +int32_t FileAudioDevice::MicrophoneVolume(uint32_t& volume) const { + return -1; +} + +int32_t FileAudioDevice::MaxMicrophoneVolume(uint32_t& maxVolume) const { + return -1; +} + +int32_t FileAudioDevice::MinMicrophoneVolume(uint32_t& minVolume) const { + return -1; +} + +int32_t FileAudioDevice::SpeakerMuteIsAvailable(bool& available) { + return -1; +} + +int32_t FileAudioDevice::SetSpeakerMute(bool enable) { + return -1; +} + +int32_t FileAudioDevice::SpeakerMute(bool& enabled) const { + return -1; +} + +int32_t FileAudioDevice::MicrophoneMuteIsAvailable(bool& available) { + return -1; +} + +int32_t FileAudioDevice::SetMicrophoneMute(bool enable) { + return -1; +} + +int32_t FileAudioDevice::MicrophoneMute(bool& enabled) const { + return -1; +} + +int32_t FileAudioDevice::StereoPlayoutIsAvailable(bool& available) { + available = true; + return 0; +} +int32_t FileAudioDevice::SetStereoPlayout(bool enable) { + return 0; +} + +int32_t FileAudioDevice::StereoPlayout(bool& enabled) const { + enabled = true; + return 0; +} + +int32_t FileAudioDevice::StereoRecordingIsAvailable(bool& available) { + available = true; + return 0; +} + +int32_t FileAudioDevice::SetStereoRecording(bool enable) { + return 0; +} + +int32_t FileAudioDevice::StereoRecording(bool& enabled) const { + enabled = true; + return 0; +} + +int32_t FileAudioDevice::PlayoutDelay(uint16_t& delayMS) 
const { + return 0; +} + +void FileAudioDevice::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) { + MutexLock lock(&mutex_); + + _ptrAudioBuffer = audioBuffer; + + // Inform the AudioBuffer about default settings for this implementation. + // Set all values to zero here since the actual settings will be done by + // InitPlayout and InitRecording later. + _ptrAudioBuffer->SetRecordingSampleRate(0); + _ptrAudioBuffer->SetPlayoutSampleRate(0); + _ptrAudioBuffer->SetRecordingChannels(0); + _ptrAudioBuffer->SetPlayoutChannels(0); +} + +bool FileAudioDevice::PlayThreadProcess() { + if (!_playing) { + return false; + } + int64_t currentTime = rtc::TimeMillis(); + mutex_.Lock(); + + if (_lastCallPlayoutMillis == 0 || + currentTime - _lastCallPlayoutMillis >= 10) { + mutex_.Unlock(); + _ptrAudioBuffer->RequestPlayoutData(_playoutFramesIn10MS); + mutex_.Lock(); + + _playoutFramesLeft = _ptrAudioBuffer->GetPlayoutData(_playoutBuffer); + RTC_DCHECK_EQ(_playoutFramesIn10MS, _playoutFramesLeft); + if (_outputFile.is_open()) { + _outputFile.Write(_playoutBuffer, kPlayoutBufferSize); + } + _lastCallPlayoutMillis = currentTime; + } + _playoutFramesLeft = 0; + mutex_.Unlock(); + + int64_t deltaTimeMillis = rtc::TimeMillis() - currentTime; + if (deltaTimeMillis < 10) { + SleepMs(10 - deltaTimeMillis); + } + + return true; +} + +bool FileAudioDevice::RecThreadProcess() { + if (!_recording) { + return false; + } + + int64_t currentTime = rtc::TimeMillis(); + mutex_.Lock(); + + if (_lastCallRecordMillis == 0 || currentTime - _lastCallRecordMillis >= 10) { + if (_inputFile.is_open()) { + if (_inputFile.Read(_recordingBuffer, kRecordingBufferSize) > 0) { + _ptrAudioBuffer->SetRecordedBuffer(_recordingBuffer, + _recordingFramesIn10MS); + } else { + _inputFile.Rewind(); + } + _lastCallRecordMillis = currentTime; + mutex_.Unlock(); + _ptrAudioBuffer->DeliverRecordedData(); + mutex_.Lock(); + } + } + + mutex_.Unlock(); + + int64_t deltaTimeMillis = rtc::TimeMillis() - currentTime; + if (deltaTimeMillis < 10) { + SleepMs(10 - deltaTimeMillis); + } + + return true; +} + +} // namespace webrtc diff --git a/third_party/libwebrtc/modules/audio_device/dummy/file_audio_device.h b/third_party/libwebrtc/modules/audio_device/dummy/file_audio_device.h new file mode 100644 index 0000000000..27979933f2 --- /dev/null +++ b/third_party/libwebrtc/modules/audio_device/dummy/file_audio_device.h @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef AUDIO_DEVICE_FILE_AUDIO_DEVICE_H_ +#define AUDIO_DEVICE_FILE_AUDIO_DEVICE_H_ + +#include <stdio.h> + +#include <memory> +#include <string> + +#include "absl/strings/string_view.h" +#include "modules/audio_device/audio_device_generic.h" +#include "rtc_base/platform_thread.h" +#include "rtc_base/synchronization/mutex.h" +#include "rtc_base/system/file_wrapper.h" +#include "rtc_base/time_utils.h" + +namespace webrtc { + +// This is a fake audio device which plays audio from a file as its microphone +// and plays out into a file. +class FileAudioDevice : public AudioDeviceGeneric { + public: + // Constructs a file audio device. It will read audio from + // `inputFilename` and record output audio to `outputFilename`.
+ // + // The input file should be a readable 48k stereo raw file, and the output + // file should point to a writable location. The output format will also be + // 48k stereo raw audio. + FileAudioDevice(absl::string_view inputFilename, + absl::string_view outputFilename); + virtual ~FileAudioDevice(); + + // Retrieve the currently utilized audio layer + int32_t ActiveAudioLayer( + AudioDeviceModule::AudioLayer& audioLayer) const override; + + // Main initialization and termination + InitStatus Init() override; + int32_t Terminate() override; + bool Initialized() const override; + + // Device enumeration + int16_t PlayoutDevices() override; + int16_t RecordingDevices() override; + int32_t PlayoutDeviceName(uint16_t index, + char name[kAdmMaxDeviceNameSize], + char guid[kAdmMaxGuidSize]) override; + int32_t RecordingDeviceName(uint16_t index, + char name[kAdmMaxDeviceNameSize], + char guid[kAdmMaxGuidSize]) override; + + // Device selection + int32_t SetPlayoutDevice(uint16_t index) override; + int32_t SetPlayoutDevice( + AudioDeviceModule::WindowsDeviceType device) override; + int32_t SetRecordingDevice(uint16_t index) override; + int32_t SetRecordingDevice( + AudioDeviceModule::WindowsDeviceType device) override; + + // Audio transport initialization + int32_t PlayoutIsAvailable(bool& available) override; + int32_t InitPlayout() override; + bool PlayoutIsInitialized() const override; + int32_t RecordingIsAvailable(bool& available) override; + int32_t InitRecording() override; + bool RecordingIsInitialized() const override; + + // Audio transport control + int32_t StartPlayout() override; + int32_t StopPlayout() override; + bool Playing() const override; + int32_t StartRecording() override; + int32_t StopRecording() override; + bool Recording() const override; + + // Audio mixer initialization + int32_t InitSpeaker() override; + bool SpeakerIsInitialized() const override; + int32_t InitMicrophone() override; + bool MicrophoneIsInitialized() const override; + + // Speaker volume controls + int32_t SpeakerVolumeIsAvailable(bool& available) override; + int32_t SetSpeakerVolume(uint32_t volume) override; + int32_t SpeakerVolume(uint32_t& volume) const override; + int32_t MaxSpeakerVolume(uint32_t& maxVolume) const override; + int32_t MinSpeakerVolume(uint32_t& minVolume) const override; + + // Microphone volume controls + int32_t MicrophoneVolumeIsAvailable(bool& available) override; + int32_t SetMicrophoneVolume(uint32_t volume) override; + int32_t MicrophoneVolume(uint32_t& volume) const override; + int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const override; + int32_t MinMicrophoneVolume(uint32_t& minVolume) const override; + + // Speaker mute control + int32_t SpeakerMuteIsAvailable(bool& available) override; + int32_t SetSpeakerMute(bool enable) override; + int32_t SpeakerMute(bool& enabled) const override; + + // Microphone mute control + int32_t MicrophoneMuteIsAvailable(bool& available) override; + int32_t SetMicrophoneMute(bool enable) override; + int32_t MicrophoneMute(bool& enabled) const override; + + // Stereo support + int32_t StereoPlayoutIsAvailable(bool& available) override; + int32_t SetStereoPlayout(bool enable) override; + int32_t StereoPlayout(bool& enabled) const override; + int32_t StereoRecordingIsAvailable(bool& available) override; + int32_t SetStereoRecording(bool enable) override; + int32_t StereoRecording(bool& enabled) const override; + + // Delay information and control + int32_t PlayoutDelay(uint16_t& delayMS) const override; + + void
AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override; + + private: + static void RecThreadFunc(void*); + static void PlayThreadFunc(void*); + bool RecThreadProcess(); + bool PlayThreadProcess(); + + int32_t _playout_index; + int32_t _record_index; + AudioDeviceBuffer* _ptrAudioBuffer; + int8_t* _recordingBuffer; // In bytes. + int8_t* _playoutBuffer; // In bytes. + uint32_t _recordingFramesLeft; + uint32_t _playoutFramesLeft; + Mutex mutex_; + + size_t _recordingBufferSizeIn10MS; + size_t _recordingFramesIn10MS; + size_t _playoutFramesIn10MS; + + rtc::PlatformThread _ptrThreadRec; + rtc::PlatformThread _ptrThreadPlay; + + bool _playing; + bool _recording; + int64_t _lastCallPlayoutMillis; + int64_t _lastCallRecordMillis; + + FileWrapper _outputFile; + FileWrapper _inputFile; + std::string _outputFilename; + std::string _inputFilename; +}; + +} // namespace webrtc + +#endif // AUDIO_DEVICE_FILE_AUDIO_DEVICE_H_ diff --git a/third_party/libwebrtc/modules/audio_device/dummy/file_audio_device_factory.cc b/third_party/libwebrtc/modules/audio_device/dummy/file_audio_device_factory.cc new file mode 100644 index 0000000000..8c41111478 --- /dev/null +++ b/third_party/libwebrtc/modules/audio_device/dummy/file_audio_device_factory.cc @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/audio_device/dummy/file_audio_device_factory.h" + +#include <stdio.h> + +#include <cstdlib> + +#include "absl/strings/string_view.h" +#include "modules/audio_device/dummy/file_audio_device.h" +#include "rtc_base/logging.h" +#include "rtc_base/string_utils.h" + +namespace webrtc { + +bool FileAudioDeviceFactory::_isConfigured = false; +char FileAudioDeviceFactory::_inputAudioFilename[MAX_FILENAME_LEN] = ""; +char FileAudioDeviceFactory::_outputAudioFilename[MAX_FILENAME_LEN] = ""; + +FileAudioDevice* FileAudioDeviceFactory::CreateFileAudioDevice() { + // Bail out here if the files haven't been set explicitly. + // audio_device_impl.cc should then fall back to dummy audio. + if (!_isConfigured) { + RTC_LOG(LS_WARNING) + << "WebRTC configured with WEBRTC_DUMMY_FILE_DEVICES but " + "no device files supplied. Will fall back to dummy " + "audio."; + + return nullptr; + } + return new FileAudioDevice(_inputAudioFilename, _outputAudioFilename); +} + +void FileAudioDeviceFactory::SetFilenamesToUse( + absl::string_view inputAudioFilename, + absl::string_view outputAudioFilename) { +#ifdef WEBRTC_DUMMY_FILE_DEVICES + RTC_DCHECK_LT(inputAudioFilename.size(), MAX_FILENAME_LEN); + RTC_DCHECK_LT(outputAudioFilename.size(), MAX_FILENAME_LEN); + + // Copy the strings since we don't know the lifetime of the input pointers. + rtc::strcpyn(_inputAudioFilename, MAX_FILENAME_LEN, inputAudioFilename); + rtc::strcpyn(_outputAudioFilename, MAX_FILENAME_LEN, outputAudioFilename); + _isConfigured = true; +#else + // Sanity: must be compiled with the right define to run this. + printf( + "Trying to use dummy file devices, but it is not compiled " + "with WEBRTC_DUMMY_FILE_DEVICES.
Bailing out.\n"); + std::exit(1); +#endif +} + +} // namespace webrtc diff --git a/third_party/libwebrtc/modules/audio_device/dummy/file_audio_device_factory.h b/third_party/libwebrtc/modules/audio_device/dummy/file_audio_device_factory.h new file mode 100644 index 0000000000..18f9388f21 --- /dev/null +++ b/third_party/libwebrtc/modules/audio_device/dummy/file_audio_device_factory.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef AUDIO_DEVICE_FILE_AUDIO_DEVICE_FACTORY_H_ +#define AUDIO_DEVICE_FILE_AUDIO_DEVICE_FACTORY_H_ + +#include <stdint.h> + +#include "absl/strings/string_view.h" + +namespace webrtc { + +class FileAudioDevice; + +// This class is used by audio_device_impl.cc when WebRTC is compiled with +// WEBRTC_DUMMY_FILE_DEVICES. The application must include this file and set the +// filenames to use before the audio device module is initialized. This is +// intended for test tools which use the audio device module. +class FileAudioDeviceFactory { + public: + static FileAudioDevice* CreateFileAudioDevice(); + + // The input file must be a readable 48k stereo raw file. The output + // file must be writable. The strings will be copied. + static void SetFilenamesToUse(absl::string_view inputAudioFilename, + absl::string_view outputAudioFilename); + + private: + enum : uint32_t { MAX_FILENAME_LEN = 512 }; + static bool _isConfigured; + static char _inputAudioFilename[MAX_FILENAME_LEN]; + static char _outputAudioFilename[MAX_FILENAME_LEN]; +}; + +} // namespace webrtc + +#endif // AUDIO_DEVICE_FILE_AUDIO_DEVICE_FACTORY_H_ diff --git a/third_party/libwebrtc/modules/audio_device/fine_audio_buffer.cc b/third_party/libwebrtc/modules/audio_device/fine_audio_buffer.cc new file mode 100644 index 0000000000..86240da196 --- /dev/null +++ b/third_party/libwebrtc/modules/audio_device/fine_audio_buffer.cc @@ -0,0 +1,130 @@ +/* + * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree.
+ */ + +#include "modules/audio_device/fine_audio_buffer.h" + +#include <cstdint> +#include <cstring> + +#include "modules/audio_device/audio_device_buffer.h" +#include "rtc_base/checks.h" +#include "rtc_base/logging.h" +#include "rtc_base/numerics/safe_conversions.h" + +namespace webrtc { + +FineAudioBuffer::FineAudioBuffer(AudioDeviceBuffer* audio_device_buffer) + : audio_device_buffer_(audio_device_buffer), + playout_samples_per_channel_10ms_(rtc::dchecked_cast<size_t>( + audio_device_buffer->PlayoutSampleRate() * 10 / 1000)), + record_samples_per_channel_10ms_(rtc::dchecked_cast<size_t>( + audio_device_buffer->RecordingSampleRate() * 10 / 1000)), + playout_channels_(audio_device_buffer->PlayoutChannels()), + record_channels_(audio_device_buffer->RecordingChannels()) { + RTC_DCHECK(audio_device_buffer_); + RTC_DLOG(LS_INFO) << __FUNCTION__; + if (IsReadyForPlayout()) { + RTC_DLOG(LS_INFO) << "playout_samples_per_channel_10ms: " + << playout_samples_per_channel_10ms_; + RTC_DLOG(LS_INFO) << "playout_channels: " << playout_channels_; + } + if (IsReadyForRecord()) { + RTC_DLOG(LS_INFO) << "record_samples_per_channel_10ms: " + << record_samples_per_channel_10ms_; + RTC_DLOG(LS_INFO) << "record_channels: " << record_channels_; + } +} + +FineAudioBuffer::~FineAudioBuffer() { + RTC_DLOG(LS_INFO) << __FUNCTION__; +} + +void FineAudioBuffer::ResetPlayout() { + playout_buffer_.Clear(); +} + +void FineAudioBuffer::ResetRecord() { + record_buffer_.Clear(); +} + +bool FineAudioBuffer::IsReadyForPlayout() const { + return playout_samples_per_channel_10ms_ > 0 && playout_channels_ > 0; +} + +bool FineAudioBuffer::IsReadyForRecord() const { + return record_samples_per_channel_10ms_ > 0 && record_channels_ > 0; +} + +void FineAudioBuffer::GetPlayoutData(rtc::ArrayView<int16_t> audio_buffer, + int playout_delay_ms) { + RTC_DCHECK(IsReadyForPlayout()); + // Ask WebRTC for new data in chunks of 10ms until we have enough to + // fulfill the request. It is possible that the buffer already contains + // enough samples from the last round. + while (playout_buffer_.size() < audio_buffer.size()) { + // Get 10ms decoded audio from WebRTC. The ADB knows about number of + // channels; hence we can ask for number of samples per channel here. + if (audio_device_buffer_->RequestPlayoutData( + playout_samples_per_channel_10ms_) == + static_cast<int32_t>(playout_samples_per_channel_10ms_)) { + // Append 10ms to the end of the local buffer taking number of channels + // into account. + const size_t num_elements_10ms = + playout_channels_ * playout_samples_per_channel_10ms_; + const size_t written_elements = playout_buffer_.AppendData( + num_elements_10ms, [&](rtc::ArrayView<int16_t> buf) { + const size_t samples_per_channel_10ms = + audio_device_buffer_->GetPlayoutData(buf.data()); + return playout_channels_ * samples_per_channel_10ms; + }); + RTC_DCHECK_EQ(num_elements_10ms, written_elements); + } else { + // Provide silence if AudioDeviceBuffer::RequestPlayoutData() fails. + // Can e.g. happen when an AudioTransport has not been registered. + const size_t num_bytes = audio_buffer.size() * sizeof(int16_t); + std::memset(audio_buffer.data(), 0, num_bytes); + return; + } + } + + // Provide the requested number of bytes to the consumer. + const size_t num_bytes = audio_buffer.size() * sizeof(int16_t); + memcpy(audio_buffer.data(), playout_buffer_.data(), num_bytes); + // Move remaining samples to start of buffer to prepare for next round.
+ memmove(playout_buffer_.data(), playout_buffer_.data() + audio_buffer.size(), + (playout_buffer_.size() - audio_buffer.size()) * sizeof(int16_t)); + playout_buffer_.SetSize(playout_buffer_.size() - audio_buffer.size()); + // Cache playout latency for usage in DeliverRecordedData(). + playout_delay_ms_ = playout_delay_ms; +} + +void FineAudioBuffer::DeliverRecordedData( + rtc::ArrayView<const int16_t> audio_buffer, + int record_delay_ms) { + RTC_DCHECK(IsReadyForRecord()); + // Always append new data and grow the buffer when needed. + record_buffer_.AppendData(audio_buffer.data(), audio_buffer.size()); + // Consume samples from buffer in chunks of 10ms until there is not + // enough data left. The number of remaining samples in the cache is given by + // the new size of the internal `record_buffer_`. + const size_t num_elements_10ms = + record_channels_ * record_samples_per_channel_10ms_; + while (record_buffer_.size() >= num_elements_10ms) { + audio_device_buffer_->SetRecordedBuffer(record_buffer_.data(), + record_samples_per_channel_10ms_); + audio_device_buffer_->SetVQEData(playout_delay_ms_, record_delay_ms); + audio_device_buffer_->DeliverRecordedData(); + memmove(record_buffer_.data(), record_buffer_.data() + num_elements_10ms, + (record_buffer_.size() - num_elements_10ms) * sizeof(int16_t)); + record_buffer_.SetSize(record_buffer_.size() - num_elements_10ms); + } +} + +} // namespace webrtc diff --git a/third_party/libwebrtc/modules/audio_device/fine_audio_buffer.h b/third_party/libwebrtc/modules/audio_device/fine_audio_buffer.h new file mode 100644 index 0000000000..a6c3042bb2 --- /dev/null +++ b/third_party/libwebrtc/modules/audio_device/fine_audio_buffer.h @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef MODULES_AUDIO_DEVICE_FINE_AUDIO_BUFFER_H_ +#define MODULES_AUDIO_DEVICE_FINE_AUDIO_BUFFER_H_ + +#include "api/array_view.h" +#include "rtc_base/buffer.h" + +namespace webrtc { + +class AudioDeviceBuffer; + +// FineAudioBuffer takes an AudioDeviceBuffer (ADB) which deals with 16-bit PCM +// audio samples corresponding to 10ms of data. It then allows for this data +// to be pulled in a finer or coarser granularity. I.e., by interacting with +// this class instead of directly with the AudioDeviceBuffer, one can ask for +// any number of audio data samples. This class also ensures that audio data +// can be delivered to the ADB in 10ms chunks when the size of the provided +// audio buffers differs from 10ms. +// As an example: calling DeliverRecordedData() with 5ms buffers will deliver +// accumulated 10ms worth of data to the ADB every second call. +class FineAudioBuffer { + public: + // `audio_device_buffer` is a buffer that provides 10ms of audio data. + FineAudioBuffer(AudioDeviceBuffer* audio_device_buffer); + ~FineAudioBuffer(); + + // Clears buffers and counters dealing with playout and/or recording. + void ResetPlayout(); + void ResetRecord(); + + // Utility methods which return true if valid parameters were acquired at + // construction.
+ bool IsReadyForPlayout() const; + bool IsReadyForRecord() const; + + // Copies audio samples into `audio_buffer` where number of requested + // elements is specified by `audio_buffer.size()`. The producer will always + // fill up the audio buffer and if no audio exists, the buffer will contain + // silence instead. The provided delay estimate in `playout_delay_ms` should + // contain an estimate of the latency between when an audio frame is read from + // WebRTC and when it is played out on the speaker. + void GetPlayoutData(rtc::ArrayView<int16_t> audio_buffer, + int playout_delay_ms); + + // Consumes the audio data in `audio_buffer` and sends it to the WebRTC layer + // in chunks of 10ms. The sum of the provided delay estimate in + // `record_delay_ms` and the latest `playout_delay_ms` in GetPlayoutData() + // are given to the AEC in the audio processing module. + // They can be fixed values on most platforms and they are ignored if an + // external (hardware/built-in) AEC is used. + // Example: buffer size is 5ms => call #1 stores 5ms of data, call #2 stores + // 5ms of data and sends a total of 10ms to WebRTC and clears the internal + // cache. Call #3 restarts the scheme above. + void DeliverRecordedData(rtc::ArrayView<const int16_t> audio_buffer, + int record_delay_ms); + + private: + // Device buffer that works with 10ms chunks of data both for playout and + // for recording. I.e., the WebRTC side will always be asked for audio to be + // played out in 10ms chunks and recorded audio will be sent to WebRTC in + // 10ms chunks as well. This raw pointer is owned by the constructor of this + // class and the owner must ensure that the pointer is valid during the life- + // time of this object. + AudioDeviceBuffer* const audio_device_buffer_; + // Number of audio samples per channel per 10ms. Set once at construction + // based on parameters in `audio_device_buffer`. + const size_t playout_samples_per_channel_10ms_; + const size_t record_samples_per_channel_10ms_; + // Number of audio channels. Set once at construction based on parameters in + // `audio_device_buffer`. + const size_t playout_channels_; + const size_t record_channels_; + // Storage for output samples from which a consumer can read audio buffers + // in any size using GetPlayoutData(). + rtc::BufferT<int16_t> playout_buffer_; + // Storage for input samples that are about to be delivered to the WebRTC + // ADB or remains from the last successful delivery of a 10ms audio buffer. + rtc::BufferT<int16_t> record_buffer_; + // Contains latest delay estimate given to GetPlayoutData(). + int playout_delay_ms_ = 0; +}; + +} // namespace webrtc + +#endif // MODULES_AUDIO_DEVICE_FINE_AUDIO_BUFFER_H_ diff --git a/third_party/libwebrtc/modules/audio_device/fine_audio_buffer_unittest.cc b/third_party/libwebrtc/modules/audio_device/fine_audio_buffer_unittest.cc new file mode 100644 index 0000000000..36ea85f7dd --- /dev/null +++ b/third_party/libwebrtc/modules/audio_device/fine_audio_buffer_unittest.cc @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree.
+ */ + +#include "modules/audio_device/fine_audio_buffer.h" + +#include <limits.h> + +#include <memory> + +#include "api/array_view.h" +#include "api/task_queue/default_task_queue_factory.h" +#include "modules/audio_device/mock_audio_device_buffer.h" +#include "test/gmock.h" +#include "test/gtest.h" + +using ::testing::_; +using ::testing::AtLeast; +using ::testing::InSequence; +using ::testing::Return; + +namespace webrtc { + +const int kSampleRate = 44100; +const int kChannels = 2; +const int kSamplesPer10Ms = kSampleRate * 10 / 1000; + +// The fake audio data is 0,1,..SCHAR_MAX-1,0,1,... This is to make it easy +// to detect errors. This function verifies that the buffers contain such data. +// E.g. if there are two buffers of size 3, buffer 1 would contain 0,1,2 and +// buffer 2 would contain 3,4,5. Note that SCHAR_MAX is 127 so wrap-around +// will happen. +// `buffer` is the audio buffer to verify. +bool VerifyBuffer(const int16_t* buffer, int buffer_number, int size) { + int start_value = (buffer_number * size) % SCHAR_MAX; + for (int i = 0; i < size; ++i) { + if (buffer[i] != (i + start_value) % SCHAR_MAX) { + return false; + } + } + return true; +} + +// This function replaces the real AudioDeviceBuffer::GetPlayoutData when it's +// called (which is done implicitly when calling GetBufferData). It writes the +// sequence 0,1,..SCHAR_MAX-1,0,1,... to the buffer. Note that this is likely a +// buffer of different size than the one VerifyBuffer verifies. +// `iteration` is the number of calls made to UpdateBuffer prior to this call. +// `samples_per_10_ms` is the number of samples that should be written to the +// buffer (`arg0`). +ACTION_P2(UpdateBuffer, iteration, samples_per_10_ms) { + int16_t* buffer = static_cast<int16_t*>(arg0); + int start_value = (iteration * samples_per_10_ms) % SCHAR_MAX; + for (int i = 0; i < samples_per_10_ms; ++i) { + buffer[i] = (i + start_value) % SCHAR_MAX; + } + // Should return samples per channel. + return samples_per_10_ms / kChannels; +} + +// Writes a periodic ramp pattern to the supplied `buffer`. See UpdateBuffer() +// for details. +void UpdateInputBuffer(int16_t* buffer, int iteration, int size) { + int start_value = (iteration * size) % SCHAR_MAX; + for (int i = 0; i < size; ++i) { + buffer[i] = (i + start_value) % SCHAR_MAX; + } +} + +// Action macro which verifies that the recorded 10ms chunk of audio data +// (in `arg0`) contains the correct reference values even if they have been +// supplied using a buffer size that is smaller or larger than 10ms. +// See VerifyBuffer() for details.
+ACTION_P2(VerifyInputBuffer, iteration, samples_per_10_ms) { + const int16_t* buffer = static_cast<const int16_t*>(arg0); + int start_value = (iteration * samples_per_10_ms) % SCHAR_MAX; + for (int i = 0; i < samples_per_10_ms; ++i) { + EXPECT_EQ(buffer[i], (i + start_value) % SCHAR_MAX); + } + return 0; +} + +void RunFineBufferTest(int frame_size_in_samples) { + const int kFrameSizeSamples = frame_size_in_samples; + const int kNumberOfFrames = 5; + // Ceiling of integer division: 1 + ((x - 1) / y) + const int kNumberOfUpdateBufferCalls = + 1 + ((kNumberOfFrames * frame_size_in_samples - 1) / kSamplesPer10Ms); + + auto task_queue_factory = CreateDefaultTaskQueueFactory(); + MockAudioDeviceBuffer audio_device_buffer(task_queue_factory.get()); + audio_device_buffer.SetPlayoutSampleRate(kSampleRate); + audio_device_buffer.SetPlayoutChannels(kChannels); + audio_device_buffer.SetRecordingSampleRate(kSampleRate); + audio_device_buffer.SetRecordingChannels(kChannels); + + EXPECT_CALL(audio_device_buffer, RequestPlayoutData(_)) + .WillRepeatedly(Return(kSamplesPer10Ms)); + { + InSequence s; + for (int i = 0; i < kNumberOfUpdateBufferCalls; ++i) { + EXPECT_CALL(audio_device_buffer, GetPlayoutData(_)) + .WillOnce(UpdateBuffer(i, kChannels * kSamplesPer10Ms)) + .RetiresOnSaturation(); + } + } + { + InSequence s; + for (int j = 0; j < kNumberOfUpdateBufferCalls - 1; ++j) { + EXPECT_CALL(audio_device_buffer, SetRecordedBuffer(_, kSamplesPer10Ms)) + .WillOnce(VerifyInputBuffer(j, kChannels * kSamplesPer10Ms)) + .RetiresOnSaturation(); + } + } + EXPECT_CALL(audio_device_buffer, SetVQEData(_, _)) + .Times(kNumberOfUpdateBufferCalls - 1); + EXPECT_CALL(audio_device_buffer, DeliverRecordedData()) + .Times(kNumberOfUpdateBufferCalls - 1) + .WillRepeatedly(Return(0)); + + FineAudioBuffer fine_buffer(&audio_device_buffer); + std::unique_ptr<int16_t[]> out_buffer( + new int16_t[kChannels * kFrameSizeSamples]); + std::unique_ptr<int16_t[]> in_buffer( + new int16_t[kChannels * kFrameSizeSamples]); + + for (int i = 0; i < kNumberOfFrames; ++i) { + fine_buffer.GetPlayoutData( + rtc::ArrayView<int16_t>(out_buffer.get(), + kChannels * kFrameSizeSamples), + 0); + EXPECT_TRUE( + VerifyBuffer(out_buffer.get(), i, kChannels * kFrameSizeSamples)); + UpdateInputBuffer(in_buffer.get(), i, kChannels * kFrameSizeSamples); + fine_buffer.DeliverRecordedData( + rtc::ArrayView<const int16_t>(in_buffer.get(), + kChannels * kFrameSizeSamples), + 0); + } +} + +TEST(FineBufferTest, BufferLessThan10ms) { + const int kFrameSizeSamples = kSamplesPer10Ms - 50; + RunFineBufferTest(kFrameSizeSamples); +} + +TEST(FineBufferTest, GreaterThan10ms) { + const int kFrameSizeSamples = kSamplesPer10Ms + 50; + RunFineBufferTest(kFrameSizeSamples); +} + +} // namespace webrtc diff --git a/third_party/libwebrtc/modules/audio_device/g3doc/audio_device_module.md b/third_party/libwebrtc/modules/audio_device/g3doc/audio_device_module.md new file mode 100644 index 0000000000..93e9aca741 --- /dev/null +++ b/third_party/libwebrtc/modules/audio_device/g3doc/audio_device_module.md @@ -0,0 +1,171 @@ + + + +# Audio Device Module (ADM) + +## Overview + +The ADM (AudioDeviceModule) is responsible for driving input (microphone) and +output (speaker) audio in WebRTC and the API is defined in [audio_device.h][19]. + +Main functions of the ADM are: + +* Initialization and termination of native audio libraries. +* Registration of an [AudioTransport object][16] which handles audio callbacks + for audio in both directions. +* Device enumeration and selection (only for Linux, Windows and Mac OSX).
+* Start/Stop physical audio streams: + * Recording audio from the selected microphone, and + * playing out audio on the selected speaker. +* Level control of the active audio streams. +* Control of built-in audio effects (Audio Echo Cancellation (AEC), Audio Gain + Control (AGC) and Noise Suppression (NS)) for Android and iOS. + +ADM implementations reside at two different locations in the WebRTC repository: +`/modules/audio_device/` and `/sdk/`. The latest implementations for [iOS][20] +and [Android][21] can be found under `/sdk/`. `/modules/audio_device/` contains +older versions for mobile platforms and also implementations for desktop +platforms such as [Linux][22], [Windows][23] and [Mac OSX][24]. This document +focuses on the parts in `/modules/audio_device/`, but implementation-specific +details such as threading models are omitted to keep the descriptions as simple +as possible. + +By default, the ADM in WebRTC is created in [`WebRtcVoiceEngine::Init`][1], but +an external implementation can also be injected using +[`rtc::CreatePeerConnectionFactory`][25]. An example of where an external ADM is +injected can be found in [PeerConnectionInterfaceTest][26] where a so-called +[fake ADM][29] is utilized to avoid hardware dependency in a gtest. Clients can +also inject their own ADMs in situations where functionality is needed that is +not provided by the default implementations. + +## Background + +This section contains a historical background of the ADM API. + +The ADM interface is old and has undergone many changes over the years. It used +to be much more granular, but it still contains more than 50 methods and is +implemented on several different hardware platforms. + +Some APIs are not implemented on all platforms, and functionality can be spread +out differently between the methods. + +The most up-to-date implementations of the ADM interface are for [iOS][27] and +for [Android][28]. + +Desktop versions are not updated to comply with the latest +[C++ style guide](https://chromium.googlesource.com/chromium/src/+/main/styleguide/c++/c++.md), +and more work is also needed to improve the performance and stability of these +versions. + +## WebRtcVoiceEngine + +[`WebRtcVoiceEngine`][2] does not utilize all methods of the ADM, but it still +serves as the best example of its architecture and how to use it. For a more +detailed view of all methods in the ADM interface, see [ADM unit tests][3]. + +Assuming that an external ADM implementation is not injected, a default - or +internal - ADM is created in [`WebRtcVoiceEngine::Init`][1] using +[`AudioDeviceModule::Create`][4]. + +Basic initialization is done using a utility method called +[`adm_helpers::Init`][5] which calls fundamental ADM APIs like: + +* [`AudioDeviceModule::Init`][6] - initializes the native audio parts required + for each platform. +* [`AudioDeviceModule::SetPlayoutDevice`][7] - specifies which speaker to use + for playing out audio using an `index` retrieved by the corresponding + enumeration method [`AudioDeviceModule::PlayoutDeviceName`][8]. +* [`AudioDeviceModule::SetRecordingDevice`][9] - specifies which microphone to + use for recording audio using an `index` retrieved by the corresponding + enumeration method [`AudioDeviceModule::RecordingDeviceName`][10]. +* [`AudioDeviceModule::InitSpeaker`][11] - sets up the parts of the ADM needed + to use the selected output device. +* [`AudioDeviceModule::InitMicrophone`][12] - sets up the parts of the ADM + needed to use the selected input device.
+* [`AudioDeviceModule::SetStereoPlayout`][13] - enables playout in stereo if + the selected audio device supports it. +* [`AudioDeviceModule::SetStereoRecording`][14] - enables recording in stereo + if the selected audio device supports it. + +[`WebRtcVoiceEngine::Init`][1] also calls +[`AudioDeviceModule::RegisterAudioCallback`][15] to register an existing +[AudioTransport][16] implementation which handles audio callbacks in both +directions and therefore serves as the bridge between the native ADM and the +upper WebRTC layers. + +Recorded audio samples are delivered from the ADM to the `WebRtcVoiceEngine` +(which owns the `AudioTransport` object) via +[`AudioTransport::RecordedDataIsAvailable`][17]: + +``` +int32_t RecordedDataIsAvailable(const void* audioSamples, size_t nSamples, size_t nBytesPerSample, + size_t nChannels, uint32_t samplesPerSec, uint32_t totalDelayMS, + int32_t clockDrift, uint32_t currentMicLevel, bool keyPressed, + uint32_t& newMicLevel) +``` + +Decoded audio samples ready to be played out are delivered by the +`WebRtcVoiceEngine` to the ADM, via [`AudioTransport::NeedMorePlayData`][18]: + +``` +int32_t NeedMorePlayData(size_t nSamples, size_t nBytesPerSample, size_t nChannels, int32_t samplesPerSec, + void* audioSamples, size_t& nSamplesOut, + int64_t* elapsed_time_ms, int64_t* ntp_time_ms) +``` + +Audio samples are 16-bit [linear PCM](https://wiki.multimedia.cx/index.php/PCM) +using regular interleaving of channels within each sample. + +`WebRtcVoiceEngine` also owns an [`AudioState`][30] member, and this class is +used as a helper to start and stop audio to and from the ADM. To initialize and +start recording, it calls: + +* [`AudioDeviceModule::InitRecording`][31] +* [`AudioDeviceModule::StartRecording`][32] + +and to initialize and start playout: + +* [`AudioDeviceModule::InitPlayout`][33] +* [`AudioDeviceModule::StartPlayout`][34] + +Finally, the corresponding stop methods [`AudioDeviceModule::StopRecording`][35] +and [`AudioDeviceModule::StopPlayout`][36] are called, followed by +[`AudioDeviceModule::Terminate`][37].
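+
+Putting these calls together, the sketch below illustrates the lifecycle
+described above. It is an illustrative example rather than code taken from
+WebRTC itself: `my_transport` stands in for an `AudioTransport` implementation
+supplied by the client, device index `0` is assumed to be a valid enumeration
+index, and all error checking of the `int32_t` return values is elided.
+
+```
+#include "api/task_queue/default_task_queue_factory.h"
+#include "modules/audio_device/include/audio_device.h"
+
+void RunAdmLifecycle(webrtc::AudioTransport* my_transport) {
+  // Create the default (platform) ADM.
+  auto task_queue_factory = webrtc::CreateDefaultTaskQueueFactory();
+  rtc::scoped_refptr<webrtc::AudioDeviceModule> adm =
+      webrtc::AudioDeviceModule::Create(
+          webrtc::AudioDeviceModule::kPlatformDefaultAudio,
+          task_queue_factory.get());
+
+  adm->Init();                // Initialize the native audio parts.
+  adm->SetPlayoutDevice(0);   // Select devices by enumeration index.
+  adm->SetRecordingDevice(0);
+  adm->InitSpeaker();         // Prepare the selected output device.
+  adm->InitMicrophone();      // Prepare the selected input device.
+  adm->RegisterAudioCallback(my_transport);  // Bridge to the upper layers.
+
+  adm->InitPlayout();         // Initialize and start both directions.
+  adm->StartPlayout();
+  adm->InitRecording();
+  adm->StartRecording();
+
+  // ... audio now flows through the registered AudioTransport ...
+
+  adm->StopRecording();       // Stop the streams and terminate.
+  adm->StopPlayout();
+  adm->Terminate();
+}
+```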
+ +[1]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/media/engine/webrtc_voice_engine.cc;l=314;drc=f7b1b95f11c74cb5369fdd528b73c70a50f2e206 +[2]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/media/engine/webrtc_voice_engine.h;l=48;drc=d15a575ec3528c252419149d35977e55269d8a41 +[3]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/audio_device_unittest.cc;l=1;drc=d15a575ec3528c252419149d35977e55269d8a41 +[4]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=46;drc=eb8c4ca608486add9800f6bfb7a8ba3cf23e738e +[5]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/media/engine/adm_helpers.h;drc=2222a80e79ae1ef5cb9510ec51d3868be75f47a2 +[6]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=62;drc=9438fb3fff97c803d1ead34c0e4f223db168526f +[7]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=77;drc=9438fb3fff97c803d1ead34c0e4f223db168526f +[8]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=69;drc=9438fb3fff97c803d1ead34c0e4f223db168526f +[9]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=79;drc=9438fb3fff97c803d1ead34c0e4f223db168526f +[10]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=72;drc=9438fb3fff97c803d1ead34c0e4f223db168526f +[11]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=99;drc=9438fb3fff97c803d1ead34c0e4f223db168526f +[12]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=101;drc=9438fb3fff97c803d1ead34c0e4f223db168526f +[13]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=130;drc=9438fb3fff97c803d1ead34c0e4f223db168526f +[14]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=133;drc=9438fb3fff97c803d1ead34c0e4f223db168526f +[15]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=59;drc=9438fb3fff97c803d1ead34c0e4f223db168526f +[16]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device_defines.h;l=34;drc=9438fb3fff97c803d1ead34c0e4f223db168526f +[17]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device_defines.h;l=36;drc=9438fb3fff97c803d1ead34c0e4f223db168526f +[18]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device_defines.h;l=48;drc=9438fb3fff97c803d1ead34c0e4f223db168526f +[19]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;drc=eb8c4ca608486add9800f6bfb7a8ba3cf23e738es +[20]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/sdk/objc/native/api/audio_device_module.h;drc=76443eafa9375374d9f1d23da2b913f2acac6ac2 +[21]: 
https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/sdk/android/src/jni/audio_device/audio_device_module.h;drc=bbeb10925eb106eeed6143ccf571bc438ec22ce1 +[22]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/linux/;drc=d15a575ec3528c252419149d35977e55269d8a41 +[23]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/win/;drc=d15a575ec3528c252419149d35977e55269d8a41 +[24]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/mac/;drc=3b68aa346a5d3483c3448852d19d91723846825c +[25]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/api/create_peerconnection_factory.h;l=45;drc=09ceed2165137c4bea4e02e8d3db31970d0bf273 +[26]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/pc/peer_connection_interface_unittest.cc;l=692;drc=2efb8a5ec61b1b87475d046c03d20244f53b14b6 +[27]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/sdk/objc/native/api/audio_device_module.h;drc=76443eafa9375374d9f1d23da2b913f2acac6ac2 +[28]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/sdk/android/src/jni/audio_device/audio_device_module.h;drc=bbeb10925eb106eeed6143ccf571bc438ec22ce1 +[29]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/pc/test/fake_audio_capture_module.h;l=42;drc=d15a575ec3528c252419149d35977e55269d8a41 +[30]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/audio/audio_state.h;drc=d15a575ec3528c252419149d35977e55269d8a41 +[31]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=87;drc=eb8c4ca608486add9800f6bfb7a8ba3cf23e738e +[32]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=94;drc=eb8c4ca608486add9800f6bfb7a8ba3cf23e738e +[33]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=84;drc=eb8c4ca608486add9800f6bfb7a8ba3cf23e738e +[34]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=91;drc=eb8c4ca608486add9800f6bfb7a8ba3cf23e738e +[35]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=95;drc=eb8c4ca608486add9800f6bfb7a8ba3cf23e738e +[36]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=92;drc=eb8c4ca608486add9800f6bfb7a8ba3cf23e738e +[37]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=63;drc=eb8c4ca608486add9800f6bfb7a8ba3cf23e738e diff --git a/third_party/libwebrtc/modules/audio_device/include/audio_device.h b/third_party/libwebrtc/modules/audio_device/include/audio_device.h new file mode 100644 index 0000000000..936ee6cb04 --- /dev/null +++ b/third_party/libwebrtc/modules/audio_device/include/audio_device.h @@ -0,0 +1,194 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. 
All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_H_ +#define MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_H_ + +#include "absl/types/optional.h" +#include "api/scoped_refptr.h" +#include "api/task_queue/task_queue_factory.h" +#include "modules/audio_device/include/audio_device_defines.h" +#include "rtc_base/ref_count.h" + +namespace webrtc { + +class AudioDeviceModuleForTest; + +class AudioDeviceModule : public rtc::RefCountInterface { + public: + enum AudioLayer { + kPlatformDefaultAudio = 0, + kWindowsCoreAudio, + kWindowsCoreAudio2, + kLinuxAlsaAudio, + kLinuxPulseAudio, + kAndroidJavaAudio, + kAndroidOpenSLESAudio, + kAndroidJavaInputAndOpenSLESOutputAudio, + kAndroidAAudioAudio, + kAndroidJavaInputAndAAudioOutputAudio, + kDummyAudio, + }; + + enum WindowsDeviceType { + kDefaultCommunicationDevice = -1, + kDefaultDevice = -2 + }; + + struct Stats { + // The fields below correspond to similarly-named fields in the WebRTC stats + // spec. https://w3c.github.io/webrtc-stats/#playoutstats-dict* + double synthesized_samples_duration_s = 0; + uint64_t synthesized_samples_events = 0; + double total_samples_duration_s = 0; + double total_playout_delay_s = 0; + uint64_t total_samples_count = 0; + }; + + public: + // Creates a default ADM for usage in production code. + static rtc::scoped_refptr<AudioDeviceModule> Create( + AudioLayer audio_layer, + TaskQueueFactory* task_queue_factory); + // Creates an ADM with support for extra test methods. Don't use this factory + // in production code. + static rtc::scoped_refptr<AudioDeviceModuleForTest> CreateForTest( + AudioLayer audio_layer, + TaskQueueFactory* task_queue_factory); + + // Retrieve the currently utilized audio layer + virtual int32_t ActiveAudioLayer(AudioLayer* audioLayer) const = 0; + + // Full-duplex transportation of PCM audio + virtual int32_t RegisterAudioCallback(AudioTransport* audioCallback) = 0; + + // Main initialization and termination + virtual int32_t Init() = 0; + virtual int32_t Terminate() = 0; + virtual bool Initialized() const = 0; + + // Device enumeration + virtual int16_t PlayoutDevices() = 0; + virtual int16_t RecordingDevices() = 0; + virtual int32_t PlayoutDeviceName(uint16_t index, + char name[kAdmMaxDeviceNameSize], + char guid[kAdmMaxGuidSize]) = 0; + virtual int32_t RecordingDeviceName(uint16_t index, + char name[kAdmMaxDeviceNameSize], + char guid[kAdmMaxGuidSize]) = 0; + + // Device selection + virtual int32_t SetPlayoutDevice(uint16_t index) = 0; + virtual int32_t SetPlayoutDevice(WindowsDeviceType device) = 0; + virtual int32_t SetRecordingDevice(uint16_t index) = 0; + virtual int32_t SetRecordingDevice(WindowsDeviceType device) = 0; + + // Audio transport initialization + virtual int32_t PlayoutIsAvailable(bool* available) = 0; + virtual int32_t InitPlayout() = 0; + virtual bool PlayoutIsInitialized() const = 0; + virtual int32_t RecordingIsAvailable(bool* available) = 0; + virtual int32_t InitRecording() = 0; + virtual bool RecordingIsInitialized() const = 0; + + // Audio transport control + virtual int32_t StartPlayout() = 0; + virtual int32_t StopPlayout() = 0; + virtual bool Playing() const = 0; + virtual int32_t StartRecording() = 0; + virtual int32_t StopRecording() = 0; + virtual bool Recording() const = 0; + + // Audio mixer initialization + virtual int32_t InitSpeaker() = 0; + virtual bool SpeakerIsInitialized() const = 0; + virtual int32_t InitMicrophone() = 0; + virtual bool MicrophoneIsInitialized() const = 0; + + // Speaker
+  virtual int32_t SpeakerVolumeIsAvailable(bool* available) = 0;
+  virtual int32_t SetSpeakerVolume(uint32_t volume) = 0;
+  virtual int32_t SpeakerVolume(uint32_t* volume) const = 0;
+  virtual int32_t MaxSpeakerVolume(uint32_t* maxVolume) const = 0;
+  virtual int32_t MinSpeakerVolume(uint32_t* minVolume) const = 0;
+
+  // Microphone volume controls
+  virtual int32_t MicrophoneVolumeIsAvailable(bool* available) = 0;
+  virtual int32_t SetMicrophoneVolume(uint32_t volume) = 0;
+  virtual int32_t MicrophoneVolume(uint32_t* volume) const = 0;
+  virtual int32_t MaxMicrophoneVolume(uint32_t* maxVolume) const = 0;
+  virtual int32_t MinMicrophoneVolume(uint32_t* minVolume) const = 0;
+
+  // Speaker mute control
+  virtual int32_t SpeakerMuteIsAvailable(bool* available) = 0;
+  virtual int32_t SetSpeakerMute(bool enable) = 0;
+  virtual int32_t SpeakerMute(bool* enabled) const = 0;
+
+  // Microphone mute control
+  virtual int32_t MicrophoneMuteIsAvailable(bool* available) = 0;
+  virtual int32_t SetMicrophoneMute(bool enable) = 0;
+  virtual int32_t MicrophoneMute(bool* enabled) const = 0;
+
+  // Stereo support
+  virtual int32_t StereoPlayoutIsAvailable(bool* available) const = 0;
+  virtual int32_t SetStereoPlayout(bool enable) = 0;
+  virtual int32_t StereoPlayout(bool* enabled) const = 0;
+  virtual int32_t StereoRecordingIsAvailable(bool* available) const = 0;
+  virtual int32_t SetStereoRecording(bool enable) = 0;
+  virtual int32_t StereoRecording(bool* enabled) const = 0;
+
+  // Playout delay
+  virtual int32_t PlayoutDelay(uint16_t* delayMS) const = 0;
+
+  // Only supported on Android.
+  virtual bool BuiltInAECIsAvailable() const = 0;
+  virtual bool BuiltInAGCIsAvailable() const = 0;
+  virtual bool BuiltInNSIsAvailable() const = 0;
+
+  // Enables the built-in audio effects. Only supported on Android.
+  virtual int32_t EnableBuiltInAEC(bool enable) = 0;
+  virtual int32_t EnableBuiltInAGC(bool enable) = 0;
+  virtual int32_t EnableBuiltInNS(bool enable) = 0;
+
+  // Play underrun count. Only supported on Android.
+  // TODO(alexnarest): Make it abstract after upstream projects support it.
+  virtual int32_t GetPlayoutUnderrunCount() const { return -1; }
+
+  // Used to generate RTC stats. If not implemented, RTCAudioPlayoutStats will
+  // not be present in the stats.
+  virtual absl::optional<Stats> GetStats() const { return absl::nullopt; }
+
+// Only supported on iOS.
+#if defined(WEBRTC_IOS)
+  virtual int GetPlayoutAudioParameters(AudioParameters* params) const = 0;
+  virtual int GetRecordAudioParameters(AudioParameters* params) const = 0;
+#endif  // WEBRTC_IOS
+
+ protected:
+  ~AudioDeviceModule() override {}
+};
+
+// Extends the default ADM interface with some extra test methods.
+// Intended for usage in tests only and requires a unique factory method.
+class AudioDeviceModuleForTest : public AudioDeviceModule {
+ public:
+  // Triggers internal restart sequences of audio streaming. Can be used by
+  // tests to emulate events corresponding to e.g. removal of an active audio
+  // device or other actions which cause the stream to be disconnected.
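+  //
+  // Example (hypothetical test code, assuming a `task_queue_factory` and the
+  // gtest macros are available in the surrounding fixture):
+  //
+  //   rtc::scoped_refptr<AudioDeviceModuleForTest> adm =
+  //       AudioDeviceModule::CreateForTest(
+  //           AudioDeviceModule::kPlatformDefaultAudio, task_queue_factory);
+  //   adm->Init();
+  //   adm->InitPlayout();
+  //   adm->StartPlayout();
+  //   // Emulate the active output device being unplugged mid-stream
+  //   // (0 is assumed to signal success here).
+  //   EXPECT_EQ(0, adm->RestartPlayoutInternally());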
+ virtual int RestartPlayoutInternally() = 0; + virtual int RestartRecordingInternally() = 0; + + virtual int SetPlayoutSampleRate(uint32_t sample_rate) = 0; + virtual int SetRecordingSampleRate(uint32_t sample_rate) = 0; +}; + +} // namespace webrtc + +#endif // MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_H_ diff --git a/third_party/libwebrtc/modules/audio_device/include/audio_device_data_observer.h b/third_party/libwebrtc/modules/audio_device/include/audio_device_data_observer.h new file mode 100644 index 0000000000..36dc45f19e --- /dev/null +++ b/third_party/libwebrtc/modules/audio_device/include/audio_device_data_observer.h @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_DATA_OBSERVER_H_ +#define MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_DATA_OBSERVER_H_ + +#include +#include + +#include "absl/base/attributes.h" +#include "api/scoped_refptr.h" +#include "api/task_queue/task_queue_factory.h" +#include "modules/audio_device/include/audio_device.h" + +namespace webrtc { + +// This interface will capture the raw PCM data of both the local captured as +// well as the mixed/rendered remote audio. +class AudioDeviceDataObserver { + public: + virtual void OnCaptureData(const void* audio_samples, + size_t num_samples, + size_t bytes_per_sample, + size_t num_channels, + uint32_t samples_per_sec) = 0; + + virtual void OnRenderData(const void* audio_samples, + size_t num_samples, + size_t bytes_per_sample, + size_t num_channels, + uint32_t samples_per_sec) = 0; + + AudioDeviceDataObserver() = default; + virtual ~AudioDeviceDataObserver() = default; +}; + +// Creates an ADMWrapper around an ADM instance that registers +// the provided AudioDeviceDataObserver. +rtc::scoped_refptr CreateAudioDeviceWithDataObserver( + rtc::scoped_refptr impl, + std::unique_ptr observer); + +// Creates an ADMWrapper around an ADM instance that registers +// the provided AudioDeviceDataObserver. +ABSL_DEPRECATED("") +rtc::scoped_refptr CreateAudioDeviceWithDataObserver( + rtc::scoped_refptr impl, + AudioDeviceDataObserver* observer); + +// Creates an ADM instance with AudioDeviceDataObserver registered. +rtc::scoped_refptr CreateAudioDeviceWithDataObserver( + AudioDeviceModule::AudioLayer audio_layer, + TaskQueueFactory* task_queue_factory, + std::unique_ptr observer); + +// Creates an ADM instance with AudioDeviceDataObserver registered. +ABSL_DEPRECATED("") +rtc::scoped_refptr CreateAudioDeviceWithDataObserver( + AudioDeviceModule::AudioLayer audio_layer, + TaskQueueFactory* task_queue_factory, + AudioDeviceDataObserver* observer); + +} // namespace webrtc + +#endif // MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_DATA_OBSERVER_H_ diff --git a/third_party/libwebrtc/modules/audio_device/include/audio_device_default.h b/third_party/libwebrtc/modules/audio_device/include/audio_device_default.h new file mode 100644 index 0000000000..3779d6fb3b --- /dev/null +++ b/third_party/libwebrtc/modules/audio_device/include/audio_device_default.h @@ -0,0 +1,132 @@ +/* + * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_DEFAULT_H_ +#define MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_DEFAULT_H_ + +#include "modules/audio_device/include/audio_device.h" + +namespace webrtc { +namespace webrtc_impl { + +// AudioDeviceModuleDefault template adds default implementation for all +// AudioDeviceModule methods to the class, which inherits from +// AudioDeviceModuleDefault. +template +class AudioDeviceModuleDefault : public T { + public: + AudioDeviceModuleDefault() {} + virtual ~AudioDeviceModuleDefault() {} + + int32_t RegisterAudioCallback(AudioTransport* audioCallback) override { + return 0; + } + int32_t Init() override { return 0; } + int32_t InitSpeaker() override { return 0; } + int32_t SetPlayoutDevice(uint16_t index) override { return 0; } + int32_t SetPlayoutDevice( + AudioDeviceModule::WindowsDeviceType device) override { + return 0; + } + int32_t SetStereoPlayout(bool enable) override { return 0; } + int32_t StopPlayout() override { return 0; } + int32_t InitMicrophone() override { return 0; } + int32_t SetRecordingDevice(uint16_t index) override { return 0; } + int32_t SetRecordingDevice( + AudioDeviceModule::WindowsDeviceType device) override { + return 0; + } + int32_t SetStereoRecording(bool enable) override { return 0; } + int32_t StopRecording() override { return 0; } + + int32_t Terminate() override { return 0; } + + int32_t ActiveAudioLayer( + AudioDeviceModule::AudioLayer* audioLayer) const override { + return 0; + } + bool Initialized() const override { return true; } + int16_t PlayoutDevices() override { return 0; } + int16_t RecordingDevices() override { return 0; } + int32_t PlayoutDeviceName(uint16_t index, + char name[kAdmMaxDeviceNameSize], + char guid[kAdmMaxGuidSize]) override { + return 0; + } + int32_t RecordingDeviceName(uint16_t index, + char name[kAdmMaxDeviceNameSize], + char guid[kAdmMaxGuidSize]) override { + return 0; + } + int32_t PlayoutIsAvailable(bool* available) override { return 0; } + int32_t InitPlayout() override { return 0; } + bool PlayoutIsInitialized() const override { return true; } + int32_t RecordingIsAvailable(bool* available) override { return 0; } + int32_t InitRecording() override { return 0; } + bool RecordingIsInitialized() const override { return true; } + int32_t StartPlayout() override { return 0; } + bool Playing() const override { return false; } + int32_t StartRecording() override { return 0; } + bool Recording() const override { return false; } + bool SpeakerIsInitialized() const override { return true; } + bool MicrophoneIsInitialized() const override { return true; } + int32_t SpeakerVolumeIsAvailable(bool* available) override { return 0; } + int32_t SetSpeakerVolume(uint32_t volume) override { return 0; } + int32_t SpeakerVolume(uint32_t* volume) const override { return 0; } + int32_t MaxSpeakerVolume(uint32_t* maxVolume) const override { return 0; } + int32_t MinSpeakerVolume(uint32_t* minVolume) const override { return 0; } + int32_t MicrophoneVolumeIsAvailable(bool* available) override { return 0; } + int32_t SetMicrophoneVolume(uint32_t volume) override { return 0; } + int32_t MicrophoneVolume(uint32_t* volume) const override { return 0; } + int32_t 
MaxMicrophoneVolume(uint32_t* maxVolume) const override { return 0; } + int32_t MinMicrophoneVolume(uint32_t* minVolume) const override { return 0; } + int32_t SpeakerMuteIsAvailable(bool* available) override { return 0; } + int32_t SetSpeakerMute(bool enable) override { return 0; } + int32_t SpeakerMute(bool* enabled) const override { return 0; } + int32_t MicrophoneMuteIsAvailable(bool* available) override { return 0; } + int32_t SetMicrophoneMute(bool enable) override { return 0; } + int32_t MicrophoneMute(bool* enabled) const override { return 0; } + int32_t StereoPlayoutIsAvailable(bool* available) const override { + *available = false; + return 0; + } + int32_t StereoPlayout(bool* enabled) const override { return 0; } + int32_t StereoRecordingIsAvailable(bool* available) const override { + *available = false; + return 0; + } + int32_t StereoRecording(bool* enabled) const override { return 0; } + int32_t PlayoutDelay(uint16_t* delayMS) const override { + *delayMS = 0; + return 0; + } + bool BuiltInAECIsAvailable() const override { return false; } + int32_t EnableBuiltInAEC(bool enable) override { return -1; } + bool BuiltInAGCIsAvailable() const override { return false; } + int32_t EnableBuiltInAGC(bool enable) override { return -1; } + bool BuiltInNSIsAvailable() const override { return false; } + int32_t EnableBuiltInNS(bool enable) override { return -1; } + + int32_t GetPlayoutUnderrunCount() const override { return -1; } + +#if defined(WEBRTC_IOS) + int GetPlayoutAudioParameters(AudioParameters* params) const override { + return -1; + } + int GetRecordAudioParameters(AudioParameters* params) const override { + return -1; + } +#endif // WEBRTC_IOS +}; + +} // namespace webrtc_impl +} // namespace webrtc + +#endif // MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_DEFAULT_H_ diff --git a/third_party/libwebrtc/modules/audio_device/include/audio_device_defines.h b/third_party/libwebrtc/modules/audio_device/include/audio_device_defines.h new file mode 100644 index 0000000000..d677d41f69 --- /dev/null +++ b/third_party/libwebrtc/modules/audio_device/include/audio_device_defines.h @@ -0,0 +1,177 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_DEFINES_H_
+#define MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_DEFINES_H_
+
+#include <stddef.h>
+
+#include <string>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/strings/string_builder.h"
+
+namespace webrtc {
+
+static const int kAdmMaxDeviceNameSize = 128;
+static const int kAdmMaxFileNameSize = 512;
+static const int kAdmMaxGuidSize = 128;
+
+static const int kAdmMinPlayoutBufferSizeMs = 10;
+static const int kAdmMaxPlayoutBufferSizeMs = 250;
+
+// ----------------------------------------------------------------------------
+//  AudioTransport
+// ----------------------------------------------------------------------------
+
+class AudioTransport {
+ public:
+  // TODO(bugs.webrtc.org/13620) Deprecate this function
+  virtual int32_t RecordedDataIsAvailable(const void* audioSamples,
+                                          size_t nSamples,
+                                          size_t nBytesPerSample,
+                                          size_t nChannels,
+                                          uint32_t samplesPerSec,
+                                          uint32_t totalDelayMS,
+                                          int32_t clockDrift,
+                                          uint32_t currentMicLevel,
+                                          bool keyPressed,
+                                          uint32_t& newMicLevel) = 0;  // NOLINT
+
+  virtual int32_t RecordedDataIsAvailable(
+      const void* audioSamples,
+      size_t nSamples,
+      size_t nBytesPerSample,
+      size_t nChannels,
+      uint32_t samplesPerSec,
+      uint32_t totalDelayMS,
+      int32_t clockDrift,
+      uint32_t currentMicLevel,
+      bool keyPressed,
+      uint32_t& newMicLevel,
+      absl::optional<int64_t> estimatedCaptureTimeNS) {  // NOLINT
+    // TODO(webrtc:13620) Make the default behavior of the new API match the
+    // old API. This can be made pure virtual once all uses of the old API are
+    // removed.
+    return RecordedDataIsAvailable(
+        audioSamples, nSamples, nBytesPerSample, nChannels, samplesPerSec,
+        totalDelayMS, clockDrift, currentMicLevel, keyPressed, newMicLevel);
+  }
+
+  // Implementation has to set up safe values for all specified out parameters.
+  virtual int32_t NeedMorePlayData(size_t nSamples,
+                                   size_t nBytesPerSample,
+                                   size_t nChannels,
+                                   uint32_t samplesPerSec,
+                                   void* audioSamples,
+                                   size_t& nSamplesOut,  // NOLINT
+                                   int64_t* elapsed_time_ms,
+                                   int64_t* ntp_time_ms) = 0;  // NOLINT
+
+  // Method to pull mixed render audio data from all active VoE channels.
+  // The data will not be passed as reference for audio processing internally.
+  virtual void PullRenderData(int bits_per_sample,
+                              int sample_rate,
+                              size_t number_of_channels,
+                              size_t number_of_frames,
+                              void* audio_data,
+                              int64_t* elapsed_time_ms,
+                              int64_t* ntp_time_ms) = 0;
+
+ protected:
+  virtual ~AudioTransport() {}
+};
+
+// Helper class for storage of fundamental audio parameters such as sample
+// rate, number of channels, native buffer size etc.
+// Note that one audio frame can contain more than one channel sample and each
+// sample is assumed to be a 16-bit PCM sample. Hence, one audio frame in
+// stereo contains 2 * (16/8) = 4 bytes of data.
+class AudioParameters {
+ public:
+  // This implementation only supports 16-bit PCM samples.
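+  // Worked example (illustrative numbers, not from the source): at 48000 Hz
+  // stereo, frames_per_10ms_buffer() is 48000 / 100 = 480, GetBytesPerFrame()
+  // is 2 * (16 / 8) = 4, and GetBytesPer10msBuffer() is 480 * 4 = 1920 bytes.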
+ static const size_t kBitsPerSample = 16; + AudioParameters() + : sample_rate_(0), + channels_(0), + frames_per_buffer_(0), + frames_per_10ms_buffer_(0) {} + AudioParameters(int sample_rate, size_t channels, size_t frames_per_buffer) + : sample_rate_(sample_rate), + channels_(channels), + frames_per_buffer_(frames_per_buffer), + frames_per_10ms_buffer_(static_cast(sample_rate / 100)) {} + void reset(int sample_rate, size_t channels, size_t frames_per_buffer) { + sample_rate_ = sample_rate; + channels_ = channels; + frames_per_buffer_ = frames_per_buffer; + frames_per_10ms_buffer_ = static_cast(sample_rate / 100); + } + size_t bits_per_sample() const { return kBitsPerSample; } + void reset(int sample_rate, size_t channels, double buffer_duration) { + reset(sample_rate, channels, + static_cast(sample_rate * buffer_duration + 0.5)); + } + void reset(int sample_rate, size_t channels) { + reset(sample_rate, channels, static_cast(0)); + } + int sample_rate() const { return sample_rate_; } + size_t channels() const { return channels_; } + size_t frames_per_buffer() const { return frames_per_buffer_; } + size_t frames_per_10ms_buffer() const { return frames_per_10ms_buffer_; } + size_t GetBytesPerFrame() const { return channels_ * kBitsPerSample / 8; } + size_t GetBytesPerBuffer() const { + return frames_per_buffer_ * GetBytesPerFrame(); + } + // The WebRTC audio device buffer (ADB) only requires that the sample rate + // and number of channels are configured. Hence, to be "valid", only these + // two attributes must be set. + bool is_valid() const { return ((sample_rate_ > 0) && (channels_ > 0)); } + // Most platforms also require that a native buffer size is defined. + // An audio parameter instance is considered to be "complete" if it is both + // "valid" (can be used by the ADB) and also has a native frame size. + bool is_complete() const { return (is_valid() && (frames_per_buffer_ > 0)); } + size_t GetBytesPer10msBuffer() const { + return frames_per_10ms_buffer_ * GetBytesPerFrame(); + } + double GetBufferSizeInMilliseconds() const { + if (sample_rate_ == 0) + return 0.0; + return frames_per_buffer_ / (sample_rate_ / 1000.0); + } + double GetBufferSizeInSeconds() const { + if (sample_rate_ == 0) + return 0.0; + return static_cast(frames_per_buffer_) / (sample_rate_); + } + std::string ToString() const { + char ss_buf[1024]; + rtc::SimpleStringBuilder ss(ss_buf); + ss << "AudioParameters: "; + ss << "sample_rate=" << sample_rate() << ", channels=" << channels(); + ss << ", frames_per_buffer=" << frames_per_buffer(); + ss << ", frames_per_10ms_buffer=" << frames_per_10ms_buffer(); + ss << ", bytes_per_frame=" << GetBytesPerFrame(); + ss << ", bytes_per_buffer=" << GetBytesPerBuffer(); + ss << ", bytes_per_10ms_buffer=" << GetBytesPer10msBuffer(); + ss << ", size_in_ms=" << GetBufferSizeInMilliseconds(); + return ss.str(); + } + + private: + int sample_rate_; + size_t channels_; + size_t frames_per_buffer_; + size_t frames_per_10ms_buffer_; +}; + +} // namespace webrtc + +#endif // MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_DEFINES_H_ diff --git a/third_party/libwebrtc/modules/audio_device/include/audio_device_factory.cc b/third_party/libwebrtc/modules/audio_device/include/audio_device_factory.cc new file mode 100644 index 0000000000..130e096e6d --- /dev/null +++ b/third_party/libwebrtc/modules/audio_device/include/audio_device_factory.cc @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/audio_device/include/audio_device_factory.h" + +#include + +#if defined(WEBRTC_WIN) +#include "modules/audio_device/win/audio_device_module_win.h" +#include "modules/audio_device/win/core_audio_input_win.h" +#include "modules/audio_device/win/core_audio_output_win.h" +#include "modules/audio_device/win/core_audio_utility_win.h" +#endif + +#include "api/task_queue/task_queue_factory.h" +#include "rtc_base/logging.h" + +namespace webrtc { + +rtc::scoped_refptr CreateWindowsCoreAudioAudioDeviceModule( + TaskQueueFactory* task_queue_factory, + bool automatic_restart) { + RTC_DLOG(LS_INFO) << __FUNCTION__; + return CreateWindowsCoreAudioAudioDeviceModuleForTest(task_queue_factory, + automatic_restart); +} + +rtc::scoped_refptr +CreateWindowsCoreAudioAudioDeviceModuleForTest( + TaskQueueFactory* task_queue_factory, + bool automatic_restart) { + RTC_DLOG(LS_INFO) << __FUNCTION__; + // Returns NULL if Core Audio is not supported or if COM has not been + // initialized correctly using ScopedCOMInitializer. + if (!webrtc_win::core_audio_utility::IsSupported()) { + RTC_LOG(LS_ERROR) + << "Unable to create ADM since Core Audio is not supported"; + return nullptr; + } + return CreateWindowsCoreAudioAudioDeviceModuleFromInputAndOutput( + std::make_unique(automatic_restart), + std::make_unique(automatic_restart), + task_queue_factory); +} + +} // namespace webrtc diff --git a/third_party/libwebrtc/modules/audio_device/include/audio_device_factory.h b/third_party/libwebrtc/modules/audio_device/include/audio_device_factory.h new file mode 100644 index 0000000000..edd7686b8e --- /dev/null +++ b/third_party/libwebrtc/modules/audio_device/include/audio_device_factory.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_FACTORY_H_ +#define MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_FACTORY_H_ + +#include + +#include "api/task_queue/task_queue_factory.h" +#include "modules/audio_device/include/audio_device.h" + +namespace webrtc { + +// Creates an AudioDeviceModule (ADM) for Windows based on the Core Audio API. +// The creating thread must be a COM thread; otherwise nullptr will be returned. +// By default `automatic_restart` is set to true and it results in support for +// automatic restart of audio if e.g. the existing device is removed. If set to +// false, no attempt to restart audio is performed under these conditions. +// +// Example (assuming webrtc namespace): +// +// public: +// rtc::scoped_refptr CreateAudioDevice() { +// task_queue_factory_ = CreateDefaultTaskQueueFactory(); +// // Tell COM that this thread shall live in the MTA. 
+// com_initializer_ = std::make_unique( +// ScopedCOMInitializer::kMTA); +// if (!com_initializer_->Succeeded()) { +// return nullptr; +// } +// // Create the ADM with support for automatic restart if devices are +// // unplugged. +// return CreateWindowsCoreAudioAudioDeviceModule( +// task_queue_factory_.get()); +// } +// +// private: +// std::unique_ptr com_initializer_; +// std::unique_ptr task_queue_factory_; +// +rtc::scoped_refptr CreateWindowsCoreAudioAudioDeviceModule( + TaskQueueFactory* task_queue_factory, + bool automatic_restart = true); + +rtc::scoped_refptr +CreateWindowsCoreAudioAudioDeviceModuleForTest( + TaskQueueFactory* task_queue_factory, + bool automatic_restart = true); + +} // namespace webrtc + +#endif // MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_FACTORY_H_ diff --git a/third_party/libwebrtc/modules/audio_device/include/fake_audio_device.h b/third_party/libwebrtc/modules/audio_device/include/fake_audio_device.h new file mode 100644 index 0000000000..2322ce0263 --- /dev/null +++ b/third_party/libwebrtc/modules/audio_device/include/fake_audio_device.h @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef MODULES_AUDIO_DEVICE_INCLUDE_FAKE_AUDIO_DEVICE_H_ +#define MODULES_AUDIO_DEVICE_INCLUDE_FAKE_AUDIO_DEVICE_H_ + +#include "modules/audio_device/include/audio_device.h" +#include "modules/audio_device/include/audio_device_default.h" + +namespace webrtc { + +class FakeAudioDeviceModule + : public webrtc_impl::AudioDeviceModuleDefault { + public: + // TODO(bugs.webrtc.org/12701): Fix all users of this class to managed + // references using scoped_refptr. Current code doesn't always use refcounting + // for this class. + void AddRef() const override {} + rtc::RefCountReleaseStatus Release() const override { + return rtc::RefCountReleaseStatus::kDroppedLastRef; + } +}; + +} // namespace webrtc + +#endif // MODULES_AUDIO_DEVICE_INCLUDE_FAKE_AUDIO_DEVICE_H_ diff --git a/third_party/libwebrtc/modules/audio_device/include/mock_audio_device.h b/third_party/libwebrtc/modules/audio_device/include/mock_audio_device.h new file mode 100644 index 0000000000..73fbdd547d --- /dev/null +++ b/third_party/libwebrtc/modules/audio_device/include/mock_audio_device.h @@ -0,0 +1,156 @@ +/* + * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef MODULES_AUDIO_DEVICE_INCLUDE_MOCK_AUDIO_DEVICE_H_ +#define MODULES_AUDIO_DEVICE_INCLUDE_MOCK_AUDIO_DEVICE_H_ + +#include + +#include "api/make_ref_counted.h" +#include "modules/audio_device/include/audio_device.h" +#include "test/gmock.h" + +namespace webrtc { +namespace test { + +class MockAudioDeviceModule : public AudioDeviceModule { + public: + static rtc::scoped_refptr CreateNice() { + return rtc::make_ref_counted<::testing::NiceMock>(); + } + static rtc::scoped_refptr CreateStrict() { + return rtc::make_ref_counted< + ::testing::StrictMock>(); + } + + // AudioDeviceModule. + MOCK_METHOD(int32_t, + ActiveAudioLayer, + (AudioLayer * audioLayer), + (const, override)); + MOCK_METHOD(int32_t, + RegisterAudioCallback, + (AudioTransport * audioCallback), + (override)); + MOCK_METHOD(int32_t, Init, (), (override)); + MOCK_METHOD(int32_t, Terminate, (), (override)); + MOCK_METHOD(bool, Initialized, (), (const, override)); + MOCK_METHOD(int16_t, PlayoutDevices, (), (override)); + MOCK_METHOD(int16_t, RecordingDevices, (), (override)); + MOCK_METHOD(int32_t, + PlayoutDeviceName, + (uint16_t index, + char name[kAdmMaxDeviceNameSize], + char guid[kAdmMaxGuidSize]), + (override)); + MOCK_METHOD(int32_t, + RecordingDeviceName, + (uint16_t index, + char name[kAdmMaxDeviceNameSize], + char guid[kAdmMaxGuidSize]), + (override)); + MOCK_METHOD(int32_t, SetPlayoutDevice, (uint16_t index), (override)); + MOCK_METHOD(int32_t, + SetPlayoutDevice, + (WindowsDeviceType device), + (override)); + MOCK_METHOD(int32_t, SetRecordingDevice, (uint16_t index), (override)); + MOCK_METHOD(int32_t, + SetRecordingDevice, + (WindowsDeviceType device), + (override)); + MOCK_METHOD(int32_t, PlayoutIsAvailable, (bool* available), (override)); + MOCK_METHOD(int32_t, InitPlayout, (), (override)); + MOCK_METHOD(bool, PlayoutIsInitialized, (), (const, override)); + MOCK_METHOD(int32_t, RecordingIsAvailable, (bool* available), (override)); + MOCK_METHOD(int32_t, InitRecording, (), (override)); + MOCK_METHOD(bool, RecordingIsInitialized, (), (const, override)); + MOCK_METHOD(int32_t, StartPlayout, (), (override)); + MOCK_METHOD(int32_t, StopPlayout, (), (override)); + MOCK_METHOD(bool, Playing, (), (const, override)); + MOCK_METHOD(int32_t, StartRecording, (), (override)); + MOCK_METHOD(int32_t, StopRecording, (), (override)); + MOCK_METHOD(bool, Recording, (), (const, override)); + MOCK_METHOD(int32_t, InitSpeaker, (), (override)); + MOCK_METHOD(bool, SpeakerIsInitialized, (), (const, override)); + MOCK_METHOD(int32_t, InitMicrophone, (), (override)); + MOCK_METHOD(bool, MicrophoneIsInitialized, (), (const, override)); + MOCK_METHOD(int32_t, SpeakerVolumeIsAvailable, (bool* available), (override)); + MOCK_METHOD(int32_t, SetSpeakerVolume, (uint32_t volume), (override)); + MOCK_METHOD(int32_t, SpeakerVolume, (uint32_t * volume), (const, override)); + MOCK_METHOD(int32_t, + MaxSpeakerVolume, + (uint32_t * maxVolume), + (const, override)); + MOCK_METHOD(int32_t, + MinSpeakerVolume, + (uint32_t * minVolume), + (const, override)); + MOCK_METHOD(int32_t, + MicrophoneVolumeIsAvailable, + (bool* available), + (override)); + MOCK_METHOD(int32_t, SetMicrophoneVolume, (uint32_t volume), (override)); + MOCK_METHOD(int32_t, + MicrophoneVolume, + (uint32_t * volume), + (const, override)); + MOCK_METHOD(int32_t, + MaxMicrophoneVolume, + (uint32_t * maxVolume), + (const, override)); + MOCK_METHOD(int32_t, + MinMicrophoneVolume, + (uint32_t * minVolume), + (const, override)); + MOCK_METHOD(int32_t, SpeakerMuteIsAvailable, (bool* 
available), (override)); + MOCK_METHOD(int32_t, SetSpeakerMute, (bool enable), (override)); + MOCK_METHOD(int32_t, SpeakerMute, (bool* enabled), (const, override)); + MOCK_METHOD(int32_t, + MicrophoneMuteIsAvailable, + (bool* available), + (override)); + MOCK_METHOD(int32_t, SetMicrophoneMute, (bool enable), (override)); + MOCK_METHOD(int32_t, MicrophoneMute, (bool* enabled), (const, override)); + MOCK_METHOD(int32_t, + StereoPlayoutIsAvailable, + (bool* available), + (const, override)); + MOCK_METHOD(int32_t, SetStereoPlayout, (bool enable), (override)); + MOCK_METHOD(int32_t, StereoPlayout, (bool* enabled), (const, override)); + MOCK_METHOD(int32_t, + StereoRecordingIsAvailable, + (bool* available), + (const, override)); + MOCK_METHOD(int32_t, SetStereoRecording, (bool enable), (override)); + MOCK_METHOD(int32_t, StereoRecording, (bool* enabled), (const, override)); + MOCK_METHOD(int32_t, PlayoutDelay, (uint16_t * delayMS), (const, override)); + MOCK_METHOD(bool, BuiltInAECIsAvailable, (), (const, override)); + MOCK_METHOD(bool, BuiltInAGCIsAvailable, (), (const, override)); + MOCK_METHOD(bool, BuiltInNSIsAvailable, (), (const, override)); + MOCK_METHOD(int32_t, EnableBuiltInAEC, (bool enable), (override)); + MOCK_METHOD(int32_t, EnableBuiltInAGC, (bool enable), (override)); + MOCK_METHOD(int32_t, EnableBuiltInNS, (bool enable), (override)); + MOCK_METHOD(int32_t, GetPlayoutUnderrunCount, (), (const, override)); +#if defined(WEBRTC_IOS) + MOCK_METHOD(int, + GetPlayoutAudioParameters, + (AudioParameters * params), + (const, override)); + MOCK_METHOD(int, + GetRecordAudioParameters, + (AudioParameters * params), + (const, override)); +#endif // WEBRTC_IOS +}; +} // namespace test +} // namespace webrtc + +#endif // MODULES_AUDIO_DEVICE_INCLUDE_MOCK_AUDIO_DEVICE_H_ diff --git a/third_party/libwebrtc/modules/audio_device/include/mock_audio_transport.h b/third_party/libwebrtc/modules/audio_device/include/mock_audio_transport.h new file mode 100644 index 0000000000..b886967319 --- /dev/null +++ b/third_party/libwebrtc/modules/audio_device/include/mock_audio_transport.h @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef MODULES_AUDIO_DEVICE_INCLUDE_MOCK_AUDIO_TRANSPORT_H_ +#define MODULES_AUDIO_DEVICE_INCLUDE_MOCK_AUDIO_TRANSPORT_H_ + +#include "modules/audio_device/include/audio_device_defines.h" +#include "test/gmock.h" + +namespace webrtc { +namespace test { + +class MockAudioTransport : public AudioTransport { + public: + MockAudioTransport() {} + ~MockAudioTransport() {} + + MOCK_METHOD(int32_t, + RecordedDataIsAvailable, + (const void* audioSamples, + size_t nSamples, + size_t nBytesPerSample, + size_t nChannels, + uint32_t samplesPerSec, + uint32_t totalDelayMS, + int32_t clockDrift, + uint32_t currentMicLevel, + bool keyPressed, + uint32_t& newMicLevel), + (override)); + + MOCK_METHOD(int32_t, + RecordedDataIsAvailable, + (const void* audioSamples, + size_t nSamples, + size_t nBytesPerSample, + size_t nChannels, + uint32_t samplesPerSec, + uint32_t totalDelayMS, + int32_t clockDrift, + uint32_t currentMicLevel, + bool keyPressed, + uint32_t& newMicLevel, + absl::optional estimated_capture_time_ns), + (override)); + + MOCK_METHOD(int32_t, + NeedMorePlayData, + (size_t nSamples, + size_t nBytesPerSample, + size_t nChannels, + uint32_t samplesPerSec, + void* audioSamples, + size_t& nSamplesOut, + int64_t* elapsed_time_ms, + int64_t* ntp_time_ms), + (override)); + + MOCK_METHOD(void, + PullRenderData, + (int bits_per_sample, + int sample_rate, + size_t number_of_channels, + size_t number_of_frames, + void* audio_data, + int64_t* elapsed_time_ms, + int64_t* ntp_time_ms), + (override)); +}; + +} // namespace test +} // namespace webrtc + +#endif // MODULES_AUDIO_DEVICE_INCLUDE_MOCK_AUDIO_TRANSPORT_H_ diff --git a/third_party/libwebrtc/modules/audio_device/include/test_audio_device.cc b/third_party/libwebrtc/modules/audio_device/include/test_audio_device.cc new file mode 100644 index 0000000000..4c29c98f2c --- /dev/null +++ b/third_party/libwebrtc/modules/audio_device/include/test_audio_device.cc @@ -0,0 +1,540 @@ +/* + * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "modules/audio_device/include/test_audio_device.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "api/array_view.h" +#include "api/make_ref_counted.h" +#include "common_audio/wav_file.h" +#include "modules/audio_device/audio_device_impl.h" +#include "modules/audio_device/include/audio_device_default.h" +#include "modules/audio_device/test_audio_device_impl.h" +#include "rtc_base/buffer.h" +#include "rtc_base/checks.h" +#include "rtc_base/event.h" +#include "rtc_base/logging.h" +#include "rtc_base/numerics/safe_conversions.h" +#include "rtc_base/platform_thread.h" +#include "rtc_base/random.h" +#include "rtc_base/synchronization/mutex.h" +#include "rtc_base/task_queue.h" +#include "rtc_base/task_utils/repeating_task.h" +#include "rtc_base/thread_annotations.h" +#include "rtc_base/time_utils.h" + +namespace webrtc { + +namespace { + +constexpr int kFrameLengthUs = 10000; +constexpr int kFramesPerSecond = rtc::kNumMicrosecsPerSec / kFrameLengthUs; + +class TestAudioDeviceModuleImpl : public AudioDeviceModuleImpl { + public: + TestAudioDeviceModuleImpl( + TaskQueueFactory* task_queue_factory, + std::unique_ptr capturer, + std::unique_ptr renderer, + float speed = 1) + : AudioDeviceModuleImpl( + AudioLayer::kDummyAudio, + std::make_unique(task_queue_factory, + std::move(capturer), + std::move(renderer), + speed), + task_queue_factory, + /*create_detached=*/true) {} + + ~TestAudioDeviceModuleImpl() override = default; +}; + +// A fake capturer that generates pulses with random samples between +// -max_amplitude and +max_amplitude. +class PulsedNoiseCapturerImpl final + : public TestAudioDeviceModule::PulsedNoiseCapturer { + public: + // Assuming 10ms audio packets. 
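+  // For example (illustrative numbers, not from the source): with
+  // max_amplitude = 1000 at 48000 Hz mono, every Capture() call fills
+  // 480 samples, alternating between an all-zero frame and a frame of
+  // uniformly distributed random values in [-1000, 1000].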
+ PulsedNoiseCapturerImpl(int16_t max_amplitude, + int sampling_frequency_in_hz, + int num_channels) + : sampling_frequency_in_hz_(sampling_frequency_in_hz), + fill_with_zero_(false), + random_generator_(1), + max_amplitude_(max_amplitude), + num_channels_(num_channels) { + RTC_DCHECK_GT(max_amplitude, 0); + } + + int SamplingFrequency() const override { return sampling_frequency_in_hz_; } + + int NumChannels() const override { return num_channels_; } + + bool Capture(rtc::BufferT* buffer) override { + fill_with_zero_ = !fill_with_zero_; + int16_t max_amplitude; + { + MutexLock lock(&lock_); + max_amplitude = max_amplitude_; + } + buffer->SetData( + TestAudioDeviceModule::SamplesPerFrame(sampling_frequency_in_hz_) * + num_channels_, + [&](rtc::ArrayView data) { + if (fill_with_zero_) { + std::fill(data.begin(), data.end(), 0); + } else { + std::generate(data.begin(), data.end(), [&]() { + return random_generator_.Rand(-max_amplitude, max_amplitude); + }); + } + return data.size(); + }); + return true; + } + + void SetMaxAmplitude(int16_t amplitude) override { + MutexLock lock(&lock_); + max_amplitude_ = amplitude; + } + + private: + int sampling_frequency_in_hz_; + bool fill_with_zero_; + Random random_generator_; + Mutex lock_; + int16_t max_amplitude_ RTC_GUARDED_BY(lock_); + const int num_channels_; +}; + +class WavFileReader final : public TestAudioDeviceModule::Capturer { + public: + WavFileReader(absl::string_view filename, + int sampling_frequency_in_hz, + int num_channels, + bool repeat) + : WavFileReader(std::make_unique(filename), + sampling_frequency_in_hz, + num_channels, + repeat) {} + + int SamplingFrequency() const override { return sampling_frequency_in_hz_; } + + int NumChannels() const override { return num_channels_; } + + bool Capture(rtc::BufferT* buffer) override { + buffer->SetData( + TestAudioDeviceModule::SamplesPerFrame(sampling_frequency_in_hz_) * + num_channels_, + [&](rtc::ArrayView data) { + size_t read = wav_reader_->ReadSamples(data.size(), data.data()); + if (read < data.size() && repeat_) { + do { + wav_reader_->Reset(); + size_t delta = wav_reader_->ReadSamples( + data.size() - read, data.subview(read).data()); + RTC_CHECK_GT(delta, 0) << "No new data read from file"; + read += delta; + } while (read < data.size()); + } + return read; + }); + return buffer->size() > 0; + } + + private: + WavFileReader(std::unique_ptr wav_reader, + int sampling_frequency_in_hz, + int num_channels, + bool repeat) + : sampling_frequency_in_hz_(sampling_frequency_in_hz), + num_channels_(num_channels), + wav_reader_(std::move(wav_reader)), + repeat_(repeat) { + RTC_CHECK_EQ(wav_reader_->sample_rate(), sampling_frequency_in_hz); + RTC_CHECK_EQ(wav_reader_->num_channels(), num_channels); + } + + const int sampling_frequency_in_hz_; + const int num_channels_; + std::unique_ptr wav_reader_; + const bool repeat_; +}; + +class WavFileWriter final : public TestAudioDeviceModule::Renderer { + public: + WavFileWriter(absl::string_view filename, + int sampling_frequency_in_hz, + int num_channels) + : WavFileWriter(std::make_unique(filename, + sampling_frequency_in_hz, + num_channels), + sampling_frequency_in_hz, + num_channels) {} + + int SamplingFrequency() const override { return sampling_frequency_in_hz_; } + + int NumChannels() const override { return num_channels_; } + + bool Render(rtc::ArrayView data) override { + wav_writer_->WriteSamples(data.data(), data.size()); + return true; + } + + private: + WavFileWriter(std::unique_ptr wav_writer, + int sampling_frequency_in_hz, + int 
num_channels) + : sampling_frequency_in_hz_(sampling_frequency_in_hz), + wav_writer_(std::move(wav_writer)), + num_channels_(num_channels) {} + + int sampling_frequency_in_hz_; + std::unique_ptr wav_writer_; + const int num_channels_; +}; + +class BoundedWavFileWriter : public TestAudioDeviceModule::Renderer { + public: + BoundedWavFileWriter(absl::string_view filename, + int sampling_frequency_in_hz, + int num_channels) + : sampling_frequency_in_hz_(sampling_frequency_in_hz), + wav_writer_(filename, sampling_frequency_in_hz, num_channels), + num_channels_(num_channels), + silent_audio_( + TestAudioDeviceModule::SamplesPerFrame(sampling_frequency_in_hz) * + num_channels, + 0), + started_writing_(false), + trailing_zeros_(0) {} + + int SamplingFrequency() const override { return sampling_frequency_in_hz_; } + + int NumChannels() const override { return num_channels_; } + + bool Render(rtc::ArrayView data) override { + const int16_t kAmplitudeThreshold = 5; + + const int16_t* begin = data.begin(); + const int16_t* end = data.end(); + if (!started_writing_) { + // Cut off silence at the beginning. + while (begin < end) { + if (std::abs(*begin) > kAmplitudeThreshold) { + started_writing_ = true; + break; + } + ++begin; + } + } + if (started_writing_) { + // Cut off silence at the end. + while (begin < end) { + if (*(end - 1) != 0) { + break; + } + --end; + } + if (begin < end) { + // If it turns out that the silence was not final, need to write all the + // skipped zeros and continue writing audio. + while (trailing_zeros_ > 0) { + const size_t zeros_to_write = + std::min(trailing_zeros_, silent_audio_.size()); + wav_writer_.WriteSamples(silent_audio_.data(), zeros_to_write); + trailing_zeros_ -= zeros_to_write; + } + wav_writer_.WriteSamples(begin, end - begin); + } + // Save the number of zeros we skipped in case this needs to be restored. 
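+      // `data.end() - end` counts the zeros trimmed from the tail of this
+      // frame; the loop above writes them out only if non-silent audio
+      // arrives in a later frame.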
+ trailing_zeros_ += data.end() - end; + } + return true; + } + + private: + int sampling_frequency_in_hz_; + WavWriter wav_writer_; + const int num_channels_; + std::vector silent_audio_; + bool started_writing_; + size_t trailing_zeros_; +}; + +class DiscardRenderer final : public TestAudioDeviceModule::Renderer { + public: + explicit DiscardRenderer(int sampling_frequency_in_hz, int num_channels) + : sampling_frequency_in_hz_(sampling_frequency_in_hz), + num_channels_(num_channels) {} + + int SamplingFrequency() const override { return sampling_frequency_in_hz_; } + + int NumChannels() const override { return num_channels_; } + + bool Render(rtc::ArrayView data) override { return true; } + + private: + int sampling_frequency_in_hz_; + const int num_channels_; +}; + +class RawFileReader final : public TestAudioDeviceModule::Capturer { + public: + RawFileReader(absl::string_view input_file_name, + int sampling_frequency_in_hz, + int num_channels, + bool repeat) + : input_file_name_(input_file_name), + sampling_frequency_in_hz_(sampling_frequency_in_hz), + num_channels_(num_channels), + repeat_(repeat), + read_buffer_( + TestAudioDeviceModule::SamplesPerFrame(sampling_frequency_in_hz) * + num_channels * 2, + 0) { + input_file_ = FileWrapper::OpenReadOnly(input_file_name_); + RTC_CHECK(input_file_.is_open()) + << "Failed to open audio input file: " << input_file_name_; + } + + ~RawFileReader() override { input_file_.Close(); } + + int SamplingFrequency() const override { return sampling_frequency_in_hz_; } + + int NumChannels() const override { return num_channels_; } + + bool Capture(rtc::BufferT* buffer) override { + buffer->SetData( + TestAudioDeviceModule::SamplesPerFrame(SamplingFrequency()) * + NumChannels(), + [&](rtc::ArrayView data) { + rtc::ArrayView read_buffer_view = ReadBufferView(); + size_t size = data.size() * 2; + size_t read = input_file_.Read(read_buffer_view.data(), size); + if (read < size && repeat_) { + do { + input_file_.Rewind(); + size_t delta = input_file_.Read( + read_buffer_view.subview(read).data(), size - read); + RTC_CHECK_GT(delta, 0) << "No new data to read from file"; + read += delta; + } while (read < size); + } + memcpy(data.data(), read_buffer_view.data(), size); + return read / 2; + }); + return buffer->size() > 0; + } + + private: + rtc::ArrayView ReadBufferView() { return read_buffer_; } + + const std::string input_file_name_; + const int sampling_frequency_in_hz_; + const int num_channels_; + const bool repeat_; + FileWrapper input_file_; + std::vector read_buffer_; +}; + +class RawFileWriter : public TestAudioDeviceModule::Renderer { + public: + RawFileWriter(absl::string_view output_file_name, + int sampling_frequency_in_hz, + int num_channels) + : output_file_name_(output_file_name), + sampling_frequency_in_hz_(sampling_frequency_in_hz), + num_channels_(num_channels), + silent_audio_( + TestAudioDeviceModule::SamplesPerFrame(sampling_frequency_in_hz) * + num_channels * 2, + 0), + write_buffer_( + TestAudioDeviceModule::SamplesPerFrame(sampling_frequency_in_hz) * + num_channels * 2, + 0), + started_writing_(false), + trailing_zeros_(0) { + output_file_ = FileWrapper::OpenWriteOnly(output_file_name_); + RTC_CHECK(output_file_.is_open()) + << "Failed to open playout file" << output_file_name_; + } + ~RawFileWriter() override { output_file_.Close(); } + + int SamplingFrequency() const override { return sampling_frequency_in_hz_; } + + int NumChannels() const override { return num_channels_; } + + bool Render(rtc::ArrayView data) override { + const 
int16_t kAmplitudeThreshold = 5; + + const int16_t* begin = data.begin(); + const int16_t* end = data.end(); + if (!started_writing_) { + // Cut off silence at the beginning. + while (begin < end) { + if (std::abs(*begin) > kAmplitudeThreshold) { + started_writing_ = true; + break; + } + ++begin; + } + } + if (started_writing_) { + // Cut off silence at the end. + while (begin < end) { + if (*(end - 1) != 0) { + break; + } + --end; + } + if (begin < end) { + // If it turns out that the silence was not final, need to write all the + // skipped zeros and continue writing audio. + while (trailing_zeros_ > 0) { + const size_t zeros_to_write = + std::min(trailing_zeros_, silent_audio_.size()); + output_file_.Write(silent_audio_.data(), zeros_to_write * 2); + trailing_zeros_ -= zeros_to_write; + } + WriteInt16(begin, end); + } + // Save the number of zeros we skipped in case this needs to be restored. + trailing_zeros_ += data.end() - end; + } + return true; + } + + private: + void WriteInt16(const int16_t* begin, const int16_t* end) { + int size = (end - begin) * sizeof(int16_t); + memcpy(write_buffer_.data(), begin, size); + output_file_.Write(write_buffer_.data(), size); + } + + const std::string output_file_name_; + const int sampling_frequency_in_hz_; + const int num_channels_; + FileWrapper output_file_; + std::vector silent_audio_; + std::vector write_buffer_; + bool started_writing_; + size_t trailing_zeros_; +}; + +} // namespace + +size_t TestAudioDeviceModule::SamplesPerFrame(int sampling_frequency_in_hz) { + return rtc::CheckedDivExact(sampling_frequency_in_hz, kFramesPerSecond); +} + +rtc::scoped_refptr TestAudioDeviceModule::Create( + TaskQueueFactory* task_queue_factory, + std::unique_ptr capturer, + std::unique_ptr renderer, + float speed) { + auto audio_device = rtc::make_ref_counted( + task_queue_factory, std::move(capturer), std::move(renderer), speed); + + // Ensure that the current platform is supported. + if (audio_device->CheckPlatform() == -1) { + return nullptr; + } + + // Create the platform-dependent implementation. + if (audio_device->CreatePlatformSpecificObjects() == -1) { + return nullptr; + } + + // Ensure that the generic audio buffer can communicate with the platform + // specific parts. 
+ if (audio_device->AttachAudioBuffer() == -1) { + return nullptr; + } + + return audio_device; +} + +std::unique_ptr +TestAudioDeviceModule::CreatePulsedNoiseCapturer(int16_t max_amplitude, + int sampling_frequency_in_hz, + int num_channels) { + return std::make_unique( + max_amplitude, sampling_frequency_in_hz, num_channels); +} + +std::unique_ptr +TestAudioDeviceModule::CreateDiscardRenderer(int sampling_frequency_in_hz, + int num_channels) { + return std::make_unique(sampling_frequency_in_hz, + num_channels); +} + +std::unique_ptr +TestAudioDeviceModule::CreateWavFileReader(absl::string_view filename, + int sampling_frequency_in_hz, + int num_channels) { + return std::make_unique(filename, sampling_frequency_in_hz, + num_channels, false); +} + +std::unique_ptr +TestAudioDeviceModule::CreateWavFileReader(absl::string_view filename, + bool repeat) { + WavReader reader(filename); + int sampling_frequency_in_hz = reader.sample_rate(); + int num_channels = rtc::checked_cast(reader.num_channels()); + return std::make_unique(filename, sampling_frequency_in_hz, + num_channels, repeat); +} + +std::unique_ptr +TestAudioDeviceModule::CreateWavFileWriter(absl::string_view filename, + int sampling_frequency_in_hz, + int num_channels) { + return std::make_unique(filename, sampling_frequency_in_hz, + num_channels); +} + +std::unique_ptr +TestAudioDeviceModule::CreateBoundedWavFileWriter(absl::string_view filename, + int sampling_frequency_in_hz, + int num_channels) { + return std::make_unique( + filename, sampling_frequency_in_hz, num_channels); +} + +std::unique_ptr +TestAudioDeviceModule::CreateRawFileReader(absl::string_view filename, + int sampling_frequency_in_hz, + int num_channels, + bool repeat) { + return std::make_unique(filename, sampling_frequency_in_hz, + num_channels, repeat); +} + +std::unique_ptr +TestAudioDeviceModule::CreateRawFileWriter(absl::string_view filename, + int sampling_frequency_in_hz, + int num_channels) { + return std::make_unique(filename, sampling_frequency_in_hz, + num_channels); +} + +} // namespace webrtc diff --git a/third_party/libwebrtc/modules/audio_device/include/test_audio_device.h b/third_party/libwebrtc/modules/audio_device/include/test_audio_device.h new file mode 100644 index 0000000000..4b2d755ae1 --- /dev/null +++ b/third_party/libwebrtc/modules/audio_device/include/test_audio_device.h @@ -0,0 +1,155 @@ +/* + * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef MODULES_AUDIO_DEVICE_INCLUDE_TEST_AUDIO_DEVICE_H_ +#define MODULES_AUDIO_DEVICE_INCLUDE_TEST_AUDIO_DEVICE_H_ + +#include +#include + +#include +#include + +#include "absl/strings/string_view.h" +#include "api/array_view.h" +#include "api/scoped_refptr.h" +#include "api/task_queue/task_queue_factory.h" +#include "modules/audio_device/include/audio_device.h" +#include "modules/audio_device/include/audio_device_defines.h" +#include "rtc_base/buffer.h" + +namespace webrtc { + +// This is test API and is in development, so it can be changed/removed without +// notice. + +// This class exists for historical reasons. For now it only contains static +// methods to create test AudioDeviceModule. 
Implementation details of that
+// module are considered private. This class isn't intended to be instantiated.
+class TestAudioDeviceModule {
+ public:
+  // Returns the number of samples that Capturers and Renderers with this
+  // sampling frequency will work with every time Capture or Render is called.
+  static size_t SamplesPerFrame(int sampling_frequency_in_hz);
+
+  class Capturer {
+   public:
+    virtual ~Capturer() {}
+    // Returns the sampling frequency in Hz of the audio data that this
+    // capturer produces.
+    virtual int SamplingFrequency() const = 0;
+    // Returns the number of channels of captured audio data.
+    virtual int NumChannels() const = 0;
+    // Replaces the contents of `buffer` with 10ms of captured audio data
+    // (see TestAudioDeviceModule::SamplesPerFrame). Returns true if the
+    // capturer can keep producing data, or false when the capture finishes.
+    virtual bool Capture(rtc::BufferT<int16_t>* buffer) = 0;
+  };
+
+  class Renderer {
+   public:
+    virtual ~Renderer() {}
+    // Returns the sampling frequency in Hz of the audio data that this
+    // renderer receives.
+    virtual int SamplingFrequency() const = 0;
+    // Returns the number of channels required of the audio data to be
+    // rendered.
+    virtual int NumChannels() const = 0;
+    // Renders the passed audio data and returns true if the renderer wants
+    // to keep receiving data, or false otherwise.
+    virtual bool Render(rtc::ArrayView<const int16_t> data) = 0;
+  };
+
+  // A fake capturer that generates pulses with random samples between
+  // -max_amplitude and +max_amplitude.
+  class PulsedNoiseCapturer : public Capturer {
+   public:
+    ~PulsedNoiseCapturer() override {}
+
+    virtual void SetMaxAmplitude(int16_t amplitude) = 0;
+  };
+
+  // Creates a new TestAudioDeviceModule. When capturing or playing, 10 ms
+  // audio frames will be processed every 10ms / `speed`.
+  // `capturer` is an object that produces audio data. Can be nullptr if this
+  // device is never used for recording.
+  // `renderer` is an object that receives audio data that would have been
+  // played out. Can be nullptr if this device is never used for playing.
+  // Use one of the Create... functions to get these instances.
+  static rtc::scoped_refptr<AudioDeviceModule> Create(
+      TaskQueueFactory* task_queue_factory,
+      std::unique_ptr<Capturer> capturer,
+      std::unique_ptr<Renderer> renderer,
+      float speed = 1);
+
+  // Returns a Capturer instance that generates a signal of `num_channels`
+  // channels where every other frame is zero and the remaining frames are
+  // evenly distributed random noise with max amplitude `max_amplitude`.
+  static std::unique_ptr<PulsedNoiseCapturer> CreatePulsedNoiseCapturer(
+      int16_t max_amplitude,
+      int sampling_frequency_in_hz,
+      int num_channels = 1);
+
+  // Returns a Renderer instance that does nothing with the audio data.
+  static std::unique_ptr<Renderer> CreateDiscardRenderer(
+      int sampling_frequency_in_hz,
+      int num_channels = 1);
+
+  // WavReader and WavWriter creation based on file name.
+
+  // Returns a Capturer instance that gets its data from a WAV file. The sample
+  // rate and number of channels will be checked against the WAV file.
+  static std::unique_ptr<Capturer> CreateWavFileReader(
+      absl::string_view filename,
+      int sampling_frequency_in_hz,
+      int num_channels = 1);
+
+  // Returns a Capturer instance that gets its data from a file.
+  // Automatically detects the sample rate and number of channels.
+  // `repeat` - if true, the file will be replayed from the start when the end
+  // of file is reached.
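+  //
+  // Example (hypothetical file name):
+  //
+  //   std::unique_ptr<TestAudioDeviceModule::Capturer> capturer =
+  //       TestAudioDeviceModule::CreateWavFileReader("input_audio.wav",
+  //                                                  /*repeat=*/true);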
+ static std::unique_ptr CreateWavFileReader( + absl::string_view filename, + bool repeat = false); + + // Returns a Renderer instance that writes its data to a file. + static std::unique_ptr CreateWavFileWriter( + absl::string_view filename, + int sampling_frequency_in_hz, + int num_channels = 1); + + // Returns a Renderer instance that writes its data to a WAV file, cutting + // off silence at the beginning (not necessarily perfect silence, see + // kAmplitudeThreshold) and at the end (only actual 0 samples in this case). + static std::unique_ptr CreateBoundedWavFileWriter( + absl::string_view filename, + int sampling_frequency_in_hz, + int num_channels = 1); + + // Returns a Capturer instance that gets its data from a raw file (*.raw). + static std::unique_ptr CreateRawFileReader( + absl::string_view filename, + int sampling_frequency_in_hz = 48000, + int num_channels = 2, + bool repeat = true); + + // Returns a Renderer instance that writes its data to a raw file (*.raw), + // cutting off silence at the beginning (not necessarily perfect silence, see + // kAmplitudeThreshold) and at the end (only actual 0 samples in this case). + static std::unique_ptr CreateRawFileWriter( + absl::string_view filename, + int sampling_frequency_in_hz = 48000, + int num_channels = 2); + + private: + TestAudioDeviceModule() = default; +}; + +} // namespace webrtc + +#endif // MODULES_AUDIO_DEVICE_INCLUDE_TEST_AUDIO_DEVICE_H_ diff --git a/third_party/libwebrtc/modules/audio_device/include/test_audio_device_unittest.cc b/third_party/libwebrtc/modules/audio_device/include/test_audio_device_unittest.cc new file mode 100644 index 0000000000..7a122ca84b --- /dev/null +++ b/third_party/libwebrtc/modules/audio_device/include/test_audio_device_unittest.cc @@ -0,0 +1,528 @@ +/* + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/audio_device/include/test_audio_device.h" + +#include +#include +#include +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "api/task_queue/task_queue_factory.h" +#include "api/units/time_delta.h" +#include "api/units/timestamp.h" +#include "common_audio/wav_file.h" +#include "common_audio/wav_header.h" +#include "modules/audio_device/include/audio_device_defines.h" +#include "rtc_base/checks.h" +#include "rtc_base/logging.h" +#include "rtc_base/synchronization/mutex.h" +#include "test/gmock.h" +#include "test/gtest.h" +#include "test/testsupport/file_utils.h" +#include "test/time_controller/simulated_time_controller.h" + +namespace webrtc { +namespace { + +void RunWavTest(const std::vector& input_samples, + const std::vector& expected_samples) { + const ::testing::TestInfo* const test_info = + ::testing::UnitTest::GetInstance()->current_test_info(); + + const std::string output_filename = + test::OutputPath() + "BoundedWavFileWriterTest_" + test_info->name() + + "_" + std::to_string(std::rand()) + ".wav"; + + static const size_t kSamplesPerFrame = 8; + static const int kSampleRate = kSamplesPerFrame * 100; + EXPECT_EQ(TestAudioDeviceModule::SamplesPerFrame(kSampleRate), + kSamplesPerFrame); + + // Test through file name API. 
+  {
+    std::unique_ptr<TestAudioDeviceModule::Renderer> writer =
+        TestAudioDeviceModule::CreateBoundedWavFileWriter(output_filename, 800);
+
+    for (size_t i = 0; i < input_samples.size(); i += kSamplesPerFrame) {
+      EXPECT_TRUE(writer->Render(rtc::ArrayView<const int16_t>(
+          &input_samples[i],
+          std::min(kSamplesPerFrame, input_samples.size() - i))));
+    }
+  }
+
+  {
+    WavReader reader(output_filename);
+    std::vector<int16_t> read_samples(expected_samples.size());
+    EXPECT_EQ(expected_samples.size(),
+              reader.ReadSamples(read_samples.size(), read_samples.data()));
+    EXPECT_EQ(expected_samples, read_samples);
+
+    EXPECT_EQ(0u, reader.ReadSamples(read_samples.size(), read_samples.data()));
+  }
+
+  remove(output_filename.c_str());
+}
+
+TEST(BoundedWavFileWriterTest, NoSilence) {
+  static const std::vector<int16_t> kInputSamples = {
+      75,   1234,  243,    -1231, -22222, 0,    3,      88,
+      1222, -1213, -13222, -7,    -3525,  5787, -25247, 8};
+  static const std::vector<int16_t> kExpectedSamples = kInputSamples;
+  RunWavTest(kInputSamples, kExpectedSamples);
+}
+
+TEST(BoundedWavFileWriterTest, SomeStartSilence) {
+  static const std::vector<int16_t> kInputSamples = {
+      0, 0, 0, 0, 3, 0, 0, 0, 0, 3, -13222, -7, -3525, 5787, -25247, 8};
+  static const std::vector<int16_t> kExpectedSamples(kInputSamples.begin() + 10,
+                                                     kInputSamples.end());
+  RunWavTest(kInputSamples, kExpectedSamples);
+}
+
+TEST(BoundedWavFileWriterTest, NegativeStartSilence) {
+  static const std::vector<int16_t> kInputSamples = {
+      0, -4, -6, 0, 3, 0, 0, 0, 0, 3, -13222, -7, -3525, 5787, -25247, 8};
+  static const std::vector<int16_t> kExpectedSamples(kInputSamples.begin() + 2,
+                                                     kInputSamples.end());
+  RunWavTest(kInputSamples, kExpectedSamples);
+}
+
+TEST(BoundedWavFileWriterTest, SomeEndSilence) {
+  static const std::vector<int16_t> kInputSamples = {
+      75, 1234, 243, -1231, -22222, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+  static const std::vector<int16_t> kExpectedSamples(kInputSamples.begin(),
+                                                     kInputSamples.end() - 9);
+  RunWavTest(kInputSamples, kExpectedSamples);
+}
+
+TEST(BoundedWavFileWriterTest, DoubleEndSilence) {
+  static const std::vector<int16_t> kInputSamples = {
+      75, 1234,  243,    -1231, -22222, 0,    0, 0,
+      0,  -1213, -13222, -7,    -3525,  5787, 0, 0};
+  static const std::vector<int16_t> kExpectedSamples(kInputSamples.begin(),
+                                                     kInputSamples.end() - 2);
+  RunWavTest(kInputSamples, kExpectedSamples);
+}
+
+TEST(BoundedWavFileWriterTest, DoubleSilence) {
+  static const std::vector<int16_t> kInputSamples = {0,     -1213, -13222, -7,
+                                                     -3525, 5787,  0,      0};
+  static const std::vector<int16_t> kExpectedSamples(kInputSamples.begin() + 1,
+                                                     kInputSamples.end() - 2);
+  RunWavTest(kInputSamples, kExpectedSamples);
+}
+
+TEST(BoundedWavFileWriterTest, EndSilenceCutoff) {
+  static const std::vector<int16_t> kInputSamples = {
+      75, 1234, 243, -1231, -22222, 0, 1, 0, 0, 0, 0};
+  static const std::vector<int16_t> kExpectedSamples(kInputSamples.begin(),
+                                                     kInputSamples.end() - 4);
+  RunWavTest(kInputSamples, kExpectedSamples);
+}
+
+TEST(WavFileReaderTest, RepeatedTrueWithSingleFrameFileReadTwice) {
+  static const std::vector<int16_t> kInputSamples = {75,     1234, 243, -1231,
+                                                     -22222, 0,    3,   88};
+  static const rtc::BufferT<int16_t> kExpectedSamples(kInputSamples.data(),
+                                                      kInputSamples.size());
+
+  const std::string output_filename = test::OutputPath() +
+                                      "WavFileReaderTest_RepeatedTrue_" +
+                                      std::to_string(std::rand()) + ".wav";
+
+  static const size_t kSamplesPerFrame = 8;
+  static const int kSampleRate = kSamplesPerFrame * 100;
+  EXPECT_EQ(TestAudioDeviceModule::SamplesPerFrame(kSampleRate),
+            kSamplesPerFrame);
+
+  // Create a WAV file to read.
+  {
+    std::unique_ptr<TestAudioDeviceModule::Renderer> writer =
+        TestAudioDeviceModule::CreateWavFileWriter(output_filename, 800);
+
+    for (size_t i = 0; i < kInputSamples.size(); i += kSamplesPerFrame) {
+      EXPECT_TRUE(writer->Render(rtc::ArrayView<const int16_t>(
+          &kInputSamples[i],
+          std::min(kSamplesPerFrame, kInputSamples.size() - i))));
+    }
+  }
+
+  {
+    std::unique_ptr<TestAudioDeviceModule::Capturer> reader =
+        TestAudioDeviceModule::CreateWavFileReader(output_filename, true);
+    rtc::BufferT<int16_t> buffer(kExpectedSamples.size());
+    EXPECT_TRUE(reader->Capture(&buffer));
+    EXPECT_EQ(kExpectedSamples, buffer);
+    EXPECT_TRUE(reader->Capture(&buffer));
+    EXPECT_EQ(kExpectedSamples, buffer);
+  }
+
+  remove(output_filename.c_str());
+}
+
+void RunRawTestNoRepeat(const std::vector<int16_t>& input_samples,
+                        const std::vector<int16_t>& expected_samples) {
+  const ::testing::TestInfo* const test_info =
+      ::testing::UnitTest::GetInstance()->current_test_info();
+
+  const std::string output_filename = test::OutputPath() + "RawFileTest_" +
+                                      test_info->name() + "_" +
+                                      std::to_string(std::rand()) + ".raw";
+
+  static const size_t kSamplesPerFrame = 8;
+  static const int kSampleRate = kSamplesPerFrame * 100;
+  EXPECT_EQ(TestAudioDeviceModule::SamplesPerFrame(kSampleRate),
+            kSamplesPerFrame);
+
+  // Test through file name API.
+  {
+    std::unique_ptr<TestAudioDeviceModule::Renderer> writer =
+        TestAudioDeviceModule::CreateRawFileWriter(
+            output_filename, /*sampling_frequency_in_hz=*/800);
+
+    for (size_t i = 0; i < input_samples.size(); i += kSamplesPerFrame) {
+      EXPECT_TRUE(writer->Render(rtc::ArrayView<const int16_t>(
+          &input_samples[i],
+          std::min(kSamplesPerFrame, input_samples.size() - i))));
+    }
+  }
+
+  {
+    std::unique_ptr<TestAudioDeviceModule::Capturer> reader =
+        TestAudioDeviceModule::CreateRawFileReader(
+            output_filename, /*sampling_frequency_in_hz=*/800,
+            /*num_channels=*/2, /*repeat=*/false);
+    rtc::BufferT<int16_t> buffer(expected_samples.size());
+    rtc::BufferT<int16_t> expected_buffer(expected_samples.size());
+    expected_buffer.SetData(expected_samples);
+    EXPECT_TRUE(reader->Capture(&buffer));
+    EXPECT_EQ(expected_buffer, buffer);
+    EXPECT_FALSE(reader->Capture(&buffer));
+    EXPECT_TRUE(buffer.empty());
+  }
+
+  remove(output_filename.c_str());
+}
+
+TEST(RawFileWriterTest, NoSilence) {
+  static const std::vector<int16_t> kInputSamples = {
+      75,   1234,  243,    -1231, -22222, 0,    3,      88,
+      1222, -1213, -13222, -7,    -3525,  5787, -25247, 8};
+  static const std::vector<int16_t> kExpectedSamples = kInputSamples;
+  RunRawTestNoRepeat(kInputSamples, kExpectedSamples);
+}
+
+TEST(RawFileWriterTest, SomeStartSilence) {
+  static const std::vector<int16_t> kInputSamples = {
+      0, 0, 0, 0, 3, 0, 0, 0, 0, 3, -13222, -7, -3525, 5787, -25247, 8};
+  static const std::vector<int16_t> kExpectedSamples(kInputSamples.begin() + 10,
+                                                     kInputSamples.end());
+  RunRawTestNoRepeat(kInputSamples, kExpectedSamples);
+}
+
+TEST(RawFileWriterTest, NegativeStartSilence) {
+  static const std::vector<int16_t> kInputSamples = {
+      0, -4, -6, 0, 3, 0, 0, 0, 0, 3, -13222, -7, -3525, 5787, -25247, 8};
+  static const std::vector<int16_t> kExpectedSamples(kInputSamples.begin() + 2,
+                                                     kInputSamples.end());
+  RunRawTestNoRepeat(kInputSamples, kExpectedSamples);
+}
+
+TEST(RawFileWriterTest, SomeEndSilence) {
+  static const std::vector<int16_t> kInputSamples = {
+      75, 1234, 243, -1231, -22222, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+  static const std::vector<int16_t> kExpectedSamples(kInputSamples.begin(),
+                                                     kInputSamples.end() - 9);
+  RunRawTestNoRepeat(kInputSamples, kExpectedSamples);
+}
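+
+// As the cases above and below encode: leading samples are trimmed while they
+// stay below the writer's amplitude threshold (see kAmplitudeThreshold),
+// whereas trailing samples are trimmed only when they are exactly zero.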
+
+TEST(RawFileWriterTest, DoubleEndSilence) {
+  static const std::vector<int16_t> kInputSamples = {
+      75, 1234,  243,    -1231, -22222, 0,    0, 0,
+      0,  -1213, -13222, -7,    -3525,  5787, 0, 0};
+  static const std::vector<int16_t> kExpectedSamples(kInputSamples.begin(),
+                                                     kInputSamples.end() - 2);
+  RunRawTestNoRepeat(kInputSamples, kExpectedSamples);
+}
+
+TEST(RawFileWriterTest, DoubleSilence) {
+  static const std::vector<int16_t> kInputSamples = {0,     -1213, -13222, -7,
+                                                     -3525, 5787,  0,      0};
+  static const std::vector<int16_t> kExpectedSamples(kInputSamples.begin() + 1,
+                                                     kInputSamples.end() - 2);
+  RunRawTestNoRepeat(kInputSamples, kExpectedSamples);
+}
+
+TEST(RawFileWriterTest, EndSilenceCutoff) {
+  static const std::vector<int16_t> kInputSamples = {
+      75, 1234, 243, -1231, -22222, 0, 1, 0, 0, 0, 0};
+  static const std::vector<int16_t> kExpectedSamples(kInputSamples.begin(),
+                                                     kInputSamples.end() - 4);
+  RunRawTestNoRepeat(kInputSamples, kExpectedSamples);
+}
+
+TEST(RawFileWriterTest, Repeat) {
+  static const std::vector<int16_t> kInputSamples = {
+      75,   1234,  243,    -1231, -22222, 0,    3,      88,
+      1222, -1213, -13222, -7,    -3525,  5787, -25247, 8};
+  static const rtc::BufferT<int16_t> kExpectedSamples(kInputSamples.data(),
+                                                      kInputSamples.size());
+
+  const ::testing::TestInfo* const test_info =
+      ::testing::UnitTest::GetInstance()->current_test_info();
+
+  const std::string output_filename = test::OutputPath() + "RawFileTest_" +
+                                      test_info->name() + "_" +
+                                      std::to_string(std::rand()) + ".raw";
+
+  static const size_t kSamplesPerFrame = 8;
+  static const int kSampleRate = kSamplesPerFrame * 100;
+  EXPECT_EQ(TestAudioDeviceModule::SamplesPerFrame(kSampleRate),
+            kSamplesPerFrame);
+
+  // Test through file name API.
+  {
+    std::unique_ptr<TestAudioDeviceModule::Renderer> writer =
+        TestAudioDeviceModule::CreateRawFileWriter(
+            output_filename, /*sampling_frequency_in_hz=*/800);
+
+    for (size_t i = 0; i < kInputSamples.size(); i += kSamplesPerFrame) {
+      EXPECT_TRUE(writer->Render(rtc::ArrayView<const int16_t>(
+          &kInputSamples[i],
+          std::min(kSamplesPerFrame, kInputSamples.size() - i))));
+    }
+  }
+
+  {
+    std::unique_ptr<TestAudioDeviceModule::Capturer> reader =
+        TestAudioDeviceModule::CreateRawFileReader(
+            output_filename, /*sampling_frequency_in_hz=*/800,
+            /*num_channels=*/2, /*repeat=*/true);
+    rtc::BufferT<int16_t> buffer(kExpectedSamples.size());
+    EXPECT_TRUE(reader->Capture(&buffer));
+    EXPECT_EQ(kExpectedSamples, buffer);
+    EXPECT_TRUE(reader->Capture(&buffer));
+    EXPECT_EQ(kExpectedSamples, buffer);
+  }
+
+  remove(output_filename.c_str());
+}
+
+TEST(PulsedNoiseCapturerTest, SetMaxAmplitude) {
+  const int16_t kAmplitude = 50;
+  std::unique_ptr<TestAudioDeviceModule::PulsedNoiseCapturer> capturer =
+      TestAudioDeviceModule::CreatePulsedNoiseCapturer(
+          kAmplitude, /*sampling_frequency_in_hz=*/8000);
+  rtc::BufferT<int16_t> recording_buffer;
+
+  // Verify that the capturer doesn't create entries louder than
+  // kAmplitude. Since the pulse generator alternates between writing
+  // zeroes and actual entries, we need to do the capturing twice.
+  capturer->Capture(&recording_buffer);
+  capturer->Capture(&recording_buffer);
+  int16_t max_sample =
+      *std::max_element(recording_buffer.begin(), recording_buffer.end());
+  EXPECT_LE(max_sample, kAmplitude);
+
+  // Increase the amplitude and verify that the samples can now be louder
+  // than the previous max.
+  capturer->SetMaxAmplitude(kAmplitude * 2);
+  capturer->Capture(&recording_buffer);
+  capturer->Capture(&recording_buffer);
+  max_sample =
+      *std::max_element(recording_buffer.begin(), recording_buffer.end());
+  EXPECT_GT(max_sample, kAmplitude);
+}
+
+using ::testing::ElementsAre;
+
+constexpr Timestamp kStartTime = Timestamp::Millis(10000);
+
+class TestAudioTransport : public AudioTransport {
+ public:
+  enum class Mode { kPlaying, kRecording };
+
+  explicit TestAudioTransport(Mode mode) : mode_(mode) {}
+  ~TestAudioTransport() override = default;
+
+  int32_t RecordedDataIsAvailable(
+      const void* audioSamples,
+      size_t samples_per_channel,
+      size_t bytes_per_sample,
+      size_t number_of_channels,
+      uint32_t samples_per_second,
+      uint32_t total_delay_ms,
+      int32_t clock_drift,
+      uint32_t current_mic_level,
+      bool key_pressed,
+      uint32_t& new_mic_level,
+      absl::optional<int64_t> estimated_capture_time_ns) override {
+    new_mic_level = 1;
+
+    if (mode_ != Mode::kRecording) {
+      EXPECT_TRUE(false) << "RecordedDataIsAvailable mustn't be called when "
+                            "mode isn't kRecording";
+      return -1;
+    }
+
+    MutexLock lock(&mutex_);
+    samples_per_channel_.push_back(samples_per_channel);
+    number_of_channels_.push_back(number_of_channels);
+    bytes_per_sample_.push_back(bytes_per_sample);
+    samples_per_second_.push_back(samples_per_second);
+    return 0;
+  }
+
+  int32_t NeedMorePlayData(size_t samples_per_channel,
+                           size_t bytes_per_sample,
+                           size_t number_of_channels,
+                           uint32_t samples_per_second,
+                           void* audio_samples,
+                           size_t& samples_out,
+                           int64_t* elapsed_time_ms,
+                           int64_t* ntp_time_ms) override {
+    const size_t num_bytes = samples_per_channel * number_of_channels;
+    std::memset(audio_samples, 1, num_bytes);
+    samples_out = samples_per_channel * number_of_channels;
+    *elapsed_time_ms = 0;
+    *ntp_time_ms = 0;
+
+    if (mode_ != Mode::kPlaying) {
+      EXPECT_TRUE(false)
+          << "NeedMorePlayData mustn't be called when mode isn't kPlaying";
+      return -1;
+    }
+
+    MutexLock lock(&mutex_);
+    samples_per_channel_.push_back(samples_per_channel);
+    number_of_channels_.push_back(number_of_channels);
+    bytes_per_sample_.push_back(bytes_per_sample);
+    samples_per_second_.push_back(samples_per_second);
+    return 0;
+  }
+
+  int32_t RecordedDataIsAvailable(const void* audio_samples,
+                                  size_t samples_per_channel,
+                                  size_t bytes_per_sample,
+                                  size_t number_of_channels,
+                                  uint32_t samples_per_second,
+                                  uint32_t total_delay_ms,
+                                  int32_t clockDrift,
+                                  uint32_t current_mic_level,
+                                  bool key_pressed,
+                                  uint32_t& new_mic_level) override {
+    RTC_CHECK(false) << "This method should never be executed";
+  }
+
+  void PullRenderData(int bits_per_sample,
+                      int sample_rate,
+                      size_t number_of_channels,
+                      size_t number_of_frames,
+                      void* audio_data,
+                      int64_t* elapsed_time_ms,
+                      int64_t* ntp_time_ms) override {
+    RTC_CHECK(false) << "This method should never be executed";
+  }
+
+  std::vector<size_t> samples_per_channel() const {
+    MutexLock lock(&mutex_);
+    return samples_per_channel_;
+  }
+  std::vector<size_t> number_of_channels() const {
+    MutexLock lock(&mutex_);
+    return number_of_channels_;
+  }
+  std::vector<size_t> bytes_per_sample() const {
+    MutexLock lock(&mutex_);
+    return bytes_per_sample_;
+  }
+  std::vector<uint32_t> samples_per_second() const {
+    MutexLock lock(&mutex_);
+    return samples_per_second_;
+  }
+
+ private:
+  const Mode mode_;
+
+  mutable Mutex mutex_;
+  std::vector<size_t> samples_per_channel_ RTC_GUARDED_BY(mutex_);
+  std::vector<size_t> number_of_channels_ RTC_GUARDED_BY(mutex_);
+  std::vector<size_t> bytes_per_sample_ RTC_GUARDED_BY(mutex_);
+  std::vector<uint32_t> samples_per_second_ RTC_GUARDED_BY(mutex_);
+};
+
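+// The expectations in the two tests below follow from the configuration:
+// 10 ms of audio at 48000 Hz is 480 samples per channel (48000 / 100), and
+// two channels of 16-bit samples make each sample frame 4 bytes.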
+TEST(TestAudioDeviceModuleTest, CreatedADMCanRecord) {
+  GlobalSimulatedTimeController time_controller(kStartTime);
+  TestAudioTransport audio_transport(TestAudioTransport::Mode::kRecording);
+  std::unique_ptr<TestAudioDeviceModule::Capturer> capturer =
+      TestAudioDeviceModule::CreatePulsedNoiseCapturer(
+          /*max_amplitude=*/1000,
+          /*sampling_frequency_in_hz=*/48000, /*num_channels=*/2);
+
+  rtc::scoped_refptr<AudioDeviceModule> adm = TestAudioDeviceModule::Create(
+      time_controller.GetTaskQueueFactory(), std::move(capturer),
+      /*renderer=*/nullptr);
+
+  ASSERT_EQ(adm->RegisterAudioCallback(&audio_transport), 0);
+  ASSERT_EQ(adm->Init(), 0);
+
+  EXPECT_FALSE(adm->RecordingIsInitialized());
+  ASSERT_EQ(adm->InitRecording(), 0);
+  EXPECT_TRUE(adm->RecordingIsInitialized());
+  ASSERT_EQ(adm->StartRecording(), 0);
+  time_controller.AdvanceTime(TimeDelta::Millis(10));
+  ASSERT_TRUE(adm->Recording());
+  time_controller.AdvanceTime(TimeDelta::Millis(10));
+  ASSERT_EQ(adm->StopRecording(), 0);
+
+  EXPECT_THAT(audio_transport.samples_per_channel(),
+              ElementsAre(480, 480, 480));
+  EXPECT_THAT(audio_transport.number_of_channels(), ElementsAre(2, 2, 2));
+  EXPECT_THAT(audio_transport.bytes_per_sample(), ElementsAre(4, 4, 4));
+  EXPECT_THAT(audio_transport.samples_per_second(),
+              ElementsAre(48000, 48000, 48000));
+}
+
+TEST(TestAudioDeviceModuleTest, CreatedADMCanPlay) {
+  GlobalSimulatedTimeController time_controller(kStartTime);
+  TestAudioTransport audio_transport(TestAudioTransport::Mode::kPlaying);
+  std::unique_ptr<TestAudioDeviceModule::Renderer> renderer =
+      TestAudioDeviceModule::CreateDiscardRenderer(
+          /*sampling_frequency_in_hz=*/48000, /*num_channels=*/2);
+
+  rtc::scoped_refptr<AudioDeviceModule> adm =
+      TestAudioDeviceModule::Create(time_controller.GetTaskQueueFactory(),
+                                    /*capturer=*/nullptr, std::move(renderer));
+
+  ASSERT_EQ(adm->RegisterAudioCallback(&audio_transport), 0);
+  ASSERT_EQ(adm->Init(), 0);
+
+  EXPECT_FALSE(adm->PlayoutIsInitialized());
+  ASSERT_EQ(adm->InitPlayout(), 0);
+  EXPECT_TRUE(adm->PlayoutIsInitialized());
+  ASSERT_EQ(adm->StartPlayout(), 0);
+  time_controller.AdvanceTime(TimeDelta::Millis(10));
+  ASSERT_TRUE(adm->Playing());
+  time_controller.AdvanceTime(TimeDelta::Millis(10));
+  ASSERT_EQ(adm->StopPlayout(), 0);
+
+  EXPECT_THAT(audio_transport.samples_per_channel(),
+              ElementsAre(480, 480, 480));
+  EXPECT_THAT(audio_transport.number_of_channels(), ElementsAre(2, 2, 2));
+  EXPECT_THAT(audio_transport.bytes_per_sample(), ElementsAre(4, 4, 4));
+  EXPECT_THAT(audio_transport.samples_per_second(),
+              ElementsAre(48000, 48000, 48000));
+}
+
+}  // namespace
+}  // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/linux/alsasymboltable_linux.cc b/third_party/libwebrtc/modules/audio_device/linux/alsasymboltable_linux.cc
new file mode 100644
index 0000000000..5dfb91d6f4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/linux/alsasymboltable_linux.cc
@@ -0,0 +1,40 @@
+/*
+ * libjingle
+ * Copyright 2004--2010, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ * 3.
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO + * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "modules/audio_device/linux/alsasymboltable_linux.h" + +namespace webrtc { +namespace adm_linux_alsa { + +LATE_BINDING_SYMBOL_TABLE_DEFINE_BEGIN(AlsaSymbolTable, "libasound.so.2") +#define X(sym) LATE_BINDING_SYMBOL_TABLE_DEFINE_ENTRY(AlsaSymbolTable, sym) +ALSA_SYMBOLS_LIST +#undef X +LATE_BINDING_SYMBOL_TABLE_DEFINE_END(AlsaSymbolTable) + +} // namespace adm_linux_alsa +} // namespace webrtc diff --git a/third_party/libwebrtc/modules/audio_device/linux/alsasymboltable_linux.h b/third_party/libwebrtc/modules/audio_device/linux/alsasymboltable_linux.h new file mode 100644 index 0000000000..c9970b02bc --- /dev/null +++ b/third_party/libwebrtc/modules/audio_device/linux/alsasymboltable_linux.h @@ -0,0 +1,148 @@ +/* + * libjingle + * Copyright 2004--2010, Google Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO + * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef AUDIO_DEVICE_ALSASYMBOLTABLE_LINUX_H_ +#define AUDIO_DEVICE_ALSASYMBOLTABLE_LINUX_H_ + +#include "modules/audio_device/linux/latebindingsymboltable_linux.h" + +namespace webrtc { +namespace adm_linux_alsa { + +// The ALSA symbols we need, as an X-Macro list. +// This list must contain precisely every libasound function that is used in +// alsasoundsystem.cc. 
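+//
+// A sketch of how the X-Macro list below is consumed (see the DECLARE block at
+// the bottom of this header and the matching DEFINE block in
+// alsasymboltable_linux.cc): defining `X` and then expanding the list, e.g.
+//
+//   #define X(sym) LATE_BINDING_SYMBOL_TABLE_DECLARE_ENTRY(AlsaSymbolTable, sym)
+//   ALSA_SYMBOLS_LIST
+//   #undef X
+//
+// emits one table entry per listed libasound symbol, so the list is written
+// once and reused for both the declarations and the definitions.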
+#define ALSA_SYMBOLS_LIST \ + X(snd_device_name_free_hint) \ + X(snd_device_name_get_hint) \ + X(snd_device_name_hint) \ + X(snd_pcm_avail_update) \ + X(snd_pcm_close) \ + X(snd_pcm_delay) \ + X(snd_pcm_drop) \ + X(snd_pcm_open) \ + X(snd_pcm_prepare) \ + X(snd_pcm_readi) \ + X(snd_pcm_recover) \ + X(snd_pcm_resume) \ + X(snd_pcm_reset) \ + X(snd_pcm_state) \ + X(snd_pcm_set_params) \ + X(snd_pcm_get_params) \ + X(snd_pcm_start) \ + X(snd_pcm_stream) \ + X(snd_pcm_frames_to_bytes) \ + X(snd_pcm_bytes_to_frames) \ + X(snd_pcm_wait) \ + X(snd_pcm_writei) \ + X(snd_pcm_info_get_class) \ + X(snd_pcm_info_get_subdevices_avail) \ + X(snd_pcm_info_get_subdevice_name) \ + X(snd_pcm_info_set_subdevice) \ + X(snd_pcm_info_get_id) \ + X(snd_pcm_info_set_device) \ + X(snd_pcm_info_set_stream) \ + X(snd_pcm_info_get_name) \ + X(snd_pcm_info_get_subdevices_count) \ + X(snd_pcm_info_sizeof) \ + X(snd_pcm_hw_params) \ + X(snd_pcm_hw_params_malloc) \ + X(snd_pcm_hw_params_free) \ + X(snd_pcm_hw_params_any) \ + X(snd_pcm_hw_params_set_access) \ + X(snd_pcm_hw_params_set_format) \ + X(snd_pcm_hw_params_set_channels) \ + X(snd_pcm_hw_params_set_rate_near) \ + X(snd_pcm_hw_params_set_buffer_size_near) \ + X(snd_card_next) \ + X(snd_card_get_name) \ + X(snd_config_update) \ + X(snd_config_copy) \ + X(snd_config_get_id) \ + X(snd_ctl_open) \ + X(snd_ctl_close) \ + X(snd_ctl_card_info) \ + X(snd_ctl_card_info_sizeof) \ + X(snd_ctl_card_info_get_id) \ + X(snd_ctl_card_info_get_name) \ + X(snd_ctl_pcm_next_device) \ + X(snd_ctl_pcm_info) \ + X(snd_mixer_load) \ + X(snd_mixer_free) \ + X(snd_mixer_detach) \ + X(snd_mixer_close) \ + X(snd_mixer_open) \ + X(snd_mixer_attach) \ + X(snd_mixer_first_elem) \ + X(snd_mixer_elem_next) \ + X(snd_mixer_selem_get_name) \ + X(snd_mixer_selem_is_active) \ + X(snd_mixer_selem_register) \ + X(snd_mixer_selem_set_playback_volume_all) \ + X(snd_mixer_selem_get_playback_volume) \ + X(snd_mixer_selem_has_playback_volume) \ + X(snd_mixer_selem_get_playback_volume_range) \ + X(snd_mixer_selem_has_playback_switch) \ + X(snd_mixer_selem_get_playback_switch) \ + X(snd_mixer_selem_set_playback_switch_all) \ + X(snd_mixer_selem_has_capture_switch) \ + X(snd_mixer_selem_get_capture_switch) \ + X(snd_mixer_selem_set_capture_switch_all) \ + X(snd_mixer_selem_has_capture_volume) \ + X(snd_mixer_selem_set_capture_volume_all) \ + X(snd_mixer_selem_get_capture_volume) \ + X(snd_mixer_selem_get_capture_volume_range) \ + X(snd_dlopen) \ + X(snd_dlclose) \ + X(snd_config) \ + X(snd_config_search) \ + X(snd_config_get_string) \ + X(snd_config_search_definition) \ + X(snd_config_get_type) \ + X(snd_config_delete) \ + X(snd_config_iterator_entry) \ + X(snd_config_iterator_first) \ + X(snd_config_iterator_next) \ + X(snd_config_iterator_end) \ + X(snd_config_delete_compound_members) \ + X(snd_config_get_integer) \ + X(snd_config_get_bool) \ + X(snd_dlsym) \ + X(snd_strerror) \ + X(snd_lib_error) \ + X(snd_lib_error_set_handler) + +LATE_BINDING_SYMBOL_TABLE_DECLARE_BEGIN(AlsaSymbolTable) +#define X(sym) LATE_BINDING_SYMBOL_TABLE_DECLARE_ENTRY(AlsaSymbolTable, sym) +ALSA_SYMBOLS_LIST +#undef X +LATE_BINDING_SYMBOL_TABLE_DECLARE_END(AlsaSymbolTable) + +} // namespace adm_linux_alsa +} // namespace webrtc + +#endif // AUDIO_DEVICE_ALSASYMBOLTABLE_LINUX_H_ diff --git a/third_party/libwebrtc/modules/audio_device/linux/audio_device_alsa_linux.cc b/third_party/libwebrtc/modules/audio_device/linux/audio_device_alsa_linux.cc new file mode 100644 index 0000000000..1e0ac8be28 --- /dev/null +++ 
b/third_party/libwebrtc/modules/audio_device/linux/audio_device_alsa_linux.cc @@ -0,0 +1,1636 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/audio_device/linux/audio_device_alsa_linux.h" + +#include "modules/audio_device/audio_device_config.h" +#include "rtc_base/logging.h" +#include "rtc_base/system/arch.h" +#include "system_wrappers/include/sleep.h" + +WebRTCAlsaSymbolTable* GetAlsaSymbolTable() { + static WebRTCAlsaSymbolTable* alsa_symbol_table = new WebRTCAlsaSymbolTable(); + return alsa_symbol_table; +} + +// Accesses ALSA functions through our late-binding symbol table instead of +// directly. This way we don't have to link to libasound, which means our binary +// will work on systems that don't have it. +#define LATE(sym) \ + LATESYM_GET(webrtc::adm_linux_alsa::AlsaSymbolTable, GetAlsaSymbolTable(), \ + sym) + +// Redefine these here to be able to do late-binding +#undef snd_ctl_card_info_alloca +#define snd_ctl_card_info_alloca(ptr) \ + do { \ + *ptr = (snd_ctl_card_info_t*)__builtin_alloca( \ + LATE(snd_ctl_card_info_sizeof)()); \ + memset(*ptr, 0, LATE(snd_ctl_card_info_sizeof)()); \ + } while (0) + +#undef snd_pcm_info_alloca +#define snd_pcm_info_alloca(pInfo) \ + do { \ + *pInfo = (snd_pcm_info_t*)__builtin_alloca(LATE(snd_pcm_info_sizeof)()); \ + memset(*pInfo, 0, LATE(snd_pcm_info_sizeof)()); \ + } while (0) + +// snd_lib_error_handler_t +void WebrtcAlsaErrorHandler(const char* file, + int line, + const char* function, + int err, + const char* fmt, + ...) 
{} + +namespace webrtc { +static const unsigned int ALSA_PLAYOUT_FREQ = 48000; +static const unsigned int ALSA_PLAYOUT_CH = 2; +static const unsigned int ALSA_PLAYOUT_LATENCY = 40 * 1000; // in us +static const unsigned int ALSA_CAPTURE_FREQ = 48000; +static const unsigned int ALSA_CAPTURE_CH = 2; +static const unsigned int ALSA_CAPTURE_LATENCY = 40 * 1000; // in us +static const unsigned int ALSA_CAPTURE_WAIT_TIMEOUT = 5; // in ms + +#define FUNC_GET_NUM_OF_DEVICE 0 +#define FUNC_GET_DEVICE_NAME 1 +#define FUNC_GET_DEVICE_NAME_FOR_AN_ENUM 2 + +AudioDeviceLinuxALSA::AudioDeviceLinuxALSA() + : _ptrAudioBuffer(NULL), + _inputDeviceIndex(0), + _outputDeviceIndex(0), + _inputDeviceIsSpecified(false), + _outputDeviceIsSpecified(false), + _handleRecord(NULL), + _handlePlayout(NULL), + _recordingBuffersizeInFrame(0), + _recordingPeriodSizeInFrame(0), + _playoutBufferSizeInFrame(0), + _playoutPeriodSizeInFrame(0), + _recordingBufferSizeIn10MS(0), + _playoutBufferSizeIn10MS(0), + _recordingFramesIn10MS(0), + _playoutFramesIn10MS(0), + _recordingFreq(ALSA_CAPTURE_FREQ), + _playoutFreq(ALSA_PLAYOUT_FREQ), + _recChannels(ALSA_CAPTURE_CH), + _playChannels(ALSA_PLAYOUT_CH), + _recordingBuffer(NULL), + _playoutBuffer(NULL), + _recordingFramesLeft(0), + _playoutFramesLeft(0), + _initialized(false), + _recording(false), + _playing(false), + _recIsInitialized(false), + _playIsInitialized(false), + _recordingDelay(0), + _playoutDelay(0) { + memset(_oldKeyState, 0, sizeof(_oldKeyState)); + RTC_DLOG(LS_INFO) << __FUNCTION__ << " created"; +} + +// ---------------------------------------------------------------------------- +// AudioDeviceLinuxALSA - dtor +// ---------------------------------------------------------------------------- + +AudioDeviceLinuxALSA::~AudioDeviceLinuxALSA() { + RTC_DLOG(LS_INFO) << __FUNCTION__ << " destroyed"; + + Terminate(); + + // Clean up the recording buffer and playout buffer. + if (_recordingBuffer) { + delete[] _recordingBuffer; + _recordingBuffer = NULL; + } + if (_playoutBuffer) { + delete[] _playoutBuffer; + _playoutBuffer = NULL; + } +} + +void AudioDeviceLinuxALSA::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) { + MutexLock lock(&mutex_); + + _ptrAudioBuffer = audioBuffer; + + // Inform the AudioBuffer about default settings for this implementation. + // Set all values to zero here since the actual settings will be done by + // InitPlayout and InitRecording later. 
+ _ptrAudioBuffer->SetRecordingSampleRate(0); + _ptrAudioBuffer->SetPlayoutSampleRate(0); + _ptrAudioBuffer->SetRecordingChannels(0); + _ptrAudioBuffer->SetPlayoutChannels(0); +} + +int32_t AudioDeviceLinuxALSA::ActiveAudioLayer( + AudioDeviceModule::AudioLayer& audioLayer) const { + audioLayer = AudioDeviceModule::kLinuxAlsaAudio; + return 0; +} + +AudioDeviceGeneric::InitStatus AudioDeviceLinuxALSA::Init() { + MutexLock lock(&mutex_); + + // Load libasound + if (!GetAlsaSymbolTable()->Load()) { + // Alsa is not installed on this system + RTC_LOG(LS_ERROR) << "failed to load symbol table"; + return InitStatus::OTHER_ERROR; + } + + if (_initialized) { + return InitStatus::OK; + } +#if defined(WEBRTC_USE_X11) + // Get X display handle for typing detection + _XDisplay = XOpenDisplay(NULL); + if (!_XDisplay) { + RTC_LOG(LS_WARNING) + << "failed to open X display, typing detection will not work"; + } +#endif + + _initialized = true; + + return InitStatus::OK; +} + +int32_t AudioDeviceLinuxALSA::Terminate() { + if (!_initialized) { + return 0; + } + + MutexLock lock(&mutex_); + + _mixerManager.Close(); + + // RECORDING + mutex_.Unlock(); + _ptrThreadRec.Finalize(); + + // PLAYOUT + _ptrThreadPlay.Finalize(); + mutex_.Lock(); + +#if defined(WEBRTC_USE_X11) + if (_XDisplay) { + XCloseDisplay(_XDisplay); + _XDisplay = NULL; + } +#endif + _initialized = false; + _outputDeviceIsSpecified = false; + _inputDeviceIsSpecified = false; + + return 0; +} + +bool AudioDeviceLinuxALSA::Initialized() const { + return (_initialized); +} + +int32_t AudioDeviceLinuxALSA::InitSpeaker() { + MutexLock lock(&mutex_); + return InitSpeakerLocked(); +} + +int32_t AudioDeviceLinuxALSA::InitSpeakerLocked() { + if (_playing) { + return -1; + } + + char devName[kAdmMaxDeviceNameSize] = {0}; + GetDevicesInfo(2, true, _outputDeviceIndex, devName, kAdmMaxDeviceNameSize); + return _mixerManager.OpenSpeaker(devName); +} + +int32_t AudioDeviceLinuxALSA::InitMicrophone() { + MutexLock lock(&mutex_); + return InitMicrophoneLocked(); +} + +int32_t AudioDeviceLinuxALSA::InitMicrophoneLocked() { + if (_recording) { + return -1; + } + + char devName[kAdmMaxDeviceNameSize] = {0}; + GetDevicesInfo(2, false, _inputDeviceIndex, devName, kAdmMaxDeviceNameSize); + return _mixerManager.OpenMicrophone(devName); +} + +bool AudioDeviceLinuxALSA::SpeakerIsInitialized() const { + return (_mixerManager.SpeakerIsInitialized()); +} + +bool AudioDeviceLinuxALSA::MicrophoneIsInitialized() const { + return (_mixerManager.MicrophoneIsInitialized()); +} + +int32_t AudioDeviceLinuxALSA::SpeakerVolumeIsAvailable(bool& available) { + bool wasInitialized = _mixerManager.SpeakerIsInitialized(); + + // Make an attempt to open up the + // output mixer corresponding to the currently selected output device. + if (!wasInitialized && InitSpeaker() == -1) { + // If we end up here it means that the selected speaker has no volume + // control. 
+ available = false; + return 0; + } + + // Given that InitSpeaker was successful, we know that a volume control + // exists + available = true; + + // Close the initialized output mixer + if (!wasInitialized) { + _mixerManager.CloseSpeaker(); + } + + return 0; +} + +int32_t AudioDeviceLinuxALSA::SetSpeakerVolume(uint32_t volume) { + return (_mixerManager.SetSpeakerVolume(volume)); +} + +int32_t AudioDeviceLinuxALSA::SpeakerVolume(uint32_t& volume) const { + uint32_t level(0); + + if (_mixerManager.SpeakerVolume(level) == -1) { + return -1; + } + + volume = level; + + return 0; +} + +int32_t AudioDeviceLinuxALSA::MaxSpeakerVolume(uint32_t& maxVolume) const { + uint32_t maxVol(0); + + if (_mixerManager.MaxSpeakerVolume(maxVol) == -1) { + return -1; + } + + maxVolume = maxVol; + + return 0; +} + +int32_t AudioDeviceLinuxALSA::MinSpeakerVolume(uint32_t& minVolume) const { + uint32_t minVol(0); + + if (_mixerManager.MinSpeakerVolume(minVol) == -1) { + return -1; + } + + minVolume = minVol; + + return 0; +} + +int32_t AudioDeviceLinuxALSA::SpeakerMuteIsAvailable(bool& available) { + bool isAvailable(false); + bool wasInitialized = _mixerManager.SpeakerIsInitialized(); + + // Make an attempt to open up the + // output mixer corresponding to the currently selected output device. + // + if (!wasInitialized && InitSpeaker() == -1) { + // If we end up here it means that the selected speaker has no volume + // control, hence it is safe to state that there is no mute control + // already at this stage. + available = false; + return 0; + } + + // Check if the selected speaker has a mute control + _mixerManager.SpeakerMuteIsAvailable(isAvailable); + + available = isAvailable; + + // Close the initialized output mixer + if (!wasInitialized) { + _mixerManager.CloseSpeaker(); + } + + return 0; +} + +int32_t AudioDeviceLinuxALSA::SetSpeakerMute(bool enable) { + return (_mixerManager.SetSpeakerMute(enable)); +} + +int32_t AudioDeviceLinuxALSA::SpeakerMute(bool& enabled) const { + bool muted(0); + + if (_mixerManager.SpeakerMute(muted) == -1) { + return -1; + } + + enabled = muted; + + return 0; +} + +int32_t AudioDeviceLinuxALSA::MicrophoneMuteIsAvailable(bool& available) { + bool isAvailable(false); + bool wasInitialized = _mixerManager.MicrophoneIsInitialized(); + + // Make an attempt to open up the + // input mixer corresponding to the currently selected input device. + // + if (!wasInitialized && InitMicrophone() == -1) { + // If we end up here it means that the selected microphone has no volume + // control, hence it is safe to state that there is no mute control + // already at this stage. 
+    available = false;
+    return 0;
+  }
+
+  // Check if the selected microphone has a mute control
+  //
+  _mixerManager.MicrophoneMuteIsAvailable(isAvailable);
+  available = isAvailable;
+
+  // Close the initialized input mixer
+  //
+  if (!wasInitialized) {
+    _mixerManager.CloseMicrophone();
+  }
+
+  return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::SetMicrophoneMute(bool enable) {
+  return (_mixerManager.SetMicrophoneMute(enable));
+}
+
+// ----------------------------------------------------------------------------
+//  MicrophoneMute
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceLinuxALSA::MicrophoneMute(bool& enabled) const {
+  bool muted(0);
+
+  if (_mixerManager.MicrophoneMute(muted) == -1) {
+    return -1;
+  }
+
+  enabled = muted;
+  return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::StereoRecordingIsAvailable(bool& available) {
+  MutexLock lock(&mutex_);
+
+  // If we already have initialized in stereo it's obviously available
+  if (_recIsInitialized && (2 == _recChannels)) {
+    available = true;
+    return 0;
+  }
+
+  // Save rec states and the number of rec channels
+  bool recIsInitialized = _recIsInitialized;
+  bool recording = _recording;
+  int recChannels = _recChannels;
+
+  available = false;
+
+  // Stop/uninitialize recording if initialized (and possibly started)
+  if (_recIsInitialized) {
+    StopRecordingLocked();
+  }
+
+  // Try to initialize in stereo.
+  _recChannels = 2;
+  if (InitRecordingLocked() == 0) {
+    available = true;
+  }
+
+  // Stop/uninitialize recording
+  StopRecordingLocked();
+
+  // Recover previous states
+  _recChannels = recChannels;
+  if (recIsInitialized) {
+    InitRecordingLocked();
+  }
+  if (recording) {
+    StartRecording();
+  }
+
+  return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::SetStereoRecording(bool enable) {
+  if (enable)
+    _recChannels = 2;
+  else
+    _recChannels = 1;
+
+  return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::StereoRecording(bool& enabled) const {
+  if (_recChannels == 2)
+    enabled = true;
+  else
+    enabled = false;
+
+  return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::StereoPlayoutIsAvailable(bool& available) {
+  MutexLock lock(&mutex_);
+
+  // If we already have initialized in stereo it's obviously available
+  if (_playIsInitialized && (2 == _playChannels)) {
+    available = true;
+    return 0;
+  }
+
+  // Save playout states and the number of playout channels
+  bool playIsInitialized = _playIsInitialized;
+  bool playing = _playing;
+  int playChannels = _playChannels;
+
+  available = false;
+
+  // Stop/uninitialize playout if initialized (and possibly started)
+  if (_playIsInitialized) {
+    StopPlayoutLocked();
+  }
+
+  // Try to initialize in stereo.
+  _playChannels = 2;
+  if (InitPlayoutLocked() == 0) {
+    available = true;
+  }
+
+  // Stop/uninitialize playout
+  StopPlayoutLocked();
+
+  // Recover previous states
+  _playChannels = playChannels;
+  if (playIsInitialized) {
+    InitPlayoutLocked();
+  }
+  if (playing) {
+    StartPlayout();
+  }
+
+  return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::SetStereoPlayout(bool enable) {
+  if (enable)
+    _playChannels = 2;
+  else
+    _playChannels = 1;
+
+  return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::StereoPlayout(bool& enabled) const {
+  if (_playChannels == 2)
+    enabled = true;
+  else
+    enabled = false;
+
+  return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::MicrophoneVolumeIsAvailable(bool& available) {
+  bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
+
+  // Make an attempt to open up the
+  // input mixer corresponding to the currently selected input device.
+  if (!wasInitialized && InitMicrophone() == -1) {
+    // If we end up here it means that the selected microphone has no volume
+    // control.
+    available = false;
+    return 0;
+  }
+
+  // Given that InitMicrophone was successful, we know that a volume control
+  // exists
+  available = true;
+
+  // Close the initialized input mixer
+  if (!wasInitialized) {
+    _mixerManager.CloseMicrophone();
+  }
+
+  return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::SetMicrophoneVolume(uint32_t volume) {
+  return (_mixerManager.SetMicrophoneVolume(volume));
+}
+
+int32_t AudioDeviceLinuxALSA::MicrophoneVolume(uint32_t& volume) const {
+  uint32_t level(0);
+
+  if (_mixerManager.MicrophoneVolume(level) == -1) {
+    RTC_LOG(LS_WARNING) << "failed to retrieve current microphone level";
+    return -1;
+  }
+
+  volume = level;
+
+  return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::MaxMicrophoneVolume(uint32_t& maxVolume) const {
+  uint32_t maxVol(0);
+
+  if (_mixerManager.MaxMicrophoneVolume(maxVol) == -1) {
+    return -1;
+  }
+
+  maxVolume = maxVol;
+
+  return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::MinMicrophoneVolume(uint32_t& minVolume) const {
+  uint32_t minVol(0);
+
+  if (_mixerManager.MinMicrophoneVolume(minVol) == -1) {
+    return -1;
+  }
+
+  minVolume = minVol;
+
+  return 0;
+}
+
+int16_t AudioDeviceLinuxALSA::PlayoutDevices() {
+  return (int16_t)GetDevicesInfo(0, true);
+}
+
+int32_t AudioDeviceLinuxALSA::SetPlayoutDevice(uint16_t index) {
+  if (_playIsInitialized) {
+    return -1;
+  }
+
+  int32_t nDevices = GetDevicesInfo(0, true);
+  RTC_LOG(LS_VERBOSE) << "number of available audio output devices is "
+                      << nDevices;
+
+  if (index > (nDevices - 1)) {
+    RTC_LOG(LS_ERROR) << "device index is out of range [0," << (nDevices - 1)
+                      << "]";
+    return -1;
+  }
+
+  _outputDeviceIndex = index;
+  _outputDeviceIsSpecified = true;
+
+  return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::SetPlayoutDevice(
+    AudioDeviceModule::WindowsDeviceType /*device*/) {
+  RTC_LOG(LS_ERROR) << "WindowsDeviceType not supported";
+  return -1;
+}
+
+int32_t AudioDeviceLinuxALSA::PlayoutDeviceName(
+    uint16_t index,
+    char name[kAdmMaxDeviceNameSize],
+    char guid[kAdmMaxGuidSize]) {
+  const uint16_t nDevices(PlayoutDevices());
+
+  if ((index > (nDevices - 1)) || (name == NULL)) {
+    return -1;
+  }
+
+  memset(name, 0, kAdmMaxDeviceNameSize);
+
+  if (guid != NULL) {
+    memset(guid, 0, kAdmMaxGuidSize);
+  }
+
+  return GetDevicesInfo(1, true, index, name, kAdmMaxDeviceNameSize);
+}
+
+int32_t AudioDeviceLinuxALSA::RecordingDeviceName(
+    uint16_t index,
+    char name[kAdmMaxDeviceNameSize],
+    char guid[kAdmMaxGuidSize]) {
+  const uint16_t nDevices(RecordingDevices());
+
+  if ((index > (nDevices - 1)) || (name == NULL)) {
+    return -1;
+  }
+
+  memset(name, 0, kAdmMaxDeviceNameSize);
+
+  if (guid != NULL) {
+    memset(guid, 0, kAdmMaxGuidSize);
+  }
+
+  return GetDevicesInfo(1, false, index, name, kAdmMaxDeviceNameSize);
+}
+
+int16_t AudioDeviceLinuxALSA::RecordingDevices() {
+  return (int16_t)GetDevicesInfo(0, false);
+}
+
+int32_t AudioDeviceLinuxALSA::SetRecordingDevice(uint16_t index) {
+  if (_recIsInitialized) {
+    return -1;
+  }
+
+  int32_t nDevices = GetDevicesInfo(0, false);
+  RTC_LOG(LS_VERBOSE) << "number of available audio input devices is "
+                      << nDevices;
+
+  if (index > (nDevices - 1)) {
+    RTC_LOG(LS_ERROR) << "device index is out of range [0," << (nDevices - 1)
+                      << "]";
+    return -1;
+  }
+
+  _inputDeviceIndex = index;
+  _inputDeviceIsSpecified = true;
+
+  return 0;
+}
+
+// ----------------------------------------------------------------------------
+//  SetRecordingDevice II (II)
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceLinuxALSA::SetRecordingDevice(
+    AudioDeviceModule::WindowsDeviceType /*device*/) {
+  RTC_LOG(LS_ERROR) << "WindowsDeviceType not supported";
+  return -1;
+}
+
+int32_t AudioDeviceLinuxALSA::PlayoutIsAvailable(bool& available) {
+  available = false;
+
+  // Try to initialize the playout side with mono
+  // Assumes that user set num channels after calling this function
+  _playChannels = 1;
+  int32_t res = InitPlayout();
+
+  // Cancel effect of initialization
+  StopPlayout();
+
+  if (res != -1) {
+    available = true;
+  } else {
+    // It may be possible to play out in stereo
+    res = StereoPlayoutIsAvailable(available);
+    if (available) {
+      // Then set channels to 2 so InitPlayout doesn't fail
+      _playChannels = 2;
+    }
+  }
+
+  return res;
+}
+
+int32_t AudioDeviceLinuxALSA::RecordingIsAvailable(bool& available) {
+  available = false;
+
+  // Try to initialize the recording side with mono
+  // Assumes that user set num channels after calling this function
+  _recChannels = 1;
+  int32_t res = InitRecording();
+
+  // Cancel effect of initialization
+  StopRecording();
+
+  if (res != -1) {
+    available = true;
+  } else {
+    // It may be possible to record in stereo
+    res = StereoRecordingIsAvailable(available);
+    if (available) {
+      // Then set channels to 2 so InitRecording doesn't fail
+      _recChannels = 2;
+    }
+  }
+
+  return res;
+}
+
+int32_t AudioDeviceLinuxALSA::InitPlayout() {
+  MutexLock lock(&mutex_);
+  return InitPlayoutLocked();
+}
+
+int32_t AudioDeviceLinuxALSA::InitPlayoutLocked() {
+  int errVal = 0;
+
+  if (_playing) {
+    return -1;
+  }
+
+  if (!_outputDeviceIsSpecified) {
+    return -1;
+  }
+
+  if (_playIsInitialized) {
+    return 0;
+  }
+
+  // Initialize the speaker (devices might have been added or removed)
+  if (InitSpeakerLocked() == -1) {
+    RTC_LOG(LS_WARNING) << "InitSpeaker() failed";
+  }
+
+  // Start by closing any existing wave-output devices
+  //
+  if (_handlePlayout != NULL) {
+    errVal = LATE(snd_pcm_close)(_handlePlayout);
+    _handlePlayout = NULL;
+    _playIsInitialized = false;
+    if (errVal < 0) {
+      RTC_LOG(LS_ERROR) << "Error closing current playout sound device, error: "
+                        << LATE(snd_strerror)(errVal);
+    }
+  }
+
+  // Open PCM device for playout
+  char deviceName[kAdmMaxDeviceNameSize] = {0};
+  GetDevicesInfo(2, true, _outputDeviceIndex, deviceName,
+                 kAdmMaxDeviceNameSize);
+
+  RTC_LOG(LS_VERBOSE) << "InitPlayout open (" << deviceName << ")";
+
+  errVal = LATE(snd_pcm_open)(&_handlePlayout, deviceName,
+                              SND_PCM_STREAM_PLAYBACK, SND_PCM_NONBLOCK);
+
+  if (errVal == -EBUSY)  // Device busy - try some more!
+  {
+    for (int i = 0; i < 5; i++) {
+      SleepMs(1000);
+      errVal = LATE(snd_pcm_open)(&_handlePlayout, deviceName,
+                                  SND_PCM_STREAM_PLAYBACK, SND_PCM_NONBLOCK);
+      if (errVal == 0) {
+        break;
+      }
+    }
+  }
+  if (errVal < 0) {
+    RTC_LOG(LS_ERROR) << "unable to open playback device: "
+                      << LATE(snd_strerror)(errVal) << " (" << errVal << ")";
+    _handlePlayout = NULL;
+    return -1;
+  }
+
+  _playoutFramesIn10MS = _playoutFreq / 100;
+  if ((errVal = LATE(snd_pcm_set_params)(
+           _handlePlayout,
+#if defined(WEBRTC_ARCH_BIG_ENDIAN)
+           SND_PCM_FORMAT_S16_BE,
+#else
+           SND_PCM_FORMAT_S16_LE,  // format
+#endif
+           SND_PCM_ACCESS_RW_INTERLEAVED,  // access
+           _playChannels,                  // channels
+           _playoutFreq,                   // rate
+           1,                              // soft_resample
+           ALSA_PLAYOUT_LATENCY  // 40*1000; required overall latency in us
+           )) < 0) { /* 0.5sec */
+    _playoutFramesIn10MS = 0;
+    RTC_LOG(LS_ERROR) << "unable to set playback device: "
+                      << LATE(snd_strerror)(errVal) << " (" << errVal << ")";
+    ErrorRecovery(errVal, _handlePlayout);
+    errVal = LATE(snd_pcm_close)(_handlePlayout);
+    _handlePlayout = NULL;
+    return -1;
+  }
+
+  errVal = LATE(snd_pcm_get_params)(_handlePlayout, &_playoutBufferSizeInFrame,
+                                    &_playoutPeriodSizeInFrame);
+  if (errVal < 0) {
+    RTC_LOG(LS_ERROR) << "snd_pcm_get_params: " << LATE(snd_strerror)(errVal)
+                      << " (" << errVal << ")";
+    _playoutBufferSizeInFrame = 0;
+    _playoutPeriodSizeInFrame = 0;
+  } else {
+    RTC_LOG(LS_VERBOSE) << "playout snd_pcm_get_params buffer_size:"
+                        << _playoutBufferSizeInFrame
+                        << " period_size :" << _playoutPeriodSizeInFrame;
+  }
+
+  if (_ptrAudioBuffer) {
+    // Update webrtc audio buffer with the selected parameters
+    _ptrAudioBuffer->SetPlayoutSampleRate(_playoutFreq);
+    _ptrAudioBuffer->SetPlayoutChannels(_playChannels);
+  }
+
+  // Set play buffer size
+  _playoutBufferSizeIn10MS =
+      LATE(snd_pcm_frames_to_bytes)(_handlePlayout, _playoutFramesIn10MS);
+
+  // Init variables used for play
+
+  if (_handlePlayout != NULL) {
+    _playIsInitialized = true;
+    return 0;
+  } else {
+    return -1;
+  }
+}
+
+int32_t AudioDeviceLinuxALSA::InitRecording() {
+  MutexLock lock(&mutex_);
+  return InitRecordingLocked();
+}
+
+int32_t AudioDeviceLinuxALSA::InitRecordingLocked() {
+  int errVal = 0;
+
+  if (_recording) {
+    return -1;
+  }
+
+  if (!_inputDeviceIsSpecified) {
+    return -1;
+  }
+
+  if (_recIsInitialized) {
+    return 0;
+  }
+
+  // Initialize the microphone (devices might have been added or removed)
+  if (InitMicrophoneLocked() == -1) {
+    RTC_LOG(LS_WARNING) << "InitMicrophone() failed";
+  }
+
+  // Start by closing any existing pcm-input devices
+  //
+  if (_handleRecord != NULL) {
+    int errVal = LATE(snd_pcm_close)(_handleRecord);
+    _handleRecord = NULL;
+    _recIsInitialized = false;
+    if (errVal < 0) {
+      RTC_LOG(LS_ERROR)
+          << "Error closing current recording sound device, error: "
+          << LATE(snd_strerror)(errVal);
+    }
+  }
+
+  // Open PCM device for recording
+  // The corresponding settings for playout are made after the record settings
+  char deviceName[kAdmMaxDeviceNameSize] = {0};
+  GetDevicesInfo(2, false, _inputDeviceIndex, deviceName,
+                 kAdmMaxDeviceNameSize);
+
+  RTC_LOG(LS_VERBOSE) << "InitRecording open (" << deviceName << ")";
+  errVal = LATE(snd_pcm_open)(&_handleRecord, deviceName,
+                              SND_PCM_STREAM_CAPTURE, SND_PCM_NONBLOCK);
+
+  // Available modes: 0 = blocking, SND_PCM_NONBLOCK, SND_PCM_ASYNC
+  if (errVal == -EBUSY)  // Device busy - try some more!
+ { + for (int i = 0; i < 5; i++) { + SleepMs(1000); + errVal = LATE(snd_pcm_open)(&_handleRecord, deviceName, + SND_PCM_STREAM_CAPTURE, SND_PCM_NONBLOCK); + if (errVal == 0) { + break; + } + } + } + if (errVal < 0) { + RTC_LOG(LS_ERROR) << "unable to open record device: " + << LATE(snd_strerror)(errVal); + _handleRecord = NULL; + return -1; + } + + _recordingFramesIn10MS = _recordingFreq / 100; + if ((errVal = + LATE(snd_pcm_set_params)(_handleRecord, +#if defined(WEBRTC_ARCH_BIG_ENDIAN) + SND_PCM_FORMAT_S16_BE, // format +#else + SND_PCM_FORMAT_S16_LE, // format +#endif + SND_PCM_ACCESS_RW_INTERLEAVED, // access + _recChannels, // channels + _recordingFreq, // rate + 1, // soft_resample + ALSA_CAPTURE_LATENCY // latency in us + )) < 0) { + // Fall back to another mode then. + if (_recChannels == 1) + _recChannels = 2; + else + _recChannels = 1; + + if ((errVal = + LATE(snd_pcm_set_params)(_handleRecord, +#if defined(WEBRTC_ARCH_BIG_ENDIAN) + SND_PCM_FORMAT_S16_BE, // format +#else + SND_PCM_FORMAT_S16_LE, // format +#endif + SND_PCM_ACCESS_RW_INTERLEAVED, // access + _recChannels, // channels + _recordingFreq, // rate + 1, // soft_resample + ALSA_CAPTURE_LATENCY // latency in us + )) < 0) { + _recordingFramesIn10MS = 0; + RTC_LOG(LS_ERROR) << "unable to set record settings: " + << LATE(snd_strerror)(errVal) << " (" << errVal << ")"; + ErrorRecovery(errVal, _handleRecord); + errVal = LATE(snd_pcm_close)(_handleRecord); + _handleRecord = NULL; + return -1; + } + } + + errVal = LATE(snd_pcm_get_params)(_handleRecord, &_recordingBuffersizeInFrame, + &_recordingPeriodSizeInFrame); + if (errVal < 0) { + RTC_LOG(LS_ERROR) << "snd_pcm_get_params " << LATE(snd_strerror)(errVal) + << " (" << errVal << ")"; + _recordingBuffersizeInFrame = 0; + _recordingPeriodSizeInFrame = 0; + } else { + RTC_LOG(LS_VERBOSE) << "capture snd_pcm_get_params, buffer_size:" + << _recordingBuffersizeInFrame + << ", period_size:" << _recordingPeriodSizeInFrame; + } + + if (_ptrAudioBuffer) { + // Update webrtc audio buffer with the selected parameters + _ptrAudioBuffer->SetRecordingSampleRate(_recordingFreq); + _ptrAudioBuffer->SetRecordingChannels(_recChannels); + } + + // Set rec buffer size and create buffer + _recordingBufferSizeIn10MS = + LATE(snd_pcm_frames_to_bytes)(_handleRecord, _recordingFramesIn10MS); + + if (_handleRecord != NULL) { + // Mark recording side as initialized + _recIsInitialized = true; + return 0; + } else { + return -1; + } +} + +int32_t AudioDeviceLinuxALSA::StartRecording() { + if (!_recIsInitialized) { + return -1; + } + + if (_recording) { + return 0; + } + + _recording = true; + + int errVal = 0; + _recordingFramesLeft = _recordingFramesIn10MS; + + // Make sure we only create the buffer once. 
+ if (!_recordingBuffer) + _recordingBuffer = new int8_t[_recordingBufferSizeIn10MS]; + if (!_recordingBuffer) { + RTC_LOG(LS_ERROR) << "failed to alloc recording buffer"; + _recording = false; + return -1; + } + // RECORDING + _ptrThreadRec = rtc::PlatformThread::SpawnJoinable( + [this] { + while (RecThreadProcess()) { + } + }, + "webrtc_audio_module_capture_thread", + rtc::ThreadAttributes().SetPriority(rtc::ThreadPriority::kRealtime)); + + errVal = LATE(snd_pcm_prepare)(_handleRecord); + if (errVal < 0) { + RTC_LOG(LS_ERROR) << "capture snd_pcm_prepare failed (" + << LATE(snd_strerror)(errVal) << ")\n"; + // just log error + // if snd_pcm_open fails will return -1 + } + + errVal = LATE(snd_pcm_start)(_handleRecord); + if (errVal < 0) { + RTC_LOG(LS_ERROR) << "capture snd_pcm_start err: " + << LATE(snd_strerror)(errVal); + errVal = LATE(snd_pcm_start)(_handleRecord); + if (errVal < 0) { + RTC_LOG(LS_ERROR) << "capture snd_pcm_start 2nd try err: " + << LATE(snd_strerror)(errVal); + StopRecording(); + return -1; + } + } + + return 0; +} + +int32_t AudioDeviceLinuxALSA::StopRecording() { + MutexLock lock(&mutex_); + return StopRecordingLocked(); +} + +int32_t AudioDeviceLinuxALSA::StopRecordingLocked() { + if (!_recIsInitialized) { + return 0; + } + + if (_handleRecord == NULL) { + return -1; + } + + // Make sure we don't start recording (it's asynchronous). + _recIsInitialized = false; + _recording = false; + + _ptrThreadRec.Finalize(); + + _recordingFramesLeft = 0; + if (_recordingBuffer) { + delete[] _recordingBuffer; + _recordingBuffer = NULL; + } + + // Stop and close pcm recording device. + int errVal = LATE(snd_pcm_drop)(_handleRecord); + if (errVal < 0) { + RTC_LOG(LS_ERROR) << "Error stop recording: " << LATE(snd_strerror)(errVal); + return -1; + } + + errVal = LATE(snd_pcm_close)(_handleRecord); + if (errVal < 0) { + RTC_LOG(LS_ERROR) << "Error closing record sound device, error: " + << LATE(snd_strerror)(errVal); + return -1; + } + + // Check if we have muted and unmute if so. 
+ bool muteEnabled = false; + MicrophoneMute(muteEnabled); + if (muteEnabled) { + SetMicrophoneMute(false); + } + + // set the pcm input handle to NULL + _handleRecord = NULL; + return 0; +} + +bool AudioDeviceLinuxALSA::RecordingIsInitialized() const { + return (_recIsInitialized); +} + +bool AudioDeviceLinuxALSA::Recording() const { + return (_recording); +} + +bool AudioDeviceLinuxALSA::PlayoutIsInitialized() const { + return (_playIsInitialized); +} + +int32_t AudioDeviceLinuxALSA::StartPlayout() { + if (!_playIsInitialized) { + return -1; + } + + if (_playing) { + return 0; + } + + _playing = true; + + _playoutFramesLeft = 0; + if (!_playoutBuffer) + _playoutBuffer = new int8_t[_playoutBufferSizeIn10MS]; + if (!_playoutBuffer) { + RTC_LOG(LS_ERROR) << "failed to alloc playout buf"; + _playing = false; + return -1; + } + + // PLAYOUT + _ptrThreadPlay = rtc::PlatformThread::SpawnJoinable( + [this] { + while (PlayThreadProcess()) { + } + }, + "webrtc_audio_module_play_thread", + rtc::ThreadAttributes().SetPriority(rtc::ThreadPriority::kRealtime)); + + int errVal = LATE(snd_pcm_prepare)(_handlePlayout); + if (errVal < 0) { + RTC_LOG(LS_ERROR) << "playout snd_pcm_prepare failed (" + << LATE(snd_strerror)(errVal) << ")\n"; + // just log error + // if snd_pcm_open fails will return -1 + } + + return 0; +} + +int32_t AudioDeviceLinuxALSA::StopPlayout() { + MutexLock lock(&mutex_); + return StopPlayoutLocked(); +} + +int32_t AudioDeviceLinuxALSA::StopPlayoutLocked() { + if (!_playIsInitialized) { + return 0; + } + + if (_handlePlayout == NULL) { + return -1; + } + + _playing = false; + + // stop playout thread first + _ptrThreadPlay.Finalize(); + + _playoutFramesLeft = 0; + delete[] _playoutBuffer; + _playoutBuffer = NULL; + + // stop and close pcm playout device + int errVal = LATE(snd_pcm_drop)(_handlePlayout); + if (errVal < 0) { + RTC_LOG(LS_ERROR) << "Error stop playing: " << LATE(snd_strerror)(errVal); + } + + errVal = LATE(snd_pcm_close)(_handlePlayout); + if (errVal < 0) + RTC_LOG(LS_ERROR) << "Error closing playout sound device, error: " + << LATE(snd_strerror)(errVal); + + // set the pcm input handle to NULL + _playIsInitialized = false; + _handlePlayout = NULL; + RTC_LOG(LS_VERBOSE) << "handle_playout is now set to NULL"; + + return 0; +} + +int32_t AudioDeviceLinuxALSA::PlayoutDelay(uint16_t& delayMS) const { + delayMS = (uint16_t)_playoutDelay * 1000 / _playoutFreq; + return 0; +} + +bool AudioDeviceLinuxALSA::Playing() const { + return (_playing); +} + +// ============================================================================ +// Private Methods +// ============================================================================ + +int32_t AudioDeviceLinuxALSA::GetDevicesInfo(const int32_t function, + const bool playback, + const int32_t enumDeviceNo, + char* enumDeviceName, + const int32_t ednLen) const { + // Device enumeration based on libjingle implementation + // by Tristan Schmelcher at Google Inc. + + const char* type = playback ? "Output" : "Input"; + // dmix and dsnoop are only for playback and capture, respectively, but ALSA + // stupidly includes them in both lists. + const char* ignorePrefix = playback ? "dsnoop:" : "dmix:"; + // (ALSA lists many more "devices" of questionable interest, but we show them + // just in case the weird devices may actually be desirable for some + // users/systems.) + + int err; + int enumCount(0); + bool keepSearching(true); + + // From Chromium issue 95797 + // Loop through the sound cards to get Alsa device hints. 
+  // Don't use snd_device_name_hint(-1,..) since there is an access violation
+  // inside this ALSA API with libasound.so.2.0.0.
+  int card = -1;
+  while (!(LATE(snd_card_next)(&card)) && (card >= 0) && keepSearching) {
+    void** hints;
+    err = LATE(snd_device_name_hint)(card, "pcm", &hints);
+    if (err != 0) {
+      RTC_LOG(LS_ERROR) << "GetDevicesInfo - device name hint error: "
+                        << LATE(snd_strerror)(err);
+      return -1;
+    }
+
+    enumCount++;  // default is 0
+    if ((function == FUNC_GET_DEVICE_NAME ||
+         function == FUNC_GET_DEVICE_NAME_FOR_AN_ENUM) &&
+        enumDeviceNo == 0) {
+      strcpy(enumDeviceName, "default");
+
+      err = LATE(snd_device_name_free_hint)(hints);
+      if (err != 0) {
+        RTC_LOG(LS_ERROR) << "GetDevicesInfo - device name free hint error: "
+                          << LATE(snd_strerror)(err);
+      }
+
+      return 0;
+    }
+
+    for (void** list = hints; *list != NULL; ++list) {
+      char* actualType = LATE(snd_device_name_get_hint)(*list, "IOID");
+      if (actualType) {  // NULL means it's both.
+        bool wrongType = (strcmp(actualType, type) != 0);
+        free(actualType);
+        if (wrongType) {
+          // Wrong type of device (i.e., input vs. output).
+          continue;
+        }
+      }
+
+      char* name = LATE(snd_device_name_get_hint)(*list, "NAME");
+      if (!name) {
+        RTC_LOG(LS_ERROR) << "Device has no name";
+        // Skip it.
+        continue;
+      }
+
+      // Now check if we actually want to show this device.
+      if (strcmp(name, "default") != 0 && strcmp(name, "null") != 0 &&
+          strcmp(name, "pulse") != 0 &&
+          strncmp(name, ignorePrefix, strlen(ignorePrefix)) != 0) {
+        // Yes, we do.
+        char* desc = LATE(snd_device_name_get_hint)(*list, "DESC");
+        if (!desc) {
+          // Virtual devices don't necessarily have descriptions.
+          // Use their names instead.
+          desc = name;
+        }
+
+        if (FUNC_GET_NUM_OF_DEVICE == function) {
+          RTC_LOG(LS_VERBOSE) << "Enum device " << enumCount << " - " << name;
+        }
+        if ((FUNC_GET_DEVICE_NAME == function) && (enumDeviceNo == enumCount)) {
+          // We have found the enum device, copy the name to buffer.
+          strncpy(enumDeviceName, desc, ednLen);
+          enumDeviceName[ednLen - 1] = '\0';
+          keepSearching = false;
+          // Replace '\n' with '-'.
+          char* pret = strchr(enumDeviceName, '\n' /*0xa*/);  // LF
+          if (pret)
+            *pret = '-';
+        }
+        if ((FUNC_GET_DEVICE_NAME_FOR_AN_ENUM == function) &&
+            (enumDeviceNo == enumCount)) {
+          // We have found the enum device, copy the name to buffer.
+          strncpy(enumDeviceName, name, ednLen);
+          enumDeviceName[ednLen - 1] = '\0';
+          keepSearching = false;
+        }
+
+        if (keepSearching)
+          ++enumCount;
+
+        if (desc != name)
+          free(desc);
+      }
+
+      free(name);
+
+      if (!keepSearching)
+        break;
+    }
+
+    err = LATE(snd_device_name_free_hint)(hints);
+    if (err != 0) {
+      RTC_LOG(LS_ERROR) << "GetDevicesInfo - device name free hint error: "
+                        << LATE(snd_strerror)(err);
+      // Continue and return true anyway, since we did get the whole list.
+    }
+  }
+
+  if (FUNC_GET_NUM_OF_DEVICE == function) {
+    if (enumCount == 1)  // only default?
+      enumCount = 0;
+    return enumCount;  // Normal return point for function 0
+  }
+
+  if (keepSearching) {
+    // If we get here for function 1 and 2, we didn't find the specified
+    // enum device.
+    RTC_LOG(LS_ERROR)
+        << "GetDevicesInfo - Could not find device name or numbers";
+    return -1;
+  }
+
+  return 0;
+}
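+
+// Note on the `function` argument of GetDevicesInfo() above (see the
+// FUNC_GET_* constants near the top of this file): FUNC_GET_NUM_OF_DEVICE (0)
+// returns the number of devices, FUNC_GET_DEVICE_NAME (1) copies the
+// human-readable description of device `enumDeviceNo` into `enumDeviceName`,
+// and FUNC_GET_DEVICE_NAME_FOR_AN_ENUM (2) copies the raw ALSA device name
+// that is later passed to snd_pcm_open().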
+    RTC_LOG(LS_ERROR)
+        << "GetDevicesInfo - Could not find device name or numbers";
+    return -1;
+  }
+
+  return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::InputSanityCheckAfterUnlockedPeriod() const {
+  if (_handleRecord == NULL) {
+    RTC_LOG(LS_ERROR) << "input state has been modified during unlocked period";
+    return -1;
+  }
+  return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::OutputSanityCheckAfterUnlockedPeriod() const {
+  if (_handlePlayout == NULL) {
+    RTC_LOG(LS_ERROR)
+        << "output state has been modified during unlocked period";
+    return -1;
+  }
+  return 0;
+}
+
+int32_t AudioDeviceLinuxALSA::ErrorRecovery(int32_t error,
+                                            snd_pcm_t* deviceHandle) {
+  int st = LATE(snd_pcm_state)(deviceHandle);
+  RTC_LOG(LS_VERBOSE) << "Trying to recover from "
+                      << ((LATE(snd_pcm_stream)(deviceHandle) ==
+                           SND_PCM_STREAM_CAPTURE)
+                              ? "capture"
+                              : "playout")
+                      << " error: " << LATE(snd_strerror)(error) << " ("
+                      << error << ") (state " << st << ")";
+
+  // It is recommended to use snd_pcm_recover for all errors. If that function
+  // cannot handle the error, the input error code will be returned, otherwise
+  // 0 is returned. From snd_pcm_recover API doc: "This functions handles
+  // -EINTR (4) (interrupted system call), -EPIPE (32) (playout overrun or
+  // capture underrun) and -ESTRPIPE (86) (stream is suspended) error codes
+  // trying to prepare given stream for next I/O."
+
+  /** Open */
+  // SND_PCM_STATE_OPEN = 0,
+  /** Setup installed */
+  // SND_PCM_STATE_SETUP,
+  /** Ready to start */
+  // SND_PCM_STATE_PREPARED,
+  /** Running */
+  // SND_PCM_STATE_RUNNING,
+  /** Stopped: underrun (playback) or overrun (capture) detected */
+  // SND_PCM_STATE_XRUN,= 4
+  /** Draining: running (playback) or stopped (capture) */
+  // SND_PCM_STATE_DRAINING,
+  /** Paused */
+  // SND_PCM_STATE_PAUSED,
+  /** Hardware is suspended */
+  // SND_PCM_STATE_SUSPENDED,
+  /** Hardware is disconnected */
+  // SND_PCM_STATE_DISCONNECTED,
+  // SND_PCM_STATE_LAST = SND_PCM_STATE_DISCONNECTED
+
+  // snd_pcm_recover isn't available in older alsa, e.g. on the FC4 machine
+  // in Sthlm lab.
+
+  int res = LATE(snd_pcm_recover)(deviceHandle, error, 1);
+  if (0 == res) {
+    RTC_LOG(LS_VERBOSE) << "Recovery - snd_pcm_recover OK";
+
+    if ((error == -EPIPE || error == -ESTRPIPE) &&  // Buf underrun/overrun.
+        _recording &&
+        LATE(snd_pcm_stream)(deviceHandle) == SND_PCM_STREAM_CAPTURE) {
+      // For capture streams we also have to repeat the explicit start()
+      // to get data flowing again.
+      int err = LATE(snd_pcm_start)(deviceHandle);
+      if (err != 0) {
+        RTC_LOG(LS_ERROR) << "Recovery - snd_pcm_start error: " << err;
+        return -1;
+      }
+    }
+
+    if ((error == -EPIPE || error == -ESTRPIPE) &&  // Buf underrun/overrun.
+        _playing &&
+        LATE(snd_pcm_stream)(deviceHandle) == SND_PCM_STREAM_PLAYBACK) {
+      // For playback streams we also have to repeat the explicit start() to
+      // get data flowing again.
+      int err = LATE(snd_pcm_start)(deviceHandle);
+      if (err != 0) {
+        RTC_LOG(LS_ERROR) << "Recovery - snd_pcm_start error: "
+                          << LATE(snd_strerror)(err);
+        return -1;
+      }
+    }
+
+    return -EPIPE == error ? 1 : 0;
+  } else {
+    RTC_LOG(LS_ERROR) << "Unrecoverable alsa stream error: " << res;
+  }
+
+  return res;
+}
+
+// ============================================================================
+// Thread Methods
+// ============================================================================
+
+bool AudioDeviceLinuxALSA::PlayThreadProcess() {
+  if (!_playing)
+    return false;
+
+  int err;
+  snd_pcm_sframes_t frames;
+  snd_pcm_sframes_t avail_frames;
+
+  Lock();
+  // return a positive number of frames ready otherwise a negative error code
+  avail_frames = LATE(snd_pcm_avail_update)(_handlePlayout);
+  if (avail_frames < 0) {
+    RTC_LOG(LS_ERROR) << "playout snd_pcm_avail_update error: "
+                      << LATE(snd_strerror)(avail_frames);
+    ErrorRecovery(avail_frames, _handlePlayout);
+    UnLock();
+    return true;
+  } else if (avail_frames == 0) {
+    UnLock();
+
+    // maximum time in milliseconds to wait, a negative value means infinity
+    err = LATE(snd_pcm_wait)(_handlePlayout, 2);
+    if (err == 0) {  // timeout occurred
+      RTC_LOG(LS_VERBOSE) << "playout snd_pcm_wait timeout";
+    }
+
+    return true;
+  }
+
+  if (_playoutFramesLeft <= 0) {
+    UnLock();
+    _ptrAudioBuffer->RequestPlayoutData(_playoutFramesIn10MS);
+    Lock();
+
+    _playoutFramesLeft = _ptrAudioBuffer->GetPlayoutData(_playoutBuffer);
+    RTC_DCHECK_EQ(_playoutFramesLeft, _playoutFramesIn10MS);
+  }
+
+  if (static_cast<uint32_t>(avail_frames) > _playoutFramesLeft)
+    avail_frames = _playoutFramesLeft;
+
+  int size = LATE(snd_pcm_frames_to_bytes)(_handlePlayout, _playoutFramesLeft);
+  frames = LATE(snd_pcm_writei)(
+      _handlePlayout, &_playoutBuffer[_playoutBufferSizeIn10MS - size],
+      avail_frames);
+
+  if (frames < 0) {
+    RTC_LOG(LS_VERBOSE) << "playout snd_pcm_writei error: "
+                        << LATE(snd_strerror)(frames);
+    _playoutFramesLeft = 0;
+    ErrorRecovery(frames, _handlePlayout);
+    UnLock();
+    return true;
+  } else {
+    RTC_DCHECK_EQ(frames, avail_frames);
+    _playoutFramesLeft -= frames;
+  }
+
+  UnLock();
+  return true;
+}
+
+bool AudioDeviceLinuxALSA::RecThreadProcess() {
+  if (!_recording)
+    return false;
+
+  int err;
+  snd_pcm_sframes_t frames;
+  snd_pcm_sframes_t avail_frames;
+  int8_t buffer[_recordingBufferSizeIn10MS];
+
+  Lock();
+
+  // return a positive number of frames ready otherwise a negative error code
+  avail_frames = LATE(snd_pcm_avail_update)(_handleRecord);
+  if (avail_frames < 0) {
+    RTC_LOG(LS_ERROR) << "capture snd_pcm_avail_update error: "
+                      << LATE(snd_strerror)(avail_frames);
+    ErrorRecovery(avail_frames, _handleRecord);
+    UnLock();
+    return true;
+  } else if (avail_frames == 0) {  // no frame is available now
+    UnLock();
+
+    // maximum time in milliseconds to wait, a negative value means infinity
+    err = LATE(snd_pcm_wait)(_handleRecord, ALSA_CAPTURE_WAIT_TIMEOUT);
+    if (err == 0)  // timeout occurred
+      RTC_LOG(LS_VERBOSE) << "capture snd_pcm_wait timeout";
+
+    return true;
+  }
+
+  if (static_cast<uint32_t>(avail_frames) > _recordingFramesLeft)
+    avail_frames = _recordingFramesLeft;
+
+  frames = LATE(snd_pcm_readi)(_handleRecord, buffer,
+                               avail_frames);  // frames actually read
+  if (frames < 0) {
+    RTC_LOG(LS_ERROR) << "capture snd_pcm_readi error: "
+                      << LATE(snd_strerror)(frames);
+    ErrorRecovery(frames, _handleRecord);
+    UnLock();
+    return true;
+  } else if (frames > 0) {
+    RTC_DCHECK_EQ(frames, avail_frames);
+
+    int left_size =
+        LATE(snd_pcm_frames_to_bytes)(_handleRecord, _recordingFramesLeft);
+    int size = LATE(snd_pcm_frames_to_bytes)(_handleRecord, frames);
+
+    memcpy(&_recordingBuffer[_recordingBufferSizeIn10MS - left_size], buffer,
+           size);
_recordingFramesLeft -= frames; + + if (!_recordingFramesLeft) { // buf is full + _recordingFramesLeft = _recordingFramesIn10MS; + + // store the recorded buffer (no action will be taken if the + // #recorded samples is not a full buffer) + _ptrAudioBuffer->SetRecordedBuffer(_recordingBuffer, + _recordingFramesIn10MS); + + // calculate delay + _playoutDelay = 0; + _recordingDelay = 0; + if (_handlePlayout) { + err = LATE(snd_pcm_delay)(_handlePlayout, + &_playoutDelay); // returned delay in frames + if (err < 0) { + // TODO(xians): Shall we call ErrorRecovery() here? + _playoutDelay = 0; + RTC_LOG(LS_ERROR) + << "playout snd_pcm_delay: " << LATE(snd_strerror)(err); + } + } + + err = LATE(snd_pcm_delay)(_handleRecord, + &_recordingDelay); // returned delay in frames + if (err < 0) { + // TODO(xians): Shall we call ErrorRecovery() here? + _recordingDelay = 0; + RTC_LOG(LS_ERROR) << "capture snd_pcm_delay: " + << LATE(snd_strerror)(err); + } + + // TODO(xians): Shall we add 10ms buffer delay to the record delay? + _ptrAudioBuffer->SetVQEData(_playoutDelay * 1000 / _playoutFreq, + _recordingDelay * 1000 / _recordingFreq); + + _ptrAudioBuffer->SetTypingStatus(KeyPressed()); + + // Deliver recorded samples at specified sample rate, mic level etc. + // to the observer using callback. + UnLock(); + _ptrAudioBuffer->DeliverRecordedData(); + Lock(); + } + } + + UnLock(); + return true; +} + +bool AudioDeviceLinuxALSA::KeyPressed() const { +#if defined(WEBRTC_USE_X11) + char szKey[32]; + unsigned int i = 0; + char state = 0; + + if (!_XDisplay) + return false; + + // Check key map status + XQueryKeymap(_XDisplay, szKey); + + // A bit change in keymap means a key is pressed + for (i = 0; i < sizeof(szKey); i++) + state |= (szKey[i] ^ _oldKeyState[i]) & szKey[i]; + + // Save old state + memcpy((char*)_oldKeyState, (char*)szKey, sizeof(_oldKeyState)); + return (state != 0); +#else + return false; +#endif +} +} // namespace webrtc diff --git a/third_party/libwebrtc/modules/audio_device/linux/audio_device_alsa_linux.h b/third_party/libwebrtc/modules/audio_device/linux/audio_device_alsa_linux.h new file mode 100644 index 0000000000..23e21d3ce9 --- /dev/null +++ b/third_party/libwebrtc/modules/audio_device/linux/audio_device_alsa_linux.h @@ -0,0 +1,208 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */
+
+#ifndef AUDIO_DEVICE_AUDIO_DEVICE_ALSA_LINUX_H_
+#define AUDIO_DEVICE_AUDIO_DEVICE_ALSA_LINUX_H_
+
+#include <memory>
+
+#include "modules/audio_device/audio_device_generic.h"
+#include "modules/audio_device/linux/audio_mixer_manager_alsa_linux.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/synchronization/mutex.h"
+
+#if defined(WEBRTC_USE_X11)
+#include <X11/Xlib.h>
+#endif
+#include <alsa/asoundlib.h>
+#include <sys/ioctl.h>
+#include <sys/soundcard.h>
+
+typedef webrtc::adm_linux_alsa::AlsaSymbolTable WebRTCAlsaSymbolTable;
+WebRTCAlsaSymbolTable* GetAlsaSymbolTable();
+
+namespace webrtc {
+
+class AudioDeviceLinuxALSA : public AudioDeviceGeneric {
+ public:
+  AudioDeviceLinuxALSA();
+  virtual ~AudioDeviceLinuxALSA();
+
+  // Retrieve the currently utilized audio layer
+  int32_t ActiveAudioLayer(
+      AudioDeviceModule::AudioLayer& audioLayer) const override;
+
+  // Main initialization and termination
+  InitStatus Init() RTC_LOCKS_EXCLUDED(mutex_) override;
+  int32_t Terminate() RTC_LOCKS_EXCLUDED(mutex_) override;
+  bool Initialized() const override;
+
+  // Device enumeration
+  int16_t PlayoutDevices() override;
+  int16_t RecordingDevices() override;
+  int32_t PlayoutDeviceName(uint16_t index,
+                            char name[kAdmMaxDeviceNameSize],
+                            char guid[kAdmMaxGuidSize]) override;
+  int32_t RecordingDeviceName(uint16_t index,
+                              char name[kAdmMaxDeviceNameSize],
+                              char guid[kAdmMaxGuidSize]) override;
+
+  // Device selection
+  int32_t SetPlayoutDevice(uint16_t index) override;
+  int32_t SetPlayoutDevice(
+      AudioDeviceModule::WindowsDeviceType device) override;
+  int32_t SetRecordingDevice(uint16_t index) override;
+  int32_t SetRecordingDevice(
+      AudioDeviceModule::WindowsDeviceType device) override;
+
+  // Audio transport initialization
+  int32_t PlayoutIsAvailable(bool& available) override;
+  int32_t InitPlayout() RTC_LOCKS_EXCLUDED(mutex_) override;
+  bool PlayoutIsInitialized() const override;
+  int32_t RecordingIsAvailable(bool& available) override;
+  int32_t InitRecording() RTC_LOCKS_EXCLUDED(mutex_) override;
+  bool RecordingIsInitialized() const override;
+
+  // Audio transport control
+  int32_t StartPlayout() override;
+  int32_t StopPlayout() RTC_LOCKS_EXCLUDED(mutex_) override;
+  bool Playing() const override;
+  int32_t StartRecording() override;
+  int32_t StopRecording() RTC_LOCKS_EXCLUDED(mutex_) override;
+  bool Recording() const override;
+
+  // Audio mixer initialization
+  int32_t InitSpeaker() RTC_LOCKS_EXCLUDED(mutex_) override;
+  bool SpeakerIsInitialized() const override;
+  int32_t InitMicrophone() RTC_LOCKS_EXCLUDED(mutex_) override;
+  bool MicrophoneIsInitialized() const override;
+
+  // Speaker volume controls
+  int32_t SpeakerVolumeIsAvailable(bool& available) override;
+  int32_t SetSpeakerVolume(uint32_t volume) override;
+  int32_t SpeakerVolume(uint32_t& volume) const override;
+  int32_t MaxSpeakerVolume(uint32_t& maxVolume) const override;
+  int32_t MinSpeakerVolume(uint32_t& minVolume) const override;
+
+  // Microphone volume controls
+  int32_t MicrophoneVolumeIsAvailable(bool& available) override;
+  int32_t SetMicrophoneVolume(uint32_t volume) override;
+  int32_t MicrophoneVolume(uint32_t& volume) const override;
+  int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const override;
+  int32_t MinMicrophoneVolume(uint32_t& minVolume) const override;
+
+  // Speaker mute control
+  int32_t SpeakerMuteIsAvailable(bool& available) override;
+  int32_t SetSpeakerMute(bool enable) override;
+  int32_t SpeakerMute(bool& enabled) const override;
+
+  // Microphone mute control
+  int32_t MicrophoneMuteIsAvailable(bool& available) override;
int32_t SetMicrophoneMute(bool enable) override; + int32_t MicrophoneMute(bool& enabled) const override; + + // Stereo support + int32_t StereoPlayoutIsAvailable(bool& available) + RTC_LOCKS_EXCLUDED(mutex_) override; + int32_t SetStereoPlayout(bool enable) override; + int32_t StereoPlayout(bool& enabled) const override; + int32_t StereoRecordingIsAvailable(bool& available) + RTC_LOCKS_EXCLUDED(mutex_) override; + int32_t SetStereoRecording(bool enable) override; + int32_t StereoRecording(bool& enabled) const override; + + // Delay information and control + int32_t PlayoutDelay(uint16_t& delayMS) const override; + + void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) + RTC_LOCKS_EXCLUDED(mutex_) override; + + private: + int32_t InitRecordingLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); + int32_t StopRecordingLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); + int32_t StopPlayoutLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); + int32_t InitPlayoutLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); + int32_t InitSpeakerLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); + int32_t InitMicrophoneLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); + int32_t GetDevicesInfo(int32_t function, + bool playback, + int32_t enumDeviceNo = 0, + char* enumDeviceName = NULL, + int32_t ednLen = 0) const; + int32_t ErrorRecovery(int32_t error, snd_pcm_t* deviceHandle); + + bool KeyPressed() const; + + void Lock() RTC_EXCLUSIVE_LOCK_FUNCTION(mutex_) { mutex_.Lock(); } + void UnLock() RTC_UNLOCK_FUNCTION(mutex_) { mutex_.Unlock(); } + + inline int32_t InputSanityCheckAfterUnlockedPeriod() const; + inline int32_t OutputSanityCheckAfterUnlockedPeriod() const; + + static void RecThreadFunc(void*); + static void PlayThreadFunc(void*); + bool RecThreadProcess(); + bool PlayThreadProcess(); + + AudioDeviceBuffer* _ptrAudioBuffer; + + Mutex mutex_; + + rtc::PlatformThread _ptrThreadRec; + rtc::PlatformThread _ptrThreadPlay; + + AudioMixerManagerLinuxALSA _mixerManager; + + uint16_t _inputDeviceIndex; + uint16_t _outputDeviceIndex; + bool _inputDeviceIsSpecified; + bool _outputDeviceIsSpecified; + + snd_pcm_t* _handleRecord; + snd_pcm_t* _handlePlayout; + + snd_pcm_uframes_t _recordingBuffersizeInFrame; + snd_pcm_uframes_t _recordingPeriodSizeInFrame; + snd_pcm_uframes_t _playoutBufferSizeInFrame; + snd_pcm_uframes_t _playoutPeriodSizeInFrame; + + ssize_t _recordingBufferSizeIn10MS; + ssize_t _playoutBufferSizeIn10MS; + uint32_t _recordingFramesIn10MS; + uint32_t _playoutFramesIn10MS; + + uint32_t _recordingFreq; + uint32_t _playoutFreq; + uint8_t _recChannels; + uint8_t _playChannels; + + int8_t* _recordingBuffer; // in byte + int8_t* _playoutBuffer; // in byte + uint32_t _recordingFramesLeft; + uint32_t _playoutFramesLeft; + + bool _initialized; + bool _recording; + bool _playing; + bool _recIsInitialized; + bool _playIsInitialized; + + snd_pcm_sframes_t _recordingDelay; + snd_pcm_sframes_t _playoutDelay; + + char _oldKeyState[32]; +#if defined(WEBRTC_USE_X11) + Display* _XDisplay; +#endif +}; + +} // namespace webrtc + +#endif // MODULES_AUDIO_DEVICE_MAIN_SOURCE_LINUX_AUDIO_DEVICE_ALSA_LINUX_H_ diff --git a/third_party/libwebrtc/modules/audio_device/linux/audio_device_pulse_linux.cc b/third_party/libwebrtc/modules/audio_device/linux/audio_device_pulse_linux.cc new file mode 100644 index 0000000000..90cd58c497 --- /dev/null +++ b/third_party/libwebrtc/modules/audio_device/linux/audio_device_pulse_linux.cc @@ -0,0 +1,2286 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. 
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/linux/audio_device_pulse_linux.h"
+
+#include <string.h>
+
+#include "modules/audio_device/linux/latebindingsymboltable_linux.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/platform_thread.h"
+
+WebRTCPulseSymbolTable* GetPulseSymbolTable() {
+  static WebRTCPulseSymbolTable* pulse_symbol_table =
+      new WebRTCPulseSymbolTable();
+  return pulse_symbol_table;
+}
+
+// Accesses Pulse functions through our late-binding symbol table instead of
+// directly. This way we don't have to link to libpulse, which means our binary
+// will work on systems that don't have it.
+#define LATE(sym)                                             \
+  LATESYM_GET(webrtc::adm_linux_pulse::PulseAudioSymbolTable, \
+              GetPulseSymbolTable(), sym)
+
+namespace webrtc {
+
+AudioDeviceLinuxPulse::AudioDeviceLinuxPulse()
+    : _ptrAudioBuffer(NULL),
+      _inputDeviceIndex(0),
+      _outputDeviceIndex(0),
+      _inputDeviceIsSpecified(false),
+      _outputDeviceIsSpecified(false),
+      sample_rate_hz_(0),
+      _recChannels(1),
+      _playChannels(1),
+      _initialized(false),
+      _recording(false),
+      _playing(false),
+      _recIsInitialized(false),
+      _playIsInitialized(false),
+      _startRec(false),
+      _startPlay(false),
+      update_speaker_volume_at_startup_(false),
+      quit_(false),
+      _sndCardPlayDelay(0),
+      _writeErrors(0),
+      _deviceIndex(-1),
+      _numPlayDevices(0),
+      _numRecDevices(0),
+      _playDeviceName(NULL),
+      _recDeviceName(NULL),
+      _playDisplayDeviceName(NULL),
+      _recDisplayDeviceName(NULL),
+      _playBuffer(NULL),
+      _playbackBufferSize(0),
+      _playbackBufferUnused(0),
+      _tempBufferSpace(0),
+      _recBuffer(NULL),
+      _recordBufferSize(0),
+      _recordBufferUsed(0),
+      _tempSampleData(NULL),
+      _tempSampleDataSize(0),
+      _configuredLatencyPlay(0),
+      _configuredLatencyRec(0),
+      _paDeviceIndex(-1),
+      _paStateChanged(false),
+      _paMainloop(NULL),
+      _paMainloopApi(NULL),
+      _paContext(NULL),
+      _recStream(NULL),
+      _playStream(NULL),
+      _recStreamFlags(0),
+      _playStreamFlags(0) {
+  RTC_DLOG(LS_INFO) << __FUNCTION__ << " created";
+
+  memset(_paServerVersion, 0, sizeof(_paServerVersion));
+  memset(&_playBufferAttr, 0, sizeof(_playBufferAttr));
+  memset(&_recBufferAttr, 0, sizeof(_recBufferAttr));
+  memset(_oldKeyState, 0, sizeof(_oldKeyState));
+}
+
+AudioDeviceLinuxPulse::~AudioDeviceLinuxPulse() {
+  RTC_DLOG(LS_INFO) << __FUNCTION__ << " destroyed";
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  Terminate();
+
+  if (_recBuffer) {
+    delete[] _recBuffer;
+    _recBuffer = NULL;
+  }
+  if (_playBuffer) {
+    delete[] _playBuffer;
+    _playBuffer = NULL;
+  }
+  if (_playDeviceName) {
+    delete[] _playDeviceName;
+    _playDeviceName = NULL;
+  }
+  if (_recDeviceName) {
+    delete[] _recDeviceName;
+    _recDeviceName = NULL;
+  }
+}
+
+void AudioDeviceLinuxPulse::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+
+  _ptrAudioBuffer = audioBuffer;
+
+  // Inform the AudioBuffer about default settings for this implementation.
+  // Set all values to zero here since the actual settings will be done by
+  // InitPlayout and InitRecording later.
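+  // [Aside: the LATE() macro defined near the top of this file resolves each
+  //  PulseAudio symbol at runtime through a lazily loaded symbol table. A
+  //  minimal sketch of the underlying idea with plain dlopen()/dlsym() --
+  //  hypothetical names, not the actual symbol-table implementation:
+  //
+  //    typedef pa_context* (*pa_context_new_fn)(pa_mainloop_api*,
+  //                                             const char*);
+  //    void* lib = dlopen("libpulse.so.0", RTLD_LAZY);  // NULL if absent
+  //    pa_context_new_fn ctx_new =
+  //        lib ? reinterpret_cast<pa_context_new_fn>(
+  //                  dlsym(lib, "pa_context_new"))
+  //            : nullptr;
+  //
+  //  Because the library is loaded at runtime rather than linked, the binary
+  //  still starts on systems without PulseAudio installed.]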
+ _ptrAudioBuffer->SetRecordingSampleRate(0); + _ptrAudioBuffer->SetPlayoutSampleRate(0); + _ptrAudioBuffer->SetRecordingChannels(0); + _ptrAudioBuffer->SetPlayoutChannels(0); +} + +// ---------------------------------------------------------------------------- +// ActiveAudioLayer +// ---------------------------------------------------------------------------- + +int32_t AudioDeviceLinuxPulse::ActiveAudioLayer( + AudioDeviceModule::AudioLayer& audioLayer) const { + audioLayer = AudioDeviceModule::kLinuxPulseAudio; + return 0; +} + +AudioDeviceGeneric::InitStatus AudioDeviceLinuxPulse::Init() { + RTC_DCHECK(thread_checker_.IsCurrent()); + if (_initialized) { + return InitStatus::OK; + } + + // Initialize PulseAudio + if (InitPulseAudio() < 0) { + RTC_LOG(LS_ERROR) << "failed to initialize PulseAudio"; + if (TerminatePulseAudio() < 0) { + RTC_LOG(LS_ERROR) << "failed to terminate PulseAudio"; + } + return InitStatus::OTHER_ERROR; + } + +#if defined(WEBRTC_USE_X11) + // Get X display handle for typing detection + _XDisplay = XOpenDisplay(NULL); + if (!_XDisplay) { + RTC_LOG(LS_WARNING) + << "failed to open X display, typing detection will not work"; + } +#endif + + // RECORDING + const auto attributes = + rtc::ThreadAttributes().SetPriority(rtc::ThreadPriority::kRealtime); + _ptrThreadRec = rtc::PlatformThread::SpawnJoinable( + [this] { + while (RecThreadProcess()) { + } + }, + "webrtc_audio_module_rec_thread", attributes); + + // PLAYOUT + _ptrThreadPlay = rtc::PlatformThread::SpawnJoinable( + [this] { + while (PlayThreadProcess()) { + } + }, + "webrtc_audio_module_play_thread", attributes); + _initialized = true; + + return InitStatus::OK; +} + +int32_t AudioDeviceLinuxPulse::Terminate() { + RTC_DCHECK(thread_checker_.IsCurrent()); + if (!_initialized) { + return 0; + } + { + MutexLock lock(&mutex_); + quit_ = true; + } + _mixerManager.Close(); + + // RECORDING + _timeEventRec.Set(); + _ptrThreadRec.Finalize(); + + // PLAYOUT + _timeEventPlay.Set(); + _ptrThreadPlay.Finalize(); + + // Terminate PulseAudio + if (TerminatePulseAudio() < 0) { + RTC_LOG(LS_ERROR) << "failed to terminate PulseAudio"; + return -1; + } + +#if defined(WEBRTC_USE_X11) + if (_XDisplay) { + XCloseDisplay(_XDisplay); + _XDisplay = NULL; + } +#endif + + _initialized = false; + _outputDeviceIsSpecified = false; + _inputDeviceIsSpecified = false; + + return 0; +} + +bool AudioDeviceLinuxPulse::Initialized() const { + RTC_DCHECK(thread_checker_.IsCurrent()); + return (_initialized); +} + +int32_t AudioDeviceLinuxPulse::InitSpeaker() { + RTC_DCHECK(thread_checker_.IsCurrent()); + + if (_playing) { + return -1; + } + + if (!_outputDeviceIsSpecified) { + return -1; + } + + // check if default device + if (_outputDeviceIndex == 0) { + uint16_t deviceIndex = 0; + GetDefaultDeviceInfo(false, NULL, deviceIndex); + _paDeviceIndex = deviceIndex; + } else { + // get the PA device index from + // the callback + _deviceIndex = _outputDeviceIndex; + + // get playout devices + PlayoutDevices(); + } + + // the callback has now set the _paDeviceIndex to + // the PulseAudio index of the device + if (_mixerManager.OpenSpeaker(_paDeviceIndex) == -1) { + return -1; + } + + // clear _deviceIndex + _deviceIndex = -1; + _paDeviceIndex = -1; + + return 0; +} + +int32_t AudioDeviceLinuxPulse::InitMicrophone() { + RTC_DCHECK(thread_checker_.IsCurrent()); + if (_recording) { + return -1; + } + + if (!_inputDeviceIsSpecified) { + return -1; + } + + // Check if default device + if (_inputDeviceIndex == 0) { + uint16_t deviceIndex = 0; + 
GetDefaultDeviceInfo(true, NULL, deviceIndex); + _paDeviceIndex = deviceIndex; + } else { + // Get the PA device index from + // the callback + _deviceIndex = _inputDeviceIndex; + + // get recording devices + RecordingDevices(); + } + + // The callback has now set the _paDeviceIndex to + // the PulseAudio index of the device + if (_mixerManager.OpenMicrophone(_paDeviceIndex) == -1) { + return -1; + } + + // Clear _deviceIndex + _deviceIndex = -1; + _paDeviceIndex = -1; + + return 0; +} + +bool AudioDeviceLinuxPulse::SpeakerIsInitialized() const { + RTC_DCHECK(thread_checker_.IsCurrent()); + return (_mixerManager.SpeakerIsInitialized()); +} + +bool AudioDeviceLinuxPulse::MicrophoneIsInitialized() const { + RTC_DCHECK(thread_checker_.IsCurrent()); + return (_mixerManager.MicrophoneIsInitialized()); +} + +int32_t AudioDeviceLinuxPulse::SpeakerVolumeIsAvailable(bool& available) { + RTC_DCHECK(thread_checker_.IsCurrent()); + bool wasInitialized = _mixerManager.SpeakerIsInitialized(); + + // Make an attempt to open up the + // output mixer corresponding to the currently selected output device. + if (!wasInitialized && InitSpeaker() == -1) { + // If we end up here it means that the selected speaker has no volume + // control. + available = false; + return 0; + } + + // Given that InitSpeaker was successful, we know volume control exists. + available = true; + + // Close the initialized output mixer + if (!wasInitialized) { + _mixerManager.CloseSpeaker(); + } + + return 0; +} + +int32_t AudioDeviceLinuxPulse::SetSpeakerVolume(uint32_t volume) { + RTC_DCHECK(thread_checker_.IsCurrent()); + if (!_playing) { + // Only update the volume if it's been set while we weren't playing. + update_speaker_volume_at_startup_ = true; + } + return (_mixerManager.SetSpeakerVolume(volume)); +} + +int32_t AudioDeviceLinuxPulse::SpeakerVolume(uint32_t& volume) const { + RTC_DCHECK(thread_checker_.IsCurrent()); + uint32_t level(0); + + if (_mixerManager.SpeakerVolume(level) == -1) { + return -1; + } + + volume = level; + + return 0; +} + +int32_t AudioDeviceLinuxPulse::MaxSpeakerVolume(uint32_t& maxVolume) const { + RTC_DCHECK(thread_checker_.IsCurrent()); + uint32_t maxVol(0); + + if (_mixerManager.MaxSpeakerVolume(maxVol) == -1) { + return -1; + } + + maxVolume = maxVol; + + return 0; +} + +int32_t AudioDeviceLinuxPulse::MinSpeakerVolume(uint32_t& minVolume) const { + RTC_DCHECK(thread_checker_.IsCurrent()); + uint32_t minVol(0); + + if (_mixerManager.MinSpeakerVolume(minVol) == -1) { + return -1; + } + + minVolume = minVol; + + return 0; +} + +int32_t AudioDeviceLinuxPulse::SpeakerMuteIsAvailable(bool& available) { + RTC_DCHECK(thread_checker_.IsCurrent()); + bool isAvailable(false); + bool wasInitialized = _mixerManager.SpeakerIsInitialized(); + + // Make an attempt to open up the + // output mixer corresponding to the currently selected output device. + // + if (!wasInitialized && InitSpeaker() == -1) { + // If we end up here it means that the selected speaker has no volume + // control, hence it is safe to state that there is no mute control + // already at this stage. 
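+  // [The capability probes in this file all follow the same shape; in
+  //  outline, with a hypothetical Mixer type for illustration only:
+  //
+  //    bool was_open = mixer.IsOpen();
+  //    if (!was_open && !mixer.Open())    // cannot open: no control at all
+  //      return false;
+  //    bool ok = mixer.HasMuteControl();  // the actual capability query
+  //    if (!was_open)
+  //      mixer.Close();                   // restore the state we found
+  //    return ok;
+  //
+  //  Opening only on demand and closing again keeps the query free of side
+  //  effects on an already-initialized mixer.]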
+    available = false;
+    return 0;
+  }
+
+  // Check if the selected speaker has a mute control
+  _mixerManager.SpeakerMuteIsAvailable(isAvailable);
+
+  available = isAvailable;
+
+  // Close the initialized output mixer
+  if (!wasInitialized) {
+    _mixerManager.CloseSpeaker();
+  }
+
+  return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::SetSpeakerMute(bool enable) {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  return (_mixerManager.SetSpeakerMute(enable));
+}
+
+int32_t AudioDeviceLinuxPulse::SpeakerMute(bool& enabled) const {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  bool muted(0);
+  if (_mixerManager.SpeakerMute(muted) == -1) {
+    return -1;
+  }
+
+  enabled = muted;
+  return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::MicrophoneMuteIsAvailable(bool& available) {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  bool isAvailable(false);
+  bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
+
+  // Make an attempt to open up the
+  // input mixer corresponding to the currently selected input device.
+  //
+  if (!wasInitialized && InitMicrophone() == -1) {
+    // If we end up here it means that the selected microphone has no
+    // volume control, hence it is safe to state that there is no
+    // mute control already at this stage.
+    available = false;
+    return 0;
+  }
+
+  // Check if the selected microphone has a mute control
+  //
+  _mixerManager.MicrophoneMuteIsAvailable(isAvailable);
+  available = isAvailable;
+
+  // Close the initialized input mixer
+  //
+  if (!wasInitialized) {
+    _mixerManager.CloseMicrophone();
+  }
+
+  return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::SetMicrophoneMute(bool enable) {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  return (_mixerManager.SetMicrophoneMute(enable));
+}
+
+int32_t AudioDeviceLinuxPulse::MicrophoneMute(bool& enabled) const {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  bool muted(0);
+  if (_mixerManager.MicrophoneMute(muted) == -1) {
+    return -1;
+  }
+
+  enabled = muted;
+  return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::StereoRecordingIsAvailable(bool& available) {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  if (_recChannels == 2 && _recording) {
+    available = true;
+    return 0;
+  }
+
+  available = false;
+  bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
+  int error = 0;
+
+  if (!wasInitialized && InitMicrophone() == -1) {
+    // Cannot open the specified device
+    available = false;
+    return 0;
+  }
+
+  // Check if the selected microphone can record stereo.
+  bool isAvailable(false);
+  error = _mixerManager.StereoRecordingIsAvailable(isAvailable);
+  if (!error)
+    available = isAvailable;
+
+  // Close the initialized input mixer
+  if (!wasInitialized) {
+    _mixerManager.CloseMicrophone();
+  }
+
+  return error;
+}
+
+int32_t AudioDeviceLinuxPulse::SetStereoRecording(bool enable) {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  if (enable)
+    _recChannels = 2;
+  else
+    _recChannels = 1;
+
+  return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::StereoRecording(bool& enabled) const {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  if (_recChannels == 2)
+    enabled = true;
+  else
+    enabled = false;
+
+  return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::StereoPlayoutIsAvailable(bool& available) {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  if (_playChannels == 2 && _playing) {
+    available = true;
+    return 0;
+  }
+
+  available = false;
+  bool wasInitialized = _mixerManager.SpeakerIsInitialized();
+  int error = 0;
+
+  if (!wasInitialized && InitSpeaker() == -1) {
+    // Cannot open the specified device.
+    return -1;
+  }
+
+  // Check if the selected speaker can play stereo.
+  bool isAvailable(false);
+  error = _mixerManager.StereoPlayoutIsAvailable(isAvailable);
+  if (!error)
+    available = isAvailable;
+
+  // Close the initialized output mixer
+  if (!wasInitialized) {
+    _mixerManager.CloseSpeaker();
+  }
+
+  return error;
+}
+
+int32_t AudioDeviceLinuxPulse::SetStereoPlayout(bool enable) {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  if (enable)
+    _playChannels = 2;
+  else
+    _playChannels = 1;
+
+  return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::StereoPlayout(bool& enabled) const {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  if (_playChannels == 2)
+    enabled = true;
+  else
+    enabled = false;
+
+  return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::MicrophoneVolumeIsAvailable(bool& available) {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
+
+  // Make an attempt to open up the
+  // input mixer corresponding to the currently selected input device.
+  if (!wasInitialized && InitMicrophone() == -1) {
+    // If we end up here it means that the selected microphone has no
+    // volume control.
+    available = false;
+    return 0;
+  }
+
+  // Given that InitMicrophone was successful, we know that a volume control
+  // exists.
+  available = true;
+
+  // Close the initialized input mixer
+  if (!wasInitialized) {
+    _mixerManager.CloseMicrophone();
+  }
+
+  return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::SetMicrophoneVolume(uint32_t volume) {
+  return (_mixerManager.SetMicrophoneVolume(volume));
+}
+
+int32_t AudioDeviceLinuxPulse::MicrophoneVolume(uint32_t& volume) const {
+  uint32_t level(0);
+
+  if (_mixerManager.MicrophoneVolume(level) == -1) {
+    RTC_LOG(LS_WARNING) << "failed to retrieve current microphone level";
+    return -1;
+  }
+
+  volume = level;
+
+  return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::MaxMicrophoneVolume(uint32_t& maxVolume) const {
+  uint32_t maxVol(0);
+
+  if (_mixerManager.MaxMicrophoneVolume(maxVol) == -1) {
+    return -1;
+  }
+
+  maxVolume = maxVol;
+
+  return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::MinMicrophoneVolume(uint32_t& minVolume) const {
+  uint32_t minVol(0);
+
+  if (_mixerManager.MinMicrophoneVolume(minVol) == -1) {
+    return -1;
+  }
+
+  minVolume = minVol;
+
+  return 0;
+}
+
+int16_t AudioDeviceLinuxPulse::PlayoutDevices() {
+  PaLock();
+
+  pa_operation* paOperation = NULL;
+  _numPlayDevices = 1;  // init to 1 to account for "default"
+
+  // get the whole list of devices and update _numPlayDevices
+  paOperation =
+      LATE(pa_context_get_sink_info_list)(_paContext, PaSinkInfoCallback, this);
+
+  WaitForOperationCompletion(paOperation);
+
+  PaUnLock();
+
+  return _numPlayDevices;
+}
+
+int32_t AudioDeviceLinuxPulse::SetPlayoutDevice(uint16_t index) {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  if (_playIsInitialized) {
+    return -1;
+  }
+
+  const uint16_t nDevices = PlayoutDevices();
+
+  RTC_LOG(LS_VERBOSE) << "number of available output devices is " << nDevices;
+
+  if (index > (nDevices - 1)) {
+    RTC_LOG(LS_ERROR) << "device index is out of range [0," << (nDevices - 1)
+                      << "]";
+    return -1;
+  }
+
+  _outputDeviceIndex = index;
+  _outputDeviceIsSpecified = true;
+
+  return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::SetPlayoutDevice(
+    AudioDeviceModule::WindowsDeviceType /*device*/) {
+  RTC_LOG(LS_ERROR) << "WindowsDeviceType not supported";
+  return -1;
+}
+
+int32_t AudioDeviceLinuxPulse::PlayoutDeviceName(
+    uint16_t index,
+    char name[kAdmMaxDeviceNameSize],
+    char guid[kAdmMaxGuidSize]) {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  const uint16_t nDevices = PlayoutDevices();
+
+  if ((index > (nDevices - 1)) || (name == NULL)) {
+    return -1;
+  }
+
+  memset(name, 0, kAdmMaxDeviceNameSize);
+
+  if (guid != NULL) {
+    memset(guid, 0, kAdmMaxGuidSize);
+  }
+
+  // Check if default device
+  if (index == 0) {
+    uint16_t deviceIndex = 0;
+    return GetDefaultDeviceInfo(false, name, deviceIndex);
+  }
+
+  // Tell the callback that we want
+  // the name for this device
+  _playDisplayDeviceName = name;
+  _deviceIndex = index;
+
+  // get playout devices
+  PlayoutDevices();
+
+  // clear device name and index
+  _playDisplayDeviceName = NULL;
+  _deviceIndex = -1;
+
+  return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::RecordingDeviceName(
+    uint16_t index,
+    char name[kAdmMaxDeviceNameSize],
+    char guid[kAdmMaxGuidSize]) {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  const uint16_t nDevices(RecordingDevices());
+
+  if ((index > (nDevices - 1)) || (name == NULL)) {
+    return -1;
+  }
+
+  memset(name, 0, kAdmMaxDeviceNameSize);
+
+  if (guid != NULL) {
+    memset(guid, 0, kAdmMaxGuidSize);
+  }
+
+  // Check if default device
+  if (index == 0) {
+    uint16_t deviceIndex = 0;
+    return GetDefaultDeviceInfo(true, name, deviceIndex);
+  }
+
+  // Tell the callback that we want
+  // the name for this device
+  _recDisplayDeviceName = name;
+  _deviceIndex = index;
+
+  // Get recording devices
+  RecordingDevices();
+
+  // Clear device name and index
+  _recDisplayDeviceName = NULL;
+  _deviceIndex = -1;
+
+  return 0;
+}
+
+int16_t AudioDeviceLinuxPulse::RecordingDevices() {
+  PaLock();
+
+  pa_operation* paOperation = NULL;
+  _numRecDevices = 1;  // Init to 1 to account for "default"
+
+  // Get the whole list of devices and update _numRecDevices
+  paOperation = LATE(pa_context_get_source_info_list)(
+      _paContext, PaSourceInfoCallback, this);
+
+  WaitForOperationCompletion(paOperation);
+
+  PaUnLock();
+
+  return _numRecDevices;
+}
+
+int32_t AudioDeviceLinuxPulse::SetRecordingDevice(uint16_t index) {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  if (_recIsInitialized) {
+    return -1;
+  }
+
+  const uint16_t nDevices(RecordingDevices());
+
+  RTC_LOG(LS_VERBOSE) << "number of available input devices is " << nDevices;
+
+  if (index > (nDevices - 1)) {
+    RTC_LOG(LS_ERROR) << "device index is out of range [0," << (nDevices - 1)
+                      << "]";
+    return -1;
+  }
+
+  _inputDeviceIndex = index;
+  _inputDeviceIsSpecified = true;
+
+  return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::SetRecordingDevice(
+    AudioDeviceModule::WindowsDeviceType /*device*/) {
+  RTC_LOG(LS_ERROR) << "WindowsDeviceType not supported";
+  return -1;
+}
+
+int32_t AudioDeviceLinuxPulse::PlayoutIsAvailable(bool& available) {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  available = false;
+
+  // Try to initialize the playout side
+  int32_t res = InitPlayout();
+
+  // Cancel effect of initialization
+  StopPlayout();
+
+  if (res != -1) {
+    available = true;
+  }
+
+  return res;
+}
+
+int32_t AudioDeviceLinuxPulse::RecordingIsAvailable(bool& available) {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  available = false;
+
+  // Try to initialize the recording side
+  int32_t res = InitRecording();
+
+  // Cancel effect of initialization
+  StopRecording();
+
+  if (res != -1) {
+    available = true;
+  }
+
+  return res;
+}
+
+int32_t AudioDeviceLinuxPulse::InitPlayout() {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+
+  if (_playing) {
+    return -1;
+  }
+
+  if (!_outputDeviceIsSpecified) {
+    return -1;
+  }
+
+  if (_playIsInitialized) {
+    return 0;
+  }
+
+  // Initialize the speaker (devices might have been added or removed)
+  if (InitSpeaker() == -1) {
+    RTC_LOG(LS_WARNING) << "InitSpeaker()
failed"; + } + + // Set the play sample specification + pa_sample_spec playSampleSpec; + playSampleSpec.channels = _playChannels; + playSampleSpec.format = PA_SAMPLE_S16LE; + playSampleSpec.rate = sample_rate_hz_; + + // Create a new play stream + { + MutexLock lock(&mutex_); + _playStream = + LATE(pa_stream_new)(_paContext, "playStream", &playSampleSpec, NULL); + } + + if (!_playStream) { + RTC_LOG(LS_ERROR) << "failed to create play stream, err=" + << LATE(pa_context_errno)(_paContext); + return -1; + } + + // Provide the playStream to the mixer + _mixerManager.SetPlayStream(_playStream); + + if (_ptrAudioBuffer) { + // Update audio buffer with the selected parameters + _ptrAudioBuffer->SetPlayoutSampleRate(sample_rate_hz_); + _ptrAudioBuffer->SetPlayoutChannels((uint8_t)_playChannels); + } + + RTC_LOG(LS_VERBOSE) << "stream state " + << LATE(pa_stream_get_state)(_playStream); + + // Set stream flags + _playStreamFlags = (pa_stream_flags_t)(PA_STREAM_AUTO_TIMING_UPDATE | + PA_STREAM_INTERPOLATE_TIMING); + + if (_configuredLatencyPlay != WEBRTC_PA_NO_LATENCY_REQUIREMENTS) { + // If configuring a specific latency then we want to specify + // PA_STREAM_ADJUST_LATENCY to make the server adjust parameters + // automatically to reach that target latency. However, that flag + // doesn't exist in Ubuntu 8.04 and many people still use that, + // so we have to check the protocol version of libpulse. + if (LATE(pa_context_get_protocol_version)(_paContext) >= + WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION) { + _playStreamFlags |= PA_STREAM_ADJUST_LATENCY; + } + + const pa_sample_spec* spec = LATE(pa_stream_get_sample_spec)(_playStream); + if (!spec) { + RTC_LOG(LS_ERROR) << "pa_stream_get_sample_spec()"; + return -1; + } + + size_t bytesPerSec = LATE(pa_bytes_per_second)(spec); + uint32_t latency = bytesPerSec * WEBRTC_PA_PLAYBACK_LATENCY_MINIMUM_MSECS / + WEBRTC_PA_MSECS_PER_SEC; + + // Set the play buffer attributes + _playBufferAttr.maxlength = latency; // num bytes stored in the buffer + _playBufferAttr.tlength = latency; // target fill level of play buffer + // minimum free num bytes before server request more data + _playBufferAttr.minreq = latency / WEBRTC_PA_PLAYBACK_REQUEST_FACTOR; + // prebuffer tlength before starting playout + _playBufferAttr.prebuf = _playBufferAttr.tlength - _playBufferAttr.minreq; + + _configuredLatencyPlay = latency; + } + + // num samples in bytes * num channels + _playbackBufferSize = sample_rate_hz_ / 100 * 2 * _playChannels; + _playbackBufferUnused = _playbackBufferSize; + _playBuffer = new int8_t[_playbackBufferSize]; + + // Enable underflow callback + LATE(pa_stream_set_underflow_callback) + (_playStream, PaStreamUnderflowCallback, this); + + // Set the state callback function for the stream + LATE(pa_stream_set_state_callback)(_playStream, PaStreamStateCallback, this); + + // Mark playout side as initialized + { + MutexLock lock(&mutex_); + _playIsInitialized = true; + _sndCardPlayDelay = 0; + } + + return 0; +} + +int32_t AudioDeviceLinuxPulse::InitRecording() { + RTC_DCHECK(thread_checker_.IsCurrent()); + + if (_recording) { + return -1; + } + + if (!_inputDeviceIsSpecified) { + return -1; + } + + if (_recIsInitialized) { + return 0; + } + + // Initialize the microphone (devices might have been added or removed) + if (InitMicrophone() == -1) { + RTC_LOG(LS_WARNING) << "InitMicrophone() failed"; + } + + // Set the rec sample specification + pa_sample_spec recSampleSpec; + recSampleSpec.channels = _recChannels; + recSampleSpec.format = PA_SAMPLE_S16LE; + 
recSampleSpec.rate = sample_rate_hz_; + + // Create a new rec stream + _recStream = + LATE(pa_stream_new)(_paContext, "recStream", &recSampleSpec, NULL); + if (!_recStream) { + RTC_LOG(LS_ERROR) << "failed to create rec stream, err=" + << LATE(pa_context_errno)(_paContext); + return -1; + } + + // Provide the recStream to the mixer + _mixerManager.SetRecStream(_recStream); + + if (_ptrAudioBuffer) { + // Update audio buffer with the selected parameters + _ptrAudioBuffer->SetRecordingSampleRate(sample_rate_hz_); + _ptrAudioBuffer->SetRecordingChannels((uint8_t)_recChannels); + } + + if (_configuredLatencyRec != WEBRTC_PA_NO_LATENCY_REQUIREMENTS) { + _recStreamFlags = (pa_stream_flags_t)(PA_STREAM_AUTO_TIMING_UPDATE | + PA_STREAM_INTERPOLATE_TIMING); + + // If configuring a specific latency then we want to specify + // PA_STREAM_ADJUST_LATENCY to make the server adjust parameters + // automatically to reach that target latency. However, that flag + // doesn't exist in Ubuntu 8.04 and many people still use that, + // so we have to check the protocol version of libpulse. + if (LATE(pa_context_get_protocol_version)(_paContext) >= + WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION) { + _recStreamFlags |= PA_STREAM_ADJUST_LATENCY; + } + + const pa_sample_spec* spec = LATE(pa_stream_get_sample_spec)(_recStream); + if (!spec) { + RTC_LOG(LS_ERROR) << "pa_stream_get_sample_spec(rec)"; + return -1; + } + + size_t bytesPerSec = LATE(pa_bytes_per_second)(spec); + uint32_t latency = bytesPerSec * WEBRTC_PA_LOW_CAPTURE_LATENCY_MSECS / + WEBRTC_PA_MSECS_PER_SEC; + + // Set the rec buffer attributes + // Note: fragsize specifies a maximum transfer size, not a minimum, so + // it is not possible to force a high latency setting, only a low one. + _recBufferAttr.fragsize = latency; // size of fragment + _recBufferAttr.maxlength = + latency + bytesPerSec * WEBRTC_PA_CAPTURE_BUFFER_EXTRA_MSECS / + WEBRTC_PA_MSECS_PER_SEC; + + _configuredLatencyRec = latency; + } + + _recordBufferSize = sample_rate_hz_ / 100 * 2 * _recChannels; + _recordBufferUsed = 0; + _recBuffer = new int8_t[_recordBufferSize]; + + // Enable overflow callback + LATE(pa_stream_set_overflow_callback) + (_recStream, PaStreamOverflowCallback, this); + + // Set the state callback function for the stream + LATE(pa_stream_set_state_callback)(_recStream, PaStreamStateCallback, this); + + // Mark recording side as initialized + _recIsInitialized = true; + + return 0; +} + +int32_t AudioDeviceLinuxPulse::StartRecording() { + RTC_DCHECK(thread_checker_.IsCurrent()); + if (!_recIsInitialized) { + return -1; + } + + if (_recording) { + return 0; + } + + // Set state to ensure that the recording starts from the audio thread. + _startRec = true; + + // The audio thread will signal when recording has started. + _timeEventRec.Set(); + if (!_recStartEvent.Wait(TimeDelta::Seconds(10))) { + { + MutexLock lock(&mutex_); + _startRec = false; + } + StopRecording(); + RTC_LOG(LS_ERROR) << "failed to activate recording"; + return -1; + } + + { + MutexLock lock(&mutex_); + if (_recording) { + // The recording state is set by the audio thread after recording + // has started. 
+    } else {
+      RTC_LOG(LS_ERROR) << "failed to activate recording";
+      return -1;
+    }
+  }
+
+  return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::StopRecording() {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  MutexLock lock(&mutex_);
+
+  if (!_recIsInitialized) {
+    return 0;
+  }
+
+  if (_recStream == NULL) {
+    return -1;
+  }
+
+  _recIsInitialized = false;
+  _recording = false;
+
+  RTC_LOG(LS_VERBOSE) << "stopping recording";
+
+  // Stop Recording
+  PaLock();
+
+  DisableReadCallback();
+  LATE(pa_stream_set_overflow_callback)(_recStream, NULL, NULL);
+
+  // Unset this here so that we don't get a TERMINATED callback
+  LATE(pa_stream_set_state_callback)(_recStream, NULL, NULL);
+
+  if (LATE(pa_stream_get_state)(_recStream) != PA_STREAM_UNCONNECTED) {
+    // Disconnect the stream
+    if (LATE(pa_stream_disconnect)(_recStream) != PA_OK) {
+      RTC_LOG(LS_ERROR) << "failed to disconnect rec stream, err="
+                        << LATE(pa_context_errno)(_paContext);
+      PaUnLock();
+      return -1;
+    }
+
+    RTC_LOG(LS_VERBOSE) << "disconnected recording";
+  }
+
+  LATE(pa_stream_unref)(_recStream);
+  _recStream = NULL;
+
+  PaUnLock();
+
+  // Provide the recStream to the mixer
+  _mixerManager.SetRecStream(_recStream);
+
+  if (_recBuffer) {
+    delete[] _recBuffer;
+    _recBuffer = NULL;
+  }
+
+  return 0;
+}
+
+bool AudioDeviceLinuxPulse::RecordingIsInitialized() const {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  return (_recIsInitialized);
+}
+
+bool AudioDeviceLinuxPulse::Recording() const {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  return (_recording);
+}
+
+bool AudioDeviceLinuxPulse::PlayoutIsInitialized() const {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  return (_playIsInitialized);
+}
+
+int32_t AudioDeviceLinuxPulse::StartPlayout() {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+
+  if (!_playIsInitialized) {
+    return -1;
+  }
+
+  if (_playing) {
+    return 0;
+  }
+
+  // Set state to ensure that playout starts from the audio thread.
+  {
+    MutexLock lock(&mutex_);
+    _startPlay = true;
+  }
+
+  // Both `_startPlay` and `_playing` need protection since they are also
+  // accessed on the playout thread.
+
+  // The audio thread will signal when playout has started.
+  _timeEventPlay.Set();
+  if (!_playStartEvent.Wait(TimeDelta::Seconds(10))) {
+    {
+      MutexLock lock(&mutex_);
+      _startPlay = false;
+    }
+    StopPlayout();
+    RTC_LOG(LS_ERROR) << "failed to activate playout";
+    return -1;
+  }
+
+  {
+    MutexLock lock(&mutex_);
+    if (_playing) {
+      // The playing state is set by the audio thread after playout
+      // has started.
+    } else {
+      RTC_LOG(LS_ERROR) << "failed to activate playing";
+      return -1;
+    }
+  }
+
+  return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::StopPlayout() {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  MutexLock lock(&mutex_);
+
+  if (!_playIsInitialized) {
+    return 0;
+  }
+
+  if (_playStream == NULL) {
+    return -1;
+  }
+
+  _playIsInitialized = false;
+  _playing = false;
+  _sndCardPlayDelay = 0;
+
+  RTC_LOG(LS_VERBOSE) << "stopping playback";
+
+  // Stop Playout
+  PaLock();
+
+  DisableWriteCallback();
+  LATE(pa_stream_set_underflow_callback)(_playStream, NULL, NULL);
+
+  // Unset this here so that we don't get a TERMINATED callback
+  LATE(pa_stream_set_state_callback)(_playStream, NULL, NULL);
+
+  if (LATE(pa_stream_get_state)(_playStream) != PA_STREAM_UNCONNECTED) {
+    // Disconnect the stream
+    if (LATE(pa_stream_disconnect)(_playStream) != PA_OK) {
+      RTC_LOG(LS_ERROR) << "failed to disconnect play stream, err="
+                        << LATE(pa_context_errno)(_paContext);
+      PaUnLock();
+      return -1;
+    }
+
+    RTC_LOG(LS_VERBOSE) << "disconnected playback";
+  }
+
+  LATE(pa_stream_unref)(_playStream);
+  _playStream = NULL;
+
+  PaUnLock();
+
+  // Provide the playStream to the mixer
+  _mixerManager.SetPlayStream(_playStream);
+
+  if (_playBuffer) {
+    delete[] _playBuffer;
+    _playBuffer = NULL;
+  }
+
+  return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::PlayoutDelay(uint16_t& delayMS) const {
+  MutexLock lock(&mutex_);
+  delayMS = (uint16_t)_sndCardPlayDelay;
+  return 0;
+}
+
+bool AudioDeviceLinuxPulse::Playing() const {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  return (_playing);
+}
+
+// ============================================================================
+// Private Methods
+// ============================================================================
+
+void AudioDeviceLinuxPulse::PaContextStateCallback(pa_context* c, void* pThis) {
+  static_cast<AudioDeviceLinuxPulse*>(pThis)->PaContextStateCallbackHandler(c);
+}
+
+// ----------------------------------------------------------------------------
+//  PaSinkInfoCallback
+// ----------------------------------------------------------------------------
+
+void AudioDeviceLinuxPulse::PaSinkInfoCallback(pa_context* /*c*/,
+                                               const pa_sink_info* i,
+                                               int eol,
+                                               void* pThis) {
+  static_cast<AudioDeviceLinuxPulse*>(pThis)->PaSinkInfoCallbackHandler(i, eol);
+}
+
+void AudioDeviceLinuxPulse::PaSourceInfoCallback(pa_context* /*c*/,
+                                                 const pa_source_info* i,
+                                                 int eol,
+                                                 void* pThis) {
+  static_cast<AudioDeviceLinuxPulse*>(pThis)->PaSourceInfoCallbackHandler(i,
+                                                                          eol);
+}
+
+void AudioDeviceLinuxPulse::PaServerInfoCallback(pa_context* /*c*/,
+                                                 const pa_server_info* i,
+                                                 void* pThis) {
+  static_cast<AudioDeviceLinuxPulse*>(pThis)->PaServerInfoCallbackHandler(i);
+}
+
+void AudioDeviceLinuxPulse::PaStreamStateCallback(pa_stream* p, void* pThis) {
+  static_cast<AudioDeviceLinuxPulse*>(pThis)->PaStreamStateCallbackHandler(p);
+}
+
+void AudioDeviceLinuxPulse::PaContextStateCallbackHandler(pa_context* c) {
+  RTC_LOG(LS_VERBOSE) << "context state cb";
+
+  pa_context_state_t state = LATE(pa_context_get_state)(c);
+  switch (state) {
+    case PA_CONTEXT_UNCONNECTED:
+      RTC_LOG(LS_VERBOSE) << "unconnected";
+      break;
+    case PA_CONTEXT_CONNECTING:
+    case PA_CONTEXT_AUTHORIZING:
+    case PA_CONTEXT_SETTING_NAME:
+      RTC_LOG(LS_VERBOSE) << "no state";
+      break;
+    case PA_CONTEXT_FAILED:
+    case PA_CONTEXT_TERMINATED:
+      RTC_LOG(LS_VERBOSE) << "failed";
+      _paStateChanged = true;
+      LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
+      break;
+    case PA_CONTEXT_READY:
+      RTC_LOG(LS_VERBOSE) << "ready";
+      _paStateChanged = true;
+      LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
+      break;
+  }
+}
+
+void
AudioDeviceLinuxPulse::PaSinkInfoCallbackHandler(const pa_sink_info* i, + int eol) { + if (eol) { + // Signal that we are done + LATE(pa_threaded_mainloop_signal)(_paMainloop, 0); + return; + } + + if (_numPlayDevices == _deviceIndex) { + // Convert the device index to the one of the sink + _paDeviceIndex = i->index; + + if (_playDeviceName) { + // Copy the sink name + strncpy(_playDeviceName, i->name, kAdmMaxDeviceNameSize); + _playDeviceName[kAdmMaxDeviceNameSize - 1] = '\0'; + } + if (_playDisplayDeviceName) { + // Copy the sink display name + strncpy(_playDisplayDeviceName, i->description, kAdmMaxDeviceNameSize); + _playDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0'; + } + } + + _numPlayDevices++; +} + +void AudioDeviceLinuxPulse::PaSourceInfoCallbackHandler(const pa_source_info* i, + int eol) { + if (eol) { + // Signal that we are done + LATE(pa_threaded_mainloop_signal)(_paMainloop, 0); + return; + } + + // We don't want to list output devices + if (i->monitor_of_sink == PA_INVALID_INDEX) { + if (_numRecDevices == _deviceIndex) { + // Convert the device index to the one of the source + _paDeviceIndex = i->index; + + if (_recDeviceName) { + // copy the source name + strncpy(_recDeviceName, i->name, kAdmMaxDeviceNameSize); + _recDeviceName[kAdmMaxDeviceNameSize - 1] = '\0'; + } + if (_recDisplayDeviceName) { + // Copy the source display name + strncpy(_recDisplayDeviceName, i->description, kAdmMaxDeviceNameSize); + _recDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0'; + } + } + + _numRecDevices++; + } +} + +void AudioDeviceLinuxPulse::PaServerInfoCallbackHandler( + const pa_server_info* i) { + // Use PA native sampling rate + sample_rate_hz_ = i->sample_spec.rate; + + // Copy the PA server version + strncpy(_paServerVersion, i->server_version, 31); + _paServerVersion[31] = '\0'; + + if (_recDisplayDeviceName) { + // Copy the source name + strncpy(_recDisplayDeviceName, i->default_source_name, + kAdmMaxDeviceNameSize); + _recDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0'; + } + + if (_playDisplayDeviceName) { + // Copy the sink name + strncpy(_playDisplayDeviceName, i->default_sink_name, + kAdmMaxDeviceNameSize); + _playDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0'; + } + + LATE(pa_threaded_mainloop_signal)(_paMainloop, 0); +} + +void AudioDeviceLinuxPulse::PaStreamStateCallbackHandler(pa_stream* p) { + RTC_LOG(LS_VERBOSE) << "stream state cb"; + + pa_stream_state_t state = LATE(pa_stream_get_state)(p); + switch (state) { + case PA_STREAM_UNCONNECTED: + RTC_LOG(LS_VERBOSE) << "unconnected"; + break; + case PA_STREAM_CREATING: + RTC_LOG(LS_VERBOSE) << "creating"; + break; + case PA_STREAM_FAILED: + case PA_STREAM_TERMINATED: + RTC_LOG(LS_VERBOSE) << "failed"; + break; + case PA_STREAM_READY: + RTC_LOG(LS_VERBOSE) << "ready"; + break; + } + + LATE(pa_threaded_mainloop_signal)(_paMainloop, 0); +} + +int32_t AudioDeviceLinuxPulse::CheckPulseAudioVersion() { + PaLock(); + + pa_operation* paOperation = NULL; + + // get the server info and update deviceName + paOperation = + LATE(pa_context_get_server_info)(_paContext, PaServerInfoCallback, this); + + WaitForOperationCompletion(paOperation); + + PaUnLock(); + + RTC_LOG(LS_VERBOSE) << "checking PulseAudio version: " << _paServerVersion; + + return 0; +} + +int32_t AudioDeviceLinuxPulse::InitSamplingFrequency() { + PaLock(); + + pa_operation* paOperation = NULL; + + // Get the server info and update sample_rate_hz_ + paOperation = + LATE(pa_context_get_server_info)(_paContext, PaServerInfoCallback, this); + + 
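+  // [The pattern at this point -- fire an asynchronous pa_operation, then
+  //  block in WaitForOperationCompletion() until the callback signals the
+  //  threaded mainloop -- is how this file drives PulseAudio's async API
+  //  synchronously. The wait side reduces to the following sketch (the
+  //  caller must hold the mainloop lock):
+  //
+  //    while (pa_operation_get_state(op) == PA_OPERATION_RUNNING)
+  //      pa_threaded_mainloop_wait(loop);  // woken by ..._signal() in cb
+  //    pa_operation_unref(op);
+  //
+  //  Every callback in this file ends with
+  //  pa_threaded_mainloop_signal(_paMainloop, 0) for exactly this reason.]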
  WaitForOperationCompletion(paOperation);
+
+  PaUnLock();
+
+  return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::GetDefaultDeviceInfo(bool recDevice,
+                                                    char* name,
+                                                    uint16_t& index) {
+  char tmpName[kAdmMaxDeviceNameSize] = {0};
+  // subtract length of "default: "
+  uint16_t nameLen = kAdmMaxDeviceNameSize - 9;
+  char* pName = NULL;
+
+  if (name) {
+    // Add "default: "
+    strcpy(name, "default: ");
+    pName = &name[9];
+  }
+
+  // Tell the callback that we want
+  // the name for this device
+  if (recDevice) {
+    _recDisplayDeviceName = tmpName;
+  } else {
+    _playDisplayDeviceName = tmpName;
+  }
+
+  // Set members
+  _paDeviceIndex = -1;
+  _deviceIndex = 0;
+  _numPlayDevices = 0;
+  _numRecDevices = 0;
+
+  PaLock();
+
+  pa_operation* paOperation = NULL;
+
+  // Get the server info and update deviceName
+  paOperation =
+      LATE(pa_context_get_server_info)(_paContext, PaServerInfoCallback, this);
+
+  WaitForOperationCompletion(paOperation);
+
+  // Get the device index
+  if (recDevice) {
+    paOperation = LATE(pa_context_get_source_info_by_name)(
+        _paContext, (char*)tmpName, PaSourceInfoCallback, this);
+  } else {
+    paOperation = LATE(pa_context_get_sink_info_by_name)(
+        _paContext, (char*)tmpName, PaSinkInfoCallback, this);
+  }
+
+  WaitForOperationCompletion(paOperation);
+
+  PaUnLock();
+
+  // Set the index
+  index = _paDeviceIndex;
+
+  if (name) {
+    // Copy to name string
+    strncpy(pName, tmpName, nameLen);
+  }
+
+  // Clear members
+  _playDisplayDeviceName = NULL;
+  _recDisplayDeviceName = NULL;
+  _paDeviceIndex = -1;
+  _deviceIndex = -1;
+  _numPlayDevices = 0;
+  _numRecDevices = 0;
+
+  return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::InitPulseAudio() {
+  int retVal = 0;
+
+  // Load libpulse
+  if (!GetPulseSymbolTable()->Load()) {
+    // Most likely the Pulse library and sound server are not installed on
+    // this system
+    RTC_LOG(LS_ERROR) << "failed to load symbol table";
+    return -1;
+  }
+
+  // Create a mainloop API and connection to the default server
+  // the mainloop is the internal asynchronous API event loop
+  if (_paMainloop) {
+    RTC_LOG(LS_ERROR) << "PA mainloop already exists";
+    return -1;
+  }
+  _paMainloop = LATE(pa_threaded_mainloop_new)();
+  if (!_paMainloop) {
+    RTC_LOG(LS_ERROR) << "could not create mainloop";
+    return -1;
+  }
+
+  // Start the threaded main loop
+  retVal = LATE(pa_threaded_mainloop_start)(_paMainloop);
+  if (retVal != PA_OK) {
+    RTC_LOG(LS_ERROR) << "failed to start main loop, error=" << retVal;
+    return -1;
+  }
+
+  RTC_LOG(LS_VERBOSE) << "mainloop running!";
+
+  PaLock();
+
+  _paMainloopApi = LATE(pa_threaded_mainloop_get_api)(_paMainloop);
+  if (!_paMainloopApi) {
+    RTC_LOG(LS_ERROR) << "could not create mainloop API";
+    PaUnLock();
+    return -1;
+  }
+
+  // Create a new PulseAudio context
+  if (_paContext) {
+    RTC_LOG(LS_ERROR) << "PA context already exists";
+    PaUnLock();
+    return -1;
+  }
+  _paContext = LATE(pa_context_new)(_paMainloopApi, "WEBRTC VoiceEngine");
+
+  if (!_paContext) {
+    RTC_LOG(LS_ERROR) << "could not create context";
+    PaUnLock();
+    return -1;
+  }
+
+  // Set state callback function
+  LATE(pa_context_set_state_callback)(_paContext, PaContextStateCallback,
+                                      this);
+
+  // Connect the context to a server (default)
+  _paStateChanged = false;
+  retVal =
+      LATE(pa_context_connect)(_paContext, NULL, PA_CONTEXT_NOAUTOSPAWN, NULL);
+
+  if (retVal != PA_OK) {
+    RTC_LOG(LS_ERROR) << "failed to connect context, error=" << retVal;
+    PaUnLock();
+    return -1;
+  }
+
+  // Wait for state change
+  while (!_paStateChanged) {
LATE(pa_threaded_mainloop_wait)(_paMainloop); + } + + // Now check to see what final state we reached. + pa_context_state_t state = LATE(pa_context_get_state)(_paContext); + + if (state != PA_CONTEXT_READY) { + if (state == PA_CONTEXT_FAILED) { + RTC_LOG(LS_ERROR) << "failed to connect to PulseAudio sound server"; + } else if (state == PA_CONTEXT_TERMINATED) { + RTC_LOG(LS_ERROR) << "PulseAudio connection terminated early"; + } else { + // Shouldn't happen, because we only signal on one of those three + // states + RTC_LOG(LS_ERROR) << "unknown problem connecting to PulseAudio"; + } + PaUnLock(); + return -1; + } + + PaUnLock(); + + // Give the objects to the mixer manager + _mixerManager.SetPulseAudioObjects(_paMainloop, _paContext); + + // Check the version + if (CheckPulseAudioVersion() < 0) { + RTC_LOG(LS_ERROR) << "PulseAudio version " << _paServerVersion + << " not supported"; + return -1; + } + + // Initialize sampling frequency + if (InitSamplingFrequency() < 0 || sample_rate_hz_ == 0) { + RTC_LOG(LS_ERROR) << "failed to initialize sampling frequency, set to " + << sample_rate_hz_ << " Hz"; + return -1; + } + + return 0; +} + +int32_t AudioDeviceLinuxPulse::TerminatePulseAudio() { + // Do nothing if the instance doesn't exist + // likely GetPulseSymbolTable.Load() fails + if (!_paMainloop) { + return 0; + } + + PaLock(); + + // Disconnect the context + if (_paContext) { + LATE(pa_context_disconnect)(_paContext); + } + + // Unreference the context + if (_paContext) { + LATE(pa_context_unref)(_paContext); + } + + PaUnLock(); + _paContext = NULL; + + // Stop the threaded main loop + if (_paMainloop) { + LATE(pa_threaded_mainloop_stop)(_paMainloop); + } + + // Free the mainloop + if (_paMainloop) { + LATE(pa_threaded_mainloop_free)(_paMainloop); + } + + _paMainloop = NULL; + + RTC_LOG(LS_VERBOSE) << "PulseAudio terminated"; + + return 0; +} + +void AudioDeviceLinuxPulse::PaLock() { + LATE(pa_threaded_mainloop_lock)(_paMainloop); +} + +void AudioDeviceLinuxPulse::PaUnLock() { + LATE(pa_threaded_mainloop_unlock)(_paMainloop); +} + +void AudioDeviceLinuxPulse::WaitForOperationCompletion( + pa_operation* paOperation) const { + if (!paOperation) { + RTC_LOG(LS_ERROR) << "paOperation NULL in WaitForOperationCompletion"; + return; + } + + while (LATE(pa_operation_get_state)(paOperation) == PA_OPERATION_RUNNING) { + LATE(pa_threaded_mainloop_wait)(_paMainloop); + } + + LATE(pa_operation_unref)(paOperation); +} + +// ============================================================================ +// Thread Methods +// ============================================================================ + +void AudioDeviceLinuxPulse::EnableWriteCallback() { + if (LATE(pa_stream_get_state)(_playStream) == PA_STREAM_READY) { + // May already have available space. Must check. + _tempBufferSpace = LATE(pa_stream_writable_size)(_playStream); + if (_tempBufferSpace > 0) { + // Yup, there is already space available, so if we register a + // write callback then it will not receive any event. So dispatch + // one ourself instead. 
+      _timeEventPlay.Set();
+      return;
+    }
+  }
+
+  LATE(pa_stream_set_write_callback)(_playStream, &PaStreamWriteCallback, this);
+}
+
+void AudioDeviceLinuxPulse::DisableWriteCallback() {
+  LATE(pa_stream_set_write_callback)(_playStream, NULL, NULL);
+}
+
+void AudioDeviceLinuxPulse::PaStreamWriteCallback(pa_stream* /*unused*/,
+                                                  size_t buffer_space,
+                                                  void* pThis) {
+  static_cast<AudioDeviceLinuxPulse*>(pThis)->PaStreamWriteCallbackHandler(
+      buffer_space);
+}
+
+void AudioDeviceLinuxPulse::PaStreamWriteCallbackHandler(size_t bufferSpace) {
+  _tempBufferSpace = bufferSpace;
+
+  // Since we write the data asynchronously on a different thread, we have
+  // to temporarily disable the write callback or else Pulse will call it
+  // continuously until we write the data. We re-enable it below.
+  DisableWriteCallback();
+  _timeEventPlay.Set();
+}
+
+void AudioDeviceLinuxPulse::PaStreamUnderflowCallback(pa_stream* /*unused*/,
+                                                      void* pThis) {
+  static_cast<AudioDeviceLinuxPulse*>(pThis)
+      ->PaStreamUnderflowCallbackHandler();
+}
+
+void AudioDeviceLinuxPulse::PaStreamUnderflowCallbackHandler() {
+  RTC_LOG(LS_WARNING) << "Playout underflow";
+
+  if (_configuredLatencyPlay == WEBRTC_PA_NO_LATENCY_REQUIREMENTS) {
+    // We didn't configure a pa_buffer_attr before, so switching to
+    // one now would be questionable.
+    return;
+  }
+
+  // Otherwise reconfigure the stream with a higher target latency.
+
+  const pa_sample_spec* spec = LATE(pa_stream_get_sample_spec)(_playStream);
+  if (!spec) {
+    RTC_LOG(LS_ERROR) << "pa_stream_get_sample_spec()";
+    return;
+  }
+
+  size_t bytesPerSec = LATE(pa_bytes_per_second)(spec);
+  uint32_t newLatency =
+      _configuredLatencyPlay + bytesPerSec *
+                                   WEBRTC_PA_PLAYBACK_LATENCY_INCREMENT_MSECS /
+                                   WEBRTC_PA_MSECS_PER_SEC;
+
+  // Set the play buffer attributes
+  _playBufferAttr.maxlength = newLatency;
+  _playBufferAttr.tlength = newLatency;
+  _playBufferAttr.minreq = newLatency / WEBRTC_PA_PLAYBACK_REQUEST_FACTOR;
+  _playBufferAttr.prebuf = _playBufferAttr.tlength - _playBufferAttr.minreq;
+
+  pa_operation* op = LATE(pa_stream_set_buffer_attr)(
+      _playStream, &_playBufferAttr, NULL, NULL);
+  if (!op) {
+    RTC_LOG(LS_ERROR) << "pa_stream_set_buffer_attr()";
+    return;
+  }
+
+  // Don't need to wait for this to complete.
+  LATE(pa_operation_unref)(op);
+
+  // Save the new latency in case we underflow again.
+  _configuredLatencyPlay = newLatency;
+}
+
+void AudioDeviceLinuxPulse::EnableReadCallback() {
+  LATE(pa_stream_set_read_callback)(_recStream, &PaStreamReadCallback, this);
+}
+
+void AudioDeviceLinuxPulse::DisableReadCallback() {
+  LATE(pa_stream_set_read_callback)(_recStream, NULL, NULL);
+}
+
+void AudioDeviceLinuxPulse::PaStreamReadCallback(pa_stream* /*unused1*/,
+                                                 size_t /*unused2*/,
+                                                 void* pThis) {
+  static_cast<AudioDeviceLinuxPulse*>(pThis)->PaStreamReadCallbackHandler();
+}
+
+void AudioDeviceLinuxPulse::PaStreamReadCallbackHandler() {
+  // We get the data pointer and size now in order to save one Lock/Unlock
+  // in the worker thread.
+  if (LATE(pa_stream_peek)(_recStream, &_tempSampleData,
+                           &_tempSampleDataSize) != 0) {
+    RTC_LOG(LS_ERROR) << "Can't read data!";
+    return;
+  }
+
+  // Since we consume the data asynchronously on a different thread, we have
+  // to temporarily disable the read callback or else Pulse will call it
+  // continuously until we consume the data. We re-enable it below.
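+  // (RecThreadProcess() re-enables the read callback once the peeked data
+  // has been consumed.)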
+  DisableReadCallback();
+  _timeEventRec.Set();
+}
+
+void AudioDeviceLinuxPulse::PaStreamOverflowCallback(pa_stream* /*unused*/,
+                                                     void* pThis) {
+  static_cast<AudioDeviceLinuxPulse*>(pThis)->PaStreamOverflowCallbackHandler();
+}
+
+void AudioDeviceLinuxPulse::PaStreamOverflowCallbackHandler() {
+  RTC_LOG(LS_WARNING) << "Recording overflow";
+}
+
+int32_t AudioDeviceLinuxPulse::LatencyUsecs(pa_stream* stream) {
+  if (!WEBRTC_PA_REPORT_LATENCY) {
+    return 0;
+  }
+
+  if (!stream) {
+    return 0;
+  }
+
+  pa_usec_t latency;
+  int negative;
+  if (LATE(pa_stream_get_latency)(stream, &latency, &negative) != 0) {
+    RTC_LOG(LS_ERROR) << "Can't query latency";
+    // We'd rather continue playout/capture with an incorrect delay than
+    // stop it altogether, so return a valid value.
+    return 0;
+  }
+
+  if (negative) {
+    RTC_LOG(LS_VERBOSE)
+        << "warning: pa_stream_get_latency reported negative delay";
+
+    // The delay can be negative for monitoring streams if the captured
+    // samples haven't been played yet. In such a case, "latency"
+    // contains the magnitude, so we must negate it to get the real value.
+    int32_t tmpLatency = (int32_t)-latency;
+    if (tmpLatency < 0) {
+      // Make sure that we don't use a negative delay.
+      tmpLatency = 0;
+    }
+
+    return tmpLatency;
+  } else {
+    return (int32_t)latency;
+  }
+}
+
+int32_t AudioDeviceLinuxPulse::ReadRecordedData(const void* bufferData,
+                                                size_t bufferSize)
+    RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_) {
+  size_t size = bufferSize;
+  uint32_t numRecSamples = _recordBufferSize / (2 * _recChannels);
+
+  // Account for the peeked data and the used data.
+  uint32_t recDelay =
+      (uint32_t)((LatencyUsecs(_recStream) / 1000) +
+                 10 * ((size + _recordBufferUsed) / _recordBufferSize));
+
+  if (_playStream) {
+    // Get the playout delay.
+    _sndCardPlayDelay = (uint32_t)(LatencyUsecs(_playStream) / 1000);
+  }
+
+  if (_recordBufferUsed > 0) {
+    // Have to copy to the buffer until it is full.
+    size_t copy = _recordBufferSize - _recordBufferUsed;
+    if (size < copy) {
+      copy = size;
+    }
+
+    memcpy(&_recBuffer[_recordBufferUsed], bufferData, copy);
+    _recordBufferUsed += copy;
+    bufferData = static_cast<const char*>(bufferData) + copy;
+    size -= copy;
+
+    if (_recordBufferUsed != _recordBufferSize) {
+      // Not enough data yet to pass to VoE.
+      return 0;
+    }
+
+    // Provide data to VoiceEngine.
+    if (ProcessRecordedData(_recBuffer, numRecSamples, recDelay) == -1) {
+      // We have stopped recording.
+      return -1;
+    }
+
+    _recordBufferUsed = 0;
+  }
+
+  // Now process full 10ms sample sets directly from the input.
+  while (size >= _recordBufferSize) {
+    // Provide data to VoiceEngine.
+    if (ProcessRecordedData(
+            static_cast<int8_t*>(const_cast<void*>(bufferData)),
+            numRecSamples, recDelay) == -1) {
+      // We have stopped recording.
+      return -1;
+    }
+
+    bufferData = static_cast<const char*>(bufferData) + _recordBufferSize;
+    size -= _recordBufferSize;
+
+    // We have consumed 10ms of data.
+    recDelay -= 10;
+  }
+
+  // Now save any leftovers for later.
+  if (size > 0) {
+    memcpy(_recBuffer, bufferData, size);
+    _recordBufferUsed = size;
+  }
+
+  return 0;
+}
+
+int32_t AudioDeviceLinuxPulse::ProcessRecordedData(int8_t* bufferData,
+                                                   uint32_t bufferSizeInSamples,
+                                                   uint32_t recDelay)
+    RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_) {
+  _ptrAudioBuffer->SetRecordedBuffer(bufferData, bufferSizeInSamples);
+
+  // TODO(andrew): this is a temporary hack, to avoid non-causal far- and
+  // near-end signals at the AEC for PulseAudio. I think the system delay is
+  // being correctly calculated here, but for legacy reasons we add +10 ms
+  // to the value in the AEC. The real fix will be part of a larger
+  // investigation into managing system delay in the AEC.
+  if (recDelay > 10)
+    recDelay -= 10;
+  else
+    recDelay = 0;
+  _ptrAudioBuffer->SetVQEData(_sndCardPlayDelay, recDelay);
+  _ptrAudioBuffer->SetTypingStatus(KeyPressed());
+  // Deliver recorded samples at specified sample rate,
+  // mic level etc. to the observer using callback.
+  UnLock();
+  _ptrAudioBuffer->DeliverRecordedData();
+  Lock();
+
+  // We have been unlocked - check the flag again.
+  if (!_recording) {
+    return -1;
+  }
+
+  return 0;
+}
+
+bool AudioDeviceLinuxPulse::PlayThreadProcess() {
+  if (!_timeEventPlay.Wait(TimeDelta::Seconds(1))) {
+    return true;
+  }
+
+  MutexLock lock(&mutex_);
+
+  if (quit_) {
+    return false;
+  }
+
+  if (_startPlay) {
+    RTC_LOG(LS_VERBOSE) << "_startPlay true, performing initial actions";
+
+    _startPlay = false;
+    _playDeviceName = NULL;
+
+    // Set if not default device
+    if (_outputDeviceIndex > 0) {
+      // Get the playout device name
+      _playDeviceName = new char[kAdmMaxDeviceNameSize];
+      _deviceIndex = _outputDeviceIndex;
+      PlayoutDevices();
+    }
+
+    // Start-muted is only supported on 0.9.11 and up
+    if (LATE(pa_context_get_protocol_version)(_paContext) >=
+        WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION) {
+      // Get the currently saved speaker mute status
+      // and set the initial mute status accordingly
+      bool enabled(false);
+      _mixerManager.SpeakerMute(enabled);
+      if (enabled) {
+        _playStreamFlags |= PA_STREAM_START_MUTED;
+      }
+    }
+
+    // Get the currently saved speaker volume
+    uint32_t volume = 0;
+    if (update_speaker_volume_at_startup_)
+      _mixerManager.SpeakerVolume(volume);
+
+    PaLock();
+
+    // NULL gives PA the choice of startup volume.
+    pa_cvolume* ptr_cvolume = NULL;
+    if (update_speaker_volume_at_startup_) {
+      pa_cvolume cVolumes;
+      ptr_cvolume = &cVolumes;
+
+      // Set the same volume for all channels
+      const pa_sample_spec* spec = LATE(pa_stream_get_sample_spec)(_playStream);
+      LATE(pa_cvolume_set)(&cVolumes, spec->channels, volume);
+      update_speaker_volume_at_startup_ = false;
+    }
+
+    // Connect the stream to a sink
+    if (LATE(pa_stream_connect_playback)(
+            _playStream, _playDeviceName, &_playBufferAttr,
+            (pa_stream_flags_t)_playStreamFlags, ptr_cvolume, NULL) != PA_OK) {
+      RTC_LOG(LS_ERROR) << "failed to connect play stream, err="
+                        << LATE(pa_context_errno)(_paContext);
+    }
+
+    RTC_LOG(LS_VERBOSE) << "play stream connected";
+
+    // Wait for state change
+    while (LATE(pa_stream_get_state)(_playStream) != PA_STREAM_READY) {
+      LATE(pa_threaded_mainloop_wait)(_paMainloop);
+    }
+
+    RTC_LOG(LS_VERBOSE) << "play stream ready";
+
+    // We can now handle write callbacks
+    EnableWriteCallback();
+
+    PaUnLock();
+
+    // Clear device name
+    if (_playDeviceName) {
+      delete[] _playDeviceName;
+      _playDeviceName = NULL;
+    }
+
+    _playing = true;
+    _playStartEvent.Set();
+
+    return true;
+  }
+
+  if (_playing) {
+    if (!_recording) {
+      // Update the playout delay
+      _sndCardPlayDelay = (uint32_t)(LatencyUsecs(_playStream) / 1000);
+    }
+
+    if (_playbackBufferUnused < _playbackBufferSize) {
+      size_t write = _playbackBufferSize - _playbackBufferUnused;
+      if (_tempBufferSpace < write) {
+        write = _tempBufferSpace;
+      }
+
+      PaLock();
+      if (LATE(pa_stream_write)(
+              _playStream, (void*)&_playBuffer[_playbackBufferUnused], write,
+              NULL, (int64_t)0, PA_SEEK_RELATIVE) != PA_OK) {
+        _writeErrors++;
+        if (_writeErrors > 10) {
+          RTC_LOG(LS_ERROR) << "Playout error: _writeErrors=" << _writeErrors
+                            << ", error=" << LATE(pa_context_errno)(_paContext);
+          _writeErrors = 0;
+        }
+      }
+      PaUnLock();
+
+      _playbackBufferUnused += write;
+      _tempBufferSpace -= write;
+    }
+
+    uint32_t numPlaySamples = _playbackBufferSize / (2 * _playChannels);
+    // Might have been reduced to zero by the above.
+    if (_tempBufferSpace > 0) {
+      // Ask for new PCM data to be played out using the
+      // AudioDeviceBuffer; ensure that this callback is executed
+      // without taking the audio-thread lock.
+      UnLock();
+      RTC_LOG(LS_VERBOSE) << "requesting data";
+      uint32_t nSamples = _ptrAudioBuffer->RequestPlayoutData(numPlaySamples);
+      Lock();
+
+      // We have been unlocked - check the flag again.
+      if (!_playing) {
+        return true;
+      }
+
+      nSamples = _ptrAudioBuffer->GetPlayoutData(_playBuffer);
+      if (nSamples != numPlaySamples) {
+        RTC_LOG(LS_ERROR) << "invalid number of output samples (" << nSamples
+                          << ")";
+      }
+
+      size_t write = _playbackBufferSize;
+      if (_tempBufferSpace < write) {
+        write = _tempBufferSpace;
+      }
+
+      RTC_LOG(LS_VERBOSE) << "will write";
+      PaLock();
+      if (LATE(pa_stream_write)(_playStream, (void*)&_playBuffer[0], write,
+                                NULL, (int64_t)0, PA_SEEK_RELATIVE) != PA_OK) {
+        _writeErrors++;
+        if (_writeErrors > 10) {
+          RTC_LOG(LS_ERROR) << "Playout error: _writeErrors=" << _writeErrors
+                            << ", error=" << LATE(pa_context_errno)(_paContext);
+          _writeErrors = 0;
+        }
+      }
+      PaUnLock();
+
+      _playbackBufferUnused = write;
+    }
+
+    _tempBufferSpace = 0;
+    PaLock();
+    EnableWriteCallback();
+    PaUnLock();
+
+  }  // _playing
+
+  return true;
+}
+
+bool AudioDeviceLinuxPulse::RecThreadProcess() {
+  if (!_timeEventRec.Wait(TimeDelta::Seconds(1))) {
+    return true;
+  }
+
+  MutexLock lock(&mutex_);
+  if (quit_) {
+    return false;
+  }
+  if (_startRec) {
+    RTC_LOG(LS_VERBOSE) << "_startRec true, performing initial actions";
+
+    _recDeviceName = NULL;
+
+    // Set if not default device
+    if (_inputDeviceIndex > 0) {
+      // Get the recording device name
+      _recDeviceName = new char[kAdmMaxDeviceNameSize];
+      _deviceIndex = _inputDeviceIndex;
+      RecordingDevices();
+    }
+
+    PaLock();
+
+    RTC_LOG(LS_VERBOSE) << "connecting stream";
+
+    // Connect the stream to a source
+    if (LATE(pa_stream_connect_record)(
+            _recStream, _recDeviceName, &_recBufferAttr,
+            (pa_stream_flags_t)_recStreamFlags) != PA_OK) {
+      RTC_LOG(LS_ERROR) << "failed to connect rec stream, err="
+                        << LATE(pa_context_errno)(_paContext);
+    }
+
+    RTC_LOG(LS_VERBOSE) << "connected";
+
+    // Wait for state change
+    while (LATE(pa_stream_get_state)(_recStream) != PA_STREAM_READY) {
+      LATE(pa_threaded_mainloop_wait)(_paMainloop);
+    }
+
+    RTC_LOG(LS_VERBOSE) << "done";
+
+    // We can now handle read callbacks
+    EnableReadCallback();
+
+    PaUnLock();
+
+    // Clear device name
+    if (_recDeviceName) {
+      delete[] _recDeviceName;
+      _recDeviceName = NULL;
+    }
+
+    _startRec = false;
+    _recording = true;
+    _recStartEvent.Set();
+
+    return true;
+  }
+
+  if (_recording) {
+    // Read data and provide it to VoiceEngine
+    if (ReadRecordedData(_tempSampleData, _tempSampleDataSize) == -1) {
+      return true;
+    }
+
+    _tempSampleData = NULL;
+    _tempSampleDataSize = 0;
+
+    PaLock();
+    while (true) {
+      // Ack the last thing we read
+      if (LATE(pa_stream_drop)(_recStream) != 0) {
+        RTC_LOG(LS_WARNING)
+            << "failed to drop, err=" << LATE(pa_context_errno)(_paContext);
+      }
+
+      if (LATE(pa_stream_readable_size)(_recStream) <= 0) {
+        // Then that was all the data
+        break;
+      }
+
+      // Else more data.
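+      // Peek the next fragment and hand it to ReadRecordedData() as well.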
+      const void* sampleData;
+      size_t sampleDataSize;
+
+      if (LATE(pa_stream_peek)(_recStream, &sampleData, &sampleDataSize) != 0) {
+        RTC_LOG(LS_ERROR) << "RECORD_ERROR, error = "
+                          << LATE(pa_context_errno)(_paContext);
+        break;
+      }
+
+      // Drop lock for sigslot dispatch, which could take a while.
+      PaUnLock();
+      // Read data and provide it to VoiceEngine
+      if (ReadRecordedData(sampleData, sampleDataSize) == -1) {
+        return true;
+      }
+      PaLock();
+
+      // Return to top of loop for the ack and the check for more data.
+    }
+
+    EnableReadCallback();
+    PaUnLock();
+
+  }  // _recording
+
+  return true;
+}
+
+bool AudioDeviceLinuxPulse::KeyPressed() const {
+#if defined(WEBRTC_USE_X11)
+  char szKey[32];
+  unsigned int i = 0;
+  char state = 0;
+
+  if (!_XDisplay)
+    return false;
+
+  // Check key map status
+  XQueryKeymap(_XDisplay, szKey);
+
+  // A bit change in keymap means a key is pressed
+  for (i = 0; i < sizeof(szKey); i++)
+    state |= (szKey[i] ^ _oldKeyState[i]) & szKey[i];
+
+  // Save old state
+  memcpy((char*)_oldKeyState, (char*)szKey, sizeof(_oldKeyState));
+  return (state != 0);
+#else
+  return false;
+#endif
+}
+}  // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/linux/audio_device_pulse_linux.h b/third_party/libwebrtc/modules/audio_device/linux/audio_device_pulse_linux.h
new file mode 100644
index 0000000000..0cf89ef011
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/linux/audio_device_pulse_linux.h
@@ -0,0 +1,349 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_DEVICE_AUDIO_DEVICE_PULSE_LINUX_H_
+#define AUDIO_DEVICE_AUDIO_DEVICE_PULSE_LINUX_H_
+
+#include <memory>
+
+#include "api/sequence_checker.h"
+#include "modules/audio_device/audio_device_buffer.h"
+#include "modules/audio_device/audio_device_generic.h"
+#include "modules/audio_device/include/audio_device.h"
+#include "modules/audio_device/include/audio_device_defines.h"
+#include "modules/audio_device/linux/audio_mixer_manager_pulse_linux.h"
+#include "modules/audio_device/linux/pulseaudiosymboltable_linux.h"
+#include "rtc_base/event.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+
+#if defined(WEBRTC_USE_X11)
+#include <X11/Xlib.h>
+#endif
+
+#include <pulse/pulseaudio.h>
+#include <stdint.h>
+#include <string.h>
+
+// We define this flag if it's missing from our headers, because we want to be
+// able to compile against old headers but still use PA_STREAM_ADJUST_LATENCY
+// if run against a recent version of the library.
+#ifndef PA_STREAM_ADJUST_LATENCY
+#define PA_STREAM_ADJUST_LATENCY 0x2000U
+#endif
+#ifndef PA_STREAM_START_MUTED
+#define PA_STREAM_START_MUTED 0x1000U
+#endif
+
+// Set this constant to 0 to disable latency reading
+const uint32_t WEBRTC_PA_REPORT_LATENCY = 1;
+
+// Constants from implementation by Tristan Schmelcher [tschmelcher@google.com]
+
+// First PulseAudio protocol version that supports PA_STREAM_ADJUST_LATENCY.
+const uint32_t WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION = 13;
+
+// Some timing constants for optimal operation.
+// See
+// https://tango.0pointer.de/pipermail/pulseaudio-discuss/2008-January/001170.html
+// for a good explanation of some of the factors that go into this.
+
+// Playback.
+
+// For playback, there is a round-trip delay to fill the server-side playback
+// buffer, so setting too low a latency is a buffer underflow risk. We will
+// automatically increase the latency if a buffer underflow does occur, but we
+// also enforce a sane minimum at start-up time. Anything lower would be
+// virtually guaranteed to underflow at least once, so there's no point in
+// allowing lower latencies.
+const uint32_t WEBRTC_PA_PLAYBACK_LATENCY_MINIMUM_MSECS = 20;
+
+// Every time a playback stream underflows, we will reconfigure it with a
+// target latency that is greater by this amount.
+const uint32_t WEBRTC_PA_PLAYBACK_LATENCY_INCREMENT_MSECS = 20;
+
+// We also need to configure a suitable request size. Too small and we'd burn
+// CPU from the overhead of transferring small amounts of data at once. Too
+// large and the amount of data remaining in the buffer right before refilling
+// it would be a buffer underflow risk. We set it to half of the buffer size.
+const uint32_t WEBRTC_PA_PLAYBACK_REQUEST_FACTOR = 2;
+
+// Capture.
+
+// For capture, low latency is not a buffer overflow risk, but it makes us burn
+// CPU from the overhead of transferring small amounts of data at once, so we
+// set a recommended value that we use for the kLowLatency constant (but if the
+// user explicitly requests something lower then we will honour it).
+// 1ms takes about 6-7% CPU. 5ms takes about 5%. 10ms takes about 4.x%.
+const uint32_t WEBRTC_PA_LOW_CAPTURE_LATENCY_MSECS = 10;
+
+// There is a round-trip delay to ack the data to the server, so the
+// server-side buffer needs extra space to prevent buffer overflow. 20ms is
+// sufficient, but there is no penalty to making it bigger, so we make it huge.
+// (750ms is libpulse's default value for the _total_ buffer size in the
+// kNoLatencyRequirements case.)
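+//
+// For illustration (an editor sketch, not upstream code): together with the
+// negotiated pa_sample_spec these constants size the capture buffer roughly as
+//   fragsize  = bytesPerSec * latencyMsecs / WEBRTC_PA_MSECS_PER_SEC;
+//   maxlength = fragsize + bytesPerSec * WEBRTC_PA_CAPTURE_BUFFER_EXTRA_MSECS /
+//                   WEBRTC_PA_MSECS_PER_SEC;
+// where bytesPerSec comes from pa_bytes_per_second() on that sample spec.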
+const uint32_t WEBRTC_PA_CAPTURE_BUFFER_EXTRA_MSECS = 750;
+
+const uint32_t WEBRTC_PA_MSECS_PER_SEC = 1000;
+
+// Init _configuredLatencyRec/Play to this value to disable latency requirements
+const int32_t WEBRTC_PA_NO_LATENCY_REQUIREMENTS = -1;
+
+// Set this const to 1 to account for peeked and used data in latency
+// calculation
+const uint32_t WEBRTC_PA_CAPTURE_BUFFER_LATENCY_ADJUSTMENT = 0;
+
+typedef webrtc::adm_linux_pulse::PulseAudioSymbolTable WebRTCPulseSymbolTable;
+WebRTCPulseSymbolTable* GetPulseSymbolTable();
+
+namespace webrtc {
+
+class AudioDeviceLinuxPulse : public AudioDeviceGeneric {
+ public:
+  AudioDeviceLinuxPulse();
+  virtual ~AudioDeviceLinuxPulse();
+
+  // Retrieve the currently utilized audio layer
+  int32_t ActiveAudioLayer(
+      AudioDeviceModule::AudioLayer& audioLayer) const override;
+
+  // Main initialization and termination
+  InitStatus Init() override;
+  int32_t Terminate() RTC_LOCKS_EXCLUDED(mutex_) override;
+  bool Initialized() const override;
+
+  // Device enumeration
+  int16_t PlayoutDevices() override;
+  int16_t RecordingDevices() override;
+  int32_t PlayoutDeviceName(uint16_t index,
+                            char name[kAdmMaxDeviceNameSize],
+                            char guid[kAdmMaxGuidSize]) override;
+  int32_t RecordingDeviceName(uint16_t index,
+                              char name[kAdmMaxDeviceNameSize],
+                              char guid[kAdmMaxGuidSize]) override;
+
+  // Device selection
+  int32_t SetPlayoutDevice(uint16_t index) override;
+  int32_t SetPlayoutDevice(
+      AudioDeviceModule::WindowsDeviceType device) override;
+  int32_t SetRecordingDevice(uint16_t index) override;
+  int32_t SetRecordingDevice(
+      AudioDeviceModule::WindowsDeviceType device) override;
+
+  // Audio transport initialization
+  int32_t PlayoutIsAvailable(bool& available) override;
+  int32_t InitPlayout() RTC_LOCKS_EXCLUDED(mutex_) override;
+  bool PlayoutIsInitialized() const override;
+  int32_t RecordingIsAvailable(bool& available) override;
+  int32_t InitRecording() override;
+  bool RecordingIsInitialized() const override;
+
+  // Audio transport control
+  int32_t StartPlayout() RTC_LOCKS_EXCLUDED(mutex_) override;
+  int32_t StopPlayout() RTC_LOCKS_EXCLUDED(mutex_) override;
+  bool Playing() const override;
+  int32_t StartRecording() RTC_LOCKS_EXCLUDED(mutex_) override;
+  int32_t StopRecording() RTC_LOCKS_EXCLUDED(mutex_) override;
+  bool Recording() const override;
+
+  // Audio mixer initialization
+  int32_t InitSpeaker() override;
+  bool SpeakerIsInitialized() const override;
+  int32_t InitMicrophone() override;
+  bool MicrophoneIsInitialized() const override;
+
+  // Speaker volume controls
+  int32_t SpeakerVolumeIsAvailable(bool& available) override;
+  int32_t SetSpeakerVolume(uint32_t volume) override;
+  int32_t SpeakerVolume(uint32_t& volume) const override;
+  int32_t MaxSpeakerVolume(uint32_t& maxVolume) const override;
+  int32_t MinSpeakerVolume(uint32_t& minVolume) const override;
+
+  // Microphone volume controls
+  int32_t MicrophoneVolumeIsAvailable(bool& available) override;
+  int32_t SetMicrophoneVolume(uint32_t volume) override;
+  int32_t MicrophoneVolume(uint32_t& volume) const override;
+  int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const override;
+  int32_t MinMicrophoneVolume(uint32_t& minVolume) const override;
+
+  // Speaker mute control
+  int32_t SpeakerMuteIsAvailable(bool& available) override;
+  int32_t SetSpeakerMute(bool enable) override;
+  int32_t SpeakerMute(bool& enabled) const override;
+
+  // Microphone mute control
+  int32_t MicrophoneMuteIsAvailable(bool& available) override;
+  int32_t SetMicrophoneMute(bool enable) override;
+ int32_t MicrophoneMute(bool& enabled) const override; + + // Stereo support + int32_t StereoPlayoutIsAvailable(bool& available) override; + int32_t SetStereoPlayout(bool enable) override; + int32_t StereoPlayout(bool& enabled) const override; + int32_t StereoRecordingIsAvailable(bool& available) override; + int32_t SetStereoRecording(bool enable) override; + int32_t StereoRecording(bool& enabled) const override; + + // Delay information and control + int32_t PlayoutDelay(uint16_t& delayMS) const + RTC_LOCKS_EXCLUDED(mutex_) override; + + void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override; + + private: + void Lock() RTC_EXCLUSIVE_LOCK_FUNCTION(mutex_) { mutex_.Lock(); } + void UnLock() RTC_UNLOCK_FUNCTION(mutex_) { mutex_.Unlock(); } + void WaitForOperationCompletion(pa_operation* paOperation) const; + void WaitForSuccess(pa_operation* paOperation) const; + + bool KeyPressed() const; + + static void PaContextStateCallback(pa_context* c, void* pThis); + static void PaSinkInfoCallback(pa_context* c, + const pa_sink_info* i, + int eol, + void* pThis); + static void PaSourceInfoCallback(pa_context* c, + const pa_source_info* i, + int eol, + void* pThis); + static void PaServerInfoCallback(pa_context* c, + const pa_server_info* i, + void* pThis); + static void PaStreamStateCallback(pa_stream* p, void* pThis); + void PaContextStateCallbackHandler(pa_context* c); + void PaSinkInfoCallbackHandler(const pa_sink_info* i, int eol); + void PaSourceInfoCallbackHandler(const pa_source_info* i, int eol); + void PaServerInfoCallbackHandler(const pa_server_info* i); + void PaStreamStateCallbackHandler(pa_stream* p); + + void EnableWriteCallback(); + void DisableWriteCallback(); + static void PaStreamWriteCallback(pa_stream* unused, + size_t buffer_space, + void* pThis); + void PaStreamWriteCallbackHandler(size_t buffer_space); + static void PaStreamUnderflowCallback(pa_stream* unused, void* pThis); + void PaStreamUnderflowCallbackHandler(); + void EnableReadCallback(); + void DisableReadCallback(); + static void PaStreamReadCallback(pa_stream* unused1, + size_t unused2, + void* pThis); + void PaStreamReadCallbackHandler(); + static void PaStreamOverflowCallback(pa_stream* unused, void* pThis); + void PaStreamOverflowCallbackHandler(); + int32_t LatencyUsecs(pa_stream* stream); + int32_t ReadRecordedData(const void* bufferData, size_t bufferSize); + int32_t ProcessRecordedData(int8_t* bufferData, + uint32_t bufferSizeInSamples, + uint32_t recDelay); + + int32_t CheckPulseAudioVersion(); + int32_t InitSamplingFrequency(); + int32_t GetDefaultDeviceInfo(bool recDevice, char* name, uint16_t& index); + int32_t InitPulseAudio(); + int32_t TerminatePulseAudio(); + + void PaLock(); + void PaUnLock(); + + static void RecThreadFunc(void*); + static void PlayThreadFunc(void*); + bool RecThreadProcess() RTC_LOCKS_EXCLUDED(mutex_); + bool PlayThreadProcess() RTC_LOCKS_EXCLUDED(mutex_); + + AudioDeviceBuffer* _ptrAudioBuffer; + + mutable Mutex mutex_; + rtc::Event _timeEventRec; + rtc::Event _timeEventPlay; + rtc::Event _recStartEvent; + rtc::Event _playStartEvent; + + rtc::PlatformThread _ptrThreadPlay; + rtc::PlatformThread _ptrThreadRec; + + AudioMixerManagerLinuxPulse _mixerManager; + + uint16_t _inputDeviceIndex; + uint16_t _outputDeviceIndex; + bool _inputDeviceIsSpecified; + bool _outputDeviceIsSpecified; + + int sample_rate_hz_; + uint8_t _recChannels; + uint8_t _playChannels; + + // Stores thread ID in constructor. 
+  // We can then use RTC_DCHECK_RUN_ON(&thread_checker_) to ensure that
+  // other methods are called from the same thread.
+  // Currently only does RTC_DCHECK(thread_checker_.IsCurrent()).
+  SequenceChecker thread_checker_;
+
+  bool _initialized;
+  bool _recording;
+  bool _playing;
+  bool _recIsInitialized;
+  bool _playIsInitialized;
+  bool _startRec;
+  bool _startPlay;
+  bool update_speaker_volume_at_startup_;
+  bool quit_ RTC_GUARDED_BY(&mutex_);
+
+  uint32_t _sndCardPlayDelay RTC_GUARDED_BY(&mutex_);
+
+  int32_t _writeErrors;
+
+  uint16_t _deviceIndex;
+  int16_t _numPlayDevices;
+  int16_t _numRecDevices;
+  char* _playDeviceName;
+  char* _recDeviceName;
+  char* _playDisplayDeviceName;
+  char* _recDisplayDeviceName;
+  char _paServerVersion[32];
+
+  int8_t* _playBuffer;
+  size_t _playbackBufferSize;
+  size_t _playbackBufferUnused;
+  size_t _tempBufferSpace;
+  int8_t* _recBuffer;
+  size_t _recordBufferSize;
+  size_t _recordBufferUsed;
+  const void* _tempSampleData;
+  size_t _tempSampleDataSize;
+  int32_t _configuredLatencyPlay;
+  int32_t _configuredLatencyRec;
+
+  // PulseAudio
+  uint16_t _paDeviceIndex;
+  bool _paStateChanged;
+
+  pa_threaded_mainloop* _paMainloop;
+  pa_mainloop_api* _paMainloopApi;
+  pa_context* _paContext;
+
+  pa_stream* _recStream;
+  pa_stream* _playStream;
+  uint32_t _recStreamFlags;
+  uint32_t _playStreamFlags;
+  pa_buffer_attr _playBufferAttr;
+  pa_buffer_attr _recBufferAttr;
+
+  char _oldKeyState[32];
+#if defined(WEBRTC_USE_X11)
+  Display* _XDisplay;
+#endif
+};
+
+}  // namespace webrtc
+
+#endif  // AUDIO_DEVICE_AUDIO_DEVICE_PULSE_LINUX_H_
diff --git a/third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_alsa_linux.cc b/third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_alsa_linux.cc
new file mode 100644
index 0000000000..e7e7033173
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_alsa_linux.cc
@@ -0,0 +1,979 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/linux/audio_mixer_manager_alsa_linux.h"
+
+#include "modules/audio_device/linux/audio_device_alsa_linux.h"
+#include "rtc_base/logging.h"
+
+// Accesses ALSA functions through our late-binding symbol table instead of
+// directly. This way we don't have to link to libasound, which means our
+// binary will work on systems that don't have it.
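+// For example, LATE(snd_mixer_open)(&handle, 0) resolves snd_mixer_open
+// through the lazily loaded libasound at call time rather than binding it at
+// link time.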
+#define LATE(sym) \ + LATESYM_GET(webrtc::adm_linux_alsa::AlsaSymbolTable, GetAlsaSymbolTable(), \ + sym) + +namespace webrtc { + +AudioMixerManagerLinuxALSA::AudioMixerManagerLinuxALSA() + : _outputMixerHandle(NULL), + _inputMixerHandle(NULL), + _outputMixerElement(NULL), + _inputMixerElement(NULL) { + RTC_DLOG(LS_INFO) << __FUNCTION__ << " created"; + + memset(_outputMixerStr, 0, kAdmMaxDeviceNameSize); + memset(_inputMixerStr, 0, kAdmMaxDeviceNameSize); +} + +AudioMixerManagerLinuxALSA::~AudioMixerManagerLinuxALSA() { + RTC_DLOG(LS_INFO) << __FUNCTION__ << " destroyed"; + Close(); +} + +// ============================================================================ +// PUBLIC METHODS +// ============================================================================ + +int32_t AudioMixerManagerLinuxALSA::Close() { + RTC_DLOG(LS_VERBOSE) << __FUNCTION__; + + MutexLock lock(&mutex_); + + CloseSpeakerLocked(); + CloseMicrophoneLocked(); + + return 0; +} + +int32_t AudioMixerManagerLinuxALSA::CloseSpeaker() { + MutexLock lock(&mutex_); + return CloseSpeakerLocked(); +} + +int32_t AudioMixerManagerLinuxALSA::CloseSpeakerLocked() { + RTC_DLOG(LS_VERBOSE) << __FUNCTION__; + + int errVal = 0; + + if (_outputMixerHandle != NULL) { + RTC_LOG(LS_VERBOSE) << "Closing playout mixer"; + LATE(snd_mixer_free)(_outputMixerHandle); + if (errVal < 0) { + RTC_LOG(LS_ERROR) << "Error freeing playout mixer: " + << LATE(snd_strerror)(errVal); + } + errVal = LATE(snd_mixer_detach)(_outputMixerHandle, _outputMixerStr); + if (errVal < 0) { + RTC_LOG(LS_ERROR) << "Error detaching playout mixer: " + << LATE(snd_strerror)(errVal); + } + errVal = LATE(snd_mixer_close)(_outputMixerHandle); + if (errVal < 0) { + RTC_LOG(LS_ERROR) << "Error snd_mixer_close(handleMixer) errVal=" + << errVal; + } + _outputMixerHandle = NULL; + _outputMixerElement = NULL; + } + memset(_outputMixerStr, 0, kAdmMaxDeviceNameSize); + + return 0; +} + +int32_t AudioMixerManagerLinuxALSA::CloseMicrophone() { + MutexLock lock(&mutex_); + return CloseMicrophoneLocked(); +} + +int32_t AudioMixerManagerLinuxALSA::CloseMicrophoneLocked() { + RTC_DLOG(LS_VERBOSE) << __FUNCTION__; + + int errVal = 0; + + if (_inputMixerHandle != NULL) { + RTC_LOG(LS_VERBOSE) << "Closing record mixer"; + + LATE(snd_mixer_free)(_inputMixerHandle); + if (errVal < 0) { + RTC_LOG(LS_ERROR) << "Error freeing record mixer: " + << LATE(snd_strerror)(errVal); + } + RTC_LOG(LS_VERBOSE) << "Closing record mixer 2"; + + errVal = LATE(snd_mixer_detach)(_inputMixerHandle, _inputMixerStr); + if (errVal < 0) { + RTC_LOG(LS_ERROR) << "Error detaching record mixer: " + << LATE(snd_strerror)(errVal); + } + RTC_LOG(LS_VERBOSE) << "Closing record mixer 3"; + + errVal = LATE(snd_mixer_close)(_inputMixerHandle); + if (errVal < 0) { + RTC_LOG(LS_ERROR) << "Error snd_mixer_close(handleMixer) errVal=" + << errVal; + } + + RTC_LOG(LS_VERBOSE) << "Closing record mixer 4"; + _inputMixerHandle = NULL; + _inputMixerElement = NULL; + } + memset(_inputMixerStr, 0, kAdmMaxDeviceNameSize); + + return 0; +} + +int32_t AudioMixerManagerLinuxALSA::OpenSpeaker(char* deviceName) { + RTC_LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::OpenSpeaker(name=" + << deviceName << ")"; + + MutexLock lock(&mutex_); + + int errVal = 0; + + // Close any existing output mixer handle + // + if (_outputMixerHandle != NULL) { + RTC_LOG(LS_VERBOSE) << "Closing playout mixer"; + + LATE(snd_mixer_free)(_outputMixerHandle); + if (errVal < 0) { + RTC_LOG(LS_ERROR) << "Error freeing playout mixer: " + << LATE(snd_strerror)(errVal); 
+ } + errVal = LATE(snd_mixer_detach)(_outputMixerHandle, _outputMixerStr); + if (errVal < 0) { + RTC_LOG(LS_ERROR) << "Error detaching playout mixer: " + << LATE(snd_strerror)(errVal); + } + errVal = LATE(snd_mixer_close)(_outputMixerHandle); + if (errVal < 0) { + RTC_LOG(LS_ERROR) << "Error snd_mixer_close(handleMixer) errVal=" + << errVal; + } + } + _outputMixerHandle = NULL; + _outputMixerElement = NULL; + + errVal = LATE(snd_mixer_open)(&_outputMixerHandle, 0); + if (errVal < 0) { + RTC_LOG(LS_ERROR) << "snd_mixer_open(&_outputMixerHandle, 0) - error"; + return -1; + } + + char controlName[kAdmMaxDeviceNameSize] = {0}; + GetControlName(controlName, deviceName); + + RTC_LOG(LS_VERBOSE) << "snd_mixer_attach(_outputMixerHandle, " << controlName + << ")"; + + errVal = LATE(snd_mixer_attach)(_outputMixerHandle, controlName); + if (errVal < 0) { + RTC_LOG(LS_ERROR) << "snd_mixer_attach(_outputMixerHandle, " << controlName + << ") error: " << LATE(snd_strerror)(errVal); + _outputMixerHandle = NULL; + return -1; + } + strcpy(_outputMixerStr, controlName); + + errVal = LATE(snd_mixer_selem_register)(_outputMixerHandle, NULL, NULL); + if (errVal < 0) { + RTC_LOG(LS_ERROR) + << "snd_mixer_selem_register(_outputMixerHandle, NULL, NULL), " + "error: " + << LATE(snd_strerror)(errVal); + _outputMixerHandle = NULL; + return -1; + } + + // Load and find the proper mixer element + if (LoadSpeakerMixerElement() < 0) { + return -1; + } + + if (_outputMixerHandle != NULL) { + RTC_LOG(LS_VERBOSE) << "the output mixer device is now open (" + << _outputMixerHandle << ")"; + } + + return 0; +} + +int32_t AudioMixerManagerLinuxALSA::OpenMicrophone(char* deviceName) { + RTC_LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::OpenMicrophone(name=" + << deviceName << ")"; + + MutexLock lock(&mutex_); + + int errVal = 0; + + // Close any existing input mixer handle + // + if (_inputMixerHandle != NULL) { + RTC_LOG(LS_VERBOSE) << "Closing record mixer"; + + LATE(snd_mixer_free)(_inputMixerHandle); + if (errVal < 0) { + RTC_LOG(LS_ERROR) << "Error freeing record mixer: " + << LATE(snd_strerror)(errVal); + } + RTC_LOG(LS_VERBOSE) << "Closing record mixer"; + + errVal = LATE(snd_mixer_detach)(_inputMixerHandle, _inputMixerStr); + if (errVal < 0) { + RTC_LOG(LS_ERROR) << "Error detaching record mixer: " + << LATE(snd_strerror)(errVal); + } + RTC_LOG(LS_VERBOSE) << "Closing record mixer"; + + errVal = LATE(snd_mixer_close)(_inputMixerHandle); + if (errVal < 0) { + RTC_LOG(LS_ERROR) << "Error snd_mixer_close(handleMixer) errVal=" + << errVal; + } + RTC_LOG(LS_VERBOSE) << "Closing record mixer"; + } + _inputMixerHandle = NULL; + _inputMixerElement = NULL; + + errVal = LATE(snd_mixer_open)(&_inputMixerHandle, 0); + if (errVal < 0) { + RTC_LOG(LS_ERROR) << "snd_mixer_open(&_inputMixerHandle, 0) - error"; + return -1; + } + + char controlName[kAdmMaxDeviceNameSize] = {0}; + GetControlName(controlName, deviceName); + + RTC_LOG(LS_VERBOSE) << "snd_mixer_attach(_inputMixerHandle, " << controlName + << ")"; + + errVal = LATE(snd_mixer_attach)(_inputMixerHandle, controlName); + if (errVal < 0) { + RTC_LOG(LS_ERROR) << "snd_mixer_attach(_inputMixerHandle, " << controlName + << ") error: " << LATE(snd_strerror)(errVal); + + _inputMixerHandle = NULL; + return -1; + } + strcpy(_inputMixerStr, controlName); + + errVal = LATE(snd_mixer_selem_register)(_inputMixerHandle, NULL, NULL); + if (errVal < 0) { + RTC_LOG(LS_ERROR) + << "snd_mixer_selem_register(_inputMixerHandle, NULL, NULL), " + "error: " + << LATE(snd_strerror)(errVal); + + 
_inputMixerHandle = NULL;
+    return -1;
+  }
+  // Load and find the proper mixer element
+  if (LoadMicMixerElement() < 0) {
+    return -1;
+  }
+
+  if (_inputMixerHandle != NULL) {
+    RTC_LOG(LS_VERBOSE) << "the input mixer device is now open ("
+                        << _inputMixerHandle << ")";
+  }
+
+  return 0;
+}
+
+bool AudioMixerManagerLinuxALSA::SpeakerIsInitialized() const {
+  RTC_DLOG(LS_INFO) << __FUNCTION__;
+
+  return (_outputMixerHandle != NULL);
+}
+
+bool AudioMixerManagerLinuxALSA::MicrophoneIsInitialized() const {
+  RTC_DLOG(LS_INFO) << __FUNCTION__;
+
+  return (_inputMixerHandle != NULL);
+}
+
+int32_t AudioMixerManagerLinuxALSA::SetSpeakerVolume(uint32_t volume) {
+  RTC_LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::SetSpeakerVolume(volume="
+                      << volume << ")";
+
+  MutexLock lock(&mutex_);
+
+  if (_outputMixerElement == NULL) {
+    RTC_LOG(LS_WARNING) << "no available output mixer element exists";
+    return -1;
+  }
+
+  int errVal = LATE(snd_mixer_selem_set_playback_volume_all)(
+      _outputMixerElement, volume);
+  if (errVal < 0) {
+    RTC_LOG(LS_ERROR) << "Error changing master volume: "
+                      << LATE(snd_strerror)(errVal);
+    return -1;
+  }
+
+  return (0);
+}
+
+int32_t AudioMixerManagerLinuxALSA::SpeakerVolume(uint32_t& volume) const {
+  if (_outputMixerElement == NULL) {
+    RTC_LOG(LS_WARNING) << "no available output mixer element exists";
+    return -1;
+  }
+
+  long int vol(0);
+
+  int errVal = LATE(snd_mixer_selem_get_playback_volume)(
+      _outputMixerElement, (snd_mixer_selem_channel_id_t)0, &vol);
+  if (errVal < 0) {
+    RTC_LOG(LS_ERROR) << "Error getting output volume: "
+                      << LATE(snd_strerror)(errVal);
+    return -1;
+  }
+  RTC_LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::SpeakerVolume() => vol="
+                      << vol;
+
+  volume = static_cast<uint32_t>(vol);
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxALSA::MaxSpeakerVolume(
+    uint32_t& maxVolume) const {
+  if (_outputMixerElement == NULL) {
+    RTC_LOG(LS_WARNING) << "no available output mixer element exists";
+    return -1;
+  }
+
+  long int minVol(0);
+  long int maxVol(0);
+
+  int errVal = LATE(snd_mixer_selem_get_playback_volume_range)(
+      _outputMixerElement, &minVol, &maxVol);
+
+  RTC_LOG(LS_VERBOSE) << "Playout hardware volume range, min: " << minVol
+                      << ", max: " << maxVol;
+
+  if (maxVol <= minVol) {
+    RTC_LOG(LS_ERROR) << "Error getting get_playback_volume_range: "
+                      << LATE(snd_strerror)(errVal);
+  }
+
+  maxVolume = static_cast<uint32_t>(maxVol);
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxALSA::MinSpeakerVolume(
+    uint32_t& minVolume) const {
+  if (_outputMixerElement == NULL) {
+    RTC_LOG(LS_WARNING) << "no available output mixer element exists";
+    return -1;
+  }
+
+  long int minVol(0);
+  long int maxVol(0);
+
+  int errVal = LATE(snd_mixer_selem_get_playback_volume_range)(
+      _outputMixerElement, &minVol, &maxVol);
+
+  RTC_LOG(LS_VERBOSE) << "Playout hardware volume range, min: " << minVol
+                      << ", max: " << maxVol;
+
+  if (maxVol <= minVol) {
+    RTC_LOG(LS_ERROR) << "Error getting get_playback_volume_range: "
+                      << LATE(snd_strerror)(errVal);
+  }
+
+  minVolume = static_cast<uint32_t>(minVol);
+
+  return 0;
+}
+
+// TL: Have done testing with these but they don't seem reliable and
+// they were therefore not added
+/*
+  // ----------------------------------------------------------------------------
+  //  SetMaxSpeakerVolume
+  // ----------------------------------------------------------------------------
+
+  int32_t AudioMixerManagerLinuxALSA::SetMaxSpeakerVolume(
+      uint32_t maxVolume)
+  {
+
+    if (_outputMixerElement == NULL)
+    {
+      RTC_LOG(LS_WARNING) << "no available output mixer element exists";
+      return -1;
+    }
+
+    long int minVol(0);
+    long int maxVol(0);
+
+    int errVal = snd_mixer_selem_get_playback_volume_range(
+        _outputMixerElement, &minVol, &maxVol);
+    if ((maxVol <= minVol) || (errVal != 0))
+    {
+      RTC_LOG(LS_WARNING) << "Error getting playback volume range: "
+                          << snd_strerror(errVal);
+    }
+
+    maxVol = maxVolume;
+    errVal = snd_mixer_selem_set_playback_volume_range(
+        _outputMixerElement, minVol, maxVol);
+    RTC_LOG(LS_VERBOSE) << "Playout hardware volume range, min: " << minVol
+                        << ", max: " << maxVol;
+    if (errVal != 0)
+    {
+      RTC_LOG(LS_ERROR) << "Error setting playback volume range: "
+                        << snd_strerror(errVal);
+      return -1;
+    }
+
+    return 0;
+  }
+
+  // ----------------------------------------------------------------------------
+  //  SetMinSpeakerVolume
+  // ----------------------------------------------------------------------------
+
+  int32_t AudioMixerManagerLinuxALSA::SetMinSpeakerVolume(
+      uint32_t minVolume)
+  {
+
+    if (_outputMixerElement == NULL)
+    {
+      RTC_LOG(LS_WARNING) << "no available output mixer element exists";
+      return -1;
+    }
+
+    long int minVol(0);
+    long int maxVol(0);
+
+    int errVal = snd_mixer_selem_get_playback_volume_range(
+        _outputMixerElement, &minVol, &maxVol);
+    if ((maxVol <= minVol) || (errVal != 0))
+    {
+      RTC_LOG(LS_WARNING) << "Error getting playback volume range: "
+                          << snd_strerror(errVal);
+    }
+
+    minVol = minVolume;
+    errVal = snd_mixer_selem_set_playback_volume_range(
+        _outputMixerElement, minVol, maxVol);
+    RTC_LOG(LS_VERBOSE) << "Playout hardware volume range, min: " << minVol
+                        << ", max: " << maxVol;
+    if (errVal != 0)
+    {
+      RTC_LOG(LS_ERROR) << "Error setting playback volume range: "
+                        << snd_strerror(errVal);
+      return -1;
+    }
+
+    return 0;
+  }
+*/
+
+int32_t AudioMixerManagerLinuxALSA::SpeakerVolumeIsAvailable(bool& available) {
+  if (_outputMixerElement == NULL) {
+    RTC_LOG(LS_WARNING) << "no available output mixer element exists";
+    return -1;
+  }
+
+  available = LATE(snd_mixer_selem_has_playback_volume)(_outputMixerElement);
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxALSA::SpeakerMuteIsAvailable(bool& available) {
+  if (_outputMixerElement == NULL) {
+    RTC_LOG(LS_WARNING) << "no available output mixer element exists";
+    return -1;
+  }
+
+  available = LATE(snd_mixer_selem_has_playback_switch)(_outputMixerElement);
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxALSA::SetSpeakerMute(bool enable) {
+  RTC_LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::SetSpeakerMute(enable="
+                      << enable << ")";
+
+  MutexLock lock(&mutex_);
+
+  if (_outputMixerElement == NULL) {
+    RTC_LOG(LS_WARNING) << "no available output mixer element exists";
+    return -1;
+  }
+
+  // Ensure that the selected speaker destination has a valid mute control.
+  bool available(false);
+  SpeakerMuteIsAvailable(available);
+  if (!available) {
+    RTC_LOG(LS_WARNING) << "it is not possible to mute the speaker";
+    return -1;
+  }
+
+  // Note value = 0 (off) means muted
+  int errVal = LATE(snd_mixer_selem_set_playback_switch_all)(
+      _outputMixerElement, !enable);
+  if (errVal < 0) {
+    RTC_LOG(LS_ERROR) << "Error setting playback switch: "
+                      << LATE(snd_strerror)(errVal);
+    return -1;
+  }
+
+  return (0);
+}
+
+int32_t AudioMixerManagerLinuxALSA::SpeakerMute(bool& enabled) const {
+  if (_outputMixerElement == NULL) {
+    RTC_LOG(LS_WARNING) << "no available output mixer exists";
+    return -1;
+  }
+
+  // Ensure that the selected speaker destination has a valid mute control.
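+  // (Queried directly here because SpeakerMuteIsAvailable() is non-const.)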
+  bool available =
+      LATE(snd_mixer_selem_has_playback_switch)(_outputMixerElement);
+  if (!available) {
+    RTC_LOG(LS_WARNING) << "it is not possible to mute the speaker";
+    return -1;
+  }
+
+  int value(false);
+
+  // Retrieve one boolean control value for a specified mute-control
+  //
+  int errVal = LATE(snd_mixer_selem_get_playback_switch)(
+      _outputMixerElement, (snd_mixer_selem_channel_id_t)0, &value);
+  if (errVal < 0) {
+    RTC_LOG(LS_ERROR) << "Error getting playback switch: "
+                      << LATE(snd_strerror)(errVal);
+    return -1;
+  }
+
+  // Note value = 0 (off) means muted
+  enabled = (bool)!value;
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxALSA::MicrophoneMuteIsAvailable(bool& available) {
+  if (_inputMixerElement == NULL) {
+    RTC_LOG(LS_WARNING) << "no available input mixer element exists";
+    return -1;
+  }
+
+  available = LATE(snd_mixer_selem_has_capture_switch)(_inputMixerElement);
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxALSA::SetMicrophoneMute(bool enable) {
+  RTC_LOG(LS_VERBOSE) << "AudioMixerManagerLinuxALSA::SetMicrophoneMute(enable="
+                      << enable << ")";
+
+  MutexLock lock(&mutex_);
+
+  if (_inputMixerElement == NULL) {
+    RTC_LOG(LS_WARNING) << "no available input mixer element exists";
+    return -1;
+  }
+
+  // Ensure that the selected microphone destination has a valid mute control.
+  bool available(false);
+  MicrophoneMuteIsAvailable(available);
+  if (!available) {
+    RTC_LOG(LS_WARNING) << "it is not possible to mute the microphone";
+    return -1;
+  }
+
+  // Note value = 0 (off) means muted
+  int errVal =
+      LATE(snd_mixer_selem_set_capture_switch_all)(_inputMixerElement, !enable);
+  if (errVal < 0) {
+    RTC_LOG(LS_ERROR) << "Error setting capture switch: "
+                      << LATE(snd_strerror)(errVal);
+    return -1;
+  }
+
+  return (0);
+}
+
+int32_t AudioMixerManagerLinuxALSA::MicrophoneMute(bool& enabled) const {
+  if (_inputMixerElement == NULL) {
+    RTC_LOG(LS_WARNING) << "no available input mixer exists";
+    return -1;
+  }
+
+  // Ensure that the selected microphone destination has a valid mute control.
+  bool available =
+      LATE(snd_mixer_selem_has_capture_switch)(_inputMixerElement);
+  if (!available) {
+    RTC_LOG(LS_WARNING) << "it is not possible to mute the microphone";
+    return -1;
+  }
+
+  int value(false);
+
+  // Retrieve one boolean control value for a specified mute-control
+  //
+  int errVal = LATE(snd_mixer_selem_get_capture_switch)(
+      _inputMixerElement, (snd_mixer_selem_channel_id_t)0, &value);
+  if (errVal < 0) {
+    RTC_LOG(LS_ERROR) << "Error getting capture switch: "
+                      << LATE(snd_strerror)(errVal);
+    return -1;
+  }
+
+  // Note value = 0 (off) means muted
+  enabled = (bool)!value;
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxALSA::MicrophoneVolumeIsAvailable(
+    bool& available) {
+  if (_inputMixerElement == NULL) {
+    RTC_LOG(LS_WARNING) << "no available input mixer element exists";
+    return -1;
+  }
+
+  available = LATE(snd_mixer_selem_has_capture_volume)(_inputMixerElement);
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxALSA::SetMicrophoneVolume(uint32_t volume) {
+  RTC_LOG(LS_VERBOSE)
+      << "AudioMixerManagerLinuxALSA::SetMicrophoneVolume(volume=" << volume
+      << ")";
+
+  MutexLock lock(&mutex_);
+
+  if (_inputMixerElement == NULL) {
+    RTC_LOG(LS_WARNING) << "no available input mixer element exists";
+    return -1;
+  }
+
+  int errVal =
+      LATE(snd_mixer_selem_set_capture_volume_all)(_inputMixerElement, volume);
+  if (errVal < 0) {
+    RTC_LOG(LS_ERROR) << "Error changing microphone volume: "
+                      << LATE(snd_strerror)(errVal);
+    return -1;
+  }
+
+  return (0);
+}
+
+// TL: Have done testing with these but they don't seem reliable and
+// they were therefore not added
+/*
+  // ----------------------------------------------------------------------------
+  //  SetMaxMicrophoneVolume
+  // ----------------------------------------------------------------------------
+
+  int32_t AudioMixerManagerLinuxALSA::SetMaxMicrophoneVolume(
+      uint32_t maxVolume)
+  {
+
+    if (_inputMixerElement == NULL)
+    {
+      RTC_LOG(LS_WARNING) << "no available output mixer element exists";
+      return -1;
+    }
+
+    long int minVol(0);
+    long int maxVol(0);
+
+    int errVal = snd_mixer_selem_get_capture_volume_range(_inputMixerElement,
+                                                          &minVol, &maxVol);
+    if ((maxVol <= minVol) || (errVal != 0))
+    {
+      RTC_LOG(LS_WARNING) << "Error getting capture volume range: "
+                          << snd_strerror(errVal);
+    }
+
+    maxVol = (long int)maxVolume;
+    printf("min %d max %d", minVol, maxVol);
+    errVal = snd_mixer_selem_set_capture_volume_range(_inputMixerElement,
+                                                      minVol, maxVol);
+    RTC_LOG(LS_VERBOSE) << "Capture hardware volume range, min: " << minVol
+                        << ", max: " << maxVol;
+    if (errVal != 0)
+    {
+      RTC_LOG(LS_ERROR) << "Error setting capture volume range: "
+                        << snd_strerror(errVal);
+      return -1;
+    }
+
+    return 0;
+  }
+
+  // ----------------------------------------------------------------------------
+  //  SetMinMicrophoneVolume
+  // ----------------------------------------------------------------------------
+
+  int32_t AudioMixerManagerLinuxALSA::SetMinMicrophoneVolume(
+      uint32_t minVolume)
+  {
+
+    if (_inputMixerElement == NULL)
+    {
+      RTC_LOG(LS_WARNING) << "no available output mixer element exists";
+      return -1;
+    }
+
+    long int minVol(0);
+    long int maxVol(0);
+
+    int errVal = snd_mixer_selem_get_capture_volume_range(
+        _inputMixerElement, &minVol, &maxVol);
+    if (maxVol <= minVol)
+    {
+      // maxVol = 255;
+      RTC_LOG(LS_WARNING) << "Error getting capture volume range: "
+                          << snd_strerror(errVal);
+    }
+
+    printf("min %d max %d", minVol, maxVol);
+    minVol = (long int)minVolume;
+    errVal = snd_mixer_selem_set_capture_volume_range(
+        _inputMixerElement, minVol, maxVol);
+    RTC_LOG(LS_VERBOSE) << "Capture hardware volume range, min: " << minVol
+                        << ", max: " << maxVol;
+    if (errVal != 0)
+    {
+      RTC_LOG(LS_ERROR) << "Error setting capture volume range: "
+                        << snd_strerror(errVal);
+      return -1;
+    }
+
+    return 0;
+  }
+*/
+
+int32_t AudioMixerManagerLinuxALSA::MicrophoneVolume(uint32_t& volume) const {
+  if (_inputMixerElement == NULL) {
+    RTC_LOG(LS_WARNING) << "no available input mixer element exists";
+    return -1;
+  }
+
+  long int vol(0);
+
+  int errVal = LATE(snd_mixer_selem_get_capture_volume)(
+      _inputMixerElement, (snd_mixer_selem_channel_id_t)0, &vol);
+  if (errVal < 0) {
+    RTC_LOG(LS_ERROR) << "Error getting input volume: "
+                      << LATE(snd_strerror)(errVal);
+    return -1;
+  }
+  RTC_LOG(LS_VERBOSE)
+      << "AudioMixerManagerLinuxALSA::MicrophoneVolume() => vol=" << vol;
+
+  volume = static_cast<uint32_t>(vol);
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxALSA::MaxMicrophoneVolume(
+    uint32_t& maxVolume) const {
+  if (_inputMixerElement == NULL) {
+    RTC_LOG(LS_WARNING) << "no available input mixer element exists";
+    return -1;
+  }
+
+  long int minVol(0);
+  long int maxVol(0);
+
+  // check if we have mic volume at all
+  if (!LATE(snd_mixer_selem_has_capture_volume)(_inputMixerElement)) {
+    RTC_LOG(LS_ERROR) << "No microphone volume available";
+    return -1;
+  }
+
+  int errVal = LATE(snd_mixer_selem_get_capture_volume_range)(
+      _inputMixerElement, &minVol, &maxVol);
+
+  RTC_LOG(LS_VERBOSE) << "Microphone hardware volume range, min: " << minVol
+                      << ", max: " << maxVol;
+  if (maxVol <= minVol) {
+    RTC_LOG(LS_ERROR) << "Error getting microphone volume range: "
+                      << LATE(snd_strerror)(errVal);
+  }
+
+  maxVolume = static_cast<uint32_t>(maxVol);
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxALSA::MinMicrophoneVolume(
+    uint32_t& minVolume) const {
+  if (_inputMixerElement == NULL) {
+    RTC_LOG(LS_WARNING) << "no available input mixer element exists";
+    return -1;
+  }
+
+  long int minVol(0);
+  long int maxVol(0);
+
+  int errVal = LATE(snd_mixer_selem_get_capture_volume_range)(
+      _inputMixerElement, &minVol, &maxVol);
+
+  RTC_LOG(LS_VERBOSE) << "Microphone hardware volume range, min: " << minVol
+                      << ", max: " << maxVol;
+  if (maxVol <= minVol) {
+    RTC_LOG(LS_ERROR) << "Error getting microphone volume range: "
+                      << LATE(snd_strerror)(errVal);
+  }
+
+  minVolume = static_cast<uint32_t>(minVol);
+
+  return 0;
+}
+
+// ============================================================================
+// Private Methods
+// ============================================================================
+
+int32_t AudioMixerManagerLinuxALSA::LoadMicMixerElement() const {
+  int errVal = LATE(snd_mixer_load)(_inputMixerHandle);
+  if (errVal < 0) {
+    RTC_LOG(LS_ERROR) << "snd_mixer_load(_inputMixerHandle), error: "
+                      << LATE(snd_strerror)(errVal);
+    _inputMixerHandle = NULL;
+    return -1;
+  }
+
+  snd_mixer_elem_t* elem = NULL;
+  snd_mixer_elem_t* micElem = NULL;
+  unsigned mixerIdx = 0;
+  const char* selemName = NULL;
+
+  // Find and store handles to the right mixer elements
+  for (elem = LATE(snd_mixer_first_elem)(_inputMixerHandle); elem;
+       elem = LATE(snd_mixer_elem_next)(elem), mixerIdx++) {
+    if (LATE(snd_mixer_selem_is_active)(elem)) {
+      selemName = LATE(snd_mixer_selem_get_name)(elem);
+      if (strcmp(selemName, "Capture") == 0)  // "Capture", "Mic"
+      {
+        _inputMixerElement = elem;
+        RTC_LOG(LS_VERBOSE) << "Capture element set";
+      } else if (strcmp(selemName, "Mic") == 0) {
+        micElem = elem;
+        RTC_LOG(LS_VERBOSE) << "Mic element found";
+      }
+    }
+
+    if
(_inputMixerElement) { + // Use the first Capture element that is found + // The second one may not work + break; + } + } + + if (_inputMixerElement == NULL) { + // We didn't find a Capture handle, use Mic. + if (micElem != NULL) { + _inputMixerElement = micElem; + RTC_LOG(LS_VERBOSE) << "Using Mic as capture volume."; + } else { + _inputMixerElement = NULL; + RTC_LOG(LS_ERROR) << "Could not find capture volume on the mixer."; + + return -1; + } + } + + return 0; +} + +int32_t AudioMixerManagerLinuxALSA::LoadSpeakerMixerElement() const { + int errVal = LATE(snd_mixer_load)(_outputMixerHandle); + if (errVal < 0) { + RTC_LOG(LS_ERROR) << "snd_mixer_load(_outputMixerHandle), error: " + << LATE(snd_strerror)(errVal); + _outputMixerHandle = NULL; + return -1; + } + + snd_mixer_elem_t* elem = NULL; + snd_mixer_elem_t* masterElem = NULL; + snd_mixer_elem_t* speakerElem = NULL; + unsigned mixerIdx = 0; + const char* selemName = NULL; + + // Find and store handles to the right mixer elements + for (elem = LATE(snd_mixer_first_elem)(_outputMixerHandle); elem; + elem = LATE(snd_mixer_elem_next)(elem), mixerIdx++) { + if (LATE(snd_mixer_selem_is_active)(elem)) { + selemName = LATE(snd_mixer_selem_get_name)(elem); + RTC_LOG(LS_VERBOSE) << "snd_mixer_selem_get_name " << mixerIdx << ": " + << selemName << " =" << elem; + + // "Master", "PCM", "Wave", "Master Mono", "PC Speaker", "PCM", "Wave" + if (strcmp(selemName, "PCM") == 0) { + _outputMixerElement = elem; + RTC_LOG(LS_VERBOSE) << "PCM element set"; + } else if (strcmp(selemName, "Master") == 0) { + masterElem = elem; + RTC_LOG(LS_VERBOSE) << "Master element found"; + } else if (strcmp(selemName, "Speaker") == 0) { + speakerElem = elem; + RTC_LOG(LS_VERBOSE) << "Speaker element found"; + } + } + + if (_outputMixerElement) { + // We have found the element we want + break; + } + } + + // If we didn't find a PCM Handle, use Master or Speaker + if (_outputMixerElement == NULL) { + if (masterElem != NULL) { + _outputMixerElement = masterElem; + RTC_LOG(LS_VERBOSE) << "Using Master as output volume."; + } else if (speakerElem != NULL) { + _outputMixerElement = speakerElem; + RTC_LOG(LS_VERBOSE) << "Using Speaker as output volume."; + } else { + _outputMixerElement = NULL; + RTC_LOG(LS_ERROR) << "Could not find output volume in the mixer."; + return -1; + } + } + + return 0; +} + +void AudioMixerManagerLinuxALSA::GetControlName(char* controlName, + char* deviceName) const { + // Example + // deviceName: "front:CARD=Intel,DEV=0" + // controlName: "hw:CARD=Intel" + char* pos1 = strchr(deviceName, ':'); + char* pos2 = strchr(deviceName, ','); + if (!pos2) { + // Can also be default:CARD=Intel + pos2 = &deviceName[strlen(deviceName)]; + } + if (pos1 && pos2) { + strcpy(controlName, "hw"); + int nChar = (int)(pos2 - pos1); + strncpy(&controlName[2], pos1, nChar); + controlName[2 + nChar] = '\0'; + } else { + strcpy(controlName, deviceName); + } +} + +} // namespace webrtc diff --git a/third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_alsa_linux.h b/third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_alsa_linux.h new file mode 100644 index 0000000000..d98287822d --- /dev/null +++ b/third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_alsa_linux.h @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. 
An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_DEVICE_AUDIO_MIXER_MANAGER_ALSA_LINUX_H_
+#define AUDIO_DEVICE_AUDIO_MIXER_MANAGER_ALSA_LINUX_H_
+
+#include <alsa/asoundlib.h>
+
+#include "modules/audio_device/include/audio_device.h"
+#include "modules/audio_device/linux/alsasymboltable_linux.h"
+#include "rtc_base/synchronization/mutex.h"
+
+namespace webrtc {
+
+class AudioMixerManagerLinuxALSA {
+ public:
+  int32_t OpenSpeaker(char* deviceName) RTC_LOCKS_EXCLUDED(mutex_);
+  int32_t OpenMicrophone(char* deviceName) RTC_LOCKS_EXCLUDED(mutex_);
+  int32_t SetSpeakerVolume(uint32_t volume) RTC_LOCKS_EXCLUDED(mutex_);
+  int32_t SpeakerVolume(uint32_t& volume) const;
+  int32_t MaxSpeakerVolume(uint32_t& maxVolume) const;
+  int32_t MinSpeakerVolume(uint32_t& minVolume) const;
+  int32_t SpeakerVolumeIsAvailable(bool& available);
+  int32_t SpeakerMuteIsAvailable(bool& available);
+  int32_t SetSpeakerMute(bool enable) RTC_LOCKS_EXCLUDED(mutex_);
+  int32_t SpeakerMute(bool& enabled) const;
+  int32_t MicrophoneMuteIsAvailable(bool& available);
+  int32_t SetMicrophoneMute(bool enable) RTC_LOCKS_EXCLUDED(mutex_);
+  int32_t MicrophoneMute(bool& enabled) const;
+  int32_t MicrophoneVolumeIsAvailable(bool& available);
+  int32_t SetMicrophoneVolume(uint32_t volume) RTC_LOCKS_EXCLUDED(mutex_);
+  int32_t MicrophoneVolume(uint32_t& volume) const;
+  int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const;
+  int32_t MinMicrophoneVolume(uint32_t& minVolume) const;
+  int32_t Close() RTC_LOCKS_EXCLUDED(mutex_);
+  int32_t CloseSpeaker() RTC_LOCKS_EXCLUDED(mutex_);
+  int32_t CloseMicrophone() RTC_LOCKS_EXCLUDED(mutex_);
+  bool SpeakerIsInitialized() const;
+  bool MicrophoneIsInitialized() const;
+
+ public:
+  AudioMixerManagerLinuxALSA();
+  ~AudioMixerManagerLinuxALSA();
+
+ private:
+  int32_t CloseSpeakerLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+  int32_t CloseMicrophoneLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+  int32_t LoadMicMixerElement() const;
+  int32_t LoadSpeakerMixerElement() const;
+  void GetControlName(char* controlName, char* deviceName) const;
+
+ private:
+  Mutex mutex_;
+  mutable snd_mixer_t* _outputMixerHandle;
+  char _outputMixerStr[kAdmMaxDeviceNameSize];
+  mutable snd_mixer_t* _inputMixerHandle;
+  char _inputMixerStr[kAdmMaxDeviceNameSize];
+  mutable snd_mixer_elem_t* _outputMixerElement;
+  mutable snd_mixer_elem_t* _inputMixerElement;
+};
+
+}  // namespace webrtc
+
+#endif  // AUDIO_DEVICE_AUDIO_MIXER_MANAGER_ALSA_LINUX_H_
diff --git a/third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.cc b/third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.cc
new file mode 100644
index 0000000000..91beee3c87
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.cc
@@ -0,0 +1,844 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/linux/audio_mixer_manager_pulse_linux.h"
+
+#include <stddef.h>
+
+#include "modules/audio_device/linux/audio_device_pulse_linux.h"
+#include "modules/audio_device/linux/latebindingsymboltable_linux.h"
+#include "modules/audio_device/linux/pulseaudiosymboltable_linux.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+// Accesses Pulse functions through our late-binding symbol table instead of
+// directly. This way we don't have to link to libpulse, which means our binary
+// will work on systems that don't have it.
+#define LATE(sym) \
+  LATESYM_GET(webrtc::adm_linux_pulse::PulseAudioSymbolTable, \
+              GetPulseSymbolTable(), sym)
+
+namespace webrtc {
+
+class AutoPulseLock {
+ public:
+  explicit AutoPulseLock(pa_threaded_mainloop* pa_mainloop)
+      : pa_mainloop_(pa_mainloop) {
+    LATE(pa_threaded_mainloop_lock)(pa_mainloop_);
+  }
+
+  ~AutoPulseLock() { LATE(pa_threaded_mainloop_unlock)(pa_mainloop_); }
+
+ private:
+  pa_threaded_mainloop* const pa_mainloop_;
+};
+
+AudioMixerManagerLinuxPulse::AudioMixerManagerLinuxPulse()
+    : _paOutputDeviceIndex(-1),
+      _paInputDeviceIndex(-1),
+      _paPlayStream(NULL),
+      _paRecStream(NULL),
+      _paMainloop(NULL),
+      _paContext(NULL),
+      _paVolume(0),
+      _paMute(0),
+      _paVolSteps(0),
+      _paSpeakerMute(false),
+      _paSpeakerVolume(PA_VOLUME_NORM),
+      _paChannels(0),
+      _paObjectsSet(false) {
+  RTC_DLOG(LS_INFO) << __FUNCTION__ << " created";
+}
+
+AudioMixerManagerLinuxPulse::~AudioMixerManagerLinuxPulse() {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  RTC_DLOG(LS_INFO) << __FUNCTION__ << " destroyed";
+
+  Close();
+}
+
+// ===========================================================================
+//                                 PUBLIC METHODS
+// ===========================================================================
+
+int32_t AudioMixerManagerLinuxPulse::SetPulseAudioObjects(
+    pa_threaded_mainloop* mainloop,
+    pa_context* context) {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+  if (!mainloop || !context) {
+    RTC_LOG(LS_ERROR) << "could not set PulseAudio objects for mixer";
+    return -1;
+  }
+
+  _paMainloop = mainloop;
+  _paContext = context;
+  _paObjectsSet = true;
+
+  RTC_LOG(LS_VERBOSE) << "the PulseAudio objects for the mixer have been set";
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::Close() {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+  CloseSpeaker();
+  CloseMicrophone();
+
+  _paMainloop = NULL;
+  _paContext = NULL;
+  _paObjectsSet = false;
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::CloseSpeaker() {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+  // Reset the index to -1
+  _paOutputDeviceIndex = -1;
+  _paPlayStream = NULL;
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::CloseMicrophone() {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+  // Reset the index to -1
+  _paInputDeviceIndex = -1;
+  _paRecStream = NULL;
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::SetPlayStream(pa_stream* playStream) {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  RTC_LOG(LS_VERBOSE)
+      << "AudioMixerManagerLinuxPulse::SetPlayStream(playStream)";
+
+  _paPlayStream = playStream;
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::SetRecStream(pa_stream* recStream) {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  RTC_LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::SetRecStream(recStream)";
+
+  _paRecStream = recStream;
+  return 0;
+}
+
+int32_t
AudioMixerManagerLinuxPulse::OpenSpeaker(uint16_t deviceIndex) {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  RTC_LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::OpenSpeaker(deviceIndex="
+                      << deviceIndex << ")";
+
+  // No point in opening the speaker
+  // if PA objects have not been set
+  if (!_paObjectsSet) {
+    RTC_LOG(LS_ERROR) << "PulseAudio objects have not been set";
+    return -1;
+  }
+
+  // Set the index for the PulseAudio
+  // output device to control
+  _paOutputDeviceIndex = deviceIndex;
+
+  RTC_LOG(LS_VERBOSE) << "the output mixer device is now open";
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::OpenMicrophone(uint16_t deviceIndex) {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  RTC_LOG(LS_VERBOSE)
+      << "AudioMixerManagerLinuxPulse::OpenMicrophone(deviceIndex="
+      << deviceIndex << ")";
+
+  // No point in opening the microphone
+  // if PA objects have not been set
+  if (!_paObjectsSet) {
+    RTC_LOG(LS_ERROR) << "PulseAudio objects have not been set";
+    return -1;
+  }
+
+  // Set the index for the PulseAudio
+  // input device to control
+  _paInputDeviceIndex = deviceIndex;
+
+  RTC_LOG(LS_VERBOSE) << "the input mixer device is now open";
+
+  return 0;
+}
+
+bool AudioMixerManagerLinuxPulse::SpeakerIsInitialized() const {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  RTC_DLOG(LS_INFO) << __FUNCTION__;
+
+  return (_paOutputDeviceIndex != -1);
+}
+
+bool AudioMixerManagerLinuxPulse::MicrophoneIsInitialized() const {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  RTC_DLOG(LS_INFO) << __FUNCTION__;
+
+  return (_paInputDeviceIndex != -1);
+}
+
+int32_t AudioMixerManagerLinuxPulse::SetSpeakerVolume(uint32_t volume) {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  RTC_LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::SetSpeakerVolume(volume="
+                      << volume << ")";
+
+  if (_paOutputDeviceIndex == -1) {
+    RTC_LOG(LS_WARNING) << "output device index has not been set";
+    return -1;
+  }
+
+  bool setFailed(false);
+
+  if (_paPlayStream &&
+      (LATE(pa_stream_get_state)(_paPlayStream) != PA_STREAM_UNCONNECTED)) {
+    // We can only really set the volume if we have a connected stream
+    AutoPulseLock auto_lock(_paMainloop);
+
+    // Get the number of channels from the sample specification
+    const pa_sample_spec* spec = LATE(pa_stream_get_sample_spec)(_paPlayStream);
+    if (!spec) {
+      RTC_LOG(LS_ERROR) << "could not get sample specification";
+      return -1;
+    }
+
+    // Set the same volume for all channels
+    pa_cvolume cVolumes;
+    LATE(pa_cvolume_set)(&cVolumes, spec->channels, volume);
+
+    pa_operation* paOperation = NULL;
+    paOperation = LATE(pa_context_set_sink_input_volume)(
+        _paContext, LATE(pa_stream_get_index)(_paPlayStream), &cVolumes,
+        PaSetVolumeCallback, NULL);
+    if (!paOperation) {
+      setFailed = true;
+    }
+
+    // Don't need to wait for the completion
+    LATE(pa_operation_unref)(paOperation);
+  } else {
+    // We have not created a stream or it's not connected to the sink
+    // Save the volume to be set at connection
+    _paSpeakerVolume = volume;
+  }
+
+  if (setFailed) {
+    RTC_LOG(LS_WARNING) << "could not set speaker volume, error="
+                        << LATE(pa_context_errno)(_paContext);
+
+    return -1;
+  }
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::SpeakerVolume(uint32_t& volume) const {
+  if (_paOutputDeviceIndex == -1) {
+    RTC_LOG(LS_WARNING) << "output device index has not been set";
+    return -1;
+  }
+
+  if (_paPlayStream &&
+      (LATE(pa_stream_get_state)(_paPlayStream) != PA_STREAM_UNCONNECTED)) {
+    // We can only get the volume if we have a connected stream
+    if (!GetSinkInputInfo())
+      return -1;
+
+    AutoPulseLock auto_lock(_paMainloop);
+    volume = static_cast<uint32_t>(_paVolume);
+  } else {
+    AutoPulseLock auto_lock(_paMainloop);
+    volume = _paSpeakerVolume;
+  }
+
+  RTC_LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::SpeakerVolume() => vol="
+                      << volume;
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::MaxSpeakerVolume(
+    uint32_t& maxVolume) const {
+  if (_paOutputDeviceIndex == -1) {
+    RTC_LOG(LS_WARNING) << "output device index has not been set";
+    return -1;
+  }
+
+  // PA_VOLUME_NORM corresponds to 100% (0 dB)
+  // but PA allows up to 150 dB amplification
+  maxVolume = static_cast<uint32_t>(PA_VOLUME_NORM);
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::MinSpeakerVolume(
+    uint32_t& minVolume) const {
+  if (_paOutputDeviceIndex == -1) {
+    RTC_LOG(LS_WARNING) << "output device index has not been set";
+    return -1;
+  }
+
+  minVolume = static_cast<uint32_t>(PA_VOLUME_MUTED);
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::SpeakerVolumeIsAvailable(bool& available) {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  if (_paOutputDeviceIndex == -1) {
+    RTC_LOG(LS_WARNING) << "output device index has not been set";
+    return -1;
+  }
+
+  // Always available in Pulse Audio
+  available = true;
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::SpeakerMuteIsAvailable(bool& available) {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  if (_paOutputDeviceIndex == -1) {
+    RTC_LOG(LS_WARNING) << "output device index has not been set";
+    return -1;
+  }
+
+  // Always available in Pulse Audio
+  available = true;
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::SetSpeakerMute(bool enable) {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  RTC_LOG(LS_VERBOSE) << "AudioMixerManagerLinuxPulse::SetSpeakerMute(enable="
+                      << enable << ")";
+
+  if (_paOutputDeviceIndex == -1) {
+    RTC_LOG(LS_WARNING) << "output device index has not been set";
+    return -1;
+  }
+
+  bool setFailed(false);
+
+  if (_paPlayStream &&
+      (LATE(pa_stream_get_state)(_paPlayStream) != PA_STREAM_UNCONNECTED)) {
+    // We can only really mute if we have a connected stream
+    AutoPulseLock auto_lock(_paMainloop);
+
+    pa_operation* paOperation = NULL;
+    paOperation = LATE(pa_context_set_sink_input_mute)(
+        _paContext, LATE(pa_stream_get_index)(_paPlayStream), (int)enable,
+        PaSetVolumeCallback, NULL);
+    if (!paOperation) {
+      setFailed = true;
+    }
+
+    // Don't need to wait for the completion
+    LATE(pa_operation_unref)(paOperation);
+  } else {
+    // We have not created a stream or it's not connected to the sink
+    // Save the mute status to be set at connection
+    _paSpeakerMute = enable;
+  }
+
+  if (setFailed) {
+    RTC_LOG(LS_WARNING) << "could not mute speaker, error="
+                        << LATE(pa_context_errno)(_paContext);
+    return -1;
+  }
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::SpeakerMute(bool& enabled) const {
+  if (_paOutputDeviceIndex == -1) {
+    RTC_LOG(LS_WARNING) << "output device index has not been set";
+    return -1;
+  }
+
+  if (_paPlayStream &&
+      (LATE(pa_stream_get_state)(_paPlayStream) != PA_STREAM_UNCONNECTED)) {
+    // We can only get the mute status if we have a connected stream
+    if (!GetSinkInputInfo())
+      return -1;
+
+    enabled = static_cast<bool>(_paMute);
+  } else {
+    enabled = _paSpeakerMute;
+  }
+  RTC_LOG(LS_VERBOSE)
+      << "AudioMixerManagerLinuxPulse::SpeakerMute() => enabled=" << enabled;
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::StereoPlayoutIsAvailable(bool& available) {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  if (_paOutputDeviceIndex == -1) {
+    RTC_LOG(LS_WARNING) << "output device index has not been set";
+    return -1;
+  }
+
+  uint32_t deviceIndex = (uint32_t)_paOutputDeviceIndex;
+
+  {
+    AutoPulseLock auto_lock(_paMainloop);
+
+    // Get the actual stream device index if we have a connected stream
+    // The device used by the stream can be changed
+    // during the call
+    if (_paPlayStream &&
+        (LATE(pa_stream_get_state)(_paPlayStream) != PA_STREAM_UNCONNECTED)) {
+      deviceIndex = LATE(pa_stream_get_device_index)(_paPlayStream);
+    }
+  }
+
+  if (!GetSinkInfoByIndex(deviceIndex))
+    return -1;
+
+  available = static_cast<bool>(_paChannels == 2);
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::StereoRecordingIsAvailable(
+    bool& available) {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  if (_paInputDeviceIndex == -1) {
+    RTC_LOG(LS_WARNING) << "input device index has not been set";
+    return -1;
+  }
+
+  uint32_t deviceIndex = (uint32_t)_paInputDeviceIndex;
+
+  AutoPulseLock auto_lock(_paMainloop);
+
+  // Get the actual stream device index if we have a connected stream
+  // The device used by the stream can be changed
+  // during the call
+  if (_paRecStream &&
+      (LATE(pa_stream_get_state)(_paRecStream) != PA_STREAM_UNCONNECTED)) {
+    deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream);
+  }
+
+  pa_operation* paOperation = NULL;
+
+  // Get info for this source
+  // We want to know if the actual device can record in stereo
+  paOperation = LATE(pa_context_get_source_info_by_index)(
+      _paContext, deviceIndex, PaSourceInfoCallback, (void*)this);
+
+  WaitForOperationCompletion(paOperation);
+
+  available = static_cast<bool>(_paChannels == 2);
+
+  RTC_LOG(LS_VERBOSE)
+      << "AudioMixerManagerLinuxPulse::StereoRecordingIsAvailable()"
+         " => available="
+      << available;
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::MicrophoneMuteIsAvailable(
+    bool& available) {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  if (_paInputDeviceIndex == -1) {
+    RTC_LOG(LS_WARNING) << "input device index has not been set";
+    return -1;
+  }
+
+  // Always available in Pulse Audio
+  available = true;
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::SetMicrophoneMute(bool enable) {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  RTC_LOG(LS_VERBOSE)
+      << "AudioMixerManagerLinuxPulse::SetMicrophoneMute(enable=" << enable
+      << ")";
+
+  if (_paInputDeviceIndex == -1) {
+    RTC_LOG(LS_WARNING) << "input device index has not been set";
+    return -1;
+  }
+
+  bool setFailed(false);
+  pa_operation* paOperation = NULL;
+
+  uint32_t deviceIndex = (uint32_t)_paInputDeviceIndex;
+
+  AutoPulseLock auto_lock(_paMainloop);
+
+  // Get the actual stream device index if we have a connected stream
+  // The device used by the stream can be changed
+  // during the call
+  if (_paRecStream &&
+      (LATE(pa_stream_get_state)(_paRecStream) != PA_STREAM_UNCONNECTED)) {
+    deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream);
+  }
+
+  // Set mute switch for the source
+  paOperation = LATE(pa_context_set_source_mute_by_index)(
+      _paContext, deviceIndex, enable, PaSetVolumeCallback, NULL);
+
+  if (!paOperation) {
+    setFailed = true;
+  }
+
+  // Don't need to wait for this to complete.
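+  // Set operations like this one are fire-and-forget: PaSetVolumeCallback
+  // only logs a failure, and nothing here depends on the server's reply.
+  // Query operations, by contrast, block in WaitForOperationCompletion()
+  // until the matching info callback has filled in the cached _pa* fields
+  // (see the private methods further down).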
+  LATE(pa_operation_unref)(paOperation);
+
+  if (setFailed) {
+    RTC_LOG(LS_WARNING) << "could not mute microphone, error="
+                        << LATE(pa_context_errno)(_paContext);
+    return -1;
+  }
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::MicrophoneMute(bool& enabled) const {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  if (_paInputDeviceIndex == -1) {
+    RTC_LOG(LS_WARNING) << "input device index has not been set";
+    return -1;
+  }
+
+  uint32_t deviceIndex = (uint32_t)_paInputDeviceIndex;
+
+  {
+    AutoPulseLock auto_lock(_paMainloop);
+    // Get the actual stream device index if we have a connected stream
+    // The device used by the stream can be changed
+    // during the call
+    if (_paRecStream &&
+        (LATE(pa_stream_get_state)(_paRecStream) != PA_STREAM_UNCONNECTED)) {
+      deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream);
+    }
+  }
+
+  if (!GetSourceInfoByIndex(deviceIndex))
+    return -1;
+
+  enabled = static_cast<bool>(_paMute);
+
+  RTC_LOG(LS_VERBOSE)
+      << "AudioMixerManagerLinuxPulse::MicrophoneMute() => enabled=" << enabled;
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::MicrophoneVolumeIsAvailable(
+    bool& available) {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  if (_paInputDeviceIndex == -1) {
+    RTC_LOG(LS_WARNING) << "input device index has not been set";
+    return -1;
+  }
+
+  // Always available in Pulse Audio
+  available = true;
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::SetMicrophoneVolume(uint32_t volume) {
+  RTC_LOG(LS_VERBOSE)
+      << "AudioMixerManagerLinuxPulse::SetMicrophoneVolume(volume=" << volume
+      << ")";
+
+  if (_paInputDeviceIndex == -1) {
+    RTC_LOG(LS_WARNING) << "input device index has not been set";
+    return -1;
+  }
+
+  // Unlike output streams, input streams have no concept of a stream
+  // volume, only a device volume. So we have to change the volume of the
+  // device itself.
+
+  // The device may have a different number of channels than the stream and
+  // their mapping may be different, so we don't want to use the channel
+  // count from our sample spec. We could use PA_CHANNELS_MAX to cover our
+  // bases, and the server allows that even if the device's channel count
+  // is lower, but some buggy PA clients don't like that (the pavucontrol
+  // on Hardy dies in an assert if the channel count is different). So
+  // instead we look up the actual number of channels that the device has.
+  AutoPulseLock auto_lock(_paMainloop);
+  uint32_t deviceIndex = (uint32_t)_paInputDeviceIndex;
+
+  // Get the actual stream device index if we have a connected stream
+  // The device used by the stream can be changed
+  // during the call
+  if (_paRecStream &&
+      (LATE(pa_stream_get_state)(_paRecStream) != PA_STREAM_UNCONNECTED)) {
+    deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream);
+  }
+
+  bool setFailed(false);
+  pa_operation* paOperation = NULL;
+
+  // Get the number of channels for this source
+  paOperation = LATE(pa_context_get_source_info_by_index)(
+      _paContext, deviceIndex, PaSourceInfoCallback, (void*)this);
+
+  WaitForOperationCompletion(paOperation);
+
+  uint8_t channels = _paChannels;
+  pa_cvolume cVolumes;
+  LATE(pa_cvolume_set)(&cVolumes, channels, volume);
+
+  // Set the volume for the source
+  paOperation = LATE(pa_context_set_source_volume_by_index)(
+      _paContext, deviceIndex, &cVolumes, PaSetVolumeCallback, NULL);
+
+  if (!paOperation) {
+    setFailed = true;
+  }
+
+  // Don't need to wait for this to complete.
+  LATE(pa_operation_unref)(paOperation);
+
+  if (setFailed) {
+    RTC_LOG(LS_WARNING) << "could not set microphone volume, error="
+                        << LATE(pa_context_errno)(_paContext);
+    return -1;
+  }
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::MicrophoneVolume(uint32_t& volume) const {
+  if (_paInputDeviceIndex == -1) {
+    RTC_LOG(LS_WARNING) << "input device index has not been set";
+    return -1;
+  }
+
+  uint32_t deviceIndex = (uint32_t)_paInputDeviceIndex;
+
+  {
+    AutoPulseLock auto_lock(_paMainloop);
+    // Get the actual stream device index if we have a connected stream.
+    // The device used by the stream can be changed during the call.
+    if (_paRecStream &&
+        (LATE(pa_stream_get_state)(_paRecStream) != PA_STREAM_UNCONNECTED)) {
+      deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream);
+    }
+  }
+
+  if (!GetSourceInfoByIndex(deviceIndex))
+    return -1;
+
+  {
+    AutoPulseLock auto_lock(_paMainloop);
+    volume = static_cast<uint32_t>(_paVolume);
+  }
+
+  RTC_LOG(LS_VERBOSE)
+      << "AudioMixerManagerLinuxPulse::MicrophoneVolume() => vol=" << volume;
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::MaxMicrophoneVolume(
+    uint32_t& maxVolume) const {
+  if (_paInputDeviceIndex == -1) {
+    RTC_LOG(LS_WARNING) << "input device index has not been set";
+    return -1;
+  }
+
+  // PA_VOLUME_NORM corresponds to 100% (0 dB)
+  // PA allows up to 150 dB amplification (PA_VOLUME_MAX)
+  // but that doesn't work well for all sound cards
+  maxVolume = static_cast<uint32_t>(PA_VOLUME_NORM);
+
+  return 0;
+}
+
+int32_t AudioMixerManagerLinuxPulse::MinMicrophoneVolume(
+    uint32_t& minVolume) const {
+  if (_paInputDeviceIndex == -1) {
+    RTC_LOG(LS_WARNING) << "input device index has not been set";
+    return -1;
+  }
+
+  minVolume = static_cast<uint32_t>(PA_VOLUME_MUTED);
+
+  return 0;
+}
+
+// ===========================================================================
+//                              Private Methods
+// ===========================================================================
+
+void AudioMixerManagerLinuxPulse::PaSinkInfoCallback(pa_context* /*c*/,
+                                                     const pa_sink_info* i,
+                                                     int eol,
+                                                     void* pThis) {
+  static_cast<AudioMixerManagerLinuxPulse*>(pThis)->PaSinkInfoCallbackHandler(
+      i, eol);
+}
+
+void AudioMixerManagerLinuxPulse::PaSinkInputInfoCallback(
+    pa_context* /*c*/,
+    const pa_sink_input_info* i,
+    int eol,
+    void* pThis) {
+  static_cast<AudioMixerManagerLinuxPulse*>(pThis)
+      ->PaSinkInputInfoCallbackHandler(i, eol);
+}
+
+void AudioMixerManagerLinuxPulse::PaSourceInfoCallback(pa_context* /*c*/,
+                                                       const pa_source_info* i,
+                                                       int eol,
+                                                       void* pThis) {
+  static_cast<AudioMixerManagerLinuxPulse*>(pThis)->PaSourceInfoCallbackHandler(
+      i, eol);
+}
+
+void AudioMixerManagerLinuxPulse::PaSetVolumeCallback(pa_context* c,
+                                                      int success,
+                                                      void* /*pThis*/) {
+  if (!success) {
+    RTC_LOG(LS_ERROR) << "failed to set volume";
+  }
+}
+
+void AudioMixerManagerLinuxPulse::PaSinkInfoCallbackHandler(
+    const pa_sink_info* i,
+    int eol) {
+  if (eol) {
+    // Signal that we are done
+    LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
+    return;
+  }
+
+  _paChannels = i->channel_map.channels;   // Get number of channels
+  pa_volume_t paVolume = PA_VOLUME_MUTED;  // Minimum possible value.
+ for (int j = 0; j < _paChannels; ++j) { + if (paVolume < i->volume.values[j]) { + paVolume = i->volume.values[j]; + } + } + _paVolume = paVolume; // get the max volume for any channel + _paMute = i->mute; // get mute status + + // supported since PA 0.9.15 + //_paVolSteps = i->n_volume_steps; // get the number of volume steps + // default value is PA_VOLUME_NORM+1 + _paVolSteps = PA_VOLUME_NORM + 1; +} + +void AudioMixerManagerLinuxPulse::PaSinkInputInfoCallbackHandler( + const pa_sink_input_info* i, + int eol) { + if (eol) { + // Signal that we are done + LATE(pa_threaded_mainloop_signal)(_paMainloop, 0); + return; + } + + _paChannels = i->channel_map.channels; // Get number of channels + pa_volume_t paVolume = PA_VOLUME_MUTED; // Minimum possible value. + for (int j = 0; j < _paChannels; ++j) { + if (paVolume < i->volume.values[j]) { + paVolume = i->volume.values[j]; + } + } + _paVolume = paVolume; // Get the max volume for any channel + _paMute = i->mute; // Get mute status +} + +void AudioMixerManagerLinuxPulse::PaSourceInfoCallbackHandler( + const pa_source_info* i, + int eol) { + if (eol) { + // Signal that we are done + LATE(pa_threaded_mainloop_signal)(_paMainloop, 0); + return; + } + + _paChannels = i->channel_map.channels; // Get number of channels + pa_volume_t paVolume = PA_VOLUME_MUTED; // Minimum possible value. + for (int j = 0; j < _paChannels; ++j) { + if (paVolume < i->volume.values[j]) { + paVolume = i->volume.values[j]; + } + } + _paVolume = paVolume; // Get the max volume for any channel + _paMute = i->mute; // Get mute status + + // supported since PA 0.9.15 + //_paVolSteps = i->n_volume_steps; // Get the number of volume steps + // default value is PA_VOLUME_NORM+1 + _paVolSteps = PA_VOLUME_NORM + 1; +} + +void AudioMixerManagerLinuxPulse::WaitForOperationCompletion( + pa_operation* paOperation) const { + while (LATE(pa_operation_get_state)(paOperation) == PA_OPERATION_RUNNING) { + LATE(pa_threaded_mainloop_wait)(_paMainloop); + } + + LATE(pa_operation_unref)(paOperation); +} + +bool AudioMixerManagerLinuxPulse::GetSinkInputInfo() const { + pa_operation* paOperation = NULL; + + AutoPulseLock auto_lock(_paMainloop); + // Get info for this stream (sink input). 
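+  // The query is asynchronous: pa_context_get_sink_input_info() returns a
+  // pa_operation immediately, PaSinkInputInfoCallback caches the result in
+  // the _pa* fields and signals the mainloop when eol is reached, and
+  // WaitForOperationCompletion() (defined above) blocks until then. Roughly:
+  //
+  //   op = pa_context_get_sink_input_info(ctx, idx, callback, this);
+  //   while (pa_operation_get_state(op) == PA_OPERATION_RUNNING)
+  //     pa_threaded_mainloop_wait(mainloop);  // woken by the callback
+  //   pa_operation_unref(op);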
+  paOperation = LATE(pa_context_get_sink_input_info)(
+      _paContext, LATE(pa_stream_get_index)(_paPlayStream),
+      PaSinkInputInfoCallback, (void*)this);
+
+  WaitForOperationCompletion(paOperation);
+  return true;
+}
+
+bool AudioMixerManagerLinuxPulse::GetSinkInfoByIndex(int device_index) const {
+  pa_operation* paOperation = NULL;
+
+  AutoPulseLock auto_lock(_paMainloop);
+  paOperation = LATE(pa_context_get_sink_info_by_index)(
+      _paContext, device_index, PaSinkInfoCallback, (void*)this);
+
+  WaitForOperationCompletion(paOperation);
+  return true;
+}
+
+bool AudioMixerManagerLinuxPulse::GetSourceInfoByIndex(int device_index) const {
+  pa_operation* paOperation = NULL;
+
+  AutoPulseLock auto_lock(_paMainloop);
+  paOperation = LATE(pa_context_get_source_info_by_index)(
+      _paContext, device_index, PaSourceInfoCallback, (void*)this);
+
+  WaitForOperationCompletion(paOperation);
+  return true;
+}
+
+}  // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.h b/third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.h
new file mode 100644
index 0000000000..546440c4a6
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_DEVICE_AUDIO_MIXER_MANAGER_PULSE_LINUX_H_
+#define AUDIO_DEVICE_AUDIO_MIXER_MANAGER_PULSE_LINUX_H_
+
+#include <pulse/pulseaudio.h>
+#include <stdint.h>
+
+#include "api/sequence_checker.h"
+
+#ifndef UINT32_MAX
+#define UINT32_MAX ((uint32_t)-1)
+#endif
+
+namespace webrtc {
+
+class AudioMixerManagerLinuxPulse {
+ public:
+  int32_t SetPlayStream(pa_stream* playStream);
+  int32_t SetRecStream(pa_stream* recStream);
+  int32_t OpenSpeaker(uint16_t deviceIndex);
+  int32_t OpenMicrophone(uint16_t deviceIndex);
+  int32_t SetSpeakerVolume(uint32_t volume);
+  int32_t SpeakerVolume(uint32_t& volume) const;
+  int32_t MaxSpeakerVolume(uint32_t& maxVolume) const;
+  int32_t MinSpeakerVolume(uint32_t& minVolume) const;
+  int32_t SpeakerVolumeIsAvailable(bool& available);
+  int32_t SpeakerMuteIsAvailable(bool& available);
+  int32_t SetSpeakerMute(bool enable);
+  int32_t StereoPlayoutIsAvailable(bool& available);
+  int32_t StereoRecordingIsAvailable(bool& available);
+  int32_t SpeakerMute(bool& enabled) const;
+  int32_t MicrophoneMuteIsAvailable(bool& available);
+  int32_t SetMicrophoneMute(bool enable);
+  int32_t MicrophoneMute(bool& enabled) const;
+  int32_t MicrophoneVolumeIsAvailable(bool& available);
+  int32_t SetMicrophoneVolume(uint32_t volume);
+  int32_t MicrophoneVolume(uint32_t& volume) const;
+  int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const;
+  int32_t MinMicrophoneVolume(uint32_t& minVolume) const;
+  int32_t SetPulseAudioObjects(pa_threaded_mainloop* mainloop,
+                               pa_context* context);
+  int32_t Close();
+  int32_t CloseSpeaker();
+  int32_t CloseMicrophone();
+  bool SpeakerIsInitialized() const;
+  bool MicrophoneIsInitialized() const;
+
+ public:
+  AudioMixerManagerLinuxPulse();
+  ~AudioMixerManagerLinuxPulse();
+
+ private:
+  static void PaSinkInfoCallback(pa_context* c,
+                                 const pa_sink_info* i,
+                                 int eol,
+                                 void* pThis);
+  static void
PaSinkInputInfoCallback(pa_context* c,
+                                          const pa_sink_input_info* i,
+                                          int eol,
+                                          void* pThis);
+  static void PaSourceInfoCallback(pa_context* c,
+                                   const pa_source_info* i,
+                                   int eol,
+                                   void* pThis);
+  static void PaSetVolumeCallback(pa_context* /*c*/,
+                                  int success,
+                                  void* /*pThis*/);
+  void PaSinkInfoCallbackHandler(const pa_sink_info* i, int eol);
+  void PaSinkInputInfoCallbackHandler(const pa_sink_input_info* i, int eol);
+  void PaSourceInfoCallbackHandler(const pa_source_info* i, int eol);
+
+  void WaitForOperationCompletion(pa_operation* paOperation) const;
+
+  bool GetSinkInputInfo() const;
+  bool GetSinkInfoByIndex(int device_index) const;
+  bool GetSourceInfoByIndex(int device_index) const;
+
+ private:
+  int16_t _paOutputDeviceIndex;
+  int16_t _paInputDeviceIndex;
+
+  pa_stream* _paPlayStream;
+  pa_stream* _paRecStream;
+
+  pa_threaded_mainloop* _paMainloop;
+  pa_context* _paContext;
+
+  mutable uint32_t _paVolume;
+  mutable uint32_t _paMute;
+  mutable uint32_t _paVolSteps;
+  bool _paSpeakerMute;
+  mutable uint32_t _paSpeakerVolume;
+  mutable uint8_t _paChannels;
+  bool _paObjectsSet;
+
+  // Stores thread ID in constructor.
+  // We can then use RTC_DCHECK_RUN_ON(&thread_checker_) to ensure that
+  // other methods are called from the same thread.
+  // Currently only does RTC_DCHECK(thread_checker_.IsCurrent()).
+  SequenceChecker thread_checker_;
+};
+
+}  // namespace webrtc
+
+#endif  // AUDIO_DEVICE_AUDIO_MIXER_MANAGER_PULSE_LINUX_H_
diff --git a/third_party/libwebrtc/modules/audio_device/linux/latebindingsymboltable_linux.cc b/third_party/libwebrtc/modules/audio_device/linux/latebindingsymboltable_linux.cc
new file mode 100644
index 0000000000..751edafd8b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/linux/latebindingsymboltable_linux.cc
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2010 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/linux/latebindingsymboltable_linux.h"
+
+#include "absl/strings/string_view.h"
+#include "rtc_base/logging.h"
+
+#ifdef WEBRTC_LINUX
+#include <dlfcn.h>
+#endif
+
+namespace webrtc {
+namespace adm_linux {
+
+inline static const char* GetDllError() {
+#ifdef WEBRTC_LINUX
+  char* err = dlerror();
+  if (err) {
+    return err;
+  } else {
+    return "No error";
+  }
+#else
+#error Not implemented
+#endif
+}
+
+DllHandle InternalLoadDll(absl::string_view dll_name) {
+#ifdef WEBRTC_LINUX
+  DllHandle handle = dlopen(std::string(dll_name).c_str(), RTLD_NOW);
+#else
+#error Not implemented
+#endif
+  if (handle == kInvalidDllHandle) {
+    RTC_LOG(LS_WARNING) << "Can't load " << dll_name << " : " << GetDllError();
+  }
+  return handle;
+}
+
+void InternalUnloadDll(DllHandle handle) {
+#ifdef WEBRTC_LINUX
+// TODO(pbos): Remove this dlclose() exclusion when leaks and suppressions from
+// here are gone (or AddressSanitizer can display them properly).
+//
+// Skip dlclose() on AddressSanitizer as leaks including this module in the
+// stack trace gets displayed as <unknown module> instead of the actual library
+// -> it can not be suppressed.
+// https://code.google.com/p/address-sanitizer/issues/detail?id=89
+#if !defined(ADDRESS_SANITIZER)
+  if (dlclose(handle) != 0) {
+    RTC_LOG(LS_ERROR) << GetDllError();
+  }
+#endif  // !defined(ADDRESS_SANITIZER)
+#else
+#error Not implemented
+#endif
+}
+
+static bool LoadSymbol(DllHandle handle,
+                       absl::string_view symbol_name,
+                       void** symbol) {
+#ifdef WEBRTC_LINUX
+  *symbol = dlsym(handle, std::string(symbol_name).c_str());
+  char* err = dlerror();
+  if (err) {
+    RTC_LOG(LS_ERROR) << "Error loading symbol " << symbol_name << " : " << err;
+    return false;
+  } else if (!*symbol) {
+    RTC_LOG(LS_ERROR) << "Symbol " << symbol_name << " is NULL";
+    return false;
+  }
+  return true;
+#else
+#error Not implemented
+#endif
+}
+
+// This routine MUST assign SOME value for every symbol, even if that value is
+// NULL, or else some symbols may be left with uninitialized data that the
+// caller may later interpret as a valid address.
+bool InternalLoadSymbols(DllHandle handle,
+                         int num_symbols,
+                         const char* const symbol_names[],
+                         void* symbols[]) {
+#ifdef WEBRTC_LINUX
+  // Clear any old errors.
+  dlerror();
+#endif
+  for (int i = 0; i < num_symbols; ++i) {
+    if (!LoadSymbol(handle, symbol_names[i], &symbols[i])) {
+      return false;
+    }
+  }
+  return true;
+}
+
+}  // namespace adm_linux
+}  // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/linux/latebindingsymboltable_linux.h b/third_party/libwebrtc/modules/audio_device/linux/latebindingsymboltable_linux.h
new file mode 100644
index 0000000000..00f3c5a449
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/linux/latebindingsymboltable_linux.h
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2010 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_DEVICE_LATEBINDINGSYMBOLTABLE_LINUX_H_
+#define AUDIO_DEVICE_LATEBINDINGSYMBOLTABLE_LINUX_H_
+
+#include <stddef.h>  // for NULL
+#include <string.h>
+
+#include "absl/strings/string_view.h"
+#include "rtc_base/checks.h"
+
+// This file provides macros for creating "symbol table" classes to simplify
+// the dynamic loading of symbols from DLLs. Currently the implementation only
+// supports Linux and pure C symbols.
+// See talk/sound/pulseaudiosymboltable.(h|cc) for an example.
+
+namespace webrtc {
+namespace adm_linux {
+
+#ifdef WEBRTC_LINUX
+typedef void* DllHandle;
+
+const DllHandle kInvalidDllHandle = NULL;
+#else
+#error Not implemented
+#endif
+
+// These are helpers for use only by the class below.
+DllHandle InternalLoadDll(absl::string_view);
+
+void InternalUnloadDll(DllHandle handle);
+
+bool InternalLoadSymbols(DllHandle handle,
+                         int num_symbols,
+                         const char* const symbol_names[],
+                         void* symbols[]);
+
+template <int SYMBOL_TABLE_SIZE,
+          const char kDllName[],
+          const char* const kSymbolNames[]>
+class LateBindingSymbolTable {
+ public:
+  LateBindingSymbolTable()
+      : handle_(kInvalidDllHandle), undefined_symbols_(false) {
+    memset(symbols_, 0, sizeof(symbols_));
+  }
+
+  ~LateBindingSymbolTable() { Unload(); }
+
+  LateBindingSymbolTable(const LateBindingSymbolTable&) = delete;
+  LateBindingSymbolTable& operator=(LateBindingSymbolTable&) = delete;
+
+  static int NumSymbols() { return SYMBOL_TABLE_SIZE; }
+
+  // We do not use this, but we offer it for theoretical convenience.
+ static const char* GetSymbolName(int index) { + RTC_DCHECK_LT(index, NumSymbols()); + return kSymbolNames[index]; + } + + bool IsLoaded() const { return handle_ != kInvalidDllHandle; } + + // Loads the DLL and the symbol table. Returns true iff the DLL and symbol + // table loaded successfully. + bool Load() { + if (IsLoaded()) { + return true; + } + if (undefined_symbols_) { + // We do not attempt to load again because repeated attempts are not + // likely to succeed and DLL loading is costly. + return false; + } + handle_ = InternalLoadDll(kDllName); + if (!IsLoaded()) { + return false; + } + if (!InternalLoadSymbols(handle_, NumSymbols(), kSymbolNames, symbols_)) { + undefined_symbols_ = true; + Unload(); + return false; + } + return true; + } + + void Unload() { + if (!IsLoaded()) { + return; + } + InternalUnloadDll(handle_); + handle_ = kInvalidDllHandle; + memset(symbols_, 0, sizeof(symbols_)); + } + + // Retrieves the given symbol. NOTE: Recommended to use LATESYM_GET below + // instead of this. + void* GetSymbol(int index) const { + RTC_DCHECK(IsLoaded()); + RTC_DCHECK_LT(index, NumSymbols()); + return symbols_[index]; + } + + private: + DllHandle handle_; + bool undefined_symbols_; + void* symbols_[SYMBOL_TABLE_SIZE]; +}; + +// This macro must be invoked in a header to declare a symbol table class. +#define LATE_BINDING_SYMBOL_TABLE_DECLARE_BEGIN(ClassName) enum { +// This macro must be invoked in the header declaration once for each symbol +// (recommended to use an X-Macro to avoid duplication). +// This macro defines an enum with names built from the symbols, which +// essentially creates a hash table in the compiler from symbol names to their +// indices in the symbol table class. +#define LATE_BINDING_SYMBOL_TABLE_DECLARE_ENTRY(ClassName, sym) \ + ClassName##_SYMBOL_TABLE_INDEX_##sym, + +// This macro completes the header declaration. +#define LATE_BINDING_SYMBOL_TABLE_DECLARE_END(ClassName) \ + ClassName##_SYMBOL_TABLE_SIZE \ + } \ + ; \ + \ + extern const char ClassName##_kDllName[]; \ + extern const char* const \ + ClassName##_kSymbolNames[ClassName##_SYMBOL_TABLE_SIZE]; \ + \ + typedef ::webrtc::adm_linux::LateBindingSymbolTable< \ + ClassName##_SYMBOL_TABLE_SIZE, ClassName##_kDllName, \ + ClassName##_kSymbolNames> \ + ClassName; + +// This macro must be invoked in a .cc file to define a previously-declared +// symbol table class. +#define LATE_BINDING_SYMBOL_TABLE_DEFINE_BEGIN(ClassName, dllName) \ + const char ClassName##_kDllName[] = dllName; \ + const char* const ClassName##_kSymbolNames[ClassName##_SYMBOL_TABLE_SIZE] = { +// This macro must be invoked in the .cc definition once for each symbol +// (recommended to use an X-Macro to avoid duplication). +// This would have to use the mangled name if we were to ever support C++ +// symbols. +#define LATE_BINDING_SYMBOL_TABLE_DEFINE_ENTRY(ClassName, sym) #sym, + +#define LATE_BINDING_SYMBOL_TABLE_DEFINE_END(ClassName) \ + } \ + ; + +// Index of a given symbol in the given symbol table class. +#define LATESYM_INDEXOF(ClassName, sym) (ClassName##_SYMBOL_TABLE_INDEX_##sym) + +// Returns a reference to the given late-binded symbol, with the correct type. 
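+// Typical usage (sketch): a client wraps LATESYM_GET in a shorter macro and
+// calls the library through it; compare the LATE() macro in
+// audio_mixer_manager_pulse_linux.cc:
+//
+//   #define LATE(sym) \
+//     LATESYM_GET(webrtc::adm_linux_pulse::PulseAudioSymbolTable, \
+//                 GetPulseSymbolTable(), sym)
+//
+//   if (GetPulseSymbolTable()->Load()) {  // dlopen()s the DLL, dlsym()s all
+//     pa_threaded_mainloop* m = LATE(pa_threaded_mainloop_new)();
+//   }
+//
+// The __typeof__ cast in LATESYM_GET gives the call the exact type of the
+// real library function, so argument mismatches still fail to compile.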
+#define LATESYM_GET(ClassName, inst, sym) \
+  (*reinterpret_cast<__typeof__(&sym)>( \
+      (inst)->GetSymbol(LATESYM_INDEXOF(ClassName, sym))))
+
+}  // namespace adm_linux
+}  // namespace webrtc
+
+#endif  // AUDIO_DEVICE_LATEBINDINGSYMBOLTABLE_LINUX_H_
diff --git a/third_party/libwebrtc/modules/audio_device/linux/pulseaudiosymboltable_linux.cc b/third_party/libwebrtc/modules/audio_device/linux/pulseaudiosymboltable_linux.cc
new file mode 100644
index 0000000000..e0759e6ca3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/linux/pulseaudiosymboltable_linux.cc
@@ -0,0 +1,41 @@
+/*
+ * libjingle
+ * Copyright 2004--2010, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "modules/audio_device/linux/pulseaudiosymboltable_linux.h"
+
+namespace webrtc {
+namespace adm_linux_pulse {
+
+LATE_BINDING_SYMBOL_TABLE_DEFINE_BEGIN(PulseAudioSymbolTable, "libpulse.so.0")
+#define X(sym) \
+  LATE_BINDING_SYMBOL_TABLE_DEFINE_ENTRY(PulseAudioSymbolTable, sym)
+PULSE_AUDIO_SYMBOLS_LIST
+#undef X
+LATE_BINDING_SYMBOL_TABLE_DEFINE_END(PulseAudioSymbolTable)
+
+}  // namespace adm_linux_pulse
+}  // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/linux/pulseaudiosymboltable_linux.h b/third_party/libwebrtc/modules/audio_device/linux/pulseaudiosymboltable_linux.h
new file mode 100644
index 0000000000..2f6a9510d8
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/linux/pulseaudiosymboltable_linux.h
@@ -0,0 +1,106 @@
+/*
+ * libjingle
+ * Copyright 2004--2010, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ * 3.
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO + * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef AUDIO_DEVICE_PULSEAUDIOSYMBOLTABLE_LINUX_H_ +#define AUDIO_DEVICE_PULSEAUDIOSYMBOLTABLE_LINUX_H_ + +#include "modules/audio_device/linux/latebindingsymboltable_linux.h" + +namespace webrtc { +namespace adm_linux_pulse { + +// The PulseAudio symbols we need, as an X-Macro list. +// This list must contain precisely every libpulse function that is used in +// the ADM LINUX PULSE Device and Mixer classes +#define PULSE_AUDIO_SYMBOLS_LIST \ + X(pa_bytes_per_second) \ + X(pa_context_connect) \ + X(pa_context_disconnect) \ + X(pa_context_errno) \ + X(pa_context_get_protocol_version) \ + X(pa_context_get_server_info) \ + X(pa_context_get_sink_info_list) \ + X(pa_context_get_sink_info_by_index) \ + X(pa_context_get_sink_info_by_name) \ + X(pa_context_get_sink_input_info) \ + X(pa_context_get_source_info_by_index) \ + X(pa_context_get_source_info_by_name) \ + X(pa_context_get_source_info_list) \ + X(pa_context_get_state) \ + X(pa_context_new) \ + X(pa_context_set_sink_input_volume) \ + X(pa_context_set_sink_input_mute) \ + X(pa_context_set_source_volume_by_index) \ + X(pa_context_set_source_mute_by_index) \ + X(pa_context_set_state_callback) \ + X(pa_context_unref) \ + X(pa_cvolume_set) \ + X(pa_operation_get_state) \ + X(pa_operation_unref) \ + X(pa_stream_connect_playback) \ + X(pa_stream_connect_record) \ + X(pa_stream_disconnect) \ + X(pa_stream_drop) \ + X(pa_stream_get_device_index) \ + X(pa_stream_get_index) \ + X(pa_stream_get_latency) \ + X(pa_stream_get_sample_spec) \ + X(pa_stream_get_state) \ + X(pa_stream_new) \ + X(pa_stream_peek) \ + X(pa_stream_readable_size) \ + X(pa_stream_set_buffer_attr) \ + X(pa_stream_set_overflow_callback) \ + X(pa_stream_set_read_callback) \ + X(pa_stream_set_state_callback) \ + X(pa_stream_set_underflow_callback) \ + X(pa_stream_set_write_callback) \ + X(pa_stream_unref) \ + X(pa_stream_writable_size) \ + X(pa_stream_write) \ + X(pa_strerror) \ + X(pa_threaded_mainloop_free) \ + X(pa_threaded_mainloop_get_api) \ + X(pa_threaded_mainloop_lock) \ + X(pa_threaded_mainloop_new) \ + X(pa_threaded_mainloop_signal) \ + X(pa_threaded_mainloop_start) \ + X(pa_threaded_mainloop_stop) \ + X(pa_threaded_mainloop_unlock) \ + X(pa_threaded_mainloop_wait) + +LATE_BINDING_SYMBOL_TABLE_DECLARE_BEGIN(PulseAudioSymbolTable) +#define X(sym) \ + LATE_BINDING_SYMBOL_TABLE_DECLARE_ENTRY(PulseAudioSymbolTable, sym) +PULSE_AUDIO_SYMBOLS_LIST +#undef X +LATE_BINDING_SYMBOL_TABLE_DECLARE_END(PulseAudioSymbolTable) + +} // namespace adm_linux_pulse +} // namespace webrtc + +#endif // AUDIO_DEVICE_PULSEAUDIOSYMBOLTABLE_LINUX_H_ diff --git 
a/third_party/libwebrtc/modules/audio_device/mac/audio_device_mac.cc b/third_party/libwebrtc/modules/audio_device/mac/audio_device_mac.cc
new file mode 100644
index 0000000000..ed7b0e4669
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/mac/audio_device_mac.cc
@@ -0,0 +1,2500 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/mac/audio_device_mac.h"
+
+#include <ApplicationServices/ApplicationServices.h>
+#include <mach/mach.h>   // mach_task_self()
+#include <sys/sysctl.h>  // sysctlbyname()
+
+#include <memory>
+
+#include "modules/audio_device/audio_device_config.h"
+#include "modules/third_party/portaudio/pa_ringbuffer.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/system/arch.h"
+
+namespace webrtc {
+
+#define WEBRTC_CA_RETURN_ON_ERR(expr) \
+  do { \
+    err = expr; \
+    if (err != noErr) { \
+      logCAMsg(rtc::LS_ERROR, "Error in " #expr, (const char*)&err); \
+      return -1; \
+    } \
+  } while (0)
+
+#define WEBRTC_CA_LOG_ERR(expr) \
+  do { \
+    err = expr; \
+    if (err != noErr) { \
+      logCAMsg(rtc::LS_ERROR, "Error in " #expr, (const char*)&err); \
+    } \
+  } while (0)
+
+#define WEBRTC_CA_LOG_WARN(expr) \
+  do { \
+    err = expr; \
+    if (err != noErr) { \
+      logCAMsg(rtc::LS_WARNING, "Error in " #expr, (const char*)&err); \
+    } \
+  } while (0)
+
+enum { MaxNumberDevices = 64 };
+
+// CoreAudio errors are best interpreted as four character strings.
+void AudioDeviceMac::logCAMsg(const rtc::LoggingSeverity sev,
+                              const char* msg,
+                              const char* err) {
+  RTC_DCHECK(msg != NULL);
+  RTC_DCHECK(err != NULL);
+
+#ifdef WEBRTC_ARCH_BIG_ENDIAN
+  switch (sev) {
+    case rtc::LS_ERROR:
+      RTC_LOG(LS_ERROR) << msg << ": " << err[0] << err[1] << err[2] << err[3];
+      break;
+    case rtc::LS_WARNING:
+      RTC_LOG(LS_WARNING) << msg << ": " << err[0] << err[1] << err[2]
+                          << err[3];
+      break;
+    case rtc::LS_VERBOSE:
+      RTC_LOG(LS_VERBOSE) << msg << ": " << err[0] << err[1] << err[2]
+                          << err[3];
+      break;
+    default:
+      break;
+  }
+#else
+  // We need to flip the characters in this case.
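+  // Worked example: the OSStatus 'what' is the 32-bit value 0x77686174
+  // ('w'<<24 | 'h'<<16 | 'a'<<8 | 't'). On a little-endian host the bytes
+  // of `err` are therefore stored as {'t','a','h','w'}, so printing
+  // err[3]..err[0] below recovers the readable code "what"; the big-endian
+  // branch above can print err[0]..err[3] directly.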
+ switch (sev) { + case rtc::LS_ERROR: + RTC_LOG(LS_ERROR) << msg << ": " << err[3] << err[2] << err[1] << err[0]; + break; + case rtc::LS_WARNING: + RTC_LOG(LS_WARNING) << msg << ": " << err[3] << err[2] << err[1] + << err[0]; + break; + case rtc::LS_VERBOSE: + RTC_LOG(LS_VERBOSE) << msg << ": " << err[3] << err[2] << err[1] + << err[0]; + break; + default: + break; + } +#endif +} + +AudioDeviceMac::AudioDeviceMac() + : _ptrAudioBuffer(NULL), + _mixerManager(), + _inputDeviceIndex(0), + _outputDeviceIndex(0), + _inputDeviceID(kAudioObjectUnknown), + _outputDeviceID(kAudioObjectUnknown), + _inputDeviceIsSpecified(false), + _outputDeviceIsSpecified(false), + _recChannels(N_REC_CHANNELS), + _playChannels(N_PLAY_CHANNELS), + _captureBufData(NULL), + _renderBufData(NULL), + _initialized(false), + _isShutDown(false), + _recording(false), + _playing(false), + _recIsInitialized(false), + _playIsInitialized(false), + _renderDeviceIsAlive(1), + _captureDeviceIsAlive(1), + _twoDevices(true), + _doStop(false), + _doStopRec(false), + _macBookPro(false), + _macBookProPanRight(false), + _captureLatencyUs(0), + _renderLatencyUs(0), + _captureDelayUs(0), + _renderDelayUs(0), + _renderDelayOffsetSamples(0), + _paCaptureBuffer(NULL), + _paRenderBuffer(NULL), + _captureBufSizeSamples(0), + _renderBufSizeSamples(0), + prev_key_state_() { + RTC_DLOG(LS_INFO) << __FUNCTION__ << " created"; + + memset(_renderConvertData, 0, sizeof(_renderConvertData)); + memset(&_outStreamFormat, 0, sizeof(AudioStreamBasicDescription)); + memset(&_outDesiredFormat, 0, sizeof(AudioStreamBasicDescription)); + memset(&_inStreamFormat, 0, sizeof(AudioStreamBasicDescription)); + memset(&_inDesiredFormat, 0, sizeof(AudioStreamBasicDescription)); +} + +AudioDeviceMac::~AudioDeviceMac() { + RTC_DLOG(LS_INFO) << __FUNCTION__ << " destroyed"; + + if (!_isShutDown) { + Terminate(); + } + + RTC_DCHECK(capture_worker_thread_.empty()); + RTC_DCHECK(render_worker_thread_.empty()); + + if (_paRenderBuffer) { + delete _paRenderBuffer; + _paRenderBuffer = NULL; + } + + if (_paCaptureBuffer) { + delete _paCaptureBuffer; + _paCaptureBuffer = NULL; + } + + if (_renderBufData) { + delete[] _renderBufData; + _renderBufData = NULL; + } + + if (_captureBufData) { + delete[] _captureBufData; + _captureBufData = NULL; + } + + kern_return_t kernErr = KERN_SUCCESS; + kernErr = semaphore_destroy(mach_task_self(), _renderSemaphore); + if (kernErr != KERN_SUCCESS) { + RTC_LOG(LS_ERROR) << "semaphore_destroy() error: " << kernErr; + } + + kernErr = semaphore_destroy(mach_task_self(), _captureSemaphore); + if (kernErr != KERN_SUCCESS) { + RTC_LOG(LS_ERROR) << "semaphore_destroy() error: " << kernErr; + } +} + +// ============================================================================ +// API +// ============================================================================ + +void AudioDeviceMac::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) { + MutexLock lock(&mutex_); + + _ptrAudioBuffer = audioBuffer; + + // inform the AudioBuffer about default settings for this implementation + _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC); + _ptrAudioBuffer->SetPlayoutSampleRate(N_PLAY_SAMPLES_PER_SEC); + _ptrAudioBuffer->SetRecordingChannels(N_REC_CHANNELS); + _ptrAudioBuffer->SetPlayoutChannels(N_PLAY_CHANNELS); +} + +int32_t AudioDeviceMac::ActiveAudioLayer( + AudioDeviceModule::AudioLayer& audioLayer) const { + audioLayer = AudioDeviceModule::kPlatformDefaultAudio; + return 0; +} + +AudioDeviceGeneric::InitStatus AudioDeviceMac::Init() { + 
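+  // Init() sets up, in order: the PortAudio ring buffers that shuttle
+  // samples between the HAL IOProcs and WebRTC, the mach semaphores the
+  // capture/render worker threads block on, the HAL notification run loop,
+  // the device-change listener, and the MacBook Pro model detection used
+  // later for channel panning (_macBookProPanRight).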
MutexLock lock(&mutex_); + + if (_initialized) { + return InitStatus::OK; + } + + OSStatus err = noErr; + + _isShutDown = false; + + // PortAudio ring buffers require an elementCount which is a power of two. + if (_renderBufData == NULL) { + UInt32 powerOfTwo = 1; + while (powerOfTwo < PLAY_BUF_SIZE_IN_SAMPLES) { + powerOfTwo <<= 1; + } + _renderBufSizeSamples = powerOfTwo; + _renderBufData = new SInt16[_renderBufSizeSamples]; + } + + if (_paRenderBuffer == NULL) { + _paRenderBuffer = new PaUtilRingBuffer; + ring_buffer_size_t bufSize = -1; + bufSize = PaUtil_InitializeRingBuffer( + _paRenderBuffer, sizeof(SInt16), _renderBufSizeSamples, _renderBufData); + if (bufSize == -1) { + RTC_LOG(LS_ERROR) << "PaUtil_InitializeRingBuffer() error"; + return InitStatus::PLAYOUT_ERROR; + } + } + + if (_captureBufData == NULL) { + UInt32 powerOfTwo = 1; + while (powerOfTwo < REC_BUF_SIZE_IN_SAMPLES) { + powerOfTwo <<= 1; + } + _captureBufSizeSamples = powerOfTwo; + _captureBufData = new Float32[_captureBufSizeSamples]; + } + + if (_paCaptureBuffer == NULL) { + _paCaptureBuffer = new PaUtilRingBuffer; + ring_buffer_size_t bufSize = -1; + bufSize = + PaUtil_InitializeRingBuffer(_paCaptureBuffer, sizeof(Float32), + _captureBufSizeSamples, _captureBufData); + if (bufSize == -1) { + RTC_LOG(LS_ERROR) << "PaUtil_InitializeRingBuffer() error"; + return InitStatus::RECORDING_ERROR; + } + } + + kern_return_t kernErr = KERN_SUCCESS; + kernErr = semaphore_create(mach_task_self(), &_renderSemaphore, + SYNC_POLICY_FIFO, 0); + if (kernErr != KERN_SUCCESS) { + RTC_LOG(LS_ERROR) << "semaphore_create() error: " << kernErr; + return InitStatus::OTHER_ERROR; + } + + kernErr = semaphore_create(mach_task_self(), &_captureSemaphore, + SYNC_POLICY_FIFO, 0); + if (kernErr != KERN_SUCCESS) { + RTC_LOG(LS_ERROR) << "semaphore_create() error: " << kernErr; + return InitStatus::OTHER_ERROR; + } + + // Setting RunLoop to NULL here instructs HAL to manage its own thread for + // notifications. This was the default behaviour on OS X 10.5 and earlier, + // but now must be explicitly specified. HAL would otherwise try to use the + // main thread to issue notifications. + AudioObjectPropertyAddress propertyAddress = { + kAudioHardwarePropertyRunLoop, kAudioObjectPropertyScopeGlobal, + kAudioObjectPropertyElementMaster}; + CFRunLoopRef runLoop = NULL; + UInt32 size = sizeof(CFRunLoopRef); + int aoerr = AudioObjectSetPropertyData( + kAudioObjectSystemObject, &propertyAddress, 0, NULL, size, &runLoop); + if (aoerr != noErr) { + RTC_LOG(LS_ERROR) << "Error in AudioObjectSetPropertyData: " + << (const char*)&aoerr; + return InitStatus::OTHER_ERROR; + } + + // Listen for any device changes. 
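+  // kAudioHardwarePropertyDevices fires whenever the set of audio devices
+  // changes (e.g. a USB headset is plugged in or removed); objectListenerProc
+  // is then invoked on the HAL notification thread configured above, not on
+  // the thread that called Init().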
+  propertyAddress.mSelector = kAudioHardwarePropertyDevices;
+  WEBRTC_CA_LOG_ERR(AudioObjectAddPropertyListener(
+      kAudioObjectSystemObject, &propertyAddress, &objectListenerProc, this));
+
+  // Determine if this is a MacBook Pro
+  _macBookPro = false;
+  _macBookProPanRight = false;
+  char buf[128];
+  size_t length = sizeof(buf);
+  memset(buf, 0, length);
+
+  int intErr = sysctlbyname("hw.model", buf, &length, NULL, 0);
+  if (intErr != 0) {
+    RTC_LOG(LS_ERROR) << "Error in sysctlbyname(): " << intErr;
+  } else {
+    RTC_LOG(LS_VERBOSE) << "Hardware model: " << buf;
+    if (strncmp(buf, "MacBookPro", 10) == 0) {
+      _macBookPro = true;
+    }
+  }
+
+  _initialized = true;
+
+  return InitStatus::OK;
+}
+
+int32_t AudioDeviceMac::Terminate() {
+  if (!_initialized) {
+    return 0;
+  }
+
+  if (_recording) {
+    RTC_LOG(LS_ERROR) << "Recording must be stopped";
+    return -1;
+  }
+
+  if (_playing) {
+    RTC_LOG(LS_ERROR) << "Playback must be stopped";
+    return -1;
+  }
+
+  MutexLock lock(&mutex_);
+  _mixerManager.Close();
+
+  OSStatus err = noErr;
+  int retVal = 0;
+
+  AudioObjectPropertyAddress propertyAddress = {
+      kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal,
+      kAudioObjectPropertyElementMaster};
+  WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(
+      kAudioObjectSystemObject, &propertyAddress, &objectListenerProc, this));
+
+  err = AudioHardwareUnload();
+  if (err != noErr) {
+    logCAMsg(rtc::LS_ERROR, "Error in AudioHardwareUnload()",
+             (const char*)&err);
+    retVal = -1;
+  }
+
+  _isShutDown = true;
+  _initialized = false;
+  _outputDeviceIsSpecified = false;
+  _inputDeviceIsSpecified = false;
+
+  return retVal;
+}
+
+bool AudioDeviceMac::Initialized() const {
+  return (_initialized);
+}
+
+int32_t AudioDeviceMac::SpeakerIsAvailable(bool& available) {
+  MutexLock lock(&mutex_);
+  return SpeakerIsAvailableLocked(available);
+}
+
+int32_t AudioDeviceMac::SpeakerIsAvailableLocked(bool& available) {
+  bool wasInitialized = _mixerManager.SpeakerIsInitialized();
+
+  // Make an attempt to open up the
+  // output mixer corresponding to the currently selected output device.
+  //
+  if (!wasInitialized && InitSpeakerLocked() == -1) {
+    available = false;
+    return 0;
+  }
+
+  // Given that InitSpeaker was successful, we know that a valid speaker
+  // exists.
+  available = true;
+
+  // Close the initialized output mixer
+  //
+  if (!wasInitialized) {
+    _mixerManager.CloseSpeaker();
+  }
+
+  return 0;
+}
+
+int32_t AudioDeviceMac::InitSpeaker() {
+  MutexLock lock(&mutex_);
+  return InitSpeakerLocked();
+}
+
+int32_t AudioDeviceMac::InitSpeakerLocked() {
+  if (_playing) {
+    return -1;
+  }
+
+  if (InitDevice(_outputDeviceIndex, _outputDeviceID, false) == -1) {
+    return -1;
+  }
+
+  if (_inputDeviceID == _outputDeviceID) {
+    _twoDevices = false;
+  } else {
+    _twoDevices = true;
+  }
+
+  if (_mixerManager.OpenSpeaker(_outputDeviceID) == -1) {
+    return -1;
+  }
+
+  return 0;
+}
+
+int32_t AudioDeviceMac::MicrophoneIsAvailable(bool& available) {
+  MutexLock lock(&mutex_);
+  return MicrophoneIsAvailableLocked(available);
+}
+
+int32_t AudioDeviceMac::MicrophoneIsAvailableLocked(bool& available) {
+  bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
+
+  // Make an attempt to open up the
+  // input mixer corresponding to the currently selected input device.
+  //
+  if (!wasInitialized && InitMicrophoneLocked() == -1) {
+    available = false;
+    return 0;
+  }
+
+  // Given that InitMicrophone was successful, we know that a valid microphone
+  // exists.
+ available = true; + + // Close the initialized input mixer + // + if (!wasInitialized) { + _mixerManager.CloseMicrophone(); + } + + return 0; +} + +int32_t AudioDeviceMac::InitMicrophone() { + MutexLock lock(&mutex_); + return InitMicrophoneLocked(); +} + +int32_t AudioDeviceMac::InitMicrophoneLocked() { + if (_recording) { + return -1; + } + + if (InitDevice(_inputDeviceIndex, _inputDeviceID, true) == -1) { + return -1; + } + + if (_inputDeviceID == _outputDeviceID) { + _twoDevices = false; + } else { + _twoDevices = true; + } + + if (_mixerManager.OpenMicrophone(_inputDeviceID) == -1) { + return -1; + } + + return 0; +} + +bool AudioDeviceMac::SpeakerIsInitialized() const { + return (_mixerManager.SpeakerIsInitialized()); +} + +bool AudioDeviceMac::MicrophoneIsInitialized() const { + return (_mixerManager.MicrophoneIsInitialized()); +} + +int32_t AudioDeviceMac::SpeakerVolumeIsAvailable(bool& available) { + bool wasInitialized = _mixerManager.SpeakerIsInitialized(); + + // Make an attempt to open up the + // output mixer corresponding to the currently selected output device. + // + if (!wasInitialized && InitSpeaker() == -1) { + // If we end up here it means that the selected speaker has no volume + // control. + available = false; + return 0; + } + + // Given that InitSpeaker was successful, we know that a volume control exists + // + available = true; + + // Close the initialized output mixer + // + if (!wasInitialized) { + _mixerManager.CloseSpeaker(); + } + + return 0; +} + +int32_t AudioDeviceMac::SetSpeakerVolume(uint32_t volume) { + return (_mixerManager.SetSpeakerVolume(volume)); +} + +int32_t AudioDeviceMac::SpeakerVolume(uint32_t& volume) const { + uint32_t level(0); + + if (_mixerManager.SpeakerVolume(level) == -1) { + return -1; + } + + volume = level; + return 0; +} + +int32_t AudioDeviceMac::MaxSpeakerVolume(uint32_t& maxVolume) const { + uint32_t maxVol(0); + + if (_mixerManager.MaxSpeakerVolume(maxVol) == -1) { + return -1; + } + + maxVolume = maxVol; + return 0; +} + +int32_t AudioDeviceMac::MinSpeakerVolume(uint32_t& minVolume) const { + uint32_t minVol(0); + + if (_mixerManager.MinSpeakerVolume(minVol) == -1) { + return -1; + } + + minVolume = minVol; + return 0; +} + +int32_t AudioDeviceMac::SpeakerMuteIsAvailable(bool& available) { + bool isAvailable(false); + bool wasInitialized = _mixerManager.SpeakerIsInitialized(); + + // Make an attempt to open up the + // output mixer corresponding to the currently selected output device. + // + if (!wasInitialized && InitSpeaker() == -1) { + // If we end up here it means that the selected speaker has no volume + // control, hence it is safe to state that there is no mute control + // already at this stage. 
+    available = false;
+    return 0;
+  }
+
+  // Check if the selected speaker has a mute control
+  //
+  _mixerManager.SpeakerMuteIsAvailable(isAvailable);
+
+  available = isAvailable;
+
+  // Close the initialized output mixer
+  //
+  if (!wasInitialized) {
+    _mixerManager.CloseSpeaker();
+  }
+
+  return 0;
+}
+
+int32_t AudioDeviceMac::SetSpeakerMute(bool enable) {
+  return (_mixerManager.SetSpeakerMute(enable));
+}
+
+int32_t AudioDeviceMac::SpeakerMute(bool& enabled) const {
+  bool muted(false);
+
+  if (_mixerManager.SpeakerMute(muted) == -1) {
+    return -1;
+  }
+
+  enabled = muted;
+  return 0;
+}
+
+int32_t AudioDeviceMac::MicrophoneMuteIsAvailable(bool& available) {
+  bool isAvailable(false);
+  bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
+
+  // Make an attempt to open up the
+  // input mixer corresponding to the currently selected input device.
+  //
+  if (!wasInitialized && InitMicrophone() == -1) {
+    // If we end up here it means that the selected microphone has no volume
+    // control, hence it is safe to state that there is no mute control
+    // already at this stage.
+    available = false;
+    return 0;
+  }
+
+  // Check if the selected microphone has a mute control
+  //
+  _mixerManager.MicrophoneMuteIsAvailable(isAvailable);
+  available = isAvailable;
+
+  // Close the initialized input mixer
+  //
+  if (!wasInitialized) {
+    _mixerManager.CloseMicrophone();
+  }
+
+  return 0;
+}
+
+int32_t AudioDeviceMac::SetMicrophoneMute(bool enable) {
+  return (_mixerManager.SetMicrophoneMute(enable));
+}
+
+int32_t AudioDeviceMac::MicrophoneMute(bool& enabled) const {
+  bool muted(false);
+
+  if (_mixerManager.MicrophoneMute(muted) == -1) {
+    return -1;
+  }
+
+  enabled = muted;
+  return 0;
+}
+
+int32_t AudioDeviceMac::StereoRecordingIsAvailable(bool& available) {
+  bool isAvailable(false);
+  bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
+
+  if (!wasInitialized && InitMicrophone() == -1) {
+    // Cannot open the specified device
+    available = false;
+    return 0;
+  }
+
+  // Check if the selected microphone can record stereo
+  //
+  _mixerManager.StereoRecordingIsAvailable(isAvailable);
+  available = isAvailable;
+
+  // Close the initialized input mixer
+  //
+  if (!wasInitialized) {
+    _mixerManager.CloseMicrophone();
+  }
+
+  return 0;
+}
+
+int32_t AudioDeviceMac::SetStereoRecording(bool enable) {
+  if (enable)
+    _recChannels = 2;
+  else
+    _recChannels = 1;
+
+  return 0;
+}
+
+int32_t AudioDeviceMac::StereoRecording(bool& enabled) const {
+  if (_recChannels == 2)
+    enabled = true;
+  else
+    enabled = false;
+
+  return 0;
+}
+
+int32_t AudioDeviceMac::StereoPlayoutIsAvailable(bool& available) {
+  bool isAvailable(false);
+  bool wasInitialized = _mixerManager.SpeakerIsInitialized();
+
+  if (!wasInitialized && InitSpeaker() == -1) {
+    // Cannot open the specified device
+    available = false;
+    return 0;
+  }
+
+  // Check if the selected speaker can play out stereo
+  //
+  _mixerManager.StereoPlayoutIsAvailable(isAvailable);
+  available = isAvailable;
+
+  // Close the initialized output mixer
+  //
+  if (!wasInitialized) {
+    _mixerManager.CloseSpeaker();
+  }
+
+  return 0;
+}
+
+int32_t AudioDeviceMac::SetStereoPlayout(bool enable) {
+  if (enable)
+    _playChannels = 2;
+  else
+    _playChannels = 1;
+
+  return 0;
+}
+
+int32_t AudioDeviceMac::StereoPlayout(bool& enabled) const {
+  if (_playChannels == 2)
+    enabled = true;
+  else
+    enabled = false;
+
+  return 0;
+}
+
+int32_t AudioDeviceMac::MicrophoneVolumeIsAvailable(bool& available) {
+  bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
+
+  // Make an attempt to open up the
+  // input mixer corresponding to the currently selected input device.
+  //
+  if (!wasInitialized && InitMicrophone() == -1) {
+    // If we end up here it means that the selected microphone has no volume
+    // control.
+    available = false;
+    return 0;
+  }
+
+  // Given that InitMicrophone was successful, we know that a volume control
+  // exists
+  //
+  available = true;
+
+  // Close the initialized input mixer
+  //
+  if (!wasInitialized) {
+    _mixerManager.CloseMicrophone();
+  }
+
+  return 0;
+}
+
+int32_t AudioDeviceMac::SetMicrophoneVolume(uint32_t volume) {
+  return (_mixerManager.SetMicrophoneVolume(volume));
+}
+
+int32_t AudioDeviceMac::MicrophoneVolume(uint32_t& volume) const {
+  uint32_t level(0);
+
+  if (_mixerManager.MicrophoneVolume(level) == -1) {
+    RTC_LOG(LS_WARNING) << "failed to retrieve current microphone level";
+    return -1;
+  }
+
+  volume = level;
+  return 0;
+}
+
+int32_t AudioDeviceMac::MaxMicrophoneVolume(uint32_t& maxVolume) const {
+  uint32_t maxVol(0);
+
+  if (_mixerManager.MaxMicrophoneVolume(maxVol) == -1) {
+    return -1;
+  }
+
+  maxVolume = maxVol;
+  return 0;
+}
+
+int32_t AudioDeviceMac::MinMicrophoneVolume(uint32_t& minVolume) const {
+  uint32_t minVol(0);
+
+  if (_mixerManager.MinMicrophoneVolume(minVol) == -1) {
+    return -1;
+  }
+
+  minVolume = minVol;
+  return 0;
+}
+
+int16_t AudioDeviceMac::PlayoutDevices() {
+  AudioDeviceID playDevices[MaxNumberDevices];
+  return GetNumberDevices(kAudioDevicePropertyScopeOutput, playDevices,
+                          MaxNumberDevices);
+}
+
+int32_t AudioDeviceMac::SetPlayoutDevice(uint16_t index) {
+  MutexLock lock(&mutex_);
+
+  if (_playIsInitialized) {
+    return -1;
+  }
+
+  AudioDeviceID playDevices[MaxNumberDevices];
+  uint32_t nDevices = GetNumberDevices(kAudioDevicePropertyScopeOutput,
+                                       playDevices, MaxNumberDevices);
+  RTC_LOG(LS_VERBOSE) << "number of available waveform-audio output devices is "
+                      << nDevices;
+
+  if (index > (nDevices - 1)) {
+    RTC_LOG(LS_ERROR) << "device index is out of range [0," << (nDevices - 1)
+                      << "]";
+    return -1;
+  }
+
+  _outputDeviceIndex = index;
+  _outputDeviceIsSpecified = true;
+
+  return 0;
+}
+
+int32_t AudioDeviceMac::SetPlayoutDevice(
+    AudioDeviceModule::WindowsDeviceType /*device*/) {
+  RTC_LOG(LS_ERROR) << "WindowsDeviceType not supported";
+  return -1;
+}
+
+int32_t AudioDeviceMac::PlayoutDeviceName(uint16_t index,
+                                          char name[kAdmMaxDeviceNameSize],
+                                          char guid[kAdmMaxGuidSize]) {
+  const uint16_t nDevices(PlayoutDevices());
+
+  if ((index > (nDevices - 1)) || (name == NULL)) {
+    return -1;
+  }
+
+  memset(name, 0, kAdmMaxDeviceNameSize);
+
+  if (guid != NULL) {
+    memset(guid, 0, kAdmMaxGuidSize);
+  }
+
+  return GetDeviceName(kAudioDevicePropertyScopeOutput, index,
+                       rtc::ArrayView<char>(name, kAdmMaxDeviceNameSize));
+}
+
+int32_t AudioDeviceMac::RecordingDeviceName(uint16_t index,
+                                            char name[kAdmMaxDeviceNameSize],
+                                            char guid[kAdmMaxGuidSize]) {
+  const uint16_t nDevices(RecordingDevices());
+
+  if ((index > (nDevices - 1)) || (name == NULL)) {
+    return -1;
+  }
+
+  memset(name, 0, kAdmMaxDeviceNameSize);
+
+  if (guid != NULL) {
+    memset(guid, 0, kAdmMaxGuidSize);
+  }
+
+  return GetDeviceName(kAudioDevicePropertyScopeInput, index,
+                       rtc::ArrayView<char>(name, kAdmMaxDeviceNameSize));
+}
+
+int16_t AudioDeviceMac::RecordingDevices() {
+  AudioDeviceID recDevices[MaxNumberDevices];
+  return GetNumberDevices(kAudioDevicePropertyScopeInput, recDevices,
+                          MaxNumberDevices);
+}
+
+int32_t AudioDeviceMac::SetRecordingDevice(uint16_t index) {
+  if (_recIsInitialized) {
+    return
-1; + } + + AudioDeviceID recDevices[MaxNumberDevices]; + uint32_t nDevices = GetNumberDevices(kAudioDevicePropertyScopeInput, + recDevices, MaxNumberDevices); + RTC_LOG(LS_VERBOSE) << "number of available waveform-audio input devices is " + << nDevices; + + if (index > (nDevices - 1)) { + RTC_LOG(LS_ERROR) << "device index is out of range [0," << (nDevices - 1) + << "]"; + return -1; + } + + _inputDeviceIndex = index; + _inputDeviceIsSpecified = true; + + return 0; +} + +int32_t AudioDeviceMac::SetRecordingDevice( + AudioDeviceModule::WindowsDeviceType /*device*/) { + RTC_LOG(LS_ERROR) << "WindowsDeviceType not supported"; + return -1; +} + +int32_t AudioDeviceMac::PlayoutIsAvailable(bool& available) { + available = true; + + // Try to initialize the playout side + if (InitPlayout() == -1) { + available = false; + } + + // We destroy the IOProc created by InitPlayout() in implDeviceIOProc(). + // We must actually start playout here in order to have the IOProc + // deleted by calling StopPlayout(). + if (StartPlayout() == -1) { + available = false; + } + + // Cancel effect of initialization + if (StopPlayout() == -1) { + available = false; + } + + return 0; +} + +int32_t AudioDeviceMac::RecordingIsAvailable(bool& available) { + available = true; + + // Try to initialize the recording side + if (InitRecording() == -1) { + available = false; + } + + // We destroy the IOProc created by InitRecording() in implInDeviceIOProc(). + // We must actually start recording here in order to have the IOProc + // deleted by calling StopRecording(). + if (StartRecording() == -1) { + available = false; + } + + // Cancel effect of initialization + if (StopRecording() == -1) { + available = false; + } + + return 0; +} + +int32_t AudioDeviceMac::InitPlayout() { + RTC_LOG(LS_INFO) << "InitPlayout"; + MutexLock lock(&mutex_); + + if (_playing) { + return -1; + } + + if (!_outputDeviceIsSpecified) { + return -1; + } + + if (_playIsInitialized) { + return 0; + } + + // Initialize the speaker (devices might have been added or removed) + if (InitSpeakerLocked() == -1) { + RTC_LOG(LS_WARNING) << "InitSpeaker() failed"; + } + + if (!MicrophoneIsInitialized()) { + // Make this call to check if we are using + // one or two devices (_twoDevices) + bool available = false; + if (MicrophoneIsAvailableLocked(available) == -1) { + RTC_LOG(LS_WARNING) << "MicrophoneIsAvailable() failed"; + } + } + + PaUtil_FlushRingBuffer(_paRenderBuffer); + + OSStatus err = noErr; + UInt32 size = 0; + _renderDelayOffsetSamples = 0; + _renderDelayUs = 0; + _renderLatencyUs = 0; + _renderDeviceIsAlive = 1; + _doStop = false; + + // The internal microphone of a MacBook Pro is located under the left speaker + // grille. When the internal speakers are in use, we want to fully stereo + // pan to the right. + AudioObjectPropertyAddress propertyAddress = { + kAudioDevicePropertyDataSource, kAudioDevicePropertyScopeOutput, 0}; + if (_macBookPro) { + _macBookProPanRight = false; + Boolean hasProperty = + AudioObjectHasProperty(_outputDeviceID, &propertyAddress); + if (hasProperty) { + UInt32 dataSource = 0; + size = sizeof(dataSource); + WEBRTC_CA_LOG_WARN(AudioObjectGetPropertyData( + _outputDeviceID, &propertyAddress, 0, NULL, &size, &dataSource)); + + if (dataSource == 'ispk') { + _macBookProPanRight = true; + RTC_LOG(LS_VERBOSE) + << "MacBook Pro using internal speakers; stereo panning right"; + } else { + RTC_LOG(LS_VERBOSE) << "MacBook Pro not using internal speakers"; + } + + // Add a listener to determine if the status changes. 
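+      // (This listener is registered for kAudioDevicePropertyDataSource; a
+      // change is routed through objectListenerProc to
+      // HandleDataSourceChange(), which re-evaluates _macBookProPanRight.)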
+      WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(
+          _outputDeviceID, &propertyAddress, &objectListenerProc, this));
+    }
+  }
+
+  // Get current stream description
+  propertyAddress.mSelector = kAudioDevicePropertyStreamFormat;
+  memset(&_outStreamFormat, 0, sizeof(_outStreamFormat));
+  size = sizeof(_outStreamFormat);
+  WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+      _outputDeviceID, &propertyAddress, 0, NULL, &size, &_outStreamFormat));
+
+  if (_outStreamFormat.mFormatID != kAudioFormatLinearPCM) {
+    logCAMsg(rtc::LS_ERROR, "Unacceptable output stream format -> mFormatID",
+             (const char*)&_outStreamFormat.mFormatID);
+    return -1;
+  }
+
+  if (_outStreamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS) {
+    RTC_LOG(LS_ERROR)
+        << "Too many channels on output device (mChannelsPerFrame = "
+        << _outStreamFormat.mChannelsPerFrame << ")";
+    return -1;
+  }
+
+  if (_outStreamFormat.mFormatFlags & kAudioFormatFlagIsNonInterleaved) {
+    RTC_LOG(LS_ERROR) << "Non-interleaved audio data is not supported. "
+                         "AudioHardware streams should not have this format.";
+    return -1;
+  }
+
+  RTC_LOG(LS_VERBOSE) << "Output stream format:";
+  RTC_LOG(LS_VERBOSE) << "mSampleRate = " << _outStreamFormat.mSampleRate
+                      << ", mChannelsPerFrame = "
+                      << _outStreamFormat.mChannelsPerFrame;
+  RTC_LOG(LS_VERBOSE) << "mBytesPerPacket = "
+                      << _outStreamFormat.mBytesPerPacket
+                      << ", mFramesPerPacket = "
+                      << _outStreamFormat.mFramesPerPacket;
+  RTC_LOG(LS_VERBOSE) << "mBytesPerFrame = " << _outStreamFormat.mBytesPerFrame
+                      << ", mBitsPerChannel = "
+                      << _outStreamFormat.mBitsPerChannel;
+  RTC_LOG(LS_VERBOSE) << "mFormatFlags = " << _outStreamFormat.mFormatFlags;
+  logCAMsg(rtc::LS_VERBOSE, "mFormatID",
+           (const char*)&_outStreamFormat.mFormatID);
+
+  // Our preferred format to work with.
+  if (_outStreamFormat.mChannelsPerFrame < 2) {
+    // Disable stereo playout when we only have one channel on the device.
+    _playChannels = 1;
+    RTC_LOG(LS_VERBOSE) << "Stereo playout unavailable on this device";
+  }
+  WEBRTC_CA_RETURN_ON_ERR(SetDesiredPlayoutFormat());
+
+  // Listen for format changes.
+  propertyAddress.mSelector = kAudioDevicePropertyStreamFormat;
+  WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(
+      _outputDeviceID, &propertyAddress, &objectListenerProc, this));
+
+  // Listen for processor overloads.
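+  // (Overload notifications end up in HandleProcessorOverload(), which is
+  // deliberately silent because the listener runs on the HAL's real-time IO
+  // thread.)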
+ propertyAddress.mSelector = kAudioDeviceProcessorOverload; + WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener( + _outputDeviceID, &propertyAddress, &objectListenerProc, this)); + + if (_twoDevices || !_recIsInitialized) { + WEBRTC_CA_RETURN_ON_ERR(AudioDeviceCreateIOProcID( + _outputDeviceID, deviceIOProc, this, &_deviceIOProcID)); + } + + _playIsInitialized = true; + + return 0; +} + +int32_t AudioDeviceMac::InitRecording() { + RTC_LOG(LS_INFO) << "InitRecording"; + MutexLock lock(&mutex_); + + if (_recording) { + return -1; + } + + if (!_inputDeviceIsSpecified) { + return -1; + } + + if (_recIsInitialized) { + return 0; + } + + // Initialize the microphone (devices might have been added or removed) + if (InitMicrophoneLocked() == -1) { + RTC_LOG(LS_WARNING) << "InitMicrophone() failed"; + } + + if (!SpeakerIsInitialized()) { + // Make this call to check if we are using + // one or two devices (_twoDevices) + bool available = false; + if (SpeakerIsAvailableLocked(available) == -1) { + RTC_LOG(LS_WARNING) << "SpeakerIsAvailable() failed"; + } + } + + OSStatus err = noErr; + UInt32 size = 0; + + PaUtil_FlushRingBuffer(_paCaptureBuffer); + + _captureDelayUs = 0; + _captureLatencyUs = 0; + _captureDeviceIsAlive = 1; + _doStopRec = false; + + // Get current stream description + AudioObjectPropertyAddress propertyAddress = { + kAudioDevicePropertyStreamFormat, kAudioDevicePropertyScopeInput, 0}; + memset(&_inStreamFormat, 0, sizeof(_inStreamFormat)); + size = sizeof(_inStreamFormat); + WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( + _inputDeviceID, &propertyAddress, 0, NULL, &size, &_inStreamFormat)); + + if (_inStreamFormat.mFormatID != kAudioFormatLinearPCM) { + logCAMsg(rtc::LS_ERROR, "Unacceptable input stream format -> mFormatID", + (const char*)&_inStreamFormat.mFormatID); + return -1; + } + + if (_inStreamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS) { + RTC_LOG(LS_ERROR) + << "Too many channels on input device (mChannelsPerFrame = " + << _inStreamFormat.mChannelsPerFrame << ")"; + return -1; + } + + const int io_block_size_samples = _inStreamFormat.mChannelsPerFrame * + _inStreamFormat.mSampleRate / 100 * + N_BLOCKS_IO; + if (io_block_size_samples > _captureBufSizeSamples) { + RTC_LOG(LS_ERROR) << "Input IO block size (" << io_block_size_samples + << ") is larger than ring buffer (" + << _captureBufSizeSamples << ")"; + return -1; + } + + RTC_LOG(LS_VERBOSE) << "Input stream format:"; + RTC_LOG(LS_VERBOSE) << "mSampleRate = " << _inStreamFormat.mSampleRate + << ", mChannelsPerFrame = " + << _inStreamFormat.mChannelsPerFrame; + RTC_LOG(LS_VERBOSE) << "mBytesPerPacket = " << _inStreamFormat.mBytesPerPacket + << ", mFramesPerPacket = " + << _inStreamFormat.mFramesPerPacket; + RTC_LOG(LS_VERBOSE) << "mBytesPerFrame = " << _inStreamFormat.mBytesPerFrame + << ", mBitsPerChannel = " + << _inStreamFormat.mBitsPerChannel; + RTC_LOG(LS_VERBOSE) << "mFormatFlags = " << _inStreamFormat.mFormatFlags; + logCAMsg(rtc::LS_VERBOSE, "mFormatID", + (const char*)&_inStreamFormat.mFormatID); + + // Our preferred format to work with + if (_inStreamFormat.mChannelsPerFrame >= 2 && (_recChannels == 2)) { + _inDesiredFormat.mChannelsPerFrame = 2; + } else { + // Disable stereo recording when we only have one channel on the device. 
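+    // (Only the channel count adapts to the hardware; the desired sample
+    // rate stays at N_REC_SAMPLES_PER_SEC and the capture converter created
+    // below resamples from the device rate.)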
+ _inDesiredFormat.mChannelsPerFrame = 1; + _recChannels = 1; + RTC_LOG(LS_VERBOSE) << "Stereo recording unavailable on this device"; + } + + if (_ptrAudioBuffer) { + // Update audio buffer with the selected parameters + _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC); + _ptrAudioBuffer->SetRecordingChannels((uint8_t)_recChannels); + } + + _inDesiredFormat.mSampleRate = N_REC_SAMPLES_PER_SEC; + _inDesiredFormat.mBytesPerPacket = + _inDesiredFormat.mChannelsPerFrame * sizeof(SInt16); + _inDesiredFormat.mFramesPerPacket = 1; + _inDesiredFormat.mBytesPerFrame = + _inDesiredFormat.mChannelsPerFrame * sizeof(SInt16); + _inDesiredFormat.mBitsPerChannel = sizeof(SInt16) * 8; + + _inDesiredFormat.mFormatFlags = + kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked; +#ifdef WEBRTC_ARCH_BIG_ENDIAN + _inDesiredFormat.mFormatFlags |= kLinearPCMFormatFlagIsBigEndian; +#endif + _inDesiredFormat.mFormatID = kAudioFormatLinearPCM; + + WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(&_inStreamFormat, &_inDesiredFormat, + &_captureConverter)); + + // First try to set buffer size to desired value (10 ms * N_BLOCKS_IO) + // TODO(xians): investigate this block. + UInt32 bufByteCount = + (UInt32)((_inStreamFormat.mSampleRate / 1000.0) * 10.0 * N_BLOCKS_IO * + _inStreamFormat.mChannelsPerFrame * sizeof(Float32)); + if (_inStreamFormat.mFramesPerPacket != 0) { + if (bufByteCount % _inStreamFormat.mFramesPerPacket != 0) { + bufByteCount = + ((UInt32)(bufByteCount / _inStreamFormat.mFramesPerPacket) + 1) * + _inStreamFormat.mFramesPerPacket; + } + } + + // Ensure the buffer size is within the acceptable range provided by the + // device. + propertyAddress.mSelector = kAudioDevicePropertyBufferSizeRange; + AudioValueRange range; + size = sizeof(range); + WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( + _inputDeviceID, &propertyAddress, 0, NULL, &size, &range)); + if (range.mMinimum > bufByteCount) { + bufByteCount = range.mMinimum; + } else if (range.mMaximum < bufByteCount) { + bufByteCount = range.mMaximum; + } + + propertyAddress.mSelector = kAudioDevicePropertyBufferSize; + size = sizeof(bufByteCount); + WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData( + _inputDeviceID, &propertyAddress, 0, NULL, size, &bufByteCount)); + + // Get capture device latency + propertyAddress.mSelector = kAudioDevicePropertyLatency; + UInt32 latency = 0; + size = sizeof(UInt32); + WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( + _inputDeviceID, &propertyAddress, 0, NULL, &size, &latency)); + _captureLatencyUs = (UInt32)((1.0e6 * latency) / _inStreamFormat.mSampleRate); + + // Get capture stream latency + propertyAddress.mSelector = kAudioDevicePropertyStreams; + AudioStreamID stream = 0; + size = sizeof(AudioStreamID); + WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( + _inputDeviceID, &propertyAddress, 0, NULL, &size, &stream)); + propertyAddress.mSelector = kAudioStreamPropertyLatency; + size = sizeof(UInt32); + latency = 0; + WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( + _inputDeviceID, &propertyAddress, 0, NULL, &size, &latency)); + _captureLatencyUs += + (UInt32)((1.0e6 * latency) / _inStreamFormat.mSampleRate); + + // Listen for format changes + // TODO(xians): should we be using kAudioDevicePropertyDeviceHasChanged? 
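+  // (A format-change notification is routed through objectListenerProc to
+  // HandleStreamFormatChange(), which re-reads the device format and
+  // recreates the capture converter.)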
+  propertyAddress.mSelector = kAudioDevicePropertyStreamFormat;
+  WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(
+      _inputDeviceID, &propertyAddress, &objectListenerProc, this));
+
+  // Listen for processor overloads
+  propertyAddress.mSelector = kAudioDeviceProcessorOverload;
+  WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(
+      _inputDeviceID, &propertyAddress, &objectListenerProc, this));
+
+  if (_twoDevices) {
+    WEBRTC_CA_RETURN_ON_ERR(AudioDeviceCreateIOProcID(
+        _inputDeviceID, inDeviceIOProc, this, &_inDeviceIOProcID));
+  } else if (!_playIsInitialized) {
+    WEBRTC_CA_RETURN_ON_ERR(AudioDeviceCreateIOProcID(
+        _inputDeviceID, deviceIOProc, this, &_deviceIOProcID));
+  }
+
+  // Mark recording side as initialized
+  _recIsInitialized = true;
+
+  return 0;
+}
+
+int32_t AudioDeviceMac::StartRecording() {
+  RTC_LOG(LS_INFO) << "StartRecording";
+  MutexLock lock(&mutex_);
+
+  if (!_recIsInitialized) {
+    return -1;
+  }
+
+  if (_recording) {
+    return 0;
+  }
+
+  if (!_initialized) {
+    RTC_LOG(LS_ERROR) << "Recording worker thread has not been started";
+    return -1;
+  }
+
+  RTC_DCHECK(capture_worker_thread_.empty());
+  capture_worker_thread_ = rtc::PlatformThread::SpawnJoinable(
+      [this] {
+        while (CaptureWorkerThread()) {
+        }
+      },
+      "CaptureWorkerThread",
+      rtc::ThreadAttributes().SetPriority(rtc::ThreadPriority::kRealtime));
+
+  OSStatus err = noErr;
+  if (_twoDevices) {
+    WEBRTC_CA_RETURN_ON_ERR(
+        AudioDeviceStart(_inputDeviceID, _inDeviceIOProcID));
+  } else if (!_playing) {
+    WEBRTC_CA_RETURN_ON_ERR(AudioDeviceStart(_inputDeviceID, _deviceIOProcID));
+  }
+
+  _recording = true;
+
+  return 0;
+}
+
+int32_t AudioDeviceMac::StopRecording() {
+  RTC_LOG(LS_INFO) << "StopRecording";
+  MutexLock lock(&mutex_);
+
+  if (!_recIsInitialized) {
+    return 0;
+  }
+
+  OSStatus err = noErr;
+  int32_t captureDeviceIsAlive = _captureDeviceIsAlive;
+  if (_twoDevices && captureDeviceIsAlive == 1) {
+    // Recording side uses its own dedicated device and IOProc.
+    if (_recording) {
+      _recording = false;
+      _doStopRec = true;  // Signal to io proc to stop audio device
+      mutex_.Unlock();    // Cannot be under lock, risk of deadlock
+      if (!_stopEventRec.Wait(TimeDelta::Seconds(2))) {
+        MutexLock lockScoped(&mutex_);
+        RTC_LOG(LS_WARNING) << "Timed out stopping the capture IOProc. "
+                               "We may have failed to detect a device removal.";
+        WEBRTC_CA_LOG_WARN(AudioDeviceStop(_inputDeviceID, _inDeviceIOProcID));
+        WEBRTC_CA_LOG_WARN(
+            AudioDeviceDestroyIOProcID(_inputDeviceID, _inDeviceIOProcID));
+      }
+      mutex_.Lock();
+      _doStopRec = false;
+      RTC_LOG(LS_INFO) << "Recording stopped (input device)";
+    } else if (_recIsInitialized) {
+      WEBRTC_CA_LOG_WARN(
+          AudioDeviceDestroyIOProcID(_inputDeviceID, _inDeviceIOProcID));
+      RTC_LOG(LS_INFO) << "Recording uninitialized (input device)";
+    }
+  } else {
+    // We signal a stop for a shared device even when rendering has
+    // not yet ended. This is to ensure the IOProc will return early as
+    // intended (by checking `_recording`) before accessing
+    // resources we free below (e.g. the capture converter).
+    //
+    // In the case of a shared device, the IOProc will verify
+    // rendering has ended before stopping itself.
+    if (_recording && captureDeviceIsAlive == 1) {
+      _recording = false;
+      _doStop = true;   // Signal to io proc to stop audio device
+      mutex_.Unlock();  // Cannot be under lock, risk of deadlock
+      if (!_stopEvent.Wait(TimeDelta::Seconds(2))) {
+        MutexLock lockScoped(&mutex_);
+        RTC_LOG(LS_WARNING) << "Timed out stopping the shared IOProc. "
+                               "We may have failed to detect a device removal.";
+        // We assume rendering on a shared device has stopped as well if
+        // the IOProc times out.
+        WEBRTC_CA_LOG_WARN(AudioDeviceStop(_outputDeviceID, _deviceIOProcID));
+        WEBRTC_CA_LOG_WARN(
+            AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
+      }
+      mutex_.Lock();
+      _doStop = false;
+      RTC_LOG(LS_INFO) << "Recording stopped (shared device)";
+    } else if (_recIsInitialized && !_playing && !_playIsInitialized) {
+      WEBRTC_CA_LOG_WARN(
+          AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
+      RTC_LOG(LS_INFO) << "Recording uninitialized (shared device)";
+    }
+  }
+
+  // Setting this signal will allow the worker thread to be stopped.
+  _captureDeviceIsAlive = 0;
+
+  if (!capture_worker_thread_.empty()) {
+    mutex_.Unlock();
+    capture_worker_thread_.Finalize();
+    mutex_.Lock();
+  }
+
+  WEBRTC_CA_LOG_WARN(AudioConverterDispose(_captureConverter));
+
+  // Remove listeners.
+  AudioObjectPropertyAddress propertyAddress = {
+      kAudioDevicePropertyStreamFormat, kAudioDevicePropertyScopeInput, 0};
+  WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(
+      _inputDeviceID, &propertyAddress, &objectListenerProc, this));
+
+  propertyAddress.mSelector = kAudioDeviceProcessorOverload;
+  WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(
+      _inputDeviceID, &propertyAddress, &objectListenerProc, this));
+
+  _recIsInitialized = false;
+  _recording = false;
+
+  return 0;
+}
+
+bool AudioDeviceMac::RecordingIsInitialized() const {
+  return (_recIsInitialized);
+}
+
+bool AudioDeviceMac::Recording() const {
+  return (_recording);
+}
+
+bool AudioDeviceMac::PlayoutIsInitialized() const {
+  return (_playIsInitialized);
+}
+
+int32_t AudioDeviceMac::StartPlayout() {
+  RTC_LOG(LS_INFO) << "StartPlayout";
+  MutexLock lock(&mutex_);
+
+  if (!_playIsInitialized) {
+    return -1;
+  }
+
+  if (_playing) {
+    return 0;
+  }
+
+  RTC_DCHECK(render_worker_thread_.empty());
+  render_worker_thread_ = rtc::PlatformThread::SpawnJoinable(
+      [this] {
+        while (RenderWorkerThread()) {
+        }
+      },
+      "RenderWorkerThread",
+      rtc::ThreadAttributes().SetPriority(rtc::ThreadPriority::kRealtime));
+
+  if (_twoDevices || !_recording) {
+    OSStatus err = noErr;
+    WEBRTC_CA_RETURN_ON_ERR(AudioDeviceStart(_outputDeviceID, _deviceIOProcID));
+  }
+  _playing = true;
+
+  return 0;
+}
+
+int32_t AudioDeviceMac::StopPlayout() {
+  RTC_LOG(LS_INFO) << "StopPlayout";
+  MutexLock lock(&mutex_);
+
+  if (!_playIsInitialized) {
+    return 0;
+  }
+
+  OSStatus err = noErr;
+  int32_t renderDeviceIsAlive = _renderDeviceIsAlive;
+  if (_playing && renderDeviceIsAlive == 1) {
+    // We signal a stop for a shared device even when capturing has not
+    // yet ended. This is to ensure the IOProc will return early as
+    // intended (by checking `_playing`) before accessing resources we
+    // free below (e.g. the render converter).
+    //
+    // In the case of a shared device, the IOProc will verify capturing
+    // has ended before stopping itself.
+    _playing = false;
+    _doStop = true;   // Signal to io proc to stop audio device
+    mutex_.Unlock();  // Cannot be under lock, risk of deadlock
+    if (!_stopEvent.Wait(TimeDelta::Seconds(2))) {
+      MutexLock lockScoped(&mutex_);
+      RTC_LOG(LS_WARNING) << "Timed out stopping the render IOProc. "
+                             "We may have failed to detect a device removal.";
+
+      // We assume capturing on a shared device has stopped as well if the
+      // IOProc times out.
+      WEBRTC_CA_LOG_WARN(AudioDeviceStop(_outputDeviceID, _deviceIOProcID));
+      WEBRTC_CA_LOG_WARN(
+          AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
+    }
+    mutex_.Lock();
+    _doStop = false;
+    RTC_LOG(LS_INFO) << "Playout stopped";
+  } else if (_twoDevices && _playIsInitialized) {
+    WEBRTC_CA_LOG_WARN(
+        AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
+    RTC_LOG(LS_INFO) << "Playout uninitialized (output device)";
+  } else if (!_twoDevices && _playIsInitialized && !_recIsInitialized) {
+    WEBRTC_CA_LOG_WARN(
+        AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
+    RTC_LOG(LS_INFO) << "Playout uninitialized (shared device)";
+  }
+
+  // Setting this signal will allow the worker thread to be stopped.
+  _renderDeviceIsAlive = 0;
+  if (!render_worker_thread_.empty()) {
+    mutex_.Unlock();
+    render_worker_thread_.Finalize();
+    mutex_.Lock();
+  }
+
+  WEBRTC_CA_LOG_WARN(AudioConverterDispose(_renderConverter));
+
+  // Remove listeners.
+  AudioObjectPropertyAddress propertyAddress = {
+      kAudioDevicePropertyStreamFormat, kAudioDevicePropertyScopeOutput, 0};
+  WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(
+      _outputDeviceID, &propertyAddress, &objectListenerProc, this));
+
+  propertyAddress.mSelector = kAudioDeviceProcessorOverload;
+  WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(
+      _outputDeviceID, &propertyAddress, &objectListenerProc, this));
+
+  if (_macBookPro) {
+    Boolean hasProperty =
+        AudioObjectHasProperty(_outputDeviceID, &propertyAddress);
+    if (hasProperty) {
+      propertyAddress.mSelector = kAudioDevicePropertyDataSource;
+      WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(
+          _outputDeviceID, &propertyAddress, &objectListenerProc, this));
+    }
+  }
+
+  _playIsInitialized = false;
+  _playing = false;
+
+  return 0;
+}
+
+int32_t AudioDeviceMac::PlayoutDelay(uint16_t& delayMS) const {
+  int32_t renderDelayUs = _renderDelayUs;
+  delayMS =
+      static_cast<uint16_t>(1e-3 * (renderDelayUs + _renderLatencyUs) + 0.5);
+  return 0;
+}
+
+bool AudioDeviceMac::Playing() const {
+  return (_playing);
+}
+
+// ============================================================================
+// Private Methods
+// ============================================================================
+
+int32_t AudioDeviceMac::GetNumberDevices(const AudioObjectPropertyScope scope,
+                                         AudioDeviceID scopedDeviceIds[],
+                                         const uint32_t deviceListLength) {
+  OSStatus err = noErr;
+
+  AudioObjectPropertyAddress propertyAddress = {
+      kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal,
+      kAudioObjectPropertyElementMaster};
+  UInt32 size = 0;
+  WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyDataSize(
+      kAudioObjectSystemObject, &propertyAddress, 0, NULL, &size));
+  if (size == 0) {
+    RTC_LOG(LS_WARNING) << "No devices";
+    return 0;
+  }
+
+  UInt32 numberDevices = size / sizeof(AudioDeviceID);
+  const auto deviceIds = std::make_unique<AudioDeviceID[]>(numberDevices);
+  AudioBufferList* bufferList = NULL;
+  UInt32 numberScopedDevices = 0;
+
+  // First check if there is a default device and list it
+  UInt32 hardwareProperty = 0;
+  if (scope == kAudioDevicePropertyScopeOutput) {
+    hardwareProperty = kAudioHardwarePropertyDefaultOutputDevice;
+  } else {
+    hardwareProperty = kAudioHardwarePropertyDefaultInputDevice;
+  }
+
+  AudioObjectPropertyAddress propertyAddressDefault = {
+      hardwareProperty, kAudioObjectPropertyScopeGlobal,
+      kAudioObjectPropertyElementMaster};
+
+  AudioDeviceID usedID;
+  UInt32 uintSize = sizeof(UInt32);
+  WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(kAudioObjectSystemObject,
+                                                     &propertyAddressDefault,
+                                                     0, NULL, &uintSize,
+                                                     &usedID));
+  if (usedID != kAudioDeviceUnknown) {
+    scopedDeviceIds[numberScopedDevices] = usedID;
+    numberScopedDevices++;
+  } else {
+    RTC_LOG(LS_WARNING) << "GetNumberDevices(): Default device unknown";
+  }
+
+  // Then list the rest of the devices
+  bool listOK = true;
+
+  WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyData(kAudioObjectSystemObject,
+                                               &propertyAddress, 0, NULL,
+                                               &size, deviceIds.get()));
+  if (err != noErr) {
+    listOK = false;
+  } else {
+    propertyAddress.mSelector = kAudioDevicePropertyStreamConfiguration;
+    propertyAddress.mScope = scope;
+    propertyAddress.mElement = 0;
+    for (UInt32 i = 0; i < numberDevices; i++) {
+      // Check for input channels
+      WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyDataSize(
+          deviceIds[i], &propertyAddress, 0, NULL, &size));
+      if (err == kAudioHardwareBadDeviceError) {
+        // This device doesn't actually exist; continue iterating.
+        continue;
+      } else if (err != noErr) {
+        listOK = false;
+        break;
+      }
+
+      bufferList = (AudioBufferList*)malloc(size);
+      WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyData(
+          deviceIds[i], &propertyAddress, 0, NULL, &size, bufferList));
+      if (err != noErr) {
+        listOK = false;
+        break;
+      }
+
+      if (bufferList->mNumberBuffers > 0) {
+        if (numberScopedDevices >= deviceListLength) {
+          RTC_LOG(LS_ERROR) << "Device list is not long enough";
+          listOK = false;
+          break;
+        }
+
+        scopedDeviceIds[numberScopedDevices] = deviceIds[i];
+        numberScopedDevices++;
+      }
+
+      free(bufferList);
+      bufferList = NULL;
+    }  // for
+  }
+
+  if (!listOK) {
+    if (bufferList) {
+      free(bufferList);
+      bufferList = NULL;
+    }
+    return -1;
+  }
+
+  return numberScopedDevices;
+}
+
+int32_t AudioDeviceMac::GetDeviceName(const AudioObjectPropertyScope scope,
+                                      const uint16_t index,
+                                      rtc::ArrayView<char> name) {
+  OSStatus err = noErr;
+  AudioDeviceID deviceIds[MaxNumberDevices];
+
+  int numberDevices = GetNumberDevices(scope, deviceIds, MaxNumberDevices);
+  if (numberDevices < 0) {
+    return -1;
+  } else if (numberDevices == 0) {
+    RTC_LOG(LS_ERROR) << "No devices";
+    return -1;
+  }
+
+  // If the index is below the number of devices, assume it's a "WEBRTC ID",
+  // otherwise assume it's a CoreAudio ID
+  AudioDeviceID usedID;
+
+  // Check if there is a default device
+  bool isDefaultDevice = false;
+  if (index == 0) {
+    UInt32 hardwareProperty = 0;
+    if (scope == kAudioDevicePropertyScopeOutput) {
+      hardwareProperty = kAudioHardwarePropertyDefaultOutputDevice;
+    } else {
+      hardwareProperty = kAudioHardwarePropertyDefaultInputDevice;
+    }
+    AudioObjectPropertyAddress propertyAddress = {
+        hardwareProperty, kAudioObjectPropertyScopeGlobal,
+        kAudioObjectPropertyElementMaster};
+    UInt32 size = sizeof(UInt32);
+    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+        kAudioObjectSystemObject, &propertyAddress, 0, NULL, &size, &usedID));
+    if (usedID == kAudioDeviceUnknown) {
+      RTC_LOG(LS_WARNING) << "GetDeviceName(): Default device unknown";
+    } else {
+      isDefaultDevice = true;
+    }
+  }
+
+  AudioObjectPropertyAddress propertyAddress = {kAudioDevicePropertyDeviceName,
+                                                scope, 0};
+
+  if (isDefaultDevice) {
+    std::array<char, kAdmMaxDeviceNameSize> devName;
+    UInt32 len = devName.size();
+
+    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+        usedID, &propertyAddress, 0, NULL, &len, devName.data()));
+
+    rtc::SimpleStringBuilder ss(name);
+    ss.AppendFormat("default (%s)", devName.data());
+  } else {
+    if (index < numberDevices) {
+      usedID = deviceIds[index];
+    } else {
+      usedID = index;
+    }
+    UInt32 len = name.size();
+
WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( + usedID, &propertyAddress, 0, NULL, &len, name.data())); + } + + return 0; +} + +int32_t AudioDeviceMac::InitDevice(const uint16_t userDeviceIndex, + AudioDeviceID& deviceId, + const bool isInput) { + OSStatus err = noErr; + UInt32 size = 0; + AudioObjectPropertyScope deviceScope; + AudioObjectPropertySelector defaultDeviceSelector; + AudioDeviceID deviceIds[MaxNumberDevices]; + + if (isInput) { + deviceScope = kAudioDevicePropertyScopeInput; + defaultDeviceSelector = kAudioHardwarePropertyDefaultInputDevice; + } else { + deviceScope = kAudioDevicePropertyScopeOutput; + defaultDeviceSelector = kAudioHardwarePropertyDefaultOutputDevice; + } + + AudioObjectPropertyAddress propertyAddress = { + defaultDeviceSelector, kAudioObjectPropertyScopeGlobal, + kAudioObjectPropertyElementMaster}; + + // Get the actual device IDs + int numberDevices = + GetNumberDevices(deviceScope, deviceIds, MaxNumberDevices); + if (numberDevices < 0) { + return -1; + } else if (numberDevices == 0) { + RTC_LOG(LS_ERROR) << "InitDevice(): No devices"; + return -1; + } + + bool isDefaultDevice = false; + deviceId = kAudioDeviceUnknown; + if (userDeviceIndex == 0) { + // Try to use default system device + size = sizeof(AudioDeviceID); + WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( + kAudioObjectSystemObject, &propertyAddress, 0, NULL, &size, &deviceId)); + if (deviceId == kAudioDeviceUnknown) { + RTC_LOG(LS_WARNING) << "No default device exists"; + } else { + isDefaultDevice = true; + } + } + + if (!isDefaultDevice) { + deviceId = deviceIds[userDeviceIndex]; + } + + // Obtain device name and manufacturer for logging. + // Also use this as a test to ensure a user-set device ID is valid. + char devName[128]; + char devManf[128]; + memset(devName, 0, sizeof(devName)); + memset(devManf, 0, sizeof(devManf)); + + propertyAddress.mSelector = kAudioDevicePropertyDeviceName; + propertyAddress.mScope = deviceScope; + propertyAddress.mElement = 0; + size = sizeof(devName); + WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(deviceId, &propertyAddress, + 0, NULL, &size, devName)); + + propertyAddress.mSelector = kAudioDevicePropertyDeviceManufacturer; + size = sizeof(devManf); + WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(deviceId, &propertyAddress, + 0, NULL, &size, devManf)); + + if (isInput) { + RTC_LOG(LS_INFO) << "Input device: " << devManf << " " << devName; + } else { + RTC_LOG(LS_INFO) << "Output device: " << devManf << " " << devName; + } + + return 0; +} + +OSStatus AudioDeviceMac::SetDesiredPlayoutFormat() { + // Our preferred format to work with. + _outDesiredFormat.mSampleRate = N_PLAY_SAMPLES_PER_SEC; + _outDesiredFormat.mChannelsPerFrame = _playChannels; + + if (_ptrAudioBuffer) { + // Update audio buffer with the selected parameters. + _ptrAudioBuffer->SetPlayoutSampleRate(N_PLAY_SAMPLES_PER_SEC); + _ptrAudioBuffer->SetPlayoutChannels((uint8_t)_playChannels); + } + + _renderDelayOffsetSamples = + _renderBufSizeSamples - N_BUFFERS_OUT * ENGINE_PLAY_BUF_SIZE_IN_SAMPLES * + _outDesiredFormat.mChannelsPerFrame; + + _outDesiredFormat.mBytesPerPacket = + _outDesiredFormat.mChannelsPerFrame * sizeof(SInt16); + // In uncompressed audio, a packet is one frame. 
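+  // (With one frame per packet, mBytesPerPacket and mBytesPerFrame below are
+  // both mChannelsPerFrame * sizeof(SInt16).)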
+  _outDesiredFormat.mFramesPerPacket = 1;
+  _outDesiredFormat.mBytesPerFrame =
+      _outDesiredFormat.mChannelsPerFrame * sizeof(SInt16);
+  _outDesiredFormat.mBitsPerChannel = sizeof(SInt16) * 8;
+
+  _outDesiredFormat.mFormatFlags =
+      kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
+#ifdef WEBRTC_ARCH_BIG_ENDIAN
+  _outDesiredFormat.mFormatFlags |= kLinearPCMFormatFlagIsBigEndian;
+#endif
+  _outDesiredFormat.mFormatID = kAudioFormatLinearPCM;
+
+  OSStatus err = noErr;
+  WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(
+      &_outDesiredFormat, &_outStreamFormat, &_renderConverter));
+
+  // First try to set the buffer size to the desired value (20 ms).
+  const uint16_t kPlayBufDelayFixed = 20;
+  UInt32 bufByteCount = static_cast<UInt32>(
+      (_outStreamFormat.mSampleRate / 1000.0) * kPlayBufDelayFixed *
+      _outStreamFormat.mChannelsPerFrame * sizeof(Float32));
+  if (_outStreamFormat.mFramesPerPacket != 0) {
+    if (bufByteCount % _outStreamFormat.mFramesPerPacket != 0) {
+      bufByteCount = (static_cast<UInt32>(bufByteCount /
+                                          _outStreamFormat.mFramesPerPacket) +
+                      1) *
+                     _outStreamFormat.mFramesPerPacket;
+    }
+  }
+
+  // Ensure the buffer size is within the range provided by the device.
+  AudioObjectPropertyAddress propertyAddress = {
+      kAudioDevicePropertyDataSource, kAudioDevicePropertyScopeOutput, 0};
+  propertyAddress.mSelector = kAudioDevicePropertyBufferSizeRange;
+  AudioValueRange range;
+  UInt32 size = sizeof(range);
+  WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+      _outputDeviceID, &propertyAddress, 0, NULL, &size, &range));
+  if (range.mMinimum > bufByteCount) {
+    bufByteCount = range.mMinimum;
+  } else if (range.mMaximum < bufByteCount) {
+    bufByteCount = range.mMaximum;
+  }
+
+  propertyAddress.mSelector = kAudioDevicePropertyBufferSize;
+  size = sizeof(bufByteCount);
+  WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(
+      _outputDeviceID, &propertyAddress, 0, NULL, size, &bufByteCount));
+
+  // Get render device latency.
+  propertyAddress.mSelector = kAudioDevicePropertyLatency;
+  UInt32 latency = 0;
+  size = sizeof(UInt32);
+  WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+      _outputDeviceID, &propertyAddress, 0, NULL, &size, &latency));
+  _renderLatencyUs =
+      static_cast<uint32_t>((1.0e6 * latency) / _outStreamFormat.mSampleRate);
+
+  // Get render stream latency.
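+  // (The total latency reported via PlayoutDelay() is this device latency
+  // plus the latency of the device's first stream, each converted from
+  // frames to microseconds.)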
+  propertyAddress.mSelector = kAudioDevicePropertyStreams;
+  AudioStreamID stream = 0;
+  size = sizeof(AudioStreamID);
+  WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+      _outputDeviceID, &propertyAddress, 0, NULL, &size, &stream));
+  propertyAddress.mSelector = kAudioStreamPropertyLatency;
+  size = sizeof(UInt32);
+  latency = 0;
+  WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+      _outputDeviceID, &propertyAddress, 0, NULL, &size, &latency));
+  _renderLatencyUs +=
+      static_cast<uint32_t>((1.0e6 * latency) / _outStreamFormat.mSampleRate);
+
+  RTC_LOG(LS_VERBOSE) << "initial playout status: _renderDelayOffsetSamples="
+                      << _renderDelayOffsetSamples
+                      << ", _renderDelayUs=" << _renderDelayUs
+                      << ", _renderLatencyUs=" << _renderLatencyUs;
+  return 0;
+}
+
+OSStatus AudioDeviceMac::objectListenerProc(
+    AudioObjectID objectId,
+    UInt32 numberAddresses,
+    const AudioObjectPropertyAddress addresses[],
+    void* clientData) {
+  AudioDeviceMac* ptrThis = (AudioDeviceMac*)clientData;
+  RTC_DCHECK(ptrThis != NULL);
+
+  ptrThis->implObjectListenerProc(objectId, numberAddresses, addresses);
+
+  // AudioObjectPropertyListenerProc functions are supposed to return 0
+  return 0;
+}
+
+OSStatus AudioDeviceMac::implObjectListenerProc(
+    const AudioObjectID objectId,
+    const UInt32 numberAddresses,
+    const AudioObjectPropertyAddress addresses[]) {
+  RTC_LOG(LS_VERBOSE) << "AudioDeviceMac::implObjectListenerProc()";
+
+  for (UInt32 i = 0; i < numberAddresses; i++) {
+    if (addresses[i].mSelector == kAudioHardwarePropertyDevices) {
+      HandleDeviceChange();
+    } else if (addresses[i].mSelector == kAudioDevicePropertyStreamFormat) {
+      HandleStreamFormatChange(objectId, addresses[i]);
+    } else if (addresses[i].mSelector == kAudioDevicePropertyDataSource) {
+      HandleDataSourceChange(objectId, addresses[i]);
+    } else if (addresses[i].mSelector == kAudioDeviceProcessorOverload) {
+      HandleProcessorOverload(addresses[i]);
+    }
+  }
+
+  return 0;
+}
+
+int32_t AudioDeviceMac::HandleDeviceChange() {
+  OSStatus err = noErr;
+
+  RTC_LOG(LS_VERBOSE) << "kAudioHardwarePropertyDevices";
+
+  // A device has changed. Check if our registered devices have been removed.
+  // Ensure the devices have been initialized, meaning the IDs are valid.
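+  // (kAudioDevicePropertyDeviceIsAlive reads as 0, or the query fails with
+  // kAudioHardwareBadDeviceError, once a device has been removed; the worker
+  // threads observe the *DeviceIsAlive flags and shut down.)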
+ if (MicrophoneIsInitialized()) { + AudioObjectPropertyAddress propertyAddress = { + kAudioDevicePropertyDeviceIsAlive, kAudioDevicePropertyScopeInput, 0}; + UInt32 deviceIsAlive = 1; + UInt32 size = sizeof(UInt32); + err = AudioObjectGetPropertyData(_inputDeviceID, &propertyAddress, 0, NULL, + &size, &deviceIsAlive); + + if (err == kAudioHardwareBadDeviceError || deviceIsAlive == 0) { + RTC_LOG(LS_WARNING) << "Capture device is not alive (probably removed)"; + _captureDeviceIsAlive = 0; + _mixerManager.CloseMicrophone(); + } else if (err != noErr) { + logCAMsg(rtc::LS_ERROR, "Error in AudioDeviceGetProperty()", + (const char*)&err); + return -1; + } + } + + if (SpeakerIsInitialized()) { + AudioObjectPropertyAddress propertyAddress = { + kAudioDevicePropertyDeviceIsAlive, kAudioDevicePropertyScopeOutput, 0}; + UInt32 deviceIsAlive = 1; + UInt32 size = sizeof(UInt32); + err = AudioObjectGetPropertyData(_outputDeviceID, &propertyAddress, 0, NULL, + &size, &deviceIsAlive); + + if (err == kAudioHardwareBadDeviceError || deviceIsAlive == 0) { + RTC_LOG(LS_WARNING) << "Render device is not alive (probably removed)"; + _renderDeviceIsAlive = 0; + _mixerManager.CloseSpeaker(); + } else if (err != noErr) { + logCAMsg(rtc::LS_ERROR, "Error in AudioDeviceGetProperty()", + (const char*)&err); + return -1; + } + } + + return 0; +} + +int32_t AudioDeviceMac::HandleStreamFormatChange( + const AudioObjectID objectId, + const AudioObjectPropertyAddress propertyAddress) { + OSStatus err = noErr; + + RTC_LOG(LS_VERBOSE) << "Stream format changed"; + + if (objectId != _inputDeviceID && objectId != _outputDeviceID) { + return 0; + } + + // Get the new device format + AudioStreamBasicDescription streamFormat; + UInt32 size = sizeof(streamFormat); + WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( + objectId, &propertyAddress, 0, NULL, &size, &streamFormat)); + + if (streamFormat.mFormatID != kAudioFormatLinearPCM) { + logCAMsg(rtc::LS_ERROR, "Unacceptable input stream format -> mFormatID", + (const char*)&streamFormat.mFormatID); + return -1; + } + + if (streamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS) { + RTC_LOG(LS_ERROR) << "Too many channels on device (mChannelsPerFrame = " + << streamFormat.mChannelsPerFrame << ")"; + return -1; + } + + if (_ptrAudioBuffer && streamFormat.mChannelsPerFrame != _recChannels) { + RTC_LOG(LS_ERROR) << "Changing channels not supported (mChannelsPerFrame = " + << streamFormat.mChannelsPerFrame << ")"; + return -1; + } + + RTC_LOG(LS_VERBOSE) << "Stream format:"; + RTC_LOG(LS_VERBOSE) << "mSampleRate = " << streamFormat.mSampleRate + << ", mChannelsPerFrame = " + << streamFormat.mChannelsPerFrame; + RTC_LOG(LS_VERBOSE) << "mBytesPerPacket = " << streamFormat.mBytesPerPacket + << ", mFramesPerPacket = " + << streamFormat.mFramesPerPacket; + RTC_LOG(LS_VERBOSE) << "mBytesPerFrame = " << streamFormat.mBytesPerFrame + << ", mBitsPerChannel = " << streamFormat.mBitsPerChannel; + RTC_LOG(LS_VERBOSE) << "mFormatFlags = " << streamFormat.mFormatFlags; + logCAMsg(rtc::LS_VERBOSE, "mFormatID", (const char*)&streamFormat.mFormatID); + + if (propertyAddress.mScope == kAudioDevicePropertyScopeInput) { + const int io_block_size_samples = streamFormat.mChannelsPerFrame * + streamFormat.mSampleRate / 100 * + N_BLOCKS_IO; + if (io_block_size_samples > _captureBufSizeSamples) { + RTC_LOG(LS_ERROR) << "Input IO block size (" << io_block_size_samples + << ") is larger than ring buffer (" + << _captureBufSizeSamples << ")"; + return -1; + } + + memcpy(&_inStreamFormat, &streamFormat, 
sizeof(streamFormat)); + + if (_inStreamFormat.mChannelsPerFrame >= 2 && (_recChannels == 2)) { + _inDesiredFormat.mChannelsPerFrame = 2; + } else { + // Disable stereo recording when we only have one channel on the device. + _inDesiredFormat.mChannelsPerFrame = 1; + _recChannels = 1; + RTC_LOG(LS_VERBOSE) << "Stereo recording unavailable on this device"; + } + + // Recreate the converter with the new format + // TODO(xians): make this thread safe + WEBRTC_CA_RETURN_ON_ERR(AudioConverterDispose(_captureConverter)); + + WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(&streamFormat, &_inDesiredFormat, + &_captureConverter)); + } else { + memcpy(&_outStreamFormat, &streamFormat, sizeof(streamFormat)); + + // Our preferred format to work with + if (_outStreamFormat.mChannelsPerFrame < 2) { + _playChannels = 1; + RTC_LOG(LS_VERBOSE) << "Stereo playout unavailable on this device"; + } + WEBRTC_CA_RETURN_ON_ERR(SetDesiredPlayoutFormat()); + } + return 0; +} + +int32_t AudioDeviceMac::HandleDataSourceChange( + const AudioObjectID objectId, + const AudioObjectPropertyAddress propertyAddress) { + OSStatus err = noErr; + + if (_macBookPro && + propertyAddress.mScope == kAudioDevicePropertyScopeOutput) { + RTC_LOG(LS_VERBOSE) << "Data source changed"; + + _macBookProPanRight = false; + UInt32 dataSource = 0; + UInt32 size = sizeof(UInt32); + WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( + objectId, &propertyAddress, 0, NULL, &size, &dataSource)); + if (dataSource == 'ispk') { + _macBookProPanRight = true; + RTC_LOG(LS_VERBOSE) + << "MacBook Pro using internal speakers; stereo panning right"; + } else { + RTC_LOG(LS_VERBOSE) << "MacBook Pro not using internal speakers"; + } + } + + return 0; +} +int32_t AudioDeviceMac::HandleProcessorOverload( + const AudioObjectPropertyAddress propertyAddress) { + // TODO(xians): we probably want to notify the user in some way of the + // overload. However, the Windows interpretations of these errors seem to + // be more severe than what ProcessorOverload is thrown for. + // + // We don't log the notification, as it's sent from the HAL's IO thread. We + // don't want to slow it down even further. 
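+  // (Both branches below are intentionally no-ops; the commented-out calls
+  // only sketch where an observer could be notified.)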
+  if (propertyAddress.mScope == kAudioDevicePropertyScopeInput) {
+    // RTC_LOG(LS_WARNING) << "Capture processor overload";
+    //_callback->ProblemIsReported(
+    // SndCardStreamObserver::ERecordingProblem);
+  } else {
+    // RTC_LOG(LS_WARNING) << "Render processor overload";
+    //_callback->ProblemIsReported(
+    // SndCardStreamObserver::EPlaybackProblem);
+  }
+
+  return 0;
+}
+
+// ============================================================================
+// Thread Methods
+// ============================================================================
+
+OSStatus AudioDeviceMac::deviceIOProc(AudioDeviceID,
+                                      const AudioTimeStamp*,
+                                      const AudioBufferList* inputData,
+                                      const AudioTimeStamp* inputTime,
+                                      AudioBufferList* outputData,
+                                      const AudioTimeStamp* outputTime,
+                                      void* clientData) {
+  AudioDeviceMac* ptrThis = (AudioDeviceMac*)clientData;
+  RTC_DCHECK(ptrThis != NULL);
+
+  ptrThis->implDeviceIOProc(inputData, inputTime, outputData, outputTime);
+
+  // AudioDeviceIOProc functions are supposed to return 0
+  return 0;
+}
+
+OSStatus AudioDeviceMac::outConverterProc(AudioConverterRef,
+                                          UInt32* numberDataPackets,
+                                          AudioBufferList* data,
+                                          AudioStreamPacketDescription**,
+                                          void* userData) {
+  AudioDeviceMac* ptrThis = (AudioDeviceMac*)userData;
+  RTC_DCHECK(ptrThis != NULL);
+
+  return ptrThis->implOutConverterProc(numberDataPackets, data);
+}
+
+OSStatus AudioDeviceMac::inDeviceIOProc(AudioDeviceID,
+                                        const AudioTimeStamp*,
+                                        const AudioBufferList* inputData,
+                                        const AudioTimeStamp* inputTime,
+                                        AudioBufferList*,
+                                        const AudioTimeStamp*,
+                                        void* clientData) {
+  AudioDeviceMac* ptrThis = (AudioDeviceMac*)clientData;
+  RTC_DCHECK(ptrThis != NULL);
+
+  ptrThis->implInDeviceIOProc(inputData, inputTime);
+
+  // AudioDeviceIOProc functions are supposed to return 0
+  return 0;
+}
+
+OSStatus AudioDeviceMac::inConverterProc(
+    AudioConverterRef,
+    UInt32* numberDataPackets,
+    AudioBufferList* data,
+    AudioStreamPacketDescription** /*dataPacketDescription*/,
+    void* userData) {
+  AudioDeviceMac* ptrThis = static_cast<AudioDeviceMac*>(userData);
+  RTC_DCHECK(ptrThis != NULL);
+
+  return ptrThis->implInConverterProc(numberDataPackets, data);
+}
+
+OSStatus AudioDeviceMac::implDeviceIOProc(const AudioBufferList* inputData,
+                                          const AudioTimeStamp* inputTime,
+                                          AudioBufferList* outputData,
+                                          const AudioTimeStamp* outputTime) {
+  OSStatus err = noErr;
+  UInt64 outputTimeNs = AudioConvertHostTimeToNanos(outputTime->mHostTime);
+  UInt64 nowNs = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime());
+
+  if (!_twoDevices && _recording) {
+    implInDeviceIOProc(inputData, inputTime);
+  }
+
+  // Check if we should close down audio device
+  // Double-checked locking optimization to remove locking overhead
+  if (_doStop) {
+    MutexLock lock(&mutex_);
+    if (_doStop) {
+      if (_twoDevices || (!_recording && !_playing)) {
+        // In the case of a shared device, the single driving ioProc
+        // is stopped here
+        WEBRTC_CA_LOG_ERR(AudioDeviceStop(_outputDeviceID, _deviceIOProcID));
+        WEBRTC_CA_LOG_WARN(
+            AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
+        if (err == noErr) {
+          RTC_LOG(LS_VERBOSE) << "Playout or shared device stopped";
+        }
+      }
+
+      _doStop = false;
+      _stopEvent.Set();
+      return 0;
+    }
+  }
+
+  if (!_playing) {
+    // This can be the case when a shared device is capturing but not
+    // rendering. We allow the checks above before returning to avoid a
+    // timeout when capturing is stopped.
+    return 0;
+  }
+
+  RTC_DCHECK(_outStreamFormat.mBytesPerFrame != 0);
+  UInt32 size =
+      outputData->mBuffers->mDataByteSize / _outStreamFormat.mBytesPerFrame;
+
+  // TODO(xians): signal an error somehow?
+  err = AudioConverterFillComplexBuffer(_renderConverter, outConverterProc,
+                                        this, &size, outputData, NULL);
+  if (err != noErr) {
+    if (err == 1) {
+      // This is our own error.
+      RTC_LOG(LS_ERROR) << "Error in AudioConverterFillComplexBuffer()";
+      return 1;
+    } else {
+      logCAMsg(rtc::LS_ERROR, "Error in AudioConverterFillComplexBuffer()",
+               (const char*)&err);
+      return 1;
+    }
+  }
+
+  ring_buffer_size_t bufSizeSamples =
+      PaUtil_GetRingBufferReadAvailable(_paRenderBuffer);
+
+  int32_t renderDelayUs =
+      static_cast<int32_t>(1e-3 * (outputTimeNs - nowNs) + 0.5);
+  renderDelayUs += static_cast<int32_t>(
+      (1.0e6 * bufSizeSamples) / _outDesiredFormat.mChannelsPerFrame /
+          _outDesiredFormat.mSampleRate +
+      0.5);
+
+  _renderDelayUs = renderDelayUs;
+
+  return 0;
+}
+
+OSStatus AudioDeviceMac::implOutConverterProc(UInt32* numberDataPackets,
+                                              AudioBufferList* data) {
+  RTC_DCHECK(data->mNumberBuffers == 1);
+  ring_buffer_size_t numSamples =
+      *numberDataPackets * _outDesiredFormat.mChannelsPerFrame;
+
+  data->mBuffers->mNumberChannels = _outDesiredFormat.mChannelsPerFrame;
+  // Always give the converter as much as it wants, zero padding as required.
+  data->mBuffers->mDataByteSize =
+      *numberDataPackets * _outDesiredFormat.mBytesPerPacket;
+  data->mBuffers->mData = _renderConvertData;
+  memset(_renderConvertData, 0, sizeof(_renderConvertData));
+
+  PaUtil_ReadRingBuffer(_paRenderBuffer, _renderConvertData, numSamples);
+
+  kern_return_t kernErr = semaphore_signal_all(_renderSemaphore);
+  if (kernErr != KERN_SUCCESS) {
+    RTC_LOG(LS_ERROR) << "semaphore_signal_all() error: " << kernErr;
+    return 1;
+  }
+
+  return 0;
+}
+
+OSStatus AudioDeviceMac::implInDeviceIOProc(const AudioBufferList* inputData,
+                                            const AudioTimeStamp* inputTime) {
+  OSStatus err = noErr;
+  UInt64 inputTimeNs = AudioConvertHostTimeToNanos(inputTime->mHostTime);
+  UInt64 nowNs = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime());
+
+  // Check if we should close down audio device
+  // Double-checked locking optimization to remove locking overhead
+  if (_doStopRec) {
+    MutexLock lock(&mutex_);
+    if (_doStopRec) {
+      // This will be signalled only when a shared device is not in use.
+      WEBRTC_CA_LOG_ERR(AudioDeviceStop(_inputDeviceID, _inDeviceIOProcID));
+      WEBRTC_CA_LOG_WARN(
+          AudioDeviceDestroyIOProcID(_inputDeviceID, _inDeviceIOProcID));
+      if (err == noErr) {
+        RTC_LOG(LS_VERBOSE) << "Recording device stopped";
+      }
+
+      _doStopRec = false;
+      _stopEventRec.Set();
+      return 0;
+    }
+  }
+
+  if (!_recording) {
+    // Allow above checks to avoid a timeout on stopping capture.
+    return 0;
+  }
+
+  ring_buffer_size_t bufSizeSamples =
+      PaUtil_GetRingBufferReadAvailable(_paCaptureBuffer);
+
+  int32_t captureDelayUs =
+      static_cast<int32_t>(1e-3 * (nowNs - inputTimeNs) + 0.5);
+  captureDelayUs += static_cast<int32_t>((1.0e6 * bufSizeSamples) /
+                                             _inStreamFormat.mChannelsPerFrame /
+                                             _inStreamFormat.mSampleRate +
+                                         0.5);
+
+  _captureDelayUs = captureDelayUs;
+
+  RTC_DCHECK(inputData->mNumberBuffers == 1);
+  ring_buffer_size_t numSamples = inputData->mBuffers->mDataByteSize *
+                                  _inStreamFormat.mChannelsPerFrame /
+                                  _inStreamFormat.mBytesPerPacket;
+  PaUtil_WriteRingBuffer(_paCaptureBuffer, inputData->mBuffers->mData,
+                         numSamples);
+
+  kern_return_t kernErr = semaphore_signal_all(_captureSemaphore);
+  if (kernErr != KERN_SUCCESS) {
+    RTC_LOG(LS_ERROR) << "semaphore_signal_all() error: " << kernErr;
+  }
+
+  return err;
+}
+
+OSStatus AudioDeviceMac::implInConverterProc(UInt32* numberDataPackets,
+                                             AudioBufferList* data) {
+  RTC_DCHECK(data->mNumberBuffers == 1);
+  ring_buffer_size_t numSamples =
+      *numberDataPackets * _inStreamFormat.mChannelsPerFrame;
+
+  while (PaUtil_GetRingBufferReadAvailable(_paCaptureBuffer) < numSamples) {
+    mach_timespec_t timeout;
+    timeout.tv_sec = 0;
+    timeout.tv_nsec = TIMER_PERIOD_MS;
+
+    kern_return_t kernErr = semaphore_timedwait(_captureSemaphore, timeout);
+    if (kernErr == KERN_OPERATION_TIMED_OUT) {
+      int32_t signal = _captureDeviceIsAlive;
+      if (signal == 0) {
+        // The capture device is no longer alive; stop the worker thread.
+        *numberDataPackets = 0;
+        return 1;
+      }
+    } else if (kernErr != KERN_SUCCESS) {
+      RTC_LOG(LS_ERROR) << "semaphore_timedwait() error: " << kernErr;
+    }
+  }
+
+  // Pass the read pointer directly to the converter to avoid a memcpy.
+  void* dummyPtr;
+  ring_buffer_size_t dummySize;
+  PaUtil_GetRingBufferReadRegions(_paCaptureBuffer, numSamples,
+                                  &data->mBuffers->mData, &numSamples,
+                                  &dummyPtr, &dummySize);
+  PaUtil_AdvanceRingBufferReadIndex(_paCaptureBuffer, numSamples);
+
+  data->mBuffers->mNumberChannels = _inStreamFormat.mChannelsPerFrame;
+  *numberDataPackets = numSamples / _inStreamFormat.mChannelsPerFrame;
+  data->mBuffers->mDataByteSize =
+      *numberDataPackets * _inStreamFormat.mBytesPerPacket;
+
+  return 0;
+}
+
+bool AudioDeviceMac::RenderWorkerThread() {
+  ring_buffer_size_t numSamples =
+      ENGINE_PLAY_BUF_SIZE_IN_SAMPLES * _outDesiredFormat.mChannelsPerFrame;
+  while (PaUtil_GetRingBufferWriteAvailable(_paRenderBuffer) -
+             _renderDelayOffsetSamples <
+         numSamples) {
+    mach_timespec_t timeout;
+    timeout.tv_sec = 0;
+    timeout.tv_nsec = TIMER_PERIOD_MS;
+
+    kern_return_t kernErr = semaphore_timedwait(_renderSemaphore, timeout);
+    if (kernErr == KERN_OPERATION_TIMED_OUT) {
+      int32_t signal = _renderDeviceIsAlive;
+      if (signal == 0) {
+        // The render device is no longer alive; stop the worker thread.
+        return false;
+      }
+    } else if (kernErr != KERN_SUCCESS) {
+      RTC_LOG(LS_ERROR) << "semaphore_timedwait() error: " << kernErr;
+    }
+  }
+
+  int8_t playBuffer[4 * ENGINE_PLAY_BUF_SIZE_IN_SAMPLES];
+
+  if (!_ptrAudioBuffer) {
+    RTC_LOG(LS_ERROR) << "render AudioBuffer is invalid";
+    return false;
+  }
+
+  // Ask for new PCM data to be played out using the AudioDeviceBuffer.
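+  // (ENGINE_PLAY_BUF_SIZE_IN_SAMPLES is one 10 ms block:
+  // N_PLAY_SAMPLES_PER_SEC * kBufferSizeMs / 1000 = 480 frames at 48 kHz.)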
+  uint32_t nSamples =
+      _ptrAudioBuffer->RequestPlayoutData(ENGINE_PLAY_BUF_SIZE_IN_SAMPLES);
+
+  nSamples = _ptrAudioBuffer->GetPlayoutData(playBuffer);
+  if (nSamples != ENGINE_PLAY_BUF_SIZE_IN_SAMPLES) {
+    RTC_LOG(LS_ERROR) << "invalid number of output samples (" << nSamples
+                      << ")";
+  }
+
+  uint32_t nOutSamples = nSamples * _outDesiredFormat.mChannelsPerFrame;
+
+  SInt16* pPlayBuffer = (SInt16*)&playBuffer;
+  if (_macBookProPanRight && (_playChannels == 2)) {
+    // Mix entirely into the right channel and zero the left channel.
+    SInt32 sampleInt32 = 0;
+    for (uint32_t sampleIdx = 0; sampleIdx < nOutSamples; sampleIdx += 2) {
+      sampleInt32 = pPlayBuffer[sampleIdx];
+      sampleInt32 += pPlayBuffer[sampleIdx + 1];
+      sampleInt32 /= 2;
+
+      if (sampleInt32 > 32767) {
+        sampleInt32 = 32767;
+      } else if (sampleInt32 < -32768) {
+        sampleInt32 = -32768;
+      }
+
+      pPlayBuffer[sampleIdx] = 0;
+      pPlayBuffer[sampleIdx + 1] = static_cast<SInt16>(sampleInt32);
+    }
+  }
+
+  PaUtil_WriteRingBuffer(_paRenderBuffer, pPlayBuffer, nOutSamples);
+
+  return true;
+}
+
+bool AudioDeviceMac::CaptureWorkerThread() {
+  OSStatus err = noErr;
+  UInt32 noRecSamples =
+      ENGINE_REC_BUF_SIZE_IN_SAMPLES * _inDesiredFormat.mChannelsPerFrame;
+  SInt16 recordBuffer[noRecSamples];
+  UInt32 size = ENGINE_REC_BUF_SIZE_IN_SAMPLES;
+
+  AudioBufferList engineBuffer;
+  engineBuffer.mNumberBuffers = 1;  // Interleaved channels.
+  engineBuffer.mBuffers->mNumberChannels = _inDesiredFormat.mChannelsPerFrame;
+  engineBuffer.mBuffers->mDataByteSize =
+      _inDesiredFormat.mBytesPerPacket * noRecSamples;
+  engineBuffer.mBuffers->mData = recordBuffer;
+
+  err = AudioConverterFillComplexBuffer(_captureConverter, inConverterProc,
+                                        this, &size, &engineBuffer, NULL);
+  if (err != noErr) {
+    if (err == 1) {
+      // This is our own error.
+      return false;
+    } else {
+      logCAMsg(rtc::LS_ERROR, "Error in AudioConverterFillComplexBuffer()",
+               (const char*)&err);
+      return false;
+    }
+  }
+
+  // TODO(xians): what if the returned size is incorrect?
+  if (size == ENGINE_REC_BUF_SIZE_IN_SAMPLES) {
+    int32_t msecOnPlaySide;
+    int32_t msecOnRecordSide;
+
+    int32_t captureDelayUs = _captureDelayUs;
+    int32_t renderDelayUs = _renderDelayUs;
+
+    msecOnPlaySide =
+        static_cast<int32_t>(1e-3 * (renderDelayUs + _renderLatencyUs) + 0.5);
+    msecOnRecordSide =
+        static_cast<int32_t>(1e-3 * (captureDelayUs + _captureLatencyUs) + 0.5);
+
+    if (!_ptrAudioBuffer) {
+      RTC_LOG(LS_ERROR) << "capture AudioBuffer is invalid";
+      return false;
+    }
+
+    // Store the recorded buffer (no action will be taken if the number of
+    // recorded samples is not a full buffer).
+    _ptrAudioBuffer->SetRecordedBuffer((int8_t*)&recordBuffer, (uint32_t)size);
+    _ptrAudioBuffer->SetVQEData(msecOnPlaySide, msecOnRecordSide);
+    _ptrAudioBuffer->SetTypingStatus(KeyPressed());
+
+    // Deliver recorded samples at the specified sample rate, mic level, etc.
+    // to the observer using the callback.
+    _ptrAudioBuffer->DeliverRecordedData();
+  }
+
+  return true;
+}
+
+bool AudioDeviceMac::KeyPressed() {
+  bool key_down = false;
+  // Loop through all Mac virtual key constant values.
+  for (unsigned int key_index = 0; key_index < arraysize(prev_key_state_);
+       ++key_index) {
+    bool keyState =
+        CGEventSourceKeyState(kCGEventSourceStateHIDSystemState, key_index);
+    // A false -> true change in keymap means a key is pressed.
+    key_down |= (keyState && !prev_key_state_[key_index]);
+    // Save current state.
+    prev_key_state_[key_index] = keyState;
+  }
+  return key_down;
+}
+}  // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/mac/audio_device_mac.h b/third_party/libwebrtc/modules/audio_device/mac/audio_device_mac.h
new file mode 100644
index 0000000000..bb06395d03
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/mac/audio_device_mac.h
@@ -0,0 +1,350 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_DEVICE_AUDIO_DEVICE_MAC_H_
+#define AUDIO_DEVICE_AUDIO_DEVICE_MAC_H_
+
+#include <AudioToolbox/AudioConverter.h>
+#include <CoreAudio/CoreAudio.h>
+#include <mach/semaphore.h>
+
+#include <atomic>
+#include <memory>
+
+#include "absl/strings/string_view.h"
+#include "modules/audio_device/audio_device_generic.h"
+#include "modules/audio_device/mac/audio_mixer_manager_mac.h"
+#include "rtc_base/event.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
+
+struct PaUtilRingBuffer;
+
+namespace webrtc {
+
+const uint32_t N_REC_SAMPLES_PER_SEC = 48000;
+const uint32_t N_PLAY_SAMPLES_PER_SEC = 48000;
+
+const uint32_t N_REC_CHANNELS = 1;   // default is mono recording
+const uint32_t N_PLAY_CHANNELS = 2;  // default is stereo playout
+const uint32_t N_DEVICE_CHANNELS = 64;
+
+const int kBufferSizeMs = 10;
+
+const uint32_t ENGINE_REC_BUF_SIZE_IN_SAMPLES =
+    N_REC_SAMPLES_PER_SEC * kBufferSizeMs / 1000;
+const uint32_t ENGINE_PLAY_BUF_SIZE_IN_SAMPLES =
+    N_PLAY_SAMPLES_PER_SEC * kBufferSizeMs / 1000;
+
+const int N_BLOCKS_IO = 2;
+const int N_BUFFERS_IN = 2;   // Must be at least N_BLOCKS_IO.
+const int N_BUFFERS_OUT = 3;  // Must be at least N_BLOCKS_IO.
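+
+// Timeout used when the worker threads wait on the capture/render semaphores.
+// Note: the value is assigned to mach_timespec_t::tv_nsec, so despite the
+// name it is a duration in nanoseconds (2 * 10 * N_BLOCKS_IO * 1e6 = 40 ms).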
+const uint32_t TIMER_PERIOD_MS = 2 * 10 * N_BLOCKS_IO * 1000000;
+
+const uint32_t REC_BUF_SIZE_IN_SAMPLES =
+    ENGINE_REC_BUF_SIZE_IN_SAMPLES * N_DEVICE_CHANNELS * N_BUFFERS_IN;
+const uint32_t PLAY_BUF_SIZE_IN_SAMPLES =
+    ENGINE_PLAY_BUF_SIZE_IN_SAMPLES * N_PLAY_CHANNELS * N_BUFFERS_OUT;
+
+const int kGetMicVolumeIntervalMs = 1000;
+
+class AudioDeviceMac : public AudioDeviceGeneric {
+ public:
+  AudioDeviceMac();
+  ~AudioDeviceMac();
+
+  // Retrieve the currently utilized audio layer
+  virtual int32_t ActiveAudioLayer(
+      AudioDeviceModule::AudioLayer& audioLayer) const;
+
+  // Main initialization and termination
+  virtual InitStatus Init() RTC_LOCKS_EXCLUDED(mutex_);
+  virtual int32_t Terminate() RTC_LOCKS_EXCLUDED(mutex_);
+  virtual bool Initialized() const;
+
+  // Device enumeration
+  virtual int16_t PlayoutDevices();
+  virtual int16_t RecordingDevices();
+  virtual int32_t PlayoutDeviceName(uint16_t index,
+                                    char name[kAdmMaxDeviceNameSize],
+                                    char guid[kAdmMaxGuidSize]);
+  virtual int32_t RecordingDeviceName(uint16_t index,
+                                      char name[kAdmMaxDeviceNameSize],
+                                      char guid[kAdmMaxGuidSize]);
+
+  // Device selection
+  virtual int32_t SetPlayoutDevice(uint16_t index) RTC_LOCKS_EXCLUDED(mutex_);
+  virtual int32_t SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType device);
+  virtual int32_t SetRecordingDevice(uint16_t index);
+  virtual int32_t SetRecordingDevice(
+      AudioDeviceModule::WindowsDeviceType device);
+
+  // Audio transport initialization
+  virtual int32_t PlayoutIsAvailable(bool& available);
+  virtual int32_t InitPlayout() RTC_LOCKS_EXCLUDED(mutex_);
+  virtual bool PlayoutIsInitialized() const;
+  virtual int32_t RecordingIsAvailable(bool& available);
+  virtual int32_t InitRecording() RTC_LOCKS_EXCLUDED(mutex_);
+  virtual bool RecordingIsInitialized() const;
+
+  // Audio transport control
+  virtual int32_t StartPlayout() RTC_LOCKS_EXCLUDED(mutex_);
+  virtual int32_t StopPlayout() RTC_LOCKS_EXCLUDED(mutex_);
+  virtual bool Playing() const;
+  virtual int32_t StartRecording() RTC_LOCKS_EXCLUDED(mutex_);
+  virtual int32_t StopRecording() RTC_LOCKS_EXCLUDED(mutex_);
+  virtual bool Recording() const;
+
+  // Audio mixer initialization
+  virtual int32_t InitSpeaker() RTC_LOCKS_EXCLUDED(mutex_);
+  virtual bool SpeakerIsInitialized() const;
+  virtual int32_t InitMicrophone() RTC_LOCKS_EXCLUDED(mutex_);
+  virtual bool MicrophoneIsInitialized() const;
+
+  // Speaker volume controls
+  virtual int32_t SpeakerVolumeIsAvailable(bool& available)
+      RTC_LOCKS_EXCLUDED(mutex_);
+  virtual int32_t SetSpeakerVolume(uint32_t volume);
+  virtual int32_t SpeakerVolume(uint32_t& volume) const;
+  virtual int32_t MaxSpeakerVolume(uint32_t& maxVolume) const;
+  virtual int32_t MinSpeakerVolume(uint32_t& minVolume) const;
+
+  // Microphone volume controls
+  virtual int32_t MicrophoneVolumeIsAvailable(bool& available)
+      RTC_LOCKS_EXCLUDED(mutex_);
+  virtual int32_t SetMicrophoneVolume(uint32_t volume);
+  virtual int32_t MicrophoneVolume(uint32_t& volume) const;
+  virtual int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const;
+  virtual int32_t MinMicrophoneVolume(uint32_t& minVolume) const;
+
+  // Microphone mute control
+  virtual int32_t MicrophoneMuteIsAvailable(bool& available)
+      RTC_LOCKS_EXCLUDED(mutex_);
+  virtual int32_t SetMicrophoneMute(bool enable);
+  virtual int32_t MicrophoneMute(bool& enabled) const;
+
+  // Speaker mute control
+  virtual int32_t SpeakerMuteIsAvailable(bool& available)
+      RTC_LOCKS_EXCLUDED(mutex_);
+  virtual int32_t SetSpeakerMute(bool enable);
+  virtual int32_t SpeakerMute(bool& enabled) const;
+
+  // Stereo support
+  virtual int32_t StereoPlayoutIsAvailable(bool& available)
+      RTC_LOCKS_EXCLUDED(mutex_);
+  virtual int32_t SetStereoPlayout(bool enable);
+  virtual int32_t StereoPlayout(bool& enabled) const;
+  virtual int32_t StereoRecordingIsAvailable(bool& available);
+  virtual int32_t SetStereoRecording(bool enable);
+  virtual int32_t StereoRecording(bool& enabled) const;
+
+  // Delay information and control
+  virtual int32_t PlayoutDelay(uint16_t& delayMS) const;
+
+  virtual void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer)
+      RTC_LOCKS_EXCLUDED(mutex_);
+
+ private:
+  int32_t InitSpeakerLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+  int32_t InitMicrophoneLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+  virtual int32_t MicrophoneIsAvailable(bool& available)
+      RTC_LOCKS_EXCLUDED(mutex_);
+  virtual int32_t MicrophoneIsAvailableLocked(bool& available)
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+  virtual int32_t SpeakerIsAvailable(bool& available)
+      RTC_LOCKS_EXCLUDED(mutex_);
+  virtual int32_t SpeakerIsAvailableLocked(bool& available)
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+  static void AtomicSet32(int32_t* theValue, int32_t newValue);
+  static int32_t AtomicGet32(int32_t* theValue);
+
+  static void logCAMsg(rtc::LoggingSeverity sev,
+                       const char* msg,
+                       const char* err);
+
+  int32_t GetNumberDevices(AudioObjectPropertyScope scope,
+                           AudioDeviceID scopedDeviceIds[],
+                           uint32_t deviceListLength);
+
+  int32_t GetDeviceName(AudioObjectPropertyScope scope,
+                        uint16_t index,
+                        rtc::ArrayView<char> name);
+
+  int32_t InitDevice(uint16_t userDeviceIndex,
+                     AudioDeviceID& deviceId,
+                     bool isInput);
+
+  // Always work with our preferred playout format inside VoE.
+  // Then convert the output to the OS setting using an AudioConverter.
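+  // For example, if the device is running at 44.1 kHz stereo, the converter
+  // resamples the engine's fixed 48 kHz output (N_PLAY_SAMPLES_PER_SEC) to
+  // the device rate, so the engine never deals with the native format.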
+ OSStatus SetDesiredPlayoutFormat(); + + static OSStatus objectListenerProc( + AudioObjectID objectId, + UInt32 numberAddresses, + const AudioObjectPropertyAddress addresses[], + void* clientData); + + OSStatus implObjectListenerProc(AudioObjectID objectId, + UInt32 numberAddresses, + const AudioObjectPropertyAddress addresses[]); + + int32_t HandleDeviceChange(); + + int32_t HandleStreamFormatChange(AudioObjectID objectId, + AudioObjectPropertyAddress propertyAddress); + + int32_t HandleDataSourceChange(AudioObjectID objectId, + AudioObjectPropertyAddress propertyAddress); + + int32_t HandleProcessorOverload(AudioObjectPropertyAddress propertyAddress); + + static OSStatus deviceIOProc(AudioDeviceID device, + const AudioTimeStamp* now, + const AudioBufferList* inputData, + const AudioTimeStamp* inputTime, + AudioBufferList* outputData, + const AudioTimeStamp* outputTime, + void* clientData); + + static OSStatus outConverterProc( + AudioConverterRef audioConverter, + UInt32* numberDataPackets, + AudioBufferList* data, + AudioStreamPacketDescription** dataPacketDescription, + void* userData); + + static OSStatus inDeviceIOProc(AudioDeviceID device, + const AudioTimeStamp* now, + const AudioBufferList* inputData, + const AudioTimeStamp* inputTime, + AudioBufferList* outputData, + const AudioTimeStamp* outputTime, + void* clientData); + + static OSStatus inConverterProc( + AudioConverterRef audioConverter, + UInt32* numberDataPackets, + AudioBufferList* data, + AudioStreamPacketDescription** dataPacketDescription, + void* inUserData); + + OSStatus implDeviceIOProc(const AudioBufferList* inputData, + const AudioTimeStamp* inputTime, + AudioBufferList* outputData, + const AudioTimeStamp* outputTime) + RTC_LOCKS_EXCLUDED(mutex_); + + OSStatus implOutConverterProc(UInt32* numberDataPackets, + AudioBufferList* data); + + OSStatus implInDeviceIOProc(const AudioBufferList* inputData, + const AudioTimeStamp* inputTime) + RTC_LOCKS_EXCLUDED(mutex_); + + OSStatus implInConverterProc(UInt32* numberDataPackets, + AudioBufferList* data); + + static void RunCapture(void*); + static void RunRender(void*); + bool CaptureWorkerThread(); + bool RenderWorkerThread(); + + bool KeyPressed(); + + AudioDeviceBuffer* _ptrAudioBuffer; + + Mutex mutex_; + + rtc::Event _stopEventRec; + rtc::Event _stopEvent; + + // Only valid/running between calls to StartRecording and StopRecording. + rtc::PlatformThread capture_worker_thread_; + + // Only valid/running between calls to StartPlayout and StopPlayout. 
+  rtc::PlatformThread render_worker_thread_;
+
+  AudioMixerManagerMac _mixerManager;
+
+  uint16_t _inputDeviceIndex;
+  uint16_t _outputDeviceIndex;
+  AudioDeviceID _inputDeviceID;
+  AudioDeviceID _outputDeviceID;
+#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 1050
+  AudioDeviceIOProcID _inDeviceIOProcID;
+  AudioDeviceIOProcID _deviceIOProcID;
+#endif
+  bool _inputDeviceIsSpecified;
+  bool _outputDeviceIsSpecified;
+
+  uint8_t _recChannels;
+  uint8_t _playChannels;
+
+  Float32* _captureBufData;
+  SInt16* _renderBufData;
+
+  SInt16 _renderConvertData[PLAY_BUF_SIZE_IN_SAMPLES];
+
+  bool _initialized;
+  bool _isShutDown;
+  bool _recording;
+  bool _playing;
+  bool _recIsInitialized;
+  bool _playIsInitialized;
+
+  // Atomically set variables
+  std::atomic<int32_t> _renderDeviceIsAlive;
+  std::atomic<int32_t> _captureDeviceIsAlive;
+
+  bool _twoDevices;
+  bool _doStop;     // For play if not shared device or play+rec if shared device
+  bool _doStopRec;  // For rec if not shared device
+  bool _macBookPro;
+  bool _macBookProPanRight;
+
+  AudioConverterRef _captureConverter;
+  AudioConverterRef _renderConverter;
+
+  AudioStreamBasicDescription _outStreamFormat;
+  AudioStreamBasicDescription _outDesiredFormat;
+  AudioStreamBasicDescription _inStreamFormat;
+  AudioStreamBasicDescription _inDesiredFormat;
+
+  uint32_t _captureLatencyUs;
+  uint32_t _renderLatencyUs;
+
+  // Atomically set variables
+  mutable std::atomic<int32_t> _captureDelayUs;
+  mutable std::atomic<int32_t> _renderDelayUs;
+
+  int32_t _renderDelayOffsetSamples;
+
+  PaUtilRingBuffer* _paCaptureBuffer;
+  PaUtilRingBuffer* _paRenderBuffer;
+
+  semaphore_t _renderSemaphore;
+  semaphore_t _captureSemaphore;
+
+  int _captureBufSizeSamples;
+  int _renderBufSizeSamples;
+
+  // Typing detection
+  // 0x5c is key "9", after that comes function keys.
+  bool prev_key_state_[0x5d];
+};
+
+}  // namespace webrtc
+
+#endif  // AUDIO_DEVICE_AUDIO_DEVICE_MAC_H_
diff --git a/third_party/libwebrtc/modules/audio_device/mac/audio_mixer_manager_mac.cc b/third_party/libwebrtc/modules/audio_device/mac/audio_mixer_manager_mac.cc
new file mode 100644
index 0000000000..942e7db3b3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/mac/audio_mixer_manager_mac.cc
@@ -0,0 +1,924 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/mac/audio_mixer_manager_mac.h"
+
+#include <unistd.h>  // getpid()
+
+#include "rtc_base/system/arch.h"
+
+namespace webrtc {
+
+#define WEBRTC_CA_RETURN_ON_ERR(expr)                                \
+  do {                                                               \
+    err = expr;                                                      \
+    if (err != noErr) {                                              \
+      logCAMsg(rtc::LS_ERROR, "Error in " #expr, (const char*)&err); \
+      return -1;                                                     \
+    }                                                                \
+  } while (0)
+
+#define WEBRTC_CA_LOG_ERR(expr)                                      \
+  do {                                                               \
+    err = expr;                                                      \
+    if (err != noErr) {                                              \
+      logCAMsg(rtc::LS_ERROR, "Error in " #expr, (const char*)&err); \
+    }                                                                \
+  } while (0)
+
+#define WEBRTC_CA_LOG_WARN(expr)                                       \
+  do {                                                                 \
+    err = expr;                                                        \
+    if (err != noErr) {                                                \
+      logCAMsg(rtc::LS_WARNING, "Error in " #expr, (const char*)&err); \
+    }                                                                  \
+  } while (0)
+
+AudioMixerManagerMac::AudioMixerManagerMac()
+    : _inputDeviceID(kAudioObjectUnknown),
+      _outputDeviceID(kAudioObjectUnknown),
+      _noInputChannels(0),
+      _noOutputChannels(0) {
+  RTC_DLOG(LS_INFO) << __FUNCTION__ << " created";
+}
+
+AudioMixerManagerMac::~AudioMixerManagerMac() {
+  RTC_DLOG(LS_INFO) << __FUNCTION__ << " destroyed";
+  Close();
+}
+
+// ============================================================================
+//	                                PUBLIC METHODS
+// ============================================================================
+
+int32_t AudioMixerManagerMac::Close() {
+  RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+  MutexLock lock(&mutex_);
+
+  CloseSpeakerLocked();
+  CloseMicrophoneLocked();
+
+  return 0;
+}
+
+int32_t AudioMixerManagerMac::CloseSpeaker() {
+  MutexLock lock(&mutex_);
+  return CloseSpeakerLocked();
+}
+
+int32_t AudioMixerManagerMac::CloseSpeakerLocked() {
+  RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+  _outputDeviceID = kAudioObjectUnknown;
+  _noOutputChannels = 0;
+
+  return 0;
+}
+
+int32_t AudioMixerManagerMac::CloseMicrophone() {
+  MutexLock lock(&mutex_);
+  return CloseMicrophoneLocked();
+}
+
+int32_t AudioMixerManagerMac::CloseMicrophoneLocked() {
+  RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+  _inputDeviceID = kAudioObjectUnknown;
+  _noInputChannels = 0;
+
+  return 0;
+}
+
+int32_t AudioMixerManagerMac::OpenSpeaker(AudioDeviceID deviceID) {
+  RTC_LOG(LS_VERBOSE) << "AudioMixerManagerMac::OpenSpeaker(id=" << deviceID
+                      << ")";
+
+  MutexLock lock(&mutex_);
+
+  OSStatus err = noErr;
+  UInt32 size = 0;
+  pid_t hogPid = -1;
+
+  _outputDeviceID = deviceID;
+
+  // Check which process, if any, has hogged the device.
+  AudioObjectPropertyAddress propertyAddress = {
+      kAudioDevicePropertyHogMode, kAudioDevicePropertyScopeOutput, 0};
+
+  // First, does it have the property? Aggregate devices don't.
+  if (AudioObjectHasProperty(_outputDeviceID, &propertyAddress)) {
+    size = sizeof(hogPid);
+    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+        _outputDeviceID, &propertyAddress, 0, NULL, &size, &hogPid));
+
+    if (hogPid == -1) {
+      RTC_LOG(LS_VERBOSE) << "No process has hogged the output device";
+    }
+    // getpid() is apparently "always successful"
+    else if (hogPid == getpid()) {
+      RTC_LOG(LS_VERBOSE) << "Our process has hogged the output device";
+    } else {
+      RTC_LOG(LS_WARNING) << "Another process (pid = "
+                          << static_cast<int>(hogPid)
+                          << ") has hogged the output device";
+
+      return -1;
+    }
+  }
+
+  // get number of channels from stream format
+  propertyAddress.mSelector = kAudioDevicePropertyStreamFormat;
+
+  // Get the stream format, to be able to read the number of channels.
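+  // The query below returns an AudioStreamBasicDescription; only its
+  // mChannelsPerFrame field is consumed here.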
+  AudioStreamBasicDescription streamFormat;
+  size = sizeof(AudioStreamBasicDescription);
+  memset(&streamFormat, 0, size);
+  WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+      _outputDeviceID, &propertyAddress, 0, NULL, &size, &streamFormat));
+
+  _noOutputChannels = streamFormat.mChannelsPerFrame;
+
+  return 0;
+}
+
+int32_t AudioMixerManagerMac::OpenMicrophone(AudioDeviceID deviceID) {
+  RTC_LOG(LS_VERBOSE) << "AudioMixerManagerMac::OpenMicrophone(id=" << deviceID
+                      << ")";
+
+  MutexLock lock(&mutex_);
+
+  OSStatus err = noErr;
+  UInt32 size = 0;
+  pid_t hogPid = -1;
+
+  _inputDeviceID = deviceID;
+
+  // Check which process, if any, has hogged the device.
+  AudioObjectPropertyAddress propertyAddress = {
+      kAudioDevicePropertyHogMode, kAudioDevicePropertyScopeInput, 0};
+  size = sizeof(hogPid);
+  WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+      _inputDeviceID, &propertyAddress, 0, NULL, &size, &hogPid));
+  if (hogPid == -1) {
+    RTC_LOG(LS_VERBOSE) << "No process has hogged the input device";
+  }
+  // getpid() is apparently "always successful"
+  else if (hogPid == getpid()) {
+    RTC_LOG(LS_VERBOSE) << "Our process has hogged the input device";
+  } else {
+    RTC_LOG(LS_WARNING) << "Another process (pid = " << static_cast<int>(hogPid)
+                        << ") has hogged the input device";
+
+    return -1;
+  }
+
+  // get number of channels from stream format
+  propertyAddress.mSelector = kAudioDevicePropertyStreamFormat;
+
+  // Get the stream format, to be able to read the number of channels.
+  AudioStreamBasicDescription streamFormat;
+  size = sizeof(AudioStreamBasicDescription);
+  memset(&streamFormat, 0, size);
+  WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+      _inputDeviceID, &propertyAddress, 0, NULL, &size, &streamFormat));
+
+  _noInputChannels = streamFormat.mChannelsPerFrame;
+
+  return 0;
+}
+
+bool AudioMixerManagerMac::SpeakerIsInitialized() const {
+  RTC_DLOG(LS_INFO) << __FUNCTION__;
+
+  return (_outputDeviceID != kAudioObjectUnknown);
+}
+
+bool AudioMixerManagerMac::MicrophoneIsInitialized() const {
+  RTC_DLOG(LS_INFO) << __FUNCTION__;
+
+  return (_inputDeviceID != kAudioObjectUnknown);
+}
+
+int32_t AudioMixerManagerMac::SetSpeakerVolume(uint32_t volume) {
+  RTC_LOG(LS_VERBOSE) << "AudioMixerManagerMac::SetSpeakerVolume(volume="
+                      << volume << ")";
+
+  MutexLock lock(&mutex_);
+
+  if (_outputDeviceID == kAudioObjectUnknown) {
+    RTC_LOG(LS_WARNING) << "device ID has not been set";
+    return -1;
+  }
+
+  OSStatus err = noErr;
+  UInt32 size = 0;
+  bool success = false;
+
+  // volume range is 0.0 - 1.0, convert from 0 - 255
+  const Float32 vol = (Float32)(volume / 255.0);
+
+  RTC_DCHECK(vol <= 1.0 && vol >= 0.0);
+
+  // Does the render device have a master volume control?
+  // If so, use it exclusively.
+  AudioObjectPropertyAddress propertyAddress = {
+      kAudioDevicePropertyVolumeScalar, kAudioDevicePropertyScopeOutput, 0};
+  Boolean isSettable = false;
+  err = AudioObjectIsPropertySettable(_outputDeviceID, &propertyAddress,
+                                      &isSettable);
+  if (err == noErr && isSettable) {
+    size = sizeof(vol);
+    WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(
+        _outputDeviceID, &propertyAddress, 0, NULL, size, &vol));
+
+    return 0;
+  }
+
+  // Otherwise try to set each channel.
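+  // CoreAudio convention: element 0 addresses the master control, while
+  // elements 1..mChannelsPerFrame address the individual channels, which is
+  // what the loop below iterates over.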
+  for (UInt32 i = 1; i <= _noOutputChannels; i++) {
+    propertyAddress.mElement = i;
+    isSettable = false;
+    err = AudioObjectIsPropertySettable(_outputDeviceID, &propertyAddress,
+                                        &isSettable);
+    if (err == noErr && isSettable) {
+      size = sizeof(vol);
+      WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(
+          _outputDeviceID, &propertyAddress, 0, NULL, size, &vol));
+    }
+    success = true;
+  }
+
+  if (!success) {
+    RTC_LOG(LS_WARNING) << "Unable to set a volume on any output channel";
+    return -1;
+  }
+
+  return 0;
+}
+
+int32_t AudioMixerManagerMac::SpeakerVolume(uint32_t& volume) const {
+  if (_outputDeviceID == kAudioObjectUnknown) {
+    RTC_LOG(LS_WARNING) << "device ID has not been set";
+    return -1;
+  }
+
+  OSStatus err = noErr;
+  UInt32 size = 0;
+  unsigned int channels = 0;
+  Float32 channelVol = 0;
+  Float32 vol = 0;
+
+  // Does the device have a master volume control?
+  // If so, use it exclusively.
+  AudioObjectPropertyAddress propertyAddress = {
+      kAudioDevicePropertyVolumeScalar, kAudioDevicePropertyScopeOutput, 0};
+  Boolean hasProperty =
+      AudioObjectHasProperty(_outputDeviceID, &propertyAddress);
+  if (hasProperty) {
+    size = sizeof(vol);
+    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+        _outputDeviceID, &propertyAddress, 0, NULL, &size, &vol));
+
+    // vol 0.0 to 1.0 -> convert to 0 - 255
+    volume = static_cast<uint32_t>(vol * 255 + 0.5);
+  } else {
+    // Otherwise get the average volume across channels.
+    vol = 0;
+    for (UInt32 i = 1; i <= _noOutputChannels; i++) {
+      channelVol = 0;
+      propertyAddress.mElement = i;
+      hasProperty = AudioObjectHasProperty(_outputDeviceID, &propertyAddress);
+      if (hasProperty) {
+        size = sizeof(channelVol);
+        WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+            _outputDeviceID, &propertyAddress, 0, NULL, &size, &channelVol));
+
+        vol += channelVol;
+        channels++;
+      }
+    }
+
+    if (channels == 0) {
+      RTC_LOG(LS_WARNING) << "Unable to get a volume on any channel";
+      return -1;
+    }
+
+    RTC_DCHECK_GT(channels, 0);
+    // vol 0.0 to 1.0 -> convert to 0 - 255
+    volume = static_cast<uint32_t>(255 * vol / channels + 0.5);
+  }
+
+  RTC_LOG(LS_VERBOSE) << "AudioMixerManagerMac::SpeakerVolume() => vol=" << vol;
+
+  return 0;
+}
+
+int32_t AudioMixerManagerMac::MaxSpeakerVolume(uint32_t& maxVolume) const {
+  if (_outputDeviceID == kAudioObjectUnknown) {
+    RTC_LOG(LS_WARNING) << "device ID has not been set";
+    return -1;
+  }
+
+  // volume range is 0.0 to 1.0
+  // we convert that to 0 - 255
+  maxVolume = 255;
+
+  return 0;
+}
+
+int32_t AudioMixerManagerMac::MinSpeakerVolume(uint32_t& minVolume) const {
+  if (_outputDeviceID == kAudioObjectUnknown) {
+    RTC_LOG(LS_WARNING) << "device ID has not been set";
+    return -1;
+  }
+
+  // volume range is 0.0 to 1.0
+  // we convert that to 0 - 255
+  minVolume = 0;
+
+  return 0;
+}
+
+int32_t AudioMixerManagerMac::SpeakerVolumeIsAvailable(bool& available) {
+  if (_outputDeviceID == kAudioObjectUnknown) {
+    RTC_LOG(LS_WARNING) << "device ID has not been set";
+    return -1;
+  }
+
+  OSStatus err = noErr;
+
+  // Does the render device have a master volume control?
+  // If so, use it exclusively.
+  AudioObjectPropertyAddress propertyAddress = {
+      kAudioDevicePropertyVolumeScalar, kAudioDevicePropertyScopeOutput, 0};
+  Boolean isSettable = false;
+  err = AudioObjectIsPropertySettable(_outputDeviceID, &propertyAddress,
+                                      &isSettable);
+  if (err == noErr && isSettable) {
+    available = true;
+    return 0;
+  }
+
+  // Otherwise try to set each channel.
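+  // With no master control, volume is only reported as available if every
+  // output channel is individually settable.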
+  for (UInt32 i = 1; i <= _noOutputChannels; i++) {
+    propertyAddress.mElement = i;
+    isSettable = false;
+    err = AudioObjectIsPropertySettable(_outputDeviceID, &propertyAddress,
+                                        &isSettable);
+    if (err != noErr || !isSettable) {
+      available = false;
+      RTC_LOG(LS_WARNING) << "Volume cannot be set for output channel " << i
+                          << ", err=" << err;
+      return -1;
+    }
+  }
+
+  available = true;
+  return 0;
+}
+
+int32_t AudioMixerManagerMac::SpeakerMuteIsAvailable(bool& available) {
+  if (_outputDeviceID == kAudioObjectUnknown) {
+    RTC_LOG(LS_WARNING) << "device ID has not been set";
+    return -1;
+  }
+
+  OSStatus err = noErr;
+
+  // Does the render device have a master mute control?
+  // If so, use it exclusively.
+  AudioObjectPropertyAddress propertyAddress = {
+      kAudioDevicePropertyMute, kAudioDevicePropertyScopeOutput, 0};
+  Boolean isSettable = false;
+  err = AudioObjectIsPropertySettable(_outputDeviceID, &propertyAddress,
+                                      &isSettable);
+  if (err == noErr && isSettable) {
+    available = true;
+    return 0;
+  }
+
+  // Otherwise try to set each channel.
+  for (UInt32 i = 1; i <= _noOutputChannels; i++) {
+    propertyAddress.mElement = i;
+    isSettable = false;
+    err = AudioObjectIsPropertySettable(_outputDeviceID, &propertyAddress,
+                                        &isSettable);
+    if (err != noErr || !isSettable) {
+      available = false;
+      RTC_LOG(LS_WARNING) << "Mute cannot be set for output channel " << i
+                          << ", err=" << err;
+      return -1;
+    }
+  }
+
+  available = true;
+  return 0;
+}
+
+int32_t AudioMixerManagerMac::SetSpeakerMute(bool enable) {
+  RTC_LOG(LS_VERBOSE) << "AudioMixerManagerMac::SetSpeakerMute(enable="
+                      << enable << ")";
+
+  MutexLock lock(&mutex_);
+
+  if (_outputDeviceID == kAudioObjectUnknown) {
+    RTC_LOG(LS_WARNING) << "device ID has not been set";
+    return -1;
+  }
+
+  OSStatus err = noErr;
+  UInt32 size = 0;
+  UInt32 mute = enable ? 1 : 0;
+  bool success = false;
+
+  // Does the render device have a master mute control?
+  // If so, use it exclusively.
+  AudioObjectPropertyAddress propertyAddress = {
+      kAudioDevicePropertyMute, kAudioDevicePropertyScopeOutput, 0};
+  Boolean isSettable = false;
+  err = AudioObjectIsPropertySettable(_outputDeviceID, &propertyAddress,
+                                      &isSettable);
+  if (err == noErr && isSettable) {
+    size = sizeof(mute);
+    WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(
+        _outputDeviceID, &propertyAddress, 0, NULL, size, &mute));
+
+    return 0;
+  }
+
+  // Otherwise try to set each channel.
+  for (UInt32 i = 1; i <= _noOutputChannels; i++) {
+    propertyAddress.mElement = i;
+    isSettable = false;
+    err = AudioObjectIsPropertySettable(_outputDeviceID, &propertyAddress,
+                                        &isSettable);
+    if (err == noErr && isSettable) {
+      size = sizeof(mute);
+      WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(
+          _outputDeviceID, &propertyAddress, 0, NULL, size, &mute));
+    }
+    success = true;
+  }
+
+  if (!success) {
+    RTC_LOG(LS_WARNING) << "Unable to set mute on any output channel";
+    return -1;
+  }
+
+  return 0;
+}
+
+int32_t AudioMixerManagerMac::SpeakerMute(bool& enabled) const {
+  if (_outputDeviceID == kAudioObjectUnknown) {
+    RTC_LOG(LS_WARNING) << "device ID has not been set";
+    return -1;
+  }
+
+  OSStatus err = noErr;
+  UInt32 size = 0;
+  unsigned int channels = 0;
+  UInt32 channelMuted = 0;
+  UInt32 muted = 0;
+
+  // Does the device have a master mute control?
+  // If so, use it exclusively.
+  AudioObjectPropertyAddress propertyAddress = {
+      kAudioDevicePropertyMute, kAudioDevicePropertyScopeOutput, 0};
+  Boolean hasProperty =
+      AudioObjectHasProperty(_outputDeviceID, &propertyAddress);
+  if (hasProperty) {
+    size = sizeof(muted);
+    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+        _outputDeviceID, &propertyAddress, 0, NULL, &size, &muted));
+
+    // 1 means muted
+    enabled = static_cast<bool>(muted);
+  } else {
+    // Otherwise check if all channels are muted.
+    for (UInt32 i = 1; i <= _noOutputChannels; i++) {
+      muted = 0;
+      propertyAddress.mElement = i;
+      hasProperty = AudioObjectHasProperty(_outputDeviceID, &propertyAddress);
+      if (hasProperty) {
+        size = sizeof(channelMuted);
+        WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+            _outputDeviceID, &propertyAddress, 0, NULL, &size, &channelMuted));
+
+        muted = (muted && channelMuted);
+        channels++;
+      }
+    }
+
+    if (channels == 0) {
+      RTC_LOG(LS_WARNING) << "Unable to get mute for any channel";
+      return -1;
+    }
+
+    RTC_DCHECK_GT(channels, 0);
+    // 1 means muted
+    enabled = static_cast<bool>(muted);
+  }
+
+  RTC_LOG(LS_VERBOSE) << "AudioMixerManagerMac::SpeakerMute() => enabled="
+                      << enabled;
+
+  return 0;
+}
+
+int32_t AudioMixerManagerMac::StereoPlayoutIsAvailable(bool& available) {
+  if (_outputDeviceID == kAudioObjectUnknown) {
+    RTC_LOG(LS_WARNING) << "device ID has not been set";
+    return -1;
+  }
+
+  available = (_noOutputChannels == 2);
+  return 0;
+}
+
+int32_t AudioMixerManagerMac::StereoRecordingIsAvailable(bool& available) {
+  if (_inputDeviceID == kAudioObjectUnknown) {
+    RTC_LOG(LS_WARNING) << "device ID has not been set";
+    return -1;
+  }
+
+  available = (_noInputChannels == 2);
+  return 0;
+}
+
+int32_t AudioMixerManagerMac::MicrophoneMuteIsAvailable(bool& available) {
+  if (_inputDeviceID == kAudioObjectUnknown) {
+    RTC_LOG(LS_WARNING) << "device ID has not been set";
+    return -1;
+  }
+
+  OSStatus err = noErr;
+
+  // Does the capture device have a master mute control?
+  // If so, use it exclusively.
+  AudioObjectPropertyAddress propertyAddress = {
+      kAudioDevicePropertyMute, kAudioDevicePropertyScopeInput, 0};
+  Boolean isSettable = false;
+  err = AudioObjectIsPropertySettable(_inputDeviceID, &propertyAddress,
+                                      &isSettable);
+  if (err == noErr && isSettable) {
+    available = true;
+    return 0;
+  }
+
+  // Otherwise try to set each channel.
+  for (UInt32 i = 1; i <= _noInputChannels; i++) {
+    propertyAddress.mElement = i;
+    isSettable = false;
+    err = AudioObjectIsPropertySettable(_inputDeviceID, &propertyAddress,
+                                        &isSettable);
+    if (err != noErr || !isSettable) {
+      available = false;
+      RTC_LOG(LS_WARNING) << "Mute cannot be set for input channel " << i
+                          << ", err=" << err;
+      return -1;
+    }
+  }
+
+  available = true;
+  return 0;
+}
+
+int32_t AudioMixerManagerMac::SetMicrophoneMute(bool enable) {
+  RTC_LOG(LS_VERBOSE) << "AudioMixerManagerMac::SetMicrophoneMute(enable="
+                      << enable << ")";
+
+  MutexLock lock(&mutex_);
+
+  if (_inputDeviceID == kAudioObjectUnknown) {
+    RTC_LOG(LS_WARNING) << "device ID has not been set";
+    return -1;
+  }
+
+  OSStatus err = noErr;
+  UInt32 size = 0;
+  UInt32 mute = enable ? 1 : 0;
+  bool success = false;
+
+  // Does the capture device have a master mute control?
+  // If so, use it exclusively.
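+  // kAudioDevicePropertyMute stores a UInt32 flag: 1 = muted, 0 = unmuted.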
+  AudioObjectPropertyAddress propertyAddress = {
+      kAudioDevicePropertyMute, kAudioDevicePropertyScopeInput, 0};
+  Boolean isSettable = false;
+  err = AudioObjectIsPropertySettable(_inputDeviceID, &propertyAddress,
+                                      &isSettable);
+  if (err == noErr && isSettable) {
+    size = sizeof(mute);
+    WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(
+        _inputDeviceID, &propertyAddress, 0, NULL, size, &mute));
+
+    return 0;
+  }
+
+  // Otherwise try to set each channel.
+  for (UInt32 i = 1; i <= _noInputChannels; i++) {
+    propertyAddress.mElement = i;
+    isSettable = false;
+    err = AudioObjectIsPropertySettable(_inputDeviceID, &propertyAddress,
+                                        &isSettable);
+    if (err == noErr && isSettable) {
+      size = sizeof(mute);
+      WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(
+          _inputDeviceID, &propertyAddress, 0, NULL, size, &mute));
+    }
+    success = true;
+  }
+
+  if (!success) {
+    RTC_LOG(LS_WARNING) << "Unable to set mute on any input channel";
+    return -1;
+  }
+
+  return 0;
+}
+
+int32_t AudioMixerManagerMac::MicrophoneMute(bool& enabled) const {
+  if (_inputDeviceID == kAudioObjectUnknown) {
+    RTC_LOG(LS_WARNING) << "device ID has not been set";
+    return -1;
+  }
+
+  OSStatus err = noErr;
+  UInt32 size = 0;
+  unsigned int channels = 0;
+  UInt32 channelMuted = 0;
+  UInt32 muted = 0;
+
+  // Does the device have a master mute control?
+  // If so, use it exclusively.
+  AudioObjectPropertyAddress propertyAddress = {
+      kAudioDevicePropertyMute, kAudioDevicePropertyScopeInput, 0};
+  Boolean hasProperty =
+      AudioObjectHasProperty(_inputDeviceID, &propertyAddress);
+  if (hasProperty) {
+    size = sizeof(muted);
+    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+        _inputDeviceID, &propertyAddress, 0, NULL, &size, &muted));
+
+    // 1 means muted
+    enabled = static_cast<bool>(muted);
+  } else {
+    // Otherwise check if all channels are muted.
+    for (UInt32 i = 1; i <= _noInputChannels; i++) {
+      muted = 0;
+      propertyAddress.mElement = i;
+      hasProperty = AudioObjectHasProperty(_inputDeviceID, &propertyAddress);
+      if (hasProperty) {
+        size = sizeof(channelMuted);
+        WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+            _inputDeviceID, &propertyAddress, 0, NULL, &size, &channelMuted));
+
+        muted = (muted && channelMuted);
+        channels++;
+      }
+    }
+
+    if (channels == 0) {
+      RTC_LOG(LS_WARNING) << "Unable to get mute for any channel";
+      return -1;
+    }
+
+    RTC_DCHECK_GT(channels, 0);
+    // 1 means muted
+    enabled = static_cast<bool>(muted);
+  }
+
+  RTC_LOG(LS_VERBOSE) << "AudioMixerManagerMac::MicrophoneMute() => enabled="
+                      << enabled;
+
+  return 0;
+}
+
+int32_t AudioMixerManagerMac::MicrophoneVolumeIsAvailable(bool& available) {
+  if (_inputDeviceID == kAudioObjectUnknown) {
+    RTC_LOG(LS_WARNING) << "device ID has not been set";
+    return -1;
+  }
+
+  OSStatus err = noErr;
+
+  // Does the capture device have a master volume control?
+  // If so, use it exclusively.
+  AudioObjectPropertyAddress propertyAddress = {
+      kAudioDevicePropertyVolumeScalar, kAudioDevicePropertyScopeInput, 0};
+  Boolean isSettable = false;
+  err = AudioObjectIsPropertySettable(_inputDeviceID, &propertyAddress,
+                                      &isSettable);
+  if (err == noErr && isSettable) {
+    available = true;
+    return 0;
+  }
+
+  // Otherwise try to set each channel.
+  for (UInt32 i = 1; i <= _noInputChannels; i++) {
+    propertyAddress.mElement = i;
+    isSettable = false;
+    err = AudioObjectIsPropertySettable(_inputDeviceID, &propertyAddress,
+                                        &isSettable);
+    if (err != noErr || !isSettable) {
+      available = false;
+      RTC_LOG(LS_WARNING) << "Volume cannot be set for input channel " << i
+                          << ", err=" << err;
+      return -1;
+    }
+  }
+
+  available = true;
+  return 0;
+}
+
+int32_t AudioMixerManagerMac::SetMicrophoneVolume(uint32_t volume) {
+  RTC_LOG(LS_VERBOSE) << "AudioMixerManagerMac::SetMicrophoneVolume(volume="
+                      << volume << ")";
+
+  MutexLock lock(&mutex_);
+
+  if (_inputDeviceID == kAudioObjectUnknown) {
+    RTC_LOG(LS_WARNING) << "device ID has not been set";
+    return -1;
+  }
+
+  OSStatus err = noErr;
+  UInt32 size = 0;
+  bool success = false;
+
+  // volume range is 0.0 - 1.0, convert from 0 - 255
+  const Float32 vol = (Float32)(volume / 255.0);
+
+  RTC_DCHECK(vol <= 1.0 && vol >= 0.0);
+
+  // Does the capture device have a master volume control?
+  // If so, use it exclusively.
+  AudioObjectPropertyAddress propertyAddress = {
+      kAudioDevicePropertyVolumeScalar, kAudioDevicePropertyScopeInput, 0};
+  Boolean isSettable = false;
+  err = AudioObjectIsPropertySettable(_inputDeviceID, &propertyAddress,
+                                      &isSettable);
+  if (err == noErr && isSettable) {
+    size = sizeof(vol);
+    WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(
+        _inputDeviceID, &propertyAddress, 0, NULL, size, &vol));
+
+    return 0;
+  }
+
+  // Otherwise try to set each channel.
+  for (UInt32 i = 1; i <= _noInputChannels; i++) {
+    propertyAddress.mElement = i;
+    isSettable = false;
+    err = AudioObjectIsPropertySettable(_inputDeviceID, &propertyAddress,
+                                        &isSettable);
+    if (err == noErr && isSettable) {
+      size = sizeof(vol);
+      WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(
+          _inputDeviceID, &propertyAddress, 0, NULL, size, &vol));
+    }
+    success = true;
+  }
+
+  if (!success) {
+    RTC_LOG(LS_WARNING) << "Unable to set a level on any input channel";
+    return -1;
+  }
+
+  return 0;
+}
+
+int32_t AudioMixerManagerMac::MicrophoneVolume(uint32_t& volume) const {
+  if (_inputDeviceID == kAudioObjectUnknown) {
+    RTC_LOG(LS_WARNING) << "device ID has not been set";
+    return -1;
+  }
+
+  OSStatus err = noErr;
+  UInt32 size = 0;
+  unsigned int channels = 0;
+  Float32 channelVol = 0;
+  Float32 volFloat32 = 0;
+
+  // Does the device have a master volume control?
+  // If so, use it exclusively.
+  AudioObjectPropertyAddress propertyAddress = {
+      kAudioDevicePropertyVolumeScalar, kAudioDevicePropertyScopeInput, 0};
+  Boolean hasProperty =
+      AudioObjectHasProperty(_inputDeviceID, &propertyAddress);
+  if (hasProperty) {
+    size = sizeof(volFloat32);
+    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+        _inputDeviceID, &propertyAddress, 0, NULL, &size, &volFloat32));
+
+    // vol 0.0 to 1.0 -> convert to 0 - 255
+    volume = static_cast<uint32_t>(volFloat32 * 255 + 0.5);
+  } else {
+    // Otherwise get the average volume across channels.
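+    // Channels without a volume control are skipped; the average is taken
+    // over the channels that actually report one.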
+    volFloat32 = 0;
+    for (UInt32 i = 1; i <= _noInputChannels; i++) {
+      channelVol = 0;
+      propertyAddress.mElement = i;
+      hasProperty = AudioObjectHasProperty(_inputDeviceID, &propertyAddress);
+      if (hasProperty) {
+        size = sizeof(channelVol);
+        WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+            _inputDeviceID, &propertyAddress, 0, NULL, &size, &channelVol));
+
+        volFloat32 += channelVol;
+        channels++;
+      }
+    }
+
+    if (channels == 0) {
+      RTC_LOG(LS_WARNING) << "Unable to get a level on any channel";
+      return -1;
+    }
+
+    RTC_DCHECK_GT(channels, 0);
+    // vol 0.0 to 1.0 -> convert to 0 - 255
+    volume = static_cast<uint32_t>(255 * volFloat32 / channels + 0.5);
+  }
+
+  RTC_LOG(LS_VERBOSE) << "AudioMixerManagerMac::MicrophoneVolume() => vol="
+                      << volume;
+
+  return 0;
+}
+
+int32_t AudioMixerManagerMac::MaxMicrophoneVolume(uint32_t& maxVolume) const {
+  if (_inputDeviceID == kAudioObjectUnknown) {
+    RTC_LOG(LS_WARNING) << "device ID has not been set";
+    return -1;
+  }
+
+  // volume range is 0.0 to 1.0
+  // we convert that to 0 - 255
+  maxVolume = 255;
+
+  return 0;
+}
+
+int32_t AudioMixerManagerMac::MinMicrophoneVolume(uint32_t& minVolume) const {
+  if (_inputDeviceID == kAudioObjectUnknown) {
+    RTC_LOG(LS_WARNING) << "device ID has not been set";
+    return -1;
+  }
+
+  // volume range is 0.0 to 1.0
+  // we convert that to 0 - 255
+  minVolume = 0;
+
+  return 0;
+}
+
+// ============================================================================
+//                                 Private Methods
+// ============================================================================
+
+// CoreAudio errors are best interpreted as four character strings.
+void AudioMixerManagerMac::logCAMsg(const rtc::LoggingSeverity sev,
+                                    const char* msg,
+                                    const char* err) {
+  RTC_DCHECK(msg != NULL);
+  RTC_DCHECK(err != NULL);
+  RTC_DCHECK(sev == rtc::LS_ERROR || sev == rtc::LS_WARNING);
+
+#ifdef WEBRTC_ARCH_BIG_ENDIAN
+  switch (sev) {
+    case rtc::LS_ERROR:
+      RTC_LOG(LS_ERROR) << msg << ": " << err[0] << err[1] << err[2] << err[3];
+      break;
+    case rtc::LS_WARNING:
+      RTC_LOG(LS_WARNING) << msg << ": " << err[0] << err[1] << err[2]
+                          << err[3];
+      break;
+    default:
+      break;
+  }
+#else
+  // We need to flip the characters in this case.
+  switch (sev) {
+    case rtc::LS_ERROR:
+      RTC_LOG(LS_ERROR) << msg << ": " << err[3] << err[2] << err[1] << err[0];
+      break;
+    case rtc::LS_WARNING:
+      RTC_LOG(LS_WARNING) << msg << ": " << err[3] << err[2] << err[1]
+                          << err[0];
+      break;
+    default:
+      break;
+  }
+#endif
+}
+
+}  // namespace webrtc
+// EOF
diff --git a/third_party/libwebrtc/modules/audio_device/mac/audio_mixer_manager_mac.h b/third_party/libwebrtc/modules/audio_device/mac/audio_mixer_manager_mac.h
new file mode 100644
index 0000000000..0ccab4879b
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/mac/audio_mixer_manager_mac.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_DEVICE_AUDIO_MIXER_MANAGER_MAC_H_
+#define AUDIO_DEVICE_AUDIO_MIXER_MANAGER_MAC_H_
+
+#include <CoreAudio/CoreAudio.h>
+
+#include "modules/audio_device/include/audio_device.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/synchronization/mutex.h"
+
+namespace webrtc {
+
+class AudioMixerManagerMac {
+ public:
+  int32_t OpenSpeaker(AudioDeviceID deviceID) RTC_LOCKS_EXCLUDED(mutex_);
+  int32_t OpenMicrophone(AudioDeviceID deviceID) RTC_LOCKS_EXCLUDED(mutex_);
+  int32_t SetSpeakerVolume(uint32_t volume) RTC_LOCKS_EXCLUDED(mutex_);
+  int32_t SpeakerVolume(uint32_t& volume) const;
+  int32_t MaxSpeakerVolume(uint32_t& maxVolume) const;
+  int32_t MinSpeakerVolume(uint32_t& minVolume) const;
+  int32_t SpeakerVolumeIsAvailable(bool& available);
+  int32_t SpeakerMuteIsAvailable(bool& available);
+  int32_t SetSpeakerMute(bool enable) RTC_LOCKS_EXCLUDED(mutex_);
+  int32_t SpeakerMute(bool& enabled) const;
+  int32_t StereoPlayoutIsAvailable(bool& available);
+  int32_t StereoRecordingIsAvailable(bool& available);
+  int32_t MicrophoneMuteIsAvailable(bool& available);
+  int32_t SetMicrophoneMute(bool enable) RTC_LOCKS_EXCLUDED(mutex_);
+  int32_t MicrophoneMute(bool& enabled) const;
+  int32_t MicrophoneVolumeIsAvailable(bool& available);
+  int32_t SetMicrophoneVolume(uint32_t volume) RTC_LOCKS_EXCLUDED(mutex_);
+  int32_t MicrophoneVolume(uint32_t& volume) const;
+  int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const;
+  int32_t MinMicrophoneVolume(uint32_t& minVolume) const;
+  int32_t Close() RTC_LOCKS_EXCLUDED(mutex_);
+  int32_t CloseSpeaker() RTC_LOCKS_EXCLUDED(mutex_);
+  int32_t CloseMicrophone() RTC_LOCKS_EXCLUDED(mutex_);
+  bool SpeakerIsInitialized() const;
+  bool MicrophoneIsInitialized() const;
+
+ public:
+  AudioMixerManagerMac();
+  ~AudioMixerManagerMac();
+
+ private:
+  int32_t CloseSpeakerLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+  int32_t CloseMicrophoneLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+  static void logCAMsg(rtc::LoggingSeverity sev,
+                       const char* msg,
+                       const char* err);
+
+ private:
+  Mutex mutex_;
+
+  AudioDeviceID _inputDeviceID;
+  AudioDeviceID _outputDeviceID;
+
+  uint16_t _noInputChannels;
+  uint16_t _noOutputChannels;
+};
+
+}  // namespace webrtc
+
+#endif  // AUDIO_DEVICE_AUDIO_MIXER_MANAGER_MAC_H_
diff --git a/third_party/libwebrtc/modules/audio_device/mock_audio_device_buffer.h b/third_party/libwebrtc/modules/audio_device/mock_audio_device_buffer.h
new file mode 100644
index 0000000000..b0f54c20ff
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/mock_audio_device_buffer.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_MOCK_AUDIO_DEVICE_BUFFER_H_
+#define MODULES_AUDIO_DEVICE_MOCK_AUDIO_DEVICE_BUFFER_H_
+
+#include "modules/audio_device/audio_device_buffer.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+
+class MockAudioDeviceBuffer : public AudioDeviceBuffer {
+ public:
+  using AudioDeviceBuffer::AudioDeviceBuffer;
+  virtual ~MockAudioDeviceBuffer() {}
+  MOCK_METHOD(int32_t, RequestPlayoutData, (size_t nSamples), (override));
+  MOCK_METHOD(int32_t, GetPlayoutData, (void* audioBuffer), (override));
+  MOCK_METHOD(int32_t,
+              SetRecordedBuffer,
+              (const void* audioBuffer, size_t nSamples),
+              (override));
+  MOCK_METHOD(void, SetVQEData, (int playDelayMS, int recDelayMS), (override));
+  MOCK_METHOD(int32_t, DeliverRecordedData, (), (override));
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_DEVICE_MOCK_AUDIO_DEVICE_BUFFER_H_
diff --git a/third_party/libwebrtc/modules/audio_device/test_audio_device_impl.cc b/third_party/libwebrtc/modules/audio_device/test_audio_device_impl.cc
new file mode 100644
index 0000000000..627e68b36f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/test_audio_device_impl.cc
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2023 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/audio_device/test_audio_device_impl.h"
+
+#include <memory>
+#include <utility>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/task_queue/task_queue_factory.h"
+#include "api/units/time_delta.h"
+#include "modules/audio_device/include/test_audio_device.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/task_queue.h"
+#include "rtc_base/task_utils/repeating_task.h"
+
+namespace webrtc {
+namespace {
+
+constexpr int kFrameLengthUs = 10000;
+
+}
+
+TestAudioDevice::TestAudioDevice(
+    TaskQueueFactory* task_queue_factory,
+    std::unique_ptr<TestAudioDeviceModule::Capturer> capturer,
+    std::unique_ptr<TestAudioDeviceModule::Renderer> renderer,
+    float speed)
+    : task_queue_factory_(task_queue_factory),
+      capturer_(std::move(capturer)),
+      renderer_(std::move(renderer)),
+      process_interval_us_(kFrameLengthUs / speed),
+      audio_buffer_(nullptr),
+      rendering_(false),
+      capturing_(false) {
+  auto good_sample_rate = [](int sr) {
+    return sr == 8000 || sr == 16000 || sr == 32000 || sr == 44100 ||
+           sr == 48000;
+  };
+
+  if (renderer_) {
+    const int sample_rate = renderer_->SamplingFrequency();
+    playout_buffer_.resize(TestAudioDeviceModule::SamplesPerFrame(sample_rate) *
+                               renderer_->NumChannels(),
+                           0);
+    RTC_CHECK(good_sample_rate(sample_rate));
+  }
+  if (capturer_) {
+    RTC_CHECK(good_sample_rate(capturer_->SamplingFrequency()));
+  }
+}
+
+AudioDeviceGeneric::InitStatus TestAudioDevice::Init() {
+  task_queue_ =
+      std::make_unique<rtc::TaskQueue>(task_queue_factory_->CreateTaskQueue(
+          "TestAudioDeviceModuleImpl", TaskQueueFactory::Priority::NORMAL));
+
+  RepeatingTaskHandle::Start(task_queue_->Get(), [this]() {
+    ProcessAudio();
+    return TimeDelta::Micros(process_interval_us_);
+  });
+  return InitStatus::OK;
+}
+
+int32_t TestAudioDevice::PlayoutIsAvailable(bool& available) {
+  MutexLock lock(&lock_);
+  available = renderer_ != nullptr;
+  return 0;
+}
+
+int32_t TestAudioDevice::InitPlayout() {
+  MutexLock lock(&lock_);
+
+  if (rendering_) {
+ return -1; + } + + if (audio_buffer_ != nullptr && renderer_ != nullptr) { + // Update webrtc audio buffer with the selected parameters + audio_buffer_->SetPlayoutSampleRate(renderer_->SamplingFrequency()); + audio_buffer_->SetPlayoutChannels(renderer_->NumChannels()); + } + rendering_initialized_ = true; + return 0; +} + +bool TestAudioDevice::PlayoutIsInitialized() const { + MutexLock lock(&lock_); + return rendering_initialized_; +} + +int32_t TestAudioDevice::StartPlayout() { + MutexLock lock(&lock_); + RTC_CHECK(renderer_); + rendering_ = true; + return 0; +} + +int32_t TestAudioDevice::StopPlayout() { + MutexLock lock(&lock_); + rendering_ = false; + return 0; +} + +int32_t TestAudioDevice::RecordingIsAvailable(bool& available) { + MutexLock lock(&lock_); + available = capturer_ != nullptr; + return 0; +} + +int32_t TestAudioDevice::InitRecording() { + MutexLock lock(&lock_); + + if (capturing_) { + return -1; + } + + if (audio_buffer_ != nullptr && capturer_ != nullptr) { + // Update webrtc audio buffer with the selected parameters + audio_buffer_->SetRecordingSampleRate(capturer_->SamplingFrequency()); + audio_buffer_->SetRecordingChannels(capturer_->NumChannels()); + } + capturing_initialized_ = true; + return 0; +} + +bool TestAudioDevice::RecordingIsInitialized() const { + MutexLock lock(&lock_); + return capturing_initialized_; +} + +int32_t TestAudioDevice::StartRecording() { + MutexLock lock(&lock_); + capturing_ = true; + return 0; +} + +int32_t TestAudioDevice::StopRecording() { + MutexLock lock(&lock_); + capturing_ = false; + return 0; +} + +bool TestAudioDevice::Playing() const { + MutexLock lock(&lock_); + return rendering_; +} + +bool TestAudioDevice::Recording() const { + MutexLock lock(&lock_); + return capturing_; +} + +void TestAudioDevice::ProcessAudio() { + MutexLock lock(&lock_); + if (audio_buffer_ == nullptr) { + return; + } + if (capturing_ && capturer_ != nullptr) { + // Capture 10ms of audio. 2 bytes per sample. 
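+    // Capturer::Capture() fills recording_buffer_ with interleaved int16_t
+    // samples; SetRecordedBuffer() expects samples per channel, hence the
+    // division by NumChannels() below.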
+    const bool keep_capturing = capturer_->Capture(&recording_buffer_);
+    if (recording_buffer_.size() > 0) {
+      audio_buffer_->SetRecordedBuffer(
+          recording_buffer_.data(),
+          recording_buffer_.size() / capturer_->NumChannels(),
+          absl::make_optional(rtc::TimeNanos()));
+      audio_buffer_->DeliverRecordedData();
+    }
+    if (!keep_capturing) {
+      capturing_ = false;
+    }
+  }
+  if (rendering_) {
+    const int sampling_frequency = renderer_->SamplingFrequency();
+    int32_t samples_per_channel = audio_buffer_->RequestPlayoutData(
+        TestAudioDeviceModule::SamplesPerFrame(sampling_frequency));
+    audio_buffer_->GetPlayoutData(playout_buffer_.data());
+    size_t samples_out = samples_per_channel * renderer_->NumChannels();
+    RTC_CHECK_LE(samples_out, playout_buffer_.size());
+    const bool keep_rendering = renderer_->Render(
+        rtc::ArrayView<const int16_t>(playout_buffer_.data(), samples_out));
+    if (!keep_rendering) {
+      rendering_ = false;
+    }
+  }
+}
+
+void TestAudioDevice::AttachAudioBuffer(AudioDeviceBuffer* audio_buffer) {
+  MutexLock lock(&lock_);
+  RTC_DCHECK(audio_buffer || audio_buffer_);
+  audio_buffer_ = audio_buffer;
+
+  if (renderer_ != nullptr) {
+    audio_buffer_->SetPlayoutSampleRate(renderer_->SamplingFrequency());
+    audio_buffer_->SetPlayoutChannels(renderer_->NumChannels());
+  }
+  if (capturer_ != nullptr) {
+    audio_buffer_->SetRecordingSampleRate(capturer_->SamplingFrequency());
+    audio_buffer_->SetRecordingChannels(capturer_->NumChannels());
+  }
+}
+
+}  // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/test_audio_device_impl.h b/third_party/libwebrtc/modules/audio_device/test_audio_device_impl.h
new file mode 100644
index 0000000000..36192b7f7f
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/test_audio_device_impl.h
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2023 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_TEST_AUDIO_DEVICE_IMPL_H_
+#define MODULES_AUDIO_DEVICE_TEST_AUDIO_DEVICE_IMPL_H_
+
+#include <memory>
+#include <vector>
+
+#include "api/task_queue/task_queue_factory.h"
+#include "modules/audio_device/audio_device_buffer.h"
+#include "modules/audio_device/audio_device_generic.h"
+#include "modules/audio_device/include/audio_device.h"
+#include "modules/audio_device/include/audio_device_defines.h"
+#include "modules/audio_device/include/test_audio_device.h"
+#include "rtc_base/buffer.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/task_queue.h"
+
+namespace webrtc {
+
+class TestAudioDevice : public AudioDeviceGeneric {
+ public:
+  // Creates a new TestAudioDevice. When capturing or playing, 10 ms audio
+  // frames will be processed every 10ms / `speed`.
+  // `capturer` is an object that produces audio data. Can be nullptr if this
+  // device is never used for recording.
+  // `renderer` is an object that receives audio data that would have been
+  // played out. Can be nullptr if this device is never used for playing.
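+  //
+  // A minimal usage sketch (the factory helper comes from
+  // test_audio_device.h and is the one exercised by the unit tests):
+  //
+  //   TestAudioDevice adm(task_queue_factory,
+  //                       TestAudioDeviceModule::CreatePulsedNoiseCapturer(
+  //                           /*max_amplitude=*/1000,
+  //                           /*sampling_frequency_in_hz=*/48000,
+  //                           /*num_channels=*/2),
+  //                       /*renderer=*/nullptr);
+  //   adm.Init();  // Starts the repeating 10 ms ProcessAudio() task.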
+  TestAudioDevice(TaskQueueFactory* task_queue_factory,
+                  std::unique_ptr<TestAudioDeviceModule::Capturer> capturer,
+                  std::unique_ptr<TestAudioDeviceModule::Renderer> renderer,
+                  float speed = 1);
+  TestAudioDevice(const TestAudioDevice&) = delete;
+  TestAudioDevice& operator=(const TestAudioDevice&) = delete;
+  ~TestAudioDevice() override = default;
+
+  // Retrieve the currently utilized audio layer
+  int32_t ActiveAudioLayer(
+      AudioDeviceModule::AudioLayer& audioLayer) const override {
+    return 0;
+  }
+
+  // Main initialization and termination
+  InitStatus Init() override;
+  int32_t Terminate() override { return 0; }
+  bool Initialized() const override { return true; }
+
+  // Device enumeration
+  int16_t PlayoutDevices() override { return 0; }
+  int16_t RecordingDevices() override { return 0; }
+  int32_t PlayoutDeviceName(uint16_t index,
+                            char name[kAdmMaxDeviceNameSize],
+                            char guid[kAdmMaxGuidSize]) override {
+    return 0;
+  }
+  int32_t RecordingDeviceName(uint16_t index,
+                              char name[kAdmMaxDeviceNameSize],
+                              char guid[kAdmMaxGuidSize]) override {
+    return 0;
+  }
+
+  // Device selection
+  int32_t SetPlayoutDevice(uint16_t index) override { return 0; }
+  int32_t SetPlayoutDevice(
+      AudioDeviceModule::WindowsDeviceType device) override {
+    return 0;
+  }
+  int32_t SetRecordingDevice(uint16_t index) override { return 0; }
+  int32_t SetRecordingDevice(
+      AudioDeviceModule::WindowsDeviceType device) override {
+    return 0;
+  }
+
+  // Audio transport initialization
+  int32_t PlayoutIsAvailable(bool& available) override;
+  int32_t InitPlayout() override;
+  bool PlayoutIsInitialized() const override;
+  int32_t RecordingIsAvailable(bool& available) override;
+  int32_t InitRecording() override;
+  bool RecordingIsInitialized() const override;
+
+  // Audio transport control
+  int32_t StartPlayout() override;
+  int32_t StopPlayout() override;
+  bool Playing() const override;
+  int32_t StartRecording() override;
+  int32_t StopRecording() override;
+  bool Recording() const override;
+
+  // Audio mixer initialization
+  int32_t InitSpeaker() override { return 0; }
+  bool SpeakerIsInitialized() const override { return true; }
+  int32_t InitMicrophone() override { return 0; }
+  bool MicrophoneIsInitialized() const override { return true; }
+
+  // Speaker volume controls
+  int32_t SpeakerVolumeIsAvailable(bool& available) override { return 0; }
+  int32_t SetSpeakerVolume(uint32_t volume) override { return 0; }
+  int32_t SpeakerVolume(uint32_t& volume) const override { return 0; }
+  int32_t MaxSpeakerVolume(uint32_t& maxVolume) const override { return 0; }
+  int32_t MinSpeakerVolume(uint32_t& minVolume) const override { return 0; }
+
+  // Microphone volume controls
+  int32_t MicrophoneVolumeIsAvailable(bool& available) override { return 0; }
+  int32_t SetMicrophoneVolume(uint32_t volume) override { return 0; }
+  int32_t MicrophoneVolume(uint32_t& volume) const override { return 0; }
+  int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const override { return 0; }
+  int32_t MinMicrophoneVolume(uint32_t& minVolume) const override { return 0; }
+
+  // Speaker mute control
+  int32_t SpeakerMuteIsAvailable(bool& available) override { return 0; }
+  int32_t SetSpeakerMute(bool enable) override { return 0; }
+  int32_t SpeakerMute(bool& enabled) const override { return 0; }
+
+  // Microphone mute control
+  int32_t MicrophoneMuteIsAvailable(bool& available) override { return 0; }
+  int32_t SetMicrophoneMute(bool enable) override { return 0; }
+  int32_t MicrophoneMute(bool& enabled) const override { return 0; }
+
+  // Stereo support
+  int32_t StereoPlayoutIsAvailable(bool& available) override {
+    available = false;
+    return 0;
+  }
+  int32_t SetStereoPlayout(bool enable) override { return 0; }
+  int32_t StereoPlayout(bool& enabled) const override { return 0; }
+  int32_t StereoRecordingIsAvailable(bool& available) override {
+    available = false;
+    return 0;
+  }
+  int32_t SetStereoRecording(bool enable) override { return 0; }
+  int32_t StereoRecording(bool& enabled) const override { return 0; }
+
+  // Delay information and control
+  int32_t PlayoutDelay(uint16_t& delayMS) const override {
+    delayMS = 0;
+    return 0;
+  }
+
+  // Android only
+  bool BuiltInAECIsAvailable() const override { return false; }
+  bool BuiltInAGCIsAvailable() const override { return false; }
+  bool BuiltInNSIsAvailable() const override { return false; }
+
+  // Windows Core Audio and Android only.
+  int32_t EnableBuiltInAEC(bool enable) override { return -1; }
+  int32_t EnableBuiltInAGC(bool enable) override { return -1; }
+  int32_t EnableBuiltInNS(bool enable) override { return -1; }
+
+  // Play underrun count.
+  int32_t GetPlayoutUnderrunCount() const override { return -1; }
+
+// iOS only.
+// TODO(henrika): add Android support.
+#if defined(WEBRTC_IOS)
+  int GetPlayoutAudioParameters(AudioParameters* params) const override {
+    return -1;
+  }
+  int GetRecordAudioParameters(AudioParameters* params) const override {
+    return -1;
+  }
+#endif  // WEBRTC_IOS
+
+  void AttachAudioBuffer(AudioDeviceBuffer* audio_buffer) override;
+
+ private:
+  void ProcessAudio();
+
+  TaskQueueFactory* const task_queue_factory_;
+  const std::unique_ptr<TestAudioDeviceModule::Capturer> capturer_
+      RTC_GUARDED_BY(lock_);
+  const std::unique_ptr<TestAudioDeviceModule::Renderer> renderer_
+      RTC_GUARDED_BY(lock_);
+  const int64_t process_interval_us_;
+
+  mutable Mutex lock_;
+  AudioDeviceBuffer* audio_buffer_ RTC_GUARDED_BY(lock_) = nullptr;
+  bool rendering_ RTC_GUARDED_BY(lock_) = false;
+  bool capturing_ RTC_GUARDED_BY(lock_) = false;
+  bool rendering_initialized_ RTC_GUARDED_BY(lock_) = false;
+  bool capturing_initialized_ RTC_GUARDED_BY(lock_) = false;
+
+  std::vector<int16_t> playout_buffer_ RTC_GUARDED_BY(lock_);
+  rtc::BufferT<int16_t> recording_buffer_ RTC_GUARDED_BY(lock_);
+  std::unique_ptr<rtc::TaskQueue> task_queue_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_DEVICE_TEST_AUDIO_DEVICE_IMPL_H_
diff --git a/third_party/libwebrtc/modules/audio_device/test_audio_device_impl_test.cc b/third_party/libwebrtc/modules/audio_device/test_audio_device_impl_test.cc
new file mode 100644
index 0000000000..e81bb2f807
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/test_audio_device_impl_test.cc
@@ -0,0 +1,275 @@
+/*
+ * Copyright (c) 2023 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/audio_device/test_audio_device_impl.h"
+
+#include
+#include
+
+#include "absl/types/optional.h"
+#include "api/task_queue/task_queue_factory.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "modules/audio_device/audio_device_buffer.h"
+#include "modules/audio_device/audio_device_generic.h"
+#include "modules/audio_device/include/audio_device.h"
+#include "modules/audio_device/include/audio_device_defines.h"
+#include "modules/audio_device/include/test_audio_device.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/time_controller/simulated_time_controller.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::ElementsAre;
+
+constexpr Timestamp kStartTime = Timestamp::Millis(10000);
+
+class TestAudioTransport : public AudioTransport {
+ public:
+  enum class Mode { kPlaying, kRecording };
+
+  explicit TestAudioTransport(Mode mode) : mode_(mode) {}
+  ~TestAudioTransport() override = default;
+
+  int32_t RecordedDataIsAvailable(
+      const void* audioSamples,
+      size_t samples_per_channel,
+      size_t bytes_per_sample,
+      size_t number_of_channels,
+      uint32_t samples_per_second,
+      uint32_t total_delay_ms,
+      int32_t clock_drift,
+      uint32_t current_mic_level,
+      bool key_pressed,
+      uint32_t& new_mic_level,
+      absl::optional<int64_t> estimated_capture_time_ns) override {
+    new_mic_level = 1;
+
+    if (mode_ != Mode::kRecording) {
+      EXPECT_TRUE(false) << "RecordedDataIsAvailable mustn't be called when "
+                            "mode isn't kRecording";
+      return -1;
+    }
+
+    MutexLock lock(&mutex_);
+    samples_per_channel_.push_back(samples_per_channel);
+    number_of_channels_.push_back(number_of_channels);
+    bytes_per_sample_.push_back(bytes_per_sample);
+    samples_per_second_.push_back(samples_per_second);
+    return 0;
+  }
+
+  int32_t NeedMorePlayData(size_t samples_per_channel,
+                           size_t bytes_per_sample,
+                           size_t number_of_channels,
+                           uint32_t samples_per_second,
+                           void* audio_samples,
+                           size_t& samples_out,
+                           int64_t* elapsed_time_ms,
+                           int64_t* ntp_time_ms) override {
+    const size_t num_bytes = samples_per_channel * number_of_channels;
+    std::memset(audio_samples, 1, num_bytes);
+    samples_out = samples_per_channel * number_of_channels;
+    *elapsed_time_ms = 0;
+    *ntp_time_ms = 0;
+
+    if (mode_ != Mode::kPlaying) {
+      EXPECT_TRUE(false)
+          << "NeedMorePlayData mustn't be called when mode isn't kPlaying";
+      return -1;
+    }
+
+    MutexLock lock(&mutex_);
+    samples_per_channel_.push_back(samples_per_channel);
+    number_of_channels_.push_back(number_of_channels);
+    bytes_per_sample_.push_back(bytes_per_sample);
+    samples_per_second_.push_back(samples_per_second);
+    return 0;
+  }
+
+  int32_t RecordedDataIsAvailable(const void* audio_samples,
+                                  size_t samples_per_channel,
+                                  size_t bytes_per_sample,
+                                  size_t number_of_channels,
+                                  uint32_t samples_per_second,
+                                  uint32_t total_delay_ms,
+                                  int32_t clockDrift,
+                                  uint32_t current_mic_level,
+                                  bool key_pressed,
+                                  uint32_t& new_mic_level) override {
+    RTC_CHECK(false) << "This method should never be executed";
+  }
+
+  void PullRenderData(int bits_per_sample,
+                      int sample_rate,
+                      size_t number_of_channels,
+                      size_t number_of_frames,
+                      void* audio_data,
+                      int64_t* elapsed_time_ms,
+                      int64_t* ntp_time_ms) override {
+    RTC_CHECK(false) << "This method should never be executed";
+  }
+
+  std::vector<size_t> samples_per_channel() const {
+    MutexLock lock(&mutex_);
+    return samples_per_channel_;
+  }
+  std::vector<size_t> number_of_channels() const {
+    MutexLock lock(&mutex_);
return number_of_channels_;
+  }
+  std::vector<size_t> bytes_per_sample() const {
+    MutexLock lock(&mutex_);
+    return bytes_per_sample_;
+  }
+  std::vector<uint32_t> samples_per_second() const {
+    MutexLock lock(&mutex_);
+    return samples_per_second_;
+  }
+
+ private:
+  const Mode mode_;
+
+  mutable Mutex mutex_;
+  std::vector<size_t> samples_per_channel_ RTC_GUARDED_BY(mutex_);
+  std::vector<size_t> number_of_channels_ RTC_GUARDED_BY(mutex_);
+  std::vector<size_t> bytes_per_sample_ RTC_GUARDED_BY(mutex_);
+  std::vector<uint32_t> samples_per_second_ RTC_GUARDED_BY(mutex_);
+};
+
+TEST(TestAudioDeviceTest, EnablingRecordingProducesAudio) {
+  GlobalSimulatedTimeController time_controller(kStartTime);
+  TestAudioTransport audio_transport(TestAudioTransport::Mode::kRecording);
+  AudioDeviceBuffer audio_buffer(time_controller.GetTaskQueueFactory());
+  ASSERT_EQ(audio_buffer.RegisterAudioCallback(&audio_transport), 0);
+  std::unique_ptr<TestAudioDeviceModule::Capturer> capturer =
+      TestAudioDeviceModule::CreatePulsedNoiseCapturer(
+          /*max_amplitude=*/1000,
+          /*sampling_frequency_in_hz=*/48000, /*num_channels=*/2);
+
+  TestAudioDevice audio_device(time_controller.GetTaskQueueFactory(),
+                               std::move(capturer),
+                               /*renderer=*/nullptr);
+  ASSERT_EQ(audio_device.Init(), AudioDeviceGeneric::InitStatus::OK);
+  audio_device.AttachAudioBuffer(&audio_buffer);
+
+  EXPECT_FALSE(audio_device.RecordingIsInitialized());
+  ASSERT_EQ(audio_device.InitRecording(), 0);
+  EXPECT_TRUE(audio_device.RecordingIsInitialized());
+  audio_buffer.StartRecording();
+  ASSERT_EQ(audio_device.StartRecording(), 0);
+  time_controller.AdvanceTime(TimeDelta::Millis(10));
+  ASSERT_TRUE(audio_device.Recording());
+  time_controller.AdvanceTime(TimeDelta::Millis(10));
+  ASSERT_EQ(audio_device.StopRecording(), 0);
+  audio_buffer.StopRecording();
+
+  EXPECT_THAT(audio_transport.samples_per_channel(),
+              ElementsAre(480, 480, 480));
+  EXPECT_THAT(audio_transport.number_of_channels(), ElementsAre(2, 2, 2));
+  EXPECT_THAT(audio_transport.bytes_per_sample(), ElementsAre(4, 4, 4));
+  EXPECT_THAT(audio_transport.samples_per_second(),
+              ElementsAre(48000, 48000, 48000));
+}
+
+TEST(TestAudioDeviceTest, RecordingIsAvailableWhenCapturerIsSet) {
+  GlobalSimulatedTimeController time_controller(kStartTime);
+  std::unique_ptr<TestAudioDeviceModule::Capturer> capturer =
+      TestAudioDeviceModule::CreatePulsedNoiseCapturer(
+          /*max_amplitude=*/1000,
+          /*sampling_frequency_in_hz=*/48000, /*num_channels=*/2);
+
+  TestAudioDevice audio_device(time_controller.GetTaskQueueFactory(),
+                               std::move(capturer),
+                               /*renderer=*/nullptr);
+  ASSERT_EQ(audio_device.Init(), AudioDeviceGeneric::InitStatus::OK);
+
+  bool available;
+  EXPECT_EQ(audio_device.RecordingIsAvailable(available), 0);
+  EXPECT_TRUE(available);
+}
+
+TEST(TestAudioDeviceTest, RecordingIsNotAvailableWhenCapturerIsNotSet) {
+  GlobalSimulatedTimeController time_controller(kStartTime);
+  TestAudioDevice audio_device(time_controller.GetTaskQueueFactory(),
+                               /*capturer=*/nullptr,
+                               /*renderer=*/nullptr);
+  ASSERT_EQ(audio_device.Init(), AudioDeviceGeneric::InitStatus::OK);
+
+  bool available;
+  EXPECT_EQ(audio_device.RecordingIsAvailable(available), 0);
+  EXPECT_FALSE(available);
+}
+
+TEST(TestAudioDeviceTest, EnablingPlayoutProducesAudio) {
+  GlobalSimulatedTimeController time_controller(kStartTime);
+  TestAudioTransport audio_transport(TestAudioTransport::Mode::kPlaying);
+  AudioDeviceBuffer audio_buffer(time_controller.GetTaskQueueFactory());
+  ASSERT_EQ(audio_buffer.RegisterAudioCallback(&audio_transport), 0);
+  std::unique_ptr<TestAudioDeviceModule::Renderer> renderer =
+      TestAudioDeviceModule::CreateDiscardRenderer(
          /*sampling_frequency_in_hz=*/48000, /*num_channels=*/2);
+
+  TestAudioDevice audio_device(time_controller.GetTaskQueueFactory(),
+                               /*capturer=*/nullptr, std::move(renderer));
+  ASSERT_EQ(audio_device.Init(), AudioDeviceGeneric::InitStatus::OK);
+  audio_device.AttachAudioBuffer(&audio_buffer);
+
+  EXPECT_FALSE(audio_device.PlayoutIsInitialized());
+  ASSERT_EQ(audio_device.InitPlayout(), 0);
+  EXPECT_TRUE(audio_device.PlayoutIsInitialized());
+  audio_buffer.StartPlayout();
+  ASSERT_EQ(audio_device.StartPlayout(), 0);
+  time_controller.AdvanceTime(TimeDelta::Millis(10));
+  ASSERT_TRUE(audio_device.Playing());
+  time_controller.AdvanceTime(TimeDelta::Millis(10));
+  ASSERT_EQ(audio_device.StopPlayout(), 0);
+  audio_buffer.StopPlayout();
+
+  EXPECT_THAT(audio_transport.samples_per_channel(),
+              ElementsAre(480, 480, 480));
+  EXPECT_THAT(audio_transport.number_of_channels(), ElementsAre(2, 2, 2));
+  EXPECT_THAT(audio_transport.bytes_per_sample(), ElementsAre(4, 4, 4));
+  EXPECT_THAT(audio_transport.samples_per_second(),
+              ElementsAre(48000, 48000, 48000));
+}
+
+TEST(TestAudioDeviceTest, PlayoutIsAvailableWhenRendererIsSet) {
+  GlobalSimulatedTimeController time_controller(kStartTime);
+  std::unique_ptr<TestAudioDeviceModule::Renderer> renderer =
+      TestAudioDeviceModule::CreateDiscardRenderer(
+          /*sampling_frequency_in_hz=*/48000, /*num_channels=*/2);
+
+  TestAudioDevice audio_device(time_controller.GetTaskQueueFactory(),
+                               /*capturer=*/nullptr, std::move(renderer));
+  ASSERT_EQ(audio_device.Init(), AudioDeviceGeneric::InitStatus::OK);
+
+  bool available;
+  EXPECT_EQ(audio_device.PlayoutIsAvailable(available), 0);
+  EXPECT_TRUE(available);
+}
+
+TEST(TestAudioDeviceTest, PlayoutIsNotAvailableWhenRendererIsNotSet) {
+  GlobalSimulatedTimeController time_controller(kStartTime);
+  TestAudioDevice audio_device(time_controller.GetTaskQueueFactory(),
+                               /*capturer=*/nullptr,
+                               /*renderer=*/nullptr);
+  ASSERT_EQ(audio_device.Init(), AudioDeviceGeneric::InitStatus::OK);
+
+  bool available;
+  EXPECT_EQ(audio_device.PlayoutIsAvailable(available), 0);
+  EXPECT_FALSE(available);
+}
+
+}  // namespace
+}  // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/win/audio_device_core_win.cc b/third_party/libwebrtc/modules/audio_device/win/audio_device_core_win.cc
new file mode 100644
index 0000000000..aa8b6a9ebe
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/win/audio_device_core_win.cc
@@ -0,0 +1,4178 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#pragma warning(disable : 4995)  // name was marked as #pragma deprecated
+
+#if (_MSC_VER >= 1310) && (_MSC_VER < 1400)
+// Reports the major and minor versions of the compiler.
+// For example, 1310 for Microsoft Visual C++ .NET 2003. 1310 represents
+// version 13 and a 1.0 point release. The Visual C++ 2005 compiler version
+// is 1400.
+// Type cl /? at the command line to see the major and minor versions of your
+// compiler along with the build number.
+#pragma message(">> INFO: Windows Core Audio is not supported in VS 2003")
+#endif
+
+#include "modules/audio_device/audio_device_config.h"
+
+#ifdef WEBRTC_WINDOWS_CORE_AUDIO_BUILD
+
+// clang-format off
+// To get Windows includes in the right order, this must come before the Windows
+// includes below.
+#include "modules/audio_device/win/audio_device_core_win.h"
+// clang-format on
+
+#include <string.h>
+#include <comdef.h>
+#include <dmo.h>
+#include <functiondiscoverykeys_devpkey.h>
+#include <mmsystem.h>
+#include <strsafe.h>
+#include <uuids.h>
+#include <windows.h>
+
+#include <iomanip>
+
+#include "api/make_ref_counted.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/string_utils.h"
+#include "rtc_base/thread_annotations.h"
+#include "system_wrappers/include/sleep.h"
+
+// Macro that calls a COM method returning HRESULT value.
+#define EXIT_ON_ERROR(hres) \
+  do {                      \
+    if (FAILED(hres))       \
+      goto Exit;            \
+  } while (0)
+
+// Macro that continues to a COM error.
+#define CONTINUE_ON_ERROR(hres) \
+  do {                          \
+    if (FAILED(hres))           \
+      goto Next;                \
+  } while (0)
+
+// Macro that releases a COM object if not NULL.
+#define SAFE_RELEASE(p) \
+  do {                  \
+    if ((p)) {          \
+      (p)->Release();   \
+      (p) = NULL;       \
+    }                   \
+  } while (0)
+
+#define ROUND(x) ((x) >= 0 ? (int)((x) + 0.5) : (int)((x)-0.5))
+
+// REFERENCE_TIME time units per millisecond
+#define REFTIMES_PER_MILLISEC 10000
+
+typedef struct tagTHREADNAME_INFO {
+  DWORD dwType;     // must be 0x1000
+  LPCSTR szName;    // pointer to name (in user addr space)
+  DWORD dwThreadID; // thread ID (-1=caller thread)
+  DWORD dwFlags;    // reserved for future use, must be zero
+} THREADNAME_INFO;
+
+namespace webrtc {
+namespace {
+
+enum { COM_THREADING_MODEL = COINIT_MULTITHREADED };
+
+enum { kAecCaptureStreamIndex = 0, kAecRenderStreamIndex = 1 };
+
+// An implementation of IMediaBuffer, as required for
+// IMediaObject::ProcessOutput(). After consuming data provided by
+// ProcessOutput(), call SetLength() to update the buffer availability.
+//
+// Example implementation:
+// http://msdn.microsoft.com/en-us/library/dd376684(v=vs.85).aspx
+class MediaBufferImpl final : public IMediaBuffer {
+ public:
+  explicit MediaBufferImpl(DWORD maxLength)
+      : _data(new BYTE[maxLength]),
+        _length(0),
+        _maxLength(maxLength),
+        _refCount(0) {}
+
+  // IMediaBuffer methods.
+  STDMETHOD(GetBufferAndLength(BYTE** ppBuffer, DWORD* pcbLength)) {
+    if (!ppBuffer || !pcbLength) {
+      return E_POINTER;
+    }
+
+    *ppBuffer = _data;
+    *pcbLength = _length;
+
+    return S_OK;
+  }
+
+  STDMETHOD(GetMaxLength(DWORD* pcbMaxLength)) {
+    if (!pcbMaxLength) {
+      return E_POINTER;
+    }
+
+    *pcbMaxLength = _maxLength;
+    return S_OK;
+  }
+
+  STDMETHOD(SetLength(DWORD cbLength)) {
+    if (cbLength > _maxLength) {
+      return E_INVALIDARG;
+    }
+
+    _length = cbLength;
+    return S_OK;
+  }
+
+  // IUnknown methods.
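+  // The three methods below implement the standard COM lifetime contract:
+  // every successful QueryInterface() or AddRef() must be balanced by a
+  // Release(), and the object deletes itself once the count reaches zero.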
+  STDMETHOD_(ULONG, AddRef()) { return InterlockedIncrement(&_refCount); }
+
+  STDMETHOD(QueryInterface(REFIID riid, void** ppv)) {
+    if (!ppv) {
+      return E_POINTER;
+    } else if (riid != IID_IMediaBuffer && riid != IID_IUnknown) {
+      return E_NOINTERFACE;
+    }
+
+    *ppv = static_cast<IMediaBuffer*>(this);
+    AddRef();
+    return S_OK;
+  }
+
+  STDMETHOD_(ULONG, Release()) {
+    LONG refCount = InterlockedDecrement(&_refCount);
+    if (refCount == 0) {
+      delete this;
+    }
+
+    return refCount;
+  }
+
+ private:
+  ~MediaBufferImpl() { delete[] _data; }
+
+  BYTE* _data;
+  DWORD _length;
+  const DWORD _maxLength;
+  LONG _refCount;
+};
+}  // namespace
+
+// ============================================================================
+//  Static Methods
+// ============================================================================
+
+// ----------------------------------------------------------------------------
+//  CoreAudioIsSupported
+// ----------------------------------------------------------------------------
+
+bool AudioDeviceWindowsCore::CoreAudioIsSupported() {
+  RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+  bool MMDeviceIsAvailable(false);
+  bool coreAudioIsSupported(false);
+
+  HRESULT hr(S_OK);
+  wchar_t buf[MAXERRORLENGTH];
+  wchar_t errorText[MAXERRORLENGTH];
+
+  // 1) Check if Windows version is Vista SP1 or later.
+  //
+  // CoreAudio is only available on Vista SP1 and later.
+  //
+  OSVERSIONINFOEX osvi;
+  DWORDLONG dwlConditionMask = 0;
+  int op = VER_LESS_EQUAL;
+
+  // Initialize the OSVERSIONINFOEX structure.
+  ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
+  osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
+  osvi.dwMajorVersion = 6;
+  osvi.dwMinorVersion = 0;
+  osvi.wServicePackMajor = 0;
+  osvi.wServicePackMinor = 0;
+  osvi.wProductType = VER_NT_WORKSTATION;
+
+  // Initialize the condition mask.
+  VER_SET_CONDITION(dwlConditionMask, VER_MAJORVERSION, op);
+  VER_SET_CONDITION(dwlConditionMask, VER_MINORVERSION, op);
+  VER_SET_CONDITION(dwlConditionMask, VER_SERVICEPACKMAJOR, op);
+  VER_SET_CONDITION(dwlConditionMask, VER_SERVICEPACKMINOR, op);
+  VER_SET_CONDITION(dwlConditionMask, VER_PRODUCT_TYPE, VER_EQUAL);
+
+  DWORD dwTypeMask = VER_MAJORVERSION | VER_MINORVERSION |
+                     VER_SERVICEPACKMAJOR | VER_SERVICEPACKMINOR |
+                     VER_PRODUCT_TYPE;
+
+  // Perform the test.
+  BOOL isVistaRTMorXP = VerifyVersionInfo(&osvi, dwTypeMask, dwlConditionMask);
+  if (isVistaRTMorXP != 0) {
+    RTC_LOG(LS_VERBOSE)
+        << "*** Windows Core Audio is only supported on Vista SP1 or later";
+    return false;
+  }
+
+  // 2) Initialize the COM library for use by the calling thread.
+
+  // The COM init wrapper sets the thread's concurrency model to MTA,
+  // and creates a new apartment for the thread if one is required. The
+  // wrapper also ensures that each call to CoInitializeEx is balanced
+  // by a corresponding call to CoUninitialize.
+  //
+  ScopedCOMInitializer comInit(ScopedCOMInitializer::kMTA);
+  if (!comInit.Succeeded()) {
+    // Things will work even if an STA thread is calling this method but we
+    // want to ensure that MTA is used and therefore return false here.
+    return false;
+  }
+
+  // 3) Check if the MMDevice API is available.
+  //
+  // The Windows Multimedia Device (MMDevice) API enables audio clients to
+  // discover audio endpoint devices, determine their capabilities, and create
+  // driver instances for those devices.
+  // Header file Mmdeviceapi.h defines the interfaces in the MMDevice API.
+  // The MMDevice API consists of several interfaces. The first of these is the
+  // IMMDeviceEnumerator interface. To access the interfaces in the MMDevice
+  // API, a client obtains a reference to the IMMDeviceEnumerator interface of
+  // a device-enumerator object by calling the CoCreateInstance function.
+  //
+  // Through the IMMDeviceEnumerator interface, the client can obtain
+  // references to the other interfaces in the MMDevice API. The MMDevice API
+  // implements the following interfaces:
+  //
+  // IMMDevice            Represents an audio device.
+  // IMMDeviceCollection  Represents a collection of audio devices.
+  // IMMDeviceEnumerator  Provides methods for enumerating audio devices.
+  // IMMEndpoint          Represents an audio endpoint device.
+  //
+  IMMDeviceEnumerator* pIMMD(NULL);
+  const CLSID CLSID_MMDeviceEnumerator = __uuidof(MMDeviceEnumerator);
+  const IID IID_IMMDeviceEnumerator = __uuidof(IMMDeviceEnumerator);
+
+  hr = CoCreateInstance(
+      CLSID_MMDeviceEnumerator,  // GUID value of MMDeviceEnumerator coclass
+      NULL, CLSCTX_ALL,
+      IID_IMMDeviceEnumerator,  // GUID value of the IMMDeviceEnumerator
+                                // interface
+      (void**)&pIMMD);
+
+  if (FAILED(hr)) {
+    RTC_LOG(LS_ERROR) << "AudioDeviceWindowsCore::CoreAudioIsSupported()"
+                         " Failed to create the required COM object (hr="
+                      << hr << ")";
+    RTC_LOG(LS_VERBOSE) << "AudioDeviceWindowsCore::CoreAudioIsSupported()"
+                           " CoCreateInstance(MMDeviceEnumerator) failed (hr="
+                        << hr << ")";
+
+    const DWORD dwFlags =
+        FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS;
+    const DWORD dwLangID = MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US);
+
+    // Gets the system's human readable message string for this HRESULT.
+    // All error messages are in English by default.
+    DWORD messageLength = ::FormatMessageW(dwFlags, 0, hr, dwLangID, errorText,
+                                           MAXERRORLENGTH, NULL);
+
+    RTC_DCHECK_LE(messageLength, MAXERRORLENGTH);
+
+    // Trims trailing white space (FormatMessage() leaves a trailing cr-lf.).
+    for (; messageLength && ::isspace(errorText[messageLength - 1]);
+         --messageLength) {
+      errorText[messageLength - 1] = '\0';
+    }
+
+    StringCchPrintfW(buf, MAXERRORLENGTH, L"Error details: ");
+    StringCchCatW(buf, MAXERRORLENGTH, errorText);
+    RTC_LOG(LS_VERBOSE) << buf;
+  } else {
+    MMDeviceIsAvailable = true;
+    RTC_LOG(LS_VERBOSE)
+        << "AudioDeviceWindowsCore::CoreAudioIsSupported()"
+           " CoCreateInstance(MMDeviceEnumerator) succeeded (hr="
+        << hr << ")";
+    SAFE_RELEASE(pIMMD);
+  }
+
+  // 4) Verify that we can create and initialize our Core Audio class.
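+  //
+  // A throwaway AudioDeviceWindowsCore instance is taken through a full
+  // Init()/Terminate() cycle below; Core Audio is reported as supported
+  // only if both steps succeed.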
+  //
+  if (MMDeviceIsAvailable) {
+    coreAudioIsSupported = false;
+
+    AudioDeviceWindowsCore* p = new (std::nothrow) AudioDeviceWindowsCore();
+    if (p == NULL) {
+      return false;
+    }
+
+    int ok(0);
+
+    if (p->Init() != InitStatus::OK) {
+      ok |= -1;
+    }
+
+    ok |= p->Terminate();
+
+    if (ok == 0) {
+      coreAudioIsSupported = true;
+    }
+
+    delete p;
+  }
+
+  if (coreAudioIsSupported) {
+    RTC_LOG(LS_VERBOSE) << "*** Windows Core Audio is supported ***";
+  } else {
+    RTC_LOG(LS_VERBOSE) << "*** Windows Core Audio is NOT supported";
+  }
+
+  return (coreAudioIsSupported);
+}
+
+// ============================================================================
+//  Construction & Destruction
+// ============================================================================
+
+// ----------------------------------------------------------------------------
+//  AudioDeviceWindowsCore() - ctor
+// ----------------------------------------------------------------------------
+
+AudioDeviceWindowsCore::AudioDeviceWindowsCore()
+    : _avrtLibrary(nullptr),
+      _winSupportAvrt(false),
+      _comInit(ScopedCOMInitializer::kMTA),
+      _ptrAudioBuffer(nullptr),
+      _ptrEnumerator(nullptr),
+      _ptrRenderCollection(nullptr),
+      _ptrCaptureCollection(nullptr),
+      _ptrDeviceOut(nullptr),
+      _ptrDeviceIn(nullptr),
+      _ptrClientOut(nullptr),
+      _ptrClientIn(nullptr),
+      _ptrRenderClient(nullptr),
+      _ptrCaptureClient(nullptr),
+      _ptrCaptureVolume(nullptr),
+      _ptrRenderSimpleVolume(nullptr),
+      _dmo(nullptr),
+      _mediaBuffer(nullptr),
+      _builtInAecEnabled(false),
+      _hRenderSamplesReadyEvent(nullptr),
+      _hPlayThread(nullptr),
+      _hRenderStartedEvent(nullptr),
+      _hShutdownRenderEvent(nullptr),
+      _hCaptureSamplesReadyEvent(nullptr),
+      _hRecThread(nullptr),
+      _hCaptureStartedEvent(nullptr),
+      _hShutdownCaptureEvent(nullptr),
+      _hMmTask(nullptr),
+      _playAudioFrameSize(0),
+      _playSampleRate(0),
+      _playBlockSize(0),
+      _playChannels(2),
+      _sndCardPlayDelay(0),
+      _writtenSamples(0),
+      _readSamples(0),
+      _recAudioFrameSize(0),
+      _recSampleRate(0),
+      _recBlockSize(0),
+      _recChannels(2),
+      _initialized(false),
+      _recording(false),
+      _playing(false),
+      _recIsInitialized(false),
+      _playIsInitialized(false),
+      _speakerIsInitialized(false),
+      _microphoneIsInitialized(false),
+      _usingInputDeviceIndex(false),
+      _usingOutputDeviceIndex(false),
+      _inputDevice(AudioDeviceModule::kDefaultCommunicationDevice),
+      _outputDevice(AudioDeviceModule::kDefaultCommunicationDevice),
+      _inputDeviceIndex(0),
+      _outputDeviceIndex(0) {
+  RTC_DLOG(LS_INFO) << __FUNCTION__ << " created";
+  RTC_DCHECK(_comInit.Succeeded());
+
+  // Try to load the Avrt DLL
+  if (!_avrtLibrary) {
+    // Get handle to the Avrt DLL module.
+    _avrtLibrary = LoadLibrary(TEXT("Avrt.dll"));
+    if (_avrtLibrary) {
+      // Handle is valid (should only happen on Windows Vista and later).
+      // Try to get the function addresses.
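+      // Avrt.dll exposes the Multimedia Class Scheduler Service (MMCSS) API;
+      // the three entry points below are used to boost the priority of the
+      // render and capture threads while streaming.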
+      RTC_LOG(LS_VERBOSE) << "AudioDeviceWindowsCore::AudioDeviceWindowsCore()"
+                             " The Avrt DLL module is now loaded";
+
+      _PAvRevertMmThreadCharacteristics =
+          (PAvRevertMmThreadCharacteristics)GetProcAddress(
+              _avrtLibrary, "AvRevertMmThreadCharacteristics");
+      _PAvSetMmThreadCharacteristicsA =
+          (PAvSetMmThreadCharacteristicsA)GetProcAddress(
+              _avrtLibrary, "AvSetMmThreadCharacteristicsA");
+      _PAvSetMmThreadPriority = (PAvSetMmThreadPriority)GetProcAddress(
+          _avrtLibrary, "AvSetMmThreadPriority");
+
+      if (_PAvRevertMmThreadCharacteristics &&
+          _PAvSetMmThreadCharacteristicsA && _PAvSetMmThreadPriority) {
+        RTC_LOG(LS_VERBOSE)
+            << "AudioDeviceWindowsCore::AudioDeviceWindowsCore()"
+               " AvRevertMmThreadCharacteristics() is OK";
+        RTC_LOG(LS_VERBOSE)
+            << "AudioDeviceWindowsCore::AudioDeviceWindowsCore()"
+               " AvSetMmThreadCharacteristicsA() is OK";
+        RTC_LOG(LS_VERBOSE)
+            << "AudioDeviceWindowsCore::AudioDeviceWindowsCore()"
+               " AvSetMmThreadPriority() is OK";
+        _winSupportAvrt = true;
+      }
+    }
+  }
+
+  // Create our samples ready events - we want auto reset events that start in
+  // the not-signaled state. The state of an auto-reset event object remains
+  // signaled until a single waiting thread is released, at which time the
+  // system automatically sets the state to nonsignaled. If no threads are
+  // waiting, the event object's state remains signaled. (Except for
+  // _hShutdownCaptureEvent, which is used to shutdown multiple threads).
+  _hRenderSamplesReadyEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
+  _hCaptureSamplesReadyEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
+  _hShutdownRenderEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
+  _hShutdownCaptureEvent = CreateEvent(NULL, TRUE, FALSE, NULL);
+  _hRenderStartedEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
+  _hCaptureStartedEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
+
+  _perfCounterFreq.QuadPart = 1;
+  _perfCounterFactor = 0.0;
+
+  // list of number of channels to use on recording side
+  _recChannelsPrioList[0] = 2;  // stereo is prio 1
+  _recChannelsPrioList[1] = 1;  // mono is prio 2
+  _recChannelsPrioList[2] = 4;  // quad is prio 3
+
+  // list of number of channels to use on playout side
+  _playChannelsPrioList[0] = 2;  // stereo is prio 1
+  _playChannelsPrioList[1] = 1;  // mono is prio 2
+
+  HRESULT hr;
+
+  // We know that this API will work since it has already been verified in
+  // CoreAudioIsSupported, hence no need to check for errors here as well.
+
+  // Retrieve the IMMDeviceEnumerator API (should load the MMDevAPI.dll)
+  // TODO(henrika): we should probably move this allocation to Init() instead
+  // and deallocate in Terminate() to make the implementation more symmetric.
+  CoCreateInstance(__uuidof(MMDeviceEnumerator), NULL, CLSCTX_ALL,
+                   __uuidof(IMMDeviceEnumerator),
+                   reinterpret_cast<void**>(&_ptrEnumerator));
+  RTC_DCHECK(_ptrEnumerator);
+
+  // DMO initialization for built-in WASAPI AEC.
+  {
+    IMediaObject* ptrDMO = NULL;
+    hr = CoCreateInstance(CLSID_CWMAudioAEC, NULL, CLSCTX_INPROC_SERVER,
+                          IID_IMediaObject, reinterpret_cast<void**>(&ptrDMO));
+    if (FAILED(hr) || ptrDMO == NULL) {
+      // Since we check that _dmo is non-NULL in EnableBuiltInAEC(), the
+      // feature is prevented from being enabled.
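+      // CLSID_CWMAudioAEC identifies the Voice Capture DSP, the DMO that
+      // hosts the built-in echo canceller used when _builtInAecEnabled is
+      // set.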
+      _builtInAecEnabled = false;
+      _TraceCOMError(hr);
+    }
+    _dmo = ptrDMO;
+    SAFE_RELEASE(ptrDMO);
+  }
+}
+
+// ----------------------------------------------------------------------------
+//  AudioDeviceWindowsCore() - dtor
+// ----------------------------------------------------------------------------
+
+AudioDeviceWindowsCore::~AudioDeviceWindowsCore() {
+  RTC_DLOG(LS_INFO) << __FUNCTION__ << " destroyed";
+
+  Terminate();
+
+  // The IMMDeviceEnumerator is created during construction. Must release
+  // it here and not in Terminate() since we don't recreate it in Init().
+  SAFE_RELEASE(_ptrEnumerator);
+
+  _ptrAudioBuffer = NULL;
+
+  if (NULL != _hRenderSamplesReadyEvent) {
+    CloseHandle(_hRenderSamplesReadyEvent);
+    _hRenderSamplesReadyEvent = NULL;
+  }
+
+  if (NULL != _hCaptureSamplesReadyEvent) {
+    CloseHandle(_hCaptureSamplesReadyEvent);
+    _hCaptureSamplesReadyEvent = NULL;
+  }
+
+  if (NULL != _hRenderStartedEvent) {
+    CloseHandle(_hRenderStartedEvent);
+    _hRenderStartedEvent = NULL;
+  }
+
+  if (NULL != _hCaptureStartedEvent) {
+    CloseHandle(_hCaptureStartedEvent);
+    _hCaptureStartedEvent = NULL;
+  }
+
+  if (NULL != _hShutdownRenderEvent) {
+    CloseHandle(_hShutdownRenderEvent);
+    _hShutdownRenderEvent = NULL;
+  }
+
+  if (NULL != _hShutdownCaptureEvent) {
+    CloseHandle(_hShutdownCaptureEvent);
+    _hShutdownCaptureEvent = NULL;
+  }
+
+  if (_avrtLibrary) {
+    BOOL freeOK = FreeLibrary(_avrtLibrary);
+    if (!freeOK) {
+      RTC_LOG(LS_WARNING)
+          << "AudioDeviceWindowsCore::~AudioDeviceWindowsCore()"
+             " failed to free the loaded Avrt DLL module correctly";
+    } else {
+      RTC_LOG(LS_WARNING) << "AudioDeviceWindowsCore::~AudioDeviceWindowsCore()"
+                             " the Avrt DLL module is now unloaded";
+    }
+  }
+}
+
+// ============================================================================
+//  API
+// ============================================================================
+
+// ----------------------------------------------------------------------------
+//  AttachAudioBuffer
+// ----------------------------------------------------------------------------
+
+void AudioDeviceWindowsCore::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
+  _ptrAudioBuffer = audioBuffer;
+
+  // Inform the AudioBuffer about default settings for this implementation.
+  // Set all values to zero here since the actual settings will be done by
+  // InitPlayout and InitRecording later.
+  _ptrAudioBuffer->SetRecordingSampleRate(0);
+  _ptrAudioBuffer->SetPlayoutSampleRate(0);
+  _ptrAudioBuffer->SetRecordingChannels(0);
+  _ptrAudioBuffer->SetPlayoutChannels(0);
+}
+
+// ----------------------------------------------------------------------------
+//  ActiveAudioLayer
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::ActiveAudioLayer(
+    AudioDeviceModule::AudioLayer& audioLayer) const {
+  audioLayer = AudioDeviceModule::kWindowsCoreAudio;
+  return 0;
+}
+
+// ----------------------------------------------------------------------------
+//  Init
+// ----------------------------------------------------------------------------
+
+AudioDeviceGeneric::InitStatus AudioDeviceWindowsCore::Init() {
+  MutexLock lock(&mutex_);
+
+  if (_initialized) {
+    return InitStatus::OK;
+  }
+
+  // Enumerate all audio rendering and capturing endpoint devices.
+  // Note that some of these will not be selectable by the user.
+  // The complete collection is for internal use only.
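+  // eRender enumerates playout endpoints and eCapture enumerates recording
+  // endpoints; the same IMMDeviceEnumerator instance serves both directions.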
+  _EnumerateEndpointDevicesAll(eRender);
+  _EnumerateEndpointDevicesAll(eCapture);
+
+  _initialized = true;
+
+  return InitStatus::OK;
+}
+
+// ----------------------------------------------------------------------------
+//  Terminate
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::Terminate() {
+  MutexLock lock(&mutex_);
+
+  if (!_initialized) {
+    return 0;
+  }
+
+  _initialized = false;
+  _speakerIsInitialized = false;
+  _microphoneIsInitialized = false;
+  _playing = false;
+  _recording = false;
+
+  SAFE_RELEASE(_ptrRenderCollection);
+  SAFE_RELEASE(_ptrCaptureCollection);
+  SAFE_RELEASE(_ptrDeviceOut);
+  SAFE_RELEASE(_ptrDeviceIn);
+  SAFE_RELEASE(_ptrClientOut);
+  SAFE_RELEASE(_ptrClientIn);
+  SAFE_RELEASE(_ptrRenderClient);
+  SAFE_RELEASE(_ptrCaptureClient);
+  SAFE_RELEASE(_ptrCaptureVolume);
+  SAFE_RELEASE(_ptrRenderSimpleVolume);
+
+  return 0;
+}
+
+// ----------------------------------------------------------------------------
+//  Initialized
+// ----------------------------------------------------------------------------
+
+bool AudioDeviceWindowsCore::Initialized() const {
+  return (_initialized);
+}
+
+// ----------------------------------------------------------------------------
+//  InitSpeaker
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::InitSpeaker() {
+  MutexLock lock(&mutex_);
+  return InitSpeakerLocked();
+}
+
+int32_t AudioDeviceWindowsCore::InitSpeakerLocked() {
+  if (_playing) {
+    return -1;
+  }
+
+  if (_ptrDeviceOut == NULL) {
+    return -1;
+  }
+
+  if (_usingOutputDeviceIndex) {
+    int16_t nDevices = PlayoutDevicesLocked();
+    if (_outputDeviceIndex > (nDevices - 1)) {
+      RTC_LOG(LS_ERROR) << "current device selection is invalid => unable to"
+                           " initialize";
+      return -1;
+    }
+  }
+
+  int32_t ret(0);
+
+  SAFE_RELEASE(_ptrDeviceOut);
+  if (_usingOutputDeviceIndex) {
+    // Refresh the selected rendering endpoint device using current index
+    ret = _GetListDevice(eRender, _outputDeviceIndex, &_ptrDeviceOut);
+  } else {
+    ERole role;
+    (_outputDevice == AudioDeviceModule::kDefaultDevice)
+        ? role = eConsole
+        : role = eCommunications;
+    // Refresh the selected rendering endpoint device using role
+    ret = _GetDefaultDevice(eRender, role, &_ptrDeviceOut);
+  }
+
+  if (ret != 0 || (_ptrDeviceOut == NULL)) {
+    RTC_LOG(LS_ERROR) << "failed to initialize the rendering endpoint device";
+    SAFE_RELEASE(_ptrDeviceOut);
+    return -1;
+  }
+
+  IAudioSessionManager* pManager = NULL;
+  ret = _ptrDeviceOut->Activate(__uuidof(IAudioSessionManager), CLSCTX_ALL,
+                                NULL, (void**)&pManager);
+  if (ret != 0 || pManager == NULL) {
+    RTC_LOG(LS_ERROR) << "failed to initialize the render manager";
+    SAFE_RELEASE(pManager);
+    return -1;
+  }
+
+  SAFE_RELEASE(_ptrRenderSimpleVolume);
+  ret = pManager->GetSimpleAudioVolume(NULL, FALSE, &_ptrRenderSimpleVolume);
+  if (ret != 0 || _ptrRenderSimpleVolume == NULL) {
+    RTC_LOG(LS_ERROR) << "failed to initialize the render simple volume";
+    SAFE_RELEASE(pManager);
+    SAFE_RELEASE(_ptrRenderSimpleVolume);
+    return -1;
+  }
+  SAFE_RELEASE(pManager);
+
+  _speakerIsInitialized = true;
+
+  return 0;
+}
+
+// ----------------------------------------------------------------------------
+//  InitMicrophone
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::InitMicrophone() {
+  MutexLock lock(&mutex_);
+  return InitMicrophoneLocked();
+}
+
+int32_t AudioDeviceWindowsCore::InitMicrophoneLocked() {
+  if (_recording) {
+    return -1;
+  }
+
+  if (_ptrDeviceIn == NULL) {
+    return -1;
+  }
+
+  if (_usingInputDeviceIndex) {
+    int16_t nDevices = RecordingDevicesLocked();
+    if (_inputDeviceIndex > (nDevices - 1)) {
+      RTC_LOG(LS_ERROR) << "current device selection is invalid => unable to"
+                           " initialize";
+      return -1;
+    }
+  }
+
+  int32_t ret(0);
+
+  SAFE_RELEASE(_ptrDeviceIn);
+  if (_usingInputDeviceIndex) {
+    // Refresh the selected capture endpoint device using current index
+    ret = _GetListDevice(eCapture, _inputDeviceIndex, &_ptrDeviceIn);
+  } else {
+    ERole role;
+    (_inputDevice == AudioDeviceModule::kDefaultDevice)
+        ? role = eConsole
+        : role = eCommunications;
+    // Refresh the selected capture endpoint device using role
+    ret = _GetDefaultDevice(eCapture, role, &_ptrDeviceIn);
+  }
+
+  if (ret != 0 || (_ptrDeviceIn == NULL)) {
+    RTC_LOG(LS_ERROR) << "failed to initialize the capturing endpoint device";
+    SAFE_RELEASE(_ptrDeviceIn);
+    return -1;
+  }
+
+  SAFE_RELEASE(_ptrCaptureVolume);
+  ret = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,
+                               reinterpret_cast<void**>(&_ptrCaptureVolume));
+  if (ret != 0 || _ptrCaptureVolume == NULL) {
+    RTC_LOG(LS_ERROR) << "failed to initialize the capture volume";
+    SAFE_RELEASE(_ptrCaptureVolume);
+    return -1;
+  }
+
+  _microphoneIsInitialized = true;
+
+  return 0;
+}
+
+// ----------------------------------------------------------------------------
+//  SpeakerIsInitialized
+// ----------------------------------------------------------------------------
+
+bool AudioDeviceWindowsCore::SpeakerIsInitialized() const {
+  return (_speakerIsInitialized);
+}
+
+// ----------------------------------------------------------------------------
+//  MicrophoneIsInitialized
+// ----------------------------------------------------------------------------
+
+bool AudioDeviceWindowsCore::MicrophoneIsInitialized() const {
+  return (_microphoneIsInitialized);
+}
+
+// ----------------------------------------------------------------------------
+//  SpeakerVolumeIsAvailable
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::SpeakerVolumeIsAvailable(bool& available) {
+  MutexLock lock(&mutex_);
+
+  if (_ptrDeviceOut == NULL) {
+    return -1;
+  }
+
+  HRESULT hr = S_OK;
+  IAudioSessionManager* pManager = NULL;
+  ISimpleAudioVolume* pVolume = NULL;
+
+  hr = _ptrDeviceOut->Activate(__uuidof(IAudioSessionManager), CLSCTX_ALL, NULL,
+                               (void**)&pManager);
+  EXIT_ON_ERROR(hr);
+
+  hr = pManager->GetSimpleAudioVolume(NULL, FALSE, &pVolume);
+  EXIT_ON_ERROR(hr);
+
+  float volume(0.0f);
+  hr = pVolume->GetMasterVolume(&volume);
+  if (FAILED(hr)) {
+    available = false;
+  } else {
+    available = true;
+  }
+
+  SAFE_RELEASE(pManager);
+  SAFE_RELEASE(pVolume);
+
+  return 0;
+
+Exit:
+  _TraceCOMError(hr);
+  SAFE_RELEASE(pManager);
+  SAFE_RELEASE(pVolume);
+  return -1;
+}
+
+// ----------------------------------------------------------------------------
+//  SetSpeakerVolume
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::SetSpeakerVolume(uint32_t volume) {
+  {
+    MutexLock lock(&mutex_);
+
+    if (!_speakerIsInitialized) {
+      return -1;
+    }
+
+    if (_ptrDeviceOut == NULL) {
+      return -1;
+    }
+  }
+
+  if (volume < (uint32_t)MIN_CORE_SPEAKER_VOLUME ||
+      volume > (uint32_t)MAX_CORE_SPEAKER_VOLUME) {
+    return -1;
+  }
+
+  HRESULT hr = S_OK;
+
+  // scale input volume to valid range (0.0 to 1.0)
+  const float fLevel = (float)volume / MAX_CORE_SPEAKER_VOLUME;
+  volume_mutex_.Lock();
+  hr = _ptrRenderSimpleVolume->SetMasterVolume(fLevel, NULL);
+  volume_mutex_.Unlock();
+  EXIT_ON_ERROR(hr);
+
+  return 0;
+
+Exit:
+  _TraceCOMError(hr);
+  return -1;
+}
+
+// ----------------------------------------------------------------------------
+//  SpeakerVolume
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::SpeakerVolume(uint32_t& volume) const {
+  {
+    MutexLock lock(&mutex_);
+
+    if (!_speakerIsInitialized) {
+      return -1;
+    }
+
+    if (_ptrDeviceOut == NULL) {
+      return -1;
+    }
+  }
+
+  HRESULT hr = S_OK;
+  float fLevel(0.0f);
+
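+  // volume_mutex_ serializes the calls on _ptrRenderSimpleVolume so that
+  // volume queries and updates from different threads do not interleave.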
+  volume_mutex_.Lock();
+  hr = _ptrRenderSimpleVolume->GetMasterVolume(&fLevel);
+  volume_mutex_.Unlock();
+  EXIT_ON_ERROR(hr);
+
+  // scale input volume range [0.0,1.0] to valid output range
+  volume = static_cast<uint32_t>(fLevel * MAX_CORE_SPEAKER_VOLUME);
+
+  return 0;
+
+Exit:
+  _TraceCOMError(hr);
+  return -1;
+}
+
+// ----------------------------------------------------------------------------
+//  MaxSpeakerVolume
+//
+//  The internal range for Core Audio is 0.0 to 1.0, where 0.0 indicates
+//  silence and 1.0 indicates full volume (no attenuation).
+//  We add our (webrtc-internal) own max level to match the Wave API and
+//  how it is used today in VoE.
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::MaxSpeakerVolume(uint32_t& maxVolume) const {
+  if (!_speakerIsInitialized) {
+    return -1;
+  }
+
+  maxVolume = static_cast<uint32_t>(MAX_CORE_SPEAKER_VOLUME);
+
+  return 0;
+}
+
+// ----------------------------------------------------------------------------
+//  MinSpeakerVolume
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::MinSpeakerVolume(uint32_t& minVolume) const {
+  if (!_speakerIsInitialized) {
+    return -1;
+  }
+
+  minVolume = static_cast<uint32_t>(MIN_CORE_SPEAKER_VOLUME);
+
+  return 0;
+}
+
+// ----------------------------------------------------------------------------
+//  SpeakerMuteIsAvailable
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::SpeakerMuteIsAvailable(bool& available) {
+  MutexLock lock(&mutex_);
+
+  if (_ptrDeviceOut == NULL) {
+    return -1;
+  }
+
+  HRESULT hr = S_OK;
+  IAudioEndpointVolume* pVolume = NULL;
+
+  // Query the speaker system mute state.
+  hr = _ptrDeviceOut->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,
+                               reinterpret_cast<void**>(&pVolume));
+  EXIT_ON_ERROR(hr);
+
+  BOOL mute;
+  hr = pVolume->GetMute(&mute);
+  if (FAILED(hr))
+    available = false;
+  else
+    available = true;
+
+  SAFE_RELEASE(pVolume);
+
+  return 0;
+
+Exit:
+  _TraceCOMError(hr);
+  SAFE_RELEASE(pVolume);
+  return -1;
+}
+
+// ----------------------------------------------------------------------------
+//  SetSpeakerMute
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::SetSpeakerMute(bool enable) {
+  MutexLock lock(&mutex_);
+
+  if (!_speakerIsInitialized) {
+    return -1;
+  }
+
+  if (_ptrDeviceOut == NULL) {
+    return -1;
+  }
+
+  HRESULT hr = S_OK;
+  IAudioEndpointVolume* pVolume = NULL;
+
+  // Set the speaker system mute state.
+  hr = _ptrDeviceOut->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,
+                               reinterpret_cast<void**>(&pVolume));
+  EXIT_ON_ERROR(hr);
+
+  const BOOL mute(enable);
+  hr = pVolume->SetMute(mute, NULL);
+  EXIT_ON_ERROR(hr);
+
+  SAFE_RELEASE(pVolume);
+
+  return 0;
+
+Exit:
+  _TraceCOMError(hr);
+  SAFE_RELEASE(pVolume);
+  return -1;
+}
+
+// ----------------------------------------------------------------------------
+//  SpeakerMute
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::SpeakerMute(bool& enabled) const {
+  if (!_speakerIsInitialized) {
+    return -1;
+  }
+
+  if (_ptrDeviceOut == NULL) {
+    return -1;
+  }
+
+  HRESULT hr = S_OK;
+  IAudioEndpointVolume* pVolume = NULL;
+
+  // Query the speaker system mute state.
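+  // Note that IAudioEndpointVolume controls the device-wide volume/mute,
+  // whereas ISimpleAudioVolume (used above) only affects this audio session.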
+  hr = _ptrDeviceOut->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,
+                               reinterpret_cast<void**>(&pVolume));
+  EXIT_ON_ERROR(hr);
+
+  BOOL mute;
+  hr = pVolume->GetMute(&mute);
+  EXIT_ON_ERROR(hr);
+
+  enabled = (mute == TRUE) ? true : false;
+
+  SAFE_RELEASE(pVolume);
+
+  return 0;
+
+Exit:
+  _TraceCOMError(hr);
+  SAFE_RELEASE(pVolume);
+  return -1;
+}
+
+// ----------------------------------------------------------------------------
+//  MicrophoneMuteIsAvailable
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::MicrophoneMuteIsAvailable(bool& available) {
+  MutexLock lock(&mutex_);
+
+  if (_ptrDeviceIn == NULL) {
+    return -1;
+  }
+
+  HRESULT hr = S_OK;
+  IAudioEndpointVolume* pVolume = NULL;
+
+  // Query the microphone system mute state.
+  hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,
+                              reinterpret_cast<void**>(&pVolume));
+  EXIT_ON_ERROR(hr);
+
+  BOOL mute;
+  hr = pVolume->GetMute(&mute);
+  if (FAILED(hr))
+    available = false;
+  else
+    available = true;
+
+  SAFE_RELEASE(pVolume);
+  return 0;
+
+Exit:
+  _TraceCOMError(hr);
+  SAFE_RELEASE(pVolume);
+  return -1;
+}
+
+// ----------------------------------------------------------------------------
+//  SetMicrophoneMute
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::SetMicrophoneMute(bool enable) {
+  if (!_microphoneIsInitialized) {
+    return -1;
+  }
+
+  if (_ptrDeviceIn == NULL) {
+    return -1;
+  }
+
+  HRESULT hr = S_OK;
+  IAudioEndpointVolume* pVolume = NULL;
+
+  // Set the microphone system mute state.
+  hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,
+                              reinterpret_cast<void**>(&pVolume));
+  EXIT_ON_ERROR(hr);
+
+  const BOOL mute(enable);
+  hr = pVolume->SetMute(mute, NULL);
+  EXIT_ON_ERROR(hr);
+
+  SAFE_RELEASE(pVolume);
+  return 0;
+
+Exit:
+  _TraceCOMError(hr);
+  SAFE_RELEASE(pVolume);
+  return -1;
+}
+
+// ----------------------------------------------------------------------------
+//  MicrophoneMute
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::MicrophoneMute(bool& enabled) const {
+  if (!_microphoneIsInitialized) {
+    return -1;
+  }
+
+  HRESULT hr = S_OK;
+  IAudioEndpointVolume* pVolume = NULL;
+
+  // Query the microphone system mute state.
+  hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,
+                              reinterpret_cast<void**>(&pVolume));
+  EXIT_ON_ERROR(hr);
+
+  BOOL mute;
+  hr = pVolume->GetMute(&mute);
+  EXIT_ON_ERROR(hr);
+
+  enabled = (mute == TRUE) ? true : false;
+
+  SAFE_RELEASE(pVolume);
+  return 0;
+
+Exit:
+  _TraceCOMError(hr);
+  SAFE_RELEASE(pVolume);
+  return -1;
+}
+
+// ----------------------------------------------------------------------------
+//  StereoRecordingIsAvailable
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::StereoRecordingIsAvailable(bool& available) {
+  available = true;
+  return 0;
+}
+
+// ----------------------------------------------------------------------------
+//  SetStereoRecording
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::SetStereoRecording(bool enable) {
+  MutexLock lock(&mutex_);
+
+  if (enable) {
+    _recChannelsPrioList[0] = 2;  // try stereo first
+    _recChannelsPrioList[1] = 1;
+    _recChannels = 2;
+  } else {
+    _recChannelsPrioList[0] = 1;  // try mono first
+    _recChannelsPrioList[1] = 2;
+    _recChannels = 1;
+  }
+
+  return 0;
+}
+
+// ----------------------------------------------------------------------------
+//  StereoRecording
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::StereoRecording(bool& enabled) const {
+  if (_recChannels == 2)
+    enabled = true;
+  else
+    enabled = false;
+
+  return 0;
+}
+
+// ----------------------------------------------------------------------------
+//  StereoPlayoutIsAvailable
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::StereoPlayoutIsAvailable(bool& available) {
+  available = true;
+  return 0;
+}
+
+// ----------------------------------------------------------------------------
+//  SetStereoPlayout
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::SetStereoPlayout(bool enable) {
+  MutexLock lock(&mutex_);
+
+  if (enable) {
+    _playChannelsPrioList[0] = 2;  // try stereo first
+    _playChannelsPrioList[1] = 1;
+    _playChannels = 2;
+  } else {
+    _playChannelsPrioList[0] = 1;  // try mono first
+    _playChannelsPrioList[1] = 2;
+    _playChannels = 1;
+  }
+
+  return 0;
+}
+
+// ----------------------------------------------------------------------------
+//  StereoPlayout
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::StereoPlayout(bool& enabled) const {
+  if (_playChannels == 2)
+    enabled = true;
+  else
+    enabled = false;
+
+  return 0;
+}
+
+// ----------------------------------------------------------------------------
+//  MicrophoneVolumeIsAvailable
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::MicrophoneVolumeIsAvailable(bool& available) {
+  MutexLock lock(&mutex_);
+
+  if (_ptrDeviceIn == NULL) {
+    return -1;
+  }
+
+  HRESULT hr = S_OK;
+  IAudioEndpointVolume* pVolume = NULL;
+
+  hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,
+                              reinterpret_cast<void**>(&pVolume));
+  EXIT_ON_ERROR(hr);
+
+  float volume(0.0f);
+  hr = pVolume->GetMasterVolumeLevelScalar(&volume);
+  if (FAILED(hr)) {
+    available = false;
+  } else {
+    available = true;
+  }
+
+  SAFE_RELEASE(pVolume);
+  return 0;
+
+Exit:
+  _TraceCOMError(hr);
+  SAFE_RELEASE(pVolume);
+  return -1;
+}
+
+// ----------------------------------------------------------------------------
+//  SetMicrophoneVolume
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::SetMicrophoneVolume(uint32_t volume) {
+  RTC_LOG(LS_VERBOSE) << "AudioDeviceWindowsCore::SetMicrophoneVolume(volume="
+                      << volume << ")";
+
+  {
+    MutexLock lock(&mutex_);
+
+    if (!_microphoneIsInitialized) {
+      return -1;
+    }
+
+    if (_ptrDeviceIn == NULL) {
+      return -1;
+    }
+  }
+
+  if (volume < static_cast<uint32_t>(MIN_CORE_MICROPHONE_VOLUME) ||
+      volume > static_cast<uint32_t>(MAX_CORE_MICROPHONE_VOLUME)) {
+    return -1;
+  }
+
+  HRESULT hr = S_OK;
+  // scale input volume to valid range (0.0 to 1.0)
+  const float fLevel = static_cast<float>(volume) / MAX_CORE_MICROPHONE_VOLUME;
+  volume_mutex_.Lock();
+  hr = _ptrCaptureVolume->SetMasterVolumeLevelScalar(fLevel, NULL);
+  volume_mutex_.Unlock();
+  EXIT_ON_ERROR(hr);
+
+  return 0;
+
+Exit:
+  _TraceCOMError(hr);
+  return -1;
+}
+
+// ----------------------------------------------------------------------------
+//  MicrophoneVolume
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::MicrophoneVolume(uint32_t& volume) const {
+  {
+    MutexLock lock(&mutex_);
+
+    if (!_microphoneIsInitialized) {
+      return -1;
+    }
+
+    if (_ptrDeviceIn == NULL) {
+      return -1;
+    }
+  }
+
+  HRESULT hr = S_OK;
+  float fLevel(0.0f);
+  volume = 0;
+  volume_mutex_.Lock();
+  hr = _ptrCaptureVolume->GetMasterVolumeLevelScalar(&fLevel);
+  volume_mutex_.Unlock();
+  EXIT_ON_ERROR(hr);
+
+  // scale input volume range [0.0,1.0] to valid output range
+  volume = static_cast<uint32_t>(fLevel * MAX_CORE_MICROPHONE_VOLUME);
+
+  return 0;
+
+Exit:
+  _TraceCOMError(hr);
+  return -1;
+}
+
+// ----------------------------------------------------------------------------
+//  MaxMicrophoneVolume
+//
+//  The internal range for Core Audio is 0.0 to 1.0, where 0.0 indicates
+//  silence and 1.0 indicates full volume (no attenuation).
+//  We add our (webrtc-internal) own max level to match the Wave API and
+//  how it is used today in VoE.
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::MaxMicrophoneVolume(uint32_t& maxVolume) const {
+  RTC_DLOG(LS_VERBOSE) << __FUNCTION__;
+
+  if (!_microphoneIsInitialized) {
+    return -1;
+  }
+
+  maxVolume = static_cast<uint32_t>(MAX_CORE_MICROPHONE_VOLUME);
+
+  return 0;
+}
+
+// ----------------------------------------------------------------------------
+//  MinMicrophoneVolume
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::MinMicrophoneVolume(uint32_t& minVolume) const {
+  if (!_microphoneIsInitialized) {
+    return -1;
+  }
+
+  minVolume = static_cast<uint32_t>(MIN_CORE_MICROPHONE_VOLUME);
+
+  return 0;
+}
+
+// ----------------------------------------------------------------------------
+//  PlayoutDevices
+// ----------------------------------------------------------------------------
+int16_t AudioDeviceWindowsCore::PlayoutDevices() {
+  MutexLock lock(&mutex_);
+  return PlayoutDevicesLocked();
+}
+
+int16_t AudioDeviceWindowsCore::PlayoutDevicesLocked() {
+  if (_RefreshDeviceList(eRender) != -1) {
+    return (_DeviceListCount(eRender));
+  }
+
+  return -1;
+}
+
+// ----------------------------------------------------------------------------
+//  SetPlayoutDevice I (II)
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::SetPlayoutDevice(uint16_t index) {
+  if (_playIsInitialized) {
+    return -1;
+  }
+
+  // Get current number of available rendering endpoint devices and refresh the
+  // rendering collection.
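+  // PlayoutDevices() also rebuilds _ptrRenderCollection, which the
+  // Item(index) lookup below depends on.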
+  UINT nDevices = PlayoutDevices();
+
+  if (index < 0 || index > (nDevices - 1)) {
+    RTC_LOG(LS_ERROR) << "device index is out of range [0," << (nDevices - 1)
+                      << "]";
+    return -1;
+  }
+
+  MutexLock lock(&mutex_);
+
+  HRESULT hr(S_OK);
+
+  RTC_DCHECK(_ptrRenderCollection);
+
+  // Select an endpoint rendering device given the specified index
+  SAFE_RELEASE(_ptrDeviceOut);
+  hr = _ptrRenderCollection->Item(index, &_ptrDeviceOut);
+  if (FAILED(hr)) {
+    _TraceCOMError(hr);
+    SAFE_RELEASE(_ptrDeviceOut);
+    return -1;
+  }
+
+  WCHAR szDeviceName[MAX_PATH];
+  const int bufferLen = sizeof(szDeviceName) / sizeof(szDeviceName)[0];
+
+  // Get the endpoint device's friendly-name
+  if (_GetDeviceName(_ptrDeviceOut, szDeviceName, bufferLen) == 0) {
+    RTC_LOG(LS_VERBOSE) << "friendly name: \"" << szDeviceName << "\"";
+  }
+
+  _usingOutputDeviceIndex = true;
+  _outputDeviceIndex = index;
+
+  return 0;
+}
+
+// ----------------------------------------------------------------------------
+//  SetPlayoutDevice II (II)
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::SetPlayoutDevice(
+    AudioDeviceModule::WindowsDeviceType device) {
+  if (_playIsInitialized) {
+    return -1;
+  }
+
+  ERole role(eCommunications);
+
+  if (device == AudioDeviceModule::kDefaultDevice) {
+    role = eConsole;
+  } else if (device == AudioDeviceModule::kDefaultCommunicationDevice) {
+    role = eCommunications;
+  }
+
+  MutexLock lock(&mutex_);
+
+  // Refresh the list of rendering endpoint devices
+  _RefreshDeviceList(eRender);
+
+  HRESULT hr(S_OK);
+
+  RTC_DCHECK(_ptrEnumerator);
+
+  // Select an endpoint rendering device given the specified role
+  SAFE_RELEASE(_ptrDeviceOut);
+  hr = _ptrEnumerator->GetDefaultAudioEndpoint(eRender, role, &_ptrDeviceOut);
+  if (FAILED(hr)) {
+    _TraceCOMError(hr);
+    SAFE_RELEASE(_ptrDeviceOut);
+    return -1;
+  }
+
+  WCHAR szDeviceName[MAX_PATH];
+  const int bufferLen = sizeof(szDeviceName) / sizeof(szDeviceName)[0];
+
+  // Get the endpoint device's friendly-name
+  if (_GetDeviceName(_ptrDeviceOut, szDeviceName, bufferLen) == 0) {
+    RTC_LOG(LS_VERBOSE) << "friendly name: \"" << szDeviceName << "\"";
+  }
+
+  _usingOutputDeviceIndex = false;
+  _outputDevice = device;
+
+  return 0;
+}
+
+// ----------------------------------------------------------------------------
+//  PlayoutDeviceName
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::PlayoutDeviceName(
+    uint16_t index,
+    char name[kAdmMaxDeviceNameSize],
+    char guid[kAdmMaxGuidSize]) {
+  bool defaultCommunicationDevice(false);
+  const int16_t nDevices(PlayoutDevices());  // also updates the list of devices
+
+  // Special fix for the case when the user selects '-1' as index (<=> Default
+  // Communication Device)
+  if (index == (uint16_t)(-1)) {
+    defaultCommunicationDevice = true;
+    index = 0;
+    RTC_LOG(LS_VERBOSE) << "Default Communication endpoint device will be used";
+  }
+
+  if ((index > (nDevices - 1)) || (name == NULL)) {
+    return -1;
+  }
+
+  memset(name, 0, kAdmMaxDeviceNameSize);
+
+  if (guid != NULL) {
+    memset(guid, 0, kAdmMaxGuidSize);
+  }
+
+  MutexLock lock(&mutex_);
+
+  int32_t ret(-1);
+  WCHAR szDeviceName[MAX_PATH];
+  const int bufferLen = sizeof(szDeviceName) / sizeof(szDeviceName)[0];
+
+  // Get the endpoint device's friendly-name
+  if (defaultCommunicationDevice) {
+    ret = _GetDefaultDeviceName(eRender, eCommunications, szDeviceName,
+                                bufferLen);
+  } else {
+    ret = _GetListDeviceName(eRender, index, szDeviceName, bufferLen);
+  }
+
+  if (ret == 0) {
+    // Convert the endpoint device's friendly-name to UTF-8
+    if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, name,
+                            kAdmMaxDeviceNameSize, NULL, NULL) == 0) {
+      RTC_LOG(LS_ERROR)
+          << "WideCharToMultiByte(CP_UTF8) failed with error code "
+          << GetLastError();
+    }
+  }
+
+  // Get the endpoint ID string (uniquely identifies the device among all audio
+  // endpoint devices)
+  if (defaultCommunicationDevice) {
+    ret =
+        _GetDefaultDeviceID(eRender, eCommunications, szDeviceName, bufferLen);
+  } else {
+    ret = _GetListDeviceID(eRender, index, szDeviceName, bufferLen);
+  }
+
+  if (guid != NULL && ret == 0) {
+    // Convert the endpoint device's ID string to UTF-8
+    if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, guid, kAdmMaxGuidSize,
+                            NULL, NULL) == 0) {
+      RTC_LOG(LS_ERROR)
+          << "WideCharToMultiByte(CP_UTF8) failed with error code "
+          << GetLastError();
+    }
+  }
+
+  return ret;
+}
+
+// ----------------------------------------------------------------------------
+//  RecordingDeviceName
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::RecordingDeviceName(
+    uint16_t index,
+    char name[kAdmMaxDeviceNameSize],
+    char guid[kAdmMaxGuidSize]) {
+  bool defaultCommunicationDevice(false);
+  const int16_t nDevices(
+      RecordingDevices());  // also updates the list of devices
+
+  // Special fix for the case when the user selects '-1' as index (<=> Default
+  // Communication Device)
+  if (index == (uint16_t)(-1)) {
+    defaultCommunicationDevice = true;
+    index = 0;
+    RTC_LOG(LS_VERBOSE) << "Default Communication endpoint device will be used";
+  }
+
+  if ((index > (nDevices - 1)) || (name == NULL)) {
+    return -1;
+  }
+
+  memset(name, 0, kAdmMaxDeviceNameSize);
+
+  if (guid != NULL) {
+    memset(guid, 0, kAdmMaxGuidSize);
+  }
+
+  MutexLock lock(&mutex_);
+
+  int32_t ret(-1);
+  WCHAR szDeviceName[MAX_PATH];
+  const int bufferLen = sizeof(szDeviceName) / sizeof(szDeviceName)[0];
+
+  // Get the endpoint device's friendly-name
+  if (defaultCommunicationDevice) {
+    ret = _GetDefaultDeviceName(eCapture, eCommunications, szDeviceName,
+                                bufferLen);
+  } else {
+    ret = _GetListDeviceName(eCapture, index, szDeviceName, bufferLen);
+  }
+
+  if (ret == 0) {
+    // Convert the endpoint device's friendly-name to UTF-8
+    if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, name,
+                            kAdmMaxDeviceNameSize, NULL, NULL) == 0) {
+      RTC_LOG(LS_ERROR)
+          << "WideCharToMultiByte(CP_UTF8) failed with error code "
+          << GetLastError();
+    }
+  }
+
+  // Get the endpoint ID string (uniquely identifies the device among all audio
+  // endpoint devices)
+  if (defaultCommunicationDevice) {
+    ret =
+        _GetDefaultDeviceID(eCapture, eCommunications, szDeviceName, bufferLen);
+  } else {
+    ret = _GetListDeviceID(eCapture, index, szDeviceName, bufferLen);
+  }
+
+  if (guid != NULL && ret == 0) {
+    // Convert the endpoint device's ID string to UTF-8
+    if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, guid, kAdmMaxGuidSize,
+                            NULL, NULL) == 0) {
+      RTC_LOG(LS_ERROR)
+          << "WideCharToMultiByte(CP_UTF8) failed with error code "
+          << GetLastError();
+    }
+  }
+
+  return ret;
+}
+
+// ----------------------------------------------------------------------------
+//  RecordingDevices
+// ----------------------------------------------------------------------------
+
+int16_t AudioDeviceWindowsCore::RecordingDevices() {
+  MutexLock lock(&mutex_);
+  return RecordingDevicesLocked();
+}
+
+int16_t AudioDeviceWindowsCore::RecordingDevicesLocked() {
+  if (_RefreshDeviceList(eCapture) != -1) {
+    return (_DeviceListCount(eCapture));
+  }
+
+  return -1;
+}
+
+// ----------------------------------------------------------------------------
+//  SetRecordingDevice I (II)
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::SetRecordingDevice(uint16_t index) {
+  if (_recIsInitialized) {
+    return -1;
+  }
+
+  // Get current number of available capture endpoint devices and refresh the
+  // capture collection.
+  UINT nDevices = RecordingDevices();
+
+  if (index < 0 || index > (nDevices - 1)) {
+    RTC_LOG(LS_ERROR) << "device index is out of range [0," << (nDevices - 1)
+                      << "]";
+    return -1;
+  }
+
+  MutexLock lock(&mutex_);
+
+  HRESULT hr(S_OK);
+
+  RTC_DCHECK(_ptrCaptureCollection);
+
+  // Select an endpoint capture device given the specified index
+  SAFE_RELEASE(_ptrDeviceIn);
+  hr = _ptrCaptureCollection->Item(index, &_ptrDeviceIn);
+  if (FAILED(hr)) {
+    _TraceCOMError(hr);
+    SAFE_RELEASE(_ptrDeviceIn);
+    return -1;
+  }
+
+  WCHAR szDeviceName[MAX_PATH];
+  const int bufferLen = sizeof(szDeviceName) / sizeof(szDeviceName)[0];
+
+  // Get the endpoint device's friendly-name
+  if (_GetDeviceName(_ptrDeviceIn, szDeviceName, bufferLen) == 0) {
+    RTC_LOG(LS_VERBOSE) << "friendly name: \"" << szDeviceName << "\"";
+  }
+
+  _usingInputDeviceIndex = true;
+  _inputDeviceIndex = index;
+
+  return 0;
+}
+
+// ----------------------------------------------------------------------------
+//  SetRecordingDevice II (II)
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::SetRecordingDevice(
+    AudioDeviceModule::WindowsDeviceType device) {
+  if (_recIsInitialized) {
+    return -1;
+  }
+
+  ERole role(eCommunications);
+
+  if (device == AudioDeviceModule::kDefaultDevice) {
+    role = eConsole;
+  } else if (device == AudioDeviceModule::kDefaultCommunicationDevice) {
+    role = eCommunications;
+  }
+
+  MutexLock lock(&mutex_);
+
+  // Refresh the list of capture endpoint devices
+  _RefreshDeviceList(eCapture);
+
+  HRESULT hr(S_OK);
+
+  RTC_DCHECK(_ptrEnumerator);
+
+  // Select an endpoint capture device given the specified role
+  SAFE_RELEASE(_ptrDeviceIn);
+  hr = _ptrEnumerator->GetDefaultAudioEndpoint(eCapture, role, &_ptrDeviceIn);
+  if (FAILED(hr)) {
+    _TraceCOMError(hr);
+    SAFE_RELEASE(_ptrDeviceIn);
+    return -1;
+  }
+
+  WCHAR szDeviceName[MAX_PATH];
+  const int bufferLen = sizeof(szDeviceName) / sizeof(szDeviceName)[0];
+
+  // Get the endpoint device's friendly-name
+  if (_GetDeviceName(_ptrDeviceIn, szDeviceName, bufferLen) == 0) {
+    RTC_LOG(LS_VERBOSE) << "friendly name: \"" << szDeviceName << "\"";
+  }
+
+  _usingInputDeviceIndex = false;
+  _inputDevice = device;
+
+  return 0;
+}
+
+// ----------------------------------------------------------------------------
+//  PlayoutIsAvailable
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::PlayoutIsAvailable(bool& available) {
+  available = false;
+
+  // Try to initialize the playout side
+  int32_t res = InitPlayout();
+
+  // Cancel effect of initialization
+  StopPlayout();
+
+  if (res != -1) {
+    available = true;
+  }
+
+  return 0;
+}
+
+// ----------------------------------------------------------------------------
+//  RecordingIsAvailable
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::RecordingIsAvailable(bool& available) {
+  available = false;
+
+  // Try to initialize the recording side
+  int32_t res = InitRecording();
+
+  // Cancel effect of initialization
+  StopRecording();
+
+  if (res != -1) {
+    available = true;
+  }
+
+  return 0;
+}
+
+// ----------------------------------------------------------------------------
+//  InitPlayout
+// ----------------------------------------------------------------------------
+
+int32_t AudioDeviceWindowsCore::InitPlayout() {
+  MutexLock lock(&mutex_);
+
+  if (_playing) {
+    return -1;
+  }
+
+  if (_playIsInitialized) {
+    return 0;
+  }
+
+  if (_ptrDeviceOut == NULL) {
+    return -1;
+  }
+
+  // Initialize the speaker (devices might have been added or removed)
+  if (InitSpeakerLocked() == -1) {
+    RTC_LOG(LS_WARNING) << "InitSpeaker() failed";
+  }
+
+  // Ensure that the updated rendering endpoint device is valid
+  if (_ptrDeviceOut == NULL) {
+    return -1;
+  }
+
+  if (_builtInAecEnabled && _recIsInitialized) {
+    // Ensure the correct render device is configured in case
+    // InitRecording() was called before InitPlayout().
+    if (SetDMOProperties() == -1) {
+      return -1;
+    }
+  }
+
+  HRESULT hr = S_OK;
+  WAVEFORMATEX* pWfxOut = NULL;
+  WAVEFORMATEX Wfx = WAVEFORMATEX();
+  WAVEFORMATEX* pWfxClosestMatch = NULL;
+
+  // Create COM object with IAudioClient interface.
+  SAFE_RELEASE(_ptrClientOut);
+  hr = _ptrDeviceOut->Activate(__uuidof(IAudioClient), CLSCTX_ALL, NULL,
+                               (void**)&_ptrClientOut);
+  EXIT_ON_ERROR(hr);
+
+  // Retrieve the stream format that the audio engine uses for its internal
+  // processing (mixing) of shared-mode streams.
+  hr = _ptrClientOut->GetMixFormat(&pWfxOut);
+  if (SUCCEEDED(hr)) {
+    RTC_LOG(LS_VERBOSE) << "Audio Engine's current rendering mix format:";
+    // format type
+    RTC_LOG(LS_VERBOSE) << "wFormatTag     : 0x"
+                        << rtc::ToHex(pWfxOut->wFormatTag) << " ("
+                        << pWfxOut->wFormatTag << ")";
+    // number of channels (i.e. mono, stereo...)
+    RTC_LOG(LS_VERBOSE) << "nChannels      : " << pWfxOut->nChannels;
+    // sample rate
+    RTC_LOG(LS_VERBOSE) << "nSamplesPerSec : " << pWfxOut->nSamplesPerSec;
+    // for buffer estimation
+    RTC_LOG(LS_VERBOSE) << "nAvgBytesPerSec: " << pWfxOut->nAvgBytesPerSec;
+    // block size of data
+    RTC_LOG(LS_VERBOSE) << "nBlockAlign    : " << pWfxOut->nBlockAlign;
+    // number of bits per sample of mono data
+    RTC_LOG(LS_VERBOSE) << "wBitsPerSample : " << pWfxOut->wBitsPerSample;
+    RTC_LOG(LS_VERBOSE) << "cbSize         : " << pWfxOut->cbSize;
+  }
+
+  // Set wave format
+  Wfx.wFormatTag = WAVE_FORMAT_PCM;
+  Wfx.wBitsPerSample = 16;
+  Wfx.cbSize = 0;
+
+  const int freqs[] = {48000, 44100, 16000, 96000, 32000, 8000};
+  hr = S_FALSE;
+
+  // Iterate over frequencies and channels, in order of priority
+  for (unsigned int freq = 0; freq < sizeof(freqs) / sizeof(freqs[0]); freq++) {
+    for (unsigned int chan = 0; chan < sizeof(_playChannelsPrioList) /
+                                           sizeof(_playChannelsPrioList[0]);
+         chan++) {
+      Wfx.nChannels = _playChannelsPrioList[chan];
+      Wfx.nSamplesPerSec = freqs[freq];
+      Wfx.nBlockAlign = Wfx.nChannels * Wfx.wBitsPerSample / 8;
+      Wfx.nAvgBytesPerSec = Wfx.nSamplesPerSec * Wfx.nBlockAlign;
+      // If the method succeeds and the audio endpoint device supports the
+      // specified stream format, it returns S_OK. If the method succeeds and
+      // provides a closest match to the specified format, it returns S_FALSE.
+      hr = _ptrClientOut->IsFormatSupported(AUDCLNT_SHAREMODE_SHARED, &Wfx,
+                                            &pWfxClosestMatch);
+      if (hr == S_OK) {
+        break;
+      } else {
+        if (pWfxClosestMatch) {
+          RTC_LOG(LS_INFO) << "nChannels=" << Wfx.nChannels
+                           << ", nSamplesPerSec=" << Wfx.nSamplesPerSec
Closest match: " + "nChannels=" + << pWfxClosestMatch->nChannels << ", nSamplesPerSec=" + << pWfxClosestMatch->nSamplesPerSec; + CoTaskMemFree(pWfxClosestMatch); + pWfxClosestMatch = NULL; + } else { + RTC_LOG(LS_INFO) << "nChannels=" << Wfx.nChannels + << ", nSamplesPerSec=" << Wfx.nSamplesPerSec + << " is not supported. No closest match."; + } + } + } + if (hr == S_OK) + break; + } + + // TODO(andrew): what happens in the event of failure in the above loop? + // Is _ptrClientOut->Initialize expected to fail? + // Same in InitRecording(). + if (hr == S_OK) { + _playAudioFrameSize = Wfx.nBlockAlign; + // Block size is the number of samples each channel in 10ms. + _playBlockSize = Wfx.nSamplesPerSec / 100; + _playSampleRate = Wfx.nSamplesPerSec; + _devicePlaySampleRate = + Wfx.nSamplesPerSec; // The device itself continues to run at 44.1 kHz. + _devicePlayBlockSize = Wfx.nSamplesPerSec / 100; + _playChannels = Wfx.nChannels; + + RTC_LOG(LS_VERBOSE) << "VoE selected this rendering format:"; + RTC_LOG(LS_VERBOSE) << "wFormatTag : 0x" + << rtc::ToHex(Wfx.wFormatTag) << " (" << Wfx.wFormatTag + << ")"; + RTC_LOG(LS_VERBOSE) << "nChannels : " << Wfx.nChannels; + RTC_LOG(LS_VERBOSE) << "nSamplesPerSec : " << Wfx.nSamplesPerSec; + RTC_LOG(LS_VERBOSE) << "nAvgBytesPerSec : " << Wfx.nAvgBytesPerSec; + RTC_LOG(LS_VERBOSE) << "nBlockAlign : " << Wfx.nBlockAlign; + RTC_LOG(LS_VERBOSE) << "wBitsPerSample : " << Wfx.wBitsPerSample; + RTC_LOG(LS_VERBOSE) << "cbSize : " << Wfx.cbSize; + RTC_LOG(LS_VERBOSE) << "Additional settings:"; + RTC_LOG(LS_VERBOSE) << "_playAudioFrameSize: " << _playAudioFrameSize; + RTC_LOG(LS_VERBOSE) << "_playBlockSize : " << _playBlockSize; + RTC_LOG(LS_VERBOSE) << "_playChannels : " << _playChannels; + } + + // Create a rendering stream. + // + // **************************************************************************** + // For a shared-mode stream that uses event-driven buffering, the caller must + // set both hnsPeriodicity and hnsBufferDuration to 0. The Initialize method + // determines how large a buffer to allocate based on the scheduling period + // of the audio engine. Although the client's buffer processing thread is + // event driven, the basic buffer management process, as described previously, + // is unaltered. + // Each time the thread awakens, it should call + // IAudioClient::GetCurrentPadding to determine how much data to write to a + // rendering buffer or read from a capture buffer. In contrast to the two + // buffers that the Initialize method allocates for an exclusive-mode stream + // that uses event-driven buffering, a shared-mode stream requires a single + // buffer. + // **************************************************************************** + // + REFERENCE_TIME hnsBufferDuration = + 0; // ask for minimum buffer size (default) + if (_devicePlaySampleRate == 44100) { + // Ask for a larger buffer size (30ms) when using 44.1kHz as render rate. + // There seems to be a larger risk of underruns for 44.1 compared + // with the default rate (48kHz). When using default, we set the requested + // buffer duration to 0, which sets the buffer to the minimum size + // required by the engine thread. The actual buffer size can then be + // read by GetBufferSize() and it is 20ms on most machines. 
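+ // Editor's note (illustrative arithmetic, not part of the upstream change): + // REFERENCE_TIME is expressed in 100-nanosecond units, so a duration in + // milliseconds maps to hns = ms * 10000. The 30 ms request below is + // therefore 30 * 10000 = 300000, while the engine's typical 10 ms period + // corresponds to 100000.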
+ hnsBufferDuration = 30 * 10000; + } + hr = _ptrClientOut->Initialize( + AUDCLNT_SHAREMODE_SHARED, // share Audio Engine with other applications + AUDCLNT_STREAMFLAGS_EVENTCALLBACK, // processing of the audio buffer by + // the client will be event driven + hnsBufferDuration, // requested buffer capacity as a time value (in + // 100-nanosecond units) + 0, // periodicity + &Wfx, // selected wave format + NULL); // session GUID + + if (FAILED(hr)) { + RTC_LOG(LS_ERROR) << "IAudioClient::Initialize() failed:"; + } + EXIT_ON_ERROR(hr); + + if (_ptrAudioBuffer) { + // Update the audio buffer with the selected parameters + _ptrAudioBuffer->SetPlayoutSampleRate(_playSampleRate); + _ptrAudioBuffer->SetPlayoutChannels((uint8_t)_playChannels); + } else { + // We can enter this state during CoreAudioIsSupported() when no + // AudioDeviceImplementation has been created, hence the AudioDeviceBuffer + // does not exist. It is OK to end up here since we don't initiate any media + // in CoreAudioIsSupported(). + RTC_LOG(LS_VERBOSE) + << "AudioDeviceBuffer must be attached before streaming can start"; + } + + // Get the actual size of the shared (endpoint buffer). + // Typical value is 960 audio frames <=> 20ms @ 48kHz sample rate. + UINT bufferFrameCount(0); + hr = _ptrClientOut->GetBufferSize(&bufferFrameCount); + if (SUCCEEDED(hr)) { + RTC_LOG(LS_VERBOSE) << "IAudioClient::GetBufferSize() => " + << bufferFrameCount << " (<=> " + << bufferFrameCount * _playAudioFrameSize << " bytes)"; + } + + // Set the event handle that the system signals when an audio buffer is ready + // to be processed by the client. + hr = _ptrClientOut->SetEventHandle(_hRenderSamplesReadyEvent); + EXIT_ON_ERROR(hr); + + // Get an IAudioRenderClient interface. + SAFE_RELEASE(_ptrRenderClient); + hr = _ptrClientOut->GetService(__uuidof(IAudioRenderClient), + (void**)&_ptrRenderClient); + EXIT_ON_ERROR(hr); + + // Mark playout side as initialized + _playIsInitialized = true; + + CoTaskMemFree(pWfxOut); + CoTaskMemFree(pWfxClosestMatch); + + RTC_LOG(LS_VERBOSE) << "render side is now initialized"; + return 0; + +Exit: + _TraceCOMError(hr); + CoTaskMemFree(pWfxOut); + CoTaskMemFree(pWfxClosestMatch); + SAFE_RELEASE(_ptrClientOut); + SAFE_RELEASE(_ptrRenderClient); + return -1; +} + +// Capture initialization when the built-in AEC DirectX Media Object (DMO) is +// used. Called from InitRecording(), most of which is skipped over. The DMO +// handles device initialization itself. +// Reference: http://msdn.microsoft.com/en-us/library/ff819492(v=vs.85).aspx +int32_t AudioDeviceWindowsCore::InitRecordingDMO() { + RTC_DCHECK(_builtInAecEnabled); + RTC_DCHECK(_dmo); + + if (SetDMOProperties() == -1) { + return -1; + } + + DMO_MEDIA_TYPE mt = {}; + HRESULT hr = MoInitMediaType(&mt, sizeof(WAVEFORMATEX)); + if (FAILED(hr)) { + MoFreeMediaType(&mt); + _TraceCOMError(hr); + return -1; + } + mt.majortype = MEDIATYPE_Audio; + mt.subtype = MEDIASUBTYPE_PCM; + mt.formattype = FORMAT_WaveFormatEx; + + // Supported formats + // nChannels: 1 (in AEC-only mode) + // nSamplesPerSec: 8000, 11025, 16000, 22050 + // wBitsPerSample: 16 + WAVEFORMATEX* ptrWav = reinterpret_cast<WAVEFORMATEX*>(mt.pbFormat); + ptrWav->wFormatTag = WAVE_FORMAT_PCM; + ptrWav->nChannels = 1; + // 16000 is the highest we can support with our resampler. + ptrWav->nSamplesPerSec = 16000; + ptrWav->nAvgBytesPerSec = 32000; + ptrWav->nBlockAlign = 2; + ptrWav->wBitsPerSample = 16; + ptrWav->cbSize = 0; + + // Set the VoE format equal to the AEC output format.
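+ // Editor's note (illustrative arithmetic, not part of the upstream change): + // for the fixed mono 16 kHz / 16-bit AEC output configured above, + // nBlockAlign = nChannels * wBitsPerSample / 8 = 1 * 16 / 8 = 2 bytes and + // nAvgBytesPerSec = nSamplesPerSec * nBlockAlign = 16000 * 2 = 32000, + // which matches the hard-coded values assigned to ptrWav.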
+ _recAudioFrameSize = ptrWav->nBlockAlign; + _recSampleRate = ptrWav->nSamplesPerSec; + _recBlockSize = ptrWav->nSamplesPerSec / 100; + _recChannels = ptrWav->nChannels; + + // Set the DMO output format parameters. + hr = _dmo->SetOutputType(kAecCaptureStreamIndex, &mt, 0); + MoFreeMediaType(&mt); + if (FAILED(hr)) { + _TraceCOMError(hr); + return -1; + } + + if (_ptrAudioBuffer) { + _ptrAudioBuffer->SetRecordingSampleRate(_recSampleRate); + _ptrAudioBuffer->SetRecordingChannels(_recChannels); + } else { + // Refer to InitRecording() for comments. + RTC_LOG(LS_VERBOSE) + << "AudioDeviceBuffer must be attached before streaming can start"; + } + + _mediaBuffer = rtc::make_ref_counted<MediaBufferImpl>(_recBlockSize * + _recAudioFrameSize); + + // Optional, but if called, must be after media types are set. + hr = _dmo->AllocateStreamingResources(); + if (FAILED(hr)) { + _TraceCOMError(hr); + return -1; + } + + _recIsInitialized = true; + RTC_LOG(LS_VERBOSE) << "Capture side is now initialized"; + + return 0; +} + +// ---------------------------------------------------------------------------- +// InitRecording +// ---------------------------------------------------------------------------- + +int32_t AudioDeviceWindowsCore::InitRecording() { + MutexLock lock(&mutex_); + + if (_recording) { + return -1; + } + + if (_recIsInitialized) { + return 0; + } + + if (QueryPerformanceFrequency(&_perfCounterFreq) == 0) { + return -1; + } + _perfCounterFactor = 10000000.0 / (double)_perfCounterFreq.QuadPart; + + if (_ptrDeviceIn == NULL) { + return -1; + } + + // Initialize the microphone (devices might have been added or removed) + if (InitMicrophoneLocked() == -1) { + RTC_LOG(LS_WARNING) << "InitMicrophone() failed"; + } + + // Ensure that the updated capturing endpoint device is valid + if (_ptrDeviceIn == NULL) { + return -1; + } + + if (_builtInAecEnabled) { + // The DMO will configure the capture device. + return InitRecordingDMO(); + } + + HRESULT hr = S_OK; + WAVEFORMATEX* pWfxIn = NULL; + WAVEFORMATEXTENSIBLE Wfx = WAVEFORMATEXTENSIBLE(); + WAVEFORMATEX* pWfxClosestMatch = NULL; + + // Create COM object with IAudioClient interface. + SAFE_RELEASE(_ptrClientIn); + hr = _ptrDeviceIn->Activate(__uuidof(IAudioClient), CLSCTX_ALL, NULL, + (void**)&_ptrClientIn); + EXIT_ON_ERROR(hr); + + // Retrieve the stream format that the audio engine uses for its internal + // processing (mixing) of shared-mode streams. + hr = _ptrClientIn->GetMixFormat(&pWfxIn); + if (SUCCEEDED(hr)) { + RTC_LOG(LS_VERBOSE) << "Audio Engine's current capturing mix format:"; + // format type + RTC_LOG(LS_VERBOSE) << "wFormatTag : 0x" + << rtc::ToHex(pWfxIn->wFormatTag) << " (" + << pWfxIn->wFormatTag << ")"; + // number of channels (i.e. mono, stereo...)
+ RTC_LOG(LS_VERBOSE) << "nChannels : " << pWfxIn->nChannels; + // sample rate + RTC_LOG(LS_VERBOSE) << "nSamplesPerSec : " << pWfxIn->nSamplesPerSec; + // for buffer estimation + RTC_LOG(LS_VERBOSE) << "nAvgBytesPerSec: " << pWfxIn->nAvgBytesPerSec; + // block size of data + RTC_LOG(LS_VERBOSE) << "nBlockAlign : " << pWfxIn->nBlockAlign; + // number of bits per sample of mono data + RTC_LOG(LS_VERBOSE) << "wBitsPerSample : " << pWfxIn->wBitsPerSample; + RTC_LOG(LS_VERBOSE) << "cbSize : " << pWfxIn->cbSize; + } + + // Set wave format + Wfx.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE; + Wfx.Format.wBitsPerSample = 16; + Wfx.Format.cbSize = 22; + Wfx.dwChannelMask = 0; + Wfx.Samples.wValidBitsPerSample = Wfx.Format.wBitsPerSample; + Wfx.SubFormat = KSDATAFORMAT_SUBTYPE_PCM; + + const int freqs[6] = {48000, 44100, 16000, 96000, 32000, 8000}; + hr = S_FALSE; + + // Iterate over frequencies and channels, in order of priority + for (unsigned int freq = 0; freq < sizeof(freqs) / sizeof(freqs[0]); freq++) { + for (unsigned int chan = 0; + chan < sizeof(_recChannelsPrioList) / sizeof(_recChannelsPrioList[0]); + chan++) { + Wfx.Format.nChannels = _recChannelsPrioList[chan]; + Wfx.Format.nSamplesPerSec = freqs[freq]; + Wfx.Format.nBlockAlign = + Wfx.Format.nChannels * Wfx.Format.wBitsPerSample / 8; + Wfx.Format.nAvgBytesPerSec = + Wfx.Format.nSamplesPerSec * Wfx.Format.nBlockAlign; + // If the method succeeds and the audio endpoint device supports the + // specified stream format, it returns S_OK. If the method succeeds and + // provides a closest match to the specified format, it returns S_FALSE. + hr = _ptrClientIn->IsFormatSupported( + AUDCLNT_SHAREMODE_SHARED, (WAVEFORMATEX*)&Wfx, &pWfxClosestMatch); + if (hr == S_OK) { + break; + } else { + if (pWfxClosestMatch) { + RTC_LOG(LS_INFO) << "nChannels=" << Wfx.Format.nChannels + << ", nSamplesPerSec=" << Wfx.Format.nSamplesPerSec + << " is not supported. Closest match: " + "nChannels=" + << pWfxClosestMatch->nChannels << ", nSamplesPerSec=" + << pWfxClosestMatch->nSamplesPerSec; + CoTaskMemFree(pWfxClosestMatch); + pWfxClosestMatch = NULL; + } else { + RTC_LOG(LS_INFO) << "nChannels=" << Wfx.Format.nChannels + << ", nSamplesPerSec=" << Wfx.Format.nSamplesPerSec + << " is not supported. No closest match."; + } + } + } + if (hr == S_OK) + break; + } + + if (hr == S_OK) { + _recAudioFrameSize = Wfx.Format.nBlockAlign; + _recSampleRate = Wfx.Format.nSamplesPerSec; + _recBlockSize = Wfx.Format.nSamplesPerSec / 100; + _recChannels = Wfx.Format.nChannels; + + RTC_LOG(LS_VERBOSE) << "VoE selected this capturing format:"; + RTC_LOG(LS_VERBOSE) << "wFormatTag : 0x" + << rtc::ToHex(Wfx.Format.wFormatTag) << " (" + << Wfx.Format.wFormatTag << ")"; + RTC_LOG(LS_VERBOSE) << "nChannels : " << Wfx.Format.nChannels; + RTC_LOG(LS_VERBOSE) << "nSamplesPerSec : " << Wfx.Format.nSamplesPerSec; + RTC_LOG(LS_VERBOSE) << "nAvgBytesPerSec : " << Wfx.Format.nAvgBytesPerSec; + RTC_LOG(LS_VERBOSE) << "nBlockAlign : " << Wfx.Format.nBlockAlign; + RTC_LOG(LS_VERBOSE) << "wBitsPerSample : " << Wfx.Format.wBitsPerSample; + RTC_LOG(LS_VERBOSE) << "cbSize : " << Wfx.Format.cbSize; + RTC_LOG(LS_VERBOSE) << "Additional settings:"; + RTC_LOG(LS_VERBOSE) << "_recAudioFrameSize: " << _recAudioFrameSize; + RTC_LOG(LS_VERBOSE) << "_recBlockSize : " << _recBlockSize; + RTC_LOG(LS_VERBOSE) << "_recChannels : " << _recChannels; + } + + // Create a capturing stream. 
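+ // Editor's sketch (hypothetical stand-alone WASAPI client, assuming a valid + // IMMDevice* device; not part of the upstream change) of the event-driven + // shared-mode setup performed below: + // IAudioClient* client = NULL; + // device->Activate(__uuidof(IAudioClient), CLSCTX_ALL, NULL, + // (void**)&client); + // WAVEFORMATEX* mix = NULL; + // client->GetMixFormat(&mix); + // HRESULT hr = client->Initialize(AUDCLNT_SHAREMODE_SHARED, + // AUDCLNT_STREAMFLAGS_EVENTCALLBACK, 0, 0, + // mix, NULL); + // CoTaskMemFree(mix);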
+ hr = _ptrClientIn->Initialize( + AUDCLNT_SHAREMODE_SHARED, // share Audio Engine with other applications + AUDCLNT_STREAMFLAGS_EVENTCALLBACK | // processing of the audio buffer by + // the client will be event driven + AUDCLNT_STREAMFLAGS_NOPERSIST, // volume and mute settings for an + // audio session will not persist + // across system restarts + 0, // required for event-driven shared mode + 0, // periodicity + (WAVEFORMATEX*)&Wfx, // selected wave format + NULL); // session GUID + + if (hr != S_OK) { + RTC_LOG(LS_ERROR) << "IAudioClient::Initialize() failed:"; + } + EXIT_ON_ERROR(hr); + + if (_ptrAudioBuffer) { + // Update the audio buffer with the selected parameters + _ptrAudioBuffer->SetRecordingSampleRate(_recSampleRate); + _ptrAudioBuffer->SetRecordingChannels((uint8_t)_recChannels); + } else { + // We can enter this state during CoreAudioIsSupported() when no + // AudioDeviceImplementation has been created, hence the AudioDeviceBuffer + // does not exist. It is OK to end up here since we don't initiate any media + // in CoreAudioIsSupported(). + RTC_LOG(LS_VERBOSE) + << "AudioDeviceBuffer must be attached before streaming can start"; + } + + // Get the actual size of the shared (endpoint buffer). + // Typical value is 960 audio frames <=> 20ms @ 48kHz sample rate. + UINT bufferFrameCount(0); + hr = _ptrClientIn->GetBufferSize(&bufferFrameCount); + if (SUCCEEDED(hr)) { + RTC_LOG(LS_VERBOSE) << "IAudioClient::GetBufferSize() => " + << bufferFrameCount << " (<=> " + << bufferFrameCount * _recAudioFrameSize << " bytes)"; + } + + // Set the event handle that the system signals when an audio buffer is ready + // to be processed by the client. + hr = _ptrClientIn->SetEventHandle(_hCaptureSamplesReadyEvent); + EXIT_ON_ERROR(hr); + + // Get an IAudioCaptureClient interface. + SAFE_RELEASE(_ptrCaptureClient); + hr = _ptrClientIn->GetService(__uuidof(IAudioCaptureClient), + (void**)&_ptrCaptureClient); + EXIT_ON_ERROR(hr); + + // Mark capture side as initialized + _recIsInitialized = true; + + CoTaskMemFree(pWfxIn); + CoTaskMemFree(pWfxClosestMatch); + + RTC_LOG(LS_VERBOSE) << "capture side is now initialized"; + return 0; + +Exit: + _TraceCOMError(hr); + CoTaskMemFree(pWfxIn); + CoTaskMemFree(pWfxClosestMatch); + SAFE_RELEASE(_ptrClientIn); + SAFE_RELEASE(_ptrCaptureClient); + return -1; +} + +// ---------------------------------------------------------------------------- +// StartRecording +// ---------------------------------------------------------------------------- + +int32_t AudioDeviceWindowsCore::StartRecording() { + if (!_recIsInitialized) { + return -1; + } + + if (_hRecThread != NULL) { + return 0; + } + + if (_recording) { + return 0; + } + + { + MutexLock lockScoped(&mutex_); + + // Create thread which will drive the capturing + LPTHREAD_START_ROUTINE lpStartAddress = WSAPICaptureThread; + if (_builtInAecEnabled) { + // Redirect to the DMO polling method. + lpStartAddress = WSAPICaptureThreadPollDMO; + + if (!_playing) { + // The DMO won't provide us captured output data unless we + // give it render data to process. 
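+ // Editor's usage sketch (hypothetical caller code, not part of the upstream + // change); with the built-in AEC the required start order is: + // adm->InitPlayout(); + // adm->StartPlayout(); // render stream must be live first + // adm->InitRecording(); + // adm->StartRecording(); // the DMO now has render data to process + // otherwise the guard below rejects the start.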
+ RTC_LOG(LS_ERROR) + << "Playout must be started before recording when using" + " the built-in AEC"; + return -1; + } + } + + RTC_DCHECK(_hRecThread == NULL); + _hRecThread = CreateThread(NULL, 0, lpStartAddress, this, 0, NULL); + if (_hRecThread == NULL) { + RTC_LOG(LS_ERROR) << "failed to create the recording thread"; + return -1; + } + + // Set thread priority to highest possible + SetThreadPriority(_hRecThread, THREAD_PRIORITY_TIME_CRITICAL); + } // critScoped + + DWORD ret = WaitForSingleObject(_hCaptureStartedEvent, 1000); + if (ret != WAIT_OBJECT_0) { + RTC_LOG(LS_VERBOSE) << "capturing did not start up properly"; + return -1; + } + RTC_LOG(LS_VERBOSE) << "capture audio stream has now started..."; + + _recording = true; + + return 0; +} + +// ---------------------------------------------------------------------------- +// StopRecording +// ---------------------------------------------------------------------------- + +int32_t AudioDeviceWindowsCore::StopRecording() { + int32_t err = 0; + + if (!_recIsInitialized) { + return 0; + } + + _Lock(); + + if (_hRecThread == NULL) { + RTC_LOG(LS_VERBOSE) + << "no capturing stream is active => close down WASAPI only"; + SAFE_RELEASE(_ptrClientIn); + SAFE_RELEASE(_ptrCaptureClient); + _recIsInitialized = false; + _recording = false; + _UnLock(); + return 0; + } + + // Stop the driving thread... + RTC_LOG(LS_VERBOSE) << "closing down the webrtc_core_audio_capture_thread..."; + // Manual-reset event; it will remain signalled to stop all capture threads. + SetEvent(_hShutdownCaptureEvent); + + _UnLock(); + DWORD ret = WaitForSingleObject(_hRecThread, 2000); + if (ret != WAIT_OBJECT_0) { + RTC_LOG(LS_ERROR) + << "failed to close down webrtc_core_audio_capture_thread"; + err = -1; + } else { + RTC_LOG(LS_VERBOSE) << "webrtc_core_audio_capture_thread is now closed"; + } + _Lock(); + + ResetEvent(_hShutdownCaptureEvent); // Must be manually reset. + // Ensure that the thread has released these interfaces properly. + RTC_DCHECK(err == -1 || _ptrClientIn == NULL); + RTC_DCHECK(err == -1 || _ptrCaptureClient == NULL); + + _recIsInitialized = false; + _recording = false; + + // These will create thread leaks in the result of an error, + // but we can at least resume the call. + CloseHandle(_hRecThread); + _hRecThread = NULL; + + if (_builtInAecEnabled) { + RTC_DCHECK(_dmo); + // This is necessary. Otherwise the DMO can generate garbage render + // audio even after rendering has stopped. 
+ HRESULT hr = _dmo->FreeStreamingResources(); + if (FAILED(hr)) { + _TraceCOMError(hr); + err = -1; + } + } + + _UnLock(); + + return err; +} + +// ---------------------------------------------------------------------------- +// RecordingIsInitialized +// ---------------------------------------------------------------------------- + +bool AudioDeviceWindowsCore::RecordingIsInitialized() const { + return (_recIsInitialized); +} + +// ---------------------------------------------------------------------------- +// Recording +// ---------------------------------------------------------------------------- + +bool AudioDeviceWindowsCore::Recording() const { + return (_recording); +} + +// ---------------------------------------------------------------------------- +// PlayoutIsInitialized +// ---------------------------------------------------------------------------- + +bool AudioDeviceWindowsCore::PlayoutIsInitialized() const { + return (_playIsInitialized); +} + +// ---------------------------------------------------------------------------- +// StartPlayout +// ---------------------------------------------------------------------------- + +int32_t AudioDeviceWindowsCore::StartPlayout() { + if (!_playIsInitialized) { + return -1; + } + + if (_hPlayThread != NULL) { + return 0; + } + + if (_playing) { + return 0; + } + + { + MutexLock lockScoped(&mutex_); + + // Create thread which will drive the rendering. + RTC_DCHECK(_hPlayThread == NULL); + _hPlayThread = CreateThread(NULL, 0, WSAPIRenderThread, this, 0, NULL); + if (_hPlayThread == NULL) { + RTC_LOG(LS_ERROR) << "failed to create the playout thread"; + return -1; + } + + // Set thread priority to highest possible. + SetThreadPriority(_hPlayThread, THREAD_PRIORITY_TIME_CRITICAL); + } // critScoped + + DWORD ret = WaitForSingleObject(_hRenderStartedEvent, 1000); + if (ret != WAIT_OBJECT_0) { + RTC_LOG(LS_VERBOSE) << "rendering did not start up properly"; + return -1; + } + + _playing = true; + RTC_LOG(LS_VERBOSE) << "rendering audio stream has now started..."; + + return 0; +} + +// ---------------------------------------------------------------------------- +// StopPlayout +// ---------------------------------------------------------------------------- + +int32_t AudioDeviceWindowsCore::StopPlayout() { + if (!_playIsInitialized) { + return 0; + } + + { + MutexLock lockScoped(&mutex_); + + if (_hPlayThread == NULL) { + RTC_LOG(LS_VERBOSE) + << "no rendering stream is active => close down WASAPI only"; + SAFE_RELEASE(_ptrClientOut); + SAFE_RELEASE(_ptrRenderClient); + _playIsInitialized = false; + _playing = false; + return 0; + } + + // stop the driving thread... + RTC_LOG(LS_VERBOSE) + << "closing down the webrtc_core_audio_render_thread..."; + SetEvent(_hShutdownRenderEvent); + } // critScoped + + DWORD ret = WaitForSingleObject(_hPlayThread, 2000); + if (ret != WAIT_OBJECT_0) { + // the thread did not stop as it should + RTC_LOG(LS_ERROR) << "failed to close down webrtc_core_audio_render_thread"; + CloseHandle(_hPlayThread); + _hPlayThread = NULL; + _playIsInitialized = false; + _playing = false; + return -1; + } + + { + MutexLock lockScoped(&mutex_); + RTC_LOG(LS_VERBOSE) << "webrtc_core_audio_render_thread is now closed"; + + // to reset this event manually at each time we finish with it, + // in case that the render thread has exited before StopPlayout(), + // this event might be caught by the new render thread within same VoE + // instance. 
+ ResetEvent(_hShutdownRenderEvent); + + SAFE_RELEASE(_ptrClientOut); + SAFE_RELEASE(_ptrRenderClient); + + _playIsInitialized = false; + _playing = false; + + CloseHandle(_hPlayThread); + _hPlayThread = NULL; + + if (_builtInAecEnabled && _recording) { + // The DMO won't provide us captured output data unless we + // give it render data to process. + // + // We still permit the playout to shutdown, and trace a warning. + // Otherwise, VoE can get into a state which will never permit + // playout to stop properly. + RTC_LOG(LS_WARNING) + << "Recording should be stopped before playout when using the" + " built-in AEC"; + } + + // Reset the playout delay value. + _sndCardPlayDelay = 0; + } // critScoped + + return 0; +} + +// ---------------------------------------------------------------------------- +// PlayoutDelay +// ---------------------------------------------------------------------------- + +int32_t AudioDeviceWindowsCore::PlayoutDelay(uint16_t& delayMS) const { + MutexLock lockScoped(&mutex_); + delayMS = static_cast<uint16_t>(_sndCardPlayDelay); + return 0; +} + +bool AudioDeviceWindowsCore::BuiltInAECIsAvailable() const { + return _dmo != nullptr; +} + +// ---------------------------------------------------------------------------- +// Playing +// ---------------------------------------------------------------------------- + +bool AudioDeviceWindowsCore::Playing() const { + return (_playing); +} + +// ============================================================================ +// Private Methods +// ============================================================================ + +// ---------------------------------------------------------------------------- +// [static] WSAPIRenderThread +// ---------------------------------------------------------------------------- + +DWORD WINAPI AudioDeviceWindowsCore::WSAPIRenderThread(LPVOID context) { + return reinterpret_cast<AudioDeviceWindowsCore*>(context)->DoRenderThread(); +} + +// ---------------------------------------------------------------------------- +// [static] WSAPICaptureThread +// ---------------------------------------------------------------------------- + +DWORD WINAPI AudioDeviceWindowsCore::WSAPICaptureThread(LPVOID context) { + return reinterpret_cast<AudioDeviceWindowsCore*>(context)->DoCaptureThread(); +} + +DWORD WINAPI AudioDeviceWindowsCore::WSAPICaptureThreadPollDMO(LPVOID context) { + return reinterpret_cast<AudioDeviceWindowsCore*>(context) + ->DoCaptureThreadPollDMO(); +} + +// ---------------------------------------------------------------------------- +// DoRenderThread +// ---------------------------------------------------------------------------- + +DWORD AudioDeviceWindowsCore::DoRenderThread() { + bool keepPlaying = true; + HANDLE waitArray[2] = {_hShutdownRenderEvent, _hRenderSamplesReadyEvent}; + HRESULT hr = S_OK; + HANDLE hMmTask = NULL; + + // Initialize COM as MTA in this thread. + ScopedCOMInitializer comInit(ScopedCOMInitializer::kMTA); + if (!comInit.Succeeded()) { + RTC_LOG(LS_ERROR) << "failed to initialize COM in render thread"; + return 1; + } + + rtc::SetCurrentThreadName("webrtc_core_audio_render_thread"); + + // Use Multimedia Class Scheduler Service (MMCSS) to boost the thread + // priority.
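+ // Editor's note: the _PAv* calls below appear to be avrt.dll entry points + // resolved at runtime (hence the _winSupportAvrt guard); the underlying + // pattern, sketched here for reference only, is the standard avrt.h one: + // DWORD idx = 0; + // HANDLE task = AvSetMmThreadCharacteristicsA("Pro Audio", &idx); + // if (task) AvSetMmThreadPriority(task, AVRT_PRIORITY_CRITICAL); + // ... // real-time work + // AvRevertMmThreadCharacteristics(task);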
+ // + if (_winSupportAvrt) { + DWORD taskIndex(0); + hMmTask = _PAvSetMmThreadCharacteristicsA("Pro Audio", &taskIndex); + if (hMmTask) { + if (FALSE == _PAvSetMmThreadPriority(hMmTask, AVRT_PRIORITY_CRITICAL)) { + RTC_LOG(LS_WARNING) << "failed to boost play-thread using MMCSS"; + } + RTC_LOG(LS_VERBOSE) + << "render thread is now registered with MMCSS (taskIndex=" + << taskIndex << ")"; + } else { + RTC_LOG(LS_WARNING) << "failed to enable MMCSS on render thread (err=" + << GetLastError() << ")"; + _TraceCOMError(GetLastError()); + } + } + + _Lock(); + + IAudioClock* clock = NULL; + + // Get size of rendering buffer (length is expressed as the number of audio + // frames the buffer can hold). This value is fixed during the rendering + // session. + // + UINT32 bufferLength = 0; + hr = _ptrClientOut->GetBufferSize(&bufferLength); + EXIT_ON_ERROR(hr); + RTC_LOG(LS_VERBOSE) << "[REND] size of buffer : " << bufferLength; + + // Get maximum latency for the current stream (will not change for the + // lifetime of the IAudioClient object). + // + REFERENCE_TIME latency; + _ptrClientOut->GetStreamLatency(&latency); + RTC_LOG(LS_VERBOSE) << "[REND] max stream latency : " << (DWORD)latency + << " (" << (double)(latency / 10000.0) << " ms)"; + + // Get the length of the periodic interval separating successive processing + // passes by the audio engine on the data in the endpoint buffer. + // + // The period between processing passes by the audio engine is fixed for a + // particular audio endpoint device and represents the smallest processing + // quantum for the audio engine. This period plus the stream latency between + // the buffer and endpoint device represents the minimum possible latency that + // an audio application can achieve. Typical value: 100000 <=> 0.01 sec = + // 10ms. + // + REFERENCE_TIME devPeriod = 0; + REFERENCE_TIME devPeriodMin = 0; + _ptrClientOut->GetDevicePeriod(&devPeriod, &devPeriodMin); + RTC_LOG(LS_VERBOSE) << "[REND] device period : " << (DWORD)devPeriod + << " (" << (double)(devPeriod / 10000.0) << " ms)"; + + // Derive initial rendering delay. + // Example: 10*(960/480) + 15 = 20 + 15 = 35ms + // + int playout_delay = 10 * (bufferLength / _playBlockSize) + + (int)((latency + devPeriod) / 10000); + _sndCardPlayDelay = playout_delay; + _writtenSamples = 0; + RTC_LOG(LS_VERBOSE) << "[REND] initial delay : " << playout_delay; + + double endpointBufferSizeMS = + 10.0 * ((double)bufferLength / (double)_devicePlayBlockSize); + RTC_LOG(LS_VERBOSE) << "[REND] endpointBufferSizeMS : " + << endpointBufferSizeMS; + + // Before starting the stream, fill the rendering buffer with silence. + // + BYTE* pData = NULL; + hr = _ptrRenderClient->GetBuffer(bufferLength, &pData); + EXIT_ON_ERROR(hr); + + hr = + _ptrRenderClient->ReleaseBuffer(bufferLength, AUDCLNT_BUFFERFLAGS_SILENT); + EXIT_ON_ERROR(hr); + + _writtenSamples += bufferLength; + + hr = _ptrClientOut->GetService(__uuidof(IAudioClock), (void**)&clock); + if (FAILED(hr)) { + RTC_LOG(LS_WARNING) + << "failed to get IAudioClock interface from the IAudioClient"; + } + + // Start up the rendering audio stream. + hr = _ptrClientOut->Start(); + EXIT_ON_ERROR(hr); + + _UnLock(); + + // Set event which will ensure that the calling thread modifies the playing + // state to true. 
+ // state to true. + // + SetEvent(_hRenderStartedEvent); + + // >> ------------------ THREAD LOOP ------------------ + + while (keepPlaying) { + // Wait for a render notification event or a shutdown event + DWORD waitResult = WaitForMultipleObjects(2, waitArray, FALSE, 500); + switch (waitResult) { + case WAIT_OBJECT_0 + 0: // _hShutdownRenderEvent + keepPlaying = false; + break; + case WAIT_OBJECT_0 + 1: // _hRenderSamplesReadyEvent + break; + case WAIT_TIMEOUT: // timeout notification + RTC_LOG(LS_WARNING) << "render event timed out after 0.5 seconds"; + goto Exit; + default: // unexpected error + RTC_LOG(LS_WARNING) << "unknown wait termination on render side"; + goto Exit; + } + + while (keepPlaying) { + _Lock(); + + // Sanity check to ensure that essential states are not modified + // during the unlocked period. + if (_ptrRenderClient == NULL || _ptrClientOut == NULL) { + _UnLock(); + RTC_LOG(LS_ERROR) + << "output state has been modified during unlocked period"; + goto Exit; + } + + // Get the number of frames of padding (queued up to play) in the endpoint + // buffer. + UINT32 padding = 0; + hr = _ptrClientOut->GetCurrentPadding(&padding); + EXIT_ON_ERROR(hr); + + // Derive the amount of available space in the output buffer + uint32_t framesAvailable = bufferLength - padding; + + // Do we have 10 ms available in the render buffer? + if (framesAvailable < _playBlockSize) { + // Not enough space in render buffer to store next render packet. + _UnLock(); + break; + } + + // Write n*10ms buffers to the render buffer + const uint32_t n10msBuffers = (framesAvailable / _playBlockSize); + for (uint32_t n = 0; n < n10msBuffers; n++) { + // Get pointer (i.e., grab the buffer) to next space in the shared + // render buffer. + hr = _ptrRenderClient->GetBuffer(_playBlockSize, &pData); + EXIT_ON_ERROR(hr); + + if (_ptrAudioBuffer) { + // Request data to be played out (#bytes = + // _playBlockSize*_audioFrameSize) + _UnLock(); + int32_t nSamples = + _ptrAudioBuffer->RequestPlayoutData(_playBlockSize); + _Lock(); + + if (nSamples == -1) { + _UnLock(); + RTC_LOG(LS_ERROR) << "failed to read data from render client"; + goto Exit; + } + + // Sanity check to ensure that essential states are not modified + // during the unlocked period + if (_ptrRenderClient == NULL || _ptrClientOut == NULL) { + _UnLock(); + RTC_LOG(LS_ERROR) + << "output state has been modified during unlocked" + " period"; + goto Exit; + } + if (nSamples != static_cast<int32_t>(_playBlockSize)) { + RTC_LOG(LS_WARNING) + << "nSamples(" << nSamples << ") != _playBlockSize(" + << _playBlockSize << ")"; + } + + // Get the actual (stored) data + nSamples = _ptrAudioBuffer->GetPlayoutData((int8_t*)pData); + } + + DWORD dwFlags(0); + hr = _ptrRenderClient->ReleaseBuffer(_playBlockSize, dwFlags); + // See http://msdn.microsoft.com/en-us/library/dd316605(VS.85).aspx + // for more details regarding AUDCLNT_E_DEVICE_INVALIDATED. + EXIT_ON_ERROR(hr); + + _writtenSamples += _playBlockSize; + } + + // Check the current delay on the playout side.
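+ // Editor's worked example (illustrative, not part of the upstream change): + // the block below derives the delay from the IAudioClock position as + // delay_ms = round((_writtenSamples / _devicePlaySampleRate - pos / freq) * 1000), + // e.g. 48480 written samples at 48 kHz with a clock position of 0.985 s + // gives round((1.01 - 0.985) * 1000) = 25 ms.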
+ if (clock) { + UINT64 pos = 0; + UINT64 freq = 1; + clock->GetPosition(&pos, NULL); + clock->GetFrequency(&freq); + playout_delay = ROUND((double(_writtenSamples) / _devicePlaySampleRate - + double(pos) / freq) * + 1000.0); + _sndCardPlayDelay = playout_delay; + } + + _UnLock(); + } + } + + // ------------------ THREAD LOOP ------------------ << + + SleepMs(static_cast<DWORD>(endpointBufferSizeMS + 0.5)); + hr = _ptrClientOut->Stop(); + +Exit: + SAFE_RELEASE(clock); + + if (FAILED(hr)) { + _ptrClientOut->Stop(); + _UnLock(); + _TraceCOMError(hr); + } + + if (_winSupportAvrt) { + if (NULL != hMmTask) { + _PAvRevertMmThreadCharacteristics(hMmTask); + } + } + + _Lock(); + + if (keepPlaying) { + if (_ptrClientOut != NULL) { + hr = _ptrClientOut->Stop(); + if (FAILED(hr)) { + _TraceCOMError(hr); + } + hr = _ptrClientOut->Reset(); + if (FAILED(hr)) { + _TraceCOMError(hr); + } + } + RTC_LOG(LS_ERROR) + << "Playout error: rendering thread has ended prematurely"; + } else { + RTC_LOG(LS_VERBOSE) << "_Rendering thread is now terminated properly"; + } + + _UnLock(); + + return (DWORD)hr; +} + +DWORD AudioDeviceWindowsCore::InitCaptureThreadPriority() { + _hMmTask = NULL; + + rtc::SetCurrentThreadName("webrtc_core_audio_capture_thread"); + + // Use Multimedia Class Scheduler Service (MMCSS) to boost the thread + // priority. + if (_winSupportAvrt) { + DWORD taskIndex(0); + _hMmTask = _PAvSetMmThreadCharacteristicsA("Pro Audio", &taskIndex); + if (_hMmTask) { + if (!_PAvSetMmThreadPriority(_hMmTask, AVRT_PRIORITY_CRITICAL)) { + RTC_LOG(LS_WARNING) << "failed to boost rec-thread using MMCSS"; + } + RTC_LOG(LS_VERBOSE) + << "capture thread is now registered with MMCSS (taskIndex=" + << taskIndex << ")"; + } else { + RTC_LOG(LS_WARNING) << "failed to enable MMCSS on capture thread (err=" + << GetLastError() << ")"; + _TraceCOMError(GetLastError()); + } + } + + return S_OK; +} + +void AudioDeviceWindowsCore::RevertCaptureThreadPriority() { + if (_winSupportAvrt) { + if (NULL != _hMmTask) { + _PAvRevertMmThreadCharacteristics(_hMmTask); + } + } + + _hMmTask = NULL; +} + +DWORD AudioDeviceWindowsCore::DoCaptureThreadPollDMO() { + RTC_DCHECK(_mediaBuffer); + bool keepRecording = true; + + // Initialize COM as MTA in this thread. + ScopedCOMInitializer comInit(ScopedCOMInitializer::kMTA); + if (!comInit.Succeeded()) { + RTC_LOG(LS_ERROR) << "failed to initialize COM in polling DMO thread"; + return 1; + } + + HRESULT hr = InitCaptureThreadPriority(); + if (FAILED(hr)) { + return hr; + } + + // Set event which will ensure that the calling thread modifies the + // recording state to true. + SetEvent(_hCaptureStartedEvent); + + // >> ---------------------------- THREAD LOOP ---------------------------- + while (keepRecording) { + // Poll the DMO every 5 ms. + // (The same interval used in the Wave implementation.) + DWORD waitResult = WaitForSingleObject(_hShutdownCaptureEvent, 5); + switch (waitResult) { + case WAIT_OBJECT_0: // _hShutdownCaptureEvent + keepRecording = false; + break; + case WAIT_TIMEOUT: // timeout notification + break; + default: // unexpected error + RTC_LOG(LS_WARNING) << "Unknown wait termination on capture side"; + hr = -1; // To signal an error callback. + keepRecording = false; + break; + } + + while (keepRecording) { + MutexLock lockScoped(&mutex_); + + DWORD dwStatus = 0; + { + DMO_OUTPUT_DATA_BUFFER dmoBuffer = {0}; + dmoBuffer.pBuffer = _mediaBuffer.get(); + dmoBuffer.pBuffer->AddRef(); + + // Poll the DMO for AEC processed capture data.
The DMO will + // copy available data to `dmoBuffer`, and should only return + // 10 ms frames. The value of `dwStatus` should be ignored. + hr = _dmo->ProcessOutput(0, 1, &dmoBuffer, &dwStatus); + SAFE_RELEASE(dmoBuffer.pBuffer); + dwStatus = dmoBuffer.dwStatus; + } + if (FAILED(hr)) { + _TraceCOMError(hr); + keepRecording = false; + RTC_DCHECK_NOTREACHED(); + break; + } + + ULONG bytesProduced = 0; + BYTE* data; + // Get a pointer to the data buffer. This should be valid until + // the next call to ProcessOutput. + hr = _mediaBuffer->GetBufferAndLength(&data, &bytesProduced); + if (FAILED(hr)) { + _TraceCOMError(hr); + keepRecording = false; + RTC_DCHECK_NOTREACHED(); + break; + } + + if (bytesProduced > 0) { + const int kSamplesProduced = bytesProduced / _recAudioFrameSize; + // TODO(andrew): verify that this is always satisfied. It might + // be that ProcessOutput will try to return more than 10 ms if + // we fail to call it frequently enough. + RTC_DCHECK_EQ(kSamplesProduced, static_cast<int>(_recBlockSize)); + RTC_DCHECK_EQ(sizeof(BYTE), sizeof(int8_t)); + _ptrAudioBuffer->SetRecordedBuffer(reinterpret_cast<int8_t*>(data), + kSamplesProduced); + _ptrAudioBuffer->SetVQEData(0, 0); + + _UnLock(); // Release lock while making the callback. + _ptrAudioBuffer->DeliverRecordedData(); + _Lock(); + } + + // Reset length to indicate buffer availability. + hr = _mediaBuffer->SetLength(0); + if (FAILED(hr)) { + _TraceCOMError(hr); + keepRecording = false; + RTC_DCHECK_NOTREACHED(); + break; + } + + if (!(dwStatus & DMO_OUTPUT_DATA_BUFFERF_INCOMPLETE)) { + // The DMO cannot currently produce more data. This is the + // normal case; otherwise it means the DMO had more than 10 ms + // of data available and ProcessOutput should be called again. + break; + } + } + } + // ---------------------------- THREAD LOOP ---------------------------- << + + RevertCaptureThreadPriority(); + + if (FAILED(hr)) { + RTC_LOG(LS_ERROR) + << "Recording error: capturing thread has ended prematurely"; + } else { + RTC_LOG(LS_VERBOSE) << "Capturing thread is now terminated properly"; + } + + return hr; +} + +// ---------------------------------------------------------------------------- +// DoCaptureThread +// ---------------------------------------------------------------------------- + +DWORD AudioDeviceWindowsCore::DoCaptureThread() { + bool keepRecording = true; + HANDLE waitArray[2] = {_hShutdownCaptureEvent, _hCaptureSamplesReadyEvent}; + HRESULT hr = S_OK; + + LARGE_INTEGER t1; + + BYTE* syncBuffer = NULL; + UINT32 syncBufIndex = 0; + + _readSamples = 0; + + // Initialize COM as MTA in this thread. + ScopedCOMInitializer comInit(ScopedCOMInitializer::kMTA); + if (!comInit.Succeeded()) { + RTC_LOG(LS_ERROR) << "failed to initialize COM in capture thread"; + return 1; + } + + hr = InitCaptureThreadPriority(); + if (FAILED(hr)) { + return hr; + } + + _Lock(); + + // Get size of capturing buffer (length is expressed as the number of audio + // frames the buffer can hold). This value is fixed during the capturing + // session. + // + UINT32 bufferLength = 0; + if (_ptrClientIn == NULL) { + RTC_LOG(LS_ERROR) + << "input state has been modified before capture loop starts."; + return 1; + } + hr = _ptrClientIn->GetBufferSize(&bufferLength); + EXIT_ON_ERROR(hr); + RTC_LOG(LS_VERBOSE) << "[CAPT] size of buffer : " << bufferLength; + + // Allocate memory for sync buffer. + // It is used for compensation between native 44.1 and internal 44.0 and + // for cases when the capture buffer is larger than 10ms.
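+ // Editor's worked example (illustrative, not part of the upstream change): + // with a 480-frame endpoint buffer and a 4-byte capture frame (16-bit + // stereo), the allocation below is syncBufferSize = 2 * (480 * 4) = 3840 + // bytes, i.e. room for two full endpoint buffers of queued capture data.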
+ // + const UINT32 syncBufferSize = 2 * (bufferLength * _recAudioFrameSize); + syncBuffer = new BYTE[syncBufferSize]; + if (syncBuffer == NULL) { + return (DWORD)E_POINTER; + } + RTC_LOG(LS_VERBOSE) << "[CAPT] size of sync buffer : " << syncBufferSize + << " [bytes]"; + + // Get maximum latency for the current stream (will not change for the + // lifetime of the IAudioClient object). + // + REFERENCE_TIME latency; + _ptrClientIn->GetStreamLatency(&latency); + RTC_LOG(LS_VERBOSE) << "[CAPT] max stream latency : " << (DWORD)latency + << " (" << (double)(latency / 10000.0) << " ms)"; + + // Get the length of the periodic interval separating successive processing + // passes by the audio engine on the data in the endpoint buffer. + // + REFERENCE_TIME devPeriod = 0; + REFERENCE_TIME devPeriodMin = 0; + _ptrClientIn->GetDevicePeriod(&devPeriod, &devPeriodMin); + RTC_LOG(LS_VERBOSE) << "[CAPT] device period : " << (DWORD)devPeriod + << " (" << (double)(devPeriod / 10000.0) << " ms)"; + + double extraDelayMS = (double)((latency + devPeriod) / 10000.0); + RTC_LOG(LS_VERBOSE) << "[CAPT] extraDelayMS : " << extraDelayMS; + + double endpointBufferSizeMS = + 10.0 * ((double)bufferLength / (double)_recBlockSize); + RTC_LOG(LS_VERBOSE) << "[CAPT] endpointBufferSizeMS : " + << endpointBufferSizeMS; + + // Start up the capturing stream. + // + hr = _ptrClientIn->Start(); + EXIT_ON_ERROR(hr); + + _UnLock(); + + // Set event which will ensure that the calling thread modifies the recording + // state to true. + // + SetEvent(_hCaptureStartedEvent); + + // >> ---------------------------- THREAD LOOP ---------------------------- + + while (keepRecording) { + // Wait for a capture notification event or a shutdown event + DWORD waitResult = WaitForMultipleObjects(2, waitArray, FALSE, 500); + switch (waitResult) { + case WAIT_OBJECT_0 + 0: // _hShutdownCaptureEvent + keepRecording = false; + break; + case WAIT_OBJECT_0 + 1: // _hCaptureSamplesReadyEvent + break; + case WAIT_TIMEOUT: // timeout notification + RTC_LOG(LS_WARNING) << "capture event timed out after 0.5 seconds"; + goto Exit; + default: // unexpected error + RTC_LOG(LS_WARNING) << "unknown wait termination on capture side"; + goto Exit; + } + + while (keepRecording) { + BYTE* pData = 0; + UINT32 framesAvailable = 0; + DWORD flags = 0; + UINT64 recTime = 0; + UINT64 recPos = 0; + + _Lock(); + + // Sanity check to ensure that essential states are not modified + // during the unlocked period. + if (_ptrCaptureClient == NULL || _ptrClientIn == NULL) { + _UnLock(); + RTC_LOG(LS_ERROR) + << "input state has been modified during unlocked period"; + goto Exit; + } + + // Find out how much capture data is available + // + hr = _ptrCaptureClient->GetBuffer( + &pData, // packet which is ready to be read by used + &framesAvailable, // #frames in the captured packet (can be zero) + &flags, // support flags (check) + &recPos, // device position of first audio frame in data packet + &recTime); // value of performance counter at the time of recording + // the first audio frame + + if (SUCCEEDED(hr)) { + if (AUDCLNT_S_BUFFER_EMPTY == hr) { + // Buffer was empty => start waiting for a new capture notification + // event + _UnLock(); + break; + } + + if (flags & AUDCLNT_BUFFERFLAGS_SILENT) { + // Treat all of the data in the packet as silence and ignore the + // actual data values. 
+ RTC_LOG(LS_WARNING) << "AUDCLNT_BUFFERFLAGS_SILENT"; + pData = NULL; + } + + RTC_DCHECK_NE(framesAvailable, 0); + + if (pData) { + CopyMemory(&syncBuffer[syncBufIndex * _recAudioFrameSize], pData, + framesAvailable * _recAudioFrameSize); + } else { + ZeroMemory(&syncBuffer[syncBufIndex * _recAudioFrameSize], + framesAvailable * _recAudioFrameSize); + } + RTC_DCHECK_GE(syncBufferSize, (syncBufIndex * _recAudioFrameSize) + + framesAvailable * _recAudioFrameSize); + + // Release the capture buffer + // + hr = _ptrCaptureClient->ReleaseBuffer(framesAvailable); + EXIT_ON_ERROR(hr); + + _readSamples += framesAvailable; + syncBufIndex += framesAvailable; + + QueryPerformanceCounter(&t1); + + // Get the current recording and playout delay. + uint32_t sndCardRecDelay = + (uint32_t)(((((UINT64)t1.QuadPart * _perfCounterFactor) - recTime) / + 10000) + + (10 * syncBufIndex) / _recBlockSize - 10); + uint32_t sndCardPlayDelay = static_cast<uint32_t>(_sndCardPlayDelay); + + while (syncBufIndex >= _recBlockSize) { + if (_ptrAudioBuffer) { + _ptrAudioBuffer->SetRecordedBuffer((const int8_t*)syncBuffer, + _recBlockSize); + _ptrAudioBuffer->SetVQEData(sndCardPlayDelay, sndCardRecDelay); + + _ptrAudioBuffer->SetTypingStatus(KeyPressed()); + + _UnLock(); // release lock while making the callback + _ptrAudioBuffer->DeliverRecordedData(); + _Lock(); // restore the lock + + // Sanity check to ensure that essential states are not modified + // during the unlocked period + if (_ptrCaptureClient == NULL || _ptrClientIn == NULL) { + _UnLock(); + RTC_LOG(LS_ERROR) << "input state has been modified during" + " unlocked period"; + goto Exit; + } + } + + // store remaining data which was not able to deliver as 10ms segment + MoveMemory(&syncBuffer[0], + &syncBuffer[_recBlockSize * _recAudioFrameSize], + (syncBufIndex - _recBlockSize) * _recAudioFrameSize); + syncBufIndex -= _recBlockSize; + sndCardRecDelay -= 10; + } + } else { + // If GetBuffer returns AUDCLNT_E_BUFFER_ERROR, the thread consuming the + // audio samples must wait for the next processing pass. The client + // might benefit from keeping a count of the failed GetBuffer calls. If + // GetBuffer returns this error repeatedly, the client can start a new + // processing loop after shutting down the current client by calling + // IAudioClient::Stop, IAudioClient::Reset, and releasing the audio + // client.
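+ // Editor's sketch of that recovery path (not implemented here; this thread + // simply logs and exits below): + // client->Stop(); + // client->Reset(); + // SAFE_RELEASE(captureClient); + // SAFE_RELEASE(client); + // ...then re-run InitRecording()/StartRecording() from the caller.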
+ RTC_LOG(LS_ERROR) << "IAudioCaptureClient::GetBuffer returned" + " AUDCLNT_E_BUFFER_ERROR, hr = 0x" + << rtc::ToHex(hr); + goto Exit; + } + + _UnLock(); + } + } + + // ---------------------------- THREAD LOOP ---------------------------- << + + if (_ptrClientIn) { + hr = _ptrClientIn->Stop(); + } + +Exit: + if (FAILED(hr)) { + _ptrClientIn->Stop(); + _UnLock(); + _TraceCOMError(hr); + } + + RevertCaptureThreadPriority(); + + _Lock(); + + if (keepRecording) { + if (_ptrClientIn != NULL) { + hr = _ptrClientIn->Stop(); + if (FAILED(hr)) { + _TraceCOMError(hr); + } + hr = _ptrClientIn->Reset(); + if (FAILED(hr)) { + _TraceCOMError(hr); + } + } + + RTC_LOG(LS_ERROR) + << "Recording error: capturing thread has ended prematurely"; + } else { + RTC_LOG(LS_VERBOSE) << "_Capturing thread is now terminated properly"; + } + + SAFE_RELEASE(_ptrClientIn); + SAFE_RELEASE(_ptrCaptureClient); + + _UnLock(); + + if (syncBuffer) { + delete[] syncBuffer; + } + + return (DWORD)hr; +} + +int32_t AudioDeviceWindowsCore::EnableBuiltInAEC(bool enable) { + if (_recIsInitialized) { + RTC_LOG(LS_ERROR) + << "Attempt to set Windows AEC with recording already initialized"; + return -1; + } + + if (_dmo == NULL) { + RTC_LOG(LS_ERROR) + << "Built-in AEC DMO was not initialized properly at create time"; + return -1; + } + + _builtInAecEnabled = enable; + return 0; +} + +void AudioDeviceWindowsCore::_Lock() RTC_NO_THREAD_SAFETY_ANALYSIS { + mutex_.Lock(); +} + +void AudioDeviceWindowsCore::_UnLock() RTC_NO_THREAD_SAFETY_ANALYSIS { + mutex_.Unlock(); +} + +int AudioDeviceWindowsCore::SetDMOProperties() { + HRESULT hr = S_OK; + RTC_DCHECK(_dmo); + + rtc::scoped_refptr<IPropertyStore> ps; + { + IPropertyStore* ptrPS = NULL; + hr = _dmo->QueryInterface(IID_IPropertyStore, + reinterpret_cast<void**>(&ptrPS)); + if (FAILED(hr) || ptrPS == NULL) { + _TraceCOMError(hr); + return -1; + } + ps = ptrPS; + SAFE_RELEASE(ptrPS); + } + + // Set the AEC system mode. + // SINGLE_CHANNEL_AEC - AEC processing only. + if (SetVtI4Property(ps.get(), MFPKEY_WMAAECMA_SYSTEM_MODE, + SINGLE_CHANNEL_AEC)) { + return -1; + } + + // Set the AEC source mode. + // VARIANT_TRUE - Source mode (we poll the AEC for captured data). + if (SetBoolProperty(ps.get(), MFPKEY_WMAAECMA_DMO_SOURCE_MODE, + VARIANT_TRUE) == -1) { + return -1; + } + + // Enable the feature mode. + // This lets us override all the default processing settings below. + if (SetBoolProperty(ps.get(), MFPKEY_WMAAECMA_FEATURE_MODE, VARIANT_TRUE) == + -1) { + return -1; + } + + // Disable analog AGC (default enabled). + if (SetBoolProperty(ps.get(), MFPKEY_WMAAECMA_MIC_GAIN_BOUNDER, + VARIANT_FALSE) == -1) { + return -1; + } + + // Disable noise suppression (default enabled). + // 0 - Disabled, 1 - Enabled + if (SetVtI4Property(ps.get(), MFPKEY_WMAAECMA_FEATR_NS, 0) == -1) { + return -1; + } + + // Relevant parameters to leave at default settings: + // MFPKEY_WMAAECMA_FEATR_AGC - Digital AGC (disabled). + // MFPKEY_WMAAECMA_FEATR_CENTER_CLIP - AEC center clipping (enabled). + // MFPKEY_WMAAECMA_FEATR_ECHO_LENGTH - Filter length (256 ms). + // TODO(andrew): investigate decreasing the length to 128 ms. + // MFPKEY_WMAAECMA_FEATR_FRAME_SIZE - Frame size (0). + // 0 is automatic; defaults to 160 samples (or 10 ms frames at the + // selected 16 kHz) as long as mic array processing is disabled. + // MFPKEY_WMAAECMA_FEATR_NOISE_FILL - Comfort noise (enabled). + // MFPKEY_WMAAECMA_FEATR_VAD - VAD (disabled). + + // Set the devices selected by VoE. If using a default device, we need to + // search for the device index.
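+ // Editor's worked example (illustrative, not part of the upstream change): + // MFPKEY_WMAAECMA_DEVICE_INDEXES packs both endpoints into one DWORD, the + // render index in the high word and the capture index in the low word, + // e.g. outDevIndex = 2 and inDevIndex = 1 yield + // (2 << 16) + (0x0000ffff & 1) = 0x00020001.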
+ int inDevIndex = _inputDeviceIndex; + int outDevIndex = _outputDeviceIndex; + if (!_usingInputDeviceIndex) { + ERole role = eCommunications; + if (_inputDevice == AudioDeviceModule::kDefaultDevice) { + role = eConsole; + } + + if (_GetDefaultDeviceIndex(eCapture, role, &inDevIndex) == -1) { + return -1; + } + } + + if (!_usingOutputDeviceIndex) { + ERole role = eCommunications; + if (_outputDevice == AudioDeviceModule::kDefaultDevice) { + role = eConsole; + } + + if (_GetDefaultDeviceIndex(eRender, role, &outDevIndex) == -1) { + return -1; + } + } + + DWORD devIndex = static_cast<uint32_t>(outDevIndex << 16) + + static_cast<uint32_t>(0x0000ffff & inDevIndex); + RTC_LOG(LS_VERBOSE) << "Capture device index: " << inDevIndex + << ", render device index: " << outDevIndex; + if (SetVtI4Property(ps.get(), MFPKEY_WMAAECMA_DEVICE_INDEXES, devIndex) == + -1) { + return -1; + } + + return 0; +} + +int AudioDeviceWindowsCore::SetBoolProperty(IPropertyStore* ptrPS, + REFPROPERTYKEY key, + VARIANT_BOOL value) { + PROPVARIANT pv; + PropVariantInit(&pv); + pv.vt = VT_BOOL; + pv.boolVal = value; + HRESULT hr = ptrPS->SetValue(key, pv); + PropVariantClear(&pv); + if (FAILED(hr)) { + _TraceCOMError(hr); + return -1; + } + return 0; +} + +int AudioDeviceWindowsCore::SetVtI4Property(IPropertyStore* ptrPS, + REFPROPERTYKEY key, + LONG value) { + PROPVARIANT pv; + PropVariantInit(&pv); + pv.vt = VT_I4; + pv.lVal = value; + HRESULT hr = ptrPS->SetValue(key, pv); + PropVariantClear(&pv); + if (FAILED(hr)) { + _TraceCOMError(hr); + return -1; + } + return 0; +} + +// ---------------------------------------------------------------------------- +// _RefreshDeviceList +// +// Creates a new list of endpoint rendering or capture devices after +// deleting any previously created (and possibly out-of-date) list of +// such devices. +// ---------------------------------------------------------------------------- + +int32_t AudioDeviceWindowsCore::_RefreshDeviceList(EDataFlow dir) { + RTC_DLOG(LS_VERBOSE) << __FUNCTION__; + + HRESULT hr = S_OK; + IMMDeviceCollection* pCollection = NULL; + + RTC_DCHECK(dir == eRender || dir == eCapture); + RTC_DCHECK(_ptrEnumerator); + + // Create a fresh list of devices using the specified direction + hr = _ptrEnumerator->EnumAudioEndpoints(dir, DEVICE_STATE_ACTIVE, + &pCollection); + if (FAILED(hr)) { + _TraceCOMError(hr); + SAFE_RELEASE(pCollection); + return -1; + } + + if (dir == eRender) { + SAFE_RELEASE(_ptrRenderCollection); + _ptrRenderCollection = pCollection; + } else { + SAFE_RELEASE(_ptrCaptureCollection); + _ptrCaptureCollection = pCollection; + } + + return 0; +} + +// ---------------------------------------------------------------------------- +// _DeviceListCount +// +// Gets a count of the endpoint rendering or capture devices in the +// current list of such devices.
+// ---------------------------------------------------------------------------- + +int16_t AudioDeviceWindowsCore::_DeviceListCount(EDataFlow dir) { + RTC_DLOG(LS_VERBOSE) << __FUNCTION__; + + HRESULT hr = S_OK; + UINT count = 0; + + RTC_DCHECK(eRender == dir || eCapture == dir); + + if (eRender == dir && NULL != _ptrRenderCollection) { + hr = _ptrRenderCollection->GetCount(&count); + } else if (NULL != _ptrCaptureCollection) { + hr = _ptrCaptureCollection->GetCount(&count); + } + + if (FAILED(hr)) { + _TraceCOMError(hr); + return -1; + } + + return static_cast<int16_t>(count); +} + +// ---------------------------------------------------------------------------- +// _GetListDeviceName +// +// Gets the friendly name of an endpoint rendering or capture device +// from the current list of such devices. The caller uses an index +// into the list to identify the device. +// +// Uses: _ptrRenderCollection or _ptrCaptureCollection which is updated +// in _RefreshDeviceList(). +// ---------------------------------------------------------------------------- + +int32_t AudioDeviceWindowsCore::_GetListDeviceName(EDataFlow dir, + int index, + LPWSTR szBuffer, + int bufferLen) { + RTC_DLOG(LS_VERBOSE) << __FUNCTION__; + + HRESULT hr = S_OK; + IMMDevice* pDevice = NULL; + + RTC_DCHECK(dir == eRender || dir == eCapture); + + if (eRender == dir && NULL != _ptrRenderCollection) { + hr = _ptrRenderCollection->Item(index, &pDevice); + } else if (NULL != _ptrCaptureCollection) { + hr = _ptrCaptureCollection->Item(index, &pDevice); + } + + if (FAILED(hr)) { + _TraceCOMError(hr); + SAFE_RELEASE(pDevice); + return -1; + } + + int32_t res = _GetDeviceName(pDevice, szBuffer, bufferLen); + SAFE_RELEASE(pDevice); + return res; +} + +// ---------------------------------------------------------------------------- +// _GetDefaultDeviceName +// +// Gets the friendly name of an endpoint rendering or capture device +// given a specified device role. +// +// Uses: _ptrEnumerator +// ---------------------------------------------------------------------------- + +int32_t AudioDeviceWindowsCore::_GetDefaultDeviceName(EDataFlow dir, + ERole role, + LPWSTR szBuffer, + int bufferLen) { + RTC_DLOG(LS_VERBOSE) << __FUNCTION__; + + HRESULT hr = S_OK; + IMMDevice* pDevice = NULL; + + RTC_DCHECK(dir == eRender || dir == eCapture); + RTC_DCHECK(role == eConsole || role == eCommunications); + RTC_DCHECK(_ptrEnumerator); + + hr = _ptrEnumerator->GetDefaultAudioEndpoint(dir, role, &pDevice); + + if (FAILED(hr)) { + _TraceCOMError(hr); + SAFE_RELEASE(pDevice); + return -1; + } + + int32_t res = _GetDeviceName(pDevice, szBuffer, bufferLen); + SAFE_RELEASE(pDevice); + return res; +} + +// ---------------------------------------------------------------------------- +// _GetListDeviceID +// +// Gets the unique ID string of an endpoint rendering or capture device +// from the current list of such devices. The caller uses an index +// into the list to identify the device. +// +// Uses: _ptrRenderCollection or _ptrCaptureCollection which is updated +// in _RefreshDeviceList().
+// ---------------------------------------------------------------------------- + +int32_t AudioDeviceWindowsCore::_GetListDeviceID(EDataFlow dir, + int index, + LPWSTR szBuffer, + int bufferLen) { + RTC_DLOG(LS_VERBOSE) << __FUNCTION__; + + HRESULT hr = S_OK; + IMMDevice* pDevice = NULL; + + RTC_DCHECK(dir == eRender || dir == eCapture); + + if (eRender == dir && NULL != _ptrRenderCollection) { + hr = _ptrRenderCollection->Item(index, &pDevice); + } else if (NULL != _ptrCaptureCollection) { + hr = _ptrCaptureCollection->Item(index, &pDevice); + } + + if (FAILED(hr)) { + _TraceCOMError(hr); + SAFE_RELEASE(pDevice); + return -1; + } + + int32_t res = _GetDeviceID(pDevice, szBuffer, bufferLen); + SAFE_RELEASE(pDevice); + return res; +} + +// ---------------------------------------------------------------------------- +// _GetDefaultDeviceID +// +// Gets the unique device ID of an endpoint rendering or capture device +// given a specified device role. +// +// Uses: _ptrEnumerator +// ---------------------------------------------------------------------------- + +int32_t AudioDeviceWindowsCore::_GetDefaultDeviceID(EDataFlow dir, + ERole role, + LPWSTR szBuffer, + int bufferLen) { + RTC_DLOG(LS_VERBOSE) << __FUNCTION__; + + HRESULT hr = S_OK; + IMMDevice* pDevice = NULL; + + RTC_DCHECK(dir == eRender || dir == eCapture); + RTC_DCHECK(role == eConsole || role == eCommunications); + RTC_DCHECK(_ptrEnumerator); + + hr = _ptrEnumerator->GetDefaultAudioEndpoint(dir, role, &pDevice); + + if (FAILED(hr)) { + _TraceCOMError(hr); + SAFE_RELEASE(pDevice); + return -1; + } + + int32_t res = _GetDeviceID(pDevice, szBuffer, bufferLen); + SAFE_RELEASE(pDevice); + return res; +} + +int32_t AudioDeviceWindowsCore::_GetDefaultDeviceIndex(EDataFlow dir, + ERole role, + int* index) { + RTC_DLOG(LS_VERBOSE) << __FUNCTION__; + + HRESULT hr = S_OK; + WCHAR szDefaultDeviceID[MAX_PATH] = {0}; + WCHAR szDeviceID[MAX_PATH] = {0}; + + const size_t kDeviceIDLength = sizeof(szDeviceID) / sizeof(szDeviceID[0]); + RTC_DCHECK_EQ(kDeviceIDLength, + sizeof(szDefaultDeviceID) / sizeof(szDefaultDeviceID[0])); + + if (_GetDefaultDeviceID(dir, role, szDefaultDeviceID, kDeviceIDLength) == + -1) { + return -1; + } + + IMMDeviceCollection* collection = _ptrCaptureCollection; + if (dir == eRender) { + collection = _ptrRenderCollection; + } + + if (!collection) { + RTC_LOG(LS_ERROR) << "Device collection not valid"; + return -1; + } + + UINT count = 0; + hr = collection->GetCount(&count); + if (FAILED(hr)) { + _TraceCOMError(hr); + return -1; + } + + *index = -1; + for (UINT i = 0; i < count; i++) { + memset(szDeviceID, 0, sizeof(szDeviceID)); + rtc::scoped_refptr<IMMDevice> device; + { + IMMDevice* ptrDevice = NULL; + hr = collection->Item(i, &ptrDevice); + if (FAILED(hr) || ptrDevice == NULL) { + _TraceCOMError(hr); + return -1; + } + device = ptrDevice; + SAFE_RELEASE(ptrDevice); + } + + if (_GetDeviceID(device.get(), szDeviceID, kDeviceIDLength) == -1) { + return -1; + } + + if (wcsncmp(szDefaultDeviceID, szDeviceID, kDeviceIDLength) == 0) { + // Found a match.
+ *index = i; + break; + } + } + + if (*index == -1) { + RTC_LOG(LS_ERROR) << "Unable to find collection index for default device"; + return -1; + } + + return 0; +} + +// ---------------------------------------------------------------------------- +// _GetDeviceName +// ---------------------------------------------------------------------------- + +int32_t AudioDeviceWindowsCore::_GetDeviceName(IMMDevice* pDevice, + LPWSTR pszBuffer, + int bufferLen) { + RTC_DLOG(LS_VERBOSE) << __FUNCTION__; + + static const WCHAR szDefault[] = L"<Device not available>"; + + HRESULT hr = E_FAIL; + IPropertyStore* pProps = NULL; + PROPVARIANT varName; + + RTC_DCHECK(pszBuffer); + RTC_DCHECK_GT(bufferLen, 0); + + if (pDevice != NULL) { + hr = pDevice->OpenPropertyStore(STGM_READ, &pProps); + if (FAILED(hr)) { + RTC_LOG(LS_ERROR) << "IMMDevice::OpenPropertyStore failed, hr = 0x" + << rtc::ToHex(hr); + } + } + + // Initialize container for property value. + PropVariantInit(&varName); + + if (SUCCEEDED(hr)) { + // Get the endpoint device's friendly-name property. + hr = pProps->GetValue(PKEY_Device_FriendlyName, &varName); + if (FAILED(hr)) { + RTC_LOG(LS_ERROR) << "IPropertyStore::GetValue failed, hr = 0x" + << rtc::ToHex(hr); + } + } + + if ((SUCCEEDED(hr)) && (VT_EMPTY == varName.vt)) { + hr = E_FAIL; + RTC_LOG(LS_ERROR) << "IPropertyStore::GetValue returned no value," + " hr = 0x" + << rtc::ToHex(hr); + } + + if ((SUCCEEDED(hr)) && (VT_LPWSTR != varName.vt)) { + // The returned value is not a wide null terminated string. + hr = E_UNEXPECTED; + RTC_LOG(LS_ERROR) << "IPropertyStore::GetValue returned unexpected" + " type, hr = 0x" + << rtc::ToHex(hr); + } + + if (SUCCEEDED(hr) && (varName.pwszVal != NULL)) { + // Copy the valid device name to the provided output buffer. + wcsncpy_s(pszBuffer, bufferLen, varName.pwszVal, _TRUNCATE); + } else { + // Failed to find the device name. + wcsncpy_s(pszBuffer, bufferLen, szDefault, _TRUNCATE); + } + + PropVariantClear(&varName); + SAFE_RELEASE(pProps); + + return 0; +} + +// ---------------------------------------------------------------------------- +// _GetDeviceID +// ---------------------------------------------------------------------------- + +int32_t AudioDeviceWindowsCore::_GetDeviceID(IMMDevice* pDevice, + LPWSTR pszBuffer, + int bufferLen) { + RTC_DLOG(LS_VERBOSE) << __FUNCTION__; + + static const WCHAR szDefault[] = L"<Device not available>"; + + HRESULT hr = E_FAIL; + LPWSTR pwszID = NULL; + + RTC_DCHECK(pszBuffer); + RTC_DCHECK_GT(bufferLen, 0); + + if (pDevice != NULL) { + hr = pDevice->GetId(&pwszID); + } + + if (hr == S_OK) { + // Found the device ID. + wcsncpy_s(pszBuffer, bufferLen, pwszID, _TRUNCATE); + } else { + // Failed to find the device ID.
+ wcsncpy_s(pszBuffer, bufferLen, szDefault, _TRUNCATE); + } + + CoTaskMemFree(pwszID); + return 0; +} + +// ---------------------------------------------------------------------------- +// _GetDefaultDevice +// ---------------------------------------------------------------------------- + +int32_t AudioDeviceWindowsCore::_GetDefaultDevice(EDataFlow dir, + ERole role, + IMMDevice** ppDevice) { + RTC_DLOG(LS_VERBOSE) << __FUNCTION__; + + HRESULT hr(S_OK); + + RTC_DCHECK(_ptrEnumerator); + + hr = _ptrEnumerator->GetDefaultAudioEndpoint(dir, role, ppDevice); + if (FAILED(hr)) { + _TraceCOMError(hr); + return -1; + } + + return 0; +} + +// ---------------------------------------------------------------------------- +// _GetListDevice +// ---------------------------------------------------------------------------- + +int32_t AudioDeviceWindowsCore::_GetListDevice(EDataFlow dir, + int index, + IMMDevice** ppDevice) { + HRESULT hr(S_OK); + + RTC_DCHECK(_ptrEnumerator); + + IMMDeviceCollection* pCollection = NULL; + + hr = _ptrEnumerator->EnumAudioEndpoints( + dir, + DEVICE_STATE_ACTIVE, // only active endpoints are OK + &pCollection); + if (FAILED(hr)) { + _TraceCOMError(hr); + SAFE_RELEASE(pCollection); + return -1; + } + + hr = pCollection->Item(index, ppDevice); + if (FAILED(hr)) { + _TraceCOMError(hr); + SAFE_RELEASE(pCollection); + return -1; + } + + SAFE_RELEASE(pCollection); + + return 0; +} + +// ---------------------------------------------------------------------------- +// _EnumerateEndpointDevicesAll +// ---------------------------------------------------------------------------- + +int32_t AudioDeviceWindowsCore::_EnumerateEndpointDevicesAll( + EDataFlow dataFlow) const { + RTC_DLOG(LS_VERBOSE) << __FUNCTION__; + + RTC_DCHECK(_ptrEnumerator); + + HRESULT hr = S_OK; + IMMDeviceCollection* pCollection = NULL; + IMMDevice* pEndpoint = NULL; + IPropertyStore* pProps = NULL; + IAudioEndpointVolume* pEndpointVolume = NULL; + LPWSTR pwszID = NULL; + + // Generate a collection of audio endpoint devices in the system. + // Get states for *all* endpoint devices. + // Output: IMMDeviceCollection interface. + hr = _ptrEnumerator->EnumAudioEndpoints( + dataFlow, // data-flow direction (input parameter) + DEVICE_STATE_ACTIVE | DEVICE_STATE_DISABLED | DEVICE_STATE_UNPLUGGED, + &pCollection); // release interface when done + + EXIT_ON_ERROR(hr); + + // use the IMMDeviceCollection interface... + + UINT count = 0; + + // Retrieve a count of the devices in the device collection. + hr = pCollection->GetCount(&count); + EXIT_ON_ERROR(hr); + if (dataFlow == eRender) + RTC_LOG(LS_VERBOSE) << "#rendering endpoint devices (counting all): " + << count; + else if (dataFlow == eCapture) + RTC_LOG(LS_VERBOSE) << "#capturing endpoint devices (counting all): " + << count; + + if (count == 0) { + return 0; + } + + // Each loop prints the name of an endpoint device. + for (ULONG i = 0; i < count; i++) { + RTC_LOG(LS_VERBOSE) << "Endpoint " << i << ":"; + + // Get pointer to endpoint number i. + // Output: IMMDevice interface. + hr = pCollection->Item(i, &pEndpoint); + CONTINUE_ON_ERROR(hr); + + // use the IMMDevice interface of the specified endpoint device... + + // Get the endpoint ID string (uniquely identifies the device among all + // audio endpoint devices) + hr = pEndpoint->GetId(&pwszID); + CONTINUE_ON_ERROR(hr); + RTC_LOG(LS_VERBOSE) << "ID string : " << pwszID; + + // Retrieve an interface to the device's property store. + // Output: IPropertyStore interface. 
+    hr = pEndpoint->OpenPropertyStore(STGM_READ, &pProps);
+    CONTINUE_ON_ERROR(hr);
+
+    // use the IPropertyStore interface...
+
+    PROPVARIANT varName;
+    // Initialize container for property value.
+    PropVariantInit(&varName);
+
+    // Get the endpoint's friendly-name property.
+    // Example: "Speakers (Realtek High Definition Audio)"
+    hr = pProps->GetValue(PKEY_Device_FriendlyName, &varName);
+    CONTINUE_ON_ERROR(hr);
+    RTC_LOG(LS_VERBOSE) << "friendly name: \"" << varName.pwszVal << "\"";
+
+    // Get the endpoint's current device state
+    DWORD dwState;
+    hr = pEndpoint->GetState(&dwState);
+    CONTINUE_ON_ERROR(hr);
+    if (dwState & DEVICE_STATE_ACTIVE)
+      RTC_LOG(LS_VERBOSE) << "state (0x" << rtc::ToHex(dwState)
+                          << ") : *ACTIVE*";
+    if (dwState & DEVICE_STATE_DISABLED)
+      RTC_LOG(LS_VERBOSE) << "state (0x" << rtc::ToHex(dwState)
+                          << ") : DISABLED";
+    if (dwState & DEVICE_STATE_NOTPRESENT)
+      RTC_LOG(LS_VERBOSE) << "state (0x" << rtc::ToHex(dwState)
+                          << ") : NOTPRESENT";
+    if (dwState & DEVICE_STATE_UNPLUGGED)
+      RTC_LOG(LS_VERBOSE) << "state (0x" << rtc::ToHex(dwState)
+                          << ") : UNPLUGGED";
+
+    // Check the hardware volume capabilities.
+    DWORD dwHwSupportMask = 0;
+    hr = pEndpoint->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,
+                             (void**)&pEndpointVolume);
+    CONTINUE_ON_ERROR(hr);
+    hr = pEndpointVolume->QueryHardwareSupport(&dwHwSupportMask);
+    CONTINUE_ON_ERROR(hr);
+    if (dwHwSupportMask & ENDPOINT_HARDWARE_SUPPORT_VOLUME)
+      // The audio endpoint device supports a hardware volume control
+      RTC_LOG(LS_VERBOSE) << "hwmask (0x" << rtc::ToHex(dwHwSupportMask)
+                          << ") : HARDWARE_SUPPORT_VOLUME";
+    if (dwHwSupportMask & ENDPOINT_HARDWARE_SUPPORT_MUTE)
+      // The audio endpoint device supports a hardware mute control
+      RTC_LOG(LS_VERBOSE) << "hwmask (0x" << rtc::ToHex(dwHwSupportMask)
+                          << ") : HARDWARE_SUPPORT_MUTE";
+    if (dwHwSupportMask & ENDPOINT_HARDWARE_SUPPORT_METER)
+      // The audio endpoint device supports a hardware peak meter
+      RTC_LOG(LS_VERBOSE) << "hwmask (0x" << rtc::ToHex(dwHwSupportMask)
+                          << ") : HARDWARE_SUPPORT_METER";
+
+    // Check the channel count (#channels in the audio stream that enters or
+    // leaves the audio endpoint device)
+    UINT nChannelCount(0);
+    hr = pEndpointVolume->GetChannelCount(&nChannelCount);
+    CONTINUE_ON_ERROR(hr);
+    RTC_LOG(LS_VERBOSE) << "#channels : " << nChannelCount;
+
+    if (dwHwSupportMask & ENDPOINT_HARDWARE_SUPPORT_VOLUME) {
+      // Get the volume range.
+      float fLevelMinDB(0.0);
+      float fLevelMaxDB(0.0);
+      float fVolumeIncrementDB(0.0);
+      hr = pEndpointVolume->GetVolumeRange(&fLevelMinDB, &fLevelMaxDB,
+                                           &fVolumeIncrementDB);
+      CONTINUE_ON_ERROR(hr);
+      RTC_LOG(LS_VERBOSE) << "volume range : " << fLevelMinDB << " (min), "
+                          << fLevelMaxDB << " (max), " << fVolumeIncrementDB
+                          << " (inc) [dB]";
+
+      // The volume range from vmin = fLevelMinDB to vmax = fLevelMaxDB is
+      // divided into n uniform intervals of size vinc = fVolumeIncrementDB,
+      // where n = (vmax - vmin) / vinc. The values vmin, vmax, and vinc are
+      // measured in decibels. The client can set the volume level to one of n +
+      // 1 discrete values in the range from vmin to vmax.
+      int n = (int)((fLevelMaxDB - fLevelMinDB) / fVolumeIncrementDB);
+      RTC_LOG(LS_VERBOSE) << "#intervals : " << n;
+
+      // Get information about the current step in the volume range.
+      // This method represents the volume level of the audio stream that enters
+      // or leaves the audio endpoint device as an index or "step" in a range of
+      // discrete volume levels. Output value nStepCount is the number of steps
+      // in the range. Output value nStep is the step index of the current
+      // volume level. If the number of steps is n = nStepCount, then step index
+      // nStep can assume values from 0 (minimum volume) to n - 1 (maximum
+      // volume).
+      UINT nStep(0);
+      UINT nStepCount(0);
+      hr = pEndpointVolume->GetVolumeStepInfo(&nStep, &nStepCount);
+      CONTINUE_ON_ERROR(hr);
+      RTC_LOG(LS_VERBOSE) << "volume steps : " << nStep << " (nStep), "
+                          << nStepCount << " (nStepCount)";
+    }
+  Next:
+    if (FAILED(hr)) {
+      RTC_LOG(LS_VERBOSE) << "Error when logging device information";
+    }
+    CoTaskMemFree(pwszID);
+    pwszID = NULL;
+    PropVariantClear(&varName);
+    SAFE_RELEASE(pProps);
+    SAFE_RELEASE(pEndpoint);
+    SAFE_RELEASE(pEndpointVolume);
+  }
+  SAFE_RELEASE(pCollection);
+  return 0;
+
+Exit:
+  _TraceCOMError(hr);
+  CoTaskMemFree(pwszID);
+  pwszID = NULL;
+  SAFE_RELEASE(pCollection);
+  SAFE_RELEASE(pEndpoint);
+  SAFE_RELEASE(pEndpointVolume);
+  SAFE_RELEASE(pProps);
+  return -1;
+}
+
+// ----------------------------------------------------------------------------
+// _TraceCOMError
+// ----------------------------------------------------------------------------
+
+void AudioDeviceWindowsCore::_TraceCOMError(HRESULT hr) const {
+  wchar_t buf[MAXERRORLENGTH];
+  wchar_t errorText[MAXERRORLENGTH];
+
+  const DWORD dwFlags =
+      FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS;
+  const DWORD dwLangID = MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US);
+
+  // Gets the system's human-readable message string for this HRESULT.
+  // All error messages are in English by default.
+  DWORD messageLength = ::FormatMessageW(dwFlags, 0, hr, dwLangID, errorText,
+                                         MAXERRORLENGTH, NULL);
+
+  RTC_DCHECK_LE(messageLength, MAXERRORLENGTH);
+
+  // Trims trailing white space (FormatMessage() leaves a trailing cr-lf.).
+  for (; messageLength && ::isspace(errorText[messageLength - 1]);
+       --messageLength) {
+    errorText[messageLength - 1] = '\0';
+  }
+
+  RTC_LOG(LS_ERROR) << "Core Audio method failed (hr=" << hr << ")";
+  StringCchPrintfW(buf, MAXERRORLENGTH, L"Error details: ");
+  StringCchCatW(buf, MAXERRORLENGTH, errorText);
+  RTC_LOG(LS_ERROR) << rtc::ToUtf8(buf);
+}
+
+bool AudioDeviceWindowsCore::KeyPressed() const {
+  int key_down = 0;
+  for (int key = VK_SPACE; key < VK_NUMLOCK; key++) {
+    short res = GetAsyncKeyState(key);
+    key_down |= res & 0x1;  // Get the LSB
+  }
+  return (key_down > 0);
+}
+}  // namespace webrtc
+
+#endif  // WEBRTC_WINDOWS_CORE_AUDIO_BUILD
diff --git a/third_party/libwebrtc/modules/audio_device/win/audio_device_core_win.h b/third_party/libwebrtc/modules/audio_device/win/audio_device_core_win.h
new file mode 100644
index 0000000000..380effb449
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/win/audio_device_core_win.h
@@ -0,0 +1,299 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_WIN_AUDIO_DEVICE_CORE_WIN_H_
+#define MODULES_AUDIO_DEVICE_WIN_AUDIO_DEVICE_CORE_WIN_H_
+
+#if (_MSC_VER >= 1400)  // only include for VS 2005 and higher
+
+#include <wmcodecdsp.h>  // CLSID_CWMAudioAEC
+//(must be before audioclient.h)
+
+#include <audioclient.h>  // WASAPI
+#include <audiopolicy.h>
+#include <avrt.h>  // Avrt
+#include <endpointvolume.h>
+#include <mediaobj.h>  // IMediaObject
+#include <mmdeviceapi.h>  // MMDevice
+
+#include "api/scoped_refptr.h"
+#include "modules/audio_device/audio_device_generic.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/win/scoped_com_initializer.h"
+#include "rtc_base/win32.h"
+
+// Use Multimedia Class Scheduler Service (MMCSS) to boost the thread priority
+#pragma comment(lib, "avrt.lib")
+// AVRT function pointers
+typedef BOOL(WINAPI* PAvRevertMmThreadCharacteristics)(HANDLE);
+typedef HANDLE(WINAPI* PAvSetMmThreadCharacteristicsA)(LPCSTR, LPDWORD);
+typedef BOOL(WINAPI* PAvSetMmThreadPriority)(HANDLE, AVRT_PRIORITY);
+
+namespace webrtc {
+
+const float MAX_CORE_SPEAKER_VOLUME = 255.0f;
+const float MIN_CORE_SPEAKER_VOLUME = 0.0f;
+const float MAX_CORE_MICROPHONE_VOLUME = 255.0f;
+const float MIN_CORE_MICROPHONE_VOLUME = 0.0f;
+const uint16_t CORE_SPEAKER_VOLUME_STEP_SIZE = 1;
+const uint16_t CORE_MICROPHONE_VOLUME_STEP_SIZE = 1;
+
+class AudioDeviceWindowsCore : public AudioDeviceGeneric {
+ public:
+  AudioDeviceWindowsCore();
+  ~AudioDeviceWindowsCore();
+
+  static bool CoreAudioIsSupported();
+
+  // Retrieve the currently utilized audio layer
+  virtual int32_t ActiveAudioLayer(
+      AudioDeviceModule::AudioLayer& audioLayer) const;
+
+  // Main initialization and termination
+  virtual InitStatus Init() RTC_LOCKS_EXCLUDED(mutex_);
+  virtual int32_t Terminate() RTC_LOCKS_EXCLUDED(mutex_);
+  virtual bool Initialized() const;
+
+  // Device enumeration
+  virtual int16_t PlayoutDevices() RTC_LOCKS_EXCLUDED(mutex_);
+  virtual int16_t RecordingDevices() RTC_LOCKS_EXCLUDED(mutex_);
+  virtual int32_t PlayoutDeviceName(uint16_t index,
+                                    char name[kAdmMaxDeviceNameSize],
+                                    char guid[kAdmMaxGuidSize])
+      RTC_LOCKS_EXCLUDED(mutex_);
+  virtual int32_t RecordingDeviceName(uint16_t index,
+                                      char name[kAdmMaxDeviceNameSize],
+                                      char guid[kAdmMaxGuidSize])
+      RTC_LOCKS_EXCLUDED(mutex_);
+
+  // Device selection
+  virtual int32_t SetPlayoutDevice(uint16_t index) RTC_LOCKS_EXCLUDED(mutex_);
+  virtual int32_t SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType device);
+  virtual int32_t SetRecordingDevice(uint16_t index) RTC_LOCKS_EXCLUDED(mutex_);
+  virtual int32_t SetRecordingDevice(
+      AudioDeviceModule::WindowsDeviceType device) RTC_LOCKS_EXCLUDED(mutex_);
+
+  // Audio transport initialization
+  virtual int32_t PlayoutIsAvailable(bool& available);
+  virtual int32_t InitPlayout() RTC_LOCKS_EXCLUDED(mutex_);
+  virtual bool PlayoutIsInitialized() const;
+  virtual int32_t RecordingIsAvailable(bool& available);
+  virtual int32_t InitRecording() RTC_LOCKS_EXCLUDED(mutex_);
+  virtual bool RecordingIsInitialized() const;
+
+  // Audio transport control
+  virtual int32_t StartPlayout() RTC_LOCKS_EXCLUDED(mutex_);
+  virtual int32_t StopPlayout() RTC_LOCKS_EXCLUDED(mutex_);
+  virtual bool Playing() const;
+  virtual int32_t StartRecording() RTC_LOCKS_EXCLUDED(mutex_);
+  virtual int32_t StopRecording();
+  virtual bool Recording() const;
+
+  // Audio mixer initialization
+  virtual int32_t InitSpeaker() RTC_LOCKS_EXCLUDED(mutex_);
+  virtual bool SpeakerIsInitialized() const;
+  virtual int32_t InitMicrophone() RTC_LOCKS_EXCLUDED(mutex_);
+  virtual bool MicrophoneIsInitialized() const;
+
+  // Speaker volume controls
+  virtual
int32_t SpeakerVolumeIsAvailable(bool& available) + RTC_LOCKS_EXCLUDED(mutex_); + virtual int32_t SetSpeakerVolume(uint32_t volume) RTC_LOCKS_EXCLUDED(mutex_); + virtual int32_t SpeakerVolume(uint32_t& volume) const + RTC_LOCKS_EXCLUDED(mutex_); + virtual int32_t MaxSpeakerVolume(uint32_t& maxVolume) const; + virtual int32_t MinSpeakerVolume(uint32_t& minVolume) const; + + // Microphone volume controls + virtual int32_t MicrophoneVolumeIsAvailable(bool& available) + RTC_LOCKS_EXCLUDED(mutex_); + virtual int32_t SetMicrophoneVolume(uint32_t volume) + RTC_LOCKS_EXCLUDED(mutex_, volume_mutex_); + virtual int32_t MicrophoneVolume(uint32_t& volume) const + RTC_LOCKS_EXCLUDED(mutex_, volume_mutex_); + virtual int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const; + virtual int32_t MinMicrophoneVolume(uint32_t& minVolume) const; + + // Speaker mute control + virtual int32_t SpeakerMuteIsAvailable(bool& available) + RTC_LOCKS_EXCLUDED(mutex_); + virtual int32_t SetSpeakerMute(bool enable) RTC_LOCKS_EXCLUDED(mutex_); + virtual int32_t SpeakerMute(bool& enabled) const; + + // Microphone mute control + virtual int32_t MicrophoneMuteIsAvailable(bool& available) + RTC_LOCKS_EXCLUDED(mutex_); + virtual int32_t SetMicrophoneMute(bool enable); + virtual int32_t MicrophoneMute(bool& enabled) const; + + // Stereo support + virtual int32_t StereoPlayoutIsAvailable(bool& available); + virtual int32_t SetStereoPlayout(bool enable) RTC_LOCKS_EXCLUDED(mutex_); + virtual int32_t StereoPlayout(bool& enabled) const; + virtual int32_t StereoRecordingIsAvailable(bool& available); + virtual int32_t SetStereoRecording(bool enable) RTC_LOCKS_EXCLUDED(mutex_); + virtual int32_t StereoRecording(bool& enabled) const + RTC_LOCKS_EXCLUDED(mutex_); + + // Delay information and control + virtual int32_t PlayoutDelay(uint16_t& delayMS) const + RTC_LOCKS_EXCLUDED(mutex_); + + virtual bool BuiltInAECIsAvailable() const; + + virtual int32_t EnableBuiltInAEC(bool enable); + + public: + virtual void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer); + + private: + bool KeyPressed() const; + + private: // avrt function pointers + PAvRevertMmThreadCharacteristics _PAvRevertMmThreadCharacteristics; + PAvSetMmThreadCharacteristicsA _PAvSetMmThreadCharacteristicsA; + PAvSetMmThreadPriority _PAvSetMmThreadPriority; + HMODULE _avrtLibrary; + bool _winSupportAvrt; + + private: // thread functions + int32_t InitSpeakerLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); + int32_t InitMicrophoneLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); + int16_t PlayoutDevicesLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); + int16_t RecordingDevicesLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); + DWORD InitCaptureThreadPriority(); + void RevertCaptureThreadPriority(); + static DWORD WINAPI WSAPICaptureThread(LPVOID context); + DWORD DoCaptureThread(); + + static DWORD WINAPI WSAPICaptureThreadPollDMO(LPVOID context); + DWORD DoCaptureThreadPollDMO() RTC_LOCKS_EXCLUDED(mutex_); + + static DWORD WINAPI WSAPIRenderThread(LPVOID context); + DWORD DoRenderThread(); + + void _Lock(); + void _UnLock(); + + int SetDMOProperties(); + + int SetBoolProperty(IPropertyStore* ptrPS, + REFPROPERTYKEY key, + VARIANT_BOOL value); + + int SetVtI4Property(IPropertyStore* ptrPS, REFPROPERTYKEY key, LONG value); + + int32_t _EnumerateEndpointDevicesAll(EDataFlow dataFlow) const; + void _TraceCOMError(HRESULT hr) const; + + int32_t _RefreshDeviceList(EDataFlow dir); + int16_t _DeviceListCount(EDataFlow dir); + int32_t _GetDefaultDeviceName(EDataFlow dir, + ERole role, + LPWSTR 
szBuffer,
+                                 int bufferLen);
+  int32_t _GetListDeviceName(EDataFlow dir,
+                             int index,
+                             LPWSTR szBuffer,
+                             int bufferLen);
+  int32_t _GetDeviceName(IMMDevice* pDevice, LPWSTR pszBuffer, int bufferLen);
+  int32_t _GetListDeviceID(EDataFlow dir,
+                           int index,
+                           LPWSTR szBuffer,
+                           int bufferLen);
+  int32_t _GetDefaultDeviceID(EDataFlow dir,
+                              ERole role,
+                              LPWSTR szBuffer,
+                              int bufferLen);
+  int32_t _GetDefaultDeviceIndex(EDataFlow dir, ERole role, int* index);
+  int32_t _GetDeviceID(IMMDevice* pDevice, LPWSTR pszBuffer, int bufferLen);
+  int32_t _GetDefaultDevice(EDataFlow dir, ERole role, IMMDevice** ppDevice);
+  int32_t _GetListDevice(EDataFlow dir, int index, IMMDevice** ppDevice);
+
+  int32_t InitRecordingDMO();
+
+  ScopedCOMInitializer _comInit;
+  AudioDeviceBuffer* _ptrAudioBuffer;
+  mutable Mutex mutex_;
+  mutable Mutex volume_mutex_ RTC_ACQUIRED_AFTER(mutex_);
+
+  IMMDeviceEnumerator* _ptrEnumerator;
+  IMMDeviceCollection* _ptrRenderCollection;
+  IMMDeviceCollection* _ptrCaptureCollection;
+  IMMDevice* _ptrDeviceOut;
+  IMMDevice* _ptrDeviceIn;
+
+  IAudioClient* _ptrClientOut;
+  IAudioClient* _ptrClientIn;
+  IAudioRenderClient* _ptrRenderClient;
+  IAudioCaptureClient* _ptrCaptureClient;
+  IAudioEndpointVolume* _ptrCaptureVolume;
+  ISimpleAudioVolume* _ptrRenderSimpleVolume;
+
+  // DirectX Media Object (DMO) for the built-in AEC.
+  rtc::scoped_refptr<IMediaObject> _dmo;
+  rtc::scoped_refptr<IMediaBuffer> _mediaBuffer;
+  bool _builtInAecEnabled;
+
+  HANDLE _hRenderSamplesReadyEvent;
+  HANDLE _hPlayThread;
+  HANDLE _hRenderStartedEvent;
+  HANDLE _hShutdownRenderEvent;
+
+  HANDLE _hCaptureSamplesReadyEvent;
+  HANDLE _hRecThread;
+  HANDLE _hCaptureStartedEvent;
+  HANDLE _hShutdownCaptureEvent;
+
+  HANDLE _hMmTask;
+
+  UINT _playAudioFrameSize;
+  uint32_t _playSampleRate;
+  uint32_t _devicePlaySampleRate;
+  uint32_t _playBlockSize;
+  uint32_t _devicePlayBlockSize;
+  uint32_t _playChannels;
+  uint32_t _sndCardPlayDelay;
+  UINT64 _writtenSamples;
+  UINT64 _readSamples;
+
+  UINT _recAudioFrameSize;
+  uint32_t _recSampleRate;
+  uint32_t _recBlockSize;
+  uint32_t _recChannels;
+
+  uint16_t _recChannelsPrioList[3];
+  uint16_t _playChannelsPrioList[2];
+
+  LARGE_INTEGER _perfCounterFreq;
+  double _perfCounterFactor;
+
+ private:
+  bool _initialized;
+  bool _recording;
+  bool _playing;
+  bool _recIsInitialized;
+  bool _playIsInitialized;
+  bool _speakerIsInitialized;
+  bool _microphoneIsInitialized;
+
+  bool _usingInputDeviceIndex;
+  bool _usingOutputDeviceIndex;
+  AudioDeviceModule::WindowsDeviceType _inputDevice;
+  AudioDeviceModule::WindowsDeviceType _outputDevice;
+  uint16_t _inputDeviceIndex;
+  uint16_t _outputDeviceIndex;
+};
+
+#endif  // #if (_MSC_VER >= 1400)
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_DEVICE_WIN_AUDIO_DEVICE_CORE_WIN_H_
diff --git a/third_party/libwebrtc/modules/audio_device/win/audio_device_module_win.cc b/third_party/libwebrtc/modules/audio_device/win/audio_device_module_win.cc
new file mode 100644
index 0000000000..a36c40735e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/win/audio_device_module_win.cc
@@ -0,0 +1,522 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/win/audio_device_module_win.h"
+
+#include <memory>
+#include <utility>
+
+#include "api/make_ref_counted.h"
+#include "api/sequence_checker.h"
+#include "modules/audio_device/audio_device_buffer.h"
+#include "modules/audio_device/include/audio_device.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/string_utils.h"
+
+namespace webrtc {
+namespace webrtc_win {
+namespace {
+
+#define RETURN_IF_OUTPUT_RESTARTS(...) \
+  do {                                 \
+    if (output_->Restarting()) {       \
+      return __VA_ARGS__;              \
+    }                                  \
+  } while (0)
+
+#define RETURN_IF_INPUT_RESTARTS(...) \
+  do {                                \
+    if (input_->Restarting()) {       \
+      return __VA_ARGS__;             \
+    }                                 \
+  } while (0)
+
+#define RETURN_IF_OUTPUT_IS_INITIALIZED(...) \
+  do {                                       \
+    if (output_->PlayoutIsInitialized()) {   \
+      return __VA_ARGS__;                    \
+    }                                        \
+  } while (0)
+
+#define RETURN_IF_INPUT_IS_INITIALIZED(...) \
+  do {                                      \
+    if (input_->RecordingIsInitialized()) { \
+      return __VA_ARGS__;                   \
+    }                                       \
+  } while (0)
+
+#define RETURN_IF_OUTPUT_IS_ACTIVE(...) \
+  do {                                  \
+    if (output_->Playing()) {           \
+      return __VA_ARGS__;               \
+    }                                   \
+  } while (0)
+
+#define RETURN_IF_INPUT_IS_ACTIVE(...) \
+  do {                                 \
+    if (input_->Recording()) {         \
+      return __VA_ARGS__;              \
+    }                                  \
+  } while (0)
+
+// This class combines a generic instance of an AudioInput and a generic
+// instance of an AudioOutput to create an AudioDeviceModule. This is mostly
+// done by delegating to the audio input/output with some glue code. This class
+// also directly implements some of the AudioDeviceModule methods with dummy
+// implementations.
+//
+// An instance must be created, destroyed and used on one and the same thread,
+// i.e., all public methods must also be called on the same thread. A thread
+// checker will RTC_DCHECK if any method is called on an invalid thread.
+// TODO(henrika): is thread checking needed in AudioInput and AudioOutput?
+class WindowsAudioDeviceModule : public AudioDeviceModuleForTest {
+ public:
+  enum class InitStatus {
+    OK = 0,
+    PLAYOUT_ERROR = 1,
+    RECORDING_ERROR = 2,
+    OTHER_ERROR = 3,
+    NUM_STATUSES = 4
+  };
+
+  WindowsAudioDeviceModule(std::unique_ptr<AudioInput> audio_input,
+                           std::unique_ptr<AudioOutput> audio_output,
+                           TaskQueueFactory* task_queue_factory)
+      : input_(std::move(audio_input)),
+        output_(std::move(audio_output)),
+        task_queue_factory_(task_queue_factory) {
+    RTC_CHECK(input_);
+    RTC_CHECK(output_);
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    RTC_DCHECK_RUN_ON(&thread_checker_);
+  }
+
+  ~WindowsAudioDeviceModule() override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    RTC_DCHECK_RUN_ON(&thread_checker_);
+    Terminate();
+  }
+
+  WindowsAudioDeviceModule(const WindowsAudioDeviceModule&) = delete;
+  WindowsAudioDeviceModule& operator=(const WindowsAudioDeviceModule&) = delete;
+
+  int32_t ActiveAudioLayer(
+      AudioDeviceModule::AudioLayer* audioLayer) const override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    RTC_DCHECK_RUN_ON(&thread_checker_);
+    // TODO(henrika): it might be possible to remove this unique signature.
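+    // As an illustration of the guard macros defined above, a statement such
+    // as RETURN_IF_OUTPUT_RESTARTS(0) expands to:
+    //
+    //   do {
+    //     if (output_->Restarting()) {
+    //       return 0;
+    //     }
+    //   } while (0)
+    //
+    // i.e., each delegating method below bails out early with a benign value
+    // while an internal restart is in progress.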
+    *audioLayer = AudioDeviceModule::kWindowsCoreAudio2;
+    return 0;
+  }
+
+  int32_t RegisterAudioCallback(AudioTransport* audioCallback) override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    RTC_DCHECK(audio_device_buffer_);
+    RTC_DCHECK_RUN_ON(&thread_checker_);
+    return audio_device_buffer_->RegisterAudioCallback(audioCallback);
+  }
+
+  int32_t Init() override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    RTC_DCHECK_RUN_ON(&thread_checker_);
+    RETURN_IF_OUTPUT_RESTARTS(0);
+    RETURN_IF_INPUT_RESTARTS(0);
+    if (initialized_) {
+      return 0;
+    }
+    audio_device_buffer_ =
+        std::make_unique<AudioDeviceBuffer>(task_queue_factory_);
+    AttachAudioBuffer();
+    InitStatus status;
+    if (output_->Init() != 0) {
+      status = InitStatus::PLAYOUT_ERROR;
+    } else if (input_->Init() != 0) {
+      output_->Terminate();
+      status = InitStatus::RECORDING_ERROR;
+    } else {
+      initialized_ = true;
+      status = InitStatus::OK;
+    }
+    if (status != InitStatus::OK) {
+      RTC_LOG(LS_ERROR) << "Audio device initialization failed";
+      return -1;
+    }
+    return 0;
+  }
+
+  int32_t Terminate() override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    RTC_DCHECK_RUN_ON(&thread_checker_);
+    RETURN_IF_OUTPUT_RESTARTS(0);
+    RETURN_IF_INPUT_RESTARTS(0);
+    if (!initialized_)
+      return 0;
+    int32_t err = input_->Terminate();
+    err |= output_->Terminate();
+    initialized_ = false;
+    RTC_DCHECK_EQ(err, 0);
+    return err;
+  }
+
+  bool Initialized() const override {
+    RTC_DCHECK_RUN_ON(&thread_checker_);
+    return initialized_;
+  }
+
+  int16_t PlayoutDevices() override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    RTC_DCHECK_RUN_ON(&thread_checker_);
+    RETURN_IF_OUTPUT_RESTARTS(0);
+    return output_->NumDevices();
+  }
+
+  int16_t RecordingDevices() override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    RTC_DCHECK_RUN_ON(&thread_checker_);
+    RETURN_IF_INPUT_RESTARTS(0);
+    return input_->NumDevices();
+  }
+
+  int32_t PlayoutDeviceName(uint16_t index,
+                            char name[kAdmMaxDeviceNameSize],
+                            char guid[kAdmMaxGuidSize]) override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    RTC_DCHECK_RUN_ON(&thread_checker_);
+    RETURN_IF_OUTPUT_RESTARTS(0);
+    std::string name_str, guid_str;
+    int ret = -1;
+    if (guid != nullptr) {
+      ret = output_->DeviceName(index, &name_str, &guid_str);
+      rtc::strcpyn(guid, kAdmMaxGuidSize, guid_str.c_str());
+    } else {
+      ret = output_->DeviceName(index, &name_str, nullptr);
+    }
+    rtc::strcpyn(name, kAdmMaxDeviceNameSize, name_str.c_str());
+    return ret;
+  }
+  int32_t RecordingDeviceName(uint16_t index,
+                              char name[kAdmMaxDeviceNameSize],
+                              char guid[kAdmMaxGuidSize]) override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    RTC_DCHECK_RUN_ON(&thread_checker_);
+    RETURN_IF_INPUT_RESTARTS(0);
+    std::string name_str, guid_str;
+    int ret = -1;
+    if (guid != nullptr) {
+      ret = input_->DeviceName(index, &name_str, &guid_str);
+      rtc::strcpyn(guid, kAdmMaxGuidSize, guid_str.c_str());
+    } else {
+      ret = input_->DeviceName(index, &name_str, nullptr);
+    }
+    rtc::strcpyn(name, kAdmMaxDeviceNameSize, name_str.c_str());
+    return ret;
+  }
+
+  int32_t SetPlayoutDevice(uint16_t index) override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    RTC_DCHECK_RUN_ON(&thread_checker_);
+    RETURN_IF_OUTPUT_RESTARTS(0);
+    return output_->SetDevice(index);
+  }
+
+  int32_t SetPlayoutDevice(
+      AudioDeviceModule::WindowsDeviceType device) override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    RTC_DCHECK_RUN_ON(&thread_checker_);
+    RETURN_IF_OUTPUT_RESTARTS(0);
+    return output_->SetDevice(device);
+  }
+  int32_t SetRecordingDevice(uint16_t index) override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    RTC_DCHECK_RUN_ON(&thread_checker_);
+    return
input_->SetDevice(index); + } + + int32_t SetRecordingDevice( + AudioDeviceModule::WindowsDeviceType device) override { + RTC_DLOG(LS_INFO) << __FUNCTION__; + RTC_DCHECK_RUN_ON(&thread_checker_); + return input_->SetDevice(device); + } + + int32_t PlayoutIsAvailable(bool* available) override { + RTC_DLOG(LS_INFO) << __FUNCTION__; + RTC_DCHECK_RUN_ON(&thread_checker_); + *available = true; + return 0; + } + + int32_t InitPlayout() override { + RTC_DLOG(LS_INFO) << __FUNCTION__; + RTC_DCHECK_RUN_ON(&thread_checker_); + RETURN_IF_OUTPUT_RESTARTS(0); + RETURN_IF_OUTPUT_IS_INITIALIZED(0); + return output_->InitPlayout(); + } + + bool PlayoutIsInitialized() const override { + RTC_DLOG(LS_INFO) << __FUNCTION__; + RTC_DCHECK_RUN_ON(&thread_checker_); + RETURN_IF_OUTPUT_RESTARTS(true); + return output_->PlayoutIsInitialized(); + } + + int32_t RecordingIsAvailable(bool* available) override { + RTC_DLOG(LS_INFO) << __FUNCTION__; + RTC_DCHECK_RUN_ON(&thread_checker_); + *available = true; + return 0; + } + + int32_t InitRecording() override { + RTC_DLOG(LS_INFO) << __FUNCTION__; + RTC_DCHECK_RUN_ON(&thread_checker_); + RETURN_IF_INPUT_RESTARTS(0); + RETURN_IF_INPUT_IS_INITIALIZED(0); + return input_->InitRecording(); + } + + bool RecordingIsInitialized() const override { + RTC_DLOG(LS_INFO) << __FUNCTION__; + RTC_DCHECK_RUN_ON(&thread_checker_); + RETURN_IF_INPUT_RESTARTS(true); + return input_->RecordingIsInitialized(); + } + + int32_t StartPlayout() override { + RTC_DLOG(LS_INFO) << __FUNCTION__; + RTC_DCHECK_RUN_ON(&thread_checker_); + RETURN_IF_OUTPUT_RESTARTS(0); + RETURN_IF_OUTPUT_IS_ACTIVE(0); + return output_->StartPlayout(); + } + + int32_t StopPlayout() override { + RTC_DLOG(LS_INFO) << __FUNCTION__; + RTC_DCHECK_RUN_ON(&thread_checker_); + RETURN_IF_OUTPUT_RESTARTS(-1); + return output_->StopPlayout(); + } + + bool Playing() const override { + RTC_DLOG(LS_INFO) << __FUNCTION__; + RTC_DCHECK_RUN_ON(&thread_checker_); + RETURN_IF_OUTPUT_RESTARTS(true); + return output_->Playing(); + } + + int32_t StartRecording() override { + RTC_DLOG(LS_INFO) << __FUNCTION__; + RTC_DCHECK_RUN_ON(&thread_checker_); + RETURN_IF_INPUT_RESTARTS(0); + RETURN_IF_INPUT_IS_ACTIVE(0); + return input_->StartRecording(); + } + + int32_t StopRecording() override { + RTC_DLOG(LS_INFO) << __FUNCTION__; + RTC_DCHECK_RUN_ON(&thread_checker_); + RETURN_IF_INPUT_RESTARTS(-1); + return input_->StopRecording(); + } + + bool Recording() const override { + RTC_DLOG(LS_INFO) << __FUNCTION__; + RETURN_IF_INPUT_RESTARTS(true); + return input_->Recording(); + } + + int32_t InitSpeaker() override { + RTC_DLOG(LS_INFO) << __FUNCTION__; + RTC_DCHECK_RUN_ON(&thread_checker_); + RTC_DLOG(LS_WARNING) << "This method has no effect"; + return initialized_ ? 0 : -1; + } + + bool SpeakerIsInitialized() const override { + RTC_DLOG(LS_INFO) << __FUNCTION__; + RTC_DCHECK_RUN_ON(&thread_checker_); + RTC_DLOG(LS_WARNING) << "This method has no effect"; + return initialized_; + } + + int32_t InitMicrophone() override { + RTC_DLOG(LS_INFO) << __FUNCTION__; + RTC_DCHECK_RUN_ON(&thread_checker_); + RTC_DLOG(LS_WARNING) << "This method has no effect"; + return initialized_ ? 0 : -1; + } + + bool MicrophoneIsInitialized() const override { + RTC_DLOG(LS_INFO) << __FUNCTION__; + RTC_DCHECK_RUN_ON(&thread_checker_); + RTC_DLOG(LS_WARNING) << "This method has no effect"; + return initialized_; + } + + int32_t SpeakerVolumeIsAvailable(bool* available) override { + // TODO(henrika): improve support. 
+ RTC_DLOG(LS_INFO) << __FUNCTION__; + RTC_DCHECK_RUN_ON(&thread_checker_); + *available = false; + return 0; + } + + int32_t SetSpeakerVolume(uint32_t volume) override { return 0; } + int32_t SpeakerVolume(uint32_t* volume) const override { return 0; } + int32_t MaxSpeakerVolume(uint32_t* maxVolume) const override { return 0; } + int32_t MinSpeakerVolume(uint32_t* minVolume) const override { return 0; } + + int32_t MicrophoneVolumeIsAvailable(bool* available) override { + // TODO(henrika): improve support. + RTC_DLOG(LS_INFO) << __FUNCTION__; + RTC_DCHECK_RUN_ON(&thread_checker_); + *available = false; + return 0; + } + + int32_t SetMicrophoneVolume(uint32_t volume) override { return 0; } + int32_t MicrophoneVolume(uint32_t* volume) const override { return 0; } + int32_t MaxMicrophoneVolume(uint32_t* maxVolume) const override { return 0; } + int32_t MinMicrophoneVolume(uint32_t* minVolume) const override { return 0; } + + int32_t SpeakerMuteIsAvailable(bool* available) override { return 0; } + int32_t SetSpeakerMute(bool enable) override { return 0; } + int32_t SpeakerMute(bool* enabled) const override { return 0; } + + int32_t MicrophoneMuteIsAvailable(bool* available) override { return 0; } + int32_t SetMicrophoneMute(bool enable) override { return 0; } + int32_t MicrophoneMute(bool* enabled) const override { return 0; } + + int32_t StereoPlayoutIsAvailable(bool* available) const override { + // TODO(henrika): improve support. + RTC_DLOG(LS_INFO) << __FUNCTION__; + RTC_DCHECK_RUN_ON(&thread_checker_); + *available = true; + return 0; + } + + int32_t SetStereoPlayout(bool enable) override { + // TODO(henrika): improve support. + RTC_DLOG(LS_INFO) << __FUNCTION__; + RTC_DCHECK_RUN_ON(&thread_checker_); + return 0; + } + + int32_t StereoPlayout(bool* enabled) const override { + // TODO(henrika): improve support. + RTC_DLOG(LS_INFO) << __FUNCTION__; + RTC_DCHECK_RUN_ON(&thread_checker_); + *enabled = true; + return 0; + } + + int32_t StereoRecordingIsAvailable(bool* available) const override { + // TODO(henrika): improve support. + RTC_DLOG(LS_INFO) << __FUNCTION__; + RTC_DCHECK_RUN_ON(&thread_checker_); + *available = true; + return 0; + } + + int32_t SetStereoRecording(bool enable) override { + // TODO(henrika): improve support. + RTC_DLOG(LS_INFO) << __FUNCTION__; + RTC_DCHECK_RUN_ON(&thread_checker_); + return 0; + } + + int32_t StereoRecording(bool* enabled) const override { + // TODO(henrika): improve support. 
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    RTC_DCHECK_RUN_ON(&thread_checker_);
+    *enabled = true;
+    return 0;
+  }
+
+  int32_t PlayoutDelay(uint16_t* delayMS) const override { return 0; }
+
+  bool BuiltInAECIsAvailable() const override { return false; }
+  bool BuiltInAGCIsAvailable() const override { return false; }
+  bool BuiltInNSIsAvailable() const override { return false; }
+
+  int32_t EnableBuiltInAEC(bool enable) override { return 0; }
+  int32_t EnableBuiltInAGC(bool enable) override { return 0; }
+  int32_t EnableBuiltInNS(bool enable) override { return 0; }
+
+  int32_t AttachAudioBuffer() {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    output_->AttachAudioBuffer(audio_device_buffer_.get());
+    input_->AttachAudioBuffer(audio_device_buffer_.get());
+    return 0;
+  }
+
+  int RestartPlayoutInternally() override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    RTC_DCHECK_RUN_ON(&thread_checker_);
+    RETURN_IF_OUTPUT_RESTARTS(0);
+    return output_->RestartPlayout();
+  }
+
+  int RestartRecordingInternally() override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    RTC_DCHECK_RUN_ON(&thread_checker_);
+    return input_->RestartRecording();
+  }
+
+  int SetPlayoutSampleRate(uint32_t sample_rate) override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    RTC_DCHECK_RUN_ON(&thread_checker_);
+    return output_->SetSampleRate(sample_rate);
+  }
+
+  int SetRecordingSampleRate(uint32_t sample_rate) override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    RTC_DCHECK_RUN_ON(&thread_checker_);
+    return input_->SetSampleRate(sample_rate);
+  }
+
+ private:
+  // Ensures that the class is used on the same thread as it is constructed
+  // and destroyed on.
+  SequenceChecker thread_checker_;
+
+  // Implements the AudioInput interface and deals with audio capturing parts.
+  const std::unique_ptr<AudioInput> input_;
+
+  // Implements the AudioOutput interface and deals with audio rendering parts.
+  const std::unique_ptr<AudioOutput> output_;
+
+  TaskQueueFactory* const task_queue_factory_;
+
+  // The AudioDeviceBuffer (ADB) instance is needed for sending/receiving audio
+  // to/from the WebRTC layer. Created and owned by this object. Used by
+  // both `input_` and `output_` but they use orthogonal parts of the ADB.
+  std::unique_ptr<AudioDeviceBuffer> audio_device_buffer_;
+
+  // Set to true after a successful call to Init(). Cleared by Terminate().
+  bool initialized_ = false;
+};
+
+}  // namespace
+
+rtc::scoped_refptr<AudioDeviceModuleForTest>
+CreateWindowsCoreAudioAudioDeviceModuleFromInputAndOutput(
+    std::unique_ptr<AudioInput> audio_input,
+    std::unique_ptr<AudioOutput> audio_output,
+    TaskQueueFactory* task_queue_factory) {
+  RTC_DLOG(LS_INFO) << __FUNCTION__;
+  return rtc::make_ref_counted<WindowsAudioDeviceModule>(
+      std::move(audio_input), std::move(audio_output), task_queue_factory);
+}
+
+}  // namespace webrtc_win
+}  // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/win/audio_device_module_win.h b/third_party/libwebrtc/modules/audio_device/win/audio_device_module_win.h
new file mode 100644
index 0000000000..1ed0b25620
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/win/audio_device_module_win.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_WIN_AUDIO_DEVICE_MODULE_WIN_H_
+#define MODULES_AUDIO_DEVICE_WIN_AUDIO_DEVICE_MODULE_WIN_H_
+
+#include <memory>
+#include <string>
+
+#include "api/scoped_refptr.h"
+#include "api/task_queue/task_queue_factory.h"
+#include "modules/audio_device/include/audio_device.h"
+
+namespace webrtc {
+
+class AudioDeviceBuffer;
+
+namespace webrtc_win {
+
+// This interface represents the main input-related parts of the complete
+// AudioDeviceModule interface.
+class AudioInput {
+ public:
+  virtual ~AudioInput() {}
+
+  virtual int Init() = 0;
+  virtual int Terminate() = 0;
+  virtual int NumDevices() const = 0;
+  virtual int SetDevice(int index) = 0;
+  virtual int SetDevice(AudioDeviceModule::WindowsDeviceType device) = 0;
+  virtual int DeviceName(int index, std::string* name, std::string* guid) = 0;
+  virtual void AttachAudioBuffer(AudioDeviceBuffer* audio_buffer) = 0;
+  virtual bool RecordingIsInitialized() const = 0;
+  virtual int InitRecording() = 0;
+  virtual int StartRecording() = 0;
+  virtual int StopRecording() = 0;
+  virtual bool Recording() = 0;
+  virtual int VolumeIsAvailable(bool* available) = 0;
+  virtual int RestartRecording() = 0;
+  virtual bool Restarting() const = 0;
+  virtual int SetSampleRate(uint32_t sample_rate) = 0;
+};
+
+// This interface represents the main output-related parts of the complete
+// AudioDeviceModule interface.
+class AudioOutput {
+ public:
+  virtual ~AudioOutput() {}
+
+  virtual int Init() = 0;
+  virtual int Terminate() = 0;
+  virtual int NumDevices() const = 0;
+  virtual int SetDevice(int index) = 0;
+  virtual int SetDevice(AudioDeviceModule::WindowsDeviceType device) = 0;
+  virtual int DeviceName(int index, std::string* name, std::string* guid) = 0;
+  virtual void AttachAudioBuffer(AudioDeviceBuffer* audio_buffer) = 0;
+  virtual bool PlayoutIsInitialized() const = 0;
+  virtual int InitPlayout() = 0;
+  virtual int StartPlayout() = 0;
+  virtual int StopPlayout() = 0;
+  virtual bool Playing() = 0;
+  virtual int VolumeIsAvailable(bool* available) = 0;
+  virtual int RestartPlayout() = 0;
+  virtual bool Restarting() const = 0;
+  virtual int SetSampleRate(uint32_t sample_rate) = 0;
+};
+
+// Combines an AudioInput and an AudioOutput implementation to build an
+// AudioDeviceModule. Hides most parts of the full ADM interface.
+rtc::scoped_refptr<AudioDeviceModuleForTest>
+CreateWindowsCoreAudioAudioDeviceModuleFromInputAndOutput(
+    std::unique_ptr<AudioInput> audio_input,
+    std::unique_ptr<AudioOutput> audio_output,
+    TaskQueueFactory* task_queue_factory);
+
+}  // namespace webrtc_win
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_DEVICE_WIN_AUDIO_DEVICE_MODULE_WIN_H_
diff --git a/third_party/libwebrtc/modules/audio_device/win/core_audio_base_win.cc b/third_party/libwebrtc/modules/audio_device/win/core_audio_base_win.cc
new file mode 100644
index 0000000000..dc8526b625
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/win/core_audio_base_win.cc
@@ -0,0 +1,948 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/win/core_audio_base_win.h"
+
+#include <memory>
+#include <string>
+
+#include "absl/strings/string_view.h"
+#include "modules/audio_device/audio_device_buffer.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/time_utils.h"
+#include "rtc_base/win/scoped_com_initializer.h"
+#include "rtc_base/win/windows_version.h"
+
+using Microsoft::WRL::ComPtr;
+
+namespace webrtc {
+namespace webrtc_win {
+namespace {
+
+// Even if the device supports low latency and even if IAudioClient3 can be
+// used (requires Win10 or higher), we currently disable any attempts to
+// initialize the client for low-latency.
+// TODO(henrika): more research is needed before we can enable low-latency.
+const bool kEnableLowLatencyIfSupported = false;
+
+// Each unit of reference time is 100 nanoseconds, hence
+// `kReferenceTimesPerSecond` corresponds to one second.
+// TODO(henrika): possibly add usage in Init().
+// const REFERENCE_TIME kReferenceTimesPerSecond = 10000000;
+
+enum DefaultDeviceType {
+  kUndefined = -1,
+  kDefault = 0,
+  kDefaultCommunications = 1,
+  kDefaultDeviceTypeMaxCount = kDefaultCommunications + 1,
+};
+
+const char* DirectionToString(CoreAudioBase::Direction direction) {
+  switch (direction) {
+    case CoreAudioBase::Direction::kOutput:
+      return "Output";
+    case CoreAudioBase::Direction::kInput:
+      return "Input";
+    default:
+      return "Unknown";
+  }
+}
+
+const char* RoleToString(const ERole role) {
+  switch (role) {
+    case eConsole:
+      return "Console";
+    case eMultimedia:
+      return "Multimedia";
+    case eCommunications:
+      return "Communications";
+    default:
+      return "Unsupported";
+  }
+}
+
+std::string IndexToString(int index) {
+  std::string ss = std::to_string(index);
+  switch (index) {
+    case kDefault:
+      ss += " (Default)";
+      break;
+    case kDefaultCommunications:
+      ss += " (Communications)";
+      break;
+    default:
+      break;
+  }
+  return ss;
+}
+
+const char* SessionStateToString(AudioSessionState state) {
+  switch (state) {
+    case AudioSessionStateActive:
+      return "Active";
+    case AudioSessionStateInactive:
+      return "Inactive";
+    case AudioSessionStateExpired:
+      return "Expired";
+    default:
+      return "Invalid";
+  }
+}
+
+const char* SessionDisconnectReasonToString(
+    AudioSessionDisconnectReason reason) {
+  switch (reason) {
+    case DisconnectReasonDeviceRemoval:
+      return "DeviceRemoval";
+    case DisconnectReasonServerShutdown:
+      return "ServerShutdown";
+    case DisconnectReasonFormatChanged:
+      return "FormatChanged";
+    case DisconnectReasonSessionLogoff:
+      return "SessionLogoff";
+    case DisconnectReasonSessionDisconnected:
+      return "Disconnected";
+    case DisconnectReasonExclusiveModeOverride:
+      return "ExclusiveModeOverride";
+    default:
+      return "Invalid";
+  }
+}
+
+// Returns true if the selected audio device supports low latency, i.e, if it
+// is possible to initialize the engine using periods less than the default
+// period (10ms).
+bool IsLowLatencySupported(IAudioClient3* client3,
+                           const WAVEFORMATEXTENSIBLE* format,
+                           uint32_t* min_period_in_frames) {
+  RTC_DLOG(LS_INFO) << __FUNCTION__;
+
+  // Get the range of periodicities supported by the engine for the specified
+  // stream format.
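+  // The utility call below presumably maps onto the Win10 IAudioClient3 API,
+  // whose documented shape (from audioclient.h) is roughly:
+  //
+  //   UINT32 default_p = 0, fundamental_p = 0, min_p = 0, max_p = 0;
+  //   client3->GetSharedModeEnginePeriod(
+  //       reinterpret_cast<const WAVEFORMATEX*>(format), &default_p,
+  //       &fundamental_p, &min_p, &max_p);
+  //
+  // where all four periods are expressed in audio frames at the rate given by
+  // the format.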
+  uint32_t default_period = 0;
+  uint32_t fundamental_period = 0;
+  uint32_t min_period = 0;
+  uint32_t max_period = 0;
+  if (FAILED(core_audio_utility::GetSharedModeEnginePeriod(
+          client3, format, &default_period, &fundamental_period, &min_period,
+          &max_period))) {
+    return false;
+  }
+
+  // Low latency is supported if the shortest allowed period is less than the
+  // default engine period.
+  // TODO(henrika): verify that this assumption is correct.
+  const bool low_latency = min_period < default_period;
+  RTC_LOG(LS_INFO) << "low_latency: " << low_latency;
+  *min_period_in_frames = low_latency ? min_period : 0;
+  return low_latency;
+}
+
+}  // namespace
+
+CoreAudioBase::CoreAudioBase(Direction direction,
+                             bool automatic_restart,
+                             OnDataCallback data_callback,
+                             OnErrorCallback error_callback)
+    : format_(),
+      direction_(direction),
+      automatic_restart_(automatic_restart),
+      on_data_callback_(data_callback),
+      on_error_callback_(error_callback),
+      device_index_(kUndefined),
+      is_restarting_(false) {
+  RTC_DLOG(LS_INFO) << __FUNCTION__ << "[" << DirectionToString(direction)
+                    << "]";
+  RTC_DLOG(LS_INFO) << "Automatic restart: " << automatic_restart;
+  RTC_DLOG(LS_INFO) << "Windows version: " << rtc_win::GetVersion();
+
+  // Create the event which the audio engine will signal each time a buffer
+  // becomes ready to be processed by the client.
+  audio_samples_event_.Set(CreateEvent(nullptr, false, false, nullptr));
+  RTC_DCHECK(audio_samples_event_.IsValid());
+
+  // Event to be set in Stop() when rendering/capturing shall stop.
+  stop_event_.Set(CreateEvent(nullptr, false, false, nullptr));
+  RTC_DCHECK(stop_event_.IsValid());
+
+  // Event to be set when it has been detected that an active device has been
+  // invalidated or the stream format has changed.
+  restart_event_.Set(CreateEvent(nullptr, false, false, nullptr));
+  RTC_DCHECK(restart_event_.IsValid());
+}
+
+CoreAudioBase::~CoreAudioBase() {
+  RTC_DLOG(LS_INFO) << __FUNCTION__;
+  RTC_DCHECK_EQ(ref_count_, 1);
+}
+
+EDataFlow CoreAudioBase::GetDataFlow() const {
+  return direction_ == CoreAudioBase::Direction::kOutput ? eRender : eCapture;
+}
+
+bool CoreAudioBase::IsRestarting() const {
+  return is_restarting_;
+}
+
+int64_t CoreAudioBase::TimeSinceStart() const {
+  return rtc::TimeSince(start_time_);
+}
+
+int CoreAudioBase::NumberOfActiveDevices() const {
+  return core_audio_utility::NumberOfActiveDevices(GetDataFlow());
+}
+
+int CoreAudioBase::NumberOfEnumeratedDevices() const {
+  const int num_active = NumberOfActiveDevices();
+  return num_active > 0 ? num_active + kDefaultDeviceTypeMaxCount : 0;
+}
+
+void CoreAudioBase::ReleaseCOMObjects() {
+  RTC_DLOG(LS_INFO) << __FUNCTION__;
+  // ComPtr::Reset() sets the ComPtr to nullptr releasing any previous
+  // reference.
+  if (audio_client_) {
+    audio_client_.Reset();
+  }
+  if (audio_clock_.Get()) {
+    audio_clock_.Reset();
+  }
+  if (audio_session_control_.Get()) {
+    audio_session_control_.Reset();
+  }
+}
+
+bool CoreAudioBase::IsDefaultDevice(int index) const {
+  return index == kDefault;
+}
+
+bool CoreAudioBase::IsDefaultCommunicationsDevice(int index) const {
+  return index == kDefaultCommunications;
+}
+
+bool CoreAudioBase::IsDefaultDeviceId(absl::string_view device_id) const {
+  // Returns true if `device_id` corresponds to the id of the default
+  // device. Note that, if only one device is available (or if the user has not
+  // explicitly set a default device), `device_id` will also match
+ return (IsInput() && + (device_id == core_audio_utility::GetDefaultInputDeviceID())) || + (IsOutput() && + (device_id == core_audio_utility::GetDefaultOutputDeviceID())); +} + +bool CoreAudioBase::IsDefaultCommunicationsDeviceId( + absl::string_view device_id) const { + // Returns true if `device_id` corresponds to the id of the default + // communication device. Note that, if only one device is available (or if + // the user has not explicitly set a communication device), `device_id` will + // also math IsDefaultDeviceId(). + return (IsInput() && + (device_id == + core_audio_utility::GetCommunicationsInputDeviceID())) || + (IsOutput() && + (device_id == core_audio_utility::GetCommunicationsOutputDeviceID())); +} + +bool CoreAudioBase::IsInput() const { + return direction_ == CoreAudioBase::Direction::kInput; +} + +bool CoreAudioBase::IsOutput() const { + return direction_ == CoreAudioBase::Direction::kOutput; +} + +std::string CoreAudioBase::GetDeviceID(int index) const { + if (index >= NumberOfEnumeratedDevices()) { + RTC_LOG(LS_ERROR) << "Invalid device index"; + return std::string(); + } + + std::string device_id; + if (IsDefaultDevice(index)) { + device_id = IsInput() ? core_audio_utility::GetDefaultInputDeviceID() + : core_audio_utility::GetDefaultOutputDeviceID(); + } else if (IsDefaultCommunicationsDevice(index)) { + device_id = IsInput() + ? core_audio_utility::GetCommunicationsInputDeviceID() + : core_audio_utility::GetCommunicationsOutputDeviceID(); + } else { + AudioDeviceNames device_names; + bool ok = IsInput() + ? core_audio_utility::GetInputDeviceNames(&device_names) + : core_audio_utility::GetOutputDeviceNames(&device_names); + if (ok) { + device_id = device_names[index].unique_id; + } + } + return device_id; +} + +int CoreAudioBase::SetDevice(int index) { + RTC_DLOG(LS_INFO) << __FUNCTION__ << "[" << DirectionToString(direction()) + << "]: index=" << IndexToString(index); + if (initialized_) { + return -1; + } + + std::string device_id = GetDeviceID(index); + RTC_DLOG(LS_INFO) << "index=" << IndexToString(index) + << " => device_id: " << device_id; + device_index_ = index; + device_id_ = device_id; + + return device_id_.empty() ? -1 : 0; +} + +int CoreAudioBase::DeviceName(int index, + std::string* name, + std::string* guid) const { + RTC_DLOG(LS_INFO) << __FUNCTION__ << "[" << DirectionToString(direction()) + << "]: index=" << IndexToString(index); + if (index > NumberOfEnumeratedDevices() - 1) { + RTC_LOG(LS_ERROR) << "Invalid device index"; + return -1; + } + + AudioDeviceNames device_names; + bool ok = IsInput() ? core_audio_utility::GetInputDeviceNames(&device_names) + : core_audio_utility::GetOutputDeviceNames(&device_names); + // Validate the index one extra time in-case the size of the generated list + // did not match NumberOfEnumeratedDevices(). 
+  if (!ok || static_cast<int>(device_names.size()) <= index) {
+    RTC_LOG(LS_ERROR) << "Failed to get the device name";
+    return -1;
+  }
+
+  *name = device_names[index].device_name;
+  RTC_DLOG(LS_INFO) << "name: " << *name;
+  if (guid != nullptr) {
+    *guid = device_names[index].unique_id;
+    RTC_DLOG(LS_INFO) << "guid: " << *guid;
+  }
+  return 0;
+}
+
+bool CoreAudioBase::Init() {
+  RTC_DLOG(LS_INFO) << __FUNCTION__ << "[" << DirectionToString(direction())
+                    << "]";
+  RTC_DCHECK_GE(device_index_, 0);
+  RTC_DCHECK(!device_id_.empty());
+  RTC_DCHECK(audio_device_buffer_);
+  RTC_DCHECK(!audio_client_);
+  RTC_DCHECK(!audio_session_control_.Get());
+
+  // Use an existing combination of `device_index_` and `device_id_` to set
+  // parameters which are required to create an audio client. It is up to the
+  // parent class to set `device_index_` and `device_id_`.
+  std::string device_id = AudioDeviceName::kDefaultDeviceId;
+  ERole role = ERole();
+  if (IsDefaultDevice(device_index_)) {
+    role = eConsole;
+  } else if (IsDefaultCommunicationsDevice(device_index_)) {
+    role = eCommunications;
+  } else {
+    device_id = device_id_;
+  }
+  RTC_LOG(LS_INFO) << "Unique device identifier: device_id=" << device_id
+                   << ", role=" << RoleToString(role);
+
+  // Create an IAudioClient interface which enables us to create and initialize
+  // an audio stream between an audio application and the audio engine.
+  ComPtr<IAudioClient> audio_client;
+  if (core_audio_utility::GetAudioClientVersion() == 3) {
+    RTC_DLOG(LS_INFO) << "Using IAudioClient3";
+    audio_client =
+        core_audio_utility::CreateClient3(device_id, GetDataFlow(), role);
+  } else if (core_audio_utility::GetAudioClientVersion() == 2) {
+    RTC_DLOG(LS_INFO) << "Using IAudioClient2";
+    audio_client =
+        core_audio_utility::CreateClient2(device_id, GetDataFlow(), role);
+  } else {
+    RTC_DLOG(LS_INFO) << "Using IAudioClient";
+    audio_client =
+        core_audio_utility::CreateClient(device_id, GetDataFlow(), role);
+  }
+  if (!audio_client) {
+    return false;
+  }
+
+  // Set extra client properties before initialization if the audio client
+  // supports it.
+  // TODO(henrika): evaluate effect(s) of making these changes. Also, perhaps
+  // these types of settings belongs to the client and not the utility parts.
+  if (core_audio_utility::GetAudioClientVersion() >= 2) {
+    if (FAILED(core_audio_utility::SetClientProperties(
+            static_cast<IAudioClient2*>(audio_client.Get())))) {
+      return false;
+    }
+  }
+
+  // Retrieve preferred audio input or output parameters for the given client
+  // and the specified client properties. Override the preferred rate if sample
+  // rate has been defined by the user. Rate conversion will be performed by
+  // the audio engine to match the client if needed.
+  AudioParameters params;
+  HRESULT res = sample_rate_ ? core_audio_utility::GetPreferredAudioParameters(
+                                   audio_client.Get(), &params, *sample_rate_)
+                             : core_audio_utility::GetPreferredAudioParameters(
+                                   audio_client.Get(), &params);
+  if (FAILED(res)) {
+    return false;
+  }
+
+  // Define the output WAVEFORMATEXTENSIBLE format in `format_`.
+  WAVEFORMATEX* format = &format_.Format;
+  format->wFormatTag = WAVE_FORMAT_EXTENSIBLE;
+  // Check the preferred channel configuration and request implicit channel
+  // upmixing (audio engine extends from 2 to N channels internally) if the
+  // preferred number of channels is larger than two; i.e., initialize the
+  // stream in stereo even if the preferred configuration is multi-channel.
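+  // As a concrete example of the arithmetic below: for 16-bit stereo at
+  // 48 kHz, nBlockAlign = (16 / 8) * 2 = 4 bytes per frame and
+  // nAvgBytesPerSec = 48000 * 4 = 192000 bytes per second.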
+  if (params.channels() <= 2) {
+    format->nChannels = rtc::dchecked_cast<WORD>(params.channels());
+  } else {
+    // TODO(henrika): ensure that this approach works on different multi-channel
+    // devices. Verified on:
+    // - Corsair VOID PRO Surround USB Adapter (supports 7.1)
+    RTC_LOG(LS_WARNING)
+        << "Using channel upmixing in WASAPI audio engine (2 => "
+        << params.channels() << ")";
+    format->nChannels = 2;
+  }
+  format->nSamplesPerSec = params.sample_rate();
+  format->wBitsPerSample = rtc::dchecked_cast<WORD>(params.bits_per_sample());
+  format->nBlockAlign = (format->wBitsPerSample / 8) * format->nChannels;
+  format->nAvgBytesPerSec = format->nSamplesPerSec * format->nBlockAlign;
+  format->cbSize = sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX);
+  // Add the parts which are unique for the WAVE_FORMAT_EXTENSIBLE structure.
+  format_.Samples.wValidBitsPerSample =
+      rtc::dchecked_cast<WORD>(params.bits_per_sample());
+  format_.dwChannelMask =
+      format->nChannels == 1 ? KSAUDIO_SPEAKER_MONO : KSAUDIO_SPEAKER_STEREO;
+  format_.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
+  RTC_DLOG(LS_INFO) << core_audio_utility::WaveFormatToString(&format_);
+
+  // Verify that the format is supported but exclude the test if the default
+  // sample rate has been overridden. If so, the WASAPI audio engine will do
+  // any necessary conversions between the client format we have given it and
+  // the playback mix format or recording split format.
+  if (!sample_rate_) {
+    if (!core_audio_utility::IsFormatSupported(
+            audio_client.Get(), AUDCLNT_SHAREMODE_SHARED, &format_)) {
+      return false;
+    }
+  }
+
+  // Check if low-latency is supported and use special initialization if it is.
+  // Low-latency initialization requires these things:
+  // - IAudioClient3 (>= Win10)
+  // - HDAudio driver
+  // - kEnableLowLatencyIfSupported changed from false (default) to true.
+  // TODO(henrika): IsLowLatencySupported() returns AUDCLNT_E_UNSUPPORTED_FORMAT
+  // when `sample_rate_.has_value()` returns true if rate conversion is
+  // actually required (i.e., client asks for other than the default rate).
+  bool low_latency_support = false;
+  uint32_t min_period_in_frames = 0;
+  if (kEnableLowLatencyIfSupported &&
+      core_audio_utility::GetAudioClientVersion() >= 3) {
+    low_latency_support = IsLowLatencySupported(
+        static_cast<IAudioClient3*>(audio_client.Get()), &format_,
+        &min_period_in_frames);
+  }
+
+  if (low_latency_support) {
+    RTC_DCHECK_GE(core_audio_utility::GetAudioClientVersion(), 3);
+    // Use IAudioClient3::InitializeSharedAudioStream() API to initialize a
+    // low-latency event-driven client. Request the smallest possible
+    // periodicity.
+    // TODO(henrika): evaluate this scheme in terms of CPU etc.
+    if (FAILED(core_audio_utility::SharedModeInitializeLowLatency(
+            static_cast<IAudioClient3*>(audio_client.Get()), &format_,
+            audio_samples_event_, min_period_in_frames,
+            sample_rate_.has_value(), &endpoint_buffer_size_frames_))) {
+      return false;
+    }
+  } else {
+    // Initialize the audio stream between the client and the device in shared
+    // mode using event-driven buffer handling. Also, using 0 as requested
+    // buffer size results in a default (minimum) endpoint buffer size.
+    // TODO(henrika): possibly increase `requested_buffer_size` to add
+    // robustness.
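+    // For reference, SharedModeInitialize() is presumably a thin wrapper
+    // around the classic event-driven WASAPI setup (error handling elided):
+    //
+    //   audio_client->Initialize(AUDCLNT_SHAREMODE_SHARED,
+    //                            AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
+    //                            0 /* use minimum buffer */, 0,
+    //                            reinterpret_cast<WAVEFORMATEX*>(&format_),
+    //                            nullptr);
+    //   audio_client->SetEventHandle(audio_samples_event_.Get());
+    //   audio_client->GetBufferSize(&endpoint_buffer_size_frames_);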
+    const REFERENCE_TIME requested_buffer_size = 0;
+    if (FAILED(core_audio_utility::SharedModeInitialize(
+            audio_client.Get(), &format_, audio_samples_event_,
+            requested_buffer_size, sample_rate_.has_value(),
+            &endpoint_buffer_size_frames_))) {
+      return false;
+    }
+  }
+
+  // Check device period and the preferred buffer size and log a warning if
+  // WebRTC's buffer size is not an even divisor of the preferred buffer size
+  // in Core Audio.
+  // TODO(henrika): sort out if a non-perfect match really is an issue.
+  // TODO(henrika): compare with IAudioClient3::GetSharedModeEnginePeriod().
+  REFERENCE_TIME device_period;
+  if (FAILED(core_audio_utility::GetDevicePeriod(
+          audio_client.Get(), AUDCLNT_SHAREMODE_SHARED, &device_period))) {
+    return false;
+  }
+  const double device_period_in_seconds =
+      static_cast<double>(
+          core_audio_utility::ReferenceTimeToTimeDelta(device_period).ms()) /
+      1000.0L;
+  const int preferred_frames_per_buffer =
+      static_cast<int>(params.sample_rate() * device_period_in_seconds + 0.5);
+  RTC_DLOG(LS_INFO) << "preferred_frames_per_buffer: "
+                    << preferred_frames_per_buffer;
+  if (preferred_frames_per_buffer % params.frames_per_buffer()) {
+    RTC_LOG(LS_WARNING) << "Buffer size of " << params.frames_per_buffer()
+                        << " is not an even divisor of "
+                        << preferred_frames_per_buffer;
+  }
+
+  // Create an AudioSessionControl interface given the initialized client.
+  // The IAudioSessionControl interface enables a client to configure the
+  // control parameters for an audio session and to monitor events in the
+  // session.
+  ComPtr<IAudioSessionControl> audio_session_control =
+      core_audio_utility::CreateAudioSessionControl(audio_client.Get());
+  if (!audio_session_control.Get()) {
+    return false;
+  }
+
+  // The Sndvol program displays volume and mute controls for sessions that
+  // are in the active and inactive states.
+  AudioSessionState state;
+  if (FAILED(audio_session_control->GetState(&state))) {
+    return false;
+  }
+  RTC_DLOG(LS_INFO) << "audio session state: " << SessionStateToString(state);
+  RTC_DCHECK_EQ(state, AudioSessionStateInactive);
+
+  // Register the client to receive notifications of session events, including
+  // changes in the stream state.
+  if (FAILED(audio_session_control->RegisterAudioSessionNotification(this))) {
+    return false;
+  }
+
+  // Store valid COM interfaces.
+  audio_client_ = audio_client;
+  audio_session_control_ = audio_session_control;
+
+  return true;
+}
+
+bool CoreAudioBase::Start() {
+  RTC_DLOG(LS_INFO) << __FUNCTION__ << "[" << DirectionToString(direction())
+                    << "]";
+  if (IsRestarting()) {
+    // Audio thread should be alive during internal restart since the restart
+    // callback is triggered on that thread and it also makes the restart
+    // sequence less complex.
+    RTC_DCHECK(!audio_thread_.empty());
+  }
+
+  // Start an audio thread but only if one does not already exist (which is the
+  // case during restart).
+  if (audio_thread_.empty()) {
+    const absl::string_view name =
+        IsInput() ? "wasapi_capture_thread" : "wasapi_render_thread";
+    audio_thread_ = rtc::PlatformThread::SpawnJoinable(
+        [this] { ThreadRun(); }, name,
+        rtc::ThreadAttributes().SetPriority(rtc::ThreadPriority::kRealtime));
+    RTC_DLOG(LS_INFO) << "Started thread with name: " << name
+                      << " and handle: " << *audio_thread_.GetHandle();
+  }
+
+  // Start streaming data between the endpoint buffer and the audio engine.
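+  // Per the WASAPI documentation, Start() fails with AUDCLNT_E_NOT_STOPPED if
+  // the stream is already running, and with AUDCLNT_E_EVENTHANDLE_NOT_SET if
+  // event-driven buffering was requested without a prior SetEventHandle()
+  // call; both error paths funnel into the handler below.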
+  _com_error error = audio_client_->Start();
+  if (FAILED(error.Error())) {
+    StopThread();
+    RTC_LOG(LS_ERROR) << "IAudioClient::Start failed: "
+                      << core_audio_utility::ErrorToString(error);
+    return false;
+  }
+
+  start_time_ = rtc::TimeMillis();
+  num_data_callbacks_ = 0;
+
+  return true;
+}
+
+bool CoreAudioBase::Stop() {
+  RTC_DLOG(LS_INFO) << __FUNCTION__ << "[" << DirectionToString(direction())
+                    << "]";
+  RTC_DLOG(LS_INFO) << "total activity time: " << TimeSinceStart();
+
+  // Stop audio streaming.
+  _com_error error = audio_client_->Stop();
+  if (FAILED(error.Error())) {
+    RTC_LOG(LS_ERROR) << "IAudioClient::Stop failed: "
+                      << core_audio_utility::ErrorToString(error);
+  }
+  // Stop and destroy the audio thread, but only when a restart attempt is not
+  // ongoing.
+  if (!IsRestarting()) {
+    StopThread();
+  }
+
+  // Flush all pending data and reset the audio clock stream position to 0.
+  error = audio_client_->Reset();
+  if (FAILED(error.Error())) {
+    RTC_LOG(LS_ERROR) << "IAudioClient::Reset failed: "
+                      << core_audio_utility::ErrorToString(error);
+  }
+
+  if (IsOutput()) {
+    // Extra safety check to ensure that the buffers are cleared.
+    // If the buffers are not cleared correctly, the next call to Start()
+    // would fail with AUDCLNT_E_BUFFER_ERROR at
+    // IAudioRenderClient::GetBuffer().
+    UINT32 num_queued_frames = 0;
+    audio_client_->GetCurrentPadding(&num_queued_frames);
+    RTC_DCHECK_EQ(0u, num_queued_frames);
+  }
+
+  // Remove the client's previous registration for notifications about audio
+  // session events.
+  RTC_DLOG(LS_INFO) << "audio session state: "
+                    << SessionStateToString(GetAudioSessionState());
+  error = audio_session_control_->UnregisterAudioSessionNotification(this);
+  if (FAILED(error.Error())) {
+    RTC_LOG(LS_ERROR)
+        << "IAudioSessionControl::UnregisterAudioSessionNotification failed: "
+        << core_audio_utility::ErrorToString(error);
+  }
+
+  // To keep the restart process as simple as possible, the audio thread is
+  // not destroyed during restart attempts triggered by internal error
+  // callbacks.
+  if (!IsRestarting()) {
+    thread_checker_audio_.Detach();
+  }
+
+  // Release all allocated COM interfaces to allow for a restart without
+  // intermediate destruction.
+  ReleaseCOMObjects();
+
+  return true;
+}
+
+bool CoreAudioBase::IsVolumeControlAvailable(bool* available) const {
+  // A valid IAudioClient is required to access the ISimpleAudioVolume
+  // interface properly. It is possible to use
+  // IAudioSessionManager::GetSimpleAudioVolume as well, but we use the audio
+  // client here to ensure that the initialized audio session is visible under
+  // the group box labeled "Applications" in Sndvol.exe.
+  if (!audio_client_) {
+    return false;
+  }
+
+  // Try to create an ISimpleAudioVolume instance.
+  ComPtr<ISimpleAudioVolume> audio_volume =
+      core_audio_utility::CreateSimpleAudioVolume(audio_client_.Get());
+  if (!audio_volume.Get()) {
+    RTC_DLOG(LS_ERROR) << "Volume control is not supported";
+    return false;
+  }
+
+  // Try to use the valid volume control.
+  float volume = 0.0;
+  _com_error error = audio_volume->GetMasterVolume(&volume);
+  if (error.Error() != S_OK) {
+    RTC_LOG(LS_ERROR) << "ISimpleAudioVolume::GetMasterVolume failed: "
+                      << core_audio_utility::ErrorToString(error);
+    *available = false;
+    return false;
+  }
+  RTC_DLOG(LS_INFO) << "master volume for output audio session: " << volume;
+
+  *available = true;
+  return true;
+}
+
+// Internal test method which can be used in tests to emulate a restart signal.
+// It simply sets the same event which is normally triggered by session and
+// device notifications. Hence, the emulated restart sequence covers most parts
+// of a real sequence except the actual device switch.
+bool CoreAudioBase::Restart() {
+  RTC_DLOG(LS_INFO) << __FUNCTION__ << "[" << DirectionToString(direction())
+                    << "]";
+  if (!automatic_restart()) {
+    return false;
+  }
+  is_restarting_ = true;
+  SetEvent(restart_event_.Get());
+  return true;
+}
+
+void CoreAudioBase::StopThread() {
+  RTC_DLOG(LS_INFO) << __FUNCTION__;
+  RTC_DCHECK(!IsRestarting());
+  if (!audio_thread_.empty()) {
+    RTC_DLOG(LS_INFO) << "Sets stop_event...";
+    SetEvent(stop_event_.Get());
+    RTC_DLOG(LS_INFO) << "PlatformThread::Finalize...";
+    audio_thread_.Finalize();
+
+    // Ensure that we don't quit the main thread loop immediately next
+    // time Start() is called.
+    ResetEvent(stop_event_.Get());
+    ResetEvent(restart_event_.Get());
+  }
+}
+
+bool CoreAudioBase::HandleRestartEvent() {
+  RTC_DLOG(LS_INFO) << __FUNCTION__ << "[" << DirectionToString(direction())
+                    << "]";
+  RTC_DCHECK_RUN_ON(&thread_checker_audio_);
+  RTC_DCHECK(!audio_thread_.empty());
+  RTC_DCHECK(IsRestarting());
+  // Let each client (input and/or output) take care of its own restart
+  // sequence since each side might need unique actions.
+  // TODO(henrika): revisit and investigate if one common base implementation
+  // is possible.
+  bool restart_ok = on_error_callback_(ErrorType::kStreamDisconnected);
+  is_restarting_ = false;
+  return restart_ok;
+}
+
+bool CoreAudioBase::SwitchDeviceIfNeeded() {
+  RTC_DLOG(LS_INFO) << __FUNCTION__ << "[" << DirectionToString(direction())
+                    << "]";
+  RTC_DCHECK_RUN_ON(&thread_checker_audio_);
+  RTC_DCHECK(IsRestarting());
+
+  RTC_DLOG(LS_INFO) << "device_index=" << device_index_
+                    << " => device_id: " << device_id_;
+
+  // Ensure that at least one device exists and can be utilized. The most
+  // probable cause for ending up here is that a device has been removed.
+  if (core_audio_utility::NumberOfActiveDevices(IsInput() ? eCapture
+                                                          : eRender) < 1) {
+    RTC_DLOG(LS_ERROR) << "All devices are disabled or removed";
+    return false;
+  }
+
+  // Get the unique device ID for the index which is currently used. It seems
+  // safe to assume that if the ID is the same as the existing device ID, then
+  // the device configuration is the same as before.
+  std::string device_id = GetDeviceID(device_index_);
+  if (device_id != device_id_) {
+    RTC_LOG(LS_WARNING)
+        << "Device configuration has changed => changing device selection...";
+    // TODO(henrika): depending on the current state and how we got here, we
+    // must select a new device here.
+    if (SetDevice(kDefault) == -1) {
+      RTC_LOG(LS_WARNING) << "Failed to set new audio device";
+      return false;
+    }
+  } else {
+    RTC_LOG(LS_INFO)
+        << "Device configuration has not changed => keeping selected device";
+  }
+  return true;
+}
+
+AudioSessionState CoreAudioBase::GetAudioSessionState() const {
+  AudioSessionState state = AudioSessionStateInactive;
+  RTC_DCHECK(audio_session_control_.Get());
+  _com_error error = audio_session_control_->GetState(&state);
+  if (FAILED(error.Error())) {
+    RTC_DLOG(LS_ERROR) << "IAudioSessionControl::GetState failed: "
+                       << core_audio_utility::ErrorToString(error);
+  }
+  return state;
+}
+
+// TODO(henrika): only used for debugging purposes currently.
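+// Note: the ref count below only satisfies the IUnknown contract inherited
+// via IAudioSessionEvents; the object is owned by its CoreAudioInput or
+// CoreAudioOutput subclass and is never deleted through Release(). A
+// conventional COM implementation would instead end Release() with something
+// like (shown for contrast only):
+//
+//   if (new_ref == 0)
+//     delete this;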
+ULONG CoreAudioBase::AddRef() {
+  ULONG new_ref = InterlockedIncrement(&ref_count_);
+  // RTC_DLOG(LS_INFO) << "__AddRef => " << new_ref;
+  return new_ref;
+}
+
+// TODO(henrika): does not call delete this.
+ULONG CoreAudioBase::Release() {
+  ULONG new_ref = InterlockedDecrement(&ref_count_);
+  // RTC_DLOG(LS_INFO) << "__Release => " << new_ref;
+  return new_ref;
+}
+
+// TODO(henrika): can probably be replaced by "return S_OK" only.
+HRESULT CoreAudioBase::QueryInterface(REFIID iid, void** object) {
+  if (object == nullptr) {
+    return E_POINTER;
+  }
+  if (iid == IID_IUnknown || iid == __uuidof(IAudioSessionEvents)) {
+    *object = static_cast<IAudioSessionEvents*>(this);
+    return S_OK;
+  }
+  *object = nullptr;
+  return E_NOINTERFACE;
+}
+
+// IAudioSessionEvents::OnStateChanged.
+HRESULT CoreAudioBase::OnStateChanged(AudioSessionState new_state) {
+  RTC_DLOG(LS_INFO) << "___" << __FUNCTION__ << "["
+                    << DirectionToString(direction())
+                    << "] new_state: " << SessionStateToString(new_state);
+  return S_OK;
+}
+
+// When a session is disconnected because of a device removal or format change
+// event, we want to inform the audio thread about the lost audio session and
+// trigger an attempt to restart audio using a new (default) device.
+// This method is called on separate threads owned by the session manager, and
+// the same type of callback can be invoked more than once for the same event.
+HRESULT CoreAudioBase::OnSessionDisconnected(
+    AudioSessionDisconnectReason disconnect_reason) {
+  RTC_DLOG(LS_INFO) << "___" << __FUNCTION__ << "["
+                    << DirectionToString(direction()) << "] reason: "
+                    << SessionDisconnectReasonToString(disconnect_reason);
+  // Ignore changes in the audio session (don't try to restart) if the user
+  // has explicitly asked for this type of ADM during construction.
+  if (!automatic_restart()) {
+    RTC_DLOG(LS_WARNING) << "___Automatic restart is disabled";
+    return S_OK;
+  }
+
+  if (IsRestarting()) {
+    RTC_DLOG(LS_WARNING) << "___Ignoring since restart is already active";
+    return S_OK;
+  }
+
+  // By default, automatic restart is enabled and the restart event will be set
+  // below if the device was removed or the format was changed.
+  if (disconnect_reason == DisconnectReasonDeviceRemoval ||
+      disconnect_reason == DisconnectReasonFormatChanged) {
+    is_restarting_ = true;
+    SetEvent(restart_event_.Get());
+  }
+  return S_OK;
+}
+
+// IAudioSessionEvents::OnDisplayNameChanged
+HRESULT CoreAudioBase::OnDisplayNameChanged(LPCWSTR new_display_name,
+                                            LPCGUID event_context) {
+  return S_OK;
+}
+
+// IAudioSessionEvents::OnIconPathChanged
+HRESULT CoreAudioBase::OnIconPathChanged(LPCWSTR new_icon_path,
+                                         LPCGUID event_context) {
+  return S_OK;
+}
+
+// IAudioSessionEvents::OnSimpleVolumeChanged
+HRESULT CoreAudioBase::OnSimpleVolumeChanged(float new_simple_volume,
+                                             BOOL new_mute,
+                                             LPCGUID event_context) {
+  return S_OK;
+}
+
+// IAudioSessionEvents::OnChannelVolumeChanged
+HRESULT CoreAudioBase::OnChannelVolumeChanged(DWORD channel_count,
+                                              float new_channel_volumes[],
+                                              DWORD changed_channel,
+                                              LPCGUID event_context) {
+  return S_OK;
+}
+
+// IAudioSessionEvents::OnGroupingParamChanged
+HRESULT CoreAudioBase::OnGroupingParamChanged(LPCGUID new_grouping_param,
+                                              LPCGUID event_context) {
+  return S_OK;
+}
+
+void CoreAudioBase::ThreadRun() {
+  if (!core_audio_utility::IsMMCSSSupported()) {
+    RTC_LOG(LS_ERROR) << "MMCSS is not supported";
+    return;
+  }
+  RTC_DLOG(LS_INFO) << "[" << DirectionToString(direction())
+                    << "] ThreadRun starts...";
+  // TODO(henrika): difference between "Pro Audio" and "Audio"?
+  ScopedMMCSSRegistration mmcss_registration(L"Pro Audio");
+  ScopedCOMInitializer com_initializer(ScopedCOMInitializer::kMTA);
+  RTC_DCHECK(mmcss_registration.Succeeded());
+  RTC_DCHECK(com_initializer.Succeeded());
+  RTC_DCHECK(stop_event_.IsValid());
+  RTC_DCHECK(audio_samples_event_.IsValid());
+
+  bool streaming = true;
+  bool error = false;
+  HANDLE wait_array[] = {stop_event_.Get(), restart_event_.Get(),
+                         audio_samples_event_.Get()};
+
+  // The device frequency is the frequency generated by the hardware clock in
+  // the audio device. The GetFrequency() method reports a constant frequency.
+  UINT64 device_frequency = 0;
+  _com_error result(S_FALSE);
+  if (audio_clock_) {
+    RTC_DCHECK(IsOutput());
+    result = audio_clock_->GetFrequency(&device_frequency);
+    if (FAILED(result.Error())) {
+      RTC_LOG(LS_ERROR) << "IAudioClock::GetFrequency failed: "
+                        << core_audio_utility::ErrorToString(result);
+    }
+  }
+
+  // Keep streaming audio until the stop event or the stream-switch event
+  // is signaled. An error event can also break the main thread loop.
+  while (streaming && !error) {
+    // Wait for a close-down event, stream-switch event or a new render event.
+    DWORD wait_result = WaitForMultipleObjects(arraysize(wait_array),
+                                               wait_array, false, INFINITE);
+    switch (wait_result) {
+      case WAIT_OBJECT_0 + 0:
+        // `stop_event_` has been set.
+        streaming = false;
+        break;
+      case WAIT_OBJECT_0 + 1:
+        // `restart_event_` has been set.
+        error = !HandleRestartEvent();
+        break;
+      case WAIT_OBJECT_0 + 2:
+        // `audio_samples_event_` has been set.
+        error = !on_data_callback_(device_frequency);
+        break;
+      default:
+        error = true;
+        break;
+    }
+  }
+
+  if (streaming && error) {
+    RTC_LOG(LS_ERROR) << "[" << DirectionToString(direction())
+                      << "] WASAPI streaming failed.";
+    // Stop audio streaming since something has gone wrong in our main thread
+    // loop. Note that we are still in a "started" state, hence a Stop() call
+    // is required to join the thread properly.
+    result = audio_client_->Stop();
+    if (FAILED(result.Error())) {
+      RTC_LOG(LS_ERROR) << "IAudioClient::Stop failed: "
+                        << core_audio_utility::ErrorToString(result);
+    }
+
+    // TODO(henrika): notify clients that something has gone wrong and that
+    // this stream should be destroyed instead of reused in the future.
+  }
+
+  RTC_DLOG(LS_INFO) << "[" << DirectionToString(direction())
+                    << "] ...ThreadRun stops";
+}
+
+}  // namespace webrtc_win
+}  // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/win/core_audio_base_win.h b/third_party/libwebrtc/modules/audio_device/win/core_audio_base_win.h
new file mode 100644
index 0000000000..6c1357e059
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/win/core_audio_base_win.h
@@ -0,0 +1,203 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_BASE_WIN_H_
+#define MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_BASE_WIN_H_
+
+#include <atomic>
+#include <functional>
+#include <memory>
+#include <string>
+
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "api/sequence_checker.h"
+#include "modules/audio_device/win/core_audio_utility_win.h"
+#include "rtc_base/platform_thread.h"
+
+namespace webrtc {
+
+class AudioDeviceBuffer;
+class FineAudioBuffer;
+
+namespace webrtc_win {
+
+// Serves as base class for CoreAudioInput and CoreAudioOutput and supports
+// device handling and audio streaming where the direction (input or output)
+// is set at construction by the parent.
+// The IAudioSessionEvents interface provides notifications of session-related
+// events such as changes in the volume level, display name, and session state.
+// This class does not use the default ref-counting memory management method
+// provided by IUnknown: calling CoreAudioBase::Release() will not delete the
+// object. The client will receive notifications from the session manager on
+// a separate thread owned and controlled by the manager.
+// TODO(henrika): investigate if CoreAudioBase should implement
+// IMMNotificationClient as well (might improve support for device changes).
+class CoreAudioBase : public IAudioSessionEvents {
+ public:
+  enum class Direction {
+    kInput,
+    kOutput,
+  };
+
+  // TODO(henrika): add more error types.
+  enum class ErrorType {
+    kStreamDisconnected,
+  };
+
+  template <typename T>
+  auto as_integer(T const value) -> typename std::underlying_type<T>::type {
+    return static_cast<typename std::underlying_type<T>::type>(value);
+  }
+
+  // Callback definition for notifications of new audio data. For input
+  // clients, it means that "new audio data has now been captured", and for
+  // output clients, "the output layer now needs new audio data".
+  typedef std::function<bool(uint64_t device_frequency)> OnDataCallback;
+
+  // Callback definition for notifications of run-time error messages. It can
+  // be called e.g. when an active audio device is removed and an audio stream
+  // is disconnected (`error` is then set to kStreamDisconnected). Both input
+  // and output clients implement OnErrorCallback() and will trigger an
+  // internal restart sequence for kStreamDisconnected.
+  // This method is currently always called on the audio thread.
+  // TODO(henrika): add support for more error types.
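+  // For illustration only: CoreAudioInput and CoreAudioOutput install a
+  // callback of roughly this shape from their constructors (see the .cc
+  // files):
+  //
+  //   [this](ErrorType err) { return OnErrorCallback(err); }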
+  typedef std::function<bool(ErrorType error)> OnErrorCallback;
+
+  void ThreadRun();
+
+  CoreAudioBase(const CoreAudioBase&) = delete;
+  CoreAudioBase& operator=(const CoreAudioBase&) = delete;
+
+ protected:
+  explicit CoreAudioBase(Direction direction,
+                         bool automatic_restart,
+                         OnDataCallback data_callback,
+                         OnErrorCallback error_callback);
+  ~CoreAudioBase();
+
+  std::string GetDeviceID(int index) const;
+  int SetDevice(int index);
+  int DeviceName(int index, std::string* name, std::string* guid) const;
+
+  // Checks if the current device ID is no longer in use (e.g. due to a
+  // disconnected stream), and if so, switches device to the default audio
+  // device. Called on the audio thread during restart attempts.
+  bool SwitchDeviceIfNeeded();
+
+  bool Init();
+  bool Start();
+  bool Stop();
+  bool IsVolumeControlAvailable(bool* available) const;
+  bool Restart();
+
+  Direction direction() const { return direction_; }
+  bool automatic_restart() const { return automatic_restart_; }
+
+  // Releases all allocated COM resources in the base class.
+  void ReleaseCOMObjects();
+
+  // Returns the number of active devices given the specified `direction_` set
+  // by the parent (input or output).
+  int NumberOfActiveDevices() const;
+
+  // Returns the total number of enumerated audio devices, which is the sum of
+  // all active devices plus two extra (one default and one default
+  // communications). The value in `direction_` determines if capture or
+  // render devices are counted.
+  int NumberOfEnumeratedDevices() const;
+
+  bool IsInput() const;
+  bool IsOutput() const;
+  bool IsDefaultDevice(int index) const;
+  bool IsDefaultCommunicationsDevice(int index) const;
+  bool IsDefaultDeviceId(absl::string_view device_id) const;
+  bool IsDefaultCommunicationsDeviceId(absl::string_view device_id) const;
+  EDataFlow GetDataFlow() const;
+  bool IsRestarting() const;
+  int64_t TimeSinceStart() const;
+
+  // TODO(henrika): is the existing thread checker in WindowsAudioDeviceModule
+  // sufficient? As is, we have one top-level protection and then a second
+  // level here. In addition, calls to Init(), Start() and Stop() are not
+  // included to allow for support of internal restart (where these methods are
+  // called on the audio thread).
+  SequenceChecker thread_checker_;
+  SequenceChecker thread_checker_audio_;
+  AudioDeviceBuffer* audio_device_buffer_ = nullptr;
+  bool initialized_ = false;
+  WAVEFORMATEXTENSIBLE format_ = {};
+  uint32_t endpoint_buffer_size_frames_ = 0;
+  Microsoft::WRL::ComPtr<IAudioClock> audio_clock_;
+  Microsoft::WRL::ComPtr<IAudioClient> audio_client_;
+  bool is_active_ = false;
+  int64_t num_data_callbacks_ = 0;
+  int latency_ms_ = 0;
+  absl::optional<uint32_t> sample_rate_;
+
+ private:
+  const Direction direction_;
+  const bool automatic_restart_;
+  const OnDataCallback on_data_callback_;
+  const OnErrorCallback on_error_callback_;
+  ScopedHandle audio_samples_event_;
+  ScopedHandle stop_event_;
+  ScopedHandle restart_event_;
+  int64_t start_time_ = 0;
+  std::string device_id_;
+  int device_index_ = -1;
+  // Used by the IAudioSessionEvents implementations. Currently only utilized
+  // for debugging purposes.
+  LONG ref_count_ = 1;
+  // Set when the restart process starts and cleared when restart stops
+  // successfully. Accessed atomically.
+  std::atomic<bool> is_restarting_;
+  rtc::PlatformThread audio_thread_;
+  Microsoft::WRL::ComPtr<IAudioSessionControl> audio_session_control_;
+
+  void StopThread();
+  AudioSessionState GetAudioSessionState() const;
+
+  // Called on the audio thread when a restart event has been set.
+  // It will then trigger calls to the installed error callbacks with error
+  // type set to kStreamDisconnected.
+  bool HandleRestartEvent();
+
+  // IUnknown (required by IAudioSessionEvents and IMMNotificationClient).
+  ULONG __stdcall AddRef() override;
+  ULONG __stdcall Release() override;
+  HRESULT __stdcall QueryInterface(REFIID iid, void** object) override;
+
+  // IAudioSessionEvents implementation.
+  // These methods are called on separate threads owned by the session manager.
+  // More than one thread can be involved depending on the type of callback
+  // and audio session.
+  HRESULT __stdcall OnStateChanged(AudioSessionState new_state) override;
+  HRESULT __stdcall OnSessionDisconnected(
+      AudioSessionDisconnectReason disconnect_reason) override;
+  HRESULT __stdcall OnDisplayNameChanged(LPCWSTR new_display_name,
+                                         LPCGUID event_context) override;
+  HRESULT __stdcall OnIconPathChanged(LPCWSTR new_icon_path,
+                                      LPCGUID event_context) override;
+  HRESULT __stdcall OnSimpleVolumeChanged(float new_simple_volume,
+                                          BOOL new_mute,
+                                          LPCGUID event_context) override;
+  HRESULT __stdcall OnChannelVolumeChanged(DWORD channel_count,
+                                           float new_channel_volumes[],
+                                           DWORD changed_channel,
+                                           LPCGUID event_context) override;
+  HRESULT __stdcall OnGroupingParamChanged(LPCGUID new_grouping_param,
+                                           LPCGUID event_context) override;
+};
+
+}  // namespace webrtc_win
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_BASE_WIN_H_
diff --git a/third_party/libwebrtc/modules/audio_device/win/core_audio_input_win.cc b/third_party/libwebrtc/modules/audio_device/win/core_audio_input_win.cc
new file mode 100644
index 0000000000..17790dafc4
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/win/core_audio_input_win.cc
@@ -0,0 +1,453 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/win/core_audio_input_win.h"
+
+#include <memory>
+
+#include "modules/audio_device/audio_device_buffer.h"
+#include "modules/audio_device/fine_audio_buffer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+using Microsoft::WRL::ComPtr;
+
+namespace webrtc {
+namespace webrtc_win {
+
+enum AudioDeviceMessageType : uint32_t {
+  kMessageInputStreamDisconnected,
+};
+
+CoreAudioInput::CoreAudioInput(bool automatic_restart)
+    : CoreAudioBase(
+          CoreAudioBase::Direction::kInput,
+          automatic_restart,
+          [this](uint64_t freq) { return OnDataCallback(freq); },
+          [this](ErrorType err) { return OnErrorCallback(err); }) {
+  RTC_DLOG(LS_INFO) << __FUNCTION__;
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  thread_checker_audio_.Detach();
+}
+
+CoreAudioInput::~CoreAudioInput() {
+  RTC_DLOG(LS_INFO) << __FUNCTION__;
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+}
+
+int CoreAudioInput::Init() {
+  RTC_DLOG(LS_INFO) << __FUNCTION__;
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  return 0;
+}
+
+int CoreAudioInput::Terminate() {
+  RTC_DLOG(LS_INFO) << __FUNCTION__;
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  StopRecording();
+  return 0;
+}
+
+int CoreAudioInput::NumDevices() const {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  return core_audio_utility::NumberOfActiveDevices(eCapture);
+}
+
+int CoreAudioInput::SetDevice(int index) {
+  RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << index;
+  RTC_DCHECK_GE(index, 0);
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  return CoreAudioBase::SetDevice(index);
+}
+
+int CoreAudioInput::SetDevice(AudioDeviceModule::WindowsDeviceType device) {
+  RTC_DLOG(LS_INFO) << __FUNCTION__ << ": "
+                    << ((device == AudioDeviceModule::kDefaultDevice)
+                            ? "Default"
+                            : "DefaultCommunication");
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  return SetDevice((device == AudioDeviceModule::kDefaultDevice) ? 0 : 1);
+}
+
+int CoreAudioInput::DeviceName(int index,
+                               std::string* name,
+                               std::string* guid) {
+  RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << index;
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  RTC_DCHECK(name);
+  return CoreAudioBase::DeviceName(index, name, guid);
+}
+
+void CoreAudioInput::AttachAudioBuffer(AudioDeviceBuffer* audio_buffer) {
+  RTC_DLOG(LS_INFO) << __FUNCTION__;
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  audio_device_buffer_ = audio_buffer;
+}
+
+bool CoreAudioInput::RecordingIsInitialized() const {
+  RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << initialized_;
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  return initialized_;
+}
+
+int CoreAudioInput::InitRecording() {
+  RTC_DLOG(LS_INFO) << __FUNCTION__;
+  RTC_DCHECK(!initialized_);
+  RTC_DCHECK(!Recording());
+  RTC_DCHECK(!audio_capture_client_);
+
+  // Creates an IAudioClient instance and stores the valid interface pointer in
+  // `audio_client3_`, `audio_client2_`, or `audio_client_` depending on
+  // platform support. The base class will use optimal input parameters and do
+  // an event-driven shared-mode initialization. The utilized format will be
+  // stored in `format_` and can be used for configuration and allocation of
+  // audio buffers.
+  if (!CoreAudioBase::Init()) {
+    return -1;
+  }
+  RTC_DCHECK(audio_client_);
+
+  // Configure the recording side of the audio device buffer using `format_`
+  // after a trivial sanity check of the format structure.
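+  // Note: WAVEFORMATEXTENSIBLE embeds a WAVEFORMATEX as its first member
+  // (`Format`), so the `format` pointer taken below aliases the same storage
+  // that CoreAudioBase::Init() filled in via `format_`.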
+  RTC_DCHECK(audio_device_buffer_);
+  WAVEFORMATEX* format = &format_.Format;
+  RTC_DCHECK_EQ(format->wFormatTag, WAVE_FORMAT_EXTENSIBLE);
+  audio_device_buffer_->SetRecordingSampleRate(format->nSamplesPerSec);
+  audio_device_buffer_->SetRecordingChannels(format->nChannels);
+
+  // Create a modified audio buffer class which allows us to supply any number
+  // of samples (and not only multiples of 10ms) to match the optimal buffer
+  // size per callback used by Core Audio.
+  // TODO(henrika): can we share one FineAudioBuffer with the output side?
+  fine_audio_buffer_ = std::make_unique<FineAudioBuffer>(audio_device_buffer_);
+
+  // Create an IAudioCaptureClient for an initialized IAudioClient.
+  // The IAudioCaptureClient interface enables a client to read input data from
+  // a capture endpoint buffer.
+  ComPtr<IAudioCaptureClient> audio_capture_client =
+      core_audio_utility::CreateCaptureClient(audio_client_.Get());
+  if (!audio_capture_client) {
+    return -1;
+  }
+
+  // Query the performance frequency.
+  LARGE_INTEGER ticks_per_sec = {};
+  qpc_to_100ns_.reset();
+  if (::QueryPerformanceFrequency(&ticks_per_sec)) {
+    double qpc_ticks_per_second =
+        rtc::dchecked_cast<double>(ticks_per_sec.QuadPart);
+    qpc_to_100ns_ = 10000000.0 / qpc_ticks_per_second;
+  }
+
+  // Store valid COM interfaces.
+  audio_capture_client_ = audio_capture_client;
+
+  initialized_ = true;
+  return 0;
+}
+
+int CoreAudioInput::StartRecording() {
+  RTC_DLOG(LS_INFO) << __FUNCTION__;
+  RTC_DCHECK(!Recording());
+  RTC_DCHECK(fine_audio_buffer_);
+  RTC_DCHECK(audio_device_buffer_);
+  if (!initialized_) {
+    RTC_DLOG(LS_WARNING)
+        << "Recording cannot start since InitRecording must succeed first";
+    return 0;
+  }
+
+  fine_audio_buffer_->ResetRecord();
+  if (!IsRestarting()) {
+    audio_device_buffer_->StartRecording();
+  }
+
+  if (!Start()) {
+    return -1;
+  }
+
+  is_active_ = true;
+  return 0;
+}
+
+int CoreAudioInput::StopRecording() {
+  RTC_DLOG(LS_INFO) << __FUNCTION__;
+  if (!initialized_) {
+    return 0;
+  }
+
+  // Release resources allocated in InitRecording() and then return if this
+  // method is called without any active input audio.
+  if (!Recording()) {
+    RTC_DLOG(LS_WARNING) << "No input stream is active";
+    ReleaseCOMObjects();
+    initialized_ = false;
+    return 0;
+  }
+
+  if (!Stop()) {
+    RTC_LOG(LS_ERROR) << "StopRecording failed";
+    return -1;
+  }
+
+  if (!IsRestarting()) {
+    RTC_DCHECK(audio_device_buffer_);
+    audio_device_buffer_->StopRecording();
+  }
+
+  // Release all allocated resources to allow for a restart without
+  // intermediate destruction.
+  ReleaseCOMObjects();
+  qpc_to_100ns_.reset();
+
+  initialized_ = false;
+  is_active_ = false;
+  return 0;
+}
+
+bool CoreAudioInput::Recording() {
+  RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << is_active_;
+  return is_active_;
+}
+
+// TODO(henrika): finalize support of audio session volume control. As is, we
+// are not compatible with the old ADM implementation since it allows accessing
+// the volume control with any active audio output stream.
+int CoreAudioInput::VolumeIsAvailable(bool* available) {
+  RTC_DLOG(LS_INFO) << __FUNCTION__;
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  return IsVolumeControlAvailable(available) ? 0 : -1;
+}
+
+// Triggers the restart sequence. Only used for testing purposes to emulate
+// a real event where e.g. an active input device is removed.
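+// A test could drive it roughly like this (hypothetical sketch, not an
+// existing unit test):
+//
+//   CoreAudioInput input(/*automatic_restart=*/true);
+//   input.InitRecording();
+//   input.StartRecording();
+//   input.RestartRecording();  // Emulates e.g. removal of the active device.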
+int CoreAudioInput::RestartRecording() {
+  RTC_DLOG(LS_INFO) << __FUNCTION__;
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  if (!Recording()) {
+    return 0;
+  }
+
+  if (!Restart()) {
+    RTC_LOG(LS_ERROR) << "RestartRecording failed";
+    return -1;
+  }
+  return 0;
+}
+
+bool CoreAudioInput::Restarting() const {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  return IsRestarting();
+}
+
+int CoreAudioInput::SetSampleRate(uint32_t sample_rate) {
+  RTC_DLOG(LS_INFO) << __FUNCTION__;
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  sample_rate_ = sample_rate;
+  return 0;
+}
+
+void CoreAudioInput::ReleaseCOMObjects() {
+  RTC_DLOG(LS_INFO) << __FUNCTION__;
+  CoreAudioBase::ReleaseCOMObjects();
+  if (audio_capture_client_.Get()) {
+    audio_capture_client_.Reset();
+  }
+}
+
+bool CoreAudioInput::OnDataCallback(uint64_t device_frequency) {
+  RTC_DCHECK_RUN_ON(&thread_checker_audio_);
+
+  if (!initialized_ || !is_active_) {
+    // This is a concurrent examination of state across multiple threads, so it
+    // will be somewhat error prone, but we should still be defensive and not
+    // use audio_capture_client_ if we know it's not there.
+    return false;
+  }
+  if (num_data_callbacks_ == 0) {
+    RTC_LOG(LS_INFO) << "--- Input audio stream is alive ---";
+  }
+  UINT32 num_frames_in_next_packet = 0;
+  _com_error error =
+      audio_capture_client_->GetNextPacketSize(&num_frames_in_next_packet);
+  if (error.Error() == AUDCLNT_E_DEVICE_INVALIDATED) {
+    // Avoid breaking the thread loop implicitly by returning false; return
+    // true instead for AUDCLNT_E_DEVICE_INVALIDATED even though it is a valid
+    // error message. We will use notifications about device changes instead to
+    // stop data callbacks and attempt to restart streaming.
+    RTC_DLOG(LS_ERROR) << "AUDCLNT_E_DEVICE_INVALIDATED";
+    return true;
+  }
+  if (FAILED(error.Error())) {
+    RTC_LOG(LS_ERROR) << "IAudioCaptureClient::GetNextPacketSize failed: "
+                      << core_audio_utility::ErrorToString(error);
+    return false;
+  }
+
+  // Drain the WASAPI capture buffer fully if audio has been recorded.
+  while (num_frames_in_next_packet > 0) {
+    uint8_t* audio_data;
+    UINT32 num_frames_to_read = 0;
+    DWORD flags = 0;
+    UINT64 device_position_frames = 0;
+    UINT64 capture_time_100ns = 0;
+    error = audio_capture_client_->GetBuffer(&audio_data, &num_frames_to_read,
+                                             &flags, &device_position_frames,
+                                             &capture_time_100ns);
+    if (error.Error() == AUDCLNT_S_BUFFER_EMPTY) {
+      // The call succeeded but no capture data is available to be read.
+      // Return and start waiting for a new capture event.
+      RTC_DCHECK_EQ(num_frames_to_read, 0u);
+      return true;
+    }
+    if (FAILED(error.Error())) {
+      RTC_LOG(LS_ERROR) << "IAudioCaptureClient::GetBuffer failed: "
+                        << core_audio_utility::ErrorToString(error);
+      return false;
+    }
+
+    // Update the input delay estimate, but only about once per second to save
+    // resources. The estimate is usually stable.
+    if (num_data_callbacks_ % 100 == 0) {
+      absl::optional<int> opt_record_delay_ms;
+      // TODO(henrika): note that FineAudioBuffer adds latency as well.
+      opt_record_delay_ms = EstimateLatencyMillis(capture_time_100ns);
+      if (opt_record_delay_ms) {
+        latency_ms_ = *opt_record_delay_ms;
+      } else {
+        RTC_DLOG(LS_WARNING) << "Input latency is set to fixed value";
+        latency_ms_ = 20;
+      }
+    }
+    if (num_data_callbacks_ % 500 == 0) {
+      RTC_DLOG(LS_INFO) << "latency: " << latency_ms_;
+    }
+
+    // The data in the packet is not correlated with the previous packet's
+    // device position; this is possibly due to a stream state transition or
+    // timing glitch.
+    // The behavior of the AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY flag is
+    // undefined on the application's first call to GetBuffer after Start.
+    if (device_position_frames != 0 &&
+        flags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY) {
+      RTC_DLOG(LS_WARNING) << "AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY";
+    }
+    // The time at which the device's stream position was recorded is
+    // uncertain. Thus, the client might be unable to accurately set a time
+    // stamp for the current data packet.
+    if (flags & AUDCLNT_BUFFERFLAGS_TIMESTAMP_ERROR) {
+      RTC_DLOG(LS_WARNING) << "AUDCLNT_BUFFERFLAGS_TIMESTAMP_ERROR";
+    }
+
+    // Treat all of the data in the packet as silence and ignore the actual
+    // data values when AUDCLNT_BUFFERFLAGS_SILENT is set.
+    if (flags & AUDCLNT_BUFFERFLAGS_SILENT) {
+      rtc::ExplicitZeroMemory(audio_data,
+                              format_.Format.nBlockAlign * num_frames_to_read);
+      RTC_DLOG(LS_WARNING) << "Captured audio is replaced by silence";
+    } else {
+      // Copy recorded audio in `audio_data` to the WebRTC sink using the
+      // FineAudioBuffer object.
+      fine_audio_buffer_->DeliverRecordedData(
+          rtc::MakeArrayView(reinterpret_cast<const int16_t*>(audio_data),
+                             format_.Format.nChannels * num_frames_to_read),
+          latency_ms_);
+    }
+
+    error = audio_capture_client_->ReleaseBuffer(num_frames_to_read);
+    if (FAILED(error.Error())) {
+      RTC_LOG(LS_ERROR) << "IAudioCaptureClient::ReleaseBuffer failed: "
+                        << core_audio_utility::ErrorToString(error);
+      return false;
+    }
+
+    error =
+        audio_capture_client_->GetNextPacketSize(&num_frames_in_next_packet);
+    if (FAILED(error.Error())) {
+      RTC_LOG(LS_ERROR) << "IAudioCaptureClient::GetNextPacketSize failed: "
+                        << core_audio_utility::ErrorToString(error);
+      return false;
+    }
+  }
+  ++num_data_callbacks_;
+  return true;
+}
+
+bool CoreAudioInput::OnErrorCallback(ErrorType error) {
+  RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << as_integer(error);
+  RTC_DCHECK_RUN_ON(&thread_checker_audio_);
+  if (error == CoreAudioBase::ErrorType::kStreamDisconnected) {
+    HandleStreamDisconnected();
+  } else {
+    RTC_DLOG(LS_WARNING) << "Unsupported error type";
+  }
+  return true;
+}
+
+absl::optional<int> CoreAudioInput::EstimateLatencyMillis(
+    uint64_t capture_time_100ns) {
+  if (!qpc_to_100ns_) {
+    return absl::nullopt;
+  }
+  // Input parameter `capture_time_100ns` contains the performance counter at
+  // the time that the audio endpoint device recorded the device position of
+  // the first audio frame in the data packet, converted into 100ns units.
+  // We derive a delay estimate by:
+  // - sampling the current performance counter (qpc_now_raw),
+  // - converting it into 100ns time units (now_time_100ns), and
+  // - subtracting `capture_time_100ns` from now_time_100ns.
+  LARGE_INTEGER perf_counter_now = {};
+  if (!::QueryPerformanceCounter(&perf_counter_now)) {
+    return absl::nullopt;
+  }
+  uint64_t qpc_now_raw = perf_counter_now.QuadPart;
+  uint64_t now_time_100ns = qpc_now_raw * (*qpc_to_100ns_);
+  webrtc::TimeDelta delay_us = webrtc::TimeDelta::Micros(
+      0.1 * (now_time_100ns - capture_time_100ns) + 0.5);
+  return delay_us.ms();
+}
+
+// Called from OnErrorCallback() when the error type is kStreamDisconnected.
+// Note that this method is called on the audio thread and the internal restart
+// sequence is also executed on that same thread. The audio thread is therefore
+// not stopped during restart. Such a scheme also makes the restart process
+// less complex.
+// Note that none of the called methods are thread checked since they can also
+// be called on the main thread.
+// Thread checkers are instead added on one layer above (in
+// audio_device_module.cc), which ensures that the public API is thread safe.
+// TODO(henrika): add more details.
+bool CoreAudioInput::HandleStreamDisconnected() {
+  RTC_DLOG(LS_INFO) << "<<<--- " << __FUNCTION__;
+  RTC_DCHECK_RUN_ON(&thread_checker_audio_);
+  RTC_DCHECK(automatic_restart());
+
+  if (StopRecording() != 0) {
+    return false;
+  }
+
+  if (!SwitchDeviceIfNeeded()) {
+    return false;
+  }
+
+  if (InitRecording() != 0) {
+    return false;
+  }
+  if (StartRecording() != 0) {
+    return false;
+  }
+
+  RTC_DLOG(LS_INFO) << __FUNCTION__ << " --->>>";
+  return true;
+}
+
+}  // namespace webrtc_win
+}  // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/win/core_audio_input_win.h b/third_party/libwebrtc/modules/audio_device/win/core_audio_input_win.h
new file mode 100644
index 0000000000..be290f9f4e
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/win/core_audio_input_win.h
@@ -0,0 +1,73 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_INPUT_WIN_H_
+#define MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_INPUT_WIN_H_
+
+#include <memory>
+#include <string>
+
+#include "absl/types/optional.h"
+#include "modules/audio_device/win/audio_device_module_win.h"
+#include "modules/audio_device/win/core_audio_base_win.h"
+
+namespace webrtc {
+
+class AudioDeviceBuffer;
+class FineAudioBuffer;
+
+namespace webrtc_win {
+
+// Windows-specific AudioInput implementation using a CoreAudioBase class where
+// the input direction is set at construction. Supports capture device handling
+// and streaming of captured audio to a WebRTC client.
+class CoreAudioInput final : public CoreAudioBase, public AudioInput {
+ public:
+  CoreAudioInput(bool automatic_restart);
+  ~CoreAudioInput() override;
+
+  // AudioInput implementation.
+  int Init() override;
+  int Terminate() override;
+  int NumDevices() const override;
+  int SetDevice(int index) override;
+  int SetDevice(AudioDeviceModule::WindowsDeviceType device) override;
+  int DeviceName(int index, std::string* name, std::string* guid) override;
+  void AttachAudioBuffer(AudioDeviceBuffer* audio_buffer) override;
+  bool RecordingIsInitialized() const override;
+  int InitRecording() override;
+  int StartRecording() override;
+  int StopRecording() override;
+  bool Recording() override;
+  int VolumeIsAvailable(bool* available) override;
+  int RestartRecording() override;
+  bool Restarting() const override;
+  int SetSampleRate(uint32_t sample_rate) override;
+
+  CoreAudioInput(const CoreAudioInput&) = delete;
+  CoreAudioInput& operator=(const CoreAudioInput&) = delete;
+
+ private:
+  void ReleaseCOMObjects();
+  bool OnDataCallback(uint64_t device_frequency);
+  bool OnErrorCallback(ErrorType error);
+  absl::optional<int> EstimateLatencyMillis(uint64_t capture_time_100ns);
+  bool HandleStreamDisconnected();
+
+  std::unique_ptr<FineAudioBuffer> fine_audio_buffer_;
+  Microsoft::WRL::ComPtr<IAudioCaptureClient> audio_capture_client_;
+  absl::optional<double> qpc_to_100ns_;
+};
+
+}  // namespace webrtc_win
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_INPUT_WIN_H_
diff --git a/third_party/libwebrtc/modules/audio_device/win/core_audio_output_win.cc b/third_party/libwebrtc/modules/audio_device/win/core_audio_output_win.cc
new file mode 100644
index 0000000000..c92fedf0e9
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/win/core_audio_output_win.cc
@@ -0,0 +1,422 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/win/core_audio_output_win.h"
+
+#include <memory>
+
+#include "modules/audio_device/audio_device_buffer.h"
+#include "modules/audio_device/fine_audio_buffer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/time_utils.h"
+
+using Microsoft::WRL::ComPtr;
+
+namespace webrtc {
+namespace webrtc_win {
+
+CoreAudioOutput::CoreAudioOutput(bool automatic_restart)
+    : CoreAudioBase(
+          CoreAudioBase::Direction::kOutput,
+          automatic_restart,
+          [this](uint64_t freq) { return OnDataCallback(freq); },
+          [this](ErrorType err) { return OnErrorCallback(err); }) {
+  RTC_DLOG(LS_INFO) << __FUNCTION__;
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  thread_checker_audio_.Detach();
+}
+
+CoreAudioOutput::~CoreAudioOutput() {
+  RTC_DLOG(LS_INFO) << __FUNCTION__;
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  Terminate();
+}
+
+int CoreAudioOutput::Init() {
+  RTC_DLOG(LS_INFO) << __FUNCTION__;
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  return 0;
+}
+
+int CoreAudioOutput::Terminate() {
+  RTC_DLOG(LS_INFO) << __FUNCTION__;
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  StopPlayout();
+  return 0;
+}
+
+int CoreAudioOutput::NumDevices() const {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  return core_audio_utility::NumberOfActiveDevices(eRender);
+}
+
+int CoreAudioOutput::SetDevice(int index) {
+  RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << index;
+  RTC_DCHECK_GE(index, 0);
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  return CoreAudioBase::SetDevice(index);
+}
+
+int CoreAudioOutput::SetDevice(AudioDeviceModule::WindowsDeviceType device) {
+  RTC_DLOG(LS_INFO) << __FUNCTION__ << ": "
+                    << ((device == AudioDeviceModule::kDefaultDevice)
+                            ? "Default"
+                            : "DefaultCommunication");
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  return SetDevice((device == AudioDeviceModule::kDefaultDevice) ? 0 : 1);
+}
+
+int CoreAudioOutput::DeviceName(int index,
+                                std::string* name,
+                                std::string* guid) {
+  RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << index;
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  RTC_DCHECK(name);
+  return CoreAudioBase::DeviceName(index, name, guid);
+}
+
+void CoreAudioOutput::AttachAudioBuffer(AudioDeviceBuffer* audio_buffer) {
+  RTC_DLOG(LS_INFO) << __FUNCTION__;
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  audio_device_buffer_ = audio_buffer;
+}
+
+bool CoreAudioOutput::PlayoutIsInitialized() const {
+  RTC_DLOG(LS_INFO) << __FUNCTION__;
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  return initialized_;
+}
+
+int CoreAudioOutput::InitPlayout() {
+  RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << IsRestarting();
+  RTC_DCHECK(!initialized_);
+  RTC_DCHECK(!Playing());
+  RTC_DCHECK(!audio_render_client_);
+
+  // Creates an IAudioClient instance and stores the valid interface pointer in
+  // `audio_client3_`, `audio_client2_`, or `audio_client_` depending on
+  // platform support. The base class will use optimal output parameters and do
+  // an event-driven shared-mode initialization. The utilized format will be
+  // stored in `format_` and can be used for configuration and allocation of
+  // audio buffers.
+  if (!CoreAudioBase::Init()) {
+    return -1;
+  }
+  RTC_DCHECK(audio_client_);
+
+  // Configure the playout side of the audio device buffer using `format_`
+  // after a trivial sanity check of the format structure.
+  RTC_DCHECK(audio_device_buffer_);
+  WAVEFORMATEX* format = &format_.Format;
+  RTC_DCHECK_EQ(format->wFormatTag, WAVE_FORMAT_EXTENSIBLE);
+  audio_device_buffer_->SetPlayoutSampleRate(format->nSamplesPerSec);
+  audio_device_buffer_->SetPlayoutChannels(format->nChannels);
+
+  // Create a modified audio buffer class which allows us to ask for any number
+  // of samples (and not only multiples of 10ms) to match the optimal
+  // buffer size per callback used by Core Audio.
+  // TODO(henrika): can we share one FineAudioBuffer with the input side?
+  fine_audio_buffer_ = std::make_unique<FineAudioBuffer>(audio_device_buffer_);
+
+  // Create an IAudioRenderClient for an initialized IAudioClient.
+  // The IAudioRenderClient interface enables us to write output data to
+  // a rendering endpoint buffer.
+  ComPtr<IAudioRenderClient> audio_render_client =
+      core_audio_utility::CreateRenderClient(audio_client_.Get());
+  if (!audio_render_client.Get()) {
+    return -1;
+  }
+
+  ComPtr<IAudioClock> audio_clock =
+      core_audio_utility::CreateAudioClock(audio_client_.Get());
+  if (!audio_clock.Get()) {
+    return -1;
+  }
+
+  // Store valid COM interfaces.
+  audio_render_client_ = audio_render_client;
+  audio_clock_ = audio_clock;
+
+  initialized_ = true;
+  return 0;
+}
+
+int CoreAudioOutput::StartPlayout() {
+  RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << IsRestarting();
+  RTC_DCHECK(!Playing());
+  RTC_DCHECK(fine_audio_buffer_);
+  RTC_DCHECK(audio_device_buffer_);
+  if (!initialized_) {
+    RTC_DLOG(LS_WARNING)
+        << "Playout cannot start since InitPlayout must succeed first";
+    return 0;
+  }
+
+  fine_audio_buffer_->ResetPlayout();
+  if (!IsRestarting()) {
+    audio_device_buffer_->StartPlayout();
+  }
+
+  if (!core_audio_utility::FillRenderEndpointBufferWithSilence(
+          audio_client_.Get(), audio_render_client_.Get())) {
+    RTC_LOG(LS_WARNING) << "Failed to prepare output endpoint with silence";
+  }
+
+  num_frames_written_ = endpoint_buffer_size_frames_;
+
+  if (!Start()) {
+    return -1;
+  }
+
+  is_active_ = true;
+  return 0;
+}
+
+int CoreAudioOutput::StopPlayout() {
+  RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << IsRestarting();
+  if (!initialized_) {
+    return 0;
+  }
+
+  // Release resources allocated in InitPlayout() and then return if this
+  // method is called without any active output audio.
+  if (!Playing()) {
+    RTC_DLOG(LS_WARNING) << "No output stream is active";
+    ReleaseCOMObjects();
+    initialized_ = false;
+    return 0;
+  }
+
+  if (!Stop()) {
+    RTC_LOG(LS_ERROR) << "StopPlayout failed";
+    return -1;
+  }
+
+  if (!IsRestarting()) {
+    RTC_DCHECK(audio_device_buffer_);
+    audio_device_buffer_->StopPlayout();
+  }
+
+  // Release all allocated resources to allow for a restart without
+  // intermediate destruction.
+  ReleaseCOMObjects();
+
+  initialized_ = false;
+  is_active_ = false;
+  return 0;
+}
+
+bool CoreAudioOutput::Playing() {
+  RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << is_active_;
+  return is_active_;
+}
+
+// TODO(henrika): finalize support of audio session volume control. As is, we
+// are not compatible with the old ADM implementation since it allows accessing
+// the volume control with any active audio output stream.
+int CoreAudioOutput::VolumeIsAvailable(bool* available) {
+  RTC_DLOG(LS_INFO) << __FUNCTION__;
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  return IsVolumeControlAvailable(available) ? 0 : -1;
+}
+
+// Triggers the restart sequence. Only used for testing purposes to emulate
+// a real event where e.g. an active output device is removed.
+int CoreAudioOutput::RestartPlayout() {
+  RTC_DLOG(LS_INFO) << __FUNCTION__;
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  if (!Playing()) {
+    return 0;
+  }
+  if (!Restart()) {
+    RTC_LOG(LS_ERROR) << "RestartPlayout failed";
+    return -1;
+  }
+  return 0;
+}
+
+bool CoreAudioOutput::Restarting() const {
+  RTC_DLOG(LS_INFO) << __FUNCTION__;
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  return IsRestarting();
+}
+
+int CoreAudioOutput::SetSampleRate(uint32_t sample_rate) {
+  RTC_DLOG(LS_INFO) << __FUNCTION__;
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  sample_rate_ = sample_rate;
+  return 0;
+}
+
+void CoreAudioOutput::ReleaseCOMObjects() {
+  RTC_DLOG(LS_INFO) << __FUNCTION__;
+  CoreAudioBase::ReleaseCOMObjects();
+  if (audio_render_client_.Get()) {
+    audio_render_client_.Reset();
+  }
+}
+
+bool CoreAudioOutput::OnErrorCallback(ErrorType error) {
+  RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << as_integer(error);
+  RTC_DCHECK_RUN_ON(&thread_checker_audio_);
+  if (!initialized_ || !Playing()) {
+    return true;
+  }
+
+  if (error == CoreAudioBase::ErrorType::kStreamDisconnected) {
+    HandleStreamDisconnected();
+  } else {
+    RTC_DLOG(LS_WARNING) << "Unsupported error type";
+  }
+  return true;
+}
+
+bool CoreAudioOutput::OnDataCallback(uint64_t device_frequency) {
+  RTC_DCHECK_RUN_ON(&thread_checker_audio_);
+  if (num_data_callbacks_ == 0) {
+    RTC_LOG(LS_INFO) << "--- Output audio stream is alive ---";
+  }
+  // Get the padding value which indicates the amount of valid unread data that
+  // the endpoint buffer currently contains.
+  UINT32 num_unread_frames = 0;
+  _com_error error = audio_client_->GetCurrentPadding(&num_unread_frames);
+  if (error.Error() == AUDCLNT_E_DEVICE_INVALIDATED) {
+    // Avoid breaking the thread loop implicitly by returning false; return
+    // true instead for AUDCLNT_E_DEVICE_INVALIDATED even though it is a valid
+    // error message. We will use notifications about device changes instead to
+    // stop data callbacks and attempt to restart streaming.
+    RTC_DLOG(LS_ERROR) << "AUDCLNT_E_DEVICE_INVALIDATED";
+    return true;
+  }
+  if (FAILED(error.Error())) {
+    RTC_LOG(LS_ERROR) << "IAudioClient::GetCurrentPadding failed: "
+                      << core_audio_utility::ErrorToString(error);
+    return false;
+  }
+
+  // Contains how much new data we can write to the buffer without the risk of
+  // overwriting previously written data that the audio engine has not yet read
+  // from the buffer. I.e., it is the maximum buffer size we can request when
+  // calling IAudioRenderClient::GetBuffer().
+  UINT32 num_requested_frames =
+      endpoint_buffer_size_frames_ - num_unread_frames;
+  if (num_requested_frames == 0) {
+    RTC_DLOG(LS_WARNING)
+        << "Audio thread is signaled but no new audio samples are needed";
+    return true;
+  }
+
+  // Request all available space in the rendering endpoint buffer into which
+  // the client can later write an audio packet.
+  uint8_t* audio_data;
+  error = audio_render_client_->GetBuffer(num_requested_frames, &audio_data);
+  if (FAILED(error.Error())) {
+    RTC_LOG(LS_ERROR) << "IAudioRenderClient::GetBuffer failed: "
+                      << core_audio_utility::ErrorToString(error);
+    return false;
+  }
+
+  // Update the output delay estimate, but only about once per second to save
+  // resources. The estimate is usually stable.
+  if (num_data_callbacks_ % 100 == 0) {
+    // TODO(henrika): note that FineAudioBuffer adds latency as well.
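+    // Worked example of the estimate below (illustrative numbers only): with
+    // a 48 kHz stream where 4800 frames have been written and the device
+    // clock reports 4320 frames played out, the 480 remaining frames
+    // correspond to 480 / 48000 s = 10 ms of output latency.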
+    latency_ms_ = EstimateOutputLatencyMillis(device_frequency);
+    if (num_data_callbacks_ % 500 == 0) {
+      RTC_DLOG(LS_INFO) << "latency: " << latency_ms_;
+    }
+  }
+
+  // Get audio data from WebRTC and write it to the allocated buffer in
+  // `audio_data`. The playout latency is not updated for each callback.
+  fine_audio_buffer_->GetPlayoutData(
+      rtc::MakeArrayView(reinterpret_cast<int16_t*>(audio_data),
+                         num_requested_frames * format_.Format.nChannels),
+      latency_ms_);
+
+  // Release the buffer space acquired in IAudioRenderClient::GetBuffer.
+  error = audio_render_client_->ReleaseBuffer(num_requested_frames, 0);
+  if (FAILED(error.Error())) {
+    RTC_LOG(LS_ERROR) << "IAudioRenderClient::ReleaseBuffer failed: "
+                      << core_audio_utility::ErrorToString(error);
+    return false;
+  }
+
+  num_frames_written_ += num_requested_frames;
+  ++num_data_callbacks_;
+
+  return true;
+}
+
+// TODO(henrika): IAudioClock2::GetDevicePosition could perhaps be used here
+// instead. Tried it once, but it crashed for capture devices.
+int CoreAudioOutput::EstimateOutputLatencyMillis(uint64_t device_frequency) {
+  UINT64 position = 0;
+  UINT64 qpc_position = 0;
+  int delay_ms = 0;
+  // Get the device position through the output parameter `position`. This is
+  // the stream position of the sample that is currently playing through the
+  // speakers.
+  _com_error error = audio_clock_->GetPosition(&position, &qpc_position);
+  if (error.Error() == S_OK) {
+    // Number of frames already played out through the speaker.
+    const uint64_t num_played_out_frames =
+        format_.Format.nSamplesPerSec * position / device_frequency;
+
+    // Number of frames that have been written to the buffer but not yet
+    // played out, corresponding to the estimated latency measured in number
+    // of audio frames.
+    const uint64_t delay_frames = num_frames_written_ - num_played_out_frames;
+
+    // Convert the latency in number of frames into milliseconds.
+    webrtc::TimeDelta delay =
+        webrtc::TimeDelta::Micros(delay_frames * rtc::kNumMicrosecsPerSec /
+                                  format_.Format.nSamplesPerSec);
+    delay_ms = delay.ms();
+  }
+  return delay_ms;
+}
+
+// Called from OnErrorCallback() when the error type is kStreamDisconnected.
+// Note that this method is called on the audio thread and the internal restart
+// sequence is also executed on that same thread. The audio thread is therefore
+// not stopped during restart. Such a scheme also makes the restart process
+// less complex.
+// Note that none of the called methods are thread checked since they can also
+// be called on the main thread. Thread checkers are instead added on one layer
+// above (in audio_device_module.cc), which ensures that the public API is
+// thread safe.
+// TODO(henrika): add more details.
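+// In short, the sequence below is: stop playout, switch to the (new) default
+// device if required, then re-initialize and restart playout, all without
+// leaving the audio thread.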
+bool CoreAudioOutput::HandleStreamDisconnected() {
+  RTC_DLOG(LS_INFO) << "<<<--- " << __FUNCTION__;
+  RTC_DCHECK_RUN_ON(&thread_checker_audio_);
+  RTC_DCHECK(automatic_restart());
+
+  if (StopPlayout() != 0) {
+    return false;
+  }
+
+  if (!SwitchDeviceIfNeeded()) {
+    return false;
+  }
+
+  if (InitPlayout() != 0) {
+    return false;
+  }
+  if (StartPlayout() != 0) {
+    return false;
+  }
+
+  RTC_DLOG(LS_INFO) << __FUNCTION__ << " --->>>";
+  return true;
+}
+
+}  // namespace webrtc_win
+
+}  // namespace webrtc
diff --git a/third_party/libwebrtc/modules/audio_device/win/core_audio_output_win.h b/third_party/libwebrtc/modules/audio_device/win/core_audio_output_win.h
new file mode 100644
index 0000000000..5a547498a3
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/win/core_audio_output_win.h
@@ -0,0 +1,72 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_OUTPUT_WIN_H_
+#define MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_OUTPUT_WIN_H_
+
+#include <memory>
+#include <string>
+
+#include "modules/audio_device/win/audio_device_module_win.h"
+#include "modules/audio_device/win/core_audio_base_win.h"
+
+namespace webrtc {
+
+class AudioDeviceBuffer;
+class FineAudioBuffer;
+
+namespace webrtc_win {
+
+// Windows-specific AudioOutput implementation using a CoreAudioBase class
+// where the output direction is set at construction. Supports render device
+// handling and streaming of decoded audio from a WebRTC client to the native
+// audio layer.
+class CoreAudioOutput final : public CoreAudioBase, public AudioOutput {
+ public:
+  CoreAudioOutput(bool automatic_restart);
+  ~CoreAudioOutput() override;
+
+  // AudioOutput implementation.
+  int Init() override;
+  int Terminate() override;
+  int NumDevices() const override;
+  int SetDevice(int index) override;
+  int SetDevice(AudioDeviceModule::WindowsDeviceType device) override;
+  int DeviceName(int index, std::string* name, std::string* guid) override;
+  void AttachAudioBuffer(AudioDeviceBuffer* audio_buffer) override;
+  bool PlayoutIsInitialized() const override;
+  int InitPlayout() override;
+  int StartPlayout() override;
+  int StopPlayout() override;
+  bool Playing() override;
+  int VolumeIsAvailable(bool* available) override;
+  int RestartPlayout() override;
+  bool Restarting() const override;
+  int SetSampleRate(uint32_t sample_rate) override;
+
+  CoreAudioOutput(const CoreAudioOutput&) = delete;
+  CoreAudioOutput& operator=(const CoreAudioOutput&) = delete;
+
+ private:
+  void ReleaseCOMObjects();
+  bool OnDataCallback(uint64_t device_frequency);
+  bool OnErrorCallback(ErrorType error);
+  int EstimateOutputLatencyMillis(uint64_t device_frequency);
+  bool HandleStreamDisconnected();
+
+  std::unique_ptr<FineAudioBuffer> fine_audio_buffer_;
+  Microsoft::WRL::ComPtr<IAudioRenderClient> audio_render_client_;
+  uint64_t num_frames_written_ = 0;
+};
+
+}  // namespace webrtc_win
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_OUTPUT_WIN_H_
diff --git a/third_party/libwebrtc/modules/audio_device/win/core_audio_utility_win.cc b/third_party/libwebrtc/modules/audio_device/win/core_audio_utility_win.cc
new file mode 100644
index 0000000000..e4e2864db5
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/win/core_audio_utility_win.cc
@@ -0,0 +1,1529 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/win/core_audio_utility_win.h"
+
+#include <functiondiscoverykeys_devpkey.h>
+#include <stdio.h>
+#include <tchar.h>
+
+#include <iomanip>
+#include <string>
+#include <utility>
+
+#include "absl/strings/string_view.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/platform_thread_types.h"
+#include "rtc_base/string_utils.h"
+#include "rtc_base/strings/string_builder.h"
+#include "rtc_base/win/windows_version.h"
+
+using Microsoft::WRL::ComPtr;
+using webrtc::AudioDeviceName;
+using webrtc::AudioParameters;
+
+namespace webrtc {
+namespace webrtc_win {
+namespace {
+
+using core_audio_utility::ErrorToString;
+
+// Converts from a channel mask to a list of included channels.
+// Each audio data format contains channels for one or more of the positions
+// listed below. The number of channels simply equals the number of nonzero
+// flag bits in the `channel_mask`. The relative positions of the channels
+// within each block of audio data always follow the same relative ordering
+// as the flag bits in the table below. For example, if `channel_mask` contains
+// the value 0x00000033, the format defines four audio channels that are
+// assigned for playback to the front-left, front-right, back-left,
+// and back-right speakers, respectively. The channel data should be
+// interleaved in that order within each block.
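+// For example (illustrative): passing KSAUDIO_SPEAKER_STEREO (0x3) to the
+// helper below yields "FRONT_LEFT | FRONT_RIGHT (2)".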
+std::string ChannelMaskToString(DWORD channel_mask) {
+  std::string ss;
+  int n = 0;
+  if (channel_mask & SPEAKER_FRONT_LEFT) {
+    ss += "FRONT_LEFT | ";
+    ++n;
+  }
+  if (channel_mask & SPEAKER_FRONT_RIGHT) {
+    ss += "FRONT_RIGHT | ";
+    ++n;
+  }
+  if (channel_mask & SPEAKER_FRONT_CENTER) {
+    ss += "FRONT_CENTER | ";
+    ++n;
+  }
+  if (channel_mask & SPEAKER_LOW_FREQUENCY) {
+    ss += "LOW_FREQUENCY | ";
+    ++n;
+  }
+  if (channel_mask & SPEAKER_BACK_LEFT) {
+    ss += "BACK_LEFT | ";
+    ++n;
+  }
+  if (channel_mask & SPEAKER_BACK_RIGHT) {
+    ss += "BACK_RIGHT | ";
+    ++n;
+  }
+  if (channel_mask & SPEAKER_FRONT_LEFT_OF_CENTER) {
+    ss += "FRONT_LEFT_OF_CENTER | ";
+    ++n;
+  }
+  if (channel_mask & SPEAKER_FRONT_RIGHT_OF_CENTER) {
+    ss += "FRONT_RIGHT_OF_CENTER | ";
+    ++n;
+  }
+  if (channel_mask & SPEAKER_BACK_CENTER) {
+    ss += "BACK_CENTER | ";
+    ++n;
+  }
+  if (channel_mask & SPEAKER_SIDE_LEFT) {
+    ss += "SIDE_LEFT | ";
+    ++n;
+  }
+  if (channel_mask & SPEAKER_SIDE_RIGHT) {
+    ss += "SIDE_RIGHT | ";
+    ++n;
+  }
+  if (channel_mask & SPEAKER_TOP_CENTER) {
+    ss += "TOP_CENTER | ";
+    ++n;
+  }
+  if (channel_mask & SPEAKER_TOP_FRONT_LEFT) {
+    ss += "TOP_FRONT_LEFT | ";
+    ++n;
+  }
+  if (channel_mask & SPEAKER_TOP_FRONT_CENTER) {
+    ss += "TOP_FRONT_CENTER | ";
+    ++n;
+  }
+  if (channel_mask & SPEAKER_TOP_FRONT_RIGHT) {
+    ss += "TOP_FRONT_RIGHT | ";
+    ++n;
+  }
+  if (channel_mask & SPEAKER_TOP_BACK_LEFT) {
+    ss += "TOP_BACK_LEFT | ";
+    ++n;
+  }
+  if (channel_mask & SPEAKER_TOP_BACK_CENTER) {
+    ss += "TOP_BACK_CENTER | ";
+    ++n;
+  }
+  if (channel_mask & SPEAKER_TOP_BACK_RIGHT) {
+    ss += "TOP_BACK_RIGHT | ";
+    ++n;
+  }
+
+  if (!ss.empty()) {
+    // Delete last appended " | " substring.
+    ss.erase(ss.end() - 3, ss.end());
+  }
+  ss += " (";
+  ss += std::to_string(n);
+  ss += ")";
+  return ss;
+}
+
+#if !defined(KSAUDIO_SPEAKER_1POINT1)
+// These values are only defined in ksmedia.h after a certain version. To build
+// cleanly for older Windows versions, this just defines the ones that are
+// missing.
+#define KSAUDIO_SPEAKER_1POINT1 (SPEAKER_FRONT_CENTER | SPEAKER_LOW_FREQUENCY)
+#define KSAUDIO_SPEAKER_2POINT1 \
+  (SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_LOW_FREQUENCY)
+#define KSAUDIO_SPEAKER_3POINT0 \
+  (SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER)
+#define KSAUDIO_SPEAKER_3POINT1                                    \
+  (SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | \
+   SPEAKER_LOW_FREQUENCY)
+#define KSAUDIO_SPEAKER_5POINT0                                      \
+  (SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | \
+   SPEAKER_SIDE_LEFT | SPEAKER_SIDE_RIGHT)
+#define KSAUDIO_SPEAKER_7POINT0                                      \
+  (SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | \
+   SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT | SPEAKER_SIDE_LEFT |      \
+   SPEAKER_SIDE_RIGHT)
+#endif
+
+#if !defined(AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY)
+#define AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY 0x08000000
+#define AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM 0x80000000
+#endif
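As the comment above ChannelMaskToString() notes, the channel count of a format is simply the number of set bits in its channel mask. A minimal standalone sketch of that relationship (not part of the patch; the mask value 0x33 is the example from the comment, and std::bitset is used for portability):

// Sketch: channel count == set-bit count of dwChannelMask. 0x33 selects
// FRONT_LEFT | FRONT_RIGHT | BACK_LEFT | BACK_RIGHT, i.e. four channels.
#include <bitset>
#include <cstdint>
#include <iostream>

int main() {
  const uint32_t channel_mask = 0x33;  // Example value from the comment above.
  const size_t num_channels = std::bitset<32>(channel_mask).count();
  std::cout << "channels: " << num_channels << "\n";  // Prints: channels: 4
}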
+// Converts the most common format tags defined in mmreg.h into string
+// equivalents. Mainly intended for log messages.
+const char* WaveFormatTagToString(WORD format_tag) {
+  switch (format_tag) {
+    case WAVE_FORMAT_UNKNOWN:
+      return "WAVE_FORMAT_UNKNOWN";
+    case WAVE_FORMAT_PCM:
+      return "WAVE_FORMAT_PCM";
+    case WAVE_FORMAT_IEEE_FLOAT:
+      return "WAVE_FORMAT_IEEE_FLOAT";
+    case WAVE_FORMAT_EXTENSIBLE:
+      return "WAVE_FORMAT_EXTENSIBLE";
+    default:
+      return "UNKNOWN";
+  }
+}
+
+const char* RoleToString(const ERole role) {
+  switch (role) {
+    case eConsole:
+      return "Console";
+    case eMultimedia:
+      return "Multimedia";
+    case eCommunications:
+      return "Communications";
+    default:
+      return "Unsupported";
+  }
+}
+
+const char* FlowToString(const EDataFlow flow) {
+  switch (flow) {
+    case eRender:
+      return "Render";
+    case eCapture:
+      return "Capture";
+    case eAll:
+      return "Render or Capture";
+    default:
+      return "Unsupported";
+  }
+}
+
+bool LoadAudiosesDll() {
+  static const wchar_t* const kAudiosesDLL =
+      L"%WINDIR%\\system32\\audioses.dll";
+  wchar_t path[MAX_PATH] = {0};
+  ExpandEnvironmentStringsW(kAudiosesDLL, path, arraysize(path));
+  RTC_DLOG(LS_INFO) << rtc::ToUtf8(path);
+  return (LoadLibraryExW(path, nullptr, LOAD_WITH_ALTERED_SEARCH_PATH) !=
+          nullptr);
+}
+
+bool LoadAvrtDll() {
+  static const wchar_t* const kAvrtDLL = L"%WINDIR%\\system32\\Avrt.dll";
+  wchar_t path[MAX_PATH] = {0};
+  ExpandEnvironmentStringsW(kAvrtDLL, path, arraysize(path));
+  RTC_DLOG(LS_INFO) << rtc::ToUtf8(path);
+  return (LoadLibraryExW(path, nullptr, LOAD_WITH_ALTERED_SEARCH_PATH) !=
+          nullptr);
+}
+
+ComPtr<IMMDeviceEnumerator> CreateDeviceEnumeratorInternal(
+    bool allow_reinitialize) {
+  ComPtr<IMMDeviceEnumerator> device_enumerator;
+  _com_error error =
+      ::CoCreateInstance(__uuidof(MMDeviceEnumerator), nullptr, CLSCTX_ALL,
+                         IID_PPV_ARGS(&device_enumerator));
+  if (FAILED(error.Error())) {
+    RTC_LOG(LS_ERROR) << "CoCreateInstance failed: " << ErrorToString(error);
+  }
+
+  if (error.Error() == CO_E_NOTINITIALIZED && allow_reinitialize) {
+    RTC_LOG(LS_ERROR) << "CoCreateInstance failed with CO_E_NOTINITIALIZED";
+    // We have seen crashes that indicate that this method can in fact
+    // fail with CO_E_NOTINITIALIZED in combination with certain 3rd party
+    // modules. Calling CoInitializeEx() is an attempt to resolve the reported
+    // issues. See http://crbug.com/378465 for details.
+    error = CoInitializeEx(nullptr, COINIT_MULTITHREADED);
+    if (FAILED(error.Error())) {
+      error = ::CoCreateInstance(__uuidof(MMDeviceEnumerator), nullptr,
+                                 CLSCTX_ALL, IID_PPV_ARGS(&device_enumerator));
+      if (FAILED(error.Error())) {
+        RTC_LOG(LS_ERROR) << "CoCreateInstance failed: "
+                          << ErrorToString(error);
+      }
+    }
+  }
+  return device_enumerator;
+}
+
+bool IsSupportedInternal() {
+  // The Core Audio APIs are implemented in the user-mode system components
+  // Audioses.dll and Mmdevapi.dll. Dependency Walker shows that it is
+  // enough to verify that the Audioses DLL can be loaded since it depends
+  // on Mmdevapi.dll. See http://crbug.com/166397 for why this extra step is
+  // required to guarantee Core Audio support.
+  if (!LoadAudiosesDll())
+    return false;
+
+  // Being able to load the Audioses.dll does not seem to be sufficient for
+  // all devices to guarantee Core Audio support. To be safe, we also verify
+  // that it is possible to create the IMMDeviceEnumerator interface. If
+  // this works as well, we should be home free.
+  ComPtr<IMMDeviceEnumerator> device_enumerator =
+      CreateDeviceEnumeratorInternal(false);
+  if (!device_enumerator) {
+    RTC_LOG(LS_ERROR)
+        << "Failed to create Core Audio device enumerator on thread with ID "
+        << rtc::CurrentThreadId();
+    return false;
+  }
+
+  return true;
+}
+
+bool IsDeviceActive(IMMDevice* device) {
+  DWORD state = DEVICE_STATE_DISABLED;
+  return SUCCEEDED(device->GetState(&state)) && (state & DEVICE_STATE_ACTIVE);
+}
+
+// Retrieve an audio device specified by `device_id` or a default device
+// specified by data-flow direction and role if `device_id` is default.
+ComPtr<IMMDevice> CreateDeviceInternal(absl::string_view device_id,
+                                       EDataFlow data_flow,
+                                       ERole role) {
+  RTC_DLOG(LS_INFO) << "CreateDeviceInternal: "
+                       "id="
+                    << device_id << ", flow=" << FlowToString(data_flow)
+                    << ", role=" << RoleToString(role);
+  ComPtr<IMMDevice> audio_endpoint_device;
+
+  // Create the IMMDeviceEnumerator interface.
+  ComPtr<IMMDeviceEnumerator> device_enum(CreateDeviceEnumeratorInternal(true));
+  if (!device_enum.Get())
+    return audio_endpoint_device;
+
+  _com_error error(S_FALSE);
+  if (device_id == AudioDeviceName::kDefaultDeviceId) {
+    // Get the default audio endpoint for the specified data-flow direction
+    // and role. Note that, if only a single rendering or capture device is
+    // available, the system always assigns all three rendering or capture
+    // roles to that device. If the method fails to find a rendering or
+    // capture device for the specified role, this means that no rendering or
+    // capture device is available at all. If no device is available, the
+    // method sets the output pointer to NULL and returns ERROR_NOT_FOUND.
+    error = device_enum->GetDefaultAudioEndpoint(
+        data_flow, role, audio_endpoint_device.GetAddressOf());
+    if (FAILED(error.Error())) {
+      RTC_LOG(LS_ERROR)
+          << "IMMDeviceEnumerator::GetDefaultAudioEndpoint failed: "
+          << ErrorToString(error);
+    }
+  } else {
+    // Ask for an audio endpoint device that is identified by an endpoint ID
+    // string.
+    error = device_enum->GetDevice(rtc::ToUtf16(device_id).c_str(),
+                                   audio_endpoint_device.GetAddressOf());
+    if (FAILED(error.Error())) {
+      RTC_LOG(LS_ERROR) << "IMMDeviceEnumerator::GetDevice failed: "
+                        << ErrorToString(error);
+    }
+  }
+
+  // Verify that the audio endpoint device is active, i.e., that the audio
+  // adapter that connects to the endpoint device is present and enabled.
+  if (SUCCEEDED(error.Error()) && audio_endpoint_device.Get() &&
+      !IsDeviceActive(audio_endpoint_device.Get())) {
+    RTC_LOG(LS_WARNING) << "Selected endpoint device is not active";
+    audio_endpoint_device.Reset();
+  }
+
+  return audio_endpoint_device;
+}
+
+std::string GetDeviceIdInternal(IMMDevice* device) {
+  // Retrieve unique name of endpoint device.
+  // Example: "{0.0.1.00000000}.{8db6020f-18e3-4f25-b6f5-7726c9122574}".
+  LPWSTR device_id;
+  if (SUCCEEDED(device->GetId(&device_id))) {
+    std::string device_id_utf8 = rtc::ToUtf8(device_id, wcslen(device_id));
+    CoTaskMemFree(device_id);
+    return device_id_utf8;
+  } else {
+    return std::string();
+  }
+}
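A note on the ownership pattern GetDeviceIdInternal() wraps: IMMDevice::GetId() allocates the returned wide string with the COM allocator, so the caller must release it with CoTaskMemFree(). A minimal sketch of the same pattern in isolation (ReadEndpointId is a hypothetical helper, not part of the patch):

// Sketch only: copy the endpoint id before releasing the COM allocation.
#include <mmdeviceapi.h>
#include <objbase.h>
#include <string>

std::wstring ReadEndpointId(IMMDevice* device) {
  LPWSTR raw_id = nullptr;
  if (FAILED(device->GetId(&raw_id)))
    return std::wstring();
  std::wstring id(raw_id);  // Copy before freeing.
  CoTaskMemFree(raw_id);    // Required; otherwise the string leaks.
  return id;
}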
+std::string GetDeviceFriendlyNameInternal(IMMDevice* device) {
+  // Retrieve user-friendly name of endpoint device.
+  // Example: "Microphone (Realtek High Definition Audio)".
+  ComPtr<IPropertyStore> properties;
+  HRESULT hr = device->OpenPropertyStore(STGM_READ, properties.GetAddressOf());
+  if (FAILED(hr))
+    return std::string();
+
+  ScopedPropVariant friendly_name_pv;
+  hr = properties->GetValue(PKEY_Device_FriendlyName,
+                            friendly_name_pv.Receive());
+  if (FAILED(hr))
+    return std::string();
+
+  if (friendly_name_pv.get().vt == VT_LPWSTR &&
+      friendly_name_pv.get().pwszVal) {
+    return rtc::ToUtf8(friendly_name_pv.get().pwszVal,
+                       wcslen(friendly_name_pv.get().pwszVal));
+  } else {
+    return std::string();
+  }
+}
+
+ComPtr<IAudioSessionManager2> CreateSessionManager2Internal(
+    IMMDevice* audio_device) {
+  if (!audio_device)
+    return ComPtr<IAudioSessionManager2>();
+
+  ComPtr<IAudioSessionManager2> audio_session_manager;
+  _com_error error =
+      audio_device->Activate(__uuidof(IAudioSessionManager2), CLSCTX_ALL,
+                             nullptr, &audio_session_manager);
+  if (FAILED(error.Error())) {
+    RTC_LOG(LS_ERROR) << "IMMDevice::Activate(IAudioSessionManager2) failed: "
+                      << ErrorToString(error);
+  }
+  return audio_session_manager;
+}
+
+ComPtr<IAudioSessionEnumerator> CreateSessionEnumeratorInternal(
+    IMMDevice* audio_device) {
+  if (!audio_device) {
+    return ComPtr<IAudioSessionEnumerator>();
+  }
+
+  ComPtr<IAudioSessionEnumerator> audio_session_enumerator;
+  ComPtr<IAudioSessionManager2> audio_session_manager =
+      CreateSessionManager2Internal(audio_device);
+  if (!audio_session_manager.Get()) {
+    return audio_session_enumerator;
+  }
+  _com_error error =
+      audio_session_manager->GetSessionEnumerator(&audio_session_enumerator);
+  if (FAILED(error.Error())) {
+    RTC_LOG(LS_ERROR)
+        << "IAudioSessionManager2::GetSessionEnumerator failed: "
+        << ErrorToString(error);
+    return ComPtr<IAudioSessionEnumerator>();
+  }
+  return audio_session_enumerator;
+}
+
+// Creates and activates an IAudioClient COM object given the selected
+// endpoint device.
+ComPtr<IAudioClient> CreateClientInternal(IMMDevice* audio_device) {
+  if (!audio_device)
+    return ComPtr<IAudioClient>();
+
+  ComPtr<IAudioClient> audio_client;
+  _com_error error = audio_device->Activate(__uuidof(IAudioClient), CLSCTX_ALL,
+                                            nullptr, &audio_client);
+  if (FAILED(error.Error())) {
+    RTC_LOG(LS_ERROR) << "IMMDevice::Activate(IAudioClient) failed: "
+                      << ErrorToString(error);
+  }
+  return audio_client;
+}
+
+ComPtr<IAudioClient2> CreateClient2Internal(IMMDevice* audio_device) {
+  if (!audio_device)
+    return ComPtr<IAudioClient2>();
+
+  ComPtr<IAudioClient2> audio_client;
+  _com_error error = audio_device->Activate(__uuidof(IAudioClient2), CLSCTX_ALL,
+                                            nullptr, &audio_client);
+  if (FAILED(error.Error())) {
+    RTC_LOG(LS_ERROR) << "IMMDevice::Activate(IAudioClient2) failed: "
+                      << ErrorToString(error);
+  }
+  return audio_client;
+}
+
+ComPtr<IAudioClient3> CreateClient3Internal(IMMDevice* audio_device) {
+  if (!audio_device)
+    return ComPtr<IAudioClient3>();
+
+  ComPtr<IAudioClient3> audio_client;
+  _com_error error = audio_device->Activate(__uuidof(IAudioClient3), CLSCTX_ALL,
+                                            nullptr, &audio_client);
+  if (FAILED(error.Error())) {
+    RTC_LOG(LS_ERROR) << "IMMDevice::Activate(IAudioClient3) failed: "
+                      << ErrorToString(error);
+  }
+  return audio_client;
+}
+
+ComPtr<IMMDeviceCollection> CreateCollectionInternal(EDataFlow data_flow) {
+  ComPtr<IMMDeviceEnumerator> device_enumerator(
+      CreateDeviceEnumeratorInternal(true));
+  if (!device_enumerator) {
+    return ComPtr<IMMDeviceCollection>();
+  }
+
+  // Generate a collection of active (present and not disabled) audio endpoint
+  // devices for the specified data-flow direction.
+  // This method will succeed even if all devices are disabled.
+  ComPtr<IMMDeviceCollection> collection;
+  _com_error error = device_enumerator->EnumAudioEndpoints(
+      data_flow, DEVICE_STATE_ACTIVE, collection.GetAddressOf());
+  if (FAILED(error.Error())) {
+    RTC_LOG(LS_ERROR) << "IMMDeviceEnumerator::EnumAudioEndpoints failed: "
+                      << ErrorToString(error);
+  }
+  return collection;
+}
+
+bool GetDeviceNamesInternal(EDataFlow data_flow,
+                            webrtc::AudioDeviceNames* device_names) {
+  RTC_DLOG(LS_INFO) << "GetDeviceNamesInternal: flow="
+                    << FlowToString(data_flow);
+
+  // Generate a collection of active audio endpoint devices for the specified
+  // direction.
+  ComPtr<IMMDeviceCollection> collection = CreateCollectionInternal(data_flow);
+  if (!collection.Get()) {
+    RTC_LOG(LS_ERROR) << "Failed to create a collection of active devices";
+    return false;
+  }
+
+  // Retrieve the number of active (present, not disabled and plugged in)
+  // audio devices for the specified direction.
+  UINT number_of_active_devices = 0;
+  _com_error error = collection->GetCount(&number_of_active_devices);
+  if (FAILED(error.Error())) {
+    RTC_LOG(LS_ERROR) << "IMMDeviceCollection::GetCount failed: "
+                      << ErrorToString(error);
+    return false;
+  }
+
+  if (number_of_active_devices == 0) {
+    RTC_DLOG(LS_WARNING) << "Found no active devices";
+    return false;
+  }
+
+  // Loop over all active devices and add friendly name and unique id to the
+  // `device_names` queue. For now, devices are added at indexes 0, 1, ..., N-1
+  // but they will be moved to 2, 3, ..., N+1 at the next stage when default
+  // and default communication devices are added at index 0 and 1.
+  ComPtr<IMMDevice> audio_device;
+  for (UINT i = 0; i < number_of_active_devices; ++i) {
+    // Retrieve a pointer to the specified item in the device collection.
+    error = collection->Item(i, audio_device.GetAddressOf());
+    if (FAILED(error.Error())) {
+      // Skip this item and try to get the next item instead; will result in
+      // an incomplete list of devices.
+      RTC_LOG(LS_WARNING) << "IMMDeviceCollection::Item failed: "
+                          << ErrorToString(error);
+      continue;
+    }
+    if (!audio_device.Get()) {
+      RTC_LOG(LS_WARNING) << "Invalid audio device";
+      continue;
+    }
+
+    // Retrieve the complete device name for the given audio device endpoint.
+    AudioDeviceName device_name(
+        GetDeviceFriendlyNameInternal(audio_device.Get()),
+        GetDeviceIdInternal(audio_device.Get()));
+    // Add combination of user-friendly and unique name to the output list.
+    device_names->push_back(device_name);
+  }
+
+  // Log a warning if the list of devices is not complete, but keep trying to
+  // add the default and default communications devices at the front.
+  if (device_names->size() != number_of_active_devices) {
+    RTC_DLOG(LS_WARNING)
+        << "List of device names does not contain all active devices";
+  }
+
+  // Avoid adding default and default communication devices if no active
+  // device could be added to the queue. We might as well break here and
+  // return false since no active devices were identified.
+  if (device_names->empty()) {
+    RTC_DLOG(LS_ERROR) << "List of active devices is empty";
+    return false;
+  }
+
+  // Prepend the queue with two more elements: one for the default device and
+  // one for the default communication device (can correspond to the same
+  // unique id if only one active device exists). The first element (index 0)
+  // is the default device and the second element (index 1) is the default
+  // communication device.
+  ERole role[] = {eCommunications, eConsole};
+  ComPtr<IMMDevice> default_device;
+  AudioDeviceName default_device_name;
+  for (size_t i = 0; i < arraysize(role); ++i) {
+    default_device = CreateDeviceInternal(AudioDeviceName::kDefaultDeviceId,
+                                          data_flow, role[i]);
+    if (!default_device.Get()) {
+      // Add empty strings to device name if the device could not be created.
+      RTC_DLOG(LS_WARNING) << "Failed to add device with role: "
+                           << RoleToString(role[i]);
+      default_device_name.device_name = std::string();
+      default_device_name.unique_id = std::string();
+    } else {
+      // Populate the device name with friendly name and unique id.
+      std::string device_name;
+      device_name += (role[i] == eConsole ? "Default - " : "Communication - ");
+      device_name += GetDeviceFriendlyNameInternal(default_device.Get());
+      std::string unique_id = GetDeviceIdInternal(default_device.Get());
+      default_device_name.device_name = std::move(device_name);
+      default_device_name.unique_id = std::move(unique_id);
+    }
+
+    // Add combination of user-friendly and unique name to the output queue.
+    // The last element (<=> eConsole) will be at the front of the queue, hence
+    // at index 0. Empty strings will be added for cases where no default
+    // devices were found.
+    device_names->push_front(default_device_name);
+  }
+
+  // Example of log output when only one device is active. Note that the queue
+  // contains two extra elements at index 0 (Default) and 1 (Communication) to
+  // allow selection of device by role instead of id. All elements correspond
+  // to the same unique id.
+  // [0] friendly name: Default - Headset Microphone (2- Arctis 7 Chat)
+  // [0] unique id    : {0.0.1.00000000}.{ff9eed76-196e-467a-b295-26986e69451c}
+  // [1] friendly name: Communication - Headset Microphone (2- Arctis 7 Chat)
+  // [1] unique id    : {0.0.1.00000000}.{ff9eed76-196e-467a-b295-26986e69451c}
+  // [2] friendly name: Headset Microphone (2- Arctis 7 Chat)
+  // [2] unique id    : {0.0.1.00000000}.{ff9eed76-196e-467a-b295-26986e69451c}
+  for (size_t i = 0; i < device_names->size(); ++i) {
+    RTC_DLOG(LS_INFO) << "[" << i
+                      << "] friendly name: " << (*device_names)[i].device_name;
+    RTC_DLOG(LS_INFO) << "[" << i
+                      << "] unique id    : " << (*device_names)[i].unique_id;
+  }
+
+  return true;
+}
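A usage sketch of the resulting queue layout, via the public wrapper declared in the header (assumes COM is initialized on the calling thread and core_audio_utility::IsSupported() returned true). Indexes 0 and 1 are the synthetic "Default" and "Communication" entries prepended above:

// Sketch: enumerate render devices and log the [index] name [unique id] rows.
#include "modules/audio_device/audio_device_name.h"
#include "modules/audio_device/win/core_audio_utility_win.h"

void ListRenderDevices() {
  webrtc::AudioDeviceNames names;
  if (!webrtc::webrtc_win::core_audio_utility::GetOutputDeviceNames(&names))
    return;
  for (size_t i = 0; i < names.size(); ++i) {
    RTC_LOG(LS_INFO) << "[" << i << "] " << names[i].device_name << " ["
                     << names[i].unique_id << "]";
  }
}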
+HRESULT GetPreferredAudioParametersInternal(IAudioClient* client,
+                                            AudioParameters* params,
+                                            int fixed_sample_rate) {
+  WAVEFORMATPCMEX mix_format;
+  HRESULT hr = core_audio_utility::GetSharedModeMixFormat(client, &mix_format);
+  if (FAILED(hr))
+    return hr;
+
+  REFERENCE_TIME default_period = 0;
+  hr = core_audio_utility::GetDevicePeriod(client, AUDCLNT_SHAREMODE_SHARED,
+                                           &default_period);
+  if (FAILED(hr))
+    return hr;
+
+  int sample_rate = mix_format.Format.nSamplesPerSec;
+  // Override default sample rate if `fixed_sample_rate` is set and different
+  // from the default rate.
+  if (fixed_sample_rate > 0 && fixed_sample_rate != sample_rate) {
+    RTC_DLOG(LS_INFO) << "Using fixed sample rate instead of the preferred: "
+                      << sample_rate << " is replaced by " << fixed_sample_rate;
+    sample_rate = fixed_sample_rate;
+  }
+  // TODO(henrika): utilize full mix_format.Format.wBitsPerSample.
+  // const size_t bits_per_sample = AudioParameters::kBitsPerSample;
+  // TODO(henrika): improve channel layout support.
+  const size_t channels = mix_format.Format.nChannels;
+
+  // Use the native device period to derive the smallest possible buffer size
+  // in shared mode.
+  double device_period_in_seconds =
+      static_cast<double>(
+          core_audio_utility::ReferenceTimeToTimeDelta(default_period).ms()) /
+      1000.0L;
+  const size_t frames_per_buffer =
+      static_cast<size_t>(sample_rate * device_period_in_seconds + 0.5);
+
+  AudioParameters audio_params(sample_rate, channels, frames_per_buffer);
+  *params = audio_params;
+  RTC_DLOG(LS_INFO) << audio_params.ToString();
+
+  return hr;
+}
+
+}  // namespace
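A worked example of the rounding formula above, written as a standalone sketch (FramesPerBuffer is a hypothetical helper mirroring the expression in GetPreferredAudioParametersInternal): a typical shared-mode device period of 10 ms at 48000 Hz gives 48000 * 0.010 + 0.5 = 480.5, which truncates to 480 frames.

// Sketch: frames per buffer = sample_rate * period_seconds, rounded to
// the nearest integer via the +0.5-then-truncate idiom used above.
#include <cstddef>

constexpr size_t FramesPerBuffer(int sample_rate, double period_seconds) {
  return static_cast<size_t>(sample_rate * period_seconds + 0.5);
}
static_assert(FramesPerBuffer(48000, 0.010) == 480, "10 ms @ 48 kHz");
static_assert(FramesPerBuffer(44100, 0.010) == 441, "10 ms @ 44.1 kHz");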
+namespace core_audio_utility {
+
+// core_audio_utility::WaveFormatWrapper implementation.
+WAVEFORMATEXTENSIBLE* WaveFormatWrapper::GetExtensible() const {
+  RTC_CHECK(IsExtensible());
+  return reinterpret_cast<WAVEFORMATEXTENSIBLE*>(ptr_);
+}
+
+bool WaveFormatWrapper::IsExtensible() const {
+  return ptr_->wFormatTag == WAVE_FORMAT_EXTENSIBLE && ptr_->cbSize >= 22;
+}
+
+bool WaveFormatWrapper::IsPcm() const {
+  return IsExtensible() ? GetExtensible()->SubFormat == KSDATAFORMAT_SUBTYPE_PCM
+                        : ptr_->wFormatTag == WAVE_FORMAT_PCM;
+}
+
+bool WaveFormatWrapper::IsFloat() const {
+  return IsExtensible()
+             ? GetExtensible()->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT
+             : ptr_->wFormatTag == WAVE_FORMAT_IEEE_FLOAT;
+}
+
+size_t WaveFormatWrapper::size() const {
+  return sizeof(*ptr_) + ptr_->cbSize;
+}
+
+bool IsSupported() {
+  RTC_DLOG(LS_INFO) << "IsSupported";
+  static bool g_is_supported = IsSupportedInternal();
+  return g_is_supported;
+}
+
+bool IsMMCSSSupported() {
+  RTC_DLOG(LS_INFO) << "IsMMCSSSupported";
+  return LoadAvrtDll();
+}
+
+int NumberOfActiveDevices(EDataFlow data_flow) {
+  // Generate a collection of active audio endpoint devices for the specified
+  // data-flow direction.
+  ComPtr<IMMDeviceCollection> collection = CreateCollectionInternal(data_flow);
+  if (!collection.Get()) {
+    return 0;
+  }
+
+  // Retrieve the number of active audio devices for the specified direction.
+  UINT number_of_active_devices = 0;
+  collection->GetCount(&number_of_active_devices);
+  std::string str;
+  if (data_flow == eCapture) {
+    str = "Number of capture devices: ";
+  } else if (data_flow == eRender) {
+    str = "Number of render devices: ";
+  } else if (data_flow == eAll) {
+    str = "Total number of devices: ";
+  }
+  RTC_DLOG(LS_INFO) << str << number_of_active_devices;
+  return static_cast<int>(number_of_active_devices);
+}
+
+uint32_t GetAudioClientVersion() {
+  uint32_t version = 1;
+  if (rtc::rtc_win::GetVersion() >= rtc::rtc_win::VERSION_WIN10) {
+    version = 3;
+  } else if (rtc::rtc_win::GetVersion() >= rtc::rtc_win::VERSION_WIN8) {
+    version = 2;
+  }
+  return version;
+}
+
+ComPtr<IMMDeviceEnumerator> CreateDeviceEnumerator() {
+  RTC_DLOG(LS_INFO) << "CreateDeviceEnumerator";
+  return CreateDeviceEnumeratorInternal(true);
+}
+
+std::string GetDefaultInputDeviceID() {
+  RTC_DLOG(LS_INFO) << "GetDefaultInputDeviceID";
+  ComPtr<IMMDevice> device(
+      CreateDevice(AudioDeviceName::kDefaultDeviceId, eCapture, eConsole));
+  return device.Get() ? GetDeviceIdInternal(device.Get()) : std::string();
+}
+
+std::string GetDefaultOutputDeviceID() {
+  RTC_DLOG(LS_INFO) << "GetDefaultOutputDeviceID";
+  ComPtr<IMMDevice> device(
+      CreateDevice(AudioDeviceName::kDefaultDeviceId, eRender, eConsole));
+  return device.Get() ? GetDeviceIdInternal(device.Get()) : std::string();
+}
+
+std::string GetCommunicationsInputDeviceID() {
+  RTC_DLOG(LS_INFO) << "GetCommunicationsInputDeviceID";
+  ComPtr<IMMDevice> device(CreateDevice(AudioDeviceName::kDefaultDeviceId,
+                                        eCapture, eCommunications));
+  return device.Get() ? GetDeviceIdInternal(device.Get()) : std::string();
+}
+
+std::string GetCommunicationsOutputDeviceID() {
+  RTC_DLOG(LS_INFO) << "GetCommunicationsOutputDeviceID";
+  ComPtr<IMMDevice> device(CreateDevice(AudioDeviceName::kDefaultDeviceId,
+                                        eRender, eCommunications));
+  return device.Get() ? GetDeviceIdInternal(device.Get()) : std::string();
+}
+
+ComPtr<IMMDevice> CreateDevice(absl::string_view device_id,
+                               EDataFlow data_flow,
+                               ERole role) {
+  RTC_DLOG(LS_INFO) << "CreateDevice";
+  return CreateDeviceInternal(device_id, data_flow, role);
+}
+
+AudioDeviceName GetDeviceName(IMMDevice* device) {
+  RTC_DLOG(LS_INFO) << "GetDeviceName";
+  RTC_DCHECK(device);
+  AudioDeviceName device_name(GetDeviceFriendlyNameInternal(device),
+                              GetDeviceIdInternal(device));
+  RTC_DLOG(LS_INFO) << "friendly name: " << device_name.device_name;
+  RTC_DLOG(LS_INFO) << "unique id    : " << device_name.unique_id;
+  return device_name;
+}
+
+std::string GetFriendlyName(absl::string_view device_id,
+                            EDataFlow data_flow,
+                            ERole role) {
+  RTC_DLOG(LS_INFO) << "GetFriendlyName";
+  ComPtr<IMMDevice> audio_device = CreateDevice(device_id, data_flow, role);
+  if (!audio_device.Get())
+    return std::string();
+
+  AudioDeviceName device_name = GetDeviceName(audio_device.Get());
+  return device_name.device_name;
+}
+
+EDataFlow GetDataFlow(IMMDevice* device) {
+  RTC_DLOG(LS_INFO) << "GetDataFlow";
+  RTC_DCHECK(device);
+  ComPtr<IMMEndpoint> endpoint;
+  _com_error error = device->QueryInterface(endpoint.GetAddressOf());
+  if (FAILED(error.Error())) {
+    RTC_LOG(LS_ERROR) << "IMMDevice::QueryInterface failed: "
+                      << ErrorToString(error);
+    return eAll;
+  }
+
+  EDataFlow data_flow;
+  error = endpoint->GetDataFlow(&data_flow);
+  if (FAILED(error.Error())) {
+    RTC_LOG(LS_ERROR) << "IMMEndpoint::GetDataFlow failed: "
+                      << ErrorToString(error);
+    return eAll;
+  }
+  return data_flow;
+}
+
+bool GetInputDeviceNames(webrtc::AudioDeviceNames* device_names) {
+  RTC_DLOG(LS_INFO) << "GetInputDeviceNames";
+  RTC_DCHECK(device_names);
+  RTC_DCHECK(device_names->empty());
+  return GetDeviceNamesInternal(eCapture, device_names);
+}
+
+bool GetOutputDeviceNames(webrtc::AudioDeviceNames* device_names) {
+  RTC_DLOG(LS_INFO) << "GetOutputDeviceNames";
+  RTC_DCHECK(device_names);
+  RTC_DCHECK(device_names->empty());
+  return GetDeviceNamesInternal(eRender, device_names);
+}
+
+ComPtr<IAudioSessionManager2> CreateSessionManager2(IMMDevice* device) {
+  RTC_DLOG(LS_INFO) << "CreateSessionManager2";
+  return CreateSessionManager2Internal(device);
+}
+
+Microsoft::WRL::ComPtr<IAudioSessionEnumerator> CreateSessionEnumerator(
+    IMMDevice* device) {
+  RTC_DLOG(LS_INFO) << "CreateSessionEnumerator";
+  return CreateSessionEnumeratorInternal(device);
+}
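A short usage sketch of CreateDevice() and GetDeviceName() together (assumes COM is initialized and Core Audio support has been verified; LogDefaultRenderDevice is a hypothetical caller, not part of the patch):

// Sketch: resolve the default render endpoint; GetDeviceName() itself logs
// the friendly name and unique id.
#include "modules/audio_device/audio_device_name.h"
#include "modules/audio_device/win/core_audio_utility_win.h"

void LogDefaultRenderDevice() {
  namespace cau = webrtc::webrtc_win::core_audio_utility;
  Microsoft::WRL::ComPtr<IMMDevice> device = cau::CreateDevice(
      webrtc::AudioDeviceName::kDefaultDeviceId, eRender, eConsole);
  if (!device.Get())
    return;  // No active render device.
  webrtc::AudioDeviceName name = cau::GetDeviceName(device.Get());
}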
+int NumberOfActiveSessions(IMMDevice* device) {
+  RTC_DLOG(LS_INFO) << "NumberOfActiveSessions";
+  ComPtr<IAudioSessionEnumerator> session_enumerator =
+      CreateSessionEnumerator(device);
+  if (!session_enumerator.Get()) {
+    // Creating the enumerator failed (already logged); treat as no sessions.
+    return 0;
+  }
+
+  // Iterate over all audio sessions for the given device.
+  int session_count = 0;
+  _com_error error = session_enumerator->GetCount(&session_count);
+  if (FAILED(error.Error())) {
+    RTC_LOG(LS_ERROR) << "IAudioSessionEnumerator::GetCount failed: "
+                      << ErrorToString(error);
+    return 0;
+  }
+  RTC_DLOG(LS_INFO) << "Total number of audio sessions: " << session_count;
+
+  int num_active = 0;
+  for (int session = 0; session < session_count; session++) {
+    // Acquire the session control interface.
+    ComPtr<IAudioSessionControl> session_control;
+    error = session_enumerator->GetSession(session, &session_control);
+    if (FAILED(error.Error())) {
+      RTC_LOG(LS_ERROR) << "IAudioSessionEnumerator::GetSession failed: "
+                        << ErrorToString(error);
+      return 0;
+    }
+
+    // Log the display name of the audio session for debugging purposes.
+    LPWSTR display_name;
+    if (SUCCEEDED(session_control->GetDisplayName(&display_name))) {
+      RTC_DLOG(LS_INFO) << "display name: "
+                        << rtc::ToUtf8(display_name, wcslen(display_name));
+      CoTaskMemFree(display_name);
+    }
+
+    // Get the current state and check if the state is active or not.
+    AudioSessionState state;
+    error = session_control->GetState(&state);
+    if (FAILED(error.Error())) {
+      RTC_LOG(LS_ERROR) << "IAudioSessionControl::GetState failed: "
+                        << ErrorToString(error);
+      return 0;
+    }
+    if (state == AudioSessionStateActive) {
+      ++num_active;
+    }
+  }
+
+  RTC_DLOG(LS_INFO) << "Number of active audio sessions: " << num_active;
+  return num_active;
+}
+
+ComPtr<IAudioClient> CreateClient(absl::string_view device_id,
+                                  EDataFlow data_flow,
+                                  ERole role) {
+  RTC_DLOG(LS_INFO) << "CreateClient";
+  ComPtr<IMMDevice> device(CreateDevice(device_id, data_flow, role));
+  return CreateClientInternal(device.Get());
+}
+
+ComPtr<IAudioClient2> CreateClient2(absl::string_view device_id,
+                                    EDataFlow data_flow,
+                                    ERole role) {
+  RTC_DLOG(LS_INFO) << "CreateClient2";
+  ComPtr<IMMDevice> device(CreateDevice(device_id, data_flow, role));
+  return CreateClient2Internal(device.Get());
+}
+
+ComPtr<IAudioClient3> CreateClient3(absl::string_view device_id,
+                                    EDataFlow data_flow,
+                                    ERole role) {
+  RTC_DLOG(LS_INFO) << "CreateClient3";
+  ComPtr<IMMDevice> device(CreateDevice(device_id, data_flow, role));
+  return CreateClient3Internal(device.Get());
+}
+
+HRESULT SetClientProperties(IAudioClient2* client) {
+  RTC_DLOG(LS_INFO) << "SetClientProperties";
+  RTC_DCHECK(client);
+  if (GetAudioClientVersion() < 2) {
+    RTC_LOG(LS_WARNING) << "Requires IAudioClient2 or higher";
+    return AUDCLNT_E_UNSUPPORTED_FORMAT;
+  }
+  AudioClientProperties props = {0};
+  props.cbSize = sizeof(AudioClientProperties);
+  // Real-time VoIP communication.
+  // TODO(henrika): other categories?
+  props.eCategory = AudioCategory_Communications;
+  // Hardware-offloaded audio processing allows the main audio processing tasks
+  // to be performed outside the computer's main CPU. Check support and log the
+  // result but hard-code `bIsOffload` to FALSE for now.
+  // TODO(henrika): evaluate hardware-offloading. Might complicate usage of
+  // IAudioClient::GetMixFormat().
+  BOOL supports_offload = FALSE;
+  _com_error error =
+      client->IsOffloadCapable(props.eCategory, &supports_offload);
+  if (FAILED(error.Error())) {
+    RTC_LOG(LS_ERROR) << "IAudioClient2::IsOffloadCapable failed: "
+                      << ErrorToString(error);
+  }
+  RTC_DLOG(LS_INFO) << "supports_offload: " << supports_offload;
+  props.bIsOffload = false;
+#if (NTDDI_VERSION < NTDDI_WINBLUE)
+  RTC_DLOG(LS_INFO) << "options: Not supported in this build";
+#else
+  // TODO(henrika): pros and cons compared with AUDCLNT_STREAMOPTIONS_NONE?
+  props.Options |= AUDCLNT_STREAMOPTIONS_NONE;
+  // Requires System.Devices.AudioDevice.RawProcessingSupported.
+  // The application can choose to *always ignore* the OEM AEC/AGC by setting
+  // the AUDCLNT_STREAMOPTIONS_RAW flag in the call to SetClientProperties.
+  // This flag will preserve the user experience aspect of Communications
+  // streams, but will not insert any OEM provided communications specific
+  // processing in the audio signal path.
+  // props.Options |= AUDCLNT_STREAMOPTIONS_RAW;
+
+  // If it is important to avoid resampling in the audio engine, set this flag.
+  // AUDCLNT_STREAMOPTIONS_MATCH_FORMAT (or anything in IAudioClient3) is not
+  // an appropriate interface to use for communications scenarios.
+  // This interface is mainly meant for pro audio scenarios.
+  // props.Options |= AUDCLNT_STREAMOPTIONS_MATCH_FORMAT;
+  RTC_DLOG(LS_INFO) << "options: 0x" << rtc::ToHex(props.Options);
+#endif
+  error = client->SetClientProperties(&props);
+  if (FAILED(error.Error())) {
+    RTC_LOG(LS_ERROR) << "IAudioClient2::SetClientProperties failed: "
+                      << ErrorToString(error);
+  }
+  return error.Error();
+}
+
+HRESULT GetBufferSizeLimits(IAudioClient2* client,
+                            const WAVEFORMATEXTENSIBLE* format,
+                            REFERENCE_TIME* min_buffer_duration,
+                            REFERENCE_TIME* max_buffer_duration) {
+  RTC_DLOG(LS_INFO) << "GetBufferSizeLimits";
+  RTC_DCHECK(client);
+  if (GetAudioClientVersion() < 2) {
+    RTC_LOG(LS_WARNING) << "Requires IAudioClient2 or higher";
+    return AUDCLNT_E_UNSUPPORTED_FORMAT;
+  }
+  REFERENCE_TIME min_duration = 0;
+  REFERENCE_TIME max_duration = 0;
+  _com_error error = client->GetBufferSizeLimits(
+      reinterpret_cast<const WAVEFORMATEX*>(format), TRUE, &min_duration,
+      &max_duration);
+  if (error.Error() == AUDCLNT_E_OFFLOAD_MODE_ONLY) {
+    // This API seems to be supported in off-load mode only but it is not
+    // documented as a valid error code. Making a special note about it here.
+    RTC_LOG(LS_ERROR) << "IAudioClient2::GetBufferSizeLimits failed: "
+                         "AUDCLNT_E_OFFLOAD_MODE_ONLY";
+  } else if (FAILED(error.Error())) {
+    RTC_LOG(LS_ERROR) << "IAudioClient2::GetBufferSizeLimits failed: "
+                      << ErrorToString(error);
+  } else {
+    *min_buffer_duration = min_duration;
+    *max_buffer_duration = max_duration;
+    RTC_DLOG(LS_INFO) << "min_buffer_duration: " << *min_buffer_duration;
+    RTC_DLOG(LS_INFO) << "max_buffer_duration: " << *max_buffer_duration;
+  }
+  return error.Error();
+}
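For reference, the REFERENCE_TIME unit returned by GetBufferSizeLimits() and the other WASAPI duration APIs is 100 nanoseconds, so 10,000 units equal 1 ms. A tiny sketch of the conversion (MillisToReferenceTime is a hypothetical helper; REFERENCE_TIME itself is a typedef of a 64-bit integer):

// Sketch: 1 REFERENCE_TIME unit = 100 ns, hence ms * 10000 units.
#include <cstdint>

using ReferenceTime100ns = int64_t;  // Stand-in for REFERENCE_TIME.
constexpr ReferenceTime100ns MillisToReferenceTime(int64_t ms) {
  return ms * 10000;
}
static_assert(MillisToReferenceTime(10) == 100000, "10 ms = 100000 units");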
+HRESULT GetSharedModeMixFormat(IAudioClient* client,
+                               WAVEFORMATEXTENSIBLE* format) {
+  RTC_DLOG(LS_INFO) << "GetSharedModeMixFormat";
+  RTC_DCHECK(client);
+
+  // The GetMixFormat method retrieves the stream format that the audio engine
+  // uses for its internal processing of shared-mode streams. The method
+  // allocates the storage for the structure and this memory will be released
+  // when `mix_format` goes out of scope. The GetMixFormat method retrieves a
+  // format descriptor that is in the form of a WAVEFORMATEXTENSIBLE structure
+  // instead of a standalone WAVEFORMATEX structure. The method outputs a
+  // pointer to the WAVEFORMATEX structure that is embedded at the start of
+  // this WAVEFORMATEXTENSIBLE structure.
+  // Note that, crbug/803056 indicates that some devices can return a format
+  // where only the WAVEFORMATEX part is initialized and we must be able to
+  // account for that.
+  ScopedCoMem<WAVEFORMATEXTENSIBLE> mix_format;
+  _com_error error =
+      client->GetMixFormat(reinterpret_cast<WAVEFORMATEX**>(&mix_format));
+  if (FAILED(error.Error())) {
+    RTC_LOG(LS_ERROR) << "IAudioClient::GetMixFormat failed: "
+                      << ErrorToString(error);
+    return error.Error();
+  }
+
+  // Use a wave format wrapper to make things simpler.
+  WaveFormatWrapper wrapped_format(mix_format.Get());
+
+  // Verify that the reported format can be mixed by the audio engine in
+  // shared mode.
+  if (!wrapped_format.IsPcm() && !wrapped_format.IsFloat()) {
+    RTC_DLOG(LS_ERROR)
+        << "Only pure PCM or float audio streams can be mixed in shared mode";
+    return AUDCLNT_E_UNSUPPORTED_FORMAT;
+  }
+
+  // Log a warning for the rare case where `mix_format` only contains a
+  // stand-alone WAVEFORMATEX structure but don't return.
+  if (!wrapped_format.IsExtensible()) {
+    RTC_DLOG(LS_WARNING)
+        << "The returned format contains no extended information. "
+           "The size is "
+        << wrapped_format.size() << " bytes.";
+  }
+
+  // Copy the correct number of bytes into `*format` taking into account if
+  // the returned structure is correctly extended or not.
+  RTC_CHECK_LE(wrapped_format.size(), sizeof(WAVEFORMATEXTENSIBLE));
+  memcpy(format, wrapped_format.get(), wrapped_format.size());
+  RTC_DLOG(LS_INFO) << WaveFormatToString(format);
+
+  return error.Error();
+}
+
+bool IsFormatSupported(IAudioClient* client,
+                       AUDCLNT_SHAREMODE share_mode,
+                       const WAVEFORMATEXTENSIBLE* format) {
+  RTC_DLOG(LS_INFO) << "IsFormatSupported";
+  RTC_DCHECK(client);
+  ScopedCoMem<WAVEFORMATEX> closest_match;
+  // This method provides a way for a client to determine, before calling
+  // IAudioClient::Initialize, whether the audio engine supports a particular
+  // stream format or not. In shared mode, the audio engine always supports
+  // the mix format (see GetSharedModeMixFormat).
+  // TODO(henrika): verify support for exclusive mode as well?
+  _com_error error = client->IsFormatSupported(
+      share_mode, reinterpret_cast<const WAVEFORMATEX*>(format),
+      &closest_match);
+  RTC_LOG(LS_INFO) << WaveFormatToString(
+      const_cast<WAVEFORMATEXTENSIBLE*>(format));
+  if ((error.Error() == S_OK) && (closest_match == nullptr)) {
+    RTC_DLOG(LS_INFO)
+        << "The audio endpoint device supports the specified stream format";
+  } else if ((error.Error() == S_FALSE) && (closest_match != nullptr)) {
+    // Call succeeded with a closest match to the specified format. This log
+    // can only be triggered for shared mode.
+    RTC_LOG(LS_WARNING)
+        << "Exact format is not supported, but a closest match exists";
+    RTC_LOG(LS_INFO) << WaveFormatToString(closest_match.Get());
+  } else if ((error.Error() == AUDCLNT_E_UNSUPPORTED_FORMAT) &&
+             (closest_match == nullptr)) {
+    // The audio engine does not support the caller-specified format or any
+    // similar format.
+    RTC_DLOG(LS_INFO) << "The audio endpoint device does not support the "
+                         "specified stream format";
+  } else {
+    RTC_LOG(LS_ERROR) << "IAudioClient::IsFormatSupported failed: "
+                      << ErrorToString(error);
+  }
+
+  return (error.Error() == S_OK);
+}
+
+HRESULT GetDevicePeriod(IAudioClient* client,
+                        AUDCLNT_SHAREMODE share_mode,
+                        REFERENCE_TIME* device_period) {
+  RTC_DLOG(LS_INFO) << "GetDevicePeriod";
+  RTC_DCHECK(client);
+  // The `default_period` parameter specifies the default scheduling period
+  // for a shared-mode stream. The `minimum_period` parameter specifies the
+  // minimum scheduling period for an exclusive-mode stream.
+  // The time is expressed in 100-nanosecond units.
+  REFERENCE_TIME default_period = 0;
+  REFERENCE_TIME minimum_period = 0;
+  _com_error error = client->GetDevicePeriod(&default_period, &minimum_period);
+  if (FAILED(error.Error())) {
+    RTC_LOG(LS_ERROR) << "IAudioClient::GetDevicePeriod failed: "
+                      << ErrorToString(error);
+    return error.Error();
+  }
+
+  *device_period = (share_mode == AUDCLNT_SHAREMODE_SHARED) ? default_period
+                                                            : minimum_period;
+  RTC_LOG(LS_INFO) << "device_period: "
+                   << ReferenceTimeToTimeDelta(*device_period).ms() << " [ms]";
+  RTC_LOG(LS_INFO) << "minimum_period: "
+                   << ReferenceTimeToTimeDelta(minimum_period).ms() << " [ms]";
+  return error.Error();
+}
+HRESULT GetSharedModeEnginePeriod(IAudioClient3* client3,
+                                  const WAVEFORMATEXTENSIBLE* format,
+                                  uint32_t* default_period_in_frames,
+                                  uint32_t* fundamental_period_in_frames,
+                                  uint32_t* min_period_in_frames,
+                                  uint32_t* max_period_in_frames) {
+  RTC_DLOG(LS_INFO) << "GetSharedModeEnginePeriod";
+  RTC_DCHECK(client3);
+
+  UINT32 default_period = 0;
+  UINT32 fundamental_period = 0;
+  UINT32 min_period = 0;
+  UINT32 max_period = 0;
+  _com_error error = client3->GetSharedModeEnginePeriod(
+      reinterpret_cast<const WAVEFORMATEX*>(format), &default_period,
+      &fundamental_period, &min_period, &max_period);
+  if (FAILED(error.Error())) {
+    RTC_LOG(LS_ERROR) << "IAudioClient3::GetSharedModeEnginePeriod failed: "
+                      << ErrorToString(error);
+    return error.Error();
+  }
+
+  WAVEFORMATEX format_ex = format->Format;
+  const WORD sample_rate = format_ex.nSamplesPerSec;
+  RTC_LOG(LS_INFO) << "default_period_in_frames: " << default_period << " ("
+                   << FramesToMilliseconds(default_period, sample_rate)
+                   << " ms)";
+  RTC_LOG(LS_INFO) << "fundamental_period_in_frames: " << fundamental_period
+                   << " ("
+                   << FramesToMilliseconds(fundamental_period, sample_rate)
+                   << " ms)";
+  RTC_LOG(LS_INFO) << "min_period_in_frames: " << min_period << " ("
+                   << FramesToMilliseconds(min_period, sample_rate) << " ms)";
+  RTC_LOG(LS_INFO) << "max_period_in_frames: " << max_period << " ("
+                   << FramesToMilliseconds(max_period, sample_rate) << " ms)";
+  *default_period_in_frames = default_period;
+  *fundamental_period_in_frames = fundamental_period;
+  *min_period_in_frames = min_period;
+  *max_period_in_frames = max_period;
+  return error.Error();
+}
+
+HRESULT GetPreferredAudioParameters(IAudioClient* client,
+                                    AudioParameters* params) {
+  RTC_DLOG(LS_INFO) << "GetPreferredAudioParameters";
+  RTC_DCHECK(client);
+  return GetPreferredAudioParametersInternal(client, params, -1);
+}
+
+HRESULT GetPreferredAudioParameters(IAudioClient* client,
+                                    webrtc::AudioParameters* params,
+                                    uint32_t sample_rate) {
+  RTC_DLOG(LS_INFO) << "GetPreferredAudioParameters: " << sample_rate;
+  RTC_DCHECK(client);
+  return GetPreferredAudioParametersInternal(client, params, sample_rate);
+}
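The event-driven mode used by SharedModeInitialize() below expects an auto-reset event that the audio engine signals once per ready buffer. A sketch of creating such a handle with the real Win32 API (CreateAudioSamplesEvent is a hypothetical helper, not part of the patch):

// Sketch: auto-reset, initially non-signaled event for event-driven WASAPI
// streaming; pass the handle as `event_handle` to SharedModeInitialize().
#include <windows.h>

HANDLE CreateAudioSamplesEvent() {
  HANDLE event_handle = ::CreateEvent(nullptr, /*bManualReset=*/false,
                                      /*bInitialState=*/false, nullptr);
  return event_handle;  // Caller must CloseHandle() when done.
}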
+HRESULT SharedModeInitialize(IAudioClient* client,
+                             const WAVEFORMATEXTENSIBLE* format,
+                             HANDLE event_handle,
+                             REFERENCE_TIME buffer_duration,
+                             bool auto_convert_pcm,
+                             uint32_t* endpoint_buffer_size) {
+  RTC_DLOG(LS_INFO) << "SharedModeInitialize: buffer_duration="
+                    << buffer_duration
+                    << ", auto_convert_pcm=" << auto_convert_pcm;
+  RTC_DCHECK(client);
+  RTC_DCHECK_GE(buffer_duration, 0);
+  if (buffer_duration != 0) {
+    RTC_DLOG(LS_WARNING) << "Non-default buffer size is used";
+  }
+  if (auto_convert_pcm) {
+    RTC_DLOG(LS_WARNING) << "Sample rate converter can be utilized";
+  }
+  // The AUDCLNT_STREAMFLAGS_NOPERSIST flag disables persistence of the volume
+  // and mute settings for a session that contains rendering streams.
+  // By default, the volume level and muting state for a rendering session are
+  // persistent across system restarts. The volume level and muting state for
+  // a capture session are never persistent.
+  DWORD stream_flags = AUDCLNT_STREAMFLAGS_NOPERSIST;
+
+  // Enable event-driven streaming if a valid event handle is provided.
+  // After the stream starts, the audio engine will signal the event handle
+  // to notify the client each time a buffer becomes ready to process.
+  // Event-driven buffering is supported for both rendering and capturing.
+  // Both shared-mode and exclusive-mode streams can use event-driven
+  // buffering.
+  bool use_event =
+      (event_handle != nullptr && event_handle != INVALID_HANDLE_VALUE);
+  if (use_event) {
+    stream_flags |= AUDCLNT_STREAMFLAGS_EVENTCALLBACK;
+    RTC_DLOG(LS_INFO) << "The stream is initialized to be event driven";
+  }
+
+  // Check if sample-rate conversion is requested.
+  if (auto_convert_pcm) {
+    // Add channel matrixer (not utilized here) and rate converter to convert
+    // from our (the client's) format to the audio engine mix format.
+    // Currently only supported for testing, i.e., not possible to enable
+    // using public APIs.
+    RTC_DLOG(LS_INFO)
+        << "The stream is initialized to support rate conversion";
+    stream_flags |= AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM;
+    stream_flags |= AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY;
+  }
+  RTC_DLOG(LS_INFO) << "stream_flags: 0x" << rtc::ToHex(stream_flags);
+
+  // Initialize the shared mode client for minimal delay if `buffer_duration`
+  // is 0 or possibly a higher delay (more robust) if `buffer_duration` is
+  // larger than 0. The actual size is given by IAudioClient::GetBufferSize().
+  _com_error error = client->Initialize(
+      AUDCLNT_SHAREMODE_SHARED, stream_flags, buffer_duration, 0,
+      reinterpret_cast<const WAVEFORMATEX*>(format), nullptr);
+  if (FAILED(error.Error())) {
+    RTC_LOG(LS_ERROR) << "IAudioClient::Initialize failed: "
+                      << ErrorToString(error);
+    return error.Error();
+  }
+
+  // If a stream is initialized to be event driven and in shared mode, the
+  // associated application must also obtain a handle by making a call to
+  // IAudioClient::SetEventHandle.
+  if (use_event) {
+    error = client->SetEventHandle(event_handle);
+    if (FAILED(error.Error())) {
+      RTC_LOG(LS_ERROR) << "IAudioClient::SetEventHandle failed: "
+                        << ErrorToString(error);
+      return error.Error();
+    }
+  }
+
+  UINT32 buffer_size_in_frames = 0;
+  // Retrieves the size (maximum capacity) of the endpoint buffer. The size is
+  // expressed as the number of audio frames the buffer can hold.
+  // For rendering clients, the buffer length determines the maximum amount of
+  // rendering data that the application can write to the endpoint buffer
+  // during a single processing pass. For capture clients, the buffer length
+  // determines the maximum amount of capture data that the audio engine can
+  // read from the endpoint buffer during a single processing pass.
+  error = client->GetBufferSize(&buffer_size_in_frames);
+  if (FAILED(error.Error())) {
+    RTC_LOG(LS_ERROR) << "IAudioClient::GetBufferSize failed: "
+                      << ErrorToString(error);
+    return error.Error();
+  }
+
+  *endpoint_buffer_size = buffer_size_in_frames;
+  RTC_DLOG(LS_INFO) << "endpoint buffer size: " << buffer_size_in_frames
+                    << " [audio frames]";
+  const double size_in_ms = static_cast<double>(buffer_size_in_frames) /
+                            (format->Format.nSamplesPerSec / 1000.0);
+  RTC_DLOG(LS_INFO) << "endpoint buffer size: "
+                    << static_cast<int>(size_in_ms + 0.5) << " [ms]";
+  RTC_DLOG(LS_INFO) << "bytes per audio frame: " << format->Format.nBlockAlign;
+  RTC_DLOG(LS_INFO) << "endpoint buffer size: "
+                    << buffer_size_in_frames * format->Format.nChannels *
+                           (format->Format.wBitsPerSample / 8)
+                    << " [bytes]";
+
+  // TODO(henrika): utilize when delay measurements are added.
+  REFERENCE_TIME latency = 0;
+  error = client->GetStreamLatency(&latency);
+  RTC_DLOG(LS_INFO) << "stream latency: "
+                    << ReferenceTimeToTimeDelta(latency).ms() << " [ms]";
+  return error.Error();
+}
+HRESULT SharedModeInitializeLowLatency(IAudioClient3* client,
+                                       const WAVEFORMATEXTENSIBLE* format,
+                                       HANDLE event_handle,
+                                       uint32_t period_in_frames,
+                                       bool auto_convert_pcm,
+                                       uint32_t* endpoint_buffer_size) {
+  RTC_DLOG(LS_INFO) << "SharedModeInitializeLowLatency: period_in_frames="
+                    << period_in_frames
+                    << ", auto_convert_pcm=" << auto_convert_pcm;
+  RTC_DCHECK(client);
+  RTC_DCHECK_GT(period_in_frames, 0);
+  if (auto_convert_pcm) {
+    RTC_DLOG(LS_WARNING) << "Sample rate converter is enabled";
+  }
+
+  // Define stream flags.
+  DWORD stream_flags = AUDCLNT_STREAMFLAGS_NOPERSIST;
+  bool use_event =
+      (event_handle != nullptr && event_handle != INVALID_HANDLE_VALUE);
+  if (use_event) {
+    stream_flags |= AUDCLNT_STREAMFLAGS_EVENTCALLBACK;
+    RTC_DLOG(LS_INFO) << "The stream is initialized to be event driven";
+  }
+  if (auto_convert_pcm) {
+    stream_flags |= AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM;
+    stream_flags |= AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY;
+  }
+  RTC_DLOG(LS_INFO) << "stream_flags: 0x" << rtc::ToHex(stream_flags);
+
+  // Initialize the shared mode client for lowest possible latency.
+  // It is assumed that GetSharedModeEnginePeriod() has been used to query the
+  // smallest possible engine period and that it is given by
+  // `period_in_frames`.
+  _com_error error = client->InitializeSharedAudioStream(
+      stream_flags, period_in_frames,
+      reinterpret_cast<const WAVEFORMATEX*>(format), nullptr);
+  if (FAILED(error.Error())) {
+    RTC_LOG(LS_ERROR) << "IAudioClient3::InitializeSharedAudioStream failed: "
+                      << ErrorToString(error);
+    return error.Error();
+  }
+
+  // Set the event handle.
+  if (use_event) {
+    error = client->SetEventHandle(event_handle);
+    if (FAILED(error.Error())) {
+      RTC_LOG(LS_ERROR) << "IAudioClient::SetEventHandle failed: "
+                        << ErrorToString(error);
+      return error.Error();
+    }
+  }
+
+  UINT32 buffer_size_in_frames = 0;
+  // Retrieve the size (maximum capacity) of the endpoint buffer.
+  error = client->GetBufferSize(&buffer_size_in_frames);
+  if (FAILED(error.Error())) {
+    RTC_LOG(LS_ERROR) << "IAudioClient::GetBufferSize failed: "
+                      << ErrorToString(error);
+    return error.Error();
+  }
+
+  *endpoint_buffer_size = buffer_size_in_frames;
+  RTC_DLOG(LS_INFO) << "endpoint buffer size: " << buffer_size_in_frames
+                    << " [audio frames]";
+  const double size_in_ms = static_cast<double>(buffer_size_in_frames) /
+                            (format->Format.nSamplesPerSec / 1000.0);
+  RTC_DLOG(LS_INFO) << "endpoint buffer size: "
+                    << static_cast<int>(size_in_ms + 0.5) << " [ms]";
+  RTC_DLOG(LS_INFO) << "bytes per audio frame: " << format->Format.nBlockAlign;
+  RTC_DLOG(LS_INFO) << "endpoint buffer size: "
+                    << buffer_size_in_frames * format->Format.nChannels *
+                           (format->Format.wBitsPerSample / 8)
+                    << " [bytes]";
+
+  // TODO(henrika): utilize when delay measurements are added.
+  REFERENCE_TIME latency = 0;
+  error = client->GetStreamLatency(&latency);
+  if (FAILED(error.Error())) {
+    RTC_LOG(LS_WARNING) << "IAudioClient::GetStreamLatency failed: "
+                        << ErrorToString(error);
+  } else {
+    RTC_DLOG(LS_INFO) << "stream latency: "
+                      << ReferenceTimeToTimeDelta(latency).ms() << " [ms]";
+  }
+  return error.Error();
+}
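A flow sketch of the IAudioClient3 low-latency path just implemented, chaining the two calls in the order SharedModeInitializeLowLatency() assumes (InitForLowestLatency is a hypothetical caller; error handling is elided for brevity):

// Sketch: query the minimum engine period first, then initialize with it.
#include "modules/audio_device/win/core_audio_utility_win.h"

uint32_t InitForLowestLatency(IAudioClient3* client3,
                              const WAVEFORMATEXTENSIBLE* format,
                              HANDLE event_handle) {
  namespace cau = webrtc::webrtc_win::core_audio_utility;
  uint32_t default_period = 0, fundamental = 0, min_period = 0, max_period = 0;
  cau::GetSharedModeEnginePeriod(client3, format, &default_period,
                                 &fundamental, &min_period, &max_period);
  uint32_t endpoint_buffer_size = 0;
  cau::SharedModeInitializeLowLatency(client3, format, event_handle,
                                      min_period, /*auto_convert_pcm=*/false,
                                      &endpoint_buffer_size);
  return endpoint_buffer_size;
}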
+ComPtr<IAudioRenderClient> CreateRenderClient(IAudioClient* client) {
+  RTC_DLOG(LS_INFO) << "CreateRenderClient";
+  RTC_DCHECK(client);
+  // Get access to the IAudioRenderClient interface. This interface
+  // enables us to write output data to a rendering endpoint buffer.
+  ComPtr<IAudioRenderClient> audio_render_client;
+  _com_error error = client->GetService(IID_PPV_ARGS(&audio_render_client));
+  if (FAILED(error.Error())) {
+    RTC_LOG(LS_ERROR)
+        << "IAudioClient::GetService(IID_IAudioRenderClient) failed: "
+        << ErrorToString(error);
+    return ComPtr<IAudioRenderClient>();
+  }
+  return audio_render_client;
+}
+
+ComPtr<IAudioCaptureClient> CreateCaptureClient(IAudioClient* client) {
+  RTC_DLOG(LS_INFO) << "CreateCaptureClient";
+  RTC_DCHECK(client);
+  // Get access to the IAudioCaptureClient interface. This interface
+  // enables us to read input data from a capturing endpoint buffer.
+  ComPtr<IAudioCaptureClient> audio_capture_client;
+  _com_error error = client->GetService(IID_PPV_ARGS(&audio_capture_client));
+  if (FAILED(error.Error())) {
+    RTC_LOG(LS_ERROR)
+        << "IAudioClient::GetService(IID_IAudioCaptureClient) failed: "
+        << ErrorToString(error);
+    return ComPtr<IAudioCaptureClient>();
+  }
+  return audio_capture_client;
+}
+
+ComPtr<IAudioClock> CreateAudioClock(IAudioClient* client) {
+  RTC_DLOG(LS_INFO) << "CreateAudioClock";
+  RTC_DCHECK(client);
+  // Get access to the IAudioClock interface. This interface enables us to
+  // monitor a stream's data rate and the current position in the stream.
+  ComPtr<IAudioClock> audio_clock;
+  _com_error error = client->GetService(IID_PPV_ARGS(&audio_clock));
+  if (FAILED(error.Error())) {
+    RTC_LOG(LS_ERROR) << "IAudioClient::GetService(IID_IAudioClock) failed: "
+                      << ErrorToString(error);
+    return ComPtr<IAudioClock>();
+  }
+  return audio_clock;
+}
+
+ComPtr<IAudioSessionControl> CreateAudioSessionControl(IAudioClient* client) {
+  RTC_DLOG(LS_INFO) << "CreateAudioSessionControl";
+  RTC_DCHECK(client);
+  ComPtr<IAudioSessionControl> audio_session_control;
+  _com_error error = client->GetService(IID_PPV_ARGS(&audio_session_control));
+  if (FAILED(error.Error())) {
+    RTC_LOG(LS_ERROR)
+        << "IAudioClient::GetService(IID_IAudioSessionControl) failed: "
+        << ErrorToString(error);
+    return ComPtr<IAudioSessionControl>();
+  }
+  return audio_session_control;
+}
+
+ComPtr<ISimpleAudioVolume> CreateSimpleAudioVolume(IAudioClient* client) {
+  RTC_DLOG(LS_INFO) << "CreateSimpleAudioVolume";
+  RTC_DCHECK(client);
+  // Get access to the ISimpleAudioVolume interface. This interface enables a
+  // client to control the master volume level of an audio session.
+  ComPtr<ISimpleAudioVolume> simple_audio_volume;
+  _com_error error = client->GetService(IID_PPV_ARGS(&simple_audio_volume));
+  if (FAILED(error.Error())) {
+    RTC_LOG(LS_ERROR)
+        << "IAudioClient::GetService(IID_ISimpleAudioVolume) failed: "
+        << ErrorToString(error);
+    return ComPtr<ISimpleAudioVolume>();
+  }
+  return simple_audio_volume;
+}
+
+bool FillRenderEndpointBufferWithSilence(IAudioClient* client,
+                                         IAudioRenderClient* render_client) {
+  RTC_DLOG(LS_INFO) << "FillRenderEndpointBufferWithSilence";
+  RTC_DCHECK(client);
+  RTC_DCHECK(render_client);
+  UINT32 endpoint_buffer_size = 0;
+  _com_error error = client->GetBufferSize(&endpoint_buffer_size);
+  if (FAILED(error.Error())) {
+    RTC_LOG(LS_ERROR) << "IAudioClient::GetBufferSize failed: "
+                      << ErrorToString(error);
+    return false;
+  }
+
+  UINT32 num_queued_frames = 0;
+  // Get number of audio frames that are queued up to play in the endpoint
+  // buffer.
+  error = client->GetCurrentPadding(&num_queued_frames);
+  if (FAILED(error.Error())) {
+    RTC_LOG(LS_ERROR) << "IAudioClient::GetCurrentPadding failed: "
+                      << ErrorToString(error);
+    return false;
+  }
+  RTC_DLOG(LS_INFO) << "num_queued_frames: " << num_queued_frames;
+
+  BYTE* data = nullptr;
+  int num_frames_to_fill = endpoint_buffer_size - num_queued_frames;
+  RTC_DLOG(LS_INFO) << "num_frames_to_fill: " << num_frames_to_fill;
+  error = render_client->GetBuffer(num_frames_to_fill, &data);
+  if (FAILED(error.Error())) {
+    RTC_LOG(LS_ERROR) << "IAudioRenderClient::GetBuffer failed: "
+                      << ErrorToString(error);
+    return false;
+  }
+
+  // Using the AUDCLNT_BUFFERFLAGS_SILENT flag eliminates the need to
+  // explicitly write silence data to the rendering buffer.
+  error = render_client->ReleaseBuffer(num_frames_to_fill,
+                                       AUDCLNT_BUFFERFLAGS_SILENT);
+  if (FAILED(error.Error())) {
+    RTC_LOG(LS_ERROR) << "IAudioRenderClient::ReleaseBuffer failed: "
+                      << ErrorToString(error);
+    return false;
+  }
+
+  return true;
+}
+
+std::string WaveFormatToString(const WaveFormatWrapper format) {
+  char ss_buf[1024];
+  rtc::SimpleStringBuilder ss(ss_buf);
+  // Start with the WAVEFORMATEX part (which always exists).
+  ss.AppendFormat("wFormatTag: %s (0x%X)",
+                  WaveFormatTagToString(format->wFormatTag),
+                  format->wFormatTag);
+  ss.AppendFormat(", nChannels: %d", format->nChannels);
+  ss.AppendFormat(", nSamplesPerSec: %d", format->nSamplesPerSec);
+  ss.AppendFormat(", nAvgBytesPerSec: %d", format->nAvgBytesPerSec);
+  ss.AppendFormat(", nBlockAlign: %d", format->nBlockAlign);
+  ss.AppendFormat(", wBitsPerSample: %d", format->wBitsPerSample);
+  ss.AppendFormat(", cbSize: %d", format->cbSize);
+  if (!format.IsExtensible())
+    return ss.str();
+
+  // Append the WAVEFORMATEXTENSIBLE part (which we know exists).
+  ss.AppendFormat(
+      " [+] wValidBitsPerSample: %d, dwChannelMask: %s",
+      format.GetExtensible()->Samples.wValidBitsPerSample,
+      ChannelMaskToString(format.GetExtensible()->dwChannelMask).c_str());
+  if (format.IsPcm()) {
+    ss.AppendFormat("%s", ", SubFormat: KSDATAFORMAT_SUBTYPE_PCM");
+  } else if (format.IsFloat()) {
+    ss.AppendFormat("%s", ", SubFormat: KSDATAFORMAT_SUBTYPE_IEEE_FLOAT");
+  } else {
+    ss.AppendFormat("%s", ", SubFormat: NOT_SUPPORTED");
+  }
+  return ss.str();
+}
+
+webrtc::TimeDelta ReferenceTimeToTimeDelta(REFERENCE_TIME time) {
+  // Each unit of reference time is 100 nanoseconds <=> 0.1 microsecond.
+  return webrtc::TimeDelta::Micros(0.1 * time + 0.5);
+}
+
+double FramesToMilliseconds(uint32_t num_frames, uint16_t sample_rate) {
+  // Convert the current period in frames into milliseconds.
+  return static_cast<double>(num_frames) / (sample_rate / 1000.0);
+}
+
+std::string ErrorToString(const _com_error& error) {
+  char ss_buf[1024];
+  rtc::SimpleStringBuilder ss(ss_buf);
+  ss.AppendFormat("(HRESULT: 0x%08X)", error.Error());
+  return ss.str();
+}
+
+}  // namespace core_audio_utility
+}  // namespace webrtc_win
+}  // namespace webrtc
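A sketch of the error-logging pattern used throughout this file: a raw HRESULT is wrapped in _com_error so that ErrorToString() can format it consistently (LogIfFailed is a hypothetical helper, not part of the patch):

// Sketch: wrap an HRESULT in _com_error and log it via ErrorToString().
#include <comdef.h>
#include "modules/audio_device/win/core_audio_utility_win.h"

void LogIfFailed(HRESULT hr) {
  _com_error error(hr);
  if (FAILED(error.Error())) {
    RTC_LOG(LS_ERROR)
        << "call failed: "
        << webrtc::webrtc_win::core_audio_utility::ErrorToString(error);
  }
}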
diff --git a/third_party/libwebrtc/modules/audio_device/win/core_audio_utility_win.h b/third_party/libwebrtc/modules/audio_device/win/core_audio_utility_win.h
new file mode 100644
index 0000000000..454e60bf31
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/win/core_audio_utility_win.h
@@ -0,0 +1,560 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_UTILITY_WIN_H_
+#define MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_UTILITY_WIN_H_
+
+#include <audioclient.h>
+#include <audiopolicy.h>
+#include <avrt.h>
+#include <comdef.h>
+#include <mmdeviceapi.h>
+#include <objbase.h>
+#include <propidl.h>
+#include <wrl/client.h>
+
+#include <string>
+
+#include "absl/strings/string_view.h"
+#include "api/units/time_delta.h"
+#include "modules/audio_device/audio_device_name.h"
+#include "modules/audio_device/include/audio_device_defines.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/string_utils.h"
+
+#pragma comment(lib, "Avrt.lib")
+
+namespace webrtc {
+namespace webrtc_win {
+
+// Utility class which registers a thread with MMCSS in the constructor and
+// deregisters MMCSS in the destructor. The task name is given by `task_name`.
+// The Multimedia Class Scheduler service (MMCSS) enables multimedia
+// applications to ensure that their time-sensitive processing receives
+// prioritized access to CPU resources without denying CPU resources to
+// lower-priority applications.
+class ScopedMMCSSRegistration {
+ public:
+  const char* PriorityClassToString(DWORD priority_class) {
+    switch (priority_class) {
+      case ABOVE_NORMAL_PRIORITY_CLASS:
+        return "ABOVE_NORMAL";
+      case BELOW_NORMAL_PRIORITY_CLASS:
+        return "BELOW_NORMAL";
+      case HIGH_PRIORITY_CLASS:
+        return "HIGH";
+      case IDLE_PRIORITY_CLASS:
+        return "IDLE";
+      case NORMAL_PRIORITY_CLASS:
+        return "NORMAL";
+      case REALTIME_PRIORITY_CLASS:
+        return "REALTIME";
+      default:
+        return "INVALID";
+    }
+  }
+
+  const char* PriorityToString(int priority) {
+    switch (priority) {
+      case THREAD_PRIORITY_ABOVE_NORMAL:
+        return "ABOVE_NORMAL";
+      case THREAD_PRIORITY_BELOW_NORMAL:
+        return "BELOW_NORMAL";
+      case THREAD_PRIORITY_HIGHEST:
+        return "HIGHEST";
+      case THREAD_PRIORITY_IDLE:
+        return "IDLE";
+      case THREAD_PRIORITY_LOWEST:
+        return "LOWEST";
+      case THREAD_PRIORITY_NORMAL:
+        return "NORMAL";
+      case THREAD_PRIORITY_TIME_CRITICAL:
+        return "TIME_CRITICAL";
+      default:
+        // Can happen in combination with REALTIME_PRIORITY_CLASS.
+        return "INVALID";
+    }
+  }
+
+  explicit ScopedMMCSSRegistration(const wchar_t* task_name) {
+    RTC_DLOG(LS_INFO) << "ScopedMMCSSRegistration: " << rtc::ToUtf8(task_name);
+    // Register the calling thread with MMCSS for the supplied `task_name`.
+    DWORD mmcss_task_index = 0;
+    mmcss_handle_ =
+        AvSetMmThreadCharacteristicsW(task_name, &mmcss_task_index);
+    if (mmcss_handle_ == nullptr) {
+      RTC_LOG(LS_ERROR) << "Failed to enable MMCSS on this thread: "
+                        << GetLastError();
+    } else {
+      const DWORD priority_class = GetPriorityClass(GetCurrentProcess());
+      const int priority = GetThreadPriority(GetCurrentThread());
+      RTC_DLOG(LS_INFO) << "priority class: "
+                        << PriorityClassToString(priority_class) << "("
+                        << priority_class << ")";
+      RTC_DLOG(LS_INFO) << "priority: " << PriorityToString(priority) << "("
+                        << priority << ")";
+    }
+  }
+  ~ScopedMMCSSRegistration() {
+    if (Succeeded()) {
+      // Deregister with MMCSS.
+      RTC_DLOG(LS_INFO) << "~ScopedMMCSSRegistration";
+      AvRevertMmThreadCharacteristics(mmcss_handle_);
+    }
+  }
+
+  ScopedMMCSSRegistration(const ScopedMMCSSRegistration&) = delete;
+  ScopedMMCSSRegistration& operator=(const ScopedMMCSSRegistration&) = delete;
+
+  bool Succeeded() const { return mmcss_handle_ != nullptr; }
+
+ private:
+  HANDLE mmcss_handle_ = nullptr;
+};
+
+// A PROPVARIANT that is automatically initialized and cleared upon respective
+// construction and destruction of this class.
+class ScopedPropVariant {
+ public:
+  ScopedPropVariant() { PropVariantInit(&pv_); }
+
+  ~ScopedPropVariant() { Reset(); }
+
+  ScopedPropVariant(const ScopedPropVariant&) = delete;
+  ScopedPropVariant& operator=(const ScopedPropVariant&) = delete;
+  bool operator==(const ScopedPropVariant&) const = delete;
+  bool operator!=(const ScopedPropVariant&) const = delete;
+
+  // Returns a pointer to the underlying PROPVARIANT for use as an out param
+  // in a function call.
+  PROPVARIANT* Receive() {
+    RTC_DCHECK_EQ(pv_.vt, VT_EMPTY);
+    return &pv_;
+  }
+
+  // Clears the instance to prepare it for re-use (e.g., via Receive).
+  void Reset() {
+    if (pv_.vt != VT_EMPTY) {
+      HRESULT result = PropVariantClear(&pv_);
+      RTC_DCHECK_EQ(result, S_OK);
+    }
+  }
+
+  const PROPVARIANT& get() const { return pv_; }
+  const PROPVARIANT* ptr() const { return &pv_; }
+
+ private:
+  PROPVARIANT pv_;
+};
+
+// Simple scoped memory releaser class for COM allocated memory.
+template <typename T>
+class ScopedCoMem {
+ public:
+  ScopedCoMem() : mem_ptr_(nullptr) {}
+
+  ~ScopedCoMem() { Reset(nullptr); }
+
+  ScopedCoMem(const ScopedCoMem&) = delete;
+  ScopedCoMem& operator=(const ScopedCoMem&) = delete;
+
+  T** operator&() {                   // NOLINT
+    RTC_DCHECK(mem_ptr_ == nullptr);  // To catch memory leaks.
+    return &mem_ptr_;
+  }
+
+  operator T*() { return mem_ptr_; }
+
+  T* operator->() {
+    RTC_DCHECK(mem_ptr_ != nullptr);
+    return mem_ptr_;
+  }
+
+  const T* operator->() const {
+    RTC_DCHECK(mem_ptr_ != nullptr);
+    return mem_ptr_;
+  }
+
+  explicit operator bool() const { return mem_ptr_; }
+
+  friend bool operator==(const ScopedCoMem& lhs, std::nullptr_t) {
+    return lhs.Get() == nullptr;
+  }
+
+  friend bool operator==(std::nullptr_t, const ScopedCoMem& rhs) {
+    return rhs.Get() == nullptr;
+  }
+
+  friend bool operator!=(const ScopedCoMem& lhs, std::nullptr_t) {
+    return lhs.Get() != nullptr;
+  }
+
+  friend bool operator!=(std::nullptr_t, const ScopedCoMem& rhs) {
+    return rhs.Get() != nullptr;
+  }
+
+  void Reset(T* ptr) {
+    if (mem_ptr_)
+      CoTaskMemFree(mem_ptr_);
+    mem_ptr_ = ptr;
+  }
+
+  T* Get() const { return mem_ptr_; }
+
+ private:
+  T* mem_ptr_;
+};
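A usage sketch for ScopedCoMem: operator& hands the empty pointer to an out-parameter API, and the destructor releases the allocation with CoTaskMemFree() (LogMixFormat is a hypothetical caller; GetMixFormat is the real WASAPI method used in the .cc file above):

// Sketch: RAII ownership of a COM-allocated WAVEFORMATEX.
void LogMixFormat(IAudioClient* client) {
  webrtc::webrtc_win::ScopedCoMem<WAVEFORMATEX> format;
  if (SUCCEEDED(client->GetMixFormat(&format))) {
    RTC_LOG(LS_INFO) << "nChannels: " << format->nChannels;
  }  // `format` frees the COM allocation when it goes out of scope.
}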
+// A HANDLE that is automatically initialized and closed upon respective
+// construction and destruction of this class.
+class ScopedHandle {
+ public:
+  ScopedHandle() : handle_(nullptr) {}
+  explicit ScopedHandle(HANDLE h) : handle_(nullptr) { Set(h); }
+
+  ~ScopedHandle() { Close(); }
+
+  ScopedHandle& operator=(const ScopedHandle&) = delete;
+  bool operator==(const ScopedHandle&) const = delete;
+  bool operator!=(const ScopedHandle&) const = delete;
+
+  // Use this instead of comparing to INVALID_HANDLE_VALUE.
+  bool IsValid() const { return handle_ != nullptr; }
+
+  void Set(HANDLE new_handle) {
+    Close();
+    // Windows is inconsistent about invalid handles.
+    // See https://blogs.msdn.microsoft.com/oldnewthing/20040302-00/?p=40443
+    // for details.
+    if (new_handle != INVALID_HANDLE_VALUE) {
+      handle_ = new_handle;
+    }
+  }
+
+  HANDLE Get() const { return handle_; }
+
+  operator HANDLE() const { return handle_; }
+
+  void Close() {
+    if (handle_) {
+      if (!::CloseHandle(handle_)) {
+        RTC_DCHECK_NOTREACHED();
+      }
+      handle_ = nullptr;
+    }
+  }
+
+ private:
+  HANDLE handle_;
+};
+
+// Utility methods for the Core Audio API on Windows.
+// Always ensure that Core Audio is supported before using these methods.
+// Use webrtc_win::core_audio_utility::IsSupported() for this purpose.
+// Also, all methods must be called on a valid COM thread. This can be done
+// by using the ScopedCOMInitializer helper class.
+// These methods are based on media::CoreAudioUtil in Chrome.
+namespace core_audio_utility {
+
+// Helper class which automates casting between WAVEFORMATEX and
+// WAVEFORMATEXTENSIBLE raw pointers using implicit constructors and
+// operator overloading. Note that no memory is allocated by this utility
+// structure. It only serves as a handle (or a wrapper) of the structure
+// provided to it at construction.
+class WaveFormatWrapper {
+ public:
+  WaveFormatWrapper(WAVEFORMATEXTENSIBLE* p)
+      : ptr_(reinterpret_cast<WAVEFORMATEX*>(p)) {}
+  WaveFormatWrapper(WAVEFORMATEX* p) : ptr_(p) {}
+  ~WaveFormatWrapper() = default;
+
+  operator WAVEFORMATEX*() const { return ptr_; }
+  WAVEFORMATEX* operator->() const { return ptr_; }
+  WAVEFORMATEX* get() const { return ptr_; }
+  WAVEFORMATEXTENSIBLE* GetExtensible() const;
+
+  bool IsExtensible() const;
+  bool IsPcm() const;
+  bool IsFloat() const;
+  size_t size() const;
+
+ private:
+  WAVEFORMATEX* ptr_;
+};
+
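+// Example usage (a sketch; `format` would typically be filled in by
+// GetSharedModeMixFormat() below rather than default-initialized):
+//
+//   WAVEFORMATEXTENSIBLE format = {};
+//   WaveFormatWrapper wrapped(&format);
+//   if (wrapped.IsExtensible()) {
+//     RTC_LOG(LS_INFO)
+//         << wrapped.GetExtensible()->Samples.wValidBitsPerSample;
+//   } else {
+//     RTC_LOG(LS_INFO) << wrapped->wBitsPerSample;
+//   }
+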
+// Returns true if Windows Core Audio is supported.
+// Always verify that this method returns true before using any of the
+// other methods in this class.
+bool IsSupported();
+
+// Returns true if the Multimedia Class Scheduler service (MMCSS) is
+// supported. MMCSS enables multimedia applications to ensure that their
+// time-sensitive processing receives prioritized access to CPU resources
+// without denying CPU resources to lower-priority applications.
+bool IsMMCSSSupported();
+
+// The MMDevice API lets clients discover the audio endpoint devices in the
+// system and determine which devices are suitable for the application to use.
+// Header file Mmdeviceapi.h defines the interfaces in the MMDevice API.
+
+// Number of active audio devices in the specified data flow direction.
+// Set `data_flow` to eAll to retrieve the total number of active audio
+// devices.
+int NumberOfActiveDevices(EDataFlow data_flow);
+
+// Returns 1, 2, or 3 depending on what version of IAudioClient the platform
+// supports.
+// Example: IAudioClient2 is supported on Windows 8 and higher => 2 is
+// returned.
+uint32_t GetAudioClientVersion();
+
+// Creates an IMMDeviceEnumerator interface which provides methods for
+// enumerating audio endpoint devices.
+// TODO(henrika): IMMDeviceEnumerator::RegisterEndpointNotificationCallback.
+Microsoft::WRL::ComPtr<IMMDeviceEnumerator> CreateDeviceEnumerator();
+
+// These functions return the unique device id of the default or
+// communications input/output device, or an empty string if no such device
+// exists or if the device has been disabled.
+std::string GetDefaultInputDeviceID();
+std::string GetDefaultOutputDeviceID();
+std::string GetCommunicationsInputDeviceID();
+std::string GetCommunicationsOutputDeviceID();
+
+// Creates an IMMDevice interface corresponding to the unique device id in
+// `device_id`, or by data-flow direction and role if `device_id` is set to
+// AudioDeviceName::kDefaultDeviceId.
+Microsoft::WRL::ComPtr<IMMDevice> CreateDevice(absl::string_view device_id,
+                                               EDataFlow data_flow,
+                                               ERole role);
+
+// Returns the unique ID and user-friendly name of a given endpoint device.
+// Example: "{0.0.1.00000000}.{8db6020f-18e3-4f25-b6f5-7726c9122574}", and
+// "Microphone (Realtek High Definition Audio)".
+webrtc::AudioDeviceName GetDeviceName(IMMDevice* device);
+
+// Gets the user-friendly name of the endpoint device which is represented
+// by a unique id in `device_id`, or by data-flow direction and role if
+// `device_id` is set to AudioDeviceName::kDefaultDeviceId.
+std::string GetFriendlyName(absl::string_view device_id,
+                            EDataFlow data_flow,
+                            ERole role);
+
+// Query if the audio device is a rendering device or a capture device.
+EDataFlow GetDataFlow(IMMDevice* device);
+
+// Enumerates all input devices and adds the names (friendly name and unique
+// device id) to the list in `device_names`.
+bool GetInputDeviceNames(webrtc::AudioDeviceNames* device_names);
+
+// Enumerates all output devices and adds the names (friendly name and unique
+// device id) to the list in `device_names`.
+bool GetOutputDeviceNames(webrtc::AudioDeviceNames* device_names);
+
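+// Example usage (a sketch; assumes a COM-initialized thread, e.g. via
+// ScopedCOMInitializer, and that error handling is elided):
+//
+//   if (IsSupported() && NumberOfActiveDevices(eCapture) > 0) {
+//     webrtc::AudioDeviceNames names;
+//     if (GetInputDeviceNames(&names)) {
+//       for (const auto& name : names) {
+//         RTC_LOG(LS_INFO) << name.device_name << " : " << name.unique_id;
+//       }
+//     }
+//   }
+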
+// The Windows Audio Session API (WASAPI) enables client applications to
+// manage the flow of audio data between the application and an audio endpoint
+// device. Header files Audioclient.h and Audiopolicy.h define the WASAPI
+// interfaces.
+
+// Creates an IAudioSessionManager2 interface for the specified `device`.
+// This interface provides access to e.g. the IAudioSessionEnumerator.
+Microsoft::WRL::ComPtr<IAudioSessionManager2> CreateSessionManager2(
+    IMMDevice* device);
+
+// Creates an IAudioSessionEnumerator interface for the specified `device`.
+// The client can use the interface to enumerate audio sessions on the audio
+// device.
+Microsoft::WRL::ComPtr<IAudioSessionEnumerator> CreateSessionEnumerator(
+    IMMDevice* device);
+
+// Number of active audio sessions for the given `device`. Expired or inactive
+// sessions are not included.
+int NumberOfActiveSessions(IMMDevice* device);
+
+// Creates an IAudioClient instance for a specific device or the default
+// device specified by data-flow direction and role.
+Microsoft::WRL::ComPtr<IAudioClient> CreateClient(absl::string_view device_id,
+                                                  EDataFlow data_flow,
+                                                  ERole role);
+Microsoft::WRL::ComPtr<IAudioClient2> CreateClient2(
+    absl::string_view device_id,
+    EDataFlow data_flow,
+    ERole role);
+Microsoft::WRL::ComPtr<IAudioClient3> CreateClient3(
+    absl::string_view device_id,
+    EDataFlow data_flow,
+    ERole role);
+
+// Sets the AudioCategory_Communications category. Should be called before
+// GetSharedModeMixFormat() and IsFormatSupported(). The `client` argument must
+// be an IAudioClient2 or IAudioClient3 interface pointer, hence this is only
+// supported on Windows 8 and above.
+// TODO(henrika): evaluate effect (if any).
+HRESULT SetClientProperties(IAudioClient2* client);
+
+// Returns the buffer size limits of the hardware audio engine in
+// 100-nanosecond units given a specified `format`. Does not require prior
+// audio stream initialization. The `client` argument must be an IAudioClient2
+// or IAudioClient3 interface pointer, hence this is only supported on
+// Windows 8 and above.
+// TODO(henrika): always fails with AUDCLNT_E_OFFLOAD_MODE_ONLY.
+HRESULT GetBufferSizeLimits(IAudioClient2* client,
+                            const WAVEFORMATEXTENSIBLE* format,
+                            REFERENCE_TIME* min_buffer_duration,
+                            REFERENCE_TIME* max_buffer_duration);
+
+// Get the mix format that the audio engine uses internally for processing
+// of shared-mode streams. The client can call this method before calling
+// IAudioClient::Initialize. When creating a shared-mode stream for an audio
+// endpoint device, the Initialize method always accepts the stream format
+// obtained by this method.
+HRESULT GetSharedModeMixFormat(IAudioClient* client,
+                               WAVEFORMATEXTENSIBLE* format);
+
+// Returns true if the specified `client` supports the format in `format`
+// for the given `share_mode` (shared or exclusive). The client can call this
+// method before calling IAudioClient::Initialize.
+bool IsFormatSupported(IAudioClient* client,
+                       AUDCLNT_SHAREMODE share_mode,
+                       const WAVEFORMATEXTENSIBLE* format);
+
+// For a shared-mode stream, the audio engine periodically processes the
+// data in the endpoint buffer at the period obtained in `device_period`.
+// For an exclusive-mode stream, `device_period` corresponds to the minimum
+// time interval between successive processing passes by the endpoint device.
+// This period plus the stream latency between the buffer and endpoint device
+// represents the minimum possible latency that an audio application can
+// achieve. The time in `device_period` is expressed in 100-nanosecond units.
+HRESULT GetDevicePeriod(IAudioClient* client,
+                        AUDCLNT_SHAREMODE share_mode,
+                        REFERENCE_TIME* device_period);
+
+// Returns the range of periodicities supported by the engine for the
+// specified stream `format`. The periodicity of the engine is the rate at
+// which the engine wakes an event-driven audio client to transfer audio data
+// to or from the engine. Can be used for low-latency support on some devices.
+// The `client` argument must be an IAudioClient3 interface pointer, hence
+// this is only supported on Windows 10 and above.
+HRESULT GetSharedModeEnginePeriod(IAudioClient3* client3,
+                                  const WAVEFORMATEXTENSIBLE* format,
+                                  uint32_t* default_period_in_frames,
+                                  uint32_t* fundamental_period_in_frames,
+                                  uint32_t* min_period_in_frames,
+                                  uint32_t* max_period_in_frames);
+
+// Get the preferred audio parameters for the given `client` corresponding to
+// the stream format that the audio engine uses for its internal processing of
+// shared-mode streams. The acquired values should only be utilized for
+// shared-mode streams since there are no preferred settings for an
+// exclusive-mode stream.
+HRESULT GetPreferredAudioParameters(IAudioClient* client,
+                                    webrtc::AudioParameters* params);
+// As above but override the preferred sample rate and use `sample_rate`
+// instead. Intended mainly for testing purposes and in combination with rate
+// conversion.
+HRESULT GetPreferredAudioParameters(IAudioClient* client,
+                                    webrtc::AudioParameters* params,
+                                    uint32_t sample_rate);
+
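+// Example usage (a sketch; error handling elided):
+//
+//   auto client = CreateClient(AudioDeviceName::kDefaultDeviceId, eRender,
+//                              eConsole);
+//   webrtc::AudioParameters params;
+//   if (SUCCEEDED(GetPreferredAudioParameters(client.Get(), &params))) {
+//     RTC_LOG(LS_INFO) << "sample rate: " << params.sample_rate()
+//                      << ", channels: " << params.channels();
+//   }
+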
+// After activating an IAudioClient interface on an audio endpoint device,
+// the client must initialize it once, and only once, to set up the audio
+// stream between the client and the device. In shared mode, the client
+// connects indirectly through the audio engine which does the mixing.
+// If a valid event is provided in `event_handle`, the client will be
+// initialized for event-driven buffer handling. If `event_handle` is set to
+// nullptr, event-driven buffer handling is not utilized. To achieve the
+// minimum stream latency between the client application and audio endpoint
+// device, set `buffer_duration` to 0. A client has the option of requesting a
+// buffer size that is larger than what is strictly necessary to make timing
+// glitches rare or nonexistent. Increasing the buffer size does not
+// necessarily increase the stream latency. Each unit of reference time is
+// 100 nanoseconds. The `auto_convert_pcm` parameter can be used for testing
+// purposes to ensure that the sample rate of the client side does not have to
+// match the audio engine mix format. If `auto_convert_pcm` is set to true, a
+// rate converter will be inserted to convert between the sample rate in
+// `format` and the preferred rate given by GetPreferredAudioParameters().
+// The output parameter `endpoint_buffer_size` contains the size of the
+// endpoint buffer and it is expressed as the number of audio frames the
+// buffer can hold.
+HRESULT SharedModeInitialize(IAudioClient* client,
+                             const WAVEFORMATEXTENSIBLE* format,
+                             HANDLE event_handle,
+                             REFERENCE_TIME buffer_duration,
+                             bool auto_convert_pcm,
+                             uint32_t* endpoint_buffer_size);
+
+// Works as SharedModeInitialize() but adds support for using smaller engine
+// periods than the default period.
+// The `client` argument must be an IAudioClient3 interface pointer, hence
+// this is only supported on Windows 10 and above.
+// TODO(henrika): can probably be merged into SharedModeInitialize() to avoid
+// duplicating code. Keeping as separate method for now until decided if we
+// need low-latency support.
+HRESULT SharedModeInitializeLowLatency(IAudioClient3* client,
+                                       const WAVEFORMATEXTENSIBLE* format,
+                                       HANDLE event_handle,
+                                       uint32_t period_in_frames,
+                                       bool auto_convert_pcm,
+                                       uint32_t* endpoint_buffer_size);
+
+// Creates an IAudioRenderClient client for an existing IAudioClient given by
+// `client`. The IAudioRenderClient interface enables a client to write
+// output data to a rendering endpoint buffer. The methods in this interface
+// manage the movement of data packets that contain audio-rendering data.
+Microsoft::WRL::ComPtr<IAudioRenderClient> CreateRenderClient(
+    IAudioClient* client);
+
+// Creates an IAudioCaptureClient client for an existing IAudioClient given by
+// `client`. The IAudioCaptureClient interface enables a client to read
+// input data from a capture endpoint buffer. The methods in this interface
+// manage the movement of data packets that contain capture data.
+Microsoft::WRL::ComPtr<IAudioCaptureClient> CreateCaptureClient(
+    IAudioClient* client);
+
+// Creates an IAudioClock interface for an existing IAudioClient given by
+// `client`. The IAudioClock interface enables a client to monitor a stream's
+// data rate and the current position in the stream.
+Microsoft::WRL::ComPtr<IAudioClock> CreateAudioClock(IAudioClient* client);
+
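+// Example of a minimal shared-mode render setup using the functions above
+// (a sketch; all error handling elided):
+//
+//   auto client = CreateClient(AudioDeviceName::kDefaultDeviceId, eRender,
+//                              eConsole);
+//   WAVEFORMATEXTENSIBLE format;
+//   GetSharedModeMixFormat(client.Get(), &format);
+//   uint32_t endpoint_buffer_size = 0;
+//   SharedModeInitialize(client.Get(), &format, nullptr, 0, false,
+//                        &endpoint_buffer_size);
+//   auto render_client = CreateRenderClient(client.Get());
+//   // Pre-fill the endpoint buffer with silence before starting the stream.
+//   FillRenderEndpointBufferWithSilence(client.Get(), render_client.Get());
+//   client->Start();
+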
+// Creates an IAudioSessionControl interface for an existing IAudioClient
+// given by `client`. The IAudioSessionControl interface enables a client to
+// configure the control parameters for an audio session and to monitor events
+// in the session.
+Microsoft::WRL::ComPtr<IAudioSessionControl> CreateAudioSessionControl(
+    IAudioClient* client);
+
+// Creates an ISimpleAudioVolume interface for an existing IAudioClient given
+// by `client`. This interface enables a client to control the master volume
+// level of an active audio session.
+Microsoft::WRL::ComPtr<ISimpleAudioVolume> CreateSimpleAudioVolume(
+    IAudioClient* client);
+
+// Fills up the endpoint rendering buffer with silence for an existing
+// IAudioClient given by `client` and a corresponding IAudioRenderClient
+// given by `render_client`.
+bool FillRenderEndpointBufferWithSilence(IAudioClient* client,
+                                         IAudioRenderClient* render_client);
+
+// Prints/logs all fields of the format structure in `format`.
+// Also supports extended versions (WAVEFORMATEXTENSIBLE).
+std::string WaveFormatToString(WaveFormatWrapper format);
+
+// Converts a Windows-internal REFERENCE_TIME (100-nanosecond units) into a
+// generic webrtc::TimeDelta which then can be converted to any time unit.
+webrtc::TimeDelta ReferenceTimeToTimeDelta(REFERENCE_TIME time);
+
+// Converts a size expressed in number of audio frames, `num_frames`, into
+// milliseconds given a specified `sample_rate`.
+double FramesToMilliseconds(uint32_t num_frames, uint16_t sample_rate);
+
+// Converts a COM error into a human-readable string.
+std::string ErrorToString(const _com_error& error);
+
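+// Worked example for the conversions above (a sketch): a typical shared-mode
+// engine period of 10 ms at 48 kHz corresponds to 480 audio frames, so
+// FramesToMilliseconds(480, 48000) yields 10.0. Likewise, a REFERENCE_TIME
+// of 100000 (100-nanosecond units) passed to ReferenceTimeToTimeDelta() also
+// corresponds to 10 ms.
+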
+}  // namespace core_audio_utility
+}  // namespace webrtc_win
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_UTILITY_WIN_H_
diff --git a/third_party/libwebrtc/modules/audio_device/win/core_audio_utility_win_unittest.cc b/third_party/libwebrtc/modules/audio_device/win/core_audio_utility_win_unittest.cc
new file mode 100644
index 0000000000..fc4a610eef
--- /dev/null
+++ b/third_party/libwebrtc/modules/audio_device/win/core_audio_utility_win_unittest.cc
@@ -0,0 +1,877 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/win/core_audio_utility_win.h"
+
+#include "rtc_base/arraysize.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/win/scoped_com_initializer.h"
+#include "rtc_base/win/windows_version.h"
+#include "test/gtest.h"
+
+using Microsoft::WRL::ComPtr;
+using webrtc::AudioDeviceName;
+
+namespace webrtc {
+namespace webrtc_win {
+namespace {
+
+#define ABORT_TEST_IF_NOT(requirements_satisfied)                        \
+  do {                                                                   \
+    bool fail = false;                                                   \
+    if (ShouldAbortTest(requirements_satisfied, #requirements_satisfied, \
+                        &fail)) {                                        \
+      if (fail)                                                          \
+        FAIL();                                                          \
+      else                                                               \
+        return;                                                          \
+    }                                                                    \
+  } while (false)
+
+bool ShouldAbortTest(bool requirements_satisfied,
+                     const char* requirements_expression,
+                     bool* should_fail) {
+  if (!requirements_satisfied) {
+    RTC_LOG(LS_ERROR) << "Requirement(s) not satisfied ("
+                      << requirements_expression << ")";
+    // TODO(henrika): improve hard-coded condition to determine if test should
+    // fail or be ignored. Could use e.g. a command-line argument here to
+    // determine if the test should fail or not.
+    *should_fail = false;
+    return true;
+  }
+  *should_fail = false;
+  return false;
+}
+
+}  // namespace
+
+// CoreAudioUtilityWinTest test fixture.
+class CoreAudioUtilityWinTest : public ::testing::Test {
+ protected:
+  CoreAudioUtilityWinTest() : com_init_(ScopedCOMInitializer::kMTA) {
+    // We must initialize the COM library on a thread before calling any of
+    // the library functions. All COM functions will return CO_E_NOTINITIALIZED
+    // otherwise.
+    EXPECT_TRUE(com_init_.Succeeded());
+
+    // Configure logging.
+    rtc::LogMessage::LogToDebug(rtc::LS_INFO);
+    rtc::LogMessage::LogTimestamps();
+    rtc::LogMessage::LogThreads();
+  }
+
+  virtual ~CoreAudioUtilityWinTest() {}
+
+  bool DevicesAvailable() {
+    return core_audio_utility::IsSupported() &&
+           core_audio_utility::NumberOfActiveDevices(eCapture) > 0 &&
+           core_audio_utility::NumberOfActiveDevices(eRender) > 0;
+  }
+
+ private:
+  ScopedCOMInitializer com_init_;
+};
+
+TEST_F(CoreAudioUtilityWinTest, WaveFormatWrapper) {
+  // Use default constructor for WAVEFORMATEX and verify its size.
+  WAVEFORMATEX format = {};
+  core_audio_utility::WaveFormatWrapper wave_format(&format);
+  EXPECT_FALSE(wave_format.IsExtensible());
+  EXPECT_EQ(wave_format.size(), sizeof(WAVEFORMATEX));
+  EXPECT_EQ(wave_format->cbSize, 0);
+
+  // Ensure that the stand-alone WAVEFORMATEX structure has a valid format tag
+  // and that all accessors work.
+  format.wFormatTag = WAVE_FORMAT_PCM;
+  EXPECT_FALSE(wave_format.IsExtensible());
+  EXPECT_EQ(wave_format.size(), sizeof(WAVEFORMATEX));
+  EXPECT_EQ(wave_format.get()->wFormatTag, WAVE_FORMAT_PCM);
+  EXPECT_EQ(wave_format->wFormatTag, WAVE_FORMAT_PCM);
+
+  // Next, ensure that the size is valid. Stand-alone is not extended.
+  EXPECT_EQ(wave_format.size(), sizeof(WAVEFORMATEX));
+
+  // Verify format types for the stand-alone version.
+  EXPECT_TRUE(wave_format.IsPcm());
+  EXPECT_FALSE(wave_format.IsFloat());
+  format.wFormatTag = WAVE_FORMAT_IEEE_FLOAT;
+  EXPECT_TRUE(wave_format.IsFloat());
+}
+
+TEST_F(CoreAudioUtilityWinTest, WaveFormatWrapperExtended) {
+  // Use default constructor for WAVEFORMATEXTENSIBLE and verify that it
+  // results in the same size as for WAVEFORMATEX even if the size of
+  // `format_ex` equals the size of WAVEFORMATEXTENSIBLE.
+  WAVEFORMATEXTENSIBLE format_ex = {};
+  core_audio_utility::WaveFormatWrapper wave_format_ex(&format_ex);
+  EXPECT_FALSE(wave_format_ex.IsExtensible());
+  EXPECT_EQ(wave_format_ex.size(), sizeof(WAVEFORMATEX));
+  EXPECT_EQ(wave_format_ex->cbSize, 0);
+
+  // Ensure that the extended structure has a valid format tag and that all
+  // accessors work.
+  format_ex.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
+  EXPECT_FALSE(wave_format_ex.IsExtensible());
+  EXPECT_EQ(wave_format_ex.size(), sizeof(WAVEFORMATEX));
+  EXPECT_EQ(wave_format_ex->wFormatTag, WAVE_FORMAT_EXTENSIBLE);
+  EXPECT_EQ(wave_format_ex.get()->wFormatTag, WAVE_FORMAT_EXTENSIBLE);
+
+  // Next, ensure that the size is valid (sum of stand-alone and extended).
+  // Now the structure qualifies as extended.
+  format_ex.Format.cbSize =
+      sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX);
+  EXPECT_TRUE(wave_format_ex.IsExtensible());
+  EXPECT_EQ(wave_format_ex.size(), sizeof(WAVEFORMATEXTENSIBLE));
+  EXPECT_TRUE(wave_format_ex.GetExtensible());
+  EXPECT_EQ(wave_format_ex.GetExtensible()->Format.wFormatTag,
+            WAVE_FORMAT_EXTENSIBLE);
+
+  // Verify format types for the extended version.
+  EXPECT_FALSE(wave_format_ex.IsPcm());
+  format_ex.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
+  EXPECT_TRUE(wave_format_ex.IsPcm());
+  EXPECT_FALSE(wave_format_ex.IsFloat());
+  format_ex.SubFormat = KSDATAFORMAT_SUBTYPE_IEEE_FLOAT;
+  EXPECT_TRUE(wave_format_ex.IsFloat());
+}
+
+TEST_F(CoreAudioUtilityWinTest, NumberOfActiveDevices) {
+  ABORT_TEST_IF_NOT(DevicesAvailable());
+  int render_devices = core_audio_utility::NumberOfActiveDevices(eRender);
+  EXPECT_GT(render_devices, 0);
+  int capture_devices = core_audio_utility::NumberOfActiveDevices(eCapture);
+  EXPECT_GT(capture_devices, 0);
+  int total_devices = core_audio_utility::NumberOfActiveDevices(eAll);
+  EXPECT_EQ(total_devices, render_devices + capture_devices);
+}
+
+TEST_F(CoreAudioUtilityWinTest, GetAudioClientVersion) {
+  uint32_t client_version = core_audio_utility::GetAudioClientVersion();
+  EXPECT_GE(client_version, 1u);
+  EXPECT_LE(client_version, 3u);
+}
+
+TEST_F(CoreAudioUtilityWinTest, CreateDeviceEnumerator) {
+  ABORT_TEST_IF_NOT(DevicesAvailable());
+  ComPtr<IMMDeviceEnumerator> enumerator =
+      core_audio_utility::CreateDeviceEnumerator();
+  EXPECT_TRUE(enumerator.Get());
+}
+
+TEST_F(CoreAudioUtilityWinTest, GetDefaultInputDeviceID) {
+  ABORT_TEST_IF_NOT(DevicesAvailable());
+  std::string default_device_id =
+      core_audio_utility::GetDefaultInputDeviceID();
+  EXPECT_FALSE(default_device_id.empty());
+}
+
+TEST_F(CoreAudioUtilityWinTest, GetDefaultOutputDeviceID) {
+  ABORT_TEST_IF_NOT(DevicesAvailable());
+  std::string default_device_id =
+      core_audio_utility::GetDefaultOutputDeviceID();
+  EXPECT_FALSE(default_device_id.empty());
+}
+
+TEST_F(CoreAudioUtilityWinTest, GetCommunicationsInputDeviceID) {
+  ABORT_TEST_IF_NOT(DevicesAvailable());
+  std::string default_device_id =
+      core_audio_utility::GetCommunicationsInputDeviceID();
+  EXPECT_FALSE(default_device_id.empty());
+}
+
+TEST_F(CoreAudioUtilityWinTest, GetCommunicationsOutputDeviceID) {
+  ABORT_TEST_IF_NOT(DevicesAvailable());
+  std::string default_device_id =
+      core_audio_utility::GetCommunicationsOutputDeviceID();
+  EXPECT_FALSE(default_device_id.empty());
+}
+
+TEST_F(CoreAudioUtilityWinTest, CreateDefaultDevice) {
+  ABORT_TEST_IF_NOT(DevicesAvailable());
+
+  struct {
+    EDataFlow flow;
+    ERole role;
+  } data[] = {{eRender, eConsole},         {eRender, eCommunications},
+              {eRender, eMultimedia},      {eCapture, eConsole},
+              {eCapture, eCommunications}, {eCapture, eMultimedia}};
+
+  // Create default devices for all flow/role combinations above.
+  ComPtr<IMMDevice> audio_device;
+  for (size_t i = 0; i < arraysize(data); ++i) {
+    audio_device = core_audio_utility::CreateDevice(
+        AudioDeviceName::kDefaultDeviceId, data[i].flow, data[i].role);
+    EXPECT_TRUE(audio_device.Get());
+    EXPECT_EQ(data[i].flow,
+              core_audio_utility::GetDataFlow(audio_device.Get()));
+  }
+
+  // Only eRender and eCapture are allowed as flow parameter.
+  audio_device = core_audio_utility::CreateDevice(
+      AudioDeviceName::kDefaultDeviceId, eAll, eConsole);
+  EXPECT_FALSE(audio_device.Get());
+}
+
+TEST_F(CoreAudioUtilityWinTest, CreateDevice) {
+  ABORT_TEST_IF_NOT(DevicesAvailable());
+
+  // Get name and ID of default device used for playback.
+  ComPtr<IMMDevice> default_render_device = core_audio_utility::CreateDevice(
+      AudioDeviceName::kDefaultDeviceId, eRender, eConsole);
+  AudioDeviceName default_render_name =
+      core_audio_utility::GetDeviceName(default_render_device.Get());
+  EXPECT_TRUE(default_render_name.IsValid());
+
+  // Use the unique ID as input to CreateDevice() and create a corresponding
+  // IMMDevice. The data-flow direction and role parameters are ignored for
+  // this scenario.
+  ComPtr<IMMDevice> audio_device = core_audio_utility::CreateDevice(
+      default_render_name.unique_id, EDataFlow(), ERole());
+  EXPECT_TRUE(audio_device.Get());
+
+  // Verify that the two IMMDevice interfaces represent the same endpoint
+  // by comparing their unique IDs.
+  AudioDeviceName device_name =
+      core_audio_utility::GetDeviceName(audio_device.Get());
+  EXPECT_EQ(default_render_name.unique_id, device_name.unique_id);
+}
+
+TEST_F(CoreAudioUtilityWinTest, GetDefaultDeviceName) {
+  ABORT_TEST_IF_NOT(DevicesAvailable());
+
+  struct {
+    EDataFlow flow;
+    ERole role;
+  } data[] = {{eRender, eConsole},
+              {eRender, eCommunications},
+              {eCapture, eConsole},
+              {eCapture, eCommunications}};
+
+  // Get name and ID of default devices for all flow/role combinations above.
+  ComPtr<IMMDevice> audio_device;
+  AudioDeviceName device_name;
+  for (size_t i = 0; i < arraysize(data); ++i) {
+    audio_device = core_audio_utility::CreateDevice(
+        AudioDeviceName::kDefaultDeviceId, data[i].flow, data[i].role);
+    device_name = core_audio_utility::GetDeviceName(audio_device.Get());
+    EXPECT_TRUE(device_name.IsValid());
+  }
+}
+
+TEST_F(CoreAudioUtilityWinTest, GetFriendlyName) {
+  ABORT_TEST_IF_NOT(DevicesAvailable());
+
+  // Get name and ID of default device used for recording.
+  ComPtr<IMMDevice> audio_device = core_audio_utility::CreateDevice(
+      AudioDeviceName::kDefaultDeviceId, eCapture, eConsole);
+  AudioDeviceName device_name =
+      core_audio_utility::GetDeviceName(audio_device.Get());
+  EXPECT_TRUE(device_name.IsValid());
+
+  // Use unique ID as input to GetFriendlyName() and compare the result
+  // with the already obtained friendly name for the default capture device.
+  std::string friendly_name = core_audio_utility::GetFriendlyName(
+      device_name.unique_id, eCapture, eConsole);
+  EXPECT_EQ(friendly_name, device_name.device_name);
+
+  // Same test as above but for playback.
+  audio_device = core_audio_utility::CreateDevice(
+      AudioDeviceName::kDefaultDeviceId, eRender, eConsole);
+  device_name = core_audio_utility::GetDeviceName(audio_device.Get());
+  friendly_name = core_audio_utility::GetFriendlyName(device_name.unique_id,
+                                                      eRender, eConsole);
+  EXPECT_EQ(friendly_name, device_name.device_name);
+}
+
+TEST_F(CoreAudioUtilityWinTest, GetInputDeviceNames) {
+  ABORT_TEST_IF_NOT(DevicesAvailable());
+
+  webrtc::AudioDeviceNames device_names;
+  EXPECT_TRUE(core_audio_utility::GetInputDeviceNames(&device_names));
+  // Number of elements in the list should be two more than the number of
+  // active devices since we always add default and default communication
+  // devices on index 0 and 1.
+  EXPECT_EQ(static_cast<int>(device_names.size()),
+            2 + core_audio_utility::NumberOfActiveDevices(eCapture));
+}
+
+TEST_F(CoreAudioUtilityWinTest, GetOutputDeviceNames) {
+  ABORT_TEST_IF_NOT(DevicesAvailable());
+
+  webrtc::AudioDeviceNames device_names;
+  EXPECT_TRUE(core_audio_utility::GetOutputDeviceNames(&device_names));
+  // Number of elements in the list should be two more than the number of
+  // active devices since we always add default and default communication
+  // devices on index 0 and 1.
+  EXPECT_EQ(static_cast<int>(device_names.size()),
+            2 + core_audio_utility::NumberOfActiveDevices(eRender));
+}
+
+TEST_F(CoreAudioUtilityWinTest, CreateSessionManager2) {
+  ABORT_TEST_IF_NOT(DevicesAvailable() &&
+                    rtc::rtc_win::GetVersion() >= rtc::rtc_win::VERSION_WIN7);
+
+  EDataFlow data_flow[] = {eRender, eCapture};
+
+  // Obtain reference to an IAudioSessionManager2 interface for a default
+  // audio endpoint device specified by two different data flows and the
+  // `eConsole` role.
+  for (size_t i = 0; i < arraysize(data_flow); ++i) {
+    ComPtr<IMMDevice> device(core_audio_utility::CreateDevice(
+        AudioDeviceName::kDefaultDeviceId, data_flow[i], eConsole));
+    EXPECT_TRUE(device.Get());
+    ComPtr<IAudioSessionManager2> session_manager =
+        core_audio_utility::CreateSessionManager2(device.Get());
+    EXPECT_TRUE(session_manager.Get());
+  }
+}
+
+TEST_F(CoreAudioUtilityWinTest, CreateSessionEnumerator) {
+  ABORT_TEST_IF_NOT(DevicesAvailable() &&
+                    rtc::rtc_win::GetVersion() >= rtc::rtc_win::VERSION_WIN7);
+
+  EDataFlow data_flow[] = {eRender, eCapture};
+
+  // Obtain reference to an IAudioSessionEnumerator interface for a default
+  // audio endpoint device specified by two different data flows and the
+  // `eConsole` role.
+  for (size_t i = 0; i < arraysize(data_flow); ++i) {
+    ComPtr<IMMDevice> device(core_audio_utility::CreateDevice(
+        AudioDeviceName::kDefaultDeviceId, data_flow[i], eConsole));
+    EXPECT_TRUE(device.Get());
+    ComPtr<IAudioSessionEnumerator> session_enumerator =
+        core_audio_utility::CreateSessionEnumerator(device.Get());
+    EXPECT_TRUE(session_enumerator.Get());
+
+    // Perform a sanity test of the interface by asking for the total number
+    // of audio sessions that are open on the audio device. Note that we do
+    // not check if the session is active or not.
+    int session_count = 0;
+    EXPECT_TRUE(SUCCEEDED(session_enumerator->GetCount(&session_count)));
+    EXPECT_GE(session_count, 0);
+  }
+}
+
+TEST_F(CoreAudioUtilityWinTest, NumberOfActiveSessions) {
+  ABORT_TEST_IF_NOT(DevicesAvailable() &&
+                    rtc::rtc_win::GetVersion() >= rtc::rtc_win::VERSION_WIN7);
+
+  EDataFlow data_flow[] = {eRender, eCapture};
+
+  // Count the number of active audio sessions for a default audio endpoint
+  // device specified by two different data flows and the `eConsole` role.
+  // Ensure that the number of active audio sessions is less than or equal to
+  // the total number of audio sessions on that same device.
+  for (size_t i = 0; i < arraysize(data_flow); ++i) {
+    // Create an audio endpoint device.
+    ComPtr<IMMDevice> device(core_audio_utility::CreateDevice(
+        AudioDeviceName::kDefaultDeviceId, data_flow[i], eConsole));
+    EXPECT_TRUE(device.Get());
+
+    // Ask for the total number of audio sessions on the created device.
+    ComPtr<IAudioSessionEnumerator> session_enumerator =
+        core_audio_utility::CreateSessionEnumerator(device.Get());
+    EXPECT_TRUE(session_enumerator.Get());
+    int total_session_count = 0;
+    EXPECT_TRUE(
+        SUCCEEDED(session_enumerator->GetCount(&total_session_count)));
+    EXPECT_GE(total_session_count, 0);
+
+    // Use NumberOfActiveSessions and get the number of active audio sessions.
+    int active_session_count =
+        core_audio_utility::NumberOfActiveSessions(device.Get());
+    EXPECT_LE(active_session_count, total_session_count);
+  }
+}
+
+TEST_F(CoreAudioUtilityWinTest, CreateClient) {
+  ABORT_TEST_IF_NOT(DevicesAvailable());
+
+  EDataFlow data_flow[] = {eRender, eCapture};
+
+  // Obtain reference to an IAudioClient interface for a default audio
+  // endpoint device specified by two different data flows and the `eConsole`
+  // role.
+  for (size_t i = 0; i < arraysize(data_flow); ++i) {
+    ComPtr<IAudioClient> client = core_audio_utility::CreateClient(
+        AudioDeviceName::kDefaultDeviceId, data_flow[i], eConsole);
+    EXPECT_TRUE(client.Get());
+  }
+}
+
+TEST_F(CoreAudioUtilityWinTest, CreateClient2) {
+  ABORT_TEST_IF_NOT(DevicesAvailable() &&
+                    core_audio_utility::GetAudioClientVersion() >= 2);
+
+  EDataFlow data_flow[] = {eRender, eCapture};
+
+  // Obtain reference to an IAudioClient2 interface for a default audio
+  // endpoint device specified by two different data flows and the `eConsole`
+  // role.
+  for (size_t i = 0; i < arraysize(data_flow); ++i) {
+    ComPtr<IAudioClient2> client2 = core_audio_utility::CreateClient2(
+        AudioDeviceName::kDefaultDeviceId, data_flow[i], eConsole);
+    EXPECT_TRUE(client2.Get());
+  }
+}
+
+TEST_F(CoreAudioUtilityWinTest, CreateClient3) {
+  ABORT_TEST_IF_NOT(DevicesAvailable() &&
+                    core_audio_utility::GetAudioClientVersion() >= 3);
+
+  EDataFlow data_flow[] = {eRender, eCapture};
+
+  // Obtain reference to an IAudioClient3 interface for a default audio
+  // endpoint device specified by two different data flows and the `eConsole`
+  // role.
+  for (size_t i = 0; i < arraysize(data_flow); ++i) {
+    ComPtr<IAudioClient3> client3 = core_audio_utility::CreateClient3(
+        AudioDeviceName::kDefaultDeviceId, data_flow[i], eConsole);
+    EXPECT_TRUE(client3.Get());
+  }
+}
+
+TEST_F(CoreAudioUtilityWinTest, SetClientProperties) {
+  ABORT_TEST_IF_NOT(DevicesAvailable() &&
+                    core_audio_utility::GetAudioClientVersion() >= 2);
+
+  ComPtr<IAudioClient2> client2 = core_audio_utility::CreateClient2(
+      AudioDeviceName::kDefaultDeviceId, eRender, eConsole);
+  EXPECT_TRUE(client2.Get());
+  EXPECT_TRUE(
+      SUCCEEDED(core_audio_utility::SetClientProperties(client2.Get())));
+
+  ComPtr<IAudioClient3> client3 = core_audio_utility::CreateClient3(
+      AudioDeviceName::kDefaultDeviceId, eRender, eConsole);
+  EXPECT_TRUE(client3.Get());
+  EXPECT_TRUE(
+      SUCCEEDED(core_audio_utility::SetClientProperties(client3.Get())));
+}
+
+TEST_F(CoreAudioUtilityWinTest, GetSharedModeEnginePeriod) {
+  ABORT_TEST_IF_NOT(DevicesAvailable() &&
+                    core_audio_utility::GetAudioClientVersion() >= 3);
+
+  ComPtr<IAudioClient3> client3 = core_audio_utility::CreateClient3(
+      AudioDeviceName::kDefaultDeviceId, eRender, eConsole);
+  EXPECT_TRUE(client3.Get());
+
+  WAVEFORMATPCMEX format;
+  EXPECT_TRUE(SUCCEEDED(
+      core_audio_utility::GetSharedModeMixFormat(client3.Get(), &format)));
+
+  uint32_t default_period = 0;
+  uint32_t fundamental_period = 0;
+  uint32_t min_period = 0;
+  uint32_t max_period = 0;
+  EXPECT_TRUE(SUCCEEDED(core_audio_utility::GetSharedModeEnginePeriod(
+      client3.Get(), &format, &default_period, &fundamental_period,
+      &min_period, &max_period)));
+}
+
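+// The low-latency path (a sketch, not exercised by these tests): the periods
+// reported by GetSharedModeEnginePeriod() above can be fed directly into
+// SharedModeInitializeLowLatency(), e.g. using the minimum period:
+//
+//   uint32_t endpoint_buffer_size = 0;
+//   core_audio_utility::SharedModeInitializeLowLatency(
+//       client3.Get(), &format, nullptr, min_period, false,
+//       &endpoint_buffer_size);
+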
+// TODO(henrika): figure out why usage of this API always reports
+// AUDCLNT_E_OFFLOAD_MODE_ONLY.
+TEST_F(CoreAudioUtilityWinTest, DISABLED_GetBufferSizeLimits) {
+  ABORT_TEST_IF_NOT(DevicesAvailable() &&
+                    core_audio_utility::GetAudioClientVersion() >= 2);
+
+  ComPtr<IAudioClient2> client2 = core_audio_utility::CreateClient2(
+      AudioDeviceName::kDefaultDeviceId, eRender, eConsole);
+  EXPECT_TRUE(client2.Get());
+
+  WAVEFORMATPCMEX format;
+  EXPECT_TRUE(SUCCEEDED(
+      core_audio_utility::GetSharedModeMixFormat(client2.Get(), &format)));
+
+  REFERENCE_TIME min_buffer_duration = 0;
+  REFERENCE_TIME max_buffer_duration = 0;
+  EXPECT_TRUE(SUCCEEDED(core_audio_utility::GetBufferSizeLimits(
+      client2.Get(), &format, &min_buffer_duration, &max_buffer_duration)));
+}
+
+TEST_F(CoreAudioUtilityWinTest, GetSharedModeMixFormat) {
+  ABORT_TEST_IF_NOT(DevicesAvailable());
+
+  ComPtr<IAudioClient> client = core_audio_utility::CreateClient(
+      AudioDeviceName::kDefaultDeviceId, eRender, eConsole);
+  EXPECT_TRUE(client.Get());
+
+  // Perform a simple sanity test of the acquired format structure.
+  WAVEFORMATEXTENSIBLE format;
+  EXPECT_TRUE(SUCCEEDED(
+      core_audio_utility::GetSharedModeMixFormat(client.Get(), &format)));
+  core_audio_utility::WaveFormatWrapper wformat(&format);
+  EXPECT_GE(wformat->nChannels, 1);
+  EXPECT_GE(wformat->nSamplesPerSec, 8000u);
+  EXPECT_GE(wformat->wBitsPerSample, 16);
+  if (wformat.IsExtensible()) {
+    EXPECT_EQ(wformat->wFormatTag, WAVE_FORMAT_EXTENSIBLE);
+    EXPECT_GE(wformat->cbSize, 22);
+    EXPECT_GE(wformat.GetExtensible()->Samples.wValidBitsPerSample, 16);
+  } else {
+    EXPECT_EQ(wformat->cbSize, 0);
+  }
+}
+
+TEST_F(CoreAudioUtilityWinTest, IsFormatSupported) {
+  ABORT_TEST_IF_NOT(DevicesAvailable());
+
+  // Create a default render client.
+  ComPtr<IAudioClient> client = core_audio_utility::CreateClient(
+      AudioDeviceName::kDefaultDeviceId, eRender, eConsole);
+  EXPECT_TRUE(client.Get());
+
+  // Get the default, shared mode, mixing format.
+  WAVEFORMATEXTENSIBLE format;
+  EXPECT_TRUE(SUCCEEDED(
+      core_audio_utility::GetSharedModeMixFormat(client.Get(), &format)));
+
+  // In shared mode, the audio engine always supports the mix format.
+  EXPECT_TRUE(core_audio_utility::IsFormatSupported(
+      client.Get(), AUDCLNT_SHAREMODE_SHARED, &format));
+
+  // Use an invalid format and verify that it is not supported.
+  format.Format.nSamplesPerSec += 1;
+  EXPECT_FALSE(core_audio_utility::IsFormatSupported(
+      client.Get(), AUDCLNT_SHAREMODE_SHARED, &format));
+}
+
+TEST_F(CoreAudioUtilityWinTest, GetDevicePeriod) {
+  ABORT_TEST_IF_NOT(DevicesAvailable());
+
+  EDataFlow data_flow[] = {eRender, eCapture};
+
+  // Verify that the device periods are valid for the default render and
+  // capture devices.
+  ComPtr<IAudioClient> client;
+  for (size_t i = 0; i < arraysize(data_flow); ++i) {
+    REFERENCE_TIME shared_time_period = 0;
+    REFERENCE_TIME exclusive_time_period = 0;
+    client = core_audio_utility::CreateClient(
+        AudioDeviceName::kDefaultDeviceId, data_flow[i], eConsole);
+    EXPECT_TRUE(client.Get());
+    EXPECT_TRUE(SUCCEEDED(core_audio_utility::GetDevicePeriod(
+        client.Get(), AUDCLNT_SHAREMODE_SHARED, &shared_time_period)));
+    EXPECT_GT(shared_time_period, 0);
+    EXPECT_TRUE(SUCCEEDED(core_audio_utility::GetDevicePeriod(
+        client.Get(), AUDCLNT_SHAREMODE_EXCLUSIVE, &exclusive_time_period)));
+    EXPECT_GT(exclusive_time_period, 0);
+    EXPECT_LE(exclusive_time_period, shared_time_period);
+  }
+}
+
+TEST_F(CoreAudioUtilityWinTest, GetPreferredAudioParameters) {
+  ABORT_TEST_IF_NOT(DevicesAvailable());
+
+  struct {
+    EDataFlow flow;
+    ERole role;
+  } data[] = {{eRender, eConsole},
+              {eRender, eCommunications},
+              {eCapture, eConsole},
+              {eCapture, eCommunications}};
+
+  // Verify that the preferred audio parameters are OK for all flow/role
+  // combinations above.
+  ComPtr<IAudioClient> client;
+  webrtc::AudioParameters params;
+  for (size_t i = 0; i < arraysize(data); ++i) {
+    client = core_audio_utility::CreateClient(
+        AudioDeviceName::kDefaultDeviceId, data[i].flow, data[i].role);
+    EXPECT_TRUE(client.Get());
+    EXPECT_TRUE(SUCCEEDED(core_audio_utility::GetPreferredAudioParameters(
+        client.Get(), &params)));
+    EXPECT_TRUE(params.is_valid());
+    EXPECT_TRUE(params.is_complete());
+  }
+}
+
+TEST_F(CoreAudioUtilityWinTest, SharedModeInitialize) {
+  ABORT_TEST_IF_NOT(DevicesAvailable());
+
+  ComPtr<IAudioClient> client;
+  client = core_audio_utility::CreateClient(AudioDeviceName::kDefaultDeviceId,
+                                            eRender, eConsole);
+  EXPECT_TRUE(client.Get());
+
+  WAVEFORMATPCMEX format;
+  EXPECT_TRUE(SUCCEEDED(
+      core_audio_utility::GetSharedModeMixFormat(client.Get(), &format)));
+
+  // Perform a shared-mode initialization without event-driven buffer
+  // handling.
+  uint32_t endpoint_buffer_size = 0;
+  HRESULT hr = core_audio_utility::SharedModeInitialize(
+      client.Get(), &format, nullptr, 0, false, &endpoint_buffer_size);
+  EXPECT_TRUE(SUCCEEDED(hr));
+  EXPECT_GT(endpoint_buffer_size, 0u);
+
+  // It is only possible to initialize a client once.
+  hr = core_audio_utility::SharedModeInitialize(
+      client.Get(), &format, nullptr, 0, false, &endpoint_buffer_size);
+  EXPECT_FALSE(SUCCEEDED(hr));
+  EXPECT_EQ(hr, AUDCLNT_E_ALREADY_INITIALIZED);
+
+  // Verify that it is possible to reinitialize the client after releasing it
+  // and then creating a new client.
+  client = core_audio_utility::CreateClient(AudioDeviceName::kDefaultDeviceId,
+                                            eRender, eConsole);
+  EXPECT_TRUE(client.Get());
+  hr = core_audio_utility::SharedModeInitialize(
+      client.Get(), &format, nullptr, 0, false, &endpoint_buffer_size);
+  EXPECT_TRUE(SUCCEEDED(hr));
+  EXPECT_GT(endpoint_buffer_size, 0u);
+
+  // Use a non-supported format and verify that initialization fails.
+  // A simple way to emulate an invalid format is to use the shared-mode
+  // mixing format and modify the preferred sample rate.
+  client = core_audio_utility::CreateClient(AudioDeviceName::kDefaultDeviceId,
+                                            eRender, eConsole);
+  EXPECT_TRUE(client.Get());
+  format.Format.nSamplesPerSec = format.Format.nSamplesPerSec + 1;
+  EXPECT_FALSE(core_audio_utility::IsFormatSupported(
+      client.Get(), AUDCLNT_SHAREMODE_SHARED, &format));
+  hr = core_audio_utility::SharedModeInitialize(
+      client.Get(), &format, nullptr, 0, false, &endpoint_buffer_size);
+  EXPECT_TRUE(FAILED(hr));
+  EXPECT_EQ(hr, E_INVALIDARG);
+
+  // Finally, perform a shared-mode initialization using event-driven buffer
+  // handling. The event handle will be signaled when an audio buffer is ready
+  // to be processed by the client (not verified here). The event handle
+  // should be in the non-signaled state.
+  ScopedHandle event_handle(::CreateEvent(nullptr, TRUE, FALSE, nullptr));
+  client = core_audio_utility::CreateClient(AudioDeviceName::kDefaultDeviceId,
+                                            eRender, eConsole);
+  EXPECT_TRUE(client.Get());
+  EXPECT_TRUE(SUCCEEDED(
+      core_audio_utility::GetSharedModeMixFormat(client.Get(), &format)));
+  EXPECT_TRUE(core_audio_utility::IsFormatSupported(
+      client.Get(), AUDCLNT_SHAREMODE_SHARED, &format));
+  hr = core_audio_utility::SharedModeInitialize(
+      client.Get(), &format, event_handle, 0, false, &endpoint_buffer_size);
+  EXPECT_TRUE(SUCCEEDED(hr));
+  EXPECT_GT(endpoint_buffer_size, 0u);
+
+  // TODO(henrika): possibly add test for signature which overrides the
+  // default sample rate.
+}
+
+TEST_F(CoreAudioUtilityWinTest, CreateRenderAndCaptureClients) {
+  ABORT_TEST_IF_NOT(DevicesAvailable());
+
+  EDataFlow data_flow[] = {eRender, eCapture};
+
+  WAVEFORMATPCMEX format;
+  uint32_t endpoint_buffer_size = 0;
+
+  for (size_t i = 0; i < arraysize(data_flow); ++i) {
+    ComPtr<IAudioClient> client;
+    ComPtr<IAudioRenderClient> render_client;
+    ComPtr<IAudioCaptureClient> capture_client;
+
+    // Create a default client for the given data-flow direction.
+    client = core_audio_utility::CreateClient(
+        AudioDeviceName::kDefaultDeviceId, data_flow[i], eConsole);
+    EXPECT_TRUE(client.Get());
+    EXPECT_TRUE(SUCCEEDED(
+        core_audio_utility::GetSharedModeMixFormat(client.Get(), &format)));
+    if (data_flow[i] == eRender) {
+      // It is not possible to create a render client using an uninitialized
+      // client interface.
+      render_client = core_audio_utility::CreateRenderClient(client.Get());
+      EXPECT_FALSE(render_client.Get());
+
+      // Do a proper initialization and verify that it works this time.
+      core_audio_utility::SharedModeInitialize(client.Get(), &format, nullptr,
+                                               0, false,
+                                               &endpoint_buffer_size);
+      render_client = core_audio_utility::CreateRenderClient(client.Get());
+      EXPECT_TRUE(render_client.Get());
+      EXPECT_GT(endpoint_buffer_size, 0u);
+    } else if (data_flow[i] == eCapture) {
+      // It is not possible to create a capture client using an uninitialized
+      // client interface.
+      capture_client = core_audio_utility::CreateCaptureClient(client.Get());
+      EXPECT_FALSE(capture_client.Get());
+
+      // Do a proper initialization and verify that it works this time.
+      core_audio_utility::SharedModeInitialize(client.Get(), &format, nullptr,
+                                               0, false,
+                                               &endpoint_buffer_size);
+      capture_client = core_audio_utility::CreateCaptureClient(client.Get());
+      EXPECT_TRUE(capture_client.Get());
+      EXPECT_GT(endpoint_buffer_size, 0u);
+    }
+  }
+}
+
+TEST_F(CoreAudioUtilityWinTest, CreateAudioClock) {
+  ABORT_TEST_IF_NOT(DevicesAvailable());
+
+  EDataFlow data_flow[] = {eRender, eCapture};
+
+  WAVEFORMATPCMEX format;
+  uint32_t endpoint_buffer_size = 0;
+
+  for (size_t i = 0; i < arraysize(data_flow); ++i) {
+    ComPtr<IAudioClient> client;
+    ComPtr<IAudioClock> audio_clock;
+
+    // Create a default client for the given data-flow direction.
+    client = core_audio_utility::CreateClient(
+        AudioDeviceName::kDefaultDeviceId, data_flow[i], eConsole);
+    EXPECT_TRUE(client.Get());
+    EXPECT_TRUE(SUCCEEDED(
+        core_audio_utility::GetSharedModeMixFormat(client.Get(), &format)));
+
+    // It is not possible to create an audio clock using an uninitialized
+    // client interface.
+    audio_clock = core_audio_utility::CreateAudioClock(client.Get());
+    EXPECT_FALSE(audio_clock.Get());
+
+    // Do a proper initialization and verify that it works this time.
+    core_audio_utility::SharedModeInitialize(client.Get(), &format, nullptr,
+                                             0, false, &endpoint_buffer_size);
+    audio_clock = core_audio_utility::CreateAudioClock(client.Get());
+    EXPECT_TRUE(audio_clock.Get());
+    EXPECT_GT(endpoint_buffer_size, 0u);
+
+    // Use the audio clock and verify that querying the device frequency
+    // works.
+    UINT64 frequency = 0;
+    EXPECT_TRUE(SUCCEEDED(audio_clock->GetFrequency(&frequency)));
+    EXPECT_GT(frequency, 0u);
+  }
+}
+
+TEST_F(CoreAudioUtilityWinTest, CreateAudioSessionControl) {
+  ABORT_TEST_IF_NOT(DevicesAvailable());
+
+  EDataFlow data_flow[] = {eRender, eCapture};
+
+  WAVEFORMATPCMEX format;
+  uint32_t endpoint_buffer_size = 0;
+
+  for (size_t i = 0; i < arraysize(data_flow); ++i) {
+    ComPtr<IAudioClient> client;
+    ComPtr<IAudioSessionControl> audio_session_control;
+
+    // Create a default client for the given data-flow direction.
+    client = core_audio_utility::CreateClient(
+        AudioDeviceName::kDefaultDeviceId, data_flow[i], eConsole);
+    EXPECT_TRUE(client.Get());
+    EXPECT_TRUE(SUCCEEDED(
+        core_audio_utility::GetSharedModeMixFormat(client.Get(), &format)));
+
+    // It is not possible to create an audio session control using an
+    // uninitialized client interface.
+    audio_session_control =
+        core_audio_utility::CreateAudioSessionControl(client.Get());
+    EXPECT_FALSE(audio_session_control.Get());
+
+    // Do a proper initialization and verify that it works this time.
+    core_audio_utility::SharedModeInitialize(client.Get(), &format, nullptr,
+                                             0, false, &endpoint_buffer_size);
+    audio_session_control =
+        core_audio_utility::CreateAudioSessionControl(client.Get());
+    EXPECT_TRUE(audio_session_control.Get());
+    EXPECT_GT(endpoint_buffer_size, 0u);
+
+    // Use the audio session control and verify that the session state can be
+    // queried. When a client opens a session by assigning the first stream to
+    // the session (by calling the IAudioClient::Initialize method), the
+    // initial session state is inactive. The session state changes from
+    // inactive to active when a stream in the session begins running (because
+    // the client has called the IAudioClient::Start method).
+    AudioSessionState state;
+    EXPECT_TRUE(SUCCEEDED(audio_session_control->GetState(&state)));
+    EXPECT_EQ(state, AudioSessionStateInactive);
+  }
+}
+
+TEST_F(CoreAudioUtilityWinTest, CreateSimpleAudioVolume) {
+  ABORT_TEST_IF_NOT(DevicesAvailable());
+
+  EDataFlow data_flow[] = {eRender, eCapture};
+
+  WAVEFORMATPCMEX format;
+  uint32_t endpoint_buffer_size = 0;
+
+  for (size_t i = 0; i < arraysize(data_flow); ++i) {
+    ComPtr<IAudioClient> client;
+    ComPtr<ISimpleAudioVolume> simple_audio_volume;
+
+    // Create a default client for the given data-flow direction.
+    client = core_audio_utility::CreateClient(
+        AudioDeviceName::kDefaultDeviceId, data_flow[i], eConsole);
+    EXPECT_TRUE(client.Get());
+    EXPECT_TRUE(SUCCEEDED(
+        core_audio_utility::GetSharedModeMixFormat(client.Get(), &format)));
+
+    // It is not possible to create an audio volume using an uninitialized
+    // client interface.
+    simple_audio_volume =
+        core_audio_utility::CreateSimpleAudioVolume(client.Get());
+    EXPECT_FALSE(simple_audio_volume.Get());
+
+    // Do a proper initialization and verify that it works this time.
+    core_audio_utility::SharedModeInitialize(client.Get(), &format, nullptr,
+                                             0, false, &endpoint_buffer_size);
+    simple_audio_volume =
+        core_audio_utility::CreateSimpleAudioVolume(client.Get());
+    EXPECT_TRUE(simple_audio_volume.Get());
+    EXPECT_GT(endpoint_buffer_size, 0u);
+
+    // Use the audio volume interface and validate that it works. The volume
+    // level should be a value in the range 0.0 to 1.0 at the first call.
+    float volume = 0.0;
+    EXPECT_TRUE(SUCCEEDED(simple_audio_volume->GetMasterVolume(&volume)));
+    EXPECT_GE(volume, 0.0);
+    EXPECT_LE(volume, 1.0);
+
+    // Next, set a new volume and verify that the setter does its job.
+    const float target_volume = 0.5;
+    EXPECT_TRUE(SUCCEEDED(
+        simple_audio_volume->SetMasterVolume(target_volume, nullptr)));
+    EXPECT_TRUE(SUCCEEDED(simple_audio_volume->GetMasterVolume(&volume)));
+    EXPECT_EQ(volume, target_volume);
+  }
+}
+
+TEST_F(CoreAudioUtilityWinTest, FillRenderEndpointBufferWithSilence) {
+  ABORT_TEST_IF_NOT(DevicesAvailable());
+
+  // Create default clients using the default mixing format for shared mode.
+  ComPtr<IAudioClient> client(core_audio_utility::CreateClient(
+      AudioDeviceName::kDefaultDeviceId, eRender, eConsole));
+  EXPECT_TRUE(client.Get());
+
+  WAVEFORMATPCMEX format;
+  uint32_t endpoint_buffer_size = 0;
+  EXPECT_TRUE(SUCCEEDED(
+      core_audio_utility::GetSharedModeMixFormat(client.Get(), &format)));
+  core_audio_utility::SharedModeInitialize(client.Get(), &format, nullptr, 0,
+                                           false, &endpoint_buffer_size);
+  EXPECT_GT(endpoint_buffer_size, 0u);
+
+  ComPtr<IAudioRenderClient> render_client(
+      core_audio_utility::CreateRenderClient(client.Get()));
+  EXPECT_TRUE(render_client.Get());
+
+  // The endpoint audio buffer should not be filled up by default after being
+  // created.
+  UINT32 num_queued_frames = 0;
+  client->GetCurrentPadding(&num_queued_frames);
+  EXPECT_EQ(num_queued_frames, 0u);
+
+  // Fill it up with zeros and verify that the buffer is full.
+  // It is not possible to verify that the actual data consists of zeros
+  // since we can't access data that has already been sent to the endpoint
+  // buffer.
+  EXPECT_TRUE(core_audio_utility::FillRenderEndpointBufferWithSilence(
+      client.Get(), render_client.Get()));
+  client->GetCurrentPadding(&num_queued_frames);
+  EXPECT_EQ(num_queued_frames, endpoint_buffer_size);
+}
+
+}  // namespace webrtc_win
+}  // namespace webrtc
-- 
cgit v1.2.3