author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 19:33:14 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 19:33:14 +0000
commit     36d22d82aa202bb199967e9512281e9a53db42c9
tree       105e8c98ddea1c1e4784a60a5a6410fa416be2de /third_party/libwebrtc/sdk/objc/native/src/audio
parent     Initial commit.
Adding upstream version 115.7.0esr.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/libwebrtc/sdk/objc/native/src/audio')
9 files changed, 3103 insertions, 0 deletions
diff --git a/third_party/libwebrtc/sdk/objc/native/src/audio/audio_device_ios.h b/third_party/libwebrtc/sdk/objc/native/src/audio/audio_device_ios.h
new file mode 100644
index 0000000000..a86acb56fe
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/native/src/audio/audio_device_ios.h
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SDK_OBJC_NATIVE_SRC_AUDIO_AUDIO_DEVICE_IOS_H_
+#define SDK_OBJC_NATIVE_SRC_AUDIO_AUDIO_DEVICE_IOS_H_
+
+#include <atomic>
+#include <memory>
+
+#include "api/scoped_refptr.h"
+#include "api/sequence_checker.h"
+#include "api/task_queue/pending_task_safety_flag.h"
+#include "audio_session_observer.h"
+#include "modules/audio_device/audio_device_generic.h"
+#include "rtc_base/buffer.h"
+#include "rtc_base/thread.h"
+#include "rtc_base/thread_annotations.h"
+#include "sdk/objc/base/RTCMacros.h"
+#include "voice_processing_audio_unit.h"
+
+RTC_FWD_DECL_OBJC_CLASS(RTCNativeAudioSessionDelegateAdapter);
+
+namespace webrtc {
+
+class FineAudioBuffer;
+
+namespace ios_adm {
+
+// Implements full duplex 16-bit mono PCM audio support for iOS using a
+// Voice-Processing (VP) I/O audio unit in Core Audio. The VP I/O audio unit
+// supports audio echo cancellation. It also adds automatic gain control,
+// adjustment of voice-processing quality and muting.
+//
+// An instance must be created and destroyed on one and the same thread.
+// All supported public methods must also be called on the same thread.
+// A thread checker will RTC_DCHECK if any supported method is called on an
+// invalid thread.
+//
+// Recorded audio will be delivered on a real-time internal I/O thread in the
+// audio unit. The audio unit will also ask for audio data to play out on this
+// same thread.
+class AudioDeviceIOS : public AudioDeviceGeneric,
+                       public AudioSessionObserver,
+                       public VoiceProcessingAudioUnitObserver {
+ public:
+  explicit AudioDeviceIOS(bool bypass_voice_processing);
+  ~AudioDeviceIOS() override;
+
+  void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override;
+
+  InitStatus Init() override;
+  int32_t Terminate() override;
+  bool Initialized() const override;
+
+  int32_t InitPlayout() override;
+  bool PlayoutIsInitialized() const override;
+
+  int32_t InitRecording() override;
+  bool RecordingIsInitialized() const override;
+
+  int32_t StartPlayout() override;
+  int32_t StopPlayout() override;
+  bool Playing() const override;
+
+  int32_t StartRecording() override;
+  int32_t StopRecording() override;
+  bool Recording() const override;
+
+  // These methods return hard-coded delay values and not dynamic delay
+  // estimates. The reason is that iOS supports a built-in AEC and the WebRTC
+  // AEC will always be disabled in the Libjingle layer to avoid running two
+  // AEC implementations at the same time. And, it saves resources to avoid
+  // updating these delay values continuously.
+  // TODO(henrika): it would be possible to mark these two methods as not
+  // implemented since they are only called for A/V-sync purposes today and
+  // A/V-sync is not supported on iOS. However, we avoid adding error messages
+  // to the log by using these dummy implementations instead.
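// Editor's sketch, not part of the change: the lifecycle implied by the
// threading contract in the class comment above. `adb` is an assumed
// AudioDeviceBuffer* owned by the module layer; all calls happen on the
// construction thread.
//
//   AudioDeviceIOS adm(/*bypass_voice_processing=*/false);
//   adm.AttachAudioBuffer(adb);  // done by the module wrapper before Init()
//   adm.Init();
//   adm.InitPlayout();
//   adm.StartPlayout();  // render callbacks now arrive on the I/O thread
//   adm.StopPlayout();
//   adm.Terminate();     // same thread as construction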
+ int32_t PlayoutDelay(uint16_t& delayMS) const override; + + // No implementation for playout underrun on iOS. We override it to avoid a + // periodic log that it isn't available from the base class. + int32_t GetPlayoutUnderrunCount() const override { return -1; } + + // Native audio parameters stored during construction. + // These methods are unique for the iOS implementation. + int GetPlayoutAudioParameters(AudioParameters* params) const override; + int GetRecordAudioParameters(AudioParameters* params) const override; + + // These methods are currently not fully implemented on iOS: + + // See audio_device_not_implemented.cc for trivial implementations. + int32_t ActiveAudioLayer( + AudioDeviceModule::AudioLayer& audioLayer) const override; + int32_t PlayoutIsAvailable(bool& available) override; + int32_t RecordingIsAvailable(bool& available) override; + int16_t PlayoutDevices() override; + int16_t RecordingDevices() override; + int32_t PlayoutDeviceName(uint16_t index, + char name[kAdmMaxDeviceNameSize], + char guid[kAdmMaxGuidSize]) override; + int32_t RecordingDeviceName(uint16_t index, + char name[kAdmMaxDeviceNameSize], + char guid[kAdmMaxGuidSize]) override; + int32_t SetPlayoutDevice(uint16_t index) override; + int32_t SetPlayoutDevice( + AudioDeviceModule::WindowsDeviceType device) override; + int32_t SetRecordingDevice(uint16_t index) override; + int32_t SetRecordingDevice( + AudioDeviceModule::WindowsDeviceType device) override; + int32_t InitSpeaker() override; + bool SpeakerIsInitialized() const override; + int32_t InitMicrophone() override; + bool MicrophoneIsInitialized() const override; + int32_t SpeakerVolumeIsAvailable(bool& available) override; + int32_t SetSpeakerVolume(uint32_t volume) override; + int32_t SpeakerVolume(uint32_t& volume) const override; + int32_t MaxSpeakerVolume(uint32_t& maxVolume) const override; + int32_t MinSpeakerVolume(uint32_t& minVolume) const override; + int32_t MicrophoneVolumeIsAvailable(bool& available) override; + int32_t SetMicrophoneVolume(uint32_t volume) override; + int32_t MicrophoneVolume(uint32_t& volume) const override; + int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const override; + int32_t MinMicrophoneVolume(uint32_t& minVolume) const override; + int32_t MicrophoneMuteIsAvailable(bool& available) override; + int32_t SetMicrophoneMute(bool enable) override; + int32_t MicrophoneMute(bool& enabled) const override; + int32_t SpeakerMuteIsAvailable(bool& available) override; + int32_t SetSpeakerMute(bool enable) override; + int32_t SpeakerMute(bool& enabled) const override; + int32_t StereoPlayoutIsAvailable(bool& available) override; + int32_t SetStereoPlayout(bool enable) override; + int32_t StereoPlayout(bool& enabled) const override; + int32_t StereoRecordingIsAvailable(bool& available) override; + int32_t SetStereoRecording(bool enable) override; + int32_t StereoRecording(bool& enabled) const override; + + // AudioSessionObserver methods. May be called from any thread. + void OnInterruptionBegin() override; + void OnInterruptionEnd() override; + void OnValidRouteChange() override; + void OnCanPlayOrRecordChange(bool can_play_or_record) override; + void OnChangedOutputVolume() override; + + // VoiceProcessingAudioUnitObserver methods. 
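// Editor's sketch (illustrative, not part of the change): the two callbacks
// declared below run on the real-time I/O thread owned by the audio unit.
// The recording path, as implemented in audio_device_ios.mm further down,
// renders into a buffer owned by this class and forwards it:
//
//   OSStatus AudioDeviceIOS::OnDeliverRecordedData(...) {
//     audio_unit_->Render(flags, time_stamp, bus_number, num_frames, &list);
//     fine_audio_buffer_->DeliverRecordedData(record_audio_buffer_,
//                                             kFixedRecordDelayEstimate);
//     return noErr;
//   }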
+  OSStatus OnDeliverRecordedData(AudioUnitRenderActionFlags* flags,
+                                 const AudioTimeStamp* time_stamp,
+                                 UInt32 bus_number,
+                                 UInt32 num_frames,
+                                 AudioBufferList* io_data) override;
+  OSStatus OnGetPlayoutData(AudioUnitRenderActionFlags* flags,
+                            const AudioTimeStamp* time_stamp,
+                            UInt32 bus_number,
+                            UInt32 num_frames,
+                            AudioBufferList* io_data) override;
+
+  bool IsInterrupted();
+
+ private:
+  // Called by the relevant AudioSessionObserver methods on `thread_`.
+  void HandleInterruptionBegin();
+  void HandleInterruptionEnd();
+  void HandleValidRouteChange();
+  void HandleCanPlayOrRecordChange(bool can_play_or_record);
+  void HandleSampleRateChange();
+  void HandlePlayoutGlitchDetected();
+  void HandleOutputVolumeChange();
+
+  // Uses current `playout_parameters_` and `record_parameters_` to inform the
+  // audio device buffer (ADB) about our internal audio parameters.
+  void UpdateAudioDeviceBuffer();
+
+  // Since the preferred audio parameters are only hints to the OS, the actual
+  // values may be different once the AVAudioSession has been activated.
+  // This method asks for the current hardware parameters and takes actions
+  // if they should differ from what we have asked for initially. It also
+  // defines `playout_parameters_` and `record_parameters_`.
+  void SetupAudioBuffersForActiveAudioSession();
+
+  // Creates the audio unit.
+  bool CreateAudioUnit();
+
+  // Updates the audio unit state based on current state.
+  void UpdateAudioUnit(bool can_play_or_record);
+
+  // Configures the audio session for WebRTC.
+  bool ConfigureAudioSession();
+
+  // Like above, but requires the caller to already hold the session lock.
+  bool ConfigureAudioSessionLocked();
+
+  // Unconfigures the audio session.
+  void UnconfigureAudioSession();
+
+  // Activates our audio session, creates and initializes the voice-processing
+  // audio unit and verifies that we got the preferred native audio parameters.
+  bool InitPlayOrRecord();
+
+  // Closes and deletes the voice-processing I/O unit.
+  void ShutdownPlayOrRecord();
+
+  // Resets thread-checkers before a call is restarted.
+  void PrepareForNewStart();
+
+  // Determines whether voice processing should be enabled or disabled.
+  const bool bypass_voice_processing_;
+
+  // Native I/O audio thread checker.
+  SequenceChecker io_thread_checker_;
+
+  // Thread that this object is created on.
+  rtc::Thread* thread_;
+
+  // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
+  // AudioDeviceModuleImpl class, which is created via AudioDeviceModule::Create().
+  // The AudioDeviceBuffer is a member of the AudioDeviceModuleImpl instance
+  // and therefore outlives this object.
+  AudioDeviceBuffer* audio_device_buffer_;
+
+  // Contains audio parameters (sample rate, #channels, buffer size etc.) for
+  // the playout and recording sides. These structures are set in two steps:
+  // first, native sample rate and #channels are defined in Init(). Next, the
+  // audio session is activated and we verify that the preferred parameters
+  // were granted by the OS. At this stage it is also possible to add a third
+  // component to the parameters; the native I/O buffer duration.
+  // An RTC_CHECK will be hit if we for some reason fail to open an audio
+  // session using the specified parameters.
+  AudioParameters playout_parameters_;
+  AudioParameters record_parameters_;
+
+  // The AudioUnit used to play and record audio.
+  std::unique_ptr<VoiceProcessingAudioUnit> audio_unit_;
+
+  // FineAudioBuffer takes an AudioDeviceBuffer which delivers audio data
+  // in chunks of 10ms. It then allows for this data to be pulled in
+  // a finer or coarser granularity. I.e., by interacting with this class
+  // instead of directly with the AudioDeviceBuffer, one can ask for any
+  // number of audio data samples. It also supports a similar scheme for the
+  // recording side.
+  // Example: the native buffer size can be 128 audio frames at a 16kHz sample
+  // rate. WebRTC will provide 160 audio frames per 10ms but iOS asks for 128
+  // in each callback (one every 8ms). This class can then ask for 128 and the
+  // FineAudioBuffer will ask WebRTC for new data only when needed and also
+  // cache non-utilized audio between callbacks. On the recording side, iOS
+  // can provide audio data frames of size 128 and these are accumulated until
+  // enough data to supply one 10ms call exists. This 10ms chunk is then sent
+  // to WebRTC and the remaining part is stored.
+  std::unique_ptr<FineAudioBuffer> fine_audio_buffer_;
+
+  // Temporary storage for recorded data. AudioUnitRender() renders into this
+  // array as soon as a frame of the desired buffer size has been recorded.
+  // On real iOS devices, the size will be fixed and set once. For iOS
+  // simulators, the size can vary from callback to callback and the size
+  // will be changed dynamically to account for this behavior.
+  rtc::BufferT<int16_t> record_audio_buffer_;
+
+  // Set to 1 when recording is active and 0 otherwise.
+  std::atomic<int> recording_;
+
+  // Set to 1 when playout is active and 0 otherwise.
+  std::atomic<int> playing_;
+
+  // Set to true after successful call to Init(), false otherwise.
+  bool initialized_ RTC_GUARDED_BY(thread_);
+
+  // Set to true after successful call to InitRecording() or InitPlayout(),
+  // false otherwise.
+  bool audio_is_initialized_;
+
+  // Set to true if audio session is interrupted, false otherwise.
+  bool is_interrupted_;
+
+  // Audio interruption observer instance.
+  RTCNativeAudioSessionDelegateAdapter* audio_session_observer_
+      RTC_GUARDED_BY(thread_);
+
+  // Set to true if we've activated the audio session.
+  bool has_configured_session_ RTC_GUARDED_BY(thread_);
+
+  // Counts number of detected audio glitches on the playout side.
+  int64_t num_detected_playout_glitches_ RTC_GUARDED_BY(thread_);
+  int64_t last_playout_time_ RTC_GUARDED_BY(io_thread_checker_);
+
+  // Counts number of playout callbacks per call.
+  // The value is updated on the native I/O thread and later read on the
+  // creating `thread_` but at this stage no audio is active.
+  // Hence, it is a "thread safe" design and no lock is needed.
+  int64_t num_playout_callbacks_;
+
+  // Contains the time for when the last output volume change was detected.
+  int64_t last_output_volume_change_time_ RTC_GUARDED_BY(thread_);
+
+  // Avoids running pending task after `this` is Terminated.
+  rtc::scoped_refptr<PendingTaskSafetyFlag> safety_ =
+      PendingTaskSafetyFlag::Create();
+};
+}  // namespace ios_adm
+}  // namespace webrtc
+
+#endif  // SDK_OBJC_NATIVE_SRC_AUDIO_AUDIO_DEVICE_IOS_H_
diff --git a/third_party/libwebrtc/sdk/objc/native/src/audio/audio_device_ios.mm b/third_party/libwebrtc/sdk/objc/native/src/audio/audio_device_ios.mm
new file mode 100644
index 0000000000..dd2c11bdd2
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/native/src/audio/audio_device_ios.mm
@@ -0,0 +1,1127 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <AVFoundation/AVFoundation.h>
+#import <Foundation/Foundation.h>
+
+#include "audio_device_ios.h"
+
+#include <cmath>
+
+#include "api/array_view.h"
+#include "api/task_queue/pending_task_safety_flag.h"
+#include "helpers.h"
+#include "modules/audio_device/fine_audio_buffer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/thread.h"
+#include "rtc_base/thread_annotations.h"
+#include "rtc_base/time_utils.h"
+#include "system_wrappers/include/field_trial.h"
+#include "system_wrappers/include/metrics.h"
+
+#import "base/RTCLogging.h"
+#import "components/audio/RTCAudioSession+Private.h"
+#import "components/audio/RTCAudioSession.h"
+#import "components/audio/RTCAudioSessionConfiguration.h"
+#import "components/audio/RTCNativeAudioSessionDelegateAdapter.h"
+
+namespace webrtc {
+namespace ios_adm {
+
+#define LOGI() RTC_LOG(LS_INFO) << "AudioDeviceIOS::"
+
+#define LOG_AND_RETURN_IF_ERROR(error, message)    \
+  do {                                             \
+    OSStatus err = error;                          \
+    if (err) {                                     \
+      RTC_LOG(LS_ERROR) << message << ": " << err; \
+      return false;                                \
+    }                                              \
+  } while (0)
+
+#define LOG_IF_ERROR(error, message)               \
+  do {                                             \
+    OSStatus err = error;                          \
+    if (err) {                                     \
+      RTC_LOG(LS_ERROR) << message << ": " << err; \
+    }                                              \
+  } while (0)
+
+// Hardcoded delay estimates based on real measurements.
+// TODO(henrika): these values are not used in combination with built-in AEC.
+// Can most likely be removed.
+const UInt16 kFixedPlayoutDelayEstimate = 30;
+const UInt16 kFixedRecordDelayEstimate = 30;
+
+using ios::CheckAndLogError;
+
+#if !defined(NDEBUG)
+// Returns true when the code runs on a device simulator.
+static bool DeviceIsSimulator() {
+  return ios::GetDeviceName() == "x86_64";
+}
+
+// Helper method that logs essential device information strings.
+static void LogDeviceInfo() { + RTC_LOG(LS_INFO) << "LogDeviceInfo"; + @autoreleasepool { + RTC_LOG(LS_INFO) << " system name: " << ios::GetSystemName(); + RTC_LOG(LS_INFO) << " system version: " << ios::GetSystemVersionAsString(); + RTC_LOG(LS_INFO) << " device type: " << ios::GetDeviceType(); + RTC_LOG(LS_INFO) << " device name: " << ios::GetDeviceName(); + RTC_LOG(LS_INFO) << " process name: " << ios::GetProcessName(); + RTC_LOG(LS_INFO) << " process ID: " << ios::GetProcessID(); + RTC_LOG(LS_INFO) << " OS version: " << ios::GetOSVersionString(); + RTC_LOG(LS_INFO) << " processing cores: " << ios::GetProcessorCount(); + RTC_LOG(LS_INFO) << " low power mode: " << ios::GetLowPowerModeEnabled(); +#if TARGET_IPHONE_SIMULATOR + RTC_LOG(LS_INFO) << " TARGET_IPHONE_SIMULATOR is defined"; +#endif + RTC_LOG(LS_INFO) << " DeviceIsSimulator: " << DeviceIsSimulator(); + } +} +#endif // !defined(NDEBUG) + +AudioDeviceIOS::AudioDeviceIOS(bool bypass_voice_processing) + : bypass_voice_processing_(bypass_voice_processing), + audio_device_buffer_(nullptr), + audio_unit_(nullptr), + recording_(0), + playing_(0), + initialized_(false), + audio_is_initialized_(false), + is_interrupted_(false), + has_configured_session_(false), + num_detected_playout_glitches_(0), + last_playout_time_(0), + num_playout_callbacks_(0), + last_output_volume_change_time_(0) { + LOGI() << "ctor" << ios::GetCurrentThreadDescription() + << ",bypass_voice_processing=" << bypass_voice_processing_; + io_thread_checker_.Detach(); + thread_ = rtc::Thread::Current(); + + audio_session_observer_ = [[RTCNativeAudioSessionDelegateAdapter alloc] initWithObserver:this]; +} + +AudioDeviceIOS::~AudioDeviceIOS() { + RTC_DCHECK_RUN_ON(thread_); + LOGI() << "~dtor" << ios::GetCurrentThreadDescription(); + safety_->SetNotAlive(); + Terminate(); + audio_session_observer_ = nil; +} + +void AudioDeviceIOS::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) { + LOGI() << "AttachAudioBuffer"; + RTC_DCHECK(audioBuffer); + RTC_DCHECK_RUN_ON(thread_); + audio_device_buffer_ = audioBuffer; +} + +AudioDeviceGeneric::InitStatus AudioDeviceIOS::Init() { + LOGI() << "Init"; + io_thread_checker_.Detach(); + + RTC_DCHECK_RUN_ON(thread_); + if (initialized_) { + return InitStatus::OK; + } +#if !defined(NDEBUG) + LogDeviceInfo(); +#endif + // Store the preferred sample rate and preferred number of channels already + // here. They have not been set and confirmed yet since configureForWebRTC + // is not called until audio is about to start. However, it makes sense to + // store the parameters now and then verify at a later stage. + RTC_OBJC_TYPE(RTCAudioSessionConfiguration)* config = + [RTC_OBJC_TYPE(RTCAudioSessionConfiguration) webRTCConfiguration]; + playout_parameters_.reset(config.sampleRate, config.outputNumberOfChannels); + record_parameters_.reset(config.sampleRate, config.inputNumberOfChannels); + // Ensure that the audio device buffer (ADB) knows about the internal audio + // parameters. Note that, even if we are unable to get a mono audio session, + // we will always tell the I/O audio unit to do a channel format conversion + // to guarantee mono on the "input side" of the audio unit. 
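  // Editor's note (illustration, not part of the change): at this point the
  // parameters carry only sample rate and channel count. The third component,
  // the native I/O buffer duration, is added later, once the session is
  // active, in SetupAudioBuffersForActiveAudioSession():
  //
  //   playout_parameters_.reset(sample_rate, playout_parameters_.channels(),
  //                             io_buffer_duration);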
+ UpdateAudioDeviceBuffer(); + initialized_ = true; + return InitStatus::OK; +} + +int32_t AudioDeviceIOS::Terminate() { + LOGI() << "Terminate"; + RTC_DCHECK_RUN_ON(thread_); + if (!initialized_) { + return 0; + } + StopPlayout(); + StopRecording(); + initialized_ = false; + return 0; +} + +bool AudioDeviceIOS::Initialized() const { + RTC_DCHECK_RUN_ON(thread_); + return initialized_; +} + +int32_t AudioDeviceIOS::InitPlayout() { + LOGI() << "InitPlayout"; + RTC_DCHECK_RUN_ON(thread_); + RTC_DCHECK(initialized_); + RTC_DCHECK(!audio_is_initialized_); + RTC_DCHECK(!playing_.load()); + if (!audio_is_initialized_) { + if (!InitPlayOrRecord()) { + RTC_LOG_F(LS_ERROR) << "InitPlayOrRecord failed for InitPlayout!"; + return -1; + } + } + audio_is_initialized_ = true; + return 0; +} + +bool AudioDeviceIOS::PlayoutIsInitialized() const { + RTC_DCHECK_RUN_ON(thread_); + return audio_is_initialized_; +} + +bool AudioDeviceIOS::RecordingIsInitialized() const { + RTC_DCHECK_RUN_ON(thread_); + return audio_is_initialized_; +} + +int32_t AudioDeviceIOS::InitRecording() { + LOGI() << "InitRecording"; + RTC_DCHECK_RUN_ON(thread_); + RTC_DCHECK(initialized_); + RTC_DCHECK(!audio_is_initialized_); + RTC_DCHECK(!recording_.load()); + if (!audio_is_initialized_) { + if (!InitPlayOrRecord()) { + RTC_LOG_F(LS_ERROR) << "InitPlayOrRecord failed for InitRecording!"; + return -1; + } + } + audio_is_initialized_ = true; + return 0; +} + +int32_t AudioDeviceIOS::StartPlayout() { + LOGI() << "StartPlayout"; + RTC_DCHECK_RUN_ON(thread_); + RTC_DCHECK(audio_is_initialized_); + RTC_DCHECK(!playing_.load()); + RTC_DCHECK(audio_unit_); + if (fine_audio_buffer_) { + fine_audio_buffer_->ResetPlayout(); + } + if (!recording_.load() && audio_unit_->GetState() == VoiceProcessingAudioUnit::kInitialized) { + OSStatus result = audio_unit_->Start(); + if (result != noErr) { + RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance]; + [session notifyAudioUnitStartFailedWithError:result]; + RTCLogError(@"StartPlayout failed to start audio unit, reason %d", result); + return -1; + } + RTC_LOG(LS_INFO) << "Voice-Processing I/O audio unit is now started"; + } + playing_.store(1, std::memory_order_release); + num_playout_callbacks_ = 0; + num_detected_playout_glitches_ = 0; + return 0; +} + +int32_t AudioDeviceIOS::StopPlayout() { + LOGI() << "StopPlayout"; + RTC_DCHECK_RUN_ON(thread_); + if (!audio_is_initialized_ || !playing_.load()) { + return 0; + } + if (!recording_.load()) { + ShutdownPlayOrRecord(); + audio_is_initialized_ = false; + } + playing_.store(0, std::memory_order_release); + + // Derive average number of calls to OnGetPlayoutData() between detected + // audio glitches and add the result to a histogram. 
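  // Worked example (editor's illustration): 48000 playout callbacks during a
  // call with 2 detected glitches yields a histogram sample of
  // 48000 / 2 = 24000 callbacks between glitches.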
+ int average_number_of_playout_callbacks_between_glitches = 100000; + RTC_DCHECK_GE(num_playout_callbacks_, num_detected_playout_glitches_); + if (num_detected_playout_glitches_ > 0) { + average_number_of_playout_callbacks_between_glitches = + num_playout_callbacks_ / num_detected_playout_glitches_; + } + RTC_HISTOGRAM_COUNTS_100000("WebRTC.Audio.AveragePlayoutCallbacksBetweenGlitches", + average_number_of_playout_callbacks_between_glitches); + RTCLog(@"Average number of playout callbacks between glitches: %d", + average_number_of_playout_callbacks_between_glitches); + return 0; +} + +bool AudioDeviceIOS::Playing() const { + return playing_.load(); +} + +int32_t AudioDeviceIOS::StartRecording() { + LOGI() << "StartRecording"; + RTC_DCHECK_RUN_ON(thread_); + RTC_DCHECK(audio_is_initialized_); + RTC_DCHECK(!recording_.load()); + RTC_DCHECK(audio_unit_); + if (fine_audio_buffer_) { + fine_audio_buffer_->ResetRecord(); + } + if (!playing_.load() && audio_unit_->GetState() == VoiceProcessingAudioUnit::kInitialized) { + OSStatus result = audio_unit_->Start(); + if (result != noErr) { + RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance]; + [session notifyAudioUnitStartFailedWithError:result]; + RTCLogError(@"StartRecording failed to start audio unit, reason %d", result); + return -1; + } + RTC_LOG(LS_INFO) << "Voice-Processing I/O audio unit is now started"; + } + recording_.store(1, std::memory_order_release); + return 0; +} + +int32_t AudioDeviceIOS::StopRecording() { + LOGI() << "StopRecording"; + RTC_DCHECK_RUN_ON(thread_); + if (!audio_is_initialized_ || !recording_.load()) { + return 0; + } + if (!playing_.load()) { + ShutdownPlayOrRecord(); + audio_is_initialized_ = false; + } + recording_.store(0, std::memory_order_release); + return 0; +} + +bool AudioDeviceIOS::Recording() const { + return recording_.load(); +} + +int32_t AudioDeviceIOS::PlayoutDelay(uint16_t& delayMS) const { + delayMS = kFixedPlayoutDelayEstimate; + return 0; +} + +int AudioDeviceIOS::GetPlayoutAudioParameters(AudioParameters* params) const { + LOGI() << "GetPlayoutAudioParameters"; + RTC_DCHECK(playout_parameters_.is_valid()); + RTC_DCHECK_RUN_ON(thread_); + *params = playout_parameters_; + return 0; +} + +int AudioDeviceIOS::GetRecordAudioParameters(AudioParameters* params) const { + LOGI() << "GetRecordAudioParameters"; + RTC_DCHECK(record_parameters_.is_valid()); + RTC_DCHECK_RUN_ON(thread_); + *params = record_parameters_; + return 0; +} + +void AudioDeviceIOS::OnInterruptionBegin() { + RTC_DCHECK(thread_); + LOGI() << "OnInterruptionBegin"; + thread_->PostTask(SafeTask(safety_, [this] { HandleInterruptionBegin(); })); +} + +void AudioDeviceIOS::OnInterruptionEnd() { + RTC_DCHECK(thread_); + LOGI() << "OnInterruptionEnd"; + thread_->PostTask(SafeTask(safety_, [this] { HandleInterruptionEnd(); })); +} + +void AudioDeviceIOS::OnValidRouteChange() { + RTC_DCHECK(thread_); + thread_->PostTask(SafeTask(safety_, [this] { HandleValidRouteChange(); })); +} + +void AudioDeviceIOS::OnCanPlayOrRecordChange(bool can_play_or_record) { + RTC_DCHECK(thread_); + thread_->PostTask(SafeTask( + safety_, [this, can_play_or_record] { HandleCanPlayOrRecordChange(can_play_or_record); })); +} + +void AudioDeviceIOS::OnChangedOutputVolume() { + RTC_DCHECK(thread_); + thread_->PostTask(SafeTask(safety_, [this] { HandleOutputVolumeChange(); })); +} + +OSStatus AudioDeviceIOS::OnDeliverRecordedData(AudioUnitRenderActionFlags* flags, + const AudioTimeStamp* time_stamp, + UInt32 bus_number, + UInt32 
num_frames, + AudioBufferList* /* io_data */) { + RTC_DCHECK_RUN_ON(&io_thread_checker_); + OSStatus result = noErr; + // Simply return if recording is not enabled. + if (!recording_.load(std::memory_order_acquire)) return result; + + // Set the size of our own audio buffer and clear it first to avoid copying + // in combination with potential reallocations. + // On real iOS devices, the size will only be set once (at first callback). + record_audio_buffer_.Clear(); + record_audio_buffer_.SetSize(num_frames); + + // Allocate AudioBuffers to be used as storage for the received audio. + // The AudioBufferList structure works as a placeholder for the + // AudioBuffer structure, which holds a pointer to the actual data buffer + // in `record_audio_buffer_`. Recorded audio will be rendered into this memory + // at each input callback when calling AudioUnitRender(). + AudioBufferList audio_buffer_list; + audio_buffer_list.mNumberBuffers = 1; + AudioBuffer* audio_buffer = &audio_buffer_list.mBuffers[0]; + audio_buffer->mNumberChannels = record_parameters_.channels(); + audio_buffer->mDataByteSize = + record_audio_buffer_.size() * VoiceProcessingAudioUnit::kBytesPerSample; + audio_buffer->mData = reinterpret_cast<int8_t*>(record_audio_buffer_.data()); + + // Obtain the recorded audio samples by initiating a rendering cycle. + // Since it happens on the input bus, the `io_data` parameter is a reference + // to the preallocated audio buffer list that the audio unit renders into. + // We can make the audio unit provide a buffer instead in io_data, but we + // currently just use our own. + // TODO(henrika): should error handling be improved? + result = audio_unit_->Render(flags, time_stamp, bus_number, num_frames, &audio_buffer_list); + if (result != noErr) { + RTCLogError(@"Failed to render audio."); + return result; + } + + // Get a pointer to the recorded audio and send it to the WebRTC ADB. + // Use the FineAudioBuffer instance to convert between native buffer size + // and the 10ms buffer size used by WebRTC. + fine_audio_buffer_->DeliverRecordedData(record_audio_buffer_, kFixedRecordDelayEstimate); + return noErr; +} + +OSStatus AudioDeviceIOS::OnGetPlayoutData(AudioUnitRenderActionFlags* flags, + const AudioTimeStamp* time_stamp, + UInt32 bus_number, + UInt32 num_frames, + AudioBufferList* io_data) { + RTC_DCHECK_RUN_ON(&io_thread_checker_); + // Verify 16-bit, noninterleaved mono PCM signal format. + RTC_DCHECK_EQ(1, io_data->mNumberBuffers); + AudioBuffer* audio_buffer = &io_data->mBuffers[0]; + RTC_DCHECK_EQ(1, audio_buffer->mNumberChannels); + + // Produce silence and give audio unit a hint about it if playout is not + // activated. + if (!playing_.load(std::memory_order_acquire)) { + const size_t size_in_bytes = audio_buffer->mDataByteSize; + RTC_CHECK_EQ(size_in_bytes / VoiceProcessingAudioUnit::kBytesPerSample, num_frames); + *flags |= kAudioUnitRenderAction_OutputIsSilence; + memset(static_cast<int8_t*>(audio_buffer->mData), 0, size_in_bytes); + return noErr; + } + + // Measure time since last call to OnGetPlayoutData() and see if it is larger + // than a well defined threshold which depends on the current IO buffer size. + // If so, we have an indication of a glitch in the output audio since the + // core audio layer will most likely run dry in this state. 
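  // Worked example (editor's illustration): with a 10 ms native I/O buffer
  // the threshold below becomes 1.6 * 10 = 16 ms, so a 25 ms gap between two
  // render callbacks would be flagged as a possible glitch.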
+  ++num_playout_callbacks_;
+  const int64_t now_time = rtc::TimeMillis();
+  if (time_stamp->mSampleTime != num_frames) {
+    const int64_t delta_time = now_time - last_playout_time_;
+    const int glitch_threshold = 1.6 * playout_parameters_.GetBufferSizeInMilliseconds();
+    if (delta_time > glitch_threshold) {
+      RTCLogWarning(@"Possible playout audio glitch detected.\n"
+                     "  Time since last OnGetPlayoutData was %lld ms.\n",
+                    delta_time);
+      // Exclude extreme delta values since they most likely do not correspond
+      // to a real glitch. Instead, the most probable cause is that a headset
+      // has been plugged in or out. There are more direct ways to detect
+      // audio device changes (see HandleValidRouteChange()) but experiments
+      // show that using it leads to more complex implementations.
+      // TODO(henrika): more tests might be needed to come up with an even
+      // better upper limit.
+      if (glitch_threshold < 120 && delta_time > 120) {
+        RTCLog(@"Glitch warning is ignored. Probably caused by device switch.");
+      } else {
+        thread_->PostTask(SafeTask(safety_, [this] { HandlePlayoutGlitchDetected(); }));
+      }
+    }
+  }
+  last_playout_time_ = now_time;
+
+  // Read decoded 16-bit PCM samples from WebRTC (using a size that matches
+  // the native I/O audio unit) and copy the result to the audio buffer in the
+  // `io_data` destination.
+  fine_audio_buffer_->GetPlayoutData(
+      rtc::ArrayView<int16_t>(static_cast<int16_t*>(audio_buffer->mData), num_frames),
+      kFixedPlayoutDelayEstimate);
+  return noErr;
+}
+
+void AudioDeviceIOS::HandleInterruptionBegin() {
+  RTC_DCHECK_RUN_ON(thread_);
+  RTCLog(@"Interruption begin. IsInterrupted changed from %d to 1.", is_interrupted_);
+  if (audio_unit_ && audio_unit_->GetState() == VoiceProcessingAudioUnit::kStarted) {
+    RTCLog(@"Stopping the audio unit due to interruption begin.");
+    if (!audio_unit_->Stop()) {
+      RTCLogError(@"Failed to stop the audio unit for interruption begin.");
+    }
+    PrepareForNewStart();
+  }
+  is_interrupted_ = true;
+}
+
+void AudioDeviceIOS::HandleInterruptionEnd() {
+  RTC_DCHECK_RUN_ON(thread_);
+  RTCLog(@"Interruption ended. IsInterrupted changed from %d to 0. "
+          "Updating audio unit state.",
+         is_interrupted_);
+  is_interrupted_ = false;
+  if (!audio_unit_) return;
+  if (webrtc::field_trial::IsEnabled("WebRTC-Audio-iOS-Holding")) {
+    // Work around an issue where audio does not restart properly after an
+    // interruption by restarting the audio unit when the interruption ends.
+    if (audio_unit_->GetState() == VoiceProcessingAudioUnit::kStarted) {
+      audio_unit_->Stop();
+      PrepareForNewStart();
+    }
+    if (audio_unit_->GetState() == VoiceProcessingAudioUnit::kInitialized) {
+      audio_unit_->Uninitialize();
+    }
+    // Allocate new buffers given the potentially new stream format.
+    SetupAudioBuffersForActiveAudioSession();
+  }
+  UpdateAudioUnit([RTC_OBJC_TYPE(RTCAudioSession) sharedInstance].canPlayOrRecord);
+}
+
+void AudioDeviceIOS::HandleValidRouteChange() {
+  RTC_DCHECK_RUN_ON(thread_);
+  RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance];
+  RTCLog(@"%@", session);
+  HandleSampleRateChange();
+}
+
+void AudioDeviceIOS::HandleCanPlayOrRecordChange(bool can_play_or_record) {
+  RTCLog(@"Handling CanPlayOrRecord change to: %d", can_play_or_record);
+  UpdateAudioUnit(can_play_or_record);
+}
+
+void AudioDeviceIOS::HandleSampleRateChange() {
+  RTC_DCHECK_RUN_ON(thread_);
+  RTCLog(@"Handling sample rate change.");
+
+  // Don't do anything if we're interrupted.
+ if (is_interrupted_) { + RTCLog(@"Ignoring sample rate change due to interruption."); + return; + } + + // If we don't have an audio unit yet, or the audio unit is uninitialized, + // there is no work to do. + if (!audio_unit_ || audio_unit_->GetState() < VoiceProcessingAudioUnit::kInitialized) { + return; + } + + // The audio unit is already initialized or started. + // Check to see if the sample rate or buffer size has changed. + RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance]; + const double new_sample_rate = session.sampleRate; + const NSTimeInterval session_buffer_duration = session.IOBufferDuration; + const size_t new_frames_per_buffer = + static_cast<size_t>(new_sample_rate * session_buffer_duration + .5); + const double current_sample_rate = playout_parameters_.sample_rate(); + const size_t current_frames_per_buffer = playout_parameters_.frames_per_buffer(); + RTCLog(@"Handling playout sample rate change:\n" + " Session sample rate: %f frames_per_buffer: %lu\n" + " ADM sample rate: %f frames_per_buffer: %lu", + new_sample_rate, + (unsigned long)new_frames_per_buffer, + current_sample_rate, + (unsigned long)current_frames_per_buffer); + + // Sample rate and buffer size are the same, no work to do. + if (std::abs(current_sample_rate - new_sample_rate) <= DBL_EPSILON && + current_frames_per_buffer == new_frames_per_buffer) { + RTCLog(@"Ignoring sample rate change since audio parameters are intact."); + return; + } + + // Extra sanity check to ensure that the new sample rate is valid. + if (new_sample_rate <= 0.0) { + RTCLogError(@"Sample rate is invalid: %f", new_sample_rate); + return; + } + + // We need to adjust our format and buffer sizes. + // The stream format is about to be changed and it requires that we first + // stop and uninitialize the audio unit to deallocate its resources. + RTCLog(@"Stopping and uninitializing audio unit to adjust buffers."); + bool restart_audio_unit = false; + if (audio_unit_->GetState() == VoiceProcessingAudioUnit::kStarted) { + audio_unit_->Stop(); + restart_audio_unit = true; + PrepareForNewStart(); + } + if (audio_unit_->GetState() == VoiceProcessingAudioUnit::kInitialized) { + audio_unit_->Uninitialize(); + } + + // Allocate new buffers given the new stream format. + SetupAudioBuffersForActiveAudioSession(); + + // Initialize the audio unit again with the new sample rate. + if (!audio_unit_->Initialize(playout_parameters_.sample_rate())) { + RTCLogError(@"Failed to initialize the audio unit with sample rate: %d", + playout_parameters_.sample_rate()); + return; + } + + // Restart the audio unit if it was already running. + if (restart_audio_unit) { + OSStatus result = audio_unit_->Start(); + if (result != noErr) { + RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance]; + [session notifyAudioUnitStartFailedWithError:result]; + RTCLogError(@"Failed to start audio unit with sample rate: %d, reason %d", + playout_parameters_.sample_rate(), + result); + return; + } + } + RTCLog(@"Successfully handled sample rate change."); +} + +void AudioDeviceIOS::HandlePlayoutGlitchDetected() { + RTC_DCHECK_RUN_ON(thread_); + // Don't update metrics if we're interrupted since a "glitch" is expected + // in this state. + if (is_interrupted_) { + RTCLog(@"Ignoring audio glitch due to interruption."); + return; + } + // Avoid doing glitch detection for two seconds after a volume change + // has been detected to reduce the risk of false alarm. 
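  // Worked example (editor's illustration): if the output volume changed at
  // t = 10000 ms, a glitch detected at t = 11500 ms is ignored since
  // rtc::TimeSince() is still below the 2000 ms window checked below.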
+  if (last_output_volume_change_time_ > 0 &&
+      rtc::TimeSince(last_output_volume_change_time_) < 2000) {
+    RTCLog(@"Ignoring audio glitch due to recent output volume change.");
+    return;
+  }
+  num_detected_playout_glitches_++;
+  RTCLog(@"Number of detected playout glitches: %lld", num_detected_playout_glitches_);
+
+  int64_t glitch_count = num_detected_playout_glitches_;
+  dispatch_async(dispatch_get_main_queue(), ^{
+    RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance];
+    [session notifyDidDetectPlayoutGlitch:glitch_count];
+  });
+}
+
+void AudioDeviceIOS::HandleOutputVolumeChange() {
+  RTC_DCHECK_RUN_ON(thread_);
+  RTCLog(@"Output volume change detected.");
+  // Store time of this detection so it can be used to defer detection of
+  // glitches too close in time to this event.
+  last_output_volume_change_time_ = rtc::TimeMillis();
+}
+
+void AudioDeviceIOS::UpdateAudioDeviceBuffer() {
+  LOGI() << "UpdateAudioDevicebuffer";
+  // AttachAudioBuffer() is called at construction by the main class but check
+  // just in case.
+  RTC_DCHECK(audio_device_buffer_) << "AttachAudioBuffer must be called first";
+  RTC_DCHECK_GT(playout_parameters_.sample_rate(), 0);
+  RTC_DCHECK_GT(record_parameters_.sample_rate(), 0);
+  RTC_DCHECK_EQ(playout_parameters_.channels(), 1);
+  RTC_DCHECK_EQ(record_parameters_.channels(), 1);
+  // Inform the audio device buffer (ADB) about the new audio format.
+  audio_device_buffer_->SetPlayoutSampleRate(playout_parameters_.sample_rate());
+  audio_device_buffer_->SetPlayoutChannels(playout_parameters_.channels());
+  audio_device_buffer_->SetRecordingSampleRate(record_parameters_.sample_rate());
+  audio_device_buffer_->SetRecordingChannels(record_parameters_.channels());
+}
+
+void AudioDeviceIOS::SetupAudioBuffersForActiveAudioSession() {
+  LOGI() << "SetupAudioBuffersForActiveAudioSession";
+  // Verify the current values once the audio session has been activated.
+  RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance];
+  double sample_rate = session.sampleRate;
+  NSTimeInterval io_buffer_duration = session.IOBufferDuration;
+  RTCLog(@"%@", session);
+
+  // Log a warning message for the case when we are unable to set the preferred
+  // hardware sample rate but continue and use the non-ideal sample rate after
+  // reinitializing the audio parameters. Most BT headsets only support 8kHz or
+  // 16kHz.
+  RTC_OBJC_TYPE(RTCAudioSessionConfiguration)* webRTCConfig =
+      [RTC_OBJC_TYPE(RTCAudioSessionConfiguration) webRTCConfiguration];
+  if (sample_rate != webRTCConfig.sampleRate) {
+    RTC_LOG(LS_WARNING) << "Unable to set the preferred sample rate";
+  }
+
+  // Crash reports indicate that, in rare cases, the reported sample rate can
+  // be less than or equal to zero. If that happens and if a valid sample rate
+  // has already been set during initialization, the best guess we can do is
+  // to reuse the current sample rate.
+  if (sample_rate <= DBL_EPSILON && playout_parameters_.sample_rate() > 0) {
+    RTCLogError(@"Reported rate is invalid: %f. "
+                 "Using %d as sample rate instead.",
+                sample_rate, playout_parameters_.sample_rate());
+    sample_rate = playout_parameters_.sample_rate();
+  }
+
+  // At this stage, we also know the exact IO buffer duration and can add
+  // that info to the existing audio parameters where it is converted into
+  // number of audio frames.
+  // Example: IO buffer size = 0.008 seconds <=> 128 audio frames at 16kHz.
+ // Hence, 128 is the size we expect to see in upcoming render callbacks. + playout_parameters_.reset(sample_rate, playout_parameters_.channels(), io_buffer_duration); + RTC_DCHECK(playout_parameters_.is_complete()); + record_parameters_.reset(sample_rate, record_parameters_.channels(), io_buffer_duration); + RTC_DCHECK(record_parameters_.is_complete()); + RTC_LOG(LS_INFO) << " frames per I/O buffer: " << playout_parameters_.frames_per_buffer(); + RTC_LOG(LS_INFO) << " bytes per I/O buffer: " << playout_parameters_.GetBytesPerBuffer(); + RTC_DCHECK_EQ(playout_parameters_.GetBytesPerBuffer(), record_parameters_.GetBytesPerBuffer()); + + // Update the ADB parameters since the sample rate might have changed. + UpdateAudioDeviceBuffer(); + + // Create a modified audio buffer class which allows us to ask for, + // or deliver, any number of samples (and not only multiple of 10ms) to match + // the native audio unit buffer size. + RTC_DCHECK(audio_device_buffer_); + fine_audio_buffer_.reset(new FineAudioBuffer(audio_device_buffer_)); +} + +bool AudioDeviceIOS::CreateAudioUnit() { + RTC_DCHECK(!audio_unit_); + + audio_unit_.reset(new VoiceProcessingAudioUnit(bypass_voice_processing_, this)); + if (!audio_unit_->Init()) { + audio_unit_.reset(); + return false; + } + + return true; +} + +void AudioDeviceIOS::UpdateAudioUnit(bool can_play_or_record) { + RTC_DCHECK_RUN_ON(thread_); + RTCLog(@"Updating audio unit state. CanPlayOrRecord=%d IsInterrupted=%d", + can_play_or_record, + is_interrupted_); + + if (is_interrupted_) { + RTCLog(@"Ignoring audio unit update due to interruption."); + return; + } + + // If we're not initialized we don't need to do anything. Audio unit will + // be initialized on initialization. + if (!audio_is_initialized_) return; + + // If we're initialized, we must have an audio unit. + RTC_DCHECK(audio_unit_); + + bool should_initialize_audio_unit = false; + bool should_uninitialize_audio_unit = false; + bool should_start_audio_unit = false; + bool should_stop_audio_unit = false; + + switch (audio_unit_->GetState()) { + case VoiceProcessingAudioUnit::kInitRequired: + RTCLog(@"VPAU state: InitRequired"); + RTC_DCHECK_NOTREACHED(); + break; + case VoiceProcessingAudioUnit::kUninitialized: + RTCLog(@"VPAU state: Uninitialized"); + should_initialize_audio_unit = can_play_or_record; + should_start_audio_unit = + should_initialize_audio_unit && (playing_.load() || recording_.load()); + break; + case VoiceProcessingAudioUnit::kInitialized: + RTCLog(@"VPAU state: Initialized"); + should_start_audio_unit = can_play_or_record && (playing_.load() || recording_.load()); + should_uninitialize_audio_unit = !can_play_or_record; + break; + case VoiceProcessingAudioUnit::kStarted: + RTCLog(@"VPAU state: Started"); + RTC_DCHECK(playing_.load() || recording_.load()); + should_stop_audio_unit = !can_play_or_record; + should_uninitialize_audio_unit = should_stop_audio_unit; + break; + } + + if (should_initialize_audio_unit) { + RTCLog(@"Initializing audio unit for UpdateAudioUnit"); + ConfigureAudioSession(); + SetupAudioBuffersForActiveAudioSession(); + if (!audio_unit_->Initialize(playout_parameters_.sample_rate())) { + RTCLogError(@"Failed to initialize audio unit."); + return; + } + } + + if (should_start_audio_unit) { + RTCLog(@"Starting audio unit for UpdateAudioUnit"); + // Log session settings before trying to start audio streaming. 
+ RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance]; + RTCLog(@"%@", session); + OSStatus result = audio_unit_->Start(); + if (result != noErr) { + [session notifyAudioUnitStartFailedWithError:result]; + RTCLogError(@"Failed to start audio unit, reason %d", result); + return; + } + } + + if (should_stop_audio_unit) { + RTCLog(@"Stopping audio unit for UpdateAudioUnit"); + if (!audio_unit_->Stop()) { + RTCLogError(@"Failed to stop audio unit."); + PrepareForNewStart(); + return; + } + PrepareForNewStart(); + } + + if (should_uninitialize_audio_unit) { + RTCLog(@"Uninitializing audio unit for UpdateAudioUnit"); + audio_unit_->Uninitialize(); + UnconfigureAudioSession(); + } +} + +bool AudioDeviceIOS::ConfigureAudioSession() { + RTC_DCHECK_RUN_ON(thread_); + RTCLog(@"Configuring audio session."); + if (has_configured_session_) { + RTCLogWarning(@"Audio session already configured."); + return false; + } + RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance]; + [session lockForConfiguration]; + bool success = [session configureWebRTCSession:nil]; + [session unlockForConfiguration]; + if (success) { + has_configured_session_ = true; + RTCLog(@"Configured audio session."); + } else { + RTCLog(@"Failed to configure audio session."); + } + return success; +} + +bool AudioDeviceIOS::ConfigureAudioSessionLocked() { + RTC_DCHECK_RUN_ON(thread_); + RTCLog(@"Configuring audio session."); + if (has_configured_session_) { + RTCLogWarning(@"Audio session already configured."); + return false; + } + RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance]; + bool success = [session configureWebRTCSession:nil]; + if (success) { + has_configured_session_ = true; + RTCLog(@"Configured audio session."); + } else { + RTCLog(@"Failed to configure audio session."); + } + return success; +} + +void AudioDeviceIOS::UnconfigureAudioSession() { + RTC_DCHECK_RUN_ON(thread_); + RTCLog(@"Unconfiguring audio session."); + if (!has_configured_session_) { + RTCLogWarning(@"Audio session already unconfigured."); + return; + } + RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance]; + [session lockForConfiguration]; + [session unconfigureWebRTCSession:nil]; + [session endWebRTCSession:nil]; + [session unlockForConfiguration]; + has_configured_session_ = false; + RTCLog(@"Unconfigured audio session."); +} + +bool AudioDeviceIOS::InitPlayOrRecord() { + LOGI() << "InitPlayOrRecord"; + RTC_DCHECK_RUN_ON(thread_); + + // There should be no audio unit at this point. + if (!CreateAudioUnit()) { + return false; + } + + RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance]; + // Subscribe to audio session events. + [session pushDelegate:audio_session_observer_]; + is_interrupted_ = session.isInterrupted ? true : false; + + // Lock the session to make configuration changes. + [session lockForConfiguration]; + NSError* error = nil; + if (![session beginWebRTCSession:&error]) { + [session unlockForConfiguration]; + RTCLogError(@"Failed to begin WebRTC session: %@", error.localizedDescription); + audio_unit_.reset(); + return false; + } + + // If we are ready to play or record, and if the audio session can be + // configured, then initialize the audio unit. 
+ if (session.canPlayOrRecord) { + if (!ConfigureAudioSessionLocked()) { + // One possible reason for failure is if an attempt was made to use the + // audio session during or after a Media Services failure. + // See AVAudioSessionErrorCodeMediaServicesFailed for details. + [session unlockForConfiguration]; + audio_unit_.reset(); + return false; + } + SetupAudioBuffersForActiveAudioSession(); + audio_unit_->Initialize(playout_parameters_.sample_rate()); + } + + // Release the lock. + [session unlockForConfiguration]; + return true; +} + +void AudioDeviceIOS::ShutdownPlayOrRecord() { + LOGI() << "ShutdownPlayOrRecord"; + RTC_DCHECK_RUN_ON(thread_); + + // Stop the audio unit to prevent any additional audio callbacks. + audio_unit_->Stop(); + + // Close and delete the voice-processing I/O unit. + audio_unit_.reset(); + + // Detach thread checker for the AURemoteIO::IOThread to ensure that the + // next session uses a fresh thread id. + io_thread_checker_.Detach(); + + // Remove audio session notification observers. + RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance]; + [session removeDelegate:audio_session_observer_]; + + // All I/O should be stopped or paused prior to deactivating the audio + // session, hence we deactivate as last action. + UnconfigureAudioSession(); +} + +void AudioDeviceIOS::PrepareForNewStart() { + LOGI() << "PrepareForNewStart"; + // The audio unit has been stopped and preparations are needed for an upcoming + // restart. It will result in audio callbacks from a new native I/O thread + // which means that we must detach thread checkers here to be prepared for an + // upcoming new audio stream. + io_thread_checker_.Detach(); +} + +bool AudioDeviceIOS::IsInterrupted() { + return is_interrupted_; +} + +#pragma mark - Not Implemented + +int32_t AudioDeviceIOS::ActiveAudioLayer(AudioDeviceModule::AudioLayer& audioLayer) const { + audioLayer = AudioDeviceModule::kPlatformDefaultAudio; + return 0; +} + +int16_t AudioDeviceIOS::PlayoutDevices() { + // TODO(henrika): improve. + RTC_LOG_F(LS_WARNING) << "Not implemented"; + return (int16_t)1; +} + +int16_t AudioDeviceIOS::RecordingDevices() { + // TODO(henrika): improve. 
+ RTC_LOG_F(LS_WARNING) << "Not implemented"; + return (int16_t)1; +} + +int32_t AudioDeviceIOS::InitSpeaker() { + return 0; +} + +bool AudioDeviceIOS::SpeakerIsInitialized() const { + return true; +} + +int32_t AudioDeviceIOS::SpeakerVolumeIsAvailable(bool& available) { + available = false; + return 0; +} + +int32_t AudioDeviceIOS::SetSpeakerVolume(uint32_t volume) { + RTC_DCHECK_NOTREACHED() << "Not implemented"; + return -1; +} + +int32_t AudioDeviceIOS::SpeakerVolume(uint32_t& volume) const { + RTC_DCHECK_NOTREACHED() << "Not implemented"; + return -1; +} + +int32_t AudioDeviceIOS::MaxSpeakerVolume(uint32_t& maxVolume) const { + RTC_DCHECK_NOTREACHED() << "Not implemented"; + return -1; +} + +int32_t AudioDeviceIOS::MinSpeakerVolume(uint32_t& minVolume) const { + RTC_DCHECK_NOTREACHED() << "Not implemented"; + return -1; +} + +int32_t AudioDeviceIOS::SpeakerMuteIsAvailable(bool& available) { + available = false; + return 0; +} + +int32_t AudioDeviceIOS::SetSpeakerMute(bool enable) { + RTC_DCHECK_NOTREACHED() << "Not implemented"; + return -1; +} + +int32_t AudioDeviceIOS::SpeakerMute(bool& enabled) const { + RTC_DCHECK_NOTREACHED() << "Not implemented"; + return -1; +} + +int32_t AudioDeviceIOS::SetPlayoutDevice(uint16_t index) { + RTC_LOG_F(LS_WARNING) << "Not implemented"; + return 0; +} + +int32_t AudioDeviceIOS::SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType) { + RTC_DCHECK_NOTREACHED() << "Not implemented"; + return -1; +} + +int32_t AudioDeviceIOS::InitMicrophone() { + return 0; +} + +bool AudioDeviceIOS::MicrophoneIsInitialized() const { + return true; +} + +int32_t AudioDeviceIOS::MicrophoneMuteIsAvailable(bool& available) { + available = false; + return 0; +} + +int32_t AudioDeviceIOS::SetMicrophoneMute(bool enable) { + RTC_DCHECK_NOTREACHED() << "Not implemented"; + return -1; +} + +int32_t AudioDeviceIOS::MicrophoneMute(bool& enabled) const { + RTC_DCHECK_NOTREACHED() << "Not implemented"; + return -1; +} + +int32_t AudioDeviceIOS::StereoRecordingIsAvailable(bool& available) { + available = false; + return 0; +} + +int32_t AudioDeviceIOS::SetStereoRecording(bool enable) { + RTC_LOG_F(LS_WARNING) << "Not implemented"; + return -1; +} + +int32_t AudioDeviceIOS::StereoRecording(bool& enabled) const { + enabled = false; + return 0; +} + +int32_t AudioDeviceIOS::StereoPlayoutIsAvailable(bool& available) { + available = false; + return 0; +} + +int32_t AudioDeviceIOS::SetStereoPlayout(bool enable) { + RTC_LOG_F(LS_WARNING) << "Not implemented"; + return -1; +} + +int32_t AudioDeviceIOS::StereoPlayout(bool& enabled) const { + enabled = false; + return 0; +} + +int32_t AudioDeviceIOS::MicrophoneVolumeIsAvailable(bool& available) { + available = false; + return 0; +} + +int32_t AudioDeviceIOS::SetMicrophoneVolume(uint32_t volume) { + RTC_DCHECK_NOTREACHED() << "Not implemented"; + return -1; +} + +int32_t AudioDeviceIOS::MicrophoneVolume(uint32_t& volume) const { + RTC_DCHECK_NOTREACHED() << "Not implemented"; + return -1; +} + +int32_t AudioDeviceIOS::MaxMicrophoneVolume(uint32_t& maxVolume) const { + RTC_DCHECK_NOTREACHED() << "Not implemented"; + return -1; +} + +int32_t AudioDeviceIOS::MinMicrophoneVolume(uint32_t& minVolume) const { + RTC_DCHECK_NOTREACHED() << "Not implemented"; + return -1; +} + +int32_t AudioDeviceIOS::PlayoutDeviceName(uint16_t index, + char name[kAdmMaxDeviceNameSize], + char guid[kAdmMaxGuidSize]) { + RTC_DCHECK_NOTREACHED() << "Not implemented"; + return -1; +} + +int32_t AudioDeviceIOS::RecordingDeviceName(uint16_t index, + char 
name[kAdmMaxDeviceNameSize],
+                                            char guid[kAdmMaxGuidSize]) {
+  RTC_DCHECK_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::SetRecordingDevice(uint16_t index) {
+  RTC_LOG_F(LS_WARNING) << "Not implemented";
+  return 0;
+}
+
+int32_t AudioDeviceIOS::SetRecordingDevice(AudioDeviceModule::WindowsDeviceType) {
+  RTC_DCHECK_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::PlayoutIsAvailable(bool& available) {
+  available = true;
+  return 0;
+}
+
+int32_t AudioDeviceIOS::RecordingIsAvailable(bool& available) {
+  available = true;
+  return 0;
+}
+
+}  // namespace ios_adm
+}  // namespace webrtc
diff --git a/third_party/libwebrtc/sdk/objc/native/src/audio/audio_device_module_ios.h b/third_party/libwebrtc/sdk/objc/native/src/audio/audio_device_module_ios.h
new file mode 100644
index 0000000000..9bcf114e32
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/native/src/audio/audio_device_module_ios.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SDK_OBJC_NATIVE_SRC_AUDIO_AUDIO_DEVICE_MODULE_IOS_H_
+#define SDK_OBJC_NATIVE_SRC_AUDIO_AUDIO_DEVICE_MODULE_IOS_H_
+
+#include <memory>
+
+#include "audio_device_ios.h"
+
+#include "api/task_queue/task_queue_factory.h"
+#include "modules/audio_device/audio_device_buffer.h"
+#include "modules/audio_device/include/audio_device.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+class AudioDeviceGeneric;
+
+namespace ios_adm {
+
+class AudioDeviceModuleIOS : public AudioDeviceModule {
+ public:
+  int32_t AttachAudioBuffer();
+
+  explicit AudioDeviceModuleIOS(bool bypass_voice_processing);
+  ~AudioDeviceModuleIOS() override;
+
+  // Retrieve the currently utilized audio layer
+  int32_t ActiveAudioLayer(AudioLayer* audioLayer) const override;
+
+  // Full-duplex transportation of PCM audio
+  int32_t RegisterAudioCallback(AudioTransport* audioCallback) override;
+
+  // Main initialization and termination
+  int32_t Init() override;
+  int32_t Terminate() override;
+  bool Initialized() const override;
+
+  // Device enumeration
+  int16_t PlayoutDevices() override;
+  int16_t RecordingDevices() override;
+  int32_t PlayoutDeviceName(uint16_t index,
+                            char name[kAdmMaxDeviceNameSize],
+                            char guid[kAdmMaxGuidSize]) override;
+  int32_t RecordingDeviceName(uint16_t index,
+                              char name[kAdmMaxDeviceNameSize],
+                              char guid[kAdmMaxGuidSize]) override;
+
+  // Device selection
+  int32_t SetPlayoutDevice(uint16_t index) override;
+  int32_t SetPlayoutDevice(WindowsDeviceType device) override;
+  int32_t SetRecordingDevice(uint16_t index) override;
+  int32_t SetRecordingDevice(WindowsDeviceType device) override;
+
+  // Audio transport initialization
+  int32_t PlayoutIsAvailable(bool* available) override;
+  int32_t InitPlayout() override;
+  bool PlayoutIsInitialized() const override;
+  int32_t RecordingIsAvailable(bool* available) override;
+  int32_t InitRecording() override;
+  bool RecordingIsInitialized() const override;
+
+  // Audio transport control
+  int32_t StartPlayout() override;
+  int32_t StopPlayout() override;
+  bool Playing() const override;
+  int32_t StartRecording() override;
+  int32_t StopRecording() override;
+  bool
Recording() const override; + + // Audio mixer initialization + int32_t InitSpeaker() override; + bool SpeakerIsInitialized() const override; + int32_t InitMicrophone() override; + bool MicrophoneIsInitialized() const override; + + // Speaker volume controls + int32_t SpeakerVolumeIsAvailable(bool* available) override; + int32_t SetSpeakerVolume(uint32_t volume) override; + int32_t SpeakerVolume(uint32_t* volume) const override; + int32_t MaxSpeakerVolume(uint32_t* maxVolume) const override; + int32_t MinSpeakerVolume(uint32_t* minVolume) const override; + + // Microphone volume controls + int32_t MicrophoneVolumeIsAvailable(bool* available) override; + int32_t SetMicrophoneVolume(uint32_t volume) override; + int32_t MicrophoneVolume(uint32_t* volume) const override; + int32_t MaxMicrophoneVolume(uint32_t* maxVolume) const override; + int32_t MinMicrophoneVolume(uint32_t* minVolume) const override; + + // Speaker mute control + int32_t SpeakerMuteIsAvailable(bool* available) override; + int32_t SetSpeakerMute(bool enable) override; + int32_t SpeakerMute(bool* enabled) const override; + + // Microphone mute control + int32_t MicrophoneMuteIsAvailable(bool* available) override; + int32_t SetMicrophoneMute(bool enable) override; + int32_t MicrophoneMute(bool* enabled) const override; + + // Stereo support + int32_t StereoPlayoutIsAvailable(bool* available) const override; + int32_t SetStereoPlayout(bool enable) override; + int32_t StereoPlayout(bool* enabled) const override; + int32_t StereoRecordingIsAvailable(bool* available) const override; + int32_t SetStereoRecording(bool enable) override; + int32_t StereoRecording(bool* enabled) const override; + + // Delay information and control + int32_t PlayoutDelay(uint16_t* delayMS) const override; + + bool BuiltInAECIsAvailable() const override; + int32_t EnableBuiltInAEC(bool enable) override; + bool BuiltInAGCIsAvailable() const override; + int32_t EnableBuiltInAGC(bool enable) override; + bool BuiltInNSIsAvailable() const override; + int32_t EnableBuiltInNS(bool enable) override; + + int32_t GetPlayoutUnderrunCount() const override; + +#if defined(WEBRTC_IOS) + int GetPlayoutAudioParameters(AudioParameters* params) const override; + int GetRecordAudioParameters(AudioParameters* params) const override; +#endif // WEBRTC_IOS + private: + const bool bypass_voice_processing_; + bool initialized_ = false; + const std::unique_ptr<TaskQueueFactory> task_queue_factory_; + std::unique_ptr<AudioDeviceIOS> audio_device_; + std::unique_ptr<AudioDeviceBuffer> audio_device_buffer_; +}; +} // namespace ios_adm +} // namespace webrtc + +#endif // SDK_OBJC_NATIVE_SRC_AUDIO_AUDIO_DEVICE_MODULE_IOS_H_ diff --git a/third_party/libwebrtc/sdk/objc/native/src/audio/audio_device_module_ios.mm b/third_party/libwebrtc/sdk/objc/native/src/audio/audio_device_module_ios.mm new file mode 100644 index 0000000000..5effef3abd --- /dev/null +++ b/third_party/libwebrtc/sdk/objc/native/src/audio/audio_device_module_ios.mm @@ -0,0 +1,669 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */
+
+#include "audio_device_module_ios.h"
+
+#include "api/task_queue/default_task_queue_factory.h"
+#include "modules/audio_device/audio_device_config.h"
+#include "modules/audio_device/audio_device_generic.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/ref_count.h"
+#include "system_wrappers/include/metrics.h"
+
+#if defined(WEBRTC_IOS)
+#include "audio_device_ios.h"
+#endif
+
+#define CHECKinitialized_() \
+  {                         \
+    if (!initialized_) {    \
+      return -1;            \
+    };                      \
+  }
+
+#define CHECKinitialized__BOOL() \
+  {                              \
+    if (!initialized_) {         \
+      return false;              \
+    };                           \
+  }
+
+namespace webrtc {
+namespace ios_adm {
+
+AudioDeviceModuleIOS::AudioDeviceModuleIOS(bool bypass_voice_processing)
+    : bypass_voice_processing_(bypass_voice_processing),
+      task_queue_factory_(CreateDefaultTaskQueueFactory()) {
+  RTC_LOG(LS_INFO) << "current platform is iOS";
+  RTC_LOG(LS_INFO) << "iPhone Audio APIs will be utilized.";
+}
+
+  int32_t AudioDeviceModuleIOS::AttachAudioBuffer() {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    audio_device_->AttachAudioBuffer(audio_device_buffer_.get());
+    return 0;
+  }
+
+  AudioDeviceModuleIOS::~AudioDeviceModuleIOS() {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+  }
+
+  int32_t AudioDeviceModuleIOS::ActiveAudioLayer(AudioLayer* audioLayer) const {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    AudioLayer activeAudio;
+    if (audio_device_->ActiveAudioLayer(activeAudio) == -1) {
+      return -1;
+    }
+    *audioLayer = activeAudio;
+    return 0;
+  }
+
+  int32_t AudioDeviceModuleIOS::Init() {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    if (initialized_)
+      return 0;
+
+    audio_device_buffer_.reset(new webrtc::AudioDeviceBuffer(task_queue_factory_.get()));
+    audio_device_.reset(new ios_adm::AudioDeviceIOS(bypass_voice_processing_));
+    RTC_CHECK(audio_device_);
+
+    this->AttachAudioBuffer();
+
+    AudioDeviceGeneric::InitStatus status = audio_device_->Init();
+    RTC_HISTOGRAM_ENUMERATION(
+        "WebRTC.Audio.InitializationResult", static_cast<int>(status),
+        static_cast<int>(AudioDeviceGeneric::InitStatus::NUM_STATUSES));
+    if (status != AudioDeviceGeneric::InitStatus::OK) {
+      RTC_LOG(LS_ERROR) << "Audio device initialization failed.";
+      return -1;
+    }
+    initialized_ = true;
+    return 0;
+  }
+
+  int32_t AudioDeviceModuleIOS::Terminate() {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    if (!initialized_)
+      return 0;
+    if (audio_device_->Terminate() == -1) {
+      return -1;
+    }
+    initialized_ = false;
+    return 0;
+  }
+
+  bool AudioDeviceModuleIOS::Initialized() const {
+    RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << initialized_;
+    return initialized_;
+  }
+
+  int32_t AudioDeviceModuleIOS::InitSpeaker() {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    return audio_device_->InitSpeaker();
+  }
+
+  int32_t AudioDeviceModuleIOS::InitMicrophone() {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    return audio_device_->InitMicrophone();
+  }
+
+  int32_t AudioDeviceModuleIOS::SpeakerVolumeIsAvailable(bool* available) {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    bool isAvailable = false;
+    if (audio_device_->SpeakerVolumeIsAvailable(isAvailable) == -1) {
+      return -1;
+    }
+    *available = isAvailable;
+    RTC_DLOG(LS_INFO) << "output: " << isAvailable;
+    return 0;
+  }
+
+  int32_t AudioDeviceModuleIOS::SetSpeakerVolume(uint32_t volume) {
+    RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << volume << ")";
+    CHECKinitialized_();
+    return audio_device_->SetSpeakerVolume(volume);
+  }
+
+  int32_t AudioDeviceModuleIOS::SpeakerVolume(uint32_t* volume) const {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    uint32_t level = 0;
+    if (audio_device_->SpeakerVolume(level) == -1) {
+      return -1;
+    }
+    *volume = level;
+    RTC_DLOG(LS_INFO) << "output: " << *volume;
+    return 0;
+  }
+
+  bool AudioDeviceModuleIOS::SpeakerIsInitialized() const {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    CHECKinitialized__BOOL();
+    bool isInitialized = audio_device_->SpeakerIsInitialized();
+    RTC_DLOG(LS_INFO) << "output: " << isInitialized;
+    return isInitialized;
+  }
+
+  bool AudioDeviceModuleIOS::MicrophoneIsInitialized() const {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    CHECKinitialized__BOOL();
+    bool isInitialized = audio_device_->MicrophoneIsInitialized();
+    RTC_DLOG(LS_INFO) << "output: " << isInitialized;
+    return isInitialized;
+  }
+
+  int32_t AudioDeviceModuleIOS::MaxSpeakerVolume(uint32_t* maxVolume) const {
+    CHECKinitialized_();
+    uint32_t maxVol = 0;
+    if (audio_device_->MaxSpeakerVolume(maxVol) == -1) {
+      return -1;
+    }
+    *maxVolume = maxVol;
+    return 0;
+  }
+
+  int32_t AudioDeviceModuleIOS::MinSpeakerVolume(uint32_t* minVolume) const {
+    CHECKinitialized_();
+    uint32_t minVol = 0;
+    if (audio_device_->MinSpeakerVolume(minVol) == -1) {
+      return -1;
+    }
+    *minVolume = minVol;
+    return 0;
+  }
+
+  int32_t AudioDeviceModuleIOS::SpeakerMuteIsAvailable(bool* available) {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    bool isAvailable = false;
+    if (audio_device_->SpeakerMuteIsAvailable(isAvailable) == -1) {
+      return -1;
+    }
+    *available = isAvailable;
+    RTC_DLOG(LS_INFO) << "output: " << isAvailable;
+    return 0;
+  }
+
+  int32_t AudioDeviceModuleIOS::SetSpeakerMute(bool enable) {
+    RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")";
+    CHECKinitialized_();
+    return audio_device_->SetSpeakerMute(enable);
+  }
+
+  int32_t AudioDeviceModuleIOS::SpeakerMute(bool* enabled) const {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    bool muted = false;
+    if (audio_device_->SpeakerMute(muted) == -1) {
+      return -1;
+    }
+    *enabled = muted;
+    RTC_DLOG(LS_INFO) << "output: " << muted;
+    return 0;
+  }
+
+  int32_t AudioDeviceModuleIOS::MicrophoneMuteIsAvailable(bool* available) {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    bool isAvailable = false;
+    if (audio_device_->MicrophoneMuteIsAvailable(isAvailable) == -1) {
+      return -1;
+    }
+    *available = isAvailable;
+    RTC_DLOG(LS_INFO) << "output: " << isAvailable;
+    return 0;
+  }
+
+  int32_t AudioDeviceModuleIOS::SetMicrophoneMute(bool enable) {
+    RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")";
+    CHECKinitialized_();
+    return (audio_device_->SetMicrophoneMute(enable));
+  }
+
+  int32_t AudioDeviceModuleIOS::MicrophoneMute(bool* enabled) const {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    bool muted = false;
+    if (audio_device_->MicrophoneMute(muted) == -1) {
+      return -1;
+    }
+    *enabled = muted;
+    RTC_DLOG(LS_INFO) << "output: " << muted;
+    return 0;
+  }
+
+  int32_t AudioDeviceModuleIOS::MicrophoneVolumeIsAvailable(bool* available) {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    bool isAvailable = false;
+    if (audio_device_->MicrophoneVolumeIsAvailable(isAvailable) == -1) {
+      return -1;
+    }
+    *available = isAvailable;
+    RTC_DLOG(LS_INFO) << "output: " << isAvailable;
+    return 0;
+  }
+
+  int32_t AudioDeviceModuleIOS::SetMicrophoneVolume(uint32_t volume) {
+    RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << volume << ")";
+    CHECKinitialized_();
+    return (audio_device_->SetMicrophoneVolume(volume));
+  }
+
+  int32_t AudioDeviceModuleIOS::MicrophoneVolume(uint32_t* volume) const {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    uint32_t level = 0;
+    if (audio_device_->MicrophoneVolume(level) == -1) {
+      return -1;
+    }
+    *volume = level;
+    RTC_DLOG(LS_INFO) << "output: " << *volume;
+    return 0;
+  }
+
+  int32_t AudioDeviceModuleIOS::StereoRecordingIsAvailable(
+      bool* available) const {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    bool isAvailable = false;
+    if (audio_device_->StereoRecordingIsAvailable(isAvailable) == -1) {
+      return -1;
+    }
+    *available = isAvailable;
+    RTC_DLOG(LS_INFO) << "output: " << isAvailable;
+    return 0;
+  }
+
+  int32_t AudioDeviceModuleIOS::SetStereoRecording(bool enable) {
+    RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")";
+    CHECKinitialized_();
+    if (enable) {
+      RTC_LOG(LS_WARNING) << "recording in stereo is not supported";
+    }
+    return -1;
+  }
+
+  int32_t AudioDeviceModuleIOS::StereoRecording(bool* enabled) const {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    bool stereo = false;
+    if (audio_device_->StereoRecording(stereo) == -1) {
+      return -1;
+    }
+    *enabled = stereo;
+    RTC_DLOG(LS_INFO) << "output: " << stereo;
+    return 0;
+  }
+
+  int32_t AudioDeviceModuleIOS::StereoPlayoutIsAvailable(bool* available) const {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    bool isAvailable = false;
+    if (audio_device_->StereoPlayoutIsAvailable(isAvailable) == -1) {
+      return -1;
+    }
+    *available = isAvailable;
+    RTC_DLOG(LS_INFO) << "output: " << isAvailable;
+    return 0;
+  }
+
+  int32_t AudioDeviceModuleIOS::SetStereoPlayout(bool enable) {
+    RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")";
+    CHECKinitialized_();
+    if (audio_device_->PlayoutIsInitialized()) {
+      RTC_LOG(LS_ERROR) << "unable to set stereo mode while playout is initialized";
+      return -1;
+    }
+    if (audio_device_->SetStereoPlayout(enable)) {
+      RTC_LOG(LS_WARNING) << "stereo playout is not supported";
+      return -1;
+    }
+    int8_t nChannels(1);
+    if (enable) {
+      nChannels = 2;
+    }
+    audio_device_buffer_.get()->SetPlayoutChannels(nChannels);
+    return 0;
+  }
+
+  int32_t AudioDeviceModuleIOS::StereoPlayout(bool* enabled) const {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    bool stereo = false;
+    if (audio_device_->StereoPlayout(stereo) == -1) {
+      return -1;
+    }
+    *enabled = stereo;
+    RTC_DLOG(LS_INFO) << "output: " << stereo;
+    return 0;
+  }
+
+  int32_t AudioDeviceModuleIOS::PlayoutIsAvailable(bool* available) {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    bool isAvailable = false;
+    if (audio_device_->PlayoutIsAvailable(isAvailable) == -1) {
+      return -1;
+    }
+    *available = isAvailable;
+    RTC_DLOG(LS_INFO) << "output: " << isAvailable;
+    return 0;
+  }
+
+  int32_t AudioDeviceModuleIOS::RecordingIsAvailable(bool* available) {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    bool isAvailable = false;
+    if (audio_device_->RecordingIsAvailable(isAvailable) == -1) {
+      return -1;
+    }
+    *available = isAvailable;
+    RTC_DLOG(LS_INFO) << "output: " << isAvailable;
+    return 0;
+  }
+
+  int32_t AudioDeviceModuleIOS::MaxMicrophoneVolume(uint32_t* maxVolume) const {
+    CHECKinitialized_();
+    uint32_t maxVol(0);
+    if (audio_device_->MaxMicrophoneVolume(maxVol) == -1) {
+      return -1;
+    }
+    *maxVolume = maxVol;
+    return 0;
+  }
+
+  int32_t AudioDeviceModuleIOS::MinMicrophoneVolume(uint32_t* minVolume) const {
+    CHECKinitialized_();
+    uint32_t minVol(0);
+    if (audio_device_->MinMicrophoneVolume(minVol) == -1) {
+      return -1;
+    }
+    *minVolume = minVol;
+    return 0;
+  }
+
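+  // A usage sketch (illustrative only, not in the original file; `transport`
+  // stands in for a client's AudioTransport implementation). A typical client
+  // drives this module as:
+  //
+  //   AudioDeviceModuleIOS adm(/*bypass_voice_processing=*/false);
+  //   adm.Init();
+  //   adm.RegisterAudioCallback(transport);
+  //   adm.InitPlayout();
+  //   adm.StartPlayout();
+  //   adm.InitRecording();
+  //   adm.StartRecording();
+  //   ...
+  //   adm.StopRecording();
+  //   adm.StopPlayout();
+  //   adm.Terminate();
+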
+  int16_t AudioDeviceModuleIOS::PlayoutDevices() {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    uint16_t nPlayoutDevices = audio_device_->PlayoutDevices();
+    RTC_DLOG(LS_INFO) << "output: " << nPlayoutDevices;
+    return (int16_t)(nPlayoutDevices);
+  }
+
+  int32_t AudioDeviceModuleIOS::SetPlayoutDevice(uint16_t index) {
+    RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << index << ")";
+    CHECKinitialized_();
+    return audio_device_->SetPlayoutDevice(index);
+  }
+
+  int32_t AudioDeviceModuleIOS::SetPlayoutDevice(WindowsDeviceType device) {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    return audio_device_->SetPlayoutDevice(device);
+  }
+
+  int32_t AudioDeviceModuleIOS::PlayoutDeviceName(
+      uint16_t index,
+      char name[kAdmMaxDeviceNameSize],
+      char guid[kAdmMaxGuidSize]) {
+    RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << index << ", ...)";
+    CHECKinitialized_();
+    if (name == NULL) {
+      return -1;
+    }
+    if (audio_device_->PlayoutDeviceName(index, name, guid) == -1) {
+      return -1;
+    }
+    if (name != NULL) {
+      RTC_DLOG(LS_INFO) << "output: name = " << name;
+    }
+    if (guid != NULL) {
+      RTC_DLOG(LS_INFO) << "output: guid = " << guid;
+    }
+    return 0;
+  }
+
+  int32_t AudioDeviceModuleIOS::RecordingDeviceName(
+      uint16_t index,
+      char name[kAdmMaxDeviceNameSize],
+      char guid[kAdmMaxGuidSize]) {
+    RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << index << ", ...)";
+    CHECKinitialized_();
+    if (name == NULL) {
+      return -1;
+    }
+    if (audio_device_->RecordingDeviceName(index, name, guid) == -1) {
+      return -1;
+    }
+    if (name != NULL) {
+      RTC_DLOG(LS_INFO) << "output: name = " << name;
+    }
+    if (guid != NULL) {
+      RTC_DLOG(LS_INFO) << "output: guid = " << guid;
+    }
+    return 0;
+  }
+
+  int16_t AudioDeviceModuleIOS::RecordingDevices() {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    uint16_t nRecordingDevices = audio_device_->RecordingDevices();
+    RTC_DLOG(LS_INFO) << "output: " << nRecordingDevices;
+    return (int16_t)nRecordingDevices;
+  }
+
+  int32_t AudioDeviceModuleIOS::SetRecordingDevice(uint16_t index) {
+    RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << index << ")";
+    CHECKinitialized_();
+    return audio_device_->SetRecordingDevice(index);
+  }
+
+  int32_t AudioDeviceModuleIOS::SetRecordingDevice(WindowsDeviceType device) {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    return audio_device_->SetRecordingDevice(device);
+  }
+
+  int32_t AudioDeviceModuleIOS::InitPlayout() {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    if (PlayoutIsInitialized()) {
+      return 0;
+    }
+    int32_t result = audio_device_->InitPlayout();
+    RTC_DLOG(LS_INFO) << "output: " << result;
+    RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.InitPlayoutSuccess",
+                          static_cast<int>(result == 0));
+    return result;
+  }
+
+  int32_t AudioDeviceModuleIOS::InitRecording() {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    if (RecordingIsInitialized()) {
+      return 0;
+    }
+    int32_t result = audio_device_->InitRecording();
+    RTC_DLOG(LS_INFO) << "output: " << result;
+    RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.InitRecordingSuccess",
+                          static_cast<int>(result == 0));
+    return result;
+  }
+
+  bool AudioDeviceModuleIOS::PlayoutIsInitialized() const {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    CHECKinitialized__BOOL();
+    return audio_device_->PlayoutIsInitialized();
+  }
+
+  bool AudioDeviceModuleIOS::RecordingIsInitialized() const {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    CHECKinitialized__BOOL();
+    return audio_device_->RecordingIsInitialized();
+  }
+
+  int32_t AudioDeviceModuleIOS::StartPlayout() {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    if (Playing()) {
+      return 0;
+    }
+    audio_device_buffer_.get()->StartPlayout();
+    int32_t result = audio_device_->StartPlayout();
+    RTC_DLOG(LS_INFO) << "output: " << result;
+    RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StartPlayoutSuccess",
+                          static_cast<int>(result == 0));
+    return result;
+  }
+
+  int32_t AudioDeviceModuleIOS::StopPlayout() {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    int32_t result = audio_device_->StopPlayout();
+    audio_device_buffer_.get()->StopPlayout();
+    RTC_DLOG(LS_INFO) << "output: " << result;
+    RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StopPlayoutSuccess",
+                          static_cast<int>(result == 0));
+    return result;
+  }
+
+  bool AudioDeviceModuleIOS::Playing() const {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    CHECKinitialized__BOOL();
+    return audio_device_->Playing();
+  }
+
+  int32_t AudioDeviceModuleIOS::StartRecording() {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    if (Recording()) {
+      return 0;
+    }
+    audio_device_buffer_.get()->StartRecording();
+    int32_t result = audio_device_->StartRecording();
+    RTC_DLOG(LS_INFO) << "output: " << result;
+    RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StartRecordingSuccess",
+                          static_cast<int>(result == 0));
+    return result;
+  }
+
+  int32_t AudioDeviceModuleIOS::StopRecording() {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    int32_t result = audio_device_->StopRecording();
+    audio_device_buffer_.get()->StopRecording();
+    RTC_DLOG(LS_INFO) << "output: " << result;
+    RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StopRecordingSuccess",
+                          static_cast<int>(result == 0));
+    return result;
+  }
+
+  bool AudioDeviceModuleIOS::Recording() const {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    CHECKinitialized__BOOL();
+    return audio_device_->Recording();
+  }
+
+  int32_t AudioDeviceModuleIOS::RegisterAudioCallback(
+      AudioTransport* audioCallback) {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    return audio_device_buffer_.get()->RegisterAudioCallback(audioCallback);
+  }
+
+  int32_t AudioDeviceModuleIOS::PlayoutDelay(uint16_t* delayMS) const {
+    CHECKinitialized_();
+    uint16_t delay = 0;
+    if (audio_device_->PlayoutDelay(delay) == -1) {
+      RTC_LOG(LS_ERROR) << "failed to retrieve the playout delay";
+      return -1;
+    }
+    *delayMS = delay;
+    return 0;
+  }
+
+  bool AudioDeviceModuleIOS::BuiltInAECIsAvailable() const {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    CHECKinitialized__BOOL();
+    bool isAvailable = audio_device_->BuiltInAECIsAvailable();
+    RTC_DLOG(LS_INFO) << "output: " << isAvailable;
+    return isAvailable;
+  }
+
+  int32_t AudioDeviceModuleIOS::EnableBuiltInAEC(bool enable) {
+    RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")";
+    CHECKinitialized_();
+    int32_t ok = audio_device_->EnableBuiltInAEC(enable);
+    RTC_DLOG(LS_INFO) << "output: " << ok;
+    return ok;
+  }
+
+  bool AudioDeviceModuleIOS::BuiltInAGCIsAvailable() const {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    CHECKinitialized__BOOL();
+    bool isAvailable = audio_device_->BuiltInAGCIsAvailable();
+    RTC_DLOG(LS_INFO) << "output: " << isAvailable;
+    return isAvailable;
+  }
+
+  int32_t AudioDeviceModuleIOS::EnableBuiltInAGC(bool enable) {
+    RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")";
+    CHECKinitialized_();
+    int32_t ok = audio_device_->EnableBuiltInAGC(enable);
+    RTC_DLOG(LS_INFO) << "output: " << ok;
+    return ok;
+  }
+
+  bool AudioDeviceModuleIOS::BuiltInNSIsAvailable() const {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    CHECKinitialized__BOOL();
+    bool isAvailable = audio_device_->BuiltInNSIsAvailable();
+    RTC_DLOG(LS_INFO) << "output: " << isAvailable;
+    return isAvailable;
+  }
+
+  int32_t AudioDeviceModuleIOS::EnableBuiltInNS(bool enable) {
+    RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")";
+    CHECKinitialized_();
+    int32_t ok = audio_device_->EnableBuiltInNS(enable);
+    RTC_DLOG(LS_INFO) << "output: " << ok;
+    return ok;
+  }
+
+  int32_t AudioDeviceModuleIOS::GetPlayoutUnderrunCount() const {
+    // Don't log here, as this method can be called very often.
+    CHECKinitialized_();
+    int32_t ok = audio_device_->GetPlayoutUnderrunCount();
+    return ok;
+  }
+
+#if defined(WEBRTC_IOS)
+  int AudioDeviceModuleIOS::GetPlayoutAudioParameters(
+      AudioParameters* params) const {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    int r = audio_device_->GetPlayoutAudioParameters(params);
+    RTC_DLOG(LS_INFO) << "output: " << r;
+    return r;
+  }
+
+  int AudioDeviceModuleIOS::GetRecordAudioParameters(
+      AudioParameters* params) const {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    int r = audio_device_->GetRecordAudioParameters(params);
+    RTC_DLOG(LS_INFO) << "output: " << r;
+    return r;
+  }
+#endif  // WEBRTC_IOS
+}  // namespace ios_adm
+}  // namespace webrtc
diff --git a/third_party/libwebrtc/sdk/objc/native/src/audio/audio_session_observer.h b/third_party/libwebrtc/sdk/objc/native/src/audio/audio_session_observer.h
new file mode 100644
index 0000000000..f7c44c8184
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/native/src/audio/audio_session_observer.h
@@ -0,0 +1,41 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SDK_OBJC_NATIVE_SRC_AUDIO_AUDIO_SESSION_OBSERVER_H_
+#define SDK_OBJC_NATIVE_SRC_AUDIO_AUDIO_SESSION_OBSERVER_H_
+
+#include "rtc_base/thread.h"
+
+namespace webrtc {
+
+// Observer interface for listening to AVAudioSession events.
+class AudioSessionObserver {
+ public:
+  // Called when audio session interruption begins.
+  virtual void OnInterruptionBegin() = 0;
+
+  // Called when audio session interruption ends.
+  virtual void OnInterruptionEnd() = 0;
+
+  // Called when audio route changes.
+  virtual void OnValidRouteChange() = 0;
+
+  // Called when the ability to play or record changes.
+  virtual void OnCanPlayOrRecordChange(bool can_play_or_record) = 0;
+
+  virtual void OnChangedOutputVolume() = 0;
+
+ protected:
+  virtual ~AudioSessionObserver() {}
+};
+
+}  // namespace webrtc
+
+#endif  // SDK_OBJC_NATIVE_SRC_AUDIO_AUDIO_SESSION_OBSERVER_H_
diff --git a/third_party/libwebrtc/sdk/objc/native/src/audio/helpers.h b/third_party/libwebrtc/sdk/objc/native/src/audio/helpers.h
new file mode 100644
index 0000000000..12464ac897
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/native/src/audio/helpers.h
@@ -0,0 +1,79 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SDK_OBJC_NATIVE_SRC_AUDIO_HELPERS_H_
+#define SDK_OBJC_NATIVE_SRC_AUDIO_HELPERS_H_
+
+#import <Foundation/Foundation.h>
+#include <objc/objc.h>
+
+#include <string>
+
+namespace webrtc {
+namespace ios {
+
+bool CheckAndLogError(BOOL success, NSError* error);
+
+NSString* NSStringFromStdString(const std::string& stdString);
+std::string StdStringFromNSString(NSString* nsString);
+
+// Returns the thread ID as a string.
+std::string GetThreadId();
+
+// Returns the thread ID as a string suitable for debug logging.
+std::string GetThreadInfo();
+
+// Returns [NSThread currentThread] description as string.
+// Example: <NSThread: 0x170066d80>{number = 1, name = main}
+std::string GetCurrentThreadDescription();
+
+#if defined(WEBRTC_IOS)
+// Returns the current name of the operating system.
+std::string GetSystemName();
+
+// Returns the current version of the operating system as a string.
+std::string GetSystemVersionAsString();
+
+// Returns the version of the operating system in double representation.
+// Uses a cached value of the system version.
+double GetSystemVersion();
+
+// Returns the device type.
+// Examples: ”iPhone” and ”iPod touch”.
+std::string GetDeviceType();
+#endif  // defined(WEBRTC_IOS)
+
+// Returns a more detailed device name.
+// Examples: "iPhone 5s (GSM)" and "iPhone 6 Plus".
+std::string GetDeviceName();
+
+// Returns the name of the process. Does not uniquely identify the process.
+std::string GetProcessName();
+
+// Returns the identifier of the process (often called process ID).
+int GetProcessID();
+
+// Returns a string containing the version of the operating system on which
+// the process is executing. The string is human readable, localized, and
+// appropriate for displaying to the user.
+std::string GetOSVersionString();
+
+// Returns the number of processing cores available on the device.
+int GetProcessorCount();
+
+#if defined(WEBRTC_IOS)
+// Indicates whether Low Power Mode is enabled on the iOS device.
+bool GetLowPowerModeEnabled();
+#endif
+
+}  // namespace ios
+}  // namespace webrtc
+
+#endif  // SDK_OBJC_NATIVE_SRC_AUDIO_HELPERS_H_
diff --git a/third_party/libwebrtc/sdk/objc/native/src/audio/helpers.mm b/third_party/libwebrtc/sdk/objc/native/src/audio/helpers.mm
new file mode 100644
index 0000000000..cd0469656a
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/native/src/audio/helpers.mm
@@ -0,0 +1,109 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <Foundation/Foundation.h>
+#import <sys/sysctl.h>
+#if defined(WEBRTC_IOS)
+#import <UIKit/UIKit.h>
+#endif
+
+#include <memory>
+
+#include "helpers.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace ios {
+
+NSString* NSStringFromStdString(const std::string& stdString) {
+  // std::string may contain null termination character so we construct
+  // using length.
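+  // Note: -initWithBytes:length:encoding: returns nil if the bytes do not
+  // form valid UTF-8, so malformed input yields a nil NSString rather than
+  // a truncated one.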
+  return [[NSString alloc] initWithBytes:stdString.data()
+                                  length:stdString.length()
+                                encoding:NSUTF8StringEncoding];
+}
+
+std::string StdStringFromNSString(NSString* nsString) {
+  NSData* charData = [nsString dataUsingEncoding:NSUTF8StringEncoding];
+  return std::string(reinterpret_cast<const char*>([charData bytes]),
+                     [charData length]);
+}
+
+bool CheckAndLogError(BOOL success, NSError* error) {
+  if (!success) {
+    NSString* msg =
+        [NSString stringWithFormat:@"Error: %ld, %@, %@", (long)error.code,
+                                   error.localizedDescription,
+                                   error.localizedFailureReason];
+    RTC_LOG(LS_ERROR) << StdStringFromNSString(msg);
+    return false;
+  }
+  return true;
+}
+
+// TODO(henrika): see if it is possible to move to GetThreadName in
+// platform_thread.h and base it on pthread methods instead.
+std::string GetCurrentThreadDescription() {
+  NSString* name = [NSString stringWithFormat:@"%@", [NSThread currentThread]];
+  return StdStringFromNSString(name);
+}
+
+#if defined(WEBRTC_IOS)
+std::string GetSystemName() {
+  NSString* osName = [[UIDevice currentDevice] systemName];
+  return StdStringFromNSString(osName);
+}
+
+std::string GetSystemVersionAsString() {
+  NSString* osVersion = [[UIDevice currentDevice] systemVersion];
+  return StdStringFromNSString(osVersion);
+}
+
+std::string GetDeviceType() {
+  NSString* deviceModel = [[UIDevice currentDevice] model];
+  return StdStringFromNSString(deviceModel);
+}
+
+bool GetLowPowerModeEnabled() {
+  return [NSProcessInfo processInfo].lowPowerModeEnabled;
+}
+#endif
+
+std::string GetDeviceName() {
+  size_t size;
+  sysctlbyname("hw.machine", NULL, &size, NULL, 0);
+  std::unique_ptr<char[]> machine;
+  machine.reset(new char[size]);
+  sysctlbyname("hw.machine", machine.get(), &size, NULL, 0);
+  return std::string(machine.get());
+}
+
+std::string GetProcessName() {
+  NSString* processName = [NSProcessInfo processInfo].processName;
+  return StdStringFromNSString(processName);
+}
+
+int GetProcessID() {
+  return [NSProcessInfo processInfo].processIdentifier;
+}
+
+std::string GetOSVersionString() {
+  NSString* osVersion =
+      [NSProcessInfo processInfo].operatingSystemVersionString;
+  return StdStringFromNSString(osVersion);
+}
+
+int GetProcessorCount() {
+  return [NSProcessInfo processInfo].processorCount;
+}
+
+}  // namespace ios
+}  // namespace webrtc
diff --git a/third_party/libwebrtc/sdk/objc/native/src/audio/voice_processing_audio_unit.h b/third_party/libwebrtc/sdk/objc/native/src/audio/voice_processing_audio_unit.h
new file mode 100644
index 0000000000..ed9dd98568
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/native/src/audio/voice_processing_audio_unit.h
@@ -0,0 +1,141 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SDK_OBJC_NATIVE_SRC_AUDIO_VOICE_PROCESSING_AUDIO_UNIT_H_
+#define SDK_OBJC_NATIVE_SRC_AUDIO_VOICE_PROCESSING_AUDIO_UNIT_H_
+
+#include <AudioUnit/AudioUnit.h>
+
+namespace webrtc {
+namespace ios_adm {
+
+class VoiceProcessingAudioUnitObserver {
+ public:
+  // Callback function called on a real-time priority I/O thread from the
+  // audio unit. This method is used to signal that recorded audio is
+  // available.
+  virtual OSStatus OnDeliverRecordedData(AudioUnitRenderActionFlags* flags,
+                                         const AudioTimeStamp* time_stamp,
+                                         UInt32 bus_number,
+                                         UInt32 num_frames,
+                                         AudioBufferList* io_data) = 0;
+
+  // Callback function called on a real-time priority I/O thread from the
+  // audio unit. This method is used to provide audio samples to the audio
+  // unit.
+  virtual OSStatus OnGetPlayoutData(AudioUnitRenderActionFlags* io_action_flags,
+                                    const AudioTimeStamp* time_stamp,
+                                    UInt32 bus_number,
+                                    UInt32 num_frames,
+                                    AudioBufferList* io_data) = 0;
+
+ protected:
+  ~VoiceProcessingAudioUnitObserver() {}
+};
+
+// Convenience class to abstract away the management of a Voice Processing
+// I/O Audio Unit. The Voice Processing I/O unit has the same characteristics
+// as the Remote I/O unit (supports full duplex low-latency audio input and
+// output) and adds AEC for two-way duplex communication. It also adds AGC,
+// adjustment of voice-processing quality, and muting. Hence, it is ideal for
+// VoIP applications.
+class VoiceProcessingAudioUnit {
+ public:
+  VoiceProcessingAudioUnit(bool bypass_voice_processing,
+                           VoiceProcessingAudioUnitObserver* observer);
+  ~VoiceProcessingAudioUnit();
+
+  // TODO(tkchin): enum for state and state checking.
+  enum State : int32_t {
+    // Init() should be called.
+    kInitRequired,
+    // Audio unit created but not initialized.
+    kUninitialized,
+    // Initialized but not started. Equivalent to stopped.
+    kInitialized,
+    // Initialized and started.
+    kStarted,
+  };
+
+  // Number of bytes per audio sample for 16-bit signed integer representation.
+  static const UInt32 kBytesPerSample;
+
+  // Initializes this class by creating the underlying audio unit instance.
+  // Creates a Voice-Processing I/O unit and configures it for full-duplex
+  // audio. The stream format is chosen to avoid internal resampling and to
+  // match the 10 ms callback rate for WebRTC as closely as possible.
+  // Does not initialize the audio unit.
+  bool Init();
+
+  VoiceProcessingAudioUnit::State GetState() const;
+
+  // Initializes the underlying audio unit with the given sample rate.
+  bool Initialize(Float64 sample_rate);
+
+  // Starts the underlying audio unit.
+  OSStatus Start();
+
+  // Stops the underlying audio unit.
+  bool Stop();
+
+  // Uninitializes the underlying audio unit.
+  bool Uninitialize();
+
+  // Calls render on the underlying audio unit.
+  OSStatus Render(AudioUnitRenderActionFlags* flags,
+                  const AudioTimeStamp* time_stamp,
+                  UInt32 output_bus_number,
+                  UInt32 num_frames,
+                  AudioBufferList* io_data);
+
+ private:
+  // The C API used to set callbacks requires static functions. When these are
+  // called, they will invoke the relevant instance method by casting
+  // in_ref_con to VoiceProcessingAudioUnit*.
+  static OSStatus OnGetPlayoutData(void* in_ref_con,
+                                   AudioUnitRenderActionFlags* flags,
+                                   const AudioTimeStamp* time_stamp,
+                                   UInt32 bus_number,
+                                   UInt32 num_frames,
+                                   AudioBufferList* io_data);
+  static OSStatus OnDeliverRecordedData(void* in_ref_con,
+                                        AudioUnitRenderActionFlags* flags,
+                                        const AudioTimeStamp* time_stamp,
+                                        UInt32 bus_number,
+                                        UInt32 num_frames,
+                                        AudioBufferList* io_data);
+
+  // Notifies observer that samples are needed for playback.
+  OSStatus NotifyGetPlayoutData(AudioUnitRenderActionFlags* flags,
+                                const AudioTimeStamp* time_stamp,
+                                UInt32 bus_number,
+                                UInt32 num_frames,
+                                AudioBufferList* io_data);
+  // Notifies observer that recorded samples are available for render.
+  OSStatus NotifyDeliverRecordedData(AudioUnitRenderActionFlags* flags,
+                                     const AudioTimeStamp* time_stamp,
+                                     UInt32 bus_number,
+                                     UInt32 num_frames,
+                                     AudioBufferList* io_data);
+
+  // Returns the predetermined format with a specific sample rate. See
+  // implementation file for details on format.
+  AudioStreamBasicDescription GetFormat(Float64 sample_rate) const;
+
+  // Deletes the underlying audio unit.
+  void DisposeAudioUnit();
+
+  const bool bypass_voice_processing_;
+  VoiceProcessingAudioUnitObserver* observer_;
+  AudioUnit vpio_unit_;
+  VoiceProcessingAudioUnit::State state_;
+};
+}  // namespace ios_adm
+}  // namespace webrtc
+
+#endif  // SDK_OBJC_NATIVE_SRC_AUDIO_VOICE_PROCESSING_AUDIO_UNIT_H_
diff --git a/third_party/libwebrtc/sdk/objc/native/src/audio/voice_processing_audio_unit.mm b/third_party/libwebrtc/sdk/objc/native/src/audio/voice_processing_audio_unit.mm
new file mode 100644
index 0000000000..3905b6857a
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/native/src/audio/voice_processing_audio_unit.mm
@@ -0,0 +1,488 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "voice_processing_audio_unit.h"
+
+#include "rtc_base/checks.h"
+#include "system_wrappers/include/metrics.h"
+
+#import "base/RTCLogging.h"
+#import "sdk/objc/components/audio/RTCAudioSessionConfiguration.h"
+
+#if !defined(NDEBUG)
+static void LogStreamDescription(AudioStreamBasicDescription description) {
+  char formatIdString[5];
+  UInt32 formatId = CFSwapInt32HostToBig(description.mFormatID);
+  bcopy(&formatId, formatIdString, 4);
+  formatIdString[4] = '\0';
+  RTCLog(@"AudioStreamBasicDescription: {\n"
+          "  mSampleRate: %.2f\n"
+          "  formatIDString: %s\n"
+          "  mFormatFlags: 0x%X\n"
+          "  mBytesPerPacket: %u\n"
+          "  mFramesPerPacket: %u\n"
+          "  mBytesPerFrame: %u\n"
+          "  mChannelsPerFrame: %u\n"
+          "  mBitsPerChannel: %u\n"
+          "  mReserved: %u\n}",
+         description.mSampleRate, formatIdString,
+         static_cast<unsigned int>(description.mFormatFlags),
+         static_cast<unsigned int>(description.mBytesPerPacket),
+         static_cast<unsigned int>(description.mFramesPerPacket),
+         static_cast<unsigned int>(description.mBytesPerFrame),
+         static_cast<unsigned int>(description.mChannelsPerFrame),
+         static_cast<unsigned int>(description.mBitsPerChannel),
+         static_cast<unsigned int>(description.mReserved));
+}
+#endif
+
+namespace webrtc {
+namespace ios_adm {
+
+// Calls to AudioUnitInitialize() can fail if called back-to-back on different
+// ADM instances. A fall-back solution is to allow multiple sequential calls
+// with a small delay between each. This factor sets the max number of allowed
+// initialization attempts.
+static const int kMaxNumberOfAudioUnitInitializeAttempts = 5;
+// A VP I/O unit's bus 1 connects to input hardware (microphone).
+static const AudioUnitElement kInputBus = 1;
+// A VP I/O unit's bus 0 connects to output hardware (speaker).
+static const AudioUnitElement kOutputBus = 0;
+
+// Returns the automatic gain control (AGC) state on the processed microphone
+// signal. Should be on by default for Voice Processing audio units.
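+// The state is read via kAUVoiceIOProperty_VoiceProcessingEnableAGC on the
+// global scope of the unit's input element; a nonzero value in `enabled`
+// means AGC is applied to the processed (uplink) signal.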
+static OSStatus GetAGCState(AudioUnit audio_unit, UInt32* enabled) {
+  RTC_DCHECK(audio_unit);
+  UInt32 size = sizeof(*enabled);
+  OSStatus result = AudioUnitGetProperty(audio_unit,
+                                         kAUVoiceIOProperty_VoiceProcessingEnableAGC,
+                                         kAudioUnitScope_Global,
+                                         kInputBus,
+                                         enabled,
+                                         &size);
+  // Only log the AGC state when the property read succeeded; on failure
+  // `*enabled` is not written and must not be read.
+  if (result == noErr) {
+    RTCLog(@"VPIO unit AGC: %u", static_cast<unsigned int>(*enabled));
+  }
+  return result;
+}
+
+VoiceProcessingAudioUnit::VoiceProcessingAudioUnit(bool bypass_voice_processing,
+                                                   VoiceProcessingAudioUnitObserver* observer)
+    : bypass_voice_processing_(bypass_voice_processing),
+      observer_(observer),
+      vpio_unit_(nullptr),
+      state_(kInitRequired) {
+  RTC_DCHECK(observer);
+}
+
+VoiceProcessingAudioUnit::~VoiceProcessingAudioUnit() {
+  DisposeAudioUnit();
+}
+
+const UInt32 VoiceProcessingAudioUnit::kBytesPerSample = 2;
+
+bool VoiceProcessingAudioUnit::Init() {
+  RTC_DCHECK_EQ(state_, kInitRequired);
+
+  // Create an audio component description to identify the Voice Processing
+  // I/O audio unit.
+  AudioComponentDescription vpio_unit_description;
+  vpio_unit_description.componentType = kAudioUnitType_Output;
+  vpio_unit_description.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
+  vpio_unit_description.componentManufacturer = kAudioUnitManufacturer_Apple;
+  vpio_unit_description.componentFlags = 0;
+  vpio_unit_description.componentFlagsMask = 0;
+
+  // Obtain an audio unit instance given the description.
+  AudioComponent found_vpio_unit_ref =
+      AudioComponentFindNext(nullptr, &vpio_unit_description);
+
+  // Create a Voice Processing IO audio unit.
+  OSStatus result = noErr;
+  result = AudioComponentInstanceNew(found_vpio_unit_ref, &vpio_unit_);
+  if (result != noErr) {
+    vpio_unit_ = nullptr;
+    RTCLogError(@"AudioComponentInstanceNew failed. Error=%ld.", (long)result);
+    return false;
+  }
+
+  // Enable input on the input scope of the input element.
+  UInt32 enable_input = 1;
+  result = AudioUnitSetProperty(vpio_unit_, kAudioOutputUnitProperty_EnableIO,
+                                kAudioUnitScope_Input, kInputBus, &enable_input,
+                                sizeof(enable_input));
+  if (result != noErr) {
+    DisposeAudioUnit();
+    RTCLogError(@"Failed to enable input on input scope of input element. "
+                 "Error=%ld.",
+                (long)result);
+    return false;
+  }
+
+  // Enable output on the output scope of the output element.
+  UInt32 enable_output = 1;
+  result = AudioUnitSetProperty(vpio_unit_, kAudioOutputUnitProperty_EnableIO,
+                                kAudioUnitScope_Output, kOutputBus,
+                                &enable_output, sizeof(enable_output));
+  if (result != noErr) {
+    DisposeAudioUnit();
+    RTCLogError(@"Failed to enable output on output scope of output element. "
+                 "Error=%ld.",
+                (long)result);
+    return false;
+  }
+
+  // Specify the callback function that provides audio samples to the audio
+  // unit.
+  AURenderCallbackStruct render_callback;
+  render_callback.inputProc = OnGetPlayoutData;
+  render_callback.inputProcRefCon = this;
+  result = AudioUnitSetProperty(
+      vpio_unit_, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input,
+      kOutputBus, &render_callback, sizeof(render_callback));
+  if (result != noErr) {
+    DisposeAudioUnit();
+    RTCLogError(@"Failed to specify the render callback on the output bus. "
+                 "Error=%ld.",
+                (long)result);
+    return false;
+  }
+
+  // Disable AU buffer allocation for the recorder; we allocate our own.
+  // TODO(henrika): not sure that it actually saves resources to make this call.
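+  // With allocation disabled here, the AudioBufferList handed to
+  // AudioUnitRender() in Render() must point at caller-owned memory.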
+  UInt32 flag = 0;
+  result = AudioUnitSetProperty(
+      vpio_unit_, kAudioUnitProperty_ShouldAllocateBuffer,
+      kAudioUnitScope_Output, kInputBus, &flag, sizeof(flag));
+  if (result != noErr) {
+    DisposeAudioUnit();
+    RTCLogError(@"Failed to disable buffer allocation on the input bus. "
+                 "Error=%ld.",
+                (long)result);
+    return false;
+  }
+
+  // Specify the callback to be called on the I/O thread when input audio is
+  // available. The recorded samples can then be obtained by calling the
+  // AudioUnitRender() method.
+  AURenderCallbackStruct input_callback;
+  input_callback.inputProc = OnDeliverRecordedData;
+  input_callback.inputProcRefCon = this;
+  result = AudioUnitSetProperty(vpio_unit_,
+                                kAudioOutputUnitProperty_SetInputCallback,
+                                kAudioUnitScope_Global, kInputBus,
+                                &input_callback, sizeof(input_callback));
+  if (result != noErr) {
+    DisposeAudioUnit();
+    RTCLogError(@"Failed to specify the input callback on the input bus. "
+                 "Error=%ld.",
+                (long)result);
+    return false;
+  }
+
+  state_ = kUninitialized;
+  return true;
+}
+
+VoiceProcessingAudioUnit::State VoiceProcessingAudioUnit::GetState() const {
+  return state_;
+}
+
+bool VoiceProcessingAudioUnit::Initialize(Float64 sample_rate) {
+  RTC_DCHECK_GE(state_, kUninitialized);
+  RTCLog(@"Initializing audio unit with sample rate: %f", sample_rate);
+
+  OSStatus result = noErr;
+  AudioStreamBasicDescription format = GetFormat(sample_rate);
+  UInt32 size = sizeof(format);
+#if !defined(NDEBUG)
+  LogStreamDescription(format);
+#endif
+
+  // Set the format on the output scope of the input element/bus.
+  result =
+      AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat,
+                           kAudioUnitScope_Output, kInputBus, &format, size);
+  if (result != noErr) {
+    RTCLogError(@"Failed to set format on output scope of input bus. "
+                 "Error=%ld.",
+                (long)result);
+    return false;
+  }
+
+  // Set the format on the input scope of the output element/bus.
+  result =
+      AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat,
+                           kAudioUnitScope_Input, kOutputBus, &format, size);
+  if (result != noErr) {
+    RTCLogError(@"Failed to set format on input scope of output bus. "
+                 "Error=%ld.",
+                (long)result);
+    return false;
+  }
+
+  // Initialize the Voice Processing I/O unit instance.
+  // Calls to AudioUnitInitialize() can fail if called back-to-back on
+  // different ADM instances. The error code in this case is -66635, which is
+  // undocumented. Tests have shown that calling AudioUnitInitialize a second
+  // time, after a short sleep, avoids this issue.
+  // See webrtc:5166 for details.
+  int failed_initialize_attempts = 0;
+  result = AudioUnitInitialize(vpio_unit_);
+  while (result != noErr) {
+    RTCLogError(@"Failed to initialize the Voice Processing I/O unit. "
+                 "Error=%ld.",
+                (long)result);
+    ++failed_initialize_attempts;
+    if (failed_initialize_attempts == kMaxNumberOfAudioUnitInitializeAttempts) {
+      // Max number of initialization attempts exceeded, hence abort.
+      RTCLogError(@"Too many initialization attempts.");
+      return false;
+    }
+    RTCLog(@"Pause 100ms and try audio unit initialization again...");
+    [NSThread sleepForTimeInterval:0.1f];
+    result = AudioUnitInitialize(vpio_unit_);
+  }
+  if (result == noErr) {
+    RTCLog(@"Voice Processing I/O unit is now initialized.");
+  }
+
+  if (bypass_voice_processing_) {
+    // Attempt to disable the built-in voice processing.
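+    // Writing 1 to kAUVoiceIOProperty_BypassVoiceProcessing makes the unit
+    // pass the raw microphone signal through, skipping its echo cancellation
+    // and related voice-processing stage.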
+    UInt32 toggle = 1;
+    result = AudioUnitSetProperty(vpio_unit_,
+                                  kAUVoiceIOProperty_BypassVoiceProcessing,
+                                  kAudioUnitScope_Global,
+                                  kInputBus,
+                                  &toggle,
+                                  sizeof(toggle));
+    if (result == noErr) {
+      RTCLog(@"Successfully bypassed voice processing.");
+    } else {
+      RTCLogError(@"Failed to bypass voice processing. Error=%ld.", (long)result);
+    }
+    state_ = kInitialized;
+    return true;
+  }
+
+  // AGC should be enabled by default for Voice Processing I/O units but it is
+  // checked below and enabled explicitly if needed. This scheme is used
+  // to be absolutely sure that the AGC is enabled since we have seen cases
+  // where only zeros are recorded and a disabled AGC could be one of the
+  // reasons why it happens.
+  int agc_was_enabled_by_default = 0;
+  UInt32 agc_is_enabled = 0;
+  result = GetAGCState(vpio_unit_, &agc_is_enabled);
+  if (result != noErr) {
+    RTCLogError(@"Failed to get AGC state (1st attempt). "
+                 "Error=%ld.",
+                (long)result);
+    // Example of error code: kAudioUnitErr_NoConnection (-10876).
+    // All error codes related to audio units are negative and are therefore
+    // converted into a positive value to match the UMA APIs.
+    RTC_HISTOGRAM_COUNTS_SPARSE_100000(
+        "WebRTC.Audio.GetAGCStateErrorCode1", (-1) * result);
+  } else if (agc_is_enabled) {
+    // Remember that the AGC was enabled by default. Will be used in UMA.
+    agc_was_enabled_by_default = 1;
+  } else {
+    // AGC was initially disabled => try to enable it explicitly.
+    UInt32 enable_agc = 1;
+    result =
+        AudioUnitSetProperty(vpio_unit_,
+                             kAUVoiceIOProperty_VoiceProcessingEnableAGC,
+                             kAudioUnitScope_Global, kInputBus, &enable_agc,
+                             sizeof(enable_agc));
+    if (result != noErr) {
+      RTCLogError(@"Failed to enable the built-in AGC. "
+                   "Error=%ld.",
+                  (long)result);
+      RTC_HISTOGRAM_COUNTS_SPARSE_100000(
+          "WebRTC.Audio.SetAGCStateErrorCode", (-1) * result);
+    }
+    result = GetAGCState(vpio_unit_, &agc_is_enabled);
+    if (result != noErr) {
+      RTCLogError(@"Failed to get AGC state (2nd attempt). "
+                   "Error=%ld.",
+                  (long)result);
+      RTC_HISTOGRAM_COUNTS_SPARSE_100000(
+          "WebRTC.Audio.GetAGCStateErrorCode2", (-1) * result);
+    }
+  }
+
+  // Track whether the built-in AGC was enabled by default (as it should be)
+  // or not.
+  RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.BuiltInAGCWasEnabledByDefault",
+                        agc_was_enabled_by_default);
+  RTCLog(@"WebRTC.Audio.BuiltInAGCWasEnabledByDefault: %d",
+         agc_was_enabled_by_default);
+  // As a final step, add a UMA histogram for tracking the AGC state.
+  // At this stage, the AGC should be enabled, and if it is not, more work is
+  // needed to find out the root cause.
+  RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.BuiltInAGCIsEnabled", agc_is_enabled);
+  RTCLog(@"WebRTC.Audio.BuiltInAGCIsEnabled: %u",
+         static_cast<unsigned int>(agc_is_enabled));
+
+  state_ = kInitialized;
+  return true;
+}
+
+OSStatus VoiceProcessingAudioUnit::Start() {
+  RTC_DCHECK_GE(state_, kUninitialized);
+  RTCLog(@"Starting audio unit.");
+
+  OSStatus result = AudioOutputUnitStart(vpio_unit_);
+  if (result != noErr) {
+    RTCLogError(@"Failed to start audio unit. Error=%ld", (long)result);
+    return result;
+  } else {
+    RTCLog(@"Started audio unit");
+  }
+  state_ = kStarted;
+  return noErr;
+}
+
+bool VoiceProcessingAudioUnit::Stop() {
+  RTC_DCHECK_GE(state_, kUninitialized);
+  RTCLog(@"Stopping audio unit.");
+
+  OSStatus result = AudioOutputUnitStop(vpio_unit_);
+  if (result != noErr) {
+    RTCLogError(@"Failed to stop audio unit. Error=%ld", (long)result);
+    return false;
+  } else {
+    RTCLog(@"Stopped audio unit");
+  }
+
+  state_ = kInitialized;
+  return true;
+}
+
+bool VoiceProcessingAudioUnit::Uninitialize() {
+  RTC_DCHECK_GE(state_, kUninitialized);
+  RTCLog(@"Uninitializing audio unit.");
+
+  OSStatus result = AudioUnitUninitialize(vpio_unit_);
+  if (result != noErr) {
+    RTCLogError(@"Failed to uninitialize audio unit. Error=%ld", (long)result);
+    return false;
+  } else {
+    RTCLog(@"Uninitialized audio unit.");
+  }
+
+  state_ = kUninitialized;
+  return true;
+}
+
+OSStatus VoiceProcessingAudioUnit::Render(AudioUnitRenderActionFlags* flags,
+                                          const AudioTimeStamp* time_stamp,
+                                          UInt32 output_bus_number,
+                                          UInt32 num_frames,
+                                          AudioBufferList* io_data) {
+  RTC_DCHECK(vpio_unit_) << "Init() not called.";
+
+  OSStatus result = AudioUnitRender(vpio_unit_, flags, time_stamp,
+                                    output_bus_number, num_frames, io_data);
+  if (result != noErr) {
+    RTCLogError(@"Failed to render audio unit. Error=%ld", (long)result);
+  }
+  return result;
+}
+
+OSStatus VoiceProcessingAudioUnit::OnGetPlayoutData(
+    void* in_ref_con,
+    AudioUnitRenderActionFlags* flags,
+    const AudioTimeStamp* time_stamp,
+    UInt32 bus_number,
+    UInt32 num_frames,
+    AudioBufferList* io_data) {
+  VoiceProcessingAudioUnit* audio_unit =
+      static_cast<VoiceProcessingAudioUnit*>(in_ref_con);
+  return audio_unit->NotifyGetPlayoutData(flags, time_stamp, bus_number,
+                                          num_frames, io_data);
+}
+
+OSStatus VoiceProcessingAudioUnit::OnDeliverRecordedData(
+    void* in_ref_con,
+    AudioUnitRenderActionFlags* flags,
+    const AudioTimeStamp* time_stamp,
+    UInt32 bus_number,
+    UInt32 num_frames,
+    AudioBufferList* io_data) {
+  VoiceProcessingAudioUnit* audio_unit =
+      static_cast<VoiceProcessingAudioUnit*>(in_ref_con);
+  return audio_unit->NotifyDeliverRecordedData(flags, time_stamp, bus_number,
+                                               num_frames, io_data);
+}
+
+OSStatus VoiceProcessingAudioUnit::NotifyGetPlayoutData(
+    AudioUnitRenderActionFlags* flags,
+    const AudioTimeStamp* time_stamp,
+    UInt32 bus_number,
+    UInt32 num_frames,
+    AudioBufferList* io_data) {
+  return observer_->OnGetPlayoutData(flags, time_stamp, bus_number, num_frames,
+                                     io_data);
+}
+
+OSStatus VoiceProcessingAudioUnit::NotifyDeliverRecordedData(
+    AudioUnitRenderActionFlags* flags,
+    const AudioTimeStamp* time_stamp,
+    UInt32 bus_number,
+    UInt32 num_frames,
+    AudioBufferList* io_data) {
+  return observer_->OnDeliverRecordedData(flags, time_stamp, bus_number,
+                                          num_frames, io_data);
+}
+
+AudioStreamBasicDescription VoiceProcessingAudioUnit::GetFormat(
+    Float64 sample_rate) const {
+  // Set the application formats for input and output:
+  // - use same format in both directions
+  // - avoid resampling in the I/O unit by using the hardware sample rate
+  // - linear PCM => noncompressed audio data format with one frame per packet
+  // - no need to specify interleaving since only mono is supported
+  AudioStreamBasicDescription format;
+  RTC_DCHECK_EQ(1, kRTCAudioSessionPreferredNumberOfChannels);
+  format.mSampleRate = sample_rate;
+  format.mFormatID = kAudioFormatLinearPCM;
+  format.mFormatFlags =
+      kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
+  format.mBytesPerPacket = kBytesPerSample;
+  format.mFramesPerPacket = 1;  // uncompressed.
+  format.mBytesPerFrame = kBytesPerSample;
+  format.mChannelsPerFrame = kRTCAudioSessionPreferredNumberOfChannels;
+  format.mBitsPerChannel = 8 * kBytesPerSample;
+  return format;
+}
+
+void VoiceProcessingAudioUnit::DisposeAudioUnit() {
+  if (vpio_unit_) {
+    switch (state_) {
+      case kStarted:
+        Stop();
+        [[fallthrough]];
+      case kInitialized:
+        Uninitialize();
+        break;
+      case kUninitialized:
+      case kInitRequired:
+        break;
+    }
+
+    RTCLog(@"Disposing audio unit.");
+    OSStatus result = AudioComponentInstanceDispose(vpio_unit_);
+    if (result != noErr) {
+      RTCLogError(@"AudioComponentInstanceDispose failed. Error=%ld.",
+                  (long)result);
+    }
+    vpio_unit_ = nullptr;
+  }
+}
+
+}  // namespace ios_adm
+}  // namespace webrtc
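For reference, the stream format built by GetFormat() above pairs with WebRTC's 10 ms pull model. A minimal standalone sketch (not part of the upstream diff; it assumes mono 16-bit linear PCM exactly as configured above and a 48 kHz hardware rate) that reproduces the format and the resulting 10 ms buffer size:

    #include <AudioToolbox/AudioToolbox.h>
    #include <cstdio>

    int main() {
      const UInt32 kBytesPerSample = 2;     // 16-bit signed samples, as above.
      const Float64 sample_rate = 48000.0;  // Assumed hardware sample rate.

      AudioStreamBasicDescription format = {};
      format.mSampleRate = sample_rate;
      format.mFormatID = kAudioFormatLinearPCM;
      format.mFormatFlags =
          kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
      format.mBytesPerPacket = kBytesPerSample;
      format.mFramesPerPacket = 1;          // One frame per packet for LPCM.
      format.mBytesPerFrame = kBytesPerSample;
      format.mChannelsPerFrame = 1;         // Mono only, per the DCHECK above.
      format.mBitsPerChannel = 8 * kBytesPerSample;

      // WebRTC consumes and produces audio in 10 ms chunks; at 48 kHz that is
      // 480 frames, i.e. 960 bytes per callback for this mono format.
      const UInt32 frames_per_10ms = static_cast<UInt32>(sample_rate / 100.0);
      std::printf("10 ms = %u frames = %u bytes\n", frames_per_10ms,
                  frames_per_10ms * format.mBytesPerFrame);
      return 0;
    }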