summaryrefslogtreecommitdiffstats
path: root/third_party/libwebrtc/sdk/objc/native/src
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-07 19:33:14 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-07 19:33:14 +0000
commit36d22d82aa202bb199967e9512281e9a53db42c9 (patch)
tree105e8c98ddea1c1e4784a60a5a6410fa416be2de /third_party/libwebrtc/sdk/objc/native/src
parentInitial commit. (diff)
downloadfirefox-esr-upstream.tar.xz
firefox-esr-upstream.zip
Adding upstream version 115.7.0esr.upstream/115.7.0esrupstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/libwebrtc/sdk/objc/native/src')
-rw-r--r--third_party/libwebrtc/sdk/objc/native/src/audio/audio_device_ios.h306
-rw-r--r--third_party/libwebrtc/sdk/objc/native/src/audio/audio_device_ios.mm1127
-rw-r--r--third_party/libwebrtc/sdk/objc/native/src/audio/audio_device_module_ios.h143
-rw-r--r--third_party/libwebrtc/sdk/objc/native/src/audio/audio_device_module_ios.mm669
-rw-r--r--third_party/libwebrtc/sdk/objc/native/src/audio/audio_session_observer.h41
-rw-r--r--third_party/libwebrtc/sdk/objc/native/src/audio/helpers.h79
-rw-r--r--third_party/libwebrtc/sdk/objc/native/src/audio/helpers.mm109
-rw-r--r--third_party/libwebrtc/sdk/objc/native/src/audio/voice_processing_audio_unit.h141
-rw-r--r--third_party/libwebrtc/sdk/objc/native/src/audio/voice_processing_audio_unit.mm488
-rw-r--r--third_party/libwebrtc/sdk/objc/native/src/network_monitor_observer.h42
-rw-r--r--third_party/libwebrtc/sdk/objc/native/src/objc_audio_device.h277
-rw-r--r--third_party/libwebrtc/sdk/objc/native/src/objc_audio_device.mm711
-rw-r--r--third_party/libwebrtc/sdk/objc/native/src/objc_audio_device_delegate.h35
-rw-r--r--third_party/libwebrtc/sdk/objc/native/src/objc_audio_device_delegate.mm194
-rw-r--r--third_party/libwebrtc/sdk/objc/native/src/objc_frame_buffer.h56
-rw-r--r--third_party/libwebrtc/sdk/objc/native/src/objc_frame_buffer.mm107
-rw-r--r--third_party/libwebrtc/sdk/objc/native/src/objc_network_monitor.h67
-rw-r--r--third_party/libwebrtc/sdk/objc/native/src/objc_network_monitor.mm95
-rw-r--r--third_party/libwebrtc/sdk/objc/native/src/objc_video_decoder_factory.h41
-rw-r--r--third_party/libwebrtc/sdk/objc/native/src/objc_video_decoder_factory.mm123
-rw-r--r--third_party/libwebrtc/sdk/objc/native/src/objc_video_encoder_factory.h44
-rw-r--r--third_party/libwebrtc/sdk/objc/native/src/objc_video_encoder_factory.mm209
-rw-r--r--third_party/libwebrtc/sdk/objc/native/src/objc_video_frame.h24
-rw-r--r--third_party/libwebrtc/sdk/objc/native/src/objc_video_frame.mm28
-rw-r--r--third_party/libwebrtc/sdk/objc/native/src/objc_video_renderer.h39
-rw-r--r--third_party/libwebrtc/sdk/objc/native/src/objc_video_renderer.mm38
-rw-r--r--third_party/libwebrtc/sdk/objc/native/src/objc_video_track_source.h59
-rw-r--r--third_party/libwebrtc/sdk/objc/native/src/objc_video_track_source.mm131
28 files changed, 5423 insertions, 0 deletions
diff --git a/third_party/libwebrtc/sdk/objc/native/src/audio/audio_device_ios.h b/third_party/libwebrtc/sdk/objc/native/src/audio/audio_device_ios.h
new file mode 100644
index 0000000000..a86acb56fe
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/native/src/audio/audio_device_ios.h
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SDK_OBJC_NATIVE_SRC_AUDIO_AUDIO_DEVICE_IOS_H_
+#define SDK_OBJC_NATIVE_SRC_AUDIO_AUDIO_DEVICE_IOS_H_
+
+#include <atomic>
+#include <memory>
+
+#include "api/scoped_refptr.h"
+#include "api/sequence_checker.h"
+#include "api/task_queue/pending_task_safety_flag.h"
+#include "audio_session_observer.h"
+#include "modules/audio_device/audio_device_generic.h"
+#include "rtc_base/buffer.h"
+#include "rtc_base/thread.h"
+#include "rtc_base/thread_annotations.h"
+#include "sdk/objc/base/RTCMacros.h"
+#include "voice_processing_audio_unit.h"
+
+RTC_FWD_DECL_OBJC_CLASS(RTCNativeAudioSessionDelegateAdapter);
+
+namespace webrtc {
+
+class FineAudioBuffer;
+
+namespace ios_adm {
+
+// Implements full duplex 16-bit mono PCM audio support for iOS using a
+// Voice-Processing (VP) I/O audio unit in Core Audio. The VP I/O audio unit
+// supports audio echo cancellation. It also adds automatic gain control,
+// adjustment of voice-processing quality and muting.
+//
+// An instance must be created and destroyed on one and the same thread.
+// All supported public methods must also be called on the same thread.
+// A thread checker will RTC_DCHECK if any supported method is called on an
+// invalid thread.
+//
+// Recorded audio will be delivered on a real-time internal I/O thread in the
+// audio unit. The audio unit will also ask for audio data to play out on this
+// same thread.
+class AudioDeviceIOS : public AudioDeviceGeneric,
+ public AudioSessionObserver,
+ public VoiceProcessingAudioUnitObserver {
+ public:
+ explicit AudioDeviceIOS(bool bypass_voice_processing);
+ ~AudioDeviceIOS() override;
+
+ void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override;
+
+ InitStatus Init() override;
+ int32_t Terminate() override;
+ bool Initialized() const override;
+
+ int32_t InitPlayout() override;
+ bool PlayoutIsInitialized() const override;
+
+ int32_t InitRecording() override;
+ bool RecordingIsInitialized() const override;
+
+ int32_t StartPlayout() override;
+ int32_t StopPlayout() override;
+ bool Playing() const override;
+
+ int32_t StartRecording() override;
+ int32_t StopRecording() override;
+ bool Recording() const override;
+
+  // These methods return hard-coded delay values and not dynamic delay
+ // estimates. The reason is that iOS supports a built-in AEC and the WebRTC
+ // AEC will always be disabled in the Libjingle layer to avoid running two
+ // AEC implementations at the same time. And, it saves resources to avoid
+ // updating these delay values continuously.
+ // TODO(henrika): it would be possible to mark these two methods as not
+ // implemented since they are only called for A/V-sync purposes today and
+ // A/V-sync is not supported on iOS. However, we avoid adding error messages
+  // to the log by using these dummy implementations instead.
+ int32_t PlayoutDelay(uint16_t& delayMS) const override;
+
+ // No implementation for playout underrun on iOS. We override it to avoid a
+ // periodic log that it isn't available from the base class.
+ int32_t GetPlayoutUnderrunCount() const override { return -1; }
+
+ // Native audio parameters stored during construction.
+ // These methods are unique for the iOS implementation.
+ int GetPlayoutAudioParameters(AudioParameters* params) const override;
+ int GetRecordAudioParameters(AudioParameters* params) const override;
+
+ // These methods are currently not fully implemented on iOS:
+
+ // See audio_device_not_implemented.cc for trivial implementations.
+ int32_t ActiveAudioLayer(
+ AudioDeviceModule::AudioLayer& audioLayer) const override;
+ int32_t PlayoutIsAvailable(bool& available) override;
+ int32_t RecordingIsAvailable(bool& available) override;
+ int16_t PlayoutDevices() override;
+ int16_t RecordingDevices() override;
+ int32_t PlayoutDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) override;
+ int32_t RecordingDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) override;
+ int32_t SetPlayoutDevice(uint16_t index) override;
+ int32_t SetPlayoutDevice(
+ AudioDeviceModule::WindowsDeviceType device) override;
+ int32_t SetRecordingDevice(uint16_t index) override;
+ int32_t SetRecordingDevice(
+ AudioDeviceModule::WindowsDeviceType device) override;
+ int32_t InitSpeaker() override;
+ bool SpeakerIsInitialized() const override;
+ int32_t InitMicrophone() override;
+ bool MicrophoneIsInitialized() const override;
+ int32_t SpeakerVolumeIsAvailable(bool& available) override;
+ int32_t SetSpeakerVolume(uint32_t volume) override;
+ int32_t SpeakerVolume(uint32_t& volume) const override;
+ int32_t MaxSpeakerVolume(uint32_t& maxVolume) const override;
+ int32_t MinSpeakerVolume(uint32_t& minVolume) const override;
+ int32_t MicrophoneVolumeIsAvailable(bool& available) override;
+ int32_t SetMicrophoneVolume(uint32_t volume) override;
+ int32_t MicrophoneVolume(uint32_t& volume) const override;
+ int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const override;
+ int32_t MinMicrophoneVolume(uint32_t& minVolume) const override;
+ int32_t MicrophoneMuteIsAvailable(bool& available) override;
+ int32_t SetMicrophoneMute(bool enable) override;
+ int32_t MicrophoneMute(bool& enabled) const override;
+ int32_t SpeakerMuteIsAvailable(bool& available) override;
+ int32_t SetSpeakerMute(bool enable) override;
+ int32_t SpeakerMute(bool& enabled) const override;
+ int32_t StereoPlayoutIsAvailable(bool& available) override;
+ int32_t SetStereoPlayout(bool enable) override;
+ int32_t StereoPlayout(bool& enabled) const override;
+ int32_t StereoRecordingIsAvailable(bool& available) override;
+ int32_t SetStereoRecording(bool enable) override;
+ int32_t StereoRecording(bool& enabled) const override;
+
+ // AudioSessionObserver methods. May be called from any thread.
+ void OnInterruptionBegin() override;
+ void OnInterruptionEnd() override;
+ void OnValidRouteChange() override;
+ void OnCanPlayOrRecordChange(bool can_play_or_record) override;
+ void OnChangedOutputVolume() override;
+
+ // VoiceProcessingAudioUnitObserver methods.
+ OSStatus OnDeliverRecordedData(AudioUnitRenderActionFlags* flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 num_frames,
+ AudioBufferList* io_data) override;
+ OSStatus OnGetPlayoutData(AudioUnitRenderActionFlags* flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 num_frames,
+ AudioBufferList* io_data) override;
+
+ bool IsInterrupted();
+
+ private:
+ // Called by the relevant AudioSessionObserver methods on `thread_`.
+ void HandleInterruptionBegin();
+ void HandleInterruptionEnd();
+ void HandleValidRouteChange();
+ void HandleCanPlayOrRecordChange(bool can_play_or_record);
+ void HandleSampleRateChange();
+ void HandlePlayoutGlitchDetected();
+ void HandleOutputVolumeChange();
+
+ // Uses current `playout_parameters_` and `record_parameters_` to inform the
+ // audio device buffer (ADB) about our internal audio parameters.
+ void UpdateAudioDeviceBuffer();
+
+ // Since the preferred audio parameters are only hints to the OS, the actual
+ // values may be different once the AVAudioSession has been activated.
+ // This method asks for the current hardware parameters and takes actions
+ // if they should differ from what we have asked for initially. It also
+ // defines `playout_parameters_` and `record_parameters_`.
+ void SetupAudioBuffersForActiveAudioSession();
+
+ // Creates the audio unit.
+ bool CreateAudioUnit();
+
+ // Updates the audio unit state based on current state.
+ void UpdateAudioUnit(bool can_play_or_record);
+
+ // Configures the audio session for WebRTC.
+ bool ConfigureAudioSession();
+
+ // Like above, but requires caller to already hold session lock.
+ bool ConfigureAudioSessionLocked();
+
+ // Unconfigures the audio session.
+ void UnconfigureAudioSession();
+
+ // Activates our audio session, creates and initializes the voice-processing
+ // audio unit and verifies that we got the preferred native audio parameters.
+ bool InitPlayOrRecord();
+
+ // Closes and deletes the voice-processing I/O unit.
+ void ShutdownPlayOrRecord();
+
+ // Resets thread-checkers before a call is restarted.
+ void PrepareForNewStart();
+
+ // Determines whether voice processing should be enabled or disabled.
+ const bool bypass_voice_processing_;
+
+ // Native I/O audio thread checker.
+ SequenceChecker io_thread_checker_;
+
+ // Thread that this object is created on.
+ rtc::Thread* thread_;
+
+ // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
+ // AudioDeviceModuleImpl class and called by AudioDeviceModule::Create().
+ // The AudioDeviceBuffer is a member of the AudioDeviceModuleImpl instance
+ // and therefore outlives this object.
+ AudioDeviceBuffer* audio_device_buffer_;
+
+ // Contains audio parameters (sample rate, #channels, buffer size etc.) for
+  // the playout and recording sides. These structures are set in two steps:
+ // first, native sample rate and #channels are defined in Init(). Next, the
+ // audio session is activated and we verify that the preferred parameters
+ // were granted by the OS. At this stage it is also possible to add a third
+ // component to the parameters; the native I/O buffer duration.
+ // A RTC_CHECK will be hit if we for some reason fail to open an audio session
+ // using the specified parameters.
+ AudioParameters playout_parameters_;
+ AudioParameters record_parameters_;
+
+ // The AudioUnit used to play and record audio.
+ std::unique_ptr<VoiceProcessingAudioUnit> audio_unit_;
+
+ // FineAudioBuffer takes an AudioDeviceBuffer which delivers audio data
+ // in chunks of 10ms. It then allows for this data to be pulled in
+ // a finer or coarser granularity. I.e. interacting with this class instead
+ // of directly with the AudioDeviceBuffer one can ask for any number of
+  // audio data samples. It also supports a similar scheme for the recording
+ // side.
+ // Example: native buffer size can be 128 audio frames at 16kHz sample rate.
+ // WebRTC will provide 480 audio frames per 10ms but iOS asks for 128
+ // in each callback (one every 8ms). This class can then ask for 128 and the
+ // FineAudioBuffer will ask WebRTC for new data only when needed and also
+ // cache non-utilized audio between callbacks. On the recording side, iOS
+ // can provide audio data frames of size 128 and these are accumulated until
+ // enough data to supply one 10ms call exists. This 10ms chunk is then sent
+ // to WebRTC and the remaining part is stored.
+ std::unique_ptr<FineAudioBuffer> fine_audio_buffer_;
+
+ // Temporary storage for recorded data. AudioUnitRender() renders into this
+ // array as soon as a frame of the desired buffer size has been recorded.
+ // On real iOS devices, the size will be fixed and set once. For iOS
+ // simulators, the size can vary from callback to callback and the size
+ // will be changed dynamically to account for this behavior.
+ rtc::BufferT<int16_t> record_audio_buffer_;
+
+ // Set to 1 when recording is active and 0 otherwise.
+ std::atomic<int> recording_;
+
+ // Set to 1 when playout is active and 0 otherwise.
+ std::atomic<int> playing_;
+
+ // Set to true after successful call to Init(), false otherwise.
+ bool initialized_ RTC_GUARDED_BY(thread_);
+
+ // Set to true after successful call to InitRecording() or InitPlayout(),
+ // false otherwise.
+ bool audio_is_initialized_;
+
+ // Set to true if audio session is interrupted, false otherwise.
+ bool is_interrupted_;
+
+ // Audio interruption observer instance.
+ RTCNativeAudioSessionDelegateAdapter* audio_session_observer_
+ RTC_GUARDED_BY(thread_);
+
+ // Set to true if we've activated the audio session.
+ bool has_configured_session_ RTC_GUARDED_BY(thread_);
+
+ // Counts number of detected audio glitches on the playout side.
+ int64_t num_detected_playout_glitches_ RTC_GUARDED_BY(thread_);
+ int64_t last_playout_time_ RTC_GUARDED_BY(io_thread_checker_);
+
+ // Counts number of playout callbacks per call.
+ // The value is updated on the native I/O thread and later read on the
+ // creating `thread_` but at this stage no audio is active.
+ // Hence, it is a "thread safe" design and no lock is needed.
+ int64_t num_playout_callbacks_;
+
+ // Contains the time for when the last output volume change was detected.
+ int64_t last_output_volume_change_time_ RTC_GUARDED_BY(thread_);
+
+ // Avoids running pending task after `this` is Terminated.
+ rtc::scoped_refptr<PendingTaskSafetyFlag> safety_ =
+ PendingTaskSafetyFlag::Create();
+};
+} // namespace ios_adm
+} // namespace webrtc
+
+#endif // SDK_OBJC_NATIVE_SRC_AUDIO_AUDIO_DEVICE_IOS_H_
diff --git a/third_party/libwebrtc/sdk/objc/native/src/audio/audio_device_ios.mm b/third_party/libwebrtc/sdk/objc/native/src/audio/audio_device_ios.mm
new file mode 100644
index 0000000000..dd2c11bdd2
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/native/src/audio/audio_device_ios.mm
@@ -0,0 +1,1127 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <AVFoundation/AVFoundation.h>
+#import <Foundation/Foundation.h>
+
+#include "audio_device_ios.h"
+
+#include <cmath>
+
+#include "api/array_view.h"
+#include "api/task_queue/pending_task_safety_flag.h"
+#include "helpers.h"
+#include "modules/audio_device/fine_audio_buffer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/thread.h"
+#include "rtc_base/thread_annotations.h"
+#include "rtc_base/time_utils.h"
+#include "system_wrappers/include/field_trial.h"
+#include "system_wrappers/include/metrics.h"
+
+#import "base/RTCLogging.h"
+#import "components/audio/RTCAudioSession+Private.h"
+#import "components/audio/RTCAudioSession.h"
+#import "components/audio/RTCAudioSessionConfiguration.h"
+#import "components/audio/RTCNativeAudioSessionDelegateAdapter.h"
+
+namespace webrtc {
+namespace ios_adm {
+
+#define LOGI() RTC_LOG(LS_INFO) << "AudioDeviceIOS::"
+
+#define LOG_AND_RETURN_IF_ERROR(error, message) \
+ do { \
+ OSStatus err = error; \
+ if (err) { \
+ RTC_LOG(LS_ERROR) << message << ": " << err; \
+ return false; \
+ } \
+ } while (0)
+
+#define LOG_IF_ERROR(error, message) \
+ do { \
+ OSStatus err = error; \
+ if (err) { \
+ RTC_LOG(LS_ERROR) << message << ": " << err; \
+ } \
+ } while (0)
+
+// Hardcoded delay estimates based on real measurements.
+// TODO(henrika): these values are not used in combination with built-in AEC.
+// Can most likely be removed.
+const UInt16 kFixedPlayoutDelayEstimate = 30;
+const UInt16 kFixedRecordDelayEstimate = 30;
+
+using ios::CheckAndLogError;
+
+#if !defined(NDEBUG)
+// Returns true when the code runs on a device simulator.
+static bool DeviceIsSimulator() {
+ return ios::GetDeviceName() == "x86_64";
+}
+
+// Helper method that logs essential device information strings.
+static void LogDeviceInfo() {
+ RTC_LOG(LS_INFO) << "LogDeviceInfo";
+ @autoreleasepool {
+ RTC_LOG(LS_INFO) << " system name: " << ios::GetSystemName();
+ RTC_LOG(LS_INFO) << " system version: " << ios::GetSystemVersionAsString();
+ RTC_LOG(LS_INFO) << " device type: " << ios::GetDeviceType();
+ RTC_LOG(LS_INFO) << " device name: " << ios::GetDeviceName();
+ RTC_LOG(LS_INFO) << " process name: " << ios::GetProcessName();
+ RTC_LOG(LS_INFO) << " process ID: " << ios::GetProcessID();
+ RTC_LOG(LS_INFO) << " OS version: " << ios::GetOSVersionString();
+ RTC_LOG(LS_INFO) << " processing cores: " << ios::GetProcessorCount();
+ RTC_LOG(LS_INFO) << " low power mode: " << ios::GetLowPowerModeEnabled();
+#if TARGET_IPHONE_SIMULATOR
+ RTC_LOG(LS_INFO) << " TARGET_IPHONE_SIMULATOR is defined";
+#endif
+ RTC_LOG(LS_INFO) << " DeviceIsSimulator: " << DeviceIsSimulator();
+ }
+}
+#endif // !defined(NDEBUG)
+
+AudioDeviceIOS::AudioDeviceIOS(bool bypass_voice_processing)
+ : bypass_voice_processing_(bypass_voice_processing),
+ audio_device_buffer_(nullptr),
+ audio_unit_(nullptr),
+ recording_(0),
+ playing_(0),
+ initialized_(false),
+ audio_is_initialized_(false),
+ is_interrupted_(false),
+ has_configured_session_(false),
+ num_detected_playout_glitches_(0),
+ last_playout_time_(0),
+ num_playout_callbacks_(0),
+ last_output_volume_change_time_(0) {
+ LOGI() << "ctor" << ios::GetCurrentThreadDescription()
+ << ",bypass_voice_processing=" << bypass_voice_processing_;
+ io_thread_checker_.Detach();
+ thread_ = rtc::Thread::Current();
+
+ audio_session_observer_ = [[RTCNativeAudioSessionDelegateAdapter alloc] initWithObserver:this];
+}
+
+AudioDeviceIOS::~AudioDeviceIOS() {
+ RTC_DCHECK_RUN_ON(thread_);
+ LOGI() << "~dtor" << ios::GetCurrentThreadDescription();
+ safety_->SetNotAlive();
+ Terminate();
+ audio_session_observer_ = nil;
+}
+
+void AudioDeviceIOS::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
+ LOGI() << "AttachAudioBuffer";
+ RTC_DCHECK(audioBuffer);
+ RTC_DCHECK_RUN_ON(thread_);
+ audio_device_buffer_ = audioBuffer;
+}
+
+AudioDeviceGeneric::InitStatus AudioDeviceIOS::Init() {
+ LOGI() << "Init";
+ io_thread_checker_.Detach();
+
+ RTC_DCHECK_RUN_ON(thread_);
+ if (initialized_) {
+ return InitStatus::OK;
+ }
+#if !defined(NDEBUG)
+ LogDeviceInfo();
+#endif
+ // Store the preferred sample rate and preferred number of channels already
+ // here. They have not been set and confirmed yet since configureForWebRTC
+ // is not called until audio is about to start. However, it makes sense to
+ // store the parameters now and then verify at a later stage.
+ RTC_OBJC_TYPE(RTCAudioSessionConfiguration)* config =
+ [RTC_OBJC_TYPE(RTCAudioSessionConfiguration) webRTCConfiguration];
+ playout_parameters_.reset(config.sampleRate, config.outputNumberOfChannels);
+ record_parameters_.reset(config.sampleRate, config.inputNumberOfChannels);
+ // Ensure that the audio device buffer (ADB) knows about the internal audio
+ // parameters. Note that, even if we are unable to get a mono audio session,
+ // we will always tell the I/O audio unit to do a channel format conversion
+ // to guarantee mono on the "input side" of the audio unit.
+ UpdateAudioDeviceBuffer();
+ initialized_ = true;
+ return InitStatus::OK;
+}
+
+int32_t AudioDeviceIOS::Terminate() {
+ LOGI() << "Terminate";
+ RTC_DCHECK_RUN_ON(thread_);
+ if (!initialized_) {
+ return 0;
+ }
+ StopPlayout();
+ StopRecording();
+ initialized_ = false;
+ return 0;
+}
+
+bool AudioDeviceIOS::Initialized() const {
+ RTC_DCHECK_RUN_ON(thread_);
+ return initialized_;
+}
+
+int32_t AudioDeviceIOS::InitPlayout() {
+ LOGI() << "InitPlayout";
+ RTC_DCHECK_RUN_ON(thread_);
+ RTC_DCHECK(initialized_);
+ RTC_DCHECK(!audio_is_initialized_);
+ RTC_DCHECK(!playing_.load());
+ if (!audio_is_initialized_) {
+ if (!InitPlayOrRecord()) {
+ RTC_LOG_F(LS_ERROR) << "InitPlayOrRecord failed for InitPlayout!";
+ return -1;
+ }
+ }
+ audio_is_initialized_ = true;
+ return 0;
+}
+
+bool AudioDeviceIOS::PlayoutIsInitialized() const {
+ RTC_DCHECK_RUN_ON(thread_);
+ return audio_is_initialized_;
+}
+
+bool AudioDeviceIOS::RecordingIsInitialized() const {
+ RTC_DCHECK_RUN_ON(thread_);
+ return audio_is_initialized_;
+}
+
+int32_t AudioDeviceIOS::InitRecording() {
+ LOGI() << "InitRecording";
+ RTC_DCHECK_RUN_ON(thread_);
+ RTC_DCHECK(initialized_);
+ RTC_DCHECK(!audio_is_initialized_);
+ RTC_DCHECK(!recording_.load());
+ if (!audio_is_initialized_) {
+ if (!InitPlayOrRecord()) {
+ RTC_LOG_F(LS_ERROR) << "InitPlayOrRecord failed for InitRecording!";
+ return -1;
+ }
+ }
+ audio_is_initialized_ = true;
+ return 0;
+}
+
+int32_t AudioDeviceIOS::StartPlayout() {
+ LOGI() << "StartPlayout";
+ RTC_DCHECK_RUN_ON(thread_);
+ RTC_DCHECK(audio_is_initialized_);
+ RTC_DCHECK(!playing_.load());
+ RTC_DCHECK(audio_unit_);
+ if (fine_audio_buffer_) {
+ fine_audio_buffer_->ResetPlayout();
+ }
+ if (!recording_.load() && audio_unit_->GetState() == VoiceProcessingAudioUnit::kInitialized) {
+ OSStatus result = audio_unit_->Start();
+ if (result != noErr) {
+ RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance];
+ [session notifyAudioUnitStartFailedWithError:result];
+ RTCLogError(@"StartPlayout failed to start audio unit, reason %d", result);
+ return -1;
+ }
+ RTC_LOG(LS_INFO) << "Voice-Processing I/O audio unit is now started";
+ }
+ playing_.store(1, std::memory_order_release);
+ num_playout_callbacks_ = 0;
+ num_detected_playout_glitches_ = 0;
+ return 0;
+}
+
+int32_t AudioDeviceIOS::StopPlayout() {
+ LOGI() << "StopPlayout";
+ RTC_DCHECK_RUN_ON(thread_);
+ if (!audio_is_initialized_ || !playing_.load()) {
+ return 0;
+ }
+ if (!recording_.load()) {
+ ShutdownPlayOrRecord();
+ audio_is_initialized_ = false;
+ }
+ playing_.store(0, std::memory_order_release);
+
+ // Derive average number of calls to OnGetPlayoutData() between detected
+ // audio glitches and add the result to a histogram.
+ int average_number_of_playout_callbacks_between_glitches = 100000;
+ RTC_DCHECK_GE(num_playout_callbacks_, num_detected_playout_glitches_);
+ if (num_detected_playout_glitches_ > 0) {
+ average_number_of_playout_callbacks_between_glitches =
+ num_playout_callbacks_ / num_detected_playout_glitches_;
+ }
+ RTC_HISTOGRAM_COUNTS_100000("WebRTC.Audio.AveragePlayoutCallbacksBetweenGlitches",
+ average_number_of_playout_callbacks_between_glitches);
+ RTCLog(@"Average number of playout callbacks between glitches: %d",
+ average_number_of_playout_callbacks_between_glitches);
+ return 0;
+}
+
+bool AudioDeviceIOS::Playing() const {
+ return playing_.load();
+}
+
+int32_t AudioDeviceIOS::StartRecording() {
+ LOGI() << "StartRecording";
+ RTC_DCHECK_RUN_ON(thread_);
+ RTC_DCHECK(audio_is_initialized_);
+ RTC_DCHECK(!recording_.load());
+ RTC_DCHECK(audio_unit_);
+ if (fine_audio_buffer_) {
+ fine_audio_buffer_->ResetRecord();
+ }
+ if (!playing_.load() && audio_unit_->GetState() == VoiceProcessingAudioUnit::kInitialized) {
+ OSStatus result = audio_unit_->Start();
+ if (result != noErr) {
+ RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance];
+ [session notifyAudioUnitStartFailedWithError:result];
+ RTCLogError(@"StartRecording failed to start audio unit, reason %d", result);
+ return -1;
+ }
+ RTC_LOG(LS_INFO) << "Voice-Processing I/O audio unit is now started";
+ }
+ recording_.store(1, std::memory_order_release);
+ return 0;
+}
+
+int32_t AudioDeviceIOS::StopRecording() {
+ LOGI() << "StopRecording";
+ RTC_DCHECK_RUN_ON(thread_);
+ if (!audio_is_initialized_ || !recording_.load()) {
+ return 0;
+ }
+ if (!playing_.load()) {
+ ShutdownPlayOrRecord();
+ audio_is_initialized_ = false;
+ }
+ recording_.store(0, std::memory_order_release);
+ return 0;
+}
+
+bool AudioDeviceIOS::Recording() const {
+ return recording_.load();
+}
+
+int32_t AudioDeviceIOS::PlayoutDelay(uint16_t& delayMS) const {
+ delayMS = kFixedPlayoutDelayEstimate;
+ return 0;
+}
+
+int AudioDeviceIOS::GetPlayoutAudioParameters(AudioParameters* params) const {
+ LOGI() << "GetPlayoutAudioParameters";
+ RTC_DCHECK(playout_parameters_.is_valid());
+ RTC_DCHECK_RUN_ON(thread_);
+ *params = playout_parameters_;
+ return 0;
+}
+
+int AudioDeviceIOS::GetRecordAudioParameters(AudioParameters* params) const {
+ LOGI() << "GetRecordAudioParameters";
+ RTC_DCHECK(record_parameters_.is_valid());
+ RTC_DCHECK_RUN_ON(thread_);
+ *params = record_parameters_;
+ return 0;
+}
+
+void AudioDeviceIOS::OnInterruptionBegin() {
+ RTC_DCHECK(thread_);
+ LOGI() << "OnInterruptionBegin";
+ thread_->PostTask(SafeTask(safety_, [this] { HandleInterruptionBegin(); }));
+}
+
+void AudioDeviceIOS::OnInterruptionEnd() {
+ RTC_DCHECK(thread_);
+ LOGI() << "OnInterruptionEnd";
+ thread_->PostTask(SafeTask(safety_, [this] { HandleInterruptionEnd(); }));
+}
+
+void AudioDeviceIOS::OnValidRouteChange() {
+ RTC_DCHECK(thread_);
+ thread_->PostTask(SafeTask(safety_, [this] { HandleValidRouteChange(); }));
+}
+
+void AudioDeviceIOS::OnCanPlayOrRecordChange(bool can_play_or_record) {
+ RTC_DCHECK(thread_);
+ thread_->PostTask(SafeTask(
+ safety_, [this, can_play_or_record] { HandleCanPlayOrRecordChange(can_play_or_record); }));
+}
+
+void AudioDeviceIOS::OnChangedOutputVolume() {
+ RTC_DCHECK(thread_);
+ thread_->PostTask(SafeTask(safety_, [this] { HandleOutputVolumeChange(); }));
+}
+
+OSStatus AudioDeviceIOS::OnDeliverRecordedData(AudioUnitRenderActionFlags* flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 num_frames,
+ AudioBufferList* /* io_data */) {
+ RTC_DCHECK_RUN_ON(&io_thread_checker_);
+ OSStatus result = noErr;
+ // Simply return if recording is not enabled.
+ if (!recording_.load(std::memory_order_acquire)) return result;
+
+ // Set the size of our own audio buffer and clear it first to avoid copying
+ // in combination with potential reallocations.
+ // On real iOS devices, the size will only be set once (at first callback).
+ record_audio_buffer_.Clear();
+ record_audio_buffer_.SetSize(num_frames);
+
+ // Allocate AudioBuffers to be used as storage for the received audio.
+ // The AudioBufferList structure works as a placeholder for the
+ // AudioBuffer structure, which holds a pointer to the actual data buffer
+ // in `record_audio_buffer_`. Recorded audio will be rendered into this memory
+ // at each input callback when calling AudioUnitRender().
+ AudioBufferList audio_buffer_list;
+ audio_buffer_list.mNumberBuffers = 1;
+ AudioBuffer* audio_buffer = &audio_buffer_list.mBuffers[0];
+ audio_buffer->mNumberChannels = record_parameters_.channels();
+ audio_buffer->mDataByteSize =
+ record_audio_buffer_.size() * VoiceProcessingAudioUnit::kBytesPerSample;
+ audio_buffer->mData = reinterpret_cast<int8_t*>(record_audio_buffer_.data());
+
+ // Obtain the recorded audio samples by initiating a rendering cycle.
+ // Since it happens on the input bus, the `io_data` parameter is a reference
+ // to the preallocated audio buffer list that the audio unit renders into.
+ // We can make the audio unit provide a buffer instead in io_data, but we
+ // currently just use our own.
+ // TODO(henrika): should error handling be improved?
+ result = audio_unit_->Render(flags, time_stamp, bus_number, num_frames, &audio_buffer_list);
+ if (result != noErr) {
+ RTCLogError(@"Failed to render audio.");
+ return result;
+ }
+
+ // Get a pointer to the recorded audio and send it to the WebRTC ADB.
+ // Use the FineAudioBuffer instance to convert between native buffer size
+ // and the 10ms buffer size used by WebRTC.
+ fine_audio_buffer_->DeliverRecordedData(record_audio_buffer_, kFixedRecordDelayEstimate);
+ return noErr;
+}
+
+OSStatus AudioDeviceIOS::OnGetPlayoutData(AudioUnitRenderActionFlags* flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 num_frames,
+ AudioBufferList* io_data) {
+ RTC_DCHECK_RUN_ON(&io_thread_checker_);
+ // Verify 16-bit, noninterleaved mono PCM signal format.
+ RTC_DCHECK_EQ(1, io_data->mNumberBuffers);
+ AudioBuffer* audio_buffer = &io_data->mBuffers[0];
+ RTC_DCHECK_EQ(1, audio_buffer->mNumberChannels);
+
+ // Produce silence and give audio unit a hint about it if playout is not
+ // activated.
+ if (!playing_.load(std::memory_order_acquire)) {
+ const size_t size_in_bytes = audio_buffer->mDataByteSize;
+ RTC_CHECK_EQ(size_in_bytes / VoiceProcessingAudioUnit::kBytesPerSample, num_frames);
+ *flags |= kAudioUnitRenderAction_OutputIsSilence;
+ memset(static_cast<int8_t*>(audio_buffer->mData), 0, size_in_bytes);
+ return noErr;
+ }
+
+ // Measure time since last call to OnGetPlayoutData() and see if it is larger
+ // than a well defined threshold which depends on the current IO buffer size.
+ // If so, we have an indication of a glitch in the output audio since the
+ // core audio layer will most likely run dry in this state.
+ ++num_playout_callbacks_;
+ const int64_t now_time = rtc::TimeMillis();
+ if (time_stamp->mSampleTime != num_frames) {
+ const int64_t delta_time = now_time - last_playout_time_;
+ const int glitch_threshold = 1.6 * playout_parameters_.GetBufferSizeInMilliseconds();
+ if (delta_time > glitch_threshold) {
+ RTCLogWarning(@"Possible playout audio glitch detected.\n"
+ " Time since last OnGetPlayoutData was %lld ms.\n",
+ delta_time);
+ // Exclude extreme delta values since they do most likely not correspond
+ // to a real glitch. Instead, the most probable cause is that a headset
+ // has been plugged in or out. There are more direct ways to detect
+ // audio device changes (see HandleValidRouteChange()) but experiments
+ // show that using it leads to more complex implementations.
+ // TODO(henrika): more tests might be needed to come up with an even
+ // better upper limit.
+ if (glitch_threshold < 120 && delta_time > 120) {
+ RTCLog(@"Glitch warning is ignored. Probably caused by device switch.");
+ } else {
+ thread_->PostTask(SafeTask(safety_, [this] { HandlePlayoutGlitchDetected(); }));
+ }
+ }
+ }
+ last_playout_time_ = now_time;
+
+ // Read decoded 16-bit PCM samples from WebRTC (using a size that matches
+ // the native I/O audio unit) and copy the result to the audio buffer in the
+ // `io_data` destination.
+ fine_audio_buffer_->GetPlayoutData(
+ rtc::ArrayView<int16_t>(static_cast<int16_t*>(audio_buffer->mData), num_frames),
+ kFixedPlayoutDelayEstimate);
+ return noErr;
+}
+
+void AudioDeviceIOS::HandleInterruptionBegin() {
+ RTC_DCHECK_RUN_ON(thread_);
+ RTCLog(@"Interruption begin. IsInterrupted changed from %d to 1.", is_interrupted_);
+ if (audio_unit_ && audio_unit_->GetState() == VoiceProcessingAudioUnit::kStarted) {
+ RTCLog(@"Stopping the audio unit due to interruption begin.");
+ if (!audio_unit_->Stop()) {
+ RTCLogError(@"Failed to stop the audio unit for interruption begin.");
+ }
+ PrepareForNewStart();
+ }
+ is_interrupted_ = true;
+}
+
+void AudioDeviceIOS::HandleInterruptionEnd() {
+ RTC_DCHECK_RUN_ON(thread_);
+ RTCLog(@"Interruption ended. IsInterrupted changed from %d to 0. "
+ "Updating audio unit state.",
+ is_interrupted_);
+ is_interrupted_ = false;
+ if (!audio_unit_) return;
+ if (webrtc::field_trial::IsEnabled("WebRTC-Audio-iOS-Holding")) {
+ // Work around an issue where audio does not restart properly after an interruption
+ // by restarting the audio unit when the interruption ends.
+ if (audio_unit_->GetState() == VoiceProcessingAudioUnit::kStarted) {
+ audio_unit_->Stop();
+ PrepareForNewStart();
+ }
+ if (audio_unit_->GetState() == VoiceProcessingAudioUnit::kInitialized) {
+ audio_unit_->Uninitialize();
+ }
+ // Allocate new buffers given the potentially new stream format.
+ SetupAudioBuffersForActiveAudioSession();
+ }
+ UpdateAudioUnit([RTC_OBJC_TYPE(RTCAudioSession) sharedInstance].canPlayOrRecord);
+}
+
+void AudioDeviceIOS::HandleValidRouteChange() {
+ RTC_DCHECK_RUN_ON(thread_);
+ RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance];
+ RTCLog(@"%@", session);
+ HandleSampleRateChange();
+}
+
+void AudioDeviceIOS::HandleCanPlayOrRecordChange(bool can_play_or_record) {
+ RTCLog(@"Handling CanPlayOrRecord change to: %d", can_play_or_record);
+ UpdateAudioUnit(can_play_or_record);
+}
+
+void AudioDeviceIOS::HandleSampleRateChange() {
+ RTC_DCHECK_RUN_ON(thread_);
+ RTCLog(@"Handling sample rate change.");
+
+ // Don't do anything if we're interrupted.
+ if (is_interrupted_) {
+ RTCLog(@"Ignoring sample rate change due to interruption.");
+ return;
+ }
+
+ // If we don't have an audio unit yet, or the audio unit is uninitialized,
+ // there is no work to do.
+ if (!audio_unit_ || audio_unit_->GetState() < VoiceProcessingAudioUnit::kInitialized) {
+ return;
+ }
+
+ // The audio unit is already initialized or started.
+ // Check to see if the sample rate or buffer size has changed.
+ RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance];
+ const double new_sample_rate = session.sampleRate;
+ const NSTimeInterval session_buffer_duration = session.IOBufferDuration;
+ const size_t new_frames_per_buffer =
+ static_cast<size_t>(new_sample_rate * session_buffer_duration + .5);
+ const double current_sample_rate = playout_parameters_.sample_rate();
+ const size_t current_frames_per_buffer = playout_parameters_.frames_per_buffer();
+ RTCLog(@"Handling playout sample rate change:\n"
+ " Session sample rate: %f frames_per_buffer: %lu\n"
+ " ADM sample rate: %f frames_per_buffer: %lu",
+ new_sample_rate,
+ (unsigned long)new_frames_per_buffer,
+ current_sample_rate,
+ (unsigned long)current_frames_per_buffer);
+
+ // Sample rate and buffer size are the same, no work to do.
+ if (std::abs(current_sample_rate - new_sample_rate) <= DBL_EPSILON &&
+ current_frames_per_buffer == new_frames_per_buffer) {
+ RTCLog(@"Ignoring sample rate change since audio parameters are intact.");
+ return;
+ }
+
+ // Extra sanity check to ensure that the new sample rate is valid.
+ if (new_sample_rate <= 0.0) {
+ RTCLogError(@"Sample rate is invalid: %f", new_sample_rate);
+ return;
+ }
+
+ // We need to adjust our format and buffer sizes.
+ // The stream format is about to be changed and it requires that we first
+ // stop and uninitialize the audio unit to deallocate its resources.
+ RTCLog(@"Stopping and uninitializing audio unit to adjust buffers.");
+ bool restart_audio_unit = false;
+ if (audio_unit_->GetState() == VoiceProcessingAudioUnit::kStarted) {
+ audio_unit_->Stop();
+ restart_audio_unit = true;
+ PrepareForNewStart();
+ }
+ if (audio_unit_->GetState() == VoiceProcessingAudioUnit::kInitialized) {
+ audio_unit_->Uninitialize();
+ }
+
+ // Allocate new buffers given the new stream format.
+ SetupAudioBuffersForActiveAudioSession();
+
+ // Initialize the audio unit again with the new sample rate.
+ if (!audio_unit_->Initialize(playout_parameters_.sample_rate())) {
+ RTCLogError(@"Failed to initialize the audio unit with sample rate: %d",
+ playout_parameters_.sample_rate());
+ return;
+ }
+
+ // Restart the audio unit if it was already running.
+ if (restart_audio_unit) {
+ OSStatus result = audio_unit_->Start();
+ if (result != noErr) {
+ RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance];
+ [session notifyAudioUnitStartFailedWithError:result];
+ RTCLogError(@"Failed to start audio unit with sample rate: %d, reason %d",
+ playout_parameters_.sample_rate(),
+ result);
+ return;
+ }
+ }
+ RTCLog(@"Successfully handled sample rate change.");
+}
+
+void AudioDeviceIOS::HandlePlayoutGlitchDetected() {
+ RTC_DCHECK_RUN_ON(thread_);
+ // Don't update metrics if we're interrupted since a "glitch" is expected
+ // in this state.
+ if (is_interrupted_) {
+ RTCLog(@"Ignoring audio glitch due to interruption.");
+ return;
+ }
+ // Avoid doing glitch detection for two seconds after a volume change
+ // has been detected to reduce the risk of false alarm.
+ if (last_output_volume_change_time_ > 0 &&
+ rtc::TimeSince(last_output_volume_change_time_) < 2000) {
+ RTCLog(@"Ignoring audio glitch due to recent output volume change.");
+ return;
+ }
+ num_detected_playout_glitches_++;
+ RTCLog(@"Number of detected playout glitches: %lld", num_detected_playout_glitches_);
+
+ int64_t glitch_count = num_detected_playout_glitches_;
+ dispatch_async(dispatch_get_main_queue(), ^{
+ RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance];
+ [session notifyDidDetectPlayoutGlitch:glitch_count];
+ });
+}
+
+void AudioDeviceIOS::HandleOutputVolumeChange() {
+ RTC_DCHECK_RUN_ON(thread_);
+ RTCLog(@"Output volume change detected.");
+ // Store time of this detection so it can be used to defer detection of
+ // glitches too close in time to this event.
+ last_output_volume_change_time_ = rtc::TimeMillis();
+}
+
+void AudioDeviceIOS::UpdateAudioDeviceBuffer() {
+ LOGI() << "UpdateAudioDevicebuffer";
+ // AttachAudioBuffer() is called at construction by the main class but check
+ // just in case.
+ RTC_DCHECK(audio_device_buffer_) << "AttachAudioBuffer must be called first";
+ RTC_DCHECK_GT(playout_parameters_.sample_rate(), 0);
+ RTC_DCHECK_GT(record_parameters_.sample_rate(), 0);
+ RTC_DCHECK_EQ(playout_parameters_.channels(), 1);
+ RTC_DCHECK_EQ(record_parameters_.channels(), 1);
+ // Inform the audio device buffer (ADB) about the new audio format.
+ audio_device_buffer_->SetPlayoutSampleRate(playout_parameters_.sample_rate());
+ audio_device_buffer_->SetPlayoutChannels(playout_parameters_.channels());
+ audio_device_buffer_->SetRecordingSampleRate(record_parameters_.sample_rate());
+ audio_device_buffer_->SetRecordingChannels(record_parameters_.channels());
+}
+
+void AudioDeviceIOS::SetupAudioBuffersForActiveAudioSession() {
+ LOGI() << "SetupAudioBuffersForActiveAudioSession";
+ // Verify the current values once the audio session has been activated.
+ RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance];
+ double sample_rate = session.sampleRate;
+ NSTimeInterval io_buffer_duration = session.IOBufferDuration;
+ RTCLog(@"%@", session);
+
+ // Log a warning message for the case when we are unable to set the preferred
+ // hardware sample rate but continue and use the non-ideal sample rate after
+ // reinitializing the audio parameters. Most BT headsets only support 8kHz or
+ // 16kHz.
+ RTC_OBJC_TYPE(RTCAudioSessionConfiguration)* webRTCConfig =
+ [RTC_OBJC_TYPE(RTCAudioSessionConfiguration) webRTCConfiguration];
+ if (sample_rate != webRTCConfig.sampleRate) {
+ RTC_LOG(LS_WARNING) << "Unable to set the preferred sample rate";
+ }
+
+ // Crash reports indicates that it can happen in rare cases that the reported
+ // sample rate is less than or equal to zero. If that happens and if a valid
+ // sample rate has already been set during initialization, the best guess we
+ // can do is to reuse the current sample rate.
+ if (sample_rate <= DBL_EPSILON && playout_parameters_.sample_rate() > 0) {
+ RTCLogError(@"Reported rate is invalid: %f. "
+ "Using %d as sample rate instead.",
+ sample_rate, playout_parameters_.sample_rate());
+ sample_rate = playout_parameters_.sample_rate();
+ }
+
+ // At this stage, we also know the exact IO buffer duration and can add
+ // that info to the existing audio parameters where it is converted into
+ // number of audio frames.
+ // Example: IO buffer size = 0.008 seconds <=> 128 audio frames at 16kHz.
+ // Hence, 128 is the size we expect to see in upcoming render callbacks.
+ playout_parameters_.reset(sample_rate, playout_parameters_.channels(), io_buffer_duration);
+ RTC_DCHECK(playout_parameters_.is_complete());
+ record_parameters_.reset(sample_rate, record_parameters_.channels(), io_buffer_duration);
+ RTC_DCHECK(record_parameters_.is_complete());
+ RTC_LOG(LS_INFO) << " frames per I/O buffer: " << playout_parameters_.frames_per_buffer();
+ RTC_LOG(LS_INFO) << " bytes per I/O buffer: " << playout_parameters_.GetBytesPerBuffer();
+ RTC_DCHECK_EQ(playout_parameters_.GetBytesPerBuffer(), record_parameters_.GetBytesPerBuffer());
+
+ // Update the ADB parameters since the sample rate might have changed.
+ UpdateAudioDeviceBuffer();
+
+ // Create a modified audio buffer class which allows us to ask for,
+ // or deliver, any number of samples (and not only multiple of 10ms) to match
+ // the native audio unit buffer size.
+ RTC_DCHECK(audio_device_buffer_);
+ fine_audio_buffer_.reset(new FineAudioBuffer(audio_device_buffer_));
+}
+
+bool AudioDeviceIOS::CreateAudioUnit() {
+ RTC_DCHECK(!audio_unit_);
+
+ audio_unit_.reset(new VoiceProcessingAudioUnit(bypass_voice_processing_, this));
+ if (!audio_unit_->Init()) {
+ audio_unit_.reset();
+ return false;
+ }
+
+ return true;
+}
+
+void AudioDeviceIOS::UpdateAudioUnit(bool can_play_or_record) {
+ RTC_DCHECK_RUN_ON(thread_);
+ RTCLog(@"Updating audio unit state. CanPlayOrRecord=%d IsInterrupted=%d",
+ can_play_or_record,
+ is_interrupted_);
+
+ if (is_interrupted_) {
+ RTCLog(@"Ignoring audio unit update due to interruption.");
+ return;
+ }
+
+ // If we're not initialized we don't need to do anything. Audio unit will
+ // be initialized on initialization.
+ if (!audio_is_initialized_) return;
+
+ // If we're initialized, we must have an audio unit.
+ RTC_DCHECK(audio_unit_);
+
+ bool should_initialize_audio_unit = false;
+ bool should_uninitialize_audio_unit = false;
+ bool should_start_audio_unit = false;
+ bool should_stop_audio_unit = false;
+
+ switch (audio_unit_->GetState()) {
+ case VoiceProcessingAudioUnit::kInitRequired:
+ RTCLog(@"VPAU state: InitRequired");
+ RTC_DCHECK_NOTREACHED();
+ break;
+ case VoiceProcessingAudioUnit::kUninitialized:
+ RTCLog(@"VPAU state: Uninitialized");
+ should_initialize_audio_unit = can_play_or_record;
+ should_start_audio_unit =
+ should_initialize_audio_unit && (playing_.load() || recording_.load());
+ break;
+ case VoiceProcessingAudioUnit::kInitialized:
+ RTCLog(@"VPAU state: Initialized");
+ should_start_audio_unit = can_play_or_record && (playing_.load() || recording_.load());
+ should_uninitialize_audio_unit = !can_play_or_record;
+ break;
+ case VoiceProcessingAudioUnit::kStarted:
+ RTCLog(@"VPAU state: Started");
+ RTC_DCHECK(playing_.load() || recording_.load());
+ should_stop_audio_unit = !can_play_or_record;
+ should_uninitialize_audio_unit = should_stop_audio_unit;
+ break;
+ }
+
+ if (should_initialize_audio_unit) {
+ RTCLog(@"Initializing audio unit for UpdateAudioUnit");
+ ConfigureAudioSession();
+ SetupAudioBuffersForActiveAudioSession();
+ if (!audio_unit_->Initialize(playout_parameters_.sample_rate())) {
+ RTCLogError(@"Failed to initialize audio unit.");
+ return;
+ }
+ }
+
+ if (should_start_audio_unit) {
+ RTCLog(@"Starting audio unit for UpdateAudioUnit");
+ // Log session settings before trying to start audio streaming.
+ RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance];
+ RTCLog(@"%@", session);
+ OSStatus result = audio_unit_->Start();
+ if (result != noErr) {
+ [session notifyAudioUnitStartFailedWithError:result];
+ RTCLogError(@"Failed to start audio unit, reason %d", result);
+ return;
+ }
+ }
+
+ if (should_stop_audio_unit) {
+ RTCLog(@"Stopping audio unit for UpdateAudioUnit");
+ if (!audio_unit_->Stop()) {
+ RTCLogError(@"Failed to stop audio unit.");
+ PrepareForNewStart();
+ return;
+ }
+ PrepareForNewStart();
+ }
+
+ if (should_uninitialize_audio_unit) {
+ RTCLog(@"Uninitializing audio unit for UpdateAudioUnit");
+ audio_unit_->Uninitialize();
+ UnconfigureAudioSession();
+ }
+}
+
+bool AudioDeviceIOS::ConfigureAudioSession() {
+ RTC_DCHECK_RUN_ON(thread_);
+ RTCLog(@"Configuring audio session.");
+ if (has_configured_session_) {
+ RTCLogWarning(@"Audio session already configured.");
+ return false;
+ }
+ RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance];
+ [session lockForConfiguration];
+ bool success = [session configureWebRTCSession:nil];
+ [session unlockForConfiguration];
+ if (success) {
+ has_configured_session_ = true;
+ RTCLog(@"Configured audio session.");
+ } else {
+ RTCLog(@"Failed to configure audio session.");
+ }
+ return success;
+}
+
+bool AudioDeviceIOS::ConfigureAudioSessionLocked() {
+ RTC_DCHECK_RUN_ON(thread_);
+ RTCLog(@"Configuring audio session.");
+ if (has_configured_session_) {
+ RTCLogWarning(@"Audio session already configured.");
+ return false;
+ }
+ RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance];
+ bool success = [session configureWebRTCSession:nil];
+ if (success) {
+ has_configured_session_ = true;
+ RTCLog(@"Configured audio session.");
+ } else {
+ RTCLog(@"Failed to configure audio session.");
+ }
+ return success;
+}
+
+void AudioDeviceIOS::UnconfigureAudioSession() {
+ RTC_DCHECK_RUN_ON(thread_);
+ RTCLog(@"Unconfiguring audio session.");
+ if (!has_configured_session_) {
+ RTCLogWarning(@"Audio session already unconfigured.");
+ return;
+ }
+ RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance];
+ [session lockForConfiguration];
+ [session unconfigureWebRTCSession:nil];
+ [session endWebRTCSession:nil];
+ [session unlockForConfiguration];
+ has_configured_session_ = false;
+ RTCLog(@"Unconfigured audio session.");
+}
+
+bool AudioDeviceIOS::InitPlayOrRecord() {
+ LOGI() << "InitPlayOrRecord";
+ RTC_DCHECK_RUN_ON(thread_);
+
+ // There should be no audio unit at this point.
+ if (!CreateAudioUnit()) {
+ return false;
+ }
+
+ RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance];
+ // Subscribe to audio session events.
+ [session pushDelegate:audio_session_observer_];
+ is_interrupted_ = session.isInterrupted ? true : false;
+
+ // Lock the session to make configuration changes.
+ [session lockForConfiguration];
+ NSError* error = nil;
+ if (![session beginWebRTCSession:&error]) {
+ [session unlockForConfiguration];
+ RTCLogError(@"Failed to begin WebRTC session: %@", error.localizedDescription);
+ audio_unit_.reset();
+ return false;
+ }
+
+ // If we are ready to play or record, and if the audio session can be
+ // configured, then initialize the audio unit.
+ if (session.canPlayOrRecord) {
+ if (!ConfigureAudioSessionLocked()) {
+ // One possible reason for failure is if an attempt was made to use the
+ // audio session during or after a Media Services failure.
+ // See AVAudioSessionErrorCodeMediaServicesFailed for details.
+ [session unlockForConfiguration];
+ audio_unit_.reset();
+ return false;
+ }
+ SetupAudioBuffersForActiveAudioSession();
+ audio_unit_->Initialize(playout_parameters_.sample_rate());
+ }
+
+ // Release the lock.
+ [session unlockForConfiguration];
+ return true;
+}
+
+void AudioDeviceIOS::ShutdownPlayOrRecord() {
+ LOGI() << "ShutdownPlayOrRecord";
+ RTC_DCHECK_RUN_ON(thread_);
+
+ // Stop the audio unit to prevent any additional audio callbacks.
+ audio_unit_->Stop();
+
+ // Close and delete the voice-processing I/O unit.
+ audio_unit_.reset();
+
+ // Detach thread checker for the AURemoteIO::IOThread to ensure that the
+ // next session uses a fresh thread id.
+ io_thread_checker_.Detach();
+
+ // Remove audio session notification observers.
+ RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance];
+ [session removeDelegate:audio_session_observer_];
+
+ // All I/O should be stopped or paused prior to deactivating the audio
+ // session, hence we deactivate as last action.
+ UnconfigureAudioSession();
+}
+
+void AudioDeviceIOS::PrepareForNewStart() {
+ LOGI() << "PrepareForNewStart";
+ // The audio unit has been stopped and preparations are needed for an upcoming
+ // restart. It will result in audio callbacks from a new native I/O thread
+ // which means that we must detach thread checkers here to be prepared for an
+ // upcoming new audio stream.
+ io_thread_checker_.Detach();
+}
+
+bool AudioDeviceIOS::IsInterrupted() {
+ return is_interrupted_;
+}
+
+#pragma mark - Not Implemented
+
+int32_t AudioDeviceIOS::ActiveAudioLayer(AudioDeviceModule::AudioLayer& audioLayer) const {
+ audioLayer = AudioDeviceModule::kPlatformDefaultAudio;
+ return 0;
+}
+
+int16_t AudioDeviceIOS::PlayoutDevices() {
+ // TODO(henrika): improve.
+ RTC_LOG_F(LS_WARNING) << "Not implemented";
+ return (int16_t)1;
+}
+
+int16_t AudioDeviceIOS::RecordingDevices() {
+ // TODO(henrika): improve.
+ RTC_LOG_F(LS_WARNING) << "Not implemented";
+ return (int16_t)1;
+}
+
+int32_t AudioDeviceIOS::InitSpeaker() {
+ return 0;
+}
+
+bool AudioDeviceIOS::SpeakerIsInitialized() const {
+ return true;
+}
+
+int32_t AudioDeviceIOS::SpeakerVolumeIsAvailable(bool& available) {
+ available = false;
+ return 0;
+}
+
+int32_t AudioDeviceIOS::SetSpeakerVolume(uint32_t volume) {
+ RTC_DCHECK_NOTREACHED() << "Not implemented";
+ return -1;
+}
+
+int32_t AudioDeviceIOS::SpeakerVolume(uint32_t& volume) const {
+ RTC_DCHECK_NOTREACHED() << "Not implemented";
+ return -1;
+}
+
+int32_t AudioDeviceIOS::MaxSpeakerVolume(uint32_t& maxVolume) const {
+ RTC_DCHECK_NOTREACHED() << "Not implemented";
+ return -1;
+}
+
+int32_t AudioDeviceIOS::MinSpeakerVolume(uint32_t& minVolume) const {
+ RTC_DCHECK_NOTREACHED() << "Not implemented";
+ return -1;
+}
+
+int32_t AudioDeviceIOS::SpeakerMuteIsAvailable(bool& available) {
+ available = false;
+ return 0;
+}
+
+int32_t AudioDeviceIOS::SetSpeakerMute(bool enable) {
+ RTC_DCHECK_NOTREACHED() << "Not implemented";
+ return -1;
+}
+
+int32_t AudioDeviceIOS::SpeakerMute(bool& enabled) const {
+ RTC_DCHECK_NOTREACHED() << "Not implemented";
+ return -1;
+}
+
+int32_t AudioDeviceIOS::SetPlayoutDevice(uint16_t index) {
+ RTC_LOG_F(LS_WARNING) << "Not implemented";
+ return 0;
+}
+
+int32_t AudioDeviceIOS::SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType) {
+ RTC_DCHECK_NOTREACHED() << "Not implemented";
+ return -1;
+}
+
+int32_t AudioDeviceIOS::InitMicrophone() {
+ return 0;
+}
+
+bool AudioDeviceIOS::MicrophoneIsInitialized() const {
+ return true;
+}
+
+int32_t AudioDeviceIOS::MicrophoneMuteIsAvailable(bool& available) {
+ available = false;
+ return 0;
+}
+
+int32_t AudioDeviceIOS::SetMicrophoneMute(bool enable) {
+ RTC_DCHECK_NOTREACHED() << "Not implemented";
+ return -1;
+}
+
+int32_t AudioDeviceIOS::MicrophoneMute(bool& enabled) const {
+ RTC_DCHECK_NOTREACHED() << "Not implemented";
+ return -1;
+}
+
+int32_t AudioDeviceIOS::StereoRecordingIsAvailable(bool& available) {
+ available = false;
+ return 0;
+}
+
+int32_t AudioDeviceIOS::SetStereoRecording(bool enable) {
+ RTC_LOG_F(LS_WARNING) << "Not implemented";
+ return -1;
+}
+
+int32_t AudioDeviceIOS::StereoRecording(bool& enabled) const {
+ enabled = false;
+ return 0;
+}
+
+int32_t AudioDeviceIOS::StereoPlayoutIsAvailable(bool& available) {
+ available = false;
+ return 0;
+}
+
+int32_t AudioDeviceIOS::SetStereoPlayout(bool enable) {
+ RTC_LOG_F(LS_WARNING) << "Not implemented";
+ return -1;
+}
+
+int32_t AudioDeviceIOS::StereoPlayout(bool& enabled) const {
+ enabled = false;
+ return 0;
+}
+
+int32_t AudioDeviceIOS::MicrophoneVolumeIsAvailable(bool& available) {
+ available = false;
+ return 0;
+}
+
+int32_t AudioDeviceIOS::SetMicrophoneVolume(uint32_t volume) {
+ RTC_DCHECK_NOTREACHED() << "Not implemented";
+ return -1;
+}
+
+int32_t AudioDeviceIOS::MicrophoneVolume(uint32_t& volume) const {
+ RTC_DCHECK_NOTREACHED() << "Not implemented";
+ return -1;
+}
+
+int32_t AudioDeviceIOS::MaxMicrophoneVolume(uint32_t& maxVolume) const {
+ RTC_DCHECK_NOTREACHED() << "Not implemented";
+ return -1;
+}
+
+int32_t AudioDeviceIOS::MinMicrophoneVolume(uint32_t& minVolume) const {
+ RTC_DCHECK_NOTREACHED() << "Not implemented";
+ return -1;
+}
+
+int32_t AudioDeviceIOS::PlayoutDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) {
+ RTC_DCHECK_NOTREACHED() << "Not implemented";
+ return -1;
+}
+
+int32_t AudioDeviceIOS::RecordingDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) {
+ RTC_DCHECK_NOTREACHED() << "Not implemented";
+ return -1;
+}
+
+int32_t AudioDeviceIOS::SetRecordingDevice(uint16_t index) {
+ RTC_LOG_F(LS_WARNING) << "Not implemented";
+ return 0;
+}
+
+int32_t AudioDeviceIOS::SetRecordingDevice(AudioDeviceModule::WindowsDeviceType) {
+ RTC_DCHECK_NOTREACHED() << "Not implemented";
+ return -1;
+}
+
+int32_t AudioDeviceIOS::PlayoutIsAvailable(bool& available) {
+ available = true;
+ return 0;
+}
+
+int32_t AudioDeviceIOS::RecordingIsAvailable(bool& available) {
+ available = true;
+ return 0;
+}
+
+} // namespace ios_adm
+} // namespace webrtc
diff --git a/third_party/libwebrtc/sdk/objc/native/src/audio/audio_device_module_ios.h b/third_party/libwebrtc/sdk/objc/native/src/audio/audio_device_module_ios.h
new file mode 100644
index 0000000000..9bcf114e32
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/native/src/audio/audio_device_module_ios.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SDK_OBJC_NATIVE_SRC_AUDIO_AUDIO_DEVICE_MODULE_IOS_H_
+#define SDK_OBJC_NATIVE_SRC_AUDIO_AUDIO_DEVICE_MODULE_IOS_H_
+
+#include <memory>
+
+#include "audio_device_ios.h"
+
+#include "api/task_queue/task_queue_factory.h"
+#include "modules/audio_device/audio_device_buffer.h"
+#include "modules/audio_device/include/audio_device.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+class AudioDeviceGeneric;
+
+namespace ios_adm {
+
+class AudioDeviceModuleIOS : public AudioDeviceModule {
+ public:
+ int32_t AttachAudioBuffer();
+
+ explicit AudioDeviceModuleIOS(bool bypass_voice_processing);
+ ~AudioDeviceModuleIOS() override;
+
+ // Retrieve the currently utilized audio layer
+ int32_t ActiveAudioLayer(AudioLayer* audioLayer) const override;
+
+ // Full-duplex transportation of PCM audio
+ int32_t RegisterAudioCallback(AudioTransport* audioCallback) override;
+
+ // Main initializaton and termination
+ int32_t Init() override;
+ int32_t Terminate() override;
+ bool Initialized() const override;
+
+ // Device enumeration
+ int16_t PlayoutDevices() override;
+ int16_t RecordingDevices() override;
+ int32_t PlayoutDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) override;
+ int32_t RecordingDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) override;
+
+ // Device selection
+ int32_t SetPlayoutDevice(uint16_t index) override;
+ int32_t SetPlayoutDevice(WindowsDeviceType device) override;
+ int32_t SetRecordingDevice(uint16_t index) override;
+ int32_t SetRecordingDevice(WindowsDeviceType device) override;
+
+ // Audio transport initialization
+ int32_t PlayoutIsAvailable(bool* available) override;
+ int32_t InitPlayout() override;
+ bool PlayoutIsInitialized() const override;
+ int32_t RecordingIsAvailable(bool* available) override;
+ int32_t InitRecording() override;
+ bool RecordingIsInitialized() const override;
+
+ // Audio transport control
+ int32_t StartPlayout() override;
+ int32_t StopPlayout() override;
+ bool Playing() const override;
+ int32_t StartRecording() override;
+ int32_t StopRecording() override;
+ bool Recording() const override;
+
+ // Audio mixer initialization
+ int32_t InitSpeaker() override;
+ bool SpeakerIsInitialized() const override;
+ int32_t InitMicrophone() override;
+ bool MicrophoneIsInitialized() const override;
+
+ // Speaker volume controls
+ int32_t SpeakerVolumeIsAvailable(bool* available) override;
+ int32_t SetSpeakerVolume(uint32_t volume) override;
+ int32_t SpeakerVolume(uint32_t* volume) const override;
+ int32_t MaxSpeakerVolume(uint32_t* maxVolume) const override;
+ int32_t MinSpeakerVolume(uint32_t* minVolume) const override;
+
+ // Microphone volume controls
+ int32_t MicrophoneVolumeIsAvailable(bool* available) override;
+ int32_t SetMicrophoneVolume(uint32_t volume) override;
+ int32_t MicrophoneVolume(uint32_t* volume) const override;
+ int32_t MaxMicrophoneVolume(uint32_t* maxVolume) const override;
+ int32_t MinMicrophoneVolume(uint32_t* minVolume) const override;
+
+ // Speaker mute control
+ int32_t SpeakerMuteIsAvailable(bool* available) override;
+ int32_t SetSpeakerMute(bool enable) override;
+ int32_t SpeakerMute(bool* enabled) const override;
+
+ // Microphone mute control
+ int32_t MicrophoneMuteIsAvailable(bool* available) override;
+ int32_t SetMicrophoneMute(bool enable) override;
+ int32_t MicrophoneMute(bool* enabled) const override;
+
+ // Stereo support
+ int32_t StereoPlayoutIsAvailable(bool* available) const override;
+ int32_t SetStereoPlayout(bool enable) override;
+ int32_t StereoPlayout(bool* enabled) const override;
+ int32_t StereoRecordingIsAvailable(bool* available) const override;
+ int32_t SetStereoRecording(bool enable) override;
+ int32_t StereoRecording(bool* enabled) const override;
+
+ // Delay information and control
+ int32_t PlayoutDelay(uint16_t* delayMS) const override;
+
+ bool BuiltInAECIsAvailable() const override;
+ int32_t EnableBuiltInAEC(bool enable) override;
+ bool BuiltInAGCIsAvailable() const override;
+ int32_t EnableBuiltInAGC(bool enable) override;
+ bool BuiltInNSIsAvailable() const override;
+ int32_t EnableBuiltInNS(bool enable) override;
+
+ int32_t GetPlayoutUnderrunCount() const override;
+
+#if defined(WEBRTC_IOS)
+ int GetPlayoutAudioParameters(AudioParameters* params) const override;
+ int GetRecordAudioParameters(AudioParameters* params) const override;
+#endif // WEBRTC_IOS
+ private:
+ const bool bypass_voice_processing_;
+ bool initialized_ = false;
+ const std::unique_ptr<TaskQueueFactory> task_queue_factory_;
+ std::unique_ptr<AudioDeviceIOS> audio_device_;
+ std::unique_ptr<AudioDeviceBuffer> audio_device_buffer_;
+};
+} // namespace ios_adm
+} // namespace webrtc
+
+#endif // SDK_OBJC_NATIVE_SRC_AUDIO_AUDIO_DEVICE_MODULE_IOS_H_
diff --git a/third_party/libwebrtc/sdk/objc/native/src/audio/audio_device_module_ios.mm b/third_party/libwebrtc/sdk/objc/native/src/audio/audio_device_module_ios.mm
new file mode 100644
index 0000000000..5effef3abd
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/native/src/audio/audio_device_module_ios.mm
@@ -0,0 +1,669 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio_device_module_ios.h"
+
+#include "api/task_queue/default_task_queue_factory.h"
+#include "modules/audio_device/audio_device_config.h"
+#include "modules/audio_device/audio_device_generic.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/ref_count.h"
+#include "system_wrappers/include/metrics.h"
+
+#if defined(WEBRTC_IOS)
+#include "audio_device_ios.h"
+#endif
+
+#define CHECKinitialized_() \
+ { \
+ if (!initialized_) { \
+ return -1; \
+ }; \
+ }
+
+#define CHECKinitialized__BOOL() \
+ { \
+ if (!initialized_) { \
+ return false; \
+ }; \
+ }
+
+namespace webrtc {
+namespace ios_adm {
+
+AudioDeviceModuleIOS::AudioDeviceModuleIOS(bool bypass_voice_processing)
+ : bypass_voice_processing_(bypass_voice_processing),
+ task_queue_factory_(CreateDefaultTaskQueueFactory()) {
+ RTC_LOG(LS_INFO) << "current platform is IOS";
+ RTC_LOG(LS_INFO) << "iPhone Audio APIs will be utilized.";
+}
+
+ int32_t AudioDeviceModuleIOS::AttachAudioBuffer() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ audio_device_->AttachAudioBuffer(audio_device_buffer_.get());
+ return 0;
+ }
+
+ AudioDeviceModuleIOS::~AudioDeviceModuleIOS() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ }
+
+ int32_t AudioDeviceModuleIOS::ActiveAudioLayer(AudioLayer* audioLayer) const {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ AudioLayer activeAudio;
+ if (audio_device_->ActiveAudioLayer(activeAudio) == -1) {
+ return -1;
+ }
+ *audioLayer = activeAudio;
+ return 0;
+ }
+
+ int32_t AudioDeviceModuleIOS::Init() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ if (initialized_)
+ return 0;
+
+ audio_device_buffer_.reset(new webrtc::AudioDeviceBuffer(task_queue_factory_.get()));
+ audio_device_.reset(new ios_adm::AudioDeviceIOS(bypass_voice_processing_));
+ RTC_CHECK(audio_device_);
+
+ this->AttachAudioBuffer();
+
+ AudioDeviceGeneric::InitStatus status = audio_device_->Init();
+ RTC_HISTOGRAM_ENUMERATION(
+ "WebRTC.Audio.InitializationResult", static_cast<int>(status),
+ static_cast<int>(AudioDeviceGeneric::InitStatus::NUM_STATUSES));
+ if (status != AudioDeviceGeneric::InitStatus::OK) {
+ RTC_LOG(LS_ERROR) << "Audio device initialization failed.";
+ return -1;
+ }
+ initialized_ = true;
+ return 0;
+ }
+
+ int32_t AudioDeviceModuleIOS::Terminate() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ if (!initialized_)
+ return 0;
+ if (audio_device_->Terminate() == -1) {
+ return -1;
+ }
+ initialized_ = false;
+ return 0;
+ }
+
+ bool AudioDeviceModuleIOS::Initialized() const {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << initialized_;
+ return initialized_;
+ }
+
+ int32_t AudioDeviceModuleIOS::InitSpeaker() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ return audio_device_->InitSpeaker();
+ }
+
+ int32_t AudioDeviceModuleIOS::InitMicrophone() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ return audio_device_->InitMicrophone();
+ }
+
+ int32_t AudioDeviceModuleIOS::SpeakerVolumeIsAvailable(bool* available) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ bool isAvailable = false;
+ if (audio_device_->SpeakerVolumeIsAvailable(isAvailable) == -1) {
+ return -1;
+ }
+ *available = isAvailable;
+ RTC_DLOG(LS_INFO) << "output: " << isAvailable;
+ return 0;
+ }
+
+ int32_t AudioDeviceModuleIOS::SetSpeakerVolume(uint32_t volume) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << volume << ")";
+ CHECKinitialized_();
+ return audio_device_->SetSpeakerVolume(volume);
+ }
+
+ int32_t AudioDeviceModuleIOS::SpeakerVolume(uint32_t* volume) const {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ uint32_t level = 0;
+ if (audio_device_->SpeakerVolume(level) == -1) {
+ return -1;
+ }
+ *volume = level;
+ RTC_DLOG(LS_INFO) << "output: " << *volume;
+ return 0;
+ }
+
+ bool AudioDeviceModuleIOS::SpeakerIsInitialized() const {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized__BOOL();
+ bool isInitialized = audio_device_->SpeakerIsInitialized();
+ RTC_DLOG(LS_INFO) << "output: " << isInitialized;
+ return isInitialized;
+ }
+
+ bool AudioDeviceModuleIOS::MicrophoneIsInitialized() const {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized__BOOL();
+ bool isInitialized = audio_device_->MicrophoneIsInitialized();
+ RTC_DLOG(LS_INFO) << "output: " << isInitialized;
+ return isInitialized;
+ }
+
+ int32_t AudioDeviceModuleIOS::MaxSpeakerVolume(uint32_t* maxVolume) const {
+ CHECKinitialized_();
+ uint32_t maxVol = 0;
+ if (audio_device_->MaxSpeakerVolume(maxVol) == -1) {
+ return -1;
+ }
+ *maxVolume = maxVol;
+ return 0;
+ }
+
+ int32_t AudioDeviceModuleIOS::MinSpeakerVolume(uint32_t* minVolume) const {
+ CHECKinitialized_();
+ uint32_t minVol = 0;
+ if (audio_device_->MinSpeakerVolume(minVol) == -1) {
+ return -1;
+ }
+ *minVolume = minVol;
+ return 0;
+ }
+
+ int32_t AudioDeviceModuleIOS::SpeakerMuteIsAvailable(bool* available) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ bool isAvailable = false;
+ if (audio_device_->SpeakerMuteIsAvailable(isAvailable) == -1) {
+ return -1;
+ }
+ *available = isAvailable;
+ RTC_DLOG(LS_INFO) << "output: " << isAvailable;
+ return 0;
+ }
+
+ int32_t AudioDeviceModuleIOS::SetSpeakerMute(bool enable) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")";
+ CHECKinitialized_();
+ return audio_device_->SetSpeakerMute(enable);
+ }
+
+ int32_t AudioDeviceModuleIOS::SpeakerMute(bool* enabled) const {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ bool muted = false;
+ if (audio_device_->SpeakerMute(muted) == -1) {
+ return -1;
+ }
+ *enabled = muted;
+ RTC_DLOG(LS_INFO) << "output: " << muted;
+ return 0;
+ }
+
+ int32_t AudioDeviceModuleIOS::MicrophoneMuteIsAvailable(bool* available) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ bool isAvailable = false;
+ if (audio_device_->MicrophoneMuteIsAvailable(isAvailable) == -1) {
+ return -1;
+ }
+ *available = isAvailable;
+ RTC_DLOG(LS_INFO) << "output: " << isAvailable;
+ return 0;
+ }
+
+ int32_t AudioDeviceModuleIOS::SetMicrophoneMute(bool enable) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")";
+ CHECKinitialized_();
+ return (audio_device_->SetMicrophoneMute(enable));
+ }
+
+ int32_t AudioDeviceModuleIOS::MicrophoneMute(bool* enabled) const {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ bool muted = false;
+ if (audio_device_->MicrophoneMute(muted) == -1) {
+ return -1;
+ }
+ *enabled = muted;
+ RTC_DLOG(LS_INFO) << "output: " << muted;
+ return 0;
+ }
+
+ int32_t AudioDeviceModuleIOS::MicrophoneVolumeIsAvailable(bool* available) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ bool isAvailable = false;
+ if (audio_device_->MicrophoneVolumeIsAvailable(isAvailable) == -1) {
+ return -1;
+ }
+ *available = isAvailable;
+ RTC_DLOG(LS_INFO) << "output: " << isAvailable;
+ return 0;
+ }
+
+ int32_t AudioDeviceModuleIOS::SetMicrophoneVolume(uint32_t volume) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << volume << ")";
+ CHECKinitialized_();
+ return (audio_device_->SetMicrophoneVolume(volume));
+ }
+
+ int32_t AudioDeviceModuleIOS::MicrophoneVolume(uint32_t* volume) const {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ uint32_t level = 0;
+ if (audio_device_->MicrophoneVolume(level) == -1) {
+ return -1;
+ }
+ *volume = level;
+ RTC_DLOG(LS_INFO) << "output: " << *volume;
+ return 0;
+ }
+
+ int32_t AudioDeviceModuleIOS::StereoRecordingIsAvailable(
+ bool* available) const {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ bool isAvailable = false;
+ if (audio_device_->StereoRecordingIsAvailable(isAvailable) == -1) {
+ return -1;
+ }
+ *available = isAvailable;
+ RTC_DLOG(LS_INFO) << "output: " << isAvailable;
+ return 0;
+ }
+
+ int32_t AudioDeviceModuleIOS::SetStereoRecording(bool enable) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")";
+ CHECKinitialized_();
+ if (enable) {
+ RTC_LOG(LS_WARNING) << "recording in stereo is not supported";
+ }
+ return -1;
+ }
+
+ int32_t AudioDeviceModuleIOS::StereoRecording(bool* enabled) const {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ bool stereo = false;
+ if (audio_device_->StereoRecording(stereo) == -1) {
+ return -1;
+ }
+ *enabled = stereo;
+ RTC_DLOG(LS_INFO) << "output: " << stereo;
+ return 0;
+ }
+
+ int32_t AudioDeviceModuleIOS::StereoPlayoutIsAvailable(bool* available) const {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ bool isAvailable = false;
+ if (audio_device_->StereoPlayoutIsAvailable(isAvailable) == -1) {
+ return -1;
+ }
+ *available = isAvailable;
+ RTC_DLOG(LS_INFO) << "output: " << isAvailable;
+ return 0;
+ }
+
+ int32_t AudioDeviceModuleIOS::SetStereoPlayout(bool enable) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")";
+ CHECKinitialized_();
+ if (audio_device_->PlayoutIsInitialized()) {
+ RTC_LOG(LS_ERROR) << "unable to set stereo mode while playing side is initialized";
+ return -1;
+ }
+ if (audio_device_->SetStereoPlayout(enable)) {
+ RTC_LOG(LS_WARNING) << "stereo playout is not supported";
+ return -1;
+ }
+ int8_t nChannels(1);
+ if (enable) {
+ nChannels = 2;
+ }
+ audio_device_buffer_.get()->SetPlayoutChannels(nChannels);
+ return 0;
+ }
+
+ int32_t AudioDeviceModuleIOS::StereoPlayout(bool* enabled) const {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ bool stereo = false;
+ if (audio_device_->StereoPlayout(stereo) == -1) {
+ return -1;
+ }
+ *enabled = stereo;
+ RTC_DLOG(LS_INFO) << "output: " << stereo;
+ return 0;
+ }
+
+ int32_t AudioDeviceModuleIOS::PlayoutIsAvailable(bool* available) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ bool isAvailable = false;
+ if (audio_device_->PlayoutIsAvailable(isAvailable) == -1) {
+ return -1;
+ }
+ *available = isAvailable;
+ RTC_DLOG(LS_INFO) << "output: " << isAvailable;
+ return 0;
+ }
+
+ int32_t AudioDeviceModuleIOS::RecordingIsAvailable(bool* available) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ bool isAvailable = false;
+ if (audio_device_->RecordingIsAvailable(isAvailable) == -1) {
+ return -1;
+ }
+ *available = isAvailable;
+ RTC_DLOG(LS_INFO) << "output: " << isAvailable;
+ return 0;
+ }
+
+ int32_t AudioDeviceModuleIOS::MaxMicrophoneVolume(uint32_t* maxVolume) const {
+ CHECKinitialized_();
+ uint32_t maxVol(0);
+ if (audio_device_->MaxMicrophoneVolume(maxVol) == -1) {
+ return -1;
+ }
+ *maxVolume = maxVol;
+ return 0;
+ }
+
+ int32_t AudioDeviceModuleIOS::MinMicrophoneVolume(uint32_t* minVolume) const {
+ CHECKinitialized_();
+ uint32_t minVol(0);
+ if (audio_device_->MinMicrophoneVolume(minVol) == -1) {
+ return -1;
+ }
+ *minVolume = minVol;
+ return 0;
+ }
+
+ int16_t AudioDeviceModuleIOS::PlayoutDevices() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ uint16_t nPlayoutDevices = audio_device_->PlayoutDevices();
+ RTC_DLOG(LS_INFO) << "output: " << nPlayoutDevices;
+ return (int16_t)(nPlayoutDevices);
+ }
+
+ int32_t AudioDeviceModuleIOS::SetPlayoutDevice(uint16_t index) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << index << ")";
+ CHECKinitialized_();
+ return audio_device_->SetPlayoutDevice(index);
+ }
+
+ int32_t AudioDeviceModuleIOS::SetPlayoutDevice(WindowsDeviceType device) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ return audio_device_->SetPlayoutDevice(device);
+ }
+
+ int32_t AudioDeviceModuleIOS::PlayoutDeviceName(
+ uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << index << ", ...)";
+ CHECKinitialized_();
+ if (name == NULL) {
+ return -1;
+ }
+ if (audio_device_->PlayoutDeviceName(index, name, guid) == -1) {
+ return -1;
+ }
+ if (name != NULL) {
+ RTC_DLOG(LS_INFO) << "output: name = " << name;
+ }
+ if (guid != NULL) {
+ RTC_DLOG(LS_INFO) << "output: guid = " << guid;
+ }
+ return 0;
+ }
+
+ int32_t AudioDeviceModuleIOS::RecordingDeviceName(
+ uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << index << ", ...)";
+ CHECKinitialized_();
+ if (name == NULL) {
+ return -1;
+ }
+ if (audio_device_->RecordingDeviceName(index, name, guid) == -1) {
+ return -1;
+ }
+ if (name != NULL) {
+ RTC_DLOG(LS_INFO) << "output: name = " << name;
+ }
+ if (guid != NULL) {
+ RTC_DLOG(LS_INFO) << "output: guid = " << guid;
+ }
+ return 0;
+ }
+
+ int16_t AudioDeviceModuleIOS::RecordingDevices() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ uint16_t nRecordingDevices = audio_device_->RecordingDevices();
+ RTC_DLOG(LS_INFO) << "output: " << nRecordingDevices;
+ return (int16_t)nRecordingDevices;
+ }
+
+ int32_t AudioDeviceModuleIOS::SetRecordingDevice(uint16_t index) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << index << ")";
+ CHECKinitialized_();
+ return audio_device_->SetRecordingDevice(index);
+ }
+
+ int32_t AudioDeviceModuleIOS::SetRecordingDevice(WindowsDeviceType device) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ return audio_device_->SetRecordingDevice(device);
+ }
+
+ int32_t AudioDeviceModuleIOS::InitPlayout() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ if (PlayoutIsInitialized()) {
+ return 0;
+ }
+ int32_t result = audio_device_->InitPlayout();
+ RTC_DLOG(LS_INFO) << "output: " << result;
+ RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.InitPlayoutSuccess",
+ static_cast<int>(result == 0));
+ return result;
+ }
+
+ int32_t AudioDeviceModuleIOS::InitRecording() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ if (RecordingIsInitialized()) {
+ return 0;
+ }
+ int32_t result = audio_device_->InitRecording();
+ RTC_DLOG(LS_INFO) << "output: " << result;
+ RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.InitRecordingSuccess",
+ static_cast<int>(result == 0));
+ return result;
+ }
+
+ bool AudioDeviceModuleIOS::PlayoutIsInitialized() const {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized__BOOL();
+ return audio_device_->PlayoutIsInitialized();
+ }
+
+ bool AudioDeviceModuleIOS::RecordingIsInitialized() const {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized__BOOL();
+ return audio_device_->RecordingIsInitialized();
+ }
+
+ int32_t AudioDeviceModuleIOS::StartPlayout() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ if (Playing()) {
+ return 0;
+ }
+ audio_device_buffer_.get()->StartPlayout();
+ int32_t result = audio_device_->StartPlayout();
+ RTC_DLOG(LS_INFO) << "output: " << result;
+ RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StartPlayoutSuccess",
+ static_cast<int>(result == 0));
+ return result;
+ }
+
+ int32_t AudioDeviceModuleIOS::StopPlayout() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ int32_t result = audio_device_->StopPlayout();
+ audio_device_buffer_.get()->StopPlayout();
+ RTC_DLOG(LS_INFO) << "output: " << result;
+ RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StopPlayoutSuccess",
+ static_cast<int>(result == 0));
+ return result;
+ }
+
+ bool AudioDeviceModuleIOS::Playing() const {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized__BOOL();
+ return audio_device_->Playing();
+ }
+
+ int32_t AudioDeviceModuleIOS::StartRecording() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ if (Recording()) {
+ return 0;
+ }
+ audio_device_buffer_.get()->StartRecording();
+ int32_t result = audio_device_->StartRecording();
+ RTC_DLOG(LS_INFO) << "output: " << result;
+ RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StartRecordingSuccess",
+ static_cast<int>(result == 0));
+ return result;
+ }
+
+ int32_t AudioDeviceModuleIOS::StopRecording() {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized_();
+ int32_t result = audio_device_->StopRecording();
+ audio_device_buffer_.get()->StopRecording();
+ RTC_DLOG(LS_INFO) << "output: " << result;
+ RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StopRecordingSuccess",
+ static_cast<int>(result == 0));
+ return result;
+ }
+
+ bool AudioDeviceModuleIOS::Recording() const {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized__BOOL();
+ return audio_device_->Recording();
+ }
+
+ int32_t AudioDeviceModuleIOS::RegisterAudioCallback(
+ AudioTransport* audioCallback) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ return audio_device_buffer_.get()->RegisterAudioCallback(audioCallback);
+ }
+
+ int32_t AudioDeviceModuleIOS::PlayoutDelay(uint16_t* delayMS) const {
+ CHECKinitialized_();
+ uint16_t delay = 0;
+ if (audio_device_->PlayoutDelay(delay) == -1) {
+ RTC_LOG(LS_ERROR) << "failed to retrieve the playout delay";
+ return -1;
+ }
+ *delayMS = delay;
+ return 0;
+ }
+
+ bool AudioDeviceModuleIOS::BuiltInAECIsAvailable() const {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized__BOOL();
+ bool isAvailable = audio_device_->BuiltInAECIsAvailable();
+ RTC_DLOG(LS_INFO) << "output: " << isAvailable;
+ return isAvailable;
+ }
+
+ int32_t AudioDeviceModuleIOS::EnableBuiltInAEC(bool enable) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")";
+ CHECKinitialized_();
+ int32_t ok = audio_device_->EnableBuiltInAEC(enable);
+ RTC_DLOG(LS_INFO) << "output: " << ok;
+ return ok;
+ }
+
+ bool AudioDeviceModuleIOS::BuiltInAGCIsAvailable() const {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized__BOOL();
+ bool isAvailable = audio_device_->BuiltInAGCIsAvailable();
+ RTC_DLOG(LS_INFO) << "output: " << isAvailable;
+ return isAvailable;
+ }
+
+ int32_t AudioDeviceModuleIOS::EnableBuiltInAGC(bool enable) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")";
+ CHECKinitialized_();
+ int32_t ok = audio_device_->EnableBuiltInAGC(enable);
+ RTC_DLOG(LS_INFO) << "output: " << ok;
+ return ok;
+ }
+
+ bool AudioDeviceModuleIOS::BuiltInNSIsAvailable() const {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ CHECKinitialized__BOOL();
+ bool isAvailable = audio_device_->BuiltInNSIsAvailable();
+ RTC_DLOG(LS_INFO) << "output: " << isAvailable;
+ return isAvailable;
+ }
+
+ int32_t AudioDeviceModuleIOS::EnableBuiltInNS(bool enable) {
+ RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")";
+ CHECKinitialized_();
+ int32_t ok = audio_device_->EnableBuiltInNS(enable);
+ RTC_DLOG(LS_INFO) << "output: " << ok;
+ return ok;
+ }
+
+ int32_t AudioDeviceModuleIOS::GetPlayoutUnderrunCount() const {
+ // Don't log here, as this method can be called very often.
+ CHECKinitialized_();
+ int32_t ok = audio_device_->GetPlayoutUnderrunCount();
+ return ok;
+ }
+
+#if defined(WEBRTC_IOS)
+ int AudioDeviceModuleIOS::GetPlayoutAudioParameters(
+ AudioParameters* params) const {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ int r = audio_device_->GetPlayoutAudioParameters(params);
+ RTC_DLOG(LS_INFO) << "output: " << r;
+ return r;
+ }
+
+ int AudioDeviceModuleIOS::GetRecordAudioParameters(
+ AudioParameters* params) const {
+ RTC_DLOG(LS_INFO) << __FUNCTION__;
+ int r = audio_device_->GetRecordAudioParameters(params);
+ RTC_DLOG(LS_INFO) << "output: " << r;
+ return r;
+ }
+#endif // WEBRTC_IOS
+}
+}
diff --git a/third_party/libwebrtc/sdk/objc/native/src/audio/audio_session_observer.h b/third_party/libwebrtc/sdk/objc/native/src/audio/audio_session_observer.h
new file mode 100644
index 0000000000..f7c44c8184
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/native/src/audio/audio_session_observer.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SDK_OBJC_NATIVE_SRC_AUDIO_AUDIO_SESSION_OBSERVER_H_
+#define SDK_OBJC_NATIVE_SRC_AUDIO_AUDIO_SESSION_OBSERVER_H_
+
+#include "rtc_base/thread.h"
+
+namespace webrtc {
+
+// Observer interface for listening to AVAudioSession events.
+class AudioSessionObserver {
+ public:
+ // Called when audio session interruption begins.
+ virtual void OnInterruptionBegin() = 0;
+
+ // Called when audio session interruption ends.
+ virtual void OnInterruptionEnd() = 0;
+
+ // Called when audio route changes.
+ virtual void OnValidRouteChange() = 0;
+
+ // Called when the ability to play or record changes.
+ virtual void OnCanPlayOrRecordChange(bool can_play_or_record) = 0;
+
+ virtual void OnChangedOutputVolume() = 0;
+
+ protected:
+ virtual ~AudioSessionObserver() {}
+};
+
+} // namespace webrtc
+
+#endif // SDK_OBJC_NATIVE_SRC_AUDIO_AUDIO_SESSION_OBSERVER_H_
diff --git a/third_party/libwebrtc/sdk/objc/native/src/audio/helpers.h b/third_party/libwebrtc/sdk/objc/native/src/audio/helpers.h
new file mode 100644
index 0000000000..12464ac897
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/native/src/audio/helpers.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SDK_OBJC_NATIVE_SRC_AUDIO_HELPERS_H_
+#define SDK_OBJC_NATIVE_SRC_AUDIO_HELPERS_H_
+
+#import <Foundation/Foundation.h>
+#include <objc/objc.h>
+
+#include <string>
+
+namespace webrtc {
+namespace ios {
+
+bool CheckAndLogError(BOOL success, NSError* error);
+
+NSString* NSStringFromStdString(const std::string& stdString);
+std::string StdStringFromNSString(NSString* nsString);
+
+// Return thread ID as a string.
+std::string GetThreadId();
+
+// Return thread ID as string suitable for debug logging.
+std::string GetThreadInfo();
+
+// Returns [NSThread currentThread] description as string.
+// Example: <NSThread: 0x170066d80>{number = 1, name = main}
+std::string GetCurrentThreadDescription();
+
+#if defined(WEBRTC_IOS)
+// Returns the current name of the operating system.
+std::string GetSystemName();
+
+// Returns the current version of the operating system as a string.
+std::string GetSystemVersionAsString();
+
+// Returns the version of the operating system in double representation.
+// Uses a cached value of the system version.
+double GetSystemVersion();
+
+// Returns the device type.
+// Examples: "iPhone" and "iPod touch".
+std::string GetDeviceType();
+#endif // defined(WEBRTC_IOS)
+
+// Returns a more detailed device name.
+// Examples: "iPhone 5s (GSM)" and "iPhone 6 Plus".
+std::string GetDeviceName();
+
+// Returns the name of the process. Does not uniquely identify the process.
+std::string GetProcessName();
+
+// Returns the identifier of the process (often called process ID).
+int GetProcessID();
+
+// Returns a string containing the version of the operating system on which the
+// process is executing. The string is human readable, localized, and
+// is appropriate for displaying to the user.
+std::string GetOSVersionString();
+
+// Returns the number of processing cores available on the device.
+int GetProcessorCount();
+
+#if defined(WEBRTC_IOS)
+// Indicates whether Low Power Mode is enabled on the iOS device.
+bool GetLowPowerModeEnabled();
+#endif
+
+} // namespace ios
+} // namespace webrtc
+
+#endif // SDK_OBJC_NATIVE_SRC_AUDIO_HELPERS_H_
diff --git a/third_party/libwebrtc/sdk/objc/native/src/audio/helpers.mm b/third_party/libwebrtc/sdk/objc/native/src/audio/helpers.mm
new file mode 100644
index 0000000000..cd0469656a
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/native/src/audio/helpers.mm
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <Foundation/Foundation.h>
+#import <sys/sysctl.h>
+#if defined(WEBRTC_IOS)
+#import <UIKit/UIKit.h>
+#endif
+
+#include <memory>
+
+#include "helpers.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace ios {
+
+NSString* NSStringFromStdString(const std::string& stdString) {
+ // std::string may contain null termination character so we construct
+ // using length.
+ return [[NSString alloc] initWithBytes:stdString.data()
+ length:stdString.length()
+ encoding:NSUTF8StringEncoding];
+}
+
+std::string StdStringFromNSString(NSString* nsString) {
+ NSData* charData = [nsString dataUsingEncoding:NSUTF8StringEncoding];
+ return std::string(reinterpret_cast<const char*>([charData bytes]),
+ [charData length]);
+}
+
+bool CheckAndLogError(BOOL success, NSError* error) {
+ if (!success) {
+ NSString* msg =
+ [NSString stringWithFormat:@"Error: %ld, %@, %@", (long)error.code,
+ error.localizedDescription,
+ error.localizedFailureReason];
+ RTC_LOG(LS_ERROR) << StdStringFromNSString(msg);
+ return false;
+ }
+ return true;
+}
+
+// TODO(henrika): see if it is possible to move to GetThreadName in
+// platform_thread.h and base it on pthread methods instead.
+std::string GetCurrentThreadDescription() {
+ NSString* name = [NSString stringWithFormat:@"%@", [NSThread currentThread]];
+ return StdStringFromNSString(name);
+}
+
+#if defined(WEBRTC_IOS)
+std::string GetSystemName() {
+ NSString* osName = [[UIDevice currentDevice] systemName];
+ return StdStringFromNSString(osName);
+}
+
+std::string GetSystemVersionAsString() {
+ NSString* osVersion = [[UIDevice currentDevice] systemVersion];
+ return StdStringFromNSString(osVersion);
+}
+
+std::string GetDeviceType() {
+ NSString* deviceModel = [[UIDevice currentDevice] model];
+ return StdStringFromNSString(deviceModel);
+}
+
+bool GetLowPowerModeEnabled() {
+ return [NSProcessInfo processInfo].lowPowerModeEnabled;
+}
+#endif
+
+std::string GetDeviceName() {
+ size_t size;
+ sysctlbyname("hw.machine", NULL, &size, NULL, 0);
+ std::unique_ptr<char[]> machine;
+ machine.reset(new char[size]);
+ sysctlbyname("hw.machine", machine.get(), &size, NULL, 0);
+ return std::string(machine.get());
+}
+
+std::string GetProcessName() {
+ NSString* processName = [NSProcessInfo processInfo].processName;
+ return StdStringFromNSString(processName);
+}
+
+int GetProcessID() {
+ return [NSProcessInfo processInfo].processIdentifier;
+}
+
+std::string GetOSVersionString() {
+ NSString* osVersion =
+ [NSProcessInfo processInfo].operatingSystemVersionString;
+ return StdStringFromNSString(osVersion);
+}
+
+int GetProcessorCount() {
+ return [NSProcessInfo processInfo].processorCount;
+}
+
+} // namespace ios
+} // namespace webrtc
diff --git a/third_party/libwebrtc/sdk/objc/native/src/audio/voice_processing_audio_unit.h b/third_party/libwebrtc/sdk/objc/native/src/audio/voice_processing_audio_unit.h
new file mode 100644
index 0000000000..ed9dd98568
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/native/src/audio/voice_processing_audio_unit.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SDK_OBJC_NATIVE_SRC_AUDIO_VOICE_PROCESSING_AUDIO_UNIT_H_
+#define SDK_OBJC_NATIVE_SRC_AUDIO_VOICE_PROCESSING_AUDIO_UNIT_H_
+
+#include <AudioUnit/AudioUnit.h>
+
+namespace webrtc {
+namespace ios_adm {
+
+class VoiceProcessingAudioUnitObserver {
+ public:
+ // Callback function called on a real-time priority I/O thread from the audio
+ // unit. This method is used to signal that recorded audio is available.
+ virtual OSStatus OnDeliverRecordedData(AudioUnitRenderActionFlags* flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 num_frames,
+ AudioBufferList* io_data) = 0;
+
+ // Callback function called on a real-time priority I/O thread from the audio
+ // unit. This method is used to provide audio samples to the audio unit.
+ virtual OSStatus OnGetPlayoutData(AudioUnitRenderActionFlags* io_action_flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 num_frames,
+ AudioBufferList* io_data) = 0;
+
+ protected:
+ ~VoiceProcessingAudioUnitObserver() {}
+};
+
+// Convenience class to abstract away the management of a Voice Processing
+// I/O Audio Unit. The Voice Processing I/O unit has the same characteristics
+// as the Remote I/O unit (supports full duplex low-latency audio input and
+// output) and adds AEC for two-way duplex communication. It also adds AGC,
+// adjustment of voice-processing quality, and muting. Hence, ideal for
+// VoIP applications.
+class VoiceProcessingAudioUnit {
+ public:
+ VoiceProcessingAudioUnit(bool bypass_voice_processing,
+ VoiceProcessingAudioUnitObserver* observer);
+ ~VoiceProcessingAudioUnit();
+
+ // TODO(tkchin): enum for state and state checking.
+ enum State : int32_t {
+ // Init() should be called.
+ kInitRequired,
+ // Audio unit created but not initialized.
+ kUninitialized,
+ // Initialized but not started. Equivalent to stopped.
+ kInitialized,
+ // Initialized and started.
+ kStarted,
+ };
+
+ // Number of bytes per audio sample for 16-bit signed integer representation.
+ static const UInt32 kBytesPerSample;
+
+ // Initializes this class by creating the underlying audio unit instance.
+ // Creates a Voice-Processing I/O unit and configures it for full-duplex
+ // audio. The selected stream format is selected to avoid internal resampling
+ // and to match the 10ms callback rate for WebRTC as well as possible.
+ // Does not initialize the audio unit.
+ bool Init();
+
+ VoiceProcessingAudioUnit::State GetState() const;
+
+ // Initializes the underlying audio unit with the given sample rate.
+ bool Initialize(Float64 sample_rate);
+
+ // Starts the underlying audio unit.
+ OSStatus Start();
+
+ // Stops the underlying audio unit.
+ bool Stop();
+
+ // Uninitializes the underlying audio unit.
+ bool Uninitialize();
+
+ // Calls render on the underlying audio unit.
+ OSStatus Render(AudioUnitRenderActionFlags* flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 output_bus_number,
+ UInt32 num_frames,
+ AudioBufferList* io_data);
+
+ private:
+ // The C API used to set callbacks requires static functions. When these are
+ // called, they will invoke the relevant instance method by casting
+ // in_ref_con to VoiceProcessingAudioUnit*.
+ static OSStatus OnGetPlayoutData(void* in_ref_con,
+ AudioUnitRenderActionFlags* flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 num_frames,
+ AudioBufferList* io_data);
+ static OSStatus OnDeliverRecordedData(void* in_ref_con,
+ AudioUnitRenderActionFlags* flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 num_frames,
+ AudioBufferList* io_data);
+
+ // Notifies observer that samples are needed for playback.
+ OSStatus NotifyGetPlayoutData(AudioUnitRenderActionFlags* flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 num_frames,
+ AudioBufferList* io_data);
+ // Notifies observer that recorded samples are available for render.
+ OSStatus NotifyDeliverRecordedData(AudioUnitRenderActionFlags* flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 num_frames,
+ AudioBufferList* io_data);
+
+ // Returns the predetermined format with a specific sample rate. See
+ // implementation file for details on format.
+ AudioStreamBasicDescription GetFormat(Float64 sample_rate) const;
+
+ // Deletes the underlying audio unit.
+ void DisposeAudioUnit();
+
+ const bool bypass_voice_processing_;
+ VoiceProcessingAudioUnitObserver* observer_;
+ AudioUnit vpio_unit_;
+ VoiceProcessingAudioUnit::State state_;
+};
+} // namespace ios_adm
+} // namespace webrtc
+
+#endif // SDK_OBJC_NATIVE_SRC_AUDIO_VOICE_PROCESSING_AUDIO_UNIT_H_
diff --git a/third_party/libwebrtc/sdk/objc/native/src/audio/voice_processing_audio_unit.mm b/third_party/libwebrtc/sdk/objc/native/src/audio/voice_processing_audio_unit.mm
new file mode 100644
index 0000000000..3905b6857a
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/native/src/audio/voice_processing_audio_unit.mm
@@ -0,0 +1,488 @@
+/*
+ * Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "voice_processing_audio_unit.h"
+
+#include "rtc_base/checks.h"
+#include "system_wrappers/include/metrics.h"
+
+#import "base/RTCLogging.h"
+#import "sdk/objc/components/audio/RTCAudioSessionConfiguration.h"
+
+#if !defined(NDEBUG)
+static void LogStreamDescription(AudioStreamBasicDescription description) {
+ char formatIdString[5];
+ UInt32 formatId = CFSwapInt32HostToBig(description.mFormatID);
+ bcopy(&formatId, formatIdString, 4);
+ formatIdString[4] = '\0';
+ RTCLog(@"AudioStreamBasicDescription: {\n"
+ " mSampleRate: %.2f\n"
+ " formatIDString: %s\n"
+ " mFormatFlags: 0x%X\n"
+ " mBytesPerPacket: %u\n"
+ " mFramesPerPacket: %u\n"
+ " mBytesPerFrame: %u\n"
+ " mChannelsPerFrame: %u\n"
+ " mBitsPerChannel: %u\n"
+ " mReserved: %u\n}",
+ description.mSampleRate, formatIdString,
+ static_cast<unsigned int>(description.mFormatFlags),
+ static_cast<unsigned int>(description.mBytesPerPacket),
+ static_cast<unsigned int>(description.mFramesPerPacket),
+ static_cast<unsigned int>(description.mBytesPerFrame),
+ static_cast<unsigned int>(description.mChannelsPerFrame),
+ static_cast<unsigned int>(description.mBitsPerChannel),
+ static_cast<unsigned int>(description.mReserved));
+}
+#endif
+
+namespace webrtc {
+namespace ios_adm {
+
+// Calls to AudioUnitInitialize() can fail if called back-to-back on different
+// ADM instances. A fall-back solution is to allow multiple sequential calls
+// with a small delay between each. This factor sets the max number of allowed
+// initialization attempts.
+static const int kMaxNumberOfAudioUnitInitializeAttempts = 5;
+// A VP I/O unit's bus 1 connects to input hardware (microphone).
+static const AudioUnitElement kInputBus = 1;
+// A VP I/O unit's bus 0 connects to output hardware (speaker).
+static const AudioUnitElement kOutputBus = 0;
+
+// Returns the automatic gain control (AGC) state on the processed microphone
+// signal. Should be on by default for Voice Processing audio units.
+static OSStatus GetAGCState(AudioUnit audio_unit, UInt32* enabled) {
+ RTC_DCHECK(audio_unit);
+ UInt32 size = sizeof(*enabled);
+ OSStatus result = AudioUnitGetProperty(audio_unit,
+ kAUVoiceIOProperty_VoiceProcessingEnableAGC,
+ kAudioUnitScope_Global,
+ kInputBus,
+ enabled,
+ &size);
+ RTCLog(@"VPIO unit AGC: %u", static_cast<unsigned int>(*enabled));
+ return result;
+}
+
+VoiceProcessingAudioUnit::VoiceProcessingAudioUnit(bool bypass_voice_processing,
+ VoiceProcessingAudioUnitObserver* observer)
+ : bypass_voice_processing_(bypass_voice_processing),
+ observer_(observer),
+ vpio_unit_(nullptr),
+ state_(kInitRequired) {
+ RTC_DCHECK(observer);
+}
+
+VoiceProcessingAudioUnit::~VoiceProcessingAudioUnit() {
+ DisposeAudioUnit();
+}
+
+const UInt32 VoiceProcessingAudioUnit::kBytesPerSample = 2;
+
+bool VoiceProcessingAudioUnit::Init() {
+ RTC_DCHECK_EQ(state_, kInitRequired);
+
+ // Create an audio component description to identify the Voice Processing
+ // I/O audio unit.
+ AudioComponentDescription vpio_unit_description;
+ vpio_unit_description.componentType = kAudioUnitType_Output;
+ vpio_unit_description.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
+ vpio_unit_description.componentManufacturer = kAudioUnitManufacturer_Apple;
+ vpio_unit_description.componentFlags = 0;
+ vpio_unit_description.componentFlagsMask = 0;
+
+ // Obtain an audio unit instance given the description.
+ AudioComponent found_vpio_unit_ref =
+ AudioComponentFindNext(nullptr, &vpio_unit_description);
+
+ // Create a Voice Processing IO audio unit.
+ OSStatus result = noErr;
+ result = AudioComponentInstanceNew(found_vpio_unit_ref, &vpio_unit_);
+ if (result != noErr) {
+ vpio_unit_ = nullptr;
+ RTCLogError(@"AudioComponentInstanceNew failed. Error=%ld.", (long)result);
+ return false;
+ }
+
+ // Enable input on the input scope of the input element.
+ UInt32 enable_input = 1;
+ result = AudioUnitSetProperty(vpio_unit_, kAudioOutputUnitProperty_EnableIO,
+ kAudioUnitScope_Input, kInputBus, &enable_input,
+ sizeof(enable_input));
+ if (result != noErr) {
+ DisposeAudioUnit();
+ RTCLogError(@"Failed to enable input on input scope of input element. "
+ "Error=%ld.",
+ (long)result);
+ return false;
+ }
+
+ // Enable output on the output scope of the output element.
+ UInt32 enable_output = 1;
+ result = AudioUnitSetProperty(vpio_unit_, kAudioOutputUnitProperty_EnableIO,
+ kAudioUnitScope_Output, kOutputBus,
+ &enable_output, sizeof(enable_output));
+ if (result != noErr) {
+ DisposeAudioUnit();
+ RTCLogError(@"Failed to enable output on output scope of output element. "
+ "Error=%ld.",
+ (long)result);
+ return false;
+ }
+
+ // Specify the callback function that provides audio samples to the audio
+ // unit.
+ AURenderCallbackStruct render_callback;
+ render_callback.inputProc = OnGetPlayoutData;
+ render_callback.inputProcRefCon = this;
+ result = AudioUnitSetProperty(
+ vpio_unit_, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input,
+ kOutputBus, &render_callback, sizeof(render_callback));
+ if (result != noErr) {
+ DisposeAudioUnit();
+ RTCLogError(@"Failed to specify the render callback on the output bus. "
+ "Error=%ld.",
+ (long)result);
+ return false;
+ }
+
+ // Disable AU buffer allocation for the recorder, we allocate our own.
+  // TODO(henrika): not sure that it actually saves resources to make this call.
+ UInt32 flag = 0;
+ result = AudioUnitSetProperty(
+ vpio_unit_, kAudioUnitProperty_ShouldAllocateBuffer,
+ kAudioUnitScope_Output, kInputBus, &flag, sizeof(flag));
+ if (result != noErr) {
+ DisposeAudioUnit();
+ RTCLogError(@"Failed to disable buffer allocation on the input bus. "
+ "Error=%ld.",
+ (long)result);
+ return false;
+ }
+
+ // Specify the callback to be called by the I/O thread to us when input audio
+ // is available. The recorded samples can then be obtained by calling the
+ // AudioUnitRender() method.
+ AURenderCallbackStruct input_callback;
+ input_callback.inputProc = OnDeliverRecordedData;
+ input_callback.inputProcRefCon = this;
+ result = AudioUnitSetProperty(vpio_unit_,
+ kAudioOutputUnitProperty_SetInputCallback,
+ kAudioUnitScope_Global, kInputBus,
+ &input_callback, sizeof(input_callback));
+ if (result != noErr) {
+ DisposeAudioUnit();
+ RTCLogError(@"Failed to specify the input callback on the input bus. "
+ "Error=%ld.",
+ (long)result);
+ return false;
+ }
+
+ state_ = kUninitialized;
+ return true;
+}
+
+VoiceProcessingAudioUnit::State VoiceProcessingAudioUnit::GetState() const {
+ return state_;
+}
+
+bool VoiceProcessingAudioUnit::Initialize(Float64 sample_rate) {
+ RTC_DCHECK_GE(state_, kUninitialized);
+ RTCLog(@"Initializing audio unit with sample rate: %f", sample_rate);
+
+ OSStatus result = noErr;
+ AudioStreamBasicDescription format = GetFormat(sample_rate);
+ UInt32 size = sizeof(format);
+#if !defined(NDEBUG)
+ LogStreamDescription(format);
+#endif
+
+ // Set the format on the output scope of the input element/bus.
+ result =
+ AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Output, kInputBus, &format, size);
+ if (result != noErr) {
+ RTCLogError(@"Failed to set format on output scope of input bus. "
+ "Error=%ld.",
+ (long)result);
+ return false;
+ }
+
+ // Set the format on the input scope of the output element/bus.
+ result =
+ AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Input, kOutputBus, &format, size);
+ if (result != noErr) {
+ RTCLogError(@"Failed to set format on input scope of output bus. "
+ "Error=%ld.",
+ (long)result);
+ return false;
+ }
+
+ // Initialize the Voice Processing I/O unit instance.
+ // Calls to AudioUnitInitialize() can fail if called back-to-back on
+ // different ADM instances. The error message in this case is -66635 which is
+ // undocumented. Tests have shown that calling AudioUnitInitialize a second
+ // time, after a short sleep, avoids this issue.
+ // See webrtc:5166 for details.
+ int failed_initalize_attempts = 0;
+ result = AudioUnitInitialize(vpio_unit_);
+ while (result != noErr) {
+ RTCLogError(@"Failed to initialize the Voice Processing I/O unit. "
+ "Error=%ld.",
+ (long)result);
+ ++failed_initalize_attempts;
+ if (failed_initalize_attempts == kMaxNumberOfAudioUnitInitializeAttempts) {
+ // Max number of initialization attempts exceeded, hence abort.
+ RTCLogError(@"Too many initialization attempts.");
+ return false;
+ }
+ RTCLog(@"Pause 100ms and try audio unit initialization again...");
+ [NSThread sleepForTimeInterval:0.1f];
+ result = AudioUnitInitialize(vpio_unit_);
+ }
+ if (result == noErr) {
+ RTCLog(@"Voice Processing I/O unit is now initialized.");
+ }
+
+ if (bypass_voice_processing_) {
+ // Attempt to disable builtin voice processing.
+ UInt32 toggle = 1;
+ result = AudioUnitSetProperty(vpio_unit_,
+ kAUVoiceIOProperty_BypassVoiceProcessing,
+ kAudioUnitScope_Global,
+ kInputBus,
+ &toggle,
+ sizeof(toggle));
+ if (result == noErr) {
+ RTCLog(@"Successfully bypassed voice processing.");
+ } else {
+ RTCLogError(@"Failed to bypass voice processing. Error=%ld.", (long)result);
+ }
+ state_ = kInitialized;
+ return true;
+ }
+
+ // AGC should be enabled by default for Voice Processing I/O units but it is
+ // checked below and enabled explicitly if needed. This scheme is used
+ // to be absolutely sure that the AGC is enabled since we have seen cases
+ // where only zeros are recorded and a disabled AGC could be one of the
+ // reasons why it happens.
+ int agc_was_enabled_by_default = 0;
+ UInt32 agc_is_enabled = 0;
+ result = GetAGCState(vpio_unit_, &agc_is_enabled);
+ if (result != noErr) {
+ RTCLogError(@"Failed to get AGC state (1st attempt). "
+ "Error=%ld.",
+ (long)result);
+ // Example of error code: kAudioUnitErr_NoConnection (-10876).
+ // All error codes related to audio units are negative and are therefore
+    // converted into a positive value to match the UMA APIs.
+ RTC_HISTOGRAM_COUNTS_SPARSE_100000(
+ "WebRTC.Audio.GetAGCStateErrorCode1", (-1) * result);
+ } else if (agc_is_enabled) {
+ // Remember that the AGC was enabled by default. Will be used in UMA.
+ agc_was_enabled_by_default = 1;
+ } else {
+ // AGC was initially disabled => try to enable it explicitly.
+ UInt32 enable_agc = 1;
+ result =
+ AudioUnitSetProperty(vpio_unit_,
+ kAUVoiceIOProperty_VoiceProcessingEnableAGC,
+ kAudioUnitScope_Global, kInputBus, &enable_agc,
+ sizeof(enable_agc));
+ if (result != noErr) {
+ RTCLogError(@"Failed to enable the built-in AGC. "
+ "Error=%ld.",
+ (long)result);
+ RTC_HISTOGRAM_COUNTS_SPARSE_100000(
+ "WebRTC.Audio.SetAGCStateErrorCode", (-1) * result);
+ }
+ result = GetAGCState(vpio_unit_, &agc_is_enabled);
+ if (result != noErr) {
+ RTCLogError(@"Failed to get AGC state (2nd attempt). "
+ "Error=%ld.",
+ (long)result);
+ RTC_HISTOGRAM_COUNTS_SPARSE_100000(
+ "WebRTC.Audio.GetAGCStateErrorCode2", (-1) * result);
+ }
+ }
+
+ // Track if the built-in AGC was enabled by default (as it should) or not.
+ RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.BuiltInAGCWasEnabledByDefault",
+ agc_was_enabled_by_default);
+ RTCLog(@"WebRTC.Audio.BuiltInAGCWasEnabledByDefault: %d",
+ agc_was_enabled_by_default);
+ // As a final step, add an UMA histogram for tracking the AGC state.
+ // At this stage, the AGC should be enabled, and if it is not, more work is
+ // needed to find out the root cause.
+ RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.BuiltInAGCIsEnabled", agc_is_enabled);
+ RTCLog(@"WebRTC.Audio.BuiltInAGCIsEnabled: %u",
+ static_cast<unsigned int>(agc_is_enabled));
+
+ state_ = kInitialized;
+ return true;
+}
+
+OSStatus VoiceProcessingAudioUnit::Start() {
+ RTC_DCHECK_GE(state_, kUninitialized);
+ RTCLog(@"Starting audio unit.");
+
+ OSStatus result = AudioOutputUnitStart(vpio_unit_);
+ if (result != noErr) {
+ RTCLogError(@"Failed to start audio unit. Error=%ld", (long)result);
+ return result;
+ } else {
+ RTCLog(@"Started audio unit");
+ }
+ state_ = kStarted;
+ return noErr;
+}
+
+bool VoiceProcessingAudioUnit::Stop() {
+ RTC_DCHECK_GE(state_, kUninitialized);
+ RTCLog(@"Stopping audio unit.");
+
+ OSStatus result = AudioOutputUnitStop(vpio_unit_);
+ if (result != noErr) {
+ RTCLogError(@"Failed to stop audio unit. Error=%ld", (long)result);
+ return false;
+ } else {
+ RTCLog(@"Stopped audio unit");
+ }
+
+ state_ = kInitialized;
+ return true;
+}
+
+bool VoiceProcessingAudioUnit::Uninitialize() {
+ RTC_DCHECK_GE(state_, kUninitialized);
+ RTCLog(@"Unintializing audio unit.");
+
+ OSStatus result = AudioUnitUninitialize(vpio_unit_);
+ if (result != noErr) {
+ RTCLogError(@"Failed to uninitialize audio unit. Error=%ld", (long)result);
+ return false;
+ } else {
+ RTCLog(@"Uninitialized audio unit.");
+ }
+
+ state_ = kUninitialized;
+ return true;
+}
+
+OSStatus VoiceProcessingAudioUnit::Render(AudioUnitRenderActionFlags* flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 output_bus_number,
+ UInt32 num_frames,
+ AudioBufferList* io_data) {
+ RTC_DCHECK(vpio_unit_) << "Init() not called.";
+
+ OSStatus result = AudioUnitRender(vpio_unit_, flags, time_stamp,
+ output_bus_number, num_frames, io_data);
+ if (result != noErr) {
+ RTCLogError(@"Failed to render audio unit. Error=%ld", (long)result);
+ }
+ return result;
+}
+
+OSStatus VoiceProcessingAudioUnit::OnGetPlayoutData(
+ void* in_ref_con,
+ AudioUnitRenderActionFlags* flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 num_frames,
+ AudioBufferList* io_data) {
+ VoiceProcessingAudioUnit* audio_unit =
+ static_cast<VoiceProcessingAudioUnit*>(in_ref_con);
+ return audio_unit->NotifyGetPlayoutData(flags, time_stamp, bus_number,
+ num_frames, io_data);
+}
+
+OSStatus VoiceProcessingAudioUnit::OnDeliverRecordedData(
+ void* in_ref_con,
+ AudioUnitRenderActionFlags* flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 num_frames,
+ AudioBufferList* io_data) {
+ VoiceProcessingAudioUnit* audio_unit =
+ static_cast<VoiceProcessingAudioUnit*>(in_ref_con);
+ return audio_unit->NotifyDeliverRecordedData(flags, time_stamp, bus_number,
+ num_frames, io_data);
+}
+
+OSStatus VoiceProcessingAudioUnit::NotifyGetPlayoutData(
+ AudioUnitRenderActionFlags* flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 num_frames,
+ AudioBufferList* io_data) {
+ return observer_->OnGetPlayoutData(flags, time_stamp, bus_number, num_frames,
+ io_data);
+}
+
+OSStatus VoiceProcessingAudioUnit::NotifyDeliverRecordedData(
+ AudioUnitRenderActionFlags* flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 num_frames,
+ AudioBufferList* io_data) {
+ return observer_->OnDeliverRecordedData(flags, time_stamp, bus_number,
+ num_frames, io_data);
+}
+
+AudioStreamBasicDescription VoiceProcessingAudioUnit::GetFormat(
+ Float64 sample_rate) const {
+ // Set the application formats for input and output:
+ // - use same format in both directions
+ // - avoid resampling in the I/O unit by using the hardware sample rate
+ // - linear PCM => noncompressed audio data format with one frame per packet
+ // - no need to specify interleaving since only mono is supported
+ AudioStreamBasicDescription format;
+ RTC_DCHECK_EQ(1, kRTCAudioSessionPreferredNumberOfChannels);
+ format.mSampleRate = sample_rate;
+ format.mFormatID = kAudioFormatLinearPCM;
+ format.mFormatFlags =
+ kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
+ format.mBytesPerPacket = kBytesPerSample;
+ format.mFramesPerPacket = 1; // uncompressed.
+ format.mBytesPerFrame = kBytesPerSample;
+ format.mChannelsPerFrame = kRTCAudioSessionPreferredNumberOfChannels;
+ format.mBitsPerChannel = 8 * kBytesPerSample;
+ return format;
+}
+
+void VoiceProcessingAudioUnit::DisposeAudioUnit() {
+ if (vpio_unit_) {
+ switch (state_) {
+ case kStarted:
+ Stop();
+ [[fallthrough]];
+ case kInitialized:
+ Uninitialize();
+ break;
+ case kUninitialized:
+ case kInitRequired:
+ break;
+ }
+
+ RTCLog(@"Disposing audio unit.");
+ OSStatus result = AudioComponentInstanceDispose(vpio_unit_);
+ if (result != noErr) {
+ RTCLogError(@"AudioComponentInstanceDispose failed. Error=%ld.",
+ (long)result);
+ }
+ vpio_unit_ = nullptr;
+ }
+}
+
+} // namespace ios_adm
+} // namespace webrtc
diff --git a/third_party/libwebrtc/sdk/objc/native/src/network_monitor_observer.h b/third_party/libwebrtc/sdk/objc/native/src/network_monitor_observer.h
new file mode 100644
index 0000000000..7c411a1db1
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/native/src/network_monitor_observer.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SDK_OBJC_NATIVE_SRC_NETWORK_MONITOR_OBSERVER_H_
+#define SDK_OBJC_NATIVE_SRC_NETWORK_MONITOR_OBSERVER_H_
+
+#include <map>
+#include <string>
+
+#include "absl/strings/string_view.h"
+#include "rtc_base/network_constants.h"
+#include "rtc_base/string_utils.h"
+#include "rtc_base/thread.h"
+
+namespace webrtc {
+
+// Observer interface for listening to NWPathMonitor updates.
+class NetworkMonitorObserver {
+ public:
+ // Called when a path update occurs, on network monitor dispatch queue.
+ //
+ // `adapter_type_by_name` is a map from interface name (i.e. "pdp_ip0") to
+ // adapter type, for all available interfaces on the current path. If an
+ // interface name isn't present it can be assumed to be unavailable.
+ virtual void OnPathUpdate(
+ std::map<std::string, rtc::AdapterType, rtc::AbslStringViewCmp>
+ adapter_type_by_name) = 0;
+
+ protected:
+ virtual ~NetworkMonitorObserver() {}
+};
+
+} // namespace webrtc
+
+#endif  // SDK_OBJC_NATIVE_SRC_NETWORK_MONITOR_OBSERVER_H_
diff --git a/third_party/libwebrtc/sdk/objc/native/src/objc_audio_device.h b/third_party/libwebrtc/sdk/objc/native/src/objc_audio_device.h
new file mode 100644
index 0000000000..fcfe7a6e8b
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/native/src/objc_audio_device.h
@@ -0,0 +1,277 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SDK_OBJC_NATIVE_SRC_OBJC_AUDIO_DEVICE_H_
+#define SDK_OBJC_NATIVE_SRC_OBJC_AUDIO_DEVICE_H_
+
+#include <memory>
+
+#import "components/audio/RTCAudioDevice.h"
+
+#include "modules/audio_device/audio_device_buffer.h"
+#include "modules/audio_device/include/audio_device.h"
+#include "rtc_base/thread.h"
+
+@class ObjCAudioDeviceDelegate;
+
+namespace webrtc {
+
+class FineAudioBuffer;
+
+namespace objc_adm {
+
+class ObjCAudioDeviceModule : public AudioDeviceModule {
+ public:
+ explicit ObjCAudioDeviceModule(id<RTC_OBJC_TYPE(RTCAudioDevice)> audio_device);
+ ~ObjCAudioDeviceModule() override;
+
+ // Retrieve the currently utilized audio layer
+ int32_t ActiveAudioLayer(AudioLayer* audioLayer) const override;
+
+ // Full-duplex transportation of PCM audio
+ int32_t RegisterAudioCallback(AudioTransport* audioCallback) override;
+
+ // Main initialization and termination
+ int32_t Init() override;
+ int32_t Terminate() override;
+ bool Initialized() const override;
+
+ // Device enumeration
+ int16_t PlayoutDevices() override;
+ int16_t RecordingDevices() override;
+ int32_t PlayoutDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) override;
+ int32_t RecordingDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) override;
+
+ // Device selection
+ int32_t SetPlayoutDevice(uint16_t index) override;
+ int32_t SetPlayoutDevice(WindowsDeviceType device) override;
+ int32_t SetRecordingDevice(uint16_t index) override;
+ int32_t SetRecordingDevice(WindowsDeviceType device) override;
+
+ // Audio transport initialization
+ int32_t PlayoutIsAvailable(bool* available) override;
+ int32_t InitPlayout() override;
+ bool PlayoutIsInitialized() const override;
+ int32_t RecordingIsAvailable(bool* available) override;
+ int32_t InitRecording() override;
+ bool RecordingIsInitialized() const override;
+
+ // Audio transport control
+ int32_t StartPlayout() override;
+ int32_t StopPlayout() override;
+ bool Playing() const override;
+ int32_t StartRecording() override;
+ int32_t StopRecording() override;
+ bool Recording() const override;
+
+ // Audio mixer initialization
+ int32_t InitSpeaker() override;
+ bool SpeakerIsInitialized() const override;
+ int32_t InitMicrophone() override;
+ bool MicrophoneIsInitialized() const override;
+
+ // Speaker volume controls
+ int32_t SpeakerVolumeIsAvailable(bool* available) override;
+ int32_t SetSpeakerVolume(uint32_t volume) override;
+ int32_t SpeakerVolume(uint32_t* volume) const override;
+ int32_t MaxSpeakerVolume(uint32_t* maxVolume) const override;
+ int32_t MinSpeakerVolume(uint32_t* minVolume) const override;
+
+ // Microphone volume controls
+ int32_t MicrophoneVolumeIsAvailable(bool* available) override;
+ int32_t SetMicrophoneVolume(uint32_t volume) override;
+ int32_t MicrophoneVolume(uint32_t* volume) const override;
+ int32_t MaxMicrophoneVolume(uint32_t* maxVolume) const override;
+ int32_t MinMicrophoneVolume(uint32_t* minVolume) const override;
+
+ // Speaker mute control
+ int32_t SpeakerMuteIsAvailable(bool* available) override;
+ int32_t SetSpeakerMute(bool enable) override;
+ int32_t SpeakerMute(bool* enabled) const override;
+
+ // Microphone mute control
+ int32_t MicrophoneMuteIsAvailable(bool* available) override;
+ int32_t SetMicrophoneMute(bool enable) override;
+ int32_t MicrophoneMute(bool* enabled) const override;
+
+ // Stereo support
+ int32_t StereoPlayoutIsAvailable(bool* available) const override;
+ int32_t SetStereoPlayout(bool enable) override;
+ int32_t StereoPlayout(bool* enabled) const override;
+ int32_t StereoRecordingIsAvailable(bool* available) const override;
+ int32_t SetStereoRecording(bool enable) override;
+ int32_t StereoRecording(bool* enabled) const override;
+
+ // Playout delay
+ int32_t PlayoutDelay(uint16_t* delayMS) const override;
+
+ // Only supported on Android.
+ bool BuiltInAECIsAvailable() const override;
+ bool BuiltInAGCIsAvailable() const override;
+ bool BuiltInNSIsAvailable() const override;
+
+ // Enables the built-in audio effects. Only supported on Android.
+ int32_t EnableBuiltInAEC(bool enable) override;
+ int32_t EnableBuiltInAGC(bool enable) override;
+ int32_t EnableBuiltInNS(bool enable) override;
+
+ // Play underrun count. Only supported on Android.
+ int32_t GetPlayoutUnderrunCount() const override;
+
+#if defined(WEBRTC_IOS)
+ int GetPlayoutAudioParameters(AudioParameters* params) const override;
+ int GetRecordAudioParameters(AudioParameters* params) const override;
+#endif // WEBRTC_IOS
+
+ public:
+ OSStatus OnDeliverRecordedData(AudioUnitRenderActionFlags* flags,
+ const AudioTimeStamp* time_stamp,
+ NSInteger bus_number,
+ UInt32 num_frames,
+ const AudioBufferList* io_data,
+ void* render_context,
+ RTC_OBJC_TYPE(RTCAudioDeviceRenderRecordedDataBlock) render_block);
+
+ OSStatus OnGetPlayoutData(AudioUnitRenderActionFlags* flags,
+ const AudioTimeStamp* time_stamp,
+ NSInteger bus_number,
+ UInt32 num_frames,
+ AudioBufferList* io_data);
+
+ // Notifies `ObjCAudioDeviceModule` that at least one of the audio input
+  // parameters or audio input latency of `RTCAudioDevice` has changed. It is necessary to
+ // update `record_parameters_` with current audio parameter of `RTCAudioDevice`
+  // via `UpdateAudioParameters` and if parameters have actually changed then
+ // ADB parameters are updated with `UpdateInputAudioDeviceBuffer`. Audio input latency
+ // stored in `cached_recording_delay_ms_` is also updated with current latency
+ // of `RTCAudioDevice`.
+ void HandleAudioInputParametersChange();
+
+ // Same as `HandleAudioInputParametersChange` but should be called when audio output
+ // parameters of `RTCAudioDevice` has changed.
+ void HandleAudioOutputParametersChange();
+
+  // Notifies `ObjCAudioDeviceModule` that an audio input interruption has happened due to
+  // any reason, so `ObjCAudioDeviceModule` can prepare for a restart of audio IO.
+ void HandleAudioInputInterrupted();
+
+  // Same as `HandleAudioInputInterrupted` but should be called when audio output
+ // is interrupted.
+ void HandleAudioOutputInterrupted();
+
+ private:
+ // Update our audio parameters if they are different from current device audio parameters
+  // Returns true when our parameters were updated, false otherwise.
+ // `ObjCAudioDeviceModule` has audio device buffer (ADB) which has audio parameters
+ // of playout & recording. The ADB is configured to work with specific sample rate & channel
+ // count. `ObjCAudioDeviceModule` stores audio parameters which were used to configure ADB in the
+ // fields `playout_parameters_` and `recording_parameters_`.
+ // `RTCAudioDevice` protocol has its own audio parameters exposed as individual properties.
+ // `RTCAudioDevice` audio parameters might change when playout/recording is already in progress,
+ // for example, when device is switched. `RTCAudioDevice` audio parameters must be kept in sync
+  // with ADB audio parameters. This method is invoked when `RTCAudioDevice` reports that its audio
+ // parameters (`device_params`) are changed and it detects if there any difference with our
+ // current audio parameters (`params`). Our parameters are updated in case of actual change and
+ // method returns true. In case of actual change there is follow-up call to either
+ // `UpdateOutputAudioDeviceBuffer` or `UpdateInputAudioDeviceBuffer` to apply updated
+ // `playout_parameters_` or `recording_parameters_` to ADB.
+
+ bool UpdateAudioParameters(AudioParameters& params, const AudioParameters& device_params);
+
+ // Update our cached audio latency with device latency. Device latency is reported by
+ // `RTCAudioDevice` object. Whenever latency is changed, `RTCAudioDevice` is obliged to notify ADM
+ // about the change via `HandleAudioInputParametersChange` or `HandleAudioOutputParametersChange`.
+ // Current device IO latency is cached in the atomic field and used from audio IO thread
+ // to be reported to audio device buffer. It is highly recommended by Apple not to call any
+ // ObjC methods from audio IO thread, that is why implementation relies on caching latency
+ // into a field and being notified when latency is changed, which is the case when device
+ // is switched.
+ void UpdateAudioDelay(std::atomic<int>& delay_ms, const NSTimeInterval device_latency);
+
+ // Uses current `playout_parameters_` to inform the audio device buffer (ADB)
+ // about our internal audio parameters.
+ void UpdateOutputAudioDeviceBuffer();
+
+ // Uses current `record_parameters_` to inform the audio device buffer (ADB)
+ // about our internal audio parameters.
+ void UpdateInputAudioDeviceBuffer();
+
+ private:
+ id<RTC_OBJC_TYPE(RTCAudioDevice)> audio_device_;
+
+ const std::unique_ptr<TaskQueueFactory> task_queue_factory_;
+
+ // AudioDeviceBuffer is a buffer to consume audio recorded by `RTCAudioDevice`
+ // and provide audio to be played via `RTCAudioDevice`.
+ // Audio PCMs could have different sample rate and channels count, but expected
+ // to be in 16-bit integer interleaved linear PCM format.
+ // The current parameters ADB configured to work with is stored in field
+ // `playout_parameters_` for playout and `record_parameters_` for recording.
+  // These parameters and ADB must be kept in sync with `RTCAudioDevice` audio parameters.
+ std::unique_ptr<AudioDeviceBuffer> audio_device_buffer_;
+
+  // Set to true when recording is active and false otherwise.
+ std::atomic<bool> recording_ = false;
+
+  // Set to true when playout is active and false otherwise.
+ std::atomic<bool> playing_ = false;
+
+ // Stores cached value of `RTCAudioDevice outputLatency` to be used from
+ // audio IO thread. Latency is updated on audio output parameters change.
+ std::atomic<int> cached_playout_delay_ms_ = 0;
+
+ // Same as `cached_playout_delay_ms_` but for audio input
+ std::atomic<int> cached_recording_delay_ms_ = 0;
+
+  // Thread on which this audio device module was initialized.
+ rtc::Thread* thread_;
+
+ // Ensures that methods are called from the same thread as this object is
+ // initialized on.
+ SequenceChecker thread_checker_;
+
+ // I/O audio thread checker.
+ SequenceChecker io_playout_thread_checker_;
+ SequenceChecker io_record_thread_checker_;
+
+ bool is_initialized_ RTC_GUARDED_BY(thread_checker_) = false;
+ bool is_playout_initialized_ RTC_GUARDED_BY(thread_checker_) = false;
+ bool is_recording_initialized_ RTC_GUARDED_BY(thread_checker_) = false;
+
+ // Contains audio parameters (sample rate, #channels, buffer size etc.) for
+ // the playout and recording sides.
+ AudioParameters playout_parameters_;
+ AudioParameters record_parameters_;
+
+ // `FineAudioBuffer` takes an `AudioDeviceBuffer` which delivers audio data
+ // in chunks of 10ms. `RTCAudioDevice` might deliver recorded data in
+ // chunks which are not 10ms long. `FineAudioBuffer` implements adaptation
+ // from undetermined chunk size to 10ms chunks.
+ std::unique_ptr<FineAudioBuffer> record_fine_audio_buffer_;
+
+ // Same as `record_fine_audio_buffer_` but for audio output.
+ std::unique_ptr<FineAudioBuffer> playout_fine_audio_buffer_;
+
+ // Temporary storage for recorded data.
+ rtc::BufferT<int16_t> record_audio_buffer_;
+
+ // Delegate object provided to RTCAudioDevice during initialization
+ ObjCAudioDeviceDelegate* audio_device_delegate_;
+};
+
+} // namespace objc_adm
+
+} // namespace webrtc
+
+#endif // SDK_OBJC_NATIVE_SRC_OBJC_AUDIO_DEVICE_H_
diff --git a/third_party/libwebrtc/sdk/objc/native/src/objc_audio_device.mm b/third_party/libwebrtc/sdk/objc/native/src/objc_audio_device.mm
new file mode 100644
index 0000000000..d629fae20f
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/native/src/objc_audio_device.mm
@@ -0,0 +1,711 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "objc_audio_device.h"
+#include "objc_audio_device_delegate.h"
+
+#import "components/audio/RTCAudioDevice.h"
+#include "modules/audio_device/fine_audio_buffer.h"
+
+#include "api/task_queue/default_task_queue_factory.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_minmax.h"
+#include "rtc_base/time_utils.h"
+
+namespace {
+
+webrtc::AudioParameters RecordParameters(id<RTC_OBJC_TYPE(RTCAudioDevice)> audio_device) {
+ const double sample_rate = static_cast<int>([audio_device deviceInputSampleRate]);
+ const size_t channels = static_cast<size_t>([audio_device inputNumberOfChannels]);
+ const size_t frames_per_buffer =
+ static_cast<size_t>(sample_rate * [audio_device inputIOBufferDuration] + .5);
+ return webrtc::AudioParameters(sample_rate, channels, frames_per_buffer);
+}
+
+webrtc::AudioParameters PlayoutParameters(id<RTC_OBJC_TYPE(RTCAudioDevice)> audio_device) {
+ const double sample_rate = static_cast<int>([audio_device deviceOutputSampleRate]);
+ const size_t channels = static_cast<size_t>([audio_device outputNumberOfChannels]);
+ const size_t frames_per_buffer =
+ static_cast<size_t>(sample_rate * [audio_device outputIOBufferDuration] + .5);
+ return webrtc::AudioParameters(sample_rate, channels, frames_per_buffer);
+}
+
+} // namespace
+
+namespace webrtc {
+namespace objc_adm {
+
+ObjCAudioDeviceModule::ObjCAudioDeviceModule(id<RTC_OBJC_TYPE(RTCAudioDevice)> audio_device)
+ : audio_device_(audio_device), task_queue_factory_(CreateDefaultTaskQueueFactory()) {
+ RTC_DLOG_F(LS_VERBOSE) << "";
+ RTC_DCHECK(audio_device_);
+ thread_checker_.Detach();
+ io_playout_thread_checker_.Detach();
+ io_record_thread_checker_.Detach();
+}
+
+ObjCAudioDeviceModule::~ObjCAudioDeviceModule() {
+ RTC_DLOG_F(LS_VERBOSE) << "";
+}
+
+int32_t ObjCAudioDeviceModule::RegisterAudioCallback(AudioTransport* audioCallback) {
+ RTC_DLOG_F(LS_VERBOSE) << "";
+ RTC_DCHECK(audio_device_buffer_);
+ return audio_device_buffer_->RegisterAudioCallback(audioCallback);
+}
+
+int32_t ObjCAudioDeviceModule::Init() {
+ RTC_DLOG_F(LS_VERBOSE) << "";
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+
+ if (Initialized()) {
+ RTC_LOG_F(LS_INFO) << "Already initialized";
+ return 0;
+ }
+ io_playout_thread_checker_.Detach();
+ io_record_thread_checker_.Detach();
+
+ thread_ = rtc::Thread::Current();
+ audio_device_buffer_.reset(new webrtc::AudioDeviceBuffer(task_queue_factory_.get()));
+
+ if (![audio_device_ isInitialized]) {
+ if (audio_device_delegate_ == nil) {
+ audio_device_delegate_ = [[ObjCAudioDeviceDelegate alloc]
+ initWithAudioDeviceModule:rtc::scoped_refptr<ObjCAudioDeviceModule>(this)
+ audioDeviceThread:thread_];
+ }
+
+ if (![audio_device_ initializeWithDelegate:audio_device_delegate_]) {
+ RTC_LOG_F(LS_WARNING) << "Failed to initialize audio device";
+ [audio_device_delegate_ resetAudioDeviceModule];
+ audio_device_delegate_ = nil;
+ return -1;
+ }
+ }
+
+ playout_parameters_.reset([audio_device_delegate_ preferredOutputSampleRate], 1);
+ UpdateOutputAudioDeviceBuffer();
+
+ record_parameters_.reset([audio_device_delegate_ preferredInputSampleRate], 1);
+ UpdateInputAudioDeviceBuffer();
+
+ is_initialized_ = true;
+
+ RTC_LOG_F(LS_INFO) << "Did initialize";
+ return 0;
+}
+
+int32_t ObjCAudioDeviceModule::Terminate() {
+ RTC_DLOG_F(LS_VERBOSE) << "";
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+
+ if (!Initialized()) {
+ RTC_LOG_F(LS_INFO) << "Not initialized";
+ return 0;
+ }
+
+ if ([audio_device_ isInitialized]) {
+ if (![audio_device_ terminateDevice]) {
+ RTC_LOG_F(LS_ERROR) << "Failed to terminate audio device";
+ return -1;
+ }
+ }
+
+ if (audio_device_delegate_ != nil) {
+ [audio_device_delegate_ resetAudioDeviceModule];
+ audio_device_delegate_ = nil;
+ }
+
+ is_initialized_ = false;
+ is_playout_initialized_ = false;
+ is_recording_initialized_ = false;
+ thread_ = nullptr;
+
+ RTC_LOG_F(LS_INFO) << "Did terminate";
+ return 0;
+}
+
+bool ObjCAudioDeviceModule::Initialized() const {
+ RTC_DLOG_F(LS_VERBOSE) << "";
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return is_initialized_ && [audio_device_ isInitialized];
+}
+
+int32_t ObjCAudioDeviceModule::PlayoutIsAvailable(bool* available) {
+ RTC_DLOG_F(LS_VERBOSE) << "";
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ *available = Initialized();
+ return 0;
+}
+
+bool ObjCAudioDeviceModule::PlayoutIsInitialized() const {
+ RTC_DLOG_F(LS_VERBOSE) << "";
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return Initialized() && is_playout_initialized_ && [audio_device_ isPlayoutInitialized];
+}
+
+int32_t ObjCAudioDeviceModule::InitPlayout() {
+ RTC_DLOG_F(LS_VERBOSE) << "";
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ if (!Initialized()) {
+ return -1;
+ }
+ if (PlayoutIsInitialized()) {
+ return 0;
+ }
+ RTC_DCHECK(!playing_.load());
+
+ if (![audio_device_ isPlayoutInitialized]) {
+ if (![audio_device_ initializePlayout]) {
+ RTC_LOG_F(LS_ERROR) << "Failed to initialize audio device playout";
+ return -1;
+ }
+ }
+
+ if (UpdateAudioParameters(playout_parameters_, PlayoutParameters(audio_device_))) {
+ UpdateOutputAudioDeviceBuffer();
+ }
+
+ is_playout_initialized_ = true;
+ RTC_LOG_F(LS_INFO) << "Did initialize playout";
+ return 0;
+}
+
+bool ObjCAudioDeviceModule::Playing() const {
+ RTC_DLOG_F(LS_VERBOSE) << "";
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return playing_.load() && [audio_device_ isPlaying];
+}
+
+int32_t ObjCAudioDeviceModule::StartPlayout() {
+ RTC_DLOG_F(LS_VERBOSE) << "";
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ if (!PlayoutIsInitialized()) {
+ return -1;
+ }
+ if (Playing()) {
+ return 0;
+ }
+
+ audio_device_buffer_->StartPlayout();
+ if (playout_fine_audio_buffer_) {
+ playout_fine_audio_buffer_->ResetPlayout();
+ }
+ if (![audio_device_ startPlayout]) {
+ RTC_LOG_F(LS_ERROR) << "Failed to start audio device playout";
+ return -1;
+ }
+ playing_.store(true, std::memory_order_release);
+ RTC_LOG_F(LS_INFO) << "Did start playout";
+ return 0;
+}
+
+int32_t ObjCAudioDeviceModule::StopPlayout() {
+ RTC_DLOG_F(LS_VERBOSE) << "";
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+
+ if (![audio_device_ stopPlayout]) {
+ RTC_LOG_F(LS_WARNING) << "Failed to stop playout";
+ return -1;
+ }
+
+ audio_device_buffer_->StopPlayout();
+ playing_.store(false, std::memory_order_release);
+ RTC_LOG_F(LS_INFO) << "Did stop playout";
+ return 0;
+}
+
+int32_t ObjCAudioDeviceModule::PlayoutDelay(uint16_t* delayMS) const {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ *delayMS = static_cast<uint16_t>(rtc::SafeClamp<int>(
+ cached_playout_delay_ms_.load(), 0, std::numeric_limits<uint16_t>::max()));
+ return 0;
+}
+
+int32_t ObjCAudioDeviceModule::RecordingIsAvailable(bool* available) {
+ RTC_DLOG_F(LS_VERBOSE) << "";
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ *available = Initialized();
+ return 0;
+}
+
+bool ObjCAudioDeviceModule::RecordingIsInitialized() const {
+ RTC_DLOG_F(LS_VERBOSE) << "";
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return Initialized() && is_recording_initialized_ && [audio_device_ isRecordingInitialized];
+}
+
+int32_t ObjCAudioDeviceModule::InitRecording() {
+ RTC_DLOG_F(LS_VERBOSE) << "";
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ if (!Initialized()) {
+ return -1;
+ }
+ if (RecordingIsInitialized()) {
+ return 0;
+ }
+ RTC_DCHECK(!recording_.load());
+
+ if (![audio_device_ isRecordingInitialized]) {
+ if (![audio_device_ initializeRecording]) {
+ RTC_LOG_F(LS_ERROR) << "Failed to initialize audio device recording";
+ return -1;
+ }
+ }
+
+ if (UpdateAudioParameters(record_parameters_, RecordParameters(audio_device_))) {
+ UpdateInputAudioDeviceBuffer();
+ }
+
+ is_recording_initialized_ = true;
+ RTC_LOG_F(LS_INFO) << "Did initialize recording";
+ return 0;
+}
+
+bool ObjCAudioDeviceModule::Recording() const {
+ RTC_DLOG_F(LS_VERBOSE) << "";
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return recording_.load() && [audio_device_ isRecording];
+}
+
+int32_t ObjCAudioDeviceModule::StartRecording() {
+ RTC_DLOG_F(LS_VERBOSE) << "";
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ if (!RecordingIsInitialized()) {
+ return -1;
+ }
+ if (Recording()) {
+ return 0;
+ }
+
+ audio_device_buffer_->StartRecording();
+ if (record_fine_audio_buffer_) {
+ record_fine_audio_buffer_->ResetRecord();
+ }
+
+ if (![audio_device_ startRecording]) {
+ RTC_LOG_F(LS_ERROR) << "Failed to start audio device recording";
+ return -1;
+ }
+ recording_.store(true, std::memory_order_release);
+ RTC_LOG_F(LS_INFO) << "Did start recording";
+ return 0;
+}
+
+int32_t ObjCAudioDeviceModule::StopRecording() {
+ RTC_DLOG_F(LS_VERBOSE) << "";
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+
+ if (![audio_device_ stopRecording]) {
+ RTC_LOG_F(LS_WARNING) << "Failed to stop recording";
+ return -1;
+ }
+ audio_device_buffer_->StopRecording();
+ recording_.store(false, std::memory_order_release);
+ RTC_LOG_F(LS_INFO) << "Did stop recording";
+ return 0;
+}
+
+#if defined(WEBRTC_IOS)
+
+int ObjCAudioDeviceModule::GetPlayoutAudioParameters(AudioParameters* params) const {
+ RTC_DLOG_F(LS_VERBOSE) << "";
+ RTC_DCHECK(playout_parameters_.is_valid());
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ *params = playout_parameters_;
+ return 0;
+}
+
+int ObjCAudioDeviceModule::GetRecordAudioParameters(AudioParameters* params) const {
+ RTC_DLOG_F(LS_VERBOSE) << "";
+ RTC_DCHECK(record_parameters_.is_valid());
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ *params = record_parameters_;
+ return 0;
+}
+
+#endif // WEBRTC_IOS
+
+void ObjCAudioDeviceModule::UpdateOutputAudioDeviceBuffer() {
+ RTC_DLOG_F(LS_VERBOSE) << "";
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RTC_DCHECK(audio_device_buffer_) << "AttachAudioBuffer must be called first";
+
+ RTC_DCHECK_GT(playout_parameters_.sample_rate(), 0);
+ RTC_DCHECK(playout_parameters_.channels() == 1 || playout_parameters_.channels() == 2);
+
+ audio_device_buffer_->SetPlayoutSampleRate(playout_parameters_.sample_rate());
+ audio_device_buffer_->SetPlayoutChannels(playout_parameters_.channels());
+ playout_fine_audio_buffer_.reset(new FineAudioBuffer(audio_device_buffer_.get()));
+}
+
+void ObjCAudioDeviceModule::UpdateInputAudioDeviceBuffer() {
+ RTC_DLOG_F(LS_VERBOSE) << "";
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RTC_DCHECK(audio_device_buffer_) << "AttachAudioBuffer must be called first";
+
+ RTC_DCHECK_GT(record_parameters_.sample_rate(), 0);
+ RTC_DCHECK(record_parameters_.channels() == 1 || record_parameters_.channels() == 2);
+
+ audio_device_buffer_->SetRecordingSampleRate(record_parameters_.sample_rate());
+ audio_device_buffer_->SetRecordingChannels(record_parameters_.channels());
+ record_fine_audio_buffer_.reset(new FineAudioBuffer(audio_device_buffer_.get()));
+}
+
+void ObjCAudioDeviceModule::UpdateAudioDelay(std::atomic<int>& delay_ms,
+ const NSTimeInterval device_latency) {
+ RTC_DLOG_F(LS_VERBOSE) << "";
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ int latency_ms = static_cast<int>(rtc::kNumMillisecsPerSec * device_latency);
+ if (latency_ms <= 0) {
+ return;
+ }
+ const int old_latency_ms = delay_ms.exchange(latency_ms);
+ if (old_latency_ms != latency_ms) {
+ RTC_LOG_F(LS_INFO) << "Did change audio IO latency from: " << old_latency_ms
+ << " ms to: " << latency_ms << " ms";
+ }
+}
+
+bool ObjCAudioDeviceModule::UpdateAudioParameters(AudioParameters& params,
+ const AudioParameters& device_params) {
+ RTC_DLOG_F(LS_VERBOSE) << "";
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ if (!device_params.is_complete()) {
+ RTC_LOG_F(LS_INFO) << "Device params are incomplete: " << device_params.ToString();
+ return false;
+ }
+ if (params.channels() == device_params.channels() &&
+ params.frames_per_buffer() == device_params.frames_per_buffer() &&
+ params.sample_rate() == device_params.sample_rate()) {
+ RTC_LOG_F(LS_INFO) << "Device params: " << device_params.ToString()
+ << " are not different from: " << params.ToString();
+ return false;
+ }
+
+ RTC_LOG_F(LS_INFO) << "Audio params will be changed from: " << params.ToString()
+ << " to: " << device_params.ToString();
+ params.reset(
+ device_params.sample_rate(), device_params.channels(), device_params.frames_per_buffer());
+ return true;
+}
+
+OSStatus ObjCAudioDeviceModule::OnDeliverRecordedData(
+ AudioUnitRenderActionFlags* flags,
+ const AudioTimeStamp* time_stamp,
+ NSInteger bus_number,
+ UInt32 num_frames,
+ const AudioBufferList* io_data,
+ void* render_context,
+ RTC_OBJC_TYPE(RTCAudioDeviceRenderRecordedDataBlock) render_block) {
+ RTC_DCHECK_RUN_ON(&io_record_thread_checker_);
+ OSStatus result = noErr;
+ // Simply return if recording is not enabled.
+ if (!recording_.load()) return result;
+
+ if (io_data != nullptr) {
+ // AudioBuffer already filled with audio data
+ RTC_DCHECK_EQ(1, io_data->mNumberBuffers);
+ const AudioBuffer* audio_buffer = &io_data->mBuffers[0];
+ RTC_DCHECK(audio_buffer->mNumberChannels == 1 || audio_buffer->mNumberChannels == 2);
+
+ record_fine_audio_buffer_->DeliverRecordedData(
+ rtc::ArrayView<const int16_t>(static_cast<int16_t*>(audio_buffer->mData), num_frames),
+ cached_recording_delay_ms_.load());
+ return noErr;
+ }
+ RTC_DCHECK(render_block != nullptr) << "Either io_data or render_block must be provided";
+
+ // Set the size of our own audio buffer and clear it first to avoid copying
+ // in combination with potential reallocations.
+ // On real iOS devices, the size will only be set once (at first callback).
+ const int channels_count = record_parameters_.channels();
+ record_audio_buffer_.Clear();
+ record_audio_buffer_.SetSize(num_frames * channels_count);
+
+ // Allocate AudioBuffers to be used as storage for the received audio.
+ // The AudioBufferList structure works as a placeholder for the
+ // AudioBuffer structure, which holds a pointer to the actual data buffer
+ // in `record_audio_buffer_`. Recorded audio will be rendered into this memory
+ // at each input callback when calling `render_block`.
+ AudioBufferList audio_buffer_list;
+ audio_buffer_list.mNumberBuffers = 1;
+ AudioBuffer* audio_buffer = &audio_buffer_list.mBuffers[0];
+ audio_buffer->mNumberChannels = channels_count;
+ audio_buffer->mDataByteSize =
+ record_audio_buffer_.size() * sizeof(decltype(record_audio_buffer_)::value_type);
+ audio_buffer->mData = reinterpret_cast<int8_t*>(record_audio_buffer_.data());
+
+ // Obtain the recorded audio samples by initiating a rendering cycle into own buffer.
+ result =
+ render_block(flags, time_stamp, bus_number, num_frames, &audio_buffer_list, render_context);
+ if (result != noErr) {
+ RTC_LOG_F(LS_ERROR) << "Failed to render audio: " << result;
+ return result;
+ }
+
+ // Get a pointer to the recorded audio and send it to the WebRTC ADB.
+ // Use the FineAudioBuffer instance to convert between native buffer size
+ // and the 10ms buffer size used by WebRTC.
+ record_fine_audio_buffer_->DeliverRecordedData(record_audio_buffer_,
+ cached_recording_delay_ms_.load());
+ return noErr;
+}
+
+OSStatus ObjCAudioDeviceModule::OnGetPlayoutData(AudioUnitRenderActionFlags* flags,
+ const AudioTimeStamp* time_stamp,
+ NSInteger bus_number,
+ UInt32 num_frames,
+ AudioBufferList* io_data) {
+ RTC_DCHECK_RUN_ON(&io_playout_thread_checker_);
+ // Verify 16-bit, noninterleaved mono or stereo PCM signal format.
+ RTC_DCHECK_EQ(1, io_data->mNumberBuffers);
+ AudioBuffer* audio_buffer = &io_data->mBuffers[0];
+ RTC_DCHECK(audio_buffer->mNumberChannels == 1 || audio_buffer->mNumberChannels == 2);
+ RTC_DCHECK_EQ(audio_buffer->mDataByteSize,
+ sizeof(int16_t) * num_frames * audio_buffer->mNumberChannels);
+
+ // Produce silence and give player a hint about it if playout is not
+ // activated.
+ if (!playing_.load()) {
+ *flags |= kAudioUnitRenderAction_OutputIsSilence;
+ memset(static_cast<int8_t*>(audio_buffer->mData), 0, audio_buffer->mDataByteSize);
+ return noErr;
+ }
+
+ // Read decoded 16-bit PCM samples from WebRTC into the
+ // `io_data` destination buffer.
+ playout_fine_audio_buffer_->GetPlayoutData(
+ rtc::ArrayView<int16_t>(static_cast<int16_t*>(audio_buffer->mData),
+ num_frames * audio_buffer->mNumberChannels),
+ cached_playout_delay_ms_.load());
+
+ return noErr;
+}
+
+void ObjCAudioDeviceModule::HandleAudioInputInterrupted() {
+ RTC_DLOG_F(LS_VERBOSE) << "";
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ io_record_thread_checker_.Detach();
+}
+
+void ObjCAudioDeviceModule::HandleAudioOutputInterrupted() {
+ RTC_DLOG_F(LS_VERBOSE) << "";
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ io_playout_thread_checker_.Detach();
+}
+
+void ObjCAudioDeviceModule::HandleAudioInputParametersChange() {
+ RTC_DLOG_F(LS_VERBOSE) << "";
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+
+ if (UpdateAudioParameters(record_parameters_, RecordParameters(audio_device_))) {
+ UpdateInputAudioDeviceBuffer();
+ }
+
+ UpdateAudioDelay(cached_recording_delay_ms_, [audio_device_ inputLatency]);
+}
+
+void ObjCAudioDeviceModule::HandleAudioOutputParametersChange() {
+ RTC_DLOG_F(LS_VERBOSE) << "";
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+
+ if (UpdateAudioParameters(playout_parameters_, PlayoutParameters(audio_device_))) {
+ UpdateOutputAudioDeviceBuffer();
+ }
+
+ UpdateAudioDelay(cached_playout_delay_ms_, [audio_device_ outputLatency]);
+}
+
+#pragma mark - Not implemented/Not relevant methods from AudioDeviceModule
+
+int32_t ObjCAudioDeviceModule::ActiveAudioLayer(AudioLayer* audioLayer) const {
+ return -1;
+}
+
+int16_t ObjCAudioDeviceModule::PlayoutDevices() {
+ return 0;
+}
+
+int16_t ObjCAudioDeviceModule::RecordingDevices() {
+ return 0;
+}
+
+int32_t ObjCAudioDeviceModule::PlayoutDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) {
+ return -1;
+}
+
+int32_t ObjCAudioDeviceModule::RecordingDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) {
+ return -1;
+}
+
+int32_t ObjCAudioDeviceModule::SetPlayoutDevice(uint16_t index) {
+ return 0;
+}
+
+int32_t ObjCAudioDeviceModule::SetPlayoutDevice(WindowsDeviceType device) {
+ return -1;
+}
+
+int32_t ObjCAudioDeviceModule::SetRecordingDevice(uint16_t index) {
+ return 0;
+}
+
+int32_t ObjCAudioDeviceModule::SetRecordingDevice(WindowsDeviceType device) {
+ return -1;
+}
+
+int32_t ObjCAudioDeviceModule::InitSpeaker() {
+ return 0;
+}
+
+bool ObjCAudioDeviceModule::SpeakerIsInitialized() const {
+ return true;
+}
+
+int32_t ObjCAudioDeviceModule::InitMicrophone() {
+ return 0;
+}
+
+bool ObjCAudioDeviceModule::MicrophoneIsInitialized() const {
+ return true;
+}
+
+int32_t ObjCAudioDeviceModule::SpeakerVolumeIsAvailable(bool* available) {
+ *available = false;
+ return 0;
+}
+
+int32_t ObjCAudioDeviceModule::SetSpeakerVolume(uint32_t volume) {
+ return -1;
+}
+
+int32_t ObjCAudioDeviceModule::SpeakerVolume(uint32_t* volume) const {
+ return -1;
+}
+
+int32_t ObjCAudioDeviceModule::MaxSpeakerVolume(uint32_t* maxVolume) const {
+ return -1;
+}
+
+int32_t ObjCAudioDeviceModule::MinSpeakerVolume(uint32_t* minVolume) const {
+ return -1;
+}
+
+int32_t ObjCAudioDeviceModule::SpeakerMuteIsAvailable(bool* available) {
+ *available = false;
+ return 0;
+}
+
+int32_t ObjCAudioDeviceModule::SetSpeakerMute(bool enable) {
+ return -1;
+}
+
+int32_t ObjCAudioDeviceModule::SpeakerMute(bool* enabled) const {
+ return -1;
+}
+
+int32_t ObjCAudioDeviceModule::MicrophoneMuteIsAvailable(bool* available) {
+ *available = false;
+ return 0;
+}
+
+int32_t ObjCAudioDeviceModule::SetMicrophoneMute(bool enable) {
+ return -1;
+}
+
+int32_t ObjCAudioDeviceModule::MicrophoneMute(bool* enabled) const {
+ return -1;
+}
+
+int32_t ObjCAudioDeviceModule::MicrophoneVolumeIsAvailable(bool* available) {
+ *available = false;
+ return 0;
+}
+
+int32_t ObjCAudioDeviceModule::SetMicrophoneVolume(uint32_t volume) {
+ return -1;
+}
+
+int32_t ObjCAudioDeviceModule::MicrophoneVolume(uint32_t* volume) const {
+ return -1;
+}
+
+int32_t ObjCAudioDeviceModule::MaxMicrophoneVolume(uint32_t* maxVolume) const {
+ return -1;
+}
+
+int32_t ObjCAudioDeviceModule::MinMicrophoneVolume(uint32_t* minVolume) const {
+ return -1;
+}
+
+int32_t ObjCAudioDeviceModule::StereoPlayoutIsAvailable(bool* available) const {
+ *available = false;
+ return 0;
+}
+
+int32_t ObjCAudioDeviceModule::SetStereoPlayout(bool enable) {
+ return -1;
+}
+
+int32_t ObjCAudioDeviceModule::StereoPlayout(bool* enabled) const {
+ *enabled = false;
+ return 0;
+}
+
+int32_t ObjCAudioDeviceModule::StereoRecordingIsAvailable(bool* available) const {
+ *available = false;
+ return 0;
+}
+
+int32_t ObjCAudioDeviceModule::SetStereoRecording(bool enable) {
+ return -1;
+}
+
+int32_t ObjCAudioDeviceModule::StereoRecording(bool* enabled) const {
+ *enabled = false;
+ return 0;
+}
+
+bool ObjCAudioDeviceModule::BuiltInAECIsAvailable() const {
+ return false;
+}
+
+int32_t ObjCAudioDeviceModule::EnableBuiltInAEC(bool enable) {
+ return 0;
+}
+
+bool ObjCAudioDeviceModule::BuiltInAGCIsAvailable() const {
+ return false;
+}
+
+int32_t ObjCAudioDeviceModule::EnableBuiltInAGC(bool enable) {
+ return 0;
+}
+
+bool ObjCAudioDeviceModule::BuiltInNSIsAvailable() const {
+ return false;
+}
+
+int32_t ObjCAudioDeviceModule::EnableBuiltInNS(bool enable) {
+ return 0;
+}
+
+int32_t ObjCAudioDeviceModule::GetPlayoutUnderrunCount() const {
+ return -1;
+}
+
+} // namespace objc_adm
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/sdk/objc/native/src/objc_audio_device_delegate.h b/third_party/libwebrtc/sdk/objc/native/src/objc_audio_device_delegate.h
new file mode 100644
index 0000000000..3af079dad9
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/native/src/objc_audio_device_delegate.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SDK_OBJC_NATIVE_SRC_OBJC_AUDIO_DEVICE_DELEGATE_H_
+#define SDK_OBJC_NATIVE_SRC_OBJC_AUDIO_DEVICE_DELEGATE_H_
+
+#include "api/scoped_refptr.h"
+#include "rtc_base/thread.h"
+
+#import "components/audio/RTCAudioDevice.h"
+
+namespace webrtc {
+namespace objc_adm {
+class ObjCAudioDeviceModule;
+} // namespace objc_adm
+} // namespace webrtc
+
+@interface ObjCAudioDeviceDelegate : NSObject <RTC_OBJC_TYPE (RTCAudioDeviceDelegate)>
+
+- (instancetype)initWithAudioDeviceModule:
+ (rtc::scoped_refptr<webrtc::objc_adm::ObjCAudioDeviceModule>)audioDeviceModule
+ audioDeviceThread:(rtc::Thread*)thread;
+
+- (void)resetAudioDeviceModule;
+
+@end
+
+#endif // SDK_OBJC_NATIVE_SRC_OBJC_AUDIO_DEVICE_DELEGATE_H_
diff --git a/third_party/libwebrtc/sdk/objc/native/src/objc_audio_device_delegate.mm b/third_party/libwebrtc/sdk/objc/native/src/objc_audio_device_delegate.mm
new file mode 100644
index 0000000000..156d6326a4
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/native/src/objc_audio_device_delegate.mm
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <AudioUnit/AudioUnit.h>
+#import <Foundation/Foundation.h>
+
+#import "objc_audio_device.h"
+#import "objc_audio_device_delegate.h"
+
+#include "api/make_ref_counted.h"
+#include "api/ref_counted_base.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/thread.h"
+
+namespace {
+
+constexpr double kPreferredInputSampleRate = 48000.0;
+constexpr double kPreferredOutputSampleRate = 48000.0;
+
+// WebRTC processes audio in chunks of 10ms. Preferring 20ms audio chunks
+// is a compromise between performance and power consumption.
+constexpr NSTimeInterval kPeferredInputIOBufferDuration = 0.02;
+constexpr NSTimeInterval kPeferredOutputIOBufferDuration = 0.02;
+
+class AudioDeviceDelegateImpl final : public rtc::RefCountedNonVirtual<AudioDeviceDelegateImpl> {
+ public:
+ AudioDeviceDelegateImpl(
+ rtc::scoped_refptr<webrtc::objc_adm::ObjCAudioDeviceModule> audio_device_module,
+ rtc::Thread* thread)
+ : audio_device_module_(audio_device_module), thread_(thread) {
+ RTC_DCHECK(audio_device_module_);
+ RTC_DCHECK(thread_);
+ }
+
+ webrtc::objc_adm::ObjCAudioDeviceModule* audio_device_module() const {
+ return audio_device_module_.get();
+ }
+
+ rtc::Thread* thread() const { return thread_; }
+
+ void reset_audio_device_module() { audio_device_module_ = nullptr; }
+
+ private:
+ rtc::scoped_refptr<webrtc::objc_adm::ObjCAudioDeviceModule> audio_device_module_;
+ rtc::Thread* thread_;
+};
+
+} // namespace
+
+@implementation ObjCAudioDeviceDelegate {
+ rtc::scoped_refptr<AudioDeviceDelegateImpl> impl_;
+}
+
+@synthesize getPlayoutData = getPlayoutData_;
+
+@synthesize deliverRecordedData = deliverRecordedData_;
+
+@synthesize preferredInputSampleRate = preferredInputSampleRate_;
+
+@synthesize preferredInputIOBufferDuration = preferredInputIOBufferDuration_;
+
+@synthesize preferredOutputSampleRate = preferredOutputSampleRate_;
+
+@synthesize preferredOutputIOBufferDuration = preferredOutputIOBufferDuration_;
+
+- (instancetype)initWithAudioDeviceModule:
+ (rtc::scoped_refptr<webrtc::objc_adm::ObjCAudioDeviceModule>)audioDeviceModule
+ audioDeviceThread:(rtc::Thread*)thread {
+ RTC_DCHECK_RUN_ON(thread);
+ if (self = [super init]) {
+ impl_ = rtc::make_ref_counted<AudioDeviceDelegateImpl>(audioDeviceModule, thread);
+ preferredInputSampleRate_ = kPreferredInputSampleRate;
+ preferredInputIOBufferDuration_ = kPeferredInputIOBufferDuration;
+ preferredOutputSampleRate_ = kPreferredOutputSampleRate;
+ preferredOutputIOBufferDuration_ = kPeferredOutputIOBufferDuration;
+
+ rtc::scoped_refptr<AudioDeviceDelegateImpl> playout_delegate = impl_;
+ getPlayoutData_ = ^OSStatus(AudioUnitRenderActionFlags* _Nonnull actionFlags,
+ const AudioTimeStamp* _Nonnull timestamp,
+ NSInteger inputBusNumber,
+ UInt32 frameCount,
+ AudioBufferList* _Nonnull outputData) {
+ webrtc::objc_adm::ObjCAudioDeviceModule* audio_device =
+ playout_delegate->audio_device_module();
+ if (audio_device) {
+ return audio_device->OnGetPlayoutData(
+ actionFlags, timestamp, inputBusNumber, frameCount, outputData);
+ } else {
+ *actionFlags |= kAudioUnitRenderAction_OutputIsSilence;
+ RTC_LOG(LS_VERBOSE) << "No alive audio device";
+ return noErr;
+ }
+ };
+
+ rtc::scoped_refptr<AudioDeviceDelegateImpl> record_delegate = impl_;
+ deliverRecordedData_ =
+ ^OSStatus(AudioUnitRenderActionFlags* _Nonnull actionFlags,
+ const AudioTimeStamp* _Nonnull timestamp,
+ NSInteger inputBusNumber,
+ UInt32 frameCount,
+ const AudioBufferList* _Nullable inputData,
+ void* renderContext,
+ RTC_OBJC_TYPE(RTCAudioDeviceRenderRecordedDataBlock) _Nullable renderBlock) {
+ webrtc::objc_adm::ObjCAudioDeviceModule* audio_device =
+ record_delegate->audio_device_module();
+ if (audio_device) {
+ return audio_device->OnDeliverRecordedData(actionFlags,
+ timestamp,
+ inputBusNumber,
+ frameCount,
+ inputData,
+ renderContext,
+ renderBlock);
+ } else {
+ RTC_LOG(LS_VERBOSE) << "No alive audio device";
+ return noErr;
+ }
+ };
+ }
+ return self;
+}
+
+- (void)notifyAudioInputParametersChange {
+ RTC_DCHECK_RUN_ON(impl_->thread());
+ webrtc::objc_adm::ObjCAudioDeviceModule* audio_device_module = impl_->audio_device_module();
+ if (audio_device_module) {
+ audio_device_module->HandleAudioInputParametersChange();
+ }
+}
+
+- (void)notifyAudioOutputParametersChange {
+ RTC_DCHECK_RUN_ON(impl_->thread());
+ webrtc::objc_adm::ObjCAudioDeviceModule* audio_device_module = impl_->audio_device_module();
+ if (audio_device_module) {
+ audio_device_module->HandleAudioOutputParametersChange();
+ }
+}
+
+- (void)notifyAudioInputInterrupted {
+ RTC_DCHECK_RUN_ON(impl_->thread());
+ webrtc::objc_adm::ObjCAudioDeviceModule* audio_device_module = impl_->audio_device_module();
+ if (audio_device_module) {
+ audio_device_module->HandleAudioInputInterrupted();
+ }
+}
+
+- (void)notifyAudioOutputInterrupted {
+ RTC_DCHECK_RUN_ON(impl_->thread());
+ webrtc::objc_adm::ObjCAudioDeviceModule* audio_device_module = impl_->audio_device_module();
+ if (audio_device_module) {
+ audio_device_module->HandleAudioOutputInterrupted();
+ }
+}
+
+- (void)dispatchAsync:(dispatch_block_t)block {
+ rtc::Thread* thread = impl_->thread();
+ RTC_DCHECK(thread);
+ thread->PostTask([block] {
+ @autoreleasepool {
+ block();
+ }
+ });
+}
+
+- (void)dispatchSync:(dispatch_block_t)block {
+ rtc::Thread* thread = impl_->thread();
+ RTC_DCHECK(thread);
+ if (thread->IsCurrent()) {
+ @autoreleasepool {
+ block();
+ }
+ } else {
+ thread->BlockingCall([block] {
+ @autoreleasepool {
+ block();
+ }
+ });
+ }
+}
+
+- (void)resetAudioDeviceModule {
+ RTC_DCHECK_RUN_ON(impl_->thread());
+ impl_->reset_audio_device_module();
+}
+
+@end
diff --git a/third_party/libwebrtc/sdk/objc/native/src/objc_frame_buffer.h b/third_party/libwebrtc/sdk/objc/native/src/objc_frame_buffer.h
new file mode 100644
index 0000000000..944690c8bc
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/native/src/objc_frame_buffer.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SDK_OBJC_NATIVE_SRC_OBJC_FRAME_BUFFER_H_
+#define SDK_OBJC_NATIVE_SRC_OBJC_FRAME_BUFFER_H_
+
+#import <CoreVideo/CoreVideo.h>
+
+#import "base/RTCMacros.h"
+
+#include "common_video/include/video_frame_buffer.h"
+
+@protocol RTC_OBJC_TYPE
+(RTCVideoFrameBuffer);
+
+namespace webrtc {
+
+class ObjCFrameBuffer : public VideoFrameBuffer {
+ public:
+ explicit ObjCFrameBuffer(id<RTC_OBJC_TYPE(RTCVideoFrameBuffer)>);
+ ~ObjCFrameBuffer() override;
+
+ Type type() const override;
+
+ int width() const override;
+ int height() const override;
+
+ rtc::scoped_refptr<I420BufferInterface> ToI420() override;
+ rtc::scoped_refptr<VideoFrameBuffer> CropAndScale(int offset_x,
+ int offset_y,
+ int crop_width,
+ int crop_height,
+ int scaled_width,
+ int scaled_height) override;
+
+ id<RTC_OBJC_TYPE(RTCVideoFrameBuffer)> wrapped_frame_buffer() const;
+
+ private:
+ id<RTC_OBJC_TYPE(RTCVideoFrameBuffer)> frame_buffer_;
+ int width_;
+ int height_;
+};
+
+id<RTC_OBJC_TYPE(RTCVideoFrameBuffer)> ToObjCVideoFrameBuffer(
+ const rtc::scoped_refptr<VideoFrameBuffer>& buffer);
+
+} // namespace webrtc
+
+#endif // SDK_OBJC_NATIVE_SRC_OBJC_FRAME_BUFFER_H_
diff --git a/third_party/libwebrtc/sdk/objc/native/src/objc_frame_buffer.mm b/third_party/libwebrtc/sdk/objc/native/src/objc_frame_buffer.mm
new file mode 100644
index 0000000000..00e4b4be85
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/native/src/objc_frame_buffer.mm
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "sdk/objc/native/src/objc_frame_buffer.h"
+
+#include "api/make_ref_counted.h"
+#import "base/RTCVideoFrameBuffer.h"
+#import "sdk/objc/api/video_frame_buffer/RTCNativeI420Buffer+Private.h"
+
+namespace webrtc {
+
+namespace {
+
/** ObjCFrameBuffer that conforms to I420BufferInterface by wrapping RTC_OBJC_TYPE(RTCI420Buffer) */
class ObjCI420FrameBuffer : public I420BufferInterface {
 public:
  // Caches dimensions at construction; every plane accessor forwards to the
  // retained ObjC buffer, so plane memory stays valid while this wrapper
  // is alive.
  explicit ObjCI420FrameBuffer(id<RTC_OBJC_TYPE(RTCI420Buffer)> frame_buffer)
      : frame_buffer_(frame_buffer), width_(frame_buffer.width), height_(frame_buffer.height) {}
  ~ObjCI420FrameBuffer() override {}

  int width() const override { return width_; }

  int height() const override { return height_; }

  const uint8_t* DataY() const override { return frame_buffer_.dataY; }

  const uint8_t* DataU() const override { return frame_buffer_.dataU; }

  const uint8_t* DataV() const override { return frame_buffer_.dataV; }

  int StrideY() const override { return frame_buffer_.strideY; }

  int StrideU() const override { return frame_buffer_.strideU; }

  int StrideV() const override { return frame_buffer_.strideV; }

 private:
  id<RTC_OBJC_TYPE(RTCI420Buffer)> frame_buffer_;
  int width_;
  int height_;
};
+
+} // namespace
+
// Retains the ObjC buffer and caches its dimensions so width()/height()
// don't require an ObjC message send.
ObjCFrameBuffer::ObjCFrameBuffer(id<RTC_OBJC_TYPE(RTCVideoFrameBuffer)> frame_buffer)
    : frame_buffer_(frame_buffer), width_(frame_buffer.width), height_(frame_buffer.height) {}

ObjCFrameBuffer::~ObjCFrameBuffer() {}

VideoFrameBuffer::Type ObjCFrameBuffer::type() const {
  return Type::kNative;
}

int ObjCFrameBuffer::width() const {
  return width_;
}

int ObjCFrameBuffer::height() const {
  return height_;
}

// Delegates the conversion to the wrapped buffer's -toI420 and adapts the
// resulting RTCI420Buffer back into the C++ I420BufferInterface.
rtc::scoped_refptr<I420BufferInterface> ObjCFrameBuffer::ToI420() {
  return rtc::make_ref_counted<ObjCI420FrameBuffer>([frame_buffer_ toI420]);
}

// Prefers the wrapped buffer's native crop/scale when the ObjC class
// implements the optional protocol method (checked via respondsToSelector:);
// otherwise uses the generic VideoFrameBuffer path, which goes through
// ToI420().
rtc::scoped_refptr<VideoFrameBuffer> ObjCFrameBuffer::CropAndScale(int offset_x,
                                                                   int offset_y,
                                                                   int crop_width,
                                                                   int crop_height,
                                                                   int scaled_width,
                                                                   int scaled_height) {
  if ([frame_buffer_ respondsToSelector:@selector
                     (cropAndScaleWith:offsetY:cropWidth:cropHeight:scaleWidth:scaleHeight:)]) {
    return rtc::make_ref_counted<ObjCFrameBuffer>([frame_buffer_ cropAndScaleWith:offset_x
                                                                          offsetY:offset_y
                                                                        cropWidth:crop_width
                                                                       cropHeight:crop_height
                                                                       scaleWidth:scaled_width
                                                                      scaleHeight:scaled_height]);
  }

  // Use the default implementation.
  return VideoFrameBuffer::CropAndScale(
      offset_x, offset_y, crop_width, crop_height, scaled_width, scaled_height);
}

id<RTC_OBJC_TYPE(RTCVideoFrameBuffer)> ObjCFrameBuffer::wrapped_frame_buffer() const {
  return frame_buffer_;
}
+
// Unwraps the ObjC buffer from a native frame, or adapts an I420 conversion
// into an RTCI420Buffer for any non-native buffer.
// NOTE(review): the kNative branch assumes every native buffer reaching this
// code is an ObjCFrameBuffer; a kNative buffer of another concrete type would
// make the static_cast invalid -- confirm callers uphold this invariant.
id<RTC_OBJC_TYPE(RTCVideoFrameBuffer)> ToObjCVideoFrameBuffer(
    const rtc::scoped_refptr<VideoFrameBuffer>& buffer) {
  if (buffer->type() == VideoFrameBuffer::Type::kNative) {
    return static_cast<ObjCFrameBuffer*>(buffer.get())->wrapped_frame_buffer();
  } else {
    return [[RTC_OBJC_TYPE(RTCI420Buffer) alloc] initWithFrameBuffer:buffer->ToI420()];
  }
}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/sdk/objc/native/src/objc_network_monitor.h b/third_party/libwebrtc/sdk/objc/native/src/objc_network_monitor.h
new file mode 100644
index 0000000000..709e9dfbe5
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/native/src/objc_network_monitor.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SDK_OBJC_NATIVE_SRC_OBJC_NETWORK_MONITOR_H_
+#define SDK_OBJC_NATIVE_SRC_OBJC_NETWORK_MONITOR_H_
+
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "api/field_trials_view.h"
+#include "api/sequence_checker.h"
+#include "api/task_queue/pending_task_safety_flag.h"
+#include "rtc_base/network_monitor.h"
+#include "rtc_base/network_monitor_factory.h"
+#include "rtc_base/string_utils.h"
+#include "rtc_base/thread.h"
+#include "rtc_base/thread_annotations.h"
+#include "sdk/objc/components/network/RTCNetworkMonitor+Private.h"
+#include "sdk/objc/native/src/network_monitor_observer.h"
+
+namespace webrtc {
+
// Factory that produces ObjCNetworkMonitor instances; plugged into WebRTC's
// network monitoring via rtc::NetworkMonitorFactory.
class ObjCNetworkMonitorFactory : public rtc::NetworkMonitorFactory {
 public:
  ObjCNetworkMonitorFactory() = default;
  ~ObjCNetworkMonitorFactory() override = default;

  rtc::NetworkMonitorInterface* CreateNetworkMonitor(
      const FieldTrialsView& field_trials) override;
};
+
// Bridges the ObjC RTCNetworkMonitor (NWPathMonitor-backed) into WebRTC's
// NetworkMonitorInterface. Path updates arrive via NetworkMonitorObserver
// and are re-posted onto the thread that called Start().
class ObjCNetworkMonitor : public rtc::NetworkMonitorInterface,
                           public NetworkMonitorObserver {
 public:
  ObjCNetworkMonitor();
  ~ObjCNetworkMonitor() override;

  void Start() override;
  void Stop() override;

  InterfaceInfo GetInterfaceInfo(absl::string_view interface_name) override;

  // NetworkMonitorObserver override.
  // Fans out updates to observers on the correct thread.
  void OnPathUpdate(
      std::map<std::string, rtc::AdapterType, rtc::AbslStringViewCmp>
          adapter_type_by_name) override;

 private:
  rtc::Thread* thread_ = nullptr;  // Captured in Start(); guards state below.
  bool started_ = false;
  // Last path snapshot; empty until the first OnPathUpdate is delivered.
  std::map<std::string, rtc::AdapterType, rtc::AbslStringViewCmp>
      adapter_type_by_name_ RTC_GUARDED_BY(thread_);
  // Cancels posted updates after Stop()/destruction.
  rtc::scoped_refptr<PendingTaskSafetyFlag> safety_flag_;
  RTCNetworkMonitor* network_monitor_ = nil;
};
+
+} // namespace webrtc
+
+#endif // SDK_OBJC_NATIVE_SRC_OBJC_NETWORK_MONITOR_H_
diff --git a/third_party/libwebrtc/sdk/objc/native/src/objc_network_monitor.mm b/third_party/libwebrtc/sdk/objc/native/src/objc_network_monitor.mm
new file mode 100644
index 0000000000..535548c64c
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/native/src/objc_network_monitor.mm
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "sdk/objc/native/src/objc_network_monitor.h"
+#include "absl/strings/string_view.h"
+
+#include <algorithm>
+
+#include "rtc_base/logging.h"
+#include "rtc_base/string_utils.h"
+
+namespace webrtc {
+
// Creates a new monitor; ownership passes to the caller per the
// NetworkMonitorFactory contract. `field_trials` is unused here.
rtc::NetworkMonitorInterface* ObjCNetworkMonitorFactory::CreateNetworkMonitor(
    const FieldTrialsView& field_trials) {
  return new ObjCNetworkMonitor();
}
+
ObjCNetworkMonitor::ObjCNetworkMonitor() {
  safety_flag_ = PendingTaskSafetyFlag::Create();
}

// Messaging nil is a no-op, so this is safe when Start() never ran or Stop()
// already cleared the monitor.
ObjCNetworkMonitor::~ObjCNetworkMonitor() {
  [network_monitor_ stop];
  network_monitor_ = nil;
}

// Begins monitoring on the calling thread; subsequent path updates are
// posted back to this same thread. Safe to call repeatedly while started.
void ObjCNetworkMonitor::Start() {
  if (started_) {
    return;
  }
  thread_ = rtc::Thread::Current();
  RTC_DCHECK_RUN_ON(thread_);
  safety_flag_->SetAlive();
  network_monitor_ = [[RTCNetworkMonitor alloc] initWithObserver:this];
  if (network_monitor_ == nil) {
    RTC_LOG(LS_WARNING) << "Failed to create RTCNetworkMonitor; not available on this OS?";
  }
  // NOTE(review): started_ becomes true even when creation failed above, so a
  // later Start() will not retry -- presumably intentional; confirm.
  started_ = true;
}

// Stops monitoring and marks the safety flag not-alive, so any OnPathUpdate
// tasks still queued on thread_ are dropped instead of running.
void ObjCNetworkMonitor::Stop() {
  RTC_DCHECK_RUN_ON(thread_);
  if (!started_) {
    return;
  }
  safety_flag_->SetNotAlive();
  [network_monitor_ stop];
  network_monitor_ = nil;
  started_ = false;
}
+
// Reports the adapter type and availability of `interface_name` based on the
// most recent path update. With no update yet, every interface is reported
// available so WebRTC still attempts all of them rather than none.
rtc::NetworkMonitorInterface::InterfaceInfo ObjCNetworkMonitor::GetInterfaceInfo(
    absl::string_view interface_name) {
  RTC_DCHECK_RUN_ON(thread_);
  if (adapter_type_by_name_.empty()) {
    return {
        .adapter_type = rtc::ADAPTER_TYPE_UNKNOWN,
        .available = true,
    };
  }

  const auto entry = adapter_type_by_name_.find(interface_name);
  const bool known_interface = entry != adapter_type_by_name_.end();
  return {
      .adapter_type = known_interface ? entry->second : rtc::ADAPTER_TYPE_UNKNOWN,
      .available = known_interface,
  };
}
+
// Called by RTCNetworkMonitor (from its own dispatch queue). Re-posts the
// snapshot onto thread_; SafeTask + safety_flag_ ensure the task is dropped
// if Stop() ran (SetNotAlive) before it executes.
void ObjCNetworkMonitor::OnPathUpdate(
    std::map<std::string, rtc::AdapterType, rtc::AbslStringViewCmp> adapter_type_by_name) {
  RTC_DCHECK(network_monitor_ != nil);
  thread_->PostTask(SafeTask(safety_flag_, [this, adapter_type_by_name] {
    RTC_DCHECK_RUN_ON(thread_);
    adapter_type_by_name_ = adapter_type_by_name;
    InvokeNetworksChangedCallback();
  }));
}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/sdk/objc/native/src/objc_video_decoder_factory.h b/third_party/libwebrtc/sdk/objc/native/src/objc_video_decoder_factory.h
new file mode 100644
index 0000000000..30ad8c2a4b
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/native/src/objc_video_decoder_factory.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SDK_OBJC_NATIVE_SRC_OBJC_VIDEO_DECODER_FACTORY_H_
+#define SDK_OBJC_NATIVE_SRC_OBJC_VIDEO_DECODER_FACTORY_H_
+
+#import "base/RTCMacros.h"
+
+#include "api/video_codecs/video_decoder_factory.h"
+#include "media/base/codec.h"
+
+@protocol RTC_OBJC_TYPE
+(RTCVideoDecoderFactory);
+
+namespace webrtc {
+
// C++ VideoDecoderFactory backed by an Objective-C RTCVideoDecoderFactory;
// adapts its codecs and decoders for the native pipeline.
class ObjCVideoDecoderFactory : public VideoDecoderFactory {
 public:
  explicit ObjCVideoDecoderFactory(id<RTC_OBJC_TYPE(RTCVideoDecoderFactory)>);
  ~ObjCVideoDecoderFactory() override;

  // Returns the underlying ObjC factory (retained by this object).
  id<RTC_OBJC_TYPE(RTCVideoDecoderFactory)> wrapped_decoder_factory() const;

  std::vector<SdpVideoFormat> GetSupportedFormats() const override;
  std::unique_ptr<VideoDecoder> CreateVideoDecoder(
      const SdpVideoFormat& format) override;

 private:
  id<RTC_OBJC_TYPE(RTCVideoDecoderFactory)> decoder_factory_;
};
+
+} // namespace webrtc
+
+#endif // SDK_OBJC_NATIVE_SRC_OBJC_VIDEO_DECODER_FACTORY_H_
diff --git a/third_party/libwebrtc/sdk/objc/native/src/objc_video_decoder_factory.mm b/third_party/libwebrtc/sdk/objc/native/src/objc_video_decoder_factory.mm
new file mode 100644
index 0000000000..da3b302275
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/native/src/objc_video_decoder_factory.mm
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "sdk/objc/native/src/objc_video_decoder_factory.h"
+
+#import "base/RTCMacros.h"
+#import "base/RTCVideoDecoder.h"
+#import "base/RTCVideoDecoderFactory.h"
+#import "base/RTCVideoFrame.h"
+#import "base/RTCVideoFrameBuffer.h"
+#import "components/video_codec/RTCCodecSpecificInfoH264.h"
+#import "sdk/objc/api/peerconnection/RTCEncodedImage+Private.h"
+#import "sdk/objc/api/peerconnection/RTCVideoCodecInfo+Private.h"
+#import "sdk/objc/api/video_codec/RTCWrappedNativeVideoDecoder.h"
+#import "sdk/objc/helpers/NSString+StdString.h"
+
+#include "api/video_codecs/sdp_video_format.h"
+#include "api/video_codecs/video_decoder.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "modules/video_coding/include/video_error_codes.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/time_utils.h"
+#include "sdk/objc/native/src/objc_frame_buffer.h"
+
+namespace webrtc {
+
+namespace {
// Adapts an ObjC RTCVideoDecoder to the C++ VideoDecoder interface. The
// implementation name is captured once at construction.
class ObjCVideoDecoder : public VideoDecoder {
 public:
  ObjCVideoDecoder(id<RTC_OBJC_TYPE(RTCVideoDecoder)> decoder)
      : decoder_(decoder), implementation_name_([decoder implementationName].stdString) {}

  // Forwards the core count to the ObjC decoder; success iff it returns
  // WEBRTC_VIDEO_CODEC_OK.
  bool Configure(const Settings &settings) override {
    return
        [decoder_ startDecodeWithNumberOfCores:settings.number_of_cores()] == WEBRTC_VIDEO_CODEC_OK;
  }

  // Wraps the native EncodedImage in an RTCEncodedImage and forwards it.
  // Returns the ObjC decoder's WEBRTC_VIDEO_CODEC_* status.
  int32_t Decode(const EncodedImage &input_image,
                 bool missing_frames,
                 int64_t render_time_ms = -1) override {
    RTC_OBJC_TYPE(RTCEncodedImage) *encodedImage =
        [[RTC_OBJC_TYPE(RTCEncodedImage) alloc] initWithNativeEncodedImage:input_image];

    return [decoder_ decode:encodedImage
              missingFrames:missing_frames
          codecSpecificInfo:nil
               renderTimeMs:render_time_ms];
  }

  // Installs a block on the ObjC decoder that converts each decoded
  // RTCVideoFrame back into a native VideoFrame for `callback`. The block
  // retains `this`-independent state only; `callback` must outlive decoding.
  int32_t RegisterDecodeCompleteCallback(DecodedImageCallback *callback) override {
    [decoder_ setCallback:^(RTC_OBJC_TYPE(RTCVideoFrame) * frame) {
      const auto buffer = rtc::make_ref_counted<ObjCFrameBuffer>(frame.buffer);
      // NOTE(review): the builder's RTP timestamp (derived from timeStampNs)
      // is overwritten by set_timestamp(frame.timeStamp) below -- presumably
      // the ObjC timeStamp property carries the real RTP timestamp; confirm.
      VideoFrame videoFrame =
          VideoFrame::Builder()
              .set_video_frame_buffer(buffer)
              .set_timestamp_rtp((uint32_t)(frame.timeStampNs / rtc::kNumNanosecsPerMicrosec))
              .set_timestamp_ms(0)
              .set_rotation((VideoRotation)frame.rotation)
              .build();
      videoFrame.set_timestamp(frame.timeStamp);

      callback->Decoded(videoFrame);
    }];

    return WEBRTC_VIDEO_CODEC_OK;
  }

  int32_t Release() override { return [decoder_ releaseDecoder]; }

  const char *ImplementationName() const override { return implementation_name_.c_str(); }

 private:
  id<RTC_OBJC_TYPE(RTCVideoDecoder)> decoder_;
  const std::string implementation_name_;
};
+} // namespace
+
ObjCVideoDecoderFactory::ObjCVideoDecoderFactory(
    id<RTC_OBJC_TYPE(RTCVideoDecoderFactory)> decoder_factory)
    : decoder_factory_(decoder_factory) {}

ObjCVideoDecoderFactory::~ObjCVideoDecoderFactory() {}

id<RTC_OBJC_TYPE(RTCVideoDecoderFactory)> ObjCVideoDecoderFactory::wrapped_decoder_factory() const {
  return decoder_factory_;
}

// Looks up `format` by codec name among the ObjC factory's supported codecs.
// A wrapped-native decoder is unwrapped directly into its C++ implementation;
// any other ObjC decoder is adapted via ObjCVideoDecoder. Returns nullptr
// when no supported codec name matches.
std::unique_ptr<VideoDecoder> ObjCVideoDecoderFactory::CreateVideoDecoder(
    const SdpVideoFormat &format) {
  NSString *codecName = [NSString stringWithUTF8String:format.name.c_str()];
  for (RTC_OBJC_TYPE(RTCVideoCodecInfo) * codecInfo in decoder_factory_.supportedCodecs) {
    if ([codecName isEqualToString:codecInfo.name]) {
      id<RTC_OBJC_TYPE(RTCVideoDecoder)> decoder = [decoder_factory_ createDecoder:codecInfo];

      if ([decoder isKindOfClass:[RTC_OBJC_TYPE(RTCWrappedNativeVideoDecoder) class]]) {
        return [(RTC_OBJC_TYPE(RTCWrappedNativeVideoDecoder) *)decoder releaseWrappedDecoder];
      } else {
        return std::unique_ptr<ObjCVideoDecoder>(new ObjCVideoDecoder(decoder));
      }
    }
  }

  return nullptr;
}

// Translates each ObjC codec info into a native SdpVideoFormat.
std::vector<SdpVideoFormat> ObjCVideoDecoderFactory::GetSupportedFormats() const {
  std::vector<SdpVideoFormat> supported_formats;
  for (RTC_OBJC_TYPE(RTCVideoCodecInfo) * supportedCodec in decoder_factory_.supportedCodecs) {
    SdpVideoFormat format = [supportedCodec nativeSdpVideoFormat];
    supported_formats.push_back(format);
  }

  return supported_formats;
}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/sdk/objc/native/src/objc_video_encoder_factory.h b/third_party/libwebrtc/sdk/objc/native/src/objc_video_encoder_factory.h
new file mode 100644
index 0000000000..38db5e6ae7
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/native/src/objc_video_encoder_factory.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SDK_OBJC_NATIVE_SRC_OBJC_VIDEO_ENCODER_FACTORY_H_
+#define SDK_OBJC_NATIVE_SRC_OBJC_VIDEO_ENCODER_FACTORY_H_
+
+#import <Foundation/Foundation.h>
+
+#import "base/RTCMacros.h"
+
+#include "api/video_codecs/video_encoder_factory.h"
+
+@protocol RTC_OBJC_TYPE
+(RTCVideoEncoderFactory);
+
+namespace webrtc {
+
// C++ VideoEncoderFactory backed by an Objective-C RTCVideoEncoderFactory;
// adapts its codecs, encoders and (optionally) encoder selector.
class ObjCVideoEncoderFactory : public VideoEncoderFactory {
 public:
  explicit ObjCVideoEncoderFactory(id<RTC_OBJC_TYPE(RTCVideoEncoderFactory)>);
  ~ObjCVideoEncoderFactory() override;

  // Returns the underlying ObjC factory (retained by this object).
  id<RTC_OBJC_TYPE(RTCVideoEncoderFactory)> wrapped_encoder_factory() const;

  std::vector<SdpVideoFormat> GetSupportedFormats() const override;
  std::vector<SdpVideoFormat> GetImplementations() const override;
  std::unique_ptr<VideoEncoder> CreateVideoEncoder(
      const SdpVideoFormat& format) override;
  std::unique_ptr<EncoderSelectorInterface> GetEncoderSelector() const override;

 private:
  id<RTC_OBJC_TYPE(RTCVideoEncoderFactory)> encoder_factory_;
};
+
+} // namespace webrtc
+
+#endif // SDK_OBJC_NATIVE_SRC_OBJC_VIDEO_ENCODER_FACTORY_H_
diff --git a/third_party/libwebrtc/sdk/objc/native/src/objc_video_encoder_factory.mm b/third_party/libwebrtc/sdk/objc/native/src/objc_video_encoder_factory.mm
new file mode 100644
index 0000000000..d4ea79cc88
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/native/src/objc_video_encoder_factory.mm
@@ -0,0 +1,209 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "sdk/objc/native/src/objc_video_encoder_factory.h"
+
+#include <string>
+
+#import "base/RTCMacros.h"
+#import "base/RTCVideoEncoder.h"
+#import "base/RTCVideoEncoderFactory.h"
+#import "components/video_codec/RTCCodecSpecificInfoH264+Private.h"
+#import "sdk/objc/api/peerconnection/RTCEncodedImage+Private.h"
+#import "sdk/objc/api/peerconnection/RTCVideoCodecInfo+Private.h"
+#import "sdk/objc/api/peerconnection/RTCVideoEncoderSettings+Private.h"
+#import "sdk/objc/api/video_codec/RTCVideoCodecConstants.h"
+#import "sdk/objc/api/video_codec/RTCWrappedNativeVideoEncoder.h"
+#import "sdk/objc/helpers/NSString+StdString.h"
+
+#include "api/video/video_frame.h"
+#include "api/video_codecs/sdp_video_format.h"
+#include "api/video_codecs/video_encoder.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+#include "modules/video_coding/include/video_error_codes.h"
+#include "rtc_base/logging.h"
+#include "sdk/objc/native/src/objc_video_frame.h"
+
+namespace webrtc {
+
+namespace {
+
// Adapts an ObjC RTCVideoEncoder to the C++ VideoEncoder interface. The
// implementation name is captured once at construction.
class ObjCVideoEncoder : public VideoEncoder {
 public:
  ObjCVideoEncoder(id<RTC_OBJC_TYPE(RTCVideoEncoder)> encoder)
      : encoder_(encoder), implementation_name_([encoder implementationName].stdString) {}

  // Translates codec settings into RTCVideoEncoderSettings and starts the
  // ObjC encoder; returns its WEBRTC_VIDEO_CODEC_* status.
  int32_t InitEncode(const VideoCodec *codec_settings, const Settings &encoder_settings) override {
    RTC_OBJC_TYPE(RTCVideoEncoderSettings) *settings =
        [[RTC_OBJC_TYPE(RTCVideoEncoderSettings) alloc] initWithNativeVideoCodec:codec_settings];
    return [encoder_ startEncodeWithSettings:settings
                               numberOfCores:encoder_settings.number_of_cores];
  }

  // Installs (or clears, when `callback` is null) a block that converts each
  // encoded ObjC frame back into a native EncodedImage for `callback`.
  int32_t RegisterEncodeCompleteCallback(EncodedImageCallback *callback) override {
    if (callback) {
      [encoder_ setCallback:^BOOL(RTC_OBJC_TYPE(RTCEncodedImage) * _Nonnull frame,
                                  id<RTC_OBJC_TYPE(RTCCodecSpecificInfo)> _Nonnull info) {
        EncodedImage encodedImage = [frame nativeEncodedImage];

        // Handle types that can be converted into one of CodecSpecificInfo's hard coded cases.
        CodecSpecificInfo codecSpecificInfo;
        if ([info isKindOfClass:[RTC_OBJC_TYPE(RTCCodecSpecificInfoH264) class]]) {
          codecSpecificInfo =
              [(RTC_OBJC_TYPE(RTCCodecSpecificInfoH264) *)info nativeCodecSpecificInfo];
        }

        EncodedImageCallback::Result res = callback->OnEncodedImage(encodedImage, &codecSpecificInfo);
        return res.error == EncodedImageCallback::Result::OK;
      }];
    } else {
      [encoder_ setCallback:nil];
    }
    return WEBRTC_VIDEO_CODEC_OK;
  }

  int32_t Release() override { return [encoder_ releaseEncoder]; }

  // Converts the requested frame types and forwards the frame to the ObjC
  // encoder; returns its WEBRTC_VIDEO_CODEC_* status.
  int32_t Encode(const VideoFrame &frame,
                 const std::vector<VideoFrameType> *frame_types) override {
    NSMutableArray<NSNumber *> *rtcFrameTypes = [NSMutableArray array];
    for (size_t i = 0; i < frame_types->size(); ++i) {
      [rtcFrameTypes addObject:@(RTCFrameType(frame_types->at(i)))];
    }

    return [encoder_ encode:ToObjCVideoFrame(frame)
          codecSpecificInfo:nil
                 frameTypes:rtcFrameTypes];
  }

  // Forwards the aggregate bitrate (kbps) and rounded framerate.
  void SetRates(const RateControlParameters &parameters) override {
    const uint32_t bitrate = parameters.bitrate.get_sum_kbps();
    const uint32_t framerate = static_cast<uint32_t>(parameters.framerate_fps + 0.5);
    [encoder_ setBitrate:bitrate framerate:framerate];
  }

  VideoEncoder::EncoderInfo GetEncoderInfo() const override {
    EncoderInfo info;
    info.implementation_name = implementation_name_;

    RTC_OBJC_TYPE(RTCVideoEncoderQpThresholds) *qp_thresholds = [encoder_ scalingSettings];
    info.scaling_settings = qp_thresholds ? ScalingSettings(qp_thresholds.low, qp_thresholds.high) :
                                            ScalingSettings::kOff;

    // BUG FIX: the previous `encoder_.resolutionAlignment > 0 ?: 1` used the
    // GNU "elvis" operator on the *comparison result*, which is 1 on both
    // branches -- the encoder's requested alignment was never propagated.
    // Use the alignment value itself, falling back to 1 when unset.
    info.requested_resolution_alignment =
        encoder_.resolutionAlignment > 0 ? encoder_.resolutionAlignment : 1;
    info.apply_alignment_to_all_simulcast_layers = encoder_.applyAlignmentToAllSimulcastLayers;
    info.supports_native_handle = encoder_.supportsNativeHandle;
    info.is_hardware_accelerated = true;
    return info;
  }

 private:
  id<RTC_OBJC_TYPE(RTCVideoEncoder)> encoder_;
  const std::string implementation_name_;
};
+
// Adapts an ObjC RTCVideoEncoderSelector to WebRTC's EncoderSelectorInterface.
// Each hook translates between SdpVideoFormat and RTCVideoCodecInfo; a nil
// ObjC result maps to absl::nullopt (no preference).
class ObjcVideoEncoderSelector : public VideoEncoderFactory::EncoderSelectorInterface {
 public:
  ObjcVideoEncoderSelector(id<RTC_OBJC_TYPE(RTCVideoEncoderSelector)> selector) {
    selector_ = selector;
  }
  void OnCurrentEncoder(const SdpVideoFormat &format) override {
    RTC_OBJC_TYPE(RTCVideoCodecInfo) *info =
        [[RTC_OBJC_TYPE(RTCVideoCodecInfo) alloc] initWithNativeSdpVideoFormat:format];
    [selector_ registerCurrentEncoderInfo:info];
  }
  absl::optional<SdpVideoFormat> OnEncoderBroken() override {
    RTC_OBJC_TYPE(RTCVideoCodecInfo) *info = [selector_ encoderForBrokenEncoder];
    if (info) {
      return [info nativeSdpVideoFormat];
    }
    return absl::nullopt;
  }
  absl::optional<SdpVideoFormat> OnAvailableBitrate(const DataRate &rate) override {
    RTC_OBJC_TYPE(RTCVideoCodecInfo) *info = [selector_ encoderForBitrate:rate.kbps<NSInteger>()];
    if (info) {
      return [info nativeSdpVideoFormat];
    }
    return absl::nullopt;
  }

  // encoderForResolutionChangeBySize: is optional on the ObjC protocol, so
  // probe with respondsToSelector: before calling.
  absl::optional<SdpVideoFormat> OnResolutionChange(const RenderResolution &resolution) override {
    if ([selector_ respondsToSelector:@selector(encoderForResolutionChangeBySize:)]) {
      RTC_OBJC_TYPE(RTCVideoCodecInfo) *info = [selector_
          encoderForResolutionChangeBySize:CGSizeMake(resolution.Width(), resolution.Height())];
      if (info) {
        return [info nativeSdpVideoFormat];
      }
    }
    return absl::nullopt;
  }

 private:
  id<RTC_OBJC_TYPE(RTCVideoEncoderSelector)> selector_;
};
+
+} // namespace
+
ObjCVideoEncoderFactory::ObjCVideoEncoderFactory(
    id<RTC_OBJC_TYPE(RTCVideoEncoderFactory)> encoder_factory)
    : encoder_factory_(encoder_factory) {}

ObjCVideoEncoderFactory::~ObjCVideoEncoderFactory() {}

id<RTC_OBJC_TYPE(RTCVideoEncoderFactory)> ObjCVideoEncoderFactory::wrapped_encoder_factory() const {
  return encoder_factory_;
}

// Translates each ObjC codec info into a native SdpVideoFormat.
std::vector<SdpVideoFormat> ObjCVideoEncoderFactory::GetSupportedFormats() const {
  std::vector<SdpVideoFormat> supported_formats;
  for (RTC_OBJC_TYPE(RTCVideoCodecInfo) * supportedCodec in [encoder_factory_ supportedCodecs]) {
    SdpVideoFormat format = [supportedCodec nativeSdpVideoFormat];
    supported_formats.push_back(format);
  }

  return supported_formats;
}

// Uses the ObjC factory's optional -implementations list when present;
// otherwise falls back to the supported-codecs list.
std::vector<SdpVideoFormat> ObjCVideoEncoderFactory::GetImplementations() const {
  if ([encoder_factory_ respondsToSelector:@selector(implementations)]) {
    std::vector<SdpVideoFormat> supported_formats;
    for (RTC_OBJC_TYPE(RTCVideoCodecInfo) * supportedCodec in [encoder_factory_ implementations]) {
      SdpVideoFormat format = [supportedCodec nativeSdpVideoFormat];
      supported_formats.push_back(format);
    }
    return supported_formats;
  }
  return GetSupportedFormats();
}

// A wrapped-native encoder is unwrapped directly into its C++ implementation;
// any other ObjC encoder is adapted via ObjCVideoEncoder.
std::unique_ptr<VideoEncoder> ObjCVideoEncoderFactory::CreateVideoEncoder(
    const SdpVideoFormat &format) {
  RTC_OBJC_TYPE(RTCVideoCodecInfo) *info =
      [[RTC_OBJC_TYPE(RTCVideoCodecInfo) alloc] initWithNativeSdpVideoFormat:format];
  id<RTC_OBJC_TYPE(RTCVideoEncoder)> encoder = [encoder_factory_ createEncoder:info];
  if ([encoder isKindOfClass:[RTC_OBJC_TYPE(RTCWrappedNativeVideoEncoder) class]]) {
    return [(RTC_OBJC_TYPE(RTCWrappedNativeVideoEncoder) *)encoder releaseWrappedEncoder];
  } else {
    return std::unique_ptr<ObjCVideoEncoder>(new ObjCVideoEncoder(encoder));
  }
}
+
// Wraps the ObjC factory's optional encoderSelector, when both the selector
// method is implemented and it returns a non-nil object; otherwise nullptr
// (no selector).
std::unique_ptr<VideoEncoderFactory::EncoderSelectorInterface>
    ObjCVideoEncoderFactory::GetEncoderSelector() const {
  if ([encoder_factory_ respondsToSelector:@selector(encoderSelector)]) {
    id<RTC_OBJC_TYPE(RTCVideoEncoderSelector)> selector = [encoder_factory_ encoderSelector];
    if (selector) {
      // Construct directly rather than via absl::make_unique: no absl memory
      // header is included here, and the rest of this file builds adapters
      // with std::unique_ptr<T>(new T(...)).
      return std::unique_ptr<ObjcVideoEncoderSelector>(
          new ObjcVideoEncoderSelector(selector));
    }
  }
  return nullptr;
}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/sdk/objc/native/src/objc_video_frame.h b/third_party/libwebrtc/sdk/objc/native/src/objc_video_frame.h
new file mode 100644
index 0000000000..c2931cb2f8
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/native/src/objc_video_frame.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SDK_OBJC_NATIVE_SRC_OBJC_VIDEO_FRAME_H_
+#define SDK_OBJC_NATIVE_SRC_OBJC_VIDEO_FRAME_H_
+
+#import "base/RTCVideoFrame.h"
+
+#include "api/video/video_frame.h"
+
+namespace webrtc {
+
+RTC_OBJC_TYPE(RTCVideoFrame) * ToObjCVideoFrame(const VideoFrame& frame);
+
+} // namespace webrtc
+
+#endif // SDK_OBJC_NATIVE_SRC_OBJC_VIDEO_FRAME_H_
diff --git a/third_party/libwebrtc/sdk/objc/native/src/objc_video_frame.mm b/third_party/libwebrtc/sdk/objc/native/src/objc_video_frame.mm
new file mode 100644
index 0000000000..2e8ce6153e
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/native/src/objc_video_frame.mm
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "sdk/objc/native/src/objc_video_frame.h"
+
+#include "rtc_base/time_utils.h"
+#include "sdk/objc/native/src/objc_frame_buffer.h"
+
+namespace webrtc {
+
// Converts a native VideoFrame to an RTCVideoFrame. The buffer is shared (or
// unwrapped) via ToObjCVideoFrameBuffer; the capture timestamp is converted
// from microseconds to the nanoseconds the ObjC API expects.
RTC_OBJC_TYPE(RTCVideoFrame) * ToObjCVideoFrame(const VideoFrame &frame) {
  RTC_OBJC_TYPE(RTCVideoFrame) *videoFrame = [[RTC_OBJC_TYPE(RTCVideoFrame) alloc]
      initWithBuffer:ToObjCVideoFrameBuffer(frame.video_frame_buffer())
            rotation:RTCVideoRotation(frame.rotation())
         timeStampNs:frame.timestamp_us() * rtc::kNumNanosecsPerMicrosec];
  // The RTP timestamp is carried separately; it is not derivable from
  // timeStampNs.
  videoFrame.timeStamp = frame.timestamp();

  return videoFrame;
}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/sdk/objc/native/src/objc_video_renderer.h b/third_party/libwebrtc/sdk/objc/native/src/objc_video_renderer.h
new file mode 100644
index 0000000000..f9c35eae96
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/native/src/objc_video_renderer.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SDK_OBJC_NATIVE_SRC_OBJC_VIDEO_RENDERER_H_
+#define SDK_OBJC_NATIVE_SRC_OBJC_VIDEO_RENDERER_H_
+
+#import <CoreGraphics/CoreGraphics.h>
+#import <Foundation/Foundation.h>
+
+#import "base/RTCMacros.h"
+
+#include "api/video/video_frame.h"
+#include "api/video/video_sink_interface.h"
+
+@protocol RTC_OBJC_TYPE
+(RTCVideoRenderer);
+
+namespace webrtc {
+
// VideoSinkInterface adapter that forwards native frames to an ObjC
// RTCVideoRenderer, notifying it of size changes as needed.
class ObjCVideoRenderer : public rtc::VideoSinkInterface<VideoFrame> {
 public:
  ObjCVideoRenderer(id<RTC_OBJC_TYPE(RTCVideoRenderer)> renderer);
  void OnFrame(const VideoFrame& nativeVideoFrame) override;

 private:
  id<RTC_OBJC_TYPE(RTCVideoRenderer)> renderer_;
  CGSize size_;  // Last size passed to -setSize:, rotation-adjusted.
};
+
+} // namespace webrtc
+
+#endif // SDK_OBJC_NATIVE_SRC_OBJC_VIDEO_RENDERER_H_
diff --git a/third_party/libwebrtc/sdk/objc/native/src/objc_video_renderer.mm b/third_party/libwebrtc/sdk/objc/native/src/objc_video_renderer.mm
new file mode 100644
index 0000000000..4a9b647ec3
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/native/src/objc_video_renderer.mm
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "sdk/objc/native/src/objc_video_renderer.h"
+
+#import "base/RTCMacros.h"
+#import "base/RTCVideoFrame.h"
+#import "base/RTCVideoRenderer.h"
+
+#include "sdk/objc/native/src/objc_video_frame.h"
+
+namespace webrtc {
+
ObjCVideoRenderer::ObjCVideoRenderer(id<RTC_OBJC_TYPE(RTCVideoRenderer)> renderer)
    : renderer_(renderer), size_(CGSizeZero) {}

// Converts the native frame to an RTCVideoFrame and hands it to the ObjC
// renderer, first informing the renderer when the displayed size changed.
void ObjCVideoRenderer::OnFrame(const VideoFrame& nativeVideoFrame) {
  RTC_OBJC_TYPE(RTCVideoFrame)* videoFrame = ToObjCVideoFrame(nativeVideoFrame);

  // Frames rotated by 90/270 degrees render with width and height swapped.
  CGSize frameSize;
  if (videoFrame.rotation % 180 == 0) {
    frameSize = CGSizeMake(videoFrame.width, videoFrame.height);
  } else {
    frameSize = CGSizeMake(videoFrame.height, videoFrame.width);
  }

  // Only notify the renderer when the size actually changes.
  if (!CGSizeEqualToSize(size_, frameSize)) {
    size_ = frameSize;
    [renderer_ setSize:size_];
  }
  [renderer_ renderFrame:videoFrame];
}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/sdk/objc/native/src/objc_video_track_source.h b/third_party/libwebrtc/sdk/objc/native/src/objc_video_track_source.h
new file mode 100644
index 0000000000..19a3d6db43
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/native/src/objc_video_track_source.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SDK_OBJC_CLASSES_VIDEO_OBJC_VIDEO_TRACK_SOURCE_H_
+#define SDK_OBJC_CLASSES_VIDEO_OBJC_VIDEO_TRACK_SOURCE_H_
+
+#import "base/RTCVideoCapturer.h"
+
+#include "base/RTCMacros.h"
+#include "media/base/adapted_video_track_source.h"
+#include "rtc_base/timestamp_aligner.h"
+
+RTC_FWD_DECL_OBJC_CLASS(RTC_OBJC_TYPE(RTCVideoFrame));
+
+@interface RTCObjCVideoSourceAdapter : NSObject <RTC_OBJC_TYPE (RTCVideoCapturerDelegate)>
+@end
+
+namespace webrtc {
+
+// Video track source that feeds frames captured by an Objective-C
+// RTCVideoCapturer into the native webrtc video pipeline, applying the
+// resolution/frame-rate adaptation provided by AdaptedVideoTrackSource.
+class ObjCVideoTrackSource : public rtc::AdaptedVideoTrackSource {
+ public:
+  ObjCVideoTrackSource();
+  explicit ObjCVideoTrackSource(bool is_screencast);
+  explicit ObjCVideoTrackSource(RTCObjCVideoSourceAdapter* adapter);
+
+  bool is_screencast() const override;
+
+  // Indicates that the encoder should denoise video before encoding it.
+  // If it is not set, the default configuration is used which is different
+  // depending on video codec.
+  absl::optional<bool> needs_denoising() const override;
+
+  SourceState state() const override;
+
+  bool remote() const override;
+
+  // Adapts `frame` (drop/crop/scale) and forwards it downstream via OnFrame().
+  void OnCapturedFrame(RTC_OBJC_TYPE(RTCVideoFrame) * frame);
+
+  // Called by RTCVideoSource.
+  void OnOutputFormatRequest(int width, int height, int fps);
+
+ private:
+  rtc::VideoBroadcaster broadcaster_;
+  rtc::TimestampAligner timestamp_aligner_;
+
+  RTCObjCVideoSourceAdapter* adapter_;
+  // Default-initialized: the adapter-taking constructor does not set this
+  // member, and without an initializer is_screencast() would read an
+  // indeterminate value on that construction path.
+  bool is_screencast_ = false;
+};
+
+} // namespace webrtc
+
+#endif // SDK_OBJC_CLASSES_VIDEO_OBJC_VIDEO_TRACK_SOURCE_H_
diff --git a/third_party/libwebrtc/sdk/objc/native/src/objc_video_track_source.mm b/third_party/libwebrtc/sdk/objc/native/src/objc_video_track_source.mm
new file mode 100644
index 0000000000..7937e90505
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/native/src/objc_video_track_source.mm
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "sdk/objc/native/src/objc_video_track_source.h"
+
+#import "base/RTCVideoFrame.h"
+#import "base/RTCVideoFrameBuffer.h"
+#import "components/video_frame_buffer/RTCCVPixelBuffer.h"
+
+#include "api/video/i420_buffer.h"
+#include "sdk/objc/native/src/objc_frame_buffer.h"
+
+// Private class extension: back-pointer to the native source that consumes
+// frames delivered through this adapter. Stored as a raw C++ pointer, so the
+// native source is presumably expected to outlive the adapter — TODO confirm
+// ownership against the callers that construct the pair.
+@interface RTCObjCVideoSourceAdapter ()
+@property(nonatomic) webrtc::ObjCVideoTrackSource *objCVideoTrackSource;
+@end
+
+@implementation RTCObjCVideoSourceAdapter
+
+@synthesize objCVideoTrackSource = _objCVideoTrackSource;
+
+// RTCVideoCapturerDelegate: forwards each captured frame to the native track
+// source. Unlike an Objective-C message to nil, a C++ member call through a
+// null pointer is undefined behavior, so guard against the back-pointer not
+// having been set (or having been cleared) yet.
+- (void)capturer:(RTC_OBJC_TYPE(RTCVideoCapturer) *)capturer
+    didCaptureVideoFrame:(RTC_OBJC_TYPE(RTCVideoFrame) *)frame {
+  if (_objCVideoTrackSource) {
+    _objCVideoTrackSource->OnCapturedFrame(frame);
+  }
+}
+
+@end
+
+namespace webrtc {
+
+ObjCVideoTrackSource::ObjCVideoTrackSource() : ObjCVideoTrackSource(false) {}
+
+// Constructs the source with a 2-pixel resolution-alignment requirement on
+// the adapted output, recording whether frames are screencast content.
+ObjCVideoTrackSource::ObjCVideoTrackSource(bool is_screencast)
+    : AdaptedVideoTrackSource(/* required resolution alignment */ 2),
+      is_screencast_(is_screencast) {}
+
+// Constructs the source from a capturer adapter and registers this instance
+// as the adapter's frame sink. is_screencast_ is initialized explicitly: the
+// other constructors set it, and leaving it out here would make
+// is_screencast() read an indeterminate value on this construction path.
+// Note this overload deliberately uses the base class's default resolution
+// alignment rather than the alignment-2 used by the bool constructor.
+ObjCVideoTrackSource::ObjCVideoTrackSource(RTCObjCVideoSourceAdapter *adapter)
+    : adapter_(adapter), is_screencast_(false) {
+  adapter_.objCVideoTrackSource = this;
+}
+
+// Whether this source carries screencast content; fixed at construction.
+bool ObjCVideoTrackSource::is_screencast() const {
+  return is_screencast_;
+}
+
+// Always requests that the encoder skip denoising for this source,
+// overriding any per-codec default.
+absl::optional<bool> ObjCVideoTrackSource::needs_denoising() const {
+  return false;
+}
+
+// Unconditionally reports a live source; this class keeps no
+// started/stopped state of its own.
+MediaSourceInterface::SourceState ObjCVideoTrackSource::state() const {
+  return SourceState::kLive;
+}
+
+// Frames originate locally (delivered via OnCapturedFrame), never from a
+// remote peer.
+bool ObjCVideoTrackSource::remote() const {
+  return false;
+}
+
+// Called by RTCVideoSource. Forwards the requested maximum output resolution
+// and frame rate to the underlying video adapter.
+void ObjCVideoTrackSource::OnOutputFormatRequest(int width, int height, int fps) {
+  // Trailing 0 is the VideoFormat's final (presumably fourcc) argument,
+  // which a format request does not need — TODO confirm against
+  // cricket::VideoFormat.
+  cricket::VideoFormat format(width, height, cricket::VideoFormat::FpsToInterval(fps), 0);
+  video_adapter()->OnOutputFormatRequest(format);
+}
+
+// Adapts one captured frame (drop / crop / scale per current restrictions),
+// translates its timestamp, and hands it downstream via OnFrame().
+void ObjCVideoTrackSource::OnCapturedFrame(RTC_OBJC_TYPE(RTCVideoFrame) * frame) {
+  // Translate the capture timestamp (nanoseconds, capturer clock) into the
+  // rtc::TimeMicros() clock domain using the timestamp aligner.
+  const int64_t timestamp_us = frame.timeStampNs / rtc::kNumNanosecsPerMicrosec;
+  const int64_t translated_timestamp_us =
+      timestamp_aligner_.TranslateTimestamp(timestamp_us, rtc::TimeMicros());
+
+  // Out-parameters filled in by AdaptFrame() below.
+  int adapted_width;
+  int adapted_height;
+  int crop_width;
+  int crop_height;
+  int crop_x;
+  int crop_y;
+  // Apply the current resolution/frame-rate restrictions; a false return
+  // means this frame should be dropped entirely.
+  if (!AdaptFrame(frame.width,
+                  frame.height,
+                  timestamp_us,
+                  &adapted_width,
+                  &adapted_height,
+                  &crop_width,
+                  &crop_height,
+                  &crop_x,
+                  &crop_y)) {
+    return;
+  }
+
+  rtc::scoped_refptr<VideoFrameBuffer> buffer;
+  if (adapted_width == frame.width && adapted_height == frame.height) {
+    // No adaption - optimized path: wrap the Objective-C buffer as-is.
+    buffer = rtc::make_ref_counted<ObjCFrameBuffer>(frame.buffer);
+  } else if ([frame.buffer isKindOfClass:[RTC_OBJC_TYPE(RTCCVPixelBuffer) class]]) {
+    // Adapted CVPixelBuffer frame: re-wrap the same pixel buffer with the new
+    // adapted size, offsetting the crop region by any crop already present
+    // on the incoming buffer.
+    RTC_OBJC_TYPE(RTCCVPixelBuffer) *rtcPixelBuffer =
+        (RTC_OBJC_TYPE(RTCCVPixelBuffer) *)frame.buffer;
+    buffer = rtc::make_ref_counted<ObjCFrameBuffer>([[RTC_OBJC_TYPE(RTCCVPixelBuffer) alloc]
+        initWithPixelBuffer:rtcPixelBuffer.pixelBuffer
+               adaptedWidth:adapted_width
+              adaptedHeight:adapted_height
+                  cropWidth:crop_width
+                 cropHeight:crop_height
+                      cropX:crop_x + rtcPixelBuffer.cropX
+                      cropY:crop_y + rtcPixelBuffer.cropY]);
+  } else {
+    // Adapted I420 frame: convert to I420 and crop/scale into a fresh buffer.
+    // TODO(magjed): Optimize this I420 path.
+    rtc::scoped_refptr<I420Buffer> i420_buffer = I420Buffer::Create(adapted_width, adapted_height);
+    buffer = rtc::make_ref_counted<ObjCFrameBuffer>(frame.buffer);
+    i420_buffer->CropAndScaleFrom(*buffer->ToI420(), crop_x, crop_y, crop_width, crop_height);
+    buffer = i420_buffer;
+  }
+
+  // Applying rotation is only supported for legacy reasons and performance is
+  // not critical here.
+  VideoRotation rotation = static_cast<VideoRotation>(frame.rotation);
+  if (apply_rotation() && rotation != kVideoRotation_0) {
+    buffer = I420Buffer::Rotate(*buffer->ToI420(), rotation);
+    rotation = kVideoRotation_0;
+  }
+
+  OnFrame(VideoFrame::Builder()
+              .set_video_frame_buffer(buffer)
+              .set_rotation(rotation)
+              .set_timestamp_us(translated_timestamp_us)
+              .build());
+}
+
+} // namespace webrtc