author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 19:33:14 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 19:33:14 +0000
commit     36d22d82aa202bb199967e9512281e9a53db42c9 (patch)
tree       105e8c98ddea1c1e4784a60a5a6410fa416be2de /third_party/libwebrtc/sdk/objc/components
parent     Initial commit. (diff)
Adding upstream version 115.7.0esr. (upstream/115.7.0esr, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/libwebrtc/sdk/objc/components')
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/audio/RTCAudioDevice.h | 308
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/audio/RTCAudioSession+Configuration.mm | 176
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/audio/RTCAudioSession+Private.h | 95
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/audio/RTCAudioSession.h | 265
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/audio/RTCAudioSession.mm | 1000
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/audio/RTCAudioSessionConfiguration.h | 48
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/audio/RTCAudioSessionConfiguration.m | 133
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/audio/RTCNativeAudioSessionDelegateAdapter.h | 33
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/audio/RTCNativeAudioSessionDelegateAdapter.mm | 89
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/capturer/RTCCameraVideoCapturer.h | 56
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/capturer/RTCCameraVideoCapturer.m | 535
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/capturer/RTCFileVideoCapturer.h | 51
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/capturer/RTCFileVideoCapturer.m | 215
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/network/RTCNetworkMonitor+Private.h | 26
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/network/RTCNetworkMonitor.h | 24
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/network/RTCNetworkMonitor.mm | 126
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLI420Renderer.h | 17
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLI420Renderer.mm | 170
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLNSVideoView.h | 24
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLNSVideoView.m | 122
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLNV12Renderer.h | 18
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLNV12Renderer.mm | 164
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLRGBRenderer.h | 22
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLRGBRenderer.mm | 164
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLRenderer+Private.h | 33
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLRenderer.h | 61
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLRenderer.mm | 328
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLVideoView.h | 44
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLVideoView.m | 265
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCDefaultShader.h | 23
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCDefaultShader.mm | 207
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCDisplayLinkTimer.h | 24
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCDisplayLinkTimer.m | 59
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCEAGLVideoView.h | 45
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCEAGLVideoView.m | 295
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCI420TextureCache.h | 25
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCI420TextureCache.mm | 157
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCNSGLVideoView.h | 42
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCNSGLVideoView.m | 199
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCNV12TextureCache.h | 33
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCNV12TextureCache.m | 113
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCOpenGLDefines.h | 37
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCShader.h | 21
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCShader.mm | 189
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCVideoViewShading.h | 39
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_codec/RTCCodecSpecificInfoH264+Private.h | 25
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_codec/RTCCodecSpecificInfoH264.h | 27
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_codec/RTCCodecSpecificInfoH264.mm | 29
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_codec/RTCDefaultVideoDecoderFactory.h | 26
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_codec/RTCDefaultVideoDecoderFactory.m | 85
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_codec/RTCDefaultVideoEncoderFactory.h | 31
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_codec/RTCDefaultVideoEncoderFactory.m | 102
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_codec/RTCH264ProfileLevelId.h | 60
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_codec/RTCH264ProfileLevelId.mm | 120
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoDecoderFactoryH264.h | 18
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoDecoderFactoryH264.m | 49
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoDecoderH264.h | 18
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoDecoderH264.mm | 276
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoEncoderFactoryH264.h | 18
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoEncoderFactoryH264.m | 49
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoEncoderH264.h | 22
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoEncoderH264.mm | 828
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_codec/UIDevice+H264Profile.h | 19
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_codec/UIDevice+H264Profile.mm | 205
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_codec/helpers.cc | 90
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_codec/helpers.h | 47
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_codec/nalu_rewriter.cc | 327
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_codec/nalu_rewriter.h | 113
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_frame_buffer/RTCCVPixelBuffer.h | 52
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_frame_buffer/RTCCVPixelBuffer.mm | 367
70 files changed, 9123 insertions, 0 deletions
diff --git a/third_party/libwebrtc/sdk/objc/components/audio/RTCAudioDevice.h b/third_party/libwebrtc/sdk/objc/components/audio/RTCAudioDevice.h
new file mode 100644
index 0000000000..f445825ff0
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/audio/RTCAudioDevice.h
@@ -0,0 +1,308 @@
+/*
+ * Copyright 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <AudioUnit/AudioUnit.h>
+#import <Foundation/Foundation.h>
+
+#import "RTCMacros.h"
+
+NS_ASSUME_NONNULL_BEGIN
+
+typedef OSStatus (^RTC_OBJC_TYPE(RTCAudioDeviceGetPlayoutDataBlock))(
+ AudioUnitRenderActionFlags *_Nonnull actionFlags,
+ const AudioTimeStamp *_Nonnull timestamp,
+ NSInteger inputBusNumber,
+ UInt32 frameCount,
+ AudioBufferList *_Nonnull outputData);
+
+typedef OSStatus (^RTC_OBJC_TYPE(RTCAudioDeviceRenderRecordedDataBlock))(
+ AudioUnitRenderActionFlags *_Nonnull actionFlags,
+ const AudioTimeStamp *_Nonnull timestamp,
+ NSInteger inputBusNumber,
+ UInt32 frameCount,
+ AudioBufferList *_Nonnull inputData,
+ void *_Nullable renderContext);
+
+typedef OSStatus (^RTC_OBJC_TYPE(RTCAudioDeviceDeliverRecordedDataBlock))(
+ AudioUnitRenderActionFlags *_Nonnull actionFlags,
+ const AudioTimeStamp *_Nonnull timestamp,
+ NSInteger inputBusNumber,
+ UInt32 frameCount,
+ const AudioBufferList *_Nullable inputData,
+ void *_Nullable renderContext,
+ NS_NOESCAPE RTC_OBJC_TYPE(RTCAudioDeviceRenderRecordedDataBlock) _Nullable renderBlock);
+
+/**
+ * Delegate object provided by native ADM during RTCAudioDevice initialization.
+ * Provides blocks to poll playback audio samples from native ADM and to feed
+ * recorded audio samples into native ADM.
+ */
+RTC_OBJC_EXPORT @protocol RTC_OBJC_TYPE
+(RTCAudioDeviceDelegate)<NSObject>
+ /**
+ * Implementation of RTCAudioDevice should call this block to feed recorded PCM (16-bit integer)
+ * into native ADM. Stereo data is expected to be interleaved starting with the left channel.
+ * Either `inputData` pre-filled with audio data must be provided when the block is
+ * called, or `renderBlock` must be provided to fill the supplied audio buffer with recorded
+ * samples.
+ *
+ * NOTE: Implementation of RTCAudioDevice is expected to call the block on the same thread until
+ * `notifyAudioInputInterrupted` is called. Once `notifyAudioInputInterrupted` is called, the
+ * implementation can call the block on a different thread.
+ */
+ @property(readonly, nonnull)
+ RTC_OBJC_TYPE(RTCAudioDeviceDeliverRecordedDataBlock) deliverRecordedData;
+
+/**
+ * Provides the input sample rate preferred by native ADM.
+ */
+@property(readonly) double preferredInputSampleRate;
+
+/**
+ * Provides the input IO buffer duration preferred by native ADM.
+ */
+@property(readonly) NSTimeInterval preferredInputIOBufferDuration;
+
+/**
+ * Provides the output sample rate preferred by native ADM.
+ */
+@property(readonly) double preferredOutputSampleRate;
+
+/**
+ * Provides the output IO buffer duration preferred by native ADM.
+ */
+@property(readonly) NSTimeInterval preferredOutputIOBufferDuration;
+
+/**
+ * Implementation of RTCAudioDevice should call this block to request PCM (16-bit integer)
+ * from native ADM to play. Stereo data is interleaved starting with the left channel.
+ *
+ * NOTE: Implementation of RTCAudioDevice is expected to invoke this block on the
+ * same thread until `notifyAudioOutputInterrupted` is called. Once `notifyAudioOutputInterrupted`
+ * is called, the implementation can call the block from a different thread.
+ */
+@property(readonly, nonnull) RTC_OBJC_TYPE(RTCAudioDeviceGetPlayoutDataBlock) getPlayoutData;
+
+/**
+ * Notifies native ADM that some audio input parameters of RTCAudioDevice, such as
+ * sample rate, IO buffer duration, or IO latency, may have changed.
+ * Native ADM will adjust its audio input buffer to match current parameters of audio device.
+ *
+ * NOTE: Must be called within block executed via `dispatchAsync` or `dispatchSync`.
+ */
+- (void)notifyAudioInputParametersChange;
+
+/**
+ * Notifies native ADM that some audio output parameters of RTCAudioDevice, such as
+ * sample rate, IO buffer duration, or IO latency, may have changed.
+ * Native ADM will adjust its audio output buffer to match current parameters of audio device.
+ *
+ * NOTE: Must be called within block executed via `dispatchAsync` or `dispatchSync`.
+ */
+- (void)notifyAudioOutputParametersChange;
+
+/**
+ * Notifies native ADM that audio input is interrupted and further audio playout
+ * and recording might happen on a different thread.
+ *
+ * NOTE: Must be called within block executed via `dispatchAsync` or `dispatchSync`.
+ */
+- (void)notifyAudioInputInterrupted;
+
+/**
+ * Notifies native ADM that audio output is interrupted and further audio playout
+ * and recording might happen on a different thread.
+ *
+ * NOTE: Must be called within block executed via `dispatchAsync` or `dispatchSync`.
+ */
+- (void)notifyAudioOutputInterrupted;
+
+/**
+ * Asynchronously executes a block of code on the thread that owns the native ADM.
+ *
+ * NOTE: Intended to be used to invoke `notifyAudioInputParametersChange`,
+ * `notifyAudioOutputParametersChange`, `notifyAudioInputInterrupted`, and
+ * `notifyAudioOutputInterrupted` on the native ADM thread.
+ * An `RTCAudioDevice` implementation could also use it to tie mutations of
+ * underlying audio objects (AVAudioEngine, AudioUnit, etc.) to the native ADM
+ * thread, which is useful for handling events such as audio route changes that
+ * may alter audio parameters.
+ */
+- (void)dispatchAsync:(dispatch_block_t)block;
+
+/**
+ * Synchronously executes a block of code on the thread that owns the native ADM.
+ * Allows reentrancy.
+ *
+ * NOTE: Intended to be used to invoke `notifyAudioInputParametersChange`,
+ * `notifyAudioOutputParametersChange`, `notifyAudioInputInterrupted`, and
+ * `notifyAudioOutputInterrupted` on the native ADM thread, and to make sure those
+ * calls complete before `dispatchSync` returns. Useful when an `RTCAudioDevice`
+ * implementation ties mutations of underlying audio objects (AVAudioEngine,
+ * AudioUnit, etc.) to its own thread, to satisfy the requirement that native ADM
+ * audio parameters are kept in sync with the current audio parameters before
+ * audio is actually played or recorded.
+ */
+- (void)dispatchSync:(dispatch_block_t)block;
+
+@end
+
+/**
+ * Protocol to abstract platform-specific ways to implement playback and recording.
+ *
+ * NOTE: All members of the protocol are called by native ADM from the same thread
+ * between calls to `initializeWithDelegate` and `terminateDevice`.
+ * NOTE: Implementation is fully responsible for configuring the application's AVAudioSession.
+ * An example implementation of RTCAudioDevice: https://github.com/mstyura/RTCAudioDevice
+ * TODO(yura.yaroshevich): Implement custom RTCAudioDevice for AppRTCMobile demo app.
+ */
+RTC_OBJC_EXPORT @protocol RTC_OBJC_TYPE
+(RTCAudioDevice)<NSObject>
+
+ /**
+ * Indicates current sample rate of audio recording. Changes to this property
+ * must be notified back to native ADM via `-[RTCAudioDeviceDelegate
+ * notifyAudioInputParametersChange]`.
+ */
+ @property(readonly) double deviceInputSampleRate;
+
+/**
+ * Indicates current size of record buffer. Changes to this property
+ * must be notified back to native ADM via `-[RTCAudioDeviceDelegate notifyAudioInputParametersChange]`.
+ */
+@property(readonly) NSTimeInterval inputIOBufferDuration;
+
+/**
+ * Indicates current number of recorded audio channels. Changes to this property
+ * must be notified back to native ADM via `-[RTCAudioDeviceDelegate notifyAudioInputParametersChange]`.
+ */
+@property(readonly) NSInteger inputNumberOfChannels;
+
+/**
+ * Indicates current input latency
+ */
+@property(readonly) NSTimeInterval inputLatency;
+
+/**
+ * Indicates current sample rate of audio playback. Changes to this property
+ * must be notified back to native ADM via `-[RTCAudioDeviceDelegate notifyAudioOutputParametersChange]`.
+ */
+@property(readonly) double deviceOutputSampleRate;
+
+/**
+ * Indicates current size of playback buffer. Changes to this property
+ * must be notified back to native ADM via `-[RTCAudioDeviceDelegate notifyAudioOutputParametersChange]`.
+ */
+@property(readonly) NSTimeInterval outputIOBufferDuration;
+
+/**
+ * Indicates current number of playback audio channels. Changes to this property
+ * must be notified back to native ADM via `-[RTCAudioDeviceDelegate notifyAudioOutputParametersChange]`.
+ */
+@property(readonly) NSInteger outputNumberOfChannels;
+
+/**
+ * Indicates current output latency
+ */
+@property(readonly) NSTimeInterval outputLatency;
+
+/**
+ * Indicates whether an invocation of `initializeWithDelegate` is required before use of
+ * RTCAudioDevice. YES indicates that `initializeWithDelegate` was called earlier without a
+ * subsequent call to `terminateDevice`. NO indicates that either `initializeWithDelegate` was
+ * not called or `terminateDevice` was called.
+ */
+@property(readonly) BOOL isInitialized;
+
+/**
+ * Initializes RTCAudioDevice with RTCAudioDeviceDelegate.
+ * Implementation must return YES if RTCAudioDevice was initialized successfully and NO otherwise.
+ */
+- (BOOL)initializeWithDelegate:(id<RTC_OBJC_TYPE(RTCAudioDeviceDelegate)>)delegate;
+
+/**
+ * De-initializes RTCAudioDevice. Implementation should forget about `delegate` provided in
+ * `initializeWithDelegate`.
+ */
+- (BOOL)terminateDevice;
+
+/**
+ * Property to indicate whether a call to `initializePlayout` is required before invoking
+ * `startPlayout`. YES indicates that `initializePlayout` was successfully invoked earlier or is
+ * not necessary; NO indicates that an `initializePlayout` invocation is required.
+ */
+@property(readonly) BOOL isPlayoutInitialized;
+
+/**
+ * Prepares RTCAudioDevice to play audio.
+ * Called by native ADM before invocation of `startPlayout`.
+ * Implementation is expected to return YES in case of successful playout initialization and NO
+ * otherwise.
+ */
+- (BOOL)initializePlayout;
+
+/**
+ * Property to indicate if RTCAudioDevice should be playing according to
+ * earlier calls to `startPlayout` and `stopPlayout`.
+ */
+@property(readonly) BOOL isPlaying;
+
+/**
+ * Method is called when native ADM wants to play audio.
+ * Implementation is expected to return YES if the playback start request
+ * was successfully handled and NO otherwise.
+ */
+- (BOOL)startPlayout;
+
+/**
+ * Method is called when native ADM no longer needs to play audio.
+ * Implementation is expected to return YES if the playback stop request
+ * was successfully handled and NO otherwise.
+ */
+- (BOOL)stopPlayout;
+
+/**
+ * Property to indicate whether a call to `initializeRecording` is required before invoking
+ * `startRecording`. YES indicates that `initializeRecording` was successfully invoked earlier or
+ * is not necessary; NO indicates that an `initializeRecording` invocation is required.
+ */
+@property(readonly) BOOL isRecordingInitialized;
+
+/**
+ * Prepares RTCAudioDevice to record audio.
+ * Called by native ADM before invocation of `startRecording`.
+ * Implementation may use this method to prepare resources required to record audio.
+ * Implementation is expected to return YES in case of successful recording initialization and NO
+ * otherwise.
+ */
+- (BOOL)initializeRecording;
+
+/**
+ * Property to indicate if RTCAudioDevice should record audio according to
+ * earlier calls to `startRecording` and `stopRecording`.
+ */
+@property(readonly) BOOL isRecording;
+
+/**
+ * Method is called when native ADM wants to record audio.
+ * Implementation is expected to return YES if the recording start request
+ * was successfully handled and NO otherwise.
+ */
+- (BOOL)startRecording;
+
+/**
+ * Method is called when native ADM no longer needs to record audio.
+ * Implementation is expected to return YES if the recording stop request
+ * was successfully handled and NO otherwise.
+ */
+- (BOOL)stopRecording;
+
+@end
+
+NS_ASSUME_NONNULL_END
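As a usage illustration, not part of this commit, here is a minimal sketch of how an RTCAudioDevice implementation might pull playout audio from native ADM inside an AudioUnit output render callback. The callback name is hypothetical, and `inRefCon` is assumed to carry the delegate received in `initializeWithDelegate:`.

  // Hypothetical sketch: wiring the delegate's getPlayoutData block into an
  // AudioUnit output render callback.
  static OSStatus PlayoutRenderCallback(void *inRefCon,
                                        AudioUnitRenderActionFlags *actionFlags,
                                        const AudioTimeStamp *timestamp,
                                        UInt32 busNumber,
                                        UInt32 frameCount,
                                        AudioBufferList *outputData) {
    // The delegate provided by native ADM in `initializeWithDelegate:`.
    id<RTC_OBJC_TYPE(RTCAudioDeviceDelegate)> delegate =
        (__bridge id<RTC_OBJC_TYPE(RTCAudioDeviceDelegate)>)inRefCon;
    // Ask native ADM to fill `outputData` with interleaved 16-bit PCM.
    return delegate.getPlayoutData(
        actionFlags, timestamp, busNumber, frameCount, outputData);
  }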
diff --git a/third_party/libwebrtc/sdk/objc/components/audio/RTCAudioSession+Configuration.mm b/third_party/libwebrtc/sdk/objc/components/audio/RTCAudioSession+Configuration.mm
new file mode 100644
index 0000000000..449f31e9dd
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/audio/RTCAudioSession+Configuration.mm
@@ -0,0 +1,176 @@
+/*
+ * Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "RTCAudioSession+Private.h"
+#import "RTCAudioSessionConfiguration.h"
+
+#import "base/RTCLogging.h"
+
+@implementation RTC_OBJC_TYPE (RTCAudioSession)
+(Configuration)
+
+ - (BOOL)setConfiguration : (RTC_OBJC_TYPE(RTCAudioSessionConfiguration) *)configuration error
+ : (NSError **)outError {
+ return [self setConfiguration:configuration
+ active:NO
+ shouldSetActive:NO
+ error:outError];
+}
+
+- (BOOL)setConfiguration:(RTC_OBJC_TYPE(RTCAudioSessionConfiguration) *)configuration
+ active:(BOOL)active
+ error:(NSError **)outError {
+ return [self setConfiguration:configuration
+ active:active
+ shouldSetActive:YES
+ error:outError];
+}
+
+#pragma mark - Private
+
+- (BOOL)setConfiguration:(RTC_OBJC_TYPE(RTCAudioSessionConfiguration) *)configuration
+ active:(BOOL)active
+ shouldSetActive:(BOOL)shouldSetActive
+ error:(NSError **)outError {
+ NSParameterAssert(configuration);
+ if (outError) {
+ *outError = nil;
+ }
+
+ // Provide an error even if there isn't one so we can log it. We will not
+ // return immediately on error in this function and instead try to set
+ // everything we can.
+ NSError *error = nil;
+
+ if (self.category != configuration.category ||
+ self.categoryOptions != configuration.categoryOptions) {
+ NSError *categoryError = nil;
+ if (![self setCategory:configuration.category
+ withOptions:configuration.categoryOptions
+ error:&categoryError]) {
+ RTCLogError(@"Failed to set category: %@",
+ categoryError.localizedDescription);
+ error = categoryError;
+ } else {
+ RTCLog(@"Set category to: %@", configuration.category);
+ }
+ }
+
+ if (self.mode != configuration.mode) {
+ NSError *modeError = nil;
+ if (![self setMode:configuration.mode error:&modeError]) {
+ RTCLogError(@"Failed to set mode: %@",
+ modeError.localizedDescription);
+ error = modeError;
+ } else {
+ RTCLog(@"Set mode to: %@", configuration.mode);
+ }
+ }
+
+ // Sometimes category options don't stick after setting mode.
+ if (self.categoryOptions != configuration.categoryOptions) {
+ NSError *categoryError = nil;
+ if (![self setCategory:configuration.category
+ withOptions:configuration.categoryOptions
+ error:&categoryError]) {
+ RTCLogError(@"Failed to set category options: %@",
+ categoryError.localizedDescription);
+ error = categoryError;
+ } else {
+ RTCLog(@"Set category options to: %ld",
+ (long)configuration.categoryOptions);
+ }
+ }
+
+ if (self.preferredSampleRate != configuration.sampleRate) {
+ NSError *sampleRateError = nil;
+ if (![self setPreferredSampleRate:configuration.sampleRate
+ error:&sampleRateError]) {
+ RTCLogError(@"Failed to set preferred sample rate: %@",
+ sampleRateError.localizedDescription);
+ if (!self.ignoresPreferredAttributeConfigurationErrors) {
+ error = sampleRateError;
+ }
+ } else {
+ RTCLog(@"Set preferred sample rate to: %.2f",
+ configuration.sampleRate);
+ }
+ }
+
+ if (self.preferredIOBufferDuration != configuration.ioBufferDuration) {
+ NSError *bufferDurationError = nil;
+ if (![self setPreferredIOBufferDuration:configuration.ioBufferDuration
+ error:&bufferDurationError]) {
+ RTCLogError(@"Failed to set preferred IO buffer duration: %@",
+ bufferDurationError.localizedDescription);
+ if (!self.ignoresPreferredAttributeConfigurationErrors) {
+ error = bufferDurationError;
+ }
+ } else {
+ RTCLog(@"Set preferred IO buffer duration to: %f",
+ configuration.ioBufferDuration);
+ }
+ }
+
+ if (shouldSetActive) {
+ NSError *activeError = nil;
+ if (![self setActive:active error:&activeError]) {
+ RTCLogError(@"Failed to setActive to %d: %@",
+ active, activeError.localizedDescription);
+ error = activeError;
+ }
+ }
+
+ if (self.isActive &&
+ // TODO(tkchin): Figure out which category/mode numChannels is valid for.
+ [self.mode isEqualToString:AVAudioSessionModeVoiceChat]) {
+ // Try to set the preferred number of hardware audio channels. These calls
+ // must be done after setting the audio session’s category and mode and
+ // activating the session.
+ NSInteger inputNumberOfChannels = configuration.inputNumberOfChannels;
+ if (self.inputNumberOfChannels != inputNumberOfChannels) {
+ NSError *inputChannelsError = nil;
+ if (![self setPreferredInputNumberOfChannels:inputNumberOfChannels
+ error:&inputChannelsError]) {
+ RTCLogError(@"Failed to set preferred input number of channels: %@",
+ inputChannelsError.localizedDescription);
+ if (!self.ignoresPreferredAttributeConfigurationErrors) {
+ error = inputChannelsError;
+ }
+ } else {
+ RTCLog(@"Set input number of channels to: %ld",
+ (long)inputNumberOfChannels);
+ }
+ }
+ NSInteger outputNumberOfChannels = configuration.outputNumberOfChannels;
+ if (self.outputNumberOfChannels != outputNumberOfChannels) {
+ NSError *outputChannelsError = nil;
+ if (![self setPreferredOutputNumberOfChannels:outputNumberOfChannels
+ error:&outputChannelsError]) {
+ RTCLogError(@"Failed to set preferred output number of channels: %@",
+ outputChannelsError.localizedDescription);
+ if (!self.ignoresPreferredAttributeConfigurationErrors) {
+ error = outputChannelsError;
+ }
+ } else {
+ RTCLog(@"Set output number of channels to: %ld",
+ (long)outputNumberOfChannels);
+ }
+ }
+ }
+
+ if (outError) {
+ *outError = error;
+ }
+
+ return error == nil;
+}
+
+@end
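A usage sketch for the category above, illustrative only: the configuration values are examples, and the writable properties of RTCAudioSessionConfiguration are assumed from its header listed in the diffstat. Note that setConfiguration attempts every property even when an earlier one fails and reports only the last error, as implemented above.

  RTC_OBJC_TYPE(RTCAudioSession) *session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance];
  RTC_OBJC_TYPE(RTCAudioSessionConfiguration) *config =
      [[RTC_OBJC_TYPE(RTCAudioSessionConfiguration) alloc] init];
  config.category = AVAudioSessionCategoryPlayAndRecord;  // Example values.
  config.mode = AVAudioSessionModeVoiceChat;

  [session lockForConfiguration];
  NSError *error = nil;
  if (![session setConfiguration:config active:YES error:&error]) {
    RTCLogError(@"Failed to configure audio session: %@", error.localizedDescription);
  }
  [session unlockForConfiguration];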
diff --git a/third_party/libwebrtc/sdk/objc/components/audio/RTCAudioSession+Private.h b/third_party/libwebrtc/sdk/objc/components/audio/RTCAudioSession+Private.h
new file mode 100644
index 0000000000..2be1b9fb3d
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/audio/RTCAudioSession+Private.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "RTCAudioSession.h"
+
+NS_ASSUME_NONNULL_BEGIN
+
+@class RTC_OBJC_TYPE(RTCAudioSessionConfiguration);
+
+@interface RTC_OBJC_TYPE (RTCAudioSession)
+()
+
+ /** Number of times setActive:YES has succeeded without a balanced call to
+ * setActive:NO.
+ */
+ @property(nonatomic, readonly) int activationCount;
+
+/** The number of times `beginWebRTCSession` was called without a balanced call
+ * to `endWebRTCSession`.
+ */
+@property(nonatomic, readonly) int webRTCSessionCount;
+
+/** Convenience BOOL that checks useManualAudio and isAudioEnabled. */
+@property(readonly) BOOL canPlayOrRecord;
+
+/** Tracks whether we have been sent an interruption event that hasn't been matched by either an
+ * interruption end event or a foreground event.
+ */
+@property(nonatomic, assign) BOOL isInterrupted;
+
+/** Adds the delegate to the list of delegates, and places it at the front of
+ * the list. This delegate will be notified before other delegates of
+ * audio events.
+ */
+- (void)pushDelegate:(id<RTC_OBJC_TYPE(RTCAudioSessionDelegate)>)delegate;
+
+/** Signals RTCAudioSession that a WebRTC session is about to begin and
+ * audio configuration is needed. Will configure the audio session for WebRTC
+ * if not already configured and if configuration is not delayed.
+ * Successful calls must be balanced by a call to endWebRTCSession.
+ */
+- (BOOL)beginWebRTCSession:(NSError **)outError;
+
+/** Signals RTCAudioSession that a WebRTC session is about to end and audio
+ * unconfiguration is needed. Will unconfigure the audio session for WebRTC
+ * if this is the last unmatched call and if configuration is not delayed.
+ */
+- (BOOL)endWebRTCSession:(NSError **)outError;
+
+/** Configure the audio session for WebRTC. This call will fail if the session
+ * is already configured. On other failures, we will attempt to restore the
+ * previously used audio session configuration.
+ * `lockForConfiguration` must be called first.
+ * Successful calls to configureWebRTCSession must be matched by calls to
+ * `unconfigureWebRTCSession`.
+ */
+- (BOOL)configureWebRTCSession:(NSError **)outError;
+
+/** Unconfigures the session for WebRTC. This will attempt to restore the
+ * audio session to the settings used before `configureWebRTCSession` was
+ * called.
+ * `lockForConfiguration` must be called first.
+ */
+- (BOOL)unconfigureWebRTCSession:(NSError **)outError;
+
+/** Returns a configuration error with the given description. */
+- (NSError *)configurationErrorWithDescription:(NSString *)description;
+
+/** Notifies the receiver that a playout glitch was detected. */
+- (void)notifyDidDetectPlayoutGlitch:(int64_t)totalNumberOfGlitches;
+
+/** Notifies the receiver that there was an error when starting an audio unit. */
+- (void)notifyAudioUnitStartFailedWithError:(OSStatus)error;
+
+// Properties and methods for tests.
+- (void)notifyDidBeginInterruption;
+- (void)notifyDidEndInterruptionWithShouldResumeSession:(BOOL)shouldResumeSession;
+- (void)notifyDidChangeRouteWithReason:(AVAudioSessionRouteChangeReason)reason
+ previousRoute:(AVAudioSessionRouteDescription *)previousRoute;
+- (void)notifyMediaServicesWereLost;
+- (void)notifyMediaServicesWereReset;
+- (void)notifyDidChangeCanPlayOrRecord:(BOOL)canPlayOrRecord;
+- (void)notifyDidStartPlayOrRecord;
+- (void)notifyDidStopPlayOrRecord;
+
+@end
+
+NS_ASSUME_NONNULL_END
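The `activationCount` bookkeeping declared above implies that activations must be balanced. A sketch of the expected call pattern, illustrative and with error handling kept minimal:

  RTC_OBJC_TYPE(RTCAudioSession) *session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance];
  [session lockForConfiguration];
  NSError *error = nil;
  if ([session setActive:YES error:&error]) {  // activationCount becomes 1.
    // ... play or record audio ...
    [session setActive:NO error:&error];       // Balanced call; the session deactivates.
  }
  [session unlockForConfiguration];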
diff --git a/third_party/libwebrtc/sdk/objc/components/audio/RTCAudioSession.h b/third_party/libwebrtc/sdk/objc/components/audio/RTCAudioSession.h
new file mode 100644
index 0000000000..3b83b27ba5
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/audio/RTCAudioSession.h
@@ -0,0 +1,265 @@
+/*
+ * Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <AVFoundation/AVFoundation.h>
+#import <Foundation/Foundation.h>
+
+#import "RTCMacros.h"
+
+NS_ASSUME_NONNULL_BEGIN
+
+extern NSString *const kRTCAudioSessionErrorDomain;
+/** A method that requires the lock was called while the lock was not held. */
+extern NSInteger const kRTCAudioSessionErrorLockRequired;
+/** Unknown configuration error occurred. */
+extern NSInteger const kRTCAudioSessionErrorConfiguration;
+
+@class RTC_OBJC_TYPE(RTCAudioSession);
+@class RTC_OBJC_TYPE(RTCAudioSessionConfiguration);
+
+// Surfaces AVAudioSession events. WebRTC will listen directly for notifications
+// from AVAudioSession and handle them before calling these delegate methods,
+// at which point applications can perform additional processing if required.
+RTC_OBJC_EXPORT
+@protocol RTC_OBJC_TYPE
+(RTCAudioSessionDelegate)<NSObject>
+
+ @optional
+/** Called on a system notification thread when AVAudioSession starts an
+ * interruption event.
+ */
+- (void)audioSessionDidBeginInterruption:(RTC_OBJC_TYPE(RTCAudioSession) *)session;
+
+/** Called on a system notification thread when AVAudioSession ends an
+ * interruption event.
+ */
+- (void)audioSessionDidEndInterruption:(RTC_OBJC_TYPE(RTCAudioSession) *)session
+ shouldResumeSession:(BOOL)shouldResumeSession;
+
+/** Called on a system notification thread when AVAudioSession changes the
+ * route.
+ */
+- (void)audioSessionDidChangeRoute:(RTC_OBJC_TYPE(RTCAudioSession) *)session
+ reason:(AVAudioSessionRouteChangeReason)reason
+ previousRoute:(AVAudioSessionRouteDescription *)previousRoute;
+
+/** Called on a system notification thread when AVAudioSession media server
+ * terminates.
+ */
+- (void)audioSessionMediaServerTerminated:(RTC_OBJC_TYPE(RTCAudioSession) *)session;
+
+/** Called on a system notification thread when AVAudioSession media server
+ * restarts.
+ */
+- (void)audioSessionMediaServerReset:(RTC_OBJC_TYPE(RTCAudioSession) *)session;
+
+// TODO(tkchin): Maybe handle SilenceSecondaryAudioHintNotification.
+
+- (void)audioSession:(RTC_OBJC_TYPE(RTCAudioSession) *)session
+ didChangeCanPlayOrRecord:(BOOL)canPlayOrRecord;
+
+/** Called on a WebRTC thread when the audio device is notified to begin
+ * playback or recording.
+ */
+- (void)audioSessionDidStartPlayOrRecord:(RTC_OBJC_TYPE(RTCAudioSession) *)session;
+
+/** Called on a WebRTC thread when the audio device is notified to stop
+ * playback or recording.
+ */
+- (void)audioSessionDidStopPlayOrRecord:(RTC_OBJC_TYPE(RTCAudioSession) *)session;
+
+/** Called when the AVAudioSession output volume value changes. */
+- (void)audioSession:(RTC_OBJC_TYPE(RTCAudioSession) *)audioSession
+ didChangeOutputVolume:(float)outputVolume;
+
+/** Called when the audio device detects a playout glitch. The argument is the
+ * number of glitches detected so far in the current audio playout session.
+ */
+- (void)audioSession:(RTC_OBJC_TYPE(RTCAudioSession) *)audioSession
+ didDetectPlayoutGlitch:(int64_t)totalNumberOfGlitches;
+
+/** Called when the audio session is about to change the active state.
+ */
+- (void)audioSession:(RTC_OBJC_TYPE(RTCAudioSession) *)audioSession willSetActive:(BOOL)active;
+
+/** Called after the audio session successfully changed the active state.
+ */
+- (void)audioSession:(RTC_OBJC_TYPE(RTCAudioSession) *)audioSession didSetActive:(BOOL)active;
+
+/** Called after the audio session failed to change the active state.
+ */
+- (void)audioSession:(RTC_OBJC_TYPE(RTCAudioSession) *)audioSession
+ failedToSetActive:(BOOL)active
+ error:(NSError *)error;
+
+- (void)audioSession:(RTC_OBJC_TYPE(RTCAudioSession) *)audioSession
+ audioUnitStartFailedWithError:(NSError *)error;
+
+@end
+
+/** This is a protocol used to inform RTCAudioSession when the audio session
+ * activation state has changed outside of RTCAudioSession. The current known use
+ * case of this is when CallKit activates the audio session for the application.
+ */
+RTC_OBJC_EXPORT
+@protocol RTC_OBJC_TYPE
+(RTCAudioSessionActivationDelegate)<NSObject>
+
+ /** Called when the audio session is activated outside of the app by iOS. */
+ - (void)audioSessionDidActivate : (AVAudioSession *)session;
+
+/** Called when the audio session is deactivated outside of the app by iOS. */
+- (void)audioSessionDidDeactivate:(AVAudioSession *)session;
+
+@end
+
+/** Proxy class for AVAudioSession that adds a locking mechanism similar to
+ * AVCaptureDevice. This is used so that interleaving configurations between
+ * WebRTC and the application layer are avoided.
+ *
+ * RTCAudioSession also coordinates activation so that the audio session is
+ * activated only once. See `setActive:error:`.
+ */
+RTC_OBJC_EXPORT
+@interface RTC_OBJC_TYPE (RTCAudioSession) : NSObject <RTC_OBJC_TYPE(RTCAudioSessionActivationDelegate)>
+
+/** Convenience property to access the AVAudioSession singleton. Callers should
+ * not call setters on AVAudioSession directly, but other method invocations
+ * are fine.
+ */
+@property(nonatomic, readonly) AVAudioSession *session;
+
+/** Our best guess at whether the session is active based on results of calls to
+ * AVAudioSession.
+ */
+@property(nonatomic, readonly) BOOL isActive;
+
+/** If YES, WebRTC will not initialize the audio unit automatically when an
+ * audio track is ready for playout or recording. Instead, applications should
+ * call setIsAudioEnabled. If NO, WebRTC will initialize the audio unit
+ * as soon as an audio track is ready for playout or recording.
+ */
+@property(nonatomic, assign) BOOL useManualAudio;
+
+/** This property is only effective if useManualAudio is YES.
+ * Represents permission for WebRTC to initialize the VoIP audio unit.
+ * When set to NO, if the VoIP audio unit used by WebRTC is active, it will be
+ * stopped and uninitialized. This will stop incoming and outgoing audio.
+ * When set to YES, WebRTC will initialize and start the audio unit when it is
+ * needed (e.g. due to establishing an audio connection).
+ * This property was introduced to work around an issue where if an AVPlayer is
+ * playing audio while the VoIP audio unit is initialized, its audio would be
+ * either cut off completely or played at a reduced volume. By preventing
+ * the audio unit from being initialized until after the audio has completed,
+ * we are able to prevent the abrupt cutoff.
+ */
+@property(nonatomic, assign) BOOL isAudioEnabled;
+
+// Proxy properties.
+@property(readonly) NSString *category;
+@property(readonly) AVAudioSessionCategoryOptions categoryOptions;
+@property(readonly) NSString *mode;
+@property(readonly) BOOL secondaryAudioShouldBeSilencedHint;
+@property(readonly) AVAudioSessionRouteDescription *currentRoute;
+@property(readonly) NSInteger maximumInputNumberOfChannels;
+@property(readonly) NSInteger maximumOutputNumberOfChannels;
+@property(readonly) float inputGain;
+@property(readonly) BOOL inputGainSettable;
+@property(readonly) BOOL inputAvailable;
+@property(readonly, nullable) NSArray<AVAudioSessionDataSourceDescription *> *inputDataSources;
+@property(readonly, nullable) AVAudioSessionDataSourceDescription *inputDataSource;
+@property(readonly, nullable) NSArray<AVAudioSessionDataSourceDescription *> *outputDataSources;
+@property(readonly, nullable) AVAudioSessionDataSourceDescription *outputDataSource;
+@property(readonly) double sampleRate;
+@property(readonly) double preferredSampleRate;
+@property(readonly) NSInteger inputNumberOfChannels;
+@property(readonly) NSInteger outputNumberOfChannels;
+@property(readonly) float outputVolume;
+@property(readonly) NSTimeInterval inputLatency;
+@property(readonly) NSTimeInterval outputLatency;
+@property(readonly) NSTimeInterval IOBufferDuration;
+@property(readonly) NSTimeInterval preferredIOBufferDuration;
+
+/**
+ When YES, calls to -setConfiguration:error: and -setConfiguration:active:error: ignore errors in
+ configuring the audio session's "preferred" attributes (e.g. preferredInputNumberOfChannels).
+ Typically, configurations to preferred attributes are optimizations, and ignoring this type of
+ configuration error allows code flow to continue along the happy path when these optimizations are
+ not available. The default value of this property is NO.
+ */
+@property(nonatomic) BOOL ignoresPreferredAttributeConfigurationErrors;
+
+/** Default constructor. */
++ (instancetype)sharedInstance;
+- (instancetype)init NS_UNAVAILABLE;
+
+/** Adds a delegate, which is held weakly. */
+- (void)addDelegate:(id<RTC_OBJC_TYPE(RTCAudioSessionDelegate)>)delegate;
+/** Removes an added delegate. */
+- (void)removeDelegate:(id<RTC_OBJC_TYPE(RTCAudioSessionDelegate)>)delegate;
+
+/** Request exclusive access to the audio session for configuration. This call
+ * will block if the lock is held by another object.
+ */
+- (void)lockForConfiguration;
+/** Relinquishes exclusive access to the audio session. */
+- (void)unlockForConfiguration;
+
+/** If `active`, activates the audio session if it isn't already active.
+ * Successful calls must be balanced with a setActive:NO when activation is no
+ * longer required. If not `active`, deactivates the audio session if one is
+ * active and this is the last balanced call. When deactivating, the
+ * AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation option is passed to
+ * AVAudioSession.
+ */
+- (BOOL)setActive:(BOOL)active error:(NSError **)outError;
+
+// The following methods are proxies for the associated methods on
+// AVAudioSession. `lockForConfiguration` must be called before using them
+// otherwise they will fail with kRTCAudioSessionErrorLockRequired.
+
+- (BOOL)setCategory:(NSString *)category
+ withOptions:(AVAudioSessionCategoryOptions)options
+ error:(NSError **)outError;
+- (BOOL)setMode:(NSString *)mode error:(NSError **)outError;
+- (BOOL)setInputGain:(float)gain error:(NSError **)outError;
+- (BOOL)setPreferredSampleRate:(double)sampleRate error:(NSError **)outError;
+- (BOOL)setPreferredIOBufferDuration:(NSTimeInterval)duration error:(NSError **)outError;
+- (BOOL)setPreferredInputNumberOfChannels:(NSInteger)count error:(NSError **)outError;
+- (BOOL)setPreferredOutputNumberOfChannels:(NSInteger)count error:(NSError **)outError;
+- (BOOL)overrideOutputAudioPort:(AVAudioSessionPortOverride)portOverride error:(NSError **)outError;
+- (BOOL)setPreferredInput:(AVAudioSessionPortDescription *)inPort error:(NSError **)outError;
+- (BOOL)setInputDataSource:(AVAudioSessionDataSourceDescription *)dataSource
+ error:(NSError **)outError;
+- (BOOL)setOutputDataSource:(AVAudioSessionDataSourceDescription *)dataSource
+ error:(NSError **)outError;
+@end
+
+@interface RTC_OBJC_TYPE (RTCAudioSession)
+(Configuration)
+
+ /** Applies the configuration to the current session. Attempts to set all
+ * properties even if previous ones fail. Only the last error will be
+ * returned.
+ * `lockForConfiguration` must be called first.
+ */
+ - (BOOL)setConfiguration : (RTC_OBJC_TYPE(RTCAudioSessionConfiguration) *)configuration error
+ : (NSError **)outError;
+
+/** Convenience method that calls both setConfiguration and setActive.
+ * `lockForConfiguration` must be called first.
+ */
+- (BOOL)setConfiguration:(RTC_OBJC_TYPE(RTCAudioSessionConfiguration) *)configuration
+ active:(BOOL)active
+ error:(NSError **)outError;
+
+@end
+
+NS_ASSUME_NONNULL_END
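Because every RTCAudioSessionDelegate method is optional, an observer can implement only the events it needs. A minimal sketch follows; the class name is hypothetical and not part of this commit.

  @interface InterruptionLogger : NSObject <RTC_OBJC_TYPE(RTCAudioSessionDelegate)>
  @end

  @implementation InterruptionLogger
  - (void)audioSessionDidBeginInterruption:(RTC_OBJC_TYPE(RTCAudioSession) *)session {
    RTCLog(@"Interruption began; audio is paused.");
  }
  - (void)audioSessionDidEndInterruption:(RTC_OBJC_TYPE(RTCAudioSession) *)session
                     shouldResumeSession:(BOOL)shouldResumeSession {
    RTCLog(@"Interruption ended; shouldResume=%d", shouldResumeSession);
  }
  @end

  // Registration; delegates are held weakly, so keep a strong reference elsewhere.
  InterruptionLogger *logger = [[InterruptionLogger alloc] init];
  [[RTC_OBJC_TYPE(RTCAudioSession) sharedInstance] addDelegate:logger];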
diff --git a/third_party/libwebrtc/sdk/objc/components/audio/RTCAudioSession.mm b/third_party/libwebrtc/sdk/objc/components/audio/RTCAudioSession.mm
new file mode 100644
index 0000000000..550a426d36
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/audio/RTCAudioSession.mm
@@ -0,0 +1,1000 @@
+/*
+ * Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "RTCAudioSession+Private.h"
+
+#import <UIKit/UIKit.h>
+
+#include <atomic>
+#include <vector>
+
+#include "absl/base/attributes.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/synchronization/mutex.h"
+
+#import "RTCAudioSessionConfiguration.h"
+#import "base/RTCLogging.h"
+
+#if !defined(ABSL_HAVE_THREAD_LOCAL)
+#error ABSL_HAVE_THREAD_LOCAL should be defined for MacOS / iOS Targets.
+#endif
+
+NSString *const kRTCAudioSessionErrorDomain = @"org.webrtc.RTC_OBJC_TYPE(RTCAudioSession)";
+NSInteger const kRTCAudioSessionErrorLockRequired = -1;
+NSInteger const kRTCAudioSessionErrorConfiguration = -2;
+NSString * const kRTCAudioSessionOutputVolumeSelector = @"outputVolume";
+
+namespace {
+// Since webrtc::Mutex is not a reentrant lock and cannot check if the mutex is locked,
+// we need a separate variable to check that the mutex is locked in the RTCAudioSession.
+ABSL_CONST_INIT thread_local bool mutex_locked = false;
+} // namespace
+
+@interface RTC_OBJC_TYPE (RTCAudioSession)
+() @property(nonatomic,
+ readonly) std::vector<__weak id<RTC_OBJC_TYPE(RTCAudioSessionDelegate)> > delegates;
+@end
+
+// This class needs to be thread-safe because it is accessed from many threads.
+// TODO(tkchin): Consider more granular locking. We're not expecting a lot of
+// lock contention so coarse locks should be fine for now.
+@implementation RTC_OBJC_TYPE (RTCAudioSession) {
+ webrtc::Mutex _mutex;
+ AVAudioSession *_session;
+ std::atomic<int> _activationCount;
+ std::atomic<int> _webRTCSessionCount;
+ BOOL _isActive;
+ BOOL _useManualAudio;
+ BOOL _isAudioEnabled;
+ BOOL _canPlayOrRecord;
+ BOOL _isInterrupted;
+}
+
+@synthesize session = _session;
+@synthesize delegates = _delegates;
+@synthesize ignoresPreferredAttributeConfigurationErrors =
+ _ignoresPreferredAttributeConfigurationErrors;
+
++ (instancetype)sharedInstance {
+ static dispatch_once_t onceToken;
+ static RTC_OBJC_TYPE(RTCAudioSession) *sharedInstance = nil;
+ dispatch_once(&onceToken, ^{
+ sharedInstance = [[self alloc] init];
+ });
+ return sharedInstance;
+}
+
+- (instancetype)init {
+ return [self initWithAudioSession:[AVAudioSession sharedInstance]];
+}
+
+/** This initializer provides a way for unit tests to inject a fake/mock audio session. */
+- (instancetype)initWithAudioSession:(id)audioSession {
+ if (self = [super init]) {
+ _session = audioSession;
+
+ NSNotificationCenter *center = [NSNotificationCenter defaultCenter];
+ [center addObserver:self
+ selector:@selector(handleInterruptionNotification:)
+ name:AVAudioSessionInterruptionNotification
+ object:nil];
+ [center addObserver:self
+ selector:@selector(handleRouteChangeNotification:)
+ name:AVAudioSessionRouteChangeNotification
+ object:nil];
+ [center addObserver:self
+ selector:@selector(handleMediaServicesWereLost:)
+ name:AVAudioSessionMediaServicesWereLostNotification
+ object:nil];
+ [center addObserver:self
+ selector:@selector(handleMediaServicesWereReset:)
+ name:AVAudioSessionMediaServicesWereResetNotification
+ object:nil];
+ // Posted on the main thread when the primary audio from other applications
+ // starts and stops. Foreground applications may use this notification as a
+ // hint to enable or disable audio that is secondary.
+ [center addObserver:self
+ selector:@selector(handleSilenceSecondaryAudioHintNotification:)
+ name:AVAudioSessionSilenceSecondaryAudioHintNotification
+ object:nil];
+ // Also track the foreground event in order to deal with the interruption-ended situation.
+ [center addObserver:self
+ selector:@selector(handleApplicationDidBecomeActive:)
+ name:UIApplicationDidBecomeActiveNotification
+ object:nil];
+ [_session addObserver:self
+ forKeyPath:kRTCAudioSessionOutputVolumeSelector
+ options:NSKeyValueObservingOptionNew | NSKeyValueObservingOptionOld
+ context:(__bridge void *)RTC_OBJC_TYPE(RTCAudioSession).class];
+
+ RTCLog(@"RTC_OBJC_TYPE(RTCAudioSession) (%p): init.", self);
+ }
+ return self;
+}
+
+- (void)dealloc {
+ [[NSNotificationCenter defaultCenter] removeObserver:self];
+ [_session removeObserver:self
+ forKeyPath:kRTCAudioSessionOutputVolumeSelector
+ context:(__bridge void *)RTC_OBJC_TYPE(RTCAudioSession).class];
+ RTCLog(@"RTC_OBJC_TYPE(RTCAudioSession) (%p): dealloc.", self);
+}
+
+- (NSString *)description {
+ NSString *format = @"RTC_OBJC_TYPE(RTCAudioSession): {\n"
+ " category: %@\n"
+ " categoryOptions: %ld\n"
+ " mode: %@\n"
+ " isActive: %d\n"
+ " sampleRate: %.2f\n"
+ " IOBufferDuration: %f\n"
+ " outputNumberOfChannels: %ld\n"
+ " inputNumberOfChannels: %ld\n"
+ " outputLatency: %f\n"
+ " inputLatency: %f\n"
+ " outputVolume: %f\n"
+ "}";
+ NSString *description = [NSString stringWithFormat:format,
+ self.category, (long)self.categoryOptions, self.mode,
+ self.isActive, self.sampleRate, self.IOBufferDuration,
+ self.outputNumberOfChannels, self.inputNumberOfChannels,
+ self.outputLatency, self.inputLatency, self.outputVolume];
+ return description;
+}
+
+- (void)setIsActive:(BOOL)isActive {
+ @synchronized(self) {
+ _isActive = isActive;
+ }
+}
+
+- (BOOL)isActive {
+ @synchronized(self) {
+ return _isActive;
+ }
+}
+
+- (void)setUseManualAudio:(BOOL)useManualAudio {
+ @synchronized(self) {
+ if (_useManualAudio == useManualAudio) {
+ return;
+ }
+ _useManualAudio = useManualAudio;
+ }
+ [self updateCanPlayOrRecord];
+}
+
+- (BOOL)useManualAudio {
+ @synchronized(self) {
+ return _useManualAudio;
+ }
+}
+
+- (void)setIsAudioEnabled:(BOOL)isAudioEnabled {
+ @synchronized(self) {
+ if (_isAudioEnabled == isAudioEnabled) {
+ return;
+ }
+ _isAudioEnabled = isAudioEnabled;
+ }
+ [self updateCanPlayOrRecord];
+}
+
+- (BOOL)isAudioEnabled {
+ @synchronized(self) {
+ return _isAudioEnabled;
+ }
+}
+
+- (void)setIgnoresPreferredAttributeConfigurationErrors:
+ (BOOL)ignoresPreferredAttributeConfigurationErrors {
+ @synchronized(self) {
+ if (_ignoresPreferredAttributeConfigurationErrors ==
+ ignoresPreferredAttributeConfigurationErrors) {
+ return;
+ }
+ _ignoresPreferredAttributeConfigurationErrors = ignoresPreferredAttributeConfigurationErrors;
+ }
+}
+
+- (BOOL)ignoresPreferredAttributeConfigurationErrors {
+ @synchronized(self) {
+ return _ignoresPreferredAttributeConfigurationErrors;
+ }
+}
+
+// TODO(tkchin): Check for duplicates.
+- (void)addDelegate:(id<RTC_OBJC_TYPE(RTCAudioSessionDelegate)>)delegate {
+ RTCLog(@"Adding delegate: (%p)", delegate);
+ if (!delegate) {
+ return;
+ }
+ @synchronized(self) {
+ _delegates.push_back(delegate);
+ [self removeZeroedDelegates];
+ }
+}
+
+- (void)removeDelegate:(id<RTC_OBJC_TYPE(RTCAudioSessionDelegate)>)delegate {
+ RTCLog(@"Removing delegate: (%p)", delegate);
+ if (!delegate) {
+ return;
+ }
+ @synchronized(self) {
+ _delegates.erase(std::remove(_delegates.begin(),
+ _delegates.end(),
+ delegate),
+ _delegates.end());
+ [self removeZeroedDelegates];
+ }
+}
+
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wthread-safety-analysis"
+
+- (void)lockForConfiguration {
+ RTC_CHECK(!mutex_locked);
+ _mutex.Lock();
+ mutex_locked = true;
+}
+
+- (void)unlockForConfiguration {
+ mutex_locked = false;
+ _mutex.Unlock();
+}
+
+#pragma clang diagnostic pop
+
+#pragma mark - AVAudioSession proxy methods
+
+- (NSString *)category {
+ return self.session.category;
+}
+
+- (AVAudioSessionCategoryOptions)categoryOptions {
+ return self.session.categoryOptions;
+}
+
+- (NSString *)mode {
+ return self.session.mode;
+}
+
+- (BOOL)secondaryAudioShouldBeSilencedHint {
+ return self.session.secondaryAudioShouldBeSilencedHint;
+}
+
+- (AVAudioSessionRouteDescription *)currentRoute {
+ return self.session.currentRoute;
+}
+
+- (NSInteger)maximumInputNumberOfChannels {
+ return self.session.maximumInputNumberOfChannels;
+}
+
+- (NSInteger)maximumOutputNumberOfChannels {
+ return self.session.maximumOutputNumberOfChannels;
+}
+
+- (float)inputGain {
+ return self.session.inputGain;
+}
+
+- (BOOL)inputGainSettable {
+ return self.session.inputGainSettable;
+}
+
+- (BOOL)inputAvailable {
+ return self.session.inputAvailable;
+}
+
+- (NSArray<AVAudioSessionDataSourceDescription *> *)inputDataSources {
+ return self.session.inputDataSources;
+}
+
+- (AVAudioSessionDataSourceDescription *)inputDataSource {
+ return self.session.inputDataSource;
+}
+
+- (NSArray<AVAudioSessionDataSourceDescription *> *)outputDataSources {
+ return self.session.outputDataSources;
+}
+
+- (AVAudioSessionDataSourceDescription *)outputDataSource {
+ return self.session.outputDataSource;
+}
+
+- (double)sampleRate {
+ return self.session.sampleRate;
+}
+
+- (double)preferredSampleRate {
+ return self.session.preferredSampleRate;
+}
+
+- (NSInteger)inputNumberOfChannels {
+ return self.session.inputNumberOfChannels;
+}
+
+- (NSInteger)outputNumberOfChannels {
+ return self.session.outputNumberOfChannels;
+}
+
+- (float)outputVolume {
+ return self.session.outputVolume;
+}
+
+- (NSTimeInterval)inputLatency {
+ return self.session.inputLatency;
+}
+
+- (NSTimeInterval)outputLatency {
+ return self.session.outputLatency;
+}
+
+- (NSTimeInterval)IOBufferDuration {
+ return self.session.IOBufferDuration;
+}
+
+- (NSTimeInterval)preferredIOBufferDuration {
+ return self.session.preferredIOBufferDuration;
+}
+
+- (BOOL)setActive:(BOOL)active
+ error:(NSError **)outError {
+ if (![self checkLock:outError]) {
+ return NO;
+ }
+ int activationCount = _activationCount.load();
+ if (!active && activationCount == 0) {
+ RTCLogWarning(@"Attempting to deactivate without prior activation.");
+ }
+ [self notifyWillSetActive:active];
+ BOOL success = YES;
+ BOOL isActive = self.isActive;
+ // Keep a local error so we can log it.
+ NSError *error = nil;
+ BOOL shouldSetActive =
+ (active && !isActive) || (!active && isActive && activationCount == 1);
+ // Attempt to activate if we're not active.
+ // Attempt to deactivate if we're active and it's the last unbalanced call.
+ if (shouldSetActive) {
+ AVAudioSession *session = self.session;
+ // AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation is used to ensure
+ // that other audio sessions that were interrupted by our session can return
+ // to their active state. It is recommended for VoIP apps to use this
+ // option.
+ AVAudioSessionSetActiveOptions options =
+ active ? 0 : AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation;
+ success = [session setActive:active
+ withOptions:options
+ error:&error];
+ if (outError) {
+ *outError = error;
+ }
+ }
+ if (success) {
+ if (active) {
+ if (shouldSetActive) {
+ self.isActive = active;
+ if (self.isInterrupted) {
+ self.isInterrupted = NO;
+ [self notifyDidEndInterruptionWithShouldResumeSession:YES];
+ }
+ }
+ [self incrementActivationCount];
+ [self notifyDidSetActive:active];
+ }
+ } else {
+ RTCLogError(@"Failed to setActive:%d. Error: %@",
+ active, error.localizedDescription);
+ [self notifyFailedToSetActive:active error:error];
+ }
+ // Set isActive and decrement activation count on deactivation
+ // whether or not it succeeded.
+ if (!active) {
+ self.isActive = active;
+ [self notifyDidSetActive:active];
+ [self decrementActivationCount];
+ }
+ RTCLog(@"Number of current activations: %d", _activationCount.load());
+ return success;
+}
+
+- (BOOL)setCategory:(NSString *)category
+ withOptions:(AVAudioSessionCategoryOptions)options
+ error:(NSError **)outError {
+ if (![self checkLock:outError]) {
+ return NO;
+ }
+ return [self.session setCategory:category withOptions:options error:outError];
+}
+
+- (BOOL)setMode:(NSString *)mode error:(NSError **)outError {
+ if (![self checkLock:outError]) {
+ return NO;
+ }
+ return [self.session setMode:mode error:outError];
+}
+
+- (BOOL)setInputGain:(float)gain error:(NSError **)outError {
+ if (![self checkLock:outError]) {
+ return NO;
+ }
+ return [self.session setInputGain:gain error:outError];
+}
+
+- (BOOL)setPreferredSampleRate:(double)sampleRate error:(NSError **)outError {
+ if (![self checkLock:outError]) {
+ return NO;
+ }
+ return [self.session setPreferredSampleRate:sampleRate error:outError];
+}
+
+- (BOOL)setPreferredIOBufferDuration:(NSTimeInterval)duration
+ error:(NSError **)outError {
+ if (![self checkLock:outError]) {
+ return NO;
+ }
+ return [self.session setPreferredIOBufferDuration:duration error:outError];
+}
+
+- (BOOL)setPreferredInputNumberOfChannels:(NSInteger)count
+ error:(NSError **)outError {
+ if (![self checkLock:outError]) {
+ return NO;
+ }
+ return [self.session setPreferredInputNumberOfChannels:count error:outError];
+}
+- (BOOL)setPreferredOutputNumberOfChannels:(NSInteger)count
+ error:(NSError **)outError {
+ if (![self checkLock:outError]) {
+ return NO;
+ }
+ return [self.session setPreferredOutputNumberOfChannels:count error:outError];
+}
+
+- (BOOL)overrideOutputAudioPort:(AVAudioSessionPortOverride)portOverride
+ error:(NSError **)outError {
+ if (![self checkLock:outError]) {
+ return NO;
+ }
+ return [self.session overrideOutputAudioPort:portOverride error:outError];
+}
+
+- (BOOL)setPreferredInput:(AVAudioSessionPortDescription *)inPort
+ error:(NSError **)outError {
+ if (![self checkLock:outError]) {
+ return NO;
+ }
+ return [self.session setPreferredInput:inPort error:outError];
+}
+
+- (BOOL)setInputDataSource:(AVAudioSessionDataSourceDescription *)dataSource
+ error:(NSError **)outError {
+ if (![self checkLock:outError]) {
+ return NO;
+ }
+ return [self.session setInputDataSource:dataSource error:outError];
+}
+
+- (BOOL)setOutputDataSource:(AVAudioSessionDataSourceDescription *)dataSource
+ error:(NSError **)outError {
+ if (![self checkLock:outError]) {
+ return NO;
+ }
+ return [self.session setOutputDataSource:dataSource error:outError];
+}
+
+#pragma mark - Notifications
+
+- (void)handleInterruptionNotification:(NSNotification *)notification {
+ NSNumber* typeNumber =
+ notification.userInfo[AVAudioSessionInterruptionTypeKey];
+ AVAudioSessionInterruptionType type =
+ (AVAudioSessionInterruptionType)typeNumber.unsignedIntegerValue;
+ switch (type) {
+ case AVAudioSessionInterruptionTypeBegan:
+ RTCLog(@"Audio session interruption began.");
+ self.isActive = NO;
+ self.isInterrupted = YES;
+ [self notifyDidBeginInterruption];
+ break;
+ case AVAudioSessionInterruptionTypeEnded: {
+ RTCLog(@"Audio session interruption ended.");
+ self.isInterrupted = NO;
+ [self updateAudioSessionAfterEvent];
+ NSNumber *optionsNumber =
+ notification.userInfo[AVAudioSessionInterruptionOptionKey];
+ AVAudioSessionInterruptionOptions options =
+ optionsNumber.unsignedIntegerValue;
+ BOOL shouldResume =
+ options & AVAudioSessionInterruptionOptionShouldResume;
+ [self notifyDidEndInterruptionWithShouldResumeSession:shouldResume];
+ break;
+ }
+ }
+}
+
+- (void)handleRouteChangeNotification:(NSNotification *)notification {
+ // Get reason for current route change.
+ NSNumber* reasonNumber =
+ notification.userInfo[AVAudioSessionRouteChangeReasonKey];
+ AVAudioSessionRouteChangeReason reason =
+ (AVAudioSessionRouteChangeReason)reasonNumber.unsignedIntegerValue;
+ RTCLog(@"Audio route changed:");
+ switch (reason) {
+ case AVAudioSessionRouteChangeReasonUnknown:
+ RTCLog(@"Audio route changed: ReasonUnknown");
+ break;
+ case AVAudioSessionRouteChangeReasonNewDeviceAvailable:
+ RTCLog(@"Audio route changed: NewDeviceAvailable");
+ break;
+ case AVAudioSessionRouteChangeReasonOldDeviceUnavailable:
+ RTCLog(@"Audio route changed: OldDeviceUnavailable");
+ break;
+ case AVAudioSessionRouteChangeReasonCategoryChange:
+ RTCLog(@"Audio route changed: CategoryChange to :%@",
+ self.session.category);
+ break;
+ case AVAudioSessionRouteChangeReasonOverride:
+ RTCLog(@"Audio route changed: Override");
+ break;
+ case AVAudioSessionRouteChangeReasonWakeFromSleep:
+ RTCLog(@"Audio route changed: WakeFromSleep");
+ break;
+ case AVAudioSessionRouteChangeReasonNoSuitableRouteForCategory:
+ RTCLog(@"Audio route changed: NoSuitableRouteForCategory");
+ break;
+ case AVAudioSessionRouteChangeReasonRouteConfigurationChange:
+ RTCLog(@"Audio route changed: RouteConfigurationChange");
+ break;
+ }
+  AVAudioSessionRouteDescription *previousRoute =
+ notification.userInfo[AVAudioSessionRouteChangePreviousRouteKey];
+ // Log previous route configuration.
+ RTCLog(@"Previous route: %@\nCurrent route:%@",
+ previousRoute, self.session.currentRoute);
+ [self notifyDidChangeRouteWithReason:reason previousRoute:previousRoute];
+}
+
+- (void)handleMediaServicesWereLost:(NSNotification *)notification {
+ RTCLog(@"Media services were lost.");
+ [self updateAudioSessionAfterEvent];
+ [self notifyMediaServicesWereLost];
+}
+
+- (void)handleMediaServicesWereReset:(NSNotification *)notification {
+ RTCLog(@"Media services were reset.");
+ [self updateAudioSessionAfterEvent];
+ [self notifyMediaServicesWereReset];
+}
+
+- (void)handleSilenceSecondaryAudioHintNotification:(NSNotification *)notification {
+  // TODO(henrika): just adding logs here for now until we know if we will
+  // ever see this notification and might be affected by it, or if further
+  // actions are required.
+ NSNumber *typeNumber =
+ notification.userInfo[AVAudioSessionSilenceSecondaryAudioHintTypeKey];
+ AVAudioSessionSilenceSecondaryAudioHintType type =
+ (AVAudioSessionSilenceSecondaryAudioHintType)typeNumber.unsignedIntegerValue;
+ switch (type) {
+ case AVAudioSessionSilenceSecondaryAudioHintTypeBegin:
+ RTCLog(@"Another application's primary audio has started.");
+ break;
+ case AVAudioSessionSilenceSecondaryAudioHintTypeEnd:
+ RTCLog(@"Another application's primary audio has stopped.");
+ break;
+ }
+}
+
+- (void)handleApplicationDidBecomeActive:(NSNotification *)notification {
+ BOOL isInterrupted = self.isInterrupted;
+ RTCLog(@"Application became active after an interruption. Treating as interruption "
+ "end. isInterrupted changed from %d to 0.",
+ isInterrupted);
+ if (isInterrupted) {
+ self.isInterrupted = NO;
+ [self updateAudioSessionAfterEvent];
+ }
+ // Always treat application becoming active as an interruption end event.
+ [self notifyDidEndInterruptionWithShouldResumeSession:YES];
+}
+
+#pragma mark - Private
+
++ (NSError *)lockError {
+ NSDictionary *userInfo =
+ @{NSLocalizedDescriptionKey : @"Must call lockForConfiguration before calling this method."};
+ NSError *error = [[NSError alloc] initWithDomain:kRTCAudioSessionErrorDomain
+ code:kRTCAudioSessionErrorLockRequired
+ userInfo:userInfo];
+ return error;
+}
+
+- (std::vector<__weak id<RTC_OBJC_TYPE(RTCAudioSessionDelegate)> >)delegates {
+ @synchronized(self) {
+ // Note: this returns a copy.
+ return _delegates;
+ }
+}
+
+// TODO(tkchin): check for duplicates.
+- (void)pushDelegate:(id<RTC_OBJC_TYPE(RTCAudioSessionDelegate)>)delegate {
+ @synchronized(self) {
+ _delegates.insert(_delegates.begin(), delegate);
+ }
+}
+
+- (void)removeZeroedDelegates {
+ @synchronized(self) {
+ _delegates.erase(
+ std::remove_if(_delegates.begin(),
+ _delegates.end(),
+ [](id delegate) -> bool { return delegate == nil; }),
+ _delegates.end());
+ }
+}
+
+- (int)activationCount {
+ return _activationCount.load();
+}
+
+- (int)incrementActivationCount {
+ RTCLog(@"Incrementing activation count.");
+ return _activationCount.fetch_add(1) + 1;
+}
+
+- (NSInteger)decrementActivationCount {
+ RTCLog(@"Decrementing activation count.");
+ return _activationCount.fetch_sub(1) - 1;
+}
+
+- (int)webRTCSessionCount {
+ return _webRTCSessionCount.load();
+}
+
+- (BOOL)canPlayOrRecord {
+ return !self.useManualAudio || self.isAudioEnabled;
+}
+
+- (BOOL)isInterrupted {
+ @synchronized(self) {
+ return _isInterrupted;
+ }
+}
+
+- (void)setIsInterrupted:(BOOL)isInterrupted {
+ @synchronized(self) {
+ if (_isInterrupted == isInterrupted) {
+ return;
+ }
+ _isInterrupted = isInterrupted;
+ }
+}
+
+- (BOOL)checkLock:(NSError **)outError {
+ if (!mutex_locked) {
+ if (outError) {
+ *outError = [RTC_OBJC_TYPE(RTCAudioSession) lockError];
+ }
+ return NO;
+ }
+ return YES;
+}
+
+- (BOOL)beginWebRTCSession:(NSError **)outError {
+ if (outError) {
+ *outError = nil;
+ }
+ _webRTCSessionCount.fetch_add(1);
+ [self notifyDidStartPlayOrRecord];
+ return YES;
+}
+
+- (BOOL)endWebRTCSession:(NSError **)outError {
+ if (outError) {
+ *outError = nil;
+ }
+ _webRTCSessionCount.fetch_sub(1);
+ [self notifyDidStopPlayOrRecord];
+ return YES;
+}
+
+- (BOOL)configureWebRTCSession:(NSError **)outError {
+ if (outError) {
+ *outError = nil;
+ }
+ RTCLog(@"Configuring audio session for WebRTC.");
+
+ // Configure the AVAudioSession and activate it.
+ // Provide an error even if there isn't one so we can log it.
+ NSError *error = nil;
+ RTC_OBJC_TYPE(RTCAudioSessionConfiguration) *webRTCConfig =
+ [RTC_OBJC_TYPE(RTCAudioSessionConfiguration) webRTCConfiguration];
+ if (![self setConfiguration:webRTCConfig active:YES error:&error]) {
+ RTCLogError(@"Failed to set WebRTC audio configuration: %@",
+ error.localizedDescription);
+ // Do not call setActive:NO if setActive:YES failed.
+ if (outError) {
+ *outError = error;
+ }
+ return NO;
+ }
+
+ // Ensure that the device currently supports audio input.
+ // TODO(tkchin): Figure out if this is really necessary.
+ if (!self.inputAvailable) {
+ RTCLogError(@"No audio input path is available!");
+ [self unconfigureWebRTCSession:nil];
+ if (outError) {
+ *outError = [self configurationErrorWithDescription:@"No input path."];
+ }
+ return NO;
+ }
+
+ // It can happen (e.g. in combination with BT devices) that the attempt to set
+ // the preferred sample rate for WebRTC (48kHz) fails. If so, make a new
+ // configuration attempt using the sample rate that worked using the active
+ // audio session. A typical case is that only 8 or 16kHz can be set, e.g. in
+ // combination with BT headsets. Using this "trick" seems to avoid a state
+ // where Core Audio asks for a different number of audio frames than what the
+ // session's I/O buffer duration corresponds to.
+ // TODO(henrika): this fix resolves bugs.webrtc.org/6004 but it has only been
+ // tested on a limited set of iOS devices and BT devices.
+ double sessionSampleRate = self.sampleRate;
+ double preferredSampleRate = webRTCConfig.sampleRate;
+ if (sessionSampleRate != preferredSampleRate) {
+ RTCLogWarning(
+ @"Current sample rate (%.2f) is not the preferred rate (%.2f)",
+ sessionSampleRate, preferredSampleRate);
+ if (![self setPreferredSampleRate:sessionSampleRate
+ error:&error]) {
+ RTCLogError(@"Failed to set preferred sample rate: %@",
+ error.localizedDescription);
+ if (outError) {
+ *outError = error;
+ }
+ }
+ }
+
+ return YES;
+}
+
+- (BOOL)unconfigureWebRTCSession:(NSError **)outError {
+ if (outError) {
+ *outError = nil;
+ }
+ RTCLog(@"Unconfiguring audio session for WebRTC.");
+ [self setActive:NO error:outError];
+
+ return YES;
+}
+
+- (NSError *)configurationErrorWithDescription:(NSString *)description {
+  NSDictionary *userInfo = @{
+ NSLocalizedDescriptionKey: description,
+ };
+ return [[NSError alloc] initWithDomain:kRTCAudioSessionErrorDomain
+ code:kRTCAudioSessionErrorConfiguration
+ userInfo:userInfo];
+}
+
+- (void)updateAudioSessionAfterEvent {
+ BOOL shouldActivate = self.activationCount > 0;
+ AVAudioSessionSetActiveOptions options = shouldActivate ?
+ 0 : AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation;
+ NSError *error = nil;
+ if ([self.session setActive:shouldActivate
+ withOptions:options
+ error:&error]) {
+ self.isActive = shouldActivate;
+ } else {
+ RTCLogError(@"Failed to set session active to %d. Error:%@",
+ shouldActivate, error.localizedDescription);
+ }
+}
+
+- (void)updateCanPlayOrRecord {
+ BOOL canPlayOrRecord = NO;
+ BOOL shouldNotify = NO;
+ @synchronized(self) {
+ canPlayOrRecord = !self.useManualAudio || self.isAudioEnabled;
+ if (_canPlayOrRecord == canPlayOrRecord) {
+ return;
+ }
+ _canPlayOrRecord = canPlayOrRecord;
+ shouldNotify = YES;
+ }
+ if (shouldNotify) {
+ [self notifyDidChangeCanPlayOrRecord:canPlayOrRecord];
+ }
+}
+
+- (void)audioSessionDidActivate:(AVAudioSession *)session {
+ if (_session != session) {
+ RTCLogError(@"audioSessionDidActivate called on different AVAudioSession");
+ }
+ RTCLog(@"Audio session was externally activated.");
+ [self incrementActivationCount];
+ self.isActive = YES;
+ // When a CallKit call begins, it's possible that we receive an interruption
+ // begin without a corresponding end. Since we know that we have an activated
+ // audio session at this point, just clear any saved interruption flag since
+ // the app may never be foregrounded during the duration of the call.
+ if (self.isInterrupted) {
+ RTCLog(@"Clearing interrupted state due to external activation.");
+ self.isInterrupted = NO;
+ }
+ // Treat external audio session activation as an end interruption event.
+ [self notifyDidEndInterruptionWithShouldResumeSession:YES];
+}
+
+- (void)audioSessionDidDeactivate:(AVAudioSession *)session {
+ if (_session != session) {
+ RTCLogError(@"audioSessionDidDeactivate called on different AVAudioSession");
+ }
+ RTCLog(@"Audio session was externally deactivated.");
+ self.isActive = NO;
+ [self decrementActivationCount];
+}
+
+- (void)observeValueForKeyPath:(NSString *)keyPath
+ ofObject:(id)object
+ change:(NSDictionary *)change
+ context:(void *)context {
+ if (context == (__bridge void *)RTC_OBJC_TYPE(RTCAudioSession).class) {
+ if (object == _session) {
+ NSNumber *newVolume = change[NSKeyValueChangeNewKey];
+ RTCLog(@"OutputVolumeDidChange to %f", newVolume.floatValue);
+ [self notifyDidChangeOutputVolume:newVolume.floatValue];
+ }
+ } else {
+ [super observeValueForKeyPath:keyPath
+ ofObject:object
+ change:change
+ context:context];
+ }
+}
+
+- (void)notifyAudioUnitStartFailedWithError:(OSStatus)error {
+ for (auto delegate : self.delegates) {
+ SEL sel = @selector(audioSession:audioUnitStartFailedWithError:);
+ if ([delegate respondsToSelector:sel]) {
+ [delegate audioSession:self
+ audioUnitStartFailedWithError:[NSError errorWithDomain:kRTCAudioSessionErrorDomain
+ code:error
+ userInfo:nil]];
+ }
+ }
+}
+
+- (void)notifyDidBeginInterruption {
+ for (auto delegate : self.delegates) {
+ SEL sel = @selector(audioSessionDidBeginInterruption:);
+ if ([delegate respondsToSelector:sel]) {
+ [delegate audioSessionDidBeginInterruption:self];
+ }
+ }
+}
+
+- (void)notifyDidEndInterruptionWithShouldResumeSession:
+ (BOOL)shouldResumeSession {
+ for (auto delegate : self.delegates) {
+ SEL sel = @selector(audioSessionDidEndInterruption:shouldResumeSession:);
+ if ([delegate respondsToSelector:sel]) {
+ [delegate audioSessionDidEndInterruption:self
+ shouldResumeSession:shouldResumeSession];
+ }
+ }
+}
+
+- (void)notifyDidChangeRouteWithReason:(AVAudioSessionRouteChangeReason)reason
+ previousRoute:(AVAudioSessionRouteDescription *)previousRoute {
+ for (auto delegate : self.delegates) {
+ SEL sel = @selector(audioSessionDidChangeRoute:reason:previousRoute:);
+ if ([delegate respondsToSelector:sel]) {
+ [delegate audioSessionDidChangeRoute:self
+ reason:reason
+ previousRoute:previousRoute];
+ }
+ }
+}
+
+- (void)notifyMediaServicesWereLost {
+ for (auto delegate : self.delegates) {
+ SEL sel = @selector(audioSessionMediaServerTerminated:);
+ if ([delegate respondsToSelector:sel]) {
+ [delegate audioSessionMediaServerTerminated:self];
+ }
+ }
+}
+
+- (void)notifyMediaServicesWereReset {
+ for (auto delegate : self.delegates) {
+ SEL sel = @selector(audioSessionMediaServerReset:);
+ if ([delegate respondsToSelector:sel]) {
+ [delegate audioSessionMediaServerReset:self];
+ }
+ }
+}
+
+- (void)notifyDidChangeCanPlayOrRecord:(BOOL)canPlayOrRecord {
+ for (auto delegate : self.delegates) {
+ SEL sel = @selector(audioSession:didChangeCanPlayOrRecord:);
+ if ([delegate respondsToSelector:sel]) {
+ [delegate audioSession:self didChangeCanPlayOrRecord:canPlayOrRecord];
+ }
+ }
+}
+
+- (void)notifyDidStartPlayOrRecord {
+ for (auto delegate : self.delegates) {
+ SEL sel = @selector(audioSessionDidStartPlayOrRecord:);
+ if ([delegate respondsToSelector:sel]) {
+ [delegate audioSessionDidStartPlayOrRecord:self];
+ }
+ }
+}
+
+- (void)notifyDidStopPlayOrRecord {
+ for (auto delegate : self.delegates) {
+ SEL sel = @selector(audioSessionDidStopPlayOrRecord:);
+ if ([delegate respondsToSelector:sel]) {
+ [delegate audioSessionDidStopPlayOrRecord:self];
+ }
+ }
+}
+
+- (void)notifyDidChangeOutputVolume:(float)volume {
+ for (auto delegate : self.delegates) {
+ SEL sel = @selector(audioSession:didChangeOutputVolume:);
+ if ([delegate respondsToSelector:sel]) {
+ [delegate audioSession:self didChangeOutputVolume:volume];
+ }
+ }
+}
+
+- (void)notifyDidDetectPlayoutGlitch:(int64_t)totalNumberOfGlitches {
+ for (auto delegate : self.delegates) {
+ SEL sel = @selector(audioSession:didDetectPlayoutGlitch:);
+ if ([delegate respondsToSelector:sel]) {
+ [delegate audioSession:self didDetectPlayoutGlitch:totalNumberOfGlitches];
+ }
+ }
+}
+
+- (void)notifyWillSetActive:(BOOL)active {
+ for (id delegate : self.delegates) {
+ SEL sel = @selector(audioSession:willSetActive:);
+ if ([delegate respondsToSelector:sel]) {
+ [delegate audioSession:self willSetActive:active];
+ }
+ }
+}
+
+- (void)notifyDidSetActive:(BOOL)active {
+ for (id delegate : self.delegates) {
+ SEL sel = @selector(audioSession:didSetActive:);
+ if ([delegate respondsToSelector:sel]) {
+ [delegate audioSession:self didSetActive:active];
+ }
+ }
+}
+
+- (void)notifyFailedToSetActive:(BOOL)active error:(NSError *)error {
+ for (id delegate : self.delegates) {
+ SEL sel = @selector(audioSession:failedToSetActive:error:);
+ if ([delegate respondsToSelector:sel]) {
+ [delegate audioSession:self failedToSetActive:active error:error];
+ }
+ }
+}
+
+@end
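
A minimal caller-side sketch of the locking contract enforced by checkLock:
above (illustrative only, not part of the patch; it assumes the
lockForConfiguration/unlockForConfiguration pair that RTCAudioSession.h
declares for this purpose):

    RTC_OBJC_TYPE(RTCAudioSession) *session =
        [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance];
    [session lockForConfiguration];
    NSError *error = nil;
    // Without the surrounding lock, checkLock: makes this call fail with
    // kRTCAudioSessionErrorLockRequired instead of touching AVAudioSession.
    if (![session setCategory:AVAudioSessionCategoryPlayAndRecord
                  withOptions:AVAudioSessionCategoryOptionAllowBluetooth
                        error:&error]) {
      RTCLogError(@"setCategory failed: %@", error.localizedDescription);
    }
    [session unlockForConfiguration];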
diff --git a/third_party/libwebrtc/sdk/objc/components/audio/RTCAudioSessionConfiguration.h b/third_party/libwebrtc/sdk/objc/components/audio/RTCAudioSessionConfiguration.h
new file mode 100644
index 0000000000..4582b80557
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/audio/RTCAudioSessionConfiguration.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <AVFoundation/AVFoundation.h>
+#import <Foundation/Foundation.h>
+
+#import "RTCMacros.h"
+
+NS_ASSUME_NONNULL_BEGIN
+
+RTC_EXTERN const int kRTCAudioSessionPreferredNumberOfChannels;
+RTC_EXTERN const double kRTCAudioSessionHighPerformanceSampleRate;
+RTC_EXTERN const double kRTCAudioSessionLowComplexitySampleRate;
+RTC_EXTERN const double kRTCAudioSessionHighPerformanceIOBufferDuration;
+RTC_EXTERN const double kRTCAudioSessionLowComplexityIOBufferDuration;
+
+// Struct to hold configuration values.
+RTC_OBJC_EXPORT
+@interface RTC_OBJC_TYPE (RTCAudioSessionConfiguration) : NSObject
+
+@property(nonatomic, strong) NSString *category;
+@property(nonatomic, assign) AVAudioSessionCategoryOptions categoryOptions;
+@property(nonatomic, strong) NSString *mode;
+@property(nonatomic, assign) double sampleRate;
+@property(nonatomic, assign) NSTimeInterval ioBufferDuration;
+@property(nonatomic, assign) NSInteger inputNumberOfChannels;
+@property(nonatomic, assign) NSInteger outputNumberOfChannels;
+
+/** Initializes configuration to defaults. */
+- (instancetype)init NS_DESIGNATED_INITIALIZER;
+
+/** Returns the current configuration of the audio session. */
++ (instancetype)currentConfiguration;
+/** Returns the configuration that WebRTC needs. */
++ (instancetype)webRTCConfiguration;
+/** Provide a way to override the default configuration. */
++ (void)setWebRTCConfiguration:(RTC_OBJC_TYPE(RTCAudioSessionConfiguration) *)configuration;
+
+@end
+
+NS_ASSUME_NONNULL_END
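
A sketch of how an application might override the defaults through this
interface (illustrative only; the video-chat mode and speaker option are
example choices, not values this patch prescribes):

    RTC_OBJC_TYPE(RTCAudioSessionConfiguration) *config =
        [[RTC_OBJC_TYPE(RTCAudioSessionConfiguration) alloc] init];  // starts from the defaults
    config.mode = AVAudioSessionModeVideoChat;
    config.categoryOptions |= AVAudioSessionCategoryOptionDefaultToSpeaker;
    [RTC_OBJC_TYPE(RTCAudioSessionConfiguration) setWebRTCConfiguration:config];

Subsequent calls to +webRTCConfiguration then return this object instead of
the built-in defaults.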
diff --git a/third_party/libwebrtc/sdk/objc/components/audio/RTCAudioSessionConfiguration.m b/third_party/libwebrtc/sdk/objc/components/audio/RTCAudioSessionConfiguration.m
new file mode 100644
index 0000000000..39e9ac13ec
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/audio/RTCAudioSessionConfiguration.m
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "RTCAudioSessionConfiguration.h"
+#import "RTCAudioSession.h"
+
+#import "helpers/RTCDispatcher.h"
+#import "helpers/UIDevice+RTCDevice.h"
+
+// Try to use mono to save resources. Also avoids channel format conversion
+// in the I/O audio unit. Initial tests have shown that it is possible to use
+// mono natively for built-in microphones and for BT headsets but not for
+// wired headsets. Wired headsets only support stereo as native channel format
+// but it is a low cost operation to do a format conversion to mono in the
+// audio unit. Hence, we will not hit an RTC_CHECK in
+// VerifyAudioParametersForActiveAudioSession() for a mismatch between the
+// preferred number of channels and the actual number of channels.
+const int kRTCAudioSessionPreferredNumberOfChannels = 1;
+
+// Preferred hardware sample rate (unit is in Hertz). The client sample rate
+// will be set to this value as well to avoid resampling in the audio unit's
+// format converter. Note that some devices, e.g. BT headsets, only support
+// 8000 Hz as their native sample rate.
+const double kRTCAudioSessionHighPerformanceSampleRate = 48000.0;
+
+// A lower sample rate will be used for devices with only one core
+// (e.g. iPhone 4). The goal is to reduce the CPU load of the application.
+const double kRTCAudioSessionLowComplexitySampleRate = 16000.0;
+
+// Use a hardware I/O buffer size (unit is in seconds) that matches the 10ms
+// size used by WebRTC. The actual size will differ between devices.
+// Example: using 48kHz on iPhone 6 results in a native buffer size of
+// ~10.6667ms or 512 audio frames per buffer. The FineAudioBuffer instance will
+// take care of any buffering required to convert between native buffers and
+// buffers used by WebRTC. Performance benefits when the native size is as
+// close to an even multiple of 10ms as possible, since this results in a
+// "clean" callback sequence without bursts of back-to-back callbacks.
+const double kRTCAudioSessionHighPerformanceIOBufferDuration = 0.02;
+
+// Use a larger buffer size on devices with only one core (e.g. iPhone 4).
+// It will result in a lower CPU consumption at the cost of a larger latency.
+// The size of 60ms is based on instrumentation that shows a significant
+// reduction in CPU load compared with 10ms on low-end devices.
+// TODO(henrika): monitor this size and determine if it should be modified.
+const double kRTCAudioSessionLowComplexityIOBufferDuration = 0.06;
+
+static RTC_OBJC_TYPE(RTCAudioSessionConfiguration) *gWebRTCConfiguration = nil;
+
+@implementation RTC_OBJC_TYPE (RTCAudioSessionConfiguration)
+
+@synthesize category = _category;
+@synthesize categoryOptions = _categoryOptions;
+@synthesize mode = _mode;
+@synthesize sampleRate = _sampleRate;
+@synthesize ioBufferDuration = _ioBufferDuration;
+@synthesize inputNumberOfChannels = _inputNumberOfChannels;
+@synthesize outputNumberOfChannels = _outputNumberOfChannels;
+
+- (instancetype)init {
+ if (self = [super init]) {
+ // Use a category which supports simultaneous recording and playback.
+ // By default, using this category implies that our app’s audio is
+ // nonmixable, hence activating the session will interrupt any other
+ // audio sessions which are also nonmixable.
+ _category = AVAudioSessionCategoryPlayAndRecord;
+ _categoryOptions = AVAudioSessionCategoryOptionAllowBluetooth;
+
+ // Specify mode for two-way voice communication (e.g. VoIP).
+ _mode = AVAudioSessionModeVoiceChat;
+
+    // Set the session's sample rate to match the hardware sample rate.
+    // It is essential that we use the same sample rate as the stream format
+    // to ensure that the I/O unit does not have to do sample rate conversion.
+    // Also set the preferred audio I/O buffer duration, in seconds.
+ NSUInteger processorCount = [NSProcessInfo processInfo].processorCount;
+ // Use best sample rate and buffer duration if the CPU has more than one
+ // core.
+ if (processorCount > 1 && [UIDevice deviceType] != RTCDeviceTypeIPhone4S) {
+ _sampleRate = kRTCAudioSessionHighPerformanceSampleRate;
+ _ioBufferDuration = kRTCAudioSessionHighPerformanceIOBufferDuration;
+ } else {
+ _sampleRate = kRTCAudioSessionLowComplexitySampleRate;
+ _ioBufferDuration = kRTCAudioSessionLowComplexityIOBufferDuration;
+ }
+
+    // We try to use mono in both directions to save resources and format
+    // conversions in the audio unit. Some devices only support stereo,
+    // e.g. a wired headset on iPhone 6.
+ // TODO(henrika): add support for stereo if needed.
+ _inputNumberOfChannels = kRTCAudioSessionPreferredNumberOfChannels;
+ _outputNumberOfChannels = kRTCAudioSessionPreferredNumberOfChannels;
+ }
+ return self;
+}
+
++ (void)initialize {
+ gWebRTCConfiguration = [[self alloc] init];
+}
+
++ (instancetype)currentConfiguration {
+ RTC_OBJC_TYPE(RTCAudioSession) *session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance];
+ RTC_OBJC_TYPE(RTCAudioSessionConfiguration) *config =
+ [[RTC_OBJC_TYPE(RTCAudioSessionConfiguration) alloc] init];
+ config.category = session.category;
+ config.categoryOptions = session.categoryOptions;
+ config.mode = session.mode;
+ config.sampleRate = session.sampleRate;
+ config.ioBufferDuration = session.IOBufferDuration;
+ config.inputNumberOfChannels = session.inputNumberOfChannels;
+ config.outputNumberOfChannels = session.outputNumberOfChannels;
+ return config;
+}
+
++ (instancetype)webRTCConfiguration {
+ @synchronized(self) {
+ return (RTC_OBJC_TYPE(RTCAudioSessionConfiguration) *)gWebRTCConfiguration;
+ }
+}
+
++ (void)setWebRTCConfiguration:(RTC_OBJC_TYPE(RTCAudioSessionConfiguration) *)configuration {
+ @synchronized(self) {
+ gWebRTCConfiguration = configuration;
+ }
+}
+
+@end
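
To make the buffer-duration comments above concrete, the number of audio
frames delivered per Core Audio callback is roughly the sample rate times the
preferred I/O buffer duration (a back-of-the-envelope sketch, not code from
this patch):

    // High performance: 48000 Hz * 0.02 s ~= 960 frames per callback.
    // Low complexity:   16000 Hz * 0.06 s ~= 960 frames per callback.
    // The hardware rounds to a nearby native size; compare the ~512-frame
    // (~10.6667 ms at 48 kHz) iPhone 6 example in the comments above.
    double approxFramesPerCallback =
        kRTCAudioSessionHighPerformanceSampleRate *
        kRTCAudioSessionHighPerformanceIOBufferDuration;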
diff --git a/third_party/libwebrtc/sdk/objc/components/audio/RTCNativeAudioSessionDelegateAdapter.h b/third_party/libwebrtc/sdk/objc/components/audio/RTCNativeAudioSessionDelegateAdapter.h
new file mode 100644
index 0000000000..6a75f01479
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/audio/RTCNativeAudioSessionDelegateAdapter.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2018 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "RTCAudioSession.h"
+
+NS_ASSUME_NONNULL_BEGIN
+
+namespace webrtc {
+class AudioSessionObserver;
+}
+
+/** Adapter that forwards RTCAudioSessionDelegate calls to the appropriate
+ * methods on the AudioSessionObserver.
+ */
+@interface RTCNativeAudioSessionDelegateAdapter : NSObject <RTC_OBJC_TYPE (RTCAudioSessionDelegate)>
+
+- (instancetype)init NS_UNAVAILABLE;
+
+/** `observer` is a raw pointer and should be kept alive
+ * for this object's lifetime.
+ */
+- (instancetype)initWithObserver:(webrtc::AudioSessionObserver *)observer NS_DESIGNATED_INITIALIZER;
+
+@end
+
+NS_ASSUME_NONNULL_END
diff --git a/third_party/libwebrtc/sdk/objc/components/audio/RTCNativeAudioSessionDelegateAdapter.mm b/third_party/libwebrtc/sdk/objc/components/audio/RTCNativeAudioSessionDelegateAdapter.mm
new file mode 100644
index 0000000000..daddf314a4
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/audio/RTCNativeAudioSessionDelegateAdapter.mm
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2018 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "RTCNativeAudioSessionDelegateAdapter.h"
+
+#include "sdk/objc/native/src/audio/audio_session_observer.h"
+
+#import "base/RTCLogging.h"
+
+@implementation RTCNativeAudioSessionDelegateAdapter {
+ webrtc::AudioSessionObserver *_observer;
+}
+
+- (instancetype)initWithObserver:(webrtc::AudioSessionObserver *)observer {
+ RTC_DCHECK(observer);
+ if (self = [super init]) {
+ _observer = observer;
+ }
+ return self;
+}
+
+#pragma mark - RTC_OBJC_TYPE(RTCAudioSessionDelegate)
+
+- (void)audioSessionDidBeginInterruption:(RTC_OBJC_TYPE(RTCAudioSession) *)session {
+ _observer->OnInterruptionBegin();
+}
+
+- (void)audioSessionDidEndInterruption:(RTC_OBJC_TYPE(RTCAudioSession) *)session
+ shouldResumeSession:(BOOL)shouldResumeSession {
+ _observer->OnInterruptionEnd();
+}
+
+- (void)audioSessionDidChangeRoute:(RTC_OBJC_TYPE(RTCAudioSession) *)session
+ reason:(AVAudioSessionRouteChangeReason)reason
+ previousRoute:(AVAudioSessionRouteDescription *)previousRoute {
+ switch (reason) {
+ case AVAudioSessionRouteChangeReasonUnknown:
+ case AVAudioSessionRouteChangeReasonNewDeviceAvailable:
+ case AVAudioSessionRouteChangeReasonOldDeviceUnavailable:
+ case AVAudioSessionRouteChangeReasonCategoryChange:
+      // It turns out that we see a category change (at least in iOS 9.2)
+      // when switching from a BT device to e.g. the Speaker via the iOS
+      // Control Center, so we must check whether the sample rate has
+      // changed. If it has, restart the audio unit.
+ case AVAudioSessionRouteChangeReasonOverride:
+ case AVAudioSessionRouteChangeReasonWakeFromSleep:
+ case AVAudioSessionRouteChangeReasonNoSuitableRouteForCategory:
+ _observer->OnValidRouteChange();
+ break;
+ case AVAudioSessionRouteChangeReasonRouteConfigurationChange:
+ // The set of input and output ports has not changed, but their
+ // configuration has, e.g., a port’s selected data source has
+ // changed. Ignore this type of route change since we are focusing
+ // on detecting headset changes.
+ RTCLog(@"Ignoring RouteConfigurationChange");
+ break;
+ }
+}
+
+- (void)audioSessionMediaServerTerminated:(RTC_OBJC_TYPE(RTCAudioSession) *)session {
+}
+
+- (void)audioSessionMediaServerReset:(RTC_OBJC_TYPE(RTCAudioSession) *)session {
+}
+
+- (void)audioSession:(RTC_OBJC_TYPE(RTCAudioSession) *)session
+ didChangeCanPlayOrRecord:(BOOL)canPlayOrRecord {
+ _observer->OnCanPlayOrRecordChange(canPlayOrRecord);
+}
+
+- (void)audioSessionDidStartPlayOrRecord:(RTC_OBJC_TYPE(RTCAudioSession) *)session {
+}
+
+- (void)audioSessionDidStopPlayOrRecord:(RTC_OBJC_TYPE(RTCAudioSession) *)session {
+}
+
+- (void)audioSession:(RTC_OBJC_TYPE(RTCAudioSession) *)audioSession
+ didChangeOutputVolume:(float)outputVolume {
+ _observer->OnChangedOutputVolume();
+}
+
+@end
diff --git a/third_party/libwebrtc/sdk/objc/components/capturer/RTCCameraVideoCapturer.h b/third_party/libwebrtc/sdk/objc/components/capturer/RTCCameraVideoCapturer.h
new file mode 100644
index 0000000000..370bfa70f0
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/capturer/RTCCameraVideoCapturer.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <AVFoundation/AVFoundation.h>
+#import <Foundation/Foundation.h>
+
+#import "RTCMacros.h"
+#import "RTCVideoCapturer.h"
+
+NS_ASSUME_NONNULL_BEGIN
+
+RTC_OBJC_EXPORT
+// Camera capture that implements RTCVideoCapturer. Delivers frames to a
+// RTCVideoCapturerDelegate (usually RTCVideoSource).
+NS_EXTENSION_UNAVAILABLE_IOS("Camera not available in app extensions.")
+@interface RTC_OBJC_TYPE (RTCCameraVideoCapturer) : RTC_OBJC_TYPE(RTCVideoCapturer)
+
+// Capture session that is used for capturing. Valid from initialization to dealloc.
+@property(readonly, nonatomic) AVCaptureSession *captureSession;
+
+// Returns list of available capture devices that support video capture.
++ (NSArray<AVCaptureDevice *> *)captureDevices;
+// Returns list of formats that are supported by this class for this device.
++ (NSArray<AVCaptureDeviceFormat *> *)supportedFormatsForDevice:(AVCaptureDevice *)device;
+
+// Returns the most efficient supported output pixel format for this capturer.
+- (FourCharCode)preferredOutputPixelFormat;
+
+// Starts the capture session asynchronously and notifies callback on completion.
+// The device will capture video in the format given in the `format` parameter. If the pixel format
+// in `format` is supported by the WebRTC pipeline, the same pixel format will be used for the
+// output. Otherwise, the format returned by `preferredOutputPixelFormat` will be used.
+- (void)startCaptureWithDevice:(AVCaptureDevice *)device
+ format:(AVCaptureDeviceFormat *)format
+ fps:(NSInteger)fps
+ completionHandler:(nullable void (^)(NSError *_Nullable))completionHandler;
+// Stops the capture session asynchronously and notifies callback on completion.
+- (void)stopCaptureWithCompletionHandler:(nullable void (^)(void))completionHandler;
+
+// Starts the capture session asynchronously.
+- (void)startCaptureWithDevice:(AVCaptureDevice *)device
+ format:(AVCaptureDeviceFormat *)format
+ fps:(NSInteger)fps;
+// Stops the capture session asynchronously.
+- (void)stopCapture;
+
+@end
+
+NS_ASSUME_NONNULL_END
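
A typical call sequence against this interface looks roughly as follows (a
sketch; `videoSource` stands in for any object conforming to
RTC_OBJC_TYPE(RTCVideoCapturerDelegate), and picking the first device and
format is a placeholder for real selection logic):

    RTC_OBJC_TYPE(RTCCameraVideoCapturer) *capturer =
        [[RTC_OBJC_TYPE(RTCCameraVideoCapturer) alloc] initWithDelegate:videoSource];
    AVCaptureDevice *device =
        [RTC_OBJC_TYPE(RTCCameraVideoCapturer) captureDevices].firstObject;
    AVCaptureDeviceFormat *format =
        [RTC_OBJC_TYPE(RTCCameraVideoCapturer) supportedFormatsForDevice:device].firstObject;
    [capturer startCaptureWithDevice:device
                              format:format
                                 fps:30
                   completionHandler:^(NSError *_Nullable error) {
                     if (error) {
                       RTCLogError(@"Start capture failed: %@", error.localizedDescription);
                     }
                   }];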
diff --git a/third_party/libwebrtc/sdk/objc/components/capturer/RTCCameraVideoCapturer.m b/third_party/libwebrtc/sdk/objc/components/capturer/RTCCameraVideoCapturer.m
new file mode 100644
index 0000000000..98d3cf9f45
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/capturer/RTCCameraVideoCapturer.m
@@ -0,0 +1,535 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <Foundation/Foundation.h>
+
+#import "RTCCameraVideoCapturer.h"
+#import "base/RTCLogging.h"
+#import "base/RTCVideoFrameBuffer.h"
+#import "components/video_frame_buffer/RTCCVPixelBuffer.h"
+
+#if TARGET_OS_IPHONE
+#import "helpers/UIDevice+RTCDevice.h"
+#endif
+
+#import "helpers/AVCaptureSession+DevicePosition.h"
+#import "helpers/RTCDispatcher+Private.h"
+#include "rtc_base/system/gcd_helpers.h"
+
+const int64_t kNanosecondsPerSecond = 1000000000;
+
+@interface RTC_OBJC_TYPE (RTCCameraVideoCapturer) () <AVCaptureVideoDataOutputSampleBufferDelegate>
+@property(nonatomic, readonly) dispatch_queue_t frameQueue;
+@property(nonatomic, strong) AVCaptureDevice *currentDevice;
+@property(nonatomic, assign) BOOL hasRetriedOnFatalError;
+@property(nonatomic, assign) BOOL isRunning;
+// Will the session be running once all asynchronous operations have been completed?
+@property(nonatomic, assign) BOOL willBeRunning;
+@end
+
+@implementation RTC_OBJC_TYPE (RTCCameraVideoCapturer) {
+ AVCaptureVideoDataOutput *_videoDataOutput;
+ AVCaptureSession *_captureSession;
+ FourCharCode _preferredOutputPixelFormat;
+ FourCharCode _outputPixelFormat;
+ RTCVideoRotation _rotation;
+#if TARGET_OS_IPHONE
+ UIDeviceOrientation _orientation;
+ BOOL _generatingOrientationNotifications;
+#endif
+}
+
+@synthesize frameQueue = _frameQueue;
+@synthesize captureSession = _captureSession;
+@synthesize currentDevice = _currentDevice;
+@synthesize hasRetriedOnFatalError = _hasRetriedOnFatalError;
+@synthesize isRunning = _isRunning;
+@synthesize willBeRunning = _willBeRunning;
+
+- (instancetype)init {
+ return [self initWithDelegate:nil captureSession:[[AVCaptureSession alloc] init]];
+}
+
+- (instancetype)initWithDelegate:(__weak id<RTC_OBJC_TYPE(RTCVideoCapturerDelegate)>)delegate {
+ return [self initWithDelegate:delegate captureSession:[[AVCaptureSession alloc] init]];
+}
+
+// This initializer is used for testing.
+- (instancetype)initWithDelegate:(__weak id<RTC_OBJC_TYPE(RTCVideoCapturerDelegate)>)delegate
+ captureSession:(AVCaptureSession *)captureSession {
+ if (self = [super initWithDelegate:delegate]) {
+    // Create the capture session and all relevant inputs and outputs. We need
+    // to do this in init because the application may want the capture session
+    // before we start the capturer for e.g. AVCaptureVideoPreviewLayer. All
+    // objects created here are retained until dealloc and never recreated.
+ if (![self setupCaptureSession:captureSession]) {
+ return nil;
+ }
+ NSNotificationCenter *center = [NSNotificationCenter defaultCenter];
+#if TARGET_OS_IPHONE
+ _orientation = UIDeviceOrientationPortrait;
+ _rotation = RTCVideoRotation_90;
+ [center addObserver:self
+ selector:@selector(deviceOrientationDidChange:)
+ name:UIDeviceOrientationDidChangeNotification
+ object:nil];
+ [center addObserver:self
+ selector:@selector(handleCaptureSessionInterruption:)
+ name:AVCaptureSessionWasInterruptedNotification
+ object:_captureSession];
+ [center addObserver:self
+ selector:@selector(handleCaptureSessionInterruptionEnded:)
+ name:AVCaptureSessionInterruptionEndedNotification
+ object:_captureSession];
+ [center addObserver:self
+ selector:@selector(handleApplicationDidBecomeActive:)
+ name:UIApplicationDidBecomeActiveNotification
+ object:[UIApplication sharedApplication]];
+#endif
+ [center addObserver:self
+ selector:@selector(handleCaptureSessionRuntimeError:)
+ name:AVCaptureSessionRuntimeErrorNotification
+ object:_captureSession];
+ [center addObserver:self
+ selector:@selector(handleCaptureSessionDidStartRunning:)
+ name:AVCaptureSessionDidStartRunningNotification
+ object:_captureSession];
+ [center addObserver:self
+ selector:@selector(handleCaptureSessionDidStopRunning:)
+ name:AVCaptureSessionDidStopRunningNotification
+ object:_captureSession];
+ }
+ return self;
+}
+
+- (void)dealloc {
+ NSAssert(!_willBeRunning,
+ @"Session was still running in RTC_OBJC_TYPE(RTCCameraVideoCapturer) dealloc. Forgot to "
+ @"call stopCapture?");
+ [[NSNotificationCenter defaultCenter] removeObserver:self];
+}
+
++ (NSArray<AVCaptureDevice *> *)captureDevices {
+#if defined(WEBRTC_IOS) && defined(__IPHONE_10_0) && \
+ __IPHONE_OS_VERSION_MIN_REQUIRED >= __IPHONE_10_0
+ AVCaptureDeviceDiscoverySession *session = [AVCaptureDeviceDiscoverySession
+ discoverySessionWithDeviceTypes:@[ AVCaptureDeviceTypeBuiltInWideAngleCamera ]
+ mediaType:AVMediaTypeVideo
+ position:AVCaptureDevicePositionUnspecified];
+ return session.devices;
+#else
+ return [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo];
+#endif
+}
+
++ (NSArray<AVCaptureDeviceFormat *> *)supportedFormatsForDevice:(AVCaptureDevice *)device {
+ // Support opening the device in any format. We make sure it's converted to a format we
+ // can handle, if needed, in the method `-setupVideoDataOutput`.
+ return device.formats;
+}
+
+- (FourCharCode)preferredOutputPixelFormat {
+ return _preferredOutputPixelFormat;
+}
+
+- (void)startCaptureWithDevice:(AVCaptureDevice *)device
+ format:(AVCaptureDeviceFormat *)format
+ fps:(NSInteger)fps {
+ [self startCaptureWithDevice:device format:format fps:fps completionHandler:nil];
+}
+
+- (void)stopCapture {
+ [self stopCaptureWithCompletionHandler:nil];
+}
+
+- (void)startCaptureWithDevice:(AVCaptureDevice *)device
+ format:(AVCaptureDeviceFormat *)format
+ fps:(NSInteger)fps
+ completionHandler:(nullable void (^)(NSError *_Nullable error))completionHandler {
+ _willBeRunning = YES;
+ [RTC_OBJC_TYPE(RTCDispatcher)
+ dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
+ block:^{
+ RTCLogInfo("startCaptureWithDevice %@ @ %ld fps", format, (long)fps);
+
+#if TARGET_OS_IPHONE
+ dispatch_async(dispatch_get_main_queue(), ^{
+ if (!self->_generatingOrientationNotifications) {
+ [[UIDevice currentDevice] beginGeneratingDeviceOrientationNotifications];
+ self->_generatingOrientationNotifications = YES;
+ }
+ });
+#endif
+
+ self.currentDevice = device;
+
+ NSError *error = nil;
+ if (![self.currentDevice lockForConfiguration:&error]) {
+ RTCLogError(@"Failed to lock device %@. Error: %@",
+ self.currentDevice,
+ error.userInfo);
+ if (completionHandler) {
+ completionHandler(error);
+ }
+ self.willBeRunning = NO;
+ return;
+ }
+ [self reconfigureCaptureSessionInput];
+ [self updateOrientation];
+ [self updateDeviceCaptureFormat:format fps:fps];
+ [self updateVideoDataOutputPixelFormat:format];
+ [self.captureSession startRunning];
+ [self.currentDevice unlockForConfiguration];
+ self.isRunning = YES;
+ if (completionHandler) {
+ completionHandler(nil);
+ }
+ }];
+}
+
+- (void)stopCaptureWithCompletionHandler:(nullable void (^)(void))completionHandler {
+ _willBeRunning = NO;
+ [RTC_OBJC_TYPE(RTCDispatcher)
+ dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
+ block:^{
+ RTCLogInfo("Stop");
+ self.currentDevice = nil;
+ for (AVCaptureDeviceInput *oldInput in [self.captureSession.inputs copy]) {
+ [self.captureSession removeInput:oldInput];
+ }
+ [self.captureSession stopRunning];
+
+#if TARGET_OS_IPHONE
+ dispatch_async(dispatch_get_main_queue(), ^{
+ if (self->_generatingOrientationNotifications) {
+ [[UIDevice currentDevice] endGeneratingDeviceOrientationNotifications];
+ self->_generatingOrientationNotifications = NO;
+ }
+ });
+#endif
+ self.isRunning = NO;
+ if (completionHandler) {
+ completionHandler();
+ }
+ }];
+}
+
+#pragma mark iOS notifications
+
+#if TARGET_OS_IPHONE
+- (void)deviceOrientationDidChange:(NSNotification *)notification {
+ [RTC_OBJC_TYPE(RTCDispatcher) dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
+ block:^{
+ [self updateOrientation];
+ }];
+}
+#endif
+
+#pragma mark AVCaptureVideoDataOutputSampleBufferDelegate
+
+- (void)captureOutput:(AVCaptureOutput *)captureOutput
+ didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
+ fromConnection:(AVCaptureConnection *)connection {
+ NSParameterAssert(captureOutput == _videoDataOutput);
+
+ if (CMSampleBufferGetNumSamples(sampleBuffer) != 1 || !CMSampleBufferIsValid(sampleBuffer) ||
+ !CMSampleBufferDataIsReady(sampleBuffer)) {
+ return;
+ }
+
+ CVPixelBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
+ if (pixelBuffer == nil) {
+ return;
+ }
+
+#if TARGET_OS_IPHONE
+ // Default to portrait orientation on iPhone.
+ BOOL usingFrontCamera = NO;
+  // Check the image's EXIF for the camera the image came from, since the
+  // image could have been delayed because we set alwaysDiscardsLateVideoFrames
+  // to NO.
+ AVCaptureDevicePosition cameraPosition =
+ [AVCaptureSession devicePositionForSampleBuffer:sampleBuffer];
+ if (cameraPosition != AVCaptureDevicePositionUnspecified) {
+ usingFrontCamera = AVCaptureDevicePositionFront == cameraPosition;
+ } else {
+ AVCaptureDeviceInput *deviceInput =
+ (AVCaptureDeviceInput *)((AVCaptureInputPort *)connection.inputPorts.firstObject).input;
+ usingFrontCamera = AVCaptureDevicePositionFront == deviceInput.device.position;
+ }
+ switch (_orientation) {
+ case UIDeviceOrientationPortrait:
+ _rotation = RTCVideoRotation_90;
+ break;
+ case UIDeviceOrientationPortraitUpsideDown:
+ _rotation = RTCVideoRotation_270;
+ break;
+ case UIDeviceOrientationLandscapeLeft:
+ _rotation = usingFrontCamera ? RTCVideoRotation_180 : RTCVideoRotation_0;
+ break;
+ case UIDeviceOrientationLandscapeRight:
+ _rotation = usingFrontCamera ? RTCVideoRotation_0 : RTCVideoRotation_180;
+ break;
+ case UIDeviceOrientationFaceUp:
+ case UIDeviceOrientationFaceDown:
+ case UIDeviceOrientationUnknown:
+ // Ignore.
+ break;
+ }
+#else
+ // No rotation on Mac.
+ _rotation = RTCVideoRotation_0;
+#endif
+
+ RTC_OBJC_TYPE(RTCCVPixelBuffer) *rtcPixelBuffer =
+ [[RTC_OBJC_TYPE(RTCCVPixelBuffer) alloc] initWithPixelBuffer:pixelBuffer];
+ int64_t timeStampNs = CMTimeGetSeconds(CMSampleBufferGetPresentationTimeStamp(sampleBuffer)) *
+ kNanosecondsPerSecond;
+ RTC_OBJC_TYPE(RTCVideoFrame) *videoFrame =
+ [[RTC_OBJC_TYPE(RTCVideoFrame) alloc] initWithBuffer:rtcPixelBuffer
+ rotation:_rotation
+ timeStampNs:timeStampNs];
+ [self.delegate capturer:self didCaptureVideoFrame:videoFrame];
+}
+
+- (void)captureOutput:(AVCaptureOutput *)captureOutput
+ didDropSampleBuffer:(CMSampleBufferRef)sampleBuffer
+ fromConnection:(AVCaptureConnection *)connection {
+#if TARGET_OS_IPHONE
+ CFStringRef droppedReason =
+ CMGetAttachment(sampleBuffer, kCMSampleBufferAttachmentKey_DroppedFrameReason, nil);
+#else
+ // DroppedFrameReason unavailable on macOS.
+ CFStringRef droppedReason = nil;
+#endif
+ RTCLogError(@"Dropped sample buffer. Reason: %@", (__bridge NSString *)droppedReason);
+}
+
+#pragma mark - AVCaptureSession notifications
+
+- (void)handleCaptureSessionInterruption:(NSNotification *)notification {
+ NSString *reasonString = nil;
+#if TARGET_OS_IPHONE
+ NSNumber *reason = notification.userInfo[AVCaptureSessionInterruptionReasonKey];
+ if (reason) {
+ switch (reason.intValue) {
+ case AVCaptureSessionInterruptionReasonVideoDeviceNotAvailableInBackground:
+ reasonString = @"VideoDeviceNotAvailableInBackground";
+ break;
+ case AVCaptureSessionInterruptionReasonAudioDeviceInUseByAnotherClient:
+ reasonString = @"AudioDeviceInUseByAnotherClient";
+ break;
+ case AVCaptureSessionInterruptionReasonVideoDeviceInUseByAnotherClient:
+ reasonString = @"VideoDeviceInUseByAnotherClient";
+ break;
+ case AVCaptureSessionInterruptionReasonVideoDeviceNotAvailableWithMultipleForegroundApps:
+ reasonString = @"VideoDeviceNotAvailableWithMultipleForegroundApps";
+ break;
+ }
+ }
+#endif
+ RTCLog(@"Capture session interrupted: %@", reasonString);
+}
+
+- (void)handleCaptureSessionInterruptionEnded:(NSNotification *)notification {
+ RTCLog(@"Capture session interruption ended.");
+}
+
+- (void)handleCaptureSessionRuntimeError:(NSNotification *)notification {
+ NSError *error = [notification.userInfo objectForKey:AVCaptureSessionErrorKey];
+ RTCLogError(@"Capture session runtime error: %@", error);
+
+ [RTC_OBJC_TYPE(RTCDispatcher) dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
+ block:^{
+#if TARGET_OS_IPHONE
+ if (error.code == AVErrorMediaServicesWereReset) {
+ [self handleNonFatalError];
+ } else {
+ [self handleFatalError];
+ }
+#else
+ [self handleFatalError];
+#endif
+ }];
+}
+
+- (void)handleCaptureSessionDidStartRunning:(NSNotification *)notification {
+ RTCLog(@"Capture session started.");
+
+ [RTC_OBJC_TYPE(RTCDispatcher) dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
+ block:^{
+ // If we successfully restarted after an unknown
+ // error, allow future retries on fatal errors.
+ self.hasRetriedOnFatalError = NO;
+ }];
+}
+
+- (void)handleCaptureSessionDidStopRunning:(NSNotification *)notification {
+ RTCLog(@"Capture session stopped.");
+}
+
+- (void)handleFatalError {
+ [RTC_OBJC_TYPE(RTCDispatcher)
+ dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
+ block:^{
+ if (!self.hasRetriedOnFatalError) {
+ RTCLogWarning(@"Attempting to recover from fatal capture error.");
+ [self handleNonFatalError];
+ self.hasRetriedOnFatalError = YES;
+ } else {
+ RTCLogError(@"Previous fatal error recovery failed.");
+ }
+ }];
+}
+
+- (void)handleNonFatalError {
+ [RTC_OBJC_TYPE(RTCDispatcher) dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
+ block:^{
+ RTCLog(@"Restarting capture session after error.");
+ if (self.isRunning) {
+ [self.captureSession startRunning];
+ }
+ }];
+}
+
+#if TARGET_OS_IPHONE
+
+#pragma mark - UIApplication notifications
+
+- (void)handleApplicationDidBecomeActive:(NSNotification *)notification {
+ [RTC_OBJC_TYPE(RTCDispatcher)
+ dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
+ block:^{
+ if (self.isRunning && !self.captureSession.isRunning) {
+ RTCLog(@"Restarting capture session on active.");
+ [self.captureSession startRunning];
+ }
+ }];
+}
+
+#endif // TARGET_OS_IPHONE
+
+#pragma mark - Private
+
+- (dispatch_queue_t)frameQueue {
+ if (!_frameQueue) {
+ _frameQueue = RTCDispatchQueueCreateWithTarget(
+ "org.webrtc.cameravideocapturer.video",
+ DISPATCH_QUEUE_SERIAL,
+ dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_HIGH, 0));
+ }
+ return _frameQueue;
+}
+
+- (BOOL)setupCaptureSession:(AVCaptureSession *)captureSession {
+ NSAssert(_captureSession == nil, @"Setup capture session called twice.");
+ _captureSession = captureSession;
+#if defined(WEBRTC_IOS)
+ _captureSession.sessionPreset = AVCaptureSessionPresetInputPriority;
+ _captureSession.usesApplicationAudioSession = NO;
+#endif
+ [self setupVideoDataOutput];
+ // Add the output.
+ if (![_captureSession canAddOutput:_videoDataOutput]) {
+ RTCLogError(@"Video data output unsupported.");
+ return NO;
+ }
+ [_captureSession addOutput:_videoDataOutput];
+
+ return YES;
+}
+
+- (void)setupVideoDataOutput {
+ NSAssert(_videoDataOutput == nil, @"Setup video data output called twice.");
+ AVCaptureVideoDataOutput *videoDataOutput = [[AVCaptureVideoDataOutput alloc] init];
+
+ // `videoDataOutput.availableVideoCVPixelFormatTypes` returns the pixel formats supported by the
+ // device with the most efficient output format first. Find the first format that we support.
+ NSSet<NSNumber *> *supportedPixelFormats =
+ [RTC_OBJC_TYPE(RTCCVPixelBuffer) supportedPixelFormats];
+ NSMutableOrderedSet *availablePixelFormats =
+ [NSMutableOrderedSet orderedSetWithArray:videoDataOutput.availableVideoCVPixelFormatTypes];
+ [availablePixelFormats intersectSet:supportedPixelFormats];
+ NSNumber *pixelFormat = availablePixelFormats.firstObject;
+ NSAssert(pixelFormat, @"Output device has no supported formats.");
+
+ _preferredOutputPixelFormat = [pixelFormat unsignedIntValue];
+ _outputPixelFormat = _preferredOutputPixelFormat;
+ videoDataOutput.videoSettings = @{(NSString *)kCVPixelBufferPixelFormatTypeKey : pixelFormat};
+ videoDataOutput.alwaysDiscardsLateVideoFrames = NO;
+ [videoDataOutput setSampleBufferDelegate:self queue:self.frameQueue];
+ _videoDataOutput = videoDataOutput;
+}
+
+- (void)updateVideoDataOutputPixelFormat:(AVCaptureDeviceFormat *)format {
+ FourCharCode mediaSubType = CMFormatDescriptionGetMediaSubType(format.formatDescription);
+ if (![[RTC_OBJC_TYPE(RTCCVPixelBuffer) supportedPixelFormats] containsObject:@(mediaSubType)]) {
+ mediaSubType = _preferredOutputPixelFormat;
+ }
+
+ if (mediaSubType != _outputPixelFormat) {
+ _outputPixelFormat = mediaSubType;
+ }
+
+ // Update videoSettings with dimensions, as some virtual cameras, e.g. Snap Camera, may not work
+ // otherwise.
+ CMVideoDimensions dimensions = CMVideoFormatDescriptionGetDimensions(format.formatDescription);
+ _videoDataOutput.videoSettings = @{
+ (id)kCVPixelBufferWidthKey : @(dimensions.width),
+ (id)kCVPixelBufferHeightKey : @(dimensions.height),
+ (id)kCVPixelBufferPixelFormatTypeKey : @(_outputPixelFormat),
+ };
+}
+
+#pragma mark - Private, called inside capture queue
+
+- (void)updateDeviceCaptureFormat:(AVCaptureDeviceFormat *)format fps:(NSInteger)fps {
+ NSAssert([RTC_OBJC_TYPE(RTCDispatcher) isOnQueueForType:RTCDispatcherTypeCaptureSession],
+ @"updateDeviceCaptureFormat must be called on the capture queue.");
+ @try {
+ _currentDevice.activeFormat = format;
+ _currentDevice.activeVideoMinFrameDuration = CMTimeMake(1, fps);
+ } @catch (NSException *exception) {
+ RTCLogError(@"Failed to set active format!\n User info:%@", exception.userInfo);
+ return;
+ }
+}
+
+- (void)reconfigureCaptureSessionInput {
+ NSAssert([RTC_OBJC_TYPE(RTCDispatcher) isOnQueueForType:RTCDispatcherTypeCaptureSession],
+ @"reconfigureCaptureSessionInput must be called on the capture queue.");
+ NSError *error = nil;
+ AVCaptureDeviceInput *input =
+ [AVCaptureDeviceInput deviceInputWithDevice:_currentDevice error:&error];
+ if (!input) {
+ RTCLogError(@"Failed to create front camera input: %@", error.localizedDescription);
+ return;
+ }
+ [_captureSession beginConfiguration];
+ for (AVCaptureDeviceInput *oldInput in [_captureSession.inputs copy]) {
+ [_captureSession removeInput:oldInput];
+ }
+ if ([_captureSession canAddInput:input]) {
+ [_captureSession addInput:input];
+ } else {
+ RTCLogError(@"Cannot add camera as an input to the session.");
+ }
+ [_captureSession commitConfiguration];
+}
+
+- (void)updateOrientation {
+ NSAssert([RTC_OBJC_TYPE(RTCDispatcher) isOnQueueForType:RTCDispatcherTypeCaptureSession],
+ @"updateOrientation must be called on the capture queue.");
+#if TARGET_OS_IPHONE
+ _orientation = [UIDevice currentDevice].orientation;
+#endif
+}
+
+@end
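
Frames leave the capturer only through the delegate call at the end of
-captureOutput:didOutputSampleBuffer:fromConnection: above. A hypothetical
delegate for illustration (in real use the delegate is typically an
RTCVideoSource rather than a custom class):

    @interface FrameLogger : NSObject <RTC_OBJC_TYPE(RTCVideoCapturerDelegate)>
    @end

    @implementation FrameLogger
    - (void)capturer:(RTC_OBJC_TYPE(RTCVideoCapturer) *)capturer
        didCaptureVideoFrame:(RTC_OBJC_TYPE(RTCVideoFrame) *)frame {
      // timeStampNs was derived from the sample buffer's presentation time;
      // rotation reflects the device orientation tracked above.
      RTCLog(@"Frame %dx%d, rotation %ld, t=%lld ns",
             frame.width, frame.height, (long)frame.rotation, frame.timeStampNs);
    }
    @end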
diff --git a/third_party/libwebrtc/sdk/objc/components/capturer/RTCFileVideoCapturer.h b/third_party/libwebrtc/sdk/objc/components/capturer/RTCFileVideoCapturer.h
new file mode 100644
index 0000000000..19262c64cf
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/capturer/RTCFileVideoCapturer.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2017 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <Foundation/Foundation.h>
+
+#import "RTCVideoCapturer.h"
+
+NS_ASSUME_NONNULL_BEGIN
+
+/**
+ * Error passing block.
+ */
+typedef void (^RTCFileVideoCapturerErrorBlock)(NSError *error);
+
+/**
+ * Captures buffers from a bundled video file.
+ *
+ * See @c RTCVideoCapturer for more info on capturers.
+ */
+RTC_OBJC_EXPORT
+
+NS_CLASS_AVAILABLE_IOS(10)
+@interface RTC_OBJC_TYPE (RTCFileVideoCapturer) : RTC_OBJC_TYPE(RTCVideoCapturer)
+
+/**
+ * Starts asynchronous capture of frames from a video file.
+ *
+ * Capturing is not started if an error occurs. The underlying error will be
+ * relayed via the errorBlock if one is provided.
+ * Successfully captured video frames will be passed to the delegate.
+ *
+ * @param nameOfFile The name of the bundled video file to be read.
+ * @param errorBlock Block to be executed upon error.
+ */
+- (void)startCapturingFromFileNamed:(NSString *)nameOfFile
+ onError:(__nullable RTCFileVideoCapturerErrorBlock)errorBlock;
+
+/**
+ * Immediately stops capture.
+ */
+- (void)stopCapture;
+@end
+
+NS_ASSUME_NONNULL_END
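
Usage mirrors the camera capturer (a sketch; the file name and the
`videoSource` delegate are placeholders for whatever the app bundles and
wires up):

    RTC_OBJC_TYPE(RTCFileVideoCapturer) *fileCapturer =
        [[RTC_OBJC_TYPE(RTCFileVideoCapturer) alloc] initWithDelegate:videoSource];
    [fileCapturer startCapturingFromFileNamed:@"sample.mp4"
                                      onError:^(NSError *error) {
                                        RTCLog(@"File capture error: %@", error);
                                      }];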
diff --git a/third_party/libwebrtc/sdk/objc/components/capturer/RTCFileVideoCapturer.m b/third_party/libwebrtc/sdk/objc/components/capturer/RTCFileVideoCapturer.m
new file mode 100644
index 0000000000..bcf1506259
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/capturer/RTCFileVideoCapturer.m
@@ -0,0 +1,215 @@
+/**
+ * Copyright 2017 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "RTCFileVideoCapturer.h"
+
+#import "base/RTCLogging.h"
+#import "base/RTCVideoFrameBuffer.h"
+#import "components/video_frame_buffer/RTCCVPixelBuffer.h"
+#include "rtc_base/system/gcd_helpers.h"
+
+NSString *const kRTCFileVideoCapturerErrorDomain =
+ @"org.webrtc.RTC_OBJC_TYPE(RTCFileVideoCapturer)";
+
+typedef NS_ENUM(NSInteger, RTCFileVideoCapturerErrorCode) {
+ RTCFileVideoCapturerErrorCode_CapturerRunning = 2000,
+ RTCFileVideoCapturerErrorCode_FileNotFound
+};
+
+typedef NS_ENUM(NSInteger, RTCFileVideoCapturerStatus) {
+ RTCFileVideoCapturerStatusNotInitialized,
+ RTCFileVideoCapturerStatusStarted,
+ RTCFileVideoCapturerStatusStopped
+};
+
+@interface RTC_OBJC_TYPE (RTCFileVideoCapturer) ()
+@property(nonatomic, assign) CMTime lastPresentationTime;
+@property(nonatomic, strong) NSURL *fileURL;
+@end
+
+@implementation RTC_OBJC_TYPE (RTCFileVideoCapturer) {
+ AVAssetReader *_reader;
+ AVAssetReaderTrackOutput *_outTrack;
+ RTCFileVideoCapturerStatus _status;
+ dispatch_queue_t _frameQueue;
+}
+
+@synthesize lastPresentationTime = _lastPresentationTime;
+@synthesize fileURL = _fileURL;
+
+- (void)startCapturingFromFileNamed:(NSString *)nameOfFile
+ onError:(RTCFileVideoCapturerErrorBlock)errorBlock {
+ if (_status == RTCFileVideoCapturerStatusStarted) {
+    // The failure reason is a plain string, so surface it through
+    // NSLocalizedDescriptionKey (NSUnderlyingErrorKey expects an NSError).
+    NSError *error =
+        [NSError errorWithDomain:kRTCFileVideoCapturerErrorDomain
+                            code:RTCFileVideoCapturerErrorCode_CapturerRunning
+                        userInfo:@{NSLocalizedDescriptionKey : @"Capturer has been started."}];
+
+ errorBlock(error);
+ return;
+ } else {
+ _status = RTCFileVideoCapturerStatusStarted;
+ }
+
+ dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
+ NSString *pathForFile = [self pathForFileName:nameOfFile];
+ if (!pathForFile) {
+ NSString *errorString =
+ [NSString stringWithFormat:@"File %@ not found in bundle", nameOfFile];
+      NSError *error = [NSError errorWithDomain:kRTCFileVideoCapturerErrorDomain
+                                           code:RTCFileVideoCapturerErrorCode_FileNotFound
+                                       userInfo:@{NSLocalizedDescriptionKey : errorString}];
+ errorBlock(error);
+ return;
+ }
+
+    // kCMTimeZero is a valid zero time; CMTimeMake(0, 0) would produce an
+    // invalid CMTime with a zero timescale.
+    self.lastPresentationTime = kCMTimeZero;
+
+ self.fileURL = [NSURL fileURLWithPath:pathForFile];
+ [self setupReaderOnError:errorBlock];
+ });
+}
+
+- (void)setupReaderOnError:(RTCFileVideoCapturerErrorBlock)errorBlock {
+ AVURLAsset *asset = [AVURLAsset URLAssetWithURL:_fileURL options:nil];
+
+ NSArray *allTracks = [asset tracksWithMediaType:AVMediaTypeVideo];
+ NSError *error = nil;
+
+ _reader = [[AVAssetReader alloc] initWithAsset:asset error:&error];
+ if (error) {
+ errorBlock(error);
+ return;
+ }
+
+ NSDictionary *options = @{
+ (NSString *)kCVPixelBufferPixelFormatTypeKey : @(kCVPixelFormatType_420YpCbCr8BiPlanarFullRange)
+ };
+ _outTrack =
+ [[AVAssetReaderTrackOutput alloc] initWithTrack:allTracks.firstObject outputSettings:options];
+ [_reader addOutput:_outTrack];
+
+ [_reader startReading];
+ RTCLog(@"File capturer started reading");
+ [self readNextBuffer];
+}
+
+- (void)stopCapture {
+ _status = RTCFileVideoCapturerStatusStopped;
+ RTCLog(@"File capturer stopped.");
+}
+
+#pragma mark - Private
+
+- (nullable NSString *)pathForFileName:(NSString *)fileName {
+ NSArray *nameComponents = [fileName componentsSeparatedByString:@"."];
+ if (nameComponents.count != 2) {
+ return nil;
+ }
+
+ NSString *path =
+ [[NSBundle mainBundle] pathForResource:nameComponents[0] ofType:nameComponents[1]];
+ return path;
+}
+
+- (dispatch_queue_t)frameQueue {
+ if (!_frameQueue) {
+ _frameQueue = RTCDispatchQueueCreateWithTarget(
+ "org.webrtc.filecapturer.video",
+ DISPATCH_QUEUE_SERIAL,
+ dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_BACKGROUND, 0));
+ }
+ return _frameQueue;
+}
+
+- (void)readNextBuffer {
+ if (_status == RTCFileVideoCapturerStatusStopped) {
+ [_reader cancelReading];
+ _reader = nil;
+ return;
+ }
+
+ if (_reader.status == AVAssetReaderStatusCompleted) {
+ [_reader cancelReading];
+ _reader = nil;
+ [self setupReaderOnError:nil];
+ return;
+ }
+
+ CMSampleBufferRef sampleBuffer = [_outTrack copyNextSampleBuffer];
+ if (!sampleBuffer) {
+ [self readNextBuffer];
+ return;
+ }
+ if (CMSampleBufferGetNumSamples(sampleBuffer) != 1 || !CMSampleBufferIsValid(sampleBuffer) ||
+ !CMSampleBufferDataIsReady(sampleBuffer)) {
+ CFRelease(sampleBuffer);
+ [self readNextBuffer];
+ return;
+ }
+
+ [self publishSampleBuffer:sampleBuffer];
+}
+
+- (void)publishSampleBuffer:(CMSampleBufferRef)sampleBuffer {
+ CMTime presentationTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
+ Float64 presentationDifference =
+ CMTimeGetSeconds(CMTimeSubtract(presentationTime, _lastPresentationTime));
+ _lastPresentationTime = presentationTime;
+  int64_t presentationDifferenceRound = llround(presentationDifference * NSEC_PER_SEC);
+
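+  // Pace delivery at roughly the file's native frame rate: arm a one-shot
+  // timer for the inter-frame presentation delta and hand the frame to the
+  // delegate when it fires.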
+ __block dispatch_source_t timer = [self createStrictTimer];
+ // Strict timer that will fire `presentationDifferenceRound` ns from now and never again.
+ dispatch_source_set_timer(timer,
+ dispatch_time(DISPATCH_TIME_NOW, presentationDifferenceRound),
+ DISPATCH_TIME_FOREVER,
+ 0);
+ dispatch_source_set_event_handler(timer, ^{
+ dispatch_source_cancel(timer);
+ timer = nil;
+
+ CVPixelBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
+ if (!pixelBuffer) {
+ CFRelease(sampleBuffer);
+ dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
+ [self readNextBuffer];
+ });
+ return;
+ }
+
+ RTC_OBJC_TYPE(RTCCVPixelBuffer) *rtcPixelBuffer =
+ [[RTC_OBJC_TYPE(RTCCVPixelBuffer) alloc] initWithPixelBuffer:pixelBuffer];
+ NSTimeInterval timeStampSeconds = CACurrentMediaTime();
+    int64_t timeStampNs = llround(timeStampSeconds * NSEC_PER_SEC);
+ RTC_OBJC_TYPE(RTCVideoFrame) *videoFrame =
+ [[RTC_OBJC_TYPE(RTCVideoFrame) alloc] initWithBuffer:rtcPixelBuffer
+ rotation:0
+ timeStampNs:timeStampNs];
+ CFRelease(sampleBuffer);
+
+ dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
+ [self readNextBuffer];
+ });
+
+ [self.delegate capturer:self didCaptureVideoFrame:videoFrame];
+ });
+ dispatch_activate(timer);
+}
+
+- (dispatch_source_t)createStrictTimer {
+ dispatch_source_t timer = dispatch_source_create(
+ DISPATCH_SOURCE_TYPE_TIMER, 0, DISPATCH_TIMER_STRICT, [self frameQueue]);
+ return timer;
+}
+
+- (void)dealloc {
+ [self stopCapture];
+}
+
+@end
diff --git a/third_party/libwebrtc/sdk/objc/components/network/RTCNetworkMonitor+Private.h b/third_party/libwebrtc/sdk/objc/components/network/RTCNetworkMonitor+Private.h
new file mode 100644
index 0000000000..b5c786be18
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/network/RTCNetworkMonitor+Private.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "RTCNetworkMonitor.h"
+
+#include "sdk/objc/native/src/network_monitor_observer.h"
+
+@interface RTCNetworkMonitor ()
+
+/** `observer` is a raw pointer; the object it points to must be kept
+ * alive for this object's lifetime.
+ */
+- (instancetype)initWithObserver:(webrtc::NetworkMonitorObserver *)observer
+ NS_DESIGNATED_INITIALIZER;
+
+/** Stops the receiver from posting updates to `observer`. */
+- (void)stop;
+
+@end
diff --git a/third_party/libwebrtc/sdk/objc/components/network/RTCNetworkMonitor.h b/third_party/libwebrtc/sdk/objc/components/network/RTCNetworkMonitor.h
new file mode 100644
index 0000000000..21d22f5463
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/network/RTCNetworkMonitor.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <Foundation/Foundation.h>
+
+NS_ASSUME_NONNULL_BEGIN
+
+/** Listens for NWPathMonitor updates and forwards the results to a C++
+ * observer.
+ */
+@interface RTCNetworkMonitor : NSObject
+
+- (instancetype)init NS_UNAVAILABLE;
+
+@end
+
+NS_ASSUME_NONNULL_END
diff --git a/third_party/libwebrtc/sdk/objc/components/network/RTCNetworkMonitor.mm b/third_party/libwebrtc/sdk/objc/components/network/RTCNetworkMonitor.mm
new file mode 100644
index 0000000000..7e75b2b4c0
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/network/RTCNetworkMonitor.mm
@@ -0,0 +1,126 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "RTCNetworkMonitor+Private.h"
+
+#import <Network/Network.h>
+
+#import "base/RTCLogging.h"
+#import "helpers/RTCDispatcher+Private.h"
+
+#include "rtc_base/string_utils.h"
+
+namespace {
+
+rtc::AdapterType AdapterTypeFromInterfaceType(nw_interface_type_t interfaceType) {
+ rtc::AdapterType adapterType = rtc::ADAPTER_TYPE_UNKNOWN;
+ switch (interfaceType) {
+ case nw_interface_type_other:
+ adapterType = rtc::ADAPTER_TYPE_UNKNOWN;
+ break;
+ case nw_interface_type_wifi:
+ adapterType = rtc::ADAPTER_TYPE_WIFI;
+ break;
+ case nw_interface_type_cellular:
+ adapterType = rtc::ADAPTER_TYPE_CELLULAR;
+ break;
+ case nw_interface_type_wired:
+ adapterType = rtc::ADAPTER_TYPE_ETHERNET;
+ break;
+ case nw_interface_type_loopback:
+ adapterType = rtc::ADAPTER_TYPE_LOOPBACK;
+ break;
+ default:
+ adapterType = rtc::ADAPTER_TYPE_UNKNOWN;
+ break;
+ }
+ return adapterType;
+}
+
+} // namespace
+
+@implementation RTCNetworkMonitor {
+ webrtc::NetworkMonitorObserver *_observer;
+ nw_path_monitor_t _pathMonitor;
+ dispatch_queue_t _monitorQueue;
+}
+
+- (instancetype)initWithObserver:(webrtc::NetworkMonitorObserver *)observer {
+ RTC_DCHECK(observer);
+ if (self = [super init]) {
+ _observer = observer;
+ if (@available(iOS 12, *)) {
+ _pathMonitor = nw_path_monitor_create();
+ if (_pathMonitor == nil) {
+ RTCLog(@"nw_path_monitor_create failed.");
+ return nil;
+ }
+ RTCLog(@"NW path monitor created.");
+ __weak RTCNetworkMonitor *weakSelf = self;
+ nw_path_monitor_set_update_handler(_pathMonitor, ^(nw_path_t path) {
+ if (weakSelf == nil) {
+ return;
+ }
+ RTCNetworkMonitor *strongSelf = weakSelf;
+ RTCLog(@"NW path monitor: updated.");
+ nw_path_status_t status = nw_path_get_status(path);
+ if (status == nw_path_status_invalid) {
+ RTCLog(@"NW path monitor status: invalid.");
+ } else if (status == nw_path_status_unsatisfied) {
+ RTCLog(@"NW path monitor status: unsatisfied.");
+ } else if (status == nw_path_status_satisfied) {
+ RTCLog(@"NW path monitor status: satisfied.");
+ } else if (status == nw_path_status_satisfiable) {
+ RTCLog(@"NW path monitor status: satisfiable.");
+ }
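+          // The map is heap-allocated because the enumeration block below
+          // captures it; a stack-allocated std::map would be captured by
+          // const copy, hiding the insertions from this scope.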
+ std::map<std::string, rtc::AdapterType, rtc::AbslStringViewCmp> *map =
+ new std::map<std::string, rtc::AdapterType, rtc::AbslStringViewCmp>();
+ nw_path_enumerate_interfaces(
+ path, (nw_path_enumerate_interfaces_block_t) ^ (nw_interface_t interface) {
+ const char *name = nw_interface_get_name(interface);
+ nw_interface_type_t interfaceType = nw_interface_get_type(interface);
+ RTCLog(@"NW path monitor available interface: %s", name);
+ rtc::AdapterType adapterType = AdapterTypeFromInterfaceType(interfaceType);
+ map->insert(std::pair<std::string, rtc::AdapterType>(name, adapterType));
+ });
+ @synchronized(strongSelf) {
+ webrtc::NetworkMonitorObserver *observer = strongSelf->_observer;
+ if (observer) {
+ observer->OnPathUpdate(std::move(*map));
+ }
+ }
+ delete map;
+ });
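+      // Updates arrive on the shared network-monitor dispatch queue set
+      // below; `stop` nils out `_observer` under @synchronized, so a late
+      // update cannot reach a destroyed C++ observer.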
+ nw_path_monitor_set_queue(
+ _pathMonitor,
+ [RTC_OBJC_TYPE(RTCDispatcher) dispatchQueueForType:RTCDispatcherTypeNetworkMonitor]);
+ nw_path_monitor_start(_pathMonitor);
+ }
+ }
+ return self;
+}
+
+- (void)cancel {
+ if (@available(iOS 12, *)) {
+ nw_path_monitor_cancel(_pathMonitor);
+ }
+}
+
+- (void)stop {
+ [self cancel];
+ @synchronized(self) {
+ _observer = nil;
+ }
+}
+
+- (void)dealloc {
+ [self cancel];
+}
+
+@end
diff --git a/third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLI420Renderer.h b/third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLI420Renderer.h
new file mode 100644
index 0000000000..e5987fe22a
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLI420Renderer.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <Foundation/Foundation.h>
+
+#import "RTCMTLRenderer.h"
+
+NS_AVAILABLE(10_11, 9_0)
+@interface RTCMTLI420Renderer : RTCMTLRenderer
+@end
diff --git a/third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLI420Renderer.mm b/third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLI420Renderer.mm
new file mode 100644
index 0000000000..f4c76fa313
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLI420Renderer.mm
@@ -0,0 +1,170 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "RTCMTLI420Renderer.h"
+
+#import <Metal/Metal.h>
+#import <MetalKit/MetalKit.h>
+
+#import "base/RTCI420Buffer.h"
+#import "base/RTCLogging.h"
+#import "base/RTCVideoFrame.h"
+#import "base/RTCVideoFrameBuffer.h"
+
+#import "RTCMTLRenderer+Private.h"
+
+static NSString *const shaderSource = MTL_STRINGIFY(
+ using namespace metal;
+
+ typedef struct {
+ packed_float2 position;
+ packed_float2 texcoord;
+ } Vertex;
+
+ typedef struct {
+ float4 position[[position]];
+ float2 texcoord;
+ } Varyings;
+
+    vertex Varyings vertexPassthrough(constant Vertex *vertices[[buffer(0)]],
+ unsigned int vid[[vertex_id]]) {
+ Varyings out;
+      constant Vertex &v = vertices[vid];
+ out.position = float4(float2(v.position), 0.0, 1.0);
+ out.texcoord = v.texcoord;
+
+ return out;
+ }
+
+ fragment half4 fragmentColorConversion(
+ Varyings in[[stage_in]],
+ texture2d<float, access::sample> textureY[[texture(0)]],
+ texture2d<float, access::sample> textureU[[texture(1)]],
+ texture2d<float, access::sample> textureV[[texture(2)]]) {
+ constexpr sampler s(address::clamp_to_edge, filter::linear);
+ float y;
+ float u;
+ float v;
+ float r;
+ float g;
+ float b;
+      // Conversion for YUV to RGB from http://www.fourcc.org/fccyvrgb.php
+ y = textureY.sample(s, in.texcoord).r;
+ u = textureU.sample(s, in.texcoord).r;
+ v = textureV.sample(s, in.texcoord).r;
+ u = u - 0.5;
+ v = v - 0.5;
+ r = y + 1.403 * v;
+ g = y - 0.344 * u - 0.714 * v;
+ b = y + 1.770 * u;
+
+ float4 out = float4(r, g, b, 1.0);
+
+ return half4(out);
+ });
+
+@implementation RTCMTLI420Renderer {
+ // Textures.
+ id<MTLTexture> _yTexture;
+ id<MTLTexture> _uTexture;
+ id<MTLTexture> _vTexture;
+
+ MTLTextureDescriptor *_descriptor;
+ MTLTextureDescriptor *_chromaDescriptor;
+
+ int _width;
+ int _height;
+ int _chromaWidth;
+ int _chromaHeight;
+}
+
+#pragma mark - Virtual
+
+- (NSString *)shaderSource {
+ return shaderSource;
+}
+
+- (void)getWidth:(nonnull int *)width
+ height:(nonnull int *)height
+ cropWidth:(nonnull int *)cropWidth
+ cropHeight:(nonnull int *)cropHeight
+ cropX:(nonnull int *)cropX
+ cropY:(nonnull int *)cropY
+ ofFrame:(nonnull RTC_OBJC_TYPE(RTCVideoFrame) *)frame {
+ *width = frame.width;
+ *height = frame.height;
+ *cropWidth = frame.width;
+ *cropHeight = frame.height;
+ *cropX = 0;
+ *cropY = 0;
+}
+
+- (BOOL)setupTexturesForFrame:(nonnull RTC_OBJC_TYPE(RTCVideoFrame) *)frame {
+ if (![super setupTexturesForFrame:frame]) {
+ return NO;
+ }
+
+ id<MTLDevice> device = [self currentMetalDevice];
+ if (!device) {
+ return NO;
+ }
+
+ id<RTC_OBJC_TYPE(RTCI420Buffer)> buffer = [frame.buffer toI420];
+
+ // Luma (y) texture.
+ if (!_descriptor || _width != frame.width || _height != frame.height) {
+ _width = frame.width;
+ _height = frame.height;
+ _descriptor = [MTLTextureDescriptor texture2DDescriptorWithPixelFormat:MTLPixelFormatR8Unorm
+ width:_width
+ height:_height
+ mipmapped:NO];
+ _descriptor.usage = MTLTextureUsageShaderRead;
+ _yTexture = [device newTextureWithDescriptor:_descriptor];
+ }
+
+  [_yTexture replaceRegion:MTLRegionMake2D(0, 0, _width, _height)
+               mipmapLevel:0
+                 withBytes:buffer.dataY
+               bytesPerRow:buffer.strideY];
+
+  // Chroma (u,v) textures.
+  if (!_chromaDescriptor || _chromaWidth != frame.width / 2 || _chromaHeight != frame.height / 2) {
+ _chromaWidth = frame.width / 2;
+ _chromaHeight = frame.height / 2;
+ _chromaDescriptor =
+ [MTLTextureDescriptor texture2DDescriptorWithPixelFormat:MTLPixelFormatR8Unorm
+ width:_chromaWidth
+ height:_chromaHeight
+ mipmapped:NO];
+ _chromaDescriptor.usage = MTLTextureUsageShaderRead;
+ _uTexture = [device newTextureWithDescriptor:_chromaDescriptor];
+ _vTexture = [device newTextureWithDescriptor:_chromaDescriptor];
+ }
+
+ [_uTexture replaceRegion:MTLRegionMake2D(0, 0, _chromaWidth, _chromaHeight)
+ mipmapLevel:0
+ withBytes:buffer.dataU
+ bytesPerRow:buffer.strideU];
+ [_vTexture replaceRegion:MTLRegionMake2D(0, 0, _chromaWidth, _chromaHeight)
+ mipmapLevel:0
+ withBytes:buffer.dataV
+ bytesPerRow:buffer.strideV];
+
+ return (_uTexture != nil) && (_yTexture != nil) && (_vTexture != nil);
+}
+
+- (void)uploadTexturesToRenderEncoder:(id<MTLRenderCommandEncoder>)renderEncoder {
+ [renderEncoder setFragmentTexture:_yTexture atIndex:0];
+ [renderEncoder setFragmentTexture:_uTexture atIndex:1];
+ [renderEncoder setFragmentTexture:_vTexture atIndex:2];
+}
+
+@end
diff --git a/third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLNSVideoView.h b/third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLNSVideoView.h
new file mode 100644
index 0000000000..f70e2ad5ee
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLNSVideoView.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <AppKit/AppKit.h>
+
+#import "RTCVideoRenderer.h"
+
+NS_AVAILABLE_MAC(10.11)
+
+RTC_OBJC_EXPORT
+@interface RTC_OBJC_TYPE (RTCMTLNSVideoView) : NSView <RTC_OBJC_TYPE(RTCVideoRenderer)>
+
+@property(nonatomic, weak) id<RTC_OBJC_TYPE(RTCVideoViewDelegate)> delegate;
+
++ (BOOL)isMetalAvailable;
+
+@end
diff --git a/third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLNSVideoView.m b/third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLNSVideoView.m
new file mode 100644
index 0000000000..625fb1caa7
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLNSVideoView.m
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "RTCMTLNSVideoView.h"
+
+#import <Metal/Metal.h>
+#import <MetalKit/MetalKit.h>
+
+#import "base/RTCVideoFrame.h"
+
+#import "RTCMTLI420Renderer.h"
+
+@interface RTC_OBJC_TYPE (RTCMTLNSVideoView)
+()<MTKViewDelegate> @property(nonatomic) id<RTCMTLRenderer> renderer;
+@property(nonatomic, strong) MTKView *metalView;
+@property(atomic, strong) RTC_OBJC_TYPE(RTCVideoFrame) * videoFrame;
+@end
+
+@implementation RTC_OBJC_TYPE (RTCMTLNSVideoView) {
+ id<RTCMTLRenderer> _renderer;
+}
+
+@synthesize delegate = _delegate;
+@synthesize renderer = _renderer;
+@synthesize metalView = _metalView;
+@synthesize videoFrame = _videoFrame;
+
+- (instancetype)initWithFrame:(CGRect)frameRect {
+ self = [super initWithFrame:frameRect];
+ if (self) {
+ [self configure];
+ }
+ return self;
+}
+
+- (instancetype)initWithCoder:(NSCoder *)aCoder {
+ self = [super initWithCoder:aCoder];
+ if (self) {
+ [self configure];
+ }
+ return self;
+}
+
+#pragma mark - Private
+
++ (BOOL)isMetalAvailable {
+ return [MTLCopyAllDevices() count] > 0;
+}
+
+- (void)configure {
+ if ([[self class] isMetalAvailable]) {
+ _metalView = [[MTKView alloc] initWithFrame:self.bounds];
+ [self addSubview:_metalView];
+ _metalView.layerContentsPlacement = NSViewLayerContentsPlacementScaleProportionallyToFit;
+ _metalView.translatesAutoresizingMaskIntoConstraints = NO;
+ _metalView.framebufferOnly = YES;
+ _metalView.delegate = self;
+
+ _renderer = [[RTCMTLI420Renderer alloc] init];
+ if (![(RTCMTLI420Renderer *)_renderer addRenderingDestination:_metalView]) {
+ _renderer = nil;
+    }
+ }
+}
+
+- (void)updateConstraints {
+ NSDictionary *views = NSDictionaryOfVariableBindings(_metalView);
+
+ NSArray *constraintsHorizontal =
+ [NSLayoutConstraint constraintsWithVisualFormat:@"H:|-0-[_metalView]-0-|"
+ options:0
+ metrics:nil
+ views:views];
+ [self addConstraints:constraintsHorizontal];
+
+ NSArray *constraintsVertical =
+ [NSLayoutConstraint constraintsWithVisualFormat:@"V:|-0-[_metalView]-0-|"
+ options:0
+ metrics:nil
+ views:views];
+ [self addConstraints:constraintsVertical];
+ [super updateConstraints];
+}
+
+#pragma mark - MTKViewDelegate methods
+- (void)drawInMTKView:(nonnull MTKView *)view {
+ if (self.videoFrame == nil) {
+ return;
+ }
+ if (view == self.metalView) {
+ [_renderer drawFrame:self.videoFrame];
+ }
+}
+
+- (void)mtkView:(MTKView *)view drawableSizeWillChange:(CGSize)size {
+}
+
+#pragma mark - RTC_OBJC_TYPE(RTCVideoRenderer)
+
+- (void)setSize:(CGSize)size {
+ _metalView.drawableSize = size;
+ dispatch_async(dispatch_get_main_queue(), ^{
+ [self.delegate videoView:self didChangeVideoSize:size];
+ });
+ [_metalView draw];
+}
+
+- (void)renderFrame:(nullable RTC_OBJC_TYPE(RTCVideoFrame) *)frame {
+ if (frame == nil) {
+ return;
+ }
+ self.videoFrame = [frame newI420VideoFrame];
+}
+
+@end
diff --git a/third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLNV12Renderer.h b/third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLNV12Renderer.h
new file mode 100644
index 0000000000..866b7ea17e
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLNV12Renderer.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2017 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <Foundation/Foundation.h>
+
+#import "RTCMTLRenderer.h"
+
+NS_AVAILABLE(10_11, 9_0)
+@interface RTCMTLNV12Renderer : RTCMTLRenderer
+
+@end
diff --git a/third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLNV12Renderer.mm b/third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLNV12Renderer.mm
new file mode 100644
index 0000000000..7b037c6dbc
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLNV12Renderer.mm
@@ -0,0 +1,164 @@
+/*
+ * Copyright 2017 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "RTCMTLNV12Renderer.h"
+
+#import <Metal/Metal.h>
+#import <MetalKit/MetalKit.h>
+
+#import "RTCMTLRenderer+Private.h"
+#import "base/RTCLogging.h"
+#import "base/RTCVideoFrame.h"
+#import "base/RTCVideoFrameBuffer.h"
+#import "components/video_frame_buffer/RTCCVPixelBuffer.h"
+
+#include "rtc_base/checks.h"
+
+static NSString *const shaderSource = MTL_STRINGIFY(
+ using namespace metal;
+
+ typedef struct {
+ packed_float2 position;
+ packed_float2 texcoord;
+ } Vertex;
+
+ typedef struct {
+ float4 position[[position]];
+ float2 texcoord;
+ } Varyings;
+
+    vertex Varyings vertexPassthrough(constant Vertex *vertices[[buffer(0)]],
+ unsigned int vid[[vertex_id]]) {
+ Varyings out;
+      constant Vertex &v = vertices[vid];
+ out.position = float4(float2(v.position), 0.0, 1.0);
+ out.texcoord = v.texcoord;
+ return out;
+ }
+
+ // Receiving YCrCb textures.
+ fragment half4 fragmentColorConversion(
+ Varyings in[[stage_in]],
+ texture2d<float, access::sample> textureY[[texture(0)]],
+ texture2d<float, access::sample> textureCbCr[[texture(1)]]) {
+ constexpr sampler s(address::clamp_to_edge, filter::linear);
+ float y;
+ float2 uv;
+ y = textureY.sample(s, in.texcoord).r;
+ uv = textureCbCr.sample(s, in.texcoord).rg - float2(0.5, 0.5);
+
+      // Conversion for YUV to RGB from http://www.fourcc.org/fccyvrgb.php
+ float4 out = float4(y + 1.403 * uv.y, y - 0.344 * uv.x - 0.714 * uv.y, y + 1.770 * uv.x, 1.0);
+
+ return half4(out);
+ });
+
+@implementation RTCMTLNV12Renderer {
+ // Textures.
+ CVMetalTextureCacheRef _textureCache;
+ id<MTLTexture> _yTexture;
+ id<MTLTexture> _CrCbTexture;
+}
+
+- (BOOL)addRenderingDestination:(__kindof MTKView *)view {
+ if ([super addRenderingDestination:view]) {
+ return [self initializeTextureCache];
+ }
+ return NO;
+}
+
+- (BOOL)initializeTextureCache {
+ CVReturn status = CVMetalTextureCacheCreate(kCFAllocatorDefault, nil, [self currentMetalDevice],
+ nil, &_textureCache);
+ if (status != kCVReturnSuccess) {
+ RTCLogError(@"Metal: Failed to initialize metal texture cache. Return status is %d", status);
+ return NO;
+ }
+
+ return YES;
+}
+
+- (NSString *)shaderSource {
+ return shaderSource;
+}
+
+- (void)getWidth:(nonnull int *)width
+ height:(nonnull int *)height
+ cropWidth:(nonnull int *)cropWidth
+ cropHeight:(nonnull int *)cropHeight
+ cropX:(nonnull int *)cropX
+ cropY:(nonnull int *)cropY
+ ofFrame:(nonnull RTC_OBJC_TYPE(RTCVideoFrame) *)frame {
+ RTC_OBJC_TYPE(RTCCVPixelBuffer) *pixelBuffer = (RTC_OBJC_TYPE(RTCCVPixelBuffer) *)frame.buffer;
+ *width = CVPixelBufferGetWidth(pixelBuffer.pixelBuffer);
+ *height = CVPixelBufferGetHeight(pixelBuffer.pixelBuffer);
+ *cropWidth = pixelBuffer.cropWidth;
+ *cropHeight = pixelBuffer.cropHeight;
+ *cropX = pixelBuffer.cropX;
+ *cropY = pixelBuffer.cropY;
+}
+
+- (BOOL)setupTexturesForFrame:(nonnull RTC_OBJC_TYPE(RTCVideoFrame) *)frame {
+ RTC_DCHECK([frame.buffer isKindOfClass:[RTC_OBJC_TYPE(RTCCVPixelBuffer) class]]);
+ if (![super setupTexturesForFrame:frame]) {
+ return NO;
+ }
+ CVPixelBufferRef pixelBuffer = ((RTC_OBJC_TYPE(RTCCVPixelBuffer) *)frame.buffer).pixelBuffer;
+
+ id<MTLTexture> lumaTexture = nil;
+ id<MTLTexture> chromaTexture = nil;
+ CVMetalTextureRef outTexture = nullptr;
+
+ // Luma (y) texture.
+ int lumaWidth = CVPixelBufferGetWidthOfPlane(pixelBuffer, 0);
+ int lumaHeight = CVPixelBufferGetHeightOfPlane(pixelBuffer, 0);
+
+ int indexPlane = 0;
+ CVReturn result = CVMetalTextureCacheCreateTextureFromImage(
+ kCFAllocatorDefault, _textureCache, pixelBuffer, nil, MTLPixelFormatR8Unorm, lumaWidth,
+ lumaHeight, indexPlane, &outTexture);
+
+ if (result == kCVReturnSuccess) {
+ lumaTexture = CVMetalTextureGetTexture(outTexture);
+ }
+
+ // Same as CFRelease except it can be passed NULL without crashing.
+ CVBufferRelease(outTexture);
+ outTexture = nullptr;
+
+ // Chroma (CrCb) texture.
+ indexPlane = 1;
+ result = CVMetalTextureCacheCreateTextureFromImage(
+ kCFAllocatorDefault, _textureCache, pixelBuffer, nil, MTLPixelFormatRG8Unorm, lumaWidth / 2,
+ lumaHeight / 2, indexPlane, &outTexture);
+ if (result == kCVReturnSuccess) {
+ chromaTexture = CVMetalTextureGetTexture(outTexture);
+ }
+ CVBufferRelease(outTexture);
+
+ if (lumaTexture != nil && chromaTexture != nil) {
+ _yTexture = lumaTexture;
+ _CrCbTexture = chromaTexture;
+ return YES;
+ }
+ return NO;
+}
+
+- (void)uploadTexturesToRenderEncoder:(id<MTLRenderCommandEncoder>)renderEncoder {
+ [renderEncoder setFragmentTexture:_yTexture atIndex:0];
+ [renderEncoder setFragmentTexture:_CrCbTexture atIndex:1];
+}
+
+- (void)dealloc {
+ if (_textureCache) {
+ CFRelease(_textureCache);
+ }
+}
+
+@end
diff --git a/third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLRGBRenderer.h b/third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLRGBRenderer.h
new file mode 100644
index 0000000000..9db422cd22
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLRGBRenderer.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2018 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <Foundation/Foundation.h>
+
+#import "RTCMTLRenderer.h"
+
+/** @abstract RGB/BGR renderer.
+ * @discussion This renderer handles both kCVPixelFormatType_32BGRA and
+ * kCVPixelFormatType_32ARGB.
+ */
+NS_AVAILABLE(10_11, 9_0)
+@interface RTCMTLRGBRenderer : RTCMTLRenderer
+
+@end
diff --git a/third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLRGBRenderer.mm b/third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLRGBRenderer.mm
new file mode 100644
index 0000000000..e5dc4ef80a
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLRGBRenderer.mm
@@ -0,0 +1,164 @@
+/*
+ * Copyright 2018 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "RTCMTLRGBRenderer.h"
+
+#import <Metal/Metal.h>
+#import <MetalKit/MetalKit.h>
+
+#import "RTCMTLRenderer+Private.h"
+#import "base/RTCLogging.h"
+#import "base/RTCVideoFrame.h"
+#import "base/RTCVideoFrameBuffer.h"
+#import "components/video_frame_buffer/RTCCVPixelBuffer.h"
+
+#include "rtc_base/checks.h"
+
+static NSString *const shaderSource = MTL_STRINGIFY(
+ using namespace metal;
+
+ typedef struct {
+ packed_float2 position;
+ packed_float2 texcoord;
+ } Vertex;
+
+ typedef struct {
+ float4 position[[position]];
+ float2 texcoord;
+ } VertexIO;
+
+    vertex VertexIO vertexPassthrough(constant Vertex *vertices[[buffer(0)]],
+ uint vid[[vertex_id]]) {
+ VertexIO out;
+      constant Vertex &v = vertices[vid];
+ out.position = float4(float2(v.position), 0.0, 1.0);
+ out.texcoord = v.texcoord;
+ return out;
+ }
+
+ fragment half4 fragmentColorConversion(VertexIO in[[stage_in]],
+ texture2d<half, access::sample> texture[[texture(0)]],
+ constant bool &isARGB[[buffer(0)]]) {
+ constexpr sampler s(address::clamp_to_edge, filter::linear);
+
+ half4 out = texture.sample(s, in.texcoord);
+ if (isARGB) {
+ out = half4(out.g, out.b, out.a, out.r);
+ }
+
+ return out;
+ });
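+// With kCVPixelFormatType_32ARGB sampled through MTLPixelFormatRGBA8Unorm,
+// the channels arrive as (a, r, g, b); the (g, b, a, r) swizzle above
+// restores (r, g, b, a).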
+
+@implementation RTCMTLRGBRenderer {
+ // Textures.
+ CVMetalTextureCacheRef _textureCache;
+ id<MTLTexture> _texture;
+
+ // Uniforms.
+ id<MTLBuffer> _uniformsBuffer;
+}
+
+- (BOOL)addRenderingDestination:(__kindof MTKView *)view {
+ if ([super addRenderingDestination:view]) {
+ return [self initializeTextureCache];
+ }
+ return NO;
+}
+
+- (BOOL)initializeTextureCache {
+ CVReturn status = CVMetalTextureCacheCreate(kCFAllocatorDefault, nil, [self currentMetalDevice],
+ nil, &_textureCache);
+ if (status != kCVReturnSuccess) {
+ RTCLogError(@"Metal: Failed to initialize metal texture cache. Return status is %d", status);
+ return NO;
+ }
+
+ return YES;
+}
+
+- (NSString *)shaderSource {
+ return shaderSource;
+}
+
+- (void)getWidth:(nonnull int *)width
+ height:(nonnull int *)height
+ cropWidth:(nonnull int *)cropWidth
+ cropHeight:(nonnull int *)cropHeight
+ cropX:(nonnull int *)cropX
+ cropY:(nonnull int *)cropY
+ ofFrame:(nonnull RTC_OBJC_TYPE(RTCVideoFrame) *)frame {
+ RTC_OBJC_TYPE(RTCCVPixelBuffer) *pixelBuffer = (RTC_OBJC_TYPE(RTCCVPixelBuffer) *)frame.buffer;
+ *width = CVPixelBufferGetWidth(pixelBuffer.pixelBuffer);
+ *height = CVPixelBufferGetHeight(pixelBuffer.pixelBuffer);
+ *cropWidth = pixelBuffer.cropWidth;
+ *cropHeight = pixelBuffer.cropHeight;
+ *cropX = pixelBuffer.cropX;
+ *cropY = pixelBuffer.cropY;
+}
+
+- (BOOL)setupTexturesForFrame:(nonnull RTC_OBJC_TYPE(RTCVideoFrame) *)frame {
+ RTC_DCHECK([frame.buffer isKindOfClass:[RTC_OBJC_TYPE(RTCCVPixelBuffer) class]]);
+ if (![super setupTexturesForFrame:frame]) {
+ return NO;
+ }
+ CVPixelBufferRef pixelBuffer = ((RTC_OBJC_TYPE(RTCCVPixelBuffer) *)frame.buffer).pixelBuffer;
+
+ id<MTLTexture> gpuTexture = nil;
+ CVMetalTextureRef textureOut = nullptr;
+ bool isARGB;
+
+ int width = CVPixelBufferGetWidth(pixelBuffer);
+ int height = CVPixelBufferGetHeight(pixelBuffer);
+ OSType pixelFormat = CVPixelBufferGetPixelFormatType(pixelBuffer);
+
+ MTLPixelFormat mtlPixelFormat;
+ if (pixelFormat == kCVPixelFormatType_32BGRA) {
+ mtlPixelFormat = MTLPixelFormatBGRA8Unorm;
+ isARGB = false;
+ } else if (pixelFormat == kCVPixelFormatType_32ARGB) {
+ mtlPixelFormat = MTLPixelFormatRGBA8Unorm;
+ isARGB = true;
+ } else {
+ RTC_DCHECK_NOTREACHED();
+ return NO;
+ }
+
+ CVReturn result = CVMetalTextureCacheCreateTextureFromImage(
+ kCFAllocatorDefault, _textureCache, pixelBuffer, nil, mtlPixelFormat,
+ width, height, 0, &textureOut);
+ if (result == kCVReturnSuccess) {
+ gpuTexture = CVMetalTextureGetTexture(textureOut);
+ }
+ CVBufferRelease(textureOut);
+
+ if (gpuTexture != nil) {
+ _texture = gpuTexture;
+ _uniformsBuffer =
+ [[self currentMetalDevice] newBufferWithBytes:&isARGB
+ length:sizeof(isARGB)
+ options:MTLResourceCPUCacheModeDefaultCache];
+ return YES;
+ }
+
+ return NO;
+}
+
+- (void)uploadTexturesToRenderEncoder:(id<MTLRenderCommandEncoder>)renderEncoder {
+ [renderEncoder setFragmentTexture:_texture atIndex:0];
+ [renderEncoder setFragmentBuffer:_uniformsBuffer offset:0 atIndex:0];
+}
+
+- (void)dealloc {
+ if (_textureCache) {
+ CFRelease(_textureCache);
+ }
+}
+
+@end
diff --git a/third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLRenderer+Private.h b/third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLRenderer+Private.h
new file mode 100644
index 0000000000..916d4d4430
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLRenderer+Private.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <Metal/Metal.h>
+
+#import "RTCMTLRenderer.h"
+
+#define MTL_STRINGIFY(s) @ #s
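+// Stringifies the macro argument and prefixes @ to form an NSString literal;
+// e.g. MTL_STRINGIFY(half4 x;) expands to @"half4 x;".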
+
+NS_ASSUME_NONNULL_BEGIN
+
+@interface RTCMTLRenderer (Private)
+- (nullable id<MTLDevice>)currentMetalDevice;
+- (NSString *)shaderSource;
+- (BOOL)setupTexturesForFrame:(nonnull RTC_OBJC_TYPE(RTCVideoFrame) *)frame;
+- (void)uploadTexturesToRenderEncoder:(id<MTLRenderCommandEncoder>)renderEncoder;
+- (void)getWidth:(nonnull int *)width
+ height:(nonnull int *)height
+ cropWidth:(nonnull int *)cropWidth
+ cropHeight:(nonnull int *)cropHeight
+ cropX:(nonnull int *)cropX
+ cropY:(nonnull int *)cropY
+ ofFrame:(nonnull RTC_OBJC_TYPE(RTCVideoFrame) *)frame;
+@end
+
+NS_ASSUME_NONNULL_END
diff --git a/third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLRenderer.h b/third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLRenderer.h
new file mode 100644
index 0000000000..aa31545973
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLRenderer.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <Foundation/Foundation.h>
+#if TARGET_OS_IPHONE
+#import <UIKit/UIKit.h>
+#else
+#import <AppKit/AppKit.h>
+#endif
+
+#import "base/RTCVideoFrame.h"
+
+NS_ASSUME_NONNULL_BEGIN
+/**
+ * Protocol defining ability to render RTCVideoFrame in Metal enabled views.
+ */
+@protocol RTCMTLRenderer <NSObject>
+
+/**
+ * Method to be implemented to perform actual rendering of the provided frame.
+ *
+ * @param frame The frame to be rendered.
+ */
+- (void)drawFrame:(RTC_OBJC_TYPE(RTCVideoFrame) *)frame;
+
+/**
+ * Sets the provided view as rendering destination if possible.
+ *
+ * If not possible, the method returns NO and callers of the method are
+ * responsible for performing cleanups.
+ */
+
+#if TARGET_OS_IOS
+- (BOOL)addRenderingDestination:(__kindof UIView *)view;
+#else
+- (BOOL)addRenderingDestination:(__kindof NSView *)view;
+#endif
+
+@end
+
+/**
+ * Implementation of RTCMTLRenderer protocol.
+ */
+NS_AVAILABLE(10_11, 9_0)
+@interface RTCMTLRenderer : NSObject <RTCMTLRenderer>
+
+/** @abstract A wrapped RTCVideoRotation, or nil.
+ @discussion When not nil, the rotation of the actual frame is ignored when rendering.
+ */
+@property(atomic, nullable) NSValue *rotationOverride;
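+
+/** A minimal usage sketch (assumes `view` is an MTKView already in the view
+ *  hierarchy and `frame` is a decoded RTCVideoFrame; the concrete subclass
+ *  here is illustrative):
+ *
+ *    RTCMTLRenderer *renderer = [[RTCMTLNV12Renderer alloc] init];
+ *    if ([renderer addRenderingDestination:view]) {
+ *      [renderer drawFrame:frame];
+ *    }
+ */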
+
+@end
+
+NS_ASSUME_NONNULL_END
diff --git a/third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLRenderer.mm b/third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLRenderer.mm
new file mode 100644
index 0000000000..410590a7b1
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLRenderer.mm
@@ -0,0 +1,328 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "RTCMTLRenderer+Private.h"
+
+#import <Metal/Metal.h>
+#import <MetalKit/MetalKit.h>
+
+#import "base/RTCLogging.h"
+#import "base/RTCVideoFrame.h"
+#import "base/RTCVideoFrameBuffer.h"
+
+#include "api/video/video_rotation.h"
+#include "rtc_base/checks.h"
+
+// As defined in shaderSource.
+static NSString *const vertexFunctionName = @"vertexPassthrough";
+static NSString *const fragmentFunctionName = @"fragmentColorConversion";
+
+static NSString *const pipelineDescriptorLabel = @"RTCPipeline";
+static NSString *const commandBufferLabel = @"RTCCommandBuffer";
+static NSString *const renderEncoderLabel = @"RTCEncoder";
+static NSString *const renderEncoderDebugGroup = @"RTCDrawFrame";
+
+// Computes the texture coordinates given rotation and cropping.
+static inline void getCubeVertexData(int cropX,
+ int cropY,
+ int cropWidth,
+ int cropHeight,
+ size_t frameWidth,
+ size_t frameHeight,
+ RTCVideoRotation rotation,
+ float *buffer) {
+ // The computed values are the adjusted texture coordinates, in [0..1].
+ // For the left and top, 0.0 means no cropping and e.g. 0.2 means we're skipping 20% of the
+ // left/top edge.
+ // For the right and bottom, 1.0 means no cropping and e.g. 0.8 means we're skipping 20% of the
+ // right/bottom edge (i.e. render up to 80% of the width/height).
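+  // Worked example: cropX = 0 with cropWidth = frameWidth gives
+  // cropLeft = 0.0 and cropRight = 1.0 (no horizontal cropping), while
+  // cropX = frameWidth / 10 with cropWidth = frameWidth * 8 / 10 gives
+  // cropLeft = 0.1 and cropRight = 0.9.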
+ float cropLeft = cropX / (float)frameWidth;
+ float cropRight = (cropX + cropWidth) / (float)frameWidth;
+ float cropTop = cropY / (float)frameHeight;
+ float cropBottom = (cropY + cropHeight) / (float)frameHeight;
+
+ // These arrays map the view coordinates to texture coordinates, taking cropping and rotation
+ // into account. The first two columns are view coordinates, the last two are texture coordinates.
+ switch (rotation) {
+ case RTCVideoRotation_0: {
+ float values[16] = {-1.0, -1.0, cropLeft, cropBottom,
+ 1.0, -1.0, cropRight, cropBottom,
+ -1.0, 1.0, cropLeft, cropTop,
+ 1.0, 1.0, cropRight, cropTop};
+ memcpy(buffer, &values, sizeof(values));
+ } break;
+ case RTCVideoRotation_90: {
+ float values[16] = {-1.0, -1.0, cropRight, cropBottom,
+ 1.0, -1.0, cropRight, cropTop,
+ -1.0, 1.0, cropLeft, cropBottom,
+ 1.0, 1.0, cropLeft, cropTop};
+ memcpy(buffer, &values, sizeof(values));
+ } break;
+ case RTCVideoRotation_180: {
+ float values[16] = {-1.0, -1.0, cropRight, cropTop,
+ 1.0, -1.0, cropLeft, cropTop,
+ -1.0, 1.0, cropRight, cropBottom,
+ 1.0, 1.0, cropLeft, cropBottom};
+ memcpy(buffer, &values, sizeof(values));
+ } break;
+ case RTCVideoRotation_270: {
+ float values[16] = {-1.0, -1.0, cropLeft, cropTop,
+ 1.0, -1.0, cropLeft, cropBottom,
+ -1.0, 1.0, cropRight, cropTop,
+ 1.0, 1.0, cropRight, cropBottom};
+ memcpy(buffer, &values, sizeof(values));
+ } break;
+ }
+}
+
+// The max number of command buffers in flight (submitted to the GPU).
+// For now this is set to 1.
+// In the future we might use triple buffering if it improves performance.
+static const NSInteger kMaxInflightBuffers = 1;
+
+@implementation RTCMTLRenderer {
+ __kindof MTKView *_view;
+
+ // Controller.
+ dispatch_semaphore_t _inflight_semaphore;
+
+ // Renderer.
+ id<MTLDevice> _device;
+ id<MTLCommandQueue> _commandQueue;
+ id<MTLLibrary> _defaultLibrary;
+ id<MTLRenderPipelineState> _pipelineState;
+
+ // Buffers.
+ id<MTLBuffer> _vertexBuffer;
+
+ // Values affecting the vertex buffer. Stored for comparison to avoid unnecessary recreation.
+ int _oldFrameWidth;
+ int _oldFrameHeight;
+ int _oldCropWidth;
+ int _oldCropHeight;
+ int _oldCropX;
+ int _oldCropY;
+ RTCVideoRotation _oldRotation;
+}
+
+@synthesize rotationOverride = _rotationOverride;
+
+- (instancetype)init {
+ if (self = [super init]) {
+ _inflight_semaphore = dispatch_semaphore_create(kMaxInflightBuffers);
+ }
+
+ return self;
+}
+
+- (BOOL)addRenderingDestination:(__kindof MTKView *)view {
+ return [self setupWithView:view];
+}
+
+#pragma mark - Private
+
+- (BOOL)setupWithView:(__kindof MTKView *)view {
+ BOOL success = NO;
+ if ([self setupMetal]) {
+ _view = view;
+ view.device = _device;
+ view.preferredFramesPerSecond = 30;
+ view.autoResizeDrawable = NO;
+
+ [self loadAssets];
+
+ float vertexBufferArray[16] = {0};
+ _vertexBuffer = [_device newBufferWithBytes:vertexBufferArray
+ length:sizeof(vertexBufferArray)
+ options:MTLResourceCPUCacheModeWriteCombined];
+ success = YES;
+ }
+ return success;
+}
+
+#pragma mark - Inheritance
+
+- (id<MTLDevice>)currentMetalDevice {
+ return _device;
+}
+
+- (NSString *)shaderSource {
+ RTC_DCHECK_NOTREACHED() << "Virtual method not implemented in subclass.";
+ return nil;
+}
+
+- (void)uploadTexturesToRenderEncoder:(id<MTLRenderCommandEncoder>)renderEncoder {
+ RTC_DCHECK_NOTREACHED() << "Virtual method not implemented in subclass.";
+}
+
+- (void)getWidth:(int *)width
+ height:(int *)height
+ cropWidth:(int *)cropWidth
+ cropHeight:(int *)cropHeight
+ cropX:(int *)cropX
+ cropY:(int *)cropY
+ ofFrame:(nonnull RTC_OBJC_TYPE(RTCVideoFrame) *)frame {
+ RTC_DCHECK_NOTREACHED() << "Virtual method not implemented in subclass.";
+}
+
+- (BOOL)setupTexturesForFrame:(nonnull RTC_OBJC_TYPE(RTCVideoFrame) *)frame {
+ // Apply rotation override if set.
+ RTCVideoRotation rotation;
+ NSValue *rotationOverride = self.rotationOverride;
+ if (rotationOverride) {
+#if defined(__IPHONE_11_0) && defined(__IPHONE_OS_VERSION_MAX_ALLOWED) && \
+ (__IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_11_0)
+ if (@available(iOS 11, *)) {
+ [rotationOverride getValue:&rotation size:sizeof(rotation)];
+ } else
+#endif
+ {
+ [rotationOverride getValue:&rotation];
+ }
+ } else {
+ rotation = frame.rotation;
+ }
+
+ int frameWidth, frameHeight, cropWidth, cropHeight, cropX, cropY;
+ [self getWidth:&frameWidth
+ height:&frameHeight
+ cropWidth:&cropWidth
+ cropHeight:&cropHeight
+ cropX:&cropX
+ cropY:&cropY
+ ofFrame:frame];
+
+ // Recompute the texture cropping and recreate vertexBuffer if necessary.
+ if (cropX != _oldCropX || cropY != _oldCropY || cropWidth != _oldCropWidth ||
+ cropHeight != _oldCropHeight || rotation != _oldRotation || frameWidth != _oldFrameWidth ||
+ frameHeight != _oldFrameHeight) {
+ getCubeVertexData(cropX,
+ cropY,
+ cropWidth,
+ cropHeight,
+ frameWidth,
+ frameHeight,
+ rotation,
+ (float *)_vertexBuffer.contents);
+ _oldCropX = cropX;
+ _oldCropY = cropY;
+ _oldCropWidth = cropWidth;
+ _oldCropHeight = cropHeight;
+ _oldRotation = rotation;
+ _oldFrameWidth = frameWidth;
+ _oldFrameHeight = frameHeight;
+ }
+
+ return YES;
+}
+
+#pragma mark - GPU methods
+
+- (BOOL)setupMetal {
+ // Set the view to use the default device.
+ _device = MTLCreateSystemDefaultDevice();
+ if (!_device) {
+ return NO;
+ }
+
+ // Create a new command queue.
+ _commandQueue = [_device newCommandQueue];
+
+ // Load metal library from source.
+ NSError *libraryError = nil;
+ NSString *shaderSource = [self shaderSource];
+
+ id<MTLLibrary> sourceLibrary =
+ [_device newLibraryWithSource:shaderSource options:NULL error:&libraryError];
+
+ if (libraryError) {
+ RTCLogError(@"Metal: Library with source failed\n%@", libraryError);
+ return NO;
+ }
+
+ if (!sourceLibrary) {
+ RTCLogError(@"Metal: Failed to load library. %@", libraryError);
+ return NO;
+ }
+ _defaultLibrary = sourceLibrary;
+
+ return YES;
+}
+
+- (void)loadAssets {
+ id<MTLFunction> vertexFunction = [_defaultLibrary newFunctionWithName:vertexFunctionName];
+ id<MTLFunction> fragmentFunction = [_defaultLibrary newFunctionWithName:fragmentFunctionName];
+
+ MTLRenderPipelineDescriptor *pipelineDescriptor = [[MTLRenderPipelineDescriptor alloc] init];
+ pipelineDescriptor.label = pipelineDescriptorLabel;
+ pipelineDescriptor.vertexFunction = vertexFunction;
+ pipelineDescriptor.fragmentFunction = fragmentFunction;
+ pipelineDescriptor.colorAttachments[0].pixelFormat = _view.colorPixelFormat;
+ pipelineDescriptor.depthAttachmentPixelFormat = MTLPixelFormatInvalid;
+ NSError *error = nil;
+ _pipelineState = [_device newRenderPipelineStateWithDescriptor:pipelineDescriptor error:&error];
+
+ if (!_pipelineState) {
+ RTCLogError(@"Metal: Failed to create pipeline state. %@", error);
+ }
+}
+
+- (void)render {
+ id<MTLCommandBuffer> commandBuffer = [_commandQueue commandBuffer];
+ commandBuffer.label = commandBufferLabel;
+
+ __block dispatch_semaphore_t block_semaphore = _inflight_semaphore;
+ [commandBuffer addCompletedHandler:^(id<MTLCommandBuffer> _Nonnull) {
+ // GPU work completed.
+ dispatch_semaphore_signal(block_semaphore);
+ }];
+
+ MTLRenderPassDescriptor *renderPassDescriptor = _view.currentRenderPassDescriptor;
+ if (renderPassDescriptor) { // Valid drawable.
+ id<MTLRenderCommandEncoder> renderEncoder =
+ [commandBuffer renderCommandEncoderWithDescriptor:renderPassDescriptor];
+ renderEncoder.label = renderEncoderLabel;
+
+ // Set context state.
+ [renderEncoder pushDebugGroup:renderEncoderDebugGroup];
+ [renderEncoder setRenderPipelineState:_pipelineState];
+ [renderEncoder setVertexBuffer:_vertexBuffer offset:0 atIndex:0];
+ [self uploadTexturesToRenderEncoder:renderEncoder];
+
+ [renderEncoder drawPrimitives:MTLPrimitiveTypeTriangleStrip
+ vertexStart:0
+ vertexCount:4
+ instanceCount:1];
+ [renderEncoder popDebugGroup];
+ [renderEncoder endEncoding];
+
+ [commandBuffer presentDrawable:_view.currentDrawable];
+ }
+
+ // CPU work is completed, GPU work can be started.
+ [commandBuffer commit];
+}
+
+#pragma mark - RTCMTLRenderer
+
+- (void)drawFrame:(RTC_OBJC_TYPE(RTCVideoFrame) *)frame {
+ @autoreleasepool {
+    // Wait until the inflight (currently submitted to the GPU) command buffer
+ // has completed the GPU work.
+ dispatch_semaphore_wait(_inflight_semaphore, DISPATCH_TIME_FOREVER);
+
+ if ([self setupTexturesForFrame:frame]) {
+ [self render];
+ } else {
+ dispatch_semaphore_signal(_inflight_semaphore);
+ }
+ }
+}
+
+@end
diff --git a/third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLVideoView.h b/third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLVideoView.h
new file mode 100644
index 0000000000..3320d12076
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLVideoView.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <Foundation/Foundation.h>
+
+#import "RTCMacros.h"
+#import "RTCVideoFrame.h"
+#import "RTCVideoRenderer.h"
+
+NS_ASSUME_NONNULL_BEGIN
+
+/**
+ * RTCMTLVideoView is a thin wrapper around MTKView.
+ *
+ * It conforms to id<RTCVideoRenderer> and renders video frames in the view's
+ * bounds using Metal.
+ */
+NS_CLASS_AVAILABLE_IOS(9)
+
+RTC_OBJC_EXPORT
+@interface RTC_OBJC_TYPE (RTCMTLVideoView) : UIView<RTC_OBJC_TYPE(RTCVideoRenderer)>
+
+@property(nonatomic, weak) id<RTC_OBJC_TYPE(RTCVideoViewDelegate)> delegate;
+
+@property(nonatomic) UIViewContentMode videoContentMode;
+
+/** @abstract Enables/disables rendering.
+ */
+@property(nonatomic, getter=isEnabled) BOOL enabled;
+
+/** @abstract Wrapped RTCVideoRotation, or nil.
+ */
+@property(nonatomic, nullable) NSValue* rotationOverride;
+
+@end
+
+NS_ASSUME_NONNULL_END
diff --git a/third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLVideoView.m b/third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLVideoView.m
new file mode 100644
index 0000000000..c5d9e4385f
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/renderer/metal/RTCMTLVideoView.m
@@ -0,0 +1,265 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "RTCMTLVideoView.h"
+
+#import <Metal/Metal.h>
+#import <MetalKit/MetalKit.h>
+
+#import "base/RTCLogging.h"
+#import "base/RTCVideoFrame.h"
+#import "base/RTCVideoFrameBuffer.h"
+#import "components/video_frame_buffer/RTCCVPixelBuffer.h"
+
+#import "RTCMTLI420Renderer.h"
+#import "RTCMTLNV12Renderer.h"
+#import "RTCMTLRGBRenderer.h"
+
+// To avoid unrecognized symbol linker errors, we're taking advantage of the Objective-C runtime.
+// Linking errors occur when compiling for architectures that don't support Metal.
+#define MTKViewClass NSClassFromString(@"MTKView")
+#define RTCMTLNV12RendererClass NSClassFromString(@"RTCMTLNV12Renderer")
+#define RTCMTLI420RendererClass NSClassFromString(@"RTCMTLI420Renderer")
+#define RTCMTLRGBRendererClass NSClassFromString(@"RTCMTLRGBRenderer")
+
+@interface RTC_OBJC_TYPE (RTCMTLVideoView)
+()<MTKViewDelegate> @property(nonatomic) RTCMTLI420Renderer *rendererI420;
+@property(nonatomic) RTCMTLNV12Renderer *rendererNV12;
+@property(nonatomic) RTCMTLRGBRenderer *rendererRGB;
+@property(nonatomic) MTKView *metalView;
+@property(atomic) RTC_OBJC_TYPE(RTCVideoFrame) * videoFrame;
+@property(nonatomic) CGSize videoFrameSize;
+@property(nonatomic) int64_t lastFrameTimeNs;
+@end
+
+@implementation RTC_OBJC_TYPE (RTCMTLVideoView)
+
+@synthesize delegate = _delegate;
+@synthesize rendererI420 = _rendererI420;
+@synthesize rendererNV12 = _rendererNV12;
+@synthesize rendererRGB = _rendererRGB;
+@synthesize metalView = _metalView;
+@synthesize videoFrame = _videoFrame;
+@synthesize videoFrameSize = _videoFrameSize;
+@synthesize lastFrameTimeNs = _lastFrameTimeNs;
+@synthesize rotationOverride = _rotationOverride;
+
+- (instancetype)initWithFrame:(CGRect)frameRect {
+ self = [super initWithFrame:frameRect];
+ if (self) {
+ [self configure];
+ }
+ return self;
+}
+
+- (instancetype)initWithCoder:(NSCoder *)aCoder {
+ self = [super initWithCoder:aCoder];
+ if (self) {
+ [self configure];
+ }
+ return self;
+}
+
+- (BOOL)isEnabled {
+ return !self.metalView.paused;
+}
+
+- (void)setEnabled:(BOOL)enabled {
+ self.metalView.paused = !enabled;
+}
+
+- (UIViewContentMode)videoContentMode {
+ return self.metalView.contentMode;
+}
+
+- (void)setVideoContentMode:(UIViewContentMode)mode {
+ self.metalView.contentMode = mode;
+}
+
+#pragma mark - Private
+
++ (BOOL)isMetalAvailable {
+ return MTLCreateSystemDefaultDevice() != nil;
+}
+
++ (MTKView *)createMetalView:(CGRect)frame {
+ return [[MTKViewClass alloc] initWithFrame:frame];
+}
+
++ (RTCMTLNV12Renderer *)createNV12Renderer {
+ return [[RTCMTLNV12RendererClass alloc] init];
+}
+
++ (RTCMTLI420Renderer *)createI420Renderer {
+ return [[RTCMTLI420RendererClass alloc] init];
+}
+
++ (RTCMTLRGBRenderer *)createRGBRenderer {
+  return [[RTCMTLRGBRendererClass alloc] init];
+}
+
+- (void)configure {
+ NSAssert([RTC_OBJC_TYPE(RTCMTLVideoView) isMetalAvailable],
+           @"Metal not available on this device");
+
+ self.metalView = [RTC_OBJC_TYPE(RTCMTLVideoView) createMetalView:self.bounds];
+ self.metalView.delegate = self;
+ self.metalView.contentMode = UIViewContentModeScaleAspectFill;
+ [self addSubview:self.metalView];
+ self.videoFrameSize = CGSizeZero;
+}
+
+- (void)setMultipleTouchEnabled:(BOOL)multipleTouchEnabled {
+ [super setMultipleTouchEnabled:multipleTouchEnabled];
+ self.metalView.multipleTouchEnabled = multipleTouchEnabled;
+}
+
+- (void)layoutSubviews {
+ [super layoutSubviews];
+
+ CGRect bounds = self.bounds;
+ self.metalView.frame = bounds;
+ if (!CGSizeEqualToSize(self.videoFrameSize, CGSizeZero)) {
+ self.metalView.drawableSize = [self drawableSize];
+ } else {
+ self.metalView.drawableSize = bounds.size;
+ }
+}
+
+#pragma mark - MTKViewDelegate methods
+
+- (void)drawInMTKView:(nonnull MTKView *)view {
+ NSAssert(view == self.metalView, @"Receiving draw callbacks from foreign instance.");
+ RTC_OBJC_TYPE(RTCVideoFrame) *videoFrame = self.videoFrame;
+ // Skip rendering if we've already rendered this frame.
+ if (!videoFrame || videoFrame.width <= 0 || videoFrame.height <= 0 ||
+ videoFrame.timeStampNs == self.lastFrameTimeNs) {
+ return;
+ }
+
+ if (CGRectIsEmpty(view.bounds)) {
+ return;
+ }
+
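+  // Pick (and lazily create) the renderer matching the frame's buffer:
+  // CVPixelBuffer-backed frames use the RGB or NV12 renderer depending on
+  // their pixel format; all other buffers go through the generic I420 path.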
+ RTCMTLRenderer *renderer;
+ if ([videoFrame.buffer isKindOfClass:[RTC_OBJC_TYPE(RTCCVPixelBuffer) class]]) {
+ RTC_OBJC_TYPE(RTCCVPixelBuffer) *buffer = (RTC_OBJC_TYPE(RTCCVPixelBuffer) *)videoFrame.buffer;
+ const OSType pixelFormat = CVPixelBufferGetPixelFormatType(buffer.pixelBuffer);
+ if (pixelFormat == kCVPixelFormatType_32BGRA || pixelFormat == kCVPixelFormatType_32ARGB) {
+ if (!self.rendererRGB) {
+ self.rendererRGB = [RTC_OBJC_TYPE(RTCMTLVideoView) createRGBRenderer];
+ if (![self.rendererRGB addRenderingDestination:self.metalView]) {
+ self.rendererRGB = nil;
+ RTCLogError(@"Failed to create RGB renderer");
+ return;
+ }
+ }
+ renderer = self.rendererRGB;
+ } else {
+ if (!self.rendererNV12) {
+ self.rendererNV12 = [RTC_OBJC_TYPE(RTCMTLVideoView) createNV12Renderer];
+ if (![self.rendererNV12 addRenderingDestination:self.metalView]) {
+ self.rendererNV12 = nil;
+ RTCLogError(@"Failed to create NV12 renderer");
+ return;
+ }
+ }
+ renderer = self.rendererNV12;
+ }
+ } else {
+ if (!self.rendererI420) {
+ self.rendererI420 = [RTC_OBJC_TYPE(RTCMTLVideoView) createI420Renderer];
+ if (![self.rendererI420 addRenderingDestination:self.metalView]) {
+ self.rendererI420 = nil;
+ RTCLogError(@"Failed to create I420 renderer");
+ return;
+ }
+ }
+ renderer = self.rendererI420;
+ }
+
+ renderer.rotationOverride = self.rotationOverride;
+
+ [renderer drawFrame:videoFrame];
+ self.lastFrameTimeNs = videoFrame.timeStampNs;
+}
+
+- (void)mtkView:(MTKView *)view drawableSizeWillChange:(CGSize)size {
+}
+
+#pragma mark -
+
+- (void)setRotationOverride:(NSValue *)rotationOverride {
+ _rotationOverride = rotationOverride;
+
+ self.metalView.drawableSize = [self drawableSize];
+ [self setNeedsLayout];
+}
+
+- (RTCVideoRotation)frameRotation {
+ if (self.rotationOverride) {
+ RTCVideoRotation rotation;
+ if (@available(iOS 11, *)) {
+ [self.rotationOverride getValue:&rotation size:sizeof(rotation)];
+ } else {
+ [self.rotationOverride getValue:&rotation];
+ }
+ return rotation;
+ }
+
+ return self.videoFrame.rotation;
+}
+
+- (CGSize)drawableSize {
+ // Flip width/height if the rotations are not the same.
+ CGSize videoFrameSize = self.videoFrameSize;
+ RTCVideoRotation frameRotation = [self frameRotation];
+
+ BOOL useLandscape =
+ (frameRotation == RTCVideoRotation_0) || (frameRotation == RTCVideoRotation_180);
+ BOOL sizeIsLandscape = (self.videoFrame.rotation == RTCVideoRotation_0) ||
+ (self.videoFrame.rotation == RTCVideoRotation_180);
+
+ if (useLandscape == sizeIsLandscape) {
+ return videoFrameSize;
+ } else {
+ return CGSizeMake(videoFrameSize.height, videoFrameSize.width);
+ }
+}
+
+#pragma mark - RTC_OBJC_TYPE(RTCVideoRenderer)
+
+- (void)setSize:(CGSize)size {
+ __weak RTC_OBJC_TYPE(RTCMTLVideoView) *weakSelf = self;
+ dispatch_async(dispatch_get_main_queue(), ^{
+ RTC_OBJC_TYPE(RTCMTLVideoView) *strongSelf = weakSelf;
+
+ strongSelf.videoFrameSize = size;
+ CGSize drawableSize = [strongSelf drawableSize];
+
+ strongSelf.metalView.drawableSize = drawableSize;
+ [strongSelf setNeedsLayout];
+    [strongSelf.delegate videoView:strongSelf didChangeVideoSize:size];
+ });
+}
+
+- (void)renderFrame:(nullable RTC_OBJC_TYPE(RTCVideoFrame) *)frame {
+ if (!self.isEnabled) {
+ return;
+ }
+
+ if (frame == nil) {
+ RTCLogInfo(@"Incoming frame is nil. Exiting render callback.");
+ return;
+ }
+ self.videoFrame = frame;
+}
+
+@end
diff --git a/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCDefaultShader.h b/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCDefaultShader.h
new file mode 100644
index 0000000000..71a073ab21
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCDefaultShader.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "RTCVideoViewShading.h"
+
+NS_ASSUME_NONNULL_BEGIN
+
+/** Default RTCVideoViewShading that will be used in RTCNSGLVideoView
+ * and RTCEAGLVideoView if no external shader is specified. This shader will render
+ * the video in a rectangle without any color or geometric transformations.
+ */
+@interface RTCDefaultShader : NSObject <RTC_OBJC_TYPE (RTCVideoViewShading)>
+
+@end
+
+NS_ASSUME_NONNULL_END
diff --git a/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCDefaultShader.mm b/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCDefaultShader.mm
new file mode 100644
index 0000000000..51dca3223d
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCDefaultShader.mm
@@ -0,0 +1,207 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "RTCDefaultShader.h"
+
+#if TARGET_OS_IPHONE
+#import <OpenGLES/ES3/gl.h>
+#else
+#import <OpenGL/gl3.h>
+#endif
+
+#import "RTCOpenGLDefines.h"
+#import "RTCShader.h"
+#import "base/RTCLogging.h"
+
+#include "absl/types/optional.h"
+
+static const int kYTextureUnit = 0;
+static const int kUTextureUnit = 1;
+static const int kVTextureUnit = 2;
+static const int kUvTextureUnit = 1;
+
+// Fragment shader converts YUV values from input textures into a final RGB
+// pixel. The conversion formula is from http://www.fourcc.org/fccyvrgb.php.
+static const char kI420FragmentShaderSource[] =
+ SHADER_VERSION
+ "precision highp float;"
+ FRAGMENT_SHADER_IN " vec2 v_texcoord;\n"
+ "uniform lowp sampler2D s_textureY;\n"
+ "uniform lowp sampler2D s_textureU;\n"
+ "uniform lowp sampler2D s_textureV;\n"
+ FRAGMENT_SHADER_OUT
+ "void main() {\n"
+ " float y, u, v, r, g, b;\n"
+ " y = " FRAGMENT_SHADER_TEXTURE "(s_textureY, v_texcoord).r;\n"
+ " u = " FRAGMENT_SHADER_TEXTURE "(s_textureU, v_texcoord).r;\n"
+ " v = " FRAGMENT_SHADER_TEXTURE "(s_textureV, v_texcoord).r;\n"
+ " u = u - 0.5;\n"
+ " v = v - 0.5;\n"
+ " r = y + 1.403 * v;\n"
+ " g = y - 0.344 * u - 0.714 * v;\n"
+ " b = y + 1.770 * u;\n"
+ " " FRAGMENT_SHADER_COLOR " = vec4(r, g, b, 1.0);\n"
+ " }\n";
+
+static const char kNV12FragmentShaderSource[] =
+ SHADER_VERSION
+ "precision mediump float;"
+ FRAGMENT_SHADER_IN " vec2 v_texcoord;\n"
+ "uniform lowp sampler2D s_textureY;\n"
+ "uniform lowp sampler2D s_textureUV;\n"
+ FRAGMENT_SHADER_OUT
+ "void main() {\n"
+ " mediump float y;\n"
+ " mediump vec2 uv;\n"
+ " y = " FRAGMENT_SHADER_TEXTURE "(s_textureY, v_texcoord).r;\n"
+ " uv = " FRAGMENT_SHADER_TEXTURE "(s_textureUV, v_texcoord).ra -\n"
+ " vec2(0.5, 0.5);\n"
+ " " FRAGMENT_SHADER_COLOR " = vec4(y + 1.403 * uv.y,\n"
+ " y - 0.344 * uv.x - 0.714 * uv.y,\n"
+ " y + 1.770 * uv.x,\n"
+ " 1.0);\n"
+ " }\n";
+
+@implementation RTCDefaultShader {
+ GLuint _vertexBuffer;
+ GLuint _vertexArray;
+ // Store current rotation and only upload new vertex data when rotation changes.
+ absl::optional<RTCVideoRotation> _currentRotation;
+
+ GLuint _i420Program;
+ GLuint _nv12Program;
+}
+
+- (void)dealloc {
+ glDeleteProgram(_i420Program);
+ glDeleteProgram(_nv12Program);
+ glDeleteBuffers(1, &_vertexBuffer);
+ glDeleteVertexArrays(1, &_vertexArray);
+}
+
+- (BOOL)createAndSetupI420Program {
+ NSAssert(!_i420Program, @"I420 program already created");
+ _i420Program = RTCCreateProgramFromFragmentSource(kI420FragmentShaderSource);
+ if (!_i420Program) {
+ return NO;
+ }
+ GLint ySampler = glGetUniformLocation(_i420Program, "s_textureY");
+ GLint uSampler = glGetUniformLocation(_i420Program, "s_textureU");
+ GLint vSampler = glGetUniformLocation(_i420Program, "s_textureV");
+
+ if (ySampler < 0 || uSampler < 0 || vSampler < 0) {
+ RTCLog(@"Failed to get uniform variable locations in I420 shader");
+ glDeleteProgram(_i420Program);
+ _i420Program = 0;
+ return NO;
+ }
+
+ glUseProgram(_i420Program);
+ glUniform1i(ySampler, kYTextureUnit);
+ glUniform1i(uSampler, kUTextureUnit);
+ glUniform1i(vSampler, kVTextureUnit);
+
+ return YES;
+}
+
+- (BOOL)createAndSetupNV12Program {
+ NSAssert(!_nv12Program, @"NV12 program already created");
+ _nv12Program = RTCCreateProgramFromFragmentSource(kNV12FragmentShaderSource);
+ if (!_nv12Program) {
+ return NO;
+ }
+ GLint ySampler = glGetUniformLocation(_nv12Program, "s_textureY");
+ GLint uvSampler = glGetUniformLocation(_nv12Program, "s_textureUV");
+
+ if (ySampler < 0 || uvSampler < 0) {
+ RTCLog(@"Failed to get uniform variable locations in NV12 shader");
+ glDeleteProgram(_nv12Program);
+ _nv12Program = 0;
+ return NO;
+ }
+
+ glUseProgram(_nv12Program);
+ glUniform1i(ySampler, kYTextureUnit);
+ glUniform1i(uvSampler, kUvTextureUnit);
+
+ return YES;
+}
+
+- (BOOL)prepareVertexBufferWithRotation:(RTCVideoRotation)rotation {
+ if (!_vertexBuffer && !RTCCreateVertexBuffer(&_vertexBuffer, &_vertexArray)) {
+ RTCLog(@"Failed to setup vertex buffer");
+ return NO;
+ }
+#if !TARGET_OS_IPHONE
+ glBindVertexArray(_vertexArray);
+#endif
+ glBindBuffer(GL_ARRAY_BUFFER, _vertexBuffer);
+ if (!_currentRotation || rotation != *_currentRotation) {
+ _currentRotation = absl::optional<RTCVideoRotation>(rotation);
+ RTCSetVertexData(*_currentRotation);
+ }
+ return YES;
+}
+
+- (void)applyShadingForFrameWithWidth:(int)width
+ height:(int)height
+ rotation:(RTCVideoRotation)rotation
+ yPlane:(GLuint)yPlane
+ uPlane:(GLuint)uPlane
+ vPlane:(GLuint)vPlane {
+ if (![self prepareVertexBufferWithRotation:rotation]) {
+ return;
+ }
+
+ if (!_i420Program && ![self createAndSetupI420Program]) {
+ RTCLog(@"Failed to setup I420 program");
+ return;
+ }
+
+ glUseProgram(_i420Program);
+
+ glActiveTexture(static_cast<GLenum>(GL_TEXTURE0 + kYTextureUnit));
+ glBindTexture(GL_TEXTURE_2D, yPlane);
+
+ glActiveTexture(static_cast<GLenum>(GL_TEXTURE0 + kUTextureUnit));
+ glBindTexture(GL_TEXTURE_2D, uPlane);
+
+ glActiveTexture(static_cast<GLenum>(GL_TEXTURE0 + kVTextureUnit));
+ glBindTexture(GL_TEXTURE_2D, vPlane);
+
+ glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
+}
+
+- (void)applyShadingForFrameWithWidth:(int)width
+ height:(int)height
+ rotation:(RTCVideoRotation)rotation
+ yPlane:(GLuint)yPlane
+ uvPlane:(GLuint)uvPlane {
+ if (![self prepareVertexBufferWithRotation:rotation]) {
+ return;
+ }
+
+ if (!_nv12Program && ![self createAndSetupNV12Program]) {
+ RTCLog(@"Failed to setup NV12 shader");
+ return;
+ }
+
+ glUseProgram(_nv12Program);
+
+ glActiveTexture(static_cast<GLenum>(GL_TEXTURE0 + kYTextureUnit));
+ glBindTexture(GL_TEXTURE_2D, yPlane);
+
+ glActiveTexture(static_cast<GLenum>(GL_TEXTURE0 + kUvTextureUnit));
+ glBindTexture(GL_TEXTURE_2D, uvPlane);
+
+ glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
+}
+
+@end
diff --git a/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCDisplayLinkTimer.h b/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCDisplayLinkTimer.h
new file mode 100644
index 0000000000..b78501e9e6
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCDisplayLinkTimer.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <Foundation/Foundation.h>
+
+// RTCDisplayLinkTimer wraps a CADisplayLink and is set to fire every two
+// screen refreshes, which corresponds to 30 fps on a 60 Hz display. We wrap
+// the display link in order to avoid a retain cycle, since CADisplayLink
+// takes a strong reference onto its target. The timer is paused by default.
+@interface RTCDisplayLinkTimer : NSObject
+
+@property(nonatomic) BOOL isPaused;
+
+- (instancetype)initWithTimerHandler:(void (^)(void))timerHandler;
+- (void)invalidate;
+
+@end
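+
+// A minimal usage sketch (the `render` method is hypothetical):
+//
+//   __weak MyRenderer *weakSelf = self;
+//   RTCDisplayLinkTimer *timer =
+//       [[RTCDisplayLinkTimer alloc] initWithTimerHandler:^{
+//         [weakSelf render];
+//       }];
+//   timer.isPaused = NO;  // start firing
+//   ...
+//   [timer invalidate];   // stop and release the display link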
diff --git a/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCDisplayLinkTimer.m b/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCDisplayLinkTimer.m
new file mode 100644
index 0000000000..906bb898d6
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCDisplayLinkTimer.m
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "RTCDisplayLinkTimer.h"
+
+#import <UIKit/UIKit.h>
+
+@implementation RTCDisplayLinkTimer {
+ CADisplayLink *_displayLink;
+ void (^_timerHandler)(void);
+}
+
+- (instancetype)initWithTimerHandler:(void (^)(void))timerHandler {
+ NSParameterAssert(timerHandler);
+ if (self = [super init]) {
+ _timerHandler = timerHandler;
+ _displayLink =
+ [CADisplayLink displayLinkWithTarget:self
+ selector:@selector(displayLinkDidFire:)];
+ _displayLink.paused = YES;
+#if __IPHONE_OS_VERSION_MIN_REQUIRED >= __IPHONE_10_0
+ _displayLink.preferredFramesPerSecond = 30;
+#else
+ [_displayLink setFrameInterval:2];
+#endif
+ [_displayLink addToRunLoop:[NSRunLoop currentRunLoop]
+ forMode:NSRunLoopCommonModes];
+ }
+ return self;
+}
+
+- (void)dealloc {
+ [self invalidate];
+}
+
+- (BOOL)isPaused {
+ return _displayLink.paused;
+}
+
+- (void)setIsPaused:(BOOL)isPaused {
+ _displayLink.paused = isPaused;
+}
+
+- (void)invalidate {
+ [_displayLink invalidate];
+}
+
+- (void)displayLinkDidFire:(CADisplayLink *)displayLink {
+ _timerHandler();
+}
+
+@end
diff --git a/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCEAGLVideoView.h b/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCEAGLVideoView.h
new file mode 100644
index 0000000000..24b26cd602
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCEAGLVideoView.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <Foundation/Foundation.h>
+#import <UIKit/UIKit.h>
+
+#import "RTCMacros.h"
+#import "RTCVideoRenderer.h"
+#import "RTCVideoViewShading.h"
+
+NS_ASSUME_NONNULL_BEGIN
+
+@class RTC_OBJC_TYPE(RTCEAGLVideoView);
+
+/**
+ * RTCEAGLVideoView is an RTCVideoRenderer which renders video frames
+ * in its bounds using OpenGLES 2.0 or OpenGLES 3.0.
+ */
+RTC_OBJC_EXPORT
+NS_EXTENSION_UNAVAILABLE_IOS("Rendering not available in app extensions.")
+@interface RTC_OBJC_TYPE (RTCEAGLVideoView) : UIView <RTC_OBJC_TYPE(RTCVideoRenderer)>
+
+@property(nonatomic, weak) id<RTC_OBJC_TYPE(RTCVideoViewDelegate)> delegate;
+
+- (instancetype)initWithFrame:(CGRect)frame
+ shader:(id<RTC_OBJC_TYPE(RTCVideoViewShading)>)shader
+ NS_DESIGNATED_INITIALIZER;
+
+- (instancetype)initWithCoder:(NSCoder *)aDecoder
+ shader:(id<RTC_OBJC_TYPE(RTCVideoViewShading)>)shader
+ NS_DESIGNATED_INITIALIZER;
+
+/** @abstract An RTCVideoRotation wrapped in an NSValue, or nil to use each
+ *  frame's own rotation.
+ */
+@property(nonatomic, nullable) NSValue *rotationOverride;
+@end
+
+NS_ASSUME_NONNULL_END
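+
+// A minimal usage sketch (assumes `videoTrack` is an existing
+// RTC_OBJC_TYPE(RTCVideoTrack)):
+//
+//   RTC_OBJC_TYPE(RTCEAGLVideoView) *videoView =
+//       [[RTC_OBJC_TYPE(RTCEAGLVideoView) alloc] initWithFrame:bounds];
+//   videoView.delegate = self;
+//   [videoTrack addRenderer:videoView];
+//
+//   // Force landscape regardless of each frame's own rotation:
+//   RTCVideoRotation rot = RTCVideoRotation_0;
+//   videoView.rotationOverride =
+//       [NSValue valueWithBytes:&rot objCType:@encode(RTCVideoRotation)];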
diff --git a/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCEAGLVideoView.m b/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCEAGLVideoView.m
new file mode 100644
index 0000000000..89e62d2ce7
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCEAGLVideoView.m
@@ -0,0 +1,295 @@
+/*
+ * Copyright 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "RTCEAGLVideoView.h"
+
+#import <GLKit/GLKit.h>
+
+#import "RTCDefaultShader.h"
+#import "RTCDisplayLinkTimer.h"
+#import "RTCI420TextureCache.h"
+#import "RTCNV12TextureCache.h"
+#import "base/RTCLogging.h"
+#import "base/RTCVideoFrame.h"
+#import "base/RTCVideoFrameBuffer.h"
+#import "components/video_frame_buffer/RTCCVPixelBuffer.h"
+
+// RTC_OBJC_TYPE(RTCEAGLVideoView) wraps a GLKView which is set up with
+// enableSetNeedsDisplay = NO in order to gain control of exactly when
+// -[GLKView display] is called. This extra control is needed to avoid
+// method calls on GLKView that would attempt to bind the underlying
+// render buffer while the drawable size is empty, which would fail with
+// GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT. -[GLKView display] is the method
+// that triggers the binding of the render buffer. Because the standard
+// behaviour of -[UIView setNeedsDisplay] is disabled for the reasons
+// above, RTC_OBJC_TYPE(RTCEAGLVideoView) maintains its own `isDirty` flag.
+
+@interface RTC_OBJC_TYPE (RTCEAGLVideoView) () <GLKViewDelegate>
+
+// `videoFrame` is set when we receive a frame from a worker thread and is read
+// from the display link callback so atomicity is required.
+@property(atomic, strong) RTC_OBJC_TYPE(RTCVideoFrame) *videoFrame;
+@property(nonatomic, readonly) GLKView *glkView;
+@end
+
+@implementation RTC_OBJC_TYPE (RTCEAGLVideoView) {
+ RTCDisplayLinkTimer *_timer;
+ EAGLContext *_glContext;
+ // This flag should only be set and read on the main thread (e.g. by
+ // setNeedsDisplay)
+ BOOL _isDirty;
+ id<RTC_OBJC_TYPE(RTCVideoViewShading)> _shader;
+ RTCNV12TextureCache *_nv12TextureCache;
+ RTCI420TextureCache *_i420TextureCache;
+ // Timestamps are assumed to be unique between frames, so we store only the
+ // last drawn frame's timestamp (rather than retaining the whole frame) to
+ // reduce memory usage.
+ int64_t _lastDrawnFrameTimeStampNs;
+}
+
+@synthesize delegate = _delegate;
+@synthesize videoFrame = _videoFrame;
+@synthesize glkView = _glkView;
+@synthesize rotationOverride = _rotationOverride;
+
+- (instancetype)initWithFrame:(CGRect)frame {
+ return [self initWithFrame:frame shader:[[RTCDefaultShader alloc] init]];
+}
+
+- (instancetype)initWithCoder:(NSCoder *)aDecoder {
+ return [self initWithCoder:aDecoder shader:[[RTCDefaultShader alloc] init]];
+}
+
+- (instancetype)initWithFrame:(CGRect)frame shader:(id<RTC_OBJC_TYPE(RTCVideoViewShading)>)shader {
+ if (self = [super initWithFrame:frame]) {
+ _shader = shader;
+ if (![self configure]) {
+ return nil;
+ }
+ }
+ return self;
+}
+
+- (instancetype)initWithCoder:(NSCoder *)aDecoder
+ shader:(id<RTC_OBJC_TYPE(RTCVideoViewShading)>)shader {
+ if (self = [super initWithCoder:aDecoder]) {
+ _shader = shader;
+ if (![self configure]) {
+ return nil;
+ }
+ }
+ return self;
+}
+
+- (BOOL)configure {
+ EAGLContext *glContext =
+ [[EAGLContext alloc] initWithAPI:kEAGLRenderingAPIOpenGLES3];
+ if (!glContext) {
+ glContext = [[EAGLContext alloc] initWithAPI:kEAGLRenderingAPIOpenGLES2];
+ }
+ if (!glContext) {
+ RTCLogError(@"Failed to create EAGLContext");
+ return NO;
+ }
+ _glContext = glContext;
+
+ // GLKView manages a framebuffer for us.
+ _glkView = [[GLKView alloc] initWithFrame:CGRectZero
+ context:_glContext];
+ _glkView.drawableColorFormat = GLKViewDrawableColorFormatRGBA8888;
+ _glkView.drawableDepthFormat = GLKViewDrawableDepthFormatNone;
+ _glkView.drawableStencilFormat = GLKViewDrawableStencilFormatNone;
+ _glkView.drawableMultisample = GLKViewDrawableMultisampleNone;
+ _glkView.delegate = self;
+ _glkView.layer.masksToBounds = YES;
+ _glkView.enableSetNeedsDisplay = NO;
+ [self addSubview:_glkView];
+
+ // Listen to application state in order to clean up OpenGL before app goes
+ // away.
+ NSNotificationCenter *notificationCenter =
+ [NSNotificationCenter defaultCenter];
+ [notificationCenter addObserver:self
+ selector:@selector(willResignActive)
+ name:UIApplicationWillResignActiveNotification
+ object:nil];
+ [notificationCenter addObserver:self
+ selector:@selector(didBecomeActive)
+ name:UIApplicationDidBecomeActiveNotification
+ object:nil];
+
+ // Frames are received on a separate thread, so we poll for the current
+ // frame at a rate proportional to the screen refresh frequency. This
+ // occurs on the main thread.
+ __weak RTC_OBJC_TYPE(RTCEAGLVideoView) *weakSelf = self;
+ _timer = [[RTCDisplayLinkTimer alloc] initWithTimerHandler:^{
+ RTC_OBJC_TYPE(RTCEAGLVideoView) *strongSelf = weakSelf;
+ [strongSelf displayLinkTimerDidFire];
+ }];
+ if ([[UIApplication sharedApplication] applicationState] == UIApplicationStateActive) {
+ [self setupGL];
+ }
+ return YES;
+}
+
+- (void)setMultipleTouchEnabled:(BOOL)multipleTouchEnabled {
+ [super setMultipleTouchEnabled:multipleTouchEnabled];
+ _glkView.multipleTouchEnabled = multipleTouchEnabled;
+}
+
+- (void)dealloc {
+ [[NSNotificationCenter defaultCenter] removeObserver:self];
+ UIApplicationState appState =
+ [UIApplication sharedApplication].applicationState;
+ if (appState == UIApplicationStateActive) {
+ [self teardownGL];
+ }
+ [_timer invalidate];
+ [self ensureGLContext];
+ _shader = nil;
+ if (_glContext && [EAGLContext currentContext] == _glContext) {
+ [EAGLContext setCurrentContext:nil];
+ }
+}
+
+#pragma mark - UIView
+
+- (void)setNeedsDisplay {
+ [super setNeedsDisplay];
+ _isDirty = YES;
+}
+
+- (void)setNeedsDisplayInRect:(CGRect)rect {
+ [super setNeedsDisplayInRect:rect];
+ _isDirty = YES;
+}
+
+- (void)layoutSubviews {
+ [super layoutSubviews];
+ _glkView.frame = self.bounds;
+}
+
+#pragma mark - GLKViewDelegate
+
+// This method is called when the GLKView's content is dirty and needs to be
+// redrawn. This occurs on main thread.
+- (void)glkView:(GLKView *)view drawInRect:(CGRect)rect {
+ // The renderer will draw the frame to the framebuffer corresponding to the
+ // one used by `view`.
+ RTC_OBJC_TYPE(RTCVideoFrame) *frame = self.videoFrame;
+ if (!frame || frame.timeStampNs == _lastDrawnFrameTimeStampNs) {
+ return;
+ }
+ RTCVideoRotation rotation = frame.rotation;
+ if (_rotationOverride != nil) {
+ [_rotationOverride getValue:&rotation];
+ }
+ [self ensureGLContext];
+ glClear(GL_COLOR_BUFFER_BIT);
+ if ([frame.buffer isKindOfClass:[RTC_OBJC_TYPE(RTCCVPixelBuffer) class]]) {
+ if (!_nv12TextureCache) {
+ _nv12TextureCache = [[RTCNV12TextureCache alloc] initWithContext:_glContext];
+ }
+ if (_nv12TextureCache) {
+ [_nv12TextureCache uploadFrameToTextures:frame];
+ [_shader applyShadingForFrameWithWidth:frame.width
+ height:frame.height
+ rotation:rotation
+ yPlane:_nv12TextureCache.yTexture
+ uvPlane:_nv12TextureCache.uvTexture];
+ [_nv12TextureCache releaseTextures];
+
+ _lastDrawnFrameTimeStampNs = self.videoFrame.timeStampNs;
+ }
+ } else {
+ if (!_i420TextureCache) {
+ _i420TextureCache = [[RTCI420TextureCache alloc] initWithContext:_glContext];
+ }
+ [_i420TextureCache uploadFrameToTextures:frame];
+ [_shader applyShadingForFrameWithWidth:frame.width
+ height:frame.height
+ rotation:rotation
+ yPlane:_i420TextureCache.yTexture
+ uPlane:_i420TextureCache.uTexture
+ vPlane:_i420TextureCache.vTexture];
+
+ _lastDrawnFrameTimeStampNs = self.videoFrame.timeStampNs;
+ }
+}
+
+#pragma mark - RTC_OBJC_TYPE(RTCVideoRenderer)
+
+// These methods may be called on non-main thread.
+- (void)setSize:(CGSize)size {
+ __weak RTC_OBJC_TYPE(RTCEAGLVideoView) *weakSelf = self;
+ dispatch_async(dispatch_get_main_queue(), ^{
+ RTC_OBJC_TYPE(RTCEAGLVideoView) *strongSelf = weakSelf;
+ [strongSelf.delegate videoView:strongSelf didChangeVideoSize:size];
+ });
+}
+
+- (void)renderFrame:(RTC_OBJC_TYPE(RTCVideoFrame) *)frame {
+ self.videoFrame = frame;
+}
+
+#pragma mark - Private
+
+- (void)displayLinkTimerDidFire {
+ // Don't render unless the video frame has changed or the view content
+ // has explicitly been marked dirty.
+ if (!_isDirty && _lastDrawnFrameTimeStampNs == self.videoFrame.timeStampNs) {
+ return;
+ }
+
+ // Always reset isDirty at this point, even if -[GLKView display]
+ // won't be called in the case the drawable size is empty.
+ _isDirty = NO;
+
+ // Only call -[GLKView display] if the drawable size is
+ // non-empty. Calling display will make the GLKView set up its
+ // render buffer if necessary, but that will fail with
+ // GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT if the size is empty.
+ if (self.bounds.size.width > 0 && self.bounds.size.height > 0) {
+ [_glkView display];
+ }
+}
+
+- (void)setupGL {
+ [self ensureGLContext];
+ glDisable(GL_DITHER);
+ _timer.isPaused = NO;
+}
+
+- (void)teardownGL {
+ self.videoFrame = nil;
+ _timer.isPaused = YES;
+ [_glkView deleteDrawable];
+ [self ensureGLContext];
+ _nv12TextureCache = nil;
+ _i420TextureCache = nil;
+}
+
+- (void)didBecomeActive {
+ [self setupGL];
+}
+
+- (void)willResignActive {
+ [self teardownGL];
+}
+
+- (void)ensureGLContext {
+ NSAssert(_glContext, @"context shouldn't be nil");
+ if ([EAGLContext currentContext] != _glContext) {
+ [EAGLContext setCurrentContext:_glContext];
+ }
+}
+
+@end
diff --git a/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCI420TextureCache.h b/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCI420TextureCache.h
new file mode 100644
index 0000000000..9fdcc5a695
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCI420TextureCache.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "RTCOpenGLDefines.h"
+#import "base/RTCVideoFrame.h"
+
+@interface RTCI420TextureCache : NSObject
+
+@property(nonatomic, readonly) GLuint yTexture;
+@property(nonatomic, readonly) GLuint uTexture;
+@property(nonatomic, readonly) GLuint vTexture;
+
+- (instancetype)init NS_UNAVAILABLE;
+- (instancetype)initWithContext:(GlContextType *)context NS_DESIGNATED_INITIALIZER;
+
+- (void)uploadFrameToTextures:(RTC_OBJC_TYPE(RTCVideoFrame) *)frame;
+
+@end
diff --git a/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCI420TextureCache.mm b/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCI420TextureCache.mm
new file mode 100644
index 0000000000..5dccd4bf6a
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCI420TextureCache.mm
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "RTCI420TextureCache.h"
+
+#if TARGET_OS_IPHONE
+#import <OpenGLES/ES3/gl.h>
+#else
+#import <OpenGL/gl3.h>
+#endif
+
+#import "base/RTCI420Buffer.h"
+#import "base/RTCVideoFrameBuffer.h"
+
+#include <vector>
+
+// Two sets of 3 textures are used here, one texture per Y, U and V plane,
+// alternating between the sets each frame. This avoids stalling the CPU when
+// uploading to a texture that the GPU may still be reading for the previous
+// frame.
+static const GLsizei kNumTextureSets = 2;
+static const GLsizei kNumTexturesPerSet = 3;
+static const GLsizei kNumTextures = kNumTexturesPerSet * kNumTextureSets;
+
+@implementation RTCI420TextureCache {
+ BOOL _hasUnpackRowLength;
+ GLint _currentTextureSet;
+ // Handles for OpenGL constructs.
+ GLuint _textures[kNumTextures];
+ // Used to create a non-padded plane for GPU upload when we receive padded frames.
+ std::vector<uint8_t> _planeBuffer;
+}
+
+- (GLuint)yTexture {
+ return _textures[_currentTextureSet * kNumTexturesPerSet];
+}
+
+- (GLuint)uTexture {
+ return _textures[_currentTextureSet * kNumTexturesPerSet + 1];
+}
+
+- (GLuint)vTexture {
+ return _textures[_currentTextureSet * kNumTexturesPerSet + 2];
+}
+
+- (instancetype)initWithContext:(GlContextType *)context {
+ if (self = [super init]) {
+#if TARGET_OS_IPHONE
+ _hasUnpackRowLength = (context.API == kEAGLRenderingAPIOpenGLES3);
+#else
+ _hasUnpackRowLength = YES;
+#endif
+ glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
+
+ [self setupTextures];
+ }
+ return self;
+}
+
+- (void)dealloc {
+ glDeleteTextures(kNumTextures, _textures);
+}
+
+- (void)setupTextures {
+ glGenTextures(kNumTextures, _textures);
+ // Set parameters for each of the textures we created.
+ for (GLsizei i = 0; i < kNumTextures; i++) {
+ glBindTexture(GL_TEXTURE_2D, _textures[i]);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ }
+}
+
+- (void)uploadPlane:(const uint8_t *)plane
+ texture:(GLuint)texture
+ width:(size_t)width
+ height:(size_t)height
+ stride:(int32_t)stride {
+ glBindTexture(GL_TEXTURE_2D, texture);
+
+ const uint8_t *uploadPlane = plane;
+ if ((size_t)stride != width) {
+ if (_hasUnpackRowLength) {
+ // GLES3 allows us to specify stride.
+ glPixelStorei(GL_UNPACK_ROW_LENGTH, stride);
+ glTexImage2D(GL_TEXTURE_2D,
+ 0,
+ RTC_PIXEL_FORMAT,
+ static_cast<GLsizei>(width),
+ static_cast<GLsizei>(height),
+ 0,
+ RTC_PIXEL_FORMAT,
+ GL_UNSIGNED_BYTE,
+ uploadPlane);
+ glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
+ return;
+ } else {
+ // Make an unpadded copy and upload that instead. Quick profiling showed
+ // that this is faster than uploading row by row using glTexSubImage2D.
+ uint8_t *unpaddedPlane = _planeBuffer.data();
+ for (size_t y = 0; y < height; ++y) {
+ memcpy(unpaddedPlane + y * width, plane + y * stride, width);
+ }
+ uploadPlane = unpaddedPlane;
+ }
+ }
+ glTexImage2D(GL_TEXTURE_2D,
+ 0,
+ RTC_PIXEL_FORMAT,
+ static_cast<GLsizei>(width),
+ static_cast<GLsizei>(height),
+ 0,
+ RTC_PIXEL_FORMAT,
+ GL_UNSIGNED_BYTE,
+ uploadPlane);
+}
+
+- (void)uploadFrameToTextures:(RTC_OBJC_TYPE(RTCVideoFrame) *)frame {
+ _currentTextureSet = (_currentTextureSet + 1) % kNumTextureSets;
+
+ id<RTC_OBJC_TYPE(RTCI420Buffer)> buffer = [frame.buffer toI420];
+
+ const int chromaWidth = buffer.chromaWidth;
+ const int chromaHeight = buffer.chromaHeight;
+ if (buffer.strideY != frame.width || buffer.strideU != chromaWidth ||
+ buffer.strideV != chromaWidth) {
+ _planeBuffer.resize(buffer.width * buffer.height);
+ }
+
+ [self uploadPlane:buffer.dataY
+ texture:self.yTexture
+ width:buffer.width
+ height:buffer.height
+ stride:buffer.strideY];
+
+ [self uploadPlane:buffer.dataU
+ texture:self.uTexture
+ width:chromaWidth
+ height:chromaHeight
+ stride:buffer.strideU];
+
+ [self uploadPlane:buffer.dataV
+ texture:self.vTexture
+ width:chromaWidth
+ height:chromaHeight
+ stride:buffer.strideV];
+}
+
+@end
diff --git a/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCNSGLVideoView.h b/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCNSGLVideoView.h
new file mode 100644
index 0000000000..c9ee986f88
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCNSGLVideoView.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <Foundation/Foundation.h>
+
+#if !TARGET_OS_IPHONE
+
+#import <AppKit/NSOpenGLView.h>
+
+#import "RTCVideoRenderer.h"
+#import "RTCVideoViewShading.h"
+
+NS_ASSUME_NONNULL_BEGIN
+
+@class RTC_OBJC_TYPE(RTCNSGLVideoView);
+
+RTC_OBJC_EXPORT
+@protocol RTC_OBJC_TYPE (RTCNSGLVideoViewDelegate) <RTC_OBJC_TYPE(RTCVideoViewDelegate)>
+@end
+
+RTC_OBJC_EXPORT
+@interface RTC_OBJC_TYPE (RTCNSGLVideoView) : NSOpenGLView <RTC_OBJC_TYPE(RTCVideoRenderer)>
+
+@property(nonatomic, weak) id<RTC_OBJC_TYPE(RTCVideoViewDelegate)> delegate;
+
+- (instancetype)initWithFrame:(NSRect)frameRect
+ pixelFormat:(NSOpenGLPixelFormat *)format
+ shader:(id<RTC_OBJC_TYPE(RTCVideoViewShading)>)shader
+ NS_DESIGNATED_INITIALIZER;
+
+@end
+
+NS_ASSUME_NONNULL_END
+
+#endif
diff --git a/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCNSGLVideoView.m b/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCNSGLVideoView.m
new file mode 100644
index 0000000000..168c73126f
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCNSGLVideoView.m
@@ -0,0 +1,199 @@
+/*
+ * Copyright 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <Foundation/Foundation.h>
+
+#if !TARGET_OS_IPHONE
+
+#import "RTCNSGLVideoView.h"
+
+#import <AppKit/NSOpenGL.h>
+#import <CoreVideo/CVDisplayLink.h>
+#import <OpenGL/gl3.h>
+
+#import "RTCDefaultShader.h"
+#import "RTCI420TextureCache.h"
+#import "base/RTCLogging.h"
+#import "base/RTCVideoFrame.h"
+
+@interface RTC_OBJC_TYPE (RTCNSGLVideoView) ()
+
+// `videoFrame` is set when we receive a frame from a worker thread and is read
+// from the display link callback so atomicity is required.
+@property(atomic, strong) RTC_OBJC_TYPE(RTCVideoFrame) *videoFrame;
+@property(atomic, strong) RTCI420TextureCache *i420TextureCache;
+
+- (void)drawFrame;
+@end
+
+static CVReturn OnDisplayLinkFired(CVDisplayLinkRef displayLink,
+ const CVTimeStamp *now,
+ const CVTimeStamp *outputTime,
+ CVOptionFlags flagsIn,
+ CVOptionFlags *flagsOut,
+ void *displayLinkContext) {
+ RTC_OBJC_TYPE(RTCNSGLVideoView) *view =
+ (__bridge RTC_OBJC_TYPE(RTCNSGLVideoView) *)displayLinkContext;
+ [view drawFrame];
+ return kCVReturnSuccess;
+}
+
+@implementation RTC_OBJC_TYPE (RTCNSGLVideoView) {
+ CVDisplayLinkRef _displayLink;
+ RTC_OBJC_TYPE(RTCVideoFrame) * _lastDrawnFrame;
+ id<RTC_OBJC_TYPE(RTCVideoViewShading)> _shader;
+}
+
+@synthesize delegate = _delegate;
+@synthesize videoFrame = _videoFrame;
+@synthesize i420TextureCache = _i420TextureCache;
+
+- (instancetype)initWithFrame:(NSRect)frame pixelFormat:(NSOpenGLPixelFormat *)format {
+ return [self initWithFrame:frame pixelFormat:format shader:[[RTCDefaultShader alloc] init]];
+}
+
+- (instancetype)initWithFrame:(NSRect)frame
+ pixelFormat:(NSOpenGLPixelFormat *)format
+ shader:(id<RTC_OBJC_TYPE(RTCVideoViewShading)>)shader {
+ if (self = [super initWithFrame:frame pixelFormat:format]) {
+ _shader = shader;
+ }
+ return self;
+}
+
+- (void)dealloc {
+ [self teardownDisplayLink];
+}
+
+- (void)drawRect:(NSRect)rect {
+ [self drawFrame];
+}
+
+- (void)reshape {
+ [super reshape];
+ NSRect frame = [self frame];
+ [self ensureGLContext];
+ CGLLockContext([[self openGLContext] CGLContextObj]);
+ glViewport(0, 0, frame.size.width, frame.size.height);
+ CGLUnlockContext([[self openGLContext] CGLContextObj]);
+}
+
+- (void)lockFocus {
+ NSOpenGLContext *context = [self openGLContext];
+ [super lockFocus];
+ if ([context view] != self) {
+ [context setView:self];
+ }
+ [context makeCurrentContext];
+}
+
+- (void)prepareOpenGL {
+ [super prepareOpenGL];
+ [self ensureGLContext];
+ glDisable(GL_DITHER);
+ [self setupDisplayLink];
+}
+
+- (void)clearGLContext {
+ [self ensureGLContext];
+ self.i420TextureCache = nil;
+ [super clearGLContext];
+}
+
+#pragma mark - RTC_OBJC_TYPE(RTCVideoRenderer)
+
+// These methods may be called on non-main thread.
+- (void)setSize:(CGSize)size {
+ dispatch_async(dispatch_get_main_queue(), ^{
+ [self.delegate videoView:self didChangeVideoSize:size];
+ });
+}
+
+- (void)renderFrame:(RTC_OBJC_TYPE(RTCVideoFrame) *)frame {
+ self.videoFrame = frame;
+}
+
+#pragma mark - Private
+
+- (void)drawFrame {
+ RTC_OBJC_TYPE(RTCVideoFrame) *frame = self.videoFrame;
+ if (!frame || frame == _lastDrawnFrame) {
+ return;
+ }
+ // This method may be called from CVDisplayLink callback which isn't on the
+ // main thread so we have to lock the GL context before drawing.
+ NSOpenGLContext *context = [self openGLContext];
+ CGLLockContext([context CGLContextObj]);
+
+ [self ensureGLContext];
+ glClear(GL_COLOR_BUFFER_BIT);
+
+ // Rendering native CVPixelBuffer is not supported on OS X.
+ // TODO(magjed): Add support for NV12 texture cache on OS X.
+ frame = [frame newI420VideoFrame];
+ if (!self.i420TextureCache) {
+ self.i420TextureCache = [[RTCI420TextureCache alloc] initWithContext:context];
+ }
+ RTCI420TextureCache *i420TextureCache = self.i420TextureCache;
+ if (i420TextureCache) {
+ [i420TextureCache uploadFrameToTextures:frame];
+ [_shader applyShadingForFrameWithWidth:frame.width
+ height:frame.height
+ rotation:frame.rotation
+ yPlane:i420TextureCache.yTexture
+ uPlane:i420TextureCache.uTexture
+ vPlane:i420TextureCache.vTexture];
+ [context flushBuffer];
+ _lastDrawnFrame = frame;
+ }
+ CGLUnlockContext([context CGLContextObj]);
+}
+
+- (void)setupDisplayLink {
+ if (_displayLink) {
+ return;
+ }
+ // Synchronize buffer swaps with vertical refresh rate.
+ GLint swapInt = 1;
+ [[self openGLContext] setValues:&swapInt forParameter:NSOpenGLCPSwapInterval];
+
+ // Create display link.
+ CVDisplayLinkCreateWithActiveCGDisplays(&_displayLink);
+ CVDisplayLinkSetOutputCallback(_displayLink,
+ &OnDisplayLinkFired,
+ (__bridge void *)self);
+ // Set the display link for the current renderer.
+ CGLContextObj cglContext = [[self openGLContext] CGLContextObj];
+ CGLPixelFormatObj cglPixelFormat = [[self pixelFormat] CGLPixelFormatObj];
+ CVDisplayLinkSetCurrentCGDisplayFromOpenGLContext(
+ _displayLink, cglContext, cglPixelFormat);
+ CVDisplayLinkStart(_displayLink);
+}
+
+- (void)teardownDisplayLink {
+ if (!_displayLink) {
+ return;
+ }
+ CVDisplayLinkRelease(_displayLink);
+ _displayLink = NULL;
+}
+
+- (void)ensureGLContext {
+ NSOpenGLContext* context = [self openGLContext];
+ NSAssert(context, @"context shouldn't be nil");
+ if ([NSOpenGLContext currentContext] != context) {
+ [context makeCurrentContext];
+ }
+}
+
+@end
+
+#endif // !TARGET_OS_IPHONE
diff --git a/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCNV12TextureCache.h b/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCNV12TextureCache.h
new file mode 100644
index 0000000000..f202b836b5
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCNV12TextureCache.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <GLKit/GLKit.h>
+
+#import "base/RTCMacros.h"
+
+@class RTC_OBJC_TYPE(RTCVideoFrame);
+
+NS_ASSUME_NONNULL_BEGIN
+
+@interface RTCNV12TextureCache : NSObject
+
+@property(nonatomic, readonly) GLuint yTexture;
+@property(nonatomic, readonly) GLuint uvTexture;
+
+- (instancetype)init NS_UNAVAILABLE;
+- (nullable instancetype)initWithContext:(EAGLContext *)context NS_DESIGNATED_INITIALIZER;
+
+- (BOOL)uploadFrameToTextures:(RTC_OBJC_TYPE(RTCVideoFrame) *)frame;
+
+- (void)releaseTextures;
+
+@end
+
+NS_ASSUME_NONNULL_END
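+
+// Typical per-frame flow (a sketch; `cache` is an RTCNV12TextureCache and
+// `shader` an id<RTC_OBJC_TYPE(RTCVideoViewShading)>):
+//
+//   if ([cache uploadFrameToTextures:frame]) {
+//     [shader applyShadingForFrameWithWidth:frame.width
+//                                    height:frame.height
+//                                  rotation:frame.rotation
+//                                    yPlane:cache.yTexture
+//                                   uvPlane:cache.uvTexture];
+//     [cache releaseTextures];  // the CV texture refs must be released
+//   }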
diff --git a/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCNV12TextureCache.m b/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCNV12TextureCache.m
new file mode 100644
index 0000000000..a520ac45b4
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCNV12TextureCache.m
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "RTCNV12TextureCache.h"
+
+#import "base/RTCVideoFrame.h"
+#import "base/RTCVideoFrameBuffer.h"
+#import "components/video_frame_buffer/RTCCVPixelBuffer.h"
+
+@implementation RTCNV12TextureCache {
+ CVOpenGLESTextureCacheRef _textureCache;
+ CVOpenGLESTextureRef _yTextureRef;
+ CVOpenGLESTextureRef _uvTextureRef;
+}
+
+- (GLuint)yTexture {
+ return CVOpenGLESTextureGetName(_yTextureRef);
+}
+
+- (GLuint)uvTexture {
+ return CVOpenGLESTextureGetName(_uvTextureRef);
+}
+
+- (instancetype)initWithContext:(EAGLContext *)context {
+ if (self = [super init]) {
+ CVReturn ret = CVOpenGLESTextureCacheCreate(
+ kCFAllocatorDefault, NULL,
+#if COREVIDEO_USE_EAGLCONTEXT_CLASS_IN_API
+ context,
+#else
+ (__bridge void *)context,
+#endif
+ NULL, &_textureCache);
+ if (ret != kCVReturnSuccess) {
+ self = nil;
+ }
+ }
+ return self;
+}
+
+- (BOOL)loadTexture:(CVOpenGLESTextureRef *)textureOut
+ pixelBuffer:(CVPixelBufferRef)pixelBuffer
+ planeIndex:(int)planeIndex
+ pixelFormat:(GLenum)pixelFormat {
+ const int width = CVPixelBufferGetWidthOfPlane(pixelBuffer, planeIndex);
+ const int height = CVPixelBufferGetHeightOfPlane(pixelBuffer, planeIndex);
+
+ if (*textureOut) {
+ CFRelease(*textureOut);
+ *textureOut = nil;
+ }
+ CVReturn ret = CVOpenGLESTextureCacheCreateTextureFromImage(
+ kCFAllocatorDefault, _textureCache, pixelBuffer, NULL, GL_TEXTURE_2D, pixelFormat, width,
+ height, pixelFormat, GL_UNSIGNED_BYTE, planeIndex, textureOut);
+ if (ret != kCVReturnSuccess) {
+ if (*textureOut) {
+ CFRelease(*textureOut);
+ *textureOut = nil;
+ }
+ return NO;
+ }
+ NSAssert(CVOpenGLESTextureGetTarget(*textureOut) == GL_TEXTURE_2D,
+ @"Unexpected GLES texture target");
+ glBindTexture(GL_TEXTURE_2D, CVOpenGLESTextureGetName(*textureOut));
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ return YES;
+}
+
+- (BOOL)uploadFrameToTextures:(RTC_OBJC_TYPE(RTCVideoFrame) *)frame {
+ NSAssert([frame.buffer isKindOfClass:[RTC_OBJC_TYPE(RTCCVPixelBuffer) class]],
+ @"frame must be CVPixelBuffer backed");
+ RTC_OBJC_TYPE(RTCCVPixelBuffer) *rtcPixelBuffer = (RTC_OBJC_TYPE(RTCCVPixelBuffer) *)frame.buffer;
+ CVPixelBufferRef pixelBuffer = rtcPixelBuffer.pixelBuffer;
+ return [self loadTexture:&_yTextureRef
+ pixelBuffer:pixelBuffer
+ planeIndex:0
+ pixelFormat:GL_LUMINANCE] &&
+ [self loadTexture:&_uvTextureRef
+ pixelBuffer:pixelBuffer
+ planeIndex:1
+ pixelFormat:GL_LUMINANCE_ALPHA];
+}
+
+- (void)releaseTextures {
+ if (_uvTextureRef) {
+ CFRelease(_uvTextureRef);
+ _uvTextureRef = nil;
+ }
+ if (_yTextureRef) {
+ CFRelease(_yTextureRef);
+ _yTextureRef = nil;
+ }
+}
+
+- (void)dealloc {
+ [self releaseTextures];
+ if (_textureCache) {
+ CFRelease(_textureCache);
+ _textureCache = nil;
+ }
+}
+
+@end
diff --git a/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCOpenGLDefines.h b/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCOpenGLDefines.h
new file mode 100644
index 0000000000..4088535861
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCOpenGLDefines.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <Foundation/Foundation.h>
+
+#if TARGET_OS_IPHONE
+#define RTC_PIXEL_FORMAT GL_LUMINANCE
+#define SHADER_VERSION
+#define VERTEX_SHADER_IN "attribute"
+#define VERTEX_SHADER_OUT "varying"
+#define FRAGMENT_SHADER_IN "varying"
+#define FRAGMENT_SHADER_OUT
+#define FRAGMENT_SHADER_COLOR "gl_FragColor"
+#define FRAGMENT_SHADER_TEXTURE "texture2D"
+
+@class EAGLContext;
+typedef EAGLContext GlContextType;
+#else
+#define RTC_PIXEL_FORMAT GL_RED
+#define SHADER_VERSION "#version 150\n"
+#define VERTEX_SHADER_IN "in"
+#define VERTEX_SHADER_OUT "out"
+#define FRAGMENT_SHADER_IN "in"
+#define FRAGMENT_SHADER_OUT "out vec4 fragColor;\n"
+#define FRAGMENT_SHADER_COLOR "fragColor"
+#define FRAGMENT_SHADER_TEXTURE "texture"
+
+@class NSOpenGLContext;
+typedef NSOpenGLContext GlContextType;
+#endif
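+
+// Example: with these macros, the shared declaration
+//   FRAGMENT_SHADER_IN " vec2 v_texcoord;\n"
+// expands to "varying vec2 v_texcoord;" under OpenGL ES and to
+// "in vec2 v_texcoord;" under desktop OpenGL, letting one shader source
+// compile in both dialects.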
diff --git a/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCShader.h b/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCShader.h
new file mode 100644
index 0000000000..d1b91fb643
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCShader.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "base/RTCVideoFrame.h"
+
+RTC_EXTERN const char kRTCVertexShaderSource[];
+
+RTC_EXTERN GLuint RTCCreateShader(GLenum type, const GLchar* source);
+RTC_EXTERN GLuint RTCCreateProgram(GLuint vertexShader, GLuint fragmentShader);
+RTC_EXTERN GLuint
+RTCCreateProgramFromFragmentSource(const char fragmentShaderSource[]);
+RTC_EXTERN BOOL RTCCreateVertexBuffer(GLuint* vertexBuffer,
+ GLuint* vertexArray);
+RTC_EXTERN void RTCSetVertexData(RTCVideoRotation rotation);
diff --git a/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCShader.mm b/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCShader.mm
new file mode 100644
index 0000000000..8eccd7fbec
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCShader.mm
@@ -0,0 +1,189 @@
+/*
+ * Copyright 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "RTCShader.h"
+
+#if TARGET_OS_IPHONE
+#import <OpenGLES/ES3/gl.h>
+#else
+#import <OpenGL/gl3.h>
+#endif
+
+#include <algorithm>
+#include <array>
+#include <memory>
+
+#import "RTCOpenGLDefines.h"
+
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+// Vertex shader doesn't do anything except pass coordinates through.
+const char kRTCVertexShaderSource[] =
+ SHADER_VERSION
+ VERTEX_SHADER_IN " vec2 position;\n"
+ VERTEX_SHADER_IN " vec2 texcoord;\n"
+ VERTEX_SHADER_OUT " vec2 v_texcoord;\n"
+ "void main() {\n"
+ " gl_Position = vec4(position.x, position.y, 0.0, 1.0);\n"
+ " v_texcoord = texcoord;\n"
+ "}\n";
+
+// Compiles a shader of the given `type` with GLSL source `source` and returns
+// the shader handle or 0 on error.
+GLuint RTCCreateShader(GLenum type, const GLchar *source) {
+ GLuint shader = glCreateShader(type);
+ if (!shader) {
+ return 0;
+ }
+ glShaderSource(shader, 1, &source, NULL);
+ glCompileShader(shader);
+ GLint compileStatus = GL_FALSE;
+ glGetShaderiv(shader, GL_COMPILE_STATUS, &compileStatus);
+ if (compileStatus == GL_FALSE) {
+ GLint logLength = 0;
+ // The null termination character is included in the returned log length.
+ glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &logLength);
+ if (logLength > 0) {
+ std::unique_ptr<char[]> compileLog(new char[logLength]);
+ // The returned string is null terminated.
+ glGetShaderInfoLog(shader, logLength, NULL, compileLog.get());
+ RTC_LOG(LS_ERROR) << "Shader compile error: " << compileLog.get();
+ }
+ glDeleteShader(shader);
+ shader = 0;
+ }
+ return shader;
+}
+
+// Links a shader program with the given vertex and fragment shaders and
+// returns the program handle or 0 on error.
+GLuint RTCCreateProgram(GLuint vertexShader, GLuint fragmentShader) {
+ if (vertexShader == 0 || fragmentShader == 0) {
+ return 0;
+ }
+ GLuint program = glCreateProgram();
+ if (!program) {
+ return 0;
+ }
+ glAttachShader(program, vertexShader);
+ glAttachShader(program, fragmentShader);
+ glLinkProgram(program);
+ GLint linkStatus = GL_FALSE;
+ glGetProgramiv(program, GL_LINK_STATUS, &linkStatus);
+ if (linkStatus == GL_FALSE) {
+ glDeleteProgram(program);
+ program = 0;
+ }
+ return program;
+}
+
+// Creates and links a shader program with the given fragment shader source and
+// a plain vertex shader. Returns the program handle or 0 on error.
+GLuint RTCCreateProgramFromFragmentSource(const char fragmentShaderSource[]) {
+ GLuint vertexShader = RTCCreateShader(GL_VERTEX_SHADER, kRTCVertexShaderSource);
+ RTC_CHECK(vertexShader) << "failed to create vertex shader";
+ GLuint fragmentShader =
+ RTCCreateShader(GL_FRAGMENT_SHADER, fragmentShaderSource);
+ RTC_CHECK(fragmentShader) << "failed to create fragment shader";
+ GLuint program = RTCCreateProgram(vertexShader, fragmentShader);
+ // Shaders are created only to generate program.
+ if (vertexShader) {
+ glDeleteShader(vertexShader);
+ }
+ if (fragmentShader) {
+ glDeleteShader(fragmentShader);
+ }
+
+ // Set vertex shader variables 'position' and 'texcoord' in program.
+ GLint position = glGetAttribLocation(program, "position");
+ GLint texcoord = glGetAttribLocation(program, "texcoord");
+ if (position < 0 || texcoord < 0) {
+ glDeleteProgram(program);
+ return 0;
+ }
+
+ // Read position attribute with size of 2 and stride of 4 beginning at the start of the array. The
+ // last argument indicates offset of data within the vertex buffer.
+ glVertexAttribPointer(position, 2, GL_FLOAT, GL_FALSE, 4 * sizeof(GLfloat), (void *)0);
+ glEnableVertexAttribArray(position);
+
+ // Read texcoord attribute with size of 2 and stride of 4 beginning at the first texcoord in the
+ // array. The last argument indicates offset of data within the vertex buffer.
+ glVertexAttribPointer(
+ texcoord, 2, GL_FLOAT, GL_FALSE, 4 * sizeof(GLfloat), (void *)(2 * sizeof(GLfloat)));
+ glEnableVertexAttribArray(texcoord);
+
+ return program;
+}
+
+BOOL RTCCreateVertexBuffer(GLuint *vertexBuffer, GLuint *vertexArray) {
+#if !TARGET_OS_IPHONE
+ glGenVertexArrays(1, vertexArray);
+ if (*vertexArray == 0) {
+ return NO;
+ }
+ glBindVertexArray(*vertexArray);
+#endif
+ glGenBuffers(1, vertexBuffer);
+ if (*vertexBuffer == 0) {
+ glDeleteVertexArrays(1, vertexArray);
+ return NO;
+ }
+ glBindBuffer(GL_ARRAY_BUFFER, *vertexBuffer);
+ glBufferData(GL_ARRAY_BUFFER, 4 * 4 * sizeof(GLfloat), NULL, GL_DYNAMIC_DRAW);
+ return YES;
+}
+
+// Set vertex data to the currently bound vertex buffer.
+void RTCSetVertexData(RTCVideoRotation rotation) {
+ // When modelview and projection matrices are identity (default) the world is
+ // contained in the square around origin with unit size 2. Drawing to these
+ // coordinates is equivalent to drawing to the entire screen. The texture is
+ // stretched over that square using texture coordinates (u, v) that range
+ // from (0, 0) to (1, 1) inclusive. Texture coordinates are flipped vertically
+ // here because the incoming frame has origin in upper left hand corner but
+ // OpenGL expects origin in bottom left corner.
+ std::array<std::array<GLfloat, 2>, 4> UVCoords = {{
+ {{0, 1}}, // Lower left.
+ {{1, 1}}, // Lower right.
+ {{1, 0}}, // Upper right.
+ {{0, 0}}, // Upper left.
+ }};
+
+ // Rotate the UV coordinates.
+ int rotation_offset;
+ switch (rotation) {
+ case RTCVideoRotation_0:
+ rotation_offset = 0;
+ break;
+ case RTCVideoRotation_90:
+ rotation_offset = 1;
+ break;
+ case RTCVideoRotation_180:
+ rotation_offset = 2;
+ break;
+ case RTCVideoRotation_270:
+ rotation_offset = 3;
+ break;
+ }
+ std::rotate(UVCoords.begin(), UVCoords.begin() + rotation_offset,
+ UVCoords.end());
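+ // E.g. for RTCVideoRotation_90 the coordinates above become
+ // {1, 1}, {1, 0}, {0, 0}, {0, 1}: each vertex now samples the texel one
+ // corner ahead, which draws the frame rotated by 90 degrees.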
+
+ const GLfloat gVertices[] = {
+ // X, Y, U, V.
+ -1, -1, UVCoords[0][0], UVCoords[0][1],
+ 1, -1, UVCoords[1][0], UVCoords[1][1],
+ 1, 1, UVCoords[2][0], UVCoords[2][1],
+ -1, 1, UVCoords[3][0], UVCoords[3][1],
+ };
+
+ glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(gVertices), gVertices);
+}
diff --git a/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCVideoViewShading.h b/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCVideoViewShading.h
new file mode 100644
index 0000000000..9df30a8fa0
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/renderer/opengl/RTCVideoViewShading.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <Foundation/Foundation.h>
+
+#import "RTCVideoFrame.h"
+
+NS_ASSUME_NONNULL_BEGIN
+
+/**
+ * RTCVideoViewShading provides a way for apps to customize the OpenGL (ES)
+ * shaders used for rendering in RTCEAGLVideoView/RTCNSGLVideoView.
+ */
+RTC_OBJC_EXPORT
+@protocol RTC_OBJC_TYPE (RTCVideoViewShading) <NSObject>
+
+/** Callback for I420 frames. Each plane is given as a texture. */
+- (void)applyShadingForFrameWithWidth:(int)width
+ height:(int)height
+ rotation:(RTCVideoRotation)rotation
+ yPlane:(GLuint)yPlane
+ uPlane:(GLuint)uPlane
+ vPlane:(GLuint)vPlane;
+
+/** Callback for NV12 frames. Each plane is given as a texture. */
+- (void)applyShadingForFrameWithWidth:(int)width
+ height:(int)height
+ rotation:(RTCVideoRotation)rotation
+ yPlane:(GLuint)yPlane
+ uvPlane:(GLuint)uvPlane;
+
+@end
+
+NS_ASSUME_NONNULL_END
diff --git a/third_party/libwebrtc/sdk/objc/components/video_codec/RTCCodecSpecificInfoH264+Private.h b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCCodecSpecificInfoH264+Private.h
new file mode 100644
index 0000000000..a0cd8515d1
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCCodecSpecificInfoH264+Private.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "RTCCodecSpecificInfoH264.h"
+
+#include "modules/video_coding/include/video_codec_interface.h"
+
+NS_ASSUME_NONNULL_BEGIN
+
+/* Interfaces for converting to/from internal C++ formats. */
+@interface RTC_OBJC_TYPE (RTCCodecSpecificInfoH264) ()
+
+- (webrtc::CodecSpecificInfo)nativeCodecSpecificInfo;
+
+@end
+
+NS_ASSUME_NONNULL_END
diff --git a/third_party/libwebrtc/sdk/objc/components/video_codec/RTCCodecSpecificInfoH264.h b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCCodecSpecificInfoH264.h
new file mode 100644
index 0000000000..ae3003a115
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCCodecSpecificInfoH264.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <Foundation/Foundation.h>
+
+#import "RTCCodecSpecificInfo.h"
+#import "RTCMacros.h"
+
+/** Class for H264 specific config. */
+typedef NS_ENUM(NSUInteger, RTCH264PacketizationMode) {
+ RTCH264PacketizationModeNonInterleaved = 0, // Mode 1 - STAP-A and FU-A allowed
+ RTCH264PacketizationModeSingleNalUnit // Mode 0 - only single NAL units allowed
+};
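+
+// These values correspond to the `packetization-mode` SDP fmtp parameter
+// defined in RFC 6184 (0 = single NAL unit mode, 1 = non-interleaved mode).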
+
+RTC_OBJC_EXPORT
+@interface RTC_OBJC_TYPE (RTCCodecSpecificInfoH264) : NSObject <RTC_OBJC_TYPE(RTCCodecSpecificInfo)>
+
+@property(nonatomic, assign) RTCH264PacketizationMode packetizationMode;
+
+@end
diff --git a/third_party/libwebrtc/sdk/objc/components/video_codec/RTCCodecSpecificInfoH264.mm b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCCodecSpecificInfoH264.mm
new file mode 100644
index 0000000000..e38ed307b3
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCCodecSpecificInfoH264.mm
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "RTCCodecSpecificInfoH264+Private.h"
+
+#import "RTCH264ProfileLevelId.h"
+
+// H264 specific settings.
+@implementation RTC_OBJC_TYPE (RTCCodecSpecificInfoH264)
+
+@synthesize packetizationMode = _packetizationMode;
+
+- (webrtc::CodecSpecificInfo)nativeCodecSpecificInfo {
+ webrtc::CodecSpecificInfo codecSpecificInfo;
+ codecSpecificInfo.codecType = webrtc::kVideoCodecH264;
+ codecSpecificInfo.codecSpecific.H264.packetization_mode =
+ (webrtc::H264PacketizationMode)_packetizationMode;
+
+ return codecSpecificInfo;
+}
+
+@end
diff --git a/third_party/libwebrtc/sdk/objc/components/video_codec/RTCDefaultVideoDecoderFactory.h b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCDefaultVideoDecoderFactory.h
new file mode 100644
index 0000000000..de5a9c4684
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCDefaultVideoDecoderFactory.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <Foundation/Foundation.h>
+
+#import "RTCMacros.h"
+#import "RTCVideoDecoderFactory.h"
+
+NS_ASSUME_NONNULL_BEGIN
+
+/** This decoder factory includes support for all codecs bundled with WebRTC. If using custom
+ * codecs, create custom implementations of RTCVideoEncoderFactory and
+ * RTCVideoDecoderFactory.
+ */
+RTC_OBJC_EXPORT
+@interface RTC_OBJC_TYPE (RTCDefaultVideoDecoderFactory) : NSObject <RTC_OBJC_TYPE(RTCVideoDecoderFactory)>
+@end
+
+NS_ASSUME_NONNULL_END
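+
+// A sketch of typical use, handing both default factories to the peer
+// connection factory:
+//
+//   RTC_OBJC_TYPE(RTCDefaultVideoDecoderFactory) *decoderFactory =
+//       [[RTC_OBJC_TYPE(RTCDefaultVideoDecoderFactory) alloc] init];
+//   RTC_OBJC_TYPE(RTCDefaultVideoEncoderFactory) *encoderFactory =
+//       [[RTC_OBJC_TYPE(RTCDefaultVideoEncoderFactory) alloc] init];
+//   RTC_OBJC_TYPE(RTCPeerConnectionFactory) *factory =
+//       [[RTC_OBJC_TYPE(RTCPeerConnectionFactory) alloc]
+//           initWithEncoderFactory:encoderFactory
+//                   decoderFactory:decoderFactory];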
diff --git a/third_party/libwebrtc/sdk/objc/components/video_codec/RTCDefaultVideoDecoderFactory.m b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCDefaultVideoDecoderFactory.m
new file mode 100644
index 0000000000..6e3baa8750
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCDefaultVideoDecoderFactory.m
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2017 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "RTCDefaultVideoDecoderFactory.h"
+
+#import "RTCH264ProfileLevelId.h"
+#import "RTCVideoDecoderH264.h"
+#import "api/video_codec/RTCVideoCodecConstants.h"
+#import "api/video_codec/RTCVideoDecoderVP8.h"
+#import "api/video_codec/RTCVideoDecoderVP9.h"
+#import "base/RTCVideoCodecInfo.h"
+
+#if defined(RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY)
+#import "api/video_codec/RTCVideoDecoderAV1.h" // nogncheck
+#endif
+
+@implementation RTC_OBJC_TYPE (RTCDefaultVideoDecoderFactory)
+
+- (NSArray<RTC_OBJC_TYPE(RTCVideoCodecInfo) *> *)supportedCodecs {
+ NSDictionary<NSString *, NSString *> *constrainedHighParams = @{
+ @"profile-level-id" : kRTCMaxSupportedH264ProfileLevelConstrainedHigh,
+ @"level-asymmetry-allowed" : @"1",
+ @"packetization-mode" : @"1",
+ };
+ RTC_OBJC_TYPE(RTCVideoCodecInfo) *constrainedHighInfo =
+ [[RTC_OBJC_TYPE(RTCVideoCodecInfo) alloc] initWithName:kRTCVideoCodecH264Name
+ parameters:constrainedHighParams];
+
+ NSDictionary<NSString *, NSString *> *constrainedBaselineParams = @{
+ @"profile-level-id" : kRTCMaxSupportedH264ProfileLevelConstrainedBaseline,
+ @"level-asymmetry-allowed" : @"1",
+ @"packetization-mode" : @"1",
+ };
+ RTC_OBJC_TYPE(RTCVideoCodecInfo) *constrainedBaselineInfo =
+ [[RTC_OBJC_TYPE(RTCVideoCodecInfo) alloc] initWithName:kRTCVideoCodecH264Name
+ parameters:constrainedBaselineParams];
+
+ RTC_OBJC_TYPE(RTCVideoCodecInfo) *vp8Info =
+ [[RTC_OBJC_TYPE(RTCVideoCodecInfo) alloc] initWithName:kRTCVideoCodecVp8Name];
+
+ NSMutableArray<RTC_OBJC_TYPE(RTCVideoCodecInfo) *> *result = [@[
+ constrainedHighInfo,
+ constrainedBaselineInfo,
+ vp8Info,
+ ] mutableCopy];
+
+ if ([RTC_OBJC_TYPE(RTCVideoDecoderVP9) isSupported]) {
+ [result
+ addObject:[[RTC_OBJC_TYPE(RTCVideoCodecInfo) alloc] initWithName:kRTCVideoCodecVp9Name]];
+ }
+
+#if defined(RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY)
+ [result addObject:[[RTC_OBJC_TYPE(RTCVideoCodecInfo) alloc] initWithName:kRTCVideoCodecAv1Name]];
+#endif
+
+ return result;
+}
+
+- (id<RTC_OBJC_TYPE(RTCVideoDecoder)>)createDecoder:(RTC_OBJC_TYPE(RTCVideoCodecInfo) *)info {
+ if ([info.name isEqualToString:kRTCVideoCodecH264Name]) {
+ return [[RTC_OBJC_TYPE(RTCVideoDecoderH264) alloc] init];
+ } else if ([info.name isEqualToString:kRTCVideoCodecVp8Name]) {
+ return [RTC_OBJC_TYPE(RTCVideoDecoderVP8) vp8Decoder];
+ } else if ([info.name isEqualToString:kRTCVideoCodecVp9Name] &&
+ [RTC_OBJC_TYPE(RTCVideoDecoderVP9) isSupported]) {
+ return [RTC_OBJC_TYPE(RTCVideoDecoderVP9) vp9Decoder];
+ }
+
+#if defined(RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY)
+ if ([info.name isEqualToString:kRTCVideoCodecAv1Name]) {
+ return [RTC_OBJC_TYPE(RTCVideoDecoderAV1) av1Decoder];
+ }
+#endif
+
+ return nil;
+}
+
+@end
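+
+// Typical usage (sketch): hand both default factories to the peer connection
+// factory so the bundled codecs are advertised, e.g.
+//   RTC_OBJC_TYPE(RTCPeerConnectionFactory) *pcf = [[RTC_OBJC_TYPE(RTCPeerConnectionFactory) alloc]
+//       initWithEncoderFactory:[[RTC_OBJC_TYPE(RTCDefaultVideoEncoderFactory) alloc] init]
+//               decoderFactory:[[RTC_OBJC_TYPE(RTCDefaultVideoDecoderFactory) alloc] init]];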
diff --git a/third_party/libwebrtc/sdk/objc/components/video_codec/RTCDefaultVideoEncoderFactory.h b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCDefaultVideoEncoderFactory.h
new file mode 100644
index 0000000000..92ab40c95b
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCDefaultVideoEncoderFactory.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <Foundation/Foundation.h>
+
+#import "RTCMacros.h"
+#import "RTCVideoEncoderFactory.h"
+
+NS_ASSUME_NONNULL_BEGIN
+
+/** This encoder factory includes support for all codecs bundled with WebRTC. If you are using
+ * custom codecs, create custom implementations of RTCVideoEncoderFactory and
+ * RTCVideoDecoderFactory instead.
+ */
+RTC_OBJC_EXPORT
+@interface RTC_OBJC_TYPE (RTCDefaultVideoEncoderFactory) : NSObject <RTC_OBJC_TYPE(RTCVideoEncoderFactory)>
+
+@property(nonatomic, retain) RTC_OBJC_TYPE(RTCVideoCodecInfo) *preferredCodec;
+
++ (NSArray<RTC_OBJC_TYPE(RTCVideoCodecInfo) *> *)supportedCodecs;
+
+@end
+
+NS_ASSUME_NONNULL_END
diff --git a/third_party/libwebrtc/sdk/objc/components/video_codec/RTCDefaultVideoEncoderFactory.m b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCDefaultVideoEncoderFactory.m
new file mode 100644
index 0000000000..8de55bde4a
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCDefaultVideoEncoderFactory.m
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2017 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "RTCDefaultVideoEncoderFactory.h"
+
+#import "RTCH264ProfileLevelId.h"
+#import "RTCVideoEncoderH264.h"
+#import "api/video_codec/RTCVideoCodecConstants.h"
+#import "api/video_codec/RTCVideoEncoderVP8.h"
+#import "api/video_codec/RTCVideoEncoderVP9.h"
+#import "base/RTCVideoCodecInfo.h"
+
+#if defined(RTC_USE_LIBAOM_AV1_ENCODER)
+#import "api/video_codec/RTCVideoEncoderAV1.h" // nogncheck
+#endif
+
+@implementation RTC_OBJC_TYPE (RTCDefaultVideoEncoderFactory)
+
+@synthesize preferredCodec;
+
++ (NSArray<RTC_OBJC_TYPE(RTCVideoCodecInfo) *> *)supportedCodecs {
+ NSDictionary<NSString *, NSString *> *constrainedHighParams = @{
+ @"profile-level-id" : kRTCMaxSupportedH264ProfileLevelConstrainedHigh,
+ @"level-asymmetry-allowed" : @"1",
+ @"packetization-mode" : @"1",
+ };
+ RTC_OBJC_TYPE(RTCVideoCodecInfo) *constrainedHighInfo =
+ [[RTC_OBJC_TYPE(RTCVideoCodecInfo) alloc] initWithName:kRTCVideoCodecH264Name
+ parameters:constrainedHighParams];
+
+ NSDictionary<NSString *, NSString *> *constrainedBaselineParams = @{
+ @"profile-level-id" : kRTCMaxSupportedH264ProfileLevelConstrainedBaseline,
+ @"level-asymmetry-allowed" : @"1",
+ @"packetization-mode" : @"1",
+ };
+ RTC_OBJC_TYPE(RTCVideoCodecInfo) *constrainedBaselineInfo =
+ [[RTC_OBJC_TYPE(RTCVideoCodecInfo) alloc] initWithName:kRTCVideoCodecH264Name
+ parameters:constrainedBaselineParams];
+
+ RTC_OBJC_TYPE(RTCVideoCodecInfo) *vp8Info =
+ [[RTC_OBJC_TYPE(RTCVideoCodecInfo) alloc] initWithName:kRTCVideoCodecVp8Name];
+
+ NSMutableArray<RTC_OBJC_TYPE(RTCVideoCodecInfo) *> *result = [@[
+ constrainedHighInfo,
+ constrainedBaselineInfo,
+ vp8Info,
+ ] mutableCopy];
+
+ if ([RTC_OBJC_TYPE(RTCVideoEncoderVP9) isSupported]) {
+ [result
+ addObject:[[RTC_OBJC_TYPE(RTCVideoCodecInfo) alloc] initWithName:kRTCVideoCodecVp9Name]];
+ }
+
+#if defined(RTC_USE_LIBAOM_AV1_ENCODER)
+ [result addObject:[[RTC_OBJC_TYPE(RTCVideoCodecInfo) alloc] initWithName:kRTCVideoCodecAv1Name]];
+#endif
+
+ return result;
+}
+
+- (id<RTC_OBJC_TYPE(RTCVideoEncoder)>)createEncoder:(RTC_OBJC_TYPE(RTCVideoCodecInfo) *)info {
+ if ([info.name isEqualToString:kRTCVideoCodecH264Name]) {
+ return [[RTC_OBJC_TYPE(RTCVideoEncoderH264) alloc] initWithCodecInfo:info];
+ } else if ([info.name isEqualToString:kRTCVideoCodecVp8Name]) {
+ return [RTC_OBJC_TYPE(RTCVideoEncoderVP8) vp8Encoder];
+ } else if ([info.name isEqualToString:kRTCVideoCodecVp9Name] &&
+ [RTC_OBJC_TYPE(RTCVideoEncoderVP9) isSupported]) {
+ return [RTC_OBJC_TYPE(RTCVideoEncoderVP9) vp9Encoder];
+ }
+
+#if defined(RTC_USE_LIBAOM_AV1_ENCODER)
+ if ([info.name isEqualToString:kRTCVideoCodecAv1Name]) {
+ return [RTC_OBJC_TYPE(RTCVideoEncoderAV1) av1Encoder];
+ }
+#endif
+
+ return nil;
+}
+
+- (NSArray<RTC_OBJC_TYPE(RTCVideoCodecInfo) *> *)supportedCodecs {
+ NSMutableArray<RTC_OBJC_TYPE(RTCVideoCodecInfo) *> *codecs =
+ [[[self class] supportedCodecs] mutableCopy];
+
+ NSMutableArray<RTC_OBJC_TYPE(RTCVideoCodecInfo) *> *orderedCodecs = [NSMutableArray array];
+ NSUInteger index = [codecs indexOfObject:self.preferredCodec];
+ if (index != NSNotFound) {
+ [orderedCodecs addObject:[codecs objectAtIndex:index]];
+ [codecs removeObjectAtIndex:index];
+ }
+ [orderedCodecs addObjectsFromArray:codecs];
+
+ return [orderedCodecs copy];
+}
+
+@end
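+
+// Usage sketch: setting `preferredCodec` moves that codec to the front of the
+// list returned by the instance `supportedCodecs`, which in turn biases codec
+// ordering during SDP negotiation:
+//   factory.preferredCodec =
+//       [[RTC_OBJC_TYPE(RTCVideoCodecInfo) alloc] initWithName:kRTCVideoCodecVp9Name];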
diff --git a/third_party/libwebrtc/sdk/objc/components/video_codec/RTCH264ProfileLevelId.h b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCH264ProfileLevelId.h
new file mode 100644
index 0000000000..dac7bb5610
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCH264ProfileLevelId.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <Foundation/Foundation.h>
+
+#import "RTCMacros.h"
+
+RTC_OBJC_EXPORT extern NSString *const kRTCVideoCodecH264Name;
+RTC_OBJC_EXPORT extern NSString *const kRTCLevel31ConstrainedHigh;
+RTC_OBJC_EXPORT extern NSString *const kRTCLevel31ConstrainedBaseline;
+RTC_OBJC_EXPORT extern NSString *const kRTCMaxSupportedH264ProfileLevelConstrainedHigh;
+RTC_OBJC_EXPORT extern NSString *const kRTCMaxSupportedH264ProfileLevelConstrainedBaseline;
+
+/** H264 Profiles and levels. */
+typedef NS_ENUM(NSUInteger, RTCH264Profile) {
+ RTCH264ProfileConstrainedBaseline,
+ RTCH264ProfileBaseline,
+ RTCH264ProfileMain,
+ RTCH264ProfileConstrainedHigh,
+ RTCH264ProfileHigh,
+};
+
+typedef NS_ENUM(NSUInteger, RTCH264Level) {
+ RTCH264Level1_b = 0,
+ RTCH264Level1 = 10,
+ RTCH264Level1_1 = 11,
+ RTCH264Level1_2 = 12,
+ RTCH264Level1_3 = 13,
+ RTCH264Level2 = 20,
+ RTCH264Level2_1 = 21,
+ RTCH264Level2_2 = 22,
+ RTCH264Level3 = 30,
+ RTCH264Level3_1 = 31,
+ RTCH264Level3_2 = 32,
+ RTCH264Level4 = 40,
+ RTCH264Level4_1 = 41,
+ RTCH264Level4_2 = 42,
+ RTCH264Level5 = 50,
+ RTCH264Level5_1 = 51,
+ RTCH264Level5_2 = 52
+};
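+// Note: the numeric values mirror H.264 level_idc (e.g. level 3.1 -> 31);
+// level 1b has no dedicated level_idc and is mapped to 0 here.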
+
+RTC_OBJC_EXPORT
+@interface RTC_OBJC_TYPE (RTCH264ProfileLevelId) : NSObject
+
+@property(nonatomic, readonly) RTCH264Profile profile;
+@property(nonatomic, readonly) RTCH264Level level;
+@property(nonatomic, readonly) NSString *hexString;
+
+- (instancetype)initWithHexString:(NSString *)hexString;
+- (instancetype)initWithProfile:(RTCH264Profile)profile level:(RTCH264Level)level;
+
+@end
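+
+// Example (sketch): parsing the default constrained-baseline string
+//   RTC_OBJC_TYPE(RTCH264ProfileLevelId) *plid =
+//       [[RTC_OBJC_TYPE(RTCH264ProfileLevelId) alloc] initWithHexString:@"42e01f"];
+// yields profile RTCH264ProfileConstrainedBaseline at RTCH264Level3_1.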
diff --git a/third_party/libwebrtc/sdk/objc/components/video_codec/RTCH264ProfileLevelId.mm b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCH264ProfileLevelId.mm
new file mode 100644
index 0000000000..f0ef3ec232
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCH264ProfileLevelId.mm
@@ -0,0 +1,120 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#import "RTCH264ProfileLevelId.h"
+
+#import "helpers/NSString+StdString.h"
+#if defined(WEBRTC_IOS)
+#import "UIDevice+H264Profile.h"
+#endif
+
+#include "api/video_codecs/h264_profile_level_id.h"
+#include "media/base/media_constants.h"
+
+namespace {
+
+NSString *MaxSupportedProfileLevelConstrainedHigh();
+NSString *MaxSupportedProfileLevelConstrainedBaseline();
+
+} // namespace
+
+NSString *const kRTCVideoCodecH264Name = @(cricket::kH264CodecName);
+NSString *const kRTCLevel31ConstrainedHigh = @"640c1f";
+NSString *const kRTCLevel31ConstrainedBaseline = @"42e01f";
+NSString *const kRTCMaxSupportedH264ProfileLevelConstrainedHigh =
+ MaxSupportedProfileLevelConstrainedHigh();
+NSString *const kRTCMaxSupportedH264ProfileLevelConstrainedBaseline =
+ MaxSupportedProfileLevelConstrainedBaseline();
+
+namespace {
+
+#if defined(WEBRTC_IOS)
+
+NSString *MaxSupportedLevelForProfile(webrtc::H264Profile profile) {
+ const absl::optional<webrtc::H264ProfileLevelId> profileLevelId =
+ [UIDevice maxSupportedH264Profile];
+ if (profileLevelId && profileLevelId->profile >= profile) {
+ const absl::optional<std::string> profileString =
+ H264ProfileLevelIdToString(webrtc::H264ProfileLevelId(profile, profileLevelId->level));
+ if (profileString) {
+ return [NSString stringForStdString:*profileString];
+ }
+ }
+ return nil;
+}
+#endif
+
+NSString *MaxSupportedProfileLevelConstrainedBaseline() {
+#if defined(WEBRTC_IOS)
+ NSString *profile = MaxSupportedLevelForProfile(webrtc::H264Profile::kProfileConstrainedBaseline);
+ if (profile != nil) {
+ return profile;
+ }
+#endif
+ return kRTCLevel31ConstrainedBaseline;
+}
+
+NSString *MaxSupportedProfileLevelConstrainedHigh() {
+#if defined(WEBRTC_IOS)
+ NSString *profile = MaxSupportedLevelForProfile(webrtc::H264Profile::kProfileConstrainedHigh);
+ if (profile != nil) {
+ return profile;
+ }
+#endif
+ return kRTCLevel31ConstrainedHigh;
+}
+
+} // namespace
+
+@interface RTC_OBJC_TYPE (RTCH264ProfileLevelId) ()
+
+@property(nonatomic, assign) RTCH264Profile profile;
+@property(nonatomic, assign) RTCH264Level level;
+@property(nonatomic, strong) NSString *hexString;
+
+@end
+
+@implementation RTC_OBJC_TYPE (RTCH264ProfileLevelId)
+
+@synthesize profile = _profile;
+@synthesize level = _level;
+@synthesize hexString = _hexString;
+
+- (instancetype)initWithHexString:(NSString *)hexString {
+ if (self = [super init]) {
+ self.hexString = hexString;
+
+ absl::optional<webrtc::H264ProfileLevelId> profile_level_id =
+ webrtc::ParseH264ProfileLevelId([hexString cStringUsingEncoding:NSUTF8StringEncoding]);
+ if (profile_level_id.has_value()) {
+ self.profile = static_cast<RTCH264Profile>(profile_level_id->profile);
+ self.level = static_cast<RTCH264Level>(profile_level_id->level);
+ }
+ }
+ return self;
+}
+
+- (instancetype)initWithProfile:(RTCH264Profile)profile level:(RTCH264Level)level {
+ if (self = [super init]) {
+ self.profile = profile;
+ self.level = level;
+
+ absl::optional<std::string> hex_string =
+ webrtc::H264ProfileLevelIdToString(webrtc::H264ProfileLevelId(
+ static_cast<webrtc::H264Profile>(profile), static_cast<webrtc::H264Level>(level)));
+ self.hexString =
+ [NSString stringWithCString:hex_string.value_or("").c_str() encoding:NSUTF8StringEncoding];
+ }
+ return self;
+}
+
+@end
diff --git a/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoDecoderFactoryH264.h b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoDecoderFactoryH264.h
new file mode 100644
index 0000000000..88bacbbdfe
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoDecoderFactoryH264.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <Foundation/Foundation.h>
+
+#import "RTCMacros.h"
+#import "RTCVideoDecoderFactory.h"
+
+RTC_OBJC_EXPORT
+@interface RTC_OBJC_TYPE (RTCVideoDecoderFactoryH264) : NSObject <RTC_OBJC_TYPE(RTCVideoDecoderFactory)>
+@end
diff --git a/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoDecoderFactoryH264.m b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoDecoderFactoryH264.m
new file mode 100644
index 0000000000..bdae19d687
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoDecoderFactoryH264.m
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "RTCVideoDecoderFactoryH264.h"
+
+#import "RTCH264ProfileLevelId.h"
+#import "RTCVideoDecoderH264.h"
+
+@implementation RTC_OBJC_TYPE (RTCVideoDecoderFactoryH264)
+
+- (NSArray<RTC_OBJC_TYPE(RTCVideoCodecInfo) *> *)supportedCodecs {
+ NSMutableArray<RTC_OBJC_TYPE(RTCVideoCodecInfo) *> *codecs = [NSMutableArray array];
+ NSString *codecName = kRTCVideoCodecH264Name;
+
+ NSDictionary<NSString *, NSString *> *constrainedHighParams = @{
+ @"profile-level-id" : kRTCMaxSupportedH264ProfileLevelConstrainedHigh,
+ @"level-asymmetry-allowed" : @"1",
+ @"packetization-mode" : @"1",
+ };
+ RTC_OBJC_TYPE(RTCVideoCodecInfo) *constrainedHighInfo =
+ [[RTC_OBJC_TYPE(RTCVideoCodecInfo) alloc] initWithName:codecName
+ parameters:constrainedHighParams];
+ [codecs addObject:constrainedHighInfo];
+
+ NSDictionary<NSString *, NSString *> *constrainedBaselineParams = @{
+ @"profile-level-id" : kRTCMaxSupportedH264ProfileLevelConstrainedBaseline,
+ @"level-asymmetry-allowed" : @"1",
+ @"packetization-mode" : @"1",
+ };
+ RTC_OBJC_TYPE(RTCVideoCodecInfo) *constrainedBaselineInfo =
+ [[RTC_OBJC_TYPE(RTCVideoCodecInfo) alloc] initWithName:codecName
+ parameters:constrainedBaselineParams];
+ [codecs addObject:constrainedBaselineInfo];
+
+ return [codecs copy];
+}
+
+- (id<RTC_OBJC_TYPE(RTCVideoDecoder)>)createDecoder:(RTC_OBJC_TYPE(RTCVideoCodecInfo) *)info {
+ return [[RTC_OBJC_TYPE(RTCVideoDecoderH264) alloc] init];
+}
+
+@end
diff --git a/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoDecoderH264.h b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoDecoderH264.h
new file mode 100644
index 0000000000..a12e4212a7
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoDecoderH264.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <Foundation/Foundation.h>
+
+#import "RTCMacros.h"
+#import "RTCVideoDecoder.h"
+
+RTC_OBJC_EXPORT
+@interface RTC_OBJC_TYPE (RTCVideoDecoderH264) : NSObject <RTC_OBJC_TYPE(RTCVideoDecoder)>
+@end
diff --git a/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoDecoderH264.mm b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoDecoderH264.mm
new file mode 100644
index 0000000000..09e642bc37
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoDecoderH264.mm
@@ -0,0 +1,276 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#import "RTCVideoDecoderH264.h"
+
+#import <VideoToolbox/VideoToolbox.h>
+
+#import "base/RTCVideoFrame.h"
+#import "base/RTCVideoFrameBuffer.h"
+#import "components/video_frame_buffer/RTCCVPixelBuffer.h"
+#import "helpers.h"
+#import "helpers/scoped_cftyperef.h"
+
+#if defined(WEBRTC_IOS)
+#import "helpers/UIDevice+RTCDevice.h"
+#endif
+
+#include "modules/video_coding/include/video_error_codes.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/time_utils.h"
+#include "sdk/objc/components/video_codec/nalu_rewriter.h"
+
+// Struct that we pass to the decoder per frame to decode. We receive it again
+// in the decoder callback.
+struct RTCFrameDecodeParams {
+ RTCFrameDecodeParams(RTCVideoDecoderCallback cb, int64_t ts) : callback(cb), timestamp(ts) {}
+ RTCVideoDecoderCallback callback;
+ int64_t timestamp;
+};
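+
+// Ownership note: `decode:` releases a heap-allocated RTCFrameDecodeParams into
+// VTDecompressionSessionDecodeFrame as the per-frame opaque pointer; the callback
+// below re-wraps it in a unique_ptr, so each frame's params are freed exactly once.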
+
+@interface RTC_OBJC_TYPE (RTCVideoDecoderH264) ()
+- (void)setError:(OSStatus)error;
+@end
+
+// This is the callback function that VideoToolbox calls when decode is
+// complete.
+void decompressionOutputCallback(void *decoderRef,
+ void *params,
+ OSStatus status,
+ VTDecodeInfoFlags infoFlags,
+ CVImageBufferRef imageBuffer,
+ CMTime timestamp,
+ CMTime duration) {
+ std::unique_ptr<RTCFrameDecodeParams> decodeParams(
+ reinterpret_cast<RTCFrameDecodeParams *>(params));
+ if (status != noErr) {
+ RTC_OBJC_TYPE(RTCVideoDecoderH264) *decoder =
+ (__bridge RTC_OBJC_TYPE(RTCVideoDecoderH264) *)decoderRef;
+ [decoder setError:status];
+ RTC_LOG(LS_ERROR) << "Failed to decode frame. Status: " << status;
+ return;
+ }
+ // TODO(tkchin): Handle CVO properly.
+ RTC_OBJC_TYPE(RTCCVPixelBuffer) *frameBuffer =
+ [[RTC_OBJC_TYPE(RTCCVPixelBuffer) alloc] initWithPixelBuffer:imageBuffer];
+ RTC_OBJC_TYPE(RTCVideoFrame) *decodedFrame = [[RTC_OBJC_TYPE(RTCVideoFrame) alloc]
+ initWithBuffer:frameBuffer
+ rotation:RTCVideoRotation_0
+ timeStampNs:CMTimeGetSeconds(timestamp) * rtc::kNumNanosecsPerSec];
+ decodedFrame.timeStamp = decodeParams->timestamp;
+ decodeParams->callback(decodedFrame);
+}
+
+// Decoder.
+@implementation RTC_OBJC_TYPE (RTCVideoDecoderH264) {
+ CMVideoFormatDescriptionRef _videoFormat;
+ CMMemoryPoolRef _memoryPool;
+ VTDecompressionSessionRef _decompressionSession;
+ RTCVideoDecoderCallback _callback;
+ OSStatus _error;
+}
+
+- (instancetype)init {
+ self = [super init];
+ if (self) {
+ _memoryPool = CMMemoryPoolCreate(nil);
+ }
+ return self;
+}
+
+- (void)dealloc {
+ CMMemoryPoolInvalidate(_memoryPool);
+ CFRelease(_memoryPool);
+ [self destroyDecompressionSession];
+ [self setVideoFormat:nullptr];
+}
+
+- (NSInteger)startDecodeWithNumberOfCores:(int)numberOfCores {
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+- (NSInteger)decode:(RTC_OBJC_TYPE(RTCEncodedImage) *)inputImage
+ missingFrames:(BOOL)missingFrames
+ codecSpecificInfo:(nullable id<RTC_OBJC_TYPE(RTCCodecSpecificInfo)>)info
+ renderTimeMs:(int64_t)renderTimeMs {
+ RTC_DCHECK(inputImage.buffer);
+
+ if (_error != noErr) {
+ RTC_LOG(LS_WARNING) << "Last frame decode failed.";
+ _error = noErr;
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ rtc::ScopedCFTypeRef<CMVideoFormatDescriptionRef> inputFormat =
+ rtc::ScopedCF(webrtc::CreateVideoFormatDescription((uint8_t *)inputImage.buffer.bytes,
+ inputImage.buffer.length));
+ if (inputFormat) {
+ // Check if the video format has changed, and reinitialize decoder if
+ // needed.
+ if (!CMFormatDescriptionEqual(inputFormat.get(), _videoFormat)) {
+ [self setVideoFormat:inputFormat.get()];
+ int resetDecompressionSessionError = [self resetDecompressionSession];
+ if (resetDecompressionSessionError != WEBRTC_VIDEO_CODEC_OK) {
+ return resetDecompressionSessionError;
+ }
+ }
+ }
+ if (!_videoFormat) {
+ // We received a frame but we don't have format information so we can't
+ // decode it.
+ // This can happen after backgrounding. We need to wait for the next
+ // sps/pps before we can resume so we request a keyframe by returning an
+ // error.
+ RTC_LOG(LS_WARNING) << "Missing video format. Frame with sps/pps required.";
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ CMSampleBufferRef sampleBuffer = nullptr;
+ if (!webrtc::H264AnnexBBufferToCMSampleBuffer((uint8_t *)inputImage.buffer.bytes,
+ inputImage.buffer.length,
+ _videoFormat,
+ &sampleBuffer,
+ _memoryPool)) {
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ RTC_DCHECK(sampleBuffer);
+ VTDecodeFrameFlags decodeFlags = kVTDecodeFrame_EnableAsynchronousDecompression;
+ std::unique_ptr<RTCFrameDecodeParams> frameDecodeParams;
+ frameDecodeParams.reset(new RTCFrameDecodeParams(_callback, inputImage.timeStamp));
+ OSStatus status = VTDecompressionSessionDecodeFrame(
+ _decompressionSession, sampleBuffer, decodeFlags, frameDecodeParams.release(), nullptr);
+#if defined(WEBRTC_IOS)
+ // Re-initialize the decoder if we have an invalid session while the app is
+ // active or decoder malfunctions and retry the decode request.
+ if ((status == kVTInvalidSessionErr || status == kVTVideoDecoderMalfunctionErr) &&
+ [self resetDecompressionSession] == WEBRTC_VIDEO_CODEC_OK) {
+ RTC_LOG(LS_INFO) << "Failed to decode frame with code: " << status
+ << " retrying decode after decompression session reset";
+ frameDecodeParams.reset(new RTCFrameDecodeParams(_callback, inputImage.timeStamp));
+ status = VTDecompressionSessionDecodeFrame(
+ _decompressionSession, sampleBuffer, decodeFlags, frameDecodeParams.release(), nullptr);
+ }
+#endif
+ CFRelease(sampleBuffer);
+ if (status != noErr) {
+ RTC_LOG(LS_ERROR) << "Failed to decode frame with code: " << status;
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+- (void)setCallback:(RTCVideoDecoderCallback)callback {
+ _callback = callback;
+}
+
+- (void)setError:(OSStatus)error {
+ _error = error;
+}
+
+- (NSInteger)releaseDecoder {
+ // Need to invalidate the session so that callbacks no longer occur and it
+ // is safe to null out the callback.
+ [self destroyDecompressionSession];
+ [self setVideoFormat:nullptr];
+ _callback = nullptr;
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+#pragma mark - Private
+
+- (int)resetDecompressionSession {
+ [self destroyDecompressionSession];
+
+ // Need to wait for the first SPS to initialize decoder.
+ if (!_videoFormat) {
+ return WEBRTC_VIDEO_CODEC_OK;
+ }
+
+  // Set keys for OpenGL and IOSurface compatibility, which makes the decoder
+  // create pixel buffers with GPU-backed memory. The intent here is to pass
+  // the pixel buffers on directly so we avoid a texture upload later during
+  // rendering. This is currently moot because we convert back to an I420
+  // frame after decode, but eventually we will be able to plumb
+  // CVPixelBuffers directly to the renderer.
+  // TODO(tkchin): Maybe only set OpenGL/IOSurface keys if we know that
+  // we can pass CVPixelBuffers as native handles in decoder output.
+ NSDictionary *attributes = @{
+#if defined(WEBRTC_IOS) && (TARGET_OS_MACCATALYST || TARGET_OS_SIMULATOR)
+ (NSString *)kCVPixelBufferMetalCompatibilityKey : @(YES),
+#elif defined(WEBRTC_IOS)
+ (NSString *)kCVPixelBufferOpenGLESCompatibilityKey : @(YES),
+#elif defined(WEBRTC_MAC) && !defined(WEBRTC_ARCH_ARM64)
+ (NSString *)kCVPixelBufferOpenGLCompatibilityKey : @(YES),
+#endif
+#if !(TARGET_OS_SIMULATOR)
+ (NSString *)kCVPixelBufferIOSurfacePropertiesKey : @{},
+#endif
+ (NSString *)
+ kCVPixelBufferPixelFormatTypeKey : @(kCVPixelFormatType_420YpCbCr8BiPlanarFullRange),
+ };
+
+ VTDecompressionOutputCallbackRecord record = {
+ decompressionOutputCallback, (__bridge void *)self,
+ };
+ OSStatus status = VTDecompressionSessionCreate(nullptr,
+ _videoFormat,
+ nullptr,
+ (__bridge CFDictionaryRef)attributes,
+ &record,
+ &_decompressionSession);
+ if (status != noErr) {
+ RTC_LOG(LS_ERROR) << "Failed to create decompression session: " << status;
+ [self destroyDecompressionSession];
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ [self configureDecompressionSession];
+
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+- (void)configureDecompressionSession {
+ RTC_DCHECK(_decompressionSession);
+#if defined(WEBRTC_IOS)
+ VTSessionSetProperty(_decompressionSession, kVTDecompressionPropertyKey_RealTime, kCFBooleanTrue);
+#endif
+}
+
+- (void)destroyDecompressionSession {
+ if (_decompressionSession) {
+#if defined(WEBRTC_IOS)
+ if ([UIDevice isIOS11OrLater]) {
+ VTDecompressionSessionWaitForAsynchronousFrames(_decompressionSession);
+ }
+#endif
+ VTDecompressionSessionInvalidate(_decompressionSession);
+ CFRelease(_decompressionSession);
+ _decompressionSession = nullptr;
+ }
+}
+
+- (void)setVideoFormat:(CMVideoFormatDescriptionRef)videoFormat {
+ if (_videoFormat == videoFormat) {
+ return;
+ }
+ if (_videoFormat) {
+ CFRelease(_videoFormat);
+ }
+ _videoFormat = videoFormat;
+ if (_videoFormat) {
+ CFRetain(_videoFormat);
+ }
+}
+
+- (NSString *)implementationName {
+ return @"VideoToolbox";
+}
+
+@end
diff --git a/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoEncoderFactoryH264.h b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoEncoderFactoryH264.h
new file mode 100644
index 0000000000..45fc4be2ea
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoEncoderFactoryH264.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <Foundation/Foundation.h>
+
+#import "RTCMacros.h"
+#import "RTCVideoEncoderFactory.h"
+
+RTC_OBJC_EXPORT
+@interface RTC_OBJC_TYPE (RTCVideoEncoderFactoryH264) : NSObject <RTC_OBJC_TYPE(RTCVideoEncoderFactory)>
+@end
diff --git a/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoEncoderFactoryH264.m b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoEncoderFactoryH264.m
new file mode 100644
index 0000000000..9843849307
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoEncoderFactoryH264.m
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "RTCVideoEncoderFactoryH264.h"
+
+#import "RTCH264ProfileLevelId.h"
+#import "RTCVideoEncoderH264.h"
+
+@implementation RTC_OBJC_TYPE (RTCVideoEncoderFactoryH264)
+
+- (NSArray<RTC_OBJC_TYPE(RTCVideoCodecInfo) *> *)supportedCodecs {
+ NSMutableArray<RTC_OBJC_TYPE(RTCVideoCodecInfo) *> *codecs = [NSMutableArray array];
+ NSString *codecName = kRTCVideoCodecH264Name;
+
+ NSDictionary<NSString *, NSString *> *constrainedHighParams = @{
+ @"profile-level-id" : kRTCMaxSupportedH264ProfileLevelConstrainedHigh,
+ @"level-asymmetry-allowed" : @"1",
+ @"packetization-mode" : @"1",
+ };
+ RTC_OBJC_TYPE(RTCVideoCodecInfo) *constrainedHighInfo =
+ [[RTC_OBJC_TYPE(RTCVideoCodecInfo) alloc] initWithName:codecName
+ parameters:constrainedHighParams];
+ [codecs addObject:constrainedHighInfo];
+
+ NSDictionary<NSString *, NSString *> *constrainedBaselineParams = @{
+ @"profile-level-id" : kRTCMaxSupportedH264ProfileLevelConstrainedBaseline,
+ @"level-asymmetry-allowed" : @"1",
+ @"packetization-mode" : @"1",
+ };
+ RTC_OBJC_TYPE(RTCVideoCodecInfo) *constrainedBaselineInfo =
+ [[RTC_OBJC_TYPE(RTCVideoCodecInfo) alloc] initWithName:codecName
+ parameters:constrainedBaselineParams];
+ [codecs addObject:constrainedBaselineInfo];
+
+ return [codecs copy];
+}
+
+- (id<RTC_OBJC_TYPE(RTCVideoEncoder)>)createEncoder:(RTC_OBJC_TYPE(RTCVideoCodecInfo) *)info {
+ return [[RTC_OBJC_TYPE(RTCVideoEncoderH264) alloc] initWithCodecInfo:info];
+}
+
+@end
diff --git a/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoEncoderH264.h b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoEncoderH264.h
new file mode 100644
index 0000000000..9f4f4c7c8d
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoEncoderH264.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <Foundation/Foundation.h>
+
+#import "RTCMacros.h"
+#import "RTCVideoCodecInfo.h"
+#import "RTCVideoEncoder.h"
+
+RTC_OBJC_EXPORT
+@interface RTC_OBJC_TYPE (RTCVideoEncoderH264) : NSObject <RTC_OBJC_TYPE(RTCVideoEncoder)>
+
+- (instancetype)initWithCodecInfo:(RTC_OBJC_TYPE(RTCVideoCodecInfo) *)codecInfo;
+
+@end
diff --git a/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoEncoderH264.mm b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoEncoderH264.mm
new file mode 100644
index 0000000000..2160d79ae5
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoEncoderH264.mm
@@ -0,0 +1,828 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#import "RTCVideoEncoderH264.h"
+
+#import <VideoToolbox/VideoToolbox.h>
+#include <vector>
+
+#if defined(WEBRTC_IOS)
+#import "helpers/UIDevice+RTCDevice.h"
+#endif
+#import "RTCCodecSpecificInfoH264.h"
+#import "RTCH264ProfileLevelId.h"
+#import "api/peerconnection/RTCVideoCodecInfo+Private.h"
+#import "base/RTCCodecSpecificInfo.h"
+#import "base/RTCI420Buffer.h"
+#import "base/RTCVideoEncoder.h"
+#import "base/RTCVideoFrame.h"
+#import "base/RTCVideoFrameBuffer.h"
+#import "components/video_frame_buffer/RTCCVPixelBuffer.h"
+#import "helpers.h"
+
+#include "api/video_codecs/h264_profile_level_id.h"
+#include "common_video/h264/h264_bitstream_parser.h"
+#include "common_video/include/bitrate_adjuster.h"
+#include "modules/video_coding/include/video_error_codes.h"
+#include "rtc_base/buffer.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/time_utils.h"
+#include "sdk/objc/components/video_codec/nalu_rewriter.h"
+#include "third_party/libyuv/include/libyuv/convert_from.h"
+
+@interface RTC_OBJC_TYPE (RTCVideoEncoderH264) ()
+
+- (void)frameWasEncoded:(OSStatus)status
+                  flags:(VTEncodeInfoFlags)infoFlags
+           sampleBuffer:(CMSampleBufferRef)sampleBuffer
+      codecSpecificInfo:(id<RTC_OBJC_TYPE(RTCCodecSpecificInfo)>)codecSpecificInfo
+                  width:(int32_t)width
+                 height:(int32_t)height
+           renderTimeMs:(int64_t)renderTimeMs
+              timestamp:(uint32_t)timestamp
+               rotation:(RTCVideoRotation)rotation;
+
+@end
+
+namespace { // anonymous namespace
+
+// The ratio between kVTCompressionPropertyKey_DataRateLimits and
+// kVTCompressionPropertyKey_AverageBitRate. The data rate limit is set higher
+// than the average bit rate to avoid undershooting the target.
+const float kLimitToAverageBitRateFactor = 1.5f;
+// These thresholds deviate from the default H264 QP thresholds, as they
+// have been found to work better on devices that support VideoToolbox.
+const int kLowH264QpThreshold = 28;
+const int kHighH264QpThreshold = 39;
+
+const OSType kNV12PixelFormat = kCVPixelFormatType_420YpCbCr8BiPlanarFullRange;
+
+// Struct that we pass to the encoder per frame to encode. We receive it again
+// in the encoder callback.
+struct RTCFrameEncodeParams {
+ RTCFrameEncodeParams(RTC_OBJC_TYPE(RTCVideoEncoderH264) * e,
+ RTC_OBJC_TYPE(RTCCodecSpecificInfoH264) * csi,
+ int32_t w,
+ int32_t h,
+ int64_t rtms,
+ uint32_t ts,
+ RTCVideoRotation r)
+ : encoder(e), width(w), height(h), render_time_ms(rtms), timestamp(ts), rotation(r) {
+ if (csi) {
+ codecSpecificInfo = csi;
+ } else {
+ codecSpecificInfo = [[RTC_OBJC_TYPE(RTCCodecSpecificInfoH264) alloc] init];
+ }
+ }
+
+ RTC_OBJC_TYPE(RTCVideoEncoderH264) * encoder;
+ RTC_OBJC_TYPE(RTCCodecSpecificInfoH264) * codecSpecificInfo;
+ int32_t width;
+ int32_t height;
+ int64_t render_time_ms;
+ uint32_t timestamp;
+ RTCVideoRotation rotation;
+};
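+
+// Ownership note: `encode:` releases a heap-allocated RTCFrameEncodeParams into
+// VTCompressionSessionEncodeFrame as the per-frame opaque pointer, and
+// compressionOutputCallback below re-wraps it in a unique_ptr, freeing it once
+// the encoded frame has been delivered.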
+
+// We receive I420Frames as input, but we need to feed CVPixelBuffers into the
+// encoder. This performs the copy and format conversion.
+// TODO(tkchin): See if encoder will accept i420 frames and compare performance.
+bool CopyVideoFrameToNV12PixelBuffer(id<RTC_OBJC_TYPE(RTCI420Buffer)> frameBuffer,
+ CVPixelBufferRef pixelBuffer) {
+ RTC_DCHECK(pixelBuffer);
+ RTC_DCHECK_EQ(CVPixelBufferGetPixelFormatType(pixelBuffer), kNV12PixelFormat);
+ RTC_DCHECK_EQ(CVPixelBufferGetHeightOfPlane(pixelBuffer, 0), frameBuffer.height);
+ RTC_DCHECK_EQ(CVPixelBufferGetWidthOfPlane(pixelBuffer, 0), frameBuffer.width);
+
+ CVReturn cvRet = CVPixelBufferLockBaseAddress(pixelBuffer, 0);
+ if (cvRet != kCVReturnSuccess) {
+ RTC_LOG(LS_ERROR) << "Failed to lock base address: " << cvRet;
+ return false;
+ }
+ uint8_t *dstY = reinterpret_cast<uint8_t *>(CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0));
+ int dstStrideY = CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 0);
+ uint8_t *dstUV = reinterpret_cast<uint8_t *>(CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 1));
+ int dstStrideUV = CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 1);
+ // Convert I420 to NV12.
+ int ret = libyuv::I420ToNV12(frameBuffer.dataY,
+ frameBuffer.strideY,
+ frameBuffer.dataU,
+ frameBuffer.strideU,
+ frameBuffer.dataV,
+ frameBuffer.strideV,
+ dstY,
+ dstStrideY,
+ dstUV,
+ dstStrideUV,
+ frameBuffer.width,
+ frameBuffer.height);
+ CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
+ if (ret) {
+ RTC_LOG(LS_ERROR) << "Error converting I420 VideoFrame to NV12 :" << ret;
+ return false;
+ }
+ return true;
+}
+
+CVPixelBufferRef CreatePixelBuffer(VTCompressionSessionRef compression_session) {
+ if (!compression_session) {
+ RTC_LOG(LS_ERROR) << "Failed to get compression session.";
+ return nullptr;
+ }
+ CVPixelBufferPoolRef pixel_buffer_pool =
+ VTCompressionSessionGetPixelBufferPool(compression_session);
+
+ if (!pixel_buffer_pool) {
+ RTC_LOG(LS_ERROR) << "Failed to get pixel buffer pool.";
+ return nullptr;
+ }
+ CVPixelBufferRef pixel_buffer;
+ CVReturn ret = CVPixelBufferPoolCreatePixelBuffer(nullptr, pixel_buffer_pool, &pixel_buffer);
+ if (ret != kCVReturnSuccess) {
+ RTC_LOG(LS_ERROR) << "Failed to create pixel buffer: " << ret;
+ // We probably want to drop frames here, since failure probably means
+ // that the pool is empty.
+ return nullptr;
+ }
+ return pixel_buffer;
+}
+
+// This is the callback function that VideoToolbox calls when encode is
+// complete. From inspection this happens on its own queue.
+void compressionOutputCallback(void *encoder,
+ void *params,
+ OSStatus status,
+ VTEncodeInfoFlags infoFlags,
+ CMSampleBufferRef sampleBuffer) {
+ if (!params) {
+    // This can happen if there are pending callbacks when the encoder is destroyed.
+ return;
+ }
+ std::unique_ptr<RTCFrameEncodeParams> encodeParams(
+ reinterpret_cast<RTCFrameEncodeParams *>(params));
+ [encodeParams->encoder frameWasEncoded:status
+ flags:infoFlags
+ sampleBuffer:sampleBuffer
+ codecSpecificInfo:encodeParams->codecSpecificInfo
+ width:encodeParams->width
+ height:encodeParams->height
+ renderTimeMs:encodeParams->render_time_ms
+ timestamp:encodeParams->timestamp
+ rotation:encodeParams->rotation];
+}
+
+// Extracts the VideoToolbox profile constant for the given
+// webrtc::H264ProfileLevelId. If there is no specific VideoToolbox profile for
+// the specified level, AutoLevel will be returned. The user must initialize the
+// encoder with a resolution and framerate conforming to the selected H264 level
+// regardless.
+CFStringRef ExtractProfile(const webrtc::H264ProfileLevelId &profile_level_id) {
+ switch (profile_level_id.profile) {
+ case webrtc::H264Profile::kProfileConstrainedBaseline:
+ case webrtc::H264Profile::kProfileBaseline:
+ switch (profile_level_id.level) {
+ case webrtc::H264Level::kLevel3:
+ return kVTProfileLevel_H264_Baseline_3_0;
+ case webrtc::H264Level::kLevel3_1:
+ return kVTProfileLevel_H264_Baseline_3_1;
+ case webrtc::H264Level::kLevel3_2:
+ return kVTProfileLevel_H264_Baseline_3_2;
+ case webrtc::H264Level::kLevel4:
+ return kVTProfileLevel_H264_Baseline_4_0;
+ case webrtc::H264Level::kLevel4_1:
+ return kVTProfileLevel_H264_Baseline_4_1;
+ case webrtc::H264Level::kLevel4_2:
+ return kVTProfileLevel_H264_Baseline_4_2;
+ case webrtc::H264Level::kLevel5:
+ return kVTProfileLevel_H264_Baseline_5_0;
+ case webrtc::H264Level::kLevel5_1:
+ return kVTProfileLevel_H264_Baseline_5_1;
+ case webrtc::H264Level::kLevel5_2:
+ return kVTProfileLevel_H264_Baseline_5_2;
+ case webrtc::H264Level::kLevel1:
+ case webrtc::H264Level::kLevel1_b:
+ case webrtc::H264Level::kLevel1_1:
+ case webrtc::H264Level::kLevel1_2:
+ case webrtc::H264Level::kLevel1_3:
+ case webrtc::H264Level::kLevel2:
+ case webrtc::H264Level::kLevel2_1:
+ case webrtc::H264Level::kLevel2_2:
+ return kVTProfileLevel_H264_Baseline_AutoLevel;
+ }
+
+ case webrtc::H264Profile::kProfileMain:
+ switch (profile_level_id.level) {
+ case webrtc::H264Level::kLevel3:
+ return kVTProfileLevel_H264_Main_3_0;
+ case webrtc::H264Level::kLevel3_1:
+ return kVTProfileLevel_H264_Main_3_1;
+ case webrtc::H264Level::kLevel3_2:
+ return kVTProfileLevel_H264_Main_3_2;
+ case webrtc::H264Level::kLevel4:
+ return kVTProfileLevel_H264_Main_4_0;
+ case webrtc::H264Level::kLevel4_1:
+ return kVTProfileLevel_H264_Main_4_1;
+ case webrtc::H264Level::kLevel4_2:
+ return kVTProfileLevel_H264_Main_4_2;
+ case webrtc::H264Level::kLevel5:
+ return kVTProfileLevel_H264_Main_5_0;
+ case webrtc::H264Level::kLevel5_1:
+ return kVTProfileLevel_H264_Main_5_1;
+ case webrtc::H264Level::kLevel5_2:
+ return kVTProfileLevel_H264_Main_5_2;
+ case webrtc::H264Level::kLevel1:
+ case webrtc::H264Level::kLevel1_b:
+ case webrtc::H264Level::kLevel1_1:
+ case webrtc::H264Level::kLevel1_2:
+ case webrtc::H264Level::kLevel1_3:
+ case webrtc::H264Level::kLevel2:
+ case webrtc::H264Level::kLevel2_1:
+ case webrtc::H264Level::kLevel2_2:
+ return kVTProfileLevel_H264_Main_AutoLevel;
+ }
+
+ case webrtc::H264Profile::kProfileConstrainedHigh:
+ case webrtc::H264Profile::kProfileHigh:
+ case webrtc::H264Profile::kProfilePredictiveHigh444:
+ switch (profile_level_id.level) {
+ case webrtc::H264Level::kLevel3:
+ return kVTProfileLevel_H264_High_3_0;
+ case webrtc::H264Level::kLevel3_1:
+ return kVTProfileLevel_H264_High_3_1;
+ case webrtc::H264Level::kLevel3_2:
+ return kVTProfileLevel_H264_High_3_2;
+ case webrtc::H264Level::kLevel4:
+ return kVTProfileLevel_H264_High_4_0;
+ case webrtc::H264Level::kLevel4_1:
+ return kVTProfileLevel_H264_High_4_1;
+ case webrtc::H264Level::kLevel4_2:
+ return kVTProfileLevel_H264_High_4_2;
+ case webrtc::H264Level::kLevel5:
+ return kVTProfileLevel_H264_High_5_0;
+ case webrtc::H264Level::kLevel5_1:
+ return kVTProfileLevel_H264_High_5_1;
+ case webrtc::H264Level::kLevel5_2:
+ return kVTProfileLevel_H264_High_5_2;
+ case webrtc::H264Level::kLevel1:
+ case webrtc::H264Level::kLevel1_b:
+ case webrtc::H264Level::kLevel1_1:
+ case webrtc::H264Level::kLevel1_2:
+ case webrtc::H264Level::kLevel1_3:
+ case webrtc::H264Level::kLevel2:
+ case webrtc::H264Level::kLevel2_1:
+ case webrtc::H264Level::kLevel2_2:
+ return kVTProfileLevel_H264_High_AutoLevel;
+ }
+ }
+}
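+
+// Example: "42e01f" (constrained baseline, level 3.1) maps to
+// kVTProfileLevel_H264_Baseline_3_1; the switch above folds the constrained
+// profiles into the plain Baseline/High tables.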
+
+// Returns the maximum sample rate (luma samples per second) that an encoder
+// constrained to `profile_level_id` is allowed to process.
+// See https://www.itu.int/rec/dologin_pub.asp?lang=e&id=T-REC-H.264-201610-S!!PDF-E&type=items
+// for details.
+NSUInteger GetMaxSampleRate(const webrtc::H264ProfileLevelId &profile_level_id) {
+ switch (profile_level_id.level) {
+ case webrtc::H264Level::kLevel3:
+ return 10368000;
+ case webrtc::H264Level::kLevel3_1:
+ return 27648000;
+ case webrtc::H264Level::kLevel3_2:
+ return 55296000;
+ case webrtc::H264Level::kLevel4:
+ case webrtc::H264Level::kLevel4_1:
+ return 62914560;
+ case webrtc::H264Level::kLevel4_2:
+ return 133693440;
+ case webrtc::H264Level::kLevel5:
+ return 150994944;
+ case webrtc::H264Level::kLevel5_1:
+ return 251658240;
+ case webrtc::H264Level::kLevel5_2:
+ return 530841600;
+ case webrtc::H264Level::kLevel1:
+ case webrtc::H264Level::kLevel1_b:
+ case webrtc::H264Level::kLevel1_1:
+ case webrtc::H264Level::kLevel1_2:
+ case webrtc::H264Level::kLevel1_3:
+ case webrtc::H264Level::kLevel2:
+ case webrtc::H264Level::kLevel2_1:
+ case webrtc::H264Level::kLevel2_2:
+ // Zero means auto rate setting.
+ return 0;
+ }
+}
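+
+// Worked example: level 3.1 allows 27,648,000 samples/sec, so a 1280x720 stream
+// (921,600 pixels per frame, already 16-aligned) is capped at
+// 27648000 / 921600 = 30 fps by `_maxAllowedFrameRate` below.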
+} // namespace
+
+@implementation RTC_OBJC_TYPE (RTCVideoEncoderH264) {
+ RTC_OBJC_TYPE(RTCVideoCodecInfo) * _codecInfo;
+ std::unique_ptr<webrtc::BitrateAdjuster> _bitrateAdjuster;
+ uint32_t _targetBitrateBps;
+ uint32_t _encoderBitrateBps;
+ uint32_t _encoderFrameRate;
+ uint32_t _maxAllowedFrameRate;
+ RTCH264PacketizationMode _packetizationMode;
+ absl::optional<webrtc::H264ProfileLevelId> _profile_level_id;
+ RTCVideoEncoderCallback _callback;
+ int32_t _width;
+ int32_t _height;
+ VTCompressionSessionRef _compressionSession;
+ RTCVideoCodecMode _mode;
+
+ webrtc::H264BitstreamParser _h264BitstreamParser;
+ std::vector<uint8_t> _frameScaleBuffer;
+}
+
+// .5 is set as a minimum to prevent overcompensating for large temporary
+// overshoots. We don't want to degrade video quality too badly.
+// .95 is set to prevent oscillations. When a lower bitrate is set on the
+// encoder than previously set, its output seems to have a brief period of
+// drastically reduced bitrate, so we want to avoid that. In steady state
+// conditions, 0.95 seems to give us better overall bitrate over long periods
+// of time.
+- (instancetype)initWithCodecInfo:(RTC_OBJC_TYPE(RTCVideoCodecInfo) *)codecInfo {
+ if (self = [super init]) {
+ _codecInfo = codecInfo;
+ _bitrateAdjuster.reset(new webrtc::BitrateAdjuster(.5, .95));
+ _packetizationMode = RTCH264PacketizationModeNonInterleaved;
+ _profile_level_id =
+ webrtc::ParseSdpForH264ProfileLevelId([codecInfo nativeSdpVideoFormat].parameters);
+ RTC_DCHECK(_profile_level_id);
+ RTC_LOG(LS_INFO) << "Using profile " << CFStringToString(ExtractProfile(*_profile_level_id));
+ RTC_CHECK([codecInfo.name isEqualToString:kRTCVideoCodecH264Name]);
+ }
+ return self;
+}
+
+- (void)dealloc {
+ [self destroyCompressionSession];
+}
+
+- (NSInteger)startEncodeWithSettings:(RTC_OBJC_TYPE(RTCVideoEncoderSettings) *)settings
+ numberOfCores:(int)numberOfCores {
+ RTC_DCHECK(settings);
+ RTC_DCHECK([settings.name isEqualToString:kRTCVideoCodecH264Name]);
+
+ _width = settings.width;
+ _height = settings.height;
+ _mode = settings.mode;
+
+ uint32_t aligned_width = (((_width + 15) >> 4) << 4);
+ uint32_t aligned_height = (((_height + 15) >> 4) << 4);
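+  // (((x + 15) >> 4) << 4) rounds up to the next multiple of 16 (one
+  // macroblock), e.g. 1080 -> 1088, so the frame-rate cap below is computed
+  // over the coded picture size rather than the display size.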
+ _maxAllowedFrameRate = static_cast<uint32_t>(GetMaxSampleRate(*_profile_level_id) /
+ (aligned_width * aligned_height));
+
+ // We can only set average bitrate on the HW encoder.
+ _targetBitrateBps = settings.startBitrate * 1000; // startBitrate is in kbps.
+ _bitrateAdjuster->SetTargetBitrateBps(_targetBitrateBps);
+ _encoderFrameRate = MIN(settings.maxFramerate, _maxAllowedFrameRate);
+ if (settings.maxFramerate > _maxAllowedFrameRate && _maxAllowedFrameRate > 0) {
+ RTC_LOG(LS_WARNING) << "Initial encoder frame rate setting " << settings.maxFramerate
+ << " is larger than the "
+ << "maximal allowed frame rate " << _maxAllowedFrameRate << ".";
+ }
+
+ // TODO(tkchin): Try setting payload size via
+ // kVTCompressionPropertyKey_MaxH264SliceBytes.
+
+ return [self resetCompressionSessionWithPixelFormat:kNV12PixelFormat];
+}
+
+- (NSInteger)encode:(RTC_OBJC_TYPE(RTCVideoFrame) *)frame
+ codecSpecificInfo:(nullable id<RTC_OBJC_TYPE(RTCCodecSpecificInfo)>)codecSpecificInfo
+ frameTypes:(NSArray<NSNumber *> *)frameTypes {
+ if (!_callback || !_compressionSession) {
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ }
+ BOOL isKeyframeRequired = NO;
+
+ // Get a pixel buffer from the pool and copy frame data over.
+ if ([self resetCompressionSessionIfNeededWithFrame:frame]) {
+ isKeyframeRequired = YES;
+ }
+
+ CVPixelBufferRef pixelBuffer = nullptr;
+ if ([frame.buffer isKindOfClass:[RTC_OBJC_TYPE(RTCCVPixelBuffer) class]]) {
+ // Native frame buffer
+ RTC_OBJC_TYPE(RTCCVPixelBuffer) *rtcPixelBuffer =
+ (RTC_OBJC_TYPE(RTCCVPixelBuffer) *)frame.buffer;
+ if (![rtcPixelBuffer requiresCropping]) {
+ // This pixel buffer might have a higher resolution than what the
+ // compression session is configured to. The compression session can
+ // handle that and will output encoded frames in the configured
+ // resolution regardless of the input pixel buffer resolution.
+ pixelBuffer = rtcPixelBuffer.pixelBuffer;
+ CVBufferRetain(pixelBuffer);
+ } else {
+ // Cropping required, we need to crop and scale to a new pixel buffer.
+ pixelBuffer = CreatePixelBuffer(_compressionSession);
+ if (!pixelBuffer) {
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ int dstWidth = CVPixelBufferGetWidth(pixelBuffer);
+ int dstHeight = CVPixelBufferGetHeight(pixelBuffer);
+ if ([rtcPixelBuffer requiresScalingToWidth:dstWidth height:dstHeight]) {
+ int size =
+ [rtcPixelBuffer bufferSizeForCroppingAndScalingToWidth:dstWidth height:dstHeight];
+ _frameScaleBuffer.resize(size);
+ } else {
+ _frameScaleBuffer.clear();
+ }
+ _frameScaleBuffer.shrink_to_fit();
+ if (![rtcPixelBuffer cropAndScaleTo:pixelBuffer withTempBuffer:_frameScaleBuffer.data()]) {
+ CVBufferRelease(pixelBuffer);
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ }
+ }
+
+ if (!pixelBuffer) {
+ // We did not have a native frame buffer
+ RTC_DCHECK_EQ(frame.width, _width);
+ RTC_DCHECK_EQ(frame.height, _height);
+
+ pixelBuffer = CreatePixelBuffer(_compressionSession);
+ if (!pixelBuffer) {
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ RTC_DCHECK(pixelBuffer);
+ if (!CopyVideoFrameToNV12PixelBuffer([frame.buffer toI420], pixelBuffer)) {
+ RTC_LOG(LS_ERROR) << "Failed to copy frame data.";
+ CVBufferRelease(pixelBuffer);
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ }
+
+ // Check if we need a keyframe.
+ if (!isKeyframeRequired && frameTypes) {
+ for (NSNumber *frameType in frameTypes) {
+ if ((RTCFrameType)frameType.intValue == RTCFrameTypeVideoFrameKey) {
+ isKeyframeRequired = YES;
+ break;
+ }
+ }
+ }
+
+ CMTime presentationTimeStamp = CMTimeMake(frame.timeStampNs / rtc::kNumNanosecsPerMillisec, 1000);
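+  // CMTimeMake(value, 1000) interprets `value` with a millisecond timescale, so
+  // the nanosecond frame timestamp is converted down to ms first.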
+ CFDictionaryRef frameProperties = nullptr;
+ if (isKeyframeRequired) {
+ CFTypeRef keys[] = {kVTEncodeFrameOptionKey_ForceKeyFrame};
+ CFTypeRef values[] = {kCFBooleanTrue};
+ frameProperties = CreateCFTypeDictionary(keys, values, 1);
+ }
+
+ std::unique_ptr<RTCFrameEncodeParams> encodeParams;
+ encodeParams.reset(new RTCFrameEncodeParams(self,
+ codecSpecificInfo,
+ _width,
+ _height,
+ frame.timeStampNs / rtc::kNumNanosecsPerMillisec,
+ frame.timeStamp,
+ frame.rotation));
+ encodeParams->codecSpecificInfo.packetizationMode = _packetizationMode;
+
+ // Update the bitrate if needed.
+ [self setBitrateBps:_bitrateAdjuster->GetAdjustedBitrateBps() frameRate:_encoderFrameRate];
+
+ OSStatus status = VTCompressionSessionEncodeFrame(_compressionSession,
+ pixelBuffer,
+ presentationTimeStamp,
+ kCMTimeInvalid,
+ frameProperties,
+ encodeParams.release(),
+ nullptr);
+ if (frameProperties) {
+ CFRelease(frameProperties);
+ }
+ if (pixelBuffer) {
+ CVBufferRelease(pixelBuffer);
+ }
+
+ if (status == kVTInvalidSessionErr) {
+ // This error occurs when entering foreground after backgrounding the app.
+ RTC_LOG(LS_ERROR) << "Invalid compression session, resetting.";
+ [self resetCompressionSessionWithPixelFormat:[self pixelFormatOfFrame:frame]];
+
+ return WEBRTC_VIDEO_CODEC_NO_OUTPUT;
+ } else if (status == kVTVideoEncoderMalfunctionErr) {
+ // Sometimes the encoder malfunctions and needs to be restarted.
+ RTC_LOG(LS_ERROR)
+ << "Encountered video encoder malfunction error. Resetting compression session.";
+ [self resetCompressionSessionWithPixelFormat:[self pixelFormatOfFrame:frame]];
+
+ return WEBRTC_VIDEO_CODEC_NO_OUTPUT;
+ } else if (status != noErr) {
+ RTC_LOG(LS_ERROR) << "Failed to encode frame with code: " << status;
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+- (void)setCallback:(RTCVideoEncoderCallback)callback {
+ _callback = callback;
+}
+
+- (int)setBitrate:(uint32_t)bitrateKbit framerate:(uint32_t)framerate {
+ _targetBitrateBps = 1000 * bitrateKbit;
+ _bitrateAdjuster->SetTargetBitrateBps(_targetBitrateBps);
+ if (framerate > _maxAllowedFrameRate && _maxAllowedFrameRate > 0) {
+ RTC_LOG(LS_WARNING) << "Encoder frame rate setting " << framerate << " is larger than the "
+ << "maximal allowed frame rate " << _maxAllowedFrameRate << ".";
+ }
+ framerate = MIN(framerate, _maxAllowedFrameRate);
+ [self setBitrateBps:_bitrateAdjuster->GetAdjustedBitrateBps() frameRate:framerate];
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+- (NSInteger)resolutionAlignment {
+ return 1;
+}
+
+- (BOOL)applyAlignmentToAllSimulcastLayers {
+ return NO;
+}
+
+- (BOOL)supportsNativeHandle {
+ return YES;
+}
+
+#pragma mark - Private
+
+- (NSInteger)releaseEncoder {
+ // Need to destroy so that the session is invalidated and won't use the
+ // callback anymore. Do not remove callback until the session is invalidated
+ // since async encoder callbacks can occur until invalidation.
+ [self destroyCompressionSession];
+ _callback = nullptr;
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+- (OSType)pixelFormatOfFrame:(RTC_OBJC_TYPE(RTCVideoFrame) *)frame {
+ // Use NV12 for non-native frames.
+ if ([frame.buffer isKindOfClass:[RTC_OBJC_TYPE(RTCCVPixelBuffer) class]]) {
+ RTC_OBJC_TYPE(RTCCVPixelBuffer) *rtcPixelBuffer =
+ (RTC_OBJC_TYPE(RTCCVPixelBuffer) *)frame.buffer;
+ return CVPixelBufferGetPixelFormatType(rtcPixelBuffer.pixelBuffer);
+ }
+
+ return kNV12PixelFormat;
+}
+
+- (BOOL)resetCompressionSessionIfNeededWithFrame:(RTC_OBJC_TYPE(RTCVideoFrame) *)frame {
+ BOOL resetCompressionSession = NO;
+
+  // If we're capturing native frames in a pixel format other than the one the
+  // compression session is configured with, make sure the compression session
+  // is reset using the correct pixel format.
+ OSType framePixelFormat = [self pixelFormatOfFrame:frame];
+
+ if (_compressionSession) {
+ // The pool attribute `kCVPixelBufferPixelFormatTypeKey` can contain either an array of pixel
+ // formats or a single pixel format.
+
+ CVPixelBufferPoolRef pixelBufferPool =
+ VTCompressionSessionGetPixelBufferPool(_compressionSession);
+ if (!pixelBufferPool) {
+ return NO;
+ }
+
+ NSDictionary *poolAttributes =
+ (__bridge NSDictionary *)CVPixelBufferPoolGetPixelBufferAttributes(pixelBufferPool);
+ id pixelFormats =
+ [poolAttributes objectForKey:(__bridge NSString *)kCVPixelBufferPixelFormatTypeKey];
+ NSArray<NSNumber *> *compressionSessionPixelFormats = nil;
+ if ([pixelFormats isKindOfClass:[NSArray class]]) {
+ compressionSessionPixelFormats = (NSArray *)pixelFormats;
+ } else if ([pixelFormats isKindOfClass:[NSNumber class]]) {
+ compressionSessionPixelFormats = @[ (NSNumber *)pixelFormats ];
+ }
+
+ if (![compressionSessionPixelFormats
+ containsObject:[NSNumber numberWithLong:framePixelFormat]]) {
+ resetCompressionSession = YES;
+ RTC_LOG(LS_INFO) << "Resetting compression session due to non-matching pixel format.";
+ }
+ } else {
+ resetCompressionSession = YES;
+ }
+
+ if (resetCompressionSession) {
+ [self resetCompressionSessionWithPixelFormat:framePixelFormat];
+ }
+ return resetCompressionSession;
+}
+
+- (int)resetCompressionSessionWithPixelFormat:(OSType)framePixelFormat {
+ [self destroyCompressionSession];
+
+ // Set source image buffer attributes. These attributes will be present on
+ // buffers retrieved from the encoder's pixel buffer pool.
+ NSDictionary *sourceAttributes = @{
+#if defined(WEBRTC_IOS) && (TARGET_OS_MACCATALYST || TARGET_OS_SIMULATOR)
+ (NSString *)kCVPixelBufferMetalCompatibilityKey : @(YES),
+#elif defined(WEBRTC_IOS)
+ (NSString *)kCVPixelBufferOpenGLESCompatibilityKey : @(YES),
+#elif defined(WEBRTC_MAC) && !defined(WEBRTC_ARCH_ARM64)
+ (NSString *)kCVPixelBufferOpenGLCompatibilityKey : @(YES),
+#endif
+ (NSString *)kCVPixelBufferIOSurfacePropertiesKey : @{},
+ (NSString *)kCVPixelBufferPixelFormatTypeKey : @(framePixelFormat),
+ };
+
+ NSDictionary *encoder_specs;
+#if defined(WEBRTC_MAC) && !defined(WEBRTC_IOS)
+  // Hardware acceleration is currently supported above 360p on macOS; below
+  // 360p the compression session will be created with hardware acceleration
+  // disabled.
+ encoder_specs = @{
+ (NSString *)kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder : @(YES),
+ };
+
+#endif
+ OSStatus status = VTCompressionSessionCreate(
+ nullptr, // use default allocator
+ _width,
+ _height,
+ kCMVideoCodecType_H264,
+ (__bridge CFDictionaryRef)encoder_specs, // use hardware accelerated encoder if available
+ (__bridge CFDictionaryRef)sourceAttributes,
+ nullptr, // use default compressed data allocator
+ compressionOutputCallback,
+ nullptr,
+ &_compressionSession);
+ if (status != noErr) {
+ RTC_LOG(LS_ERROR) << "Failed to create compression session: " << status;
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+#if defined(WEBRTC_MAC) && !defined(WEBRTC_IOS)
+ CFBooleanRef hwaccl_enabled = nullptr;
+ status = VTSessionCopyProperty(_compressionSession,
+ kVTCompressionPropertyKey_UsingHardwareAcceleratedVideoEncoder,
+ nullptr,
+ &hwaccl_enabled);
+ if (status == noErr && (CFBooleanGetValue(hwaccl_enabled))) {
+ RTC_LOG(LS_INFO) << "Compression session created with hw accl enabled";
+ } else {
+ RTC_LOG(LS_INFO) << "Compression session created with hw accl disabled";
+ }
+#endif
+ [self configureCompressionSession];
+
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+- (void)configureCompressionSession {
+ RTC_DCHECK(_compressionSession);
+ SetVTSessionProperty(_compressionSession, kVTCompressionPropertyKey_RealTime, true);
+ SetVTSessionProperty(_compressionSession,
+ kVTCompressionPropertyKey_ProfileLevel,
+ ExtractProfile(*_profile_level_id));
+ SetVTSessionProperty(_compressionSession, kVTCompressionPropertyKey_AllowFrameReordering, false);
+ [self setEncoderBitrateBps:_targetBitrateBps frameRate:_encoderFrameRate];
+ // TODO(tkchin): Look at entropy mode and colorspace matrices.
+ // TODO(tkchin): Investigate to see if there's any way to make this work.
+ // May need it to interop with Android. Currently this call just fails.
+ // On inspecting encoder output on iOS8, this value is set to 6.
+ // internal::SetVTSessionProperty(compression_session_,
+ // kVTCompressionPropertyKey_MaxFrameDelayCount,
+ // 1);
+
+  // Set a relatively large value for keyframe emission: 7200 frames, i.e.
+  // 4 minutes at 30 fps. The 240-second duration cap below serves the same
+  // purpose at lower frame rates.
+ SetVTSessionProperty(_compressionSession, kVTCompressionPropertyKey_MaxKeyFrameInterval, 7200);
+ SetVTSessionProperty(
+ _compressionSession, kVTCompressionPropertyKey_MaxKeyFrameIntervalDuration, 240);
+}
+
+- (void)destroyCompressionSession {
+ if (_compressionSession) {
+ VTCompressionSessionInvalidate(_compressionSession);
+ CFRelease(_compressionSession);
+ _compressionSession = nullptr;
+ }
+}
+
+- (NSString *)implementationName {
+ return @"VideoToolbox";
+}
+
+- (void)setBitrateBps:(uint32_t)bitrateBps frameRate:(uint32_t)frameRate {
+ if (_encoderBitrateBps != bitrateBps || _encoderFrameRate != frameRate) {
+ [self setEncoderBitrateBps:bitrateBps frameRate:frameRate];
+ }
+}
+
+- (void)setEncoderBitrateBps:(uint32_t)bitrateBps frameRate:(uint32_t)frameRate {
+ if (_compressionSession) {
+ SetVTSessionProperty(_compressionSession, kVTCompressionPropertyKey_AverageBitRate, bitrateBps);
+
+ // With zero `_maxAllowedFrameRate`, we fall back to automatic frame rate detection.
+ if (_maxAllowedFrameRate > 0) {
+ SetVTSessionProperty(
+ _compressionSession, kVTCompressionPropertyKey_ExpectedFrameRate, frameRate);
+ }
+
+ // TODO(tkchin): Add a helper method to set array value.
+ int64_t dataLimitBytesPerSecondValue =
+ static_cast<int64_t>(bitrateBps * kLimitToAverageBitRateFactor / 8);
+ CFNumberRef bytesPerSecond =
+ CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt64Type, &dataLimitBytesPerSecondValue);
+ int64_t oneSecondValue = 1;
+ CFNumberRef oneSecond =
+ CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt64Type, &oneSecondValue);
+ const void *nums[2] = {bytesPerSecond, oneSecond};
+ CFArrayRef dataRateLimits = CFArrayCreate(nullptr, nums, 2, &kCFTypeArrayCallBacks);
+ OSStatus status = VTSessionSetProperty(
+ _compressionSession, kVTCompressionPropertyKey_DataRateLimits, dataRateLimits);
+ if (bytesPerSecond) {
+ CFRelease(bytesPerSecond);
+ }
+ if (oneSecond) {
+ CFRelease(oneSecond);
+ }
+ if (dataRateLimits) {
+ CFRelease(dataRateLimits);
+ }
+ if (status != noErr) {
+ RTC_LOG(LS_ERROR) << "Failed to set data rate limit with code: " << status;
+ }
+
+ _encoderBitrateBps = bitrateBps;
+ _encoderFrameRate = frameRate;
+ }
+}
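+
+// Worked example (illustrative): with bitrateBps = 1,000,000 and, say,
+// kLimitToAverageBitRateFactor = 1.5 (its value is defined elsewhere in this
+// file), the data-rate limit above becomes 1,000,000 * 1.5 / 8 = 187,500
+// bytes over the paired one-second window.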
+
+- (void)frameWasEncoded:(OSStatus)status
+ flags:(VTEncodeInfoFlags)infoFlags
+ sampleBuffer:(CMSampleBufferRef)sampleBuffer
+ codecSpecificInfo:(id<RTC_OBJC_TYPE(RTCCodecSpecificInfo)>)codecSpecificInfo
+ width:(int32_t)width
+ height:(int32_t)height
+ renderTimeMs:(int64_t)renderTimeMs
+ timestamp:(uint32_t)timestamp
+ rotation:(RTCVideoRotation)rotation {
+ RTCVideoEncoderCallback callback = _callback;
+ if (!callback) {
+ return;
+ }
+ if (status != noErr) {
+ RTC_LOG(LS_ERROR) << "H264 encode failed with code: " << status;
+ return;
+ }
+ if (infoFlags & kVTEncodeInfo_FrameDropped) {
+ RTC_LOG(LS_INFO) << "H264 encode dropped frame.";
+ return;
+ }
+
+ BOOL isKeyframe = NO;
+ CFArrayRef attachments = CMSampleBufferGetSampleAttachmentsArray(sampleBuffer, 0);
+ if (attachments != nullptr && CFArrayGetCount(attachments)) {
+ CFDictionaryRef attachment =
+ static_cast<CFDictionaryRef>(CFArrayGetValueAtIndex(attachments, 0));
+ isKeyframe = !CFDictionaryContainsKey(attachment, kCMSampleAttachmentKey_NotSync);
+ }
+
+ if (isKeyframe) {
+ RTC_LOG(LS_INFO) << "Generated keyframe";
+ }
+
+ __block std::unique_ptr<rtc::Buffer> buffer = std::make_unique<rtc::Buffer>();
+ if (!webrtc::H264CMSampleBufferToAnnexBBuffer(sampleBuffer, isKeyframe, buffer.get())) {
+ return;
+ }
+
+ RTC_OBJC_TYPE(RTCEncodedImage) *frame = [[RTC_OBJC_TYPE(RTCEncodedImage) alloc] init];
+  // The NSData object takes ownership of `buffer` and frees it via the
+  // deallocator block when the data is no longer referenced.
+ frame.buffer = [[NSData alloc] initWithBytesNoCopy:buffer->data()
+ length:buffer->size()
+ deallocator:^(void *bytes, NSUInteger size) {
+ buffer.reset();
+ }];
+ frame.encodedWidth = width;
+ frame.encodedHeight = height;
+ frame.frameType = isKeyframe ? RTCFrameTypeVideoFrameKey : RTCFrameTypeVideoFrameDelta;
+ frame.captureTimeMs = renderTimeMs;
+ frame.timeStamp = timestamp;
+ frame.rotation = rotation;
+ frame.contentType = (_mode == RTCVideoCodecModeScreensharing) ? RTCVideoContentTypeScreenshare :
+ RTCVideoContentTypeUnspecified;
+ frame.flags = webrtc::VideoSendTiming::kInvalid;
+
+ _h264BitstreamParser.ParseBitstream(*buffer);
+ frame.qp = @(_h264BitstreamParser.GetLastSliceQp().value_or(-1));
+
+ BOOL res = callback(frame, codecSpecificInfo);
+ if (!res) {
+ RTC_LOG(LS_ERROR) << "Encode callback failed";
+ return;
+ }
+ _bitrateAdjuster->Update(frame.buffer.length);
+}
+
+- (nullable RTC_OBJC_TYPE(RTCVideoEncoderQpThresholds) *)scalingSettings {
+ return [[RTC_OBJC_TYPE(RTCVideoEncoderQpThresholds) alloc]
+ initWithThresholdsLow:kLowH264QpThreshold
+ high:kHighH264QpThreshold];
+}
+
+@end
diff --git a/third_party/libwebrtc/sdk/objc/components/video_codec/UIDevice+H264Profile.h b/third_party/libwebrtc/sdk/objc/components/video_codec/UIDevice+H264Profile.h
new file mode 100644
index 0000000000..a51debb9fa
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_codec/UIDevice+H264Profile.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <UIKit/UIKit.h>
+
+#include "api/video_codecs/h264_profile_level_id.h"
+
+@interface UIDevice (H264Profile)
+
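+// Illustrative call site (assumed, not part of this change):
+//   absl::optional<webrtc::H264ProfileLevelId> profile =
+//       [UIDevice maxSupportedH264Profile];
+//   if (profile) {
+//     // Constrain the encoder configuration to `*profile`.
+//   }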
++ (absl::optional<webrtc::H264ProfileLevelId>)maxSupportedH264Profile;
+
+@end
diff --git a/third_party/libwebrtc/sdk/objc/components/video_codec/UIDevice+H264Profile.mm b/third_party/libwebrtc/sdk/objc/components/video_codec/UIDevice+H264Profile.mm
new file mode 100644
index 0000000000..0ef6a8d77c
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_codec/UIDevice+H264Profile.mm
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "UIDevice+H264Profile.h"
+#import "helpers/UIDevice+RTCDevice.h"
+
+#include <algorithm>
+
+namespace {
+
+using namespace webrtc;
+
+struct SupportedH264Profile {
+ const RTCDeviceType deviceType;
+ const H264ProfileLevelId profile;
+};
+
+constexpr SupportedH264Profile kH264MaxSupportedProfiles[] = {
+ // iPhones with at least iOS 9
+ {RTCDeviceTypeIPhone13ProMax,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP848
+ {RTCDeviceTypeIPhone13Pro,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP852
+ {RTCDeviceTypeIPhone13,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP851
+ {RTCDeviceTypeIPhone13Mini,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP847
+ {RTCDeviceTypeIPhoneSE2Gen,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP820
+ {RTCDeviceTypeIPhone12ProMax,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP832
+ {RTCDeviceTypeIPhone12Pro,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP831
+ {RTCDeviceTypeIPhone12,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP830
+ {RTCDeviceTypeIPhone12Mini,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP829
+ {RTCDeviceTypeIPhone11ProMax,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP806
+ {RTCDeviceTypeIPhone11Pro,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP805
+ {RTCDeviceTypeIPhone11,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP804
+ {RTCDeviceTypeIPhoneXS,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP779
+ {RTCDeviceTypeIPhoneXSMax,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP780
+ {RTCDeviceTypeIPhoneXR,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP781
+ {RTCDeviceTypeIPhoneX,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP770
+ {RTCDeviceTypeIPhone8,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP767
+ {RTCDeviceTypeIPhone8Plus,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP768
+ {RTCDeviceTypeIPhone7,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_1}}, // https://support.apple.com/kb/SP743
+ {RTCDeviceTypeIPhone7Plus,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_1}}, // https://support.apple.com/kb/SP744
+ {RTCDeviceTypeIPhoneSE,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP738
+ {RTCDeviceTypeIPhone6S,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP726
+ {RTCDeviceTypeIPhone6SPlus,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP727
+ {RTCDeviceTypeIPhone6,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP705
+ {RTCDeviceTypeIPhone6Plus,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP706
+ {RTCDeviceTypeIPhone5SGSM,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP685
+ {RTCDeviceTypeIPhone5SGSM_CDMA,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP685
+ {RTCDeviceTypeIPhone5GSM,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP655
+ {RTCDeviceTypeIPhone5GSM_CDMA,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP655
+ {RTCDeviceTypeIPhone5CGSM,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP684
+ {RTCDeviceTypeIPhone5CGSM_CDMA,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP684
+ {RTCDeviceTypeIPhone4S,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP643
+
+ // iPods with at least iOS 9
+ {RTCDeviceTypeIPodTouch7G,
+ {H264Profile::kProfileMain, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP796
+ {RTCDeviceTypeIPodTouch6G,
+ {H264Profile::kProfileMain, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP720
+ {RTCDeviceTypeIPodTouch5G,
+ {H264Profile::kProfileMain, H264Level::kLevel3_1}}, // https://support.apple.com/kb/SP657
+
+ // iPads with at least iOS 9
+ {RTCDeviceTypeIPadAir4Gen,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP828
+ {RTCDeviceTypeIPad8,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP822
+ {RTCDeviceTypeIPadPro4Gen12Inch,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP815
+ {RTCDeviceTypeIPadPro4Gen11Inch,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP814
+ {RTCDeviceTypeIPadAir3Gen,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP787
+ {RTCDeviceTypeIPadMini5Gen,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP788
+ {RTCDeviceTypeIPadPro3Gen12Inch,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP785
+ {RTCDeviceTypeIPadPro3Gen11Inch,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP784
+ {RTCDeviceTypeIPad7Gen10Inch,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP807
+ {RTCDeviceTypeIPad2Wifi,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP622
+ {RTCDeviceTypeIPad2GSM,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP622
+ {RTCDeviceTypeIPad2CDMA,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP622
+ {RTCDeviceTypeIPad2Wifi2,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP622
+ {RTCDeviceTypeIPadMiniWifi,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP661
+ {RTCDeviceTypeIPadMiniGSM,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP661
+ {RTCDeviceTypeIPadMiniGSM_CDMA,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP661
+ {RTCDeviceTypeIPad3Wifi,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP647
+ {RTCDeviceTypeIPad3GSM_CDMA,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP647
+ {RTCDeviceTypeIPad3GSM,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP647
+ {RTCDeviceTypeIPad4Wifi,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP662
+ {RTCDeviceTypeIPad4GSM,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP662
+ {RTCDeviceTypeIPad4GSM_CDMA,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP662
+ {RTCDeviceTypeIPad5,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP751
+ {RTCDeviceTypeIPad6,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP774
+ {RTCDeviceTypeIPadAirWifi,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP692
+ {RTCDeviceTypeIPadAirCellular,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP692
+ {RTCDeviceTypeIPadAirWifiCellular,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP692
+ {RTCDeviceTypeIPadAir2,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP708
+ {RTCDeviceTypeIPadMini2GWifi,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP693
+ {RTCDeviceTypeIPadMini2GCellular,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP693
+ {RTCDeviceTypeIPadMini2GWifiCellular,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP693
+ {RTCDeviceTypeIPadMini3,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP709
+ {RTCDeviceTypeIPadMini4,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP725
+ {RTCDeviceTypeIPadPro9Inch,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP739
+ {RTCDeviceTypeIPadPro12Inch,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/sp723
+ {RTCDeviceTypeIPadPro12Inch2,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP761
+ {RTCDeviceTypeIPadPro10Inch,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP762
+ {RTCDeviceTypeIPadMini6,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP850
+ {RTCDeviceTypeIPad9,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP849
+ {RTCDeviceTypeIPadPro5Gen12Inch,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP844
+ {RTCDeviceTypeIPadPro5Gen11Inch,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP843
+};
+
+absl::optional<H264ProfileLevelId> FindMaxSupportedProfileForDevice(RTCDeviceType deviceType) {
+ const auto* result = std::find_if(std::begin(kH264MaxSupportedProfiles),
+ std::end(kH264MaxSupportedProfiles),
+ [deviceType](const SupportedH264Profile& supportedProfile) {
+ return supportedProfile.deviceType == deviceType;
+ });
+ if (result != std::end(kH264MaxSupportedProfiles)) {
+ return result->profile;
+ }
+ return absl::nullopt;
+}
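+
+// Example drawn from the table above: FindMaxSupportedProfileForDevice(
+// RTCDeviceTypeIPhoneXS) yields {kProfileHigh, kLevel5_2}, while any device
+// type missing from kH264MaxSupportedProfiles yields absl::nullopt.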
+
+} // namespace
+
+@implementation UIDevice (H264Profile)
+
++ (absl::optional<webrtc::H264ProfileLevelId>)maxSupportedH264Profile {
+ return FindMaxSupportedProfileForDevice([self deviceType]);
+}
+
+@end
diff --git a/third_party/libwebrtc/sdk/objc/components/video_codec/helpers.cc b/third_party/libwebrtc/sdk/objc/components/video_codec/helpers.cc
new file mode 100644
index 0000000000..ac957f1b49
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_codec/helpers.cc
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#include "helpers.h"
+
+#include <memory>
+#include <string>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+// Copies characters from a CFStringRef into a std::string.
+std::string CFStringToString(const CFStringRef cf_string) {
+ RTC_DCHECK(cf_string);
+ std::string std_string;
+ // Get the size needed for UTF8 plus terminating character.
+ size_t buffer_size =
+ CFStringGetMaximumSizeForEncoding(CFStringGetLength(cf_string),
+ kCFStringEncodingUTF8) +
+ 1;
+ std::unique_ptr<char[]> buffer(new char[buffer_size]);
+ if (CFStringGetCString(cf_string, buffer.get(), buffer_size,
+ kCFStringEncodingUTF8)) {
+ // Copy over the characters.
+ std_string.assign(buffer.get());
+ }
+ return std_string;
+}
+
+// Convenience function for setting a VT property.
+void SetVTSessionProperty(VTSessionRef session,
+ CFStringRef key,
+ int32_t value) {
+ CFNumberRef cfNum =
+ CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &value);
+ OSStatus status = VTSessionSetProperty(session, key, cfNum);
+ CFRelease(cfNum);
+ if (status != noErr) {
+ std::string key_string = CFStringToString(key);
+ RTC_LOG(LS_ERROR) << "VTSessionSetProperty failed to set: " << key_string
+ << " to " << value << ": " << status;
+ }
+}
+
+// Convenience function for setting a VT property.
+void SetVTSessionProperty(VTSessionRef session,
+ CFStringRef key,
+ uint32_t value) {
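+  // Widen to 64 bits so the full uint32_t range survives conversion to a
+  // signed CFNumber (kCFNumberSInt32Type could not represent values above
+  // INT32_MAX).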
+ int64_t value_64 = value;
+ CFNumberRef cfNum =
+ CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt64Type, &value_64);
+ OSStatus status = VTSessionSetProperty(session, key, cfNum);
+ CFRelease(cfNum);
+ if (status != noErr) {
+ std::string key_string = CFStringToString(key);
+ RTC_LOG(LS_ERROR) << "VTSessionSetProperty failed to set: " << key_string
+ << " to " << value << ": " << status;
+ }
+}
+
+// Convenience function for setting a VT property.
+void SetVTSessionProperty(VTSessionRef session, CFStringRef key, bool value) {
+ CFBooleanRef cf_bool = (value) ? kCFBooleanTrue : kCFBooleanFalse;
+ OSStatus status = VTSessionSetProperty(session, key, cf_bool);
+ if (status != noErr) {
+ std::string key_string = CFStringToString(key);
+ RTC_LOG(LS_ERROR) << "VTSessionSetProperty failed to set: " << key_string
+ << " to " << value << ": " << status;
+ }
+}
+
+// Convenience function for setting a VT property.
+void SetVTSessionProperty(VTSessionRef session,
+ CFStringRef key,
+ CFStringRef value) {
+ OSStatus status = VTSessionSetProperty(session, key, value);
+ if (status != noErr) {
+ std::string key_string = CFStringToString(key);
+ std::string val_string = CFStringToString(value);
+ RTC_LOG(LS_ERROR) << "VTSessionSetProperty failed to set: " << key_string
+ << " to " << val_string << ": " << status;
+ }
+}
diff --git a/third_party/libwebrtc/sdk/objc/components/video_codec/helpers.h b/third_party/libwebrtc/sdk/objc/components/video_codec/helpers.h
new file mode 100644
index 0000000000..7c9ef1cd87
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_codec/helpers.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef SDK_OBJC_FRAMEWORK_CLASSES_VIDEOTOOLBOX_HELPERS_H_
+#define SDK_OBJC_FRAMEWORK_CLASSES_VIDEOTOOLBOX_HELPERS_H_
+
+#include <CoreFoundation/CoreFoundation.h>
+#include <VideoToolbox/VideoToolbox.h>
+#include <string>
+
+// Convenience function for creating a dictionary.
+inline CFDictionaryRef CreateCFTypeDictionary(CFTypeRef* keys,
+ CFTypeRef* values,
+ size_t size) {
+ return CFDictionaryCreate(kCFAllocatorDefault, keys, values, size,
+ &kCFTypeDictionaryKeyCallBacks,
+ &kCFTypeDictionaryValueCallBacks);
+}
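+
+// Illustrative use (assumed call site, not part of this change):
+//   int64_t format = kCVPixelFormatType_420YpCbCr8BiPlanarFullRange;
+//   CFNumberRef value =
+//       CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt64Type, &format);
+//   CFTypeRef keys[] = {kCVPixelBufferPixelFormatTypeKey};
+//   CFTypeRef values[] = {value};
+//   CFDictionaryRef dict = CreateCFTypeDictionary(keys, values, 1);
+//   CFRelease(value);
+//   // ... use `dict`, then:
+//   CFRelease(dict);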
+
+// Copies characters from a CFStringRef into a std::string.
+std::string CFStringToString(CFStringRef cf_string);
+
+// Convenience function for setting a VT property.
+void SetVTSessionProperty(VTSessionRef session, CFStringRef key, int32_t value);
+
+// Convenience function for setting a VT property.
+void SetVTSessionProperty(VTSessionRef session,
+ CFStringRef key,
+ uint32_t value);
+
+// Convenience function for setting a VT property.
+void SetVTSessionProperty(VTSessionRef session, CFStringRef key, bool value);
+
+// Convenience function for setting a VT property.
+void SetVTSessionProperty(VTSessionRef session,
+ CFStringRef key,
+ CFStringRef value);
+
+#endif // SDK_OBJC_FRAMEWORK_CLASSES_VIDEOTOOLBOX_HELPERS_H_
diff --git a/third_party/libwebrtc/sdk/objc/components/video_codec/nalu_rewriter.cc b/third_party/libwebrtc/sdk/objc/components/video_codec/nalu_rewriter.cc
new file mode 100644
index 0000000000..b7330e1f9c
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_codec/nalu_rewriter.cc
@@ -0,0 +1,327 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#include "sdk/objc/components/video_codec/nalu_rewriter.h"
+
+#include <CoreFoundation/CoreFoundation.h>
+#include <memory>
+#include <vector>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+using H264::kAud;
+using H264::kSps;
+using H264::NaluIndex;
+using H264::NaluType;
+using H264::ParseNaluType;
+
+const char kAnnexBHeaderBytes[4] = {0, 0, 0, 1};
+const size_t kAvccHeaderByteSize = sizeof(uint32_t);
+
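+// Framing example (illustrative): a 5-byte NALU is carried as
+//   avcc:    00 00 00 05 <payload>  (big-endian length prefix)
+//   Annex B: 00 00 00 01 <payload>  (start code, kAnnexBHeaderBytes)
+// The conversion functions below rewrite one 4-byte header into the other.
+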
+bool H264CMSampleBufferToAnnexBBuffer(CMSampleBufferRef avcc_sample_buffer,
+ bool is_keyframe,
+ rtc::Buffer* annexb_buffer) {
+ RTC_DCHECK(avcc_sample_buffer);
+
+ // Get format description from the sample buffer.
+ CMVideoFormatDescriptionRef description =
+ CMSampleBufferGetFormatDescription(avcc_sample_buffer);
+ if (description == nullptr) {
+ RTC_LOG(LS_ERROR) << "Failed to get sample buffer's description.";
+ return false;
+ }
+
+ // Get parameter set information.
+ int nalu_header_size = 0;
+ size_t param_set_count = 0;
+ OSStatus status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(
+ description, 0, nullptr, nullptr, &param_set_count, &nalu_header_size);
+ if (status != noErr) {
+ RTC_LOG(LS_ERROR) << "Failed to get parameter set.";
+ return false;
+ }
+ RTC_CHECK_EQ(nalu_header_size, kAvccHeaderByteSize);
+ RTC_DCHECK_EQ(param_set_count, 2);
+
+ // Truncate any previous data in the buffer without changing its capacity.
+ annexb_buffer->SetSize(0);
+
+ // Place all parameter sets at the front of buffer.
+ if (is_keyframe) {
+ size_t param_set_size = 0;
+ const uint8_t* param_set = nullptr;
+ for (size_t i = 0; i < param_set_count; ++i) {
+ status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(
+ description, i, &param_set, &param_set_size, nullptr, nullptr);
+ if (status != noErr) {
+ RTC_LOG(LS_ERROR) << "Failed to get parameter set.";
+ return false;
+ }
+ // Update buffer.
+ annexb_buffer->AppendData(kAnnexBHeaderBytes, sizeof(kAnnexBHeaderBytes));
+ annexb_buffer->AppendData(reinterpret_cast<const char*>(param_set),
+ param_set_size);
+ }
+ }
+
+ // Get block buffer from the sample buffer.
+ CMBlockBufferRef block_buffer =
+ CMSampleBufferGetDataBuffer(avcc_sample_buffer);
+ if (block_buffer == nullptr) {
+ RTC_LOG(LS_ERROR) << "Failed to get sample buffer's block buffer.";
+ return false;
+ }
+ CMBlockBufferRef contiguous_buffer = nullptr;
+ // Make sure block buffer is contiguous.
+ if (!CMBlockBufferIsRangeContiguous(block_buffer, 0, 0)) {
+ status = CMBlockBufferCreateContiguous(
+ nullptr, block_buffer, nullptr, nullptr, 0, 0, 0, &contiguous_buffer);
+ if (status != noErr) {
+ RTC_LOG(LS_ERROR) << "Failed to flatten non-contiguous block buffer: "
+ << status;
+ return false;
+ }
+ } else {
+ contiguous_buffer = block_buffer;
+ // Retain to make cleanup easier.
+ CFRetain(contiguous_buffer);
+ block_buffer = nullptr;
+ }
+
+ // Now copy the actual data.
+ char* data_ptr = nullptr;
+ size_t block_buffer_size = CMBlockBufferGetDataLength(contiguous_buffer);
+ status = CMBlockBufferGetDataPointer(contiguous_buffer, 0, nullptr, nullptr,
+ &data_ptr);
+ if (status != noErr) {
+ RTC_LOG(LS_ERROR) << "Failed to get block buffer data.";
+ CFRelease(contiguous_buffer);
+ return false;
+ }
+ size_t bytes_remaining = block_buffer_size;
+ while (bytes_remaining > 0) {
+ // The size type here must match `nalu_header_size`, we expect 4 bytes.
+ // Read the length of the next packet of data. Must convert from big endian
+ // to host endian.
+ RTC_DCHECK_GE(bytes_remaining, (size_t)nalu_header_size);
+ uint32_t* uint32_data_ptr = reinterpret_cast<uint32_t*>(data_ptr);
+ uint32_t packet_size = CFSwapInt32BigToHost(*uint32_data_ptr);
+ // Update buffer.
+ annexb_buffer->AppendData(kAnnexBHeaderBytes, sizeof(kAnnexBHeaderBytes));
+ annexb_buffer->AppendData(data_ptr + nalu_header_size, packet_size);
+
+ size_t bytes_written = packet_size + sizeof(kAnnexBHeaderBytes);
+ bytes_remaining -= bytes_written;
+ data_ptr += bytes_written;
+ }
+ RTC_DCHECK_EQ(bytes_remaining, (size_t)0);
+
+ CFRelease(contiguous_buffer);
+ return true;
+}
+
+bool H264AnnexBBufferToCMSampleBuffer(const uint8_t* annexb_buffer,
+ size_t annexb_buffer_size,
+ CMVideoFormatDescriptionRef video_format,
+ CMSampleBufferRef* out_sample_buffer,
+ CMMemoryPoolRef memory_pool) {
+ RTC_DCHECK(annexb_buffer);
+ RTC_DCHECK(out_sample_buffer);
+ RTC_DCHECK(video_format);
+ *out_sample_buffer = nullptr;
+
+ AnnexBBufferReader reader(annexb_buffer, annexb_buffer_size);
+ if (reader.SeekToNextNaluOfType(kSps)) {
+ // Buffer contains an SPS NALU - skip it and the following PPS
+ const uint8_t* data;
+ size_t data_len;
+ if (!reader.ReadNalu(&data, &data_len)) {
+ RTC_LOG(LS_ERROR) << "Failed to read SPS";
+ return false;
+ }
+ if (!reader.ReadNalu(&data, &data_len)) {
+ RTC_LOG(LS_ERROR) << "Failed to read PPS";
+ return false;
+ }
+ } else {
+ // No SPS NALU - start reading from the first NALU in the buffer
+ reader.SeekToStart();
+ }
+
+ // Allocate memory as a block buffer.
+ CMBlockBufferRef block_buffer = nullptr;
+ CFAllocatorRef block_allocator = CMMemoryPoolGetAllocator(memory_pool);
+ OSStatus status = CMBlockBufferCreateWithMemoryBlock(
+ kCFAllocatorDefault, nullptr, reader.BytesRemaining(), block_allocator,
+ nullptr, 0, reader.BytesRemaining(), kCMBlockBufferAssureMemoryNowFlag,
+ &block_buffer);
+ if (status != kCMBlockBufferNoErr) {
+ RTC_LOG(LS_ERROR) << "Failed to create block buffer.";
+ return false;
+ }
+
+ // Make sure block buffer is contiguous.
+ CMBlockBufferRef contiguous_buffer = nullptr;
+ if (!CMBlockBufferIsRangeContiguous(block_buffer, 0, 0)) {
+ status = CMBlockBufferCreateContiguous(kCFAllocatorDefault, block_buffer,
+ block_allocator, nullptr, 0, 0, 0,
+ &contiguous_buffer);
+ if (status != noErr) {
+ RTC_LOG(LS_ERROR) << "Failed to flatten non-contiguous block buffer: "
+ << status;
+ CFRelease(block_buffer);
+ return false;
+ }
+ } else {
+ contiguous_buffer = block_buffer;
+ block_buffer = nullptr;
+ }
+
+ // Get a raw pointer into allocated memory.
+ size_t block_buffer_size = 0;
+ char* data_ptr = nullptr;
+ status = CMBlockBufferGetDataPointer(contiguous_buffer, 0, nullptr,
+ &block_buffer_size, &data_ptr);
+ if (status != kCMBlockBufferNoErr) {
+ RTC_LOG(LS_ERROR) << "Failed to get block buffer data pointer.";
+ CFRelease(contiguous_buffer);
+ return false;
+ }
+ RTC_DCHECK(block_buffer_size == reader.BytesRemaining());
+
+ // Write Avcc NALUs into block buffer memory.
+ AvccBufferWriter writer(reinterpret_cast<uint8_t*>(data_ptr),
+ block_buffer_size);
+ while (reader.BytesRemaining() > 0) {
+ const uint8_t* nalu_data_ptr = nullptr;
+ size_t nalu_data_size = 0;
+ if (reader.ReadNalu(&nalu_data_ptr, &nalu_data_size)) {
+ writer.WriteNalu(nalu_data_ptr, nalu_data_size);
+ }
+ }
+
+ // Create sample buffer.
+ status = CMSampleBufferCreate(kCFAllocatorDefault, contiguous_buffer, true,
+ nullptr, nullptr, video_format, 1, 0, nullptr,
+ 0, nullptr, out_sample_buffer);
+ if (status != noErr) {
+ RTC_LOG(LS_ERROR) << "Failed to create sample buffer.";
+ CFRelease(contiguous_buffer);
+ return false;
+ }
+ CFRelease(contiguous_buffer);
+ return true;
+}
+
+CMVideoFormatDescriptionRef CreateVideoFormatDescription(
+ const uint8_t* annexb_buffer,
+ size_t annexb_buffer_size) {
+ const uint8_t* param_set_ptrs[2] = {};
+ size_t param_set_sizes[2] = {};
+ AnnexBBufferReader reader(annexb_buffer, annexb_buffer_size);
+  // Skip everything before the SPS, then read the SPS and PPS.
+ if (!reader.SeekToNextNaluOfType(kSps)) {
+ return nullptr;
+ }
+ if (!reader.ReadNalu(&param_set_ptrs[0], &param_set_sizes[0])) {
+ RTC_LOG(LS_ERROR) << "Failed to read SPS";
+ return nullptr;
+ }
+ if (!reader.ReadNalu(&param_set_ptrs[1], &param_set_sizes[1])) {
+ RTC_LOG(LS_ERROR) << "Failed to read PPS";
+ return nullptr;
+ }
+
+ // Parse the SPS and PPS into a CMVideoFormatDescription.
+ CMVideoFormatDescriptionRef description = nullptr;
+ OSStatus status = CMVideoFormatDescriptionCreateFromH264ParameterSets(
+ kCFAllocatorDefault, 2, param_set_ptrs, param_set_sizes, 4, &description);
+ if (status != noErr) {
+ RTC_LOG(LS_ERROR) << "Failed to create video format description.";
+ return nullptr;
+ }
+ return description;
+}
+
+AnnexBBufferReader::AnnexBBufferReader(const uint8_t* annexb_buffer,
+ size_t length)
+ : start_(annexb_buffer), length_(length) {
+ RTC_DCHECK(annexb_buffer);
+ offsets_ = H264::FindNaluIndices(annexb_buffer, length);
+ offset_ = offsets_.begin();
+}
+
+AnnexBBufferReader::~AnnexBBufferReader() = default;
+
+bool AnnexBBufferReader::ReadNalu(const uint8_t** out_nalu,
+ size_t* out_length) {
+ RTC_DCHECK(out_nalu);
+ RTC_DCHECK(out_length);
+ *out_nalu = nullptr;
+ *out_length = 0;
+
+ if (offset_ == offsets_.end()) {
+ return false;
+ }
+ *out_nalu = start_ + offset_->payload_start_offset;
+ *out_length = offset_->payload_size;
+ ++offset_;
+ return true;
+}
+
+size_t AnnexBBufferReader::BytesRemaining() const {
+ if (offset_ == offsets_.end()) {
+ return 0;
+ }
+ return length_ - offset_->start_offset;
+}
+
+void AnnexBBufferReader::SeekToStart() {
+ offset_ = offsets_.begin();
+}
+
+bool AnnexBBufferReader::SeekToNextNaluOfType(NaluType type) {
+ for (; offset_ != offsets_.end(); ++offset_) {
+ if (offset_->payload_size < 1)
+ continue;
+ if (ParseNaluType(*(start_ + offset_->payload_start_offset)) == type)
+ return true;
+ }
+ return false;
+}
+
+AvccBufferWriter::AvccBufferWriter(uint8_t* const avcc_buffer, size_t length)
+ : start_(avcc_buffer), offset_(0), length_(length) {
+ RTC_DCHECK(avcc_buffer);
+}
+
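+// Byte-level example (illustrative): WriteNalu(data, 5) emits the big-endian
+// header 00 00 00 05 followed by the 5 payload bytes, consuming
+// kAvccHeaderByteSize + 5 = 9 bytes of the buffer.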
+bool AvccBufferWriter::WriteNalu(const uint8_t* data, size_t data_size) {
+ // Check if we can write this length of data.
+ if (data_size + kAvccHeaderByteSize > BytesRemaining()) {
+ return false;
+ }
+ // Write length header, which needs to be big endian.
+ uint32_t big_endian_length = CFSwapInt32HostToBig(data_size);
+ memcpy(start_ + offset_, &big_endian_length, sizeof(big_endian_length));
+ offset_ += sizeof(big_endian_length);
+ // Write data.
+ memcpy(start_ + offset_, data, data_size);
+ offset_ += data_size;
+ return true;
+}
+
+size_t AvccBufferWriter::BytesRemaining() const {
+ return length_ - offset_;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/sdk/objc/components/video_codec/nalu_rewriter.h b/third_party/libwebrtc/sdk/objc/components/video_codec/nalu_rewriter.h
new file mode 100644
index 0000000000..c6474971e2
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_codec/nalu_rewriter.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef SDK_OBJC_FRAMEWORK_CLASSES_VIDEOTOOLBOX_NALU_REWRITER_H_
+#define SDK_OBJC_FRAMEWORK_CLASSES_VIDEOTOOLBOX_NALU_REWRITER_H_
+
+#include "modules/video_coding/codecs/h264/include/h264.h"
+
+#include <CoreMedia/CoreMedia.h>
+#include <vector>
+
+#include "common_video/h264/h264_common.h"
+#include "rtc_base/buffer.h"
+
+using webrtc::H264::NaluIndex;
+
+namespace webrtc {
+
+// Converts a sample buffer emitted from the VideoToolbox encoder into a buffer
+// suitable for RTP. The sample buffer is in avcc format whereas the rtp buffer
+// needs to be in Annex B format. Data is written directly to `annexb_buffer`.
+bool H264CMSampleBufferToAnnexBBuffer(CMSampleBufferRef avcc_sample_buffer,
+ bool is_keyframe,
+ rtc::Buffer* annexb_buffer);
+
+// Converts a buffer received from RTP into a sample buffer suitable for the
+// VideoToolbox decoder. The RTP buffer is in Annex B format whereas the sample
+// buffer is in avcc format.
+// `video_format` must always be provided; any SPS and PPS NALUs at the start
+// of the buffer are skipped rather than parsed here (see
+// CreateVideoFormatDescription below).
+// Caller is responsible for releasing the created sample buffer.
+bool H264AnnexBBufferToCMSampleBuffer(const uint8_t* annexb_buffer,
+ size_t annexb_buffer_size,
+ CMVideoFormatDescriptionRef video_format,
+ CMSampleBufferRef* out_sample_buffer,
+ CMMemoryPoolRef memory_pool);
+
+// Returns a video format description created from the sps/pps information in
+// the Annex B buffer. If there is no such information, nullptr is returned.
+// The caller is responsible for releasing the description.
+CMVideoFormatDescriptionRef CreateVideoFormatDescription(
+ const uint8_t* annexb_buffer,
+ size_t annexb_buffer_size);
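+// Illustrative use (assumed call site): if CreateVideoFormatDescription(buf,
+// size) returns a non-null description, pass it to
+// H264AnnexBBufferToCMSampleBuffer and CFRelease() it afterwards.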
+
+// Helper class for reading NALUs from an RTP Annex B buffer.
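+// Illustrative loop (assumed call site, not part of this change):
+//   AnnexBBufferReader reader(annexb_buffer, length);
+//   const uint8_t* nalu = nullptr;
+//   size_t nalu_length = 0;
+//   while (reader.ReadNalu(&nalu, &nalu_length)) {
+//     // `nalu` points at the payload, after the start code.
+//   }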
+class AnnexBBufferReader final {
+ public:
+ AnnexBBufferReader(const uint8_t* annexb_buffer, size_t length);
+ ~AnnexBBufferReader();
+ AnnexBBufferReader(const AnnexBBufferReader& other) = delete;
+ void operator=(const AnnexBBufferReader& other) = delete;
+
+  // Returns, via the out parameters, a pointer to the beginning of the next
+  // NALU slice (without the header bytes) and its length. Returns false if no
+  // more slices remain.
+ bool ReadNalu(const uint8_t** out_nalu, size_t* out_length);
+
+ // Returns the number of unread NALU bytes, including the size of the header.
+ // If the buffer has no remaining NALUs this will return zero.
+ size_t BytesRemaining() const;
+
+ // Reset the reader to start reading from the first NALU
+ void SeekToStart();
+
+ // Seek to the next position that holds a NALU of the desired type,
+ // or the end if no such NALU is found.
+ // Return true if a NALU of the desired type is found, false if we
+ // reached the end instead
+ bool SeekToNextNaluOfType(H264::NaluType type);
+
+ private:
+  // Returns the next offset that contains NALU data.
+ size_t FindNextNaluHeader(const uint8_t* start,
+ size_t length,
+ size_t offset) const;
+
+ const uint8_t* const start_;
+ std::vector<NaluIndex> offsets_;
+ std::vector<NaluIndex>::iterator offset_;
+ const size_t length_;
+};
+
+// Helper class for writing NALUs using avcc format into a buffer.
+class AvccBufferWriter final {
+ public:
+ AvccBufferWriter(uint8_t* const avcc_buffer, size_t length);
+ ~AvccBufferWriter() {}
+ AvccBufferWriter(const AvccBufferWriter& other) = delete;
+ void operator=(const AvccBufferWriter& other) = delete;
+
+ // Writes the data slice into the buffer. Returns false if there isn't
+ // enough space left.
+ bool WriteNalu(const uint8_t* data, size_t data_size);
+
+ // Returns the unused bytes in the buffer.
+ size_t BytesRemaining() const;
+
+ private:
+ uint8_t* const start_;
+ size_t offset_;
+ const size_t length_;
+};
+
+} // namespace webrtc
+
+#endif // SDK_OBJC_FRAMEWORK_CLASSES_VIDEOTOOLBOX_NALU_REWRITER_H_
diff --git a/third_party/libwebrtc/sdk/objc/components/video_frame_buffer/RTCCVPixelBuffer.h b/third_party/libwebrtc/sdk/objc/components/video_frame_buffer/RTCCVPixelBuffer.h
new file mode 100644
index 0000000000..664d9bb904
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_frame_buffer/RTCCVPixelBuffer.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <AVFoundation/AVFoundation.h>
+
+#import "RTCMacros.h"
+#import "RTCVideoFrameBuffer.h"
+
+NS_ASSUME_NONNULL_BEGIN
+
+/** RTCVideoFrameBuffer containing a CVPixelBufferRef */
+RTC_OBJC_EXPORT
+@interface RTC_OBJC_TYPE (RTCCVPixelBuffer) : NSObject <RTC_OBJC_TYPE(RTCVideoFrameBuffer)>
+
+@property(nonatomic, readonly) CVPixelBufferRef pixelBuffer;
+@property(nonatomic, readonly) int cropX;
+@property(nonatomic, readonly) int cropY;
+@property(nonatomic, readonly) int cropWidth;
+@property(nonatomic, readonly) int cropHeight;
+
++ (NSSet<NSNumber *> *)supportedPixelFormats;
+
+- (instancetype)initWithPixelBuffer:(CVPixelBufferRef)pixelBuffer;
+- (instancetype)initWithPixelBuffer:(CVPixelBufferRef)pixelBuffer
+ adaptedWidth:(int)adaptedWidth
+ adaptedHeight:(int)adaptedHeight
+ cropWidth:(int)cropWidth
+ cropHeight:(int)cropHeight
+ cropX:(int)cropX
+ cropY:(int)cropY;
+
+- (BOOL)requiresCropping;
+- (BOOL)requiresScalingToWidth:(int)width height:(int)height;
+- (int)bufferSizeForCroppingAndScalingToWidth:(int)width height:(int)height;
+
+/** The minimum size of the `tmpBuffer` must be the number of bytes returned from the
+ * bufferSizeForCroppingAndScalingToWidth:height: method.
+ * If that size is 0, the `tmpBuffer` may be nil.
+ */
+- (BOOL)cropAndScaleTo:(CVPixelBufferRef)outputPixelBuffer
+ withTempBuffer:(nullable uint8_t *)tmpBuffer;
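+/* Illustrative call sequence (assumed, not part of this change):
+ *   int tmpSize = [buffer bufferSizeForCroppingAndScalingToWidth:w height:h];
+ *   uint8_t *tmp = tmpSize > 0 ? (uint8_t *)malloc(tmpSize) : NULL;
+ *   [buffer cropAndScaleTo:output withTempBuffer:tmp];
+ *   free(tmp);
+ */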
+
+@end
+
+NS_ASSUME_NONNULL_END
diff --git a/third_party/libwebrtc/sdk/objc/components/video_frame_buffer/RTCCVPixelBuffer.mm b/third_party/libwebrtc/sdk/objc/components/video_frame_buffer/RTCCVPixelBuffer.mm
new file mode 100644
index 0000000000..1a9b672d1a
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_frame_buffer/RTCCVPixelBuffer.mm
@@ -0,0 +1,367 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "RTCCVPixelBuffer.h"
+
+#import "api/video_frame_buffer/RTCNativeMutableI420Buffer.h"
+
+#include "common_video/libyuv/include/webrtc_libyuv.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "third_party/libyuv/include/libyuv.h"
+
+#if !defined(NDEBUG) && defined(WEBRTC_IOS)
+#import <UIKit/UIKit.h>
+#import <VideoToolbox/VideoToolbox.h>
+#endif
+
+@implementation RTC_OBJC_TYPE (RTCCVPixelBuffer) {
+ int _width;
+ int _height;
+ int _bufferWidth;
+ int _bufferHeight;
+ int _cropWidth;
+ int _cropHeight;
+}
+
+@synthesize pixelBuffer = _pixelBuffer;
+@synthesize cropX = _cropX;
+@synthesize cropY = _cropY;
+@synthesize cropWidth = _cropWidth;
+@synthesize cropHeight = _cropHeight;
+
++ (NSSet<NSNumber*>*)supportedPixelFormats {
+ return [NSSet setWithObjects:@(kCVPixelFormatType_420YpCbCr8BiPlanarFullRange),
+ @(kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange),
+ @(kCVPixelFormatType_32BGRA),
+ @(kCVPixelFormatType_32ARGB),
+ nil];
+}
+
+- (instancetype)initWithPixelBuffer:(CVPixelBufferRef)pixelBuffer {
+ return [self initWithPixelBuffer:pixelBuffer
+ adaptedWidth:CVPixelBufferGetWidth(pixelBuffer)
+ adaptedHeight:CVPixelBufferGetHeight(pixelBuffer)
+ cropWidth:CVPixelBufferGetWidth(pixelBuffer)
+ cropHeight:CVPixelBufferGetHeight(pixelBuffer)
+ cropX:0
+ cropY:0];
+}
+
+- (instancetype)initWithPixelBuffer:(CVPixelBufferRef)pixelBuffer
+ adaptedWidth:(int)adaptedWidth
+ adaptedHeight:(int)adaptedHeight
+ cropWidth:(int)cropWidth
+ cropHeight:(int)cropHeight
+ cropX:(int)cropX
+ cropY:(int)cropY {
+ if (self = [super init]) {
+ _width = adaptedWidth;
+ _height = adaptedHeight;
+ _pixelBuffer = pixelBuffer;
+ _bufferWidth = CVPixelBufferGetWidth(_pixelBuffer);
+ _bufferHeight = CVPixelBufferGetHeight(_pixelBuffer);
+ _cropWidth = cropWidth;
+ _cropHeight = cropHeight;
+ // Can only crop at even pixels.
+ _cropX = cropX & ~1;
+ _cropY = cropY & ~1;
+ CVBufferRetain(_pixelBuffer);
+ }
+
+ return self;
+}
+
+- (void)dealloc {
+ CVBufferRelease(_pixelBuffer);
+}
+
+- (int)width {
+ return _width;
+}
+
+- (int)height {
+ return _height;
+}
+
+- (BOOL)requiresCropping {
+ return _cropWidth != _bufferWidth || _cropHeight != _bufferHeight;
+}
+
+- (BOOL)requiresScalingToWidth:(int)width height:(int)height {
+ return _cropWidth != width || _cropHeight != height;
+}
+
+- (int)bufferSizeForCroppingAndScalingToWidth:(int)width height:(int)height {
+ const OSType srcPixelFormat = CVPixelBufferGetPixelFormatType(_pixelBuffer);
+ switch (srcPixelFormat) {
+ case kCVPixelFormatType_420YpCbCr8BiPlanarFullRange:
+ case kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange: {
+ int srcChromaWidth = (_cropWidth + 1) / 2;
+ int srcChromaHeight = (_cropHeight + 1) / 2;
+ int dstChromaWidth = (width + 1) / 2;
+ int dstChromaHeight = (height + 1) / 2;
+
+ return srcChromaWidth * srcChromaHeight * 2 + dstChromaWidth * dstChromaHeight * 2;
+ }
+ case kCVPixelFormatType_32BGRA:
+ case kCVPixelFormatType_32ARGB: {
+ return 0; // Scaling RGBA frames does not require a temporary buffer.
+ }
+ }
+ RTC_DCHECK_NOTREACHED() << "Unsupported pixel format.";
+ return 0;
+}
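+
+// Worked example (illustrative): cropping a 640x480 NV12 buffer and scaling
+// to 320x240 needs (320 * 240 + 160 * 120) * 2 = 192000 scratch bytes for the
+// intermediate chroma planes; RGB sources always report 0.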
+
+- (BOOL)cropAndScaleTo:(CVPixelBufferRef)outputPixelBuffer
+ withTempBuffer:(nullable uint8_t*)tmpBuffer {
+ const OSType srcPixelFormat = CVPixelBufferGetPixelFormatType(_pixelBuffer);
+ const OSType dstPixelFormat = CVPixelBufferGetPixelFormatType(outputPixelBuffer);
+
+ switch (srcPixelFormat) {
+ case kCVPixelFormatType_420YpCbCr8BiPlanarFullRange:
+ case kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange: {
+ size_t dstWidth = CVPixelBufferGetWidth(outputPixelBuffer);
+ size_t dstHeight = CVPixelBufferGetHeight(outputPixelBuffer);
+ if (dstWidth > 0 && dstHeight > 0) {
+ RTC_DCHECK(dstPixelFormat == kCVPixelFormatType_420YpCbCr8BiPlanarFullRange ||
+ dstPixelFormat == kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange);
+ if ([self requiresScalingToWidth:dstWidth height:dstHeight]) {
+ RTC_DCHECK(tmpBuffer);
+ }
+ [self cropAndScaleNV12To:outputPixelBuffer withTempBuffer:tmpBuffer];
+ }
+ break;
+ }
+ case kCVPixelFormatType_32BGRA:
+ case kCVPixelFormatType_32ARGB: {
+ RTC_DCHECK(srcPixelFormat == dstPixelFormat);
+ [self cropAndScaleARGBTo:outputPixelBuffer];
+ break;
+ }
+ default: {
+ RTC_DCHECK_NOTREACHED() << "Unsupported pixel format.";
+ }
+ }
+
+ return YES;
+}
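+
+// Note on coordinates (assumed semantics, added for clarity): the offset,
+// crop, and scale arguments below are expressed in this buffer's adapted
+// (width x height) space; the _cropWidth / _width and _cropHeight / _height
+// factors map them back into the underlying pixel buffer's crop region.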
+- (id<RTC_OBJC_TYPE(RTCVideoFrameBuffer)>)cropAndScaleWith:(int)offsetX
+ offsetY:(int)offsetY
+ cropWidth:(int)cropWidth
+ cropHeight:(int)cropHeight
+ scaleWidth:(int)scaleWidth
+ scaleHeight:(int)scaleHeight {
+ return [[RTC_OBJC_TYPE(RTCCVPixelBuffer) alloc]
+ initWithPixelBuffer:_pixelBuffer
+ adaptedWidth:scaleWidth
+ adaptedHeight:scaleHeight
+ cropWidth:cropWidth * _cropWidth / _width
+ cropHeight:cropHeight * _cropHeight / _height
+ cropX:_cropX + offsetX * _cropWidth / _width
+ cropY:_cropY + offsetY * _cropHeight / _height];
+}
+
+- (id<RTC_OBJC_TYPE(RTCI420Buffer)>)toI420 {
+ const OSType pixelFormat = CVPixelBufferGetPixelFormatType(_pixelBuffer);
+
+ CVPixelBufferLockBaseAddress(_pixelBuffer, kCVPixelBufferLock_ReadOnly);
+
+ RTC_OBJC_TYPE(RTCMutableI420Buffer)* i420Buffer =
+ [[RTC_OBJC_TYPE(RTCMutableI420Buffer) alloc] initWithWidth:[self width] height:[self height]];
+
+ switch (pixelFormat) {
+ case kCVPixelFormatType_420YpCbCr8BiPlanarFullRange:
+ case kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange: {
+ const uint8_t* srcY =
+ static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(_pixelBuffer, 0));
+ const int srcYStride = CVPixelBufferGetBytesPerRowOfPlane(_pixelBuffer, 0);
+ const uint8_t* srcUV =
+ static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(_pixelBuffer, 1));
+ const int srcUVStride = CVPixelBufferGetBytesPerRowOfPlane(_pixelBuffer, 1);
+
+ // Crop just by modifying pointers.
+ srcY += srcYStride * _cropY + _cropX;
+ srcUV += srcUVStride * (_cropY / 2) + _cropX;
+
+ // TODO(magjed): Use a frame buffer pool.
+ webrtc::NV12ToI420Scaler nv12ToI420Scaler;
+ nv12ToI420Scaler.NV12ToI420Scale(srcY,
+ srcYStride,
+ srcUV,
+ srcUVStride,
+ _cropWidth,
+ _cropHeight,
+ i420Buffer.mutableDataY,
+ i420Buffer.strideY,
+ i420Buffer.mutableDataU,
+ i420Buffer.strideU,
+ i420Buffer.mutableDataV,
+ i420Buffer.strideV,
+ i420Buffer.width,
+ i420Buffer.height);
+ break;
+ }
+ case kCVPixelFormatType_32BGRA:
+ case kCVPixelFormatType_32ARGB: {
+ CVPixelBufferRef scaledPixelBuffer = NULL;
+ CVPixelBufferRef sourcePixelBuffer = NULL;
+ if ([self requiresCropping] ||
+ [self requiresScalingToWidth:i420Buffer.width height:i420Buffer.height]) {
+ CVPixelBufferCreate(
+ NULL, i420Buffer.width, i420Buffer.height, pixelFormat, NULL, &scaledPixelBuffer);
+ [self cropAndScaleTo:scaledPixelBuffer withTempBuffer:NULL];
+
+ CVPixelBufferLockBaseAddress(scaledPixelBuffer, kCVPixelBufferLock_ReadOnly);
+ sourcePixelBuffer = scaledPixelBuffer;
+ } else {
+ sourcePixelBuffer = _pixelBuffer;
+ }
+ const uint8_t* src = static_cast<uint8_t*>(CVPixelBufferGetBaseAddress(sourcePixelBuffer));
+ const size_t bytesPerRow = CVPixelBufferGetBytesPerRow(sourcePixelBuffer);
+
+ if (pixelFormat == kCVPixelFormatType_32BGRA) {
+ // Corresponds to libyuv::FOURCC_ARGB
+ libyuv::ARGBToI420(src,
+ bytesPerRow,
+ i420Buffer.mutableDataY,
+ i420Buffer.strideY,
+ i420Buffer.mutableDataU,
+ i420Buffer.strideU,
+ i420Buffer.mutableDataV,
+ i420Buffer.strideV,
+ i420Buffer.width,
+ i420Buffer.height);
+ } else if (pixelFormat == kCVPixelFormatType_32ARGB) {
+ // Corresponds to libyuv::FOURCC_BGRA
+ libyuv::BGRAToI420(src,
+ bytesPerRow,
+ i420Buffer.mutableDataY,
+ i420Buffer.strideY,
+ i420Buffer.mutableDataU,
+ i420Buffer.strideU,
+ i420Buffer.mutableDataV,
+ i420Buffer.strideV,
+ i420Buffer.width,
+ i420Buffer.height);
+ }
+
+ if (scaledPixelBuffer) {
+ CVPixelBufferUnlockBaseAddress(scaledPixelBuffer, kCVPixelBufferLock_ReadOnly);
+ CVBufferRelease(scaledPixelBuffer);
+ }
+ break;
+ }
+ default: {
+ RTC_DCHECK_NOTREACHED() << "Unsupported pixel format.";
+ }
+ }
+
+ CVPixelBufferUnlockBaseAddress(_pixelBuffer, kCVPixelBufferLock_ReadOnly);
+
+ return i420Buffer;
+}
+
+#pragma mark - Debugging
+
+#if !defined(NDEBUG) && defined(WEBRTC_IOS)
+- (id)debugQuickLookObject {
+ CGImageRef cgImage;
+ VTCreateCGImageFromCVPixelBuffer(_pixelBuffer, NULL, &cgImage);
+ UIImage *image = [UIImage imageWithCGImage:cgImage scale:1.0 orientation:UIImageOrientationUp];
+ CGImageRelease(cgImage);
+ return image;
+}
+#endif
+
+#pragma mark - Private
+
+- (void)cropAndScaleNV12To:(CVPixelBufferRef)outputPixelBuffer withTempBuffer:(uint8_t*)tmpBuffer {
+ // Prepare output pointers.
+ CVReturn cvRet = CVPixelBufferLockBaseAddress(outputPixelBuffer, 0);
+ if (cvRet != kCVReturnSuccess) {
+ RTC_LOG(LS_ERROR) << "Failed to lock base address: " << cvRet;
+ }
+ const int dstWidth = CVPixelBufferGetWidth(outputPixelBuffer);
+ const int dstHeight = CVPixelBufferGetHeight(outputPixelBuffer);
+ uint8_t* dstY =
+ reinterpret_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(outputPixelBuffer, 0));
+ const int dstYStride = CVPixelBufferGetBytesPerRowOfPlane(outputPixelBuffer, 0);
+ uint8_t* dstUV =
+ reinterpret_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(outputPixelBuffer, 1));
+ const int dstUVStride = CVPixelBufferGetBytesPerRowOfPlane(outputPixelBuffer, 1);
+
+ // Prepare source pointers.
+ CVPixelBufferLockBaseAddress(_pixelBuffer, kCVPixelBufferLock_ReadOnly);
+ const uint8_t* srcY = static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(_pixelBuffer, 0));
+ const int srcYStride = CVPixelBufferGetBytesPerRowOfPlane(_pixelBuffer, 0);
+ const uint8_t* srcUV = static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(_pixelBuffer, 1));
+ const int srcUVStride = CVPixelBufferGetBytesPerRowOfPlane(_pixelBuffer, 1);
+
+  // Crop just by modifying pointers. The NV12 chroma plane holds interleaved
+  // UV samples at half vertical resolution, so its row offset uses _cropY / 2;
+  // the horizontal byte offset is (_cropX / 2) * 2 == _cropX (cropX is even).
+ srcY += srcYStride * _cropY + _cropX;
+ srcUV += srcUVStride * (_cropY / 2) + _cropX;
+
+ webrtc::NV12Scale(tmpBuffer,
+ srcY,
+ srcYStride,
+ srcUV,
+ srcUVStride,
+ _cropWidth,
+ _cropHeight,
+ dstY,
+ dstYStride,
+ dstUV,
+ dstUVStride,
+ dstWidth,
+ dstHeight);
+
+ CVPixelBufferUnlockBaseAddress(_pixelBuffer, kCVPixelBufferLock_ReadOnly);
+ CVPixelBufferUnlockBaseAddress(outputPixelBuffer, 0);
+}
+
+- (void)cropAndScaleARGBTo:(CVPixelBufferRef)outputPixelBuffer {
+ // Prepare output pointers.
+ CVReturn cvRet = CVPixelBufferLockBaseAddress(outputPixelBuffer, 0);
+ if (cvRet != kCVReturnSuccess) {
+ RTC_LOG(LS_ERROR) << "Failed to lock base address: " << cvRet;
+ }
+ const int dstWidth = CVPixelBufferGetWidth(outputPixelBuffer);
+ const int dstHeight = CVPixelBufferGetHeight(outputPixelBuffer);
+
+ uint8_t* dst = reinterpret_cast<uint8_t*>(CVPixelBufferGetBaseAddress(outputPixelBuffer));
+ const int dstStride = CVPixelBufferGetBytesPerRow(outputPixelBuffer);
+
+ // Prepare source pointers.
+ CVPixelBufferLockBaseAddress(_pixelBuffer, kCVPixelBufferLock_ReadOnly);
+ const uint8_t* src = static_cast<uint8_t*>(CVPixelBufferGetBaseAddress(_pixelBuffer));
+ const int srcStride = CVPixelBufferGetBytesPerRow(_pixelBuffer);
+
+ // Crop just by modifying pointers. Need to ensure that src pointer points to a byte corresponding
+ // to the start of a new pixel (byte with B for BGRA) so that libyuv scales correctly.
+ const int bytesPerPixel = 4;
+ src += srcStride * _cropY + (_cropX * bytesPerPixel);
+
+ // kCVPixelFormatType_32BGRA corresponds to libyuv::FOURCC_ARGB
+ libyuv::ARGBScale(src,
+ srcStride,
+ _cropWidth,
+ _cropHeight,
+ dst,
+ dstStride,
+ dstWidth,
+ dstHeight,
+ libyuv::kFilterBox);
+
+ CVPixelBufferUnlockBaseAddress(_pixelBuffer, kCVPixelBufferLock_ReadOnly);
+ CVPixelBufferUnlockBaseAddress(outputPixelBuffer, 0);
+}
+
+@end