path: root/third_party/libwebrtc/sdk/objc/components/video_codec
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 09:22:09 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 09:22:09 +0000
commit     43a97878ce14b72f0981164f87f2e35e14151312 (patch)
tree       620249daf56c0258faa40cbdcf9cfba06de2a846 /third_party/libwebrtc/sdk/objc/components/video_codec
parent     Initial commit. (diff)
Adding upstream version 110.0.1. (upstream/110.0.1, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/libwebrtc/sdk/objc/components/video_codec')
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_codec/RTCCodecSpecificInfoH264+Private.h    25
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_codec/RTCCodecSpecificInfoH264.h            27
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_codec/RTCCodecSpecificInfoH264.mm           29
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_codec/RTCDefaultVideoDecoderFactory.h       26
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_codec/RTCDefaultVideoDecoderFactory.m       85
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_codec/RTCDefaultVideoEncoderFactory.h       31
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_codec/RTCDefaultVideoEncoderFactory.m      102
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_codec/RTCH264ProfileLevelId.h               60
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_codec/RTCH264ProfileLevelId.mm             120
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoDecoderFactoryH264.h          18
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoDecoderFactoryH264.m          49
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoDecoderH264.h                 18
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoDecoderH264.mm               276
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoEncoderFactoryH264.h          18
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoEncoderFactoryH264.m          49
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoEncoderH264.h                 22
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoEncoderH264.mm               819
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_codec/UIDevice+H264Profile.h                19
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_codec/UIDevice+H264Profile.mm              205
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_codec/helpers.cc                            90
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_codec/helpers.h                             47
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_codec/nalu_rewriter.cc                     327
-rw-r--r--  third_party/libwebrtc/sdk/objc/components/video_codec/nalu_rewriter.h                      113
23 files changed, 2575 insertions, 0 deletions
diff --git a/third_party/libwebrtc/sdk/objc/components/video_codec/RTCCodecSpecificInfoH264+Private.h b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCCodecSpecificInfoH264+Private.h
new file mode 100644
index 0000000000..a0cd8515d1
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCCodecSpecificInfoH264+Private.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "RTCCodecSpecificInfoH264.h"
+
+#include "modules/video_coding/include/video_codec_interface.h"
+
+NS_ASSUME_NONNULL_BEGIN
+
+/* Interfaces for converting to/from internal C++ formats. */
+@interface RTC_OBJC_TYPE (RTCCodecSpecificInfoH264)
+()
+
+ - (webrtc::CodecSpecificInfo)nativeCodecSpecificInfo;
+
+@end
+
+NS_ASSUME_NONNULL_END
diff --git a/third_party/libwebrtc/sdk/objc/components/video_codec/RTCCodecSpecificInfoH264.h b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCCodecSpecificInfoH264.h
new file mode 100644
index 0000000000..ae3003a115
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCCodecSpecificInfoH264.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <Foundation/Foundation.h>
+
+#import "RTCCodecSpecificInfo.h"
+#import "RTCMacros.h"
+
+/** Class for H264 specific config. */
+typedef NS_ENUM(NSUInteger, RTCH264PacketizationMode) {
+ RTCH264PacketizationModeNonInterleaved = 0, // Mode 1 - STAP-A, FU-A is allowed
+ RTCH264PacketizationModeSingleNalUnit // Mode 0 - only single NALU allowed
+};
+
+RTC_OBJC_EXPORT
+@interface RTC_OBJC_TYPE (RTCCodecSpecificInfoH264) : NSObject <RTC_OBJC_TYPE(RTCCodecSpecificInfo)>
+
+@property(nonatomic, assign) RTCH264PacketizationMode packetizationMode;
+
+@end
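
For orientation (illustrative only, not part of the commit): a minimal sketch of how this codec-specific info type could be constructed, using only the enum and property declared in the header above. The helper name is hypothetical.

#import "RTCCodecSpecificInfoH264.h"

// Hypothetical helper: request non-interleaved packetization (mode 1, STAP-A/FU-A allowed).
static RTC_OBJC_TYPE(RTCCodecSpecificInfoH264) *MakeH264CodecSpecificInfo(void) {
  RTC_OBJC_TYPE(RTCCodecSpecificInfoH264) *info =
      [[RTC_OBJC_TYPE(RTCCodecSpecificInfoH264) alloc] init];
  info.packetizationMode = RTCH264PacketizationModeNonInterleaved;
  return info;
}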
diff --git a/third_party/libwebrtc/sdk/objc/components/video_codec/RTCCodecSpecificInfoH264.mm b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCCodecSpecificInfoH264.mm
new file mode 100644
index 0000000000..e38ed307b3
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCCodecSpecificInfoH264.mm
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "RTCCodecSpecificInfoH264+Private.h"
+
+#import "RTCH264ProfileLevelId.h"
+
+// H264 specific settings.
+@implementation RTC_OBJC_TYPE (RTCCodecSpecificInfoH264)
+
+@synthesize packetizationMode = _packetizationMode;
+
+- (webrtc::CodecSpecificInfo)nativeCodecSpecificInfo {
+ webrtc::CodecSpecificInfo codecSpecificInfo;
+ codecSpecificInfo.codecType = webrtc::kVideoCodecH264;
+ codecSpecificInfo.codecSpecific.H264.packetization_mode =
+ (webrtc::H264PacketizationMode)_packetizationMode;
+
+ return codecSpecificInfo;
+}
+
+@end
diff --git a/third_party/libwebrtc/sdk/objc/components/video_codec/RTCDefaultVideoDecoderFactory.h b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCDefaultVideoDecoderFactory.h
new file mode 100644
index 0000000000..de5a9c4684
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCDefaultVideoDecoderFactory.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <Foundation/Foundation.h>
+
+#import "RTCMacros.h"
+#import "RTCVideoDecoderFactory.h"
+
+NS_ASSUME_NONNULL_BEGIN
+
+/** This decoder factory includes support for all codecs bundled with WebRTC. If using custom
+ * codecs, create custom implementations of RTCVideoEncoderFactory and
+ * RTCVideoDecoderFactory.
+ */
+RTC_OBJC_EXPORT
+@interface RTC_OBJC_TYPE (RTCDefaultVideoDecoderFactory) : NSObject <RTC_OBJC_TYPE(RTCVideoDecoderFactory)>
+@end
+
+NS_ASSUME_NONNULL_END
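
A hedged usage sketch (not part of the commit), assuming the import paths used elsewhere in this diff: enumerating the bundled decoders and creating one through the factory API declared above. Which codecs appear depends on build configuration (see the conditional VP9/AV1 entries in the implementation below).

#import <Foundation/Foundation.h>
#import "RTCDefaultVideoDecoderFactory.h"
#import "RTCH264ProfileLevelId.h"
#import "base/RTCVideoCodecInfo.h"

// Sketch: list the decoders this factory advertises and build an H264 decoder.
static void ListBundledDecoders(void) {
  RTC_OBJC_TYPE(RTCDefaultVideoDecoderFactory) *factory =
      [[RTC_OBJC_TYPE(RTCDefaultVideoDecoderFactory) alloc] init];
  for (RTC_OBJC_TYPE(RTCVideoCodecInfo) *info in [factory supportedCodecs]) {
    NSLog(@"bundled decoder: %@", info.name);
    if ([info.name isEqualToString:kRTCVideoCodecH264Name]) {
      id<RTC_OBJC_TYPE(RTCVideoDecoder)> decoder = [factory createDecoder:info];
      (void)decoder;  // Hand off to the caller; configuration is out of scope here.
    }
  }
}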
diff --git a/third_party/libwebrtc/sdk/objc/components/video_codec/RTCDefaultVideoDecoderFactory.m b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCDefaultVideoDecoderFactory.m
new file mode 100644
index 0000000000..6e3baa8750
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCDefaultVideoDecoderFactory.m
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2017 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "RTCDefaultVideoDecoderFactory.h"
+
+#import "RTCH264ProfileLevelId.h"
+#import "RTCVideoDecoderH264.h"
+#import "api/video_codec/RTCVideoCodecConstants.h"
+#import "api/video_codec/RTCVideoDecoderVP8.h"
+#import "api/video_codec/RTCVideoDecoderVP9.h"
+#import "base/RTCVideoCodecInfo.h"
+
+#if defined(RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY)
+#import "api/video_codec/RTCVideoDecoderAV1.h" // nogncheck
+#endif
+
+@implementation RTC_OBJC_TYPE (RTCDefaultVideoDecoderFactory)
+
+- (NSArray<RTC_OBJC_TYPE(RTCVideoCodecInfo) *> *)supportedCodecs {
+ NSDictionary<NSString *, NSString *> *constrainedHighParams = @{
+ @"profile-level-id" : kRTCMaxSupportedH264ProfileLevelConstrainedHigh,
+ @"level-asymmetry-allowed" : @"1",
+ @"packetization-mode" : @"1",
+ };
+ RTC_OBJC_TYPE(RTCVideoCodecInfo) *constrainedHighInfo =
+ [[RTC_OBJC_TYPE(RTCVideoCodecInfo) alloc] initWithName:kRTCVideoCodecH264Name
+ parameters:constrainedHighParams];
+
+ NSDictionary<NSString *, NSString *> *constrainedBaselineParams = @{
+ @"profile-level-id" : kRTCMaxSupportedH264ProfileLevelConstrainedBaseline,
+ @"level-asymmetry-allowed" : @"1",
+ @"packetization-mode" : @"1",
+ };
+ RTC_OBJC_TYPE(RTCVideoCodecInfo) *constrainedBaselineInfo =
+ [[RTC_OBJC_TYPE(RTCVideoCodecInfo) alloc] initWithName:kRTCVideoCodecH264Name
+ parameters:constrainedBaselineParams];
+
+ RTC_OBJC_TYPE(RTCVideoCodecInfo) *vp8Info =
+ [[RTC_OBJC_TYPE(RTCVideoCodecInfo) alloc] initWithName:kRTCVideoCodecVp8Name];
+
+ NSMutableArray<RTC_OBJC_TYPE(RTCVideoCodecInfo) *> *result = [@[
+ constrainedHighInfo,
+ constrainedBaselineInfo,
+ vp8Info,
+ ] mutableCopy];
+
+ if ([RTC_OBJC_TYPE(RTCVideoDecoderVP9) isSupported]) {
+ [result
+ addObject:[[RTC_OBJC_TYPE(RTCVideoCodecInfo) alloc] initWithName:kRTCVideoCodecVp9Name]];
+ }
+
+#if defined(RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY)
+ [result addObject:[[RTC_OBJC_TYPE(RTCVideoCodecInfo) alloc] initWithName:kRTCVideoCodecAv1Name]];
+#endif
+
+ return result;
+}
+
+- (id<RTC_OBJC_TYPE(RTCVideoDecoder)>)createDecoder:(RTC_OBJC_TYPE(RTCVideoCodecInfo) *)info {
+ if ([info.name isEqualToString:kRTCVideoCodecH264Name]) {
+ return [[RTC_OBJC_TYPE(RTCVideoDecoderH264) alloc] init];
+ } else if ([info.name isEqualToString:kRTCVideoCodecVp8Name]) {
+ return [RTC_OBJC_TYPE(RTCVideoDecoderVP8) vp8Decoder];
+ } else if ([info.name isEqualToString:kRTCVideoCodecVp9Name] &&
+ [RTC_OBJC_TYPE(RTCVideoDecoderVP9) isSupported]) {
+ return [RTC_OBJC_TYPE(RTCVideoDecoderVP9) vp9Decoder];
+ }
+
+#if defined(RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY)
+ if ([info.name isEqualToString:kRTCVideoCodecAv1Name]) {
+ return [RTC_OBJC_TYPE(RTCVideoDecoderAV1) av1Decoder];
+ }
+#endif
+
+ return nil;
+}
+
+@end
diff --git a/third_party/libwebrtc/sdk/objc/components/video_codec/RTCDefaultVideoEncoderFactory.h b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCDefaultVideoEncoderFactory.h
new file mode 100644
index 0000000000..92ab40c95b
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCDefaultVideoEncoderFactory.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <Foundation/Foundation.h>
+
+#import "RTCMacros.h"
+#import "RTCVideoEncoderFactory.h"
+
+NS_ASSUME_NONNULL_BEGIN
+
+/** This encoder factory includes support for all codecs bundled with WebRTC. If using custom
+ * codecs, create custom implementations of RTCVideoEncoderFactory and
+ * RTCVideoDecoderFactory.
+ */
+RTC_OBJC_EXPORT
+@interface RTC_OBJC_TYPE (RTCDefaultVideoEncoderFactory) : NSObject <RTC_OBJC_TYPE(RTCVideoEncoderFactory)>
+
+@property(nonatomic, retain) RTC_OBJC_TYPE(RTCVideoCodecInfo) *preferredCodec;
+
++ (NSArray<RTC_OBJC_TYPE(RTCVideoCodecInfo) *> *)supportedCodecs;
+
+@end
+
+NS_ASSUME_NONNULL_END
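
Again a sketch rather than part of the diff: marking H264 as the preferred codec so the instance-level supportedCodecs (defined in the .m below) lists it first.

#import <Foundation/Foundation.h>
#import "RTCDefaultVideoEncoderFactory.h"
#import "RTCH264ProfileLevelId.h"
#import "base/RTCVideoCodecInfo.h"

// Sketch: prefer H264 so the factory orders it ahead of VP8/VP9/AV1.
static RTC_OBJC_TYPE(RTCDefaultVideoEncoderFactory) *MakeH264PreferringFactory(void) {
  RTC_OBJC_TYPE(RTCDefaultVideoEncoderFactory) *factory =
      [[RTC_OBJC_TYPE(RTCDefaultVideoEncoderFactory) alloc] init];
  for (RTC_OBJC_TYPE(RTCVideoCodecInfo) *info in
       [RTC_OBJC_TYPE(RTCDefaultVideoEncoderFactory) supportedCodecs]) {
    if ([info.name isEqualToString:kRTCVideoCodecH264Name]) {
      factory.preferredCodec = info;  // -supportedCodecs on the instance now lists this first.
      break;
    }
  }
  return factory;
}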
diff --git a/third_party/libwebrtc/sdk/objc/components/video_codec/RTCDefaultVideoEncoderFactory.m b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCDefaultVideoEncoderFactory.m
new file mode 100644
index 0000000000..8de55bde4a
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCDefaultVideoEncoderFactory.m
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2017 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "RTCDefaultVideoEncoderFactory.h"
+
+#import "RTCH264ProfileLevelId.h"
+#import "RTCVideoEncoderH264.h"
+#import "api/video_codec/RTCVideoCodecConstants.h"
+#import "api/video_codec/RTCVideoEncoderVP8.h"
+#import "api/video_codec/RTCVideoEncoderVP9.h"
+#import "base/RTCVideoCodecInfo.h"
+
+#if defined(RTC_USE_LIBAOM_AV1_ENCODER)
+#import "api/video_codec/RTCVideoEncoderAV1.h" // nogncheck
+#endif
+
+@implementation RTC_OBJC_TYPE (RTCDefaultVideoEncoderFactory)
+
+@synthesize preferredCodec;
+
++ (NSArray<RTC_OBJC_TYPE(RTCVideoCodecInfo) *> *)supportedCodecs {
+ NSDictionary<NSString *, NSString *> *constrainedHighParams = @{
+ @"profile-level-id" : kRTCMaxSupportedH264ProfileLevelConstrainedHigh,
+ @"level-asymmetry-allowed" : @"1",
+ @"packetization-mode" : @"1",
+ };
+ RTC_OBJC_TYPE(RTCVideoCodecInfo) *constrainedHighInfo =
+ [[RTC_OBJC_TYPE(RTCVideoCodecInfo) alloc] initWithName:kRTCVideoCodecH264Name
+ parameters:constrainedHighParams];
+
+ NSDictionary<NSString *, NSString *> *constrainedBaselineParams = @{
+ @"profile-level-id" : kRTCMaxSupportedH264ProfileLevelConstrainedBaseline,
+ @"level-asymmetry-allowed" : @"1",
+ @"packetization-mode" : @"1",
+ };
+ RTC_OBJC_TYPE(RTCVideoCodecInfo) *constrainedBaselineInfo =
+ [[RTC_OBJC_TYPE(RTCVideoCodecInfo) alloc] initWithName:kRTCVideoCodecH264Name
+ parameters:constrainedBaselineParams];
+
+ RTC_OBJC_TYPE(RTCVideoCodecInfo) *vp8Info =
+ [[RTC_OBJC_TYPE(RTCVideoCodecInfo) alloc] initWithName:kRTCVideoCodecVp8Name];
+
+ NSMutableArray<RTC_OBJC_TYPE(RTCVideoCodecInfo) *> *result = [@[
+ constrainedHighInfo,
+ constrainedBaselineInfo,
+ vp8Info,
+ ] mutableCopy];
+
+ if ([RTC_OBJC_TYPE(RTCVideoEncoderVP9) isSupported]) {
+ [result
+ addObject:[[RTC_OBJC_TYPE(RTCVideoCodecInfo) alloc] initWithName:kRTCVideoCodecVp9Name]];
+ }
+
+#if defined(RTC_USE_LIBAOM_AV1_ENCODER)
+ [result addObject:[[RTC_OBJC_TYPE(RTCVideoCodecInfo) alloc] initWithName:kRTCVideoCodecAv1Name]];
+#endif
+
+ return result;
+}
+
+- (id<RTC_OBJC_TYPE(RTCVideoEncoder)>)createEncoder:(RTC_OBJC_TYPE(RTCVideoCodecInfo) *)info {
+ if ([info.name isEqualToString:kRTCVideoCodecH264Name]) {
+ return [[RTC_OBJC_TYPE(RTCVideoEncoderH264) alloc] initWithCodecInfo:info];
+ } else if ([info.name isEqualToString:kRTCVideoCodecVp8Name]) {
+ return [RTC_OBJC_TYPE(RTCVideoEncoderVP8) vp8Encoder];
+ } else if ([info.name isEqualToString:kRTCVideoCodecVp9Name] &&
+ [RTC_OBJC_TYPE(RTCVideoEncoderVP9) isSupported]) {
+ return [RTC_OBJC_TYPE(RTCVideoEncoderVP9) vp9Encoder];
+ }
+
+#if defined(RTC_USE_LIBAOM_AV1_ENCODER)
+ if ([info.name isEqualToString:kRTCVideoCodecAv1Name]) {
+ return [RTC_OBJC_TYPE(RTCVideoEncoderAV1) av1Encoder];
+ }
+#endif
+
+ return nil;
+}
+
+- (NSArray<RTC_OBJC_TYPE(RTCVideoCodecInfo) *> *)supportedCodecs {
+ NSMutableArray<RTC_OBJC_TYPE(RTCVideoCodecInfo) *> *codecs =
+ [[[self class] supportedCodecs] mutableCopy];
+
+ NSMutableArray<RTC_OBJC_TYPE(RTCVideoCodecInfo) *> *orderedCodecs = [NSMutableArray array];
+ NSUInteger index = [codecs indexOfObject:self.preferredCodec];
+ if (index != NSNotFound) {
+ [orderedCodecs addObject:[codecs objectAtIndex:index]];
+ [codecs removeObjectAtIndex:index];
+ }
+ [orderedCodecs addObjectsFromArray:codecs];
+
+ return [orderedCodecs copy];
+}
+
+@end
diff --git a/third_party/libwebrtc/sdk/objc/components/video_codec/RTCH264ProfileLevelId.h b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCH264ProfileLevelId.h
new file mode 100644
index 0000000000..dac7bb5610
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCH264ProfileLevelId.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <Foundation/Foundation.h>
+
+#import "RTCMacros.h"
+
+RTC_OBJC_EXPORT extern NSString *const kRTCVideoCodecH264Name;
+RTC_OBJC_EXPORT extern NSString *const kRTCLevel31ConstrainedHigh;
+RTC_OBJC_EXPORT extern NSString *const kRTCLevel31ConstrainedBaseline;
+RTC_OBJC_EXPORT extern NSString *const kRTCMaxSupportedH264ProfileLevelConstrainedHigh;
+RTC_OBJC_EXPORT extern NSString *const kRTCMaxSupportedH264ProfileLevelConstrainedBaseline;
+
+/** H264 Profiles and levels. */
+typedef NS_ENUM(NSUInteger, RTCH264Profile) {
+ RTCH264ProfileConstrainedBaseline,
+ RTCH264ProfileBaseline,
+ RTCH264ProfileMain,
+ RTCH264ProfileConstrainedHigh,
+ RTCH264ProfileHigh,
+};
+
+typedef NS_ENUM(NSUInteger, RTCH264Level) {
+ RTCH264Level1_b = 0,
+ RTCH264Level1 = 10,
+ RTCH264Level1_1 = 11,
+ RTCH264Level1_2 = 12,
+ RTCH264Level1_3 = 13,
+ RTCH264Level2 = 20,
+ RTCH264Level2_1 = 21,
+ RTCH264Level2_2 = 22,
+ RTCH264Level3 = 30,
+ RTCH264Level3_1 = 31,
+ RTCH264Level3_2 = 32,
+ RTCH264Level4 = 40,
+ RTCH264Level4_1 = 41,
+ RTCH264Level4_2 = 42,
+ RTCH264Level5 = 50,
+ RTCH264Level5_1 = 51,
+ RTCH264Level5_2 = 52
+};
+
+RTC_OBJC_EXPORT
+@interface RTC_OBJC_TYPE (RTCH264ProfileLevelId) : NSObject
+
+@property(nonatomic, readonly) RTCH264Profile profile;
+@property(nonatomic, readonly) RTCH264Level level;
+@property(nonatomic, readonly) NSString *hexString;
+
+- (instancetype)initWithHexString:(NSString *)hexString;
+- (instancetype)initWithProfile:(RTCH264Profile)profile level:(RTCH264Level)level;
+
+@end
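
A short worked example (informational, not part of the commit) of the two initializers above, round-tripping the constrained-baseline level 3.1 id exported as kRTCLevel31ConstrainedBaseline.

#import <Foundation/Foundation.h>
#import "RTCH264ProfileLevelId.h"

// Sketch: parse "42e01f" and rebuild the same id from its profile/level pair.
static void InspectH264ProfileLevelId(void) {
  RTC_OBJC_TYPE(RTCH264ProfileLevelId) *parsed = [[RTC_OBJC_TYPE(RTCH264ProfileLevelId) alloc]
      initWithHexString:kRTCLevel31ConstrainedBaseline];
  NSLog(@"profile=%lu level=%lu", (unsigned long)parsed.profile, (unsigned long)parsed.level);

  RTC_OBJC_TYPE(RTCH264ProfileLevelId) *rebuilt = [[RTC_OBJC_TYPE(RTCH264ProfileLevelId) alloc]
      initWithProfile:RTCH264ProfileConstrainedBaseline
                level:RTCH264Level3_1];
  NSLog(@"hexString=%@", rebuilt.hexString);  // Expected to round-trip back to @"42e01f".
}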
diff --git a/third_party/libwebrtc/sdk/objc/components/video_codec/RTCH264ProfileLevelId.mm b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCH264ProfileLevelId.mm
new file mode 100644
index 0000000000..f0ef3ec232
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCH264ProfileLevelId.mm
@@ -0,0 +1,120 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#import "RTCH264ProfileLevelId.h"
+
+#import "helpers/NSString+StdString.h"
+#if defined(WEBRTC_IOS)
+#import "UIDevice+H264Profile.h"
+#endif
+
+#include "api/video_codecs/h264_profile_level_id.h"
+#include "media/base/media_constants.h"
+
+namespace {
+
+NSString *MaxSupportedProfileLevelConstrainedHigh();
+NSString *MaxSupportedProfileLevelConstrainedBaseline();
+
+} // namespace
+
+NSString *const kRTCVideoCodecH264Name = @(cricket::kH264CodecName);
+NSString *const kRTCLevel31ConstrainedHigh = @"640c1f";
+NSString *const kRTCLevel31ConstrainedBaseline = @"42e01f";
+NSString *const kRTCMaxSupportedH264ProfileLevelConstrainedHigh =
+ MaxSupportedProfileLevelConstrainedHigh();
+NSString *const kRTCMaxSupportedH264ProfileLevelConstrainedBaseline =
+ MaxSupportedProfileLevelConstrainedBaseline();
+
+namespace {
+
+#if defined(WEBRTC_IOS)
+
+NSString *MaxSupportedLevelForProfile(webrtc::H264Profile profile) {
+ const absl::optional<webrtc::H264ProfileLevelId> profileLevelId =
+ [UIDevice maxSupportedH264Profile];
+ if (profileLevelId && profileLevelId->profile >= profile) {
+ const absl::optional<std::string> profileString =
+ H264ProfileLevelIdToString(webrtc::H264ProfileLevelId(profile, profileLevelId->level));
+ if (profileString) {
+ return [NSString stringForStdString:*profileString];
+ }
+ }
+ return nil;
+}
+#endif
+
+NSString *MaxSupportedProfileLevelConstrainedBaseline() {
+#if defined(WEBRTC_IOS)
+ NSString *profile = MaxSupportedLevelForProfile(webrtc::H264Profile::kProfileConstrainedBaseline);
+ if (profile != nil) {
+ return profile;
+ }
+#endif
+ return kRTCLevel31ConstrainedBaseline;
+}
+
+NSString *MaxSupportedProfileLevelConstrainedHigh() {
+#if defined(WEBRTC_IOS)
+ NSString *profile = MaxSupportedLevelForProfile(webrtc::H264Profile::kProfileConstrainedHigh);
+ if (profile != nil) {
+ return profile;
+ }
+#endif
+ return kRTCLevel31ConstrainedHigh;
+}
+
+} // namespace
+
+@interface RTC_OBJC_TYPE (RTCH264ProfileLevelId)
+()
+
+ @property(nonatomic, assign) RTCH264Profile profile;
+@property(nonatomic, assign) RTCH264Level level;
+@property(nonatomic, strong) NSString *hexString;
+
+@end
+
+@implementation RTC_OBJC_TYPE (RTCH264ProfileLevelId)
+
+@synthesize profile = _profile;
+@synthesize level = _level;
+@synthesize hexString = _hexString;
+
+- (instancetype)initWithHexString:(NSString *)hexString {
+ if (self = [super init]) {
+ self.hexString = hexString;
+
+ absl::optional<webrtc::H264ProfileLevelId> profile_level_id =
+ webrtc::ParseH264ProfileLevelId([hexString cStringUsingEncoding:NSUTF8StringEncoding]);
+ if (profile_level_id.has_value()) {
+ self.profile = static_cast<RTCH264Profile>(profile_level_id->profile);
+ self.level = static_cast<RTCH264Level>(profile_level_id->level);
+ }
+ }
+ return self;
+}
+
+- (instancetype)initWithProfile:(RTCH264Profile)profile level:(RTCH264Level)level {
+ if (self = [super init]) {
+ self.profile = profile;
+ self.level = level;
+
+ absl::optional<std::string> hex_string =
+ webrtc::H264ProfileLevelIdToString(webrtc::H264ProfileLevelId(
+ static_cast<webrtc::H264Profile>(profile), static_cast<webrtc::H264Level>(level)));
+ self.hexString =
+ [NSString stringWithCString:hex_string.value_or("").c_str() encoding:NSUTF8StringEncoding];
+ }
+ return self;
+}
+
+@end
diff --git a/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoDecoderFactoryH264.h b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoDecoderFactoryH264.h
new file mode 100644
index 0000000000..88bacbbdfe
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoDecoderFactoryH264.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <Foundation/Foundation.h>
+
+#import "RTCMacros.h"
+#import "RTCVideoDecoderFactory.h"
+
+RTC_OBJC_EXPORT
+@interface RTC_OBJC_TYPE (RTCVideoDecoderFactoryH264) : NSObject <RTC_OBJC_TYPE(RTCVideoDecoderFactory)>
+@end
diff --git a/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoDecoderFactoryH264.m b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoDecoderFactoryH264.m
new file mode 100644
index 0000000000..bdae19d687
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoDecoderFactoryH264.m
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "RTCVideoDecoderFactoryH264.h"
+
+#import "RTCH264ProfileLevelId.h"
+#import "RTCVideoDecoderH264.h"
+
+@implementation RTC_OBJC_TYPE (RTCVideoDecoderFactoryH264)
+
+- (NSArray<RTC_OBJC_TYPE(RTCVideoCodecInfo) *> *)supportedCodecs {
+ NSMutableArray<RTC_OBJC_TYPE(RTCVideoCodecInfo) *> *codecs = [NSMutableArray array];
+ NSString *codecName = kRTCVideoCodecH264Name;
+
+ NSDictionary<NSString *, NSString *> *constrainedHighParams = @{
+ @"profile-level-id" : kRTCMaxSupportedH264ProfileLevelConstrainedHigh,
+ @"level-asymmetry-allowed" : @"1",
+ @"packetization-mode" : @"1",
+ };
+ RTC_OBJC_TYPE(RTCVideoCodecInfo) *constrainedHighInfo =
+ [[RTC_OBJC_TYPE(RTCVideoCodecInfo) alloc] initWithName:codecName
+ parameters:constrainedHighParams];
+ [codecs addObject:constrainedHighInfo];
+
+ NSDictionary<NSString *, NSString *> *constrainedBaselineParams = @{
+ @"profile-level-id" : kRTCMaxSupportedH264ProfileLevelConstrainedBaseline,
+ @"level-asymmetry-allowed" : @"1",
+ @"packetization-mode" : @"1",
+ };
+ RTC_OBJC_TYPE(RTCVideoCodecInfo) *constrainedBaselineInfo =
+ [[RTC_OBJC_TYPE(RTCVideoCodecInfo) alloc] initWithName:codecName
+ parameters:constrainedBaselineParams];
+ [codecs addObject:constrainedBaselineInfo];
+
+ return [codecs copy];
+}
+
+- (id<RTC_OBJC_TYPE(RTCVideoDecoder)>)createDecoder:(RTC_OBJC_TYPE(RTCVideoCodecInfo) *)info {
+ return [[RTC_OBJC_TYPE(RTCVideoDecoderH264) alloc] init];
+}
+
+@end
diff --git a/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoDecoderH264.h b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoDecoderH264.h
new file mode 100644
index 0000000000..a12e4212a7
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoDecoderH264.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <Foundation/Foundation.h>
+
+#import "RTCMacros.h"
+#import "RTCVideoDecoder.h"
+
+RTC_OBJC_EXPORT
+@interface RTC_OBJC_TYPE (RTCVideoDecoderH264) : NSObject <RTC_OBJC_TYPE(RTCVideoDecoder)>
+@end
diff --git a/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoDecoderH264.mm b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoDecoderH264.mm
new file mode 100644
index 0000000000..09e642bc37
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoDecoderH264.mm
@@ -0,0 +1,276 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#import "RTCVideoDecoderH264.h"
+
+#import <VideoToolbox/VideoToolbox.h>
+
+#import "base/RTCVideoFrame.h"
+#import "base/RTCVideoFrameBuffer.h"
+#import "components/video_frame_buffer/RTCCVPixelBuffer.h"
+#import "helpers.h"
+#import "helpers/scoped_cftyperef.h"
+
+#if defined(WEBRTC_IOS)
+#import "helpers/UIDevice+RTCDevice.h"
+#endif
+
+#include "modules/video_coding/include/video_error_codes.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/time_utils.h"
+#include "sdk/objc/components/video_codec/nalu_rewriter.h"
+
+// Struct that we pass to the decoder per frame to decode. We receive it again
+// in the decoder callback.
+struct RTCFrameDecodeParams {
+ RTCFrameDecodeParams(RTCVideoDecoderCallback cb, int64_t ts) : callback(cb), timestamp(ts) {}
+ RTCVideoDecoderCallback callback;
+ int64_t timestamp;
+};
+
+@interface RTC_OBJC_TYPE (RTCVideoDecoderH264)
+() - (void)setError : (OSStatus)error;
+@end
+
+// This is the callback function that VideoToolbox calls when decode is
+// complete.
+void decompressionOutputCallback(void *decoderRef,
+ void *params,
+ OSStatus status,
+ VTDecodeInfoFlags infoFlags,
+ CVImageBufferRef imageBuffer,
+ CMTime timestamp,
+ CMTime duration) {
+ std::unique_ptr<RTCFrameDecodeParams> decodeParams(
+ reinterpret_cast<RTCFrameDecodeParams *>(params));
+ if (status != noErr) {
+ RTC_OBJC_TYPE(RTCVideoDecoderH264) *decoder =
+ (__bridge RTC_OBJC_TYPE(RTCVideoDecoderH264) *)decoderRef;
+ [decoder setError:status];
+ RTC_LOG(LS_ERROR) << "Failed to decode frame. Status: " << status;
+ return;
+ }
+ // TODO(tkchin): Handle CVO properly.
+ RTC_OBJC_TYPE(RTCCVPixelBuffer) *frameBuffer =
+ [[RTC_OBJC_TYPE(RTCCVPixelBuffer) alloc] initWithPixelBuffer:imageBuffer];
+ RTC_OBJC_TYPE(RTCVideoFrame) *decodedFrame = [[RTC_OBJC_TYPE(RTCVideoFrame) alloc]
+ initWithBuffer:frameBuffer
+ rotation:RTCVideoRotation_0
+ timeStampNs:CMTimeGetSeconds(timestamp) * rtc::kNumNanosecsPerSec];
+ decodedFrame.timeStamp = decodeParams->timestamp;
+ decodeParams->callback(decodedFrame);
+}
+
+// Decoder.
+@implementation RTC_OBJC_TYPE (RTCVideoDecoderH264) {
+ CMVideoFormatDescriptionRef _videoFormat;
+ CMMemoryPoolRef _memoryPool;
+ VTDecompressionSessionRef _decompressionSession;
+ RTCVideoDecoderCallback _callback;
+ OSStatus _error;
+}
+
+- (instancetype)init {
+ self = [super init];
+ if (self) {
+ _memoryPool = CMMemoryPoolCreate(nil);
+ }
+ return self;
+}
+
+- (void)dealloc {
+ CMMemoryPoolInvalidate(_memoryPool);
+ CFRelease(_memoryPool);
+ [self destroyDecompressionSession];
+ [self setVideoFormat:nullptr];
+}
+
+- (NSInteger)startDecodeWithNumberOfCores:(int)numberOfCores {
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+- (NSInteger)decode:(RTC_OBJC_TYPE(RTCEncodedImage) *)inputImage
+ missingFrames:(BOOL)missingFrames
+ codecSpecificInfo:(nullable id<RTC_OBJC_TYPE(RTCCodecSpecificInfo)>)info
+ renderTimeMs:(int64_t)renderTimeMs {
+ RTC_DCHECK(inputImage.buffer);
+
+ if (_error != noErr) {
+ RTC_LOG(LS_WARNING) << "Last frame decode failed.";
+ _error = noErr;
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ rtc::ScopedCFTypeRef<CMVideoFormatDescriptionRef> inputFormat =
+ rtc::ScopedCF(webrtc::CreateVideoFormatDescription((uint8_t *)inputImage.buffer.bytes,
+ inputImage.buffer.length));
+ if (inputFormat) {
+ // Check if the video format has changed, and reinitialize decoder if
+ // needed.
+ if (!CMFormatDescriptionEqual(inputFormat.get(), _videoFormat)) {
+ [self setVideoFormat:inputFormat.get()];
+ int resetDecompressionSessionError = [self resetDecompressionSession];
+ if (resetDecompressionSessionError != WEBRTC_VIDEO_CODEC_OK) {
+ return resetDecompressionSessionError;
+ }
+ }
+ }
+ if (!_videoFormat) {
+ // We received a frame but we don't have format information so we can't
+ // decode it.
+ // This can happen after backgrounding. We need to wait for the next
+ // sps/pps before we can resume so we request a keyframe by returning an
+ // error.
+ RTC_LOG(LS_WARNING) << "Missing video format. Frame with sps/pps required.";
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ CMSampleBufferRef sampleBuffer = nullptr;
+ if (!webrtc::H264AnnexBBufferToCMSampleBuffer((uint8_t *)inputImage.buffer.bytes,
+ inputImage.buffer.length,
+ _videoFormat,
+ &sampleBuffer,
+ _memoryPool)) {
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ RTC_DCHECK(sampleBuffer);
+ VTDecodeFrameFlags decodeFlags = kVTDecodeFrame_EnableAsynchronousDecompression;
+ std::unique_ptr<RTCFrameDecodeParams> frameDecodeParams;
+ frameDecodeParams.reset(new RTCFrameDecodeParams(_callback, inputImage.timeStamp));
+ OSStatus status = VTDecompressionSessionDecodeFrame(
+ _decompressionSession, sampleBuffer, decodeFlags, frameDecodeParams.release(), nullptr);
+#if defined(WEBRTC_IOS)
+ // Re-initialize the decoder if we have an invalid session while the app is
+ // active or decoder malfunctions and retry the decode request.
+ if ((status == kVTInvalidSessionErr || status == kVTVideoDecoderMalfunctionErr) &&
+ [self resetDecompressionSession] == WEBRTC_VIDEO_CODEC_OK) {
+ RTC_LOG(LS_INFO) << "Failed to decode frame with code: " << status
+ << " retrying decode after decompression session reset";
+ frameDecodeParams.reset(new RTCFrameDecodeParams(_callback, inputImage.timeStamp));
+ status = VTDecompressionSessionDecodeFrame(
+ _decompressionSession, sampleBuffer, decodeFlags, frameDecodeParams.release(), nullptr);
+ }
+#endif
+ CFRelease(sampleBuffer);
+ if (status != noErr) {
+ RTC_LOG(LS_ERROR) << "Failed to decode frame with code: " << status;
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+- (void)setCallback:(RTCVideoDecoderCallback)callback {
+ _callback = callback;
+}
+
+- (void)setError:(OSStatus)error {
+ _error = error;
+}
+
+- (NSInteger)releaseDecoder {
+ // Need to invalidate the session so that callbacks no longer occur and it
+ // is safe to null out the callback.
+ [self destroyDecompressionSession];
+ [self setVideoFormat:nullptr];
+ _callback = nullptr;
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+#pragma mark - Private
+
+- (int)resetDecompressionSession {
+ [self destroyDecompressionSession];
+
+ // Need to wait for the first SPS to initialize decoder.
+ if (!_videoFormat) {
+ return WEBRTC_VIDEO_CODEC_OK;
+ }
+
+ // Set keys for OpenGL and IOSurface compatibility, which makes the decoder
+ // create pixel buffers with GPU backed memory. The intent here is to pass
+ // the pixel buffers directly so we avoid a texture upload later during
+ // rendering. This currently is moot because we are converting back to an
+ // I420 frame after decode, but eventually we will be able to plumb
+ // CVPixelBuffers directly to the renderer.
+ // TODO(tkchin): Maybe only set OpenGL/IOSurface keys if we know that
+ // we can pass CVPixelBuffers as native handles in decoder output.
+ NSDictionary *attributes = @{
+#if defined(WEBRTC_IOS) && (TARGET_OS_MACCATALYST || TARGET_OS_SIMULATOR)
+ (NSString *)kCVPixelBufferMetalCompatibilityKey : @(YES),
+#elif defined(WEBRTC_IOS)
+ (NSString *)kCVPixelBufferOpenGLESCompatibilityKey : @(YES),
+#elif defined(WEBRTC_MAC) && !defined(WEBRTC_ARCH_ARM64)
+ (NSString *)kCVPixelBufferOpenGLCompatibilityKey : @(YES),
+#endif
+#if !(TARGET_OS_SIMULATOR)
+ (NSString *)kCVPixelBufferIOSurfacePropertiesKey : @{},
+#endif
+ (NSString *)
+ kCVPixelBufferPixelFormatTypeKey : @(kCVPixelFormatType_420YpCbCr8BiPlanarFullRange),
+ };
+
+ VTDecompressionOutputCallbackRecord record = {
+ decompressionOutputCallback, (__bridge void *)self,
+ };
+ OSStatus status = VTDecompressionSessionCreate(nullptr,
+ _videoFormat,
+ nullptr,
+ (__bridge CFDictionaryRef)attributes,
+ &record,
+ &_decompressionSession);
+ if (status != noErr) {
+ RTC_LOG(LS_ERROR) << "Failed to create decompression session: " << status;
+ [self destroyDecompressionSession];
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ [self configureDecompressionSession];
+
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+- (void)configureDecompressionSession {
+ RTC_DCHECK(_decompressionSession);
+#if defined(WEBRTC_IOS)
+ VTSessionSetProperty(_decompressionSession, kVTDecompressionPropertyKey_RealTime, kCFBooleanTrue);
+#endif
+}
+
+- (void)destroyDecompressionSession {
+ if (_decompressionSession) {
+#if defined(WEBRTC_IOS)
+ if ([UIDevice isIOS11OrLater]) {
+ VTDecompressionSessionWaitForAsynchronousFrames(_decompressionSession);
+ }
+#endif
+ VTDecompressionSessionInvalidate(_decompressionSession);
+ CFRelease(_decompressionSession);
+ _decompressionSession = nullptr;
+ }
+}
+
+- (void)setVideoFormat:(CMVideoFormatDescriptionRef)videoFormat {
+ if (_videoFormat == videoFormat) {
+ return;
+ }
+ if (_videoFormat) {
+ CFRelease(_videoFormat);
+ }
+ _videoFormat = videoFormat;
+ if (_videoFormat) {
+ CFRetain(_videoFormat);
+ }
+}
+
+- (NSString *)implementationName {
+ return @"VideoToolbox";
+}
+
+@end
diff --git a/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoEncoderFactoryH264.h b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoEncoderFactoryH264.h
new file mode 100644
index 0000000000..45fc4be2ea
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoEncoderFactoryH264.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <Foundation/Foundation.h>
+
+#import "RTCMacros.h"
+#import "RTCVideoEncoderFactory.h"
+
+RTC_OBJC_EXPORT
+@interface RTC_OBJC_TYPE (RTCVideoEncoderFactoryH264) : NSObject <RTC_OBJC_TYPE(RTCVideoEncoderFactory)>
+@end
diff --git a/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoEncoderFactoryH264.m b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoEncoderFactoryH264.m
new file mode 100644
index 0000000000..9843849307
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoEncoderFactoryH264.m
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "RTCVideoEncoderFactoryH264.h"
+
+#import "RTCH264ProfileLevelId.h"
+#import "RTCVideoEncoderH264.h"
+
+@implementation RTC_OBJC_TYPE (RTCVideoEncoderFactoryH264)
+
+- (NSArray<RTC_OBJC_TYPE(RTCVideoCodecInfo) *> *)supportedCodecs {
+ NSMutableArray<RTC_OBJC_TYPE(RTCVideoCodecInfo) *> *codecs = [NSMutableArray array];
+ NSString *codecName = kRTCVideoCodecH264Name;
+
+ NSDictionary<NSString *, NSString *> *constrainedHighParams = @{
+ @"profile-level-id" : kRTCMaxSupportedH264ProfileLevelConstrainedHigh,
+ @"level-asymmetry-allowed" : @"1",
+ @"packetization-mode" : @"1",
+ };
+ RTC_OBJC_TYPE(RTCVideoCodecInfo) *constrainedHighInfo =
+ [[RTC_OBJC_TYPE(RTCVideoCodecInfo) alloc] initWithName:codecName
+ parameters:constrainedHighParams];
+ [codecs addObject:constrainedHighInfo];
+
+ NSDictionary<NSString *, NSString *> *constrainedBaselineParams = @{
+ @"profile-level-id" : kRTCMaxSupportedH264ProfileLevelConstrainedBaseline,
+ @"level-asymmetry-allowed" : @"1",
+ @"packetization-mode" : @"1",
+ };
+ RTC_OBJC_TYPE(RTCVideoCodecInfo) *constrainedBaselineInfo =
+ [[RTC_OBJC_TYPE(RTCVideoCodecInfo) alloc] initWithName:codecName
+ parameters:constrainedBaselineParams];
+ [codecs addObject:constrainedBaselineInfo];
+
+ return [codecs copy];
+}
+
+- (id<RTC_OBJC_TYPE(RTCVideoEncoder)>)createEncoder:(RTC_OBJC_TYPE(RTCVideoCodecInfo) *)info {
+ return [[RTC_OBJC_TYPE(RTCVideoEncoderH264) alloc] initWithCodecInfo:info];
+}
+
+@end
diff --git a/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoEncoderH264.h b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoEncoderH264.h
new file mode 100644
index 0000000000..9f4f4c7c8d
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoEncoderH264.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <Foundation/Foundation.h>
+
+#import "RTCMacros.h"
+#import "RTCVideoCodecInfo.h"
+#import "RTCVideoEncoder.h"
+
+RTC_OBJC_EXPORT
+@interface RTC_OBJC_TYPE (RTCVideoEncoderH264) : NSObject <RTC_OBJC_TYPE(RTCVideoEncoder)>
+
+- (instancetype)initWithCodecInfo:(RTC_OBJC_TYPE(RTCVideoCodecInfo) *)codecInfo;
+
+@end
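
One more usage sketch (hypothetical, based only on the declarations in this diff): building the VideoToolbox-backed H264 encoder from a codec info carrying the same SDP parameters the factories above advertise.

#import <Foundation/Foundation.h>
#import "RTCH264ProfileLevelId.h"
#import "RTCVideoEncoderH264.h"
#import "base/RTCVideoCodecInfo.h"

// Sketch: constrained-baseline H264 encoder with packetization mode 1.
static id<RTC_OBJC_TYPE(RTCVideoEncoder)> MakeH264Encoder(void) {
  NSDictionary<NSString *, NSString *> *params = @{
    @"profile-level-id" : kRTCMaxSupportedH264ProfileLevelConstrainedBaseline,
    @"level-asymmetry-allowed" : @"1",
    @"packetization-mode" : @"1",
  };
  RTC_OBJC_TYPE(RTCVideoCodecInfo) *info =
      [[RTC_OBJC_TYPE(RTCVideoCodecInfo) alloc] initWithName:kRTCVideoCodecH264Name
                                                  parameters:params];
  return [[RTC_OBJC_TYPE(RTCVideoEncoderH264) alloc] initWithCodecInfo:info];
}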
diff --git a/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoEncoderH264.mm b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoEncoderH264.mm
new file mode 100644
index 0000000000..7dbbfaf019
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_codec/RTCVideoEncoderH264.mm
@@ -0,0 +1,819 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#import "RTCVideoEncoderH264.h"
+
+#import <VideoToolbox/VideoToolbox.h>
+#include <vector>
+
+#if defined(WEBRTC_IOS)
+#import "helpers/UIDevice+RTCDevice.h"
+#endif
+#import "RTCCodecSpecificInfoH264.h"
+#import "RTCH264ProfileLevelId.h"
+#import "api/peerconnection/RTCVideoCodecInfo+Private.h"
+#import "base/RTCCodecSpecificInfo.h"
+#import "base/RTCI420Buffer.h"
+#import "base/RTCVideoEncoder.h"
+#import "base/RTCVideoFrame.h"
+#import "base/RTCVideoFrameBuffer.h"
+#import "components/video_frame_buffer/RTCCVPixelBuffer.h"
+#import "helpers.h"
+
+#include "api/video_codecs/h264_profile_level_id.h"
+#include "common_video/h264/h264_bitstream_parser.h"
+#include "common_video/include/bitrate_adjuster.h"
+#include "modules/video_coding/include/video_error_codes.h"
+#include "rtc_base/buffer.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/time_utils.h"
+#include "sdk/objc/components/video_codec/nalu_rewriter.h"
+#include "third_party/libyuv/include/libyuv/convert_from.h"
+
+@interface RTC_OBJC_TYPE (RTCVideoEncoderH264)
+()
+
+ - (void)frameWasEncoded : (OSStatus)status flags : (VTEncodeInfoFlags)infoFlags sampleBuffer
+ : (CMSampleBufferRef)sampleBuffer codecSpecificInfo
+ : (id<RTC_OBJC_TYPE(RTCCodecSpecificInfo)>)codecSpecificInfo width : (int32_t)width height
+ : (int32_t)height renderTimeMs : (int64_t)renderTimeMs timestamp : (uint32_t)timestamp rotation
+ : (RTCVideoRotation)rotation;
+
+@end
+
+namespace { // anonymous namespace
+
+// The ratio between kVTCompressionPropertyKey_DataRateLimits and
+// kVTCompressionPropertyKey_AverageBitRate. The data rate limit is set higher
+// than the average bit rate to avoid undershooting the target.
+const float kLimitToAverageBitRateFactor = 1.5f;
+// These thresholds deviate from the default h264 QP thresholds, as they
+// have been found to work better on devices that support VideoToolbox
+const int kLowH264QpThreshold = 28;
+const int kHighH264QpThreshold = 39;
+
+const OSType kNV12PixelFormat = kCVPixelFormatType_420YpCbCr8BiPlanarFullRange;
+
+// Struct that we pass to the encoder per frame to encode. We receive it again
+// in the encoder callback.
+struct RTCFrameEncodeParams {
+ RTCFrameEncodeParams(RTC_OBJC_TYPE(RTCVideoEncoderH264) * e,
+ RTC_OBJC_TYPE(RTCCodecSpecificInfoH264) * csi,
+ int32_t w,
+ int32_t h,
+ int64_t rtms,
+ uint32_t ts,
+ RTCVideoRotation r)
+ : encoder(e), width(w), height(h), render_time_ms(rtms), timestamp(ts), rotation(r) {
+ if (csi) {
+ codecSpecificInfo = csi;
+ } else {
+ codecSpecificInfo = [[RTC_OBJC_TYPE(RTCCodecSpecificInfoH264) alloc] init];
+ }
+ }
+
+ RTC_OBJC_TYPE(RTCVideoEncoderH264) * encoder;
+ RTC_OBJC_TYPE(RTCCodecSpecificInfoH264) * codecSpecificInfo;
+ int32_t width;
+ int32_t height;
+ int64_t render_time_ms;
+ uint32_t timestamp;
+ RTCVideoRotation rotation;
+};
+
+// We receive I420Frames as input, but we need to feed CVPixelBuffers into the
+// encoder. This performs the copy and format conversion.
+// TODO(tkchin): See if encoder will accept i420 frames and compare performance.
+bool CopyVideoFrameToNV12PixelBuffer(id<RTC_OBJC_TYPE(RTCI420Buffer)> frameBuffer,
+ CVPixelBufferRef pixelBuffer) {
+ RTC_DCHECK(pixelBuffer);
+ RTC_DCHECK_EQ(CVPixelBufferGetPixelFormatType(pixelBuffer), kNV12PixelFormat);
+ RTC_DCHECK_EQ(CVPixelBufferGetHeightOfPlane(pixelBuffer, 0), frameBuffer.height);
+ RTC_DCHECK_EQ(CVPixelBufferGetWidthOfPlane(pixelBuffer, 0), frameBuffer.width);
+
+ CVReturn cvRet = CVPixelBufferLockBaseAddress(pixelBuffer, 0);
+ if (cvRet != kCVReturnSuccess) {
+ RTC_LOG(LS_ERROR) << "Failed to lock base address: " << cvRet;
+ return false;
+ }
+ uint8_t *dstY = reinterpret_cast<uint8_t *>(CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0));
+ int dstStrideY = CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 0);
+ uint8_t *dstUV = reinterpret_cast<uint8_t *>(CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 1));
+ int dstStrideUV = CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 1);
+ // Convert I420 to NV12.
+ int ret = libyuv::I420ToNV12(frameBuffer.dataY,
+ frameBuffer.strideY,
+ frameBuffer.dataU,
+ frameBuffer.strideU,
+ frameBuffer.dataV,
+ frameBuffer.strideV,
+ dstY,
+ dstStrideY,
+ dstUV,
+ dstStrideUV,
+ frameBuffer.width,
+ frameBuffer.height);
+ CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
+ if (ret) {
+ RTC_LOG(LS_ERROR) << "Error converting I420 VideoFrame to NV12 :" << ret;
+ return false;
+ }
+ return true;
+}
+
+CVPixelBufferRef CreatePixelBuffer(CVPixelBufferPoolRef pixel_buffer_pool) {
+ if (!pixel_buffer_pool) {
+ RTC_LOG(LS_ERROR) << "Failed to get pixel buffer pool.";
+ return nullptr;
+ }
+ CVPixelBufferRef pixel_buffer;
+ CVReturn ret = CVPixelBufferPoolCreatePixelBuffer(nullptr, pixel_buffer_pool, &pixel_buffer);
+ if (ret != kCVReturnSuccess) {
+ RTC_LOG(LS_ERROR) << "Failed to create pixel buffer: " << ret;
+ // We probably want to drop frames here, since failure probably means
+ // that the pool is empty.
+ return nullptr;
+ }
+ return pixel_buffer;
+}
+
+// This is the callback function that VideoToolbox calls when encode is
+// complete. From inspection this happens on its own queue.
+void compressionOutputCallback(void *encoder,
+ void *params,
+ OSStatus status,
+ VTEncodeInfoFlags infoFlags,
+ CMSampleBufferRef sampleBuffer) {
+ if (!params) {
+ // If there are pending callbacks when the encoder is destroyed, this can happen.
+ return;
+ }
+ std::unique_ptr<RTCFrameEncodeParams> encodeParams(
+ reinterpret_cast<RTCFrameEncodeParams *>(params));
+ [encodeParams->encoder frameWasEncoded:status
+ flags:infoFlags
+ sampleBuffer:sampleBuffer
+ codecSpecificInfo:encodeParams->codecSpecificInfo
+ width:encodeParams->width
+ height:encodeParams->height
+ renderTimeMs:encodeParams->render_time_ms
+ timestamp:encodeParams->timestamp
+ rotation:encodeParams->rotation];
+}
+
+// Extract VideoToolbox profile out of the webrtc::SdpVideoFormat. If there is
+// no specific VideoToolbox profile for the specified level, AutoLevel will be
+// returned. The user must initialize the encoder with a resolution and
+// framerate conforming to the selected H264 level regardless.
+CFStringRef ExtractProfile(const webrtc::H264ProfileLevelId &profile_level_id) {
+ switch (profile_level_id.profile) {
+ case webrtc::H264Profile::kProfileConstrainedBaseline:
+ case webrtc::H264Profile::kProfileBaseline:
+ switch (profile_level_id.level) {
+ case webrtc::H264Level::kLevel3:
+ return kVTProfileLevel_H264_Baseline_3_0;
+ case webrtc::H264Level::kLevel3_1:
+ return kVTProfileLevel_H264_Baseline_3_1;
+ case webrtc::H264Level::kLevel3_2:
+ return kVTProfileLevel_H264_Baseline_3_2;
+ case webrtc::H264Level::kLevel4:
+ return kVTProfileLevel_H264_Baseline_4_0;
+ case webrtc::H264Level::kLevel4_1:
+ return kVTProfileLevel_H264_Baseline_4_1;
+ case webrtc::H264Level::kLevel4_2:
+ return kVTProfileLevel_H264_Baseline_4_2;
+ case webrtc::H264Level::kLevel5:
+ return kVTProfileLevel_H264_Baseline_5_0;
+ case webrtc::H264Level::kLevel5_1:
+ return kVTProfileLevel_H264_Baseline_5_1;
+ case webrtc::H264Level::kLevel5_2:
+ return kVTProfileLevel_H264_Baseline_5_2;
+ case webrtc::H264Level::kLevel1:
+ case webrtc::H264Level::kLevel1_b:
+ case webrtc::H264Level::kLevel1_1:
+ case webrtc::H264Level::kLevel1_2:
+ case webrtc::H264Level::kLevel1_3:
+ case webrtc::H264Level::kLevel2:
+ case webrtc::H264Level::kLevel2_1:
+ case webrtc::H264Level::kLevel2_2:
+ return kVTProfileLevel_H264_Baseline_AutoLevel;
+ }
+
+ case webrtc::H264Profile::kProfileMain:
+ switch (profile_level_id.level) {
+ case webrtc::H264Level::kLevel3:
+ return kVTProfileLevel_H264_Main_3_0;
+ case webrtc::H264Level::kLevel3_1:
+ return kVTProfileLevel_H264_Main_3_1;
+ case webrtc::H264Level::kLevel3_2:
+ return kVTProfileLevel_H264_Main_3_2;
+ case webrtc::H264Level::kLevel4:
+ return kVTProfileLevel_H264_Main_4_0;
+ case webrtc::H264Level::kLevel4_1:
+ return kVTProfileLevel_H264_Main_4_1;
+ case webrtc::H264Level::kLevel4_2:
+ return kVTProfileLevel_H264_Main_4_2;
+ case webrtc::H264Level::kLevel5:
+ return kVTProfileLevel_H264_Main_5_0;
+ case webrtc::H264Level::kLevel5_1:
+ return kVTProfileLevel_H264_Main_5_1;
+ case webrtc::H264Level::kLevel5_2:
+ return kVTProfileLevel_H264_Main_5_2;
+ case webrtc::H264Level::kLevel1:
+ case webrtc::H264Level::kLevel1_b:
+ case webrtc::H264Level::kLevel1_1:
+ case webrtc::H264Level::kLevel1_2:
+ case webrtc::H264Level::kLevel1_3:
+ case webrtc::H264Level::kLevel2:
+ case webrtc::H264Level::kLevel2_1:
+ case webrtc::H264Level::kLevel2_2:
+ return kVTProfileLevel_H264_Main_AutoLevel;
+ }
+
+ case webrtc::H264Profile::kProfileConstrainedHigh:
+ case webrtc::H264Profile::kProfileHigh:
+ case webrtc::H264Profile::kProfilePredictiveHigh444:
+ switch (profile_level_id.level) {
+ case webrtc::H264Level::kLevel3:
+ return kVTProfileLevel_H264_High_3_0;
+ case webrtc::H264Level::kLevel3_1:
+ return kVTProfileLevel_H264_High_3_1;
+ case webrtc::H264Level::kLevel3_2:
+ return kVTProfileLevel_H264_High_3_2;
+ case webrtc::H264Level::kLevel4:
+ return kVTProfileLevel_H264_High_4_0;
+ case webrtc::H264Level::kLevel4_1:
+ return kVTProfileLevel_H264_High_4_1;
+ case webrtc::H264Level::kLevel4_2:
+ return kVTProfileLevel_H264_High_4_2;
+ case webrtc::H264Level::kLevel5:
+ return kVTProfileLevel_H264_High_5_0;
+ case webrtc::H264Level::kLevel5_1:
+ return kVTProfileLevel_H264_High_5_1;
+ case webrtc::H264Level::kLevel5_2:
+ return kVTProfileLevel_H264_High_5_2;
+ case webrtc::H264Level::kLevel1:
+ case webrtc::H264Level::kLevel1_b:
+ case webrtc::H264Level::kLevel1_1:
+ case webrtc::H264Level::kLevel1_2:
+ case webrtc::H264Level::kLevel1_3:
+ case webrtc::H264Level::kLevel2:
+ case webrtc::H264Level::kLevel2_1:
+ case webrtc::H264Level::kLevel2_2:
+ return kVTProfileLevel_H264_High_AutoLevel;
+ }
+ }
+}
+
+// The function returns the max allowed sample rate (pixels per second) that
+// can be processed by a given encoder with `profile_level_id`.
+// See https://www.itu.int/rec/dologin_pub.asp?lang=e&id=T-REC-H.264-201610-S!!PDF-E&type=items
+// for details.
+NSUInteger GetMaxSampleRate(const webrtc::H264ProfileLevelId &profile_level_id) {
+ switch (profile_level_id.level) {
+ case webrtc::H264Level::kLevel3:
+ return 10368000;
+ case webrtc::H264Level::kLevel3_1:
+ return 27648000;
+ case webrtc::H264Level::kLevel3_2:
+ return 55296000;
+ case webrtc::H264Level::kLevel4:
+ case webrtc::H264Level::kLevel4_1:
+ return 62914560;
+ case webrtc::H264Level::kLevel4_2:
+ return 133693440;
+ case webrtc::H264Level::kLevel5:
+ return 150994944;
+ case webrtc::H264Level::kLevel5_1:
+ return 251658240;
+ case webrtc::H264Level::kLevel5_2:
+ return 530841600;
+ case webrtc::H264Level::kLevel1:
+ case webrtc::H264Level::kLevel1_b:
+ case webrtc::H264Level::kLevel1_1:
+ case webrtc::H264Level::kLevel1_2:
+ case webrtc::H264Level::kLevel1_3:
+ case webrtc::H264Level::kLevel2:
+ case webrtc::H264Level::kLevel2_1:
+ case webrtc::H264Level::kLevel2_2:
+ // Zero means auto rate setting.
+ return 0;
+ }
+}
+} // namespace
+
+@implementation RTC_OBJC_TYPE (RTCVideoEncoderH264) {
+ RTC_OBJC_TYPE(RTCVideoCodecInfo) * _codecInfo;
+ std::unique_ptr<webrtc::BitrateAdjuster> _bitrateAdjuster;
+ uint32_t _targetBitrateBps;
+ uint32_t _encoderBitrateBps;
+ uint32_t _encoderFrameRate;
+ uint32_t _maxAllowedFrameRate;
+ RTCH264PacketizationMode _packetizationMode;
+ absl::optional<webrtc::H264ProfileLevelId> _profile_level_id;
+ RTCVideoEncoderCallback _callback;
+ int32_t _width;
+ int32_t _height;
+ VTCompressionSessionRef _compressionSession;
+ CVPixelBufferPoolRef _pixelBufferPool;
+ RTCVideoCodecMode _mode;
+
+ webrtc::H264BitstreamParser _h264BitstreamParser;
+ std::vector<uint8_t> _frameScaleBuffer;
+}
+
+// .5 is set as a minimum to prevent overcompensating for large temporary
+// overshoots. We don't want to degrade video quality too badly.
+// .95 is set to prevent oscillations. When a lower bitrate is set on the
+// encoder than previously set, its output seems to have a brief period of
+// drastically reduced bitrate, so we want to avoid that. In steady state
+// conditions, 0.95 seems to give us better overall bitrate over long periods
+// of time.
+- (instancetype)initWithCodecInfo:(RTC_OBJC_TYPE(RTCVideoCodecInfo) *)codecInfo {
+ if (self = [super init]) {
+ _codecInfo = codecInfo;
+ _bitrateAdjuster.reset(new webrtc::BitrateAdjuster(.5, .95));
+ _packetizationMode = RTCH264PacketizationModeNonInterleaved;
+ _profile_level_id =
+ webrtc::ParseSdpForH264ProfileLevelId([codecInfo nativeSdpVideoFormat].parameters);
+ RTC_DCHECK(_profile_level_id);
+ RTC_LOG(LS_INFO) << "Using profile " << CFStringToString(ExtractProfile(*_profile_level_id));
+ RTC_CHECK([codecInfo.name isEqualToString:kRTCVideoCodecH264Name]);
+ }
+ return self;
+}
+
+- (void)dealloc {
+ [self destroyCompressionSession];
+}
+
+- (NSInteger)startEncodeWithSettings:(RTC_OBJC_TYPE(RTCVideoEncoderSettings) *)settings
+ numberOfCores:(int)numberOfCores {
+ RTC_DCHECK(settings);
+ RTC_DCHECK([settings.name isEqualToString:kRTCVideoCodecH264Name]);
+
+ _width = settings.width;
+ _height = settings.height;
+ _mode = settings.mode;
+
+ uint32_t aligned_width = (((_width + 15) >> 4) << 4);
+ uint32_t aligned_height = (((_height + 15) >> 4) << 4);
+ _maxAllowedFrameRate = static_cast<uint32_t>(GetMaxSampleRate(*_profile_level_id) /
+ (aligned_width * aligned_height));
+
+ // We can only set average bitrate on the HW encoder.
+ _targetBitrateBps = settings.startBitrate * 1000; // startBitrate is in kbps.
+ _bitrateAdjuster->SetTargetBitrateBps(_targetBitrateBps);
+ _encoderFrameRate = MIN(settings.maxFramerate, _maxAllowedFrameRate);
+ if (settings.maxFramerate > _maxAllowedFrameRate && _maxAllowedFrameRate > 0) {
+ RTC_LOG(LS_WARNING) << "Initial encoder frame rate setting " << settings.maxFramerate
+ << " is larger than the "
+ << "maximal allowed frame rate " << _maxAllowedFrameRate << ".";
+ }
+
+ // TODO(tkchin): Try setting payload size via
+ // kVTCompressionPropertyKey_MaxH264SliceBytes.
+
+ return [self resetCompressionSessionWithPixelFormat:kNV12PixelFormat];
+}
+
+- (NSInteger)encode:(RTC_OBJC_TYPE(RTCVideoFrame) *)frame
+ codecSpecificInfo:(nullable id<RTC_OBJC_TYPE(RTCCodecSpecificInfo)>)codecSpecificInfo
+ frameTypes:(NSArray<NSNumber *> *)frameTypes {
+ if (!_callback || !_compressionSession) {
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ }
+ BOOL isKeyframeRequired = NO;
+
+ // Get a pixel buffer from the pool and copy frame data over.
+ if ([self resetCompressionSessionIfNeededWithFrame:frame]) {
+ isKeyframeRequired = YES;
+ }
+
+ CVPixelBufferRef pixelBuffer = nullptr;
+ if ([frame.buffer isKindOfClass:[RTC_OBJC_TYPE(RTCCVPixelBuffer) class]]) {
+ // Native frame buffer
+ RTC_OBJC_TYPE(RTCCVPixelBuffer) *rtcPixelBuffer =
+ (RTC_OBJC_TYPE(RTCCVPixelBuffer) *)frame.buffer;
+ if (![rtcPixelBuffer requiresCropping]) {
+ // This pixel buffer might have a higher resolution than what the
+ // compression session is configured to. The compression session can
+ // handle that and will output encoded frames in the configured
+ // resolution regardless of the input pixel buffer resolution.
+ pixelBuffer = rtcPixelBuffer.pixelBuffer;
+ CVBufferRetain(pixelBuffer);
+ } else {
+ // Cropping required, we need to crop and scale to a new pixel buffer.
+ pixelBuffer = CreatePixelBuffer(_pixelBufferPool);
+ if (!pixelBuffer) {
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ int dstWidth = CVPixelBufferGetWidth(pixelBuffer);
+ int dstHeight = CVPixelBufferGetHeight(pixelBuffer);
+ if ([rtcPixelBuffer requiresScalingToWidth:dstWidth height:dstHeight]) {
+ int size =
+ [rtcPixelBuffer bufferSizeForCroppingAndScalingToWidth:dstWidth height:dstHeight];
+ _frameScaleBuffer.resize(size);
+ } else {
+ _frameScaleBuffer.clear();
+ }
+ _frameScaleBuffer.shrink_to_fit();
+ if (![rtcPixelBuffer cropAndScaleTo:pixelBuffer withTempBuffer:_frameScaleBuffer.data()]) {
+ CVBufferRelease(pixelBuffer);
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ }
+ }
+
+ if (!pixelBuffer) {
+ // We did not have a native frame buffer
+ RTC_DCHECK_EQ(frame.width, _width);
+ RTC_DCHECK_EQ(frame.height, _height);
+ pixelBuffer = CreatePixelBuffer(_pixelBufferPool);
+ if (!pixelBuffer) {
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ RTC_DCHECK(pixelBuffer);
+ if (!CopyVideoFrameToNV12PixelBuffer([frame.buffer toI420], pixelBuffer)) {
+ RTC_LOG(LS_ERROR) << "Failed to copy frame data.";
+ CVBufferRelease(pixelBuffer);
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ }
+
+ // Check if we need a keyframe.
+ if (!isKeyframeRequired && frameTypes) {
+ for (NSNumber *frameType in frameTypes) {
+ if ((RTCFrameType)frameType.intValue == RTCFrameTypeVideoFrameKey) {
+ isKeyframeRequired = YES;
+ break;
+ }
+ }
+ }
+
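+  // CMTimeMake(value, timescale) represents value / timescale seconds, so the
+  // millisecond timestamp below combined with a timescale of 1000 preserves
+  // the frame's capture time (e.g. 33'000'000 ns becomes CMTimeMake(33, 1000),
+  // i.e. 0.033 s).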
+ CMTime presentationTimeStamp = CMTimeMake(frame.timeStampNs / rtc::kNumNanosecsPerMillisec, 1000);
+ CFDictionaryRef frameProperties = nullptr;
+ if (isKeyframeRequired) {
+ CFTypeRef keys[] = {kVTEncodeFrameOptionKey_ForceKeyFrame};
+ CFTypeRef values[] = {kCFBooleanTrue};
+ frameProperties = CreateCFTypeDictionary(keys, values, 1);
+ }
+
+ std::unique_ptr<RTCFrameEncodeParams> encodeParams;
+ encodeParams.reset(new RTCFrameEncodeParams(self,
+ codecSpecificInfo,
+ _width,
+ _height,
+ frame.timeStampNs / rtc::kNumNanosecsPerMillisec,
+ frame.timeStamp,
+ frame.rotation));
+ encodeParams->codecSpecificInfo.packetizationMode = _packetizationMode;
+
+ // Update the bitrate if needed.
+ [self setBitrateBps:_bitrateAdjuster->GetAdjustedBitrateBps() frameRate:_encoderFrameRate];
+
+ OSStatus status = VTCompressionSessionEncodeFrame(_compressionSession,
+ pixelBuffer,
+ presentationTimeStamp,
+ kCMTimeInvalid,
+ frameProperties,
+ encodeParams.release(),
+ nullptr);
+ if (frameProperties) {
+ CFRelease(frameProperties);
+ }
+ if (pixelBuffer) {
+ CVBufferRelease(pixelBuffer);
+ }
+
+ if (status == kVTInvalidSessionErr) {
+ // This error occurs when entering foreground after backgrounding the app.
+ RTC_LOG(LS_ERROR) << "Invalid compression session, resetting.";
+ [self resetCompressionSessionWithPixelFormat:[self pixelFormatOfFrame:frame]];
+
+ return WEBRTC_VIDEO_CODEC_NO_OUTPUT;
+ } else if (status == kVTVideoEncoderMalfunctionErr) {
+ // Sometimes the encoder malfunctions and needs to be restarted.
+ RTC_LOG(LS_ERROR)
+ << "Encountered video encoder malfunction error. Resetting compression session.";
+ [self resetCompressionSessionWithPixelFormat:[self pixelFormatOfFrame:frame]];
+
+ return WEBRTC_VIDEO_CODEC_NO_OUTPUT;
+ } else if (status != noErr) {
+ RTC_LOG(LS_ERROR) << "Failed to encode frame with code: " << status;
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+- (void)setCallback:(RTCVideoEncoderCallback)callback {
+ _callback = callback;
+}
+
+- (int)setBitrate:(uint32_t)bitrateKbit framerate:(uint32_t)framerate {
+ _targetBitrateBps = 1000 * bitrateKbit;
+ _bitrateAdjuster->SetTargetBitrateBps(_targetBitrateBps);
+ if (framerate > _maxAllowedFrameRate && _maxAllowedFrameRate > 0) {
+ RTC_LOG(LS_WARNING) << "Encoder frame rate setting " << framerate << " is larger than the "
+ << "maximal allowed frame rate " << _maxAllowedFrameRate << ".";
+ }
+ framerate = MIN(framerate, _maxAllowedFrameRate);
+ [self setBitrateBps:_bitrateAdjuster->GetAdjustedBitrateBps() frameRate:framerate];
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+- (NSInteger)resolutionAlignment {
+ return 1;
+}
+
+- (BOOL)applyAlignmentToAllSimulcastLayers {
+ return NO;
+}
+
+- (BOOL)supportsNativeHandle {
+ return YES;
+}
+
+#pragma mark - Private
+
+- (NSInteger)releaseEncoder {
+ // Need to destroy so that the session is invalidated and won't use the
+ // callback anymore. Do not remove callback until the session is invalidated
+ // since async encoder callbacks can occur until invalidation.
+ [self destroyCompressionSession];
+ _callback = nullptr;
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+- (OSType)pixelFormatOfFrame:(RTC_OBJC_TYPE(RTCVideoFrame) *)frame {
+ // Use NV12 for non-native frames.
+ if ([frame.buffer isKindOfClass:[RTC_OBJC_TYPE(RTCCVPixelBuffer) class]]) {
+ RTC_OBJC_TYPE(RTCCVPixelBuffer) *rtcPixelBuffer =
+ (RTC_OBJC_TYPE(RTCCVPixelBuffer) *)frame.buffer;
+ return CVPixelBufferGetPixelFormatType(rtcPixelBuffer.pixelBuffer);
+ }
+
+ return kNV12PixelFormat;
+}
+
+- (BOOL)resetCompressionSessionIfNeededWithFrame:(RTC_OBJC_TYPE(RTCVideoFrame) *)frame {
+ BOOL resetCompressionSession = NO;
+
+ // If we're capturing native frames in another pixel format than the compression session is
+ // configured with, make sure the compression session is reset using the correct pixel format.
+ OSType framePixelFormat = [self pixelFormatOfFrame:frame];
+
+ if (_compressionSession) {
+ // The pool attribute `kCVPixelBufferPixelFormatTypeKey` can contain either an array of pixel
+ // formats or a single pixel format.
+ NSDictionary *poolAttributes =
+ (__bridge NSDictionary *)CVPixelBufferPoolGetPixelBufferAttributes(_pixelBufferPool);
+ id pixelFormats =
+ [poolAttributes objectForKey:(__bridge NSString *)kCVPixelBufferPixelFormatTypeKey];
+ NSArray<NSNumber *> *compressionSessionPixelFormats = nil;
+ if ([pixelFormats isKindOfClass:[NSArray class]]) {
+ compressionSessionPixelFormats = (NSArray *)pixelFormats;
+ } else if ([pixelFormats isKindOfClass:[NSNumber class]]) {
+ compressionSessionPixelFormats = @[ (NSNumber *)pixelFormats ];
+ }
+
+ if (![compressionSessionPixelFormats
+ containsObject:[NSNumber numberWithLong:framePixelFormat]]) {
+ resetCompressionSession = YES;
+ RTC_LOG(LS_INFO) << "Resetting compression session due to non-matching pixel format.";
+ }
+ } else {
+ resetCompressionSession = YES;
+ }
+
+ if (resetCompressionSession) {
+ [self resetCompressionSessionWithPixelFormat:framePixelFormat];
+ }
+ return resetCompressionSession;
+}
+
+- (int)resetCompressionSessionWithPixelFormat:(OSType)framePixelFormat {
+ [self destroyCompressionSession];
+
+ // Set source image buffer attributes. These attributes will be present on
+ // buffers retrieved from the encoder's pixel buffer pool.
+ NSDictionary *sourceAttributes = @{
+#if defined(WEBRTC_IOS) && (TARGET_OS_MACCATALYST || TARGET_OS_SIMULATOR)
+ (NSString *)kCVPixelBufferMetalCompatibilityKey : @(YES),
+#elif defined(WEBRTC_IOS)
+ (NSString *)kCVPixelBufferOpenGLESCompatibilityKey : @(YES),
+#elif defined(WEBRTC_MAC) && !defined(WEBRTC_ARCH_ARM64)
+ (NSString *)kCVPixelBufferOpenGLCompatibilityKey : @(YES),
+#endif
+ (NSString *)kCVPixelBufferIOSurfacePropertiesKey : @{},
+ (NSString *)kCVPixelBufferPixelFormatTypeKey : @(framePixelFormat),
+ };
+
+ NSDictionary *encoder_specs;
+#if defined(WEBRTC_MAC) && !defined(WEBRTC_IOS)
+  // Currently hw acceleration is supported above 360p on Mac; below 360p the
+  // compression session will be created with hw acceleration disabled.
+ encoder_specs = @{
+ (NSString *)kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder : @(YES),
+ };
+
+#endif
+ OSStatus status = VTCompressionSessionCreate(
+ nullptr, // use default allocator
+ _width,
+ _height,
+ kCMVideoCodecType_H264,
+ (__bridge CFDictionaryRef)encoder_specs, // use hardware accelerated encoder if available
+ (__bridge CFDictionaryRef)sourceAttributes,
+ nullptr, // use default compressed data allocator
+ compressionOutputCallback,
+ nullptr,
+ &_compressionSession);
+ if (status != noErr) {
+ RTC_LOG(LS_ERROR) << "Failed to create compression session: " << status;
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+#if defined(WEBRTC_MAC) && !defined(WEBRTC_IOS)
+ CFBooleanRef hwaccl_enabled = nullptr;
+ status = VTSessionCopyProperty(_compressionSession,
+ kVTCompressionPropertyKey_UsingHardwareAcceleratedVideoEncoder,
+ nullptr,
+ &hwaccl_enabled);
+ if (status == noErr && (CFBooleanGetValue(hwaccl_enabled))) {
+ RTC_LOG(LS_INFO) << "Compression session created with hw accl enabled";
+ } else {
+ RTC_LOG(LS_INFO) << "Compression session created with hw accl disabled";
+ }
+#endif
+ [self configureCompressionSession];
+
+  // The pixel buffer pool is dependent on the compression session, so if the
+  // session is reset, the pool should be reset as well.
+ _pixelBufferPool = VTCompressionSessionGetPixelBufferPool(_compressionSession);
+
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+- (void)configureCompressionSession {
+ RTC_DCHECK(_compressionSession);
+ SetVTSessionProperty(_compressionSession, kVTCompressionPropertyKey_RealTime, true);
+ SetVTSessionProperty(_compressionSession,
+ kVTCompressionPropertyKey_ProfileLevel,
+ ExtractProfile(*_profile_level_id));
+ SetVTSessionProperty(_compressionSession, kVTCompressionPropertyKey_AllowFrameReordering, false);
+ [self setEncoderBitrateBps:_targetBitrateBps frameRate:_encoderFrameRate];
+ // TODO(tkchin): Look at entropy mode and colorspace matrices.
+ // TODO(tkchin): Investigate to see if there's any way to make this work.
+ // May need it to interop with Android. Currently this call just fails.
+ // On inspecting encoder output on iOS8, this value is set to 6.
+ // internal::SetVTSessionProperty(compression_session_,
+ // kVTCompressionPropertyKey_MaxFrameDelayCount,
+ // 1);
+
+ // Set a relatively large value for keyframe emission (7200 frames or 4 minutes).
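+  // Both limits below are enforced together: a keyframe is expected when
+  // either the frame-count or the duration threshold is reached, whichever
+  // comes first.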
+ SetVTSessionProperty(_compressionSession, kVTCompressionPropertyKey_MaxKeyFrameInterval, 7200);
+ SetVTSessionProperty(
+ _compressionSession, kVTCompressionPropertyKey_MaxKeyFrameIntervalDuration, 240);
+}
+
+- (void)destroyCompressionSession {
+ if (_compressionSession) {
+ VTCompressionSessionInvalidate(_compressionSession);
+ CFRelease(_compressionSession);
+ _compressionSession = nullptr;
+ _pixelBufferPool = nullptr;
+ }
+}
+
+- (NSString *)implementationName {
+ return @"VideoToolbox";
+}
+
+- (void)setBitrateBps:(uint32_t)bitrateBps frameRate:(uint32_t)frameRate {
+ if (_encoderBitrateBps != bitrateBps || _encoderFrameRate != frameRate) {
+ [self setEncoderBitrateBps:bitrateBps frameRate:frameRate];
+ }
+}
+
+- (void)setEncoderBitrateBps:(uint32_t)bitrateBps frameRate:(uint32_t)frameRate {
+ if (_compressionSession) {
+ SetVTSessionProperty(_compressionSession, kVTCompressionPropertyKey_AverageBitRate, bitrateBps);
+
+ // With zero `_maxAllowedFrameRate`, we fall back to automatic frame rate detection.
+ if (_maxAllowedFrameRate > 0) {
+ SetVTSessionProperty(
+ _compressionSession, kVTCompressionPropertyKey_ExpectedFrameRate, frameRate);
+ }
+
+ // TODO(tkchin): Add a helper method to set array value.
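+    // kVTCompressionPropertyKey_DataRateLimits takes alternating
+    // [bytes, seconds] pairs; the single pair built below caps the output at
+    // `bitrateBps * kLimitToAverageBitRateFactor` bits over a one-second
+    // window, expressed in bytes. Illustrative arithmetic only (the factor is
+    // defined elsewhere in this file): a 1'000'000 bps target with a
+    // hypothetical factor of 1.5 yields 1'500'000 / 8 = 187'500 bytes per
+    // second.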
+ int64_t dataLimitBytesPerSecondValue =
+ static_cast<int64_t>(bitrateBps * kLimitToAverageBitRateFactor / 8);
+ CFNumberRef bytesPerSecond =
+ CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt64Type, &dataLimitBytesPerSecondValue);
+ int64_t oneSecondValue = 1;
+ CFNumberRef oneSecond =
+ CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt64Type, &oneSecondValue);
+ const void *nums[2] = {bytesPerSecond, oneSecond};
+ CFArrayRef dataRateLimits = CFArrayCreate(nullptr, nums, 2, &kCFTypeArrayCallBacks);
+ OSStatus status = VTSessionSetProperty(
+ _compressionSession, kVTCompressionPropertyKey_DataRateLimits, dataRateLimits);
+ if (bytesPerSecond) {
+ CFRelease(bytesPerSecond);
+ }
+ if (oneSecond) {
+ CFRelease(oneSecond);
+ }
+ if (dataRateLimits) {
+ CFRelease(dataRateLimits);
+ }
+ if (status != noErr) {
+ RTC_LOG(LS_ERROR) << "Failed to set data rate limit with code: " << status;
+ }
+
+ _encoderBitrateBps = bitrateBps;
+ _encoderFrameRate = frameRate;
+ }
+}
+
+- (void)frameWasEncoded:(OSStatus)status
+ flags:(VTEncodeInfoFlags)infoFlags
+ sampleBuffer:(CMSampleBufferRef)sampleBuffer
+ codecSpecificInfo:(id<RTC_OBJC_TYPE(RTCCodecSpecificInfo)>)codecSpecificInfo
+ width:(int32_t)width
+ height:(int32_t)height
+ renderTimeMs:(int64_t)renderTimeMs
+ timestamp:(uint32_t)timestamp
+ rotation:(RTCVideoRotation)rotation {
+ RTCVideoEncoderCallback callback = _callback;
+ if (!callback) {
+ return;
+ }
+ if (status != noErr) {
+ RTC_LOG(LS_ERROR) << "H264 encode failed with code: " << status;
+ return;
+ }
+ if (infoFlags & kVTEncodeInfo_FrameDropped) {
+ RTC_LOG(LS_INFO) << "H264 encode dropped frame.";
+ return;
+ }
+
+ BOOL isKeyframe = NO;
+ CFArrayRef attachments = CMSampleBufferGetSampleAttachmentsArray(sampleBuffer, 0);
+ if (attachments != nullptr && CFArrayGetCount(attachments)) {
+ CFDictionaryRef attachment =
+ static_cast<CFDictionaryRef>(CFArrayGetValueAtIndex(attachments, 0));
+ isKeyframe = !CFDictionaryContainsKey(attachment, kCMSampleAttachmentKey_NotSync);
+ }
+
+ if (isKeyframe) {
+ RTC_LOG(LS_INFO) << "Generated keyframe";
+ }
+
+ __block std::unique_ptr<rtc::Buffer> buffer = std::make_unique<rtc::Buffer>();
+ if (!webrtc::H264CMSampleBufferToAnnexBBuffer(sampleBuffer, isKeyframe, buffer.get())) {
+ return;
+ }
+
+ RTC_OBJC_TYPE(RTCEncodedImage) *frame = [[RTC_OBJC_TYPE(RTCEncodedImage) alloc] init];
+ // This assumes ownership of `buffer` and is responsible for freeing it when done.
+ frame.buffer = [[NSData alloc] initWithBytesNoCopy:buffer->data()
+ length:buffer->size()
+ deallocator:^(void *bytes, NSUInteger size) {
+ buffer.reset();
+ }];
+ frame.encodedWidth = width;
+ frame.encodedHeight = height;
+ frame.frameType = isKeyframe ? RTCFrameTypeVideoFrameKey : RTCFrameTypeVideoFrameDelta;
+ frame.captureTimeMs = renderTimeMs;
+ frame.timeStamp = timestamp;
+ frame.rotation = rotation;
+ frame.contentType = (_mode == RTCVideoCodecModeScreensharing) ? RTCVideoContentTypeScreenshare :
+ RTCVideoContentTypeUnspecified;
+ frame.flags = webrtc::VideoSendTiming::kInvalid;
+
+ _h264BitstreamParser.ParseBitstream(*buffer);
+ frame.qp = @(_h264BitstreamParser.GetLastSliceQp().value_or(-1));
+
+ BOOL res = callback(frame, codecSpecificInfo);
+ if (!res) {
+ RTC_LOG(LS_ERROR) << "Encode callback failed";
+ return;
+ }
+ _bitrateAdjuster->Update(frame.buffer.length);
+}
+
+- (nullable RTC_OBJC_TYPE(RTCVideoEncoderQpThresholds) *)scalingSettings {
+ return [[RTC_OBJC_TYPE(RTCVideoEncoderQpThresholds) alloc]
+ initWithThresholdsLow:kLowH264QpThreshold
+ high:kHighH264QpThreshold];
+}
+
+@end
diff --git a/third_party/libwebrtc/sdk/objc/components/video_codec/UIDevice+H264Profile.h b/third_party/libwebrtc/sdk/objc/components/video_codec/UIDevice+H264Profile.h
new file mode 100644
index 0000000000..a51debb9fa
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_codec/UIDevice+H264Profile.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <UIKit/UIKit.h>
+
+#include "api/video_codecs/h264_profile_level_id.h"
+
+@interface UIDevice (H264Profile)
+
++ (absl::optional<webrtc::H264ProfileLevelId>)maxSupportedH264Profile;
+
+@end
diff --git a/third_party/libwebrtc/sdk/objc/components/video_codec/UIDevice+H264Profile.mm b/third_party/libwebrtc/sdk/objc/components/video_codec/UIDevice+H264Profile.mm
new file mode 100644
index 0000000000..0ef6a8d77c
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_codec/UIDevice+H264Profile.mm
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "UIDevice+H264Profile.h"
+#import "helpers/UIDevice+RTCDevice.h"
+
+#include <algorithm>
+
+namespace {
+
+using namespace webrtc;
+
+struct SupportedH264Profile {
+ const RTCDeviceType deviceType;
+ const H264ProfileLevelId profile;
+};
+
+constexpr SupportedH264Profile kH264MaxSupportedProfiles[] = {
+ // iPhones with at least iOS 9
+ {RTCDeviceTypeIPhone13ProMax,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP848
+ {RTCDeviceTypeIPhone13Pro,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP852
+ {RTCDeviceTypeIPhone13,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP851
+ {RTCDeviceTypeIPhone13Mini,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP847
+ {RTCDeviceTypeIPhoneSE2Gen,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP820
+ {RTCDeviceTypeIPhone12ProMax,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP832
+ {RTCDeviceTypeIPhone12Pro,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP831
+ {RTCDeviceTypeIPhone12,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP830
+ {RTCDeviceTypeIPhone12Mini,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP829
+ {RTCDeviceTypeIPhone11ProMax,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP806
+ {RTCDeviceTypeIPhone11Pro,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP805
+ {RTCDeviceTypeIPhone11,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP804
+ {RTCDeviceTypeIPhoneXS,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP779
+ {RTCDeviceTypeIPhoneXSMax,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP780
+ {RTCDeviceTypeIPhoneXR,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP781
+ {RTCDeviceTypeIPhoneX,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP770
+ {RTCDeviceTypeIPhone8,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP767
+ {RTCDeviceTypeIPhone8Plus,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP768
+ {RTCDeviceTypeIPhone7,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_1}}, // https://support.apple.com/kb/SP743
+ {RTCDeviceTypeIPhone7Plus,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_1}}, // https://support.apple.com/kb/SP744
+ {RTCDeviceTypeIPhoneSE,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP738
+ {RTCDeviceTypeIPhone6S,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP726
+ {RTCDeviceTypeIPhone6SPlus,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP727
+ {RTCDeviceTypeIPhone6,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP705
+ {RTCDeviceTypeIPhone6Plus,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP706
+ {RTCDeviceTypeIPhone5SGSM,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP685
+ {RTCDeviceTypeIPhone5SGSM_CDMA,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP685
+ {RTCDeviceTypeIPhone5GSM,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP655
+ {RTCDeviceTypeIPhone5GSM_CDMA,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP655
+ {RTCDeviceTypeIPhone5CGSM,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP684
+ {RTCDeviceTypeIPhone5CGSM_CDMA,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP684
+ {RTCDeviceTypeIPhone4S,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP643
+
+ // iPods with at least iOS 9
+ {RTCDeviceTypeIPodTouch7G,
+ {H264Profile::kProfileMain, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP796
+ {RTCDeviceTypeIPodTouch6G,
+ {H264Profile::kProfileMain, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP720
+ {RTCDeviceTypeIPodTouch5G,
+ {H264Profile::kProfileMain, H264Level::kLevel3_1}}, // https://support.apple.com/kb/SP657
+
+ // iPads with at least iOS 9
+ {RTCDeviceTypeIPadAir4Gen,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP828
+ {RTCDeviceTypeIPad8,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP822
+ {RTCDeviceTypeIPadPro4Gen12Inch,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP815
+ {RTCDeviceTypeIPadPro4Gen11Inch,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP814
+ {RTCDeviceTypeIPadAir3Gen,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP787
+ {RTCDeviceTypeIPadMini5Gen,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP788
+ {RTCDeviceTypeIPadPro3Gen12Inch,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP785
+ {RTCDeviceTypeIPadPro3Gen11Inch,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP784
+ {RTCDeviceTypeIPad7Gen10Inch,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP807
+ {RTCDeviceTypeIPad2Wifi,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP622
+ {RTCDeviceTypeIPad2GSM,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP622
+ {RTCDeviceTypeIPad2CDMA,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP622
+ {RTCDeviceTypeIPad2Wifi2,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP622
+ {RTCDeviceTypeIPadMiniWifi,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP661
+ {RTCDeviceTypeIPadMiniGSM,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP661
+ {RTCDeviceTypeIPadMiniGSM_CDMA,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP661
+ {RTCDeviceTypeIPad3Wifi,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP647
+ {RTCDeviceTypeIPad3GSM_CDMA,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP647
+ {RTCDeviceTypeIPad3GSM,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP647
+ {RTCDeviceTypeIPad4Wifi,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP662
+ {RTCDeviceTypeIPad4GSM,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP662
+ {RTCDeviceTypeIPad4GSM_CDMA,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP662
+ {RTCDeviceTypeIPad5,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP751
+ {RTCDeviceTypeIPad6,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP774
+ {RTCDeviceTypeIPadAirWifi,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP692
+ {RTCDeviceTypeIPadAirCellular,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP692
+ {RTCDeviceTypeIPadAirWifiCellular,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP692
+ {RTCDeviceTypeIPadAir2,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP708
+ {RTCDeviceTypeIPadMini2GWifi,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP693
+ {RTCDeviceTypeIPadMini2GCellular,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP693
+ {RTCDeviceTypeIPadMini2GWifiCellular,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP693
+ {RTCDeviceTypeIPadMini3,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP709
+ {RTCDeviceTypeIPadMini4,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP725
+ {RTCDeviceTypeIPadPro9Inch,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP739
+ {RTCDeviceTypeIPadPro12Inch,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/sp723
+ {RTCDeviceTypeIPadPro12Inch2,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP761
+ {RTCDeviceTypeIPadPro10Inch,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP762
+ {RTCDeviceTypeIPadMini6,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP850
+ {RTCDeviceTypeIPad9,
+ {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP849
+ {RTCDeviceTypeIPadPro5Gen12Inch,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP844
+ {RTCDeviceTypeIPadPro5Gen11Inch,
+ {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP843
+};
+
+absl::optional<H264ProfileLevelId> FindMaxSupportedProfileForDevice(RTCDeviceType deviceType) {
+ const auto* result = std::find_if(std::begin(kH264MaxSupportedProfiles),
+ std::end(kH264MaxSupportedProfiles),
+ [deviceType](const SupportedH264Profile& supportedProfile) {
+ return supportedProfile.deviceType == deviceType;
+ });
+ if (result != std::end(kH264MaxSupportedProfiles)) {
+ return result->profile;
+ }
+ return absl::nullopt;
+}
+
+} // namespace
+
+@implementation UIDevice (H264Profile)
+
++ (absl::optional<webrtc::H264ProfileLevelId>)maxSupportedH264Profile {
+ return FindMaxSupportedProfileForDevice([self deviceType]);
+}
+
+@end
diff --git a/third_party/libwebrtc/sdk/objc/components/video_codec/helpers.cc b/third_party/libwebrtc/sdk/objc/components/video_codec/helpers.cc
new file mode 100644
index 0000000000..ac957f1b49
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_codec/helpers.cc
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#include "helpers.h"
+
+#include <string>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+// Copies characters from a CFStringRef into a std::string.
+std::string CFStringToString(const CFStringRef cf_string) {
+ RTC_DCHECK(cf_string);
+ std::string std_string;
+ // Get the size needed for UTF8 plus terminating character.
+ size_t buffer_size =
+ CFStringGetMaximumSizeForEncoding(CFStringGetLength(cf_string),
+ kCFStringEncodingUTF8) +
+ 1;
+ std::unique_ptr<char[]> buffer(new char[buffer_size]);
+ if (CFStringGetCString(cf_string, buffer.get(), buffer_size,
+ kCFStringEncodingUTF8)) {
+ // Copy over the characters.
+ std_string.assign(buffer.get());
+ }
+ return std_string;
+}
+
+// Convenience function for setting a VT property.
+void SetVTSessionProperty(VTSessionRef session,
+ CFStringRef key,
+ int32_t value) {
+ CFNumberRef cfNum =
+ CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &value);
+ OSStatus status = VTSessionSetProperty(session, key, cfNum);
+ CFRelease(cfNum);
+ if (status != noErr) {
+ std::string key_string = CFStringToString(key);
+ RTC_LOG(LS_ERROR) << "VTSessionSetProperty failed to set: " << key_string
+ << " to " << value << ": " << status;
+ }
+}
+
+// Convenience function for setting a VT property.
+void SetVTSessionProperty(VTSessionRef session,
+ CFStringRef key,
+ uint32_t value) {
+ int64_t value_64 = value;
+ CFNumberRef cfNum =
+ CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt64Type, &value_64);
+ OSStatus status = VTSessionSetProperty(session, key, cfNum);
+ CFRelease(cfNum);
+ if (status != noErr) {
+ std::string key_string = CFStringToString(key);
+ RTC_LOG(LS_ERROR) << "VTSessionSetProperty failed to set: " << key_string
+ << " to " << value << ": " << status;
+ }
+}
+
+// Convenience function for setting a VT property.
+void SetVTSessionProperty(VTSessionRef session, CFStringRef key, bool value) {
+ CFBooleanRef cf_bool = (value) ? kCFBooleanTrue : kCFBooleanFalse;
+ OSStatus status = VTSessionSetProperty(session, key, cf_bool);
+ if (status != noErr) {
+ std::string key_string = CFStringToString(key);
+ RTC_LOG(LS_ERROR) << "VTSessionSetProperty failed to set: " << key_string
+ << " to " << value << ": " << status;
+ }
+}
+
+// Convenience function for setting a VT property.
+void SetVTSessionProperty(VTSessionRef session,
+ CFStringRef key,
+ CFStringRef value) {
+ OSStatus status = VTSessionSetProperty(session, key, value);
+ if (status != noErr) {
+ std::string key_string = CFStringToString(key);
+ std::string val_string = CFStringToString(value);
+ RTC_LOG(LS_ERROR) << "VTSessionSetProperty failed to set: " << key_string
+ << " to " << val_string << ": " << status;
+ }
+}
diff --git a/third_party/libwebrtc/sdk/objc/components/video_codec/helpers.h b/third_party/libwebrtc/sdk/objc/components/video_codec/helpers.h
new file mode 100644
index 0000000000..7c9ef1cd87
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_codec/helpers.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef SDK_OBJC_FRAMEWORK_CLASSES_VIDEOTOOLBOX_HELPERS_H_
+#define SDK_OBJC_FRAMEWORK_CLASSES_VIDEOTOOLBOX_HELPERS_H_
+
+#include <CoreFoundation/CoreFoundation.h>
+#include <VideoToolbox/VideoToolbox.h>
+#include <string>
+
+// Convenience function for creating a dictionary.
+inline CFDictionaryRef CreateCFTypeDictionary(CFTypeRef* keys,
+ CFTypeRef* values,
+ size_t size) {
+ return CFDictionaryCreate(kCFAllocatorDefault, keys, values, size,
+ &kCFTypeDictionaryKeyCallBacks,
+ &kCFTypeDictionaryValueCallBacks);
+}
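+
+// Illustrative usage of CreateCFTypeDictionary, mirroring how the H264
+// encoder forces a keyframe:
+//   CFTypeRef keys[] = {kVTEncodeFrameOptionKey_ForceKeyFrame};
+//   CFTypeRef values[] = {kCFBooleanTrue};
+//   CFDictionaryRef options = CreateCFTypeDictionary(keys, values, 1);
+//   ...
+//   CFRelease(options);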
+
+// Copies characters from a CFStringRef into a std::string.
+std::string CFStringToString(CFStringRef cf_string);
+
+// Convenience function for setting a VT property.
+void SetVTSessionProperty(VTSessionRef session, CFStringRef key, int32_t value);
+
+// Convenience function for setting a VT property.
+void SetVTSessionProperty(VTSessionRef session,
+ CFStringRef key,
+ uint32_t value);
+
+// Convenience function for setting a VT property.
+void SetVTSessionProperty(VTSessionRef session, CFStringRef key, bool value);
+
+// Convenience function for setting a VT property.
+void SetVTSessionProperty(VTSessionRef session,
+ CFStringRef key,
+ CFStringRef value);
+
+#endif // SDK_OBJC_FRAMEWORK_CLASSES_VIDEOTOOLBOX_HELPERS_H_
diff --git a/third_party/libwebrtc/sdk/objc/components/video_codec/nalu_rewriter.cc b/third_party/libwebrtc/sdk/objc/components/video_codec/nalu_rewriter.cc
new file mode 100644
index 0000000000..b7330e1f9c
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_codec/nalu_rewriter.cc
@@ -0,0 +1,327 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#include "sdk/objc/components/video_codec/nalu_rewriter.h"
+
+#include <CoreFoundation/CoreFoundation.h>
+#include <memory>
+#include <vector>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+using H264::kAud;
+using H264::kSps;
+using H264::NaluIndex;
+using H264::NaluType;
+using H264::ParseNaluType;
+
+const char kAnnexBHeaderBytes[4] = {0, 0, 0, 1};
+const size_t kAvccHeaderByteSize = sizeof(uint32_t);
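+
+// Illustration of the two NALU framings handled in this file (byte values
+// below are examples only):
+//   Annex B: 00 00 00 01 | NALU payload | 00 00 00 01 | next payload | ...
+//   AVCC:    ll ll ll ll | NALU payload | ...   (ll ll ll ll = payload size
+//            as a 4-byte big-endian integer)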
+
+bool H264CMSampleBufferToAnnexBBuffer(CMSampleBufferRef avcc_sample_buffer,
+ bool is_keyframe,
+ rtc::Buffer* annexb_buffer) {
+ RTC_DCHECK(avcc_sample_buffer);
+
+ // Get format description from the sample buffer.
+ CMVideoFormatDescriptionRef description =
+ CMSampleBufferGetFormatDescription(avcc_sample_buffer);
+ if (description == nullptr) {
+ RTC_LOG(LS_ERROR) << "Failed to get sample buffer's description.";
+ return false;
+ }
+
+ // Get parameter set information.
+ int nalu_header_size = 0;
+ size_t param_set_count = 0;
+ OSStatus status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(
+ description, 0, nullptr, nullptr, &param_set_count, &nalu_header_size);
+ if (status != noErr) {
+ RTC_LOG(LS_ERROR) << "Failed to get parameter set.";
+ return false;
+ }
+ RTC_CHECK_EQ(nalu_header_size, kAvccHeaderByteSize);
+ RTC_DCHECK_EQ(param_set_count, 2);
+
+ // Truncate any previous data in the buffer without changing its capacity.
+ annexb_buffer->SetSize(0);
+
+ // Place all parameter sets at the front of buffer.
+ if (is_keyframe) {
+ size_t param_set_size = 0;
+ const uint8_t* param_set = nullptr;
+ for (size_t i = 0; i < param_set_count; ++i) {
+ status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(
+ description, i, &param_set, &param_set_size, nullptr, nullptr);
+ if (status != noErr) {
+ RTC_LOG(LS_ERROR) << "Failed to get parameter set.";
+ return false;
+ }
+ // Update buffer.
+ annexb_buffer->AppendData(kAnnexBHeaderBytes, sizeof(kAnnexBHeaderBytes));
+ annexb_buffer->AppendData(reinterpret_cast<const char*>(param_set),
+ param_set_size);
+ }
+ }
+
+ // Get block buffer from the sample buffer.
+ CMBlockBufferRef block_buffer =
+ CMSampleBufferGetDataBuffer(avcc_sample_buffer);
+ if (block_buffer == nullptr) {
+ RTC_LOG(LS_ERROR) << "Failed to get sample buffer's block buffer.";
+ return false;
+ }
+ CMBlockBufferRef contiguous_buffer = nullptr;
+ // Make sure block buffer is contiguous.
+ if (!CMBlockBufferIsRangeContiguous(block_buffer, 0, 0)) {
+ status = CMBlockBufferCreateContiguous(
+ nullptr, block_buffer, nullptr, nullptr, 0, 0, 0, &contiguous_buffer);
+ if (status != noErr) {
+ RTC_LOG(LS_ERROR) << "Failed to flatten non-contiguous block buffer: "
+ << status;
+ return false;
+ }
+ } else {
+ contiguous_buffer = block_buffer;
+ // Retain to make cleanup easier.
+ CFRetain(contiguous_buffer);
+ block_buffer = nullptr;
+ }
+
+ // Now copy the actual data.
+ char* data_ptr = nullptr;
+ size_t block_buffer_size = CMBlockBufferGetDataLength(contiguous_buffer);
+ status = CMBlockBufferGetDataPointer(contiguous_buffer, 0, nullptr, nullptr,
+ &data_ptr);
+ if (status != noErr) {
+ RTC_LOG(LS_ERROR) << "Failed to get block buffer data.";
+ CFRelease(contiguous_buffer);
+ return false;
+ }
+ size_t bytes_remaining = block_buffer_size;
+ while (bytes_remaining > 0) {
+    // The size type here must match `nalu_header_size`; we expect 4 bytes.
+ // Read the length of the next packet of data. Must convert from big endian
+ // to host endian.
+ RTC_DCHECK_GE(bytes_remaining, (size_t)nalu_header_size);
+ uint32_t* uint32_data_ptr = reinterpret_cast<uint32_t*>(data_ptr);
+ uint32_t packet_size = CFSwapInt32BigToHost(*uint32_data_ptr);
+ // Update buffer.
+ annexb_buffer->AppendData(kAnnexBHeaderBytes, sizeof(kAnnexBHeaderBytes));
+ annexb_buffer->AppendData(data_ptr + nalu_header_size, packet_size);
+
+ size_t bytes_written = packet_size + sizeof(kAnnexBHeaderBytes);
+ bytes_remaining -= bytes_written;
+ data_ptr += bytes_written;
+ }
+ RTC_DCHECK_EQ(bytes_remaining, (size_t)0);
+
+ CFRelease(contiguous_buffer);
+ return true;
+}
+
+bool H264AnnexBBufferToCMSampleBuffer(const uint8_t* annexb_buffer,
+ size_t annexb_buffer_size,
+ CMVideoFormatDescriptionRef video_format,
+ CMSampleBufferRef* out_sample_buffer,
+ CMMemoryPoolRef memory_pool) {
+ RTC_DCHECK(annexb_buffer);
+ RTC_DCHECK(out_sample_buffer);
+ RTC_DCHECK(video_format);
+ *out_sample_buffer = nullptr;
+
+ AnnexBBufferReader reader(annexb_buffer, annexb_buffer_size);
+ if (reader.SeekToNextNaluOfType(kSps)) {
+ // Buffer contains an SPS NALU - skip it and the following PPS
+ const uint8_t* data;
+ size_t data_len;
+ if (!reader.ReadNalu(&data, &data_len)) {
+ RTC_LOG(LS_ERROR) << "Failed to read SPS";
+ return false;
+ }
+ if (!reader.ReadNalu(&data, &data_len)) {
+ RTC_LOG(LS_ERROR) << "Failed to read PPS";
+ return false;
+ }
+ } else {
+ // No SPS NALU - start reading from the first NALU in the buffer
+ reader.SeekToStart();
+ }
+
+ // Allocate memory as a block buffer.
+ CMBlockBufferRef block_buffer = nullptr;
+ CFAllocatorRef block_allocator = CMMemoryPoolGetAllocator(memory_pool);
+ OSStatus status = CMBlockBufferCreateWithMemoryBlock(
+ kCFAllocatorDefault, nullptr, reader.BytesRemaining(), block_allocator,
+ nullptr, 0, reader.BytesRemaining(), kCMBlockBufferAssureMemoryNowFlag,
+ &block_buffer);
+ if (status != kCMBlockBufferNoErr) {
+ RTC_LOG(LS_ERROR) << "Failed to create block buffer.";
+ return false;
+ }
+
+ // Make sure block buffer is contiguous.
+ CMBlockBufferRef contiguous_buffer = nullptr;
+ if (!CMBlockBufferIsRangeContiguous(block_buffer, 0, 0)) {
+ status = CMBlockBufferCreateContiguous(kCFAllocatorDefault, block_buffer,
+ block_allocator, nullptr, 0, 0, 0,
+ &contiguous_buffer);
+ if (status != noErr) {
+ RTC_LOG(LS_ERROR) << "Failed to flatten non-contiguous block buffer: "
+ << status;
+ CFRelease(block_buffer);
+ return false;
+ }
+ } else {
+ contiguous_buffer = block_buffer;
+ block_buffer = nullptr;
+ }
+
+ // Get a raw pointer into allocated memory.
+ size_t block_buffer_size = 0;
+ char* data_ptr = nullptr;
+ status = CMBlockBufferGetDataPointer(contiguous_buffer, 0, nullptr,
+ &block_buffer_size, &data_ptr);
+ if (status != kCMBlockBufferNoErr) {
+ RTC_LOG(LS_ERROR) << "Failed to get block buffer data pointer.";
+ CFRelease(contiguous_buffer);
+ return false;
+ }
+ RTC_DCHECK(block_buffer_size == reader.BytesRemaining());
+
+ // Write Avcc NALUs into block buffer memory.
+ AvccBufferWriter writer(reinterpret_cast<uint8_t*>(data_ptr),
+ block_buffer_size);
+ while (reader.BytesRemaining() > 0) {
+ const uint8_t* nalu_data_ptr = nullptr;
+ size_t nalu_data_size = 0;
+ if (reader.ReadNalu(&nalu_data_ptr, &nalu_data_size)) {
+ writer.WriteNalu(nalu_data_ptr, nalu_data_size);
+ }
+ }
+
+ // Create sample buffer.
+ status = CMSampleBufferCreate(kCFAllocatorDefault, contiguous_buffer, true,
+ nullptr, nullptr, video_format, 1, 0, nullptr,
+ 0, nullptr, out_sample_buffer);
+ if (status != noErr) {
+ RTC_LOG(LS_ERROR) << "Failed to create sample buffer.";
+ CFRelease(contiguous_buffer);
+ return false;
+ }
+ CFRelease(contiguous_buffer);
+ return true;
+}
+
+CMVideoFormatDescriptionRef CreateVideoFormatDescription(
+ const uint8_t* annexb_buffer,
+ size_t annexb_buffer_size) {
+ const uint8_t* param_set_ptrs[2] = {};
+ size_t param_set_sizes[2] = {};
+ AnnexBBufferReader reader(annexb_buffer, annexb_buffer_size);
+  // Skip everything before the SPS, then read the SPS and PPS.
+ if (!reader.SeekToNextNaluOfType(kSps)) {
+ return nullptr;
+ }
+ if (!reader.ReadNalu(&param_set_ptrs[0], &param_set_sizes[0])) {
+ RTC_LOG(LS_ERROR) << "Failed to read SPS";
+ return nullptr;
+ }
+ if (!reader.ReadNalu(&param_set_ptrs[1], &param_set_sizes[1])) {
+ RTC_LOG(LS_ERROR) << "Failed to read PPS";
+ return nullptr;
+ }
+
+ // Parse the SPS and PPS into a CMVideoFormatDescription.
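+  // The trailing 4 below is the NAL unit length-prefix size (AVCC framing),
+  // matching kAvccHeaderByteSize.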
+ CMVideoFormatDescriptionRef description = nullptr;
+ OSStatus status = CMVideoFormatDescriptionCreateFromH264ParameterSets(
+ kCFAllocatorDefault, 2, param_set_ptrs, param_set_sizes, 4, &description);
+ if (status != noErr) {
+ RTC_LOG(LS_ERROR) << "Failed to create video format description.";
+ return nullptr;
+ }
+ return description;
+}
+
+AnnexBBufferReader::AnnexBBufferReader(const uint8_t* annexb_buffer,
+ size_t length)
+ : start_(annexb_buffer), length_(length) {
+ RTC_DCHECK(annexb_buffer);
+ offsets_ = H264::FindNaluIndices(annexb_buffer, length);
+ offset_ = offsets_.begin();
+}
+
+AnnexBBufferReader::~AnnexBBufferReader() = default;
+
+bool AnnexBBufferReader::ReadNalu(const uint8_t** out_nalu,
+ size_t* out_length) {
+ RTC_DCHECK(out_nalu);
+ RTC_DCHECK(out_length);
+ *out_nalu = nullptr;
+ *out_length = 0;
+
+ if (offset_ == offsets_.end()) {
+ return false;
+ }
+ *out_nalu = start_ + offset_->payload_start_offset;
+ *out_length = offset_->payload_size;
+ ++offset_;
+ return true;
+}
+
+size_t AnnexBBufferReader::BytesRemaining() const {
+ if (offset_ == offsets_.end()) {
+ return 0;
+ }
+ return length_ - offset_->start_offset;
+}
+
+void AnnexBBufferReader::SeekToStart() {
+ offset_ = offsets_.begin();
+}
+
+bool AnnexBBufferReader::SeekToNextNaluOfType(NaluType type) {
+ for (; offset_ != offsets_.end(); ++offset_) {
+ if (offset_->payload_size < 1)
+ continue;
+ if (ParseNaluType(*(start_ + offset_->payload_start_offset)) == type)
+ return true;
+ }
+ return false;
+}
+
+AvccBufferWriter::AvccBufferWriter(uint8_t* const avcc_buffer, size_t length)
+ : start_(avcc_buffer), offset_(0), length_(length) {
+ RTC_DCHECK(avcc_buffer);
+}
+
+bool AvccBufferWriter::WriteNalu(const uint8_t* data, size_t data_size) {
+ // Check if we can write this length of data.
+ if (data_size + kAvccHeaderByteSize > BytesRemaining()) {
+ return false;
+ }
+ // Write length header, which needs to be big endian.
+ uint32_t big_endian_length = CFSwapInt32HostToBig(data_size);
+ memcpy(start_ + offset_, &big_endian_length, sizeof(big_endian_length));
+ offset_ += sizeof(big_endian_length);
+ // Write data.
+ memcpy(start_ + offset_, data, data_size);
+ offset_ += data_size;
+ return true;
+}
+
+size_t AvccBufferWriter::BytesRemaining() const {
+ return length_ - offset_;
+}
+
+} // namespace webrtc
diff --git a/third_party/libwebrtc/sdk/objc/components/video_codec/nalu_rewriter.h b/third_party/libwebrtc/sdk/objc/components/video_codec/nalu_rewriter.h
new file mode 100644
index 0000000000..c6474971e2
--- /dev/null
+++ b/third_party/libwebrtc/sdk/objc/components/video_codec/nalu_rewriter.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef SDK_OBJC_FRAMEWORK_CLASSES_VIDEOTOOLBOX_NALU_REWRITER_H_
+#define SDK_OBJC_FRAMEWORK_CLASSES_VIDEOTOOLBOX_NALU_REWRITER_H_
+
+#include "modules/video_coding/codecs/h264/include/h264.h"
+
+#include <CoreMedia/CoreMedia.h>
+#include <vector>
+
+#include "common_video/h264/h264_common.h"
+#include "rtc_base/buffer.h"
+
+using webrtc::H264::NaluIndex;
+
+namespace webrtc {
+
+// Converts a sample buffer emitted from the VideoToolbox encoder into a buffer
+// suitable for RTP. The sample buffer is in AVCC format, whereas the RTP
+// buffer needs to be in Annex B format. Data is written directly to
+// `annexb_buffer`.
+bool H264CMSampleBufferToAnnexBBuffer(CMSampleBufferRef avcc_sample_buffer,
+ bool is_keyframe,
+ rtc::Buffer* annexb_buffer);
+
+// Converts a buffer received from RTP into a sample buffer suitable for the
+// VideoToolbox decoder. The RTP buffer is in Annex B format, whereas the
+// sample buffer is in AVCC format.
+// If `is_keyframe` is true then `video_format` is ignored since the format will
+// be read from the buffer. Otherwise `video_format` must be provided.
+// Caller is responsible for releasing the created sample buffer.
+bool H264AnnexBBufferToCMSampleBuffer(const uint8_t* annexb_buffer,
+ size_t annexb_buffer_size,
+ CMVideoFormatDescriptionRef video_format,
+ CMSampleBufferRef* out_sample_buffer,
+ CMMemoryPoolRef memory_pool);
+
+// Returns a video format description created from the sps/pps information in
+// the Annex B buffer. If there is no such information, nullptr is returned.
+// The caller is responsible for releasing the description.
+CMVideoFormatDescriptionRef CreateVideoFormatDescription(
+ const uint8_t* annexb_buffer,
+ size_t annexb_buffer_size);
+
+// Helper class for reading NALUs from an RTP Annex B buffer.
+class AnnexBBufferReader final {
+ public:
+ AnnexBBufferReader(const uint8_t* annexb_buffer, size_t length);
+ ~AnnexBBufferReader();
+ AnnexBBufferReader(const AnnexBBufferReader& other) = delete;
+ void operator=(const AnnexBBufferReader& other) = delete;
+
+ // Returns a pointer to the beginning of the next NALU slice without the
+ // header bytes and its length. Returns false if no more slices remain.
+ bool ReadNalu(const uint8_t** out_nalu, size_t* out_length);
+
+ // Returns the number of unread NALU bytes, including the size of the header.
+ // If the buffer has no remaining NALUs this will return zero.
+ size_t BytesRemaining() const;
+
+ // Reset the reader to start reading from the first NALU
+ void SeekToStart();
+
+ // Seek to the next position that holds a NALU of the desired type,
+ // or the end if no such NALU is found.
+ // Return true if a NALU of the desired type is found, false if we
+  // reached the end instead.
+ bool SeekToNextNaluOfType(H264::NaluType type);
+
+ private:
+  // Returns the next offset that contains NALU data.
+ size_t FindNextNaluHeader(const uint8_t* start,
+ size_t length,
+ size_t offset) const;
+
+ const uint8_t* const start_;
+ std::vector<NaluIndex> offsets_;
+ std::vector<NaluIndex>::iterator offset_;
+ const size_t length_;
+};
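+
+// Illustrative read loop, mirroring the usage in nalu_rewriter.cc:
+//   AnnexBBufferReader reader(annexb_buffer, annexb_buffer_size);
+//   const uint8_t* nalu = nullptr;
+//   size_t nalu_size = 0;
+//   while (reader.ReadNalu(&nalu, &nalu_size)) {
+//     // `nalu` points at `nalu_size` payload bytes without the start code.
+//   }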
+
+// Helper class for writing NALUs using avcc format into a buffer.
+class AvccBufferWriter final {
+ public:
+ AvccBufferWriter(uint8_t* const avcc_buffer, size_t length);
+ ~AvccBufferWriter() {}
+ AvccBufferWriter(const AvccBufferWriter& other) = delete;
+ void operator=(const AvccBufferWriter& other) = delete;
+
+ // Writes the data slice into the buffer. Returns false if there isn't
+ // enough space left.
+ bool WriteNalu(const uint8_t* data, size_t data_size);
+
+ // Returns the unused bytes in the buffer.
+ size_t BytesRemaining() const;
+
+ private:
+ uint8_t* const start_;
+ size_t offset_;
+ const size_t length_;
+};
+
+} // namespace webrtc
+
+#endif // SDK_OBJC_FRAMEWORK_CLASSES_VIDEOTOOLBOX_NALU_REWRITER_H_