/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "common_video/h264/sps_parser.h"
#include "common_video/h264/h264_common.h"
#include "rtc_base/arraysize.h"
#include "rtc_base/bit_buffer.h"
#include "rtc_base/buffer.h"
#include "test/gtest.h"
namespace webrtc {
// Example SPS can be generated with ffmpeg. Here's an example set of commands,
// runnable on OS X:
// 1) Generate a video, from the camera:
// ffmpeg -f avfoundation -i "0" -video_size 640x360 camera.mov
//
// 2) Scale the video to the desired size:
// ffmpeg -i camera.mov -vf scale=640x360 scaled.mov
//
// 3) Get just the H.264 bitstream in AnnexB:
// ffmpeg -i scaled.mov -vcodec copy -vbsf h264_mp4toannexb -an out.h264
//
// 4) Open out.h264 and find the SPS, generally everything between the first
// two start codes (0 0 0 1 or 0 0 1). The first byte should be 0x67,
// which should be stripped out before being passed to the parser.
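// The TestSampleSPS* buffers below are such payloads, with the leading 0x67
// NAL header byte already removed.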
static const size_t kSpsBufferMaxSize = 256;
// Generates a fake SPS with basically everything empty but the width/height.
// Pass in a buffer of at least kSpsBufferMaxSize.
// The fake SPS that this generates also always has at least one emulation byte
// at offset 2, since the first two bytes are always 0, and has a 0x3 as the
// level_idc, to make sure the parser doesn't eat all 0x3 bytes.
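// (H264::WriteRbsp, called at the end of this function, converts the raw RBSP
// into escaped form, inserting an emulation-prevention 0x03 byte after any
// 0x00 0x00 pair that precedes a byte of 0x03 or less; the parser is expected
// to strip it back out.)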
void GenerateFakeSps(uint16_t width,
                     uint16_t height,
                     int id,
                     uint32_t log2_max_frame_num_minus4,
                     uint32_t log2_max_pic_order_cnt_lsb_minus4,
                     rtc::Buffer* out_buffer) {
  uint8_t rbsp[kSpsBufferMaxSize] = {0};
  rtc::BitBufferWriter writer(rbsp, kSpsBufferMaxSize);
  // Profile byte.
  writer.WriteUInt8(0);
  // Constraint sets and reserved zero bits.
  writer.WriteUInt8(0);
  // level_idc.
  writer.WriteUInt8(0x3u);
  // seq_parameter_set_id.
  writer.WriteExponentialGolomb(id);
  // Profile is not special, so we skip all the chroma format settings.
  // Now some bit magic.
  // log2_max_frame_num_minus4: ue(v).
  writer.WriteExponentialGolomb(log2_max_frame_num_minus4);
  // pic_order_cnt_type: ue(v). 0 is the type we want.
  writer.WriteExponentialGolomb(0);
  // log2_max_pic_order_cnt_lsb_minus4: ue(v). 0 is fine.
  writer.WriteExponentialGolomb(log2_max_pic_order_cnt_lsb_minus4);
  // max_num_ref_frames: ue(v). 0 is fine.
  writer.WriteExponentialGolomb(0);
  // gaps_in_frame_num_value_allowed_flag: u(1).
  writer.WriteBits(0, 1);
  // Next are width/height. First, calculate the mbs/map_units versions.
  uint16_t width_in_mbs_minus1 = (width + 15) / 16 - 1;
  // For the height, frame_mbs_only_flag will be written as 0 below, so each
  // map unit covers two macroblock rows; hence the division by 2. See the
  // parser for the full calculation.
  uint16_t height_in_map_units_minus1 = ((height + 15) / 16 - 1) / 2;
  // Write each as ue(v).
  writer.WriteExponentialGolomb(width_in_mbs_minus1);
  writer.WriteExponentialGolomb(height_in_map_units_minus1);
  // frame_mbs_only_flag: u(1). Needs to be false.
  writer.WriteBits(0, 1);
  // mb_adaptive_frame_field_flag: u(1).
  writer.WriteBits(0, 1);
  // direct_8x8_inference_flag: u(1).
  writer.WriteBits(0, 1);
  // frame_cropping_flag: u(1). 1, so we can supply crop.
  writer.WriteBits(1, 1);
  // Now we write the left/right/top/bottom crop. For simplicity, we'll put all
  // the crop at the left/top.
  // We picked a 4:2:0 format, so the crops are 1/2 the pixel crop values.
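  // For example, width = 156 means 10 macroblocks (160 luma pixels); the 4
  // pixels of excess are written as a left crop of 4 / 2 = 2, and the parser
  // should recover 160 - 2 * 2 = 156.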
  // Left/right.
  writer.WriteExponentialGolomb(((16 - (width % 16)) % 16) / 2);
  writer.WriteExponentialGolomb(0);
  // Top/bottom.
  writer.WriteExponentialGolomb(((16 - (height % 16)) % 16) / 2);
  writer.WriteExponentialGolomb(0);
  // vui_parameters_present_flag: u(1).
  writer.WriteBits(0, 1);
  // Get the number of bytes written (including the last partial byte).
  size_t byte_count, bit_offset;
  writer.GetCurrentOffset(&byte_count, &bit_offset);
  if (bit_offset > 0) {
    byte_count++;
  }
  out_buffer->Clear();
  H264::WriteRbsp(rbsp, byte_count, out_buffer);
}

TEST(H264SpsParserTest, TestSampleSPSHdLandscape) {
  // SPS for a 1280x720 camera capture from ffmpeg on OS X. Contains
  // emulation bytes but no cropping.
  const uint8_t buffer[] = {0x7A, 0x00, 0x1F, 0xBC, 0xD9, 0x40, 0x50, 0x05,
                            0xBA, 0x10, 0x00, 0x00, 0x03, 0x00, 0xC0, 0x00,
                            0x00, 0x2A, 0xE0, 0xF1, 0x83, 0x19, 0x60};
  absl::optional<SpsParser::SpsState> sps =
      SpsParser::ParseSps(buffer, arraysize(buffer));
  ASSERT_TRUE(sps.has_value());
  EXPECT_EQ(1280u, sps->width);
  EXPECT_EQ(720u, sps->height);
}

TEST(H264SpsParserTest, TestSampleSPSVgaLandscape) {
  // SPS for a 640x360 camera capture from ffmpeg on OS X. Contains emulation
  // bytes and cropping (360 isn't divisible by 16).
  const uint8_t buffer[] = {0x7A, 0x00, 0x1E, 0xBC, 0xD9, 0x40, 0xA0, 0x2F,
                            0xF8, 0x98, 0x40, 0x00, 0x00, 0x03, 0x01, 0x80,
                            0x00, 0x00, 0x56, 0x83, 0xC5, 0x8B, 0x65, 0x80};
  absl::optional<SpsParser::SpsState> sps =
      SpsParser::ParseSps(buffer, arraysize(buffer));
  ASSERT_TRUE(sps.has_value());
  EXPECT_EQ(640u, sps->width);
  EXPECT_EQ(360u, sps->height);
}

TEST(H264SpsParserTest, TestSampleSPSWeirdResolution) {
  // SPS for a 200x400 camera capture from ffmpeg on OS X. Horizontal and
  // vertical crop (neither dimension is divisible by 16).
  const uint8_t buffer[] = {0x7A, 0x00, 0x0D, 0xBC, 0xD9, 0x43, 0x43, 0x3E,
                            0x5E, 0x10, 0x00, 0x00, 0x03, 0x00, 0x60, 0x00,
                            0x00, 0x15, 0xA0, 0xF1, 0x42, 0x99, 0x60};
  absl::optional<SpsParser::SpsState> sps =
      SpsParser::ParseSps(buffer, arraysize(buffer));
  ASSERT_TRUE(sps.has_value());
  EXPECT_EQ(200u, sps->width);
  EXPECT_EQ(400u, sps->height);
}

TEST(H264SpsParserTest, TestSyntheticSPSQvgaLandscape) {
  rtc::Buffer buffer;
  GenerateFakeSps(320u, 180u, 1, 0, 0, &buffer);
  absl::optional<SpsParser::SpsState> sps =
      SpsParser::ParseSps(buffer.data(), buffer.size());
  ASSERT_TRUE(sps.has_value());
  EXPECT_EQ(320u, sps->width);
  EXPECT_EQ(180u, sps->height);
  EXPECT_EQ(1u, sps->id);
}

TEST(H264SpsParserTest, TestSyntheticSPSWeirdResolution) {
  rtc::Buffer buffer;
  GenerateFakeSps(156u, 122u, 2, 0, 0, &buffer);
  absl::optional<SpsParser::SpsState> sps =
      SpsParser::ParseSps(buffer.data(), buffer.size());
  ASSERT_TRUE(sps.has_value());
  EXPECT_EQ(156u, sps->width);
  EXPECT_EQ(122u, sps->height);
  EXPECT_EQ(2u, sps->id);
}

TEST(H264SpsParserTest, TestSampleSPSWithScalingLists) {
  // SPS from a 1920x1080 video. Contains scaling lists (and vertical
  // cropping).
  const uint8_t buffer[] = {0x64, 0x00, 0x2a, 0xad, 0x84, 0x01, 0x0c, 0x20,
                            0x08, 0x61, 0x00, 0x43, 0x08, 0x02, 0x18, 0x40,
                            0x10, 0xc2, 0x00, 0x84, 0x3b, 0x50, 0x3c, 0x01,
                            0x13, 0xf2, 0xcd, 0xc0, 0x40, 0x40, 0x50, 0x00,
                            0x00, 0x00, 0x10, 0x00, 0x00, 0x01, 0xe8, 0x40};
  absl::optional<SpsParser::SpsState> sps =
      SpsParser::ParseSps(buffer, arraysize(buffer));
  ASSERT_TRUE(sps.has_value());
  EXPECT_EQ(1920u, sps->width);
  EXPECT_EQ(1080u, sps->height);
}

TEST(H264SpsParserTest, TestLog2MaxFrameNumMinus4) {
  rtc::Buffer buffer;
  GenerateFakeSps(320u, 180u, 1, 0, 0, &buffer);
  absl::optional<SpsParser::SpsState> sps =
      SpsParser::ParseSps(buffer.data(), buffer.size());
  ASSERT_TRUE(sps.has_value());
  EXPECT_EQ(320u, sps->width);
  EXPECT_EQ(180u, sps->height);
  EXPECT_EQ(1u, sps->id);
  EXPECT_EQ(4u, sps->log2_max_frame_num);

  GenerateFakeSps(320u, 180u, 1, 28, 0, &buffer);
  sps = SpsParser::ParseSps(buffer.data(), buffer.size());
  ASSERT_TRUE(sps.has_value());
  EXPECT_EQ(320u, sps->width);
  EXPECT_EQ(180u, sps->height);
  EXPECT_EQ(1u, sps->id);
  EXPECT_EQ(32u, sps->log2_max_frame_num);
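
  // A value of 29 would make log2_max_frame_num exceed 32, which the parser
  // rejects.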
  GenerateFakeSps(320u, 180u, 1, 29, 0, &buffer);
  EXPECT_FALSE(SpsParser::ParseSps(buffer.data(), buffer.size()));
}

TEST(H264SpsParserTest, TestLog2MaxPicOrderCntMinus4) {
  rtc::Buffer buffer;
  GenerateFakeSps(320u, 180u, 1, 0, 0, &buffer);
  absl::optional<SpsParser::SpsState> sps =
      SpsParser::ParseSps(buffer.data(), buffer.size());
  ASSERT_TRUE(sps.has_value());
  EXPECT_EQ(320u, sps->width);
  EXPECT_EQ(180u, sps->height);
  EXPECT_EQ(1u, sps->id);
  EXPECT_EQ(4u, sps->log2_max_pic_order_cnt_lsb);

  GenerateFakeSps(320u, 180u, 1, 0, 28, &buffer);
  sps = SpsParser::ParseSps(buffer.data(), buffer.size());
  ASSERT_TRUE(sps.has_value());
  EXPECT_EQ(320u, sps->width);
  EXPECT_EQ(180u, sps->height);
  EXPECT_EQ(1u, sps->id);
  EXPECT_EQ(32u, sps->log2_max_pic_order_cnt_lsb);
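
  // Likewise, 29 would push log2_max_pic_order_cnt_lsb past 32 and must be
  // rejected.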
  GenerateFakeSps(320u, 180u, 1, 0, 29, &buffer);
  EXPECT_FALSE(SpsParser::ParseSps(buffer.data(), buffer.size()));
}

}  // namespace webrtc